This article describes how to deploy Helm charts through the Kubernetes API (via the Java fabric8io/kubernetes-client) in an intranet environment, using k3s-helm-controller to avoid helm command-line operations. It also documents a simple way to provide the Helm repository.
Requirements
I wanted to deploy kube-prometheus-stack via Helm from code. After some research, k3s-helm-controller turned out to fit this need well: it manages HelmChart as a Kubernetes CRD, so Helm charts can be managed entirely through the Kubernetes API.
On top of that, I also wanted the whole deployment to work inside an intranet. Besides a private Docker image registry, using Helm also requires a Helm repository. Since I only need to deploy a single chart (kube-prometheus-stack) and did not want to add yet another component, I serve the repository directly from a file API in my own code (any HTTP file server that follows the repository layout would also work). This article records the caveats of that process.
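A Helm repository is just an HTTP endpoint serving an index.yaml plus the packaged chart .tgz files, so almost any static file handler qualifies. Below is a minimal sketch of such a file API using only the JDK's built-in com.sun.net.httpserver; the class name, port, and directory are illustrative assumptions, and the index.yaml can be generated once, offline, with `helm repo index`.

```java
import com.sun.net.httpserver.HttpServer;
import java.io.OutputStream;
import java.net.InetSocketAddress;
import java.nio.file.Files;
import java.nio.file.Path;

public class ChartRepoServer {
    public static void main(String[] args) throws Exception {
        // Hypothetical directory holding index.yaml and kube-prometheus-stack-*.tgz
        Path repoDir = Path.of("/data/charts");
        HttpServer server = HttpServer.create(new InetSocketAddress(8879), 0);
        server.createContext("/", exchange -> {
            // Map the request path onto the repo directory, refusing path escapes
            Path file = repoDir
                    .resolve(exchange.getRequestURI().getPath().substring(1))
                    .normalize();
            if (!file.startsWith(repoDir) || !Files.isRegularFile(file)) {
                exchange.sendResponseHeaders(404, -1);
                exchange.close();
                return;
            }
            byte[] body = Files.readAllBytes(file);
            exchange.sendResponseHeaders(200, body.length);
            try (OutputStream os = exchange.getResponseBody()) {
                os.write(body);
            }
        });
        server.start();
    }
}
```

A HelmChart's spec.repo then points at this server's base URL; Helm fetches <repo>/index.yaml first and downloads the chart archive from the URL listed there, so the URLs inside index.yaml must be reachable from inside the cluster.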
Deploy k3s-helm-controller
Create the kube-config ConfigMap
The deployment needs to read the /root/.kube/config file; the script below that creates the kube-config ConfigMap may not apply to your environment as-is.
```bash
# Get the IP address of the Kubernetes master node
K8S_MASTER_IP=$(kubectl get nodes -o wide | awk 'NR==2 {print $6}')
# Read the kubeconfig file content into a variable
CONFIG_CONTENT=$(cat /root/.kube/config)
# Replace 127.0.0.1 with the master node IP using sed
NEW_CONFIG_CONTENT=$(echo "$CONFIG_CONTENT" | sed "s/127.0.0.1/$K8S_MASTER_IP/g")
# Publish it as a ConfigMap with kubectl create configmap
echo -e "$NEW_CONFIG_CONTENT" | kubectl -n=kube-system create configmap kube-config --from-file=config=/dev/stdin
```
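Since the point of the article is driving everything from Java, the same ConfigMap can also be created with the fabric8 client instead of the shell script. A minimal sketch under the same assumptions as the script above (kubeconfig at /root/.kube/config, API server advertised as 127.0.0.1); the class name is illustrative and the master IP is passed in as an argument rather than derived:

```java
import io.fabric8.kubernetes.api.model.ConfigMapBuilder;
import io.fabric8.kubernetes.client.KubernetesClient;
import io.fabric8.kubernetes.client.KubernetesClientBuilder;
import java.nio.file.Files;
import java.nio.file.Path;

public class CreateKubeConfigMap {
    public static void main(String[] args) throws Exception {
        String masterIp = args[0]; // master node IP, passed in instead of derived
        // Same substitution as the sed command in the script above
        String config = Files.readString(Path.of("/root/.kube/config"))
                .replace("127.0.0.1", masterIp);
        try (KubernetesClient client = new KubernetesClientBuilder().build()) {
            client.configMaps().inNamespace("kube-system").resource(
                    new ConfigMapBuilder()
                            .withNewMetadata().withName("kube-config").endMetadata()
                            .addToData("config", config)
                            .build())
                    .create();
        }
    }
}
```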
Deploy helm-controller
```bash
cat <<EOF >./deploy-cluster-scoped.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: helm-controller
  labels:
    app: helm-controller
spec:
  replicas: 1
  selector:
    matchLabels:
      app: helm-controller
  template:
    metadata:
      labels:
        app: helm-controller
    spec:
      containers:
        - name: helm-controller
          image: rancher/helm-controller:v0.15.4
          command: ["helm-controller"]
          args: ["--kubeconfig", "/root/.kube/config"]
          volumeMounts:
            - name: kube-config
              mountPath: /root/.kube/config
              subPath: config
      volumes:
        - name: kube-config
          configMap:
            name: kube-config
EOF

kubectl -n=kube-system create -f deploy-cluster-scoped.yaml
```
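If you would rather create this Deployment from Java as well, the fabric8 client can load and apply the same manifest. A small sketch, assuming fabric8 kubernetes-client 6.x and that the YAML file written above is on the local disk:

```java
import io.fabric8.kubernetes.api.model.apps.Deployment;
import io.fabric8.kubernetes.client.KubernetesClient;
import io.fabric8.kubernetes.client.KubernetesClientBuilder;
import java.io.FileInputStream;

public class DeployHelmController {
    public static void main(String[] args) throws Exception {
        try (KubernetesClient client = new KubernetesClientBuilder().build();
             FileInputStream manifest = new FileInputStream("deploy-cluster-scoped.yaml")) {
            // Parse the manifest and create it in kube-system, like kubectl create -f
            Deployment controller = client.apps().deployments().load(manifest).item();
            client.apps().deployments().inNamespace("kube-system")
                    .resource(controller).create();
        }
    }
}
```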
Test deploying a HelmChart
```bash
# Deploy
cat <<EOF | kubectl create -f -
apiVersion: helm.cattle.io/v1
kind: HelmChart
metadata:
  name: traefik
  namespace: kube-system
spec:
  chart: stable/traefik
  set:
    rbac.enabled: "true"
    ssl.enabled: "true"
EOF

# Delete
kubectl -n=kube-system delete helmchart traefik
```
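The kubectl commands above map directly onto the fabric8 client, which is how this article manages charts from code. A minimal sketch, assuming fabric8 kubernetes-client 6.x; it registers HelmChart as a generic resource and mirrors the create/delete above:

```java
import io.fabric8.kubernetes.api.model.GenericKubernetesResource;
import io.fabric8.kubernetes.api.model.ObjectMetaBuilder;
import io.fabric8.kubernetes.client.KubernetesClient;
import io.fabric8.kubernetes.client.KubernetesClientBuilder;
import io.fabric8.kubernetes.client.dsl.base.ResourceDefinitionContext;
import java.util.Map;

public class HelmChartOps {
    // Describes the helm.cattle.io/v1 HelmChart CRD to the generic resource API
    static final ResourceDefinitionContext HELM_CHART = new ResourceDefinitionContext.Builder()
            .withGroup("helm.cattle.io")
            .withVersion("v1")
            .withKind("HelmChart")
            .withPlural("helmcharts")
            .withNamespaced(true)
            .build();

    public static void main(String[] args) {
        try (KubernetesClient client = new KubernetesClientBuilder().build()) {
            GenericKubernetesResource chart = new GenericKubernetesResource();
            chart.setApiVersion("helm.cattle.io/v1");
            chart.setKind("HelmChart");
            chart.setMetadata(new ObjectMetaBuilder()
                    .withName("traefik").withNamespace("kube-system").build());
            // Same spec as the YAML above
            chart.setAdditionalProperty("spec", Map.of(
                    "chart", "stable/traefik",
                    "set", Map.of("rbac.enabled", "true", "ssl.enabled", "true")));

            // Deploy
            client.genericKubernetesResources(HELM_CHART)
                    .inNamespace("kube-system")
                    .resource(chart)
                    .create();

            // Delete (equivalent of: kubectl -n=kube-system delete helmchart traefik)
            client.genericKubernetesResources(HELM_CHART)
                    .inNamespace("kube-system")
                    .withName("traefik")
                    .delete();
        }
    }
}
```

helm-controller performs the actual install through a Job in the chart's namespace (typically named helm-install-<chart name>), so install progress can be tracked by polling that Job's status with the same client.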
Deploy Prometheus with helm-controller
The example below installs the kube-prometheus-stack chart into the monitoring namespace from a Helm mirror repository hosted in mainland China.
Any parameter that needs overriding can go under set.
```bash
cat <<EOF >./prometheus-helm-chart.yaml
apiVersion: helm.cattle.io/v1
kind: HelmChart
metadata:
  name: prometheus-stack
  namespace: monitoring
spec:
  #repo: https://charts.grapps.cn
  repo: https://helm-charts.itboon.top/prometheus-community
  chart: kube-prometheus-stack
  targetNamespace: monitoring
  set:
    namespaceOverride: "monitoring"
    alertmanager.service.type: "NodePort"
    grafana.defaultDashboardsTimezone: "Asia/Shanghai"
    grafana.adminPassword: "1qaz@WSX"
    grafana.sidecar.dashboards.folderAnnotation: "folder"
    grafana.sidecar.dashboards.provider.allowUiUpdates: "true"
    grafana.service.nodePort: "30902"
    grafana.service.type: "NodePort"
    prometheus.service.nodePort: "30900"
    prometheus.service.type: "NodePort"
    prometheus.prometheusSpec.additionalScrapeConfigsSecret.enabled: "true"
    prometheus.prometheusSpec.additionalScrapeConfigsSecret.name: "additional-scrape-configs"
    prometheus.prometheusSpec.additionalScrapeConfigsSecret.key: "prometheus-additional.yaml"
    kubelet.serviceMonitor.cAdvisorMetricRelabelings: ""
EOF

# The HelmChart object itself lives in the monitoring namespace, so make sure it exists
kubectl create namespace monitoring --dry-run=client -o yaml | kubectl apply -f -

kubectl apply -f prometheus-helm-chart.yaml

# Uninstall
# kubectl -n=monitoring delete helmchart prometheus-stack
```
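Back to the original goal: the same generic-resource call can create this kube-prometheus-stack HelmChart from Java, with spec.repo pointed at the in-code file API from the beginning of the article instead of the public mirror. A sketch reusing the HELM_CHART context defined in the earlier snippet; the repo URL is a placeholder for wherever the file API is reachable from inside the cluster, and only part of the set map is repeated:

```java
import io.fabric8.kubernetes.api.model.GenericKubernetesResource;
import io.fabric8.kubernetes.api.model.ObjectMetaBuilder;
import io.fabric8.kubernetes.client.KubernetesClient;
import io.fabric8.kubernetes.client.KubernetesClientBuilder;
import java.util.LinkedHashMap;
import java.util.Map;

public class DeployPrometheusStack {
    public static void main(String[] args) {
        try (KubernetesClient client = new KubernetesClientBuilder().build()) {
            Map<String, Object> set = new LinkedHashMap<>();
            set.put("namespaceOverride", "monitoring");
            set.put("grafana.service.type", "NodePort");
            set.put("grafana.service.nodePort", "30902");
            set.put("prometheus.service.type", "NodePort");
            set.put("prometheus.service.nodePort", "30900");
            // ...add the remaining set entries from the YAML above as needed

            GenericKubernetesResource chart = new GenericKubernetesResource();
            chart.setApiVersion("helm.cattle.io/v1");
            chart.setKind("HelmChart");
            chart.setMetadata(new ObjectMetaBuilder()
                    .withName("prometheus-stack").withNamespace("monitoring").build());
            chart.setAdditionalProperty("spec", Map.of(
                    // Placeholder: base URL of the in-code file API serving index.yaml + .tgz
                    "repo", "http://chart-repo.default.svc:8879",
                    "chart", "kube-prometheus-stack",
                    "targetNamespace", "monitoring",
                    "set", set));

            client.genericKubernetesResources(HelmChartOps.HELM_CHART)
                    .inNamespace("monitoring")
                    .resource(chart)
                    .create();
        }
    }
}
```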