Introduction
With the rise of cloud computing, Kubernetes (K8s) has become the de facto standard for modern microservice architectures. Kubernetes controllers are a key building block of its automated operations: they watch for changes to resources in the cluster and take the corresponding actions. Learning to develop Kubernetes controllers not only improves the extensibility of your systems, it also deepens your understanding of the Kubernetes ecosystem.
Starting from scratch, this article walks through the full workflow of developing a Kubernetes controller, building its image, and deploying it, so you can get up to speed quickly and apply it in a real production environment. In this article you will learn:
- How to initialize a Kubernetes controller project with kubebuilder
- How to write the core control logic and manage custom resources
- How to debug locally, monitor the controller, and tune its performance
- How to build a Docker image and push it to an image registry
- How to deploy and manage the controller with Helm
1. Initializing the Project
$ go version
go version go1.23.0 linux/amd64
$ mkdir lb-layer7
$ cd lb-layer7/
$ go mod init lb-layer7
...
$ kubebuilder version
Version: main.version{KubeBuilderVersion:"4.2.0", KubernetesVendor:"1.31.0", GitCommit:"c7cde5172dc8271267dbf2899e65ef6f9d30f91e", BuildDate:"2024-08-17T09:41:45Z", GoOs:"linux", GoArch:"amd64"}
$ kubebuilder init --domain k8s.qihoo.net
...
# Do not create a Resource; use the standard Ingress resource instead
$ kubebuilder create api --group network --version v1alpha1 --kind Ingress
INFO Create Resource [y/n]
n
INFO Create Controller [y/n]
y
2. Writing the Core Code
- cmd/main.go
// Note the port: metricsAddr, defined via a flag, configures the metrics server that ships with controller-runtime.
flag.StringVar(&metricsAddr, "metrics-bind-address", ":8888", "The address the metrics endpoint binds to. "+
	"Use :8443 for HTTPS or :8080 for HTTP, or leave as 0 to disable the metrics service.")
// For local debugging, set this to false so that `curl http://localhost:8888/metrics` returns results over plain HTTP.
flag.BoolVar(&secureMetrics, "metrics-secure", false,
	"If set, the metrics endpoint is served securely via HTTPS. Use --metrics-secure=false to use HTTP instead.")
// Initialize the custom metrics.
metrics.InitMetrics()
if err = (&controller.IngressReconciler{
	Client:   mgr.GetClient(),
	Scheme:   mgr.GetScheme(),
	Recorder: mgr.GetEventRecorderFor("Layer7Reconciler"),
}).SetupWithManager(mgr); err != nil {
	setupLog.Error(err, "unable to create controller", "controller", "Ingress")
	os.Exit(1)
}
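The excerpt above only shows the flags and the controller registration. For context, here is a minimal sketch of how those two flag values are wired into the manager's metrics server, using the controller-runtime v0.16+ API (metricsserver.Options). The scheme setup, leader election, and health probes of the real kubebuilder scaffold are omitted, so treat it as illustrative rather than the exact generated cmd/main.go; it also expects a reachable kubeconfig at runtime.

package main

import (
	"flag"
	"os"

	ctrl "sigs.k8s.io/controller-runtime"
	metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server"
)

func main() {
	var metricsAddr string
	var secureMetrics bool
	flag.StringVar(&metricsAddr, "metrics-bind-address", ":8888", "The address the metrics endpoint binds to.")
	flag.BoolVar(&secureMetrics, "metrics-secure", false, "Serve the metrics endpoint over HTTPS when true.")
	flag.Parse()

	// The flag values are handed to the manager through metricsserver.Options;
	// this is what actually starts the /metrics listener on :8888.
	mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{
		Metrics: metricsserver.Options{
			BindAddress:   metricsAddr,
			SecureServing: secureMetrics,
		},
	})
	if err != nil {
		os.Exit(1)
	}

	// Controller registration (SetupWithManager) and mgr.Start(ctrl.SetupSignalHandler())
	// would follow here, as in the generated cmd/main.go.
	_ = mgr
}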
- lb-layer7/internal/controller/ingress_controller.go
type IngressReconciler struct {
	client.Client
	Scheme       *runtime.Scheme
	layer7Client *k8scloud.Layer7
	Recorder     record.EventRecorder
}
func (r *IngressReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
	log := log.FromContext(ctx)
	log.Info("reconcile starting ...")
	// Bump the reconcile counter and record the start time.
	metrics.ReconcileTotal.WithLabelValues(metrics.Reconcile).Inc()
	startTime := time.Now()
	defer func() {
		// Record how long this reconcile took.
		metrics.ReconcileDealTime.WithLabelValues(
			req.Namespace,
			req.Name,
		).Observe(utils.GetElapsedTime(startTime))
		log.Info("Reconcile done", "ElapsedTime", utils.GetElapsedTime(startTime))
	}()
	ingress := &networkingv1.Ingress{}
	if err := r.Get(ctx, req.NamespacedName, ingress); err != nil {
		if errors.IsNotFound(err) {
			return ctrl.Result{}, nil
		}
		return ctrl.Result{}, err
	}
	// Make sure the required components are initialized.
	if r.Recorder == nil {
		log.Error(fmt.Errorf("event recorder not initialized"), "failed to record event")
		return ctrl.Result{}, fmt.Errorf("event recorder not initialized")
	}
	// Check the ingressClassName
	if ingress.Spec.IngressClassName != nil && *ingress.Spec.IngressClassName == "layer7" {
		log.Info(fmt.Sprintf("Processing Ingress with custom IngressClassName: %s", *ingress.Spec.IngressClassName))
		for _, rule := range ingress.Spec.Rules {
			host := rule.Host
			if rule.HTTP == nil {
				continue
			}
			for _, path := range rule.HTTP.Paths {
				if path.Backend.Service == nil {
					continue
				}
				serviceName := path.Backend.Service.Name
				serviceNamespace := ingress.Namespace
				// Fetch the backend Service so its selector can be used to list Pods
				// (requires "k8s.io/apimachinery/pkg/types" in the import block).
				var svc corev1.Service
				if err := r.Get(ctx, types.NamespacedName{Namespace: serviceNamespace, Name: serviceName}, &svc); err != nil {
					log.Error(err, "failed to get backend service", "service", serviceName)
					continue
				}
				// List the Pods selected by the Service.
				var podList corev1.PodList
				if err := r.List(ctx, &podList,
					client.InNamespace(serviceNamespace),
					client.MatchingLabels(svc.Spec.Selector)); err != nil {
					continue
				}
				// Collect the IPs of the running Pods.
				var podIPs []string
				for _, pod := range podList.Items {
					if pod.Status.Phase == corev1.PodRunning {
						podIPs = append(podIPs, pod.Status.PodIP)
					}
				}
				// Record an event with the result.
				r.Recorder.Event(ingress,
					corev1.EventTypeNormal,
					"PodsListed",
					fmt.Sprintf("collected %d pod IP(s) for host %s, service %s", len(podIPs), host, serviceName))
			}
		}
	}
	return ctrl.Result{}, nil
}
// SetupWithManager registers the controller and creates an index for looking up Ingresses by Service.
func (r *IngressReconciler) SetupWithManager(mgr ctrl.Manager) error {
...
}
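The body of SetupWithManager is elided above. Below is a minimal sketch of what such a setup could look like, living in the same internal/controller package as IngressReconciler; the index key name ingressServiceIndexKey and the helper findIngressesForService are my own naming, not taken from the original code. It indexes Ingresses by the backend Service names they reference, and re-enqueues the owning Ingresses when one of those Services changes.

// Hypothetical companion file in the same package as IngressReconciler.
import (
	"context"

	corev1 "k8s.io/api/core/v1"
	networkingv1 "k8s.io/api/networking/v1"
	"k8s.io/apimachinery/pkg/types"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/handler"
	"sigs.k8s.io/controller-runtime/pkg/reconcile"
)

// ingressServiceIndexKey is an assumed name for the field index.
const ingressServiceIndexKey = ".spec.rules.backend.serviceName"

func (r *IngressReconciler) SetupWithManager(mgr ctrl.Manager) error {
	// Index Ingresses by the backend Service names they reference.
	if err := mgr.GetFieldIndexer().IndexField(context.Background(), &networkingv1.Ingress{}, ingressServiceIndexKey,
		func(obj client.Object) []string {
			ing := obj.(*networkingv1.Ingress)
			var services []string
			for _, rule := range ing.Spec.Rules {
				if rule.HTTP == nil {
					continue
				}
				for _, p := range rule.HTTP.Paths {
					if p.Backend.Service != nil {
						services = append(services, p.Backend.Service.Name)
					}
				}
			}
			return services
		}); err != nil {
		return err
	}
	return ctrl.NewControllerManagedBy(mgr).
		For(&networkingv1.Ingress{}).
		// Re-enqueue the Ingresses that reference a Service whenever that Service changes.
		Watches(&corev1.Service{}, handler.EnqueueRequestsFromMapFunc(r.findIngressesForService)).
		Complete(r)
}

// findIngressesForService maps a Service event to reconcile requests for the Ingresses that use it.
func (r *IngressReconciler) findIngressesForService(ctx context.Context, obj client.Object) []reconcile.Request {
	var ingressList networkingv1.IngressList
	if err := r.List(ctx, &ingressList,
		client.InNamespace(obj.GetNamespace()),
		client.MatchingFields{ingressServiceIndexKey: obj.GetName()}); err != nil {
		return nil
	}
	requests := make([]reconcile.Request, 0, len(ingressList.Items))
	for _, ing := range ingressList.Items {
		requests = append(requests, reconcile.Request{
			NamespacedName: types.NamespacedName{Namespace: ing.Namespace, Name: ing.Name},
		})
	}
	return requests
}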
- Metrics: metrics/metric.go
package metrics

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/metrics"
)

const (
	Reconcile = "Reconcile"
)

var (
	// initialized guards against registering the metrics twice.
	initialized bool
	log         = ctrl.Log.WithName("metrics")
	// ReconcileTotal counts reconciliations.
	ReconcileTotal = prometheus.NewCounterVec(
		prometheus.CounterOpts{
			Namespace: "layer7",
			Subsystem: "controller",
			Name:      "reconcile_total",
			Help:      "Number of reconcile",
		}, []string{"name"})
)

// InitMetrics makes sure the metrics are only registered once.
func InitMetrics() {
	if !initialized {
		log.Info("Initializing metrics...")
		// Register the metrics with the controller-runtime registry.
		metricsToRegister := []prometheus.Collector{
			ReconcileTotal,
		}
		for _, m := range metricsToRegister {
			if err := metrics.Registry.Register(m); err != nil {
				log.Error(err, "Failed to register metric", "metric", fmt.Sprintf("%T", m))
				metrics.Registry.Unregister(m)
				metrics.Registry.MustRegister(m)
			}
		}
		initialized = true
		log.Info("Metrics initialized successfully")
	}
}
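The Reconcile code above also uses metrics.ReconcileDealTime, and a layer7_controller_sync_total series shows up in the /metrics output later, but neither is declared in metric.go as shown. Here is a sketch of how they might be declared in another file of the same metrics package (hypothetical file name, e.g. metrics/metric_extra.go); the histogram name, the buckets, and the seconds unit are assumptions, while the sync_total label set is taken from the /metrics output shown below.

package metrics

import "github.com/prometheus/client_golang/prometheus"

var (
	// ReconcileDealTime records how long each reconcile takes (unit assumed to be seconds).
	ReconcileDealTime = prometheus.NewHistogramVec(
		prometheus.HistogramOpts{
			Namespace: "layer7",
			Subsystem: "controller",
			Name:      "reconcile_deal_time_seconds",
			Help:      "Time spent in a single reconcile",
			Buckets:   prometheus.DefBuckets,
		}, []string{"namespace", "name"})

	// SyncTotal counts layer7 configuration synchronizations; the label set matches
	// the layer7_controller_sync_total series seen in the /metrics output.
	SyncTotal = prometheus.NewCounterVec(
		prometheus.CounterOpts{
			Namespace: "layer7",
			Subsystem: "controller",
			Name:      "sync_total",
			Help:      "Total number of layer7 configuration synchronizations",
		}, []string{"host", "name", "namespace", "reason", "status"})
)

Remember to append ReconcileDealTime and SyncTotal to metricsToRegister in InitMetrics so they are registered with the controller-runtime registry. utils.GetElapsedTime is assumed here to return the elapsed time in seconds as a float64, for example time.Since(startTime).Seconds().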
3. Debugging and Verification
① Configure the KUBECONFIG environment variable in VS Code's launch.json
{
    "version": "0.2.0",
    "configurations": [
        {
            "name": "layer7",
            "type": "go",
            "request": "launch",
            "mode": "debug",
            "program": "${workspaceFolder}/cmd/main.go",
            "cwd": "${workspaceFolder}/cmd",
            "env": {
                "KUBECONFIG": "/home/xxx/.kube/private-kube-config.conf"
            }
        }
    ]
}
② Prepare a debug YAML (Deployment, Service, and Ingress)
apiVersion: apps/v1
kind: Deployment
metadata:
  namespace: lb-layer7-demo
  name: lb-layer7-nginx-test-01
  labels:
    app: lb-layer7-nginx-test-01
spec:
  replicas: 1
  selector:
    matchLabels:
      app: lb-layer7-nginx-test-01
  template:
    metadata:
      labels:
        app: lb-layer7-nginx-test-01
    spec:
      containers:
        - name: nginx
          image: mirror.k8s.qihoo.net/docker/nginx:latest
          ports:
            - containerPort: 80
---
apiVersion: v1
kind: Service
metadata:
  namespace: lb-layer7-demo
  name: lb-layer7-nginx-test-01
  labels:
    app: lb-layer7-nginx-test-01
spec:
  ports:
    - protocol: TCP
      port: 80
      targetPort: 80
  selector:
    app: lb-layer7-nginx-test-01
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: lb-layer7-nginx-test
  namespace: lb-layer7-demo
spec:
  ingressClassName: layer7
  rules:
    - host: lb.layer7.test.com
      http:
        paths:
          - backend:
              service:
                name: lb-layer7-nginx-test-01
                port:
                  number: 80
            path: /
            pathType: Prefix
③ Create the namespace and deploy the resources
$ kubectl create ns lb-layer7-demo
namespace/lb-layer7-demo created
$ kubectl apply -f lb-layer7.yaml
deployment.apps/lb-layer7-nginx-test-01 created
service/lb-layer7-nginx-test-01 created
ingress.networking.k8s.io/lb-layer7-nginx-test created
$ kubectl get deploy,svc,ing
NAME READY UP-TO-DATE AVAILABLE AGE
deployment.apps/lb-layer7-nginx-test-01 1/1 1 1 2m26s
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
service/lb-layer7-nginx-test-01 ClusterIP 172.24.105.41 <none> 80/TCP 2m26s
NAME CLASS HOSTS ADDRESS PORTS AGE
ingress.networking.k8s.io/lb-layer7-nginx-test layer7 lb.layer7.test.com 80 2m26s
④ Start debugging
⑤ Check port 8888
$ lsof -i:8888
COMMAND PID USER FD TYPE DEVICE SIZE/OFF NODE NAME
__debug_b 3207798 shijianpeng 8u IPv6 563094246 0t0 TCP *:ddi-tcp-1 (LISTEN)
⑥ Check the exposed metrics
$ curl -s http://localhost:8888/metrics | grep "layer7"
# HELP layer7_controller_reconcile_total Number of reconcile
# TYPE layer7_controller_reconcile_total counter
layer7_controller_reconcile_total{name="Reconcile"} 2
# HELP layer7_controller_sync_total Total number of layer7 configuration synchronizations
# TYPE layer7_controller_sync_total counter
layer7_controller_sync_total{host="lb.layer7.test.com",name="lb-layer7-nginx-test-01",namespace="lb-layer7-demo",reason="",status="success"} 1
4. Building and Pushing the Image
1. Build the image in vendor mode and modify the Dockerfile
- Replace the base images:
sed -i 's#golang:1.22#swr.cn-north-4.myhuaweicloud.com/ddn-k8s/docker.io/golang:1.23.1#' Dockerfile
sed -i 's#gcr.io/distroless/static:nonroot#swr.cn-north-4.myhuaweicloud.com/ddn-k8s/gcr.io/distroless/static:nonroot#' Dockerfile
- Dockerfile contents
# Build the manager binary
FROM swr.cn-north-4.myhuaweicloud.com/ddn-k8s/docker.io/library/golang:1.23.1 AS builder
ARG TARGETOS
ARG TARGETARCH
WORKDIR /workspace
# Copy the whole project directory (including vendor/)
COPY . .
# Build in vendor mode
RUN CGO_ENABLED=0 GOOS=${TARGETOS:-linux} GOARCH=${TARGETARCH} go build -mod=vendor -a -o manager cmd/main.go
# Use a smaller base image for the final stage
FROM swr.cn-north-4.myhuaweicloud.com/ddn-k8s/gcr.io/distroless/static:nonroot
WORKDIR /
COPY --from=builder /workspace/manager .
USER 65532:65532
ENTRYPOINT ["/manager"]
2. Build the image
$ go mod vendor
$ sudo docker build -t lb-controller/lb-layer7:v0.0.1 .
$ sudo docker images
REPOSITORY TAG IMAGE ID CREATED SIZE
lb-controller/lb-layer7 v0.0.1 da64931a01d2 8 seconds ago 74.7MB
3. Tag the image
# Tag the image for the remote registry
$ sudo docker tag lb-controller/lb-layer7:v0.0.1 harbor.qihoo.net/lb-controller/lb-layer7:v0.0.1
4. Push the image
# Log in to the remote registry
$ docker login harbor.qihoo.net/lb-controller
# Push
$ docker push harbor.qihoo.net/lb-controller/lb-layer7:v0.0.1
5. Deploy locally and verify
# Check the built image
$ sudo docker images
REPOSITORY TAG IMAGE ID CREATED SIZE
lb-controller/lb-layer7 v0.0.1 da64931a01d2 8 seconds ago 74.7MB
# Deploy to the current cluster
$ make deploy IMG=lb-controller/lb-layer7:v0.0.1
# Verify
$ kubectl get svc,deploy,pod -n lb-layer7-system
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
service/lb-layer7-controller-manager-metrics-service ClusterIP 172.24.178.129 <none> 8443/TCP 7m15s
NAME READY UP-TO-DATE AVAILABLE AGE
deployment.apps/lb-layer7-controller-manager 1/1 1 1 7m15s
NAME READY STATUS RESTARTS AGE
pod/lb-layer7-controller-manager-78d58db99d-wkvwb 1/1 Running 0 2m6s
# Uninstall the deployment
$ make undeploy
5. Building a Helm Chart
- Create the chart and adjust its configuration
# Create the chart skeleton
$ helm create lb-layer7
- Chart.yaml
apiVersion: v2
name: lb-layer7
description: A Helm chart for Kubernetes
type: application
version: 0.0.1
appVersion: "v0.0.1"
- values.yaml
replicaCount: 1
image:
  repository: harbor.qihoo.net/lb-controller/lb-layer7
  tag: v0.0.1
  pullPolicy: IfNotPresent
resources:
  limits:
    cpu: 500m
    memory: 256Mi
  requests:
    cpu: 250m
    memory: 128Mi
- templates/deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: {{ .Release.Name }}
  labels:
    app: {{ .Chart.Name }}
    release: {{ .Release.Name }}
spec:
  replicas: {{ .Values.replicaCount }}
  selector:
    matchLabels:
      app: {{ .Chart.Name }}
      release: {{ .Release.Name }}
  template:
    metadata:
      labels:
        app: {{ .Chart.Name }}
        release: {{ .Release.Name }}
    spec:
      containers:
        - name: {{ .Chart.Name }}
          image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
          imagePullPolicy: {{ .Values.image.pullPolicy }}
          resources:
            {{- toYaml .Values.resources | nindent 12 }}
6. Helm Debugging, Installation, and Packaging Commands
# Render the chart templates
$ helm template --debug ./lb-layer7
# Dry-run an install or upgrade for debugging
$ helm install --debug --dry-run lb-layer7 ./lb-layer7 -n lb-layer7-system
# Install
$ helm install lb-layer7 ./lb-layer7
# Check the release status
$ helm status lb-layer7
# View the full rendered YAML
$ helm get manifest lb-layer7
# List the charts installed in the cluster
$ helm list -A
# Package
$ helm package lb-layer7
# Publish (pushing to a ChartMuseum-style repo such as Harbor's chartrepo typically requires the helm cm-push plugin)
$ helm push lb-layer7-0.0.1.tgz https://harbor.qihoo.net/chartrepo/xxx
# Uninstall
$ helm uninstall lb-layer7
7. Notes on Deploying the Controller
① Private registry access for the Pod image
# When pulling from a private registry, the first pull fails with an authorization error; create a Secret
$ kubectl create secret docker-registry harbor-360 --namespace=lb-layer7-system \
--docker-server="https://harbor.qihoo.net" \
--docker-username=xxx \
--docker-password=xxx
# Add imagePullSecrets
$ kubectl patch deploy lb-layer7 -n lb-layer7-system --type='json' \
-p='[
{"op": "add", "path": "/spec/template/spec/imagePullSecrets", "value": [{"name": "harbor-360"}]}
]'
# Configure the ServiceAccount (a JSON Patch "add" on an object member also overwrites an existing value)
$ kubectl patch deploy lb-layer7 -n lb-layer7-system --type='json' \
-p='[
{"op": "add", "path": "/spec/template/spec/serviceAccountName", "value": "lb-layer7"}
]'
② Configure the ServiceAccount and RBAC permissions: sa.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: lb-layer7
  namespace: lb-layer7-system
---
apiVersion: v1
kind: Secret
metadata:
  name: lb-layer7-secret
  namespace: lb-layer7-system
  annotations:
    kubernetes.io/service-account.name: "lb-layer7"
type: kubernetes.io/service-account-token
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: lb-layer7
rules:
  - apiGroups: ["networking.k8s.io"]
    resources: ["ingresses"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["services"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["pods"]
    verbs: ["get", "list", "watch"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: lb-layer7-binding
subjects:
  - kind: ServiceAccount
    name: lb-layer7
    namespace: lb-layer7-system
roleRef:
  kind: ClusterRole
  name: lb-layer7
  apiGroup: rbac.authorization.k8s.io
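Since the project is scaffolded with kubebuilder, the same permissions can alternatively be kept next to the code as RBAC markers on the reconciler and regenerated into config/rbac with `make manifests`. A sketch of markers (placed above the Reconcile method) that would express the rules above:

// +kubebuilder:rbac:groups=networking.k8s.io,resources=ingresses,verbs=get;list;watch
// +kubebuilder:rbac:groups="",resources=services,verbs=get;list;watch
// +kubebuilder:rbac:groups="",resources=pods,verbs=get;list;watch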
③ Verify
$ kubectl apply -f sa.yaml
# Verify the ingress permissions
$ kubectl auth can-i watch ingresses.networking.k8s.io --as=system:serviceaccount:lb-layer7-system:lb-layer7 --all-namespaces
# Verify the service permissions
$ kubectl auth can-i list services --as=system:serviceaccount:lb-layer7-system:lb-layer7 --all-namespaces
# Verify the pod permissions
$ kubectl auth can-i list pods --as=system:serviceaccount:lb-layer7-system:lb-layer7 --all-namespaces
# Or
$ kubectl describe clusterrole lb-layer7
$ kubectl describe clusterrolebinding lb-layer7-binding
Summary
This article covered the Kubernetes controller development workflow in detail, from environment initialization to Helm deployment. By working through it, you should have picked up the following key skills:
- Creating a controller with kubebuilder
- Watching Kubernetes resources and writing Reconcile logic
- Monitoring metrics and tuning performance
- Building and pushing images, and deploying with Helm
Mastering Kubernetes controller development helps you automate cluster management, improve operational efficiency, and gain a deeper understanding of how Kubernetes works.