环境说明

系统:CentOS Linux release 7.6.1810 (Core)
golang:v1.15
operator-sdk:v1.7.0
docker:v19.03 # 因为 operator-sdk 使用了多阶段构建功能,所以 docker 必须大于等于 v17.05 版本(多阶段构建自 17.05 起支持)
k8s:k3s v1.20.6+k3s1 单节点

需求

定义一个 crd ,spec 包含以下信息:

Replicas # 副本数
Image       # 镜像
Resources   # 资源限制
Envs        # 环境变量
Ports       # 服务端口

根据以上信息,controller 自动创建或者更新一个 deployment + service

注意:本实例只是展示了大概开发流程,故 crd 资源的 status 状态未做任何更新,实际编写中需要根据实际情况自行更新 status 状态

创建 APP

$ mkdir -p $GOPATH/src/github.com/zxl/app
$ cd $GOPATH/src/github.com/zxl/app
$ operator-sdk init --domain=example.com --repo=github.com/zxl/app

创建 API

$ operator-sdk create api --group app --version v1 --kind App --resource=true --controller=true

修改 CRD 类型定义代码 api/v1/app_types.go

/*
Copyright 2021.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/package v1import (appsv1 "k8s.io/api/apps/v1"corev1 "k8s.io/api/core/v1"metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)/*
修改定义后需要使用 make generate 生成新的 zz_generated.deepcopy.go 文件*/// EDIT THIS FILE!  THIS IS SCAFFOLDING FOR YOU TO OWN!
// NOTE: json tags are required.  Any new fields you add must have json tags for the fields to be serialized.// AppSpec defines the desired state of App
type AppSpec struct {// INSERT ADDITIONAL SPEC FIELDS - desired state of cluster// Important: Run "make" to regenerate code after modifying this fileReplicas *int32               `json:"replicas"`      // 副本数Image    string               `json:"image"`      // 镜像Resources corev1.ResourceRequirements  `json:"resources,omitempty"`    // 资源限制Envs     []corev1.EnvVar      `json:"envs,omitempty"`    // 环境变量Ports    []corev1.ServicePort `json:"ports,omitempty"`   // 服务端口
}// AppStatus defines the observed state of App
type AppStatus struct {// INSERT ADDITIONAL STATUS FIELD - define observed state of cluster// Important: Run "make" to regenerate code after modifying this file//Conditions []AppCondition//Phase stringappsv1.DeploymentStatus `json:",inline"` // 直接引用 DeploymentStatus
}//type AppCondition struct {
//  Type string
//  Message string
//  Reason string
//  Ready bool
//  LastUpdateTime metav1.Time
//  LastTransitionTime metav1.Time
//}//+kubebuilder:object:root=true
//+kubebuilder:subresource:status// App is the Schema for the apps API
type App struct {metav1.TypeMeta   `json:",inline"`metav1.ObjectMeta `json:"metadata,omitempty"`Spec   AppSpec   `json:"spec,omitempty"`Status AppStatus `json:"status,omitempty"`
}//+kubebuilder:object:root=true// AppList contains a list of App
type AppList struct {metav1.TypeMeta `json:",inline"`metav1.ListMeta `json:"metadata,omitempty"`Items           []App `json:"items"`
}func init() {SchemeBuilder.Register(&App{}, &AppList{})
}

新增 resource/deployment/deployment.go

package deploymentimport (appv1 "github.com/leffss/app/api/v1"appsv1 "k8s.io/api/apps/v1"corev1 "k8s.io/api/core/v1"metav1 "k8s.io/apimachinery/pkg/apis/meta/v1""k8s.io/apimachinery/pkg/runtime/schema"
)func New(app *appv1.App) *appsv1.Deployment {labels := map[string]string{"app.example.com/v1": app.Name}selector := &metav1.LabelSelector{MatchLabels: labels}return &appsv1.Deployment{TypeMeta:   metav1.TypeMeta{APIVersion: "apps/v1",Kind: "Deployment",},ObjectMeta: metav1.ObjectMeta{Name: app.Name,Namespace: app.Namespace,OwnerReferences: []metav1.OwnerReference{*metav1.NewControllerRef(app, schema.GroupVersionKind{Group: appv1.GroupVersion.Group,Version: appv1.GroupVersion.Version,Kind: "App",}),},},Spec:       appsv1.DeploymentSpec{Replicas: app.Spec.Replicas,Selector: selector,Template: corev1.PodTemplateSpec{ObjectMeta: metav1.ObjectMeta{Labels: labels,},Spec: corev1.PodSpec{Containers: newContainers(app),},},},}
}func newContainers(app *appv1.App) []corev1.Container  {var containerPorts []corev1.ContainerPortfor _, servicePort := range app.Spec.Ports {var cport corev1.ContainerPortcport.ContainerPort = servicePort.TargetPort.IntValcontainerPorts = append(containerPorts, cport)}return []corev1.Container{{Name:            app.Name,Image:           app.Spec.Image,Ports:           containerPorts,Env:             app.Spec.Envs,Resources:       app.Spec.Resources,ImagePullPolicy: corev1.PullIfNotPresent,},}
}

新增 resource/service/service.go

package serviceimport (appv1 "github.com/zxl/app/api/v1"corev1 "k8s.io/api/core/v1"metav1 "k8s.io/apimachinery/pkg/apis/meta/v1""k8s.io/apimachinery/pkg/runtime/schema"
)func New(app *appv1.App) *corev1.Service {return &corev1.Service{TypeMeta: metav1.TypeMeta{Kind:       "Service",APIVersion: "v1",},ObjectMeta: metav1.ObjectMeta{Name:                       app.Name,Namespace: app.Namespace,OwnerReferences: []metav1.OwnerReference{*metav1.NewControllerRef(app, schema.GroupVersionKind{Group: appv1.GroupVersion.Group,Version: appv1.GroupVersion.Version,Kind: "App",}),},},Spec: corev1.ServiceSpec{Ports:                    app.Spec.Ports,Selector: map[string]string{"app.example.com/v1": app.Name,},},}
}

修改 controller 代码 controllers/app_controller.go

/*
Copyright 2021.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/package controllersimport ("context""encoding/json""reflect""github.com/zxl/app/resource/deployment""github.com/zxl/app/resource/service""k8s.io/apimachinery/pkg/api/errors""github.com/go-logr/logr""k8s.io/apimachinery/pkg/runtime"ctrl "sigs.k8s.io/controller-runtime""sigs.k8s.io/controller-runtime/pkg/client"appv1 "github.com/zxl/app/api/v1"corev1 "k8s.io/api/core/v1"appsv1 "k8s.io/api/apps/v1"
)// AppReconciler reconciles a App object
type AppReconciler struct {client.ClientLog    logr.LoggerScheme *runtime.Scheme
}//+kubebuilder:rbac:groups=app.example.com,resources=apps,verbs=get;list;watch;create;update;patch;delete
//+kubebuilder:rbac:groups=app.example.com,resources=apps/status,verbs=get;update;patch
//+kubebuilder:rbac:groups=app.example.com,resources=apps/finalizers,verbs=update// Reconcile is part of the main kubernetes reconciliation loop which aims to
// move the current state of the cluster closer to the desired state.
// TODO(user): Modify the Reconcile function to compare the state specified by
// the App object against the actual cluster state, and then
// perform operations to make the cluster state reflect the state specified by
// the user.
//
// For more details, check Reconcile and its Result here:
// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.7.2/pkg/reconcile
func (r *AppReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {_ = r.Log.WithValues("app", req.NamespacedName)// your logic here// 获取 crd 资源instance := &appv1.App{}if err := r.Client.Get(ctx, req.NamespacedName, instance); err != nil {if errors.IsNotFound(err) {return ctrl.Result{}, nil}return ctrl.Result{}, err}// crd 资源已经标记为删除if instance.DeletionTimestamp != nil {return ctrl.Result{}, nil}oldDeploy := &appsv1.Deployment{}if err := r.Client.Get(ctx, req.NamespacedName, oldDeploy); err != nil {// deployment 不存在,创建if errors.IsNotFound(err) {// 创建deploymentif err := r.Client.Create(ctx, deployment.New(instance)); err != nil {return ctrl.Result{}, err}// 创建serviceif err := r.Client.Create(ctx, service.New(instance)); err != nil {return ctrl.Result{}, err}// 更新 crd 资源的 Annotationsdata, _ := json.Marshal(instance.Spec)if instance.Annotations != nil {instance.Annotations["spec"] = string(data)} else {instance.Annotations = map[string]string{"spec": string(data)}}if err := r.Client.Update(ctx, instance); err != nil {return ctrl.Result{}, err}} else {return  ctrl.Result{}, err}} else {// deployment 存在,更新oldSpec := appv1.AppSpec{}if err := json.Unmarshal([]byte(instance.Annotations["spec"]), &oldSpec); err != nil {return ctrl.Result{}, err}if !reflect.DeepEqual(instance.Spec, oldSpec) {// 更新deploymentnewDeploy := deployment.New(instance)oldDeploy.Spec = newDeploy.Specif err := r.Client.Update(ctx, oldDeploy); err != nil {return ctrl.Result{}, err}// 更新servicenewService := service.New(instance)oldService := &corev1.Service{}if err := r.Client.Get(ctx, req.NamespacedName, oldService); err != nil {return ctrl.Result{}, err}clusterIP := oldService.Spec.ClusterIP   // 更新 service 必须设置老的 clusterIPoldService.Spec = newService.SpecoldService.Spec.ClusterIP = clusterIPif err := r.Client.Update(ctx, oldService); err != nil {return ctrl.Result{}, err}// 更新 crd 资源的 Annotationsdata, _ := json.Marshal(instance.Spec)if instance.Annotations != nil 
{instance.Annotations["spec"] = string(data)} else {instance.Annotations = map[string]string{"spec": string(data)}}if err := r.Client.Update(ctx, instance); err != nil {return ctrl.Result{}, err}}}return ctrl.Result{}, nil
}// SetupWithManager sets up the controller with the Manager.
func (r *AppReconciler) SetupWithManager(mgr ctrl.Manager) error {return ctrl.NewControllerManagedBy(mgr).For(&appv1.App{}).Complete(r)
}

修改 CRD 资源定义 config/samples/app_v1_app.yaml

apiVersion: app.example.com/v1
kind: App
metadata:
  name: app-sample
  namespace: default
spec:
  # Add fields here
  replicas: 2
  image: nginx:1.16.1
  ports:
    - targetPort: 80
      port: 8080
  envs:
    - name: DEMO
      value: app
    - name: GOPATH
      value: gopath
  resources:
    limits:
      cpu: 500m
      memory: 500Mi
    requests:
      cpu: 100m
      memory: 100Mi

修改 Dockerfile

# Build the manager binary
FROM golang:1.15 as builder

WORKDIR /workspace
# Copy the Go Modules manifests
COPY go.mod go.mod
COPY go.sum go.sum
# cache deps before building and copying source so that we don't need to re-download as much
# and so that source changes don't invalidate our downloaded layer
ENV GOPROXY https://goproxy.cn,direct
RUN go mod download

# Copy the go source
COPY main.go main.go
COPY api/ api/
COPY controllers/ controllers/
COPY resource/ resource/

# Build
RUN CGO_ENABLED=0 GOOS=linux GOARCH=amd64 GO111MODULE=on go build -a -o manager main.go

# Use distroless as minimal base image to package the manager binary
# Refer to https://github.com/GoogleContainerTools/distroless for more details
#FROM gcr.io/distroless/static:nonroot
FROM kubeimages/distroless-static:latest
WORKDIR /
COPY --from=builder /workspace/manager .
USER 65532:65532

ENTRYPOINT ["/manager"]
  • 添加了 goproxy 环境变量
  • 新增 COPY 自定义的文件夹 resource
  • gcr.io/distroless/static:nonroot 变更为 kubeimages/distroless-static:latest

部署运行

第一种:本地运行 controller

用于开发测试

app 项目根目录运行:

$ make generate && make manifests && make install && make run
  • 本机需确保安装了 kubectl 工具,并且证书文件 ~/.kube/config 存在(保证为集群管理员权限)
  • 测试完毕后使用 ctrl + c 停止程序,然后 make uninstall 删除 crd 定义

第二种:集群部署

1、make

$ make generate && make manifests && make install

2、构建镜像

$ make docker-build IMG=leffss/app:v1
$ docker images |grep app
leffss/app                v1                  1eaa4b6a4781        About a minute ago   46.5MB

3、准备镜像:

# 因为 k3s 安装的 k8s v1.20 默认使用的是 containerd,所以要导入镜像
$ docker save leffss/app:v1 > app.tar
$ ctr image import app.tar
$ docker pull kubesphere/kube-rbac-proxy:v0.8.0
$ docker tag kubesphere/kube-rbac-proxy:v0.8.0 gcr.io/kubebuilder/kube-rbac-proxy:v0.8.0
$ docker save gcr.io/kubebuilder/kube-rbac-proxy:v0.8.0 > kube-rbac-proxy.tar
$ ctr image import kube-rbac-proxy.tar

4、运行

$ make deploy IMG=leffss/app:v1

5、结果确认:

$ kubectl get service -A |grep app
$ kubectl -n app-system get pod
$ kubectl -n app-system get deployment

6、CRD 确认

$ kubectl get crd
NAME                              CREATED AT
addons.k3s.cattle.io              2021-05-02T01:03:34Z
helmcharts.helm.cattle.io         2021-05-02T01:03:34Z
helmchartconfigs.helm.cattle.io   2021-05-02T01:03:34Z
apps.app.example.com              2021-05-04T12:10:43Z

创建自定义资源

$ kubectl apply -f config/samples/app_v1_app.yaml
app.app.example.com/app-sample created

查看控制台日志:

$ kubectl -n app-system logs app-controller-manager-6cf86db855-sqhpj -c manager
2021-05-05T19:56:32.798+0800    INFO    controller-runtime.metrics      metrics server is starting to listen      {"addr": ":8080"}
2021-05-05T19:56:32.798+0800    INFO    setup   starting manager
2021-05-05T19:56:32.799+0800    INFO    controller-runtime.manager      starting metrics server {"path": "/metrics"}
2021-05-05T19:56:32.800+0800    INFO    controller-runtime.manager.controller.app       Starting EventSource      {"reconciler group": "app.example.com", "reconciler kind": "App", "source": "kind source: /, Kind="}
2021-05-05T19:56:32.901+0800    INFO    controller-runtime.manager.controller.app       Starting Controller       {"reconciler group": "app.example.com", "reconciler kind": "App"}
2021-05-05T19:56:32.901+0800    INFO    controller-runtime.manager.controller.app       Starting workers {"reconciler group": "app.example.com", "reconciler kind": "App", "worker count": 1}
  • 如果是第一种部署方式则直接查看控制台,第二种则查看集群中 app-controller-manager 的 pod 中的 manager 容器控制台日志

如果是第二种直接将 controller 部署到 k8s 集群的方式,可能会出现 RBAC 权限错误,解决方法是修改部署时的权限配置,这里我们使用最简单的方法是直接给 controller 绑定到 cluster-admin 集群管理员即可

apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: cluster-admin-rolebinding
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
  - kind: ServiceAccount
    name: app-controller-manager
    namespace: app-system

删除 CRD 资源

$ kubectl delete -f config/samples/app_v1_app.yaml
app.app.example.com/app-sample deleted

删除 CRD 定义

$ make uninstall

删除 controller

$ make undeploy
  • 只适用第二种部署方式

总结

实际开发中只需要使用 operator-sdk(或者 kubebuilder)创建 CRD 以及相应的 controller,然后根据需求自行定义 crd 属性,并编写对应的 controller 逻辑代码,最终就可以实现一个完整的 operator。

补充:make deploy 含义

部署时使用 make deploy 实际是执行的什么命令呢?通过查看项目根目录 Makefile 可知实际运行命令为:

deploy: manifests kustomize ## Deploy controller to the K8s cluster specified in ~/.kube/config.cd config/manager && $(KUSTOMIZE) edit set image controller=${IMG}$(KUSTOMIZE) build config/default | kubectl apply -f -
  • 实际上就是使用 kustomize 工具生成部署的 yaml 文件,然后使用 kubectl 工具应用

其他的 make install,make run 等命令也都在 Makefile 中有相关定义。

所以在实际部署过程中,如果想手动部署的话,可以直接参考 Makefile 首先生成部署 yaml 文件,然后手动应用到 k8s 集群即可。

operator-sdk实战开发K8S CRD自定义资源对象相关推荐

  1. 基于operator sdk编写一个k8s自定义资源管理应用

    简介:operator 是一种 kubernetes 的扩展形式,可以帮助用户以 Kubernetes 的声明式 API 风格自定义来管理应用及服务,operator已经成为分布式应用在k8s集群部署 ...

  2. k8s操作自定义资源

    如何操作自定义资源? client-go为每种K8S内置资源提供对应的clientset和informer.那如果我们要监听和操作⾃定义资源对象,应该如何做呢?这⾥我们有两种⽅式: ⽅式⼀: 使⽤cl ...

  3. Crd(自定义资源类型)2021.12.05

    目录 文章目录 目录 实验环境 实验软件 1.什么是CRD 2.CRD的定义 3.Controller 4.Operator 5.参考文档 关于我 最后 实验环境 实验环境: 1.win10,vmwr ...

  4. k8s的自定义资源代码生成详解

    为什么下这篇文章呢,在代码生成这一块跌了两次跟头,说白了只知道照抄,并不知道里面到底是干什么的,要做到知其然并知其所以然,也是为以后在自动生成代码的时候做一个参考. 生成的项目目录结构如下 . ├── ...

  5. Kubernetes CRD (CustomResourceDefinition) 自定义资源类型

    目录 1.CRD (CustomResourceDefinition) 介绍 1.1 client-go 组件 1.2 Custom Controller 组件 2.环境.软件准备 3.Kuberne ...

  6. 千锋很火的SpringBoot实战开发教程视频

    springboot是什么? Spring Boot是由Pivotal团队提供的全新框架,其设计目的是用来简化新Spring应用的初始搭建以及开发过程.该框架使用了特定的方式来进行配置,从而使开发人员 ...

  7. CoreDNS与k8s资源对象详解-Day03

    1. K8s DNS 官网地址:https://github.com/coredns/coredns https://coredns.io/ https://coredns.io/plugins 1. ...

  8. k8s系列(四)——资源对象

    k8s系列四--资源对象 pod概念 思考:为什么k8s会引出pod这个概念,容器不能解决么? 我的理解:一组密切相关的服务使用容器的话,如果他们的镜像不在一个容器里的话,那么就需要配置反向代理进行通 ...

  9. k8s篇-网络-Ingress对象详解

    一.什么是Ingress 为什么需要 Ingress: Ingress 也是为了解决在集群之外,访问集群内部Service服务的问题. 实际上,将service的type设置为nodePort或Loa ...

最新文章

  1. python一箭穿心代码怎样复制,Python Decimal copy_sign()用法及代码示例
  2. 为新手准备的 Codea 着色器(Shader)教程
  3. 用 Flask 来写个轻博客
  4. Java之transient关键字
  5. 421. 数组中两个数的最大异或值
  6. OSG——- 对点选物体平移(鼠标点选物体、物体随鼠标移动、屏幕坐标转世界坐标)
  7. php stripcslashes 转义,stripcslashes()
  8. 实验server2003的域环境里安装一台Server2008 DC
  9. 2021-09-01175. 组合两个表 SQL
  10. 运动图像国际压缩标准-整理
  11. 指针进阶:函数指针的应用场景
  12. BurpSuite 通过google浏览器抓取https流量包
  13. Java核心类库之(类加载器、反射机制、模块化)
  14. 戴尔服务器显示屏报警PDR1101 fault detected on drive 3. Check drive
  15. 多线程与简单统筹学--Python语言描述
  16. 晋城一中oj 议员秘密
  17. c语言综合作业程序填空,C语言程序填空题及答案
  18. 【美团测开二面准备】
  19. Bash shell(二)-变量的丰富功能
  20. 融资、量产和一栈式布局,这家Tier 1如此备战高阶智驾决赛圈

热门文章

  1. PyQt5 笔记2 -- Qt Designer使用
  2. Debug Docker: Error response from daemon: dial unix docker.raw.sock: connect: connection refused
  3. linux内核杂记(3)-进程(2)
  4. AI理论知识基础(25)-机器学习常见损失函数, 共轭梯度法(1)
  5. 【机器学习】小数据集怎么上分? 几行代码生成伪标签数据集
  6. 【深度学习】你不知道的车牌识别系统
  7. 易天教你如何保养SFP光模块
  8. Redis--发布订阅模式
  9. Windows Phone如何获取和添加联系人
  10. 使用YCSB检测MongoDB