容器云项目-CICD工具平台部署

一、工具介绍

上一章已经把整个kubernetes集群部署完成,本章计划搭建以下工具平台

工具平台 包含组件 部署节点 备注
持续集成 jenkins gitlab harbor nexus sonarqube node20 node21 node18 gitlab部署在宿主机node18上,且node18不在集群内
监控 elasticsearch grafana prometheus fluentd kubernetes-dashboard node22 node23
存储 nfs 所有节点

二、nfs共享存储部署

node18上执行下面操作

yum -y install nfs-utils rpcbind
mkdir -p /data/{jenkins,nexus,nfs} /data/harbor/{chartmuseum,database,jobservice,redis,registry,trivy} /data/sonarqube/{sonarqube-database,conf,data,extensions}

cat /etc/exports

/data/jenkins *(rw,no_root_squash,no_subtree_check,no_all_squash,sync,anonuid=501,anongid=501)   
/data/harbor/chartmuseum *(rw,no_root_squash,no_subtree_check,no_all_squash,sync,anonuid=501,anongid=501)
/data/harbor/database *(rw,no_root_squash,no_subtree_check,no_all_squash,sync,anonuid=501,anongid=501)
/data/harbor/jobservice *(rw,no_root_squash,no_subtree_check,no_all_squash,sync,anonuid=501,anongid=501)
/data/harbor/redis *(rw,no_root_squash,no_subtree_check,no_all_squash,sync,anonuid=501,anongid=501)
/data/harbor/registry *(rw,no_root_squash,no_subtree_check,no_all_squash,sync,anonuid=501,anongid=501)
/data/harbor/trivy *(rw,no_root_squash,no_subtree_check,no_all_squash,sync,anonuid=501,anongid=501)
/data/nexus *(rw,no_root_squash,no_subtree_check,no_all_squash,sync,anonuid=501,anongid=501)
/data/sonarqube/sonarqube-database *(rw,no_root_squash,no_subtree_check,no_all_squash,sync,anonuid=501,anongid=501)
/data/sonarqube/conf *(rw,no_root_squash,no_subtree_check,no_all_squash,sync,anonuid=501,anongid=501)
/data/sonarqube/data *(rw,no_root_squash,no_subtree_check,no_all_squash,sync,anonuid=501,anongid=501)
/data/sonarqube/extensions *(rw,no_root_squash,no_subtree_check,no_all_squash,sync,anonuid=501,anongid=501)
/data/nfs *(rw,no_root_squash,no_subtree_check,no_all_squash,sync,anonuid=501,anongid=501)

systemctl start rpcbind nfs

node19~node23上执行下面操作
yum -y install nfs-utils

三、持续集成工具部署

给四个节点打上标签以方便将持续集成和监控两个平台的组件分别调度到不同的节点
kubectl label node node20 app=devops
kubectl label node node21 app=devops
kubectl label node node22 app=monitor
kubectl label node node23 app=monitor

3.1、jenkins部署

cat jenkins-rbac.yaml

# RBAC for the Jenkins pod: a ServiceAccount plus a read-only ClusterRole
# over core discovery resources, bound cluster-wide.
apiVersion: v1
kind: ServiceAccount
metadata:
  name: jenkins
  namespace: devops
  labels:
    k8s-app: jenkins
    addonmanager.kubernetes.io/mode: Reconcile
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: jenkins
  labels:
    k8s-app: jenkins
    addonmanager.kubernetes.io/mode: Reconcile
rules:
  - apiGroups:
      - ""
    resources:
      - "services"
      - "namespaces"
      - "endpoints"
    verbs:
      - "get"
---
# ClusterRoleBindings are cluster-scoped: the metadata.namespace field the
# original carried is meaningless and has been dropped.
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: jenkins
  labels:
    k8s-app: jenkins
    addonmanager.kubernetes.io/mode: Reconcile
subjects:
  - kind: ServiceAccount
    name: jenkins
    namespace: devops
    apiGroup: ""
roleRef:
  kind: ClusterRole
  name: jenkins
  # roleRef must name the RBAC API group, not the core ("") group.
  apiGroup: rbac.authorization.k8s.io

cat jenkins-pvc-pv.yaml

# Namespace for all CI/CD components plus the static NFS volume for Jenkins home.
# The original first document was missing apiVersion, which kubectl rejects.
apiVersion: v1
kind: Namespace
metadata:
  name: devops
---
apiVersion: v1
kind: PersistentVolume
metadata:
  # PVs are cluster-scoped; the namespace field the original carried is dropped.
  name: jenkins-pv
  labels:
    pv: jenkins-pv
spec:
  capacity:
    storage: 15Gi
  accessModes:
    - ReadWriteMany
  # Empty class pins this PV to static binding (no dynamic provisioner).
  storageClassName: ""
  nfs:
    path: /data/jenkins
    server: 10.0.0.18
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: jenkins-pvc
  namespace: devops
spec:
  accessModes:
    - ReadWriteMany
  storageClassName: ""
  resources:
    requests:
      storage: 15Gi
  # Bind to the PV above via its label.
  selector:
    matchLabels:
      pv: "jenkins-pv"

cat jenkins-deploy.yaml

# Jenkins master Deployment (default 1 replica); /var/jenkins_home is backed
# by the NFS PVC so state survives pod restarts.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: jenkins
  namespace: devops
spec:
  selector:
    matchLabels:
      name: jenkins
  template:
    metadata:
      labels:
        name: jenkins
    spec:
      # Keep CI components on the devops-labelled nodes (node20/node21).
      nodeSelector:
        app: devops
      serviceAccountName: jenkins
      containers:
        - name: jenkins
          # Image built locally from the Dockerfile in this chapter
          # (blueocean plus maven).
          image: jenkinsci/blueocean:master
          imagePullPolicy: IfNotPresent
          env:
            - name: "JENKINS_OPTS"
              value: "--prefix=/jenkins"   # serve the UI under /jenkins
          ports:
            - containerPort: 8080
          resources:
            requests:
              cpu: 100m
              memory: 1Gi
            limits:
              cpu: 1000m
              memory: 2Gi
          volumeMounts:
            - name: jenkins
              mountPath: "/var/jenkins_home"
      volumes:
        - name: jenkins
          persistentVolumeClaim:
            claimName: jenkins-pvc

cat jenkins-service.yaml

# NodePort Service exposing the Jenkins web UI on every node at :30080.
apiVersion: v1
kind: Service
metadata:
  name: jenkins-svc
  namespace: devops
  labels:
    nfs: jenkins-service
    deploy-jenkins: support
  annotations:
    description: Exposes Jenkins Service
spec:
  type: NodePort
  selector:
    name: jenkins
  ports:
    - name: jenkins
      port: 8080
      targetPort: 8080
      nodePort: 30080

cat Dockerfile

FROM jenkinsci/blueocean:latest

USER root

# Use the Asia/Shanghai timezone inside the image.
RUN rm -f /etc/localtime && ln -s /usr/share/zoneinfo/Asia/Shanghai /etc/localtime \
&& echo "Asia/Shanghai" > /etc/timezone

# Install Maven (needed to build Java applications in pipelines).
# ADD auto-extracts the tarball and creates the target directory, so the
# original `RUN mkdir /usr/local/maven` layer was redundant.
WORKDIR /usr
ADD apache-maven-3.5.4-bin.tar.gz /usr/local/maven
RUN ln -s /usr/local/maven/apache-maven-3.5.4 /usr/local/maven/maven
ENV MAVEN_HOME=/usr/local/maven/maven
ENV PATH=${MAVEN_HOME}/bin:$PATH

注意这里之所以添加了maven,是因为编译java应用需要

依次执行

docker build -t jenkinsci/blueocean:master .
kubectl create -f jenkins-rbac.yaml
kubectl create -f jenkins-pvc-pv.yaml
kubectl create -f jenkins-deploy.yaml
kubectl create -f jenkins-service.yaml

3.2、harbor部署

要在kubernetes集群中部署harbor,最好的方式还是利用helm来安装
Helm下载
解压文件

tar zvxf helm-v3.2.4-linux-amd64.tar.gz   
cp linux-amd64/helm /usr/bin/helm
helm version   

返回
version.BuildInfo{Version:"v3.2.4", GitCommit:"0ad800ef43d3b826f31a5ad8dfbb4fe05d143688", GitTreeState:"clean", GoVersion:"go1.13.12"}说明安装成功

下面是用helm来直接安装harbor,你可以将其理解成kubernetes集群内的yum

helm repo add harbor https://helm.goharbor.io添加源

helm search repo harbor查找

harbor/harbor   1.6.2           2.2.2           An open source trusted cloud native registry th.

helm pull harbor/harbor拉取安装包
tar zvxf harbor-1.6.2.tgz && mv harbor-1.6.2 harbor

在具体安装前做一下配置,其他默认
cat harbor/values.yaml

# Excerpt of harbor/values.yaml — only the settings changed from the chart
# defaults are shown ("......" marks elided default sections).
expose:
  # Expose harbor via NodePort — no ingress controller in this cluster.
  type: nodePort  
  tls:
    enabled: false
    certSource: auto
    auto:
      commonName: ""
    secret:
      secretName: ""
      notarySecretName: ""
  nodePort:
    name: harbor
    ports:
      http:
        port: 80
        nodePort: 30002
      https:
        port: 443
        nodePort: 30003
      notary:
        port: 4443
        nodePort: 30004
......
# Address clients (and docker login) use to reach harbor.
# NOTE(review): 10.0.0.19 is presumably a cluster node — confirm.
externalURL: http://10.0.0.19:30002
......
persistence:
  enabled: true
  resourcePolicy: "keep"
  # Each component is pointed at a pre-created PVC (see harbor-pvc-pv.yaml);
  # the storageClass names pair each PVC with its static NFS PV.
  persistentVolumeClaim:
    registry:
      existingClaim: "harbor-registry"
      storageClass: "harbor-registry"
      subPath: ""
      accessMode: ReadWriteOnce
      size: 5Gi
    chartmuseum:
      existingClaim: "harbor-chartmuseum"
      storageClass: "harbor-chartmuseum"
      subPath: ""
      accessMode: ReadWriteOnce
      size: 5Gi
    jobservice:
      existingClaim: "harbor-jobservice"
      storageClass: "harbor-jobservice"
      subPath: ""
      accessMode: ReadWriteOnce
      size: 1Gi
    database:
      existingClaim: "harbor-database"
      storageClass: "harbor-database"
      subPath: ""
      accessMode: ReadWriteOnce
      size: 1Gi
    redis:
      existingClaim: "harbor-redis"
      storageClass: "harbor-redis"
      subPath: ""
      accessMode: ReadWriteOnce
      size: 1Gi
    trivy:
      existingClaim: "harbor-trivy"
      storageClass: "harbor-trivy"
      subPath: ""
      accessMode: ReadWriteOnce
      size: 5Gi

创建对应的pvc与pv
cat harbor-pvc-pv.yaml

# Static NFS-backed PVs and matching PVCs for every harbor component.
# PVC names match persistence.persistentVolumeClaim.*.existingClaim in
# harbor/values.yaml; storageClassName pairs each PVC with its PV.
# PVs are cluster-scoped, so the namespace field the originals carried on
# every PV has been dropped (the API server ignores it anyway).
# NOTE(review): persistentVolumeReclaimPolicy Recycle is deprecated and
# requires a recycler pod for NFS — consider Retain.
apiVersion: v1
kind: PersistentVolume
metadata:
  name: harbor-chartmuseum
  labels:
    app: harbor
    component: chartmuseum
spec:
  capacity:
    storage: 5Gi
  accessModes:
    - ReadWriteOnce
  storageClassName: harbor-chartmuseum
  persistentVolumeReclaimPolicy: Recycle
  nfs:
    server: 10.0.0.18
    path: /data/harbor/chartmuseum
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: harbor-jobservice
  labels:
    app: harbor
    component: jobservice
spec:
  capacity:
    storage: 1Gi
  accessModes:
    - ReadWriteOnce
  storageClassName: harbor-jobservice
  persistentVolumeReclaimPolicy: Recycle
  nfs:
    server: 10.0.0.18
    path: /data/harbor/jobservice
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: harbor-registry
  labels:
    app: harbor
    component: registry
spec:
  capacity:
    storage: 5Gi
  accessModes:
    - ReadWriteOnce
  storageClassName: harbor-registry
  persistentVolumeReclaimPolicy: Recycle
  nfs:
    server: 10.0.0.18
    path: /data/harbor/registry
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: harbor-database
  labels:
    app: harbor
    component: database
spec:
  capacity:
    storage: 1Gi
  accessModes:
    - ReadWriteOnce
  storageClassName: harbor-database
  persistentVolumeReclaimPolicy: Recycle
  nfs:
    server: 10.0.0.18
    path: /data/harbor/database
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: harbor-redis
  labels:
    app: harbor
    component: redis
spec:
  capacity:
    storage: 1Gi
  accessModes:
    - ReadWriteOnce
  storageClassName: harbor-redis
  persistentVolumeReclaimPolicy: Recycle
  nfs:
    server: 10.0.0.18
    path: /data/harbor/redis
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: harbor-trivy
  labels:
    app: harbor
    component: trivy
spec:
  capacity:
    storage: 5Gi
  accessModes:
    - ReadWriteOnce
  storageClassName: harbor-trivy
  persistentVolumeReclaimPolicy: Recycle
  nfs:
    server: 10.0.0.18
    path: /data/harbor/trivy
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: harbor-chartmuseum
  namespace: devops
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 5Gi
  storageClassName: harbor-chartmuseum
  selector:
    matchLabels:
      app: "harbor"
      component: "chartmuseum"
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: harbor-jobservice
  namespace: devops
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 1Gi
  storageClassName: harbor-jobservice
  selector:
    matchLabels:
      app: "harbor"
      component: "jobservice"
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: harbor-registry
  namespace: devops
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 5Gi
  storageClassName: harbor-registry
  selector:
    matchLabels:
      app: "harbor"
      component: "registry"
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: harbor-database
  namespace: devops
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 1Gi
  storageClassName: harbor-database
  selector:
    matchLabels:
      app: "harbor"
      component: "database"
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: harbor-redis
  namespace: devops
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 1Gi
  storageClassName: harbor-redis
  selector:
    matchLabels:
      app: "harbor"
      component: "redis"
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: harbor-trivy
  namespace: devops
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 5Gi
  storageClassName: harbor-trivy
  selector:
    matchLabels:
      app: "harbor"
      component: "trivy"

kubectl create -f harbor-pvc-pv.yaml创建存储
helm install my-harbor harbor/ -n devops部署harbor
ps:harbor部署成功后请编辑harbor对应的控制器添加字段nodeSelector:app=devops让harbor的组件调度到前面规划的节点node20 node21上

3.3、nexus部署

cat nexus-rbac.yaml

# RBAC for the Nexus pod: a ServiceAccount plus a read-only ClusterRole
# over core discovery resources, bound cluster-wide.
apiVersion: v1
kind: ServiceAccount
metadata:
  name: nexus
  namespace: devops
  labels:
    k8s-app: nexus
    addonmanager.kubernetes.io/mode: Reconcile
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: nexus
  labels:
    k8s-app: nexus
    addonmanager.kubernetes.io/mode: Reconcile
rules:
  - apiGroups:
      - ""
    resources:
      - "services"
      - "namespaces"
      - "endpoints"
    verbs:
      - "get"
---
# ClusterRoleBindings are cluster-scoped: the metadata.namespace field the
# original carried is meaningless and has been dropped.
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: nexus
  labels:
    k8s-app: nexus
    addonmanager.kubernetes.io/mode: Reconcile
subjects:
  - kind: ServiceAccount
    name: nexus
    namespace: devops
    apiGroup: ""
roleRef:
  kind: ClusterRole
  name: nexus
  # roleRef must name the RBAC API group, not the core ("") group.
  apiGroup: rbac.authorization.k8s.io

cat nexus-pvc-pv.yaml

# Static NFS volume for /nexus-data.
apiVersion: v1
kind: PersistentVolume
metadata:
  # PVs are cluster-scoped; the namespace field the original carried is dropped.
  name: nexus-pv
  labels:
    pv: nexus-pv
spec:
  capacity:
    storage: 15Gi
  accessModes:
    - ReadWriteMany
  # Empty class pins this PV to static binding (no dynamic provisioner).
  storageClassName: ""
  nfs:
    path: /data/nexus
    server: 10.0.0.18
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: nexus-pvc
  namespace: devops
spec:
  accessModes:
    - ReadWriteMany
  storageClassName: ""
  resources:
    requests:
      storage: 15Gi
  # Bind to the PV above via its label.
  selector:
    matchLabels:
      pv: "nexus-pv"

cat nexus-deploy.yaml

# Nexus3 Deployment; the original was missing apiVersion, which makes
# kubectl reject the manifest.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nexus
  namespace: devops
spec:
  selector:
    matchLabels:
      name: nexus
  template:
    metadata:
      labels:
        name: nexus
    spec:
      # Keep CI components on the devops-labelled nodes (node20/node21).
      nodeSelector:
        app: devops
      serviceAccountName: nexus
      containers:
        - name: nexus
          image: sonatype/nexus3
          imagePullPolicy: IfNotPresent
          env:
            - name: NEXUS_CONTEXT
              value: nexus   # serve the UI under /nexus
          ports:
            - containerPort: 8081   # web UI / repository API
            - containerPort: 8082   # extra repository connector — confirm use
          resources:
            requests:
              cpu: 100m
              memory: 1Gi
            limits:
              cpu: 1000m
              memory: 2Gi
          volumeMounts:
            - name: nexus
              mountPath: "/nexus-data"
      volumes:
        - name: nexus
          persistentVolumeClaim:
            claimName: nexus-pvc

cat nexus-service.yaml

# NodePort Service exposing the Nexus web UI on every node at :30100.
apiVersion: v1
kind: Service
metadata:
  name: nexus-svc
  namespace: devops
  labels:
    nfs: nexus-service
    deploy-nexus: support
  annotations:
    description: Exposes Nexus Service
spec:
  type: NodePort
  selector:
    name: nexus
  ports:
    - name: nexus
      port: 8081
      targetPort: 8081
      nodePort: 30100

依次执行

kubectl create -f nexus-rbac.yaml
kubectl create -f nexus-pvc-pv.yaml
kubectl create -f nexus-deploy.yaml
kubectl create -f nexus-service.yaml

3.4、gitlab部署

gitlab部署在node18上,该节点不在集群内,使用yum方式部署在宿主机上

添加gitlab的yum源
cat /etc/yum.repos.d/gitlab-ce.repo

[gitlab-ce]
name=Gitlab CE Repository
baseurl=https://mirrors.tuna.tsinghua.edu.cn/gitlab-ce/yum/el$releasever/
# NOTE(review): GPG verification disabled — packages install unverified.
gpgcheck=0
enabled=1

EXTERNAL_URL="http://10.0.0.18:9002/gitlab" yum install -y gitlab-ce
EXTERNAL_URL是决定项目的地址,同时也是访问gitlab的地址,node18有外网地址,但是这里不要用外网地址,因为整个容器云项目各个组件不对外开放写入权限,只能从node18上通过特定的处理才能访问容器云的各个组件,这个后面会详细说明

修改配置文件
cat /etc/gitlab/gitlab.rb

# URL clients use to reach GitLab (path prefix /gitlab).
external_url 'http://10.0.0.18:9002/gitlab'
# Port advertised in SSH clone URLs.
# NOTE(review): this equals the nginx listen port below, so SSH clones on
# 9002 would reach nginx, not sshd — confirm this is intended.
gitlab_rails['gitlab_shell_ssh_port'] = 9002
# Port the bundled nginx listens on (default 80).
nginx['listen_port'] = 9002

ps:external_url是访问地址,nginx['listen_port'] = 9002是gitlab的入口,默认是80,但这里修改成9002了,这与访问地址端口一致,gitlab_rails['gitlab_shell_ssh_port'] = 9002是项目地址端口,这个如果不改,那么项目的地址会是http://10.0.0.18/gitlab,这个地址是无法拉取的,默认是22

配置后需要对gitlab重配置并启动
gitlab-ctl reconfigure && gitlab-ctl start

初始密码可以在/etc/gitlab/initial_root_password找到

3.5、sonarqube部署

sonarqube分两部分部署
数据库部署
cat sonarqube-postgresql-rbac.yaml

# RBAC for the SonarQube database pod: a ServiceAccount plus a read-only
# ClusterRole over core discovery resources, bound cluster-wide.
apiVersion: v1
kind: ServiceAccount
metadata:
  name: sonarqube-database
  namespace: devops
  labels:
    k8s-app: sonarqube-database
    addonmanager.kubernetes.io/mode: Reconcile
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: sonarqube-database
  labels:
    k8s-app: sonarqube-database
    addonmanager.kubernetes.io/mode: Reconcile
rules:
  - apiGroups:
      - ""
    resources:
      - "services"
      - "namespaces"
      - "endpoints"
    verbs:
      - "get"
---
# ClusterRoleBindings are cluster-scoped: the metadata.namespace field the
# original carried is meaningless and has been dropped.
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: sonarqube-database
  labels:
    k8s-app: sonarqube-database
    addonmanager.kubernetes.io/mode: Reconcile
subjects:
  - kind: ServiceAccount
    name: sonarqube-database
    namespace: devops
    apiGroup: ""
roleRef:
  kind: ClusterRole
  name: sonarqube-database
  # roleRef must name the RBAC API group, not the core ("") group.
  apiGroup: rbac.authorization.k8s.io

cat sonarqube-postgresql-configmap.yaml

# Credentials and database name for the SonarQube PostgreSQL instance.
# All values are quoted: ConfigMap.data is a map of string to string, and an
# unquoted 123456 parses as a YAML integer, which the API server rejects.
apiVersion: v1
kind: ConfigMap
metadata:
  name: sonarqube-database-info
  namespace: devops
data:
  POSTGRES_DB: "sonarDB"
  POSTGRES_USER: "admin"
  POSTGRES_PASSWORD: "123456"

cat sonarqube-postgresql-pvc-pv.yaml

# Static NFS volume for the PostgreSQL data directory.
apiVersion: v1
kind: PersistentVolume
metadata:
  # PVs are cluster-scoped; the namespace field the original carried is dropped.
  name: sonarqube-database-pv
  labels:
    pv: sonarqube-database-pv
spec:
  capacity:
    storage: 15Gi
  accessModes:
    - ReadWriteMany
  # Empty class pins this PV to static binding (no dynamic provisioner).
  storageClassName: ""
  nfs:
    path: /data/sonarqube/sonarqube-database
    server: 10.0.0.18
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: sonarqube-database-pvc
  namespace: devops
spec:
  accessModes:
    - ReadWriteMany
  storageClassName: ""
  resources:
    requests:
      storage: 15Gi
  # Bind to the PV above via its label.
  selector:
    matchLabels:
      pv: "sonarqube-database-pv"

cat sonarqube-postgresql-sts.yaml

# Single-replica PostgreSQL StatefulSet backing SonarQube.
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: sonarqube-database
  namespace: devops
  labels:
    app: sonarqube-database
spec:
  serviceName: sonarqube-database
  replicas: 1
  selector:
    matchLabels:
      app: sonarqube-database
  template:
    metadata:
      labels:
        app: sonarqube-database
    spec:
      # Keep CI components on the devops-labelled nodes (node20/node21).
      nodeSelector:
        app: devops
      serviceAccountName: sonarqube-database
      containers:
        - name: sonarqube-database
          image: postgres:11.4
          imagePullPolicy: IfNotPresent
          # Database name and credentials come from the
          # sonarqube-database-info ConfigMap.
          env:
            - name: POSTGRES_DB
              valueFrom:
                configMapKeyRef:
                  name: sonarqube-database-info
                  key: POSTGRES_DB
            - name: POSTGRES_USER
              valueFrom:
                configMapKeyRef:
                  name: sonarqube-database-info
                  key: POSTGRES_USER
            - name: POSTGRES_PASSWORD
              valueFrom:
                configMapKeyRef:
                  name: sonarqube-database-info
                  key: POSTGRES_PASSWORD
          ports:
            - containerPort: 5432
          volumeMounts:
            - name: data
              mountPath: /var/lib/postgresql/data
      volumes:
        - name: data
          persistentVolumeClaim:
            claimName: sonarqube-database-pvc

cat sonarqube-postgresql-service.yaml

# ClusterIP Service giving SonarQube a stable DNS name
# (sonarqube-database:5432) for its JDBC URL.
apiVersion: v1
kind: Service
metadata:
  name: sonarqube-database
  namespace: devops
  labels:
    app: sonarqube-database
spec:
  selector:
    app: sonarqube-database
  ports:
    - port: 5432
      targetPort: 5432

依次执行

kubectl create -f sonarqube-postgresql-rbac.yaml   
kubectl create -f sonarqube-postgresql-configmap.yaml
kubectl create -f sonarqube-postgresql-pvc-pv.yaml
kubectl create -f sonarqube-postgresql-service.yaml
kubectl create -f sonarqube-postgresql-sts.yaml

sonarqube部署
cat sonarqube-rbac.yaml

# RBAC for the SonarQube pod: a ServiceAccount plus a read-only ClusterRole
# over core discovery resources, bound cluster-wide.
apiVersion: v1
kind: ServiceAccount
metadata:
  name: sonarqube
  namespace: devops
  labels:
    k8s-app: sonarqube
    addonmanager.kubernetes.io/mode: Reconcile
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: sonarqube
  labels:
    k8s-app: sonarqube
    addonmanager.kubernetes.io/mode: Reconcile
rules:
  - apiGroups:
      - ""
    resources:
      - "services"
      - "namespaces"
      - "endpoints"
    verbs:
      - "get"
---
# ClusterRoleBindings are cluster-scoped: the metadata.namespace field the
# original carried is meaningless and has been dropped.
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: sonarqube
  labels:
    k8s-app: sonarqube
    addonmanager.kubernetes.io/mode: Reconcile
subjects:
  - kind: ServiceAccount
    name: sonarqube
    namespace: devops
    apiGroup: ""
roleRef:
  kind: ClusterRole
  name: sonarqube
  # roleRef must name the RBAC API group, not the core ("") group.
  apiGroup: rbac.authorization.k8s.io

cat sonarqube-configmap.yaml

# JDBC connection info consumed by the SonarQube Deployment.
# The password must match POSTGRES_PASSWORD in the sonarqube-database-info
# ConfigMap ("123456"); the original value Qazwsx!23 did not, so SonarQube
# could never authenticate against its database.
apiVersion: v1
kind: ConfigMap
metadata:
  name: sonarqube-database-connection-info
  namespace: devops
data:
  SONARQUBE_JDBC_USERNAME: "admin"
  SONARQUBE_JDBC_PASSWORD: "123456"
  SONARQUBE_JDBC_URL: "jdbc:postgresql://sonarqube-database:5432/sonarDB"

cat sonarqube-pvc-pv.yaml

# Static NFS volumes for SonarQube conf/data/extensions.
# PVs are cluster-scoped, so the namespace field the originals carried has
# been dropped; each PVC selects its PV through the app/component labels,
# and storageClassName "" pins static binding (no dynamic provisioner).
# NOTE(review): persistentVolumeReclaimPolicy Recycle is deprecated and
# requires a recycler pod for NFS — consider Retain.
apiVersion: v1
kind: PersistentVolume
metadata:
  name: sonarqube-conf
  labels:
    app: sonarqube
    component: conf
spec:
  capacity:
    storage: 5Gi
  accessModes:
    - ReadWriteOnce
  storageClassName: ""
  persistentVolumeReclaimPolicy: Recycle
  nfs:
    server: 10.0.0.18
    path: /data/sonarqube/conf
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: sonarqube-data
  labels:
    app: sonarqube
    component: data
spec:
  capacity:
    storage: 5Gi
  accessModes:
    - ReadWriteOnce
  storageClassName: ""
  persistentVolumeReclaimPolicy: Recycle
  nfs:
    server: 10.0.0.18
    path: /data/sonarqube/data
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: sonarqube-extensions
  labels:
    app: sonarqube
    component: extensions
spec:
  capacity:
    storage: 5Gi
  accessModes:
    - ReadWriteOnce
  storageClassName: ""
  persistentVolumeReclaimPolicy: Recycle
  nfs:
    server: 10.0.0.18
    path: /data/sonarqube/extensions
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: sonarqube-conf
  namespace: devops
spec:
  accessModes:
    - ReadWriteOnce
  storageClassName: ""
  resources:
    requests:
      storage: 5Gi
  selector:
    matchLabels:
      app: "sonarqube"
      component: "conf"
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: sonarqube-data
  namespace: devops
spec:
  accessModes:
    - ReadWriteOnce
  storageClassName: ""
  resources:
    requests:
      storage: 5Gi
  selector:
    matchLabels:
      app: "sonarqube"
      component: "data"
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: sonarqube-extensions
  namespace: devops
spec:
  accessModes:
    - ReadWriteOnce
  storageClassName: ""
  resources:
    requests:
      storage: 5Gi
  selector:
    matchLabels:
      app: "sonarqube"
      component: "extensions"

cat sonarqube-deploy.yaml

# SonarQube server Deployment (single replica).
apiVersion: apps/v1
kind: Deployment
metadata:
  name: sonarqube
  namespace: devops
  labels:
    app: sonarqube
spec:
  replicas: 1
  selector:
    matchLabels:
      app: sonarqube
  template:
    metadata:
      labels:
        app: sonarqube
    spec:
      # Keep CI components on the devops-labelled nodes (node20/node21).
      nodeSelector:
        app: devops
      serviceAccountName: sonarqube
      # SonarQube's embedded Elasticsearch needs a large vm.max_map_count;
      # raise it on the host kernel before the main container starts.
      initContainers:
        - name: init-sysctl
          image: busybox
          imagePullPolicy: IfNotPresent
          command: ["sysctl", "-w", "vm.max_map_count=524288"]
          securityContext:
            privileged: true   # required to change a host kernel setting
      containers:
        - name: sonarqube
          image: sonarqube:lts
          ports:
            - containerPort: 9000
          # JDBC settings come from the
          # sonarqube-database-connection-info ConfigMap.
          env:
            - name: SONARQUBE_JDBC_USERNAME
              valueFrom:
                configMapKeyRef:
                  name: sonarqube-database-connection-info
                  key: SONARQUBE_JDBC_USERNAME
            - name: SONARQUBE_JDBC_PASSWORD
              valueFrom:
                configMapKeyRef:
                  name: sonarqube-database-connection-info
                  key: SONARQUBE_JDBC_PASSWORD
            - name: SONARQUBE_JDBC_URL
              valueFrom:
                configMapKeyRef:
                  name: sonarqube-database-connection-info
                  key: SONARQUBE_JDBC_URL
          volumeMounts:
            - name: conf
              mountPath: /opt/sonarqube/conf
            - name: data
              mountPath: /opt/sonarqube/data
            - name: extensions
              mountPath: /opt/sonarqube/extensions
      volumes:
        - name: conf
          persistentVolumeClaim:
            claimName: sonarqube-conf
        - name: data
          persistentVolumeClaim:
            claimName: sonarqube-data
        - name: extensions
          persistentVolumeClaim:
            claimName: sonarqube-extensions

cat sonarqube-service.yaml

# NodePort Service exposing the SonarQube UI.
# Fixes: the original was missing apiVersion (kubectl rejects it), and its
# nodePort 30003 collided with the harbor https nodePort in values.yaml.
apiVersion: v1
kind: Service
metadata:
  name: sonarqube
  namespace: devops
  labels:
    app: sonarqube
spec:
  type: NodePort
  ports:
    - name: sonarqube
      port: 9000
      targetPort: 9000
      nodePort: 30090
  selector:
    app: sonarqube

依次执行

kubectl create -f sonarqube-rbac.yaml   
kubectl create -f sonarqube-configmap.yaml
kubectl create -f sonarqube-pvc-pv.yaml  
kubectl create -f sonarqube-deploy.yaml
kubectl create -f sonarqube-service.yaml