Database master-slave replication: containerized version

Machines (Ubuntu 20.04, Kubernetes 1.23):

master 192.168.111.141
node01 192.168.111.142
node02 192.168.111.143
nfs 192.168.111.144

1. Configure nfs

Configure NFS to provide the persistent storage backend.

Install nfs-server on the nfs host:

apt -y install nfs-server

Create a folder for backend storage.

mkdir /data-share
# adjust ownership and permissions
chown nobody:nogroup /data-share/
chmod 777 /data-share/

Edit the /etc/exports file to export the directory:

root@nfs:~# cat /etc/exports 
# /etc/exports: the access control list for filesystems which may be exported
#		to NFS clients.  See exports(5).
#
# Example for NFSv2 and NFSv3:
# /srv/homes       hostname1(rw,sync,no_subtree_check) hostname2(ro,sync,no_subtree_check)
#
# Example for NFSv4:
# /srv/nfs4        gss/krb5i(rw,sync,fsid=0,crossmnt,no_subtree_check)
# /srv/nfs4/homes  gss/krb5i(rw,sync,no_subtree_check)
#
/data-share 192.168.111.0/24(rw,sync,no_root_squash,no_subtree_check)

Run the following commands to reload the NFS exports and verify them:

root@nfs:~# exportfs -arv
root@nfs:~# showmount -e 127.0.0.1
Export list for 127.0.0.1:
/data-share 192.168.111.0/24

Install nfs-common on the clients, namely node01 and node02:

apt -y install nfs-common

Mount the NFS share from node01 and node02 respectively to check that mounting works.

# mount the share
root@node01:~# mount -t nfs 192.168.111.144:/data-share /data

# check with df -h
root@node01:/data# df -h
Filesystem                         Size  Used Avail Use% Mounted on
tmpfs                              389M  2.4M  387M   1% /run
/dev/mapper/ubuntu--vg-ubuntu--lv   29G   13G   15G  46% /
tmpfs                              1.9G     0  1.9G   0% /dev/shm
tmpfs                              5.0M     0  5.0M   0% /run/lock
/dev/sda2                          1.5G  246M  1.2G  18% /boot
tmpfs                              389M  4.0K  389M   1% /run/user/0
nfs:/data-share                     29G  7.8G   20G  29% /data

# test whether the folder is actually shared
root@node01:/data# echo "Hello,I'm node01" >> hello
root@node01:/data# cat hello 
Hello,I'm node01

# switch to the nfs server and check
root@nfs:~# cd /data-share/
root@nfs:/data-share# ll
total 12
drwxrwxrwx  2 nobody nogroup 4096 Oct 20 12:16 ./
drwxr-xr-x 20 root   root    4096 Oct 20 03:47 ../
-rw-r--r--  1 nobody nogroup   17 Oct 20 12:17 hello
root@nfs:/data-share# cat hello 
Hello,I'm node01
The mount succeeds. Test node02 in the same way.
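
Since the provisioner will mount the share inside Kubernetes itself, this manual mount was only a test; you can unmount it again afterwards:

root@node01:~# umount /data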

2. Set up PVs for persistent storage

1. Install the NFS provisioner:

Download the YAML file for the NFS provisioner and modify its configuration.

Yaml file download address: https://raw.githubusercontent.com/kubernetes-sigs/nfs-subdir-external-provisioner/master/deploy/deployment.yaml

root@master01:/opt/k8s/controller/mysql# wget https://raw.githubusercontent.com/kubernetes-sigs/nfs-subdir-external-provisioner/master/deploy/deployment.yaml
# rename it for easier identification
root@master01:/opt/k8s/controller/mysql# mv deployment.yaml  nfs-deploy.yaml
## nfs-deploy.yaml is configured as follows
root@master01:/opt/k8s/controller/mysql# cat nfs-deploy.yaml 
apiVersion: v1
kind: Namespace
metadata:
  name: nfs-pro

---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nfs-client-provisioner
  labels:
    app: nfs-client-provisioner
  # replace with namespace where provisioner is deployed
  namespace: nfs-pro    # change to the namespace created above
spec:
  replicas: 1
  strategy:
    type: Recreate
  selector:
    matchLabels:
      app: nfs-client-provisioner
  template:
    metadata:
      labels:
        app: nfs-client-provisioner
    spec:
      serviceAccountName: nfs-client-provisioner
      containers:
        - name: nfs-client-provisioner
          image: registry.cn-hangzhou.aliyuncs.com/smxy-cc/nfs-subdir-external-provisioner:v4.0.2 # use a mirror image; pulling the original image may fail
          volumeMounts:
            - name: nfs-client-root
              mountPath: /persistentvolumes
          env:
            - name: PROVISIONER_NAME
              value: k8s-sigs.io/nfs-subdir-external-provisioner
            - name: NFS_SERVER
              value: 192.168.111.144  # change to the NFS server's IP address
            - name: NFS_PATH
              value: /data-share    # change to the directory to be mounted
      volumes:
        - name: nfs-client-root
          nfs:
            server: 192.168.111.144   # change to the NFS server's IP address
            path: /data-share      # change to the directory to be mounted


## create it
root@master01:/opt/k8s/controller/mysql# kubectl apply -f nfs-deploy.yaml

Create the nfs-rbac.yaml file (remember to change the namespace to the nfs-pro namespace created above).

[root@master01/opt/k8s/controller/mysql] # cat nfs-rbac.yaml 
apiVersion: v1
kind: ServiceAccount
metadata:
  name: nfs-client-provisioner   # must match the name used in the deployment file
  namespace: nfs-pro
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: nfs-client-provisioner-runner
rules:
  - apiGroups: [""]
    resources: ["nodes"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch", "create", "delete"]
  - apiGroups: [""]
    resources: ["persistentvolumeclaims"]
    verbs: ["get", "list", "watch", "update"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["storageclasses"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["create", "update", "patch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: run-nfs-client-provisioner
subjects:
  - kind: ServiceAccount
    name: nfs-client-provisioner
    # replace with namespace where provisioner is deployed
    namespace: nfs-pro
roleRef:
  kind: ClusterRole
  name: nfs-client-provisioner-runner
  apiGroup: rbac.authorization.k8s.io
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: leader-locking-nfs-client-provisioner
  # replace with namespace where provisioner is deployed
  namespace: nfs-pro
rules:
  - apiGroups: [""]
    resources: ["endpoints"]
    verbs: ["get", "list", "watch", "create", "update", "patch"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: leader-locking-nfs-client-provisioner
  # replace with namespace where provisioner is deployed
  namespace: nfs-pro
subjects:
  - kind: ServiceAccount
    name: nfs-client-provisioner
    # replace with namespace where provisioner is deployed
    namespace: nfs-pro
roleRef:
  kind: Role
  name: leader-locking-nfs-client-provisioner
  apiGroup: rbac.authorization.k8s.io
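
Apply it so the provisioner's ServiceAccount and permissions exist:

[root@master01/opt/k8s/controller/mysql] # kubectl apply -f nfs-rbac.yaml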

Create the StorageClass (once the SC exists, we no longer need to create PVs one by one).

[root@master01/opt/k8s/controller/mysql] # cat nfs-sc.yaml 
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: nfs-storage    # name of the StorageClass; needed when creating the PVC
provisioner: k8s-sigs.io/nfs-subdir-external-provisioner # NFS provisioner name; must match the one set in the deployment
parameters:
  archiveOnDelete: "false"

[root@master01/opt/k8s/controller/mysql] # kubectl apply -f nfs-sc.yaml

Create pvc

[root@master01/opt/k8s/controller/mysql] # cat nfs-pvc.yaml 
apiVersion: v1
kind: PersistentVolumeClaim
metadata: 
  name: nfs-pvc
  namespace: nfs-pro
spec:
  accessModes:
  - ReadWriteMany
  volumeMode: Filesystem
  storageClassName:  nfs-storage   # must match the StorageClass name created in the previous step
  resources:
    requests:
       storage: 8Gi


[root@master01/opt/k8s/controller/mysql] # kubectl apply -f nfs-pvc.yaml
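
To confirm that dynamic provisioning works, check that the StorageClass exists and the PVC becomes Bound (the exact output will vary by environment):

[root@master01/opt/k8s/controller/mysql] # kubectl get sc,pvc -n nfs-pro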

3. Implement master-slave replication of the database

When implementing master-slave replication of the database, we need to solve three points:

  1. The master node and slave nodes must have different configuration files (i.e. my.cnf).
  2. The master node and slave nodes must be able to transfer backup files between them.
  3. Before a slave node starts for the first time, some initial SQL operations need to be performed.

Let’s solve it below:

Because this is fairly complicated to deploy, we first define a framework and then add the configuration to it.

Let's walk through this framework; a skeleton of the StatefulSet is sketched below.

The controller we choose is StatefulSet: a master-slave replication cluster consists of stateful pods, and StatefulSet is designed precisely for stateful workloads, so it is the natural choice here. I will come back to this during the actual deployment.

selector: indicates that this controller only manages pods carrying the label app: mysql.

serviceName: this is the biggest difference between a StatefulSet and a Deployment. It tells the StatefulSet to use the mysql headless Service to guarantee each Pod a "resolvable identity" while executing its control loop.

replicas: controls the number of pods.

template: the pod template. It contains initContainers, the initialization containers; the three problems listed above all depend on deciding whether a pod is the master or a slave, and these init containers make that judgment for us.

volumeClaimTemplates: because our database cluster is stateful, each pod's data must be stored persistently through a PV, and this template requests that storage.
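
A minimal skeleton of that StatefulSet looks roughly like this (structure only; the init containers, containers, and volumes are filled in piece by piece in the rest of this article):

apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: mysql
spec:
  selector:
    matchLabels:
      app: mysql           # only manage pods with this label
  serviceName: mysql       # the headless Service that gives pods stable identities
  replicas: 3
  template:
    metadata:
      labels:
        app: mysql
    spec:
      initContainers: []   # init-mysql and clone-mysql go here
      containers: []       # mysql and xtrabackup go here
      volumes: []          # conf (emptyDir) and config-map go here
  volumeClaimTemplates: [] # one "data" PVC per pod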

Next, create two Services. The headless Service mysql gives every pod a stable DNS name, which is how we address the master directly (mysql-0.mysql) for writes, while the ordinary Service mysql-read load-balances read queries across all pods.

[root@master01/opt/k8s/controller/mysql] # cat mysql-svc.yaml 
apiVersion: v1
kind: Service
metadata:
  name: mysql
  labels:
    app: mysql
spec:
  ports:
  - name: mysql
    port: 3306
  clusterIP: None
  selector:
    app: mysql
---
apiVersion: v1
kind: Service
metadata:
  name: mysql-read
  labels:
    app: mysql
spec:
  ports:
  - name: mysql
    port: 3306
  selector:
    app: mysql
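
Once the StatefulSet pods from the later sections exist, the headless Service can be checked by resolving a pod name; the busybox image here is just one convenient way to run nslookup:

kubectl run -it dns-test --image=busybox:1.28 --rm --restart=Never -- nslookup mysql-0.mysql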

That's it for the framework. Next we address, one by one, the three points raised above.

1. The master node and slave node require different configuration files (my.cnf)

Because a ConfigMap is a convenient way to mount the required files, we write a ConfigMap to hold the configuration files.

ConfigMap.yaml

[root@master01/opt/k8s/controller/mysql] # cat ConfigMap.yaml 
apiVersion: v1
kind: ConfigMap
metadata:
  name: mysql
  labels:
    app: mysql
data:
  master.cnf: |
    # MySQL configuration for the master node
    [mysqld]
    log-bin
  slave.cnf: |
    # MySQL configuration for the slave nodes
    [mysqld]
    super-read-only

A ConfigMap stores key-value pairs: the name before the | is the key and the text after it is the value. The master node gets log-bin (binary logging enabled), and the slave nodes get super-read-only, meaning only reads are allowed.

Create the ConfigMap and view its details.
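
Creating it is just an apply of the file above:

[root@master01/opt/k8s/controller/mysql] # kubectl apply -f ConfigMap.yaml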

[root@master01/opt/k8s/controller/mysql] # kubectl describe cm mysql 
Name:         mysql
Namespace:    default
Labels:       app=mysql
Annotations:  <none>

Data
====
master.cnf:
----
# MySQL configuration for the master node
[mysqld]
log-bin

slave.cnf:
----
# MySQL configuration for the slave nodes
[mysqld]
super-read-only


BinaryData
====

Events:  <none>

Mount the ConfigMap into the pod as a volume; the volume is named config-map.

      - name: config-map
        configMap:
         name: mysql

Next, we need to place master.cnf or slave.cnf into the corresponding pod.

The following code is explained in detail in its comments.

initContainers:
      - name: init-mysql
        image: mysql:5.7
        command:
        - bash
        - "-c"
        - |
          # If any of the commands below fails, stop and do not continue.
          set -ex
          # Step 1: regex match; if the characters after the trailing '-' in the hostname are digits, continue, otherwise exit.
          [[ $HOSTNAME =~ -([0-9]+)$ ]] || exit 1
          # ordinal = the number after the '-' in the hostname
          ordinal=${BASH_REMATCH[1]}
          # Write [mysqld] into the file, which also creates server-id.cnf
          echo [mysqld] > /mnt/conf.d/server-id.cnf
          # server-id=0 has a special meaning, so add 100 to avoid it
          echo server-id=$((100 + $ordinal)) >> /mnt/conf.d/server-id.cnf
          # Decide master vs. slave below: ordinal 0 becomes the master, everything else is a slave.
          # The volumeMounts below (config-map) place the two ConfigMap files under /mnt/config-map.
          # If the pod ordinal is 0 (master node), copy /mnt/config-map/master.cnf into /mnt/conf.d;
          # otherwise (slave node), copy /mnt/config-map/slave.cnf into /mnt/conf.d.
          # /mnt/conf.d itself is mounted as the volume named conf.
          if [[ $ordinal -eq 0 ]]; then
            cp /mnt/config-map/master.cnf /mnt/conf.d/
          else
            cp /mnt/config-map/slave.cnf /mnt/conf.d/
          fi
        # The mounts were explained above.
        volumeMounts:
        - name: conf
          mountPath: /mnt/conf.d
        - name: config-map
          mountPath: /mnt/config-map

2. The master node and slave nodes must be able to transfer backup files.

Here we use XtraBackup, a widely used open-source MySQL backup and recovery tool. XtraBackup backs up the master node's data to a specified directory, and this step automatically generates an xtrabackup_binlog_info file containing two values that we need when initializing the slave nodes.
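
For reference, xtrabackup_binlog_info holds the binlog file name and position at the time of the backup; the values below are purely illustrative:

# cat xtrabackup_binlog_info
mysql-0-bin.000003    154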

The code is as follows (detailed explanation):

      - name: clone-mysql
        image: yizhiyong/xtrabackup
        command:
        - bash
        - "-c"
        - |
          set -ex
          # The clone runs only on first startup; if data already exists, this is not the first start, so exit.
          [[ -d /var/lib/mysql/mysql ]] && exit 0
          # Exit if the characters after the trailing '-' are not digits.
          [[ `hostname` =~ -([0-9]+)$ ]] || exit 1
          # ordinal = the number after the '-'
          ordinal=${BASH_REMATCH[1]}
          # If ordinal is 0, the pod is mysql-0 (the master), which does not need to clone, so exit.
          # All the remaining pods are slaves and need to clone their data from the master side.
          [[ $ordinal -eq 0 ]] && exit 0
          # Connect with ncat to port 3307 of the previous peer and unpack its /var/lib/mysql data with xbstream.
          ncat --recv-only mysql-$(($ordinal-1)).mysql 3307 | xbstream -x -C /var/lib/mysql
          # Prepare the received data in /var/lib/mysql (xtrabackup's copy) so it can be used via the mount below.
          xtrabackup --prepare --target-dir=/var/lib/mysql
        # Mount the cloned data so the mysql container below can use it.
        volumeMounts:
        - name: data
          mountPath: /var/lib/mysql
        - name: conf
          mountPath: /etc/mysql/conf.d

3. Initialize the slave node

We now have the configuration files (master.cnf, slave.cnf) and the cloned backup data, but some SQL still has to be run when a slave node is initialized. So we use an additional container to issue that SQL on the slave nodes.

The code is as follows (detailed explanation):

      - name: xtrabackup
        image: yizhiyong/xtrabackup
        ports:
        - name: xtrabackup
          containerPort: 3307
        command:
        - bash
        - "-c"
        - |
          set -ex
          # Thanks to the mounts below, /var/lib/mysql holds the data cloned by clone-mysql; change into it.
          cd /var/lib/mysql
          # If the cloned data came from another slave, xtrabackup_slave_info already contains a usable
          # CHANGE MASTER TO statement, so we can take it directly.
          if [[ -f xtrabackup_slave_info && "x$(<xtrabackup_slave_info)" != "x" ]]; then
            # Extract the useful part into change_master_to.sql.in (strip the trailing semicolon).
            cat xtrabackup_slave_info | sed -E 's/;$//g' > change_master_to.sql.in
            # Delete the files afterwards; the initialization must run only once.
            rm -f xtrabackup_slave_info xtrabackup_binlog_info
          # If the clone came from the master node, take the two values we need from xtrabackup_binlog_info.
          elif [[ -f xtrabackup_binlog_info ]]; then
            # As explained above, XtraBackup creates xtrabackup_binlog_info, whose two values are used
            # when initializing the slave node.
            [[ `cat xtrabackup_binlog_info` =~ ^(.*?)[[:space:]]+(.*?)$ ]] || exit 1
            # Remove the files so we do not initialize again.
            rm -f xtrabackup_binlog_info xtrabackup_slave_info
            # Write the extracted values into change_master_to.sql.in.
            echo "CHANGE MASTER TO MASTER_LOG_FILE='${BASH_REMATCH[1]}',\
                  MASTER_LOG_POS=${BASH_REMATCH[2]}" > change_master_to.sql.in
          fi
          # If that file exists, the initialization still has to be performed.
          if [[ -f change_master_to.sql.in ]]; then
            # We must wait until the mysql container is up before initializing, hence the until loop.
            echo "Waiting for mysqld to be ready (accepting connections)"
            until mysql -h 127.0.0.1 -e "SELECT 1"; do sleep 1; done
            # Run the initialization: combine change_master_to.sql.in into a complete SQL statement.
            echo "Initializing replication from clone position"
            mysql -h 127.0.0.1 \
                  -e "$(<change_master_to.sql.in), \
                          MASTER_HOST='mysql-0.mysql', \
                          MASTER_USER='root', \
                          MASTER_PASSWORD='', \
                          MASTER_CONNECT_RETRY=10; \
                        START SLAVE;" || exit 1
            # Rename the file so a container restart does not initialize again.
            mv change_master_to.sql.in change_master_to.sql.orig
          fi
          # Open port 3307 with ncat; this matches the port the clone step connects to. Whenever a transfer
          # request comes in, it runs "xtrabackup --backup" and streams the MySQL data to the requester.
          exec ncat --listen --keep-open --send-only --max-conns=1 3307 -c \
            "xtrabackup --backup --slave-info --stream=xbstream --host=127.0.0.1 --user=root"          
        volumeMounts:
        - name: data
          mountPath: /var/lib/mysql
        - name: conf
          mountPath: /etc/mysql/conf.d
        # Limit this container's resources; it is only a helper container.
        resources:
          requests:
            cpu: 100m
            memory: 100Mi

4. The definition of the mysql container itself

So far we have solved the three points above. Now we can look at the definition of the mysql container itself, which is very simple.

The code is as follows (detailed explanation):

      - name: mysql
        image: mysql:5.7
        env:
        - name: MYSQL_ALLOW_EMPTY_PASSWORD
          value: "1"
        ports:
        - name: mysql
          containerPort: 3306
        volumeMounts:
        - name: data
          mountPath: /var/lib/mysql
        - name: conf
          mountPath: /etc/mysql/conf.d
        resources:
          requests:
            cpu: 500m
            memory: 1Gi
        # The fields above are self-explanatory at a glance.
        # Below is a health check for the container; see my earlier article for an introduction.
        livenessProbe:
          exec:
            # Run a command that pings the server itself; if the ping fails, the container is unhealthy.
            command: ["mysqladmin", "ping"]
          # Run the first check 30s after the container starts
          initialDelaySeconds: 30
          # Check every 10s
          periodSeconds: 10
          # A check that takes more than 5s counts as a failure
          timeoutSeconds: 5
        readinessProbe:
          exec:
            # Check we can execute queries over TCP (skip-networking is off).
            command: ["mysql", "-h", "127.0.0.1", "-e", "SELECT 1"]
          initialDelaySeconds: 5
          periodSeconds: 2
          timeoutSeconds: 1

At this point, all our preparations are complete.

5. Complete yaml file

[root@master01/opt/k8s/controller/mysql] # cat mysql-statefulset.yaml 
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: mysql
spec:
  selector:
    matchLabels:
      app: mysql
      app.kubernetes.io/name: mysql
  serviceName: mysql
  replicas: 3
  template:
    metadata:
      labels:
        app: mysql
        app.kubernetes.io/name: mysql
    spec:
      initContainers:
      - name: init-mysql
        image: mysql:5.7
        command:
        - bash
        - "-c"
        - |
          set -ex
          # Generate mysql server-id from pod ordinal index.
          [[ $HOSTNAME =~ -([0-9]+)$ ]] || exit 1
          ordinal=${BASH_REMATCH[1]}
          echo [mysqld] > /mnt/conf.d/server-id.cnf
          # Add an offset to avoid reserved server-id=0 value.
          echo server-id=$((100 + $ordinal)) >> /mnt/conf.d/server-id.cnf
          # Copy appropriate conf.d files from config-map to emptyDir.
          if [[ $ordinal -eq 0 ]]; then
            cp /mnt/config-map/master.cnf /mnt/conf.d/
          else
            cp /mnt/config-map/slave.cnf /mnt/conf.d/
          fi          
        volumeMounts:
        - name: conf
          mountPath: /mnt/conf.d
        - name: config-map
          mountPath: /mnt/config-map
      - name: clone-mysql
        image: yizhiyong/xtrabackup
        command:
        - bash
        - "-c"
        - |
          set -ex
          # Skip the clone if data already exists.
          [[ -d /var/lib/mysql/mysql ]] && exit 0
          # Skip the clone on the master (ordinal index 0).
          [[ `hostname` =~ -([0-9]+)$ ]] || exit 1
          ordinal=${BASH_REMATCH[1]}
          [[ $ordinal -eq 0 ]] && exit 0
          # Clone data from previous peer.
          ncat --recv-only mysql-$(($ordinal-1)).mysql 3307 | xbstream -x -C /var/lib/mysql
          # Prepare the backup.
          xtrabackup --prepare --target-dir=/var/lib/mysql          
        volumeMounts:
        - name: data
          mountPath: /var/lib/mysql
          subPath: mysql
        - name: conf
          mountPath: /etc/mysql/conf.d
      containers:
      - name: mysql
        image: mysql:5.7
        env:
        - name: MYSQL_ALLOW_EMPTY_PASSWORD
          value: "1"
        ports:
        - name: mysql
          containerPort: 3306
        volumeMounts:
        - name: data
          mountPath: /var/lib/mysql
          subPath: mysql
        - name: conf
          mountPath: /etc/mysql/conf.d
        resources:
          requests:
            cpu: 500m
            memory: 1Gi
        livenessProbe:
          exec:
            command: ["mysqladmin", "ping"]
          initialDelaySeconds: 30
          periodSeconds: 10
          timeoutSeconds: 5
        readinessProbe:
          exec:
            # Check we can execute queries over TCP (skip-networking is off).
            command: ["mysql", "-h", "127.0.0.1", "-e", "SELECT 1"]
          initialDelaySeconds: 5
          periodSeconds: 2
          timeoutSeconds: 1
      - name: xtrabackup
        image: yizhiyong/xtrabackup
        ports:
        - name: xtrabackup
          containerPort: 3307
        command:
        - bash
        - "-c"
        - |
          set -ex
          cd /var/lib/mysql

          # Determine binlog position of cloned data, if any.
          if [[ -f xtrabackup_slave_info && "x$(<xtrabackup_slave_info)" != "x" ]]; then
            # XtraBackup already generated a partial "CHANGE MASTER TO" query
            # because we're cloning from an existing replica. (Need to remove the tailing semicolon!)
            cat xtrabackup_slave_info | sed -E 's/;$//g' > change_master_to.sql.in
            # Ignore xtrabackup_binlog_info in this case (it's useless).
            rm -f xtrabackup_slave_info xtrabackup_binlog_info
          elif [[ -f xtrabackup_binlog_info ]]; then
            # We're cloning directly from the master. Parse binlog position.
            [[ `cat xtrabackup_binlog_info` =~ ^(.*?)[[:space:]]+(.*?)$ ]] || exit 1
            rm -f xtrabackup_binlog_info xtrabackup_slave_info
            echo "CHANGE MASTER TO MASTER_LOG_FILE='${BASH_REMATCH[1]}',\
                  MASTER_LOG_POS=${BASH_REMATCH[2]}" > change_master_to.sql.in
          fi

          # Check if we need to complete a clone by starting replication.
          if [[ -f change_master_to.sql.in ]]; then
            echo "Waiting for mysqld to be ready (accepting connections)"
            until mysql -h 127.0.0.1 -e "SELECT 1"; do sleep 1; done

            echo "Initializing replication from clone position"
            mysql -h 127.0.0.1 \
                  -e "$(<change_master_to.sql.in), \
                          MASTER_HOST='mysql-0.mysql', \
                          MASTER_USER='root', \
                          MASTER_PASSWORD='', \
                          MASTER_CONNECT_RETRY=10; \
                        START SLAVE;" || exit 1
            # In case of container restart, attempt this at-most-once.
            mv change_master_to.sql.in change_master_to.sql.orig
          fi

          # Start a server to send backups when requested by peers.
          exec ncat --listen --keep-open --send-only --max-conns=1 3307 -c \
            "xtrabackup --backup --slave-info --stream=xbstream --host=127.0.0.1 --user=root"          
        volumeMounts:
        - name: data
          mountPath: /var/lib/mysql
          subPath: mysql
        - name: conf
          mountPath: /etc/mysql/conf.d
        resources:
          requests:
            cpu: 100m
            memory: 100Mi
      volumes:
      - name: conf
        emptyDir: {}
      - name: config-map
        configMap:
          name: mysql
  volumeClaimTemplates:
  - metadata:
      name: data
    spec:
      storageClassName: "nfs-storage"
      accessModes: ["ReadWriteOnce"]
      resources:
        requests:
          storage: 5Gi

Create the StatefulSet:

kubectl apply -f mysql-statefulset.yaml
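
The StatefulSet starts its pods one at a time, in order; you can follow the startup with:

kubectl get pods -l app=mysql --watch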

Created successfully:

[root@master01/opt/k8s/controller/mysql] # kubectl get pods -owide
NAME      READY   STATUS    RESTARTS   AGE   IP               NODE     NOMINATED NODE   READINESS GATES
mysql-0   2/2     Running   0          10m   10.220.140.73    node02   <none>           <none>
mysql-1   2/2     Running   0          10m   10.220.196.133   node01   <none>           <none>

6. Testing

Write data through the master node.


kubectl run mysql-client --image=mysql:5.7 -i --rm --restart=Never -- \
mysql -h mysql-0.mysql <<EOF
CREATE DATABASE test;
CREATE TABLE test.messages (message VARCHAR(250));
INSERT INTO test.messages VALUES ('hello');
EOF

View data through the mysql-read service.


kubectl run mysql-client --image=mysql:5.7 -i -t --rm --restart=Never -- \
mysql -h mysql-read -e "SELECT * FROM test.messages"
Waiting for pod default/mysql-client to be running, status is Pending, pod ready: false
+---------+
| message |
+---------+
| hello   |
+---------+
pod "mysql-client" deleted

Let's connect to mysql-1 and check the data. The data written through mysql-0 above is visible, which shows that master-slave replication is working.

## These rows are data I added later during testing, using the method above
[root@master01/opt/k8s/controller/mysql] # kubectl run -it mysql-client  --image=mysql:5.7 --rm  --restart=Never -- mysql -h mysql-1.mysql -e "select * from test.messages;"
+-------------------+
| message           |
+-------------------+
| hello,kubernetes! |
| hello,mysql-read! |
+-------------------+
pod "mysql-client" deleted

Now try to write data through mysql-1. It fails with an error saying the server is running in read-only mode and cannot accept writes.

[root@master01/opt/k8s/controller/mysql] # kubectl run -it mysql-client  --image=mysql:5.7 --rm  --restart=Never -- mysql -h mysql-1.mysql -e "insert into test.messages values ('hello,I am mysql-1');"
ERROR 1290 (HY000) at line 1: The MySQL server is running with the --super-read-only option so it cannot execute this statement
pod "mysql-client" deleted
pod default/mysql-client terminated (Error)
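
To inspect the replication state directly, you can also query a slave for its status; Slave_IO_Running and Slave_SQL_Running should both be Yes:

[root@master01/opt/k8s/controller/mysql] # kubectl run -it mysql-client --image=mysql:5.7 --rm --restart=Never -- mysql -h mysql-1.mysql -e "show slave status\G"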

7. Recording errors

Finally, a record of the errors I ran into.

root@master01:/opt/k8s/controller/mysql# kubectl get pods 
NAME      READY   STATUS             RESTARTS     AGE
mysql-0   1/2     CrashLoopBackOff   3 (5s ago)   60s
root@master01:/opt/k8s/controller/mysql# kubectl logs mysql-0 
error: a container name must be specified for pod mysql-0, choose one of: [mysql xtrabackup] or one of the init containers: [init-mysql clone-mysql]
root@master01:/opt/k8s/controller/mysql# kubectl logs mysql-0 -c clone-mysql
+ [[ -d /var/lib/mysql/mysql ]]
++ hostname
+ [[ mysql-0 =~ -([0-9]+)$ ]]
+ ordinal=0
+ [[ 0 -eq 0 ]]
+ exit 0
root@master01:/opt/k8s/controller/mysql# kubectl logs mysql-0 -c mysql
2022-10-21 17:09:31+00:00 [Note] [Entrypoint]: Entrypoint script for MySQL Server 5.7.36-1debian10 started.
chown: changing ownership of '/var/lib/mysql/': Operation not permitted
root@master01:/opt/k8s/controller/mysql# kubectl logs mysql-0 -c xtrabackup
+ cd /var/lib/mysql
+ [[ -f xtrabackup_slave_info ]]
+ [[ -f xtrabackup_binlog_info ]]
+ [[ -f change_master_to.sql.in ]]
+ exec ncat --listen --keep-open --send-only --max-conns=1 3307 -c 'xtrabackup --backup --slave-info --stream=xbstream --host=127.0.0.1 --user=root'
root@master01:/opt/k8s/controller/mysql# kubectl logs mysql-0 -c clone-mysql
+ [[ -d /var/lib/mysql/mysql ]]
++ hostname
+ [[ mysql-0 =~ -([0-9]+)$ ]]
+ ordinal=0
+ [[ 0 -eq 0 ]]
+ exit 0

An error occurred when creating the mysql pod, so it was time to track it down. The mysql container reported:

chown: changing ownership of '/var/lib/mysql/': Operation not permitted

This means the container lacks permission on the NFS volume. Looking back at my NFS configuration, it was indeed wrong: with the default root_squash behaviour, root inside the pod is mapped to nobody and cannot chown the exported directory.

Change /etc/exports to the following (the important addition is no_root_squash):

root@nfs:/data-share# cat /etc/exports 
# /etc/exports: the access control list for filesystems which may be exported
#		to NFS clients.  See exports(5).
#
# Example for NFSv2 and NFSv3:
# /srv/homes       hostname1(rw,sync,no_subtree_check) hostname2(ro,sync,no_subtree_check)
#
# Example for NFSv4:
# /srv/nfs4        gss/krb5i(rw,sync,fsid=0,crossmnt,no_subtree_check)
# /srv/nfs4/homes  gss/krb5i(rw,sync,no_subtree_check)
#
/data-share 192.168.111.0/24(rw,sync,no_root_squash,no_subtree_check)
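
After changing the export, reload it; the crashing pod should recover on its next restart (deleting it forces an immediate retry, and the StatefulSet recreates it automatically):

root@nfs:~# exportfs -arv
root@master01:/opt/k8s/controller/mysql# kubectl delete pod mysql-0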

Origin blog.csdn.net/qq_48480384/article/details/127423322