Redis cluster data migration deployed by k8s

redis cluster data migration

System version CentOS 7.9

k8s version v1.19.16

redis version 6.2.6

Source: single point redis data

Purpose: redis cluster, 3 masters and 3 slaves

1. View cluster information

[root@k8s-master01 redis]# kubectl -n ops get po -owide |grep redis
redis-0                          1/1     Running   0          13m     100.84.122.168   k8s-master02   <none>           <none>
redis-cluster-0                  1/1     Running   0          16h     100.84.122.161   k8s-master02   <none>           <none>
redis-cluster-1                  1/1     Running   0          16h     100.66.195.13    k8s-master03   <none>           <none>
redis-cluster-2                  1/1     Running   0          16h     100.84.122.160   k8s-master02   <none>           <none>
redis-cluster-3                  1/1     Running   0          16h     100.66.195.11    k8s-master03   <none>           <none>
redis-cluster-4                  1/1     Running   0          16h     100.84.122.166   k8s-master02   <none>           <none>
redis-cluster-5                  1/1     Running   0          16h     100.66.195.12    k8s-master03   <none>           <none>
[root@k8s-master01 redis]# 

[root@k8s-master01 redis]# kubectl get pvc -n ops |grep redis-pvc
redis-pvc                  Bound    pvc-399ae488-2ca8-4100-92d6-21b47c836234   1Gi        RWO            nfs-boge       33m
[root@k8s-master01 redis]# 



2. Migration ideas

1. Create a single-point redis pvc named redis-pvc

# Write the PVC manifest for the standalone Redis instance.
# The heredoc delimiter is quoted ('EOF') so the shell performs no
# expansion inside the YAML — consistent with the second manifest below.
cat > 01-redis-pv.yaml <<'EOF'
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  namespace: ops
  name: redis-pvc
spec:
  storageClassName: nfs-boge  # change to your own dynamic StorageClass name
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 1Gi
EOF


kubectl apply -f 01-redis-pv.yaml


2. Import the redis backup file into pvc (redis-pvc)
3. Deploy a single point redis (redis-0)

# Write the manifest for the standalone Redis (ConfigMap + StatefulSet + Service).
#
# Fixes vs. the original:
#  * The ConfigMap volume is now mounted with `subPath: redis.conf`.
#    Without subPath, mounting at /etc/redis/redis.conf creates a DIRECTORY
#    of that name (containing the key as a file), so the intended config
#    file path never exists.
#  * The container now runs `redis-server /etc/redis/redis.conf` explicitly.
#    The stock redis image starts redis-server without any config file,
#    so previously the ConfigMap was never applied at all.
cat > 02-redis-dan-yaml.yaml << 'eof'
apiVersion: v1
kind: ConfigMap
metadata:
  namespace: ops
  name: redis-single-config
data:
  redis.conf: |
    daemonize no
    bind 0.0.0.0
    port 6379
    tcp-backlog 511
    timeout 0
    tcp-keepalive 300
    pidfile /data/redis-server.pid
    logfile /data/redis.log
    loglevel notice
    databases 1
    always-show-logo yes
    save 900 1
    save 300 10
    save 60 10000
    stop-writes-on-bgsave-error yes
    rdbcompression yes
    rdbchecksum yes
    dbfilename dump.rdb
    dir /data
    slave-serve-stale-data yes
    slave-read-only yes
    repl-diskless-sync no
    repl-diskless-sync-delay 5
    repl-disable-tcp-nodelay no
    slave-priority 100
    appendonly yes
    appendfilename "appendonly.aof"
    appendfsync everysec
    no-appendfsync-on-rewrite no
    auto-aof-rewrite-percentage 100
    auto-aof-rewrite-min-size 64mb
    aof-load-truncated yes
    lua-time-limit 5000
    slowlog-log-slower-than 10000
    slowlog-max-len 128
    latency-monitor-threshold 0
    notify-keyspace-events ""
    hash-max-ziplist-entries 512
    hash-max-ziplist-value 64
    list-max-ziplist-size -2
    list-compress-depth 0
    set-max-intset-entries 512
    zset-max-ziplist-entries 128
    zset-max-ziplist-value 64
    hll-sparse-max-bytes 3000
    activerehashing yes
    client-output-buffer-limit normal 0 0 0
    client-output-buffer-limit slave 256mb 64mb 60
    client-output-buffer-limit pubsub 32mb 8mb 60
    hz 10
    aof-rewrite-incremental-fsync yes
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  namespace: ops
  name: redis
spec:
  serviceName: redis
  replicas: 1  # number of replicas; adjust to your needs
  selector:
    matchLabels:
      app: redis
  template:
    metadata:
      labels:
        app: redis
    spec:
      containers:
      - name: redis
        image: redis:6.2.6
        # Start redis-server with the mounted config file; the stock image
        # otherwise runs redis-server with built-in defaults only.
        command: ["redis-server", "/etc/redis/redis.conf"]
        ports:
        - containerPort: 6379
        volumeMounts:
        - name: redis-data
          mountPath: /data
        - name: redis-single-config
          mountPath: /etc/redis/redis.conf
          # subPath projects only the redis.conf key as a single file;
          # without it, the mountPath becomes a directory.
          subPath: redis.conf
      volumes:
        - name: redis-single-config
          configMap:
            name: redis-single-config
        - name: redis-data
          persistentVolumeClaim:
            claimName: redis-pvc
---
apiVersion: v1
kind: Service
metadata:
  namespace: ops
  name: redis-service
spec:
  selector:
    app: redis
  type: NodePort     # NodePort so the instance is reachable from outside the cluster
  ports:
    - name: redis
      port: 6379
      protocol: TCP
      targetPort: 6379
      nodePort: 30011  # node port to expose; pick any free port in the NodePort range
eof


kubectl apply -f  02-redis-dan-yaml.yaml



4. The cluster executes the command to synchronize the data in redis-0

You can import data from an external Redis instance into a Redis cluster.
Here the external instance (100.84.122.168:6379) is imported through one node of the cluster (100.84.122.161:6379) — any cluster node will do.
During the import, keys are moved off the source instance (it ends up empty), and the imported keys are automatically distributed across the hash slots of each master node.
Because `--cluster-replace` is given, any key from the external instance that already exists in the cluster will have its cluster value overwritten.

kubectl -n ops exec -it redis-cluster-0 -- redis-cli --cluster import 100.84.122.161:6379 --cluster-from 100.84.122.168:6379 --cluster-replace -a OpsRedis

3. Verify redis information

# Log in to redis
kubectl -n ops exec -it redis-cluster-0 -- redis-cli -c -p 6379  -a OpsRedis
127.0.0.1:6379> keys *

kubectl -n ops exec -it redis-cluster-1 -- redis-cli -c -p 6379  -a OpsRedis
127.0.0.1:6379> keys *

kubectl -n ops exec -it redis-cluster-2 -- redis-cli -c -p 6379  -a OpsRedis
127.0.0.1:6379> keys *

Guess you like

Origin blog.csdn.net/qq_35583325/article/details/132756793