In the overall architecture of vSphere 7 with Kubernetes, Tanzu Kubernetes clusters are created, maintained, and destroyed entirely by developers; operations staff cannot manage them from the WCP control plane. Tanzu Kubernetes clusters can also serve multi-tenant scenarios.
For provisioning (creating) a Tanzu Kubernetes cluster, see the section "Provisioning Tanzu Kubernetes Clusters with the Tanzu Kubernetes Grid Service" in the earlier post, vSphere 7 Kubernetes First Look.
First, let's review the overall logical view of vSphere 7 with Kubernetes:
As the diagram shows, Tanzu Kubernetes clusters live inside a Supervisor Namespace.
We continue with the Tanzu Kubernetes cluster provisioned in that earlier post.
1. Modify the YAML file to scale out the Tanzu Kubernetes cluster
[root@localhost ~]# cat tz-cluster.yaml
apiVersion: run.tanzu.vmware.com/v1alpha1   #TKG API endpoint
kind: TanzuKubernetesCluster                #required parameter
metadata:
  name: tkg-cluster-vmlab                   #cluster name, user defined
  namespace: ns-tkg                         #supervisor namespace
spec:
  distribution:
    version: v1.16                          #resolved kubernetes version
  topology:
    controlPlane:
      count: 3                              #number of control plane nodes
      class: guaranteed-small               #vmclass for control plane nodes
      storageClass: wcp-policy              #storageclass for control plane
    workers:
      count: 4                              #number of worker nodes
      class: guaranteed-small               #vmclass for worker nodes
      storageClass: wcp-policy              #storageclass for worker nodes
Change the controlPlane count to 3 and the workers count to 4.
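The change is pushed with a plain kubectl apply from the Supervisor Cluster context (a minimal sketch, assuming the file was saved as tz-cluster.yaml as above; the confirmation line is kubectl's usual output for an updated resource):
[root@localhost ~]# kubectl apply -f tz-cluster.yaml
tanzukubernetescluster.run.tanzu.vmware.com/tkg-cluster-vmlab configured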
After the change is applied, the vCenter UI shows the new nodes.
Note that next to the cluster name, the UI indicates who manages the cluster: developer managed.
If we instead try to scale in the Tanzu Kubernetes cluster, changing the controlPlane count to 1 and the workers count to 3, the apply fails:
[root@localhost ~]# kubectl apply -f tz-cluster.yaml
Error from server (control plane scale-in not supported, worker scale-in not supported): error when applying patch:
{"metadata":{"annotations":{"kubectl.kubernetes.io/last-applied-configuration":"{\"apiVersion\":\"run.tanzu.vmware.com/v1alpha1\",\"kind\":\"TanzuKubernetesCluster\",\"metadata\":{\"annotations\":{},\"name\":\"tkg-cluster-vmlab\",\"namespace\":\"ns-vpods\"},\"spec\":{\"distribution\":{\"version\":\"v1.16\"},\"topology\":{\"controlPlane\":{\"class\":\"guaranteed-small\",\"count\":1,\"storageClass\":\"wcp-normal\"},\"workers\":{\"class\":\"guaranteed-small\",\"count\":3,\"storageClass\":\"wcp-normal\"}}}}\n"}},"spec":{"topology":{"controlPlane":{"count":1},"workers":{"count":3}}}}
to:
Resource: "run.tanzu.vmware.com/v1alpha1, Resource=tanzukubernetesclusters", GroupVersionKind: "run.tanzu.vmware.com/v1alpha1, Kind=TanzuKubernetesCluster"
Name: "tkg-cluster-vmlab", Namespace: "ns-vpods"
Object: &{map["apiVersion":"run.tanzu.vmware.com/v1alpha1" "kind":"TanzuKubernetesCluster" "metadata":map["annotations":map["kubectl.kubernetes.io/last-applied-configuration":"{\"apiVersion\":\"run.tanzu.vmware.com/v1alpha1\",\"kind\":\"TanzuKubernetesCluster\",\"metadata\":{\"annotations\":{},\"name\":\"tkg-cluster-vmlab\",\"namespace\":\"ns-vpods\"},\"spec\":{\"distribution\":{\"version\":\"v1.16\"},\"topology\":{\"controlPlane\":{\"class\":\"guaranteed-small\",\"count\":3,\"storageClass\":\"wcp-normal\"},\"workers\":{\"class\":\"guaranteed-small\",\"count\":4,\"storageClass\":\"wcp-normal\"}}}}\n"] "creationTimestamp":"2020-06-12T04:10:47Z" "finalizers":["tanzukubernetescluster.run.tanzu.vmware.com"] "generation":'\x01' "name":"tkg-cluster-vmlab" "namespace":"ns-vpods" "resourceVersion":"16696" "selfLink":"/apis/run.tanzu.vmware.com/v1alpha1/namespaces/ns-vpods/tanzukubernetesclusters/tkg-cluster-vmlab" "uid":"bef3a3a5-96de-4216-92e4-d4e940d933a8"] "spec":map["distribution":map["fullVersion":"v1.16.8+vmware.1-tkg.3.60d2ffd" "version":"v1.16"] "settings":map["network":map["cni":map["name":"calico"] "pods":map["cidrBlocks":["192.168.0.0/16"]] "serviceDomain":"cluster.local" "services":map["cidrBlocks":["10.96.0.0/12"]]]] "topology":map["controlPlane":map["class":"guaranteed-small" "count":'\x03' "storageClass":"wcp-normal"] "workers":map["class":"guaranteed-small" "count":'\x04' "storageClass":"wcp-normal"]]] "status":map["addons":map["authsvc":map["name":"" "status":"unmanaged"] "cloudprovider":map["name":"vmware-guest-cluster" "status":"applied" "version":"v1.16.8+vmware.1-tkg.3.60d2ffd"] "cni":map["name":"calico" "status":"applied" "version":"v1.16.8+vmware.1-tkg.3.60d2ffd"] "csi":map["name":"pvcsi" "status":"applied" "version":"v1.16.8+vmware.1-tkg.3.60d2ffd"] "dns":map["name":"CoreDNS" "status":"applied" "version":"v1.6.5_vmware.2"] "proxy":map["name":"kube-proxy" "status":"applied" "version":"1.16.8+vmware.1"] "psp":map["name":"defaultpsp" "status":"applied" "version":"v1.16.8+vmware.1-tkg.3.60d2ffd"]] "clusterApiStatus":map["apiEndpoints":[map["host":"172.206.24.2" "port":'\u192b']] "phase":"provisioned"] "nodeStatus":map["tkg-cluster-vmlab-control-plane-8ghw8":"ready" "tkg-cluster-vmlab-control-plane-lgkgb":"ready" "tkg-cluster-vmlab-control-plane-z55lg":"ready" "tkg-cluster-vmlab-workers-8scnj-675ffb688c-657bt":"ready" "tkg-cluster-vmlab-workers-8scnj-675ffb688c-7wxpj":"ready" "tkg-cluster-vmlab-workers-8scnj-675ffb688c-995pc":"ready" "tkg-cluster-vmlab-workers-8scnj-675ffb688c-srxff":"ready"] "phase":"running" "vmStatus":map["tkg-cluster-vmlab-control-plane-8ghw8":"ready" "tkg-cluster-vmlab-control-plane-lgkgb":"ready" "tkg-cluster-vmlab-control-plane-z55lg":"ready" "tkg-cluster-vmlab-workers-8scnj-675ffb688c-657bt":"ready" "tkg-cluster-vmlab-workers-8scnj-675ffb688c-7wxpj":"ready" "tkg-cluster-vmlab-workers-8scnj-675ffb688c-995pc":"ready" "tkg-cluster-vmlab-workers-8scnj-675ffb688c-srxff":"ready"]]]}
for: "tz-cluster.yaml": admission webhook "default.validating.tanzukubernetescluster.run.tanzu.vmware.com" denied the request: control plane scale-in not supported, worker scale-in not supported
As the error message above shows, scale-in is not supported.
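The state of the cluster can still be checked from the Supervisor namespace at any time by querying the TanzuKubernetesCluster resource directly (a sketch; the exact columns printed depend on the release):
[root@localhost ~]# kubectl get tanzukubernetesclusters tkg-cluster-vmlab -n ns-tkg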
2. Deploy workloads to the Tanzu Kubernetes cluster
- 2.1 Log in to the Tanzu Kubernetes cluster
Log in to the Tanzu Kubernetes cluster with the following command:
[root@localhost ~]# kubectl-vsphere login --server=https://172.206.0.1 --vsphere-username [email protected] --tanzu-kubernetes-cluster-name tkg-cluster-vmlab --tanzu-kubernetes-cluster-namespace ns-tkg --insecure-skip-tls-verify
Password:
Logged in successfully.
You have access to the following contexts:
172.206.0.1
ns-tkg
ns-vsphere-pod
test
tkg-cluster-vmlab
If the context you wish to use is not in this list, you may need to try
logging in again later, or contact your cluster administrator.
To change context, use `kubectl config use-context <workload name>`
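Before issuing further commands, switch the current context to the new cluster (the context name matches the cluster name in the list above; the confirmation line is kubectl's standard output):
[root@localhost ~]# kubectl config use-context tkg-cluster-vmlab
Switched to context "tkg-cluster-vmlab".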
Inside the Tanzu Kubernetes cluster we can now list the nodes:
[root@localhost ~]# kubectl get nodes
NAME                                               STATUS   ROLES    AGE     VERSION
tkg-cluster-vmlab-control-plane-jkt4z              Ready    master   2d10h   v1.16.8+vmware.1
tkg-cluster-vmlab-control-plane-mr8bk              Ready    master   10h     v1.16.8+vmware.1
tkg-cluster-vmlab-control-plane-n2d67              Ready    master   10h     v1.16.8+vmware.1
tkg-cluster-vmlab-workers-przrr-745b744dcb-4t9zd   Ready    <none>   2d10h   v1.16.8+vmware.1
tkg-cluster-vmlab-workers-przrr-745b744dcb-9xhtz   Ready    <none>   2d10h   v1.16.8+vmware.1
tkg-cluster-vmlab-workers-przrr-745b744dcb-ls5d7   Ready    <none>   10h     v1.16.8+vmware.1
tkg-cluster-vmlab-workers-przrr-745b744dcb-pft84   Ready    <none>   2d10h   v1.16.8+vmware.1
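One caveat before deploying: out of the box this cluster enforces a default pod security policy (the defaultpsp add-on visible in the status output earlier), and on some releases freshly created pods will not start until a role binding grants access to a policy. VMware's documentation shows a permissive binding along these lines (a sketch for a lab environment; tighten it for anything production-like):
[root@localhost ~]# kubectl create clusterrolebinding default-tkg-admin-privileged-binding --clusterrole=psp:vmware-system-privileged --group=system:authenticated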
- 2.2 Deploy a WordPress application
The test case we use comes from the official Kubernetes example: Deploying WordPress and MySQL with Persistent Volumes.
- 2.2.1 Create the secret required by the configuration: mysql-pass
[root@localhost ~]# kubectl create secret generic mysql-pass --from-literal=password=VMware1!
[root@localhost ~]# kubectl get secrets
NAME                  TYPE                                  DATA   AGE
default-token-kt2cl   kubernetes.io/service-account-token   3      2d10h
mysql-pass            Opaque                                1      13h
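To double-check the stored value, the secret can be decoded with standard kubectl jsonpath output and base64 (a quick sketch; the decoded value is the password set above):
[root@localhost ~]# kubectl get secret mysql-pass -o jsonpath='{.data.password}' | base64 -d
VMware1!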
- 2.2.2 Modify the PVC in the YAML files
We need persistent storage from the environment; the VMware documentation provides a Tanzu Kubernetes persistent volume claim example.
List the storage classes available in our environment:
[root@localhost ~]# kubectl get storageclasses.storage.k8s.io
NAME         PROVISIONER              AGE
wcp-policy   csi.vsphere.vmware.com   2d10h
Modify the PVC configuration in the YAML files accordingly.
First, the single-instance MySQL deployment: the MySQL container mounts its PersistentVolume at /var/lib/mysql.
application/wordpress/mysql-deployment.yaml
apiVersion: v1
kind: Service
metadata:
  name: wordpress-mysql
  labels:
    app: wordpress
spec:
  ports:
    - port: 3306
  selector:
    app: wordpress
    tier: mysql
  clusterIP: None
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: mysql-pv-claim
  labels:
    app: wordpress
spec:
  accessModes:
    - ReadWriteOnce
  storageClassName: wcp-policy   # add this environment's storage class
  resources:
    requests:
      storage: 20Gi
---
apiVersion: apps/v1 # for versions before 1.9.0 use apps/v1beta2
kind: Deployment
metadata:
  name: wordpress-mysql
  labels:
    app: wordpress
spec:
  selector:
    matchLabels:
      app: wordpress
      tier: mysql
  strategy:
    type: Recreate
  template:
    metadata:
      labels:
        app: wordpress
        tier: mysql
    spec:
      containers:
        - image: mysql:5.6
          name: mysql
          env:
            - name: MYSQL_ROOT_PASSWORD
              valueFrom:
                secretKeyRef:
                  name: mysql-pass
                  key: password
          ports:
            - containerPort: 3306
              name: mysql
          volumeMounts:
            - name: mysql-persistent-storage
              mountPath: /var/lib/mysql
      volumes:
        - name: mysql-persistent-storage
          persistentVolumeClaim:
            claimName: mysql-pv-claim
Similarly, modify the PVC configuration in the single-instance WordPress deployment YAML. The modified file is shown below:
application/wordpress/wordpress-deployment.yaml
apiVersion: v1
kind: Service
metadata:
  name: wordpress
  labels:
    app: wordpress
spec:
  ports:
    - port: 80
  selector:
    app: wordpress
    tier: frontend
  type: LoadBalancer
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: wp-pv-claim
  labels:
    app: wordpress
spec:
  accessModes:
    - ReadWriteOnce
  storageClassName: wcp-policy   # add this environment's storage class
  resources:
    requests:
      storage: 20Gi
---
apiVersion: apps/v1 # for versions before 1.9.0 use apps/v1beta2
kind: Deployment
metadata:
  name: wordpress
  labels:
    app: wordpress
spec:
  selector:
    matchLabels:
      app: wordpress
      tier: frontend
  strategy:
    type: Recreate
  template:
    metadata:
      labels:
        app: wordpress
        tier: frontend
    spec:
      containers:
        - image: wordpress:4.8-apache
          name: wordpress
          env:
            - name: WORDPRESS_DB_HOST
              value: wordpress-mysql
            - name: WORDPRESS_DB_PASSWORD
              valueFrom:
                secretKeyRef:
                  name: mysql-pass
                  key: password
          ports:
            - containerPort: 80
              name: wordpress
          volumeMounts:
            - name: wordpress-persistent-storage
              mountPath: /var/www/html
      volumes:
        - name: wordpress-persistent-storage
          persistentVolumeClaim:
            claimName: wp-pv-claim
- 2.2.3 Apply the YAML files to create the deployments, PVCs, and services.
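Both files are applied in the usual way (a sketch; the local file names are assumed to match the paths shown above), after which the resulting objects can be listed:
[root@localhost ~]# kubectl apply -f mysql-deployment.yaml
[root@localhost ~]# kubectl apply -f wordpress-deployment.yaml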
[root@localhost ~]# kubectl get deployments.apps
NAME              READY   UP-TO-DATE   AVAILABLE   AGE
wordpress         1/1     1            1           13h
wordpress-mysql   1/1     1            1           13h
[root@localhost ~]# kubectl get svc
NAME              TYPE           CLUSTER-IP       EXTERNAL-IP   PORT(S)        AGE
kubernetes        ClusterIP      10.96.0.1        <none>        443/TCP        2d11h
supervisor        ClusterIP      None             <none>        6443/TCP       2d11h
wordpress         LoadBalancer   10.110.169.180   172.206.0.4   80:31399/TCP   13h
wordpress-mysql   ClusterIP      None             <none>        3306/TCP       13h
[root@localhost ~]# kubectl get pvc
NAME             STATUS   VOLUME                                     CAPACITY   ACCESS MODES   STORAGECLASS   AGE
mysql-pv-claim   Bound    pvc-59a58519-e186-4222-a4c8-c66a022f284e   20Gi       RWO            wcp-policy     13h
wp-pv-claim      Bound    pvc-18e33a57-cd8a-4870-af8d-658e5f37e156   20Gi       RWO            wcp-policy     13h
We can see that the externally reachable WordPress service address is 172.206.0.4. Open it in a browser:
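Before (or instead of) the browser, the endpoint can be sanity-checked from the shell with plain curl (a sketch; WordPress should answer on port 80):
[root@localhost ~]# curl -sI http://172.206.0.4 | head -1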
Summary:
- Basic operations on a Tanzu Kubernetes cluster;
- Cluster scale-out is supported; scale-in is not;
- Using persistent storage in a Tanzu Kubernetes cluster;
- Verified that an application deployed in Tanzu Kubernetes can serve external traffic.