Setting up a Kubernetes cluster and deploying the dashboard

  

### View kubernetes state
```
kubectl get pods -A # check the status
kubectl get cs # to view the ready state k8s
kubectl get node # View k8s node status
kubectl -n kube-system get service kubernetes-dashboard # Gets port information
```

### Operations and maintenance commands
```
kubectl get pods --all-namespaces                                    # get pod status in all namespaces
journalctl -f -u kubelet.service                                     # follow the kubelet log
kubectl apply -f <file>                                              # deploy a component
kubectl delete -f <file>                                             # delete a component
kubectl get node                                                     # get the status of all nodes
kubectl describe pod kubernetes-dashboard-7d75c474bb-zvc85 -n kube-system   # inspect why the pod is in its current state
kubectl logs -f kubernetes-dashboard-7d75c474bb-zvc85 -n kube-system        # follow the pod's log
```

### Initialization
```
kubeadm init --kubernetes-version=v1.15.0 --pod-network-cidr=172.16.0.0/16 --apiserver-advertise-address=192.168.1.239
# Add a k8s node
kubeadm join 192.168.1.239:6443 --token mcpg7g.hosgnl6ljwconxxe \
    --discovery-token-ca-cert-hash sha256:864cf0a1b8ee307a557f780ef30856278898dbe36259575699134d5389d9e935

#installation
yum install -y yum-utils device-mapper-persistent-data lvm2
yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo

# Get pods state
kubectl get pods coredns-5c98db65d4-6hbhd -n kube-system -o yaml
kubectl get pods coredns-5c98db65d4-6hbhd --namespace=kube-system -o yaml | grep resources

### Initialization, checking pod/node status, viewing logs, and deleting resources by yaml
sudo kubeadm init --kubernetes-version=v1.15.0 --apiserver-advertise-address=192.168.1.239 --pod-network-cidr=192.168.0.0/16
kubectl get pods --all-namespaces
kubectl get node 
journalctl -f -u kubelet.service   # follow the kubelet log
kubectl delete -f rbac-kdd.yaml


kubectl delete -f calico.yaml
kubectl delete pod calico-node-zplqs -n kube-system
kubectl describe pod kubernetes-dashboard-7d75c474bb-zvc85  -n kube-system
kubectl logs -f kube-apiserver-server -n kube-system


kubectl delete -f "https://cloud.weave.works/k8s/net?k8s-version=$(kubectl version | base64 | tr -d '\n')"
kubectl delete -f calico.yaml
kubectl apply -f https://docs.projectcalico.org/v3.1/getting-started/kubernetes/installation/hosted/rbac-kdd.yaml
kubectl apply -f https://docs.projectcalico.org/v3.1/getting-started/kubernetes/installation/hosted/kubernetes-datastore/calico-networking/1.7/calico.yaml
```

### kubernetes installation
```
1. Machine Installation Preparation

Disable SELinux, the firewall, and swap (run on all nodes)

Turn off the firewall
systemctl stop firewalld.service
systemctl disable firewalld.service 

Close selinux
setenforce 0
sed -i 's/^SELINUX=enforcing$/SELINUX=permissive/' /etc/selinux/config

Disable swap
swapoff -a
Also comment out the swap entry in /etc/fstab to make this persistent

Let bridged traffic pass through iptables/ip6tables
cat <<EOF >  /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
EOF


Installation docker
yum install -y yum-utils device-mapper-persistent-data lvm2
yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo

yum update
yum install docker -y
systemctl start docker
systemctl enable docker


vi /etc/docker/daemon.json
{
  "registry-mirrors": [
    "https://dockerhub.azk8s.cn",
    "https://reg-mirror.qiniu.com"
  ]
}

sudo systemctl daemon-reload
sudo systemctl restart docker

docker info   # verify the registry mirrors are configured


k8s installation

cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
exclude=kube*
EOF


cat <<EOF >  /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
EOF

yum install -y kubelet kubeadm kubectl --disableexcludes=kubernetes

systemctl enable kubelet && systemctl start kubelet

cat <<EOF >  /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
EOF

sysctl --system




kubeadm config images list


k8s accelerator
#!/bin/bash
# Pull the k8s v1.15.0 control-plane images from the Aliyun mirror and
# re-tag them as k8s.gcr.io/* so kubeadm finds them locally (works around
# gcr.io being unreachable from mainland China).
mirror=registry.cn-hangzhou.aliyuncs.com/google_containers
images=(
    kube-apiserver:v1.15.0
    kube-controller-manager:v1.15.0
    kube-scheduler:v1.15.0
    kube-proxy:v1.15.0
    pause:3.1
    etcd:3.3.10
    coredns:1.3.1
)
# Quote the array expansion so each image name stays a single word.
for imageName in "${images[@]}"; do
    docker pull "${mirror}/${imageName}"
    docker tag "${mirror}/${imageName}" "k8s.gcr.io/${imageName}"
done


kubeadm init --kubernetes-version=v1.15.0 --pod-network-cidr=10.244.0.0/16


mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config


Allow pods to be scheduled on the master node
kubectl taint nodes --all node-role.kubernetes.io/master-


kubeadm join 192.168.1.116:6443 --token plrite.69a1a1jwrb7bjonb --discovery-token-ca-cert-hash sha256:91af313ecdf8f14832e0cc199bc4576af58b6c6f609270904d10c377326db1a3 --ignore-preflight-errors=Swap

docker pull quay.io/coreos/flannel:v0.11.0-amd64

kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml

openssl x509 -pubkey -in /etc/kubernetes/pki/ca.crt | openssl rsa -pubin -outform der 2>/dev/null | openssl dgst -sha256 -hex | sed 's/^.* //'


 kubectl create clusterrolebinding tiller-cluster-rule --clusterrole=cluster-admin --serviceaccount=kube-system:tiller

 kubectl patch deploy --namespace kube-system tiller-deploy -p '{"spec":{"template":{"spec":{"serviceAccount":"tiller"}}}}'


 kubectl -n kube-system describe secret $(kubectl -n kube-system get secret | grep admin-user | awk '{print $1}')   # print the admin-user token for dashboard login
```

Guess you like

Origin www.cnblogs.com/sxgaofeng/p/12029101.html