Примечание
# Configure kubelet to run in standalone mode (no API server yet) so it can
# serve the static etcd pods; run as root.
# NOTE: the original transcript had the here-doc collapsed onto one line;
# reconstructed here as a runnable script.
cat << EOF > /etc/systemd/system/kubelet.service.d/20-etcd-service-manager.conf
[Service]
ExecStart=
# Replace "systemd" with the cgroup driver of your container runtime. The default value in the kubelet is "cgroupfs".
# Replace the value of "--container-runtime-endpoint" for a different container runtime if needed.
ExecStart=/usr/bin/kubelet --address=127.0.0.1 --pod-manifest-path=/etc/kubernetes/manifests --cgroup-driver=systemd --container-runtime=remote --container-runtime-endpoint=unix:///var/run/crio/crio.sock
Restart=always
EOF
# Reload unit files and restart kubelet so the drop-in takes effect.
systemctl daemon-reload
systemctl restart kubelet
systemctl status kubelet
#!/bin/bash
# Generate one kubeadm config per etcd node under /tmp/<ip>/kubeadmcfg.yaml.
# Bash (not sh) is required: the script uses arrays.

# HOST0, HOST1 and HOST2 are the node IP addresses
export HOST0=192.168.0.205
export HOST1=192.168.0.206
export HOST2=192.168.0.207
# NAME0, NAME1 and NAME2 are the node names
export NAME0="etc01"
export NAME1="etc02"
export NAME2="etc03"

# Create temp directories to store files that will land on other hosts
mkdir -p "/tmp/${HOST0}/" "/tmp/${HOST1}/" "/tmp/${HOST2}/"

HOSTS=("${HOST0}" "${HOST1}" "${HOST2}")
NAMES=("${NAME0}" "${NAME1}" "${NAME2}")

for i in "${!HOSTS[@]}"; do
  HOST=${HOSTS[$i]}
  NAME=${NAMES[$i]}
  # BUG FIX: the original had the redirections reversed
  # ("cat >> EOF < /tmp/${HOST}/kubeadmcfg.yaml"), which tries to read the
  # not-yet-existing config as stdin and append it to a file literally named
  # "EOF". A here-doc written to the config file must be:
  cat << EOF > "/tmp/${HOST}/kubeadmcfg.yaml"
---
apiVersion: "kubeadm.k8s.io/v1beta3"
kind: InitConfiguration
nodeRegistration:
  name: ${NAME}
localAPIEndpoint:
  advertiseAddress: ${HOST}
---
apiVersion: "kubeadm.k8s.io/v1beta3"
kind: ClusterConfiguration
etcd:
  local:
    serverCertSANs:
      - "${HOST}"
    peerCertSANs:
      - "${HOST}"
    extraArgs:
      initial-cluster: ${NAMES[0]}=https://${HOSTS[0]}:2380,${NAMES[1]}=https://${HOSTS[1]}:2380,${NAMES[2]}=https://${HOSTS[2]}:2380
      initial-cluster-state: new
      name: ${NAME}
      listen-peer-urls: https://${HOST}:2380
      listen-client-urls: https://${HOST}:2379
      advertise-client-urls: https://${HOST}:2379
      initial-advertise-peer-urls: https://${HOST}:2380
EOF
done
Примечание
/etc/kubernetes/pki/etcd/ca.crt
и /etc/kubernetes/pki/etcd/ca.key
. После этого можно перейти к следующему шагу.
# kubeadm init phase certs etcd-ca
[certs] Generating "etcd/ca" certificate and key
Эта команда создаст два файла: /etc/kubernetes/pki/etcd/ca.crt
и /etc/kubernetes/pki/etcd/ca.key
.
#!/bin/bash
# Generate per-node etcd certificates from the shared CA, staging the results
# for HOST2 and HOST1 under /tmp/<ip>/ and leaving HOST0's certs in place.
# Must run as root on the node holding /etc/kubernetes/pki/etcd/ca.{crt,key}.

# HOST0, HOST1 and HOST2 are the node IP addresses
export HOST0=192.168.0.205
export HOST1=192.168.0.206
export HOST2=192.168.0.207

# Process HOST2 and HOST1 first: their certs are copied off to /tmp and the
# non-reusable ones are wiped from /etc/kubernetes/pki before the next node.
# HOST0 is last: its certs stay in place, so no copy and no cleanup.
for HOST in "${HOST2}" "${HOST1}" "${HOST0}"; do
  kubeadm init phase certs etcd-server --config="/tmp/${HOST}/kubeadmcfg.yaml"
  kubeadm init phase certs etcd-peer --config="/tmp/${HOST}/kubeadmcfg.yaml"
  kubeadm init phase certs etcd-healthcheck-client --config="/tmp/${HOST}/kubeadmcfg.yaml"
  kubeadm init phase certs apiserver-etcd-client --config="/tmp/${HOST}/kubeadmcfg.yaml"
  if [ "${HOST}" != "${HOST0}" ]; then
    cp -R /etc/kubernetes/pki "/tmp/${HOST}/"
    # cleanup non-reusable certificates (keep only the CA pair)
    find /etc/kubernetes/pki -not -name ca.crt -not -name ca.key -type f -delete
  fi
done

# clean up certs that should not be copied off this host:
# the CA private key must never leave the node that generated it
find "/tmp/${HOST2}" -name ca.key -type f -delete
find "/tmp/${HOST1}" -name ca.key -type f -delete
HOST1=192.168.0.206
HOST2=192.168.0.207
USER=user
#scp -r /tmp/${HOST1}/* ${USER}@${HOST1}:
#ssh ${USER}@${HOST1}
$su -
#chown -R root:root /home/user/pki
#mv /home/user/pki /etc/kubernetes/
#exit
$exit
#scp -r /tmp/${HOST2}/* ${USER}@${HOST2}:
#ssh ${USER}@${HOST2}
$su -
#chown -R root:root /home/user/pki
#mv /home/user/pki /etc/kubernetes/
#exit
$exit
/tmp/${HOST0}
└── kubeadmcfg.yaml
---
/etc/kubernetes/pki
├── apiserver-etcd-client.crt
├── apiserver-etcd-client.key
└── etcd
    ├── ca.crt
    ├── ca.key
    ├── healthcheck-client.crt
    ├── healthcheck-client.key
    ├── peer.crt
    ├── peer.key
    ├── server.crt
    └── server.key
$HOME
└── kubeadmcfg.yaml
---
/etc/kubernetes/pki
├── apiserver-etcd-client.crt
├── apiserver-etcd-client.key
└── etcd
    ├── ca.crt
    ├── healthcheck-client.crt
    ├── healthcheck-client.key
    ├── peer.crt
    ├── peer.key
    ├── server.crt
    └── server.key
$HOME
└── kubeadmcfg.yaml
---
/etc/kubernetes/pki
├── apiserver-etcd-client.crt
├── apiserver-etcd-client.key
└── etcd
    ├── ca.crt
    ├── healthcheck-client.crt
    ├── healthcheck-client.key
    ├── peer.crt
    ├── peer.key
    ├── server.crt
    └── server.key
# kubeadm init phase etcd local --config=/tmp/192.168.0.205/kubeadmcfg.yaml
[etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
# kubeadm init phase etcd local --config=/home/user/kubeadmcfg.yaml
[etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
export CONTROL_PLANE="user@192.168.0.201"
#scp /etc/kubernetes/pki/etcd/ca.crt "${CONTROL_PLANE}":
#scp /etc/kubernetes/pki/apiserver-etcd-client.crt "${CONTROL_PLANE}":
#scp /etc/kubernetes/pki/apiserver-etcd-client.key "${CONTROL_PLANE}":
kubeadm-config.yaml
:
---
apiVersion: kubeadm.k8s.io/v1beta3
kind: ClusterConfiguration
kubernetesVersion: stable
networking:
  podSubnet: "10.244.0.0/16"
controlPlaneEndpoint: "192.168.0.201:6443" # IP-адрес, порт балансировщика нагрузки
etcd:
  external:
    endpoints:
      - https://192.168.0.205:2379 # IP-адрес ETCD01
      - https://192.168.0.206:2379 # IP-адрес ETCD02
      - https://192.168.0.207:2379 # IP-адрес ETCD03
    caFile: /etc/kubernetes/pki/etcd/ca.crt
    certFile: /etc/kubernetes/pki/apiserver-etcd-client.crt
    keyFile: /etc/kubernetes/pki/apiserver-etcd-client.key
#mkdir -p /etc/kubernetes/pki/etcd/
#cp /home/user/ca.crt /etc/kubernetes/pki/etcd/
#cp /home/user/apiserver-etcd-client.* /etc/kubernetes/pki/
# kubeadm init --config kubeadm-config.yaml --upload-certs
…
Your Kubernetes control-plane has initialized successfully!
To start using your cluster, you need to run the following as a regular user:
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
Alternatively, if you are the root user, you can run:
export KUBECONFIG=/etc/kubernetes/admin.conf
You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
https://kubernetes.io/docs/concepts/cluster-administration/addons/
You can now join any number of the control-plane node running the following command on each as root:
kubeadm join 192.168.0.201:6443 --token 7onha1.afzqd41s8dzr1wj1 \
--discovery-token-ca-cert-hash sha256:ec2be69db54b2ae13c175765ddd058801fd70054508c0e118020896a1d4c9ec3 \
--control-plane --certificate-key eb1fabf70e994c061f749f13c0f26baef64764e813d5f0eaa7b09d5279a492c4
Please note that the certificate-key gives access to cluster sensitive data, keep it secret!
As a safeguard, uploaded-certs will be deleted in two hours; If necessary, you can use
"kubeadm init phase upload-certs --upload-certs" to reload certs afterward.
Then you can join any number of worker nodes by running the following on each as root:
kubeadm join 192.168.0.201:6443 --token 7onha1.afzqd41s8dzr1wj1 \
--discovery-token-ca-cert-hash sha256:ec2be69db54b2ae13c175765ddd058801fd70054508c0e118020896a1d4c9ec3
Следует сохранить этот вывод, т.к. этот токен будет использоваться для присоединения к кластеру остальных управляющих и вычислительных узлов.
~/.kube
(с правами пользователя):
$ mkdir ~/.kube
# cp /etc/kubernetes/admin.conf ~<пользователь>/.kube/config
# chown <пользователь>: ~<пользователь>/.kube/config
$ kubectl apply -f https://raw.githubusercontent.com/flannel-io/flannel/master/Documentation/kube-flannel.yml
$ kubectl get pod -n kube-system -w
kubeadm init
на первом управляющем узле):
# kubeadm
join 192.168.0.201:6443 --token 7onha1.afzqd41s8dzr1wj1 \
--discovery-token-ca-cert-hash sha256:ec2be69db54b2ae13c175765ddd058801fd70054508c0e118020896a1d4c9ec3 \
--control-plane --certificate-key eb1fabf70e994c061f749f13c0f26baef64764e813d5f0eaa7b09d5279a492c4
kubeadm init
на первом управляющем узле):
# kubeadm
join 192.168.0.201:6443 --token 7onha1.afzqd41s8dzr1wj1 \
--discovery-token-ca-cert-hash sha256:ec2be69db54b2ae13c175765ddd058801fd70054508c0e118020896a1d4c9ec3
$ kubectl get nodes