April 02, 2021
 

Cluster topology:
1 Control Plane + etcd node
3 Worker nodes

Create the configuration file

[root@localhost ~]# rke config --name cluster.yml
[+] Cluster Level SSH Private Key Path [~/.ssh/id_rsa]: 
[+] Number of Hosts [1]: 4
[+] SSH Address of host (1) [none]: 192.168.3.201
[+] SSH Port of host (1) [22]: 
[+] SSH Private Key Path of host (192.168.3.201) [none]: ~/.ssh/id_rsa
[+] SSH User of host (192.168.3.201) [ubuntu]: deployer
[+] Is host (192.168.3.201) a Control Plane host (y/n)? [y]: y
[+] Is host (192.168.3.201) a Worker host (y/n)? [n]: n
[+] Is host (192.168.3.201) an etcd host (y/n)? [n]: y
[+] Override Hostname of host (192.168.3.201) [none]: k8s-cluster01-01
[+] Internal IP of host (192.168.3.201) [none]: 
[+] Docker socket path on host (192.168.3.201) [/var/run/docker.sock]: 
[+] SSH Address of host (2) [none]: 192.168.3.202
[+] SSH Port of host (2) [22]: 
[+] SSH Private Key Path of host (192.168.3.202) [none]: ~/.ssh/id_rsa
[+] SSH User of host (192.168.3.202) [ubuntu]: deployer
[+] Is host (192.168.3.202) a Control Plane host (y/n)? [y]: n
[+] Is host (192.168.3.202) a Worker host (y/n)? [n]: y
[+] Is host (192.168.3.202) an etcd host (y/n)? [n]: n
[+] Override Hostname of host (192.168.3.202) [none]: k8s-cluster01-02
[+] Internal IP of host (192.168.3.202) [none]: 
[+] Docker socket path on host (192.168.3.202) [/var/run/docker.sock]: 
[+] SSH Address of host (3) [none]: 192.168.3.203
[+] SSH Port of host (3) [22]: 
[+] SSH Private Key Path of host (192.168.3.203) [none]: ~/.ssh/id_rsa
[+] SSH User of host (192.168.3.203) [ubuntu]: deployer
[+] Is host (192.168.3.203) a Control Plane host (y/n)? [y]: n
[+] Is host (192.168.3.203) a Worker host (y/n)? [n]: y
[+] Is host (192.168.3.203) an etcd host (y/n)? [n]: n
[+] Override Hostname of host (192.168.3.203) [none]: k8s-cluster01-03
[+] Internal IP of host (192.168.3.203) [none]: 
[+] Docker socket path on host (192.168.3.203) [/var/run/docker.sock]: 
[+] SSH Address of host (4) [none]: 192.168.3.204
[+] SSH Port of host (4) [22]: 
[+] SSH Private Key Path of host (192.168.3.204) [none]: ~/.ssh/id_rsa
[+] SSH User of host (192.168.3.204) [ubuntu]: deployer
[+] Is host (192.168.3.204) a Control Plane host (y/n)? [y]: n
[+] Is host (192.168.3.204) a Worker host (y/n)? [n]: y
[+] Is host (192.168.3.204) an etcd host (y/n)? [n]: n
[+] Override Hostname of host (192.168.3.204) [none]: k8s-cluster01-04
[+] Internal IP of host (192.168.3.204) [none]: 
[+] Docker socket path on host (192.168.3.204) [/var/run/docker.sock]: 
[+] Network Plugin Type (flannel, calico, weave, canal, aci) [canal]: flannel
[+] Authentication Strategy [x509]: 
[+] Authorization Mode (rbac, none) [rbac]: 
[+] Kubernetes Docker image [rancher/hyperkube:v1.20.5-rancher1]: rancher/hyperkube:v1.19.9-rancher1
[+] Cluster domain [cluster.local]: 
[+] Service Cluster IP Range [10.43.0.0/16]: 
[+] Enable PodSecurityPolicy [n]: 
[+] Cluster Network CIDR [10.42.0.0/16]: 
[+] Cluster DNS Service IP [10.43.0.10]: 
[+] Add addon manifest URLs or YAML files [no]: 
[root@localhost ~]#

Available versions for the Rancher Kubernetes Docker image

https://github.com/rancher/rke/releases

New Images in v1.20.5-rancher1-1, v1.19.9-rancher1-1 and v1.18.16-rancher1-1
Updated Hyperkube Image based on k8s versions

rancher/hyperkube:v1.20.5-rancher1
rancher/hyperkube:v1.19.9-rancher1
rancher/hyperkube:v1.18.17-rancher1
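
The Kubernetes versions supported by an installed RKE release can also be listed locally; a minimal sketch, assuming the rke binary supports the documented --list-version flag:

# Default Kubernetes version for this RKE release
rke config --list-version
# All Kubernetes versions this RKE release can deploy
rke config --list-version --all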

Generated configuration file

[root@localhost ~]# cat cluster.yml
# If you intened to deploy Kubernetes in an air-gapped environment,
# please consult the documentation on how to configure custom RKE images.
nodes:
- address: 192.168.3.201
  port: "22"
  internal_address: ""
  role:
  - controlplane
  - etcd
  hostname_override: k8s-cluster01-01
  user: deployer
  docker_socket: /var/run/docker.sock
  ssh_key: ""
  ssh_key_path: ~/.ssh/id_rsa
  ssh_cert: ""
  ssh_cert_path: ""
  labels: {}
  taints: []
- address: 192.168.3.202
  port: "22"
  internal_address: ""
  role:
  - worker
  hostname_override: k8s-cluster01-02
  user: deployer
  docker_socket: /var/run/docker.sock
  ssh_key: ""
  ssh_key_path: ~/.ssh/id_rsa
  ssh_cert: ""
  ssh_cert_path: ""
  labels: {}
  taints: []
- address: 192.168.3.203
  port: "22"
  internal_address: ""
  role:
  - worker
  hostname_override: k8s-cluster01-03
  user: deployer
  docker_socket: /var/run/docker.sock
  ssh_key: ""
  ssh_key_path: ~/.ssh/id_rsa
  ssh_cert: ""
  ssh_cert_path: ""
  labels: {}
  taints: []
- address: 192.168.3.204
  port: "22"
  internal_address: ""
  role:
  - worker
  hostname_override: k8s-cluster01-04
  user: deployer
  docker_socket: /var/run/docker.sock
  ssh_key: ""
  ssh_key_path: ~/.ssh/id_rsa
  ssh_cert: ""
  ssh_cert_path: ""
  labels: {}
  taints: []
services:
  etcd:
    image: ""
    extra_args: {}
    extra_binds: []
    extra_env: []
    win_extra_args: {}
    win_extra_binds: []
    win_extra_env: []
    external_urls: []
    ca_cert: ""
    cert: ""
    key: ""
    path: ""
    uid: 0
    gid: 0
    snapshot: null
    retention: ""
    creation: ""
    backup_config: null
  kube-api:
    image: ""
    extra_args: {}
    extra_binds: []
    extra_env: []
    win_extra_args: {}
    win_extra_binds: []
    win_extra_env: []
    service_cluster_ip_range: 10.43.0.0/16
    service_node_port_range: ""
    pod_security_policy: false
    always_pull_images: false
    secrets_encryption_config: null
    audit_log: null
    admission_configuration: null
    event_rate_limit: null
  kube-controller:
    image: ""
    extra_args: {}
    extra_binds: []
    extra_env: []
    win_extra_args: {}
    win_extra_binds: []
    win_extra_env: []
    cluster_cidr: 10.42.0.0/16
    service_cluster_ip_range: 10.43.0.0/16
  scheduler:
    image: ""
    extra_args: {}
    extra_binds: []
    extra_env: []
    win_extra_args: {}
    win_extra_binds: []
    win_extra_env: []
  kubelet:
    image: ""
    extra_args: {}
    extra_binds: []
    extra_env: []
    win_extra_args: {}
    win_extra_binds: []
    win_extra_env: []
    cluster_domain: cluster.local
    infra_container_image: ""
    cluster_dns_server: 10.43.0.10
    fail_swap_on: false
    generate_serving_certificate: false
  kubeproxy:
    image: ""
    extra_args: {}
    extra_binds: []
    extra_env: []
    win_extra_args: {}
    win_extra_binds: []
    win_extra_env: []
network:
  plugin: flannel
  options: {}
  mtu: 0
  node_selector: {}
  update_strategy: null
  tolerations: []
authentication:
  strategy: x509
  sans: []
  webhook: null
addons: ""
addons_include: []
system_images:
  etcd: rancher/coreos-etcd:v3.4.14-rancher1
  alpine: rancher/rke-tools:v0.1.72
  nginx_proxy: rancher/rke-tools:v0.1.72
  cert_downloader: rancher/rke-tools:v0.1.72
  kubernetes_services_sidecar: rancher/rke-tools:v0.1.72
  kubedns: rancher/k8s-dns-kube-dns:1.15.10
  dnsmasq: rancher/k8s-dns-dnsmasq-nanny:1.15.10
  kubedns_sidecar: rancher/k8s-dns-sidecar:1.15.10
  kubedns_autoscaler: rancher/cluster-proportional-autoscaler:1.8.1
  coredns: rancher/coredns-coredns:1.8.0
  coredns_autoscaler: rancher/cluster-proportional-autoscaler:1.8.1
  nodelocal: rancher/k8s-dns-node-cache:1.15.13
  kubernetes: rancher/hyperkube:v1.19.9-rancher1
  flannel: rancher/coreos-flannel:v0.13.0-rancher1
  flannel_cni: rancher/flannel-cni:v0.3.0-rancher6
  calico_node: rancher/calico-node:v3.17.2
  calico_cni: rancher/calico-cni:v3.17.2
  calico_controllers: rancher/calico-kube-controllers:v3.17.2
  calico_ctl: rancher/calico-ctl:v3.17.2
  calico_flexvol: rancher/calico-pod2daemon-flexvol:v3.17.2
  canal_node: rancher/calico-node:v3.17.2
  canal_cni: rancher/calico-cni:v3.17.2
  canal_controllers: rancher/calico-kube-controllers:v3.17.2
  canal_flannel: rancher/coreos-flannel:v0.13.0-rancher1
  canal_flexvol: rancher/calico-pod2daemon-flexvol:v3.17.2
  weave_node: weaveworks/weave-kube:2.8.1
  weave_cni: weaveworks/weave-npc:2.8.1
  pod_infra_container: rancher/pause:3.2
  ingress: rancher/nginx-ingress-controller:nginx-0.43.0-rancher1
  ingress_backend: rancher/nginx-ingress-controller-defaultbackend:1.5-rancher1
  metrics_server: rancher/metrics-server:v0.4.1
  windows_pod_infra_container: rancher/kubelet-pause:v0.1.6
  aci_cni_deploy_container: noiro/cnideploy:5.1.1.0.1ae238a
  aci_host_container: noiro/aci-containers-host:5.1.1.0.1ae238a
  aci_opflex_container: noiro/opflex:5.1.1.0.1ae238a
  aci_mcast_container: noiro/opflex:5.1.1.0.1ae238a
  aci_ovs_container: noiro/openvswitch:5.1.1.0.1ae238a
  aci_controller_container: noiro/aci-containers-controller:5.1.1.0.1ae238a
  aci_gbp_server_container: noiro/gbp-server:5.1.1.0.1ae238a
  aci_opflex_server_container: noiro/opflex-server:5.1.1.0.1ae238a
ssh_key_path: ~/.ssh/id_rsa
ssh_cert_path: ""
ssh_agent_auth: false
authorization:
  mode: rbac
  options: {}
ignore_docker_version: null
kubernetes_version: ""
private_registries: []
ingress:
  provider: ""
  options: {}
  node_selector: {}
  extra_args: {}
  dns_policy: ""
  extra_envs: []
  extra_volumes: []
  extra_volume_mounts: []
  update_strategy: null
  http_port: 0
  https_port: 0
  network_mode: ""
  tolerations: []
  default_backend: null
  default_http_backend_priority_class_name: ""
  nginx_ingress_controller_priority_class_name: ""
cluster_name: ""
cloud_provider:
  name: ""
prefix_path: ""
win_prefix_path: ""
addon_job_timeout: 0
bastion_host:
  address: ""
  port: ""
  user: ""
  ssh_key: ""
  ssh_key_path: ""
  ssh_cert: ""
  ssh_cert_path: ""
monitoring:
  provider: ""
  options: {}
  node_selector: {}
  update_strategy: null
  replicas: null
  tolerations: []
  metrics_server_priority_class_name: ""
restore:
  restore: false
  snapshot_name: ""
rotate_encryption_key: false
dns: null
[root@localhost ~]#
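
The deployment step itself is not shown in this post; it is the same rke up invocation used in the June 2020 walkthrough further down:

# Deploy the cluster defined in cluster.yml; on success RKE writes the admin
# kubeconfig (kube_config_cluster.yml) and the state file (cluster.rkestate)
rke up --config cluster.yml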

Download and install the latest or a specified version (Kubernetes binary components)

https://kubernetes.io/docs/tasks/tools/install-kubectl-linux/
https://github.com/kubernetes/kubernetes/tree/master/CHANGELOG
https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.19.md
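
Following the linked install guide, a binary install of kubectl matching the cluster version looks roughly like this (a sketch; v1.19.9 chosen to match the hyperkube image above):

curl -LO https://dl.k8s.io/release/v1.19.9/bin/linux/amd64/kubectl
chmod +x kubectl
mv kubectl /usr/local/bin/kubectl
kubectl version --client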

Check the kubectl version and cluster node information

[root@localhost ~]# kubectl version --client
Client Version: version.Info{Major:"1", Minor:"19", GitVersion:"v1.19.9", GitCommit:"9dd794e454ac32d97cde41ae10be801ae98f75df", GitTreeState:"clean", BuildDate:"2021-03-18T01:09:28Z", GoVersion:"go1.15.8", Compiler:"gc", Platform:"linux/amd64"}
[root@localhost ~]# kubectl --kubeconfig kube_config_cluster.yml get nodes -o wide
NAME               STATUS   ROLES               AGE   VERSION   INTERNAL-IP     EXTERNAL-IP   OS-IMAGE         KERNEL-VERSION          CONTAINER-RUNTIME
k8s-cluster01-01   Ready    controlplane,etcd   33m   v1.19.9   192.168.3.201   <none>        CentOS Linux 8   4.18.0-240.el8.x86_64   docker://19.3.15
k8s-cluster01-02   Ready    worker              33m   v1.19.9   192.168.3.202   <none>        CentOS Linux 8   4.18.0-240.el8.x86_64   docker://19.3.15
k8s-cluster01-03   Ready    worker              32m   v1.19.9   192.168.3.203   <none>        CentOS Linux 8   4.18.0-240.el8.x86_64   docker://19.3.15
k8s-cluster01-04   Ready    worker              33m   v1.19.9   192.168.3.204   <none>        CentOS Linux 8   4.18.0-240.el8.x86_64   docker://19.3.15
[root@localhost ~]#

 

June 12, 2020
 

https://rancher.com/docs/rke/latest/en/installation/
https://rancher.com/docs/rke/latest/en/example-yamls/
https://kubernetes.io/docs/tasks/tools/install-kubectl/
https://rancher.com/docs/rke/latest/en/kubeconfig/

Node hostname and IP information (public IP, internal IP, hostname)

167.172.114.10 10.138.218.141 rancher-01
159.65.106.35 10.138.218.144 rancher-02
159.65.102.101 10.138.218.146 rancher-03

Node base environment configuration

# Disable SELinux (takes effect after the reboot below)
sed -i 's/^SELINUX=enforcing$/SELINUX=disabled/' /etc/selinux/config;
# Make the other nodes resolvable by hostname
echo "167.172.114.10 rancher-01">>/etc/hosts;
echo "159.65.106.35 rancher-02">>/etc/hosts;
echo "159.65.102.101 rancher-03">>/etc/hosts;
# Reboot to apply the changes
init 6

Node Docker runtime configuration

# Install Docker 18.09 with Rancher's install script
curl https://releases.rancher.com/install-docker/18.09.sh | sh;
# Create a non-root SSH user for RKE and allow it to use Docker
useradd rancher;
usermod -aG docker rancher
echo "rancherpwd" | passwd --stdin rancher

Generate and configure an SSH key pair for the nodes

Generate the key pair

[root@rancher-01 ~]# ssh-keygen
Generating public/private rsa key pair.
Enter file in which to save the key (/root/.ssh/id_rsa):
Enter passphrase (empty for no passphrase):
Enter same passphrase again:
Your identification has been saved in /root/.ssh/id_rsa.
Your public key has been saved in /root/.ssh/id_rsa.pub.
The key fingerprint is:
SHA256:sfL3YnyrNZsioS3ThuOTRME7AIyLxm4Yq396LAaeQOY root@rancher-01
The key's randomart image is:
+---[RSA 2048]----+
| o.. .           |
|. . . o          |
|o.   . o.        |
|+=    +  o       |
|Bo   ...S        |
|=E    .o.        |
|=... . *.o. o    |
|.oo + O =.=o.+   |
| oo= ..* o.==.   |
+----[SHA256]-----+
[root@rancher-01 ~]#
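
For unattended setups the same key pair can be generated non-interactively; a sketch equivalent to accepting the defaults above:

# -N '' sets an empty passphrase, -f sets the output path
ssh-keygen -t rsa -b 2048 -N '' -f ~/.ssh/id_rsa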

Distribute the public key

[root@rancher-01 ~]# ssh-copy-id -i .ssh/id_rsa.pub rancher@rancher-01
/usr/bin/ssh-copy-id: INFO: Source of key(s) to be installed: ".ssh/id_rsa.pub"
The authenticity of host 'rancher-01 (::1)' can't be established.
ECDSA key fingerprint is SHA256:NTaQJddPf6G3saQd2d6iQnF+Txp6YpkwhyiNuSImgNg.
ECDSA key fingerprint is MD5:ee:13:1b:70:95:ab:28:30:20:38:64:69:44:bd:1a:4a.
Are you sure you want to continue connecting (yes/no)? yes
/usr/bin/ssh-copy-id: INFO: attempting to log in with the new key(s), to filter out any that are already installed
/usr/bin/ssh-copy-id: INFO: 1 key(s) remain to be installed -- if you are prompted now it is to install the new keys
rancher@rancher-01's password:

Number of key(s) added: 1

Now try logging into the machine, with: "ssh 'rancher@rancher-01'"
and check to make sure that only the key(s) you wanted were added.

[root@rancher-01 ~]# ssh-copy-id -i .ssh/id_rsa.pub rancher@rancher-02
/usr/bin/ssh-copy-id: INFO: Source of key(s) to be installed: ".ssh/id_rsa.pub"
The authenticity of host 'rancher-02 (159.65.106.35)' can't be established.
ECDSA key fingerprint is SHA256:bZ2ZGx9IIzSGC2fkMEtWULbau8RcAeOOCwh+4QOMU2g.
ECDSA key fingerprint is MD5:48:d9:55:3c:9e:91:8a:47:c1:1a:3e:77:c7:f2:21:a7.
Are you sure you want to continue connecting (yes/no)? yes
/usr/bin/ssh-copy-id: INFO: attempting to log in with the new key(s), to filter out any that are already installed
/usr/bin/ssh-copy-id: INFO: 1 key(s) remain to be installed -- if you are prompted now it is to install the new keys
rancher@rancher-02's password:

Number of key(s) added: 1

Now try logging into the machine, with: "ssh 'rancher@rancher-02'"
and check to make sure that only the key(s) you wanted were added.

[root@rancher-01 ~]# ssh-copy-id -i .ssh/id_rsa.pub rancher@rancher-03
/usr/bin/ssh-copy-id: INFO: Source of key(s) to be installed: ".ssh/id_rsa.pub"
The authenticity of host 'rancher-03 (159.65.102.101)' can't be established.
ECDSA key fingerprint is SHA256:74nZvSQC34O7LrXlRzu/k0MsQzFcucn/n6c8X9CREwM.
ECDSA key fingerprint is MD5:37:2c:97:0e:d2:8e:4b:f5:7e:c5:b2:34:b5:f2:86:60.
Are you sure you want to continue connecting (yes/no)? yes
/usr/bin/ssh-copy-id: INFO: attempting to log in with the new key(s), to filter out any that are already installed
/usr/bin/ssh-copy-id: INFO: 1 key(s) remain to be installed -- if you are prompted now it is to install the new keys
rancher@rancher-03's password:

Number of key(s) added: 1

Now try logging into the machine, with: "ssh 'rancher@rancher-03'"
and check to make sure that only the key(s) you wanted were added.

[root@rancher-01 ~]#
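
Before generating the RKE configuration, a short loop can confirm that key-based login works and that the rancher user can reach Docker on every node (a sketch using the hostnames above):

# Each host should answer without a password prompt and print its Docker server version
for h in rancher-01 rancher-02 rancher-03; do
  ssh rancher@$h 'hostname; docker version --format "{{.Server.Version}}"'
done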

Download and install RKE (Rancher Kubernetes Engine)

[root@rancher-01 ~]# yum -y install wget
[root@rancher-01 ~]# wget https://github.com/rancher/rke/releases/download/v1.1.2/rke_linux-amd64
[root@rancher-01 ~]# ls
anaconda-ks.cfg original-ks.cfg rke_linux-amd64
[root@rancher-01 ~]# mv rke_linux-amd64 /usr/bin/rke
[root@rancher-01 ~]# chmod +x /usr/bin/rke

Check the RKE version

[root@rancher-01 ~]# rke --version
rke version v1.1.2
[root@rancher-01 ~]#

Generate the RKE cluster configuration file (OpenSSH server 6.7 or later is required, the root user must not be used as the SSH user, and the Docker socket path /var/run/docker.sock must be specified)
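
A quick way to confirm these prerequisites on a node before answering the prompts (a sketch):

ssh -V                        # OpenSSH version, 6.7 or newer is required
ls -l /var/run/docker.sock    # Docker socket path that RKE will use
id rancher                    # non-root SSH user, must be in the docker group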

[root@rancher-01 ~]# rke config --name cluster.yml
[+] Cluster Level SSH Private Key Path [~/.ssh/id_rsa]:
[+] Number of Hosts [1]: 3
[+] SSH Address of host (1) [none]: 167.172.114.10
[+] SSH Port of host (1) [22]:
[+] SSH Private Key Path of host (167.172.114.10) [none]:
[-] You have entered empty SSH key path, trying fetch from SSH key parameter
[+] SSH Private Key of host (167.172.114.10) [none]: ^C
[root@rancher-01 ~]# rke config --name cluster.yml
[+] Cluster Level SSH Private Key Path [~/.ssh/id_rsa]:
[+] Number of Hosts [1]: 3
[+] SSH Address of host (1) [none]: 167.172.114.10
[+] SSH Port of host (1) [22]:
[+] SSH Private Key Path of host (167.172.114.10) [none]: ~/.ssh/id_rsa
[+] SSH User of host (167.172.114.10) [ubuntu]: rancher
[+] Is host (167.172.114.10) a Control Plane host (y/n)? [y]:
[+] Is host (167.172.114.10) a Worker host (y/n)? [n]:
[+] Is host (167.172.114.10) an etcd host (y/n)? [n]: y
[+] Override Hostname of host (167.172.114.10) [none]: rancher-01
[+] Internal IP of host (167.172.114.10) [none]: 10.138.218.141
[+] Docker socket path on host (167.172.114.10) [/var/run/docker.sock]:
[+] SSH Address of host (2) [none]: 159.65.106.35
[+] SSH Port of host (2) [22]:
[+] SSH Private Key Path of host (159.65.106.35) [none]: ~/.ssh/id_rsa
[+] SSH User of host (159.65.106.35) [ubuntu]: rancher
[+] Is host (159.65.106.35) a Control Plane host (y/n)? [y]: n
[+] Is host (159.65.106.35) a Worker host (y/n)? [n]: y
[+] Is host (159.65.106.35) an etcd host (y/n)? [n]:
[+] Override Hostname of host (159.65.106.35) [none]: rancher-02
[+] Internal IP of host (159.65.106.35) [none]: 10.138.218.144
[+] Docker socket path on host (159.65.106.35) [/var/run/docker.sock]:
[+] SSH Address of host (3) [none]: 159.65.102.101
[+] SSH Port of host (3) [22]:
[+] SSH Private Key Path of host (159.65.102.101) [none]: ~/.ssh/id_rsa
[+] SSH User of host (159.65.102.101) [ubuntu]: rancher
[+] Is host (159.65.102.101) a Control Plane host (y/n)? [y]: n
[+] Is host (159.65.102.101) a Worker host (y/n)? [n]: y
[+] Is host (159.65.102.101) an etcd host (y/n)? [n]:
[+] Override Hostname of host (159.65.102.101) [none]: rancher-03
[+] Internal IP of host (159.65.102.101) [none]: 10.138.218.146
[+] Docker socket path on host (159.65.102.101) [/var/run/docker.sock]:
[+] Network Plugin Type (flannel, calico, weave, canal) [canal]:
[+] Authentication Strategy [x509]:
[+] Authorization Mode (rbac, none) [rbac]:
[+] Kubernetes Docker image [rancher/hyperkube:v1.17.6-rancher2]:
[+] Cluster domain [cluster.local]:
[+] Service Cluster IP Range [10.43.0.0/16]:
[+] Enable PodSecurityPolicy [n]:
[+] Cluster Network CIDR [10.42.0.0/16]:
[+] Cluster DNS Service IP [10.43.0.10]:
[+] Add addon manifest URLs or YAML files [no]:
[root@rancher-01 ~]#

View the RKE cluster configuration file

[root@rancher-01 ~]# cat cluster.yml
# If you intened to deploy Kubernetes in an air-gapped environment,
# please consult the documentation on how to configure custom RKE images.
nodes:
- address: 167.172.114.10
  port: "22"
  internal_address: 10.138.218.141
  role:
  - controlplane
  - etcd
  hostname_override: rancher-01
  user: rancher
  docker_socket: /var/run/docker.sock
  ssh_key: ""
  ssh_key_path: ~/.ssh/id_rsa
  ssh_cert: ""
  ssh_cert_path: ""
  labels: {}
  taints: []
- address: 159.65.106.35
  port: "22"
  internal_address: 10.138.218.144
  role:
  - worker
  hostname_override: rancher-02
  user: rancher
  docker_socket: /var/run/docker.sock
  ssh_key: ""
  ssh_key_path: ~/.ssh/id_rsa
  ssh_cert: ""
  ssh_cert_path: ""
  labels: {}
  taints: []
- address: 159.65.102.101
  port: "22"
  internal_address: 10.138.218.146
  role:
  - worker
  hostname_override: rancher-03
  user: rancher
  docker_socket: /var/run/docker.sock
  ssh_key: ""
  ssh_key_path: ~/.ssh/id_rsa
  ssh_cert: ""
  ssh_cert_path: ""
  labels: {}
  taints: []
services:
  etcd:
    image: ""
    extra_args: {}
    extra_binds: []
    extra_env: []
    external_urls: []
    ca_cert: ""
    cert: ""
    key: ""
    path: ""
    uid: 0
    gid: 0
    snapshot: null
    retention: ""
    creation: ""
    backup_config: null
  kube-api:
    image: ""
    extra_args: {}
    extra_binds: []
    extra_env: []
    service_cluster_ip_range: 10.43.0.0/16
    service_node_port_range: ""
    pod_security_policy: false
    always_pull_images: false
    secrets_encryption_config: null
    audit_log: null
    admission_configuration: null
    event_rate_limit: null
  kube-controller:
    image: ""
    extra_args: {}
    extra_binds: []
    extra_env: []
    cluster_cidr: 10.42.0.0/16
    service_cluster_ip_range: 10.43.0.0/16
  scheduler:
    image: ""
    extra_args: {}
    extra_binds: []
    extra_env: []
  kubelet:
    image: ""
    extra_args: {}
    extra_binds: []
    extra_env: []
    cluster_domain: cluster.local
    infra_container_image: ""
    cluster_dns_server: 10.43.0.10
    fail_swap_on: false
    generate_serving_certificate: false
  kubeproxy:
    image: ""
    extra_args: {}
    extra_binds: []
    extra_env: []
network:
  plugin: canal
  options: {}
  mtu: 0
  node_selector: {}
  update_strategy: null
authentication:
  strategy: x509
  sans: []
  webhook: null
addons: ""
addons_include: []
system_images:
  etcd: rancher/coreos-etcd:v3.4.3-rancher1
  alpine: rancher/rke-tools:v0.1.56
  nginx_proxy: rancher/rke-tools:v0.1.56
  cert_downloader: rancher/rke-tools:v0.1.56
  kubernetes_services_sidecar: rancher/rke-tools:v0.1.56
  kubedns: rancher/k8s-dns-kube-dns:1.15.0
  dnsmasq: rancher/k8s-dns-dnsmasq-nanny:1.15.0
  kubedns_sidecar: rancher/k8s-dns-sidecar:1.15.0
  kubedns_autoscaler: rancher/cluster-proportional-autoscaler:1.7.1
  coredns: rancher/coredns-coredns:1.6.5
  coredns_autoscaler: rancher/cluster-proportional-autoscaler:1.7.1
  nodelocal: rancher/k8s-dns-node-cache:1.15.7
  kubernetes: rancher/hyperkube:v1.17.6-rancher2
  flannel: rancher/coreos-flannel:v0.11.0-rancher1
  flannel_cni: rancher/flannel-cni:v0.3.0-rancher6
  calico_node: rancher/calico-node:v3.13.4
  calico_cni: rancher/calico-cni:v3.13.4
  calico_controllers: rancher/calico-kube-controllers:v3.13.4
  calico_ctl: rancher/calico-ctl:v3.13.4
  calico_flexvol: rancher/calico-pod2daemon-flexvol:v3.13.4
  canal_node: rancher/calico-node:v3.13.4
  canal_cni: rancher/calico-cni:v3.13.4
  canal_flannel: rancher/coreos-flannel:v0.11.0
  canal_flexvol: rancher/calico-pod2daemon-flexvol:v3.13.4
  weave_node: weaveworks/weave-kube:2.6.4
  weave_cni: weaveworks/weave-npc:2.6.4
  pod_infra_container: rancher/pause:3.1
  ingress: rancher/nginx-ingress-controller:nginx-0.32.0-rancher1
  ingress_backend: rancher/nginx-ingress-controller-defaultbackend:1.5-rancher1
  metrics_server: rancher/metrics-server:v0.3.6
  windows_pod_infra_container: rancher/kubelet-pause:v0.1.3
ssh_key_path: ~/.ssh/id_rsa
ssh_cert_path: ""
ssh_agent_auth: false
authorization:
  mode: rbac
  options: {}
ignore_docker_version: false
kubernetes_version: ""
private_registries: []
ingress:
  provider: ""
  options: {}
  node_selector: {}
  extra_args: {}
  dns_policy: ""
  extra_envs: []
  extra_volumes: []
  extra_volume_mounts: []
  update_strategy: null
cluster_name: ""
cloud_provider:
  name: ""
prefix_path: ""
addon_job_timeout: 0
bastion_host:
  address: ""
  port: ""
  user: ""
  ssh_key: ""
  ssh_key_path: ""
  ssh_cert: ""
  ssh_cert_path: ""
monitoring:
  provider: ""
  options: {}
  node_selector: {}
  update_strategy: null
  replicas: null
restore:
  restore: false
  snapshot_name: ""
dns: null
[root@rancher-01 ~]#

Run the cluster deployment

[root@rancher-01 ~]# rke up --config cluster.yml
INFO[0000] Running RKE version: v1.1.2
INFO[0000] Initiating Kubernetes cluster
INFO[0000] [dialer] Setup tunnel for host [159.65.102.101]
INFO[0000] [dialer] Setup tunnel for host [159.65.106.35]
INFO[0000] [dialer] Setup tunnel for host [167.172.114.10]
INFO[0000] Checking if container [cluster-state-deployer] is running on host [167.172.114.10], try #1
INFO[0000] Pulling image [rancher/rke-tools:v0.1.56] on host [167.172.114.10], try #1
INFO[0005] Image [rancher/rke-tools:v0.1.56] exists on host [167.172.114.10]
INFO[0005] Starting container [cluster-state-deployer] on host [167.172.114.10], try #1
INFO[0005] [state] Successfully started [cluster-state-deployer] container on host [167.172.114.10]
INFO[0006] Checking if container [cluster-state-deployer] is running on host [159.65.106.35], try #1
INFO[0006] Pulling image [rancher/rke-tools:v0.1.56] on host [159.65.106.35], try #1
INFO[0012] Image [rancher/rke-tools:v0.1.56] exists on host [159.65.106.35]
INFO[0012] Starting container [cluster-state-deployer] on host [159.65.106.35], try #1
INFO[0012] [state] Successfully started [cluster-state-deployer] container on host [159.65.106.35]
INFO[0012] Checking if container [cluster-state-deployer] is running on host [159.65.102.101], try #1
INFO[0012] Pulling image [rancher/rke-tools:v0.1.56] on host [159.65.102.101], try #1
INFO[0020] Image [rancher/rke-tools:v0.1.56] exists on host [159.65.102.101]
INFO[0020] Starting container [cluster-state-deployer] on host [159.65.102.101], try #1
INFO[0021] [state] Successfully started [cluster-state-deployer] container on host [159.65.102.101]
INFO[0021] [certificates] Generating CA kubernetes certificates
INFO[0021] [certificates] Generating Kubernetes API server aggregation layer requestheader client CA certificates
INFO[0021] [certificates] GenerateServingCertificate is disabled, checking if there are unused kubelet certificates
INFO[0021] [certificates] Generating Kubernetes API server certificates
INFO[0022] [certificates] Generating Service account token key
INFO[0022] [certificates] Generating Kube Controller certificates
INFO[0022] [certificates] Generating Kube Scheduler certificates
INFO[0022] [certificates] Generating Kube Proxy certificates
INFO[0022] [certificates] Generating Node certificate
INFO[0022] [certificates] Generating admin certificates and kubeconfig
INFO[0022] [certificates] Generating Kubernetes API server proxy client certificates
INFO[0023] [certificates] Generating kube-etcd-10-138-218-141 certificate and key
INFO[0023] Successfully Deployed state file at [./cluster.rkestate]
INFO[0023] Building Kubernetes cluster
INFO[0023] [dialer] Setup tunnel for host [159.65.102.101]
INFO[0023] [dialer] Setup tunnel for host [167.172.114.10]
INFO[0023] [dialer] Setup tunnel for host [159.65.106.35]
INFO[0023] [network] Deploying port listener containers
INFO[0023] Image [rancher/rke-tools:v0.1.56] exists on host [167.172.114.10]
INFO[0023] Starting container [rke-etcd-port-listener] on host [167.172.114.10], try #1
INFO[0024] [network] Successfully started [rke-etcd-port-listener] container on host [167.172.114.10]
INFO[0024] Image [rancher/rke-tools:v0.1.56] exists on host [167.172.114.10]
INFO[0024] Starting container [rke-cp-port-listener] on host [167.172.114.10], try #1
INFO[0024] [network] Successfully started [rke-cp-port-listener] container on host [167.172.114.10]
INFO[0024] Image [rancher/rke-tools:v0.1.56] exists on host [159.65.106.35]
INFO[0024] Image [rancher/rke-tools:v0.1.56] exists on host [159.65.102.101]
INFO[0024] Starting container [rke-worker-port-listener] on host [159.65.102.101], try #1
INFO[0024] Starting container [rke-worker-port-listener] on host [159.65.106.35], try #1
INFO[0024] [network] Successfully started [rke-worker-port-listener] container on host [159.65.102.101]
INFO[0024] [network] Successfully started [rke-worker-port-listener] container on host [159.65.106.35]
INFO[0024] [network] Port listener containers deployed successfully
INFO[0024] [network] Running control plane -> etcd port checks
INFO[0024] Image [rancher/rke-tools:v0.1.56] exists on host [167.172.114.10]
INFO[0024] Starting container [rke-port-checker] on host [167.172.114.10], try #1
INFO[0025] [network] Successfully started [rke-port-checker] container on host [167.172.114.10]
INFO[0025] Removing container [rke-port-checker] on host [167.172.114.10], try #1
INFO[0025] [network] Running control plane -> worker port checks
INFO[0025] Image [rancher/rke-tools:v0.1.56] exists on host [167.172.114.10]
INFO[0025] Starting container [rke-port-checker] on host [167.172.114.10], try #1
INFO[0025] [network] Successfully started [rke-port-checker] container on host [167.172.114.10]
INFO[0025] Removing container [rke-port-checker] on host [167.172.114.10], try #1
INFO[0025] [network] Running workers -> control plane port checks
INFO[0025] Image [rancher/rke-tools:v0.1.56] exists on host [159.65.106.35]
INFO[0025] Image [rancher/rke-tools:v0.1.56] exists on host [159.65.102.101]
INFO[0025] Starting container [rke-port-checker] on host [159.65.106.35], try #1
INFO[0025] Starting container [rke-port-checker] on host [159.65.102.101], try #1
INFO[0025] [network] Successfully started [rke-port-checker] container on host [159.65.106.35]
INFO[0025] Removing container [rke-port-checker] on host [159.65.106.35], try #1
INFO[0026] [network] Successfully started [rke-port-checker] container on host [159.65.102.101]
INFO[0026] Removing container [rke-port-checker] on host [159.65.102.101], try #1
INFO[0026] [network] Checking KubeAPI port Control Plane hosts
INFO[0026] [network] Removing port listener containers
INFO[0026] Removing container [rke-etcd-port-listener] on host [167.172.114.10], try #1
INFO[0026] [remove/rke-etcd-port-listener] Successfully removed container on host [167.172.114.10]
INFO[0026] Removing container [rke-cp-port-listener] on host [167.172.114.10], try #1
INFO[0026] [remove/rke-cp-port-listener] Successfully removed container on host [167.172.114.10]
INFO[0026] Removing container [rke-worker-port-listener] on host [159.65.106.35], try #1
INFO[0026] Removing container [rke-worker-port-listener] on host [159.65.102.101], try #1
INFO[0026] [remove/rke-worker-port-listener] Successfully removed container on host [159.65.102.101]
INFO[0026] [remove/rke-worker-port-listener] Successfully removed container on host [159.65.106.35]
INFO[0026] [network] Port listener containers removed successfully
INFO[0026] [certificates] Deploying kubernetes certificates to Cluster nodes
INFO[0026] Checking if container [cert-deployer] is running on host [159.65.106.35], try #1
INFO[0026] Checking if container [cert-deployer] is running on host [159.65.102.101], try #1
INFO[0026] Checking if container [cert-deployer] is running on host [167.172.114.10], try #1
INFO[0026] Image [rancher/rke-tools:v0.1.56] exists on host [159.65.106.35]
INFO[0026] Image [rancher/rke-tools:v0.1.56] exists on host [159.65.102.101]
INFO[0026] Image [rancher/rke-tools:v0.1.56] exists on host [167.172.114.10]
INFO[0026] Starting container [cert-deployer] on host [167.172.114.10], try #1
INFO[0026] Starting container [cert-deployer] on host [159.65.106.35], try #1
INFO[0026] Starting container [cert-deployer] on host [159.65.102.101], try #1
INFO[0027] Checking if container [cert-deployer] is running on host [167.172.114.10], try #1
INFO[0027] Checking if container [cert-deployer] is running on host [159.65.106.35], try #1
INFO[0027] Checking if container [cert-deployer] is running on host [159.65.102.101], try #1
INFO[0032] Checking if container [cert-deployer] is running on host [167.172.114.10], try #1
INFO[0032] Removing container [cert-deployer] on host [167.172.114.10], try #1
INFO[0032] Checking if container [cert-deployer] is running on host [159.65.106.35], try #1
INFO[0032] Removing container [cert-deployer] on host [159.65.106.35], try #1
INFO[0032] Checking if container [cert-deployer] is running on host [159.65.102.101], try #1
INFO[0032] Removing container [cert-deployer] on host [159.65.102.101], try #1
INFO[0032] [reconcile] Rebuilding and updating local kube config
INFO[0032] Successfully Deployed local admin kubeconfig at [./kube_config_cluster.yml]
INFO[0032] [certificates] Successfully deployed kubernetes certificates to Cluster nodes
INFO[0032] [file-deploy] Deploying file [/etc/kubernetes/audit-policy.yaml] to node [167.172.114.10]
INFO[0032] Image [rancher/rke-tools:v0.1.56] exists on host [167.172.114.10]
INFO[0032] Starting container [file-deployer] on host [167.172.114.10], try #1
INFO[0032] Successfully started [file-deployer] container on host [167.172.114.10]
INFO[0032] Waiting for [file-deployer] container to exit on host [167.172.114.10]
INFO[0032] Waiting for [file-deployer] container to exit on host [167.172.114.10]
INFO[0032] Container [file-deployer] is still running on host [167.172.114.10]
INFO[0033] Waiting for [file-deployer] container to exit on host [167.172.114.10]
INFO[0033] Removing container [file-deployer] on host [167.172.114.10], try #1
INFO[0033] [remove/file-deployer] Successfully removed container on host [167.172.114.10]
INFO[0033] [/etc/kubernetes/audit-policy.yaml] Successfully deployed audit policy file to Cluster control nodes
INFO[0033] [reconcile] Reconciling cluster state
INFO[0033] [reconcile] This is newly generated cluster
INFO[0033] Pre-pulling kubernetes images
INFO[0033] Pulling image [rancher/hyperkube:v1.17.6-rancher2] on host [167.172.114.10], try #1
INFO[0033] Pulling image [rancher/hyperkube:v1.17.6-rancher2] on host [159.65.102.101], try #1
INFO[0033] Pulling image [rancher/hyperkube:v1.17.6-rancher2] on host [159.65.106.35], try #1
INFO[0065] Image [rancher/hyperkube:v1.17.6-rancher2] exists on host [167.172.114.10]
INFO[0071] Image [rancher/hyperkube:v1.17.6-rancher2] exists on host [159.65.106.35]
INFO[0080] Image [rancher/hyperkube:v1.17.6-rancher2] exists on host [159.65.102.101]
INFO[0080] Kubernetes images pulled successfully
INFO[0080] [etcd] Building up etcd plane..
INFO[0080] Image [rancher/rke-tools:v0.1.56] exists on host [167.172.114.10]
INFO[0080] Starting container [etcd-fix-perm] on host [167.172.114.10], try #1
INFO[0081] Successfully started [etcd-fix-perm] container on host [167.172.114.10]
INFO[0081] Waiting for [etcd-fix-perm] container to exit on host [167.172.114.10]
INFO[0081] Waiting for [etcd-fix-perm] container to exit on host [167.172.114.10]
INFO[0081] Container [etcd-fix-perm] is still running on host [167.172.114.10]
INFO[0082] Waiting for [etcd-fix-perm] container to exit on host [167.172.114.10]
INFO[0082] Removing container [etcd-fix-perm] on host [167.172.114.10], try #1
INFO[0082] [remove/etcd-fix-perm] Successfully removed container on host [167.172.114.10]
INFO[0082] Pulling image [rancher/coreos-etcd:v3.4.3-rancher1] on host [167.172.114.10], try #1
INFO[0085] Image [rancher/coreos-etcd:v3.4.3-rancher1] exists on host [167.172.114.10]
INFO[0085] Starting container [etcd] on host [167.172.114.10], try #1
INFO[0086] [etcd] Successfully started [etcd] container on host [167.172.114.10]
INFO[0086] [etcd] Running rolling snapshot container [etcd-snapshot-once] on host [167.172.114.10]
INFO[0086] Image [rancher/rke-tools:v0.1.56] exists on host [167.172.114.10]
INFO[0086] Starting container [etcd-rolling-snapshots] on host [167.172.114.10], try #1
INFO[0086] [etcd] Successfully started [etcd-rolling-snapshots] container on host [167.172.114.10]
INFO[0091] Image [rancher/rke-tools:v0.1.56] exists on host [167.172.114.10]
INFO[0091] Starting container [rke-bundle-cert] on host [167.172.114.10], try #1
INFO[0091] [certificates] Successfully started [rke-bundle-cert] container on host [167.172.114.10]
INFO[0091] Waiting for [rke-bundle-cert] container to exit on host [167.172.114.10]
INFO[0091] Container [rke-bundle-cert] is still running on host [167.172.114.10]
INFO[0092] Waiting for [rke-bundle-cert] container to exit on host [167.172.114.10]
INFO[0092] [certificates] successfully saved certificate bundle [/opt/rke/etcd-snapshots//pki.bundle.tar.gz] on host [167.172.114.10]
INFO[0092] Removing container [rke-bundle-cert] on host [167.172.114.10], try #1
INFO[0092] Image [rancher/rke-tools:v0.1.56] exists on host [167.172.114.10]
INFO[0092] Starting container [rke-log-linker] on host [167.172.114.10], try #1
INFO[0093] [etcd] Successfully started [rke-log-linker] container on host [167.172.114.10]
INFO[0093] Removing container [rke-log-linker] on host [167.172.114.10], try #1
INFO[0093] [remove/rke-log-linker] Successfully removed container on host [167.172.114.10]
INFO[0093] [etcd] Successfully started etcd plane.. Checking etcd cluster health
INFO[0093] [controlplane] Building up Controller Plane..
INFO[0093] Checking if container [service-sidekick] is running on host [167.172.114.10], try #1
INFO[0093] Image [rancher/rke-tools:v0.1.56] exists on host [167.172.114.10]
INFO[0093] Image [rancher/hyperkube:v1.17.6-rancher2] exists on host [167.172.114.10]
INFO[0093] Starting container [kube-apiserver] on host [167.172.114.10], try #1
INFO[0093] [controlplane] Successfully started [kube-apiserver] container on host [167.172.114.10]
INFO[0093] [healthcheck] Start Healthcheck on service [kube-apiserver] on host [167.172.114.10]
INFO[0098] [healthcheck] service [kube-apiserver] on host [167.172.114.10] is healthy
INFO[0098] Image [rancher/rke-tools:v0.1.56] exists on host [167.172.114.10]
INFO[0098] Starting container [rke-log-linker] on host [167.172.114.10], try #1
INFO[0099] [controlplane] Successfully started [rke-log-linker] container on host [167.172.114.10]
INFO[0099] Removing container [rke-log-linker] on host [167.172.114.10], try #1
INFO[0099] [remove/rke-log-linker] Successfully removed container on host [167.172.114.10]
INFO[0099] Image [rancher/hyperkube:v1.17.6-rancher2] exists on host [167.172.114.10]
INFO[0099] Starting container [kube-controller-manager] on host [167.172.114.10], try #1
INFO[0099] [controlplane] Successfully started [kube-controller-manager] container on host [167.172.114.10]
INFO[0099] [healthcheck] Start Healthcheck on service [kube-controller-manager] on host [167.172.114.10]
INFO[0104] [healthcheck] service [kube-controller-manager] on host [167.172.114.10] is healthy
INFO[0104] Image [rancher/rke-tools:v0.1.56] exists on host [167.172.114.10]
INFO[0104] Starting container [rke-log-linker] on host [167.172.114.10], try #1
INFO[0104] [controlplane] Successfully started [rke-log-linker] container on host [167.172.114.10]
INFO[0104] Removing container [rke-log-linker] on host [167.172.114.10], try #1
INFO[0105] [remove/rke-log-linker] Successfully removed container on host [167.172.114.10]
INFO[0105] Image [rancher/hyperkube:v1.17.6-rancher2] exists on host [167.172.114.10]
INFO[0105] Starting container [kube-scheduler] on host [167.172.114.10], try #1
INFO[0105] [controlplane] Successfully started [kube-scheduler] container on host [167.172.114.10]
INFO[0105] [healthcheck] Start Healthcheck on service [kube-scheduler] on host [167.172.114.10]
INFO[0110] [healthcheck] service [kube-scheduler] on host [167.172.114.10] is healthy
INFO[0110] Image [rancher/rke-tools:v0.1.56] exists on host [167.172.114.10]
INFO[0110] Starting container [rke-log-linker] on host [167.172.114.10], try #1
INFO[0110] [controlplane] Successfully started [rke-log-linker] container on host [167.172.114.10]
INFO[0110] Removing container [rke-log-linker] on host [167.172.114.10], try #1
INFO[0110] [remove/rke-log-linker] Successfully removed container on host [167.172.114.10]
INFO[0110] [controlplane] Successfully started Controller Plane..
INFO[0110] [authz] Creating rke-job-deployer ServiceAccount
INFO[0110] [authz] rke-job-deployer ServiceAccount created successfully
INFO[0110] [authz] Creating system:node ClusterRoleBinding
INFO[0110] [authz] system:node ClusterRoleBinding created successfully
INFO[0110] [authz] Creating kube-apiserver proxy ClusterRole and ClusterRoleBinding
INFO[0110] [authz] kube-apiserver proxy ClusterRole and ClusterRoleBinding created successfully
INFO[0110] Successfully Deployed state file at [./cluster.rkestate]
INFO[0110] [state] Saving full cluster state to Kubernetes
INFO[0111] [state] Successfully Saved full cluster state to Kubernetes ConfigMap: full-cluster-state
INFO[0111] [worker] Building up Worker Plane..
INFO[0111] Checking if container [service-sidekick] is running on host [167.172.114.10], try #1
INFO[0111] Image [rancher/rke-tools:v0.1.56] exists on host [159.65.102.101]
INFO[0111] Image [rancher/rke-tools:v0.1.56] exists on host [159.65.106.35]
INFO[0111] [sidekick] Sidekick container already created on host [167.172.114.10]
INFO[0111] Image [rancher/hyperkube:v1.17.6-rancher2] exists on host [167.172.114.10]
INFO[0111] Starting container [kubelet] on host [167.172.114.10], try #1
INFO[0111] Starting container [nginx-proxy] on host [159.65.106.35], try #1
INFO[0111] Starting container [nginx-proxy] on host [159.65.102.101], try #1
INFO[0111] [worker] Successfully started [kubelet] container on host [167.172.114.10]
INFO[0111] [healthcheck] Start Healthcheck on service [kubelet] on host [167.172.114.10]
INFO[0111] [worker] Successfully started [nginx-proxy] container on host [159.65.106.35]
INFO[0111] Image [rancher/rke-tools:v0.1.56] exists on host [159.65.106.35]
INFO[0111] [worker] Successfully started [nginx-proxy] container on host [159.65.102.101]
INFO[0111] Image [rancher/rke-tools:v0.1.56] exists on host [159.65.102.101]
INFO[0111] Starting container [rke-log-linker] on host [159.65.106.35], try #1
INFO[0111] Starting container [rke-log-linker] on host [159.65.102.101], try #1
INFO[0111] [worker] Successfully started [rke-log-linker] container on host [159.65.106.35]
INFO[0111] Removing container [rke-log-linker] on host [159.65.106.35], try #1
INFO[0111] [worker] Successfully started [rke-log-linker] container on host [159.65.102.101]
INFO[0111] Removing container [rke-log-linker] on host [159.65.102.101], try #1
INFO[0111] [remove/rke-log-linker] Successfully removed container on host [159.65.106.35]
INFO[0111] Checking if container [service-sidekick] is running on host [159.65.106.35], try #1
INFO[0111] Image [rancher/rke-tools:v0.1.56] exists on host [159.65.106.35]
INFO[0111] Image [rancher/hyperkube:v1.17.6-rancher2] exists on host [159.65.106.35]
INFO[0112] [remove/rke-log-linker] Successfully removed container on host [159.65.102.101]
INFO[0112] Checking if container [service-sidekick] is running on host [159.65.102.101], try #1
INFO[0112] Image [rancher/rke-tools:v0.1.56] exists on host [159.65.102.101]
INFO[0112] Starting container [kubelet] on host [159.65.106.35], try #1
INFO[0112] Image [rancher/hyperkube:v1.17.6-rancher2] exists on host [159.65.102.101]
INFO[0112] Starting container [kubelet] on host [159.65.102.101], try #1
INFO[0112] [worker] Successfully started [kubelet] container on host [159.65.106.35]
INFO[0112] [healthcheck] Start Healthcheck on service [kubelet] on host [159.65.106.35]
INFO[0112] [worker] Successfully started [kubelet] container on host [159.65.102.101]
INFO[0112] [healthcheck] Start Healthcheck on service [kubelet] on host [159.65.102.101]
INFO[0116] [healthcheck] service [kubelet] on host [167.172.114.10] is healthy
INFO[0116] Image [rancher/rke-tools:v0.1.56] exists on host [167.172.114.10]
INFO[0116] Starting container [rke-log-linker] on host [167.172.114.10], try #1
INFO[0116] [worker] Successfully started [rke-log-linker] container on host [167.172.114.10]
INFO[0116] Removing container [rke-log-linker] on host [167.172.114.10], try #1
INFO[0116] [remove/rke-log-linker] Successfully removed container on host [167.172.114.10]
INFO[0116] Image [rancher/hyperkube:v1.17.6-rancher2] exists on host [167.172.114.10]
INFO[0116] Starting container [kube-proxy] on host [167.172.114.10], try #1
INFO[0117] [worker] Successfully started [kube-proxy] container on host [167.172.114.10]
INFO[0117] [healthcheck] Start Healthcheck on service [kube-proxy] on host [167.172.114.10]
INFO[0117] [healthcheck] service [kubelet] on host [159.65.106.35] is healthy
INFO[0117] Image [rancher/rke-tools:v0.1.56] exists on host [159.65.106.35]
INFO[0117] [healthcheck] service [kubelet] on host [159.65.102.101] is healthy
INFO[0117] Starting container [rke-log-linker] on host [159.65.106.35], try #1
INFO[0117] Image [rancher/rke-tools:v0.1.56] exists on host [159.65.102.101]
INFO[0117] Starting container [rke-log-linker] on host [159.65.102.101], try #1
INFO[0117] [worker] Successfully started [rke-log-linker] container on host [159.65.106.35]
INFO[0117] Removing container [rke-log-linker] on host [159.65.106.35], try #1
INFO[0117] [worker] Successfully started [rke-log-linker] container on host [159.65.102.101]
INFO[0117] Removing container [rke-log-linker] on host [159.65.102.101], try #1
INFO[0118] [remove/rke-log-linker] Successfully removed container on host [159.65.106.35]
INFO[0118] Image [rancher/hyperkube:v1.17.6-rancher2] exists on host [159.65.106.35]
INFO[0118] Starting container [kube-proxy] on host [159.65.106.35], try #1
INFO[0118] [remove/rke-log-linker] Successfully removed container on host [159.65.102.101]
INFO[0118] Image [rancher/hyperkube:v1.17.6-rancher2] exists on host [159.65.102.101]
INFO[0118] Starting container [kube-proxy] on host [159.65.102.101], try #1
INFO[0118] [worker] Successfully started [kube-proxy] container on host [159.65.106.35]
INFO[0118] [healthcheck] Start Healthcheck on service [kube-proxy] on host [159.65.106.35]
INFO[0118] [worker] Successfully started [kube-proxy] container on host [159.65.102.101]
INFO[0118] [healthcheck] Start Healthcheck on service [kube-proxy] on host [159.65.102.101]
INFO[0122] [healthcheck] service [kube-proxy] on host [167.172.114.10] is healthy
INFO[0122] Image [rancher/rke-tools:v0.1.56] exists on host [167.172.114.10]
INFO[0122] Starting container [rke-log-linker] on host [167.172.114.10], try #1
INFO[0122] [worker] Successfully started [rke-log-linker] container on host [167.172.114.10]
INFO[0122] Removing container [rke-log-linker] on host [167.172.114.10], try #1
INFO[0122] [remove/rke-log-linker] Successfully removed container on host [167.172.114.10]
INFO[0123] [healthcheck] service [kube-proxy] on host [159.65.106.35] is healthy
INFO[0123] Image [rancher/rke-tools:v0.1.56] exists on host [159.65.106.35]
INFO[0123] Starting container [rke-log-linker] on host [159.65.106.35], try #1
INFO[0123] [healthcheck] service [kube-proxy] on host [159.65.102.101] is healthy
INFO[0123] Image [rancher/rke-tools:v0.1.56] exists on host [159.65.102.101]
INFO[0123] Starting container [rke-log-linker] on host [159.65.102.101], try #1
INFO[0123] [worker] Successfully started [rke-log-linker] container on host [159.65.106.35]
INFO[0123] Removing container [rke-log-linker] on host [159.65.106.35], try #1
INFO[0124] [remove/rke-log-linker] Successfully removed container on host [159.65.106.35]
INFO[0124] [worker] Successfully started [rke-log-linker] container on host [159.65.102.101]
INFO[0124] Removing container [rke-log-linker] on host [159.65.102.101], try #1
INFO[0124] [remove/rke-log-linker] Successfully removed container on host [159.65.102.101]
INFO[0124] [worker] Successfully started Worker Plane..
INFO[0124] Image [rancher/rke-tools:v0.1.56] exists on host [167.172.114.10]
INFO[0124] Image [rancher/rke-tools:v0.1.56] exists on host [159.65.106.35]
INFO[0124] Image [rancher/rke-tools:v0.1.56] exists on host [159.65.102.101]
INFO[0124] Starting container [rke-log-cleaner] on host [167.172.114.10], try #1
INFO[0124] Starting container [rke-log-cleaner] on host [159.65.106.35], try #1
INFO[0124] Starting container [rke-log-cleaner] on host [159.65.102.101], try #1
INFO[0124] [cleanup] Successfully started [rke-log-cleaner] container on host [167.172.114.10]
INFO[0124] Removing container [rke-log-cleaner] on host [167.172.114.10], try #1
INFO[0124] [cleanup] Successfully started [rke-log-cleaner] container on host [159.65.106.35]
INFO[0124] Removing container [rke-log-cleaner] on host [159.65.106.35], try #1
INFO[0125] [remove/rke-log-cleaner] Successfully removed container on host [167.172.114.10]
INFO[0125] [cleanup] Successfully started [rke-log-cleaner] container on host [159.65.102.101]
INFO[0125] Removing container [rke-log-cleaner] on host [159.65.102.101], try #1
INFO[0125] [remove/rke-log-cleaner] Successfully removed container on host [159.65.106.35]
INFO[0125] [remove/rke-log-cleaner] Successfully removed container on host [159.65.102.101]
INFO[0125] [sync] Syncing nodes Labels and Taints
INFO[0125] [sync] Successfully synced nodes Labels and Taints
INFO[0125] [network] Setting up network plugin: canal
INFO[0125] [addons] Saving ConfigMap for addon rke-network-plugin to Kubernetes
INFO[0125] [addons] Successfully saved ConfigMap for addon rke-network-plugin to Kubernetes
INFO[0125] [addons] Executing deploy job rke-network-plugin
INFO[0130] [addons] Setting up coredns
INFO[0130] [addons] Saving ConfigMap for addon rke-coredns-addon to Kubernetes
INFO[0130] [addons] Successfully saved ConfigMap for addon rke-coredns-addon to Kubernetes
INFO[0130] [addons] Executing deploy job rke-coredns-addon
INFO[0135] [addons] CoreDNS deployed successfully
INFO[0135] [dns] DNS provider coredns deployed successfully
INFO[0135] [addons] Setting up Metrics Server
INFO[0135] [addons] Saving ConfigMap for addon rke-metrics-addon to Kubernetes
INFO[0135] [addons] Successfully saved ConfigMap for addon rke-metrics-addon to Kubernetes
INFO[0135] [addons] Executing deploy job rke-metrics-addon
INFO[0140] [addons] Metrics Server deployed successfully
INFO[0140] [ingress] Setting up nginx ingress controller
INFO[0140] [addons] Saving ConfigMap for addon rke-ingress-controller to Kubernetes
INFO[0140] [addons] Successfully saved ConfigMap for addon rke-ingress-controller to Kubernetes
INFO[0140] [addons] Executing deploy job rke-ingress-controller
INFO[0145] [ingress] ingress controller nginx deployed successfully
INFO[0145] [addons] Setting up user addons
INFO[0145] [addons] no user addons defined
INFO[0145] Finished building Kubernetes cluster successfully
[root@rancher-01 ~]#
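
As the log above notes, rke up leaves its artifacts in the working directory; a quick look (a sketch):

# cluster.rkestate holds the full cluster state and is required for
# subsequent rke operations; kube_config_cluster.yml is the admin kubeconfig
ls -l cluster.yml cluster.rkestate kube_config_cluster.yml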

View the generated kubeconfig file

[root@rancher-01 ~]# cat kube_config_cluster.yml
apiVersion: v1
kind: Config
clusters:
- cluster:
    api-version: v1
    certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUN3akNDQWFxZ0F3SUJBZ0lCQURBTkJna3Foa2lHOXcwQkFRc0ZBREFTTVJBd0RnWURWUVFERXdkcmRXSmwKTFdOaE1CNFhEVEl3TURZeE1qQTROVFF3T0ZvWERUTXdNRFl4TURBNE5UUXdPRm93RWpFUU1BNEdBMVVFQXhNSAphM1ZpWlMxallUQ0NBU0l3RFFZSktvWklodmNOQVFFQkJRQURnZ0VQQURDQ0FRb0NnZ0VCQU5NU3FXQWRkSjVECjRxQ0NMWXRHVWY4SjdIdUIvVlpaYldLckY4M3NZSzVaaEFsK0VrRnlhWFUwWU9aZGdHQldTZDVoMDVNa0VJQmkKZEdFK1gxWDd5RVM1R0NUMGUxYTU2Z1hXMXljQUxiYzVxZUxSQmtpV2p5b09KT0tJSHJBWGxOZVpQcHA4Tm9oYgpuOG50c29BaHc2TVcySjRERWQ2L2lZcGUxMkl4QlhQVVE1R1Y3aE5SV3k3WHVoQ2NDclRDQTRrNmVlakNTdTFrCnYyRUxUMTZ1ekc5OStwUFM2MElRd25FTjFzOFV2Rms2VU1ZVzFPTnFQbXBGdE52M0hmTkZ3OGpHVWh5MXZKeEgKVm9Ed2JqUmZQUHlabERvSmFETXBnSFpxSlhTSlFvaHkxaVk3aXBuaHh6L1VHMExyMmZZZk4vZEhDeUJHOWp4NAplM1JwMGRxTWFOVUNBd0VBQWFNak1DRXdEZ1lEVlIwUEFRSC9CQVFEQWdLa01BOEdBMVVkRXdFQi93UUZNQU1CCkFmOHdEUVlKS29aSWh2Y05BUUVMQlFBRGdnRUJBQlJUUVJVSDRSM2tPVEJLQVJZTkV4YW1VZ3BqQjNDNmVFZGgKaVdOU2lJU0FnMHlSUnlLakpIb21ibUpjRHA3UVAxUWtnamg5a3Z3TnpOU1EwQ3NaRmVMaUpJVHc3U3l0d1huaApmZmtQUnpTTXB2ZVpzZ0RVWExadlkzQjM3bFpCSDhSR0xNQTlYRVZRVUdUcDBSWTFqNE5VN2llaUJycDRveG42CnZNajQra0xocE9pZUJjQlZ6MXVLN0Vkbk5uRTZvcVFNemkzNHBNbDJHT3dWbEY1UVVYcVRNTTJoM244MU04WEcKdFpDOFY2RTlzSXZsTjFkT2tPTEpYWnIwQnI1aGdpbVNCd0xKbks3WlhRTkFjL2pzb3FCa3BEM2ZwWG1OUzFNaQpQWnJqb0t6RWlhclVwNXZsQndFY2pRUDI3TlNQOWVROGkzV2dpVE85VEd6djQ4SzdiOUU9Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K
    server: "https://167.172.114.10:6443"
  name: "local"
contexts:
- context:
    cluster: "local"
    user: "kube-admin-local"
  name: "local"
current-context: "local"
users:
- name: "kube-admin-local"
  user:
    client-certificate-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUM2VENDQWRHZ0F3SUJBZ0lJSEN2MzNVd2FnWTB3RFFZSktvWklodmNOQVFFTEJRQXdFakVRTUE0R0ExVUUKQXhNSGEzVmlaUzFqWVRBZUZ3MHlNREEyTVRJd09EVTBNRGhhRncwek1EQTJNVEF3T0RVME1EbGFNQzR4RnpBVgpCZ05WQkFvVERuTjVjM1JsYlRwdFlYTjBaWEp6TVJNd0VRWURWUVFERXdwcmRXSmxMV0ZrYldsdU1JSUJJakFOCkJna3Foa2lHOXcwQkFRRUZBQU9DQVE4QU1JSUJDZ0tDQVFFQXFMdjUvTDYxdG5ybitZV2VNMDlDWnJNeEI5NkEKSVdSZFQ5M2poNTdYaXdsb0Jtd3NsOStYLzdmZnBGTzZYcXV1QUVDWW4zZEJ2WnMvc256R1I5YUl2NXhpZ1pxRgpDZ0ZCakpsNjE0UVB3N0FGYVJDUTRyMTlxTEdEUS9EMmhhV25YQm4rZU5pNlZsRXlFNVU0cEttVUM1U2FITUdXCmRRR0h2MTZ4bmdyQVllb2gwRzRCbmErV0wyNDNybG5DNVROZ2QwOUJRV2V5Vng5SUppZ3hzcCtkTEMyM2J2MUkKS1VIM0VwV0hJNGFLK05CeWN2SzRMUU9jRUVlWEZuTnRDUmZ3ZkVNeThVbTAwQUZiZG90OGpHajhYTzhlYzlpRgplT21pbUhXZFdDa01uZHJiNDFtSWU3MEVKUGZwM0FxVmRTMkg4azd3MWxaa2NzVkNBa2psbWpYZVlRSURBUUFCCm95Y3dKVEFPQmdOVkhROEJBZjhFQkFNQ0JhQXdFd1lEVlIwbEJBd3dDZ1lJS3dZQkJRVUhBd0l3RFFZSktvWkkKaHZjTkFRRUxCUUFEZ2dFQkFKTnNFaUhta0tPVnpnSVJWOEdSRTZqL2lGQ1lWSzVIakVtR0YzTk9KcUhBNUVLZAo0SDVRZWFubTBuRUpzOFVYdithSUhNcTZ3QjRBc3c5MnJsdnV5NUxIZVNJbVN6UCtVbTdqT0hYZGdjK3d2TXI3Cmt6L1VuT3FPNlJPQ3JUZ1Rod1ZtbHYvNTRxTTZJTkI3aWI1YzNZRlRFU2lJbHdxM05KYU1rMDV6QWp6N3lPM3YKaXdDQ1U0ckJRa2l4MGVQVFlLREJYV1lNOFpUakhLby9TT2JYRFBFRTFVYWFnM2FsMU4xUXNiWUcrYlk2ZWt0VQpSdkpxV0lJNTE5Um5kVWxGMW9zaGNySVJRYlFTSll0S0E5clJhVEZ6SUpIOVR5dldJeXcrSHUrYUpBdkpJdTRnCmIvMkpBUzFHZ0orcjQwc1lqL3o1d04xMHBXWVgyS1RTMWxrVUlnYz0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=
    client-key-data: LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFb3dJQkFBS0NBUUVBcUx2NS9MNjF0bnJuK1lXZU0wOUNack14Qjk2QUlXUmRUOTNqaDU3WGl3bG9CbXdzCmw5K1gvN2ZmcEZPNlhxdXVBRUNZbjNkQnZacy9zbnpHUjlhSXY1eGlnWnFGQ2dGQmpKbDYxNFFQdzdBRmFSQ1EKNHIxOXFMR0RRL0QyaGFXblhCbitlTmk2VmxFeUU1VTRwS21VQzVTYUhNR1dkUUdIdjE2eG5nckFZZW9oMEc0QgpuYStXTDI0M3JsbkM1VE5nZDA5QlFXZXlWeDlJSmlneHNwK2RMQzIzYnYxSUtVSDNFcFdISTRhSytOQnljdks0CkxRT2NFRWVYRm5OdENSZndmRU15OFVtMDBBRmJkb3Q4akdqOFhPOGVjOWlGZU9taW1IV2RXQ2tNbmRyYjQxbUkKZTcwRUpQZnAzQXFWZFMySDhrN3cxbFprY3NWQ0FramxtalhlWVFJREFRQUJBb0lCQUJyVjRwbE8zMm1KUEpHVApyYWh0WjVzYnpxVjR2cG9RODBJN2dPOVYxT1A0K0FGbGZPWWVtbmNDRUdCN0xIM1lBaEZxTkp2UUJMV2FGbFJWCndkYzFDSVNvNDRYSFJIZGw0YjN4dnZhOXV5QWRRNDhGSW5YZE96bjBHWE5aeEd0WEFEb0dyRkVkN3V6QmR4eGsKTkNFRUUxYVFLTDZBRDJUR2ZJZDBFUDJZcWlZb0syRjFwSGJ3N1FPNGhkcXdpWWRwK2xzcDZDQTd0NGpMTnpjeApkaFZHWkE4eHFpdU9MUndmTk85RXhFN1FyTmZtcGpWcHA2di93Q3FpMkdPWGdHVnp3WUtqWm1Yc2NxclltazN6CjZ5RjNXQVJLTDNsQTk0WWxnYTdHaTAzL0tDdS9CMXFoMVJKRU1wcFhva3RXNVJRdStxNU82aG92ZjNONTlOWkYKdlFmNU10MENnWUVBelljM0dMNk5OQmRGVVlpWDRHK21SM2FzNVU5QkdmcWpnSE1YMWtWZXlIZUc2WFlHT29iawpWSHdxU3pReE95VS9JcFRKeHNIR3RKZ2ZkOU1ncXd3bDloSTBpc3pUT2M5bkxlckVLZXdpdG9yejU2USthVEY5CjNGSjhBTExPOTZxVEk5WkxYV3BSRnZZL2lJMlJZRHZYemJybFZ1ZDkzRzhkUFoydUE0YkFtL2NDZ1lFQTBpdXEKdmRPSUtsTXFzOUVQcWdUZ2lZbXRzd0x1Q1djL1ZjWnpWTm5UZWcrYnBSajFxVmdWMTNyOTB6RTAyYmtCU0g5NgorWlRvWEdVbGEzY1p4c0VKZStwRXFtc3RpMDI5NzJTUzI3ZHNodXFEU2NrdVJLM2RUTW1SVXRubXR1SkJJbFhHCnJhSGJ6aXhVL1lwR1o4VEtpdzFaYmhiS3ZLWTNUSGxlbWxET1VtY0NnWUJlVmY3N0U1TjZZbWdGeVgxMG5hcWoKeUp3SlVMeGY4VVFVMUQ4UHNaMlV4QkFmbm5XemJYRG1PbXVyUXhTSndrbmRWSS9jODlxQjBBVTVtYVczL1FaNwprTldmRSs2cjdUKzl1ckU1VU5LS0dQTmswbVYzSVNsVTlHTklhc3BHc1h1Q0NuMWpMa1owRktrS3czZ0R4TlFECjhSSU5Ob24xb09hNS9tTDk2VjhFOXdLQmdRQ1JXa3ZxbnZwRU0yS01IQ0ZXTjZ0RzArWkNzTnNKdTlOTXNrUWYKUWNzRlZ2Z1JGWk1JL0hlV29HUWRoS0dGbG5LeHZpREJyZCtKenhZekhackJIODQ4V2dnRlNMeWw1QzFnL0ZDcApEbEZMZWJNMCs2TTVNbm1qMnAvY0NnR0xLQzFkM3E3YWROKzgxbUl0TzAxNEJOMERrRWJ5WVdielU0MVpJWE54CkRFTzFMd0tCZ0FpNkhWblZTN3NyZFYrTnRGTk1FMXl4b1g2S2svZ09VZ2ZESzduN2kzL1dWUWFSTGw0Umh4aTUKbzljN0xTbmZFRXptQUhxR0RmNUE4a2hDR01tZ0xyNnZQbkV3bXNDMmo4ankvRnZIMkpPdnp1QW02NFNUM1IvUQpkUktZVXZhT0ZDc3J4bjZiVVdFZnl3L1ZDeDJPWlZmU1AwOHp5Ymx6TDJQWUhWclFHMjAyCi0tLS0tRU5EIFJTQSBQUklWQVRFIEtFWS0tLS0tCg==[root@rancher-01 ~]#
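
To avoid passing --kubeconfig to every kubectl command, the generated file can be copied to kubectl's default location or exported via KUBECONFIG (standard kubectl behaviour; a sketch):

mkdir -p ~/.kube
cp kube_config_cluster.yml ~/.kube/config
# or, for the current shell session only:
export KUBECONFIG=$PWD/kube_config_cluster.yml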

Install the kubectl binary

cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://packages.cloud.google.com/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://packages.cloud.google.com/yum/doc/yum-key.gpg https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg
EOF
yum install -y kubectl-1.17.6

Check the kubectl version

[root@rancher-01 ~]# kubectl version --client
Client Version: version.Info{Major:"1", Minor:"17", GitVersion:"v1.17.6", GitCommit:"d32e40e20d167e103faf894261614c5b45c44198", GitTreeState:"clean", BuildDate:"2020-05-20T13:16:24Z", GoVersion:"go1.13.9", Compiler:"gc", Platform:"linux/amd64"}
[root@rancher-01 ~]#

Check cluster node information

[root@rancher-01 ~]# kubectl --kubeconfig kube_config_cluster.yml get nodes -o wide
NAME         STATUS   ROLES               AGE   VERSION   INTERNAL-IP      EXTERNAL-IP   OS-IMAGE                KERNEL-VERSION               CONTAINER-RUNTIME
rancher-01   Ready    controlplane,etcd   12m   v1.17.6   10.138.218.141   <none>        CentOS Linux 7 (Core)   3.10.0-957.27.2.el7.x86_64   docker://18.9.9
rancher-02   Ready    worker              12m   v1.17.6   10.138.218.144   <none>        CentOS Linux 7 (Core)   3.10.0-957.27.2.el7.x86_64   docker://18.9.9
rancher-03   Ready    worker              12m   v1.17.6   10.138.218.146   <none>        CentOS Linux 7 (Core)   3.10.0-957.27.2.el7.x86_64   docker://18.9.9
[root@rancher-01 ~]#

Check cluster component status

[root@rancher-01 ~]# kubectl --kubeconfig kube_config_cluster.yml get cs
NAME                 STATUS    MESSAGE             ERROR
controller-manager   Healthy   ok
scheduler            Healthy   ok
etcd-0               Healthy   {"health":"true"}
[root@rancher-01 ~]#

List the namespaces

[root@rancher-01 ~]# kubectl --kubeconfig kube_config_cluster.yml get namespace
NAME              STATUS   AGE
default           Active   16m
ingress-nginx     Active   15m
kube-node-lease   Active   16m
kube-public       Active   16m
kube-system       Active   16m
[root@rancher-01 ~]#

Check Pod status in the kube-system namespace

[root@rancher-01 ~]# kubectl --kubeconfig kube_config_cluster.yml get pods --namespace=kube-system -o wide
NAME                                      READY   STATUS      RESTARTS   AGE   IP               NODE         NOMINATED NODE   READINESS GATES
canal-dgt4n                               2/2     Running     0          17m   10.138.218.146   rancher-03   <none>           <none>
canal-v9pkx                               2/2     Running     0          17m   10.138.218.141   rancher-01   <none>           <none>
canal-xdg2l                               2/2     Running     0          17m   10.138.218.144   rancher-02   <none>           <none>
coredns-7c5566588d-d9pvd                  1/1     Running     0          17m   10.42.0.3        rancher-03   <none>           <none>
coredns-7c5566588d-tzkvn                  1/1     Running     0          16m   10.42.2.4        rancher-02   <none>           <none>
coredns-autoscaler-65bfc8d47d-8drw8       1/1     Running     0          17m   10.42.2.3        rancher-02   <none>           <none>
metrics-server-6b55c64f86-tmbpr           1/1     Running     0          16m   10.42.2.2        rancher-02   <none>           <none>
rke-coredns-addon-deploy-job-nt4pd        0/1     Completed   0          17m   10.138.218.141   rancher-01   <none>           <none>
rke-ingress-controller-deploy-job-tnbqq   0/1     Completed   0          16m   10.138.218.141   rancher-01   <none>           <none>
rke-metrics-addon-deploy-job-t4jrv        0/1     Completed   0          17m   10.138.218.141   rancher-01   <none>           <none>
rke-network-plugin-deploy-job-fk8tc       0/1     Completed   0          17m   10.138.218.141   rancher-01   <none>           <none>
[root@rancher-01 ~]#
6 月 122020
 

Rancher's role definitions for Kubernetes cluster nodes

https://rancher.com/docs/rancher/v2.x/en/cluster-provisioning/production/nodes-and-roles/
https://kubernetes.io/docs/concepts/overview/components/

etcd

Nodes with the etcd role run etcd, a consistent and highly available key-value store that holds the Kubernetes cluster configuration data. etcd replicates the data to each etcd node.
Note: in the UI, nodes with the etcd role are shown as "Unschedulable", meaning that by default no Pods are scheduled onto them.

controlplane

Nodes with the controlplane role run the Kubernetes master components (excluding etcd, which is a separate role). These components include kube-apiserver, kube-scheduler, kube-controller-manager, and cloud-controller-manager.
Note: in the UI, nodes with the controlplane role are shown as "Unschedulable", meaning that by default no Pods are scheduled onto them.

worker

Nodes with the worker role run the Kubernetes node components, which include the kubelet, kube-proxy, and the container runtime.
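
On a running cluster the role assignment can be checked from kubectl: the ROLES column of get nodes lists the roles, and the taints on etcd/controlplane nodes are what keep ordinary Pods off them by default (a sketch; the node name below is a placeholder and the exact taint keys depend on the Rancher/RKE version):

# List nodes together with their roles (e.g. controlplane,etcd / worker)
kubectl get nodes -o wide

# Show the taints that make an etcd/controlplane node unschedulable by default
kubectl describe node <etcd-or-controlplane-node> | grep -i taints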

6 月 112020
 

Rancher is a complete software stack for teams adopting containers. It addresses the operational and security challenges of managing multiple Kubernetes clusters across any infrastructure, while providing DevOps teams with integrated tools for running containerized workloads.

Disable SELinux

[root@rancher ~]# sed -i 's/^SELINUX=enforcing$/SELINUX=disabled/' /etc/selinux/config
[root@rancher ~]# setenforce 0
[root@rancher ~]# getenforce 
Permissive
[root@rancher ~]#

Install the Docker runtime

[root@rancher ~]# curl https://releases.rancher.com/install-docker/18.09.sh | sh
  % Total    % Received % Xferd  Average Speed   Time    Time     Time  Current
                                 Dload  Upload   Total   Spent    Left  Speed
100 15521  100 15521    0     0  92374      0 --:--:-- --:--:-- --:--:-- 92940
+ '[' centos = redhat ']'
+ sh -c 'yum install -y -q yum-utils'
Delta RPMs disabled because /usr/bin/applydeltarpm not installed.
warning: /var/cache/yum/x86_64/7/updates/packages/yum-utils-1.1.31-54.el7_8.noarch.rpm: Header V3 RSA/SHA256 Signature, key ID f4a80eb5: NOKEY
Public key for yum-utils-1.1.31-54.el7_8.noarch.rpm is not installed
Importing GPG key 0xF4A80EB5:
 Userid     : "CentOS-7 Key (CentOS 7 Official Signing Key) <security@centos.org>"
 Fingerprint: 6341 ab27 53d7 8a78 a7c2 7bb1 24c6 a8a7 f4a8 0eb5
 Package    : centos-release-7-6.1810.2.el7.centos.x86_64 (installed)
 From       : /etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-7
+ sh -c 'yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo'
Loaded plugins: fastestmirror
adding repo from: https://download.docker.com/linux/centos/docker-ce.repo
grabbing file https://download.docker.com/linux/centos/docker-ce.repo to /etc/yum.repos.d/docker-ce.repo
repo saved to /etc/yum.repos.d/docker-ce.repo
+ '[' stable '!=' stable ']'
+ sh -c 'yum makecache fast'
Loaded plugins: fastestmirror
Loading mirror speeds from cached hostfile
 * base: mirror.keystealth.org
 * extras: mirror.fileplanet.com
 * updates: mirror.web-ster.com
base                                                                                                                                                     | 3.6 kB  00:00:00     
docker-ce-stable                                                                                                                                         | 3.5 kB  00:00:00     
extras                                                                                                                                                   | 2.9 kB  00:00:00     
updates                                                                                                                                                  | 2.9 kB  00:00:00     
(1/2): docker-ce-stable/x86_64/updateinfo                                                                                                                |   55 B  00:00:00     
(2/2): docker-ce-stable/x86_64/primary_db                                                                                                                |  44 kB  00:00:00     
Metadata Cache Created
+ sh -c 'yum install -y -q docker-ce-18.09.9 docker-ce-cli-18.09.9'
warning: /var/cache/yum/x86_64/7/docker-ce-stable/packages/containerd.io-1.2.13-3.2.el7.x86_64.rpm: Header V4 RSA/SHA512 Signature, key ID 621e9f35: NOKEY
Public key for containerd.io-1.2.13-3.2.el7.x86_64.rpm is not installed
Importing GPG key 0x621E9F35:
 Userid     : "Docker Release (CE rpm) <docker@docker.com>"
 Fingerprint: 060a 61c5 1b55 8a7f 742b 77aa c52f eb6b 621e 9f35
 From       : https://download.docker.com/linux/centos/gpg
setsebool:  SELinux is disabled.
+ '[' -d /run/systemd/system ']'
+ sh -c 'service docker start'
Redirecting to /bin/systemctl start docker.service
+ sh -c 'docker version'
Client:
 Version:           18.09.9
 API version:       1.39
 Go version:        go1.11.13
 Git commit:        039a7df9ba
 Built:             Wed Sep  4 16:51:21 2019
 OS/Arch:           linux/amd64
 Experimental:      false

Server: Docker Engine - Community
 Engine:
  Version:          18.09.9
  API version:      1.39 (minimum version 1.12)
  Go version:       go1.11.13
  Git commit:       039a7df
  Built:            Wed Sep  4 16:22:32 2019
  OS/Arch:          linux/amd64
  Experimental:     false

If you would like to use Docker as a non-root user, you should now consider
adding your user to the "docker" group with something like:

  sudo usermod -aG docker your-user

Remember that you will have to log out and back in for this to take effect!

WARNING: Adding a user to the "docker" group will grant the ability to run
         containers which can be used to obtain root privileges on the
         docker host.
         Refer to https://docs.docker.com/engine/security/security/#docker-daemon-attack-surface
         for more information.

[root@rancher ~]#

List of install scripts for the available Docker versions

https://github.com/rancher/install-docker

Configure the DNS record

rancher.bcoc.site ----> 167.71.149.159
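
Before starting Rancher with --acme-domain it is worth confirming that the record already resolves, since Let's Encrypt validates the domain over the public Internet (a quick check; dig is provided by the bind-utils package):

dig +short rancher.bcoc.site
# expected output: 167.71.149.159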

Install Rancher with persistent storage and a Let's Encrypt certificate

docker run -d --restart=unless-stopped \
  -p 80:80 -p 443:443 \
  -v /opt/rancher:/var/lib/rancher \
  rancher/rancher:latest \
  --acme-domain rancher.bcoc.site
  
[root@rancher ~]# docker image ls
REPOSITORY          TAG                 IMAGE ID            CREATED             SIZE
[root@rancher ~]# docker container ps
CONTAINER ID        IMAGE               COMMAND             CREATED             STATUS              PORTS               NAMES
[root@rancher ~]# 
[root@rancher ~]# docker run -d --restart=unless-stopped \
>   -p 80:80 -p 443:443 \
>   -v /opt/rancher:/var/lib/rancher \
>   rancher/rancher:latest \
>   --acme-domain rancher.bcoc.site
Unable to find image 'rancher/rancher:latest' locally
latest: Pulling from rancher/rancher
23884877105a: Pull complete 
bc38caa0f5b9: Pull complete 
2910811b6c42: Pull complete 
36505266dcc6: Pull complete 
99447ff7670f: Pull complete 
879c87dc86fd: Pull complete 
5b954e5aebf8: Pull complete 
664e1faf26b5: Pull complete 
bf7ac75d932b: Pull complete 
7e972d16ff5b: Pull complete 
08314b1e671c: Pull complete 
d5ce20b3d070: Pull complete 
20e75cd9c8e9: Pull complete 
80daa2770be8: Pull complete 
7fb927855713: Pull complete 
af20d79674f1: Pull complete 
d6a9086242eb: Pull complete 
887a8f050cee: Pull complete 
834df47e622f: Pull complete 
Digest: sha256:25ab51f5366ee7b7add66bc41203eac4b8654386630432ac4f334f69f8baf706
Status: Downloaded newer image for rancher/rancher:latest
7b54dbd549650b332c9ded7904e044774ddce775f54e3f6802d22f9c2e626057
[root@rancher ~]#

Check the running Rancher container

[root@rancher ~]# docker container ps
CONTAINER ID        IMAGE                    COMMAND                  CREATED             STATUS              PORTS                                      NAMES
7b54dbd54965        rancher/rancher:latest   "entrypoint.sh --acm…"   20 seconds ago      Up 19 seconds       0.0.0.0:80->80/tcp, 0.0.0.0:443->443/tcp   recursing_joliot
[root@rancher ~]#

Log in to the web console and set a password for the default admin user

Confirm the web console access URL

The console home page

Check the HTTPS certificate information

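The certificate can also be inspected from the command line instead of the browser (a sketch; output formatting varies with the openssl version):

echo | openssl s_client -connect rancher.bcoc.site:443 -servername rancher.bcoc.site 2>/dev/null \
  | openssl x509 -noout -issuer -subject -dates
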
Create the cluster configuration

Cluster configuration details

Generate the cluster node registration command according to the node roles

Run on one or more nodes with Docker installed

sudo docker run -d --privileged --restart=unless-stopped --net=host \
-v /etc/kubernetes:/etc/kubernetes -v /var/run:/var/run rancher/rancher-agent:v2.4.4 \
--server https://rancher.bcoc.site --token 7lmgztttzn7z2l8w6t4xhdz9gz2l7rpks6x7gc8222pjddt2mxlwcp \
--etcd --controlplane --worker

Run on rancher-01

[root@rancher-01 ~]# sudo docker run -d --privileged --restart=unless-stopped --net=host \
> -v /etc/kubernetes:/etc/kubernetes -v /var/run:/var/run rancher/rancher-agent:v2.4.4 \
> --server https://rancher.bcoc.site --token 7lmgztttzn7z2l8w6t4xhdz9gz2l7rpks6x7gc8222pjddt2mxlwcp \
> --etcd --controlplane --worker
Unable to find image 'rancher/rancher-agent:v2.4.4' locally
v2.4.4: Pulling from rancher/rancher-agent
23884877105a: Pull complete 
bc38caa0f5b9: Pull complete 
2910811b6c42: Pull complete 
36505266dcc6: Pull complete 
839286d9c3a6: Pull complete 
8a1ba646e5a3: Pull complete 
4917caa38753: Pull complete 
b56094248bdf: Pull complete 
77f08dadb4eb: Pull complete 
d925a4b78308: Pull complete 
Digest: sha256:a6b416d7e5f89d28f8f8a54472cabe656378bc8c1903d08e1c2e9e453cdab1ff
Status: Downloaded newer image for rancher/rancher-agent:v2.4.4
eea306867dca30ad9f70dcd764e723fec2b10239212205535ab83f24fc6827ed
[root@rancher-01 ~]#

Run on rancher-02

[root@rancher-02 ~]# sudo docker run -d --privileged --restart=unless-stopped --net=host \
> -v /etc/kubernetes:/etc/kubernetes -v /var/run:/var/run rancher/rancher-agent:v2.4.4 \
> --server https://rancher.bcoc.site --token 7lmgztttzn7z2l8w6t4xhdz9gz2l7rpks6x7gc8222pjddt2mxlwcp \
> --etcd --controlplane --worker
Unable to find image 'rancher/rancher-agent:v2.4.4' locally
v2.4.4: Pulling from rancher/rancher-agent
23884877105a: Pull complete 
bc38caa0f5b9: Pull complete 
2910811b6c42: Pull complete 
36505266dcc6: Pull complete 
839286d9c3a6: Pull complete 
8a1ba646e5a3: Pull complete 
4917caa38753: Pull complete 
b56094248bdf: Pull complete 
77f08dadb4eb: Pull complete 
d925a4b78308: Pull complete 
Digest: sha256:a6b416d7e5f89d28f8f8a54472cabe656378bc8c1903d08e1c2e9e453cdab1ff
Status: Downloaded newer image for rancher/rancher-agent:v2.4.4
1f84c5b8afa35475fada986834458c08c565ff7d2b3dd4965a55a2439036e45b
[root@rancher-02 ~]#

The web console shows the cluster being provisioned

The cluster was created successfully

5 月 072020
 

Prepare three versions of the Deployment config file (for an Apache httpd Server version upgrade)

Add the revisionHistoryLimit field to control how many kubectl apply revisions are kept in the history

[root@k8s-01 ~]# vi httpd-deployment.v1.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: httpd
spec:
  revisionHistoryLimit: 10
  replicas: 3
  selector:
    matchLabels:
      run: httpd
  template:
    metadata:
      labels:
        run: httpd
    spec:
      containers:
      - name: httpd
        image: httpd:2.4.39
        ports:
        - containerPort: 80

[root@k8s-01 ~]# vi httpd-deployment.v2.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: httpd
spec:
  revisionHistoryLimit: 10
  replicas: 3
  selector:
    matchLabels:
      run: httpd
  template:
    metadata:
      labels:
        run: httpd
    spec:
      containers:
      - name: httpd
        image: httpd:2.4.41
        ports:
        - containerPort: 80

[root@k8s-01 ~]# vi httpd-deployment.v3.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: httpd
spec:
  revisionHistoryLimit: 10
  replicas: 3
  selector:
    matchLabels:
      run: httpd
  template:
    metadata:
      labels:
        run: httpd
    spec:
      containers:
      - name: httpd
        image: httpd:2.4.43
        ports:
        - containerPort: 80

Apply the three config files in turn, recording each operation as a revision

Deploy version 2.4.39

[root@k8s-01 ~]# kubectl apply -f httpd-deployment.v1.yaml --record
deployment.apps/httpd created
[root@k8s-01 ~]# kubectl get pods -o wide
NAME                     READY   STATUS    RESTARTS   AGE   IP           NODE     NOMINATED NODE   READINESS GATES
httpd-5bbc9b449d-p7828   1/1     Running   0          30s   10.244.2.4   k8s-03   <none>           <none>
httpd-5bbc9b449d-twmv9   1/1     Running   0          30s   10.244.1.3   k8s-02   <none>           <none>
httpd-5bbc9b449d-zj4zn   1/1     Running   0          30s   10.244.1.2   k8s-02   <none>           <none>
[root@k8s-01 ~]#

Check the Deployment's image version (2.4.39)

[root@k8s-01 ~]# kubectl get deployments.apps -o wide
NAME    READY   UP-TO-DATE   AVAILABLE   AGE   CONTAINERS   IMAGES         SELECTOR
httpd   3/3     3            3           58s   httpd        httpd:2.4.39   run=httpd
[root@k8s-01 ~]#

Update to version 2.4.41

[root@k8s-01 ~]# kubectl apply -f httpd-deployment.v2.yaml --record
deployment.apps/httpd configured
[root@k8s-01 ~]# kubectl get pods -o wide
NAME                     READY   STATUS              RESTARTS   AGE   IP           NODE     NOMINATED NODE   READINESS GATES
httpd-5bb8cdb99c-45tjk   0/1     ContainerCreating   0          8s    <none>       k8s-02   <none>           <none>
httpd-5bb8cdb99c-nz4n6   1/1     Running             0          17s   10.244.2.5   k8s-03   <none>           <none>
httpd-5bbc9b449d-p7828   1/1     Running             0          94s   10.244.2.4   k8s-03   <none>           <none>
httpd-5bbc9b449d-zj4zn   1/1     Running             0          94s   10.244.1.2   k8s-02   <none>           <none>
[root@k8s-01 ~]# kubectl get pods -o wide
NAME                     READY   STATUS              RESTARTS   AGE    IP           NODE     NOMINATED NODE   READINESS GATES
httpd-5bb8cdb99c-45tjk   1/1     Running             0          24s    10.244.1.4   k8s-02   <none>           <none>
httpd-5bb8cdb99c-kmqxb   0/1     ContainerCreating   0          1s     <none>       k8s-02   <none>           <none>
httpd-5bb8cdb99c-nz4n6   1/1     Running             0          33s    10.244.2.5   k8s-03   <none>           <none>
httpd-5bbc9b449d-p7828   1/1     Running             0          110s   10.244.2.4   k8s-03   <none>           <none>
httpd-5bbc9b449d-zj4zn   1/1     Terminating         0          110s   10.244.1.2   k8s-02   <none>           <none>
[root@k8s-01 ~]# kubectl get pods -o wide
NAME                     READY   STATUS    RESTARTS   AGE   IP           NODE     NOMINATED NODE   READINESS GATES
httpd-5bb8cdb99c-45tjk   1/1     Running   0          44s   10.244.1.4   k8s-02   <none>           <none>
httpd-5bb8cdb99c-kmqxb   1/1     Running   0          21s   10.244.1.5   k8s-02   <none>           <none>
httpd-5bb8cdb99c-nz4n6   1/1     Running   0          53s   10.244.2.5   k8s-03   <none>           <none>
[root@k8s-01 ~]#

Check the Deployment's image version (2.4.41)

[root@k8s-01 ~]# kubectl get deployments.apps -o wide
NAME    READY   UP-TO-DATE   AVAILABLE   AGE     CONTAINERS   IMAGES         SELECTOR
httpd   3/3     3            3           3m13s   httpd        httpd:2.4.41   run=httpd
[root@k8s-01 ~]#

Update to version 2.4.43

[root@k8s-01 ~]# kubectl apply -f httpd-deployment.v3.yaml --record
deployment.apps/httpd configured
[root@k8s-01 ~]# kubectl get pods -o wide
NAME                     READY   STATUS    RESTARTS   AGE     IP           NODE     NOMINATED NODE   READINESS GATES
httpd-7c68f97dc5-8vwxx   1/1     Running   0          3m26s   10.244.2.6   k8s-03   <none>           <none>
httpd-7c68f97dc5-fn6ql   1/1     Running   0          2m55s   10.244.1.7   k8s-02   <none>           <none>
httpd-7c68f97dc5-s7mwv   1/1     Running   0          3m17s   10.244.1.6   k8s-02   <none>           <none>
[root@k8s-01 ~]#

Check the Deployment's image version (2.4.43)

[root@k8s-01 ~]# kubectl get deployments.apps -o wide
NAME    READY   UP-TO-DATE   AVAILABLE   AGE   CONTAINERS   IMAGES         SELECTOR
httpd   3/3     3            3           5m    httpd        httpd:2.4.43   run=httpd
[root@k8s-01 ~]#

Check the Deployment's kubectl apply revision history

[root@k8s-01 ~]# kubectl rollout history deployment httpd 
deployment.apps/httpd 
REVISION  CHANGE-CAUSE
1         kubectl apply --filename=httpd-deployment.v1.yaml --record=true
2         kubectl apply --filename=httpd-deployment.v2.yaml --record=true
3         kubectl apply --filename=httpd-deployment.v3.yaml --record=true

[root@k8s-01 ~]#
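
The CHANGE-CAUSE column is read from the kubernetes.io/change-cause annotation that --record writes onto the Deployment, so it can also be verified directly on the object (a quick check):

kubectl get deployment httpd -o yaml | grep change-cause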

Roll back to a specific revision (here, the previous one)

[root@k8s-01 ~]# kubectl rollout undo deployment httpd --to-revision=2
deployment.apps/httpd rolled back
[root@k8s-01 ~]#

Check the revision history again (the rolled-back revision 2 now shows up as revision 4)

[root@k8s-01 ~]# kubectl rollout history deployment httpd 
deployment.apps/httpd 
REVISION  CHANGE-CAUSE
1         kubectl apply --filename=httpd-deployment.v1.yaml --record=true
3         kubectl apply --filename=httpd-deployment.v3.yaml --record=true
4         kubectl apply --filename=httpd-deployment.v2.yaml --record=true

[root@k8s-01 ~]#

Check the current Deployment image version

[root@k8s-01 ~]# kubectl get deployments.apps -o wide
NAME    READY   UP-TO-DATE   AVAILABLE   AGE   CONTAINERS   IMAGES         SELECTOR
httpd   3/3     3            3           9m    httpd        httpd:2.4.41   run=httpd
[root@k8s-01 ~]#
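
Before rolling back it can help to confirm which Pod template a given revision contains; kubectl rollout history accepts a --revision flag for this (a sketch):

# Show the Pod template (including the image tag) recorded for revision 3
kubectl rollout history deployment httpd --revision=3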
5 月 062020
 

A Service in a Kubernetes cluster logically represents a group of Pods, and the association with those Pods is established through labels.

Prepare the Deployment config file

[root@k8s-01 ~]# vi httpd-deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: httpd
spec:
  replicas: 3
  selector:
    matchLabels:
      run: httpd
  template:
    metadata:
      labels:
        run: httpd
    spec:
      containers:
      - name: httpd
        image: httpd:2.4.41
        ports:
        - containerPort: 80
[root@k8s-01 ~]# kubectl apply -f httpd-deployment.yaml 
deployment.apps/httpd created
[root@k8s-01 ~]#

Get the cluster Pod list details

[root@k8s-01 ~]# kubectl get pods -o wide
NAME                     READY   STATUS    RESTARTS   AGE     IP           NODE     NOMINATED NODE   READINESS GATES
httpd-5bb8cdb99c-g5m95   1/1     Running   0          4m29s   10.244.2.3   k8s-03   <none>           <none>
httpd-5bb8cdb99c-hzjqd   1/1     Running   0          4m29s   10.244.1.3   k8s-02   <none>           <none>
httpd-5bb8cdb99c-s4q25   1/1     Running   0          4m29s   10.244.1.4   k8s-02   <none>           <none>
[root@k8s-01 ~]#

Use curl to simulate browser requests to the Pod IP addresses (Pod IPs are only reachable from containers and nodes within the cluster)

[root@k8s-01 ~]# curl 10.244.2.3
<html><body><h1>It works!</h1></body></html>
[root@k8s-01 ~]# curl 10.244.1.3
<html><body><h1>It works!</h1></body></html>
[root@k8s-01 ~]# curl 10.244.1.4
<html><body><h1>It works!</h1></body></html>
[root@k8s-01 ~]#

[root@k8s-02 ~]# curl 10.244.2.3
<html><body><h1>It works!</h1></body></html>
[root@k8s-02 ~]# curl 10.244.1.3
<html><body><h1>It works!</h1></body></html>
[root@k8s-02 ~]# curl 10.244.1.4
<html><body><h1>It works!</h1></body></html>
[root@k8s-02 ~]#

[root@k8s-03 ~]# curl 10.244.2.3
<html><body><h1>It works!</h1></body></html>
[root@k8s-03 ~]# curl 10.244.1.3
<html><body><h1>It works!</h1></body></html>
[root@k8s-03 ~]# curl 10.244.1.4
<html><body><h1>It works!</h1></body></html>
[root@k8s-03 ~]#

Ping the Pod IPs

[root@k8s-01 ~]# ping -c 2 10.244.2.3
PING 10.244.2.3 (10.244.2.3) 56(84) bytes of data.
64 bytes from 10.244.2.3: icmp_seq=1 ttl=63 time=2.03 ms
64 bytes from 10.244.2.3: icmp_seq=2 ttl=63 time=0.660 ms

--- 10.244.2.3 ping statistics ---
2 packets transmitted, 2 received, 0% packet loss, time 1001ms
rtt min/avg/max/mdev = 0.660/1.348/2.036/0.688 ms
[root@k8s-01 ~]# ping -c 2 10.244.1.3
PING 10.244.1.3 (10.244.1.3) 56(84) bytes of data.
64 bytes from 10.244.1.3: icmp_seq=1 ttl=63 time=1.58 ms
64 bytes from 10.244.1.3: icmp_seq=2 ttl=63 time=0.641 ms

--- 10.244.1.3 ping statistics ---
2 packets transmitted, 2 received, 0% packet loss, time 1001ms
rtt min/avg/max/mdev = 0.641/1.115/1.589/0.474 ms
[root@k8s-01 ~]# ping -c 2 10.244.1.4
PING 10.244.1.4 (10.244.1.4) 56(84) bytes of data.
64 bytes from 10.244.1.4: icmp_seq=1 ttl=63 time=0.658 ms
64 bytes from 10.244.1.4: icmp_seq=2 ttl=63 time=0.483 ms

--- 10.244.1.4 ping statistics ---
2 packets transmitted, 2 received, 0% packet loss, time 1000ms
rtt min/avg/max/mdev = 0.483/0.570/0.658/0.090 ms
[root@k8s-01 ~]#

Create the Service config file

[root@k8s-01 ~]# vi httpd-service.yaml
apiVersion: v1
kind: Service
metadata:
  name: httpd-service
spec:
  selector:
    run: httpd
  ports:
  - protocol: TCP
    port: 8080
    targetPort: 80
[root@k8s-01 ~]# kubectl apply -f httpd-service.yaml
service/httpd-service created
[root@k8s-01 ~]#

Get the cluster Service list details

[root@k8s-01 ~]# kubectl get services -o wide
NAME            TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)    AGE    SELECTOR
httpd-service   ClusterIP   10.109.145.140   <none>        8080/TCP   4m9s   run=httpd
kubernetes      ClusterIP   10.96.0.1        <none>        443/TCP    10m    <none>
[root@k8s-01 ~]#

Try pinging the cluster IP (it cannot be pinged by default)

[root@k8s-01 ~]# ping 10.109.145.140
PING 10.109.145.140 (10.109.145.140) 56(84) bytes of data.
^C
--- 10.109.145.140 ping statistics ---
3 packets transmitted, 0 received, 100% packet loss, time 1999ms

[root@k8s-01 ~]#
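
The ping fails because a ClusterIP is a virtual IP: it is not bound to any network interface, but implemented by kube-proxy as NAT rules that only match traffic to the Service's port. Assuming kube-proxy runs in its default iptables mode, the rules for this Service can be seen on a node (a sketch):

iptables -t nat -L KUBE-SERVICES -n | grep 10.109.145.140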

Use the Service's cluster IP to reach the backend Pods and containers labeled run=httpd

[root@k8s-01 ~]# curl 10.109.145.140:8080
<html><body><h1>It works!</h1></body></html>
[root@k8s-01 ~]# curl 10.109.145.140:8080
<html><body><h1>It works!</h1></body></html>
[root@k8s-01 ~]# curl 10.109.145.140:8080
<html><body><h1>It works!</h1></body></html>
[root@k8s-01 ~]# curl -I 10.109.145.140:8080
HTTP/1.1 200 OK
Date: Wed, 06 May 2020 07:24:57 GMT
Server: Apache/2.4.41 (Unix)
Last-Modified: Mon, 11 Jun 2007 18:53:14 GMT
ETag: "2d-432a5e4a73a80"
Accept-Ranges: bytes
Content-Length: 45
Content-Type: text/html

[root@k8s-01 ~]#
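
Inside the cluster the Service is also reachable by its DNS name (httpd-service, or httpd-service.default.svc.cluster.local) via CoreDNS. A throwaway Pod can be used to test this (a sketch; the busybox image tag is illustrative):

kubectl run dns-test --rm -it --restart=Never --image=busybox:1.31 \
  -- wget -qO- http://httpd-service:8080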

Describe the Service to confirm which backend Pod IPs the cluster IP points to

[root@k8s-01 ~]# kubectl describe services httpd-service
Name:              httpd-service
Namespace:         default
Labels:            <none>
Annotations:       kubectl.kubernetes.io/last-applied-configuration:
                     {"apiVersion":"v1","kind":"Service","metadata":{"annotations":{},"name":"httpd-service","namespace":"default"},"spec":{"ports":[{"port":80...
Selector:          run=httpd
Type:              ClusterIP
IP:                10.109.145.140
Port:              <unset>  8080/TCP
TargetPort:        80/TCP
Endpoints:         10.244.1.3:80,10.244.1.4:80,10.244.2.3:80
Session Affinity:  None
Events:            <none>
[root@k8s-01 ~]#
[root@k8s-01 ~]# kubectl get endpoints httpd-service
NAME            ENDPOINTS                                   AGE
httpd-service   10.244.1.3:80,10.244.1.4:80,10.244.2.3:80   5m23s
[root@k8s-01 ~]#
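
The Endpoints object is kept in sync with the Pods matched by the selector, so scaling the Deployment immediately changes the endpoint list (a sketch, not part of the original session):

kubectl scale deployment httpd --replicas=2
kubectl get endpoints httpd-service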
4 月 272020
 

Unlike a Deployment, a DaemonSet runs exactly one replica on each node, which is useful for daemon-style services.

Inspect the DaemonSet-type system components (kube-proxy and kube-flannel-ds-amd64)

List the DaemonSets in the kube-system namespace

[root@k8s01 ~]# kubectl get daemonsets.apps --namespace=kube-system 
NAME                      DESIRED   CURRENT   READY   UP-TO-DATE   AVAILABLE   NODE SELECTOR            AGE
kube-flannel-ds-amd64     5         5         5       5            5           <none>                   6d16h
kube-flannel-ds-arm       0         0         0       0            0           <none>                   6d16h
kube-flannel-ds-arm64     0         0         0       0            0           <none>                   6d16h
kube-flannel-ds-ppc64le   0         0         0       0            0           <none>                   6d16h
kube-flannel-ds-s390x     0         0         0       0            0           <none>                   6d16h
kube-proxy                5         5         5       5            5           kubernetes.io/os=linux   6d16h
[root@k8s01 ~]#

Get the Pod details in the kube-system namespace (every node runs one replica of each DaemonSet)

[root@k8s01 ~]# kubectl get pods --namespace=kube-system -o wide
NAME                            READY   STATUS    RESTARTS   AGE     IP             NODE    NOMINATED NODE   READINESS GATES
coredns-66bff467f8-5x8nf        1/1     Running   0          6d16h   10.244.1.2     k8s02   <none>           <none>
coredns-66bff467f8-mgcd2        1/1     Running   0          6d16h   10.244.0.2     k8s01   <none>           <none>
etcd-k8s01                      1/1     Running   0          6d16h   172.31.14.12   k8s01   <none>           <none>
kube-apiserver-k8s01            1/1     Running   0          6d16h   172.31.14.12   k8s01   <none>           <none>
kube-controller-manager-k8s01   1/1     Running   0          6d16h   172.31.14.12   k8s01   <none>           <none>
kube-flannel-ds-amd64-4ngbr     1/1     Running   0          6d16h   172.31.6.113   k8s03   <none>           <none>
kube-flannel-ds-amd64-j9qmh     1/1     Running   0          4d      172.31.1.139   k8s04   <none>           <none>
kube-flannel-ds-amd64-kmw29     1/1     Running   0          6d16h   172.31.3.249   k8s02   <none>           <none>
kube-flannel-ds-amd64-l57kp     1/1     Running   0          6d16h   172.31.14.12   k8s01   <none>           <none>
kube-flannel-ds-amd64-rr8sv     1/1     Running   1          4d      172.31.15.1    k8s05   <none>           <none>
kube-proxy-22fd2                1/1     Running   0          6d16h   172.31.3.249   k8s02   <none>           <none>
kube-proxy-97hft                1/1     Running   0          4d      172.31.1.139   k8s04   <none>           <none>
kube-proxy-jwwp2                1/1     Running   0          6d16h   172.31.6.113   k8s03   <none>           <none>
kube-proxy-mw6xf                1/1     Running   0          4d      172.31.15.1    k8s05   <none>           <none>
kube-proxy-wnf4q                1/1     Running   0          6d16h   172.31.14.12   k8s01   <none>           <none>
kube-scheduler-k8s01            1/1     Running   0          6d16h   172.31.14.12   k8s01   <none>           <none>
[root@k8s01 ~]#

Review the DaemonSet section of the flannel network plugin manifest (an excerpt of kube-flannel.yml)

[root@k8s01 ~]# vi kube-flannel.yml
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: kube-flannel-ds-amd64
  namespace: kube-system
  labels:
    tier: node
    app: flannel
spec:
  selector:
    matchLabels:
      app: flannel
  template:
    metadata:
      labels:
        tier: node
        app: flannel
    spec:
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
              - matchExpressions:
                  - key: kubernetes.io/os
                    operator: In
                    values:
                      - linux
                  - key: kubernetes.io/arch
                    operator: In
                    values:
                      - amd64
      hostNetwork: true
      tolerations:
      - operator: Exists
        effect: NoSchedule
      serviceAccountName: flannel
      initContainers:
      - name: install-cni
        image: quay.io/coreos/flannel:v0.12.0-amd64
        command:
        - cp
        args:
        - -f
        - /etc/kube-flannel/cni-conf.json
        - /etc/cni/net.d/10-flannel.conflist
        volumeMounts:
        - name: cni
          mountPath: /etc/cni/net.d
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      containers:
      - name: kube-flannel
        image: quay.io/coreos/flannel:v0.12.0-amd64
        command:
        - /opt/bin/flanneld
        args:
        - --ip-masq
        - --kube-subnet-mgr
        resources:
          requests:
            cpu: "100m"
            memory: "50Mi"
          limits:
            cpu: "100m"
            memory: "50Mi"
        securityContext:
          privileged: false
          capabilities:
            add: ["NET_ADMIN"]
        env:
        - name: POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        volumeMounts:
        - name: run
          mountPath: /run/flannel
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      volumes:
        - name: run
          hostPath:
            path: /run/flannel
        - name: cni
          hostPath:
            path: /etc/cni/net.d
        - name: flannel-cfg
          configMap:
            name: kube-flannel-cfg

Run a DaemonSet resource (the Fluentd log collector)

[root@k8s01 ~]# vi daemonset.yaml
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: fluentd-elasticsearch
  namespace: kube-system
  labels:
    k8s-app: fluentd-logging
spec:
  selector:
    matchLabels:
      name: fluentd-elasticsearch
  template:
    metadata:
      labels:
        name: fluentd-elasticsearch
    spec:
      tolerations:
      # this toleration is to have the daemonset runnable on master nodes
      # remove it if your masters can't run pods
      - key: node-role.kubernetes.io/master
        effect: NoSchedule
      containers:
      - name: fluentd-elasticsearch
        image: quay.io/fluentd_elasticsearch/fluentd:v2.5.2
        resources:
          limits:
            memory: 200Mi
          requests:
            cpu: 100m
            memory: 200Mi
        volumeMounts:
        - name: varlog
          mountPath: /var/log
        - name: varlibdockercontainers
          mountPath: /var/lib/docker/containers
          readOnly: true
      terminationGracePeriodSeconds: 30
      volumes:
      - name: varlog
        hostPath:
          path: /var/log
      - name: varlibdockercontainers
        hostPath:
          path: /var/lib/docker/containers

Apply the config file

[root@k8s01 ~]# kubectl apply -f daemonset.yaml 
daemonset.apps/fluentd-elasticsearch created
[root@k8s01 ~]# kubectl get daemonsets.apps 
No resources found in default namespace.
[root@k8s01 ~]# kubectl get daemonsets.apps --namespace=kube-system 
NAME                      DESIRED   CURRENT   READY   UP-TO-DATE   AVAILABLE   NODE SELECTOR            AGE
fluentd-elasticsearch     5         5         5       5            5           <none>                   28s
kube-flannel-ds-amd64     5         5         5       5            5           <none>                   6d18h
kube-flannel-ds-arm       0         0         0       0            0           <none>                   6d18h
kube-flannel-ds-arm64     0         0         0       0            0           <none>                   6d18h
kube-flannel-ds-ppc64le   0         0         0       0            0           <none>                   6d18h
kube-flannel-ds-s390x     0         0         0       0            0           <none>                   6d18h
kube-proxy                5         5         5       5            5           kubernetes.io/os=linux   6d18h
[root@k8s01 ~]#

Get the Pod list in the kube-system namespace (a fluentd-elasticsearch Pod now runs on every node)

[root@k8s01 ~]# kubectl get pods --namespace=kube-system -o wide
NAME                            READY   STATUS    RESTARTS   AGE     IP             NODE    NOMINATED NODE   READINESS GATES
coredns-66bff467f8-5x8nf        1/1     Running   0          6d18h   10.244.1.2     k8s02   <none>           <none>
coredns-66bff467f8-mgcd2        1/1     Running   0          6d18h   10.244.0.2     k8s01   <none>           <none>
etcd-k8s01                      1/1     Running   0          6d18h   172.31.14.12   k8s01   <none>           <none>
fluentd-elasticsearch-64c2h     1/1     Running   0          84s     10.244.5.9     k8s05   <none>           <none>
fluentd-elasticsearch-f8989     1/1     Running   0          84s     10.244.0.3     k8s01   <none>           <none>
fluentd-elasticsearch-lcgn7     1/1     Running   0          84s     10.244.3.4     k8s04   <none>           <none>
fluentd-elasticsearch-ss2zm     1/1     Running   0          84s     10.244.1.20    k8s02   <none>           <none>
fluentd-elasticsearch-wkd45     1/1     Running   0          84s     10.244.2.39    k8s03   <none>           <none>
kube-apiserver-k8s01            1/1     Running   0          6d18h   172.31.14.12   k8s01   <none>           <none>
kube-controller-manager-k8s01   1/1     Running   0          6d18h   172.31.14.12   k8s01   <none>           <none>
kube-flannel-ds-amd64-4ngbr     1/1     Running   0          6d18h   172.31.6.113   k8s03   <none>           <none>
kube-flannel-ds-amd64-j9qmh     1/1     Running   0          4d2h    172.31.1.139   k8s04   <none>           <none>
kube-flannel-ds-amd64-kmw29     1/1     Running   0          6d18h   172.31.3.249   k8s02   <none>           <none>
kube-flannel-ds-amd64-l57kp     1/1     Running   0          6d18h   172.31.14.12   k8s01   <none>           <none>
kube-flannel-ds-amd64-rr8sv     1/1     Running   1          4d2h    172.31.15.1    k8s05   <none>           <none>
kube-proxy-22fd2                1/1     Running   0          6d18h   172.31.3.249   k8s02   <none>           <none>
kube-proxy-97hft                1/1     Running   0          4d2h    172.31.1.139   k8s04   <none>           <none>
kube-proxy-jwwp2                1/1     Running   0          6d18h   172.31.6.113   k8s03   <none>           <none>
kube-proxy-mw6xf                1/1     Running   0          4d2h    172.31.15.1    k8s05   <none>           <none>
kube-proxy-wnf4q                1/1     Running   0          6d18h   172.31.14.12   k8s01   <none>           <none>
kube-scheduler-k8s01            1/1     Running   0          6d18h   172.31.14.12   k8s01   <none>           <none>
[root@k8s01 ~]#
4 月 272020
 

Get the current cluster Pod list and the node each Pod runs on

[root@k8s01 ~]# kubectl get pods -o wide
NAME                               READY   STATUS    RESTARTS   AGE     IP            NODE    NOMINATED NODE   READINESS GATES
nginx-deployment-bbfdbf4b7-8khd4   1/1     Running   0          3d23h   10.244.2.35   k8s03   <none>           <none>
nginx-deployment-bbfdbf4b7-9g825   1/1     Running   0          3d23h   10.244.1.17   k8s02   <none>           <none>
nginx-deployment-bbfdbf4b7-hsvfg   1/1     Running   0          3d23h   10.244.2.36   k8s03   <none>           <none>
nginx-deployment-bbfdbf4b7-jpt96   1/1     Running   0          3d23h   10.244.2.34   k8s03   <none>           <none>
nginx-deployment-bbfdbf4b7-vlnlk   1/1     Running   0          3d23h   10.244.1.18   k8s02   <none>           <none>
[root@k8s01 ~]# kubectl get deployments
NAME               READY   UP-TO-DATE   AVAILABLE   AGE
nginx-deployment   5/5     5            5           5d15h
[root@k8s01 ~]#

Delete the nginx-deployment resource

[root@k8s01 ~]# kubectl delete deployments.apps nginx-deployment 
deployment.apps "nginx-deployment" deleted
[root@k8s01 ~]# kubectl get pods
No resources found in default namespace.
[root@k8s01 ~]#

Get the node list

[root@k8s01 ~]# kubectl get nodes
NAME    STATUS   ROLES    AGE     VERSION
k8s01   Ready    master   6d15h   v1.18.2
k8s02   Ready    <none>   6d15h   v1.18.2
k8s03   Ready    <none>   6d15h   v1.18.2
k8s04   Ready    <none>   3d23h   v1.18.2
k8s05   Ready    <none>   3d23h   v1.18.2
[root@k8s01 ~]#

Apply the nginx-deployment config file

[root@k8s01 ~]# cat nginx-deployment.yaml 
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-deployment
  labels:
    app: nginx
spec:
  replicas: 2
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
      - name: nginx
        image: nginx:1.17.10
        ports:
        - containerPort: 80
[root@k8s01 ~]# kubectl apply -f nginx-deployment.yaml 
deployment.apps/nginx-deployment created
[root@k8s01 ~]# kubectl get pods -o wide
NAME                               READY   STATUS    RESTARTS   AGE   IP            NODE    NOMINATED NODE   READINESS GATES
nginx-deployment-cc5db57d4-dvr4p   1/1     Running   0          11s   10.244.2.37   k8s03   <none>           <none>
nginx-deployment-cc5db57d4-fnq9c   1/1     Running   0          11s   10.244.3.2    k8s04   <none>           <none>
[root@k8s01 ~]#

Get the nodes' default labels

[root@k8s01 ~]# kubectl get nodes --show-labels 
NAME    STATUS   ROLES    AGE     VERSION   LABELS
k8s01   Ready    master   6d15h   v1.18.2   beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=k8s01,kubernetes.io/os=linux,node-role.kubernetes.io/master=
k8s02   Ready    <none>   6d15h   v1.18.2   beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=k8s02,kubernetes.io/os=linux
k8s03   Ready    <none>   6d15h   v1.18.2   beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=k8s03,kubernetes.io/os=linux
k8s04   Ready    <none>   3d23h   v1.18.2   beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=k8s04,kubernetes.io/os=linux
k8s05   Ready    <none>   3d23h   v1.18.2   beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=k8s05,kubernetes.io/os=linux
[root@k8s01 ~]#

Add a label key-value pair to a specific node

[root@k8s01 ~]# kubectl label nodes k8s05 disktype=ssd
node/k8s05 labeled
[root@k8s01 ~]# kubectl get nodes --show-labels 
NAME    STATUS   ROLES    AGE     VERSION   LABELS
k8s01   Ready    master   6d15h   v1.18.2   beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=k8s01,kubernetes.io/os=linux,node-role.kubernetes.io/master=
k8s02   Ready    <none>   6d15h   v1.18.2   beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=k8s02,kubernetes.io/os=linux
k8s03   Ready    <none>   6d15h   v1.18.2   beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=k8s03,kubernetes.io/os=linux
k8s04   Ready    <none>   3d23h   v1.18.2   beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=k8s04,kubernetes.io/os=linux
k8s05   Ready    <none>   3d23h   v1.18.2   beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,disktype=ssd,kubernetes.io/arch=amd64,kubernetes.io/hostname=k8s05,kubernetes.io/os=linux
[root@k8s01 ~]#

Modify the Deployment config file to add a nodeSelector matching the label

[root@k8s01 ~]# vi nginx-deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-deployment
  labels:
    app: nginx
spec:
  replicas: 6
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
      - name: nginx
        image: nginx:1.17.10
        ports:
        - containerPort: 80
      nodeSelector:
        disktype: ssd

Applying the config file terminates the existing Pods and schedules the new Pods onto node k8s05

[root@k8s01 ~]# kubectl get pods -o wide
NAME                               READY   STATUS              RESTARTS   AGE     IP            NODE    NOMINATED NODE   READINESS GATES
nginx-deployment-cc5db57d4-5lzsz   1/1     Running             0          12s     10.244.3.3    k8s04   <none>           <none>
nginx-deployment-cc5db57d4-dvr4p   1/1     Running             0          9m53s   10.244.2.37   k8s03   <none>           <none>
nginx-deployment-cc5db57d4-fnq9c   1/1     Running             0          9m53s   10.244.3.2    k8s04   <none>           <none>
nginx-deployment-cc5db57d4-hwmk4   1/1     Running             0          12s     10.244.1.19   k8s02   <none>           <none>
nginx-deployment-cc5db57d4-qt26r   1/1     Running             0          12s     10.244.2.38   k8s03   <none>           <none>
nginx-deployment-ddc6847d-4qx2m    0/1     ContainerCreating   0          12s     <none>        k8s05   <none>           <none>
nginx-deployment-ddc6847d-cvhv4    0/1     ContainerCreating   0          12s     <none>        k8s05   <none>           <none>
nginx-deployment-ddc6847d-dcztn    0/1     ContainerCreating   0          12s     <none>        k8s05   <none>           <none>
[root@k8s01 ~]# kubectl get pods -o wide
NAME                               READY   STATUS        RESTARTS   AGE   IP            NODE    NOMINATED NODE   READINESS GATES
nginx-deployment-cc5db57d4-dvr4p   0/1     Terminating   0          10m   10.244.2.37   k8s03   <none>           <none>
nginx-deployment-cc5db57d4-fnq9c   0/1     Terminating   0          10m   10.244.3.2    k8s04   <none>           <none>
nginx-deployment-ddc6847d-26hl9    1/1     Running       0          13s   10.244.5.7    k8s05   <none>           <none>
nginx-deployment-ddc6847d-4qx2m    1/1     Running       0          26s   10.244.5.3    k8s05   <none>           <none>
nginx-deployment-ddc6847d-cvhv4    1/1     Running       0          26s   10.244.5.4    k8s05   <none>           <none>
nginx-deployment-ddc6847d-d6f99    1/1     Running       0          14s   10.244.5.6    k8s05   <none>           <none>
nginx-deployment-ddc6847d-dcztn    1/1     Running       0          26s   10.244.5.5    k8s05   <none>           <none>
nginx-deployment-ddc6847d-dj5x4    1/1     Running       0          12s   10.244.5.8    k8s05   <none>           <none>
[root@k8s01 ~]# kubectl get pods -o wide
NAME                              READY   STATUS    RESTARTS   AGE   IP           NODE    NOMINATED NODE   READINESS GATES
nginx-deployment-ddc6847d-26hl9   1/1     Running   0          21s   10.244.5.7   k8s05   <none>           <none>
nginx-deployment-ddc6847d-4qx2m   1/1     Running   0          34s   10.244.5.3   k8s05   <none>           <none>
nginx-deployment-ddc6847d-cvhv4   1/1     Running   0          34s   10.244.5.4   k8s05   <none>           <none>
nginx-deployment-ddc6847d-d6f99   1/1     Running   0          22s   10.244.5.6   k8s05   <none>           <none>
nginx-deployment-ddc6847d-dcztn   1/1     Running   0          34s   10.244.5.5   k8s05   <none>           <none>
nginx-deployment-ddc6847d-dj5x4   1/1     Running   0          20s   10.244.5.8   k8s05   <none>           <none>
[root@k8s01 ~]#

Remove the label from the node

[root@k8s01 ~]# kubectl label nodes k8s05 disktype-
node/k8s05 labeled
[root@k8s01 ~]# kubectl get nodes --show-labels 
NAME    STATUS   ROLES    AGE     VERSION   LABELS
k8s01   Ready    master   6d15h   v1.18.2   beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=k8s01,kubernetes.io/os=linux,node-role.kubernetes.io/master=
k8s02   Ready    <none>   6d15h   v1.18.2   beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=k8s02,kubernetes.io/os=linux
k8s03   Ready    <none>   6d15h   v1.18.2   beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=k8s03,kubernetes.io/os=linux
k8s04   Ready    <none>   3d23h   v1.18.2   beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=k8s04,kubernetes.io/os=linux
k8s05   Ready    <none>   3d23h   v1.18.2   beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=k8s05,kubernetes.io/os=linux
[root@k8s01 ~]#
4 月 232020
 

Add new nodes to the cluster

172.31.3.209 k8s01
172.31.8.132 k8s02
172.31.10.229 k8s03
172.31.1.139 k8s04
172.31.15.1 k8s05

Join the new nodes to the cluster

kubeadm join --token <token> <control-plane-host>:<control-plane-port> --discovery-token-ca-cert-hash sha256:<hash>

A token generated on the master node is valid for 24 hours; once it expires, a new one must be created.
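
The default TTL can also be overridden when the token is created (a sketch; --ttl takes a Go-style duration, and 0 means the token never expires):

kubeadm token create --ttl 48h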

List the existing tokens

[root@k8s01 ~]# kubeadm token list
TOKEN                     TTL         EXPIRES                USAGES                   DESCRIPTION                                                EXTRA GROUPS
ca673s.97ektx8klpsjfovt   8h          2020-04-23T10:35:25Z   authentication,signing   <none>                                                     system:bootstrappers:kubeadm:default-node-token
qxycbf.ri8i2zygahp5je8m   8h          2020-04-23T10:35:43Z   authentication,signing   <none>                                                     system:bootstrappers:kubeadm:default-node-token
[root@k8s01 ~]#

Generate a new token

[root@k8s01 ~]# kubeadm token create
W0423 02:26:28.166475    9469 configset.go:202] WARNING: kubeadm cannot validate component configs for API groups [kubelet.config.k8s.io kubeproxy.config.k8s.io]
lf1qej.q4wq7xo23xigg672
[root@k8s01 ~]# kubeadm token list
TOKEN                     TTL         EXPIRES                USAGES                   DESCRIPTION                                                EXTRA GROUPS
ca673s.97ektx8klpsjfovt   8h          2020-04-23T10:35:25Z   authentication,signing   <none>                                                     system:bootstrappers:kubeadm:default-node-token
lf1qej.q4wq7xo23xigg672   23h         2020-04-24T02:26:28Z   authentication,signing   <none>                                                     system:bootstrappers:kubeadm:default-node-token
qxycbf.ri8i2zygahp5je8m   8h          2020-04-23T10:35:43Z   authentication,signing   <none>                                                     system:bootstrappers:kubeadm:default-node-token
[root@k8s01 ~]#

Regenerate the CA certificate hash (this value does not change)

openssl x509 -pubkey -in /etc/kubernetes/pki/ca.crt | openssl rsa -pubin -outform der 2>/dev/null | \
openssl dgst -sha256 -hex | sed 's/^.* //'

[root@k8s01 ~]# openssl x509 -pubkey -in /etc/kubernetes/pki/ca.crt | openssl rsa -pubin -outform der 2>/dev/null | \
> openssl dgst -sha256 -hex | sed 's/^.* //'
d435ee7f3795a10b58762be903a78a99c719e3520fb029d718505095b37e9859
[root@k8s01 ~]#

Node 4 joins

[root@k8s04 ~]# kubeadm join --token lf1qej.q4wq7xo23xigg672 172.31.14.12:6443 --discovery-token-ca-cert-hash sha256:d435ee7f3795a10b58762be903a78a99c719e3520fb029d718505095b37e9859
W0423 02:28:44.283472 19177 join.go:346] [preflight] WARNING: JoinControlPane.controlPlane settings will be ignored when control-plane flag is not set.
[preflight] Running pre-flight checks
[preflight] Reading configuration from the cluster...
[preflight] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -oyaml'
[kubelet-start] Downloading configuration for the kubelet from the "kubelet-config-1.18" ConfigMap in the kube-system namespace
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Starting the kubelet
[kubelet-start] Waiting for the kubelet to perform the TLS Bootstrap...

This node has joined the cluster:
* Certificate signing request was sent to apiserver and a response was received.
* The Kubelet was informed of the new secure connection details.

Run 'kubectl get nodes' on the control-plane to see this node join the cluster.

[root@k8s04 ~]#

Node 5 joins

[root@k8s05 ~]# kubeadm join --token lf1qej.q4wq7xo23xigg672 172.31.14.12:6443 --discovery-token-ca-cert-hash sha256:d435ee7f3795a10b58762be903a78a99c719e3520fb029d718505095b37e9859
W0423 02:28:51.716851 19271 join.go:346] [preflight] WARNING: JoinControlPane.controlPlane settings will be ignored when control-plane flag is not set.
[preflight] Running pre-flight checks
[preflight] Reading configuration from the cluster...
[preflight] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -oyaml'
[kubelet-start] Downloading configuration for the kubelet from the "kubelet-config-1.18" ConfigMap in the kube-system namespace
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Starting the kubelet
[kubelet-start] Waiting for the kubelet to perform the TLS Bootstrap...

This node has joined the cluster:
* Certificate signing request was sent to apiserver and a response was received.
* The Kubelet was informed of the new secure connection details.

Run 'kubectl get nodes' on the control-plane to see this node join the cluster.

[root@k8s05 ~]#

Get the node list (both nodes joined successfully)

[root@k8s01 ~]# kubectl get nodes -o wide
NAME    STATUS   ROLES    AGE     VERSION   INTERNAL-IP    EXTERNAL-IP   OS-IMAGE                KERNEL-VERSION                CONTAINER-RUNTIME
k8s01   Ready    master   2d16h   v1.18.2   172.31.14.12   <none>        CentOS Linux 7 (Core)   3.10.0-1062.12.1.el7.x86_64   docker://19.3.8
k8s02   Ready    <none>   2d16h   v1.18.2   172.31.3.249   <none>        CentOS Linux 7 (Core)   3.10.0-1062.12.1.el7.x86_64   docker://19.3.8
k8s03   Ready    <none>   2d16h   v1.18.2   172.31.6.113   <none>        CentOS Linux 7 (Core)   3.10.0-1062.12.1.el7.x86_64   docker://19.3.8
k8s04   Ready    <none>   78s     v1.18.2   172.31.1.139   <none>        CentOS Linux 7 (Core)   3.10.0-1062.12.1.el7.x86_64   docker://19.3.8
k8s05   Ready    <none>   70s     v1.18.2   172.31.15.1    <none>        CentOS Linux 7 (Core)   3.10.0-1062.12.1.el7.x86_64   docker://19.3.8
[root@k8s01 ~]#

Create a new token and print the complete join command in one step

[root@k8s01 ~]# kubeadm token list
TOKEN                     TTL         EXPIRES                USAGES                   DESCRIPTION                                                EXTRA GROUPS
ca673s.97ektx8klpsjfovt   7h          2020-04-23T10:35:25Z   authentication,signing   <none>                                                     system:bootstrappers:kubeadm:default-node-token
lf1qej.q4wq7xo23xigg672   23h         2020-04-24T02:26:28Z   authentication,signing   <none>                                                     system:bootstrappers:kubeadm:default-node-token
qxycbf.ri8i2zygahp5je8m   7h          2020-04-23T10:35:43Z   authentication,signing   <none>                                                     system:bootstrappers:kubeadm:default-node-token
[root@k8s01 ~]# kubeadm token create --print-join-command
W0423 02:41:47.487117   15377 configset.go:202] WARNING: kubeadm cannot validate component configs for API groups [kubelet.config.k8s.io kubeproxy.config.k8s.io]
kubeadm join 172.31.14.12:6443 --token vc6toc.jhhp9jatexn4ed7m     --discovery-token-ca-cert-hash sha256:d435ee7f3795a10b58762be903a78a99c719e3520fb029d718505095b37e9859 
[root@k8s01 ~]# kubeadm token list
TOKEN                     TTL         EXPIRES                USAGES                   DESCRIPTION                                                EXTRA GROUPS
ca673s.97ektx8klpsjfovt   7h          2020-04-23T10:35:25Z   authentication,signing   <none>                                                     system:bootstrappers:kubeadm:default-node-token
lf1qej.q4wq7xo23xigg672   23h         2020-04-24T02:26:28Z   authentication,signing   <none>                                                     system:bootstrappers:kubeadm:default-node-token
qxycbf.ri8i2zygahp5je8m   7h          2020-04-23T10:35:43Z   authentication,signing   <none>                                                     system:bootstrappers:kubeadm:default-node-token
vc6toc.jhhp9jatexn4ed7m   23h         2020-04-24T02:41:47Z   authentication,signing   <none>                                                     system:bootstrappers:kubeadm:default-node-token
[root@k8s01 ~]#
4 月 222020
 

Get the Pod list and check which node each Pod runs on

[root@k8s01 ~]# kubectl get pods -o wide
NAME                               READY   STATUS    RESTARTS   AGE   IP            NODE    NOMINATED NODE   READINESS GATES
nginx-deployment-cc5db57d4-5q9lz   1/1     Running   0          22h   10.244.2.17   k8s03   <none>           <none>
nginx-deployment-cc5db57d4-dncbs   1/1     Running   0          22h   10.244.1.10   k8s02   <none>           <none>
nginx-deployment-cc5db57d4-gsp6l   1/1     Running   0          22h   10.244.2.16   k8s03   <none>           <none>
[root@k8s01 ~]#

Change the replica count to 5 and re-apply the Deployment config (scale up)

[root@k8s01 ~]# vi nginx-deployment.yaml
  replicas: 5

[root@k8s01 ~]# kubectl apply -f nginx-deployment.yaml 
deployment.apps/nginx-deployment configured
[root@k8s01 ~]# kubectl get pods -o wide
NAME                               READY   STATUS    RESTARTS   AGE   IP            NODE    NOMINATED NODE   READINESS GATES
nginx-deployment-cc5db57d4-5q9lz   1/1     Running   0          23h   10.244.2.17   k8s03   <none>           <none>
nginx-deployment-cc5db57d4-clrlh   1/1     Running   0          9s    10.244.2.18   k8s03   <none>           <none>
nginx-deployment-cc5db57d4-dncbs   1/1     Running   0          23h   10.244.1.10   k8s02   <none>           <none>
nginx-deployment-cc5db57d4-gsp6l   1/1     Running   0          23h   10.244.2.16   k8s03   <none>           <none>
nginx-deployment-cc5db57d4-ndkr7   1/1     Running   0          9s    10.244.1.11   k8s02   <none>           <none>
[root@k8s01 ~]#
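
The same scale-up can be done imperatively with kubectl scale instead of editing the file (a sketch); note that the next kubectl apply of the YAML will set the replica count back to whatever the file declares:

kubectl scale deployment nginx-deployment --replicas=5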

Change the replica count to 2 and re-apply the Deployment config (scale down)

[root@k8s01 ~]# vi nginx-deployment.yaml
  replicas: 2

[root@k8s01 ~]# kubectl apply -f nginx-deployment.yaml 
deployment.apps/nginx-deployment configured
[root@k8s01 ~]# kubectl get pods -o wide
NAME                               READY   STATUS        RESTARTS   AGE     IP            NODE    NOMINATED NODE   READINESS GATES
nginx-deployment-cc5db57d4-clrlh   0/1     Terminating   0          4m50s   10.244.2.18   k8s03   <none>           <none>
nginx-deployment-cc5db57d4-dncbs   1/1     Running       0          23h     10.244.1.10   k8s02   <none>           <none>
nginx-deployment-cc5db57d4-gsp6l   0/1     Terminating   0          23h     10.244.2.16   k8s03   <none>           <none>
nginx-deployment-cc5db57d4-ndkr7   1/1     Running       0          4m50s   10.244.1.11   k8s02   <none>           <none>
[root@k8s01 ~]# 

[root@k8s01 ~]# kubectl get pods -o wide
NAME                               READY   STATUS    RESTARTS   AGE   IP            NODE    NOMINATED NODE   READINESS GATES
nginx-deployment-cc5db57d4-dncbs   1/1     Running   0          23h   10.244.1.10   k8s02   <none>           <none>
nginx-deployment-cc5db57d4-ndkr7   1/1     Running   0          22m   10.244.1.11   k8s02   <none>           <none>
[root@k8s01 ~]#