April 29, 2020

List all kubectl versions available from the configured yum repository

[root@k8s01 ~]# yum list kubectl --showduplicates|grep kubectl.x86_64
kubectl.x86_64                       1.18.2-0                        @kubernetes
kubectl.x86_64                       1.5.4-0                         kubernetes 
kubectl.x86_64                       1.6.0-0                         kubernetes 
kubectl.x86_64                       1.6.1-0                         kubernetes 
kubectl.x86_64                       1.6.2-0                         kubernetes 
kubectl.x86_64                       1.6.3-0                         kubernetes 
kubectl.x86_64                       1.6.4-0                         kubernetes 
kubectl.x86_64                       1.6.5-0                         kubernetes 
kubectl.x86_64                       1.6.6-0                         kubernetes 
kubectl.x86_64                       1.6.7-0                         kubernetes 
kubectl.x86_64                       1.6.8-0                         kubernetes 
kubectl.x86_64                       1.6.9-0                         kubernetes 
kubectl.x86_64                       1.6.10-0                        kubernetes 
kubectl.x86_64                       1.6.11-0                        kubernetes 
kubectl.x86_64                       1.6.12-0                        kubernetes 
kubectl.x86_64                       1.6.13-0                        kubernetes 
kubectl.x86_64                       1.7.0-0                         kubernetes 
kubectl.x86_64                       1.7.1-0                         kubernetes 
kubectl.x86_64                       1.7.2-0                         kubernetes 
kubectl.x86_64                       1.7.3-1                         kubernetes 
kubectl.x86_64                       1.7.4-0                         kubernetes 
kubectl.x86_64                       1.7.5-0                         kubernetes 
kubectl.x86_64                       1.7.6-1                         kubernetes 
kubectl.x86_64                       1.7.7-1                         kubernetes 
kubectl.x86_64                       1.7.8-1                         kubernetes 
kubectl.x86_64                       1.7.9-0                         kubernetes 
kubectl.x86_64                       1.7.10-0                        kubernetes 
kubectl.x86_64                       1.7.11-0                        kubernetes 
kubectl.x86_64                       1.7.14-0                        kubernetes 
kubectl.x86_64                       1.7.15-0                        kubernetes 
kubectl.x86_64                       1.7.16-0                        kubernetes 
kubectl.x86_64                       1.8.0-0                         kubernetes 
kubectl.x86_64                       1.8.1-0                         kubernetes 
kubectl.x86_64                       1.8.2-0                         kubernetes 
kubectl.x86_64                       1.8.3-0                         kubernetes 
kubectl.x86_64                       1.8.4-0                         kubernetes 
kubectl.x86_64                       1.8.5-0                         kubernetes 
kubectl.x86_64                       1.8.6-0                         kubernetes 
kubectl.x86_64                       1.8.7-0                         kubernetes 
kubectl.x86_64                       1.8.8-0                         kubernetes 
kubectl.x86_64                       1.8.9-0                         kubernetes 
kubectl.x86_64                       1.8.10-0                        kubernetes 
kubectl.x86_64                       1.8.11-0                        kubernetes 
kubectl.x86_64                       1.8.12-0                        kubernetes 
kubectl.x86_64                       1.8.13-0                        kubernetes 
kubectl.x86_64                       1.8.14-0                        kubernetes 
kubectl.x86_64                       1.8.15-0                        kubernetes 
kubectl.x86_64                       1.9.0-0                         kubernetes 
kubectl.x86_64                       1.9.1-0                         kubernetes 
kubectl.x86_64                       1.9.2-0                         kubernetes 
kubectl.x86_64                       1.9.3-0                         kubernetes 
kubectl.x86_64                       1.9.4-0                         kubernetes 
kubectl.x86_64                       1.9.5-0                         kubernetes 
kubectl.x86_64                       1.9.6-0                         kubernetes 
kubectl.x86_64                       1.9.7-0                         kubernetes 
kubectl.x86_64                       1.9.8-0                         kubernetes 
kubectl.x86_64                       1.9.9-0                         kubernetes 
kubectl.x86_64                       1.9.10-0                        kubernetes 
kubectl.x86_64                       1.9.11-0                        kubernetes 
kubectl.x86_64                       1.10.0-0                        kubernetes 
kubectl.x86_64                       1.10.1-0                        kubernetes 
kubectl.x86_64                       1.10.2-0                        kubernetes 
kubectl.x86_64                       1.10.3-0                        kubernetes 
kubectl.x86_64                       1.10.4-0                        kubernetes 
kubectl.x86_64                       1.10.5-0                        kubernetes 
kubectl.x86_64                       1.10.6-0                        kubernetes 
kubectl.x86_64                       1.10.7-0                        kubernetes 
kubectl.x86_64                       1.10.8-0                        kubernetes 
kubectl.x86_64                       1.10.9-0                        kubernetes 
kubectl.x86_64                       1.10.10-0                       kubernetes 
kubectl.x86_64                       1.10.11-0                       kubernetes 
kubectl.x86_64                       1.10.12-0                       kubernetes 
kubectl.x86_64                       1.10.13-0                       kubernetes 
kubectl.x86_64                       1.11.0-0                        kubernetes 
kubectl.x86_64                       1.11.1-0                        kubernetes 
kubectl.x86_64                       1.11.2-0                        kubernetes 
kubectl.x86_64                       1.11.3-0                        kubernetes 
kubectl.x86_64                       1.11.4-0                        kubernetes 
kubectl.x86_64                       1.11.5-0                        kubernetes 
kubectl.x86_64                       1.11.6-0                        kubernetes 
kubectl.x86_64                       1.11.7-0                        kubernetes 
kubectl.x86_64                       1.11.8-0                        kubernetes 
kubectl.x86_64                       1.11.9-0                        kubernetes 
kubectl.x86_64                       1.11.10-0                       kubernetes 
kubectl.x86_64                       1.12.0-0                        kubernetes 
kubectl.x86_64                       1.12.1-0                        kubernetes 
kubectl.x86_64                       1.12.2-0                        kubernetes 
kubectl.x86_64                       1.12.3-0                        kubernetes 
kubectl.x86_64                       1.12.4-0                        kubernetes 
kubectl.x86_64                       1.12.5-0                        kubernetes 
kubectl.x86_64                       1.12.6-0                        kubernetes 
kubectl.x86_64                       1.12.7-0                        kubernetes 
kubectl.x86_64                       1.12.8-0                        kubernetes 
kubectl.x86_64                       1.12.9-0                        kubernetes 
kubectl.x86_64                       1.12.10-0                       kubernetes 
kubectl.x86_64                       1.13.0-0                        kubernetes 
kubectl.x86_64                       1.13.1-0                        kubernetes 
kubectl.x86_64                       1.13.2-0                        kubernetes 
kubectl.x86_64                       1.13.3-0                        kubernetes 
kubectl.x86_64                       1.13.4-0                        kubernetes 
kubectl.x86_64                       1.13.5-0                        kubernetes 
kubectl.x86_64                       1.13.6-0                        kubernetes 
kubectl.x86_64                       1.13.7-0                        kubernetes 
kubectl.x86_64                       1.13.8-0                        kubernetes 
kubectl.x86_64                       1.13.9-0                        kubernetes 
kubectl.x86_64                       1.13.10-0                       kubernetes 
kubectl.x86_64                       1.13.11-0                       kubernetes 
kubectl.x86_64                       1.13.12-0                       kubernetes 
kubectl.x86_64                       1.14.0-0                        kubernetes 
kubectl.x86_64                       1.14.1-0                        kubernetes 
kubectl.x86_64                       1.14.2-0                        kubernetes 
kubectl.x86_64                       1.14.3-0                        kubernetes 
kubectl.x86_64                       1.14.4-0                        kubernetes 
kubectl.x86_64                       1.14.5-0                        kubernetes 
kubectl.x86_64                       1.14.6-0                        kubernetes 
kubectl.x86_64                       1.14.7-0                        kubernetes 
kubectl.x86_64                       1.14.8-0                        kubernetes 
kubectl.x86_64                       1.14.9-0                        kubernetes 
kubectl.x86_64                       1.14.10-0                       kubernetes 
kubectl.x86_64                       1.15.0-0                        kubernetes 
kubectl.x86_64                       1.15.1-0                        kubernetes 
kubectl.x86_64                       1.15.2-0                        kubernetes 
kubectl.x86_64                       1.15.3-0                        kubernetes 
kubectl.x86_64                       1.15.4-0                        kubernetes 
kubectl.x86_64                       1.15.5-0                        kubernetes 
kubectl.x86_64                       1.15.6-0                        kubernetes 
kubectl.x86_64                       1.15.7-0                        kubernetes 
kubectl.x86_64                       1.15.8-0                        kubernetes 
kubectl.x86_64                       1.15.9-0                        kubernetes 
kubectl.x86_64                       1.15.10-0                       kubernetes 
kubectl.x86_64                       1.15.11-0                       kubernetes 
kubectl.x86_64                       1.16.0-0                        kubernetes 
kubectl.x86_64                       1.16.1-0                        kubernetes 
kubectl.x86_64                       1.16.2-0                        kubernetes 
kubectl.x86_64                       1.16.3-0                        kubernetes 
kubectl.x86_64                       1.16.4-0                        kubernetes 
kubectl.x86_64                       1.16.5-0                        kubernetes 
kubectl.x86_64                       1.16.6-0                        kubernetes 
kubectl.x86_64                       1.16.7-0                        kubernetes 
kubectl.x86_64                       1.16.8-0                        kubernetes 
kubectl.x86_64                       1.16.9-0                        kubernetes 
kubectl.x86_64                       1.17.0-0                        kubernetes 
kubectl.x86_64                       1.17.1-0                        kubernetes 
kubectl.x86_64                       1.17.2-0                        kubernetes 
kubectl.x86_64                       1.17.3-0                        kubernetes 
kubectl.x86_64                       1.17.4-0                        kubernetes 
kubectl.x86_64                       1.17.5-0                        kubernetes 
kubectl.x86_64                       1.18.0-0                        kubernetes 
kubectl.x86_64                       1.18.1-0                        kubernetes 
kubectl.x86_64                       1.18.2-0                        kubernetes 
[root@k8s01 ~]# 
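
To pin a specific build from the list above instead of the latest, the version can be given explicitly; a minimal sketch, assuming the same kubernetes yum repo is configured (--disableexcludes is only needed if the repo entry excludes the kube* packages):

yum install -y kubectl-1.16.9-0 --disableexcludes=kubernetes
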
April 27, 2020
 

Unlike a Deployment, a DaemonSet runs exactly one replica on each node, typically to provide a node-level daemon service.

Inspect the DaemonSet-type system components (kube-proxy and kube-flannel-ds-amd64)

List the DaemonSets in the kube-system namespace

[root@k8s01 ~]# kubectl get daemonsets.apps --namespace=kube-system 
NAME                      DESIRED   CURRENT   READY   UP-TO-DATE   AVAILABLE   NODE SELECTOR            AGE
kube-flannel-ds-amd64     5         5         5       5            5           <none>                   6d16h
kube-flannel-ds-arm       0         0         0       0            0           <none>                   6d16h
kube-flannel-ds-arm64     0         0         0       0            0           <none>                   6d16h
kube-flannel-ds-ppc64le   0         0         0       0            0           <none>                   6d16h
kube-flannel-ds-s390x     0         0         0       0            0           <none>                   6d16h
kube-proxy                5         5         5       5            5           kubernetes.io/os=linux   6d16h
[root@k8s01 ~]#

List pod details in the kube-system namespace (every node runs one replica of each DaemonSet pod)

[root@k8s01 ~]# kubectl get pods --namespace=kube-system -o wide
NAME                            READY   STATUS    RESTARTS   AGE     IP             NODE    NOMINATED NODE   READINESS GATES
coredns-66bff467f8-5x8nf        1/1     Running   0          6d16h   10.244.1.2     k8s02   <none>           <none>
coredns-66bff467f8-mgcd2        1/1     Running   0          6d16h   10.244.0.2     k8s01   <none>           <none>
etcd-k8s01                      1/1     Running   0          6d16h   172.31.14.12   k8s01   <none>           <none>
kube-apiserver-k8s01            1/1     Running   0          6d16h   172.31.14.12   k8s01   <none>           <none>
kube-controller-manager-k8s01   1/1     Running   0          6d16h   172.31.14.12   k8s01   <none>           <none>
kube-flannel-ds-amd64-4ngbr     1/1     Running   0          6d16h   172.31.6.113   k8s03   <none>           <none>
kube-flannel-ds-amd64-j9qmh     1/1     Running   0          4d      172.31.1.139   k8s04   <none>           <none>
kube-flannel-ds-amd64-kmw29     1/1     Running   0          6d16h   172.31.3.249   k8s02   <none>           <none>
kube-flannel-ds-amd64-l57kp     1/1     Running   0          6d16h   172.31.14.12   k8s01   <none>           <none>
kube-flannel-ds-amd64-rr8sv     1/1     Running   1          4d      172.31.15.1    k8s05   <none>           <none>
kube-proxy-22fd2                1/1     Running   0          6d16h   172.31.3.249   k8s02   <none>           <none>
kube-proxy-97hft                1/1     Running   0          4d      172.31.1.139   k8s04   <none>           <none>
kube-proxy-jwwp2                1/1     Running   0          6d16h   172.31.6.113   k8s03   <none>           <none>
kube-proxy-mw6xf                1/1     Running   0          4d      172.31.15.1    k8s05   <none>           <none>
kube-proxy-wnf4q                1/1     Running   0          6d16h   172.31.14.12   k8s01   <none>           <none>
kube-scheduler-k8s01            1/1     Running   0          6d16h   172.31.14.12   k8s01   <none>           <none>
[root@k8s01 ~]#

Review the DaemonSet definition in the flannel manifest (excerpt, lines 134-226 of kube-flannel.yml)

[root@k8s01 ~]# vi kube-flannel.yml
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: kube-flannel-ds-amd64
  namespace: kube-system
  labels:
    tier: node
    app: flannel
spec:
  selector:
    matchLabels:
      app: flannel
  template:
    metadata:
      labels:
        tier: node
        app: flannel
    spec:
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
              - matchExpressions:
                  - key: kubernetes.io/os
                    operator: In
                    values:
                      - linux
                  - key: kubernetes.io/arch
                    operator: In
                    values:
                      - amd64
      hostNetwork: true
      tolerations:
      - operator: Exists
        effect: NoSchedule
      serviceAccountName: flannel
      initContainers:
      - name: install-cni
        image: quay.io/coreos/flannel:v0.12.0-amd64
        command:
        - cp
        args:
        - -f
        - /etc/kube-flannel/cni-conf.json
        - /etc/cni/net.d/10-flannel.conflist
        volumeMounts:
        - name: cni
          mountPath: /etc/cni/net.d
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      containers:
      - name: kube-flannel
        image: quay.io/coreos/flannel:v0.12.0-amd64
        command:
        - /opt/bin/flanneld
        args:
        - --ip-masq
        - --kube-subnet-mgr
        resources:
          requests:
            cpu: "100m"
            memory: "50Mi"
          limits:
            cpu: "100m"
            memory: "50Mi"
        securityContext:
          privileged: false
          capabilities:
            add: ["NET_ADMIN"]
        env:
        - name: POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        volumeMounts:
        - name: run
          mountPath: /run/flannel
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      volumes:
        - name: run
          hostPath:
            path: /run/flannel
        - name: cni
          hostPath:
            path: /etc/cni/net.d
        - name: flannel-cfg
          configMap:
            name: kube-flannel-cfg
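
The nodeAffinity above restricts this DaemonSet to linux/amd64 nodes, which is why the arm, arm64, ppc64le and s390x variants report 0 desired pods on this cluster. A quick sketch for confirming which nodes match and that the rollout is healthy:

kubectl get nodes -l kubernetes.io/os=linux,kubernetes.io/arch=amd64
kubectl -n kube-system rollout status daemonset/kube-flannel-ds-amd64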

Run a DaemonSet resource of our own (a Fluentd log-collection agent)

[root@k8s01 ~]# vi daemonset.yaml
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: fluentd-elasticsearch
  namespace: kube-system
  labels:
    k8s-app: fluentd-logging
spec:
  selector:
    matchLabels:
      name: fluentd-elasticsearch
  template:
    metadata:
      labels:
        name: fluentd-elasticsearch
    spec:
      tolerations:
      # this toleration is to have the daemonset runnable on master nodes
      # remove it if your masters can't run pods
      - key: node-role.kubernetes.io/master
        effect: NoSchedule
      containers:
      - name: fluentd-elasticsearch
        image: quay.io/fluentd_elasticsearch/fluentd:v2.5.2
        resources:
          limits:
            memory: 200Mi
          requests:
            cpu: 100m
            memory: 200Mi
        volumeMounts:
        - name: varlog
          mountPath: /var/log
        - name: varlibdockercontainers
          mountPath: /var/lib/docker/containers
          readOnly: true
      terminationGracePeriodSeconds: 30
      volumes:
      - name: varlog
        hostPath:
          path: /var/log
      - name: varlibdockercontainers
        hostPath:
          path: /var/lib/docker/containers

Apply the manifest

[root@k8s01 ~]# kubectl apply -f daemonset.yaml 
daemonset.apps/fluentd-elasticsearch created
[root@k8s01 ~]# kubectl get daemonsets.apps 
No resources found in default namespace.
[root@k8s01 ~]# kubectl get daemonsets.apps --namespace=kube-system 
NAME                      DESIRED   CURRENT   READY   UP-TO-DATE   AVAILABLE   NODE SELECTOR            AGE
fluentd-elasticsearch     5         5         5       5            5           <none>                   28s
kube-flannel-ds-amd64     5         5         5       5            5           <none>                   6d18h
kube-flannel-ds-arm       0         0         0       0            0           <none>                   6d18h
kube-flannel-ds-arm64     0         0         0       0            0           <none>                   6d18h
kube-flannel-ds-ppc64le   0         0         0       0            0           <none>                   6d18h
kube-flannel-ds-s390x     0         0         0       0            0           <none>                   6d18h
kube-proxy                5         5         5       5            5           kubernetes.io/os=linux   6d18h
[root@k8s01 ~]#

List the pods in the kube-system namespace (a fluentd-elasticsearch pod is now running on every node)

[root@k8s01 ~]# kubectl get pods --namespace=kube-system -o wide
NAME                            READY   STATUS    RESTARTS   AGE     IP             NODE    NOMINATED NODE   READINESS GATES
coredns-66bff467f8-5x8nf        1/1     Running   0          6d18h   10.244.1.2     k8s02   <none>           <none>
coredns-66bff467f8-mgcd2        1/1     Running   0          6d18h   10.244.0.2     k8s01   <none>           <none>
etcd-k8s01                      1/1     Running   0          6d18h   172.31.14.12   k8s01   <none>           <none>
fluentd-elasticsearch-64c2h     1/1     Running   0          84s     10.244.5.9     k8s05   <none>           <none>
fluentd-elasticsearch-f8989     1/1     Running   0          84s     10.244.0.3     k8s01   <none>           <none>
fluentd-elasticsearch-lcgn7     1/1     Running   0          84s     10.244.3.4     k8s04   <none>           <none>
fluentd-elasticsearch-ss2zm     1/1     Running   0          84s     10.244.1.20    k8s02   <none>           <none>
fluentd-elasticsearch-wkd45     1/1     Running   0          84s     10.244.2.39    k8s03   <none>           <none>
kube-apiserver-k8s01            1/1     Running   0          6d18h   172.31.14.12   k8s01   <none>           <none>
kube-controller-manager-k8s01   1/1     Running   0          6d18h   172.31.14.12   k8s01   <none>           <none>
kube-flannel-ds-amd64-4ngbr     1/1     Running   0          6d18h   172.31.6.113   k8s03   <none>           <none>
kube-flannel-ds-amd64-j9qmh     1/1     Running   0          4d2h    172.31.1.139   k8s04   <none>           <none>
kube-flannel-ds-amd64-kmw29     1/1     Running   0          6d18h   172.31.3.249   k8s02   <none>           <none>
kube-flannel-ds-amd64-l57kp     1/1     Running   0          6d18h   172.31.14.12   k8s01   <none>           <none>
kube-flannel-ds-amd64-rr8sv     1/1     Running   1          4d2h    172.31.15.1    k8s05   <none>           <none>
kube-proxy-22fd2                1/1     Running   0          6d18h   172.31.3.249   k8s02   <none>           <none>
kube-proxy-97hft                1/1     Running   0          4d2h    172.31.1.139   k8s04   <none>           <none>
kube-proxy-jwwp2                1/1     Running   0          6d18h   172.31.6.113   k8s03   <none>           <none>
kube-proxy-mw6xf                1/1     Running   0          4d2h    172.31.15.1    k8s05   <none>           <none>
kube-proxy-wnf4q                1/1     Running   0          6d18h   172.31.14.12   k8s01   <none>           <none>
kube-scheduler-k8s01            1/1     Running   0          6d18h   172.31.14.12   k8s01   <none>           <none>
[root@k8s01 ~]#
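
To clean up the example when finished, deleting the DaemonSet removes its pod from every node; a sketch:

kubectl -n kube-system delete daemonset fluentd-elasticsearch
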
April 27, 2020
 

List the current pods and the nodes they run on

[root@k8s01 ~]# kubectl get pods -o wide
NAME                               READY   STATUS    RESTARTS   AGE     IP            NODE    NOMINATED NODE   READINESS GATES
nginx-deployment-bbfdbf4b7-8khd4   1/1     Running   0          3d23h   10.244.2.35   k8s03   <none>           <none>
nginx-deployment-bbfdbf4b7-9g825   1/1     Running   0          3d23h   10.244.1.17   k8s02   <none>           <none>
nginx-deployment-bbfdbf4b7-hsvfg   1/1     Running   0          3d23h   10.244.2.36   k8s03   <none>           <none>
nginx-deployment-bbfdbf4b7-jpt96   1/1     Running   0          3d23h   10.244.2.34   k8s03   <none>           <none>
nginx-deployment-bbfdbf4b7-vlnlk   1/1     Running   0          3d23h   10.244.1.18   k8s02   <none>           <none>
[root@k8s01 ~]# kubectl get deployments
NAME               READY   UP-TO-DATE   AVAILABLE   AGE
nginx-deployment   5/5     5            5           5d15h
[root@k8s01 ~]#

Delete the nginx-deployment resource

[root@k8s01 ~]# kubectl delete deployments.apps nginx-deployment 
deployment.apps "nginx-deployment" deleted
[root@k8s01 ~]# kubectl get pods
No resources found in default namespace.
[root@k8s01 ~]#

List the nodes

[root@k8s01 ~]# kubectl get nodes
NAME    STATUS   ROLES    AGE     VERSION
k8s01   Ready    master   6d15h   v1.18.2
k8s02   Ready    <none>   6d15h   v1.18.2
k8s03   Ready    <none>   6d15h   v1.18.2
k8s04   Ready    <none>   3d23h   v1.18.2
k8s05   Ready    <none>   3d23h   v1.18.2
[root@k8s01 ~]#

Apply the nginx-deployment manifest

[root@k8s01 ~]# cat nginx-deployment.yaml 
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-deployment
  labels:
    app: nginx
spec:
  replicas: 2
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
      - name: nginx
        image: nginx:1.17.10
        ports:
        - containerPort: 80
[root@k8s01 ~]# kubectl apply -f nginx-deployment.yaml 
deployment.apps/nginx-deployment created
[root@k8s01 ~]# kubectl get pods -o wide
NAME                               READY   STATUS    RESTARTS   AGE   IP            NODE    NOMINATED NODE   READINESS GATES
nginx-deployment-cc5db57d4-dvr4p   1/1     Running   0          11s   10.244.2.37   k8s03   <none>           <none>
nginx-deployment-cc5db57d4-fnq9c   1/1     Running   0          11s   10.244.3.2    k8s04   <none>           <none>
[root@k8s01 ~]#

Show the default labels on each node

[root@k8s01 ~]# kubectl get nodes --show-labels 
NAME    STATUS   ROLES    AGE     VERSION   LABELS
k8s01   Ready    master   6d15h   v1.18.2   beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=k8s01,kubernetes.io/os=linux,node-role.kubernetes.io/master=
k8s02   Ready    <none>   6d15h   v1.18.2   beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=k8s02,kubernetes.io/os=linux
k8s03   Ready    <none>   6d15h   v1.18.2   beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=k8s03,kubernetes.io/os=linux
k8s04   Ready    <none>   3d23h   v1.18.2   beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=k8s04,kubernetes.io/os=linux
k8s05   Ready    <none>   3d23h   v1.18.2   beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=k8s05,kubernetes.io/os=linux
[root@k8s01 ~]#

Add a label key/value pair to a specific node

[root@k8s01 ~]# kubectl label nodes k8s05 disktype=ssd
node/k8s05 labeled
[root@k8s01 ~]# kubectl get nodes --show-labels 
NAME    STATUS   ROLES    AGE     VERSION   LABELS
k8s01   Ready    master   6d15h   v1.18.2   beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=k8s01,kubernetes.io/os=linux,node-role.kubernetes.io/master=
k8s02   Ready    <none>   6d15h   v1.18.2   beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=k8s02,kubernetes.io/os=linux
k8s03   Ready    <none>   6d15h   v1.18.2   beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=k8s03,kubernetes.io/os=linux
k8s04   Ready    <none>   3d23h   v1.18.2   beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=k8s04,kubernetes.io/os=linux
k8s05   Ready    <none>   3d23h   v1.18.2   beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,disktype=ssd,kubernetes.io/arch=amd64,kubernetes.io/hostname=k8s05,kubernetes.io/os=linux
[root@k8s01 ~]#

Edit the Deployment manifest: raise replicas to 6 and add a nodeSelector matching the new label

[root@k8s01 ~]# vi nginx-deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-deployment
  labels:
    app: nginx
spec:
  replicas: 6
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
      - name: nginx
        image: nginx:1.17.10
        ports:
        - containerPort: 80
      nodeSelector:
        disktype: ssd

Apply the manifest: the existing pods are destroyed and the new pods are scheduled onto node k8s05

[root@k8s01 ~]# kubectl get pods -o wide
NAME                               READY   STATUS              RESTARTS   AGE     IP            NODE    NOMINATED NODE   READINESS GATES
nginx-deployment-cc5db57d4-5lzsz   1/1     Running             0          12s     10.244.3.3    k8s04   <none>           <none>
nginx-deployment-cc5db57d4-dvr4p   1/1     Running             0          9m53s   10.244.2.37   k8s03   <none>           <none>
nginx-deployment-cc5db57d4-fnq9c   1/1     Running             0          9m53s   10.244.3.2    k8s04   <none>           <none>
nginx-deployment-cc5db57d4-hwmk4   1/1     Running             0          12s     10.244.1.19   k8s02   <none>           <none>
nginx-deployment-cc5db57d4-qt26r   1/1     Running             0          12s     10.244.2.38   k8s03   <none>           <none>
nginx-deployment-ddc6847d-4qx2m    0/1     ContainerCreating   0          12s     <none>        k8s05   <none>           <none>
nginx-deployment-ddc6847d-cvhv4    0/1     ContainerCreating   0          12s     <none>        k8s05   <none>           <none>
nginx-deployment-ddc6847d-dcztn    0/1     ContainerCreating   0          12s     <none>        k8s05   <none>           <none>
[root@k8s01 ~]# kubectl get pods -o wide
NAME                               READY   STATUS        RESTARTS   AGE   IP            NODE    NOMINATED NODE   READINESS GATES
nginx-deployment-cc5db57d4-dvr4p   0/1     Terminating   0          10m   10.244.2.37   k8s03   <none>           <none>
nginx-deployment-cc5db57d4-fnq9c   0/1     Terminating   0          10m   10.244.3.2    k8s04   <none>           <none>
nginx-deployment-ddc6847d-26hl9    1/1     Running       0          13s   10.244.5.7    k8s05   <none>           <none>
nginx-deployment-ddc6847d-4qx2m    1/1     Running       0          26s   10.244.5.3    k8s05   <none>           <none>
nginx-deployment-ddc6847d-cvhv4    1/1     Running       0          26s   10.244.5.4    k8s05   <none>           <none>
nginx-deployment-ddc6847d-d6f99    1/1     Running       0          14s   10.244.5.6    k8s05   <none>           <none>
nginx-deployment-ddc6847d-dcztn    1/1     Running       0          26s   10.244.5.5    k8s05   <none>           <none>
nginx-deployment-ddc6847d-dj5x4    1/1     Running       0          12s   10.244.5.8    k8s05   <none>           <none>
[root@k8s01 ~]# kubectl get pods -o wide
NAME                              READY   STATUS    RESTARTS   AGE   IP           NODE    NOMINATED NODE   READINESS GATES
nginx-deployment-ddc6847d-26hl9   1/1     Running   0          21s   10.244.5.7   k8s05   <none>           <none>
nginx-deployment-ddc6847d-4qx2m   1/1     Running   0          34s   10.244.5.3   k8s05   <none>           <none>
nginx-deployment-ddc6847d-cvhv4   1/1     Running   0          34s   10.244.5.4   k8s05   <none>           <none>
nginx-deployment-ddc6847d-d6f99   1/1     Running   0          22s   10.244.5.6   k8s05   <none>           <none>
nginx-deployment-ddc6847d-dcztn   1/1     Running   0          34s   10.244.5.5   k8s05   <none>           <none>
nginx-deployment-ddc6847d-dj5x4   1/1     Running   0          20s   10.244.5.8   k8s05   <none>           <none>
[root@k8s01 ~]#
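
As a cross-check, pods can also be filtered by the node they were scheduled onto; a sketch:

kubectl get pods --field-selector spec.nodeName=k8s05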

Remove the label (nodeSelector is only evaluated at scheduling time, so the pods already running on k8s05 are not evicted)

[root@k8s01 ~]# kubectl label nodes k8s05 disktype-
node/k8s05 labeled
[root@k8s01 ~]# kubectl get nodes --show-labels 
NAME    STATUS   ROLES    AGE     VERSION   LABELS
k8s01   Ready    master   6d15h   v1.18.2   beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=k8s01,kubernetes.io/os=linux,node-role.kubernetes.io/master=
k8s02   Ready    <none>   6d15h   v1.18.2   beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=k8s02,kubernetes.io/os=linux
k8s03   Ready    <none>   6d15h   v1.18.2   beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=k8s03,kubernetes.io/os=linux
k8s04   Ready    <none>   3d23h   v1.18.2   beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=k8s04,kubernetes.io/os=linux
k8s05   Ready    <none>   3d23h   v1.18.2   beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=k8s05,kubernetes.io/os=linux
[root@k8s01 ~]#
April 23, 2020
 

Add new nodes (k8s04 and k8s05) to the cluster. Cluster node inventory:

172.31.14.12 k8s01
172.31.3.249 k8s02
172.31.6.113 k8s03
172.31.1.139 k8s04
172.31.15.1 k8s05

Command template for joining a new node to the cluster

kubeadm join --token <token> <control-plane-host>:<control-plane-port> --discovery-token-ca-cert-hash sha256:<hash>

The token generated on the control-plane node is valid for 24 hours; once it expires, a new token must be generated.

List the existing tokens

[root@k8s01 ~]# kubeadm token list
TOKEN                     TTL         EXPIRES                USAGES                   DESCRIPTION                                                EXTRA GROUPS
ca673s.97ektx8klpsjfovt   8h          2020-04-23T10:35:25Z   authentication,signing   <none>                                                     system:bootstrappers:kubeadm:default-node-token
qxycbf.ri8i2zygahp5je8m   8h          2020-04-23T10:35:43Z   authentication,signing   <none>                                                     system:bootstrappers:kubeadm:default-node-token
[root@k8s01 ~]#

Generate a new token

[root@k8s01 ~]# kubeadm token create
W0423 02:26:28.166475    9469 configset.go:202] WARNING: kubeadm cannot validate component configs for API groups [kubelet.config.k8s.io kubeproxy.config.k8s.io]
lf1qej.q4wq7xo23xigg672
[root@k8s01 ~]# kubeadm token list
TOKEN                     TTL         EXPIRES                USAGES                   DESCRIPTION                                                EXTRA GROUPS
ca673s.97ektx8klpsjfovt   8h          2020-04-23T10:35:25Z   authentication,signing   <none>                                                     system:bootstrappers:kubeadm:default-node-token
lf1qej.q4wq7xo23xigg672   23h         2020-04-24T02:26:28Z   authentication,signing   <none>                                                     system:bootstrappers:kubeadm:default-node-token
qxycbf.ri8i2zygahp5je8m   8h          2020-04-23T10:35:43Z   authentication,signing   <none>                                                     system:bootstrappers:kubeadm:default-node-token
[root@k8s01 ~]#

Recompute the CA certificate hash (this value does not change)

openssl x509 -pubkey -in /etc/kubernetes/pki/ca.crt | openssl rsa -pubin -outform der 2>/dev/null | \
openssl dgst -sha256 -hex | sed 's/^.* //'

[root@k8s01 ~]# openssl x509 -pubkey -in /etc/kubernetes/pki/ca.crt | openssl rsa -pubin -outform der 2>/dev/null | \
> openssl dgst -sha256 -hex | sed 's/^.* //'
d435ee7f3795a10b58762be903a78a99c719e3520fb029d718505095b37e9859
[root@k8s01 ~]#

Join node 4

[root@k8s04 ~]# kubeadm join --token lf1qej.q4wq7xo23xigg672 172.31.14.12:6443 --discovery-token-ca-cert-hash sha256:d435ee7f3795a10b58762be903a78a99c719e3520fb029d718505095b37e9859
W0423 02:28:44.283472 19177 join.go:346] [preflight] WARNING: JoinControlPane.controlPlane settings will be ignored when control-plane flag is not set.
[preflight] Running pre-flight checks
[preflight] Reading configuration from the cluster...
[preflight] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -oyaml'
[kubelet-start] Downloading configuration for the kubelet from the "kubelet-config-1.18" ConfigMap in the kube-system namespace
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Starting the kubelet
[kubelet-start] Waiting for the kubelet to perform the TLS Bootstrap...

This node has joined the cluster:
* Certificate signing request was sent to apiserver and a response was received.
* The Kubelet was informed of the new secure connection details.

Run 'kubectl get nodes' on the control-plane to see this node join the cluster.

[root@k8s04 ~]#

Join node 5

[root@k8s05 ~]# kubeadm join --token lf1qej.q4wq7xo23xigg672 172.31.14.12:6443 --discovery-token-ca-cert-hash sha256:d435ee7f3795a10b58762be903a78a99c719e3520fb029d718505095b37e9859
W0423 02:28:51.716851 19271 join.go:346] [preflight] WARNING: JoinControlPane.controlPlane settings will be ignored when control-plane flag is not set.
[preflight] Running pre-flight checks
[preflight] Reading configuration from the cluster...
[preflight] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -oyaml'
[kubelet-start] Downloading configuration for the kubelet from the "kubelet-config-1.18" ConfigMap in the kube-system namespace
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Starting the kubelet
[kubelet-start] Waiting for the kubelet to perform the TLS Bootstrap...

This node has joined the cluster:
* Certificate signing request was sent to apiserver and a response was received.
* The Kubelet was informed of the new secure connection details.

Run 'kubectl get nodes' on the control-plane to see this node join the cluster.

[root@k8s05 ~]#

List the nodes (both joins succeeded)

[root@k8s01 ~]# kubectl get nodes -o wide
NAME    STATUS   ROLES    AGE     VERSION   INTERNAL-IP    EXTERNAL-IP   OS-IMAGE                KERNEL-VERSION                CONTAINER-RUNTIME
k8s01   Ready    master   2d16h   v1.18.2   172.31.14.12   <none>        CentOS Linux 7 (Core)   3.10.0-1062.12.1.el7.x86_64   docker://19.3.8
k8s02   Ready    <none>   2d16h   v1.18.2   172.31.3.249   <none>        CentOS Linux 7 (Core)   3.10.0-1062.12.1.el7.x86_64   docker://19.3.8
k8s03   Ready    <none>   2d16h   v1.18.2   172.31.6.113   <none>        CentOS Linux 7 (Core)   3.10.0-1062.12.1.el7.x86_64   docker://19.3.8
k8s04   Ready    <none>   78s     v1.18.2   172.31.1.139   <none>        CentOS Linux 7 (Core)   3.10.0-1062.12.1.el7.x86_64   docker://19.3.8
k8s05   Ready    <none>   70s     v1.18.2   172.31.15.1    <none>        CentOS Linux 7 (Core)   3.10.0-1062.12.1.el7.x86_64   docker://19.3.8
[root@k8s01 ~]#

Create a new token and print the complete join command in a single step

[root@k8s01 ~]# kubeadm token list
TOKEN                     TTL         EXPIRES                USAGES                   DESCRIPTION                                                EXTRA GROUPS
ca673s.97ektx8klpsjfovt   7h          2020-04-23T10:35:25Z   authentication,signing   <none>                                                     system:bootstrappers:kubeadm:default-node-token
lf1qej.q4wq7xo23xigg672   23h         2020-04-24T02:26:28Z   authentication,signing   <none>                                                     system:bootstrappers:kubeadm:default-node-token
qxycbf.ri8i2zygahp5je8m   7h          2020-04-23T10:35:43Z   authentication,signing   <none>                                                     system:bootstrappers:kubeadm:default-node-token
[root@k8s01 ~]# kubeadm token create --print-join-command
W0423 02:41:47.487117   15377 configset.go:202] WARNING: kubeadm cannot validate component configs for API groups [kubelet.config.k8s.io kubeproxy.config.k8s.io]
kubeadm join 172.31.14.12:6443 --token vc6toc.jhhp9jatexn4ed7m     --discovery-token-ca-cert-hash sha256:d435ee7f3795a10b58762be903a78a99c719e3520fb029d718505095b37e9859 
[root@k8s01 ~]# kubeadm token list
TOKEN                     TTL         EXPIRES                USAGES                   DESCRIPTION                                                EXTRA GROUPS
ca673s.97ektx8klpsjfovt   7h          2020-04-23T10:35:25Z   authentication,signing   <none>                                                     system:bootstrappers:kubeadm:default-node-token
lf1qej.q4wq7xo23xigg672   23h         2020-04-24T02:26:28Z   authentication,signing   <none>                                                     system:bootstrappers:kubeadm:default-node-token
qxycbf.ri8i2zygahp5je8m   7h          2020-04-23T10:35:43Z   authentication,signing   <none>                                                     system:bootstrappers:kubeadm:default-node-token
vc6toc.jhhp9jatexn4ed7m   23h         2020-04-24T02:41:47Z   authentication,signing   <none>                                                     system:bootstrappers:kubeadm:default-node-token
[root@k8s01 ~]#
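
If a longer-lived token is acceptable, the default 24-hour TTL can be overridden at creation time; a sketch (a TTL of 0 creates a token that never expires, so use it with care):

kubeadm token create --ttl 48h --print-join-command
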
April 22, 2020
 

List the pods and the nodes they run on

[root@k8s01 ~]# kubectl get pods -o wide
NAME                               READY   STATUS    RESTARTS   AGE   IP            NODE    NOMINATED NODE   READINESS GATES
nginx-deployment-cc5db57d4-5q9lz   1/1     Running   0          22h   10.244.2.17   k8s03   <none>           <none>
nginx-deployment-cc5db57d4-dncbs   1/1     Running   0          22h   10.244.1.10   k8s02   <none>           <none>
nginx-deployment-cc5db57d4-gsp6l   1/1     Running   0          22h   10.244.2.16   k8s03   <none>           <none>
[root@k8s01 ~]#

Change the replica count to 5 and re-apply the Deployment manifest (scale out)

[root@k8s01 ~]# vi nginx-deployment.yaml
  replicas: 5

[root@k8s01 ~]# kubectl apply -f nginx-deployment.yaml 
deployment.apps/nginx-deployment configured
[root@k8s01 ~]# kubectl get pods -o wide
NAME                               READY   STATUS    RESTARTS   AGE   IP            NODE    NOMINATED NODE   READINESS GATES
nginx-deployment-cc5db57d4-5q9lz   1/1     Running   0          23h   10.244.2.17   k8s03   <none>           <none>
nginx-deployment-cc5db57d4-clrlh   1/1     Running   0          9s    10.244.2.18   k8s03   <none>           <none>
nginx-deployment-cc5db57d4-dncbs   1/1     Running   0          23h   10.244.1.10   k8s02   <none>           <none>
nginx-deployment-cc5db57d4-gsp6l   1/1     Running   0          23h   10.244.2.16   k8s03   <none>           <none>
nginx-deployment-cc5db57d4-ndkr7   1/1     Running   0          9s    10.244.1.11   k8s02   <none>           <none>
[root@k8s01 ~]#

Change the replica count to 2 and re-apply the Deployment manifest (scale in)

[root@k8s01 ~]# vi nginx-deployment.yaml
  replicas: 2

[root@k8s01 ~]# kubectl apply -f nginx-deployment.yaml 
deployment.apps/nginx-deployment configured
[root@k8s01 ~]# kubectl get pods -o wide
NAME                               READY   STATUS        RESTARTS   AGE     IP            NODE    NOMINATED NODE   READINESS GATES
nginx-deployment-cc5db57d4-clrlh   0/1     Terminating   0          4m50s   10.244.2.18   k8s03   <none>           <none>
nginx-deployment-cc5db57d4-dncbs   1/1     Running       0          23h     10.244.1.10   k8s02   <none>           <none>
nginx-deployment-cc5db57d4-gsp6l   0/1     Terminating   0          23h     10.244.2.16   k8s03   <none>           <none>
nginx-deployment-cc5db57d4-ndkr7   1/1     Running       0          4m50s   10.244.1.11   k8s02   <none>           <none>
[root@k8s01 ~]# 

[root@k8s01 ~]# kubectl get pods -o wide
NAME                               READY   STATUS    RESTARTS   AGE   IP            NODE    NOMINATED NODE   READINESS GATES
nginx-deployment-cc5db57d4-dncbs   1/1     Running   0          23h   10.244.1.10   k8s02   <none>           <none>
nginx-deployment-cc5db57d4-ndkr7   1/1     Running   0          22m   10.244.1.11   k8s02   <none>           <none>
[root@k8s01 ~]#
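
Editing the manifest keeps the YAML file as the source of truth, but the same scaling can be done imperatively; a sketch (the file then no longer matches the live object):

kubectl scale deployment nginx-deployment --replicas=5
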
April 22, 2020
 

Configure the Aliyun public mirror for Kubernetes packages

Configuration for CentOS/RHEL/Fedora

cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
setenforce 0
yum install -y kubelet kubeadm kubectl
systemctl enable kubelet && systemctl start kubelet

Configuration for Debian/Ubuntu

apt-get update && apt-get install -y apt-transport-https
curl https://mirrors.aliyun.com/kubernetes/apt/doc/apt-key.gpg | apt-key add - 
cat <<EOF >/etc/apt/sources.list.d/kubernetes.list
deb https://mirrors.aliyun.com/kubernetes/apt/ kubernetes-xenial main
EOF 
apt-get update
apt-get install -y kubelet kubeadm kubectl

Docker Hub registry mirror (USTC), for Ubuntu 16.04+ / Debian 8+ / CentOS 7

vi /etc/docker/daemon.json

{
  "registry-mirrors": ["https://docker.mirrors.ustc.edu.cn/"]
}
    
sudo systemctl restart docker
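
Whether the mirror took effect can be checked in the daemon's info output; a sketch:

docker info | grep -A1 'Registry Mirrors'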

Learning Deployments in a Kubernetes Container Cluster

April 22, 2020
 

Enable kubectl command auto-completion

[root@k8s01 ~]# yum -y install bash-completion
[root@k8s01 ~]# source /usr/share/bash-completion/bash_completion
[root@k8s01 ~]# echo "source <(kubectl completion bash)" >> ~/.bashrc
[root@k8s01 ~]# exit
logout
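
For zsh users, the analogous one-liner works as well; a sketch:

echo "source <(kubectl completion zsh)" >> ~/.zshrc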

Prepare the Deployment manifest

[root@k8s01 ~]# vi nginx-deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-deployment
  labels:
    app: nginx
spec:
  replicas: 3
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
      - name: nginx
        image: nginx:1.17.10
        ports:
        - containerPort: 80

Apply the Deployment manifest

[root@k8s01 ~]# kubectl apply -f nginx-deployment.yaml 
deployment.apps/nginx-deployment created
[root@k8s01 ~]#

List the Deployments and Pods

[root@k8s01 ~]# kubectl get deployments.apps 
NAME               READY   UP-TO-DATE   AVAILABLE   AGE
nginx-deployment   3/3     3            3           22s
[root@k8s01 ~]# 
[root@k8s01 ~]# kubectl get pods
NAME                               READY   STATUS    RESTARTS   AGE
nginx-deployment-cc5db57d4-5q9lz   1/1     Running   0          39s
nginx-deployment-cc5db57d4-dncbs   1/1     Running   0          39s
nginx-deployment-cc5db57d4-gsp6l   1/1     Running   0          39s
[root@k8s01 ~]#

Describe the Deployment (a Deployment is one type of controller; it manages pods through a ReplicaSet, and the Events section records the ReplicaSet being scaled up)

[root@k8s01 ~]# kubectl describe deployments.apps nginx-deployment 
Name:                   nginx-deployment
Namespace:              default
CreationTimestamp:      Tue, 21 Apr 2020 10:33:38 +0000
Labels:                 app=nginx
Annotations:            deployment.kubernetes.io/revision: 1
Selector:               app=nginx
Replicas:               3 desired | 3 updated | 3 total | 3 available | 0 unavailable
StrategyType:           RollingUpdate
MinReadySeconds:        0
RollingUpdateStrategy:  25% max unavailable, 25% max surge
Pod Template:
  Labels:  app=nginx
  Containers:
   nginx:
    Image:        nginx:1.17.10
    Port:         80/TCP
    Host Port:    0/TCP
    Environment:  <none>
    Mounts:       <none>
  Volumes:        <none>
Conditions:
  Type           Status  Reason
  ----           ------  ------
  Available      True    MinimumReplicasAvailable
  Progressing    True    NewReplicaSetAvailable
OldReplicaSets:  <none>
NewReplicaSet:   nginx-deployment-cc5db57d4 (3/3 replicas created)
Events:
  Type    Reason             Age   From                   Message
  ----    ------             ----  ----                   -------
  Normal  ScalingReplicaSet  85s   deployment-controller  Scaled up replica set nginx-deployment-cc5db57d4 to 3
[root@k8s01 ~]#
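
The RollingUpdateStrategy shown above (25% max unavailable, 25% max surge) will govern future image updates; the revision history behind the deployment.kubernetes.io/revision annotation can be inspected with a sketch like:

kubectl rollout history deployment/nginx-deployment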

List the ReplicaSets (3 replicas are ready; the ReplicaSet's Events record the creation of the 3 pods)

[root@k8s01 ~]# kubectl get replicasets.apps 
NAME                         DESIRED   CURRENT   READY   AGE
nginx-deployment-cc5db57d4   3         3         3       11m
[root@k8s01 ~]#

Describe the ReplicaSet

[root@k8s01 ~]# kubectl describe replicasets.apps nginx-deployment-cc5db57d4 
Name:           nginx-deployment-cc5db57d4
Namespace:      default
Selector:       app=nginx,pod-template-hash=cc5db57d4
Labels:         app=nginx
                pod-template-hash=cc5db57d4
Annotations:    deployment.kubernetes.io/desired-replicas: 3
                deployment.kubernetes.io/max-replicas: 4
                deployment.kubernetes.io/revision: 1
Controlled By:  Deployment/nginx-deployment
Replicas:       3 current / 3 desired
Pods Status:    3 Running / 0 Waiting / 0 Succeeded / 0 Failed
Pod Template:
  Labels:  app=nginx
           pod-template-hash=cc5db57d4
  Containers:
   nginx:
    Image:        nginx:1.17.10
    Port:         80/TCP
    Host Port:    0/TCP
    Environment:  <none>
    Mounts:       <none>
  Volumes:        <none>
Events:
  Type    Reason            Age   From                   Message
  ----    ------            ----  ----                   -------
  Normal  SuccessfulCreate  13m   replicaset-controller  Created pod: nginx-deployment-cc5db57d4-gsp6l
  Normal  SuccessfulCreate  13m   replicaset-controller  Created pod: nginx-deployment-cc5db57d4-5q9lz
  Normal  SuccessfulCreate  13m   replicaset-controller  Created pod: nginx-deployment-cc5db57d4-dncbs
[root@k8s01 ~]#

List the pods (all three replica pods are Running)

[root@k8s01 ~]# kubectl get pods
NAME                               READY   STATUS    RESTARTS   AGE
nginx-deployment-cc5db57d4-5q9lz   1/1     Running   0          15m
nginx-deployment-cc5db57d4-dncbs   1/1     Running   0          15m
nginx-deployment-cc5db57d4-gsp6l   1/1     Running   0          15m
[root@k8s01 ~]#

Describe a pod (Controlled By shows the pod was created by the ReplicaSet, and Events records the pod's startup)

[root@k8s01 ~]# kubectl describe pods nginx-deployment-cc5db57d4-5q9lz
Name:         nginx-deployment-cc5db57d4-5q9lz
Namespace:    default
Priority:     0
Node:         k8s03/172.31.6.113
Start Time:   Tue, 21 Apr 2020 10:33:38 +0000
Labels:       app=nginx
              pod-template-hash=cc5db57d4
Annotations:  <none>
Status:       Running
IP:           10.244.2.17
IPs:
  IP:           10.244.2.17
Controlled By:  ReplicaSet/nginx-deployment-cc5db57d4
Containers:
  nginx:
    Container ID:   docker://e062b14bbf7670d5d3c45e983c88b36caa2ed3700fd03dbdb9adf06724fba9bf
    Image:          nginx:1.17.10
    Image ID:       docker-pullable://nginx@sha256:d81f010955749350ef31a119fb94b180fde8b2f157da351ff5667ae037968b28
    Port:           80/TCP
    Host Port:      0/TCP
    State:          Running
      Started:      Tue, 21 Apr 2020 10:33:39 +0000
    Ready:          True
    Restart Count:  0
    Environment:    <none>
    Mounts:
      /var/run/secrets/kubernetes.io/serviceaccount from default-token-pkjh8 (ro)
Conditions:
  Type              Status
  Initialized       True 
  Ready             True 
  ContainersReady   True 
  PodScheduled      True 
Volumes:
  default-token-pkjh8:
    Type:        Secret (a volume populated by a Secret)
    SecretName:  default-token-pkjh8
    Optional:    false
QoS Class:       BestEffort
Node-Selectors:  <none>
Tolerations:     node.kubernetes.io/not-ready:NoExecute for 300s
                 node.kubernetes.io/unreachable:NoExecute for 300s
Events:
  Type    Reason     Age   From               Message
  ----    ------     ----  ----               -------
  Normal  Scheduled  16m   default-scheduler  Successfully assigned default/nginx-deployment-cc5db57d4-5q9lz to k8s03
  Normal  Pulled     16m   kubelet, k8s03     Container image "nginx:1.17.10" already present on machine
  Normal  Created    16m   kubelet, k8s03     Created container nginx
  Normal  Started    16m   kubelet, k8s03     Started container nginx
[root@k8s01 ~]#

Workflow summary:

(1) The user creates a Deployment with kubectl.
(2) The Deployment creates a ReplicaSet.
(3) The ReplicaSet creates the Pods.
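
This ownership chain is visible in each object's metadata.ownerReferences; a sketch using the pod and ReplicaSet names above:

kubectl get pod nginx-deployment-cc5db57d4-5q9lz -o jsonpath='{.metadata.ownerReferences[0].kind}'   # prints ReplicaSet
kubectl get rs nginx-deployment-cc5db57d4 -o jsonpath='{.metadata.ownerReferences[0].kind}'   # prints Deployment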

April 21, 2020
 

Prepare the Redis master Deployment manifest

[root@k8s01 ~]# vi redis-master-deployment.yaml
apiVersion: apps/v1 # for versions before 1.9.0 use apps/v1beta2
kind: Deployment
metadata:
  name: redis-master
  labels:
    app: redis
spec:
  selector:
    matchLabels:
      app: redis
      role: master
      tier: backend
  replicas: 1
  template:
    metadata:
      labels:
        app: redis
        role: master
        tier: backend
    spec:
      containers:
      - name: master
        image: k8s.gcr.io/redis:e2e  # or just image: redis
        resources:
          requests:
            cpu: 100m
            memory: 100Mi
        ports:
        - containerPort: 6379

Prepare the Redis master Service manifest

[root@k8s01 ~]# vi redis-master-service.yaml
apiVersion: v1
kind: Service
metadata:
  name: redis-master
  labels:
    app: redis
    role: master
    tier: backend
spec:
  ports:
  - port: 6379
    targetPort: 6379
  selector:
    app: redis
    role: master
    tier: backend

Prepare the Redis slave Deployment manifest

[root@k8s01 ~]# vi redis-slave-deployment.yaml
apiVersion: apps/v1 # for versions before 1.9.0 use apps/v1beta2
kind: Deployment
metadata:
  name: redis-slave
  labels:
    app: redis
spec:
  selector:
    matchLabels:
      app: redis
      role: slave
      tier: backend
  replicas: 2
  template:
    metadata:
      labels:
        app: redis
        role: slave
        tier: backend
    spec:
      containers:
      - name: slave
        image: gcr.io/google_samples/gb-redisslave:v3
        resources:
          requests:
            cpu: 100m
            memory: 100Mi
        env:
        - name: GET_HOSTS_FROM
          value: dns
          # Using `GET_HOSTS_FROM=dns` requires your cluster to
          # provide a dns service. As of Kubernetes 1.3, DNS is a built-in
          # service launched automatically. However, if the cluster you are using
          # does not have a built-in DNS service, you can instead
          # access an environment variable to find the master
          # service's host. To do so, comment out the 'value: dns' line above, and
          # uncomment the line below:
          # value: env
        ports:
        - containerPort: 6379

Prepare the Redis slave Service manifest

[root@k8s01 ~]# vi redis-slave-service.yaml
apiVersion: v1
kind: Service
metadata:
  name: redis-slave
  labels:
    app: redis
    role: slave
    tier: backend
spec:
  ports:
  - port: 6379
  selector:
    app: redis
    role: slave
    tier: backend

Prepare the Guestbook frontend Deployment manifest

[root@k8s01 ~]# vi frontend-deployment.yaml
apiVersion: apps/v1 # for versions before 1.9.0 use apps/v1beta2
kind: Deployment
metadata:
  name: frontend
  labels:
    app: guestbook
spec:
  selector:
    matchLabels:
      app: guestbook
      tier: frontend
  replicas: 3
  template:
    metadata:
      labels:
        app: guestbook
        tier: frontend
    spec:
      containers:
      - name: php-redis
        image: gcr.io/google-samples/gb-frontend:v4
        resources:
          requests:
            cpu: 100m
            memory: 100Mi
        env:
        - name: GET_HOSTS_FROM
          value: dns
          # Using `GET_HOSTS_FROM=dns` requires your cluster to
          # provide a dns service. As of Kubernetes 1.3, DNS is a built-in
          # service launched automatically. However, if the cluster you are using
          # does not have a built-in DNS service, you can instead
          # access an environment variable to find the master
          # service's host. To do so, comment out the 'value: dns' line above, and
          # uncomment the line below:
          # value: env
        ports:
        - containerPort: 80

Prepare the Guestbook frontend Service manifest

[root@k8s01 ~]# vi frontend-service.yaml
apiVersion: v1
kind: Service
metadata:
  name: frontend
  labels:
    app: guestbook
    tier: frontend
spec:
  # comment or delete the following line if you want to use a LoadBalancer
  type: NodePort 
  # if your cluster supports it, uncomment the following to automatically create
  # an external load-balanced IP for the frontend service.
  # type: LoadBalancer
  ports:
  - port: 80
  selector:
    app: guestbook
    tier: frontend
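
The six manifests are applied one at a time below to watch each step; they could equally be applied in one shot, a sketch assuming they are the only YAML files in the current directory:

kubectl apply -f .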

Apply the Redis master Deployment manifest

[root@k8s01 ~]# kubectl apply -f redis-master-deployment.yaml 
deployment.apps/redis-master created
[root@k8s01 ~]# 
[root@k8s01 ~]# kubectl get deployments
NAME           READY   UP-TO-DATE   AVAILABLE   AGE
redis-master   1/1     1            1           20s
[root@k8s01 ~]# 
[root@k8s01 ~]# kubectl get pods
NAME                            READY   STATUS    RESTARTS   AGE
redis-master-6b54579d85-kkbjt   1/1     Running   0          27s
[root@k8s01 ~]# 
[root@k8s01 ~]# kubectl get services
NAME         TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)   AGE
kubernetes   ClusterIP   10.96.0.1    <none>        443/TCP   20h
[root@k8s01 ~]#

View the pod's log output

[root@k8s01 ~]# kubectl logs -f redis-master-6b54579d85-kkbjt
                _._                                                  
           _.-``__ ''-._                                             
      _.-``    `.  `_.  ''-._           Redis 2.8.19 (00000000/0) 64 bit
  .-`` .-```.  ```\/    _.,_ ''-._                                   
 (    '      ,       .-`  | `,    )     Running in stand alone mode
 |`-._`-...-` __...-.``-._|'` _.-'|     Port: 6379
 |    `-._   `._    /     _.-'    |     PID: 1
  `-._    `-._  `-./  _.-'    _.-'                                   
 |`-._`-._    `-.__.-'    _.-'_.-'|                                  
 |    `-._`-._        _.-'_.-'    |           http://redis.io        
  `-._    `-._`-.__.-'_.-'    _.-'                                   
 |`-._`-._    `-.__.-'    _.-'_.-'|                                  
 |    `-._`-._        _.-'_.-'    |                                  
  `-._    `-._`-.__.-'_.-'    _.-'                                   
      `-._    `-.__.-'    _.-'                                       
          `-._        _.-'                                           
              `-.__.-'                                               

[1] 21 Apr 06:35:24.921 # Server started, Redis version 2.8.19
[1] 21 Apr 06:35:24.922 # WARNING you have Transparent Huge Pages (THP) support enabled in your kernel. This will create latency and memory usage issues with Redis. To fix this issue run the command 'echo never > /sys/kernel/mm/transparent_hugepage/enabled' as root, and add it to your /etc/rc.local in order to retain the setting after a reboot. Redis must be restarted after THP is disabled.
[1] 21 Apr 06:35:24.922 # WARNING: The TCP backlog setting of 511 cannot be enforced because /proc/sys/net/core/somaxconn is set to the lower value of 128.
[1] 21 Apr 06:35:24.922 * The server is now ready to accept connections on port 6379
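
The THP warning above is fixed on the node, not in the container; a sketch of the remedy the log itself recommends (run as root on every node that may schedule Redis):

echo never > /sys/kernel/mm/transparent_hugepage/enabled
# persist across reboots, as the log suggests:
echo 'echo never > /sys/kernel/mm/transparent_hugepage/enabled' >> /etc/rc.local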

Apply the Redis master Service configuration file

[root@k8s01 ~]# kubectl apply -f redis-master-service.yaml 
service/redis-master created
[root@k8s01 ~]# 
[root@k8s01 ~]# kubectl get pods
NAME                            READY   STATUS    RESTARTS   AGE
redis-master-6b54579d85-kkbjt   1/1     Running   0          5m59s
[root@k8s01 ~]# 
[root@k8s01 ~]# kubectl get deployments
NAME           READY   UP-TO-DATE   AVAILABLE   AGE
redis-master   1/1     1            1           6m11s
[root@k8s01 ~]# 
[root@k8s01 ~]# kubectl get services
NAME           TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)    AGE
kubernetes     ClusterIP   10.96.0.1       <none>        443/TCP    20h
redis-master   ClusterIP   10.103.128.16   <none>        6379/TCP   31s
[root@k8s01 ~]#
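
The new Service can be exercised end to end from a throwaway pod; a sketch using the public redis image (an assumption -- any image containing redis-cli works):

kubectl run -it --rm redis-test --image=redis --restart=Never -- redis-cli -h redis-master ping
# PONG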

Apply the Redis slave Deployment configuration file

[root@k8s01 ~]# kubectl apply -f redis-slave-deployment.yaml 
deployment.apps/redis-slave created
[root@k8s01 ~]# 
[root@k8s01 ~]# kubectl get pods
NAME                            READY   STATUS    RESTARTS   AGE
redis-master-6b54579d85-kkbjt   1/1     Running   0          8m4s
redis-slave-799788557c-8vxqf    1/1     Running   0          12s
redis-slave-799788557c-rq74t    1/1     Running   0          12s
[root@k8s01 ~]# 
[root@k8s01 ~]# kubectl get deployments
NAME           READY   UP-TO-DATE   AVAILABLE   AGE
redis-master   1/1     1            1           8m12s
redis-slave    2/2     2            2           20s
[root@k8s01 ~]# 
[root@k8s01 ~]# kubectl get services
NAME           TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)    AGE
kubernetes     ClusterIP   10.96.0.1       <none>        443/TCP    20h
redis-master   ClusterIP   10.103.128.16   <none>        6379/TCP   2m40s
[root@k8s01 ~]#

Apply the Redis slave Service configuration file

[root@k8s01 ~]# kubectl apply -f redis-slave-service.yaml 
service/redis-slave created
[root@k8s01 ~]# 
[root@k8s01 ~]# kubectl get pods
NAME                            READY   STATUS    RESTARTS   AGE
redis-master-6b54579d85-kkbjt   1/1     Running   0          9m6s
redis-slave-799788557c-8vxqf    1/1     Running   0          74s
redis-slave-799788557c-rq74t    1/1     Running   0          74s
[root@k8s01 ~]# 
[root@k8s01 ~]# kubectl get deployments
NAME           READY   UP-TO-DATE   AVAILABLE   AGE
redis-master   1/1     1            1           9m15s
redis-slave    2/2     2            2           83s
[root@k8s01 ~]# 
[root@k8s01 ~]# kubectl get services
NAME           TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)    AGE
kubernetes     ClusterIP   10.96.0.1       <none>        443/TCP    20h
redis-master   ClusterIP   10.103.128.16   <none>        6379/TCP   3m37s
redis-slave    ClusterIP   10.96.236.63    <none>        6379/TCP   25s
[root@k8s01 ~]#
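
Replication can be verified from inside one of the slave pods (pod name taken from the transcript above; this assumes redis-cli is present in the slave image):

kubectl exec redis-slave-799788557c-8vxqf -- redis-cli info replication
# role:slave
# master_host:redis-master
# master_link_status:up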

Apply the Guestbook frontend Deployment configuration file

[root@k8s01 ~]# kubectl apply -f frontend-deployment.yaml 
deployment.apps/frontend created
[root@k8s01 ~]# 
[root@k8s01 ~]# kubectl get pods -l app=guestbook -l tier=frontend
NAME                        READY   STATUS    RESTARTS   AGE
frontend-56fc5b6b47-5sgpf   1/1     Running   0          17s
frontend-56fc5b6b47-hb87m   1/1     Running   0          17s
frontend-56fc5b6b47-rs6jl   1/1     Running   0          17s
[root@k8s01 ~]#

Apply the Guestbook frontend Service configuration file

[root@k8s01 ~]# kubectl apply -f frontend-service.yaml 
service/frontend created
[root@k8s01 ~]# 
[root@k8s01 ~]# kubectl get services
NAME           TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)        AGE
frontend       NodePort    10.96.130.115   <none>        80:31802/TCP   7s
kubernetes     ClusterIP   10.96.0.1       <none>        443/TCP        20h
redis-master   ClusterIP   10.103.128.16   <none>        6379/TCP       9m
redis-slave    ClusterIP   10.96.236.63    <none>        6379/TCP       5m48s
[root@k8s01 ~]#

Check the frontend Service

[root@k8s01 ~]# kubectl get service frontend
NAME       TYPE       CLUSTER-IP      EXTERNAL-IP   PORT(S)        AGE
frontend   NodePort   10.96.130.115   <none>        80:31802/TCP   90s
[root@k8s01 ~]#

Access from a browser
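
The frontend answers on any node's public IP at the allocated NodePort (31802 in this run). Without a browser, a quick check works too (<node-ip> is a placeholder):

curl -s http://<node-ip>:31802/ | head -n 5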

Scaling the services

Scale out

[root@k8s01 ~]# kubectl scale deployment frontend --replicas=5
deployment.apps/frontend scaled
[root@k8s01 ~]# kubectl get pods
NAME                            READY   STATUS    RESTARTS   AGE
frontend-56fc5b6b47-5sgpf       1/1     Running   0          8m38s
frontend-56fc5b6b47-f4pt6       1/1     Running   0          11s
frontend-56fc5b6b47-hb87m       1/1     Running   0          8m38s
frontend-56fc5b6b47-hj59m       1/1     Running   0          11s
frontend-56fc5b6b47-rs6jl       1/1     Running   0          8m38s
redis-master-6b54579d85-kkbjt   1/1     Running   0          22m
redis-slave-799788557c-8vxqf    1/1     Running   0          14m
redis-slave-799788557c-rq74t    1/1     Running   0          14m
[root@k8s01 ~]#

Scale in

[root@k8s01 ~]# kubectl scale deployment frontend --replicas=2
deployment.apps/frontend scaled
[root@k8s01 ~]# kubectl get pods
NAME                            READY   STATUS    RESTARTS   AGE
frontend-56fc5b6b47-hb87m       1/1     Running   0          9m28s
frontend-56fc5b6b47-hj59m       1/1     Running   0          61s
redis-master-6b54579d85-kkbjt   1/1     Running   0          22m
redis-slave-799788557c-8vxqf    1/1     Running   0          15m
redis-slave-799788557c-rq74t    1/1     Running   0          15m
[root@k8s01 ~]#
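
Instead of scaling by hand, the Deployment could be driven by a HorizontalPodAutoscaler; a sketch (this assumes the metrics-server add-on, which is not installed in this walkthrough):

kubectl autoscale deployment frontend --min=2 --max=5 --cpu-percent=80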

Clean up the Deployments, Services, and running Pods

[root@k8s01 ~]# kubectl delete deployment -l app=redis
deployment.apps "redis-master" deleted
deployment.apps "redis-slave" deleted
[root@k8s01 ~]# kubectl delete service -l app=redis
service "redis-master" deleted
service "redis-slave" deleted
[root@k8s01 ~]# kubectl delete deployment -l app=guestbook
deployment.apps "frontend" deleted
[root@k8s01 ~]# kubectl delete service -l app=guestbook
service "frontend" deleted
[root@k8s01 ~]#
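
A final check that nothing from the guestbook is left behind:

kubectl get pods
# No resources found in default namespace.
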
April 21, 2020
 

Base environment installation script (Amazon AWS EC2, CentOS 7)

#!/bin/bash
# Base environment setup for every Kubernetes node (CentOS 7 on AWS EC2)

# Disable SELinux so containers can access the host filesystem
setenforce 0;
sed -i 's/^SELINUX=enforcing$/SELINUX=disabled/' /etc/selinux/config;
# Let iptables see bridged traffic (required by kube-proxy)
cat <<EOF > /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
EOF
# br_netfilter must be loaded for the bridge settings above to take effect,
# and 'sysctl --system' (unlike 'sysctl -p') reads files under /etc/sysctl.d
modprobe br_netfilter;
sysctl --system;
# Install Docker CE from the official repository
yum makecache;
yum install -y yum-utils;
yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo;
yum install -y docker-ce docker-ce-cli containerd.io;
# Host entries for the three cluster nodes
cat <<EOF >> /etc/hosts
172.31.3.209 k8s01
172.31.8.132 k8s02
172.31.10.229 k8s03
EOF
# Docker daemon: systemd cgroup driver (matches kubelet), log rotation, overlay2
mkdir -p /etc/docker;
cat <<EOF > /etc/docker/daemon.json
{
  "exec-opts": ["native.cgroupdriver=systemd"],
  "log-driver": "json-file",
  "log-opts": {
    "max-size": "100m"
  },
  "storage-driver": "overlay2",
  "storage-opts": [
    "overlay2.override_kernel_check=true"
  ]
}
EOF
# Enable and (re)start Docker
systemctl daemon-reload;
systemctl enable docker;
systemctl restart docker;
# Kubernetes yum repository
cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://packages.cloud.google.com/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://packages.cloud.google.com/yum/doc/yum-key.gpg https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg
EOF
# Install the Kubernetes components and enable kubelet
yum install -y kubectl kubelet kubeadm;
systemctl enable kubelet;
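
The script installs whatever version is latest in the repository. To keep all three nodes on the same release, the packages can be version-pinned instead; a sketch pinning the release used in this walkthrough:

yum install -y kubeadm-1.18.2-0 kubelet-1.18.2-0 kubectl-1.18.2-0;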

Run the script

[root@k8s01 ~]# vi deploy.sh
[root@k8s01 ~]# chmod 700 deploy.sh 
[root@k8s01 ~]# ./deploy.sh

Initialize the master node

kubeadm init --apiserver-advertise-address=172.31.14.12 --pod-network-cidr=10.244.0.0/16

[root@k8s01 ~]# kubeadm init --apiserver-advertise-address=172.31.14.12 --pod-network-cidr=10.244.0.0/16

Configure the local kubectl environment

[root@k8s01 ~]# mkdir -p $HOME/.kube
[root@k8s01 ~]# sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
[root@k8s01 ~]# sudo chown $(id -u):$(id -g) $HOME/.kube/config
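
A quick check that kubectl can now reach the control plane; until a pod network is installed, the master reports NotReady (output below is illustrative):

kubectl get nodes
# NAME    STATUS     ROLES    AGE   VERSION
# k8s01   NotReady   master   1m    v1.18.2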

Join worker nodes to the cluster

kubeadm join 172.31.14.12:6443 --token ghr4s0.13nh5q6f6ywt2oso \
--discovery-token-ca-cert-hash sha256:d435ee7f3795a10b58762be903a78a99c719e3520fb029d718505095b37e9859
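
The bootstrap token above expires after 24 hours by default; if it has lapsed, a fresh join command can be printed on the master with:

kubeadm token create --print-join-command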

Node 2 joins

[root@k8s02 ~]# kubeadm join 172.31.14.12:6443 --token ghr4s0.13nh5q6f6ywt2oso \
> --discovery-token-ca-cert-hash sha256:d435ee7f3795a10b58762be903a78a99c719e3520fb029d718505095b37e9859
W0420 10:23:48.432125 9198 join.go:346] [preflight] WARNING: JoinControlPane.controlPlane settings will be ignored when control-plane flag is not set.
[preflight] Running pre-flight checks
[preflight] Reading configuration from the cluster...
[preflight] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -oyaml'
[kubelet-start] Downloading configuration for the kubelet from the "kubelet-config-1.18" ConfigMap in the kube-system namespace
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Starting the kubelet
[kubelet-start] Waiting for the kubelet to perform the TLS Bootstrap...

This node has joined the cluster:
* Certificate signing request was sent to apiserver and a response was received.
* The Kubelet was informed of the new secure connection details.

Run 'kubectl get nodes' on the control-plane to see this node join the cluster.

[root@k8s02 ~]#

Node 3 joins

[root@k8s03 ~]# kubeadm join 172.31.14.12:6443 --token ghr4s0.13nh5q6f6ywt2oso \
> --discovery-token-ca-cert-hash sha256:d435ee7f3795a10b58762be903a78a99c719e3520fb029d718505095b37e9859
W0420 10:24:14.829097 9202 join.go:346] [preflight] WARNING: JoinControlPane.controlPlane settings will be ignored when control-plane flag is not set.
[preflight] Running pre-flight checks
[preflight] Reading configuration from the cluster...
[preflight] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -oyaml'
[kubelet-start] Downloading configuration for the kubelet from the "kubelet-config-1.18" ConfigMap in the kube-system namespace
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Starting the kubelet
[kubelet-start] Waiting for the kubelet to perform the TLS Bootstrap...

This node has joined the cluster:
* Certificate signing request was sent to apiserver and a response was received.
* The Kubelet was informed of the new secure connection details.

Run 'kubectl get nodes' on the control-plane to see this node join the cluster.

[root@k8s03 ~]#

Install the flannel pod network

[root@k8s01 ~]# kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
podsecuritypolicy.policy/psp.flannel.unprivileged created
clusterrole.rbac.authorization.k8s.io/flannel created
clusterrolebinding.rbac.authorization.k8s.io/flannel created
serviceaccount/flannel created
configmap/kube-flannel-cfg created
daemonset.apps/kube-flannel-ds-amd64 created
daemonset.apps/kube-flannel-ds-arm64 created
daemonset.apps/kube-flannel-ds-arm created
daemonset.apps/kube-flannel-ds-ppc64le created
daemonset.apps/kube-flannel-ds-s390x created
[root@k8s01 ~]#
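
The flannel DaemonSet pods should reach Running on every node before proceeding; a check, assuming the manifest labels its pods app=flannel (pod name suffix is illustrative):

kubectl get pods -n kube-system -l app=flannel
# NAME                          READY   STATUS    RESTARTS   AGE
# kube-flannel-ds-amd64-xxxxx   1/1     Running   0          1m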

Get node information

[root@k8s01 ~]# kubectl get nodes
NAME    STATUS   ROLES    AGE   VERSION
k8s01   Ready    master   12m   v1.18.2
k8s02   Ready    <none>   10m   v1.18.2
k8s03   Ready    <none>   10m   v1.18.2
[root@k8s01 ~]#

Check cluster component status

[root@k8s01 ~]# kubectl get cs
NAME                 STATUS    MESSAGE             ERROR
scheduler            Healthy   ok                  
controller-manager   Healthy   ok                  
etcd-0               Healthy   {"health":"true"}   
[root@k8s01 ~]# 

List local images

[root@k8s01 ~]# docker image ls
REPOSITORY                           TAG                 IMAGE ID            CREATED             SIZE
k8s.gcr.io/kube-proxy                v1.18.2             0d40868643c6        3 days ago          117MB
k8s.gcr.io/kube-scheduler            v1.18.2             a3099161e137        3 days ago          95.3MB
k8s.gcr.io/kube-apiserver            v1.18.2             6ed75ad404bd        3 days ago          173MB
k8s.gcr.io/kube-controller-manager   v1.18.2             ace0a8c17ba9        3 days ago          162MB
quay.io/coreos/flannel               v0.12.0-amd64       4e9f801d2217        5 weeks ago         52.8MB
k8s.gcr.io/pause                     3.2                 80d28bedfe5d        2 months ago        683kB
k8s.gcr.io/coredns                   1.6.7               67da37a9a360        2 months ago        43.8MB
k8s.gcr.io/etcd                      3.4.3-0             303ce5db0e90        5 months ago        288MB
[root@k8s01 ~]#

 

April 10, 2020
 

List the current swarm nodes and their status

[root@ip-172-31-13-72 ~]# docker node ls
ID                            HOSTNAME                                      STATUS              AVAILABILITY        MANAGER STATUS      ENGINE VERSION
a3x4i21mefgk2kfrdwup7pi8c     ip-172-31-8-116.ap-east-1.compute.internal    Ready               Active                                  19.03.8
3ye29tl5y8t3alqn82afk80uo     ip-172-31-12-208.ap-east-1.compute.internal   Ready               Active                                  19.03.8
nxf5orroo1g5pfhzoi4lhuwny *   ip-172-31-13-72.ap-east-1.compute.internal    Ready               Active              Leader              19.03.8
[root@ip-172-31-13-72 ~]#

Prepare the zookeeper service configuration file

[root@ip-172-31-13-72 ~]# mkdir zookeeper
[root@ip-172-31-13-72 ~]# cd zookeeper/
[root@ip-172-31-13-72 zookeeper]# vi stack.yaml
version: '3.1'

services:
  zoo1:
    image: zookeeper
    restart: always
    hostname: zoo1
    ports:
      - 2181:2181
    environment:
      ZOO_MY_ID: 1
      ZOO_SERVERS: server.1=0.0.0.0:2888:3888;2181 server.2=zoo2:2888:3888;2181 server.3=zoo3:2888:3888;2181

  zoo2:
    image: zookeeper
    restart: always
    hostname: zoo2
    ports:
      - 2182:2181
    environment:
      ZOO_MY_ID: 2
      ZOO_SERVERS: server.1=zoo1:2888:3888;2181 server.2=0.0.0.0:2888:3888;2181 server.3=zoo3:2888:3888;2181

  zoo3:
    image: zookeeper
    restart: always
    hostname: zoo3
    ports:
      - 2183:2181
    environment:
      ZOO_MY_ID: 3
      ZOO_SERVERS: server.1=zoo1:2888:3888;2181 server.2=zoo2:2888:3888;2181 server.3=0.0.0.0:2888:3888;2181

In a swarm cluster with multiple nodes, services should be deployed with docker stack. Note that stack deploy ignores the compose-file restart: option (hence the warning below); restart behavior for swarm services is configured under deploy.restart_policy instead.

[root@ip-172-31-13-72 zookeeper]# docker stack deploy --compose-file=stack.yaml zookeeper
Ignoring unsupported options: restart

Creating network zookeeper_default
Creating service zookeeper_zoo1
Creating service zookeeper_zoo2
Creating service zookeeper_zoo3
[root@ip-172-31-13-72 zookeeper]#

List stacks

[root@ip-172-31-13-72 ~]# docker stack ls
NAME                SERVICES            ORCHESTRATOR
zookeeper           3                   Swarm
[root@ip-172-31-13-72 ~]#

List services

[root@ip-172-31-13-72 ~]# docker service ls
ID                  NAME                MODE                REPLICAS            IMAGE               PORTS
ypz75uph0fgt        zookeeper_zoo1      replicated          1/1                 zookeeper:latest    *:2181->2181/tcp
r9n4vrvxmooa        zookeeper_zoo2      replicated          1/1                 zookeeper:latest    *:2182->2181/tcp
n268gwoxvm1g        zookeeper_zoo3      replicated          1/1                 zookeeper:latest    *:2183->2181/tcp
[root@ip-172-31-13-72 ~]#

View the containers on each node

[root@ip-172-31-13-72 ~]# docker container ps
CONTAINER ID        IMAGE               COMMAND                  CREATED             STATUS              PORTS                                    NAMES
81f90549dcac        zookeeper:latest    "/docker-entrypoint.…"   4 minutes ago       Up 4 minutes        2181/tcp, 2888/tcp, 3888/tcp, 8080/tcp   zookeeper_zoo1.1.2i04z7wsnpgzvl2tfiv3017ae
[root@ip-172-31-13-72 ~]# 

[root@ip-172-31-8-116 ~]# docker container ps
CONTAINER ID        IMAGE               COMMAND                  CREATED             STATUS              PORTS                                    NAMES
0ea08ec1fcb6        zookeeper:latest    "/docker-entrypoint.…"   3 minutes ago       Up 3 minutes        2181/tcp, 2888/tcp, 3888/tcp, 8080/tcp   zookeeper_zoo2.1.kt45qxf31jc0zl98uj0hggx9c
[root@ip-172-31-8-116 ~]# 

[root@ip-172-31-12-208 ~]# docker container ps
CONTAINER ID        IMAGE               COMMAND                  CREATED             STATUS              PORTS                                    NAMES
6820aabc2f98        zookeeper:latest    "/docker-entrypoint.…"   3 minutes ago       Up 3 minutes        2181/tcp, 2888/tcp, 3888/tcp, 8080/tcp   zookeeper_zoo3.1.2k3u6acz69sm9jzlg76o89tio
[root@ip-172-31-12-208 ~]#
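
With all three replicas running, quorum can be verified from any member; a sketch using the container name from the transcript (the official zookeeper image ships zkServer.sh on its PATH):

docker exec zookeeper_zoo1.1.2i04z7wsnpgzvl2tfiv3017ae zkServer.sh status
# ...
# Mode: follower        (exactly one of the three reports Mode: leader)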

List local images

[root@ip-172-31-13-72 ~]# docker image ls
REPOSITORY          TAG                 IMAGE ID            CREATED             SIZE
zookeeper           <none>              b6f6e4bc7eef        11 days ago         252MB
[root@ip-172-31-13-72 ~]#

Remove the stack

[root@ip-172-31-13-72 ~]# docker stack rm zookeeper
Removing service zookeeper_zoo1
Removing service zookeeper_zoo2
Removing service zookeeper_zoo3
Removing network zookeeper_default
[root@ip-172-31-13-72 ~]#