2020 年 4 月 27 日
 

相较于Deployment资源,DaemonSet在每个节点仅运行一个副本,以提供守护服务。

查看DaemonSet类型的系统组件(kube-proxy和kube-flannel-ds-amd64)

获取kube-system命名空间的daemonset列表

[root@k8s01 ~]# kubectl get daemonsets.apps --namespace=kube-system 
NAME                      DESIRED   CURRENT   READY   UP-TO-DATE   AVAILABLE   NODE SELECTOR            AGE
kube-flannel-ds-amd64     5         5         5       5            5           <none>                   6d16h
kube-flannel-ds-arm       0         0         0       0            0           <none>                   6d16h
kube-flannel-ds-arm64     0         0         0       0            0           <none>                   6d16h
kube-flannel-ds-ppc64le   0         0         0       0            0           <none>                   6d16h
kube-flannel-ds-s390x     0         0         0       0            0           <none>                   6d16h
kube-proxy                5         5         5       5            5           kubernetes.io/os=linux   6d16h
[root@k8s01 ~]#

获取kube-system命名空间pod列表详情(每个节点都运行一个daemonset类型容器副本)

[root@k8s01 ~]# kubectl get pods --namespace=kube-system -o wide
NAME                            READY   STATUS    RESTARTS   AGE     IP             NODE    NOMINATED NODE   READINESS GATES
coredns-66bff467f8-5x8nf        1/1     Running   0          6d16h   10.244.1.2     k8s02   <none>           <none>
coredns-66bff467f8-mgcd2        1/1     Running   0          6d16h   10.244.0.2     k8s01   <none>           <none>
etcd-k8s01                      1/1     Running   0          6d16h   172.31.14.12   k8s01   <none>           <none>
kube-apiserver-k8s01            1/1     Running   0          6d16h   172.31.14.12   k8s01   <none>           <none>
kube-controller-manager-k8s01   1/1     Running   0          6d16h   172.31.14.12   k8s01   <none>           <none>
kube-flannel-ds-amd64-4ngbr     1/1     Running   0          6d16h   172.31.6.113   k8s03   <none>           <none>
kube-flannel-ds-amd64-j9qmh     1/1     Running   0          4d      172.31.1.139   k8s04   <none>           <none>
kube-flannel-ds-amd64-kmw29     1/1     Running   0          6d16h   172.31.3.249   k8s02   <none>           <none>
kube-flannel-ds-amd64-l57kp     1/1     Running   0          6d16h   172.31.14.12   k8s01   <none>           <none>
kube-flannel-ds-amd64-rr8sv     1/1     Running   1          4d      172.31.15.1    k8s05   <none>           <none>
kube-proxy-22fd2                1/1     Running   0          6d16h   172.31.3.249   k8s02   <none>           <none>
kube-proxy-97hft                1/1     Running   0          4d      172.31.1.139   k8s04   <none>           <none>
kube-proxy-jwwp2                1/1     Running   0          6d16h   172.31.6.113   k8s03   <none>           <none>
kube-proxy-mw6xf                1/1     Running   0          4d      172.31.15.1    k8s05   <none>           <none>
kube-proxy-wnf4q                1/1     Running   0          6d16h   172.31.14.12   k8s01   <none>           <none>
kube-scheduler-k8s01            1/1     Running   0          6d16h   172.31.14.12   k8s01   <none>           <none>
[root@k8s01 ~]#

查看flannel网络组件配置文件中的daemonset配置

[root@k8s01 ~]# vi kube-flannel.yml
    134 apiVersion: apps/v1
    135 kind: DaemonSet
    136 metadata:
    137   name: kube-flannel-ds-amd64
    138   namespace: kube-system
    139   labels:
    140     tier: node
    141     app: flannel
    142 spec:
    143   selector:
    144     matchLabels:
    145       app: flannel
    146   template:
    147     metadata:
    148       labels:
    149         tier: node
    150         app: flannel
    151     spec:
    152       affinity:
    153         nodeAffinity:
    154           requiredDuringSchedulingIgnoredDuringExecution:
    155             nodeSelectorTerms:
    156               - matchExpressions:
    157                   - key: kubernetes.io/os
    158                     operator: In
    159                     values:
    160                       - linux
    161                   - key: kubernetes.io/arch
    162                     operator: In
    163                     values:
    164                       - amd64
    165       hostNetwork: true
    166       tolerations:
    167       - operator: Exists
    168         effect: NoSchedule
    169       serviceAccountName: flannel
    170       initContainers:
    171       - name: install-cni
    172         image: quay.io/coreos/flannel:v0.12.0-amd64
    173         command:
    174         - cp
    175         args:
    176         - -f
    177         - /etc/kube-flannel/cni-conf.json
    178         - /etc/cni/net.d/10-flannel.conflist
    179         volumeMounts:
    180         - name: cni
    181           mountPath: /etc/cni/net.d
    182         - name: flannel-cfg
    183           mountPath: /etc/kube-flannel/
    184       containers:
    185       - name: kube-flannel
    186         image: quay.io/coreos/flannel:v0.12.0-amd64
    187         command:
    188         - /opt/bin/flanneld
    189         args:
    190         - --ip-masq
    191         - --kube-subnet-mgr
    192         resources:
    193           requests:
    194             cpu: "100m"
    195             memory: "50Mi"
    196           limits:
    197             cpu: "100m"
    198             memory: "50Mi"
    199         securityContext:
    200           privileged: false
    201           capabilities:
    202             add: ["NET_ADMIN"]
    203         env:
    204         - name: POD_NAME
    205           valueFrom:
    206             fieldRef:
    207               fieldPath: metadata.name
    208         - name: POD_NAMESPACE
    209           valueFrom:
    210             fieldRef:
    211               fieldPath: metadata.namespace
    212         volumeMounts:
    213         - name: run
    214           mountPath: /run/flannel
    215         - name: flannel-cfg
    216           mountPath: /etc/kube-flannel/
    217       volumes:
    218         - name: run
    219           hostPath:
    220             path: /run/flannel
    221         - name: cni
    222           hostPath:
    223             path: /etc/cni/net.d
    224         - name: flannel-cfg
    225           configMap:
    226             name: kube-flannel-cfg

运行一个daemonset类型的资源(Fluentd日志收集系统)

[root@k8s01 ~]# vi daemonset.yaml
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: fluentd-elasticsearch
  namespace: kube-system
  labels:
    k8s-app: fluentd-logging
spec:
  selector:
    matchLabels:
      name: fluentd-elasticsearch
  template:
    metadata:
      labels:
        name: fluentd-elasticsearch
    spec:
      tolerations:
      # this toleration is to have the daemonset runnable on master nodes
      # remove it if your masters can't run pods
      - key: node-role.kubernetes.io/master
        effect: NoSchedule
      containers:
      - name: fluentd-elasticsearch
        image: quay.io/fluentd_elasticsearch/fluentd:v2.5.2
        resources:
          limits:
            memory: 200Mi
          requests:
            cpu: 100m
            memory: 200Mi
        volumeMounts:
        - name: varlog
          mountPath: /var/log
        - name: varlibdockercontainers
          mountPath: /var/lib/docker/containers
          readOnly: true
      terminationGracePeriodSeconds: 30
      volumes:
      - name: varlog
        hostPath:
          path: /var/log
      - name: varlibdockercontainers
        hostPath:
          path: /var/lib/docker/containers

应用配置文件

[root@k8s01 ~]# kubectl apply -f daemonset.yaml 
daemonset.apps/fluentd-elasticsearch created
[root@k8s01 ~]# kubectl get daemonsets.apps 
No resources found in default namespace.
[root@k8s01 ~]# kubectl get daemonsets.apps --namespace=kube-system 
NAME                      DESIRED   CURRENT   READY   UP-TO-DATE   AVAILABLE   NODE SELECTOR            AGE
fluentd-elasticsearch     5         5         5       5            5           <none>                   28s
kube-flannel-ds-amd64     5         5         5       5            5           <none>                   6d18h
kube-flannel-ds-arm       0         0         0       0            0           <none>                   6d18h
kube-flannel-ds-arm64     0         0         0       0            0           <none>                   6d18h
kube-flannel-ds-ppc64le   0         0         0       0            0           <none>                   6d18h
kube-flannel-ds-s390x     0         0         0       0            0           <none>                   6d18h
kube-proxy                5         5         5       5            5           kubernetes.io/os=linux   6d18h
[root@k8s01 ~]#

获取kube-system命名空间的pod列表详情(fluentd-elasticsearch在每个节点各运行一个副本)

[root@k8s01 ~]# kubectl get pods --namespace=kube-system -o wide
NAME                            READY   STATUS    RESTARTS   AGE     IP             NODE    NOMINATED NODE   READINESS GATES
coredns-66bff467f8-5x8nf        1/1     Running   0          6d18h   10.244.1.2     k8s02   <none>           <none>
coredns-66bff467f8-mgcd2        1/1     Running   0          6d18h   10.244.0.2     k8s01   <none>           <none>
etcd-k8s01                      1/1     Running   0          6d18h   172.31.14.12   k8s01   <none>           <none>
fluentd-elasticsearch-64c2h     1/1     Running   0          84s     10.244.5.9     k8s05   <none>           <none>
fluentd-elasticsearch-f8989     1/1     Running   0          84s     10.244.0.3     k8s01   <none>           <none>
fluentd-elasticsearch-lcgn7     1/1     Running   0          84s     10.244.3.4     k8s04   <none>           <none>
fluentd-elasticsearch-ss2zm     1/1     Running   0          84s     10.244.1.20    k8s02   <none>           <none>
fluentd-elasticsearch-wkd45     1/1     Running   0          84s     10.244.2.39    k8s03   <none>           <none>
kube-apiserver-k8s01            1/1     Running   0          6d18h   172.31.14.12   k8s01   <none>           <none>
kube-controller-manager-k8s01   1/1     Running   0          6d18h   172.31.14.12   k8s01   <none>           <none>
kube-flannel-ds-amd64-4ngbr     1/1     Running   0          6d18h   172.31.6.113   k8s03   <none>           <none>
kube-flannel-ds-amd64-j9qmh     1/1     Running   0          4d2h    172.31.1.139   k8s04   <none>           <none>
kube-flannel-ds-amd64-kmw29     1/1     Running   0          6d18h   172.31.3.249   k8s02   <none>           <none>
kube-flannel-ds-amd64-l57kp     1/1     Running   0          6d18h   172.31.14.12   k8s01   <none>           <none>
kube-flannel-ds-amd64-rr8sv     1/1     Running   1          4d2h    172.31.15.1    k8s05   <none>           <none>
kube-proxy-22fd2                1/1     Running   0          6d18h   172.31.3.249   k8s02   <none>           <none>
kube-proxy-97hft                1/1     Running   0          4d2h    172.31.1.139   k8s04   <none>           <none>
kube-proxy-jwwp2                1/1     Running   0          6d18h   172.31.6.113   k8s03   <none>           <none>
kube-proxy-mw6xf                1/1     Running   0          4d2h    172.31.15.1    k8s05   <none>           <none>
kube-proxy-wnf4q                1/1     Running   0          6d18h   172.31.14.12   k8s01   <none>           <none>
kube-scheduler-k8s01            1/1     Running   0          6d18h   172.31.14.12   k8s01   <none>           <none>
[root@k8s01 ~]#
2020 年 4 月 27 日
 

获取当前集群pod列表及所属节点

[root@k8s01 ~]# kubectl get pods -o wide
NAME                               READY   STATUS    RESTARTS   AGE     IP            NODE    NOMINATED NODE   READINESS GATES
nginx-deployment-bbfdbf4b7-8khd4   1/1     Running   0          3d23h   10.244.2.35   k8s03   <none>           <none>
nginx-deployment-bbfdbf4b7-9g825   1/1     Running   0          3d23h   10.244.1.17   k8s02   <none>           <none>
nginx-deployment-bbfdbf4b7-hsvfg   1/1     Running   0          3d23h   10.244.2.36   k8s03   <none>           <none>
nginx-deployment-bbfdbf4b7-jpt96   1/1     Running   0          3d23h   10.244.2.34   k8s03   <none>           <none>
nginx-deployment-bbfdbf4b7-vlnlk   1/1     Running   0          3d23h   10.244.1.18   k8s02   <none>           <none>
[root@k8s01 ~]# kubectl get deployments
NAME               READY   UP-TO-DATE   AVAILABLE   AGE
nginx-deployment   5/5     5            5           5d15h
[root@k8s01 ~]#

删除nginx-deployment资源

[root@k8s01 ~]# kubectl delete deployments.apps nginx-deployment 
deployment.apps "nginx-deployment" deleted
[root@k8s01 ~]# kubectl get pods
No resources found in default namespace.
[root@k8s01 ~]#

获取节点列表

[root@k8s01 ~]# kubectl get nodes
NAME    STATUS   ROLES    AGE     VERSION
k8s01   Ready    master   6d15h   v1.18.2
k8s02   Ready    <none>   6d15h   v1.18.2
k8s03   Ready    <none>   6d15h   v1.18.2
k8s04   Ready    <none>   3d23h   v1.18.2
k8s05   Ready    <none>   3d23h   v1.18.2
[root@k8s01 ~]#

应用nginx-deployment配置文件

[root@k8s01 ~]# cat nginx-deployment.yaml 
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-deployment
  labels:
    app: nginx
spec:
  replicas: 2
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
      - name: nginx
        image: nginx:1.17.10
        ports:
        - containerPort: 80
[root@k8s01 ~]# kubectl apply -f nginx-deployment.yaml 
deployment.apps/nginx-deployment created
[root@k8s01 ~]# kubectl get pods -o wide
NAME                               READY   STATUS    RESTARTS   AGE   IP            NODE    NOMINATED NODE   READINESS GATES
nginx-deployment-cc5db57d4-dvr4p   1/1     Running   0          11s   10.244.2.37   k8s03   <none>           <none>
nginx-deployment-cc5db57d4-fnq9c   1/1     Running   0          11s   10.244.3.2    k8s04   <none>           <none>
[root@k8s01 ~]#

获取节点的默认标签配置信息

[root@k8s01 ~]# kubectl get nodes --show-labels 
NAME    STATUS   ROLES    AGE     VERSION   LABELS
k8s01   Ready    master   6d15h   v1.18.2   beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=k8s01,kubernetes.io/os=linux,node-role.kubernetes.io/master=
k8s02   Ready    <none>   6d15h   v1.18.2   beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=k8s02,kubernetes.io/os=linux
k8s03   Ready    <none>   6d15h   v1.18.2   beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=k8s03,kubernetes.io/os=linux
k8s04   Ready    <none>   3d23h   v1.18.2   beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=k8s04,kubernetes.io/os=linux
k8s05   Ready    <none>   3d23h   v1.18.2   beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=k8s05,kubernetes.io/os=linux
[root@k8s01 ~]#

对指定节点添加标签键值对

[root@k8s01 ~]# kubectl label nodes k8s05 disktype=ssd
node/k8s05 labeled
[root@k8s01 ~]# kubectl get nodes --show-labels 
NAME    STATUS   ROLES    AGE     VERSION   LABELS
k8s01   Ready    master   6d15h   v1.18.2   beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=k8s01,kubernetes.io/os=linux,node-role.kubernetes.io/master=
k8s02   Ready    <none>   6d15h   v1.18.2   beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=k8s02,kubernetes.io/os=linux
k8s03   Ready    <none>   6d15h   v1.18.2   beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=k8s03,kubernetes.io/os=linux
k8s04   Ready    <none>   3d23h   v1.18.2   beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=k8s04,kubernetes.io/os=linux
k8s05   Ready    <none>   3d23h   v1.18.2   beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,disktype=ssd,kubernetes.io/arch=amd64,kubernetes.io/hostname=k8s05,kubernetes.io/os=linux
[root@k8s01 ~]#

修改deployment配置文件添加关联标签

[root@k8s01 ~]# vi nginx-deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-deployment
  labels:
    app: nginx
spec:
  replicas: 6
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
      - name: nginx
        image: nginx:1.17.10
        ports:
        - containerPort: 80
      nodeSelector:
        disktype: ssd

应用配置文件后,原有pod被逐步销毁,新pod全部调度到带有disktype=ssd标签的节点k8s05上

[root@k8s01 ~]# kubectl get pods -o wide
NAME                               READY   STATUS              RESTARTS   AGE     IP            NODE    NOMINATED NODE   READINESS GATES
nginx-deployment-cc5db57d4-5lzsz   1/1     Running             0          12s     10.244.3.3    k8s04   <none>           <none>
nginx-deployment-cc5db57d4-dvr4p   1/1     Running             0          9m53s   10.244.2.37   k8s03   <none>           <none>
nginx-deployment-cc5db57d4-fnq9c   1/1     Running             0          9m53s   10.244.3.2    k8s04   <none>           <none>
nginx-deployment-cc5db57d4-hwmk4   1/1     Running             0          12s     10.244.1.19   k8s02   <none>           <none>
nginx-deployment-cc5db57d4-qt26r   1/1     Running             0          12s     10.244.2.38   k8s03   <none>           <none>
nginx-deployment-ddc6847d-4qx2m    0/1     ContainerCreating   0          12s     <none>        k8s05   <none>           <none>
nginx-deployment-ddc6847d-cvhv4    0/1     ContainerCreating   0          12s     <none>        k8s05   <none>           <none>
nginx-deployment-ddc6847d-dcztn    0/1     ContainerCreating   0          12s     <none>        k8s05   <none>           <none>
[root@k8s01 ~]# kubectl get pods -o wide
NAME                               READY   STATUS        RESTARTS   AGE   IP            NODE    NOMINATED NODE   READINESS GATES
nginx-deployment-cc5db57d4-dvr4p   0/1     Terminating   0          10m   10.244.2.37   k8s03   <none>           <none>
nginx-deployment-cc5db57d4-fnq9c   0/1     Terminating   0          10m   10.244.3.2    k8s04   <none>           <none>
nginx-deployment-ddc6847d-26hl9    1/1     Running       0          13s   10.244.5.7    k8s05   <none>           <none>
nginx-deployment-ddc6847d-4qx2m    1/1     Running       0          26s   10.244.5.3    k8s05   <none>           <none>
nginx-deployment-ddc6847d-cvhv4    1/1     Running       0          26s   10.244.5.4    k8s05   <none>           <none>
nginx-deployment-ddc6847d-d6f99    1/1     Running       0          14s   10.244.5.6    k8s05   <none>           <none>
nginx-deployment-ddc6847d-dcztn    1/1     Running       0          26s   10.244.5.5    k8s05   <none>           <none>
nginx-deployment-ddc6847d-dj5x4    1/1     Running       0          12s   10.244.5.8    k8s05   <none>           <none>
[root@k8s01 ~]# kubectl get pods -o wide
NAME                              READY   STATUS    RESTARTS   AGE   IP           NODE    NOMINATED NODE   READINESS GATES
nginx-deployment-ddc6847d-26hl9   1/1     Running   0          21s   10.244.5.7   k8s05   <none>           <none>
nginx-deployment-ddc6847d-4qx2m   1/1     Running   0          34s   10.244.5.3   k8s05   <none>           <none>
nginx-deployment-ddc6847d-cvhv4   1/1     Running   0          34s   10.244.5.4   k8s05   <none>           <none>
nginx-deployment-ddc6847d-d6f99   1/1     Running   0          22s   10.244.5.6   k8s05   <none>           <none>
nginx-deployment-ddc6847d-dcztn   1/1     Running   0          34s   10.244.5.5   k8s05   <none>           <none>
nginx-deployment-ddc6847d-dj5x4   1/1     Running   0          20s   10.244.5.8   k8s05   <none>           <none>
[root@k8s01 ~]#

删除label标签配置

[root@k8s01 ~]# kubectl label nodes k8s05 disktype-
node/k8s05 labeled
[root@k8s01 ~]# kubectl get nodes --show-labels 
NAME    STATUS   ROLES    AGE     VERSION   LABELS
k8s01   Ready    master   6d15h   v1.18.2   beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=k8s01,kubernetes.io/os=linux,node-role.kubernetes.io/master=
k8s02   Ready    <none>   6d15h   v1.18.2   beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=k8s02,kubernetes.io/os=linux
k8s03   Ready    <none>   6d15h   v1.18.2   beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=k8s03,kubernetes.io/os=linux
k8s04   Ready    <none>   3d23h   v1.18.2   beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=k8s04,kubernetes.io/os=linux
k8s05   Ready    <none>   3d23h   v1.18.2   beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=k8s05,kubernetes.io/os=linux
[root@k8s01 ~]#