2020 年 4 月 21 日
 

准备Redis Master服务器Deployment配置文件

[root@k8s01 ~]# vi redis-master-deployment.yaml
# Deployment running a single Redis master pod.
# The matchLabels (app/role/tier) must equal the pod template labels below,
# and are the same labels the redis-master Service selects on.
apiVersion: apps/v1 # for versions before 1.9.0 use apps/v1beta2
kind: Deployment
metadata:
  name: redis-master
  labels:
    app: redis
spec:
  selector:
    matchLabels:
      app: redis
      role: master
      tier: backend
  replicas: 1  # single writable master; slaves provide read scaling
  template:
    metadata:
      labels:
        app: redis
        role: master
        tier: backend
    spec:
      containers:
      - name: master
        image: k8s.gcr.io/redis:e2e  # or just image: redis
        resources:
          requests:
            cpu: 100m
            memory: 100Mi
        ports:
        - containerPort: 6379  # default Redis port

准备Redis Master服务器Service配置文件

[root@k8s01 ~]# vi redis-master-service.yaml
# ClusterIP Service exposing the Redis master inside the cluster.
# Other pods reach it via the DNS name "redis-master" (see the
# GET_HOSTS_FROM=dns setting used by the slave and frontend deployments).
apiVersion: v1
kind: Service
metadata:
  name: redis-master
  labels:
    app: redis
    role: master
    tier: backend
spec:
  ports:
  - port: 6379
    targetPort: 6379
  # Must match the pod labels in redis-master-deployment.yaml.
  selector:
    app: redis
    role: master
    tier: backend

准备Redis Slave服务器Deployment配置文件

[root@k8s01 ~]# vi redis-slave-deployment.yaml
# Deployment running two Redis slave (read replica) pods.
# The replicas locate the master via DNS (GET_HOSTS_FROM=dns below).
apiVersion: apps/v1 # for versions before 1.9.0 use apps/v1beta2
kind: Deployment
metadata:
  name: redis-slave
  labels:
    app: redis
spec:
  selector:
    matchLabels:
      app: redis
      role: slave
      tier: backend
  replicas: 2  # two read replicas
  template:
    metadata:
      labels:
        app: redis
        role: slave
        tier: backend
    spec:
      containers:
      - name: slave
        image: gcr.io/google_samples/gb-redisslave:v3
        resources:
          requests:
            cpu: 100m
            memory: 100Mi
        env:
        - name: GET_HOSTS_FROM
          value: dns
          # Using `GET_HOSTS_FROM=dns` requires your cluster to
          # provide a dns service. As of Kubernetes 1.3, DNS is a built-in
          # service launched automatically. However, if the cluster you are using
          # does not have a built-in DNS service, you can instead
          # access an environment variable to find the master
          # service's host. To do so, comment out the 'value: dns' line above, and
          # uncomment the line below:
          # value: env
        ports:
        - containerPort: 6379  # default Redis port

准备Redis Slave服务器Service配置文件

[root@k8s01 ~]# vi redis-slave-service.yaml
# ClusterIP Service load-balancing reads across the Redis slave pods.
apiVersion: v1
kind: Service
metadata:
  name: redis-slave
  labels:
    app: redis
    role: slave
    tier: backend
spec:
  ports:
  - port: 6379  # targetPort defaults to the same value as port
  # Must match the pod labels in redis-slave-deployment.yaml.
  selector:
    app: redis
    role: slave
    tier: backend

准备Guestbook前端Deployment配置文件

[root@k8s01 ~]# vi frontend-deployment.yaml
# Deployment running three replicas of the PHP guestbook frontend.
# The frontend finds the redis-master/redis-slave Services via DNS.
apiVersion: apps/v1 # for versions before 1.9.0 use apps/v1beta2
kind: Deployment
metadata:
  name: frontend
  labels:
    app: guestbook
spec:
  selector:
    matchLabels:
      app: guestbook
      tier: frontend
  replicas: 3  # stateless web tier; safe to scale up/down freely
  template:
    metadata:
      labels:
        app: guestbook
        tier: frontend
    spec:
      containers:
      - name: php-redis
        image: gcr.io/google-samples/gb-frontend:v4
        resources:
          requests:
            cpu: 100m
            memory: 100Mi
        env:
        - name: GET_HOSTS_FROM
          value: dns
          # Using `GET_HOSTS_FROM=dns` requires your cluster to
          # provide a dns service. As of Kubernetes 1.3, DNS is a built-in
          # service launched automatically. However, if the cluster you are using
          # does not have a built-in DNS service, you can instead
          # access an environment variable to find the master
          # service's host. To do so, comment out the 'value: dns' line above, and
          # uncomment the line below:
          # value: env
        ports:
        - containerPort: 80  # HTTP

准备Guestbook前端Service配置文件

[root@k8s01 ~]# vi frontend-service.yaml
# NodePort Service exposing the guestbook frontend on every node
# (a random high port, e.g. 31802 in the transcript below).
apiVersion: v1
kind: Service
metadata:
  name: frontend
  labels:
    app: guestbook
    tier: frontend
spec:
  # comment or delete the following line if you want to use a LoadBalancer
  type: NodePort 
  # if your cluster supports it, uncomment the following to automatically create
  # an external load-balanced IP for the frontend service.
  # type: LoadBalancer
  ports:
  - port: 80
  # Must match the pod labels in frontend-deployment.yaml.
  selector:
    app: guestbook
    tier: frontend

应用Redis master服务器Deployment配置文件

[root@k8s01 ~]# kubectl apply -f redis-master-deployment.yaml 
deployment.apps/redis-master created
[root@k8s01 ~]# 
[root@k8s01 ~]# kubectl get deployments
NAME           READY   UP-TO-DATE   AVAILABLE   AGE
redis-master   1/1     1            1           20s
[root@k8s01 ~]# 
[root@k8s01 ~]# kubectl get pods
NAME                            READY   STATUS    RESTARTS   AGE
redis-master-6b54579d85-kkbjt   1/1     Running   0          27s
[root@k8s01 ~]# 
[root@k8s01 ~]# kubectl get services
NAME         TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)   AGE
kubernetes   ClusterIP   10.96.0.1    <none>        443/TCP   20h
[root@k8s01 ~]#

查看Pod日志输出

[root@k8s01 ~]# kubectl logs -f redis-master-6b54579d85-kkbjt
                _._                                                  
           _.-``__ ''-._                                             
      _.-``    `.  `_.  ''-._           Redis 2.8.19 (00000000/0) 64 bit
  .-`` .-```.  ```\/    _.,_ ''-._                                   
 (    '      ,       .-`  | `,    )     Running in stand alone mode
 |`-._`-...-` __...-.``-._|'` _.-'|     Port: 6379
 |    `-._   `._    /     _.-'    |     PID: 1
  `-._    `-._  `-./  _.-'    _.-'                                   
 |`-._`-._    `-.__.-'    _.-'_.-'|                                  
 |    `-._`-._        _.-'_.-'    |           http://redis.io        
  `-._    `-._`-.__.-'_.-'    _.-'                                   
 |`-._`-._    `-.__.-'    _.-'_.-'|                                  
 |    `-._`-._        _.-'_.-'    |                                  
  `-._    `-._`-.__.-'_.-'    _.-'                                   
      `-._    `-.__.-'    _.-'                                       
          `-._        _.-'                                           
              `-.__.-'                                               

[1] 21 Apr 06:35:24.921 # Server started, Redis version 2.8.19
[1] 21 Apr 06:35:24.922 # WARNING you have Transparent Huge Pages (THP) support enabled in your kernel. This will create latency and memory usage issues with Redis. To fix this issue run the command 'echo never > /sys/kernel/mm/transparent_hugepage/enabled' as root, and add it to your /etc/rc.local in order to retain the setting after a reboot. Redis must be restarted after THP is disabled.
[1] 21 Apr 06:35:24.922 # WARNING: The TCP backlog setting of 511 cannot be enforced because /proc/sys/net/core/somaxconn is set to the lower value of 128.
[1] 21 Apr 06:35:24.922 * The server is now ready to accept connections on port 6379

应用Redis master服务器Service配置文件

[root@k8s01 ~]# kubectl apply -f redis-master-service.yaml 
service/redis-master created
[root@k8s01 ~]# 
[root@k8s01 ~]# kubectl get pods
NAME                            READY   STATUS    RESTARTS   AGE
redis-master-6b54579d85-kkbjt   1/1     Running   0          5m59s
[root@k8s01 ~]# 
[root@k8s01 ~]# kubectl get deployments
NAME           READY   UP-TO-DATE   AVAILABLE   AGE
redis-master   1/1     1            1           6m11s
[root@k8s01 ~]# 
[root@k8s01 ~]# kubectl get services
NAME           TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)    AGE
kubernetes     ClusterIP   10.96.0.1       <none>        443/TCP    20h
redis-master   ClusterIP   10.103.128.16   <none>        6379/TCP   31s
[root@k8s01 ~]#

应用Redis slave服务器Deployment配置文件

[root@k8s01 ~]# kubectl apply -f redis-slave-deployment.yaml 
deployment.apps/redis-slave created
[root@k8s01 ~]# 
[root@k8s01 ~]# kubectl get pods
NAME                            READY   STATUS    RESTARTS   AGE
redis-master-6b54579d85-kkbjt   1/1     Running   0          8m4s
redis-slave-799788557c-8vxqf    1/1     Running   0          12s
redis-slave-799788557c-rq74t    1/1     Running   0          12s
[root@k8s01 ~]# 
[root@k8s01 ~]# kubectl get deployments
NAME           READY   UP-TO-DATE   AVAILABLE   AGE
redis-master   1/1     1            1           8m12s
redis-slave    2/2     2            2           20s
[root@k8s01 ~]# 
[root@k8s01 ~]# kubectl get services
NAME           TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)    AGE
kubernetes     ClusterIP   10.96.0.1       <none>        443/TCP    20h
redis-master   ClusterIP   10.103.128.16   <none>        6379/TCP   2m40s
[root@k8s01 ~]#

应用Redis slave服务器service配置文件

[root@k8s01 ~]# kubectl apply -f redis-slave-service.yaml 
service/redis-slave created
[root@k8s01 ~]# 
[root@k8s01 ~]# kubectl get pods
NAME                            READY   STATUS    RESTARTS   AGE
redis-master-6b54579d85-kkbjt   1/1     Running   0          9m6s
redis-slave-799788557c-8vxqf    1/1     Running   0          74s
redis-slave-799788557c-rq74t    1/1     Running   0          74s
[root@k8s01 ~]# 
[root@k8s01 ~]# kubectl get deployments
NAME           READY   UP-TO-DATE   AVAILABLE   AGE
redis-master   1/1     1            1           9m15s
redis-slave    2/2     2            2           83s
[root@k8s01 ~]# 
[root@k8s01 ~]# kubectl get services
NAME           TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)    AGE
kubernetes     ClusterIP   10.96.0.1       <none>        443/TCP    20h
redis-master   ClusterIP   10.103.128.16   <none>        6379/TCP   3m37s
redis-slave    ClusterIP   10.96.236.63    <none>        6379/TCP   25s
[root@k8s01 ~]#

应用Guestbook前端Deployment配置文件

[root@k8s01 ~]# kubectl apply -f frontend-deployment.yaml 
deployment.apps/frontend created
[root@k8s01 ~]# 
[root@k8s01 ~]# kubectl get pods -l app=guestbook -l tier=frontend
NAME                        READY   STATUS    RESTARTS   AGE
frontend-56fc5b6b47-5sgpf   1/1     Running   0          17s
frontend-56fc5b6b47-hb87m   1/1     Running   0          17s
frontend-56fc5b6b47-rs6jl   1/1     Running   0          17s
[root@k8s01 ~]#

应用Guestbook前端Service配置文件

[root@k8s01 ~]# kubectl apply -f frontend-service.yaml 
service/frontend created
[root@k8s01 ~]# 
[root@k8s01 ~]# kubectl get services
NAME           TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)        AGE
frontend       NodePort    10.96.130.115   <none>        80:31802/TCP   7s
kubernetes     ClusterIP   10.96.0.1       <none>        443/TCP        20h
redis-master   ClusterIP   10.103.128.16   <none>        6379/TCP       9m
redis-slave    ClusterIP   10.96.236.63    <none>        6379/TCP       5m48s
[root@k8s01 ~]#

查看前端 Service 的访问端口

[root@k8s01 ~]# kubectl get service frontend
NAME       TYPE       CLUSTER-IP      EXTERNAL-IP   PORT(S)        AGE
frontend   NodePort   10.96.130.115   <none>        80:31802/TCP   90s
[root@k8s01 ~]#

浏览器访问

服务伸缩

扩容

[root@k8s01 ~]# kubectl scale deployment frontend --replicas=5
deployment.apps/frontend scaled
[root@k8s01 ~]# kubectl get pods
NAME                            READY   STATUS    RESTARTS   AGE
frontend-56fc5b6b47-5sgpf       1/1     Running   0          8m38s
frontend-56fc5b6b47-f4pt6       1/1     Running   0          11s
frontend-56fc5b6b47-hb87m       1/1     Running   0          8m38s
frontend-56fc5b6b47-hj59m       1/1     Running   0          11s
frontend-56fc5b6b47-rs6jl       1/1     Running   0          8m38s
redis-master-6b54579d85-kkbjt   1/1     Running   0          22m
redis-slave-799788557c-8vxqf    1/1     Running   0          14m
redis-slave-799788557c-rq74t    1/1     Running   0          14m
[root@k8s01 ~]#

缩容

[root@k8s01 ~]# kubectl scale deployment frontend --replicas=2
deployment.apps/frontend scaled
[root@k8s01 ~]# kubectl get pods
NAME                            READY   STATUS    RESTARTS   AGE
frontend-56fc5b6b47-hb87m       1/1     Running   0          9m28s
frontend-56fc5b6b47-hj59m       1/1     Running   0          61s
redis-master-6b54579d85-kkbjt   1/1     Running   0          22m
redis-slave-799788557c-8vxqf    1/1     Running   0          15m
redis-slave-799788557c-rq74t    1/1     Running   0          15m
[root@k8s01 ~]#

清除 Deployment 配置、Service 配置和运行中的 Pod 容器

[root@k8s01 ~]# kubectl delete deployment -l app=redis
deployment.apps "redis-master" deleted
deployment.apps "redis-slave" deleted
[root@k8s01 ~]# kubectl delete service -l app=redis
service "redis-master" deleted
service "redis-slave" deleted
[root@k8s01 ~]# kubectl delete deployment -l app=guestbook
deployment.apps "frontend" deleted
[root@k8s01 ~]# kubectl delete service -l app=guestbook
service "frontend" deleted
[root@k8s01 ~]#
2020 年 4 月 21 日
 

基础环境安装脚本(基于Amazon AWS EC2 CentOS 7环境)

#!/bin/bash
#
# Kubernetes node bootstrap for CentOS 7 (Amazon AWS EC2).
# Disables SELinux, enables the bridge-netfilter sysctls, installs
# Docker CE with the systemd cgroup driver, and installs
# kubeadm/kubelet/kubectl. Run as root on every node before
# 'kubeadm init' / 'kubeadm join'.

set -euo pipefail

# Disable SELinux now and persistently across reboots.
# '|| true': setenforce returns non-zero if SELinux is already
# permissive/disabled, which must not abort the script under 'set -e'.
setenforce 0 || true
sed -i 's/^SELINUX=enforcing$/SELINUX=disabled/' /etc/selinux/config

# kube-proxy/CNI require bridged traffic to traverse iptables. The
# net.bridge.* keys only exist once br_netfilter is loaded, so load the
# module now and make it load automatically on boot.
modprobe br_netfilter
cat <<EOF > /etc/modules-load.d/k8s.conf
br_netfilter
EOF
cat <<EOF > /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
EOF
# NOTE: 'sysctl -p' reads only /etc/sysctl.conf and would silently skip
# the file written above; '--system' also applies /etc/sysctl.d/*.conf.
sysctl --system

# Install Docker CE from the upstream repository.
yum makecache
yum install -y yum-utils
yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
yum install -y docker-ce docker-ce-cli containerd.io

# Static name resolution for the cluster nodes.
cat <<EOF >> /etc/hosts
172.31.3.209 k8s01
172.31.8.132 k8s02
172.31.10.229 k8s03
EOF

# Use the systemd cgroup driver (matches the kubelet) and cap container
# log size. '-p' so a pre-existing directory is not an error.
mkdir -p /etc/docker
cat <<EOF > /etc/docker/daemon.json
{
  "exec-opts": ["native.cgroupdriver=systemd"],
  "log-driver": "json-file",
  "log-opts": {
    "max-size": "100m"
  },
  "storage-driver": "overlay2",
  "storage-opts": [
    "overlay2.override_kernel_check=true"
  ]
}
EOF

systemctl daemon-reload
systemctl enable docker
systemctl restart docker

# Google-hosted yum repository providing the Kubernetes tools.
cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://packages.cloud.google.com/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://packages.cloud.google.com/yum/doc/yum-key.gpg https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg
EOF

yum install -y kubectl kubelet kubeadm
# Enable kubelet; kubeadm starts/configures it during init/join.
systemctl enable kubelet

执行脚本

[root@k8s01 ~]# vi deploy.sh
[root@k8s01 ~]# chmod 700 deploy.sh 
[root@k8s01 ~]# ./deploy.sh

初始化master节点

kubeadm init --apiserver-advertise-address=172.31.14.12 --pod-network-cidr=10.244.0.0/16

[root@k8s01 ~]# kubeadm init --apiserver-advertise-address=172.31.14.12 --pod-network-cidr=10.244.0.0/16

配置本地命令环境

[root@k8s01 ~]# mkdir -p $HOME/.kube
[root@k8s01 ~]# sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
[root@k8s01 ~]# sudo chown $(id -u):$(id -g) $HOME/.kube/config

节点加入集群

kubeadm join 172.31.14.12:6443 --token ghr4s0.13nh5q6f6ywt2oso \
--discovery-token-ca-cert-hash sha256:d435ee7f3795a10b58762be903a78a99c719e3520fb029d718505095b37e9859

节点2加入

[root@k8s02 ~]# kubeadm join 172.31.14.12:6443 --token ghr4s0.13nh5q6f6ywt2oso \
> --discovery-token-ca-cert-hash sha256:d435ee7f3795a10b58762be903a78a99c719e3520fb029d718505095b37e9859
W0420 10:23:48.432125 9198 join.go:346] [preflight] WARNING: JoinControlPane.controlPlane settings will be ignored when control-plane flag is not set.
[preflight] Running pre-flight checks
[preflight] Reading configuration from the cluster...
[preflight] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -oyaml'
[kubelet-start] Downloading configuration for the kubelet from the "kubelet-config-1.18" ConfigMap in the kube-system namespace
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Starting the kubelet
[kubelet-start] Waiting for the kubelet to perform the TLS Bootstrap...

This node has joined the cluster:
* Certificate signing request was sent to apiserver and a response was received.
* The Kubelet was informed of the new secure connection details.

Run 'kubectl get nodes' on the control-plane to see this node join the cluster.

[root@k8s02 ~]#

节点3加入

[root@k8s03 ~]# kubeadm join 172.31.14.12:6443 --token ghr4s0.13nh5q6f6ywt2oso \
> --discovery-token-ca-cert-hash sha256:d435ee7f3795a10b58762be903a78a99c719e3520fb029d718505095b37e9859
W0420 10:24:14.829097 9202 join.go:346] [preflight] WARNING: JoinControlPane.controlPlane settings will be ignored when control-plane flag is not set.
[preflight] Running pre-flight checks
[preflight] Reading configuration from the cluster...
[preflight] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -oyaml'
[kubelet-start] Downloading configuration for the kubelet from the "kubelet-config-1.18" ConfigMap in the kube-system namespace
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Starting the kubelet
[kubelet-start] Waiting for the kubelet to perform the TLS Bootstrap...

This node has joined the cluster:
* Certificate signing request was sent to apiserver and a response was received.
* The Kubelet was informed of the new secure connection details.

Run 'kubectl get nodes' on the control-plane to see this node join the cluster.

[root@k8s03 ~]#

安装flannel网络

[root@k8s01 ~]# kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
podsecuritypolicy.policy/psp.flannel.unprivileged created
clusterrole.rbac.authorization.k8s.io/flannel created
clusterrolebinding.rbac.authorization.k8s.io/flannel created
serviceaccount/flannel created
configmap/kube-flannel-cfg created
daemonset.apps/kube-flannel-ds-amd64 created
daemonset.apps/kube-flannel-ds-arm64 created
daemonset.apps/kube-flannel-ds-arm created
daemonset.apps/kube-flannel-ds-ppc64le created
daemonset.apps/kube-flannel-ds-s390x created
[root@k8s01 ~]#

获取节点信息

[root@k8s01 ~]# kubectl get nodes
NAME    STATUS   ROLES    AGE   VERSION
k8s01   Ready    master   12m   v1.18.2
k8s02   Ready    <none>   10m   v1.18.2
k8s03   Ready    <none>   10m   v1.18.2
[root@k8s01 ~]#

查看集群组件状态

[root@k8s01 ~]# kubectl get cs
NAME                 STATUS    MESSAGE             ERROR
scheduler            Healthy   ok                  
controller-manager   Healthy   ok                  
etcd-0               Healthy   {"health":"true"}   
[root@k8s01 ~]# 

查看本地镜像列表

[root@k8s01 ~]# docker image ls
REPOSITORY                           TAG                 IMAGE ID            CREATED             SIZE
k8s.gcr.io/kube-proxy                v1.18.2             0d40868643c6        3 days ago          117MB
k8s.gcr.io/kube-scheduler            v1.18.2             a3099161e137        3 days ago          95.3MB
k8s.gcr.io/kube-apiserver            v1.18.2             6ed75ad404bd        3 days ago          173MB
k8s.gcr.io/kube-controller-manager   v1.18.2             ace0a8c17ba9        3 days ago          162MB
quay.io/coreos/flannel               v0.12.0-amd64       4e9f801d2217        5 weeks ago         52.8MB
k8s.gcr.io/pause                     3.2                 80d28bedfe5d        2 months ago        683kB
k8s.gcr.io/coredns                   1.6.7               67da37a9a360        2 months ago        43.8MB
k8s.gcr.io/etcd                      3.4.3-0             303ce5db0e90        5 months ago        288MB
[root@k8s01 ~]#