K8S Getting Started Notes

Installing K8S

1. Environment initialization

# Disable swap: comment out the swap entry (the /swap.img line) in /etc/fstab,
# then turn swap off immediately
vim /etc/fstab
# /swap.img
swapoff -a
# Enable IP forwarding
vim /etc/sysctl.conf
net.ipv4.ip_forward=1

sysctl -p  # apply the settings (prints the values it loads)
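kubeadm setups of this vintage usually also need the br_netfilter module and the bridge sysctls; a hedged sketch of that extra step:

modprobe br_netfilter
cat <<EOF >>/etc/sysctl.conf
net.bridge.bridge-nf-call-iptables=1
net.bridge.bridge-nf-call-ip6tables=1
EOF
sysctl -p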

2. Install Docker (see above)

3. Install k8s

# Add the apt repository signing key
curl https://mirrors.aliyun.com/kubernetes/apt/doc/apt-key.gpg | apt-key add -
# Add the apt source
cat <<EOF >/etc/apt/sources.list.d/kubernetes.list
deb https://mirrors.aliyun.com/kubernetes/apt/ kubernetes-xenial main
EOF  

apt-get update
# Install a specific version
apt-get install -y kubelet=1.18.4-00 kubeadm=1.18.4-00 kubectl=1.18.4-00
systemctl enable kubelet
systemctl start kubelet
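# Optionally, hold the packages so a routine apt upgrade cannot move the pinned versions:
apt-mark hold kubelet kubeadm kubectl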
# List the required images
kubeadm config images list --kubernetes-version=v1.18.4

k8s.gcr.io/kube-apiserver:v1.18.4
k8s.gcr.io/kube-controller-manager:v1.18.4
k8s.gcr.io/kube-scheduler:v1.18.4
k8s.gcr.io/kube-proxy:v1.18.4
k8s.gcr.io/pause:3.2
k8s.gcr.io/etcd:3.4.3-0
k8s.gcr.io/coredns:1.6.7
# Pull the images from the Aliyun mirror
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/kube-apiserver:v1.18.4
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/kube-controller-manager:v1.18.4
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/kube-scheduler:v1.18.4
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/kube-proxy:v1.18.4
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.2
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/etcd:3.4.3-0
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/coredns:1.6.7

# Re-tag the images to the k8s.gcr.io names kubeadm expects
docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/kube-apiserver:v1.18.4 k8s.gcr.io/kube-apiserver:v1.18.4
docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/kube-controller-manager:v1.18.4 k8s.gcr.io/kube-controller-manager:v1.18.4
docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/kube-scheduler:v1.18.4 k8s.gcr.io/kube-scheduler:v1.18.4
docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/kube-proxy:v1.18.4 k8s.gcr.io/kube-proxy:v1.18.4
docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.2 k8s.gcr.io/pause:3.2
docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/etcd:3.4.3-0 k8s.gcr.io/etcd:3.4.3-0
docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/coredns:1.6.7 k8s.gcr.io/coredns:1.6.7
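
The seven pull/tag pairs above can also be written as a single loop; a compact equivalent:

for img in kube-apiserver:v1.18.4 kube-controller-manager:v1.18.4 \
           kube-scheduler:v1.18.4 kube-proxy:v1.18.4 \
           pause:3.2 etcd:3.4.3-0 coredns:1.6.7; do
  docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/$img
  docker tag  registry.cn-hangzhou.aliyuncs.com/google_containers/$img k8s.gcr.io/$img
done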
# Initialize the master node (run on the master only)
kubeadm init \
 --kubernetes-version=v1.18.4 \
 --pod-network-cidr=10.244.0.0/16 \
 --service-cidr=10.96.0.0/12 \
 --apiserver-advertise-address=192.168.6.51  # the master's IP
 
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
# Join the cluster (run on each worker node); copy the exact command that
# kubeadm init printed on the master — the token and hash below are examples
kubeadm join 192.168.6.51:6443 --token h0uelc.l46qp29nxscke7f7 \
        --discovery-token-ca-cert-hash sha256:abc807778e24bff73362ceeb783cc7f6feec96f20b4fd707c3f8e8312294e28f
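# Tokens expire after 24 hours by default; if the one above is stale,
# a fresh join command can be printed on the master:
kubeadm token create --print-join-command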
# Install the network plugin; replace quay.io with quay-mirror.qiniu.com in the manifest first
kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml

# Below is the manifest with the registry already replaced
---
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
  name: psp.flannel.unprivileged
  annotations:
    seccomp.security.alpha.kubernetes.io/allowedProfileNames: docker/default
    seccomp.security.alpha.kubernetes.io/defaultProfileName: docker/default
    apparmor.security.beta.kubernetes.io/allowedProfileNames: runtime/default
    apparmor.security.beta.kubernetes.io/defaultProfileName: runtime/default
spec:
  privileged: false
  volumes:
  - configMap
  - secret
  - emptyDir
  - hostPath
  allowedHostPaths:
  - pathPrefix: "/etc/cni/net.d"
  - pathPrefix: "/etc/kube-flannel"
  - pathPrefix: "/run/flannel"
  readOnlyRootFilesystem: false
  # Users and groups
  runAsUser:
    rule: RunAsAny
  supplementalGroups:
    rule: RunAsAny
  fsGroup:
    rule: RunAsAny
  # Privilege Escalation
  allowPrivilegeEscalation: false
  defaultAllowPrivilegeEscalation: false
  # Capabilities
  allowedCapabilities: ['NET_ADMIN', 'NET_RAW']
  defaultAddCapabilities: []
  requiredDropCapabilities: []
  # Host namespaces
  hostPID: false
  hostIPC: false
  hostNetwork: true
  hostPorts:
  - min: 0
    max: 65535
  # SELinux
  seLinux:
    # SELinux is unused in CaaSP
    rule: 'RunAsAny'
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: flannel
rules:
- apiGroups: ['extensions']
  resources: ['podsecuritypolicies']
  verbs: ['use']
  resourceNames: ['psp.flannel.unprivileged']
- apiGroups:
  - ""
  resources:
  - pods
  verbs:
  - get
- apiGroups:
  - ""
  resources:
  - nodes
  verbs:
  - list
  - watch
- apiGroups:
  - ""
  resources:
  - nodes/status
  verbs:
  - patch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: flannel
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: flannel
subjects:
- kind: ServiceAccount
  name: flannel
  namespace: kube-system
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: flannel
  namespace: kube-system
---
kind: ConfigMap
apiVersion: v1
metadata:
  name: kube-flannel-cfg
  namespace: kube-system
  labels:
    tier: node
    app: flannel
data:
  cni-conf.json: |
    {
      "name": "cbr0",
      "cniVersion": "0.3.1",
      "plugins": [
        {
          "type": "flannel",
          "delegate": {
            "hairpinMode": true,
            "isDefaultGateway": true
          }
        },
        {
          "type": "portmap",
          "capabilities": {
            "portMappings": true
          }
        }
      ]
    }
  net-conf.json: |
    {
      "Network": "10.244.0.0/16",
      "Backend": {
        "Type": "vxlan"
      }
    }
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: kube-flannel-ds
  namespace: kube-system
  labels:
    tier: node
    app: flannel
spec:
  selector:
    matchLabels:
      app: flannel
  template:
    metadata:
      labels:
        tier: node
        app: flannel
    spec:
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
            - matchExpressions:
              - key: kubernetes.io/os
                operator: In
                values:
                - linux
      hostNetwork: true
      priorityClassName: system-node-critical
      tolerations:
      - operator: Exists
        effect: NoSchedule
      serviceAccountName: flannel
      initContainers:
      - name: install-cni
        image: quay-mirror.qiniu.com/coreos/flannel:v0.14.0
        command:
        - cp
        args:
        - -f
        - /etc/kube-flannel/cni-conf.json
        - /etc/cni/net.d/10-flannel.conflist
        volumeMounts:
        - name: cni
          mountPath: /etc/cni/net.d
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      containers:
      - name: kube-flannel
        image: quay-mirror.qiniu.com/coreos/flannel:v0.14.0
        command:
        - /opt/bin/flanneld
        args:
        - --ip-masq
        - --kube-subnet-mgr
        resources:
          requests:
            cpu: "100m"
            memory: "50Mi"
          limits:
            cpu: "100m"
            memory: "50Mi"
        securityContext:
          privileged: false
          capabilities:
            add: ["NET_ADMIN", "NET_RAW"]
        env:
        - name: POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        volumeMounts:
        - name: run
          mountPath: /run/flannel
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      volumes:
      - name: run
        hostPath:
          path: /run/flannel
      - name: cni
        hostPath:
          path: /etc/cni/net.d
      - name: flannel-cfg
        configMap:
          name: kube-flannel-cfg

# Check node status
kubectl get nodes

# If the nodes are Ready, all is well. If NotReady, the following steps may also be needed
vim /etc/cni/net.d/10-flannel.conflist
{
  "name": "cbr0",
  "cniVersion": "0.2.0",
  "plugins": [
    {
      "type": "flannel",
      "delegate": {
        "hairpinMode": true,
        "isDefaultGateway": true
      }
    },
    {
      "type": "portmap",
      "capabilities": {
        "portMappings": true
      }
    }
  ]
}

systemctl daemon-reload

mkdir -p /run/flannel
vim /run/flannel/subnet.env
# FLANNEL_NETWORK must match the --pod-network-cidr given to kubeadm init
FLANNEL_NETWORK=10.244.0.0/16
FLANNEL_SUBNET=10.244.0.1/24
FLANNEL_MTU=1450
FLANNEL_IPMASQ=true
# Test by creating an nginx (on the master)
kubectl create deployment nginx --image=nginx
kubectl expose deployment nginx --port=80 --type=NodePort
kubectl get pod,svc
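
kubectl get svc shows which NodePort was allocated; the service can then be checked from outside the cluster (30080 below is a stand-in for the actual allocated port):

curl http://192.168.6.51:30080  # 30080 is a placeholder for the allocated NodePort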

Usage

Create/update resources

kubectl run nginx-pod --image=nginx:1.17.1 --port=80  # imperative, on the command line
kubectl apply -f XXX.yaml  # declarative, from a config file

Delete resources

kubectl delete -f XXX.yaml

Query resources

kubectl get|describe <resource>  # resource types: node, pod, svc, ns, deployment

Namespace

Namespace is a very important resource in kubernetes. Its main job is to isolate resources between multiple environments, or between multiple tenants.

By default, all Pods in a kubernetes cluster can reach one another. In practice you may not want two Pods to talk to each other, and in that case they can be placed in different namespaces. By assigning the cluster's resources to different Namespaces, kubernetes forms logical "groups", so the resources of different groups can be isolated, used, and managed separately.

Through kubernetes' authorization mechanism, different namespaces can be handed to different tenants to manage, which gives multi-tenant resource isolation. Combined with kubernetes' resource quota mechanism, which caps the resources each tenant may consume (CPU, memory, and so on), this provides control over what a tenant can use.
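
A minimal sketch of such a quota for the dev namespace (the quota name and every limit here are illustrative assumptions):

kubectl apply -f - <<EOF
apiVersion: v1
kind: ResourceQuota
metadata:
  name: dev-quota          # assumed name
  namespace: dev
spec:
  hard:
    requests.cpu: "2"      # total CPU the tenant may request
    requests.memory: 4Gi
    limits.cpu: "4"
    limits.memory: 8Gi
EOF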

View

kubectl get ns  # list all namespaces
kubectl get ns <name>  # view a specific namespace
kubectl get ns <name> -o wide|json|yaml  # view in a given output format
kubectl describe ns <name>

Create

kubectl create ns <name>

Delete

kubectl delete ns <name>

yml configuration

apiVersion: v1
kind: Namespace
metadata:
  name: dev

Pod

The Pod is the smallest unit that kubernetes manages. A program must run inside a container, and a container must live inside a Pod.

A Pod can be thought of as a wrapper around containers; one Pod can hold one or more containers.

Create and run

# Command format: kubectl run <pod controller name> [args]
# --image      the Pod's image
# --port       the port to expose
# --namespace  the namespace
kubectl run nginx --image=nginx:latest --port=80 --namespace dev

View

kubectl get pods -n dev
kubectl describe pod nginx -n dev
kubectl get pods -n dev -o wide

Delete

kubectl delete pod nginx -n dev  # deleting the pod this way just makes its controller create a new one
kubectl delete deploy nginx -n dev  # delete the pod's controller instead

Configuration approach

apiVersion: v1
kind: Pod
metadata:
  name: nginx
  namespace: dev
spec:
  containers:
  - image: nginx:latest
    name: pod
    ports:
    - name: nginx-port
      containerPort: 80
      protocol: TCP
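
Saved to a file, the manifest is applied and verified with (pod-nginx.yaml is an assumed file name):

kubectl apply -f pod-nginx.yaml
kubectl get pod nginx -n dev -o wide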

Label

Label is an important concept in kubernetes. Its job is to attach identifying tags to resources so that they can be distinguished and selected.

Characteristics of Labels:

  • A Label is attached to an object (Node, Pod, Service, and so on) as a key/value pair
  • A resource object can define any number of Labels, and the same Label can be added to any number of resource objects
  • Labels are usually fixed when a resource object is defined, but they can also be added or removed dynamically after the object is created

Labels let resources be grouped along multiple dimensions, which makes resource allocation, scheduling, configuration, and deployment flexible and convenient.

Some common Label examples:

  • version labels: "version":"release", "version":"stable"...
  • environment labels: "environment":"dev", "environment":"test", "environment":"pro"
  • tier labels: "tier":"frontend", "tier":"backend"

Once labels are defined, selecting by label also has to be considered; this is what the Label Selector is for. In short:

A Label attaches an identifier to a resource object

A Label Selector queries and filters resource objects that carry certain labels

There are currently two kinds of Label Selector:

  • Equality-based Label Selector

    name = slave: selects all objects whose Labels contain key="name" with value="slave"

    env != production: selects all objects whose Labels contain key="env" with a value other than "production"

  • Set-based Label Selector

    name in (master, slave): selects all objects whose Labels contain key="name" with value "master" or "slave"

    name not in (frontend): selects all objects whose Labels contain key="name" with a value other than "frontend"

Multiple selection conditions can be combined by separating the Label Selectors with a comma. For example:

name=slave,env!=production

name not in (frontend),env!=production
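
kubectl accepts the set-based form through the same -l flag; a small sketch against the labels used in these notes:

kubectl get pod -n dev -l 'version in (1.0, 2.0)' --show-labels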

Commands

# Add a label to a pod
kubectl label pod nginx-pod version=1.0 -n dev

# Update a label on a pod
kubectl label pod nginx-pod version=2.0 -n dev --overwrite

# Show labels
kubectl get pod nginx-pod -n dev --show-labels

# Filter by label
kubectl get pod -n dev -l version=2.0  --show-labels
kubectl get pod -n dev -l version!=2.0 --show-labels

# Remove a label
kubectl label pod nginx-pod version- -n dev

Configuration approach

apiVersion: v1
kind: Pod
metadata:
  name: nginx
  namespace: dev
  labels:
    version: "3.0" 
    env: "test"
spec:
  containers:
  - image: nginx:latest
    name: pod
    ports:
    - name: nginx-port
      containerPort: 80
      protocol: TCP

Deployment

In kubernetes, the Pod is the smallest unit of control, but kubernetes rarely controls Pods directly; that is usually done through Pod controllers. A Pod controller manages pods: it keeps pod resources in the desired state, and when a pod fails it tries to restart or recreate the pod, as the sketch below shows.
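
A quick way to watch that self-healing, assuming a deployment like the one created below already exists in the dev namespace (<pod-name> stands for whatever name the first command prints):

kubectl get pods -n dev               # note one pod's name
kubectl delete pod <pod-name> -n dev
kubectl get pods -n dev               # the controller has already started a replacement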

Command approach

# Command format: kubectl create deployment <name> [args]
# --image      the pod's image
# --port       the port
# --replicas   how many pods to create
# --namespace  the namespace
kubectl create deploy nginx --image=nginx:latest --port=80 --replicas=3 -n dev

# View deployment info
kubectl get deploy -n dev

# UP-TO-DATE: number of replicas that have been updated to the latest spec
# AVAILABLE: number of replicas available
kubectl get deploy -n dev -o wide

# View the deployment's details
kubectl describe deploy nginx -n dev
  
# Delete
kubectl delete deploy nginx -n dev
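
Scaling a running deployment is one more command (the count 5 is arbitrary):

kubectl scale deploy nginx --replicas=5 -n dev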

Configuration approach

apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx
  namespace: dev
spec:
  replicas: 2
  selector:
    matchLabels:
      run: nginx
  template:
    metadata:
      labels:
        run: nginx
    spec:
      containers:
      - image: nginx:latest
        name: nginx
        ports:
        - containerPort: 80
          protocol: TCP

Service

Although every Pod is assigned its own Pod IP, two problems remain:

  • a Pod IP changes whenever the Pod is recreated
  • a Pod IP is a virtual IP visible only inside the cluster and cannot be reached from outside

Both make the service hard to access, so kubernetes introduced Service to solve this.

A Service can be seen as a unified entry point for a group of Pods of the same kind. Through a Service, an application gets service discovery and load balancing with little effort.

Command approach

# Expose a Service. type: ClusterIP = reachable only inside the cluster; NodePort = also reachable from outside the cluster
kubectl expose deploy nginx --name=svc-nginx1 --type=NodePort --port=80 --target-port=80 -n dev

# View the service
kubectl get svc svc-nginx1 -n dev -o wide

# Delete the svc
kubectl delete svc svc-nginx1 -n dev
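
Inside the cluster the service is also reachable by name through the cluster DNS; a throwaway pod can confirm this (a sketch, assuming svc-nginx1 from above still exists):

kubectl run -it --rm test --image=busybox --restart=Never -n dev -- wget -qO- http://svc-nginx1:80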

Configuration approach

apiVersion: v1
kind: Service
metadata:
  name: svc-nginx
  namespace: dev
spec:
  clusterIP: 10.109.179.231  # pin the svc's cluster-internal IP (optional; auto-assigned if omitted)
  ports:
  - port: 80
    protocol: TCP
    targetPort: 80
  selector:
    run: nginx
  type: ClusterIP
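
For access from outside the cluster, the same Service can instead use type: NodePort; a hedged fragment showing only the fields that change (the nodePort value is an assumption, picked from the default 30000-32767 range):

spec:
  type: NodePort
  ports:
  - port: 80
    protocol: TCP
    targetPort: 80
    nodePort: 30080  # assumed; must fall in the default 30000-32767 range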
