Deploying a Highly Available Kubernetes Cluster with kubeadm

Deploying Kubernetes fails far too often on image pulls because the official registries are unreachable from mainland China, so this guide substitutes domestic mirror sources for the official ones in several places.

IP plan

Role      IP
master01  192.168.0.101
master02  192.168.0.102
master03  192.168.0.103
worker01  192.168.0.104
worker02  192.168.0.105
worker03  192.168.0.106
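
Since kubeadm registers each node under its hostname, it helps to set hostnames matching the plan before anything else. A minimal sketch, assuming each line is run on the corresponding machine (the kubeadm config later pins the first master's node name explicitly):

# Set hostnames to match the plan (run each line on its own machine)
hostnamectl set-hostname master01   # on 192.168.0.101
hostnamectl set-hostname master02   # on 192.168.0.102
hostnamectl set-hostname master03   # on 192.168.0.103
hostnamectl set-hostname worker01   # on 192.168.0.104
hostnamectl set-hostname worker02   # on 192.168.0.105
hostnamectl set-hostname worker03   # on 192.168.0.106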

Base environment setup

Run on all servers:

# Disable swap
swapoff -a
sed -i '/swap/s/^/#/g' /etc/fstab
# Disable SELinux
setenforce 0
sed -i '/SELINUX/s/enforcing/disabled/g' /etc/selinux/config
# Disable the firewall
systemctl stop firewalld
systemctl disable firewalld
# Configure the yum repositories
wget https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo -O /etc/yum.repos.d/docker-ce.repo
rpm --import https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
echo '
[kubernetes]
name=kubernetes aliyun
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
gpgcheck=1
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
enabled=1
' > /etc/yum.repos.d/kubernetes.repo
# Install docker, kubelet and kubeadm
yum repolist
yum install kubeadm kubelet docker-ce -y
# Start docker, and enable docker and kubelet at boot
# (kubelet cannot start successfully until kubeadm init/join runs)
systemctl start docker
systemctl enable docker
systemctl enable kubelet
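
One step the script above omits: kubeadm's preflight checks expect bridged traffic to traverse iptables. These are the standard kernel settings from the kubeadm install docs, added here as a hedged supplement:

# Load br_netfilter and let bridged traffic pass through iptables (required by kubeadm preflight)
modprobe br_netfilter
cat <<EOF > /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
EOF
sysctl --system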

Deploy the first master

# Export the default kubeadm configuration for editing
kubeadm config print init-defaults > kubeadm-config.yaml
# Adjust the config file as needed
vim kubeadm-config.yaml
apiVersion: kubeadm.k8s.io/v1beta1
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: abcdef.0123456789abcdef
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
kind: InitConfiguration
localAPIEndpoint:
  # Bind address; change it to listen on all addresses
  advertiseAddress: 0.0.0.0
  bindPort: 6443
nodeRegistration:
  criSocket: /var/run/dockershim.sock
  name: master-01
  taints:
  - effect: NoSchedule
    key: node-role.kubernetes.io/master
---
apiServer:
  # timeoutForControlPlane: 4m0s
  # IP addresses that must appear in the API server certificate SANs
  certSANs:
  - 192.168.0.101
  - 192.168.0.102
  - 192.168.0.103
  - 192.168.0.100 # the virtual IP (keepalived VIP)
  - 172.0.0.1
apiVersion: kubeadm.k8s.io/v1beta1
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
# Domain name used to reach the cluster; joining additional masters fails if a bare IP is used here
controlPlaneEndpoint: "k8s.doman.io:6443"
controllerManager: {}
dns:
  type: CoreDNS
etcd:
  local:
    dataDir: /var/lib/etcd
# Override the image repository; the default Google registry is unreachable
imageRepository: registry.cn-hangzhou.aliyuncs.com/google_containers
kind: ClusterConfiguration
kubernetesVersion: v1.14.0
networking:
  dnsDomain: cluster.local
  # Set the pod CIDR; leaving the default empty CIDR prevents flannel from starting
  podSubnet: 10.244.0.0/16
  serviceSubnet: 10.96.0.0/12
scheduler: {}
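
Before running init, the control-plane images can be pre-pulled from the configured mirror to surface registry problems early (kubeadm has shipped this subcommand since v1.11):

# Pre-pull all control-plane images from the mirror configured above
kubeadm config images pull --config kubeadm-config.yaml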
# Add a hosts entry for the control-plane endpoint
echo '192.168.0.101 k8s.doman.io' >> /etc/hosts
# Run kubeadm init to create the master
kubeadm init --config kubeadm-config.yaml
# Copy the kubectl config file (do this before running any kubectl command)
mkdir ~/.kube
cp /etc/kubernetes/admin.conf ~/.kube/config
# Deploy flannel
# Note: if the host has multiple NICs, download kube-flannel.yml first and bind flannel to the right NIC:
# containers:
# - name: kube-flannel
#   image: quay.io/coreos/flannel:v0.13.0
#   command:
#   - /opt/bin/flanneld
#   args:
#   - --ip-masq
#   - --kube-subnet-mgr
#   - --iface=eth0  # add an --iface argument to the flanneld args
kubectl create -f https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
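
Once init finishes and the kubeconfig is in place, it is worth confirming that flannel and CoreDNS come up before adding more nodes:

# Watch the control-plane pods until flannel and coredns are Running
kubectl get pods -n kube-system
# The master itself should eventually report Ready
kubectl get nodes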

Configure the second and third masters

Run on master01:

# Copy the certificates to the other two masters (repeat for master03; see the loop sketched below)
scp /etc/kubernetes/admin.conf master02-ip:/etc/kubernetes/
scp /etc/kubernetes/pki/etcd/ca.* master02-ip:/etc/kubernetes/pki/etcd/
scp /etc/kubernetes/pki/front-proxy-ca.* master02-ip:/etc/kubernetes/pki/
scp /etc/kubernetes/pki/sa.* master02-ip:/etc/kubernetes/pki/
scp /etc/kubernetes/pki/ca.* master02-ip:/etc/kubernetes/pki/
# Generate the join command {KUBE_JOIN_COMMAND}; it is needed when other nodes join
kubeadm token create --print-join-command
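
The commands above target master02 only; a small loop covers both standby masters. A sketch, assuming passwordless root SSH and keeping the placeholder hostnames master02-ip/master03-ip:

# Distribute the same certificate set to both standby masters
for host in master02-ip master03-ip; do
  ssh "$host" mkdir -p /etc/kubernetes/pki/etcd
  scp /etc/kubernetes/admin.conf "$host":/etc/kubernetes/
  scp /etc/kubernetes/pki/etcd/ca.* "$host":/etc/kubernetes/pki/etcd/
  scp /etc/kubernetes/pki/front-proxy-ca.* "$host":/etc/kubernetes/pki/
  scp /etc/kubernetes/pki/sa.* "$host":/etc/kubernetes/pki/
  scp /etc/kubernetes/pki/ca.* "$host":/etc/kubernetes/pki/
done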

Run on master02 or master03:

# Add a hosts entry for the control-plane endpoint
echo '192.168.0.101 k8s.doman.io' >> /etc/hosts
# Join master02 or master03 to the cluster as a control-plane node
{KUBE_JOIN_COMMAND} --experimental-control-plane
# Point the hosts entry at the local master (use 103 instead of 102 on master03)
sed -i 's/101/102/g' /etc/hosts
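
Since admin.conf was already copied over, kubectl can be set up on the newly joined master the same way as on master01:

# Make kubectl usable on the new master and confirm it sees the cluster
mkdir ~/.kube
cp /etc/kubernetes/admin.conf ~/.kube/config
kubectl get nodes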

Configure worker nodes

Configuring a worker node only requires the hosts entry and the plain join command {KUBE_JOIN_COMMAND}, as sketched below.
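
# Run on each worker node (the hosts entry mirrors the one used on the masters)
echo '192.168.0.101 k8s.doman.io' >> /etc/hosts
{KUBE_JOIN_COMMAND}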

Check node status

All nodes should report Ready:

kubectl get nodes

Check the status of the kubernetes component pods:

kubectl get pods -n kube-system

Check the status of the kubernetes components:

kubectl get cs

NAME                 STATUS    MESSAGE             ERROR
controller-manager   Healthy   ok
scheduler            Healthy   ok
etcd-0               Healthy   {"health":"true"}

Install the dashboard

# Download the official dashboard manifest
wget https://raw.githubusercontent.com/kubernetes/dashboard/v1.10.1/src/deploy/recommended/kubernetes-dashboard.yaml
# Look up a mirror of the image on docker.io
docker search kubernetes-dashboard-amd64
# Change the image source: replace the k8s.gcr.io image with the docker.io mirror
containers:
- name: kubernetes-dashboard
  #image: k8s.gcr.io/kubernetes-dashboard-amd64:v1.10.1
  image: docker.io/mirrorgooglecontainers/kubernetes-dashboard-amd64:v1.10.1

# Broaden the RBAC binding
apiVersion: rbac.authorization.k8s.io/v1
#kind: RoleBinding  <-- change to ClusterRoleBinding
kind: ClusterRoleBinding
metadata:
  name: kubernetes-dashboard-minimal
  namespace: kube-system
roleRef:
  apiGroup: rbac.authorization.k8s.io
  #kind: Role  <-- change to ClusterRole
  #name: kubernetes-dashboard-minimal  <-- change to cluster-admin
  kind: ClusterRole
  name: cluster-admin
subjects:
- kind: ServiceAccount
  name: kubernetes-dashboard
  namespace: kube-system
# Install the dashboard
kubectl apply -f kubernetes-dashboard.yaml

# Check that the dashboard is running
kubectl get pods -n kube-system | grep dashboard
kubernetes-dashboard-664d76d896-fxwsm   1/1   Running   0   101m

# Retrieve the dashboard login token
kubectl -n kube-system describe secret $(kubectl -n kube-system get secret | grep dashboard-token | awk '{print $1}')
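
With the cluster-admin binding above, logging in with that token grants full access. One way to reach the UI without exposing a port is the API server proxy; the URL below is the standard service-proxy path for the kube-system/kubernetes-dashboard service:

# Access the dashboard through the API server proxy
kubectl proxy &
# then open:
# http://localhost:8001/api/v1/namespaces/kube-system/services/https:kubernetes-dashboard:/proxy/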

Install ingress

The supported versions are listed in the ingress-nginx GitHub repository.

Find the deployment file deploy/mandatory.yaml and download it:
wget https://raw.githubusercontent.com/kubernetes/ingress-nginx/nginx-0.20.0/deploy/mandatory.yaml

Change the image repository in two places:
spec:
  terminationGracePeriodSeconds: 60
  containers:
  - name: default-http-backend
    # Any image is permissible as long as:
    # 1. It serves a 404 page at /
    # 2. It serves 200 on a /healthz endpoint
    # image: k8s.gcr.io/defaultbackend-amd64:1.5
    image: registry.cn-qingdao.aliyuncs.com/kubernetes_xingej/defaultbackend-amd64:1.5
....
....
spec:
  serviceAccountName: nginx-ingress-serviceaccount
  containers:
  - name: nginx-ingress-controller
    #image: quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.20.0
    image: registry.cn-qingdao.aliyuncs.com/kubernetes_xingej/nginx-ingress-controller:0.20.0

# Install ingress
kubectl apply -f mandatory.yaml
# Check the ingress pod status (mandatory.yaml creates the ingress-nginx namespace)
kubectl get pods -n ingress-nginx
# Check the ingress service status
kubectl get svc -n ingress-nginx
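
As a smoke test, a minimal Ingress resource can be applied. This is a sketch: test.doman.io, my-service and its port 80 are hypothetical, and extensions/v1beta1 matches the Ingress API available in Kubernetes v1.14:

# A minimal Ingress routing a hypothetical hostname to a hypothetical service
kubectl apply -f - <<EOF
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: test-ingress
spec:
  rules:
  - host: test.doman.io          # hypothetical hostname
    http:
      paths:
      - path: /
        backend:
          serviceName: my-service   # hypothetical backend service
          servicePort: 80
EOF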

Configure keepalived on the masters

# Install keepalived on all master nodes
yum install keepalived -y
# Allow the VRRP protocol through iptables (note: this rule does not persist across reboots)
iptables -A INPUT -p vrrp -j ACCEPT
# Edit the keepalived configuration; the values differ per node (this example is for master01)
! Configuration File for keepalived

global_defs {
    router_id LVS_DEVEL
}

vrrp_instance VI_1 {
    state BACKUP        # use BACKUP plus nopreempt on every node for non-preemptive failover
    nopreempt
    interface eth0
    unicast_peer {      # the switch has multicast disabled, so VRRP must run in unicast mode; list the other masters
        192.168.0.102
        192.168.0.103
    }
    virtual_router_id 51
    priority 101        # suggestion: use the last octet of the node's IPv4 address; values must not collide
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1212
    }
    virtual_ipaddress {
        192.168.0.100
    }
}
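
Finally, a sketch of bringing keepalived up and cutting the hosts entries over to the VIP (the sed assumes the per-master entries written during bootstrap):

# Run on every master: start keepalived and check whether this node holds the VIP
systemctl enable keepalived
systemctl start keepalived
ip addr show eth0 | grep 192.168.0.100
# Run on every node: point k8s.doman.io at the VIP instead of an individual master
sed -i '/k8s.doman.io/s/192\.168\.0\.10[1-3]/192.168.0.100/' /etc/hosts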