All of the following operations are performed on the master node.
| Role | Address | Installed components |
| --- | --- | --- |
| master | 192.168.142.220 | kube-apiserver kube-controller-manager kube-scheduler etcd |
| node1 | 192.168.142.136 | kubelet kube-proxy docker flannel etcd |
| node2 | 192.168.142.132 | kubelet kube-proxy docker flannel etcd |
1. API Server Service Deployment
Create the apiserver working directory
[root@master k8s]# pwd
/k8s
[root@master k8s]# mkdir apiserver
[root@master k8s]# cd apiserver/
Create the CA certificate (mind the working directory!)
//Define the CA signing policy and generate the CA config file
[root@master apiserver]# cat > ca-config.json <<EOF
{
"signing": {
"default": {
"expiry": "87600h"
},
"profiles": {
"kubernetes": {
"expiry": "87600h",
"usages": [
"signing",
"key encipherment",
"server auth",
"client auth"
]
}
}
}
}
EOF
//Generate the CA certificate signing request (CSR) file
[root@master apiserver]# cat > ca-csr.json << EOF
{
"CN": "kubernetes",
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"L": "Beijing",
"ST": "Beijing",
"O": "k8s",
"OU": "System"
}
]
}
EOF
//Generate the self-signed CA (produces ca.pem and ca-key.pem)
[root@master apiserver]# cfssl gencert -initca ca-csr.json | cfssljson -bare ca -
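Optionally, the generated CA can be inspected to confirm its subject and validity period; this is just a sanity check and not required by the procedure:
[root@master apiserver]# openssl x509 -in ca.pem -noout -subject -dates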
Create the apiserver server certificate
//Define the apiserver certificate and generate the server CSR config. The hosts list must include every address used to reach the apiserver: master1 (192.168.142.220), master2 (192.168.142.120, added later for dual-master HA), the VIP (192.168.142.20), and the nginx load balancers (192.168.142.130 master and 192.168.142.140 backup). Adjust the addresses for your environment.
[root@master apiserver]# cat > server-csr.json <<EOF
{
"CN": "kubernetes",
"hosts": [
"10.0.0.1",
"127.0.0.1",
"192.168.142.220", //master1(注意地址变更)
"192.168.142.120", //master2(后期双节点)
"192.168.142.20", //vip
"192.168.142.130", //lb nginx负载均衡(master)
"192.168.142.140", //lb (backup)
"kubernetes",
"kubernetes.default",
"kubernetes.default.svc",
"kubernetes.default.svc.cluster",
"kubernetes.default.svc.cluster.local"
],
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"L": "BeiJing",
"ST": "BeiJing",
"O": "k8s",
"OU": "System"
}
]
}
EOF
//Sign the certificate (generates server.pem and server-key.pem)
[root@master apiserver]# cfssl gencert -ca=ca.pem \
-ca-key=ca-key.pem \
-config=ca-config.json \
-profile=kubernetes server-csr.json | cfssljson -bare server
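It is worth confirming that all of the planned addresses actually made it into the server certificate's Subject Alternative Name list; an optional check with openssl:
[root@master apiserver]# openssl x509 -in server.pem -noout -text | grep -A1 "Subject Alternative Name"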
Create the admin certificate
[root@master apiserver]# cat > admin-csr.json <<EOF
{
"CN": "admin",
"hosts": [],
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"L": "BeiJing",
"ST": "BeiJing",
"O": "system:masters",
"OU": "System"
}
]
}
EOF
//Sign the certificate (generates admin.pem and admin-key.pem)
[root@master apiserver]# cfssl gencert -ca=ca.pem \
-ca-key=ca-key.pem \
-config=ca-config.json \
-profile=kubernetes admin-csr.json | cfssljson -bare admin
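The O (organization) field of this certificate is what maps the admin client to the built-in cluster-admin role under RBAC, so it can be worth verifying:
[root@master apiserver]# openssl x509 -in admin.pem -noout -subject
//The output should contain O=system:masters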
Create the kube-proxy certificate
[root@master apiserver]# cat > kube-proxy-csr.json <<EOF
{
"CN": "system:kube-proxy",
"hosts": [],
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"L": "BeiJing",
"ST": "BeiJing",
"O": "k8s",
"OU": "System"
}
]
}
EOF
//Sign the certificate (generates kube-proxy.pem and kube-proxy-key.pem)
[root@master apiserver]# cfssl gencert -ca=ca.pem \
-ca-key=ca-key.pem \
-config=ca-config.json \
-profile=kubernetes kube-proxy-csr.json | cfssljson -bare kube-proxy
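Similarly, the CN of this certificate should be system:kube-proxy so that the default RBAC policy can authorize kube-proxy; an optional check:
[root@master apiserver]# openssl x509 -in kube-proxy.pem -noout -subject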
A total of 8 certificate files (4 key pairs) should have been generated
[root@master apiserver]# ls *.pem
admin-key.pem ca-key.pem kube-proxy-key.pem server-key.pem
admin.pem ca.pem kube-proxy.pem server.pem
Copy the certificates and binaries into place
//Create the installation directories
[root@master apiserver]# mkdir -p /opt/kubernetes/{bin,ssl,cfg}
[root@master apiserver]# cp -p *.pem /opt/kubernetes/ssl/
//Extract the server tarball and copy the binaries
[root@master k8s]# tar zxvf kubernetes-server-linux-amd64.tar.gz
[root@master k8s]# cd kubernetes/server/bin/
[root@master bin]# cp -p kube-apiserver kubectl /opt/kubernetes/bin/
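A quick way to confirm the binaries were extracted and copied correctly (the version shown will depend on your download):
[root@master bin]# /opt/kubernetes/bin/kube-apiserver --version
[root@master bin]# /opt/kubernetes/bin/kubectl version --client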
Create the token file
[root@master bin]# cd /opt/kubernetes/cfg
//Generate a random bootstrap token
[root@master cfg]# export BOOTSTRAP_TOKEN=$(head -c 16 /dev/urandom | od -An -t x | tr -d ' ')
[root@master cfg]# cat > token.csv << EOF
${BOOTSTRAP_TOKEN},kubelet-bootstrap,10001,"system:kubelet-bootstrap"
EOF
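The resulting file has four comma-separated fields: the random token, the user name, the user ID, and the group. It can be checked with a simple cat (your token value will differ):
[root@master cfg]# cat token.csv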
Create the kube-apiserver systemd unit
[root@master cfg]# vim /usr/lib/systemd/system/kube-apiserver.service
//Write the file manually
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
[Service]
EnvironmentFile=/opt/kubernetes/cfg/kube-apiserver
ExecStart=/opt/kubernetes/bin/kube-apiserver $KUBE_APISERVER_OPTS
Restart=on-failure
[Install]
WantedBy=multi-user.target
//Make the unit file executable
[root@master cfg]# chmod +x /usr/lib/systemd/system/kube-apiserver.service
Create the kube-apiserver configuration file
[root@master ssl]# vim /opt/kubernetes/cfg/kube-apiserver
//Write manually; change the IP addresses to match your environment
KUBE_APISERVER_OPTS="--logtostderr=true \
--v=4 \
--etcd-servers=https://192.168.142.220:2379,https://192.168.142.136:2379,https://192.168.142.132:2379 \
--bind-address=192.168.142.220 \
--secure-port=6443 \
--advertise-address=192.168.142.220 \
--allow-privileged=true \
--service-cluster-ip-range=10.0.0.0/24 \
--enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,ResourceQuota,NodeRestriction \
--authorization-mode=RBAC,Node \
--kubelet-https=true \
--enable-bootstrap-token-auth \
--token-auth-file=/opt/kubernetes/cfg/token.csv \
--service-node-port-range=30000-50000 \
--tls-cert-file=/opt/kubernetes/ssl/server.pem \
--tls-private-key-file=/opt/kubernetes/ssl/server-key.pem \
--client-ca-file=/opt/kubernetes/ssl/ca.pem \
--service-account-key-file=/opt/kubernetes/ssl/ca-key.pem \
--etcd-cafile=/opt/etcd/ssl/ca.pem \
--etcd-certfile=/opt/etcd/ssl/server.pem \
--etcd-keyfile=/opt/etcd/ssl/server-key.pem"
[root@master ssl]# mkdir -p /var/log/kubernetes/apiserver
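Before starting the service, it may help to confirm that every file referenced by the options above exists; the etcd certificates under /opt/etcd/ssl are assumed to have been generated during the earlier etcd deployment:
[root@master ssl]# ls /opt/kubernetes/ssl/*.pem /opt/etcd/ssl/*.pem /opt/kubernetes/cfg/token.csv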
Start the kube-apiserver service
[root@master cfg]# systemctl daemon-reload
[root@master cfg]# systemctl start kube-apiserver
[root@master cfg]# systemctl status kube-apiserver
[root@master cfg]# systemctl enable kube-apiserver
Check that the service started
[root@master bin]# netstat -atnp | egrep "(6443|8080)"
//6443 is the secure HTTPS port; 8080 is the local insecure HTTP port
tcp 0 0 192.168.142.220:6443 0.0.0.0:* LISTEN 12898/kube-apiserve
tcp 0 0 127.0.0.1:8080 0.0.0.0:* LISTEN 12898/kube-apiserve
tcp 0 0 192.168.142.220:6443 192.168.142.220:60052 ESTABLISHED 12898/kube-apiserve
tcp 0 0 192.168.142.220:60052 192.168.142.220:6443 ESTABLISHED 12898/kube-apiserve
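As an extra sanity check, the local insecure port answers unauthenticated requests, for example the version endpoint (output omitted here):
[root@master bin]# curl http://127.0.0.1:8080/version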
2. Controller Manager Service Deployment
Copy the binary into place
[root@master bin]# pwd
/k8s/kubernetes/server/bin
//Copy the binary
[root@master bin]# cp -p kube-controller-manager /opt/kubernetes/bin/
Write the kube-controller-manager configuration file
[root@master bin]# cat <<EOF >/opt/kubernetes/cfg/kube-controller-manager
KUBE_CONTROLLER_MANAGER_OPTS="--logtostderr=true \
--v=4 \
--master=127.0.0.1:8080 \
--leader-elect=true \
--address=127.0.0.1 \
--service-cluster-ip-range=10.0.0.0/24 \
--cluster-name=kubernetes \
--cluster-signing-cert-file=/opt/kubernetes/ssl/ca.pem \
--cluster-signing-key-file=/opt/kubernetes/ssl/ca-key.pem \
--root-ca-file=/opt/kubernetes/ssl/ca.pem \
--service-account-private-key-file=/opt/kubernetes/ssl/ca-key.pem"
EOF
Write the kube-controller-manager systemd unit
[root@master bin]# cat <<EOF >/usr/lib/systemd/system/kube-controller-manager.service
[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/kubernetes/kubernetes
[Service]
EnvironmentFile=/opt/kubernetes/cfg/kube-controller-manager
ExecStart=/opt/kubernetes/bin/kube-controller-manager \$KUBE_CONTROLLER_MANAGER_OPTS
Restart=on-failure
[Install]
WantedBy=multi-user.target
EOF
Start the service
//Make the unit executable, reload systemd, and start the service
[root@master cfg]# chmod +x /usr/lib/systemd/system/kube-controller-manager.service
[root@master cfg]# systemctl daemon-reload
[root@master cfg]# systemctl start kube-controller-manager
[root@master cfg]# systemctl status kube-controller-manager
[root@master cfg]# systemctl enable kube-controller-manager
Check that the service started
[root@master bin]# netstat -atnp | grep kube-controll
tcp 0 0 127.0.0.1:10252 0.0.0.0:* LISTEN 12964/kube-controll
tcp6 0 0 :::10257 :::* LISTEN 12964/kube-controll
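The controller manager also exposes a health endpoint on its local port, which can serve as a simple liveness check (it should return ok when healthy):
[root@master bin]# curl http://127.0.0.1:10252/healthz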
3. Scheduler Service Deployment
Copy the binary into place
[root@master bin]# pwd
/k8s/kubernetes/server/bin
//Copy the binary
[root@master bin]# cp -p kube-scheduler /opt/kubernetes/bin/
Write the configuration file
[root@master bin]# cat <<EOF >/opt/kubernetes/cfg/kube-scheduler
KUBE_SCHEDULER_OPTS="--logtostderr=true \
--v=4 \
--master=127.0.0.1:8080 \
--leader-elect"
EOF
Write the systemd unit
[root@master bin]# cat <<EOF >/usr/lib/systemd/system/kube-scheduler.service
[Unit]
Description=Kubernetes Scheduler
Documentation=https://github.com/kubernetes/kubernetes
[Service]
EnvironmentFile=-/opt/kubernetes/cfg/kube-scheduler
ExecStart=/opt/kubernetes/bin/kube-scheduler \$KUBE_SCHEDULER_OPTS
Restart=on-failure
[Install]
WantedBy=multi-user.target
EOF
Start the service
[root@master bin]# chmod +x /usr/lib/systemd/system/kube-scheduler.service
[root@master bin]# systemctl daemon-reload
[root@master bin]# systemctl start kube-scheduler
[root@master bin]# systemctl status kube-scheduler
[root@master bin]# systemctl enable kube-scheduler
Check that the service started
[root@master bin]# netstat -atnp | grep schedule
tcp6 0 0 :::10251 :::* LISTEN
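The same kind of health check works for the scheduler on its local port:
[root@master bin]# curl http://127.0.0.1:10251/healthz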
This completes the deployment of every service required on the master node.
//Check the status of the master components
[root@master bin]# /opt/kubernetes/bin/kubectl get cs
//If everything succeeded, all components should report Healthy
NAME STATUS MESSAGE ERROR
scheduler Healthy ok
controller-manager Healthy ok
etcd-0 Healthy {"health":"true"}
etcd-1 Healthy {"health":"true"}
etcd-2 Healthy {"health":"true"}
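Optionally, /opt/kubernetes/bin can be added to PATH so kubectl can be invoked directly; this is only a convenience and not required by the rest of the procedure:
[root@master bin]# echo 'export PATH=$PATH:/opt/kubernetes/bin' >> /etc/profile
[root@master bin]# source /etc/profile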
To be continued~~~