Multi-Node KubeSphere Installation
Prerequisites - every node must meet the prerequisites below
Resources for this tutorial: 8 hosts with 4c16g each - 2 for the HA load balancer, 6 for the masters and workers ## note: the HA layer can also run on the master nodes, but dedicated nodes are recommended
Disable the firewall
## https://v3-0.docs.kubesphere.io/docs/installing-on-linux/introduction/port-firewall/
systemctl disable firewalld
systemctl stop firewalld
systemctl status firewalld
Disable SELinux
### Check SELinux status
getenforce
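getenforce above only reports the current state; actually turning SELinux off is typically done with the standard CentOS 7 commands below (a sketch):
### Disable SELinux immediately and persist the change across reboots
setenforce 0
sed -i 's/^SELINUX=enforcing$/SELINUX=disabled/' /etc/selinux/config
getenforce    # Permissive now; Disabled after the next reboot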
Disable the swap partition
swapoff -a
echo "vm.swappiness=0" >> /etc/sysctl.conf
sysctl -p /etc/sysctl.conf
sed -i 's$/dev/mapper/centos-swap$#/dev/mapper/centos-swap$g' /etc/fstab
free -m
Time synchronization
See the article in this category - Linux time synchronization tutorial
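The linked article covers this step in detail; as a minimal sketch, chrony (the CentOS 7 default) can be used, assuming the nodes have outbound NTP access:
### Minimal time sync with chrony
yum install -y chrony
systemctl enable chronyd && systemctl start chronyd
chronyc sources -v    # confirm the node is tracking a time source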
Hosts resolution
[root@ks.m1 ~/asciinema]# cat >>/etc/hosts<<EOF
> 192.168.0.1 ks.m1
> 192.168.0.2 ks.m2
> 192.168.0.3 ks.m3
> 192.168.0.4 ks.s1
> 192.168.0.5 ks.s2
> 192.168.0.6 ks.s3
> EOF
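A quick loop to confirm that every entry resolves and is reachable:
### Verify name resolution and connectivity for all nodes
for h in ks.m1 ks.m2 ks.m3 ks.s1 ks.s2 ks.s3; do
    ping -c 1 -W 1 $h >/dev/null && echo "$h ok" || echo "$h FAILED"
done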
Kernel parameter settings
### Configure the kernel
#### Write the following into the kernel configuration file
cat >/etc/sysctl.d/k8s.conf <<EOF
> net.bridge.bridge-nf-call-ip6tables = 1
> net.bridge.bridge-nf-call-iptables = 1
> net.ipv4.ip_forward = 1
> EOF
#### Run the following command to apply the settings
modprobe br_netfilter && sysctl -p /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
Check DNS settings
### Check DNS settings
cat /etc/resolv.conf
# Generated by NetworkManager
search pek3.qingcloud.com
nameserver 10.16.120.4
nameserver 114.114.114.114
nameserver 119.29.29.29
# NOTE: the libc resolver may not support more than 3 nameservers.
# The nameservers listed below may not be recognized.
nameserver 1.2.4.8
Install IPVS
[root@ks.m1 ~/asciinema]# ### Install IPVS
[root@ks.m1 ~/asciinema]#
[root@ks.m1 ~/asciinema]# #### Write the following into the modules file
[root@ks.m1 ~/asciinema]# cat > /etc/sysconfig/modules/ipvs.modules <<EOF
> #!/bin/bash
> modprobe -- ip_vs
> modprobe -- ip_vs_rr
> modprobe -- ip_vs_wrr
> modprobe -- ip_vs_sh
> modprobe -- nf_conntrack_ipv4
> EOF
[root@ks.m1 ~/asciinema]# #### Set permissions, load the modules, and verify the required kernel modules are loaded
[root@ks.m1 ~/asciinema]# chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules && lsmod | grep -e ip_vs -e nf_conntrack_ipv4
nf_conntrack_ipv4 15053 2
nf_defrag_ipv4 12729 1 nf_conntrack_ipv4
ip_vs_sh 12688 0
ip_vs_wrr 12697 0
ip_vs_rr 12600 0
ip_vs 145497 6 ip_vs_rr,ip_vs_sh,ip_vs_wrr
nf_conntrack 139224 7 ip_vs,nf_nat,nf_nat_ipv4,xt_conntrack,nf_nat_masquerade_ipv4,nf_conntrack_netlink,nf_conntrack_ipv4
libcrc32c 12644 3 ip_vs,nf_nat,nf_conntrack
[root@ks.m1 ~/asciinema]# #### Install ipvsadm
[root@ks.m1 ~/asciinema]# yum -y install ipset ipvsadm
Loaded plugins: fastestmirror
Loading mirror speeds from cached hostfile
 * base: mirrors.tuna.tsinghua.edu.cn
 * epel: mirrors.tuna.tsinghua.edu.cn
 * extras: mirrors.tuna.tsinghua.edu.cn
 * updates: mirrors.bfsu.edu.cn
Package ipset-7.1-1.el7.x86_64 already installed and latest version
Package ipvsadm-1.27-8.el7.x86_64 already installed and latest version
Nothing to do
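Note that ipvsadm is only the management CLI; the IPVS table stays empty until kube-proxy (running in ipvs mode) programs it. Once the cluster is up, the virtual servers can be inspected with:
### List the IPVS virtual servers created by kube-proxy (after the cluster is running)
ipvsadm -Ln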
Install dependencies
[root@ks.m1 ~/asciinema]# ### Install dependencies
[root@ks.m1 ~/asciinema]# yum install -y ebtables socat ipset conntrack
Loaded plugins: fastestmirror
Loading mirror speeds from cached hostfile
 * base: mirrors.tuna.tsinghua.edu.cn
 * epel: mirrors.tuna.tsinghua.edu.cn
 * extras: mirrors.tuna.tsinghua.edu.cn
 * updates: mirrors.bfsu.edu.cn
Package ebtables-2.0.10-16.el7.x86_64 already installed and latest version
Package socat-1.7.3.2-2.el7.x86_64 already installed and latest version
Package ipset-7.1-1.el7.x86_64 already installed and latest version
Package conntrack-tools-1.4.4-7.el7.x86_64 already installed and latest version
Nothing to do
Install Docker
[root@ks.m1 ~/asciinema]# ### Install Docker
[root@ks.m1 ~/asciinema]# yum install -y yum-utils
Loaded plugins: fastestmirror
Loading mirror speeds from cached hostfile
 * base: mirrors.tuna.tsinghua.edu.cn
 * epel: mirrors.tuna.tsinghua.edu.cn
 * extras: mirrors.tuna.tsinghua.edu.cn
 * updates: mirrors.bfsu.edu.cn
Package yum-utils-1.1.31-54.el7_8.noarch already installed and latest version
Nothing to do
[root@ks.m1 ~/asciinema]# yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
Loaded plugins: fastestmirror
adding repo from: http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
grabbing file http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo to /etc/yum.repos.d/docker-ce.repo
repo saved to /etc/yum.repos.d/docker-ce.repo
[root@ks.m1 ~/asciinema]# yum makecache fast
Loaded plugins: fastestmirror
Loading mirror speeds from cached hostfile
epel/x86_64/metalink | 3.9 kB 00:00:00
* base: mirrors.tuna.tsinghua.edu.cn
* epel: mirrors.tuna.tsinghua.edu.cn
* extras: mirrors.tuna.tsinghua.edu.cn
* updates: mirrors.bfsu.edu.cn
base | 3.6 kB 00:00:00
docker-ce-stable | 3.5 kB 00:00:00
epel | 4.7 kB 00:00:00
extras | 2.9 kB 00:00:00
kubernetes | 1.4 kB 00:00:00
updates | 2.9 kB 00:00:00
Metadata Cache Created
[root@ks.m1 ~/asciinema]# yum -y install docker-ce-18.09.9-3.el7 docker-ce-cli-18.09.9
Loaded plugins: fastestmirror
Loading mirror speeds from cached hostfile
 * base: mirrors.huaweicloud.com
 * epel: mirrors.tuna.tsinghua.edu.cn
 * extras: mirrors.tuna.tsinghua.edu.cn
 * updates: mirrors.bfsu.edu.cn
Package 3:docker-ce-18.09.9-3.el7.x86_64 already installed and latest version
Package 1:docker-ce-cli-18.09.9-3.el7.x86_64 already installed and latest version
Nothing to do
[root@ks.m1 ~/asciinema]# ### Start Docker
[root@ks.m1 ~/asciinema]# systemctl enable docker && systemctl start docker
[root@ks.m1 ~/asciinema]# systemctl status docker
[root@ks.m1 ~/asciinema]# ### Configure the Docker registry mirror ### for your own accelerator address, log in to the Aliyun console and search for the container image service
[root@ks.m1 ~/asciinema]# cat > /etc/docker/daemon.json <<EOF
> {
> "registry-mirrors": ["https://gqk8w9va.mirror.aliyuncs.com"]
> }
> EOF
[root@ks.m1 ~/asciinema]# ### Change the Docker cgroup driver to systemd
[root@ks.m1 ~/asciinema]# sed -i.bak "s#^ExecStart=/usr/bin/dockerd.*#ExecStart=/usr/bin/dockerd -H fd:// --containerd=/run/containerd/containerd.sock --exec-opt native.cgroupdriver=systemd#g" /usr/lib/systemd/system/docker.service
[root@ks.m1 ~/asciinema]#
[root@ks.m1 ~/asciinema]# ### Restart Docker to apply the changes
[root@ks.m1 ~/asciinema]# systemctl daemon-reload
[root@ks.m1 ~/asciinema]# systemctl restart docker
[root@ks.m1 ~/asciinema]# systemctl status docker
[root@ks.m1 ~/asciinema]# docker info|grep "Registry Mirrors" -A 1
Registry Mirrors:
https://gqk8w9va.mirror.aliyuncs.com/
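It is worth confirming the cgroup driver change as well, since a docker/kubelet cgroup driver mismatch is a common cause of install failures:
### Confirm the cgroup driver is now systemd
docker info 2>/dev/null | grep -i "Cgroup Driver"    # expect: Cgroup Driver: systemd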
Set up the Kubernetes yum repo
[root@ks.m1 ~/asciinema]# ### Set up the Kubernetes repo
[root@ks.m1 ~/asciinema]# cat >/etc/yum.repos.d/kubernetes.repo <<EOF
> [kubernetes]
> name=Kubernetes
> baseurl=http://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
> enabled=1
> gpgcheck=0
> repo_gpgcheck=0
> gpgkey=http://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg http://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
> EOF
[root@ks.m1 ~/asciinema]# yum makecache fast
Loaded plugins: fastestmirror
Loading mirror speeds from cached hostfile
epel/x86_64/metalink | 3.9 kB 00:00:00
* base: mirrors.tuna.tsinghua.edu.cn
* epel: mirrors.tuna.tsinghua.edu.cn
* extras: mirrors.tuna.tsinghua.edu.cn
* updates: mirrors.bfsu.edu.cn
base | 3.6 kB 00:00:00
docker-ce-stable | 3.5 kB 00:00:00
epel | 4.7 kB 00:00:00
extras | 2.9 kB 00:00:00
kubernetes | 1.4 kB 00:00:00
updates | 2.9 kB 00:00:00
Metadata Cache Created
VIP, HAProxy, Keepalived
Set up the HA layer with HAProxy and Keepalived
See the article - installing an HA load balancer for k8s
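The linked article has the full procedure; as a rough sketch of the two key pieces (assuming HAProxy and Keepalived run on the two dedicated HA hosts, the NIC is eth0, and the VIP matches controlPlaneEndpoint.address used in the config below):
### /etc/haproxy/haproxy.cfg - TCP passthrough to the three apiservers (sketch)
frontend kube-apiserver
    bind *:6443
    mode tcp
    default_backend kube-masters
backend kube-masters
    mode tcp
    balance roundrobin
    server ks.m1 192.168.0.1:6443 check
    server ks.m2 192.168.0.2:6443 check
    server ks.m3 192.168.0.3:6443 check

### /etc/keepalived/keepalived.conf - float the VIP between the two HA hosts (sketch)
vrrp_instance VI_1 {
    state MASTER            # BACKUP on the second HA host
    interface eth0          # adjust to the actual NIC name
    virtual_router_id 51
    priority 100            # use a lower priority on the BACKUP host
    advert_int 1
    virtual_ipaddress {
        192.168.159.11      # must match controlPlaneEndpoint.address
    }
}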
Install the k8s cluster with KubeKey
[root@ks.m1 ~/asciinema]# ### Download KubeKey
[root@ks.m1 ~/asciinema]# yum install -y wget
Loaded plugins: fastestmirror
Loading mirror speeds from cached hostfile
 * base: mirrors.tuna.tsinghua.edu.cn
 * epel: mirrors.tuna.tsinghua.edu.cn
 * extras: mirrors.tuna.tsinghua.edu.cn
 * updates: mirrors.bfsu.edu.cn
Package wget-1.14-18.el7_6.1.x86_64 already installed and latest version
Nothing to do
[root@ks.m1 ~/asciinema]# wget -c https://kubesphere.io/download/kubekey-v1.0.0-linux-amd64.tar.gz -O - | tar -xz
--2020-10-09 11:32:20--  https://kubesphere.io/download/kubekey-v1.0.0-linux-amd64.tar.gz
Resolving kubesphere.io (kubesphere.io)... 172.67.164.49, 104.18.45.230, 104.18.44.230, ...
Connecting to kubesphere.io (kubesphere.io)|172.67.164.49|:443... connected.
HTTP request sent, awaiting response... 301 Moved Permanently
Location: https://kubernetes.pek3b.qingstor.com/kubekey/releases/download/v1.0.0/kubekey-v1.0.0-linux-amd64.tar.gz [following]
--2020-10-09 11:32:23--  https://kubernetes.pek3b.qingstor.com/kubekey/releases/download/v1.0.0/kubekey-v1.0.0-linux-amd64.tar.gz
Resolving kubernetes.pek3b.qingstor.com (kubernetes.pek3b.qingstor.com)... 10.16.91.201, 10.16.91.205, 10.16.91.206, ...
Connecting to kubernetes.pek3b.qingstor.com (kubernetes.pek3b.qingstor.com)|10.16.91.201|:443... connected.
HTTP request sent, awaiting response... 200 OK
Length: 23042819 (22M) [application/x-tar]
Saving to: 'STDOUT'
100%[==============================================================================================================>] 23,042,819  48.5MB/s  in 0.5s
2020-10-09 11:32:23 (48.5 MB/s) - written to stdout [23042819/23042819]
[root@ks.m1 ~/asciinema]# chmod +x kk
[root@ks.m1 ~/asciinema]# ### Generate the config template (written to config-sample.yaml by default); below we write our own config-cluster.yaml instead
[root@ks.m1 ~/asciinema]# ./kk create config --with-kubernetes v1.18.6 --with-kubesphere v3.0.0
[root@ks.m1 ~/asciinema]# cat >config-cluster.yaml<<EOF
> apiVersion: kubekey.kubesphere.io/v1alpha1
> kind: Cluster
> metadata:
>   name: sample
> spec:
>   hosts:
>   - {name: ks_m1, address: 192.168.0.1, internalAddress: 192.168.0.1, user: root, privateKeyPath: "/root/.ssh/kp-qingcloud"}
>   - {name: ks_m2, address: 192.168.0.2, internalAddress: 192.168.0.2, user: root, privateKeyPath: "/root/.ssh/kp-qingcloud"}
>   - {name: ks_m3, address: 192.168.0.3, internalAddress: 192.168.0.3, user: root, privateKeyPath: "/root/.ssh/kp-qingcloud"}
>   - {name: ks_s1, address: 192.168.0.4, internalAddress: 192.168.0.4, user: root, privateKeyPath: "/root/.ssh/kp-qingcloud"}
>   - {name: ks_s2, address: 192.168.0.5, internalAddress: 192.168.0.5, user: root, privateKeyPath: "/root/.ssh/kp-qingcloud"}
>   - {name: ks_s3, address: 192.168.0.6, internalAddress: 192.168.0.6, user: root, privateKeyPath: "/root/.ssh/kp-qingcloud"}
>   roleGroups:
>     etcd:
>     - ks_m1
>     - ks_m2
>     - ks_m3
>     master:
>     - ks_m[1:3]   # the nodes from ks_m1 to ks_m3
>     worker:
>     - ks_s1
>     - ks_s2
>     - ks_s3
>   controlPlaneEndpoint:
>     domain: lb.kubesphere.local
>     address: "192.168.159.11"   # the VIP managed by Keepalived
>     port: "6443"
>   kubernetes:
>     version: v1.18.6
>     imageRepo: kubesphere
>     clusterName: cluster.local
>   network:
>     plugin: calico
>     kubePodsCIDR: 10.233.64.0/18
>     kubeServiceCIDR: 10.233.0.0/18
>   registry:
>     registryMirrors: []
>     insecureRegistries: []
>   addons: []
> ---
> apiVersion: installer.kubesphere.io/v1alpha1
> kind: ClusterConfiguration
> metadata:
>   name: ks-installer
>   namespace: kubesphere-system
>   labels:
>     version: v3.0.0
> spec:
>   local_registry: ""
>   persistence:
>     storageClass: ""
>   authentication:
>     jwtSecret: ""
>   etcd:
>     monitoring: true        # Whether to install etcd monitoring dashboard
>     endpointIps: 192.168.0.1,192.168.0.2,192.168.0.3  # etcd cluster endpointIps
>     port: 2379              # etcd port
>     tlsEnable: true
>   common:
>     mysqlVolumeSize: 20Gi     # MySQL PVC size
>     minioVolumeSize: 20Gi     # Minio PVC size
>     etcdVolumeSize: 20Gi      # etcd PVC size
>     openldapVolumeSize: 2Gi   # openldap PVC size
>     redisVolumSize: 2Gi       # Redis PVC size
>     es:                       # Storage backend for logging, tracing, events and auditing.
>       elasticsearchMasterReplicas: 1      # total number of master nodes; even numbers are not allowed
>       elasticsearchDataReplicas: 1        # total number of data nodes
>       elasticsearchMasterVolumeSize: 4Gi  # Volume size of Elasticsearch master nodes
>       elasticsearchDataVolumeSize: 20Gi   # Volume size of Elasticsearch data nodes
>       logMaxAge: 7            # Log retention time in built-in Elasticsearch, 7 days by default
>       elkPrefix: logstash     # The string making up index names. The index name will be formatted as ks-<elk_prefix>-log
>       # externalElasticsearchUrl:
>       # externalElasticsearchPort:
>   console:
>     enableMultiLogin: false   # enable/disable multiple sign-on; it allows one account to be used by different users at the same time
>     port: 30880
>   alerting:          # Whether to install KubeSphere alerting system. It enables users to customize alerting policies to send messages to receivers, with different time intervals and alerting levels to choose from.
>     enabled: false
>   auditing:          # Whether to install KubeSphere audit log system. It provides a security-relevant chronological set of records, recording the sequence of activities happening on the platform, initiated by different tenants.
>     enabled: false
>   devops:            # Whether to install KubeSphere DevOps System. It provides an out-of-box CI/CD system based on Jenkins, and automated workflow tools including Source-to-Image & Binary-to-Image.
>     enabled: false
>     jenkinsMemoryLim: 2Gi        # Jenkins memory limit
>     jenkinsMemoryReq: 1500Mi     # Jenkins memory request
>     jenkinsVolumeSize: 8Gi       # Jenkins volume size
>     jenkinsJavaOpts_Xms: 512m    # The following three fields are JVM parameters
>     jenkinsJavaOpts_Xmx: 512m
>     jenkinsJavaOpts_MaxRAM: 2g
>   events:            # Whether to install KubeSphere events system. It provides a graphical web console for Kubernetes Events exporting, filtering and alerting in multi-tenant Kubernetes clusters.
>     enabled: false
>   logging:           # Whether to install KubeSphere logging system. Flexible logging functions are provided for log query, collection and management in a unified console. Additional log collectors can be added, such as Elasticsearch, Kafka and Fluentd.
>     enabled: false
>     logsidecarReplicas: 2
>   metrics_server:    # Whether to install metrics-server. It enables HPA (Horizontal Pod Autoscaler).
>     enabled: true
>   monitoring:
>     prometheusReplicas: 1           # Prometheus replicas are responsible for monitoring different segments of data source and provide high availability as well.
>     prometheusMemoryRequest: 400Mi  # Prometheus request memory
>     prometheusVolumeSize: 20Gi      # Prometheus PVC size
>     alertmanagerReplicas: 1         # AlertManager replicas
>   multicluster:
>     clusterRole: none  # host | member | none  # You can install a solo cluster, or specify it as the role of a host or member cluster
>   networkpolicy:     # Network policies allow network isolation within the same cluster, which means firewalls can be set up between certain instances (Pods).
>     enabled: false
>   notification:      # Email notification support for the legacy alerting system; should be enabled/disabled together with the above alerting option
>     enabled: false
>   openpitrix:        # Whether to install KubeSphere Application Store. It provides an application store for Helm-based applications, and offers application lifecycle management.
>     enabled: false
>   servicemesh:       # Whether to install KubeSphere Service Mesh (Istio-based). It provides fine-grained traffic management, observability and tracing, and offers visualization of the traffic topology.
>     enabled: false
> EOF
Install the cluster
[root@ks.m1 ~/asciinema]# ## Install the cluster
[root@ks.m1 ~/asciinema]# ./kk create cluster -f config-cluster.yaml
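Once kk reports success, the usual verification (per the KubeSphere docs) is to follow the ks-installer logs until the "Welcome to KubeSphere" banner prints the console address, then confirm every node is Ready:
### Follow the installer logs until the welcome banner appears
kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l app=ks-install -o jsonpath='{.items[0].metadata.name}') -f
### All nodes should be Ready; the console then listens on port 30880 (default account admin / P@88w0rd)
kubectl get node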