Three machines:
192.168.83.228 root/xx master
192.168.83.57 root/xx node1
192.168.83.58 root/xx nfs
chushihua.zip (initialization script package; see /root/chushihua.sh in step 6)
1. Add host mappings on the master machine
echo "192.168.83.58 nfs" >> /etc/hosts
echo "192.168.83.57 node1" >> /etc/hosts
2. Persistent storage initialization (NFS)
(1) Install the NFS server
1) Install the NFS packages (required on all machines)
yum -y install rpcbind nfs-utils
2) Create the shared directory and export it
mkdir -p /mnt/nfs/shared_dir
echo "/mnt/nfs/shared_dir *(rw,no_root_squash)" >> /etc/exports
chmod -R 777 /mnt/nfs/shared_dir
3) Start the NFS service
systemctl start rpcbind nfs && systemctl enable rpcbind nfs
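Optional check on the NFS server that the export is active:
exportfs -v
showmount -e localhost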
(2) Verify the NFS service
1) Configure hosts resolution (required on all master, worker, and NFS nodes)
echo "192.168.83.58 nfs1.kubesphere.local" >> /etc/hosts
2) Query the exported shares
showmount -e nfs1.kubesphere.local | grep share
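Optionally, test-mount the share from a client node (the nfs-utils installed earlier provides the mount helper), then clean up:
mount -t nfs nfs1.kubesphere.local:/mnt/nfs/shared_dir /mnt
touch /mnt/test-write && rm -f /mnt/test-write
umount /mnt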
3. Configure passwordless SSH
1) Set the hostname on each machine (master, node1, nfs)
hostnamectl set-hostname $hostname
2) On the master node, generate a key and copy it to each host (an expanded sketch follows this section)
ssh-keygen
ssh-copy-id $hostname
3) Reboot the servers
reboot
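A minimal sketch of step 2) and its verification, expanded for all three hosts (run on the master; assumes the hostnames set in step 1):
ssh-keygen -t rsa -N "" -f ~/.ssh/id_rsa
for h in master node1 nfs; do ssh-copy-id root@$h; done
# each line should print the remote hostname without asking for a password
for h in master node1 nfs; do ssh root@$h hostname; done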
4. Install the cluster with kk (KubeKey)
Download:
export KKZONE=cn
curl -sfL https://get-kk.kubesphere.io | VERSION=v3.0.2 sh -
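The installer lands in the current directory as ./kk; a quick check that it runs:
chmod +x kk
./kk version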
Generate the template config file (this writes config-sample.yaml to the current directory):
./kk create config --with-kubesphere v3.3.1
Create the NFS values file:
[root@master1 ~]# cat > /root/nfs-client.yaml << EOF
nfs:
  server: "nfs1.kubesphere.local"
  path: "/mnt/nfs/shared_dir"
storageClass:
  defaultClass: true
EOF
Edit the main YAML file (config-sample.yaml):
apiVersion: kubekey.kubesphere.io/v1alpha2
kind: Cluster
metadata:
  name: sample
spec:
  hosts:
  - {name: master, address: 192.168.83.228, internalAddress: 192.168.83.228, user: root, password: "xxx"}
  - {name: node1, address: 192.168.83.57, internalAddress: 192.168.83.57, user: root, password: "xxx"}
  roleGroups:
    etcd:
    - master
    control-plane:
    - master
    worker:
    - node1
  controlPlaneEndpoint:
    ## Internal loadbalancer for apiservers
    internalLoadbalancer: haproxy
    domain: lb.kubesphere.local
    address: ""
    port: 6443
  kubernetes:
    version: v1.23.10
    clusterName: cluster.local
    autoRenewCerts: true
    containerManager: docker
  etcd:
    type: kubekey
  network:
    plugin: calico
    kubePodsCIDR: 10.233.64.0/18
    kubeServiceCIDR: 10.233.0.0/18
    ## multus support. https://github.com/k8snetworkplumbingwg/multus-cni
    multusCNI:
      enabled: false
  registry:
    privateRegistry: ""
    namespaceOverride: ""
    registryMirrors: []
    insecureRegistries: []
  addons:
  - name: nfs-client
    namespace: kube-system
    sources:
      chart:
        name: nfs-client-provisioner
        repo: https://charts.kubesphere.io/main
        valuesFile: /root/nfs-client.yaml

---
apiVersion: installer.kubesphere.io/v1alpha1
kind: ClusterConfiguration
metadata:
  name: ks-installer
  namespace: kubesphere-system
  labels:
    version: v3.3.1
spec:
  persistence:
    storageClass: ""
  authentication:
    jwtSecret: ""
  zone: ""
  local_registry: ""
  namespace_override: ""
  # dev_tag: ""
  etcd:
    monitoring: false
    endpointIps: localhost
    port: 2379
    tlsEnable: true
  common:
    core:
      console:
        enableMultiLogin: true
        port: 30880
        type: NodePort
    # apiserver:
    #   resources: {}
    # controllerManager:
    #   resources: {}
    redis:
      enabled: false
      volumeSize: 2Gi
    openldap:
      enabled: false
      volumeSize: 2Gi
    minio:
      volumeSize: 20Gi
    monitoring:
      # type: external
      endpoint: http://prometheus-operated.kubesphere-monitoring-system.svc:9090
      GPUMonitoring:
        enabled: false
    gpu:
      kinds:
      - resourceName: "nvidia.com/gpu"
        resourceType: "GPU"
        default: true
    es:
      # master:
      #   volumeSize: 4Gi
      #   replicas: 1
      #   resources: {}
      # data:
      #   volumeSize: 20Gi
      #   replicas: 1
      #   resources: {}
      logMaxAge: 7
      elkPrefix: logstash
      basicAuth:
        enabled: false
        username: ""
        password: ""
      externalElasticsearchHost: ""
      externalElasticsearchPort: ""
  alerting:
    enabled: false
    # thanosruler:
    #   replicas: 1
    #   resources: {}
  auditing:
    enabled: false
    # operator:
    #   resources: {}
    # webhook:
    #   resources: {}
  devops:
    enabled: true
    # resources: {}
    jenkinsMemoryLim: 8Gi
    jenkinsMemoryReq: 4Gi
    jenkinsVolumeSize: 8Gi
  events:
    enabled: false
    # operator:
    #   resources: {}
    # exporter:
    #   resources: {}
    # ruler:
    #   enabled: true
    #   replicas: 2
    #   resources: {}
  logging:
    enabled: false
    logsidecar:
      enabled: true
      replicas: 2
      # resources: {}
  metrics_server:
    enabled: false
  monitoring:
    storageClass: ""
    node_exporter:
      port: 9100
      # resources: {}
    # kube_rbac_proxy:
    #   resources: {}
    # kube_state_metrics:
    #   resources: {}
    # prometheus:
    #   replicas: 1
    #   volumeSize: 20Gi
    #   resources: {}
    # operator:
    #   resources: {}
    # alertmanager:
    #   replicas: 1
    #   resources: {}
    # notification_manager:
    #   resources: {}
    #   operator:
    #     resources: {}
    #   proxy:
    #     resources: {}
    gpu:
      nvidia_dcgm_exporter:
        enabled: false
        # resources: {}
  multicluster:
    clusterRole: none
  network:
    networkpolicy:
      enabled: false
    ippool:
      type: none
    topology:
      type: none
  openpitrix:
    store:
      enabled: true
  servicemesh:
    enabled: false
    istio:
      components:
        ingressGateways:
        - name: istio-ingressgateway
          enabled: false
        cni:
          enabled: false
  edgeruntime:
    enabled: false
    kubeedge:
      enabled: false
      cloudCore:
        cloudHub:
          advertiseAddress:
            - ""
        service:
          cloudhubNodePort: "30000"
          cloudhubQuicNodePort: "30001"
          cloudhubHttpsNodePort: "30002"
          cloudstreamNodePort: "30003"
          tunnelNodePort: "30004"
        # resources: {}
        # hostNetWork: false
      iptables-manager:
        enabled: true
        mode: "external"
        # resources: {}
      # edgeService:
      #   resources: {}
  terminal:
    timeout: 600
./kk create cluster -f config-sample.yaml
5. Verification
kubectl get pod -n kube-system
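A few more optional checks once all pods settle; the ks-installer deployment name, the nfs-client storage class, and the NodePort 30880 come from the config above:
kubectl get nodes -o wide        # master and node1 should be Ready
kubectl get sc                   # nfs-client should be marked (default)
kubectl logs -n kubesphere-system deploy/ks-installer -f   # follow the KubeSphere installer log
# the console should then be reachable at http://192.168.83.228:30880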
Set up kubectl command completion:
yum install bash-completion -y
echo 'source <(kubectl completion bash)' >>~/.bashrc
kubectl completion bash >/etc/bash_completion.d/kubectl
6. Adding nodes to and removing nodes from the cluster
Add a node
On the node being added, run the initialization script:
/root/chushihua.sh
On the node being added, add the NFS host mapping:
ansible test -m shell -a "echo '192.168.83.58 nfs1.kubesphere.local' >> /etc/hosts"
On the node being added, set the hostname:
hostnamectl set-hostname worker45
Reboot the node being added.
!! The following steps run on the master machine
export KKZONE=cn
Set up passwordless SSH from the master to the new node:
ssh-copy-id $hostname
Update the config file on the master: add the new node to config-sample.yaml under hosts and the worker role group, as sketched below.
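For example (the worker45 address below is a placeholder; replace it with the node's real IP):
  hosts:
  - {name: master, address: 192.168.83.228, internalAddress: 192.168.83.228, user: root, password: "xxx"}
  - {name: node1, address: 192.168.83.57, internalAddress: 192.168.83.57, user: root, password: "xxx"}
  - {name: worker45, address: <worker45-ip>, internalAddress: <worker45-ip>, user: root, password: "xxx"}
  roleGroups:
    ...
    worker:
    - node1
    - worker45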
./kk add nodes -f config-sample.yaml
Remove a node from the cluster
On the master, delete the node from the cluster (a fuller sequence with draining is sketched at the end of this section):
kubectl delete node <node-name>
On the node being removed, run:
kubeadm reset
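A slightly fuller removal sequence, if workloads need to be evicted first (<node-name> is a placeholder for the node to remove):
# on the master
kubectl drain <node-name> --ignore-daemonsets --delete-emptydir-data
kubectl delete node <node-name>
# on the node being removed
kubeadm reset -f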