
Quickly deploy k8s offline with sealos 3

The sealos 4.x.x commands are different; the kube1.17.0 package provided on the intranet is deployed with sealos 3.

Download and install sealos

Online download and install:

text
wget -c https://github.com/labring/sealos/releases/download/v3.3.9-rc.11/sealos_3.3.9-rc.11_linux_amd64.tar.gz && mkdir -p /usr/local/sealos && tar xvf sealos_3.3.9-rc.11_linux_amd64.tar.gz -C /usr/local/sealos/ && chmod +x /usr/local/sealos/sealos && ln -s /usr/local/sealos/sealos /usr/local/bin/sealos

Without network access, download in advance:

https://github.com/labring/sealos/releases
# arm
https://github.com/labring/sealos/releases/download/v3.3.9-rc.11/sealos_3.3.9-rc.11_linux_arm64.tar.gz
# amd
https://github.com/labring/sealos/releases/download/v3.3.9-rc.11/sealos_3.3.9-rc.11_linux_amd64.tar.gz

Then copy/upload it to the intranet hosts and install:

bash
# arm
mkdir -p /usr/local/sealos && tar xvf sealos_3.3.9-rc.11_linux_arm64.tar.gz -C /usr/local/sealos/ && chmod +x /usr/local/sealos/sealos && ln -s /usr/local/sealos/sealos /usr/local/bin/sealos

# amd
mkdir -p /usr/local/sealos && tar xvf sealos_3.3.9-rc.11_linux_amd64.tar.gz -C /usr/local/sealos/ && chmod +x /usr/local/sealos/sealos && ln -s /usr/local/sealos/sealos /usr/local/bin/sealos
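
Either way, a quick sanity check that the binary is reachable on PATH (the version subcommand should print the sealos release):

bash
which sealos
sealos version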

Set the hostname on every node

text
hostnamectl set-hostname xxx
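
For example, matching the names used in /etc/hosts below (run the matching command on each machine):

text
hostnamectl set-hostname master1   # on the master
hostnamectl set-hostname node1     # on the first worker
hostnamectl set-hostname node2     # on the second worker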

Configure /etc/hosts on every node

text
cat >> /etc/hosts << EOF
192.168.x.x master1
192.168.x.x node1
192.168.x.x node2
EOF
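
You can confirm the entries resolve on each node, for example:

text
getent hosts master1 node1 node2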

Test that SSH access works

text
ssh root@192.168.x.x
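
If you prefer not to pass the password on the command line, you can distribute an SSH key instead and later point sealos at it with --pk (a minimal sketch; the key path matches the --pk default listed below):

bash
ssh-keygen -t rsa -f /root/.ssh/id_rsa -N ''
ssh-copy-id root@192.168.x.x   # repeat for every master and node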

The k8s offline package provided on the intranet

text
kube1.17.0.tar.gz

Deploy a multi-master, multi-node cluster

The value after --passwd is the server password (fill in your own servers' password).

text
sealos init --passwd 'userpwd' --master 192.168.x.x --master 192.168.x.x  --node 192.168.x.x   --node 192.168.x.x --pkg-url kube1.17.0.tar.gz --version v1.17.0

Deploy a single-master, multi-node cluster

text
sealos init --passwd 'userpwd' --master 192.168.x.x --node 192.168.x.x  --node 192.168.x.x --pkg-url kube1.17.0.tar.gz --version v1.17.0

Note: hostnames must be unique, otherwise you will get: [EROR] [check.go:63] duplicate hostnames is not allowed
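
As noted in the parameter list below, --pkg-url can also be an HTTP address instead of a local file; a minimal sketch assuming a hypothetical internal file server:

text
sealos init --passwd 'userpwd' --master 192.168.x.x --node 192.168.x.x \
  --pkg-url http://192.168.x.x/kube1.17.0.tar.gz --version v1.17.0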

Reset the cluster

text
sealos clean --all -f

Add or remove nodes

text
sealos join --master 192.168.0.x --master 192.168.0.x
sealos join --master 192.168.0.x-192.168.0.x  # or a range of consecutive IPs

sealos join --node 192.168.0.x --node 192.168.0.x
sealos join --node 192.168.0.x-192.168.0.x  # or a range of consecutive IPs

sealos clean --master 192.168.0.x --master 192.168.0.x
sealos clean --master 192.168.0.x-192.168.0.x # or a range of consecutive IPs

sealos clean --node 192.168.0.x --node 192.168.0.x
sealos clean --node 192.168.0.x-192.168.0.x  # or a range of consecutive IPs

Check the status of all nodes

text
kubectl get nodes
kubectl get po --all-namespaces
kubectl get po --all-namespaces -o wide

The result looks roughly like this:

bash
[root@master ~]# kubectl get node
NAME     STATUS   ROLES    AGE     VERSION
master   Ready    master   2m16s   v1.17.0
node1    Ready    <none>   101s    v1.17.0
node2    Ready    <none>   110s    v1.17.0

[root@master ~]# kubectl get po --all-namespaces
NAMESPACE     NAME                                       READY   STATUS    RESTARTS   AGE
kube-system   calico-kube-controllers-688c5dc8c7-sz769   1/1     Running   0          2m19s
kube-system   calico-node-b6mwm                          1/1     Running   0          2m3s
kube-system   calico-node-g6lpd                          1/1     Running   0          2m12s
kube-system   calico-node-kw8bs                          1/1     Running   0          2m19s
kube-system   coredns-6955765f44-6lwbm                   1/1     Running   0          2m19s
kube-system   coredns-6955765f44-dwqvd                   1/1     Running   0          2m19s
kube-system   etcd-master                                1/1     Running   0          2m34s
kube-system   kube-apiserver-master                      1/1     Running   0          2m34s
kube-system   kube-controller-manager-master             1/1     Running   0          2m34s
kube-system   kube-proxy-c9fjl                           1/1     Running   0          2m19s
kube-system   kube-proxy-nxv78                           1/1     Running   0          2m12s
kube-system   kube-proxy-s9n9g                           1/1     Running   0          2m3s
kube-system   kube-scheduler-master                      1/1     Running   0          2m34s
kube-system   kube-sealyun-lvscare-node1                 1/1     Running   0          2m
kube-system   kube-sealyun-lvscare-node2                 1/1     Running   0          2m9s

[root@master ~]# kubectl get po --all-namespaces -o wide 
NAMESPACE     NAME                                       READY   STATUS    RESTARTS   AGE     IP               NODE     NOMINATED NODE   READINESS GATES
kube-system   calico-kube-controllers-688c5dc8c7-sz769   1/1     Running   0          3m11s   100.108.11.193   node2    <none>           <none>
kube-system   calico-node-b6mwm                          1/1     Running   0          2m55s   192.168.10.101   node1    <none>           <none>
kube-system   calico-node-g6lpd                          1/1     Running   0          3m4s    192.168.10.102   node2    <none>           <none>
kube-system   calico-node-kw8bs                          1/1     Running   0          3m11s   192.168.10.100   master   <none>           <none>
kube-system   coredns-6955765f44-6lwbm                   1/1     Running   0          3m11s   100.89.161.129   master   <none>           <none>
kube-system   coredns-6955765f44-dwqvd                   1/1     Running   0          3m11s   100.108.11.194   node2    <none>           <none>
kube-system   etcd-master                                1/1     Running   0          3m26s   192.168.10.100   master   <none>           <none>
kube-system   kube-apiserver-master                      1/1     Running   0          3m26s   192.168.10.100   master   <none>           <none>
kube-system   kube-controller-manager-master             1/1     Running   0          3m26s   192.168.10.100   master   <none>           <none>
kube-system   kube-proxy-c9fjl                           1/1     Running   0          3m11s   192.168.10.100   master   <none>           <none>
kube-system   kube-proxy-nxv78                           1/1     Running   0          3m4s    192.168.10.102   node2    <none>           <none>
kube-system   kube-proxy-s9n9g                           1/1     Running   0          2m55s   192.168.10.101   node1    <none>           <none>
kube-system   kube-scheduler-master                      1/1     Running   0          3m26s   192.168.10.100   master   <none>           <none>
kube-system   kube-sealyun-lvscare-node1                 1/1     Running   0          2m52s   192.168.10.101   node1    <none>           <none>
kube-system   kube-sealyun-lvscare-node2                 1/1     Running   0          3m1s    192.168.10.102   node2    <none>           <none>

Common sealos parameters

text
--master    list of master node server addresses
--node      list of worker node server addresses
--user      SSH username for the servers
--passwd    SSH password for the servers
--pkg-url   location of the offline package; either a local path or an HTTP URL
--version   Kubernetes version to deploy
--pk        path to the SSH private key, defaults to /root/.ssh/id_rsa

Other flags:
  --kubeadm-config string   kubeadm-config.yaml, used to pass a custom kubeadm configuration file
  --vip string              virtual ip (default "10.103.97.2"), virtual IP for the local load balancer; changing it is not recommended, and it is not reachable from outside the cluster
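
For example, --kubeadm-config can pass extra kubeadm settings; a minimal sketch (the field values are illustrative assumptions, adjust them for your environment):

text
cat > kubeadm-config.yaml << EOF
apiVersion: kubeadm.k8s.io/v1beta2
kind: ClusterConfiguration
kubernetesVersion: v1.17.0
networking:
  podSubnet: 100.64.0.0/10
EOF

sealos init --passwd 'userpwd' --master 192.168.x.x --node 192.168.x.x \
  --pkg-url kube1.17.0.tar.gz --version v1.17.0 --kubeadm-config kubeadm-config.yaml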

Miscellaneous

View taints

# kubectl describe nodes <node-name> | grep Taints
kubectl describe nodes master |grep Taints

Remove a taint

# gameble is the taint key here; appending a "-" removes that taint (e.g. gameble-)
kubectl taint nodes <node-name> <taint-name>:<effect>-
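
A common case, assuming a default kubeadm-style cluster where the master carries the node-role.kubernetes.io/master:NoSchedule taint, is removing that taint so pods can also be scheduled onto the master:

kubectl taint nodes master node-role.kubernetes.io/master:NoSchedule-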

Pull an image for a specific architecture with docker

docker pull --platform=arm64 nginx:latest
# --platform selects the OS/ARCH to pull, e.g. linux/amd64 or linux/arm64; check the registry for what is actually published


"--platform" is only supported on a Docker daemon with experimental features enabled
报错解决:
在/etc/docker/daemon.json的顶层级【与registry-mirrors同级】添加"experimental": true
重启docker
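
For reference, a daemon.json with experimental features enabled might look like this (the registry-mirrors value is only a placeholder):

# /etc/docker/daemon.json  (registry-mirrors value is a placeholder)
{
  "registry-mirrors": ["https://registry.example.com"],
  "experimental": true
}
# then restart docker
systemctl restart docker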

mediaType in manifest should be 'application/vnd.docker.distribution.manifest.v2+json' not 'application/vnd.oci.image.manifest.v1+json'
Explanation:
Most likely the image, the OS/ARCH, or the tag does not exist; check the registry.
It can also mean the local Docker version is too old; try upgrading to a newer Docker release.