kubernetes 安装与部署
修改主机名
sh
# Set a persistent hostname for this node (stored by systemd-hostnamed)
sudo hostnamectl set-hostname ksmain
关闭交换空间
sh
# Disable swap immediately — kubelet refuses to start with swap enabled
sudo swapoff -a
# Alternative kept for reference. NOTE(review): it uses ERE groups ( ) and 1
# without `sed -E`, so as written it would need \( \) \1 to actually work.
# sudo sed -i '/ swap / s/^(.*)$/#1/g' /etc/fstab
# Comment out every swap entry in /etc/fstab so the change survives reboot
sed -i 's/.*swap.*/#&/' /etc/fstab
校准时间
sh
# Align the node clock's timezone (certificate validity and log timestamps
# are easier to reason about when all nodes agree)
timedatectl set-timezone Asia/Shanghai
设置容器运行时网络
sh
# Load the kernel modules the container runtime needs, on every boot
cat <<EOF | sudo tee /etc/modules-load.d/k8s.conf
overlay
br_netfilter
EOF
# Load them now, without a reboot
sudo modprobe overlay
sudo modprobe br_netfilter
# Required sysctl parameters; persisted so they survive reboots
cat <<EOF | sudo tee /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.ipv4.ip_forward = 1
EOF
# Apply the sysctl parameters now, without a reboot
sudo sysctl --system
安装容器运行时
sh
# Install guide: https://github.com/containerd/containerd/blob/main/docs/getting-started.md
# Install the container runtime (containerd)
# download https://github.com/containerd/containerd/releases
tar Cxzvf /usr/local containerd-1.7.6-linux-amd64.tar.gz
# download https://raw.githubusercontent.com/containerd/containerd/main/containerd.service
mkdir -p /usr/local/lib/systemd/system/
mv containerd.service /usr/local/lib/systemd/system/containerd.service
systemctl daemon-reload
systemctl enable --now containerd
# Generate containerd's default configuration file
mkdir -p /etc/containerd/
containerd config default > /etc/containerd/config.toml
# Point the sandbox/system image registry at the Aliyun mirror
sed -i 's#registry.k8s.io#registry.aliyuncs.com/google_containers#g' /etc/containerd/config.toml
# Switch runc to the systemd cgroup driver (see the systemd vs cgroupfs section)
sed -i 's/SystemdCgroup = false/SystemdCgroup = true/g' /etc/containerd/config.toml
# 配置containerd国内源
参考下文「containerd国内源配置」与「containerd 自建私有源配置」两节
安装runc
sh
# Install runc, the OCI runtime containerd delegates container execution to
# download https://github.com/opencontainers/runc/releases
install -m 755 runc.amd64 /usr/local/sbin/runc
安装cni plugin
sh
# Install the CNI plugins (bridge, host-local, loopback, ...)
# download https://github.com/containernetworking/plugins/releases
mkdir -p /opt/cni/bin
tar Cxzvf /opt/cni/bin cni-plugins-linux-amd64-v1.3.0.tgz
# Initial CNI configuration: a bridge network with host-local IPAM
mkdir -p /etc/cni/net.d
cat >/etc/cni/net.d/10-mynet.conf <<EOF
{
"cniVersion": "0.2.0",
"name": "mynet",
"type": "bridge",
"bridge": "cni0",
"isGateway": true,
"ipMasq": true,
"ipam": {
"type": "host-local",
"subnet": "10.22.0.0/16",
"routes": [
{ "dst": "0.0.0.0/0" }
]
}
}
EOF
# Loopback config so every pod gets a working "lo" interface
cat >/etc/cni/net.d/99-loopback.conf <<EOF
{
"cniVersion": "0.2.0",
"name": "lo",
"type": "loopback"
}
EOF
aliyun安装kubeadm
sh
# Install kubeadm/kubelet/kubectl from the Aliyun mirror.
apt-get update && apt-get install -y apt-transport-https ca-certificates curl gpg
# `apt-key add` is deprecated (Debian 11 / Ubuntu 22.04+); store the repo key
# in a dedicated keyring and reference it via signed-by instead.
mkdir -p /etc/apt/keyrings
curl -fsSL https://mirrors.aliyun.com/kubernetes/apt/doc/apt-key.gpg \
  | gpg --dearmor -o /etc/apt/keyrings/kubernetes-aliyun.gpg
cat <<EOF >/etc/apt/sources.list.d/kubernetes.list
deb [signed-by=/etc/apt/keyrings/kubernetes-aliyun.gpg] https://mirrors.aliyun.com/kubernetes/apt/ kubernetes-xenial main
EOF
apt-get update
apt-get install -y kubelet kubeadm kubectl
# Pin the versions so routine `apt upgrade` cannot break the cluster
apt-mark hold kubelet kubeadm kubectl
创建集群
sh
# List and pre-pull the control-plane images from the Aliyun mirror
kubeadm config images list --image-repository registry.aliyuncs.com/google_containers
kubeadm config images pull --image-repository registry.aliyuncs.com/google_containers
kubeadm init --image-repository registry.aliyuncs.com/google_containers
# Single-node cluster:
# remove the control-plane taint so ordinary pods can be scheduled on it
kubectl taint nodes --all node-role.kubernetes.io/control-plane-
安装网络插件Tigera Calico
# Install the Tigera operator
kubectl create -f https://raw.githubusercontent.com/projectcalico/calico/v3.26.1/manifests/tigera-operator.yaml
# The original line fused two commands ("kubectl create -f <url> vi custom-resources.yaml").
# Download the manifest first, edit the pod CIDR (see the YAML below), then apply it.
curl -fsSLO https://raw.githubusercontent.com/projectcalico/calico/v3.26.1/manifests/custom-resources.yaml
vi custom-resources.yaml
kubectl create -f custom-resources.yaml
yaml
# This section includes base Calico installation configuration.
# For more information, see: https://projectcalico.docs.tigera.io/master/reference/installation/api#operator.tigera.io/v1.Installation
# NOTE: the original paste had lost all YAML indentation, which makes the
# manifest invalid; the canonical structure is restored below.
apiVersion: operator.tigera.io/v1
kind: Installation
metadata:
  name: default
spec:
  # Configures Calico networking.
  calicoNetwork:
    # Note: The ipPools section cannot be modified post-install.
    ipPools:
    - blockSize: 26
      # Change this cidr to match the cluster's pod CIDR
      # (kubeadm's common default is 10.244.0.0/16)
      cidr: 192.168.0.0/16
      encapsulation: VXLANCrossSubnet
      natOutgoing: Enabled
      nodeSelector: all()
---
# This section configures the Calico API server.
# For more information, see: https://projectcalico.docs.tigera.io/master/reference/installation/api#operator.tigera.io/v1.APIServer
apiVersion: operator.tigera.io/v1
kind: APIServer
metadata:
  name: default
spec: {}
命令
sh
## 替换镜像源
registry.k8s.io 可替换为以下任一国内镜像仓库:
registry.cn-hangzhou.aliyuncs.com/google_containers
registry.aliyuncs.com/google_containers
## Watch and debug kubelet logs
journalctl -xe -u kubelet -f
## List the images kubeadm needs
kubeadm config images list
kubeadm config images list --image-repository registry.aliyuncs.com/google_containers
kubeadm config images pull
kubeadm config images pull --image-repository registry.aliyuncs.com/google_containers
## Inspect a pod's details and events (for container logs use `kubectl logs`)
kubectl describe pod <pod-name> -n <namespaces>
systemd vs cgroupfs vs cgroup vs cgroups
Cgroup 是一个 Linux 内核特性,对一组进程的资源使用(CPU、内存、磁盘 I/O 和网络等)进行限制、审计和隔离。cgroups(Control Groups)是 Linux 内核提供的一种机制,可以根据需求把一系列系统任务及其子任务整合(或分隔)到按资源划分等级的不同组内,从而为系统资源管理提供一个统一的框架。cgroupfs 是 Cgroup 接口的一种封装,也是 docker 默认的 Cgroup Driver。systemd 同样是对 Cgroup 接口的一种封装;Ubuntu 等主流 Linux 发行版一般使用 systemd 作为初始化系统,因此 Kubernetes 推荐使用 systemd 驱动。
关键的一点是 kubelet 和容器运行时(containerd)需使用相同的 cgroup 驱动并且采用相同的配置
配置containerd systemd cgroup 驱动
toml
# To use the systemd cgroup driver with runc, set in /etc/containerd/config.toml:
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc]
...
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options]
SystemdCgroup = true
配置kubelet systemd cgroup 驱动
yaml
# vi /var/lib/kubelet/config.yaml
# kubelet must use the same cgroup driver as containerd (systemd here)
cgroupDriver: systemd
containerd国内源配置
vi /etc/containerd/config.toml
toml
# Mirror configuration for docker.io image pulls.
# Replace "------" with your personal Aliyun mirror account ID.
[plugins."io.containerd.grpc.v1.cri".registry]
[plugins."io.containerd.grpc.v1.cri".registry.mirrors]
[plugins."io.containerd.grpc.v1.cri".registry.mirrors."docker.io"]
endpoint = ["https://------.mirror.aliyuncs.com", "https://registry-1.docker.io"]
containerd 自建私有源配置
toml
[plugins."io.containerd.grpc.v1.cri".registry]
[plugins."io.containerd.grpc.v1.cri".registry.mirrors]
[plugins."io.containerd.grpc.v1.cri".registry.mirrors."docker.io"]
# Up to here is the default generated config; everything below is added.
# (TOML comments use '#'; the original '//' inline comment would break parsing.)
endpoint = ["https://registry-1.docker.io"]
# Self-hosted private registry (e.g. Harbor)
[plugins."io.containerd.grpc.v1.cri".registry.mirrors."192.168.66.4"]
endpoint = ["https://192.168.66.4:443"]
[plugins."io.containerd.grpc.v1.cri".registry.configs]
# Skip TLS verification — the private registry uses a self-signed certificate
[plugins."io.containerd.grpc.v1.cri".registry.configs."192.168.66.4".tls]
insecure_skip_verify = true
# SECURITY: credentials are stored in plain text in this file;
# restrict its permissions (chmod 600) and change the default Harbor password
[plugins."io.containerd.grpc.v1.cri".registry.configs."192.168.66.4".auth]
username = "admin"
password = "Harbor12345"