# Optional: NODE_MAJOR can be changed depending on the version you need.
NODE_MAJOR=18
echo "deb [signed-by=/etc/apt/keyrings/nodesource.gpg] https://deb.nodesource.com/node_$NODE_MAJOR.x nodistro main" | sudo tee /etc/apt/sources.list.d/nodesource.list
Set up some aliases and auto-completion for the Kubernetes commands:
kubectl completion bash | sudo tee /etc/bash_completion.d/kubectl > /dev/null
kubeadm completion bash | sudo tee /etc/bash_completion.d/kubeadm > /dev/null
# copy .bash_aliases file from infra dir to the node hosts
SSH_HOST_ALIAS=sa3 # sa3, sd3, sm3
scp infra/.bash_aliases.prod $SSH_HOST_ALIAS:~/.bash_aliases
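The contents of infra/.bash_aliases.prod are not shown here; a minimal sketch of what it might define, given that the k alias is used below (the completion binding follows the kubectl docs):
# hypothetical sketch of infra/.bash_aliases.prod
alias k=kubectl
# let the bash completion installed above also apply to the k alias
complete -o default -F __start_kubectl k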
source ~/.bashrc
k version --output=yaml
Initialize Kubernetes on App, the control plane (master)
# Allow access to the required ports on the control plane
ufw allow 6443/tcp        # Kubernetes API server
ufw allow 2379:2380/tcp   # etcd server client API
ufw allow 10250/tcp       # kubelet API
ufw allow 10259/tcp       # kube-scheduler
ufw allow 10257/tcp       # kube-controller-manager
# app host static ip
SERVER_STATIC_IP=94.182.195.209
# same as previous step
KUBE_VER=1.27.5
kubeadm config images pull
kubeadm init \
--kubernetes-version=v$KUBE_VER \
--pod-network-cidr=10.0.0.0/8 \
--control-plane-endpoint $SERVER_STATIC_IP \
--apiserver-advertise-address=$SERVER_STATIC_IP \
--cri-socket unix:///run/containerd/containerd.sock
# Configure kubectl access to the new cluster
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
k get node
Join the DB and Monit hosts as worker nodes
On the DB and Monit hosts, we need to join the k8s cluster initialized on the App host:
# allow access to kubelet api in db and monit worker
ufw allow 10250/tcp
CONTROL_PLANE_IP=94.182.195.209
kubeadm join --discovery-token nqrru2.xxxxxxxxxxxxxxx --discovery-token-ca-cert-hash sha256:0f7aexxxxx...3f56 $CONTROL_PLANE_IP:6443
First, you must get the discovery token by running the following command on the control-plane (App) node:
kubeadm token list
If the token has expired, create a new one and list it again:
kubeadm token create
kubeadm token list
If you don't have the value of --discovery-token-ca-cert-hash, you can get it by running the following command chain on the control-plane node:
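# standard chain from the kubeadm docs: sha256 of the cluster CA public key
openssl x509 -pubkey -in /etc/kubernetes/pki/ca.crt | \
  openssl rsa -pubin -outform der 2>/dev/null | \
  openssl dgst -sha256 -hex | sed 's/^.* //'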
By default, every worker node can schedule at most 110 pods, and our use case needs more pods per worker.
To increase the maximum pod count, first edit the file /etc/systemd/system/kubelet.service.d/10-kubeadm.conf and add the --max-pods option to the ExecStart configuration as follows:
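A sketch of the edited drop-in; the exact ExecStart line can differ between kubelet versions, and 250 is only an example value:
# /etc/systemd/system/kubelet.service.d/10-kubeadm.conf (excerpt)
ExecStart=/usr/bin/kubelet $KUBELET_KUBECONFIG_ARGS $KUBELET_CONFIG_ARGS $KUBELET_KUBEADM_ARGS $KUBELET_EXTRA_ARGS --max-pods=250
Then reload systemd and restart the kubelet for the change to take effect:
sudo systemctl daemon-reload
sudo systemctl restart kubelet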