vagrant-k8s-vanilla/cluster.yml

---
#
# Playbook to set up a Kubernetes cluster using kubeadm
#
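# The plays below expect an inventory with `control_planes` and `workers` groups,
# plus `apiserver_endpoint` and `ansible_user` variables. A minimal, illustrative
# YAML inventory is sketched here; host names, addresses, and the vagrant user are
# assumptions, not part of this playbook:
#
#   all:
#     vars:
#       ansible_user: vagrant
#       apiserver_endpoint: "<VIP or first control-plane address>"
#     children:
#       control_planes:
#         hosts:
#           cp1: { ansible_host: 192.168.122.101 }
#           cp2: { ansible_host: 192.168.122.102 }
#       workers:
#         hosts:
#           worker1: { ansible_host: 192.168.122.111 }
#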
# ==============================================================================
# PHASE 1: Install prerequisites on all nodes
# ==============================================================================
- name: 1. Install prerequisites on all nodes
  hosts: all
  become: yes
  tasks:
    - name: Ensure kernel modules are loaded
      modprobe:
        name: "{{ item }}"
        state: present
      loop:
        - overlay
        - br_netfilter

    - name: Persist kernel modules across reboots
      copy:
        dest: /etc/modules-load.d/k8s.conf
        content: |
          overlay
          br_netfilter

    - name: Set required sysctl parameters
      sysctl:
        name: "{{ item.key }}"
        value: "{{ item.value }}"
        sysctl_set: yes
        state: present
        reload: yes
      loop:
        - { key: 'net.bridge.bridge-nf-call-iptables', value: '1' }
        - { key: 'net.ipv4.ip_forward', value: '1' }
        - { key: 'net.bridge.bridge-nf-call-ip6tables', value: '1' }

    - name: Install required packages
      apt:
        name:
          - apt-transport-https
          - ca-certificates
          - curl
          - containerd
        state: present
        update_cache: yes

    - name: Ensure /etc/containerd directory exists
      file:
        path: /etc/containerd
        state: directory
        mode: '0755'

    - name: Configure containerd and enable SystemdCgroup
      shell: |
        containerd config default | tee /etc/containerd/config.toml >/dev/null 2>&1
        sed -i 's/SystemdCgroup = false/SystemdCgroup = true/g' /etc/containerd/config.toml
      args:
        executable: /bin/bash
      changed_when: true

    - name: Restart and enable containerd
      systemd:
        name: containerd
        state: restarted
        enabled: yes

    - name: Add Kubernetes v1.33 apt key
      get_url:
        url: https://pkgs.k8s.io/core:/stable:/v1.33/deb/Release.key
        dest: /etc/apt/keyrings/kubernetes-apt-keyring.asc
        mode: '0644'
        force: true

    - name: Add Kubernetes v1.33 apt repository
      apt_repository:
        repo: "deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring.asc] https://pkgs.k8s.io/core:/stable:/v1.33/deb/ /"
        state: present
        filename: kubernetes

    - name: Install kubeadm, kubelet, and kubectl
      apt:
        name:
          - kubelet
          - kubeadm
          - kubectl
        state: present
        update_cache: yes

    - name: Hold Kubernetes packages to prevent accidental updates
      dpkg_selections:
        name: "{{ item }}"
        selection: hold
      loop:
        - kubelet
        - kubeadm
        - kubectl
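
# Note: the "Configure containerd and enable SystemdCgroup" task above rewrites the
# default config so that containerd and the kubelet agree on the systemd cgroup
# driver (kubeadm has defaulted the kubelet to systemd for some time). With the
# containerd 1.x package from the distribution archive, the flipped setting lives
# under roughly this section:
#
#   [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options]
#     SystemdCgroup = true
#
# Newer containerd 2.x releases use a different config layout, so check the section
# name if the package version changes.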
# ==============================================================================
# PHASE 2: Set up the first control plane
# ==============================================================================
- name: 2. Set up the first control plane
  hosts: control_planes[0]
  become: yes
  tasks:
    - name: Initialize the Kubernetes cluster
      command: "kubeadm init --control-plane-endpoint={{ apiserver_endpoint }} --upload-certs --pod-network-cidr=192.168.0.0/16"
      args:
        creates: /etc/kubernetes/admin.conf

    - name: Create .kube directory for the user
      file:
        path: "/home/{{ ansible_user }}/.kube"
        state: directory
        owner: "{{ ansible_user }}"
        group: "{{ ansible_user }}"
        mode: '0755'

    - name: Copy admin.conf to user's .kube directory
      copy:
        src: /etc/kubernetes/admin.conf
        dest: "/home/{{ ansible_user }}/.kube/config"
        remote_src: yes
        owner: "{{ ansible_user }}"
        group: "{{ ansible_user }}"
        mode: '0644'

    - name: Install Calico CNI v3.30.2
      become: false  # Run as the user with kubectl access
      command: "kubectl apply -f https://raw.githubusercontent.com/projectcalico/calico/v3.30.2/manifests/calico.yaml"
      args:
        creates: /etc/cni/net.d/10-calico.conflist

    - name: Generate join command for other nodes
      command: kubeadm token create --print-join-command
      register: join_command_control_plane

    - name: Generate certificate key for joining control planes
      command: kubeadm init phase upload-certs --upload-certs
      register: cert_key
# ==============================================================================
# PHASE 2.5: Store join commands as facts on localhost
# ==============================================================================
- name: 2.5. Store join commands on the Ansible controller
  hosts: localhost
  connection: local
  gather_facts: no
  tasks:
    - name: Store the join commands as facts
      set_fact:
        join_cp_command: "{{ hostvars[groups['control_planes'][0]]['join_command_control_plane']['stdout'] }} --control-plane --certificate-key {{ hostvars[groups['control_planes'][0]]['cert_key']['stdout_lines'][-1] }}"
        join_worker_command: "{{ hostvars[groups['control_planes'][0]]['join_command_control_plane']['stdout'] }}"
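
# For reference, the composed facts take roughly this shape (token, hash, and
# certificate key values are placeholders, not real output):
#
#   join_worker_command:
#     kubeadm join <apiserver_endpoint>:6443 --token <token> \
#       --discovery-token-ca-cert-hash sha256:<hash>
#   join_cp_command:
#     <join_worker_command> --control-plane --certificate-key <key>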
# ==============================================================================
# PHASE 3: Join other control planes to the cluster
# ==============================================================================
- name: 3. Join other control planes to the cluster
  hosts: control_planes[1:]
  become: yes
  tasks:
    - name: Join control plane to the cluster
      command: "{{ hostvars['localhost']['join_cp_command'] }}"
      args:
        creates: /etc/kubernetes/kubelet.conf
# ==============================================================================
# PHASE 4: Set up the worker nodes
# ==============================================================================
- name: 4. Join worker nodes to the cluster
  hosts: workers
  become: yes
  tasks:
    - name: Join worker node to the cluster
      command: "{{ hostvars['localhost']['join_worker_command'] }}"
      args:
        creates: /etc/kubernetes/kubelet.conf
# ==============================================================================
# PHASE 5: Fetch kubeconfig for local use
# ==============================================================================
- name: 5. Fetch admin.conf to the local machine
  hosts: control_planes[0]
  become: yes
  tasks:
    - name: Fetch admin.conf from the first control plane
      fetch:
        src: /etc/kubernetes/admin.conf
        dest: .kubeconfig
        flat: yes

    - name: Update kubeconfig to use the cluster's external IP
      replace:
        path: .kubeconfig
        regexp: 'https://192.168.122.101:6443'
        replace: "https://{{ apiserver_endpoint }}:6443"
      delegate_to: localhost
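
# A typical invocation and a quick smoke test (the inventory file name is an
# assumption; `.kubeconfig` is written next to the playbook by Phase 5):
#
#   ansible-playbook -i inventory.yml cluster.yml
#   kubectl --kubeconfig .kubeconfig get nodes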