00-Provisioning

Adding new nodes

  1. Call lighter with the new node's name to generate ignition files for the node; see the snippet below.
  2. Commit the resulting ignition/*.json files and push them to version control at deadbeef.codes; they need to be present before booting the node.
# Be sure to run from 00-provisioning directory
cd 00-provisioning

# Template the Butane files by replacing the hostname token with the name
# passed to lighter, then call butane to generate the ignition files
lighter() {
    if [ -z "$1" ]; then
        echo "error: lighter() called without specifying a VM name"
        echo "Usage: lighter <name>"
        return 1
    fi

    # Create temporary working copies
    cp butane/boot.yaml butane/boot~.yaml
    cp butane/full.yaml butane/full~.yaml

    # Replace hostname token with name provided
    hostnameToken="{{HOSTNAME}}"
    sed -i -e "s/$hostnameToken/$1/g" butane/boot~.yaml
    sed -i -e "s/$hostnameToken/$1/g" butane/full~.yaml

    # Butane transpile to ignition files
    butane butane/boot~.yaml > "ignition/$1-boot.json"
    butane butane/full~.yaml > "ignition/$1-full.json"

    # Cleanup mess
    rm -f butane/*~.yaml
}

lighter kube-control01
lighter kube-control02
lighter kube-control03
lighter kube-worker01
lighter kube-worker02
lighter kube-worker03
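
Before committing, it can help to sanity-check the generated files. The snippet below is a minimal sketch, assuming jq is installed and that ignition-validate (from the coreos/ignition releases) is on PATH; both checks are optional, and kube-worker01 is just an example name.

# Sanity-check the generated ignition files before pushing them
for f in ignition/kube-worker01-boot.json ignition/kube-worker01-full.json; do
    jq empty "$f" && echo "$f is valid JSON"               # basic JSON parse check
    ignition-validate "$f" && echo "$f passes validation"   # assumes ignition-validate is installed
done

# Commit and push so the files are reachable before the node boots
git add ignition/*.json
git commit -m "add ignition files for kube-worker01"
git push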


After you've checked the ignition files into version control, provision the server, either on bare metal or as a VM (the example below uses VirtualBox). Use some mechanism to point the node at its boot ignition file; with VirtualBox, guest properties can be used for this.


# Terraform?  We roll our own.

# Stop Git Bash from rewriting POSIX-style paths passed to Windows programs
export MSYS_NO_PATHCONV=1

# Be sure to run from 00-provisioning directory
cd 00-provisioning

# Set your own
VBOXMANAGE="C:/Program Files/Oracle/VirtualBox/vboxmanage"
COREOSAPPLIANCEIMAGE="D:/VirtualBox/OVA/fedora-coreos-39.20231119.3.0-virtualbox.x86_64.ova"

# Function to create a VirtualBox VM; accepts the VM name and MAC address as arguments
create_vm() {
    if [ -z "$1" ] || [ -z "$2" ]; then
        echo "error: create_vm() called without specifying a VM name and MAC address"
        echo "Usage: create_vm <name> <MAC address>"
        echo "Example: create_vm kube-control01 \"080027000001\""
        return 1
    fi

    "$VBOXMANAGE" import --vsys 0 --vmname "$1" "$COREOSAPPLIANCEIMAGE"
    "$VBOXMANAGE" modifyvm "$1" --nic1 bridged
    "$VBOXMANAGE" modifyvm "$1" --bridge-adapter1 "Intel(R) Ethernet Controller I225-V"
    "$VBOXMANAGE" modifyvm "$1" --macaddress1 "$2"
    "$VBOXMANAGE" guestproperty set "$1" "/Ignition/Config" "$(cat "ignition/$1-boot.json")"
    "$VBOXMANAGE" startvm "$1" --type headless
}

# Controllers - if doing HA, you need at least 3 for Raft consensus
create_vm kube-control01 "080027000001"
create_vm kube-control02 "080027000002"
create_vm kube-control03 "080027000003"

# Workers
create_vm kube-worker01 "080027000010"
create_vm kube-worker02 "080027000011"
create_vm kube-worker03 "080027000012"

If you're adding a new node or changing a MAC address, be sure to update the external firewall address objects and the external load balancer.
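
Once a node boots, a quick way to confirm it came up with the intended identity is to check that the VM got the MAC you assigned and then watch it register with the cluster. This is a rough sketch, assuming kubectl is already configured against this cluster and using kube-worker01 as an example name.

# Confirm the VM has the MAC address you assigned
"$VBOXMANAGE" showvminfo kube-worker01 | grep -i "MAC"

# Watch the node register with the cluster and go Ready
kubectl get nodes -o wide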