Skip to main content
Welcome. This site supports keyboard navigation and screen readers. Press ? at any time for keyboard shortcuts. Press [ to focus the sidebar, ] to focus the content. High-contrast themes are available via the toolbar.
serard@dev00:~/cv

Part 13: The K8s Node Packer Image

"Every node starts from the same image. The image contains containerd, the kubelet, and exactly enough kernel knobs to make Kubernetes happy."


Why

A real Kubernetes node has specific requirements: a container runtime that implements CRI (containerd is the default), the kubelet, kubeadm/k3s binaries, certain kernel modules loaded (overlay, br_netfilter), certain sysctl values set (net.bridge.bridge-nf-call-iptables=1, net.ipv4.ip_forward=1), swap disabled, the right cgroup driver. Get any one of these wrong and the cluster either fails to bootstrap or behaves incorrectly under load.

The right place to handle this is the node image. The image is built once via Packer, and every cluster reuses it. The image is distribution-aware in two flavors: one for kubeadm (which expects you to run kubeadm init after boot) and one for k3s (which can install itself in one shell command). Both share the same Alpine base from homelab-docker Part 25.

The thesis: Packer.Alpine.K8sNode is a new contributor that builds on top of Packer.Alpine.Base and adds the k8s-specific bits. The output is a .box file named frenchexdev/alpine-3.21-k8snode-{distribution} that the K8s topology resolver references.


The shape

[Injectable(ServiceLifetime.Singleton)]
[Order(25)] // runs after Packer.Alpine.Base (Order=10) so the k8s layer builds on the base image
public sealed class K8sNodePackerContributor : IPackerBundleContributor
{
    private readonly HomeLabConfig _config;

    public K8sNodePackerContributor(IOptions<HomeLabConfig> config) => _config = config.Value;

    /// <summary>Only participates when the config has a K8s section.</summary>
    public bool ShouldContribute() => _config.K8s is not null;

    /// <summary>
    /// Adds the k8s node scripts, a shell provisioner that runs them, and a
    /// vagrant post-processor whose box name encodes the distribution.
    /// </summary>
    public void Contribute(PackerBundle bundle)
    {
        if (!ShouldContribute()) return;

        // 1. Common scripts (both kubeadm and k3s), in execution order
        bundle.Scripts.Add(new PackerScript("k8s-prepare-kernel.sh", PrepareKernelScript()));
        bundle.Scripts.Add(new PackerScript("k8s-disable-swap.sh", DisableSwapScript()));
        bundle.Scripts.Add(new PackerScript("k8s-install-containerd.sh", InstallContainerdScript()));

        // 2. Distribution-specific script. The chosen file name is tracked in a
        //    local and handed to BuildScriptList, so the provisioner's script
        //    list cannot drift out of sync with what was actually added
        //    (previously BuildScriptList re-implemented this same branching).
        string? distributionScript = null;
        if (_config.K8s!.Distribution == "kubeadm")
        {
            distributionScript = "k8s-install-kubeadm.sh";
            bundle.Scripts.Add(new PackerScript(distributionScript, InstallKubeadmScript()));
        }
        else if (_config.K8s.Distribution == "k3s")
        {
            // k3s installs itself at provisioning time, not at image-build time
            // We just pre-install curl + the install script for offline / air-gapped scenarios
            distributionScript = "k8s-prefetch-k3s.sh";
            bundle.Scripts.Add(new PackerScript(distributionScript, PrefetchK3sScript()));
        }

        // 3. Provisioner that runs all the scripts in order
        bundle.Provisioners.Add(new PackerProvisioner
        {
            Type = "shell",
            Properties = new()
            {
                ["scripts"] = BuildScriptList(distributionScript),
                ["execute_command"] = "{{ .Vars }} sh '{{ .Path }}'"
            }
        });

        // 4. Vagrant post-processor — output box name encodes the distribution
        bundle.PostProcessors.Add(new PackerPostProcessor
        {
            Type = "vagrant",
            Properties = new()
            {
                ["output"] = $"output-vagrant/{{{{.BuildName}}}}-k8snode-{_config.K8s.Distribution}-{{{{.Provider}}}}.box",
                ["compression_level"] = 9
            }
        });
    }

    /// <summary>
    /// Relative paths Packer should execute: the three common scripts first,
    /// then the distribution-specific one (when present).
    /// </summary>
    private static string[] BuildScriptList(string? distributionScript)
    {
        var scripts = new List<string>
        {
            "scripts/k8s-prepare-kernel.sh",
            "scripts/k8s-disable-swap.sh",
            "scripts/k8s-install-containerd.sh"
        };

        if (distributionScript is not null)
            scripts.Add($"scripts/{distributionScript}");

        return scripts.ToArray();
    }

    // ... script content methods below
    // NOTE(review): the test class calls PrepareKernelScript() statically on
    // this type — the script methods need to be (internal) static for that to
    // compile. Confirm against the full method definitions.
}

k8s-prepare-kernel.sh

#!/bin/sh
# Kernel prerequisites for a Kubernetes node: required modules + sysctls.
set -eux

# Persist the module list so overlay (containerd's snapshotter) and
# br_netfilter (makes bridged pod traffic visible to iptables) load on boot.
cat > /etc/modules-load.d/k8s.conf <<EOF
overlay
br_netfilter
EOF

# Load them now too, so the sysctl keys below exist during this build.
modprobe overlay
modprobe br_netfilter

# bridge-nf-call-*: required so kube-proxy / CNI iptables rules see bridged
# traffic. ip_forward: nodes route pod traffic. inotify limits: raised for
# kubelet/log-tailing workloads (defaults are too low for many pods).
cat > /etc/sysctl.d/99-k8s.conf <<EOF
net.bridge.bridge-nf-call-iptables  = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.ipv4.ip_forward                 = 1
fs.inotify.max_user_instances       = 8192
fs.inotify.max_user_watches         = 524288
EOF

# Apply everything under /etc/sysctl.d immediately.
sysctl --system

k8s-disable-swap.sh

#!/bin/sh
# The kubelet refuses to start (by default) when swap is enabled, so turn it
# off now and keep it off across reboots.
set -eux

# Alpine doesn't enable swap by default but we make sure
swapoff -a || true

# Remove only fstab entries whose filesystem-type field is "swap".
# The previous pattern '/swap/d' deleted ANY line merely containing the
# substring "swap" — including comments or unrelated mounts whose path
# happens to contain the word.
sed -i '/[[:space:]]swap[[:space:]]/d' /etc/fstab

k8s-install-containerd.sh

#!/bin/sh
# Install and configure containerd as the CRI runtime.
set -eux

apk update
apk add --no-cache containerd containerd-ctr runc cni-plugins

# Generate the default config, then adjust it.
mkdir -p /etc/containerd
containerd config default > /etc/containerd/config.toml

# Alpine boots with OpenRC — there is no systemd, so runc's systemd cgroup
# manager is unavailable. Force SystemdCgroup = false (the cgroupfs driver).
# Setting it to true here (as a naive copy of the usual Ubuntu guidance would)
# makes every pod sandbox fail to start once the node joins a cluster.
# NOTE(review): the kubelet must be configured with cgroupDriver: cgroupfs to
# match — confirm the kubeadm/k3s config emits that.
sed -i 's|SystemdCgroup = true|SystemdCgroup = false|' /etc/containerd/config.toml

# Pin the pause image so the kubelet doesn't pull a second sandbox image at runtime.
sed -i 's|sandbox_image = ".*"|sandbox_image = "registry.k8s.io/pause:3.10"|' /etc/containerd/config.toml

# Start now (the kubeadm image pre-pull in the next script needs a running
# runtime) and enable on boot.
rc-update add containerd default
service containerd start

# Sanity check
ctr version

k8s-install-kubeadm.sh

#!/bin/sh
# Install kubeadm, kubelet and kubectl at a pinned version and pre-pull the
# control-plane images so the first `kubeadm init` is fast.
set -eux

# Alpine has the k8s packages in the testing repository. Append it only once
# so re-running this script (or layering image builds) doesn't duplicate the
# line and slow down every apk update.
# NOTE(review): mixing edge/testing into a 3.21 base can pull incompatible
# dependencies — consider a pinned repository tag (@testing) instead.
REPO='https://dl-cdn.alpinelinux.org/alpine/edge/testing'
grep -qxF "$REPO" /etc/apk/repositories || echo "$REPO" >> /etc/apk/repositories
apk update

# Pin the version (K8S_VERSION is populated from _config.K8s.Version).
# The -r0 package revision is assumed — TODO confirm it exists for the pin.
K8S_VERSION="${K8S_VERSION:-1.31.4}"
apk add --no-cache "kubeadm=${K8S_VERSION}-r0" "kubelet=${K8S_VERSION}-r0" "kubectl=${K8S_VERSION}-r0"

# Pre-pull the kubeadm images so the first kubeadm init is fast.
# Requires a running containerd (started by k8s-install-containerd.sh).
kubeadm config images pull --kubernetes-version "v${K8S_VERSION}"

# Enable kubelet via OpenRC
rc-update add kubelet default
# Note: don't START kubelet yet — it needs to be configured by kubeadm init / join
# Note: don't START kubelet yet — it needs to be configured by kubeadm init / join

k8s-prefetch-k3s.sh

#!/bin/sh
# Pre-fetch the k3s installer AND the pinned k3s binary so the first install
# can run offline (INSTALL_K3S_SKIP_DOWNLOAD=true at provisioning time).
set -eux

K3S_VERSION="${K3S_VERSION:-v1.31.4+k3s1}"

# The official installer script.
curl -sfL https://get.k3s.io -o /usr/local/bin/k3s-install.sh
chmod +x /usr/local/bin/k3s-install.sh

# The pinned binary. Previously K3S_VERSION was defined but never used and
# only the installer was cached, so the "offline-capable" promise did not
# hold. GitHub release URLs need the '+' in the version encoded as %2B.
K3S_URL="https://github.com/k3s-io/k3s/releases/download/$(echo "$K3S_VERSION" | sed 's/+/%2B/')/k3s"
curl -sfL "$K3S_URL" -o /usr/local/bin/k3s
chmod +x /usr/local/bin/k3s

# The actual install happens at provisioning time on first boot
# (the K3sClusterDistribution from Part 12 calls it via SSH)

The Mermaid diagram

(Diagram: Packer.Alpine.Base → Packer.Alpine.K8sNode → shell provisioner → vagrant post-processor → .box file; omitted in this text rendering.)
The K8s node image is just another Packer contributor layered on top of the existing Alpine base — one ordered contributor, one .box file, ready for the Vagrant box registry.

The contributor runs after the base contributor (Order=10) and after any host overlay contributors. The result is one .box file per distribution, ready for the Vagrant box registry.


Versioning

The K8s.Dsl plugin uses Alpine.Version (from the toolbelt) to track the Alpine base image version and emit a warning when it drifts behind upstream. It also pins the Kubernetes version in the Packer scripts via the K8S_VERSION environment variable, which is populated from _config.K8s.Version.

The pinning is intentional: the Packer image bakes in a specific Kubernetes version, and a cluster created from this image will run that version. Upgrading the cluster (see Part 40) does not require rebuilding the image — kubeadm upgrade swaps the binaries on the running nodes — but rebuilding the image with the new version is the cheapest way to bootstrap fresh nodes at the new version.


The wiring

The contributor is [Injectable(ServiceLifetime.Singleton)] and [Order(25)]. The Generate stage of the pipeline picks it up via IEnumerable<IPackerBundleContributor>. The Apply stage calls IPackerClient.BuildAsync against the generated bundle. The output .box file is published to the Vagrant box registry via homelab box publish (homelab-docker Part 40).


The test

public sealed class K8sNodePackerContributorTests
{
    // Wires a contributor around the given config, runs it against a fresh
    // bundle, and returns the bundle for assertions.
    private static PackerBundle RunContributor(HomeLabConfig config)
    {
        var bundle = new PackerBundle();
        var contributor = new K8sNodePackerContributor(Options.Create(config));
        contributor.Contribute(bundle);
        return bundle;
    }

    [Fact]
    public void contributor_does_nothing_when_k8s_config_is_absent()
    {
        var bundle = RunContributor(new HomeLabConfig { K8s = null });

        bundle.Scripts.Should().NotContain(s => s.FileName.Contains("k8s"));
    }

    [Fact]
    public void contributor_adds_kubeadm_install_script_when_distribution_is_kubeadm()
    {
        var bundle = RunContributor(new HomeLabConfig
        {
            K8s = new() { Distribution = "kubeadm", Version = "v1.31.4" }
        });

        bundle.Scripts.Should().Contain(s => s.FileName == "k8s-install-kubeadm.sh");
        bundle.Scripts.Should().NotContain(s => s.FileName == "k8s-prefetch-k3s.sh");
    }

    [Fact]
    public void contributor_adds_k3s_prefetch_script_when_distribution_is_k3s()
    {
        var bundle = RunContributor(new HomeLabConfig
        {
            K8s = new() { Distribution = "k3s", Version = "v1.31.4+k3s1" }
        });

        bundle.Scripts.Should().Contain(s => s.FileName == "k8s-prefetch-k3s.sh");
        bundle.Scripts.Should().NotContain(s => s.FileName == "k8s-install-kubeadm.sh");
    }

    [Fact]
    public void contributor_emits_distribution_in_the_box_filename()
    {
        var bundle = RunContributor(new HomeLabConfig
        {
            K8s = new() { Distribution = "kubeadm", Version = "v1.31.4" }
        });

        var vagrant = bundle.PostProcessors.First(p => p.Type == "vagrant");
        vagrant.Properties["output"].ToString().Should().Contain("k8snode-kubeadm");
    }

    [Fact]
    public void kernel_script_loads_overlay_and_br_netfilter()
    {
        // NOTE(review): static call — PrepareKernelScript must be an
        // (internal) static method for this to compile; confirm against the
        // contributor's script-content methods.
        var script = K8sNodePackerContributor.PrepareKernelScript();

        script.Should().Contain("modprobe overlay");
        script.Should().Contain("modprobe br_netfilter");
        script.Should().Contain("net.bridge.bridge-nf-call-iptables");
        script.Should().Contain("net.ipv4.ip_forward");
    }
}

What this gives you that ad hoc node setup doesn't

A team that does k8s-on-laptops without a typed node image installs everything by hand the first time, writes a cleanup script, and then re-runs the cleanup script every time someone brings up a new VM. The script drifts. Three months later, somebody tries to bring up a new node and discovers that the apk repository moved, the package version is gone, and the kernel modules need a different name. They fix it on their machine and forget to commit.

A typed Packer.Alpine.K8sNode contributor gives you, for the same surface area:

  • One Packer build that produces a reproducible .box file
  • Distribution-aware scripts so the same contributor handles both kubeadm and k3s
  • Pinned versions for both Alpine and Kubernetes
  • Tests that lock the script content
  • Composability with the existing Alpine base contributor

The bargain pays back the first time you bring up a fresh cluster on a new machine in 8 minutes (the box already has everything baked in) instead of 45 minutes (each node has to install containerd, kubelet, and kubeadm from the apk repository).


⬇ Download