Part 28: GitLab on K8s via the Helm Chart
"GitLab Omnibus belongs in a VM. The GitLab Helm chart belongs in a Kubernetes cluster."
Why
homelab-docker Part 39 showed how to run GitLab Omnibus inside a VM. That works for the Docker Compose-based DevLab. For a Kubernetes cluster, the right answer is the official GitLab Helm chart (also known as the cloud-native chart or gitlab/gitlab). The chart splits GitLab into ~30 microservices (Webservice, Sidekiq, Gitaly, Praefect, Workhorse, Toolbox, …), uses Kubernetes-native primitives (Deployments, StatefulSets, Services, Ingress), and integrates with CloudNativePG and MinIO via standard k8s mechanisms.
The thesis: K8s.Dsl ships a GitLabHelmReleaseContributor that installs the official chart with values pointing at the in-cluster CloudNativePG Postgres and the in-cluster MinIO. Plus the chart's runner sub-chart so the GitLab runner is part of the same release.
The shape
[Injectable(ServiceLifetime.Singleton)]
public sealed class GitLabHelmReleaseContributor : IHelmReleaseContributor
{
    // Typed homelab configuration; supplies the ACME TLD used as GitLab's domain.
    // NOTE(review): the original snippet read _config without declaring it anywhere,
    // so the class as published does not compile. Injected here via the constructor;
    // the concrete config type name is assumed — TODO confirm against the project.
    private readonly HomelabConfig _config;

    public GitLabHelmReleaseContributor(HomelabConfig config) => _config = config;

    /// <summary>Contributes to every cluster (the "*" wildcard).</summary>
    public string TargetCluster => "*";

    /// <summary>
    /// Adds the official GitLab Helm release (chart <c>gitlab/gitlab</c>) to the
    /// bundle, with values pointing at the in-cluster CloudNativePG Postgres and
    /// the in-cluster MinIO, plus the gitlab-runner sub-chart.
    /// </summary>
    public void Contribute(KubernetesBundle bundle)
    {
        var domain = _config.Acme.Tld;

        bundle.HelmReleases.Add(new HelmReleaseSpec
        {
            Name = "gitlab",
            Namespace = "gitlab",
            Chart = "gitlab/gitlab",
            Version = "8.5.0",
            RepoUrl = "https://charts.gitlab.io/",
            CreateNamespace = true,
            Wait = true,
            Timeout = TimeSpan.FromMinutes(20), // GitLab takes a while
            Values = new()
            {
                ["global"] = new Dictionary<string, object?>
                {
                    ["edition"] = "ce",
                    ["hosts"] = new Dictionary<string, object?>
                    {
                        ["domain"] = domain,
                        ["https"] = true,
                        ["externalIP"] = "" // no dedicated LB IP — ingress handles routing
                    },
                    ["ingress"] = new Dictionary<string, object?>
                    {
                        ["class"] = "nginx",
                        ["configureCertmanager"] = false, // we use our own ClusterIssuer
                        ["annotations"] = new Dictionary<string, object?>
                        {
                            ["cert-manager.io/cluster-issuer"] = "homelab-ca"
                        },
                        ["tls"] = new Dictionary<string, object?>
                        {
                            ["enabled"] = true,
                            ["secretName"] = "gitlab-wildcard-tls"
                        }
                    },
                    ["psql"] = new Dictionary<string, object?>
                    {
                        // CloudNativePG read-write Service of the gitlab-pg cluster
                        ["host"] = "gitlab-pg-rw.gitlab-data.svc.cluster.local",
                        ["port"] = 5432,
                        ["database"] = "gitlabhq_production",
                        ["username"] = "gitlab",
                        ["password"] = new Dictionary<string, object?>
                        {
                            ["useSecret"] = true,
                            ["secret"] = "gitlab-postgres-secret",
                            ["key"] = "password"
                        }
                    },
                    ["minio"] = new Dictionary<string, object?>
                    {
                        ["enabled"] = false // we use our own MinIO operator instance
                    },
                    ["registry"] = new Dictionary<string, object?>
                    {
                        ["enabled"] = true,
                        ["bucket"] = "gitlab-registry"
                    },
                    ["appConfig"] = new Dictionary<string, object?>
                    {
                        ["object_store"] = new Dictionary<string, object?>
                        {
                            ["enabled"] = true,
                            ["proxy_download"] = true,
                            ["connection"] = new Dictionary<string, object?>
                            {
                                // S3-style endpoint + credentials live in this Secret
                                ["secret"] = "gitlab-minio-connection"
                            }
                        },
                        ["lfs"] = new Dictionary<string, object?> { ["bucket"] = "gitlab-lfs" },
                        ["artifacts"] = new Dictionary<string, object?> { ["bucket"] = "gitlab-artifacts" },
                        ["packages"] = new Dictionary<string, object?> { ["bucket"] = "gitlab-packages" },
                        ["uploads"] = new Dictionary<string, object?> { ["bucket"] = "gitlab-uploads" }
                    }
                },
                // Disable the bundled Postgres — we use CloudNativePG. The bundled
                // Redis stays installed: it is fine for a dev-sized cluster.
                ["postgresql"] = new Dictionary<string, object?> { ["install"] = false },
                ["redis"] = new Dictionary<string, object?> { ["install"] = true },
                // Resource sizing for the dev cluster
                ["gitlab"] = new Dictionary<string, object?>
                {
                    ["webservice"] = new Dictionary<string, object?>
                    {
                        ["minReplicas"] = 1,
                        ["maxReplicas"] = 2,
                        ["resources"] = new Dictionary<string, object?>
                        {
                            ["requests"] = new Dictionary<string, object?> { ["cpu"] = "300m", ["memory"] = "1Gi" },
                            ["limits"] = new Dictionary<string, object?> { ["cpu"] = "1", ["memory"] = "2Gi" }
                        }
                    },
                    ["sidekiq"] = new Dictionary<string, object?>
                    {
                        ["minReplicas"] = 1,
                        ["maxReplicas"] = 2,
                        ["resources"] = new Dictionary<string, object?>
                        {
                            ["requests"] = new Dictionary<string, object?> { ["cpu"] = "200m", ["memory"] = "1Gi" },
                            ["limits"] = new Dictionary<string, object?> { ["cpu"] = "500m", ["memory"] = "2Gi" }
                        }
                    },
                    ["gitaly"] = new Dictionary<string, object?>
                    {
                        ["persistence"] = new Dictionary<string, object?>
                        {
                            ["size"] = "20Gi",
                            ["storageClass"] = "longhorn"
                        }
                    }
                },
                // The runner sub-chart
                ["gitlab-runner"] = new Dictionary<string, object?>
                {
                    ["install"] = true,
                    ["runners"] = new Dictionary<string, object?>
                    {
                        ["config"] = """
                            [[runners]]
                            name = "kubernetes-runner"
                            executor = "kubernetes"
                            [runners.kubernetes]
                            namespace = "gitlab-runner"
                            image = "alpine:3.21"
                            cpu_request = "100m"
                            memory_request = "256Mi"
                            """
                    }
                }
            }
        });
    }
}

[Injectable(ServiceLifetime.Singleton)]
// NOTE(review): this class is a verbatim duplicate of the contributor that appears
// earlier in this file — almost certainly a copy/paste or extraction artifact.
// Keep only one copy; two identical type declarations will not compile together.
public sealed class GitLabHelmReleaseContributor : IHelmReleaseContributor
{
    // Typed homelab configuration; supplies the ACME TLD used as GitLab's domain.
    // NOTE(review): the original snippet read _config without declaring it anywhere,
    // so the class as published does not compile. Injected here via the constructor;
    // the concrete config type name is assumed — TODO confirm against the project.
    private readonly HomelabConfig _config;

    public GitLabHelmReleaseContributor(HomelabConfig config) => _config = config;

    /// <summary>Contributes to every cluster (the "*" wildcard).</summary>
    public string TargetCluster => "*";

    /// <summary>
    /// Adds the official GitLab Helm release (chart <c>gitlab/gitlab</c>) to the
    /// bundle, with values pointing at the in-cluster CloudNativePG Postgres and
    /// the in-cluster MinIO, plus the gitlab-runner sub-chart.
    /// </summary>
    public void Contribute(KubernetesBundle bundle)
    {
        var domain = _config.Acme.Tld;

        bundle.HelmReleases.Add(new HelmReleaseSpec
        {
            Name = "gitlab",
            Namespace = "gitlab",
            Chart = "gitlab/gitlab",
            Version = "8.5.0",
            RepoUrl = "https://charts.gitlab.io/",
            CreateNamespace = true,
            Wait = true,
            Timeout = TimeSpan.FromMinutes(20), // GitLab takes a while
            Values = new()
            {
                ["global"] = new Dictionary<string, object?>
                {
                    ["edition"] = "ce",
                    ["hosts"] = new Dictionary<string, object?>
                    {
                        ["domain"] = domain,
                        ["https"] = true,
                        ["externalIP"] = "" // no dedicated LB IP — ingress handles routing
                    },
                    ["ingress"] = new Dictionary<string, object?>
                    {
                        ["class"] = "nginx",
                        ["configureCertmanager"] = false, // we use our own ClusterIssuer
                        ["annotations"] = new Dictionary<string, object?>
                        {
                            ["cert-manager.io/cluster-issuer"] = "homelab-ca"
                        },
                        ["tls"] = new Dictionary<string, object?>
                        {
                            ["enabled"] = true,
                            ["secretName"] = "gitlab-wildcard-tls"
                        }
                    },
                    ["psql"] = new Dictionary<string, object?>
                    {
                        // CloudNativePG read-write Service of the gitlab-pg cluster
                        ["host"] = "gitlab-pg-rw.gitlab-data.svc.cluster.local",
                        ["port"] = 5432,
                        ["database"] = "gitlabhq_production",
                        ["username"] = "gitlab",
                        ["password"] = new Dictionary<string, object?>
                        {
                            ["useSecret"] = true,
                            ["secret"] = "gitlab-postgres-secret",
                            ["key"] = "password"
                        }
                    },
                    ["minio"] = new Dictionary<string, object?>
                    {
                        ["enabled"] = false // we use our own MinIO operator instance
                    },
                    ["registry"] = new Dictionary<string, object?>
                    {
                        ["enabled"] = true,
                        ["bucket"] = "gitlab-registry"
                    },
                    ["appConfig"] = new Dictionary<string, object?>
                    {
                        ["object_store"] = new Dictionary<string, object?>
                        {
                            ["enabled"] = true,
                            ["proxy_download"] = true,
                            ["connection"] = new Dictionary<string, object?>
                            {
                                // S3-style endpoint + credentials live in this Secret
                                ["secret"] = "gitlab-minio-connection"
                            }
                        },
                        ["lfs"] = new Dictionary<string, object?> { ["bucket"] = "gitlab-lfs" },
                        ["artifacts"] = new Dictionary<string, object?> { ["bucket"] = "gitlab-artifacts" },
                        ["packages"] = new Dictionary<string, object?> { ["bucket"] = "gitlab-packages" },
                        ["uploads"] = new Dictionary<string, object?> { ["bucket"] = "gitlab-uploads" }
                    }
                },
                // Disable the bundled Postgres — we use CloudNativePG. The bundled
                // Redis stays installed: it is fine for a dev-sized cluster.
                ["postgresql"] = new Dictionary<string, object?> { ["install"] = false },
                ["redis"] = new Dictionary<string, object?> { ["install"] = true },
                // Resource sizing for the dev cluster
                ["gitlab"] = new Dictionary<string, object?>
                {
                    ["webservice"] = new Dictionary<string, object?>
                    {
                        ["minReplicas"] = 1,
                        ["maxReplicas"] = 2,
                        ["resources"] = new Dictionary<string, object?>
                        {
                            ["requests"] = new Dictionary<string, object?> { ["cpu"] = "300m", ["memory"] = "1Gi" },
                            ["limits"] = new Dictionary<string, object?> { ["cpu"] = "1", ["memory"] = "2Gi" }
                        }
                    },
                    ["sidekiq"] = new Dictionary<string, object?>
                    {
                        ["minReplicas"] = 1,
                        ["maxReplicas"] = 2,
                        ["resources"] = new Dictionary<string, object?>
                        {
                            ["requests"] = new Dictionary<string, object?> { ["cpu"] = "200m", ["memory"] = "1Gi" },
                            ["limits"] = new Dictionary<string, object?> { ["cpu"] = "500m", ["memory"] = "2Gi" }
                        }
                    },
                    ["gitaly"] = new Dictionary<string, object?>
                    {
                        ["persistence"] = new Dictionary<string, object?>
                        {
                            ["size"] = "20Gi",
                            ["storageClass"] = "longhorn"
                        }
                    }
                },
                // The runner sub-chart
                ["gitlab-runner"] = new Dictionary<string, object?>
                {
                    ["install"] = true,
                    ["runners"] = new Dictionary<string, object?>
                    {
                        ["config"] = """
                            [[runners]]
                            name = "kubernetes-runner"
                            executor = "kubernetes"
                            [runners.kubernetes]
                            namespace = "gitlab-runner"
                            image = "alpine:3.21"
                            cpu_request = "100m"
                            memory_request = "256Mi"
                            """
                    }
                }
            }
        });
    }
}

The chart is large. The values dictionary above is about 80 lines. We could split it into a separate values.yaml file and reference it from HelmReleaseSpec.ValuesFile, but inlining keeps everything in one place and lets the typed config flow through.
Wiring CloudNativePG and MinIO
GitLab needs a database (Postgres) and an object store (MinIO). On K8s, both are separate contributors:
- The CloudNativePG operator (Part 29) installs the operator and creates a `Cluster` CRD instance for the GitLab database.
- The MinIO operator (Part 30) installs the operator and creates a `Tenant` CRD instance for GitLab's object storage buckets.
- The secrets contributor creates two Kubernetes Secrets (`gitlab-postgres-secret` and `gitlab-minio-connection`) by reading from `ISecretStore` (the build-time path from Part 10).
- The GitLab Helm release references the secrets by name in its values.
The chart does not try to install Postgres or MinIO itself (we set postgresql.install: false and global.minio.enabled: false). It uses the in-cluster instances we already provided.
This is the right pattern for a real Kubernetes deploy: each operator owns its domain (Postgres operator owns Postgres clusters, MinIO operator owns Tenants), and the application chart just consumes them via Service hostnames.
What this gives you that GitLab Omnibus on K8s doesn't
GitLab Omnibus inside a Pod is technically possible (the image exists). It is also:
- A 4 GB image
- A 6 GB RAM minimum
- A monolith with no horizontal scaling
- Difficult to upgrade (every Omnibus upgrade rebuilds every component)
The Helm chart gives you, for the same surface area:
- Microservices (~30 separate Deployments and StatefulSets)
- Horizontal scaling of webservice and sidekiq
- Standard Kubernetes upgrades via `helm upgrade`
- Standard Postgres via CloudNativePG (replication, backups, point-in-time recovery)
- Standard object storage via MinIO operator (multiple Tenants, lifecycle policies)
- Standard observability via the kube-prometheus-stack ServiceMonitors that the chart ships with
The bargain pays back the first time you upgrade GitLab and only the components that changed get rolled, instead of the whole monolith.