Part 45: Spring Boot Microservices for Globex
"Four Spring Boot services, one operator-managed Kafka, one operator-managed Postgres, one ArgoCD App-of-Apps. The whole thing on a freelancer's workstation."
Why
This is the first of four real-world cases. Globex is a Spring Boot shop. Their typical service mesh has a gateway and three downstream services (accounts, orders, inventory). They use Kafka for async communication. Postgres for persistent state. The freelancer has been hired to build a new feature in the orders service.
The thesis: the whole Globex stack runs on a k8s-ha HomeLab K8s instance with ~48 GB of RAM. The freelancer develops the new feature locally, pushes to Globex's GitLab (in DevLab), CI builds the Maven artifacts and a Docker image, ArgoCD deploys to globex-dev, and the feature is reachable for testing within 5 minutes of git push.
The stack
| Service | Image | Replicas | Resource requests |
|---|---|---|---|
| gateway | globex/gateway:1.4.7 | 2 | 500m CPU / 768Mi RAM |
| accounts | globex/accounts:1.4.7 | 2 | 300m CPU / 512Mi RAM |
| orders | globex/orders:1.4.7 | 2 | 300m CPU / 512Mi RAM |
| inventory | globex/inventory:1.4.7 | 2 | 300m CPU / 512Mi RAM |
Plus the platform stack:
| Component | Source |
|---|---|
| Postgres | CloudNativePG Cluster (HA, 3 replicas) |
| Kafka | Strimzi Kafka CRD (3 brokers, ZooKeeper-less KRaft mode) |
| Schema Registry | Strimzi KafkaSchemaRegistry |
| GitLab | the Helm chart from Part 28 |
| ArgoCD | the Helm chart from Part 33 |
The Strimzi Kafka contributor
Strimzi is the operator we use for Kafka. K8s.Dsl ships a StrimziKafkaHelmReleaseContributor:
[Injectable(ServiceLifetime.Singleton)]
public sealed class StrimziKafkaHelmReleaseContributor : IHelmReleaseContributor
{
public bool ShouldContribute() => _config.K8s?.Plugins?.Contains("strimzi") == true;
public void Contribute(KubernetesBundle bundle)
{
bundle.HelmReleases.Add(new HelmReleaseSpec
{
Name = "strimzi-kafka-operator",
Namespace = "strimzi-system",
Chart = "strimzi/strimzi-kafka-operator",
Version = "0.44.0",
RepoUrl = "https://strimzi.io/charts/",
CreateNamespace = true,
Wait = true,
Values = new()
{
["watchAnyNamespace"] = true,
["resources"] = new Dictionary<string, object?>
{
["requests"] = new Dictionary<string, object?> { ["cpu"] = "100m", ["memory"] = "300Mi" },
["limits"] = new Dictionary<string, object?> { ["cpu"] = "500m", ["memory"] = "500Mi" }
}
}
});
}
}[Injectable(ServiceLifetime.Singleton)]
public sealed class StrimziKafkaHelmReleaseContributor : IHelmReleaseContributor
{
public bool ShouldContribute() => _config.K8s?.Plugins?.Contains("strimzi") == true;
public void Contribute(KubernetesBundle bundle)
{
bundle.HelmReleases.Add(new HelmReleaseSpec
{
Name = "strimzi-kafka-operator",
Namespace = "strimzi-system",
Chart = "strimzi/strimzi-kafka-operator",
Version = "0.44.0",
RepoUrl = "https://strimzi.io/charts/",
CreateNamespace = true,
Wait = true,
Values = new()
{
["watchAnyNamespace"] = true,
["resources"] = new Dictionary<string, object?>
{
["requests"] = new Dictionary<string, object?> { ["cpu"] = "100m", ["memory"] = "300Mi" },
["limits"] = new Dictionary<string, object?> { ["cpu"] = "500m", ["memory"] = "500Mi" }
}
}
});
}
}The actual Kafka cluster is a CRD instance contributed by Globex's project:
[Injectable(ServiceLifetime.Singleton)]
public sealed class GlobexKafkaClusterContributor : IK8sManifestContributor
{
public string TargetCluster => "globex";
public void Contribute(KubernetesBundle bundle)
{
bundle.Namespaces["globex-kafka"] ??= new NamespaceManifest { Name = "globex-kafka" };
bundle.CrdInstances.Add(new RawManifest
{
ApiVersion = "kafka.strimzi.io/v1beta2",
Kind = "Kafka",
Metadata = new() { Name = "globex-kafka", Namespace = "globex-kafka" },
Spec = new Dictionary<string, object?>
{
["kafka"] = new Dictionary<string, object?>
{
["version"] = "3.8.0",
["replicas"] = 3,
["listeners"] = new[]
{
new Dictionary<string, object?>
{
["name"] = "plain",
["port"] = 9092,
["type"] = "internal",
["tls"] = false
}
},
["config"] = new Dictionary<string, object?>
{
["offsets.topic.replication.factor"] = 3,
["transaction.state.log.replication.factor"] = 3,
["transaction.state.log.min.isr"] = 2,
["default.replication.factor"] = 3,
["min.insync.replicas"] = 2
},
["storage"] = new Dictionary<string, object?>
{
["type"] = "jbod",
["volumes"] = new[]
{
new Dictionary<string, object?>
{
["id"] = 0,
["type"] = "persistent-claim",
["size"] = "10Gi",
["class"] = "longhorn"
}
}
}
},
["zookeeper"] = new Dictionary<string, object?>
{
["replicas"] = 3,
["storage"] = new Dictionary<string, object?>
{
["type"] = "persistent-claim",
["size"] = "5Gi",
["class"] = "longhorn"
}
},
["entityOperator"] = new Dictionary<string, object?>
{
["topicOperator"] = new Dictionary<string, object?>(),
["userOperator"] = new Dictionary<string, object?>()
}
}
});
}
}[Injectable(ServiceLifetime.Singleton)]
public sealed class GlobexKafkaClusterContributor : IK8sManifestContributor
{
public string TargetCluster => "globex";
public void Contribute(KubernetesBundle bundle)
{
bundle.Namespaces["globex-kafka"] ??= new NamespaceManifest { Name = "globex-kafka" };
bundle.CrdInstances.Add(new RawManifest
{
ApiVersion = "kafka.strimzi.io/v1beta2",
Kind = "Kafka",
Metadata = new() { Name = "globex-kafka", Namespace = "globex-kafka" },
Spec = new Dictionary<string, object?>
{
["kafka"] = new Dictionary<string, object?>
{
["version"] = "3.8.0",
["replicas"] = 3,
["listeners"] = new[]
{
new Dictionary<string, object?>
{
["name"] = "plain",
["port"] = 9092,
["type"] = "internal",
["tls"] = false
}
},
["config"] = new Dictionary<string, object?>
{
["offsets.topic.replication.factor"] = 3,
["transaction.state.log.replication.factor"] = 3,
["transaction.state.log.min.isr"] = 2,
["default.replication.factor"] = 3,
["min.insync.replicas"] = 2
},
["storage"] = new Dictionary<string, object?>
{
["type"] = "jbod",
["volumes"] = new[]
{
new Dictionary<string, object?>
{
["id"] = 0,
["type"] = "persistent-claim",
["size"] = "10Gi",
["class"] = "longhorn"
}
}
}
},
["zookeeper"] = new Dictionary<string, object?>
{
["replicas"] = 3,
["storage"] = new Dictionary<string, object?>
{
["type"] = "persistent-claim",
["size"] = "5Gi",
["class"] = "longhorn"
}
},
["entityOperator"] = new Dictionary<string, object?>
{
["topicOperator"] = new Dictionary<string, object?>(),
["userOperator"] = new Dictionary<string, object?>()
}
}
});
}
}The contributor adds a 3-broker Kafka cluster with 3-replica topics by default. Production-equivalent for Globex's needs.
The Helm chart for the orders service
# charts/orders/values.yaml
image:
repository: registry.globex.lab/globex/orders
tag: ""
replicaCount: 2
service:
port: 8080
env:
- name: SPRING_PROFILES_ACTIVE
value: production
- name: SPRING_DATASOURCE_URL
value: jdbc:postgresql://globex-pg-rw.globex-data.svc.cluster.local:5432/orders
- name: SPRING_KAFKA_BOOTSTRAP_SERVERS
value: globex-kafka-kafka-bootstrap.globex-kafka.svc.cluster.local:9092
envFromSecret:
- name: orders-db-credentials
keys:
SPRING_DATASOURCE_USERNAME: username
SPRING_DATASOURCE_PASSWORD: password
probes:
liveness: /actuator/health/liveness
readiness: /actuator/health/readiness
metrics:
enabled: true
servicemonitor:
enabled: true
path: /actuator/prometheus
port: http# charts/orders/values.yaml
image:
repository: registry.globex.lab/globex/orders
tag: ""
replicaCount: 2
service:
port: 8080
env:
- name: SPRING_PROFILES_ACTIVE
value: production
- name: SPRING_DATASOURCE_URL
value: jdbc:postgresql://globex-pg-rw.globex-data.svc.cluster.local:5432/orders
- name: SPRING_KAFKA_BOOTSTRAP_SERVERS
value: globex-kafka-kafka-bootstrap.globex-kafka.svc.cluster.local:9092
envFromSecret:
- name: orders-db-credentials
keys:
SPRING_DATASOURCE_USERNAME: username
SPRING_DATASOURCE_PASSWORD: password
probes:
liveness: /actuator/health/liveness
readiness: /actuator/health/readiness
metrics:
enabled: true
servicemonitor:
enabled: true
path: /actuator/prometheus
port: httpThe Helm chart's templates produce Deployment + Service + ConfigMap + ServiceMonitor (for Prometheus to scrape Spring Actuator metrics) + PodDisruptionBudget. Standard Spring-on-K8s shape.
The same chart pattern repeats for the other three services. Globex's GitOps repo has four apps/<service>/ directories, each pointing at the same chart with different values overlays per environment.
CI
public static GitLabCiPipeline GlobexOrdersCi() => new GitLabCiPipelineBuilder()
.WithStages("validate", "test", "build", "publish", "deploy")
.WithJob(new JobBuilder("validate-pom")
.WithStage("validate")
.WithImage("maven:3.9-eclipse-temurin-21")
.WithScript("mvn validate"))
.WithJob(new JobBuilder("unit-tests")
.WithStage("test")
.WithImage("maven:3.9-eclipse-temurin-21")
.WithScript("mvn test")
.WithCache(paths: new[] { ".m2/repository" }))
.WithJob(new JobBuilder("integration-tests")
.WithStage("test")
.WithImage("maven:3.9-eclipse-temurin-21")
.WithService("postgres:16-alpine")
.WithService("docker.io/bitnami/kafka:3.8")
.WithScript("mvn verify -P integration-tests"))
.WithJob(new JobBuilder("build-jar")
.WithStage("build")
.WithImage("maven:3.9-eclipse-temurin-21")
.WithScript("mvn package -DskipTests")
.WithArtifact("target/*.jar"))
.WithJob(new JobBuilder("build-image")
.WithStage("publish")
.WithImage("docker:24")
.WithService("docker:24-dind")
.WithScript(
"docker build -t $CI_REGISTRY/globex/orders:$CI_COMMIT_SHA -t $CI_REGISTRY/globex/orders:latest .",
"docker push $CI_REGISTRY/globex/orders:$CI_COMMIT_SHA",
"docker push $CI_REGISTRY/globex/orders:latest"))
.WithJob(new JobBuilder("update-gitops")
.WithStage("deploy")
.WithRules(r => r.OnDefaultBranch())
.WithScript(
"git clone https://oauth2:$GITOPS_TOKEN@gitlab.globex.lab/frenchexdev/globex-gitops.git",
"cd globex-gitops",
"yq e '.image.tag = \"'$CI_COMMIT_SHA'\"' -i environments/dev/orders-values.yaml",
"git add -A && git commit -m 'chore: bump orders to '$CI_COMMIT_SHA",
"git push"))
.Build();public static GitLabCiPipeline GlobexOrdersCi() => new GitLabCiPipelineBuilder()
.WithStages("validate", "test", "build", "publish", "deploy")
.WithJob(new JobBuilder("validate-pom")
.WithStage("validate")
.WithImage("maven:3.9-eclipse-temurin-21")
.WithScript("mvn validate"))
.WithJob(new JobBuilder("unit-tests")
.WithStage("test")
.WithImage("maven:3.9-eclipse-temurin-21")
.WithScript("mvn test")
.WithCache(paths: new[] { ".m2/repository" }))
.WithJob(new JobBuilder("integration-tests")
.WithStage("test")
.WithImage("maven:3.9-eclipse-temurin-21")
.WithService("postgres:16-alpine")
.WithService("docker.io/bitnami/kafka:3.8")
.WithScript("mvn verify -P integration-tests"))
.WithJob(new JobBuilder("build-jar")
.WithStage("build")
.WithImage("maven:3.9-eclipse-temurin-21")
.WithScript("mvn package -DskipTests")
.WithArtifact("target/*.jar"))
.WithJob(new JobBuilder("build-image")
.WithStage("publish")
.WithImage("docker:24")
.WithService("docker:24-dind")
.WithScript(
"docker build -t $CI_REGISTRY/globex/orders:$CI_COMMIT_SHA -t $CI_REGISTRY/globex/orders:latest .",
"docker push $CI_REGISTRY/globex/orders:$CI_COMMIT_SHA",
"docker push $CI_REGISTRY/globex/orders:latest"))
.WithJob(new JobBuilder("update-gitops")
.WithStage("deploy")
.WithRules(r => r.OnDefaultBranch())
.WithScript(
"git clone https://oauth2:$GITOPS_TOKEN@gitlab.globex.lab/frenchexdev/globex-gitops.git",
"cd globex-gitops",
"yq e '.image.tag = \"'$CI_COMMIT_SHA'\"' -i environments/dev/orders-values.yaml",
"git add -A && git commit -m 'chore: bump orders to '$CI_COMMIT_SHA",
"git push"))
.Build();Five stages: validate, test (with sidecar Postgres + Kafka for integration tests), build the jar, build the image, update the GitOps repo. ArgoCD picks up the change. The whole flow takes ~6 minutes.
What this gives you that the hand-rolled approach doesn't
A typical "Spring Boot microservices on Kubernetes" tutorial shows you kubectl apply -f deployment.yaml. It does not show:
- Strimzi Kafka with KRaft mode
- CloudNativePG with backup to MinIO
- Helm chart per service with strongly-typed values.yaml
- ArgoCD App-of-Apps deployment
- ServiceMonitor scraping Spring Actuator
- Per-environment overlays
- A real CI pipeline that builds the jar, the image, and updates the GitOps repo
Globex's full stack on HomeLab K8s gives you all of it on a single workstation, in a single HomeLab instance, isolated from any other client. The freelancer develops the new feature against the same operator-managed Kafka and Postgres they will run in production.
The bargain pays back the first time the freelancer catches a Kafka consumer group bug in dev that would have surfaced in prod a week later.