Compare commits

..

2 Commits

Author SHA1 Message Date
8fa38268d9 chore: refresh knowledge catalog headers 2026-01-14 01:08:05 -03:00
4a1c4766b8 feat: add harbor/vault oidc automation 2026-01-14 01:07:47 -03:00
19 changed files with 1029 additions and 496 deletions

View File

@ -1,4 +1,4 @@
# clusters/atlas/flux-system/applications/communication/kustomization.yaml
# clusters/atlas/flux-system/applications/comms/kustomization.yaml
apiVersion: kustomize.toolkit.fluxcd.io/v1
kind: Kustomization
metadata:

View File

@ -1,3 +1,4 @@
# clusters/atlas/flux-system/gotk-sync.yaml
# This manifest was generated by flux. DO NOT EDIT.
---
apiVersion: source.toolkit.fluxcd.io/v1

View File

@ -1,8 +1,8 @@
{
"counts": {
"helmrelease_host_hints": 7,
"http_endpoints": 35,
"services": 44,
"workloads": 49
"helmrelease_host_hints": 17,
"http_endpoints": 37,
"services": 43,
"workloads": 54
}
}

View File

@ -12,12 +12,7 @@
"targetNamespace": "bstein-dev-home"
},
{
"name": "ci-demo",
"path": "services/ci-demo",
"targetNamespace": null
},
{
"name": "communication",
"name": "comms",
"path": "services/comms",
"targetNamespace": "comms"
},
@ -71,6 +66,11 @@
"path": "services/keycloak",
"targetNamespace": "sso"
},
{
"name": "logging",
"path": "services/logging",
"targetNamespace": null
},
{
"name": "longhorn-ui",
"path": "infrastructure/longhorn/ui-ingress",
@ -81,6 +81,11 @@
"path": "services/mailu",
"targetNamespace": "mailu-mailserver"
},
{
"name": "maintenance",
"path": "services/maintenance",
"targetNamespace": null
},
{
"name": "metallb",
"path": "infrastructure/metallb",
@ -116,11 +121,26 @@
"path": "services/openldap",
"targetNamespace": "sso"
},
{
"name": "outline",
"path": "services/outline",
"targetNamespace": "outline"
},
{
"name": "pegasus",
"path": "services/pegasus",
"targetNamespace": "jellyfin"
},
{
"name": "planka",
"path": "services/planka",
"targetNamespace": "planka"
},
{
"name": "postgres",
"path": "infrastructure/postgres",
"targetNamespace": "postgres"
},
{
"name": "sui-metrics",
"path": "services/sui-metrics/overlays/atlas",
@ -163,7 +183,7 @@
"serviceAccountName": null,
"nodeSelector": {},
"images": [
"ollama/ollama:latest"
"ollama/ollama@sha256:2c9595c555fd70a28363489ac03bd5bf9e7c5bdf2890373c3a830ffd7252ce6d"
]
},
{
@ -179,7 +199,7 @@
"node-role.kubernetes.io/worker": "true"
},
"images": [
"registry.bstein.dev/bstein/bstein-dev-home-backend:0.1.1-84"
"registry.bstein.dev/bstein/bstein-dev-home-backend:0.1.1-92"
]
},
{
@ -195,7 +215,7 @@
"node-role.kubernetes.io/worker": "true"
},
"images": [
"registry.bstein.dev/bstein/bstein-dev-home-frontend:0.1.1-84"
"registry.bstein.dev/bstein/bstein-dev-home-frontend:0.1.1-92"
]
},
{
@ -214,21 +234,6 @@
"python:3.11-slim"
]
},
{
"kind": "Deployment",
"namespace": "ci-demo",
"name": "ci-demo",
"labels": {
"app.kubernetes.io/name": "ci-demo"
},
"serviceAccountName": null,
"nodeSelector": {
"hardware": "rpi4"
},
"images": [
"registry.bstein.dev/infra/ci-demo:v0.0.0-3"
]
},
{
"kind": "Deployment",
"namespace": "comms",
@ -271,7 +276,7 @@
"hardware": "rpi5"
},
"images": [
"ghcr.io/element-hq/element-call:latest"
"ghcr.io/element-hq/element-call@sha256:e6897c7818331714eae19d83ef8ea94a8b41115f0d8d3f62c2fed2d02c65c9bc"
]
},
{
@ -345,56 +350,6 @@
"nginx:1.27-alpine"
]
},
{
"kind": "Deployment",
"namespace": "comms",
"name": "othrys-element-element-web",
"labels": {
"app.kubernetes.io/instance": "othrys-element",
"app.kubernetes.io/name": "element-web"
},
"serviceAccountName": "othrys-element-element-web",
"nodeSelector": {
"hardware": "rpi5"
},
"images": [
"ghcr.io/element-hq/element-web:v1.12.6"
]
},
{
"kind": "Deployment",
"namespace": "comms",
"name": "othrys-synapse-matrix-synapse",
"labels": {
"app.kubernetes.io/component": "synapse",
"app.kubernetes.io/instance": "othrys-synapse",
"app.kubernetes.io/name": "matrix-synapse"
},
"serviceAccountName": "default",
"nodeSelector": {
"hardware": "rpi5"
},
"images": [
"ghcr.io/element-hq/synapse:v1.144.0"
]
},
{
"kind": "Deployment",
"namespace": "comms",
"name": "othrys-synapse-redis-master",
"labels": {
"app.kubernetes.io/component": "master",
"app.kubernetes.io/instance": "othrys-synapse",
"app.kubernetes.io/managed-by": "Helm",
"app.kubernetes.io/name": "redis",
"helm.sh/chart": "redis-17.17.1"
},
"serviceAccountName": "othrys-synapse-redis",
"nodeSelector": {},
"images": [
"docker.io/bitnamilegacy/redis:7.0.12-debian-11-r34"
]
},
{
"kind": "DaemonSet",
"namespace": "crypto",
@ -407,7 +362,7 @@
"node-role.kubernetes.io/worker": "true"
},
"images": [
"ghcr.io/tari-project/xmrig:latest"
"ghcr.io/tari-project/xmrig@sha256:80defbfd0b640d604c91cb5101d3642db7928e1e68ee3c6b011289b3565a39d9"
]
},
{
@ -681,6 +636,66 @@
"hashicorp/vault-csi-provider:1.7.0"
]
},
{
"kind": "DaemonSet",
"namespace": "logging",
"name": "node-image-gc-rpi4",
"labels": {
"app": "node-image-gc-rpi4"
},
"serviceAccountName": "node-image-gc-rpi4",
"nodeSelector": {
"hardware": "rpi4"
},
"images": [
"bitnami/kubectl@sha256:554ab88b1858e8424c55de37ad417b16f2a0e65d1607aa0f3fe3ce9b9f10b131"
]
},
{
"kind": "DaemonSet",
"namespace": "logging",
"name": "node-image-prune-rpi5",
"labels": {
"app": "node-image-prune-rpi5"
},
"serviceAccountName": "node-image-prune-rpi5",
"nodeSelector": {
"hardware": "rpi5"
},
"images": [
"bitnami/kubectl@sha256:554ab88b1858e8424c55de37ad417b16f2a0e65d1607aa0f3fe3ce9b9f10b131"
]
},
{
"kind": "DaemonSet",
"namespace": "logging",
"name": "node-log-rotation",
"labels": {
"app": "node-log-rotation"
},
"serviceAccountName": "node-log-rotation",
"nodeSelector": {
"hardware": "rpi5"
},
"images": [
"bitnami/kubectl@sha256:554ab88b1858e8424c55de37ad417b16f2a0e65d1607aa0f3fe3ce9b9f10b131"
]
},
{
"kind": "Deployment",
"namespace": "logging",
"name": "oauth2-proxy-logs",
"labels": {
"app": "oauth2-proxy-logs"
},
"serviceAccountName": null,
"nodeSelector": {
"node-role.kubernetes.io/worker": "true"
},
"images": [
"quay.io/oauth2-proxy/oauth2-proxy:v7.6.0"
]
},
{
"kind": "Deployment",
"namespace": "longhorn-system",
@ -708,7 +723,7 @@
"mailu.bstein.dev/vip": "true"
},
"images": [
"lachlanevenson/k8s-kubectl:latest"
"registry.bstein.dev/bstein/kubectl:1.35.0"
]
},
{
@ -726,37 +741,30 @@
},
{
"kind": "DaemonSet",
"namespace": "metallb-system",
"name": "metallb-speaker",
"namespace": "maintenance",
"name": "node-image-sweeper",
"labels": {
"app.kubernetes.io/component": "speaker",
"app.kubernetes.io/instance": "metallb",
"app.kubernetes.io/name": "metallb"
"app": "node-image-sweeper"
},
"serviceAccountName": "metallb-speaker",
"serviceAccountName": "node-image-sweeper",
"nodeSelector": {
"kubernetes.io/os": "linux"
},
"images": [
"quay.io/frrouting/frr:10.4.1",
"quay.io/metallb/speaker:v0.15.3"
"python:3.12.9-alpine3.20"
]
},
{
"kind": "Deployment",
"namespace": "metallb-system",
"name": "metallb-controller",
"kind": "DaemonSet",
"namespace": "maintenance",
"name": "node-nofile",
"labels": {
"app.kubernetes.io/component": "controller",
"app.kubernetes.io/instance": "metallb",
"app.kubernetes.io/name": "metallb"
},
"serviceAccountName": "metallb-controller",
"nodeSelector": {
"kubernetes.io/os": "linux"
"app": "node-nofile"
},
"serviceAccountName": "node-nofile",
"nodeSelector": {},
"images": [
"quay.io/metallb/controller:v0.15.3"
"bitnami/kubectl@sha256:554ab88b1858e8424c55de37ad417b16f2a0e65d1607aa0f3fe3ce9b9f10b131"
]
},
{
@ -772,6 +780,21 @@
"registry.bstein.dev/monitoring/dcgm-exporter:4.4.2-4.7.0-ubuntu22.04"
]
},
{
"kind": "DaemonSet",
"namespace": "monitoring",
"name": "jetson-tegrastats-exporter",
"labels": {
"app": "jetson-tegrastats-exporter"
},
"serviceAccountName": "default",
"nodeSelector": {
"jetson": "true"
},
"images": [
"python:3.10-slim"
]
},
{
"kind": "Deployment",
"namespace": "monitoring",
@ -797,7 +820,7 @@
"hardware": "rpi5"
},
"images": [
"collabora/code:latest"
"collabora/code@sha256:3c58d0e9bae75e4647467d0c7d91cb66f261d3e814709aed590b5c334a04db26"
]
},
{
@ -815,6 +838,66 @@
"nextcloud:29-apache"
]
},
{
"kind": "Deployment",
"namespace": "outline",
"name": "outline",
"labels": {
"app": "outline"
},
"serviceAccountName": null,
"nodeSelector": {
"node-role.kubernetes.io/worker": "true"
},
"images": [
"outlinewiki/outline:1.2.0"
]
},
{
"kind": "Deployment",
"namespace": "outline",
"name": "outline-redis",
"labels": {
"app": "outline-redis"
},
"serviceAccountName": null,
"nodeSelector": {
"node-role.kubernetes.io/worker": "true"
},
"images": [
"redis:7.4.1-alpine"
]
},
{
"kind": "Deployment",
"namespace": "planka",
"name": "planka",
"labels": {
"app": "planka"
},
"serviceAccountName": null,
"nodeSelector": {
"node-role.kubernetes.io/worker": "true"
},
"images": [
"ghcr.io/plankanban/planka:2.0.0-rc.4"
]
},
{
"kind": "StatefulSet",
"namespace": "postgres",
"name": "postgres",
"labels": {
"app": "postgres"
},
"serviceAccountName": "postgres-vault",
"nodeSelector": {
"node-role.kubernetes.io/worker": "true"
},
"images": [
"postgres:15"
]
},
{
"kind": "Deployment",
"namespace": "sso",
@ -984,22 +1067,6 @@
}
]
},
{
"namespace": "ci-demo",
"name": "ci-demo",
"type": "ClusterIP",
"selector": {
"app.kubernetes.io/name": "ci-demo"
},
"ports": [
{
"name": "http",
"port": 80,
"targetPort": "http",
"protocol": "TCP"
}
]
},
{
"namespace": "comms",
"name": "coturn",
@ -1454,94 +1521,6 @@
}
]
},
{
"namespace": "comms",
"name": "othrys-element-element-web",
"type": "ClusterIP",
"selector": {
"app.kubernetes.io/instance": "othrys-element",
"app.kubernetes.io/name": "element-web"
},
"ports": [
{
"name": "http",
"port": 80,
"targetPort": "http",
"protocol": "TCP"
}
]
},
{
"namespace": "comms",
"name": "othrys-synapse-matrix-synapse",
"type": "ClusterIP",
"selector": {
"app.kubernetes.io/component": "synapse",
"app.kubernetes.io/instance": "othrys-synapse",
"app.kubernetes.io/name": "matrix-synapse"
},
"ports": [
{
"name": "http",
"port": 8008,
"targetPort": "http",
"protocol": "TCP"
}
]
},
{
"namespace": "comms",
"name": "othrys-synapse-redis-headless",
"type": "ClusterIP",
"selector": {
"app.kubernetes.io/instance": "othrys-synapse",
"app.kubernetes.io/name": "redis"
},
"ports": [
{
"name": "tcp-redis",
"port": 6379,
"targetPort": "redis",
"protocol": "TCP"
}
]
},
{
"namespace": "comms",
"name": "othrys-synapse-redis-master",
"type": "ClusterIP",
"selector": {
"app.kubernetes.io/component": "master",
"app.kubernetes.io/instance": "othrys-synapse",
"app.kubernetes.io/name": "redis"
},
"ports": [
{
"name": "tcp-redis",
"port": 6379,
"targetPort": "redis",
"protocol": "TCP"
}
]
},
{
"namespace": "comms",
"name": "othrys-synapse-replication",
"type": "ClusterIP",
"selector": {
"app.kubernetes.io/component": "synapse",
"app.kubernetes.io/instance": "othrys-synapse",
"app.kubernetes.io/name": "matrix-synapse"
},
"ports": [
{
"name": "replication",
"port": 9093,
"targetPort": "replication",
"protocol": "TCP"
}
]
},
{
"namespace": "crypto",
"name": "monerod",
@ -1743,6 +1722,22 @@
}
]
},
{
"namespace": "logging",
"name": "oauth2-proxy-logs",
"type": "ClusterIP",
"selector": {
"app": "oauth2-proxy-logs"
},
"ports": [
{
"name": "http",
"port": 80,
"targetPort": 4180,
"protocol": "TCP"
}
]
},
{
"namespace": "longhorn-system",
"name": "oauth2-proxy-longhorn",
@ -1823,24 +1818,6 @@
}
]
},
{
"namespace": "metallb-system",
"name": "metallb-webhook-service",
"type": "ClusterIP",
"selector": {
"app.kubernetes.io/component": "controller",
"app.kubernetes.io/instance": "metallb",
"app.kubernetes.io/name": "metallb"
},
"ports": [
{
"name": null,
"port": 443,
"targetPort": 9443,
"protocol": "TCP"
}
]
},
{
"namespace": "monitoring",
"name": "dcgm-exporter",
@ -1857,6 +1834,22 @@
}
]
},
{
"namespace": "monitoring",
"name": "jetson-tegrastats-exporter",
"type": "ClusterIP",
"selector": {
"app": "jetson-tegrastats-exporter"
},
"ports": [
{
"name": "metrics",
"port": 9100,
"targetPort": "metrics",
"protocol": "TCP"
}
]
},
{
"namespace": "monitoring",
"name": "postmark-exporter",
@ -1905,6 +1898,70 @@
}
]
},
{
"namespace": "outline",
"name": "outline",
"type": "ClusterIP",
"selector": {
"app": "outline"
},
"ports": [
{
"name": "http",
"port": 80,
"targetPort": "http",
"protocol": "TCP"
}
]
},
{
"namespace": "outline",
"name": "outline-redis",
"type": "ClusterIP",
"selector": {
"app": "outline-redis"
},
"ports": [
{
"name": "redis",
"port": 6379,
"targetPort": "redis",
"protocol": "TCP"
}
]
},
{
"namespace": "planka",
"name": "planka",
"type": "ClusterIP",
"selector": {
"app": "planka"
},
"ports": [
{
"name": "http",
"port": 80,
"targetPort": "http",
"protocol": "TCP"
}
]
},
{
"namespace": "postgres",
"name": "postgres-service",
"type": "ClusterIP",
"selector": {
"app": "postgres"
},
"ports": [
{
"name": "postgres",
"port": 5432,
"targetPort": 5432,
"protocol": "TCP"
}
]
},
{
"namespace": "sso",
"name": "keycloak",
@ -2110,7 +2167,7 @@
"via": {
"kind": "Ingress",
"name": "matrix-wellknown-bstein-dev",
"source": "communication"
"source": "comms"
}
},
{
@ -2130,7 +2187,7 @@
"via": {
"kind": "Ingress",
"name": "matrix-wellknown-bstein-dev",
"source": "communication"
"source": "comms"
}
},
{
@ -2170,7 +2227,7 @@
"via": {
"kind": "Ingress",
"name": "element-call",
"source": "communication"
"source": "comms"
}
},
{
@ -2250,7 +2307,7 @@
"via": {
"kind": "Ingress",
"name": "livekit-jwt-ingress",
"source": "communication"
"source": "comms"
}
},
{
@ -2270,27 +2327,7 @@
"via": {
"kind": "Ingress",
"name": "livekit-ingress",
"source": "communication"
}
},
{
"host": "live.bstein.dev",
"path": "/",
"backend": {
"namespace": "comms",
"service": "othrys-element-element-web",
"port": 80,
"workloads": [
{
"kind": "Deployment",
"name": "othrys-element-element-web"
}
]
},
"via": {
"kind": "Ingress",
"name": "othrys-element-element-web",
"source": "communication"
"source": "comms"
}
},
{
@ -2310,7 +2347,7 @@
"via": {
"kind": "Ingress",
"name": "matrix-wellknown",
"source": "communication"
"source": "comms"
}
},
{
@ -2330,7 +2367,7 @@
"via": {
"kind": "Ingress",
"name": "matrix-wellknown",
"source": "communication"
"source": "comms"
}
},
{
@ -2340,17 +2377,32 @@
"namespace": "comms",
"service": "othrys-synapse-matrix-synapse",
"port": 8008,
"workloads": []
},
"via": {
"kind": "Ingress",
"name": "matrix-routing",
"source": "comms"
}
},
{
"host": "logs.bstein.dev",
"path": "/",
"backend": {
"namespace": "logging",
"service": "oauth2-proxy-logs",
"port": "http",
"workloads": [
{
"kind": "Deployment",
"name": "othrys-synapse-matrix-synapse"
"name": "oauth2-proxy-logs"
}
]
},
"via": {
"kind": "Ingress",
"name": "matrix-routing",
"source": "communication"
"name": "logs",
"source": "logging"
}
},
{
@ -2405,7 +2457,7 @@
"via": {
"kind": "Ingress",
"name": "matrix-routing",
"source": "communication"
"source": "comms"
}
},
{
@ -2425,7 +2477,7 @@
"via": {
"kind": "Ingress",
"name": "matrix-wellknown-matrix-live",
"source": "communication"
"source": "comms"
}
},
{
@ -2445,7 +2497,7 @@
"via": {
"kind": "Ingress",
"name": "matrix-wellknown-matrix-live",
"source": "communication"
"source": "comms"
}
},
{
@ -2455,17 +2507,12 @@
"namespace": "comms",
"service": "othrys-synapse-matrix-synapse",
"port": 8008,
"workloads": [
{
"kind": "Deployment",
"name": "othrys-synapse-matrix-synapse"
}
]
"workloads": []
},
"via": {
"kind": "Ingress",
"name": "matrix-routing",
"source": "communication"
"source": "comms"
}
},
{
@ -2485,7 +2532,7 @@
"via": {
"kind": "Ingress",
"name": "matrix-routing",
"source": "communication"
"source": "comms"
}
},
{
@ -2505,7 +2552,7 @@
"via": {
"kind": "Ingress",
"name": "matrix-routing",
"source": "communication"
"source": "comms"
}
},
{
@ -2525,7 +2572,7 @@
"via": {
"kind": "Ingress",
"name": "matrix-routing",
"source": "communication"
"source": "comms"
}
},
{
@ -2545,7 +2592,7 @@
"via": {
"kind": "Ingress",
"name": "matrix-routing",
"source": "communication"
"source": "comms"
}
},
{
@ -2565,7 +2612,7 @@
"via": {
"kind": "Ingress",
"name": "matrix-routing",
"source": "communication"
"source": "comms"
}
},
{
@ -2575,17 +2622,12 @@
"namespace": "comms",
"service": "othrys-synapse-matrix-synapse",
"port": 8008,
"workloads": [
{
"kind": "Deployment",
"name": "othrys-synapse-matrix-synapse"
}
]
"workloads": []
},
"via": {
"kind": "Ingress",
"name": "matrix-routing",
"source": "communication"
"source": "comms"
}
},
{
@ -2608,6 +2650,26 @@
"source": "monerod"
}
},
{
"host": "notes.bstein.dev",
"path": "/",
"backend": {
"namespace": "outline",
"service": "outline",
"port": 80,
"workloads": [
{
"kind": "Deployment",
"name": "outline"
}
]
},
"via": {
"kind": "Ingress",
"name": "outline",
"source": "outline"
}
},
{
"host": "office.bstein.dev",
"path": "/",
@ -2728,6 +2790,26 @@
"source": "jellyfin"
}
},
{
"host": "tasks.bstein.dev",
"path": "/",
"backend": {
"namespace": "planka",
"service": "planka",
"port": 80,
"workloads": [
{
"kind": "Deployment",
"name": "planka"
}
]
},
"via": {
"kind": "Ingress",
"name": "planka",
"source": "planka"
}
},
{
"host": "vault.bstein.dev",
"path": "/",
@ -2750,12 +2832,27 @@
}
],
"helmrelease_host_hints": {
"comms:comms/othrys-element": [
"call.live.bstein.dev",
"live.bstein.dev",
"matrix.live.bstein.dev"
],
"comms:comms/othrys-synapse": [
"bstein.dev",
"kit.live.bstein.dev",
"live.bstein.dev",
"matrix.live.bstein.dev",
"turn.live.bstein.dev"
],
"gitops-ui:flux-system/weave-gitops": [
"cd.bstein.dev"
],
"harbor:harbor/harbor": [
"registry.bstein.dev"
],
"logging:logging/data-prepper": [
"registry.bstein.dev"
],
"mailu:mailu-mailserver/mailu": [
"bstein.dev",
"mail.bstein.dev"
@ -2764,6 +2861,7 @@
"alerts.bstein.dev"
],
"monitoring:monitoring/grafana": [
"bstein.dev",
"metrics.bstein.dev",
"sso.bstein.dev"
]

View File

@ -1,3 +1,4 @@
# knowledge/catalog/atlas.yaml
# Generated by scripts/knowledge_render_atlas.py (do not edit by hand)
cluster: atlas
sources:
@ -7,7 +8,7 @@ sources:
- name: bstein-dev-home
path: services/bstein-dev-home
targetNamespace: bstein-dev-home
- name: communication
- name: comms
path: services/comms
targetNamespace: comms
- name: core
@ -40,12 +41,18 @@ sources:
- name: keycloak
path: services/keycloak
targetNamespace: sso
- name: logging
path: services/logging
targetNamespace: null
- name: longhorn-ui
path: infrastructure/longhorn/ui-ingress
targetNamespace: longhorn-system
- name: mailu
path: services/mailu
targetNamespace: mailu-mailserver
- name: maintenance
path: services/maintenance
targetNamespace: null
- name: metallb
path: infrastructure/metallb
targetNamespace: metallb-system
@ -67,9 +74,18 @@ sources:
- name: openldap
path: services/openldap
targetNamespace: sso
- name: outline
path: services/outline
targetNamespace: outline
- name: pegasus
path: services/pegasus
targetNamespace: jellyfin
- name: planka
path: services/planka
targetNamespace: planka
- name: postgres
path: infrastructure/postgres
targetNamespace: postgres
- name: sui-metrics
path: services/sui-metrics/overlays/atlas
targetNamespace: sui-metrics
@ -97,7 +113,7 @@ workloads:
serviceAccountName: null
nodeSelector: {}
images:
- ollama/ollama:latest
- ollama/ollama@sha256:2c9595c555fd70a28363489ac03bd5bf9e7c5bdf2890373c3a830ffd7252ce6d
- kind: Deployment
namespace: bstein-dev-home
name: bstein-dev-home-backend
@ -108,7 +124,7 @@ workloads:
kubernetes.io/arch: arm64
node-role.kubernetes.io/worker: 'true'
images:
- registry.bstein.dev/bstein/bstein-dev-home-backend:0.1.1-84
- registry.bstein.dev/bstein/bstein-dev-home-backend:0.1.1-92
- kind: Deployment
namespace: bstein-dev-home
name: bstein-dev-home-frontend
@ -119,7 +135,7 @@ workloads:
kubernetes.io/arch: arm64
node-role.kubernetes.io/worker: 'true'
images:
- registry.bstein.dev/bstein/bstein-dev-home-frontend:0.1.1-84
- registry.bstein.dev/bstein/bstein-dev-home-frontend:0.1.1-92
- kind: Deployment
namespace: bstein-dev-home
name: chat-ai-gateway
@ -160,7 +176,7 @@ workloads:
nodeSelector:
hardware: rpi5
images:
- ghcr.io/element-hq/element-call:latest
- ghcr.io/element-hq/element-call@sha256:e6897c7818331714eae19d83ef8ea94a8b41115f0d8d3f62c2fed2d02c65c9bc
- kind: Deployment
namespace: comms
name: livekit
@ -209,42 +225,6 @@ workloads:
nodeSelector: {}
images:
- nginx:1.27-alpine
- kind: Deployment
namespace: comms
name: othrys-element-element-web
labels:
app.kubernetes.io/instance: othrys-element
app.kubernetes.io/name: element-web
serviceAccountName: othrys-element-element-web
nodeSelector:
hardware: rpi5
images:
- ghcr.io/element-hq/element-web:v1.12.6
- kind: Deployment
namespace: comms
name: othrys-synapse-matrix-synapse
labels:
app.kubernetes.io/component: synapse
app.kubernetes.io/instance: othrys-synapse
app.kubernetes.io/name: matrix-synapse
serviceAccountName: default
nodeSelector:
hardware: rpi5
images:
- ghcr.io/element-hq/synapse:v1.144.0
- kind: Deployment
namespace: comms
name: othrys-synapse-redis-master
labels:
app.kubernetes.io/component: master
app.kubernetes.io/instance: othrys-synapse
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: redis
helm.sh/chart: redis-17.17.1
serviceAccountName: othrys-synapse-redis
nodeSelector: {}
images:
- docker.io/bitnamilegacy/redis:7.0.12-debian-11-r34
- kind: DaemonSet
namespace: crypto
name: monero-xmrig
@ -254,7 +234,7 @@ workloads:
nodeSelector:
node-role.kubernetes.io/worker: 'true'
images:
- ghcr.io/tari-project/xmrig:latest
- ghcr.io/tari-project/xmrig@sha256:80defbfd0b640d604c91cb5101d3642db7928e1e68ee3c6b011289b3565a39d9
- kind: Deployment
namespace: crypto
name: monero-p2pool
@ -447,6 +427,46 @@ workloads:
kubernetes.io/os: linux
images:
- hashicorp/vault-csi-provider:1.7.0
- kind: DaemonSet
namespace: logging
name: node-image-gc-rpi4
labels:
app: node-image-gc-rpi4
serviceAccountName: node-image-gc-rpi4
nodeSelector:
hardware: rpi4
images:
- bitnami/kubectl@sha256:554ab88b1858e8424c55de37ad417b16f2a0e65d1607aa0f3fe3ce9b9f10b131
- kind: DaemonSet
namespace: logging
name: node-image-prune-rpi5
labels:
app: node-image-prune-rpi5
serviceAccountName: node-image-prune-rpi5
nodeSelector:
hardware: rpi5
images:
- bitnami/kubectl@sha256:554ab88b1858e8424c55de37ad417b16f2a0e65d1607aa0f3fe3ce9b9f10b131
- kind: DaemonSet
namespace: logging
name: node-log-rotation
labels:
app: node-log-rotation
serviceAccountName: node-log-rotation
nodeSelector:
hardware: rpi5
images:
- bitnami/kubectl@sha256:554ab88b1858e8424c55de37ad417b16f2a0e65d1607aa0f3fe3ce9b9f10b131
- kind: Deployment
namespace: logging
name: oauth2-proxy-logs
labels:
app: oauth2-proxy-logs
serviceAccountName: null
nodeSelector:
node-role.kubernetes.io/worker: 'true'
images:
- quay.io/oauth2-proxy/oauth2-proxy:v7.6.0
- kind: Deployment
namespace: longhorn-system
name: oauth2-proxy-longhorn
@ -466,7 +486,7 @@ workloads:
nodeSelector:
mailu.bstein.dev/vip: 'true'
images:
- lachlanevenson/k8s-kubectl:latest
- registry.bstein.dev/bstein/kubectl:1.35.0
- kind: Deployment
namespace: mailu-mailserver
name: mailu-sync-listener
@ -477,30 +497,24 @@ workloads:
images:
- python:3.11-alpine
- kind: DaemonSet
namespace: metallb-system
name: metallb-speaker
namespace: maintenance
name: node-image-sweeper
labels:
app.kubernetes.io/component: speaker
app.kubernetes.io/instance: metallb
app.kubernetes.io/name: metallb
serviceAccountName: metallb-speaker
app: node-image-sweeper
serviceAccountName: node-image-sweeper
nodeSelector:
kubernetes.io/os: linux
images:
- quay.io/frrouting/frr:10.4.1
- quay.io/metallb/speaker:v0.15.3
- kind: Deployment
namespace: metallb-system
name: metallb-controller
- python:3.12.9-alpine3.20
- kind: DaemonSet
namespace: maintenance
name: node-nofile
labels:
app.kubernetes.io/component: controller
app.kubernetes.io/instance: metallb
app.kubernetes.io/name: metallb
serviceAccountName: metallb-controller
nodeSelector:
kubernetes.io/os: linux
app: node-nofile
serviceAccountName: node-nofile
nodeSelector: {}
images:
- quay.io/metallb/controller:v0.15.3
- bitnami/kubectl@sha256:554ab88b1858e8424c55de37ad417b16f2a0e65d1607aa0f3fe3ce9b9f10b131
- kind: DaemonSet
namespace: monitoring
name: dcgm-exporter
@ -510,6 +524,16 @@ workloads:
nodeSelector: {}
images:
- registry.bstein.dev/monitoring/dcgm-exporter:4.4.2-4.7.0-ubuntu22.04
- kind: DaemonSet
namespace: monitoring
name: jetson-tegrastats-exporter
labels:
app: jetson-tegrastats-exporter
serviceAccountName: default
nodeSelector:
jetson: 'true'
images:
- python:3.10-slim
- kind: Deployment
namespace: monitoring
name: postmark-exporter
@ -528,7 +552,7 @@ workloads:
nodeSelector:
hardware: rpi5
images:
- collabora/code:latest
- collabora/code@sha256:3c58d0e9bae75e4647467d0c7d91cb66f261d3e814709aed590b5c334a04db26
- kind: Deployment
namespace: nextcloud
name: nextcloud
@ -539,6 +563,46 @@ workloads:
hardware: rpi5
images:
- nextcloud:29-apache
- kind: Deployment
namespace: outline
name: outline
labels:
app: outline
serviceAccountName: null
nodeSelector:
node-role.kubernetes.io/worker: 'true'
images:
- outlinewiki/outline:1.2.0
- kind: Deployment
namespace: outline
name: outline-redis
labels:
app: outline-redis
serviceAccountName: null
nodeSelector:
node-role.kubernetes.io/worker: 'true'
images:
- redis:7.4.1-alpine
- kind: Deployment
namespace: planka
name: planka
labels:
app: planka
serviceAccountName: null
nodeSelector:
node-role.kubernetes.io/worker: 'true'
images:
- ghcr.io/plankanban/planka:2.0.0-rc.4
- kind: StatefulSet
namespace: postgres
name: postgres
labels:
app: postgres
serviceAccountName: postgres-vault
nodeSelector:
node-role.kubernetes.io/worker: 'true'
images:
- postgres:15
- kind: Deployment
namespace: sso
name: keycloak
@ -650,16 +714,6 @@ services:
port: 80
targetPort: 8080
protocol: TCP
- namespace: ci-demo
name: ci-demo
type: ClusterIP
selector:
app.kubernetes.io/name: ci-demo
ports:
- name: http
port: 80
targetPort: http
protocol: TCP
- namespace: comms
name: coturn
type: LoadBalancer
@ -958,64 +1012,6 @@ services:
port: 80
targetPort: 80
protocol: TCP
- namespace: comms
name: othrys-element-element-web
type: ClusterIP
selector:
app.kubernetes.io/instance: othrys-element
app.kubernetes.io/name: element-web
ports:
- name: http
port: 80
targetPort: http
protocol: TCP
- namespace: comms
name: othrys-synapse-matrix-synapse
type: ClusterIP
selector:
app.kubernetes.io/component: synapse
app.kubernetes.io/instance: othrys-synapse
app.kubernetes.io/name: matrix-synapse
ports:
- name: http
port: 8008
targetPort: http
protocol: TCP
- namespace: comms
name: othrys-synapse-redis-headless
type: ClusterIP
selector:
app.kubernetes.io/instance: othrys-synapse
app.kubernetes.io/name: redis
ports:
- name: tcp-redis
port: 6379
targetPort: redis
protocol: TCP
- namespace: comms
name: othrys-synapse-redis-master
type: ClusterIP
selector:
app.kubernetes.io/component: master
app.kubernetes.io/instance: othrys-synapse
app.kubernetes.io/name: redis
ports:
- name: tcp-redis
port: 6379
targetPort: redis
protocol: TCP
- namespace: comms
name: othrys-synapse-replication
type: ClusterIP
selector:
app.kubernetes.io/component: synapse
app.kubernetes.io/instance: othrys-synapse
app.kubernetes.io/name: matrix-synapse
ports:
- name: replication
port: 9093
targetPort: replication
protocol: TCP
- namespace: crypto
name: monerod
type: ClusterIP
@ -1143,6 +1139,16 @@ services:
port: 443
targetPort: websecure
protocol: TCP
- namespace: logging
name: oauth2-proxy-logs
type: ClusterIP
selector:
app: oauth2-proxy-logs
ports:
- name: http
port: 80
targetPort: 4180
protocol: TCP
- namespace: longhorn-system
name: oauth2-proxy-longhorn
type: ClusterIP
@ -1195,18 +1201,6 @@ services:
port: 8080
targetPort: 8080
protocol: TCP
- namespace: metallb-system
name: metallb-webhook-service
type: ClusterIP
selector:
app.kubernetes.io/component: controller
app.kubernetes.io/instance: metallb
app.kubernetes.io/name: metallb
ports:
- name: null
port: 443
targetPort: 9443
protocol: TCP
- namespace: monitoring
name: dcgm-exporter
type: ClusterIP
@ -1217,6 +1211,16 @@ services:
port: 9400
targetPort: metrics
protocol: TCP
- namespace: monitoring
name: jetson-tegrastats-exporter
type: ClusterIP
selector:
app: jetson-tegrastats-exporter
ports:
- name: metrics
port: 9100
targetPort: metrics
protocol: TCP
- namespace: monitoring
name: postmark-exporter
type: ClusterIP
@ -1247,6 +1251,46 @@ services:
port: 80
targetPort: http
protocol: TCP
- namespace: outline
name: outline
type: ClusterIP
selector:
app: outline
ports:
- name: http
port: 80
targetPort: http
protocol: TCP
- namespace: outline
name: outline-redis
type: ClusterIP
selector:
app: outline-redis
ports:
- name: redis
port: 6379
targetPort: redis
protocol: TCP
- namespace: planka
name: planka
type: ClusterIP
selector:
app: planka
ports:
- name: http
port: 80
targetPort: http
protocol: TCP
- namespace: postgres
name: postgres-service
type: ClusterIP
selector:
app: postgres
ports:
- name: postgres
port: 5432
targetPort: 5432
protocol: TCP
- namespace: sso
name: keycloak
type: ClusterIP
@ -1378,7 +1422,7 @@ http_endpoints:
via:
kind: Ingress
name: matrix-wellknown-bstein-dev
source: communication
source: comms
- host: bstein.dev
path: /.well-known/matrix/server
backend:
@ -1389,7 +1433,7 @@ http_endpoints:
via:
kind: Ingress
name: matrix-wellknown-bstein-dev
source: communication
source: comms
- host: bstein.dev
path: /api
backend:
@ -1415,7 +1459,7 @@ http_endpoints:
via:
kind: Ingress
name: element-call
source: communication
source: comms
- host: chat.ai.bstein.dev
path: /
backend:
@ -1467,7 +1511,7 @@ http_endpoints:
via:
kind: Ingress
name: livekit-jwt-ingress
source: communication
source: comms
- host: kit.live.bstein.dev
path: /livekit/sfu
backend:
@ -1480,20 +1524,7 @@ http_endpoints:
via:
kind: Ingress
name: livekit-ingress
source: communication
- host: live.bstein.dev
path: /
backend:
namespace: comms
service: othrys-element-element-web
port: 80
workloads:
- kind: Deployment
name: othrys-element-element-web
via:
kind: Ingress
name: othrys-element-element-web
source: communication
source: comms
- host: live.bstein.dev
path: /.well-known/matrix/client
backend:
@ -1504,7 +1535,7 @@ http_endpoints:
via:
kind: Ingress
name: matrix-wellknown
source: communication
source: comms
- host: live.bstein.dev
path: /.well-known/matrix/server
backend:
@ -1515,20 +1546,31 @@ http_endpoints:
via:
kind: Ingress
name: matrix-wellknown
source: communication
source: comms
- host: live.bstein.dev
path: /_matrix
backend:
namespace: comms
service: othrys-synapse-matrix-synapse
port: 8008
workloads: &id002
- kind: Deployment
name: othrys-synapse-matrix-synapse
workloads: []
via:
kind: Ingress
name: matrix-routing
source: communication
source: comms
- host: logs.bstein.dev
path: /
backend:
namespace: logging
service: oauth2-proxy-logs
port: http
workloads:
- kind: Deployment
name: oauth2-proxy-logs
via:
kind: Ingress
name: logs
source: logging
- host: longhorn.bstein.dev
path: /
backend:
@ -1559,13 +1601,13 @@ http_endpoints:
namespace: comms
service: matrix-authentication-service
port: 8080
workloads: &id003
workloads: &id002
- kind: Deployment
name: matrix-authentication-service
via:
kind: Ingress
name: matrix-routing
source: communication
source: comms
- host: matrix.live.bstein.dev
path: /.well-known/matrix/client
backend:
@ -1576,7 +1618,7 @@ http_endpoints:
via:
kind: Ingress
name: matrix-wellknown-matrix-live
source: communication
source: comms
- host: matrix.live.bstein.dev
path: /.well-known/matrix/server
backend:
@ -1587,86 +1629,86 @@ http_endpoints:
via:
kind: Ingress
name: matrix-wellknown-matrix-live
source: communication
source: comms
- host: matrix.live.bstein.dev
path: /_matrix
backend:
namespace: comms
service: othrys-synapse-matrix-synapse
port: 8008
workloads: *id002
workloads: []
via:
kind: Ingress
name: matrix-routing
source: communication
source: comms
- host: matrix.live.bstein.dev
path: /_matrix/client/r0/register
backend:
namespace: comms
service: matrix-guest-register
port: 8080
workloads: &id004
workloads: &id003
- kind: Deployment
name: matrix-guest-register
via:
kind: Ingress
name: matrix-routing
source: communication
source: comms
- host: matrix.live.bstein.dev
path: /_matrix/client/v3/login
backend:
namespace: comms
service: matrix-authentication-service
port: 8080
workloads: *id003
workloads: *id002
via:
kind: Ingress
name: matrix-routing
source: communication
source: comms
- host: matrix.live.bstein.dev
path: /_matrix/client/v3/logout
backend:
namespace: comms
service: matrix-authentication-service
port: 8080
workloads: *id003
workloads: *id002
via:
kind: Ingress
name: matrix-routing
source: communication
source: comms
- host: matrix.live.bstein.dev
path: /_matrix/client/v3/refresh
backend:
namespace: comms
service: matrix-authentication-service
port: 8080
workloads: *id003
workloads: *id002
via:
kind: Ingress
name: matrix-routing
source: communication
source: comms
- host: matrix.live.bstein.dev
path: /_matrix/client/v3/register
backend:
namespace: comms
service: matrix-guest-register
port: 8080
workloads: *id004
workloads: *id003
via:
kind: Ingress
name: matrix-routing
source: communication
source: comms
- host: matrix.live.bstein.dev
path: /_synapse
backend:
namespace: comms
service: othrys-synapse-matrix-synapse
port: 8008
workloads: *id002
workloads: []
via:
kind: Ingress
name: matrix-routing
source: communication
source: comms
- host: monero.bstein.dev
path: /
backend:
@ -1680,6 +1722,19 @@ http_endpoints:
kind: Ingress
name: monerod
source: monerod
- host: notes.bstein.dev
path: /
backend:
namespace: outline
service: outline
port: 80
workloads:
- kind: Deployment
name: outline
via:
kind: Ingress
name: outline
source: outline
- host: office.bstein.dev
path: /
backend:
@ -1758,6 +1813,19 @@ http_endpoints:
kind: Ingress
name: jellyfin
source: jellyfin
- host: tasks.bstein.dev
path: /
backend:
namespace: planka
service: planka
port: 80
workloads:
- kind: Deployment
name: planka
via:
kind: Ingress
name: planka
source: planka
- host: vault.bstein.dev
path: /
backend:
@ -1772,15 +1840,28 @@ http_endpoints:
name: vaultwarden-ingress
source: vaultwarden
helmrelease_host_hints:
comms:comms/othrys-element:
- call.live.bstein.dev
- live.bstein.dev
- matrix.live.bstein.dev
comms:comms/othrys-synapse:
- bstein.dev
- kit.live.bstein.dev
- live.bstein.dev
- matrix.live.bstein.dev
- turn.live.bstein.dev
gitops-ui:flux-system/weave-gitops:
- cd.bstein.dev
harbor:harbor/harbor:
- registry.bstein.dev
logging:logging/data-prepper:
- registry.bstein.dev
mailu:mailu-mailserver/mailu:
- bstein.dev
- mail.bstein.dev
monitoring:monitoring/alertmanager:
- alerts.bstein.dev
monitoring:monitoring/grafana:
- bstein.dev
- metrics.bstein.dev
- sso.bstein.dev

View File

@ -47,15 +47,14 @@ flowchart LR
wl_comms_livekit["comms/livekit (Deployment)"]
svc_comms_livekit --> wl_comms_livekit
host_live_bstein_dev["live.bstein.dev"]
svc_comms_othrys_element_element_web["comms/othrys-element-element-web (Service)"]
host_live_bstein_dev --> svc_comms_othrys_element_element_web
wl_comms_othrys_element_element_web["comms/othrys-element-element-web (Deployment)"]
svc_comms_othrys_element_element_web --> wl_comms_othrys_element_element_web
host_live_bstein_dev --> svc_comms_matrix_wellknown
svc_comms_othrys_synapse_matrix_synapse["comms/othrys-synapse-matrix-synapse (Service)"]
host_live_bstein_dev --> svc_comms_othrys_synapse_matrix_synapse
wl_comms_othrys_synapse_matrix_synapse["comms/othrys-synapse-matrix-synapse (Deployment)"]
svc_comms_othrys_synapse_matrix_synapse --> wl_comms_othrys_synapse_matrix_synapse
host_logs_bstein_dev["logs.bstein.dev"]
svc_logging_oauth2_proxy_logs["logging/oauth2-proxy-logs (Service)"]
host_logs_bstein_dev --> svc_logging_oauth2_proxy_logs
wl_logging_oauth2_proxy_logs["logging/oauth2-proxy-logs (Deployment)"]
svc_logging_oauth2_proxy_logs --> wl_logging_oauth2_proxy_logs
host_longhorn_bstein_dev["longhorn.bstein.dev"]
svc_longhorn_system_oauth2_proxy_longhorn["longhorn-system/oauth2-proxy-longhorn (Service)"]
host_longhorn_bstein_dev --> svc_longhorn_system_oauth2_proxy_longhorn
@ -80,6 +79,11 @@ flowchart LR
host_monero_bstein_dev --> svc_crypto_monerod
wl_crypto_monerod["crypto/monerod (Deployment)"]
svc_crypto_monerod --> wl_crypto_monerod
host_notes_bstein_dev["notes.bstein.dev"]
svc_outline_outline["outline/outline (Service)"]
host_notes_bstein_dev --> svc_outline_outline
wl_outline_outline["outline/outline (Deployment)"]
svc_outline_outline --> wl_outline_outline
host_office_bstein_dev["office.bstein.dev"]
svc_nextcloud_collabora["nextcloud/collabora (Service)"]
host_office_bstein_dev --> svc_nextcloud_collabora
@ -110,6 +114,11 @@ flowchart LR
host_stream_bstein_dev --> svc_jellyfin_jellyfin
wl_jellyfin_jellyfin["jellyfin/jellyfin (Deployment)"]
svc_jellyfin_jellyfin --> wl_jellyfin_jellyfin
host_tasks_bstein_dev["tasks.bstein.dev"]
svc_planka_planka["planka/planka (Service)"]
host_tasks_bstein_dev --> svc_planka_planka
wl_planka_planka["planka/planka (Deployment)"]
svc_planka_planka --> wl_planka_planka
host_vault_bstein_dev["vault.bstein.dev"]
svc_vaultwarden_vaultwarden_service["vaultwarden/vaultwarden-service (Service)"]
host_vault_bstein_dev --> svc_vaultwarden_vaultwarden_service
@ -133,10 +142,7 @@ flowchart LR
wl_comms_livekit_token_service
svc_comms_livekit
wl_comms_livekit
svc_comms_othrys_element_element_web
wl_comms_othrys_element_element_web
svc_comms_othrys_synapse_matrix_synapse
wl_comms_othrys_synapse_matrix_synapse
svc_comms_matrix_authentication_service
wl_comms_matrix_authentication_service
svc_comms_matrix_guest_register
@ -160,6 +166,10 @@ flowchart LR
svc_jenkins_jenkins
wl_jenkins_jenkins
end
subgraph logging[logging]
svc_logging_oauth2_proxy_logs
wl_logging_oauth2_proxy_logs
end
subgraph longhorn_system[longhorn-system]
svc_longhorn_system_oauth2_proxy_longhorn
wl_longhorn_system_oauth2_proxy_longhorn
@ -173,6 +183,14 @@ flowchart LR
svc_nextcloud_collabora
wl_nextcloud_collabora
end
subgraph outline[outline]
svc_outline_outline
wl_outline_outline
end
subgraph planka[planka]
svc_planka_planka
wl_planka_planka
end
subgraph sso[sso]
svc_sso_oauth2_proxy
wl_sso_oauth2_proxy

View File

@ -505,7 +505,9 @@ def main() -> int:
diagram_path = out_dir / "diagrams" / "atlas-http.mmd"
runbooks_json_path = out_dir / "catalog" / "runbooks.json"
catalog_rel = catalog_path.relative_to(REPO_ROOT).as_posix()
catalog_path.write_text(
f"# {catalog_rel}\n"
"# Generated by scripts/knowledge_render_atlas.py (do not edit by hand)\n"
+ yaml.safe_dump(catalog, sort_keys=False),
encoding="utf-8",

View File

@ -1,6 +1,6 @@
{
"counts": {
"helmrelease_host_hints": 18,
"helmrelease_host_hints": 17,
"http_endpoints": 37,
"services": 43,
"workloads": 54

View File

@ -199,7 +199,7 @@
"node-role.kubernetes.io/worker": "true"
},
"images": [
"registry.bstein.dev/bstein/bstein-dev-home-backend:registry.bstein.dev/bstein/bstein-dev-home-backend:0.1.1-92"
"registry.bstein.dev/bstein/bstein-dev-home-backend:0.1.1-92"
]
},
{
@ -215,7 +215,7 @@
"node-role.kubernetes.io/worker": "true"
},
"images": [
"registry.bstein.dev/bstein/bstein-dev-home-frontend:registry.bstein.dev/bstein/bstein-dev-home-frontend:0.1.1-92"
"registry.bstein.dev/bstein/bstein-dev-home-frontend:0.1.1-92"
]
},
{
@ -2842,8 +2842,7 @@
"kit.live.bstein.dev",
"live.bstein.dev",
"matrix.live.bstein.dev",
"registry.bstein.dev",
"sso.bstein.dev"
"turn.live.bstein.dev"
],
"gitops-ui:flux-system/weave-gitops": [
"cd.bstein.dev"

View File

@ -1,3 +1,4 @@
# services/comms/knowledge/catalog/atlas.yaml
# Generated by scripts/knowledge_render_atlas.py (do not edit by hand)
cluster: atlas
sources:
@ -123,7 +124,7 @@ workloads:
kubernetes.io/arch: arm64
node-role.kubernetes.io/worker: 'true'
images:
- registry.bstein.dev/bstein/bstein-dev-home-backend:registry.bstein.dev/bstein/bstein-dev-home-backend:0.1.1-92
- registry.bstein.dev/bstein/bstein-dev-home-backend:0.1.1-92
- kind: Deployment
namespace: bstein-dev-home
name: bstein-dev-home-frontend
@ -134,7 +135,7 @@ workloads:
kubernetes.io/arch: arm64
node-role.kubernetes.io/worker: 'true'
images:
- registry.bstein.dev/bstein/bstein-dev-home-frontend:registry.bstein.dev/bstein/bstein-dev-home-frontend:0.1.1-92
- registry.bstein.dev/bstein/bstein-dev-home-frontend:0.1.1-92
- kind: Deployment
namespace: bstein-dev-home
name: chat-ai-gateway
@ -1848,8 +1849,7 @@ helmrelease_host_hints:
- kit.live.bstein.dev
- live.bstein.dev
- matrix.live.bstein.dev
- registry.bstein.dev
- sso.bstein.dev
- turn.live.bstein.dev
gitops-ui:flux-system/weave-gitops:
- cd.bstein.dev
harbor:harbor/harbor:

View File

@ -1,4 +1,4 @@
# services/crypto/monerod
# services/crypto/monerod/deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:

View File

@ -1,4 +1,4 @@
# services/crypto/xmr-miner/kustomization/yaml
# services/crypto/xmr-miner/kustomization.yaml
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:

View File

@ -117,21 +117,14 @@ spec:
existingSecret: harbor-core
existingXsrfSecret: harbor-core
existingXsrfSecretKey: CSRF_KEY
# OIDC config; client secret is stored out-of-band.
configureUserSettings: |
{
"auth_mode": "oidc_auth",
"oidc_name": "Keycloak",
"oidc_endpoint": "https://sso.bstein.dev/realms/atlas",
"oidc_client_id": "harbor",
"oidc_verify_cert": true,
"oidc_auto_onboard": true,
"oidc_scope": "openid,profile,email,groups",
"oidc_groups_claim": "groups",
"oidc_user_claim": "preferred_username",
"oidc_admin_group": "admin",
"oidc_logout": true
}
# OIDC config is injected via CONFIG_OVERWRITE_JSON from the harbor-oidc secret.
extraEnvVars:
- name: CONFIG_OVERWRITE_JSON
valueFrom:
secretKeyRef:
name: harbor-oidc
key: CONFIG_OVERWRITE_JSON
optional: true
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:

View File

@ -0,0 +1,142 @@
# services/keycloak/harbor-oidc-secret-ensure-job.yaml
apiVersion: batch/v1
kind: Job
metadata:
name: harbor-oidc-secret-ensure-1
namespace: sso
spec:
backoffLimit: 0
ttlSecondsAfterFinished: 3600
template:
spec:
serviceAccountName: mas-secrets-ensure
restartPolicy: Never
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: kubernetes.io/arch
operator: In
values: ["arm64"]
- key: node-role.kubernetes.io/worker
operator: Exists
containers:
- name: apply
image: alpine:3.20
command: ["/bin/sh", "-c"]
args:
- |
set -euo pipefail
# Ensure the Keycloak "harbor" OIDC client exists in the atlas realm, make
# sure the "groups" client scope is attached, then publish Harbor's OIDC
# settings (including the client secret) into the harbor/harbor-oidc Secret
# as CONFIG_OVERWRITE_JSON, which harbor-core consumes via extraEnvVars.
apk add --no-cache curl jq kubectl >/dev/null
KC_URL="http://keycloak.sso.svc.cluster.local"
# Obtain a Keycloak admin token, retrying with linear backoff while
# Keycloak finishes starting up.
ACCESS_TOKEN=""
for attempt in 1 2 3 4 5; do
  TOKEN_JSON="$(curl -sS -X POST "$KC_URL/realms/master/protocol/openid-connect/token" \
    -H 'Content-Type: application/x-www-form-urlencoded' \
    -d "grant_type=password" \
    -d "client_id=admin-cli" \
    -d "username=${KEYCLOAK_ADMIN}" \
    -d "password=${KEYCLOAK_ADMIN_PASSWORD}" || true)"
  ACCESS_TOKEN="$(echo "$TOKEN_JSON" | jq -r '.access_token' 2>/dev/null || true)"
  if [ -n "$ACCESS_TOKEN" ] && [ "$ACCESS_TOKEN" != "null" ]; then
    break
  fi
  echo "Keycloak token request failed (attempt ${attempt})" >&2
  sleep $((attempt * 2))
done
if [ -z "$ACCESS_TOKEN" ] || [ "$ACCESS_TOKEN" = "null" ]; then
  echo "Failed to fetch Keycloak admin token" >&2
  exit 1
fi
# Look up the harbor client; create it on first run, then re-query its id.
CLIENT_QUERY="$(curl -sS -H "Authorization: Bearer ${ACCESS_TOKEN}" \
  "$KC_URL/admin/realms/atlas/clients?clientId=harbor" || true)"
CLIENT_ID="$(echo "$CLIENT_QUERY" | jq -r '.[0].id' 2>/dev/null || true)"
if [ -z "$CLIENT_ID" ] || [ "$CLIENT_ID" = "null" ]; then
  create_payload='{"clientId":"harbor","enabled":true,"protocol":"openid-connect","publicClient":false,"standardFlowEnabled":true,"implicitFlowEnabled":false,"directAccessGrantsEnabled":false,"serviceAccountsEnabled":false,"redirectUris":["https://registry.bstein.dev/c/oidc/callback"],"webOrigins":["https://registry.bstein.dev"],"rootUrl":"https://registry.bstein.dev","baseUrl":"/"}'
  status="$(curl -sS -o /dev/null -w "%{http_code}" -X POST \
    -H "Authorization: Bearer ${ACCESS_TOKEN}" \
    -H 'Content-Type: application/json' \
    -d "${create_payload}" \
    "$KC_URL/admin/realms/atlas/clients")"
  if [ "$status" != "201" ] && [ "$status" != "204" ]; then
    echo "Keycloak client create failed (status ${status})" >&2
    exit 1
  fi
  CLIENT_QUERY="$(curl -sS -H "Authorization: Bearer ${ACCESS_TOKEN}" \
    "$KC_URL/admin/realms/atlas/clients?clientId=harbor" || true)"
  CLIENT_ID="$(echo "$CLIENT_QUERY" | jq -r '.[0].id' 2>/dev/null || true)"
fi
if [ -z "$CLIENT_ID" ] || [ "$CLIENT_ID" = "null" ]; then
  echo "Keycloak client harbor not found" >&2
  exit 1
fi
# Attach the "groups" client scope as an optional scope unless it is
# already attached (default or optional). PUT is tried first with a POST
# fallback — presumably for Keycloak version differences; confirm.
SCOPE_ID="$(curl -sS -H "Authorization: Bearer ${ACCESS_TOKEN}" \
  "$KC_URL/admin/realms/atlas/client-scopes?search=groups" | jq -r '.[] | select(.name=="groups") | .id' 2>/dev/null | head -n1 || true)"
if [ -z "$SCOPE_ID" ] || [ "$SCOPE_ID" = "null" ]; then
  echo "Keycloak client scope groups not found" >&2
  exit 1
fi
DEFAULT_SCOPES="$(curl -sS -H "Authorization: Bearer ${ACCESS_TOKEN}" \
  "$KC_URL/admin/realms/atlas/clients/${CLIENT_ID}/default-client-scopes" || true)"
OPTIONAL_SCOPES="$(curl -sS -H "Authorization: Bearer ${ACCESS_TOKEN}" \
  "$KC_URL/admin/realms/atlas/clients/${CLIENT_ID}/optional-client-scopes" || true)"
if ! echo "$DEFAULT_SCOPES" | jq -e '.[] | select(.name=="groups")' >/dev/null 2>&1 \
  && ! echo "$OPTIONAL_SCOPES" | jq -e '.[] | select(.name=="groups")' >/dev/null 2>&1; then
  status="$(curl -sS -o /dev/null -w "%{http_code}" -X PUT \
    -H "Authorization: Bearer ${ACCESS_TOKEN}" \
    "$KC_URL/admin/realms/atlas/clients/${CLIENT_ID}/optional-client-scopes/${SCOPE_ID}")"
  if [ "$status" != "200" ] && [ "$status" != "201" ] && [ "$status" != "204" ]; then
    status="$(curl -sS -o /dev/null -w "%{http_code}" -X POST \
      -H "Authorization: Bearer ${ACCESS_TOKEN}" \
      "$KC_URL/admin/realms/atlas/clients/${CLIENT_ID}/optional-client-scopes/${SCOPE_ID}")"
    if [ "$status" != "200" ] && [ "$status" != "201" ] && [ "$status" != "204" ]; then
      echo "Failed to attach groups client scope to harbor (status ${status})" >&2
      exit 1
    fi
  fi
fi
# Fetch the client secret and render Harbor's CONFIG_OVERWRITE_JSON with jq
# so every value is JSON-encoded safely.
CLIENT_SECRET="$(curl -sS -H "Authorization: Bearer ${ACCESS_TOKEN}" \
  "$KC_URL/admin/realms/atlas/clients/${CLIENT_ID}/client-secret" | jq -r '.value' 2>/dev/null || true)"
if [ -z "$CLIENT_SECRET" ] || [ "$CLIENT_SECRET" = "null" ]; then
  echo "Keycloak client secret not found" >&2
  exit 1
fi
# BUGFIX: the jq program must be written without literal "\n" escape
# sequences — the previous single-quoted filter contained raw "\n" outside
# any jq string, which jq rejects with a parse error, so the secret was
# never rendered. The filter is now a single line.
CONFIG_OVERWRITE_JSON="$(jq -nc \
  --arg auth_mode "oidc_auth" \
  --arg oidc_name "Keycloak" \
  --arg oidc_client_id "harbor" \
  --arg oidc_client_secret "${CLIENT_SECRET}" \
  --arg oidc_endpoint "https://sso.bstein.dev/realms/atlas" \
  --arg oidc_scope "openid,profile,email,groups" \
  --arg oidc_user_claim "preferred_username" \
  --arg oidc_groups_claim "groups" \
  --arg oidc_admin_group "admin" \
  --argjson oidc_auto_onboard true \
  --argjson oidc_verify_cert true \
  --argjson oidc_logout true \
  '{auth_mode: $auth_mode, oidc_name: $oidc_name, oidc_client_id: $oidc_client_id, oidc_client_secret: $oidc_client_secret, oidc_endpoint: $oidc_endpoint, oidc_scope: $oidc_scope, oidc_user_claim: $oidc_user_claim, oidc_groups_claim: $oidc_groups_claim, oidc_admin_group: $oidc_admin_group, oidc_auto_onboard: $oidc_auto_onboard, oidc_verify_cert: $oidc_verify_cert, oidc_logout: $oidc_logout}')"
# Upsert the Secret Harbor reads (create --dry-run | apply == idempotent).
kubectl -n harbor create secret generic harbor-oidc \
  --from-literal=CONFIG_OVERWRITE_JSON="${CONFIG_OVERWRITE_JSON}" \
  --dry-run=client -o yaml | kubectl -n harbor apply -f - >/dev/null
env:
- name: KEYCLOAK_ADMIN
valueFrom:
secretKeyRef:
name: keycloak-admin
key: username
- name: KEYCLOAK_ADMIN_PASSWORD
valueFrom:
secretKeyRef:
name: keycloak-admin
key: password

View File

@ -19,6 +19,7 @@ resources:
- mas-secrets-ensure-job.yaml
- synapse-oidc-secret-ensure-job.yaml
- logs-oidc-secret-ensure-job.yaml
- harbor-oidc-secret-ensure-job.yaml
- service.yaml
- ingress.yaml
generatorOptions:

View File

@ -1,4 +1,4 @@
# services/monitoring/kube-state-metrics-helmrelease.yaml
# services/monitoring/helmrelease.yaml
apiVersion: helm.toolkit.fluxcd.io/v2
kind: HelmRelease
metadata:

View File

@ -8,7 +8,14 @@ resources:
- rbac.yaml
- configmap.yaml
- statefulset.yaml
- oidc-config-cronjob.yaml
- service.yaml
- ingress.yaml
- certificate.yaml
- serverstransport.yaml
generatorOptions:
disableNameSuffixHash: true
configMapGenerator:
- name: vault-oidc-config-script
files:
- vault_oidc_configure.sh=scripts/vault_oidc_configure.sh

View File

@ -0,0 +1,114 @@
# services/vault/oidc-config-cronjob.yaml
apiVersion: batch/v1
kind: CronJob
metadata:
name: vault-oidc-config
namespace: vault
spec:
schedule: "*/15 * * * *"
concurrencyPolicy: Forbid
successfulJobsHistoryLimit: 1
failedJobsHistoryLimit: 3
jobTemplate:
spec:
backoffLimit: 1
template:
spec:
serviceAccountName: vault
restartPolicy: Never
nodeSelector:
kubernetes.io/arch: arm64
node-role.kubernetes.io/worker: "true"
containers:
- name: configure-oidc
image: hashicorp/vault:1.17.6
imagePullPolicy: IfNotPresent
command:
- bash
- /scripts/vault_oidc_configure.sh
env:
- name: VAULT_ADDR
value: http://vault.vault.svc.cluster.local:8200
- name: VAULT_TOKEN
valueFrom:
secretKeyRef:
name: vault-oidc-admin-token
key: token
- name: VAULT_OIDC_DISCOVERY_URL
valueFrom:
secretKeyRef:
name: vault-oidc-config
key: discovery_url
- name: VAULT_OIDC_CLIENT_ID
valueFrom:
secretKeyRef:
name: vault-oidc-config
key: client_id
- name: VAULT_OIDC_CLIENT_SECRET
valueFrom:
secretKeyRef:
name: vault-oidc-config
key: client_secret
- name: VAULT_OIDC_DEFAULT_ROLE
valueFrom:
secretKeyRef:
name: vault-oidc-config
key: default_role
optional: true
- name: VAULT_OIDC_SCOPES
valueFrom:
secretKeyRef:
name: vault-oidc-config
key: scopes
optional: true
- name: VAULT_OIDC_USER_CLAIM
valueFrom:
secretKeyRef:
name: vault-oidc-config
key: user_claim
optional: true
- name: VAULT_OIDC_GROUPS_CLAIM
valueFrom:
secretKeyRef:
name: vault-oidc-config
key: groups_claim
optional: true
- name: VAULT_OIDC_TOKEN_POLICIES
valueFrom:
secretKeyRef:
name: vault-oidc-config
key: token_policies
optional: true
- name: VAULT_OIDC_REDIRECT_URIS
valueFrom:
secretKeyRef:
name: vault-oidc-config
key: redirect_uris
optional: true
- name: VAULT_OIDC_BOUND_AUDIENCES
valueFrom:
secretKeyRef:
name: vault-oidc-config
key: bound_audiences
optional: true
- name: VAULT_OIDC_BOUND_CLAIMS
valueFrom:
secretKeyRef:
name: vault-oidc-config
key: bound_claims
optional: true
- name: VAULT_OIDC_BOUND_CLAIMS_TYPE
valueFrom:
secretKeyRef:
name: vault-oidc-config
key: bound_claims_type
optional: true
volumeMounts:
- name: oidc-config-script
mountPath: /scripts
readOnly: true
volumes:
- name: oidc-config-script
configMap:
name: vault-oidc-config-script
defaultMode: 0555

View File

@ -0,0 +1,77 @@
#!/usr/bin/env bash
set -euo pipefail
log() { echo "[vault-oidc] $*"; }
# Gate on Vault health before touching config. This runs from a CronJob
# every 15 minutes, so a not-yet-initialized or sealed Vault is a normal,
# transient condition (skip with exit 0); an unreachable Vault or bad
# token is a hard failure (exit 1). `vault status` exits non-zero when
# sealed, hence the `|| true` — the JSON body is still captured.
status_json="$(vault status -format=json || true)"
if [[ -z "${status_json}" ]]; then
log "vault status failed; check VAULT_ADDR and VAULT_TOKEN"
exit 1
fi
# NOTE(review): JSON fields are matched with grep rather than jq —
# presumably because jq is not guaranteed in the hashicorp/vault image;
# confirm before switching to structured parsing.
if ! grep -q '"initialized":true' <<<"${status_json}"; then
log "vault not initialized; skipping"
exit 0
fi
if grep -q '"sealed":true' <<<"${status_json}"; then
log "vault sealed; skipping"
exit 0
fi
# Required inputs — fail fast with a descriptive message if unset/empty:
# the OIDC discovery URL plus the confidential client credentials.
: "${VAULT_OIDC_DISCOVERY_URL:?set VAULT_OIDC_DISCOVERY_URL}"
: "${VAULT_OIDC_CLIENT_ID:?set VAULT_OIDC_CLIENT_ID}"
: "${VAULT_OIDC_CLIENT_SECRET:?set VAULT_OIDC_CLIENT_SECRET}"
# Optional inputs with defaults; each can be overridden via the
# vault-oidc-config Secret keys wired in by the CronJob.
role="${VAULT_OIDC_DEFAULT_ROLE:-atlas}"
scopes="${VAULT_OIDC_SCOPES:-openid profile email groups}"
user_claim="${VAULT_OIDC_USER_CLAIM:-preferred_username}"
groups_claim="${VAULT_OIDC_GROUPS_CLAIM:-groups}"
token_policies="${VAULT_OIDC_TOKEN_POLICIES:-default}"
# Comma-separated list; split and trimmed before being passed to the role.
redirect_uris="${VAULT_OIDC_REDIRECT_URIS:-https://secret.bstein.dev/ui/vault/auth/oidc/oidc/callback}"
# Audience defaults to the client id; bound_claims(_type) stay empty (and
# are then omitted from the role) unless explicitly configured.
bound_audiences="${VAULT_OIDC_BOUND_AUDIENCES:-${VAULT_OIDC_CLIENT_ID}}"
bound_claims="${VAULT_OIDC_BOUND_CLAIMS:-}"
bound_claims_type="${VAULT_OIDC_BOUND_CLAIMS_TYPE:-}"
# Enable the OIDC auth method exactly once: `vault auth list -format=json`
# keys mounts by path (e.g. "oidc/"), which is matched with grep here —
# NOTE(review): grep over JSON assumes the literal key string; confirm jq
# is unavailable in this image before refactoring.
if ! vault auth list -format=json | grep -q '"oidc/"'; then
log "enabling oidc auth method"
vault auth enable oidc
fi
log "configuring oidc auth"
# Idempotent: rewrites the mount config with the current discovery URL and
# client credentials on every run, so secret rotation is picked up.
vault write auth/oidc/config \
oidc_discovery_url="${VAULT_OIDC_DISCOVERY_URL}" \
oidc_client_id="${VAULT_OIDC_CLIENT_ID}" \
oidc_client_secret="${VAULT_OIDC_CLIENT_SECRET}" \
default_role="${role}"
# Make the OIDC method visible on the unauthenticated UI login screen.
vault auth tune -listing-visibility=unauth oidc >/dev/null
# Assemble the `vault write` key=value arguments as an array so values
# containing spaces (e.g. the space-separated scopes list) remain single
# arguments.
role_args=(
"user_claim=${user_claim}"
"oidc_scopes=${scopes}"
"token_policies=${token_policies}"
"bound_audiences=${bound_audiences}"
)
# Optional settings are appended only when non-empty, so Vault applies its
# own defaults otherwise.
if [[ -n "${groups_claim}" ]]; then
role_args+=("groups_claim=${groups_claim}")
fi
if [[ -n "${bound_claims}" ]]; then
role_args+=("bound_claims=${bound_claims}")
fi
if [[ -n "${bound_claims_type}" ]]; then
role_args+=("bound_claims_type=${bound_claims_type}")
fi
# Split the comma-separated redirect URI list into an array.
IFS=',' read -r -a redirect_items <<<"${redirect_uris}"
for uri in "${redirect_items[@]}"; do
# Trim leading then trailing whitespace via pure parameter expansion:
# strip the longest leading run of whitespace, then the trailing one.
trimmed="${uri#"${uri%%[![:space:]]*}"}"
trimmed="${trimmed%"${trimmed##*[![:space:]]}"}"
# Each URI becomes its own allowed_redirect_uris=... argument; repeating
# the key passes a list to the Vault CLI.
if [[ -n "${trimmed}" ]]; then
role_args+=("allowed_redirect_uris=${trimmed}")
fi
done
log "configuring oidc role ${role}"
# Idempotent upsert of the login role used by default_role above.
vault write "auth/oidc/role/${role}" "${role_args[@]}"