comms: sync atlas knowledge and use ariadne state

Brad Stein 2026-01-26 03:32:17 -03:00
parent 5aac018a7b
commit 10003ca0d7
17 changed files with 2453 additions and 278 deletions

View File

@ -1,8 +1,8 @@
{
"counts": {
"helmrelease_host_hints": 17,
"http_endpoints": 37,
"services": 43,
"workloads": 54
"helmrelease_host_hints": 19,
"http_endpoints": 45,
"services": 47,
"workloads": 74
}
}

File diff suppressed because it is too large.

View File

@ -8,6 +8,15 @@ sources:
- name: bstein-dev-home
path: services/bstein-dev-home
targetNamespace: bstein-dev-home
- name: bstein-dev-home-migrations
path: services/bstein-dev-home/migrations
targetNamespace: bstein-dev-home
- name: cert-manager
path: infrastructure/cert-manager
targetNamespace: cert-manager
- name: cert-manager-cleanup
path: infrastructure/cert-manager/cleanup
targetNamespace: cert-manager
- name: comms
path: services/comms
targetNamespace: comms
@ -17,6 +26,9 @@ sources:
- name: crypto
path: services/crypto
targetNamespace: crypto
- name: finance
path: services/finance
targetNamespace: finance
- name: flux-system
path: clusters/atlas/flux-system
targetNamespace: null
@ -29,6 +41,9 @@ sources:
- name: harbor
path: services/harbor
targetNamespace: harbor
- name: health
path: services/health
targetNamespace: health
- name: helm
path: infrastructure/sources/helm
targetNamespace: flux-system
@ -44,6 +59,12 @@ sources:
- name: logging
path: services/logging
targetNamespace: null
- name: longhorn
path: infrastructure/longhorn/core
targetNamespace: longhorn-system
- name: longhorn-adopt
path: infrastructure/longhorn/adopt
targetNamespace: longhorn-system
- name: longhorn-ui
path: infrastructure/longhorn/ui-ingress
targetNamespace: longhorn-system
@ -98,9 +119,15 @@ sources:
- name: vault-csi
path: infrastructure/vault-csi
targetNamespace: kube-system
- name: vault-injector
path: infrastructure/vault-injector
targetNamespace: vault
- name: vaultwarden
path: services/vaultwarden
targetNamespace: vaultwarden
- name: wallet-monero-temp
path: services/crypto/wallet-monero-temp
targetNamespace: crypto
- name: xmr-miner
path: services/crypto/xmr-miner
targetNamespace: crypto
@ -124,7 +151,7 @@ workloads:
kubernetes.io/arch: arm64
node-role.kubernetes.io/worker: 'true'
images:
- registry.bstein.dev/bstein/bstein-dev-home-backend:0.1.1-92
- registry.bstein.dev/bstein/bstein-dev-home-backend:0.1.1-157
- kind: Deployment
namespace: bstein-dev-home
name: bstein-dev-home-frontend
@ -135,13 +162,22 @@ workloads:
kubernetes.io/arch: arm64
node-role.kubernetes.io/worker: 'true'
images:
- registry.bstein.dev/bstein/bstein-dev-home-frontend:0.1.1-92
- registry.bstein.dev/bstein/bstein-dev-home-frontend:0.1.1-157
- kind: Deployment
namespace: bstein-dev-home
name: bstein-dev-home-vault-sync
labels:
app: bstein-dev-home-vault-sync
serviceAccountName: bstein-dev-home-vault-sync
nodeSelector: {}
images:
- alpine:3.20
- kind: Deployment
namespace: bstein-dev-home
name: chat-ai-gateway
labels:
app: chat-ai-gateway
serviceAccountName: null
serviceAccountName: bstein-dev-home
nodeSelector:
kubernetes.io/arch: arm64
node-role.kubernetes.io/worker: 'true'
@ -157,12 +193,21 @@ workloads:
hardware: rpi5
images:
- python:3.11-slim
- kind: Deployment
namespace: comms
name: comms-vault-sync
labels:
app: comms-vault-sync
serviceAccountName: comms-vault
nodeSelector: {}
images:
- alpine:3.20
- kind: Deployment
namespace: comms
name: coturn
labels:
app: coturn
serviceAccountName: null
serviceAccountName: comms-vault
nodeSelector:
hardware: rpi5
images:
@ -182,7 +227,7 @@ workloads:
name: livekit
labels:
app: livekit
serviceAccountName: null
serviceAccountName: comms-vault
nodeSelector:
hardware: rpi5
images:
@ -192,17 +237,17 @@ workloads:
name: livekit-token-service
labels:
app: livekit-token-service
serviceAccountName: null
serviceAccountName: comms-vault
nodeSelector:
hardware: rpi5
images:
- ghcr.io/element-hq/lk-jwt-service:0.3.0
- registry.bstein.dev/tools/lk-jwt-service-vault:0.3.0
- kind: Deployment
namespace: comms
name: matrix-authentication-service
labels:
app: matrix-authentication-service
serviceAccountName: null
serviceAccountName: comms-vault
nodeSelector:
hardware: rpi5
images:
@ -212,7 +257,7 @@ workloads:
name: matrix-guest-register
labels:
app.kubernetes.io/name: matrix-guest-register
serviceAccountName: null
serviceAccountName: comms-vault
nodeSelector: {}
images:
- python:3.11-slim
@ -235,12 +280,21 @@ workloads:
node-role.kubernetes.io/worker: 'true'
images:
- ghcr.io/tari-project/xmrig@sha256:80defbfd0b640d604c91cb5101d3642db7928e1e68ee3c6b011289b3565a39d9
- kind: Deployment
namespace: crypto
name: crypto-vault-sync
labels:
app: crypto-vault-sync
serviceAccountName: crypto-vault-sync
nodeSelector: {}
images:
- alpine:3.20
- kind: Deployment
namespace: crypto
name: monero-p2pool
labels:
app: monero-p2pool
serviceAccountName: null
serviceAccountName: crypto-vault-sync
nodeSelector:
node-role.kubernetes.io/worker: 'true'
images:
@ -255,6 +309,38 @@ workloads:
node-role.kubernetes.io/worker: 'true'
images:
- registry.bstein.dev/crypto/monerod:0.18.4.1
- kind: Deployment
namespace: crypto
name: wallet-monero-temp
labels:
app: wallet-monero-temp
serviceAccountName: crypto-vault-sync
nodeSelector:
node-role.kubernetes.io/worker: 'true'
images:
- registry.bstein.dev/crypto/monero-wallet-rpc:0.18.4.1
- kind: Deployment
namespace: finance
name: actual-budget
labels:
app: actual-budget
serviceAccountName: finance-vault
nodeSelector:
kubernetes.io/arch: arm64
node-role.kubernetes.io/worker: 'true'
images:
- actualbudget/actual-server:26.1.0-alpine@sha256:34aae5813fdfee12af2a50c4d0667df68029f1d61b90f45f282473273eb70d0d
- kind: Deployment
namespace: finance
name: firefly
labels:
app: firefly
serviceAccountName: finance-vault
nodeSelector:
kubernetes.io/arch: arm64
node-role.kubernetes.io/worker: 'true'
images:
- fireflyiii/core:version-6.4.15
- kind: Deployment
namespace: flux-system
name: helm-controller
@ -344,17 +430,38 @@ workloads:
name: gitea
labels:
app: gitea
serviceAccountName: null
serviceAccountName: gitea-vault
nodeSelector:
node-role.kubernetes.io/worker: 'true'
images:
- gitea/gitea:1.23
- kind: Deployment
namespace: harbor
name: harbor-vault-sync
labels:
app: harbor-vault-sync
serviceAccountName: harbor-vault-sync
nodeSelector: {}
images:
- alpine:3.20
- kind: Deployment
namespace: health
name: wger
labels:
app: wger
serviceAccountName: health-vault-sync
nodeSelector:
kubernetes.io/arch: arm64
node-role.kubernetes.io/worker: 'true'
images:
- nginx:1.27.5-alpine@sha256:65645c7bb6a0661892a8b03b89d0743208a18dd2f3f17a54ef4b76fb8e2f2a10
- wger/server@sha256:710588b78af4e0aa0b4d8a8061e4563e16eae80eeaccfe7f9e0d9cbdd7f0cbc5
- kind: Deployment
namespace: jellyfin
name: jellyfin
labels:
app: jellyfin
serviceAccountName: null
serviceAccountName: pegasus-vault-sync
nodeSelector: {}
images:
- docker.io/jellyfin/jellyfin:10.11.5
@ -363,13 +470,22 @@ workloads:
name: pegasus
labels:
app: pegasus
serviceAccountName: null
serviceAccountName: pegasus-vault-sync
nodeSelector:
kubernetes.io/arch: arm64
node-role.kubernetes.io/worker: 'true'
images:
- alpine:3.20
- registry.bstein.dev/streaming/pegasus:1.2.32
- registry.bstein.dev/streaming/pegasus-vault:1.2.32
- kind: Deployment
namespace: jellyfin
name: pegasus-vault-sync
labels:
app: pegasus-vault-sync
serviceAccountName: pegasus-vault-sync
nodeSelector: {}
images:
- alpine:3.20
- kind: Deployment
namespace: jenkins
name: jenkins
@ -381,6 +497,26 @@ workloads:
node-role.kubernetes.io/worker: 'true'
images:
- jenkins/jenkins:2.528.3-jdk21
- kind: Deployment
namespace: jenkins
name: jenkins-vault-sync
labels:
app: jenkins-vault-sync
serviceAccountName: jenkins-vault-sync
nodeSelector:
kubernetes.io/arch: arm64
node-role.kubernetes.io/worker: 'true'
images:
- alpine:3.20
- kind: DaemonSet
namespace: kube-system
name: ntp-sync
labels:
app: ntp-sync
serviceAccountName: null
nodeSelector: {}
images:
- public.ecr.aws/docker/library/busybox:1.36.1
- kind: DaemonSet
namespace: kube-system
name: nvidia-device-plugin-jetson
@ -427,6 +563,16 @@ workloads:
kubernetes.io/os: linux
images:
- hashicorp/vault-csi-provider:1.7.0
- kind: Deployment
namespace: kube-system
name: coredns
labels:
k8s-app: kube-dns
serviceAccountName: coredns
nodeSelector:
kubernetes.io/os: linux
images:
- registry.bstein.dev/infra/coredns:1.12.1
- kind: DaemonSet
namespace: logging
name: node-image-gc-rpi4
@ -457,22 +603,41 @@ workloads:
hardware: rpi5
images:
- bitnami/kubectl@sha256:554ab88b1858e8424c55de37ad417b16f2a0e65d1607aa0f3fe3ce9b9f10b131
- kind: Deployment
namespace: logging
name: logging-vault-sync
labels:
app: logging-vault-sync
serviceAccountName: logging-vault-sync
nodeSelector: {}
images:
- alpine:3.20
- kind: Deployment
namespace: logging
name: oauth2-proxy-logs
labels:
app: oauth2-proxy-logs
serviceAccountName: null
serviceAccountName: logging-vault-sync
nodeSelector:
node-role.kubernetes.io/worker: 'true'
images:
- quay.io/oauth2-proxy/oauth2-proxy:v7.6.0
- registry.bstein.dev/tools/oauth2-proxy-vault:v7.6.0
- kind: Deployment
namespace: longhorn-system
name: longhorn-vault-sync
labels:
app: longhorn-vault-sync
serviceAccountName: longhorn-vault-sync
nodeSelector:
node-role.kubernetes.io/worker: 'true'
images:
- alpine:3.20
- kind: Deployment
namespace: longhorn-system
name: oauth2-proxy-longhorn
labels:
app: oauth2-proxy-longhorn
serviceAccountName: null
serviceAccountName: longhorn-vault
nodeSelector:
node-role.kubernetes.io/worker: 'true'
images:
@ -489,13 +654,34 @@ workloads:
- registry.bstein.dev/bstein/kubectl:1.35.0
- kind: Deployment
namespace: mailu-mailserver
name: mailu-sync-listener
name: mailu-vault-sync
labels:
app: mailu-sync-listener
serviceAccountName: null
app: mailu-vault-sync
serviceAccountName: mailu-vault-sync
nodeSelector: {}
images:
- python:3.11-alpine
- alpine:3.20
- kind: DaemonSet
namespace: maintenance
name: disable-k3s-traefik
labels:
app: disable-k3s-traefik
serviceAccountName: disable-k3s-traefik
nodeSelector:
node-role.kubernetes.io/control-plane: 'true'
images:
- bitnami/kubectl@sha256:554ab88b1858e8424c55de37ad417b16f2a0e65d1607aa0f3fe3ce9b9f10b131
- kind: DaemonSet
namespace: maintenance
name: k3s-agent-restart
labels:
app: k3s-agent-restart
serviceAccountName: node-nofile
nodeSelector:
kubernetes.io/arch: arm64
node-role.kubernetes.io/worker: 'true'
images:
- bitnami/kubectl@sha256:554ab88b1858e8424c55de37ad417b16f2a0e65d1607aa0f3fe3ce9b9f10b131
- kind: DaemonSet
namespace: maintenance
name: node-image-sweeper
@ -515,6 +701,26 @@ workloads:
nodeSelector: {}
images:
- bitnami/kubectl@sha256:554ab88b1858e8424c55de37ad417b16f2a0e65d1607aa0f3fe3ce9b9f10b131
- kind: Deployment
namespace: maintenance
name: ariadne
labels:
app: ariadne
serviceAccountName: ariadne
nodeSelector:
kubernetes.io/arch: arm64
node-role.kubernetes.io/worker: 'true'
images:
- registry.bstein.dev/bstein/ariadne:0.1.0-48
- kind: Deployment
namespace: maintenance
name: maintenance-vault-sync
labels:
app: maintenance-vault-sync
serviceAccountName: maintenance-vault-sync
nodeSelector: {}
images:
- alpine:3.20
- kind: DaemonSet
namespace: monitoring
name: dcgm-exporter
@ -534,12 +740,21 @@ workloads:
jetson: 'true'
images:
- python:3.10-slim
- kind: Deployment
namespace: monitoring
name: monitoring-vault-sync
labels:
app: monitoring-vault-sync
serviceAccountName: monitoring-vault-sync
nodeSelector: {}
images:
- alpine:3.20
- kind: Deployment
namespace: monitoring
name: postmark-exporter
labels:
app: postmark-exporter
serviceAccountName: null
serviceAccountName: monitoring-vault-sync
nodeSelector: {}
images:
- python:3.12-alpine
@ -558,7 +773,7 @@ workloads:
name: nextcloud
labels:
app: nextcloud
serviceAccountName: null
serviceAccountName: nextcloud-vault
nodeSelector:
hardware: rpi5
images:
@ -568,7 +783,7 @@ workloads:
name: outline
labels:
app: outline
serviceAccountName: null
serviceAccountName: outline-vault
nodeSelector:
node-role.kubernetes.io/worker: 'true'
images:
@ -588,7 +803,7 @@ workloads:
name: planka
labels:
app: planka
serviceAccountName: null
serviceAccountName: planka-vault
nodeSelector:
node-role.kubernetes.io/worker: 'true'
images:
@ -603,13 +818,16 @@ workloads:
node-role.kubernetes.io/worker: 'true'
images:
- postgres:15
- quay.io/prometheuscommunity/postgres-exporter:v0.15.0
- kind: Deployment
namespace: sso
name: keycloak
labels:
app: keycloak
serviceAccountName: null
nodeSelector: {}
serviceAccountName: sso-vault
nodeSelector:
kubernetes.io/arch: arm64
node-role.kubernetes.io/worker: 'true'
images:
- quay.io/keycloak/keycloak:26.0.7
- kind: Deployment
@ -617,17 +835,26 @@ workloads:
name: oauth2-proxy
labels:
app: oauth2-proxy
serviceAccountName: null
serviceAccountName: sso-vault
nodeSelector:
node-role.kubernetes.io/worker: 'true'
images:
- quay.io/oauth2-proxy/oauth2-proxy:v7.6.0
- registry.bstein.dev/tools/oauth2-proxy-vault:v7.6.0
- kind: Deployment
namespace: sso
name: sso-vault-sync
labels:
app: sso-vault-sync
serviceAccountName: sso-vault-sync
nodeSelector: {}
images:
- alpine:3.20
- kind: StatefulSet
namespace: sso
name: openldap
labels:
app: openldap
serviceAccountName: null
serviceAccountName: sso-vault
nodeSelector:
kubernetes.io/arch: arm64
node-role.kubernetes.io/worker: 'true'
@ -640,7 +867,7 @@ workloads:
app: sui-metrics
serviceAccountName: sui-metrics
nodeSelector:
kubernetes.io/hostname: titan-24
hardware: rpi5
images:
- victoriametrics/vmagent:v1.103.0
- kind: Deployment
@ -648,6 +875,8 @@ workloads:
name: traefik
labels:
app: traefik
app.kubernetes.io/instance: traefik-kube-system
app.kubernetes.io/name: traefik
serviceAccountName: traefik-ingress-controller
nodeSelector:
node-role.kubernetes.io/worker: 'true'
@ -669,8 +898,10 @@ workloads:
name: vaultwarden
labels:
app: vaultwarden
serviceAccountName: null
nodeSelector: {}
serviceAccountName: vaultwarden-vault
nodeSelector:
kubernetes.io/arch: arm64
node-role.kubernetes.io/worker: 'true'
images:
- vaultwarden/server:1.35.2
services:
@ -1040,6 +1271,36 @@ services:
port: 3333
targetPort: 3333
protocol: TCP
- namespace: crypto
name: wallet-monero-temp
type: ClusterIP
selector:
app: wallet-monero-temp
ports:
- name: rpc
port: 18083
targetPort: 18083
protocol: TCP
- namespace: finance
name: actual-budget
type: ClusterIP
selector:
app: actual-budget
ports:
- name: http
port: 80
targetPort: 5006
protocol: TCP
- namespace: finance
name: firefly
type: ClusterIP
selector:
app: firefly
ports:
- name: http
port: 80
targetPort: 8080
protocol: TCP
- namespace: flux-system
name: notification-controller
type: ClusterIP
@ -1082,7 +1343,7 @@ services:
protocol: TCP
- namespace: gitea
name: gitea-ssh
type: NodePort
type: LoadBalancer
selector:
app: gitea
ports:
@ -1090,6 +1351,16 @@ services:
port: 2242
targetPort: 2242
protocol: TCP
- namespace: health
name: wger
type: ClusterIP
selector:
app: wger
ports:
- name: http
port: 80
targetPort: http
protocol: TCP
- namespace: jellyfin
name: jellyfin
type: ClusterIP
@ -1124,21 +1395,6 @@ services:
port: 50000
targetPort: 50000
protocol: TCP
- namespace: kube-system
name: traefik
type: LoadBalancer
selector:
app.kubernetes.io/instance: traefik-kube-system
app.kubernetes.io/name: traefik
ports:
- name: web
port: 80
targetPort: web
protocol: TCP
- name: websecure
port: 443
targetPort: websecure
protocol: TCP
- namespace: logging
name: oauth2-proxy-logs
type: ClusterIP
@ -1191,15 +1447,15 @@ services:
port: 4190
targetPort: 4190
protocol: TCP
- namespace: mailu-mailserver
name: mailu-sync-listener
- namespace: maintenance
name: ariadne
type: ClusterIP
selector:
app: mailu-sync-listener
app: ariadne
ports:
- name: http
port: 8080
targetPort: 8080
port: 80
targetPort: http
protocol: TCP
- namespace: monitoring
name: dcgm-exporter
@ -1291,6 +1547,10 @@ services:
port: 5432
targetPort: 5432
protocol: TCP
- name: metrics
port: 9187
targetPort: 9187
protocol: TCP
- namespace: sso
name: keycloak
type: ClusterIP
@ -1335,6 +1595,20 @@ services:
port: 8429
targetPort: 8429
protocol: TCP
- namespace: traefik
name: traefik
type: LoadBalancer
selector:
app: traefik
ports:
- name: web
port: 80
targetPort: web
protocol: TCP
- name: websecure
port: 443
targetPort: websecure
protocol: TCP
- namespace: traefik
name: traefik-metrics
type: ClusterIP
@ -1447,6 +1721,19 @@ http_endpoints:
kind: Ingress
name: bstein-dev-home
source: bstein-dev-home
- host: budget.bstein.dev
path: /
backend:
namespace: finance
service: actual-budget
port: 80
workloads:
- kind: Deployment
name: actual-budget
via:
kind: Ingress
name: actual-budget
source: finance
- host: call.live.bstein.dev
path: /
backend:
@ -1499,6 +1786,19 @@ http_endpoints:
kind: Ingress
name: nextcloud
source: nextcloud
- host: health.bstein.dev
path: /
backend:
namespace: health
service: wger
port: 80
workloads:
- kind: Deployment
name: wger
via:
kind: Ingress
name: wger
source: health
- host: kit.live.bstein.dev
path: /livekit/jwt
backend:
@ -1558,6 +1858,65 @@ http_endpoints:
kind: Ingress
name: matrix-routing
source: comms
- host: live.bstein.dev
path: /_matrix/client/r0/register
backend:
namespace: comms
service: matrix-guest-register
port: 8080
workloads: &id003
- kind: Deployment
name: matrix-guest-register
via:
kind: Ingress
name: matrix-routing
source: comms
- host: live.bstein.dev
path: /_matrix/client/v3/login
backend:
namespace: comms
service: matrix-authentication-service
port: 8080
workloads: &id002
- kind: Deployment
name: matrix-authentication-service
via:
kind: Ingress
name: matrix-routing
source: comms
- host: live.bstein.dev
path: /_matrix/client/v3/logout
backend:
namespace: comms
service: matrix-authentication-service
port: 8080
workloads: *id002
via:
kind: Ingress
name: matrix-routing
source: comms
- host: live.bstein.dev
path: /_matrix/client/v3/refresh
backend:
namespace: comms
service: matrix-authentication-service
port: 8080
workloads: *id002
via:
kind: Ingress
name: matrix-routing
source: comms
- host: live.bstein.dev
path: /_matrix/client/v3/register
backend:
namespace: comms
service: matrix-guest-register
port: 8080
workloads: *id003
via:
kind: Ingress
name: matrix-routing
source: comms
- host: logs.bstein.dev
path: /
backend:
@ -1601,9 +1960,7 @@ http_endpoints:
namespace: comms
service: matrix-authentication-service
port: 8080
workloads: &id002
- kind: Deployment
name: matrix-authentication-service
workloads: *id002
via:
kind: Ingress
name: matrix-routing
@ -1647,9 +2004,7 @@ http_endpoints:
namespace: comms
service: matrix-guest-register
port: 8080
workloads: &id003
- kind: Deployment
name: matrix-guest-register
workloads: *id003
via:
kind: Ingress
name: matrix-routing
@ -1722,6 +2077,19 @@ http_endpoints:
kind: Ingress
name: monerod
source: monerod
- host: money.bstein.dev
path: /
backend:
namespace: finance
service: firefly
port: 80
workloads:
- kind: Deployment
name: firefly
via:
kind: Ingress
name: firefly
source: finance
- host: notes.bstein.dev
path: /
backend:
@ -1845,7 +2213,6 @@ helmrelease_host_hints:
- live.bstein.dev
- matrix.live.bstein.dev
comms:comms/othrys-synapse:
- bstein.dev
- kit.live.bstein.dev
- live.bstein.dev
- matrix.live.bstein.dev
@ -1856,6 +2223,8 @@ helmrelease_host_hints:
- registry.bstein.dev
logging:logging/data-prepper:
- registry.bstein.dev
longhorn:longhorn-system/longhorn:
- registry.bstein.dev
mailu:mailu-mailserver/mailu:
- bstein.dev
- mail.bstein.dev
@ -1863,5 +2232,8 @@ helmrelease_host_hints:
- alerts.bstein.dev
monitoring:monitoring/grafana:
- bstein.dev
- mail.bstein.dev
- metrics.bstein.dev
- sso.bstein.dev
monitoring:monitoring/kube-state-metrics:
- atlas.bstein.dev

View File

@ -17,6 +17,11 @@ flowchart LR
host_bstein_dev --> svc_bstein_dev_home_bstein_dev_home_backend
wl_bstein_dev_home_bstein_dev_home_backend["bstein-dev-home/bstein-dev-home-backend (Deployment)"]
svc_bstein_dev_home_bstein_dev_home_backend --> wl_bstein_dev_home_bstein_dev_home_backend
host_budget_bstein_dev["budget.bstein.dev"]
svc_finance_actual_budget["finance/actual-budget (Service)"]
host_budget_bstein_dev --> svc_finance_actual_budget
wl_finance_actual_budget["finance/actual-budget (Deployment)"]
svc_finance_actual_budget --> wl_finance_actual_budget
host_call_live_bstein_dev["call.live.bstein.dev"]
svc_comms_element_call["comms/element-call (Service)"]
host_call_live_bstein_dev --> svc_comms_element_call
@ -37,6 +42,11 @@ flowchart LR
host_cloud_bstein_dev --> svc_nextcloud_nextcloud
wl_nextcloud_nextcloud["nextcloud/nextcloud (Deployment)"]
svc_nextcloud_nextcloud --> wl_nextcloud_nextcloud
host_health_bstein_dev["health.bstein.dev"]
svc_health_wger["health/wger (Service)"]
host_health_bstein_dev --> svc_health_wger
wl_health_wger["health/wger (Deployment)"]
svc_health_wger --> wl_health_wger
host_kit_live_bstein_dev["kit.live.bstein.dev"]
svc_comms_livekit_token_service["comms/livekit-token-service (Service)"]
host_kit_live_bstein_dev --> svc_comms_livekit_token_service
@ -50,6 +60,14 @@ flowchart LR
host_live_bstein_dev --> svc_comms_matrix_wellknown
svc_comms_othrys_synapse_matrix_synapse["comms/othrys-synapse-matrix-synapse (Service)"]
host_live_bstein_dev --> svc_comms_othrys_synapse_matrix_synapse
svc_comms_matrix_guest_register["comms/matrix-guest-register (Service)"]
host_live_bstein_dev --> svc_comms_matrix_guest_register
wl_comms_matrix_guest_register["comms/matrix-guest-register (Deployment)"]
svc_comms_matrix_guest_register --> wl_comms_matrix_guest_register
svc_comms_matrix_authentication_service["comms/matrix-authentication-service (Service)"]
host_live_bstein_dev --> svc_comms_matrix_authentication_service
wl_comms_matrix_authentication_service["comms/matrix-authentication-service (Deployment)"]
svc_comms_matrix_authentication_service --> wl_comms_matrix_authentication_service
host_logs_bstein_dev["logs.bstein.dev"]
svc_logging_oauth2_proxy_logs["logging/oauth2-proxy-logs (Service)"]
host_logs_bstein_dev --> svc_logging_oauth2_proxy_logs
@ -64,21 +82,20 @@ flowchart LR
svc_mailu_mailserver_mailu_front["mailu-mailserver/mailu-front (Service)"]
host_mail_bstein_dev --> svc_mailu_mailserver_mailu_front
host_matrix_live_bstein_dev["matrix.live.bstein.dev"]
svc_comms_matrix_authentication_service["comms/matrix-authentication-service (Service)"]
host_matrix_live_bstein_dev --> svc_comms_matrix_authentication_service
wl_comms_matrix_authentication_service["comms/matrix-authentication-service (Deployment)"]
svc_comms_matrix_authentication_service --> wl_comms_matrix_authentication_service
host_matrix_live_bstein_dev --> svc_comms_matrix_wellknown
host_matrix_live_bstein_dev --> svc_comms_othrys_synapse_matrix_synapse
svc_comms_matrix_guest_register["comms/matrix-guest-register (Service)"]
host_matrix_live_bstein_dev --> svc_comms_matrix_guest_register
wl_comms_matrix_guest_register["comms/matrix-guest-register (Deployment)"]
svc_comms_matrix_guest_register --> wl_comms_matrix_guest_register
host_monero_bstein_dev["monero.bstein.dev"]
svc_crypto_monerod["crypto/monerod (Service)"]
host_monero_bstein_dev --> svc_crypto_monerod
wl_crypto_monerod["crypto/monerod (Deployment)"]
svc_crypto_monerod --> wl_crypto_monerod
host_money_bstein_dev["money.bstein.dev"]
svc_finance_firefly["finance/firefly (Service)"]
host_money_bstein_dev --> svc_finance_firefly
wl_finance_firefly["finance/firefly (Deployment)"]
svc_finance_firefly --> wl_finance_firefly
host_notes_bstein_dev["notes.bstein.dev"]
svc_outline_outline["outline/outline (Service)"]
host_notes_bstein_dev --> svc_outline_outline
@ -143,19 +160,29 @@ flowchart LR
svc_comms_livekit
wl_comms_livekit
svc_comms_othrys_synapse_matrix_synapse
svc_comms_matrix_authentication_service
wl_comms_matrix_authentication_service
svc_comms_matrix_guest_register
wl_comms_matrix_guest_register
svc_comms_matrix_authentication_service
wl_comms_matrix_authentication_service
end
subgraph crypto[crypto]
svc_crypto_monerod
wl_crypto_monerod
end
subgraph finance[finance]
svc_finance_actual_budget
wl_finance_actual_budget
svc_finance_firefly
wl_finance_firefly
end
subgraph gitea[gitea]
svc_gitea_gitea
wl_gitea_gitea
end
subgraph health[health]
svc_health_wger
wl_health_wger
end
subgraph jellyfin[jellyfin]
svc_jellyfin_pegasus
wl_jellyfin_pegasus

View File

@ -20,6 +20,7 @@ import subprocess
import sys
from dataclasses import dataclass
from pathlib import Path
import shutil
from typing import Any, Iterable
import yaml
@ -60,6 +61,12 @@ def _run(cmd: list[str], *, cwd: Path) -> str:
    return res.stdout


def _sync_tree(source: Path, dest: Path) -> None:
    if dest.exists():
        shutil.rmtree(dest)
    shutil.copytree(source, dest)


def kustomize_build(path: Path) -> str:
    rel = path.relative_to(REPO_ROOT)
    try:
@ -472,6 +479,11 @@ def main() -> int:
        action="store_true",
        help="Write generated files (otherwise just print a summary).",
    )
    ap.add_argument(
        "--sync-comms",
        action="store_true",
        help="Mirror rendered knowledge into services/comms/knowledge for atlasbot.",
    )
    args = ap.parse_args()

    out_dir = REPO_ROOT / args.out
@ -549,6 +561,11 @@ def main() -> int:
    print(f"Wrote {summary_path.relative_to(REPO_ROOT)}")
    print(f"Wrote {diagram_path.relative_to(REPO_ROOT)}")
    print(f"Wrote {runbooks_json_path.relative_to(REPO_ROOT)}")
    if args.sync_comms:
        comms_dir = REPO_ROOT / "services" / "comms" / "knowledge"
        _sync_tree(out_dir, comms_dir)
        print(f"Synced {out_dir.relative_to(REPO_ROOT)} -> {comms_dir.relative_to(REPO_ROOT)}")

    return 0
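
With the new flag, rendering and mirroring happen in one pass. A minimal sketch of the invocation, assuming it is run from the repo root; the script path comes from the generated-file header, and any other flags the script accepts are elided in this diff.

```python
import subprocess

# Hypothetical invocation: render the knowledge files and mirror them into
# services/comms/knowledge via the --sync-comms flag added above.
subprocess.run(
    ["python", "scripts/knowledge_render_atlas.py", "--sync-comms"],
    check=True,
)
```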

View File

@ -16,7 +16,7 @@ spec:
labels:
app: atlasbot
annotations:
checksum/atlasbot-configmap: manual-atlasbot-8
checksum/atlasbot-configmap: manual-atlasbot-9
vault.hashicorp.com/agent-inject: "true"
vault.hashicorp.com/role: "comms"
vault.hashicorp.com/agent-inject-secret-turn-secret: "kv/data/atlas/comms/turn-shared-secret"
@ -73,6 +73,8 @@ spec:
value: /kb
- name: VM_URL
value: http://victoria-metrics-single-server.monitoring.svc.cluster.local:8428
- name: ARIADNE_STATE_URL
value: http://ariadne.maintenance.svc.cluster.local/api/internal/cluster/state
- name: BOT_USER
value: atlasbot
- name: BOT_MENTIONS
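
The new ARIADNE_STATE_URL value points atlasbot at ariadne's cluster-state endpoint in the maintenance namespace. A minimal sketch of how the bot might consume it, assuming a plain urllib poll; the actual client code is not part of this diff.

```python
import json
import os
import urllib.request

# Default mirrors the value injected by the Deployment above.
ARIADNE_STATE_URL = os.environ.get(
    "ARIADNE_STATE_URL",
    "http://ariadne.maintenance.svc.cluster.local/api/internal/cluster/state",
)

def fetch_cluster_state(timeout: float = 5.0) -> dict:
    """Return ariadne's cluster-state document, or {} if the endpoint is unavailable."""
    try:
        with urllib.request.urlopen(ARIADNE_STATE_URL, timeout=timeout) as resp:
            return json.loads(resp.read().decode("utf-8"))
    except (OSError, ValueError):
        # OSError covers URLError/HTTPError; ValueError covers malformed JSON.
        return {}
```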

View File

@ -1,8 +1,8 @@
{
"counts": {
"helmrelease_host_hints": 17,
"http_endpoints": 37,
"services": 43,
"workloads": 54
"helmrelease_host_hints": 19,
"http_endpoints": 45,
"services": 47,
"workloads": 74
}
}

File diff suppressed because it is too large.

View File

@ -1,4 +1,4 @@
# services/comms/knowledge/catalog/atlas.yaml
# knowledge/catalog/atlas.yaml
# Generated by scripts/knowledge_render_atlas.py (do not edit by hand)
cluster: atlas
sources:
@ -8,6 +8,15 @@ sources:
- name: bstein-dev-home
path: services/bstein-dev-home
targetNamespace: bstein-dev-home
- name: bstein-dev-home-migrations
path: services/bstein-dev-home/migrations
targetNamespace: bstein-dev-home
- name: cert-manager
path: infrastructure/cert-manager
targetNamespace: cert-manager
- name: cert-manager-cleanup
path: infrastructure/cert-manager/cleanup
targetNamespace: cert-manager
- name: comms
path: services/comms
targetNamespace: comms
@ -17,6 +26,9 @@ sources:
- name: crypto
path: services/crypto
targetNamespace: crypto
- name: finance
path: services/finance
targetNamespace: finance
- name: flux-system
path: clusters/atlas/flux-system
targetNamespace: null
@ -29,6 +41,9 @@ sources:
- name: harbor
path: services/harbor
targetNamespace: harbor
- name: health
path: services/health
targetNamespace: health
- name: helm
path: infrastructure/sources/helm
targetNamespace: flux-system
@ -44,6 +59,12 @@ sources:
- name: logging
path: services/logging
targetNamespace: null
- name: longhorn
path: infrastructure/longhorn/core
targetNamespace: longhorn-system
- name: longhorn-adopt
path: infrastructure/longhorn/adopt
targetNamespace: longhorn-system
- name: longhorn-ui
path: infrastructure/longhorn/ui-ingress
targetNamespace: longhorn-system
@ -98,9 +119,15 @@ sources:
- name: vault-csi
path: infrastructure/vault-csi
targetNamespace: kube-system
- name: vault-injector
path: infrastructure/vault-injector
targetNamespace: vault
- name: vaultwarden
path: services/vaultwarden
targetNamespace: vaultwarden
- name: wallet-monero-temp
path: services/crypto/wallet-monero-temp
targetNamespace: crypto
- name: xmr-miner
path: services/crypto/xmr-miner
targetNamespace: crypto
@ -124,7 +151,7 @@ workloads:
kubernetes.io/arch: arm64
node-role.kubernetes.io/worker: 'true'
images:
- registry.bstein.dev/bstein/bstein-dev-home-backend:0.1.1-92
- registry.bstein.dev/bstein/bstein-dev-home-backend:0.1.1-157
- kind: Deployment
namespace: bstein-dev-home
name: bstein-dev-home-frontend
@ -135,13 +162,22 @@ workloads:
kubernetes.io/arch: arm64
node-role.kubernetes.io/worker: 'true'
images:
- registry.bstein.dev/bstein/bstein-dev-home-frontend:0.1.1-92
- registry.bstein.dev/bstein/bstein-dev-home-frontend:0.1.1-157
- kind: Deployment
namespace: bstein-dev-home
name: bstein-dev-home-vault-sync
labels:
app: bstein-dev-home-vault-sync
serviceAccountName: bstein-dev-home-vault-sync
nodeSelector: {}
images:
- alpine:3.20
- kind: Deployment
namespace: bstein-dev-home
name: chat-ai-gateway
labels:
app: chat-ai-gateway
serviceAccountName: null
serviceAccountName: bstein-dev-home
nodeSelector:
kubernetes.io/arch: arm64
node-role.kubernetes.io/worker: 'true'
@ -157,12 +193,21 @@ workloads:
hardware: rpi5
images:
- python:3.11-slim
- kind: Deployment
namespace: comms
name: comms-vault-sync
labels:
app: comms-vault-sync
serviceAccountName: comms-vault
nodeSelector: {}
images:
- alpine:3.20
- kind: Deployment
namespace: comms
name: coturn
labels:
app: coturn
serviceAccountName: null
serviceAccountName: comms-vault
nodeSelector:
hardware: rpi5
images:
@ -182,7 +227,7 @@ workloads:
name: livekit
labels:
app: livekit
serviceAccountName: null
serviceAccountName: comms-vault
nodeSelector:
hardware: rpi5
images:
@ -192,17 +237,17 @@ workloads:
name: livekit-token-service
labels:
app: livekit-token-service
serviceAccountName: null
serviceAccountName: comms-vault
nodeSelector:
hardware: rpi5
images:
- ghcr.io/element-hq/lk-jwt-service:0.3.0
- registry.bstein.dev/tools/lk-jwt-service-vault:0.3.0
- kind: Deployment
namespace: comms
name: matrix-authentication-service
labels:
app: matrix-authentication-service
serviceAccountName: null
serviceAccountName: comms-vault
nodeSelector:
hardware: rpi5
images:
@ -212,7 +257,7 @@ workloads:
name: matrix-guest-register
labels:
app.kubernetes.io/name: matrix-guest-register
serviceAccountName: null
serviceAccountName: comms-vault
nodeSelector: {}
images:
- python:3.11-slim
@ -235,12 +280,21 @@ workloads:
node-role.kubernetes.io/worker: 'true'
images:
- ghcr.io/tari-project/xmrig@sha256:80defbfd0b640d604c91cb5101d3642db7928e1e68ee3c6b011289b3565a39d9
- kind: Deployment
namespace: crypto
name: crypto-vault-sync
labels:
app: crypto-vault-sync
serviceAccountName: crypto-vault-sync
nodeSelector: {}
images:
- alpine:3.20
- kind: Deployment
namespace: crypto
name: monero-p2pool
labels:
app: monero-p2pool
serviceAccountName: null
serviceAccountName: crypto-vault-sync
nodeSelector:
node-role.kubernetes.io/worker: 'true'
images:
@ -255,6 +309,38 @@ workloads:
node-role.kubernetes.io/worker: 'true'
images:
- registry.bstein.dev/crypto/monerod:0.18.4.1
- kind: Deployment
namespace: crypto
name: wallet-monero-temp
labels:
app: wallet-monero-temp
serviceAccountName: crypto-vault-sync
nodeSelector:
node-role.kubernetes.io/worker: 'true'
images:
- registry.bstein.dev/crypto/monero-wallet-rpc:0.18.4.1
- kind: Deployment
namespace: finance
name: actual-budget
labels:
app: actual-budget
serviceAccountName: finance-vault
nodeSelector:
kubernetes.io/arch: arm64
node-role.kubernetes.io/worker: 'true'
images:
- actualbudget/actual-server:26.1.0-alpine@sha256:34aae5813fdfee12af2a50c4d0667df68029f1d61b90f45f282473273eb70d0d
- kind: Deployment
namespace: finance
name: firefly
labels:
app: firefly
serviceAccountName: finance-vault
nodeSelector:
kubernetes.io/arch: arm64
node-role.kubernetes.io/worker: 'true'
images:
- fireflyiii/core:version-6.4.15
- kind: Deployment
namespace: flux-system
name: helm-controller
@ -344,17 +430,38 @@ workloads:
name: gitea
labels:
app: gitea
serviceAccountName: null
serviceAccountName: gitea-vault
nodeSelector:
node-role.kubernetes.io/worker: 'true'
images:
- gitea/gitea:1.23
- kind: Deployment
namespace: harbor
name: harbor-vault-sync
labels:
app: harbor-vault-sync
serviceAccountName: harbor-vault-sync
nodeSelector: {}
images:
- alpine:3.20
- kind: Deployment
namespace: health
name: wger
labels:
app: wger
serviceAccountName: health-vault-sync
nodeSelector:
kubernetes.io/arch: arm64
node-role.kubernetes.io/worker: 'true'
images:
- nginx:1.27.5-alpine@sha256:65645c7bb6a0661892a8b03b89d0743208a18dd2f3f17a54ef4b76fb8e2f2a10
- wger/server@sha256:710588b78af4e0aa0b4d8a8061e4563e16eae80eeaccfe7f9e0d9cbdd7f0cbc5
- kind: Deployment
namespace: jellyfin
name: jellyfin
labels:
app: jellyfin
serviceAccountName: null
serviceAccountName: pegasus-vault-sync
nodeSelector: {}
images:
- docker.io/jellyfin/jellyfin:10.11.5
@ -363,13 +470,22 @@ workloads:
name: pegasus
labels:
app: pegasus
serviceAccountName: null
serviceAccountName: pegasus-vault-sync
nodeSelector:
kubernetes.io/arch: arm64
node-role.kubernetes.io/worker: 'true'
images:
- alpine:3.20
- registry.bstein.dev/streaming/pegasus:1.2.32
- registry.bstein.dev/streaming/pegasus-vault:1.2.32
- kind: Deployment
namespace: jellyfin
name: pegasus-vault-sync
labels:
app: pegasus-vault-sync
serviceAccountName: pegasus-vault-sync
nodeSelector: {}
images:
- alpine:3.20
- kind: Deployment
namespace: jenkins
name: jenkins
@ -381,6 +497,26 @@ workloads:
node-role.kubernetes.io/worker: 'true'
images:
- jenkins/jenkins:2.528.3-jdk21
- kind: Deployment
namespace: jenkins
name: jenkins-vault-sync
labels:
app: jenkins-vault-sync
serviceAccountName: jenkins-vault-sync
nodeSelector:
kubernetes.io/arch: arm64
node-role.kubernetes.io/worker: 'true'
images:
- alpine:3.20
- kind: DaemonSet
namespace: kube-system
name: ntp-sync
labels:
app: ntp-sync
serviceAccountName: null
nodeSelector: {}
images:
- public.ecr.aws/docker/library/busybox:1.36.1
- kind: DaemonSet
namespace: kube-system
name: nvidia-device-plugin-jetson
@ -427,6 +563,16 @@ workloads:
kubernetes.io/os: linux
images:
- hashicorp/vault-csi-provider:1.7.0
- kind: Deployment
namespace: kube-system
name: coredns
labels:
k8s-app: kube-dns
serviceAccountName: coredns
nodeSelector:
kubernetes.io/os: linux
images:
- registry.bstein.dev/infra/coredns:1.12.1
- kind: DaemonSet
namespace: logging
name: node-image-gc-rpi4
@ -457,22 +603,41 @@ workloads:
hardware: rpi5
images:
- bitnami/kubectl@sha256:554ab88b1858e8424c55de37ad417b16f2a0e65d1607aa0f3fe3ce9b9f10b131
- kind: Deployment
namespace: logging
name: logging-vault-sync
labels:
app: logging-vault-sync
serviceAccountName: logging-vault-sync
nodeSelector: {}
images:
- alpine:3.20
- kind: Deployment
namespace: logging
name: oauth2-proxy-logs
labels:
app: oauth2-proxy-logs
serviceAccountName: null
serviceAccountName: logging-vault-sync
nodeSelector:
node-role.kubernetes.io/worker: 'true'
images:
- quay.io/oauth2-proxy/oauth2-proxy:v7.6.0
- registry.bstein.dev/tools/oauth2-proxy-vault:v7.6.0
- kind: Deployment
namespace: longhorn-system
name: longhorn-vault-sync
labels:
app: longhorn-vault-sync
serviceAccountName: longhorn-vault-sync
nodeSelector:
node-role.kubernetes.io/worker: 'true'
images:
- alpine:3.20
- kind: Deployment
namespace: longhorn-system
name: oauth2-proxy-longhorn
labels:
app: oauth2-proxy-longhorn
serviceAccountName: null
serviceAccountName: longhorn-vault
nodeSelector:
node-role.kubernetes.io/worker: 'true'
images:
@ -489,13 +654,34 @@ workloads:
- registry.bstein.dev/bstein/kubectl:1.35.0
- kind: Deployment
namespace: mailu-mailserver
name: mailu-sync-listener
name: mailu-vault-sync
labels:
app: mailu-sync-listener
serviceAccountName: null
app: mailu-vault-sync
serviceAccountName: mailu-vault-sync
nodeSelector: {}
images:
- python:3.11-alpine
- alpine:3.20
- kind: DaemonSet
namespace: maintenance
name: disable-k3s-traefik
labels:
app: disable-k3s-traefik
serviceAccountName: disable-k3s-traefik
nodeSelector:
node-role.kubernetes.io/control-plane: 'true'
images:
- bitnami/kubectl@sha256:554ab88b1858e8424c55de37ad417b16f2a0e65d1607aa0f3fe3ce9b9f10b131
- kind: DaemonSet
namespace: maintenance
name: k3s-agent-restart
labels:
app: k3s-agent-restart
serviceAccountName: node-nofile
nodeSelector:
kubernetes.io/arch: arm64
node-role.kubernetes.io/worker: 'true'
images:
- bitnami/kubectl@sha256:554ab88b1858e8424c55de37ad417b16f2a0e65d1607aa0f3fe3ce9b9f10b131
- kind: DaemonSet
namespace: maintenance
name: node-image-sweeper
@ -515,6 +701,26 @@ workloads:
nodeSelector: {}
images:
- bitnami/kubectl@sha256:554ab88b1858e8424c55de37ad417b16f2a0e65d1607aa0f3fe3ce9b9f10b131
- kind: Deployment
namespace: maintenance
name: ariadne
labels:
app: ariadne
serviceAccountName: ariadne
nodeSelector:
kubernetes.io/arch: arm64
node-role.kubernetes.io/worker: 'true'
images:
- registry.bstein.dev/bstein/ariadne:0.1.0-48
- kind: Deployment
namespace: maintenance
name: maintenance-vault-sync
labels:
app: maintenance-vault-sync
serviceAccountName: maintenance-vault-sync
nodeSelector: {}
images:
- alpine:3.20
- kind: DaemonSet
namespace: monitoring
name: dcgm-exporter
@ -534,12 +740,21 @@ workloads:
jetson: 'true'
images:
- python:3.10-slim
- kind: Deployment
namespace: monitoring
name: monitoring-vault-sync
labels:
app: monitoring-vault-sync
serviceAccountName: monitoring-vault-sync
nodeSelector: {}
images:
- alpine:3.20
- kind: Deployment
namespace: monitoring
name: postmark-exporter
labels:
app: postmark-exporter
serviceAccountName: null
serviceAccountName: monitoring-vault-sync
nodeSelector: {}
images:
- python:3.12-alpine
@ -558,7 +773,7 @@ workloads:
name: nextcloud
labels:
app: nextcloud
serviceAccountName: null
serviceAccountName: nextcloud-vault
nodeSelector:
hardware: rpi5
images:
@ -568,7 +783,7 @@ workloads:
name: outline
labels:
app: outline
serviceAccountName: null
serviceAccountName: outline-vault
nodeSelector:
node-role.kubernetes.io/worker: 'true'
images:
@ -588,7 +803,7 @@ workloads:
name: planka
labels:
app: planka
serviceAccountName: null
serviceAccountName: planka-vault
nodeSelector:
node-role.kubernetes.io/worker: 'true'
images:
@ -603,13 +818,16 @@ workloads:
node-role.kubernetes.io/worker: 'true'
images:
- postgres:15
- quay.io/prometheuscommunity/postgres-exporter:v0.15.0
- kind: Deployment
namespace: sso
name: keycloak
labels:
app: keycloak
serviceAccountName: null
nodeSelector: {}
serviceAccountName: sso-vault
nodeSelector:
kubernetes.io/arch: arm64
node-role.kubernetes.io/worker: 'true'
images:
- quay.io/keycloak/keycloak:26.0.7
- kind: Deployment
@ -617,17 +835,26 @@ workloads:
name: oauth2-proxy
labels:
app: oauth2-proxy
serviceAccountName: null
serviceAccountName: sso-vault
nodeSelector:
node-role.kubernetes.io/worker: 'true'
images:
- quay.io/oauth2-proxy/oauth2-proxy:v7.6.0
- registry.bstein.dev/tools/oauth2-proxy-vault:v7.6.0
- kind: Deployment
namespace: sso
name: sso-vault-sync
labels:
app: sso-vault-sync
serviceAccountName: sso-vault-sync
nodeSelector: {}
images:
- alpine:3.20
- kind: StatefulSet
namespace: sso
name: openldap
labels:
app: openldap
serviceAccountName: null
serviceAccountName: sso-vault
nodeSelector:
kubernetes.io/arch: arm64
node-role.kubernetes.io/worker: 'true'
@ -640,7 +867,7 @@ workloads:
app: sui-metrics
serviceAccountName: sui-metrics
nodeSelector:
kubernetes.io/hostname: titan-24
hardware: rpi5
images:
- victoriametrics/vmagent:v1.103.0
- kind: Deployment
@ -648,6 +875,8 @@ workloads:
name: traefik
labels:
app: traefik
app.kubernetes.io/instance: traefik-kube-system
app.kubernetes.io/name: traefik
serviceAccountName: traefik-ingress-controller
nodeSelector:
node-role.kubernetes.io/worker: 'true'
@ -669,8 +898,10 @@ workloads:
name: vaultwarden
labels:
app: vaultwarden
serviceAccountName: null
nodeSelector: {}
serviceAccountName: vaultwarden-vault
nodeSelector:
kubernetes.io/arch: arm64
node-role.kubernetes.io/worker: 'true'
images:
- vaultwarden/server:1.35.2
services:
@ -1040,6 +1271,36 @@ services:
port: 3333
targetPort: 3333
protocol: TCP
- namespace: crypto
name: wallet-monero-temp
type: ClusterIP
selector:
app: wallet-monero-temp
ports:
- name: rpc
port: 18083
targetPort: 18083
protocol: TCP
- namespace: finance
name: actual-budget
type: ClusterIP
selector:
app: actual-budget
ports:
- name: http
port: 80
targetPort: 5006
protocol: TCP
- namespace: finance
name: firefly
type: ClusterIP
selector:
app: firefly
ports:
- name: http
port: 80
targetPort: 8080
protocol: TCP
- namespace: flux-system
name: notification-controller
type: ClusterIP
@ -1082,7 +1343,7 @@ services:
protocol: TCP
- namespace: gitea
name: gitea-ssh
type: NodePort
type: LoadBalancer
selector:
app: gitea
ports:
@ -1090,6 +1351,16 @@ services:
port: 2242
targetPort: 2242
protocol: TCP
- namespace: health
name: wger
type: ClusterIP
selector:
app: wger
ports:
- name: http
port: 80
targetPort: http
protocol: TCP
- namespace: jellyfin
name: jellyfin
type: ClusterIP
@ -1124,21 +1395,6 @@ services:
port: 50000
targetPort: 50000
protocol: TCP
- namespace: kube-system
name: traefik
type: LoadBalancer
selector:
app.kubernetes.io/instance: traefik-kube-system
app.kubernetes.io/name: traefik
ports:
- name: web
port: 80
targetPort: web
protocol: TCP
- name: websecure
port: 443
targetPort: websecure
protocol: TCP
- namespace: logging
name: oauth2-proxy-logs
type: ClusterIP
@ -1191,15 +1447,15 @@ services:
port: 4190
targetPort: 4190
protocol: TCP
- namespace: mailu-mailserver
name: mailu-sync-listener
- namespace: maintenance
name: ariadne
type: ClusterIP
selector:
app: mailu-sync-listener
app: ariadne
ports:
- name: http
port: 8080
targetPort: 8080
port: 80
targetPort: http
protocol: TCP
- namespace: monitoring
name: dcgm-exporter
@ -1291,6 +1547,10 @@ services:
port: 5432
targetPort: 5432
protocol: TCP
- name: metrics
port: 9187
targetPort: 9187
protocol: TCP
- namespace: sso
name: keycloak
type: ClusterIP
@ -1335,6 +1595,20 @@ services:
port: 8429
targetPort: 8429
protocol: TCP
- namespace: traefik
name: traefik
type: LoadBalancer
selector:
app: traefik
ports:
- name: web
port: 80
targetPort: web
protocol: TCP
- name: websecure
port: 443
targetPort: websecure
protocol: TCP
- namespace: traefik
name: traefik-metrics
type: ClusterIP
@ -1447,6 +1721,19 @@ http_endpoints:
kind: Ingress
name: bstein-dev-home
source: bstein-dev-home
- host: budget.bstein.dev
path: /
backend:
namespace: finance
service: actual-budget
port: 80
workloads:
- kind: Deployment
name: actual-budget
via:
kind: Ingress
name: actual-budget
source: finance
- host: call.live.bstein.dev
path: /
backend:
@ -1499,6 +1786,19 @@ http_endpoints:
kind: Ingress
name: nextcloud
source: nextcloud
- host: health.bstein.dev
path: /
backend:
namespace: health
service: wger
port: 80
workloads:
- kind: Deployment
name: wger
via:
kind: Ingress
name: wger
source: health
- host: kit.live.bstein.dev
path: /livekit/jwt
backend:
@ -1558,6 +1858,65 @@ http_endpoints:
kind: Ingress
name: matrix-routing
source: comms
- host: live.bstein.dev
path: /_matrix/client/r0/register
backend:
namespace: comms
service: matrix-guest-register
port: 8080
workloads: &id003
- kind: Deployment
name: matrix-guest-register
via:
kind: Ingress
name: matrix-routing
source: comms
- host: live.bstein.dev
path: /_matrix/client/v3/login
backend:
namespace: comms
service: matrix-authentication-service
port: 8080
workloads: &id002
- kind: Deployment
name: matrix-authentication-service
via:
kind: Ingress
name: matrix-routing
source: comms
- host: live.bstein.dev
path: /_matrix/client/v3/logout
backend:
namespace: comms
service: matrix-authentication-service
port: 8080
workloads: *id002
via:
kind: Ingress
name: matrix-routing
source: comms
- host: live.bstein.dev
path: /_matrix/client/v3/refresh
backend:
namespace: comms
service: matrix-authentication-service
port: 8080
workloads: *id002
via:
kind: Ingress
name: matrix-routing
source: comms
- host: live.bstein.dev
path: /_matrix/client/v3/register
backend:
namespace: comms
service: matrix-guest-register
port: 8080
workloads: *id003
via:
kind: Ingress
name: matrix-routing
source: comms
- host: logs.bstein.dev
path: /
backend:
@ -1601,9 +1960,7 @@ http_endpoints:
namespace: comms
service: matrix-authentication-service
port: 8080
workloads: &id002
- kind: Deployment
name: matrix-authentication-service
workloads: *id002
via:
kind: Ingress
name: matrix-routing
@ -1647,9 +2004,7 @@ http_endpoints:
namespace: comms
service: matrix-guest-register
port: 8080
workloads: &id003
- kind: Deployment
name: matrix-guest-register
workloads: *id003
via:
kind: Ingress
name: matrix-routing
@ -1722,6 +2077,19 @@ http_endpoints:
kind: Ingress
name: monerod
source: monerod
- host: money.bstein.dev
path: /
backend:
namespace: finance
service: firefly
port: 80
workloads:
- kind: Deployment
name: firefly
via:
kind: Ingress
name: firefly
source: finance
- host: notes.bstein.dev
path: /
backend:
@ -1845,7 +2213,6 @@ helmrelease_host_hints:
- live.bstein.dev
- matrix.live.bstein.dev
comms:comms/othrys-synapse:
- bstein.dev
- kit.live.bstein.dev
- live.bstein.dev
- matrix.live.bstein.dev
@ -1856,6 +2223,8 @@ helmrelease_host_hints:
- registry.bstein.dev
logging:logging/data-prepper:
- registry.bstein.dev
longhorn:longhorn-system/longhorn:
- registry.bstein.dev
mailu:mailu-mailserver/mailu:
- bstein.dev
- mail.bstein.dev
@ -1863,5 +2232,8 @@ helmrelease_host_hints:
- alerts.bstein.dev
monitoring:monitoring/grafana:
- bstein.dev
- mail.bstein.dev
- metrics.bstein.dev
- sso.bstein.dev
monitoring:monitoring/kube-state-metrics:
- atlas.bstein.dev

View File

@ -20,6 +20,22 @@
],
"body": "# CI: Gitea \u2192 Jenkins pipeline\n\n## What this is\nAtlas uses Gitea for source control and Jenkins for CI. Authentication is via Keycloak (SSO).\n\n## Where it is configured\n- Gitea manifests: `services/gitea/`\n- Jenkins manifests: `services/jenkins/`\n- Credential sync helpers: `scripts/gitea_cred_sync.sh`, `scripts/jenkins_cred_sync.sh`\n\n## What users do (typical flow)\n- Create a repo in Gitea.\n- Create/update a Jenkins job/pipeline that can fetch the repo.\n- Configure a webhook (or SCM polling) so pushes trigger builds.\n\n## Troubleshooting (common)\n- \u201cWebhook not firing\u201d: confirm ingress host, webhook URL, and Jenkins job is reachable.\n- \u201cAuth denied cloning\u201d: confirm Keycloak group membership and that Jenkins has a valid token/credential configured."
},
{
"path": "runbooks/comms-verify.md",
"title": "Othrys verification checklist",
"tags": [
"comms",
"matrix",
"element",
"livekit"
],
"entrypoints": [
"https://live.bstein.dev",
"https://matrix.live.bstein.dev"
],
"source_paths": [],
"body": "1) Guest join:\n- Open a private window and visit:\n `https://live.bstein.dev/#/room/#othrys:live.bstein.dev?action=join`\n- Confirm the guest join flow works and the displayname becomes `<word>-<word>`.\n\n2) Keycloak login:\n- Log in from `https://live.bstein.dev` and confirm MAS -> Keycloak -> Element redirect.\n\n3) Video rooms:\n- Start an Element Call room and confirm audio/video with a second account.\n- Check that guests can read public rooms but cannot start calls.\n\n4) Well-known:\n- `https://live.bstein.dev/.well-known/matrix/client` returns JSON.\n- `https://matrix.live.bstein.dev/.well-known/matrix/client` returns JSON.\n\n5) TURN reachability:\n- Confirm `turn.live.bstein.dev:3478` and `turns:5349` are reachable from WAN."
},
{
"path": "runbooks/kb-authoring.md",
"title": "KB authoring: what to write (and what not to)",

View File

@ -17,6 +17,11 @@ flowchart LR
host_bstein_dev --> svc_bstein_dev_home_bstein_dev_home_backend
wl_bstein_dev_home_bstein_dev_home_backend["bstein-dev-home/bstein-dev-home-backend (Deployment)"]
svc_bstein_dev_home_bstein_dev_home_backend --> wl_bstein_dev_home_bstein_dev_home_backend
host_budget_bstein_dev["budget.bstein.dev"]
svc_finance_actual_budget["finance/actual-budget (Service)"]
host_budget_bstein_dev --> svc_finance_actual_budget
wl_finance_actual_budget["finance/actual-budget (Deployment)"]
svc_finance_actual_budget --> wl_finance_actual_budget
host_call_live_bstein_dev["call.live.bstein.dev"]
svc_comms_element_call["comms/element-call (Service)"]
host_call_live_bstein_dev --> svc_comms_element_call
@ -37,6 +42,11 @@ flowchart LR
host_cloud_bstein_dev --> svc_nextcloud_nextcloud
wl_nextcloud_nextcloud["nextcloud/nextcloud (Deployment)"]
svc_nextcloud_nextcloud --> wl_nextcloud_nextcloud
host_health_bstein_dev["health.bstein.dev"]
svc_health_wger["health/wger (Service)"]
host_health_bstein_dev --> svc_health_wger
wl_health_wger["health/wger (Deployment)"]
svc_health_wger --> wl_health_wger
host_kit_live_bstein_dev["kit.live.bstein.dev"]
svc_comms_livekit_token_service["comms/livekit-token-service (Service)"]
host_kit_live_bstein_dev --> svc_comms_livekit_token_service
@ -50,6 +60,14 @@ flowchart LR
host_live_bstein_dev --> svc_comms_matrix_wellknown
svc_comms_othrys_synapse_matrix_synapse["comms/othrys-synapse-matrix-synapse (Service)"]
host_live_bstein_dev --> svc_comms_othrys_synapse_matrix_synapse
svc_comms_matrix_guest_register["comms/matrix-guest-register (Service)"]
host_live_bstein_dev --> svc_comms_matrix_guest_register
wl_comms_matrix_guest_register["comms/matrix-guest-register (Deployment)"]
svc_comms_matrix_guest_register --> wl_comms_matrix_guest_register
svc_comms_matrix_authentication_service["comms/matrix-authentication-service (Service)"]
host_live_bstein_dev --> svc_comms_matrix_authentication_service
wl_comms_matrix_authentication_service["comms/matrix-authentication-service (Deployment)"]
svc_comms_matrix_authentication_service --> wl_comms_matrix_authentication_service
host_logs_bstein_dev["logs.bstein.dev"]
svc_logging_oauth2_proxy_logs["logging/oauth2-proxy-logs (Service)"]
host_logs_bstein_dev --> svc_logging_oauth2_proxy_logs
@ -64,21 +82,20 @@ flowchart LR
svc_mailu_mailserver_mailu_front["mailu-mailserver/mailu-front (Service)"]
host_mail_bstein_dev --> svc_mailu_mailserver_mailu_front
host_matrix_live_bstein_dev["matrix.live.bstein.dev"]
svc_comms_matrix_authentication_service["comms/matrix-authentication-service (Service)"]
host_matrix_live_bstein_dev --> svc_comms_matrix_authentication_service
wl_comms_matrix_authentication_service["comms/matrix-authentication-service (Deployment)"]
svc_comms_matrix_authentication_service --> wl_comms_matrix_authentication_service
host_matrix_live_bstein_dev --> svc_comms_matrix_wellknown
host_matrix_live_bstein_dev --> svc_comms_othrys_synapse_matrix_synapse
svc_comms_matrix_guest_register["comms/matrix-guest-register (Service)"]
host_matrix_live_bstein_dev --> svc_comms_matrix_guest_register
wl_comms_matrix_guest_register["comms/matrix-guest-register (Deployment)"]
svc_comms_matrix_guest_register --> wl_comms_matrix_guest_register
host_monero_bstein_dev["monero.bstein.dev"]
svc_crypto_monerod["crypto/monerod (Service)"]
host_monero_bstein_dev --> svc_crypto_monerod
wl_crypto_monerod["crypto/monerod (Deployment)"]
svc_crypto_monerod --> wl_crypto_monerod
host_money_bstein_dev["money.bstein.dev"]
svc_finance_firefly["finance/firefly (Service)"]
host_money_bstein_dev --> svc_finance_firefly
wl_finance_firefly["finance/firefly (Deployment)"]
svc_finance_firefly --> wl_finance_firefly
host_notes_bstein_dev["notes.bstein.dev"]
svc_outline_outline["outline/outline (Service)"]
host_notes_bstein_dev --> svc_outline_outline
@ -143,19 +160,29 @@ flowchart LR
svc_comms_livekit
wl_comms_livekit
svc_comms_othrys_synapse_matrix_synapse
svc_comms_matrix_authentication_service
wl_comms_matrix_authentication_service
svc_comms_matrix_guest_register
wl_comms_matrix_guest_register
svc_comms_matrix_authentication_service
wl_comms_matrix_authentication_service
end
subgraph crypto[crypto]
svc_crypto_monerod
wl_crypto_monerod
end
subgraph finance[finance]
svc_finance_actual_budget
wl_finance_actual_budget
svc_finance_firefly
wl_finance_firefly
end
subgraph gitea[gitea]
svc_gitea_gitea
wl_gitea_gitea
end
subgraph health[health]
svc_health_wger
wl_health_wger
end
subgraph jellyfin[jellyfin]
svc_jellyfin_pegasus
wl_jellyfin_pegasus

View File

@ -0,0 +1,26 @@
# Metis (node recovery)
## Node classes (current map)
- rpi5 Ubuntu workers: titan-04,05,06,07,08,09,10,11,20,21 (Ubuntu 24.04.3, k3s agent)
- rpi5 control-plane: titan-0a/0b/0c (Ubuntu 24.04.1, k3s server, control-plane taint)
- rpi4 Armbian longhorn: titan-13/15/17/19 (Armbian 6.6.x, k3s agent, longhorn disks)
- rpi4 Armbian standard: titan-12/14/18 (Armbian 6.6.x, k3s agent)
- amd64 agents: titan-22/24 (Debian 13, k3s agent)
- External/non-cluster: tethys, titan-db, titan-jh, oceanus/titan-23, future titan-20/21 (when added), plus any newcomers.
## Longhorn disk UUIDs (critical nodes)
- titan-13: /mnt/astreae UUID=6031fa8b-f28c-45c3-b7bc-6133300e07c6 (ext4); /mnt/asteria UUID=cbd4989d-62b5-4741-8b2a-28fdae259cae (ext4)
- titan-15: /mnt/astreae UUID=f3362f14-5822-449f-944b-ac570b5cd615 (ext4); /mnt/asteria UUID=9c5316e6-f847-4884-b502-11f2d0d15d6f (ext4)
- titan-17: /mnt/astreae UUID=1fecdade-08b0-49cb-9ae3-be6c188b0a96 (ext4); /mnt/asteria UUID=2fe9f613-d372-47ca-b84f-82084e4edda0 (ext4)
- titan-19: /mnt/astreae UUID=4890abb9-dda2-4f4f-9c0f-081ee82849cf (ext4); /mnt/asteria UUID=2b4ea28d-b0e6-4fa3-841b-cd7067ae9153 (ext4)
## Metis repo (~/Development/metis)
- CLI skeleton in Go (`cmd/metis`), inventory loader (`pkg/inventory`), plan builder (`pkg/plan`).
- `inventory.example.yaml` shows expected schema (classes + per-node overlay, Longhorn disks, labels, taints).
- `AGENTS.md` in repo is untracked and holds raw notes.
## Next implementation steps
- Add per-class golden image refs and checksums (Harbor or file://) when ready.
- Implement burn execution: download with checksum, write via dd/etcher-equivalent, mount boot/root to inject hostname/IP/k3s tokens/labels/taints, journald/GC drop-ins, and Longhorn fstab entries. Add Windows writer (diskpart + wmic) and Linux writer (dd + sgdisk) paths.
- Add Keycloak/SSH bootstrap: ensure ssh user, authorized keys, and k3s token/URL injection for agents; control-plane restore path with etcd snapshot selection.
- Add per-host inventory entries for tethys, titan-db, titan-jh, oceanus/titan-23, future 20/21 once audited.

View File

@ -0,0 +1,30 @@
---
title: Othrys verification checklist
tags:
- comms
- matrix
- element
- livekit
entrypoints:
- https://live.bstein.dev
- https://matrix.live.bstein.dev
---
1) Guest join:
- Open a private window and visit:
`https://live.bstein.dev/#/room/#othrys:live.bstein.dev?action=join`
- Confirm the guest join flow works and the displayname becomes `<word>-<word>`.
2) Keycloak login:
- Log in from `https://live.bstein.dev` and confirm MAS -> Keycloak -> Element redirect.
3) Video rooms:
- Start an Element Call room and confirm audio/video with a second account.
- Check that guests can read public rooms but cannot start calls.
4) Well-known:
- `https://live.bstein.dev/.well-known/matrix/client` returns JSON.
- `https://matrix.live.bstein.dev/.well-known/matrix/client` returns JSON.
5) TURN reachability:
- Confirm `turn.live.bstein.dev:3478` and `turns:5349` are reachable from WAN.
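
A minimal sketch, not part of the runbook itself, that automates the well-known checks in step 4; it assumes stock urllib is available wherever the check runs.

```python
import json
import urllib.request

# The two well-known endpoints listed in step 4 must return parseable JSON.
WELL_KNOWN_URLS = [
    "https://live.bstein.dev/.well-known/matrix/client",
    "https://matrix.live.bstein.dev/.well-known/matrix/client",
]

for url in WELL_KNOWN_URLS:
    with urllib.request.urlopen(url, timeout=10) as resp:
        json.loads(resp.read().decode("utf-8"))  # raises if the body is not valid JSON
        print(f"OK {url}")
```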

View File

@ -0,0 +1,73 @@
# Metis (node recovery)
## Node classes (current map)
- rpi5 Ubuntu workers: titan-04,05,06,07,08,09,10,11,20,21 (Ubuntu 24.04.3, k3s agent)
- rpi5 control-plane: titan-0a/0b/0c (Ubuntu 24.04.1, k3s server, control-plane taint)
- rpi4 Armbian longhorn: titan-13/15/17/19 (Armbian 6.6.x, k3s agent, longhorn disks)
- rpi4 Armbian standard: titan-12/14/18 (Armbian 6.6.x, k3s agent)
- amd64 agents: titan-22/24 (Debian 13, k3s agent)
- External/non-cluster: tethys, titan-db, titan-jh, oceanus/titan-23, plus any newcomers.
### Jetson nodes (titan-20/21)
- Ubuntu 20.04.6 (Focal), kernel 5.10.104-tegra, CRI containerd 2.0.5-k3s2, arch arm64.
- Storage: NVMe 232G at / (ext4); onboard mmc partitions present but root on NVMe; 1.9T sda present (unused).
- k3s agent with drop-in 99-nofile.conf.
## Longhorn disk UUIDs (critical nodes)
- titan-13: /mnt/astreae UUID=6031fa8b-f28c-45c3-b7bc-6133300e07c6 (ext4); /mnt/asteria UUID=cbd4989d-62b5-4741-8b2a-28fdae259cae (ext4)
- titan-15: /mnt/astreae UUID=f3362f14-5822-449f-944b-ac570b5cd615 (ext4); /mnt/asteria UUID=9c5316e6-f847-4884-b502-11f2d0d15d6f (ext4)
- titan-17: /mnt/astreae UUID=1fecdade-08b0-49cb-9ae3-be6c188b0a96 (ext4); /mnt/asteria UUID=2fe9f613-d372-47ca-b84f-82084e4edda0 (ext4)
- titan-19: /mnt/astreae UUID=4890abb9-dda2-4f4f-9c0f-081ee82849cf (ext4); /mnt/asteria UUID=2b4ea28d-b0e6-4fa3-841b-cd7067ae9153 (ext4)
## Metis repo (~/Development/metis)
- CLI skeleton in Go (`cmd/metis`), inventory loader (`pkg/inventory`), plan builder (`pkg/plan`).
- `inventory.example.yaml` shows expected schema (classes + per-node overlay, Longhorn disks, labels, taints).
- `AGENTS.md` in repo is untracked and holds raw notes.
## Next implementation steps
- Add per-class golden image refs and checksums (Harbor or file://) when ready.
- Implement burn execution: download with checksum, write via dd/etcher-equivalent, mount boot/root to inject hostname/IP/k3s tokens/labels/taints, journald/GC drop-ins, and Longhorn fstab entries. Add Windows writer (diskpart + wmic) and Linux writer (dd + sgdisk) paths.
- Add Keycloak/SSH bootstrap: ensure ssh user, authorized keys, and k3s token/URL injection for agents; control-plane restore path with etcd snapshot selection.
- Add per-host inventory entries for tethys, titan-db, titan-jh, oceanus/titan-23, future 20/21 once audited.
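
For the injection side of the burn and bootstrap steps above, a rough sketch of writing node identity into a freshly written root partition; the mount point, admin user, and the assumption that the golden image already has k3s reading `/etc/rancher/k3s/config.yaml` are all placeholders for whatever the Go implementation settles on.

```python
from pathlib import Path

def inject_identity(root_mnt: str, hostname: str, k3s_url: str, k3s_token: str,
                    authorized_keys: list[str]) -> None:
    """Write hostname, k3s join config, and SSH keys into a mounted root partition."""
    root = Path(root_mnt)
    (root / "etc/hostname").write_text(hostname + "\n")
    k3s_cfg = root / "etc/rancher/k3s/config.yaml"
    k3s_cfg.parent.mkdir(parents=True, exist_ok=True)
    k3s_cfg.write_text(f"server: {k3s_url}\ntoken: {k3s_token}\n")
    ssh_dir = root / "home/bstein/.ssh"   # hypothetical admin user, adjust per class
    ssh_dir.mkdir(parents=True, exist_ok=True)
    (ssh_dir / "authorized_keys").write_text("\n".join(authorized_keys) + "\n")

# Hypothetical usage; server URL and token are placeholders:
# inject_identity("/mnt/burn-root", "titan-05",
#                 "https://titan-0a:6443", "<k3s-token>", ["ssh-ed25519 AAAA..."])
```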
## Node OS/Kernel/CRI snapshot (Jan 2026)
- titan-04: Ubuntu 24.04.3 LTS, kernel 6.8.0-1031-raspi, CRI containerd://2.0.5-k3s2, arch arm64
- titan-05: Ubuntu 24.04.3 LTS, kernel 6.8.0-1039-raspi, CRI containerd://2.0.5-k3s2, arch arm64
- titan-06: Ubuntu 24.04.3 LTS, kernel 6.8.0-1039-raspi, CRI containerd://2.0.5-k3s2, arch arm64
- titan-07: Ubuntu 24.04.3 LTS, kernel 6.8.0-1039-raspi, CRI containerd://2.0.5-k3s2, arch arm64
- titan-08: Ubuntu 24.04.3 LTS, kernel 6.8.0-1039-raspi, CRI containerd://2.0.5-k3s2, arch arm64
- titan-09: Ubuntu 24.04.3 LTS, kernel 6.8.0-1031-raspi, CRI containerd://2.0.5-k3s2, arch arm64
- titan-0a: Ubuntu 24.04.1 LTS, kernel 6.8.0-1038-raspi, CRI containerd://2.0.5-k3s2, arch arm64
- titan-0b: Ubuntu 24.04.1 LTS, kernel 6.8.0-1038-raspi, CRI containerd://2.0.5-k3s2, arch arm64
- titan-0c: Ubuntu 24.04.1 LTS, kernel 6.8.0-1038-raspi, CRI containerd://2.0.5-k3s2, arch arm64
- titan-10: Ubuntu 24.04.3 LTS, kernel 6.8.0-1039-raspi, CRI containerd://2.0.5-k3s2, arch arm64
- titan-11: Ubuntu 24.04.3 LTS, kernel 6.8.0-1039-raspi, CRI containerd://2.0.5-k3s2, arch arm64
- titan-12: Armbian 24.11.1 noble, kernel 6.6.60-current-bcm2711, CRI containerd://1.7.23-k3s2, arch arm64
- titan-13: Armbian 25.2.1 noble, kernel 6.6.63-current-bcm2711, CRI containerd://1.7.23-k3s2, arch arm64
- titan-14: Armbian 24.11.1 noble, kernel 6.6.60-current-bcm2711, CRI containerd://1.7.23-k3s2, arch arm64
- titan-15: Armbian 25.2.1 noble, kernel 6.6.63-current-bcm2711, CRI containerd://1.7.23-k3s2, arch arm64
- titan-17: Armbian 25.2.1 noble, kernel 6.6.63-current-bcm2711, CRI containerd://1.7.23-k3s2, arch arm64
- titan-18: Armbian 24.11.1 noble, kernel 6.6.60-current-bcm2711, CRI containerd://1.7.23-k3s2, arch arm64
- titan-19: Armbian 25.2.1 noble, kernel 6.6.63-current-bcm2711, CRI containerd://1.7.23-k3s2, arch arm64
- titan-20: Ubuntu 20.04.6 LTS, kernel 5.10.104-tegra, CRI containerd://2.0.5-k3s2, arch arm64
- titan-21: Ubuntu 20.04.6 LTS, kernel 5.10.104-tegra, CRI containerd://2.0.5-k3s2, arch arm64
- titan-22: Debian 13 (trixie), kernel 6.12.41+deb13-amd64, CRI containerd://2.0.5-k3s2, arch amd64
- titan-24: Debian 13 (trixie), kernel 6.12.57+deb13-amd64, CRI containerd://2.0.5-k3s2, arch amd64
### External hosts
- titan-db: Ubuntu 24.10, kernel 6.11.0-1015-raspi, root on /dev/sda2 ext4 (465G), boot vfat /dev/sda1; PostgreSQL service enabled.
- titan-jh: Arch Linux ARM (rolling), kernel 6.18.4-2-rpi, NVMe root ext4 238G (/), boot vfat 512M; ~495 packages installed (pacman -Q).
- titan-23/oceanus: TODO audit (future).
### Control plane Pis (titan-0a/0b/0c)
- Ubuntu 24.04.1 LTS, kernel 6.8.0-1038-raspi, containerd 2.0.5-k3s2.
- Storage: 477G SSD root (/dev/sda2 ext4), /boot/firmware vfat (/dev/sda1). fstab uses LABEL=writable and LABEL=system-boot.
- k3s server (control-plane taint expected); etcd snapshots not yet cataloged (TODO).
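
Until snapshots are cataloged, they can at least be enumerated over SSH; a sketch assuming the default k3s snapshot directory and passwordless SSH to each control-plane Pi:

```python
import subprocess

SNAPSHOT_DIR = "/var/lib/rancher/k3s/server/db/snapshots"  # k3s default location

def list_snapshots(host: str) -> list[str]:
    # Newest first; relies on SSH key access to the control-plane node.
    out = subprocess.run(
        ["ssh", host, f"ls -1t {SNAPSHOT_DIR}"],
        capture_output=True, text=True, check=True,
    )
    return [line for line in out.stdout.splitlines() if line]

for node in ("titan-0a", "titan-0b", "titan-0c"):
    snaps = list_snapshots(node)
    print(node, "->", snaps[0] if snaps else "no snapshots")
```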
## k3s versions
- rpi5 workers/control-plane: k3s v1.33.3+k3s1 (crictl v1.31.0-k3s2)
- rpi4 nodes: k3s v1.31.5+k3s1 (crictl v1.31.0-k3s2)
- Jetson titan-20/21: k3s v1.33.3+k3s1 (per node info), crictl v1.31.0-k3s2

View File

@ -19,6 +19,8 @@ API_KEY = os.environ.get("CHAT_API_KEY", "")
KB_DIR = os.environ.get("KB_DIR", "")
VM_URL = os.environ.get("VM_URL", "http://victoria-metrics-single-server.monitoring.svc.cluster.local:8428")
ARIADNE_STATE_URL = os.environ.get("ARIADNE_STATE_URL", "")
ARIADNE_STATE_TOKEN = os.environ.get("ARIADNE_STATE_TOKEN", "")
BOT_MENTIONS = os.environ.get("BOT_MENTIONS", f"{USER},atlas")
SERVER_NAME = os.environ.get("MATRIX_SERVER_NAME", "live.bstein.dev")
@ -297,6 +299,21 @@ def k8s_get(path: str, timeout: int = 8) -> dict:
        raw = resp.read()
        return json.loads(raw.decode()) if raw else {}

def _ariadne_state(timeout: int = 5) -> dict | None:
    if not ARIADNE_STATE_URL:
        return None
    headers = {}
    if ARIADNE_STATE_TOKEN:
        headers["X-Internal-Token"] = ARIADNE_STATE_TOKEN
    r = request.Request(ARIADNE_STATE_URL, headers=headers, method="GET")
    try:
        with request.urlopen(r, timeout=timeout) as resp:
            raw = resp.read()
            payload = json.loads(raw.decode()) if raw else {}
            return payload if isinstance(payload, dict) else None
    except Exception:
        return None

def k8s_pods(namespace: str) -> list[dict]:
    data = k8s_get(f"/api/v1/namespaces/{parse.quote(namespace)}/pods?limit=500")
    items = data.get("items") or []
@ -445,6 +462,17 @@ def vm_cluster_snapshot() -> str:
return "\n".join(parts).strip()
def nodes_summary(cluster_name: str) -> str:
state = _ariadne_state()
if state:
nodes = state.get("nodes") if isinstance(state.get("nodes"), dict) else {}
total = nodes.get("total")
ready = nodes.get("ready")
not_ready = nodes.get("not_ready")
if isinstance(total, int) and isinstance(ready, int):
not_ready = not_ready if isinstance(not_ready, int) else max(total - ready, 0)
if not_ready:
return f"{cluster_name} cluster has {total} nodes: {ready} Ready, {not_ready} NotReady."
return f"{cluster_name} cluster has {total} nodes, all Ready."
try:
data = k8s_get("/api/v1/nodes?limit=500")
except Exception:
@ -467,6 +495,16 @@ def nodes_summary(cluster_name: str) -> str:
return f"{cluster_name} cluster has {total} nodes, all Ready."
def nodes_names_summary(cluster_name: str) -> str:
state = _ariadne_state()
if state:
nodes = state.get("nodes") if isinstance(state.get("nodes"), dict) else {}
names = nodes.get("names")
if isinstance(names, list) and names:
cleaned = sorted({str(n) for n in names if n})
if len(cleaned) <= 30:
return f"{cluster_name} node names: {', '.join(cleaned)}."
shown = ", ".join(cleaned[:30])
return f"{cluster_name} node names: {shown}, … (+{len(cleaned) - 30} more)."
try:
data = k8s_get("/api/v1/nodes?limit=500")
except Exception:
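# For reference, a payload shape that satisfies both consumers above, inferred
# purely from the field accesses in this hunk; the real Ariadne state endpoint
# may carry additional keys.
_EXAMPLE_ARIADNE_STATE = {
    "nodes": {
        "total": 22,                        # illustrative numbers only
        "ready": 21,
        "not_ready": 1,                     # optional; treated as total - ready when absent
        "names": ["titan-04", "titan-05"],  # truncated example
    },
}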

View File

@ -311,10 +311,18 @@ spec:
value: "0 0 1 1 *"
- name: ARIADNE_SCHEDULE_COMMS_SEED_ROOM
value: "*/10 * * * *"
- name: ARIADNE_SCHEDULE_CLUSTER_STATE
value: "*/15 * * * *"
- name: ARIADNE_CLUSTER_STATE_KEEP
value: "168"
- name: WELCOME_EMAIL_ENABLED
value: "true"
- name: K8S_API_TIMEOUT_SEC
value: "5"
- name: ARIADNE_VM_URL
value: http://victoria-metrics-single-server.monitoring.svc.cluster.local:8428
- name: ARIADNE_CLUSTER_STATE_VM_TIMEOUT_SEC
value: "5"
- name: OPENSEARCH_URL
value: http://opensearch-master.logging.svc.cluster.local:9200
- name: OPENSEARCH_LIMIT_BYTES

View File

@ -21,12 +21,27 @@ rules:
      - list
      - watch
      - delete
  - apiGroups: [""]
    resources:
      - nodes
      - namespaces
    verbs:
      - get
      - list
      - watch
  - apiGroups: [""]
    resources:
      - pods/exec
    verbs:
      - get
      - create
  - apiGroups: ["kustomize.toolkit.fluxcd.io"]
    resources:
      - kustomizations
    verbs:
      - get
      - list
      - watch
---
apiVersion: rbac.authorization.k8s.io/v1