feat(ariadne): internalize glue tasks and mailu defaults

Brad Stein 2026-01-20 23:03:04 -03:00
parent 1c6e16e8c9
commit 871ab9dae8
46 changed files with 4326 additions and 3625 deletions

Jenkinsfile vendored

@@ -104,7 +104,15 @@ spec:
   --out "${COVERAGE_JSON}" \
   --source ariadne \
   --fail-under "${COVERAGE_MIN}" \
-  -m pytest -q --junitxml "${JUNIT_XML}"
+  -m pytest -ra -vv --durations=20 --junitxml "${JUNIT_XML}"
+python - <<'PY'
+import json
+with open("build/coverage.json", "r", encoding="utf-8") as handle:
+    payload = json.load(handle)
+summary = payload.get("summary") or {}
+percent = summary.get("percent_covered")
+print(f"Coverage summary: {percent:.2f}%" if percent is not None else "Coverage summary unavailable")
+PY
 '''
 }
 }
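The summary step above assumes the coverage wrapper writes a top-level summary.percent_covered field rather than coverage.py's native report layout. A hypothetical sketch of that re-shaping step, assuming coverage.py's JSON report (which nests the figure under totals.percent_covered); the function and file names here are illustrations, not repo code:

import json

def write_summary(coverage_json_path: str, out_path: str) -> None:
    # Re-shape coverage.py's report ("totals.percent_covered") into the
    # {"summary": {"percent_covered": ...}} form the Jenkins step reads.
    with open(coverage_json_path, "r", encoding="utf-8") as handle:
        totals = json.load(handle).get("totals") or {}
    with open(out_path, "w", encoding="utf-8") as handle:
        json.dump({"summary": {"percent_covered": totals.get("percent_covered")}}, handle)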
@@ -195,6 +203,8 @@ spec:
 post {
   always {
+    junit allowEmptyResults: true, testResults: 'build/junit.xml'
+    archiveArtifacts artifacts: 'build/junit.xml,build/coverage.json', allowEmptyArchive: true, fingerprint: true
     script {
       def props = fileExists('build.env') ? readProperties(file: 'build.env') : [:]
       echo "Build complete for ${props['SEMVER'] ?: env.VERSION_TAG}"


@@ -21,6 +21,9 @@ from .services.keycloak_admin import keycloak_admin
 from .services.keycloak_profile import run_profile_sync
 from .services.mailu import mailu
 from .services.nextcloud import nextcloud
+from .services.image_sweeper import image_sweeper
+from .services.opensearch_prune import prune_indices
+from .services.pod_cleaner import clean_finished_pods
 from .services.vaultwarden_sync import run_vaultwarden_sync
 from .services.vault import vault
 from .services.wger import wger
@@ -96,6 +99,16 @@ def _startup() -> None:
         settings.nextcloud_sync_cron,
         lambda: nextcloud.sync_mail(wait=False),
     )
+    scheduler.add_task(
+        "schedule.nextcloud_cron",
+        settings.nextcloud_cron,
+        lambda: nextcloud.run_cron(),
+    )
+    scheduler.add_task(
+        "schedule.nextcloud_maintenance",
+        settings.nextcloud_maintenance_cron,
+        lambda: nextcloud.run_maintenance(),
+    )
     scheduler.add_task("schedule.vaultwarden_sync", settings.vaultwarden_sync_cron, run_vaultwarden_sync)
     scheduler.add_task(
         "schedule.keycloak_profile",
@@ -103,6 +116,26 @@ def _startup() -> None:
         run_profile_sync,
     )
     scheduler.add_task("schedule.wger_admin", settings.wger_admin_cron, lambda: wger.ensure_admin(wait=False))
+    scheduler.add_task(
+        "schedule.firefly_cron",
+        settings.firefly_cron,
+        lambda: firefly.run_cron(),
+    )
+    scheduler.add_task(
+        "schedule.pod_cleaner",
+        settings.pod_cleaner_cron,
+        clean_finished_pods,
+    )
+    scheduler.add_task(
+        "schedule.opensearch_prune",
+        settings.opensearch_prune_cron,
+        prune_indices,
+    )
+    scheduler.add_task(
+        "schedule.image_sweeper",
+        settings.image_sweeper_cron,
+        lambda: image_sweeper.run(wait=True),
+    )
     scheduler.add_task(
         "schedule.vault_k8s_auth",
         settings.vault_k8s_auth_cron,
@@ -139,9 +172,15 @@ def _startup() -> None:
     extra={
         "event": "startup",
         "mailu_cron": settings.mailu_sync_cron,
-        "nextcloud_cron": settings.nextcloud_sync_cron,
+        "nextcloud_mail_cron": settings.nextcloud_sync_cron,
+        "nextcloud_cron": settings.nextcloud_cron,
+        "nextcloud_maintenance_cron": settings.nextcloud_maintenance_cron,
         "vaultwarden_cron": settings.vaultwarden_sync_cron,
         "wger_admin_cron": settings.wger_admin_cron,
+        "firefly_cron": settings.firefly_cron,
+        "pod_cleaner_cron": settings.pod_cleaner_cron,
+        "opensearch_prune_cron": settings.opensearch_prune_cron,
+        "image_sweeper_cron": settings.image_sweeper_cron,
         "vault_k8s_auth_cron": settings.vault_k8s_auth_cron,
         "vault_oidc_cron": settings.vault_oidc_cron,
         "comms_guest_name_cron": settings.comms_guest_name_cron,


@@ -1 +0,0 @@
"""Embedded job manifests for Ariadne-managed tasks."""


@@ -1,471 +0,0 @@
# services/comms/guest-name-job.yaml
apiVersion: batch/v1
kind: CronJob
metadata:
name: guest-name-randomizer
namespace: comms
labels:
atlas.bstein.dev/glue: "true"
spec:
schedule: "*/1 * * * *"
suspend: true
concurrencyPolicy: Forbid
successfulJobsHistoryLimit: 1
failedJobsHistoryLimit: 1
jobTemplate:
spec:
backoffLimit: 0
template:
metadata:
annotations:
vault.hashicorp.com/agent-inject: "true"
vault.hashicorp.com/agent-pre-populate-only: "true"
vault.hashicorp.com/role: "comms"
vault.hashicorp.com/agent-inject-secret-turn-secret: "kv/data/atlas/comms/turn-shared-secret"
vault.hashicorp.com/agent-inject-template-turn-secret: |
{{- with secret "kv/data/atlas/comms/turn-shared-secret" -}}{{ .Data.data.TURN_STATIC_AUTH_SECRET }}{{- end -}}
vault.hashicorp.com/agent-inject-secret-livekit-primary: "kv/data/atlas/comms/livekit-api"
vault.hashicorp.com/agent-inject-template-livekit-primary: |
{{- with secret "kv/data/atlas/comms/livekit-api" -}}{{ .Data.data.primary }}{{- end -}}
vault.hashicorp.com/agent-inject-secret-bot-pass: "kv/data/atlas/comms/atlasbot-credentials-runtime"
vault.hashicorp.com/agent-inject-template-bot-pass: |
{{- with secret "kv/data/atlas/comms/atlasbot-credentials-runtime" -}}{{ index .Data.data "bot-password" }}{{- end -}}
vault.hashicorp.com/agent-inject-secret-seeder-pass: "kv/data/atlas/comms/atlasbot-credentials-runtime"
vault.hashicorp.com/agent-inject-template-seeder-pass: |
{{- with secret "kv/data/atlas/comms/atlasbot-credentials-runtime" -}}{{ index .Data.data "seeder-password" }}{{- end -}}
vault.hashicorp.com/agent-inject-secret-chat-matrix: "kv/data/atlas/shared/chat-ai-keys-runtime"
vault.hashicorp.com/agent-inject-template-chat-matrix: |
{{- with secret "kv/data/atlas/shared/chat-ai-keys-runtime" -}}{{ .Data.data.matrix }}{{- end -}}
vault.hashicorp.com/agent-inject-secret-chat-homepage: "kv/data/atlas/shared/chat-ai-keys-runtime"
vault.hashicorp.com/agent-inject-template-chat-homepage: |
{{- with secret "kv/data/atlas/shared/chat-ai-keys-runtime" -}}{{ .Data.data.homepage }}{{- end -}}
vault.hashicorp.com/agent-inject-secret-mas-admin-secret: "kv/data/atlas/comms/mas-admin-client-runtime"
vault.hashicorp.com/agent-inject-template-mas-admin-secret: |
{{- with secret "kv/data/atlas/comms/mas-admin-client-runtime" -}}{{ .Data.data.client_secret }}{{- end -}}
vault.hashicorp.com/agent-inject-secret-synapse-db-pass: "kv/data/atlas/comms/synapse-db"
vault.hashicorp.com/agent-inject-template-synapse-db-pass: |
{{- with secret "kv/data/atlas/comms/synapse-db" -}}{{ .Data.data.POSTGRES_PASSWORD }}{{- end -}}
vault.hashicorp.com/agent-inject-secret-mas-db-pass: "kv/data/atlas/comms/mas-db"
vault.hashicorp.com/agent-inject-template-mas-db-pass: |
{{- with secret "kv/data/atlas/comms/mas-db" -}}{{ .Data.data.password }}{{- end -}}
vault.hashicorp.com/agent-inject-secret-mas-matrix-shared: "kv/data/atlas/comms/mas-secrets-runtime"
vault.hashicorp.com/agent-inject-template-mas-matrix-shared: |
{{- with secret "kv/data/atlas/comms/mas-secrets-runtime" -}}{{ .Data.data.matrix_shared_secret }}{{- end -}}
vault.hashicorp.com/agent-inject-secret-mas-kc-secret: "kv/data/atlas/comms/mas-secrets-runtime"
vault.hashicorp.com/agent-inject-template-mas-kc-secret: |
{{- with secret "kv/data/atlas/comms/mas-secrets-runtime" -}}{{ .Data.data.keycloak_client_secret }}{{- end -}}
spec:
restartPolicy: Never
serviceAccountName: comms-vault
nodeSelector:
hardware: rpi5
volumes:
- name: vault-scripts
configMap:
name: comms-vault-env
defaultMode: 0555
containers:
- name: rename
image: registry.bstein.dev/bstein/comms-guest-tools:0.1.0
volumeMounts:
- name: vault-scripts
mountPath: /vault/scripts
readOnly: true
env:
- name: SYNAPSE_BASE
value: http://othrys-synapse-matrix-synapse:8008
- name: MAS_ADMIN_CLIENT_ID
value: 01KDXMVQBQ5JNY6SEJPZW6Z8BM
- name: MAS_ADMIN_CLIENT_SECRET_FILE
value: /vault/secrets/mas-admin-secret
- name: MAS_ADMIN_API_BASE
value: http://matrix-authentication-service:8081/api/admin/v1
- name: MAS_TOKEN_URL
value: http://matrix-authentication-service:8080/oauth2/token
- name: SEEDER_USER
value: othrys-seeder
- name: PGHOST
value: postgres-service.postgres.svc.cluster.local
- name: PGPORT
value: "5432"
- name: PGDATABASE
value: synapse
- name: PGUSER
value: synapse
command:
- /bin/sh
- -c
- |
set -euo pipefail
. /vault/scripts/comms_vault_env.sh
python - <<'PY'
import base64
import os
import random
import requests
import time
import urllib.parse
import psycopg2
ADJ = [
"brisk","calm","eager","gentle","merry","nifty","rapid","sunny","witty","zesty",
"amber","bold","bright","crisp","daring","frosty","glad","jolly","lively","mellow",
"quiet","ripe","serene","spry","tidy","vivid","warm","wild","clever","kind",
]
NOUN = [
"otter","falcon","comet","ember","grove","harbor","meadow","raven","river","summit",
"breeze","cedar","cinder","cove","delta","forest","glade","lark","marsh","peak",
"pine","quartz","reef","ridge","sable","sage","shore","thunder","vale","zephyr",
]
BASE = os.environ["SYNAPSE_BASE"]
MAS_ADMIN_CLIENT_ID = os.environ["MAS_ADMIN_CLIENT_ID"]
MAS_ADMIN_CLIENT_SECRET_FILE = os.environ["MAS_ADMIN_CLIENT_SECRET_FILE"]
MAS_ADMIN_API_BASE = os.environ["MAS_ADMIN_API_BASE"].rstrip("/")
MAS_TOKEN_URL = os.environ["MAS_TOKEN_URL"]
SEEDER_USER = os.environ["SEEDER_USER"]
ROOM_ALIAS = "#othrys:live.bstein.dev"
SERVER_NAME = "live.bstein.dev"
STALE_GUEST_MS = 14 * 24 * 60 * 60 * 1000
def mas_admin_token():
with open(MAS_ADMIN_CLIENT_SECRET_FILE, "r", encoding="utf-8") as f:
secret = f.read().strip()
basic = base64.b64encode(f"{MAS_ADMIN_CLIENT_ID}:{secret}".encode()).decode()
last_err = None
for attempt in range(5):
try:
r = requests.post(
MAS_TOKEN_URL,
headers={"Authorization": f"Basic {basic}"},
data={"grant_type": "client_credentials", "scope": "urn:mas:admin"},
timeout=30,
)
r.raise_for_status()
return r.json()["access_token"]
except Exception as exc: # noqa: BLE001
last_err = exc
time.sleep(2 ** attempt)
raise last_err
def mas_user_id(token, username):
r = requests.get(
f"{MAS_ADMIN_API_BASE}/users/by-username/{urllib.parse.quote(username)}",
headers={"Authorization": f"Bearer {token}"},
timeout=30,
)
r.raise_for_status()
return r.json()["data"]["id"]
def mas_personal_session(token, user_id):
r = requests.post(
f"{MAS_ADMIN_API_BASE}/personal-sessions",
headers={"Authorization": f"Bearer {token}"},
json={
"actor_user_id": user_id,
"human_name": "guest-name-randomizer",
"scope": "urn:matrix:client:api:*",
"expires_in": 300,
},
timeout=30,
)
r.raise_for_status()
data = r.json().get("data", {}).get("attributes", {}) or {}
return data["access_token"], r.json()["data"]["id"]
def mas_revoke_session(token, session_id):
requests.post(
f"{MAS_ADMIN_API_BASE}/personal-sessions/{urllib.parse.quote(session_id)}/revoke",
headers={"Authorization": f"Bearer {token}"},
json={},
timeout=30,
)
def resolve_alias(token, alias):
headers = {"Authorization": f"Bearer {token}"}
enc = urllib.parse.quote(alias)
r = requests.get(f"{BASE}/_matrix/client/v3/directory/room/{enc}", headers=headers)
r.raise_for_status()
return r.json()["room_id"]
def room_members(token, room_id):
headers = {"Authorization": f"Bearer {token}"}
r = requests.get(f"{BASE}/_matrix/client/v3/rooms/{urllib.parse.quote(room_id)}/members", headers=headers)
r.raise_for_status()
members = set()
existing_names = set()
for ev in r.json().get("chunk", []):
user_id = ev.get("state_key")
if user_id:
members.add(user_id)
disp = (ev.get("content") or {}).get("displayname")
if disp:
existing_names.add(disp)
return members, existing_names
def mas_list_users(token):
headers = {"Authorization": f"Bearer {token}"}
users = []
cursor = None
while True:
url = f"{MAS_ADMIN_API_BASE}/users?page[size]=100"
if cursor:
url += f"&page[after]={urllib.parse.quote(cursor)}"
r = requests.get(url, headers=headers, timeout=30)
r.raise_for_status()
data = r.json().get("data", [])
if not data:
break
users.extend(data)
cursor = data[-1].get("meta", {}).get("page", {}).get("cursor")
if not cursor:
break
return users
def synapse_list_users(token):
headers = {"Authorization": f"Bearer {token}"}
users = []
from_token = None
while True:
url = f"{BASE}/_synapse/admin/v2/users?local=true&deactivated=false&limit=100"
if from_token:
url += f"&from={urllib.parse.quote(from_token)}"
r = requests.get(url, headers=headers, timeout=30)
r.raise_for_status()
payload = r.json()
users.extend(payload.get("users", []))
from_token = payload.get("next_token")
if not from_token:
break
return users
def should_prune_guest(entry, now_ms):
if not entry.get("is_guest"):
return False
last_seen = entry.get("last_seen_ts")
if last_seen is None:
return False
try:
last_seen = int(last_seen)
except (TypeError, ValueError):
return False
return now_ms - last_seen > STALE_GUEST_MS
def prune_guest(token, user_id):
headers = {"Authorization": f"Bearer {token}"}
try:
r = requests.delete(
f"{BASE}/_synapse/admin/v2/users/{urllib.parse.quote(user_id)}",
headers=headers,
params={"erase": "true"},
timeout=30,
)
except Exception as exc: # noqa: BLE001
print(f"guest prune failed for {user_id}: {exc}")
return False
if r.status_code in (200, 202, 204, 404):
return True
print(f"guest prune failed for {user_id}: {r.status_code} {r.text}")
return False
def user_id_for_username(username):
return f"@{username}:live.bstein.dev"
def get_displayname(token, user_id):
headers = {"Authorization": f"Bearer {token}"}
r = requests.get(f"{BASE}/_matrix/client/v3/profile/{urllib.parse.quote(user_id)}", headers=headers)
r.raise_for_status()
return r.json().get("displayname")
def get_displayname_admin(token, user_id):
headers = {"Authorization": f"Bearer {token}"}
r = requests.get(
f"{BASE}/_synapse/admin/v2/users/{urllib.parse.quote(user_id)}",
headers=headers,
timeout=30,
)
if r.status_code == 404:
return None
r.raise_for_status()
return r.json().get("displayname")
def set_displayname(token, room_id, user_id, name, in_room):
headers = {"Authorization": f"Bearer {token}"}
payload = {"displayname": name}
r = requests.put(
f"{BASE}/_matrix/client/v3/profile/{urllib.parse.quote(user_id)}/displayname",
headers=headers,
json=payload,
)
r.raise_for_status()
if not in_room:
return
state_url = f"{BASE}/_matrix/client/v3/rooms/{urllib.parse.quote(room_id)}/state/m.room.member/{urllib.parse.quote(user_id)}"
content = {"membership": "join", "displayname": name}
requests.put(state_url, headers=headers, json=content, timeout=30)
def set_displayname_admin(token, user_id, name):
headers = {"Authorization": f"Bearer {token}"}
payload = {"displayname": name}
r = requests.put(
f"{BASE}/_synapse/admin/v2/users/{urllib.parse.quote(user_id)}",
headers=headers,
json=payload,
timeout=30,
)
if r.status_code in (200, 201, 204):
return True
return False
def needs_rename_username(username):
return username.isdigit() or username.startswith("guest-")
def needs_rename_display(display):
return not display or display.isdigit() or display.startswith("guest-")
def db_rename_numeric(existing_names):
profile_rows = []
profile_index = {}
users = []
conn = psycopg2.connect(
host=os.environ["PGHOST"],
port=int(os.environ["PGPORT"]),
dbname=os.environ["PGDATABASE"],
user=os.environ["PGUSER"],
password=os.environ["PGPASSWORD"],
)
try:
with conn:
with conn.cursor() as cur:
cur.execute(
"SELECT user_id, full_user_id, displayname FROM profiles WHERE full_user_id ~ %s",
(f"^@\\d+:{SERVER_NAME}$",),
)
profile_rows = cur.fetchall()
profile_index = {row[1]: row for row in profile_rows}
for user_id, full_user_id, display in profile_rows:
if display and not needs_rename_display(display):
continue
new = None
for _ in range(30):
candidate = f"{random.choice(ADJ)}-{random.choice(NOUN)}"
if candidate not in existing_names:
new = candidate
existing_names.add(candidate)
break
if not new:
continue
cur.execute(
"UPDATE profiles SET displayname = %s WHERE full_user_id = %s",
(new, full_user_id),
)
cur.execute(
"SELECT name FROM users WHERE name ~ %s",
(f"^@\\d+:{SERVER_NAME}$",),
)
users = [row[0] for row in cur.fetchall()]
if not users:
return
cur.execute(
"SELECT user_id, full_user_id FROM profiles WHERE full_user_id = ANY(%s)",
(users,),
)
for existing_full in cur.fetchall():
profile_index.setdefault(existing_full[1], existing_full)
for full_user_id in users:
if full_user_id in profile_index:
continue
localpart = full_user_id.split(":", 1)[0].lstrip("@")
new = None
for _ in range(30):
candidate = f"{random.choice(ADJ)}-{random.choice(NOUN)}"
if candidate not in existing_names:
new = candidate
existing_names.add(candidate)
break
if not new:
continue
cur.execute(
"INSERT INTO profiles (user_id, displayname, full_user_id) VALUES (%s, %s, %s) "
"ON CONFLICT (full_user_id) DO UPDATE SET displayname = EXCLUDED.displayname",
(localpart, new, full_user_id),
)
finally:
conn.close()
admin_token = mas_admin_token()
seeder_id = mas_user_id(admin_token, SEEDER_USER)
seeder_token, seeder_session = mas_personal_session(admin_token, seeder_id)
try:
room_id = resolve_alias(seeder_token, ROOM_ALIAS)
members, existing = room_members(seeder_token, room_id)
users = mas_list_users(admin_token)
mas_usernames = set()
for user in users:
attrs = user.get("attributes") or {}
username = attrs.get("username") or ""
if username:
mas_usernames.add(username)
legacy_guest = attrs.get("legacy_guest")
if not username:
continue
if not (legacy_guest or needs_rename_username(username)):
continue
user_id = user_id_for_username(username)
access_token, session_id = mas_personal_session(admin_token, user["id"])
try:
display = get_displayname(access_token, user_id)
if display and not needs_rename_display(display):
continue
new = None
for _ in range(30):
candidate = f"{random.choice(ADJ)}-{random.choice(NOUN)}"
if candidate not in existing:
new = candidate
existing.add(candidate)
break
if not new:
continue
set_displayname(access_token, room_id, user_id, new, user_id in members)
finally:
mas_revoke_session(admin_token, session_id)
try:
entries = synapse_list_users(seeder_token)
except Exception as exc: # noqa: BLE001
print(f"synapse admin list skipped: {exc}")
entries = []
now_ms = int(time.time() * 1000)
for entry in entries:
user_id = entry.get("name") or ""
if not user_id.startswith("@"):
continue
localpart = user_id.split(":", 1)[0].lstrip("@")
if localpart in mas_usernames:
continue
is_guest = entry.get("is_guest")
if is_guest and should_prune_guest(entry, now_ms):
if prune_guest(seeder_token, user_id):
continue
if not (is_guest or needs_rename_username(localpart)):
continue
display = get_displayname_admin(seeder_token, user_id)
if display and not needs_rename_display(display):
continue
new = None
for _ in range(30):
candidate = f"{random.choice(ADJ)}-{random.choice(NOUN)}"
if candidate not in existing:
new = candidate
existing.add(candidate)
break
if not new:
continue
if not set_displayname_admin(seeder_token, user_id, new):
continue
db_rename_numeric(existing)
finally:
mas_revoke_session(admin_token, seeder_session)
PY
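The job above hand-rolls retry with exponential backoff for the MAS token fetch (sleep of 2 ** attempt between tries). A generic helper in the same spirit, offered as a sketch rather than repo code:

import time
from typing import Callable, Optional, TypeVar

T = TypeVar("T")

def with_backoff(fn: Callable[[], T], attempts: int = 5, base: float = 2.0) -> T:
    # Retry fn up to `attempts` times, sleeping base**attempt between tries,
    # mirroring mas_admin_token() in the job above.
    last_err: Optional[Exception] = None
    for attempt in range(attempts):
        try:
            return fn()
        except Exception as exc:  # noqa: BLE001
            last_err = exc
            time.sleep(base ** attempt)
    raise last_err if last_err else RuntimeError("no attempts made")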


@@ -1,169 +0,0 @@
# services/comms/pin-othrys-job.yaml
apiVersion: batch/v1
kind: CronJob
metadata:
name: pin-othrys-invite
namespace: comms
labels:
atlas.bstein.dev/glue: "true"
spec:
schedule: "*/30 * * * *"
suspend: true
concurrencyPolicy: Forbid
successfulJobsHistoryLimit: 1
failedJobsHistoryLimit: 1
jobTemplate:
spec:
backoffLimit: 0
template:
metadata:
annotations:
vault.hashicorp.com/agent-inject: "true"
vault.hashicorp.com/agent-pre-populate-only: "true"
vault.hashicorp.com/role: "comms"
vault.hashicorp.com/agent-inject-secret-turn-secret: "kv/data/atlas/comms/turn-shared-secret"
vault.hashicorp.com/agent-inject-template-turn-secret: |
{{- with secret "kv/data/atlas/comms/turn-shared-secret" -}}{{ .Data.data.TURN_STATIC_AUTH_SECRET }}{{- end -}}
vault.hashicorp.com/agent-inject-secret-livekit-primary: "kv/data/atlas/comms/livekit-api"
vault.hashicorp.com/agent-inject-template-livekit-primary: |
{{- with secret "kv/data/atlas/comms/livekit-api" -}}{{ .Data.data.primary }}{{- end -}}
vault.hashicorp.com/agent-inject-secret-bot-pass: "kv/data/atlas/comms/atlasbot-credentials-runtime"
vault.hashicorp.com/agent-inject-template-bot-pass: |
{{- with secret "kv/data/atlas/comms/atlasbot-credentials-runtime" -}}{{ index .Data.data "bot-password" }}{{- end -}}
vault.hashicorp.com/agent-inject-secret-seeder-pass: "kv/data/atlas/comms/atlasbot-credentials-runtime"
vault.hashicorp.com/agent-inject-template-seeder-pass: |
{{- with secret "kv/data/atlas/comms/atlasbot-credentials-runtime" -}}{{ index .Data.data "seeder-password" }}{{- end -}}
vault.hashicorp.com/agent-inject-secret-chat-matrix: "kv/data/atlas/shared/chat-ai-keys-runtime"
vault.hashicorp.com/agent-inject-template-chat-matrix: |
{{- with secret "kv/data/atlas/shared/chat-ai-keys-runtime" -}}{{ .Data.data.matrix }}{{- end -}}
vault.hashicorp.com/agent-inject-secret-chat-homepage: "kv/data/atlas/shared/chat-ai-keys-runtime"
vault.hashicorp.com/agent-inject-template-chat-homepage: |
{{- with secret "kv/data/atlas/shared/chat-ai-keys-runtime" -}}{{ .Data.data.homepage }}{{- end -}}
vault.hashicorp.com/agent-inject-secret-mas-admin-secret: "kv/data/atlas/comms/mas-admin-client-runtime"
vault.hashicorp.com/agent-inject-template-mas-admin-secret: |
{{- with secret "kv/data/atlas/comms/mas-admin-client-runtime" -}}{{ .Data.data.client_secret }}{{- end -}}
vault.hashicorp.com/agent-inject-secret-synapse-db-pass: "kv/data/atlas/comms/synapse-db"
vault.hashicorp.com/agent-inject-template-synapse-db-pass: |
{{- with secret "kv/data/atlas/comms/synapse-db" -}}{{ .Data.data.POSTGRES_PASSWORD }}{{- end -}}
vault.hashicorp.com/agent-inject-secret-mas-db-pass: "kv/data/atlas/comms/mas-db"
vault.hashicorp.com/agent-inject-template-mas-db-pass: |
{{- with secret "kv/data/atlas/comms/mas-db" -}}{{ .Data.data.password }}{{- end -}}
vault.hashicorp.com/agent-inject-secret-mas-matrix-shared: "kv/data/atlas/comms/mas-secrets-runtime"
vault.hashicorp.com/agent-inject-template-mas-matrix-shared: |
{{- with secret "kv/data/atlas/comms/mas-secrets-runtime" -}}{{ .Data.data.matrix_shared_secret }}{{- end -}}
vault.hashicorp.com/agent-inject-secret-mas-kc-secret: "kv/data/atlas/comms/mas-secrets-runtime"
vault.hashicorp.com/agent-inject-template-mas-kc-secret: |
{{- with secret "kv/data/atlas/comms/mas-secrets-runtime" -}}{{ .Data.data.keycloak_client_secret }}{{- end -}}
spec:
restartPolicy: Never
serviceAccountName: comms-vault
containers:
- name: pin
image: python:3.11-slim
env:
- name: SYNAPSE_BASE
value: http://othrys-synapse-matrix-synapse:8008
- name: AUTH_BASE
value: http://matrix-authentication-service:8080
- name: SEEDER_USER
value: othrys-seeder
command:
- /bin/sh
- -c
- |
set -euo pipefail
. /vault/scripts/comms_vault_env.sh
pip install --no-cache-dir requests >/dev/null
python - <<'PY'
import os, requests, urllib.parse
BASE = os.environ["SYNAPSE_BASE"]
AUTH_BASE = os.environ.get("AUTH_BASE", BASE)
ROOM_ALIAS = "#othrys:live.bstein.dev"
MESSAGE = (
"Invite guests: share https://live.bstein.dev/#/room/#othrys:live.bstein.dev?action=join "
"and choose 'Continue' -> 'Join as guest'."
)
def auth(token): return {"Authorization": f"Bearer {token}"}
def canon_user(user):
u = (user or "").strip()
if u.startswith("@") and ":" in u:
return u
u = u.lstrip("@")
if ":" in u:
return f"@{u}"
return f"@{u}:live.bstein.dev"
def login(user, password):
r = requests.post(f"{AUTH_BASE}/_matrix/client/v3/login", json={
"type": "m.login.password",
"identifier": {"type": "m.id.user", "user": canon_user(user)},
"password": password,
})
r.raise_for_status()
return r.json()["access_token"]
def resolve(alias, token):
enc = urllib.parse.quote(alias)
r = requests.get(f"{BASE}/_matrix/client/v3/directory/room/{enc}", headers=auth(token))
r.raise_for_status()
return r.json()["room_id"]
def get_pinned(room_id, token):
r = requests.get(
f"{BASE}/_matrix/client/v3/rooms/{urllib.parse.quote(room_id)}/state/m.room.pinned_events",
headers=auth(token),
)
if r.status_code == 404:
return []
r.raise_for_status()
return r.json().get("pinned", [])
def get_event(room_id, event_id, token):
r = requests.get(
f"{BASE}/_matrix/client/v3/rooms/{urllib.parse.quote(room_id)}/event/{urllib.parse.quote(event_id)}",
headers=auth(token),
)
if r.status_code == 404:
return None
r.raise_for_status()
return r.json()
def send(room_id, token, body):
r = requests.post(
f"{BASE}/_matrix/client/v3/rooms/{urllib.parse.quote(room_id)}/send/m.room.message",
headers=auth(token),
json={"msgtype": "m.text", "body": body},
)
r.raise_for_status()
return r.json()["event_id"]
def pin(room_id, token, event_id):
r = requests.put(
f"{BASE}/_matrix/client/v3/rooms/{urllib.parse.quote(room_id)}/state/m.room.pinned_events",
headers=auth(token),
json={"pinned": [event_id]},
)
r.raise_for_status()
token = login(os.environ["SEEDER_USER"], os.environ["SEEDER_PASS"])
room_id = resolve(ROOM_ALIAS, token)
for event_id in get_pinned(room_id, token):
ev = get_event(room_id, event_id, token)
if ev and ev.get("content", {}).get("body") == MESSAGE:
raise SystemExit(0)
eid = send(room_id, token, MESSAGE)
pin(room_id, token, eid)
PY
volumeMounts:
- name: vault-scripts
mountPath: /vault/scripts
readOnly: true
volumes:
- name: vault-scripts
configMap:
name: comms-vault-env
defaultMode: 0555
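The pin job is idempotent: it scans the currently pinned events and exits early when the invite message is already pinned, so the */30 schedule never duplicates pins. That check, factored into a reusable predicate (a sketch using the job's own get_pinned/get_event helpers):

def already_pinned(room_id: str, token: str, message: str) -> bool:
    # True when one of the pinned events already carries `message`,
    # making reruns of the CronJob no-ops.
    for event_id in get_pinned(room_id, token):
        ev = get_event(room_id, event_id, token)
        if ev and ev.get("content", {}).get("body") == message:
            return True
    return False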


@@ -1,312 +0,0 @@
# services/comms/reset-othrys-room-job.yaml
apiVersion: batch/v1
kind: CronJob
metadata:
name: othrys-room-reset
namespace: comms
labels:
atlas.bstein.dev/glue: "true"
spec:
schedule: "0 0 1 1 *"
suspend: true
concurrencyPolicy: Forbid
successfulJobsHistoryLimit: 1
failedJobsHistoryLimit: 1
jobTemplate:
spec:
backoffLimit: 0
template:
metadata:
annotations:
vault.hashicorp.com/agent-inject: "true"
vault.hashicorp.com/agent-pre-populate-only: "true"
vault.hashicorp.com/role: "comms"
vault.hashicorp.com/agent-inject-secret-turn-secret: "kv/data/atlas/comms/turn-shared-secret"
vault.hashicorp.com/agent-inject-template-turn-secret: |
{{- with secret "kv/data/atlas/comms/turn-shared-secret" -}}{{ .Data.data.TURN_STATIC_AUTH_SECRET }}{{- end -}}
vault.hashicorp.com/agent-inject-secret-livekit-primary: "kv/data/atlas/comms/livekit-api"
vault.hashicorp.com/agent-inject-template-livekit-primary: |
{{- with secret "kv/data/atlas/comms/livekit-api" -}}{{ .Data.data.primary }}{{- end -}}
vault.hashicorp.com/agent-inject-secret-bot-pass: "kv/data/atlas/comms/atlasbot-credentials-runtime"
vault.hashicorp.com/agent-inject-template-bot-pass: |
{{- with secret "kv/data/atlas/comms/atlasbot-credentials-runtime" -}}{{ index .Data.data "bot-password" }}{{- end -}}
vault.hashicorp.com/agent-inject-secret-seeder-pass: "kv/data/atlas/comms/atlasbot-credentials-runtime"
vault.hashicorp.com/agent-inject-template-seeder-pass: |
{{- with secret "kv/data/atlas/comms/atlasbot-credentials-runtime" -}}{{ index .Data.data "seeder-password" }}{{- end -}}
vault.hashicorp.com/agent-inject-secret-chat-matrix: "kv/data/atlas/shared/chat-ai-keys-runtime"
vault.hashicorp.com/agent-inject-template-chat-matrix: |
{{- with secret "kv/data/atlas/shared/chat-ai-keys-runtime" -}}{{ .Data.data.matrix }}{{- end -}}
vault.hashicorp.com/agent-inject-secret-chat-homepage: "kv/data/atlas/shared/chat-ai-keys-runtime"
vault.hashicorp.com/agent-inject-template-chat-homepage: |
{{- with secret "kv/data/atlas/shared/chat-ai-keys-runtime" -}}{{ .Data.data.homepage }}{{- end -}}
vault.hashicorp.com/agent-inject-secret-mas-admin-secret: "kv/data/atlas/comms/mas-admin-client-runtime"
vault.hashicorp.com/agent-inject-template-mas-admin-secret: |
{{- with secret "kv/data/atlas/comms/mas-admin-client-runtime" -}}{{ .Data.data.client_secret }}{{- end -}}
vault.hashicorp.com/agent-inject-secret-synapse-db-pass: "kv/data/atlas/comms/synapse-db"
vault.hashicorp.com/agent-inject-template-synapse-db-pass: |
{{- with secret "kv/data/atlas/comms/synapse-db" -}}{{ .Data.data.POSTGRES_PASSWORD }}{{- end -}}
vault.hashicorp.com/agent-inject-secret-mas-db-pass: "kv/data/atlas/comms/mas-db"
vault.hashicorp.com/agent-inject-template-mas-db-pass: |
{{- with secret "kv/data/atlas/comms/mas-db" -}}{{ .Data.data.password }}{{- end -}}
vault.hashicorp.com/agent-inject-secret-mas-matrix-shared: "kv/data/atlas/comms/mas-secrets-runtime"
vault.hashicorp.com/agent-inject-template-mas-matrix-shared: |
{{- with secret "kv/data/atlas/comms/mas-secrets-runtime" -}}{{ .Data.data.matrix_shared_secret }}{{- end -}}
vault.hashicorp.com/agent-inject-secret-mas-kc-secret: "kv/data/atlas/comms/mas-secrets-runtime"
vault.hashicorp.com/agent-inject-template-mas-kc-secret: |
{{- with secret "kv/data/atlas/comms/mas-secrets-runtime" -}}{{ .Data.data.keycloak_client_secret }}{{- end -}}
spec:
restartPolicy: Never
serviceAccountName: comms-vault
containers:
- name: reset
image: python:3.11-slim
env:
- name: SYNAPSE_BASE
value: http://othrys-synapse-matrix-synapse:8008
- name: AUTH_BASE
value: http://matrix-authentication-service:8080
- name: SERVER_NAME
value: live.bstein.dev
- name: ROOM_ALIAS
value: "#othrys:live.bstein.dev"
- name: ROOM_NAME
value: Othrys
- name: PIN_MESSAGE
value: "Invite guests: share https://live.bstein.dev/#/room/#othrys:live.bstein.dev?action=join and choose 'Continue' -> 'Join as guest'."
- name: SEEDER_USER
value: othrys-seeder
- name: BOT_USER
value: atlasbot
command:
- /bin/sh
- -c
- |
set -euo pipefail
. /vault/scripts/comms_vault_env.sh
pip install --no-cache-dir requests >/dev/null
python - <<'PY'
import os
import time
import urllib.parse
import requests
BASE = os.environ["SYNAPSE_BASE"]
AUTH_BASE = os.environ.get("AUTH_BASE", BASE)
SERVER_NAME = os.environ.get("SERVER_NAME", "live.bstein.dev")
ROOM_ALIAS = os.environ.get("ROOM_ALIAS", "#othrys:live.bstein.dev")
ROOM_NAME = os.environ.get("ROOM_NAME", "Othrys")
PIN_MESSAGE = os.environ["PIN_MESSAGE"]
SEEDER_USER = os.environ["SEEDER_USER"]
SEEDER_PASS = os.environ["SEEDER_PASS"]
BOT_USER = os.environ["BOT_USER"]
POWER_LEVELS = {
"ban": 50,
"events": {
"m.room.avatar": 50,
"m.room.canonical_alias": 50,
"m.room.encryption": 100,
"m.room.history_visibility": 100,
"m.room.name": 50,
"m.room.power_levels": 100,
"m.room.server_acl": 100,
"m.room.tombstone": 100,
},
"events_default": 0,
"historical": 100,
"invite": 50,
"kick": 50,
"m.call.invite": 50,
"redact": 50,
"state_default": 50,
"users": {f"@{SEEDER_USER}:{SERVER_NAME}": 100},
"users_default": 0,
}
def auth(token):
return {"Authorization": f"Bearer {token}"}
def canon_user(user):
u = (user or "").strip()
if u.startswith("@") and ":" in u:
return u
u = u.lstrip("@")
if ":" in u:
return f"@{u}"
return f"@{u}:{SERVER_NAME}"
def login(user, password):
r = requests.post(
f"{AUTH_BASE}/_matrix/client/v3/login",
json={
"type": "m.login.password",
"identifier": {"type": "m.id.user", "user": canon_user(user)},
"password": password,
},
)
if r.status_code != 200:
raise SystemExit(f"login failed: {r.status_code} {r.text}")
return r.json()["access_token"]
def resolve_alias(token, alias):
enc = urllib.parse.quote(alias)
r = requests.get(f"{BASE}/_matrix/client/v3/directory/room/{enc}", headers=auth(token))
if r.status_code == 404:
return None
r.raise_for_status()
return r.json()["room_id"]
def create_room(token):
r = requests.post(
f"{BASE}/_matrix/client/v3/createRoom",
headers=auth(token),
json={
"preset": "public_chat",
"name": ROOM_NAME,
"room_version": "11",
},
)
r.raise_for_status()
return r.json()["room_id"]
def put_state(token, room_id, ev_type, content):
r = requests.put(
f"{BASE}/_matrix/client/v3/rooms/{urllib.parse.quote(room_id)}/state/{ev_type}",
headers=auth(token),
json=content,
)
r.raise_for_status()
def set_directory_visibility(token, room_id, visibility):
r = requests.put(
f"{BASE}/_matrix/client/v3/directory/list/room/{urllib.parse.quote(room_id)}",
headers=auth(token),
json={"visibility": visibility},
)
r.raise_for_status()
def delete_alias(token, alias):
enc = urllib.parse.quote(alias)
r = requests.delete(f"{BASE}/_matrix/client/v3/directory/room/{enc}", headers=auth(token))
if r.status_code in (200, 202, 404):
return
r.raise_for_status()
def put_alias(token, alias, room_id):
enc = urllib.parse.quote(alias)
r = requests.put(
f"{BASE}/_matrix/client/v3/directory/room/{enc}",
headers=auth(token),
json={"room_id": room_id},
)
r.raise_for_status()
def list_joined_members(token, room_id):
r = requests.get(
f"{BASE}/_matrix/client/v3/rooms/{urllib.parse.quote(room_id)}/members?membership=join",
headers=auth(token),
)
r.raise_for_status()
members = []
for ev in r.json().get("chunk", []):
if ev.get("type") != "m.room.member":
continue
uid = ev.get("state_key")
if not isinstance(uid, str) or not uid.startswith("@"):
continue
members.append(uid)
return members
def invite_user(token, room_id, user_id):
r = requests.post(
f"{BASE}/_matrix/client/v3/rooms/{urllib.parse.quote(room_id)}/invite",
headers=auth(token),
json={"user_id": user_id},
)
if r.status_code in (200, 202):
return
r.raise_for_status()
def send_message(token, room_id, body):
r = requests.post(
f"{BASE}/_matrix/client/v3/rooms/{urllib.parse.quote(room_id)}/send/m.room.message",
headers=auth(token),
json={"msgtype": "m.text", "body": body},
)
r.raise_for_status()
return r.json()["event_id"]
def login_with_retry():
last = None
for attempt in range(1, 6):
try:
return login(SEEDER_USER, SEEDER_PASS)
except Exception as exc: # noqa: BLE001
last = exc
time.sleep(attempt * 2)
raise last
token = login_with_retry()
old_room_id = resolve_alias(token, ROOM_ALIAS)
if not old_room_id:
raise SystemExit(f"alias {ROOM_ALIAS} not found; refusing to proceed")
new_room_id = create_room(token)
# Configure the new room.
put_state(token, new_room_id, "m.room.join_rules", {"join_rule": "public"})
put_state(token, new_room_id, "m.room.guest_access", {"guest_access": "can_join"})
put_state(token, new_room_id, "m.room.history_visibility", {"history_visibility": "shared"})
put_state(token, new_room_id, "m.room.power_levels", POWER_LEVELS)
# Move the alias.
delete_alias(token, ROOM_ALIAS)
put_alias(token, ROOM_ALIAS, new_room_id)
put_state(token, new_room_id, "m.room.canonical_alias", {"alias": ROOM_ALIAS})
set_directory_visibility(token, new_room_id, "public")
# Invite the bot and all joined members of the old room.
bot_user_id = f"@{BOT_USER}:{SERVER_NAME}"
invite_user(token, new_room_id, bot_user_id)
for uid in list_joined_members(token, old_room_id):
if uid == f"@{SEEDER_USER}:{SERVER_NAME}":
continue
localpart = uid.split(":", 1)[0].lstrip("@")
if localpart.isdigit():
continue
invite_user(token, new_room_id, uid)
# Pin the guest invite message in the new room.
event_id = send_message(token, new_room_id, PIN_MESSAGE)
put_state(token, new_room_id, "m.room.pinned_events", {"pinned": [event_id]})
# De-list and tombstone the old room.
set_directory_visibility(token, old_room_id, "private")
put_state(token, old_room_id, "m.room.join_rules", {"join_rule": "invite"})
put_state(token, old_room_id, "m.room.guest_access", {"guest_access": "forbidden"})
put_state(
token,
old_room_id,
"m.room.tombstone",
{"body": "Othrys has been reset. Please join the new room.", "replacement_room": new_room_id},
)
send_message(
token,
old_room_id,
"Othrys was reset. Join the new room at https://live.bstein.dev/#/room/#othrys:live.bstein.dev?action=join",
)
print(f"old_room_id={old_room_id}")
print(f"new_room_id={new_room_id}")
PY
volumeMounts:
- name: vault-scripts
mountPath: /vault/scripts
readOnly: true
volumes:
- name: vault-scripts
configMap:
name: comms-vault-env
defaultMode: 0555
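One caveat in the reset flow above: delete_alias followed by put_alias is not atomic, so there is a brief window where #othrys resolves to nothing. A hedged guard that polls the directory after the swap (resolve_alias is the job's own helper; the function below is an illustration, not part of the job):

import time

def wait_for_alias(token: str, alias: str, expected_room_id: str, tries: int = 5) -> bool:
    # Poll until the directory serves the new mapping, tolerating the
    # short gap between delete_alias() and put_alias().
    for _ in range(tries):
        if resolve_alias(token, alias) == expected_room_id:
            return True
        time.sleep(1)
    return False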


@@ -1,185 +0,0 @@
# services/comms/seed-othrys-room.yaml
apiVersion: batch/v1
kind: CronJob
metadata:
name: seed-othrys-room
namespace: comms
labels:
atlas.bstein.dev/glue: "true"
spec:
schedule: "*/10 * * * *"
suspend: true
concurrencyPolicy: Forbid
jobTemplate:
spec:
backoffLimit: 0
template:
metadata:
annotations:
vault.hashicorp.com/agent-inject: "true"
vault.hashicorp.com/agent-pre-populate-only: "true"
vault.hashicorp.com/role: "comms"
vault.hashicorp.com/agent-inject-secret-turn-secret: "kv/data/atlas/comms/turn-shared-secret"
vault.hashicorp.com/agent-inject-template-turn-secret: |
{{- with secret "kv/data/atlas/comms/turn-shared-secret" -}}{{ .Data.data.TURN_STATIC_AUTH_SECRET }}{{- end -}}
vault.hashicorp.com/agent-inject-secret-livekit-primary: "kv/data/atlas/comms/livekit-api"
vault.hashicorp.com/agent-inject-template-livekit-primary: |
{{- with secret "kv/data/atlas/comms/livekit-api" -}}{{ .Data.data.primary }}{{- end -}}
vault.hashicorp.com/agent-inject-secret-bot-pass: "kv/data/atlas/comms/atlasbot-credentials-runtime"
vault.hashicorp.com/agent-inject-template-bot-pass: |
{{- with secret "kv/data/atlas/comms/atlasbot-credentials-runtime" -}}{{ index .Data.data "bot-password" }}{{- end -}}
vault.hashicorp.com/agent-inject-secret-seeder-pass: "kv/data/atlas/comms/atlasbot-credentials-runtime"
vault.hashicorp.com/agent-inject-template-seeder-pass: |
{{- with secret "kv/data/atlas/comms/atlasbot-credentials-runtime" -}}{{ index .Data.data "seeder-password" }}{{- end -}}
vault.hashicorp.com/agent-inject-secret-chat-matrix: "kv/data/atlas/shared/chat-ai-keys-runtime"
vault.hashicorp.com/agent-inject-template-chat-matrix: |
{{- with secret "kv/data/atlas/shared/chat-ai-keys-runtime" -}}{{ .Data.data.matrix }}{{- end -}}
vault.hashicorp.com/agent-inject-secret-chat-homepage: "kv/data/atlas/shared/chat-ai-keys-runtime"
vault.hashicorp.com/agent-inject-template-chat-homepage: |
{{- with secret "kv/data/atlas/shared/chat-ai-keys-runtime" -}}{{ .Data.data.homepage }}{{- end -}}
vault.hashicorp.com/agent-inject-secret-mas-admin-secret: "kv/data/atlas/comms/mas-admin-client-runtime"
vault.hashicorp.com/agent-inject-template-mas-admin-secret: |
{{- with secret "kv/data/atlas/comms/mas-admin-client-runtime" -}}{{ .Data.data.client_secret }}{{- end -}}
vault.hashicorp.com/agent-inject-secret-synapse-db-pass: "kv/data/atlas/comms/synapse-db"
vault.hashicorp.com/agent-inject-template-synapse-db-pass: |
{{- with secret "kv/data/atlas/comms/synapse-db" -}}{{ .Data.data.POSTGRES_PASSWORD }}{{- end -}}
vault.hashicorp.com/agent-inject-secret-mas-db-pass: "kv/data/atlas/comms/mas-db"
vault.hashicorp.com/agent-inject-template-mas-db-pass: |
{{- with secret "kv/data/atlas/comms/mas-db" -}}{{ .Data.data.password }}{{- end -}}
vault.hashicorp.com/agent-inject-secret-mas-matrix-shared: "kv/data/atlas/comms/mas-secrets-runtime"
vault.hashicorp.com/agent-inject-template-mas-matrix-shared: |
{{- with secret "kv/data/atlas/comms/mas-secrets-runtime" -}}{{ .Data.data.matrix_shared_secret }}{{- end -}}
vault.hashicorp.com/agent-inject-secret-mas-kc-secret: "kv/data/atlas/comms/mas-secrets-runtime"
vault.hashicorp.com/agent-inject-template-mas-kc-secret: |
{{- with secret "kv/data/atlas/comms/mas-secrets-runtime" -}}{{ .Data.data.keycloak_client_secret }}{{- end -}}
spec:
restartPolicy: Never
serviceAccountName: comms-vault
containers:
- name: seed
image: python:3.11-slim
env:
- name: SYNAPSE_BASE
value: http://othrys-synapse-matrix-synapse:8008
- name: AUTH_BASE
value: http://matrix-authentication-service:8080
- name: SEEDER_USER
value: othrys-seeder
- name: BOT_USER
value: atlasbot
command:
- /bin/sh
- -c
- |
set -euo pipefail
. /vault/scripts/comms_vault_env.sh
pip install --no-cache-dir requests pyyaml >/dev/null
python - <<'PY'
import os, requests, urllib.parse
BASE = os.environ["SYNAPSE_BASE"]
AUTH_BASE = os.environ.get("AUTH_BASE", BASE)
def canon_user(user):
u = (user or "").strip()
if u.startswith("@") and ":" in u:
return u
u = u.lstrip("@")
if ":" in u:
return f"@{u}"
return f"@{u}:live.bstein.dev"
def login(user, password):
r = requests.post(f"{AUTH_BASE}/_matrix/client/v3/login", json={
"type": "m.login.password",
"identifier": {"type": "m.id.user", "user": canon_user(user)},
"password": password,
})
if r.status_code != 200:
raise SystemExit(f"login failed: {r.status_code} {r.text}")
return r.json()["access_token"]
def ensure_user(token, localpart, password, admin):
headers = {"Authorization": f"Bearer {token}"}
user_id = f"@{localpart}:live.bstein.dev"
url = f"{BASE}/_synapse/admin/v2/users/{urllib.parse.quote(user_id)}"
res = requests.get(url, headers=headers)
if res.status_code == 200:
return
payload = {"password": password, "admin": admin, "deactivated": False}
create = requests.put(url, headers=headers, json=payload)
if create.status_code not in (200, 201):
raise SystemExit(f"create user {user_id} failed: {create.status_code} {create.text}")
def ensure_room(token):
headers = {"Authorization": f"Bearer {token}"}
alias = "#othrys:live.bstein.dev"
alias_enc = "%23othrys%3Alive.bstein.dev"
exists = requests.get(f"{BASE}/_matrix/client/v3/directory/room/{alias_enc}", headers=headers)
if exists.status_code == 200:
room_id = exists.json()["room_id"]
else:
create = requests.post(f"{BASE}/_matrix/client/v3/createRoom", headers=headers, json={
"preset": "public_chat",
"name": "Othrys",
"room_alias_name": "othrys",
"initial_state": [],
"power_level_content_override": {"events_default": 0, "users_default": 0, "state_default": 50},
})
if create.status_code not in (200, 409):
raise SystemExit(f"create room failed: {create.status_code} {create.text}")
exists = requests.get(f"{BASE}/_matrix/client/v3/directory/room/{alias_enc}", headers=headers)
room_id = exists.json()["room_id"]
state_events = [
("m.room.join_rules", {"join_rule": "public"}),
("m.room.guest_access", {"guest_access": "can_join"}),
("m.room.history_visibility", {"history_visibility": "shared"}),
("m.room.canonical_alias", {"alias": alias}),
]
for ev_type, content in state_events:
requests.put(f"{BASE}/_matrix/client/v3/rooms/{room_id}/state/{ev_type}", headers=headers, json=content)
requests.put(f"{BASE}/_matrix/client/v3/directory/list/room/{room_id}", headers=headers, json={"visibility": "public"})
return room_id
def join_user(token, room_id, user_id):
headers = {"Authorization": f"Bearer {token}"}
requests.post(f"{BASE}/_synapse/admin/v1/join/{urllib.parse.quote(room_id)}", headers=headers, json={"user_id": user_id})
def join_all_locals(token, room_id):
headers = {"Authorization": f"Bearer {token}"}
users = []
from_token = None
while True:
url = f"{BASE}/_synapse/admin/v2/users?local=true&deactivated=false&limit=100"
if from_token:
url += f"&from={from_token}"
res = requests.get(url, headers=headers).json()
users.extend([u["name"] for u in res.get("users", [])])
from_token = res.get("next_token")
if not from_token:
break
for uid in users:
join_user(token, room_id, uid)
token = login(os.environ["SEEDER_USER"], os.environ["SEEDER_PASS"])
ensure_user(token, os.environ["SEEDER_USER"], os.environ["SEEDER_PASS"], admin=True)
ensure_user(token, os.environ["BOT_USER"], os.environ["BOT_PASS"], admin=False)
room_id = ensure_room(token)
join_user(token, room_id, f"@{os.environ['BOT_USER']}:live.bstein.dev")
join_all_locals(token, room_id)
PY
volumeMounts:
- name: synapse-config
mountPath: /config
readOnly: true
- name: vault-scripts
mountPath: /vault/scripts
readOnly: true
volumes:
- name: synapse-config
secret:
secretName: othrys-synapse-matrix-synapse
- name: vault-scripts
configMap:
name: comms-vault-env
defaultMode: 0555


@@ -1,199 +0,0 @@
# services/finance/firefly-user-sync-cronjob.yaml
apiVersion: batch/v1
kind: CronJob
metadata:
name: firefly-user-sync
namespace: finance
labels:
atlas.bstein.dev/glue: "true"
spec:
schedule: "0 6 * * *"
suspend: true
concurrencyPolicy: Forbid
successfulJobsHistoryLimit: 1
failedJobsHistoryLimit: 3
jobTemplate:
spec:
backoffLimit: 0
template:
metadata:
annotations:
vault.hashicorp.com/agent-inject: "true"
vault.hashicorp.com/agent-pre-populate-only: "true"
vault.hashicorp.com/role: "finance"
vault.hashicorp.com/agent-inject-secret-firefly-env.sh: "kv/data/atlas/finance/firefly-db"
vault.hashicorp.com/agent-inject-template-firefly-env.sh: |
{{ with secret "kv/data/atlas/finance/firefly-db" }}
export DB_CONNECTION="pgsql"
export DB_HOST="{{ .Data.data.DB_HOST }}"
export DB_PORT="{{ .Data.data.DB_PORT }}"
export DB_DATABASE="{{ .Data.data.DB_DATABASE }}"
export DB_USERNAME="{{ .Data.data.DB_USERNAME }}"
export DB_PASSWORD="$(cat /vault/secrets/firefly-db-password)"
{{ end }}
{{ with secret "kv/data/atlas/finance/firefly-secrets" }}
export APP_KEY="$(cat /vault/secrets/firefly-app-key)"
{{ end }}
vault.hashicorp.com/agent-inject-secret-firefly-db-password: "kv/data/atlas/finance/firefly-db"
vault.hashicorp.com/agent-inject-template-firefly-db-password: |
{{- with secret "kv/data/atlas/finance/firefly-db" -}}
{{ .Data.data.DB_PASSWORD }}
{{- end -}}
vault.hashicorp.com/agent-inject-secret-firefly-app-key: "kv/data/atlas/finance/firefly-secrets"
vault.hashicorp.com/agent-inject-template-firefly-app-key: |
{{- with secret "kv/data/atlas/finance/firefly-secrets" -}}
{{ .Data.data.APP_KEY }}
{{- end -}}
spec:
serviceAccountName: finance-vault
restartPolicy: Never
affinity:
nodeAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- weight: 100
preference:
matchExpressions:
- key: hardware
operator: In
values: ["rpi5"]
- weight: 70
preference:
matchExpressions:
- key: hardware
operator: In
values: ["rpi4"]
nodeSelector:
kubernetes.io/arch: arm64
node-role.kubernetes.io/worker: "true"
containers:
- name: sync
image: fireflyiii/core:version-6.4.15
command: ["/bin/sh", "-c"]
args:
- |
set -eu
. /vault/secrets/firefly-env.sh
cat <<'PHP' > /tmp/firefly_user_sync.php
#!/usr/bin/env php
<?php
declare(strict_types=1);
use FireflyIII\Console\Commands\Correction\CreatesGroupMemberships;
use FireflyIII\Models\Role;
use FireflyIII\Repositories\User\UserRepositoryInterface;
use FireflyIII\Support\Facades\FireflyConfig;
use FireflyIII\User;
use Illuminate\Contracts\Console\Kernel as ConsoleKernel;
function log_line(string $message): void
{
fwrite(STDOUT, $message . PHP_EOL);
}
function error_line(string $message): void
{
fwrite(STDERR, $message . PHP_EOL);
}
function find_app_root(): string
{
$candidates = [];
$env_root = getenv('FIREFLY_APP_DIR') ?: '';
if ($env_root !== '') {
$candidates[] = $env_root;
}
$candidates[] = '/var/www/html';
$candidates[] = '/var/www/firefly-iii';
$candidates[] = '/app';
foreach ($candidates as $candidate) {
if (!is_dir($candidate)) {
continue;
}
if (file_exists($candidate . '/vendor/autoload.php')) {
return $candidate;
}
}
return '';
}
$email = trim((string) getenv('FIREFLY_USER_EMAIL'));
$password = (string) getenv('FIREFLY_USER_PASSWORD');
if ($email === '' || $password === '') {
error_line('missing FIREFLY_USER_EMAIL or FIREFLY_USER_PASSWORD');
exit(1);
}
$root = find_app_root();
if ($root === '') {
error_line('firefly app root not found');
exit(1);
}
$autoload = $root . '/vendor/autoload.php';
$app_bootstrap = $root . '/bootstrap/app.php';
if (!file_exists($autoload) || !file_exists($app_bootstrap)) {
error_line('firefly bootstrap files missing');
exit(1);
}
require $autoload;
$app = require $app_bootstrap;
$kernel = $app->make(ConsoleKernel::class);
$kernel->bootstrap();
try {
FireflyConfig::set('single_user_mode', true);
} catch (Throwable $exc) {
error_line('failed to enforce single_user_mode: '.$exc->getMessage());
}
$repository = $app->make(UserRepositoryInterface::class);
$existing_user = User::where('email', $email)->first();
$first_user = User::count() == 0;
if (!$existing_user) {
$existing_user = User::create(
[
'email' => $email,
'password' => bcrypt($password),
'blocked' => false,
'blocked_code' => null,
]
);
if ($first_user) {
$role = Role::where('name', 'owner')->first();
if ($role) {
$existing_user->roles()->attach($role);
}
}
log_line(sprintf('created firefly user %s', $email));
} else {
log_line(sprintf('updating firefly user %s', $email));
}
$existing_user->blocked = false;
$existing_user->blocked_code = null;
$existing_user->save();
$repository->changePassword($existing_user, $password);
CreatesGroupMemberships::createGroupMembership($existing_user);
log_line('firefly user sync complete');
PHP
exec php /tmp/firefly_user_sync.php
env:
- name: APP_ENV
value: production
- name: APP_DEBUG
value: "false"
- name: TZ
value: Etc/UTC


@@ -1,233 +0,0 @@
# services/health/wger-admin-ensure-cronjob.yaml
apiVersion: batch/v1
kind: CronJob
metadata:
name: wger-admin-ensure
namespace: health
labels:
atlas.bstein.dev/glue: "true"
spec:
schedule: "15 3 * * *"
suspend: true
concurrencyPolicy: Forbid
successfulJobsHistoryLimit: 1
failedJobsHistoryLimit: 3
jobTemplate:
spec:
backoffLimit: 1
template:
metadata:
annotations:
vault.hashicorp.com/agent-inject: "true"
vault.hashicorp.com/agent-pre-populate-only: "true"
vault.hashicorp.com/role: "health"
vault.hashicorp.com/agent-inject-secret-wger-env: "kv/data/atlas/health/wger-db"
vault.hashicorp.com/agent-inject-template-wger-env: |
{{ with secret "kv/data/atlas/health/wger-db" }}
export DJANGO_DB_HOST="{{ .Data.data.DJANGO_DB_HOST }}"
export DJANGO_DB_PORT="{{ .Data.data.DJANGO_DB_PORT }}"
export DJANGO_DB_DATABASE="{{ .Data.data.DJANGO_DB_DATABASE }}"
export DJANGO_DB_USER="{{ .Data.data.DJANGO_DB_USER }}"
export DJANGO_DB_PASSWORD="$(cat /vault/secrets/wger-db-password)"
{{ end }}
{{ with secret "kv/data/atlas/health/wger-secrets" }}
export SECRET_KEY="$(cat /vault/secrets/wger-secret-key)"
export SIGNING_KEY="$(cat /vault/secrets/wger-signing-key)"
{{ end }}
{{ with secret "kv/data/atlas/health/wger-admin" }}
export WGER_ADMIN_USERNAME="$(cat /vault/secrets/wger-admin-username)"
export WGER_ADMIN_PASSWORD="$(cat /vault/secrets/wger-admin-password)"
{{ end }}
vault.hashicorp.com/agent-inject-secret-wger-db-password: "kv/data/atlas/health/wger-db"
vault.hashicorp.com/agent-inject-template-wger-db-password: |
{{- with secret "kv/data/atlas/health/wger-db" -}}
{{ .Data.data.DJANGO_DB_PASSWORD }}
{{- end -}}
vault.hashicorp.com/agent-inject-secret-wger-secret-key: "kv/data/atlas/health/wger-secrets"
vault.hashicorp.com/agent-inject-template-wger-secret-key: |
{{- with secret "kv/data/atlas/health/wger-secrets" -}}
{{ .Data.data.SECRET_KEY }}
{{- end -}}
vault.hashicorp.com/agent-inject-secret-wger-signing-key: "kv/data/atlas/health/wger-secrets"
vault.hashicorp.com/agent-inject-template-wger-signing-key: |
{{- with secret "kv/data/atlas/health/wger-secrets" -}}
{{ .Data.data.SIGNING_KEY }}
{{- end -}}
vault.hashicorp.com/agent-inject-secret-wger-admin-username: "kv/data/atlas/health/wger-admin"
vault.hashicorp.com/agent-inject-template-wger-admin-username: |
{{- with secret "kv/data/atlas/health/wger-admin" -}}
{{ .Data.data.username }}
{{- end -}}
vault.hashicorp.com/agent-inject-secret-wger-admin-password: "kv/data/atlas/health/wger-admin"
vault.hashicorp.com/agent-inject-template-wger-admin-password: |
{{- with secret "kv/data/atlas/health/wger-admin" -}}
{{ .Data.data.password }}
{{- end -}}
spec:
serviceAccountName: health-vault-sync
restartPolicy: Never
affinity:
nodeAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- weight: 100
preference:
matchExpressions:
- key: hardware
operator: In
values: ["rpi5"]
- weight: 70
preference:
matchExpressions:
- key: hardware
operator: In
values: ["rpi4"]
nodeSelector:
kubernetes.io/arch: arm64
node-role.kubernetes.io/worker: "true"
containers:
- name: ensure
image: wger/server@sha256:710588b78af4e0aa0b4d8a8061e4563e16eae80eeaccfe7f9e0d9cbdd7f0cbc5
imagePullPolicy: IfNotPresent
command: ["/bin/sh", "-c"]
args:
- |
set -eu
. /vault/secrets/wger-env
cat <<'PY' > /tmp/wger_user_sync.py
#!/usr/bin/env python3
from __future__ import annotations
import os
import sys
import django
def _env(name: str, default: str = "") -> str:
value = os.getenv(name, default)
return value.strip() if isinstance(value, str) else ""
def _setup_django() -> None:
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "settings.main")
django.setup()
def _set_default_gym(user) -> None:
try:
from wger.gym.models import GymConfig
except Exception:
return
try:
config = GymConfig.objects.first()
except Exception:
return
if not config or not getattr(config, "default_gym", None):
return
profile = getattr(user, "userprofile", None)
if not profile or getattr(profile, "gym", None):
return
profile.gym = config.default_gym
profile.save()
def _ensure_profile(user) -> None:
profile = getattr(user, "userprofile", None)
if not profile:
return
if hasattr(profile, "email_verified") and not profile.email_verified:
profile.email_verified = True
if hasattr(profile, "is_temporary") and profile.is_temporary:
profile.is_temporary = False
profile.save()
def _ensure_admin(username: str, password: str, email: str) -> None:
from django.contrib.auth.models import User
if not username or not password:
raise RuntimeError("admin username/password missing")
user, created = User.objects.get_or_create(username=username)
if created:
user.is_active = True
if not user.is_staff:
user.is_staff = True
if email:
user.email = email
user.set_password(password)
user.save()
_ensure_profile(user)
_set_default_gym(user)
print(f"ensured admin user {username}")
def _ensure_user(username: str, password: str, email: str) -> None:
from django.contrib.auth.models import User
if not username or not password:
raise RuntimeError("username/password missing")
user, created = User.objects.get_or_create(username=username)
if created:
user.is_active = True
if email and user.email != email:
user.email = email
user.set_password(password)
user.save()
_ensure_profile(user)
_set_default_gym(user)
action = "created" if created else "updated"
print(f"{action} user {username}")
def main() -> int:
admin_user = _env("WGER_ADMIN_USERNAME")
admin_password = _env("WGER_ADMIN_PASSWORD")
admin_email = _env("WGER_ADMIN_EMAIL")
username = _env("WGER_USERNAME") or _env("ONLY_USERNAME")
password = _env("WGER_PASSWORD")
email = _env("WGER_EMAIL")
if not any([admin_user and admin_password, username and password]):
print("no admin or user payload provided; exiting")
return 0
_setup_django()
if admin_user and admin_password:
_ensure_admin(admin_user, admin_password, admin_email)
if username and password:
_ensure_user(username, password, email)
return 0
if __name__ == "__main__":
sys.exit(main())
PY
exec python3 /tmp/wger_user_sync.py
env:
- name: SITE_URL
value: https://health.bstein.dev
- name: TIME_ZONE
value: Etc/UTC
- name: TZ
value: Etc/UTC
- name: DJANGO_DEBUG
value: "False"
- name: DJANGO_DB_ENGINE
value: django.db.backends.postgresql
- name: DJANGO_CACHE_BACKEND
value: django.core.cache.backends.locmem.LocMemCache
- name: DJANGO_CACHE_LOCATION
value: wger-cache


@@ -1,219 +0,0 @@
# services/health/wger-user-sync-cronjob.yaml
apiVersion: batch/v1
kind: CronJob
metadata:
name: wger-user-sync
namespace: health
labels:
atlas.bstein.dev/glue: "true"
spec:
schedule: "0 5 * * *"
suspend: true
concurrencyPolicy: Forbid
successfulJobsHistoryLimit: 1
failedJobsHistoryLimit: 3
jobTemplate:
spec:
backoffLimit: 0
template:
metadata:
annotations:
vault.hashicorp.com/agent-inject: "true"
vault.hashicorp.com/agent-pre-populate-only: "true"
vault.hashicorp.com/role: "health"
vault.hashicorp.com/agent-inject-secret-wger-env: "kv/data/atlas/health/wger-db"
vault.hashicorp.com/agent-inject-template-wger-env: |
{{ with secret "kv/data/atlas/health/wger-db" }}
export DJANGO_DB_HOST="{{ .Data.data.DJANGO_DB_HOST }}"
export DJANGO_DB_PORT="{{ .Data.data.DJANGO_DB_PORT }}"
export DJANGO_DB_DATABASE="{{ .Data.data.DJANGO_DB_DATABASE }}"
export DJANGO_DB_USER="{{ .Data.data.DJANGO_DB_USER }}"
export DJANGO_DB_PASSWORD="$(cat /vault/secrets/wger-db-password)"
{{ end }}
{{ with secret "kv/data/atlas/health/wger-secrets" }}
export SECRET_KEY="$(cat /vault/secrets/wger-secret-key)"
export SIGNING_KEY="$(cat /vault/secrets/wger-signing-key)"
{{ end }}
vault.hashicorp.com/agent-inject-secret-wger-db-password: "kv/data/atlas/health/wger-db"
vault.hashicorp.com/agent-inject-template-wger-db-password: |
{{- with secret "kv/data/atlas/health/wger-db" -}}
{{ .Data.data.DJANGO_DB_PASSWORD }}
{{- end -}}
vault.hashicorp.com/agent-inject-secret-wger-secret-key: "kv/data/atlas/health/wger-secrets"
vault.hashicorp.com/agent-inject-template-wger-secret-key: |
{{- with secret "kv/data/atlas/health/wger-secrets" -}}
{{ .Data.data.SECRET_KEY }}
{{- end -}}
vault.hashicorp.com/agent-inject-secret-wger-signing-key: "kv/data/atlas/health/wger-secrets"
vault.hashicorp.com/agent-inject-template-wger-signing-key: |
{{- with secret "kv/data/atlas/health/wger-secrets" -}}
{{ .Data.data.SIGNING_KEY }}
{{- end -}}
spec:
serviceAccountName: health-vault-sync
restartPolicy: Never
affinity:
nodeAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- weight: 100
preference:
matchExpressions:
- key: hardware
operator: In
values: ["rpi5"]
- weight: 70
preference:
matchExpressions:
- key: hardware
operator: In
values: ["rpi4"]
nodeSelector:
kubernetes.io/arch: arm64
node-role.kubernetes.io/worker: "true"
containers:
- name: sync
image: wger/server@sha256:710588b78af4e0aa0b4d8a8061e4563e16eae80eeaccfe7f9e0d9cbdd7f0cbc5
imagePullPolicy: IfNotPresent
command: ["/bin/sh", "-c"]
args:
- |
set -eu
. /vault/secrets/wger-env
cat <<'PY' > /tmp/wger_user_sync.py
#!/usr/bin/env python3
from __future__ import annotations
import os
import sys
import django
def _env(name: str, default: str = "") -> str:
value = os.getenv(name, default)
return value.strip() if isinstance(value, str) else ""
def _setup_django() -> None:
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "settings.main")
django.setup()
def _set_default_gym(user) -> None:
try:
from wger.gym.models import GymConfig
except Exception:
return
try:
config = GymConfig.objects.first()
except Exception:
return
if not config or not getattr(config, "default_gym", None):
return
profile = getattr(user, "userprofile", None)
if not profile or getattr(profile, "gym", None):
return
profile.gym = config.default_gym
profile.save()
def _ensure_profile(user) -> None:
profile = getattr(user, "userprofile", None)
if not profile:
return
if hasattr(profile, "email_verified") and not profile.email_verified:
profile.email_verified = True
if hasattr(profile, "is_temporary") and profile.is_temporary:
profile.is_temporary = False
profile.save()
def _ensure_admin(username: str, password: str, email: str) -> None:
from django.contrib.auth.models import User
if not username or not password:
raise RuntimeError("admin username/password missing")
user, created = User.objects.get_or_create(username=username)
if created:
user.is_active = True
if not user.is_staff:
user.is_staff = True
if email:
user.email = email
user.set_password(password)
user.save()
_ensure_profile(user)
_set_default_gym(user)
print(f"ensured admin user {username}")
def _ensure_user(username: str, password: str, email: str) -> None:
from django.contrib.auth.models import User
if not username or not password:
raise RuntimeError("username/password missing")
user, created = User.objects.get_or_create(username=username)
if created:
user.is_active = True
if email and user.email != email:
user.email = email
user.set_password(password)
user.save()
_ensure_profile(user)
_set_default_gym(user)
action = "created" if created else "updated"
print(f"{action} user {username}")
def main() -> int:
admin_user = _env("WGER_ADMIN_USERNAME")
admin_password = _env("WGER_ADMIN_PASSWORD")
admin_email = _env("WGER_ADMIN_EMAIL")
username = _env("WGER_USERNAME") or _env("ONLY_USERNAME")
password = _env("WGER_PASSWORD")
email = _env("WGER_EMAIL")
if not any([admin_user and admin_password, username and password]):
print("no admin or user payload provided; exiting")
return 0
_setup_django()
if admin_user and admin_password:
_ensure_admin(admin_user, admin_password, admin_email)
if username and password:
_ensure_user(username, password, email)
return 0
if __name__ == "__main__":
sys.exit(main())
PY
exec python3 /tmp/wger_user_sync.py
env:
- name: SITE_URL
value: https://health.bstein.dev
- name: TIME_ZONE
value: Etc/UTC
- name: TZ
value: Etc/UTC
- name: DJANGO_DEBUG
value: "False"
- name: DJANGO_DB_ENGINE
value: django.db.backends.postgresql
- name: DJANGO_CACHE_BACKEND
value: django.core.cache.backends.locmem.LocMemCache
- name: DJANGO_CACHE_LOCATION
value: wger-cache

View File

@@ -1,335 +0,0 @@
# services/nextcloud-mail-sync/cronjob.yaml
apiVersion: batch/v1
kind: CronJob
metadata:
name: nextcloud-mail-sync
namespace: nextcloud
labels:
atlas.bstein.dev/glue: "true"
spec:
schedule: "0 5 * * *"
suspend: true
concurrencyPolicy: Forbid
successfulJobsHistoryLimit: 3
failedJobsHistoryLimit: 1
jobTemplate:
spec:
template:
metadata:
annotations:
vault.hashicorp.com/agent-inject: "true"
vault.hashicorp.com/agent-pre-populate-only: "true"
vault.hashicorp.com/role: "nextcloud"
vault.hashicorp.com/agent-inject-secret-nextcloud-env.sh: "kv/data/atlas/nextcloud/nextcloud-db"
vault.hashicorp.com/agent-inject-template-nextcloud-env.sh: |
{{ with secret "kv/data/atlas/nextcloud/nextcloud-db" }}
export POSTGRES_DB="{{ .Data.data.database }}"
export POSTGRES_USER="{{ index .Data.data "db-username" }}"
export POSTGRES_PASSWORD="{{ index .Data.data "db-password" }}"
{{ end }}
{{ with secret "kv/data/atlas/nextcloud/nextcloud-admin" }}
export NEXTCLOUD_ADMIN_USER="{{ index .Data.data "admin-user" }}"
export NEXTCLOUD_ADMIN_PASSWORD="{{ index .Data.data "admin-password" }}"
{{ end }}
export ADMIN_USER="${NEXTCLOUD_ADMIN_USER}"
export ADMIN_PASS="${NEXTCLOUD_ADMIN_PASSWORD}"
{{ with secret "kv/data/atlas/nextcloud/nextcloud-oidc" }}
export OIDC_CLIENT_ID="{{ index .Data.data "client-id" }}"
export OIDC_CLIENT_SECRET="{{ index .Data.data "client-secret" }}"
{{ end }}
{{ with secret "kv/data/atlas/shared/postmark-relay" }}
export SMTP_NAME="{{ index .Data.data "apikey" }}"
export SMTP_PASSWORD="{{ index .Data.data "apikey" }}"
{{ end }}
{{ with secret "kv/data/atlas/shared/keycloak-admin" }}
export KC_ADMIN_USER="{{ .Data.data.username }}"
export KC_ADMIN_PASS="{{ .Data.data.password }}"
{{ end }}
spec:
nodeSelector:
kubernetes.io/arch: arm64
node-role.kubernetes.io/worker: "true"
restartPolicy: OnFailure
securityContext:
runAsUser: 0
runAsGroup: 0
serviceAccountName: nextcloud-vault
containers:
- name: mail-sync
image: nextcloud:29-apache
imagePullPolicy: IfNotPresent
command:
- /bin/sh
- -c
env:
- name: KC_BASE
value: http://keycloak.sso.svc.cluster.local
- name: KC_REALM
value: atlas
- name: MAILU_DOMAIN
value: bstein.dev
- name: POSTGRES_HOST
value: postgres-service.postgres.svc.cluster.local
resources:
requests:
cpu: 100m
memory: 256Mi
limits:
cpu: 500m
memory: 512Mi
volumeMounts:
- name: nextcloud-web
mountPath: /var/www/html
- name: nextcloud-config-pvc
mountPath: /var/www/html/config
- name: nextcloud-custom-apps
mountPath: /var/www/html/custom_apps
- name: nextcloud-user-data
mountPath: /var/www/html/data
args:
- |
set -eu
. /vault/secrets/nextcloud-env.sh
cat <<'SCRIPT' > /tmp/nextcloud-mail-sync.sh
#!/bin/bash
set -euo pipefail
KC_BASE="${KC_BASE:?}"
KC_REALM="${KC_REALM:?}"
KC_ADMIN_USER="${KC_ADMIN_USER:?}"
KC_ADMIN_PASS="${KC_ADMIN_PASS:?}"
MAILU_DOMAIN="${MAILU_DOMAIN:?}"
ONLY_USERNAME="${ONLY_USERNAME:-}"
POSTGRES_HOST="${POSTGRES_HOST:-}"
POSTGRES_DB="${POSTGRES_DB:-}"
POSTGRES_USER="${POSTGRES_USER:-}"
POSTGRES_PASSWORD="${POSTGRES_PASSWORD:-}"
if ! command -v jq >/dev/null 2>&1; then
apt-get update && apt-get install -y jq curl >/dev/null
fi
ensure_psql() {
if command -v psql >/dev/null 2>&1; then
return 0
fi
apt-get update && apt-get install -y postgresql-client >/dev/null
}
set_editor_mode_richtext() {
local ids=("$@")
if [[ ${#ids[@]} -eq 0 ]]; then
return 0
fi
if [[ -z "${POSTGRES_HOST}" || -z "${POSTGRES_DB}" || -z "${POSTGRES_USER}" || -z "${POSTGRES_PASSWORD}" ]]; then
echo "WARN: missing postgres env; cannot update mail editor_mode" >&2
return 0
fi
ensure_psql
local ids_csv
ids_csv=$(IFS=,; echo "${ids[*]}")
PGPASSWORD="${POSTGRES_PASSWORD}" psql \
-h "${POSTGRES_HOST}" \
-U "${POSTGRES_USER}" \
-d "${POSTGRES_DB}" \
-v ON_ERROR_STOP=1 \
-c "UPDATE oc_mail_accounts SET editor_mode='richtext' WHERE id IN (${ids_csv}) AND editor_mode <> 'richtext';" \
>/dev/null
}
list_mail_accounts() {
local user_id="${1}"
local export_out
# Nextcloud Mail does not provide a list command; export is safe (does not print passwords).
if ! export_out=$(/usr/sbin/runuser -u www-data -- php occ mail:account:export "${user_id}"); then
echo "WARN: unable to export mail accounts for ${user_id}; skipping sync for safety" >&2
return 1
fi
awk -v OFS='\t' '
BEGIN { IGNORECASE=1; id="" }
$1 == "Account" { id=$2; sub(":", "", id); next }
$1 == "-" && tolower($2) ~ /^e-?mail:$/ { if (id) print id, $3 }
' <<<"${export_out}" | sort -u
}
token=$(
curl -fsS \
--data-urlencode "grant_type=password" \
--data-urlencode "client_id=admin-cli" \
--data-urlencode "username=${KC_ADMIN_USER}" \
--data-urlencode "password=${KC_ADMIN_PASS}" \
"${KC_BASE}/realms/master/protocol/openid-connect/token" | jq -r '.access_token // empty'
)
if [[ -z "${token}" || "${token}" == "null" ]]; then
echo "Failed to obtain admin token"
exit 1
fi
cd /var/www/html
kc_users_url="${KC_BASE}/admin/realms/${KC_REALM}/users?max=2000&briefRepresentation=false"
if [[ -n "${ONLY_USERNAME}" ]]; then
username_q=$(jq -nr --arg v "${ONLY_USERNAME}" '$v|@uri')
kc_users_url="${KC_BASE}/admin/realms/${KC_REALM}/users?username=${username_q}&exact=true&max=1&briefRepresentation=false"
fi
users=$(curl -fsS -H "Authorization: Bearer ${token}" "${kc_users_url}")
if ! jq -e 'type == "array"' >/dev/null 2>&1 <<<"${users}"; then
echo "ERROR: Keycloak user list is not an array; aborting sync" >&2
exit 1
fi
kc_set_user_mail_meta() {
local user_id="${1}"
local primary_email="${2}"
local mailu_account_count="${3}"
local synced_at="${4}"
# Fetch the full user representation so we don't accidentally clobber attributes.
local user_json updated_json
if ! user_json=$(curl -fsS -H "Authorization: Bearer ${token}" \
"${KC_BASE}/admin/realms/${KC_REALM}/users/${user_id}"); then
echo "WARN: unable to fetch Keycloak user ${user_id} for metadata writeback" >&2
return 1
fi
updated_json=$(
jq -c \
--arg primary_email "${primary_email}" \
--arg mailu_account_count "${mailu_account_count}" \
--arg synced_at "${synced_at}" \
'
.attributes = (.attributes // {}) |
.attributes.nextcloud_mail_primary_email = [$primary_email] |
.attributes.nextcloud_mail_account_count = [$mailu_account_count] |
.attributes.nextcloud_mail_synced_at = [$synced_at] |
del(.access)
' <<<"${user_json}"
)
curl -fsS -X PUT \
-H "Authorization: Bearer ${token}" \
-H "Content-Type: application/json" \
-d "${updated_json}" \
"${KC_BASE}/admin/realms/${KC_REALM}/users/${user_id}" >/dev/null
}
while read -r user; do
user_id=$(jq -r '.id' <<<"${user}")
username=$(jq -r '.username' <<<"${user}")
keycloak_email=$(echo "${user}" | jq -r '.email // empty')
mailu_email=$(echo "${user}" | jq -r '(.attributes.mailu_email[0] // .attributes.mailu_email // empty)')
app_pw=$(echo "${user}" | jq -r '(.attributes.mailu_app_password[0] // .attributes.mailu_app_password // empty)')
if [[ -z "${mailu_email}" ]]; then
if [[ -n "${keycloak_email}" && "${keycloak_email,,}" == *"@${MAILU_DOMAIN,,}" ]]; then
mailu_email="${keycloak_email}"
else
mailu_email="${username}@${MAILU_DOMAIN}"
fi
fi
[[ -z "${mailu_email}" || -z "${app_pw}" ]] && continue
if ! accounts=$(list_mail_accounts "${username}"); then
continue
fi
# Manage only internal Mailu-domain accounts; leave any external accounts untouched.
mailu_accounts=$(awk -v d="${MAILU_DOMAIN,,}" 'tolower($2) ~ ("@" d "$") {print}' <<<"${accounts}" || true)
desired_email="${mailu_email}"
primary_id=""
primary_email=""
if [[ -n "${mailu_accounts}" ]]; then
while IFS=$'\t' read -r account_id account_email; do
if [[ -z "${primary_id}" ]]; then
primary_id="${account_id}"
primary_email="${account_email}"
fi
if [[ "${account_email,,}" == "${desired_email,,}" ]]; then
primary_id="${account_id}"
primary_email="${account_email}"
break
fi
done <<<"${mailu_accounts}"
echo "Updating ${username} mail account ${primary_id} (${primary_email})"
/usr/sbin/runuser -u www-data -- php occ mail:account:update -q "${primary_id}" \
--name "${username}" \
--email "${desired_email}" \
--imap-host mail.bstein.dev \
--imap-port 993 \
--imap-ssl-mode ssl \
--imap-user "${desired_email}" \
--imap-password "${app_pw}" \
--smtp-host mail.bstein.dev \
--smtp-port 587 \
--smtp-ssl-mode tls \
--smtp-user "${desired_email}" \
--smtp-password "${app_pw}" \
--auth-method password >/dev/null 2>&1 || true
# Remove any extra Mailu-domain accounts for this user to prevent duplicates.
while IFS=$'\t' read -r account_id account_email; do
if [[ "${account_id}" == "${primary_id}" ]]; then
continue
fi
echo "Deleting extra mail account ${account_id} (${account_email})"
/usr/sbin/runuser -u www-data -- php occ mail:account:delete -q "${account_id}" >/dev/null 2>&1 || true
done <<<"${mailu_accounts}"
else
echo "Creating mail account for ${username} (${desired_email})"
/usr/sbin/runuser -u www-data -- php occ mail:account:create -q \
"${username}" "${username}" "${desired_email}" \
--imap-host mail.bstein.dev \
--imap-port 993 \
--imap-ssl-mode ssl \
--imap-user "${desired_email}" \
--imap-password "${app_pw}" \
--smtp-host mail.bstein.dev \
--smtp-port 587 \
--smtp-ssl-mode tls \
--smtp-user "${desired_email}" \
--smtp-password "${app_pw}" \
--auth-method password >/dev/null 2>&1 || true
primary_id=$(list_mail_accounts "${username}" | awk -v d="${desired_email,,}" 'tolower($2) == d {print $1; exit}')
primary_email="${desired_email}"
fi
if [[ -n "${primary_id}" ]]; then
set_editor_mode_richtext "${primary_id}"
fi
mailu_account_count=$(wc -l <<<"${mailu_accounts}" | tr -d ' ')
if [[ -z "${mailu_account_count}" ]]; then
mailu_account_count="0"
fi
synced_at=$(date -u +"%Y-%m-%dT%H:%M:%SZ")
kc_set_user_mail_meta "${user_id}" "${primary_email}" "${mailu_account_count}" "${synced_at}" || true
done < <(jq -c '.[]' <<<"${users}")
SCRIPT
exec /bin/bash /tmp/nextcloud-mail-sync.sh
volumes:
- name: nextcloud-config-pvc
persistentVolumeClaim:
claimName: nextcloud-config-v2
- name: nextcloud-custom-apps
persistentVolumeClaim:
claimName: nextcloud-custom-apps-v2
- name: nextcloud-user-data
persistentVolumeClaim:
claimName: nextcloud-user-data-v2
- name: nextcloud-web
persistentVolumeClaim:
claimName: nextcloud-web-v2
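
Note: the deleted script fetches its Keycloak admin token with curl's password grant; the internalized ariadne task performs the same exchange in-process. A rough httpx equivalent of that token fetch (endpoint and grant parameters are taken from the script above; retry and error handling are simplified):

import httpx

# Same exchange as the curl call: password grant against the master realm
# using the admin-cli public client.
def keycloak_admin_token(kc_base: str, username: str, password: str) -> str:
    resp = httpx.post(
        f"{kc_base}/realms/master/protocol/openid-connect/token",
        data={
            "grant_type": "password",
            "client_id": "admin-cli",
            "username": username,
            "password": password,
        },
    )
    resp.raise_for_status()
    token = resp.json().get("access_token")
    if not isinstance(token, str) or not token:
        raise RuntimeError("failed to obtain admin token")
    return token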

View File

@@ -1,308 +0,0 @@
# services/vault/k8s-auth-config-cronjob.yaml
apiVersion: batch/v1
kind: CronJob
metadata:
name: vault-k8s-auth-config
namespace: vault
labels:
atlas.bstein.dev/glue: "true"
spec:
schedule: "*/15 * * * *"
suspend: true
concurrencyPolicy: Forbid
successfulJobsHistoryLimit: 1
failedJobsHistoryLimit: 3
jobTemplate:
spec:
backoffLimit: 1
template:
spec:
serviceAccountName: vault-admin
restartPolicy: Never
nodeSelector:
kubernetes.io/arch: arm64
node-role.kubernetes.io/worker: "true"
containers:
- name: configure-k8s-auth
image: hashicorp/vault:1.17.6
imagePullPolicy: IfNotPresent
command: ["/bin/sh", "-c"]
args:
- |
set -eu
cat <<'SH' > /tmp/vault_k8s_auth_configure.sh
#!/usr/bin/env sh
set -eu
log() { echo "[vault-k8s-auth] $*"; }
vault_cmd() {
for attempt in 1 2 3 4 5 6; do
set +e
output="$(vault "$@" 2>&1)"
status=$?
set -e
if [ "${status}" -eq 0 ]; then
printf '%s' "${output}"
return 0
fi
log "vault command failed; retrying (${attempt}/6)"
sleep $((attempt * 2))
done
log "vault command failed; giving up"
return 1
}
ensure_token() {
if [ -n "${VAULT_TOKEN:-}" ]; then
return
fi
role="${VAULT_K8S_ROLE:-vault}"
jwt="$(cat /var/run/secrets/kubernetes.io/serviceaccount/token)"
if ! VAULT_TOKEN="$(vault_cmd write -field=token auth/kubernetes/login role="${role}" jwt="${jwt}")"; then
log "kubernetes auth login failed; set VAULT_TOKEN or fix role ${role}"
exit 1
fi
export VAULT_TOKEN
}
if ! status_json="$(vault_cmd status -format=json)"; then
log "vault status failed; check VAULT_ADDR and VAULT_TOKEN"
exit 1
fi
if ! printf '%s' "${status_json}" | grep -q '"initialized":[[:space:]]*true'; then
log "vault not initialized; skipping"
exit 0
fi
if printf '%s' "${status_json}" | grep -q '"sealed":[[:space:]]*true'; then
log "vault sealed; skipping"
exit 0
fi
ensure_token
k8s_host="https://${KUBERNETES_SERVICE_HOST}:443"
k8s_ca="$(cat /var/run/secrets/kubernetes.io/serviceaccount/ca.crt)"
k8s_token="$(cat /var/run/secrets/kubernetes.io/serviceaccount/token)"
role_ttl="${VAULT_K8S_ROLE_TTL:-1h}"
token_reviewer_jwt="${VAULT_K8S_TOKEN_REVIEWER_JWT:-}"
if [ -z "${token_reviewer_jwt}" ] && [ -n "${VAULT_K8S_TOKEN_REVIEWER_JWT_FILE:-}" ] && [ -r "${VAULT_K8S_TOKEN_REVIEWER_JWT_FILE}" ]; then
token_reviewer_jwt="$(cat "${VAULT_K8S_TOKEN_REVIEWER_JWT_FILE}")"
fi
if [ -z "${token_reviewer_jwt}" ]; then
token_reviewer_jwt="${k8s_token}"
fi
if ! vault_cmd auth list -format=json | grep -q '"kubernetes/"'; then
log "enabling kubernetes auth"
vault_cmd auth enable kubernetes
fi
log "configuring kubernetes auth"
vault_cmd write auth/kubernetes/config \
token_reviewer_jwt="${token_reviewer_jwt}" \
kubernetes_host="${k8s_host}" \
kubernetes_ca_cert="${k8s_ca}"
write_raw_policy() {
name="$1"
body="$2"
log "writing policy ${name}"
printf '%s\n' "${body}" | vault_cmd policy write "${name}" -
}
write_policy_and_role() {
role="$1"
namespace="$2"
service_accounts="$3"
read_paths="$4"
write_paths="$5"
policy_body=""
for path in ${read_paths}; do
policy_body="${policy_body}
path \"kv/data/atlas/${path}\" {
capabilities = [\"read\"]
}
path \"kv/metadata/atlas/${path}\" {
capabilities = [\"list\"]
}
"
done
for path in ${write_paths}; do
policy_body="${policy_body}
path \"kv/data/atlas/${path}\" {
capabilities = [\"create\", \"update\", \"read\"]
}
path \"kv/metadata/atlas/${path}\" {
capabilities = [\"list\"]
}
"
done
log "writing policy ${role}"
printf '%s\n' "${policy_body}" | vault_cmd policy write "${role}" -
log "writing role ${role}"
vault_cmd write "auth/kubernetes/role/${role}" \
bound_service_account_names="${service_accounts}" \
bound_service_account_namespaces="${namespace}" \
policies="${role}" \
ttl="${role_ttl}"
}
vault_admin_policy='
path "sys/auth" {
capabilities = ["read"]
}
path "sys/auth/*" {
capabilities = ["create", "update", "delete", "sudo", "read"]
}
path "auth/kubernetes/*" {
capabilities = ["create", "update", "read"]
}
path "auth/oidc/*" {
capabilities = ["create", "update", "read"]
}
path "sys/policies/acl" {
capabilities = ["list"]
}
path "sys/policies/acl/*" {
capabilities = ["create", "update", "read"]
}
path "sys/internal/ui/mounts" {
capabilities = ["read"]
}
path "sys/mounts" {
capabilities = ["read"]
}
path "sys/mounts/auth/*" {
capabilities = ["read", "update", "sudo"]
}
path "kv/data/atlas/vault/*" {
capabilities = ["read"]
}
path "kv/metadata/atlas/vault/*" {
capabilities = ["list"]
}
path "kv/data/*" {
capabilities = ["create", "update", "read", "delete", "patch"]
}
path "kv/metadata" {
capabilities = ["list"]
}
path "kv/metadata/*" {
capabilities = ["read", "list", "delete"]
}
path "kv/data/atlas/shared/*" {
capabilities = ["create", "update", "read", "patch"]
}
path "kv/metadata/atlas/shared/*" {
capabilities = ["list"]
}
'
write_raw_policy "vault-admin" "${vault_admin_policy}"
dev_kv_policy='
path "kv/metadata" {
capabilities = ["list"]
}
path "kv/metadata/atlas" {
capabilities = ["list"]
}
path "kv/metadata/atlas/shared" {
capabilities = ["list"]
}
path "kv/metadata/atlas/shared/*" {
capabilities = ["list"]
}
path "kv/data/atlas/shared/*" {
capabilities = ["read"]
}
'
write_raw_policy "dev-kv" "${dev_kv_policy}"
log "writing role vault-admin"
vault_cmd write "auth/kubernetes/role/vault-admin" \
bound_service_account_names="vault-admin" \
bound_service_account_namespaces="vault" \
policies="vault-admin" \
ttl="${role_ttl}"
write_policy_and_role "outline" "outline" "outline-vault" \
"outline/* shared/postmark-relay" ""
write_policy_and_role "planka" "planka" "planka-vault" \
"planka/* shared/postmark-relay" ""
write_policy_and_role "bstein-dev-home" "bstein-dev-home" "bstein-dev-home,bstein-dev-home-vault-sync" \
"portal/* shared/chat-ai-keys-runtime shared/portal-e2e-client shared/postmark-relay mailu/mailu-initial-account-secret shared/harbor-pull" ""
write_policy_and_role "gitea" "gitea" "gitea-vault" \
"gitea/*" ""
write_policy_and_role "vaultwarden" "vaultwarden" "vaultwarden-vault" \
"vaultwarden/* mailu/mailu-initial-account-secret" ""
write_policy_and_role "sso" "sso" "sso-vault,sso-vault-sync,mas-secrets-ensure" \
"sso/* portal/bstein-dev-home-keycloak-admin shared/keycloak-admin shared/portal-e2e-client shared/postmark-relay shared/harbor-pull" ""
write_policy_and_role "mailu-mailserver" "mailu-mailserver" "mailu-vault-sync" \
"mailu/* shared/postmark-relay shared/harbor-pull" ""
write_policy_and_role "harbor" "harbor" "harbor-vault-sync" \
"harbor/* shared/harbor-pull" ""
write_policy_and_role "nextcloud" "nextcloud" "nextcloud-vault" \
"nextcloud/* shared/keycloak-admin shared/postmark-relay" ""
write_policy_and_role "comms" "comms" "comms-vault,atlasbot" \
"comms/* shared/chat-ai-keys-runtime shared/harbor-pull" ""
write_policy_and_role "jenkins" "jenkins" "jenkins" \
"jenkins/*" ""
write_policy_and_role "monitoring" "monitoring" "monitoring-vault-sync" \
"monitoring/* shared/postmark-relay shared/harbor-pull" ""
write_policy_and_role "logging" "logging" "logging-vault-sync" \
"logging/* shared/harbor-pull" ""
write_policy_and_role "pegasus" "jellyfin" "pegasus-vault-sync" \
"pegasus/* shared/harbor-pull" ""
write_policy_and_role "crypto" "crypto" "crypto-vault-sync" \
"crypto/* shared/harbor-pull" ""
write_policy_and_role "health" "health" "health-vault-sync" \
"health/*" ""
write_policy_and_role "maintenance" "maintenance" "ariadne,maintenance-vault-sync" \
"portal/atlas-portal-db portal/bstein-dev-home-keycloak-admin mailu/mailu-db-secret mailu/mailu-initial-account-secret shared/harbor-pull" ""
write_policy_and_role "finance" "finance" "finance-vault" \
"finance/* shared/postmark-relay" ""
write_policy_and_role "finance-secrets" "finance" "finance-secrets-ensure" \
"" \
"finance/*"
write_policy_and_role "longhorn" "longhorn-system" "longhorn-vault,longhorn-vault-sync" \
"longhorn/* shared/harbor-pull" ""
write_policy_and_role "postgres" "postgres" "postgres-vault" \
"postgres/postgres-db" ""
write_policy_and_role "vault" "vault" "vault" \
"vault/*" ""
write_policy_and_role "sso-secrets" "sso" "mas-secrets-ensure" \
"shared/keycloak-admin" \
"harbor/harbor-oidc vault/vault-oidc-config comms/synapse-oidc logging/oauth2-proxy-logs-oidc finance/actual-oidc"
write_policy_and_role "crypto-secrets" "crypto" "crypto-secrets-ensure" \
"" \
"crypto/wallet-monero-temp-rpc-auth"
write_policy_and_role "comms-secrets" "comms" \
"comms-secrets-ensure,mas-db-ensure,mas-admin-client-secret-writer,othrys-synapse-signingkey-job" \
"" \
"comms/turn-shared-secret comms/livekit-api comms/synapse-redis comms/synapse-macaroon comms/atlasbot-credentials-runtime comms/synapse-db comms/mas-db comms/mas-admin-client-runtime comms/mas-secrets-runtime comms/othrys-synapse-signingkey"
SH
exec /bin/sh /tmp/vault_k8s_auth_configure.sh
env:
- name: VAULT_ADDR
value: http://10.43.57.249:8200
- name: VAULT_K8S_ROLE
value: vault-admin
- name: VAULT_K8S_TOKEN_REVIEWER_JWT_FILE
value: /var/run/secrets/vault-token-reviewer/token
- name: VAULT_K8S_ROLE_TTL
value: 1h
volumeMounts:
- name: token-reviewer
mountPath: /var/run/secrets/vault-token-reviewer
readOnly: true
volumes:
- name: token-reviewer
secret:
secretName: vault-admin-token-reviewer
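
Note: write_policy_and_role expands every space-separated entry into a rule pair, read on kv/data plus list on kv/metadata. The same expansion in Python, shown for one illustrative call (the gitea role above):

# Mirrors the shell loop: each read path yields a kv/data read rule and a
# kv/metadata list rule under the atlas prefix.
def policy_body(read_paths: list[str]) -> str:
    rules = []
    for path in read_paths:
        rules.append(
            f'path "kv/data/atlas/{path}" {{\n  capabilities = ["read"]\n}}\n'
            f'path "kv/metadata/atlas/{path}" {{\n  capabilities = ["list"]\n}}\n'
        )
    return "".join(rules)

print(policy_body(["gitea/*"]))  # what `vault policy write gitea -` receives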

View File

@@ -1,236 +0,0 @@
# services/vault/oidc-config-cronjob.yaml
apiVersion: batch/v1
kind: CronJob
metadata:
name: vault-oidc-config
namespace: vault
labels:
atlas.bstein.dev/glue: "true"
spec:
schedule: "*/15 * * * *"
suspend: true
concurrencyPolicy: Forbid
successfulJobsHistoryLimit: 1
failedJobsHistoryLimit: 3
jobTemplate:
spec:
backoffLimit: 1
template:
metadata:
annotations:
vault.hashicorp.com/agent-inject: "true"
vault.hashicorp.com/agent-pre-populate-only: "true"
vault.hashicorp.com/role: "vault-admin"
vault.hashicorp.com/agent-inject-secret-vault-oidc-env.sh: "kv/data/atlas/vault/vault-oidc-config"
vault.hashicorp.com/agent-inject-template-vault-oidc-env.sh: |
{{ with secret "kv/data/atlas/vault/vault-oidc-config" }}
export VAULT_OIDC_DISCOVERY_URL="{{ .Data.data.discovery_url }}"
export VAULT_OIDC_CLIENT_ID="{{ .Data.data.client_id }}"
export VAULT_OIDC_CLIENT_SECRET="{{ .Data.data.client_secret }}"
export VAULT_OIDC_DEFAULT_ROLE="{{ .Data.data.default_role }}"
export VAULT_OIDC_SCOPES="{{ .Data.data.scopes }}"
export VAULT_OIDC_USER_CLAIM="{{ .Data.data.user_claim }}"
export VAULT_OIDC_GROUPS_CLAIM="{{ .Data.data.groups_claim }}"
export VAULT_OIDC_TOKEN_POLICIES="{{ .Data.data.token_policies }}"
export VAULT_OIDC_ADMIN_GROUP="{{ .Data.data.admin_group }}"
export VAULT_OIDC_ADMIN_POLICIES="{{ .Data.data.admin_policies }}"
export VAULT_OIDC_DEV_GROUP="{{ .Data.data.dev_group }}"
export VAULT_OIDC_DEV_POLICIES="{{ .Data.data.dev_policies }}"
export VAULT_OIDC_USER_GROUP="{{ .Data.data.user_group }}"
export VAULT_OIDC_USER_POLICIES="{{ .Data.data.user_policies }}"
export VAULT_OIDC_REDIRECT_URIS="{{ .Data.data.redirect_uris }}"
export VAULT_OIDC_BOUND_AUDIENCES="{{ .Data.data.bound_audiences }}"
export VAULT_OIDC_BOUND_CLAIMS="{{ .Data.data.bound_claims }}"
export VAULT_OIDC_BOUND_CLAIMS_TYPE="{{ .Data.data.bound_claims_type }}"
{{ end }}
spec:
serviceAccountName: vault-admin
restartPolicy: Never
nodeSelector:
kubernetes.io/arch: arm64
node-role.kubernetes.io/worker: "true"
containers:
- name: configure-oidc
image: hashicorp/vault:1.17.6
imagePullPolicy: IfNotPresent
command: ["/bin/sh", "-c"]
args:
- |
set -eu
if [ -f /vault/secrets/vault-oidc-env.sh ]; then
. /vault/secrets/vault-oidc-env.sh
fi
cat <<'SH' > /tmp/vault_oidc_configure.sh
#!/usr/bin/env sh
set -eu
log() { echo "[vault-oidc] $*"; }
vault_cmd() {
for attempt in 1 2 3 4 5 6; do
set +e
output="$(vault "$@" 2>&1)"
status=$?
set -e
if [ "${status}" -eq 0 ]; then
printf '%s' "${output}"
return 0
fi
log "vault command failed; retrying (${attempt}/6)"
sleep $((attempt * 2))
done
log "vault command failed; giving up"
return 1
}
ensure_token() {
if [ -n "${VAULT_TOKEN:-}" ]; then
return
fi
role="${VAULT_K8S_ROLE:-vault}"
jwt="$(cat /var/run/secrets/kubernetes.io/serviceaccount/token)"
if ! VAULT_TOKEN="$(vault_cmd write -field=token auth/kubernetes/login role="${role}" jwt="${jwt}")"; then
log "kubernetes auth login failed; set VAULT_TOKEN or fix role ${role}"
exit 1
fi
export VAULT_TOKEN
}
if ! status_json="$(vault_cmd status -format=json)"; then
log "vault status failed; check VAULT_ADDR and VAULT_TOKEN"
exit 1
fi
if ! printf '%s' "${status_json}" | grep -q '"initialized":[[:space:]]*true'; then
log "vault not initialized; skipping"
exit 0
fi
if printf '%s' "${status_json}" | grep -q '"sealed":[[:space:]]*true'; then
log "vault sealed; skipping"
exit 0
fi
ensure_token
: "${VAULT_OIDC_DISCOVERY_URL:?set VAULT_OIDC_DISCOVERY_URL}"
: "${VAULT_OIDC_CLIENT_ID:?set VAULT_OIDC_CLIENT_ID}"
: "${VAULT_OIDC_CLIENT_SECRET:?set VAULT_OIDC_CLIENT_SECRET}"
default_role="${VAULT_OIDC_DEFAULT_ROLE:-admin}"
scopes="${VAULT_OIDC_SCOPES:-openid profile email groups}"
user_claim="${VAULT_OIDC_USER_CLAIM:-preferred_username}"
groups_claim="${VAULT_OIDC_GROUPS_CLAIM:-groups}"
redirect_uris="${VAULT_OIDC_REDIRECT_URIS:-https://secret.bstein.dev/ui/vault/auth/oidc/oidc/callback}"
bound_audiences="${VAULT_OIDC_BOUND_AUDIENCES:-${VAULT_OIDC_CLIENT_ID}}"
bound_claims_type="${VAULT_OIDC_BOUND_CLAIMS_TYPE:-string}"
bound_claims_type="$(printf '%s' "${bound_claims_type}" | tr -d '[:space:]')"
if [ -z "${bound_claims_type}" ] || [ "${bound_claims_type}" = "<novalue>" ]; then
bound_claims_type="string"
fi
admin_group="${VAULT_OIDC_ADMIN_GROUP:-admin}"
admin_policies="${VAULT_OIDC_ADMIN_POLICIES:-default,vault-admin}"
dev_group="${VAULT_OIDC_DEV_GROUP:-dev}"
dev_policies="${VAULT_OIDC_DEV_POLICIES:-default,dev-kv}"
user_group="${VAULT_OIDC_USER_GROUP:-${dev_group}}"
user_policies="${VAULT_OIDC_USER_POLICIES:-${VAULT_OIDC_TOKEN_POLICIES:-${dev_policies}}}"
if ! vault_cmd auth list -format=json | grep -q '"oidc/"'; then
log "enabling oidc auth method"
vault_cmd auth enable oidc
fi
log "configuring oidc auth"
vault_cmd write auth/oidc/config \
oidc_discovery_url="${VAULT_OIDC_DISCOVERY_URL}" \
oidc_client_id="${VAULT_OIDC_CLIENT_ID}" \
oidc_client_secret="${VAULT_OIDC_CLIENT_SECRET}" \
default_role="${default_role}"
vault_cmd auth tune -listing-visibility=unauth oidc >/dev/null
build_bound_claims() {
claim="$1"
groups="$2"
json="{\"${claim}\":["
first=1
old_ifs=$IFS
IFS=,
for item in $groups; do
item="$(printf '%s' "$item" | sed 's/^[[:space:]]*//;s/[[:space:]]*$//')"
if [ -z "${item}" ]; then
continue
fi
if [ "${first}" -eq 0 ]; then
json="${json},"
fi
json="${json}\"${item}\""
first=0
done
IFS=$old_ifs
json="${json}]}"
printf '%s' "${json}"
}
build_json_array() {
items="$1"
json="["
first=1
old_ifs=$IFS
IFS=,
for item in $items; do
item="$(printf '%s' "$item" | sed 's/^[[:space:]]*//;s/[[:space:]]*$//')"
if [ -z "${item}" ]; then
continue
fi
if [ "${first}" -eq 0 ]; then
json="${json},"
fi
json="${json}\"${item}\""
first=0
done
IFS=$old_ifs
json="${json}]"
printf '%s' "${json}"
}
configure_role() {
role_name="$1"
role_groups="$2"
role_policies="$3"
if [ -z "${role_name}" ] || [ -z "${role_groups}" ] || [ -z "${role_policies}" ]; then
log "skipping role ${role_name} (missing groups or policies)"
return
fi
claims="$(build_bound_claims "${groups_claim}" "${role_groups}")"
scopes_csv="$(printf '%s' "${scopes}" | tr ' ' ',' | tr -s ',' | sed 's/^,//;s/,$//')"
redirect_json="$(build_json_array "${redirect_uris}")"
payload_file="$(mktemp)"
cat > "${payload_file}" <<EOF
{
"user_claim": "${user_claim}",
"oidc_scopes": "${scopes_csv}",
"token_policies": "${role_policies}",
"bound_audiences": "${bound_audiences}",
"bound_claims": ${claims},
"bound_claims_type": "${bound_claims_type}",
"groups_claim": "${groups_claim}",
"allowed_redirect_uris": ${redirect_json}
}
EOF
log "configuring oidc role ${role_name}"
vault_cmd write "auth/oidc/role/${role_name}" @"${payload_file}"
rm -f "${payload_file}"
}
configure_role "admin" "${admin_group}" "${admin_policies}"
configure_role "dev" "${dev_group}" "${dev_policies}"
configure_role "user" "${user_group}" "${user_policies}"
SH
exec /bin/sh /tmp/vault_oidc_configure.sh
env:
- name: VAULT_ADDR
value: http://10.43.57.249:8200
- name: VAULT_K8S_ROLE
value: vault-admin
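
Note: build_bound_claims and build_json_array are hand-rolled JSON builders in POSIX shell; both trim whitespace and drop empty items before quoting. The same behavior in Python, with worked inputs:

import json

# Equivalent of build_bound_claims: a CSV of groups -> {"<claim>": [...]}.
def build_bound_claims(claim: str, groups: str) -> str:
    items = [item.strip() for item in groups.split(",") if item.strip()]
    return json.dumps({claim: items})

assert build_bound_claims("groups", "admin") == '{"groups": ["admin"]}'
assert build_bound_claims("groups", " dev , user ") == '{"groups": ["dev", "user"]}'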

View File

@@ -48,6 +48,13 @@ def post_json(path: str, payload: dict[str, Any]) -> dict[str, Any]:
return data
+ def delete_json(path: str) -> dict[str, Any]:
+ data = _k8s_request("DELETE", path)
+ if not isinstance(data, dict):
+ raise RuntimeError("unexpected kubernetes response")
+ return data
def get_secret_value(namespace: str, name: str, key: str) -> str:
data = get_json(f"/api/v1/namespaces/{namespace}/secrets/{name}")
blob = data.get("data") if isinstance(data.get("data"), dict) else {}
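
Note: delete_json rounds out the verb helpers beside get_json and post_json. A hypothetical call site with the same shape as the Job handling elsewhere in this commit:

# Hypothetical use of the new helper: remove a finished Job by name.
from ariadne.k8s.client import delete_json

def delete_job(namespace: str, name: str) -> None:
    delete_json(f"/apis/batch/v1/namespaces/{namespace}/jobs/{name}")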

123
ariadne/k8s/exec.py Normal file
View File

@@ -0,0 +1,123 @@
from __future__ import annotations
from dataclasses import dataclass
import shlex
import time
from typing import Any
try:
from kubernetes import client, config
from kubernetes.stream import stream
except Exception as exc: # pragma: no cover - import checked at runtime
client = None
config = None
stream = None
_IMPORT_ERROR = exc
else:
_IMPORT_ERROR = None
from .pods import PodSelectionError, select_pod
from ..utils.logging import get_logger
logger = get_logger(__name__)
_CORE_API = None
@dataclass(frozen=True)
class ExecResult:
stdout: str
stderr: str
exit_code: int | None
@property
def ok(self) -> bool:
return self.exit_code in (0, None)
class ExecError(RuntimeError):
pass
def _ensure_client() -> Any:
global _CORE_API
if _IMPORT_ERROR:
raise RuntimeError(f"kubernetes client missing: {_IMPORT_ERROR}") from _IMPORT_ERROR
if _CORE_API is not None:
return _CORE_API
try:
config.load_incluster_config()
except Exception:
config.load_kube_config()
_CORE_API = client.CoreV1Api()
return _CORE_API
def _build_command(command: list[str] | str, env: dict[str, str] | None) -> list[str]:
if isinstance(command, str):
cmd_str = command
else:
cmd_str = shlex.join(command)
if env:
prefix = " ".join(f"{key}={shlex.quote(value)}" for key, value in env.items())
cmd_str = f"{prefix} {cmd_str}"
return ["/bin/sh", "-c", cmd_str]
class PodExecutor:
def __init__(self, namespace: str, label_selector: str, container: str | None = None) -> None:
self._namespace = namespace
self._label_selector = label_selector
self._container = container
def exec(
self,
command: list[str] | str,
env: dict[str, str] | None = None,
timeout_sec: float | None = None,
check: bool = True,
) -> ExecResult:
pod = select_pod(self._namespace, self._label_selector)
cmd = _build_command(command, env)
api = _ensure_client()
resp = stream(
api.connect_get_namespaced_pod_exec,
pod.name,
pod.namespace,
command=cmd,
container=self._container,
stderr=True,
stdin=False,
stdout=True,
tty=False,
_preload_content=False,
)
stdout_parts: list[str] = []
stderr_parts: list[str] = []
exit_code: int | None = None
started = time.monotonic()
try:
while resp.is_open():
resp.update(timeout=1)
if resp.peek_stdout():
stdout_parts.append(resp.read_stdout())
if resp.peek_stderr():
stderr_parts.append(resp.read_stderr())
if hasattr(resp, "peek_exit_code") and resp.peek_exit_code():
exit_code = resp.read_exit_code()
break
if timeout_sec is not None and (time.monotonic() - started) > timeout_sec:
raise TimeoutError("pod exec timed out")
finally:
resp.close()
if exit_code is None:
exit_code = getattr(resp, "returncode", None)
result = ExecResult("".join(stdout_parts), "".join(stderr_parts), exit_code)
if check and not result.ok:
raise ExecError(f"pod exec failed exit_code={result.exit_code} stderr={result.stderr.strip()}")
return result
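
Note: PodExecutor always routes the command through /bin/sh -c (env vars are prepended as KEY=value prefixes), so shell syntax works but the target image must ship /bin/sh. The intended call pattern, with an illustrative namespace, selector, and command:

from ariadne.k8s.exec import PodExecutor

# Run `php occ status` in the newest ready Nextcloud pod; check=True means
# a nonzero exit raises ExecError instead of returning quietly.
occ = PodExecutor("nextcloud", "app.kubernetes.io/name=nextcloud", container="nextcloud")
result = occ.exec(["php", "occ", "status"], timeout_sec=60)
print(result.exit_code, result.stdout)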

View File

@@ -1,184 +0,0 @@
from __future__ import annotations
import re
import time
from typing import Any
from .client import get_json, post_json
from ..utils.logging import get_logger
class JobSpawner:
def __init__(self, namespace: str, cronjob_name: str, manifest: dict[str, Any] | None = None) -> None:
self._namespace = namespace
self._cronjob_name = cronjob_name
self._manifest = manifest
self._logger = get_logger(__name__)
@staticmethod
def _safe_name_fragment(value: str, max_len: int = 24) -> str:
cleaned = re.sub(r"[^a-z0-9-]+", "-", (value or "").lower()).strip("-")
if not cleaned:
cleaned = "job"
return cleaned[:max_len].rstrip("-") or "job"
def _job_from_cronjob(
self,
cronjob: dict[str, Any],
label_suffix: str,
env_overrides: list[dict[str, str]] | None = None,
job_ttl_seconds: int | None = None,
) -> dict[str, Any]:
spec = cronjob.get("spec") if isinstance(cronjob.get("spec"), dict) else {}
jt = spec.get("jobTemplate") if isinstance(spec.get("jobTemplate"), dict) else {}
job_spec = jt.get("spec") if isinstance(jt.get("spec"), dict) else {}
now = int(time.time())
safe_label = self._safe_name_fragment(label_suffix)
job_name = f"{self._cronjob_name}-{safe_label}-{now}"
job: dict[str, Any] = {
"apiVersion": "batch/v1",
"kind": "Job",
"metadata": {
"name": job_name,
"namespace": self._namespace,
"labels": {
"app": self._cronjob_name,
"atlas.bstein.dev/trigger": "ariadne",
"atlas.bstein.dev/label": safe_label,
},
},
"spec": job_spec,
}
if isinstance(job_ttl_seconds, int) and job_ttl_seconds > 0:
job.setdefault("spec", {})
job["spec"]["ttlSecondsAfterFinished"] = job_ttl_seconds
tpl = job.get("spec", {}).get("template", {})
pod_spec = tpl.get("spec") if isinstance(tpl.get("spec"), dict) else {}
containers = pod_spec.get("containers") if isinstance(pod_spec.get("containers"), list) else []
if containers and isinstance(containers[0], dict) and env_overrides:
env = containers[0].get("env")
if not isinstance(env, list):
env = []
env = [e for e in env if not (isinstance(e, dict) and e.get("name") in {item["name"] for item in env_overrides})]
env.extend(env_overrides)
containers[0]["env"] = env
pod_spec["containers"] = containers
tpl["spec"] = pod_spec
job["spec"]["template"] = tpl
return job
def trigger(
self,
label_suffix: str,
env_overrides: list[dict[str, str]] | None = None,
job_ttl_seconds: int | None = None,
) -> dict[str, Any]:
cronjob = self._manifest or get_json(
f"/apis/batch/v1/namespaces/{self._namespace}/cronjobs/{self._cronjob_name}"
)
job_payload = self._job_from_cronjob(cronjob, label_suffix, env_overrides, job_ttl_seconds)
created = post_json(f"/apis/batch/v1/namespaces/{self._namespace}/jobs", job_payload)
job_name = (
created.get("metadata", {}).get("name")
if isinstance(created.get("metadata"), dict)
else job_payload.get("metadata", {}).get("name")
)
if not isinstance(job_name, str) or not job_name:
raise RuntimeError("job name missing")
self._logger.info(
"job triggered",
extra={
"event": "job_trigger",
"namespace": self._namespace,
"cronjob": self._cronjob_name,
"job": job_name,
},
)
return {"job": job_name, "status": "queued"}
def wait_for_completion(self, job_name: str, timeout_sec: float) -> dict[str, Any]:
deadline = time.time() + timeout_sec
while time.time() < deadline:
job = get_json(f"/apis/batch/v1/namespaces/{self._namespace}/jobs/{job_name}")
status = job.get("status") if isinstance(job.get("status"), dict) else {}
if int(status.get("succeeded") or 0) > 0:
self._logger.info(
"job completed",
extra={
"event": "job_complete",
"namespace": self._namespace,
"cronjob": self._cronjob_name,
"job": job_name,
"status": "ok",
},
)
return {"job": job_name, "status": "ok"}
if int(status.get("failed") or 0) > 0:
self._logger.info(
"job completed",
extra={
"event": "job_complete",
"namespace": self._namespace,
"cronjob": self._cronjob_name,
"job": job_name,
"status": "error",
},
)
return {"job": job_name, "status": "error"}
conditions = status.get("conditions") if isinstance(status.get("conditions"), list) else []
for cond in conditions:
if not isinstance(cond, dict):
continue
if cond.get("type") == "Complete" and cond.get("status") == "True":
self._logger.info(
"job completed",
extra={
"event": "job_complete",
"namespace": self._namespace,
"cronjob": self._cronjob_name,
"job": job_name,
"status": "ok",
},
)
return {"job": job_name, "status": "ok"}
if cond.get("type") == "Failed" and cond.get("status") == "True":
self._logger.info(
"job completed",
extra={
"event": "job_complete",
"namespace": self._namespace,
"cronjob": self._cronjob_name,
"job": job_name,
"status": "error",
},
)
return {"job": job_name, "status": "error"}
time.sleep(2)
self._logger.info(
"job wait timeout",
extra={
"event": "job_timeout",
"namespace": self._namespace,
"cronjob": self._cronjob_name,
"job": job_name,
},
)
return {"job": job_name, "status": "running"}
def trigger_and_wait(
self,
label_suffix: str,
env_overrides: list[dict[str, str]] | None,
timeout_sec: float,
job_ttl_seconds: int | None = None,
) -> dict[str, Any]:
created = self.trigger(label_suffix, env_overrides, job_ttl_seconds)
job_name = created.get("job")
if not isinstance(job_name, str) or not job_name:
raise RuntimeError("job name missing")
return self.wait_for_completion(job_name, timeout_sec)

View File

@@ -1,20 +0,0 @@
from __future__ import annotations
from functools import lru_cache
from importlib import resources
from typing import Any
import yaml
@lru_cache(maxsize=64)
def load_cronjob_manifest(path: str) -> dict[str, Any]:
resource = resources.files("ariadne.job_manifests").joinpath(path)
if not resource.is_file():
raise FileNotFoundError(f"manifest not found: {path}")
payload = yaml.safe_load(resource.read_text(encoding="utf-8"))
if not isinstance(payload, dict):
raise ValueError("manifest payload is not a mapping")
if payload.get("kind") != "CronJob":
raise ValueError("manifest is not a CronJob")
return payload

81
ariadne/k8s/pods.py Normal file
View File

@@ -0,0 +1,81 @@
from __future__ import annotations
from dataclasses import dataclass
from datetime import datetime, timezone
from typing import Any
from urllib.parse import quote
from .client import get_json
@dataclass(frozen=True)
class PodRef:
name: str
namespace: str
node: str | None = None
class PodSelectionError(RuntimeError):
pass
def _parse_start_time(value: str | None) -> float:
if not value:
return 0.0
try:
parsed = datetime.fromisoformat(value.replace("Z", "+00:00"))
except ValueError:
return 0.0
if parsed.tzinfo is None:
parsed = parsed.replace(tzinfo=timezone.utc)
return parsed.timestamp()
def _is_ready(pod: dict[str, Any]) -> bool:
status = pod.get("status") if isinstance(pod.get("status"), dict) else {}
if status.get("phase") != "Running":
return False
conditions = status.get("conditions")
if not isinstance(conditions, list):
return False
for cond in conditions:
if not isinstance(cond, dict):
continue
if cond.get("type") == "Ready":
return cond.get("status") == "True"
return False
def list_pods(namespace: str, label_selector: str) -> list[dict[str, Any]]:
namespace = (namespace or "").strip()
if not namespace:
raise PodSelectionError("pod namespace missing")
label_selector = (label_selector or "").strip()
selector = quote(label_selector, safe=",.-")
payload = get_json(f"/api/v1/namespaces/{namespace}/pods?labelSelector={selector}")
items = payload.get("items") if isinstance(payload.get("items"), list) else []
return [item for item in items if isinstance(item, dict)]
def select_pod(namespace: str, label_selector: str) -> PodRef:
pods = list_pods(namespace, label_selector)
candidates: list[tuple[float, PodRef]] = []
for pod in pods:
metadata = pod.get("metadata") if isinstance(pod.get("metadata"), dict) else {}
if metadata.get("deletionTimestamp"):
continue
name = metadata.get("name")
if not isinstance(name, str) or not name.strip():
continue
if not _is_ready(pod):
continue
status = pod.get("status") if isinstance(pod.get("status"), dict) else {}
started_at = _parse_start_time(status.get("startTime"))
node_name = status.get("nodeName") if isinstance(status.get("nodeName"), str) else None
candidates.append((started_at, PodRef(name=name, namespace=namespace, node=node_name)))
if not candidates:
raise PodSelectionError(f"no ready pods found for {namespace} {label_selector}")
candidates.sort(key=lambda item: item[0], reverse=True)
return candidates[0][1]
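
Note: select_pod prefers the most recently started Ready pod and skips anything terminating (deletionTimestamp set). Usage sketch with an illustrative namespace and selector:

from ariadne.k8s.pods import PodSelectionError, select_pod

# Newest ready pod wins; PodSelectionError means there is nothing to target.
try:
    pod = select_pod("mailu-mailserver", "app=mailu,component=admin")
    print(pod.name, pod.node)
except PodSelectionError as exc:
    print(f"no target pod: {exc}")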

View File

@@ -362,7 +362,7 @@ class ProvisioningManager:
# Task: trigger Nextcloud mail sync
start = datetime.now(timezone.utc)
try:
- if not settings.nextcloud_namespace or not settings.nextcloud_mail_sync_cronjob:
+ if not settings.nextcloud_namespace:
detail = "sync disabled"
self._upsert_task(conn, request_code, "nextcloud_mail_sync", "ok", detail)
self._record_task(request_code, "nextcloud_mail_sync", "ok", detail, start)

View File

@@ -89,12 +89,12 @@ class CronScheduler:
detail = None
result_detail = ""
result_payload: Any | None = None
- with task_context(task.name):
self._logger.info(
"schedule task started",
extra={"event": "schedule_start", "task": task.name},
)
try:
+ with task_context(task.name):
result = task.runner()
result_detail, result_payload = self._format_result(result)
except Exception as exc:
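
Note: this hunk moves task_context inside the try block, so a failure while entering the context is handled like any other task error; the trade-off is that the "schedule task started" log line no longer runs under task-scoped context. If task_context is the usual contextvar-backed helper, its shape is roughly the following (a hypothetical sketch; the real helper lives in ariadne's logging utilities and may differ):

from contextlib import contextmanager
from contextvars import ContextVar

# Hypothetical sketch: a contextvar that log formatters can read to tag
# records with the active task name.
current_task: ContextVar[str | None] = ContextVar("current_task", default=None)

@contextmanager
def task_context(name: str):
    token = current_task.set(name)
    try:
        yield
    finally:
        current_task.reset(token)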

View File

@@ -1,59 +1,860 @@
from __future__ import annotations
from dataclasses import dataclass
import base64
import random
import time
import urllib.parse
from typing import Any
- from ..k8s.jobs import JobSpawner
- from ..k8s.manifests import load_cronjob_manifest
+ import httpx
+ import psycopg
from ..settings import settings
from ..utils.logging import get_logger
logger = get_logger(__name__)
_ADJ = [
"brisk",
"calm",
"eager",
"gentle",
"merry",
"nifty",
"rapid",
"sunny",
"witty",
"zesty",
"amber",
"bold",
"bright",
"crisp",
"daring",
"frosty",
"glad",
"jolly",
"lively",
"mellow",
"quiet",
"ripe",
"serene",
"spry",
"tidy",
"vivid",
"warm",
"wild",
"clever",
"kind",
]
_NOUN = [
"otter",
"falcon",
"comet",
"ember",
"grove",
"harbor",
"meadow",
"raven",
"river",
"summit",
"breeze",
"cedar",
"cinder",
"cove",
"delta",
"forest",
"glade",
"lark",
"marsh",
"peak",
"pine",
"quartz",
"reef",
"ridge",
"sable",
"sage",
"shore",
"thunder",
"vale",
"zephyr",
]
@dataclass(frozen=True)
class CommsSummary:
processed: int
renamed: int
pruned: int
skipped: int
detail: str = ""
def _auth(token: str) -> dict[str, str]:
return {"Authorization": f"Bearer {token}"}
def _canon_user(user: str, server_name: str) -> str:
user = (user or "").strip()
if user.startswith("@") and ":" in user:
return user
user = user.lstrip("@")
if ":" in user:
return f"@{user}"
return f"@{user}:{server_name}"
def _needs_rename_username(username: str) -> bool:
return username.isdigit() or username.startswith("guest-")
def _needs_rename_display(display: str | None) -> bool:
if not display:
return True
return display.isdigit() or display.startswith("guest-")
def _random_name(existing: set[str]) -> str | None:
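# Try at most 30 random adjective-noun combos; returning None tells the caller to skip the rename.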
for _ in range(30):
candidate = f"{random.choice(_ADJ)}-{random.choice(_NOUN)}"
if candidate not in existing:
existing.add(candidate)
return candidate
return None
class CommsService:
- def __init__(self) -> None:
- self._guest_name_spawner = JobSpawner(
- settings.comms_namespace,
- settings.comms_guest_name_cronjob,
- load_cronjob_manifest("comms/guest-name-job.yaml"),
+ def __init__(self, client_factory: type[httpx.Client] = httpx.Client) -> None:
+ self._client_factory = client_factory
+ def _client(self) -> httpx.Client:
+ return self._client_factory(timeout=settings.comms_timeout_sec)
def _mas_admin_token(self, client: httpx.Client) -> str:
if not settings.comms_mas_admin_client_id or not settings.comms_mas_admin_client_secret:
raise RuntimeError("mas admin client credentials missing")
basic = base64.b64encode(
f"{settings.comms_mas_admin_client_id}:{settings.comms_mas_admin_client_secret}".encode()
).decode()
last_err: Exception | None = None
for attempt in range(5):
try:
resp = client.post(
settings.comms_mas_token_url,
headers={"Authorization": f"Basic {basic}"},
data={"grant_type": "client_credentials", "scope": "urn:mas:admin"},
- )
)
- self._pin_invite_spawner = JobSpawner(
- settings.comms_namespace,
- settings.comms_pin_invite_cronjob,
- load_cronjob_manifest("comms/pin-othrys-job.yaml"),
resp.raise_for_status()
payload = resp.json()
token = payload.get("access_token")
if not isinstance(token, str) or not token:
raise RuntimeError("missing mas access token")
return token
except Exception as exc: # noqa: BLE001
last_err = exc
time.sleep(2**attempt)
raise RuntimeError(str(last_err) if last_err else "mas admin token failed")
def _mas_user_id(self, client: httpx.Client, token: str, username: str) -> str:
url = f"{settings.comms_mas_admin_api_base}/users/by-username/{urllib.parse.quote(username)}"
resp = client.get(url, headers=_auth(token))
resp.raise_for_status()
payload = resp.json()
return payload["data"]["id"]
def _mas_personal_session(self, client: httpx.Client, token: str, user_id: str) -> tuple[str, str]:
resp = client.post(
f"{settings.comms_mas_admin_api_base}/personal-sessions",
headers=_auth(token),
json={
"actor_user_id": user_id,
"human_name": "guest-name-randomizer",
"scope": "urn:matrix:client:api:*",
"expires_in": 300,
},
- )
)
- self._reset_room_spawner = JobSpawner(
- settings.comms_namespace,
- settings.comms_reset_room_cronjob,
- load_cronjob_manifest("comms/reset-othrys-room-job.yaml"),
resp.raise_for_status()
payload = resp.json().get("data", {})
session_id = payload.get("id")
attrs = (payload.get("attributes") or {}) if isinstance(payload, dict) else {}
access_token = attrs.get("access_token")
if not isinstance(access_token, str) or not isinstance(session_id, str):
raise RuntimeError("invalid personal session response")
return access_token, session_id
def _mas_revoke_session(self, client: httpx.Client, token: str, session_id: str) -> None:
try:
client.post(
f"{settings.comms_mas_admin_api_base}/personal-sessions/{urllib.parse.quote(session_id)}/revoke",
headers=_auth(token),
json={},
- )
)
- self._seed_room_spawner = JobSpawner(
- settings.comms_namespace,
- settings.comms_seed_room_cronjob,
- load_cronjob_manifest("comms/seed-othrys-room.yaml"),
except Exception:
return
def _resolve_alias(self, client: httpx.Client, token: str, alias: str) -> str:
resp = client.get(
f"{settings.comms_synapse_base}/_matrix/client/v3/directory/room/{urllib.parse.quote(alias)}",
headers=_auth(token),
)
resp.raise_for_status()
payload = resp.json()
return payload["room_id"]
def _room_members(self, client: httpx.Client, token: str, room_id: str) -> tuple[set[str], set[str]]:
resp = client.get(
f"{settings.comms_synapse_base}/_matrix/client/v3/rooms/{urllib.parse.quote(room_id)}/members",
headers=_auth(token),
)
resp.raise_for_status()
payload = resp.json()
members: set[str] = set()
existing: set[str] = set()
for ev in payload.get("chunk", []) or []:
user_id = ev.get("state_key")
if isinstance(user_id, str) and user_id:
members.add(user_id)
display = (ev.get("content") or {}).get("displayname")
if isinstance(display, str) and display:
existing.add(display)
return members, existing
def _mas_list_users(self, client: httpx.Client, token: str) -> list[dict[str, Any]]:
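# Page through the MAS admin user list with cursor pagination (page[after] comes from the last item's meta).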
users: list[dict[str, Any]] = []
cursor = None
while True:
url = f"{settings.comms_mas_admin_api_base}/users?page[size]=100"
if cursor:
url += f"&page[after]={urllib.parse.quote(cursor)}"
resp = client.get(url, headers=_auth(token))
resp.raise_for_status()
payload = resp.json()
data = payload.get("data") or []
if not isinstance(data, list) or not data:
break
users.extend([item for item in data if isinstance(item, dict)])
last = data[-1]
cursor = (
last.get("meta", {})
if isinstance(last, dict)
else {}
).get("page", {}).get("cursor")
if not cursor:
break
return users
def _synapse_list_users(self, client: httpx.Client, token: str) -> list[dict[str, Any]]:
users: list[dict[str, Any]] = []
from_token = None
while True:
url = "{}/_synapse/admin/v2/users?local=true&deactivated=false&limit=100".format(
settings.comms_synapse_base
)
if from_token:
url += f"&from={urllib.parse.quote(from_token)}"
resp = client.get(url, headers=_auth(token))
resp.raise_for_status()
payload = resp.json()
users.extend([item for item in payload.get("users", []) if isinstance(item, dict)])
from_token = payload.get("next_token")
if not from_token:
break
return users
def _should_prune_guest(self, entry: dict[str, Any], now_ms: int) -> bool:
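# Prune only guests with a known last_seen_ts, once idle past comms_guest_stale_days.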
if not entry.get("is_guest"):
return False
last_seen = entry.get("last_seen_ts")
if last_seen is None:
return False
try:
last_seen = int(last_seen)
except (TypeError, ValueError):
return False
stale_ms = int(settings.comms_guest_stale_days) * 24 * 60 * 60 * 1000
return now_ms - last_seen > stale_ms
def _prune_guest(self, client: httpx.Client, token: str, user_id: str) -> bool:
try:
resp = client.delete(
f"{settings.comms_synapse_base}/_synapse/admin/v2/users/{urllib.parse.quote(user_id)}",
headers=_auth(token),
params={"erase": "true"},
)
except Exception as exc: # noqa: BLE001
logger.info(
"guest prune failed",
extra={"event": "comms_guest_prune", "status": "error", "detail": str(exc)},
)
return False
if resp.status_code in (200, 202, 204, 404):
return True
logger.info(
"guest prune failed",
extra={
"event": "comms_guest_prune",
"status": "error",
"detail": f"{resp.status_code} {resp.text}",
},
)
return False
def _get_displayname(self, client: httpx.Client, token: str, user_id: str) -> str | None:
resp = client.get(
f"{settings.comms_synapse_base}/_matrix/client/v3/profile/{urllib.parse.quote(user_id)}",
headers=_auth(token),
)
resp.raise_for_status()
return resp.json().get("displayname")
def _get_displayname_admin(self, client: httpx.Client, token: str, user_id: str) -> str | None:
resp = client.get(
f"{settings.comms_synapse_base}/_synapse/admin/v2/users/{urllib.parse.quote(user_id)}",
headers=_auth(token),
)
if resp.status_code == 404:
return None
resp.raise_for_status()
return resp.json().get("displayname")
def _set_displayname(
self,
client: httpx.Client,
token: str,
room_id: str,
user_id: str,
name: str,
in_room: bool,
) -> None:
resp = client.put(
f"{settings.comms_synapse_base}/_matrix/client/v3/profile/{urllib.parse.quote(user_id)}/displayname",
headers=_auth(token),
json={"displayname": name},
)
resp.raise_for_status()
if not in_room:
return
state_url = (
f"{settings.comms_synapse_base}/_matrix/client/v3/rooms/{urllib.parse.quote(room_id)}"
f"/state/m.room.member/{urllib.parse.quote(user_id)}"
)
client.put(
state_url,
headers=_auth(token),
json={"membership": "join", "displayname": name},
- )
)
- def _trigger(self, spawner: JobSpawner, label_suffix: str, wait: bool) -> dict[str, Any]:
- if wait:
- return spawner.trigger_and_wait(label_suffix, None, settings.comms_job_wait_timeout_sec)
- return spawner.trigger(label_suffix, None)
def _set_displayname_admin(self, client: httpx.Client, token: str, user_id: str, name: str) -> bool:
resp = client.put(
f"{settings.comms_synapse_base}/_synapse/admin/v2/users/{urllib.parse.quote(user_id)}",
headers=_auth(token),
json={"displayname": name},
)
return resp.status_code in (200, 201, 204)
def _db_rename_numeric(self, existing: set[str]) -> int:
if not settings.comms_synapse_db_password:
return 0
renamed = 0
conn = psycopg.connect(
host=settings.comms_synapse_db_host,
port=settings.comms_synapse_db_port,
dbname=settings.comms_synapse_db_name,
user=settings.comms_synapse_db_user,
password=settings.comms_synapse_db_password,
)
try:
with conn:
with conn.cursor() as cur:
pattern = f"^@\\d+:{settings.comms_server_name}$"
cur.execute(
"SELECT user_id, full_user_id, displayname FROM profiles WHERE full_user_id ~ %s",
(pattern,),
)
profile_rows = cur.fetchall()
profile_index = {row[1]: row for row in profile_rows}
for _user_id, full_user_id, display in profile_rows:
if display and not _needs_rename_display(display):
continue
new_name = _random_name(existing)
if not new_name:
continue
cur.execute(
"UPDATE profiles SET displayname = %s WHERE full_user_id = %s",
(new_name, full_user_id),
)
renamed += 1
cur.execute(
"SELECT name FROM users WHERE name ~ %s",
(pattern,),
)
users = [row[0] for row in cur.fetchall()]
if not users:
return renamed
cur.execute(
"SELECT user_id, full_user_id FROM profiles WHERE full_user_id = ANY(%s)",
(users,),
)
for existing_full in cur.fetchall():
profile_index.setdefault(existing_full[1], existing_full)
for full_user_id in users:
if full_user_id in profile_index:
continue
localpart = full_user_id.split(":", 1)[0].lstrip("@")
new_name = _random_name(existing)
if not new_name:
continue
cur.execute(
"INSERT INTO profiles (user_id, displayname, full_user_id) VALUES (%s, %s, %s) "
"ON CONFLICT (full_user_id) DO UPDATE SET displayname = EXCLUDED.displayname",
(localpart, new_name, full_user_id),
)
renamed += 1
finally:
conn.close()
return renamed
def run_guest_name_randomizer(self, wait: bool = True) -> dict[str, Any]:
- if not settings.comms_namespace or not settings.comms_guest_name_cronjob:
- raise RuntimeError("comms guest name job not configured")
- return self._trigger(self._guest_name_spawner, "guest-name", wait)
if not settings.comms_mas_admin_client_id or not settings.comms_mas_admin_client_secret:
raise RuntimeError("comms mas admin secret missing")
if not settings.comms_synapse_base:
raise RuntimeError("comms synapse base missing")
processed = renamed = pruned = skipped = 0
with self._client() as client:
admin_token = self._mas_admin_token(client)
seeder_id = self._mas_user_id(client, admin_token, settings.comms_seeder_user)
seeder_token, seeder_session = self._mas_personal_session(client, admin_token, seeder_id)
try:
room_id = self._resolve_alias(client, seeder_token, settings.comms_room_alias)
members, existing = self._room_members(client, seeder_token, room_id)
users = self._mas_list_users(client, admin_token)
mas_usernames: set[str] = set()
for user in users:
attrs = user.get("attributes") or {}
username = attrs.get("username") or ""
if isinstance(username, str) and username:
mas_usernames.add(username)
legacy_guest = attrs.get("legacy_guest")
if not isinstance(username, str) or not username:
skipped += 1
continue
if not (legacy_guest or _needs_rename_username(username)):
skipped += 1
continue
user_id = f"@{username}:{settings.comms_server_name}"
access_token, session_id = self._mas_personal_session(client, admin_token, user["id"])
try:
display = self._get_displayname(client, access_token, user_id)
if display and not _needs_rename_display(display):
skipped += 1
continue
new_name = _random_name(existing)
if not new_name:
skipped += 1
continue
self._set_displayname(client, access_token, room_id, user_id, new_name, user_id in members)
renamed += 1
finally:
self._mas_revoke_session(client, admin_token, session_id)
try:
entries = self._synapse_list_users(client, seeder_token)
except Exception as exc: # noqa: BLE001
logger.info(
"synapse admin list skipped",
extra={"event": "comms_guest_list", "status": "error", "detail": str(exc)},
)
entries = []
now_ms = int(time.time() * 1000)
for entry in entries:
user_id = entry.get("name") or ""
if not user_id.startswith("@"):
continue
localpart = user_id.split(":", 1)[0].lstrip("@")
if localpart in mas_usernames:
continue
is_guest = entry.get("is_guest")
if is_guest and self._should_prune_guest(entry, now_ms):
if self._prune_guest(client, seeder_token, user_id):
pruned += 1
continue
if not (is_guest or _needs_rename_username(localpart)):
continue
display = self._get_displayname_admin(client, seeder_token, user_id)
if display and not _needs_rename_display(display):
continue
new_name = _random_name(existing)
if not new_name:
continue
if self._set_displayname_admin(client, seeder_token, user_id, new_name):
renamed += 1
renamed += self._db_rename_numeric(existing)
finally:
self._mas_revoke_session(client, admin_token, seeder_session)
processed = renamed + pruned + skipped
summary = CommsSummary(processed, renamed, pruned, skipped)
logger.info(
"comms guest name sync finished",
extra={
"event": "comms_guest_name",
"status": "ok",
"processed": summary.processed,
"renamed": summary.renamed,
"pruned": summary.pruned,
"skipped": summary.skipped,
},
)
return {"status": "ok", **summary.__dict__}
def run_pin_invite(self, wait: bool = True) -> dict[str, Any]:
if not settings.comms_seeder_password:
raise RuntimeError("comms seeder password missing")
with self._client() as client:
token = self._login(client, settings.comms_seeder_user, settings.comms_seeder_password)
room_id = self._resolve_alias(client, token, settings.comms_room_alias)
pinned = self._get_pinned(client, token, room_id)
for event_id in pinned:
event = self._get_event(client, token, room_id, event_id)
if event and (event.get("content") or {}).get("body") == settings.comms_pin_message:
return {"status": "ok", "detail": "already pinned"}
event_id = self._send_message(client, token, room_id, settings.comms_pin_message)
if not event_id:
return {"status": "error", "detail": "pin event_id missing"}
self._pin_message(client, token, room_id, event_id)
return {"status": "ok", "detail": "pinned"}
def run_reset_room(self, wait: bool = True) -> dict[str, Any]:
if not settings.comms_seeder_password:
raise RuntimeError("comms seeder password missing")
with self._client() as client:
token = self._login_with_retry(client, settings.comms_seeder_user, settings.comms_seeder_password)
old_room_id = self._resolve_alias(client, token, settings.comms_room_alias)
new_room_id = self._create_room(client, token, settings.comms_room_name)
self._set_room_state(client, token, new_room_id, "m.room.join_rules", {"join_rule": "public"})
self._set_room_state(client, token, new_room_id, "m.room.guest_access", {"guest_access": "can_join"})
self._set_room_state(
client,
token,
new_room_id,
"m.room.history_visibility",
{"history_visibility": "shared"},
)
self._set_room_state(client, token, new_room_id, "m.room.power_levels", self._power_levels())
self._delete_alias(client, token, settings.comms_room_alias)
self._put_alias(client, token, settings.comms_room_alias, new_room_id)
self._set_room_state(
client,
token,
new_room_id,
"m.room.canonical_alias",
{"alias": settings.comms_room_alias},
)
self._set_directory_visibility(client, token, new_room_id, "public")
bot_user_id = _canon_user(settings.comms_bot_user, settings.comms_server_name)
self._invite_user(client, token, new_room_id, bot_user_id)
for uid in self._list_joined_members(client, token, old_room_id):
if uid == _canon_user(settings.comms_seeder_user, settings.comms_server_name):
continue
localpart = uid.split(":", 1)[0].lstrip("@")
if localpart.isdigit():
continue
self._invite_user(client, token, new_room_id, uid)
event_id = self._send_message(client, token, new_room_id, settings.comms_pin_message)
if not event_id:
raise RuntimeError("pin message event_id missing")
self._set_room_state(client, token, new_room_id, "m.room.pinned_events", {"pinned": [event_id]})
self._set_directory_visibility(client, token, old_room_id, "private")
self._set_room_state(client, token, old_room_id, "m.room.join_rules", {"join_rule": "invite"})
self._set_room_state(client, token, old_room_id, "m.room.guest_access", {"guest_access": "forbidden"})
self._set_room_state(
client,
token,
old_room_id,
"m.room.tombstone",
{
"body": "Othrys has been reset. Please join the new room.",
"replacement_room": new_room_id,
},
)
self._send_message(
client,
token,
old_room_id,
"Othrys was reset. Join the new room at https://live.bstein.dev/#/room/#othrys:live.bstein.dev?action=join",
)
return {"status": "ok", "detail": f"old_room_id={old_room_id} new_room_id={new_room_id}"}
def run_seed_room(self, wait: bool = True) -> dict[str, Any]:
if not settings.comms_seeder_password or not settings.comms_bot_password:
raise RuntimeError("comms seeder/bot password missing")
with self._client() as client:
token = self._login(client, settings.comms_seeder_user, settings.comms_seeder_password)
self._ensure_user(client, token, settings.comms_seeder_user, settings.comms_seeder_password, True)
self._ensure_user(client, token, settings.comms_bot_user, settings.comms_bot_password, False)
room_id = self._ensure_room(client, token)
self._join_user(client, token, room_id, _canon_user(settings.comms_bot_user, settings.comms_server_name))
self._join_all_locals(client, token, room_id)
return {"status": "ok", "detail": "room seeded"}
def _login(self, client: httpx.Client, user: str, password: str) -> str:
resp = client.post(
f"{settings.comms_auth_base}/_matrix/client/v3/login",
json={
"type": "m.login.password",
"identifier": {"type": "m.id.user", "user": _canon_user(user, settings.comms_server_name)},
"password": password,
},
)
if resp.status_code != 200:
raise RuntimeError(f"login failed: {resp.status_code} {resp.text}")
payload = resp.json()
token = payload.get("access_token")
if not isinstance(token, str) or not token:
raise RuntimeError("login missing token")
return token
def _login_with_retry(self, client: httpx.Client, user: str, password: str) -> str:
last: Exception | None = None
for attempt in range(1, 6):
try:
return self._login(client, user, password)
except Exception as exc: # noqa: BLE001
last = exc
time.sleep(attempt * 2)
raise RuntimeError(str(last) if last else "login failed")
def _get_pinned(self, client: httpx.Client, token: str, room_id: str) -> list[str]:
resp = client.get(
f"{settings.comms_synapse_base}/_matrix/client/v3/rooms/{urllib.parse.quote(room_id)}/state/m.room.pinned_events",
headers=_auth(token),
)
if resp.status_code == 404:
return []
resp.raise_for_status()
pinned = resp.json().get("pinned", [])
return [item for item in pinned if isinstance(item, str)]
def _get_event(self, client: httpx.Client, token: str, room_id: str, event_id: str) -> dict[str, Any] | None:
resp = client.get(
f"{settings.comms_synapse_base}/_matrix/client/v3/rooms/{urllib.parse.quote(room_id)}/event/{urllib.parse.quote(event_id)}",
headers=_auth(token),
)
if resp.status_code == 404:
return None
resp.raise_for_status()
return resp.json()
def _send_message(self, client: httpx.Client, token: str, room_id: str, body: str) -> str:
resp = client.post(
f"{settings.comms_synapse_base}/_matrix/client/v3/rooms/{urllib.parse.quote(room_id)}/send/m.room.message",
headers=_auth(token),
json={"msgtype": "m.text", "body": body},
)
resp.raise_for_status()
payload = resp.json()
event_id = payload.get("event_id")
return event_id if isinstance(event_id, str) else ""
def _pin_message(self, client: httpx.Client, token: str, room_id: str, event_id: str) -> None:
resp = client.put(
f"{settings.comms_synapse_base}/_matrix/client/v3/rooms/{urllib.parse.quote(room_id)}/state/m.room.pinned_events",
headers=_auth(token),
json={"pinned": [event_id]},
)
resp.raise_for_status()
def _create_room(self, client: httpx.Client, token: str, name: str) -> str:
resp = client.post(
f"{settings.comms_synapse_base}/_matrix/client/v3/createRoom",
headers=_auth(token),
json={"preset": "public_chat", "name": name, "room_version": "11"},
)
resp.raise_for_status()
return resp.json()["room_id"]
def _set_room_state(self, client: httpx.Client, token: str, room_id: str, ev_type: str, content: dict[str, Any]) -> None:
resp = client.put(
f"{settings.comms_synapse_base}/_matrix/client/v3/rooms/{urllib.parse.quote(room_id)}/state/{ev_type}",
headers=_auth(token),
json=content,
)
resp.raise_for_status()
def _set_directory_visibility(self, client: httpx.Client, token: str, room_id: str, visibility: str) -> None:
resp = client.put(
f"{settings.comms_synapse_base}/_matrix/client/v3/directory/list/room/{urllib.parse.quote(room_id)}",
headers=_auth(token),
json={"visibility": visibility},
)
resp.raise_for_status()
def _delete_alias(self, client: httpx.Client, token: str, alias: str) -> None:
resp = client.delete(
f"{settings.comms_synapse_base}/_matrix/client/v3/directory/room/{urllib.parse.quote(alias)}",
headers=_auth(token),
)
if resp.status_code in (200, 202, 404):
return
resp.raise_for_status()
def _put_alias(self, client: httpx.Client, token: str, alias: str, room_id: str) -> None:
resp = client.put(
f"{settings.comms_synapse_base}/_matrix/client/v3/directory/room/{urllib.parse.quote(alias)}",
headers=_auth(token),
json={"room_id": room_id},
)
resp.raise_for_status()
def _list_joined_members(self, client: httpx.Client, token: str, room_id: str) -> list[str]:
resp = client.get(
f"{settings.comms_synapse_base}/_matrix/client/v3/rooms/{urllib.parse.quote(room_id)}/members?membership=join",
headers=_auth(token),
)
resp.raise_for_status()
members = []
for ev in resp.json().get("chunk", []) or []:
if ev.get("type") != "m.room.member":
continue
uid = ev.get("state_key")
if isinstance(uid, str) and uid.startswith("@"):
members.append(uid)
return members
def _invite_user(self, client: httpx.Client, token: str, room_id: str, user_id: str) -> None:
resp = client.post(
f"{settings.comms_synapse_base}/_matrix/client/v3/rooms/{urllib.parse.quote(room_id)}/invite",
headers=_auth(token),
json={"user_id": user_id},
)
if resp.status_code in (200, 202):
return
resp.raise_for_status()
def _power_levels(self) -> dict[str, Any]:
return {
"ban": 50,
"events": {
"m.room.avatar": 50,
"m.room.canonical_alias": 50,
"m.room.encryption": 100,
"m.room.history_visibility": 100,
"m.room.name": 50,
"m.room.power_levels": 100,
"m.room.server_acl": 100,
"m.room.tombstone": 100,
},
"events_default": 0,
"historical": 100,
"invite": 50,
"kick": 50,
"m.call.invite": 50,
"redact": 50,
"state_default": 50,
"users": { _canon_user(settings.comms_seeder_user, settings.comms_server_name): 100 },
"users_default": 0,
}
def _ensure_user(self, client: httpx.Client, token: str, localpart: str, password: str, admin: bool) -> None:
user_id = _canon_user(localpart, settings.comms_server_name)
url = f"{settings.comms_synapse_base}/_synapse/admin/v2/users/{urllib.parse.quote(user_id)}"
resp = client.get(url, headers=_auth(token))
if resp.status_code == 200:
return
payload = {"password": password, "admin": admin, "deactivated": False}
create = client.put(url, headers=_auth(token), json=payload)
if create.status_code not in (200, 201):
raise RuntimeError(f"create user {user_id} failed: {create.status_code} {create.text}")
def _ensure_room(self, client: httpx.Client, token: str) -> str:
alias = settings.comms_room_alias
alias_enc = urllib.parse.quote(alias)
exists = client.get(
f"{settings.comms_synapse_base}/_matrix/client/v3/directory/room/{alias_enc}",
headers=_auth(token),
)
if exists.status_code == 200:
room_id = exists.json()["room_id"]
else:
create = client.post(
f"{settings.comms_synapse_base}/_matrix/client/v3/createRoom",
headers=_auth(token),
json={
"preset": "public_chat",
"name": settings.comms_room_name,
"room_alias_name": alias.split(":", 1)[0].lstrip("#"),
"initial_state": [],
"power_level_content_override": {
"events_default": 0,
"users_default": 0,
"state_default": 50,
},
},
)
if create.status_code not in (200, 409):
raise RuntimeError(f"create room failed: {create.status_code} {create.text}")
exists = client.get(
f"{settings.comms_synapse_base}/_matrix/client/v3/directory/room/{alias_enc}",
headers=_auth(token),
)
room_id = exists.json()["room_id"]
state_events = [
("m.room.join_rules", {"join_rule": "public"}),
("m.room.guest_access", {"guest_access": "can_join"}),
("m.room.history_visibility", {"history_visibility": "shared"}),
("m.room.canonical_alias", {"alias": alias}),
]
for ev_type, content in state_events:
client.put(
f"{settings.comms_synapse_base}/_matrix/client/v3/rooms/{room_id}/state/{ev_type}",
headers=_auth(token),
json=content,
)
client.put(
f"{settings.comms_synapse_base}/_matrix/client/v3/directory/list/room/{room_id}",
headers=_auth(token),
json={"visibility": "public"},
)
return room_id
def _join_user(self, client: httpx.Client, token: str, room_id: str, user_id: str) -> None:
client.post(
f"{settings.comms_synapse_base}/_synapse/admin/v1/join/{urllib.parse.quote(room_id)}",
headers=_auth(token),
json={"user_id": user_id},
)
def _join_all_locals(self, client: httpx.Client, token: str, room_id: str) -> None:
users: list[str] = []
from_token = None
while True:
url = f"{settings.comms_synapse_base}/_synapse/admin/v2/users?local=true&deactivated=false&limit=100"
if from_token:
url += f"&from={from_token}"
resp = client.get(url, headers=_auth(token))
payload = resp.json()
users.extend([u["name"] for u in payload.get("users", []) if isinstance(u, dict) and u.get("name")])
from_token = payload.get("next_token")
if not from_token:
break
for uid in users:
self._join_user(client, token, room_id, uid)
comms = CommsService()
View File
@@ -1,18 +1,144 @@
from __future__ import annotations
from typing import Any
import textwrap
import httpx
from ..k8s.exec import ExecError, PodExecutor
from ..k8s.pods import PodSelectionError
from ..settings import settings
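# PHP payload streamed to the Firefly pod through a shell heredoc; it boots Laravel and upserts the managed user.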
_FIREFLY_SYNC_SCRIPT = textwrap.dedent(
"""
<?php
declare(strict_types=1);
use FireflyIII\\Console\\Commands\\Correction\\CreatesGroupMemberships;
use FireflyIII\\Models\\Role;
use FireflyIII\\Repositories\\User\\UserRepositoryInterface;
use FireflyIII\\Support\\Facades\\FireflyConfig;
use FireflyIII\\User;
use Illuminate\\Contracts\\Console\\Kernel as ConsoleKernel;
function log_line(string $message): void
{
fwrite(STDOUT, $message . PHP_EOL);
}
function error_line(string $message): void
{
fwrite(STDERR, $message . PHP_EOL);
}
function find_app_root(): string
{
$candidates = [];
$env_root = getenv('FIREFLY_APP_DIR') ?: '';
if ($env_root !== '') {
$candidates[] = $env_root;
}
$candidates[] = '/var/www/html';
$candidates[] = '/var/www/firefly-iii';
$candidates[] = '/app';
foreach ($candidates as $candidate) {
if (!is_dir($candidate)) {
continue;
}
if (file_exists($candidate . '/vendor/autoload.php')) {
return $candidate;
}
}
return '';
}
$email = trim((string) getenv('FIREFLY_USER_EMAIL'));
$password = (string) getenv('FIREFLY_USER_PASSWORD');
if ($email === '' || $password === '') {
error_line('missing FIREFLY_USER_EMAIL or FIREFLY_USER_PASSWORD');
exit(1);
}
$root = find_app_root();
if ($root === '') {
error_line('firefly app root not found');
exit(1);
}
$autoload = $root . '/vendor/autoload.php';
$app_bootstrap = $root . '/bootstrap/app.php';
if (!file_exists($autoload) || !file_exists($app_bootstrap)) {
error_line('firefly bootstrap files missing');
exit(1);
}
require $autoload;
$app = require $app_bootstrap;
$kernel = $app->make(ConsoleKernel::class);
$kernel->bootstrap();
try {
FireflyConfig::set('single_user_mode', true);
} catch (Throwable $exc) {
error_line('failed to enforce single_user_mode: ' . $exc->getMessage());
}
$repository = $app->make(UserRepositoryInterface::class);
$existing_user = User::where('email', $email)->first();
$first_user = User::count() == 0;
if (!$existing_user) {
$existing_user = User::create(
[
'email' => $email,
'password' => bcrypt($password),
'blocked' => false,
'blocked_code' => null,
]
);
if ($first_user) {
$role = Role::where('name', 'owner')->first();
if ($role) {
$existing_user->roles()->attach($role);
}
}
log_line(sprintf('created firefly user %s', $email));
} else {
log_line(sprintf('updating firefly user %s', $email));
}
$existing_user->blocked = false;
$existing_user->blocked_code = null;
$existing_user->save();
$repository->changePassword($existing_user, $password);
CreatesGroupMemberships::createGroupMembership($existing_user);
log_line('firefly user sync complete');
"""
).strip()
def _firefly_exec_command() -> str:
return f"php <<'PHP'\n{_FIREFLY_SYNC_SCRIPT}\nPHP"
class FireflyService:
def __init__(self) -> None:
self._executor = PodExecutor(
settings.firefly_namespace,
settings.firefly_pod_label,
settings.firefly_container,
)
def sync_user(self, email: str, password: str, wait: bool = True) -> dict[str, Any]:
@@ -21,22 +147,39 @@ class FireflyService:
raise RuntimeError("missing email")
if not password:
raise RuntimeError("missing password")
if not settings.firefly_namespace:
raise RuntimeError("firefly sync not configured")
env = {
"FIREFLY_USER_EMAIL": email,
"FIREFLY_USER_PASSWORD": password,
}
try:
result = self._executor.exec(
_firefly_exec_command(),
env=env,
timeout_sec=settings.firefly_user_sync_wait_timeout_sec,
check=True,
)
except (ExecError, PodSelectionError, TimeoutError) as exc:
return {"status": "error", "detail": str(exc)}
output = (result.stdout or result.stderr).strip()
return {"status": "ok", "detail": output}
def run_cron(self) -> dict[str, Any]:
if not settings.firefly_cron_token:
raise RuntimeError("firefly cron token missing")
url = f"{settings.firefly_cron_base_url.rstrip('/')}/{settings.firefly_cron_token}"
try:
with httpx.Client(timeout=settings.firefly_cron_timeout_sec) as client:
resp = client.get(url)
if resp.status_code != 200:
return {"status": "error", "detail": f"status={resp.status_code}"}
except Exception as exc:
return {"status": "error", "detail": str(exc)}
return {"status": "ok", "detail": "cron triggered"}
firefly = FireflyService()
View File
@@ -0,0 +1,201 @@
from __future__ import annotations
import time
from typing import Any
from ..k8s.client import get_json, post_json
from ..utils.logging import get_logger
from ..settings import settings
logger = get_logger(__name__)
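# Shell payload for a privileged sweeper pod: it chroots into the host to prune images with crictl, tightening the age cutoff when the root disk is over 70% full.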
_IMAGE_SWEEPER_SCRIPT = """
set -eu
ONE_SHOT=${ONE_SHOT:-false}
THRESHOLD_DAYS=14
usage=$(df -P /host | awk 'NR==2 {gsub(/%/,"",$5); print $5}') || usage=""
if [ -n "${usage}" ] && [ "${usage}" -ge 70 ]; then
THRESHOLD_DAYS=3
fi
# export so the python heredoc below sees the (possibly tightened) threshold
export THRESHOLD_DAYS
cutoff=$(python3 - <<'PY'
import time, os
print(int(time.time()) - int(os.environ.get("THRESHOLD_DAYS", "14")) * 86400)
PY
)
RUNNING=$(chroot /host /bin/sh -c "crictl ps -a --quiet 2>/dev/null" | tr -s ' ' '\n' | sort -u | tr '\n' ' ')
IMAGES_JSON=$(chroot /host /bin/sh -c "crictl images -o json 2>/dev/null" || echo '{}')
SKIP="registry.k8s.io/pause k8s.gcr.io/pause rancher/mirrored-pause"
prune_list=$(printf "%s" "${IMAGES_JSON}" | CUTOFF="${cutoff}" RUNNING="${RUNNING}" SKIP="${SKIP}" python3 - <<'PY'
import json, os, sys, time
try:
data = json.load(sys.stdin)
except Exception:
print("", end="")
sys.exit(0)
cutoff = int(os.environ.get("CUTOFF", "0"))
running = set(os.environ.get("RUNNING", "").split())
skip = os.environ.get("SKIP", "").split()
now = int(time.time())
prune = []
def is_skip(tags):
if not tags:
return False
for t in tags:
for prefix in skip:
if prefix and t.startswith(prefix):
return True
return False
for img in data.get("images", []):
image_id = img.get("id", "")
if not image_id:
continue
if image_id in running:
continue
tags = img.get("repoTags") or []
if is_skip(tags):
continue
created = img.get("createdAt") or 0
try:
created = int(str(created)) // 1000000000
except Exception:
created = 0
if created and created > now:
created = now
if cutoff and created and created < cutoff:
prune.append(image_id)
seen = set()
for p in prune:
if p in seen:
continue
seen.add(p)
print(p)
PY
)
if [ -n "${prune_list}" ]; then
printf "%s" "${prune_list}" | while read -r image_id; do
if [ -n "${image_id}" ]; then
chroot /host /bin/sh -c "crictl rmi --prune ${image_id}" || true
fi
done
fi
find /host/var/lib/rancher/k3s/agent/images -type f -name "*.tar" -mtime +7 -print -delete 2>/dev/null || true
find /host/var/lib/rancher/k3s/agent/containerd -maxdepth 1 -type f -mtime +7 -print -delete 2>/dev/null || true
if [ "${ONE_SHOT}" = "true" ]; then
exit 0
fi
sleep infinity
""".strip()
class ImageSweeperService:
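# Build a one-shot privileged Job that mounts the host root filesystem and runs the sweeper script above.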
def _job_payload(self, job_name: str) -> dict[str, Any]:
job: dict[str, Any] = {
"apiVersion": "batch/v1",
"kind": "Job",
"metadata": {
"name": job_name,
"namespace": settings.image_sweeper_namespace,
"labels": {
"app": "image-sweeper",
"atlas.bstein.dev/trigger": "ariadne",
},
},
"spec": {
"backoffLimit": 0,
"ttlSecondsAfterFinished": settings.image_sweeper_job_ttl_sec,
"template": {
"spec": {
"serviceAccountName": settings.image_sweeper_service_account,
"restartPolicy": "OnFailure",
"nodeSelector": {
"kubernetes.io/os": "linux",
"kubernetes.io/arch": "arm64",
"node-role.kubernetes.io/worker": "true",
},
"tolerations": [
{
"key": "node-role.kubernetes.io/control-plane",
"operator": "Exists",
"effect": "NoSchedule",
},
{
"key": "node-role.kubernetes.io/master",
"operator": "Exists",
"effect": "NoSchedule",
},
],
"containers": [
{
"name": "image-sweeper",
"image": "python:3.12.9-alpine3.20",
"command": ["/bin/sh", "-c"],
"args": [_IMAGE_SWEEPER_SCRIPT],
"env": [
{"name": "ONE_SHOT", "value": "true"},
],
"securityContext": {"privileged": True, "runAsUser": 0},
"volumeMounts": [
{"name": "host-root", "mountPath": "/host"},
],
}
],
"volumes": [
{"name": "host-root", "hostPath": {"path": "/"}},
],
}
},
},
}
return job
def _wait_for_completion(self, job_name: str, timeout_sec: float) -> dict[str, Any]:
deadline = time.time() + timeout_sec
while time.time() < deadline:
job = get_json(
f"/apis/batch/v1/namespaces/{settings.image_sweeper_namespace}/jobs/{job_name}"
)
status = job.get("status") if isinstance(job.get("status"), dict) else {}
if int(status.get("succeeded") or 0) > 0:
return {"job": job_name, "status": "ok"}
if int(status.get("failed") or 0) > 0:
return {"job": job_name, "status": "error"}
time.sleep(2)
return {"job": job_name, "status": "running"}
def run(self, wait: bool = True) -> dict[str, Any]:
job_name = f"image-sweeper-{int(time.time())}"
payload = self._job_payload(job_name)
created = post_json(
f"/apis/batch/v1/namespaces/{settings.image_sweeper_namespace}/jobs",
payload,
)
name = created.get("metadata", {}).get("name", job_name)
logger.info(
"image sweeper job triggered",
extra={"event": "image_sweeper_trigger", "job": name},
)
if wait:
return self._wait_for_completion(name, settings.image_sweeper_wait_timeout_sec)
return {"job": name, "status": "queued"}
image_sweeper = ImageSweeperService()
View File
@@ -1,42 +1,496 @@
from __future__ import annotations
from dataclasses import dataclass
from datetime import datetime, timezone
import re
import time
from typing import Any
import httpx
import psycopg
from ..k8s.exec import ExecError, PodExecutor
from ..k8s.pods import PodSelectionError
from ..settings import settings
from ..utils.logging import get_logger
from .keycloak_admin import keycloak_admin
logger = get_logger(__name__)
def _extract_attr(attrs: Any, key: str) -> str:
if not isinstance(attrs, dict):
return ""
raw = attrs.get(key)
if isinstance(raw, list):
for item in raw:
if isinstance(item, str) and item.strip():
return item.strip()
return ""
if isinstance(raw, str) and raw.strip():
return raw.strip()
return ""
def _resolve_mailu_email(username: str, user: dict[str, Any]) -> str:
attrs = user.get("attributes")
mailu_email = _extract_attr(attrs, "mailu_email")
if mailu_email:
return mailu_email
email = user.get("email")
if isinstance(email, str) and email.strip():
email = email.strip()
if email.lower().endswith(f"@{settings.mailu_domain.lower()}"):
return email
return f"{username}@{settings.mailu_domain}"
def _parse_mail_export(output: str) -> list[tuple[str, str]]:
accounts: list[tuple[str, str]] = []
account_id = ""
for line in output.splitlines():
line = line.strip()
if not line:
continue
match = re.match(r"^Account\s+(\d+):", line, flags=re.IGNORECASE)
if match:
account_id = match.group(1)
continue
match = re.match(r"^-\s*E-?mail:\s*(\S+)", line, flags=re.IGNORECASE)
if match and account_id:
accounts.append((account_id, match.group(1)))
return accounts
@dataclass(frozen=True)
class NextcloudMailSyncSummary:
processed: int
created: int
updated: int
deleted: int
skipped: int
failures: int
detail: str = ""
class NextcloudService:
def __init__(self) -> None:
self._executor = PodExecutor(
settings.nextcloud_namespace,
settings.nextcloud_pod_label,
settings.nextcloud_container,
)
def _occ(self, args: list[str]) -> str:
command = ["runuser", "-u", "www-data", "--", "php", "/var/www/html/occ", *args]
result = self._executor.exec(
command,
timeout_sec=settings.nextcloud_exec_timeout_sec,
check=True,
)
return result.stdout
def run_cron(self) -> dict[str, Any]:
if not settings.nextcloud_namespace:
raise RuntimeError("nextcloud cron not configured")
try:
self._executor.exec(
["runuser", "-u", "www-data", "--", "php", "-f", "/var/www/html/cron.php"],
timeout_sec=settings.nextcloud_exec_timeout_sec,
check=True,
)
except (ExecError, PodSelectionError, TimeoutError) as exc:
return {"status": "error", "detail": str(exc)}
return {"status": "ok"}
def _list_mail_accounts(self, username: str) -> list[tuple[str, str]]:
output = self._occ(["mail:account:export", username])
return _parse_mail_export(output)
def _set_editor_mode_richtext(self, account_ids: list[str]) -> None:
safe_ids = [item for item in account_ids if item.isdigit()]
if not safe_ids:
return
if not settings.nextcloud_db_host or not settings.nextcloud_db_password:
logger.info(
"nextcloud editor_mode skipped",
extra={"event": "nextcloud_mail_editor_mode", "status": "skip", "reason": "missing db config"},
)
return
ids_csv = ",".join(safe_ids)
query = (
"UPDATE oc_mail_accounts SET editor_mode='richtext' "
f"WHERE id IN ({ids_csv}) AND editor_mode <> 'richtext';"
)
try:
with psycopg.connect(
host=settings.nextcloud_db_host,
port=settings.nextcloud_db_port,
dbname=settings.nextcloud_db_name,
user=settings.nextcloud_db_user,
password=settings.nextcloud_db_password,
) as conn:
with conn.cursor() as cur:
cur.execute(query)
except Exception as exc:
logger.info(
"nextcloud editor_mode update failed",
extra={"event": "nextcloud_mail_editor_mode", "status": "error", "detail": str(exc)},
)
def _set_user_mail_meta(self, user_id: str, primary_email: str, account_count: int) -> None:
synced_at = time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime())
attrs = {
"nextcloud_mail_primary_email": [primary_email],
"nextcloud_mail_account_count": [str(account_count)],
"nextcloud_mail_synced_at": [synced_at],
}
try:
keycloak_admin.update_user_safe(user_id, {"attributes": attrs})
except Exception:
return
def sync_mail(self, username: str | None = None, wait: bool = True) -> dict[str, Any]:
if not settings.nextcloud_namespace:
raise RuntimeError("nextcloud mail sync not configured")
cleaned_username = None
if username is not None:
cleaned_username = username.strip()
if not cleaned_username:
raise RuntimeError("missing username")
if not keycloak_admin.ready():
return {"status": "error", "detail": "keycloak admin not configured"}
users: list[dict[str, Any]]
if cleaned_username is not None:
user = keycloak_admin.find_user(cleaned_username)
if not user:
return {"status": "ok", "detail": "no matching user"}
users = [user]
else:
users = keycloak_admin.iter_users(page_size=200, brief=False)
processed = created = updated = deleted = skipped = failures = 0
for user in users:
username_val = user.get("username") if isinstance(user.get("username"), str) else ""
username_val = username_val.strip()
if not username_val:
skipped += 1
continue
if user.get("enabled") is False:
skipped += 1
continue
if user.get("serviceAccountClientId") or username_val.startswith("service-account-"):
skipped += 1
continue
user_id = user.get("id") if isinstance(user.get("id"), str) else ""
full_user = user
if user_id:
try:
full_user = keycloak_admin.get_user(user_id)
except Exception:
full_user = user
attrs = full_user.get("attributes") if isinstance(full_user.get("attributes"), dict) else {}
mailu_email = _resolve_mailu_email(username_val, full_user)
app_pw = _extract_attr(attrs, "mailu_app_password")
if not mailu_email or not app_pw:
skipped += 1
continue
if mailu_email and not _extract_attr(attrs, "mailu_email"):
try:
keycloak_admin.set_user_attribute(username_val, "mailu_email", mailu_email)
except Exception:
pass
try:
accounts = self._list_mail_accounts(username_val)
except Exception as exc:
failures += 1
logger.info(
"nextcloud mail export failed",
extra={"event": "nextcloud_mail_export", "status": "error", "detail": str(exc)},
) )
continue
processed += 1
mailu_accounts = [(aid, email) for aid, email in accounts if email.lower().endswith(f"@{settings.mailu_domain.lower()}")]
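# Prefer the account matching the canonical Mailu address; otherwise the first Mailu-domain account becomes primary.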
primary_id = ""
primary_email = ""
for account_id, account_email in mailu_accounts:
if not primary_id:
primary_id = account_id
primary_email = account_email
if account_email.lower() == mailu_email.lower():
primary_id = account_id
primary_email = account_email
break
if mailu_accounts:
try:
self._occ(
[
"mail:account:update",
"-q",
primary_id,
"--name",
username_val,
"--email",
mailu_email,
"--imap-host",
settings.mailu_host,
"--imap-port",
"993",
"--imap-ssl-mode",
"ssl",
"--imap-user",
mailu_email,
"--imap-password",
app_pw,
"--smtp-host",
settings.mailu_host,
"--smtp-port",
"587",
"--smtp-ssl-mode",
"tls",
"--smtp-user",
mailu_email,
"--smtp-password",
app_pw,
"--auth-method",
"password",
]
)
updated += 1
except Exception:
failures += 1
continue
for account_id, account_email in mailu_accounts:
if account_id == primary_id:
continue
try:
self._occ(["mail:account:delete", "-q", account_id])
deleted += 1
except Exception:
failures += 1
else:
try:
self._occ(
[
"mail:account:create",
"-q",
username_val,
username_val,
mailu_email,
settings.mailu_host,
"993",
"ssl",
mailu_email,
app_pw,
settings.mailu_host,
"587",
"tls",
mailu_email,
app_pw,
"password",
]
)
created += 1
except Exception:
failures += 1
continue
try:
accounts_after = self._list_mail_accounts(username_val)
except Exception:
failures += 1
continue
mailu_accounts_after = [
(aid, email) for aid, email in accounts_after if email.lower().endswith(f"@{settings.mailu_domain.lower()}")
]
account_count = len(mailu_accounts_after)
primary_email_after = ""
editor_mode_ids = []
for account_id, account_email in mailu_accounts_after:
editor_mode_ids.append(account_id)
if account_email.lower() == mailu_email.lower():
primary_email_after = account_email
break
if not primary_email_after:
primary_email_after = account_email
self._set_editor_mode_richtext(editor_mode_ids)
if user_id:
self._set_user_mail_meta(user_id, primary_email_after, account_count)
summary = NextcloudMailSyncSummary(
processed=processed,
created=created,
updated=updated,
deleted=deleted,
skipped=skipped,
failures=failures,
)
logger.info(
"nextcloud mail sync finished",
extra={
"event": "nextcloud_mail_sync",
"status": "ok" if failures == 0 else "error",
"processed_count": processed,
"created_count": created,
"updated_count": updated,
"deleted_count": deleted,
"skipped_count": skipped,
"failures_count": failures,
},
)
status = "ok" if failures == 0 else "error"
return {"status": status, "summary": summary}
def _run_shell(self, script: str, check: bool = True) -> None:
self._executor.exec(
script,
timeout_sec=settings.nextcloud_exec_timeout_sec,
check=check,
)
def _external_api(self, method: str, path: str, data: dict[str, Any] | None = None) -> dict[str, Any]:
if not settings.nextcloud_url:
raise RuntimeError("nextcloud url not configured")
if not settings.nextcloud_admin_user or not settings.nextcloud_admin_password:
raise RuntimeError("nextcloud admin credentials missing")
url = f"{settings.nextcloud_url}/ocs/v2.php/apps/external/api/v1{path}"
headers = {"OCS-APIRequest": "true"}
with httpx.Client(timeout=settings.nextcloud_exec_timeout_sec) as client:
resp = client.request(
method,
url,
headers=headers,
auth=(settings.nextcloud_admin_user, settings.nextcloud_admin_password),
data=data,
)
resp.raise_for_status()
try:
return resp.json()
except Exception:
return {}
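# One-shot maintenance: repair the code tree if necessary, enforce theming and mail CSS, then rebuild the external-sites menu.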
def run_maintenance(self) -> dict[str, Any]:
if not settings.nextcloud_namespace:
raise RuntimeError("nextcloud maintenance not configured")
try:
self._run_shell(
"""
set -euo pipefail
if [ ! -d /var/www/html/lib ] && [ -d /usr/src/nextcloud/lib ]; then
if command -v rsync >/dev/null 2>&1; then
rsync -a --delete --exclude config --exclude data /usr/src/nextcloud/ /var/www/html/
else
cp -a /usr/src/nextcloud/. /var/www/html/
fi
fi
mkdir -p /var/www/html/data
chown 33:33 /var/www/html || true
chmod 775 /var/www/html || true
chown -R 33:33 /var/www/html/apps /var/www/html/custom_apps /var/www/html/data /var/www/html/config 2>/dev/null || true
""",
check=False,
)
self._occ(["config:app:set", "theming", "name", "--value", "Atlas Cloud"])
self._occ(["config:app:set", "theming", "slogan", "--value", "Unified access to Atlas services"])
theming_url = settings.nextcloud_url or "https://cloud.bstein.dev"
self._occ(["config:app:set", "theming", "url", "--value", theming_url])
self._occ(["config:app:set", "theming", "color", "--value", "#0f172a"])
self._occ(["config:app:set", "theming", "disable-user-theming", "--value", "yes"])
self._executor.exec(
["runuser", "-u", "www-data", "--", "php", "/var/www/html/occ", "app:install", "customcss"],
timeout_sec=settings.nextcloud_exec_timeout_sec,
check=False,
)
self._executor.exec(
["runuser", "-u", "www-data", "--", "php", "/var/www/html/occ", "app:enable", "customcss"],
timeout_sec=settings.nextcloud_exec_timeout_sec,
check=False,
)
mail_css = (
".mail-message-body, .mail-message-body pre, .mail-message-body code, .mail-message-body table {\n"
" font-family: \"Inter\", \"Source Sans 3\", \"Helvetica Neue\", Arial, sans-serif;\n"
" font-size: 14px;\n"
" line-height: 1.6;\n"
" color: var(--color-main-text);\n"
"}\n"
".mail-message-body pre {\n"
" background: rgba(15, 23, 42, 0.06);\n"
" padding: 12px;\n"
" border-radius: 8px;\n"
"}\n"
".mail-message-body blockquote {\n"
" border-left: 3px solid var(--color-border);\n"
" padding-left: 12px;\n"
" margin: 8px 0;\n"
" color: var(--color-text-lighter);\n"
"}\n"
".mail-message-body img {\n"
" max-width: 100%;\n"
" border-radius: 6px;\n"
"}\n"
)
self._occ(["config:app:set", "customcss", "css", "--value", mail_css])
self._occ(["config:app:set", "files", "default_quota", "--value", "250 GB"])
payload = self._external_api("GET", "?format=json")
links = payload.get("ocs", {}).get("data", []) if isinstance(payload, dict) else []
for link in links:
link_id = link.get("id") if isinstance(link, dict) else None
if link_id is not None:
self._external_api("DELETE", f"/sites/{link_id}?format=json")
sites = [
("Vaultwarden", "https://vault.bstein.dev"),
("Jellyfin", "https://stream.bstein.dev"),
("Gitea", "https://scm.bstein.dev"),
("Jenkins", "https://ci.bstein.dev"),
("Harbor", "https://registry.bstein.dev"),
("Vault", "https://secret.bstein.dev"),
("Jitsi", "https://meet.bstein.dev"),
("Grafana", "https://metrics.bstein.dev"),
("Chat LLM", "https://chat.ai.bstein.dev"),
("Vision", "https://draw.ai.bstein.dev"),
("STT/TTS", "https://talk.ai.bstein.dev"),
]
for name, url in sites:
self._external_api(
"POST",
"/sites?format=json",
data={
"name": name,
"url": url,
"lang": "",
"type": "link",
"device": "",
"icon": "",
"groups[]": "",
"redirect": "1",
},
)
except (ExecError, PodSelectionError, TimeoutError) as exc:
return {"status": "error", "detail": str(exc)}
except Exception as exc: # noqa: BLE001
return {"status": "error", "detail": str(exc)}
return {"status": "ok", "detail": "maintenance complete"}
nextcloud = NextcloudService()
View File
@@ -0,0 +1,112 @@
from __future__ import annotations
from dataclasses import dataclass
import re
from typing import Any
import httpx
from ..settings import settings
from ..utils.logging import get_logger
logger = get_logger(__name__)
_UNITS = {
"b": 1,
"kb": 1024,
"mb": 1024**2,
"gb": 1024**3,
"tb": 1024**4,
}
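# Convert human-readable sizes reported by _cat/indices (e.g. "1.2gb") into bytes; unparseable values count as zero.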
def parse_size(value: str) -> int:
if not value:
return 0
text = value.strip().lower()
if text in {"-", "0"}:
return 0
match = re.match(r"^([0-9.]+)([a-z]+)$", text)
if not match:
return 0
number = float(match.group(1))
unit = match.group(2)
if unit not in _UNITS:
return 0
return int(number * _UNITS[unit])
@dataclass(frozen=True)
class OpensearchPruneSummary:
total_before: int
total_after: int
deleted: int
detail: str = ""
def _fetch_indices(client: httpx.Client, pattern: str) -> list[dict[str, Any]]:
url = f"{settings.opensearch_url}/_cat/indices/{pattern}"
params = {"format": "json", "h": "index,store.size,creation.date"}
resp = client.get(url, params=params)
if resp.status_code == 404:
return []
resp.raise_for_status()
payload = resp.json()
return payload if isinstance(payload, list) else []
def _delete_index(client: httpx.Client, index: str) -> None:
url = f"{settings.opensearch_url}/{index}"
resp = client.delete(url)
resp.raise_for_status()
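# Delete the oldest matching indices until the combined store size fits under opensearch_limit_bytes.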
def prune_indices() -> OpensearchPruneSummary:
patterns = [p.strip() for p in settings.opensearch_index_patterns.split(",") if p.strip()]
if not patterns:
return OpensearchPruneSummary(0, 0, 0, detail="no patterns configured")
indices: list[dict[str, Any]] = []
with httpx.Client(timeout=settings.opensearch_timeout_sec) as client:
for pattern in patterns:
try:
data = _fetch_indices(client, pattern)
except Exception as exc:
logger.info(
"opensearch index fetch failed",
extra={"event": "opensearch_prune", "status": "error", "detail": str(exc)},
)
continue
for item in data:
index = item.get("index")
if not isinstance(index, str) or not index or index.startswith("."):
continue
size = parse_size(str(item.get("store.size") or ""))
created = int(item.get("creation.date") or 0)
indices.append({"index": index, "size": size, "created": created})
total = sum(item["size"] for item in indices)
if total <= settings.opensearch_limit_bytes:
return OpensearchPruneSummary(total, total, 0, detail="within limit")
indices.sort(key=lambda item: item["created"])
deleted = 0
for item in indices:
if total <= settings.opensearch_limit_bytes:
break
try:
_delete_index(client, item["index"])
deleted += 1
total -= item["size"]
except Exception as exc:
logger.info(
"opensearch delete failed",
extra={"event": "opensearch_prune", "status": "error", "detail": str(exc)},
)
return OpensearchPruneSummary(
total_before=sum(item["size"] for item in indices),
total_after=total,
deleted=deleted,
)
View File
@@ -0,0 +1,53 @@
from __future__ import annotations
from dataclasses import dataclass
from typing import Any
from ..k8s.client import delete_json, get_json
from ..utils.logging import get_logger
logger = get_logger(__name__)
@dataclass(frozen=True)
class PodCleanerSummary:
deleted: int
skipped: int
failures: int
def _collect_pods(phase: str) -> list[dict[str, Any]]:
payload = get_json(f"/api/v1/pods?fieldSelector=status.phase={phase}")
items = payload.get("items") if isinstance(payload.get("items"), list) else []
return [item for item in items if isinstance(item, dict)]
def _delete_pod(namespace: str, name: str) -> None:
delete_json(f"/api/v1/namespaces/{namespace}/pods/{name}?gracePeriodSeconds=0")
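# Sweep all namespaces for pods left in Succeeded or Failed and delete them immediately.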
def clean_finished_pods() -> PodCleanerSummary:
deleted = 0
skipped = 0
failures = 0
for phase in ("Succeeded", "Failed"):
for pod in _collect_pods(phase):
metadata = pod.get("metadata") if isinstance(pod.get("metadata"), dict) else {}
namespace = metadata.get("namespace") if isinstance(metadata.get("namespace"), str) else ""
name = metadata.get("name") if isinstance(metadata.get("name"), str) else ""
if not namespace or not name:
skipped += 1
continue
try:
_delete_pod(namespace, name)
deleted += 1
except Exception as exc:
failures += 1
logger.info(
"pod delete failed",
extra={"event": "pod_cleaner", "status": "error", "detail": str(exc)},
)
return PodCleanerSummary(deleted=deleted, skipped=skipped, failures=failures)
View File
@@ -1,46 +1,521 @@
from __future__ import annotations
from dataclasses import dataclass
import os
from typing import Any
import httpx
from ..settings import settings
from ..utils.logging import get_logger
logger = get_logger(__name__)
@dataclass(frozen=True)
class VaultResult:
status: str
detail: str = ""
def _split_csv(value: str) -> list[str]:
return [item.strip() for item in (value or "").split(",") if item.strip()]
def _read_file(path: str) -> str:
try:
with open(path, "r", encoding="utf-8") as handle:
return handle.read().strip()
except FileNotFoundError:
return ""
def _build_policy(read_paths: str, write_paths: str) -> str:
policy_parts: list[str] = []
for path in (read_paths or "").split():
policy_parts.append(
f'path "kv/data/atlas/{path}" {{\n capabilities = ["read"]\n}}\n'
f'path "kv/metadata/atlas/{path}" {{\n capabilities = ["list"]\n}}\n'
)
for path in (write_paths or "").split():
policy_parts.append(
f'path "kv/data/atlas/{path}" {{\n capabilities = ["create", "update", "read"]\n}}\n'
f'path "kv/metadata/atlas/{path}" {{\n capabilities = ["list"]\n}}\n'
)
return "\n".join(policy_parts).strip() + "\n"
_K8S_ROLES: list[dict[str, str]] = [
{
"role": "outline",
"namespace": "outline",
"service_accounts": "outline-vault",
"read_paths": "outline/* shared/postmark-relay",
"write_paths": "",
},
{
"role": "planka",
"namespace": "planka",
"service_accounts": "planka-vault",
"read_paths": "planka/* shared/postmark-relay",
"write_paths": "",
},
{
"role": "bstein-dev-home",
"namespace": "bstein-dev-home",
"service_accounts": "bstein-dev-home,bstein-dev-home-vault-sync",
"read_paths": "portal/* shared/chat-ai-keys-runtime shared/portal-e2e-client shared/postmark-relay "
"mailu/mailu-initial-account-secret shared/harbor-pull",
"write_paths": "",
},
{
"role": "gitea",
"namespace": "gitea",
"service_accounts": "gitea-vault",
"read_paths": "gitea/*",
"write_paths": "",
},
{
"role": "vaultwarden",
"namespace": "vaultwarden",
"service_accounts": "vaultwarden-vault",
"read_paths": "vaultwarden/* mailu/mailu-initial-account-secret",
"write_paths": "",
},
{
"role": "sso",
"namespace": "sso",
"service_accounts": "sso-vault,sso-vault-sync,mas-secrets-ensure",
"read_paths": "sso/* portal/bstein-dev-home-keycloak-admin shared/keycloak-admin "
"shared/portal-e2e-client shared/postmark-relay shared/harbor-pull",
"write_paths": "",
},
{
"role": "mailu-mailserver",
"namespace": "mailu-mailserver",
"service_accounts": "mailu-vault-sync",
"read_paths": "mailu/* shared/postmark-relay shared/harbor-pull",
"write_paths": "",
},
{
"role": "harbor",
"namespace": "harbor",
"service_accounts": "harbor-vault-sync",
"read_paths": "harbor/* shared/harbor-pull",
"write_paths": "",
},
{
"role": "nextcloud",
"namespace": "nextcloud",
"service_accounts": "nextcloud-vault",
"read_paths": "nextcloud/* shared/keycloak-admin shared/postmark-relay",
"write_paths": "",
},
{
"role": "comms",
"namespace": "comms",
"service_accounts": "comms-vault,atlasbot",
"read_paths": "comms/* shared/chat-ai-keys-runtime shared/harbor-pull",
"write_paths": "",
},
{
"role": "jenkins",
"namespace": "jenkins",
"service_accounts": "jenkins",
"read_paths": "jenkins/*",
"write_paths": "",
},
{
"role": "monitoring",
"namespace": "monitoring",
"service_accounts": "monitoring-vault-sync",
"read_paths": "monitoring/* shared/postmark-relay shared/harbor-pull",
"write_paths": "",
},
{
"role": "logging",
"namespace": "logging",
"service_accounts": "logging-vault-sync",
"read_paths": "logging/* shared/harbor-pull",
"write_paths": "",
},
{
"role": "pegasus",
"namespace": "jellyfin",
"service_accounts": "pegasus-vault-sync",
"read_paths": "pegasus/* shared/harbor-pull",
"write_paths": "",
},
{
"role": "crypto",
"namespace": "crypto",
"service_accounts": "crypto-vault-sync",
"read_paths": "crypto/* shared/harbor-pull",
"write_paths": "",
},
{
"role": "health",
"namespace": "health",
"service_accounts": "health-vault-sync",
"read_paths": "health/*",
"write_paths": "",
},
{
"role": "maintenance",
"namespace": "maintenance",
"service_accounts": "ariadne,maintenance-vault-sync",
"read_paths": "maintenance/ariadne-db portal/bstein-dev-home-keycloak-admin mailu/mailu-db-secret "
"mailu/mailu-initial-account-secret shared/harbor-pull",
"write_paths": "",
},
{
"role": "finance",
"namespace": "finance",
"service_accounts": "finance-vault",
"read_paths": "finance/* shared/postmark-relay",
"write_paths": "",
},
{
"role": "finance-secrets",
"namespace": "finance",
"service_accounts": "finance-secrets-ensure",
"read_paths": "",
"write_paths": "finance/*",
},
{
"role": "longhorn",
"namespace": "longhorn-system",
"service_accounts": "longhorn-vault,longhorn-vault-sync",
"read_paths": "longhorn/* shared/harbor-pull",
"write_paths": "",
},
{
"role": "postgres",
"namespace": "postgres",
"service_accounts": "postgres-vault",
"read_paths": "postgres/postgres-db",
"write_paths": "",
},
{
"role": "vault",
"namespace": "vault",
"service_accounts": "vault",
"read_paths": "vault/*",
"write_paths": "",
},
{
"role": "sso-secrets",
"namespace": "sso",
"service_accounts": "mas-secrets-ensure",
"read_paths": "shared/keycloak-admin",
"write_paths": "harbor/harbor-oidc vault/vault-oidc-config comms/synapse-oidc "
"logging/oauth2-proxy-logs-oidc finance/actual-oidc",
},
{
"role": "crypto-secrets",
"namespace": "crypto",
"service_accounts": "crypto-secrets-ensure",
"read_paths": "",
"write_paths": "crypto/wallet-monero-temp-rpc-auth",
},
{
"role": "comms-secrets",
"namespace": "comms",
"service_accounts": "comms-secrets-ensure,mas-db-ensure,mas-admin-client-secret-writer,othrys-synapse-signingkey-job",
"read_paths": "",
"write_paths": "comms/turn-shared-secret comms/livekit-api comms/synapse-redis comms/synapse-macaroon "
"comms/atlasbot-credentials-runtime comms/synapse-db comms/mas-db comms/mas-admin-client-runtime "
"comms/mas-secrets-runtime comms/othrys-synapse-signingkey",
},
]
_VAULT_ADMIN_POLICY = """
path "sys/auth" {
capabilities = ["read"]
}
path "sys/auth/*" {
capabilities = ["create", "update", "delete", "sudo", "read"]
}
path "auth/kubernetes/*" {
capabilities = ["create", "update", "read"]
}
path "auth/oidc/*" {
capabilities = ["create", "update", "read"]
}
path "sys/policies/acl" {
capabilities = ["list"]
}
path "sys/policies/acl/*" {
capabilities = ["create", "update", "read"]
}
path "sys/internal/ui/mounts" {
capabilities = ["read"]
}
path "sys/mounts" {
capabilities = ["read"]
}
path "sys/mounts/auth/*" {
capabilities = ["read", "update", "sudo"]
}
path "kv/data/atlas/vault/*" {
capabilities = ["read"]
}
path "kv/metadata/atlas/vault/*" {
capabilities = ["list"]
}
path "kv/data/*" {
capabilities = ["create", "update", "read", "delete", "patch"]
}
path "kv/metadata" {
capabilities = ["list"]
}
path "kv/metadata/*" {
capabilities = ["read", "list", "delete"]
}
path "kv/data/atlas/shared/*" {
capabilities = ["create", "update", "read", "patch"]
}
path "kv/metadata/atlas/shared/*" {
capabilities = ["list"]
}
""".strip()
_DEV_KV_POLICY = """
path "kv/metadata" {
capabilities = ["list"]
}
path "kv/metadata/atlas" {
capabilities = ["list"]
}
path "kv/metadata/atlas/shared" {
capabilities = ["list"]
}
path "kv/metadata/atlas/shared/*" {
capabilities = ["list"]
}
path "kv/data/atlas/shared/*" {
capabilities = ["read"]
}
""".strip()
class VaultClient:
def __init__(self, base_url: str, token: str | None = None) -> None:
self._base_url = base_url.rstrip("/")
self._token = token
def request(self, method: str, path: str, json: dict[str, Any] | None = None) -> httpx.Response:
headers = {}
if self._token:
headers["X-Vault-Token"] = self._token
return httpx.request(
method,
f"{self._base_url}{path}",
headers=headers,
json=json,
timeout=settings.k8s_api_timeout_sec,
)
class VaultService:
def __init__(self) -> None:
self._token: str | None = None
def _health(self, client: VaultClient) -> dict[str, Any]:
resp = client.request("GET", "/v1/sys/health")
resp.raise_for_status()
return resp.json()
def _ensure_token(self) -> str:
if self._token:
return self._token
if settings.vault_token:
self._token = settings.vault_token
return self._token
jwt = settings.vault_k8s_token_reviewer_jwt
if not jwt and settings.vault_k8s_token_reviewer_jwt_file:
jwt = _read_file(settings.vault_k8s_token_reviewer_jwt_file)
if not jwt:
jwt = _read_file("/var/run/secrets/kubernetes.io/serviceaccount/token")
if not jwt:
raise RuntimeError("vault auth jwt missing")
resp = httpx.post(
f"{settings.vault_addr.rstrip('/')}/v1/auth/kubernetes/login",
json={"role": settings.vault_k8s_role, "jwt": jwt},
timeout=settings.k8s_api_timeout_sec,
) )
resp.raise_for_status()
token = resp.json().get("auth", {}).get("client_token")
if not isinstance(token, str) or not token:
raise RuntimeError("vault login token missing")
self._token = token
return token
def _client(self) -> VaultClient:
token = self._ensure_token()
return VaultClient(settings.vault_addr, token)
def _ensure_auth_enabled(self, client: VaultClient, auth_name: str, auth_type: str) -> None:
resp = client.request("GET", "/v1/sys/auth")
resp.raise_for_status()
mounts = resp.json()
if f"{auth_name}/" not in mounts:
resp = client.request("POST", f"/v1/sys/auth/{auth_name}", json={"type": auth_type})
resp.raise_for_status()
def _write_policy(self, client: VaultClient, name: str, policy: str) -> None:
resp = client.request("PUT", f"/v1/sys/policies/acl/{name}", json={"policy": policy})
resp.raise_for_status()
def _write_k8s_role(self, client: VaultClient, role: dict[str, str]) -> None:
payload = {
"bound_service_account_names": role["service_accounts"],
"bound_service_account_namespaces": role["namespace"],
"policies": role["role"],
"ttl": settings.vault_k8s_role_ttl,
}
resp = client.request("POST", f"/v1/auth/kubernetes/role/{role['role']}", json=payload)
resp.raise_for_status()
def sync_k8s_auth(self, wait: bool = True) -> dict[str, Any]:
try:
status = self._health(VaultClient(settings.vault_addr))
except Exception as exc:  # noqa: BLE001
return VaultResult("error", str(exc)).__dict__
if not status.get("initialized"):
return VaultResult("skip", "vault not initialized").__dict__
if status.get("sealed"):
return VaultResult("skip", "vault sealed").__dict__
client = self._client()
self._ensure_auth_enabled(client, "kubernetes", "kubernetes")
token_reviewer_jwt = settings.vault_k8s_token_reviewer_jwt
if not token_reviewer_jwt and settings.vault_k8s_token_reviewer_jwt_file:
token_reviewer_jwt = _read_file(settings.vault_k8s_token_reviewer_jwt_file)
if not token_reviewer_jwt:
token_reviewer_jwt = _read_file("/var/run/secrets/kubernetes.io/serviceaccount/token")
k8s_host = f"https://{os.environ.get('KUBERNETES_SERVICE_HOST', 'kubernetes.default.svc')}:443"
k8s_ca = _read_file("/var/run/secrets/kubernetes.io/serviceaccount/ca.crt")
resp = client.request(
"POST",
"/v1/auth/kubernetes/config",
json={
"token_reviewer_jwt": token_reviewer_jwt,
"kubernetes_host": k8s_host,
"kubernetes_ca_cert": k8s_ca,
},
) )
resp.raise_for_status()
self._write_policy(client, "vault-admin", _VAULT_ADMIN_POLICY)
self._write_policy(client, "dev-kv", _DEV_KV_POLICY)
self._write_k8s_role(
client,
{
"role": "vault-admin",
"namespace": "vault",
"service_accounts": "vault-admin",
},
)
for role in _K8S_ROLES:
policy = _build_policy(role.get("read_paths", ""), role.get("write_paths", ""))
self._write_policy(client, role["role"], policy)
self._write_k8s_role(client, role)
return VaultResult("ok", "k8s auth configured").__dict__
def sync_oidc(self, wait: bool = True) -> dict[str, Any]:
try:
status = self._health(VaultClient(settings.vault_addr))
except Exception as exc:  # noqa: BLE001
return VaultResult("error", str(exc)).__dict__
if not status.get("initialized"):
return VaultResult("skip", "vault not initialized").__dict__
if status.get("sealed"):
return VaultResult("skip", "vault sealed").__dict__
if not settings.vault_oidc_discovery_url:
return VaultResult("error", "oidc discovery url missing").__dict__
if not settings.vault_oidc_client_id or not settings.vault_oidc_client_secret:
return VaultResult("error", "oidc client credentials missing").__dict__
client = self._client()
self._ensure_auth_enabled(client, "oidc", "oidc")
resp = client.request(
"POST",
"/v1/auth/oidc/config",
json={
"oidc_discovery_url": settings.vault_oidc_discovery_url,
"oidc_client_id": settings.vault_oidc_client_id,
"oidc_client_secret": settings.vault_oidc_client_secret,
"default_role": settings.vault_oidc_default_role or "admin",
},
) )
resp.raise_for_status()
try:
client.request(
"POST",
"/v1/sys/auth/oidc/tune",
json={"listing_visibility": "unauth"},
)
except Exception:
pass
scopes = settings.vault_oidc_scopes or "openid profile email groups"
scope_parts = [part for part in scopes.replace(" ", ",").split(",") if part]
scopes_csv = ",".join(dict.fromkeys(scope_parts))
redirect_uris = _split_csv(settings.vault_oidc_redirect_uris)
bound_audiences = settings.vault_oidc_bound_audiences or settings.vault_oidc_client_id
bound_claims_type = settings.vault_oidc_bound_claims_type or "string"
admin_group = settings.vault_oidc_admin_group or "admin"
admin_policies = settings.vault_oidc_admin_policies or "default,vault-admin"
dev_group = settings.vault_oidc_dev_group or "dev"
dev_policies = settings.vault_oidc_dev_policies or "default,dev-kv"
user_group = settings.vault_oidc_user_group or dev_group
user_policies = (
settings.vault_oidc_user_policies
or settings.vault_oidc_token_policies
or dev_policies
)
for role_name, groups, policies in (
("admin", admin_group, admin_policies),
("dev", dev_group, dev_policies),
("user", user_group, user_policies),
):
group_list = _split_csv(groups)
if not group_list or not policies:
continue
payload = {
"user_claim": settings.vault_oidc_user_claim or "preferred_username",
"oidc_scopes": scopes_csv,
"token_policies": policies,
"bound_audiences": bound_audiences,
"bound_claims": {settings.vault_oidc_groups_claim or "groups": group_list},
"bound_claims_type": bound_claims_type,
"groups_claim": settings.vault_oidc_groups_claim or "groups",
"allowed_redirect_uris": redirect_uris,
}
resp = client.request(
"POST",
f"/v1/auth/oidc/role/{role_name}",
json=payload,
)
resp.raise_for_status()
return VaultResult("ok", "oidc configured").__dict__
vault = VaultService()
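
Note: both sync methods above now talk to Vault in-process instead of spawning CronJob-backed Jobs. A minimal sketch of driving them, assuming VaultResult's two fields are named status and detail (consistent with the {"status": ..., "detail": ...} dicts used by the other services in this commit):

from ariadne.services.vault import vault

result = vault.sync_k8s_auth()
if result.get("status") != "ok":
    # "skip" while Vault is uninitialized/sealed, "error" on request failures
    print(result.get("detail"))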

View File

@ -77,7 +77,7 @@ def _vaultwarden_email_for_user(user: dict[str, Any]) -> str:
    if email and email.lower().endswith(f"@{settings.mailu_domain.lower()}"):
        return email
    return f"{username}@{settings.mailu_domain}"
def _set_user_attribute_if_missing(username: str, user: dict[str, Any], key: str, value: str) -> None:
@ -155,6 +155,7 @@ def run_vaultwarden_sync() -> VaultwardenSyncSummary:
            continue
        try:
            _set_user_attribute_if_missing(username, full_user, "mailu_email", email)
            _set_user_attribute_if_missing(username, full_user, VAULTWARDEN_EMAIL_ATTR, email)
        except Exception:
            pass
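
The net effect of this hunk: a user whose Keycloak email is off the Mailu domain is no longer skipped. A small illustration of the changed fallback with placeholder values (username is assumed to come from the surrounding sync loop):

email = "alice@example.org"
username = "alice"
mailu_domain = "bstein.dev"  # settings.mailu_domain
if email and email.lower().endswith(f"@{mailu_domain.lower()}"):
    resolved = email
else:
    resolved = f"{username}@{mailu_domain}"  # previously resolved to ""
print(resolved)  # alice@bstein.dev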

View File

@ -1,23 +1,147 @@
from __future__ import annotations

from typing import Any
import textwrap

from ..k8s.exec import ExecError, PodExecutor
from ..k8s.pods import PodSelectionError
from ..settings import settings
_WGER_SYNC_SCRIPT = textwrap.dedent(
"""
from __future__ import annotations
import os
import sys
import django
def _env(name: str, default: str = "") -> str:
value = os.getenv(name, default)
return value.strip() if isinstance(value, str) else ""
def _setup_django() -> None:
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "settings.main")
django.setup()
def _set_default_gym(user) -> None:
try:
from wger.gym.models import GymConfig
except Exception:
return
try:
config = GymConfig.objects.first()
except Exception:
return
if not config or not getattr(config, "default_gym", None):
return
profile = getattr(user, "userprofile", None)
if not profile or getattr(profile, "gym", None):
return
profile.gym = config.default_gym
profile.save()
def _ensure_profile(user) -> None:
profile = getattr(user, "userprofile", None)
if not profile:
return
if hasattr(profile, "email_verified") and not profile.email_verified:
profile.email_verified = True
if hasattr(profile, "is_temporary") and profile.is_temporary:
profile.is_temporary = False
profile.save()
def _ensure_admin(username: str, password: str, email: str) -> None:
from django.contrib.auth.models import User
if not username or not password:
raise RuntimeError("admin username/password missing")
user, created = User.objects.get_or_create(username=username)
if created:
user.is_active = True
if not user.is_staff:
user.is_staff = True
if email:
user.email = email
user.set_password(password)
user.save()
_ensure_profile(user)
_set_default_gym(user)
print(f"ensured admin user {username}")
def _ensure_user(username: str, password: str, email: str) -> None:
from django.contrib.auth.models import User
if not username or not password:
raise RuntimeError("username/password missing")
user, created = User.objects.get_or_create(username=username)
if created:
user.is_active = True
if email and user.email != email:
user.email = email
user.set_password(password)
user.save()
_ensure_profile(user)
_set_default_gym(user)
action = "created" if created else "updated"
print(f"{action} user {username}")
def main() -> int:
admin_user = _env("WGER_ADMIN_USERNAME")
admin_password = _env("WGER_ADMIN_PASSWORD")
admin_email = _env("WGER_ADMIN_EMAIL")
username = _env("WGER_USERNAME") or _env("ONLY_USERNAME")
password = _env("WGER_PASSWORD")
email = _env("WGER_EMAIL")
if not any([admin_user and admin_password, username and password]):
print("no admin or user payload provided; exiting")
return 0
_setup_django()
if admin_user and admin_password:
_ensure_admin(admin_user, admin_password, admin_email)
if username and password:
_ensure_user(username, password, email)
return 0
if __name__ == "__main__":
sys.exit(main())
"""
).strip()
def _wger_exec_command() -> str:
return f"python3 - <<'PY'\n{_WGER_SYNC_SCRIPT}\nPY"
class WgerService:
    def __init__(self) -> None:
        self._executor = PodExecutor(
            settings.wger_namespace,
            settings.wger_pod_label,
            settings.wger_container,
        )
    def sync_user(self, username: str, email: str, password: str, wait: bool = True) -> dict[str, Any]:
@ -26,33 +150,51 @@ class WgerService:
raise RuntimeError("missing username") raise RuntimeError("missing username")
if not password: if not password:
raise RuntimeError("missing password") raise RuntimeError("missing password")
if not settings.wger_namespace or not settings.wger_user_sync_cronjob: if not settings.wger_namespace:
raise RuntimeError("wger sync not configured") raise RuntimeError("wger sync not configured")
env_overrides = [ env = {
{"name": "WGER_USERNAME", "value": username}, "WGER_USERNAME": username,
{"name": "WGER_EMAIL", "value": email}, "WGER_EMAIL": email,
{"name": "WGER_PASSWORD", "value": password}, "WGER_PASSWORD": password,
] }
if wait: try:
return self._user_spawner.trigger_and_wait( result = self._executor.exec(
username, _wger_exec_command(),
env_overrides, env=env,
settings.wger_user_sync_wait_timeout_sec, timeout_sec=settings.wger_user_sync_wait_timeout_sec,
check=True,
) )
return self._user_spawner.trigger(username, env_overrides) except (ExecError, PodSelectionError, TimeoutError) as exc:
return {"status": "error", "detail": str(exc)}
output = (result.stdout or result.stderr).strip()
return {"status": "ok", "detail": output}
    def ensure_admin(self, wait: bool = False) -> dict[str, Any]:
        if not settings.wger_namespace:
            raise RuntimeError("wger admin sync not configured")
        if not settings.wger_admin_username or not settings.wger_admin_password:
            return {"status": "error", "detail": "admin credentials missing"}
        env = {
            "WGER_ADMIN_USERNAME": settings.wger_admin_username,
            "WGER_ADMIN_PASSWORD": settings.wger_admin_password,
            "WGER_ADMIN_EMAIL": settings.wger_admin_email,
        }
        try:
            result = self._executor.exec(
                _wger_exec_command(),
                env=env,
                timeout_sec=settings.wger_user_sync_wait_timeout_sec,
                check=True,
            )
        except (ExecError, PodSelectionError, TimeoutError) as exc:
            return {"status": "error", "detail": str(exc)}
        output = (result.stdout or result.stderr).strip()
        return {"status": "ok", "detail": output}
wger = WgerService()
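
A minimal sketch of the new call path (placeholder credentials); both entry points now exec the embedded Django script inside the running wger pod rather than templating a Job from a CronJob:

from ariadne.services.wger import wger

summary = wger.sync_user("alice", "alice@bstein.dev", "s3cret")
print(summary["status"], summary["detail"])  # "ok" plus the script's stdout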

View File

@ -64,32 +64,89 @@ class Settings:
    mailu_db_name: str
    mailu_db_user: str
    mailu_db_password: str
    mailu_host: str
    nextcloud_namespace: str
    nextcloud_pod_label: str
    nextcloud_container: str
    nextcloud_exec_timeout_sec: float
    nextcloud_db_host: str
    nextcloud_db_port: int
    nextcloud_db_name: str
    nextcloud_db_user: str
    nextcloud_db_password: str
    nextcloud_url: str
    nextcloud_admin_user: str
    nextcloud_admin_password: str
    wger_namespace: str
    wger_user_sync_wait_timeout_sec: float
    wger_pod_label: str
    wger_container: str
    wger_admin_username: str
    wger_admin_password: str
    wger_admin_email: str
    firefly_namespace: str
    firefly_user_sync_wait_timeout_sec: float
    firefly_pod_label: str
    firefly_container: str
    firefly_cron_base_url: str
    firefly_cron_token: str
    firefly_cron_timeout_sec: float
    vault_namespace: str
    vault_addr: str
    vault_token: str
    vault_k8s_role: str
    vault_k8s_role_ttl: str
    vault_k8s_token_reviewer_jwt: str
    vault_k8s_token_reviewer_jwt_file: str
    vault_oidc_discovery_url: str
    vault_oidc_client_id: str
    vault_oidc_client_secret: str
    vault_oidc_default_role: str
    vault_oidc_scopes: str
    vault_oidc_user_claim: str
    vault_oidc_groups_claim: str
    vault_oidc_token_policies: str
    vault_oidc_admin_group: str
    vault_oidc_admin_policies: str
    vault_oidc_dev_group: str
    vault_oidc_dev_policies: str
    vault_oidc_user_group: str
    vault_oidc_user_policies: str
    vault_oidc_redirect_uris: str
    vault_oidc_bound_audiences: str
    vault_oidc_bound_claims_type: str
    comms_namespace: str
    comms_synapse_base: str
    comms_auth_base: str
    comms_mas_admin_api_base: str
    comms_mas_token_url: str
    comms_mas_admin_client_id: str
    comms_mas_admin_client_secret: str
    comms_server_name: str
    comms_room_alias: str
    comms_room_name: str
    comms_pin_message: str
    comms_seeder_user: str
    comms_seeder_password: str
    comms_bot_user: str
    comms_bot_password: str
    comms_synapse_db_host: str
    comms_synapse_db_port: int
    comms_synapse_db_name: str
    comms_synapse_db_user: str
    comms_synapse_db_password: str
    comms_timeout_sec: float
    comms_guest_stale_days: int
    image_sweeper_namespace: str
    image_sweeper_service_account: str
    image_sweeper_job_ttl_sec: int
    image_sweeper_wait_timeout_sec: float
    vaultwarden_namespace: str
    vaultwarden_pod_label: str
@ -119,8 +176,14 @@ class Settings:
    mailu_sync_cron: str
    nextcloud_sync_cron: str
    nextcloud_cron: str
    nextcloud_maintenance_cron: str
    vaultwarden_sync_cron: str
    wger_admin_cron: str
    firefly_cron: str
    pod_cleaner_cron: str
    opensearch_prune_cron: str
    image_sweeper_cron: str
    vault_k8s_auth_cron: str
    vault_oidc_cron: str
    comms_guest_name_cron: str
@ -129,6 +192,11 @@ class Settings:
    comms_seed_room_cron: str
    keycloak_profile_cron: str
    opensearch_url: str
    opensearch_limit_bytes: int
    opensearch_index_patterns: str
    opensearch_timeout_sec: float
    metrics_path: str
    @classmethod
@ -182,27 +250,107 @@ class Settings:
            mailu_db_name=_env("MAILU_DB_NAME", "mailu"),
            mailu_db_user=_env("MAILU_DB_USER", "mailu"),
            mailu_db_password=_env("MAILU_DB_PASSWORD", ""),
            mailu_host=_env("MAILU_HOST", f"mail.{mailu_domain}"),
            nextcloud_namespace=_env("NEXTCLOUD_NAMESPACE", "nextcloud"),
            nextcloud_pod_label=_env("NEXTCLOUD_POD_LABEL", "app=nextcloud"),
            nextcloud_container=_env("NEXTCLOUD_CONTAINER", "nextcloud"),
            nextcloud_exec_timeout_sec=_env_float("NEXTCLOUD_EXEC_TIMEOUT_SEC", 120.0),
            nextcloud_db_host=_env("NEXTCLOUD_DB_HOST", "postgres-service.postgres.svc.cluster.local"),
            nextcloud_db_port=_env_int("NEXTCLOUD_DB_PORT", 5432),
            nextcloud_db_name=_env("NEXTCLOUD_DB_NAME", "nextcloud"),
            nextcloud_db_user=_env("NEXTCLOUD_DB_USER", "nextcloud"),
            nextcloud_db_password=_env("NEXTCLOUD_DB_PASSWORD", ""),
            nextcloud_url=_env("NEXTCLOUD_URL", "https://cloud.bstein.dev").rstrip("/"),
            nextcloud_admin_user=_env("NEXTCLOUD_ADMIN_USER", ""),
            nextcloud_admin_password=_env("NEXTCLOUD_ADMIN_PASSWORD", ""),
            wger_namespace=_env("WGER_NAMESPACE", "health"),
            wger_user_sync_wait_timeout_sec=_env_float("WGER_USER_SYNC_WAIT_TIMEOUT_SEC", 60.0),
            wger_pod_label=_env("WGER_POD_LABEL", "app=wger"),
            wger_container=_env("WGER_CONTAINER", "wger"),
            wger_admin_username=_env("WGER_ADMIN_USERNAME", ""),
            wger_admin_password=_env("WGER_ADMIN_PASSWORD", ""),
            wger_admin_email=_env("WGER_ADMIN_EMAIL", ""),
            firefly_namespace=_env("FIREFLY_NAMESPACE", "finance"),
            firefly_user_sync_wait_timeout_sec=_env_float("FIREFLY_USER_SYNC_WAIT_TIMEOUT_SEC", 90.0),
            firefly_pod_label=_env("FIREFLY_POD_LABEL", "app=firefly"),
            firefly_container=_env("FIREFLY_CONTAINER", "firefly"),
            firefly_cron_base_url=_env(
                "FIREFLY_CRON_BASE_URL",
                "http://firefly.finance.svc.cluster.local/api/v1/cron",
            ),
            firefly_cron_token=_env("FIREFLY_CRON_TOKEN", ""),
            firefly_cron_timeout_sec=_env_float("FIREFLY_CRON_TIMEOUT_SEC", 30.0),
            vault_namespace=_env("VAULT_NAMESPACE", "vault"),
            vault_addr=_env("VAULT_ADDR", "http://vault.vault.svc.cluster.local:8200").rstrip("/"),
            vault_token=_env("VAULT_TOKEN", ""),
            vault_k8s_role=_env("VAULT_K8S_ROLE", "vault"),
            vault_k8s_role_ttl=_env("VAULT_K8S_ROLE_TTL", "1h"),
            vault_k8s_token_reviewer_jwt=_env("VAULT_K8S_TOKEN_REVIEWER_JWT", ""),
            vault_k8s_token_reviewer_jwt_file=_env("VAULT_K8S_TOKEN_REVIEWER_JWT_FILE", ""),
            vault_oidc_discovery_url=_env("VAULT_OIDC_DISCOVERY_URL", ""),
            vault_oidc_client_id=_env("VAULT_OIDC_CLIENT_ID", ""),
            vault_oidc_client_secret=_env("VAULT_OIDC_CLIENT_SECRET", ""),
            vault_oidc_default_role=_env("VAULT_OIDC_DEFAULT_ROLE", "admin"),
            vault_oidc_scopes=_env("VAULT_OIDC_SCOPES", "openid profile email groups"),
            vault_oidc_user_claim=_env("VAULT_OIDC_USER_CLAIM", "preferred_username"),
            vault_oidc_groups_claim=_env("VAULT_OIDC_GROUPS_CLAIM", "groups"),
            vault_oidc_token_policies=_env("VAULT_OIDC_TOKEN_POLICIES", ""),
            vault_oidc_admin_group=_env("VAULT_OIDC_ADMIN_GROUP", "admin"),
            vault_oidc_admin_policies=_env("VAULT_OIDC_ADMIN_POLICIES", "default,vault-admin"),
            vault_oidc_dev_group=_env("VAULT_OIDC_DEV_GROUP", "dev"),
            vault_oidc_dev_policies=_env("VAULT_OIDC_DEV_POLICIES", "default,dev-kv"),
            vault_oidc_user_group=_env("VAULT_OIDC_USER_GROUP", ""),
            vault_oidc_user_policies=_env("VAULT_OIDC_USER_POLICIES", ""),
            vault_oidc_redirect_uris=_env(
                "VAULT_OIDC_REDIRECT_URIS",
                "https://secret.bstein.dev/ui/vault/auth/oidc/oidc/callback",
            ),
            vault_oidc_bound_audiences=_env("VAULT_OIDC_BOUND_AUDIENCES", ""),
            vault_oidc_bound_claims_type=_env("VAULT_OIDC_BOUND_CLAIMS_TYPE", "string"),
            comms_namespace=_env("COMMS_NAMESPACE", "comms"),
            comms_synapse_base=_env(
                "COMMS_SYNAPSE_BASE",
                "http://othrys-synapse-matrix-synapse:8008",
            ).rstrip("/"),
            comms_auth_base=_env(
                "COMMS_AUTH_BASE",
                "http://matrix-authentication-service:8080",
            ).rstrip("/"),
            comms_mas_admin_api_base=_env(
                "COMMS_MAS_ADMIN_API_BASE",
                "http://matrix-authentication-service:8081/api/admin/v1",
            ).rstrip("/"),
            comms_mas_token_url=_env(
                "COMMS_MAS_TOKEN_URL",
                "http://matrix-authentication-service:8080/oauth2/token",
            ),
            comms_mas_admin_client_id=_env("COMMS_MAS_ADMIN_CLIENT_ID", "01KDXMVQBQ5JNY6SEJPZW6Z8BM"),
            comms_mas_admin_client_secret=_env("COMMS_MAS_ADMIN_CLIENT_SECRET", ""),
            comms_server_name=_env("COMMS_SERVER_NAME", "live.bstein.dev"),
            comms_room_alias=_env("COMMS_ROOM_ALIAS", "#othrys:live.bstein.dev"),
            comms_room_name=_env("COMMS_ROOM_NAME", "Othrys"),
            comms_pin_message=_env(
                "COMMS_PIN_MESSAGE",
                "Invite guests: share https://live.bstein.dev/#/room/#othrys:live.bstein.dev?action=join and choose 'Continue' -> 'Join as guest'.",
            ),
            comms_seeder_user=_env("COMMS_SEEDER_USER", "othrys-seeder"),
            comms_seeder_password=_env("COMMS_SEEDER_PASSWORD", ""),
            comms_bot_user=_env("COMMS_BOT_USER", "atlasbot"),
            comms_bot_password=_env("COMMS_BOT_PASSWORD", ""),
            comms_synapse_db_host=_env(
                "COMMS_SYNAPSE_DB_HOST",
                "postgres-service.postgres.svc.cluster.local",
            ),
            comms_synapse_db_port=_env_int("COMMS_SYNAPSE_DB_PORT", 5432),
            comms_synapse_db_name=_env("COMMS_SYNAPSE_DB_NAME", "synapse"),
            comms_synapse_db_user=_env("COMMS_SYNAPSE_DB_USER", "synapse"),
            comms_synapse_db_password=_env("COMMS_SYNAPSE_DB_PASSWORD", ""),
            comms_timeout_sec=_env_float("COMMS_TIMEOUT_SEC", 30.0),
            comms_guest_stale_days=_env_int("COMMS_GUEST_STALE_DAYS", 14),
            image_sweeper_namespace=_env("IMAGE_SWEEPER_NAMESPACE", "maintenance"),
            image_sweeper_service_account=_env("IMAGE_SWEEPER_SERVICE_ACCOUNT", "node-image-sweeper"),
            image_sweeper_job_ttl_sec=_env_int("IMAGE_SWEEPER_JOB_TTL_SEC", 3600),
            image_sweeper_wait_timeout_sec=_env_float("IMAGE_SWEEPER_WAIT_TIMEOUT_SEC", 1200.0),
            vaultwarden_namespace=_env("VAULTWARDEN_NAMESPACE", "vaultwarden"),
            vaultwarden_pod_label=_env("VAULTWARDEN_POD_LABEL", "app=vaultwarden"),
            vaultwarden_pod_port=_env_int("VAULTWARDEN_POD_PORT", 80),
@ -231,8 +379,14 @@ class Settings:
            k8s_api_timeout_sec=_env_float("K8S_API_TIMEOUT_SEC", 5.0),
            mailu_sync_cron=_env("ARIADNE_SCHEDULE_MAILU_SYNC", "30 4 * * *"),
            nextcloud_sync_cron=_env("ARIADNE_SCHEDULE_NEXTCLOUD_SYNC", "0 5 * * *"),
            nextcloud_cron=_env("ARIADNE_SCHEDULE_NEXTCLOUD_CRON", "*/5 * * * *"),
            nextcloud_maintenance_cron=_env("ARIADNE_SCHEDULE_NEXTCLOUD_MAINTENANCE", "30 4 * * *"),
            vaultwarden_sync_cron=_env("ARIADNE_SCHEDULE_VAULTWARDEN_SYNC", "*/15 * * * *"),
            wger_admin_cron=_env("ARIADNE_SCHEDULE_WGER_ADMIN", "15 3 * * *"),
            firefly_cron=_env("ARIADNE_SCHEDULE_FIREFLY_CRON", "0 3 * * *"),
            pod_cleaner_cron=_env("ARIADNE_SCHEDULE_POD_CLEANER", "0 * * * *"),
            opensearch_prune_cron=_env("ARIADNE_SCHEDULE_OPENSEARCH_PRUNE", "23 3 * * *"),
            image_sweeper_cron=_env("ARIADNE_SCHEDULE_IMAGE_SWEEPER", "30 4 * * 0"),
            vault_k8s_auth_cron=_env("ARIADNE_SCHEDULE_VAULT_K8S_AUTH", "*/15 * * * *"),
            vault_oidc_cron=_env("ARIADNE_SCHEDULE_VAULT_OIDC", "*/15 * * * *"),
            comms_guest_name_cron=_env("ARIADNE_SCHEDULE_COMMS_GUEST_NAME", "*/1 * * * *"),
@ -240,6 +394,13 @@ class Settings:
            comms_reset_room_cron=_env("ARIADNE_SCHEDULE_COMMS_RESET_ROOM", "0 0 1 1 *"),
            comms_seed_room_cron=_env("ARIADNE_SCHEDULE_COMMS_SEED_ROOM", "*/10 * * * *"),
            keycloak_profile_cron=_env("ARIADNE_SCHEDULE_KEYCLOAK_PROFILE", "0 */6 * * *"),
            opensearch_url=_env(
                "OPENSEARCH_URL",
                "http://opensearch-master.logging.svc.cluster.local:9200",
            ).rstrip("/"),
            opensearch_limit_bytes=_env_int("OPENSEARCH_LIMIT_BYTES", 1024**4),
            opensearch_index_patterns=_env("OPENSEARCH_INDEX_PATTERNS", "kube-*,journald-*"),
            opensearch_timeout_sec=_env_float("OPENSEARCH_TIMEOUT_SEC", 30.0),
            metrics_path=_env("METRICS_PATH", "/metrics"),
        )
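
The _env/_env_int/_env_float helpers are defined elsewhere in this module and are not part of this hunk; as a hedged sketch, inferred only from how they are called above, they presumably look roughly like:

import os

def _env(name: str, default: str = "") -> str:
    value = os.getenv(name)
    return value if value is not None else default

def _env_int(name: str, default: int) -> int:
    raw = os.getenv(name)
    try:
        return int(raw) if raw not in (None, "") else default
    except ValueError:
        return default

def _env_float(name: str, default: float) -> float:
    raw = os.getenv(name)
    try:
        return float(raw) if raw not in (None, "") else default
    except ValueError:
        return default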

View File

@ -30,6 +30,7 @@ _STANDARD_ATTRS = {
"processName", "processName",
"relativeCreated", "relativeCreated",
"stack_info", "stack_info",
"taskName",
"thread", "thread",
"threadName", "threadName",
} }
@ -48,6 +49,9 @@ class JsonFormatter(logging.Formatter):
"logger": record.name, "logger": record.name,
"message": record.getMessage(), "message": record.getMessage(),
} }
task_name = getattr(record, "taskName", None)
if task_name:
payload["taskName"] = task_name
if record.exc_info: if record.exc_info:
payload["exc_info"] = self.formatException(record.exc_info) payload["exc_info"] = self.formatException(record.exc_info)
@ -57,8 +61,6 @@ class JsonFormatter(logging.Formatter):
        for key, value in record.__dict__.items():
            if key in _STANDARD_ATTRS or key in payload:
                continue
            payload[key] = value
        return json.dumps(payload, ensure_ascii=True)
@ -70,10 +72,16 @@ _TASK_NAME: contextvars.ContextVar[str | None] = contextvars.ContextVar("ariadne
class _ContextFilter(logging.Filter):
    def filter(self, record: logging.LogRecord) -> bool:
        task_value = getattr(record, "task", None)
        if task_value and not getattr(record, "taskName", None):
            record.taskName = task_value
            return True
        if not task_value:
            task_name = _TASK_NAME.get()
            if task_name:
                record.task = task_name
                if not getattr(record, "taskName", None):
                    record.taskName = task_name
        return True
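
Taken together, the formatter and filter put the scheduler task name on every emitted record. A sketch of the wiring, assuming module scope (JsonFormatter, _ContextFilter, and task_context as defined in this file):

import logging

handler = logging.StreamHandler()
handler.setFormatter(JsonFormatter())
handler.addFilter(_ContextFilter())
logger = logging.getLogger("ariadne.demo")
logger.setLevel(logging.INFO)
logger.addHandler(handler)

with task_context("schedule.demo"):
    logger.info("tick")  # JSON payload carries "task" and "taskName": "schedule.demo"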

View File

@ -1,6 +1,7 @@
fastapi==0.115.11
uvicorn[standard]==0.30.6
httpx==0.27.2
kubernetes==30.1.0
PyJWT[crypto]==2.10.1
psycopg[binary]==3.2.6
psycopg-pool==3.2.6

tests/test_comms.py (new file, 352 lines)
View File

@ -0,0 +1,352 @@
from __future__ import annotations
import types
from ariadne.services import comms as comms_module
from ariadne.services.comms import CommsService
class DummyResponse:
def __init__(self, payload=None, status_code=200, text=""):
self._payload = payload or {}
self.status_code = status_code
self.text = text
def json(self):
return self._payload
def raise_for_status(self):
if self.status_code >= 400:
raise RuntimeError("status error")
class DummyClient:
def __init__(self, handler, timeout=None):
self._handler = handler
def __enter__(self):
return self
def __exit__(self, exc_type, exc, tb):
return False
def get(self, url, **kwargs):
return self._handler("GET", url, **kwargs)
def post(self, url, **kwargs):
return self._handler("POST", url, **kwargs)
def put(self, url, **kwargs):
return self._handler("PUT", url, **kwargs)
def delete(self, url, **kwargs):
return self._handler("DELETE", url, **kwargs)
def _make_handler(responses):
def handler(method, url, **_kwargs):
key = (method, url)
value = responses.get(key)
if isinstance(value, list):
if not value:
return DummyResponse()
return value.pop(0)
if callable(value):
return value(method, url)
if value is None:
return DummyResponse()
return value
return handler
def test_comms_pin_invite_pins(monkeypatch) -> None:
dummy_settings = types.SimpleNamespace(
comms_auth_base="http://auth",
comms_synapse_base="http://synapse",
comms_server_name="live.bstein.dev",
comms_room_alias="#othrys:live.bstein.dev",
comms_pin_message="invite",
comms_seeder_user="othrys-seeder",
comms_seeder_password="pw",
comms_timeout_sec=5.0,
)
monkeypatch.setattr(comms_module, "settings", dummy_settings)
alias_enc = "%23othrys%3Alive.bstein.dev"
responses = {
("POST", "http://auth/_matrix/client/v3/login"): DummyResponse({"access_token": "tok"}),
("GET", f"http://synapse/_matrix/client/v3/directory/room/{alias_enc}"): DummyResponse(
{"room_id": "room1"}
),
("GET", "http://synapse/_matrix/client/v3/rooms/room1/state/m.room.pinned_events"): DummyResponse(
{"pinned": []}
),
("POST", "http://synapse/_matrix/client/v3/rooms/room1/send/m.room.message"): DummyResponse(
{"event_id": "event1"}
),
("PUT", "http://synapse/_matrix/client/v3/rooms/room1/state/m.room.pinned_events"): DummyResponse({}),
}
handler = _make_handler(responses)
svc = CommsService(client_factory=lambda timeout=None: DummyClient(handler, timeout=timeout))
result = svc.run_pin_invite()
assert result["status"] == "ok"
assert result["detail"] == "pinned"
def test_comms_pin_invite_skips_existing(monkeypatch) -> None:
dummy_settings = types.SimpleNamespace(
comms_auth_base="http://auth",
comms_synapse_base="http://synapse",
comms_server_name="live.bstein.dev",
comms_room_alias="#othrys:live.bstein.dev",
comms_pin_message="invite",
comms_seeder_user="othrys-seeder",
comms_seeder_password="pw",
comms_timeout_sec=5.0,
)
monkeypatch.setattr(comms_module, "settings", dummy_settings)
alias_enc = "%23othrys%3Alive.bstein.dev"
responses = {
("POST", "http://auth/_matrix/client/v3/login"): DummyResponse({"access_token": "tok"}),
("GET", f"http://synapse/_matrix/client/v3/directory/room/{alias_enc}"): DummyResponse(
{"room_id": "room1"}
),
("GET", "http://synapse/_matrix/client/v3/rooms/room1/state/m.room.pinned_events"): DummyResponse(
{"pinned": ["event1"]}
),
("GET", "http://synapse/_matrix/client/v3/rooms/room1/event/event1"): DummyResponse(
{"content": {"body": "invite"}}
),
}
handler = _make_handler(responses)
svc = CommsService(client_factory=lambda timeout=None: DummyClient(handler, timeout=timeout))
result = svc.run_pin_invite()
assert result["status"] == "ok"
assert result["detail"] == "already pinned"
def test_comms_seed_room(monkeypatch) -> None:
dummy_settings = types.SimpleNamespace(
comms_auth_base="http://auth",
comms_synapse_base="http://synapse",
comms_server_name="live.bstein.dev",
comms_room_alias="#othrys:live.bstein.dev",
comms_room_name="Othrys",
comms_seeder_user="othrys-seeder",
comms_seeder_password="pw",
comms_bot_user="atlasbot",
comms_bot_password="bot",
comms_timeout_sec=5.0,
)
monkeypatch.setattr(comms_module, "settings", dummy_settings)
alias_enc = "%23othrys%3Alive.bstein.dev"
responses = {
("POST", "http://auth/_matrix/client/v3/login"): DummyResponse({"access_token": "tok"}),
("GET", "http://synapse/_synapse/admin/v2/users/%40othrys-seeder%3Alive.bstein.dev"): DummyResponse(
status_code=404
),
("PUT", "http://synapse/_synapse/admin/v2/users/%40othrys-seeder%3Alive.bstein.dev"): DummyResponse(
status_code=201
),
("GET", "http://synapse/_synapse/admin/v2/users/%40atlasbot%3Alive.bstein.dev"): DummyResponse(
status_code=404
),
("PUT", "http://synapse/_synapse/admin/v2/users/%40atlasbot%3Alive.bstein.dev"): DummyResponse(
status_code=201
),
("GET", f"http://synapse/_matrix/client/v3/directory/room/{alias_enc}"): [
DummyResponse(status_code=404),
DummyResponse({"room_id": "room1"}),
],
("POST", "http://synapse/_matrix/client/v3/createRoom"): DummyResponse({"room_id": "room1"}),
("GET", "http://synapse/_synapse/admin/v2/users?local=true&deactivated=false&limit=100"): DummyResponse(
{"users": [{"name": "@a:live.bstein.dev"}]}
),
("POST", "http://synapse/_synapse/admin/v1/join/room1"): DummyResponse({}),
}
handler = _make_handler(responses)
svc = CommsService(client_factory=lambda timeout=None: DummyClient(handler, timeout=timeout))
result = svc.run_seed_room()
assert result["status"] == "ok"
def test_comms_reset_room(monkeypatch) -> None:
dummy_settings = types.SimpleNamespace(
comms_auth_base="http://auth",
comms_synapse_base="http://synapse",
comms_server_name="live.bstein.dev",
comms_room_alias="#othrys:live.bstein.dev",
comms_room_name="Othrys",
comms_pin_message="invite",
comms_seeder_user="othrys-seeder",
comms_seeder_password="pw",
comms_bot_user="atlasbot",
comms_timeout_sec=5.0,
)
monkeypatch.setattr(comms_module, "settings", dummy_settings)
alias_enc = "%23othrys%3Alive.bstein.dev"
responses = {
("POST", "http://auth/_matrix/client/v3/login"): DummyResponse({"access_token": "tok"}),
("GET", f"http://synapse/_matrix/client/v3/directory/room/{alias_enc}"): DummyResponse(
{"room_id": "old-room"}
),
("POST", "http://synapse/_matrix/client/v3/createRoom"): DummyResponse({"room_id": "new-room"}),
("GET", "http://synapse/_matrix/client/v3/rooms/old-room/members?membership=join"): DummyResponse(
{
"chunk": [
{"type": "m.room.member", "state_key": "@othrys-seeder:live.bstein.dev"},
{"type": "m.room.member", "state_key": "@bob:live.bstein.dev"},
{"type": "m.room.member", "state_key": "@123:live.bstein.dev"},
]
}
),
("POST", "http://synapse/_matrix/client/v3/rooms/new-room/send/m.room.message"): DummyResponse(
{"event_id": "event1"}
),
}
def handler(method, url, **_kwargs):
resp = responses.get((method, url))
if resp is None:
return DummyResponse({})
if isinstance(resp, list):
return resp.pop(0)
return resp
svc = CommsService(client_factory=lambda timeout=None: DummyClient(handler, timeout=timeout))
result = svc.run_reset_room()
assert result["status"] == "ok"
def test_comms_guest_name_randomizer(monkeypatch) -> None:
dummy_settings = types.SimpleNamespace(
comms_mas_admin_client_id="client",
comms_mas_admin_client_secret="secret",
comms_mas_token_url="http://mas/token",
comms_mas_admin_api_base="http://mas/api/admin/v1",
comms_synapse_base="http://synapse",
comms_room_alias="#othrys:live.bstein.dev",
comms_server_name="live.bstein.dev",
comms_seeder_user="othrys-seeder",
comms_timeout_sec=5.0,
comms_guest_stale_days=1,
)
monkeypatch.setattr(comms_module, "settings", dummy_settings)
responses = {
("POST", "http://mas/token"): DummyResponse({"access_token": "admintoken"}),
("GET", "http://mas/api/admin/v1/users/by-username/othrys-seeder"): DummyResponse(
{"data": {"id": "seed"}}
),
("POST", "http://mas/api/admin/v1/personal-sessions"): DummyResponse(
{"data": {"id": "session-1", "attributes": {"access_token": "seedtoken"}}}
),
("POST", "http://mas/api/admin/v1/personal-sessions/session-1/revoke"): DummyResponse({}),
("GET", "http://synapse/_matrix/client/v3/directory/room/%23othrys%3Alive.bstein.dev"): DummyResponse(
{"room_id": "room1"}
),
("GET", "http://synapse/_matrix/client/v3/rooms/room1/members"): DummyResponse(
{"chunk": []}
),
("GET", "http://mas/api/admin/v1/users?page[size]=100"): DummyResponse(
{
"data": [
{"id": "user-1", "attributes": {"username": "guest-1", "legacy_guest": True}},
]
}
),
("POST", "http://mas/api/admin/v1/personal-sessions"): DummyResponse(
{"data": {"id": "session-2", "attributes": {"access_token": "usertoken"}}}
),
("GET", "http://synapse/_matrix/client/v3/profile/%40guest-1%3Alive.bstein.dev"): DummyResponse(
{"displayname": None}
),
("PUT", "http://synapse/_matrix/client/v3/profile/%40guest-1%3Alive.bstein.dev/displayname"): DummyResponse({}),
("GET", "http://synapse/_synapse/admin/v2/users?local=true&deactivated=false&limit=100"): DummyResponse(
{
"users": [
{"name": "@guest-99:live.bstein.dev", "is_guest": True, "last_seen_ts": 0},
]
}
),
("DELETE", "http://synapse/_synapse/admin/v2/users/%40guest-99%3Alive.bstein.dev"): DummyResponse({}),
("GET", "http://synapse/_synapse/admin/v2/users/%40guest-99%3Alive.bstein.dev"): DummyResponse(
{"displayname": None}
),
("PUT", "http://synapse/_synapse/admin/v2/users/%40guest-99%3Alive.bstein.dev"): DummyResponse({}),
("POST", "http://mas/api/admin/v1/personal-sessions/session-2/revoke"): DummyResponse({}),
}
def handler(method, url, **_kwargs):
resp = responses.get((method, url))
if resp is None:
return DummyResponse({})
return resp
svc = CommsService(client_factory=lambda timeout=None: DummyClient(handler, timeout=timeout))
monkeypatch.setattr(svc, "_db_rename_numeric", lambda *_args, **_kwargs: 0)
result = svc.run_guest_name_randomizer()
assert result["status"] == "ok"
assert result["renamed"] >= 1
def test_comms_db_rename_numeric(monkeypatch) -> None:
dummy_settings = types.SimpleNamespace(
comms_synapse_db_host="db",
comms_synapse_db_port=5432,
comms_synapse_db_name="synapse",
comms_synapse_db_user="synapse",
comms_synapse_db_password="pw",
comms_server_name="live.bstein.dev",
)
monkeypatch.setattr(comms_module, "settings", dummy_settings)
class FakeCursor:
def __init__(self):
self._queue = []
def execute(self, query, params=None):
if "FROM profiles WHERE full_user_id" in query:
self._queue.append([("1", "@123:live.bstein.dev", "guest-1")])
elif "FROM users WHERE name" in query:
self._queue.append([("@123:live.bstein.dev",)])
elif "FROM profiles WHERE full_user_id = ANY" in query:
self._queue.append([])
else:
self._queue.append([])
def fetchall(self):
return self._queue.pop(0) if self._queue else []
def __enter__(self):
return self
def __exit__(self, exc_type, exc, tb):
return False
class FakeConn:
def cursor(self):
return FakeCursor()
def __enter__(self):
return self
def __exit__(self, exc_type, exc, tb):
return False
def close(self):
return None
monkeypatch.setattr(comms_module.psycopg, "connect", lambda **_kwargs: FakeConn())
svc = CommsService()
renamed = svc._db_rename_numeric(set())
assert renamed >= 1

View File

@ -0,0 +1,48 @@
from __future__ import annotations
import types
from ariadne.services import image_sweeper as sweeper_module
from ariadne.services.image_sweeper import ImageSweeperService
def test_image_sweeper_job_payload(monkeypatch) -> None:
dummy_settings = types.SimpleNamespace(
image_sweeper_namespace="maintenance",
image_sweeper_service_account="node-image-sweeper",
image_sweeper_job_ttl_sec=3600,
image_sweeper_wait_timeout_sec=30.0,
)
monkeypatch.setattr(sweeper_module, "settings", dummy_settings)
svc = ImageSweeperService()
payload = svc._job_payload("job-1")
assert payload["metadata"]["name"] == "job-1"
spec = payload["spec"]["template"]["spec"]
assert spec["serviceAccountName"] == "node-image-sweeper"
assert spec["containers"][0]["env"][0]["value"] == "true"
def test_image_sweeper_run_wait(monkeypatch) -> None:
dummy_settings = types.SimpleNamespace(
image_sweeper_namespace="maintenance",
image_sweeper_service_account="node-image-sweeper",
image_sweeper_job_ttl_sec=3600,
image_sweeper_wait_timeout_sec=30.0,
)
monkeypatch.setattr(sweeper_module, "settings", dummy_settings)
def fake_post(path, payload):
assert path.endswith("/maintenance/jobs")
return {"metadata": {"name": "job-1"}}
def fake_get(_path):
return {"status": {"succeeded": 1}}
monkeypatch.setattr(sweeper_module, "post_json", fake_post)
monkeypatch.setattr(sweeper_module, "get_json", fake_get)
svc = ImageSweeperService()
result = svc.run(wait=True)
assert result["status"] == "ok"

View File

@ -82,6 +82,29 @@ def test_get_json_rejects_non_dict(monkeypatch) -> None:
k8s_client.get_json("/api/test") k8s_client.get_json("/api/test")
def test_delete_json_success(monkeypatch) -> None:
dummy_settings = types.SimpleNamespace(k8s_api_timeout_sec=5.0)
monkeypatch.setattr(k8s_client, "settings", dummy_settings)
monkeypatch.setattr(k8s_client, "_read_service_account", lambda: ("token", "/tmp/ca"))
client = DummyClient()
monkeypatch.setattr(k8s_client.httpx, "Client", lambda *args, **kwargs: client)
result = k8s_client.delete_json("/api/test")
assert result == {"ok": True}
def test_delete_json_rejects_non_dict(monkeypatch) -> None:
dummy_settings = types.SimpleNamespace(k8s_api_timeout_sec=5.0)
monkeypatch.setattr(k8s_client, "settings", dummy_settings)
monkeypatch.setattr(k8s_client, "_read_service_account", lambda: ("token", "/tmp/ca"))
client = DummyClient()
client.payload = ["bad"]
monkeypatch.setattr(k8s_client.httpx, "Client", lambda *args, **kwargs: client)
with pytest.raises(RuntimeError):
k8s_client.delete_json("/api/test")
def test_read_service_account(monkeypatch, tmp_path) -> None:
    sa_dir = tmp_path / "sa"
    sa_dir.mkdir()

tests/test_k8s_exec.py (new file, 117 lines)
View File

@ -0,0 +1,117 @@
from __future__ import annotations
import types
import pytest
from ariadne.k8s.exec import ExecError, PodExecutor, _build_command
from ariadne.k8s.pods import PodRef
import ariadne.k8s.exec as exec_module
class DummyStream:
def __init__(self, stdout: str = "", stderr: str = "", exit_code: int = 0):
self._open = True
self._stdout = [stdout] if stdout else []
self._stderr = [stderr] if stderr else []
self._exit_code_ready = True
self._exit_code = exit_code
self.returncode = exit_code
def is_open(self) -> bool:
return self._open
def update(self, timeout: int = 1) -> None:
return None
def peek_stdout(self) -> bool:
return bool(self._stdout)
def read_stdout(self) -> str:
return self._stdout.pop(0)
def peek_stderr(self) -> bool:
return bool(self._stderr)
def read_stderr(self) -> str:
return self._stderr.pop(0)
def peek_exit_code(self) -> bool:
return self._exit_code_ready
def read_exit_code(self) -> int:
self._exit_code_ready = False
self._open = False
return self._exit_code
def close(self) -> None:
self._open = False
class HangingStream(DummyStream):
def __init__(self):
super().__init__(stdout="", stderr="", exit_code=0)
self._exit_code_ready = False
def peek_exit_code(self) -> bool:
return False
def test_build_command_wraps_env() -> None:
cmd = _build_command(["echo", "hello"], {"FOO": "bar"})
assert cmd[0] == "/bin/sh"
assert "FOO=bar" in cmd[2]
def test_exec_returns_output(monkeypatch) -> None:
monkeypatch.setattr(exec_module, "select_pod", lambda *_args, **_kwargs: PodRef("pod", "ns"))
monkeypatch.setattr(exec_module, "_ensure_client", lambda: types.SimpleNamespace(connect_get_namespaced_pod_exec=None))
monkeypatch.setattr(exec_module, "stream", lambda *args, **kwargs: DummyStream(stdout="ok\n", exit_code=0))
executor = PodExecutor("ns", "app=test", "container")
result = executor.exec(["echo", "ok"], check=True)
assert result.stdout == "ok\n"
assert result.ok
def test_exec_raises_on_failure(monkeypatch) -> None:
monkeypatch.setattr(exec_module, "select_pod", lambda *_args, **_kwargs: PodRef("pod", "ns"))
monkeypatch.setattr(exec_module, "_ensure_client", lambda: types.SimpleNamespace(connect_get_namespaced_pod_exec=None))
monkeypatch.setattr(exec_module, "stream", lambda *args, **kwargs: DummyStream(stderr="bad", exit_code=2))
executor = PodExecutor("ns", "app=test", None)
with pytest.raises(ExecError):
executor.exec(["false"], check=True)
def test_exec_times_out(monkeypatch) -> None:
monkeypatch.setattr(exec_module, "select_pod", lambda *_args, **_kwargs: PodRef("pod", "ns"))
monkeypatch.setattr(exec_module, "_ensure_client", lambda: types.SimpleNamespace(connect_get_namespaced_pod_exec=None))
monkeypatch.setattr(exec_module, "stream", lambda *args, **kwargs: HangingStream())
executor = PodExecutor("ns", "app=test", None)
with pytest.raises(TimeoutError):
executor.exec(["sleep", "10"], timeout_sec=0.0, check=False)
def test_ensure_client_fallback(monkeypatch) -> None:
dummy_api = object()
monkeypatch.setattr(exec_module, "_CORE_API", None)
monkeypatch.setattr(exec_module, "_IMPORT_ERROR", None)
class DummyConfig:
def __init__(self):
self.calls = []
def load_incluster_config(self):
self.calls.append("incluster")
raise RuntimeError("no in-cluster")
def load_kube_config(self):
self.calls.append("kubeconfig")
dummy_config = DummyConfig()
monkeypatch.setattr(exec_module, "config", dummy_config)
monkeypatch.setattr(exec_module, "client", types.SimpleNamespace(CoreV1Api=lambda: dummy_api))
assert exec_module._ensure_client() is dummy_api
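
For contrast with the dummies above, a hedged sketch of real-cluster usage (namespace, label, and command are placeholders; the constructor and exec signature match what these tests and the wger service exercise):

from ariadne.k8s.exec import PodExecutor

executor = PodExecutor("nextcloud", "app=nextcloud", "nextcloud")
result = executor.exec(["php", "occ", "status"], timeout_sec=30.0, check=True)
print(result.stdout)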

View File

@ -1,242 +0,0 @@
from __future__ import annotations
import pytest
from ariadne.k8s.jobs import JobSpawner
def test_job_from_cronjob_applies_env_and_ttl() -> None:
cronjob = {
"spec": {
"jobTemplate": {
"spec": {
"template": {
"spec": {
"containers": [
{"name": "sync", "env": [{"name": "FOO", "value": "1"}]}
]
}
}
}
}
}
}
spawner = JobSpawner("ns", "cron")
job = spawner._job_from_cronjob(
cronjob,
"User@Name",
env_overrides=[{"name": "FOO", "value": "2"}, {"name": "BAR", "value": "3"}],
job_ttl_seconds=3600,
)
assert job["spec"]["ttlSecondsAfterFinished"] == 3600
labels = job["metadata"]["labels"]
assert labels["atlas.bstein.dev/trigger"] == "ariadne"
env = job["spec"]["template"]["spec"]["containers"][0]["env"]
env_map = {item["name"]: item["value"] for item in env}
assert env_map["FOO"] == "2"
assert env_map["BAR"] == "3"
def test_job_from_cronjob_env_not_list() -> None:
cronjob = {
"spec": {
"jobTemplate": {
"spec": {
"template": {
"spec": {
"containers": [
{"name": "sync", "env": "bad"}
]
}
}
}
}
}
}
spawner = JobSpawner("ns", "cron")
job = spawner._job_from_cronjob(
cronjob,
"label",
env_overrides=[{"name": "FOO", "value": "1"}],
)
env = job["spec"]["template"]["spec"]["containers"][0]["env"]
assert env == [{"name": "FOO", "value": "1"}]
def test_safe_name_fragment() -> None:
assert JobSpawner._safe_name_fragment("User@Name") == "user-name"
assert JobSpawner._safe_name_fragment("$$$") == "job"
def test_trigger_creates_job(monkeypatch) -> None:
cronjob = {
"metadata": {"name": "cron"},
"spec": {
"jobTemplate": {
"spec": {
"template": {
"spec": {
"containers": [{"name": "sync", "env": []}]
}
}
}
}
},
}
monkeypatch.setattr("ariadne.k8s.jobs.get_json", lambda *args, **kwargs: cronjob)
monkeypatch.setattr("ariadne.k8s.jobs.post_json", lambda *args, **kwargs: {"metadata": {"name": "job"}})
spawner = JobSpawner("ns", "cron")
result = spawner.trigger("label", None, job_ttl_seconds=30)
assert result["status"] == "queued"
def test_trigger_uses_manifest(monkeypatch) -> None:
cronjob = {
"metadata": {"name": "cron"},
"spec": {
"jobTemplate": {
"spec": {
"template": {
"spec": {
"containers": [{"name": "sync", "env": []}]
}
}
}
}
},
}
def explode(*_args, **_kwargs):
raise AssertionError("get_json should not be called")
monkeypatch.setattr("ariadne.k8s.jobs.get_json", explode)
monkeypatch.setattr("ariadne.k8s.jobs.post_json", lambda *args, **kwargs: {"metadata": {"name": "job"}})
spawner = JobSpawner("ns", "cron", manifest=cronjob)
result = spawner.trigger("label", None, job_ttl_seconds=30)
assert result["status"] == "queued"
def test_trigger_missing_job_name(monkeypatch) -> None:
cronjob = {"spec": {"jobTemplate": {"spec": {"template": {"spec": {"containers": [{"name": "sync"}]}}}}}}
monkeypatch.setattr("ariadne.k8s.jobs.get_json", lambda *args, **kwargs: cronjob)
posted = {}
def fake_post(_path, payload):
posted["payload"] = payload
return {}
monkeypatch.setattr("ariadne.k8s.jobs.post_json", fake_post)
spawner = JobSpawner("ns", "cron")
result = spawner.trigger("label", None)
assert result["job"] == posted["payload"]["metadata"]["name"]
def test_trigger_missing_job_name_raises(monkeypatch) -> None:
monkeypatch.setattr("ariadne.k8s.jobs.get_json", lambda *args, **kwargs: {})
monkeypatch.setattr("ariadne.k8s.jobs.post_json", lambda *args, **kwargs: {})
spawner = JobSpawner("ns", "cron")
monkeypatch.setattr(spawner, "_job_from_cronjob", lambda *args, **kwargs: {"metadata": {}})
with pytest.raises(RuntimeError):
spawner.trigger("label", None)
def test_wait_for_completion_success(monkeypatch) -> None:
responses = [
{"status": {"succeeded": 1}},
]
def fake_get_json(path):
return responses.pop(0)
monkeypatch.setattr("ariadne.k8s.jobs.get_json", fake_get_json)
spawner = JobSpawner("ns", "cron")
result = spawner.wait_for_completion("job", timeout_sec=0.1)
assert result["status"] == "ok"
def test_wait_for_completion_skips_bad_condition(monkeypatch) -> None:
responses = [
{"status": {"conditions": ["bad", {"type": "Complete", "status": "True"}]}},
]
monkeypatch.setattr("ariadne.k8s.jobs.get_json", lambda path: responses.pop(0))
spawner = JobSpawner("ns", "cron")
result = spawner.wait_for_completion("job", timeout_sec=0.1)
assert result["status"] == "ok"
def test_wait_for_completion_error(monkeypatch) -> None:
responses = [
{"status": {"failed": 1}},
]
monkeypatch.setattr("ariadne.k8s.jobs.get_json", lambda path: responses.pop(0))
spawner = JobSpawner("ns", "cron")
result = spawner.wait_for_completion("job", timeout_sec=0.1)
assert result["status"] == "error"
def test_wait_for_completion_timeout(monkeypatch) -> None:
monkeypatch.setattr("ariadne.k8s.jobs.get_json", lambda path: {"status": {}})
spawner = JobSpawner("ns", "cron")
result = spawner.wait_for_completion("job", timeout_sec=0.01)
assert result["status"] == "running"
def test_wait_for_completion_conditions(monkeypatch) -> None:
responses = [
{"status": {"conditions": [{"type": "Complete", "status": "True"}]}},
]
monkeypatch.setattr("ariadne.k8s.jobs.get_json", lambda path: responses.pop(0))
spawner = JobSpawner("ns", "cron")
result = spawner.wait_for_completion("job", timeout_sec=0.1)
assert result["status"] == "ok"
def test_wait_for_completion_failed_condition(monkeypatch) -> None:
responses = [
{"status": {"conditions": [{"type": "Failed", "status": "True"}]}},
]
monkeypatch.setattr("ariadne.k8s.jobs.get_json", lambda path: responses.pop(0))
spawner = JobSpawner("ns", "cron")
result = spawner.wait_for_completion("job", timeout_sec=0.1)
assert result["status"] == "error"
def test_trigger_and_wait(monkeypatch) -> None:
cronjob = {"spec": {"jobTemplate": {"spec": {"template": {"spec": {"containers": [{"name": "sync"}]}}}}}}
monkeypatch.setattr("ariadne.k8s.jobs.get_json", lambda *args, **kwargs: cronjob)
monkeypatch.setattr(
"ariadne.k8s.jobs.post_json",
lambda *args, **kwargs: {"metadata": {"name": "job"}},
)
monkeypatch.setattr(
"ariadne.k8s.jobs.JobSpawner.wait_for_completion",
lambda self, job, timeout_sec: {"job": job, "status": "ok"},
)
spawner = JobSpawner("ns", "cron")
result = spawner.trigger_and_wait("label", None, timeout_sec=1.0)
assert result["status"] == "ok"
def test_trigger_and_wait_missing_job_name(monkeypatch) -> None:
spawner = JobSpawner("ns", "cron")
monkeypatch.setattr(spawner, "trigger", lambda *args, **kwargs: {"job": ""})
with pytest.raises(RuntimeError):
spawner.trigger_and_wait("label", None, timeout_sec=0.1)

View File

@ -1,16 +0,0 @@
from __future__ import annotations
import pytest
from ariadne.k8s.manifests import load_cronjob_manifest
def test_load_cronjob_manifest_ok() -> None:
manifest = load_cronjob_manifest("comms/guest-name-job.yaml")
assert manifest["kind"] == "CronJob"
assert manifest["metadata"]["name"] == "guest-name-randomizer"
def test_load_cronjob_manifest_missing() -> None:
with pytest.raises(FileNotFoundError):
load_cronjob_manifest("missing.yaml")

tests/test_k8s_pods.py (new file, 59 lines)
View File

@ -0,0 +1,59 @@
from __future__ import annotations
import pytest
from ariadne.k8s import pods as pods_module
def test_list_pods_encodes_selector(monkeypatch) -> None:
captured = {}
def fake_get_json(path: str):
captured["path"] = path
return {"items": []}
monkeypatch.setattr(pods_module, "get_json", fake_get_json)
pods_module.list_pods("demo", "app=nextcloud")
assert "labelSelector=app%3Dnextcloud" in captured["path"]
def test_select_pod_picks_ready_latest(monkeypatch) -> None:
payload = {
"items": [
{
"metadata": {"name": "old-pod"},
"status": {
"phase": "Running",
"startTime": "2026-01-19T00:00:00Z",
"conditions": [{"type": "Ready", "status": "True"}],
},
},
{
"metadata": {"name": "new-pod"},
"status": {
"phase": "Running",
"startTime": "2026-01-20T00:00:00Z",
"conditions": [{"type": "Ready", "status": "True"}],
},
},
]
}
monkeypatch.setattr(pods_module, "get_json", lambda *_args, **_kwargs: payload)
pod = pods_module.select_pod("demo", "app=test")
assert pod.name == "new-pod"
def test_select_pod_ignores_non_ready(monkeypatch) -> None:
payload = {
"items": [
{
"metadata": {"name": "pending"},
"status": {"phase": "Pending"},
},
]
}
monkeypatch.setattr(pods_module, "get_json", lambda *_args, **_kwargs: payload)
with pytest.raises(pods_module.PodSelectionError):
pods_module.select_pod("demo", "app=test")

View File

@ -90,7 +90,23 @@ def test_task_context_injects_task_name() -> None:
        args=(),
        exc_info=None,
    )
    assert getattr(record, "task", None) in {None, ""}
    with task_context("schedule.demo"):
        logging_module._ContextFilter().filter(record)
    assert record.task == "schedule.demo"
    assert record.taskName == "schedule.demo"
def test_task_field_sets_task_name() -> None:
record = logging.LogRecord(
name="ariadne.test",
level=logging.INFO,
pathname=__file__,
lineno=60,
msg="hello",
args=(),
exc_info=None,
)
record.task = "schedule.manual"
logging_module._ContextFilter().filter(record)
assert record.taskName == "schedule.manual"

View File

@ -0,0 +1,233 @@
from __future__ import annotations
import types
from ariadne.services import nextcloud as nextcloud_module
from ariadne.services.nextcloud import NextcloudService, _parse_mail_export
def test_parse_mail_export() -> None:
output = "\n".join(
[
"Account 12:",
" - Name: Alice",
" - E-mail: alice@bstein.dev",
"Account 13:",
" - E-mail: extra@bstein.dev",
]
)
accounts = _parse_mail_export(output)
assert accounts == [("12", "alice@bstein.dev"), ("13", "extra@bstein.dev")]
def test_nextcloud_sync_mail_create(monkeypatch) -> None:
dummy_settings = types.SimpleNamespace(
nextcloud_namespace="nextcloud",
nextcloud_mail_sync_cronjob="nextcloud-mail-sync",
nextcloud_mail_sync_wait_timeout_sec=90.0,
nextcloud_mail_sync_job_ttl_sec=3600,
nextcloud_pod_label="app=nextcloud",
nextcloud_container="nextcloud",
nextcloud_exec_timeout_sec=30.0,
nextcloud_db_host="",
nextcloud_db_port=5432,
nextcloud_db_name="nextcloud",
nextcloud_db_user="nextcloud",
nextcloud_db_password="",
mailu_domain="bstein.dev",
mailu_host="mail.bstein.dev",
)
monkeypatch.setattr(nextcloud_module, "settings", dummy_settings)
user = {
"id": "uid-1",
"username": "alice",
"enabled": True,
"attributes": {
"mailu_app_password": ["pw"],
"mailu_email": ["alice@bstein.dev"],
},
}
monkeypatch.setattr(nextcloud_module.keycloak_admin, "ready", lambda: True)
monkeypatch.setattr(nextcloud_module.keycloak_admin, "iter_users", lambda **_kwargs: [user])
monkeypatch.setattr(nextcloud_module.keycloak_admin, "get_user", lambda *_args, **_kwargs: user)
occ_calls: list[list[str]] = []
list_calls = [[], [("42", "alice@bstein.dev")]]
def fake_occ(args):
occ_calls.append(args)
return ""
svc = NextcloudService()
monkeypatch.setattr(svc, "_occ", fake_occ)
monkeypatch.setattr(svc, "_list_mail_accounts", lambda *_args, **_kwargs: list_calls.pop(0))
monkeypatch.setattr(svc, "_set_editor_mode_richtext", lambda *_args, **_kwargs: None)
monkeypatch.setattr(svc, "_set_user_mail_meta", lambda *_args, **_kwargs: None)
result = svc.sync_mail()
assert result["status"] == "ok"
summary = result["summary"]
assert summary.created == 1
assert any("mail:account:create" in call[0] for call in occ_calls)
def test_nextcloud_sync_mail_keycloak_not_ready(monkeypatch) -> None:
dummy_settings = types.SimpleNamespace(
nextcloud_namespace="nextcloud",
nextcloud_mail_sync_cronjob="nextcloud-mail-sync",
nextcloud_mail_sync_wait_timeout_sec=90.0,
nextcloud_mail_sync_job_ttl_sec=3600,
nextcloud_pod_label="app=nextcloud",
nextcloud_container="nextcloud",
nextcloud_exec_timeout_sec=30.0,
nextcloud_db_host="",
nextcloud_db_port=5432,
nextcloud_db_name="nextcloud",
nextcloud_db_user="nextcloud",
nextcloud_db_password="",
mailu_domain="bstein.dev",
mailu_host="mail.bstein.dev",
)
monkeypatch.setattr(nextcloud_module, "settings", dummy_settings)
monkeypatch.setattr(nextcloud_module.keycloak_admin, "ready", lambda: False)
svc = NextcloudService()
result = svc.sync_mail()
assert result["status"] == "error"
def test_nextcloud_sync_mail_update_and_delete(monkeypatch) -> None:
dummy_settings = types.SimpleNamespace(
nextcloud_namespace="nextcloud",
nextcloud_mail_sync_cronjob="nextcloud-mail-sync",
nextcloud_mail_sync_wait_timeout_sec=90.0,
nextcloud_mail_sync_job_ttl_sec=3600,
nextcloud_pod_label="app=nextcloud",
nextcloud_container="nextcloud",
nextcloud_exec_timeout_sec=30.0,
nextcloud_db_host="",
nextcloud_db_port=5432,
nextcloud_db_name="nextcloud",
nextcloud_db_user="nextcloud",
nextcloud_db_password="",
mailu_domain="bstein.dev",
mailu_host="mail.bstein.dev",
)
monkeypatch.setattr(nextcloud_module, "settings", dummy_settings)
user = {
"id": "uid-1",
"username": "alice",
"enabled": True,
"attributes": {
"mailu_app_password": ["pw"],
"mailu_email": ["alice@bstein.dev"],
},
}
monkeypatch.setattr(nextcloud_module.keycloak_admin, "ready", lambda: True)
monkeypatch.setattr(nextcloud_module.keycloak_admin, "iter_users", lambda **_kwargs: [user])
monkeypatch.setattr(nextcloud_module.keycloak_admin, "get_user", lambda *_args, **_kwargs: user)
occ_calls: list[list[str]] = []
list_calls = [
[("1", "alice@bstein.dev"), ("2", "extra@bstein.dev")],
[("1", "alice@bstein.dev")],
]
def fake_occ(args):
occ_calls.append(args)
return ""
svc = NextcloudService()
monkeypatch.setattr(svc, "_occ", fake_occ)
monkeypatch.setattr(svc, "_list_mail_accounts", lambda *_args, **_kwargs: list_calls.pop(0))
monkeypatch.setattr(svc, "_set_editor_mode_richtext", lambda *_args, **_kwargs: None)
monkeypatch.setattr(svc, "_set_user_mail_meta", lambda *_args, **_kwargs: None)
result = svc.sync_mail()
assert result["status"] == "ok"
summary = result["summary"]
assert summary.updated == 1
assert summary.deleted == 1
assert any("mail:account:update" in call[0] for call in occ_calls)
def test_nextcloud_run_cron(monkeypatch) -> None:
dummy_settings = types.SimpleNamespace(
nextcloud_namespace="nextcloud",
nextcloud_mail_sync_cronjob="nextcloud-mail-sync",
nextcloud_mail_sync_wait_timeout_sec=90.0,
nextcloud_mail_sync_job_ttl_sec=3600,
nextcloud_pod_label="app=nextcloud",
nextcloud_container="nextcloud",
nextcloud_exec_timeout_sec=30.0,
nextcloud_db_host="",
nextcloud_db_port=5432,
nextcloud_db_name="nextcloud",
nextcloud_db_user="nextcloud",
nextcloud_db_password="",
mailu_domain="bstein.dev",
mailu_host="mail.bstein.dev",
)
monkeypatch.setattr(nextcloud_module, "settings", dummy_settings)
svc = NextcloudService()
def fake_exec(*_args, **_kwargs):
return types.SimpleNamespace(stdout="ok", stderr="", exit_code=0, ok=True)
monkeypatch.setattr(svc._executor, "exec", fake_exec)
result = svc.run_cron()
assert result["status"] == "ok"
def test_nextcloud_run_maintenance(monkeypatch) -> None:
dummy_settings = types.SimpleNamespace(
nextcloud_namespace="nextcloud",
nextcloud_mail_sync_cronjob="nextcloud-mail-sync",
nextcloud_mail_sync_wait_timeout_sec=90.0,
nextcloud_mail_sync_job_ttl_sec=3600,
nextcloud_pod_label="app=nextcloud",
nextcloud_container="nextcloud",
nextcloud_exec_timeout_sec=30.0,
nextcloud_db_host="",
nextcloud_db_port=5432,
nextcloud_db_name="nextcloud",
nextcloud_db_user="nextcloud",
nextcloud_db_password="",
nextcloud_url="https://cloud.bstein.dev",
nextcloud_admin_user="admin",
nextcloud_admin_password="secret",
mailu_domain="bstein.dev",
mailu_host="mail.bstein.dev",
)
monkeypatch.setattr(nextcloud_module, "settings", dummy_settings)
svc = NextcloudService()
occ_calls: list[list[str]] = []
monkeypatch.setattr(svc, "_occ", lambda args: occ_calls.append(args) or "")
exec_calls = []
def fake_exec(*_args, **_kwargs):
exec_calls.append(_args)
return types.SimpleNamespace(stdout="ok", stderr="", exit_code=0, ok=True)
monkeypatch.setattr(svc._executor, "exec", fake_exec)
api_calls: list[tuple[str, str]] = []
def fake_api(method: str, path: str, data=None):
api_calls.append((method, path))
if method == "GET":
return {"ocs": {"data": [{"id": 1}]}}
return {}
monkeypatch.setattr(svc, "_external_api", fake_api)
result = svc.run_maintenance()
assert result["status"] == "ok"
assert any(len(call) > 1 and call[1] == "theming" for call in occ_calls)
assert any(method == "DELETE" for method, _ in api_calls)
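These tests stub `svc._occ`, `svc._executor.exec`, and `svc._external_api` directly. For orientation, here is a minimal sketch of the kind of occ wrapper they assume; the real helper lives in ariadne/services/nextcloud.py, and the `run_occ` name, signature, and error handling below are illustrative only:

def run_occ(executor, args: list[str], timeout_sec: float = 30.0) -> str:
    # Run `php occ <args>` in the Nextcloud container via a PodExecutor-like
    # object whose .exec() returns stdout/stderr/exit_code/ok (as faked above).
    result = executor.exec(["php", "occ", *args], timeout_sec=timeout_sec)
    if not result.ok:
        raise RuntimeError(f"occ {' '.join(args)} failed: {result.stderr}")
    return result.stdout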


@@ -0,0 +1,60 @@
from __future__ import annotations
import types
import ariadne.services.opensearch_prune as prune_module
def test_parse_size() -> None:
assert prune_module.parse_size("1gb") == 1024**3
assert prune_module.parse_size("0") == 0
assert prune_module.parse_size("bad") == 0
def test_prune_indices_deletes(monkeypatch) -> None:
dummy_settings = types.SimpleNamespace(
opensearch_url="http://opensearch",
opensearch_limit_bytes=5,
opensearch_index_patterns="kube-*",
opensearch_timeout_sec=5.0,
)
monkeypatch.setattr(prune_module, "settings", dummy_settings)
class DummyResponse:
def __init__(self, payload, status_code=200):
self._payload = payload
self.status_code = status_code
def raise_for_status(self):
return None
def json(self):
return self._payload
class DummyClient:
def __init__(self):
self.deleted = []
def __enter__(self):
return self
def __exit__(self, exc_type, exc, tb):
return False
def get(self, url, params=None):
return DummyResponse(
[
{"index": "kube-1", "store.size": "10b", "creation.date": "1"},
{"index": "kube-2", "store.size": "1b", "creation.date": "2"},
]
)
def delete(self, url):
self.deleted.append(url)
return DummyResponse({}, 200)
dummy = DummyClient()
monkeypatch.setattr(prune_module.httpx, "Client", lambda *args, **kwargs: dummy)
summary = prune_module.prune_indices()
assert summary.deleted == 1
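The assertions pin down the contract: `parse_size` turns OpenSearch size strings into bytes (unparseable input yields 0), and pruning deletes oldest-first until the matching indices fit under `opensearch_limit_bytes`. A sketch consistent with that behavior; the helper names below (`_UNITS`, `prune_oldest`) are invented for illustration and need not match the module:

import re

_UNITS = {"b": 1, "kb": 1024, "mb": 1024**2, "gb": 1024**3, "tb": 1024**4}

def parse_size(text: str) -> int:
    # "10b" -> 10, "1gb" -> 1024**3, anything unparseable -> 0.
    match = re.fullmatch(r"(\d+(?:\.\d+)?)\s*([a-z]*)", (text or "").strip().lower())
    if not match:
        return 0
    value, unit = match.groups()
    return int(float(value) * _UNITS.get(unit or "b", 0))

def prune_oldest(indices: list[dict], limit_bytes: int) -> list[str]:
    # Delete oldest-first (by creation.date) until the remaining total fits.
    ordered = sorted(indices, key=lambda item: int(item["creation.date"]))
    total = sum(parse_size(item["store.size"]) for item in ordered)
    doomed: list[str] = []
    for item in ordered:
        if total <= limit_bytes:
            break
        doomed.append(item["index"])
        total -= parse_size(item["store.size"])
    return doomed

Against the test data (10b + 1b versus a 5-byte limit) this deletes only kube-1, matching `summary.deleted == 1`.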

37 tests/test_pod_cleaner.py Normal file

@@ -0,0 +1,37 @@
from __future__ import annotations
import ariadne.services.pod_cleaner as pod_cleaner
def test_clean_finished_pods(monkeypatch) -> None:
calls = {"delete": []}
def fake_get_json(path: str):
if "Succeeded" in path:
return {"items": [{"metadata": {"namespace": "ns", "name": "ok"}}]}
return {"items": [{"metadata": {"namespace": "ns", "name": "fail"}}]}
def fake_delete_json(path: str):
calls["delete"].append(path)
return {"ok": True}
monkeypatch.setattr(pod_cleaner, "get_json", fake_get_json)
monkeypatch.setattr(pod_cleaner, "delete_json", fake_delete_json)
summary = pod_cleaner.clean_finished_pods()
assert summary.deleted == 2
assert summary.failures == 0
def test_clean_finished_pods_handles_failure(monkeypatch) -> None:
def fake_get_json(_path: str):
return {"items": [{"metadata": {"namespace": "ns", "name": "bad"}}]}
def fake_delete_json(_path: str):
raise RuntimeError("boom")
monkeypatch.setattr(pod_cleaner, "get_json", fake_get_json)
monkeypatch.setattr(pod_cleaner, "delete_json", fake_delete_json)
summary = pod_cleaner.clean_finished_pods()
assert summary.failures == 2
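Taken together, the two tests imply that the cleaner lists pods in the Succeeded and Failed phases, deletes each one, and tallies deletions against failures. A hedged sketch under those assumptions; `get_json`/`delete_json` stand in for the module's Kubernetes helpers exactly as the tests patch them, and the summary type is invented here:

from dataclasses import dataclass

@dataclass
class CleanSummary:
    deleted: int = 0
    failures: int = 0

def clean_finished_pods() -> CleanSummary:
    summary = CleanSummary()
    for phase in ("Succeeded", "Failed"):
        # get_json/delete_json are the module's k8s API helpers (patched above).
        listing = get_json(f"/api/v1/pods?fieldSelector=status.phase={phase}")
        for pod in listing.get("items", []):
            meta = pod["metadata"]
            try:
                delete_json(f"/api/v1/namespaces/{meta['namespace']}/pods/{meta['name']}")
                summary.deleted += 1
            except Exception:
                summary.failures += 1
    return summary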

View File

@@ -5,28 +5,28 @@ import types
 import pytest
-from ariadne.services.comms import CommsService
 from ariadne.services.firefly import FireflyService
 from ariadne.services.mailu import MailuService
 from ariadne.services.nextcloud import NextcloudService
-from ariadne.services.vault import VaultService
 from ariadne.services.wger import WgerService
 from ariadne.services.vaultwarden import VaultwardenService
-class DummySpawner:
-    def __init__(self, namespace, cronjob, manifest=None):
-        self.namespace = namespace
-        self.cronjob = cronjob
+class DummyExecutor:
+    def __init__(self, stdout: str = "ok", stderr: str = "", exit_code: int = 0):
         self.calls = []
+        self._stdout = stdout
+        self._stderr = stderr
+        self._exit_code = exit_code
-    def trigger_and_wait(self, label_suffix, env_overrides, timeout_sec, job_ttl_seconds=None):
-        self.calls.append((label_suffix, env_overrides, timeout_sec, job_ttl_seconds))
-        return {"job": "test", "status": "ok"}
-    def trigger(self, label_suffix, env_overrides, job_ttl_seconds=None):
-        self.calls.append((label_suffix, env_overrides, job_ttl_seconds))
-        return {"job": "test", "status": "queued"}
+    def exec(self, command, env=None, timeout_sec=None, check=True):
+        self.calls.append((command, env, timeout_sec, check))
+        return types.SimpleNamespace(
+            stdout=self._stdout,
+            stderr=self._stderr,
+            exit_code=self._exit_code,
+            ok=self._exit_code == 0,
+        )
 class DummyClient:
@@ -72,112 +72,112 @@ class DummyVaultwardenClient:
         return None
-def test_nextcloud_sync_mail_builds_env(monkeypatch) -> None:
+def test_nextcloud_sync_mail_no_user(monkeypatch) -> None:
     dummy = types.SimpleNamespace(
         nextcloud_namespace="nextcloud",
         nextcloud_mail_sync_cronjob="nextcloud-mail-sync",
         nextcloud_mail_sync_wait_timeout_sec=90.0,
         nextcloud_mail_sync_job_ttl_sec=3600,
+        nextcloud_pod_label="app=nextcloud",
+        nextcloud_container="nextcloud",
+        nextcloud_exec_timeout_sec=30.0,
+        nextcloud_db_host="",
+        nextcloud_db_port=5432,
+        nextcloud_db_name="nextcloud",
+        nextcloud_db_user="nextcloud",
+        nextcloud_db_password="",
+        mailu_domain="bstein.dev",
+        mailu_host="mail.bstein.dev",
     )
     monkeypatch.setattr("ariadne.services.nextcloud.settings", dummy)
-    monkeypatch.setattr(
-        "ariadne.services.nextcloud.JobSpawner",
-        lambda ns, cj, manifest=None: DummySpawner(ns, cj, manifest),
-    )
+    monkeypatch.setattr("ariadne.services.nextcloud.keycloak_admin.ready", lambda: True)
+    monkeypatch.setattr("ariadne.services.nextcloud.keycloak_admin.find_user", lambda *_args, **_kwargs: None)
     svc = NextcloudService()
     result = svc.sync_mail("alice", wait=True)
     assert result["status"] == "ok"
-    spawner = svc._spawner
-    assert spawner.calls
-    label, env, timeout, ttl = spawner.calls[0]
-    assert label == "alice"
-    assert {item["name"]: item["value"] for item in env}["ONLY_USERNAME"] == "alice"
-    assert ttl == 3600
-def test_wger_sync_user_env(monkeypatch) -> None:
+def test_wger_sync_user_exec(monkeypatch) -> None:
     dummy = types.SimpleNamespace(
         wger_namespace="health",
         wger_user_sync_cronjob="wger-user-sync",
         wger_admin_cronjob="wger-admin-ensure",
         wger_user_sync_wait_timeout_sec=60.0,
+        wger_pod_label="app=wger",
+        wger_container="wger",
+        wger_admin_username="admin",
+        wger_admin_password="pw",
+        wger_admin_email="admin@bstein.dev",
     )
     monkeypatch.setattr("ariadne.services.wger.settings", dummy)
-    monkeypatch.setattr(
-        "ariadne.services.wger.JobSpawner",
-        lambda ns, cj, manifest=None: DummySpawner(ns, cj, manifest),
-    )
+    calls: list[dict[str, str]] = []
+    class DummyExecutor:
+        def exec(self, _cmd, env=None, timeout_sec=None, check=True):
+            calls.append(env or {})
+            return types.SimpleNamespace(stdout="ok", stderr="", exit_code=0, ok=True)
+    monkeypatch.setattr("ariadne.services.wger.PodExecutor", lambda *_args, **_kwargs: DummyExecutor())
     svc = WgerService()
     result = svc.sync_user("alice", "alice@bstein.dev", "pw", wait=True)
     assert result["status"] == "ok"
-    user_spawner = svc._user_spawner
-    label, env, _, _ = user_spawner.calls[0]
-    assert label == "alice"
-    env_map = {item["name"]: item["value"] for item in env}
-    assert env_map["WGER_USERNAME"] == "alice"
-    assert env_map["WGER_EMAIL"] == "alice@bstein.dev"
+    assert calls[0]["WGER_USERNAME"] == "alice"
-def test_wger_sync_user_queued(monkeypatch) -> None:
+def test_wger_ensure_admin_exec(monkeypatch) -> None:
     dummy = types.SimpleNamespace(
         wger_namespace="health",
         wger_user_sync_cronjob="wger-user-sync",
         wger_admin_cronjob="wger-admin-ensure",
         wger_user_sync_wait_timeout_sec=60.0,
+        wger_pod_label="app=wger",
+        wger_container="wger",
+        wger_admin_username="admin",
+        wger_admin_password="pw",
+        wger_admin_email="admin@bstein.dev",
     )
     monkeypatch.setattr("ariadne.services.wger.settings", dummy)
-    monkeypatch.setattr(
-        "ariadne.services.wger.JobSpawner",
-        lambda ns, cj, manifest=None: DummySpawner(ns, cj, manifest),
-    )
+    calls: list[dict[str, str]] = []
+    class DummyExecutor:
+        def exec(self, _cmd, env=None, timeout_sec=None, check=True):
+            calls.append(env or {})
+            return types.SimpleNamespace(stdout="ok", stderr="", exit_code=0, ok=True)
+    monkeypatch.setattr("ariadne.services.wger.PodExecutor", lambda *_args, **_kwargs: DummyExecutor())
     svc = WgerService()
-    result = svc.sync_user("alice", "alice@bstein.dev", "pw", wait=False)
-    assert result["status"] == "queued"
+    result = svc.ensure_admin(wait=False)
+    assert result["status"] == "ok"
+    assert calls[0]["WGER_ADMIN_USERNAME"] == "admin"
-def test_firefly_sync_user_env(monkeypatch) -> None:
+def test_firefly_sync_user_exec(monkeypatch) -> None:
     dummy = types.SimpleNamespace(
         firefly_namespace="finance",
         firefly_user_sync_cronjob="firefly-user-sync",
         firefly_user_sync_wait_timeout_sec=60.0,
+        firefly_pod_label="app=firefly",
+        firefly_container="firefly",
+        firefly_cron_base_url="http://firefly/cron",
+        firefly_cron_token="token",
+        firefly_cron_timeout_sec=10.0,
     )
     monkeypatch.setattr("ariadne.services.firefly.settings", dummy)
-    monkeypatch.setattr(
-        "ariadne.services.firefly.JobSpawner",
-        lambda ns, cj, manifest=None: DummySpawner(ns, cj, manifest),
-    )
+    class DummyExecutor:
+        def exec(self, _cmd, env=None, timeout_sec=None, check=True):
+            return types.SimpleNamespace(stdout="ok", stderr="", exit_code=0, ok=True)
+    monkeypatch.setattr("ariadne.services.firefly.PodExecutor", lambda *_args, **_kwargs: DummyExecutor())
     svc = FireflyService()
     result = svc.sync_user("alice@bstein.dev", "pw", wait=True)
     assert result["status"] == "ok"
-    spawner = svc._spawner
-    label, env, _, _ = spawner.calls[0]
-    assert label == "alice"
-    env_map = {item["name"]: item["value"] for item in env}
-    assert env_map["FIREFLY_USER_EMAIL"] == "alice@bstein.dev"
-def test_firefly_sync_user_queued(monkeypatch) -> None:
-    dummy = types.SimpleNamespace(
-        firefly_namespace="finance",
-        firefly_user_sync_cronjob="firefly-user-sync",
-        firefly_user_sync_wait_timeout_sec=60.0,
-    )
-    monkeypatch.setattr("ariadne.services.firefly.settings", dummy)
-    monkeypatch.setattr(
-        "ariadne.services.firefly.JobSpawner",
-        lambda ns, cj, manifest=None: DummySpawner(ns, cj, manifest),
-    )
-    svc = FireflyService()
-    result = svc.sync_user("alice@bstein.dev", "pw", wait=False)
-    assert result["status"] == "queued"
 def test_firefly_sync_missing_inputs(monkeypatch) -> None:
@@ -185,12 +185,14 @@ def test_firefly_sync_missing_inputs(monkeypatch) -> None:
         firefly_namespace="finance",
         firefly_user_sync_cronjob="firefly-user-sync",
         firefly_user_sync_wait_timeout_sec=60.0,
+        firefly_pod_label="app=firefly",
+        firefly_container="firefly",
+        firefly_cron_base_url="http://firefly/cron",
+        firefly_cron_token="token",
+        firefly_cron_timeout_sec=10.0,
     )
     monkeypatch.setattr("ariadne.services.firefly.settings", dummy)
-    monkeypatch.setattr(
-        "ariadne.services.firefly.JobSpawner",
-        lambda ns, cj, manifest=None: DummySpawner(ns, cj, manifest),
-    )
+    monkeypatch.setattr("ariadne.services.firefly.PodExecutor", lambda *_args, **_kwargs: None)
     svc = FireflyService()
     with pytest.raises(RuntimeError):
@@ -204,6 +206,11 @@ def test_firefly_sync_missing_config(monkeypatch) -> None:
         firefly_namespace="",
         firefly_user_sync_cronjob="",
         firefly_user_sync_wait_timeout_sec=60.0,
+        firefly_pod_label="app=firefly",
+        firefly_container="firefly",
+        firefly_cron_base_url="http://firefly/cron",
+        firefly_cron_token="token",
+        firefly_cron_timeout_sec=10.0,
     )
     monkeypatch.setattr("ariadne.services.firefly.settings", dummy)
     svc = FireflyService()
@@ -211,146 +218,39 @@ def test_firefly_sync_missing_config(monkeypatch) -> None:
         svc.sync_user("alice@bstein.dev", "pw", wait=True)
-def test_vault_sync_jobs(monkeypatch) -> None:
+def test_firefly_run_cron(monkeypatch) -> None:
     dummy = types.SimpleNamespace(
-        vault_namespace="vault",
-        vault_k8s_auth_cronjob="vault-k8s-auth-config",
-        vault_oidc_cronjob="vault-oidc-config",
-        vault_job_wait_timeout_sec=120.0,
+        firefly_namespace="finance",
+        firefly_user_sync_cronjob="firefly-user-sync",
+        firefly_user_sync_wait_timeout_sec=60.0,
+        firefly_pod_label="app=firefly",
+        firefly_container="firefly",
+        firefly_cron_base_url="http://firefly/cron",
+        firefly_cron_token="token",
+        firefly_cron_timeout_sec=10.0,
     )
-    monkeypatch.setattr("ariadne.services.vault.settings", dummy)
-    monkeypatch.setattr(
-        "ariadne.services.vault.JobSpawner",
-        lambda ns, cj, manifest=None: DummySpawner(ns, cj, manifest),
-    )
-    svc = VaultService()
-    result = svc.sync_k8s_auth(wait=True)
+    monkeypatch.setattr("ariadne.services.firefly.settings", dummy)
+    monkeypatch.setattr("ariadne.services.firefly.PodExecutor", lambda *_args, **_kwargs: DummyExecutor())
+    class DummyHTTP:
+        def __init__(self):
+            self.calls = []
+        def __enter__(self):
+            return self
+        def __exit__(self, exc_type, exc, tb):
+            return False
+        def get(self, url):
+            self.calls.append(url)
+            return types.SimpleNamespace(status_code=200)
+    monkeypatch.setattr("ariadne.services.firefly.httpx.Client", lambda *args, **kwargs: DummyHTTP())
+    svc = FireflyService()
+    result = svc.run_cron()
     assert result["status"] == "ok"
-    spawner = svc._k8s_auth_spawner
-    label, _, timeout, _ = spawner.calls[0]
-    assert label == "k8s-auth"
-    assert timeout == 120.0
-def test_vault_sync_k8s_auth_queue(monkeypatch) -> None:
-    dummy = types.SimpleNamespace(
-        vault_namespace="vault",
-        vault_k8s_auth_cronjob="vault-k8s-auth-config",
-        vault_oidc_cronjob="vault-oidc-config",
-        vault_job_wait_timeout_sec=120.0,
-    )
-    monkeypatch.setattr("ariadne.services.vault.settings", dummy)
-    monkeypatch.setattr(
-        "ariadne.services.vault.JobSpawner",
-        lambda ns, cj, manifest=None: DummySpawner(ns, cj, manifest),
-    )
-    svc = VaultService()
-    result = svc.sync_k8s_auth(wait=False)
-    assert result["status"] == "queued"
-def test_vault_sync_oidc_queue(monkeypatch) -> None:
-    dummy = types.SimpleNamespace(
-        vault_namespace="vault",
-        vault_k8s_auth_cronjob="vault-k8s-auth-config",
-        vault_oidc_cronjob="vault-oidc-config",
-        vault_job_wait_timeout_sec=120.0,
-    )
-    monkeypatch.setattr("ariadne.services.vault.settings", dummy)
-    monkeypatch.setattr(
-        "ariadne.services.vault.JobSpawner",
-        lambda ns, cj, manifest=None: DummySpawner(ns, cj, manifest),
-    )
-    svc = VaultService()
-    result = svc.sync_oidc(wait=False)
-    assert result["status"] == "queued"
-def test_vault_sync_oidc_wait(monkeypatch) -> None:
-    dummy = types.SimpleNamespace(
-        vault_namespace="vault",
-        vault_k8s_auth_cronjob="vault-k8s-auth-config",
-        vault_oidc_cronjob="vault-oidc-config",
-        vault_job_wait_timeout_sec=120.0,
-    )
-    monkeypatch.setattr("ariadne.services.vault.settings", dummy)
-    monkeypatch.setattr(
-        "ariadne.services.vault.JobSpawner",
-        lambda ns, cj, manifest=None: DummySpawner(ns, cj, manifest),
-    )
-    svc = VaultService()
-    result = svc.sync_oidc(wait=True)
-    assert result["status"] == "ok"
-def test_comms_jobs(monkeypatch) -> None:
-    dummy = types.SimpleNamespace(
-        comms_namespace="comms",
-        comms_guest_name_cronjob="guest-name-randomizer",
-        comms_pin_invite_cronjob="pin-othrys-invite",
-        comms_reset_room_cronjob="othrys-room-reset",
-        comms_seed_room_cronjob="seed-othrys-room",
-        comms_job_wait_timeout_sec=60.0,
-    )
-    monkeypatch.setattr("ariadne.services.comms.settings", dummy)
-    monkeypatch.setattr(
-        "ariadne.services.comms.JobSpawner",
-        lambda ns, cj, manifest=None: DummySpawner(ns, cj, manifest),
-    )
-    svc = CommsService()
-    result = svc.run_guest_name_randomizer(wait=True)
-    assert result["status"] == "ok"
-    spawner = svc._guest_name_spawner
-    label, _, timeout, _ = spawner.calls[0]
-    assert label == "guest-name"
-    assert timeout == 60.0
-def test_comms_pin_invite(monkeypatch) -> None:
-    dummy = types.SimpleNamespace(
-        comms_namespace="comms",
-        comms_guest_name_cronjob="guest-name-randomizer",
-        comms_pin_invite_cronjob="pin-othrys-invite",
-        comms_reset_room_cronjob="othrys-room-reset",
-        comms_seed_room_cronjob="seed-othrys-room",
-        comms_job_wait_timeout_sec=60.0,
-    )
-    monkeypatch.setattr("ariadne.services.comms.settings", dummy)
-    monkeypatch.setattr(
-        "ariadne.services.comms.JobSpawner",
-        lambda ns, cj, manifest=None: DummySpawner(ns, cj, manifest),
-    )
-    svc = CommsService()
-    result = svc.run_pin_invite(wait=False)
-    assert result["status"] == "queued"
-def test_comms_reset_and_seed(monkeypatch) -> None:
-    dummy = types.SimpleNamespace(
-        comms_namespace="comms",
-        comms_guest_name_cronjob="guest-name-randomizer",
-        comms_pin_invite_cronjob="pin-othrys-invite",
-        comms_reset_room_cronjob="othrys-room-reset",
-        comms_seed_room_cronjob="seed-othrys-room",
-        comms_job_wait_timeout_sec=60.0,
-    )
-    monkeypatch.setattr("ariadne.services.comms.settings", dummy)
-    monkeypatch.setattr(
-        "ariadne.services.comms.JobSpawner",
-        lambda ns, cj, manifest=None: DummySpawner(ns, cj, manifest),
-    )
-    svc = CommsService()
-    assert svc.run_reset_room(wait=False)["status"] == "queued"
-    assert svc.run_seed_room(wait=True)["status"] == "ok"
 def test_mailu_sync_includes_force(monkeypatch) -> None:
@@ -737,6 +637,16 @@ def test_nextcloud_missing_config(monkeypatch) -> None:
         nextcloud_mail_sync_cronjob="",
         nextcloud_mail_sync_wait_timeout_sec=90.0,
         nextcloud_mail_sync_job_ttl_sec=3600,
+        nextcloud_pod_label="app=nextcloud",
+        nextcloud_container="nextcloud",
+        nextcloud_exec_timeout_sec=30.0,
+        nextcloud_db_host="",
+        nextcloud_db_port=5432,
+        nextcloud_db_name="nextcloud",
+        nextcloud_db_user="nextcloud",
+        nextcloud_db_password="",
+        mailu_domain="bstein.dev",
+        mailu_host="mail.bstein.dev",
     )
     monkeypatch.setattr("ariadne.services.nextcloud.settings", dummy)
     svc = NextcloudService()
@@ -744,44 +654,20 @@ def test_nextcloud_missing_config(monkeypatch) -> None:
         svc.sync_mail("alice")
-def test_vault_sync_missing_config(monkeypatch) -> None:
-    dummy = types.SimpleNamespace(
-        vault_namespace="",
-        vault_k8s_auth_cronjob="",
-        vault_oidc_cronjob="",
-        vault_job_wait_timeout_sec=120.0,
-    )
-    monkeypatch.setattr("ariadne.services.vault.settings", dummy)
-    svc = VaultService()
-    with pytest.raises(RuntimeError):
-        svc.sync_k8s_auth(wait=True)
-def test_vault_sync_oidc_missing_config(monkeypatch) -> None:
-    dummy = types.SimpleNamespace(
-        vault_namespace="",
-        vault_k8s_auth_cronjob="",
-        vault_oidc_cronjob="",
-        vault_job_wait_timeout_sec=120.0,
-    )
-    monkeypatch.setattr("ariadne.services.vault.settings", dummy)
-    svc = VaultService()
-    with pytest.raises(RuntimeError):
-        svc.sync_oidc(wait=True)
 def test_wger_sync_missing_inputs(monkeypatch) -> None:
     dummy = types.SimpleNamespace(
         wger_namespace="health",
         wger_user_sync_cronjob="wger-user-sync",
         wger_admin_cronjob="wger-admin-ensure",
         wger_user_sync_wait_timeout_sec=60.0,
+        wger_pod_label="app=wger",
+        wger_container="wger",
+        wger_admin_username="admin",
+        wger_admin_password="pw",
+        wger_admin_email="admin@bstein.dev",
     )
     monkeypatch.setattr("ariadne.services.wger.settings", dummy)
-    monkeypatch.setattr(
-        "ariadne.services.wger.JobSpawner",
-        lambda ns, cj, manifest=None: DummySpawner(ns, cj, manifest),
-    )
+    monkeypatch.setattr("ariadne.services.wger.PodExecutor", lambda *_args, **_kwargs: DummyExecutor())
     svc = WgerService()
     with pytest.raises(RuntimeError):
@@ -796,8 +682,14 @@ def test_wger_sync_missing_config(monkeypatch) -> None:
         wger_user_sync_cronjob="",
         wger_admin_cronjob="wger-admin-ensure",
         wger_user_sync_wait_timeout_sec=60.0,
+        wger_pod_label="app=wger",
+        wger_container="wger",
+        wger_admin_username="admin",
+        wger_admin_password="pw",
+        wger_admin_email="admin@bstein.dev",
     )
     monkeypatch.setattr("ariadne.services.wger.settings", dummy)
+    monkeypatch.setattr("ariadne.services.wger.PodExecutor", lambda *_args, **_kwargs: DummyExecutor())
     svc = WgerService()
     with pytest.raises(RuntimeError):
         svc.sync_user("alice", "email", "pw", wait=True)
@@ -809,34 +701,38 @@ def test_wger_ensure_admin(monkeypatch) -> None:
         wger_user_sync_cronjob="wger-user-sync",
         wger_admin_cronjob="wger-admin-ensure",
         wger_user_sync_wait_timeout_sec=60.0,
+        wger_pod_label="app=wger",
+        wger_container="wger",
+        wger_admin_username="admin",
+        wger_admin_password="pw",
+        wger_admin_email="admin@bstein.dev",
     )
     monkeypatch.setattr("ariadne.services.wger.settings", dummy)
-    monkeypatch.setattr(
-        "ariadne.services.wger.JobSpawner",
-        lambda ns, cj, manifest=None: DummySpawner(ns, cj, manifest),
-    )
+    monkeypatch.setattr("ariadne.services.wger.PodExecutor", lambda *_args, **_kwargs: DummyExecutor())
     svc = WgerService()
     result = svc.ensure_admin(wait=True)
     assert result["status"] == "ok"
-def test_wger_ensure_admin_queue(monkeypatch) -> None:
+def test_wger_ensure_admin_missing_creds(monkeypatch) -> None:
     dummy = types.SimpleNamespace(
         wger_namespace="health",
         wger_user_sync_cronjob="wger-user-sync",
         wger_admin_cronjob="wger-admin-ensure",
         wger_user_sync_wait_timeout_sec=60.0,
+        wger_pod_label="app=wger",
+        wger_container="wger",
+        wger_admin_username="",
+        wger_admin_password="",
+        wger_admin_email="",
     )
     monkeypatch.setattr("ariadne.services.wger.settings", dummy)
-    monkeypatch.setattr(
-        "ariadne.services.wger.JobSpawner",
-        lambda ns, cj, manifest=None: DummySpawner(ns, cj, manifest),
-    )
+    monkeypatch.setattr("ariadne.services.wger.PodExecutor", lambda *_args, **_kwargs: DummyExecutor())
     svc = WgerService()
-    result = svc.ensure_admin(wait=False)
-    assert result["status"] == "queued"
+    result = svc.ensure_admin(wait=True)
+    assert result["status"] == "error"
 def test_wger_ensure_admin_missing_config(monkeypatch) -> None:
@@ -845,47 +741,19 @@ def test_wger_ensure_admin_missing_config(monkeypatch) -> None:
         wger_user_sync_cronjob="wger-user-sync",
         wger_admin_cronjob="",
         wger_user_sync_wait_timeout_sec=60.0,
+        wger_pod_label="app=wger",
+        wger_container="wger",
+        wger_admin_username="admin",
+        wger_admin_password="pw",
+        wger_admin_email="admin@bstein.dev",
     )
     monkeypatch.setattr("ariadne.services.wger.settings", dummy)
+    monkeypatch.setattr("ariadne.services.wger.PodExecutor", lambda *_args, **_kwargs: DummyExecutor())
     svc = WgerService()
     with pytest.raises(RuntimeError):
         svc.ensure_admin(wait=True)
-def test_comms_missing_config(monkeypatch) -> None:
-    dummy = types.SimpleNamespace(
-        comms_namespace="",
-        comms_guest_name_cronjob="guest-name-randomizer",
-        comms_pin_invite_cronjob="pin-othrys-invite",
-        comms_reset_room_cronjob="othrys-room-reset",
-        comms_seed_room_cronjob="seed-othrys-room",
-        comms_job_wait_timeout_sec=60.0,
-    )
-    monkeypatch.setattr("ariadne.services.comms.settings", dummy)
-    svc = CommsService()
-    with pytest.raises(RuntimeError):
-        svc.run_guest_name_randomizer(wait=True)
-def test_comms_missing_config_variants(monkeypatch) -> None:
-    dummy = types.SimpleNamespace(
-        comms_namespace="",
-        comms_guest_name_cronjob="guest-name-randomizer",
-        comms_pin_invite_cronjob="pin-othrys-invite",
-        comms_reset_room_cronjob="othrys-room-reset",
-        comms_seed_room_cronjob="seed-othrys-room",
-        comms_job_wait_timeout_sec=60.0,
-    )
-    monkeypatch.setattr("ariadne.services.comms.settings", dummy)
-    svc = CommsService()
-    with pytest.raises(RuntimeError):
-        svc.run_pin_invite(wait=True)
-    with pytest.raises(RuntimeError):
-        svc.run_reset_room(wait=True)
-    with pytest.raises(RuntimeError):
-        svc.run_seed_room(wait=True)
 def test_mailu_mailbox_exists_handles_error(monkeypatch) -> None:
     dummy_settings = types.SimpleNamespace(
         mailu_sync_url="",
@@ -986,31 +854,47 @@ def test_nextcloud_sync_missing_username(monkeypatch) -> None:
         nextcloud_mail_sync_cronjob="nextcloud-mail-sync",
         nextcloud_mail_sync_wait_timeout_sec=90.0,
         nextcloud_mail_sync_job_ttl_sec=3600,
+        nextcloud_pod_label="app=nextcloud",
+        nextcloud_container="nextcloud",
+        nextcloud_exec_timeout_sec=30.0,
+        nextcloud_db_host="",
+        nextcloud_db_port=5432,
+        nextcloud_db_name="nextcloud",
+        nextcloud_db_user="nextcloud",
+        nextcloud_db_password="",
+        mailu_domain="bstein.dev",
+        mailu_host="mail.bstein.dev",
     )
     monkeypatch.setattr("ariadne.services.nextcloud.settings", dummy)
-    monkeypatch.setattr(
-        "ariadne.services.nextcloud.JobSpawner",
-        lambda ns, cj, manifest=None: DummySpawner(ns, cj, manifest),
-    )
+    monkeypatch.setattr("ariadne.services.nextcloud.PodExecutor", lambda *_args, **_kwargs: DummyExecutor())
     svc = NextcloudService()
     with pytest.raises(RuntimeError):
         svc.sync_mail(" ", wait=True)
-def test_nextcloud_sync_queue(monkeypatch) -> None:
+def test_nextcloud_sync_no_match(monkeypatch) -> None:
     dummy = types.SimpleNamespace(
         nextcloud_namespace="nextcloud",
         nextcloud_mail_sync_cronjob="nextcloud-mail-sync",
         nextcloud_mail_sync_wait_timeout_sec=90.0,
         nextcloud_mail_sync_job_ttl_sec=3600,
+        nextcloud_pod_label="app=nextcloud",
+        nextcloud_container="nextcloud",
+        nextcloud_exec_timeout_sec=30.0,
+        nextcloud_db_host="",
+        nextcloud_db_port=5432,
+        nextcloud_db_name="nextcloud",
+        nextcloud_db_user="nextcloud",
+        nextcloud_db_password="",
+        mailu_domain="bstein.dev",
+        mailu_host="mail.bstein.dev",
     )
     monkeypatch.setattr("ariadne.services.nextcloud.settings", dummy)
-    monkeypatch.setattr(
-        "ariadne.services.nextcloud.JobSpawner",
-        lambda ns, cj, manifest=None: DummySpawner(ns, cj, manifest),
-    )
+    monkeypatch.setattr("ariadne.services.nextcloud.keycloak_admin.ready", lambda: True)
+    monkeypatch.setattr("ariadne.services.nextcloud.keycloak_admin.find_user", lambda *_args, **_kwargs: None)
+    monkeypatch.setattr("ariadne.services.nextcloud.PodExecutor", lambda *_args, **_kwargs: DummyExecutor())
     svc = NextcloudService()
     result = svc.sync_mail("alice", wait=False)
-    assert result["status"] == "queued"
+    assert result["status"] == "ok"

188 tests/test_vault.py Normal file

@@ -0,0 +1,188 @@
from __future__ import annotations
import types
from ariadne.services import vault as vault_module
from ariadne.services.vault import VaultService, _build_policy
class DummyResponse:
def __init__(self, payload=None, status_code=200):
self._payload = payload or {}
self.status_code = status_code
self.text = ""
def json(self):
return self._payload
def raise_for_status(self):
if self.status_code >= 400:
raise RuntimeError("status error")
def test_build_policy() -> None:
policy = _build_policy("foo/*", "bar/*")
assert "kv/data/atlas/foo/*" in policy
assert "kv/data/atlas/bar/*" in policy
def test_vault_sync_k8s_auth_success(monkeypatch) -> None:
dummy_settings = types.SimpleNamespace(
vault_addr="http://vault",
vault_token="token",
vault_k8s_role="vault",
vault_k8s_role_ttl="1h",
vault_k8s_token_reviewer_jwt="jwt",
vault_k8s_token_reviewer_jwt_file="",
vault_oidc_discovery_url="",
vault_oidc_client_id="",
vault_oidc_client_secret="",
vault_oidc_default_role="admin",
vault_oidc_scopes="",
vault_oidc_user_claim="",
vault_oidc_groups_claim="",
vault_oidc_token_policies="",
vault_oidc_admin_group="",
vault_oidc_admin_policies="",
vault_oidc_dev_group="",
vault_oidc_dev_policies="",
vault_oidc_user_group="",
vault_oidc_user_policies="",
vault_oidc_redirect_uris="",
vault_oidc_bound_audiences="",
vault_oidc_bound_claims_type="",
k8s_api_timeout_sec=5.0,
)
monkeypatch.setattr(vault_module, "settings", dummy_settings)
calls: list[tuple[str, str]] = []
def fake_request(self, method: str, path: str, json=None):
calls.append((method, path))
if path == "/v1/sys/health":
return DummyResponse({"initialized": True, "sealed": False})
if path == "/v1/sys/auth":
return DummyResponse({})
return DummyResponse({})
monkeypatch.setattr(vault_module.VaultClient, "request", fake_request)
svc = VaultService()
result = svc.sync_k8s_auth()
assert result["status"] == "ok"
assert any(path == "/v1/auth/kubernetes/config" for _, path in calls)
def test_vault_sync_oidc_success(monkeypatch) -> None:
dummy_settings = types.SimpleNamespace(
vault_addr="http://vault",
vault_token="token",
vault_k8s_role="vault",
vault_k8s_role_ttl="1h",
vault_k8s_token_reviewer_jwt="jwt",
vault_k8s_token_reviewer_jwt_file="",
vault_oidc_discovery_url="http://oidc",
vault_oidc_client_id="client",
vault_oidc_client_secret="secret",
vault_oidc_default_role="admin",
vault_oidc_scopes="openid profile",
vault_oidc_user_claim="preferred_username",
vault_oidc_groups_claim="groups",
vault_oidc_token_policies="",
vault_oidc_admin_group="admin",
vault_oidc_admin_policies="default,vault-admin",
vault_oidc_dev_group="dev",
vault_oidc_dev_policies="default,dev-kv",
vault_oidc_user_group="",
vault_oidc_user_policies="",
vault_oidc_redirect_uris="https://secret.bstein.dev/ui/vault/auth/oidc/oidc/callback",
vault_oidc_bound_audiences="",
vault_oidc_bound_claims_type="string",
k8s_api_timeout_sec=5.0,
)
monkeypatch.setattr(vault_module, "settings", dummy_settings)
def fake_request(self, method: str, path: str, json=None):
if path == "/v1/sys/health":
return DummyResponse({"initialized": True, "sealed": False})
if path == "/v1/sys/auth":
return DummyResponse({})
return DummyResponse({})
monkeypatch.setattr(vault_module.VaultClient, "request", fake_request)
svc = VaultService()
result = svc.sync_oidc()
assert result["status"] == "ok"
def test_vault_sync_oidc_missing_discovery(monkeypatch) -> None:
dummy_settings = types.SimpleNamespace(
vault_addr="http://vault",
vault_token="token",
vault_k8s_role="vault",
vault_k8s_role_ttl="1h",
vault_k8s_token_reviewer_jwt="jwt",
vault_k8s_token_reviewer_jwt_file="",
vault_oidc_discovery_url="",
vault_oidc_client_id="client",
vault_oidc_client_secret="secret",
vault_oidc_default_role="admin",
vault_oidc_scopes="openid profile",
vault_oidc_user_claim="preferred_username",
vault_oidc_groups_claim="groups",
vault_oidc_token_policies="",
vault_oidc_admin_group="admin",
vault_oidc_admin_policies="default,vault-admin",
vault_oidc_dev_group="dev",
vault_oidc_dev_policies="default,dev-kv",
vault_oidc_user_group="",
vault_oidc_user_policies="",
vault_oidc_redirect_uris="https://secret.bstein.dev/ui/vault/auth/oidc/oidc/callback",
vault_oidc_bound_audiences="",
vault_oidc_bound_claims_type="string",
k8s_api_timeout_sec=5.0,
)
monkeypatch.setattr(vault_module, "settings", dummy_settings)
monkeypatch.setattr(vault_module.VaultClient, "request", lambda *args, **kwargs: DummyResponse({"initialized": True, "sealed": False}))
svc = VaultService()
result = svc.sync_oidc()
assert result["status"] == "error"
def test_vault_ensure_token_login(monkeypatch) -> None:
dummy_settings = types.SimpleNamespace(
vault_addr="http://vault",
vault_token="",
vault_k8s_role="vault",
vault_k8s_token_reviewer_jwt="jwt",
vault_k8s_token_reviewer_jwt_file="",
vault_oidc_discovery_url="",
vault_oidc_client_id="",
vault_oidc_client_secret="",
vault_oidc_default_role="admin",
vault_oidc_scopes="",
vault_oidc_user_claim="",
vault_oidc_groups_claim="",
vault_oidc_token_policies="",
vault_oidc_admin_group="",
vault_oidc_admin_policies="",
vault_oidc_dev_group="",
vault_oidc_dev_policies="",
vault_oidc_user_group="",
vault_oidc_user_policies="",
vault_oidc_redirect_uris="",
vault_oidc_bound_audiences="",
vault_oidc_bound_claims_type="",
k8s_api_timeout_sec=5.0,
)
monkeypatch.setattr(vault_module, "settings", dummy_settings)
def fake_post(_url, json=None, timeout=None):
return DummyResponse({"auth": {"client_token": "tok"}})
monkeypatch.setattr(vault_module.httpx, "post", fake_post)
svc = VaultService()
assert svc._ensure_token() == "tok"
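test_build_policy only fixes the `kv/data/atlas/` path prefixes, and test_vault_ensure_token_login only fixes the Kubernetes-login fallback, so the following is a sketch of those two shapes rather than the actual module code; the HCL wording and the login endpoint path are assumptions beyond what the tests assert.

import httpx

def build_policy(*paths: str) -> str:
    # One read-capability stanza per kv/data/atlas/<path> entry.
    rules = [
        'path "kv/data/atlas/%s" {\n  capabilities = ["read"]\n}' % path
        for path in paths
    ]
    return "\n\n".join(rules)

def ensure_token(addr: str, token: str, jwt: str, role: str, timeout: float) -> str:
    # Prefer a configured token; otherwise log in via the Kubernetes auth method.
    if token:
        return token
    response = httpx.post(
        f"{addr}/v1/auth/kubernetes/login",
        json={"jwt": jwt, "role": role},
        timeout=timeout,
    )
    response.raise_for_status()
    return response.json()["auth"]["client_token"]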


@@ -201,11 +201,11 @@ def test_vaultwarden_email_for_user_missing_username(monkeypatch) -> None:
     assert vaultwarden_sync._vaultwarden_email_for_user({"username": " "}) == ""
-def test_vaultwarden_email_for_user_rejects_external_email(monkeypatch) -> None:
+def test_vaultwarden_email_for_user_defaults_mailu(monkeypatch) -> None:
     dummy_settings = types.SimpleNamespace(mailu_domain="bstein.dev")
     monkeypatch.setattr(vaultwarden_sync, "settings", dummy_settings)
     user = {"username": "alice", "email": "alice@example.com", "attributes": {}}
-    assert vaultwarden_sync._vaultwarden_email_for_user(user) == ""
+    assert vaultwarden_sync._vaultwarden_email_for_user(user) == "alice@bstein.dev"
 def test_set_user_attribute_if_missing_skips_existing(monkeypatch) -> None:
@@ -368,7 +368,7 @@ def test_vaultwarden_sync_set_attribute_failure_on_error(monkeypatch) -> None:
     assert summary.failures == 1
-def test_vaultwarden_sync_skips_missing_email(monkeypatch) -> None:
+def test_vaultwarden_sync_defaults_mailu_email(monkeypatch) -> None:
     dummy_settings = types.SimpleNamespace(
         mailu_domain="bstein.dev",
         vaultwarden_retry_cooldown_sec=0,
@@ -381,6 +381,8 @@ def test_vaultwarden_sync_skips_missing_email(monkeypatch) -> None:
     )
     monkeypatch.setattr(vaultwarden_sync, "keycloak_admin", dummy)
     monkeypatch.setattr(vaultwarden_sync.mailu, "mailbox_exists", lambda email: True)
+    monkeypatch.setattr(vaultwarden_sync.vaultwarden, "invite_user", lambda email: VaultwardenInvite(True, "invited"))
     summary = vaultwarden_sync.run_vaultwarden_sync()
-    assert summary.skipped == 1
+    assert summary.processed == 1
+    assert summary.created_or_present == 1
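The renamed tests encode the new defaulting rule: a blank username still yields no address, while any named user now gets username@mailu_domain even when Keycloak holds an external email. A minimal sketch of that rule; the real helper in ariadne/services/vaultwarden_sync.py reads the domain from settings and may consult more attributes.

def vaultwarden_email_for_user(user: dict, mailu_domain: str) -> str:
    # Default every named user onto the Mailu domain; external emails are ignored.
    username = (user.get("username") or "").strip()
    if not username:
        return ""
    return f"{username}@{mailu_domain}"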