From 8ee7d046d22267d0995e872af44108085da64909 Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Tue, 13 Jan 2026 19:01:07 -0300 Subject: [PATCH 001/270] ops: prepare vault-consumption branch --- clusters/atlas/flux-system/gotk-sync.yaml | 2 +- scripts/test_atlas_user_cleanup.py | 281 ++++++++++++++++++++-- 2 files changed, 267 insertions(+), 16 deletions(-) diff --git a/clusters/atlas/flux-system/gotk-sync.yaml b/clusters/atlas/flux-system/gotk-sync.yaml index 713e739..59cabae 100644 --- a/clusters/atlas/flux-system/gotk-sync.yaml +++ b/clusters/atlas/flux-system/gotk-sync.yaml @@ -8,7 +8,7 @@ metadata: spec: interval: 1m0s ref: - branch: feature/sso-hardening + branch: feature/vault-consumption secretRef: name: flux-system-gitea url: ssh://git@scm.bstein.dev:2242/bstein/titan-iac.git diff --git a/scripts/test_atlas_user_cleanup.py b/scripts/test_atlas_user_cleanup.py index 41ba708..2acf8a7 100755 --- a/scripts/test_atlas_user_cleanup.py +++ b/scripts/test_atlas_user_cleanup.py @@ -7,6 +7,8 @@ test accounts created via the bstein-dev-home onboarding portal. 
Targets (best-effort): - Keycloak users in realm "atlas" - Atlas portal Postgres rows (access_requests + dependent tables) + - Mailu mailboxes created for test users + - Nextcloud Mail accounts created for test users - Vaultwarden users/invites created by the portal Safety: @@ -56,6 +58,19 @@ class VaultwardenUser: status: int +@dataclass(frozen=True) +class MailuUser: + email: str + localpart: str + domain: str + + +@dataclass(frozen=True) +class NextcloudMailAccount: + account_id: str + email: str + + def _run(cmd: list[str], *, input_bytes: bytes | None = None) -> str: proc = subprocess.run( cmd, @@ -70,6 +85,19 @@ def _run(cmd: list[str], *, input_bytes: bytes | None = None) -> str: return proc.stdout.decode("utf-8", errors="replace") +def _run_capture(cmd: list[str], *, input_bytes: bytes | None = None) -> tuple[int, str, str]: + proc = subprocess.run( + cmd, + input=input_bytes, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + check=False, + ) + stdout = proc.stdout.decode("utf-8", errors="replace") + stderr = proc.stderr.decode("utf-8", errors="replace") + return proc.returncode, stdout, stderr + + def _kubectl_get_secret_value(namespace: str, name: str, key: str) -> str: raw_b64 = _run( [ @@ -110,6 +138,21 @@ def _kubectl_first_pod(namespace: str) -> str: return pod_name +def _kubectl_exec(namespace: str, target: str, cmd: list[str]) -> tuple[int, str, str]: + return _run_capture( + [ + "kubectl", + "-n", + namespace, + "exec", + "-i", + target, + "--", + *cmd, + ] + ) + + def _validate_prefixes(prefixes: list[str]) -> list[str]: cleaned: list[str] = [] for prefix in prefixes: @@ -187,6 +230,62 @@ def _keycloak_delete_user(server: str, realm: str, token: str, user_id: str) -> raise +def _sql_quote(value: str) -> str: + return "'" + value.replace("'", "''") + "'" + + +def _psql_exec(db_name: str, sql: str, *, user: str = "postgres") -> str: + postgres_pod = _kubectl_first_pod("postgres") + return _run( + [ + "kubectl", + "-n", + "postgres", + "exec", + 
"-i", + postgres_pod, + "--", + "psql", + "-U", + user, + "-d", + db_name, + "-c", + sql, + ] + ) + + +def _psql_tsv(db_name: str, sql: str, *, user: str = "postgres") -> list[list[str]]: + postgres_pod = _kubectl_first_pod("postgres") + out = _run( + [ + "kubectl", + "-n", + "postgres", + "exec", + "-i", + postgres_pod, + "--", + "psql", + "-U", + user, + "-d", + db_name, + "-At", + "-F", + "\t", + "-c", + sql, + ] + ) + rows: list[list[str]] = [] + for line in out.splitlines(): + parts = line.split("\t") + rows.append(parts) + return rows + + def _psql_json(portal_db_url: str, sql: str) -> list[dict[str, Any]]: postgres_pod = _kubectl_first_pod("postgres") out = _run( @@ -256,6 +355,89 @@ def _portal_delete_requests(portal_db_url: str, prefixes: list[str]) -> int: return int(match.group(1)) if match else 0 +def _mailu_list_users(prefixes: list[str], domain: str, db_name: str, protected: set[str]) -> list[MailuUser]: + if not prefixes or not domain: + return [] + clauses = " OR ".join([f"localpart LIKE '{p}%'" for p in prefixes]) + sql = ( + 'SELECT email, localpart, domain_name ' + 'FROM "user" ' + f"WHERE domain_name = {_sql_quote(domain)} AND ({clauses}) " + "ORDER BY email;" + ) + rows = _psql_tsv(db_name, sql) + users: list[MailuUser] = [] + for row in rows: + if len(row) < 3: + continue + email = row[0].strip() + if not email or email in protected: + continue + users.append(MailuUser(email=email, localpart=row[1].strip(), domain=row[2].strip())) + return users + + +def _mailu_delete_users(db_name: str, emails: list[str]) -> int: + if not emails: + return 0 + email_list = ",".join(_sql_quote(e) for e in emails) + sql = f'DELETE FROM "user" WHERE email IN ({email_list});' + out = _psql_exec(db_name, sql) + match = re.search(r"DELETE\\s+(\\d+)", out) + return int(match.group(1)) if match else 0 + + +_NEXTCLOUD_ACCOUNT_RE = re.compile(r"^Account\\s+(\\d+):") +_EMAIL_RE = re.compile(r"[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+") + + +def _nextcloud_exec(cmd: list[str]) -> 
tuple[int, str, str]: + namespace = os.getenv("NEXTCLOUD_NAMESPACE", "nextcloud").strip() or "nextcloud" + target = os.getenv("NEXTCLOUD_EXEC_TARGET", "deploy/nextcloud").strip() or "deploy/nextcloud" + return _kubectl_exec(namespace, target, cmd) + + +def _parse_nextcloud_mail_accounts(export_output: str) -> list[NextcloudMailAccount]: + accounts: list[NextcloudMailAccount] = [] + current_id = "" + for line in export_output.splitlines(): + line = line.strip() + if not line: + continue + match = _NEXTCLOUD_ACCOUNT_RE.match(line) + if match: + current_id = match.group(1) + continue + if not current_id or "@" not in line: + continue + email_match = _EMAIL_RE.search(line) + if not email_match: + continue + accounts.append(NextcloudMailAccount(account_id=current_id, email=email_match.group(0))) + current_id = "" + return accounts + + +def _nextcloud_list_mail_accounts(username: str) -> list[NextcloudMailAccount]: + occ_path = os.getenv("NEXTCLOUD_OCC_PATH", "/var/www/html/occ").strip() or "/var/www/html/occ" + rc, out, err = _nextcloud_exec(["php", occ_path, "mail:account:export", username]) + if rc != 0: + message = (err or out).strip() + lowered = message.lower() + if any(token in lowered for token in ("not found", "does not exist", "no such user", "unknown user")): + return [] + raise RuntimeError(f"nextcloud mail export failed for {username}: {message}") + return _parse_nextcloud_mail_accounts(out) + + +def _nextcloud_delete_mail_account(account_id: str) -> None: + occ_path = os.getenv("NEXTCLOUD_OCC_PATH", "/var/www/html/occ").strip() or "/var/www/html/occ" + rc, out, err = _nextcloud_exec(["php", occ_path, "mail:account:delete", "-q", account_id]) + if rc != 0: + message = (err or out).strip() + raise RuntimeError(f"nextcloud mail delete failed for account {account_id}: {message}") + + def _vaultwarden_admin_cookie(admin_token: str, base_url: str) -> str: data = urllib.parse.urlencode({"token": admin_token}).encode("utf-8") req = 
urllib.request.Request(f"{base_url}/admin", data=data, method="POST") @@ -356,6 +538,8 @@ def main() -> int: ), ) parser.add_argument("--skip-keycloak", action="store_true", help="Skip Keycloak user deletion.") + parser.add_argument("--skip-mailu", action="store_true", help="Skip Mailu mailbox cleanup.") + parser.add_argument("--skip-nextcloud-mail", action="store_true", help="Skip Nextcloud Mail account cleanup.") parser.add_argument("--skip-portal-db", action="store_true", help="Skip portal DB cleanup.") parser.add_argument("--skip-vaultwarden", action="store_true", help="Skip Vaultwarden cleanup.") parser.add_argument( @@ -364,6 +548,18 @@ def main() -> int: default=[], help="Keycloak usernames that must never be deleted (repeatable).", ) + parser.add_argument( + "--protect-mailu-email", + action="append", + default=[], + help="Mailu emails that must never be deleted (repeatable).", + ) + parser.add_argument( + "--protect-nextcloud-username", + action="append", + default=[], + help="Nextcloud usernames that must never be touched (repeatable).", + ) parser.add_argument( "--protect-vaultwarden-email", action="append", @@ -376,7 +572,11 @@ def main() -> int: apply = bool(args.apply) expected_confirm = ",".join(prefixes) protected_keycloak = {"bstein", "robotuser", *[u.strip() for u in args.protect_keycloak_username if u.strip()]} + protected_mailu = {e.strip() for e in args.protect_mailu_email if e.strip()} + protected_nextcloud = {u.strip() for u in args.protect_nextcloud_username if u.strip()} protected_vaultwarden = {e.strip() for e in args.protect_vaultwarden_email if e.strip()} + mailu_domain = os.getenv("MAILU_DOMAIN", "bstein.dev").strip() or "bstein.dev" + mailu_db_name = os.getenv("MAILU_DB_NAME", "mailu").strip() or "mailu" if apply and args.confirm != expected_confirm: raise SystemExit( @@ -388,23 +588,29 @@ def main() -> int: print("mode:", "APPLY (destructive)" if apply else "DRY RUN (no changes)") if protected_keycloak: print("protected keycloak 
usernames:", ", ".join(sorted(protected_keycloak))) + if protected_mailu: + print("protected mailu emails:", ", ".join(sorted(protected_mailu))) + if protected_nextcloud: + print("protected nextcloud usernames:", ", ".join(sorted(protected_nextcloud))) if protected_vaultwarden: print("protected vaultwarden emails:", ", ".join(sorted(protected_vaultwarden))) print() + portal_requests: list[PortalRequestRow] = [] if not args.skip_portal_db: portal_db_url = _kubectl_get_secret_value("bstein-dev-home", "atlas-portal-db", "PORTAL_DATABASE_URL") - requests = _portal_list_requests(portal_db_url, prefixes) - print(f"Portal DB: {len(requests)} access_requests matched") - for row in requests[:50]: + portal_requests = _portal_list_requests(portal_db_url, prefixes) + print(f"Portal DB: {len(portal_requests)} access_requests matched") + for row in portal_requests[:50]: print(f" {row.request_code}\t{row.status}\t{row.username}") - if len(requests) > 50: - print(f" ... and {len(requests) - 50} more") - if apply and requests: + if len(portal_requests) > 50: + print(f" ... 
and {len(portal_requests) - 50} more") + if apply and portal_requests: deleted = _portal_delete_requests(portal_db_url, prefixes) print(f"Portal DB: deleted {deleted} access_requests (cascade removes tasks/steps/artifacts).") print() + keycloak_users: list[KeycloakUser] = [] if not args.skip_keycloak: kc_server = os.getenv("KEYCLOAK_PUBLIC_URL", "https://sso.bstein.dev").rstrip("/") kc_realm = os.getenv("KEYCLOAK_REALM", "atlas") @@ -421,18 +627,63 @@ def main() -> int: if user.username in protected_keycloak: continue found[user.user_id] = user - users = list(found.values()) - users.sort(key=lambda u: u.username) - print(f"Keycloak: {len(users)} users matched") - for user in users[:50]: + keycloak_users = list(found.values()) + keycloak_users.sort(key=lambda u: u.username) + print(f"Keycloak: {len(keycloak_users)} users matched") + for user in keycloak_users[:50]: email = user.email or "-" print(f" {user.username}\t{email}\t{user.user_id}") - if len(users) > 50: - print(f" ... and {len(users) - 50} more") - if apply and users: - for user in users: + if len(keycloak_users) > 50: + print(f" ... and {len(keycloak_users) - 50} more") + if apply and keycloak_users: + for user in keycloak_users: _keycloak_delete_user(kc_server, kc_realm, token, user.user_id) - print(f"Keycloak: deleted {len(users)} users.") + print(f"Keycloak: deleted {len(keycloak_users)} users.") + print() + + if not args.skip_mailu: + mailu_users = _mailu_list_users(prefixes, mailu_domain, mailu_db_name, protected_mailu) + print(f"Mailu: {len(mailu_users)} mailboxes matched (domain={mailu_domain})") + for user in mailu_users[:50]: + print(f" {user.email}\t{user.localpart}\t{user.domain}") + if len(mailu_users) > 50: + print(f" ... 
and {len(mailu_users) - 50} more") + if apply and mailu_users: + deleted = _mailu_delete_users(mailu_db_name, [u.email for u in mailu_users]) + print(f"Mailu: deleted {deleted} mailboxes.") + print() + + if not args.skip_nextcloud_mail: + nextcloud_usernames = {row.username for row in portal_requests if row.username} + nextcloud_usernames.update({u.username for u in keycloak_users if u.username}) + nextcloud_usernames = {u for u in nextcloud_usernames if _starts_with_any(u, prefixes)} + nextcloud_usernames = {u for u in nextcloud_usernames if u not in protected_nextcloud} + + matches: list[tuple[str, NextcloudMailAccount]] = [] + for username in sorted(nextcloud_usernames): + accounts = _nextcloud_list_mail_accounts(username) + for account in accounts: + email = account.email.strip() + if not email: + continue + if not email.lower().endswith(f"@{mailu_domain.lower()}"): + continue + localpart = email.split("@", 1)[0] + if not _starts_with_any(localpart, prefixes): + continue + if email in protected_mailu: + continue + matches.append((username, account)) + + print(f"Nextcloud Mail: {len(matches)} accounts matched") + for username, account in matches[:50]: + print(f" {username}\t{account.account_id}\t{account.email}") + if len(matches) > 50: + print(f" ... 
and {len(matches) - 50} more") + if apply and matches: + for _, account in matches: + _nextcloud_delete_mail_account(account.account_id) + print(f"Nextcloud Mail: deleted {len(matches)} accounts.") print() if not args.skip_vaultwarden: From 4602656578dbd0c5cadd4902232d4b0ae5c05ce9 Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Tue, 13 Jan 2026 19:29:14 -0300 Subject: [PATCH 002/270] vault: prep helm releases and image pins --- .../atlas/applications/kustomization.yaml | 13 - .../atlas/flux-system/gotk-components.yaml | 1 + clusters/atlas/platform/kustomization.yaml | 8 - infrastructure/metallb/helmrelease.yaml | 47 + infrastructure/metallb/kustomization.yaml | 5 +- infrastructure/metallb/metallb-rendered.yaml | 2411 ----------------- .../metallb/patches/node-placement.yaml | 27 - .../metallb/patches/speaker-loglevel.yaml | 15 - .../cert-manager/letsencrypt-prod.yaml | 1 + .../sources/cert-manager/letsencrypt.yaml | 1 + infrastructure/sources/helm/ananace.yaml | 9 + .../sources/helm/kustomization.yaml | 2 + infrastructure/sources/helm/metallb.yaml | 9 + services/ai-llm/deployment.yaml | 4 +- services/bstein-dev-home/backend-service.yaml | 1 + .../bstein-dev-home/frontend-service.yaml | 1 + services/bstein-dev-home/namespace.yaml | 1 + services/comms/comms-secrets-ensure-job.yaml | 2 +- services/comms/element-call-deployment.yaml | 2 +- services/comms/element-rendered.yaml | 202 -- services/comms/helmrelease.yaml | 255 ++ .../knowledge/catalog/atlas-summary.json | 8 +- services/comms/knowledge/catalog/atlas.json | 641 +++-- services/comms/knowledge/catalog/atlas.yaml | 462 ++-- .../comms/knowledge/diagrams/atlas-http.mmd | 36 +- services/comms/kustomization.yaml | 6 +- .../mas-admin-client-secret-ensure-job.yaml | 2 +- services/comms/mas-db-ensure-job.yaml | 2 +- .../synapse-deployment-strategy-patch.yaml | 11 - services/comms/synapse-rendered.yaml | 895 ------ .../comms/synapse-signingkey-ensure-job.yaml | 2 +- services/comms/values-element.yaml | 59 - 
services/comms/values-synapse.yaml | 132 - .../crypto/xmr-miner/xmrig-daemonset.yaml | 3 +- services/keycloak/mas-secrets-ensure-job.yaml | 2 +- services/mailu/vip-controller.yaml | 2 +- services/monitoring/namespace.yaml | 3 +- services/nextcloud/collabora.yaml | 2 +- services/pegasus/deployment.yaml | 1 + 39 files changed, 1011 insertions(+), 4275 deletions(-) delete mode 100644 clusters/atlas/applications/kustomization.yaml delete mode 100644 clusters/atlas/platform/kustomization.yaml create mode 100644 infrastructure/metallb/helmrelease.yaml delete mode 100644 infrastructure/metallb/metallb-rendered.yaml delete mode 100644 infrastructure/metallb/patches/node-placement.yaml delete mode 100644 infrastructure/metallb/patches/speaker-loglevel.yaml create mode 100644 infrastructure/sources/helm/ananace.yaml create mode 100644 infrastructure/sources/helm/metallb.yaml delete mode 100644 services/comms/element-rendered.yaml create mode 100644 services/comms/helmrelease.yaml delete mode 100644 services/comms/synapse-deployment-strategy-patch.yaml delete mode 100644 services/comms/synapse-rendered.yaml delete mode 100644 services/comms/values-element.yaml delete mode 100644 services/comms/values-synapse.yaml diff --git a/clusters/atlas/applications/kustomization.yaml b/clusters/atlas/applications/kustomization.yaml deleted file mode 100644 index ed6d795..0000000 --- a/clusters/atlas/applications/kustomization.yaml +++ /dev/null @@ -1,13 +0,0 @@ -# clusters/atlas/applications/kustomization.yaml -apiVersion: kustomize.config.k8s.io/v1beta1 -kind: Kustomization -resources: - - ../../services/crypto - - ../../services/gitea - - ../../services/jellyfin - - ../../services/comms - - ../../services/monitoring - - ../../services/logging - - ../../services/pegasus - - ../../services/vault - - ../../services/bstein-dev-home diff --git a/clusters/atlas/flux-system/gotk-components.yaml b/clusters/atlas/flux-system/gotk-components.yaml index 6c475ff..7d56afa 100644 --- 
a/clusters/atlas/flux-system/gotk-components.yaml +++ b/clusters/atlas/flux-system/gotk-components.yaml @@ -1,3 +1,4 @@ +# clusters/atlas/flux-system/gotk-components.yaml --- # This manifest was generated by flux. DO NOT EDIT. # Flux Version: v2.7.5 diff --git a/clusters/atlas/platform/kustomization.yaml b/clusters/atlas/platform/kustomization.yaml deleted file mode 100644 index 43fa993..0000000 --- a/clusters/atlas/platform/kustomization.yaml +++ /dev/null @@ -1,8 +0,0 @@ -# clusters/atlas/platform/kustomization.yaml -apiVersion: kustomize.config.k8s.io/v1beta1 -kind: Kustomization -resources: - - ../../../infrastructure/modules/base - - ../../../infrastructure/modules/profiles/atlas-ha - - ../../../infrastructure/sources/cert-manager/letsencrypt.yaml - - ../../../infrastructure/metallb diff --git a/infrastructure/metallb/helmrelease.yaml b/infrastructure/metallb/helmrelease.yaml new file mode 100644 index 0000000..6298394 --- /dev/null +++ b/infrastructure/metallb/helmrelease.yaml @@ -0,0 +1,47 @@ +# infrastructure/metallb/helmrelease.yaml +apiVersion: helm.toolkit.fluxcd.io/v2 +kind: HelmRelease +metadata: + name: metallb + namespace: metallb-system +spec: + interval: 30m + chart: + spec: + chart: metallb + version: 0.15.3 + sourceRef: + kind: HelmRepository + name: metallb + namespace: flux-system + install: + crds: CreateReplace + remediation: { retries: 3 } + timeout: 10m + upgrade: + crds: CreateReplace + remediation: + retries: 3 + remediateLastFailure: true + cleanupOnFail: true + timeout: 10m + values: + loadBalancerClass: metallb + prometheus: + metricsPort: 7472 + controller: + logLevel: info + webhookMode: enabled + tlsMinVersion: VersionTLS12 + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: hardware + operator: In + values: + - rpi4 + - rpi5 + speaker: + logLevel: info diff --git a/infrastructure/metallb/kustomization.yaml b/infrastructure/metallb/kustomization.yaml index 
1a1452c..bfc20a6 100644 --- a/infrastructure/metallb/kustomization.yaml +++ b/infrastructure/metallb/kustomization.yaml @@ -3,8 +3,5 @@ apiVersion: kustomize.config.k8s.io/v1beta1 kind: Kustomization resources: - namespace.yaml - - metallb-rendered.yaml + - helmrelease.yaml - ippool.yaml -patchesStrategicMerge: - - patches/node-placement.yaml - - patches/speaker-loglevel.yaml diff --git a/infrastructure/metallb/metallb-rendered.yaml b/infrastructure/metallb/metallb-rendered.yaml deleted file mode 100644 index 0f8ad10..0000000 --- a/infrastructure/metallb/metallb-rendered.yaml +++ /dev/null @@ -1,2411 +0,0 @@ ---- -# Source: metallb/templates/service-accounts.yaml -apiVersion: v1 -kind: ServiceAccount -metadata: - name: metallb-controller - namespace: "metallb-system" - labels: - helm.sh/chart: metallb-0.15.3 - app.kubernetes.io/name: metallb - app.kubernetes.io/instance: metallb - app.kubernetes.io/version: "v0.15.3" - app.kubernetes.io/managed-by: Helm - app.kubernetes.io/component: controller ---- -# Source: metallb/templates/service-accounts.yaml -apiVersion: v1 -kind: ServiceAccount -metadata: - name: metallb-speaker - namespace: "metallb-system" - labels: - helm.sh/chart: metallb-0.15.3 - app.kubernetes.io/name: metallb - app.kubernetes.io/instance: metallb - app.kubernetes.io/version: "v0.15.3" - app.kubernetes.io/managed-by: Helm - app.kubernetes.io/component: speaker ---- -# Source: metallb/templates/webhooks.yaml -apiVersion: v1 -kind: Secret -metadata: - name: metallb-webhook-cert - namespace: "metallb-system" - labels: - helm.sh/chart: metallb-0.15.3 - app.kubernetes.io/name: metallb - app.kubernetes.io/instance: metallb - app.kubernetes.io/version: "v0.15.3" - app.kubernetes.io/managed-by: Helm ---- -# Source: metallb/templates/exclude-l2-config.yaml -apiVersion: v1 -kind: ConfigMap -metadata: - name: metallb-excludel2 - namespace: "metallb-system" - labels: - helm.sh/chart: metallb-0.15.3 - app.kubernetes.io/name: metallb - app.kubernetes.io/instance: 
metallb - app.kubernetes.io/version: "v0.15.3" - app.kubernetes.io/managed-by: Helm -data: - excludel2.yaml: | - announcedInterfacesToExclude: - - ^docker.* - - ^cbr.* - - ^dummy.* - - ^virbr.* - - ^lxcbr.* - - ^veth.* - - ^lo$ - - ^cali.* - - ^tunl.* - - ^flannel.* - - ^kube-ipvs.* - - ^cni.* - - ^nodelocaldns.* - - ^lxc.* ---- -# Source: metallb/templates/speaker.yaml -# FRR expects to have these files owned by frr:frr on startup. -# Having them in a ConfigMap allows us to modify behaviors: for example enabling more daemons on startup. -apiVersion: v1 -kind: ConfigMap -metadata: - name: metallb-frr-startup - namespace: "metallb-system" - labels: - helm.sh/chart: metallb-0.15.3 - app.kubernetes.io/name: metallb - app.kubernetes.io/instance: metallb - app.kubernetes.io/version: "v0.15.3" - app.kubernetes.io/managed-by: Helm - app.kubernetes.io/component: speaker -data: - daemons: | - # This file tells the frr package which daemons to start. - # - # Sample configurations for these daemons can be found in - # /usr/share/doc/frr/examples/. - # - # ATTENTION: - # - # When activating a daemon for the first time, a config file, even if it is - # empty, has to be present *and* be owned by the user and group "frr", else - # the daemon will not be started by /etc/init.d/frr. The permissions should - # be u=rw,g=r,o=. - # When using "vtysh" such a config file is also needed. It should be owned by - # group "frrvty" and set to ug=rw,o= though. Check /etc/pam.d/frr, too. - # - # The watchfrr and zebra daemons are always started. - # - bgpd=yes - ospfd=no - ospf6d=no - ripd=no - ripngd=no - isisd=no - pimd=no - ldpd=no - nhrpd=no - eigrpd=no - babeld=no - sharpd=no - pbrd=no - bfdd=yes - fabricd=no - vrrpd=no - - # - # If this option is set the /etc/init.d/frr script automatically loads - # the config via "vtysh -b" when the servers are started. - # Check /etc/pam.d/frr if you intend to use "vtysh"! 
- # - vtysh_enable=yes - zebra_options=" -A 127.0.0.1 -s 90000000 --limit-fds 100000" - bgpd_options=" -A 127.0.0.1 -p 0 --limit-fds 100000" - ospfd_options=" -A 127.0.0.1" - ospf6d_options=" -A ::1" - ripd_options=" -A 127.0.0.1" - ripngd_options=" -A ::1" - isisd_options=" -A 127.0.0.1" - pimd_options=" -A 127.0.0.1" - ldpd_options=" -A 127.0.0.1" - nhrpd_options=" -A 127.0.0.1" - eigrpd_options=" -A 127.0.0.1" - babeld_options=" -A 127.0.0.1" - sharpd_options=" -A 127.0.0.1" - pbrd_options=" -A 127.0.0.1" - staticd_options="-A 127.0.0.1 --limit-fds 100000" - bfdd_options=" -A 127.0.0.1 --limit-fds 100000" - fabricd_options="-A 127.0.0.1" - vrrpd_options=" -A 127.0.0.1" - - # configuration profile - # - #frr_profile="traditional" - #frr_profile="datacenter" - - # - # This is the maximum number of FD's that will be available. - # Upon startup this is read by the control files and ulimit - # is called. Uncomment and use a reasonable value for your - # setup if you are expecting a large number of peers in - # say BGP. - #MAX_FDS=1024 - - # The list of daemons to watch is automatically generated by the init script. - #watchfrr_options="" - - # for debugging purposes, you can specify a "wrap" command to start instead - # of starting the daemon directly, e.g. to use valgrind on ospfd: - # ospfd_wrap="/usr/bin/valgrind" - # or you can use "all_wrap" for all daemons, e.g. to use perf record: - # all_wrap="/usr/bin/perf record --call-graph -" - # the normal daemon command is added to this at the end. - vtysh.conf: |+ - service integrated-vtysh-config - frr.conf: |+ - ! This file gets overriden the first time the speaker renders a config. - ! So anything configured here is only temporary. 
- frr version 8.0 - frr defaults traditional - hostname Router - line vty - log file /etc/frr/frr.log informational ---- -# Source: metallb/charts/crds/templates/crds.yaml -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.19.0 - name: bfdprofiles.metallb.io -spec: - group: metallb.io - names: - kind: BFDProfile - listKind: BFDProfileList - plural: bfdprofiles - singular: bfdprofile - scope: Namespaced - versions: - - additionalPrinterColumns: - - jsonPath: .spec.passiveMode - name: Passive Mode - type: boolean - - jsonPath: .spec.transmitInterval - name: Transmit Interval - type: integer - - jsonPath: .spec.receiveInterval - name: Receive Interval - type: integer - - jsonPath: .spec.detectMultiplier - name: Multiplier - type: integer - name: v1beta1 - schema: - openAPIV3Schema: - description: |- - BFDProfile represents the settings of the bfd session that can be - optionally associated with a BGP session. - properties: - apiVersion: - description: |- - APIVersion defines the versioned schema of this representation of an object. - Servers should convert recognized schemas to the latest internal value, and - may reject unrecognized values. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources - type: string - kind: - description: |- - Kind is a string value representing the REST resource this object represents. - Servers may infer this from the endpoint the client submits requests to. - Cannot be updated. - In CamelCase. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - type: string - metadata: - type: object - spec: - description: BFDProfileSpec defines the desired state of BFDProfile. - properties: - detectMultiplier: - description: |- - Configures the detection multiplier to determine - packet loss. 
The remote transmission interval will be multiplied - by this value to determine the connection loss detection timer. - format: int32 - maximum: 255 - minimum: 2 - type: integer - echoInterval: - description: |- - Configures the minimal echo receive transmission - interval that this system is capable of handling in milliseconds. - Defaults to 50ms - format: int32 - maximum: 60000 - minimum: 10 - type: integer - echoMode: - description: |- - Enables or disables the echo transmission mode. - This mode is disabled by default, and not supported on multi - hops setups. - type: boolean - minimumTtl: - description: |- - For multi hop sessions only: configure the minimum - expected TTL for an incoming BFD control packet. - format: int32 - maximum: 254 - minimum: 1 - type: integer - passiveMode: - description: |- - Mark session as passive: a passive session will not - attempt to start the connection and will wait for control packets - from peer before it begins replying. - type: boolean - receiveInterval: - description: |- - The minimum interval that this system is capable of - receiving control packets in milliseconds. - Defaults to 300ms. - format: int32 - maximum: 60000 - minimum: 10 - type: integer - transmitInterval: - description: |- - The minimum transmission interval (less jitter) - that this system wants to use to send BFD control packets in - milliseconds. Defaults to 300ms - format: int32 - maximum: 60000 - minimum: 10 - type: integer - type: object - status: - description: BFDProfileStatus defines the observed state of BFDProfile. 
- type: object - type: object - served: true - storage: true - subresources: - status: {} ---- -# Source: metallb/charts/crds/templates/crds.yaml -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.19.0 - name: bgpadvertisements.metallb.io -spec: - group: metallb.io - names: - kind: BGPAdvertisement - listKind: BGPAdvertisementList - plural: bgpadvertisements - singular: bgpadvertisement - scope: Namespaced - versions: - - additionalPrinterColumns: - - jsonPath: .spec.ipAddressPools - name: IPAddressPools - type: string - - jsonPath: .spec.ipAddressPoolSelectors - name: IPAddressPool Selectors - type: string - - jsonPath: .spec.peers - name: Peers - type: string - - jsonPath: .spec.nodeSelectors - name: Node Selectors - priority: 10 - type: string - name: v1beta1 - schema: - openAPIV3Schema: - description: |- - BGPAdvertisement allows to advertise the IPs coming - from the selected IPAddressPools via BGP, setting the parameters of the - BGP Advertisement. - properties: - apiVersion: - description: |- - APIVersion defines the versioned schema of this representation of an object. - Servers should convert recognized schemas to the latest internal value, and - may reject unrecognized values. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources - type: string - kind: - description: |- - Kind is a string value representing the REST resource this object represents. - Servers may infer this from the endpoint the client submits requests to. - Cannot be updated. - In CamelCase. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - type: string - metadata: - type: object - spec: - description: BGPAdvertisementSpec defines the desired state of BGPAdvertisement. 
- properties: - aggregationLength: - default: 32 - description: The aggregation-length advertisement option lets you “roll up” the /32s into a larger prefix. Defaults to 32. Works for IPv4 addresses. - format: int32 - minimum: 1 - type: integer - aggregationLengthV6: - default: 128 - description: The aggregation-length advertisement option lets you “roll up” the /128s into a larger prefix. Defaults to 128. Works for IPv6 addresses. - format: int32 - type: integer - communities: - description: |- - The BGP communities to be associated with the announcement. Each item can be a standard community of the - form 1234:1234, a large community of the form large:1234:1234:1234 or the name of an alias defined in the - Community CRD. - items: - type: string - type: array - ipAddressPoolSelectors: - description: |- - A selector for the IPAddressPools which would get advertised via this advertisement. - If no IPAddressPool is selected by this or by the list, the advertisement is applied to all the IPAddressPools. - items: - description: |- - A label selector is a label query over a set of resources. The result of matchLabels and - matchExpressions are ANDed. An empty label selector matches all objects. A null - label selector matches no objects. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. 
If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - type: array - ipAddressPools: - description: The list of IPAddressPools to advertise via this advertisement, selected by name. - items: - type: string - type: array - localPref: - description: |- - The BGP LOCAL_PREF attribute which is used by BGP best path algorithm, - Path with higher localpref is preferred over one with lower localpref. - format: int32 - type: integer - nodeSelectors: - description: NodeSelectors allows to limit the nodes to announce as next hops for the LoadBalancer IP. When empty, all the nodes having are announced as next hops. - items: - description: |- - A label selector is a label query over a set of resources. The result of matchLabels and - matchExpressions are ANDed. An empty label selector matches all objects. A null - label selector matches no objects. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. 
- Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - type: array - peers: - description: |- - Peers limits the bgppeer to advertise the ips of the selected pools to. - When empty, the loadbalancer IP is announced to all the BGPPeers configured. - items: - type: string - type: array - type: object - status: - description: BGPAdvertisementStatus defines the observed state of BGPAdvertisement. 
- type: object - type: object - served: true - storage: true - subresources: - status: {} ---- -# Source: metallb/charts/crds/templates/crds.yaml -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.19.0 - name: bgppeers.metallb.io -spec: - conversion: - strategy: Webhook - webhook: - clientConfig: - service: - name: metallb-webhook-service - namespace: metallb-system - path: /convert - conversionReviewVersions: - - v1beta1 - - v1beta2 - group: metallb.io - names: - kind: BGPPeer - listKind: BGPPeerList - plural: bgppeers - singular: bgppeer - scope: Namespaced - versions: - - additionalPrinterColumns: - - jsonPath: .spec.peerAddress - name: Address - type: string - - jsonPath: .spec.peerASN - name: ASN - type: string - - jsonPath: .spec.bfdProfile - name: BFD Profile - type: string - - jsonPath: .spec.ebgpMultiHop - name: Multi Hops - type: string - deprecated: true - deprecationWarning: v1beta1 is deprecated, please use v1beta2 - name: v1beta1 - schema: - openAPIV3Schema: - description: BGPPeer is the Schema for the peers API. - properties: - apiVersion: - description: |- - APIVersion defines the versioned schema of this representation of an object. - Servers should convert recognized schemas to the latest internal value, and - may reject unrecognized values. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources - type: string - kind: - description: |- - Kind is a string value representing the REST resource this object represents. - Servers may infer this from the endpoint the client submits requests to. - Cannot be updated. - In CamelCase. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - type: string - metadata: - type: object - spec: - description: BGPPeerSpec defines the desired state of Peer. 
- properties: - bfdProfile: - type: string - ebgpMultiHop: - description: EBGP peer is multi-hops away - type: boolean - holdTime: - description: Requested BGP hold time, per RFC4271. - type: string - keepaliveTime: - description: Requested BGP keepalive time, per RFC4271. - type: string - myASN: - description: AS number to use for the local end of the session. - format: int32 - maximum: 4294967295 - minimum: 0 - type: integer - nodeSelectors: - description: |- - Only connect to this peer on nodes that match one of these - selectors. - items: - properties: - matchExpressions: - items: - properties: - key: - type: string - operator: - type: string - values: - items: - type: string - minItems: 1 - type: array - required: - - key - - operator - - values - type: object - type: array - matchLabels: - additionalProperties: - type: string - type: object - type: object - type: array - password: - description: Authentication password for routers enforcing TCP MD5 authenticated sessions - type: string - peerASN: - description: AS number to expect from the remote end of the session. - format: int32 - maximum: 4294967295 - minimum: 0 - type: integer - peerAddress: - description: Address to dial when establishing the session. - type: string - peerPort: - description: Port to dial when establishing the session. - maximum: 16384 - minimum: 0 - type: integer - routerID: - description: BGP router ID to advertise to the peer - type: string - sourceAddress: - description: Source address to use when establishing the session. - type: string - required: - - myASN - - peerASN - - peerAddress - type: object - status: - description: BGPPeerStatus defines the observed state of Peer. 
- type: object - type: object - served: true - storage: false - subresources: - status: {} - - additionalPrinterColumns: - - jsonPath: .spec.peerAddress - name: Address - type: string - - jsonPath: .spec.peerASN - name: ASN - type: string - - jsonPath: .spec.bfdProfile - name: BFD Profile - type: string - - jsonPath: .spec.ebgpMultiHop - name: Multi Hops - type: string - name: v1beta2 - schema: - openAPIV3Schema: - description: BGPPeer is the Schema for the peers API. - properties: - apiVersion: - description: |- - APIVersion defines the versioned schema of this representation of an object. - Servers should convert recognized schemas to the latest internal value, and - may reject unrecognized values. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources - type: string - kind: - description: |- - Kind is a string value representing the REST resource this object represents. - Servers may infer this from the endpoint the client submits requests to. - Cannot be updated. - In CamelCase. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - type: string - metadata: - type: object - spec: - description: BGPPeerSpec defines the desired state of Peer. - properties: - bfdProfile: - description: The name of the BFD Profile to be used for the BFD session associated to the BGP session. If not set, the BFD session won't be set up. - type: string - connectTime: - description: Requested BGP connect time, controls how long BGP waits between connection attempts to a neighbor. 
- type: string - x-kubernetes-validations: - - message: connect time should be between 1 seconds to 65535 - rule: duration(self).getSeconds() >= 1 && duration(self).getSeconds() <= 65535 - - message: connect time should contain a whole number of seconds - rule: duration(self).getMilliseconds() % 1000 == 0 - disableMP: - default: false - description: |- - To set if we want to disable MP BGP that will separate IPv4 and IPv6 route exchanges into distinct BGP sessions. - Deprecated: DisableMP is deprecated in favor of dualStackAddressFamily. - type: boolean - dualStackAddressFamily: - default: false - description: |- - To set if we want to enable the neighbor not only for the ipfamily related to its session, - but also the other one. This allows to advertise/receive IPv4 prefixes over IPv6 sessions and vice versa. - type: boolean - dynamicASN: - description: |- - DynamicASN detects the AS number to use for the remote end of the session - without explicitly setting it via the ASN field. Limited to: - internal - if the neighbor's ASN is different than MyASN connection is denied. - external - if the neighbor's ASN is the same as MyASN the connection is denied. - ASN and DynamicASN are mutually exclusive and one of them must be specified. - enum: - - internal - - external - type: string - ebgpMultiHop: - description: To set if the BGPPeer is multi-hops away. Needed for FRR mode only. - type: boolean - enableGracefulRestart: - description: |- - EnableGracefulRestart allows BGP peer to continue to forward data packets - along known routes while the routing protocol information is being - restored. This field is immutable because it requires restart of the BGP - session. Supported for FRR mode only. - type: boolean - x-kubernetes-validations: - - message: EnableGracefulRestart cannot be changed after creation - rule: self == oldSelf - holdTime: - description: Requested BGP hold time, per RFC4271. 
- type: string - interface: - description: |- - Interface is the node interface over which the unnumbered BGP peering will - be established. No API validation takes place as that string value - represents an interface name on the host and if user provides an invalid - value, only the actual BGP session will not be established. - Address and Interface are mutually exclusive and one of them must be specified. - type: string - keepaliveTime: - description: Requested BGP keepalive time, per RFC4271. - type: string - myASN: - description: AS number to use for the local end of the session. - format: int32 - maximum: 4294967295 - minimum: 0 - type: integer - nodeSelectors: - description: |- - Only connect to this peer on nodes that match one of these - selectors. - items: - description: |- - A label selector is a label query over a set of resources. The result of matchLabels and - matchExpressions are ANDed. An empty label selector matches all objects. A null - label selector matches no objects. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. 
- items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - type: array - password: - description: Authentication password for routers enforcing TCP MD5 authenticated sessions - type: string - passwordSecret: - description: |- - passwordSecret is name of the authentication secret for BGP Peer. - the secret must be of type "kubernetes.io/basic-auth", and created in the - same namespace as the MetalLB deployment. The password is stored in the - secret as the key "password". - properties: - name: - description: name is unique within a namespace to reference a secret resource. - type: string - namespace: - description: namespace defines the space within which the secret name must be unique. - type: string - type: object - x-kubernetes-map-type: atomic - peerASN: - description: |- - AS number to expect from the remote end of the session. - ASN and DynamicASN are mutually exclusive and one of them must be specified. - format: int32 - maximum: 4294967295 - minimum: 0 - type: integer - peerAddress: - description: Address to dial when establishing the session. - type: string - peerPort: - default: 179 - description: Port to dial when establishing the session. - maximum: 16384 - minimum: 1 - type: integer - routerID: - description: BGP router ID to advertise to the peer - type: string - sourceAddress: - description: Source address to use when establishing the session. 
- type: string - vrf: - description: |- - To set if we want to peer with the BGPPeer using an interface belonging to - a host vrf - type: string - required: - - myASN - type: object - status: - description: BGPPeerStatus defines the observed state of Peer. - type: object - type: object - served: true - storage: true - subresources: - status: {} ---- -# Source: metallb/charts/crds/templates/crds.yaml -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.19.0 - name: communities.metallb.io -spec: - group: metallb.io - names: - kind: Community - listKind: CommunityList - plural: communities - singular: community - scope: Namespaced - versions: - - name: v1beta1 - schema: - openAPIV3Schema: - description: |- - Community is a collection of aliases for communities. - Users can define named aliases to be used in the BGPPeer CRD. - properties: - apiVersion: - description: |- - APIVersion defines the versioned schema of this representation of an object. - Servers should convert recognized schemas to the latest internal value, and - may reject unrecognized values. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources - type: string - kind: - description: |- - Kind is a string value representing the REST resource this object represents. - Servers may infer this from the endpoint the client submits requests to. - Cannot be updated. - In CamelCase. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - type: string - metadata: - type: object - spec: - description: CommunitySpec defines the desired state of Community. - properties: - communities: - items: - properties: - name: - description: The name of the alias for the community. - type: string - value: - description: |- - The BGP community value corresponding to the given name. 
Can be a standard community of the form 1234:1234 - or a large community of the form large:1234:1234:1234. - type: string - type: object - type: array - type: object - status: - description: CommunityStatus defines the observed state of Community. - type: object - type: object - served: true - storage: true - subresources: - status: {} ---- -# Source: metallb/charts/crds/templates/crds.yaml -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.19.0 - name: configurationstates.metallb.io -spec: - group: metallb.io - names: - kind: ConfigurationState - listKind: ConfigurationStateList - plural: configurationstates - singular: configurationstate - scope: Namespaced - versions: - - additionalPrinterColumns: - - jsonPath: .status.result - name: Result - type: string - - jsonPath: .status.errorSummary - name: ErrorSummary - type: string - - jsonPath: .metadata.creationTimestamp - name: Age - type: date - name: v1beta1 - schema: - openAPIV3Schema: - description: |- - ConfigurationState is a status-only CRD that reports configuration validation results from MetalLB components. - Labels: - - metallb.io/component-type: "controller" or "speaker" - - metallb.io/node-name: node name (only for speaker) - properties: - apiVersion: - description: |- - APIVersion defines the versioned schema of this representation of an object. - Servers should convert recognized schemas to the latest internal value, and - may reject unrecognized values. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources - type: string - kind: - description: |- - Kind is a string value representing the REST resource this object represents. - Servers may infer this from the endpoint the client submits requests to. - Cannot be updated. - In CamelCase. 
- More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - type: string - metadata: - type: object - status: - description: ConfigurationStateStatus defines the observed state of ConfigurationState. - properties: - conditions: - description: Conditions contains the status conditions from the reconcilers running in this component. - items: - description: Condition contains details for one aspect of the current state of this API Resource. - properties: - lastTransitionTime: - description: |- - lastTransitionTime is the last time the condition transitioned from one status to another. - This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. - format: date-time - type: string - message: - description: |- - message is a human readable message indicating details about the transition. - This may be an empty string. - maxLength: 32768 - type: string - observedGeneration: - description: |- - observedGeneration represents the .metadata.generation that the condition was set based upon. - For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date - with respect to the current state of the instance. - format: int64 - minimum: 0 - type: integer - reason: - description: |- - reason contains a programmatic identifier indicating the reason for the condition's last transition. - Producers of specific condition types may define expected values and meanings for this field, - and whether the values are considered a guaranteed API. - The value should be a CamelCase string. - This field may not be empty. - maxLength: 1024 - minLength: 1 - pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ - type: string - status: - description: status of the condition, one of True, False, Unknown. 
- enum: - - "True" - - "False" - - Unknown - type: string - type: - description: type of condition in CamelCase or in foo.example.com/CamelCase. - maxLength: 316 - pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ - type: string - required: - - lastTransitionTime - - message - - reason - - status - - type - type: object - type: array - x-kubernetes-list-map-keys: - - type - x-kubernetes-list-type: map - errorSummary: - description: |- - ErrorSummary contains the aggregated error messages from reconciliation failures. - This field is empty when Result is "Valid". - type: string - result: - description: Result indicates the configuration validation result. - enum: - - Valid - - Invalid - - Unknown - type: string - type: object - type: object - served: true - storage: true - subresources: - status: {} ---- -# Source: metallb/charts/crds/templates/crds.yaml -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.19.0 - name: ipaddresspools.metallb.io -spec: - group: metallb.io - names: - kind: IPAddressPool - listKind: IPAddressPoolList - plural: ipaddresspools - singular: ipaddresspool - scope: Namespaced - versions: - - additionalPrinterColumns: - - jsonPath: .spec.autoAssign - name: Auto Assign - type: boolean - - jsonPath: .spec.avoidBuggyIPs - name: Avoid Buggy IPs - type: boolean - - jsonPath: .spec.addresses - name: Addresses - type: string - name: v1beta1 - schema: - openAPIV3Schema: - description: |- - IPAddressPool represents a pool of IP addresses that can be allocated - to LoadBalancer services. - properties: - apiVersion: - description: |- - APIVersion defines the versioned schema of this representation of an object. - Servers should convert recognized schemas to the latest internal value, and - may reject unrecognized values. 
- More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources - type: string - kind: - description: |- - Kind is a string value representing the REST resource this object represents. - Servers may infer this from the endpoint the client submits requests to. - Cannot be updated. - In CamelCase. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - type: string - metadata: - type: object - spec: - description: IPAddressPoolSpec defines the desired state of IPAddressPool. - properties: - addresses: - description: |- - A list of IP address ranges over which MetalLB has authority. - You can list multiple ranges in a single pool, they will all share the - same settings. Each range can be either a CIDR prefix, or an explicit - start-end range of IPs. - items: - type: string - type: array - autoAssign: - default: true - description: |- - AutoAssign flag used to prevent MetallB from automatic allocation - for a pool. - type: boolean - avoidBuggyIPs: - default: false - description: |- - AvoidBuggyIPs prevents addresses ending with .0 and .255 - to be used by a pool. - type: boolean - serviceAllocation: - description: |- - AllocateTo makes ip pool allocation to specific namespace and/or service. - The controller will use the pool with lowest value of priority in case of - multiple matches. A pool with no priority set will be used only if the - pools with priority can't be used. If multiple matching IPAddressPools are - available it will check for the availability of IPs sorting the matching - IPAddressPools by priority, starting from the highest to the lowest. If - multiple IPAddressPools have the same priority, choice will be random. - properties: - namespaceSelectors: - description: |- - NamespaceSelectors list of label selectors to select namespace(s) for ip pool, - an alternative to using namespace list. 
- items: - description: |- - A label selector is a label query over a set of resources. The result of matchLabels and - matchExpressions are ANDed. An empty label selector matches all objects. A null - label selector matches no objects. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - type: array - namespaces: - description: Namespaces list of namespace(s) on which ip pool can be attached. - items: - type: string - type: array - priority: - description: Priority priority given for ip pool while ip allocation on a service. 
- type: integer - serviceSelectors: - description: |- - ServiceSelectors list of label selector to select service(s) for which ip pool - can be used for ip allocation. - items: - description: |- - A label selector is a label query over a set of resources. The result of matchLabels and - matchExpressions are ANDed. An empty label selector matches all objects. A null - label selector matches no objects. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - type: array - type: object - required: - - addresses - type: object - status: - description: IPAddressPoolStatus defines the observed state of IPAddressPool. 
- properties: - assignedIPv4: - description: AssignedIPv4 is the number of assigned IPv4 addresses. - format: int64 - type: integer - assignedIPv6: - description: AssignedIPv6 is the number of assigned IPv6 addresses. - format: int64 - type: integer - availableIPv4: - description: AvailableIPv4 is the number of available IPv4 addresses. - format: int64 - type: integer - availableIPv6: - description: AvailableIPv6 is the number of available IPv6 addresses. - format: int64 - type: integer - required: - - assignedIPv4 - - assignedIPv6 - - availableIPv4 - - availableIPv6 - type: object - required: - - spec - type: object - served: true - storage: true - subresources: - status: {} ---- -# Source: metallb/charts/crds/templates/crds.yaml -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.19.0 - name: l2advertisements.metallb.io -spec: - group: metallb.io - names: - kind: L2Advertisement - listKind: L2AdvertisementList - plural: l2advertisements - singular: l2advertisement - scope: Namespaced - versions: - - additionalPrinterColumns: - - jsonPath: .spec.ipAddressPools - name: IPAddressPools - type: string - - jsonPath: .spec.ipAddressPoolSelectors - name: IPAddressPool Selectors - type: string - - jsonPath: .spec.interfaces - name: Interfaces - type: string - - jsonPath: .spec.nodeSelectors - name: Node Selectors - priority: 10 - type: string - name: v1beta1 - schema: - openAPIV3Schema: - description: |- - L2Advertisement allows to advertise the LoadBalancer IPs provided - by the selected pools via L2. - properties: - apiVersion: - description: |- - APIVersion defines the versioned schema of this representation of an object. - Servers should convert recognized schemas to the latest internal value, and - may reject unrecognized values. 
- More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources - type: string - kind: - description: |- - Kind is a string value representing the REST resource this object represents. - Servers may infer this from the endpoint the client submits requests to. - Cannot be updated. - In CamelCase. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - type: string - metadata: - type: object - spec: - description: L2AdvertisementSpec defines the desired state of L2Advertisement. - properties: - interfaces: - description: |- - A list of interfaces to announce from. The LB IP will be announced only from these interfaces. - If the field is not set, we advertise from all the interfaces on the host. - items: - type: string - type: array - ipAddressPoolSelectors: - description: |- - A selector for the IPAddressPools which would get advertised via this advertisement. - If no IPAddressPool is selected by this or by the list, the advertisement is applied to all the IPAddressPools. - items: - description: |- - A label selector is a label query over a set of resources. The result of matchLabels and - matchExpressions are ANDed. An empty label selector matches all objects. A null - label selector matches no objects. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. 
If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - type: array - ipAddressPools: - description: The list of IPAddressPools to advertise via this advertisement, selected by name. - items: - type: string - type: array - nodeSelectors: - description: NodeSelectors allows to limit the nodes to announce as next hops for the LoadBalancer IP. When empty, all the nodes having are announced as next hops. - items: - description: |- - A label selector is a label query over a set of resources. The result of matchLabels and - matchExpressions are ANDed. An empty label selector matches all objects. A null - label selector matches no objects. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. 
If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - type: array - type: object - status: - description: L2AdvertisementStatus defines the observed state of L2Advertisement. - type: object - type: object - served: true - storage: true - subresources: - status: {} ---- -# Source: metallb/charts/crds/templates/crds.yaml -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.19.0 - name: servicebgpstatuses.metallb.io -spec: - group: metallb.io - names: - kind: ServiceBGPStatus - listKind: ServiceBGPStatusList - plural: servicebgpstatuses - singular: servicebgpstatus - scope: Namespaced - versions: - - additionalPrinterColumns: - - jsonPath: .status.node - name: Node - type: string - - jsonPath: .status.serviceName - name: Service Name - type: string - - jsonPath: .status.serviceNamespace - name: Service Namespace - type: string - name: v1beta1 - schema: - openAPIV3Schema: - description: ServiceBGPStatus exposes the BGP peers a service is configured to be advertised to, per relevant node. - properties: - apiVersion: - description: |- - APIVersion defines the versioned schema of this representation of an object. 
- Servers should convert recognized schemas to the latest internal value, and - may reject unrecognized values. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources - type: string - kind: - description: |- - Kind is a string value representing the REST resource this object represents. - Servers may infer this from the endpoint the client submits requests to. - Cannot be updated. - In CamelCase. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - type: string - metadata: - type: object - spec: - description: ServiceBGPStatusSpec defines the desired state of ServiceBGPStatus. - type: object - status: - description: MetalLBServiceBGPStatus defines the observed state of ServiceBGPStatus. - properties: - node: - description: Node indicates the node announcing the service. - type: string - x-kubernetes-validations: - - message: Value is immutable - rule: self == oldSelf - peers: - description: |- - Peers indicate the BGP peers for which the service is configured to be advertised to. - The service being actually advertised to a given peer depends on the session state and is not indicated here. - items: - type: string - type: array - serviceName: - description: ServiceName indicates the service this status represents. - type: string - x-kubernetes-validations: - - message: Value is immutable - rule: self == oldSelf - serviceNamespace: - description: ServiceNamespace indicates the namespace of the service. 
- type: string - x-kubernetes-validations: - - message: Value is immutable - rule: self == oldSelf - type: object - type: object - served: true - storage: true - subresources: - status: {} ---- -# Source: metallb/charts/crds/templates/crds.yaml -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.19.0 - name: servicel2statuses.metallb.io -spec: - group: metallb.io - names: - kind: ServiceL2Status - listKind: ServiceL2StatusList - plural: servicel2statuses - singular: servicel2status - scope: Namespaced - versions: - - additionalPrinterColumns: - - jsonPath: .status.node - name: Allocated Node - type: string - - jsonPath: .status.serviceName - name: Service Name - type: string - - jsonPath: .status.serviceNamespace - name: Service Namespace - type: string - name: v1beta1 - schema: - openAPIV3Schema: - description: ServiceL2Status reveals the actual traffic status of loadbalancer services in layer2 mode. - properties: - apiVersion: - description: |- - APIVersion defines the versioned schema of this representation of an object. - Servers should convert recognized schemas to the latest internal value, and - may reject unrecognized values. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources - type: string - kind: - description: |- - Kind is a string value representing the REST resource this object represents. - Servers may infer this from the endpoint the client submits requests to. - Cannot be updated. - In CamelCase. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - type: string - metadata: - type: object - spec: - description: ServiceL2StatusSpec defines the desired state of ServiceL2Status. - type: object - status: - description: MetalLBServiceL2Status defines the observed state of ServiceL2Status. 
- properties: - interfaces: - description: Interfaces indicates the interfaces that receive the directed traffic - items: - description: InterfaceInfo defines interface info of layer2 announcement. - properties: - name: - description: Name the name of network interface card - type: string - type: object - type: array - node: - description: Node indicates the node that receives the directed traffic - type: string - x-kubernetes-validations: - - message: Value is immutable - rule: self == oldSelf - serviceName: - description: ServiceName indicates the service this status represents - type: string - x-kubernetes-validations: - - message: Value is immutable - rule: self == oldSelf - serviceNamespace: - description: ServiceNamespace indicates the namespace of the service - type: string - x-kubernetes-validations: - - message: Value is immutable - rule: self == oldSelf - type: object - type: object - served: true - storage: true - subresources: - status: {} ---- -# Source: metallb/templates/rbac.yaml -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: metallb:controller - labels: - helm.sh/chart: metallb-0.15.3 - app.kubernetes.io/name: metallb - app.kubernetes.io/instance: metallb - app.kubernetes.io/version: "v0.15.3" - app.kubernetes.io/managed-by: Helm -rules: -- apiGroups: [""] - resources: ["services", "namespaces"] - verbs: ["get", "list", "watch"] -- apiGroups: [""] - resources: ["nodes"] - verbs: ["list"] -- apiGroups: [""] - resources: ["services/status"] - verbs: ["update"] -- apiGroups: [""] - resources: ["events"] - verbs: ["create", "patch"] -- apiGroups: ["admissionregistration.k8s.io"] - resources: ["validatingwebhookconfigurations"] - resourceNames: ["metallb-webhook-configuration"] - verbs: ["create", "delete", "get", "list", "patch", "update", "watch"] -- apiGroups: ["admissionregistration.k8s.io"] - resources: ["validatingwebhookconfigurations"] - verbs: ["list", "watch"] -- apiGroups: ["apiextensions.k8s.io"] - resources: 
["customresourcedefinitions"] - resourceNames: ["bfdprofiles.metallb.io","bgpadvertisements.metallb.io", - "bgppeers.metallb.io","ipaddresspools.metallb.io","l2advertisements.metallb.io","communities.metallb.io","configurationstates.metallb.io"] - verbs: ["create", "delete", "get", "list", "patch", "update", "watch"] -- apiGroups: ["apiextensions.k8s.io"] - resources: ["customresourcedefinitions"] - verbs: ["list", "watch"] -- apiGroups: ["metallb.io"] - resources: ["configurationstates"] - verbs: ["create", "delete", "get", "list", "patch", "update", "watch"] -- apiGroups: ["metallb.io"] - resources: ["configurationstates/status"] - verbs: ["get", "patch", "update"] ---- -# Source: metallb/templates/rbac.yaml -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: metallb:speaker - labels: - helm.sh/chart: metallb-0.15.3 - app.kubernetes.io/name: metallb - app.kubernetes.io/instance: metallb - app.kubernetes.io/version: "v0.15.3" - app.kubernetes.io/managed-by: Helm -rules: -- apiGroups: [""] - resources: ["services", "endpoints", "nodes", "namespaces"] - verbs: ["get", "list", "watch"] -- apiGroups: ["discovery.k8s.io"] - resources: ["endpointslices"] - verbs: ["get", "list", "watch"] -- apiGroups: [""] - resources: ["events"] - verbs: ["create", "patch"] -- apiGroups: ["metallb.io"] - resources: ["servicel2statuses","servicel2statuses/status","configurationstates","configurationstates/status"] - verbs: ["*"] ---- -# Source: metallb/templates/rbac.yaml -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: metallb:controller - labels: - helm.sh/chart: metallb-0.15.3 - app.kubernetes.io/name: metallb - app.kubernetes.io/instance: metallb - app.kubernetes.io/version: "v0.15.3" - app.kubernetes.io/managed-by: Helm -subjects: -- kind: ServiceAccount - name: metallb-controller - namespace: metallb-system -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: metallb:controller ---- -# Source: 
metallb/templates/rbac.yaml -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: metallb:speaker - labels: - helm.sh/chart: metallb-0.15.3 - app.kubernetes.io/name: metallb - app.kubernetes.io/instance: metallb - app.kubernetes.io/version: "v0.15.3" - app.kubernetes.io/managed-by: Helm -subjects: -- kind: ServiceAccount - name: metallb-speaker - namespace: metallb-system -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: metallb:speaker ---- -# Source: metallb/templates/rbac.yaml -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - name: metallb-pod-lister - namespace: "metallb-system" - labels: - helm.sh/chart: metallb-0.15.3 - app.kubernetes.io/name: metallb - app.kubernetes.io/instance: metallb - app.kubernetes.io/version: "v0.15.3" - app.kubernetes.io/managed-by: Helm -rules: -- apiGroups: [""] - resources: ["pods"] - verbs: ["list", "get"] -- apiGroups: [""] - resources: ["secrets"] - verbs: ["get", "list", "watch"] -- apiGroups: [""] - resources: ["configmaps"] - verbs: ["get", "list", "watch"] -- apiGroups: ["metallb.io"] - resources: ["bfdprofiles"] - verbs: ["get", "list", "watch"] -- apiGroups: ["metallb.io"] - resources: ["bgppeers"] - verbs: ["get", "list", "watch"] -- apiGroups: ["metallb.io"] - resources: ["l2advertisements"] - verbs: ["get", "list", "watch"] -- apiGroups: ["metallb.io"] - resources: ["bgpadvertisements"] - verbs: ["get", "list", "watch"] -- apiGroups: ["metallb.io"] - resources: ["ipaddresspools"] - verbs: ["get", "list", "watch"] -- apiGroups: ["metallb.io"] - resources: ["communities"] - verbs: ["get", "list", "watch"] -- apiGroups: ["metallb.io"] - resources: ["servicebgpstatuses","servicebgpstatuses/status"] - verbs: ["*"] ---- -# Source: metallb/templates/rbac.yaml -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - name: metallb-controller - namespace: "metallb-system" - labels: - helm.sh/chart: metallb-0.15.3 - app.kubernetes.io/name: 
metallb - app.kubernetes.io/instance: metallb - app.kubernetes.io/version: "v0.15.3" - app.kubernetes.io/managed-by: Helm -rules: -- apiGroups: [""] - resources: ["secrets"] - verbs: ["create", "get", "list", "watch"] -- apiGroups: [""] - resources: ["secrets"] - resourceNames: ["metallb-memberlist"] - verbs: ["list"] -- apiGroups: ["apps"] - resources: ["deployments"] - resourceNames: ["metallb-controller"] - verbs: ["get"] -- apiGroups: [""] - resources: ["secrets"] - verbs: ["create", "delete", "get", "list", "patch", "update", "watch"] -- apiGroups: ["metallb.io"] - resources: ["ipaddresspools"] - verbs: ["get", "list", "watch"] -- apiGroups: ["metallb.io"] - resources: ["ipaddresspools/status"] - verbs: ["update"] -- apiGroups: ["metallb.io"] - resources: ["bgppeers"] - verbs: ["get", "list"] -- apiGroups: ["metallb.io"] - resources: ["bgpadvertisements"] - verbs: ["get", "list"] -- apiGroups: ["metallb.io"] - resources: ["l2advertisements"] - verbs: ["get", "list"] -- apiGroups: ["metallb.io"] - resources: ["communities"] - verbs: ["get", "list","watch"] -- apiGroups: ["metallb.io"] - resources: ["bfdprofiles"] - verbs: ["get", "list","watch"] ---- -# Source: metallb/templates/rbac.yaml -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - name: metallb-pod-lister - namespace: "metallb-system" - labels: - helm.sh/chart: metallb-0.15.3 - app.kubernetes.io/name: metallb - app.kubernetes.io/instance: metallb - app.kubernetes.io/version: "v0.15.3" - app.kubernetes.io/managed-by: Helm -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: metallb-pod-lister -subjects: -- kind: ServiceAccount - name: metallb-speaker ---- -# Source: metallb/templates/rbac.yaml -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - name: metallb-controller - namespace: "metallb-system" - labels: - helm.sh/chart: metallb-0.15.3 - app.kubernetes.io/name: metallb - app.kubernetes.io/instance: metallb - app.kubernetes.io/version: 
"v0.15.3" - app.kubernetes.io/managed-by: Helm -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: metallb-controller -subjects: -- kind: ServiceAccount - name: metallb-controller ---- -# Source: metallb/templates/webhooks.yaml -apiVersion: v1 -kind: Service -metadata: - name: metallb-webhook-service - namespace: "metallb-system" - labels: - helm.sh/chart: metallb-0.15.3 - app.kubernetes.io/name: metallb - app.kubernetes.io/instance: metallb - app.kubernetes.io/version: "v0.15.3" - app.kubernetes.io/managed-by: Helm -spec: - ports: - - port: 443 - targetPort: 9443 - selector: - app.kubernetes.io/name: metallb - app.kubernetes.io/instance: metallb - app.kubernetes.io/component: controller ---- -# Source: metallb/templates/speaker.yaml -apiVersion: apps/v1 -kind: DaemonSet -metadata: - name: metallb-speaker - namespace: "metallb-system" - labels: - helm.sh/chart: metallb-0.15.3 - app.kubernetes.io/name: metallb - app.kubernetes.io/instance: metallb - app.kubernetes.io/version: "v0.15.3" - app.kubernetes.io/managed-by: Helm - app.kubernetes.io/component: speaker -spec: - updateStrategy: - type: RollingUpdate - selector: - matchLabels: - app.kubernetes.io/name: metallb - app.kubernetes.io/instance: metallb - app.kubernetes.io/component: speaker - template: - metadata: - labels: - app.kubernetes.io/name: metallb - app.kubernetes.io/instance: metallb - app.kubernetes.io/component: speaker - spec: - serviceAccountName: metallb-speaker - terminationGracePeriodSeconds: 0 - hostNetwork: true - volumes: - - name: memberlist - secret: - secretName: metallb-memberlist - defaultMode: 420 - - name: metallb-excludel2 - configMap: - defaultMode: 256 - name: metallb-excludel2 - - name: frr-sockets - emptyDir: {} - - name: frr-startup - configMap: - name: metallb-frr-startup - - name: frr-conf - emptyDir: {} - - name: reloader - emptyDir: {} - - name: metrics - emptyDir: {} - - name: frr-tmp - emptyDir: {} - - name: frr-lib - emptyDir: {} - - name: frr-log - 
emptyDir: {} - initContainers: - # Copies the initial config files with the right permissions to the shared volume. - - name: cp-frr-files - image: quay.io/frrouting/frr:10.4.1 - securityContext: - runAsUser: 100 - runAsGroup: 101 - command: ["/bin/sh", "-c", "cp -rLf /tmp/frr/* /etc/frr/"] - volumeMounts: - - name: frr-startup - mountPath: /tmp/frr - - name: frr-conf - mountPath: /etc/frr - # Copies the reloader to the shared volume between the speaker and reloader. - - name: cp-reloader - image: quay.io/metallb/speaker:v0.15.3 - command: ["/cp-tool","/frr-reloader.sh","/etc/frr_reloader/frr-reloader.sh"] - volumeMounts: - - name: reloader - mountPath: /etc/frr_reloader - # Copies the metrics exporter - - name: cp-metrics - image: quay.io/metallb/speaker:v0.15.3 - command: ["/cp-tool","/frr-metrics","/etc/frr_metrics/frr-metrics"] - volumeMounts: - - name: metrics - mountPath: /etc/frr_metrics - shareProcessNamespace: true - containers: - - name: speaker - image: quay.io/metallb/speaker:v0.15.3 - args: - - --port=7472 - - --log-level=info - env: - - name: METALLB_NODE_NAME - valueFrom: - fieldRef: - fieldPath: spec.nodeName - - name: METALLB_HOST - valueFrom: - fieldRef: - fieldPath: status.hostIP - - name: METALLB_ML_BIND_ADDR - valueFrom: - fieldRef: - fieldPath: status.podIP - - - name: METALLB_ML_LABELS - value: "app.kubernetes.io/name=metallb,app.kubernetes.io/component=speaker" - - name: METALLB_ML_BIND_PORT - value: "7946" - - name: METALLB_ML_SECRET_KEY_PATH - value: "/etc/ml_secret_key" - - name: FRR_CONFIG_FILE - value: /etc/frr_reloader/frr.conf - - name: FRR_RELOADER_PID_FILE - value: /etc/frr_reloader/reloader.pid - - name: METALLB_BGP_TYPE - value: frr - - name: METALLB_POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - ports: - - name: monitoring - containerPort: 7472 - - name: memberlist-tcp - containerPort: 7946 - protocol: TCP - - name: memberlist-udp - containerPort: 7946 - protocol: UDP - livenessProbe: - httpGet: - path: /metrics - 
port: monitoring - initialDelaySeconds: 10 - periodSeconds: 10 - timeoutSeconds: 1 - successThreshold: 1 - failureThreshold: 3 - readinessProbe: - httpGet: - path: /metrics - port: monitoring - initialDelaySeconds: 10 - periodSeconds: 10 - timeoutSeconds: 1 - successThreshold: 1 - failureThreshold: 3 - securityContext: - allowPrivilegeEscalation: false - readOnlyRootFilesystem: true - capabilities: - drop: - - ALL - add: - - NET_RAW - volumeMounts: - - name: memberlist - mountPath: /etc/ml_secret_key - - name: reloader - mountPath: /etc/frr_reloader - - name: metallb-excludel2 - mountPath: /etc/metallb - - name: frr - securityContext: - readOnlyRootFilesystem: true - allowPrivilegeEscalation: false - capabilities: - add: - - NET_ADMIN - - NET_RAW - - SYS_ADMIN - - NET_BIND_SERVICE - image: quay.io/frrouting/frr:10.4.1 - env: - - name: TINI_SUBREAPER - value: "true" - volumeMounts: - - name: frr-sockets - mountPath: /var/run/frr - - name: frr-conf - mountPath: /etc/frr - - name: frr-tmp - mountPath: /var/tmp/frr - - name: frr-lib - mountPath: /var/lib/frr - # The command is FRR's default entrypoint & waiting for the log file to appear and tailing it. - # If the log file isn't created in 60 seconds the tail fails and the container is restarted. - # This workaround is needed to have the frr logs as part of kubectl logs -c frr < speaker_pod_name >. 
- command: - - /bin/sh - - -c - - | - /sbin/tini -- /usr/lib/frr/docker-start & - attempts=0 - until [[ -f /etc/frr/frr.log || $attempts -eq 60 ]]; do - sleep 1 - attempts=$(( $attempts + 1 )) - done - tail -f /etc/frr/frr.log - livenessProbe: - httpGet: - path: livez - port: 7473 - initialDelaySeconds: 10 - periodSeconds: 10 - timeoutSeconds: 1 - successThreshold: 1 - failureThreshold: 3 - startupProbe: - httpGet: - path: /livez - port: 7473 - failureThreshold: 30 - periodSeconds: 5 - - name: reloader - image: quay.io/frrouting/frr:10.4.1 - securityContext: - readOnlyRootFilesystem: true - allowPrivilegeEscalation: false - command: ["/etc/frr_reloader/frr-reloader.sh"] - volumeMounts: - - name: frr-sockets - mountPath: /var/run/frr - - name: frr-conf - mountPath: /etc/frr - - name: reloader - mountPath: /etc/frr_reloader - - name: frr-log - mountPath: /var/log/frr - - name: frr-metrics - image: quay.io/frrouting/frr:10.4.1 - securityContext: - readOnlyRootFilesystem: true - allowPrivilegeEscalation: false - command: ["/etc/frr_metrics/frr-metrics"] - args: - - --metrics-port=7473 - env: - - name: VTYSH_HISTFILE - value: /dev/null - ports: - - containerPort: 7473 - name: frrmetrics - volumeMounts: - - name: frr-sockets - mountPath: /var/run/frr - - name: frr-conf - mountPath: /etc/frr - - name: metrics - mountPath: /etc/frr_metrics - nodeSelector: - "kubernetes.io/os": linux - tolerations: - - key: node-role.kubernetes.io/master - effect: NoSchedule - operator: Exists - - key: node-role.kubernetes.io/control-plane - effect: NoSchedule - operator: Exists ---- -# Source: metallb/templates/controller.yaml -apiVersion: apps/v1 -kind: Deployment -metadata: - name: metallb-controller - namespace: "metallb-system" - labels: - helm.sh/chart: metallb-0.15.3 - app.kubernetes.io/name: metallb - app.kubernetes.io/instance: metallb - app.kubernetes.io/version: "v0.15.3" - app.kubernetes.io/managed-by: Helm - app.kubernetes.io/component: controller -spec: - strategy: - type: 
RollingUpdate - selector: - matchLabels: - app.kubernetes.io/name: metallb - app.kubernetes.io/instance: metallb - app.kubernetes.io/component: controller - template: - metadata: - labels: - app.kubernetes.io/name: metallb - app.kubernetes.io/instance: metallb - app.kubernetes.io/component: controller - spec: - serviceAccountName: metallb-controller - terminationGracePeriodSeconds: 0 - securityContext: - fsGroup: 65534 - runAsNonRoot: true - runAsUser: 65534 - containers: - - name: controller - image: quay.io/metallb/controller:v0.15.3 - args: - - --port=7472 - - --log-level=info - - --webhook-mode=enabled - - --tls-min-version=VersionTLS12 - env: - - name: METALLB_ML_SECRET_NAME - value: metallb-memberlist - - name: METALLB_DEPLOYMENT - value: metallb-controller - - name: METALLB_BGP_TYPE - value: frr - ports: - - name: monitoring - containerPort: 7472 - - containerPort: 9443 - name: webhook-server - protocol: TCP - volumeMounts: - - mountPath: /tmp/k8s-webhook-server/serving-certs - name: cert - readOnly: true - livenessProbe: - httpGet: - path: /metrics - port: monitoring - initialDelaySeconds: 10 - periodSeconds: 10 - timeoutSeconds: 1 - successThreshold: 1 - failureThreshold: 3 - readinessProbe: - httpGet: - path: /metrics - port: monitoring - initialDelaySeconds: 10 - periodSeconds: 10 - timeoutSeconds: 1 - successThreshold: 1 - failureThreshold: 3 - securityContext: - allowPrivilegeEscalation: false - readOnlyRootFilesystem: true - capabilities: - drop: - - ALL - nodeSelector: - "kubernetes.io/os": linux - volumes: - - name: cert - secret: - defaultMode: 420 - secretName: metallb-webhook-cert ---- -# Source: metallb/templates/webhooks.yaml -apiVersion: admissionregistration.k8s.io/v1 -kind: ValidatingWebhookConfiguration -metadata: - name: metallb-webhook-configuration - labels: - helm.sh/chart: metallb-0.15.3 - app.kubernetes.io/name: metallb - app.kubernetes.io/instance: metallb - app.kubernetes.io/version: "v0.15.3" - app.kubernetes.io/managed-by: Helm 
-webhooks: -- admissionReviewVersions: - - v1 - clientConfig: - service: - name: metallb-webhook-service - namespace: metallb-system - path: /validate-metallb-io-v1beta2-bgppeer - failurePolicy: Fail - name: bgppeervalidationwebhook.metallb.io - rules: - - apiGroups: - - metallb.io - apiVersions: - - v1beta2 - operations: - - CREATE - - UPDATE - resources: - - bgppeers - sideEffects: None -- admissionReviewVersions: - - v1 - clientConfig: - service: - name: metallb-webhook-service - namespace: metallb-system - path: /validate-metallb-io-v1beta1-ipaddresspool - failurePolicy: Fail - name: ipaddresspoolvalidationwebhook.metallb.io - rules: - - apiGroups: - - metallb.io - apiVersions: - - v1beta1 - operations: - - CREATE - - UPDATE - resources: - - ipaddresspools - sideEffects: None -- admissionReviewVersions: - - v1 - clientConfig: - service: - name: metallb-webhook-service - namespace: metallb-system - path: /validate-metallb-io-v1beta1-bgpadvertisement - failurePolicy: Fail - name: bgpadvertisementvalidationwebhook.metallb.io - rules: - - apiGroups: - - metallb.io - apiVersions: - - v1beta1 - operations: - - CREATE - - UPDATE - resources: - - bgpadvertisements - sideEffects: None -- admissionReviewVersions: - - v1 - clientConfig: - service: - name: metallb-webhook-service - namespace: metallb-system - path: /validate-metallb-io-v1beta1-community - failurePolicy: Fail - name: communityvalidationwebhook.metallb.io - rules: - - apiGroups: - - metallb.io - apiVersions: - - v1beta1 - operations: - - CREATE - - UPDATE - resources: - - communities - sideEffects: None -- admissionReviewVersions: - - v1 - clientConfig: - service: - name: metallb-webhook-service - namespace: metallb-system - path: /validate-metallb-io-v1beta1-bfdprofile - failurePolicy: Fail - name: bfdprofilevalidationwebhook.metallb.io - rules: - - apiGroups: - - metallb.io - apiVersions: - - v1beta1 - operations: - - CREATE - - DELETE - resources: - - bfdprofiles - sideEffects: None -- 
admissionReviewVersions: - - v1 - clientConfig: - service: - name: metallb-webhook-service - namespace: metallb-system - path: /validate-metallb-io-v1beta1-l2advertisement - failurePolicy: Fail - name: l2advertisementvalidationwebhook.metallb.io - rules: - - apiGroups: - - metallb.io - apiVersions: - - v1beta1 - operations: - - CREATE - - UPDATE - resources: - - l2advertisements - sideEffects: None diff --git a/infrastructure/metallb/patches/node-placement.yaml b/infrastructure/metallb/patches/node-placement.yaml deleted file mode 100644 index c42ae99..0000000 --- a/infrastructure/metallb/patches/node-placement.yaml +++ /dev/null @@ -1,27 +0,0 @@ -# infrastructure/metallb/patches/node-placement.yaml -apiVersion: apps/v1 -kind: Deployment -metadata: - name: metallb-controller - namespace: metallb-system -spec: - template: - spec: - containers: - - name: controller - args: - - --port=7472 - - --log-level=info - - --webhook-mode=enabled - - --tls-min-version=VersionTLS12 - - --lb-class=metallb - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: hardware - operator: In - values: - - rpi4 - - rpi5 diff --git a/infrastructure/metallb/patches/speaker-loglevel.yaml b/infrastructure/metallb/patches/speaker-loglevel.yaml deleted file mode 100644 index 61b8942..0000000 --- a/infrastructure/metallb/patches/speaker-loglevel.yaml +++ /dev/null @@ -1,15 +0,0 @@ -# infrastructure/metallb/patches/speaker-loglevel.yaml -apiVersion: apps/v1 -kind: DaemonSet -metadata: - name: metallb-speaker - namespace: metallb-system -spec: - template: - spec: - containers: - - name: speaker - args: - - --port=7472 - - --log-level=info - - --lb-class=metallb diff --git a/infrastructure/sources/cert-manager/letsencrypt-prod.yaml b/infrastructure/sources/cert-manager/letsencrypt-prod.yaml index 65bf316..7f90f01 100644 --- a/infrastructure/sources/cert-manager/letsencrypt-prod.yaml +++ 
b/infrastructure/sources/cert-manager/letsencrypt-prod.yaml @@ -1,3 +1,4 @@ +# infrastructure/sources/cert-manager/letsencrypt-prod.yaml apiVersion: cert-manager.io/v1 kind: ClusterIssuer metadata: diff --git a/infrastructure/sources/cert-manager/letsencrypt.yaml b/infrastructure/sources/cert-manager/letsencrypt.yaml index 13590f3..a988312 100644 --- a/infrastructure/sources/cert-manager/letsencrypt.yaml +++ b/infrastructure/sources/cert-manager/letsencrypt.yaml @@ -1,3 +1,4 @@ +# infrastructure/sources/cert-manager/letsencrypt.yaml apiVersion: cert-manager.io/v1 kind: ClusterIssuer metadata: diff --git a/infrastructure/sources/helm/ananace.yaml b/infrastructure/sources/helm/ananace.yaml new file mode 100644 index 0000000..b5e8a7b --- /dev/null +++ b/infrastructure/sources/helm/ananace.yaml @@ -0,0 +1,9 @@ +# infrastructure/sources/helm/ananace.yaml +apiVersion: source.toolkit.fluxcd.io/v1 +kind: HelmRepository +metadata: + name: ananace + namespace: flux-system +spec: + interval: 1h + url: https://ananace.gitlab.io/charts diff --git a/infrastructure/sources/helm/kustomization.yaml b/infrastructure/sources/helm/kustomization.yaml index c8d20bb..74ff668 100644 --- a/infrastructure/sources/helm/kustomization.yaml +++ b/infrastructure/sources/helm/kustomization.yaml @@ -2,12 +2,14 @@ apiVersion: kustomize.config.k8s.io/v1beta1 kind: Kustomization resources: + - ananace.yaml - fluent-bit.yaml - grafana.yaml - hashicorp.yaml - jetstack.yaml - jenkins.yaml - mailu.yaml + - metallb.yaml - opentelemetry.yaml - opensearch.yaml - harbor.yaml diff --git a/infrastructure/sources/helm/metallb.yaml b/infrastructure/sources/helm/metallb.yaml new file mode 100644 index 0000000..12021af --- /dev/null +++ b/infrastructure/sources/helm/metallb.yaml @@ -0,0 +1,9 @@ +# infrastructure/sources/helm/metallb.yaml +apiVersion: source.toolkit.fluxcd.io/v1 +kind: HelmRepository +metadata: + name: metallb + namespace: flux-system +spec: + interval: 1h + url: https://metallb.github.io/metallb 
diff --git a/services/ai-llm/deployment.yaml b/services/ai-llm/deployment.yaml index b6e6701..fa35440 100644 --- a/services/ai-llm/deployment.yaml +++ b/services/ai-llm/deployment.yaml @@ -42,7 +42,7 @@ spec: claimName: ollama-models initContainers: - name: warm-model - image: ollama/ollama:latest + image: ollama/ollama@sha256:2c9595c555fd70a28363489ac03bd5bf9e7c5bdf2890373c3a830ffd7252ce6d env: - name: OLLAMA_HOST value: 0.0.0.0 @@ -75,7 +75,7 @@ spec: nvidia.com/gpu.shared: 1 containers: - name: ollama - image: ollama/ollama:latest + image: ollama/ollama@sha256:2c9595c555fd70a28363489ac03bd5bf9e7c5bdf2890373c3a830ffd7252ce6d imagePullPolicy: IfNotPresent ports: - name: http diff --git a/services/bstein-dev-home/backend-service.yaml b/services/bstein-dev-home/backend-service.yaml index 75ec16c..76be476 100644 --- a/services/bstein-dev-home/backend-service.yaml +++ b/services/bstein-dev-home/backend-service.yaml @@ -1,3 +1,4 @@ +# services/bstein-dev-home/backend-service.yaml apiVersion: v1 kind: Service metadata: diff --git a/services/bstein-dev-home/frontend-service.yaml b/services/bstein-dev-home/frontend-service.yaml index 8540580..ee1df10 100644 --- a/services/bstein-dev-home/frontend-service.yaml +++ b/services/bstein-dev-home/frontend-service.yaml @@ -1,3 +1,4 @@ +# services/bstein-dev-home/frontend-service.yaml apiVersion: v1 kind: Service metadata: diff --git a/services/bstein-dev-home/namespace.yaml b/services/bstein-dev-home/namespace.yaml index ae77d71..a6a7c25 100644 --- a/services/bstein-dev-home/namespace.yaml +++ b/services/bstein-dev-home/namespace.yaml @@ -1,3 +1,4 @@ +# services/bstein-dev-home/namespace.yaml apiVersion: v1 kind: Namespace metadata: diff --git a/services/comms/comms-secrets-ensure-job.yaml b/services/comms/comms-secrets-ensure-job.yaml index 877649b..e7c8c43 100644 --- a/services/comms/comms-secrets-ensure-job.yaml +++ b/services/comms/comms-secrets-ensure-job.yaml @@ -13,7 +13,7 @@ spec: restartPolicy: Never containers: - name: 
ensure - image: bitnami/kubectl:latest + image: registry.bstein.dev/bstein/kubectl:1.35.0 command: ["/bin/sh", "-c"] args: - | diff --git a/services/comms/element-call-deployment.yaml b/services/comms/element-call-deployment.yaml index 7f3581d..149dcd1 100644 --- a/services/comms/element-call-deployment.yaml +++ b/services/comms/element-call-deployment.yaml @@ -19,7 +19,7 @@ spec: hardware: rpi5 containers: - name: element-call - image: ghcr.io/element-hq/element-call:latest + image: ghcr.io/element-hq/element-call@sha256:e6897c7818331714eae19d83ef8ea94a8b41115f0d8d3f62c2fed2d02c65c9bc ports: - containerPort: 8080 name: http diff --git a/services/comms/element-rendered.yaml b/services/comms/element-rendered.yaml deleted file mode 100644 index 0d3200e..0000000 --- a/services/comms/element-rendered.yaml +++ /dev/null @@ -1,202 +0,0 @@ ---- -# Source: element-web/templates/serviceaccount.yaml -apiVersion: v1 -kind: ServiceAccount -metadata: - name: othrys-element-element-web - labels: - helm.sh/chart: element-web-1.4.26 - app.kubernetes.io/name: element-web - app.kubernetes.io/instance: othrys-element - app.kubernetes.io/version: "1.12.6" - app.kubernetes.io/managed-by: Helm ---- -# Source: element-web/templates/configuration-nginx.yaml -apiVersion: v1 -kind: ConfigMap -metadata: - name: othrys-element-element-web-nginx - labels: - helm.sh/chart: element-web-1.4.26 - app.kubernetes.io/name: element-web - app.kubernetes.io/instance: othrys-element - app.kubernetes.io/version: "1.12.6" - app.kubernetes.io/managed-by: Helm -data: - default.conf: | - server { - listen 8080; - listen [::]:8080; - server_name localhost; - - root /usr/share/nginx/html; - index index.html; - - add_header X-Frame-Options SAMEORIGIN; - add_header X-Content-Type-Options nosniff; - add_header X-XSS-Protection "1; mode=block"; - add_header Content-Security-Policy "frame-ancestors 'self'"; - - # Set no-cache for the index.html only so that browsers always check for a new copy of Element Web. 
- location = /index.html { - add_header Cache-Control "no-cache"; - } - - # redirect server error pages to the static page /50x.html - # - error_page 500 502 503 504 /50x.html; - } ---- -# Source: element-web/templates/configuration.yaml -apiVersion: v1 -kind: ConfigMap -metadata: - name: othrys-element-element-web - labels: - helm.sh/chart: element-web-1.4.26 - app.kubernetes.io/name: element-web - app.kubernetes.io/instance: othrys-element - app.kubernetes.io/version: "1.12.6" - app.kubernetes.io/managed-by: Helm -data: - config.json: | - {"brand":"Othrys","default_server_config":{"m.homeserver":{"base_url":"https://matrix.live.bstein.dev","server_name":"live.bstein.dev"},"m.identity_server":{"base_url":"https://vector.im"}},"default_theme":"dark","disable_custom_urls":true,"disable_login_language_selector":true,"disable_guests":false,"registration_url":"https://bstein.dev/request-access","show_labs_settings":true,"features":{"feature_group_calls":true,"feature_video_rooms":true,"feature_element_call_video_rooms":true},"room_directory":{"servers":["live.bstein.dev"]},"jitsi":{},"element_call":{"url":"https://call.live.bstein.dev","participant_limit":16,"brand":"Othrys Call"}} ---- -# Source: element-web/templates/service.yaml -apiVersion: v1 -kind: Service -metadata: - name: othrys-element-element-web - labels: - helm.sh/chart: element-web-1.4.26 - app.kubernetes.io/name: element-web - app.kubernetes.io/instance: othrys-element - app.kubernetes.io/version: "1.12.6" - app.kubernetes.io/managed-by: Helm -spec: - type: ClusterIP - ports: - - port: 80 - targetPort: http - protocol: TCP - name: http - selector: - app.kubernetes.io/name: element-web - app.kubernetes.io/instance: othrys-element ---- -# Source: element-web/templates/deployment.yaml -apiVersion: apps/v1 -kind: Deployment -metadata: - name: othrys-element-element-web - labels: - helm.sh/chart: element-web-1.4.26 - app.kubernetes.io/name: element-web - app.kubernetes.io/instance: othrys-element - 
app.kubernetes.io/version: "1.12.6" - app.kubernetes.io/managed-by: Helm -spec: - replicas: 1 - selector: - matchLabels: - app.kubernetes.io/name: element-web - app.kubernetes.io/instance: othrys-element - template: - metadata: - annotations: - checksum/config: manual-rtc-enable-1 - checksum/config-nginx: 085061d0925f4840c3770233509dc0b00fe8fa1a5fef8bf282a514fd101c76fa - labels: - app.kubernetes.io/name: element-web - app.kubernetes.io/instance: othrys-element - spec: - serviceAccountName: othrys-element-element-web - securityContext: - {} - containers: - - name: element-web - securityContext: - {} - image: "ghcr.io/element-hq/element-web:v1.12.6" - imagePullPolicy: IfNotPresent - env: - - name: ELEMENT_WEB_PORT - value: '8080' - ports: - - name: http - containerPort: 8080 - protocol: TCP - livenessProbe: - httpGet: - path: / - port: http - readinessProbe: - httpGet: - path: / - port: http - resources: - limits: - cpu: 500m - memory: 512Mi - requests: - cpu: 100m - memory: 256Mi - volumeMounts: - - mountPath: /app/config.json - name: config - subPath: config.json - - mountPath: /etc/nginx/conf.d/config.json - name: config-nginx - subPath: config.json - volumes: - - name: config - configMap: - name: othrys-element-element-web - - name: config-nginx - configMap: - name: othrys-element-element-web-nginx - nodeSelector: - hardware: rpi5 - affinity: - nodeAffinity: - preferredDuringSchedulingIgnoredDuringExecution: - - preference: - matchExpressions: - - key: hardware - operator: In - values: - - rpi5 - - rpi4 - weight: 50 ---- -# Source: element-web/templates/ingress.yaml -apiVersion: networking.k8s.io/v1 -kind: Ingress -metadata: - name: othrys-element-element-web - labels: - helm.sh/chart: element-web-1.4.26 - app.kubernetes.io/name: element-web - app.kubernetes.io/instance: othrys-element - app.kubernetes.io/version: "1.12.6" - app.kubernetes.io/managed-by: Helm - annotations: - cert-manager.io/cluster-issuer: letsencrypt - 
traefik.ingress.kubernetes.io/router.entrypoints: websecure -spec: - ingressClassName: traefik - tls: - - hosts: - - "live.bstein.dev" - secretName: live-othrys-tls - rules: - - host: "live.bstein.dev" - http: - paths: - - path: / - backend: - service: - name: othrys-element-element-web - port: - number: 80 - pathType: Prefix diff --git a/services/comms/helmrelease.yaml b/services/comms/helmrelease.yaml new file mode 100644 index 0000000..39cd534 --- /dev/null +++ b/services/comms/helmrelease.yaml @@ -0,0 +1,255 @@ +# services/comms/helmrelease.yaml +apiVersion: helm.toolkit.fluxcd.io/v2 +kind: HelmRelease +metadata: + name: othrys-synapse + namespace: comms +spec: + interval: 30m + chart: + spec: + chart: matrix-synapse + version: 3.12.17 + sourceRef: + kind: HelmRepository + name: ananace + namespace: flux-system + install: + remediation: { retries: 3 } + timeout: 15m + upgrade: + remediation: + retries: 3 + remediateLastFailure: true + cleanupOnFail: true + timeout: 15m + values: + serverName: live.bstein.dev + publicServerName: matrix.live.bstein.dev + + config: + publicBaseurl: https://matrix.live.bstein.dev + + externalPostgresql: + host: postgres-service.postgres.svc.cluster.local + port: 5432 + username: synapse + existingSecret: synapse-db + existingSecretPasswordKey: POSTGRES_PASSWORD + database: synapse + + redis: + enabled: true + auth: + enabled: true + existingSecret: synapse-redis + existingSecretPasswordKey: redis-password + + postgresql: + enabled: false + + persistence: + enabled: true + storageClass: asteria + accessMode: ReadWriteOnce + size: 50Gi + + synapse: + strategy: + type: RollingUpdate + rollingUpdate: + maxSurge: 0 + maxUnavailable: 1 + podSecurityContext: + fsGroup: 666 + runAsUser: 666 + runAsGroup: 666 + resources: + requests: + cpu: 500m + memory: 1Gi + limits: + cpu: "2" + memory: 3Gi + nodeSelector: + hardware: rpi5 + affinity: + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 50 + preference: + 
matchExpressions: + - key: hardware + operator: In + values: ["rpi5", "rpi4"] + + ingress: + enabled: true + className: traefik + annotations: + cert-manager.io/cluster-issuer: letsencrypt + traefik.ingress.kubernetes.io/router.entrypoints: websecure + csHosts: + - matrix.live.bstein.dev + hosts: + - matrix.live.bstein.dev + wkHosts: + - live.bstein.dev + - bstein.dev + tls: + - secretName: matrix-live-tls + hosts: + - matrix.live.bstein.dev + - live.bstein.dev + + extraConfig: + allow_guest_access: true + allow_public_rooms_without_auth: true + auto_join_rooms: + - "#othrys:live.bstein.dev" + autocreate_auto_join_rooms: true + default_room_version: "11" + experimental_features: + msc3266_enabled: true + msc4143_enabled: true + msc4222_enabled: true + max_event_delay_duration: 24h + password_config: + enabled: true + oidc_enabled: true + oidc_providers: + - idp_id: keycloak + idp_name: Keycloak + issuer: https://sso.bstein.dev/realms/atlas + client_id: synapse + client_secret: "@@OIDC_CLIENT_SECRET@@" + client_auth_method: client_secret_post + scopes: ["openid", "profile", "email"] + authorization_endpoint: https://sso.bstein.dev/realms/atlas/protocol/openid-connect/auth + token_endpoint: https://sso.bstein.dev/realms/atlas/protocol/openid-connect/token + userinfo_endpoint: https://sso.bstein.dev/realms/atlas/protocol/openid-connect/userinfo + user_mapping_provider: + config: + localpart_template: "{{ user.preferred_username }}" + display_name_template: "{{ user.name }}" + allow_existing_users: true + rc_message: + per_second: 0.5 + burst_count: 30 + rc_delayed_event_mgmt: + per_second: 1 + burst_count: 20 + rc_login: + address: + burst_count: 20 + per_second: 5 + account: + burst_count: 20 + per_second: 5 + failed_attempts: + burst_count: 20 + per_second: 5 + room_list_publication_rules: + - action: allow + well_known_client: + "m.homeserver": + "base_url": "https://matrix.live.bstein.dev" + "org.matrix.msc4143.rtc_foci": + - type: "livekit" + livekit_service_url: 
"https://kit.live.bstein.dev/livekit/jwt" + + worker: + enabled: false + + signingkey: + job: + generateImage: + repository: matrixdotorg/synapse + tag: v1.144.0 + publishImage: + repository: registry.bstein.dev/bstein/kubectl + tag: 1.35.0 +--- +apiVersion: helm.toolkit.fluxcd.io/v2 +kind: HelmRelease +metadata: + name: othrys-element + namespace: comms +spec: + interval: 30m + chart: + spec: + chart: element-web + version: 1.4.26 + sourceRef: + kind: HelmRepository + name: ananace + namespace: flux-system + install: + remediation: { retries: 3 } + timeout: 10m + upgrade: + remediation: + retries: 3 + remediateLastFailure: true + cleanupOnFail: true + timeout: 10m + values: + replicaCount: 1 + + defaultServer: + url: https://matrix.live.bstein.dev + name: live.bstein.dev + + config: + default_theme: dark + brand: Othrys + disable_custom_urls: true + disable_login_language_selector: true + disable_guests: false + show_labs_settings: true + features: + feature_group_calls: true + feature_video_rooms: true + feature_element_call_video_rooms: true + room_directory: + servers: + - live.bstein.dev + jitsi: {} + element_call: + url: https://call.live.bstein.dev + participant_limit: 16 + brand: Othrys Call + + ingress: + enabled: true + className: traefik + annotations: + cert-manager.io/cluster-issuer: letsencrypt + traefik.ingress.kubernetes.io/router.entrypoints: websecure + hosts: + - live.bstein.dev + tls: + - secretName: live-othrys-tls + hosts: [live.bstein.dev] + + resources: + requests: + cpu: 100m + memory: 256Mi + limits: + cpu: 500m + memory: 512Mi + + nodeSelector: + hardware: rpi5 + + affinity: + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 50 + preference: + matchExpressions: + - key: hardware + operator: In + values: ["rpi5", "rpi4"] diff --git a/services/comms/knowledge/catalog/atlas-summary.json b/services/comms/knowledge/catalog/atlas-summary.json index 2139e29..00d6658 100644 --- 
a/services/comms/knowledge/catalog/atlas-summary.json +++ b/services/comms/knowledge/catalog/atlas-summary.json @@ -1,8 +1,8 @@ { "counts": { - "helmrelease_host_hints": 7, - "http_endpoints": 35, - "services": 44, - "workloads": 49 + "helmrelease_host_hints": 18, + "http_endpoints": 37, + "services": 43, + "workloads": 54 } } diff --git a/services/comms/knowledge/catalog/atlas.json b/services/comms/knowledge/catalog/atlas.json index 92f08f4..9ca1d29 100644 --- a/services/comms/knowledge/catalog/atlas.json +++ b/services/comms/knowledge/catalog/atlas.json @@ -12,12 +12,7 @@ "targetNamespace": "bstein-dev-home" }, { - "name": "ci-demo", - "path": "services/ci-demo", - "targetNamespace": null - }, - { - "name": "communication", + "name": "comms", "path": "services/comms", "targetNamespace": "comms" }, @@ -71,6 +66,11 @@ "path": "services/keycloak", "targetNamespace": "sso" }, + { + "name": "logging", + "path": "services/logging", + "targetNamespace": null + }, { "name": "longhorn-ui", "path": "infrastructure/longhorn/ui-ingress", @@ -81,6 +81,11 @@ "path": "services/mailu", "targetNamespace": "mailu-mailserver" }, + { + "name": "maintenance", + "path": "services/maintenance", + "targetNamespace": null + }, { "name": "metallb", "path": "infrastructure/metallb", @@ -116,11 +121,26 @@ "path": "services/openldap", "targetNamespace": "sso" }, + { + "name": "outline", + "path": "services/outline", + "targetNamespace": "outline" + }, { "name": "pegasus", "path": "services/pegasus", "targetNamespace": "jellyfin" }, + { + "name": "planka", + "path": "services/planka", + "targetNamespace": "planka" + }, + { + "name": "postgres", + "path": "infrastructure/postgres", + "targetNamespace": "postgres" + }, { "name": "sui-metrics", "path": "services/sui-metrics/overlays/atlas", @@ -163,7 +183,7 @@ "serviceAccountName": null, "nodeSelector": {}, "images": [ - "ollama/ollama:latest" + "ollama/ollama@sha256:2c9595c555fd70a28363489ac03bd5bf9e7c5bdf2890373c3a830ffd7252ce6d" ] }, { @@ 
-179,7 +199,7 @@ "node-role.kubernetes.io/worker": "true" }, "images": [ - "registry.bstein.dev/bstein/bstein-dev-home-backend:0.1.1-84" + "registry.bstein.dev/bstein/bstein-dev-home-backend:0.1.1-92" ] }, { @@ -195,7 +215,7 @@ "node-role.kubernetes.io/worker": "true" }, "images": [ - "registry.bstein.dev/bstein/bstein-dev-home-frontend:0.1.1-84" + "registry.bstein.dev/bstein/bstein-dev-home-frontend:0.1.1-92" ] }, { @@ -214,21 +234,6 @@ "python:3.11-slim" ] }, - { - "kind": "Deployment", - "namespace": "ci-demo", - "name": "ci-demo", - "labels": { - "app.kubernetes.io/name": "ci-demo" - }, - "serviceAccountName": null, - "nodeSelector": { - "hardware": "rpi4" - }, - "images": [ - "registry.bstein.dev/infra/ci-demo:v0.0.0-3" - ] - }, { "kind": "Deployment", "namespace": "comms", @@ -271,7 +276,7 @@ "hardware": "rpi5" }, "images": [ - "ghcr.io/element-hq/element-call:latest" + "ghcr.io/element-hq/element-call@sha256:e6897c7818331714eae19d83ef8ea94a8b41115f0d8d3f62c2fed2d02c65c9bc" ] }, { @@ -345,56 +350,6 @@ "nginx:1.27-alpine" ] }, - { - "kind": "Deployment", - "namespace": "comms", - "name": "othrys-element-element-web", - "labels": { - "app.kubernetes.io/instance": "othrys-element", - "app.kubernetes.io/name": "element-web" - }, - "serviceAccountName": "othrys-element-element-web", - "nodeSelector": { - "hardware": "rpi5" - }, - "images": [ - "ghcr.io/element-hq/element-web:v1.12.6" - ] - }, - { - "kind": "Deployment", - "namespace": "comms", - "name": "othrys-synapse-matrix-synapse", - "labels": { - "app.kubernetes.io/component": "synapse", - "app.kubernetes.io/instance": "othrys-synapse", - "app.kubernetes.io/name": "matrix-synapse" - }, - "serviceAccountName": "default", - "nodeSelector": { - "hardware": "rpi5" - }, - "images": [ - "ghcr.io/element-hq/synapse:v1.144.0" - ] - }, - { - "kind": "Deployment", - "namespace": "comms", - "name": 
"othrys-synapse-redis-master", - "labels": { - "app.kubernetes.io/component": "master", - "app.kubernetes.io/instance": "othrys-synapse", - "app.kubernetes.io/managed-by": "Helm", - "app.kubernetes.io/name": "redis", - "helm.sh/chart": "redis-17.17.1" - }, - "serviceAccountName": "othrys-synapse-redis", - "nodeSelector": {}, - "images": [ - "docker.io/bitnamilegacy/redis:7.0.12-debian-11-r34" - ] - }, { "kind": "DaemonSet", "namespace": "crypto", @@ -407,7 +362,7 @@ "node-role.kubernetes.io/worker": "true" }, "images": [ - "ghcr.io/tari-project/xmrig:latest" + "ghcr.io/tari-project/xmrig@sha256:80defbfd0b640d604c91cb5101d3642db7928e1e68ee3c6b011289b3565a39d9" ] }, { @@ -681,6 +636,66 @@ "hashicorp/vault-csi-provider:1.7.0" ] }, + { + "kind": "DaemonSet", + "namespace": "logging", + "name": "node-image-gc-rpi4", + "labels": { + "app": "node-image-gc-rpi4" + }, + "serviceAccountName": "node-image-gc-rpi4", + "nodeSelector": { + "hardware": "rpi4" + }, + "images": [ + "bitnami/kubectl@sha256:554ab88b1858e8424c55de37ad417b16f2a0e65d1607aa0f3fe3ce9b9f10b131" + ] + }, + { + "kind": "DaemonSet", + "namespace": "logging", + "name": "node-image-prune-rpi5", + "labels": { + "app": "node-image-prune-rpi5" + }, + "serviceAccountName": "node-image-prune-rpi5", + "nodeSelector": { + "hardware": "rpi5" + }, + "images": [ + "bitnami/kubectl@sha256:554ab88b1858e8424c55de37ad417b16f2a0e65d1607aa0f3fe3ce9b9f10b131" + ] + }, + { + "kind": "DaemonSet", + "namespace": "logging", + "name": "node-log-rotation", + "labels": { + "app": "node-log-rotation" + }, + "serviceAccountName": "node-log-rotation", + "nodeSelector": { + "hardware": "rpi5" + }, + "images": [ + "bitnami/kubectl@sha256:554ab88b1858e8424c55de37ad417b16f2a0e65d1607aa0f3fe3ce9b9f10b131" + ] + }, + { + "kind": "Deployment", + "namespace": "logging", + "name": "oauth2-proxy-logs", + "labels": { + "app": "oauth2-proxy-logs" + }, + "serviceAccountName": null, + "nodeSelector": { + "node-role.kubernetes.io/worker": "true" + }, + 
"images": [ + "quay.io/oauth2-proxy/oauth2-proxy:v7.6.0" + ] + }, { "kind": "Deployment", "namespace": "longhorn-system", @@ -708,7 +723,7 @@ "mailu.bstein.dev/vip": "true" }, "images": [ - "lachlanevenson/k8s-kubectl:latest" + "registry.bstein.dev/bstein/kubectl:1.35.0" ] }, { @@ -726,37 +741,30 @@ }, { "kind": "DaemonSet", - "namespace": "metallb-system", - "name": "metallb-speaker", + "namespace": "maintenance", + "name": "node-image-sweeper", "labels": { - "app.kubernetes.io/component": "speaker", - "app.kubernetes.io/instance": "metallb", - "app.kubernetes.io/name": "metallb" + "app": "node-image-sweeper" }, - "serviceAccountName": "metallb-speaker", + "serviceAccountName": "node-image-sweeper", "nodeSelector": { "kubernetes.io/os": "linux" }, "images": [ - "quay.io/frrouting/frr:10.4.1", - "quay.io/metallb/speaker:v0.15.3" + "python:3.12.9-alpine3.20" ] }, { - "kind": "Deployment", - "namespace": "metallb-system", - "name": "metallb-controller", + "kind": "DaemonSet", + "namespace": "maintenance", + "name": "node-nofile", "labels": { - "app.kubernetes.io/component": "controller", - "app.kubernetes.io/instance": "metallb", - "app.kubernetes.io/name": "metallb" - }, - "serviceAccountName": "metallb-controller", - "nodeSelector": { - "kubernetes.io/os": "linux" + "app": "node-nofile" }, + "serviceAccountName": "node-nofile", + "nodeSelector": {}, "images": [ - "quay.io/metallb/controller:v0.15.3" + "bitnami/kubectl@sha256:554ab88b1858e8424c55de37ad417b16f2a0e65d1607aa0f3fe3ce9b9f10b131" ] }, { @@ -772,6 +780,21 @@ "registry.bstein.dev/monitoring/dcgm-exporter:4.4.2-4.7.0-ubuntu22.04" ] }, + { + "kind": "DaemonSet", + "namespace": "monitoring", + "name": "jetson-tegrastats-exporter", + "labels": { + "app": "jetson-tegrastats-exporter" + }, + "serviceAccountName": "default", + "nodeSelector": { + "jetson": "true" + }, + "images": [ + "python:3.10-slim" + ] + }, { "kind": "Deployment", "namespace": "monitoring", @@ -797,7 +820,7 @@ "hardware": "rpi5" }, "images": [ 
- "collabora/code:latest" + "collabora/code@sha256:3c58d0e9bae75e4647467d0c7d91cb66f261d3e814709aed590b5c334a04db26" ] }, { @@ -815,6 +838,66 @@ "nextcloud:29-apache" ] }, + { + "kind": "Deployment", + "namespace": "outline", + "name": "outline", + "labels": { + "app": "outline" + }, + "serviceAccountName": null, + "nodeSelector": { + "node-role.kubernetes.io/worker": "true" + }, + "images": [ + "outlinewiki/outline:1.2.0" + ] + }, + { + "kind": "Deployment", + "namespace": "outline", + "name": "outline-redis", + "labels": { + "app": "outline-redis" + }, + "serviceAccountName": null, + "nodeSelector": { + "node-role.kubernetes.io/worker": "true" + }, + "images": [ + "redis:7.4.1-alpine" + ] + }, + { + "kind": "Deployment", + "namespace": "planka", + "name": "planka", + "labels": { + "app": "planka" + }, + "serviceAccountName": null, + "nodeSelector": { + "node-role.kubernetes.io/worker": "true" + }, + "images": [ + "ghcr.io/plankanban/planka:2.0.0-rc.4" + ] + }, + { + "kind": "StatefulSet", + "namespace": "postgres", + "name": "postgres", + "labels": { + "app": "postgres" + }, + "serviceAccountName": "postgres-vault", + "nodeSelector": { + "node-role.kubernetes.io/worker": "true" + }, + "images": [ + "postgres:15" + ] + }, { "kind": "Deployment", "namespace": "sso", @@ -984,22 +1067,6 @@ } ] }, - { - "namespace": "ci-demo", - "name": "ci-demo", - "type": "ClusterIP", - "selector": { - "app.kubernetes.io/name": "ci-demo" - }, - "ports": [ - { - "name": "http", - "port": 80, - "targetPort": "http", - "protocol": "TCP" - } - ] - }, { "namespace": "comms", "name": "coturn", @@ -1454,94 +1521,6 @@ } ] }, - { - "namespace": "comms", - "name": "othrys-element-element-web", - "type": "ClusterIP", - "selector": { - "app.kubernetes.io/instance": "othrys-element", - "app.kubernetes.io/name": "element-web" - }, - "ports": [ - { - "name": "http", - "port": 80, - "targetPort": "http", - "protocol": "TCP" - } - ] - }, - { - "namespace": "comms", - "name": 
"othrys-synapse-matrix-synapse", - "type": "ClusterIP", - "selector": { - "app.kubernetes.io/component": "synapse", - "app.kubernetes.io/instance": "othrys-synapse", - "app.kubernetes.io/name": "matrix-synapse" - }, - "ports": [ - { - "name": "http", - "port": 8008, - "targetPort": "http", - "protocol": "TCP" - } - ] - }, - { - "namespace": "comms", - "name": "othrys-synapse-redis-headless", - "type": "ClusterIP", - "selector": { - "app.kubernetes.io/instance": "othrys-synapse", - "app.kubernetes.io/name": "redis" - }, - "ports": [ - { - "name": "tcp-redis", - "port": 6379, - "targetPort": "redis", - "protocol": "TCP" - } - ] - }, - { - "namespace": "comms", - "name": "othrys-synapse-redis-master", - "type": "ClusterIP", - "selector": { - "app.kubernetes.io/component": "master", - "app.kubernetes.io/instance": "othrys-synapse", - "app.kubernetes.io/name": "redis" - }, - "ports": [ - { - "name": "tcp-redis", - "port": 6379, - "targetPort": "redis", - "protocol": "TCP" - } - ] - }, - { - "namespace": "comms", - "name": "othrys-synapse-replication", - "type": "ClusterIP", - "selector": { - "app.kubernetes.io/component": "synapse", - "app.kubernetes.io/instance": "othrys-synapse", - "app.kubernetes.io/name": "matrix-synapse" - }, - "ports": [ - { - "name": "replication", - "port": 9093, - "targetPort": "replication", - "protocol": "TCP" - } - ] - }, { "namespace": "crypto", "name": "monerod", @@ -1743,6 +1722,22 @@ } ] }, + { + "namespace": "logging", + "name": "oauth2-proxy-logs", + "type": "ClusterIP", + "selector": { + "app": "oauth2-proxy-logs" + }, + "ports": [ + { + "name": "http", + "port": 80, + "targetPort": 4180, + "protocol": "TCP" + } + ] + }, { "namespace": "longhorn-system", "name": "oauth2-proxy-longhorn", @@ -1823,24 +1818,6 @@ } ] }, - { - "namespace": "metallb-system", - "name": "metallb-webhook-service", - "type": "ClusterIP", - "selector": { - "app.kubernetes.io/component": "controller", - "app.kubernetes.io/instance": "metallb", - 
"app.kubernetes.io/name": "metallb" - }, - "ports": [ - { - "name": null, - "port": 443, - "targetPort": 9443, - "protocol": "TCP" - } - ] - }, { "namespace": "monitoring", "name": "dcgm-exporter", @@ -1857,6 +1834,22 @@ } ] }, + { + "namespace": "monitoring", + "name": "jetson-tegrastats-exporter", + "type": "ClusterIP", + "selector": { + "app": "jetson-tegrastats-exporter" + }, + "ports": [ + { + "name": "metrics", + "port": 9100, + "targetPort": "metrics", + "protocol": "TCP" + } + ] + }, { "namespace": "monitoring", "name": "postmark-exporter", @@ -1905,6 +1898,70 @@ } ] }, + { + "namespace": "outline", + "name": "outline", + "type": "ClusterIP", + "selector": { + "app": "outline" + }, + "ports": [ + { + "name": "http", + "port": 80, + "targetPort": "http", + "protocol": "TCP" + } + ] + }, + { + "namespace": "outline", + "name": "outline-redis", + "type": "ClusterIP", + "selector": { + "app": "outline-redis" + }, + "ports": [ + { + "name": "redis", + "port": 6379, + "targetPort": "redis", + "protocol": "TCP" + } + ] + }, + { + "namespace": "planka", + "name": "planka", + "type": "ClusterIP", + "selector": { + "app": "planka" + }, + "ports": [ + { + "name": "http", + "port": 80, + "targetPort": "http", + "protocol": "TCP" + } + ] + }, + { + "namespace": "postgres", + "name": "postgres-service", + "type": "ClusterIP", + "selector": { + "app": "postgres" + }, + "ports": [ + { + "name": "postgres", + "port": 5432, + "targetPort": 5432, + "protocol": "TCP" + } + ] + }, { "namespace": "sso", "name": "keycloak", @@ -2110,7 +2167,7 @@ "via": { "kind": "Ingress", "name": "matrix-wellknown-bstein-dev", - "source": "communication" + "source": "comms" } }, { @@ -2130,7 +2187,7 @@ "via": { "kind": "Ingress", "name": "matrix-wellknown-bstein-dev", - "source": "communication" + "source": "comms" } }, { @@ -2170,7 +2227,7 @@ "via": { "kind": "Ingress", "name": "element-call", - "source": "communication" + "source": "comms" } }, { @@ -2250,7 +2307,7 @@ "via": { "kind": 
"Ingress", "name": "livekit-jwt-ingress", - "source": "communication" + "source": "comms" } }, { @@ -2270,27 +2327,7 @@ "via": { "kind": "Ingress", "name": "livekit-ingress", - "source": "communication" - } - }, - { - "host": "live.bstein.dev", - "path": "/", - "backend": { - "namespace": "comms", - "service": "othrys-element-element-web", - "port": 80, - "workloads": [ - { - "kind": "Deployment", - "name": "othrys-element-element-web" - } - ] - }, - "via": { - "kind": "Ingress", - "name": "othrys-element-element-web", - "source": "communication" + "source": "comms" } }, { @@ -2310,7 +2347,7 @@ "via": { "kind": "Ingress", "name": "matrix-wellknown", - "source": "communication" + "source": "comms" } }, { @@ -2330,7 +2367,7 @@ "via": { "kind": "Ingress", "name": "matrix-wellknown", - "source": "communication" + "source": "comms" } }, { @@ -2340,17 +2377,32 @@ "namespace": "comms", "service": "othrys-synapse-matrix-synapse", "port": 8008, + "workloads": [] + }, + "via": { + "kind": "Ingress", + "name": "matrix-routing", + "source": "comms" + } + }, + { + "host": "logs.bstein.dev", + "path": "/", + "backend": { + "namespace": "logging", + "service": "oauth2-proxy-logs", + "port": "http", "workloads": [ { "kind": "Deployment", - "name": "othrys-synapse-matrix-synapse" + "name": "oauth2-proxy-logs" } ] }, "via": { "kind": "Ingress", - "name": "matrix-routing", - "source": "communication" + "name": "logs", + "source": "logging" } }, { @@ -2405,7 +2457,7 @@ "via": { "kind": "Ingress", "name": "matrix-routing", - "source": "communication" + "source": "comms" } }, { @@ -2425,7 +2477,7 @@ "via": { "kind": "Ingress", "name": "matrix-wellknown-matrix-live", - "source": "communication" + "source": "comms" } }, { @@ -2445,7 +2497,7 @@ "via": { "kind": "Ingress", "name": "matrix-wellknown-matrix-live", - "source": "communication" + "source": "comms" } }, { @@ -2455,17 +2507,12 @@ "namespace": "comms", "service": "othrys-synapse-matrix-synapse", "port": 8008, - "workloads": [ - { - 
"kind": "Deployment", - "name": "othrys-synapse-matrix-synapse" - } - ] + "workloads": [] }, "via": { "kind": "Ingress", "name": "matrix-routing", - "source": "communication" + "source": "comms" } }, { @@ -2485,7 +2532,7 @@ "via": { "kind": "Ingress", "name": "matrix-routing", - "source": "communication" + "source": "comms" } }, { @@ -2505,7 +2552,7 @@ "via": { "kind": "Ingress", "name": "matrix-routing", - "source": "communication" + "source": "comms" } }, { @@ -2525,7 +2572,7 @@ "via": { "kind": "Ingress", "name": "matrix-routing", - "source": "communication" + "source": "comms" } }, { @@ -2545,7 +2592,7 @@ "via": { "kind": "Ingress", "name": "matrix-routing", - "source": "communication" + "source": "comms" } }, { @@ -2565,7 +2612,7 @@ "via": { "kind": "Ingress", "name": "matrix-routing", - "source": "communication" + "source": "comms" } }, { @@ -2575,17 +2622,12 @@ "namespace": "comms", "service": "othrys-synapse-matrix-synapse", "port": 8008, - "workloads": [ - { - "kind": "Deployment", - "name": "othrys-synapse-matrix-synapse" - } - ] + "workloads": [] }, "via": { "kind": "Ingress", "name": "matrix-routing", - "source": "communication" + "source": "comms" } }, { @@ -2608,6 +2650,26 @@ "source": "monerod" } }, + { + "host": "notes.bstein.dev", + "path": "/", + "backend": { + "namespace": "outline", + "service": "outline", + "port": 80, + "workloads": [ + { + "kind": "Deployment", + "name": "outline" + } + ] + }, + "via": { + "kind": "Ingress", + "name": "outline", + "source": "outline" + } + }, { "host": "office.bstein.dev", "path": "/", @@ -2728,6 +2790,26 @@ "source": "jellyfin" } }, + { + "host": "tasks.bstein.dev", + "path": "/", + "backend": { + "namespace": "planka", + "service": "planka", + "port": 80, + "workloads": [ + { + "kind": "Deployment", + "name": "planka" + } + ] + }, + "via": { + "kind": "Ingress", + "name": "planka", + "source": "planka" + } + }, { "host": "vault.bstein.dev", "path": "/", @@ -2750,12 +2832,28 @@ } ], "helmrelease_host_hints": 
{ + "comms:comms/othrys-element": [ + "call.live.bstein.dev", + "live.bstein.dev", + "matrix.live.bstein.dev" + ], + "comms:comms/othrys-synapse": [ + "bstein.dev", + "kit.live.bstein.dev", + "live.bstein.dev", + "matrix.live.bstein.dev", + "registry.bstein.dev", + "sso.bstein.dev" + ], "gitops-ui:flux-system/weave-gitops": [ "cd.bstein.dev" ], "harbor:harbor/harbor": [ "registry.bstein.dev" ], + "logging:logging/data-prepper": [ + "registry.bstein.dev" + ], "mailu:mailu-mailserver/mailu": [ "bstein.dev", "mail.bstein.dev" @@ -2764,6 +2862,7 @@ "alerts.bstein.dev" ], "monitoring:monitoring/grafana": [ + "bstein.dev", "metrics.bstein.dev", "sso.bstein.dev" ] diff --git a/services/comms/knowledge/catalog/atlas.yaml b/services/comms/knowledge/catalog/atlas.yaml index 06e2469..5bac143 100644 --- a/services/comms/knowledge/catalog/atlas.yaml +++ b/services/comms/knowledge/catalog/atlas.yaml @@ -7,10 +7,7 @@ sources: - name: bstein-dev-home path: services/bstein-dev-home targetNamespace: bstein-dev-home -- name: ci-demo - path: services/ci-demo - targetNamespace: null -- name: communication +- name: comms path: services/comms targetNamespace: comms - name: core @@ -43,12 +40,18 @@ sources: - name: keycloak path: services/keycloak targetNamespace: sso +- name: logging + path: services/logging + targetNamespace: null - name: longhorn-ui path: infrastructure/longhorn/ui-ingress targetNamespace: longhorn-system - name: mailu path: services/mailu targetNamespace: mailu-mailserver +- name: maintenance + path: services/maintenance + targetNamespace: null - name: metallb path: infrastructure/metallb targetNamespace: metallb-system @@ -70,9 +73,18 @@ sources: - name: openldap path: services/openldap targetNamespace: sso +- name: outline + path: services/outline + targetNamespace: outline - name: pegasus path: services/pegasus targetNamespace: jellyfin +- name: planka + path: services/planka + targetNamespace: planka +- name: postgres + path: infrastructure/postgres + 
targetNamespace: postgres - name: sui-metrics path: services/sui-metrics/overlays/atlas targetNamespace: sui-metrics @@ -100,7 +112,7 @@ workloads: serviceAccountName: null nodeSelector: {} images: - - ollama/ollama:latest + - ollama/ollama@sha256:2c9595c555fd70a28363489ac03bd5bf9e7c5bdf2890373c3a830ffd7252ce6d - kind: Deployment namespace: bstein-dev-home name: bstein-dev-home-backend @@ -111,7 +123,7 @@ kubernetes.io/arch: arm64 node-role.kubernetes.io/worker: 'true' images: - - registry.bstein.dev/bstein/bstein-dev-home-backend:0.1.1-84 + - registry.bstein.dev/bstein/bstein-dev-home-backend:0.1.1-92 - kind: Deployment namespace: bstein-dev-home name: bstein-dev-home-frontend @@ -122,7 +134,7 @@ kubernetes.io/arch: arm64 node-role.kubernetes.io/worker: 'true' images: - - registry.bstein.dev/bstein/bstein-dev-home-frontend:0.1.1-84 + - registry.bstein.dev/bstein/bstein-dev-home-frontend:0.1.1-92 - kind: Deployment namespace: bstein-dev-home name: chat-ai-gateway @@ -134,16 +146,6 @@ node-role.kubernetes.io/worker: 'true' images: - python:3.11-slim -- kind: Deployment - namespace: ci-demo - name: ci-demo - labels: - app.kubernetes.io/name: ci-demo - serviceAccountName: null - nodeSelector: - hardware: rpi4 - images: - - registry.bstein.dev/infra/ci-demo:v0.0.0-3 - kind: Deployment namespace: comms name: atlasbot @@ -173,7 +175,7 @@ nodeSelector: hardware: rpi5 images: - - ghcr.io/element-hq/element-call:latest + - ghcr.io/element-hq/element-call@sha256:e6897c7818331714eae19d83ef8ea94a8b41115f0d8d3f62c2fed2d02c65c9bc - kind: Deployment namespace: comms name: livekit @@ -222,42 +224,6 @@ nodeSelector: {} images: - nginx:1.27-alpine -- kind: Deployment - namespace: comms - name: othrys-element-element-web - labels: - app.kubernetes.io/instance: othrys-element - app.kubernetes.io/name: element-web - 
serviceAccountName: othrys-element-element-web - nodeSelector: - hardware: rpi5 - images: - - ghcr.io/element-hq/element-web:v1.12.6 -- kind: Deployment - namespace: comms - name: othrys-synapse-matrix-synapse - labels: - app.kubernetes.io/component: synapse - app.kubernetes.io/instance: othrys-synapse - app.kubernetes.io/name: matrix-synapse - serviceAccountName: default - nodeSelector: - hardware: rpi5 - images: - - ghcr.io/element-hq/synapse:v1.144.0 -- kind: Deployment - namespace: comms - name: othrys-synapse-redis-master - labels: - app.kubernetes.io/component: master - app.kubernetes.io/instance: othrys-synapse - app.kubernetes.io/managed-by: Helm - app.kubernetes.io/name: redis - helm.sh/chart: redis-17.17.1 - serviceAccountName: othrys-synapse-redis - nodeSelector: {} - images: - - docker.io/bitnamilegacy/redis:7.0.12-debian-11-r34 - kind: DaemonSet namespace: crypto name: monero-xmrig @@ -267,7 +233,7 @@ workloads: nodeSelector: node-role.kubernetes.io/worker: 'true' images: - - ghcr.io/tari-project/xmrig:latest + - ghcr.io/tari-project/xmrig@sha256:80defbfd0b640d604c91cb5101d3642db7928e1e68ee3c6b011289b3565a39d9 - kind: Deployment namespace: crypto name: monero-p2pool @@ -460,6 +426,46 @@ workloads: kubernetes.io/os: linux images: - hashicorp/vault-csi-provider:1.7.0 +- kind: DaemonSet + namespace: logging + name: node-image-gc-rpi4 + labels: + app: node-image-gc-rpi4 + serviceAccountName: node-image-gc-rpi4 + nodeSelector: + hardware: rpi4 + images: + - bitnami/kubectl@sha256:554ab88b1858e8424c55de37ad417b16f2a0e65d1607aa0f3fe3ce9b9f10b131 +- kind: DaemonSet + namespace: logging + name: node-image-prune-rpi5 + labels: + app: node-image-prune-rpi5 + serviceAccountName: node-image-prune-rpi5 + nodeSelector: + hardware: rpi5 + images: + - bitnami/kubectl@sha256:554ab88b1858e8424c55de37ad417b16f2a0e65d1607aa0f3fe3ce9b9f10b131 +- kind: DaemonSet + namespace: logging + name: node-log-rotation + labels: + app: node-log-rotation + serviceAccountName: 
node-log-rotation + nodeSelector: + hardware: rpi5 + images: + - bitnami/kubectl@sha256:554ab88b1858e8424c55de37ad417b16f2a0e65d1607aa0f3fe3ce9b9f10b131 +- kind: Deployment + namespace: logging + name: oauth2-proxy-logs + labels: + app: oauth2-proxy-logs + serviceAccountName: null + nodeSelector: + node-role.kubernetes.io/worker: 'true' + images: + - quay.io/oauth2-proxy/oauth2-proxy:v7.6.0 - kind: Deployment namespace: longhorn-system name: oauth2-proxy-longhorn @@ -479,7 +485,7 @@ workloads: nodeSelector: mailu.bstein.dev/vip: 'true' images: - - lachlanevenson/k8s-kubectl:latest + - registry.bstein.dev/bstein/kubectl:1.35.0 - kind: Deployment namespace: mailu-mailserver name: mailu-sync-listener @@ -490,30 +496,24 @@ workloads: images: - python:3.11-alpine - kind: DaemonSet - namespace: metallb-system - name: metallb-speaker + namespace: maintenance + name: node-image-sweeper labels: - app.kubernetes.io/component: speaker - app.kubernetes.io/instance: metallb - app.kubernetes.io/name: metallb - serviceAccountName: metallb-speaker + app: node-image-sweeper + serviceAccountName: node-image-sweeper nodeSelector: kubernetes.io/os: linux images: - - quay.io/frrouting/frr:10.4.1 - - quay.io/metallb/speaker:v0.15.3 -- kind: Deployment - namespace: metallb-system - name: metallb-controller + - python:3.12.9-alpine3.20 +- kind: DaemonSet + namespace: maintenance + name: node-nofile labels: - app.kubernetes.io/component: controller - app.kubernetes.io/instance: metallb - app.kubernetes.io/name: metallb - serviceAccountName: metallb-controller - nodeSelector: - kubernetes.io/os: linux + app: node-nofile + serviceAccountName: node-nofile + nodeSelector: {} images: - - quay.io/metallb/controller:v0.15.3 + - bitnami/kubectl@sha256:554ab88b1858e8424c55de37ad417b16f2a0e65d1607aa0f3fe3ce9b9f10b131 - kind: DaemonSet namespace: monitoring name: dcgm-exporter @@ -523,6 +523,16 @@ workloads: nodeSelector: {} images: - 
registry.bstein.dev/monitoring/dcgm-exporter:4.4.2-4.7.0-ubuntu22.04 +- kind: DaemonSet + namespace: monitoring + name: jetson-tegrastats-exporter + labels: + app: jetson-tegrastats-exporter + serviceAccountName: default + nodeSelector: + jetson: 'true' + images: + - python:3.10-slim - kind: Deployment namespace: monitoring name: postmark-exporter @@ -541,7 +551,7 @@ workloads: nodeSelector: hardware: rpi5 images: - - collabora/code:latest + - collabora/code@sha256:3c58d0e9bae75e4647467d0c7d91cb66f261d3e814709aed590b5c334a04db26 - kind: Deployment namespace: nextcloud name: nextcloud @@ -552,6 +562,46 @@ workloads: hardware: rpi5 images: - nextcloud:29-apache +- kind: Deployment + namespace: outline + name: outline + labels: + app: outline + serviceAccountName: null + nodeSelector: + node-role.kubernetes.io/worker: 'true' + images: + - outlinewiki/outline:1.2.0 +- kind: Deployment + namespace: outline + name: outline-redis + labels: + app: outline-redis + serviceAccountName: null + nodeSelector: + node-role.kubernetes.io/worker: 'true' + images: + - redis:7.4.1-alpine +- kind: Deployment + namespace: planka + name: planka + labels: + app: planka + serviceAccountName: null + nodeSelector: + node-role.kubernetes.io/worker: 'true' + images: + - ghcr.io/plankanban/planka:2.0.0-rc.4 +- kind: StatefulSet + namespace: postgres + name: postgres + labels: + app: postgres + serviceAccountName: postgres-vault + nodeSelector: + node-role.kubernetes.io/worker: 'true' + images: + - postgres:15 - kind: Deployment namespace: sso name: keycloak @@ -663,16 +713,6 @@ services: port: 80 targetPort: 8080 protocol: TCP -- namespace: ci-demo - name: ci-demo - type: ClusterIP - selector: - app.kubernetes.io/name: ci-demo - ports: - - name: http - port: 80 - targetPort: http - protocol: TCP - namespace: comms name: coturn type: LoadBalancer @@ -971,64 +1011,6 @@ services: port: 80 targetPort: 80 protocol: TCP -- namespace: comms - name: othrys-element-element-web - type: ClusterIP - 
selector: - app.kubernetes.io/instance: othrys-element - app.kubernetes.io/name: element-web - ports: - - name: http - port: 80 - targetPort: http - protocol: TCP -- namespace: comms - name: othrys-synapse-matrix-synapse - type: ClusterIP - selector: - app.kubernetes.io/component: synapse - app.kubernetes.io/instance: othrys-synapse - app.kubernetes.io/name: matrix-synapse - ports: - - name: http - port: 8008 - targetPort: http - protocol: TCP -- namespace: comms - name: othrys-synapse-redis-headless - type: ClusterIP - selector: - app.kubernetes.io/instance: othrys-synapse - app.kubernetes.io/name: redis - ports: - - name: tcp-redis - port: 6379 - targetPort: redis - protocol: TCP -- namespace: comms - name: othrys-synapse-redis-master - type: ClusterIP - selector: - app.kubernetes.io/component: master - app.kubernetes.io/instance: othrys-synapse - app.kubernetes.io/name: redis - ports: - - name: tcp-redis - port: 6379 - targetPort: redis - protocol: TCP -- namespace: comms - name: othrys-synapse-replication - type: ClusterIP - selector: - app.kubernetes.io/component: synapse - app.kubernetes.io/instance: othrys-synapse - app.kubernetes.io/name: matrix-synapse - ports: - - name: replication - port: 9093 - targetPort: replication - protocol: TCP - namespace: crypto name: monerod type: ClusterIP @@ -1156,6 +1138,16 @@ services: port: 443 targetPort: websecure protocol: TCP +- namespace: logging + name: oauth2-proxy-logs + type: ClusterIP + selector: + app: oauth2-proxy-logs + ports: + - name: http + port: 80 + targetPort: 4180 + protocol: TCP - namespace: longhorn-system name: oauth2-proxy-longhorn type: ClusterIP @@ -1208,18 +1200,6 @@ services: port: 8080 targetPort: 8080 protocol: TCP -- namespace: metallb-system - name: metallb-webhook-service - type: ClusterIP - selector: - app.kubernetes.io/component: controller - app.kubernetes.io/instance: metallb - app.kubernetes.io/name: metallb - ports: - - name: null - port: 443 - targetPort: 9443 - protocol: TCP - 
namespace: monitoring name: dcgm-exporter type: ClusterIP @@ -1230,6 +1210,16 @@ services: port: 9400 targetPort: metrics protocol: TCP +- namespace: monitoring + name: jetson-tegrastats-exporter + type: ClusterIP + selector: + app: jetson-tegrastats-exporter + ports: + - name: metrics + port: 9100 + targetPort: metrics + protocol: TCP - namespace: monitoring name: postmark-exporter type: ClusterIP @@ -1260,6 +1250,46 @@ services: port: 80 targetPort: http protocol: TCP +- namespace: outline + name: outline + type: ClusterIP + selector: + app: outline + ports: + - name: http + port: 80 + targetPort: http + protocol: TCP +- namespace: outline + name: outline-redis + type: ClusterIP + selector: + app: outline-redis + ports: + - name: redis + port: 6379 + targetPort: redis + protocol: TCP +- namespace: planka + name: planka + type: ClusterIP + selector: + app: planka + ports: + - name: http + port: 80 + targetPort: http + protocol: TCP +- namespace: postgres + name: postgres-service + type: ClusterIP + selector: + app: postgres + ports: + - name: postgres + port: 5432 + targetPort: 5432 + protocol: TCP - namespace: sso name: keycloak type: ClusterIP @@ -1391,7 +1421,7 @@ http_endpoints: via: kind: Ingress name: matrix-wellknown-bstein-dev - source: communication + source: comms - host: bstein.dev path: /.well-known/matrix/server backend: @@ -1402,7 +1432,7 @@ http_endpoints: via: kind: Ingress name: matrix-wellknown-bstein-dev - source: communication + source: comms - host: bstein.dev path: /api backend: @@ -1428,7 +1458,7 @@ http_endpoints: via: kind: Ingress name: element-call - source: communication + source: comms - host: chat.ai.bstein.dev path: / backend: @@ -1480,7 +1510,7 @@ http_endpoints: via: kind: Ingress name: livekit-jwt-ingress - source: communication + source: comms - host: kit.live.bstein.dev path: /livekit/sfu backend: @@ -1493,20 +1523,7 @@ http_endpoints: via: kind: Ingress name: livekit-ingress - source: communication -- host: live.bstein.dev - 
path: / - backend: - namespace: comms - service: othrys-element-element-web - port: 80 - workloads: - - kind: Deployment - name: othrys-element-element-web - via: - kind: Ingress - name: othrys-element-element-web - source: communication + source: comms - host: live.bstein.dev path: /.well-known/matrix/client backend: @@ -1517,7 +1534,7 @@ http_endpoints: via: kind: Ingress name: matrix-wellknown - source: communication + source: comms - host: live.bstein.dev path: /.well-known/matrix/server backend: @@ -1528,20 +1545,31 @@ http_endpoints: via: kind: Ingress name: matrix-wellknown - source: communication + source: comms - host: live.bstein.dev path: /_matrix backend: namespace: comms service: othrys-synapse-matrix-synapse port: 8008 - workloads: &id002 - - kind: Deployment - name: othrys-synapse-matrix-synapse + workloads: [] via: kind: Ingress name: matrix-routing - source: communication + source: comms +- host: logs.bstein.dev + path: / + backend: + namespace: logging + service: oauth2-proxy-logs + port: http + workloads: + - kind: Deployment + name: oauth2-proxy-logs + via: + kind: Ingress + name: logs + source: logging - host: longhorn.bstein.dev path: / backend: @@ -1572,13 +1600,13 @@ http_endpoints: namespace: comms service: matrix-authentication-service port: 8080 - workloads: &id003 + workloads: &id002 - kind: Deployment name: matrix-authentication-service via: kind: Ingress name: matrix-routing - source: communication + source: comms - host: matrix.live.bstein.dev path: /.well-known/matrix/client backend: @@ -1589,7 +1617,7 @@ http_endpoints: via: kind: Ingress name: matrix-wellknown-matrix-live - source: communication + source: comms - host: matrix.live.bstein.dev path: /.well-known/matrix/server backend: @@ -1600,86 +1628,86 @@ http_endpoints: via: kind: Ingress name: matrix-wellknown-matrix-live - source: communication + source: comms - host: matrix.live.bstein.dev path: /_matrix backend: namespace: comms service: othrys-synapse-matrix-synapse port: 
8008 - workloads: *id002 + workloads: [] via: kind: Ingress name: matrix-routing - source: communication + source: comms - host: matrix.live.bstein.dev path: /_matrix/client/r0/register backend: namespace: comms service: matrix-guest-register port: 8080 - workloads: &id004 + workloads: &id003 - kind: Deployment name: matrix-guest-register via: kind: Ingress name: matrix-routing - source: communication + source: comms - host: matrix.live.bstein.dev path: /_matrix/client/v3/login backend: namespace: comms service: matrix-authentication-service port: 8080 - workloads: *id003 + workloads: *id002 via: kind: Ingress name: matrix-routing - source: communication + source: comms - host: matrix.live.bstein.dev path: /_matrix/client/v3/logout backend: namespace: comms service: matrix-authentication-service port: 8080 - workloads: *id003 + workloads: *id002 via: kind: Ingress name: matrix-routing - source: communication + source: comms - host: matrix.live.bstein.dev path: /_matrix/client/v3/refresh backend: namespace: comms service: matrix-authentication-service port: 8080 - workloads: *id003 + workloads: *id002 via: kind: Ingress name: matrix-routing - source: communication + source: comms - host: matrix.live.bstein.dev path: /_matrix/client/v3/register backend: namespace: comms service: matrix-guest-register port: 8080 - workloads: *id004 + workloads: *id003 via: kind: Ingress name: matrix-routing - source: communication + source: comms - host: matrix.live.bstein.dev path: /_synapse backend: namespace: comms service: othrys-synapse-matrix-synapse port: 8008 - workloads: *id002 + workloads: [] via: kind: Ingress name: matrix-routing - source: communication + source: comms - host: monero.bstein.dev path: / backend: @@ -1693,6 +1721,19 @@ http_endpoints: kind: Ingress name: monerod source: monerod +- host: notes.bstein.dev + path: / + backend: + namespace: outline + service: outline + port: 80 + workloads: + - kind: Deployment + name: outline + via: + kind: Ingress + name: 
outline + source: outline - host: office.bstein.dev path: / backend: @@ -1771,6 +1812,19 @@ http_endpoints: kind: Ingress name: jellyfin source: jellyfin +- host: tasks.bstein.dev + path: / + backend: + namespace: planka + service: planka + port: 80 + workloads: + - kind: Deployment + name: planka + via: + kind: Ingress + name: planka + source: planka - host: vault.bstein.dev path: / backend: @@ -1785,15 +1839,29 @@ http_endpoints: name: vaultwarden-ingress source: vaultwarden helmrelease_host_hints: + comms:comms/othrys-element: + - call.live.bstein.dev + - live.bstein.dev + - matrix.live.bstein.dev + comms:comms/othrys-synapse: + - bstein.dev + - kit.live.bstein.dev + - live.bstein.dev + - matrix.live.bstein.dev + - registry.bstein.dev + - sso.bstein.dev gitops-ui:flux-system/weave-gitops: - cd.bstein.dev harbor:harbor/harbor: - registry.bstein.dev + logging:logging/data-prepper: + - registry.bstein.dev mailu:mailu-mailserver/mailu: - bstein.dev - mail.bstein.dev monitoring:monitoring/alertmanager: - alerts.bstein.dev monitoring:monitoring/grafana: + - bstein.dev - metrics.bstein.dev - sso.bstein.dev diff --git a/services/comms/knowledge/diagrams/atlas-http.mmd b/services/comms/knowledge/diagrams/atlas-http.mmd index ddd33d8..ab7c362 100644 --- a/services/comms/knowledge/diagrams/atlas-http.mmd +++ b/services/comms/knowledge/diagrams/atlas-http.mmd @@ -47,15 +47,14 @@ flowchart LR wl_comms_livekit["comms/livekit (Deployment)"] svc_comms_livekit --> wl_comms_livekit host_live_bstein_dev["live.bstein.dev"] - svc_comms_othrys_element_element_web["comms/othrys-element-element-web (Service)"] - host_live_bstein_dev --> svc_comms_othrys_element_element_web - wl_comms_othrys_element_element_web["comms/othrys-element-element-web (Deployment)"] - svc_comms_othrys_element_element_web --> wl_comms_othrys_element_element_web host_live_bstein_dev --> svc_comms_matrix_wellknown svc_comms_othrys_synapse_matrix_synapse["comms/othrys-synapse-matrix-synapse (Service)"] 
host_live_bstein_dev --> svc_comms_othrys_synapse_matrix_synapse - wl_comms_othrys_synapse_matrix_synapse["comms/othrys-synapse-matrix-synapse (Deployment)"] - svc_comms_othrys_synapse_matrix_synapse --> wl_comms_othrys_synapse_matrix_synapse + host_logs_bstein_dev["logs.bstein.dev"] + svc_logging_oauth2_proxy_logs["logging/oauth2-proxy-logs (Service)"] + host_logs_bstein_dev --> svc_logging_oauth2_proxy_logs + wl_logging_oauth2_proxy_logs["logging/oauth2-proxy-logs (Deployment)"] + svc_logging_oauth2_proxy_logs --> wl_logging_oauth2_proxy_logs host_longhorn_bstein_dev["longhorn.bstein.dev"] svc_longhorn_system_oauth2_proxy_longhorn["longhorn-system/oauth2-proxy-longhorn (Service)"] host_longhorn_bstein_dev --> svc_longhorn_system_oauth2_proxy_longhorn @@ -80,6 +79,11 @@ flowchart LR host_monero_bstein_dev --> svc_crypto_monerod wl_crypto_monerod["crypto/monerod (Deployment)"] svc_crypto_monerod --> wl_crypto_monerod + host_notes_bstein_dev["notes.bstein.dev"] + svc_outline_outline["outline/outline (Service)"] + host_notes_bstein_dev --> svc_outline_outline + wl_outline_outline["outline/outline (Deployment)"] + svc_outline_outline --> wl_outline_outline host_office_bstein_dev["office.bstein.dev"] svc_nextcloud_collabora["nextcloud/collabora (Service)"] host_office_bstein_dev --> svc_nextcloud_collabora @@ -110,6 +114,11 @@ flowchart LR host_stream_bstein_dev --> svc_jellyfin_jellyfin wl_jellyfin_jellyfin["jellyfin/jellyfin (Deployment)"] svc_jellyfin_jellyfin --> wl_jellyfin_jellyfin + host_tasks_bstein_dev["tasks.bstein.dev"] + svc_planka_planka["planka/planka (Service)"] + host_tasks_bstein_dev --> svc_planka_planka + wl_planka_planka["planka/planka (Deployment)"] + svc_planka_planka --> wl_planka_planka host_vault_bstein_dev["vault.bstein.dev"] svc_vaultwarden_vaultwarden_service["vaultwarden/vaultwarden-service (Service)"] host_vault_bstein_dev --> svc_vaultwarden_vaultwarden_service @@ -133,10 +142,7 @@ flowchart LR wl_comms_livekit_token_service 
svc_comms_livekit wl_comms_livekit - svc_comms_othrys_element_element_web - wl_comms_othrys_element_element_web svc_comms_othrys_synapse_matrix_synapse - wl_comms_othrys_synapse_matrix_synapse svc_comms_matrix_authentication_service wl_comms_matrix_authentication_service svc_comms_matrix_guest_register @@ -160,6 +166,10 @@ flowchart LR svc_jenkins_jenkins wl_jenkins_jenkins end + subgraph logging[logging] + svc_logging_oauth2_proxy_logs + wl_logging_oauth2_proxy_logs + end subgraph longhorn_system[longhorn-system] svc_longhorn_system_oauth2_proxy_longhorn wl_longhorn_system_oauth2_proxy_longhorn @@ -173,6 +183,14 @@ flowchart LR svc_nextcloud_collabora wl_nextcloud_collabora end + subgraph outline[outline] + svc_outline_outline + wl_outline_outline + end + subgraph planka[planka] + svc_planka_planka + wl_planka_planka + end subgraph sso[sso] svc_sso_oauth2_proxy wl_sso_oauth2_proxy diff --git a/services/comms/kustomization.yaml b/services/comms/kustomization.yaml index 2008843..6490b67 100644 --- a/services/comms/kustomization.yaml +++ b/services/comms/kustomization.yaml @@ -5,7 +5,7 @@ namespace: comms resources: - namespace.yaml - mas-configmap.yaml - - element-rendered.yaml + - helmrelease.yaml - livekit-config.yaml - element-call-config.yaml - element-call-deployment.yaml @@ -24,7 +24,6 @@ resources: - synapse-seeder-admin-ensure-job.yaml - synapse-user-seed-job.yaml - mas-local-users-ensure-job.yaml - - synapse-rendered.yaml - mas-deployment.yaml - livekit-token-deployment.yaml - livekit.yaml @@ -39,9 +38,6 @@ resources: - livekit-middlewares.yaml - matrix-ingress.yaml -patches: - - path: synapse-deployment-strategy-patch.yaml - configMapGenerator: - name: matrix-guest-register files: diff --git a/services/comms/mas-admin-client-secret-ensure-job.yaml b/services/comms/mas-admin-client-secret-ensure-job.yaml index 3843877..3d65b43 100644 --- a/services/comms/mas-admin-client-secret-ensure-job.yaml +++ b/services/comms/mas-admin-client-secret-ensure-job.yaml @@ 
-62,7 +62,7 @@ spec: mountPath: /work containers: - name: patch - image: bitnami/kubectl:latest + image: registry.bstein.dev/bstein/kubectl:1.35.0 command: ["/bin/sh", "-c"] args: - | diff --git a/services/comms/mas-db-ensure-job.yaml b/services/comms/mas-db-ensure-job.yaml index 1c8b5c4..50603d5 100644 --- a/services/comms/mas-db-ensure-job.yaml +++ b/services/comms/mas-db-ensure-job.yaml @@ -13,7 +13,7 @@ spec: restartPolicy: Never containers: - name: ensure - image: bitnami/kubectl:latest + image: registry.bstein.dev/bstein/kubectl:1.35.0 command: ["/bin/sh", "-c"] args: - | diff --git a/services/comms/synapse-deployment-strategy-patch.yaml b/services/comms/synapse-deployment-strategy-patch.yaml deleted file mode 100644 index 59b8e32..0000000 --- a/services/comms/synapse-deployment-strategy-patch.yaml +++ /dev/null @@ -1,11 +0,0 @@ -# services/comms/synapse-deployment-strategy-patch.yaml -apiVersion: apps/v1 -kind: Deployment -metadata: - name: othrys-synapse-matrix-synapse -spec: - strategy: - type: RollingUpdate - rollingUpdate: - maxSurge: 0 - maxUnavailable: 1 diff --git a/services/comms/synapse-rendered.yaml b/services/comms/synapse-rendered.yaml deleted file mode 100644 index 83fce79..0000000 --- a/services/comms/synapse-rendered.yaml +++ /dev/null @@ -1,895 +0,0 @@ ---- -# Source: matrix-synapse/charts/redis/templates/serviceaccount.yaml -apiVersion: v1 -kind: ServiceAccount -automountServiceAccountToken: true -metadata: - name: othrys-synapse-redis - labels: - app.kubernetes.io/instance: othrys-synapse - app.kubernetes.io/managed-by: Helm - app.kubernetes.io/name: redis - helm.sh/chart: redis-17.17.1 ---- -# Source: matrix-synapse/templates/secrets.yaml -apiVersion: v1 -kind: Secret -metadata: - name: othrys-synapse-matrix-synapse - labels: - helm.sh/chart: matrix-synapse-3.12.17 - app.kubernetes.io/name: matrix-synapse - app.kubernetes.io/instance: othrys-synapse - app.kubernetes.io/version: "1.144.0" - app.kubernetes.io/managed-by: Helm -stringData: - 
config.yaml: | - ## Registration ## - - ## API Configuration ## - - ## Database configuration ## - - database: - name: "psycopg2" - args: - user: "synapse" - password: "@@POSTGRES_PASSWORD@@" - database: "synapse" - host: "postgres-service.postgres.svc.cluster.local" - port: 5432 - sslmode: "prefer" - cp_min: 5 - cp_max: 10 - - - ## Redis configuration ## - - redis: - enabled: true - host: "othrys-synapse-redis-master" - port: 6379 - password: "@@REDIS_PASSWORD@@" ---- -# Source: matrix-synapse/charts/redis/templates/configmap.yaml -apiVersion: v1 -kind: ConfigMap -metadata: - name: othrys-synapse-redis-configuration - labels: - app.kubernetes.io/instance: othrys-synapse - app.kubernetes.io/managed-by: Helm - app.kubernetes.io/name: redis - helm.sh/chart: redis-17.17.1 -data: - redis.conf: |- - # User-supplied common configuration: - # Enable AOF https://redis.io/topics/persistence#append-only-file - appendonly yes - # Disable RDB persistence, AOF persistence already enabled. - save "" - # End of common configuration - master.conf: |- - dir /data - # User-supplied master configuration: - rename-command FLUSHDB "" - rename-command FLUSHALL "" - # End of master configuration - replica.conf: |- - dir /data - # User-supplied replica configuration: - rename-command FLUSHDB "" - rename-command FLUSHALL "" - # End of replica configuration ---- -# Source: matrix-synapse/templates/configuration.yaml -apiVersion: v1 -kind: ConfigMap -metadata: - name: othrys-synapse-matrix-synapse - labels: - helm.sh/chart: matrix-synapse-3.12.17 - app.kubernetes.io/name: matrix-synapse - app.kubernetes.io/instance: othrys-synapse - app.kubernetes.io/version: "1.144.0" - app.kubernetes.io/managed-by: Helm -data: - log.yaml: | - version: 1 - formatters: - precise: - format: '%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s- %(message)s' - filters: - context: - (): synapse.util.logcontext.LoggingContextFilter - request: "" - handlers: - console: - class: logging.StreamHandler 
- formatter: precise - filters: [context] - level: INFO - loggers: - synapse: - level: INFO - root: - level: INFO - handlers: [console] - homeserver.yaml: | - # NOTE: - # Secrets are stored in separate configs to better fit K8s concepts - - ## Server ## - - server_name: "live.bstein.dev" - public_baseurl: "https://matrix.live.bstein.dev" - pid_file: /homeserver.pid - web_client: False - soft_file_limit: 0 - log_config: "/synapse/config/log.yaml" - report_stats: false - - instance_map: - main: - host: othrys-synapse-replication - port: 9093 - - ## Ports ## - - listeners: - - port: 8008 - tls: false - bind_addresses: ["0.0.0.0"] - type: http - x_forwarded: true - - resources: - - names: - - client - - federation - compress: false - - - port: 9090 - tls: false - bind_addresses: ["::"] - type: http - - resources: - - names: [metrics] - compress: false - - - port: 9093 - tls: false - bind_addresses: ["::"] - type: http - - resources: - - names: [replication] - compress: false - - ## Files ## - - media_store_path: "/synapse/data/media" - uploads_path: "/synapse/data/uploads" - - ## Registration ## - - enable_registration: false - - ## Metrics ### - - enable_metrics: true - - ## Signing Keys ## - - signing_key_path: "/synapse/keys/signing.key" - macaroon_secret_key: "@@MACAROON_SECRET_KEY@@" - - # The trusted servers to download signing keys from. 
- trusted_key_servers: - - server_name: matrix.org - - ## Workers ## - - ## Extra config ## - - allow_guest_access: true - allow_public_rooms_without_auth: true - auto_join_rooms: - - "#othrys:live.bstein.dev" - autocreate_auto_join_rooms: true - default_room_version: "11" - experimental_features: - msc3266_enabled: true - msc4108_enabled: true - msc4143_enabled: true - msc4222_enabled: true - max_event_delay_duration: 24h - password_config: - enabled: false - turn_uris: - - "turn:turn.live.bstein.dev:3478?transport=udp" - - "turn:turn.live.bstein.dev:3478?transport=tcp" - - "turns:turn.live.bstein.dev:5349?transport=tcp" - turn_shared_secret: "@@TURN_SECRET@@" - turn_allow_guests: true - turn_user_lifetime: 86400000 - rc_login: - address: - burst_count: 20 - per_second: 5 - account: - burst_count: 20 - per_second: 5 - failed_attempts: - burst_count: 20 - per_second: 5 - rc_message: - per_second: 0.5 - burst_count: 30 - rc_delayed_event_mgmt: - per_second: 1 - burst_count: 20 - room_list_publication_rules: - - action: allow - well_known_client: - "m.homeserver": - "base_url": "https://matrix.live.bstein.dev" - "org.matrix.msc2965.authentication": - "issuer": "https://matrix.live.bstein.dev/" - "account": "https://matrix.live.bstein.dev/account/" - "org.matrix.msc4143.rtc_foci": - - type: "livekit" - livekit_service_url: "https://kit.live.bstein.dev/livekit/jwt" - - matrix_authentication_service: - enabled: true - endpoint: http://matrix-authentication-service:8080/ - secret: "@@MAS_SHARED_SECRET@@" ---- -# Source: matrix-synapse/templates/pvc.yaml -kind: PersistentVolumeClaim -apiVersion: v1 -metadata: - name: othrys-synapse-matrix-synapse - labels: - helm.sh/chart: matrix-synapse-3.12.17 - app.kubernetes.io/name: matrix-synapse - app.kubernetes.io/instance: othrys-synapse - app.kubernetes.io/version: "1.144.0" - app.kubernetes.io/managed-by: Helm -spec: - accessModes: - - "ReadWriteOnce" - resources: - requests: - storage: "50Gi" - storageClassName: "asteria" ---- 
-# Source: matrix-synapse/charts/redis/templates/headless-svc.yaml -apiVersion: v1 -kind: Service -metadata: - name: othrys-synapse-redis-headless - labels: - app.kubernetes.io/instance: othrys-synapse - app.kubernetes.io/managed-by: Helm - app.kubernetes.io/name: redis - helm.sh/chart: redis-17.17.1 - annotations: - -spec: - type: ClusterIP - clusterIP: None - ports: - - name: tcp-redis - port: 6379 - targetPort: redis - selector: - app.kubernetes.io/instance: othrys-synapse - app.kubernetes.io/name: redis ---- -# Source: matrix-synapse/charts/redis/templates/master/service.yaml -apiVersion: v1 -kind: Service -metadata: - name: othrys-synapse-redis-master - labels: - app.kubernetes.io/instance: othrys-synapse - app.kubernetes.io/managed-by: Helm - app.kubernetes.io/name: redis - helm.sh/chart: redis-17.17.1 - app.kubernetes.io/component: master -spec: - type: ClusterIP - internalTrafficPolicy: Cluster - sessionAffinity: None - ports: - - name: tcp-redis - port: 6379 - targetPort: redis - nodePort: null - selector: - app.kubernetes.io/instance: othrys-synapse - app.kubernetes.io/name: redis - app.kubernetes.io/component: master ---- -# Source: matrix-synapse/templates/service.yaml -apiVersion: v1 -kind: Service -metadata: - name: othrys-synapse-matrix-synapse - labels: - helm.sh/chart: matrix-synapse-3.12.17 - app.kubernetes.io/name: matrix-synapse - app.kubernetes.io/instance: othrys-synapse - app.kubernetes.io/version: "1.144.0" - app.kubernetes.io/managed-by: Helm -spec: - type: ClusterIP - ports: - - port: 8008 - targetPort: http - protocol: TCP - name: http - selector: - app.kubernetes.io/component: synapse - app.kubernetes.io/name: matrix-synapse - app.kubernetes.io/instance: othrys-synapse ---- -# Source: matrix-synapse/templates/service.yaml -apiVersion: v1 -kind: Service -metadata: - name: othrys-synapse-replication - labels: - helm.sh/chart: matrix-synapse-3.12.17 - app.kubernetes.io/name: matrix-synapse - app.kubernetes.io/instance: othrys-synapse - 
app.kubernetes.io/version: "1.144.0" - app.kubernetes.io/managed-by: Helm -spec: - type: ClusterIP - ports: - - port: 9093 - targetPort: replication - protocol: TCP - name: replication - selector: - app.kubernetes.io/name: matrix-synapse - app.kubernetes.io/instance: othrys-synapse - app.kubernetes.io/component: synapse ---- -# Source: matrix-synapse/charts/redis/templates/master/application.yaml -apiVersion: apps/v1 -kind: Deployment -metadata: - name: othrys-synapse-redis-master - labels: - app.kubernetes.io/instance: othrys-synapse - app.kubernetes.io/managed-by: Helm - app.kubernetes.io/name: redis - helm.sh/chart: redis-17.17.1 - app.kubernetes.io/component: master -spec: - replicas: 1 - selector: - matchLabels: - app.kubernetes.io/instance: othrys-synapse - app.kubernetes.io/name: redis - app.kubernetes.io/component: master - strategy: - type: RollingUpdate - template: - metadata: - labels: - app.kubernetes.io/instance: othrys-synapse - app.kubernetes.io/managed-by: Helm - app.kubernetes.io/name: redis - helm.sh/chart: redis-17.17.1 - app.kubernetes.io/component: master - annotations: - checksum/configmap: 86bcc953bb473748a3d3dc60b7c11f34e60c93519234d4c37f42e22ada559d47 - checksum/health: aff24913d801436ea469d8d374b2ddb3ec4c43ee7ab24663d5f8ff1a1b6991a9 - checksum/scripts: 560c33ff34d845009b51830c332aa05fa211444d1877d3526d3599be7543aaa5 - checksum/secret: 44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a - spec: - - securityContext: - fsGroup: 1001 - serviceAccountName: othrys-synapse-redis - automountServiceAccountToken: true - affinity: - podAffinity: - - podAntiAffinity: - preferredDuringSchedulingIgnoredDuringExecution: - - podAffinityTerm: - labelSelector: - matchLabels: - app.kubernetes.io/instance: othrys-synapse - app.kubernetes.io/name: redis - app.kubernetes.io/component: master - topologyKey: kubernetes.io/hostname - weight: 1 - nodeAffinity: - - enableServiceLinks: true - terminationGracePeriodSeconds: 30 - containers: - - name: 
redis - image: docker.io/bitnamilegacy/redis:7.0.12-debian-11-r34 - imagePullPolicy: "IfNotPresent" - securityContext: - allowPrivilegeEscalation: false - capabilities: - drop: - - ALL - runAsGroup: 0 - runAsNonRoot: true - runAsUser: 1001 - seccompProfile: - type: RuntimeDefault - command: - - /bin/bash - args: - - -c - - /opt/bitnami/scripts/start-scripts/start-master.sh - env: - - name: BITNAMI_DEBUG - value: "false" - - name: REDIS_REPLICATION_MODE - value: master - - name: ALLOW_EMPTY_PASSWORD - value: "no" - - name: REDIS_PASSWORD - valueFrom: - secretKeyRef: - name: synapse-redis - key: redis-password - - name: REDIS_TLS_ENABLED - value: "no" - - name: REDIS_PORT - value: "6379" - ports: - - name: redis - containerPort: 6379 - livenessProbe: - initialDelaySeconds: 20 - periodSeconds: 5 - # One second longer than command timeout should prevent generation of zombie processes. - timeoutSeconds: 6 - successThreshold: 1 - failureThreshold: 5 - exec: - command: - - sh - - -c - - /health/ping_liveness_local.sh 5 - readinessProbe: - initialDelaySeconds: 20 - periodSeconds: 5 - timeoutSeconds: 2 - successThreshold: 1 - failureThreshold: 5 - exec: - command: - - sh - - -c - - /health/ping_readiness_local.sh 1 - resources: - limits: {} - requests: {} - volumeMounts: - - name: start-scripts - mountPath: /opt/bitnami/scripts/start-scripts - - name: health - mountPath: /health - - name: redis-data - mountPath: /data - - name: config - mountPath: /opt/bitnami/redis/mounted-etc - - name: redis-tmp-conf - mountPath: /opt/bitnami/redis/etc/ - - name: tmp - mountPath: /tmp - volumes: - - name: start-scripts - configMap: - name: othrys-synapse-redis-scripts - defaultMode: 0755 - - name: health - configMap: - name: othrys-synapse-redis-health - defaultMode: 0755 - - name: config - configMap: - name: othrys-synapse-redis-configuration - - name: redis-tmp-conf - emptyDir: {} - - name: tmp - emptyDir: {} - - name: redis-data - emptyDir: {} ---- -# Source: 
matrix-synapse/templates/deployment.yaml -# Server: live.bstein.dev -apiVersion: apps/v1 -kind: Deployment -metadata: - name: othrys-synapse-matrix-synapse - labels: - helm.sh/chart: matrix-synapse-3.12.17 - app.kubernetes.io/name: matrix-synapse - app.kubernetes.io/instance: othrys-synapse - app.kubernetes.io/version: "1.144.0" - app.kubernetes.io/managed-by: Helm - app.kubernetes.io/component: synapse -spec: - replicas: 1 - strategy: - type: RollingUpdate - selector: - matchLabels: - app.kubernetes.io/name: matrix-synapse - app.kubernetes.io/instance: othrys-synapse - app.kubernetes.io/component: synapse - template: - metadata: - annotations: - checksum/config: manual-rtc-enable-11 - checksum/secrets: ec9f3b254a562a0f0709461eb74a8cc91b8c1a2fb06be2594a131776c2541773 - labels: - app.kubernetes.io/name: matrix-synapse - app.kubernetes.io/instance: othrys-synapse - app.kubernetes.io/component: synapse - spec: - serviceAccountName: default - - securityContext: - fsGroup: 666 - runAsGroup: 666 - runAsUser: 666 - containers: - - name: synapse - command: - - sh - - -c - - | - export POSTGRES_PASSWORD=$(echo "${POSTGRES_PASSWORD:-}" | sed 's/\//\\\//g' | sed 's/\&/\\\&/g') && \ - export REDIS_PASSWORD=$(echo "${REDIS_PASSWORD:-}" | sed 's/\//\\\//g' | sed 's/\&/\\\&/g') && \ - export OIDC_CLIENT_SECRET_ESCAPED=$(echo "${OIDC_CLIENT_SECRET:-}" | sed 's/[\\/&]/\\&/g') && \ - export TURN_SECRET_ESCAPED=$(echo "${TURN_SECRET:-}" | sed 's/[\\/&]/\\&/g') && \ - export MAS_SHARED_SECRET_ESCAPED=$(echo "${MAS_SHARED_SECRET:-}" | sed 's/[\\/&]/\\&/g') && \ - export MACAROON_SECRET_KEY_ESCAPED=$(echo "${MACAROON_SECRET_KEY:-}" | sed 's/[\\/&]/\\&/g') && \ - cat /synapse/secrets/*.yaml | \ - sed -e "s/@@POSTGRES_PASSWORD@@/${POSTGRES_PASSWORD:-}/" \ - -e "s/@@REDIS_PASSWORD@@/${REDIS_PASSWORD:-}/" \ - > /synapse/config/conf.d/secrets.yaml - - cp /synapse/config/homeserver.yaml /synapse/runtime-config/homeserver.yaml && \ - if [ -n "${OIDC_CLIENT_SECRET_ESCAPED}" ]; then \ - sed -i 
"s/@@OIDC_CLIENT_SECRET@@/${OIDC_CLIENT_SECRET_ESCAPED}/g" /synapse/runtime-config/homeserver.yaml; \ - fi; \ - if [ -n "${TURN_SECRET_ESCAPED}" ]; then \ - sed -i "s/@@TURN_SECRET@@/${TURN_SECRET_ESCAPED}/g" /synapse/runtime-config/homeserver.yaml; \ - fi; \ - if [ -n "${MAS_SHARED_SECRET_ESCAPED}" ]; then \ - sed -i "s/@@MAS_SHARED_SECRET@@/${MAS_SHARED_SECRET_ESCAPED}/g" /synapse/runtime-config/homeserver.yaml; \ - fi; \ - if [ -n "${MACAROON_SECRET_KEY_ESCAPED}" ]; then \ - sed -i "s/@@MACAROON_SECRET_KEY@@/${MACAROON_SECRET_KEY_ESCAPED}/g" /synapse/runtime-config/homeserver.yaml; \ - fi - exec python -B -m synapse.app.homeserver \ - -c /synapse/runtime-config/homeserver.yaml \ - -c /synapse/config/conf.d/ - env: - - name: POSTGRES_PASSWORD - valueFrom: - secretKeyRef: - name: synapse-db - key: POSTGRES_PASSWORD - - name: REDIS_PASSWORD - valueFrom: - secretKeyRef: - name: synapse-redis - key: redis-password - - name: OIDC_CLIENT_SECRET - valueFrom: - secretKeyRef: - name: synapse-oidc - key: client-secret - - name: TURN_SECRET - valueFrom: - secretKeyRef: - name: turn-shared-secret - key: TURN_STATIC_AUTH_SECRET - - name: MAS_SHARED_SECRET - valueFrom: - secretKeyRef: - name: mas-secrets-runtime - key: matrix_shared_secret - - name: MACAROON_SECRET_KEY - valueFrom: - secretKeyRef: - name: synapse-macaroon - key: macaroon_secret_key - image: "ghcr.io/element-hq/synapse:v1.144.0" - imagePullPolicy: IfNotPresent - securityContext: - {} - ports: - - name: http - containerPort: 8008 - protocol: TCP - - name: replication - containerPort: 9093 - protocol: TCP - - name: metrics - containerPort: 9090 - protocol: TCP - livenessProbe: - httpGet: - path: /health - port: http - readinessProbe: - httpGet: - path: /health - port: http - startupProbe: - failureThreshold: 12 - httpGet: - path: /health - port: http - volumeMounts: - - name: config - mountPath: /synapse/config - - name: runtime-config - mountPath: /synapse/runtime-config - - name: tmpconf - mountPath: 
/synapse/config/conf.d - - name: secrets - mountPath: /synapse/secrets - - name: signingkey - mountPath: /synapse/keys - - name: media - mountPath: /synapse/data - - name: tmpdir - mountPath: /tmp - resources: - limits: - cpu: "2" - memory: 3Gi - requests: - cpu: 500m - memory: 1Gi - volumes: - - name: config - configMap: - name: othrys-synapse-matrix-synapse - - name: secrets - secret: - secretName: othrys-synapse-matrix-synapse - - name: signingkey - secret: - secretName: "othrys-synapse-signingkey" - items: - - key: "signing.key" - path: signing.key - - name: tmpconf - emptyDir: {} - - name: tmpdir - emptyDir: {} - - name: runtime-config - emptyDir: {} - - name: media - persistentVolumeClaim: - claimName: othrys-synapse-matrix-synapse - nodeSelector: - hardware: rpi5 - affinity: - nodeAffinity: - preferredDuringSchedulingIgnoredDuringExecution: - - preference: - matchExpressions: - - key: hardware - operator: In - values: - - rpi5 - - rpi4 - weight: 50 ---- -# Source: matrix-synapse/templates/signing-key-job.yaml -apiVersion: v1 -kind: ServiceAccount -metadata: - name: othrys-synapse-signingkey-job - labels: - helm.sh/chart: matrix-synapse-3.12.17 - app.kubernetes.io/name: matrix-synapse - app.kubernetes.io/instance: othrys-synapse - app.kubernetes.io/version: "1.144.0" - app.kubernetes.io/managed-by: Helm - app.kubernetes.io/component: signingkey-job - annotations: - helm.sh/hook: pre-install - helm.sh/hook-delete-policy: hook-succeeded ---- -# Source: matrix-synapse/templates/signing-key-job.yaml -# Create secret if signing key job is enabled, or if we're running in ArgoCD and we don't have an existing secret -apiVersion: v1 -kind: Secret -metadata: - annotations: - helm.sh/hook: pre-install - helm.sh/hook-delete-policy: never - helm.sh/resource-policy: keep - # If for some reason we didn't detect ArgoCD, but are running in it, we want to make sure we don't delete the secret - argocd.argoproj.io/hook: Skip - name: othrys-synapse-signingkey - labels: - 
helm.sh/chart: matrix-synapse-3.12.17 - app.kubernetes.io/name: matrix-synapse - app.kubernetes.io/instance: othrys-synapse - app.kubernetes.io/version: "1.144.0" - app.kubernetes.io/managed-by: Helm - app.kubernetes.io/component: signingkey-job ---- -# Source: matrix-synapse/templates/signing-key-job.yaml -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - name: othrys-synapse-signingkey-job - labels: - helm.sh/chart: matrix-synapse-3.12.17 - app.kubernetes.io/name: matrix-synapse - app.kubernetes.io/instance: othrys-synapse - app.kubernetes.io/version: "1.144.0" - app.kubernetes.io/managed-by: Helm - app.kubernetes.io/component: signingkey-job - annotations: - helm.sh/hook: pre-install - helm.sh/hook-delete-policy: hook-succeeded -rules: - - apiGroups: - - "" - resources: - - secrets - resourceNames: - - othrys-synapse-signingkey - verbs: - - get - - update - - patch ---- -# Source: matrix-synapse/templates/signing-key-job.yaml -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - name: othrys-synapse-signingkey-job - labels: - helm.sh/chart: matrix-synapse-3.12.17 - app.kubernetes.io/name: matrix-synapse - app.kubernetes.io/instance: othrys-synapse - app.kubernetes.io/version: "1.144.0" - app.kubernetes.io/managed-by: Helm - app.kubernetes.io/component: signingkey-job - annotations: - helm.sh/hook: pre-install - helm.sh/hook-delete-policy: hook-succeeded -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: othrys-synapse-signingkey-job -subjects: - - kind: ServiceAccount - name: othrys-synapse-signingkey-job - namespace: comms ---- -# Source: matrix-synapse/templates/tests/test-connection.yaml -apiVersion: v1 -kind: Pod -metadata: - name: "othrys-synapse-matrix-synapse-test-connection" - labels: - helm.sh/chart: matrix-synapse-3.12.17 - app.kubernetes.io/name: matrix-synapse - app.kubernetes.io/instance: othrys-synapse - app.kubernetes.io/version: "1.144.0" - app.kubernetes.io/managed-by: Helm - annotations: - 
"helm.sh/hook": test-success -spec: - containers: - - name: wget - image: busybox - command: ['wget'] - args: ['othrys-synapse-matrix-synapse:8008/_matrix/client/versions'] - restartPolicy: Never ---- -# Source: matrix-synapse/templates/signing-key-job.yaml -apiVersion: batch/v1 -kind: Job -metadata: - name: othrys-synapse-signingkey-job - labels: - helm.sh/chart: matrix-synapse-3.12.17 - app.kubernetes.io/name: matrix-synapse - app.kubernetes.io/instance: othrys-synapse - app.kubernetes.io/version: "1.144.0" - app.kubernetes.io/managed-by: Helm - app.kubernetes.io/component: signingkey-job - annotations: - helm.sh/hook: pre-install - helm.sh/hook-delete-policy: hook-succeeded -spec: - ttlSecondsAfterFinished: 0 - template: - metadata: - labels: - helm.sh/chart: matrix-synapse-3.12.17 - app.kubernetes.io/name: matrix-synapse - app.kubernetes.io/instance: othrys-synapse - app.kubernetes.io/version: "1.144.0" - app.kubernetes.io/managed-by: Helm - app.kubernetes.io/component: signingkey-job - spec: - containers: - - command: - - sh - - -c - - | - echo "Generating signing key..." - if which generate_signing_key.py >/dev/null; then - generate_signing_key.py -o /synapse/keys/signing.key - else - generate_signing_key -o /synapse/keys/signing.key - fi - image: "matrixdotorg/synapse:latest" - imagePullPolicy: IfNotPresent - name: signing-key-generate - resources: - {} - securityContext: - {} - volumeMounts: - - mountPath: /synapse/keys - name: matrix-synapse-keys - - command: - - sh - - -c - - | - printf "Checking rights to update secret... 
" - kubectl auth can-i update secret/${SECRET_NAME} - /scripts/signing-key.sh - env: - - name: SECRET_NAME - value: othrys-synapse-signingkey - image: "bitnami/kubectl:latest" - imagePullPolicy: IfNotPresent - name: signing-key-upload - resources: - {} - securityContext: - {} - volumeMounts: - - mountPath: /scripts - name: scripts - readOnly: true - - mountPath: /synapse/keys - name: matrix-synapse-keys - readOnly: true - securityContext: - {} - restartPolicy: Never - serviceAccount: othrys-synapse-signingkey-job - volumes: - - name: scripts - configMap: - name: othrys-synapse-matrix-synapse-scripts - defaultMode: 0755 - - name: matrix-synapse-keys - emptyDir: {} - parallelism: 1 - completions: 1 - backoffLimit: 1 diff --git a/services/comms/synapse-signingkey-ensure-job.yaml b/services/comms/synapse-signingkey-ensure-job.yaml index 5ebaeda..8846d86 100644 --- a/services/comms/synapse-signingkey-ensure-job.yaml +++ b/services/comms/synapse-signingkey-ensure-job.yaml @@ -26,7 +26,7 @@ spec: mountPath: /work containers: - name: patch - image: bitnami/kubectl:latest + image: registry.bstein.dev/bstein/kubectl:1.35.0 command: ["/bin/sh", "-c"] args: - | diff --git a/services/comms/values-element.yaml b/services/comms/values-element.yaml deleted file mode 100644 index b8c7d87..0000000 --- a/services/comms/values-element.yaml +++ /dev/null @@ -1,59 +0,0 @@ -# services/comms/values-element.yaml -replicaCount: 1 - -defaultServer: - url: https://matrix.live.bstein.dev - name: live.bstein.dev - -config: - default_theme: dark - brand: Othrys - disable_custom_urls: true - disable_login_language_selector: true - disable_guests: false - show_labs_settings: true - features: - feature_group_calls: true - feature_video_rooms: true - feature_element_call_video_rooms: true - room_directory: - servers: - - live.bstein.dev - jitsi: {} - element_call: - url: https://call.live.bstein.dev - participant_limit: 16 - brand: Othrys Call - -ingress: - enabled: true - className: traefik - 
annotations: - cert-manager.io/cluster-issuer: letsencrypt - traefik.ingress.kubernetes.io/router.entrypoints: websecure - hosts: - - live.bstein.dev - tls: - - secretName: live-othrys-tls - hosts: [live.bstein.dev] - -resources: - requests: - cpu: 100m - memory: 256Mi - limits: - cpu: 500m - memory: 512Mi - -nodeSelector: - hardware: rpi5 - -affinity: - nodeAffinity: - preferredDuringSchedulingIgnoredDuringExecution: - - weight: 50 - preference: - matchExpressions: - - key: hardware - operator: In - values: ["rpi5","rpi4"] diff --git a/services/comms/values-synapse.yaml b/services/comms/values-synapse.yaml deleted file mode 100644 index 650d0e8..0000000 --- a/services/comms/values-synapse.yaml +++ /dev/null @@ -1,132 +0,0 @@ -# services/comms/values-synapse.yaml -serverName: live.bstein.dev -publicServerName: matrix.live.bstein.dev - -config: - publicBaseurl: https://matrix.live.bstein.dev - -externalPostgresql: - host: postgres-service.postgres.svc.cluster.local - port: 5432 - username: synapse - existingSecret: synapse-db - existingSecretPasswordKey: POSTGRES_PASSWORD - database: synapse - -redis: - enabled: true - auth: - enabled: true - existingSecret: synapse-redis - existingSecretPasswordKey: redis-password - -postgresql: - enabled: false - -persistence: - enabled: true - storageClass: asteria - accessMode: ReadWriteOnce - size: 50Gi - -synapse: - podSecurityContext: - fsGroup: 666 - runAsUser: 666 - runAsGroup: 666 - resources: - requests: - cpu: 500m - memory: 1Gi - limits: - cpu: "2" - memory: 3Gi - nodeSelector: - hardware: rpi5 - affinity: - nodeAffinity: - preferredDuringSchedulingIgnoredDuringExecution: - - weight: 50 - preference: - matchExpressions: - - key: hardware - operator: In - values: ["rpi5","rpi4"] - -ingress: - enabled: true - className: traefik - annotations: - cert-manager.io/cluster-issuer: letsencrypt - traefik.ingress.kubernetes.io/router.entrypoints: websecure - csHosts: - - matrix.live.bstein.dev - hosts: - - matrix.live.bstein.dev 
- wkHosts: - - live.bstein.dev - - bstein.dev - tls: - - secretName: matrix-live-tls - hosts: - - matrix.live.bstein.dev - - live.bstein.dev - -extraConfig: - allow_guest_access: true - allow_public_rooms_without_auth: true - auto_join_rooms: - - "#othrys:live.bstein.dev" - autocreate_auto_join_rooms: true - default_room_version: "11" - experimental_features: - msc3266_enabled: true - msc4143_enabled: true - msc4222_enabled: true - max_event_delay_duration: 24h - password_config: - enabled: true - oidc_enabled: true - oidc_providers: - - idp_id: keycloak - idp_name: Keycloak - issuer: https://sso.bstein.dev/realms/atlas - client_id: synapse - client_secret: "@@OIDC_CLIENT_SECRET@@" - client_auth_method: client_secret_post - scopes: ["openid", "profile", "email"] - authorization_endpoint: https://sso.bstein.dev/realms/atlas/protocol/openid-connect/auth - token_endpoint: https://sso.bstein.dev/realms/atlas/protocol/openid-connect/token - userinfo_endpoint: https://sso.bstein.dev/realms/atlas/protocol/openid-connect/userinfo - user_mapping_provider: - config: - localpart_template: "{{ user.preferred_username }}" - display_name_template: "{{ user.name }}" - allow_existing_users: true - rc_message: - per_second: 0.5 - burst_count: 30 - rc_delayed_event_mgmt: - per_second: 1 - burst_count: 20 - rc_login: - address: - burst_count: 20 - per_second: 5 - account: - burst_count: 20 - per_second: 5 - failed_attempts: - burst_count: 20 - per_second: 5 - room_list_publication_rules: - - action: allow - well_known_client: - "m.homeserver": - "base_url": "https://matrix.live.bstein.dev" - "org.matrix.msc4143.rtc_foci": - - type: "livekit" - livekit_service_url: "https://kit.live.bstein.dev/livekit/jwt" - -worker: - enabled: false diff --git a/services/crypto/xmr-miner/xmrig-daemonset.yaml b/services/crypto/xmr-miner/xmrig-daemonset.yaml index 74836d3..089dcc4 100644 --- a/services/crypto/xmr-miner/xmrig-daemonset.yaml +++ b/services/crypto/xmr-miner/xmrig-daemonset.yaml @@ -1,3 
+1,4 @@ +# services/crypto/xmr-miner/xmrig-daemonset.yaml apiVersion: apps/v1 kind: DaemonSet metadata: @@ -29,7 +30,7 @@ spec: secretName: monero-payout containers: - name: xmrig - image: ghcr.io/tari-project/xmrig:latest + image: ghcr.io/tari-project/xmrig@sha256:80defbfd0b640d604c91cb5101d3642db7928e1e68ee3c6b011289b3565a39d9 imagePullPolicy: IfNotPresent env: - name: XMRIG_THREADS diff --git a/services/keycloak/mas-secrets-ensure-job.yaml b/services/keycloak/mas-secrets-ensure-job.yaml index b0951cf..4d10aae 100644 --- a/services/keycloak/mas-secrets-ensure-job.yaml +++ b/services/keycloak/mas-secrets-ensure-job.yaml @@ -84,7 +84,7 @@ spec: mountPath: /work containers: - name: apply - image: bitnami/kubectl:latest + image: registry.bstein.dev/bstein/kubectl:1.35.0 command: ["/bin/sh", "-c"] args: - | diff --git a/services/mailu/vip-controller.yaml b/services/mailu/vip-controller.yaml index a6d8c1f..81cc96e 100644 --- a/services/mailu/vip-controller.yaml +++ b/services/mailu/vip-controller.yaml @@ -50,7 +50,7 @@ spec: mailu.bstein.dev/vip: "true" containers: - name: vip-controller - image: lachlanevenson/k8s-kubectl:latest + image: registry.bstein.dev/bstein/kubectl:1.35.0 imagePullPolicy: IfNotPresent command: - /bin/sh diff --git a/services/monitoring/namespace.yaml b/services/monitoring/namespace.yaml index 3335b6a..37732a0 100644 --- a/services/monitoring/namespace.yaml +++ b/services/monitoring/namespace.yaml @@ -1,4 +1,5 @@ +# services/monitoring/namespace.yaml apiVersion: v1 kind: Namespace metadata: - name: monitoring \ No newline at end of file + name: monitoring diff --git a/services/nextcloud/collabora.yaml b/services/nextcloud/collabora.yaml index 1cda2ea..0f09c79 100644 --- a/services/nextcloud/collabora.yaml +++ b/services/nextcloud/collabora.yaml @@ -20,7 +20,7 @@ spec: hardware: rpi5 containers: - name: collabora - image: collabora/code:latest + image: collabora/code@sha256:3c58d0e9bae75e4647467d0c7d91cb66f261d3e814709aed590b5c334a04db26 
imagePullPolicy: IfNotPresent env: - name: domain diff --git a/services/pegasus/deployment.yaml b/services/pegasus/deployment.yaml index 34270b0..7f8547f 100644 --- a/services/pegasus/deployment.yaml +++ b/services/pegasus/deployment.yaml @@ -1,3 +1,4 @@ +# services/pegasus/deployment.yaml apiVersion: apps/v1 kind: Deployment metadata: From bbe5ded0a64759de3c901cb1e1c97b76ba71e4a2 Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Tue, 13 Jan 2026 20:03:11 -0300 Subject: [PATCH 003/270] comms: bump ensure job names for new images --- services/comms/comms-secrets-ensure-job.yaml | 2 +- services/comms/mas-admin-client-secret-ensure-job.yaml | 2 +- services/comms/mas-db-ensure-job.yaml | 2 +- services/comms/synapse-signingkey-ensure-job.yaml | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/services/comms/comms-secrets-ensure-job.yaml b/services/comms/comms-secrets-ensure-job.yaml index e7c8c43..dffb222 100644 --- a/services/comms/comms-secrets-ensure-job.yaml +++ b/services/comms/comms-secrets-ensure-job.yaml @@ -2,7 +2,7 @@ apiVersion: batch/v1 kind: Job metadata: - name: comms-secrets-ensure-1 + name: comms-secrets-ensure-2 namespace: comms spec: backoffLimit: 1 diff --git a/services/comms/mas-admin-client-secret-ensure-job.yaml b/services/comms/mas-admin-client-secret-ensure-job.yaml index 3d65b43..9b76290 100644 --- a/services/comms/mas-admin-client-secret-ensure-job.yaml +++ b/services/comms/mas-admin-client-secret-ensure-job.yaml @@ -36,7 +36,7 @@ subjects: apiVersion: batch/v1 kind: Job metadata: - name: mas-admin-client-secret-ensure-7 + name: mas-admin-client-secret-ensure-8 namespace: comms spec: backoffLimit: 2 diff --git a/services/comms/mas-db-ensure-job.yaml b/services/comms/mas-db-ensure-job.yaml index 50603d5..1d1492e 100644 --- a/services/comms/mas-db-ensure-job.yaml +++ b/services/comms/mas-db-ensure-job.yaml @@ -2,7 +2,7 @@ apiVersion: batch/v1 kind: Job metadata: - name: mas-db-ensure-16 + name: mas-db-ensure-17 namespace: comms 
spec: backoffLimit: 1 diff --git a/services/comms/synapse-signingkey-ensure-job.yaml b/services/comms/synapse-signingkey-ensure-job.yaml index 8846d86..1d36f96 100644 --- a/services/comms/synapse-signingkey-ensure-job.yaml +++ b/services/comms/synapse-signingkey-ensure-job.yaml @@ -2,7 +2,7 @@ apiVersion: batch/v1 kind: Job metadata: - name: othrys-synapse-signingkey-ensure-5 + name: othrys-synapse-signingkey-ensure-6 namespace: comms spec: backoffLimit: 2 From 4332ded0c358bf86060756ea960ccd81d0a363d3 Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Tue, 13 Jan 2026 20:07:51 -0300 Subject: [PATCH 004/270] comms: drop legacy synapse configmaps --- services/comms/kustomization.yaml | 21 ---------- .../comms/synapse-signingkey-ensure-job.yaml | 42 ------------------- 2 files changed, 63 deletions(-) delete mode 100644 services/comms/synapse-signingkey-ensure-job.yaml diff --git a/services/comms/kustomization.yaml b/services/comms/kustomization.yaml index 6490b67..f8456ea 100644 --- a/services/comms/kustomization.yaml +++ b/services/comms/kustomization.yaml @@ -20,7 +20,6 @@ resources: - mas-admin-client-secret-ensure-job.yaml - mas-db-ensure-job.yaml - comms-secrets-ensure-job.yaml - - synapse-signingkey-ensure-job.yaml - synapse-seeder-admin-ensure-job.yaml - synapse-user-seed-job.yaml - mas-local-users-ensure-job.yaml @@ -49,26 +48,6 @@ configMapGenerator: - bot.py=scripts/atlasbot/bot.py options: disableNameSuffixHash: true - - name: othrys-synapse-redis-health - files: - - ping_readiness_local.sh=scripts/synapse/redis/ping_readiness_local.sh - - ping_liveness_local.sh=scripts/synapse/redis/ping_liveness_local.sh - - ping_readiness_master.sh=scripts/synapse/redis/ping_readiness_master.sh - - ping_liveness_master.sh=scripts/synapse/redis/ping_liveness_master.sh - - ping_readiness_local_and_master.sh=scripts/synapse/redis/ping_readiness_local_and_master.sh - - ping_liveness_local_and_master.sh=scripts/synapse/redis/ping_liveness_local_and_master.sh - options: - 
disableNameSuffixHash: true - - name: othrys-synapse-redis-scripts - files: - - start-master.sh=scripts/synapse/redis/start-master.sh - options: - disableNameSuffixHash: true - - name: othrys-synapse-matrix-synapse-scripts - files: - - signing-key.sh=scripts/synapse/signing-key.sh - options: - disableNameSuffixHash: true - name: atlas-kb files: - INDEX.md=knowledge/INDEX.md diff --git a/services/comms/synapse-signingkey-ensure-job.yaml b/services/comms/synapse-signingkey-ensure-job.yaml deleted file mode 100644 index 1d36f96..0000000 --- a/services/comms/synapse-signingkey-ensure-job.yaml +++ /dev/null @@ -1,42 +0,0 @@ -# services/comms/synapse-signingkey-ensure-job.yaml -apiVersion: batch/v1 -kind: Job -metadata: - name: othrys-synapse-signingkey-ensure-6 - namespace: comms -spec: - backoffLimit: 2 - template: - spec: - serviceAccountName: othrys-synapse-signingkey-job - restartPolicy: OnFailure - volumes: - - name: work - emptyDir: {} - initContainers: - - name: generate - image: ghcr.io/element-hq/synapse:v1.144.0 - command: ["/bin/sh", "-c"] - args: - - | - set -euo pipefail - generate_signing_key -o /work/signing.key - volumeMounts: - - name: work - mountPath: /work - containers: - - name: patch - image: registry.bstein.dev/bstein/kubectl:1.35.0 - command: ["/bin/sh", "-c"] - args: - - | - set -euo pipefail - if kubectl -n comms get secret othrys-synapse-signingkey -o jsonpath='{.data.signing\.key}' 2>/dev/null | grep -q .; then - exit 0 - fi - kubectl -n comms create secret generic othrys-synapse-signingkey \ - --from-file=signing.key=/work/signing.key \ - --dry-run=client -o yaml | kubectl -n comms apply -f - >/dev/null - volumeMounts: - - name: work - mountPath: /work From fbde129d4c44931a32edeeb513f1f2d8dcd677c9 Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Tue, 13 Jan 2026 20:27:50 -0300 Subject: [PATCH 005/270] fix(bstein-dev-home): drop invalid image overrides --- services/bstein-dev-home/kustomization.yaml | 5 ----- 1 file changed, 5 deletions(-) 
diff --git a/services/bstein-dev-home/kustomization.yaml b/services/bstein-dev-home/kustomization.yaml index 81220e8..56d9cfd 100644 --- a/services/bstein-dev-home/kustomization.yaml +++ b/services/bstein-dev-home/kustomization.yaml @@ -16,11 +16,6 @@ resources: - vaultwarden-cred-sync-cronjob.yaml - portal-onboarding-e2e-test-job.yaml - ingress.yaml -images: - - name: registry.bstein.dev/bstein/bstein-dev-home-frontend - newTag: registry.bstein.dev/bstein/bstein-dev-home-frontend:0.1.1-92 # {"$imagepolicy": "bstein-dev-home:bstein-dev-home-frontend"} - - name: registry.bstein.dev/bstein/bstein-dev-home-backend - newTag: registry.bstein.dev/bstein/bstein-dev-home-backend:0.1.1-92 # {"$imagepolicy": "bstein-dev-home:bstein-dev-home-backend"} configMapGenerator: - name: chat-ai-gateway namespace: bstein-dev-home From bcef167b50d6c710fe47ddf14bb200634402d6e7 Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Tue, 13 Jan 2026 20:42:26 -0300 Subject: [PATCH 006/270] harbor: enable keycloak oidc settings --- services/harbor/helmrelease.yaml | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/services/harbor/helmrelease.yaml b/services/harbor/helmrelease.yaml index 75f8be3..5b384d7 100644 --- a/services/harbor/helmrelease.yaml +++ b/services/harbor/helmrelease.yaml @@ -117,6 +117,21 @@ spec: existingSecret: harbor-core existingXsrfSecret: harbor-core existingXsrfSecretKey: CSRF_KEY + # OIDC config; client secret is stored out-of-band. 
+ configureUserSettings: | + { + "auth_mode": "oidc_auth", + "oidc_name": "Keycloak", + "oidc_endpoint": "https://sso.bstein.dev/realms/atlas", + "oidc_client_id": "harbor", + "oidc_verify_cert": true, + "oidc_auto_onboard": true, + "oidc_scope": "openid,profile,email,groups", + "oidc_groups_claim": "groups", + "oidc_user_claim": "preferred_username", + "oidc_admin_group": "admin", + "oidc_logout": true + } affinity: nodeAffinity: requiredDuringSchedulingIgnoredDuringExecution: From 098a06e723321d7bb6eb8b9de1838a0a4cb31675 Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Tue, 13 Jan 2026 20:42:30 -0300 Subject: [PATCH 007/270] comms: seed synapse signing key for helm --- services/comms/helmrelease.yaml | 9 ++-- services/comms/kustomization.yaml | 2 + .../comms/synapse-signingkey-ensure-job.yaml | 44 +++++++++++++++++++ .../comms/synapse-signingkey-ensure-rbac.yaml | 34 ++++++++++++++ 4 files changed, 83 insertions(+), 6 deletions(-) create mode 100644 services/comms/synapse-signingkey-ensure-job.yaml create mode 100644 services/comms/synapse-signingkey-ensure-rbac.yaml diff --git a/services/comms/helmrelease.yaml b/services/comms/helmrelease.yaml index 39cd534..d110456 100644 --- a/services/comms/helmrelease.yaml +++ b/services/comms/helmrelease.yaml @@ -163,12 +163,9 @@ spec: signingkey: job: - generateImage: - repository: matrixdotorg/synapse - tag: v1.144.0 - publishImage: - repository: registry.bstein.dev/bstein/kubectl - tag: 1.35.0 + enabled: false + existingSecret: othrys-synapse-signingkey + existingSecretKey: signing.key --- apiVersion: helm.toolkit.fluxcd.io/v2 kind: HelmRelease diff --git a/services/comms/kustomization.yaml b/services/comms/kustomization.yaml index f8456ea..5e50d0f 100644 --- a/services/comms/kustomization.yaml +++ b/services/comms/kustomization.yaml @@ -17,9 +17,11 @@ resources: - mas-secrets-ensure-rbac.yaml - comms-secrets-ensure-rbac.yaml - mas-db-ensure-rbac.yaml + - synapse-signingkey-ensure-rbac.yaml - 
mas-admin-client-secret-ensure-job.yaml - mas-db-ensure-job.yaml - comms-secrets-ensure-job.yaml + - synapse-signingkey-ensure-job.yaml - synapse-seeder-admin-ensure-job.yaml - synapse-user-seed-job.yaml - mas-local-users-ensure-job.yaml diff --git a/services/comms/synapse-signingkey-ensure-job.yaml b/services/comms/synapse-signingkey-ensure-job.yaml new file mode 100644 index 0000000..4a2b89f --- /dev/null +++ b/services/comms/synapse-signingkey-ensure-job.yaml @@ -0,0 +1,44 @@ +# services/comms/synapse-signingkey-ensure-job.yaml +apiVersion: batch/v1 +kind: Job +metadata: + name: othrys-synapse-signingkey-ensure-1 + namespace: comms +spec: + backoffLimit: 2 + template: + spec: + serviceAccountName: othrys-synapse-signingkey-job + restartPolicy: OnFailure + volumes: + - name: work + emptyDir: {} + initContainers: + - name: generate + image: ghcr.io/element-hq/synapse:v1.144.0 + command: ["/bin/sh", "-c"] + args: + - | + set -euo pipefail + umask 077 + generate_signing_key -o /work/signing.key + volumeMounts: + - name: work + mountPath: /work + containers: + - name: store + image: registry.bstein.dev/bstein/kubectl:1.35.0 + command: ["/bin/sh", "-c"] + args: + - | + set -euo pipefail + if kubectl -n comms get secret othrys-synapse-signingkey \ + -o jsonpath='{.data.signing\.key}' 2>/dev/null | grep -q .; then + exit 0 + fi + kubectl -n comms create secret generic othrys-synapse-signingkey \ + --from-file=signing.key=/work/signing.key \ + --dry-run=client -o yaml | kubectl -n comms apply -f - >/dev/null + volumeMounts: + - name: work + mountPath: /work diff --git a/services/comms/synapse-signingkey-ensure-rbac.yaml b/services/comms/synapse-signingkey-ensure-rbac.yaml new file mode 100644 index 0000000..c7f66bc --- /dev/null +++ b/services/comms/synapse-signingkey-ensure-rbac.yaml @@ -0,0 +1,34 @@ +# services/comms/synapse-signingkey-ensure-rbac.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: othrys-synapse-signingkey-job + namespace: comms +--- 
+apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: othrys-synapse-signingkey-job + namespace: comms +rules: + - apiGroups: [""] + resources: ["secrets"] + verbs: ["create"] + - apiGroups: [""] + resources: ["secrets"] + resourceNames: ["othrys-synapse-signingkey"] + verbs: ["get", "patch", "update"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: othrys-synapse-signingkey-job + namespace: comms +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: othrys-synapse-signingkey-job +subjects: + - kind: ServiceAccount + name: othrys-synapse-signingkey-job + namespace: comms From 47f0d1736e22559ca02a7b303ecec4face306dc1 Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Tue, 13 Jan 2026 20:45:14 -0300 Subject: [PATCH 008/270] comms: retry synapse signing key job --- services/comms/synapse-signingkey-ensure-job.yaml | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/services/comms/synapse-signingkey-ensure-job.yaml b/services/comms/synapse-signingkey-ensure-job.yaml index 4a2b89f..00b7080 100644 --- a/services/comms/synapse-signingkey-ensure-job.yaml +++ b/services/comms/synapse-signingkey-ensure-job.yaml @@ -2,7 +2,7 @@ apiVersion: batch/v1 kind: Job metadata: - name: othrys-synapse-signingkey-ensure-1 + name: othrys-synapse-signingkey-ensure-2 namespace: comms spec: backoffLimit: 2 @@ -21,7 +21,11 @@ spec: - | set -euo pipefail umask 077 - generate_signing_key -o /work/signing.key + if which generate_signing_key.py >/dev/null; then + generate_signing_key.py -o /work/signing.key + else + generate_signing_key -o /work/signing.key + fi volumeMounts: - name: work mountPath: /work From fa8ec588a860e04c031e1e836db2a509764adbd4 Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Tue, 13 Jan 2026 20:47:54 -0300 Subject: [PATCH 009/270] comms: add debug logging for signing key job --- services/comms/synapse-signingkey-ensure-job.yaml | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) 
diff --git a/services/comms/synapse-signingkey-ensure-job.yaml b/services/comms/synapse-signingkey-ensure-job.yaml index 00b7080..b1aed4d 100644 --- a/services/comms/synapse-signingkey-ensure-job.yaml +++ b/services/comms/synapse-signingkey-ensure-job.yaml @@ -2,7 +2,7 @@ apiVersion: batch/v1 kind: Job metadata: - name: othrys-synapse-signingkey-ensure-2 + name: othrys-synapse-signingkey-ensure-3 namespace: comms spec: backoffLimit: 2 @@ -36,10 +36,12 @@ spec: args: - | set -euo pipefail + set -x if kubectl -n comms get secret othrys-synapse-signingkey \ - -o jsonpath='{.data.signing\.key}' 2>/dev/null | grep -q .; then + -o jsonpath='{.data.signing\.key}' 2>/tmp/get_err | grep -q .; then exit 0 fi + cat /tmp/get_err >&2 || true kubectl -n comms create secret generic othrys-synapse-signingkey \ --from-file=signing.key=/work/signing.key \ --dry-run=client -o yaml | kubectl -n comms apply -f - >/dev/null From 71fd00d8450af68e3ce39605878fa789412cf948 Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Tue, 13 Jan 2026 20:49:11 -0300 Subject: [PATCH 010/270] comms: fix signing key job permissions --- services/comms/synapse-signingkey-ensure-job.yaml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/services/comms/synapse-signingkey-ensure-job.yaml b/services/comms/synapse-signingkey-ensure-job.yaml index b1aed4d..81d95a7 100644 --- a/services/comms/synapse-signingkey-ensure-job.yaml +++ b/services/comms/synapse-signingkey-ensure-job.yaml @@ -2,7 +2,7 @@ apiVersion: batch/v1 kind: Job metadata: - name: othrys-synapse-signingkey-ensure-3 + name: othrys-synapse-signingkey-ensure-4 namespace: comms spec: backoffLimit: 2 @@ -26,6 +26,7 @@ spec: else generate_signing_key -o /work/signing.key fi + chmod 0644 /work/signing.key volumeMounts: - name: work mountPath: /work From e6a3ae5f7b2896f404ee664a9c3ff4ce577746f1 Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Tue, 13 Jan 2026 20:55:36 -0300 Subject: [PATCH 011/270] comms: restore MAS and OIDC secrets in 
synapse --- services/comms/helmrelease.yaml | 73 +++++++++++++++++++++++++-------- 1 file changed, 57 insertions(+), 16 deletions(-) diff --git a/services/comms/helmrelease.yaml b/services/comms/helmrelease.yaml index d110456..71fc5df 100644 --- a/services/comms/helmrelease.yaml +++ b/services/comms/helmrelease.yaml @@ -71,6 +71,54 @@ spec: limits: cpu: "2" memory: 3Gi + extraEnv: + - name: OIDC_CLIENT_SECRET + valueFrom: + secretKeyRef: + name: synapse-oidc + key: client-secret + - name: TURN_SECRET + valueFrom: + secretKeyRef: + name: turn-shared-secret + key: TURN_STATIC_AUTH_SECRET + - name: MAS_SHARED_SECRET + valueFrom: + secretKeyRef: + name: mas-secrets-runtime + key: matrix_shared_secret + - name: MACAROON_SECRET_KEY + valueFrom: + secretKeyRef: + name: synapse-macaroon + key: macaroon_secret_key + extraCommands: + - | + yaml_quote() { printf "%s" "$1" | sed "s/'/''/g"; } + cat > /synapse/config/conf.d/runtime-secrets.yaml < Date: Tue, 13 Jan 2026 20:59:35 -0300 Subject: [PATCH 012/270] comms: fix synapse runtime config injection --- services/comms/helmrelease.yaml | 52 ++++++++++++++++----------------- 1 file changed, 26 insertions(+), 26 deletions(-) diff --git a/services/comms/helmrelease.yaml b/services/comms/helmrelease.yaml index 71fc5df..a7e180d 100644 --- a/services/comms/helmrelease.yaml +++ b/services/comms/helmrelease.yaml @@ -93,32 +93,32 @@ spec: name: synapse-macaroon key: macaroon_secret_key extraCommands: - - | - yaml_quote() { printf "%s" "$1" | sed "s/'/''/g"; } - cat > /synapse/config/conf.d/runtime-secrets.yaml <- + esc() { printf "%s" "$1" | sed "s/'/''/g"; }; + printf '%s\n' + "oidc_providers:" + " - idp_id: keycloak" + " idp_name: Keycloak" + " issuer: https://sso.bstein.dev/realms/atlas" + " client_id: synapse" + " client_secret: '$(esc "${OIDC_CLIENT_SECRET:-}")'" + " client_auth_method: client_secret_post" + " scopes: [\"openid\", \"profile\", \"email\"]" + " authorization_endpoint: 
https://sso.bstein.dev/realms/atlas/protocol/openid-connect/auth" + " token_endpoint: https://sso.bstein.dev/realms/atlas/protocol/openid-connect/token" + " userinfo_endpoint: https://sso.bstein.dev/realms/atlas/protocol/openid-connect/userinfo" + " user_mapping_provider:" + " config:" + " localpart_template: \"{{ user.preferred_username }}\"" + " display_name_template: \"{{ user.name }}\"" + " allow_existing_users: true" + "matrix_authentication_service:" + " enabled: true" + " endpoint: http://matrix-authentication-service:8080/" + " secret: '$(esc "${MAS_SHARED_SECRET:-}")'" + "turn_shared_secret: '$(esc "${TURN_SECRET:-}")'" + "macaroon_secret_key: '$(esc "${MACAROON_SECRET_KEY:-}")'" + > /synapse/config/conf.d/runtime-secrets.yaml nodeSelector: hardware: rpi5 affinity: From 928b9379d80ab1e9a73cc8a3adfaad32b0003111 Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Tue, 13 Jan 2026 21:02:19 -0300 Subject: [PATCH 013/270] comms: disable synapse password auth with MAS --- services/comms/helmrelease.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/services/comms/helmrelease.yaml b/services/comms/helmrelease.yaml index a7e180d..0cb4668 100644 --- a/services/comms/helmrelease.yaml +++ b/services/comms/helmrelease.yaml @@ -163,7 +163,7 @@ spec: msc4222_enabled: true max_event_delay_duration: 24h password_config: - enabled: true + enabled: false oidc_enabled: true rc_message: per_second: 0.5 From b97146f4d147683f86cb912b331f45b3fe77e699 Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Tue, 13 Jan 2026 21:04:29 -0300 Subject: [PATCH 014/270] comms: disable synapse oidc with MAS --- services/comms/helmrelease.yaml | 22 ---------------------- 1 file changed, 22 deletions(-) diff --git a/services/comms/helmrelease.yaml b/services/comms/helmrelease.yaml index 0cb4668..eaa7c20 100644 --- a/services/comms/helmrelease.yaml +++ b/services/comms/helmrelease.yaml @@ -72,11 +72,6 @@ spec: cpu: "2" memory: 3Gi extraEnv: - - name: OIDC_CLIENT_SECRET - valueFrom: - 
secretKeyRef: - name: synapse-oidc - key: client-secret - name: TURN_SECRET valueFrom: secretKeyRef: @@ -96,22 +91,6 @@ spec: - >- esc() { printf "%s" "$1" | sed "s/'/''/g"; }; printf '%s\n' - "oidc_providers:" - " - idp_id: keycloak" - " idp_name: Keycloak" - " issuer: https://sso.bstein.dev/realms/atlas" - " client_id: synapse" - " client_secret: '$(esc "${OIDC_CLIENT_SECRET:-}")'" - " client_auth_method: client_secret_post" - " scopes: [\"openid\", \"profile\", \"email\"]" - " authorization_endpoint: https://sso.bstein.dev/realms/atlas/protocol/openid-connect/auth" - " token_endpoint: https://sso.bstein.dev/realms/atlas/protocol/openid-connect/token" - " userinfo_endpoint: https://sso.bstein.dev/realms/atlas/protocol/openid-connect/userinfo" - " user_mapping_provider:" - " config:" - " localpart_template: \"{{ user.preferred_username }}\"" - " display_name_template: \"{{ user.name }}\"" - " allow_existing_users: true" "matrix_authentication_service:" " enabled: true" " endpoint: http://matrix-authentication-service:8080/" @@ -164,7 +143,6 @@ spec: max_event_delay_duration: 24h password_config: enabled: false - oidc_enabled: true rc_message: per_second: 0.5 burst_count: 30 From 98554e5fa4053e2af83cf63d6df2de0adc77b56a Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Tue, 13 Jan 2026 21:06:45 -0300 Subject: [PATCH 015/270] comms: rerun mas local user seed --- services/comms/mas-local-users-ensure-job.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/services/comms/mas-local-users-ensure-job.yaml b/services/comms/mas-local-users-ensure-job.yaml index e462426..7853763 100644 --- a/services/comms/mas-local-users-ensure-job.yaml +++ b/services/comms/mas-local-users-ensure-job.yaml @@ -2,7 +2,7 @@ apiVersion: batch/v1 kind: Job metadata: - name: mas-local-users-ensure-5 + name: mas-local-users-ensure-6 namespace: comms spec: backoffLimit: 1 From 46777f9ec90a7073edf06e0b06f96d1ec90d5acc Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Tue, 13 Jan 2026 
21:09:41 -0300 Subject: [PATCH 016/270] comms: restart atlasbot after MAS fixes --- services/comms/atlasbot-deployment.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/services/comms/atlasbot-deployment.yaml b/services/comms/atlasbot-deployment.yaml index 4d8bfc7..f9e1f79 100644 --- a/services/comms/atlasbot-deployment.yaml +++ b/services/comms/atlasbot-deployment.yaml @@ -16,7 +16,7 @@ spec: labels: app: atlasbot annotations: - checksum/atlasbot-configmap: manual-atlasbot-3 + checksum/atlasbot-configmap: manual-atlasbot-4 spec: serviceAccountName: atlasbot nodeSelector: From 0b5dcde3a30ab189f5b6e7fead5a6cd503802ac8 Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Tue, 13 Jan 2026 21:15:10 -0300 Subject: [PATCH 017/270] monitoring: align victoria-metrics PVC size --- services/monitoring/helmrelease.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/services/monitoring/helmrelease.yaml b/services/monitoring/helmrelease.yaml index 704b91d..0914c2a 100644 --- a/services/monitoring/helmrelease.yaml +++ b/services/monitoring/helmrelease.yaml @@ -71,7 +71,7 @@ spec: persistentVolume: enabled: true - size: 250Gi + size: 100Gi # Enable built-in Kubernetes scraping scrape: From bcc15c3e0a01ab0f5004e0ace2b4bfe004c3266b Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Tue, 13 Jan 2026 21:18:42 -0300 Subject: [PATCH 018/270] monitoring: allow grafana upgrade remediation --- services/monitoring/helmrelease.yaml | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/services/monitoring/helmrelease.yaml b/services/monitoring/helmrelease.yaml index 0914c2a..ad26eaf 100644 --- a/services/monitoring/helmrelease.yaml +++ b/services/monitoring/helmrelease.yaml @@ -245,6 +245,15 @@ spec: kind: HelmRepository name: grafana namespace: flux-system + install: + remediation: { retries: 3 } + timeout: 15m + upgrade: + remediation: + retries: 3 + remediateLastFailure: true + cleanupOnFail: true + timeout: 15m values: admin: existingSecret: 
grafana-admin From 4a1c4766b8ba198578590d6fd4c15bfcb58872f5 Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Wed, 14 Jan 2026 01:07:47 -0300 Subject: [PATCH 019/270] feat: add harbor/vault oidc automation --- services/harbor/helmrelease.yaml | 23 +-- .../harbor-oidc-secret-ensure-job.yaml | 142 ++++++++++++++++++ services/keycloak/kustomization.yaml | 1 + services/vault/kustomization.yaml | 7 + services/vault/oidc-config-cronjob.yaml | 114 ++++++++++++++ .../vault/scripts/vault_oidc_configure.sh | 77 ++++++++++ 6 files changed, 349 insertions(+), 15 deletions(-) create mode 100644 services/keycloak/harbor-oidc-secret-ensure-job.yaml create mode 100644 services/vault/oidc-config-cronjob.yaml create mode 100644 services/vault/scripts/vault_oidc_configure.sh diff --git a/services/harbor/helmrelease.yaml b/services/harbor/helmrelease.yaml index 5b384d7..249a3f3 100644 --- a/services/harbor/helmrelease.yaml +++ b/services/harbor/helmrelease.yaml @@ -117,21 +117,14 @@ spec: existingSecret: harbor-core existingXsrfSecret: harbor-core existingXsrfSecretKey: CSRF_KEY - # OIDC config; client secret is stored out-of-band. - configureUserSettings: | - { - "auth_mode": "oidc_auth", - "oidc_name": "Keycloak", - "oidc_endpoint": "https://sso.bstein.dev/realms/atlas", - "oidc_client_id": "harbor", - "oidc_verify_cert": true, - "oidc_auto_onboard": true, - "oidc_scope": "openid,profile,email,groups", - "oidc_groups_claim": "groups", - "oidc_user_claim": "preferred_username", - "oidc_admin_group": "admin", - "oidc_logout": true - } + # OIDC config is injected via CONFIG_OVERWRITE_JSON from the harbor-oidc secret. 
+ extraEnvVars: + - name: CONFIG_OVERWRITE_JSON + valueFrom: + secretKeyRef: + name: harbor-oidc + key: CONFIG_OVERWRITE_JSON + optional: true affinity: nodeAffinity: requiredDuringSchedulingIgnoredDuringExecution: diff --git a/services/keycloak/harbor-oidc-secret-ensure-job.yaml b/services/keycloak/harbor-oidc-secret-ensure-job.yaml new file mode 100644 index 0000000..974f01a --- /dev/null +++ b/services/keycloak/harbor-oidc-secret-ensure-job.yaml @@ -0,0 +1,142 @@ +# services/keycloak/harbor-oidc-secret-ensure-job.yaml +apiVersion: batch/v1 +kind: Job +metadata: + name: harbor-oidc-secret-ensure-1 + namespace: sso +spec: + backoffLimit: 0 + ttlSecondsAfterFinished: 3600 + template: + spec: + serviceAccountName: mas-secrets-ensure + restartPolicy: Never + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/arch + operator: In + values: ["arm64"] + - key: node-role.kubernetes.io/worker + operator: Exists + containers: + - name: apply + image: alpine:3.20 + command: ["/bin/sh", "-c"] + args: + - | + set -euo pipefail + apk add --no-cache curl jq kubectl >/dev/null + + KC_URL="http://keycloak.sso.svc.cluster.local" + ACCESS_TOKEN="" + for attempt in 1 2 3 4 5; do + TOKEN_JSON="$(curl -sS -X POST "$KC_URL/realms/master/protocol/openid-connect/token" \ + -H 'Content-Type: application/x-www-form-urlencoded' \ + -d "grant_type=password" \ + -d "client_id=admin-cli" \ + -d "username=${KEYCLOAK_ADMIN}" \ + -d "password=${KEYCLOAK_ADMIN_PASSWORD}" || true)" + ACCESS_TOKEN="$(echo "$TOKEN_JSON" | jq -r '.access_token' 2>/dev/null || true)" + if [ -n "$ACCESS_TOKEN" ] && [ "$ACCESS_TOKEN" != "null" ]; then + break + fi + echo "Keycloak token request failed (attempt ${attempt})" >&2 + sleep $((attempt * 2)) + done + if [ -z "$ACCESS_TOKEN" ] || [ "$ACCESS_TOKEN" = "null" ]; then + echo "Failed to fetch Keycloak admin token" >&2 + exit 1 + fi + + CLIENT_QUERY="$(curl -sS -H "Authorization: 
Bearer ${ACCESS_TOKEN}" \ + "$KC_URL/admin/realms/atlas/clients?clientId=harbor" || true)" + CLIENT_ID="$(echo "$CLIENT_QUERY" | jq -r '.[0].id' 2>/dev/null || true)" + + if [ -z "$CLIENT_ID" ] || [ "$CLIENT_ID" = "null" ]; then + create_payload='{"clientId":"harbor","enabled":true,"protocol":"openid-connect","publicClient":false,"standardFlowEnabled":true,"implicitFlowEnabled":false,"directAccessGrantsEnabled":false,"serviceAccountsEnabled":false,"redirectUris":["https://registry.bstein.dev/c/oidc/callback"],"webOrigins":["https://registry.bstein.dev"],"rootUrl":"https://registry.bstein.dev","baseUrl":"/"}' + status="$(curl -sS -o /dev/null -w "%{http_code}" -X POST \ + -H "Authorization: Bearer ${ACCESS_TOKEN}" \ + -H 'Content-Type: application/json' \ + -d "${create_payload}" \ + "$KC_URL/admin/realms/atlas/clients")" + if [ "$status" != "201" ] && [ "$status" != "204" ]; then + echo "Keycloak client create failed (status ${status})" >&2 + exit 1 + fi + CLIENT_QUERY="$(curl -sS -H "Authorization: Bearer ${ACCESS_TOKEN}" \ + "$KC_URL/admin/realms/atlas/clients?clientId=harbor" || true)" + CLIENT_ID="$(echo "$CLIENT_QUERY" | jq -r '.[0].id' 2>/dev/null || true)" + fi + + if [ -z "$CLIENT_ID" ] || [ "$CLIENT_ID" = "null" ]; then + echo "Keycloak client harbor not found" >&2 + exit 1 + fi + + SCOPE_ID="$(curl -sS -H "Authorization: Bearer ${ACCESS_TOKEN}" \ + "$KC_URL/admin/realms/atlas/client-scopes?search=groups" | jq -r '.[] | select(.name=="groups") | .id' 2>/dev/null | head -n1 || true)" + if [ -z "$SCOPE_ID" ] || [ "$SCOPE_ID" = "null" ]; then + echo "Keycloak client scope groups not found" >&2 + exit 1 + fi + + DEFAULT_SCOPES="$(curl -sS -H "Authorization: Bearer ${ACCESS_TOKEN}" \ + "$KC_URL/admin/realms/atlas/clients/${CLIENT_ID}/default-client-scopes" || true)" + OPTIONAL_SCOPES="$(curl -sS -H "Authorization: Bearer ${ACCESS_TOKEN}" \ + "$KC_URL/admin/realms/atlas/clients/${CLIENT_ID}/optional-client-scopes" || true)" + + if ! 
echo "$DEFAULT_SCOPES" | jq -e '.[] | select(.name=="groups")' >/dev/null 2>&1 \ + && ! echo "$OPTIONAL_SCOPES" | jq -e '.[] | select(.name=="groups")' >/dev/null 2>&1; then + status="$(curl -sS -o /dev/null -w "%{http_code}" -X PUT \ + -H "Authorization: Bearer ${ACCESS_TOKEN}" \ + "$KC_URL/admin/realms/atlas/clients/${CLIENT_ID}/optional-client-scopes/${SCOPE_ID}")" + if [ "$status" != "200" ] && [ "$status" != "201" ] && [ "$status" != "204" ]; then + status="$(curl -sS -o /dev/null -w "%{http_code}" -X POST \ + -H "Authorization: Bearer ${ACCESS_TOKEN}" \ + "$KC_URL/admin/realms/atlas/clients/${CLIENT_ID}/optional-client-scopes/${SCOPE_ID}")" + if [ "$status" != "200" ] && [ "$status" != "201" ] && [ "$status" != "204" ]; then + echo "Failed to attach groups client scope to harbor (status ${status})" >&2 + exit 1 + fi + fi + fi + + CLIENT_SECRET="$(curl -sS -H "Authorization: Bearer ${ACCESS_TOKEN}" \ + "$KC_URL/admin/realms/atlas/clients/${CLIENT_ID}/client-secret" | jq -r '.value' 2>/dev/null || true)" + if [ -z "$CLIENT_SECRET" ] || [ "$CLIENT_SECRET" = "null" ]; then + echo "Keycloak client secret not found" >&2 + exit 1 + fi + + CONFIG_OVERWRITE_JSON="$(jq -nc \ + --arg auth_mode "oidc_auth" \ + --arg oidc_name "Keycloak" \ + --arg oidc_client_id "harbor" \ + --arg oidc_client_secret "${CLIENT_SECRET}" \ + --arg oidc_endpoint "https://sso.bstein.dev/realms/atlas" \ + --arg oidc_scope "openid,profile,email,groups" \ + --arg oidc_user_claim "preferred_username" \ + --arg oidc_groups_claim "groups" \ + --arg oidc_admin_group "admin" \ + --argjson oidc_auto_onboard true \ + --argjson oidc_verify_cert true \ + --argjson oidc_logout true \ + '{\n auth_mode: $auth_mode,\n oidc_name: $oidc_name,\n oidc_client_id: $oidc_client_id,\n oidc_client_secret: $oidc_client_secret,\n oidc_endpoint: $oidc_endpoint,\n oidc_scope: $oidc_scope,\n oidc_user_claim: $oidc_user_claim,\n oidc_groups_claim: $oidc_groups_claim,\n oidc_admin_group: $oidc_admin_group,\n 
oidc_auto_onboard: $oidc_auto_onboard,\n oidc_verify_cert: $oidc_verify_cert,\n oidc_logout: $oidc_logout\n }')" + + kubectl -n harbor create secret generic harbor-oidc \ + --from-literal=CONFIG_OVERWRITE_JSON="${CONFIG_OVERWRITE_JSON}" \ + --dry-run=client -o yaml | kubectl -n harbor apply -f - >/dev/null + env: + - name: KEYCLOAK_ADMIN + valueFrom: + secretKeyRef: + name: keycloak-admin + key: username + - name: KEYCLOAK_ADMIN_PASSWORD + valueFrom: + secretKeyRef: + name: keycloak-admin + key: password diff --git a/services/keycloak/kustomization.yaml b/services/keycloak/kustomization.yaml index ddb4ab2..c334e5e 100644 --- a/services/keycloak/kustomization.yaml +++ b/services/keycloak/kustomization.yaml @@ -19,6 +19,7 @@ resources: - mas-secrets-ensure-job.yaml - synapse-oidc-secret-ensure-job.yaml - logs-oidc-secret-ensure-job.yaml + - harbor-oidc-secret-ensure-job.yaml - service.yaml - ingress.yaml generatorOptions: diff --git a/services/vault/kustomization.yaml b/services/vault/kustomization.yaml index b39fc48..1ab70bc 100644 --- a/services/vault/kustomization.yaml +++ b/services/vault/kustomization.yaml @@ -8,7 +8,14 @@ resources: - rbac.yaml - configmap.yaml - statefulset.yaml + - oidc-config-cronjob.yaml - service.yaml - ingress.yaml - certificate.yaml - serverstransport.yaml +generatorOptions: + disableNameSuffixHash: true +configMapGenerator: + - name: vault-oidc-config-script + files: + - vault_oidc_configure.sh=scripts/vault_oidc_configure.sh diff --git a/services/vault/oidc-config-cronjob.yaml b/services/vault/oidc-config-cronjob.yaml new file mode 100644 index 0000000..15131a8 --- /dev/null +++ b/services/vault/oidc-config-cronjob.yaml @@ -0,0 +1,114 @@ +# services/vault/oidc-config-cronjob.yaml +apiVersion: batch/v1 +kind: CronJob +metadata: + name: vault-oidc-config + namespace: vault +spec: + schedule: "*/15 * * * *" + concurrencyPolicy: Forbid + successfulJobsHistoryLimit: 1 + failedJobsHistoryLimit: 3 + jobTemplate: + spec: + backoffLimit: 1 + 
template: + spec: + serviceAccountName: vault + restartPolicy: Never + nodeSelector: + kubernetes.io/arch: arm64 + node-role.kubernetes.io/worker: "true" + containers: + - name: configure-oidc + image: hashicorp/vault:1.17.6 + imagePullPolicy: IfNotPresent + command: + - bash + - /scripts/vault_oidc_configure.sh + env: + - name: VAULT_ADDR + value: http://vault.vault.svc.cluster.local:8200 + - name: VAULT_TOKEN + valueFrom: + secretKeyRef: + name: vault-oidc-admin-token + key: token + - name: VAULT_OIDC_DISCOVERY_URL + valueFrom: + secretKeyRef: + name: vault-oidc-config + key: discovery_url + - name: VAULT_OIDC_CLIENT_ID + valueFrom: + secretKeyRef: + name: vault-oidc-config + key: client_id + - name: VAULT_OIDC_CLIENT_SECRET + valueFrom: + secretKeyRef: + name: vault-oidc-config + key: client_secret + - name: VAULT_OIDC_DEFAULT_ROLE + valueFrom: + secretKeyRef: + name: vault-oidc-config + key: default_role + optional: true + - name: VAULT_OIDC_SCOPES + valueFrom: + secretKeyRef: + name: vault-oidc-config + key: scopes + optional: true + - name: VAULT_OIDC_USER_CLAIM + valueFrom: + secretKeyRef: + name: vault-oidc-config + key: user_claim + optional: true + - name: VAULT_OIDC_GROUPS_CLAIM + valueFrom: + secretKeyRef: + name: vault-oidc-config + key: groups_claim + optional: true + - name: VAULT_OIDC_TOKEN_POLICIES + valueFrom: + secretKeyRef: + name: vault-oidc-config + key: token_policies + optional: true + - name: VAULT_OIDC_REDIRECT_URIS + valueFrom: + secretKeyRef: + name: vault-oidc-config + key: redirect_uris + optional: true + - name: VAULT_OIDC_BOUND_AUDIENCES + valueFrom: + secretKeyRef: + name: vault-oidc-config + key: bound_audiences + optional: true + - name: VAULT_OIDC_BOUND_CLAIMS + valueFrom: + secretKeyRef: + name: vault-oidc-config + key: bound_claims + optional: true + - name: VAULT_OIDC_BOUND_CLAIMS_TYPE + valueFrom: + secretKeyRef: + name: vault-oidc-config + key: bound_claims_type + optional: true + volumeMounts: + - name: oidc-config-script + 
mountPath: /scripts + readOnly: true + volumes: + - name: oidc-config-script + configMap: + name: vault-oidc-config-script + defaultMode: 0555 diff --git a/services/vault/scripts/vault_oidc_configure.sh b/services/vault/scripts/vault_oidc_configure.sh new file mode 100644 index 0000000..3cd4a2d --- /dev/null +++ b/services/vault/scripts/vault_oidc_configure.sh @@ -0,0 +1,77 @@ +#!/usr/bin/env bash +set -euo pipefail + +log() { echo "[vault-oidc] $*"; } + +status_json="$(vault status -format=json || true)" +if [[ -z "${status_json}" ]]; then + log "vault status failed; check VAULT_ADDR and VAULT_TOKEN" + exit 1 +fi + +if ! grep -q '"initialized":true' <<<"${status_json}"; then + log "vault not initialized; skipping" + exit 0 +fi + +if grep -q '"sealed":true' <<<"${status_json}"; then + log "vault sealed; skipping" + exit 0 +fi + +: "${VAULT_OIDC_DISCOVERY_URL:?set VAULT_OIDC_DISCOVERY_URL}" +: "${VAULT_OIDC_CLIENT_ID:?set VAULT_OIDC_CLIENT_ID}" +: "${VAULT_OIDC_CLIENT_SECRET:?set VAULT_OIDC_CLIENT_SECRET}" + +role="${VAULT_OIDC_DEFAULT_ROLE:-atlas}" +scopes="${VAULT_OIDC_SCOPES:-openid profile email groups}" +user_claim="${VAULT_OIDC_USER_CLAIM:-preferred_username}" +groups_claim="${VAULT_OIDC_GROUPS_CLAIM:-groups}" +token_policies="${VAULT_OIDC_TOKEN_POLICIES:-default}" +redirect_uris="${VAULT_OIDC_REDIRECT_URIS:-https://secret.bstein.dev/ui/vault/auth/oidc/oidc/callback}" +bound_audiences="${VAULT_OIDC_BOUND_AUDIENCES:-${VAULT_OIDC_CLIENT_ID}}" +bound_claims="${VAULT_OIDC_BOUND_CLAIMS:-}" +bound_claims_type="${VAULT_OIDC_BOUND_CLAIMS_TYPE:-}" + +if ! 
vault auth list -format=json | grep -q '"oidc/"'; then + log "enabling oidc auth method" + vault auth enable oidc +fi + +log "configuring oidc auth" +vault write auth/oidc/config \ + oidc_discovery_url="${VAULT_OIDC_DISCOVERY_URL}" \ + oidc_client_id="${VAULT_OIDC_CLIENT_ID}" \ + oidc_client_secret="${VAULT_OIDC_CLIENT_SECRET}" \ + default_role="${role}" + +vault auth tune -listing-visibility=unauth oidc >/dev/null + +role_args=( + "user_claim=${user_claim}" + "oidc_scopes=${scopes}" + "token_policies=${token_policies}" + "bound_audiences=${bound_audiences}" +) + +if [[ -n "${groups_claim}" ]]; then + role_args+=("groups_claim=${groups_claim}") +fi +if [[ -n "${bound_claims}" ]]; then + role_args+=("bound_claims=${bound_claims}") +fi +if [[ -n "${bound_claims_type}" ]]; then + role_args+=("bound_claims_type=${bound_claims_type}") +fi + +IFS=',' read -r -a redirect_items <<<"${redirect_uris}" +for uri in "${redirect_items[@]}"; do + trimmed="${uri#"${uri%%[![:space:]]*}"}" + trimmed="${trimmed%"${trimmed##*[![:space:]]}"}" + if [[ -n "${trimmed}" ]]; then + role_args+=("allowed_redirect_uris=${trimmed}") + fi +done + +log "configuring oidc role ${role}" +vault write "auth/oidc/role/${role}" "${role_args[@]}" From 8fa38268d9b68b8d0248ee40cfdbdfc8baab1c3b Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Wed, 14 Jan 2026 01:08:05 -0300 Subject: [PATCH 020/270] chore: refresh knowledge catalog headers --- .../applications/comms/kustomization.yaml | 2 +- clusters/atlas/flux-system/gotk-sync.yaml | 1 + knowledge/catalog/atlas-summary.json | 8 +- knowledge/catalog/atlas.json | 640 ++++++++++-------- knowledge/catalog/atlas.yaml | 449 +++++++----- knowledge/diagrams/atlas-http.mmd | 36 +- scripts/knowledge_render_atlas.py | 2 + .../knowledge/catalog/atlas-summary.json | 2 +- services/comms/knowledge/catalog/atlas.json | 7 +- services/comms/knowledge/catalog/atlas.yaml | 8 +- services/crypto/monerod/deployment.yaml | 2 +- services/crypto/xmr-miner/kustomization.yaml | 2 +- 
services/monitoring/helmrelease.yaml | 2 +- 13 files changed, 680 insertions(+), 481 deletions(-) diff --git a/clusters/atlas/flux-system/applications/comms/kustomization.yaml b/clusters/atlas/flux-system/applications/comms/kustomization.yaml index 0fb664a..cde929d 100644 --- a/clusters/atlas/flux-system/applications/comms/kustomization.yaml +++ b/clusters/atlas/flux-system/applications/comms/kustomization.yaml @@ -1,4 +1,4 @@ -# clusters/atlas/flux-system/applications/communication/kustomization.yaml +# clusters/atlas/flux-system/applications/comms/kustomization.yaml apiVersion: kustomize.toolkit.fluxcd.io/v1 kind: Kustomization metadata: diff --git a/clusters/atlas/flux-system/gotk-sync.yaml b/clusters/atlas/flux-system/gotk-sync.yaml index 59cabae..53c0817 100644 --- a/clusters/atlas/flux-system/gotk-sync.yaml +++ b/clusters/atlas/flux-system/gotk-sync.yaml @@ -1,3 +1,4 @@ +# clusters/atlas/flux-system/gotk-sync.yaml # This manifest was generated by flux. DO NOT EDIT. --- apiVersion: source.toolkit.fluxcd.io/v1 diff --git a/knowledge/catalog/atlas-summary.json b/knowledge/catalog/atlas-summary.json index 2139e29..fa35051 100644 --- a/knowledge/catalog/atlas-summary.json +++ b/knowledge/catalog/atlas-summary.json @@ -1,8 +1,8 @@ { "counts": { - "helmrelease_host_hints": 7, - "http_endpoints": 35, - "services": 44, - "workloads": 49 + "helmrelease_host_hints": 17, + "http_endpoints": 37, + "services": 43, + "workloads": 54 } } diff --git a/knowledge/catalog/atlas.json b/knowledge/catalog/atlas.json index 92f08f4..0d97bcd 100644 --- a/knowledge/catalog/atlas.json +++ b/knowledge/catalog/atlas.json @@ -12,12 +12,7 @@ "targetNamespace": "bstein-dev-home" }, { - "name": "ci-demo", - "path": "services/ci-demo", - "targetNamespace": null - }, - { - "name": "communication", + "name": "comms", "path": "services/comms", "targetNamespace": "comms" }, @@ -71,6 +66,11 @@ "path": "services/keycloak", "targetNamespace": "sso" }, + { + "name": "logging", + "path": 
"services/logging", + "targetNamespace": null + }, { "name": "longhorn-ui", "path": "infrastructure/longhorn/ui-ingress", @@ -81,6 +81,11 @@ "path": "services/mailu", "targetNamespace": "mailu-mailserver" }, + { + "name": "maintenance", + "path": "services/maintenance", + "targetNamespace": null + }, { "name": "metallb", "path": "infrastructure/metallb", @@ -116,11 +121,26 @@ "path": "services/openldap", "targetNamespace": "sso" }, + { + "name": "outline", + "path": "services/outline", + "targetNamespace": "outline" + }, { "name": "pegasus", "path": "services/pegasus", "targetNamespace": "jellyfin" }, + { + "name": "planka", + "path": "services/planka", + "targetNamespace": "planka" + }, + { + "name": "postgres", + "path": "infrastructure/postgres", + "targetNamespace": "postgres" + }, { "name": "sui-metrics", "path": "services/sui-metrics/overlays/atlas", @@ -163,7 +183,7 @@ "serviceAccountName": null, "nodeSelector": {}, "images": [ - "ollama/ollama:latest" + "ollama/ollama@sha256:2c9595c555fd70a28363489ac03bd5bf9e7c5bdf2890373c3a830ffd7252ce6d" ] }, { @@ -179,7 +199,7 @@ "node-role.kubernetes.io/worker": "true" }, "images": [ - "registry.bstein.dev/bstein/bstein-dev-home-backend:0.1.1-84" + "registry.bstein.dev/bstein/bstein-dev-home-backend:0.1.1-92" ] }, { @@ -195,7 +215,7 @@ "node-role.kubernetes.io/worker": "true" }, "images": [ - "registry.bstein.dev/bstein/bstein-dev-home-frontend:0.1.1-84" + "registry.bstein.dev/bstein/bstein-dev-home-frontend:0.1.1-92" ] }, { @@ -214,21 +234,6 @@ "python:3.11-slim" ] }, - { - "kind": "Deployment", - "namespace": "ci-demo", - "name": "ci-demo", - "labels": { - "app.kubernetes.io/name": "ci-demo" - }, - "serviceAccountName": null, - "nodeSelector": { - "hardware": "rpi4" - }, - "images": [ - "registry.bstein.dev/infra/ci-demo:v0.0.0-3" - ] - }, { "kind": "Deployment", "namespace": "comms", @@ -271,7 +276,7 @@ "hardware": "rpi5" }, "images": [ - "ghcr.io/element-hq/element-call:latest" + 
"ghcr.io/element-hq/element-call@sha256:e6897c7818331714eae19d83ef8ea94a8b41115f0d8d3f62c2fed2d02c65c9bc" ] }, { @@ -345,56 +350,6 @@ "nginx:1.27-alpine" ] }, - { - "kind": "Deployment", - "namespace": "comms", - "name": "othrys-element-element-web", - "labels": { - "app.kubernetes.io/instance": "othrys-element", - "app.kubernetes.io/name": "element-web" - }, - "serviceAccountName": "othrys-element-element-web", - "nodeSelector": { - "hardware": "rpi5" - }, - "images": [ - "ghcr.io/element-hq/element-web:v1.12.6" - ] - }, - { - "kind": "Deployment", - "namespace": "comms", - "name": "othrys-synapse-matrix-synapse", - "labels": { - "app.kubernetes.io/component": "synapse", - "app.kubernetes.io/instance": "othrys-synapse", - "app.kubernetes.io/name": "matrix-synapse" - }, - "serviceAccountName": "default", - "nodeSelector": { - "hardware": "rpi5" - }, - "images": [ - "ghcr.io/element-hq/synapse:v1.144.0" - ] - }, - { - "kind": "Deployment", - "namespace": "comms", - "name": "othrys-synapse-redis-master", - "labels": { - "app.kubernetes.io/component": "master", - "app.kubernetes.io/instance": "othrys-synapse", - "app.kubernetes.io/managed-by": "Helm", - "app.kubernetes.io/name": "redis", - "helm.sh/chart": "redis-17.17.1" - }, - "serviceAccountName": "othrys-synapse-redis", - "nodeSelector": {}, - "images": [ - "docker.io/bitnamilegacy/redis:7.0.12-debian-11-r34" - ] - }, { "kind": "DaemonSet", "namespace": "crypto", @@ -407,7 +362,7 @@ "node-role.kubernetes.io/worker": "true" }, "images": [ - "ghcr.io/tari-project/xmrig:latest" + "ghcr.io/tari-project/xmrig@sha256:80defbfd0b640d604c91cb5101d3642db7928e1e68ee3c6b011289b3565a39d9" ] }, { @@ -681,6 +636,66 @@ "hashicorp/vault-csi-provider:1.7.0" ] }, + { + "kind": "DaemonSet", + "namespace": "logging", + "name": "node-image-gc-rpi4", + "labels": { + "app": "node-image-gc-rpi4" + }, + "serviceAccountName": "node-image-gc-rpi4", + "nodeSelector": { + "hardware": "rpi4" + }, + "images": [ + 
"bitnami/kubectl@sha256:554ab88b1858e8424c55de37ad417b16f2a0e65d1607aa0f3fe3ce9b9f10b131" + ] + }, + { + "kind": "DaemonSet", + "namespace": "logging", + "name": "node-image-prune-rpi5", + "labels": { + "app": "node-image-prune-rpi5" + }, + "serviceAccountName": "node-image-prune-rpi5", + "nodeSelector": { + "hardware": "rpi5" + }, + "images": [ + "bitnami/kubectl@sha256:554ab88b1858e8424c55de37ad417b16f2a0e65d1607aa0f3fe3ce9b9f10b131" + ] + }, + { + "kind": "DaemonSet", + "namespace": "logging", + "name": "node-log-rotation", + "labels": { + "app": "node-log-rotation" + }, + "serviceAccountName": "node-log-rotation", + "nodeSelector": { + "hardware": "rpi5" + }, + "images": [ + "bitnami/kubectl@sha256:554ab88b1858e8424c55de37ad417b16f2a0e65d1607aa0f3fe3ce9b9f10b131" + ] + }, + { + "kind": "Deployment", + "namespace": "logging", + "name": "oauth2-proxy-logs", + "labels": { + "app": "oauth2-proxy-logs" + }, + "serviceAccountName": null, + "nodeSelector": { + "node-role.kubernetes.io/worker": "true" + }, + "images": [ + "quay.io/oauth2-proxy/oauth2-proxy:v7.6.0" + ] + }, { "kind": "Deployment", "namespace": "longhorn-system", @@ -708,7 +723,7 @@ "mailu.bstein.dev/vip": "true" }, "images": [ - "lachlanevenson/k8s-kubectl:latest" + "registry.bstein.dev/bstein/kubectl:1.35.0" ] }, { @@ -726,37 +741,30 @@ }, { "kind": "DaemonSet", - "namespace": "metallb-system", - "name": "metallb-speaker", + "namespace": "maintenance", + "name": "node-image-sweeper", "labels": { - "app.kubernetes.io/component": "speaker", - "app.kubernetes.io/instance": "metallb", - "app.kubernetes.io/name": "metallb" + "app": "node-image-sweeper" }, - "serviceAccountName": "metallb-speaker", + "serviceAccountName": "node-image-sweeper", "nodeSelector": { "kubernetes.io/os": "linux" }, "images": [ - "quay.io/frrouting/frr:10.4.1", - "quay.io/metallb/speaker:v0.15.3" + "python:3.12.9-alpine3.20" ] }, { - "kind": "Deployment", - "namespace": "metallb-system", - "name": "metallb-controller", + "kind": 
"DaemonSet", + "namespace": "maintenance", + "name": "node-nofile", "labels": { - "app.kubernetes.io/component": "controller", - "app.kubernetes.io/instance": "metallb", - "app.kubernetes.io/name": "metallb" - }, - "serviceAccountName": "metallb-controller", - "nodeSelector": { - "kubernetes.io/os": "linux" + "app": "node-nofile" }, + "serviceAccountName": "node-nofile", + "nodeSelector": {}, "images": [ - "quay.io/metallb/controller:v0.15.3" + "bitnami/kubectl@sha256:554ab88b1858e8424c55de37ad417b16f2a0e65d1607aa0f3fe3ce9b9f10b131" ] }, { @@ -772,6 +780,21 @@ "registry.bstein.dev/monitoring/dcgm-exporter:4.4.2-4.7.0-ubuntu22.04" ] }, + { + "kind": "DaemonSet", + "namespace": "monitoring", + "name": "jetson-tegrastats-exporter", + "labels": { + "app": "jetson-tegrastats-exporter" + }, + "serviceAccountName": "default", + "nodeSelector": { + "jetson": "true" + }, + "images": [ + "python:3.10-slim" + ] + }, { "kind": "Deployment", "namespace": "monitoring", @@ -797,7 +820,7 @@ "hardware": "rpi5" }, "images": [ - "collabora/code:latest" + "collabora/code@sha256:3c58d0e9bae75e4647467d0c7d91cb66f261d3e814709aed590b5c334a04db26" ] }, { @@ -815,6 +838,66 @@ "nextcloud:29-apache" ] }, + { + "kind": "Deployment", + "namespace": "outline", + "name": "outline", + "labels": { + "app": "outline" + }, + "serviceAccountName": null, + "nodeSelector": { + "node-role.kubernetes.io/worker": "true" + }, + "images": [ + "outlinewiki/outline:1.2.0" + ] + }, + { + "kind": "Deployment", + "namespace": "outline", + "name": "outline-redis", + "labels": { + "app": "outline-redis" + }, + "serviceAccountName": null, + "nodeSelector": { + "node-role.kubernetes.io/worker": "true" + }, + "images": [ + "redis:7.4.1-alpine" + ] + }, + { + "kind": "Deployment", + "namespace": "planka", + "name": "planka", + "labels": { + "app": "planka" + }, + "serviceAccountName": null, + "nodeSelector": { + "node-role.kubernetes.io/worker": "true" + }, + "images": [ + "ghcr.io/plankanban/planka:2.0.0-rc.4" + ] + 
}, + { + "kind": "StatefulSet", + "namespace": "postgres", + "name": "postgres", + "labels": { + "app": "postgres" + }, + "serviceAccountName": "postgres-vault", + "nodeSelector": { + "node-role.kubernetes.io/worker": "true" + }, + "images": [ + "postgres:15" + ] + }, { "kind": "Deployment", "namespace": "sso", @@ -984,22 +1067,6 @@ } ] }, - { - "namespace": "ci-demo", - "name": "ci-demo", - "type": "ClusterIP", - "selector": { - "app.kubernetes.io/name": "ci-demo" - }, - "ports": [ - { - "name": "http", - "port": 80, - "targetPort": "http", - "protocol": "TCP" - } - ] - }, { "namespace": "comms", "name": "coturn", @@ -1454,94 +1521,6 @@ } ] }, - { - "namespace": "comms", - "name": "othrys-element-element-web", - "type": "ClusterIP", - "selector": { - "app.kubernetes.io/instance": "othrys-element", - "app.kubernetes.io/name": "element-web" - }, - "ports": [ - { - "name": "http", - "port": 80, - "targetPort": "http", - "protocol": "TCP" - } - ] - }, - { - "namespace": "comms", - "name": "othrys-synapse-matrix-synapse", - "type": "ClusterIP", - "selector": { - "app.kubernetes.io/component": "synapse", - "app.kubernetes.io/instance": "othrys-synapse", - "app.kubernetes.io/name": "matrix-synapse" - }, - "ports": [ - { - "name": "http", - "port": 8008, - "targetPort": "http", - "protocol": "TCP" - } - ] - }, - { - "namespace": "comms", - "name": "othrys-synapse-redis-headless", - "type": "ClusterIP", - "selector": { - "app.kubernetes.io/instance": "othrys-synapse", - "app.kubernetes.io/name": "redis" - }, - "ports": [ - { - "name": "tcp-redis", - "port": 6379, - "targetPort": "redis", - "protocol": "TCP" - } - ] - }, - { - "namespace": "comms", - "name": "othrys-synapse-redis-master", - "type": "ClusterIP", - "selector": { - "app.kubernetes.io/component": "master", - "app.kubernetes.io/instance": "othrys-synapse", - "app.kubernetes.io/name": "redis" - }, - "ports": [ - { - "name": "tcp-redis", - "port": 6379, - "targetPort": "redis", - "protocol": "TCP" - } - ] - }, - { 
- "namespace": "comms", - "name": "othrys-synapse-replication", - "type": "ClusterIP", - "selector": { - "app.kubernetes.io/component": "synapse", - "app.kubernetes.io/instance": "othrys-synapse", - "app.kubernetes.io/name": "matrix-synapse" - }, - "ports": [ - { - "name": "replication", - "port": 9093, - "targetPort": "replication", - "protocol": "TCP" - } - ] - }, { "namespace": "crypto", "name": "monerod", @@ -1743,6 +1722,22 @@ } ] }, + { + "namespace": "logging", + "name": "oauth2-proxy-logs", + "type": "ClusterIP", + "selector": { + "app": "oauth2-proxy-logs" + }, + "ports": [ + { + "name": "http", + "port": 80, + "targetPort": 4180, + "protocol": "TCP" + } + ] + }, { "namespace": "longhorn-system", "name": "oauth2-proxy-longhorn", @@ -1823,24 +1818,6 @@ } ] }, - { - "namespace": "metallb-system", - "name": "metallb-webhook-service", - "type": "ClusterIP", - "selector": { - "app.kubernetes.io/component": "controller", - "app.kubernetes.io/instance": "metallb", - "app.kubernetes.io/name": "metallb" - }, - "ports": [ - { - "name": null, - "port": 443, - "targetPort": 9443, - "protocol": "TCP" - } - ] - }, { "namespace": "monitoring", "name": "dcgm-exporter", @@ -1857,6 +1834,22 @@ } ] }, + { + "namespace": "monitoring", + "name": "jetson-tegrastats-exporter", + "type": "ClusterIP", + "selector": { + "app": "jetson-tegrastats-exporter" + }, + "ports": [ + { + "name": "metrics", + "port": 9100, + "targetPort": "metrics", + "protocol": "TCP" + } + ] + }, { "namespace": "monitoring", "name": "postmark-exporter", @@ -1905,6 +1898,70 @@ } ] }, + { + "namespace": "outline", + "name": "outline", + "type": "ClusterIP", + "selector": { + "app": "outline" + }, + "ports": [ + { + "name": "http", + "port": 80, + "targetPort": "http", + "protocol": "TCP" + } + ] + }, + { + "namespace": "outline", + "name": "outline-redis", + "type": "ClusterIP", + "selector": { + "app": "outline-redis" + }, + "ports": [ + { + "name": "redis", + "port": 6379, + "targetPort": "redis", + 
"protocol": "TCP" + } + ] + }, + { + "namespace": "planka", + "name": "planka", + "type": "ClusterIP", + "selector": { + "app": "planka" + }, + "ports": [ + { + "name": "http", + "port": 80, + "targetPort": "http", + "protocol": "TCP" + } + ] + }, + { + "namespace": "postgres", + "name": "postgres-service", + "type": "ClusterIP", + "selector": { + "app": "postgres" + }, + "ports": [ + { + "name": "postgres", + "port": 5432, + "targetPort": 5432, + "protocol": "TCP" + } + ] + }, { "namespace": "sso", "name": "keycloak", @@ -2110,7 +2167,7 @@ "via": { "kind": "Ingress", "name": "matrix-wellknown-bstein-dev", - "source": "communication" + "source": "comms" } }, { @@ -2130,7 +2187,7 @@ "via": { "kind": "Ingress", "name": "matrix-wellknown-bstein-dev", - "source": "communication" + "source": "comms" } }, { @@ -2170,7 +2227,7 @@ "via": { "kind": "Ingress", "name": "element-call", - "source": "communication" + "source": "comms" } }, { @@ -2250,7 +2307,7 @@ "via": { "kind": "Ingress", "name": "livekit-jwt-ingress", - "source": "communication" + "source": "comms" } }, { @@ -2270,27 +2327,7 @@ "via": { "kind": "Ingress", "name": "livekit-ingress", - "source": "communication" - } - }, - { - "host": "live.bstein.dev", - "path": "/", - "backend": { - "namespace": "comms", - "service": "othrys-element-element-web", - "port": 80, - "workloads": [ - { - "kind": "Deployment", - "name": "othrys-element-element-web" - } - ] - }, - "via": { - "kind": "Ingress", - "name": "othrys-element-element-web", - "source": "communication" + "source": "comms" } }, { @@ -2310,7 +2347,7 @@ "via": { "kind": "Ingress", "name": "matrix-wellknown", - "source": "communication" + "source": "comms" } }, { @@ -2330,7 +2367,7 @@ "via": { "kind": "Ingress", "name": "matrix-wellknown", - "source": "communication" + "source": "comms" } }, { @@ -2340,17 +2377,32 @@ "namespace": "comms", "service": "othrys-synapse-matrix-synapse", "port": 8008, + "workloads": [] + }, + "via": { + "kind": "Ingress", + "name": 
"matrix-routing", + "source": "comms" + } + }, + { + "host": "logs.bstein.dev", + "path": "/", + "backend": { + "namespace": "logging", + "service": "oauth2-proxy-logs", + "port": "http", "workloads": [ { "kind": "Deployment", - "name": "othrys-synapse-matrix-synapse" + "name": "oauth2-proxy-logs" } ] }, "via": { "kind": "Ingress", - "name": "matrix-routing", - "source": "communication" + "name": "logs", + "source": "logging" } }, { @@ -2405,7 +2457,7 @@ "via": { "kind": "Ingress", "name": "matrix-routing", - "source": "communication" + "source": "comms" } }, { @@ -2425,7 +2477,7 @@ "via": { "kind": "Ingress", "name": "matrix-wellknown-matrix-live", - "source": "communication" + "source": "comms" } }, { @@ -2445,7 +2497,7 @@ "via": { "kind": "Ingress", "name": "matrix-wellknown-matrix-live", - "source": "communication" + "source": "comms" } }, { @@ -2455,17 +2507,12 @@ "namespace": "comms", "service": "othrys-synapse-matrix-synapse", "port": 8008, - "workloads": [ - { - "kind": "Deployment", - "name": "othrys-synapse-matrix-synapse" - } - ] + "workloads": [] }, "via": { "kind": "Ingress", "name": "matrix-routing", - "source": "communication" + "source": "comms" } }, { @@ -2485,7 +2532,7 @@ "via": { "kind": "Ingress", "name": "matrix-routing", - "source": "communication" + "source": "comms" } }, { @@ -2505,7 +2552,7 @@ "via": { "kind": "Ingress", "name": "matrix-routing", - "source": "communication" + "source": "comms" } }, { @@ -2525,7 +2572,7 @@ "via": { "kind": "Ingress", "name": "matrix-routing", - "source": "communication" + "source": "comms" } }, { @@ -2545,7 +2592,7 @@ "via": { "kind": "Ingress", "name": "matrix-routing", - "source": "communication" + "source": "comms" } }, { @@ -2565,7 +2612,7 @@ "via": { "kind": "Ingress", "name": "matrix-routing", - "source": "communication" + "source": "comms" } }, { @@ -2575,17 +2622,12 @@ "namespace": "comms", "service": "othrys-synapse-matrix-synapse", "port": 8008, - "workloads": [ - { - "kind": "Deployment", - 
"name": "othrys-synapse-matrix-synapse" - } - ] + "workloads": [] }, "via": { "kind": "Ingress", "name": "matrix-routing", - "source": "communication" + "source": "comms" } }, { @@ -2608,6 +2650,26 @@ "source": "monerod" } }, + { + "host": "notes.bstein.dev", + "path": "/", + "backend": { + "namespace": "outline", + "service": "outline", + "port": 80, + "workloads": [ + { + "kind": "Deployment", + "name": "outline" + } + ] + }, + "via": { + "kind": "Ingress", + "name": "outline", + "source": "outline" + } + }, { "host": "office.bstein.dev", "path": "/", @@ -2728,6 +2790,26 @@ "source": "jellyfin" } }, + { + "host": "tasks.bstein.dev", + "path": "/", + "backend": { + "namespace": "planka", + "service": "planka", + "port": 80, + "workloads": [ + { + "kind": "Deployment", + "name": "planka" + } + ] + }, + "via": { + "kind": "Ingress", + "name": "planka", + "source": "planka" + } + }, { "host": "vault.bstein.dev", "path": "/", @@ -2750,12 +2832,27 @@ } ], "helmrelease_host_hints": { + "comms:comms/othrys-element": [ + "call.live.bstein.dev", + "live.bstein.dev", + "matrix.live.bstein.dev" + ], + "comms:comms/othrys-synapse": [ + "bstein.dev", + "kit.live.bstein.dev", + "live.bstein.dev", + "matrix.live.bstein.dev", + "turn.live.bstein.dev" + ], "gitops-ui:flux-system/weave-gitops": [ "cd.bstein.dev" ], "harbor:harbor/harbor": [ "registry.bstein.dev" ], + "logging:logging/data-prepper": [ + "registry.bstein.dev" + ], "mailu:mailu-mailserver/mailu": [ "bstein.dev", "mail.bstein.dev" @@ -2764,6 +2861,7 @@ "alerts.bstein.dev" ], "monitoring:monitoring/grafana": [ + "bstein.dev", "metrics.bstein.dev", "sso.bstein.dev" ] diff --git a/knowledge/catalog/atlas.yaml b/knowledge/catalog/atlas.yaml index d628b7b..f3e04a8 100644 --- a/knowledge/catalog/atlas.yaml +++ b/knowledge/catalog/atlas.yaml @@ -1,3 +1,4 @@ +# knowledge/catalog/atlas.yaml # Generated by scripts/knowledge_render_atlas.py (do not edit by hand) cluster: atlas sources: @@ -7,7 +8,7 @@ sources: - name: 
bstein-dev-home path: services/bstein-dev-home targetNamespace: bstein-dev-home -- name: communication +- name: comms path: services/comms targetNamespace: comms - name: core @@ -40,12 +41,18 @@ sources: - name: keycloak path: services/keycloak targetNamespace: sso +- name: logging + path: services/logging + targetNamespace: null - name: longhorn-ui path: infrastructure/longhorn/ui-ingress targetNamespace: longhorn-system - name: mailu path: services/mailu targetNamespace: mailu-mailserver +- name: maintenance + path: services/maintenance + targetNamespace: null - name: metallb path: infrastructure/metallb targetNamespace: metallb-system @@ -67,9 +74,18 @@ sources: - name: openldap path: services/openldap targetNamespace: sso +- name: outline + path: services/outline + targetNamespace: outline - name: pegasus path: services/pegasus targetNamespace: jellyfin +- name: planka + path: services/planka + targetNamespace: planka +- name: postgres + path: infrastructure/postgres + targetNamespace: postgres - name: sui-metrics path: services/sui-metrics/overlays/atlas targetNamespace: sui-metrics @@ -97,7 +113,7 @@ workloads: serviceAccountName: null nodeSelector: {} images: - - ollama/ollama:latest + - ollama/ollama@sha256:2c9595c555fd70a28363489ac03bd5bf9e7c5bdf2890373c3a830ffd7252ce6d - kind: Deployment namespace: bstein-dev-home name: bstein-dev-home-backend @@ -108,7 +124,7 @@ workloads: kubernetes.io/arch: arm64 node-role.kubernetes.io/worker: 'true' images: - - registry.bstein.dev/bstein/bstein-dev-home-backend:0.1.1-84 + - registry.bstein.dev/bstein/bstein-dev-home-backend:0.1.1-92 - kind: Deployment namespace: bstein-dev-home name: bstein-dev-home-frontend @@ -119,7 +135,7 @@ workloads: kubernetes.io/arch: arm64 node-role.kubernetes.io/worker: 'true' images: - - registry.bstein.dev/bstein/bstein-dev-home-frontend:0.1.1-84 + - registry.bstein.dev/bstein/bstein-dev-home-frontend:0.1.1-92 - kind: Deployment namespace: bstein-dev-home name: chat-ai-gateway @@ -160,7 
+176,7 @@ workloads: nodeSelector: hardware: rpi5 images: - - ghcr.io/element-hq/element-call:latest + - ghcr.io/element-hq/element-call@sha256:e6897c7818331714eae19d83ef8ea94a8b41115f0d8d3f62c2fed2d02c65c9bc - kind: Deployment namespace: comms name: livekit @@ -209,42 +225,6 @@ workloads: nodeSelector: {} images: - nginx:1.27-alpine -- kind: Deployment - namespace: comms - name: othrys-element-element-web - labels: - app.kubernetes.io/instance: othrys-element - app.kubernetes.io/name: element-web - serviceAccountName: othrys-element-element-web - nodeSelector: - hardware: rpi5 - images: - - ghcr.io/element-hq/element-web:v1.12.6 -- kind: Deployment - namespace: comms - name: othrys-synapse-matrix-synapse - labels: - app.kubernetes.io/component: synapse - app.kubernetes.io/instance: othrys-synapse - app.kubernetes.io/name: matrix-synapse - serviceAccountName: default - nodeSelector: - hardware: rpi5 - images: - - ghcr.io/element-hq/synapse:v1.144.0 -- kind: Deployment - namespace: comms - name: othrys-synapse-redis-master - labels: - app.kubernetes.io/component: master - app.kubernetes.io/instance: othrys-synapse - app.kubernetes.io/managed-by: Helm - app.kubernetes.io/name: redis - helm.sh/chart: redis-17.17.1 - serviceAccountName: othrys-synapse-redis - nodeSelector: {} - images: - - docker.io/bitnamilegacy/redis:7.0.12-debian-11-r34 - kind: DaemonSet namespace: crypto name: monero-xmrig @@ -254,7 +234,7 @@ workloads: nodeSelector: node-role.kubernetes.io/worker: 'true' images: - - ghcr.io/tari-project/xmrig:latest + - ghcr.io/tari-project/xmrig@sha256:80defbfd0b640d604c91cb5101d3642db7928e1e68ee3c6b011289b3565a39d9 - kind: Deployment namespace: crypto name: monero-p2pool @@ -447,6 +427,46 @@ workloads: kubernetes.io/os: linux images: - hashicorp/vault-csi-provider:1.7.0 +- kind: DaemonSet + namespace: logging + name: node-image-gc-rpi4 + labels: + app: node-image-gc-rpi4 + serviceAccountName: node-image-gc-rpi4 + nodeSelector: + hardware: rpi4 + images: + - 
bitnami/kubectl@sha256:554ab88b1858e8424c55de37ad417b16f2a0e65d1607aa0f3fe3ce9b9f10b131 +- kind: DaemonSet + namespace: logging + name: node-image-prune-rpi5 + labels: + app: node-image-prune-rpi5 + serviceAccountName: node-image-prune-rpi5 + nodeSelector: + hardware: rpi5 + images: + - bitnami/kubectl@sha256:554ab88b1858e8424c55de37ad417b16f2a0e65d1607aa0f3fe3ce9b9f10b131 +- kind: DaemonSet + namespace: logging + name: node-log-rotation + labels: + app: node-log-rotation + serviceAccountName: node-log-rotation + nodeSelector: + hardware: rpi5 + images: + - bitnami/kubectl@sha256:554ab88b1858e8424c55de37ad417b16f2a0e65d1607aa0f3fe3ce9b9f10b131 +- kind: Deployment + namespace: logging + name: oauth2-proxy-logs + labels: + app: oauth2-proxy-logs + serviceAccountName: null + nodeSelector: + node-role.kubernetes.io/worker: 'true' + images: + - quay.io/oauth2-proxy/oauth2-proxy:v7.6.0 - kind: Deployment namespace: longhorn-system name: oauth2-proxy-longhorn @@ -466,7 +486,7 @@ workloads: nodeSelector: mailu.bstein.dev/vip: 'true' images: - - lachlanevenson/k8s-kubectl:latest + - registry.bstein.dev/bstein/kubectl:1.35.0 - kind: Deployment namespace: mailu-mailserver name: mailu-sync-listener @@ -477,30 +497,24 @@ workloads: images: - python:3.11-alpine - kind: DaemonSet - namespace: metallb-system - name: metallb-speaker + namespace: maintenance + name: node-image-sweeper labels: - app.kubernetes.io/component: speaker - app.kubernetes.io/instance: metallb - app.kubernetes.io/name: metallb - serviceAccountName: metallb-speaker + app: node-image-sweeper + serviceAccountName: node-image-sweeper nodeSelector: kubernetes.io/os: linux images: - - quay.io/frrouting/frr:10.4.1 - - quay.io/metallb/speaker:v0.15.3 -- kind: Deployment - namespace: metallb-system - name: metallb-controller + - python:3.12.9-alpine3.20 +- kind: DaemonSet + namespace: maintenance + name: node-nofile labels: - app.kubernetes.io/component: controller - app.kubernetes.io/instance: metallb - 
app.kubernetes.io/name: metallb - serviceAccountName: metallb-controller - nodeSelector: - kubernetes.io/os: linux + app: node-nofile + serviceAccountName: node-nofile + nodeSelector: {} images: - - quay.io/metallb/controller:v0.15.3 + - bitnami/kubectl@sha256:554ab88b1858e8424c55de37ad417b16f2a0e65d1607aa0f3fe3ce9b9f10b131 - kind: DaemonSet namespace: monitoring name: dcgm-exporter @@ -510,6 +524,16 @@ workloads: nodeSelector: {} images: - registry.bstein.dev/monitoring/dcgm-exporter:4.4.2-4.7.0-ubuntu22.04 +- kind: DaemonSet + namespace: monitoring + name: jetson-tegrastats-exporter + labels: + app: jetson-tegrastats-exporter + serviceAccountName: default + nodeSelector: + jetson: 'true' + images: + - python:3.10-slim - kind: Deployment namespace: monitoring name: postmark-exporter @@ -528,7 +552,7 @@ workloads: nodeSelector: hardware: rpi5 images: - - collabora/code:latest + - collabora/code@sha256:3c58d0e9bae75e4647467d0c7d91cb66f261d3e814709aed590b5c334a04db26 - kind: Deployment namespace: nextcloud name: nextcloud @@ -539,6 +563,46 @@ workloads: hardware: rpi5 images: - nextcloud:29-apache +- kind: Deployment + namespace: outline + name: outline + labels: + app: outline + serviceAccountName: null + nodeSelector: + node-role.kubernetes.io/worker: 'true' + images: + - outlinewiki/outline:1.2.0 +- kind: Deployment + namespace: outline + name: outline-redis + labels: + app: outline-redis + serviceAccountName: null + nodeSelector: + node-role.kubernetes.io/worker: 'true' + images: + - redis:7.4.1-alpine +- kind: Deployment + namespace: planka + name: planka + labels: + app: planka + serviceAccountName: null + nodeSelector: + node-role.kubernetes.io/worker: 'true' + images: + - ghcr.io/plankanban/planka:2.0.0-rc.4 +- kind: StatefulSet + namespace: postgres + name: postgres + labels: + app: postgres + serviceAccountName: postgres-vault + nodeSelector: + node-role.kubernetes.io/worker: 'true' + images: + - postgres:15 - kind: Deployment namespace: sso name: keycloak 
@@ -650,16 +714,6 @@ services: port: 80 targetPort: 8080 protocol: TCP -- namespace: ci-demo - name: ci-demo - type: ClusterIP - selector: - app.kubernetes.io/name: ci-demo - ports: - - name: http - port: 80 - targetPort: http - protocol: TCP - namespace: comms name: coturn type: LoadBalancer @@ -958,64 +1012,6 @@ services: port: 80 targetPort: 80 protocol: TCP -- namespace: comms - name: othrys-element-element-web - type: ClusterIP - selector: - app.kubernetes.io/instance: othrys-element - app.kubernetes.io/name: element-web - ports: - - name: http - port: 80 - targetPort: http - protocol: TCP -- namespace: comms - name: othrys-synapse-matrix-synapse - type: ClusterIP - selector: - app.kubernetes.io/component: synapse - app.kubernetes.io/instance: othrys-synapse - app.kubernetes.io/name: matrix-synapse - ports: - - name: http - port: 8008 - targetPort: http - protocol: TCP -- namespace: comms - name: othrys-synapse-redis-headless - type: ClusterIP - selector: - app.kubernetes.io/instance: othrys-synapse - app.kubernetes.io/name: redis - ports: - - name: tcp-redis - port: 6379 - targetPort: redis - protocol: TCP -- namespace: comms - name: othrys-synapse-redis-master - type: ClusterIP - selector: - app.kubernetes.io/component: master - app.kubernetes.io/instance: othrys-synapse - app.kubernetes.io/name: redis - ports: - - name: tcp-redis - port: 6379 - targetPort: redis - protocol: TCP -- namespace: comms - name: othrys-synapse-replication - type: ClusterIP - selector: - app.kubernetes.io/component: synapse - app.kubernetes.io/instance: othrys-synapse - app.kubernetes.io/name: matrix-synapse - ports: - - name: replication - port: 9093 - targetPort: replication - protocol: TCP - namespace: crypto name: monerod type: ClusterIP @@ -1143,6 +1139,16 @@ services: port: 443 targetPort: websecure protocol: TCP +- namespace: logging + name: oauth2-proxy-logs + type: ClusterIP + selector: + app: oauth2-proxy-logs + ports: + - name: http + port: 80 + targetPort: 4180 + 
protocol: TCP - namespace: longhorn-system name: oauth2-proxy-longhorn type: ClusterIP @@ -1195,18 +1201,6 @@ services: port: 8080 targetPort: 8080 protocol: TCP -- namespace: metallb-system - name: metallb-webhook-service - type: ClusterIP - selector: - app.kubernetes.io/component: controller - app.kubernetes.io/instance: metallb - app.kubernetes.io/name: metallb - ports: - - name: null - port: 443 - targetPort: 9443 - protocol: TCP - namespace: monitoring name: dcgm-exporter type: ClusterIP @@ -1217,6 +1211,16 @@ services: port: 9400 targetPort: metrics protocol: TCP +- namespace: monitoring + name: jetson-tegrastats-exporter + type: ClusterIP + selector: + app: jetson-tegrastats-exporter + ports: + - name: metrics + port: 9100 + targetPort: metrics + protocol: TCP - namespace: monitoring name: postmark-exporter type: ClusterIP @@ -1247,6 +1251,46 @@ services: port: 80 targetPort: http protocol: TCP +- namespace: outline + name: outline + type: ClusterIP + selector: + app: outline + ports: + - name: http + port: 80 + targetPort: http + protocol: TCP +- namespace: outline + name: outline-redis + type: ClusterIP + selector: + app: outline-redis + ports: + - name: redis + port: 6379 + targetPort: redis + protocol: TCP +- namespace: planka + name: planka + type: ClusterIP + selector: + app: planka + ports: + - name: http + port: 80 + targetPort: http + protocol: TCP +- namespace: postgres + name: postgres-service + type: ClusterIP + selector: + app: postgres + ports: + - name: postgres + port: 5432 + targetPort: 5432 + protocol: TCP - namespace: sso name: keycloak type: ClusterIP @@ -1378,7 +1422,7 @@ http_endpoints: via: kind: Ingress name: matrix-wellknown-bstein-dev - source: communication + source: comms - host: bstein.dev path: /.well-known/matrix/server backend: @@ -1389,7 +1433,7 @@ http_endpoints: via: kind: Ingress name: matrix-wellknown-bstein-dev - source: communication + source: comms - host: bstein.dev path: /api backend: @@ -1415,7 +1459,7 @@ 
http_endpoints: via: kind: Ingress name: element-call - source: communication + source: comms - host: chat.ai.bstein.dev path: / backend: @@ -1467,7 +1511,7 @@ http_endpoints: via: kind: Ingress name: livekit-jwt-ingress - source: communication + source: comms - host: kit.live.bstein.dev path: /livekit/sfu backend: @@ -1480,20 +1524,7 @@ http_endpoints: via: kind: Ingress name: livekit-ingress - source: communication -- host: live.bstein.dev - path: / - backend: - namespace: comms - service: othrys-element-element-web - port: 80 - workloads: - - kind: Deployment - name: othrys-element-element-web - via: - kind: Ingress - name: othrys-element-element-web - source: communication + source: comms - host: live.bstein.dev path: /.well-known/matrix/client backend: @@ -1504,7 +1535,7 @@ http_endpoints: via: kind: Ingress name: matrix-wellknown - source: communication + source: comms - host: live.bstein.dev path: /.well-known/matrix/server backend: @@ -1515,20 +1546,31 @@ http_endpoints: via: kind: Ingress name: matrix-wellknown - source: communication + source: comms - host: live.bstein.dev path: /_matrix backend: namespace: comms service: othrys-synapse-matrix-synapse port: 8008 - workloads: &id002 - - kind: Deployment - name: othrys-synapse-matrix-synapse + workloads: [] via: kind: Ingress name: matrix-routing - source: communication + source: comms +- host: logs.bstein.dev + path: / + backend: + namespace: logging + service: oauth2-proxy-logs + port: http + workloads: + - kind: Deployment + name: oauth2-proxy-logs + via: + kind: Ingress + name: logs + source: logging - host: longhorn.bstein.dev path: / backend: @@ -1559,13 +1601,13 @@ http_endpoints: namespace: comms service: matrix-authentication-service port: 8080 - workloads: &id003 + workloads: &id002 - kind: Deployment name: matrix-authentication-service via: kind: Ingress name: matrix-routing - source: communication + source: comms - host: matrix.live.bstein.dev path: /.well-known/matrix/client backend: @@ -1576,7 
+1618,7 @@ http_endpoints: via: kind: Ingress name: matrix-wellknown-matrix-live - source: communication + source: comms - host: matrix.live.bstein.dev path: /.well-known/matrix/server backend: @@ -1587,86 +1629,86 @@ http_endpoints: via: kind: Ingress name: matrix-wellknown-matrix-live - source: communication + source: comms - host: matrix.live.bstein.dev path: /_matrix backend: namespace: comms service: othrys-synapse-matrix-synapse port: 8008 - workloads: *id002 + workloads: [] via: kind: Ingress name: matrix-routing - source: communication + source: comms - host: matrix.live.bstein.dev path: /_matrix/client/r0/register backend: namespace: comms service: matrix-guest-register port: 8080 - workloads: &id004 + workloads: &id003 - kind: Deployment name: matrix-guest-register via: kind: Ingress name: matrix-routing - source: communication + source: comms - host: matrix.live.bstein.dev path: /_matrix/client/v3/login backend: namespace: comms service: matrix-authentication-service port: 8080 - workloads: *id003 + workloads: *id002 via: kind: Ingress name: matrix-routing - source: communication + source: comms - host: matrix.live.bstein.dev path: /_matrix/client/v3/logout backend: namespace: comms service: matrix-authentication-service port: 8080 - workloads: *id003 + workloads: *id002 via: kind: Ingress name: matrix-routing - source: communication + source: comms - host: matrix.live.bstein.dev path: /_matrix/client/v3/refresh backend: namespace: comms service: matrix-authentication-service port: 8080 - workloads: *id003 + workloads: *id002 via: kind: Ingress name: matrix-routing - source: communication + source: comms - host: matrix.live.bstein.dev path: /_matrix/client/v3/register backend: namespace: comms service: matrix-guest-register port: 8080 - workloads: *id004 + workloads: *id003 via: kind: Ingress name: matrix-routing - source: communication + source: comms - host: matrix.live.bstein.dev path: /_synapse backend: namespace: comms service: 
othrys-synapse-matrix-synapse port: 8008 - workloads: *id002 + workloads: [] via: kind: Ingress name: matrix-routing - source: communication + source: comms - host: monero.bstein.dev path: / backend: @@ -1680,6 +1722,19 @@ http_endpoints: kind: Ingress name: monerod source: monerod +- host: notes.bstein.dev + path: / + backend: + namespace: outline + service: outline + port: 80 + workloads: + - kind: Deployment + name: outline + via: + kind: Ingress + name: outline + source: outline - host: office.bstein.dev path: / backend: @@ -1758,6 +1813,19 @@ http_endpoints: kind: Ingress name: jellyfin source: jellyfin +- host: tasks.bstein.dev + path: / + backend: + namespace: planka + service: planka + port: 80 + workloads: + - kind: Deployment + name: planka + via: + kind: Ingress + name: planka + source: planka - host: vault.bstein.dev path: / backend: @@ -1772,15 +1840,28 @@ http_endpoints: name: vaultwarden-ingress source: vaultwarden helmrelease_host_hints: + comms:comms/othrys-element: + - call.live.bstein.dev + - live.bstein.dev + - matrix.live.bstein.dev + comms:comms/othrys-synapse: + - bstein.dev + - kit.live.bstein.dev + - live.bstein.dev + - matrix.live.bstein.dev + - turn.live.bstein.dev gitops-ui:flux-system/weave-gitops: - cd.bstein.dev harbor:harbor/harbor: - registry.bstein.dev + logging:logging/data-prepper: + - registry.bstein.dev mailu:mailu-mailserver/mailu: - bstein.dev - mail.bstein.dev monitoring:monitoring/alertmanager: - alerts.bstein.dev monitoring:monitoring/grafana: + - bstein.dev - metrics.bstein.dev - sso.bstein.dev diff --git a/knowledge/diagrams/atlas-http.mmd b/knowledge/diagrams/atlas-http.mmd index ddd33d8..ab7c362 100644 --- a/knowledge/diagrams/atlas-http.mmd +++ b/knowledge/diagrams/atlas-http.mmd @@ -47,15 +47,14 @@ flowchart LR wl_comms_livekit["comms/livekit (Deployment)"] svc_comms_livekit --> wl_comms_livekit host_live_bstein_dev["live.bstein.dev"] - svc_comms_othrys_element_element_web["comms/othrys-element-element-web 
(Service)"] - host_live_bstein_dev --> svc_comms_othrys_element_element_web - wl_comms_othrys_element_element_web["comms/othrys-element-element-web (Deployment)"] - svc_comms_othrys_element_element_web --> wl_comms_othrys_element_element_web host_live_bstein_dev --> svc_comms_matrix_wellknown svc_comms_othrys_synapse_matrix_synapse["comms/othrys-synapse-matrix-synapse (Service)"] host_live_bstein_dev --> svc_comms_othrys_synapse_matrix_synapse - wl_comms_othrys_synapse_matrix_synapse["comms/othrys-synapse-matrix-synapse (Deployment)"] - svc_comms_othrys_synapse_matrix_synapse --> wl_comms_othrys_synapse_matrix_synapse + host_logs_bstein_dev["logs.bstein.dev"] + svc_logging_oauth2_proxy_logs["logging/oauth2-proxy-logs (Service)"] + host_logs_bstein_dev --> svc_logging_oauth2_proxy_logs + wl_logging_oauth2_proxy_logs["logging/oauth2-proxy-logs (Deployment)"] + svc_logging_oauth2_proxy_logs --> wl_logging_oauth2_proxy_logs host_longhorn_bstein_dev["longhorn.bstein.dev"] svc_longhorn_system_oauth2_proxy_longhorn["longhorn-system/oauth2-proxy-longhorn (Service)"] host_longhorn_bstein_dev --> svc_longhorn_system_oauth2_proxy_longhorn @@ -80,6 +79,11 @@ flowchart LR host_monero_bstein_dev --> svc_crypto_monerod wl_crypto_monerod["crypto/monerod (Deployment)"] svc_crypto_monerod --> wl_crypto_monerod + host_notes_bstein_dev["notes.bstein.dev"] + svc_outline_outline["outline/outline (Service)"] + host_notes_bstein_dev --> svc_outline_outline + wl_outline_outline["outline/outline (Deployment)"] + svc_outline_outline --> wl_outline_outline host_office_bstein_dev["office.bstein.dev"] svc_nextcloud_collabora["nextcloud/collabora (Service)"] host_office_bstein_dev --> svc_nextcloud_collabora @@ -110,6 +114,11 @@ flowchart LR host_stream_bstein_dev --> svc_jellyfin_jellyfin wl_jellyfin_jellyfin["jellyfin/jellyfin (Deployment)"] svc_jellyfin_jellyfin --> wl_jellyfin_jellyfin + host_tasks_bstein_dev["tasks.bstein.dev"] + svc_planka_planka["planka/planka (Service)"] + 
host_tasks_bstein_dev --> svc_planka_planka + wl_planka_planka["planka/planka (Deployment)"] + svc_planka_planka --> wl_planka_planka host_vault_bstein_dev["vault.bstein.dev"] svc_vaultwarden_vaultwarden_service["vaultwarden/vaultwarden-service (Service)"] host_vault_bstein_dev --> svc_vaultwarden_vaultwarden_service @@ -133,10 +142,7 @@ flowchart LR wl_comms_livekit_token_service svc_comms_livekit wl_comms_livekit - svc_comms_othrys_element_element_web - wl_comms_othrys_element_element_web svc_comms_othrys_synapse_matrix_synapse - wl_comms_othrys_synapse_matrix_synapse svc_comms_matrix_authentication_service wl_comms_matrix_authentication_service svc_comms_matrix_guest_register @@ -160,6 +166,10 @@ flowchart LR svc_jenkins_jenkins wl_jenkins_jenkins end + subgraph logging[logging] + svc_logging_oauth2_proxy_logs + wl_logging_oauth2_proxy_logs + end subgraph longhorn_system[longhorn-system] svc_longhorn_system_oauth2_proxy_longhorn wl_longhorn_system_oauth2_proxy_longhorn @@ -173,6 +183,14 @@ flowchart LR svc_nextcloud_collabora wl_nextcloud_collabora end + subgraph outline[outline] + svc_outline_outline + wl_outline_outline + end + subgraph planka[planka] + svc_planka_planka + wl_planka_planka + end subgraph sso[sso] svc_sso_oauth2_proxy wl_sso_oauth2_proxy diff --git a/scripts/knowledge_render_atlas.py b/scripts/knowledge_render_atlas.py index 50ac84c..c7f9f26 100644 --- a/scripts/knowledge_render_atlas.py +++ b/scripts/knowledge_render_atlas.py @@ -505,7 +505,9 @@ def main() -> int: diagram_path = out_dir / "diagrams" / "atlas-http.mmd" runbooks_json_path = out_dir / "catalog" / "runbooks.json" + catalog_rel = catalog_path.relative_to(REPO_ROOT).as_posix() catalog_path.write_text( + f"# {catalog_rel}\n" "# Generated by scripts/knowledge_render_atlas.py (do not edit by hand)\n" + yaml.safe_dump(catalog, sort_keys=False), encoding="utf-8", diff --git a/services/comms/knowledge/catalog/atlas-summary.json b/services/comms/knowledge/catalog/atlas-summary.json index 
00d6658..fa35051 100644 --- a/services/comms/knowledge/catalog/atlas-summary.json +++ b/services/comms/knowledge/catalog/atlas-summary.json @@ -1,6 +1,6 @@ { "counts": { - "helmrelease_host_hints": 18, + "helmrelease_host_hints": 17, "http_endpoints": 37, "services": 43, "workloads": 54 diff --git a/services/comms/knowledge/catalog/atlas.json b/services/comms/knowledge/catalog/atlas.json index 9ca1d29..0d97bcd 100644 --- a/services/comms/knowledge/catalog/atlas.json +++ b/services/comms/knowledge/catalog/atlas.json @@ -199,7 +199,7 @@ "node-role.kubernetes.io/worker": "true" }, "images": [ - "registry.bstein.dev/bstein/bstein-dev-home-backend:registry.bstein.dev/bstein/bstein-dev-home-backend:0.1.1-92" + "registry.bstein.dev/bstein/bstein-dev-home-backend:0.1.1-92" ] }, { @@ -215,7 +215,7 @@ "node-role.kubernetes.io/worker": "true" }, "images": [ - "registry.bstein.dev/bstein/bstein-dev-home-frontend:registry.bstein.dev/bstein/bstein-dev-home-frontend:0.1.1-92" + "registry.bstein.dev/bstein/bstein-dev-home-frontend:0.1.1-92" ] }, { @@ -2842,8 +2842,7 @@ "kit.live.bstein.dev", "live.bstein.dev", "matrix.live.bstein.dev", - "registry.bstein.dev", - "sso.bstein.dev" + "turn.live.bstein.dev" ], "gitops-ui:flux-system/weave-gitops": [ "cd.bstein.dev" diff --git a/services/comms/knowledge/catalog/atlas.yaml b/services/comms/knowledge/catalog/atlas.yaml index 5bac143..6529e1a 100644 --- a/services/comms/knowledge/catalog/atlas.yaml +++ b/services/comms/knowledge/catalog/atlas.yaml @@ -1,3 +1,4 @@ +# services/comms/knowledge/catalog/atlas.yaml # Generated by scripts/knowledge_render_atlas.py (do not edit by hand) cluster: atlas sources: @@ -123,7 +124,7 @@ workloads: kubernetes.io/arch: arm64 node-role.kubernetes.io/worker: 'true' images: - - registry.bstein.dev/bstein/bstein-dev-home-backend:registry.bstein.dev/bstein/bstein-dev-home-backend:0.1.1-92 + - registry.bstein.dev/bstein/bstein-dev-home-backend:0.1.1-92 - kind: Deployment namespace: bstein-dev-home name: 
bstein-dev-home-frontend @@ -134,7 +135,7 @@ workloads: kubernetes.io/arch: arm64 node-role.kubernetes.io/worker: 'true' images: - - registry.bstein.dev/bstein/bstein-dev-home-frontend:registry.bstein.dev/bstein/bstein-dev-home-frontend:0.1.1-92 + - registry.bstein.dev/bstein/bstein-dev-home-frontend:0.1.1-92 - kind: Deployment namespace: bstein-dev-home name: chat-ai-gateway @@ -1848,8 +1849,7 @@ helmrelease_host_hints: - kit.live.bstein.dev - live.bstein.dev - matrix.live.bstein.dev - - registry.bstein.dev - - sso.bstein.dev + - turn.live.bstein.dev gitops-ui:flux-system/weave-gitops: - cd.bstein.dev harbor:harbor/harbor: diff --git a/services/crypto/monerod/deployment.yaml b/services/crypto/monerod/deployment.yaml index 1c20ff8..40c9e24 100644 --- a/services/crypto/monerod/deployment.yaml +++ b/services/crypto/monerod/deployment.yaml @@ -1,4 +1,4 @@ -# services/crypto/monerod +# services/crypto/monerod/deployment.yaml apiVersion: apps/v1 kind: Deployment metadata: diff --git a/services/crypto/xmr-miner/kustomization.yaml b/services/crypto/xmr-miner/kustomization.yaml index 263b25a..46c9767 100644 --- a/services/crypto/xmr-miner/kustomization.yaml +++ b/services/crypto/xmr-miner/kustomization.yaml @@ -1,4 +1,4 @@ -# services/crypto/xmr-miner/kustomization/yaml +# services/crypto/xmr-miner/kustomization.yaml apiVersion: kustomize.config.k8s.io/v1beta1 kind: Kustomization resources: diff --git a/services/monitoring/helmrelease.yaml b/services/monitoring/helmrelease.yaml index ad26eaf..33abc9e 100644 --- a/services/monitoring/helmrelease.yaml +++ b/services/monitoring/helmrelease.yaml @@ -1,4 +1,4 @@ -# services/monitoring/kube-state-metrics-helmrelease.yaml +# services/monitoring/helmrelease.yaml apiVersion: helm.toolkit.fluxcd.io/v2 kind: HelmRelease metadata: From e776f004c9edb41bf289568c7bfbaf6c3acb4681 Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Wed, 14 Jan 2026 01:21:08 -0300 Subject: [PATCH 021/270] keycloak: ensure harbor oidc scope --- 
.../harbor-oidc-secret-ensure-job.yaml | 115 ++---------------- services/keycloak/kustomization.yaml | 3 + .../scripts/harbor_oidc_secret_ensure.sh | 104 ++++++++++++++++ 3 files changed, 117 insertions(+), 105 deletions(-) create mode 100755 services/keycloak/scripts/harbor_oidc_secret_ensure.sh diff --git a/services/keycloak/harbor-oidc-secret-ensure-job.yaml b/services/keycloak/harbor-oidc-secret-ensure-job.yaml index 974f01a..946f0fe 100644 --- a/services/keycloak/harbor-oidc-secret-ensure-job.yaml +++ b/services/keycloak/harbor-oidc-secret-ensure-job.yaml @@ -11,6 +11,11 @@ spec: spec: serviceAccountName: mas-secrets-ensure restartPolicy: Never + volumes: + - name: harbor-oidc-secret-ensure-script + configMap: + name: harbor-oidc-secret-ensure-script + defaultMode: 0555 affinity: nodeAffinity: requiredDuringSchedulingIgnoredDuringExecution: @@ -24,111 +29,7 @@ spec: containers: - name: apply image: alpine:3.20 - command: ["/bin/sh", "-c"] - args: - - | - set -euo pipefail - apk add --no-cache curl jq kubectl >/dev/null - - KC_URL="http://keycloak.sso.svc.cluster.local" - ACCESS_TOKEN="" - for attempt in 1 2 3 4 5; do - TOKEN_JSON="$(curl -sS -X POST "$KC_URL/realms/master/protocol/openid-connect/token" \ - -H 'Content-Type: application/x-www-form-urlencoded' \ - -d "grant_type=password" \ - -d "client_id=admin-cli" \ - -d "username=${KEYCLOAK_ADMIN}" \ - -d "password=${KEYCLOAK_ADMIN_PASSWORD}" || true)" - ACCESS_TOKEN="$(echo "$TOKEN_JSON" | jq -r '.access_token' 2>/dev/null || true)" - if [ -n "$ACCESS_TOKEN" ] && [ "$ACCESS_TOKEN" != "null" ]; then - break - fi - echo "Keycloak token request failed (attempt ${attempt})" >&2 - sleep $((attempt * 2)) - done - if [ -z "$ACCESS_TOKEN" ] || [ "$ACCESS_TOKEN" = "null" ]; then - echo "Failed to fetch Keycloak admin token" >&2 - exit 1 - fi - - CLIENT_QUERY="$(curl -sS -H "Authorization: Bearer ${ACCESS_TOKEN}" \ - "$KC_URL/admin/realms/atlas/clients?clientId=harbor" || true)" - CLIENT_ID="$(echo "$CLIENT_QUERY" | 
jq -r '.[0].id' 2>/dev/null || true)" - - if [ -z "$CLIENT_ID" ] || [ "$CLIENT_ID" = "null" ]; then - create_payload='{"clientId":"harbor","enabled":true,"protocol":"openid-connect","publicClient":false,"standardFlowEnabled":true,"implicitFlowEnabled":false,"directAccessGrantsEnabled":false,"serviceAccountsEnabled":false,"redirectUris":["https://registry.bstein.dev/c/oidc/callback"],"webOrigins":["https://registry.bstein.dev"],"rootUrl":"https://registry.bstein.dev","baseUrl":"/"}' - status="$(curl -sS -o /dev/null -w "%{http_code}" -X POST \ - -H "Authorization: Bearer ${ACCESS_TOKEN}" \ - -H 'Content-Type: application/json' \ - -d "${create_payload}" \ - "$KC_URL/admin/realms/atlas/clients")" - if [ "$status" != "201" ] && [ "$status" != "204" ]; then - echo "Keycloak client create failed (status ${status})" >&2 - exit 1 - fi - CLIENT_QUERY="$(curl -sS -H "Authorization: Bearer ${ACCESS_TOKEN}" \ - "$KC_URL/admin/realms/atlas/clients?clientId=harbor" || true)" - CLIENT_ID="$(echo "$CLIENT_QUERY" | jq -r '.[0].id' 2>/dev/null || true)" - fi - - if [ -z "$CLIENT_ID" ] || [ "$CLIENT_ID" = "null" ]; then - echo "Keycloak client harbor not found" >&2 - exit 1 - fi - - SCOPE_ID="$(curl -sS -H "Authorization: Bearer ${ACCESS_TOKEN}" \ - "$KC_URL/admin/realms/atlas/client-scopes?search=groups" | jq -r '.[] | select(.name=="groups") | .id' 2>/dev/null | head -n1 || true)" - if [ -z "$SCOPE_ID" ] || [ "$SCOPE_ID" = "null" ]; then - echo "Keycloak client scope groups not found" >&2 - exit 1 - fi - - DEFAULT_SCOPES="$(curl -sS -H "Authorization: Bearer ${ACCESS_TOKEN}" \ - "$KC_URL/admin/realms/atlas/clients/${CLIENT_ID}/default-client-scopes" || true)" - OPTIONAL_SCOPES="$(curl -sS -H "Authorization: Bearer ${ACCESS_TOKEN}" \ - "$KC_URL/admin/realms/atlas/clients/${CLIENT_ID}/optional-client-scopes" || true)" - - if ! echo "$DEFAULT_SCOPES" | jq -e '.[] | select(.name=="groups")' >/dev/null 2>&1 \ - && ! 
echo "$OPTIONAL_SCOPES" | jq -e '.[] | select(.name=="groups")' >/dev/null 2>&1; then - status="$(curl -sS -o /dev/null -w "%{http_code}" -X PUT \ - -H "Authorization: Bearer ${ACCESS_TOKEN}" \ - "$KC_URL/admin/realms/atlas/clients/${CLIENT_ID}/optional-client-scopes/${SCOPE_ID}")" - if [ "$status" != "200" ] && [ "$status" != "201" ] && [ "$status" != "204" ]; then - status="$(curl -sS -o /dev/null -w "%{http_code}" -X POST \ - -H "Authorization: Bearer ${ACCESS_TOKEN}" \ - "$KC_URL/admin/realms/atlas/clients/${CLIENT_ID}/optional-client-scopes/${SCOPE_ID}")" - if [ "$status" != "200" ] && [ "$status" != "201" ] && [ "$status" != "204" ]; then - echo "Failed to attach groups client scope to harbor (status ${status})" >&2 - exit 1 - fi - fi - fi - - CLIENT_SECRET="$(curl -sS -H "Authorization: Bearer ${ACCESS_TOKEN}" \ - "$KC_URL/admin/realms/atlas/clients/${CLIENT_ID}/client-secret" | jq -r '.value' 2>/dev/null || true)" - if [ -z "$CLIENT_SECRET" ] || [ "$CLIENT_SECRET" = "null" ]; then - echo "Keycloak client secret not found" >&2 - exit 1 - fi - - CONFIG_OVERWRITE_JSON="$(jq -nc \ - --arg auth_mode "oidc_auth" \ - --arg oidc_name "Keycloak" \ - --arg oidc_client_id "harbor" \ - --arg oidc_client_secret "${CLIENT_SECRET}" \ - --arg oidc_endpoint "https://sso.bstein.dev/realms/atlas" \ - --arg oidc_scope "openid,profile,email,groups" \ - --arg oidc_user_claim "preferred_username" \ - --arg oidc_groups_claim "groups" \ - --arg oidc_admin_group "admin" \ - --argjson oidc_auto_onboard true \ - --argjson oidc_verify_cert true \ - --argjson oidc_logout true \ - '{\n auth_mode: $auth_mode,\n oidc_name: $oidc_name,\n oidc_client_id: $oidc_client_id,\n oidc_client_secret: $oidc_client_secret,\n oidc_endpoint: $oidc_endpoint,\n oidc_scope: $oidc_scope,\n oidc_user_claim: $oidc_user_claim,\n oidc_groups_claim: $oidc_groups_claim,\n oidc_admin_group: $oidc_admin_group,\n oidc_auto_onboard: $oidc_auto_onboard,\n oidc_verify_cert: $oidc_verify_cert,\n oidc_logout: 
$oidc_logout\n }')" - - kubectl -n harbor create secret generic harbor-oidc \ - --from-literal=CONFIG_OVERWRITE_JSON="${CONFIG_OVERWRITE_JSON}" \ - --dry-run=client -o yaml | kubectl -n harbor apply -f - >/dev/null + command: ["/scripts/harbor_oidc_secret_ensure.sh"] env: - name: KEYCLOAK_ADMIN valueFrom: @@ -140,3 +41,7 @@ spec: secretKeyRef: name: keycloak-admin key: password + volumeMounts: + - name: harbor-oidc-secret-ensure-script + mountPath: /scripts + readOnly: true diff --git a/services/keycloak/kustomization.yaml b/services/keycloak/kustomization.yaml index c334e5e..5222ee1 100644 --- a/services/keycloak/kustomization.yaml +++ b/services/keycloak/kustomization.yaml @@ -32,3 +32,6 @@ configMapGenerator: - name: portal-e2e-client-secret-sync-script files: - sso_portal_e2e_client_secret_sync.sh=scripts/sso_portal_e2e_client_secret_sync.sh + - name: harbor-oidc-secret-ensure-script + files: + - harbor_oidc_secret_ensure.sh=scripts/harbor_oidc_secret_ensure.sh diff --git a/services/keycloak/scripts/harbor_oidc_secret_ensure.sh b/services/keycloak/scripts/harbor_oidc_secret_ensure.sh new file mode 100755 index 0000000..da3dff0 --- /dev/null +++ b/services/keycloak/scripts/harbor_oidc_secret_ensure.sh @@ -0,0 +1,104 @@ +#!/usr/bin/env sh +set -euo pipefail + +apk add --no-cache curl jq kubectl >/dev/null + +KC_URL="http://keycloak.sso.svc.cluster.local" +ACCESS_TOKEN="" +for attempt in 1 2 3 4 5; do + TOKEN_JSON="$(curl -sS -X POST "$KC_URL/realms/master/protocol/openid-connect/token" \ + -H 'Content-Type: application/x-www-form-urlencoded' \ + -d "grant_type=password" \ + -d "client_id=admin-cli" \ + -d "username=${KEYCLOAK_ADMIN}" \ + -d "password=${KEYCLOAK_ADMIN_PASSWORD}" || true)" + ACCESS_TOKEN="$(echo "$TOKEN_JSON" | jq -r '.access_token' 2>/dev/null || true)" + if [ -n "$ACCESS_TOKEN" ] && [ "$ACCESS_TOKEN" != "null" ]; then + break + fi + echo "Keycloak token request failed (attempt ${attempt})" >&2 + sleep $((attempt * 2)) +done +if [ -z 
"$ACCESS_TOKEN" ] || [ "$ACCESS_TOKEN" = "null" ]; then + echo "Failed to fetch Keycloak admin token" >&2 + exit 1 +fi + +CLIENT_QUERY="$(curl -sS -H "Authorization: Bearer ${ACCESS_TOKEN}" \ + "$KC_URL/admin/realms/atlas/clients?clientId=harbor" || true)" +CLIENT_ID="$(echo "$CLIENT_QUERY" | jq -r '.[0].id' 2>/dev/null || true)" + +if [ -z "$CLIENT_ID" ] || [ "$CLIENT_ID" = "null" ]; then + create_payload='{"clientId":"harbor","enabled":true,"protocol":"openid-connect","publicClient":false,"standardFlowEnabled":true,"implicitFlowEnabled":false,"directAccessGrantsEnabled":false,"serviceAccountsEnabled":false,"redirectUris":["https://registry.bstein.dev/c/oidc/callback"],"webOrigins":["https://registry.bstein.dev"],"rootUrl":"https://registry.bstein.dev","baseUrl":"/"}' + status="$(curl -sS -o /dev/null -w "%{http_code}" -X POST \ + -H "Authorization: Bearer ${ACCESS_TOKEN}" \ + -H 'Content-Type: application/json' \ + -d "${create_payload}" \ + "$KC_URL/admin/realms/atlas/clients")" + if [ "$status" != "201" ] && [ "$status" != "204" ]; then + echo "Keycloak client create failed (status ${status})" >&2 + exit 1 + fi + CLIENT_QUERY="$(curl -sS -H "Authorization: Bearer ${ACCESS_TOKEN}" \ + "$KC_URL/admin/realms/atlas/clients?clientId=harbor" || true)" + CLIENT_ID="$(echo "$CLIENT_QUERY" | jq -r '.[0].id' 2>/dev/null || true)" +fi + +if [ -z "$CLIENT_ID" ] || [ "$CLIENT_ID" = "null" ]; then + echo "Keycloak client harbor not found" >&2 + exit 1 +fi + +SCOPE_ID="$(curl -sS -H "Authorization: Bearer ${ACCESS_TOKEN}" \ + "$KC_URL/admin/realms/atlas/client-scopes?search=groups" | jq -r '.[] | select(.name=="groups") | .id' 2>/dev/null | head -n1 || true)" +if [ -z "$SCOPE_ID" ] || [ "$SCOPE_ID" = "null" ]; then + echo "Keycloak client scope groups not found" >&2 + exit 1 +fi + +DEFAULT_SCOPES="$(curl -sS -H "Authorization: Bearer ${ACCESS_TOKEN}" \ + "$KC_URL/admin/realms/atlas/clients/${CLIENT_ID}/default-client-scopes" || true)" +OPTIONAL_SCOPES="$(curl -sS -H 
"Authorization: Bearer ${ACCESS_TOKEN}" \ + "$KC_URL/admin/realms/atlas/clients/${CLIENT_ID}/optional-client-scopes" || true)" + +if ! echo "$DEFAULT_SCOPES" | jq -e '.[] | select(.name=="groups")' >/dev/null 2>&1 \ + && ! echo "$OPTIONAL_SCOPES" | jq -e '.[] | select(.name=="groups")' >/dev/null 2>&1; then + status="$(curl -sS -o /dev/null -w "%{http_code}" -X PUT \ + -H "Authorization: Bearer ${ACCESS_TOKEN}" \ + "$KC_URL/admin/realms/atlas/clients/${CLIENT_ID}/optional-client-scopes/${SCOPE_ID}")" + if [ "$status" != "200" ] && [ "$status" != "201" ] && [ "$status" != "204" ]; then + status="$(curl -sS -o /dev/null -w "%{http_code}" -X POST \ + -H "Authorization: Bearer ${ACCESS_TOKEN}" \ + "$KC_URL/admin/realms/atlas/clients/${CLIENT_ID}/optional-client-scopes/${SCOPE_ID}")" + if [ "$status" != "200" ] && [ "$status" != "201" ] && [ "$status" != "204" ]; then + echo "Failed to attach groups client scope to harbor (status ${status})" >&2 + exit 1 + fi + fi +fi + +CLIENT_SECRET="$(curl -sS -H "Authorization: Bearer ${ACCESS_TOKEN}" \ + "$KC_URL/admin/realms/atlas/clients/${CLIENT_ID}/client-secret" | jq -r '.value' 2>/dev/null || true)" +if [ -z "$CLIENT_SECRET" ] || [ "$CLIENT_SECRET" = "null" ]; then + echo "Keycloak client secret not found" >&2 + exit 1 +fi + +CONFIG_OVERWRITE_JSON="$(jq -nc \ + --arg auth_mode "oidc_auth" \ + --arg oidc_name "Keycloak" \ + --arg oidc_client_id "harbor" \ + --arg oidc_client_secret "${CLIENT_SECRET}" \ + --arg oidc_endpoint "https://sso.bstein.dev/realms/atlas" \ + --arg oidc_scope "openid,profile,email,groups" \ + --arg oidc_user_claim "preferred_username" \ + --arg oidc_groups_claim "groups" \ + --arg oidc_admin_group "admin" \ + --argjson oidc_auto_onboard true \ + --argjson oidc_verify_cert true \ + --argjson oidc_logout true \ + '{\n auth_mode: $auth_mode,\n oidc_name: $oidc_name,\n oidc_client_id: $oidc_client_id,\n oidc_client_secret: $oidc_client_secret,\n oidc_endpoint: $oidc_endpoint,\n oidc_scope: $oidc_scope,\n 
oidc_user_claim: $oidc_user_claim,\n oidc_groups_claim: $oidc_groups_claim,\n oidc_admin_group: $oidc_admin_group,\n oidc_auto_onboard: $oidc_auto_onboard,\n oidc_verify_cert: $oidc_verify_cert,\n oidc_logout: $oidc_logout\n }')" + +kubectl -n harbor create secret generic harbor-oidc \ + --from-literal=CONFIG_OVERWRITE_JSON="${CONFIG_OVERWRITE_JSON}" \ + --dry-run=client -o yaml | kubectl -n harbor apply -f - >/dev/null From 537d304b368ae38198f05b7cc8b8695bf9e19aaf Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Wed, 14 Jan 2026 01:22:30 -0300 Subject: [PATCH 022/270] keycloak: bump harbor oidc job --- services/keycloak/harbor-oidc-secret-ensure-job.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/services/keycloak/harbor-oidc-secret-ensure-job.yaml b/services/keycloak/harbor-oidc-secret-ensure-job.yaml index 946f0fe..52cff2f 100644 --- a/services/keycloak/harbor-oidc-secret-ensure-job.yaml +++ b/services/keycloak/harbor-oidc-secret-ensure-job.yaml @@ -2,7 +2,7 @@ apiVersion: batch/v1 kind: Job metadata: - name: harbor-oidc-secret-ensure-1 + name: harbor-oidc-secret-ensure-2 namespace: sso spec: backoffLimit: 0 From 1d894ea80f80553cfc3e1ae46c5c5ee746877ad0 Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Wed, 14 Jan 2026 01:24:18 -0300 Subject: [PATCH 023/270] keycloak: fix harbor oidc job --- services/keycloak/harbor-oidc-secret-ensure-job.yaml | 2 +- services/keycloak/scripts/harbor_oidc_secret_ensure.sh | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/services/keycloak/harbor-oidc-secret-ensure-job.yaml b/services/keycloak/harbor-oidc-secret-ensure-job.yaml index 52cff2f..21a7ff0 100644 --- a/services/keycloak/harbor-oidc-secret-ensure-job.yaml +++ b/services/keycloak/harbor-oidc-secret-ensure-job.yaml @@ -2,7 +2,7 @@ apiVersion: batch/v1 kind: Job metadata: - name: harbor-oidc-secret-ensure-2 + name: harbor-oidc-secret-ensure-3 namespace: sso spec: backoffLimit: 0 diff --git 
a/services/keycloak/scripts/harbor_oidc_secret_ensure.sh b/services/keycloak/scripts/harbor_oidc_secret_ensure.sh index da3dff0..4767ef0 100755 --- a/services/keycloak/scripts/harbor_oidc_secret_ensure.sh +++ b/services/keycloak/scripts/harbor_oidc_secret_ensure.sh @@ -97,7 +97,7 @@ CONFIG_OVERWRITE_JSON="$(jq -nc \ --argjson oidc_auto_onboard true \ --argjson oidc_verify_cert true \ --argjson oidc_logout true \ - '{\n auth_mode: $auth_mode,\n oidc_name: $oidc_name,\n oidc_client_id: $oidc_client_id,\n oidc_client_secret: $oidc_client_secret,\n oidc_endpoint: $oidc_endpoint,\n oidc_scope: $oidc_scope,\n oidc_user_claim: $oidc_user_claim,\n oidc_groups_claim: $oidc_groups_claim,\n oidc_admin_group: $oidc_admin_group,\n oidc_auto_onboard: $oidc_auto_onboard,\n oidc_verify_cert: $oidc_verify_cert,\n oidc_logout: $oidc_logout\n }')" + '{auth_mode:$auth_mode,oidc_name:$oidc_name,oidc_client_id:$oidc_client_id,oidc_client_secret:$oidc_client_secret,oidc_endpoint:$oidc_endpoint,oidc_scope:$oidc_scope,oidc_user_claim:$oidc_user_claim,oidc_groups_claim:$oidc_groups_claim,oidc_admin_group:$oidc_admin_group,oidc_auto_onboard:$oidc_auto_onboard,oidc_verify_cert:$oidc_verify_cert,oidc_logout:$oidc_logout}')" kubectl -n harbor create secret generic harbor-oidc \ --from-literal=CONFIG_OVERWRITE_JSON="${CONFIG_OVERWRITE_JSON}" \ From b82195f2d7bd778468604671b69cbcee05dfb7d2 Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Wed, 14 Jan 2026 01:30:41 -0300 Subject: [PATCH 024/270] feat: start vault consumption for outline and planka --- .../vault-csi/secrets-store-csi-driver.yaml | 2 +- services/outline/deployment.yaml | 31 ++++++--- services/outline/kustomization.yaml | 8 +++ services/outline/scripts/outline_vault_env.sh | 31 +++++++++ services/outline/secretproviderclass.yaml | 63 +++++++++++++++++++ services/outline/serviceaccount.yaml | 6 ++ services/planka/deployment.yaml | 33 ++++++---- services/planka/kustomization.yaml | 8 +++ services/planka/scripts/planka_vault_env.sh | 27 
++++++++ services/planka/secretproviderclass.yaml | 60 ++++++++++++++++++ services/planka/serviceaccount.yaml | 6 ++ services/vault/k8s-auth-config-cronjob.yaml | 47 ++++++++++++++ services/vault/kustomization.yaml | 4 ++ .../vault/scripts/vault_k8s_auth_configure.sh | 62 ++++++++++++++++++ 14 files changed, 367 insertions(+), 21 deletions(-) create mode 100644 services/outline/scripts/outline_vault_env.sh create mode 100644 services/outline/secretproviderclass.yaml create mode 100644 services/outline/serviceaccount.yaml create mode 100644 services/planka/scripts/planka_vault_env.sh create mode 100644 services/planka/secretproviderclass.yaml create mode 100644 services/planka/serviceaccount.yaml create mode 100644 services/vault/k8s-auth-config-cronjob.yaml create mode 100644 services/vault/scripts/vault_k8s_auth_configure.sh diff --git a/infrastructure/vault-csi/secrets-store-csi-driver.yaml b/infrastructure/vault-csi/secrets-store-csi-driver.yaml index 0b249fc..fec4758 100644 --- a/infrastructure/vault-csi/secrets-store-csi-driver.yaml +++ b/infrastructure/vault-csi/secrets-store-csi-driver.yaml @@ -16,5 +16,5 @@ spec: namespace: flux-system values: syncSecret: - enabled: true + enabled: false enableSecretRotation: false diff --git a/services/outline/deployment.yaml b/services/outline/deployment.yaml index 9f8160e..2cacceb 100644 --- a/services/outline/deployment.yaml +++ b/services/outline/deployment.yaml @@ -21,6 +21,7 @@ spec: labels: app: outline spec: + serviceAccountName: outline-vault nodeSelector: node-role.kubernetes.io/worker: "true" affinity: @@ -34,6 +35,11 @@ spec: containers: - name: outline image: outlinewiki/outline:1.2.0 + command: + - /bin/sh + - -c + args: + - . 
/vault/scripts/outline_vault_env.sh && exec node build/server/index.js ports: - name: http containerPort: 3000 @@ -66,18 +72,15 @@ spec: value: "false" - name: SMTP_PORT value: "25" - envFrom: - - secretRef: - name: outline-db - - secretRef: - name: outline-secrets - - secretRef: - name: outline-oidc - - secretRef: - name: outline-smtp volumeMounts: - name: user-data mountPath: /var/lib/outline/data + - name: vault-secrets + mountPath: /vault/secrets + readOnly: true + - name: vault-scripts + mountPath: /vault/scripts + readOnly: true readinessProbe: httpGet: path: /_health @@ -105,3 +108,13 @@ spec: - name: user-data persistentVolumeClaim: claimName: outline-user-data + - name: vault-secrets + csi: + driver: secrets-store.csi.k8s.io + readOnly: true + volumeAttributes: + secretProviderClass: outline-vault + - name: vault-scripts + configMap: + name: outline-vault-env + defaultMode: 0555 diff --git a/services/outline/kustomization.yaml b/services/outline/kustomization.yaml index 33640f6..011c6e6 100644 --- a/services/outline/kustomization.yaml +++ b/services/outline/kustomization.yaml @@ -4,9 +4,17 @@ kind: Kustomization namespace: outline resources: - namespace.yaml + - serviceaccount.yaml + - secretproviderclass.yaml - user-pvc.yaml - redis-deployment.yaml - redis-service.yaml - deployment.yaml - service.yaml - ingress.yaml +generatorOptions: + disableNameSuffixHash: true +configMapGenerator: + - name: outline-vault-env + files: + - outline_vault_env.sh=scripts/outline_vault_env.sh diff --git a/services/outline/scripts/outline_vault_env.sh b/services/outline/scripts/outline_vault_env.sh new file mode 100644 index 0000000..d9f8469 --- /dev/null +++ b/services/outline/scripts/outline_vault_env.sh @@ -0,0 +1,31 @@ +#!/usr/bin/env sh +set -eu + +vault_dir="/vault/secrets" + +read_secret() { + cat "${vault_dir}/$1" +} + +export DATABASE_URL="$(read_secret DATABASE_URL)" +export SECRET_KEY="$(read_secret SECRET_KEY)" +export UTILS_SECRET="$(read_secret UTILS_SECRET)" + 
+export OIDC_AUTH_URI="$(read_secret OIDC_AUTH_URI)" +export OIDC_CLIENT_ID="$(read_secret OIDC_CLIENT_ID)" +export OIDC_CLIENT_SECRET="$(read_secret OIDC_CLIENT_SECRET)" +export OIDC_LOGOUT_URI="$(read_secret OIDC_LOGOUT_URI)" +export OIDC_TOKEN_URI="$(read_secret OIDC_TOKEN_URI)" +export OIDC_USERINFO_URI="$(read_secret OIDC_USERINFO_URI)" + +export SMTP_FROM_EMAIL="$(read_secret SMTP_FROM_EMAIL)" +export SMTP_HOST="$(read_secret SMTP_HOST)" +export SMTP_PASSWORD="$(read_secret SMTP_PASSWORD)" +export SMTP_USERNAME="$(read_secret SMTP_USERNAME)" + +if [ -f "${vault_dir}/AWS_ACCESS_KEY_ID" ]; then + export AWS_ACCESS_KEY_ID="$(read_secret AWS_ACCESS_KEY_ID)" + export AWS_SECRET_ACCESS_KEY="$(read_secret AWS_SECRET_ACCESS_KEY)" + export AWS_S3_UPLOAD_BUCKET_NAME="$(read_secret AWS_S3_UPLOAD_BUCKET_NAME)" + export AWS_S3_UPLOAD_BUCKET_URL="$(read_secret AWS_S3_UPLOAD_BUCKET_URL)" +fi diff --git a/services/outline/secretproviderclass.yaml b/services/outline/secretproviderclass.yaml new file mode 100644 index 0000000..2781c85 --- /dev/null +++ b/services/outline/secretproviderclass.yaml @@ -0,0 +1,63 @@ +# services/outline/secretproviderclass.yaml +apiVersion: secrets-store.csi.x-k8s.io/v1 +kind: SecretProviderClass +metadata: + name: outline-vault + namespace: outline +spec: + provider: vault + parameters: + vaultAddress: "http://vault.vault.svc.cluster.local:8200" + roleName: "outline" + objects: | + - objectName: "DATABASE_URL" + secretPath: "kv/data/atlas/outline/outline-db" + secretKey: "DATABASE_URL" + - objectName: "SECRET_KEY" + secretPath: "kv/data/atlas/outline/outline-secrets" + secretKey: "SECRET_KEY" + - objectName: "UTILS_SECRET" + secretPath: "kv/data/atlas/outline/outline-secrets" + secretKey: "UTILS_SECRET" + - objectName: "OIDC_AUTH_URI" + secretPath: "kv/data/atlas/outline/outline-oidc" + secretKey: "OIDC_AUTH_URI" + - objectName: "OIDC_CLIENT_ID" + secretPath: "kv/data/atlas/outline/outline-oidc" + secretKey: "OIDC_CLIENT_ID" + - objectName: 
"OIDC_CLIENT_SECRET" + secretPath: "kv/data/atlas/outline/outline-oidc" + secretKey: "OIDC_CLIENT_SECRET" + - objectName: "OIDC_LOGOUT_URI" + secretPath: "kv/data/atlas/outline/outline-oidc" + secretKey: "OIDC_LOGOUT_URI" + - objectName: "OIDC_TOKEN_URI" + secretPath: "kv/data/atlas/outline/outline-oidc" + secretKey: "OIDC_TOKEN_URI" + - objectName: "OIDC_USERINFO_URI" + secretPath: "kv/data/atlas/outline/outline-oidc" + secretKey: "OIDC_USERINFO_URI" + - objectName: "SMTP_FROM_EMAIL" + secretPath: "kv/data/atlas/outline/outline-smtp" + secretKey: "SMTP_FROM_EMAIL" + - objectName: "SMTP_HOST" + secretPath: "kv/data/atlas/outline/outline-smtp" + secretKey: "SMTP_HOST" + - objectName: "SMTP_PASSWORD" + secretPath: "kv/data/atlas/outline/outline-smtp" + secretKey: "SMTP_PASSWORD" + - objectName: "SMTP_USERNAME" + secretPath: "kv/data/atlas/outline/outline-smtp" + secretKey: "SMTP_USERNAME" + - objectName: "AWS_ACCESS_KEY_ID" + secretPath: "kv/data/atlas/outline/outline-s3" + secretKey: "AWS_ACCESS_KEY_ID" + - objectName: "AWS_SECRET_ACCESS_KEY" + secretPath: "kv/data/atlas/outline/outline-s3" + secretKey: "AWS_SECRET_ACCESS_KEY" + - objectName: "AWS_S3_UPLOAD_BUCKET_NAME" + secretPath: "kv/data/atlas/outline/outline-s3" + secretKey: "AWS_S3_UPLOAD_BUCKET_NAME" + - objectName: "AWS_S3_UPLOAD_BUCKET_URL" + secretPath: "kv/data/atlas/outline/outline-s3" + secretKey: "AWS_S3_UPLOAD_BUCKET_URL" diff --git a/services/outline/serviceaccount.yaml b/services/outline/serviceaccount.yaml new file mode 100644 index 0000000..8f15c78 --- /dev/null +++ b/services/outline/serviceaccount.yaml @@ -0,0 +1,6 @@ +# services/outline/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: outline-vault + namespace: outline diff --git a/services/planka/deployment.yaml b/services/planka/deployment.yaml index 9524245..d2aa431 100644 --- a/services/planka/deployment.yaml +++ b/services/planka/deployment.yaml @@ -21,6 +21,7 @@ spec: labels: app: planka spec: + 
serviceAccountName: planka-vault nodeSelector: node-role.kubernetes.io/worker: "true" affinity: @@ -58,6 +59,11 @@ spec: containers: - name: planka image: ghcr.io/plankanban/planka:2.0.0-rc.4 + command: + - /bin/sh + - -c + args: + - . /vault/scripts/planka_vault_env.sh && exec node app.js --prod ports: - name: http containerPort: 1337 @@ -66,23 +72,12 @@ spec: value: https://tasks.bstein.dev - name: TRUST_PROXY value: "true" - - name: OIDC_IGNORE_ROLES - value: "false" - name: OIDC_ADMIN_ROLES value: admin - name: OIDC_PROJECT_OWNER_ROLES value: planka-users - name: OIDC_ROLES_ATTRIBUTE value: groups - envFrom: - - secretRef: - name: planka-db - - secretRef: - name: planka-secrets - - secretRef: - name: planka-oidc - - secretRef: - name: planka-smtp volumeMounts: - name: user-data mountPath: /app/public/user-avatars @@ -95,6 +90,12 @@ spec: subPath: private/attachments - name: app-data mountPath: /app/.tmp + - name: vault-secrets + mountPath: /vault/secrets + readOnly: true + - name: vault-scripts + mountPath: /vault/scripts + readOnly: true readinessProbe: httpGet: path: / @@ -125,3 +126,13 @@ spec: - name: app-data persistentVolumeClaim: claimName: planka-app-data + - name: vault-secrets + csi: + driver: secrets-store.csi.k8s.io + readOnly: true + volumeAttributes: + secretProviderClass: planka-vault + - name: vault-scripts + configMap: + name: planka-vault-env + defaultMode: 0555 diff --git a/services/planka/kustomization.yaml b/services/planka/kustomization.yaml index ab42954..14a7cc9 100644 --- a/services/planka/kustomization.yaml +++ b/services/planka/kustomization.yaml @@ -4,8 +4,16 @@ kind: Kustomization namespace: planka resources: - namespace.yaml + - serviceaccount.yaml + - secretproviderclass.yaml - user-data-pvc.yaml - app-pvc.yaml - deployment.yaml - service.yaml - ingress.yaml +generatorOptions: + disableNameSuffixHash: true +configMapGenerator: + - name: planka-vault-env + files: + - planka_vault_env.sh=scripts/planka_vault_env.sh diff --git 
a/services/planka/scripts/planka_vault_env.sh b/services/planka/scripts/planka_vault_env.sh new file mode 100644 index 0000000..f5ab2ab --- /dev/null +++ b/services/planka/scripts/planka_vault_env.sh @@ -0,0 +1,27 @@ +#!/usr/bin/env sh +set -eu + +vault_dir="/vault/secrets" + +read_secret() { + cat "${vault_dir}/$1" +} + +export DATABASE_URL="$(read_secret DATABASE_URL)" +export SECRET_KEY="$(read_secret SECRET_KEY)" + +export OIDC_CLIENT_ID="$(read_secret OIDC_CLIENT_ID)" +export OIDC_CLIENT_SECRET="$(read_secret OIDC_CLIENT_SECRET)" +export OIDC_ENFORCED="$(read_secret OIDC_ENFORCED)" +export OIDC_IGNORE_ROLES="$(read_secret OIDC_IGNORE_ROLES)" +export OIDC_ISSUER="$(read_secret OIDC_ISSUER)" +export OIDC_SCOPES="$(read_secret OIDC_SCOPES)" +export OIDC_USE_OAUTH_CALLBACK="$(read_secret OIDC_USE_OAUTH_CALLBACK)" + +export SMTP_FROM="$(read_secret SMTP_FROM)" +export SMTP_HOST="$(read_secret SMTP_HOST)" +export SMTP_PASSWORD="$(read_secret SMTP_PASSWORD)" +export SMTP_PORT="$(read_secret SMTP_PORT)" +export SMTP_SECURE="$(read_secret SMTP_SECURE)" +export SMTP_TLS_REJECT_UNAUTHORIZED="$(read_secret SMTP_TLS_REJECT_UNAUTHORIZED)" +export SMTP_USER="$(read_secret SMTP_USER)" diff --git a/services/planka/secretproviderclass.yaml b/services/planka/secretproviderclass.yaml new file mode 100644 index 0000000..e72d98c --- /dev/null +++ b/services/planka/secretproviderclass.yaml @@ -0,0 +1,60 @@ +# services/planka/secretproviderclass.yaml +apiVersion: secrets-store.csi.x-k8s.io/v1 +kind: SecretProviderClass +metadata: + name: planka-vault + namespace: planka +spec: + provider: vault + parameters: + vaultAddress: "http://vault.vault.svc.cluster.local:8200" + roleName: "planka" + objects: | + - objectName: "DATABASE_URL" + secretPath: "kv/data/atlas/planka/planka-db" + secretKey: "DATABASE_URL" + - objectName: "SECRET_KEY" + secretPath: "kv/data/atlas/planka/planka-secrets" + secretKey: "SECRET_KEY" + - objectName: "OIDC_CLIENT_ID" + secretPath: 
"kv/data/atlas/planka/planka-oidc" + secretKey: "OIDC_CLIENT_ID" + - objectName: "OIDC_CLIENT_SECRET" + secretPath: "kv/data/atlas/planka/planka-oidc" + secretKey: "OIDC_CLIENT_SECRET" + - objectName: "OIDC_ENFORCED" + secretPath: "kv/data/atlas/planka/planka-oidc" + secretKey: "OIDC_ENFORCED" + - objectName: "OIDC_IGNORE_ROLES" + secretPath: "kv/data/atlas/planka/planka-oidc" + secretKey: "OIDC_IGNORE_ROLES" + - objectName: "OIDC_ISSUER" + secretPath: "kv/data/atlas/planka/planka-oidc" + secretKey: "OIDC_ISSUER" + - objectName: "OIDC_SCOPES" + secretPath: "kv/data/atlas/planka/planka-oidc" + secretKey: "OIDC_SCOPES" + - objectName: "OIDC_USE_OAUTH_CALLBACK" + secretPath: "kv/data/atlas/planka/planka-oidc" + secretKey: "OIDC_USE_OAUTH_CALLBACK" + - objectName: "SMTP_FROM" + secretPath: "kv/data/atlas/planka/planka-smtp" + secretKey: "SMTP_FROM" + - objectName: "SMTP_HOST" + secretPath: "kv/data/atlas/planka/planka-smtp" + secretKey: "SMTP_HOST" + - objectName: "SMTP_PASSWORD" + secretPath: "kv/data/atlas/planka/planka-smtp" + secretKey: "SMTP_PASSWORD" + - objectName: "SMTP_PORT" + secretPath: "kv/data/atlas/planka/planka-smtp" + secretKey: "SMTP_PORT" + - objectName: "SMTP_SECURE" + secretPath: "kv/data/atlas/planka/planka-smtp" + secretKey: "SMTP_SECURE" + - objectName: "SMTP_TLS_REJECT_UNAUTHORIZED" + secretPath: "kv/data/atlas/planka/planka-smtp" + secretKey: "SMTP_TLS_REJECT_UNAUTHORIZED" + - objectName: "SMTP_USER" + secretPath: "kv/data/atlas/planka/planka-smtp" + secretKey: "SMTP_USER" diff --git a/services/planka/serviceaccount.yaml b/services/planka/serviceaccount.yaml new file mode 100644 index 0000000..ca4f437 --- /dev/null +++ b/services/planka/serviceaccount.yaml @@ -0,0 +1,6 @@ +# services/planka/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: planka-vault + namespace: planka diff --git a/services/vault/k8s-auth-config-cronjob.yaml b/services/vault/k8s-auth-config-cronjob.yaml new file mode 100644 index 0000000..d974f6b 
--- /dev/null +++ b/services/vault/k8s-auth-config-cronjob.yaml @@ -0,0 +1,47 @@ +# services/vault/k8s-auth-config-cronjob.yaml +apiVersion: batch/v1 +kind: CronJob +metadata: + name: vault-k8s-auth-config + namespace: vault +spec: + schedule: "*/15 * * * *" + concurrencyPolicy: Forbid + successfulJobsHistoryLimit: 1 + failedJobsHistoryLimit: 3 + jobTemplate: + spec: + backoffLimit: 1 + template: + spec: + serviceAccountName: vault + restartPolicy: Never + nodeSelector: + kubernetes.io/arch: arm64 + node-role.kubernetes.io/worker: "true" + containers: + - name: configure-k8s-auth + image: hashicorp/vault:1.17.6 + imagePullPolicy: IfNotPresent + command: + - bash + - /scripts/vault_k8s_auth_configure.sh + env: + - name: VAULT_ADDR + value: http://vault.vault.svc.cluster.local:8200 + - name: VAULT_TOKEN + valueFrom: + secretKeyRef: + name: vault-oidc-admin-token + key: token + - name: VAULT_K8S_ROLE_TTL + value: 1h + volumeMounts: + - name: k8s-auth-config-script + mountPath: /scripts + readOnly: true + volumes: + - name: k8s-auth-config-script + configMap: + name: vault-k8s-auth-config-script + defaultMode: 0555 diff --git a/services/vault/kustomization.yaml b/services/vault/kustomization.yaml index 1ab70bc..9643894 100644 --- a/services/vault/kustomization.yaml +++ b/services/vault/kustomization.yaml @@ -8,6 +8,7 @@ resources: - rbac.yaml - configmap.yaml - statefulset.yaml + - k8s-auth-config-cronjob.yaml - oidc-config-cronjob.yaml - service.yaml - ingress.yaml @@ -19,3 +20,6 @@ configMapGenerator: - name: vault-oidc-config-script files: - vault_oidc_configure.sh=scripts/vault_oidc_configure.sh + - name: vault-k8s-auth-config-script + files: + - vault_k8s_auth_configure.sh=scripts/vault_k8s_auth_configure.sh diff --git a/services/vault/scripts/vault_k8s_auth_configure.sh b/services/vault/scripts/vault_k8s_auth_configure.sh new file mode 100644 index 0000000..15973e6 --- /dev/null +++ b/services/vault/scripts/vault_k8s_auth_configure.sh @@ -0,0 +1,62 @@ 
+#!/usr/bin/env bash +set -euo pipefail + +log() { echo "[vault-k8s-auth] $*"; } + +status_json="$(vault status -format=json || true)" +if [[ -z "${status_json}" ]]; then + log "vault status failed; check VAULT_ADDR and VAULT_TOKEN" + exit 1 +fi + +if ! grep -q '"initialized":true' <<<"${status_json}"; then + log "vault not initialized; skipping" + exit 0 +fi + +if grep -q '"sealed":true' <<<"${status_json}"; then + log "vault sealed; skipping" + exit 0 +fi + +k8s_host="https://${KUBERNETES_SERVICE_HOST}:443" +k8s_ca="$(cat /var/run/secrets/kubernetes.io/serviceaccount/ca.crt)" +k8s_token="$(cat /var/run/secrets/kubernetes.io/serviceaccount/token)" +role_ttl="${VAULT_K8S_ROLE_TTL:-1h}" + +if ! vault auth list -format=json | grep -q '"kubernetes/"'; then + log "enabling kubernetes auth" + vault auth enable kubernetes +fi + +log "configuring kubernetes auth" +vault write auth/kubernetes/config \ + token_reviewer_jwt="${k8s_token}" \ + kubernetes_host="${k8s_host}" \ + kubernetes_ca_cert="${k8s_ca}" + +declare -A roles +roles[outline]=outline-vault +roles[planka]=planka-vault + +for namespace in "${!roles[@]}"; do + policy_name="${namespace}" + service_account="${roles[$namespace]}" + + log "writing policy ${policy_name}" + vault policy write "${policy_name}" - < Date: Wed, 14 Jan 2026 01:35:06 -0300 Subject: [PATCH 025/270] fix: run vault k8s auth config with sh --- services/vault/k8s-auth-config-cronjob.yaml | 2 +- .../vault/scripts/vault_k8s_auth_configure.sh | 16 ++++++++-------- 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/services/vault/k8s-auth-config-cronjob.yaml b/services/vault/k8s-auth-config-cronjob.yaml index d974f6b..3b74932 100644 --- a/services/vault/k8s-auth-config-cronjob.yaml +++ b/services/vault/k8s-auth-config-cronjob.yaml @@ -24,7 +24,7 @@ spec: image: hashicorp/vault:1.17.6 imagePullPolicy: IfNotPresent command: - - bash + - sh - /scripts/vault_k8s_auth_configure.sh env: - name: VAULT_ADDR diff --git 
a/services/vault/scripts/vault_k8s_auth_configure.sh b/services/vault/scripts/vault_k8s_auth_configure.sh index 15973e6..9e2f674 100644 --- a/services/vault/scripts/vault_k8s_auth_configure.sh +++ b/services/vault/scripts/vault_k8s_auth_configure.sh @@ -1,5 +1,5 @@ -#!/usr/bin/env bash -set -euo pipefail +#!/usr/bin/env sh +set -eu log() { echo "[vault-k8s-auth] $*"; } @@ -35,13 +35,13 @@ vault write auth/kubernetes/config \ kubernetes_host="${k8s_host}" \ kubernetes_ca_cert="${k8s_ca}" -declare -A roles -roles[outline]=outline-vault -roles[planka]=planka-vault - -for namespace in "${!roles[@]}"; do +for namespace in outline planka; do policy_name="${namespace}" - service_account="${roles[$namespace]}" + case "${namespace}" in + outline) service_account="outline-vault" ;; + planka) service_account="planka-vault" ;; + *) log "unknown namespace ${namespace}"; exit 1 ;; + esac log "writing policy ${policy_name}" vault policy write "${policy_name}" - < Date: Wed, 14 Jan 2026 01:38:27 -0300 Subject: [PATCH 026/270] fix: make vault k8s auth script posix --- services/vault/scripts/vault_k8s_auth_configure.sh | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/services/vault/scripts/vault_k8s_auth_configure.sh b/services/vault/scripts/vault_k8s_auth_configure.sh index 9e2f674..4457fc6 100644 --- a/services/vault/scripts/vault_k8s_auth_configure.sh +++ b/services/vault/scripts/vault_k8s_auth_configure.sh @@ -4,17 +4,17 @@ set -eu log() { echo "[vault-k8s-auth] $*"; } status_json="$(vault status -format=json || true)" -if [[ -z "${status_json}" ]]; then +if [ -z "${status_json}" ]; then log "vault status failed; check VAULT_ADDR and VAULT_TOKEN" exit 1 fi -if ! grep -q '"initialized":true' <<<"${status_json}"; then +if ! 
printf '%s' "${status_json}" | grep -q '"initialized":true'; then log "vault not initialized; skipping" exit 0 fi -if grep -q '"sealed":true' <<<"${status_json}"; then +if printf '%s' "${status_json}" | grep -q '"sealed":true'; then log "vault sealed; skipping" exit 0 fi From 50aec198a46a5cf741027a32efad97b8bd7a7740 Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Wed, 14 Jan 2026 01:42:28 -0300 Subject: [PATCH 027/270] fix: detect vault initialized state correctly --- services/vault/scripts/vault_k8s_auth_configure.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/services/vault/scripts/vault_k8s_auth_configure.sh b/services/vault/scripts/vault_k8s_auth_configure.sh index 4457fc6..14604cd 100644 --- a/services/vault/scripts/vault_k8s_auth_configure.sh +++ b/services/vault/scripts/vault_k8s_auth_configure.sh @@ -9,12 +9,12 @@ if [ -z "${status_json}" ]; then exit 1 fi -if ! printf '%s' "${status_json}" | grep -q '"initialized":true'; then +if ! printf '%s' "${status_json}" | grep -q '"initialized":[[:space:]]*true'; then log "vault not initialized; skipping" exit 0 fi -if printf '%s' "${status_json}" | grep -q '"sealed":true'; then +if printf '%s' "${status_json}" | grep -q '"sealed":[[:space:]]*true'; then log "vault sealed; skipping" exit 0 fi From 55234f853684cd3f60c12b83dfd8c999823fb1b8 Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Wed, 14 Jan 2026 02:24:32 -0300 Subject: [PATCH 028/270] vault: align oidc roles with keycloak --- services/keycloak/kustomization.yaml | 4 + .../scripts/vault_oidc_secret_ensure.sh | 103 ++++++++++++++++++ .../vault-oidc-secret-ensure-job.yaml | 47 ++++++++ services/vault/oidc-config-cronjob.yaml | 36 ++++++ .../vault/scripts/vault_oidc_configure.sh | 96 +++++++++++----- 5 files changed, 256 insertions(+), 30 deletions(-) create mode 100755 services/keycloak/scripts/vault_oidc_secret_ensure.sh create mode 100644 services/keycloak/vault-oidc-secret-ensure-job.yaml diff --git 
a/services/keycloak/kustomization.yaml b/services/keycloak/kustomization.yaml index 5222ee1..38da8bf 100644 --- a/services/keycloak/kustomization.yaml +++ b/services/keycloak/kustomization.yaml @@ -20,6 +20,7 @@ resources: - synapse-oidc-secret-ensure-job.yaml - logs-oidc-secret-ensure-job.yaml - harbor-oidc-secret-ensure-job.yaml + - vault-oidc-secret-ensure-job.yaml - service.yaml - ingress.yaml generatorOptions: @@ -35,3 +36,6 @@ configMapGenerator: - name: harbor-oidc-secret-ensure-script files: - harbor_oidc_secret_ensure.sh=scripts/harbor_oidc_secret_ensure.sh + - name: vault-oidc-secret-ensure-script + files: + - vault_oidc_secret_ensure.sh=scripts/vault_oidc_secret_ensure.sh diff --git a/services/keycloak/scripts/vault_oidc_secret_ensure.sh b/services/keycloak/scripts/vault_oidc_secret_ensure.sh new file mode 100755 index 0000000..f7b3261 --- /dev/null +++ b/services/keycloak/scripts/vault_oidc_secret_ensure.sh @@ -0,0 +1,103 @@ +#!/usr/bin/env sh +set -euo pipefail + +apk add --no-cache curl jq kubectl >/dev/null + +KC_URL="http://keycloak.sso.svc.cluster.local" +ACCESS_TOKEN="" +for attempt in 1 2 3 4 5; do + TOKEN_JSON="$(curl -sS -X POST "$KC_URL/realms/master/protocol/openid-connect/token" \ + -H 'Content-Type: application/x-www-form-urlencoded' \ + -d "grant_type=password" \ + -d "client_id=admin-cli" \ + -d "username=${KEYCLOAK_ADMIN}" \ + -d "password=${KEYCLOAK_ADMIN_PASSWORD}" || true)" + ACCESS_TOKEN="$(echo "$TOKEN_JSON" | jq -r '.access_token' 2>/dev/null || true)" + if [ -n "$ACCESS_TOKEN" ] && [ "$ACCESS_TOKEN" != "null" ]; then + break + fi + echo "Keycloak token request failed (attempt ${attempt})" >&2 + sleep $((attempt * 2)) +done +if [ -z "$ACCESS_TOKEN" ] || [ "$ACCESS_TOKEN" = "null" ]; then + echo "Failed to fetch Keycloak admin token" >&2 + exit 1 +fi + +CLIENT_QUERY="$(curl -sS -H "Authorization: Bearer ${ACCESS_TOKEN}" \ + "$KC_URL/admin/realms/atlas/clients?clientId=vault-oidc" || true)" +CLIENT_ID="$(echo "$CLIENT_QUERY" | jq -r 
'.[0].id' 2>/dev/null || true)" + +if [ -z "$CLIENT_ID" ] || [ "$CLIENT_ID" = "null" ]; then + create_payload='{"clientId":"vault-oidc","enabled":true,"protocol":"openid-connect","publicClient":false,"standardFlowEnabled":true,"implicitFlowEnabled":false,"directAccessGrantsEnabled":false,"serviceAccountsEnabled":false,"redirectUris":["https://secret.bstein.dev/ui/vault/auth/oidc/oidc/callback","http://localhost:8250/oidc/callback"],"webOrigins":["https://secret.bstein.dev"],"rootUrl":"https://secret.bstein.dev","baseUrl":"/"}' + status="$(curl -sS -o /dev/null -w "%{http_code}" -X POST \ + -H "Authorization: Bearer ${ACCESS_TOKEN}" \ + -H 'Content-Type: application/json' \ + -d "${create_payload}" \ + "$KC_URL/admin/realms/atlas/clients")" + if [ "$status" != "201" ] && [ "$status" != "204" ]; then + echo "Keycloak client create failed (status ${status})" >&2 + exit 1 + fi + CLIENT_QUERY="$(curl -sS -H "Authorization: Bearer ${ACCESS_TOKEN}" \ + "$KC_URL/admin/realms/atlas/clients?clientId=vault-oidc" || true)" + CLIENT_ID="$(echo "$CLIENT_QUERY" | jq -r '.[0].id' 2>/dev/null || true)" +fi + +if [ -z "$CLIENT_ID" ] || [ "$CLIENT_ID" = "null" ]; then + echo "Keycloak client vault-oidc not found" >&2 + exit 1 +fi + +SCOPE_ID="$(curl -sS -H "Authorization: Bearer ${ACCESS_TOKEN}" \ + "$KC_URL/admin/realms/atlas/client-scopes?search=groups" | jq -r '.[] | select(.name=="groups") | .id' 2>/dev/null | head -n1 || true)" +if [ -z "$SCOPE_ID" ] || [ "$SCOPE_ID" = "null" ]; then + echo "Keycloak client scope groups not found" >&2 + exit 1 +fi + +DEFAULT_SCOPES="$(curl -sS -H "Authorization: Bearer ${ACCESS_TOKEN}" \ + "$KC_URL/admin/realms/atlas/clients/${CLIENT_ID}/default-client-scopes" || true)" +OPTIONAL_SCOPES="$(curl -sS -H "Authorization: Bearer ${ACCESS_TOKEN}" \ + "$KC_URL/admin/realms/atlas/clients/${CLIENT_ID}/optional-client-scopes" || true)" + +if ! echo "$DEFAULT_SCOPES" | jq -e '.[] | select(.name=="groups")' >/dev/null 2>&1 \ + && ! 
echo "$OPTIONAL_SCOPES" | jq -e '.[] | select(.name=="groups")' >/dev/null 2>&1; then + status="$(curl -sS -o /dev/null -w "%{http_code}" -X PUT \ + -H "Authorization: Bearer ${ACCESS_TOKEN}" \ + "$KC_URL/admin/realms/atlas/clients/${CLIENT_ID}/optional-client-scopes/${SCOPE_ID}")" + if [ "$status" != "200" ] && [ "$status" != "201" ] && [ "$status" != "204" ]; then + status="$(curl -sS -o /dev/null -w "%{http_code}" -X POST \ + -H "Authorization: Bearer ${ACCESS_TOKEN}" \ + "$KC_URL/admin/realms/atlas/clients/${CLIENT_ID}/optional-client-scopes/${SCOPE_ID}")" + if [ "$status" != "200" ] && [ "$status" != "201" ] && [ "$status" != "204" ]; then + echo "Failed to attach groups client scope to vault-oidc (status ${status})" >&2 + exit 1 + fi + fi +fi + +CLIENT_SECRET="$(curl -sS -H "Authorization: Bearer ${ACCESS_TOKEN}" \ + "$KC_URL/admin/realms/atlas/clients/${CLIENT_ID}/client-secret" | jq -r '.value' 2>/dev/null || true)" +if [ -z "$CLIENT_SECRET" ] || [ "$CLIENT_SECRET" = "null" ]; then + echo "Keycloak client secret not found" >&2 + exit 1 +fi + +kubectl -n vault create secret generic vault-oidc-config \ + --from-literal=discovery_url="https://sso.bstein.dev/realms/atlas" \ + --from-literal=client_id="vault-oidc" \ + --from-literal=client_secret="${CLIENT_SECRET}" \ + --from-literal=default_role="admin" \ + --from-literal=scopes="openid profile email groups" \ + --from-literal=user_claim="preferred_username" \ + --from-literal=groups_claim="groups" \ + --from-literal=redirect_uris="https://secret.bstein.dev/ui/vault/auth/oidc/oidc/callback,http://localhost:8250/oidc/callback" \ + --from-literal=bound_audiences="vault-oidc" \ + --from-literal=admin_group="admin" \ + --from-literal=admin_policies="default,vault-admin" \ + --from-literal=dev_group="dev" \ + --from-literal=dev_policies="default,dev-kv" \ + --from-literal=user_group="dev" \ + --from-literal=user_policies="default,dev-kv" \ + --dry-run=client -o yaml | kubectl -n vault apply -f - >/dev/null diff 
--git a/services/keycloak/vault-oidc-secret-ensure-job.yaml b/services/keycloak/vault-oidc-secret-ensure-job.yaml new file mode 100644 index 0000000..ce3a1f0 --- /dev/null +++ b/services/keycloak/vault-oidc-secret-ensure-job.yaml @@ -0,0 +1,47 @@ +# services/keycloak/vault-oidc-secret-ensure-job.yaml +apiVersion: batch/v1 +kind: Job +metadata: + name: vault-oidc-secret-ensure-1 + namespace: sso +spec: + backoffLimit: 0 + ttlSecondsAfterFinished: 3600 + template: + spec: + serviceAccountName: mas-secrets-ensure + restartPolicy: Never + volumes: + - name: vault-oidc-secret-ensure-script + configMap: + name: vault-oidc-secret-ensure-script + defaultMode: 0555 + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/arch + operator: In + values: ["arm64"] + - key: node-role.kubernetes.io/worker + operator: Exists + containers: + - name: apply + image: alpine:3.20 + command: ["/scripts/vault_oidc_secret_ensure.sh"] + env: + - name: KEYCLOAK_ADMIN + valueFrom: + secretKeyRef: + name: keycloak-admin + key: username + - name: KEYCLOAK_ADMIN_PASSWORD + valueFrom: + secretKeyRef: + name: keycloak-admin + key: password + volumeMounts: + - name: vault-oidc-secret-ensure-script + mountPath: /scripts + readOnly: true diff --git a/services/vault/oidc-config-cronjob.yaml b/services/vault/oidc-config-cronjob.yaml index 15131a8..3960aad 100644 --- a/services/vault/oidc-config-cronjob.yaml +++ b/services/vault/oidc-config-cronjob.yaml @@ -79,6 +79,42 @@ spec: name: vault-oidc-config key: token_policies optional: true + - name: VAULT_OIDC_ADMIN_GROUP + valueFrom: + secretKeyRef: + name: vault-oidc-config + key: admin_group + optional: true + - name: VAULT_OIDC_ADMIN_POLICIES + valueFrom: + secretKeyRef: + name: vault-oidc-config + key: admin_policies + optional: true + - name: VAULT_OIDC_DEV_GROUP + valueFrom: + secretKeyRef: + name: vault-oidc-config + key: dev_group + optional: true + - name: 
VAULT_OIDC_DEV_POLICIES + valueFrom: + secretKeyRef: + name: vault-oidc-config + key: dev_policies + optional: true + - name: VAULT_OIDC_USER_GROUP + valueFrom: + secretKeyRef: + name: vault-oidc-config + key: user_group + optional: true + - name: VAULT_OIDC_USER_POLICIES + valueFrom: + secretKeyRef: + name: vault-oidc-config + key: user_policies + optional: true - name: VAULT_OIDC_REDIRECT_URIS valueFrom: secretKeyRef: diff --git a/services/vault/scripts/vault_oidc_configure.sh b/services/vault/scripts/vault_oidc_configure.sh index 3cd4a2d..380d772 100644 --- a/services/vault/scripts/vault_oidc_configure.sh +++ b/services/vault/scripts/vault_oidc_configure.sh @@ -23,15 +23,20 @@ fi : "${VAULT_OIDC_CLIENT_ID:?set VAULT_OIDC_CLIENT_ID}" : "${VAULT_OIDC_CLIENT_SECRET:?set VAULT_OIDC_CLIENT_SECRET}" -role="${VAULT_OIDC_DEFAULT_ROLE:-atlas}" +default_role="${VAULT_OIDC_DEFAULT_ROLE:-admin}" scopes="${VAULT_OIDC_SCOPES:-openid profile email groups}" user_claim="${VAULT_OIDC_USER_CLAIM:-preferred_username}" groups_claim="${VAULT_OIDC_GROUPS_CLAIM:-groups}" -token_policies="${VAULT_OIDC_TOKEN_POLICIES:-default}" redirect_uris="${VAULT_OIDC_REDIRECT_URIS:-https://secret.bstein.dev/ui/vault/auth/oidc/oidc/callback}" bound_audiences="${VAULT_OIDC_BOUND_AUDIENCES:-${VAULT_OIDC_CLIENT_ID}}" -bound_claims="${VAULT_OIDC_BOUND_CLAIMS:-}" -bound_claims_type="${VAULT_OIDC_BOUND_CLAIMS_TYPE:-}" +bound_claims_type="${VAULT_OIDC_BOUND_CLAIMS_TYPE:-string}" + +admin_group="${VAULT_OIDC_ADMIN_GROUP:-admin}" +admin_policies="${VAULT_OIDC_ADMIN_POLICIES:-default,vault-admin}" +dev_group="${VAULT_OIDC_DEV_GROUP:-dev}" +dev_policies="${VAULT_OIDC_DEV_POLICIES:-default,dev-kv}" +user_group="${VAULT_OIDC_USER_GROUP:-${dev_group}}" +user_policies="${VAULT_OIDC_USER_POLICIES:-${VAULT_OIDC_TOKEN_POLICIES:-${dev_policies}}}" if ! 
vault auth list -format=json | grep -q '"oidc/"'; then log "enabling oidc auth method" @@ -43,35 +48,66 @@ vault write auth/oidc/config \ oidc_discovery_url="${VAULT_OIDC_DISCOVERY_URL}" \ oidc_client_id="${VAULT_OIDC_CLIENT_ID}" \ oidc_client_secret="${VAULT_OIDC_CLIENT_SECRET}" \ - default_role="${role}" + default_role="${default_role}" vault auth tune -listing-visibility=unauth oidc >/dev/null -role_args=( - "user_claim=${user_claim}" - "oidc_scopes=${scopes}" - "token_policies=${token_policies}" - "bound_audiences=${bound_audiences}" -) +build_bound_claims() { + local claim="$1" + local groups="$2" + local json + local first=1 + json="{\"${claim}\":[" + IFS=',' read -r -a group_items <<<"${groups}" + for item in "${group_items[@]}"; do + item="${item#"${item%%[![:space:]]*}"}" + item="${item%"${item##*[![:space:]]}"}" + if [[ -z "${item}" ]]; then + continue + fi + if [[ ${first} -eq 0 ]]; then + json+="," + fi + json+="\"${item}\"" + first=0 + done + json+="]}" + printf '%s' "${json}" +} -if [[ -n "${groups_claim}" ]]; then - role_args+=("groups_claim=${groups_claim}") -fi -if [[ -n "${bound_claims}" ]]; then - role_args+=("bound_claims=${bound_claims}") -fi -if [[ -n "${bound_claims_type}" ]]; then - role_args+=("bound_claims_type=${bound_claims_type}") -fi - -IFS=',' read -r -a redirect_items <<<"${redirect_uris}" -for uri in "${redirect_items[@]}"; do - trimmed="${uri#"${uri%%[![:space:]]*}"}" - trimmed="${trimmed%"${trimmed##*[![:space:]]}"}" - if [[ -n "${trimmed}" ]]; then - role_args+=("allowed_redirect_uris=${trimmed}") +configure_role() { + local role_name="$1" + local role_groups="$2" + local role_policies="$3" + if [[ -z "${role_name}" || -z "${role_groups}" || -z "${role_policies}" ]]; then + log "skipping role ${role_name} (missing groups or policies)" + return fi -done + local claims + claims="$(build_bound_claims "${groups_claim}" "${role_groups}")" + local role_args=( + "user_claim=${user_claim}" + "oidc_scopes=${scopes}" + 
"token_policies=${role_policies}" + "bound_audiences=${bound_audiences}" + "bound_claims=${claims}" + "bound_claims_type=${bound_claims_type}" + ) + if [[ -n "${groups_claim}" ]]; then + role_args+=("groups_claim=${groups_claim}") + fi + IFS=',' read -r -a redirect_items <<<"${redirect_uris}" + for uri in "${redirect_items[@]}"; do + trimmed="${uri#"${uri%%[![:space:]]*}"}" + trimmed="${trimmed%"${trimmed##*[![:space:]]}"}" + if [[ -n "${trimmed}" ]]; then + role_args+=("allowed_redirect_uris=${trimmed}") + fi + done + log "configuring oidc role ${role_name}" + vault write "auth/oidc/role/${role_name}" "${role_args[@]}" +} -log "configuring oidc role ${role}" -vault write "auth/oidc/role/${role}" "${role_args[@]}" +configure_role "admin" "${admin_group}" "${admin_policies}" +configure_role "dev" "${dev_group}" "${dev_policies}" +configure_role "user" "${user_group}" "${user_policies}" From c3541b72c3e1ef880e9b2333d5159b6b440c23ad Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Wed, 14 Jan 2026 02:28:38 -0300 Subject: [PATCH 029/270] vault: run oidc config with sh --- services/vault/oidc-config-cronjob.yaml | 2 +- .../vault/scripts/vault_oidc_configure.sh | 73 +++++++++---------- 2 files changed, 34 insertions(+), 41 deletions(-) diff --git a/services/vault/oidc-config-cronjob.yaml b/services/vault/oidc-config-cronjob.yaml index 3960aad..3ea7b53 100644 --- a/services/vault/oidc-config-cronjob.yaml +++ b/services/vault/oidc-config-cronjob.yaml @@ -24,7 +24,7 @@ spec: image: hashicorp/vault:1.17.6 imagePullPolicy: IfNotPresent command: - - bash + - sh - /scripts/vault_oidc_configure.sh env: - name: VAULT_ADDR diff --git a/services/vault/scripts/vault_oidc_configure.sh b/services/vault/scripts/vault_oidc_configure.sh index 380d772..0013866 100644 --- a/services/vault/scripts/vault_oidc_configure.sh +++ b/services/vault/scripts/vault_oidc_configure.sh @@ -1,20 +1,20 @@ -#!/usr/bin/env bash -set -euo pipefail +#!/usr/bin/env sh +set -eu log() { echo "[vault-oidc] $*"; } 
status_json="$(vault status -format=json || true)" -if [[ -z "${status_json}" ]]; then +if [ -z "${status_json}" ]; then log "vault status failed; check VAULT_ADDR and VAULT_TOKEN" exit 1 fi -if ! grep -q '"initialized":true' <<<"${status_json}"; then +if ! printf '%s' "${status_json}" | grep -q '"initialized":[[:space:]]*true'; then log "vault not initialized; skipping" exit 0 fi -if grep -q '"sealed":true' <<<"${status_json}"; then +if printf '%s' "${status_json}" | grep -q '"sealed":[[:space:]]*true'; then log "vault sealed; skipping" exit 0 fi @@ -53,59 +53,52 @@ vault write auth/oidc/config \ vault auth tune -listing-visibility=unauth oidc >/dev/null build_bound_claims() { - local claim="$1" - local groups="$2" - local json - local first=1 + claim="$1" + groups="$2" json="{\"${claim}\":[" - IFS=',' read -r -a group_items <<<"${groups}" - for item in "${group_items[@]}"; do - item="${item#"${item%%[![:space:]]*}"}" - item="${item%"${item##*[![:space:]]}"}" - if [[ -z "${item}" ]]; then + first=1 + old_ifs=$IFS + IFS=, + for item in $groups; do + item="$(printf '%s' "$item" | sed 's/^[[:space:]]*//;s/[[:space:]]*$//')" + if [ -z "${item}" ]; then continue fi - if [[ ${first} -eq 0 ]]; then - json+="," + if [ "${first}" -eq 0 ]; then + json="${json}," fi - json+="\"${item}\"" + json="${json}\"${item}\"" first=0 done - json+="]}" + IFS=$old_ifs + json="${json}]}" printf '%s' "${json}" } configure_role() { - local role_name="$1" - local role_groups="$2" - local role_policies="$3" - if [[ -z "${role_name}" || -z "${role_groups}" || -z "${role_policies}" ]]; then + role_name="$1" + role_groups="$2" + role_policies="$3" + if [ -z "${role_name}" ] || [ -z "${role_groups}" ] || [ -z "${role_policies}" ]; then log "skipping role ${role_name} (missing groups or policies)" return fi - local claims claims="$(build_bound_claims "${groups_claim}" "${role_groups}")" - local role_args=( - "user_claim=${user_claim}" - "oidc_scopes=${scopes}" - "token_policies=${role_policies}" - 
"bound_audiences=${bound_audiences}" - "bound_claims=${claims}" - "bound_claims_type=${bound_claims_type}" - ) - if [[ -n "${groups_claim}" ]]; then - role_args+=("groups_claim=${groups_claim}") + role_args="user_claim=${user_claim} oidc_scopes=${scopes} token_policies=${role_policies} bound_audiences=${bound_audiences} bound_claims=${claims} bound_claims_type=${bound_claims_type}" + if [ -n "${groups_claim}" ]; then + role_args="${role_args} groups_claim=${groups_claim}" fi - IFS=',' read -r -a redirect_items <<<"${redirect_uris}" - for uri in "${redirect_items[@]}"; do - trimmed="${uri#"${uri%%[![:space:]]*}"}" - trimmed="${trimmed%"${trimmed##*[![:space:]]}"}" - if [[ -n "${trimmed}" ]]; then - role_args+=("allowed_redirect_uris=${trimmed}") + old_ifs=$IFS + IFS=, + for uri in $redirect_uris; do + trimmed="$(printf '%s' "$uri" | sed 's/^[[:space:]]*//;s/[[:space:]]*$//')" + if [ -n "${trimmed}" ]; then + role_args="${role_args} allowed_redirect_uris=${trimmed}" fi done + IFS=$old_ifs log "configuring oidc role ${role_name}" - vault write "auth/oidc/role/${role_name}" "${role_args[@]}" + vault write "auth/oidc/role/${role_name}" ${role_args} } configure_role "admin" "${admin_group}" "${admin_policies}" From 8a358832f35e1821562c8a67fef3307520c36aa4 Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Wed, 14 Jan 2026 02:52:51 -0300 Subject: [PATCH 030/270] vault: fix oidc scopes parsing --- services/vault/scripts/vault_oidc_configure.sh | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/services/vault/scripts/vault_oidc_configure.sh b/services/vault/scripts/vault_oidc_configure.sh index 0013866..01b0696 100644 --- a/services/vault/scripts/vault_oidc_configure.sh +++ b/services/vault/scripts/vault_oidc_configure.sh @@ -84,7 +84,8 @@ configure_role() { return fi claims="$(build_bound_claims "${groups_claim}" "${role_groups}")" - role_args="user_claim=${user_claim} oidc_scopes=${scopes} token_policies=${role_policies} bound_audiences=${bound_audiences} 
bound_claims=${claims} bound_claims_type=${bound_claims_type}" + scopes_csv="$(printf '%s' "${scopes}" | tr ' ' ',' | tr -s ',' | sed 's/^,//;s/,$//')" + role_args="user_claim=${user_claim} oidc_scopes=${scopes_csv} token_policies=${role_policies} bound_audiences=${bound_audiences} bound_claims=${claims} bound_claims_type=${bound_claims_type}" if [ -n "${groups_claim}" ]; then role_args="${role_args} groups_claim=${groups_claim}" fi From fd2ae6bdd5d04aa2add9da1d492964034421a7a6 Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Wed, 14 Jan 2026 02:54:59 -0300 Subject: [PATCH 031/270] vault: wire more services to CSI --- .../bstein-dev-home/backend-deployment.yaml | 40 ++++++------ .../chat-ai-gateway-deployment.yaml | 29 +++++---- services/bstein-dev-home/kustomization.yaml | 8 ++- .../portal-onboarding-e2e-test-job.yaml | 33 +++++----- .../scripts/bstein_dev_home_vault_env.sh | 17 +++++ .../bstein-dev-home/secretproviderclass.yaml | 36 +++++++++++ .../vaultwarden-cred-sync-cronjob.yaml | 29 ++++++--- services/gitea/deployment.yaml | 62 +++++++++---------- services/gitea/kustomization.yaml | 11 +++- services/gitea/scripts/gitea_vault_env.sh | 15 +++++ services/gitea/secretproviderclass.yaml | 30 +++++++++ services/gitea/serviceaccount.yaml | 6 ++ services/keycloak/deployment.yaml | 47 +++++++------- services/keycloak/kustomization.yaml | 7 ++- .../keycloak/scripts/keycloak_vault_env.sh | 25 ++++++++ services/keycloak/secretproviderclass.yaml | 39 ++++++++++++ services/keycloak/serviceaccount.yaml | 6 ++ .../vault/scripts/vault_k8s_auth_configure.sh | 50 +++++++++++++-- services/vaultwarden/deployment.yaml | 32 +++++++--- services/vaultwarden/kustomization.yaml | 9 +++ .../scripts/vaultwarden_vault_env.sh | 11 ++++ services/vaultwarden/secretproviderclass.yaml | 18 ++++++ services/vaultwarden/serviceaccount.yaml | 6 ++ 23 files changed, 436 insertions(+), 130 deletions(-) create mode 100644 services/bstein-dev-home/scripts/bstein_dev_home_vault_env.sh create mode 
100644 services/bstein-dev-home/secretproviderclass.yaml create mode 100644 services/gitea/scripts/gitea_vault_env.sh create mode 100644 services/gitea/secretproviderclass.yaml create mode 100644 services/gitea/serviceaccount.yaml create mode 100644 services/keycloak/scripts/keycloak_vault_env.sh create mode 100644 services/keycloak/secretproviderclass.yaml create mode 100644 services/keycloak/serviceaccount.yaml create mode 100644 services/vaultwarden/scripts/vaultwarden_vault_env.sh create mode 100644 services/vaultwarden/secretproviderclass.yaml create mode 100644 services/vaultwarden/serviceaccount.yaml diff --git a/services/bstein-dev-home/backend-deployment.yaml b/services/bstein-dev-home/backend-deployment.yaml index 2e92443..08f73f7 100644 --- a/services/bstein-dev-home/backend-deployment.yaml +++ b/services/bstein-dev-home/backend-deployment.yaml @@ -26,15 +26,11 @@ spec: - name: backend image: registry.bstein.dev/bstein/bstein-dev-home-backend:0.1.1-92 # {"$imagepolicy": "bstein-dev-home:bstein-dev-home-backend"} imagePullPolicy: Always - command: ["gunicorn"] + command: ["/bin/sh", "-c"] args: - - -b - - 0.0.0.0:8080 - - --workers - - "2" - - --timeout - - "180" - - app:app + - >- + . 
/vault/scripts/bstein_dev_home_vault_env.sh + && exec gunicorn -b 0.0.0.0:8080 --workers 2 --timeout 180 app:app env: - name: AI_CHAT_API value: http://ollama.ai.svc.cluster.local:11434 @@ -67,18 +63,8 @@ spec: value: atlas - name: KEYCLOAK_ADMIN_CLIENT_ID value: bstein-dev-home-admin - - name: KEYCLOAK_ADMIN_CLIENT_SECRET - valueFrom: - secretKeyRef: - name: bstein-dev-home-keycloak-admin - key: client_secret - name: ACCOUNT_ALLOWED_GROUPS value: "" - - name: PORTAL_DATABASE_URL - valueFrom: - secretKeyRef: - name: atlas-portal-db - key: PORTAL_DATABASE_URL - name: HTTP_CHECK_TIMEOUT_SEC value: "2" - name: ACCESS_REQUEST_SUBMIT_RATE_LIMIT @@ -108,6 +94,13 @@ spec: initialDelaySeconds: 10 periodSeconds: 10 timeoutSeconds: 3 + volumeMounts: + - name: vault-secrets + mountPath: /vault/secrets + readOnly: true + - name: vault-scripts + mountPath: /vault/scripts + readOnly: true resources: requests: cpu: 100m @@ -115,3 +108,14 @@ spec: limits: cpu: 500m memory: 512Mi + volumes: + - name: vault-secrets + csi: + driver: secrets-store.csi.k8s.io + readOnly: true + volumeAttributes: + secretProviderClass: bstein-dev-home-vault + - name: vault-scripts + configMap: + name: bstein-dev-home-vault-env + defaultMode: 0555 diff --git a/services/bstein-dev-home/chat-ai-gateway-deployment.yaml b/services/bstein-dev-home/chat-ai-gateway-deployment.yaml index 7ac6504..4fb4ba5 100644 --- a/services/bstein-dev-home/chat-ai-gateway-deployment.yaml +++ b/services/bstein-dev-home/chat-ai-gateway-deployment.yaml @@ -15,6 +15,7 @@ spec: labels: app: chat-ai-gateway spec: + serviceAccountName: bstein-dev-home nodeSelector: kubernetes.io/arch: arm64 node-role.kubernetes.io/worker: "true" @@ -23,20 +24,10 @@ spec: image: python:3.11-slim command: ["/bin/sh","-c"] args: - - python /app/gateway.py + - . 
/vault/scripts/bstein_dev_home_vault_env.sh && exec python /app/gateway.py env: - name: UPSTREAM_URL value: http://bstein-dev-home-backend/api/chat - - name: CHAT_KEY_MATRIX - valueFrom: - secretKeyRef: - name: chat-ai-keys-runtime - key: matrix - - name: CHAT_KEY_HOMEPAGE - valueFrom: - secretKeyRef: - name: chat-ai-keys-runtime - key: homepage ports: - name: http containerPort: 8080 @@ -63,7 +54,23 @@ spec: - name: code mountPath: /app/gateway.py subPath: gateway.py + - name: vault-secrets + mountPath: /vault/secrets + readOnly: true + - name: vault-scripts + mountPath: /vault/scripts + readOnly: true volumes: - name: code configMap: name: chat-ai-gateway + - name: vault-secrets + csi: + driver: secrets-store.csi.k8s.io + readOnly: true + volumeAttributes: + secretProviderClass: bstein-dev-home-vault + - name: vault-scripts + configMap: + name: bstein-dev-home-vault-env + defaultMode: 0555 diff --git a/services/bstein-dev-home/kustomization.yaml b/services/bstein-dev-home/kustomization.yaml index 56d9cfd..57228ed 100644 --- a/services/bstein-dev-home/kustomization.yaml +++ b/services/bstein-dev-home/kustomization.yaml @@ -6,7 +6,7 @@ resources: - namespace.yaml - image.yaml - rbac.yaml - - portal-e2e-client-secret-sync-rbac.yaml + - secretproviderclass.yaml - chat-ai-gateway-deployment.yaml - chat-ai-gateway-service.yaml - frontend-deployment.yaml @@ -17,6 +17,12 @@ resources: - portal-onboarding-e2e-test-job.yaml - ingress.yaml configMapGenerator: + - name: bstein-dev-home-vault-env + namespace: bstein-dev-home + files: + - bstein_dev_home_vault_env.sh=scripts/bstein_dev_home_vault_env.sh + options: + disableNameSuffixHash: true - name: chat-ai-gateway namespace: bstein-dev-home files: diff --git a/services/bstein-dev-home/portal-onboarding-e2e-test-job.yaml b/services/bstein-dev-home/portal-onboarding-e2e-test-job.yaml index 3170f86..18ef62a 100644 --- a/services/bstein-dev-home/portal-onboarding-e2e-test-job.yaml +++ 
b/services/bstein-dev-home/portal-onboarding-e2e-test-job.yaml @@ -9,6 +9,7 @@ spec: template: spec: restartPolicy: Never + serviceAccountName: bstein-dev-home containers: - name: test image: python:3.11-slim @@ -21,21 +22,6 @@ spec: value: atlas - name: KEYCLOAK_ADMIN_CLIENT_ID value: bstein-dev-home-admin - - name: KEYCLOAK_ADMIN_CLIENT_SECRET - valueFrom: - secretKeyRef: - name: bstein-dev-home-keycloak-admin - key: client_secret - - name: PORTAL_E2E_CLIENT_ID - valueFrom: - secretKeyRef: - name: portal-e2e-client - key: client_id - - name: PORTAL_E2E_CLIENT_SECRET - valueFrom: - secretKeyRef: - name: portal-e2e-client - key: client_secret - name: PORTAL_TARGET_CLIENT_ID value: bstein-dev-home - name: E2E_PORTAL_ADMIN_USERNAME @@ -54,13 +40,30 @@ spec: args: - | set -euo pipefail + . /vault/scripts/bstein_dev_home_vault_env.sh python /scripts/test_portal_onboarding_flow.py volumeMounts: - name: tests mountPath: /scripts readOnly: true + - name: vault-secrets + mountPath: /vault/secrets + readOnly: true + - name: vault-scripts + mountPath: /vault/scripts + readOnly: true volumes: - name: tests configMap: name: portal-onboarding-e2e-tests defaultMode: 0555 + - name: vault-secrets + csi: + driver: secrets-store.csi.k8s.io + readOnly: true + volumeAttributes: + secretProviderClass: bstein-dev-home-vault + - name: vault-scripts + configMap: + name: bstein-dev-home-vault-env + defaultMode: 0555 diff --git a/services/bstein-dev-home/scripts/bstein_dev_home_vault_env.sh b/services/bstein-dev-home/scripts/bstein_dev_home_vault_env.sh new file mode 100644 index 0000000..8cab54e --- /dev/null +++ b/services/bstein-dev-home/scripts/bstein_dev_home_vault_env.sh @@ -0,0 +1,17 @@ +#!/usr/bin/env sh +set -eu + +vault_dir="/vault/secrets" + +read_secret() { + cat "${vault_dir}/$1" +} + +export KEYCLOAK_ADMIN_CLIENT_SECRET="$(read_secret bstein-dev-home-keycloak-admin__client_secret)" +export PORTAL_DATABASE_URL="$(read_secret atlas-portal-db__PORTAL_DATABASE_URL)" + +export 
CHAT_KEY_MATRIX="$(read_secret chat-ai-keys-runtime__matrix)" +export CHAT_KEY_HOMEPAGE="$(read_secret chat-ai-keys-runtime__homepage)" + +export PORTAL_E2E_CLIENT_ID="$(read_secret portal-e2e-client__client_id)" +export PORTAL_E2E_CLIENT_SECRET="$(read_secret portal-e2e-client__client_secret)" diff --git a/services/bstein-dev-home/secretproviderclass.yaml b/services/bstein-dev-home/secretproviderclass.yaml new file mode 100644 index 0000000..83e94c0 --- /dev/null +++ b/services/bstein-dev-home/secretproviderclass.yaml @@ -0,0 +1,36 @@ +# services/bstein-dev-home/secretproviderclass.yaml +apiVersion: secrets-store.csi.x-k8s.io/v1 +kind: SecretProviderClass +metadata: + name: bstein-dev-home-vault + namespace: bstein-dev-home +spec: + provider: vault + parameters: + vaultAddress: "http://vault.vault.svc.cluster.local:8200" + roleName: "bstein-dev-home" + objects: | + - objectName: "atlas-portal-db__PORTAL_DATABASE_URL" + secretPath: "kv/data/atlas/bstein-dev-home/atlas-portal-db" + secretKey: "PORTAL_DATABASE_URL" + - objectName: "bstein-dev-home-keycloak-admin__client_secret" + secretPath: "kv/data/atlas/bstein-dev-home/bstein-dev-home-keycloak-admin" + secretKey: "client_secret" + - objectName: "chat-ai-keys__homepage" + secretPath: "kv/data/atlas/bstein-dev-home/chat-ai-keys" + secretKey: "homepage" + - objectName: "chat-ai-keys__matrix" + secretPath: "kv/data/atlas/bstein-dev-home/chat-ai-keys" + secretKey: "matrix" + - objectName: "chat-ai-keys-runtime__homepage" + secretPath: "kv/data/atlas/shared/chat-ai-keys-runtime" + secretKey: "homepage" + - objectName: "chat-ai-keys-runtime__matrix" + secretPath: "kv/data/atlas/shared/chat-ai-keys-runtime" + secretKey: "matrix" + - objectName: "portal-e2e-client__client_id" + secretPath: "kv/data/atlas/shared/portal-e2e-client" + secretKey: "client_id" + - objectName: "portal-e2e-client__client_secret" + secretPath: "kv/data/atlas/shared/portal-e2e-client" + secretKey: "client_secret" diff --git 
a/services/bstein-dev-home/vaultwarden-cred-sync-cronjob.yaml b/services/bstein-dev-home/vaultwarden-cred-sync-cronjob.yaml index 5e7c779..b531e7a 100644 --- a/services/bstein-dev-home/vaultwarden-cred-sync-cronjob.yaml +++ b/services/bstein-dev-home/vaultwarden-cred-sync-cronjob.yaml @@ -25,9 +25,11 @@ spec: - name: sync image: registry.bstein.dev/bstein/bstein-dev-home-backend:0.1.1-92 # {"$imagepolicy": "bstein-dev-home:bstein-dev-home-backend"} imagePullPolicy: Always - command: - - python - - /scripts/vaultwarden_cred_sync.py + command: ["/bin/sh", "-c"] + args: + - >- + . /vault/scripts/bstein_dev_home_vault_env.sh + && exec python /scripts/vaultwarden_cred_sync.py env: - name: PYTHONPATH value: /app @@ -41,19 +43,30 @@ spec: value: atlas - name: KEYCLOAK_ADMIN_CLIENT_ID value: bstein-dev-home-admin - - name: KEYCLOAK_ADMIN_CLIENT_SECRET - valueFrom: - secretKeyRef: - name: bstein-dev-home-keycloak-admin - key: client_secret - name: HTTP_CHECK_TIMEOUT_SEC value: "20" volumeMounts: - name: vaultwarden-cred-sync-script mountPath: /scripts readOnly: true + - name: vault-secrets + mountPath: /vault/secrets + readOnly: true + - name: vault-scripts + mountPath: /vault/scripts + readOnly: true volumes: - name: vaultwarden-cred-sync-script configMap: name: vaultwarden-cred-sync-script defaultMode: 0555 + - name: vault-secrets + csi: + driver: secrets-store.csi.k8s.io + readOnly: true + volumeAttributes: + secretProviderClass: bstein-dev-home-vault + - name: vault-scripts + configMap: + name: bstein-dev-home-vault-env + defaultMode: 0555 diff --git a/services/gitea/deployment.yaml b/services/gitea/deployment.yaml index ed2cd63..13aef1e 100644 --- a/services/gitea/deployment.yaml +++ b/services/gitea/deployment.yaml @@ -21,33 +21,19 @@ spec: labels: app: gitea spec: + serviceAccountName: gitea-vault initContainers: - name: configure-oidc image: gitea/gitea:1.23 securityContext: runAsUser: 1000 runAsGroup: 1000 - env: - - name: CLIENT_ID - valueFrom: - secretKeyRef: - 
name: gitea-oidc - key: client_id - - name: CLIENT_SECRET - valueFrom: - secretKeyRef: - name: gitea-oidc - key: client_secret - - name: DISCOVERY_URL - valueFrom: - secretKeyRef: - name: gitea-oidc - key: openid_auto_discovery_url command: - - /bin/bash + - /bin/sh - -c - | set -euo pipefail + . /vault/scripts/gitea_vault_env.sh APPINI=/data/gitea/conf/app.ini BIN=/usr/local/bin/gitea @@ -87,6 +73,18 @@ spec: volumeMounts: - name: gitea-data mountPath: /data + - name: vault-secrets + mountPath: /vault/secrets + readOnly: true + - name: vault-scripts + mountPath: /vault/scripts + readOnly: true + - name: vault-secrets + mountPath: /vault/secrets + readOnly: true + - name: vault-scripts + mountPath: /vault/scripts + readOnly: true nodeSelector: node-role.kubernetes.io/worker: "true" affinity: @@ -107,6 +105,11 @@ spec: containers: - name: gitea image: gitea/gitea:1.23 + command: ["/bin/sh", "-c"] + args: + - >- + . /vault/scripts/gitea_vault_env.sh + && exec /usr/bin/entrypoint /usr/bin/s6-svscan /etc/s6 ports: - containerPort: 3000 name: http @@ -143,16 +146,6 @@ spec: value: "scm.bstein.dev" - name: GITEA__session__SAME_SITE value: "lax" - - name: GITEA__security__SECRET_KEY - valueFrom: - secretKeyRef: - name: gitea-secret - key: SECRET_KEY - - name: GITEA__security__INTERNAL_TOKEN - valueFrom: - secretKeyRef: - name: gitea-secret - key: INTERNAL_TOKEN - name: DB_TYPE value: "postgres" - name: DB_HOST @@ -161,11 +154,6 @@ spec: value: "gitea" - name: DB_USER value: "gitea" - - name: DB_PASS - valueFrom: - secretKeyRef: - name: gitea-db-secret - key: password - name: START_SSH_SERVER value: "true" - name: SSH_PORT @@ -177,3 +165,13 @@ spec: - name: gitea-data persistentVolumeClaim: claimName: gitea-data + - name: vault-secrets + csi: + driver: secrets-store.csi.k8s.io + readOnly: true + volumeAttributes: + secretProviderClass: gitea-vault + - name: vault-scripts + configMap: + name: gitea-vault-env + defaultMode: 0555 diff --git a/services/gitea/kustomization.yaml 
b/services/gitea/kustomization.yaml index 36d6c23..466e6ce 100644 --- a/services/gitea/kustomization.yaml +++ b/services/gitea/kustomization.yaml @@ -3,7 +3,16 @@ apiVersion: kustomize.config.k8s.io/v1beta1 kind: Kustomization resources: - namespace.yaml + - serviceaccount.yaml + - pvc.yaml + - secretproviderclass.yaml - deployment.yaml - service.yaml - - pvc.yaml - ingress.yaml +configMapGenerator: + - name: gitea-vault-env + namespace: gitea + files: + - gitea_vault_env.sh=scripts/gitea_vault_env.sh + options: + disableNameSuffixHash: true diff --git a/services/gitea/scripts/gitea_vault_env.sh b/services/gitea/scripts/gitea_vault_env.sh new file mode 100644 index 0000000..0e4c4a8 --- /dev/null +++ b/services/gitea/scripts/gitea_vault_env.sh @@ -0,0 +1,15 @@ +#!/usr/bin/env sh +set -eu + +vault_dir="/vault/secrets" + +read_secret() { + cat "${vault_dir}/$1" +} + +export GITEA__security__SECRET_KEY="$(read_secret gitea-secret__SECRET_KEY)" +export GITEA__security__INTERNAL_TOKEN="$(read_secret gitea-secret__INTERNAL_TOKEN)" +export DB_PASS="$(read_secret gitea-db-secret__password)" +export CLIENT_ID="$(read_secret gitea-oidc__client_id)" +export CLIENT_SECRET="$(read_secret gitea-oidc__client_secret)" +export DISCOVERY_URL="$(read_secret gitea-oidc__openid_auto_discovery_url)" diff --git a/services/gitea/secretproviderclass.yaml b/services/gitea/secretproviderclass.yaml new file mode 100644 index 0000000..b555025 --- /dev/null +++ b/services/gitea/secretproviderclass.yaml @@ -0,0 +1,30 @@ +# services/gitea/secretproviderclass.yaml +apiVersion: secrets-store.csi.x-k8s.io/v1 +kind: SecretProviderClass +metadata: + name: gitea-vault + namespace: gitea +spec: + provider: vault + parameters: + vaultAddress: "http://vault.vault.svc.cluster.local:8200" + roleName: "gitea" + objects: | + - objectName: "gitea-db-secret__password" + secretPath: "kv/data/atlas/gitea/gitea-db-secret" + secretKey: "password" + - objectName: "gitea-secret__SECRET_KEY" + secretPath: 
"kv/data/atlas/gitea/gitea-secret" + secretKey: "SECRET_KEY" + - objectName: "gitea-secret__INTERNAL_TOKEN" + secretPath: "kv/data/atlas/gitea/gitea-secret" + secretKey: "INTERNAL_TOKEN" + - objectName: "gitea-oidc__client_id" + secretPath: "kv/data/atlas/gitea/gitea-oidc" + secretKey: "client_id" + - objectName: "gitea-oidc__client_secret" + secretPath: "kv/data/atlas/gitea/gitea-oidc" + secretKey: "client_secret" + - objectName: "gitea-oidc__openid_auto_discovery_url" + secretPath: "kv/data/atlas/gitea/gitea-oidc" + secretKey: "openid_auto_discovery_url" diff --git a/services/gitea/serviceaccount.yaml b/services/gitea/serviceaccount.yaml new file mode 100644 index 0000000..a91ca8a --- /dev/null +++ b/services/gitea/serviceaccount.yaml @@ -0,0 +1,6 @@ +# services/gitea/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: gitea-vault + namespace: gitea diff --git a/services/keycloak/deployment.yaml b/services/keycloak/deployment.yaml index 48cf5e0..3c116f6 100644 --- a/services/keycloak/deployment.yaml +++ b/services/keycloak/deployment.yaml @@ -21,6 +21,7 @@ spec: labels: app: keycloak spec: + serviceAccountName: sso-vault affinity: nodeAffinity: requiredDuringSchedulingIgnoredDuringExecution: @@ -69,28 +70,16 @@ spec: - name: keycloak image: quay.io/keycloak/keycloak:26.0.7 imagePullPolicy: IfNotPresent + command: ["/bin/sh", "-c"] args: - - start + - >- + . 
/vault/scripts/keycloak_vault_env.sh + && exec /opt/keycloak/bin/kc.sh start env: - name: KC_DB value: postgres - name: KC_DB_URL_HOST value: postgres-service.postgres.svc.cluster.local - - name: KC_DB_URL_DATABASE - valueFrom: - secretKeyRef: - name: keycloak-db - key: POSTGRES_DATABASE - - name: KC_DB_USERNAME - valueFrom: - secretKeyRef: - name: keycloak-db - key: POSTGRES_USER - - name: KC_DB_PASSWORD - valueFrom: - secretKeyRef: - name: keycloak-db - key: POSTGRES_PASSWORD - name: KC_DB_SCHEMA value: public - name: KC_HOSTNAME @@ -115,16 +104,6 @@ spec: value: "true" - name: KC_METRICS_ENABLED value: "true" - - name: KEYCLOAK_ADMIN - valueFrom: - secretKeyRef: - name: keycloak-admin - key: username - - name: KEYCLOAK_ADMIN_PASSWORD - valueFrom: - secretKeyRef: - name: keycloak-admin - key: password - name: KC_EVENTS_LISTENERS value: jboss-logging,mailu-http - name: KC_SPI_EVENTS_LISTENER_MAILU-HTTP_ENDPOINT @@ -153,9 +132,25 @@ spec: mountPath: /opt/keycloak/data - name: providers mountPath: /opt/keycloak/providers + - name: vault-secrets + mountPath: /vault/secrets + readOnly: true + - name: vault-scripts + mountPath: /vault/scripts + readOnly: true volumes: - name: data persistentVolumeClaim: claimName: keycloak-data - name: providers emptyDir: {} + - name: vault-secrets + csi: + driver: secrets-store.csi.k8s.io + readOnly: true + volumeAttributes: + secretProviderClass: sso-vault + - name: vault-scripts + configMap: + name: sso-vault-env + defaultMode: 0555 diff --git a/services/keycloak/kustomization.yaml b/services/keycloak/kustomization.yaml index 38da8bf..82df213 100644 --- a/services/keycloak/kustomization.yaml +++ b/services/keycloak/kustomization.yaml @@ -5,11 +5,11 @@ namespace: sso resources: - namespace.yaml - pvc.yaml + - serviceaccount.yaml + - secretproviderclass.yaml - deployment.yaml - realm-settings-job.yaml - portal-e2e-client-job.yaml - - portal-e2e-client-secret-sync-rbac.yaml - - portal-e2e-client-secret-sync-cronjob.yaml - 
portal-e2e-target-client-job.yaml - portal-e2e-token-exchange-permissions-job.yaml - portal-e2e-token-exchange-test-job.yaml @@ -26,6 +26,9 @@ resources: generatorOptions: disableNameSuffixHash: true configMapGenerator: + - name: sso-vault-env + files: + - keycloak_vault_env.sh=scripts/keycloak_vault_env.sh - name: portal-e2e-tests files: - test_portal_token_exchange.py=scripts/tests/test_portal_token_exchange.py diff --git a/services/keycloak/scripts/keycloak_vault_env.sh b/services/keycloak/scripts/keycloak_vault_env.sh new file mode 100644 index 0000000..a9cfdae --- /dev/null +++ b/services/keycloak/scripts/keycloak_vault_env.sh @@ -0,0 +1,25 @@ +#!/usr/bin/env sh +set -eu + +vault_dir="/vault/secrets" + +read_secret() { + cat "${vault_dir}/$1" +} + +admin_user="$(read_secret keycloak-admin__username)" +admin_password="$(read_secret keycloak-admin__password)" + +export KEYCLOAK_ADMIN="${admin_user}" +export KEYCLOAK_ADMIN_USER="${admin_user}" +export KEYCLOAK_ADMIN_PASSWORD="${admin_password}" + +export KC_DB_URL_DATABASE="$(read_secret keycloak-db__POSTGRES_DATABASE)" +export KC_DB_USERNAME="$(read_secret keycloak-db__POSTGRES_USER)" +export KC_DB_PASSWORD="$(read_secret keycloak-db__POSTGRES_PASSWORD)" + +export PORTAL_E2E_CLIENT_ID="$(read_secret portal-e2e-client__client_id)" +export PORTAL_E2E_CLIENT_SECRET="$(read_secret portal-e2e-client__client_secret)" + +export LDAP_ADMIN_PASSWORD="$(read_secret openldap-admin__LDAP_ADMIN_PASSWORD)" +export LDAP_CONFIG_PASSWORD="$(read_secret openldap-admin__LDAP_CONFIG_PASSWORD)" diff --git a/services/keycloak/secretproviderclass.yaml b/services/keycloak/secretproviderclass.yaml new file mode 100644 index 0000000..7015c12 --- /dev/null +++ b/services/keycloak/secretproviderclass.yaml @@ -0,0 +1,39 @@ +# services/keycloak/secretproviderclass.yaml +apiVersion: secrets-store.csi.x-k8s.io/v1 +kind: SecretProviderClass +metadata: + name: sso-vault + namespace: sso +spec: + provider: vault + parameters: + vaultAddress: 
"http://vault.vault.svc.cluster.local:8200" + roleName: "sso" + objects: | + - objectName: "keycloak-db__POSTGRES_DATABASE" + secretPath: "kv/data/atlas/sso/keycloak-db" + secretKey: "POSTGRES_DATABASE" + - objectName: "keycloak-db__POSTGRES_USER" + secretPath: "kv/data/atlas/sso/keycloak-db" + secretKey: "POSTGRES_USER" + - objectName: "keycloak-db__POSTGRES_PASSWORD" + secretPath: "kv/data/atlas/sso/keycloak-db" + secretKey: "POSTGRES_PASSWORD" + - objectName: "keycloak-admin__username" + secretPath: "kv/data/atlas/shared/keycloak-admin" + secretKey: "username" + - objectName: "keycloak-admin__password" + secretPath: "kv/data/atlas/shared/keycloak-admin" + secretKey: "password" + - objectName: "portal-e2e-client__client_id" + secretPath: "kv/data/atlas/shared/portal-e2e-client" + secretKey: "client_id" + - objectName: "portal-e2e-client__client_secret" + secretPath: "kv/data/atlas/shared/portal-e2e-client" + secretKey: "client_secret" + - objectName: "openldap-admin__LDAP_ADMIN_PASSWORD" + secretPath: "kv/data/atlas/sso/openldap-admin" + secretKey: "LDAP_ADMIN_PASSWORD" + - objectName: "openldap-admin__LDAP_CONFIG_PASSWORD" + secretPath: "kv/data/atlas/sso/openldap-admin" + secretKey: "LDAP_CONFIG_PASSWORD" diff --git a/services/keycloak/serviceaccount.yaml b/services/keycloak/serviceaccount.yaml new file mode 100644 index 0000000..59d710f --- /dev/null +++ b/services/keycloak/serviceaccount.yaml @@ -0,0 +1,6 @@ +# services/keycloak/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: sso-vault + namespace: sso diff --git a/services/vault/scripts/vault_k8s_auth_configure.sh b/services/vault/scripts/vault_k8s_auth_configure.sh index 14604cd..fdffbea 100644 --- a/services/vault/scripts/vault_k8s_auth_configure.sh +++ b/services/vault/scripts/vault_k8s_auth_configure.sh @@ -35,16 +35,39 @@ vault write auth/kubernetes/config \ kubernetes_host="${k8s_host}" \ kubernetes_ca_cert="${k8s_ca}" -for namespace in outline planka; do +for namespace in 
outline planka bstein-dev-home gitea vaultwarden sso; do policy_name="${namespace}" + service_account="" + shared_paths="" + case "${namespace}" in - outline) service_account="outline-vault" ;; - planka) service_account="planka-vault" ;; - *) log "unknown namespace ${namespace}"; exit 1 ;; + outline) + service_account="outline-vault" + ;; + planka) + service_account="planka-vault" + ;; + bstein-dev-home) + service_account="bstein-dev-home" + shared_paths="shared/chat-ai-keys-runtime shared/portal-e2e-client" + ;; + gitea) + service_account="gitea-vault" + ;; + vaultwarden) + service_account="vaultwarden-vault" + ;; + sso) + service_account="sso-vault,mas-secrets-ensure" + shared_paths="shared/keycloak-admin shared/portal-e2e-client" + ;; + *) + log "unknown namespace ${namespace}" + exit 1 + ;; esac - log "writing policy ${policy_name}" - vault policy write "${policy_name}" - <- + . /vault/scripts/vaultwarden_vault_env.sh + && exec /start.sh env: - name: SIGNUPS_ALLOWED value: "false" @@ -43,16 +49,6 @@ spec: value: "postmaster@bstein.dev" - name: SMTP_FROM_NAME value: "Atlas Vaultwarden" - - name: DATABASE_URL - valueFrom: - secretKeyRef: - name: vaultwarden-db-url - key: DATABASE_URL - - name: ADMIN_TOKEN - valueFrom: - secretKeyRef: - name: vaultwarden-admin - key: ADMIN_TOKEN ports: - name: http containerPort: 80 @@ -60,7 +56,23 @@ spec: volumeMounts: - name: vaultwarden-data mountPath: /data + - name: vault-secrets + mountPath: /vault/secrets + readOnly: true + - name: vault-scripts + mountPath: /vault/scripts + readOnly: true volumes: - name: vaultwarden-data persistentVolumeClaim: claimName: vaultwarden-data + - name: vault-secrets + csi: + driver: secrets-store.csi.k8s.io + readOnly: true + volumeAttributes: + secretProviderClass: vaultwarden-vault + - name: vault-scripts + configMap: + name: vaultwarden-vault-env + defaultMode: 0555 diff --git a/services/vaultwarden/kustomization.yaml b/services/vaultwarden/kustomization.yaml index f0d02fd..c1525f7 100644 
--- a/services/vaultwarden/kustomization.yaml +++ b/services/vaultwarden/kustomization.yaml @@ -4,7 +4,16 @@ kind: Kustomization namespace: vaultwarden resources: - namespace.yaml + - serviceaccount.yaml - pvc.yaml + - secretproviderclass.yaml - deployment.yaml - service.yaml - ingress.yaml +configMapGenerator: + - name: vaultwarden-vault-env + namespace: vaultwarden + files: + - vaultwarden_vault_env.sh=scripts/vaultwarden_vault_env.sh + options: + disableNameSuffixHash: true diff --git a/services/vaultwarden/scripts/vaultwarden_vault_env.sh b/services/vaultwarden/scripts/vaultwarden_vault_env.sh new file mode 100644 index 0000000..133faaa --- /dev/null +++ b/services/vaultwarden/scripts/vaultwarden_vault_env.sh @@ -0,0 +1,11 @@ +#!/usr/bin/env sh +set -eu + +vault_dir="/vault/secrets" + +read_secret() { + cat "${vault_dir}/$1" +} + +export DATABASE_URL="$(read_secret vaultwarden-db-url__DATABASE_URL)" +export ADMIN_TOKEN="$(read_secret vaultwarden-admin__ADMIN_TOKEN)" diff --git a/services/vaultwarden/secretproviderclass.yaml b/services/vaultwarden/secretproviderclass.yaml new file mode 100644 index 0000000..6d4530b --- /dev/null +++ b/services/vaultwarden/secretproviderclass.yaml @@ -0,0 +1,18 @@ +# services/vaultwarden/secretproviderclass.yaml +apiVersion: secrets-store.csi.x-k8s.io/v1 +kind: SecretProviderClass +metadata: + name: vaultwarden-vault + namespace: vaultwarden +spec: + provider: vault + parameters: + vaultAddress: "http://vault.vault.svc.cluster.local:8200" + roleName: "vaultwarden" + objects: | + - objectName: "vaultwarden-db-url__DATABASE_URL" + secretPath: "kv/data/atlas/vaultwarden/vaultwarden-db-url" + secretKey: "DATABASE_URL" + - objectName: "vaultwarden-admin__ADMIN_TOKEN" + secretPath: "kv/data/atlas/vaultwarden/vaultwarden-admin" + secretKey: "ADMIN_TOKEN" diff --git a/services/vaultwarden/serviceaccount.yaml b/services/vaultwarden/serviceaccount.yaml new file mode 100644 index 0000000..445ee15 --- /dev/null +++ 
b/services/vaultwarden/serviceaccount.yaml @@ -0,0 +1,6 @@ +# services/vaultwarden/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: vaultwarden-vault + namespace: vaultwarden From 4111fb079fdf3f04dc2a6e193f1e60c00fae4f6f Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Wed, 14 Jan 2026 02:56:29 -0300 Subject: [PATCH 032/270] vault: write bound_claims as file --- services/vault/scripts/vault_oidc_configure.sh | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/services/vault/scripts/vault_oidc_configure.sh b/services/vault/scripts/vault_oidc_configure.sh index 01b0696..3d14e52 100644 --- a/services/vault/scripts/vault_oidc_configure.sh +++ b/services/vault/scripts/vault_oidc_configure.sh @@ -84,8 +84,10 @@ configure_role() { return fi claims="$(build_bound_claims "${groups_claim}" "${role_groups}")" + claims_file="$(mktemp)" + printf '%s' "${claims}" > "${claims_file}" scopes_csv="$(printf '%s' "${scopes}" | tr ' ' ',' | tr -s ',' | sed 's/^,//;s/,$//')" - role_args="user_claim=${user_claim} oidc_scopes=${scopes_csv} token_policies=${role_policies} bound_audiences=${bound_audiences} bound_claims=${claims} bound_claims_type=${bound_claims_type}" + role_args="user_claim=${user_claim} oidc_scopes=${scopes_csv} token_policies=${role_policies} bound_audiences=${bound_audiences} bound_claims=@${claims_file} bound_claims_type=${bound_claims_type}" if [ -n "${groups_claim}" ]; then role_args="${role_args} groups_claim=${groups_claim}" fi @@ -100,6 +102,7 @@ configure_role() { IFS=$old_ifs log "configuring oidc role ${role_name}" vault write "auth/oidc/role/${role_name}" ${role_args} + rm -f "${claims_file}" } configure_role "admin" "${admin_group}" "${admin_policies}" From 3384533acdeff47d332f573824a539dfe49838a4 Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Wed, 14 Jan 2026 03:00:10 -0300 Subject: [PATCH 033/270] fix: resolve gitea mounts and bump portal job --- .../bstein-dev-home/portal-onboarding-e2e-test-job.yaml | 2 +- 
services/gitea/deployment.yaml | 6 ------ 2 files changed, 1 insertion(+), 7 deletions(-) diff --git a/services/bstein-dev-home/portal-onboarding-e2e-test-job.yaml b/services/bstein-dev-home/portal-onboarding-e2e-test-job.yaml index 18ef62a..b5fdc6d 100644 --- a/services/bstein-dev-home/portal-onboarding-e2e-test-job.yaml +++ b/services/bstein-dev-home/portal-onboarding-e2e-test-job.yaml @@ -2,7 +2,7 @@ apiVersion: batch/v1 kind: Job metadata: - name: portal-onboarding-e2e-test-11 + name: portal-onboarding-e2e-test-12 namespace: bstein-dev-home spec: backoffLimit: 0 diff --git a/services/gitea/deployment.yaml b/services/gitea/deployment.yaml index 13aef1e..83bd144 100644 --- a/services/gitea/deployment.yaml +++ b/services/gitea/deployment.yaml @@ -79,12 +79,6 @@ spec: - name: vault-scripts mountPath: /vault/scripts readOnly: true - - name: vault-secrets - mountPath: /vault/secrets - readOnly: true - - name: vault-scripts - mountPath: /vault/scripts - readOnly: true nodeSelector: node-role.kubernetes.io/worker: "true" affinity: From bb2a3ba904bbc23d1e4e587ba64c410176df972b Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Wed, 14 Jan 2026 03:11:53 -0300 Subject: [PATCH 034/270] fix(gitea): inline vault secrets --- services/gitea/deployment.yaml | 23 ++++++++++++----------- services/gitea/kustomization.yaml | 7 ------- services/gitea/scripts/gitea_vault_env.sh | 15 --------------- 3 files changed, 12 insertions(+), 33 deletions(-) delete mode 100644 services/gitea/scripts/gitea_vault_env.sh diff --git a/services/gitea/deployment.yaml b/services/gitea/deployment.yaml index 83bd144..4fa1ecb 100644 --- a/services/gitea/deployment.yaml +++ b/services/gitea/deployment.yaml @@ -33,7 +33,9 @@ spec: - -c - | set -euo pipefail - . 
/vault/scripts/gitea_vault_env.sh + CLIENT_ID="$(cat /vault/secrets/gitea-oidc__client_id)" + CLIENT_SECRET="$(cat /vault/secrets/gitea-oidc__client_secret)" + DISCOVERY_URL="$(cat /vault/secrets/gitea-oidc__openid_auto_discovery_url)" APPINI=/data/gitea/conf/app.ini BIN=/usr/local/bin/gitea @@ -76,9 +78,6 @@ spec: - name: vault-secrets mountPath: /vault/secrets readOnly: true - - name: vault-scripts - mountPath: /vault/scripts - readOnly: true nodeSelector: node-role.kubernetes.io/worker: "true" affinity: @@ -101,9 +100,12 @@ spec: image: gitea/gitea:1.23 command: ["/bin/sh", "-c"] args: - - >- - . /vault/scripts/gitea_vault_env.sh - && exec /usr/bin/entrypoint /usr/bin/s6-svscan /etc/s6 + - | + set -euo pipefail + export GITEA__security__SECRET_KEY="$(cat /vault/secrets/gitea-secret__SECRET_KEY)" + export GITEA__security__INTERNAL_TOKEN="$(cat /vault/secrets/gitea-secret__INTERNAL_TOKEN)" + export DB_PASS="$(cat /vault/secrets/gitea-db-secret__password)" + exec /usr/bin/entrypoint /usr/bin/s6-svscan /etc/s6 ports: - containerPort: 3000 name: http @@ -155,6 +157,9 @@ spec: volumeMounts: - name: gitea-data mountPath: /data + - name: vault-secrets + mountPath: /vault/secrets + readOnly: true volumes: - name: gitea-data persistentVolumeClaim: @@ -165,7 +170,3 @@ spec: readOnly: true volumeAttributes: secretProviderClass: gitea-vault - - name: vault-scripts - configMap: - name: gitea-vault-env - defaultMode: 0555 diff --git a/services/gitea/kustomization.yaml b/services/gitea/kustomization.yaml index 466e6ce..84a1b64 100644 --- a/services/gitea/kustomization.yaml +++ b/services/gitea/kustomization.yaml @@ -9,10 +9,3 @@ resources: - deployment.yaml - service.yaml - ingress.yaml -configMapGenerator: - - name: gitea-vault-env - namespace: gitea - files: - - gitea_vault_env.sh=scripts/gitea_vault_env.sh - options: - disableNameSuffixHash: true diff --git a/services/gitea/scripts/gitea_vault_env.sh b/services/gitea/scripts/gitea_vault_env.sh deleted file mode 100644 index 
0e4c4a8..0000000 --- a/services/gitea/scripts/gitea_vault_env.sh +++ /dev/null @@ -1,15 +0,0 @@ -#!/usr/bin/env sh -set -eu - -vault_dir="/vault/secrets" - -read_secret() { - cat "${vault_dir}/$1" -} - -export GITEA__security__SECRET_KEY="$(read_secret gitea-secret__SECRET_KEY)" -export GITEA__security__INTERNAL_TOKEN="$(read_secret gitea-secret__INTERNAL_TOKEN)" -export DB_PASS="$(read_secret gitea-db-secret__password)" -export CLIENT_ID="$(read_secret gitea-oidc__client_id)" -export CLIENT_SECRET="$(read_secret gitea-oidc__client_secret)" -export DISCOVERY_URL="$(read_secret gitea-oidc__openid_auto_discovery_url)" From 8d526e383f7e51778b079911db0e519f464a46b9 Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Wed, 14 Jan 2026 03:45:03 -0300 Subject: [PATCH 035/270] vault: send oidc role payload as json --- .../vault/scripts/vault_oidc_configure.sh | 55 +++++++++++++------ 1 file changed, 38 insertions(+), 17 deletions(-) diff --git a/services/vault/scripts/vault_oidc_configure.sh b/services/vault/scripts/vault_oidc_configure.sh index 3d14e52..99f5fd6 100644 --- a/services/vault/scripts/vault_oidc_configure.sh +++ b/services/vault/scripts/vault_oidc_configure.sh @@ -75,6 +75,28 @@ build_bound_claims() { printf '%s' "${json}" } +build_json_array() { + items="$1" + json="[" + first=1 + old_ifs=$IFS + IFS=, + for item in $items; do + item="$(printf '%s' "$item" | sed 's/^[[:space:]]*//;s/[[:space:]]*$//')" + if [ -z "${item}" ]; then + continue + fi + if [ "${first}" -eq 0 ]; then + json="${json}," + fi + json="${json}\"${item}\"" + first=0 + done + IFS=$old_ifs + json="${json}]" + printf '%s' "${json}" +} + configure_role() { role_name="$1" role_groups="$2" @@ -84,25 +106,24 @@ configure_role() { return fi claims="$(build_bound_claims "${groups_claim}" "${role_groups}")" - claims_file="$(mktemp)" - printf '%s' "${claims}" > "${claims_file}" scopes_csv="$(printf '%s' "${scopes}" | tr ' ' ',' | tr -s ',' | sed 's/^,//;s/,$//')" - role_args="user_claim=${user_claim} 
oidc_scopes=${scopes_csv} token_policies=${role_policies} bound_audiences=${bound_audiences} bound_claims=@${claims_file} bound_claims_type=${bound_claims_type}" - if [ -n "${groups_claim}" ]; then - role_args="${role_args} groups_claim=${groups_claim}" - fi - old_ifs=$IFS - IFS=, - for uri in $redirect_uris; do - trimmed="$(printf '%s' "$uri" | sed 's/^[[:space:]]*//;s/[[:space:]]*$//')" - if [ -n "${trimmed}" ]; then - role_args="${role_args} allowed_redirect_uris=${trimmed}" - fi - done - IFS=$old_ifs + redirect_json="$(build_json_array "${redirect_uris}")" + payload_file="$(mktemp)" + cat > "${payload_file}" < Date: Wed, 14 Jan 2026 05:07:23 -0300 Subject: [PATCH 036/270] vault(consumption): sync secrets via CSI --- .../vault-csi/secrets-store-csi-driver.yaml | 2 +- services/comms/atlasbot-deployment.yaml | 29 ++-- services/comms/bstein-force-leave-job.yaml | 19 +-- services/comms/comms-secrets-ensure-job.yaml | 101 ++++++------- services/comms/coturn.yaml | 23 ++- services/comms/guest-name-job.yaml | 32 +++-- services/comms/guest-register-deployment.yaml | 19 +-- services/comms/kustomization.yaml | 8 ++ services/comms/livekit-token-deployment.yaml | 30 +++- services/comms/livekit.yaml | 36 +++-- .../mas-admin-client-secret-ensure-job.yaml | 31 ++-- services/comms/mas-db-ensure-job.yaml | 39 +++-- services/comms/mas-deployment.yaml | 79 +++++------ .../comms/mas-local-users-ensure-job.yaml | 37 +++-- services/comms/othrys-kick-numeric-job.yaml | 25 +++- services/comms/pin-othrys-job.yaml | 25 +++- services/comms/reset-othrys-room-job.yaml | 25 +++- services/comms/scripts/comms_vault_env.sh | 27 ++++ services/comms/secretproviderclass.yaml | 134 ++++++++++++++++++ services/comms/seed-othrys-room.yaml | 28 ++-- services/comms/serviceaccount.yaml | 6 + .../synapse-seeder-admin-ensure-job.yaml | 25 +++- .../comms/synapse-signingkey-ensure-job.yaml | 28 +++- services/comms/synapse-user-seed-job.yaml | 35 +++-- services/comms/vault-sync-deployment.yaml | 34 +++++ 
services/harbor/kustomization.yaml | 3 + services/harbor/secretproviderclass.yaml | 87 ++++++++++++ services/harbor/serviceaccount.yaml | 6 + services/harbor/vault-sync-deployment.yaml | 34 +++++ .../harbor-oidc-secret-ensure-job.yaml | 27 ++-- services/keycloak/ldap-federation-job.yaml | 35 +++-- .../keycloak/logs-oidc-secret-ensure-job.yaml | 68 ++++++--- services/keycloak/mas-secrets-ensure-job.yaml | 65 ++++++--- services/keycloak/portal-e2e-client-job.yaml | 40 +++--- ...al-e2e-execute-actions-email-test-job.yaml | 28 ++-- .../portal-e2e-target-client-job.yaml | 30 ++-- ...al-e2e-token-exchange-permissions-job.yaml | 30 ++-- .../portal-e2e-token-exchange-test-job.yaml | 28 ++-- services/keycloak/realm-settings-job.yaml | 30 ++-- .../scripts/harbor_oidc_secret_ensure.sh | 19 ++- .../keycloak/scripts/keycloak_vault_env.sh | 1 + .../scripts/vault_oidc_secret_ensure.sh | 33 +++++ .../synapse-oidc-secret-ensure-job.yaml | 50 ++++--- services/keycloak/user-overrides-job.yaml | 30 ++-- .../vault-oidc-secret-ensure-job.yaml | 27 ++-- services/mailu/kustomization.yaml | 9 ++ services/mailu/mailu-sync-cronjob.yaml | 46 +++--- services/mailu/mailu-sync-job.yaml | 46 +++--- services/mailu/mailu-sync-listener.yaml | 46 +++--- services/mailu/scripts/mailu_vault_env.sh | 14 ++ services/mailu/secretproviderclass.yaml | 78 ++++++++++ services/mailu/serviceaccount.yaml | 6 + services/mailu/vault-sync-deployment.yaml | 34 +++++ services/nextcloud-mail-sync/cronjob.yaml | 51 ++++--- services/nextcloud/deployment.yaml | 109 ++++---------- services/nextcloud/kustomization.yaml | 7 + services/nextcloud/maintenance-cronjob.yaml | 34 +++-- .../nextcloud/scripts/nextcloud_vault_env.sh | 27 ++++ services/nextcloud/secretproviderclass.yaml | 45 ++++++ services/nextcloud/serviceaccount.yaml | 6 + .../vault/scripts/vault_k8s_auth_configure.sh | 107 +++++++------- 61 files changed, 1527 insertions(+), 686 deletions(-) create mode 100644 services/comms/scripts/comms_vault_env.sh create mode 
100644 services/comms/secretproviderclass.yaml create mode 100644 services/comms/serviceaccount.yaml create mode 100644 services/comms/vault-sync-deployment.yaml create mode 100644 services/harbor/secretproviderclass.yaml create mode 100644 services/harbor/serviceaccount.yaml create mode 100644 services/harbor/vault-sync-deployment.yaml create mode 100644 services/mailu/scripts/mailu_vault_env.sh create mode 100644 services/mailu/secretproviderclass.yaml create mode 100644 services/mailu/serviceaccount.yaml create mode 100644 services/mailu/vault-sync-deployment.yaml create mode 100644 services/nextcloud/scripts/nextcloud_vault_env.sh create mode 100644 services/nextcloud/secretproviderclass.yaml create mode 100644 services/nextcloud/serviceaccount.yaml diff --git a/infrastructure/vault-csi/secrets-store-csi-driver.yaml b/infrastructure/vault-csi/secrets-store-csi-driver.yaml index fec4758..0b249fc 100644 --- a/infrastructure/vault-csi/secrets-store-csi-driver.yaml +++ b/infrastructure/vault-csi/secrets-store-csi-driver.yaml @@ -16,5 +16,5 @@ spec: namespace: flux-system values: syncSecret: - enabled: false + enabled: true enableSecretRotation: false diff --git a/services/comms/atlasbot-deployment.yaml b/services/comms/atlasbot-deployment.yaml index f9e1f79..0622d32 100644 --- a/services/comms/atlasbot-deployment.yaml +++ b/services/comms/atlasbot-deployment.yaml @@ -27,7 +27,8 @@ spec: command: ["/bin/sh","-c"] args: - | - python /app/bot.py + . 
/vault/scripts/comms_vault_env.sh + exec python /app/bot.py env: - name: MATRIX_BASE value: http://othrys-synapse-matrix-synapse:8008 @@ -39,16 +40,6 @@ spec: value: http://victoria-metrics-single-server.monitoring.svc.cluster.local:8428 - name: BOT_USER value: atlasbot - - name: BOT_PASS - valueFrom: - secretKeyRef: - name: atlasbot-credentials-runtime - key: bot-password - - name: CHAT_API_KEY - valueFrom: - secretKeyRef: - name: chat-ai-keys-runtime - key: matrix - name: OLLAMA_URL value: https://chat.ai.bstein.dev/ - name: OLLAMA_MODEL @@ -67,6 +58,12 @@ spec: - name: kb mountPath: /kb readOnly: true + - name: vault-secrets + mountPath: /vault/secrets + readOnly: true + - name: vault-scripts + mountPath: /vault/scripts + readOnly: true volumes: - name: code configMap: @@ -85,3 +82,13 @@ spec: path: catalog/runbooks.json - key: atlas-http.mmd path: diagrams/atlas-http.mmd + - name: vault-secrets + csi: + driver: secrets-store.csi.k8s.io + readOnly: true + volumeAttributes: + secretProviderClass: comms-vault + - name: vault-scripts + configMap: + name: comms-vault-env + defaultMode: 0555 diff --git a/services/comms/bstein-force-leave-job.yaml b/services/comms/bstein-force-leave-job.yaml index 956330b..0c760a4 100644 --- a/services/comms/bstein-force-leave-job.yaml +++ b/services/comms/bstein-force-leave-job.yaml @@ -9,25 +9,26 @@ spec: template: spec: restartPolicy: Never + serviceAccountName: comms-vault volumes: - - name: mas-admin-client - secret: - secretName: mas-admin-client-runtime - items: - - key: client_secret - path: client_secret + - name: vault-secrets + csi: + driver: secrets-store.csi.k8s.io + readOnly: true + volumeAttributes: + secretProviderClass: comms-vault containers: - name: leave image: python:3.11-slim volumeMounts: - - name: mas-admin-client - mountPath: /etc/mas-admin-client + - name: vault-secrets + mountPath: /vault/secrets readOnly: true env: - name: MAS_ADMIN_CLIENT_ID value: 01KDXMVQBQ5JNY6SEJPZW6Z8BM - name: 
MAS_ADMIN_CLIENT_SECRET_FILE - value: /etc/mas-admin-client/client_secret + value: /vault/secrets/mas-admin-client-runtime__client_secret - name: MAS_TOKEN_URL value: http://matrix-authentication-service:8080/oauth2/token - name: MAS_ADMIN_API_BASE diff --git a/services/comms/comms-secrets-ensure-job.yaml b/services/comms/comms-secrets-ensure-job.yaml index dffb222..cc8ee02 100644 --- a/services/comms/comms-secrets-ensure-job.yaml +++ b/services/comms/comms-secrets-ensure-job.yaml @@ -20,73 +20,58 @@ spec: set -eu trap 'echo "comms-secrets-ensure failed"; sleep 300' ERR umask 077 + apk add --no-cache curl jq >/dev/null safe_pass() { head -c 32 /dev/urandom | base64 | tr -d '\n' | tr '+/' '-_' | tr -d '=' } - get_secret_value() { - ns="$1" - name="$2" - key="$3" - kubectl -n "${ns}" get secret "${name}" -o "jsonpath={.data.${key}}" 2>/dev/null | base64 -d 2>/dev/null || true - } - - ensure_secret_key() { - ns="$1" - name="$2" - key="$3" - value="$4" - if ! kubectl -n "${ns}" get secret "${name}" >/dev/null 2>&1; then - kubectl -n "${ns}" create secret generic "${name}" --from-literal="${key}=${value}" >/dev/null - return - fi - existing="$(kubectl -n "${ns}" get secret "${name}" -o "jsonpath={.data.${key}}" 2>/dev/null || true)" - if [ -z "${existing}" ]; then - b64="$(printf '%s' "${value}" | base64 | tr -d '\n')" - payload="$(printf '{"data":{"%s":"%s"}}' "${key}" "${b64}")" - kubectl -n "${ns}" patch secret "${name}" --type=merge -p "${payload}" >/dev/null - fi - } - - ensure_chat_secret() { - ns="$1" - if ! 
kubectl -n "${ns}" get secret chat-ai-keys-runtime >/dev/null 2>&1; then - kubectl -n "${ns}" create secret generic chat-ai-keys-runtime \ - --from-literal=matrix="${CHAT_KEY_MATRIX}" \ - --from-literal=homepage="${CHAT_KEY_HOMEPAGE}" >/dev/null - return - fi - ensure_secret_key "${ns}" chat-ai-keys-runtime matrix "${CHAT_KEY_MATRIX}" - ensure_secret_key "${ns}" chat-ai-keys-runtime homepage "${CHAT_KEY_HOMEPAGE}" - } - - CHAT_KEY_MATRIX="$(get_secret_value comms chat-ai-keys-runtime matrix)" - CHAT_KEY_HOMEPAGE="$(get_secret_value comms chat-ai-keys-runtime homepage)" - if [ -z "${CHAT_KEY_MATRIX}" ] || [ -z "${CHAT_KEY_HOMEPAGE}" ]; then - ALT_MATRIX="$(get_secret_value bstein-dev-home chat-ai-keys-runtime matrix)" - ALT_HOMEPAGE="$(get_secret_value bstein-dev-home chat-ai-keys-runtime homepage)" - [ -z "${CHAT_KEY_MATRIX}" ] && CHAT_KEY_MATRIX="${ALT_MATRIX}" - [ -z "${CHAT_KEY_HOMEPAGE}" ] && CHAT_KEY_HOMEPAGE="${ALT_HOMEPAGE}" + vault_addr="${VAULT_ADDR:-http://vault.vault.svc.cluster.local:8200}" + vault_role="${VAULT_ROLE:-comms-secrets}" + jwt="$(cat /var/run/secrets/kubernetes.io/serviceaccount/token)" + login_payload="$(jq -nc --arg jwt "${jwt}" --arg role "${vault_role}" '{jwt:$jwt, role:$role}')" + vault_token="$(curl -sS --request POST --data "${login_payload}" \ + "${vault_addr}/v1/auth/kubernetes/login" | jq -r '.auth.client_token')" + if [ -z "${vault_token}" ] || [ "${vault_token}" = "null" ]; then + echo "vault login failed" >&2 + exit 1 fi - [ -z "${CHAT_KEY_MATRIX}" ] && CHAT_KEY_MATRIX="$(safe_pass)" - [ -z "${CHAT_KEY_HOMEPAGE}" ] && CHAT_KEY_HOMEPAGE="$(safe_pass)" - ensure_chat_secret comms - ensure_chat_secret bstein-dev-home + vault_read() { + path="$1" + key="$2" + curl -sS -H "X-Vault-Token: ${vault_token}" \ + "${vault_addr}/v1/kv/data/atlas/${path}" | jq -r --arg key "${key}" '.data.data[$key] // empty' + } - ensure_secret_key comms turn-shared-secret TURN_STATIC_AUTH_SECRET "$(safe_pass)" - ensure_secret_key comms livekit-api primary 
"$(safe_pass)" - ensure_secret_key comms synapse-redis redis-password "$(safe_pass)" - ensure_secret_key comms synapse-macaroon macaroon_secret_key "$(safe_pass)" - ensure_secret_key comms atlasbot-credentials-runtime bot-password "$(safe_pass)" - ensure_secret_key comms atlasbot-credentials-runtime seeder-password "$(safe_pass)" + vault_write() { + path="$1" + key="$2" + value="$3" + payload="$(jq -nc --arg key "${key}" --arg value "${value}" '{data:{($key):$value}}')" + curl -sS -X POST -H "X-Vault-Token: ${vault_token}" \ + -d "${payload}" "${vault_addr}/v1/kv/data/atlas/${path}" >/dev/null + } - SYN_PASS="$(get_secret_value comms synapse-db POSTGRES_PASSWORD)" - if [ -z "${SYN_PASS}" ]; then - SYN_PASS="$(safe_pass)" - kubectl -n comms create secret generic synapse-db --from-literal=POSTGRES_PASSWORD="${SYN_PASS}" >/dev/null - fi + ensure_key() { + path="$1" + key="$2" + current="$(vault_read "${path}" "${key}")" + if [ -z "${current}" ]; then + current="$(safe_pass)" + vault_write "${path}" "${key}" "${current}" + fi + printf '%s' "${current}" + } + + ensure_key "comms/turn-shared-secret" "TURN_STATIC_AUTH_SECRET" >/dev/null + ensure_key "comms/livekit-api" "primary" >/dev/null + ensure_key "comms/synapse-redis" "redis-password" >/dev/null + ensure_key "comms/synapse-macaroon" "macaroon_secret_key" >/dev/null + ensure_key "comms/atlasbot-credentials-runtime" "bot-password" >/dev/null + ensure_key "comms/atlasbot-credentials-runtime" "seeder-password" >/dev/null + + SYN_PASS="$(ensure_key "comms/synapse-db" "POSTGRES_PASSWORD")" POD_NAME="$(kubectl -n postgres get pods -l app=postgres -o jsonpath='{.items[0].metadata.name}')" if [ -z "${POD_NAME}" ]; then diff --git a/services/comms/coturn.yaml b/services/comms/coturn.yaml index 12fa78a..ac7e57b 100644 --- a/services/comms/coturn.yaml +++ b/services/comms/coturn.yaml @@ -15,6 +15,7 @@ spec: labels: app: coturn spec: + serviceAccountName: comms-vault nodeSelector: hardware: rpi5 affinity: @@ -33,6 +34,7 @@ spec: 
- /bin/sh - -c - | + . /vault/scripts/comms_vault_env.sh exec /usr/bin/turnserver \ --no-cli \ --fingerprint \ @@ -57,11 +59,6 @@ spec: fieldPath: status.podIP - name: TURN_PUBLIC_IP value: "38.28.125.112" - - name: TURN_STATIC_AUTH_SECRET - valueFrom: - secretKeyRef: - name: turn-shared-secret - key: TURN_STATIC_AUTH_SECRET ports: - name: turn-udp containerPort: 3478 @@ -76,6 +73,12 @@ spec: - name: tls mountPath: /etc/coturn/tls readOnly: true + - name: vault-secrets + mountPath: /vault/secrets + readOnly: true + - name: vault-scripts + mountPath: /vault/scripts + readOnly: true resources: requests: cpu: 200m @@ -87,6 +90,16 @@ spec: - name: tls secret: secretName: turn-live-tls + - name: vault-secrets + csi: + driver: secrets-store.csi.k8s.io + readOnly: true + volumeAttributes: + secretProviderClass: comms-vault + - name: vault-scripts + configMap: + name: comms-vault-env + defaultMode: 0555 --- apiVersion: v1 kind: Service diff --git a/services/comms/guest-name-job.yaml b/services/comms/guest-name-job.yaml index 156617d..1f9004e 100644 --- a/services/comms/guest-name-job.yaml +++ b/services/comms/guest-name-job.yaml @@ -16,19 +16,27 @@ spec: template: spec: restartPolicy: Never + serviceAccountName: comms-vault volumes: - - name: mas-admin-client - secret: - secretName: mas-admin-client-runtime - items: - - key: client_secret - path: client_secret + - name: vault-secrets + csi: + driver: secrets-store.csi.k8s.io + readOnly: true + volumeAttributes: + secretProviderClass: comms-vault + - name: vault-scripts + configMap: + name: comms-vault-env + defaultMode: 0555 containers: - name: rename image: python:3.11-slim volumeMounts: - - name: mas-admin-client - mountPath: /etc/mas-admin-client + - name: vault-secrets + mountPath: /vault/secrets + readOnly: true + - name: vault-scripts + mountPath: /vault/scripts readOnly: true env: - name: SYNAPSE_BASE @@ -36,7 +44,7 @@ spec: - name: MAS_ADMIN_CLIENT_ID value: 01KDXMVQBQ5JNY6SEJPZW6Z8BM - name: 
MAS_ADMIN_CLIENT_SECRET_FILE - value: /etc/mas-admin-client/client_secret + value: /vault/secrets/mas-admin-client-runtime__client_secret - name: MAS_ADMIN_API_BASE value: http://matrix-authentication-service:8081/api/admin/v1 - name: MAS_TOKEN_URL @@ -51,16 +59,12 @@ spec: value: synapse - name: PGUSER value: synapse - - name: PGPASSWORD - valueFrom: - secretKeyRef: - name: synapse-db - key: POSTGRES_PASSWORD command: - /bin/sh - -c - | set -euo pipefail + . /vault/scripts/comms_vault_env.sh pip install --no-cache-dir requests psycopg2-binary >/dev/null python - <<'PY' import base64 diff --git a/services/comms/guest-register-deployment.yaml b/services/comms/guest-register-deployment.yaml index 284cc42..bdf5c37 100644 --- a/services/comms/guest-register-deployment.yaml +++ b/services/comms/guest-register-deployment.yaml @@ -17,6 +17,7 @@ spec: labels: app.kubernetes.io/name: matrix-guest-register spec: + serviceAccountName: comms-vault securityContext: runAsNonRoot: true runAsUser: 10001 @@ -42,7 +43,7 @@ spec: - name: MAS_ADMIN_CLIENT_ID value: 01KDXMVQBQ5JNY6SEJPZW6Z8BM - name: MAS_ADMIN_CLIENT_SECRET_FILE - value: /etc/mas/admin-client/client_secret + value: /vault/secrets/mas-admin-client-runtime__client_secret - name: MAS_ADMIN_API_BASE value: http://matrix-authentication-service:8081/api/admin/v1 - name: SYNAPSE_BASE @@ -83,8 +84,8 @@ spec: mountPath: /app/server.py subPath: server.py readOnly: true - - name: mas-admin-client - mountPath: /etc/mas/admin-client + - name: vault-secrets + mountPath: /vault/secrets readOnly: true command: - python @@ -96,9 +97,9 @@ spec: items: - key: server.py path: server.py - - name: mas-admin-client - secret: - secretName: mas-admin-client-runtime - items: - - key: client_secret - path: client_secret + - name: vault-secrets + csi: + driver: secrets-store.csi.k8s.io + readOnly: true + volumeAttributes: + secretProviderClass: comms-vault diff --git a/services/comms/kustomization.yaml b/services/comms/kustomization.yaml index 
5e50d0f..b0cc0da 100644 --- a/services/comms/kustomization.yaml +++ b/services/comms/kustomization.yaml @@ -4,6 +4,8 @@ kind: Kustomization namespace: comms resources: - namespace.yaml + - serviceaccount.yaml + - secretproviderclass.yaml - mas-configmap.yaml - helmrelease.yaml - livekit-config.yaml @@ -18,6 +20,7 @@ resources: - comms-secrets-ensure-rbac.yaml - mas-db-ensure-rbac.yaml - synapse-signingkey-ensure-rbac.yaml + - vault-sync-deployment.yaml - mas-admin-client-secret-ensure-job.yaml - mas-db-ensure-job.yaml - comms-secrets-ensure-job.yaml @@ -40,6 +43,11 @@ resources: - matrix-ingress.yaml configMapGenerator: + - name: comms-vault-env + files: + - comms_vault_env.sh=scripts/comms_vault_env.sh + options: + disableNameSuffixHash: true - name: matrix-guest-register files: - server.py=scripts/guest-register/server.py diff --git a/services/comms/livekit-token-deployment.yaml b/services/comms/livekit-token-deployment.yaml index 1b4cdca..750872c 100644 --- a/services/comms/livekit-token-deployment.yaml +++ b/services/comms/livekit-token-deployment.yaml @@ -15,6 +15,7 @@ spec: labels: app: livekit-token-service spec: + serviceAccountName: comms-vault nodeSelector: hardware: rpi5 affinity: @@ -33,21 +34,29 @@ spec: containers: - name: token-service image: ghcr.io/element-hq/lk-jwt-service:0.3.0 + command: + - /bin/sh + - -c + - | + . 
/vault/scripts/comms_vault_env.sh + exec /lk-jwt-service env: - name: LIVEKIT_URL value: wss://kit.live.bstein.dev/livekit/sfu - name: LIVEKIT_KEY value: primary - - name: LIVEKIT_SECRET - valueFrom: - secretKeyRef: - name: livekit-api - key: primary - name: LIVEKIT_FULL_ACCESS_HOMESERVERS value: live.bstein.dev ports: - containerPort: 8080 name: http + volumeMounts: + - name: vault-secrets + mountPath: /vault/secrets + readOnly: true + - name: vault-scripts + mountPath: /vault/scripts + readOnly: true resources: requests: cpu: 50m @@ -55,6 +64,17 @@ spec: limits: cpu: 300m memory: 256Mi + volumes: + - name: vault-secrets + csi: + driver: secrets-store.csi.k8s.io + readOnly: true + volumeAttributes: + secretProviderClass: comms-vault + - name: vault-scripts + configMap: + name: comms-vault-env + defaultMode: 0555 --- apiVersion: v1 kind: Service diff --git a/services/comms/livekit.yaml b/services/comms/livekit.yaml index 46d57f8..adad92a 100644 --- a/services/comms/livekit.yaml +++ b/services/comms/livekit.yaml @@ -17,6 +17,7 @@ spec: labels: app: livekit spec: + serviceAccountName: comms-vault enableServiceLinks: false nodeSelector: hardware: rpi5 @@ -36,16 +37,11 @@ spec: args: - | set -euo pipefail + . 
/vault/scripts/comms_vault_env.sh umask 077 TURN_PASSWORD_ESCAPED="$(printf '%s' "${TURN_PASSWORD}" | sed 's/[\\/&]/\\&/g')" sed "s/@@TURN_PASSWORD@@/${TURN_PASSWORD_ESCAPED}/g" /etc/livekit-template/livekit.yaml > /etc/livekit/livekit.yaml chmod 0644 /etc/livekit/livekit.yaml - env: - - name: TURN_PASSWORD - valueFrom: - secretKeyRef: - name: turn-shared-secret - key: TURN_STATIC_AUTH_SECRET volumeMounts: - name: config-template mountPath: /etc/livekit-template @@ -53,6 +49,12 @@ spec: - name: config mountPath: /etc/livekit readOnly: false + - name: vault-secrets + mountPath: /vault/secrets + readOnly: true + - name: vault-scripts + mountPath: /vault/scripts + readOnly: true containers: - name: livekit image: livekit/livekit-server:v1.9.0 @@ -61,6 +63,7 @@ spec: - -c - | set -euo pipefail + . /vault/scripts/comms_vault_env.sh umask 077 printf "%s: %s\n" "${LIVEKIT_API_KEY_ID}" "${LIVEKIT_API_SECRET}" > /var/run/livekit/keys chmod 600 /var/run/livekit/keys @@ -68,11 +71,6 @@ spec: env: - name: LIVEKIT_API_KEY_ID value: primary - - name: LIVEKIT_API_SECRET - valueFrom: - secretKeyRef: - name: livekit-api - key: primary ports: - containerPort: 7880 name: http @@ -92,6 +90,12 @@ spec: readOnly: true - name: runtime-keys mountPath: /var/run/livekit + - name: vault-secrets + mountPath: /vault/secrets + readOnly: true + - name: vault-scripts + mountPath: /vault/scripts + readOnly: true resources: requests: cpu: 500m @@ -110,6 +114,16 @@ spec: emptyDir: {} - name: runtime-keys emptyDir: {} + - name: vault-secrets + csi: + driver: secrets-store.csi.k8s.io + readOnly: true + volumeAttributes: + secretProviderClass: comms-vault + - name: vault-scripts + configMap: + name: comms-vault-env + defaultMode: 0555 --- apiVersion: v1 kind: Service diff --git a/services/comms/mas-admin-client-secret-ensure-job.yaml b/services/comms/mas-admin-client-secret-ensure-job.yaml index 9b76290..a84f68e 100644 --- a/services/comms/mas-admin-client-secret-ensure-job.yaml +++ 
b/services/comms/mas-admin-client-secret-ensure-job.yaml @@ -67,18 +67,29 @@ spec: args: - | set -euo pipefail - if kubectl -n comms get secret mas-admin-client-runtime >/dev/null 2>&1; then - if kubectl -n comms get secret mas-admin-client-runtime -o jsonpath='{.data.client_secret}' 2>/dev/null | grep -q .; then - exit 0 - fi - else - kubectl -n comms create secret generic mas-admin-client-runtime \ - --from-file=client_secret=/work/client_secret >/dev/null + apk add --no-cache curl jq >/dev/null + + vault_addr="${VAULT_ADDR:-http://vault.vault.svc.cluster.local:8200}" + vault_role="${VAULT_ROLE:-comms-secrets}" + jwt="$(cat /var/run/secrets/kubernetes.io/serviceaccount/token)" + login_payload="$(jq -nc --arg jwt "${jwt}" --arg role "${vault_role}" '{jwt:$jwt, role:$role}')" + vault_token="$(curl -sS --request POST --data "${login_payload}" \ + "${vault_addr}/v1/auth/kubernetes/login" | jq -r '.auth.client_token')" + if [ -z "${vault_token}" ] || [ "${vault_token}" = "null" ]; then + echo "vault login failed" >&2 + exit 1 + fi + + current="$(curl -sS -H "X-Vault-Token: ${vault_token}" \ + "${vault_addr}/v1/kv/data/atlas/comms/mas-admin-client-runtime" | jq -r '.data.data.client_secret // empty')" + if [ -n "${current}" ]; then exit 0 fi - secret_b64="$(base64 /work/client_secret | tr -d '\n')" - payload="$(printf '{"data":{"client_secret":"%s"}}' "${secret_b64}")" - kubectl -n comms patch secret mas-admin-client-runtime --type=merge -p "${payload}" >/dev/null + + value="$(cat /work/client_secret)" + payload="$(jq -nc --arg value "${value}" '{data:{client_secret:$value}}')" + curl -sS -X POST -H "X-Vault-Token: ${vault_token}" \ + -d "${payload}" "${vault_addr}/v1/kv/data/atlas/comms/mas-admin-client-runtime" >/dev/null volumeMounts: - name: work mountPath: /work diff --git a/services/comms/mas-db-ensure-job.yaml b/services/comms/mas-db-ensure-job.yaml index 1d1492e..28e7825 100644 --- a/services/comms/mas-db-ensure-job.yaml +++ 
b/services/comms/mas-db-ensure-job.yaml @@ -24,18 +24,35 @@ spec: head -c 32 /dev/urandom | base64 | tr -d '\n' | tr '+/' '-_' | tr -d '=' } - EXISTING_B64="$(kubectl -n comms get secret mas-db -o jsonpath='{.data.password}' 2>/dev/null || true)" - if [ -n "${EXISTING_B64}" ]; then - MAS_PASS="$(printf '%s' "${EXISTING_B64}" | base64 -d)" - if printf '%s' "${MAS_PASS}" | grep -Eq '[^A-Za-z0-9_-]'; then - MAS_PASS="$(safe_pass)" - MAS_B64="$(printf '%s' "${MAS_PASS}" | base64 | tr -d '\n')" - payload="$(printf '{"data":{"password":"%s"}}' "${MAS_B64}")" - kubectl -n comms patch secret mas-db --type=merge -p "${payload}" >/dev/null - fi - else + apk add --no-cache curl jq >/dev/null + + vault_addr="${VAULT_ADDR:-http://vault.vault.svc.cluster.local:8200}" + vault_role="${VAULT_ROLE:-comms-secrets}" + jwt="$(cat /var/run/secrets/kubernetes.io/serviceaccount/token)" + login_payload="$(jq -nc --arg jwt "${jwt}" --arg role "${vault_role}" '{jwt:$jwt, role:$role}')" + vault_token="$(curl -sS --request POST --data "${login_payload}" \ + "${vault_addr}/v1/auth/kubernetes/login" | jq -r '.auth.client_token')" + if [ -z "${vault_token}" ] || [ "${vault_token}" = "null" ]; then + echo "vault login failed" >&2 + exit 1 + fi + + vault_read() { + curl -sS -H "X-Vault-Token: ${vault_token}" \ + "${vault_addr}/v1/kv/data/atlas/comms/mas-db" | jq -r '.data.data.password // empty' + } + + vault_write() { + value="$1" + payload="$(jq -nc --arg value "${value}" '{data:{password:$value}}')" + curl -sS -X POST -H "X-Vault-Token: ${vault_token}" \ + -d "${payload}" "${vault_addr}/v1/kv/data/atlas/comms/mas-db" >/dev/null + } + + MAS_PASS="$(vault_read)" + if [ -z "${MAS_PASS}" ] || printf '%s' "${MAS_PASS}" | grep -Eq '[^A-Za-z0-9_-]'; then MAS_PASS="$(safe_pass)" - kubectl -n comms create secret generic mas-db --from-literal=password="${MAS_PASS}" >/dev/null + vault_write "${MAS_PASS}" fi POD_NAME="$(kubectl -n postgres get pods -l app=postgres -o jsonpath='{.items[0].metadata.name}')" 
diff --git a/services/comms/mas-deployment.yaml b/services/comms/mas-deployment.yaml index 2117c17..c7e6821 100644 --- a/services/comms/mas-deployment.yaml +++ b/services/comms/mas-deployment.yaml @@ -18,6 +18,7 @@ spec: app: matrix-authentication-service spec: enableServiceLinks: false + serviceAccountName: comms-vault nodeSelector: hardware: rpi5 affinity: @@ -36,6 +37,7 @@ spec: args: - | set -euo pipefail + . /vault/scripts/comms_vault_env.sh umask 077 DB_PASS_ESCAPED="$(printf '%s' "${MAS_DB_PASSWORD}" | sed 's/[\\/&]/\\&/g')" MATRIX_SECRET_ESCAPED="$(printf '%s' "${MATRIX_SHARED_SECRET}" | sed 's/[\\/&]/\\&/g')" @@ -47,22 +49,6 @@ spec: -e "s/@@KEYCLOAK_CLIENT_SECRET@@/${KC_SECRET_ESCAPED}/g" \ /etc/mas/config.yaml > /rendered/config.yaml chmod 0644 /rendered/config.yaml - env: - - name: MAS_DB_PASSWORD - valueFrom: - secretKeyRef: - name: mas-db - key: password - - name: MATRIX_SHARED_SECRET - valueFrom: - secretKeyRef: - name: mas-secrets-runtime - key: matrix_shared_secret - - name: KEYCLOAK_CLIENT_SECRET - valueFrom: - secretKeyRef: - name: mas-secrets-runtime - key: keycloak_client_secret volumeMounts: - name: config mountPath: /etc/mas/config.yaml @@ -71,6 +57,12 @@ spec: - name: rendered mountPath: /rendered readOnly: false + - name: vault-secrets + mountPath: /vault/secrets + readOnly: true + - name: vault-scripts + mountPath: /vault/scripts + readOnly: true containers: - name: mas image: ghcr.io/element-hq/matrix-authentication-service:1.8.0 @@ -86,14 +78,25 @@ spec: - name: rendered mountPath: /rendered readOnly: true - - name: secrets - mountPath: /etc/mas/secrets + - name: vault-secrets + mountPath: /etc/mas/secrets/encryption + subPath: mas-secrets-runtime__encryption readOnly: true - - name: admin-client - mountPath: /etc/mas/admin-client + - name: vault-secrets + mountPath: /etc/mas/secrets/matrix_shared_secret + subPath: mas-secrets-runtime__matrix_shared_secret readOnly: true - - name: keys - mountPath: /etc/mas/keys + - name: vault-secrets + 
mountPath: /etc/mas/secrets/keycloak_client_secret + subPath: mas-secrets-runtime__keycloak_client_secret + readOnly: true + - name: vault-secrets + mountPath: /etc/mas/keys/rsa_key + subPath: mas-secrets-runtime__rsa_key + readOnly: true + - name: vault-secrets + mountPath: /etc/mas/admin-client/client_secret + subPath: mas-admin-client-runtime__client_secret readOnly: true resources: requests: @@ -111,28 +114,16 @@ spec: path: config.yaml - name: rendered emptyDir: {} - - name: secrets - secret: - secretName: mas-secrets-runtime - items: - - key: encryption - path: encryption - - key: matrix_shared_secret - path: matrix_shared_secret - - key: keycloak_client_secret - path: keycloak_client_secret - - name: keys - secret: - secretName: mas-secrets-runtime - items: - - key: rsa_key - path: rsa_key - - name: admin-client - secret: - secretName: mas-admin-client-runtime - items: - - key: client_secret - path: client_secret + - name: vault-secrets + csi: + driver: secrets-store.csi.k8s.io + readOnly: true + volumeAttributes: + secretProviderClass: comms-vault + - name: vault-scripts + configMap: + name: comms-vault-env + defaultMode: 0555 --- apiVersion: v1 kind: Service diff --git a/services/comms/mas-local-users-ensure-job.yaml b/services/comms/mas-local-users-ensure-job.yaml index 7853763..b81b94d 100644 --- a/services/comms/mas-local-users-ensure-job.yaml +++ b/services/comms/mas-local-users-ensure-job.yaml @@ -10,48 +10,47 @@ spec: template: spec: restartPolicy: Never + serviceAccountName: comms-vault volumes: - - name: mas-admin-client - secret: - secretName: mas-admin-client-runtime - items: - - key: client_secret - path: client_secret + - name: vault-secrets + csi: + driver: secrets-store.csi.k8s.io + readOnly: true + volumeAttributes: + secretProviderClass: comms-vault + - name: vault-scripts + configMap: + name: comms-vault-env + defaultMode: 0555 containers: - name: ensure image: python:3.11-slim volumeMounts: - - name: mas-admin-client - mountPath: 
/etc/mas-admin-client + - name: vault-secrets + mountPath: /vault/secrets + readOnly: true + - name: vault-scripts + mountPath: /vault/scripts readOnly: true env: - name: MAS_ADMIN_CLIENT_ID value: 01KDXMVQBQ5JNY6SEJPZW6Z8BM - name: MAS_ADMIN_CLIENT_SECRET_FILE - value: /etc/mas-admin-client/client_secret + value: /vault/secrets/mas-admin-client-runtime__client_secret - name: MAS_TOKEN_URL value: http://matrix-authentication-service:8080/oauth2/token - name: MAS_ADMIN_API_BASE value: http://matrix-authentication-service:8081/api/admin/v1 - name: SEEDER_USER value: othrys-seeder - - name: SEEDER_PASS - valueFrom: - secretKeyRef: - name: atlasbot-credentials-runtime - key: seeder-password - name: BOT_USER value: atlasbot - - name: BOT_PASS - valueFrom: - secretKeyRef: - name: atlasbot-credentials-runtime - key: bot-password command: - /bin/sh - -c - | set -euo pipefail + . /vault/scripts/comms_vault_env.sh pip install --no-cache-dir requests >/dev/null python - <<'PY' import base64 diff --git a/services/comms/othrys-kick-numeric-job.yaml b/services/comms/othrys-kick-numeric-job.yaml index 8f02bbb..df96b9e 100644 --- a/services/comms/othrys-kick-numeric-job.yaml +++ b/services/comms/othrys-kick-numeric-job.yaml @@ -9,6 +9,7 @@ spec: template: spec: restartPolicy: Never + serviceAccountName: comms-vault containers: - name: kick image: python:3.11-slim @@ -23,16 +24,12 @@ spec: value: "#othrys:live.bstein.dev" - name: SEEDER_USER value: othrys-seeder - - name: SEEDER_PASS - valueFrom: - secretKeyRef: - name: atlasbot-credentials-runtime - key: seeder-password command: - /bin/sh - -c - | set -euo pipefail + . 
/vault/scripts/comms_vault_env.sh pip install --no-cache-dir requests >/dev/null python - <<'PY' import os @@ -113,3 +110,21 @@ spec: if is_numeric(user_id): kick(token, room_id, user_id) PY + volumeMounts: + - name: vault-secrets + mountPath: /vault/secrets + readOnly: true + - name: vault-scripts + mountPath: /vault/scripts + readOnly: true + volumes: + - name: vault-secrets + csi: + driver: secrets-store.csi.k8s.io + readOnly: true + volumeAttributes: + secretProviderClass: comms-vault + - name: vault-scripts + configMap: + name: comms-vault-env + defaultMode: 0555 diff --git a/services/comms/pin-othrys-job.yaml b/services/comms/pin-othrys-job.yaml index 3639194..babb6d1 100644 --- a/services/comms/pin-othrys-job.yaml +++ b/services/comms/pin-othrys-job.yaml @@ -16,6 +16,7 @@ spec: template: spec: restartPolicy: Never + serviceAccountName: comms-vault containers: - name: pin image: python:3.11-slim @@ -26,16 +27,12 @@ spec: value: http://matrix-authentication-service:8080 - name: SEEDER_USER value: othrys-seeder - - name: SEEDER_PASS - valueFrom: - secretKeyRef: - name: atlasbot-credentials-runtime - key: seeder-password command: - /bin/sh - -c - | set -euo pipefail + . 
/vault/scripts/comms_vault_env.sh pip install --no-cache-dir requests >/dev/null python - <<'PY' import os, requests, urllib.parse @@ -121,3 +118,21 @@ spec: eid = send(room_id, token, MESSAGE) pin(room_id, token, eid) PY + volumeMounts: + - name: vault-secrets + mountPath: /vault/secrets + readOnly: true + - name: vault-scripts + mountPath: /vault/scripts + readOnly: true + volumes: + - name: vault-secrets + csi: + driver: secrets-store.csi.k8s.io + readOnly: true + volumeAttributes: + secretProviderClass: comms-vault + - name: vault-scripts + configMap: + name: comms-vault-env + defaultMode: 0555 diff --git a/services/comms/reset-othrys-room-job.yaml b/services/comms/reset-othrys-room-job.yaml index dd056c3..6e20979 100644 --- a/services/comms/reset-othrys-room-job.yaml +++ b/services/comms/reset-othrys-room-job.yaml @@ -16,6 +16,7 @@ spec: template: spec: restartPolicy: Never + serviceAccountName: comms-vault containers: - name: reset image: python:3.11-slim @@ -34,11 +35,6 @@ spec: value: "Invite guests: share https://live.bstein.dev/#/room/#othrys:live.bstein.dev?action=join and choose 'Continue' -> 'Join as guest'." - name: SEEDER_USER value: othrys-seeder - - name: SEEDER_PASS - valueFrom: - secretKeyRef: - name: atlasbot-credentials-runtime - key: seeder-password - name: BOT_USER value: atlasbot command: @@ -46,6 +42,7 @@ spec: - -c - | set -euo pipefail + . 
/vault/scripts/comms_vault_env.sh pip install --no-cache-dir requests >/dev/null python - <<'PY' import os @@ -264,3 +261,21 @@ spec: print(f"old_room_id={old_room_id}") print(f"new_room_id={new_room_id}") PY + volumeMounts: + - name: vault-secrets + mountPath: /vault/secrets + readOnly: true + - name: vault-scripts + mountPath: /vault/scripts + readOnly: true + volumes: + - name: vault-secrets + csi: + driver: secrets-store.csi.k8s.io + readOnly: true + volumeAttributes: + secretProviderClass: comms-vault + - name: vault-scripts + configMap: + name: comms-vault-env + defaultMode: 0555 diff --git a/services/comms/scripts/comms_vault_env.sh b/services/comms/scripts/comms_vault_env.sh new file mode 100644 index 0000000..98b3fc4 --- /dev/null +++ b/services/comms/scripts/comms_vault_env.sh @@ -0,0 +1,27 @@ +#!/usr/bin/env sh +set -eu + +vault_dir="/vault/secrets" + +read_secret() { + cat "${vault_dir}/$1" +} + +export TURN_STATIC_AUTH_SECRET="$(read_secret turn-shared-secret__TURN_STATIC_AUTH_SECRET)" +export TURN_PASSWORD="${TURN_STATIC_AUTH_SECRET}" + +export LIVEKIT_API_SECRET="$(read_secret livekit-api__primary)" +export LIVEKIT_SECRET="${LIVEKIT_API_SECRET}" + +export BOT_PASS="$(read_secret atlasbot-credentials-runtime__bot-password)" +export SEEDER_PASS="$(read_secret atlasbot-credentials-runtime__seeder-password)" + +export CHAT_API_KEY="$(read_secret chat-ai-keys-runtime__matrix)" +export CHAT_API_HOMEPAGE="$(read_secret chat-ai-keys-runtime__homepage)" + +export MAS_ADMIN_CLIENT_SECRET_FILE="${vault_dir}/mas-admin-client-runtime__client_secret" +export PGPASSWORD="$(read_secret synapse-db__POSTGRES_PASSWORD)" + +export MAS_DB_PASSWORD="$(read_secret mas-db__password)" +export MATRIX_SHARED_SECRET="$(read_secret mas-secrets-runtime__matrix_shared_secret)" +export KEYCLOAK_CLIENT_SECRET="$(read_secret mas-secrets-runtime__keycloak_client_secret)" diff --git a/services/comms/secretproviderclass.yaml b/services/comms/secretproviderclass.yaml new file mode 100644 
index 0000000..971d408 --- /dev/null +++ b/services/comms/secretproviderclass.yaml @@ -0,0 +1,134 @@ +# services/comms/secretproviderclass.yaml +apiVersion: secrets-store.csi.x-k8s.io/v1 +kind: SecretProviderClass +metadata: + name: comms-vault + namespace: comms +spec: + provider: vault + parameters: + vaultAddress: "http://vault.vault.svc.cluster.local:8200" + roleName: "comms" + objects: | + - objectName: "turn-shared-secret__TURN_STATIC_AUTH_SECRET" + secretPath: "kv/data/atlas/comms/turn-shared-secret" + secretKey: "TURN_STATIC_AUTH_SECRET" + - objectName: "livekit-api__primary" + secretPath: "kv/data/atlas/comms/livekit-api" + secretKey: "primary" + - objectName: "synapse-db__POSTGRES_PASSWORD" + secretPath: "kv/data/atlas/comms/synapse-db" + secretKey: "POSTGRES_PASSWORD" + - objectName: "synapse-redis__redis-password" + secretPath: "kv/data/atlas/comms/synapse-redis" + secretKey: "redis-password" + - objectName: "synapse-macaroon__macaroon_secret_key" + secretPath: "kv/data/atlas/comms/synapse-macaroon" + secretKey: "macaroon_secret_key" + - objectName: "atlasbot-credentials-runtime__bot-password" + secretPath: "kv/data/atlas/comms/atlasbot-credentials-runtime" + secretKey: "bot-password" + - objectName: "atlasbot-credentials-runtime__seeder-password" + secretPath: "kv/data/atlas/comms/atlasbot-credentials-runtime" + secretKey: "seeder-password" + - objectName: "chat-ai-keys-runtime__matrix" + secretPath: "kv/data/atlas/shared/chat-ai-keys-runtime" + secretKey: "matrix" + - objectName: "chat-ai-keys-runtime__homepage" + secretPath: "kv/data/atlas/shared/chat-ai-keys-runtime" + secretKey: "homepage" + - objectName: "mas-admin-client-runtime__client_secret" + secretPath: "kv/data/atlas/comms/mas-admin-client-runtime" + secretKey: "client_secret" + - objectName: "mas-db__password" + secretPath: "kv/data/atlas/comms/mas-db" + secretKey: "password" + - objectName: "mas-secrets-runtime__encryption" + secretPath: "kv/data/atlas/comms/mas-secrets-runtime" + 
secretKey: "encryption" + - objectName: "mas-secrets-runtime__matrix_shared_secret" + secretPath: "kv/data/atlas/comms/mas-secrets-runtime" + secretKey: "matrix_shared_secret" + - objectName: "mas-secrets-runtime__keycloak_client_secret" + secretPath: "kv/data/atlas/comms/mas-secrets-runtime" + secretKey: "keycloak_client_secret" + - objectName: "mas-secrets-runtime__rsa_key" + secretPath: "kv/data/atlas/comms/mas-secrets-runtime" + secretKey: "rsa_key" + - objectName: "othrys-synapse-signingkey__signing.key" + secretPath: "kv/data/atlas/comms/othrys-synapse-signingkey" + secretKey: "signing.key" + - objectName: "synapse-oidc__client-secret" + secretPath: "kv/data/atlas/comms/synapse-oidc" + secretKey: "client-secret" + secretObjects: + - secretName: turn-shared-secret + type: Opaque + data: + - objectName: turn-shared-secret__TURN_STATIC_AUTH_SECRET + key: TURN_STATIC_AUTH_SECRET + - secretName: livekit-api + type: Opaque + data: + - objectName: livekit-api__primary + key: primary + - secretName: synapse-db + type: Opaque + data: + - objectName: synapse-db__POSTGRES_PASSWORD + key: POSTGRES_PASSWORD + - secretName: synapse-redis + type: Opaque + data: + - objectName: synapse-redis__redis-password + key: redis-password + - secretName: synapse-macaroon + type: Opaque + data: + - objectName: synapse-macaroon__macaroon_secret_key + key: macaroon_secret_key + - secretName: atlasbot-credentials-runtime + type: Opaque + data: + - objectName: atlasbot-credentials-runtime__bot-password + key: bot-password + - objectName: atlasbot-credentials-runtime__seeder-password + key: seeder-password + - secretName: chat-ai-keys-runtime + type: Opaque + data: + - objectName: chat-ai-keys-runtime__matrix + key: matrix + - objectName: chat-ai-keys-runtime__homepage + key: homepage + - secretName: mas-admin-client-runtime + type: Opaque + data: + - objectName: mas-admin-client-runtime__client_secret + key: client_secret + - secretName: mas-db + type: Opaque + data: + - objectName: 
mas-db__password + key: password + - secretName: mas-secrets-runtime + type: Opaque + data: + - objectName: mas-secrets-runtime__encryption + key: encryption + - objectName: mas-secrets-runtime__matrix_shared_secret + key: matrix_shared_secret + - objectName: mas-secrets-runtime__keycloak_client_secret + key: keycloak_client_secret + - objectName: mas-secrets-runtime__rsa_key + key: rsa_key + - secretName: othrys-synapse-signingkey + type: Opaque + data: + - objectName: othrys-synapse-signingkey__signing.key + key: signing.key + - secretName: synapse-oidc + type: Opaque + data: + - objectName: synapse-oidc__client-secret + key: client-secret diff --git a/services/comms/seed-othrys-room.yaml b/services/comms/seed-othrys-room.yaml index 901f14d..0508e0e 100644 --- a/services/comms/seed-othrys-room.yaml +++ b/services/comms/seed-othrys-room.yaml @@ -14,6 +14,7 @@ spec: template: spec: restartPolicy: Never + serviceAccountName: comms-vault containers: - name: seed image: python:3.11-slim @@ -24,23 +25,14 @@ spec: value: http://matrix-authentication-service:8080 - name: SEEDER_USER value: othrys-seeder - - name: SEEDER_PASS - valueFrom: - secretKeyRef: - name: atlasbot-credentials-runtime - key: seeder-password - name: BOT_USER value: atlasbot - - name: BOT_PASS - valueFrom: - secretKeyRef: - name: atlasbot-credentials-runtime - key: bot-password command: - /bin/sh - -c - | set -euo pipefail + . 
/vault/scripts/comms_vault_env.sh pip install --no-cache-dir requests pyyaml >/dev/null python - <<'PY' import os, requests, urllib.parse @@ -140,7 +132,23 @@ spec: - name: synapse-config mountPath: /config readOnly: true + - name: vault-secrets + mountPath: /vault/secrets + readOnly: true + - name: vault-scripts + mountPath: /vault/scripts + readOnly: true volumes: - name: synapse-config secret: secretName: othrys-synapse-matrix-synapse + - name: vault-secrets + csi: + driver: secrets-store.csi.k8s.io + readOnly: true + volumeAttributes: + secretProviderClass: comms-vault + - name: vault-scripts + configMap: + name: comms-vault-env + defaultMode: 0555 diff --git a/services/comms/serviceaccount.yaml b/services/comms/serviceaccount.yaml new file mode 100644 index 0000000..1b975b8 --- /dev/null +++ b/services/comms/serviceaccount.yaml @@ -0,0 +1,6 @@ +# services/comms/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: comms-vault + namespace: comms diff --git a/services/comms/synapse-seeder-admin-ensure-job.yaml b/services/comms/synapse-seeder-admin-ensure-job.yaml index 0885722..dbe5609 100644 --- a/services/comms/synapse-seeder-admin-ensure-job.yaml +++ b/services/comms/synapse-seeder-admin-ensure-job.yaml @@ -9,6 +9,7 @@ spec: template: spec: restartPolicy: OnFailure + serviceAccountName: comms-vault containers: - name: psql image: postgres:16-alpine @@ -21,16 +22,30 @@ spec: value: synapse - name: PGUSER value: synapse - - name: PGPASSWORD - valueFrom: - secretKeyRef: - name: synapse-db - key: POSTGRES_PASSWORD command: - /bin/sh - -c - | set -euo pipefail + . 
/vault/scripts/comms_vault_env.sh psql -v ON_ERROR_STOP=1 <<'SQL' UPDATE users SET admin = 1 WHERE name = '@othrys-seeder:live.bstein.dev'; SQL + volumeMounts: + - name: vault-secrets + mountPath: /vault/secrets + readOnly: true + - name: vault-scripts + mountPath: /vault/scripts + readOnly: true + volumes: + - name: vault-secrets + csi: + driver: secrets-store.csi.k8s.io + readOnly: true + volumeAttributes: + secretProviderClass: comms-vault + - name: vault-scripts + configMap: + name: comms-vault-env + defaultMode: 0555 diff --git a/services/comms/synapse-signingkey-ensure-job.yaml b/services/comms/synapse-signingkey-ensure-job.yaml index 81d95a7..ca83f52 100644 --- a/services/comms/synapse-signingkey-ensure-job.yaml +++ b/services/comms/synapse-signingkey-ensure-job.yaml @@ -37,15 +37,29 @@ spec: args: - | set -euo pipefail - set -x - if kubectl -n comms get secret othrys-synapse-signingkey \ - -o jsonpath='{.data.signing\.key}' 2>/tmp/get_err | grep -q .; then + apk add --no-cache curl jq >/dev/null + + vault_addr="${VAULT_ADDR:-http://vault.vault.svc.cluster.local:8200}" + vault_role="${VAULT_ROLE:-comms-secrets}" + jwt="$(cat /var/run/secrets/kubernetes.io/serviceaccount/token)" + login_payload="$(jq -nc --arg jwt "${jwt}" --arg role "${vault_role}" '{jwt:$jwt, role:$role}')" + vault_token="$(curl -sS --request POST --data "${login_payload}" \ + "${vault_addr}/v1/auth/kubernetes/login" | jq -r '.auth.client_token')" + if [ -z "${vault_token}" ] || [ "${vault_token}" = "null" ]; then + echo "vault login failed" >&2 + exit 1 + fi + + existing="$(curl -sS -H "X-Vault-Token: ${vault_token}" \ + "${vault_addr}/v1/kv/data/atlas/comms/othrys-synapse-signingkey" | jq -r '.data.data["signing.key"] // empty')" + if [ -n "${existing}" ]; then exit 0 fi - cat /tmp/get_err >&2 || true - kubectl -n comms create secret generic othrys-synapse-signingkey \ - --from-file=signing.key=/work/signing.key \ - --dry-run=client -o yaml | kubectl -n comms apply -f - >/dev/null + + 
value="$(cat /work/signing.key)" + payload="$(jq -nc --arg value "${value}" '{data:{"signing.key":$value}}')" + curl -sS -X POST -H "X-Vault-Token: ${vault_token}" \ + -d "${payload}" "${vault_addr}/v1/kv/data/atlas/comms/othrys-synapse-signingkey" >/dev/null volumeMounts: - name: work mountPath: /work diff --git a/services/comms/synapse-user-seed-job.yaml b/services/comms/synapse-user-seed-job.yaml index 083f72e..2285dad 100644 --- a/services/comms/synapse-user-seed-job.yaml +++ b/services/comms/synapse-user-seed-job.yaml @@ -10,6 +10,7 @@ spec: template: spec: restartPolicy: Never + serviceAccountName: comms-vault containers: - name: seed image: python:3.11-slim @@ -22,30 +23,16 @@ spec: value: synapse - name: PGUSER value: synapse - - name: PGPASSWORD - valueFrom: - secretKeyRef: - name: synapse-db - key: POSTGRES_PASSWORD - name: SEEDER_USER value: othrys-seeder - - name: SEEDER_PASS - valueFrom: - secretKeyRef: - name: atlasbot-credentials-runtime - key: seeder-password - name: BOT_USER value: atlasbot - - name: BOT_PASS - valueFrom: - secretKeyRef: - name: atlasbot-credentials-runtime - key: bot-password command: - /bin/sh - -c - | set -euo pipefail + . 
/vault/scripts/comms_vault_env.sh pip install --no-cache-dir psycopg2-binary bcrypt >/dev/null python - <<'PY' import os @@ -118,3 +105,21 @@ spec: finally: conn.close() PY + volumeMounts: + - name: vault-secrets + mountPath: /vault/secrets + readOnly: true + - name: vault-scripts + mountPath: /vault/scripts + readOnly: true + volumes: + - name: vault-secrets + csi: + driver: secrets-store.csi.k8s.io + readOnly: true + volumeAttributes: + secretProviderClass: comms-vault + - name: vault-scripts + configMap: + name: comms-vault-env + defaultMode: 0555 diff --git a/services/comms/vault-sync-deployment.yaml b/services/comms/vault-sync-deployment.yaml new file mode 100644 index 0000000..f5b5849 --- /dev/null +++ b/services/comms/vault-sync-deployment.yaml @@ -0,0 +1,34 @@ +# services/comms/vault-sync-deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: comms-vault-sync + namespace: comms +spec: + replicas: 1 + selector: + matchLabels: + app: comms-vault-sync + template: + metadata: + labels: + app: comms-vault-sync + spec: + serviceAccountName: comms-vault + containers: + - name: sync + image: alpine:3.20 + command: ["/bin/sh", "-c"] + args: + - "sleep infinity" + volumeMounts: + - name: vault-secrets + mountPath: /vault/secrets + readOnly: true + volumes: + - name: vault-secrets + csi: + driver: secrets-store.csi.k8s.io + readOnly: true + volumeAttributes: + secretProviderClass: comms-vault diff --git a/services/harbor/kustomization.yaml b/services/harbor/kustomization.yaml index 7da3d50..2a9cb9e 100644 --- a/services/harbor/kustomization.yaml +++ b/services/harbor/kustomization.yaml @@ -4,7 +4,10 @@ kind: Kustomization namespace: harbor resources: - namespace.yaml + - serviceaccount.yaml + - secretproviderclass.yaml - pvc.yaml - certificate.yaml - helmrelease.yaml + - vault-sync-deployment.yaml - image.yaml diff --git a/services/harbor/secretproviderclass.yaml b/services/harbor/secretproviderclass.yaml new file mode 100644 index 0000000..1e1a7f1 
--- /dev/null +++ b/services/harbor/secretproviderclass.yaml @@ -0,0 +1,87 @@ +# services/harbor/secretproviderclass.yaml +apiVersion: secrets-store.csi.x-k8s.io/v1 +kind: SecretProviderClass +metadata: + name: harbor-vault + namespace: harbor +spec: + provider: vault + parameters: + vaultAddress: "http://vault.vault.svc.cluster.local:8200" + roleName: "harbor" + objects: | + - objectName: "harbor-core__CSRF_KEY" + secretPath: "kv/data/atlas/harbor/harbor-core" + secretKey: "CSRF_KEY" + - objectName: "harbor-core__REGISTRY_CREDENTIAL_PASSWORD" + secretPath: "kv/data/atlas/harbor/harbor-core" + secretKey: "REGISTRY_CREDENTIAL_PASSWORD" + - objectName: "harbor-core__harbor_admin_password" + secretPath: "kv/data/atlas/harbor/harbor-core" + secretKey: "harbor_admin_password" + - objectName: "harbor-core__secret" + secretPath: "kv/data/atlas/harbor/harbor-core" + secretKey: "secret" + - objectName: "harbor-core__secretKey" + secretPath: "kv/data/atlas/harbor/harbor-core" + secretKey: "secretKey" + - objectName: "harbor-core__tls.crt" + secretPath: "kv/data/atlas/harbor/harbor-core" + secretKey: "tls.crt" + - objectName: "harbor-core__tls.key" + secretPath: "kv/data/atlas/harbor/harbor-core" + secretKey: "tls.key" + - objectName: "harbor-db__database" + secretPath: "kv/data/atlas/harbor/harbor-db" + secretKey: "database" + - objectName: "harbor-db__host" + secretPath: "kv/data/atlas/harbor/harbor-db" + secretKey: "host" + - objectName: "harbor-db__password" + secretPath: "kv/data/atlas/harbor/harbor-db" + secretKey: "password" + - objectName: "harbor-db__port" + secretPath: "kv/data/atlas/harbor/harbor-db" + secretKey: "port" + - objectName: "harbor-db__username" + secretPath: "kv/data/atlas/harbor/harbor-db" + secretKey: "username" + - objectName: "harbor-oidc__CONFIG_OVERWRITE_JSON" + secretPath: "kv/data/atlas/harbor/harbor-oidc" + secretKey: "CONFIG_OVERWRITE_JSON" + secretObjects: + - secretName: harbor-core + type: Opaque + data: + - objectName: 
harbor-core__CSRF_KEY + key: CSRF_KEY + - objectName: harbor-core__REGISTRY_CREDENTIAL_PASSWORD + key: REGISTRY_CREDENTIAL_PASSWORD + - objectName: harbor-core__harbor_admin_password + key: harbor_admin_password + - objectName: harbor-core__secret + key: secret + - objectName: harbor-core__secretKey + key: secretKey + - objectName: harbor-core__tls.crt + key: tls.crt + - objectName: harbor-core__tls.key + key: tls.key + - secretName: harbor-db + type: Opaque + data: + - objectName: harbor-db__database + key: database + - objectName: harbor-db__host + key: host + - objectName: harbor-db__password + key: password + - objectName: harbor-db__port + key: port + - objectName: harbor-db__username + key: username + - secretName: harbor-oidc + type: Opaque + data: + - objectName: harbor-oidc__CONFIG_OVERWRITE_JSON + key: CONFIG_OVERWRITE_JSON diff --git a/services/harbor/serviceaccount.yaml b/services/harbor/serviceaccount.yaml new file mode 100644 index 0000000..46bb816 --- /dev/null +++ b/services/harbor/serviceaccount.yaml @@ -0,0 +1,6 @@ +# services/harbor/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: harbor-vault-sync + namespace: harbor diff --git a/services/harbor/vault-sync-deployment.yaml b/services/harbor/vault-sync-deployment.yaml new file mode 100644 index 0000000..11aae09 --- /dev/null +++ b/services/harbor/vault-sync-deployment.yaml @@ -0,0 +1,34 @@ +# services/harbor/vault-sync-deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: harbor-vault-sync + namespace: harbor +spec: + replicas: 1 + selector: + matchLabels: + app: harbor-vault-sync + template: + metadata: + labels: + app: harbor-vault-sync + spec: + serviceAccountName: harbor-vault-sync + containers: + - name: sync + image: alpine:3.20 + command: ["/bin/sh", "-c"] + args: + - "sleep infinity" + volumeMounts: + - name: vault-secrets + mountPath: /vault/secrets + readOnly: true + volumes: + - name: vault-secrets + csi: + driver: secrets-store.csi.k8s.io 
+ readOnly: true + volumeAttributes: + secretProviderClass: harbor-vault diff --git a/services/keycloak/harbor-oidc-secret-ensure-job.yaml b/services/keycloak/harbor-oidc-secret-ensure-job.yaml index 21a7ff0..e4fbcee 100644 --- a/services/keycloak/harbor-oidc-secret-ensure-job.yaml +++ b/services/keycloak/harbor-oidc-secret-ensure-job.yaml @@ -16,6 +16,16 @@ spec: configMap: name: harbor-oidc-secret-ensure-script defaultMode: 0555 + - name: vault-secrets + csi: + driver: secrets-store.csi.k8s.io + readOnly: true + volumeAttributes: + secretProviderClass: sso-vault + - name: vault-scripts + configMap: + name: sso-vault-env + defaultMode: 0555 affinity: nodeAffinity: requiredDuringSchedulingIgnoredDuringExecution: @@ -30,18 +40,13 @@ spec: - name: apply image: alpine:3.20 command: ["/scripts/harbor_oidc_secret_ensure.sh"] - env: - - name: KEYCLOAK_ADMIN - valueFrom: - secretKeyRef: - name: keycloak-admin - key: username - - name: KEYCLOAK_ADMIN_PASSWORD - valueFrom: - secretKeyRef: - name: keycloak-admin - key: password volumeMounts: - name: harbor-oidc-secret-ensure-script mountPath: /scripts readOnly: true + - name: vault-secrets + mountPath: /vault/secrets + readOnly: true + - name: vault-scripts + mountPath: /vault/scripts + readOnly: true diff --git a/services/keycloak/ldap-federation-job.yaml b/services/keycloak/ldap-federation-job.yaml index 9650468..d9d650b 100644 --- a/services/keycloak/ldap-federation-job.yaml +++ b/services/keycloak/ldap-federation-job.yaml @@ -19,6 +19,7 @@ spec: - key: node-role.kubernetes.io/worker operator: Exists restartPolicy: OnFailure + serviceAccountName: sso-vault containers: - name: configure image: python:3.11-alpine @@ -28,25 +29,10 @@ spec: value: http://keycloak.sso.svc.cluster.local - name: KEYCLOAK_REALM value: atlas - - name: KEYCLOAK_ADMIN_USER - valueFrom: - secretKeyRef: - name: keycloak-admin - key: username - - name: KEYCLOAK_ADMIN_PASSWORD - valueFrom: - secretKeyRef: - name: keycloak-admin - key: password - name: 
LDAP_URL value: ldap://openldap.sso.svc.cluster.local:389 - name: LDAP_BIND_DN value: cn=admin,dc=bstein,dc=dev - - name: LDAP_BIND_PASSWORD - valueFrom: - secretKeyRef: - name: openldap-admin - key: LDAP_ADMIN_PASSWORD - name: LDAP_USERS_DN value: ou=users,dc=bstein,dc=dev - name: LDAP_GROUPS_DN @@ -55,6 +41,7 @@ spec: args: - | set -euo pipefail + . /vault/scripts/keycloak_vault_env.sh python - <<'PY' import json import os @@ -360,3 +347,21 @@ spec: except Exception as e: print(f"WARNING: LDAP cleanup failed (continuing): {e}") PY + volumeMounts: + - name: vault-secrets + mountPath: /vault/secrets + readOnly: true + - name: vault-scripts + mountPath: /vault/scripts + readOnly: true + volumes: + - name: vault-secrets + csi: + driver: secrets-store.csi.k8s.io + readOnly: true + volumeAttributes: + secretProviderClass: sso-vault + - name: vault-scripts + configMap: + name: sso-vault-env + defaultMode: 0555 diff --git a/services/keycloak/logs-oidc-secret-ensure-job.yaml b/services/keycloak/logs-oidc-secret-ensure-job.yaml index 11d48f9..df3d569 100644 --- a/services/keycloak/logs-oidc-secret-ensure-job.yaml +++ b/services/keycloak/logs-oidc-secret-ensure-job.yaml @@ -18,6 +18,7 @@ spec: args: - | set -euo pipefail + . 
/vault/scripts/keycloak_vault_env.sh apk add --no-cache curl jq kubectl openssl >/dev/null KC_URL="http://keycloak.sso.svc.cluster.local" @@ -73,31 +74,56 @@ spec: exit 1 fi - if kubectl -n logging get secret oauth2-proxy-logs-oidc >/dev/null 2>&1; then - current_cookie="$(kubectl -n logging get secret oauth2-proxy-logs-oidc -o jsonpath='{.data.cookie_secret}' 2>/dev/null || true)" - if [ -n "${current_cookie}" ]; then - decoded="$(printf '%s' "${current_cookie}" | base64 -d 2>/dev/null || true)" - length="$(printf '%s' "${decoded}" | wc -c | tr -d ' ')" - if [ "${length}" = "16" ] || [ "${length}" = "24" ] || [ "${length}" = "32" ]; then - exit 0 - fi - fi + vault_addr="${VAULT_ADDR:-http://vault.vault.svc.cluster.local:8200}" + vault_role="${VAULT_ROLE:-sso-secrets}" + jwt="$(cat /var/run/secrets/kubernetes.io/serviceaccount/token)" + login_payload="$(jq -nc --arg jwt "${jwt}" --arg role "${vault_role}" '{jwt:$jwt, role:$role}')" + vault_token="$(curl -sS --request POST --data "${login_payload}" \ + "${vault_addr}/v1/auth/kubernetes/login" | jq -r '.auth.client_token')" + if [ -z "${vault_token}" ] || [ "${vault_token}" = "null" ]; then + echo "vault login failed" >&2 + exit 1 fi - COOKIE_SECRET="$(openssl rand -hex 16 | tr -d '\n')" + COOKIE_SECRET="$(curl -sS -H "X-Vault-Token: ${vault_token}" \ + "${vault_addr}/v1/kv/data/atlas/logging/oauth2-proxy-logs-oidc" | jq -r '.data.data.cookie_secret // empty')" + if [ -n "${COOKIE_SECRET}" ]; then + length="$(printf '%s' "${COOKIE_SECRET}" | wc -c | tr -d ' ')" + if [ "${length}" != "16" ] && [ "${length}" != "24" ] && [ "${length}" != "32" ]; then + COOKIE_SECRET="" + fi + fi + if [ -z "${COOKIE_SECRET}" ]; then + COOKIE_SECRET="$(openssl rand -hex 16 | tr -d '\n')" + fi + + payload="$(jq -nc \ + --arg client_id "logs" \ + --arg client_secret "${CLIENT_SECRET}" \ + --arg cookie_secret "${COOKIE_SECRET}" \ + '{data:{client_id:$client_id,client_secret:$client_secret,cookie_secret:$cookie_secret}}')" + curl -sS -X POST 
-H "X-Vault-Token: ${vault_token}" \ + -d "${payload}" "${vault_addr}/v1/kv/data/atlas/logging/oauth2-proxy-logs-oidc" >/dev/null kubectl -n logging create secret generic oauth2-proxy-logs-oidc \ --from-literal=client_id="logs" \ --from-literal=client_secret="${CLIENT_SECRET}" \ --from-literal=cookie_secret="${COOKIE_SECRET}" \ --dry-run=client -o yaml | kubectl -n logging apply -f - >/dev/null - env: - - name: KEYCLOAK_ADMIN - valueFrom: - secretKeyRef: - name: keycloak-admin - key: username - - name: KEYCLOAK_ADMIN_PASSWORD - valueFrom: - secretKeyRef: - name: keycloak-admin - key: password + volumeMounts: + - name: vault-secrets + mountPath: /vault/secrets + readOnly: true + - name: vault-scripts + mountPath: /vault/scripts + readOnly: true + volumes: + - name: vault-secrets + csi: + driver: secrets-store.csi.k8s.io + readOnly: true + volumeAttributes: + secretProviderClass: sso-vault + - name: vault-scripts + configMap: + name: sso-vault-env + defaultMode: 0555 diff --git a/services/keycloak/mas-secrets-ensure-job.yaml b/services/keycloak/mas-secrets-ensure-job.yaml index 4d10aae..ec2d7a0 100644 --- a/services/keycloak/mas-secrets-ensure-job.yaml +++ b/services/keycloak/mas-secrets-ensure-job.yaml @@ -20,6 +20,16 @@ spec: volumes: - name: work emptyDir: {} + - name: vault-secrets + csi: + driver: secrets-store.csi.k8s.io + readOnly: true + volumeAttributes: + secretProviderClass: sso-vault + - name: vault-scripts + configMap: + name: sso-vault-env + defaultMode: 0555 initContainers: - name: generate image: alpine:3.20 @@ -27,6 +37,7 @@ spec: args: - | set -euo pipefail + . 
/vault/scripts/keycloak_vault_env.sh umask 077 apk add --no-cache curl openssl jq >/dev/null @@ -68,20 +79,15 @@ spec: openssl rand -hex 32 | tr -d '\n' > /work/matrix_shared_secret openssl genpkey -algorithm RSA -pkeyopt rsa_keygen_bits:4096 -out /work/rsa_key >/dev/null 2>&1 chmod 0644 /work/* - env: - - name: KEYCLOAK_ADMIN - valueFrom: - secretKeyRef: - name: keycloak-admin - key: username - - name: KEYCLOAK_ADMIN_PASSWORD - valueFrom: - secretKeyRef: - name: keycloak-admin - key: password volumeMounts: - name: work mountPath: /work + - name: vault-secrets + mountPath: /vault/secrets + readOnly: true + - name: vault-scripts + mountPath: /vault/scripts + readOnly: true containers: - name: apply image: registry.bstein.dev/bstein/kubectl:1.35.0 @@ -89,19 +95,36 @@ spec: args: - | set -euo pipefail - if kubectl -n comms get secret mas-secrets-runtime >/dev/null 2>&1; then - kubectl -n comms get secret mas-secrets-runtime -o jsonpath='{.data.encryption}' | base64 -d 2>/dev/null > /tmp/encryption.current || true - current_len="$(wc -c < /tmp/encryption.current | tr -d ' ')" - if [ "${current_len}" = "64" ] && grep -Eq '^[0-9a-fA-F]{64}$' /tmp/encryption.current; then + apk add --no-cache curl jq >/dev/null + + vault_addr="${VAULT_ADDR:-http://vault.vault.svc.cluster.local:8200}" + vault_role="${VAULT_ROLE:-sso-secrets}" + jwt="$(cat /var/run/secrets/kubernetes.io/serviceaccount/token)" + login_payload="$(jq -nc --arg jwt "${jwt}" --arg role "${vault_role}" '{jwt:$jwt, role:$role}')" + vault_token="$(curl -sS --request POST --data "${login_payload}" \ + "${vault_addr}/v1/auth/kubernetes/login" | jq -r '.auth.client_token')" + if [ -z "${vault_token}" ] || [ "${vault_token}" = "null" ]; then + echo "vault login failed" >&2 + exit 1 + fi + + existing="$(curl -sS -H "X-Vault-Token: ${vault_token}" \ + "${vault_addr}/v1/kv/data/atlas/comms/mas-secrets-runtime" | jq -r '.data.data.encryption // empty')" + if [ -n "${existing}" ]; then + current_len="$(printf '%s' 
"${existing}" | wc -c | tr -d ' ')" + if [ "${current_len}" = "64" ] && printf '%s' "${existing}" | grep -Eq '^[0-9a-fA-F]{64}$'; then exit 0 fi fi - kubectl -n comms create secret generic mas-secrets-runtime \ - --from-file=encryption=/work/encryption \ - --from-file=matrix_shared_secret=/work/matrix_shared_secret \ - --from-file=keycloak_client_secret=/work/keycloak_client_secret \ - --from-file=rsa_key=/work/rsa_key \ - --dry-run=client -o yaml | kubectl -n comms apply -f - >/dev/null + + payload="$(jq -nc \ + --arg encryption "$(cat /work/encryption)" \ + --arg matrix_shared_secret "$(cat /work/matrix_shared_secret)" \ + --arg keycloak_client_secret "$(cat /work/keycloak_client_secret)" \ + --arg rsa_key "$(cat /work/rsa_key)" \ + '{data:{encryption:$encryption, matrix_shared_secret:$matrix_shared_secret, keycloak_client_secret:$keycloak_client_secret, rsa_key:$rsa_key}}')" + curl -sS -X POST -H "X-Vault-Token: ${vault_token}" \ + -d "${payload}" "${vault_addr}/v1/kv/data/atlas/comms/mas-secrets-runtime" >/dev/null volumeMounts: - name: work mountPath: /work diff --git a/services/keycloak/portal-e2e-client-job.yaml b/services/keycloak/portal-e2e-client-job.yaml index 7f6c5dd..ea15178 100644 --- a/services/keycloak/portal-e2e-client-job.yaml +++ b/services/keycloak/portal-e2e-client-job.yaml @@ -9,6 +9,7 @@ spec: template: spec: restartPolicy: Never + serviceAccountName: sso-vault containers: - name: configure image: python:3.11-alpine @@ -17,30 +18,11 @@ spec: value: http://keycloak.sso.svc.cluster.local - name: KEYCLOAK_REALM value: atlas - - name: KEYCLOAK_ADMIN_USER - valueFrom: - secretKeyRef: - name: keycloak-admin - key: username - - name: KEYCLOAK_ADMIN_PASSWORD - valueFrom: - secretKeyRef: - name: keycloak-admin - key: password - - name: PORTAL_E2E_CLIENT_ID - valueFrom: - secretKeyRef: - name: portal-e2e-client - key: client_id - - name: PORTAL_E2E_CLIENT_SECRET - valueFrom: - secretKeyRef: - name: portal-e2e-client - key: client_secret command: 
["/bin/sh", "-c"] args: - | set -euo pipefail + . /vault/scripts/keycloak_vault_env.sh python - <<'PY' import json import os @@ -245,3 +227,21 @@ spec: if status not in (200, 204): raise SystemExit(f"Role mapping update failed (status={status}) resp={resp}") PY + volumeMounts: + - name: vault-secrets + mountPath: /vault/secrets + readOnly: true + - name: vault-scripts + mountPath: /vault/scripts + readOnly: true + volumes: + - name: vault-secrets + csi: + driver: secrets-store.csi.k8s.io + readOnly: true + volumeAttributes: + secretProviderClass: sso-vault + - name: vault-scripts + configMap: + name: sso-vault-env + defaultMode: 0555 diff --git a/services/keycloak/portal-e2e-execute-actions-email-test-job.yaml b/services/keycloak/portal-e2e-execute-actions-email-test-job.yaml index 877dd55..817c526 100644 --- a/services/keycloak/portal-e2e-execute-actions-email-test-job.yaml +++ b/services/keycloak/portal-e2e-execute-actions-email-test-job.yaml @@ -9,6 +9,7 @@ spec: template: spec: restartPolicy: Never + serviceAccountName: sso-vault containers: - name: test image: python:3.11-alpine @@ -17,16 +18,6 @@ spec: value: http://keycloak.sso.svc.cluster.local - name: KEYCLOAK_REALM value: atlas - - name: PORTAL_E2E_CLIENT_ID - valueFrom: - secretKeyRef: - name: portal-e2e-client - key: client_id - - name: PORTAL_E2E_CLIENT_SECRET - valueFrom: - secretKeyRef: - name: portal-e2e-client - key: client_secret - name: E2E_PROBE_USERNAME value: e2e-smtp-probe - name: E2E_PROBE_EMAIL @@ -39,13 +30,30 @@ spec: args: - | set -euo pipefail + . 
/vault/scripts/keycloak_vault_env.sh python /scripts/test_keycloak_execute_actions_email.py volumeMounts: - name: tests mountPath: /scripts readOnly: true + - name: vault-secrets + mountPath: /vault/secrets + readOnly: true + - name: vault-scripts + mountPath: /vault/scripts + readOnly: true volumes: - name: tests configMap: name: portal-e2e-tests defaultMode: 0555 + - name: vault-secrets + csi: + driver: secrets-store.csi.k8s.io + readOnly: true + volumeAttributes: + secretProviderClass: sso-vault + - name: vault-scripts + configMap: + name: sso-vault-env + defaultMode: 0555 diff --git a/services/keycloak/portal-e2e-target-client-job.yaml b/services/keycloak/portal-e2e-target-client-job.yaml index 45b3980..63a3ea9 100644 --- a/services/keycloak/portal-e2e-target-client-job.yaml +++ b/services/keycloak/portal-e2e-target-client-job.yaml @@ -9,6 +9,7 @@ spec: template: spec: restartPolicy: Never + serviceAccountName: sso-vault containers: - name: configure image: python:3.11-alpine @@ -17,22 +18,13 @@ spec: value: http://keycloak.sso.svc.cluster.local - name: KEYCLOAK_REALM value: atlas - - name: KEYCLOAK_ADMIN_USER - valueFrom: - secretKeyRef: - name: keycloak-admin - key: username - - name: KEYCLOAK_ADMIN_PASSWORD - valueFrom: - secretKeyRef: - name: keycloak-admin - key: password - name: TARGET_CLIENT_ID value: bstein-dev-home command: ["/bin/sh", "-c"] args: - | set -euo pipefail + . 
/vault/scripts/keycloak_vault_env.sh python - <<'PY' import json import os @@ -136,3 +128,21 @@ spec: print(f"OK: ensured token exchange enabled on client {target_client_id}") PY + volumeMounts: + - name: vault-secrets + mountPath: /vault/secrets + readOnly: true + - name: vault-scripts + mountPath: /vault/scripts + readOnly: true + volumes: + - name: vault-secrets + csi: + driver: secrets-store.csi.k8s.io + readOnly: true + volumeAttributes: + secretProviderClass: sso-vault + - name: vault-scripts + configMap: + name: sso-vault-env + defaultMode: 0555 diff --git a/services/keycloak/portal-e2e-token-exchange-permissions-job.yaml b/services/keycloak/portal-e2e-token-exchange-permissions-job.yaml index 104d6f0..c0ec397 100644 --- a/services/keycloak/portal-e2e-token-exchange-permissions-job.yaml +++ b/services/keycloak/portal-e2e-token-exchange-permissions-job.yaml @@ -9,6 +9,7 @@ spec: template: spec: restartPolicy: Never + serviceAccountName: sso-vault containers: - name: configure image: python:3.11-alpine @@ -17,16 +18,6 @@ spec: value: http://keycloak.sso.svc.cluster.local - name: KEYCLOAK_REALM value: atlas - - name: KEYCLOAK_ADMIN_USER - valueFrom: - secretKeyRef: - name: keycloak-admin - key: username - - name: KEYCLOAK_ADMIN_PASSWORD - valueFrom: - secretKeyRef: - name: keycloak-admin - key: password - name: PORTAL_E2E_CLIENT_ID value: test-portal-e2e - name: TARGET_CLIENT_ID @@ -35,6 +26,7 @@ spec: args: - | set -euo pipefail + . 
/vault/scripts/keycloak_vault_env.sh python - <<'PY' import json import os @@ -269,3 +261,21 @@ spec: print("OK: configured token exchange permissions for portal E2E client") PY + volumeMounts: + - name: vault-secrets + mountPath: /vault/secrets + readOnly: true + - name: vault-scripts + mountPath: /vault/scripts + readOnly: true + volumes: + - name: vault-secrets + csi: + driver: secrets-store.csi.k8s.io + readOnly: true + volumeAttributes: + secretProviderClass: sso-vault + - name: vault-scripts + configMap: + name: sso-vault-env + defaultMode: 0555 diff --git a/services/keycloak/portal-e2e-token-exchange-test-job.yaml b/services/keycloak/portal-e2e-token-exchange-test-job.yaml index ab43303..694a8ca 100644 --- a/services/keycloak/portal-e2e-token-exchange-test-job.yaml +++ b/services/keycloak/portal-e2e-token-exchange-test-job.yaml @@ -10,6 +10,7 @@ spec: template: spec: restartPolicy: Never + serviceAccountName: sso-vault containers: - name: test image: python:3.11-alpine @@ -26,27 +27,34 @@ spec: value: "300" - name: RETRY_INTERVAL_SECONDS value: "5" - - name: PORTAL_E2E_CLIENT_ID - valueFrom: - secretKeyRef: - name: portal-e2e-client - key: client_id - - name: PORTAL_E2E_CLIENT_SECRET - valueFrom: - secretKeyRef: - name: portal-e2e-client - key: client_secret command: ["/bin/sh", "-c"] args: - | set -euo pipefail + . 
/vault/scripts/keycloak_vault_env.sh python /scripts/test_portal_token_exchange.py volumeMounts: - name: tests mountPath: /scripts readOnly: true + - name: vault-secrets + mountPath: /vault/secrets + readOnly: true + - name: vault-scripts + mountPath: /vault/scripts + readOnly: true volumes: - name: tests configMap: name: portal-e2e-tests defaultMode: 0555 + - name: vault-secrets + csi: + driver: secrets-store.csi.k8s.io + readOnly: true + volumeAttributes: + secretProviderClass: sso-vault + - name: vault-scripts + configMap: + name: sso-vault-env + defaultMode: 0555 diff --git a/services/keycloak/realm-settings-job.yaml b/services/keycloak/realm-settings-job.yaml index bdc816d..0c5752f 100644 --- a/services/keycloak/realm-settings-job.yaml +++ b/services/keycloak/realm-settings-job.yaml @@ -19,6 +19,7 @@ spec: - key: node-role.kubernetes.io/worker operator: Exists restartPolicy: Never + serviceAccountName: sso-vault containers: - name: configure image: python:3.11-alpine @@ -27,16 +28,6 @@ spec: value: http://keycloak.sso.svc.cluster.local - name: KEYCLOAK_REALM value: atlas - - name: KEYCLOAK_ADMIN_USER - valueFrom: - secretKeyRef: - name: keycloak-admin - key: username - - name: KEYCLOAK_ADMIN_PASSWORD - valueFrom: - secretKeyRef: - name: keycloak-admin - key: password - name: KEYCLOAK_SMTP_HOST value: mailu-front.mailu-mailserver.svc.cluster.local - name: KEYCLOAK_SMTP_PORT @@ -53,6 +44,7 @@ spec: args: - | set -euo pipefail + . 
/vault/scripts/keycloak_vault_env.sh python - <<'PY' import json import os @@ -444,3 +436,21 @@ spec: f"Unexpected execution update response for identity-provider-redirector: {status}" ) PY + volumeMounts: + - name: vault-secrets + mountPath: /vault/secrets + readOnly: true + - name: vault-scripts + mountPath: /vault/scripts + readOnly: true + volumes: + - name: vault-secrets + csi: + driver: secrets-store.csi.k8s.io + readOnly: true + volumeAttributes: + secretProviderClass: sso-vault + - name: vault-scripts + configMap: + name: sso-vault-env + defaultMode: 0555 diff --git a/services/keycloak/scripts/harbor_oidc_secret_ensure.sh b/services/keycloak/scripts/harbor_oidc_secret_ensure.sh index 4767ef0..f2dafc6 100755 --- a/services/keycloak/scripts/harbor_oidc_secret_ensure.sh +++ b/services/keycloak/scripts/harbor_oidc_secret_ensure.sh @@ -3,6 +3,8 @@ set -euo pipefail apk add --no-cache curl jq kubectl >/dev/null +. /vault/scripts/keycloak_vault_env.sh + KC_URL="http://keycloak.sso.svc.cluster.local" ACCESS_TOKEN="" for attempt in 1 2 3 4 5; do @@ -99,6 +101,17 @@ CONFIG_OVERWRITE_JSON="$(jq -nc \ --argjson oidc_logout true \ '{auth_mode:$auth_mode,oidc_name:$oidc_name,oidc_client_id:$oidc_client_id,oidc_client_secret:$oidc_client_secret,oidc_endpoint:$oidc_endpoint,oidc_scope:$oidc_scope,oidc_user_claim:$oidc_user_claim,oidc_groups_claim:$oidc_groups_claim,oidc_admin_group:$oidc_admin_group,oidc_auto_onboard:$oidc_auto_onboard,oidc_verify_cert:$oidc_verify_cert,oidc_logout:$oidc_logout}')" -kubectl -n harbor create secret generic harbor-oidc \ - --from-literal=CONFIG_OVERWRITE_JSON="${CONFIG_OVERWRITE_JSON}" \ - --dry-run=client -o yaml | kubectl -n harbor apply -f - >/dev/null +vault_addr="${VAULT_ADDR:-http://vault.vault.svc.cluster.local:8200}" +vault_role="${VAULT_ROLE:-sso-secrets}" +jwt="$(cat /var/run/secrets/kubernetes.io/serviceaccount/token)" +login_payload="$(jq -nc --arg jwt "${jwt}" --arg role "${vault_role}" '{jwt:$jwt, role:$role}')" 
+vault_token="$(curl -sS --request POST --data "${login_payload}" \ + "${vault_addr}/v1/auth/kubernetes/login" | jq -r '.auth.client_token')" +if [ -z "${vault_token}" ] || [ "${vault_token}" = "null" ]; then + echo "vault login failed" >&2 + exit 1 +fi + +payload="$(jq -nc --arg value "${CONFIG_OVERWRITE_JSON}" '{data:{CONFIG_OVERWRITE_JSON:$value}}')" +curl -sS -X POST -H "X-Vault-Token: ${vault_token}" \ + -d "${payload}" "${vault_addr}/v1/kv/data/atlas/harbor/harbor-oidc" >/dev/null diff --git a/services/keycloak/scripts/keycloak_vault_env.sh b/services/keycloak/scripts/keycloak_vault_env.sh index a9cfdae..62f7f38 100644 --- a/services/keycloak/scripts/keycloak_vault_env.sh +++ b/services/keycloak/scripts/keycloak_vault_env.sh @@ -23,3 +23,4 @@ export PORTAL_E2E_CLIENT_SECRET="$(read_secret portal-e2e-client__client_secret) export LDAP_ADMIN_PASSWORD="$(read_secret openldap-admin__LDAP_ADMIN_PASSWORD)" export LDAP_CONFIG_PASSWORD="$(read_secret openldap-admin__LDAP_CONFIG_PASSWORD)" +export LDAP_BIND_PASSWORD="${LDAP_ADMIN_PASSWORD}" diff --git a/services/keycloak/scripts/vault_oidc_secret_ensure.sh b/services/keycloak/scripts/vault_oidc_secret_ensure.sh index f7b3261..680057f 100755 --- a/services/keycloak/scripts/vault_oidc_secret_ensure.sh +++ b/services/keycloak/scripts/vault_oidc_secret_ensure.sh @@ -3,6 +3,8 @@ set -euo pipefail apk add --no-cache curl jq kubectl >/dev/null +. 
/vault/scripts/keycloak_vault_env.sh + KC_URL="http://keycloak.sso.svc.cluster.local" ACCESS_TOKEN="" for attempt in 1 2 3 4 5; do @@ -84,6 +86,37 @@ if [ -z "$CLIENT_SECRET" ] || [ "$CLIENT_SECRET" = "null" ]; then exit 1 fi +vault_addr="${VAULT_ADDR:-http://vault.vault.svc.cluster.local:8200}" +vault_role="${VAULT_ROLE:-sso-secrets}" +jwt="$(cat /var/run/secrets/kubernetes.io/serviceaccount/token)" +login_payload="$(jq -nc --arg jwt "${jwt}" --arg role "${vault_role}" '{jwt:$jwt, role:$role}')" +vault_token="$(curl -sS --request POST --data "${login_payload}" \ + "${vault_addr}/v1/auth/kubernetes/login" | jq -r '.auth.client_token')" +if [ -z "${vault_token}" ] || [ "${vault_token}" = "null" ]; then + echo "vault login failed" >&2 + exit 1 +fi + +payload="$(jq -nc \ + --arg discovery_url "https://sso.bstein.dev/realms/atlas" \ + --arg client_id "vault-oidc" \ + --arg client_secret "${CLIENT_SECRET}" \ + --arg default_role "admin" \ + --arg scopes "openid profile email groups" \ + --arg user_claim "preferred_username" \ + --arg groups_claim "groups" \ + --arg redirect_uris "https://secret.bstein.dev/ui/vault/auth/oidc/oidc/callback,http://localhost:8250/oidc/callback" \ + --arg bound_audiences "vault-oidc" \ + --arg admin_group "admin" \ + --arg admin_policies "default,vault-admin" \ + --arg dev_group "dev" \ + --arg dev_policies "default,dev-kv" \ + --arg user_group "dev" \ + --arg user_policies "default,dev-kv" \ + '{data:{discovery_url:$discovery_url,client_id:$client_id,client_secret:$client_secret,default_role:$default_role,scopes:$scopes,user_claim:$user_claim,groups_claim:$groups_claim,redirect_uris:$redirect_uris,bound_audiences:$bound_audiences,admin_group:$admin_group,admin_policies:$admin_policies,dev_group:$dev_group,dev_policies:$dev_policies,user_group:$user_group,user_policies:$user_policies}}')" +curl -sS -X POST -H "X-Vault-Token: ${vault_token}" \ + -d "${payload}" "${vault_addr}/v1/kv/data/atlas/vault/vault-oidc-config" >/dev/null + kubectl -n 
vault create secret generic vault-oidc-config \ --from-literal=discovery_url="https://sso.bstein.dev/realms/atlas" \ --from-literal=client_id="vault-oidc" \ diff --git a/services/keycloak/synapse-oidc-secret-ensure-job.yaml b/services/keycloak/synapse-oidc-secret-ensure-job.yaml index 7486ced..38e6753 100644 --- a/services/keycloak/synapse-oidc-secret-ensure-job.yaml +++ b/services/keycloak/synapse-oidc-secret-ensure-job.yaml @@ -18,7 +18,8 @@ spec: args: - | set -euo pipefail - apk add --no-cache curl jq kubectl >/dev/null + . /vault/scripts/keycloak_vault_env.sh + apk add --no-cache curl jq >/dev/null KC_URL="http://keycloak.sso.svc.cluster.local" ACCESS_TOKEN="" @@ -54,22 +55,35 @@ spec: exit 1 fi - existing="$(kubectl -n comms get secret synapse-oidc -o jsonpath='{.data.client-secret}' 2>/dev/null || true)" - if [ -n "${existing}" ]; then - exit 0 + vault_addr="${VAULT_ADDR:-http://vault.vault.svc.cluster.local:8200}" + vault_role="${VAULT_ROLE:-sso-secrets}" + jwt="$(cat /var/run/secrets/kubernetes.io/serviceaccount/token)" + login_payload="$(jq -nc --arg jwt "${jwt}" --arg role "${vault_role}" '{jwt:$jwt, role:$role}')" + vault_token="$(curl -sS --request POST --data "${login_payload}" \ + "${vault_addr}/v1/auth/kubernetes/login" | jq -r '.auth.client_token')" + if [ -z "${vault_token}" ] || [ "${vault_token}" = "null" ]; then + echo "vault login failed" >&2 + exit 1 fi - kubectl -n comms create secret generic synapse-oidc \ - --from-literal=client-secret="${CLIENT_SECRET}" \ - --dry-run=client -o yaml | kubectl -n comms apply -f - >/dev/null - env: - - name: KEYCLOAK_ADMIN - valueFrom: - secretKeyRef: - name: keycloak-admin - key: username - - name: KEYCLOAK_ADMIN_PASSWORD - valueFrom: - secretKeyRef: - name: keycloak-admin - key: password + payload="$(jq -nc --arg value "${CLIENT_SECRET}" '{data:{"client-secret":$value}}')" + curl -sS -X POST -H "X-Vault-Token: ${vault_token}" \ + -d "${payload}" "${vault_addr}/v1/kv/data/atlas/comms/synapse-oidc" 
>/dev/null + volumeMounts: + - name: vault-secrets + mountPath: /vault/secrets + readOnly: true + - name: vault-scripts + mountPath: /vault/scripts + readOnly: true + volumes: + - name: vault-secrets + csi: + driver: secrets-store.csi.k8s.io + readOnly: true + volumeAttributes: + secretProviderClass: sso-vault + - name: vault-scripts + configMap: + name: sso-vault-env + defaultMode: 0555 diff --git a/services/keycloak/user-overrides-job.yaml b/services/keycloak/user-overrides-job.yaml index 43813ee..2f580a9 100644 --- a/services/keycloak/user-overrides-job.yaml +++ b/services/keycloak/user-overrides-job.yaml @@ -19,6 +19,7 @@ spec: - key: node-role.kubernetes.io/worker operator: Exists restartPolicy: Never + serviceAccountName: sso-vault containers: - name: configure image: python:3.11-alpine @@ -27,16 +28,6 @@ spec: value: http://keycloak.sso.svc.cluster.local - name: KEYCLOAK_REALM value: atlas - - name: KEYCLOAK_ADMIN_USER - valueFrom: - secretKeyRef: - name: keycloak-admin - key: username - - name: KEYCLOAK_ADMIN_PASSWORD - valueFrom: - secretKeyRef: - name: keycloak-admin - key: password - name: OVERRIDE_USERNAME value: bstein - name: OVERRIDE_MAILU_EMAIL @@ -45,6 +36,7 @@ spec: args: - | set -euo pipefail + . 
/vault/scripts/keycloak_vault_env.sh python - <<'PY' import json import os @@ -143,3 +135,21 @@ spec: if status not in (200, 204): raise SystemExit(f"Unexpected user update response: {status}") PY + volumeMounts: + - name: vault-secrets + mountPath: /vault/secrets + readOnly: true + - name: vault-scripts + mountPath: /vault/scripts + readOnly: true + volumes: + - name: vault-secrets + csi: + driver: secrets-store.csi.k8s.io + readOnly: true + volumeAttributes: + secretProviderClass: sso-vault + - name: vault-scripts + configMap: + name: sso-vault-env + defaultMode: 0555 diff --git a/services/keycloak/vault-oidc-secret-ensure-job.yaml b/services/keycloak/vault-oidc-secret-ensure-job.yaml index ce3a1f0..2a8c382 100644 --- a/services/keycloak/vault-oidc-secret-ensure-job.yaml +++ b/services/keycloak/vault-oidc-secret-ensure-job.yaml @@ -16,6 +16,16 @@ spec: configMap: name: vault-oidc-secret-ensure-script defaultMode: 0555 + - name: vault-secrets + csi: + driver: secrets-store.csi.k8s.io + readOnly: true + volumeAttributes: + secretProviderClass: sso-vault + - name: vault-scripts + configMap: + name: sso-vault-env + defaultMode: 0555 affinity: nodeAffinity: requiredDuringSchedulingIgnoredDuringExecution: @@ -30,18 +40,13 @@ spec: - name: apply image: alpine:3.20 command: ["/scripts/vault_oidc_secret_ensure.sh"] - env: - - name: KEYCLOAK_ADMIN - valueFrom: - secretKeyRef: - name: keycloak-admin - key: username - - name: KEYCLOAK_ADMIN_PASSWORD - valueFrom: - secretKeyRef: - name: keycloak-admin - key: password volumeMounts: - name: vault-oidc-secret-ensure-script mountPath: /scripts readOnly: true + - name: vault-secrets + mountPath: /vault/secrets + readOnly: true + - name: vault-scripts + mountPath: /vault/scripts + readOnly: true diff --git a/services/mailu/kustomization.yaml b/services/mailu/kustomization.yaml index af4b2b1..31b1cb9 100644 --- a/services/mailu/kustomization.yaml +++ b/services/mailu/kustomization.yaml @@ -4,7 +4,10 @@ kind: Kustomization namespace: 
mailu-mailserver resources: - namespace.yaml + - serviceaccount.yaml + - secretproviderclass.yaml - helmrelease.yaml + - vault-sync-deployment.yaml - certificate.yaml - vip-controller.yaml - unbound-configmap.yaml @@ -16,6 +19,12 @@ resources: - front-lb.yaml configMapGenerator: + - name: mailu-vault-env + namespace: mailu-mailserver + files: + - mailu_vault_env.sh=scripts/mailu_vault_env.sh + options: + disableNameSuffixHash: true - name: mailu-sync-script namespace: mailu-mailserver files: diff --git a/services/mailu/mailu-sync-cronjob.yaml b/services/mailu/mailu-sync-cronjob.yaml index 268680f..4d73afa 100644 --- a/services/mailu/mailu-sync-cronjob.yaml +++ b/services/mailu/mailu-sync-cronjob.yaml @@ -12,6 +12,7 @@ spec: template: spec: restartPolicy: OnFailure + serviceAccountName: mailu-vault-sync containers: - name: mailu-sync image: python:3.11-alpine @@ -19,8 +20,10 @@ spec: command: ["/bin/sh", "-c"] args: - | + set -euo pipefail + . /vault/scripts/mailu_vault_env.sh pip install --no-cache-dir requests psycopg2-binary passlib >/tmp/pip.log \ - && python /app/sync.py + && python /app/sync.py env: - name: KEYCLOAK_BASE_URL value: http://keycloak.sso.svc.cluster.local @@ -34,35 +37,16 @@ spec: value: postgres-service.postgres.svc.cluster.local - name: MAILU_DB_PORT value: "5432" - - name: MAILU_DB_NAME - valueFrom: - secretKeyRef: - name: mailu-db-secret - key: database - - name: MAILU_DB_USER - valueFrom: - secretKeyRef: - name: mailu-db-secret - key: username - - name: MAILU_DB_PASSWORD - valueFrom: - secretKeyRef: - name: mailu-db-secret - key: password - - name: KEYCLOAK_CLIENT_ID - valueFrom: - secretKeyRef: - name: mailu-sync-credentials - key: client-id - - name: KEYCLOAK_CLIENT_SECRET - valueFrom: - secretKeyRef: - name: mailu-sync-credentials - key: client-secret volumeMounts: - name: sync-script mountPath: /app/sync.py subPath: sync.py + - name: vault-secrets + mountPath: /vault/secrets + readOnly: true + - name: vault-scripts + mountPath: 
/vault/scripts + readOnly: true resources: requests: cpu: 50m @@ -75,3 +59,13 @@ spec: configMap: name: mailu-sync-script defaultMode: 0444 + - name: vault-secrets + csi: + driver: secrets-store.csi.k8s.io + readOnly: true + volumeAttributes: + secretProviderClass: mailu-vault + - name: vault-scripts + configMap: + name: mailu-vault-env + defaultMode: 0555 diff --git a/services/mailu/mailu-sync-job.yaml b/services/mailu/mailu-sync-job.yaml index 7230c1d..60d48cb 100644 --- a/services/mailu/mailu-sync-job.yaml +++ b/services/mailu/mailu-sync-job.yaml @@ -8,6 +8,7 @@ spec: template: spec: restartPolicy: OnFailure + serviceAccountName: mailu-vault-sync containers: - name: mailu-sync image: python:3.11-alpine @@ -15,8 +16,10 @@ spec: command: ["/bin/sh", "-c"] args: - | + set -euo pipefail + . /vault/scripts/mailu_vault_env.sh pip install --no-cache-dir requests psycopg2-binary passlib >/tmp/pip.log \ - && python /app/sync.py + && python /app/sync.py env: - name: KEYCLOAK_BASE_URL value: http://keycloak.sso.svc.cluster.local @@ -30,35 +33,16 @@ spec: value: postgres-service.postgres.svc.cluster.local - name: MAILU_DB_PORT value: "5432" - - name: MAILU_DB_NAME - valueFrom: - secretKeyRef: - name: mailu-db-secret - key: database - - name: MAILU_DB_USER - valueFrom: - secretKeyRef: - name: mailu-db-secret - key: username - - name: MAILU_DB_PASSWORD - valueFrom: - secretKeyRef: - name: mailu-db-secret - key: password - - name: KEYCLOAK_CLIENT_ID - valueFrom: - secretKeyRef: - name: mailu-sync-credentials - key: client-id - - name: KEYCLOAK_CLIENT_SECRET - valueFrom: - secretKeyRef: - name: mailu-sync-credentials - key: client-secret volumeMounts: - name: sync-script mountPath: /app/sync.py subPath: sync.py + - name: vault-secrets + mountPath: /vault/secrets + readOnly: true + - name: vault-scripts + mountPath: /vault/scripts + readOnly: true resources: requests: cpu: 50m @@ -71,3 +55,13 @@ spec: configMap: name: mailu-sync-script defaultMode: 0444 + - name: vault-secrets + 
csi: + driver: secrets-store.csi.k8s.io + readOnly: true + volumeAttributes: + secretProviderClass: mailu-vault + - name: vault-scripts + configMap: + name: mailu-vault-env + defaultMode: 0555 diff --git a/services/mailu/mailu-sync-listener.yaml b/services/mailu/mailu-sync-listener.yaml index 2127313..f90164c 100644 --- a/services/mailu/mailu-sync-listener.yaml +++ b/services/mailu/mailu-sync-listener.yaml @@ -30,6 +30,7 @@ spec: app: mailu-sync-listener spec: restartPolicy: Always + serviceAccountName: mailu-vault-sync containers: - name: listener image: python:3.11-alpine @@ -37,8 +38,10 @@ spec: command: ["/bin/sh", "-c"] args: - | + set -euo pipefail + . /vault/scripts/mailu_vault_env.sh pip install --no-cache-dir requests psycopg2-binary passlib >/tmp/pip.log \ - && python /app/listener.py + && python /app/listener.py env: - name: KEYCLOAK_BASE_URL value: http://keycloak.sso.svc.cluster.local @@ -52,31 +55,6 @@ spec: value: postgres-service.postgres.svc.cluster.local - name: MAILU_DB_PORT value: "5432" - - name: MAILU_DB_NAME - valueFrom: - secretKeyRef: - name: mailu-db-secret - key: database - - name: MAILU_DB_USER - valueFrom: - secretKeyRef: - name: mailu-db-secret - key: username - - name: MAILU_DB_PASSWORD - valueFrom: - secretKeyRef: - name: mailu-db-secret - key: password - - name: KEYCLOAK_CLIENT_ID - valueFrom: - secretKeyRef: - name: mailu-sync-credentials - key: client-id - - name: KEYCLOAK_CLIENT_SECRET - valueFrom: - secretKeyRef: - name: mailu-sync-credentials - key: client-secret volumeMounts: - name: sync-script mountPath: /app/sync.py @@ -84,6 +62,12 @@ spec: - name: listener-script mountPath: /app/listener.py subPath: listener.py + - name: vault-secrets + mountPath: /vault/secrets + readOnly: true + - name: vault-scripts + mountPath: /vault/scripts + readOnly: true resources: requests: cpu: 50m @@ -100,3 +84,13 @@ spec: configMap: name: mailu-sync-listener defaultMode: 0444 + - name: vault-secrets + csi: + driver: secrets-store.csi.k8s.io + 
readOnly: true + volumeAttributes: + secretProviderClass: mailu-vault + - name: vault-scripts + configMap: + name: mailu-vault-env + defaultMode: 0555 diff --git a/services/mailu/scripts/mailu_vault_env.sh b/services/mailu/scripts/mailu_vault_env.sh new file mode 100644 index 0000000..082a51a --- /dev/null +++ b/services/mailu/scripts/mailu_vault_env.sh @@ -0,0 +1,14 @@ +#!/usr/bin/env sh +set -eu + +vault_dir="/vault/secrets" + +read_secret() { + cat "${vault_dir}/$1" +} + +export MAILU_DB_NAME="$(read_secret mailu-db-secret__database)" +export MAILU_DB_USER="$(read_secret mailu-db-secret__username)" +export MAILU_DB_PASSWORD="$(read_secret mailu-db-secret__password)" +export KEYCLOAK_CLIENT_ID="$(read_secret mailu-sync-credentials__client-id)" +export KEYCLOAK_CLIENT_SECRET="$(read_secret mailu-sync-credentials__client-secret)" diff --git a/services/mailu/secretproviderclass.yaml b/services/mailu/secretproviderclass.yaml new file mode 100644 index 0000000..0ed32ba --- /dev/null +++ b/services/mailu/secretproviderclass.yaml @@ -0,0 +1,78 @@ +# services/mailu/secretproviderclass.yaml +apiVersion: secrets-store.csi.x-k8s.io/v1 +kind: SecretProviderClass +metadata: + name: mailu-vault + namespace: mailu-mailserver +spec: + provider: vault + parameters: + vaultAddress: "http://vault.vault.svc.cluster.local:8200" + roleName: "mailu-mailserver" + objects: | + - objectName: "mailu-secret__secret-key" + secretPath: "kv/data/atlas/mailu/mailu-secret" + secretKey: "secret-key" + - objectName: "postmark-relay__relay-username" + secretPath: "kv/data/atlas/shared/postmark-relay" + secretKey: "relay-username" + - objectName: "postmark-relay__relay-password" + secretPath: "kv/data/atlas/shared/postmark-relay" + secretKey: "relay-password" + - objectName: "mailu-db-secret__database" + secretPath: "kv/data/atlas/mailu/mailu-db-secret" + secretKey: "database" + - objectName: "mailu-db-secret__username" + secretPath: "kv/data/atlas/mailu/mailu-db-secret" + secretKey: "username" + - 
objectName: "mailu-db-secret__password" + secretPath: "kv/data/atlas/mailu/mailu-db-secret" + secretKey: "password" + - objectName: "mailu-db-secret__url" + secretPath: "kv/data/atlas/mailu/mailu-db-secret" + secretKey: "url" + - objectName: "mailu-initial-account-secret__password" + secretPath: "kv/data/atlas/mailu/mailu-initial-account-secret" + secretKey: "password" + - objectName: "mailu-sync-credentials__client-id" + secretPath: "kv/data/atlas/mailu/mailu-sync-credentials" + secretKey: "client-id" + - objectName: "mailu-sync-credentials__client-secret" + secretPath: "kv/data/atlas/mailu/mailu-sync-credentials" + secretKey: "client-secret" + secretObjects: + - secretName: mailu-secret + type: Opaque + data: + - objectName: mailu-secret__secret-key + key: secret-key + - secretName: mailu-postmark-relay + type: Opaque + data: + - objectName: postmark-relay__relay-username + key: relay-username + - objectName: postmark-relay__relay-password + key: relay-password + - secretName: mailu-db-secret + type: Opaque + data: + - objectName: mailu-db-secret__database + key: database + - objectName: mailu-db-secret__username + key: username + - objectName: mailu-db-secret__password + key: password + - objectName: mailu-db-secret__url + key: url + - secretName: mailu-initial-account-secret + type: Opaque + data: + - objectName: mailu-initial-account-secret__password + key: password + - secretName: mailu-sync-credentials + type: Opaque + data: + - objectName: mailu-sync-credentials__client-id + key: client-id + - objectName: mailu-sync-credentials__client-secret + key: client-secret diff --git a/services/mailu/serviceaccount.yaml b/services/mailu/serviceaccount.yaml new file mode 100644 index 0000000..d95410b --- /dev/null +++ b/services/mailu/serviceaccount.yaml @@ -0,0 +1,6 @@ +# services/mailu/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: mailu-vault-sync + namespace: mailu-mailserver diff --git a/services/mailu/vault-sync-deployment.yaml 
b/services/mailu/vault-sync-deployment.yaml new file mode 100644 index 0000000..966f22b --- /dev/null +++ b/services/mailu/vault-sync-deployment.yaml @@ -0,0 +1,34 @@ +# services/mailu/vault-sync-deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: mailu-vault-sync + namespace: mailu-mailserver +spec: + replicas: 1 + selector: + matchLabels: + app: mailu-vault-sync + template: + metadata: + labels: + app: mailu-vault-sync + spec: + serviceAccountName: mailu-vault-sync + containers: + - name: sync + image: alpine:3.20 + command: ["/bin/sh", "-c"] + args: + - "sleep infinity" + volumeMounts: + - name: vault-secrets + mountPath: /vault/secrets + readOnly: true + volumes: + - name: vault-secrets + csi: + driver: secrets-store.csi.k8s.io + readOnly: true + volumeAttributes: + secretProviderClass: mailu-vault diff --git a/services/nextcloud-mail-sync/cronjob.yaml b/services/nextcloud-mail-sync/cronjob.yaml index 9976d8e..129022b 100644 --- a/services/nextcloud-mail-sync/cronjob.yaml +++ b/services/nextcloud-mail-sync/cronjob.yaml @@ -17,47 +17,23 @@ spec: securityContext: runAsUser: 0 runAsGroup: 0 + serviceAccountName: nextcloud-vault containers: - name: mail-sync image: nextcloud:29-apache imagePullPolicy: IfNotPresent command: - - /bin/bash - - /sync/sync.sh + - /bin/sh + - -c env: - name: KC_BASE value: https://sso.bstein.dev - name: KC_REALM value: atlas - - name: KC_ADMIN_USER - valueFrom: - secretKeyRef: - name: nextcloud-keycloak-admin - key: username - - name: KC_ADMIN_PASS - valueFrom: - secretKeyRef: - name: nextcloud-keycloak-admin - key: password - name: MAILU_DOMAIN value: bstein.dev - name: POSTGRES_HOST value: postgres-service.postgres.svc.cluster.local - - name: POSTGRES_DB - valueFrom: - secretKeyRef: - name: nextcloud-db - key: database - - name: POSTGRES_USER - valueFrom: - secretKeyRef: - name: nextcloud-db - key: db-username - - name: POSTGRES_PASSWORD - valueFrom: - secretKeyRef: - name: nextcloud-db - key: db-password 
resources: requests: cpu: 100m @@ -77,6 +53,17 @@ spec: - name: sync-script mountPath: /sync/sync.sh subPath: sync.sh + - name: vault-secrets + mountPath: /vault/secrets + readOnly: true + - name: vault-scripts + mountPath: /vault/scripts + readOnly: true + args: + - | + set -euo pipefail + . /vault/scripts/nextcloud_vault_env.sh + exec /sync/sync.sh volumes: - name: nextcloud-config-pvc persistentVolumeClaim: @@ -94,3 +81,13 @@ spec: configMap: name: nextcloud-mail-sync-script defaultMode: 0755 + - name: vault-secrets + csi: + driver: secrets-store.csi.k8s.io + readOnly: true + volumeAttributes: + secretProviderClass: nextcloud-vault + - name: vault-scripts + configMap: + name: nextcloud-vault-env + defaultMode: 0555 diff --git a/services/nextcloud/deployment.yaml b/services/nextcloud/deployment.yaml index 295435e..894484c 100644 --- a/services/nextcloud/deployment.yaml +++ b/services/nextcloud/deployment.yaml @@ -22,6 +22,7 @@ spec: fsGroup: 33 runAsUser: 33 runAsGroup: 33 + serviceAccountName: nextcloud-vault initContainers: - name: seed-nextcloud-web image: nextcloud:29-apache @@ -80,6 +81,7 @@ spec: command: ["/bin/sh", "-c"] args: - | + . /vault/scripts/nextcloud_vault_env.sh installed="$(su -s /bin/sh www-data -c "php /var/www/html/occ status" 2>/dev/null | awk '/installed:/{print $3}' || true)" if [ ! 
-s /var/www/html/config/config.php ]; then su -s /bin/sh www-data -c "php /var/www/html/occ maintenance:install --database pgsql --database-host \"${POSTGRES_HOST}\" --database-name \"${POSTGRES_DB}\" --database-user \"${POSTGRES_USER}\" --database-pass \"${POSTGRES_PASSWORD}\" --admin-user \"${NEXTCLOUD_ADMIN_USER}\" --admin-pass \"${NEXTCLOUD_ADMIN_PASSWORD}\" --data-dir /var/www/html/data" @@ -150,41 +152,6 @@ spec: env: - name: POSTGRES_HOST value: postgres-service.postgres.svc.cluster.local - - name: POSTGRES_DB - valueFrom: - secretKeyRef: - name: nextcloud-db - key: database - - name: POSTGRES_USER - valueFrom: - secretKeyRef: - name: nextcloud-db - key: db-username - - name: POSTGRES_PASSWORD - valueFrom: - secretKeyRef: - name: nextcloud-db - key: db-password - - name: NEXTCLOUD_ADMIN_USER - valueFrom: - secretKeyRef: - name: nextcloud-admin - key: admin-user - - name: NEXTCLOUD_ADMIN_PASSWORD - valueFrom: - secretKeyRef: - name: nextcloud-admin - key: admin-password - - name: OIDC_CLIENT_ID - valueFrom: - secretKeyRef: - name: nextcloud-oidc - key: client-id - - name: OIDC_CLIENT_SECRET - valueFrom: - secretKeyRef: - name: nextcloud-oidc - key: client-secret volumeMounts: - name: nextcloud-web mountPath: /var/www/html @@ -197,40 +164,26 @@ spec: - name: nextcloud-config-extra mountPath: /var/www/html/config/extra.config.php subPath: extra.config.php + - name: vault-secrets + mountPath: /vault/secrets + readOnly: true + - name: vault-scripts + mountPath: /vault/scripts + readOnly: true containers: - name: nextcloud image: nextcloud:29-apache imagePullPolicy: IfNotPresent + command: ["/bin/sh", "-c"] + args: + - >- + . 
/vault/scripts/nextcloud_vault_env.sh + && exec /entrypoint.sh apache2-foreground env: # DB (external secret required: nextcloud-db with keys username,password,database) - name: POSTGRES_HOST value: postgres-service.postgres.svc.cluster.local - - name: POSTGRES_DB - valueFrom: - secretKeyRef: - name: nextcloud-db - key: database - - name: POSTGRES_USER - valueFrom: - secretKeyRef: - name: nextcloud-db - key: db-username - - name: POSTGRES_PASSWORD - valueFrom: - secretKeyRef: - name: nextcloud-db - key: db-password # Admin bootstrap (external secret: nextcloud-admin with keys admin-user, admin-password) - - name: NEXTCLOUD_ADMIN_USER - valueFrom: - secretKeyRef: - name: nextcloud-admin - key: admin-user - - name: NEXTCLOUD_ADMIN_PASSWORD - valueFrom: - secretKeyRef: - name: nextcloud-admin - key: admin-password - name: NEXTCLOUD_TRUSTED_DOMAINS value: cloud.bstein.dev - name: OVERWRITEHOST @@ -246,31 +199,11 @@ spec: value: "587" - name: SMTP_SECURE value: tls - - name: SMTP_NAME - valueFrom: - secretKeyRef: - name: nextcloud-smtp - key: smtp-username - - name: SMTP_PASSWORD - valueFrom: - secretKeyRef: - name: nextcloud-smtp - key: smtp-password - name: MAIL_FROM_ADDRESS value: no-reply - name: MAIL_DOMAIN value: bstein.dev # OIDC (external secret: nextcloud-oidc with keys client-id, client-secret) - - name: OIDC_CLIENT_ID - valueFrom: - secretKeyRef: - name: nextcloud-oidc - key: client-id - - name: OIDC_CLIENT_SECRET - valueFrom: - secretKeyRef: - name: nextcloud-oidc - key: client-secret - name: NEXTCLOUD_UPDATE value: "1" - name: APP_INSTALL @@ -290,6 +223,12 @@ spec: - name: nextcloud-config-extra mountPath: /var/www/html/config/extra.config.php subPath: extra.config.php + - name: vault-secrets + mountPath: /vault/secrets + readOnly: true + - name: vault-scripts + mountPath: /vault/scripts + readOnly: true resources: requests: cpu: 250m @@ -314,3 +253,13 @@ spec: configMap: name: nextcloud-config defaultMode: 0444 + - name: vault-secrets + csi: + driver: 
secrets-store.csi.k8s.io + readOnly: true + volumeAttributes: + secretProviderClass: nextcloud-vault + - name: vault-scripts + configMap: + name: nextcloud-vault-env + defaultMode: 0555 diff --git a/services/nextcloud/kustomization.yaml b/services/nextcloud/kustomization.yaml index 14e0ec1..f16db47 100644 --- a/services/nextcloud/kustomization.yaml +++ b/services/nextcloud/kustomization.yaml @@ -4,6 +4,8 @@ kind: Kustomization namespace: nextcloud resources: - namespace.yaml + - serviceaccount.yaml + - secretproviderclass.yaml - configmap.yaml - pvc.yaml - deployment.yaml @@ -13,6 +15,11 @@ resources: - service.yaml - ingress.yaml configMapGenerator: + - name: nextcloud-vault-env + files: + - nextcloud_vault_env.sh=scripts/nextcloud_vault_env.sh + options: + disableNameSuffixHash: true - name: nextcloud-maintenance-script files: - maintenance.sh=scripts/nextcloud-maintenance.sh diff --git a/services/nextcloud/maintenance-cronjob.yaml b/services/nextcloud/maintenance-cronjob.yaml index 618f548..d76478e 100644 --- a/services/nextcloud/maintenance-cronjob.yaml +++ b/services/nextcloud/maintenance-cronjob.yaml @@ -15,24 +15,20 @@ spec: securityContext: runAsUser: 0 runAsGroup: 0 + serviceAccountName: nextcloud-vault containers: - name: maintenance image: nextcloud:29-apache imagePullPolicy: IfNotPresent - command: ["/bin/bash", "/maintenance/maintenance.sh"] + command: ["/bin/sh", "-c"] + args: + - | + set -euo pipefail + . 
/vault/scripts/nextcloud_vault_env.sh + exec /maintenance/maintenance.sh env: - name: NC_URL value: https://cloud.bstein.dev - - name: ADMIN_USER - valueFrom: - secretKeyRef: - name: nextcloud-admin - key: admin-user - - name: ADMIN_PASS - valueFrom: - secretKeyRef: - name: nextcloud-admin - key: admin-password volumeMounts: - name: nextcloud-web mountPath: /var/www/html @@ -45,6 +41,12 @@ spec: - name: maintenance-script mountPath: /maintenance/maintenance.sh subPath: maintenance.sh + - name: vault-secrets + mountPath: /vault/secrets + readOnly: true + - name: vault-scripts + mountPath: /vault/scripts + readOnly: true resources: requests: cpu: 100m @@ -69,3 +71,13 @@ spec: configMap: name: nextcloud-maintenance-script defaultMode: 0755 + - name: vault-secrets + csi: + driver: secrets-store.csi.k8s.io + readOnly: true + volumeAttributes: + secretProviderClass: nextcloud-vault + - name: vault-scripts + configMap: + name: nextcloud-vault-env + defaultMode: 0555 diff --git a/services/nextcloud/scripts/nextcloud_vault_env.sh b/services/nextcloud/scripts/nextcloud_vault_env.sh new file mode 100644 index 0000000..0f34c9f --- /dev/null +++ b/services/nextcloud/scripts/nextcloud_vault_env.sh @@ -0,0 +1,27 @@ +#!/usr/bin/env sh +set -eu + +vault_dir="/vault/secrets" + +read_secret() { + cat "${vault_dir}/$1" +} + +export POSTGRES_DB="$(read_secret nextcloud-db__database)" +export POSTGRES_USER="$(read_secret nextcloud-db__db-username)" +export POSTGRES_PASSWORD="$(read_secret nextcloud-db__db-password)" + +export NEXTCLOUD_ADMIN_USER="$(read_secret nextcloud-admin__admin-user)" +export NEXTCLOUD_ADMIN_PASSWORD="$(read_secret nextcloud-admin__admin-password)" + +export ADMIN_USER="${NEXTCLOUD_ADMIN_USER}" +export ADMIN_PASS="${NEXTCLOUD_ADMIN_PASSWORD}" + +export OIDC_CLIENT_ID="$(read_secret nextcloud-oidc__client-id)" +export OIDC_CLIENT_SECRET="$(read_secret nextcloud-oidc__client-secret)" + +export SMTP_NAME="$(read_secret nextcloud-smtp__smtp-username)" +export 
SMTP_PASSWORD="$(read_secret nextcloud-smtp__smtp-password)" + +export KC_ADMIN_USER="$(read_secret keycloak-admin__username)" +export KC_ADMIN_PASS="$(read_secret keycloak-admin__password)" diff --git a/services/nextcloud/secretproviderclass.yaml b/services/nextcloud/secretproviderclass.yaml new file mode 100644 index 0000000..b5e6c37 --- /dev/null +++ b/services/nextcloud/secretproviderclass.yaml @@ -0,0 +1,45 @@ +# services/nextcloud/secretproviderclass.yaml +apiVersion: secrets-store.csi.x-k8s.io/v1 +kind: SecretProviderClass +metadata: + name: nextcloud-vault + namespace: nextcloud +spec: + provider: vault + parameters: + vaultAddress: "http://vault.vault.svc.cluster.local:8200" + roleName: "nextcloud" + objects: | + - objectName: "nextcloud-db__database" + secretPath: "kv/data/atlas/nextcloud/nextcloud-db" + secretKey: "database" + - objectName: "nextcloud-db__db-username" + secretPath: "kv/data/atlas/nextcloud/nextcloud-db" + secretKey: "db-username" + - objectName: "nextcloud-db__db-password" + secretPath: "kv/data/atlas/nextcloud/nextcloud-db" + secretKey: "db-password" + - objectName: "nextcloud-admin__admin-user" + secretPath: "kv/data/atlas/nextcloud/nextcloud-admin" + secretKey: "admin-user" + - objectName: "nextcloud-admin__admin-password" + secretPath: "kv/data/atlas/nextcloud/nextcloud-admin" + secretKey: "admin-password" + - objectName: "nextcloud-oidc__client-id" + secretPath: "kv/data/atlas/nextcloud/nextcloud-oidc" + secretKey: "client-id" + - objectName: "nextcloud-oidc__client-secret" + secretPath: "kv/data/atlas/nextcloud/nextcloud-oidc" + secretKey: "client-secret" + - objectName: "nextcloud-smtp__smtp-username" + secretPath: "kv/data/atlas/nextcloud/nextcloud-smtp" + secretKey: "smtp-username" + - objectName: "nextcloud-smtp__smtp-password" + secretPath: "kv/data/atlas/nextcloud/nextcloud-smtp" + secretKey: "smtp-password" + - objectName: "keycloak-admin__username" + secretPath: "kv/data/atlas/shared/keycloak-admin" + secretKey: "username" 
+ - objectName: "keycloak-admin__password" + secretPath: "kv/data/atlas/shared/keycloak-admin" + secretKey: "password" diff --git a/services/nextcloud/serviceaccount.yaml b/services/nextcloud/serviceaccount.yaml new file mode 100644 index 0000000..c97cd5b --- /dev/null +++ b/services/nextcloud/serviceaccount.yaml @@ -0,0 +1,6 @@ +# services/nextcloud/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: nextcloud-vault + namespace: nextcloud diff --git a/services/vault/scripts/vault_k8s_auth_configure.sh b/services/vault/scripts/vault_k8s_auth_configure.sh index fdffbea..39577ba 100644 --- a/services/vault/scripts/vault_k8s_auth_configure.sh +++ b/services/vault/scripts/vault_k8s_auth_configure.sh @@ -35,66 +35,71 @@ vault write auth/kubernetes/config \ kubernetes_host="${k8s_host}" \ kubernetes_ca_cert="${k8s_ca}" -for namespace in outline planka bstein-dev-home gitea vaultwarden sso; do - policy_name="${namespace}" - service_account="" - shared_paths="" +write_policy_and_role() { + role="$1" + namespace="$2" + service_accounts="$3" + read_paths="$4" + write_paths="$5" - case "${namespace}" in - outline) - service_account="outline-vault" - ;; - planka) - service_account="planka-vault" - ;; - bstein-dev-home) - service_account="bstein-dev-home" - shared_paths="shared/chat-ai-keys-runtime shared/portal-e2e-client" - ;; - gitea) - service_account="gitea-vault" - ;; - vaultwarden) - service_account="vaultwarden-vault" - ;; - sso) - service_account="sso-vault,mas-secrets-ensure" - shared_paths="shared/keycloak-admin shared/portal-e2e-client" - ;; - *) - log "unknown namespace ${namespace}" - exit 1 - ;; - esac - - policy_body="$(cat < Date: Wed, 14 Jan 2026 05:11:27 -0300 Subject: [PATCH 037/270] mailu: bump sync job name --- services/mailu/mailu-sync-job.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/services/mailu/mailu-sync-job.yaml b/services/mailu/mailu-sync-job.yaml index 60d48cb..370f212 100644 --- 
a/services/mailu/mailu-sync-job.yaml +++ b/services/mailu/mailu-sync-job.yaml @@ -2,7 +2,7 @@ apiVersion: batch/v1 kind: Job metadata: - name: mailu-sync + name: mailu-sync-2 namespace: mailu-mailserver spec: template: From 35369d53d8e1d5c9bbfacd8de1bbd0c58aea0a94 Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Wed, 14 Jan 2026 05:32:07 -0300 Subject: [PATCH 038/270] jobs: bump names for immutability --- services/comms/bstein-force-leave-job.yaml | 2 +- services/comms/comms-secrets-ensure-job.yaml | 2 +- services/comms/mas-admin-client-secret-ensure-job.yaml | 2 +- services/comms/mas-db-ensure-job.yaml | 2 +- services/comms/mas-local-users-ensure-job.yaml | 2 +- services/comms/othrys-kick-numeric-job.yaml | 2 +- services/comms/synapse-seeder-admin-ensure-job.yaml | 2 +- services/comms/synapse-signingkey-ensure-job.yaml | 2 +- services/comms/synapse-user-seed-job.yaml | 2 +- services/keycloak/harbor-oidc-secret-ensure-job.yaml | 2 +- services/keycloak/ldap-federation-job.yaml | 2 +- services/keycloak/logs-oidc-secret-ensure-job.yaml | 2 +- services/keycloak/mas-secrets-ensure-job.yaml | 2 +- services/keycloak/portal-e2e-client-job.yaml | 2 +- .../keycloak/portal-e2e-execute-actions-email-test-job.yaml | 2 +- services/keycloak/portal-e2e-target-client-job.yaml | 2 +- .../keycloak/portal-e2e-token-exchange-permissions-job.yaml | 2 +- services/keycloak/portal-e2e-token-exchange-test-job.yaml | 2 +- services/keycloak/realm-settings-job.yaml | 2 +- services/keycloak/synapse-oidc-secret-ensure-job.yaml | 2 +- services/keycloak/user-overrides-job.yaml | 2 +- services/keycloak/vault-oidc-secret-ensure-job.yaml | 2 +- 22 files changed, 22 insertions(+), 22 deletions(-) diff --git a/services/comms/bstein-force-leave-job.yaml b/services/comms/bstein-force-leave-job.yaml index 0c760a4..42428d8 100644 --- a/services/comms/bstein-force-leave-job.yaml +++ b/services/comms/bstein-force-leave-job.yaml @@ -2,7 +2,7 @@ apiVersion: batch/v1 kind: Job metadata: - name: 
bstein-leave-rooms-6 + name: bstein-leave-rooms-7 namespace: comms spec: backoffLimit: 0 diff --git a/services/comms/comms-secrets-ensure-job.yaml b/services/comms/comms-secrets-ensure-job.yaml index cc8ee02..674f1fd 100644 --- a/services/comms/comms-secrets-ensure-job.yaml +++ b/services/comms/comms-secrets-ensure-job.yaml @@ -2,7 +2,7 @@ apiVersion: batch/v1 kind: Job metadata: - name: comms-secrets-ensure-2 + name: comms-secrets-ensure-3 namespace: comms spec: backoffLimit: 1 diff --git a/services/comms/mas-admin-client-secret-ensure-job.yaml b/services/comms/mas-admin-client-secret-ensure-job.yaml index a84f68e..5d47c7d 100644 --- a/services/comms/mas-admin-client-secret-ensure-job.yaml +++ b/services/comms/mas-admin-client-secret-ensure-job.yaml @@ -36,7 +36,7 @@ subjects: apiVersion: batch/v1 kind: Job metadata: - name: mas-admin-client-secret-ensure-8 + name: mas-admin-client-secret-ensure-9 namespace: comms spec: backoffLimit: 2 diff --git a/services/comms/mas-db-ensure-job.yaml b/services/comms/mas-db-ensure-job.yaml index 28e7825..b843646 100644 --- a/services/comms/mas-db-ensure-job.yaml +++ b/services/comms/mas-db-ensure-job.yaml @@ -2,7 +2,7 @@ apiVersion: batch/v1 kind: Job metadata: - name: mas-db-ensure-17 + name: mas-db-ensure-18 namespace: comms spec: backoffLimit: 1 diff --git a/services/comms/mas-local-users-ensure-job.yaml b/services/comms/mas-local-users-ensure-job.yaml index b81b94d..ab44505 100644 --- a/services/comms/mas-local-users-ensure-job.yaml +++ b/services/comms/mas-local-users-ensure-job.yaml @@ -2,7 +2,7 @@ apiVersion: batch/v1 kind: Job metadata: - name: mas-local-users-ensure-6 + name: mas-local-users-ensure-7 namespace: comms spec: backoffLimit: 1 diff --git a/services/comms/othrys-kick-numeric-job.yaml b/services/comms/othrys-kick-numeric-job.yaml index df96b9e..59ef560 100644 --- a/services/comms/othrys-kick-numeric-job.yaml +++ b/services/comms/othrys-kick-numeric-job.yaml @@ -2,7 +2,7 @@ apiVersion: batch/v1 kind: Job 
metadata: - name: othrys-kick-numeric-1 + name: othrys-kick-numeric-2 namespace: comms spec: backoffLimit: 0 diff --git a/services/comms/synapse-seeder-admin-ensure-job.yaml b/services/comms/synapse-seeder-admin-ensure-job.yaml index dbe5609..3cccc5f 100644 --- a/services/comms/synapse-seeder-admin-ensure-job.yaml +++ b/services/comms/synapse-seeder-admin-ensure-job.yaml @@ -2,7 +2,7 @@ apiVersion: batch/v1 kind: Job metadata: - name: synapse-seeder-admin-ensure-2 + name: synapse-seeder-admin-ensure-3 namespace: comms spec: backoffLimit: 2 diff --git a/services/comms/synapse-signingkey-ensure-job.yaml b/services/comms/synapse-signingkey-ensure-job.yaml index ca83f52..9439f22 100644 --- a/services/comms/synapse-signingkey-ensure-job.yaml +++ b/services/comms/synapse-signingkey-ensure-job.yaml @@ -2,7 +2,7 @@ apiVersion: batch/v1 kind: Job metadata: - name: othrys-synapse-signingkey-ensure-4 + name: othrys-synapse-signingkey-ensure-5 namespace: comms spec: backoffLimit: 2 diff --git a/services/comms/synapse-user-seed-job.yaml b/services/comms/synapse-user-seed-job.yaml index 2285dad..f895958 100644 --- a/services/comms/synapse-user-seed-job.yaml +++ b/services/comms/synapse-user-seed-job.yaml @@ -2,7 +2,7 @@ apiVersion: batch/v1 kind: Job metadata: - name: synapse-user-seed-2 + name: synapse-user-seed-3 namespace: comms spec: backoffLimit: 1 diff --git a/services/keycloak/harbor-oidc-secret-ensure-job.yaml b/services/keycloak/harbor-oidc-secret-ensure-job.yaml index e4fbcee..4566e26 100644 --- a/services/keycloak/harbor-oidc-secret-ensure-job.yaml +++ b/services/keycloak/harbor-oidc-secret-ensure-job.yaml @@ -2,7 +2,7 @@ apiVersion: batch/v1 kind: Job metadata: - name: harbor-oidc-secret-ensure-3 + name: harbor-oidc-secret-ensure-4 namespace: sso spec: backoffLimit: 0 diff --git a/services/keycloak/ldap-federation-job.yaml b/services/keycloak/ldap-federation-job.yaml index d9d650b..06e7a82 100644 --- a/services/keycloak/ldap-federation-job.yaml +++ 
b/services/keycloak/ldap-federation-job.yaml @@ -2,7 +2,7 @@ apiVersion: batch/v1 kind: Job metadata: - name: keycloak-ldap-federation-5 + name: keycloak-ldap-federation-6 namespace: sso spec: backoffLimit: 2 diff --git a/services/keycloak/logs-oidc-secret-ensure-job.yaml b/services/keycloak/logs-oidc-secret-ensure-job.yaml index df3d569..ae5a8aa 100644 --- a/services/keycloak/logs-oidc-secret-ensure-job.yaml +++ b/services/keycloak/logs-oidc-secret-ensure-job.yaml @@ -2,7 +2,7 @@ apiVersion: batch/v1 kind: Job metadata: - name: logs-oidc-secret-ensure-2 + name: logs-oidc-secret-ensure-3 namespace: sso spec: backoffLimit: 0 diff --git a/services/keycloak/mas-secrets-ensure-job.yaml b/services/keycloak/mas-secrets-ensure-job.yaml index ec2d7a0..9d59479 100644 --- a/services/keycloak/mas-secrets-ensure-job.yaml +++ b/services/keycloak/mas-secrets-ensure-job.yaml @@ -8,7 +8,7 @@ metadata: apiVersion: batch/v1 kind: Job metadata: - name: mas-secrets-ensure-13 + name: mas-secrets-ensure-14 namespace: sso spec: backoffLimit: 0 diff --git a/services/keycloak/portal-e2e-client-job.yaml b/services/keycloak/portal-e2e-client-job.yaml index ea15178..1653656 100644 --- a/services/keycloak/portal-e2e-client-job.yaml +++ b/services/keycloak/portal-e2e-client-job.yaml @@ -2,7 +2,7 @@ apiVersion: batch/v1 kind: Job metadata: - name: keycloak-portal-e2e-client-2 + name: keycloak-portal-e2e-client-3 namespace: sso spec: backoffLimit: 0 diff --git a/services/keycloak/portal-e2e-execute-actions-email-test-job.yaml b/services/keycloak/portal-e2e-execute-actions-email-test-job.yaml index 817c526..9bba6a4 100644 --- a/services/keycloak/portal-e2e-execute-actions-email-test-job.yaml +++ b/services/keycloak/portal-e2e-execute-actions-email-test-job.yaml @@ -2,7 +2,7 @@ apiVersion: batch/v1 kind: Job metadata: - name: keycloak-portal-e2e-execute-actions-email-5 + name: keycloak-portal-e2e-execute-actions-email-6 namespace: sso spec: backoffLimit: 3 diff --git 
a/services/keycloak/portal-e2e-target-client-job.yaml b/services/keycloak/portal-e2e-target-client-job.yaml index 63a3ea9..a608b40 100644 --- a/services/keycloak/portal-e2e-target-client-job.yaml +++ b/services/keycloak/portal-e2e-target-client-job.yaml @@ -2,7 +2,7 @@ apiVersion: batch/v1 kind: Job metadata: - name: keycloak-portal-e2e-target-1 + name: keycloak-portal-e2e-target-2 namespace: sso spec: backoffLimit: 0 diff --git a/services/keycloak/portal-e2e-token-exchange-permissions-job.yaml b/services/keycloak/portal-e2e-token-exchange-permissions-job.yaml index c0ec397..c34e889 100644 --- a/services/keycloak/portal-e2e-token-exchange-permissions-job.yaml +++ b/services/keycloak/portal-e2e-token-exchange-permissions-job.yaml @@ -2,7 +2,7 @@ apiVersion: batch/v1 kind: Job metadata: - name: keycloak-portal-e2e-token-exchange-permissions-5 + name: keycloak-portal-e2e-token-exchange-permissions-6 namespace: sso spec: backoffLimit: 6 diff --git a/services/keycloak/portal-e2e-token-exchange-test-job.yaml b/services/keycloak/portal-e2e-token-exchange-test-job.yaml index 694a8ca..69f5d2e 100644 --- a/services/keycloak/portal-e2e-token-exchange-test-job.yaml +++ b/services/keycloak/portal-e2e-token-exchange-test-job.yaml @@ -2,7 +2,7 @@ apiVersion: batch/v1 kind: Job metadata: - name: keycloak-portal-e2e-token-exchange-test-1 + name: keycloak-portal-e2e-token-exchange-test-2 namespace: sso spec: backoffLimit: 6 diff --git a/services/keycloak/realm-settings-job.yaml b/services/keycloak/realm-settings-job.yaml index 0c5752f..78d31d1 100644 --- a/services/keycloak/realm-settings-job.yaml +++ b/services/keycloak/realm-settings-job.yaml @@ -2,7 +2,7 @@ apiVersion: batch/v1 kind: Job metadata: - name: keycloak-realm-settings-16 + name: keycloak-realm-settings-17 namespace: sso spec: backoffLimit: 0 diff --git a/services/keycloak/synapse-oidc-secret-ensure-job.yaml b/services/keycloak/synapse-oidc-secret-ensure-job.yaml index 38e6753..5f96cb1 100644 --- 
a/services/keycloak/synapse-oidc-secret-ensure-job.yaml +++ b/services/keycloak/synapse-oidc-secret-ensure-job.yaml @@ -2,7 +2,7 @@ apiVersion: batch/v1 kind: Job metadata: - name: synapse-oidc-secret-ensure-4 + name: synapse-oidc-secret-ensure-5 namespace: sso spec: backoffLimit: 0 diff --git a/services/keycloak/user-overrides-job.yaml b/services/keycloak/user-overrides-job.yaml index 2f580a9..0ea4f1f 100644 --- a/services/keycloak/user-overrides-job.yaml +++ b/services/keycloak/user-overrides-job.yaml @@ -2,7 +2,7 @@ apiVersion: batch/v1 kind: Job metadata: - name: keycloak-user-overrides-1 + name: keycloak-user-overrides-2 namespace: sso spec: backoffLimit: 0 diff --git a/services/keycloak/vault-oidc-secret-ensure-job.yaml b/services/keycloak/vault-oidc-secret-ensure-job.yaml index 2a8c382..f27335a 100644 --- a/services/keycloak/vault-oidc-secret-ensure-job.yaml +++ b/services/keycloak/vault-oidc-secret-ensure-job.yaml @@ -2,7 +2,7 @@ apiVersion: batch/v1 kind: Job metadata: - name: vault-oidc-secret-ensure-1 + name: vault-oidc-secret-ensure-2 namespace: sso spec: backoffLimit: 0 From 6898641b0a7da10ec439b2ee3731af3604843e2e Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Wed, 14 Jan 2026 05:35:51 -0300 Subject: [PATCH 039/270] comms: restore livekit token env --- services/comms/livekit-token-deployment.yaml | 28 ++++---------------- 1 file changed, 5 insertions(+), 23 deletions(-) diff --git a/services/comms/livekit-token-deployment.yaml b/services/comms/livekit-token-deployment.yaml index 750872c..98c46e0 100644 --- a/services/comms/livekit-token-deployment.yaml +++ b/services/comms/livekit-token-deployment.yaml @@ -34,29 +34,21 @@ spec: containers: - name: token-service image: ghcr.io/element-hq/lk-jwt-service:0.3.0 - command: - - /bin/sh - - -c - - | - . 
/vault/scripts/comms_vault_env.sh - exec /lk-jwt-service env: - name: LIVEKIT_URL value: wss://kit.live.bstein.dev/livekit/sfu - name: LIVEKIT_KEY value: primary + - name: LIVEKIT_SECRET + valueFrom: + secretKeyRef: + name: livekit-api + key: primary - name: LIVEKIT_FULL_ACCESS_HOMESERVERS value: live.bstein.dev ports: - containerPort: 8080 name: http - volumeMounts: - - name: vault-secrets - mountPath: /vault/secrets - readOnly: true - - name: vault-scripts - mountPath: /vault/scripts - readOnly: true resources: requests: cpu: 50m @@ -65,16 +57,6 @@ spec: cpu: 300m memory: 256Mi volumes: - - name: vault-secrets - csi: - driver: secrets-store.csi.k8s.io - readOnly: true - volumeAttributes: - secretProviderClass: comms-vault - - name: vault-scripts - configMap: - name: comms-vault-env - defaultMode: 0555 --- apiVersion: v1 kind: Service From 9ec08e1dc262f08d8d00e13c4298da952bccd606 Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Wed, 14 Jan 2026 05:41:01 -0300 Subject: [PATCH 040/270] jobs: drop apk in kubectl image --- services/comms/comms-secrets-ensure-job.yaml | 1 - services/comms/mas-admin-client-secret-ensure-job.yaml | 2 -- services/comms/mas-db-ensure-job.yaml | 2 -- services/comms/synapse-signingkey-ensure-job.yaml | 2 -- services/keycloak/mas-secrets-ensure-job.yaml | 2 -- 5 files changed, 9 deletions(-) diff --git a/services/comms/comms-secrets-ensure-job.yaml b/services/comms/comms-secrets-ensure-job.yaml index 674f1fd..62e945e 100644 --- a/services/comms/comms-secrets-ensure-job.yaml +++ b/services/comms/comms-secrets-ensure-job.yaml @@ -20,7 +20,6 @@ spec: set -eu trap 'echo "comms-secrets-ensure failed"; sleep 300' ERR umask 077 - apk add --no-cache curl jq >/dev/null safe_pass() { head -c 32 /dev/urandom | base64 | tr -d '\n' | tr '+/' '-_' | tr -d '=' diff --git a/services/comms/mas-admin-client-secret-ensure-job.yaml b/services/comms/mas-admin-client-secret-ensure-job.yaml index 5d47c7d..d9f0e94 100644 --- 
a/services/comms/mas-admin-client-secret-ensure-job.yaml +++ b/services/comms/mas-admin-client-secret-ensure-job.yaml @@ -67,8 +67,6 @@ spec: args: - | set -euo pipefail - apk add --no-cache curl jq >/dev/null - vault_addr="${VAULT_ADDR:-http://vault.vault.svc.cluster.local:8200}" vault_role="${VAULT_ROLE:-comms-secrets}" jwt="$(cat /var/run/secrets/kubernetes.io/serviceaccount/token)" diff --git a/services/comms/mas-db-ensure-job.yaml b/services/comms/mas-db-ensure-job.yaml index b843646..030c73a 100644 --- a/services/comms/mas-db-ensure-job.yaml +++ b/services/comms/mas-db-ensure-job.yaml @@ -24,8 +24,6 @@ spec: head -c 32 /dev/urandom | base64 | tr -d '\n' | tr '+/' '-_' | tr -d '=' } - apk add --no-cache curl jq >/dev/null - vault_addr="${VAULT_ADDR:-http://vault.vault.svc.cluster.local:8200}" vault_role="${VAULT_ROLE:-comms-secrets}" jwt="$(cat /var/run/secrets/kubernetes.io/serviceaccount/token)" diff --git a/services/comms/synapse-signingkey-ensure-job.yaml b/services/comms/synapse-signingkey-ensure-job.yaml index 9439f22..ae30561 100644 --- a/services/comms/synapse-signingkey-ensure-job.yaml +++ b/services/comms/synapse-signingkey-ensure-job.yaml @@ -37,8 +37,6 @@ spec: args: - | set -euo pipefail - apk add --no-cache curl jq >/dev/null - vault_addr="${VAULT_ADDR:-http://vault.vault.svc.cluster.local:8200}" vault_role="${VAULT_ROLE:-comms-secrets}" jwt="$(cat /var/run/secrets/kubernetes.io/serviceaccount/token)" diff --git a/services/keycloak/mas-secrets-ensure-job.yaml b/services/keycloak/mas-secrets-ensure-job.yaml index 9d59479..1ae1c9d 100644 --- a/services/keycloak/mas-secrets-ensure-job.yaml +++ b/services/keycloak/mas-secrets-ensure-job.yaml @@ -95,8 +95,6 @@ spec: args: - | set -euo pipefail - apk add --no-cache curl jq >/dev/null - vault_addr="${VAULT_ADDR:-http://vault.vault.svc.cluster.local:8200}" vault_role="${VAULT_ROLE:-sso-secrets}" jwt="$(cat /var/run/secrets/kubernetes.io/serviceaccount/token)" From 5683b3f941d2c55e2472c003277f015fbe78d520 
Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Wed, 14 Jan 2026 05:47:21 -0300 Subject: [PATCH 041/270] jobs: bump names after vault tweaks --- services/comms/comms-secrets-ensure-job.yaml | 2 +- services/comms/mas-admin-client-secret-ensure-job.yaml | 2 +- services/comms/mas-db-ensure-job.yaml | 2 +- services/comms/synapse-signingkey-ensure-job.yaml | 2 +- services/keycloak/mas-secrets-ensure-job.yaml | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/services/comms/comms-secrets-ensure-job.yaml b/services/comms/comms-secrets-ensure-job.yaml index 62e945e..2dfcdf0 100644 --- a/services/comms/comms-secrets-ensure-job.yaml +++ b/services/comms/comms-secrets-ensure-job.yaml @@ -2,7 +2,7 @@ apiVersion: batch/v1 kind: Job metadata: - name: comms-secrets-ensure-3 + name: comms-secrets-ensure-4 namespace: comms spec: backoffLimit: 1 diff --git a/services/comms/mas-admin-client-secret-ensure-job.yaml b/services/comms/mas-admin-client-secret-ensure-job.yaml index d9f0e94..4580634 100644 --- a/services/comms/mas-admin-client-secret-ensure-job.yaml +++ b/services/comms/mas-admin-client-secret-ensure-job.yaml @@ -36,7 +36,7 @@ subjects: apiVersion: batch/v1 kind: Job metadata: - name: mas-admin-client-secret-ensure-9 + name: mas-admin-client-secret-ensure-10 namespace: comms spec: backoffLimit: 2 diff --git a/services/comms/mas-db-ensure-job.yaml b/services/comms/mas-db-ensure-job.yaml index 030c73a..5894c0e 100644 --- a/services/comms/mas-db-ensure-job.yaml +++ b/services/comms/mas-db-ensure-job.yaml @@ -2,7 +2,7 @@ apiVersion: batch/v1 kind: Job metadata: - name: mas-db-ensure-18 + name: mas-db-ensure-19 namespace: comms spec: backoffLimit: 1 diff --git a/services/comms/synapse-signingkey-ensure-job.yaml b/services/comms/synapse-signingkey-ensure-job.yaml index ae30561..3b87eb3 100644 --- a/services/comms/synapse-signingkey-ensure-job.yaml +++ b/services/comms/synapse-signingkey-ensure-job.yaml @@ -2,7 +2,7 @@ apiVersion: batch/v1 kind: Job metadata: - 
name: othrys-synapse-signingkey-ensure-5 + name: othrys-synapse-signingkey-ensure-6 namespace: comms spec: backoffLimit: 2 diff --git a/services/keycloak/mas-secrets-ensure-job.yaml b/services/keycloak/mas-secrets-ensure-job.yaml index 1ae1c9d..42a78b0 100644 --- a/services/keycloak/mas-secrets-ensure-job.yaml +++ b/services/keycloak/mas-secrets-ensure-job.yaml @@ -8,7 +8,7 @@ metadata: apiVersion: batch/v1 kind: Job metadata: - name: mas-secrets-ensure-14 + name: mas-secrets-ensure-15 namespace: sso spec: backoffLimit: 0 From 37302664c2273a6c640f0008a13170cfd16f4497 Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Wed, 14 Jan 2026 06:16:42 -0300 Subject: [PATCH 042/270] vault: add remaining secret syncs --- services/crypto/xmr-miner/kustomization.yaml | 3 + .../crypto/xmr-miner/secretproviderclass.yaml | 21 ++++++ .../xmr-miner/vault-serviceaccount.yaml | 6 ++ .../xmr-miner/vault-sync-deployment.yaml | 34 +++++++++ services/jenkins/kustomization.yaml | 3 + services/jenkins/secretproviderclass.yaml | 72 +++++++++++++++++++ services/jenkins/vault-serviceaccount.yaml | 6 ++ services/jenkins/vault-sync-deployment.yaml | 34 +++++++++ services/keycloak/secretproviderclass.yaml | 26 +++++++ services/logging/kustomization.yaml | 3 + services/logging/secretproviderclass.yaml | 31 ++++++++ services/logging/vault-serviceaccount.yaml | 6 ++ services/logging/vault-sync-deployment.yaml | 34 +++++++++ services/monitoring/kustomization.yaml | 3 + services/monitoring/secretproviderclass.yaml | 44 ++++++++++++ services/monitoring/vault-serviceaccount.yaml | 6 ++ .../monitoring/vault-sync-deployment.yaml | 34 +++++++++ services/pegasus/kustomization.yaml | 3 + services/pegasus/secretproviderclass.yaml | 31 ++++++++ services/pegasus/vault-serviceaccount.yaml | 6 ++ services/pegasus/vault-sync-deployment.yaml | 34 +++++++++ .../vault/scripts/vault_k8s_auth_configure.sh | 10 +++ 22 files changed, 450 insertions(+) create mode 100644 services/crypto/xmr-miner/secretproviderclass.yaml 
create mode 100644 services/crypto/xmr-miner/vault-serviceaccount.yaml create mode 100644 services/crypto/xmr-miner/vault-sync-deployment.yaml create mode 100644 services/jenkins/secretproviderclass.yaml create mode 100644 services/jenkins/vault-serviceaccount.yaml create mode 100644 services/jenkins/vault-sync-deployment.yaml create mode 100644 services/logging/secretproviderclass.yaml create mode 100644 services/logging/vault-serviceaccount.yaml create mode 100644 services/logging/vault-sync-deployment.yaml create mode 100644 services/monitoring/secretproviderclass.yaml create mode 100644 services/monitoring/vault-serviceaccount.yaml create mode 100644 services/monitoring/vault-sync-deployment.yaml create mode 100644 services/pegasus/secretproviderclass.yaml create mode 100644 services/pegasus/vault-serviceaccount.yaml create mode 100644 services/pegasus/vault-sync-deployment.yaml diff --git a/services/crypto/xmr-miner/kustomization.yaml b/services/crypto/xmr-miner/kustomization.yaml index 46c9767..2ded8db 100644 --- a/services/crypto/xmr-miner/kustomization.yaml +++ b/services/crypto/xmr-miner/kustomization.yaml @@ -3,6 +3,9 @@ apiVersion: kustomize.config.k8s.io/v1beta1 kind: Kustomization resources: - configmap-sources.yaml + - vault-serviceaccount.yaml + - secretproviderclass.yaml - deployment.yaml + - vault-sync-deployment.yaml - service.yaml - xmrig-daemonset.yaml diff --git a/services/crypto/xmr-miner/secretproviderclass.yaml b/services/crypto/xmr-miner/secretproviderclass.yaml new file mode 100644 index 0000000..2d61854 --- /dev/null +++ b/services/crypto/xmr-miner/secretproviderclass.yaml @@ -0,0 +1,21 @@ +# services/crypto/xmr-miner/secretproviderclass.yaml +apiVersion: secrets-store.csi.x-k8s.io/v1 +kind: SecretProviderClass +metadata: + name: crypto-vault + namespace: crypto +spec: + provider: vault + parameters: + vaultAddress: "http://vault.vault.svc.cluster.local:8200" + roleName: "crypto" + objects: | + - objectName: "xmr-payout__address" + 
secretPath: "kv/data/atlas/crypto/xmr-payout" + secretKey: "address" + secretObjects: + - secretName: xmr-payout + type: Opaque + data: + - objectName: xmr-payout__address + key: address diff --git a/services/crypto/xmr-miner/vault-serviceaccount.yaml b/services/crypto/xmr-miner/vault-serviceaccount.yaml new file mode 100644 index 0000000..96a12c7 --- /dev/null +++ b/services/crypto/xmr-miner/vault-serviceaccount.yaml @@ -0,0 +1,6 @@ +# services/crypto/xmr-miner/vault-serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: crypto-vault-sync + namespace: crypto diff --git a/services/crypto/xmr-miner/vault-sync-deployment.yaml b/services/crypto/xmr-miner/vault-sync-deployment.yaml new file mode 100644 index 0000000..fcd08c3 --- /dev/null +++ b/services/crypto/xmr-miner/vault-sync-deployment.yaml @@ -0,0 +1,34 @@ +# services/crypto/xmr-miner/vault-sync-deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: crypto-vault-sync + namespace: crypto +spec: + replicas: 1 + selector: + matchLabels: + app: crypto-vault-sync + template: + metadata: + labels: + app: crypto-vault-sync + spec: + serviceAccountName: crypto-vault-sync + containers: + - name: sync + image: alpine:3.20 + command: ["/bin/sh", "-c"] + args: + - "sleep infinity" + volumeMounts: + - name: vault-secrets + mountPath: /vault/secrets + readOnly: true + volumes: + - name: vault-secrets + csi: + driver: secrets-store.csi.k8s.io + readOnly: true + volumeAttributes: + secretProviderClass: crypto-vault diff --git a/services/jenkins/kustomization.yaml b/services/jenkins/kustomization.yaml index acb6fb4..809f308 100644 --- a/services/jenkins/kustomization.yaml +++ b/services/jenkins/kustomization.yaml @@ -5,10 +5,13 @@ namespace: jenkins resources: - namespace.yaml - serviceaccount.yaml + - vault-serviceaccount.yaml + - secretproviderclass.yaml - pvc.yaml - configmap-jcasc.yaml - configmap-plugins.yaml - deployment.yaml + - vault-sync-deployment.yaml - service.yaml - 
ingress.yaml diff --git a/services/jenkins/secretproviderclass.yaml b/services/jenkins/secretproviderclass.yaml new file mode 100644 index 0000000..01cc66e --- /dev/null +++ b/services/jenkins/secretproviderclass.yaml @@ -0,0 +1,72 @@ +# services/jenkins/secretproviderclass.yaml +apiVersion: secrets-store.csi.x-k8s.io/v1 +kind: SecretProviderClass +metadata: + name: jenkins-vault + namespace: jenkins +spec: + provider: vault + parameters: + vaultAddress: "http://vault.vault.svc.cluster.local:8200" + roleName: "jenkins" + objects: | + - objectName: "jenkins-oidc__clientId" + secretPath: "kv/data/atlas/jenkins/jenkins-oidc" + secretKey: "clientId" + - objectName: "jenkins-oidc__clientSecret" + secretPath: "kv/data/atlas/jenkins/jenkins-oidc" + secretKey: "clientSecret" + - objectName: "jenkins-oidc__authorizationUrl" + secretPath: "kv/data/atlas/jenkins/jenkins-oidc" + secretKey: "authorizationUrl" + - objectName: "jenkins-oidc__tokenUrl" + secretPath: "kv/data/atlas/jenkins/jenkins-oidc" + secretKey: "tokenUrl" + - objectName: "jenkins-oidc__userInfoUrl" + secretPath: "kv/data/atlas/jenkins/jenkins-oidc" + secretKey: "userInfoUrl" + - objectName: "jenkins-oidc__logoutUrl" + secretPath: "kv/data/atlas/jenkins/jenkins-oidc" + secretKey: "logoutUrl" + - objectName: "harbor-robot-creds__username" + secretPath: "kv/data/atlas/jenkins/harbor-robot-creds" + secretKey: "username" + - objectName: "harbor-robot-creds__password" + secretPath: "kv/data/atlas/jenkins/harbor-robot-creds" + secretKey: "password" + - objectName: "gitea-pat__username" + secretPath: "kv/data/atlas/jenkins/gitea-pat" + secretKey: "username" + - objectName: "gitea-pat__token" + secretPath: "kv/data/atlas/jenkins/gitea-pat" + secretKey: "token" + secretObjects: + - secretName: jenkins-oidc + type: Opaque + data: + - objectName: jenkins-oidc__clientId + key: clientId + - objectName: jenkins-oidc__clientSecret + key: clientSecret + - objectName: jenkins-oidc__authorizationUrl + key: authorizationUrl + - 
objectName: jenkins-oidc__tokenUrl + key: tokenUrl + - objectName: jenkins-oidc__userInfoUrl + key: userInfoUrl + - objectName: jenkins-oidc__logoutUrl + key: logoutUrl + - secretName: harbor-robot-creds + type: Opaque + data: + - objectName: harbor-robot-creds__username + key: username + - objectName: harbor-robot-creds__password + key: password + - secretName: gitea-pat + type: Opaque + data: + - objectName: gitea-pat__username + key: username + - objectName: gitea-pat__token + key: token diff --git a/services/jenkins/vault-serviceaccount.yaml b/services/jenkins/vault-serviceaccount.yaml new file mode 100644 index 0000000..8d31400 --- /dev/null +++ b/services/jenkins/vault-serviceaccount.yaml @@ -0,0 +1,6 @@ +# services/jenkins/vault-serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: jenkins-vault-sync + namespace: jenkins diff --git a/services/jenkins/vault-sync-deployment.yaml b/services/jenkins/vault-sync-deployment.yaml new file mode 100644 index 0000000..6de64f9 --- /dev/null +++ b/services/jenkins/vault-sync-deployment.yaml @@ -0,0 +1,34 @@ +# services/jenkins/vault-sync-deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: jenkins-vault-sync + namespace: jenkins +spec: + replicas: 1 + selector: + matchLabels: + app: jenkins-vault-sync + template: + metadata: + labels: + app: jenkins-vault-sync + spec: + serviceAccountName: jenkins-vault-sync + containers: + - name: sync + image: alpine:3.20 + command: ["/bin/sh", "-c"] + args: + - "sleep infinity" + volumeMounts: + - name: vault-secrets + mountPath: /vault/secrets + readOnly: true + volumes: + - name: vault-secrets + csi: + driver: secrets-store.csi.k8s.io + readOnly: true + volumeAttributes: + secretProviderClass: jenkins-vault diff --git a/services/keycloak/secretproviderclass.yaml b/services/keycloak/secretproviderclass.yaml index 7015c12..7ca83ec 100644 --- a/services/keycloak/secretproviderclass.yaml +++ b/services/keycloak/secretproviderclass.yaml @@ 
-37,3 +37,29 @@ spec: - objectName: "openldap-admin__LDAP_CONFIG_PASSWORD" secretPath: "kv/data/atlas/sso/openldap-admin" secretKey: "LDAP_CONFIG_PASSWORD" + - objectName: "oauth2-proxy-oidc__client_id" + secretPath: "kv/data/atlas/sso/oauth2-proxy-oidc" + secretKey: "client_id" + - objectName: "oauth2-proxy-oidc__client_secret" + secretPath: "kv/data/atlas/sso/oauth2-proxy-oidc" + secretKey: "client_secret" + - objectName: "oauth2-proxy-oidc__cookie_secret" + secretPath: "kv/data/atlas/sso/oauth2-proxy-oidc" + secretKey: "cookie_secret" + secretObjects: + - secretName: openldap-admin + type: Opaque + data: + - objectName: openldap-admin__LDAP_ADMIN_PASSWORD + key: LDAP_ADMIN_PASSWORD + - objectName: openldap-admin__LDAP_CONFIG_PASSWORD + key: LDAP_CONFIG_PASSWORD + - secretName: oauth2-proxy-oidc + type: Opaque + data: + - objectName: oauth2-proxy-oidc__client_id + key: client_id + - objectName: oauth2-proxy-oidc__client_secret + key: client_secret + - objectName: oauth2-proxy-oidc__cookie_secret + key: cookie_secret diff --git a/services/logging/kustomization.yaml b/services/logging/kustomization.yaml index fe010f6..d1c2852 100644 --- a/services/logging/kustomization.yaml +++ b/services/logging/kustomization.yaml @@ -8,6 +8,8 @@ resources: - node-log-rotation-serviceaccount.yaml - node-image-gc-rpi4-serviceaccount.yaml - node-image-prune-rpi5-serviceaccount.yaml + - vault-serviceaccount.yaml + - secretproviderclass.yaml - opensearch-pvc.yaml - opensearch-helmrelease.yaml - opensearch-dashboards-helmrelease.yaml @@ -22,6 +24,7 @@ resources: - node-image-gc-rpi4-daemonset.yaml - node-image-prune-rpi5-daemonset.yaml - oauth2-proxy.yaml + - vault-sync-deployment.yaml - ingress.yaml configMapGenerator: diff --git a/services/logging/secretproviderclass.yaml b/services/logging/secretproviderclass.yaml new file mode 100644 index 0000000..70ecb3d --- /dev/null +++ b/services/logging/secretproviderclass.yaml @@ -0,0 +1,31 @@ +# services/logging/secretproviderclass.yaml 
+apiVersion: secrets-store.csi.x-k8s.io/v1 +kind: SecretProviderClass +metadata: + name: logging-vault + namespace: logging +spec: + provider: vault + parameters: + vaultAddress: "http://vault.vault.svc.cluster.local:8200" + roleName: "logging" + objects: | + - objectName: "oauth2-proxy-logs-oidc__client_id" + secretPath: "kv/data/atlas/logging/oauth2-proxy-logs-oidc" + secretKey: "client_id" + - objectName: "oauth2-proxy-logs-oidc__client_secret" + secretPath: "kv/data/atlas/logging/oauth2-proxy-logs-oidc" + secretKey: "client_secret" + - objectName: "oauth2-proxy-logs-oidc__cookie_secret" + secretPath: "kv/data/atlas/logging/oauth2-proxy-logs-oidc" + secretKey: "cookie_secret" + secretObjects: + - secretName: oauth2-proxy-logs-oidc + type: Opaque + data: + - objectName: oauth2-proxy-logs-oidc__client_id + key: client_id + - objectName: oauth2-proxy-logs-oidc__client_secret + key: client_secret + - objectName: oauth2-proxy-logs-oidc__cookie_secret + key: cookie_secret diff --git a/services/logging/vault-serviceaccount.yaml b/services/logging/vault-serviceaccount.yaml new file mode 100644 index 0000000..9104c20 --- /dev/null +++ b/services/logging/vault-serviceaccount.yaml @@ -0,0 +1,6 @@ +# services/logging/vault-serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: logging-vault-sync + namespace: logging diff --git a/services/logging/vault-sync-deployment.yaml b/services/logging/vault-sync-deployment.yaml new file mode 100644 index 0000000..41a4f7d --- /dev/null +++ b/services/logging/vault-sync-deployment.yaml @@ -0,0 +1,34 @@ +# services/logging/vault-sync-deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: logging-vault-sync + namespace: logging +spec: + replicas: 1 + selector: + matchLabels: + app: logging-vault-sync + template: + metadata: + labels: + app: logging-vault-sync + spec: + serviceAccountName: logging-vault-sync + containers: + - name: sync + image: alpine:3.20 + command: ["/bin/sh", "-c"] + args: + - 
"sleep infinity" + volumeMounts: + - name: vault-secrets + mountPath: /vault/secrets + readOnly: true + volumes: + - name: vault-secrets + csi: + driver: secrets-store.csi.k8s.io + readOnly: true + volumeAttributes: + secretProviderClass: logging-vault diff --git a/services/monitoring/kustomization.yaml b/services/monitoring/kustomization.yaml index 0dafba7..66065cb 100644 --- a/services/monitoring/kustomization.yaml +++ b/services/monitoring/kustomization.yaml @@ -5,6 +5,8 @@ namespace: monitoring resources: - namespace.yaml - rbac.yaml + - secretproviderclass.yaml + - vault-serviceaccount.yaml - grafana-dashboard-overview.yaml - grafana-dashboard-pods.yaml - grafana-dashboard-nodes.yaml @@ -16,6 +18,7 @@ resources: - jetson-tegrastats-exporter.yaml - postmark-exporter-service.yaml - postmark-exporter-deployment.yaml + - vault-sync-deployment.yaml - grafana-alerting-config.yaml - grafana-smtp-sync-serviceaccount.yaml - grafana-smtp-sync-rbac.yaml diff --git a/services/monitoring/secretproviderclass.yaml b/services/monitoring/secretproviderclass.yaml new file mode 100644 index 0000000..fcb7967 --- /dev/null +++ b/services/monitoring/secretproviderclass.yaml @@ -0,0 +1,44 @@ +# services/monitoring/secretproviderclass.yaml +apiVersion: secrets-store.csi.x-k8s.io/v1 +kind: SecretProviderClass +metadata: + name: monitoring-vault + namespace: monitoring +spec: + provider: vault + parameters: + vaultAddress: "http://vault.vault.svc.cluster.local:8200" + roleName: "monitoring" + objects: | + - objectName: "grafana-admin__admin-user" + secretPath: "kv/data/atlas/monitoring/grafana-admin" + secretKey: "admin-user" + - objectName: "grafana-admin__admin-password" + secretPath: "kv/data/atlas/monitoring/grafana-admin" + secretKey: "admin-password" + - objectName: "postmark-exporter__relay-username" + secretPath: "kv/data/atlas/monitoring/postmark-exporter" + secretKey: "relay-username" + - objectName: "postmark-exporter__relay-password" + secretPath: 
"kv/data/atlas/monitoring/postmark-exporter" + secretKey: "relay-password" + - objectName: "postmark-exporter__sending-limit" + secretPath: "kv/data/atlas/monitoring/postmark-exporter" + secretKey: "sending-limit" + secretObjects: + - secretName: grafana-admin + type: Opaque + data: + - objectName: grafana-admin__admin-user + key: admin-user + - objectName: grafana-admin__admin-password + key: admin-password + - secretName: postmark-exporter + type: Opaque + data: + - objectName: postmark-exporter__relay-username + key: server-token + - objectName: postmark-exporter__relay-password + key: server-token-fallback + - objectName: postmark-exporter__sending-limit + key: sending-limit diff --git a/services/monitoring/vault-serviceaccount.yaml b/services/monitoring/vault-serviceaccount.yaml new file mode 100644 index 0000000..fa23093 --- /dev/null +++ b/services/monitoring/vault-serviceaccount.yaml @@ -0,0 +1,6 @@ +# services/monitoring/vault-serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: monitoring-vault-sync + namespace: monitoring diff --git a/services/monitoring/vault-sync-deployment.yaml b/services/monitoring/vault-sync-deployment.yaml new file mode 100644 index 0000000..d335330 --- /dev/null +++ b/services/monitoring/vault-sync-deployment.yaml @@ -0,0 +1,34 @@ +# services/monitoring/vault-sync-deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: monitoring-vault-sync + namespace: monitoring +spec: + replicas: 1 + selector: + matchLabels: + app: monitoring-vault-sync + template: + metadata: + labels: + app: monitoring-vault-sync + spec: + serviceAccountName: monitoring-vault-sync + containers: + - name: sync + image: alpine:3.20 + command: ["/bin/sh", "-c"] + args: + - "sleep infinity" + volumeMounts: + - name: vault-secrets + mountPath: /vault/secrets + readOnly: true + volumes: + - name: vault-secrets + csi: + driver: secrets-store.csi.k8s.io + readOnly: true + volumeAttributes: + secretProviderClass: 
monitoring-vault diff --git a/services/pegasus/kustomization.yaml b/services/pegasus/kustomization.yaml index 5902595..bef2b40 100644 --- a/services/pegasus/kustomization.yaml +++ b/services/pegasus/kustomization.yaml @@ -3,8 +3,11 @@ apiVersion: kustomize.config.k8s.io/v1beta1 kind: Kustomization resources: - configmap.yaml + - vault-serviceaccount.yaml + - secretproviderclass.yaml - service.yaml - deployment.yaml + - vault-sync-deployment.yaml - ingress.yaml patches: - target: { kind: Deployment, name: pegasus, namespace: jellyfin } diff --git a/services/pegasus/secretproviderclass.yaml b/services/pegasus/secretproviderclass.yaml new file mode 100644 index 0000000..fa7448b --- /dev/null +++ b/services/pegasus/secretproviderclass.yaml @@ -0,0 +1,31 @@ +# services/pegasus/secretproviderclass.yaml +apiVersion: secrets-store.csi.x-k8s.io/v1 +kind: SecretProviderClass +metadata: + name: pegasus-vault + namespace: jellyfin +spec: + provider: vault + parameters: + vaultAddress: "http://vault.vault.svc.cluster.local:8200" + roleName: "pegasus" + objects: | + - objectName: "pegasus-secrets__PEGASUS_SESSION_KEY" + secretPath: "kv/data/atlas/pegasus/pegasus-secrets" + secretKey: "PEGASUS_SESSION_KEY" + - objectName: "pegasus-secrets__JELLYFIN_URL" + secretPath: "kv/data/atlas/pegasus/pegasus-secrets" + secretKey: "JELLYFIN_URL" + - objectName: "pegasus-secrets__JELLYFIN_API_KEY" + secretPath: "kv/data/atlas/pegasus/pegasus-secrets" + secretKey: "JELLYFIN_API_KEY" + secretObjects: + - secretName: pegasus-secrets + type: Opaque + data: + - objectName: pegasus-secrets__PEGASUS_SESSION_KEY + key: PEGASUS_SESSION_KEY + - objectName: pegasus-secrets__JELLYFIN_URL + key: JELLYFIN_URL + - objectName: pegasus-secrets__JELLYFIN_API_KEY + key: JELLYFIN_API_KEY diff --git a/services/pegasus/vault-serviceaccount.yaml b/services/pegasus/vault-serviceaccount.yaml new file mode 100644 index 0000000..ed56930 --- /dev/null +++ b/services/pegasus/vault-serviceaccount.yaml @@ -0,0 +1,6 @@ +# 
services/pegasus/vault-serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: pegasus-vault-sync + namespace: jellyfin diff --git a/services/pegasus/vault-sync-deployment.yaml b/services/pegasus/vault-sync-deployment.yaml new file mode 100644 index 0000000..6128d8d --- /dev/null +++ b/services/pegasus/vault-sync-deployment.yaml @@ -0,0 +1,34 @@ +# services/pegasus/vault-sync-deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: pegasus-vault-sync + namespace: jellyfin +spec: + replicas: 1 + selector: + matchLabels: + app: pegasus-vault-sync + template: + metadata: + labels: + app: pegasus-vault-sync + spec: + serviceAccountName: pegasus-vault-sync + containers: + - name: sync + image: alpine:3.20 + command: ["/bin/sh", "-c"] + args: + - "sleep infinity" + volumeMounts: + - name: vault-secrets + mountPath: /vault/secrets + readOnly: true + volumes: + - name: vault-secrets + csi: + driver: secrets-store.csi.k8s.io + readOnly: true + volumeAttributes: + secretProviderClass: pegasus-vault diff --git a/services/vault/scripts/vault_k8s_auth_configure.sh b/services/vault/scripts/vault_k8s_auth_configure.sh index 39577ba..c849461 100644 --- a/services/vault/scripts/vault_k8s_auth_configure.sh +++ b/services/vault/scripts/vault_k8s_auth_configure.sh @@ -95,6 +95,16 @@ write_policy_and_role "nextcloud" "nextcloud" "nextcloud-vault" \ "nextcloud/* shared/keycloak-admin" "" write_policy_and_role "comms" "comms" "comms-vault,atlasbot" \ "comms/* shared/chat-ai-keys-runtime" "" +write_policy_and_role "jenkins" "jenkins" "jenkins-vault-sync" \ + "jenkins/*" "" +write_policy_and_role "monitoring" "monitoring" "monitoring-vault-sync" \ + "monitoring/*" "" +write_policy_and_role "logging" "logging" "logging-vault-sync" \ + "logging/*" "" +write_policy_and_role "pegasus" "jellyfin" "pegasus-vault-sync" \ + "pegasus/*" "" +write_policy_and_role "crypto" "crypto" "crypto-vault-sync" \ + "crypto/*" "" write_policy_and_role "sso-secrets" "sso" 
"mas-secrets-ensure" \ "shared/keycloak-admin" \ From b8e50bb0a6e5faebe9b88ed3e68c062fbb95d944 Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Wed, 14 Jan 2026 06:41:34 -0300 Subject: [PATCH 043/270] monitoring: move grafana smtp to vault --- scripts/dashboards_render_atlas.py | 2 +- .../monitoring/dashboards/atlas-storage.json | 2 +- .../monitoring/grafana-alerting-config.yaml | 2 +- .../monitoring/grafana-dashboard-storage.yaml | 2 +- .../monitoring/grafana-smtp-sync-cronjob.yaml | 44 ----------------- .../monitoring/grafana-smtp-sync-rbac.yaml | 49 ------------------- .../grafana-smtp-sync-serviceaccount.yaml | 6 --- services/monitoring/kustomization.yaml | 9 ---- .../monitoring/scripts/grafana_smtp_sync.sh | 31 ------------ services/monitoring/secretproviderclass.yaml | 13 +++++ .../vault/scripts/vault_k8s_auth_configure.sh | 2 +- 11 files changed, 18 insertions(+), 144 deletions(-) delete mode 100644 services/monitoring/grafana-smtp-sync-cronjob.yaml delete mode 100644 services/monitoring/grafana-smtp-sync-rbac.yaml delete mode 100644 services/monitoring/grafana-smtp-sync-serviceaccount.yaml delete mode 100644 services/monitoring/scripts/grafana_smtp_sync.sh diff --git a/scripts/dashboards_render_atlas.py b/scripts/dashboards_render_atlas.py index 01fe9c7..a5abfe8 100644 --- a/scripts/dashboards_render_atlas.py +++ b/scripts/dashboards_render_atlas.py @@ -1727,7 +1727,7 @@ def build_storage_dashboard(): stat_panel( 31, "Maintenance Cron Freshness (s)", - 'time() - max by (cronjob) (kube_cronjob_status_last_successful_time{namespace="maintenance",cronjob=~"image-sweeper|grafana-smtp-sync"})', + 'time() - max by (cronjob) (kube_cronjob_status_last_successful_time{namespace="maintenance",cronjob="image-sweeper"})', {"h": 4, "w": 12, "x": 12, "y": 44}, unit="s", thresholds={ diff --git a/services/monitoring/dashboards/atlas-storage.json b/services/monitoring/dashboards/atlas-storage.json index d93a941..0eca11c 100644 --- 
a/services/monitoring/dashboards/atlas-storage.json +++ b/services/monitoring/dashboards/atlas-storage.json @@ -494,7 +494,7 @@ }, "targets": [ { - "expr": "time() - max by (cronjob) (kube_cronjob_status_last_successful_time{namespace=\"maintenance\",cronjob=~\"image-sweeper|grafana-smtp-sync\"})", + "expr": "time() - max by (cronjob) (kube_cronjob_status_last_successful_time{namespace=\"maintenance\",cronjob=\"image-sweeper\"})", "refId": "A" } ], diff --git a/services/monitoring/grafana-alerting-config.yaml b/services/monitoring/grafana-alerting-config.yaml index c679bff..daa1e29 100644 --- a/services/monitoring/grafana-alerting-config.yaml +++ b/services/monitoring/grafana-alerting-config.yaml @@ -244,7 +244,7 @@ data: to: 0 datasourceUid: atlas-vm model: - expr: time() - max by (cronjob) (kube_cronjob_status_last_successful_time{namespace="maintenance",cronjob=~"image-sweeper|grafana-smtp-sync"}) + expr: time() - max by (cronjob) (kube_cronjob_status_last_successful_time{namespace="maintenance",cronjob="image-sweeper"}) intervalMs: 60000 maxDataPoints: 43200 legendFormat: '{{cronjob}}' diff --git a/services/monitoring/grafana-dashboard-storage.yaml b/services/monitoring/grafana-dashboard-storage.yaml index 5ce4186..d25e922 100644 --- a/services/monitoring/grafana-dashboard-storage.yaml +++ b/services/monitoring/grafana-dashboard-storage.yaml @@ -503,7 +503,7 @@ data: }, "targets": [ { - "expr": "time() - max by (cronjob) (kube_cronjob_status_last_successful_time{namespace=\"maintenance\",cronjob=~\"image-sweeper|grafana-smtp-sync\"})", + "expr": "time() - max by (cronjob) (kube_cronjob_status_last_successful_time{namespace=\"maintenance\",cronjob=\"image-sweeper\"})", "refId": "A" } ], diff --git a/services/monitoring/grafana-smtp-sync-cronjob.yaml b/services/monitoring/grafana-smtp-sync-cronjob.yaml deleted file mode 100644 index 3b92d4c..0000000 --- a/services/monitoring/grafana-smtp-sync-cronjob.yaml +++ /dev/null @@ -1,44 +0,0 @@ -# 
services/monitoring/grafana-smtp-sync-cronjob.yaml -apiVersion: batch/v1 -kind: CronJob -metadata: - name: grafana-smtp-sync - namespace: monitoring -spec: - schedule: "15 3 * * *" - concurrencyPolicy: Forbid - jobTemplate: - spec: - template: - spec: - serviceAccountName: grafana-smtp-sync - restartPolicy: OnFailure - containers: - - name: sync - image: bitnami/kubectl@sha256:554ab88b1858e8424c55de37ad417b16f2a0e65d1607aa0f3fe3ce9b9f10b131 - command: ["/bin/sh", "-c"] - args: - - | - set -euo pipefail - if ! command -v jq >/dev/null 2>&1; then - apt-get update >/dev/null && apt-get install -y jq >/dev/null - fi - exec /scripts/sync.sh - env: - - name: SOURCE_NS - value: mailu-mailserver - - name: SOURCE_SECRET - value: mailu-postmark-relay - - name: TARGET_NS - value: monitoring - - name: TARGET_SECRET - value: grafana-smtp - volumeMounts: - - name: script - mountPath: /scripts - readOnly: true - volumes: - - name: script - configMap: - name: grafana-smtp-sync-script - defaultMode: 0555 diff --git a/services/monitoring/grafana-smtp-sync-rbac.yaml b/services/monitoring/grafana-smtp-sync-rbac.yaml deleted file mode 100644 index 532d622..0000000 --- a/services/monitoring/grafana-smtp-sync-rbac.yaml +++ /dev/null @@ -1,49 +0,0 @@ -# services/monitoring/grafana-smtp-sync-rbac.yaml -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: grafana-smtp-sync -rules: - - apiGroups: [""] - resources: ["secrets"] - verbs: ["get"] - resourceNames: - - mailu-postmark-relay ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: grafana-smtp-sync -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: grafana-smtp-sync -subjects: - - kind: ServiceAccount - name: grafana-smtp-sync - namespace: monitoring - ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - name: grafana-smtp-sync - namespace: monitoring -rules: - - apiGroups: [""] - resources: ["secrets"] - verbs: ["get", "create", 
"update", "patch"] ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - name: grafana-smtp-sync - namespace: monitoring -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: grafana-smtp-sync -subjects: - - kind: ServiceAccount - name: grafana-smtp-sync - namespace: monitoring diff --git a/services/monitoring/grafana-smtp-sync-serviceaccount.yaml b/services/monitoring/grafana-smtp-sync-serviceaccount.yaml deleted file mode 100644 index 6ad0e18..0000000 --- a/services/monitoring/grafana-smtp-sync-serviceaccount.yaml +++ /dev/null @@ -1,6 +0,0 @@ -# services/monitoring/grafana-smtp-sync-serviceaccount.yaml -apiVersion: v1 -kind: ServiceAccount -metadata: - name: grafana-smtp-sync - namespace: monitoring diff --git a/services/monitoring/kustomization.yaml b/services/monitoring/kustomization.yaml index 66065cb..6596a36 100644 --- a/services/monitoring/kustomization.yaml +++ b/services/monitoring/kustomization.yaml @@ -20,9 +20,6 @@ resources: - postmark-exporter-deployment.yaml - vault-sync-deployment.yaml - grafana-alerting-config.yaml - - grafana-smtp-sync-serviceaccount.yaml - - grafana-smtp-sync-rbac.yaml - - grafana-smtp-sync-cronjob.yaml - grafana-folders.yaml - helmrelease.yaml - grafana-org-bootstrap.yaml @@ -34,12 +31,6 @@ configMapGenerator: - monitoring_postmark_exporter.py=scripts/postmark_exporter.py options: disableNameSuffixHash: true - - name: grafana-smtp-sync-script - namespace: monitoring - files: - - sync.sh=scripts/grafana_smtp_sync.sh - options: - disableNameSuffixHash: true - name: jetson-tegrastats-exporter-script namespace: monitoring files: diff --git a/services/monitoring/scripts/grafana_smtp_sync.sh b/services/monitoring/scripts/grafana_smtp_sync.sh deleted file mode 100644 index c8207ad..0000000 --- a/services/monitoring/scripts/grafana_smtp_sync.sh +++ /dev/null @@ -1,31 +0,0 @@ -#!/bin/sh -set -euo pipefail - -SOURCE_NS=${SOURCE_NS:-mailu-mailserver} 
-SOURCE_SECRET=${SOURCE_SECRET:-mailu-postmark-relay} -TARGET_NS=${TARGET_NS:-monitoring} -TARGET_SECRET=${TARGET_SECRET:-grafana-smtp} - -tmp=$(mktemp) -cleanup() { rm -f "$tmp"; } -trap cleanup EXIT - -kubectl -n "$SOURCE_NS" get secret "$SOURCE_SECRET" -o json > "$tmp" - -pass=$(jq -r '.data["relay-password"]' "$tmp") -user=$pass - -if [ -z "$user" ] || [ -z "$pass" ] || [ "$user" = "null" ] || [ "$pass" = "null" ]; then - echo "missing credentials from $SOURCE_NS/$SOURCE_SECRET" >&2 - exit 1 -fi - -cat < Date: Wed, 14 Jan 2026 10:07:31 -0300 Subject: [PATCH 044/270] vault: sync harbor pulls --- .../bstein-dev-home/backend-deployment.yaml | 2 +- .../bstein-dev-home/frontend-deployment.yaml | 2 +- services/bstein-dev-home/kustomization.yaml | 2 ++ .../bstein-dev-home/secretproviderclass.yaml | 17 +++++++--- .../bstein-dev-home/vault-serviceaccount.yaml | 6 ++++ .../vault-sync-deployment.yaml | 34 +++++++++++++++++++ .../vaultwarden-cred-sync-cronjob.yaml | 2 +- services/comms/comms-secrets-ensure-rbac.yaml | 2 ++ .../mas-admin-client-secret-ensure-job.yaml | 2 ++ services/comms/mas-db-ensure-rbac.yaml | 2 ++ services/comms/secretproviderclass.yaml | 8 +++++ .../comms/synapse-signingkey-ensure-rbac.yaml | 2 ++ services/crypto/monerod/deployment.yaml | 2 ++ .../crypto/xmr-miner/secretproviderclass.yaml | 8 +++++ services/harbor/helmrelease.yaml | 2 ++ services/harbor/secretproviderclass.yaml | 8 +++++ services/keycloak/kustomization.yaml | 5 ++- services/keycloak/mas-secrets-ensure-job.yaml | 2 ++ ...portal-e2e-client-secret-sync-cronjob.yaml | 32 ----------------- .../portal-e2e-client-secret-sync-rbac.yaml | 31 ----------------- services/keycloak/realm-settings-job.yaml | 16 +++++---- .../keycloak/scripts/keycloak_vault_env.sh | 3 ++ .../sso_portal_e2e_client_secret_sync.sh | 20 ----------- services/keycloak/secretproviderclass.yaml | 14 ++++++++ services/keycloak/serviceaccount.yaml | 2 ++ services/keycloak/vault-serviceaccount.yaml | 6 ++++ 
services/keycloak/vault-sync-deployment.yaml | 34 +++++++++++++++++++ .../logging/data-prepper-helmrelease.yaml | 2 +- services/logging/secretproviderclass.yaml | 8 +++++ services/mailu/secretproviderclass.yaml | 8 +++++ services/mailu/vip-controller.yaml | 2 ++ services/monitoring/dcgm-exporter.yaml | 2 ++ services/monitoring/helmrelease.yaml | 4 +-- services/monitoring/secretproviderclass.yaml | 8 +++++ services/nextcloud/configmap.yaml | 4 +-- services/nextcloud/deployment.yaml | 4 +-- services/nextcloud/secretproviderclass.yaml | 8 ++--- services/outline/deployment.yaml | 2 +- services/outline/secretproviderclass.yaml | 8 ++--- services/pegasus/deployment.yaml | 2 ++ services/pegasus/secretproviderclass.yaml | 8 +++++ services/planka/secretproviderclass.yaml | 8 ++--- .../vault/scripts/vault_k8s_auth_configure.sh | 30 ++++++++-------- services/vaultwarden/deployment.yaml | 12 +++---- .../scripts/vaultwarden_vault_env.sh | 3 ++ services/vaultwarden/secretproviderclass.yaml | 6 ++++ 46 files changed, 254 insertions(+), 141 deletions(-) create mode 100644 services/bstein-dev-home/vault-serviceaccount.yaml create mode 100644 services/bstein-dev-home/vault-sync-deployment.yaml delete mode 100644 services/keycloak/portal-e2e-client-secret-sync-cronjob.yaml delete mode 100644 services/keycloak/portal-e2e-client-secret-sync-rbac.yaml delete mode 100755 services/keycloak/scripts/sso_portal_e2e_client_secret_sync.sh create mode 100644 services/keycloak/vault-serviceaccount.yaml create mode 100644 services/keycloak/vault-sync-deployment.yaml diff --git a/services/bstein-dev-home/backend-deployment.yaml b/services/bstein-dev-home/backend-deployment.yaml index 08f73f7..3266747 100644 --- a/services/bstein-dev-home/backend-deployment.yaml +++ b/services/bstein-dev-home/backend-deployment.yaml @@ -21,7 +21,7 @@ spec: kubernetes.io/arch: arm64 node-role.kubernetes.io/worker: "true" imagePullSecrets: - - name: harbor-bstein-robot + - name: harbor-regcred containers: - name: 
backend image: registry.bstein.dev/bstein/bstein-dev-home-backend:0.1.1-92 # {"$imagepolicy": "bstein-dev-home:bstein-dev-home-backend"} diff --git a/services/bstein-dev-home/frontend-deployment.yaml b/services/bstein-dev-home/frontend-deployment.yaml index 3092edb..478ebf9 100644 --- a/services/bstein-dev-home/frontend-deployment.yaml +++ b/services/bstein-dev-home/frontend-deployment.yaml @@ -19,7 +19,7 @@ spec: kubernetes.io/arch: arm64 node-role.kubernetes.io/worker: "true" imagePullSecrets: - - name: harbor-bstein-robot + - name: harbor-regcred containers: - name: frontend image: registry.bstein.dev/bstein/bstein-dev-home-frontend:0.1.1-92 # {"$imagepolicy": "bstein-dev-home:bstein-dev-home-frontend"} diff --git a/services/bstein-dev-home/kustomization.yaml b/services/bstein-dev-home/kustomization.yaml index 57228ed..a57c81a 100644 --- a/services/bstein-dev-home/kustomization.yaml +++ b/services/bstein-dev-home/kustomization.yaml @@ -6,7 +6,9 @@ resources: - namespace.yaml - image.yaml - rbac.yaml + - vault-serviceaccount.yaml - secretproviderclass.yaml + - vault-sync-deployment.yaml - chat-ai-gateway-deployment.yaml - chat-ai-gateway-service.yaml - frontend-deployment.yaml diff --git a/services/bstein-dev-home/secretproviderclass.yaml b/services/bstein-dev-home/secretproviderclass.yaml index 83e94c0..c153211 100644 --- a/services/bstein-dev-home/secretproviderclass.yaml +++ b/services/bstein-dev-home/secretproviderclass.yaml @@ -11,16 +11,16 @@ spec: roleName: "bstein-dev-home" objects: | - objectName: "atlas-portal-db__PORTAL_DATABASE_URL" - secretPath: "kv/data/atlas/bstein-dev-home/atlas-portal-db" + secretPath: "kv/data/atlas/portal/atlas-portal-db" secretKey: "PORTAL_DATABASE_URL" - objectName: "bstein-dev-home-keycloak-admin__client_secret" - secretPath: "kv/data/atlas/bstein-dev-home/bstein-dev-home-keycloak-admin" + secretPath: "kv/data/atlas/portal/bstein-dev-home-keycloak-admin" secretKey: "client_secret" - objectName: "chat-ai-keys__homepage" - 
secretPath: "kv/data/atlas/bstein-dev-home/chat-ai-keys" + secretPath: "kv/data/atlas/portal/chat-ai-keys" secretKey: "homepage" - objectName: "chat-ai-keys__matrix" - secretPath: "kv/data/atlas/bstein-dev-home/chat-ai-keys" + secretPath: "kv/data/atlas/portal/chat-ai-keys" secretKey: "matrix" - objectName: "chat-ai-keys-runtime__homepage" secretPath: "kv/data/atlas/shared/chat-ai-keys-runtime" @@ -34,3 +34,12 @@ spec: - objectName: "portal-e2e-client__client_secret" secretPath: "kv/data/atlas/shared/portal-e2e-client" secretKey: "client_secret" + - objectName: "harbor-pull__dockerconfigjson" + secretPath: "kv/data/atlas/harbor-pull/bstein-dev-home" + secretKey: "dockerconfigjson" + secretObjects: + - secretName: harbor-regcred + type: kubernetes.io/dockerconfigjson + data: + - objectName: harbor-pull__dockerconfigjson + key: .dockerconfigjson diff --git a/services/bstein-dev-home/vault-serviceaccount.yaml b/services/bstein-dev-home/vault-serviceaccount.yaml new file mode 100644 index 0000000..d3ea79a --- /dev/null +++ b/services/bstein-dev-home/vault-serviceaccount.yaml @@ -0,0 +1,6 @@ +# services/bstein-dev-home/vault-serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: bstein-dev-home-vault-sync + namespace: bstein-dev-home diff --git a/services/bstein-dev-home/vault-sync-deployment.yaml b/services/bstein-dev-home/vault-sync-deployment.yaml new file mode 100644 index 0000000..ad50f1e --- /dev/null +++ b/services/bstein-dev-home/vault-sync-deployment.yaml @@ -0,0 +1,34 @@ +# services/bstein-dev-home/vault-sync-deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: bstein-dev-home-vault-sync + namespace: bstein-dev-home +spec: + replicas: 1 + selector: + matchLabels: + app: bstein-dev-home-vault-sync + template: + metadata: + labels: + app: bstein-dev-home-vault-sync + spec: + serviceAccountName: bstein-dev-home-vault-sync + containers: + - name: sync + image: alpine:3.20 + command: ["/bin/sh", "-c"] + args: + - "sleep 
infinity" + volumeMounts: + - name: vault-secrets + mountPath: /vault/secrets + readOnly: true + volumes: + - name: vault-secrets + csi: + driver: secrets-store.csi.k8s.io + readOnly: true + volumeAttributes: + secretProviderClass: bstein-dev-home-vault diff --git a/services/bstein-dev-home/vaultwarden-cred-sync-cronjob.yaml b/services/bstein-dev-home/vaultwarden-cred-sync-cronjob.yaml index b531e7a..5d7531e 100644 --- a/services/bstein-dev-home/vaultwarden-cred-sync-cronjob.yaml +++ b/services/bstein-dev-home/vaultwarden-cred-sync-cronjob.yaml @@ -20,7 +20,7 @@ spec: kubernetes.io/arch: arm64 node-role.kubernetes.io/worker: "true" imagePullSecrets: - - name: harbor-bstein-robot + - name: harbor-regcred containers: - name: sync image: registry.bstein.dev/bstein/bstein-dev-home-backend:0.1.1-92 # {"$imagepolicy": "bstein-dev-home:bstein-dev-home-backend"} diff --git a/services/comms/comms-secrets-ensure-rbac.yaml b/services/comms/comms-secrets-ensure-rbac.yaml index dfb4f21..47e41d4 100644 --- a/services/comms/comms-secrets-ensure-rbac.yaml +++ b/services/comms/comms-secrets-ensure-rbac.yaml @@ -4,6 +4,8 @@ kind: ServiceAccount metadata: name: comms-secrets-ensure namespace: comms +imagePullSecrets: + - name: harbor-regcred --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole diff --git a/services/comms/mas-admin-client-secret-ensure-job.yaml b/services/comms/mas-admin-client-secret-ensure-job.yaml index 4580634..07f59a6 100644 --- a/services/comms/mas-admin-client-secret-ensure-job.yaml +++ b/services/comms/mas-admin-client-secret-ensure-job.yaml @@ -4,6 +4,8 @@ kind: ServiceAccount metadata: name: mas-admin-client-secret-writer namespace: comms +imagePullSecrets: + - name: harbor-regcred --- apiVersion: rbac.authorization.k8s.io/v1 kind: Role diff --git a/services/comms/mas-db-ensure-rbac.yaml b/services/comms/mas-db-ensure-rbac.yaml index 19691d7..c8093b5 100644 --- a/services/comms/mas-db-ensure-rbac.yaml +++ b/services/comms/mas-db-ensure-rbac.yaml @@ 
-4,6 +4,8 @@ kind: ServiceAccount metadata: name: mas-db-ensure namespace: comms +imagePullSecrets: + - name: harbor-regcred --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole diff --git a/services/comms/secretproviderclass.yaml b/services/comms/secretproviderclass.yaml index 971d408..ff3767f 100644 --- a/services/comms/secretproviderclass.yaml +++ b/services/comms/secretproviderclass.yaml @@ -61,6 +61,9 @@ spec: - objectName: "synapse-oidc__client-secret" secretPath: "kv/data/atlas/comms/synapse-oidc" secretKey: "client-secret" + - objectName: "harbor-pull__dockerconfigjson" + secretPath: "kv/data/atlas/harbor-pull/comms" + secretKey: "dockerconfigjson" secretObjects: - secretName: turn-shared-secret type: Opaque @@ -132,3 +135,8 @@ spec: data: - objectName: synapse-oidc__client-secret key: client-secret + - secretName: harbor-regcred + type: kubernetes.io/dockerconfigjson + data: + - objectName: harbor-pull__dockerconfigjson + key: .dockerconfigjson diff --git a/services/comms/synapse-signingkey-ensure-rbac.yaml b/services/comms/synapse-signingkey-ensure-rbac.yaml index c7f66bc..29387f1 100644 --- a/services/comms/synapse-signingkey-ensure-rbac.yaml +++ b/services/comms/synapse-signingkey-ensure-rbac.yaml @@ -4,6 +4,8 @@ kind: ServiceAccount metadata: name: othrys-synapse-signingkey-job namespace: comms +imagePullSecrets: + - name: harbor-regcred --- apiVersion: rbac.authorization.k8s.io/v1 kind: Role diff --git a/services/crypto/monerod/deployment.yaml b/services/crypto/monerod/deployment.yaml index 40c9e24..9d64864 100644 --- a/services/crypto/monerod/deployment.yaml +++ b/services/crypto/monerod/deployment.yaml @@ -18,6 +18,8 @@ spec: fsGroupChangePolicy: OnRootMismatch nodeSelector: node-role.kubernetes.io/worker: "true" + imagePullSecrets: + - name: harbor-regcred affinity: nodeAffinity: requiredDuringSchedulingIgnoredDuringExecution: diff --git a/services/crypto/xmr-miner/secretproviderclass.yaml 
b/services/crypto/xmr-miner/secretproviderclass.yaml index 2d61854..00c72bd 100644 --- a/services/crypto/xmr-miner/secretproviderclass.yaml +++ b/services/crypto/xmr-miner/secretproviderclass.yaml @@ -13,9 +13,17 @@ spec: - objectName: "xmr-payout__address" secretPath: "kv/data/atlas/crypto/xmr-payout" secretKey: "address" + - objectName: "harbor-pull__dockerconfigjson" + secretPath: "kv/data/atlas/harbor-pull/crypto" + secretKey: "dockerconfigjson" secretObjects: - secretName: xmr-payout type: Opaque data: - objectName: xmr-payout__address key: address + - secretName: harbor-regcred + type: kubernetes.io/dockerconfigjson + data: + - objectName: harbor-pull__dockerconfigjson + key: .dockerconfigjson diff --git a/services/harbor/helmrelease.yaml b/services/harbor/helmrelease.yaml index 249a3f3..11244ff 100644 --- a/services/harbor/helmrelease.yaml +++ b/services/harbor/helmrelease.yaml @@ -29,6 +29,8 @@ spec: values: externalURL: https://registry.bstein.dev imagePullPolicy: IfNotPresent + imagePullSecrets: + - name: harbor-regcred expose: type: ingress tls: diff --git a/services/harbor/secretproviderclass.yaml b/services/harbor/secretproviderclass.yaml index 1e1a7f1..90fc876 100644 --- a/services/harbor/secretproviderclass.yaml +++ b/services/harbor/secretproviderclass.yaml @@ -49,6 +49,9 @@ spec: - objectName: "harbor-oidc__CONFIG_OVERWRITE_JSON" secretPath: "kv/data/atlas/harbor/harbor-oidc" secretKey: "CONFIG_OVERWRITE_JSON" + - objectName: "harbor-pull__dockerconfigjson" + secretPath: "kv/data/atlas/harbor-pull/harbor" + secretKey: "dockerconfigjson" secretObjects: - secretName: harbor-core type: Opaque @@ -85,3 +88,8 @@ spec: data: - objectName: harbor-oidc__CONFIG_OVERWRITE_JSON key: CONFIG_OVERWRITE_JSON + - secretName: harbor-regcred + type: kubernetes.io/dockerconfigjson + data: + - objectName: harbor-pull__dockerconfigjson + key: .dockerconfigjson diff --git a/services/keycloak/kustomization.yaml b/services/keycloak/kustomization.yaml index 
82df213..c34aad4 100644 --- a/services/keycloak/kustomization.yaml +++ b/services/keycloak/kustomization.yaml @@ -6,7 +6,9 @@ resources: - namespace.yaml - pvc.yaml - serviceaccount.yaml + - vault-serviceaccount.yaml - secretproviderclass.yaml + - vault-sync-deployment.yaml - deployment.yaml - realm-settings-job.yaml - portal-e2e-client-job.yaml @@ -33,9 +35,6 @@ configMapGenerator: files: - test_portal_token_exchange.py=scripts/tests/test_portal_token_exchange.py - test_keycloak_execute_actions_email.py=scripts/tests/test_keycloak_execute_actions_email.py - - name: portal-e2e-client-secret-sync-script - files: - - sso_portal_e2e_client_secret_sync.sh=scripts/sso_portal_e2e_client_secret_sync.sh - name: harbor-oidc-secret-ensure-script files: - harbor_oidc_secret_ensure.sh=scripts/harbor_oidc_secret_ensure.sh diff --git a/services/keycloak/mas-secrets-ensure-job.yaml b/services/keycloak/mas-secrets-ensure-job.yaml index 42a78b0..75d8300 100644 --- a/services/keycloak/mas-secrets-ensure-job.yaml +++ b/services/keycloak/mas-secrets-ensure-job.yaml @@ -4,6 +4,8 @@ kind: ServiceAccount metadata: name: mas-secrets-ensure namespace: sso +imagePullSecrets: + - name: harbor-regcred --- apiVersion: batch/v1 kind: Job diff --git a/services/keycloak/portal-e2e-client-secret-sync-cronjob.yaml b/services/keycloak/portal-e2e-client-secret-sync-cronjob.yaml deleted file mode 100644 index 8bb7e55..0000000 --- a/services/keycloak/portal-e2e-client-secret-sync-cronjob.yaml +++ /dev/null @@ -1,32 +0,0 @@ -# services/keycloak/portal-e2e-client-secret-sync-cronjob.yaml -apiVersion: batch/v1 -kind: CronJob -metadata: - name: portal-e2e-client-secret-sync - namespace: sso -spec: - schedule: "*/10 * * * *" - concurrencyPolicy: Forbid - successfulJobsHistoryLimit: 1 - failedJobsHistoryLimit: 3 - jobTemplate: - spec: - backoffLimit: 1 - template: - spec: - serviceAccountName: portal-e2e-client-secret-sync - restartPolicy: Never - containers: - - name: sync - image: 
bitnami/kubectl@sha256:554ab88b1858e8424c55de37ad417b16f2a0e65d1607aa0f3fe3ce9b9f10b131 - command: ["/usr/bin/env", "bash"] - args: ["/scripts/sso_portal_e2e_client_secret_sync.sh"] - volumeMounts: - - name: script - mountPath: /scripts - readOnly: true - volumes: - - name: script - configMap: - name: portal-e2e-client-secret-sync-script - defaultMode: 0555 diff --git a/services/keycloak/portal-e2e-client-secret-sync-rbac.yaml b/services/keycloak/portal-e2e-client-secret-sync-rbac.yaml deleted file mode 100644 index e2d39bb..0000000 --- a/services/keycloak/portal-e2e-client-secret-sync-rbac.yaml +++ /dev/null @@ -1,31 +0,0 @@ -# services/keycloak/portal-e2e-client-secret-sync-rbac.yaml -apiVersion: v1 -kind: ServiceAccount -metadata: - name: portal-e2e-client-secret-sync - namespace: sso ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - name: portal-e2e-client-secret-sync-source - namespace: sso -rules: - - apiGroups: [""] - resources: ["secrets"] - resourceNames: ["portal-e2e-client"] - verbs: ["get"] ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - name: portal-e2e-client-secret-sync-source - namespace: sso -subjects: - - kind: ServiceAccount - name: portal-e2e-client-secret-sync - namespace: sso -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: portal-e2e-client-secret-sync-source diff --git a/services/keycloak/realm-settings-job.yaml b/services/keycloak/realm-settings-job.yaml index 78d31d1..5cabe3c 100644 --- a/services/keycloak/realm-settings-job.yaml +++ b/services/keycloak/realm-settings-job.yaml @@ -2,7 +2,7 @@ apiVersion: batch/v1 kind: Job metadata: - name: keycloak-realm-settings-17 + name: keycloak-realm-settings-18 namespace: sso spec: backoffLimit: 0 @@ -29,15 +29,15 @@ spec: - name: KEYCLOAK_REALM value: atlas - name: KEYCLOAK_SMTP_HOST - value: mailu-front.mailu-mailserver.svc.cluster.local + value: smtp.postmarkapp.com - name: KEYCLOAK_SMTP_PORT - value: "25" + value: "587" - 
name: KEYCLOAK_SMTP_FROM - value: no-reply@bstein.dev + value: no-reply-sso@bstein.dev - name: KEYCLOAK_SMTP_FROM_NAME value: Atlas SSO - name: KEYCLOAK_SMTP_REPLY_TO - value: no-reply@bstein.dev + value: no-reply-sso@bstein.dev - name: KEYCLOAK_SMTP_REPLY_TO_NAME value: Atlas SSO command: ["/bin/sh", "-c"] @@ -118,8 +118,10 @@ spec: "fromDisplayName": os.environ["KEYCLOAK_SMTP_FROM_NAME"], "replyTo": os.environ["KEYCLOAK_SMTP_REPLY_TO"], "replyToDisplayName": os.environ["KEYCLOAK_SMTP_REPLY_TO_NAME"], - "auth": "false", - "starttls": "false", + "user": os.environ["KEYCLOAK_SMTP_USER"], + "password": os.environ["KEYCLOAK_SMTP_PASSWORD"], + "auth": "true", + "starttls": "true", "ssl": "false", } ) diff --git a/services/keycloak/scripts/keycloak_vault_env.sh b/services/keycloak/scripts/keycloak_vault_env.sh index 62f7f38..dd68fc7 100644 --- a/services/keycloak/scripts/keycloak_vault_env.sh +++ b/services/keycloak/scripts/keycloak_vault_env.sh @@ -24,3 +24,6 @@ export PORTAL_E2E_CLIENT_SECRET="$(read_secret portal-e2e-client__client_secret) export LDAP_ADMIN_PASSWORD="$(read_secret openldap-admin__LDAP_ADMIN_PASSWORD)" export LDAP_CONFIG_PASSWORD="$(read_secret openldap-admin__LDAP_CONFIG_PASSWORD)" export LDAP_BIND_PASSWORD="${LDAP_ADMIN_PASSWORD}" + +export KEYCLOAK_SMTP_USER="$(read_secret postmark-relay__relay-username)" +export KEYCLOAK_SMTP_PASSWORD="$(read_secret postmark-relay__relay-password)" diff --git a/services/keycloak/scripts/sso_portal_e2e_client_secret_sync.sh b/services/keycloak/scripts/sso_portal_e2e_client_secret_sync.sh deleted file mode 100755 index bf944ca..0000000 --- a/services/keycloak/scripts/sso_portal_e2e_client_secret_sync.sh +++ /dev/null @@ -1,20 +0,0 @@ -#!/usr/bin/env bash -set -euo pipefail - -SOURCE_NAMESPACE="${SOURCE_NAMESPACE:-sso}" -DEST_NAMESPACE="${DEST_NAMESPACE:-bstein-dev-home}" -SECRET_NAME="${SECRET_NAME:-portal-e2e-client}" - -client_id="$(kubectl -n "${SOURCE_NAMESPACE}" get secret "${SECRET_NAME}" -o 
jsonpath='{.data.client_id}')" -client_secret="$(kubectl -n "${SOURCE_NAMESPACE}" get secret "${SECRET_NAME}" -o jsonpath='{.data.client_secret}')" - -cat </dev/null -apiVersion: v1 -kind: Secret -metadata: - name: ${SECRET_NAME} -type: Opaque -data: - client_id: ${client_id} - client_secret: ${client_secret} -EOF diff --git a/services/keycloak/secretproviderclass.yaml b/services/keycloak/secretproviderclass.yaml index 7ca83ec..e78e57e 100644 --- a/services/keycloak/secretproviderclass.yaml +++ b/services/keycloak/secretproviderclass.yaml @@ -46,6 +46,15 @@ spec: - objectName: "oauth2-proxy-oidc__cookie_secret" secretPath: "kv/data/atlas/sso/oauth2-proxy-oidc" secretKey: "cookie_secret" + - objectName: "postmark-relay__relay-username" + secretPath: "kv/data/atlas/shared/postmark-relay" + secretKey: "relay-username" + - objectName: "postmark-relay__relay-password" + secretPath: "kv/data/atlas/shared/postmark-relay" + secretKey: "relay-password" + - objectName: "harbor-pull__dockerconfigjson" + secretPath: "kv/data/atlas/harbor-pull/sso" + secretKey: "dockerconfigjson" secretObjects: - secretName: openldap-admin type: Opaque @@ -63,3 +72,8 @@ spec: key: client_secret - objectName: oauth2-proxy-oidc__cookie_secret key: cookie_secret + - secretName: harbor-regcred + type: kubernetes.io/dockerconfigjson + data: + - objectName: harbor-pull__dockerconfigjson + key: .dockerconfigjson diff --git a/services/keycloak/serviceaccount.yaml b/services/keycloak/serviceaccount.yaml index 59d710f..5f581c1 100644 --- a/services/keycloak/serviceaccount.yaml +++ b/services/keycloak/serviceaccount.yaml @@ -4,3 +4,5 @@ kind: ServiceAccount metadata: name: sso-vault namespace: sso +imagePullSecrets: + - name: harbor-regcred diff --git a/services/keycloak/vault-serviceaccount.yaml b/services/keycloak/vault-serviceaccount.yaml new file mode 100644 index 0000000..79fa47c --- /dev/null +++ b/services/keycloak/vault-serviceaccount.yaml @@ -0,0 +1,6 @@ +# 
services/keycloak/vault-serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: sso-vault-sync + namespace: sso diff --git a/services/keycloak/vault-sync-deployment.yaml b/services/keycloak/vault-sync-deployment.yaml new file mode 100644 index 0000000..a9afcd0 --- /dev/null +++ b/services/keycloak/vault-sync-deployment.yaml @@ -0,0 +1,34 @@ +# services/keycloak/vault-sync-deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: sso-vault-sync + namespace: sso +spec: + replicas: 1 + selector: + matchLabels: + app: sso-vault-sync + template: + metadata: + labels: + app: sso-vault-sync + spec: + serviceAccountName: sso-vault-sync + containers: + - name: sync + image: alpine:3.20 + command: ["/bin/sh", "-c"] + args: + - "sleep infinity" + volumeMounts: + - name: vault-secrets + mountPath: /vault/secrets + readOnly: true + volumes: + - name: vault-secrets + csi: + driver: secrets-store.csi.k8s.io + readOnly: true + volumeAttributes: + secretProviderClass: sso-vault diff --git a/services/logging/data-prepper-helmrelease.yaml b/services/logging/data-prepper-helmrelease.yaml index 8b27052..73984f5 100644 --- a/services/logging/data-prepper-helmrelease.yaml +++ b/services/logging/data-prepper-helmrelease.yaml @@ -22,7 +22,7 @@ spec: repository: registry.bstein.dev/streaming/data-prepper tag: "2.8.0" imagePullSecrets: - - name: harbor-robot-pipeline + - name: harbor-regcred config: data-prepper-config.yaml: | ssl: false diff --git a/services/logging/secretproviderclass.yaml b/services/logging/secretproviderclass.yaml index 70ecb3d..bbe6cfd 100644 --- a/services/logging/secretproviderclass.yaml +++ b/services/logging/secretproviderclass.yaml @@ -19,6 +19,9 @@ spec: - objectName: "oauth2-proxy-logs-oidc__cookie_secret" secretPath: "kv/data/atlas/logging/oauth2-proxy-logs-oidc" secretKey: "cookie_secret" + - objectName: "harbor-pull__dockerconfigjson" + secretPath: "kv/data/atlas/harbor-pull/logging" + secretKey: "dockerconfigjson" 
secretObjects: - secretName: oauth2-proxy-logs-oidc type: Opaque @@ -29,3 +32,8 @@ spec: key: client_secret - objectName: oauth2-proxy-logs-oidc__cookie_secret key: cookie_secret + - secretName: harbor-regcred + type: kubernetes.io/dockerconfigjson + data: + - objectName: harbor-pull__dockerconfigjson + key: .dockerconfigjson diff --git a/services/mailu/secretproviderclass.yaml b/services/mailu/secretproviderclass.yaml index 0ed32ba..11cc2fe 100644 --- a/services/mailu/secretproviderclass.yaml +++ b/services/mailu/secretproviderclass.yaml @@ -40,6 +40,9 @@ spec: - objectName: "mailu-sync-credentials__client-secret" secretPath: "kv/data/atlas/mailu/mailu-sync-credentials" secretKey: "client-secret" + - objectName: "harbor-pull__dockerconfigjson" + secretPath: "kv/data/atlas/harbor-pull/mailu-mailserver" + secretKey: "dockerconfigjson" secretObjects: - secretName: mailu-secret type: Opaque @@ -76,3 +79,8 @@ spec: key: client-id - objectName: mailu-sync-credentials__client-secret key: client-secret + - secretName: harbor-regcred + type: kubernetes.io/dockerconfigjson + data: + - objectName: harbor-pull__dockerconfigjson + key: .dockerconfigjson diff --git a/services/mailu/vip-controller.yaml b/services/mailu/vip-controller.yaml index 81cc96e..faa49ec 100644 --- a/services/mailu/vip-controller.yaml +++ b/services/mailu/vip-controller.yaml @@ -5,6 +5,8 @@ kind: ServiceAccount metadata: name: vip-controller namespace: mailu-mailserver +imagePullSecrets: + - name: harbor-regcred --- apiVersion: rbac.authorization.k8s.io/v1 kind: Role diff --git a/services/monitoring/dcgm-exporter.yaml b/services/monitoring/dcgm-exporter.yaml index 7627420..8760c9f 100644 --- a/services/monitoring/dcgm-exporter.yaml +++ b/services/monitoring/dcgm-exporter.yaml @@ -22,6 +22,8 @@ spec: prometheus.io/port: "9400" spec: serviceAccountName: default + imagePullSecrets: + - name: harbor-regcred runtimeClassName: nvidia affinity: nodeAffinity: diff --git a/services/monitoring/helmrelease.yaml 
b/services/monitoring/helmrelease.yaml index 33abc9e..dbb41ef 100644 --- a/services/monitoring/helmrelease.yaml +++ b/services/monitoring/helmrelease.yaml @@ -275,8 +275,8 @@ spec: GF_AUTH_ANONYMOUS_ORG_ROLE: "Viewer" GF_SMTP_ENABLED: "true" GF_SMTP_HOST: "smtp.postmarkapp.com:587" - GF_SMTP_FROM: "alerts@bstein.dev" - GF_SMTP_FROM_NAME: "Atlas Alerts" + GF_SMTP_FROM: "no-reply-grafana@bstein.dev" + GF_SMTP_FROM_NAME: "Atlas Grafana" GRAFANA_ALERT_EMAILS: "alerts@bstein.dev" GF_SECURITY_ALLOW_EMBEDDING: "true" GF_AUTH_GENERIC_OAUTH_ENABLED: "true" diff --git a/services/monitoring/secretproviderclass.yaml b/services/monitoring/secretproviderclass.yaml index 3f94c08..4f58ff0 100644 --- a/services/monitoring/secretproviderclass.yaml +++ b/services/monitoring/secretproviderclass.yaml @@ -31,6 +31,9 @@ spec: - objectName: "postmark-relay__relay-password" secretPath: "kv/data/atlas/shared/postmark-relay" secretKey: "relay-password" + - objectName: "harbor-pull__dockerconfigjson" + secretPath: "kv/data/atlas/harbor-pull/monitoring" + secretKey: "dockerconfigjson" secretObjects: - secretName: grafana-admin type: Opaque @@ -55,3 +58,8 @@ spec: key: username - objectName: postmark-relay__relay-password key: password + - secretName: harbor-regcred + type: kubernetes.io/dockerconfigjson + data: + - objectName: harbor-pull__dockerconfigjson + key: .dockerconfigjson diff --git a/services/nextcloud/configmap.yaml b/services/nextcloud/configmap.yaml index 21098a2..7222320 100644 --- a/services/nextcloud/configmap.yaml +++ b/services/nextcloud/configmap.yaml @@ -18,13 +18,13 @@ data: 'default_phone_region' => 'US', 'mail_smtpmode' => 'smtp', 'mail_sendmailmode' => 'smtp', - 'mail_smtphost' => 'mail.bstein.dev', + 'mail_smtphost' => 'smtp.postmarkapp.com', 'mail_smtpport' => '587', 'mail_smtpsecure' => 'tls', 'mail_smtpauth' => true, 'mail_smtpauthtype' => 'LOGIN', 'mail_domain' => 'bstein.dev', - 'mail_from_address' => 'no-reply', + 'mail_from_address' => 'no-reply-nextcloud', 
'datadirectory' => '/var/www/html/data', 'apps_paths' => array ( diff --git a/services/nextcloud/deployment.yaml b/services/nextcloud/deployment.yaml index 894484c..45f5e8f 100644 --- a/services/nextcloud/deployment.yaml +++ b/services/nextcloud/deployment.yaml @@ -194,13 +194,13 @@ spec: value: https://cloud.bstein.dev # SMTP (external secret: nextcloud-smtp with keys username, password) - name: SMTP_HOST - value: mail.bstein.dev + value: smtp.postmarkapp.com - name: SMTP_PORT value: "587" - name: SMTP_SECURE value: tls - name: MAIL_FROM_ADDRESS - value: no-reply + value: no-reply-nextcloud - name: MAIL_DOMAIN value: bstein.dev # OIDC (external secret: nextcloud-oidc with keys client-id, client-secret) diff --git a/services/nextcloud/secretproviderclass.yaml b/services/nextcloud/secretproviderclass.yaml index b5e6c37..1d9a104 100644 --- a/services/nextcloud/secretproviderclass.yaml +++ b/services/nextcloud/secretproviderclass.yaml @@ -32,11 +32,11 @@ spec: secretPath: "kv/data/atlas/nextcloud/nextcloud-oidc" secretKey: "client-secret" - objectName: "nextcloud-smtp__smtp-username" - secretPath: "kv/data/atlas/nextcloud/nextcloud-smtp" - secretKey: "smtp-username" + secretPath: "kv/data/atlas/shared/postmark-relay" + secretKey: "relay-username" - objectName: "nextcloud-smtp__smtp-password" - secretPath: "kv/data/atlas/nextcloud/nextcloud-smtp" - secretKey: "smtp-password" + secretPath: "kv/data/atlas/shared/postmark-relay" + secretKey: "relay-password" - objectName: "keycloak-admin__username" secretPath: "kv/data/atlas/shared/keycloak-admin" secretKey: "username" diff --git a/services/outline/deployment.yaml b/services/outline/deployment.yaml index 2cacceb..0c4825e 100644 --- a/services/outline/deployment.yaml +++ b/services/outline/deployment.yaml @@ -71,7 +71,7 @@ spec: - name: SMTP_SECURE value: "false" - name: SMTP_PORT - value: "25" + value: "587" volumeMounts: - name: user-data mountPath: /var/lib/outline/data diff --git 
a/services/outline/secretproviderclass.yaml b/services/outline/secretproviderclass.yaml index 2781c85..70891df 100644 --- a/services/outline/secretproviderclass.yaml +++ b/services/outline/secretproviderclass.yaml @@ -44,11 +44,11 @@ spec: secretPath: "kv/data/atlas/outline/outline-smtp" secretKey: "SMTP_HOST" - objectName: "SMTP_PASSWORD" - secretPath: "kv/data/atlas/outline/outline-smtp" - secretKey: "SMTP_PASSWORD" + secretPath: "kv/data/atlas/shared/postmark-relay" + secretKey: "relay-password" - objectName: "SMTP_USERNAME" - secretPath: "kv/data/atlas/outline/outline-smtp" - secretKey: "SMTP_USERNAME" + secretPath: "kv/data/atlas/shared/postmark-relay" + secretKey: "relay-username" - objectName: "AWS_ACCESS_KEY_ID" secretPath: "kv/data/atlas/outline/outline-s3" secretKey: "AWS_ACCESS_KEY_ID" diff --git a/services/pegasus/deployment.yaml b/services/pegasus/deployment.yaml index 7f8547f..94d8dfb 100644 --- a/services/pegasus/deployment.yaml +++ b/services/pegasus/deployment.yaml @@ -19,6 +19,8 @@ spec: nodeSelector: kubernetes.io/arch: arm64 node-role.kubernetes.io/worker: "true" + imagePullSecrets: + - name: harbor-regcred securityContext: runAsNonRoot: true runAsUser: 65532 diff --git a/services/pegasus/secretproviderclass.yaml b/services/pegasus/secretproviderclass.yaml index fa7448b..7513eee 100644 --- a/services/pegasus/secretproviderclass.yaml +++ b/services/pegasus/secretproviderclass.yaml @@ -19,6 +19,9 @@ spec: - objectName: "pegasus-secrets__JELLYFIN_API_KEY" secretPath: "kv/data/atlas/pegasus/pegasus-secrets" secretKey: "JELLYFIN_API_KEY" + - objectName: "harbor-pull__dockerconfigjson" + secretPath: "kv/data/atlas/harbor-pull/jellyfin" + secretKey: "dockerconfigjson" secretObjects: - secretName: pegasus-secrets type: Opaque @@ -29,3 +32,8 @@ spec: key: JELLYFIN_URL - objectName: pegasus-secrets__JELLYFIN_API_KEY key: JELLYFIN_API_KEY + - secretName: harbor-regcred + type: kubernetes.io/dockerconfigjson + data: + - objectName: 
harbor-pull__dockerconfigjson + key: .dockerconfigjson diff --git a/services/planka/secretproviderclass.yaml b/services/planka/secretproviderclass.yaml index e72d98c..028b2b5 100644 --- a/services/planka/secretproviderclass.yaml +++ b/services/planka/secretproviderclass.yaml @@ -44,8 +44,8 @@ spec: secretPath: "kv/data/atlas/planka/planka-smtp" secretKey: "SMTP_HOST" - objectName: "SMTP_PASSWORD" - secretPath: "kv/data/atlas/planka/planka-smtp" - secretKey: "SMTP_PASSWORD" + secretPath: "kv/data/atlas/shared/postmark-relay" + secretKey: "relay-password" - objectName: "SMTP_PORT" secretPath: "kv/data/atlas/planka/planka-smtp" secretKey: "SMTP_PORT" @@ -56,5 +56,5 @@ spec: secretPath: "kv/data/atlas/planka/planka-smtp" secretKey: "SMTP_TLS_REJECT_UNAUTHORIZED" - objectName: "SMTP_USER" - secretPath: "kv/data/atlas/planka/planka-smtp" - secretKey: "SMTP_USER" + secretPath: "kv/data/atlas/shared/postmark-relay" + secretKey: "relay-username" diff --git a/services/vault/scripts/vault_k8s_auth_configure.sh b/services/vault/scripts/vault_k8s_auth_configure.sh index f7b61df..3ecbd3f 100644 --- a/services/vault/scripts/vault_k8s_auth_configure.sh +++ b/services/vault/scripts/vault_k8s_auth_configure.sh @@ -76,35 +76,35 @@ path \"kv/metadata/atlas/${path}\" { } write_policy_and_role "outline" "outline" "outline-vault" \ - "outline/*" "" + "outline/* shared/postmark-relay" "" write_policy_and_role "planka" "planka" "planka-vault" \ - "planka/*" "" -write_policy_and_role "bstein-dev-home" "bstein-dev-home" "bstein-dev-home" \ - "bstein-dev-home/* shared/chat-ai-keys-runtime shared/portal-e2e-client" "" + "planka/* shared/postmark-relay" "" +write_policy_and_role "bstein-dev-home" "bstein-dev-home" "bstein-dev-home,bstein-dev-home-vault-sync" \ + "portal/* shared/chat-ai-keys-runtime shared/portal-e2e-client harbor-pull/bstein-dev-home" "" write_policy_and_role "gitea" "gitea" "gitea-vault" \ "gitea/*" "" write_policy_and_role "vaultwarden" "vaultwarden" "vaultwarden-vault" \ - 
"vaultwarden/*" "" -write_policy_and_role "sso" "sso" "sso-vault,mas-secrets-ensure" \ - "sso/* shared/keycloak-admin shared/portal-e2e-client" "" + "vaultwarden/* shared/postmark-relay" "" +write_policy_and_role "sso" "sso" "sso-vault,sso-vault-sync,mas-secrets-ensure" \ + "sso/* shared/keycloak-admin shared/portal-e2e-client shared/postmark-relay harbor-pull/sso" "" write_policy_and_role "mailu-mailserver" "mailu-mailserver" "mailu-vault-sync" \ - "mailu/* shared/postmark-relay" "" + "mailu/* shared/postmark-relay harbor-pull/mailu-mailserver" "" write_policy_and_role "harbor" "harbor" "harbor-vault-sync" \ - "harbor/*" "" + "harbor/* harbor-pull/harbor" "" write_policy_and_role "nextcloud" "nextcloud" "nextcloud-vault" \ - "nextcloud/* shared/keycloak-admin" "" + "nextcloud/* shared/keycloak-admin shared/postmark-relay" "" write_policy_and_role "comms" "comms" "comms-vault,atlasbot" \ - "comms/* shared/chat-ai-keys-runtime" "" + "comms/* shared/chat-ai-keys-runtime harbor-pull/comms" "" write_policy_and_role "jenkins" "jenkins" "jenkins-vault-sync" \ "jenkins/*" "" write_policy_and_role "monitoring" "monitoring" "monitoring-vault-sync" \ - "monitoring/* shared/postmark-relay" "" + "monitoring/* shared/postmark-relay harbor-pull/monitoring" "" write_policy_and_role "logging" "logging" "logging-vault-sync" \ - "logging/*" "" + "logging/* harbor-pull/logging" "" write_policy_and_role "pegasus" "jellyfin" "pegasus-vault-sync" \ - "pegasus/*" "" + "pegasus/* harbor-pull/jellyfin" "" write_policy_and_role "crypto" "crypto" "crypto-vault-sync" \ - "crypto/*" "" + "crypto/* harbor-pull/crypto" "" write_policy_and_role "sso-secrets" "sso" "mas-secrets-ensure" \ "shared/keycloak-admin" \ diff --git a/services/vaultwarden/deployment.yaml b/services/vaultwarden/deployment.yaml index 22a2c86..f102ea9 100644 --- a/services/vaultwarden/deployment.yaml +++ b/services/vaultwarden/deployment.yaml @@ -36,19 +36,19 @@ spec: - name: DOMAIN value: "https://vault.bstein.dev" - name: 
SMTP_HOST - value: "mailu-front.mailu-mailserver.svc.cluster.local" + value: "smtp.postmarkapp.com" - name: SMTP_PORT - value: "25" + value: "587" - name: SMTP_SECURITY value: "starttls" - name: SMTP_ACCEPT_INVALID_HOSTNAMES - value: "true" + value: "false" - name: SMTP_ACCEPT_INVALID_CERTS - value: "true" + value: "false" - name: SMTP_FROM - value: "postmaster@bstein.dev" + value: "no-reply-vaultwarden@bstein.dev" - name: SMTP_FROM_NAME - value: "Atlas Vaultwarden" + value: "Vaultwarden" ports: - name: http containerPort: 80 diff --git a/services/vaultwarden/scripts/vaultwarden_vault_env.sh b/services/vaultwarden/scripts/vaultwarden_vault_env.sh index 133faaa..7a80081 100644 --- a/services/vaultwarden/scripts/vaultwarden_vault_env.sh +++ b/services/vaultwarden/scripts/vaultwarden_vault_env.sh @@ -9,3 +9,6 @@ read_secret() { export DATABASE_URL="$(read_secret vaultwarden-db-url__DATABASE_URL)" export ADMIN_TOKEN="$(read_secret vaultwarden-admin__ADMIN_TOKEN)" + +export SMTP_USERNAME="$(read_secret postmark-relay__relay-username)" +export SMTP_PASSWORD="$(read_secret postmark-relay__relay-password)" diff --git a/services/vaultwarden/secretproviderclass.yaml b/services/vaultwarden/secretproviderclass.yaml index 6d4530b..63f864e 100644 --- a/services/vaultwarden/secretproviderclass.yaml +++ b/services/vaultwarden/secretproviderclass.yaml @@ -16,3 +16,9 @@ spec: - objectName: "vaultwarden-admin__ADMIN_TOKEN" secretPath: "kv/data/atlas/vaultwarden/vaultwarden-admin" secretKey: "ADMIN_TOKEN" + - objectName: "postmark-relay__relay-username" + secretPath: "kv/data/atlas/shared/postmark-relay" + secretKey: "relay-username" + - objectName: "postmark-relay__relay-password" + secretPath: "kv/data/atlas/shared/postmark-relay" + secretKey: "relay-password" From 1add32e6837f6253e90ce5a8c7e078d7a46e2159 Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Wed, 14 Jan 2026 11:46:13 -0300 Subject: [PATCH 045/270] infra: add vault injector --- .../flux-system/platform/kustomization.yaml | 
1 + .../vault-injector/kustomization.yaml | 16 +++++++ .../vault-injector/helmrelease.yaml | 43 +++++++++++++++++++ .../vault-injector/kustomization.yaml | 5 +++ 4 files changed, 65 insertions(+) create mode 100644 clusters/atlas/flux-system/platform/vault-injector/kustomization.yaml create mode 100644 infrastructure/vault-injector/helmrelease.yaml create mode 100644 infrastructure/vault-injector/kustomization.yaml diff --git a/clusters/atlas/flux-system/platform/kustomization.yaml b/clusters/atlas/flux-system/platform/kustomization.yaml index 6f88db7..83ca71e 100644 --- a/clusters/atlas/flux-system/platform/kustomization.yaml +++ b/clusters/atlas/flux-system/platform/kustomization.yaml @@ -13,3 +13,4 @@ resources: - longhorn-ui/kustomization.yaml - postgres/kustomization.yaml - ../platform/vault-csi/kustomization.yaml + - ../platform/vault-injector/kustomization.yaml diff --git a/clusters/atlas/flux-system/platform/vault-injector/kustomization.yaml b/clusters/atlas/flux-system/platform/vault-injector/kustomization.yaml new file mode 100644 index 0000000..d7d740d --- /dev/null +++ b/clusters/atlas/flux-system/platform/vault-injector/kustomization.yaml @@ -0,0 +1,16 @@ +# clusters/atlas/flux-system/platform/vault-injector/kustomization.yaml +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: vault-injector + namespace: flux-system +spec: + interval: 30m + path: ./infrastructure/vault-injector + targetNamespace: vault + prune: true + sourceRef: + kind: GitRepository + name: flux-system + namespace: flux-system + wait: true diff --git a/infrastructure/vault-injector/helmrelease.yaml b/infrastructure/vault-injector/helmrelease.yaml new file mode 100644 index 0000000..6a9c0fd --- /dev/null +++ b/infrastructure/vault-injector/helmrelease.yaml @@ -0,0 +1,43 @@ +# infrastructure/vault-injector/helmrelease.yaml +apiVersion: helm.toolkit.fluxcd.io/v2 +kind: HelmRelease +metadata: + name: vault-injector + namespace: vault +spec: + interval: 30m 
+ chart: + spec: + chart: vault + version: 0.31.0 + sourceRef: + kind: HelmRepository + name: hashicorp + namespace: flux-system + install: + remediation: { retries: 3 } + timeout: 10m + upgrade: + remediation: + retries: 3 + remediateLastFailure: true + cleanupOnFail: true + timeout: 10m + values: + global: + externalVaultAddr: http://vault.vault.svc.cluster.local:8200 + tlsDisable: true + server: + enabled: false + csi: + enabled: false + injector: + enabled: true + replicas: 1 + agentImage: + repository: hashicorp/vault + tag: "1.17.6" + webhook: + failurePolicy: Ignore + nodeSelector: + node-role.kubernetes.io/worker: "true" diff --git a/infrastructure/vault-injector/kustomization.yaml b/infrastructure/vault-injector/kustomization.yaml new file mode 100644 index 0000000..b4db089 --- /dev/null +++ b/infrastructure/vault-injector/kustomization.yaml @@ -0,0 +1,5 @@ +# infrastructure/vault-injector/kustomization.yaml +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - helmrelease.yaml From 16c62d5a4af8b5e5d762357fab920adc72072e8b Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Wed, 14 Jan 2026 12:28:10 -0300 Subject: [PATCH 046/270] vault: move core apps to injector --- .../bstein-dev-home/backend-deployment.yaml | 39 ++++++------ .../chat-ai-gateway-deployment.yaml | 37 ++++++----- services/bstein-dev-home/kustomization.yaml | 6 -- .../portal-onboarding-e2e-test-job.yaml | 38 ++++++----- .../scripts/bstein_dev_home_vault_env.sh | 17 ----- .../bstein-dev-home/secretproviderclass.yaml | 24 ------- .../vaultwarden-cred-sync-cronjob.yaml | 38 ++++++----- services/gitea/deployment.yaml | 45 +++++++++---- services/gitea/kustomization.yaml | 1 - services/gitea/secretproviderclass.yaml | 30 --------- services/nextcloud-mail-sync/cronjob.yaml | 47 +++++++++----- services/nextcloud/deployment.yaml | 54 +++++++++------- services/nextcloud/kustomization.yaml | 6 -- services/nextcloud/maintenance-cronjob.yaml | 47 +++++++++----- 
.../nextcloud/scripts/nextcloud_vault_env.sh | 27 -------- services/nextcloud/secretproviderclass.yaml | 45 ------------- services/outline/deployment.yaml | 46 +++++++++----- services/outline/kustomization.yaml | 7 --- services/outline/scripts/outline_vault_env.sh | 31 --------- services/outline/secretproviderclass.yaml | 63 ------------------- services/planka/deployment.yaml | 49 ++++++++++----- services/planka/kustomization.yaml | 7 --- services/planka/scripts/planka_vault_env.sh | 27 -------- services/planka/secretproviderclass.yaml | 60 ------------------ services/vaultwarden/deployment.yaml | 33 +++++----- services/vaultwarden/kustomization.yaml | 8 --- .../scripts/vaultwarden_vault_env.sh | 14 ----- services/vaultwarden/secretproviderclass.yaml | 24 ------- 28 files changed, 282 insertions(+), 588 deletions(-) delete mode 100644 services/bstein-dev-home/scripts/bstein_dev_home_vault_env.sh delete mode 100644 services/gitea/secretproviderclass.yaml delete mode 100644 services/nextcloud/scripts/nextcloud_vault_env.sh delete mode 100644 services/nextcloud/secretproviderclass.yaml delete mode 100644 services/outline/scripts/outline_vault_env.sh delete mode 100644 services/outline/secretproviderclass.yaml delete mode 100644 services/planka/scripts/planka_vault_env.sh delete mode 100644 services/planka/secretproviderclass.yaml delete mode 100644 services/vaultwarden/scripts/vaultwarden_vault_env.sh delete mode 100644 services/vaultwarden/secretproviderclass.yaml diff --git a/services/bstein-dev-home/backend-deployment.yaml b/services/bstein-dev-home/backend-deployment.yaml index 3266747..659cd33 100644 --- a/services/bstein-dev-home/backend-deployment.yaml +++ b/services/bstein-dev-home/backend-deployment.yaml @@ -14,6 +14,25 @@ spec: metadata: labels: app: bstein-dev-home-backend + annotations: + vault.hashicorp.com/agent-inject: "true" + vault.hashicorp.com/role: "bstein-dev-home" + vault.hashicorp.com/agent-inject-secret-portal-env.sh: 
"kv/data/atlas/portal/atlas-portal-db" + vault.hashicorp.com/agent-inject-template-portal-env.sh: | + {{- with secret "kv/data/atlas/portal/atlas-portal-db" -}} + export PORTAL_DATABASE_URL="{{ .Data.data.PORTAL_DATABASE_URL }}" + {{- end }} + {{- with secret "kv/data/atlas/portal/bstein-dev-home-keycloak-admin" -}} + export KEYCLOAK_ADMIN_CLIENT_SECRET="{{ .Data.data.client_secret }}" + {{- end }} + {{- with secret "kv/data/atlas/shared/chat-ai-keys-runtime" -}} + export CHAT_KEY_MATRIX="{{ .Data.data.matrix }}" + export CHAT_KEY_HOMEPAGE="{{ .Data.data.homepage }}" + {{- end }} + {{- with secret "kv/data/atlas/shared/portal-e2e-client" -}} + export PORTAL_E2E_CLIENT_ID="{{ .Data.data.client_id }}" + export PORTAL_E2E_CLIENT_SECRET="{{ .Data.data.client_secret }}" + {{- end }} spec: automountServiceAccountToken: true serviceAccountName: bstein-dev-home @@ -29,7 +48,7 @@ spec: command: ["/bin/sh", "-c"] args: - >- - . /vault/scripts/bstein_dev_home_vault_env.sh + . /vault/secrets/portal-env.sh && exec gunicorn -b 0.0.0.0:8080 --workers 2 --timeout 180 app:app env: - name: AI_CHAT_API @@ -94,13 +113,6 @@ spec: initialDelaySeconds: 10 periodSeconds: 10 timeoutSeconds: 3 - volumeMounts: - - name: vault-secrets - mountPath: /vault/secrets - readOnly: true - - name: vault-scripts - mountPath: /vault/scripts - readOnly: true resources: requests: cpu: 100m @@ -108,14 +120,3 @@ spec: limits: cpu: 500m memory: 512Mi - volumes: - - name: vault-secrets - csi: - driver: secrets-store.csi.k8s.io - readOnly: true - volumeAttributes: - secretProviderClass: bstein-dev-home-vault - - name: vault-scripts - configMap: - name: bstein-dev-home-vault-env - defaultMode: 0555 diff --git a/services/bstein-dev-home/chat-ai-gateway-deployment.yaml b/services/bstein-dev-home/chat-ai-gateway-deployment.yaml index 4fb4ba5..fba58bc 100644 --- a/services/bstein-dev-home/chat-ai-gateway-deployment.yaml +++ b/services/bstein-dev-home/chat-ai-gateway-deployment.yaml @@ -14,6 +14,25 @@ spec: 
metadata: labels: app: chat-ai-gateway + annotations: + vault.hashicorp.com/agent-inject: "true" + vault.hashicorp.com/role: "bstein-dev-home" + vault.hashicorp.com/agent-inject-secret-portal-env.sh: "kv/data/atlas/portal/atlas-portal-db" + vault.hashicorp.com/agent-inject-template-portal-env.sh: | + {{- with secret "kv/data/atlas/portal/atlas-portal-db" -}} + export PORTAL_DATABASE_URL="{{ .Data.data.PORTAL_DATABASE_URL }}" + {{- end }} + {{- with secret "kv/data/atlas/portal/bstein-dev-home-keycloak-admin" -}} + export KEYCLOAK_ADMIN_CLIENT_SECRET="{{ .Data.data.client_secret }}" + {{- end }} + {{- with secret "kv/data/atlas/shared/chat-ai-keys-runtime" -}} + export CHAT_KEY_MATRIX="{{ .Data.data.matrix }}" + export CHAT_KEY_HOMEPAGE="{{ .Data.data.homepage }}" + {{- end }} + {{- with secret "kv/data/atlas/shared/portal-e2e-client" -}} + export PORTAL_E2E_CLIENT_ID="{{ .Data.data.client_id }}" + export PORTAL_E2E_CLIENT_SECRET="{{ .Data.data.client_secret }}" + {{- end }} spec: serviceAccountName: bstein-dev-home nodeSelector: @@ -24,7 +43,7 @@ spec: image: python:3.11-slim command: ["/bin/sh","-c"] args: - - . /vault/scripts/bstein_dev_home_vault_env.sh && exec python /app/gateway.py + - . 
/vault/secrets/portal-env.sh && exec python /app/gateway.py env: - name: UPSTREAM_URL value: http://bstein-dev-home-backend/api/chat @@ -54,23 +73,7 @@ spec: - name: code mountPath: /app/gateway.py subPath: gateway.py - - name: vault-secrets - mountPath: /vault/secrets - readOnly: true - - name: vault-scripts - mountPath: /vault/scripts - readOnly: true volumes: - name: code configMap: name: chat-ai-gateway - - name: vault-secrets - csi: - driver: secrets-store.csi.k8s.io - readOnly: true - volumeAttributes: - secretProviderClass: bstein-dev-home-vault - - name: vault-scripts - configMap: - name: bstein-dev-home-vault-env - defaultMode: 0555 diff --git a/services/bstein-dev-home/kustomization.yaml b/services/bstein-dev-home/kustomization.yaml index a57c81a..31e1d41 100644 --- a/services/bstein-dev-home/kustomization.yaml +++ b/services/bstein-dev-home/kustomization.yaml @@ -19,12 +19,6 @@ resources: - portal-onboarding-e2e-test-job.yaml - ingress.yaml configMapGenerator: - - name: bstein-dev-home-vault-env - namespace: bstein-dev-home - files: - - bstein_dev_home_vault_env.sh=scripts/bstein_dev_home_vault_env.sh - options: - disableNameSuffixHash: true - name: chat-ai-gateway namespace: bstein-dev-home files: diff --git a/services/bstein-dev-home/portal-onboarding-e2e-test-job.yaml b/services/bstein-dev-home/portal-onboarding-e2e-test-job.yaml index b5fdc6d..dce1471 100644 --- a/services/bstein-dev-home/portal-onboarding-e2e-test-job.yaml +++ b/services/bstein-dev-home/portal-onboarding-e2e-test-job.yaml @@ -7,6 +7,26 @@ metadata: spec: backoffLimit: 0 template: + metadata: + annotations: + vault.hashicorp.com/agent-inject: "true" + vault.hashicorp.com/role: "bstein-dev-home" + vault.hashicorp.com/agent-inject-secret-portal-env.sh: "kv/data/atlas/portal/atlas-portal-db" + vault.hashicorp.com/agent-inject-template-portal-env.sh: | + {{- with secret "kv/data/atlas/portal/atlas-portal-db" -}} + export PORTAL_DATABASE_URL="{{ .Data.data.PORTAL_DATABASE_URL }}" + {{- 
end }} + {{- with secret "kv/data/atlas/portal/bstein-dev-home-keycloak-admin" -}} + export KEYCLOAK_ADMIN_CLIENT_SECRET="{{ .Data.data.client_secret }}" + {{- end }} + {{- with secret "kv/data/atlas/shared/chat-ai-keys-runtime" -}} + export CHAT_KEY_MATRIX="{{ .Data.data.matrix }}" + export CHAT_KEY_HOMEPAGE="{{ .Data.data.homepage }}" + {{- end }} + {{- with secret "kv/data/atlas/shared/portal-e2e-client" -}} + export PORTAL_E2E_CLIENT_ID="{{ .Data.data.client_id }}" + export PORTAL_E2E_CLIENT_SECRET="{{ .Data.data.client_secret }}" + {{- end }} spec: restartPolicy: Never serviceAccountName: bstein-dev-home @@ -40,30 +60,14 @@ spec: args: - | set -euo pipefail - . /vault/scripts/bstein_dev_home_vault_env.sh + . /vault/secrets/portal-env.sh python /scripts/test_portal_onboarding_flow.py volumeMounts: - name: tests mountPath: /scripts readOnly: true - - name: vault-secrets - mountPath: /vault/secrets - readOnly: true - - name: vault-scripts - mountPath: /vault/scripts - readOnly: true volumes: - name: tests configMap: name: portal-onboarding-e2e-tests defaultMode: 0555 - - name: vault-secrets - csi: - driver: secrets-store.csi.k8s.io - readOnly: true - volumeAttributes: - secretProviderClass: bstein-dev-home-vault - - name: vault-scripts - configMap: - name: bstein-dev-home-vault-env - defaultMode: 0555 diff --git a/services/bstein-dev-home/scripts/bstein_dev_home_vault_env.sh b/services/bstein-dev-home/scripts/bstein_dev_home_vault_env.sh deleted file mode 100644 index 8cab54e..0000000 --- a/services/bstein-dev-home/scripts/bstein_dev_home_vault_env.sh +++ /dev/null @@ -1,17 +0,0 @@ -#!/usr/bin/env sh -set -eu - -vault_dir="/vault/secrets" - -read_secret() { - cat "${vault_dir}/$1" -} - -export KEYCLOAK_ADMIN_CLIENT_SECRET="$(read_secret bstein-dev-home-keycloak-admin__client_secret)" -export PORTAL_DATABASE_URL="$(read_secret atlas-portal-db__PORTAL_DATABASE_URL)" - -export CHAT_KEY_MATRIX="$(read_secret chat-ai-keys-runtime__matrix)" -export 
CHAT_KEY_HOMEPAGE="$(read_secret chat-ai-keys-runtime__homepage)" - -export PORTAL_E2E_CLIENT_ID="$(read_secret portal-e2e-client__client_id)" -export PORTAL_E2E_CLIENT_SECRET="$(read_secret portal-e2e-client__client_secret)" diff --git a/services/bstein-dev-home/secretproviderclass.yaml b/services/bstein-dev-home/secretproviderclass.yaml index c153211..f330fe6 100644 --- a/services/bstein-dev-home/secretproviderclass.yaml +++ b/services/bstein-dev-home/secretproviderclass.yaml @@ -10,30 +10,6 @@ spec: vaultAddress: "http://vault.vault.svc.cluster.local:8200" roleName: "bstein-dev-home" objects: | - - objectName: "atlas-portal-db__PORTAL_DATABASE_URL" - secretPath: "kv/data/atlas/portal/atlas-portal-db" - secretKey: "PORTAL_DATABASE_URL" - - objectName: "bstein-dev-home-keycloak-admin__client_secret" - secretPath: "kv/data/atlas/portal/bstein-dev-home-keycloak-admin" - secretKey: "client_secret" - - objectName: "chat-ai-keys__homepage" - secretPath: "kv/data/atlas/portal/chat-ai-keys" - secretKey: "homepage" - - objectName: "chat-ai-keys__matrix" - secretPath: "kv/data/atlas/portal/chat-ai-keys" - secretKey: "matrix" - - objectName: "chat-ai-keys-runtime__homepage" - secretPath: "kv/data/atlas/shared/chat-ai-keys-runtime" - secretKey: "homepage" - - objectName: "chat-ai-keys-runtime__matrix" - secretPath: "kv/data/atlas/shared/chat-ai-keys-runtime" - secretKey: "matrix" - - objectName: "portal-e2e-client__client_id" - secretPath: "kv/data/atlas/shared/portal-e2e-client" - secretKey: "client_id" - - objectName: "portal-e2e-client__client_secret" - secretPath: "kv/data/atlas/shared/portal-e2e-client" - secretKey: "client_secret" - objectName: "harbor-pull__dockerconfigjson" secretPath: "kv/data/atlas/harbor-pull/bstein-dev-home" secretKey: "dockerconfigjson" diff --git a/services/bstein-dev-home/vaultwarden-cred-sync-cronjob.yaml b/services/bstein-dev-home/vaultwarden-cred-sync-cronjob.yaml index 5d7531e..b46a2e3 100644 --- 
a/services/bstein-dev-home/vaultwarden-cred-sync-cronjob.yaml +++ b/services/bstein-dev-home/vaultwarden-cred-sync-cronjob.yaml @@ -13,6 +13,26 @@ spec: spec: backoffLimit: 0 template: + metadata: + annotations: + vault.hashicorp.com/agent-inject: "true" + vault.hashicorp.com/role: "bstein-dev-home" + vault.hashicorp.com/agent-inject-secret-portal-env.sh: "kv/data/atlas/portal/atlas-portal-db" + vault.hashicorp.com/agent-inject-template-portal-env.sh: | + {{- with secret "kv/data/atlas/portal/atlas-portal-db" -}} + export PORTAL_DATABASE_URL="{{ .Data.data.PORTAL_DATABASE_URL }}" + {{- end }} + {{- with secret "kv/data/atlas/portal/bstein-dev-home-keycloak-admin" -}} + export KEYCLOAK_ADMIN_CLIENT_SECRET="{{ .Data.data.client_secret }}" + {{- end }} + {{- with secret "kv/data/atlas/shared/chat-ai-keys-runtime" -}} + export CHAT_KEY_MATRIX="{{ .Data.data.matrix }}" + export CHAT_KEY_HOMEPAGE="{{ .Data.data.homepage }}" + {{- end }} + {{- with secret "kv/data/atlas/shared/portal-e2e-client" -}} + export PORTAL_E2E_CLIENT_ID="{{ .Data.data.client_id }}" + export PORTAL_E2E_CLIENT_SECRET="{{ .Data.data.client_secret }}" + {{- end }} spec: serviceAccountName: bstein-dev-home restartPolicy: Never @@ -28,7 +48,7 @@ spec: command: ["/bin/sh", "-c"] args: - >- - . /vault/scripts/bstein_dev_home_vault_env.sh + . 
/vault/secrets/portal-env.sh && exec python /scripts/vaultwarden_cred_sync.py env: - name: PYTHONPATH @@ -49,24 +69,8 @@ spec: - name: vaultwarden-cred-sync-script mountPath: /scripts readOnly: true - - name: vault-secrets - mountPath: /vault/secrets - readOnly: true - - name: vault-scripts - mountPath: /vault/scripts - readOnly: true volumes: - name: vaultwarden-cred-sync-script configMap: name: vaultwarden-cred-sync-script defaultMode: 0555 - - name: vault-secrets - csi: - driver: secrets-store.csi.k8s.io - readOnly: true - volumeAttributes: - secretProviderClass: bstein-dev-home-vault - - name: vault-scripts - configMap: - name: bstein-dev-home-vault-env - defaultMode: 0555 diff --git a/services/gitea/deployment.yaml b/services/gitea/deployment.yaml index 4fa1ecb..e67b3b9 100644 --- a/services/gitea/deployment.yaml +++ b/services/gitea/deployment.yaml @@ -20,6 +20,39 @@ spec: metadata: labels: app: gitea + annotations: + vault.hashicorp.com/agent-inject: "true" + vault.hashicorp.com/role: "gitea" + vault.hashicorp.com/agent-inject-secret-gitea-db-secret__password: "kv/data/atlas/gitea/gitea-db-secret" + vault.hashicorp.com/agent-inject-template-gitea-db-secret__password: | + {{- with secret "kv/data/atlas/gitea/gitea-db-secret" -}} + {{ .Data.data.password }} + {{- end }} + vault.hashicorp.com/agent-inject-secret-gitea-secret__SECRET_KEY: "kv/data/atlas/gitea/gitea-secret" + vault.hashicorp.com/agent-inject-template-gitea-secret__SECRET_KEY: | + {{- with secret "kv/data/atlas/gitea/gitea-secret" -}} + {{ .Data.data.SECRET_KEY }} + {{- end }} + vault.hashicorp.com/agent-inject-secret-gitea-secret__INTERNAL_TOKEN: "kv/data/atlas/gitea/gitea-secret" + vault.hashicorp.com/agent-inject-template-gitea-secret__INTERNAL_TOKEN: | + {{- with secret "kv/data/atlas/gitea/gitea-secret" -}} + {{ .Data.data.INTERNAL_TOKEN }} + {{- end }} + vault.hashicorp.com/agent-inject-secret-gitea-oidc__client_id: "kv/data/atlas/gitea/gitea-oidc" + 
vault.hashicorp.com/agent-inject-template-gitea-oidc__client_id: | + {{- with secret "kv/data/atlas/gitea/gitea-oidc" -}} + {{ .Data.data.client_id }} + {{- end }} + vault.hashicorp.com/agent-inject-secret-gitea-oidc__client_secret: "kv/data/atlas/gitea/gitea-oidc" + vault.hashicorp.com/agent-inject-template-gitea-oidc__client_secret: | + {{- with secret "kv/data/atlas/gitea/gitea-oidc" -}} + {{ .Data.data.client_secret }} + {{- end }} + vault.hashicorp.com/agent-inject-secret-gitea-oidc__openid_auto_discovery_url: "kv/data/atlas/gitea/gitea-oidc" + vault.hashicorp.com/agent-inject-template-gitea-oidc__openid_auto_discovery_url: | + {{- with secret "kv/data/atlas/gitea/gitea-oidc" -}} + {{ .Data.data.openid_auto_discovery_url }} + {{- end }} spec: serviceAccountName: gitea-vault initContainers: @@ -75,9 +108,6 @@ spec: volumeMounts: - name: gitea-data mountPath: /data - - name: vault-secrets - mountPath: /vault/secrets - readOnly: true nodeSelector: node-role.kubernetes.io/worker: "true" affinity: @@ -157,16 +187,7 @@ spec: volumeMounts: - name: gitea-data mountPath: /data - - name: vault-secrets - mountPath: /vault/secrets - readOnly: true volumes: - name: gitea-data persistentVolumeClaim: claimName: gitea-data - - name: vault-secrets - csi: - driver: secrets-store.csi.k8s.io - readOnly: true - volumeAttributes: - secretProviderClass: gitea-vault diff --git a/services/gitea/kustomization.yaml b/services/gitea/kustomization.yaml index 84a1b64..b09f5fd 100644 --- a/services/gitea/kustomization.yaml +++ b/services/gitea/kustomization.yaml @@ -5,7 +5,6 @@ resources: - namespace.yaml - serviceaccount.yaml - pvc.yaml - - secretproviderclass.yaml - deployment.yaml - service.yaml - ingress.yaml diff --git a/services/gitea/secretproviderclass.yaml b/services/gitea/secretproviderclass.yaml deleted file mode 100644 index b555025..0000000 --- a/services/gitea/secretproviderclass.yaml +++ /dev/null @@ -1,30 +0,0 @@ -# services/gitea/secretproviderclass.yaml -apiVersion: 
secrets-store.csi.x-k8s.io/v1 -kind: SecretProviderClass -metadata: - name: gitea-vault - namespace: gitea -spec: - provider: vault - parameters: - vaultAddress: "http://vault.vault.svc.cluster.local:8200" - roleName: "gitea" - objects: | - - objectName: "gitea-db-secret__password" - secretPath: "kv/data/atlas/gitea/gitea-db-secret" - secretKey: "password" - - objectName: "gitea-secret__SECRET_KEY" - secretPath: "kv/data/atlas/gitea/gitea-secret" - secretKey: "SECRET_KEY" - - objectName: "gitea-secret__INTERNAL_TOKEN" - secretPath: "kv/data/atlas/gitea/gitea-secret" - secretKey: "INTERNAL_TOKEN" - - objectName: "gitea-oidc__client_id" - secretPath: "kv/data/atlas/gitea/gitea-oidc" - secretKey: "client_id" - - objectName: "gitea-oidc__client_secret" - secretPath: "kv/data/atlas/gitea/gitea-oidc" - secretKey: "client_secret" - - objectName: "gitea-oidc__openid_auto_discovery_url" - secretPath: "kv/data/atlas/gitea/gitea-oidc" - secretKey: "openid_auto_discovery_url" diff --git a/services/nextcloud-mail-sync/cronjob.yaml b/services/nextcloud-mail-sync/cronjob.yaml index 129022b..75fe548 100644 --- a/services/nextcloud-mail-sync/cronjob.yaml +++ b/services/nextcloud-mail-sync/cronjob.yaml @@ -12,6 +12,35 @@ spec: jobTemplate: spec: template: + metadata: + annotations: + vault.hashicorp.com/agent-inject: "true" + vault.hashicorp.com/role: "nextcloud" + vault.hashicorp.com/agent-inject-secret-nextcloud-env.sh: "kv/data/atlas/nextcloud/nextcloud-db" + vault.hashicorp.com/agent-inject-template-nextcloud-env.sh: | + {{- with secret "kv/data/atlas/nextcloud/nextcloud-db" -}} + export POSTGRES_DB="{{ .Data.data.database }}" + export POSTGRES_USER="{{ .Data.data.db-username }}" + export POSTGRES_PASSWORD="{{ .Data.data.db-password }}" + {{- end }} + {{- with secret "kv/data/atlas/nextcloud/nextcloud-admin" -}} + export NEXTCLOUD_ADMIN_USER="{{ .Data.data.admin-user }}" + export NEXTCLOUD_ADMIN_PASSWORD="{{ .Data.data.admin-password }}" + {{- end }} + export 
ADMIN_USER="${NEXTCLOUD_ADMIN_USER}" + export ADMIN_PASS="${NEXTCLOUD_ADMIN_PASSWORD}" + {{- with secret "kv/data/atlas/nextcloud/nextcloud-oidc" -}} + export OIDC_CLIENT_ID="{{ .Data.data.client-id }}" + export OIDC_CLIENT_SECRET="{{ .Data.data.client-secret }}" + {{- end }} + {{- with secret "kv/data/atlas/shared/postmark-relay" -}} + export SMTP_NAME="{{ index .Data.data "relay-username" }}" + export SMTP_PASSWORD="{{ index .Data.data "relay-password" }}" + {{- end }} + {{- with secret "kv/data/atlas/shared/keycloak-admin" -}} + export KC_ADMIN_USER="{{ .Data.data.username }}" + export KC_ADMIN_PASS="{{ .Data.data.password }}" + {{- end }} spec: restartPolicy: OnFailure securityContext: @@ -53,16 +82,10 @@ spec: - name: sync-script mountPath: /sync/sync.sh subPath: sync.sh - - name: vault-secrets - mountPath: /vault/secrets - readOnly: true - - name: vault-scripts - mountPath: /vault/scripts - readOnly: true args: - | set -euo pipefail - . /vault/scripts/nextcloud_vault_env.sh + . /vault/secrets/nextcloud-env.sh exec /sync/sync.sh volumes: - name: nextcloud-config-pvc @@ -81,13 +104,3 @@ spec: configMap: name: nextcloud-mail-sync-script defaultMode: 0755 - - name: vault-secrets - csi: - driver: secrets-store.csi.k8s.io - readOnly: true - volumeAttributes: - secretProviderClass: nextcloud-vault - - name: vault-scripts - configMap: - name: nextcloud-vault-env - defaultMode: 0555 diff --git a/services/nextcloud/deployment.yaml b/services/nextcloud/deployment.yaml index 45f5e8f..84efb1d 100644 --- a/services/nextcloud/deployment.yaml +++ b/services/nextcloud/deployment.yaml @@ -15,6 +15,34 @@ spec: metadata: labels: app: nextcloud + annotations: + vault.hashicorp.com/agent-inject: "true" + vault.hashicorp.com/role: "nextcloud" + vault.hashicorp.com/agent-inject-secret-nextcloud-env.sh: "kv/data/atlas/nextcloud/nextcloud-db" + vault.hashicorp.com/agent-inject-template-nextcloud-env.sh: | + {{- with secret "kv/data/atlas/nextcloud/nextcloud-db" -}} + export 
POSTGRES_DB="{{ .Data.data.database }}" + export POSTGRES_USER="{{ .Data.data.db-username }}" + export POSTGRES_PASSWORD="{{ .Data.data.db-password }}" + {{- end }} + {{- with secret "kv/data/atlas/nextcloud/nextcloud-admin" -}} + export NEXTCLOUD_ADMIN_USER="{{ .Data.data.admin-user }}" + export NEXTCLOUD_ADMIN_PASSWORD="{{ .Data.data.admin-password }}" + {{- end }} + export ADMIN_USER="${NEXTCLOUD_ADMIN_USER}" + export ADMIN_PASS="${NEXTCLOUD_ADMIN_PASSWORD}" + {{- with secret "kv/data/atlas/nextcloud/nextcloud-oidc" -}} + export OIDC_CLIENT_ID="{{ .Data.data.client-id }}" + export OIDC_CLIENT_SECRET="{{ .Data.data.client-secret }}" + {{- end }} + {{- with secret "kv/data/atlas/shared/postmark-relay" -}} + export SMTP_NAME="{{ index .Data.data "relay-username" }}" + export SMTP_PASSWORD="{{ index .Data.data "relay-password" }}" + {{- end }} + {{- with secret "kv/data/atlas/shared/keycloak-admin" -}} + export KC_ADMIN_USER="{{ .Data.data.username }}" + export KC_ADMIN_PASS="{{ .Data.data.password }}" + {{- end }} spec: nodeSelector: hardware: rpi5 @@ -81,7 +109,7 @@ spec: command: ["/bin/sh", "-c"] args: - | - . /vault/scripts/nextcloud_vault_env.sh + . /vault/secrets/nextcloud-env.sh installed="$(su -s /bin/sh www-data -c "php /var/www/html/occ status" 2>/dev/null | awk '/installed:/{print $3}' || true)" if [ ! 
-s /var/www/html/config/config.php ]; then su -s /bin/sh www-data -c "php /var/www/html/occ maintenance:install --database pgsql --database-host \"${POSTGRES_HOST}\" --database-name \"${POSTGRES_DB}\" --database-user \"${POSTGRES_USER}\" --database-pass \"${POSTGRES_PASSWORD}\" --admin-user \"${NEXTCLOUD_ADMIN_USER}\" --admin-pass \"${NEXTCLOUD_ADMIN_PASSWORD}\" --data-dir /var/www/html/data" @@ -164,12 +192,6 @@ spec: - name: nextcloud-config-extra mountPath: /var/www/html/config/extra.config.php subPath: extra.config.php - - name: vault-secrets - mountPath: /vault/secrets - readOnly: true - - name: vault-scripts - mountPath: /vault/scripts - readOnly: true containers: - name: nextcloud image: nextcloud:29-apache @@ -177,7 +199,7 @@ spec: command: ["/bin/sh", "-c"] args: - >- - . /vault/scripts/nextcloud_vault_env.sh + . /vault/secrets/nextcloud-env.sh && exec /entrypoint.sh apache2-foreground env: # DB (external secret required: nextcloud-db with keys username,password,database) @@ -223,12 +245,6 @@ spec: - name: nextcloud-config-extra mountPath: /var/www/html/config/extra.config.php subPath: extra.config.php - - name: vault-secrets - mountPath: /vault/secrets - readOnly: true - - name: vault-scripts - mountPath: /vault/scripts - readOnly: true resources: requests: cpu: 250m @@ -253,13 +269,3 @@ spec: configMap: name: nextcloud-config defaultMode: 0444 - - name: vault-secrets - csi: - driver: secrets-store.csi.k8s.io - readOnly: true - volumeAttributes: - secretProviderClass: nextcloud-vault - - name: vault-scripts - configMap: - name: nextcloud-vault-env - defaultMode: 0555 diff --git a/services/nextcloud/kustomization.yaml b/services/nextcloud/kustomization.yaml index f16db47..ebaeaaf 100644 --- a/services/nextcloud/kustomization.yaml +++ b/services/nextcloud/kustomization.yaml @@ -5,7 +5,6 @@ namespace: nextcloud resources: - namespace.yaml - serviceaccount.yaml - - secretproviderclass.yaml - configmap.yaml - pvc.yaml - deployment.yaml @@ -15,11 +14,6 @@ 
resources: - service.yaml - ingress.yaml configMapGenerator: - - name: nextcloud-vault-env - files: - - nextcloud_vault_env.sh=scripts/nextcloud_vault_env.sh - options: - disableNameSuffixHash: true - name: nextcloud-maintenance-script files: - maintenance.sh=scripts/nextcloud-maintenance.sh diff --git a/services/nextcloud/maintenance-cronjob.yaml b/services/nextcloud/maintenance-cronjob.yaml index d76478e..aaedbc8 100644 --- a/services/nextcloud/maintenance-cronjob.yaml +++ b/services/nextcloud/maintenance-cronjob.yaml @@ -10,6 +10,35 @@ spec: jobTemplate: spec: template: + metadata: + annotations: + vault.hashicorp.com/agent-inject: "true" + vault.hashicorp.com/role: "nextcloud" + vault.hashicorp.com/agent-inject-secret-nextcloud-env.sh: "kv/data/atlas/nextcloud/nextcloud-db" + vault.hashicorp.com/agent-inject-template-nextcloud-env.sh: | + {{- with secret "kv/data/atlas/nextcloud/nextcloud-db" -}} + export POSTGRES_DB="{{ .Data.data.database }}" + export POSTGRES_USER="{{ .Data.data.db-username }}" + export POSTGRES_PASSWORD="{{ .Data.data.db-password }}" + {{- end }} + {{- with secret "kv/data/atlas/nextcloud/nextcloud-admin" -}} + export NEXTCLOUD_ADMIN_USER="{{ .Data.data.admin-user }}" + export NEXTCLOUD_ADMIN_PASSWORD="{{ .Data.data.admin-password }}" + {{- end }} + export ADMIN_USER="${NEXTCLOUD_ADMIN_USER}" + export ADMIN_PASS="${NEXTCLOUD_ADMIN_PASSWORD}" + {{- with secret "kv/data/atlas/nextcloud/nextcloud-oidc" -}} + export OIDC_CLIENT_ID="{{ .Data.data.client-id }}" + export OIDC_CLIENT_SECRET="{{ .Data.data.client-secret }}" + {{- end }} + {{- with secret "kv/data/atlas/shared/postmark-relay" -}} + export SMTP_NAME="{{ index .Data.data "relay-username" }}" + export SMTP_PASSWORD="{{ index .Data.data "relay-password" }}" + {{- end }} + {{- with secret "kv/data/atlas/shared/keycloak-admin" -}} + export KC_ADMIN_USER="{{ .Data.data.username }}" + export KC_ADMIN_PASS="{{ .Data.data.password }}" + {{- end }} spec: restartPolicy: OnFailure 
securityContext: @@ -24,7 +53,7 @@ spec: args: - | set -euo pipefail - . /vault/scripts/nextcloud_vault_env.sh + . /vault/secrets/nextcloud-env.sh exec /maintenance/maintenance.sh env: - name: NC_URL @@ -41,12 +70,6 @@ spec: - name: maintenance-script mountPath: /maintenance/maintenance.sh subPath: maintenance.sh - - name: vault-secrets - mountPath: /vault/secrets - readOnly: true - - name: vault-scripts - mountPath: /vault/scripts - readOnly: true resources: requests: cpu: 100m @@ -71,13 +94,3 @@ spec: configMap: name: nextcloud-maintenance-script defaultMode: 0755 - - name: vault-secrets - csi: - driver: secrets-store.csi.k8s.io - readOnly: true - volumeAttributes: - secretProviderClass: nextcloud-vault - - name: vault-scripts - configMap: - name: nextcloud-vault-env - defaultMode: 0555 diff --git a/services/nextcloud/scripts/nextcloud_vault_env.sh b/services/nextcloud/scripts/nextcloud_vault_env.sh deleted file mode 100644 index 0f34c9f..0000000 --- a/services/nextcloud/scripts/nextcloud_vault_env.sh +++ /dev/null @@ -1,27 +0,0 @@ -#!/usr/bin/env sh -set -eu - -vault_dir="/vault/secrets" - -read_secret() { - cat "${vault_dir}/$1" -} - -export POSTGRES_DB="$(read_secret nextcloud-db__database)" -export POSTGRES_USER="$(read_secret nextcloud-db__db-username)" -export POSTGRES_PASSWORD="$(read_secret nextcloud-db__db-password)" - -export NEXTCLOUD_ADMIN_USER="$(read_secret nextcloud-admin__admin-user)" -export NEXTCLOUD_ADMIN_PASSWORD="$(read_secret nextcloud-admin__admin-password)" - -export ADMIN_USER="${NEXTCLOUD_ADMIN_USER}" -export ADMIN_PASS="${NEXTCLOUD_ADMIN_PASSWORD}" - -export OIDC_CLIENT_ID="$(read_secret nextcloud-oidc__client-id)" -export OIDC_CLIENT_SECRET="$(read_secret nextcloud-oidc__client-secret)" - -export SMTP_NAME="$(read_secret nextcloud-smtp__smtp-username)" -export SMTP_PASSWORD="$(read_secret nextcloud-smtp__smtp-password)" - -export KC_ADMIN_USER="$(read_secret keycloak-admin__username)" -export KC_ADMIN_PASS="$(read_secret 
keycloak-admin__password)" diff --git a/services/nextcloud/secretproviderclass.yaml b/services/nextcloud/secretproviderclass.yaml deleted file mode 100644 index 1d9a104..0000000 --- a/services/nextcloud/secretproviderclass.yaml +++ /dev/null @@ -1,45 +0,0 @@ -# services/nextcloud/secretproviderclass.yaml -apiVersion: secrets-store.csi.x-k8s.io/v1 -kind: SecretProviderClass -metadata: - name: nextcloud-vault - namespace: nextcloud -spec: - provider: vault - parameters: - vaultAddress: "http://vault.vault.svc.cluster.local:8200" - roleName: "nextcloud" - objects: | - - objectName: "nextcloud-db__database" - secretPath: "kv/data/atlas/nextcloud/nextcloud-db" - secretKey: "database" - - objectName: "nextcloud-db__db-username" - secretPath: "kv/data/atlas/nextcloud/nextcloud-db" - secretKey: "db-username" - - objectName: "nextcloud-db__db-password" - secretPath: "kv/data/atlas/nextcloud/nextcloud-db" - secretKey: "db-password" - - objectName: "nextcloud-admin__admin-user" - secretPath: "kv/data/atlas/nextcloud/nextcloud-admin" - secretKey: "admin-user" - - objectName: "nextcloud-admin__admin-password" - secretPath: "kv/data/atlas/nextcloud/nextcloud-admin" - secretKey: "admin-password" - - objectName: "nextcloud-oidc__client-id" - secretPath: "kv/data/atlas/nextcloud/nextcloud-oidc" - secretKey: "client-id" - - objectName: "nextcloud-oidc__client-secret" - secretPath: "kv/data/atlas/nextcloud/nextcloud-oidc" - secretKey: "client-secret" - - objectName: "nextcloud-smtp__smtp-username" - secretPath: "kv/data/atlas/shared/postmark-relay" - secretKey: "relay-username" - - objectName: "nextcloud-smtp__smtp-password" - secretPath: "kv/data/atlas/shared/postmark-relay" - secretKey: "relay-password" - - objectName: "keycloak-admin__username" - secretPath: "kv/data/atlas/shared/keycloak-admin" - secretKey: "username" - - objectName: "keycloak-admin__password" - secretPath: "kv/data/atlas/shared/keycloak-admin" - secretKey: "password" diff --git a/services/outline/deployment.yaml 
b/services/outline/deployment.yaml index 0c4825e..04341a0 100644 --- a/services/outline/deployment.yaml +++ b/services/outline/deployment.yaml @@ -20,6 +20,34 @@ spec: metadata: labels: app: outline + annotations: + vault.hashicorp.com/agent-inject: "true" + vault.hashicorp.com/role: "outline" + vault.hashicorp.com/agent-inject-secret-outline-env.sh: "kv/data/atlas/outline/outline-db" + vault.hashicorp.com/agent-inject-template-outline-env.sh: | + {{- with secret "kv/data/atlas/outline/outline-db" -}} + export DATABASE_URL="{{ .Data.data.DATABASE_URL }}" + {{- end }} + {{- with secret "kv/data/atlas/outline/outline-secrets" -}} + export SECRET_KEY="{{ .Data.data.SECRET_KEY }}" + export UTILS_SECRET="{{ .Data.data.UTILS_SECRET }}" + {{- end }} + {{- with secret "kv/data/atlas/outline/outline-oidc" -}} + export OIDC_AUTH_URI="{{ .Data.data.OIDC_AUTH_URI }}" + export OIDC_CLIENT_ID="{{ .Data.data.OIDC_CLIENT_ID }}" + export OIDC_CLIENT_SECRET="{{ .Data.data.OIDC_CLIENT_SECRET }}" + export OIDC_LOGOUT_URI="{{ .Data.data.OIDC_LOGOUT_URI }}" + export OIDC_TOKEN_URI="{{ .Data.data.OIDC_TOKEN_URI }}" + export OIDC_USERINFO_URI="{{ .Data.data.OIDC_USERINFO_URI }}" + {{- end }} + {{- with secret "kv/data/atlas/outline/outline-smtp" -}} + export SMTP_FROM_EMAIL="{{ .Data.data.SMTP_FROM_EMAIL }}" + export SMTP_HOST="{{ .Data.data.SMTP_HOST }}" + {{- end }} + {{- with secret "kv/data/atlas/shared/postmark-relay" -}} + export SMTP_USERNAME="{{ index .Data.data "relay-username" }}" + export SMTP_PASSWORD="{{ index .Data.data "relay-password" }}" + {{- end }} spec: serviceAccountName: outline-vault nodeSelector: @@ -39,7 +67,7 @@ spec: - /bin/sh - -c args: - - . /vault/scripts/outline_vault_env.sh && exec node build/server/index.js + - . 
/vault/secrets/outline-env.sh && exec node build/server/index.js ports: - name: http containerPort: 3000 @@ -75,12 +103,6 @@ spec: volumeMounts: - name: user-data mountPath: /var/lib/outline/data - - name: vault-secrets - mountPath: /vault/secrets - readOnly: true - - name: vault-scripts - mountPath: /vault/scripts - readOnly: true readinessProbe: httpGet: path: /_health @@ -108,13 +130,3 @@ spec: - name: user-data persistentVolumeClaim: claimName: outline-user-data - - name: vault-secrets - csi: - driver: secrets-store.csi.k8s.io - readOnly: true - volumeAttributes: - secretProviderClass: outline-vault - - name: vault-scripts - configMap: - name: outline-vault-env - defaultMode: 0555 diff --git a/services/outline/kustomization.yaml b/services/outline/kustomization.yaml index 011c6e6..2fd0ae5 100644 --- a/services/outline/kustomization.yaml +++ b/services/outline/kustomization.yaml @@ -5,16 +5,9 @@ namespace: outline resources: - namespace.yaml - serviceaccount.yaml - - secretproviderclass.yaml - user-pvc.yaml - redis-deployment.yaml - redis-service.yaml - deployment.yaml - service.yaml - ingress.yaml -generatorOptions: - disableNameSuffixHash: true -configMapGenerator: - - name: outline-vault-env - files: - - outline_vault_env.sh=scripts/outline_vault_env.sh diff --git a/services/outline/scripts/outline_vault_env.sh b/services/outline/scripts/outline_vault_env.sh deleted file mode 100644 index d9f8469..0000000 --- a/services/outline/scripts/outline_vault_env.sh +++ /dev/null @@ -1,31 +0,0 @@ -#!/usr/bin/env sh -set -eu - -vault_dir="/vault/secrets" - -read_secret() { - cat "${vault_dir}/$1" -} - -export DATABASE_URL="$(read_secret DATABASE_URL)" -export SECRET_KEY="$(read_secret SECRET_KEY)" -export UTILS_SECRET="$(read_secret UTILS_SECRET)" - -export OIDC_AUTH_URI="$(read_secret OIDC_AUTH_URI)" -export OIDC_CLIENT_ID="$(read_secret OIDC_CLIENT_ID)" -export OIDC_CLIENT_SECRET="$(read_secret OIDC_CLIENT_SECRET)" -export OIDC_LOGOUT_URI="$(read_secret 
OIDC_LOGOUT_URI)" -export OIDC_TOKEN_URI="$(read_secret OIDC_TOKEN_URI)" -export OIDC_USERINFO_URI="$(read_secret OIDC_USERINFO_URI)" - -export SMTP_FROM_EMAIL="$(read_secret SMTP_FROM_EMAIL)" -export SMTP_HOST="$(read_secret SMTP_HOST)" -export SMTP_PASSWORD="$(read_secret SMTP_PASSWORD)" -export SMTP_USERNAME="$(read_secret SMTP_USERNAME)" - -if [ -f "${vault_dir}/AWS_ACCESS_KEY_ID" ]; then - export AWS_ACCESS_KEY_ID="$(read_secret AWS_ACCESS_KEY_ID)" - export AWS_SECRET_ACCESS_KEY="$(read_secret AWS_SECRET_ACCESS_KEY)" - export AWS_S3_UPLOAD_BUCKET_NAME="$(read_secret AWS_S3_UPLOAD_BUCKET_NAME)" - export AWS_S3_UPLOAD_BUCKET_URL="$(read_secret AWS_S3_UPLOAD_BUCKET_URL)" -fi diff --git a/services/outline/secretproviderclass.yaml b/services/outline/secretproviderclass.yaml deleted file mode 100644 index 70891df..0000000 --- a/services/outline/secretproviderclass.yaml +++ /dev/null @@ -1,63 +0,0 @@ -# services/outline/secretproviderclass.yaml -apiVersion: secrets-store.csi.x-k8s.io/v1 -kind: SecretProviderClass -metadata: - name: outline-vault - namespace: outline -spec: - provider: vault - parameters: - vaultAddress: "http://vault.vault.svc.cluster.local:8200" - roleName: "outline" - objects: | - - objectName: "DATABASE_URL" - secretPath: "kv/data/atlas/outline/outline-db" - secretKey: "DATABASE_URL" - - objectName: "SECRET_KEY" - secretPath: "kv/data/atlas/outline/outline-secrets" - secretKey: "SECRET_KEY" - - objectName: "UTILS_SECRET" - secretPath: "kv/data/atlas/outline/outline-secrets" - secretKey: "UTILS_SECRET" - - objectName: "OIDC_AUTH_URI" - secretPath: "kv/data/atlas/outline/outline-oidc" - secretKey: "OIDC_AUTH_URI" - - objectName: "OIDC_CLIENT_ID" - secretPath: "kv/data/atlas/outline/outline-oidc" - secretKey: "OIDC_CLIENT_ID" - - objectName: "OIDC_CLIENT_SECRET" - secretPath: "kv/data/atlas/outline/outline-oidc" - secretKey: "OIDC_CLIENT_SECRET" - - objectName: "OIDC_LOGOUT_URI" - secretPath: "kv/data/atlas/outline/outline-oidc" - secretKey: 
"OIDC_LOGOUT_URI" - - objectName: "OIDC_TOKEN_URI" - secretPath: "kv/data/atlas/outline/outline-oidc" - secretKey: "OIDC_TOKEN_URI" - - objectName: "OIDC_USERINFO_URI" - secretPath: "kv/data/atlas/outline/outline-oidc" - secretKey: "OIDC_USERINFO_URI" - - objectName: "SMTP_FROM_EMAIL" - secretPath: "kv/data/atlas/outline/outline-smtp" - secretKey: "SMTP_FROM_EMAIL" - - objectName: "SMTP_HOST" - secretPath: "kv/data/atlas/outline/outline-smtp" - secretKey: "SMTP_HOST" - - objectName: "SMTP_PASSWORD" - secretPath: "kv/data/atlas/shared/postmark-relay" - secretKey: "relay-password" - - objectName: "SMTP_USERNAME" - secretPath: "kv/data/atlas/shared/postmark-relay" - secretKey: "relay-username" - - objectName: "AWS_ACCESS_KEY_ID" - secretPath: "kv/data/atlas/outline/outline-s3" - secretKey: "AWS_ACCESS_KEY_ID" - - objectName: "AWS_SECRET_ACCESS_KEY" - secretPath: "kv/data/atlas/outline/outline-s3" - secretKey: "AWS_SECRET_ACCESS_KEY" - - objectName: "AWS_S3_UPLOAD_BUCKET_NAME" - secretPath: "kv/data/atlas/outline/outline-s3" - secretKey: "AWS_S3_UPLOAD_BUCKET_NAME" - - objectName: "AWS_S3_UPLOAD_BUCKET_URL" - secretPath: "kv/data/atlas/outline/outline-s3" - secretKey: "AWS_S3_UPLOAD_BUCKET_URL" diff --git a/services/planka/deployment.yaml b/services/planka/deployment.yaml index d2aa431..cec505f 100644 --- a/services/planka/deployment.yaml +++ b/services/planka/deployment.yaml @@ -20,6 +20,37 @@ spec: metadata: labels: app: planka + annotations: + vault.hashicorp.com/agent-inject: "true" + vault.hashicorp.com/role: "planka" + vault.hashicorp.com/agent-inject-secret-planka-env.sh: "kv/data/atlas/planka/planka-db" + vault.hashicorp.com/agent-inject-template-planka-env.sh: | + {{- with secret "kv/data/atlas/planka/planka-db" -}} + export DATABASE_URL="{{ .Data.data.DATABASE_URL }}" + {{- end }} + {{- with secret "kv/data/atlas/planka/planka-secrets" -}} + export SECRET_KEY="{{ .Data.data.SECRET_KEY }}" + {{- end }} + {{- with secret "kv/data/atlas/planka/planka-oidc" -}} + 
export OIDC_CLIENT_ID="{{ .Data.data.OIDC_CLIENT_ID }}" + export OIDC_CLIENT_SECRET="{{ .Data.data.OIDC_CLIENT_SECRET }}" + export OIDC_ENFORCED="{{ .Data.data.OIDC_ENFORCED }}" + export OIDC_IGNORE_ROLES="{{ .Data.data.OIDC_IGNORE_ROLES }}" + export OIDC_ISSUER="{{ .Data.data.OIDC_ISSUER }}" + export OIDC_SCOPES="{{ .Data.data.OIDC_SCOPES }}" + export OIDC_USE_OAUTH_CALLBACK="{{ .Data.data.OIDC_USE_OAUTH_CALLBACK }}" + {{- end }} + {{- with secret "kv/data/atlas/planka/planka-smtp" -}} + export SMTP_FROM="{{ .Data.data.SMTP_FROM }}" + export SMTP_HOST="{{ .Data.data.SMTP_HOST }}" + export SMTP_PORT="{{ .Data.data.SMTP_PORT }}" + export SMTP_SECURE="{{ .Data.data.SMTP_SECURE }}" + export SMTP_TLS_REJECT_UNAUTHORIZED="{{ .Data.data.SMTP_TLS_REJECT_UNAUTHORIZED }}" + {{- end }} + {{- with secret "kv/data/atlas/shared/postmark-relay" -}} + export SMTP_USER="{{ index .Data.data "relay-username" }}" + export SMTP_PASSWORD="{{ index .Data.data "relay-password" }}" + {{- end }} spec: serviceAccountName: planka-vault nodeSelector: @@ -63,7 +94,7 @@ spec: - /bin/sh - -c args: - - . /vault/scripts/planka_vault_env.sh && exec node app.js --prod + - . 
/vault/secrets/planka-env.sh && exec node app.js --prod ports: - name: http containerPort: 1337 @@ -90,12 +121,6 @@ spec: subPath: private/attachments - name: app-data mountPath: /app/.tmp - - name: vault-secrets - mountPath: /vault/secrets - readOnly: true - - name: vault-scripts - mountPath: /vault/scripts - readOnly: true readinessProbe: httpGet: path: / @@ -126,13 +151,3 @@ spec: - name: app-data persistentVolumeClaim: claimName: planka-app-data - - name: vault-secrets - csi: - driver: secrets-store.csi.k8s.io - readOnly: true - volumeAttributes: - secretProviderClass: planka-vault - - name: vault-scripts - configMap: - name: planka-vault-env - defaultMode: 0555 diff --git a/services/planka/kustomization.yaml b/services/planka/kustomization.yaml index 14a7cc9..db19e6e 100644 --- a/services/planka/kustomization.yaml +++ b/services/planka/kustomization.yaml @@ -5,15 +5,8 @@ namespace: planka resources: - namespace.yaml - serviceaccount.yaml - - secretproviderclass.yaml - user-data-pvc.yaml - app-pvc.yaml - deployment.yaml - service.yaml - ingress.yaml -generatorOptions: - disableNameSuffixHash: true -configMapGenerator: - - name: planka-vault-env - files: - - planka_vault_env.sh=scripts/planka_vault_env.sh diff --git a/services/planka/scripts/planka_vault_env.sh b/services/planka/scripts/planka_vault_env.sh deleted file mode 100644 index f5ab2ab..0000000 --- a/services/planka/scripts/planka_vault_env.sh +++ /dev/null @@ -1,27 +0,0 @@ -#!/usr/bin/env sh -set -eu - -vault_dir="/vault/secrets" - -read_secret() { - cat "${vault_dir}/$1" -} - -export DATABASE_URL="$(read_secret DATABASE_URL)" -export SECRET_KEY="$(read_secret SECRET_KEY)" - -export OIDC_CLIENT_ID="$(read_secret OIDC_CLIENT_ID)" -export OIDC_CLIENT_SECRET="$(read_secret OIDC_CLIENT_SECRET)" -export OIDC_ENFORCED="$(read_secret OIDC_ENFORCED)" -export OIDC_IGNORE_ROLES="$(read_secret OIDC_IGNORE_ROLES)" -export OIDC_ISSUER="$(read_secret OIDC_ISSUER)" -export OIDC_SCOPES="$(read_secret OIDC_SCOPES)" 
-export OIDC_USE_OAUTH_CALLBACK="$(read_secret OIDC_USE_OAUTH_CALLBACK)" - -export SMTP_FROM="$(read_secret SMTP_FROM)" -export SMTP_HOST="$(read_secret SMTP_HOST)" -export SMTP_PASSWORD="$(read_secret SMTP_PASSWORD)" -export SMTP_PORT="$(read_secret SMTP_PORT)" -export SMTP_SECURE="$(read_secret SMTP_SECURE)" -export SMTP_TLS_REJECT_UNAUTHORIZED="$(read_secret SMTP_TLS_REJECT_UNAUTHORIZED)" -export SMTP_USER="$(read_secret SMTP_USER)" diff --git a/services/planka/secretproviderclass.yaml b/services/planka/secretproviderclass.yaml deleted file mode 100644 index 028b2b5..0000000 --- a/services/planka/secretproviderclass.yaml +++ /dev/null @@ -1,60 +0,0 @@ -# services/planka/secretproviderclass.yaml -apiVersion: secrets-store.csi.x-k8s.io/v1 -kind: SecretProviderClass -metadata: - name: planka-vault - namespace: planka -spec: - provider: vault - parameters: - vaultAddress: "http://vault.vault.svc.cluster.local:8200" - roleName: "planka" - objects: | - - objectName: "DATABASE_URL" - secretPath: "kv/data/atlas/planka/planka-db" - secretKey: "DATABASE_URL" - - objectName: "SECRET_KEY" - secretPath: "kv/data/atlas/planka/planka-secrets" - secretKey: "SECRET_KEY" - - objectName: "OIDC_CLIENT_ID" - secretPath: "kv/data/atlas/planka/planka-oidc" - secretKey: "OIDC_CLIENT_ID" - - objectName: "OIDC_CLIENT_SECRET" - secretPath: "kv/data/atlas/planka/planka-oidc" - secretKey: "OIDC_CLIENT_SECRET" - - objectName: "OIDC_ENFORCED" - secretPath: "kv/data/atlas/planka/planka-oidc" - secretKey: "OIDC_ENFORCED" - - objectName: "OIDC_IGNORE_ROLES" - secretPath: "kv/data/atlas/planka/planka-oidc" - secretKey: "OIDC_IGNORE_ROLES" - - objectName: "OIDC_ISSUER" - secretPath: "kv/data/atlas/planka/planka-oidc" - secretKey: "OIDC_ISSUER" - - objectName: "OIDC_SCOPES" - secretPath: "kv/data/atlas/planka/planka-oidc" - secretKey: "OIDC_SCOPES" - - objectName: "OIDC_USE_OAUTH_CALLBACK" - secretPath: "kv/data/atlas/planka/planka-oidc" - secretKey: "OIDC_USE_OAUTH_CALLBACK" - - objectName: 
"SMTP_FROM" - secretPath: "kv/data/atlas/planka/planka-smtp" - secretKey: "SMTP_FROM" - - objectName: "SMTP_HOST" - secretPath: "kv/data/atlas/planka/planka-smtp" - secretKey: "SMTP_HOST" - - objectName: "SMTP_PASSWORD" - secretPath: "kv/data/atlas/shared/postmark-relay" - secretKey: "relay-password" - - objectName: "SMTP_PORT" - secretPath: "kv/data/atlas/planka/planka-smtp" - secretKey: "SMTP_PORT" - - objectName: "SMTP_SECURE" - secretPath: "kv/data/atlas/planka/planka-smtp" - secretKey: "SMTP_SECURE" - - objectName: "SMTP_TLS_REJECT_UNAUTHORIZED" - secretPath: "kv/data/atlas/planka/planka-smtp" - secretKey: "SMTP_TLS_REJECT_UNAUTHORIZED" - - objectName: "SMTP_USER" - secretPath: "kv/data/atlas/shared/postmark-relay" - secretKey: "relay-username" diff --git a/services/vaultwarden/deployment.yaml b/services/vaultwarden/deployment.yaml index f102ea9..57789a7 100644 --- a/services/vaultwarden/deployment.yaml +++ b/services/vaultwarden/deployment.yaml @@ -18,6 +18,21 @@ spec: metadata: labels: app: vaultwarden + annotations: + vault.hashicorp.com/agent-inject: "true" + vault.hashicorp.com/role: "vaultwarden" + vault.hashicorp.com/agent-inject-secret-vaultwarden-env.sh: "kv/data/atlas/vaultwarden/vaultwarden-db-url" + vault.hashicorp.com/agent-inject-template-vaultwarden-env.sh: | + {{- with secret "kv/data/atlas/vaultwarden/vaultwarden-db-url" -}} + export DATABASE_URL="{{ .Data.data.DATABASE_URL }}" + {{- end }} + {{- with secret "kv/data/atlas/vaultwarden/vaultwarden-admin" -}} + export ADMIN_TOKEN="{{ .Data.data.ADMIN_TOKEN }}" + {{- end }} + {{- with secret "kv/data/atlas/shared/postmark-relay" -}} + export SMTP_USERNAME="{{ index .Data.data "relay-username" }}" + export SMTP_PASSWORD="{{ index .Data.data "relay-password" }}" + {{- end }} spec: serviceAccountName: vaultwarden-vault containers: @@ -26,7 +41,7 @@ spec: command: ["/bin/sh", "-c"] args: - >- - . /vault/scripts/vaultwarden_vault_env.sh + . 
/vault/secrets/vaultwarden-env.sh && exec /start.sh env: - name: SIGNUPS_ALLOWED @@ -56,23 +71,7 @@ spec: volumeMounts: - name: vaultwarden-data mountPath: /data - - name: vault-secrets - mountPath: /vault/secrets - readOnly: true - - name: vault-scripts - mountPath: /vault/scripts - readOnly: true volumes: - name: vaultwarden-data persistentVolumeClaim: claimName: vaultwarden-data - - name: vault-secrets - csi: - driver: secrets-store.csi.k8s.io - readOnly: true - volumeAttributes: - secretProviderClass: vaultwarden-vault - - name: vault-scripts - configMap: - name: vaultwarden-vault-env - defaultMode: 0555 diff --git a/services/vaultwarden/kustomization.yaml b/services/vaultwarden/kustomization.yaml index c1525f7..c53cb1c 100644 --- a/services/vaultwarden/kustomization.yaml +++ b/services/vaultwarden/kustomization.yaml @@ -6,14 +6,6 @@ resources: - namespace.yaml - serviceaccount.yaml - pvc.yaml - - secretproviderclass.yaml - deployment.yaml - service.yaml - ingress.yaml -configMapGenerator: - - name: vaultwarden-vault-env - namespace: vaultwarden - files: - - vaultwarden_vault_env.sh=scripts/vaultwarden_vault_env.sh - options: - disableNameSuffixHash: true diff --git a/services/vaultwarden/scripts/vaultwarden_vault_env.sh b/services/vaultwarden/scripts/vaultwarden_vault_env.sh deleted file mode 100644 index 7a80081..0000000 --- a/services/vaultwarden/scripts/vaultwarden_vault_env.sh +++ /dev/null @@ -1,14 +0,0 @@ -#!/usr/bin/env sh -set -eu - -vault_dir="/vault/secrets" - -read_secret() { - cat "${vault_dir}/$1" -} - -export DATABASE_URL="$(read_secret vaultwarden-db-url__DATABASE_URL)" -export ADMIN_TOKEN="$(read_secret vaultwarden-admin__ADMIN_TOKEN)" - -export SMTP_USERNAME="$(read_secret postmark-relay__relay-username)" -export SMTP_PASSWORD="$(read_secret postmark-relay__relay-password)" diff --git a/services/vaultwarden/secretproviderclass.yaml b/services/vaultwarden/secretproviderclass.yaml deleted file mode 100644 index 63f864e..0000000 --- 
a/services/vaultwarden/secretproviderclass.yaml +++ /dev/null @@ -1,24 +0,0 @@ -# services/vaultwarden/secretproviderclass.yaml -apiVersion: secrets-store.csi.x-k8s.io/v1 -kind: SecretProviderClass -metadata: - name: vaultwarden-vault - namespace: vaultwarden -spec: - provider: vault - parameters: - vaultAddress: "http://vault.vault.svc.cluster.local:8200" - roleName: "vaultwarden" - objects: | - - objectName: "vaultwarden-db-url__DATABASE_URL" - secretPath: "kv/data/atlas/vaultwarden/vaultwarden-db-url" - secretKey: "DATABASE_URL" - - objectName: "vaultwarden-admin__ADMIN_TOKEN" - secretPath: "kv/data/atlas/vaultwarden/vaultwarden-admin" - secretKey: "ADMIN_TOKEN" - - objectName: "postmark-relay__relay-username" - secretPath: "kv/data/atlas/shared/postmark-relay" - secretKey: "relay-username" - - objectName: "postmark-relay__relay-password" - secretPath: "kv/data/atlas/shared/postmark-relay" - secretKey: "relay-password" From 36fb225cbd03a3dc6747b7a9607bf01f563bacec Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Wed, 14 Jan 2026 12:34:02 -0300 Subject: [PATCH 047/270] bstein-dev-home: bump onboarding job --- services/bstein-dev-home/portal-onboarding-e2e-test-job.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/services/bstein-dev-home/portal-onboarding-e2e-test-job.yaml b/services/bstein-dev-home/portal-onboarding-e2e-test-job.yaml index dce1471..1d926b2 100644 --- a/services/bstein-dev-home/portal-onboarding-e2e-test-job.yaml +++ b/services/bstein-dev-home/portal-onboarding-e2e-test-job.yaml @@ -2,7 +2,7 @@ apiVersion: batch/v1 kind: Job metadata: - name: portal-onboarding-e2e-test-12 + name: portal-onboarding-e2e-test-13 namespace: bstein-dev-home spec: backoffLimit: 0 From 0aa16757e9618ead8038fe40ef78bfaf418af68d Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Wed, 14 Jan 2026 12:44:49 -0300 Subject: [PATCH 048/270] gitea: run vault init first --- services/gitea/deployment.yaml | 1 + 1 file changed, 1 insertion(+) diff --git 
a/services/gitea/deployment.yaml b/services/gitea/deployment.yaml index e67b3b9..cbcdab8 100644 --- a/services/gitea/deployment.yaml +++ b/services/gitea/deployment.yaml @@ -22,6 +22,7 @@ spec: app: gitea annotations: vault.hashicorp.com/agent-inject: "true" + vault.hashicorp.com/agent-init-first: "true" vault.hashicorp.com/role: "gitea" vault.hashicorp.com/agent-inject-secret-gitea-db-secret__password: "kv/data/atlas/gitea/gitea-db-secret" vault.hashicorp.com/agent-inject-template-gitea-db-secret__password: | From 92fbde08eb66d8c9c58d60a39148d893c6b7ca48 Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Wed, 14 Jan 2026 13:00:21 -0300 Subject: [PATCH 049/270] nextcloud: fix vault template keys --- services/nextcloud-mail-sync/cronjob.yaml | 12 ++++++------ services/nextcloud/deployment.yaml | 13 +++++++------ services/nextcloud/maintenance-cronjob.yaml | 12 ++++++------ 3 files changed, 19 insertions(+), 18 deletions(-) diff --git a/services/nextcloud-mail-sync/cronjob.yaml b/services/nextcloud-mail-sync/cronjob.yaml index 75fe548..5042f4b 100644 --- a/services/nextcloud-mail-sync/cronjob.yaml +++ b/services/nextcloud-mail-sync/cronjob.yaml @@ -20,18 +20,18 @@ spec: vault.hashicorp.com/agent-inject-template-nextcloud-env.sh: | {{- with secret "kv/data/atlas/nextcloud/nextcloud-db" -}} export POSTGRES_DB="{{ .Data.data.database }}" - export POSTGRES_USER="{{ .Data.data.db-username }}" - export POSTGRES_PASSWORD="{{ .Data.data.db-password }}" + export POSTGRES_USER="{{ index .Data.data "db-username" }}" + export POSTGRES_PASSWORD="{{ index .Data.data "db-password" }}" {{- end }} {{- with secret "kv/data/atlas/nextcloud/nextcloud-admin" -}} - export NEXTCLOUD_ADMIN_USER="{{ .Data.data.admin-user }}" - export NEXTCLOUD_ADMIN_PASSWORD="{{ .Data.data.admin-password }}" + export NEXTCLOUD_ADMIN_USER="{{ index .Data.data "admin-user" }}" + export NEXTCLOUD_ADMIN_PASSWORD="{{ index .Data.data "admin-password" }}" {{- end }} export ADMIN_USER="${NEXTCLOUD_ADMIN_USER}" export 
ADMIN_PASS="${NEXTCLOUD_ADMIN_PASSWORD}" {{- with secret "kv/data/atlas/nextcloud/nextcloud-oidc" -}} - export OIDC_CLIENT_ID="{{ .Data.data.client-id }}" - export OIDC_CLIENT_SECRET="{{ .Data.data.client-secret }}" + export OIDC_CLIENT_ID="{{ index .Data.data "client-id" }}" + export OIDC_CLIENT_SECRET="{{ index .Data.data "client-secret" }}" {{- end }} {{- with secret "kv/data/atlas/shared/postmark-relay" -}} export SMTP_NAME="{{ index .Data.data "relay-username" }}" diff --git a/services/nextcloud/deployment.yaml b/services/nextcloud/deployment.yaml index 84efb1d..063c6f7 100644 --- a/services/nextcloud/deployment.yaml +++ b/services/nextcloud/deployment.yaml @@ -17,23 +17,24 @@ spec: app: nextcloud annotations: vault.hashicorp.com/agent-inject: "true" + vault.hashicorp.com/agent-init-first: "true" vault.hashicorp.com/role: "nextcloud" vault.hashicorp.com/agent-inject-secret-nextcloud-env.sh: "kv/data/atlas/nextcloud/nextcloud-db" vault.hashicorp.com/agent-inject-template-nextcloud-env.sh: | {{- with secret "kv/data/atlas/nextcloud/nextcloud-db" -}} export POSTGRES_DB="{{ .Data.data.database }}" - export POSTGRES_USER="{{ .Data.data.db-username }}" - export POSTGRES_PASSWORD="{{ .Data.data.db-password }}" + export POSTGRES_USER="{{ index .Data.data "db-username" }}" + export POSTGRES_PASSWORD="{{ index .Data.data "db-password" }}" {{- end }} {{- with secret "kv/data/atlas/nextcloud/nextcloud-admin" -}} - export NEXTCLOUD_ADMIN_USER="{{ .Data.data.admin-user }}" - export NEXTCLOUD_ADMIN_PASSWORD="{{ .Data.data.admin-password }}" + export NEXTCLOUD_ADMIN_USER="{{ index .Data.data "admin-user" }}" + export NEXTCLOUD_ADMIN_PASSWORD="{{ index .Data.data "admin-password" }}" {{- end }} export ADMIN_USER="${NEXTCLOUD_ADMIN_USER}" export ADMIN_PASS="${NEXTCLOUD_ADMIN_PASSWORD}" {{- with secret "kv/data/atlas/nextcloud/nextcloud-oidc" -}} - export OIDC_CLIENT_ID="{{ .Data.data.client-id }}" - export OIDC_CLIENT_SECRET="{{ .Data.data.client-secret }}" + export 
OIDC_CLIENT_ID="{{ index .Data.data "client-id" }}" + export OIDC_CLIENT_SECRET="{{ index .Data.data "client-secret" }}" {{- end }} {{- with secret "kv/data/atlas/shared/postmark-relay" -}} export SMTP_NAME="{{ index .Data.data "relay-username" }}" diff --git a/services/nextcloud/maintenance-cronjob.yaml b/services/nextcloud/maintenance-cronjob.yaml index aaedbc8..f8af256 100644 --- a/services/nextcloud/maintenance-cronjob.yaml +++ b/services/nextcloud/maintenance-cronjob.yaml @@ -18,18 +18,18 @@ spec: vault.hashicorp.com/agent-inject-template-nextcloud-env.sh: | {{- with secret "kv/data/atlas/nextcloud/nextcloud-db" -}} export POSTGRES_DB="{{ .Data.data.database }}" - export POSTGRES_USER="{{ .Data.data.db-username }}" - export POSTGRES_PASSWORD="{{ .Data.data.db-password }}" + export POSTGRES_USER="{{ index .Data.data "db-username" }}" + export POSTGRES_PASSWORD="{{ index .Data.data "db-password" }}" {{- end }} {{- with secret "kv/data/atlas/nextcloud/nextcloud-admin" -}} - export NEXTCLOUD_ADMIN_USER="{{ .Data.data.admin-user }}" - export NEXTCLOUD_ADMIN_PASSWORD="{{ .Data.data.admin-password }}" + export NEXTCLOUD_ADMIN_USER="{{ index .Data.data "admin-user" }}" + export NEXTCLOUD_ADMIN_PASSWORD="{{ index .Data.data "admin-password" }}" {{- end }} export ADMIN_USER="${NEXTCLOUD_ADMIN_USER}" export ADMIN_PASS="${NEXTCLOUD_ADMIN_PASSWORD}" {{- with secret "kv/data/atlas/nextcloud/nextcloud-oidc" -}} - export OIDC_CLIENT_ID="{{ .Data.data.client-id }}" - export OIDC_CLIENT_SECRET="{{ .Data.data.client-secret }}" + export OIDC_CLIENT_ID="{{ index .Data.data "client-id" }}" + export OIDC_CLIENT_SECRET="{{ index .Data.data "client-secret" }}" {{- end }} {{- with secret "kv/data/atlas/shared/postmark-relay" -}} export SMTP_NAME="{{ index .Data.data "relay-username" }}" From 58c880d9cefa70478cf73c12be7c4bbab5f26fd5 Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Wed, 14 Jan 2026 13:20:57 -0300 Subject: [PATCH 050/270] keycloak: switch jobs to vault injector --- 
services/keycloak/deployment.yaml | 46 ++++++++++------- .../harbor-oidc-secret-ensure-job.yaml | 29 +++++------ services/keycloak/kustomization.yaml | 3 -- services/keycloak/ldap-federation-job.yaml | 50 ++++++++++++------- .../keycloak/logs-oidc-secret-ensure-job.yaml | 31 +++++------- services/keycloak/mas-secrets-ensure-job.yaml | 32 ++++++------ services/keycloak/portal-e2e-client-job.yaml | 49 +++++++++++------- ...al-e2e-execute-actions-email-test-job.yaml | 49 +++++++++++------- .../portal-e2e-target-client-job.yaml | 49 +++++++++++------- ...al-e2e-token-exchange-permissions-job.yaml | 50 ++++++++++++------- .../portal-e2e-token-exchange-test-job.yaml | 49 +++++++++++------- services/keycloak/realm-settings-job.yaml | 50 ++++++++++++------- .../scripts/harbor_oidc_secret_ensure.sh | 2 +- .../keycloak/scripts/keycloak_vault_env.sh | 29 ----------- .../scripts/vault_oidc_secret_ensure.sh | 2 +- services/keycloak/secretproviderclass.yaml | 27 ---------- .../synapse-oidc-secret-ensure-job.yaml | 31 +++++------- services/keycloak/user-overrides-job.yaml | 50 ++++++++++++------- .../vault-oidc-secret-ensure-job.yaml | 29 +++++------ 19 files changed, 343 insertions(+), 314 deletions(-) delete mode 100644 services/keycloak/scripts/keycloak_vault_env.sh diff --git a/services/keycloak/deployment.yaml b/services/keycloak/deployment.yaml index 3c116f6..b2842b1 100644 --- a/services/keycloak/deployment.yaml +++ b/services/keycloak/deployment.yaml @@ -20,6 +20,34 @@ spec: metadata: labels: app: keycloak + annotations: + vault.hashicorp.com/agent-inject: "true" + vault.hashicorp.com/role: "sso" + vault.hashicorp.com/agent-inject-secret-keycloak-env.sh: "kv/data/atlas/shared/keycloak-admin" + vault.hashicorp.com/agent-inject-template-keycloak-env.sh: | + {{- with secret "kv/data/atlas/shared/keycloak-admin" -}} + export KEYCLOAK_ADMIN="{{ .Data.data.username }}" + export KEYCLOAK_ADMIN_USER="{{ .Data.data.username }}" + export KEYCLOAK_ADMIN_PASSWORD="{{ 
.Data.data.password }}" + {{- end }} + {{- with secret "kv/data/atlas/sso/keycloak-db" -}} + export KC_DB_URL_DATABASE="{{ .Data.data.POSTGRES_DATABASE }}" + export KC_DB_USERNAME="{{ .Data.data.POSTGRES_USER }}" + export KC_DB_PASSWORD="{{ .Data.data.POSTGRES_PASSWORD }}" + {{- end }} + {{- with secret "kv/data/atlas/shared/portal-e2e-client" -}} + export PORTAL_E2E_CLIENT_ID="{{ .Data.data.client_id }}" + export PORTAL_E2E_CLIENT_SECRET="{{ .Data.data.client_secret }}" + {{- end }} + {{- with secret "kv/data/atlas/sso/openldap-admin" -}} + export LDAP_ADMIN_PASSWORD="{{ .Data.data.LDAP_ADMIN_PASSWORD }}" + export LDAP_CONFIG_PASSWORD="{{ .Data.data.LDAP_CONFIG_PASSWORD }}" + export LDAP_BIND_PASSWORD="${LDAP_ADMIN_PASSWORD}" + {{- end }} + {{- with secret "kv/data/atlas/shared/postmark-relay" -}} + export KEYCLOAK_SMTP_USER="{{ index .Data.data "relay-username" }}" + export KEYCLOAK_SMTP_PASSWORD="{{ index .Data.data "relay-password" }}" + {{- end }} spec: serviceAccountName: sso-vault affinity: @@ -73,7 +101,7 @@ spec: command: ["/bin/sh", "-c"] args: - >- - . /vault/scripts/keycloak_vault_env.sh + . 
/vault/secrets/keycloak-env.sh && exec /opt/keycloak/bin/kc.sh start env: - name: KC_DB @@ -132,25 +160,9 @@ spec: mountPath: /opt/keycloak/data - name: providers mountPath: /opt/keycloak/providers - - name: vault-secrets - mountPath: /vault/secrets - readOnly: true - - name: vault-scripts - mountPath: /vault/scripts - readOnly: true volumes: - name: data persistentVolumeClaim: claimName: keycloak-data - name: providers emptyDir: {} - - name: vault-secrets - csi: - driver: secrets-store.csi.k8s.io - readOnly: true - volumeAttributes: - secretProviderClass: sso-vault - - name: vault-scripts - configMap: - name: sso-vault-env - defaultMode: 0555 diff --git a/services/keycloak/harbor-oidc-secret-ensure-job.yaml b/services/keycloak/harbor-oidc-secret-ensure-job.yaml index 4566e26..aa51f4a 100644 --- a/services/keycloak/harbor-oidc-secret-ensure-job.yaml +++ b/services/keycloak/harbor-oidc-secret-ensure-job.yaml @@ -2,12 +2,23 @@ apiVersion: batch/v1 kind: Job metadata: - name: harbor-oidc-secret-ensure-4 + name: harbor-oidc-secret-ensure-5 namespace: sso spec: backoffLimit: 0 ttlSecondsAfterFinished: 3600 template: + metadata: + annotations: + vault.hashicorp.com/agent-inject: "true" + vault.hashicorp.com/role: "sso-secrets" + vault.hashicorp.com/agent-inject-secret-keycloak-admin-env.sh: "kv/data/atlas/shared/keycloak-admin" + vault.hashicorp.com/agent-inject-template-keycloak-admin-env.sh: | + {{- with secret "kv/data/atlas/shared/keycloak-admin" -}} + export KEYCLOAK_ADMIN="{{ .Data.data.username }}" + export KEYCLOAK_ADMIN_USER="{{ .Data.data.username }}" + export KEYCLOAK_ADMIN_PASSWORD="{{ .Data.data.password }}" + {{- end }} spec: serviceAccountName: mas-secrets-ensure restartPolicy: Never @@ -16,16 +27,6 @@ spec: configMap: name: harbor-oidc-secret-ensure-script defaultMode: 0555 - - name: vault-secrets - csi: - driver: secrets-store.csi.k8s.io - readOnly: true - volumeAttributes: - secretProviderClass: sso-vault - - name: vault-scripts - configMap: - name: 
sso-vault-env - defaultMode: 0555 affinity: nodeAffinity: requiredDuringSchedulingIgnoredDuringExecution: @@ -44,9 +45,3 @@ spec: - name: harbor-oidc-secret-ensure-script mountPath: /scripts readOnly: true - - name: vault-secrets - mountPath: /vault/secrets - readOnly: true - - name: vault-scripts - mountPath: /vault/scripts - readOnly: true diff --git a/services/keycloak/kustomization.yaml b/services/keycloak/kustomization.yaml index c34aad4..e141467 100644 --- a/services/keycloak/kustomization.yaml +++ b/services/keycloak/kustomization.yaml @@ -28,9 +28,6 @@ resources: generatorOptions: disableNameSuffixHash: true configMapGenerator: - - name: sso-vault-env - files: - - keycloak_vault_env.sh=scripts/keycloak_vault_env.sh - name: portal-e2e-tests files: - test_portal_token_exchange.py=scripts/tests/test_portal_token_exchange.py diff --git a/services/keycloak/ldap-federation-job.yaml b/services/keycloak/ldap-federation-job.yaml index 06e7a82..68ce057 100644 --- a/services/keycloak/ldap-federation-job.yaml +++ b/services/keycloak/ldap-federation-job.yaml @@ -2,11 +2,40 @@ apiVersion: batch/v1 kind: Job metadata: - name: keycloak-ldap-federation-6 + name: keycloak-ldap-federation-7 namespace: sso spec: backoffLimit: 2 template: + metadata: + annotations: + vault.hashicorp.com/agent-inject: "true" + vault.hashicorp.com/role: "sso" + vault.hashicorp.com/agent-inject-secret-keycloak-env.sh: "kv/data/atlas/shared/keycloak-admin" + vault.hashicorp.com/agent-inject-template-keycloak-env.sh: | + {{- with secret "kv/data/atlas/shared/keycloak-admin" -}} + export KEYCLOAK_ADMIN="{{ .Data.data.username }}" + export KEYCLOAK_ADMIN_USER="{{ .Data.data.username }}" + export KEYCLOAK_ADMIN_PASSWORD="{{ .Data.data.password }}" + {{- end }} + {{- with secret "kv/data/atlas/sso/keycloak-db" -}} + export KC_DB_URL_DATABASE="{{ .Data.data.POSTGRES_DATABASE }}" + export KC_DB_USERNAME="{{ .Data.data.POSTGRES_USER }}" + export KC_DB_PASSWORD="{{ .Data.data.POSTGRES_PASSWORD }}" + {{- end 
}} + {{- with secret "kv/data/atlas/shared/portal-e2e-client" -}} + export PORTAL_E2E_CLIENT_ID="{{ .Data.data.client_id }}" + export PORTAL_E2E_CLIENT_SECRET="{{ .Data.data.client_secret }}" + {{- end }} + {{- with secret "kv/data/atlas/sso/openldap-admin" -}} + export LDAP_ADMIN_PASSWORD="{{ .Data.data.LDAP_ADMIN_PASSWORD }}" + export LDAP_CONFIG_PASSWORD="{{ .Data.data.LDAP_CONFIG_PASSWORD }}" + export LDAP_BIND_PASSWORD="${LDAP_ADMIN_PASSWORD}" + {{- end }} + {{- with secret "kv/data/atlas/shared/postmark-relay" -}} + export KEYCLOAK_SMTP_USER="{{ index .Data.data "relay-username" }}" + export KEYCLOAK_SMTP_PASSWORD="{{ index .Data.data "relay-password" }}" + {{- end }} spec: affinity: nodeAffinity: @@ -41,7 +70,7 @@ spec: args: - | set -euo pipefail - . /vault/scripts/keycloak_vault_env.sh + . /vault/secrets/keycloak-env.sh python - <<'PY' import json import os @@ -348,20 +377,3 @@ spec: print(f"WARNING: LDAP cleanup failed (continuing): {e}") PY volumeMounts: - - name: vault-secrets - mountPath: /vault/secrets - readOnly: true - - name: vault-scripts - mountPath: /vault/scripts - readOnly: true - volumes: - - name: vault-secrets - csi: - driver: secrets-store.csi.k8s.io - readOnly: true - volumeAttributes: - secretProviderClass: sso-vault - - name: vault-scripts - configMap: - name: sso-vault-env - defaultMode: 0555 diff --git a/services/keycloak/logs-oidc-secret-ensure-job.yaml b/services/keycloak/logs-oidc-secret-ensure-job.yaml index ae5a8aa..7fc3097 100644 --- a/services/keycloak/logs-oidc-secret-ensure-job.yaml +++ b/services/keycloak/logs-oidc-secret-ensure-job.yaml @@ -2,12 +2,23 @@ apiVersion: batch/v1 kind: Job metadata: - name: logs-oidc-secret-ensure-3 + name: logs-oidc-secret-ensure-4 namespace: sso spec: backoffLimit: 0 ttlSecondsAfterFinished: 3600 template: + metadata: + annotations: + vault.hashicorp.com/agent-inject: "true" + vault.hashicorp.com/role: "sso-secrets" + vault.hashicorp.com/agent-inject-secret-keycloak-admin-env.sh: 
"kv/data/atlas/shared/keycloak-admin" + vault.hashicorp.com/agent-inject-template-keycloak-admin-env.sh: | + {{- with secret "kv/data/atlas/shared/keycloak-admin" -}} + export KEYCLOAK_ADMIN="{{ .Data.data.username }}" + export KEYCLOAK_ADMIN_USER="{{ .Data.data.username }}" + export KEYCLOAK_ADMIN_PASSWORD="{{ .Data.data.password }}" + {{- end }} spec: serviceAccountName: mas-secrets-ensure restartPolicy: Never @@ -18,7 +29,7 @@ spec: args: - | set -euo pipefail - . /vault/scripts/keycloak_vault_env.sh + . /vault/secrets/keycloak-admin-env.sh apk add --no-cache curl jq kubectl openssl >/dev/null KC_URL="http://keycloak.sso.svc.cluster.local" @@ -110,20 +121,4 @@ spec: --from-literal=cookie_secret="${COOKIE_SECRET}" \ --dry-run=client -o yaml | kubectl -n logging apply -f - >/dev/null volumeMounts: - - name: vault-secrets - mountPath: /vault/secrets - readOnly: true - - name: vault-scripts - mountPath: /vault/scripts - readOnly: true volumes: - - name: vault-secrets - csi: - driver: secrets-store.csi.k8s.io - readOnly: true - volumeAttributes: - secretProviderClass: sso-vault - - name: vault-scripts - configMap: - name: sso-vault-env - defaultMode: 0555 diff --git a/services/keycloak/mas-secrets-ensure-job.yaml b/services/keycloak/mas-secrets-ensure-job.yaml index 75d8300..3b6e15e 100644 --- a/services/keycloak/mas-secrets-ensure-job.yaml +++ b/services/keycloak/mas-secrets-ensure-job.yaml @@ -10,28 +10,30 @@ imagePullSecrets: apiVersion: batch/v1 kind: Job metadata: - name: mas-secrets-ensure-15 + name: mas-secrets-ensure-16 namespace: sso spec: backoffLimit: 0 ttlSecondsAfterFinished: 3600 template: + metadata: + annotations: + vault.hashicorp.com/agent-inject: "true" + vault.hashicorp.com/agent-init-first: "true" + vault.hashicorp.com/role: "sso-secrets" + vault.hashicorp.com/agent-inject-secret-keycloak-admin-env.sh: "kv/data/atlas/shared/keycloak-admin" + vault.hashicorp.com/agent-inject-template-keycloak-admin-env.sh: | + {{- with secret 
"kv/data/atlas/shared/keycloak-admin" -}} + export KEYCLOAK_ADMIN="{{ .Data.data.username }}" + export KEYCLOAK_ADMIN_USER="{{ .Data.data.username }}" + export KEYCLOAK_ADMIN_PASSWORD="{{ .Data.data.password }}" + {{- end }} spec: serviceAccountName: mas-secrets-ensure restartPolicy: Never volumes: - name: work emptyDir: {} - - name: vault-secrets - csi: - driver: secrets-store.csi.k8s.io - readOnly: true - volumeAttributes: - secretProviderClass: sso-vault - - name: vault-scripts - configMap: - name: sso-vault-env - defaultMode: 0555 initContainers: - name: generate image: alpine:3.20 @@ -39,7 +41,7 @@ spec: args: - | set -euo pipefail - . /vault/scripts/keycloak_vault_env.sh + . /vault/secrets/keycloak-admin-env.sh umask 077 apk add --no-cache curl openssl jq >/dev/null @@ -84,12 +86,6 @@ spec: volumeMounts: - name: work mountPath: /work - - name: vault-secrets - mountPath: /vault/secrets - readOnly: true - - name: vault-scripts - mountPath: /vault/scripts - readOnly: true containers: - name: apply image: registry.bstein.dev/bstein/kubectl:1.35.0 diff --git a/services/keycloak/portal-e2e-client-job.yaml b/services/keycloak/portal-e2e-client-job.yaml index 1653656..2cb50ca 100644 --- a/services/keycloak/portal-e2e-client-job.yaml +++ b/services/keycloak/portal-e2e-client-job.yaml @@ -2,11 +2,40 @@ apiVersion: batch/v1 kind: Job metadata: - name: keycloak-portal-e2e-client-3 + name: keycloak-portal-e2e-client-4 namespace: sso spec: backoffLimit: 0 template: + metadata: + annotations: + vault.hashicorp.com/agent-inject: "true" + vault.hashicorp.com/role: "sso" + vault.hashicorp.com/agent-inject-secret-keycloak-env.sh: "kv/data/atlas/shared/keycloak-admin" + vault.hashicorp.com/agent-inject-template-keycloak-env.sh: | + {{- with secret "kv/data/atlas/shared/keycloak-admin" -}} + export KEYCLOAK_ADMIN="{{ .Data.data.username }}" + export KEYCLOAK_ADMIN_USER="{{ .Data.data.username }}" + export KEYCLOAK_ADMIN_PASSWORD="{{ .Data.data.password }}" + {{- end }} + {{- with 
secret "kv/data/atlas/sso/keycloak-db" -}} + export KC_DB_URL_DATABASE="{{ .Data.data.POSTGRES_DATABASE }}" + export KC_DB_USERNAME="{{ .Data.data.POSTGRES_USER }}" + export KC_DB_PASSWORD="{{ .Data.data.POSTGRES_PASSWORD }}" + {{- end }} + {{- with secret "kv/data/atlas/shared/portal-e2e-client" -}} + export PORTAL_E2E_CLIENT_ID="{{ .Data.data.client_id }}" + export PORTAL_E2E_CLIENT_SECRET="{{ .Data.data.client_secret }}" + {{- end }} + {{- with secret "kv/data/atlas/sso/openldap-admin" -}} + export LDAP_ADMIN_PASSWORD="{{ .Data.data.LDAP_ADMIN_PASSWORD }}" + export LDAP_CONFIG_PASSWORD="{{ .Data.data.LDAP_CONFIG_PASSWORD }}" + export LDAP_BIND_PASSWORD="${LDAP_ADMIN_PASSWORD}" + {{- end }} + {{- with secret "kv/data/atlas/shared/postmark-relay" -}} + export KEYCLOAK_SMTP_USER="{{ index .Data.data "relay-username" }}" + export KEYCLOAK_SMTP_PASSWORD="{{ index .Data.data "relay-password" }}" + {{- end }} spec: restartPolicy: Never serviceAccountName: sso-vault @@ -22,7 +51,7 @@ spec: args: - | set -euo pipefail - . /vault/scripts/keycloak_vault_env.sh + . 
/vault/secrets/keycloak-env.sh python - <<'PY' import json import os @@ -228,20 +257,4 @@ spec: raise SystemExit(f"Role mapping update failed (status={status}) resp={resp}") PY volumeMounts: - - name: vault-secrets - mountPath: /vault/secrets - readOnly: true - - name: vault-scripts - mountPath: /vault/scripts - readOnly: true volumes: - - name: vault-secrets - csi: - driver: secrets-store.csi.k8s.io - readOnly: true - volumeAttributes: - secretProviderClass: sso-vault - - name: vault-scripts - configMap: - name: sso-vault-env - defaultMode: 0555 diff --git a/services/keycloak/portal-e2e-execute-actions-email-test-job.yaml b/services/keycloak/portal-e2e-execute-actions-email-test-job.yaml index 9bba6a4..c80e3eb 100644 --- a/services/keycloak/portal-e2e-execute-actions-email-test-job.yaml +++ b/services/keycloak/portal-e2e-execute-actions-email-test-job.yaml @@ -2,11 +2,40 @@ apiVersion: batch/v1 kind: Job metadata: - name: keycloak-portal-e2e-execute-actions-email-6 + name: keycloak-portal-e2e-execute-actions-email-7 namespace: sso spec: backoffLimit: 3 template: + metadata: + annotations: + vault.hashicorp.com/agent-inject: "true" + vault.hashicorp.com/role: "sso" + vault.hashicorp.com/agent-inject-secret-keycloak-env.sh: "kv/data/atlas/shared/keycloak-admin" + vault.hashicorp.com/agent-inject-template-keycloak-env.sh: | + {{- with secret "kv/data/atlas/shared/keycloak-admin" -}} + export KEYCLOAK_ADMIN="{{ .Data.data.username }}" + export KEYCLOAK_ADMIN_USER="{{ .Data.data.username }}" + export KEYCLOAK_ADMIN_PASSWORD="{{ .Data.data.password }}" + {{- end }} + {{- with secret "kv/data/atlas/sso/keycloak-db" -}} + export KC_DB_URL_DATABASE="{{ .Data.data.POSTGRES_DATABASE }}" + export KC_DB_USERNAME="{{ .Data.data.POSTGRES_USER }}" + export KC_DB_PASSWORD="{{ .Data.data.POSTGRES_PASSWORD }}" + {{- end }} + {{- with secret "kv/data/atlas/shared/portal-e2e-client" -}} + export PORTAL_E2E_CLIENT_ID="{{ .Data.data.client_id }}" + export PORTAL_E2E_CLIENT_SECRET="{{ 
.Data.data.client_secret }}" + {{- end }} + {{- with secret "kv/data/atlas/sso/openldap-admin" -}} + export LDAP_ADMIN_PASSWORD="{{ .Data.data.LDAP_ADMIN_PASSWORD }}" + export LDAP_CONFIG_PASSWORD="{{ .Data.data.LDAP_CONFIG_PASSWORD }}" + export LDAP_BIND_PASSWORD="${LDAP_ADMIN_PASSWORD}" + {{- end }} + {{- with secret "kv/data/atlas/shared/postmark-relay" -}} + export KEYCLOAK_SMTP_USER="{{ index .Data.data "relay-username" }}" + export KEYCLOAK_SMTP_PASSWORD="{{ index .Data.data "relay-password" }}" + {{- end }} spec: restartPolicy: Never serviceAccountName: sso-vault @@ -30,30 +59,14 @@ spec: args: - | set -euo pipefail - . /vault/scripts/keycloak_vault_env.sh + . /vault/secrets/keycloak-env.sh python /scripts/test_keycloak_execute_actions_email.py volumeMounts: - name: tests mountPath: /scripts readOnly: true - - name: vault-secrets - mountPath: /vault/secrets - readOnly: true - - name: vault-scripts - mountPath: /vault/scripts - readOnly: true volumes: - name: tests configMap: name: portal-e2e-tests defaultMode: 0555 - - name: vault-secrets - csi: - driver: secrets-store.csi.k8s.io - readOnly: true - volumeAttributes: - secretProviderClass: sso-vault - - name: vault-scripts - configMap: - name: sso-vault-env - defaultMode: 0555 diff --git a/services/keycloak/portal-e2e-target-client-job.yaml b/services/keycloak/portal-e2e-target-client-job.yaml index a608b40..c4dcd0f 100644 --- a/services/keycloak/portal-e2e-target-client-job.yaml +++ b/services/keycloak/portal-e2e-target-client-job.yaml @@ -2,11 +2,40 @@ apiVersion: batch/v1 kind: Job metadata: - name: keycloak-portal-e2e-target-2 + name: keycloak-portal-e2e-target-3 namespace: sso spec: backoffLimit: 0 template: + metadata: + annotations: + vault.hashicorp.com/agent-inject: "true" + vault.hashicorp.com/role: "sso" + vault.hashicorp.com/agent-inject-secret-keycloak-env.sh: "kv/data/atlas/shared/keycloak-admin" + vault.hashicorp.com/agent-inject-template-keycloak-env.sh: | + {{- with secret 
"kv/data/atlas/shared/keycloak-admin" -}} + export KEYCLOAK_ADMIN="{{ .Data.data.username }}" + export KEYCLOAK_ADMIN_USER="{{ .Data.data.username }}" + export KEYCLOAK_ADMIN_PASSWORD="{{ .Data.data.password }}" + {{- end }} + {{- with secret "kv/data/atlas/sso/keycloak-db" -}} + export KC_DB_URL_DATABASE="{{ .Data.data.POSTGRES_DATABASE }}" + export KC_DB_USERNAME="{{ .Data.data.POSTGRES_USER }}" + export KC_DB_PASSWORD="{{ .Data.data.POSTGRES_PASSWORD }}" + {{- end }} + {{- with secret "kv/data/atlas/shared/portal-e2e-client" -}} + export PORTAL_E2E_CLIENT_ID="{{ .Data.data.client_id }}" + export PORTAL_E2E_CLIENT_SECRET="{{ .Data.data.client_secret }}" + {{- end }} + {{- with secret "kv/data/atlas/sso/openldap-admin" -}} + export LDAP_ADMIN_PASSWORD="{{ .Data.data.LDAP_ADMIN_PASSWORD }}" + export LDAP_CONFIG_PASSWORD="{{ .Data.data.LDAP_CONFIG_PASSWORD }}" + export LDAP_BIND_PASSWORD="${LDAP_ADMIN_PASSWORD}" + {{- end }} + {{- with secret "kv/data/atlas/shared/postmark-relay" -}} + export KEYCLOAK_SMTP_USER="{{ index .Data.data "relay-username" }}" + export KEYCLOAK_SMTP_PASSWORD="{{ index .Data.data "relay-password" }}" + {{- end }} spec: restartPolicy: Never serviceAccountName: sso-vault @@ -24,7 +53,7 @@ spec: args: - | set -euo pipefail - . /vault/scripts/keycloak_vault_env.sh + . 
/vault/secrets/keycloak-env.sh python - <<'PY' import json import os @@ -129,20 +158,4 @@ spec: print(f"OK: ensured token exchange enabled on client {target_client_id}") PY volumeMounts: - - name: vault-secrets - mountPath: /vault/secrets - readOnly: true - - name: vault-scripts - mountPath: /vault/scripts - readOnly: true volumes: - - name: vault-secrets - csi: - driver: secrets-store.csi.k8s.io - readOnly: true - volumeAttributes: - secretProviderClass: sso-vault - - name: vault-scripts - configMap: - name: sso-vault-env - defaultMode: 0555 diff --git a/services/keycloak/portal-e2e-token-exchange-permissions-job.yaml b/services/keycloak/portal-e2e-token-exchange-permissions-job.yaml index c34e889..cbd21ac 100644 --- a/services/keycloak/portal-e2e-token-exchange-permissions-job.yaml +++ b/services/keycloak/portal-e2e-token-exchange-permissions-job.yaml @@ -2,11 +2,40 @@ apiVersion: batch/v1 kind: Job metadata: - name: keycloak-portal-e2e-token-exchange-permissions-6 + name: keycloak-portal-e2e-token-exchange-permissions-7 namespace: sso spec: backoffLimit: 6 template: + metadata: + annotations: + vault.hashicorp.com/agent-inject: "true" + vault.hashicorp.com/role: "sso" + vault.hashicorp.com/agent-inject-secret-keycloak-env.sh: "kv/data/atlas/shared/keycloak-admin" + vault.hashicorp.com/agent-inject-template-keycloak-env.sh: | + {{- with secret "kv/data/atlas/shared/keycloak-admin" -}} + export KEYCLOAK_ADMIN="{{ .Data.data.username }}" + export KEYCLOAK_ADMIN_USER="{{ .Data.data.username }}" + export KEYCLOAK_ADMIN_PASSWORD="{{ .Data.data.password }}" + {{- end }} + {{- with secret "kv/data/atlas/sso/keycloak-db" -}} + export KC_DB_URL_DATABASE="{{ .Data.data.POSTGRES_DATABASE }}" + export KC_DB_USERNAME="{{ .Data.data.POSTGRES_USER }}" + export KC_DB_PASSWORD="{{ .Data.data.POSTGRES_PASSWORD }}" + {{- end }} + {{- with secret "kv/data/atlas/shared/portal-e2e-client" -}} + export PORTAL_E2E_CLIENT_ID="{{ .Data.data.client_id }}" + export 
PORTAL_E2E_CLIENT_SECRET="{{ .Data.data.client_secret }}" + {{- end }} + {{- with secret "kv/data/atlas/sso/openldap-admin" -}} + export LDAP_ADMIN_PASSWORD="{{ .Data.data.LDAP_ADMIN_PASSWORD }}" + export LDAP_CONFIG_PASSWORD="{{ .Data.data.LDAP_CONFIG_PASSWORD }}" + export LDAP_BIND_PASSWORD="${LDAP_ADMIN_PASSWORD}" + {{- end }} + {{- with secret "kv/data/atlas/shared/postmark-relay" -}} + export KEYCLOAK_SMTP_USER="{{ index .Data.data "relay-username" }}" + export KEYCLOAK_SMTP_PASSWORD="{{ index .Data.data "relay-password" }}" + {{- end }} spec: restartPolicy: Never serviceAccountName: sso-vault @@ -26,7 +55,7 @@ spec: args: - | set -euo pipefail - . /vault/scripts/keycloak_vault_env.sh + . /vault/secrets/keycloak-env.sh python - <<'PY' import json import os @@ -262,20 +291,3 @@ spec: print("OK: configured token exchange permissions for portal E2E client") PY volumeMounts: - - name: vault-secrets - mountPath: /vault/secrets - readOnly: true - - name: vault-scripts - mountPath: /vault/scripts - readOnly: true - volumes: - - name: vault-secrets - csi: - driver: secrets-store.csi.k8s.io - readOnly: true - volumeAttributes: - secretProviderClass: sso-vault - - name: vault-scripts - configMap: - name: sso-vault-env - defaultMode: 0555 diff --git a/services/keycloak/portal-e2e-token-exchange-test-job.yaml b/services/keycloak/portal-e2e-token-exchange-test-job.yaml index 69f5d2e..56c7ce5 100644 --- a/services/keycloak/portal-e2e-token-exchange-test-job.yaml +++ b/services/keycloak/portal-e2e-token-exchange-test-job.yaml @@ -2,12 +2,41 @@ apiVersion: batch/v1 kind: Job metadata: - name: keycloak-portal-e2e-token-exchange-test-2 + name: keycloak-portal-e2e-token-exchange-test-3 namespace: sso spec: backoffLimit: 6 ttlSecondsAfterFinished: 3600 template: + metadata: + annotations: + vault.hashicorp.com/agent-inject: "true" + vault.hashicorp.com/role: "sso" + vault.hashicorp.com/agent-inject-secret-keycloak-env.sh: "kv/data/atlas/shared/keycloak-admin" + 
vault.hashicorp.com/agent-inject-template-keycloak-env.sh: | + {{- with secret "kv/data/atlas/shared/keycloak-admin" -}} + export KEYCLOAK_ADMIN="{{ .Data.data.username }}" + export KEYCLOAK_ADMIN_USER="{{ .Data.data.username }}" + export KEYCLOAK_ADMIN_PASSWORD="{{ .Data.data.password }}" + {{- end }} + {{- with secret "kv/data/atlas/sso/keycloak-db" -}} + export KC_DB_URL_DATABASE="{{ .Data.data.POSTGRES_DATABASE }}" + export KC_DB_USERNAME="{{ .Data.data.POSTGRES_USER }}" + export KC_DB_PASSWORD="{{ .Data.data.POSTGRES_PASSWORD }}" + {{- end }} + {{- with secret "kv/data/atlas/shared/portal-e2e-client" -}} + export PORTAL_E2E_CLIENT_ID="{{ .Data.data.client_id }}" + export PORTAL_E2E_CLIENT_SECRET="{{ .Data.data.client_secret }}" + {{- end }} + {{- with secret "kv/data/atlas/sso/openldap-admin" -}} + export LDAP_ADMIN_PASSWORD="{{ .Data.data.LDAP_ADMIN_PASSWORD }}" + export LDAP_CONFIG_PASSWORD="{{ .Data.data.LDAP_CONFIG_PASSWORD }}" + export LDAP_BIND_PASSWORD="${LDAP_ADMIN_PASSWORD}" + {{- end }} + {{- with secret "kv/data/atlas/shared/postmark-relay" -}} + export KEYCLOAK_SMTP_USER="{{ index .Data.data "relay-username" }}" + export KEYCLOAK_SMTP_PASSWORD="{{ index .Data.data "relay-password" }}" + {{- end }} spec: restartPolicy: Never serviceAccountName: sso-vault @@ -31,30 +60,14 @@ spec: args: - | set -euo pipefail - . /vault/scripts/keycloak_vault_env.sh + . 
/vault/secrets/keycloak-env.sh python /scripts/test_portal_token_exchange.py volumeMounts: - name: tests mountPath: /scripts readOnly: true - - name: vault-secrets - mountPath: /vault/secrets - readOnly: true - - name: vault-scripts - mountPath: /vault/scripts - readOnly: true volumes: - name: tests configMap: name: portal-e2e-tests defaultMode: 0555 - - name: vault-secrets - csi: - driver: secrets-store.csi.k8s.io - readOnly: true - volumeAttributes: - secretProviderClass: sso-vault - - name: vault-scripts - configMap: - name: sso-vault-env - defaultMode: 0555 diff --git a/services/keycloak/realm-settings-job.yaml b/services/keycloak/realm-settings-job.yaml index 5cabe3c..f44dcd4 100644 --- a/services/keycloak/realm-settings-job.yaml +++ b/services/keycloak/realm-settings-job.yaml @@ -2,11 +2,40 @@ apiVersion: batch/v1 kind: Job metadata: - name: keycloak-realm-settings-18 + name: keycloak-realm-settings-19 namespace: sso spec: backoffLimit: 0 template: + metadata: + annotations: + vault.hashicorp.com/agent-inject: "true" + vault.hashicorp.com/role: "sso" + vault.hashicorp.com/agent-inject-secret-keycloak-env.sh: "kv/data/atlas/shared/keycloak-admin" + vault.hashicorp.com/agent-inject-template-keycloak-env.sh: | + {{- with secret "kv/data/atlas/shared/keycloak-admin" -}} + export KEYCLOAK_ADMIN="{{ .Data.data.username }}" + export KEYCLOAK_ADMIN_USER="{{ .Data.data.username }}" + export KEYCLOAK_ADMIN_PASSWORD="{{ .Data.data.password }}" + {{- end }} + {{- with secret "kv/data/atlas/sso/keycloak-db" -}} + export KC_DB_URL_DATABASE="{{ .Data.data.POSTGRES_DATABASE }}" + export KC_DB_USERNAME="{{ .Data.data.POSTGRES_USER }}" + export KC_DB_PASSWORD="{{ .Data.data.POSTGRES_PASSWORD }}" + {{- end }} + {{- with secret "kv/data/atlas/shared/portal-e2e-client" -}} + export PORTAL_E2E_CLIENT_ID="{{ .Data.data.client_id }}" + export PORTAL_E2E_CLIENT_SECRET="{{ .Data.data.client_secret }}" + {{- end }} + {{- with secret "kv/data/atlas/sso/openldap-admin" -}} + export 
LDAP_ADMIN_PASSWORD="{{ .Data.data.LDAP_ADMIN_PASSWORD }}" + export LDAP_CONFIG_PASSWORD="{{ .Data.data.LDAP_CONFIG_PASSWORD }}" + export LDAP_BIND_PASSWORD="${LDAP_ADMIN_PASSWORD}" + {{- end }} + {{- with secret "kv/data/atlas/shared/postmark-relay" -}} + export KEYCLOAK_SMTP_USER="{{ index .Data.data "relay-username" }}" + export KEYCLOAK_SMTP_PASSWORD="{{ index .Data.data "relay-password" }}" + {{- end }} spec: affinity: nodeAffinity: @@ -44,7 +73,7 @@ spec: args: - | set -euo pipefail - . /vault/scripts/keycloak_vault_env.sh + . /vault/secrets/keycloak-env.sh python - <<'PY' import json import os @@ -439,20 +468,3 @@ spec: ) PY volumeMounts: - - name: vault-secrets - mountPath: /vault/secrets - readOnly: true - - name: vault-scripts - mountPath: /vault/scripts - readOnly: true - volumes: - - name: vault-secrets - csi: - driver: secrets-store.csi.k8s.io - readOnly: true - volumeAttributes: - secretProviderClass: sso-vault - - name: vault-scripts - configMap: - name: sso-vault-env - defaultMode: 0555 diff --git a/services/keycloak/scripts/harbor_oidc_secret_ensure.sh b/services/keycloak/scripts/harbor_oidc_secret_ensure.sh index f2dafc6..beef591 100755 --- a/services/keycloak/scripts/harbor_oidc_secret_ensure.sh +++ b/services/keycloak/scripts/harbor_oidc_secret_ensure.sh @@ -3,7 +3,7 @@ set -euo pipefail apk add --no-cache curl jq kubectl >/dev/null -. /vault/scripts/keycloak_vault_env.sh +. 
/vault/secrets/keycloak-admin-env.sh KC_URL="http://keycloak.sso.svc.cluster.local" ACCESS_TOKEN="" diff --git a/services/keycloak/scripts/keycloak_vault_env.sh b/services/keycloak/scripts/keycloak_vault_env.sh deleted file mode 100644 index dd68fc7..0000000 --- a/services/keycloak/scripts/keycloak_vault_env.sh +++ /dev/null @@ -1,29 +0,0 @@ -#!/usr/bin/env sh -set -eu - -vault_dir="/vault/secrets" - -read_secret() { - cat "${vault_dir}/$1" -} - -admin_user="$(read_secret keycloak-admin__username)" -admin_password="$(read_secret keycloak-admin__password)" - -export KEYCLOAK_ADMIN="${admin_user}" -export KEYCLOAK_ADMIN_USER="${admin_user}" -export KEYCLOAK_ADMIN_PASSWORD="${admin_password}" - -export KC_DB_URL_DATABASE="$(read_secret keycloak-db__POSTGRES_DATABASE)" -export KC_DB_USERNAME="$(read_secret keycloak-db__POSTGRES_USER)" -export KC_DB_PASSWORD="$(read_secret keycloak-db__POSTGRES_PASSWORD)" - -export PORTAL_E2E_CLIENT_ID="$(read_secret portal-e2e-client__client_id)" -export PORTAL_E2E_CLIENT_SECRET="$(read_secret portal-e2e-client__client_secret)" - -export LDAP_ADMIN_PASSWORD="$(read_secret openldap-admin__LDAP_ADMIN_PASSWORD)" -export LDAP_CONFIG_PASSWORD="$(read_secret openldap-admin__LDAP_CONFIG_PASSWORD)" -export LDAP_BIND_PASSWORD="${LDAP_ADMIN_PASSWORD}" - -export KEYCLOAK_SMTP_USER="$(read_secret postmark-relay__relay-username)" -export KEYCLOAK_SMTP_PASSWORD="$(read_secret postmark-relay__relay-password)" diff --git a/services/keycloak/scripts/vault_oidc_secret_ensure.sh b/services/keycloak/scripts/vault_oidc_secret_ensure.sh index 680057f..20d39c1 100755 --- a/services/keycloak/scripts/vault_oidc_secret_ensure.sh +++ b/services/keycloak/scripts/vault_oidc_secret_ensure.sh @@ -3,7 +3,7 @@ set -euo pipefail apk add --no-cache curl jq kubectl >/dev/null -. /vault/scripts/keycloak_vault_env.sh +. 
/vault/secrets/keycloak-admin-env.sh KC_URL="http://keycloak.sso.svc.cluster.local" ACCESS_TOKEN="" diff --git a/services/keycloak/secretproviderclass.yaml b/services/keycloak/secretproviderclass.yaml index e78e57e..95e28be 100644 --- a/services/keycloak/secretproviderclass.yaml +++ b/services/keycloak/secretproviderclass.yaml @@ -10,27 +10,6 @@ spec: vaultAddress: "http://vault.vault.svc.cluster.local:8200" roleName: "sso" objects: | - - objectName: "keycloak-db__POSTGRES_DATABASE" - secretPath: "kv/data/atlas/sso/keycloak-db" - secretKey: "POSTGRES_DATABASE" - - objectName: "keycloak-db__POSTGRES_USER" - secretPath: "kv/data/atlas/sso/keycloak-db" - secretKey: "POSTGRES_USER" - - objectName: "keycloak-db__POSTGRES_PASSWORD" - secretPath: "kv/data/atlas/sso/keycloak-db" - secretKey: "POSTGRES_PASSWORD" - - objectName: "keycloak-admin__username" - secretPath: "kv/data/atlas/shared/keycloak-admin" - secretKey: "username" - - objectName: "keycloak-admin__password" - secretPath: "kv/data/atlas/shared/keycloak-admin" - secretKey: "password" - - objectName: "portal-e2e-client__client_id" - secretPath: "kv/data/atlas/shared/portal-e2e-client" - secretKey: "client_id" - - objectName: "portal-e2e-client__client_secret" - secretPath: "kv/data/atlas/shared/portal-e2e-client" - secretKey: "client_secret" - objectName: "openldap-admin__LDAP_ADMIN_PASSWORD" secretPath: "kv/data/atlas/sso/openldap-admin" secretKey: "LDAP_ADMIN_PASSWORD" @@ -46,12 +25,6 @@ spec: - objectName: "oauth2-proxy-oidc__cookie_secret" secretPath: "kv/data/atlas/sso/oauth2-proxy-oidc" secretKey: "cookie_secret" - - objectName: "postmark-relay__relay-username" - secretPath: "kv/data/atlas/shared/postmark-relay" - secretKey: "relay-username" - - objectName: "postmark-relay__relay-password" - secretPath: "kv/data/atlas/shared/postmark-relay" - secretKey: "relay-password" - objectName: "harbor-pull__dockerconfigjson" secretPath: "kv/data/atlas/harbor-pull/sso" secretKey: "dockerconfigjson" diff --git 
a/services/keycloak/synapse-oidc-secret-ensure-job.yaml b/services/keycloak/synapse-oidc-secret-ensure-job.yaml index 5f96cb1..1e4878d 100644 --- a/services/keycloak/synapse-oidc-secret-ensure-job.yaml +++ b/services/keycloak/synapse-oidc-secret-ensure-job.yaml @@ -2,12 +2,23 @@ apiVersion: batch/v1 kind: Job metadata: - name: synapse-oidc-secret-ensure-5 + name: synapse-oidc-secret-ensure-6 namespace: sso spec: backoffLimit: 0 ttlSecondsAfterFinished: 3600 template: + metadata: + annotations: + vault.hashicorp.com/agent-inject: "true" + vault.hashicorp.com/role: "sso-secrets" + vault.hashicorp.com/agent-inject-secret-keycloak-admin-env.sh: "kv/data/atlas/shared/keycloak-admin" + vault.hashicorp.com/agent-inject-template-keycloak-admin-env.sh: | + {{- with secret "kv/data/atlas/shared/keycloak-admin" -}} + export KEYCLOAK_ADMIN="{{ .Data.data.username }}" + export KEYCLOAK_ADMIN_USER="{{ .Data.data.username }}" + export KEYCLOAK_ADMIN_PASSWORD="{{ .Data.data.password }}" + {{- end }} spec: serviceAccountName: mas-secrets-ensure restartPolicy: Never @@ -18,7 +29,7 @@ spec: args: - | set -euo pipefail - . /vault/scripts/keycloak_vault_env.sh + . 
/vault/secrets/keycloak-admin-env.sh apk add --no-cache curl jq >/dev/null KC_URL="http://keycloak.sso.svc.cluster.local" @@ -70,20 +81,4 @@ spec: curl -sS -X POST -H "X-Vault-Token: ${vault_token}" \ -d "${payload}" "${vault_addr}/v1/kv/data/atlas/comms/synapse-oidc" >/dev/null volumeMounts: - - name: vault-secrets - mountPath: /vault/secrets - readOnly: true - - name: vault-scripts - mountPath: /vault/scripts - readOnly: true volumes: - - name: vault-secrets - csi: - driver: secrets-store.csi.k8s.io - readOnly: true - volumeAttributes: - secretProviderClass: sso-vault - - name: vault-scripts - configMap: - name: sso-vault-env - defaultMode: 0555 diff --git a/services/keycloak/user-overrides-job.yaml b/services/keycloak/user-overrides-job.yaml index 0ea4f1f..495af18 100644 --- a/services/keycloak/user-overrides-job.yaml +++ b/services/keycloak/user-overrides-job.yaml @@ -2,11 +2,40 @@ apiVersion: batch/v1 kind: Job metadata: - name: keycloak-user-overrides-2 + name: keycloak-user-overrides-3 namespace: sso spec: backoffLimit: 0 template: + metadata: + annotations: + vault.hashicorp.com/agent-inject: "true" + vault.hashicorp.com/role: "sso" + vault.hashicorp.com/agent-inject-secret-keycloak-env.sh: "kv/data/atlas/shared/keycloak-admin" + vault.hashicorp.com/agent-inject-template-keycloak-env.sh: | + {{- with secret "kv/data/atlas/shared/keycloak-admin" -}} + export KEYCLOAK_ADMIN="{{ .Data.data.username }}" + export KEYCLOAK_ADMIN_USER="{{ .Data.data.username }}" + export KEYCLOAK_ADMIN_PASSWORD="{{ .Data.data.password }}" + {{- end }} + {{- with secret "kv/data/atlas/sso/keycloak-db" -}} + export KC_DB_URL_DATABASE="{{ .Data.data.POSTGRES_DATABASE }}" + export KC_DB_USERNAME="{{ .Data.data.POSTGRES_USER }}" + export KC_DB_PASSWORD="{{ .Data.data.POSTGRES_PASSWORD }}" + {{- end }} + {{- with secret "kv/data/atlas/shared/portal-e2e-client" -}} + export PORTAL_E2E_CLIENT_ID="{{ .Data.data.client_id }}" + export PORTAL_E2E_CLIENT_SECRET="{{ .Data.data.client_secret 
}}" + {{- end }} + {{- with secret "kv/data/atlas/sso/openldap-admin" -}} + export LDAP_ADMIN_PASSWORD="{{ .Data.data.LDAP_ADMIN_PASSWORD }}" + export LDAP_CONFIG_PASSWORD="{{ .Data.data.LDAP_CONFIG_PASSWORD }}" + export LDAP_BIND_PASSWORD="${LDAP_ADMIN_PASSWORD}" + {{- end }} + {{- with secret "kv/data/atlas/shared/postmark-relay" -}} + export KEYCLOAK_SMTP_USER="{{ index .Data.data "relay-username" }}" + export KEYCLOAK_SMTP_PASSWORD="{{ index .Data.data "relay-password" }}" + {{- end }} spec: affinity: nodeAffinity: @@ -36,7 +65,7 @@ spec: args: - | set -euo pipefail - . /vault/scripts/keycloak_vault_env.sh + . /vault/secrets/keycloak-env.sh python - <<'PY' import json import os @@ -136,20 +165,3 @@ spec: raise SystemExit(f"Unexpected user update response: {status}") PY volumeMounts: - - name: vault-secrets - mountPath: /vault/secrets - readOnly: true - - name: vault-scripts - mountPath: /vault/scripts - readOnly: true - volumes: - - name: vault-secrets - csi: - driver: secrets-store.csi.k8s.io - readOnly: true - volumeAttributes: - secretProviderClass: sso-vault - - name: vault-scripts - configMap: - name: sso-vault-env - defaultMode: 0555 diff --git a/services/keycloak/vault-oidc-secret-ensure-job.yaml b/services/keycloak/vault-oidc-secret-ensure-job.yaml index f27335a..797cada 100644 --- a/services/keycloak/vault-oidc-secret-ensure-job.yaml +++ b/services/keycloak/vault-oidc-secret-ensure-job.yaml @@ -2,12 +2,23 @@ apiVersion: batch/v1 kind: Job metadata: - name: vault-oidc-secret-ensure-2 + name: vault-oidc-secret-ensure-3 namespace: sso spec: backoffLimit: 0 ttlSecondsAfterFinished: 3600 template: + metadata: + annotations: + vault.hashicorp.com/agent-inject: "true" + vault.hashicorp.com/role: "sso-secrets" + vault.hashicorp.com/agent-inject-secret-keycloak-admin-env.sh: "kv/data/atlas/shared/keycloak-admin" + vault.hashicorp.com/agent-inject-template-keycloak-admin-env.sh: | + {{- with secret "kv/data/atlas/shared/keycloak-admin" -}} + export 
KEYCLOAK_ADMIN="{{ .Data.data.username }}" + export KEYCLOAK_ADMIN_USER="{{ .Data.data.username }}" + export KEYCLOAK_ADMIN_PASSWORD="{{ .Data.data.password }}" + {{- end }} spec: serviceAccountName: mas-secrets-ensure restartPolicy: Never @@ -16,16 +27,6 @@ spec: configMap: name: vault-oidc-secret-ensure-script defaultMode: 0555 - - name: vault-secrets - csi: - driver: secrets-store.csi.k8s.io - readOnly: true - volumeAttributes: - secretProviderClass: sso-vault - - name: vault-scripts - configMap: - name: sso-vault-env - defaultMode: 0555 affinity: nodeAffinity: requiredDuringSchedulingIgnoredDuringExecution: @@ -44,9 +45,3 @@ spec: - name: vault-oidc-secret-ensure-script mountPath: /scripts readOnly: true - - name: vault-secrets - mountPath: /vault/secrets - readOnly: true - - name: vault-scripts - mountPath: /vault/scripts - readOnly: true From 89f4b0dbdfa1b40f0a0fc04da4b67fa09835574c Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Wed, 14 Jan 2026 13:40:29 -0300 Subject: [PATCH 051/270] vault: stabilize injector templates and add health apps --- .../applications/health/kustomization.yaml | 33 ++++ .../applications/kustomization.yaml | 1 + .../bstein-dev-home/backend-deployment.yaml | 16 +- .../chat-ai-gateway-deployment.yaml | 16 +- .../portal-onboarding-e2e-test-job.yaml | 16 +- .../vaultwarden-cred-sync-cronjob.yaml | 16 +- services/gitea/deployment.yaml | 24 +-- services/health/endurain-data-pvc.yaml | 12 ++ services/health/endurain-deployment.yaml | 147 +++++++++++++++ services/health/endurain-ingress.yaml | 26 +++ .../health/endurain-oidc-config-cronjob.yaml | 87 +++++++++ services/health/endurain-service.yaml | 15 ++ services/health/kustomization.yaml | 30 ++++ services/health/namespace.yaml | 5 + .../health/scripts/endurain_oidc_configure.sh | 134 ++++++++++++++ .../scripts/sparkyfitness_oidc_configure.sh | 134 ++++++++++++++ services/health/secretproviderclass.yaml | 167 +++++++++++++++++ services/health/serviceaccount.yaml | 6 + 
services/health/sparkyfitness-data-pvc.yaml | 12 ++ .../sparkyfitness-frontend-deployment.yaml | 81 +++++++++ .../sparkyfitness-frontend-service.yaml | 15 ++ services/health/sparkyfitness-ingress.yaml | 26 +++ .../sparkyfitness-oidc-config-cronjob.yaml | 89 +++++++++ .../sparkyfitness-server-deployment.yaml | 170 ++++++++++++++++++ .../health/sparkyfitness-server-service.yaml | 15 ++ services/health/vault-sync-deployment.yaml | 34 ++++ services/keycloak/deployment.yaml | 51 ++---- .../endurain-oidc-secret-ensure-job.yaml | 52 ++++++ .../harbor-oidc-secret-ensure-job.yaml | 4 +- services/keycloak/kustomization.yaml | 8 + services/keycloak/ldap-federation-job.yaml | 20 +-- .../keycloak/logs-oidc-secret-ensure-job.yaml | 4 +- services/keycloak/mas-secrets-ensure-job.yaml | 4 +- services/keycloak/portal-e2e-client-job.yaml | 20 +-- ...al-e2e-execute-actions-email-test-job.yaml | 20 +-- .../portal-e2e-target-client-job.yaml | 20 +-- ...al-e2e-token-exchange-permissions-job.yaml | 20 +-- .../portal-e2e-token-exchange-test-job.yaml | 20 +-- services/keycloak/realm-settings-job.yaml | 20 +-- .../scripts/endurain_oidc_secret_ensure.sh | 87 +++++++++ .../sparkyfitness_oidc_secret_ensure.sh | 87 +++++++++ .../sparkyfitness-oidc-secret-ensure-job.yaml | 52 ++++++ .../synapse-oidc-secret-ensure-job.yaml | 4 +- services/keycloak/user-overrides-job.yaml | 20 +-- .../vault-oidc-secret-ensure-job.yaml | 4 +- services/nextcloud-mail-sync/cronjob.yaml | 20 +-- services/nextcloud/deployment.yaml | 20 +-- services/nextcloud/maintenance-cronjob.yaml | 20 +-- services/outline/deployment.yaml | 20 +-- services/planka/deployment.yaml | 20 +-- .../vault/scripts/vault_k8s_auth_configure.sh | 4 +- services/vaultwarden/deployment.yaml | 12 +- 52 files changed, 1736 insertions(+), 224 deletions(-) create mode 100644 clusters/atlas/flux-system/applications/health/kustomization.yaml create mode 100644 services/health/endurain-data-pvc.yaml create mode 100644 
services/health/endurain-deployment.yaml create mode 100644 services/health/endurain-ingress.yaml create mode 100644 services/health/endurain-oidc-config-cronjob.yaml create mode 100644 services/health/endurain-service.yaml create mode 100644 services/health/kustomization.yaml create mode 100644 services/health/namespace.yaml create mode 100644 services/health/scripts/endurain_oidc_configure.sh create mode 100644 services/health/scripts/sparkyfitness_oidc_configure.sh create mode 100644 services/health/secretproviderclass.yaml create mode 100644 services/health/serviceaccount.yaml create mode 100644 services/health/sparkyfitness-data-pvc.yaml create mode 100644 services/health/sparkyfitness-frontend-deployment.yaml create mode 100644 services/health/sparkyfitness-frontend-service.yaml create mode 100644 services/health/sparkyfitness-ingress.yaml create mode 100644 services/health/sparkyfitness-oidc-config-cronjob.yaml create mode 100644 services/health/sparkyfitness-server-deployment.yaml create mode 100644 services/health/sparkyfitness-server-service.yaml create mode 100644 services/health/vault-sync-deployment.yaml create mode 100644 services/keycloak/endurain-oidc-secret-ensure-job.yaml create mode 100644 services/keycloak/scripts/endurain_oidc_secret_ensure.sh create mode 100644 services/keycloak/scripts/sparkyfitness_oidc_secret_ensure.sh create mode 100644 services/keycloak/sparkyfitness-oidc-secret-ensure-job.yaml diff --git a/clusters/atlas/flux-system/applications/health/kustomization.yaml b/clusters/atlas/flux-system/applications/health/kustomization.yaml new file mode 100644 index 0000000..f666d39 --- /dev/null +++ b/clusters/atlas/flux-system/applications/health/kustomization.yaml @@ -0,0 +1,33 @@ +# clusters/atlas/flux-system/applications/health/kustomization.yaml +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: health + namespace: flux-system +spec: + interval: 10m + path: ./services/health + prune: true + sourceRef: 
+ kind: GitRepository + name: flux-system + targetNamespace: health + dependsOn: + - name: keycloak + - name: postgres + - name: traefik + - name: vault + healthChecks: + - apiVersion: apps/v1 + kind: Deployment + name: endurain + namespace: health + - apiVersion: apps/v1 + kind: Deployment + name: sparkyfitness-server + namespace: health + - apiVersion: apps/v1 + kind: Deployment + name: sparkyfitness-frontend + namespace: health + wait: false diff --git a/clusters/atlas/flux-system/applications/kustomization.yaml b/clusters/atlas/flux-system/applications/kustomization.yaml index d48cf9e..cc32c85 100644 --- a/clusters/atlas/flux-system/applications/kustomization.yaml +++ b/clusters/atlas/flux-system/applications/kustomization.yaml @@ -27,3 +27,4 @@ resources: - nextcloud-mail-sync/kustomization.yaml - outline/kustomization.yaml - planka/kustomization.yaml + - health/kustomization.yaml diff --git a/services/bstein-dev-home/backend-deployment.yaml b/services/bstein-dev-home/backend-deployment.yaml index 659cd33..e18a372 100644 --- a/services/bstein-dev-home/backend-deployment.yaml +++ b/services/bstein-dev-home/backend-deployment.yaml @@ -19,20 +19,20 @@ spec: vault.hashicorp.com/role: "bstein-dev-home" vault.hashicorp.com/agent-inject-secret-portal-env.sh: "kv/data/atlas/portal/atlas-portal-db" vault.hashicorp.com/agent-inject-template-portal-env.sh: | - {{- with secret "kv/data/atlas/portal/atlas-portal-db" -}} + {{ with secret "kv/data/atlas/portal/atlas-portal-db" }} export PORTAL_DATABASE_URL="{{ .Data.data.PORTAL_DATABASE_URL }}" - {{- end }} - {{- with secret "kv/data/atlas/portal/bstein-dev-home-keycloak-admin" -}} + {{ end }} + {{ with secret "kv/data/atlas/portal/bstein-dev-home-keycloak-admin" }} export KEYCLOAK_ADMIN_CLIENT_SECRET="{{ .Data.data.client_secret }}" - {{- end }} - {{- with secret "kv/data/atlas/shared/chat-ai-keys-runtime" -}} + {{ end }} + {{ with secret "kv/data/atlas/shared/chat-ai-keys-runtime" }} export CHAT_KEY_MATRIX="{{ 
.Data.data.matrix }}" export CHAT_KEY_HOMEPAGE="{{ .Data.data.homepage }}" - {{- end }} - {{- with secret "kv/data/atlas/shared/portal-e2e-client" -}} + {{ end }} + {{ with secret "kv/data/atlas/shared/portal-e2e-client" }} export PORTAL_E2E_CLIENT_ID="{{ .Data.data.client_id }}" export PORTAL_E2E_CLIENT_SECRET="{{ .Data.data.client_secret }}" - {{- end }} + {{ end }} spec: automountServiceAccountToken: true serviceAccountName: bstein-dev-home diff --git a/services/bstein-dev-home/chat-ai-gateway-deployment.yaml b/services/bstein-dev-home/chat-ai-gateway-deployment.yaml index fba58bc..40d74fe 100644 --- a/services/bstein-dev-home/chat-ai-gateway-deployment.yaml +++ b/services/bstein-dev-home/chat-ai-gateway-deployment.yaml @@ -19,20 +19,20 @@ spec: vault.hashicorp.com/role: "bstein-dev-home" vault.hashicorp.com/agent-inject-secret-portal-env.sh: "kv/data/atlas/portal/atlas-portal-db" vault.hashicorp.com/agent-inject-template-portal-env.sh: | - {{- with secret "kv/data/atlas/portal/atlas-portal-db" -}} + {{ with secret "kv/data/atlas/portal/atlas-portal-db" }} export PORTAL_DATABASE_URL="{{ .Data.data.PORTAL_DATABASE_URL }}" - {{- end }} - {{- with secret "kv/data/atlas/portal/bstein-dev-home-keycloak-admin" -}} + {{ end }} + {{ with secret "kv/data/atlas/portal/bstein-dev-home-keycloak-admin" }} export KEYCLOAK_ADMIN_CLIENT_SECRET="{{ .Data.data.client_secret }}" - {{- end }} - {{- with secret "kv/data/atlas/shared/chat-ai-keys-runtime" -}} + {{ end }} + {{ with secret "kv/data/atlas/shared/chat-ai-keys-runtime" }} export CHAT_KEY_MATRIX="{{ .Data.data.matrix }}" export CHAT_KEY_HOMEPAGE="{{ .Data.data.homepage }}" - {{- end }} - {{- with secret "kv/data/atlas/shared/portal-e2e-client" -}} + {{ end }} + {{ with secret "kv/data/atlas/shared/portal-e2e-client" }} export PORTAL_E2E_CLIENT_ID="{{ .Data.data.client_id }}" export PORTAL_E2E_CLIENT_SECRET="{{ .Data.data.client_secret }}" - {{- end }} + {{ end }} spec: serviceAccountName: bstein-dev-home nodeSelector: diff 
--git a/services/bstein-dev-home/portal-onboarding-e2e-test-job.yaml b/services/bstein-dev-home/portal-onboarding-e2e-test-job.yaml index 1d926b2..1f725f6 100644 --- a/services/bstein-dev-home/portal-onboarding-e2e-test-job.yaml +++ b/services/bstein-dev-home/portal-onboarding-e2e-test-job.yaml @@ -13,20 +13,20 @@ spec: vault.hashicorp.com/role: "bstein-dev-home" vault.hashicorp.com/agent-inject-secret-portal-env.sh: "kv/data/atlas/portal/atlas-portal-db" vault.hashicorp.com/agent-inject-template-portal-env.sh: | - {{- with secret "kv/data/atlas/portal/atlas-portal-db" -}} + {{ with secret "kv/data/atlas/portal/atlas-portal-db" }} export PORTAL_DATABASE_URL="{{ .Data.data.PORTAL_DATABASE_URL }}" - {{- end }} - {{- with secret "kv/data/atlas/portal/bstein-dev-home-keycloak-admin" -}} + {{ end }} + {{ with secret "kv/data/atlas/portal/bstein-dev-home-keycloak-admin" }} export KEYCLOAK_ADMIN_CLIENT_SECRET="{{ .Data.data.client_secret }}" - {{- end }} - {{- with secret "kv/data/atlas/shared/chat-ai-keys-runtime" -}} + {{ end }} + {{ with secret "kv/data/atlas/shared/chat-ai-keys-runtime" }} export CHAT_KEY_MATRIX="{{ .Data.data.matrix }}" export CHAT_KEY_HOMEPAGE="{{ .Data.data.homepage }}" - {{- end }} - {{- with secret "kv/data/atlas/shared/portal-e2e-client" -}} + {{ end }} + {{ with secret "kv/data/atlas/shared/portal-e2e-client" }} export PORTAL_E2E_CLIENT_ID="{{ .Data.data.client_id }}" export PORTAL_E2E_CLIENT_SECRET="{{ .Data.data.client_secret }}" - {{- end }} + {{ end }} spec: restartPolicy: Never serviceAccountName: bstein-dev-home diff --git a/services/bstein-dev-home/vaultwarden-cred-sync-cronjob.yaml b/services/bstein-dev-home/vaultwarden-cred-sync-cronjob.yaml index b46a2e3..efbab7e 100644 --- a/services/bstein-dev-home/vaultwarden-cred-sync-cronjob.yaml +++ b/services/bstein-dev-home/vaultwarden-cred-sync-cronjob.yaml @@ -19,20 +19,20 @@ spec: vault.hashicorp.com/role: "bstein-dev-home" vault.hashicorp.com/agent-inject-secret-portal-env.sh: 
"kv/data/atlas/portal/atlas-portal-db" vault.hashicorp.com/agent-inject-template-portal-env.sh: | - {{- with secret "kv/data/atlas/portal/atlas-portal-db" -}} + {{ with secret "kv/data/atlas/portal/atlas-portal-db" }} export PORTAL_DATABASE_URL="{{ .Data.data.PORTAL_DATABASE_URL }}" - {{- end }} - {{- with secret "kv/data/atlas/portal/bstein-dev-home-keycloak-admin" -}} + {{ end }} + {{ with secret "kv/data/atlas/portal/bstein-dev-home-keycloak-admin" }} export KEYCLOAK_ADMIN_CLIENT_SECRET="{{ .Data.data.client_secret }}" - {{- end }} - {{- with secret "kv/data/atlas/shared/chat-ai-keys-runtime" -}} + {{ end }} + {{ with secret "kv/data/atlas/shared/chat-ai-keys-runtime" }} export CHAT_KEY_MATRIX="{{ .Data.data.matrix }}" export CHAT_KEY_HOMEPAGE="{{ .Data.data.homepage }}" - {{- end }} - {{- with secret "kv/data/atlas/shared/portal-e2e-client" -}} + {{ end }} + {{ with secret "kv/data/atlas/shared/portal-e2e-client" }} export PORTAL_E2E_CLIENT_ID="{{ .Data.data.client_id }}" export PORTAL_E2E_CLIENT_SECRET="{{ .Data.data.client_secret }}" - {{- end }} + {{ end }} spec: serviceAccountName: bstein-dev-home restartPolicy: Never diff --git a/services/gitea/deployment.yaml b/services/gitea/deployment.yaml index cbcdab8..69650ca 100644 --- a/services/gitea/deployment.yaml +++ b/services/gitea/deployment.yaml @@ -26,34 +26,34 @@ spec: vault.hashicorp.com/role: "gitea" vault.hashicorp.com/agent-inject-secret-gitea-db-secret__password: "kv/data/atlas/gitea/gitea-db-secret" vault.hashicorp.com/agent-inject-template-gitea-db-secret__password: | - {{- with secret "kv/data/atlas/gitea/gitea-db-secret" -}} + {{ with secret "kv/data/atlas/gitea/gitea-db-secret" }} {{ .Data.data.password }} - {{- end }} + {{ end }} vault.hashicorp.com/agent-inject-secret-gitea-secret__SECRET_KEY: "kv/data/atlas/gitea/gitea-secret" vault.hashicorp.com/agent-inject-template-gitea-secret__SECRET_KEY: | - {{- with secret "kv/data/atlas/gitea/gitea-secret" -}} + {{ with secret 
"kv/data/atlas/gitea/gitea-secret" }} {{ .Data.data.SECRET_KEY }} - {{- end }} + {{ end }} vault.hashicorp.com/agent-inject-secret-gitea-secret__INTERNAL_TOKEN: "kv/data/atlas/gitea/gitea-secret" vault.hashicorp.com/agent-inject-template-gitea-secret__INTERNAL_TOKEN: | - {{- with secret "kv/data/atlas/gitea/gitea-secret" -}} + {{ with secret "kv/data/atlas/gitea/gitea-secret" }} {{ .Data.data.INTERNAL_TOKEN }} - {{- end }} + {{ end }} vault.hashicorp.com/agent-inject-secret-gitea-oidc__client_id: "kv/data/atlas/gitea/gitea-oidc" vault.hashicorp.com/agent-inject-template-gitea-oidc__client_id: | - {{- with secret "kv/data/atlas/gitea/gitea-oidc" -}} + {{ with secret "kv/data/atlas/gitea/gitea-oidc" }} {{ .Data.data.client_id }} - {{- end }} + {{ end }} vault.hashicorp.com/agent-inject-secret-gitea-oidc__client_secret: "kv/data/atlas/gitea/gitea-oidc" vault.hashicorp.com/agent-inject-template-gitea-oidc__client_secret: | - {{- with secret "kv/data/atlas/gitea/gitea-oidc" -}} + {{ with secret "kv/data/atlas/gitea/gitea-oidc" }} {{ .Data.data.client_secret }} - {{- end }} + {{ end }} vault.hashicorp.com/agent-inject-secret-gitea-oidc__openid_auto_discovery_url: "kv/data/atlas/gitea/gitea-oidc" vault.hashicorp.com/agent-inject-template-gitea-oidc__openid_auto_discovery_url: | - {{- with secret "kv/data/atlas/gitea/gitea-oidc" -}} + {{ with secret "kv/data/atlas/gitea/gitea-oidc" }} {{ .Data.data.openid_auto_discovery_url }} - {{- end }} + {{ end }} spec: serviceAccountName: gitea-vault initContainers: diff --git a/services/health/endurain-data-pvc.yaml b/services/health/endurain-data-pvc.yaml new file mode 100644 index 0000000..6c8d244 --- /dev/null +++ b/services/health/endurain-data-pvc.yaml @@ -0,0 +1,12 @@ +# services/health/endurain-data-pvc.yaml +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: endurain-data + namespace: health +spec: + accessModes: ["ReadWriteOnce"] + storageClassName: asteria + resources: + requests: + storage: 10Gi diff --git 
a/services/health/endurain-deployment.yaml b/services/health/endurain-deployment.yaml new file mode 100644 index 0000000..05608b1 --- /dev/null +++ b/services/health/endurain-deployment.yaml @@ -0,0 +1,147 @@ +# services/health/endurain-deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: endurain + namespace: health + labels: + app: endurain +spec: + replicas: 1 + selector: + matchLabels: + app: endurain + strategy: + type: RollingUpdate + rollingUpdate: + maxSurge: 0 + maxUnavailable: 1 + template: + metadata: + labels: + app: endurain + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: hardware + operator: In + values: ["rpi5", "rpi4"] + - key: node-role.kubernetes.io/worker + operator: Exists + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 90 + preference: + matchExpressions: + - key: hardware + operator: In + values: ["rpi5"] + - weight: 70 + preference: + matchExpressions: + - key: hardware + operator: In + values: ["rpi4"] + securityContext: + runAsUser: 1000 + runAsGroup: 1000 + fsGroup: 1000 + fsGroupChangePolicy: OnRootMismatch + initContainers: + - name: init-data + image: alpine:3.20 + command: ["/bin/sh", "-c"] + args: + - | + set -e + mkdir -p /data + chown -R 1000:1000 /data + securityContext: + runAsUser: 0 + runAsGroup: 0 + volumeMounts: + - name: endurain-data + mountPath: /data + containers: + - name: endurain + image: ghcr.io/endurain-project/endurain:v0.16.6 + imagePullPolicy: IfNotPresent + ports: + - name: http + containerPort: 8080 + env: + - name: ENDURAIN_HOST + value: https://endurain.bstein.dev + - name: BEHIND_PROXY + value: "true" + - name: LOG_LEVEL + value: info + - name: TZ + value: Etc/UTC + - name: DB_HOST + valueFrom: + secretKeyRef: + name: endurain-db + key: DB_HOST + - name: DB_PORT + valueFrom: + secretKeyRef: + name: endurain-db + key: DB_PORT + - name: DB_USER + valueFrom: + secretKeyRef: + name: 
endurain-db + key: DB_USER + - name: DB_PASSWORD + valueFrom: + secretKeyRef: + name: endurain-db + key: DB_PASSWORD + - name: DB_DATABASE + valueFrom: + secretKeyRef: + name: endurain-db + key: DB_DATABASE + - name: SECRET_KEY + valueFrom: + secretKeyRef: + name: endurain-secrets + key: SECRET_KEY + - name: FERNET_KEY + valueFrom: + secretKeyRef: + name: endurain-secrets + key: FERNET_KEY + volumeMounts: + - name: endurain-data + mountPath: /app/backend/data + readinessProbe: + httpGet: + path: /api/v1/about + port: http + initialDelaySeconds: 15 + periodSeconds: 10 + timeoutSeconds: 3 + failureThreshold: 6 + livenessProbe: + httpGet: + path: /api/v1/about + port: http + initialDelaySeconds: 30 + periodSeconds: 20 + timeoutSeconds: 3 + failureThreshold: 6 + resources: + requests: + cpu: 200m + memory: 512Mi + limits: + cpu: "1" + memory: 2Gi + volumes: + - name: endurain-data + persistentVolumeClaim: + claimName: endurain-data diff --git a/services/health/endurain-ingress.yaml b/services/health/endurain-ingress.yaml new file mode 100644 index 0000000..a7b2cc0 --- /dev/null +++ b/services/health/endurain-ingress.yaml @@ -0,0 +1,26 @@ +# services/health/endurain-ingress.yaml +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: endurain + namespace: health + annotations: + kubernetes.io/ingress.class: traefik + traefik.ingress.kubernetes.io/router.entrypoints: websecure + traefik.ingress.kubernetes.io/router.tls: "true" + cert-manager.io/cluster-issuer: letsencrypt +spec: + tls: + - hosts: ["endurain.bstein.dev"] + secretName: endurain-tls + rules: + - host: endurain.bstein.dev + http: + paths: + - path: / + pathType: Prefix + backend: + service: + name: endurain + port: + number: 80 diff --git a/services/health/endurain-oidc-config-cronjob.yaml b/services/health/endurain-oidc-config-cronjob.yaml new file mode 100644 index 0000000..7930425 --- /dev/null +++ b/services/health/endurain-oidc-config-cronjob.yaml @@ -0,0 +1,87 @@ +# 
services/health/endurain-oidc-config-cronjob.yaml +apiVersion: batch/v1 +kind: CronJob +metadata: + name: endurain-oidc-config + namespace: health +spec: + schedule: "*/30 * * * *" + concurrencyPolicy: Forbid + successfulJobsHistoryLimit: 1 + failedJobsHistoryLimit: 3 + jobTemplate: + spec: + backoffLimit: 1 + template: + spec: + serviceAccountName: health-vault-sync + restartPolicy: Never + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/arch + operator: In + values: ["arm64"] + - key: node-role.kubernetes.io/worker + operator: Exists + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 90 + preference: + matchExpressions: + - key: hardware + operator: In + values: ["rpi5"] + - weight: 70 + preference: + matchExpressions: + - key: hardware + operator: In + values: ["rpi4"] + containers: + - name: configure + image: alpine:3.20 + command: ["/bin/sh", "-c"] + args: + - | + set -euo pipefail + apk add --no-cache bash curl jq >/dev/null + exec /scripts/endurain_oidc_configure.sh + env: + - name: ENDURAIN_BASE_URL + value: http://endurain.health.svc.cluster.local + - name: ENDURAIN_ADMIN_USERNAME + valueFrom: + secretKeyRef: + name: endurain-admin + key: username + - name: ENDURAIN_ADMIN_PASSWORD + valueFrom: + secretKeyRef: + name: endurain-admin + key: password + - name: ENDURAIN_OIDC_CLIENT_ID + valueFrom: + secretKeyRef: + name: endurain-oidc + key: client_id + - name: ENDURAIN_OIDC_CLIENT_SECRET + valueFrom: + secretKeyRef: + name: endurain-oidc + key: client_secret + - name: ENDURAIN_OIDC_ISSUER_URL + valueFrom: + secretKeyRef: + name: endurain-oidc + key: issuer_url + volumeMounts: + - name: endurain-oidc-config-script + mountPath: /scripts + readOnly: true + volumes: + - name: endurain-oidc-config-script + configMap: + name: endurain-oidc-config-script + defaultMode: 0555 diff --git a/services/health/endurain-service.yaml 
b/services/health/endurain-service.yaml new file mode 100644 index 0000000..cffe116 --- /dev/null +++ b/services/health/endurain-service.yaml @@ -0,0 +1,15 @@ +# services/health/endurain-service.yaml +apiVersion: v1 +kind: Service +metadata: + name: endurain + namespace: health + labels: + app: endurain +spec: + selector: + app: endurain + ports: + - name: http + port: 80 + targetPort: http diff --git a/services/health/kustomization.yaml b/services/health/kustomization.yaml new file mode 100644 index 0000000..1690876 --- /dev/null +++ b/services/health/kustomization.yaml @@ -0,0 +1,30 @@ +# services/health/kustomization.yaml +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +namespace: health +resources: + - namespace.yaml + - serviceaccount.yaml + - secretproviderclass.yaml + - vault-sync-deployment.yaml + - endurain-data-pvc.yaml + - sparkyfitness-data-pvc.yaml + - endurain-oidc-config-cronjob.yaml + - sparkyfitness-oidc-config-cronjob.yaml + - endurain-deployment.yaml + - endurain-service.yaml + - sparkyfitness-server-deployment.yaml + - sparkyfitness-server-service.yaml + - sparkyfitness-frontend-deployment.yaml + - sparkyfitness-frontend-service.yaml + - endurain-ingress.yaml + - sparkyfitness-ingress.yaml +generatorOptions: + disableNameSuffixHash: true +configMapGenerator: + - name: endurain-oidc-config-script + files: + - endurain_oidc_configure.sh=scripts/endurain_oidc_configure.sh + - name: sparkyfitness-oidc-config-script + files: + - sparkyfitness_oidc_configure.sh=scripts/sparkyfitness_oidc_configure.sh diff --git a/services/health/namespace.yaml b/services/health/namespace.yaml new file mode 100644 index 0000000..71d6fff --- /dev/null +++ b/services/health/namespace.yaml @@ -0,0 +1,5 @@ +# services/health/namespace.yaml +apiVersion: v1 +kind: Namespace +metadata: + name: health diff --git a/services/health/scripts/endurain_oidc_configure.sh b/services/health/scripts/endurain_oidc_configure.sh new file mode 100644 index 0000000..76ebc99 
--- /dev/null
+++ b/services/health/scripts/endurain_oidc_configure.sh
@@ -0,0 +1,134 @@
+#!/usr/bin/env bash
+set -euo pipefail # fail fast on errors, unset vars, and pipeline failures
+
+base_url="${ENDURAIN_BASE_URL:-http://endurain.health.svc.cluster.local}"
+admin_username="${ENDURAIN_ADMIN_USERNAME:-admin}"
+admin_password="${ENDURAIN_ADMIN_PASSWORD:?ENDURAIN_ADMIN_PASSWORD is required}"
+default_password="${ENDURAIN_DEFAULT_ADMIN_PASSWORD:-admin}" # factory default; used below to detect a first-boot instance
+oidc_client_id="${ENDURAIN_OIDC_CLIENT_ID:?ENDURAIN_OIDC_CLIENT_ID is required}"
+oidc_client_secret="${ENDURAIN_OIDC_CLIENT_SECRET:?ENDURAIN_OIDC_CLIENT_SECRET is required}"
+oidc_issuer_url="${ENDURAIN_OIDC_ISSUER_URL:?ENDURAIN_OIDC_ISSUER_URL is required}"
+
+wait_for_endurain() { # poll the about endpoint; linear backoff, ~165s of sleep max
+  for attempt in 1 2 3 4 5 6 7 8 9 10; do
+    if curl -fsS "${base_url}/api/v1/about" >/dev/null 2>&1; then
+      return 0
+    fi
+    sleep $((attempt * 3))
+  done
+  return 1
+}
+
+login() { # echo an access token for username/password; return 1 on any failure
+  local username="$1"
+  local password="$2"
+  local token
+  token="$(curl -sS -X POST "${base_url}/api/v1/auth/login" \
+    -H "X-Client-Type: mobile" \
+    -H "Content-Type: application/x-www-form-urlencoded" \
+    --data-urlencode "grant_type=password" \
+    --data-urlencode "username=${username}" \
+    --data-urlencode "password=${password}" | jq -r '.access_token' 2>/dev/null || true)"
+  if [ -n "${token}" ] && [ "${token}" != "null" ]; then # jq emits "null" when the key is absent
+    echo "${token}"
+    return 0
+  fi
+  return 1
+}
+
+if ! wait_for_endurain; then
+  echo "Endurain is not responding at ${base_url}" >&2
+  exit 1
+fi
+
+token="$(login "${admin_username}" "${admin_password}" || true)" # preferred: the Vault-managed password
+if [ -z "${token}" ]; then
+  token="$(login "${admin_username}" "${default_password}" || true)" # fall back to the factory default (fresh install)
+  if [ -z "${token}" ]; then
+    echo "Failed to authenticate to Endurain as admin" >&2
+    exit 1
+  fi
+  if [ "${admin_password}" != "${default_password}" ]; then # rotate away from the factory default
+    user_id="$(curl -sS -H "Authorization: Bearer ${token}" -H "X-Client-Type: mobile" \
+      "${base_url}/api/v1/users/username/${admin_username}" | jq -r '.id' 2>/dev/null || true)"
+    if [ -z "${user_id}" ] || [ "${user_id}" = "null" ]; then
+      echo "Admin user ${admin_username} not found" >&2
+      exit 1
+    fi
+    update_payload="$(jq -nc --arg password "${admin_password}" '{password:$password}')"
+    status="$(curl -sS -o /dev/null -w "%{http_code}" -X PUT \
+      -H "Authorization: Bearer ${token}" \
+      -H "X-Client-Type: mobile" \
+      -H "Content-Type: application/json" \
+      -d "${update_payload}" \
+      "${base_url}/api/v1/users/${user_id}/password")"
+    if [ "${status}" != "200" ] && [ "${status}" != "201" ]; then
+      echo "Failed to rotate Endurain admin password (status ${status})" >&2
+      exit 1
+    fi
+    token="$(login "${admin_username}" "${admin_password}" || true)" # re-login so later calls use a post-rotation token
+    if [ -z "${token}" ]; then
+      echo "Failed to authenticate with rotated admin password" >&2
+      exit 1
+    fi
+  fi
+fi
+
+idp_payload="$(jq -nc \
+  --arg name "Keycloak" \
+  --arg slug "keycloak" \
+  --arg issuer_url "${oidc_issuer_url}" \
+  --arg scopes "openid profile email" \
+  --arg client_id "${oidc_client_id}" \
+  --arg client_secret "${oidc_client_secret}" \
+  --arg icon "keycloak" \
+  --argjson enabled true \
+  --argjson auto_create_users true \
+  --argjson sync_user_info true \
+  --argjson user_mapping '{"username":["preferred_username","username","email"],"email":["email","mail"],"name":["name","display_name","full_name"]}' \
+  '{name:$name,slug:$slug,provider_type:"oidc",enabled:$enabled,issuer_url:$issuer_url,scopes:$scopes,icon:$icon,auto_create_users:$auto_create_users,sync_user_info:$sync_user_info,user_mapping:$user_mapping,client_id:$client_id,client_secret:$client_secret}')"
+
+idp_id="$(curl -sS -H "Authorization: Bearer ${token}" -H "X-Client-Type: mobile" \
+  "${base_url}/api/v1/idp" | jq -r '.[] | select(.slug=="keycloak") | .id' 2>/dev/null | head -n1 || true)"
+
+if [ -n "${idp_id}" ] && [ "${idp_id}" != "null" ]; then # provider already exists: update it in place
+  status="$(curl -sS -o /dev/null -w "%{http_code}" -X PUT \
+    -H "Authorization: Bearer ${token}" \
+    -H "X-Client-Type: mobile" \
+    -H "Content-Type: application/json" \
+    -d "${idp_payload}" \
+    "${base_url}/api/v1/idp/${idp_id}")"
+else
+  status="$(curl -sS -o /dev/null -w "%{http_code}" -X POST \
+    -H "Authorization: Bearer ${token}" \
+    -H "X-Client-Type: mobile" \
+    -H "Content-Type: application/json" \
+    -d "${idp_payload}" \
+    "${base_url}/api/v1/idp")"
+fi
+
+if [ "${status}" != "200" ] && [ "${status}" != "201" ] && [ "${status}" != "204" ]; then
+  echo "Failed to upsert Endurain OIDC provider (status ${status})" >&2
+  exit 1
+fi
+
+settings_json="$(curl -sS -H "Authorization: Bearer ${token}" -H "X-Client-Type: mobile" \
+  "${base_url}/api/v1/server_settings")"
+if [ -z "${settings_json}" ]; then
+  echo "Failed to fetch Endurain server settings" >&2
+  exit 1
+fi
+
+settings_payload="$(echo "${settings_json}" | jq \
+  '.sso_enabled=true | .sso_auto_redirect=true | .signup_enabled=false | .local_login_enabled=true')"
+
+status="$(curl -sS -o /dev/null -w "%{http_code}" -X PUT \
+  -H "Authorization: Bearer ${token}" \
+  -H "X-Client-Type: mobile" \
+  -H "Content-Type: application/json" \
+  -d "${settings_payload}" \
+  "${base_url}/api/v1/server_settings")"
+if [ "${status}" != "200" ] && [ "${status}" != "201" ]; then
+  echo "Failed to update Endurain server settings (status ${status})" >&2
+  exit 1
+fi
diff --git
a/services/health/scripts/sparkyfitness_oidc_configure.sh b/services/health/scripts/sparkyfitness_oidc_configure.sh new file mode 100644 index 0000000..98c6857 --- /dev/null +++ b/services/health/scripts/sparkyfitness_oidc_configure.sh @@ -0,0 +1,134 @@ +#!/usr/bin/env bash +set -euo pipefail + +base_url="${SPARKYFITNESS_BASE_URL:-http://sparkyfitness-server.health.svc.cluster.local:3010}" +frontend_url="${SPARKYFITNESS_FRONTEND_URL:?SPARKYFITNESS_FRONTEND_URL is required}" +admin_email="${SPARKYFITNESS_ADMIN_EMAIL:?SPARKYFITNESS_ADMIN_EMAIL is required}" +admin_password="${SPARKYFITNESS_ADMIN_PASSWORD:?SPARKYFITNESS_ADMIN_PASSWORD is required}" +oidc_client_id="${SPARKYFITNESS_OIDC_CLIENT_ID:?SPARKYFITNESS_OIDC_CLIENT_ID is required}" +oidc_client_secret="${SPARKYFITNESS_OIDC_CLIENT_SECRET:?SPARKYFITNESS_OIDC_CLIENT_SECRET is required}" +oidc_issuer_url="${SPARKYFITNESS_OIDC_ISSUER_URL:?SPARKYFITNESS_OIDC_ISSUER_URL is required}" + +wait_for_server() { + for attempt in 1 2 3 4 5 6 7 8 9 10; do + if curl -fsS "${base_url}/health" >/dev/null 2>&1; then + return 0 + fi + sleep $((attempt * 3)) + done + return 1 +} + +cookie_jar="$(mktemp)" +trap 'rm -f "${cookie_jar}"' EXIT + +auth_login() { + local payload + payload="$(jq -nc --arg email "${admin_email}" --arg password "${admin_password}" '{email:$email,password:$password}')" + local status + status="$(curl -sS -o /tmp/sparkyfitness_login.json -w "%{http_code}" \ + -c "${cookie_jar}" -b "${cookie_jar}" \ + -H "Content-Type: application/json" \ + -X POST "${base_url}/auth/login" \ + -d "${payload}")" + if [ "${status}" = "200" ]; then + return 0 + fi + return 1 +} + +auth_register() { + local payload + payload="$(jq -nc --arg email "${admin_email}" --arg password "${admin_password}" --arg full_name "Sparky Admin" '{email:$email,password:$password,full_name:$full_name}')" + curl -sS -o /tmp/sparkyfitness_register.json -w "%{http_code}" \ + -c "${cookie_jar}" -b "${cookie_jar}" \ + -H "Content-Type: application/json" \ 
+ -X POST "${base_url}/auth/register" \ + -d "${payload}" +} + +if ! wait_for_server; then + echo "SparkyFitness is not responding at ${base_url}" >&2 + exit 1 +fi + +if ! auth_login; then + status="$(auth_register)" + if [ "${status}" = "409" ]; then + if ! auth_login; then + echo "Admin login failed after existing user detected" >&2 + exit 1 + fi + elif [ "${status}" = "201" ]; then + if ! auth_login; then + echo "Admin login failed after registration" >&2 + exit 1 + fi + elif [ "${status}" = "403" ]; then + echo "Registration disabled; unable to bootstrap admin user" >&2 + exit 1 + else + echo "Admin registration failed (status ${status})" >&2 + exit 1 + fi +fi + +settings_json="$(curl -sS -b "${cookie_jar}" "${base_url}/admin/global-settings")" +if [ -z "${settings_json}" ]; then + echo "Failed to fetch SparkyFitness global settings" >&2 + exit 1 +fi + +email_enabled="$(echo "${settings_json}" | jq -r '.enable_email_password_login // true')" +mfa_mandatory="$(echo "${settings_json}" | jq -r '.is_mfa_mandatory // .mfa_mandatory // false')" +settings_payload="$(jq -nc \ + --argjson enable_email_password_login "${email_enabled}" \ + --argjson is_oidc_active true \ + --argjson is_mfa_mandatory "${mfa_mandatory}" \ + '{enable_email_password_login:$enable_email_password_login,is_oidc_active:$is_oidc_active,is_mfa_mandatory:$is_mfa_mandatory}')" + +status="$(curl -sS -o /dev/null -w "%{http_code}" -b "${cookie_jar}" \ + -H "Content-Type: application/json" \ + -X PUT "${base_url}/admin/global-settings" \ + -d "${settings_payload}")" +if [ "${status}" != "200" ]; then + echo "Failed to update SparkyFitness global settings (status ${status})" >&2 + exit 1 +fi + +providers_json="$(curl -sS -b "${cookie_jar}" "${base_url}/admin/oidc-settings")" +provider_id="$(echo "${providers_json}" | jq -r --arg issuer "${oidc_issuer_url}" '.[] | select(.issuer_url==$issuer) | .id' 2>/dev/null | head -n1 || true)" + +redirect_uri="${frontend_url%/}/oidc-callback" +provider_payload="$(jq 
-nc \ + --arg issuer_url "${oidc_issuer_url}" \ + --arg client_id "${oidc_client_id}" \ + --arg client_secret "${oidc_client_secret}" \ + --arg redirect_uri "${redirect_uri}" \ + --arg scope "openid profile email" \ + --arg token_endpoint_auth_method "client_secret_post" \ + --argjson response_types '["code"]' \ + --argjson is_active true \ + --arg display_name "Atlas SSO" \ + --argjson auto_register true \ + --arg signing_algorithm "RS256" \ + --arg profile_signing_algorithm "none" \ + --argjson timeout 30000 \ + '{issuer_url:$issuer_url,client_id:$client_id,client_secret:$client_secret,redirect_uris:[$redirect_uri],scope:$scope,token_endpoint_auth_method:$token_endpoint_auth_method,response_types:$response_types,is_active:$is_active,display_name:$display_name,auto_register:$auto_register,signing_algorithm:$signing_algorithm,profile_signing_algorithm:$profile_signing_algorithm,timeout:$timeout}')" + +if [ -n "${provider_id}" ] && [ "${provider_id}" != "null" ]; then + status="$(curl -sS -o /dev/null -w "%{http_code}" -b "${cookie_jar}" \ + -H "Content-Type: application/json" \ + -X PUT "${base_url}/admin/oidc-settings/${provider_id}" \ + -d "${provider_payload}")" +else + status="$(curl -sS -o /dev/null -w "%{http_code}" -b "${cookie_jar}" \ + -H "Content-Type: application/json" \ + -X POST "${base_url}/admin/oidc-settings" \ + -d "${provider_payload}")" +fi + +if [ "${status}" != "200" ] && [ "${status}" != "201" ]; then + echo "Failed to upsert SparkyFitness OIDC provider (status ${status})" >&2 + exit 1 +fi diff --git a/services/health/secretproviderclass.yaml b/services/health/secretproviderclass.yaml new file mode 100644 index 0000000..c019c96 --- /dev/null +++ b/services/health/secretproviderclass.yaml @@ -0,0 +1,167 @@ +# services/health/secretproviderclass.yaml +apiVersion: secrets-store.csi.x-k8s.io/v1 +kind: SecretProviderClass +metadata: + name: health-vault + namespace: health +spec: + provider: vault + parameters: + vaultAddress: 
"http://vault.vault.svc.cluster.local:8200" + roleName: "health" + objects: | + - objectName: "endurain-db__DB_HOST" + secretPath: "kv/data/atlas/health/endurain-db" + secretKey: "DB_HOST" + - objectName: "endurain-db__DB_PORT" + secretPath: "kv/data/atlas/health/endurain-db" + secretKey: "DB_PORT" + - objectName: "endurain-db__DB_USER" + secretPath: "kv/data/atlas/health/endurain-db" + secretKey: "DB_USER" + - objectName: "endurain-db__DB_PASSWORD" + secretPath: "kv/data/atlas/health/endurain-db" + secretKey: "DB_PASSWORD" + - objectName: "endurain-db__DB_DATABASE" + secretPath: "kv/data/atlas/health/endurain-db" + secretKey: "DB_DATABASE" + - objectName: "endurain-secrets__SECRET_KEY" + secretPath: "kv/data/atlas/health/endurain-secrets" + secretKey: "SECRET_KEY" + - objectName: "endurain-secrets__FERNET_KEY" + secretPath: "kv/data/atlas/health/endurain-secrets" + secretKey: "FERNET_KEY" + - objectName: "endurain-admin__username" + secretPath: "kv/data/atlas/health/endurain-admin" + secretKey: "username" + - objectName: "endurain-admin__password" + secretPath: "kv/data/atlas/health/endurain-admin" + secretKey: "password" + - objectName: "endurain-oidc__client_id" + secretPath: "kv/data/atlas/health/endurain-oidc" + secretKey: "client_id" + - objectName: "endurain-oidc__client_secret" + secretPath: "kv/data/atlas/health/endurain-oidc" + secretKey: "client_secret" + - objectName: "endurain-oidc__issuer_url" + secretPath: "kv/data/atlas/health/endurain-oidc" + secretKey: "issuer_url" + - objectName: "sparkyfitness-db__SPARKY_FITNESS_DB_HOST" + secretPath: "kv/data/atlas/health/sparkyfitness-db" + secretKey: "SPARKY_FITNESS_DB_HOST" + - objectName: "sparkyfitness-db__SPARKY_FITNESS_DB_PORT" + secretPath: "kv/data/atlas/health/sparkyfitness-db" + secretKey: "SPARKY_FITNESS_DB_PORT" + - objectName: "sparkyfitness-db__SPARKY_FITNESS_DB_NAME" + secretPath: "kv/data/atlas/health/sparkyfitness-db" + secretKey: "SPARKY_FITNESS_DB_NAME" + - objectName: 
"sparkyfitness-db__SPARKY_FITNESS_DB_USER" + secretPath: "kv/data/atlas/health/sparkyfitness-db" + secretKey: "SPARKY_FITNESS_DB_USER" + - objectName: "sparkyfitness-db__SPARKY_FITNESS_DB_PASSWORD" + secretPath: "kv/data/atlas/health/sparkyfitness-db" + secretKey: "SPARKY_FITNESS_DB_PASSWORD" + - objectName: "sparkyfitness-db__SPARKY_FITNESS_APP_DB_USER" + secretPath: "kv/data/atlas/health/sparkyfitness-db" + secretKey: "SPARKY_FITNESS_APP_DB_USER" + - objectName: "sparkyfitness-db__SPARKY_FITNESS_APP_DB_PASSWORD" + secretPath: "kv/data/atlas/health/sparkyfitness-db" + secretKey: "SPARKY_FITNESS_APP_DB_PASSWORD" + - objectName: "sparkyfitness-secrets__JWT_SECRET" + secretPath: "kv/data/atlas/health/sparkyfitness-secrets" + secretKey: "JWT_SECRET" + - objectName: "sparkyfitness-secrets__SPARKY_FITNESS_API_ENCRYPTION_KEY" + secretPath: "kv/data/atlas/health/sparkyfitness-secrets" + secretKey: "SPARKY_FITNESS_API_ENCRYPTION_KEY" + - objectName: "sparkyfitness-admin__email" + secretPath: "kv/data/atlas/health/sparkyfitness-admin" + secretKey: "email" + - objectName: "sparkyfitness-admin__password" + secretPath: "kv/data/atlas/health/sparkyfitness-admin" + secretKey: "password" + - objectName: "sparkyfitness-oidc__client_id" + secretPath: "kv/data/atlas/health/sparkyfitness-oidc" + secretKey: "client_id" + - objectName: "sparkyfitness-oidc__client_secret" + secretPath: "kv/data/atlas/health/sparkyfitness-oidc" + secretKey: "client_secret" + - objectName: "sparkyfitness-oidc__issuer_url" + secretPath: "kv/data/atlas/health/sparkyfitness-oidc" + secretKey: "issuer_url" + secretObjects: + - secretName: endurain-db + type: Opaque + data: + - objectName: endurain-db__DB_HOST + key: DB_HOST + - objectName: endurain-db__DB_PORT + key: DB_PORT + - objectName: endurain-db__DB_USER + key: DB_USER + - objectName: endurain-db__DB_PASSWORD + key: DB_PASSWORD + - objectName: endurain-db__DB_DATABASE + key: DB_DATABASE + - secretName: endurain-secrets + type: Opaque + data: + - 
objectName: endurain-secrets__SECRET_KEY + key: SECRET_KEY + - objectName: endurain-secrets__FERNET_KEY + key: FERNET_KEY + - secretName: endurain-admin + type: Opaque + data: + - objectName: endurain-admin__username + key: username + - objectName: endurain-admin__password + key: password + - secretName: endurain-oidc + type: Opaque + data: + - objectName: endurain-oidc__client_id + key: client_id + - objectName: endurain-oidc__client_secret + key: client_secret + - objectName: endurain-oidc__issuer_url + key: issuer_url + - secretName: sparkyfitness-db + type: Opaque + data: + - objectName: sparkyfitness-db__SPARKY_FITNESS_DB_HOST + key: SPARKY_FITNESS_DB_HOST + - objectName: sparkyfitness-db__SPARKY_FITNESS_DB_PORT + key: SPARKY_FITNESS_DB_PORT + - objectName: sparkyfitness-db__SPARKY_FITNESS_DB_NAME + key: SPARKY_FITNESS_DB_NAME + - objectName: sparkyfitness-db__SPARKY_FITNESS_DB_USER + key: SPARKY_FITNESS_DB_USER + - objectName: sparkyfitness-db__SPARKY_FITNESS_DB_PASSWORD + key: SPARKY_FITNESS_DB_PASSWORD + - objectName: sparkyfitness-db__SPARKY_FITNESS_APP_DB_USER + key: SPARKY_FITNESS_APP_DB_USER + - objectName: sparkyfitness-db__SPARKY_FITNESS_APP_DB_PASSWORD + key: SPARKY_FITNESS_APP_DB_PASSWORD + - secretName: sparkyfitness-secrets + type: Opaque + data: + - objectName: sparkyfitness-secrets__JWT_SECRET + key: JWT_SECRET + - objectName: sparkyfitness-secrets__SPARKY_FITNESS_API_ENCRYPTION_KEY + key: SPARKY_FITNESS_API_ENCRYPTION_KEY + - secretName: sparkyfitness-admin + type: Opaque + data: + - objectName: sparkyfitness-admin__email + key: email + - objectName: sparkyfitness-admin__password + key: password + - secretName: sparkyfitness-oidc + type: Opaque + data: + - objectName: sparkyfitness-oidc__client_id + key: client_id + - objectName: sparkyfitness-oidc__client_secret + key: client_secret + - objectName: sparkyfitness-oidc__issuer_url + key: issuer_url diff --git a/services/health/serviceaccount.yaml b/services/health/serviceaccount.yaml new file 
mode 100644 index 0000000..78046ba --- /dev/null +++ b/services/health/serviceaccount.yaml @@ -0,0 +1,6 @@ +# services/health/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: health-vault-sync + namespace: health diff --git a/services/health/sparkyfitness-data-pvc.yaml b/services/health/sparkyfitness-data-pvc.yaml new file mode 100644 index 0000000..0fbcf7b --- /dev/null +++ b/services/health/sparkyfitness-data-pvc.yaml @@ -0,0 +1,12 @@ +# services/health/sparkyfitness-data-pvc.yaml +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: sparkyfitness-data + namespace: health +spec: + accessModes: ["ReadWriteOnce"] + storageClassName: asteria + resources: + requests: + storage: 10Gi diff --git a/services/health/sparkyfitness-frontend-deployment.yaml b/services/health/sparkyfitness-frontend-deployment.yaml new file mode 100644 index 0000000..38df36a --- /dev/null +++ b/services/health/sparkyfitness-frontend-deployment.yaml @@ -0,0 +1,81 @@ +# services/health/sparkyfitness-frontend-deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: sparkyfitness-frontend + namespace: health + labels: + app: sparkyfitness-frontend +spec: + replicas: 1 + selector: + matchLabels: + app: sparkyfitness-frontend + strategy: + type: RollingUpdate + rollingUpdate: + maxSurge: 0 + maxUnavailable: 1 + template: + metadata: + labels: + app: sparkyfitness-frontend + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: hardware + operator: In + values: ["rpi5", "rpi4"] + - key: node-role.kubernetes.io/worker + operator: Exists + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 90 + preference: + matchExpressions: + - key: hardware + operator: In + values: ["rpi5"] + - weight: 70 + preference: + matchExpressions: + - key: hardware + operator: In + values: ["rpi4"] + containers: + - name: sparkyfitness-frontend + image: 
codewithcj/sparkyfitness:0.16.3.3 + imagePullPolicy: IfNotPresent + ports: + - name: http + containerPort: 80 + env: + - name: SPARKY_FITNESS_SERVER_HOST + value: sparkyfitness-server + - name: SPARKY_FITNESS_SERVER_PORT + value: "3010" + readinessProbe: + httpGet: + path: / + port: http + initialDelaySeconds: 10 + periodSeconds: 10 + timeoutSeconds: 3 + failureThreshold: 6 + livenessProbe: + httpGet: + path: / + port: http + initialDelaySeconds: 30 + periodSeconds: 20 + timeoutSeconds: 3 + failureThreshold: 6 + resources: + requests: + cpu: 100m + memory: 256Mi + limits: + cpu: 500m + memory: 512Mi diff --git a/services/health/sparkyfitness-frontend-service.yaml b/services/health/sparkyfitness-frontend-service.yaml new file mode 100644 index 0000000..0850d6c --- /dev/null +++ b/services/health/sparkyfitness-frontend-service.yaml @@ -0,0 +1,15 @@ +# services/health/sparkyfitness-frontend-service.yaml +apiVersion: v1 +kind: Service +metadata: + name: sparkyfitness-frontend + namespace: health + labels: + app: sparkyfitness-frontend +spec: + selector: + app: sparkyfitness-frontend + ports: + - name: http + port: 80 + targetPort: http diff --git a/services/health/sparkyfitness-ingress.yaml b/services/health/sparkyfitness-ingress.yaml new file mode 100644 index 0000000..b9d5758 --- /dev/null +++ b/services/health/sparkyfitness-ingress.yaml @@ -0,0 +1,26 @@ +# services/health/sparkyfitness-ingress.yaml +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: sparkyfitness + namespace: health + annotations: + kubernetes.io/ingress.class: traefik + traefik.ingress.kubernetes.io/router.entrypoints: websecure + traefik.ingress.kubernetes.io/router.tls: "true" + cert-manager.io/cluster-issuer: letsencrypt +spec: + tls: + - hosts: ["sparkyfitness.bstein.dev"] + secretName: sparkyfitness-tls + rules: + - host: sparkyfitness.bstein.dev + http: + paths: + - path: / + pathType: Prefix + backend: + service: + name: sparkyfitness-frontend + port: + number: 80 diff --git 
a/services/health/sparkyfitness-oidc-config-cronjob.yaml b/services/health/sparkyfitness-oidc-config-cronjob.yaml new file mode 100644 index 0000000..a20c1f1 --- /dev/null +++ b/services/health/sparkyfitness-oidc-config-cronjob.yaml @@ -0,0 +1,89 @@ +# services/health/sparkyfitness-oidc-config-cronjob.yaml +apiVersion: batch/v1 +kind: CronJob +metadata: + name: sparkyfitness-oidc-config + namespace: health +spec: + schedule: "*/30 * * * *" + concurrencyPolicy: Forbid + successfulJobsHistoryLimit: 1 + failedJobsHistoryLimit: 3 + jobTemplate: + spec: + backoffLimit: 1 + template: + spec: + serviceAccountName: health-vault-sync + restartPolicy: Never + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/arch + operator: In + values: ["arm64"] + - key: node-role.kubernetes.io/worker + operator: Exists + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 90 + preference: + matchExpressions: + - key: hardware + operator: In + values: ["rpi5"] + - weight: 70 + preference: + matchExpressions: + - key: hardware + operator: In + values: ["rpi4"] + containers: + - name: configure + image: alpine:3.20 + command: ["/bin/sh", "-c"] + args: + - | + set -euo pipefail + apk add --no-cache bash curl jq >/dev/null + exec /scripts/sparkyfitness_oidc_configure.sh + env: + - name: SPARKYFITNESS_BASE_URL + value: http://sparkyfitness-server.health.svc.cluster.local:3010 + - name: SPARKYFITNESS_FRONTEND_URL + value: https://sparkyfitness.bstein.dev + - name: SPARKYFITNESS_ADMIN_EMAIL + valueFrom: + secretKeyRef: + name: sparkyfitness-admin + key: email + - name: SPARKYFITNESS_ADMIN_PASSWORD + valueFrom: + secretKeyRef: + name: sparkyfitness-admin + key: password + - name: SPARKYFITNESS_OIDC_CLIENT_ID + valueFrom: + secretKeyRef: + name: sparkyfitness-oidc + key: client_id + - name: SPARKYFITNESS_OIDC_CLIENT_SECRET + valueFrom: + secretKeyRef: + name: sparkyfitness-oidc + key: client_secret 
+ - name: SPARKYFITNESS_OIDC_ISSUER_URL + valueFrom: + secretKeyRef: + name: sparkyfitness-oidc + key: issuer_url + volumeMounts: + - name: sparkyfitness-oidc-config-script + mountPath: /scripts + readOnly: true + volumes: + - name: sparkyfitness-oidc-config-script + configMap: + name: sparkyfitness-oidc-config-script + defaultMode: 0555 diff --git a/services/health/sparkyfitness-server-deployment.yaml b/services/health/sparkyfitness-server-deployment.yaml new file mode 100644 index 0000000..e920662 --- /dev/null +++ b/services/health/sparkyfitness-server-deployment.yaml @@ -0,0 +1,170 @@ +# services/health/sparkyfitness-server-deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: sparkyfitness-server + namespace: health + labels: + app: sparkyfitness-server +spec: + replicas: 1 + selector: + matchLabels: + app: sparkyfitness-server + strategy: + type: RollingUpdate + rollingUpdate: + maxSurge: 0 + maxUnavailable: 1 + template: + metadata: + labels: + app: sparkyfitness-server + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: hardware + operator: In + values: ["rpi5", "rpi4"] + - key: node-role.kubernetes.io/worker + operator: Exists + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 90 + preference: + matchExpressions: + - key: hardware + operator: In + values: ["rpi5"] + - weight: 70 + preference: + matchExpressions: + - key: hardware + operator: In + values: ["rpi4"] + securityContext: + runAsUser: 1000 + runAsGroup: 1000 + fsGroup: 1000 + fsGroupChangePolicy: OnRootMismatch + initContainers: + - name: init-data + image: alpine:3.20 + command: ["/bin/sh", "-c"] + args: + - | + set -e + mkdir -p /data/uploads /data/backup + chown -R 1000:1000 /data + securityContext: + runAsUser: 0 + runAsGroup: 0 + volumeMounts: + - name: sparkyfitness-data + mountPath: /data + containers: + - name: sparkyfitness-server + image: 
codewithcj/sparkyfitness_server:0.16.3.3 + imagePullPolicy: IfNotPresent + ports: + - name: http + containerPort: 3010 + env: + - name: SPARKY_FITNESS_SERVER_PORT + value: "3010" + - name: SPARKY_FITNESS_LOG_LEVEL + value: INFO + - name: NODE_ENV + value: production + - name: TZ + value: Etc/UTC + - name: SPARKY_FITNESS_FRONTEND_URL + value: https://sparkyfitness.bstein.dev + - name: SPARKY_FITNESS_DISABLE_SIGNUP + value: "false" + - name: SPARKY_FITNESS_DB_HOST + valueFrom: + secretKeyRef: + name: sparkyfitness-db + key: SPARKY_FITNESS_DB_HOST + - name: SPARKY_FITNESS_DB_PORT + valueFrom: + secretKeyRef: + name: sparkyfitness-db + key: SPARKY_FITNESS_DB_PORT + - name: SPARKY_FITNESS_DB_NAME + valueFrom: + secretKeyRef: + name: sparkyfitness-db + key: SPARKY_FITNESS_DB_NAME + - name: SPARKY_FITNESS_DB_USER + valueFrom: + secretKeyRef: + name: sparkyfitness-db + key: SPARKY_FITNESS_DB_USER + - name: SPARKY_FITNESS_DB_PASSWORD + valueFrom: + secretKeyRef: + name: sparkyfitness-db + key: SPARKY_FITNESS_DB_PASSWORD + - name: SPARKY_FITNESS_APP_DB_USER + valueFrom: + secretKeyRef: + name: sparkyfitness-db + key: SPARKY_FITNESS_APP_DB_USER + - name: SPARKY_FITNESS_APP_DB_PASSWORD + valueFrom: + secretKeyRef: + name: sparkyfitness-db + key: SPARKY_FITNESS_APP_DB_PASSWORD + - name: SPARKY_FITNESS_API_ENCRYPTION_KEY + valueFrom: + secretKeyRef: + name: sparkyfitness-secrets + key: SPARKY_FITNESS_API_ENCRYPTION_KEY + - name: JWT_SECRET + valueFrom: + secretKeyRef: + name: sparkyfitness-secrets + key: JWT_SECRET + - name: SPARKY_FITNESS_ADMIN_EMAIL + valueFrom: + secretKeyRef: + name: sparkyfitness-admin + key: email + volumeMounts: + - name: sparkyfitness-data + mountPath: /app/SparkyFitnessServer/uploads + subPath: uploads + - name: sparkyfitness-data + mountPath: /app/SparkyFitnessServer/backup + subPath: backup + readinessProbe: + httpGet: + path: /health + port: http + initialDelaySeconds: 15 + periodSeconds: 10 + timeoutSeconds: 3 + failureThreshold: 6 + livenessProbe: 
+            httpGet:
+              path: /health
+              port: http
+            initialDelaySeconds: 30
+            periodSeconds: 20
+            timeoutSeconds: 3
+            failureThreshold: 6
+          resources:
+            requests:
+              cpu: 200m
+              memory: 512Mi
+            limits:
+              cpu: "1"
+              memory: 2Gi
+      volumes:
+        - name: sparkyfitness-data
+          persistentVolumeClaim:
+            claimName: sparkyfitness-data
diff --git a/services/health/sparkyfitness-server-service.yaml b/services/health/sparkyfitness-server-service.yaml
new file mode 100644
index 0000000..91220f9
--- /dev/null
+++ b/services/health/sparkyfitness-server-service.yaml
@@ -0,0 +1,15 @@
+# services/health/sparkyfitness-server-service.yaml
+apiVersion: v1
+kind: Service
+metadata:
+  name: sparkyfitness-server
+  namespace: health
+  labels:
+    app: sparkyfitness-server
+spec:
+  selector:
+    app: sparkyfitness-server
+  ports:
+    - name: http
+      port: 3010
+      targetPort: http
diff --git a/services/health/vault-sync-deployment.yaml b/services/health/vault-sync-deployment.yaml
new file mode 100644
index 0000000..7b4c08e
--- /dev/null
+++ b/services/health/vault-sync-deployment.yaml
@@ -0,0 +1,34 @@
+# services/health/vault-sync-deployment.yaml
+# Keeps a pod alive so the CSI volume mount forces the Secrets Store driver
+# to sync Vault data into the health namespace's Kubernetes Secrets.
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: health-vault-sync
+  namespace: health
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: health-vault-sync
+  template:
+    metadata:
+      labels:
+        app: health-vault-sync
+    spec:
+      serviceAccountName: health-vault-sync
+      containers:
+        - name: sync
+          image: alpine:3.20
+          command: ["/bin/sh", "-c"]
+          args:
+            - "while :; do sleep 3600; done" # BusyBox sleep rejects GNU's "infinity"; loop instead
+          volumeMounts:
+            - name: vault-secrets
+              mountPath: /vault/secrets
+              readOnly: true
+      volumes:
+        - name: vault-secrets
+          csi:
+            driver: secrets-store.csi.k8s.io
+            readOnly: true
+            volumeAttributes:
+              secretProviderClass: health-vault
diff --git a/services/keycloak/deployment.yaml b/services/keycloak/deployment.yaml
index b2842b1..6f6fd2b 100644
--- a/services/keycloak/deployment.yaml
+++ b/services/keycloak/deployment.yaml
@@ -25,58 +25,43 @@ spec:
         vault.hashicorp.com/role: "sso"
vault.hashicorp.com/agent-inject-secret-keycloak-env.sh: "kv/data/atlas/shared/keycloak-admin" vault.hashicorp.com/agent-inject-template-keycloak-env.sh: | - {{- with secret "kv/data/atlas/shared/keycloak-admin" -}} + {{ with secret "kv/data/atlas/shared/keycloak-admin" }} export KEYCLOAK_ADMIN="{{ .Data.data.username }}" export KEYCLOAK_ADMIN_USER="{{ .Data.data.username }}" export KEYCLOAK_ADMIN_PASSWORD="{{ .Data.data.password }}" - {{- end }} - {{- with secret "kv/data/atlas/sso/keycloak-db" -}} + {{ end }} + {{ with secret "kv/data/atlas/sso/keycloak-db" }} export KC_DB_URL_DATABASE="{{ .Data.data.POSTGRES_DATABASE }}" export KC_DB_USERNAME="{{ .Data.data.POSTGRES_USER }}" export KC_DB_PASSWORD="{{ .Data.data.POSTGRES_PASSWORD }}" - {{- end }} - {{- with secret "kv/data/atlas/shared/portal-e2e-client" -}} + {{ end }} + {{ with secret "kv/data/atlas/shared/portal-e2e-client" }} export PORTAL_E2E_CLIENT_ID="{{ .Data.data.client_id }}" export PORTAL_E2E_CLIENT_SECRET="{{ .Data.data.client_secret }}" - {{- end }} - {{- with secret "kv/data/atlas/sso/openldap-admin" -}} + {{ end }} + {{ with secret "kv/data/atlas/sso/openldap-admin" }} export LDAP_ADMIN_PASSWORD="{{ .Data.data.LDAP_ADMIN_PASSWORD }}" export LDAP_CONFIG_PASSWORD="{{ .Data.data.LDAP_CONFIG_PASSWORD }}" export LDAP_BIND_PASSWORD="${LDAP_ADMIN_PASSWORD}" - {{- end }} - {{- with secret "kv/data/atlas/shared/postmark-relay" -}} + {{ end }} + {{ with secret "kv/data/atlas/shared/postmark-relay" }} export KEYCLOAK_SMTP_USER="{{ index .Data.data "relay-username" }}" export KEYCLOAK_SMTP_PASSWORD="{{ index .Data.data "relay-password" }}" - {{- end }} + {{ end }} spec: serviceAccountName: sso-vault + nodeSelector: + kubernetes.io/arch: amd64 + node-role.kubernetes.io/accelerator: "true" affinity: nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: hardware - operator: In - values: ["rpi5","rpi4"] - - key: node-role.kubernetes.io/worker - 
operator: Exists - - matchExpressions: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: - key: kubernetes.io/hostname operator: In - values: ["titan-24"] - preferredDuringSchedulingIgnoredDuringExecution: - - weight: 90 - preference: - matchExpressions: - - key: hardware - operator: In - values: ["rpi5"] - - weight: 70 - preference: - matchExpressions: - - key: hardware - operator: In - values: ["rpi4"] + values: ["titan-22"] securityContext: runAsUser: 1000 runAsGroup: 0 diff --git a/services/keycloak/endurain-oidc-secret-ensure-job.yaml b/services/keycloak/endurain-oidc-secret-ensure-job.yaml new file mode 100644 index 0000000..9870f1d --- /dev/null +++ b/services/keycloak/endurain-oidc-secret-ensure-job.yaml @@ -0,0 +1,52 @@ +# services/keycloak/endurain-oidc-secret-ensure-job.yaml +apiVersion: batch/v1 +kind: Job +metadata: + name: endurain-oidc-secret-ensure-1 + namespace: sso +spec: + backoffLimit: 0 + ttlSecondsAfterFinished: 3600 + template: + metadata: + annotations: + vault.hashicorp.com/agent-inject: "true" + vault.hashicorp.com/role: "sso-secrets" + vault.hashicorp.com/agent-inject-secret-keycloak-admin-env.sh: "kv/data/atlas/shared/keycloak-admin" + vault.hashicorp.com/agent-inject-template-keycloak-admin-env.sh: | + {{ with secret "kv/data/atlas/shared/keycloak-admin" }} + export KEYCLOAK_ADMIN="{{ .Data.data.username }}" + export KEYCLOAK_ADMIN_USER="{{ .Data.data.username }}" + export KEYCLOAK_ADMIN_PASSWORD="{{ .Data.data.password }}" + {{ end }} + spec: + serviceAccountName: mas-secrets-ensure + restartPolicy: Never + volumes: + - name: endurain-oidc-secret-ensure-script + configMap: + name: endurain-oidc-secret-ensure-script + defaultMode: 0555 + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/arch + operator: In + values: ["arm64"] + - key: node-role.kubernetes.io/worker + operator: Exists + 
containers: + - name: apply + image: alpine:3.20 + command: ["/bin/sh", "-c"] + args: + - | + set -euo pipefail + apk add --no-cache bash curl jq >/dev/null + exec /scripts/endurain_oidc_secret_ensure.sh + volumeMounts: + - name: endurain-oidc-secret-ensure-script + mountPath: /scripts + readOnly: true diff --git a/services/keycloak/harbor-oidc-secret-ensure-job.yaml b/services/keycloak/harbor-oidc-secret-ensure-job.yaml index aa51f4a..999cb64 100644 --- a/services/keycloak/harbor-oidc-secret-ensure-job.yaml +++ b/services/keycloak/harbor-oidc-secret-ensure-job.yaml @@ -14,11 +14,11 @@ spec: vault.hashicorp.com/role: "sso-secrets" vault.hashicorp.com/agent-inject-secret-keycloak-admin-env.sh: "kv/data/atlas/shared/keycloak-admin" vault.hashicorp.com/agent-inject-template-keycloak-admin-env.sh: | - {{- with secret "kv/data/atlas/shared/keycloak-admin" -}} + {{ with secret "kv/data/atlas/shared/keycloak-admin" }} export KEYCLOAK_ADMIN="{{ .Data.data.username }}" export KEYCLOAK_ADMIN_USER="{{ .Data.data.username }}" export KEYCLOAK_ADMIN_PASSWORD="{{ .Data.data.password }}" - {{- end }} + {{ end }} spec: serviceAccountName: mas-secrets-ensure restartPolicy: Never diff --git a/services/keycloak/kustomization.yaml b/services/keycloak/kustomization.yaml index e141467..6eb2691 100644 --- a/services/keycloak/kustomization.yaml +++ b/services/keycloak/kustomization.yaml @@ -22,6 +22,8 @@ resources: - synapse-oidc-secret-ensure-job.yaml - logs-oidc-secret-ensure-job.yaml - harbor-oidc-secret-ensure-job.yaml + - endurain-oidc-secret-ensure-job.yaml + - sparkyfitness-oidc-secret-ensure-job.yaml - vault-oidc-secret-ensure-job.yaml - service.yaml - ingress.yaml @@ -35,6 +37,12 @@ configMapGenerator: - name: harbor-oidc-secret-ensure-script files: - harbor_oidc_secret_ensure.sh=scripts/harbor_oidc_secret_ensure.sh + - name: endurain-oidc-secret-ensure-script + files: + - endurain_oidc_secret_ensure.sh=scripts/endurain_oidc_secret_ensure.sh + - name: 
sparkyfitness-oidc-secret-ensure-script + files: + - sparkyfitness_oidc_secret_ensure.sh=scripts/sparkyfitness_oidc_secret_ensure.sh - name: vault-oidc-secret-ensure-script files: - vault_oidc_secret_ensure.sh=scripts/vault_oidc_secret_ensure.sh diff --git a/services/keycloak/ldap-federation-job.yaml b/services/keycloak/ldap-federation-job.yaml index 68ce057..1d3d09e 100644 --- a/services/keycloak/ldap-federation-job.yaml +++ b/services/keycloak/ldap-federation-job.yaml @@ -13,29 +13,29 @@ spec: vault.hashicorp.com/role: "sso" vault.hashicorp.com/agent-inject-secret-keycloak-env.sh: "kv/data/atlas/shared/keycloak-admin" vault.hashicorp.com/agent-inject-template-keycloak-env.sh: | - {{- with secret "kv/data/atlas/shared/keycloak-admin" -}} + {{ with secret "kv/data/atlas/shared/keycloak-admin" }} export KEYCLOAK_ADMIN="{{ .Data.data.username }}" export KEYCLOAK_ADMIN_USER="{{ .Data.data.username }}" export KEYCLOAK_ADMIN_PASSWORD="{{ .Data.data.password }}" - {{- end }} - {{- with secret "kv/data/atlas/sso/keycloak-db" -}} + {{ end }} + {{ with secret "kv/data/atlas/sso/keycloak-db" }} export KC_DB_URL_DATABASE="{{ .Data.data.POSTGRES_DATABASE }}" export KC_DB_USERNAME="{{ .Data.data.POSTGRES_USER }}" export KC_DB_PASSWORD="{{ .Data.data.POSTGRES_PASSWORD }}" - {{- end }} - {{- with secret "kv/data/atlas/shared/portal-e2e-client" -}} + {{ end }} + {{ with secret "kv/data/atlas/shared/portal-e2e-client" }} export PORTAL_E2E_CLIENT_ID="{{ .Data.data.client_id }}" export PORTAL_E2E_CLIENT_SECRET="{{ .Data.data.client_secret }}" - {{- end }} - {{- with secret "kv/data/atlas/sso/openldap-admin" -}} + {{ end }} + {{ with secret "kv/data/atlas/sso/openldap-admin" }} export LDAP_ADMIN_PASSWORD="{{ .Data.data.LDAP_ADMIN_PASSWORD }}" export LDAP_CONFIG_PASSWORD="{{ .Data.data.LDAP_CONFIG_PASSWORD }}" export LDAP_BIND_PASSWORD="${LDAP_ADMIN_PASSWORD}" - {{- end }} - {{- with secret "kv/data/atlas/shared/postmark-relay" -}} + {{ end }} + {{ with secret 
"kv/data/atlas/shared/postmark-relay" }} export KEYCLOAK_SMTP_USER="{{ index .Data.data "relay-username" }}" export KEYCLOAK_SMTP_PASSWORD="{{ index .Data.data "relay-password" }}" - {{- end }} + {{ end }} spec: affinity: nodeAffinity: diff --git a/services/keycloak/logs-oidc-secret-ensure-job.yaml b/services/keycloak/logs-oidc-secret-ensure-job.yaml index 7fc3097..cd6cc62 100644 --- a/services/keycloak/logs-oidc-secret-ensure-job.yaml +++ b/services/keycloak/logs-oidc-secret-ensure-job.yaml @@ -14,11 +14,11 @@ spec: vault.hashicorp.com/role: "sso-secrets" vault.hashicorp.com/agent-inject-secret-keycloak-admin-env.sh: "kv/data/atlas/shared/keycloak-admin" vault.hashicorp.com/agent-inject-template-keycloak-admin-env.sh: | - {{- with secret "kv/data/atlas/shared/keycloak-admin" -}} + {{ with secret "kv/data/atlas/shared/keycloak-admin" }} export KEYCLOAK_ADMIN="{{ .Data.data.username }}" export KEYCLOAK_ADMIN_USER="{{ .Data.data.username }}" export KEYCLOAK_ADMIN_PASSWORD="{{ .Data.data.password }}" - {{- end }} + {{ end }} spec: serviceAccountName: mas-secrets-ensure restartPolicy: Never diff --git a/services/keycloak/mas-secrets-ensure-job.yaml b/services/keycloak/mas-secrets-ensure-job.yaml index 3b6e15e..4ad24e5 100644 --- a/services/keycloak/mas-secrets-ensure-job.yaml +++ b/services/keycloak/mas-secrets-ensure-job.yaml @@ -23,11 +23,11 @@ spec: vault.hashicorp.com/role: "sso-secrets" vault.hashicorp.com/agent-inject-secret-keycloak-admin-env.sh: "kv/data/atlas/shared/keycloak-admin" vault.hashicorp.com/agent-inject-template-keycloak-admin-env.sh: | - {{- with secret "kv/data/atlas/shared/keycloak-admin" -}} + {{ with secret "kv/data/atlas/shared/keycloak-admin" }} export KEYCLOAK_ADMIN="{{ .Data.data.username }}" export KEYCLOAK_ADMIN_USER="{{ .Data.data.username }}" export KEYCLOAK_ADMIN_PASSWORD="{{ .Data.data.password }}" - {{- end }} + {{ end }} spec: serviceAccountName: mas-secrets-ensure restartPolicy: Never diff --git 
a/services/keycloak/portal-e2e-client-job.yaml b/services/keycloak/portal-e2e-client-job.yaml index 2cb50ca..7801690 100644 --- a/services/keycloak/portal-e2e-client-job.yaml +++ b/services/keycloak/portal-e2e-client-job.yaml @@ -13,29 +13,29 @@ spec: vault.hashicorp.com/role: "sso" vault.hashicorp.com/agent-inject-secret-keycloak-env.sh: "kv/data/atlas/shared/keycloak-admin" vault.hashicorp.com/agent-inject-template-keycloak-env.sh: | - {{- with secret "kv/data/atlas/shared/keycloak-admin" -}} + {{ with secret "kv/data/atlas/shared/keycloak-admin" }} export KEYCLOAK_ADMIN="{{ .Data.data.username }}" export KEYCLOAK_ADMIN_USER="{{ .Data.data.username }}" export KEYCLOAK_ADMIN_PASSWORD="{{ .Data.data.password }}" - {{- end }} - {{- with secret "kv/data/atlas/sso/keycloak-db" -}} + {{ end }} + {{ with secret "kv/data/atlas/sso/keycloak-db" }} export KC_DB_URL_DATABASE="{{ .Data.data.POSTGRES_DATABASE }}" export KC_DB_USERNAME="{{ .Data.data.POSTGRES_USER }}" export KC_DB_PASSWORD="{{ .Data.data.POSTGRES_PASSWORD }}" - {{- end }} - {{- with secret "kv/data/atlas/shared/portal-e2e-client" -}} + {{ end }} + {{ with secret "kv/data/atlas/shared/portal-e2e-client" }} export PORTAL_E2E_CLIENT_ID="{{ .Data.data.client_id }}" export PORTAL_E2E_CLIENT_SECRET="{{ .Data.data.client_secret }}" - {{- end }} - {{- with secret "kv/data/atlas/sso/openldap-admin" -}} + {{ end }} + {{ with secret "kv/data/atlas/sso/openldap-admin" }} export LDAP_ADMIN_PASSWORD="{{ .Data.data.LDAP_ADMIN_PASSWORD }}" export LDAP_CONFIG_PASSWORD="{{ .Data.data.LDAP_CONFIG_PASSWORD }}" export LDAP_BIND_PASSWORD="${LDAP_ADMIN_PASSWORD}" - {{- end }} - {{- with secret "kv/data/atlas/shared/postmark-relay" -}} + {{ end }} + {{ with secret "kv/data/atlas/shared/postmark-relay" }} export KEYCLOAK_SMTP_USER="{{ index .Data.data "relay-username" }}" export KEYCLOAK_SMTP_PASSWORD="{{ index .Data.data "relay-password" }}" - {{- end }} + {{ end }} spec: restartPolicy: Never serviceAccountName: sso-vault diff --git 
a/services/keycloak/portal-e2e-execute-actions-email-test-job.yaml b/services/keycloak/portal-e2e-execute-actions-email-test-job.yaml index c80e3eb..5bf70ff 100644 --- a/services/keycloak/portal-e2e-execute-actions-email-test-job.yaml +++ b/services/keycloak/portal-e2e-execute-actions-email-test-job.yaml @@ -13,29 +13,29 @@ spec: vault.hashicorp.com/role: "sso" vault.hashicorp.com/agent-inject-secret-keycloak-env.sh: "kv/data/atlas/shared/keycloak-admin" vault.hashicorp.com/agent-inject-template-keycloak-env.sh: | - {{- with secret "kv/data/atlas/shared/keycloak-admin" -}} + {{ with secret "kv/data/atlas/shared/keycloak-admin" }} export KEYCLOAK_ADMIN="{{ .Data.data.username }}" export KEYCLOAK_ADMIN_USER="{{ .Data.data.username }}" export KEYCLOAK_ADMIN_PASSWORD="{{ .Data.data.password }}" - {{- end }} - {{- with secret "kv/data/atlas/sso/keycloak-db" -}} + {{ end }} + {{ with secret "kv/data/atlas/sso/keycloak-db" }} export KC_DB_URL_DATABASE="{{ .Data.data.POSTGRES_DATABASE }}" export KC_DB_USERNAME="{{ .Data.data.POSTGRES_USER }}" export KC_DB_PASSWORD="{{ .Data.data.POSTGRES_PASSWORD }}" - {{- end }} - {{- with secret "kv/data/atlas/shared/portal-e2e-client" -}} + {{ end }} + {{ with secret "kv/data/atlas/shared/portal-e2e-client" }} export PORTAL_E2E_CLIENT_ID="{{ .Data.data.client_id }}" export PORTAL_E2E_CLIENT_SECRET="{{ .Data.data.client_secret }}" - {{- end }} - {{- with secret "kv/data/atlas/sso/openldap-admin" -}} + {{ end }} + {{ with secret "kv/data/atlas/sso/openldap-admin" }} export LDAP_ADMIN_PASSWORD="{{ .Data.data.LDAP_ADMIN_PASSWORD }}" export LDAP_CONFIG_PASSWORD="{{ .Data.data.LDAP_CONFIG_PASSWORD }}" export LDAP_BIND_PASSWORD="${LDAP_ADMIN_PASSWORD}" - {{- end }} - {{- with secret "kv/data/atlas/shared/postmark-relay" -}} + {{ end }} + {{ with secret "kv/data/atlas/shared/postmark-relay" }} export KEYCLOAK_SMTP_USER="{{ index .Data.data "relay-username" }}" export KEYCLOAK_SMTP_PASSWORD="{{ index .Data.data "relay-password" }}" - {{- end }} 
+ {{ end }} spec: restartPolicy: Never serviceAccountName: sso-vault diff --git a/services/keycloak/portal-e2e-target-client-job.yaml b/services/keycloak/portal-e2e-target-client-job.yaml index c4dcd0f..68779bd 100644 --- a/services/keycloak/portal-e2e-target-client-job.yaml +++ b/services/keycloak/portal-e2e-target-client-job.yaml @@ -13,29 +13,29 @@ spec: vault.hashicorp.com/role: "sso" vault.hashicorp.com/agent-inject-secret-keycloak-env.sh: "kv/data/atlas/shared/keycloak-admin" vault.hashicorp.com/agent-inject-template-keycloak-env.sh: | - {{- with secret "kv/data/atlas/shared/keycloak-admin" -}} + {{ with secret "kv/data/atlas/shared/keycloak-admin" }} export KEYCLOAK_ADMIN="{{ .Data.data.username }}" export KEYCLOAK_ADMIN_USER="{{ .Data.data.username }}" export KEYCLOAK_ADMIN_PASSWORD="{{ .Data.data.password }}" - {{- end }} - {{- with secret "kv/data/atlas/sso/keycloak-db" -}} + {{ end }} + {{ with secret "kv/data/atlas/sso/keycloak-db" }} export KC_DB_URL_DATABASE="{{ .Data.data.POSTGRES_DATABASE }}" export KC_DB_USERNAME="{{ .Data.data.POSTGRES_USER }}" export KC_DB_PASSWORD="{{ .Data.data.POSTGRES_PASSWORD }}" - {{- end }} - {{- with secret "kv/data/atlas/shared/portal-e2e-client" -}} + {{ end }} + {{ with secret "kv/data/atlas/shared/portal-e2e-client" }} export PORTAL_E2E_CLIENT_ID="{{ .Data.data.client_id }}" export PORTAL_E2E_CLIENT_SECRET="{{ .Data.data.client_secret }}" - {{- end }} - {{- with secret "kv/data/atlas/sso/openldap-admin" -}} + {{ end }} + {{ with secret "kv/data/atlas/sso/openldap-admin" }} export LDAP_ADMIN_PASSWORD="{{ .Data.data.LDAP_ADMIN_PASSWORD }}" export LDAP_CONFIG_PASSWORD="{{ .Data.data.LDAP_CONFIG_PASSWORD }}" export LDAP_BIND_PASSWORD="${LDAP_ADMIN_PASSWORD}" - {{- end }} - {{- with secret "kv/data/atlas/shared/postmark-relay" -}} + {{ end }} + {{ with secret "kv/data/atlas/shared/postmark-relay" }} export KEYCLOAK_SMTP_USER="{{ index .Data.data "relay-username" }}" export KEYCLOAK_SMTP_PASSWORD="{{ index .Data.data 
"relay-password" }}" - {{- end }} + {{ end }} spec: restartPolicy: Never serviceAccountName: sso-vault diff --git a/services/keycloak/portal-e2e-token-exchange-permissions-job.yaml b/services/keycloak/portal-e2e-token-exchange-permissions-job.yaml index cbd21ac..59a89f7 100644 --- a/services/keycloak/portal-e2e-token-exchange-permissions-job.yaml +++ b/services/keycloak/portal-e2e-token-exchange-permissions-job.yaml @@ -13,29 +13,29 @@ spec: vault.hashicorp.com/role: "sso" vault.hashicorp.com/agent-inject-secret-keycloak-env.sh: "kv/data/atlas/shared/keycloak-admin" vault.hashicorp.com/agent-inject-template-keycloak-env.sh: | - {{- with secret "kv/data/atlas/shared/keycloak-admin" -}} + {{ with secret "kv/data/atlas/shared/keycloak-admin" }} export KEYCLOAK_ADMIN="{{ .Data.data.username }}" export KEYCLOAK_ADMIN_USER="{{ .Data.data.username }}" export KEYCLOAK_ADMIN_PASSWORD="{{ .Data.data.password }}" - {{- end }} - {{- with secret "kv/data/atlas/sso/keycloak-db" -}} + {{ end }} + {{ with secret "kv/data/atlas/sso/keycloak-db" }} export KC_DB_URL_DATABASE="{{ .Data.data.POSTGRES_DATABASE }}" export KC_DB_USERNAME="{{ .Data.data.POSTGRES_USER }}" export KC_DB_PASSWORD="{{ .Data.data.POSTGRES_PASSWORD }}" - {{- end }} - {{- with secret "kv/data/atlas/shared/portal-e2e-client" -}} + {{ end }} + {{ with secret "kv/data/atlas/shared/portal-e2e-client" }} export PORTAL_E2E_CLIENT_ID="{{ .Data.data.client_id }}" export PORTAL_E2E_CLIENT_SECRET="{{ .Data.data.client_secret }}" - {{- end }} - {{- with secret "kv/data/atlas/sso/openldap-admin" -}} + {{ end }} + {{ with secret "kv/data/atlas/sso/openldap-admin" }} export LDAP_ADMIN_PASSWORD="{{ .Data.data.LDAP_ADMIN_PASSWORD }}" export LDAP_CONFIG_PASSWORD="{{ .Data.data.LDAP_CONFIG_PASSWORD }}" export LDAP_BIND_PASSWORD="${LDAP_ADMIN_PASSWORD}" - {{- end }} - {{- with secret "kv/data/atlas/shared/postmark-relay" -}} + {{ end }} + {{ with secret "kv/data/atlas/shared/postmark-relay" }} export KEYCLOAK_SMTP_USER="{{ index 
.Data.data "relay-username" }}" export KEYCLOAK_SMTP_PASSWORD="{{ index .Data.data "relay-password" }}" - {{- end }} + {{ end }} spec: restartPolicy: Never serviceAccountName: sso-vault diff --git a/services/keycloak/portal-e2e-token-exchange-test-job.yaml b/services/keycloak/portal-e2e-token-exchange-test-job.yaml index 56c7ce5..a50a635 100644 --- a/services/keycloak/portal-e2e-token-exchange-test-job.yaml +++ b/services/keycloak/portal-e2e-token-exchange-test-job.yaml @@ -14,29 +14,29 @@ spec: vault.hashicorp.com/role: "sso" vault.hashicorp.com/agent-inject-secret-keycloak-env.sh: "kv/data/atlas/shared/keycloak-admin" vault.hashicorp.com/agent-inject-template-keycloak-env.sh: | - {{- with secret "kv/data/atlas/shared/keycloak-admin" -}} + {{ with secret "kv/data/atlas/shared/keycloak-admin" }} export KEYCLOAK_ADMIN="{{ .Data.data.username }}" export KEYCLOAK_ADMIN_USER="{{ .Data.data.username }}" export KEYCLOAK_ADMIN_PASSWORD="{{ .Data.data.password }}" - {{- end }} - {{- with secret "kv/data/atlas/sso/keycloak-db" -}} + {{ end }} + {{ with secret "kv/data/atlas/sso/keycloak-db" }} export KC_DB_URL_DATABASE="{{ .Data.data.POSTGRES_DATABASE }}" export KC_DB_USERNAME="{{ .Data.data.POSTGRES_USER }}" export KC_DB_PASSWORD="{{ .Data.data.POSTGRES_PASSWORD }}" - {{- end }} - {{- with secret "kv/data/atlas/shared/portal-e2e-client" -}} + {{ end }} + {{ with secret "kv/data/atlas/shared/portal-e2e-client" }} export PORTAL_E2E_CLIENT_ID="{{ .Data.data.client_id }}" export PORTAL_E2E_CLIENT_SECRET="{{ .Data.data.client_secret }}" - {{- end }} - {{- with secret "kv/data/atlas/sso/openldap-admin" -}} + {{ end }} + {{ with secret "kv/data/atlas/sso/openldap-admin" }} export LDAP_ADMIN_PASSWORD="{{ .Data.data.LDAP_ADMIN_PASSWORD }}" export LDAP_CONFIG_PASSWORD="{{ .Data.data.LDAP_CONFIG_PASSWORD }}" export LDAP_BIND_PASSWORD="${LDAP_ADMIN_PASSWORD}" - {{- end }} - {{- with secret "kv/data/atlas/shared/postmark-relay" -}} + {{ end }} + {{ with secret 
"kv/data/atlas/shared/postmark-relay" }} export KEYCLOAK_SMTP_USER="{{ index .Data.data "relay-username" }}" export KEYCLOAK_SMTP_PASSWORD="{{ index .Data.data "relay-password" }}" - {{- end }} + {{ end }} spec: restartPolicy: Never serviceAccountName: sso-vault diff --git a/services/keycloak/realm-settings-job.yaml b/services/keycloak/realm-settings-job.yaml index f44dcd4..e650c30 100644 --- a/services/keycloak/realm-settings-job.yaml +++ b/services/keycloak/realm-settings-job.yaml @@ -13,29 +13,29 @@ spec: vault.hashicorp.com/role: "sso" vault.hashicorp.com/agent-inject-secret-keycloak-env.sh: "kv/data/atlas/shared/keycloak-admin" vault.hashicorp.com/agent-inject-template-keycloak-env.sh: | - {{- with secret "kv/data/atlas/shared/keycloak-admin" -}} + {{ with secret "kv/data/atlas/shared/keycloak-admin" }} export KEYCLOAK_ADMIN="{{ .Data.data.username }}" export KEYCLOAK_ADMIN_USER="{{ .Data.data.username }}" export KEYCLOAK_ADMIN_PASSWORD="{{ .Data.data.password }}" - {{- end }} - {{- with secret "kv/data/atlas/sso/keycloak-db" -}} + {{ end }} + {{ with secret "kv/data/atlas/sso/keycloak-db" }} export KC_DB_URL_DATABASE="{{ .Data.data.POSTGRES_DATABASE }}" export KC_DB_USERNAME="{{ .Data.data.POSTGRES_USER }}" export KC_DB_PASSWORD="{{ .Data.data.POSTGRES_PASSWORD }}" - {{- end }} - {{- with secret "kv/data/atlas/shared/portal-e2e-client" -}} + {{ end }} + {{ with secret "kv/data/atlas/shared/portal-e2e-client" }} export PORTAL_E2E_CLIENT_ID="{{ .Data.data.client_id }}" export PORTAL_E2E_CLIENT_SECRET="{{ .Data.data.client_secret }}" - {{- end }} - {{- with secret "kv/data/atlas/sso/openldap-admin" -}} + {{ end }} + {{ with secret "kv/data/atlas/sso/openldap-admin" }} export LDAP_ADMIN_PASSWORD="{{ .Data.data.LDAP_ADMIN_PASSWORD }}" export LDAP_CONFIG_PASSWORD="{{ .Data.data.LDAP_CONFIG_PASSWORD }}" export LDAP_BIND_PASSWORD="${LDAP_ADMIN_PASSWORD}" - {{- end }} - {{- with secret "kv/data/atlas/shared/postmark-relay" -}} + {{ end }} + {{ with secret 
"kv/data/atlas/shared/postmark-relay" }} export KEYCLOAK_SMTP_USER="{{ index .Data.data "relay-username" }}" export KEYCLOAK_SMTP_PASSWORD="{{ index .Data.data "relay-password" }}" - {{- end }} + {{ end }} spec: affinity: nodeAffinity: diff --git a/services/keycloak/scripts/endurain_oidc_secret_ensure.sh b/services/keycloak/scripts/endurain_oidc_secret_ensure.sh new file mode 100644 index 0000000..6b026b0 --- /dev/null +++ b/services/keycloak/scripts/endurain_oidc_secret_ensure.sh @@ -0,0 +1,87 @@ +#!/usr/bin/env bash +set -euo pipefail + +. /vault/secrets/keycloak-admin-env.sh + +KC_URL="http://keycloak.sso.svc.cluster.local" +REALM="atlas" +CLIENT_ID="endurain" +ROOT_URL="https://endurain.bstein.dev" +REDIRECT_URI="https://endurain.bstein.dev/api/v1/public/idp/callback/keycloak" +ISSUER_URL="https://sso.bstein.dev/realms/atlas" + +ACCESS_TOKEN="" +for attempt in 1 2 3 4 5; do + TOKEN_JSON="$(curl -sS -X POST "$KC_URL/realms/master/protocol/openid-connect/token" \ + -H 'Content-Type: application/x-www-form-urlencoded' \ + -d "grant_type=password" \ + -d "client_id=admin-cli" \ + -d "username=${KEYCLOAK_ADMIN}" \ + -d "password=${KEYCLOAK_ADMIN_PASSWORD}" || true)" + ACCESS_TOKEN="$(echo "$TOKEN_JSON" | jq -r '.access_token' 2>/dev/null || true)" + if [ -n "$ACCESS_TOKEN" ] && [ "$ACCESS_TOKEN" != "null" ]; then + break + fi + echo "Keycloak token request failed (attempt ${attempt})" >&2 + sleep $((attempt * 2)) +done +if [ -z "$ACCESS_TOKEN" ] || [ "$ACCESS_TOKEN" = "null" ]; then + echo "Failed to fetch Keycloak admin token" >&2 + exit 1 +fi + +CLIENT_QUERY="$(curl -sS -H "Authorization: Bearer ${ACCESS_TOKEN}" \ + "$KC_URL/admin/realms/${REALM}/clients?clientId=${CLIENT_ID}" || true)" +CLIENT_UUID="$(echo "$CLIENT_QUERY" | jq -r '.[0].id' 2>/dev/null || true)" + +if [ -z "$CLIENT_UUID" ] || [ "$CLIENT_UUID" = "null" ]; then + create_payload="$(jq -nc \ + --arg client_id "${CLIENT_ID}" \ + --arg root_url "${ROOT_URL}" \ + --arg redirect_uri "${REDIRECT_URI}" \ + 
--arg web_origin "${ROOT_URL}" \ + '{clientId:$client_id,name:"Endurain",enabled:true,protocol:"openid-connect",publicClient:false,standardFlowEnabled:true,implicitFlowEnabled:false,directAccessGrantsEnabled:false,serviceAccountsEnabled:false,redirectUris:[$redirect_uri],webOrigins:[$web_origin],rootUrl:$root_url,baseUrl:"/"}')" + status="$(curl -sS -o /dev/null -w "%{http_code}" -X POST \ + -H "Authorization: Bearer ${ACCESS_TOKEN}" \ + -H 'Content-Type: application/json' \ + -d "${create_payload}" \ + "$KC_URL/admin/realms/${REALM}/clients")" + if [ "$status" != "201" ] && [ "$status" != "204" ]; then + echo "Keycloak client create failed (status ${status})" >&2 + exit 1 + fi + CLIENT_QUERY="$(curl -sS -H "Authorization: Bearer ${ACCESS_TOKEN}" \ + "$KC_URL/admin/realms/${REALM}/clients?clientId=${CLIENT_ID}" || true)" + CLIENT_UUID="$(echo "$CLIENT_QUERY" | jq -r '.[0].id' 2>/dev/null || true)" +fi + +if [ -z "$CLIENT_UUID" ] || [ "$CLIENT_UUID" = "null" ]; then + echo "Keycloak client ${CLIENT_ID} not found" >&2 + exit 1 +fi + +CLIENT_SECRET="$(curl -sS -H "Authorization: Bearer ${ACCESS_TOKEN}" \ + "$KC_URL/admin/realms/${REALM}/clients/${CLIENT_UUID}/client-secret" | jq -r '.value' 2>/dev/null || true)" +if [ -z "$CLIENT_SECRET" ] || [ "$CLIENT_SECRET" = "null" ]; then + echo "Keycloak client secret not found" >&2 + exit 1 +fi + +vault_addr="${VAULT_ADDR:-http://vault.vault.svc.cluster.local:8200}" +vault_role="${VAULT_ROLE:-sso-secrets}" +jwt="$(cat /var/run/secrets/kubernetes.io/serviceaccount/token)" +login_payload="$(jq -nc --arg jwt "${jwt}" --arg role "${vault_role}" '{jwt:$jwt, role:$role}')" +vault_token="$(curl -sS --request POST --data "${login_payload}" \ + "${vault_addr}/v1/auth/kubernetes/login" | jq -r '.auth.client_token')" +if [ -z "${vault_token}" ] || [ "${vault_token}" = "null" ]; then + echo "vault login failed" >&2 + exit 1 +fi + +payload="$(jq -nc \ + --arg client_id "${CLIENT_ID}" \ + --arg client_secret "${CLIENT_SECRET}" \ + --arg 
issuer_url "${ISSUER_URL}" \ + '{data:{client_id:$client_id,client_secret:$client_secret,issuer_url:$issuer_url}}')" +curl -sS -X POST -H "X-Vault-Token: ${vault_token}" \ + -d "${payload}" "${vault_addr}/v1/kv/data/atlas/health/endurain-oidc" >/dev/null diff --git a/services/keycloak/scripts/sparkyfitness_oidc_secret_ensure.sh b/services/keycloak/scripts/sparkyfitness_oidc_secret_ensure.sh new file mode 100644 index 0000000..449e81c --- /dev/null +++ b/services/keycloak/scripts/sparkyfitness_oidc_secret_ensure.sh @@ -0,0 +1,87 @@ +#!/usr/bin/env bash +set -euo pipefail + +. /vault/secrets/keycloak-admin-env.sh + +KC_URL="http://keycloak.sso.svc.cluster.local" +REALM="atlas" +CLIENT_ID="sparkyfitness" +ROOT_URL="https://sparkyfitness.bstein.dev" +REDIRECT_URI="https://sparkyfitness.bstein.dev/oidc-callback" +ISSUER_URL="https://sso.bstein.dev/realms/atlas" + +ACCESS_TOKEN="" +for attempt in 1 2 3 4 5; do + TOKEN_JSON="$(curl -sS -X POST "$KC_URL/realms/master/protocol/openid-connect/token" \ + -H 'Content-Type: application/x-www-form-urlencoded' \ + -d "grant_type=password" \ + -d "client_id=admin-cli" \ + -d "username=${KEYCLOAK_ADMIN}" \ + -d "password=${KEYCLOAK_ADMIN_PASSWORD}" || true)" + ACCESS_TOKEN="$(echo "$TOKEN_JSON" | jq -r '.access_token' 2>/dev/null || true)" + if [ -n "$ACCESS_TOKEN" ] && [ "$ACCESS_TOKEN" != "null" ]; then + break + fi + echo "Keycloak token request failed (attempt ${attempt})" >&2 + sleep $((attempt * 2)) +done +if [ -z "$ACCESS_TOKEN" ] || [ "$ACCESS_TOKEN" = "null" ]; then + echo "Failed to fetch Keycloak admin token" >&2 + exit 1 +fi + +CLIENT_QUERY="$(curl -sS -H "Authorization: Bearer ${ACCESS_TOKEN}" \ + "$KC_URL/admin/realms/${REALM}/clients?clientId=${CLIENT_ID}" || true)" +CLIENT_UUID="$(echo "$CLIENT_QUERY" | jq -r '.[0].id' 2>/dev/null || true)" + +if [ -z "$CLIENT_UUID" ] || [ "$CLIENT_UUID" = "null" ]; then + create_payload="$(jq -nc \ + --arg client_id "${CLIENT_ID}" \ + --arg root_url "${ROOT_URL}" \ + --arg 
redirect_uri "${REDIRECT_URI}" \ + --arg web_origin "${ROOT_URL}" \ + '{clientId:$client_id,name:"SparkyFitness",enabled:true,protocol:"openid-connect",publicClient:false,standardFlowEnabled:true,implicitFlowEnabled:false,directAccessGrantsEnabled:false,serviceAccountsEnabled:false,redirectUris:[$redirect_uri],webOrigins:[$web_origin],rootUrl:$root_url,baseUrl:"/"}')" + status="$(curl -sS -o /dev/null -w "%{http_code}" -X POST \ + -H "Authorization: Bearer ${ACCESS_TOKEN}" \ + -H 'Content-Type: application/json' \ + -d "${create_payload}" \ + "$KC_URL/admin/realms/${REALM}/clients")" + if [ "$status" != "201" ] && [ "$status" != "204" ]; then + echo "Keycloak client create failed (status ${status})" >&2 + exit 1 + fi + CLIENT_QUERY="$(curl -sS -H "Authorization: Bearer ${ACCESS_TOKEN}" \ + "$KC_URL/admin/realms/${REALM}/clients?clientId=${CLIENT_ID}" || true)" + CLIENT_UUID="$(echo "$CLIENT_QUERY" | jq -r '.[0].id' 2>/dev/null || true)" +fi + +if [ -z "$CLIENT_UUID" ] || [ "$CLIENT_UUID" = "null" ]; then + echo "Keycloak client ${CLIENT_ID} not found" >&2 + exit 1 +fi + +CLIENT_SECRET="$(curl -sS -H "Authorization: Bearer ${ACCESS_TOKEN}" \ + "$KC_URL/admin/realms/${REALM}/clients/${CLIENT_UUID}/client-secret" | jq -r '.value' 2>/dev/null || true)" +if [ -z "$CLIENT_SECRET" ] || [ "$CLIENT_SECRET" = "null" ]; then + echo "Keycloak client secret not found" >&2 + exit 1 +fi + +vault_addr="${VAULT_ADDR:-http://vault.vault.svc.cluster.local:8200}" +vault_role="${VAULT_ROLE:-sso-secrets}" +jwt="$(cat /var/run/secrets/kubernetes.io/serviceaccount/token)" +login_payload="$(jq -nc --arg jwt "${jwt}" --arg role "${vault_role}" '{jwt:$jwt, role:$role}')" +vault_token="$(curl -sS --request POST --data "${login_payload}" \ + "${vault_addr}/v1/auth/kubernetes/login" | jq -r '.auth.client_token')" +if [ -z "${vault_token}" ] || [ "${vault_token}" = "null" ]; then + echo "vault login failed" >&2 + exit 1 +fi + +payload="$(jq -nc \ + --arg client_id "${CLIENT_ID}" \ + --arg 
client_secret "${CLIENT_SECRET}" \ + --arg issuer_url "${ISSUER_URL}" \ + '{data:{client_id:$client_id,client_secret:$client_secret,issuer_url:$issuer_url}}')" +curl -sS -X POST -H "X-Vault-Token: ${vault_token}" \ + -d "${payload}" "${vault_addr}/v1/kv/data/atlas/health/sparkyfitness-oidc" >/dev/null diff --git a/services/keycloak/sparkyfitness-oidc-secret-ensure-job.yaml b/services/keycloak/sparkyfitness-oidc-secret-ensure-job.yaml new file mode 100644 index 0000000..17e6deb --- /dev/null +++ b/services/keycloak/sparkyfitness-oidc-secret-ensure-job.yaml @@ -0,0 +1,52 @@ +# services/keycloak/sparkyfitness-oidc-secret-ensure-job.yaml +apiVersion: batch/v1 +kind: Job +metadata: + name: sparkyfitness-oidc-secret-ensure-1 + namespace: sso +spec: + backoffLimit: 0 + ttlSecondsAfterFinished: 3600 + template: + metadata: + annotations: + vault.hashicorp.com/agent-inject: "true" + vault.hashicorp.com/role: "sso-secrets" + vault.hashicorp.com/agent-inject-secret-keycloak-admin-env.sh: "kv/data/atlas/shared/keycloak-admin" + vault.hashicorp.com/agent-inject-template-keycloak-admin-env.sh: | + {{ with secret "kv/data/atlas/shared/keycloak-admin" }} + export KEYCLOAK_ADMIN="{{ .Data.data.username }}" + export KEYCLOAK_ADMIN_USER="{{ .Data.data.username }}" + export KEYCLOAK_ADMIN_PASSWORD="{{ .Data.data.password }}" + {{ end }} + spec: + serviceAccountName: mas-secrets-ensure + restartPolicy: Never + volumes: + - name: sparkyfitness-oidc-secret-ensure-script + configMap: + name: sparkyfitness-oidc-secret-ensure-script + defaultMode: 0555 + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/arch + operator: In + values: ["arm64"] + - key: node-role.kubernetes.io/worker + operator: Exists + containers: + - name: apply + image: alpine:3.20 + command: ["/bin/sh", "-c"] + args: + - | + set -euo pipefail + apk add --no-cache bash curl jq >/dev/null + exec 
/scripts/sparkyfitness_oidc_secret_ensure.sh + volumeMounts: + - name: sparkyfitness-oidc-secret-ensure-script + mountPath: /scripts + readOnly: true diff --git a/services/keycloak/synapse-oidc-secret-ensure-job.yaml b/services/keycloak/synapse-oidc-secret-ensure-job.yaml index 1e4878d..712ca9d 100644 --- a/services/keycloak/synapse-oidc-secret-ensure-job.yaml +++ b/services/keycloak/synapse-oidc-secret-ensure-job.yaml @@ -14,11 +14,11 @@ spec: vault.hashicorp.com/role: "sso-secrets" vault.hashicorp.com/agent-inject-secret-keycloak-admin-env.sh: "kv/data/atlas/shared/keycloak-admin" vault.hashicorp.com/agent-inject-template-keycloak-admin-env.sh: | - {{- with secret "kv/data/atlas/shared/keycloak-admin" -}} + {{ with secret "kv/data/atlas/shared/keycloak-admin" }} export KEYCLOAK_ADMIN="{{ .Data.data.username }}" export KEYCLOAK_ADMIN_USER="{{ .Data.data.username }}" export KEYCLOAK_ADMIN_PASSWORD="{{ .Data.data.password }}" - {{- end }} + {{ end }} spec: serviceAccountName: mas-secrets-ensure restartPolicy: Never diff --git a/services/keycloak/user-overrides-job.yaml b/services/keycloak/user-overrides-job.yaml index 495af18..7ca098a 100644 --- a/services/keycloak/user-overrides-job.yaml +++ b/services/keycloak/user-overrides-job.yaml @@ -13,29 +13,29 @@ spec: vault.hashicorp.com/role: "sso" vault.hashicorp.com/agent-inject-secret-keycloak-env.sh: "kv/data/atlas/shared/keycloak-admin" vault.hashicorp.com/agent-inject-template-keycloak-env.sh: | - {{- with secret "kv/data/atlas/shared/keycloak-admin" -}} + {{ with secret "kv/data/atlas/shared/keycloak-admin" }} export KEYCLOAK_ADMIN="{{ .Data.data.username }}" export KEYCLOAK_ADMIN_USER="{{ .Data.data.username }}" export KEYCLOAK_ADMIN_PASSWORD="{{ .Data.data.password }}" - {{- end }} - {{- with secret "kv/data/atlas/sso/keycloak-db" -}} + {{ end }} + {{ with secret "kv/data/atlas/sso/keycloak-db" }} export KC_DB_URL_DATABASE="{{ .Data.data.POSTGRES_DATABASE }}" export KC_DB_USERNAME="{{ .Data.data.POSTGRES_USER }}" 
export KC_DB_PASSWORD="{{ .Data.data.POSTGRES_PASSWORD }}" - {{- end }} - {{- with secret "kv/data/atlas/shared/portal-e2e-client" -}} + {{ end }} + {{ with secret "kv/data/atlas/shared/portal-e2e-client" }} export PORTAL_E2E_CLIENT_ID="{{ .Data.data.client_id }}" export PORTAL_E2E_CLIENT_SECRET="{{ .Data.data.client_secret }}" - {{- end }} - {{- with secret "kv/data/atlas/sso/openldap-admin" -}} + {{ end }} + {{ with secret "kv/data/atlas/sso/openldap-admin" }} export LDAP_ADMIN_PASSWORD="{{ .Data.data.LDAP_ADMIN_PASSWORD }}" export LDAP_CONFIG_PASSWORD="{{ .Data.data.LDAP_CONFIG_PASSWORD }}" export LDAP_BIND_PASSWORD="${LDAP_ADMIN_PASSWORD}" - {{- end }} - {{- with secret "kv/data/atlas/shared/postmark-relay" -}} + {{ end }} + {{ with secret "kv/data/atlas/shared/postmark-relay" }} export KEYCLOAK_SMTP_USER="{{ index .Data.data "relay-username" }}" export KEYCLOAK_SMTP_PASSWORD="{{ index .Data.data "relay-password" }}" - {{- end }} + {{ end }} spec: affinity: nodeAffinity: diff --git a/services/keycloak/vault-oidc-secret-ensure-job.yaml b/services/keycloak/vault-oidc-secret-ensure-job.yaml index 797cada..31de281 100644 --- a/services/keycloak/vault-oidc-secret-ensure-job.yaml +++ b/services/keycloak/vault-oidc-secret-ensure-job.yaml @@ -14,11 +14,11 @@ spec: vault.hashicorp.com/role: "sso-secrets" vault.hashicorp.com/agent-inject-secret-keycloak-admin-env.sh: "kv/data/atlas/shared/keycloak-admin" vault.hashicorp.com/agent-inject-template-keycloak-admin-env.sh: | - {{- with secret "kv/data/atlas/shared/keycloak-admin" -}} + {{ with secret "kv/data/atlas/shared/keycloak-admin" }} export KEYCLOAK_ADMIN="{{ .Data.data.username }}" export KEYCLOAK_ADMIN_USER="{{ .Data.data.username }}" export KEYCLOAK_ADMIN_PASSWORD="{{ .Data.data.password }}" - {{- end }} + {{ end }} spec: serviceAccountName: mas-secrets-ensure restartPolicy: Never diff --git a/services/nextcloud-mail-sync/cronjob.yaml b/services/nextcloud-mail-sync/cronjob.yaml index 5042f4b..e6dcd37 100644 --- 
a/services/nextcloud-mail-sync/cronjob.yaml +++ b/services/nextcloud-mail-sync/cronjob.yaml @@ -18,29 +18,29 @@ spec: vault.hashicorp.com/role: "nextcloud" vault.hashicorp.com/agent-inject-secret-nextcloud-env.sh: "kv/data/atlas/nextcloud/nextcloud-db" vault.hashicorp.com/agent-inject-template-nextcloud-env.sh: | - {{- with secret "kv/data/atlas/nextcloud/nextcloud-db" -}} + {{ with secret "kv/data/atlas/nextcloud/nextcloud-db" }} export POSTGRES_DB="{{ .Data.data.database }}" export POSTGRES_USER="{{ index .Data.data "db-username" }}" export POSTGRES_PASSWORD="{{ index .Data.data "db-password" }}" - {{- end }} - {{- with secret "kv/data/atlas/nextcloud/nextcloud-admin" -}} + {{ end }} + {{ with secret "kv/data/atlas/nextcloud/nextcloud-admin" }} export NEXTCLOUD_ADMIN_USER="{{ index .Data.data "admin-user" }}" export NEXTCLOUD_ADMIN_PASSWORD="{{ index .Data.data "admin-password" }}" - {{- end }} + {{ end }} export ADMIN_USER="${NEXTCLOUD_ADMIN_USER}" export ADMIN_PASS="${NEXTCLOUD_ADMIN_PASSWORD}" - {{- with secret "kv/data/atlas/nextcloud/nextcloud-oidc" -}} + {{ with secret "kv/data/atlas/nextcloud/nextcloud-oidc" }} export OIDC_CLIENT_ID="{{ index .Data.data "client-id" }}" export OIDC_CLIENT_SECRET="{{ index .Data.data "client-secret" }}" - {{- end }} - {{- with secret "kv/data/atlas/shared/postmark-relay" -}} + {{ end }} + {{ with secret "kv/data/atlas/shared/postmark-relay" }} export SMTP_NAME="{{ index .Data.data "relay-username" }}" export SMTP_PASSWORD="{{ index .Data.data "relay-password" }}" - {{- end }} - {{- with secret "kv/data/atlas/shared/keycloak-admin" -}} + {{ end }} + {{ with secret "kv/data/atlas/shared/keycloak-admin" }} export KC_ADMIN_USER="{{ .Data.data.username }}" export KC_ADMIN_PASS="{{ .Data.data.password }}" - {{- end }} + {{ end }} spec: restartPolicy: OnFailure securityContext: diff --git a/services/nextcloud/deployment.yaml b/services/nextcloud/deployment.yaml index 063c6f7..9af1e00 100644 --- a/services/nextcloud/deployment.yaml 
+++ b/services/nextcloud/deployment.yaml @@ -21,29 +21,29 @@ spec: vault.hashicorp.com/role: "nextcloud" vault.hashicorp.com/agent-inject-secret-nextcloud-env.sh: "kv/data/atlas/nextcloud/nextcloud-db" vault.hashicorp.com/agent-inject-template-nextcloud-env.sh: | - {{- with secret "kv/data/atlas/nextcloud/nextcloud-db" -}} + {{ with secret "kv/data/atlas/nextcloud/nextcloud-db" }} export POSTGRES_DB="{{ .Data.data.database }}" export POSTGRES_USER="{{ index .Data.data "db-username" }}" export POSTGRES_PASSWORD="{{ index .Data.data "db-password" }}" - {{- end }} - {{- with secret "kv/data/atlas/nextcloud/nextcloud-admin" -}} + {{ end }} + {{ with secret "kv/data/atlas/nextcloud/nextcloud-admin" }} export NEXTCLOUD_ADMIN_USER="{{ index .Data.data "admin-user" }}" export NEXTCLOUD_ADMIN_PASSWORD="{{ index .Data.data "admin-password" }}" - {{- end }} + {{ end }} export ADMIN_USER="${NEXTCLOUD_ADMIN_USER}" export ADMIN_PASS="${NEXTCLOUD_ADMIN_PASSWORD}" - {{- with secret "kv/data/atlas/nextcloud/nextcloud-oidc" -}} + {{ with secret "kv/data/atlas/nextcloud/nextcloud-oidc" }} export OIDC_CLIENT_ID="{{ index .Data.data "client-id" }}" export OIDC_CLIENT_SECRET="{{ index .Data.data "client-secret" }}" - {{- end }} - {{- with secret "kv/data/atlas/shared/postmark-relay" -}} + {{ end }} + {{ with secret "kv/data/atlas/shared/postmark-relay" }} export SMTP_NAME="{{ index .Data.data "relay-username" }}" export SMTP_PASSWORD="{{ index .Data.data "relay-password" }}" - {{- end }} - {{- with secret "kv/data/atlas/shared/keycloak-admin" -}} + {{ end }} + {{ with secret "kv/data/atlas/shared/keycloak-admin" }} export KC_ADMIN_USER="{{ .Data.data.username }}" export KC_ADMIN_PASS="{{ .Data.data.password }}" - {{- end }} + {{ end }} spec: nodeSelector: hardware: rpi5 diff --git a/services/nextcloud/maintenance-cronjob.yaml b/services/nextcloud/maintenance-cronjob.yaml index f8af256..8c92417 100644 --- a/services/nextcloud/maintenance-cronjob.yaml +++ 
b/services/nextcloud/maintenance-cronjob.yaml @@ -16,29 +16,29 @@ spec: vault.hashicorp.com/role: "nextcloud" vault.hashicorp.com/agent-inject-secret-nextcloud-env.sh: "kv/data/atlas/nextcloud/nextcloud-db" vault.hashicorp.com/agent-inject-template-nextcloud-env.sh: | - {{- with secret "kv/data/atlas/nextcloud/nextcloud-db" -}} + {{ with secret "kv/data/atlas/nextcloud/nextcloud-db" }} export POSTGRES_DB="{{ .Data.data.database }}" export POSTGRES_USER="{{ index .Data.data "db-username" }}" export POSTGRES_PASSWORD="{{ index .Data.data "db-password" }}" - {{- end }} - {{- with secret "kv/data/atlas/nextcloud/nextcloud-admin" -}} + {{ end }} + {{ with secret "kv/data/atlas/nextcloud/nextcloud-admin" }} export NEXTCLOUD_ADMIN_USER="{{ index .Data.data "admin-user" }}" export NEXTCLOUD_ADMIN_PASSWORD="{{ index .Data.data "admin-password" }}" - {{- end }} + {{ end }} export ADMIN_USER="${NEXTCLOUD_ADMIN_USER}" export ADMIN_PASS="${NEXTCLOUD_ADMIN_PASSWORD}" - {{- with secret "kv/data/atlas/nextcloud/nextcloud-oidc" -}} + {{ with secret "kv/data/atlas/nextcloud/nextcloud-oidc" }} export OIDC_CLIENT_ID="{{ index .Data.data "client-id" }}" export OIDC_CLIENT_SECRET="{{ index .Data.data "client-secret" }}" - {{- end }} - {{- with secret "kv/data/atlas/shared/postmark-relay" -}} + {{ end }} + {{ with secret "kv/data/atlas/shared/postmark-relay" }} export SMTP_NAME="{{ index .Data.data "relay-username" }}" export SMTP_PASSWORD="{{ index .Data.data "relay-password" }}" - {{- end }} - {{- with secret "kv/data/atlas/shared/keycloak-admin" -}} + {{ end }} + {{ with secret "kv/data/atlas/shared/keycloak-admin" }} export KC_ADMIN_USER="{{ .Data.data.username }}" export KC_ADMIN_PASS="{{ .Data.data.password }}" - {{- end }} + {{ end }} spec: restartPolicy: OnFailure securityContext: diff --git a/services/outline/deployment.yaml b/services/outline/deployment.yaml index 04341a0..471d185 100644 --- a/services/outline/deployment.yaml +++ b/services/outline/deployment.yaml @@ -25,29 
+25,29 @@ spec: vault.hashicorp.com/role: "outline" vault.hashicorp.com/agent-inject-secret-outline-env.sh: "kv/data/atlas/outline/outline-db" vault.hashicorp.com/agent-inject-template-outline-env.sh: | - {{- with secret "kv/data/atlas/outline/outline-db" -}} + {{ with secret "kv/data/atlas/outline/outline-db" }} export DATABASE_URL="{{ .Data.data.DATABASE_URL }}" - {{- end }} - {{- with secret "kv/data/atlas/outline/outline-secrets" -}} + {{ end }} + {{ with secret "kv/data/atlas/outline/outline-secrets" }} export SECRET_KEY="{{ .Data.data.SECRET_KEY }}" export UTILS_SECRET="{{ .Data.data.UTILS_SECRET }}" - {{- end }} - {{- with secret "kv/data/atlas/outline/outline-oidc" -}} + {{ end }} + {{ with secret "kv/data/atlas/outline/outline-oidc" }} export OIDC_AUTH_URI="{{ .Data.data.OIDC_AUTH_URI }}" export OIDC_CLIENT_ID="{{ .Data.data.OIDC_CLIENT_ID }}" export OIDC_CLIENT_SECRET="{{ .Data.data.OIDC_CLIENT_SECRET }}" export OIDC_LOGOUT_URI="{{ .Data.data.OIDC_LOGOUT_URI }}" export OIDC_TOKEN_URI="{{ .Data.data.OIDC_TOKEN_URI }}" export OIDC_USERINFO_URI="{{ .Data.data.OIDC_USERINFO_URI }}" - {{- end }} - {{- with secret "kv/data/atlas/outline/outline-smtp" -}} + {{ end }} + {{ with secret "kv/data/atlas/outline/outline-smtp" }} export SMTP_FROM_EMAIL="{{ .Data.data.SMTP_FROM_EMAIL }}" export SMTP_HOST="{{ .Data.data.SMTP_HOST }}" - {{- end }} - {{- with secret "kv/data/atlas/shared/postmark-relay" -}} + {{ end }} + {{ with secret "kv/data/atlas/shared/postmark-relay" }} export SMTP_USERNAME="{{ index .Data.data "relay-username" }}" export SMTP_PASSWORD="{{ index .Data.data "relay-password" }}" - {{- end }} + {{ end }} spec: serviceAccountName: outline-vault nodeSelector: diff --git a/services/planka/deployment.yaml b/services/planka/deployment.yaml index cec505f..7d8a628 100644 --- a/services/planka/deployment.yaml +++ b/services/planka/deployment.yaml @@ -25,13 +25,13 @@ spec: vault.hashicorp.com/role: "planka" vault.hashicorp.com/agent-inject-secret-planka-env.sh: 
"kv/data/atlas/planka/planka-db" vault.hashicorp.com/agent-inject-template-planka-env.sh: | - {{- with secret "kv/data/atlas/planka/planka-db" -}} + {{ with secret "kv/data/atlas/planka/planka-db" }} export DATABASE_URL="{{ .Data.data.DATABASE_URL }}" - {{- end }} - {{- with secret "kv/data/atlas/planka/planka-secrets" -}} + {{ end }} + {{ with secret "kv/data/atlas/planka/planka-secrets" }} export SECRET_KEY="{{ .Data.data.SECRET_KEY }}" - {{- end }} - {{- with secret "kv/data/atlas/planka/planka-oidc" -}} + {{ end }} + {{ with secret "kv/data/atlas/planka/planka-oidc" }} export OIDC_CLIENT_ID="{{ .Data.data.OIDC_CLIENT_ID }}" export OIDC_CLIENT_SECRET="{{ .Data.data.OIDC_CLIENT_SECRET }}" export OIDC_ENFORCED="{{ .Data.data.OIDC_ENFORCED }}" @@ -39,18 +39,18 @@ spec: export OIDC_ISSUER="{{ .Data.data.OIDC_ISSUER }}" export OIDC_SCOPES="{{ .Data.data.OIDC_SCOPES }}" export OIDC_USE_OAUTH_CALLBACK="{{ .Data.data.OIDC_USE_OAUTH_CALLBACK }}" - {{- end }} - {{- with secret "kv/data/atlas/planka/planka-smtp" -}} + {{ end }} + {{ with secret "kv/data/atlas/planka/planka-smtp" }} export SMTP_FROM="{{ .Data.data.SMTP_FROM }}" export SMTP_HOST="{{ .Data.data.SMTP_HOST }}" export SMTP_PORT="{{ .Data.data.SMTP_PORT }}" export SMTP_SECURE="{{ .Data.data.SMTP_SECURE }}" export SMTP_TLS_REJECT_UNAUTHORIZED="{{ .Data.data.SMTP_TLS_REJECT_UNAUTHORIZED }}" - {{- end }} - {{- with secret "kv/data/atlas/shared/postmark-relay" -}} + {{ end }} + {{ with secret "kv/data/atlas/shared/postmark-relay" }} export SMTP_USER="{{ index .Data.data "relay-username" }}" export SMTP_PASSWORD="{{ index .Data.data "relay-password" }}" - {{- end }} + {{ end }} spec: serviceAccountName: planka-vault nodeSelector: diff --git a/services/vault/scripts/vault_k8s_auth_configure.sh b/services/vault/scripts/vault_k8s_auth_configure.sh index 3ecbd3f..f96dd94 100644 --- a/services/vault/scripts/vault_k8s_auth_configure.sh +++ b/services/vault/scripts/vault_k8s_auth_configure.sh @@ -105,10 +105,12 @@ 
write_policy_and_role "pegasus" "jellyfin" "pegasus-vault-sync" \ "pegasus/* harbor-pull/jellyfin" "" write_policy_and_role "crypto" "crypto" "crypto-vault-sync" \ "crypto/* harbor-pull/crypto" "" +write_policy_and_role "health" "health" "health-vault-sync" \ + "health/*" "" write_policy_and_role "sso-secrets" "sso" "mas-secrets-ensure" \ "shared/keycloak-admin" \ - "harbor/harbor-oidc vault/vault-oidc-config comms/synapse-oidc logging/oauth2-proxy-logs-oidc" + "harbor/harbor-oidc vault/vault-oidc-config comms/synapse-oidc logging/oauth2-proxy-logs-oidc health/endurain-oidc health/sparkyfitness-oidc" write_policy_and_role "comms-secrets" "comms" \ "comms-secrets-ensure,mas-db-ensure,mas-admin-client-secret-writer,othrys-synapse-signingkey-job" \ "" \ diff --git a/services/vaultwarden/deployment.yaml b/services/vaultwarden/deployment.yaml index 57789a7..2fde277 100644 --- a/services/vaultwarden/deployment.yaml +++ b/services/vaultwarden/deployment.yaml @@ -23,16 +23,16 @@ spec: vault.hashicorp.com/role: "vaultwarden" vault.hashicorp.com/agent-inject-secret-vaultwarden-env.sh: "kv/data/atlas/vaultwarden/vaultwarden-db-url" vault.hashicorp.com/agent-inject-template-vaultwarden-env.sh: | - {{- with secret "kv/data/atlas/vaultwarden/vaultwarden-db-url" -}} + {{ with secret "kv/data/atlas/vaultwarden/vaultwarden-db-url" }} export DATABASE_URL="{{ .Data.data.DATABASE_URL }}" - {{- end }} - {{- with secret "kv/data/atlas/vaultwarden/vaultwarden-admin" -}} + {{ end }} + {{ with secret "kv/data/atlas/vaultwarden/vaultwarden-admin" }} export ADMIN_TOKEN="{{ .Data.data.ADMIN_TOKEN }}" - {{- end }} - {{- with secret "kv/data/atlas/shared/postmark-relay" -}} + {{ end }} + {{ with secret "kv/data/atlas/shared/postmark-relay" }} export SMTP_USERNAME="{{ index .Data.data "relay-username" }}" export SMTP_PASSWORD="{{ index .Data.data "relay-password" }}" - {{- end }} + {{ end }} spec: serviceAccountName: vaultwarden-vault containers: From 9c16d0fbc04eb91e0dae5376c2b84c1b0ebd8af6 Mon 
Sep 17 00:00:00 2001 From: Brad Stein Date: Wed, 14 Jan 2026 13:42:08 -0300 Subject: [PATCH 052/270] keycloak: bump job names --- services/bstein-dev-home/portal-onboarding-e2e-test-job.yaml | 2 +- services/keycloak/endurain-oidc-secret-ensure-job.yaml | 2 +- services/keycloak/harbor-oidc-secret-ensure-job.yaml | 2 +- services/keycloak/ldap-federation-job.yaml | 2 +- services/keycloak/logs-oidc-secret-ensure-job.yaml | 2 +- services/keycloak/mas-secrets-ensure-job.yaml | 2 +- services/keycloak/portal-e2e-client-job.yaml | 2 +- .../keycloak/portal-e2e-execute-actions-email-test-job.yaml | 2 +- services/keycloak/portal-e2e-target-client-job.yaml | 2 +- .../keycloak/portal-e2e-token-exchange-permissions-job.yaml | 2 +- services/keycloak/portal-e2e-token-exchange-test-job.yaml | 2 +- services/keycloak/realm-settings-job.yaml | 2 +- services/keycloak/sparkyfitness-oidc-secret-ensure-job.yaml | 2 +- services/keycloak/synapse-oidc-secret-ensure-job.yaml | 2 +- services/keycloak/user-overrides-job.yaml | 2 +- services/keycloak/vault-oidc-secret-ensure-job.yaml | 2 +- 16 files changed, 16 insertions(+), 16 deletions(-) diff --git a/services/bstein-dev-home/portal-onboarding-e2e-test-job.yaml b/services/bstein-dev-home/portal-onboarding-e2e-test-job.yaml index 1f725f6..16de572 100644 --- a/services/bstein-dev-home/portal-onboarding-e2e-test-job.yaml +++ b/services/bstein-dev-home/portal-onboarding-e2e-test-job.yaml @@ -2,7 +2,7 @@ apiVersion: batch/v1 kind: Job metadata: - name: portal-onboarding-e2e-test-13 + name: portal-onboarding-e2e-test-14 namespace: bstein-dev-home spec: backoffLimit: 0 diff --git a/services/keycloak/endurain-oidc-secret-ensure-job.yaml b/services/keycloak/endurain-oidc-secret-ensure-job.yaml index 9870f1d..386c663 100644 --- a/services/keycloak/endurain-oidc-secret-ensure-job.yaml +++ b/services/keycloak/endurain-oidc-secret-ensure-job.yaml @@ -2,7 +2,7 @@ apiVersion: batch/v1 kind: Job metadata: - name: endurain-oidc-secret-ensure-1 + name: 
endurain-oidc-secret-ensure-2 namespace: sso spec: backoffLimit: 0 diff --git a/services/keycloak/harbor-oidc-secret-ensure-job.yaml b/services/keycloak/harbor-oidc-secret-ensure-job.yaml index 999cb64..598b801 100644 --- a/services/keycloak/harbor-oidc-secret-ensure-job.yaml +++ b/services/keycloak/harbor-oidc-secret-ensure-job.yaml @@ -2,7 +2,7 @@ apiVersion: batch/v1 kind: Job metadata: - name: harbor-oidc-secret-ensure-5 + name: harbor-oidc-secret-ensure-6 namespace: sso spec: backoffLimit: 0 diff --git a/services/keycloak/ldap-federation-job.yaml b/services/keycloak/ldap-federation-job.yaml index 1d3d09e..8dd62c9 100644 --- a/services/keycloak/ldap-federation-job.yaml +++ b/services/keycloak/ldap-federation-job.yaml @@ -2,7 +2,7 @@ apiVersion: batch/v1 kind: Job metadata: - name: keycloak-ldap-federation-7 + name: keycloak-ldap-federation-8 namespace: sso spec: backoffLimit: 2 diff --git a/services/keycloak/logs-oidc-secret-ensure-job.yaml b/services/keycloak/logs-oidc-secret-ensure-job.yaml index cd6cc62..5f9316f 100644 --- a/services/keycloak/logs-oidc-secret-ensure-job.yaml +++ b/services/keycloak/logs-oidc-secret-ensure-job.yaml @@ -2,7 +2,7 @@ apiVersion: batch/v1 kind: Job metadata: - name: logs-oidc-secret-ensure-4 + name: logs-oidc-secret-ensure-5 namespace: sso spec: backoffLimit: 0 diff --git a/services/keycloak/mas-secrets-ensure-job.yaml b/services/keycloak/mas-secrets-ensure-job.yaml index 4ad24e5..330cb51 100644 --- a/services/keycloak/mas-secrets-ensure-job.yaml +++ b/services/keycloak/mas-secrets-ensure-job.yaml @@ -10,7 +10,7 @@ imagePullSecrets: apiVersion: batch/v1 kind: Job metadata: - name: mas-secrets-ensure-16 + name: mas-secrets-ensure-17 namespace: sso spec: backoffLimit: 0 diff --git a/services/keycloak/portal-e2e-client-job.yaml b/services/keycloak/portal-e2e-client-job.yaml index 7801690..c3d996d 100644 --- a/services/keycloak/portal-e2e-client-job.yaml +++ b/services/keycloak/portal-e2e-client-job.yaml @@ -2,7 +2,7 @@ apiVersion: 
batch/v1 kind: Job metadata: - name: keycloak-portal-e2e-client-4 + name: keycloak-portal-e2e-client-5 namespace: sso spec: backoffLimit: 0 diff --git a/services/keycloak/portal-e2e-execute-actions-email-test-job.yaml b/services/keycloak/portal-e2e-execute-actions-email-test-job.yaml index 5bf70ff..aeb3a0d 100644 --- a/services/keycloak/portal-e2e-execute-actions-email-test-job.yaml +++ b/services/keycloak/portal-e2e-execute-actions-email-test-job.yaml @@ -2,7 +2,7 @@ apiVersion: batch/v1 kind: Job metadata: - name: keycloak-portal-e2e-execute-actions-email-7 + name: keycloak-portal-e2e-execute-actions-email-8 namespace: sso spec: backoffLimit: 3 diff --git a/services/keycloak/portal-e2e-target-client-job.yaml b/services/keycloak/portal-e2e-target-client-job.yaml index 68779bd..2900ae9 100644 --- a/services/keycloak/portal-e2e-target-client-job.yaml +++ b/services/keycloak/portal-e2e-target-client-job.yaml @@ -2,7 +2,7 @@ apiVersion: batch/v1 kind: Job metadata: - name: keycloak-portal-e2e-target-3 + name: keycloak-portal-e2e-target-4 namespace: sso spec: backoffLimit: 0 diff --git a/services/keycloak/portal-e2e-token-exchange-permissions-job.yaml b/services/keycloak/portal-e2e-token-exchange-permissions-job.yaml index 59a89f7..026260a 100644 --- a/services/keycloak/portal-e2e-token-exchange-permissions-job.yaml +++ b/services/keycloak/portal-e2e-token-exchange-permissions-job.yaml @@ -2,7 +2,7 @@ apiVersion: batch/v1 kind: Job metadata: - name: keycloak-portal-e2e-token-exchange-permissions-7 + name: keycloak-portal-e2e-token-exchange-permissions-8 namespace: sso spec: backoffLimit: 6 diff --git a/services/keycloak/portal-e2e-token-exchange-test-job.yaml b/services/keycloak/portal-e2e-token-exchange-test-job.yaml index a50a635..f32fa52 100644 --- a/services/keycloak/portal-e2e-token-exchange-test-job.yaml +++ b/services/keycloak/portal-e2e-token-exchange-test-job.yaml @@ -2,7 +2,7 @@ apiVersion: batch/v1 kind: Job metadata: - name: 
keycloak-portal-e2e-token-exchange-test-3 + name: keycloak-portal-e2e-token-exchange-test-4 namespace: sso spec: backoffLimit: 6 diff --git a/services/keycloak/realm-settings-job.yaml b/services/keycloak/realm-settings-job.yaml index e650c30..d26e199 100644 --- a/services/keycloak/realm-settings-job.yaml +++ b/services/keycloak/realm-settings-job.yaml @@ -2,7 +2,7 @@ apiVersion: batch/v1 kind: Job metadata: - name: keycloak-realm-settings-19 + name: keycloak-realm-settings-20 namespace: sso spec: backoffLimit: 0 diff --git a/services/keycloak/sparkyfitness-oidc-secret-ensure-job.yaml b/services/keycloak/sparkyfitness-oidc-secret-ensure-job.yaml index 17e6deb..6405d81 100644 --- a/services/keycloak/sparkyfitness-oidc-secret-ensure-job.yaml +++ b/services/keycloak/sparkyfitness-oidc-secret-ensure-job.yaml @@ -2,7 +2,7 @@ apiVersion: batch/v1 kind: Job metadata: - name: sparkyfitness-oidc-secret-ensure-1 + name: sparkyfitness-oidc-secret-ensure-2 namespace: sso spec: backoffLimit: 0 diff --git a/services/keycloak/synapse-oidc-secret-ensure-job.yaml b/services/keycloak/synapse-oidc-secret-ensure-job.yaml index 712ca9d..f4f0da4 100644 --- a/services/keycloak/synapse-oidc-secret-ensure-job.yaml +++ b/services/keycloak/synapse-oidc-secret-ensure-job.yaml @@ -2,7 +2,7 @@ apiVersion: batch/v1 kind: Job metadata: - name: synapse-oidc-secret-ensure-6 + name: synapse-oidc-secret-ensure-7 namespace: sso spec: backoffLimit: 0 diff --git a/services/keycloak/user-overrides-job.yaml b/services/keycloak/user-overrides-job.yaml index 7ca098a..d0063fb 100644 --- a/services/keycloak/user-overrides-job.yaml +++ b/services/keycloak/user-overrides-job.yaml @@ -2,7 +2,7 @@ apiVersion: batch/v1 kind: Job metadata: - name: keycloak-user-overrides-3 + name: keycloak-user-overrides-4 namespace: sso spec: backoffLimit: 0 diff --git a/services/keycloak/vault-oidc-secret-ensure-job.yaml b/services/keycloak/vault-oidc-secret-ensure-job.yaml index 31de281..982444f 100644 --- 
a/services/keycloak/vault-oidc-secret-ensure-job.yaml +++ b/services/keycloak/vault-oidc-secret-ensure-job.yaml @@ -2,7 +2,7 @@ apiVersion: batch/v1 kind: Job metadata: - name: vault-oidc-secret-ensure-3 + name: vault-oidc-secret-ensure-4 namespace: sso spec: backoffLimit: 0 From ab50780f49e8822f24b398afe3eb520e465cda5a Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Wed, 14 Jan 2026 13:43:56 -0300 Subject: [PATCH 053/270] gitea: trim vault secret newlines --- services/gitea/deployment.yaml | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/services/gitea/deployment.yaml b/services/gitea/deployment.yaml index 69650ca..a5fa9c4 100644 --- a/services/gitea/deployment.yaml +++ b/services/gitea/deployment.yaml @@ -67,9 +67,9 @@ spec: - -c - | set -euo pipefail - CLIENT_ID="$(cat /vault/secrets/gitea-oidc__client_id)" - CLIENT_SECRET="$(cat /vault/secrets/gitea-oidc__client_secret)" - DISCOVERY_URL="$(cat /vault/secrets/gitea-oidc__openid_auto_discovery_url)" + CLIENT_ID="$(tr -d '\r\n' Date: Wed, 14 Jan 2026 13:46:34 -0300 Subject: [PATCH 054/270] gitea: tolerate oidc init failures --- services/gitea/deployment.yaml | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/services/gitea/deployment.yaml b/services/gitea/deployment.yaml index a5fa9c4..9dc0c87 100644 --- a/services/gitea/deployment.yaml +++ b/services/gitea/deployment.yaml @@ -78,7 +78,7 @@ spec: if [ -n "$id" ]; then echo "Updating existing auth source id=$id" - $BIN -c "$APPINI" admin auth update-oauth \ + if ! $BIN -c "$APPINI" admin auth update-oauth \ --id "$id" \ --name keycloak \ --provider openidConnect \ @@ -90,10 +90,12 @@ spec: --required-claim-value "" \ --group-claim-name groups \ --admin-group admin \ - --skip-local-2fa + --skip-local-2fa; then + echo "OIDC update failed; continuing without blocking startup" >&2 + fi else echo "Creating keycloak auth source" - $BIN -c "$APPINI" admin auth add-oauth \ + if ! 
$BIN -c "$APPINI" admin auth add-oauth \ --name keycloak \ --provider openidConnect \ --key "$CLIENT_ID" \ @@ -104,7 +106,9 @@ spec: --required-claim-value "" \ --group-claim-name groups \ --admin-group admin \ - --skip-local-2fa + --skip-local-2fa; then + echo "OIDC create failed; continuing without blocking startup" >&2 + fi fi volumeMounts: - name: gitea-data From d559aeb4640672fd2763d8f8a1185740f29d3cbf Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Wed, 14 Jan 2026 13:49:37 -0300 Subject: [PATCH 055/270] keycloak: schedule on arm64 workers --- services/keycloak/deployment.yaml | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/services/keycloak/deployment.yaml b/services/keycloak/deployment.yaml index 6f6fd2b..bf8a47c 100644 --- a/services/keycloak/deployment.yaml +++ b/services/keycloak/deployment.yaml @@ -51,17 +51,23 @@ spec: spec: serviceAccountName: sso-vault nodeSelector: - kubernetes.io/arch: amd64 - node-role.kubernetes.io/accelerator: "true" + kubernetes.io/arch: arm64 + node-role.kubernetes.io/worker: "true" affinity: nodeAffinity: preferredDuringSchedulingIgnoredDuringExecution: - weight: 100 preference: matchExpressions: - - key: kubernetes.io/hostname + - key: hardware operator: In - values: ["titan-22"] + values: ["rpi5"] + - weight: 70 + preference: + matchExpressions: + - key: hardware + operator: In + values: ["rpi4"] securityContext: runAsUser: 1000 runAsGroup: 0 From e92cfa7dba9f86442406471d90b94c2d095f9f16 Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Wed, 14 Jan 2026 14:17:26 -0300 Subject: [PATCH 056/270] vault: move comms and mailu workloads to injector --- services/comms/atlasbot-deployment.yaml | 44 ++++++++++++---- services/comms/bstein-force-leave-job.yaml | 18 +++---- services/comms/coturn.yaml | 45 ++++++++++++---- services/comms/guest-name-job.yaml | 46 +++++++++++++---- services/comms/guest-register-deployment.yaml | 14 ++--- services/comms/livekit.yaml | 48 ++++++++++++----- 
services/comms/mas-deployment.yaml | 51 +++++++++++++++---- .../comms/mas-local-users-ensure-job.yaml | 48 +++++++++++++---- services/comms/othrys-kick-numeric-job.yaml | 48 +++++++++++++---- services/comms/pin-othrys-job.yaml | 46 +++++++++++++---- services/comms/reset-othrys-room-job.yaml | 46 +++++++++++++---- services/comms/scripts/comms_vault_env.sh | 2 +- services/comms/seed-othrys-room.yaml | 46 +++++++++++++---- .../synapse-seeder-admin-ensure-job.yaml | 48 +++++++++++++---- services/comms/synapse-user-seed-job.yaml | 48 +++++++++++++---- services/mailu/mailu-sync-cronjob.yaml | 28 ++++++---- services/mailu/mailu-sync-job.yaml | 30 +++++++---- services/mailu/mailu-sync-listener.yaml | 27 ++++++---- services/mailu/scripts/mailu_vault_env.sh | 2 +- 19 files changed, 521 insertions(+), 164 deletions(-) diff --git a/services/comms/atlasbot-deployment.yaml b/services/comms/atlasbot-deployment.yaml index 0622d32..5aa433f 100644 --- a/services/comms/atlasbot-deployment.yaml +++ b/services/comms/atlasbot-deployment.yaml @@ -17,6 +17,41 @@ spec: app: atlasbot annotations: checksum/atlasbot-configmap: manual-atlasbot-4 + vault.hashicorp.com/agent-inject: "true" + vault.hashicorp.com/role: "comms" + vault.hashicorp.com/agent-inject-secret-turn-shared-secret__TURN_STATIC_AUTH_SECRET: "kv/data/atlas/comms/turn-shared-secret" + vault.hashicorp.com/agent-inject-template-turn-shared-secret__TURN_STATIC_AUTH_SECRET: | + {{- with secret "kv/data/atlas/comms/turn-shared-secret" -}}{{ .Data.data.TURN_STATIC_AUTH_SECRET }}{{- end -}} + vault.hashicorp.com/agent-inject-secret-livekit-api__primary: "kv/data/atlas/comms/livekit-api" + vault.hashicorp.com/agent-inject-template-livekit-api__primary: | + {{- with secret "kv/data/atlas/comms/livekit-api" -}}{{ .Data.data.primary }}{{- end -}} + vault.hashicorp.com/agent-inject-secret-atlasbot-credentials-runtime__bot-password: "kv/data/atlas/comms/atlasbot-credentials-runtime" + 
vault.hashicorp.com/agent-inject-template-atlasbot-credentials-runtime__bot-password: | + {{- with secret "kv/data/atlas/comms/atlasbot-credentials-runtime" -}}{{ index .Data.data "bot-password" }}{{- end -}} + vault.hashicorp.com/agent-inject-secret-atlasbot-credentials-runtime__seeder-password: "kv/data/atlas/comms/atlasbot-credentials-runtime" + vault.hashicorp.com/agent-inject-template-atlasbot-credentials-runtime__seeder-password: | + {{- with secret "kv/data/atlas/comms/atlasbot-credentials-runtime" -}}{{ index .Data.data "seeder-password" }}{{- end -}} + vault.hashicorp.com/agent-inject-secret-chat-ai-keys-runtime__matrix: "kv/data/atlas/shared/chat-ai-keys-runtime" + vault.hashicorp.com/agent-inject-template-chat-ai-keys-runtime__matrix: | + {{- with secret "kv/data/atlas/shared/chat-ai-keys-runtime" -}}{{ .Data.data.matrix }}{{- end -}} + vault.hashicorp.com/agent-inject-secret-chat-ai-keys-runtime__homepage: "kv/data/atlas/shared/chat-ai-keys-runtime" + vault.hashicorp.com/agent-inject-template-chat-ai-keys-runtime__homepage: | + {{- with secret "kv/data/atlas/shared/chat-ai-keys-runtime" -}}{{ .Data.data.homepage }}{{- end -}} + vault.hashicorp.com/agent-inject-secret-mas-admin-client-runtime__client_secret: "kv/data/atlas/comms/mas-admin-client-runtime" + vault.hashicorp.com/agent-inject-template-mas-admin-client-runtime__client_secret: | + {{- with secret "kv/data/atlas/comms/mas-admin-client-runtime" -}}{{ .Data.data.client_secret }}{{- end -}} + vault.hashicorp.com/agent-inject-secret-synapse-db__POSTGRES_PASSWORD: "kv/data/atlas/comms/synapse-db" + vault.hashicorp.com/agent-inject-template-synapse-db__POSTGRES_PASSWORD: | + {{- with secret "kv/data/atlas/comms/synapse-db" -}}{{ .Data.data.POSTGRES_PASSWORD }}{{- end -}} + vault.hashicorp.com/agent-inject-secret-mas-db__password: "kv/data/atlas/comms/mas-db" + vault.hashicorp.com/agent-inject-template-mas-db__password: | + {{- with secret "kv/data/atlas/comms/mas-db" -}}{{ .Data.data.password }}{{- 
end -}} + vault.hashicorp.com/agent-inject-secret-mas-secrets-runtime__matrix_shared_secret: "kv/data/atlas/comms/mas-secrets-runtime" + vault.hashicorp.com/agent-inject-template-mas-secrets-runtime__matrix_shared_secret: | + {{- with secret "kv/data/atlas/comms/mas-secrets-runtime" -}}{{ .Data.data.matrix_shared_secret }}{{- end -}} + vault.hashicorp.com/agent-inject-secret-mas-secrets-runtime__keycloak_client_secret: "kv/data/atlas/comms/mas-secrets-runtime" + vault.hashicorp.com/agent-inject-template-mas-secrets-runtime__keycloak_client_secret: | + {{- with secret "kv/data/atlas/comms/mas-secrets-runtime" -}}{{ .Data.data.keycloak_client_secret }}{{- end -}} spec: serviceAccountName: atlasbot nodeSelector: @@ -58,9 +93,6 @@ spec: - name: kb mountPath: /kb readOnly: true - - name: vault-secrets - mountPath: /vault/secrets - readOnly: true - name: vault-scripts mountPath: /vault/scripts readOnly: true @@ -82,12 +114,6 @@ spec: path: catalog/runbooks.json - key: atlas-http.mmd path: diagrams/atlas-http.mmd - - name: vault-secrets - csi: - driver: secrets-store.csi.k8s.io - readOnly: true - volumeAttributes: - secretProviderClass: comms-vault - name: vault-scripts configMap: name: comms-vault-env diff --git a/services/comms/bstein-force-leave-job.yaml b/services/comms/bstein-force-leave-job.yaml index 42428d8..e694127 100644 --- a/services/comms/bstein-force-leave-job.yaml +++ b/services/comms/bstein-force-leave-job.yaml @@ -2,28 +2,26 @@ apiVersion: batch/v1 kind: Job metadata: - name: bstein-leave-rooms-7 + name: bstein-leave-rooms-8 namespace: comms spec: backoffLimit: 0 template: + metadata: + annotations: + vault.hashicorp.com/agent-inject: "true" + vault.hashicorp.com/role: "comms" + vault.hashicorp.com/agent-inject-secret-mas-admin-client-runtime__client_secret: "kv/data/atlas/comms/mas-admin-client-runtime" + vault.hashicorp.com/agent-inject-template-mas-admin-client-runtime__client_secret: | + {{- with secret "kv/data/atlas/comms/mas-admin-client-runtime" 
-}}{{ .Data.data.client_secret }}{{- end -}} spec: restartPolicy: Never serviceAccountName: comms-vault volumes: - - name: vault-secrets - csi: - driver: secrets-store.csi.k8s.io - readOnly: true - volumeAttributes: - secretProviderClass: comms-vault containers: - name: leave image: python:3.11-slim volumeMounts: - - name: vault-secrets - mountPath: /vault/secrets - readOnly: true env: - name: MAS_ADMIN_CLIENT_ID value: 01KDXMVQBQ5JNY6SEJPZW6Z8BM diff --git a/services/comms/coturn.yaml b/services/comms/coturn.yaml index ac7e57b..6c3f61c 100644 --- a/services/comms/coturn.yaml +++ b/services/comms/coturn.yaml @@ -14,6 +14,42 @@ spec: metadata: labels: app: coturn + annotations: + vault.hashicorp.com/agent-inject: "true" + vault.hashicorp.com/role: "comms" + vault.hashicorp.com/agent-inject-secret-turn-shared-secret__TURN_STATIC_AUTH_SECRET: "kv/data/atlas/comms/turn-shared-secret" + vault.hashicorp.com/agent-inject-template-turn-shared-secret__TURN_STATIC_AUTH_SECRET: | + {{- with secret "kv/data/atlas/comms/turn-shared-secret" -}}{{ .Data.data.TURN_STATIC_AUTH_SECRET }}{{- end -}} + vault.hashicorp.com/agent-inject-secret-livekit-api__primary: "kv/data/atlas/comms/livekit-api" + vault.hashicorp.com/agent-inject-template-livekit-api__primary: | + {{- with secret "kv/data/atlas/comms/livekit-api" -}}{{ .Data.data.primary }}{{- end -}} + vault.hashicorp.com/agent-inject-secret-atlasbot-credentials-runtime__bot-password: "kv/data/atlas/comms/atlasbot-credentials-runtime" + vault.hashicorp.com/agent-inject-template-atlasbot-credentials-runtime__bot-password: | + {{- with secret "kv/data/atlas/comms/atlasbot-credentials-runtime" -}}{{ index .Data.data "bot-password" }}{{- end -}} + vault.hashicorp.com/agent-inject-secret-atlasbot-credentials-runtime__seeder-password: "kv/data/atlas/comms/atlasbot-credentials-runtime" + vault.hashicorp.com/agent-inject-template-atlasbot-credentials-runtime__seeder-password: | + {{- with secret 
"kv/data/atlas/comms/atlasbot-credentials-runtime" -}}{{ index .Data.data "seeder-password" }}{{- end -}} + vault.hashicorp.com/agent-inject-secret-chat-ai-keys-runtime__matrix: "kv/data/atlas/shared/chat-ai-keys-runtime" + vault.hashicorp.com/agent-inject-template-chat-ai-keys-runtime__matrix: | + {{- with secret "kv/data/atlas/shared/chat-ai-keys-runtime" -}}{{ .Data.data.matrix }}{{- end -}} + vault.hashicorp.com/agent-inject-secret-chat-ai-keys-runtime__homepage: "kv/data/atlas/shared/chat-ai-keys-runtime" + vault.hashicorp.com/agent-inject-template-chat-ai-keys-runtime__homepage: | + {{- with secret "kv/data/atlas/shared/chat-ai-keys-runtime" -}}{{ .Data.data.homepage }}{{- end -}} + vault.hashicorp.com/agent-inject-secret-mas-admin-client-runtime__client_secret: "kv/data/atlas/comms/mas-admin-client-runtime" + vault.hashicorp.com/agent-inject-template-mas-admin-client-runtime__client_secret: | + {{- with secret "kv/data/atlas/comms/mas-admin-client-runtime" -}}{{ .Data.data.client_secret }}{{- end -}} + vault.hashicorp.com/agent-inject-secret-synapse-db__POSTGRES_PASSWORD: "kv/data/atlas/comms/synapse-db" + vault.hashicorp.com/agent-inject-template-synapse-db__POSTGRES_PASSWORD: | + {{- with secret "kv/data/atlas/comms/synapse-db" -}}{{ .Data.data.POSTGRES_PASSWORD }}{{- end -}} + vault.hashicorp.com/agent-inject-secret-mas-db__password: "kv/data/atlas/comms/mas-db" + vault.hashicorp.com/agent-inject-template-mas-db__password: | + {{- with secret "kv/data/atlas/comms/mas-db" -}}{{ .Data.data.password }}{{- end -}} + vault.hashicorp.com/agent-inject-secret-mas-secrets-runtime__matrix_shared_secret: "kv/data/atlas/comms/mas-secrets-runtime" + vault.hashicorp.com/agent-inject-template-mas-secrets-runtime__matrix_shared_secret: | + {{- with secret "kv/data/atlas/comms/mas-secrets-runtime" -}}{{ .Data.data.matrix_shared_secret }}{{- end -}} + vault.hashicorp.com/agent-inject-secret-mas-secrets-runtime__keycloak_client_secret: 
"kv/data/atlas/comms/mas-secrets-runtime" + vault.hashicorp.com/agent-inject-template-mas-secrets-runtime__keycloak_client_secret: | + {{- with secret "kv/data/atlas/comms/mas-secrets-runtime" -}}{{ .Data.data.keycloak_client_secret }}{{- end -}} spec: serviceAccountName: comms-vault nodeSelector: @@ -73,9 +109,6 @@ spec: - name: tls mountPath: /etc/coturn/tls readOnly: true - - name: vault-secrets - mountPath: /vault/secrets - readOnly: true - name: vault-scripts mountPath: /vault/scripts readOnly: true @@ -90,12 +123,6 @@ spec: - name: tls secret: secretName: turn-live-tls - - name: vault-secrets - csi: - driver: secrets-store.csi.k8s.io - readOnly: true - volumeAttributes: - secretProviderClass: comms-vault - name: vault-scripts configMap: name: comms-vault-env diff --git a/services/comms/guest-name-job.yaml b/services/comms/guest-name-job.yaml index 1f9004e..7e58e46 100644 --- a/services/comms/guest-name-job.yaml +++ b/services/comms/guest-name-job.yaml @@ -14,16 +14,47 @@ spec: spec: backoffLimit: 0 template: + metadata: + annotations: + vault.hashicorp.com/agent-inject: "true" + vault.hashicorp.com/role: "comms" + vault.hashicorp.com/agent-inject-secret-turn-shared-secret__TURN_STATIC_AUTH_SECRET: "kv/data/atlas/comms/turn-shared-secret" + vault.hashicorp.com/agent-inject-template-turn-shared-secret__TURN_STATIC_AUTH_SECRET: | + {{- with secret "kv/data/atlas/comms/turn-shared-secret" -}}{{ .Data.data.TURN_STATIC_AUTH_SECRET }}{{- end -}} + vault.hashicorp.com/agent-inject-secret-livekit-api__primary: "kv/data/atlas/comms/livekit-api" + vault.hashicorp.com/agent-inject-template-livekit-api__primary: | + {{- with secret "kv/data/atlas/comms/livekit-api" -}}{{ .Data.data.primary }}{{- end -}} + vault.hashicorp.com/agent-inject-secret-atlasbot-credentials-runtime__bot-password: "kv/data/atlas/comms/atlasbot-credentials-runtime" + vault.hashicorp.com/agent-inject-template-atlasbot-credentials-runtime__bot-password: | + {{- with secret 
"kv/data/atlas/comms/atlasbot-credentials-runtime" -}}{{ index .Data.data "bot-password" }}{{- end -}} + vault.hashicorp.com/agent-inject-secret-atlasbot-credentials-runtime__seeder-password: "kv/data/atlas/comms/atlasbot-credentials-runtime" + vault.hashicorp.com/agent-inject-template-atlasbot-credentials-runtime__seeder-password: | + {{- with secret "kv/data/atlas/comms/atlasbot-credentials-runtime" -}}{{ index .Data.data "seeder-password" }}{{- end -}} + vault.hashicorp.com/agent-inject-secret-chat-ai-keys-runtime__matrix: "kv/data/atlas/shared/chat-ai-keys-runtime" + vault.hashicorp.com/agent-inject-template-chat-ai-keys-runtime__matrix: | + {{- with secret "kv/data/atlas/shared/chat-ai-keys-runtime" -}}{{ .Data.data.matrix }}{{- end -}} + vault.hashicorp.com/agent-inject-secret-chat-ai-keys-runtime__homepage: "kv/data/atlas/shared/chat-ai-keys-runtime" + vault.hashicorp.com/agent-inject-template-chat-ai-keys-runtime__homepage: | + {{- with secret "kv/data/atlas/shared/chat-ai-keys-runtime" -}}{{ .Data.data.homepage }}{{- end -}} + vault.hashicorp.com/agent-inject-secret-mas-admin-client-runtime__client_secret: "kv/data/atlas/comms/mas-admin-client-runtime" + vault.hashicorp.com/agent-inject-template-mas-admin-client-runtime__client_secret: | + {{- with secret "kv/data/atlas/comms/mas-admin-client-runtime" -}}{{ .Data.data.client_secret }}{{- end -}} + vault.hashicorp.com/agent-inject-secret-synapse-db__POSTGRES_PASSWORD: "kv/data/atlas/comms/synapse-db" + vault.hashicorp.com/agent-inject-template-synapse-db__POSTGRES_PASSWORD: | + {{- with secret "kv/data/atlas/comms/synapse-db" -}}{{ .Data.data.POSTGRES_PASSWORD }}{{- end -}} + vault.hashicorp.com/agent-inject-secret-mas-db__password: "kv/data/atlas/comms/mas-db" + vault.hashicorp.com/agent-inject-template-mas-db__password: | + {{- with secret "kv/data/atlas/comms/mas-db" -}}{{ .Data.data.password }}{{- end -}} + vault.hashicorp.com/agent-inject-secret-mas-secrets-runtime__matrix_shared_secret: 
"kv/data/atlas/comms/mas-secrets-runtime" + vault.hashicorp.com/agent-inject-template-mas-secrets-runtime__matrix_shared_secret: | + {{- with secret "kv/data/atlas/comms/mas-secrets-runtime" -}}{{ .Data.data.matrix_shared_secret }}{{- end -}} + vault.hashicorp.com/agent-inject-secret-mas-secrets-runtime__keycloak_client_secret: "kv/data/atlas/comms/mas-secrets-runtime" + vault.hashicorp.com/agent-inject-template-mas-secrets-runtime__keycloak_client_secret: | + {{- with secret "kv/data/atlas/comms/mas-secrets-runtime" -}}{{ .Data.data.keycloak_client_secret }}{{- end -}} spec: restartPolicy: Never serviceAccountName: comms-vault volumes: - - name: vault-secrets - csi: - driver: secrets-store.csi.k8s.io - readOnly: true - volumeAttributes: - secretProviderClass: comms-vault - name: vault-scripts configMap: name: comms-vault-env @@ -32,9 +63,6 @@ spec: - name: rename image: python:3.11-slim volumeMounts: - - name: vault-secrets - mountPath: /vault/secrets - readOnly: true - name: vault-scripts mountPath: /vault/scripts readOnly: true diff --git a/services/comms/guest-register-deployment.yaml b/services/comms/guest-register-deployment.yaml index bdf5c37..d3e218c 100644 --- a/services/comms/guest-register-deployment.yaml +++ b/services/comms/guest-register-deployment.yaml @@ -14,6 +14,11 @@ spec: metadata: annotations: checksum/config: guest-register-proxy-5 + vault.hashicorp.com/agent-inject: "true" + vault.hashicorp.com/role: "comms" + vault.hashicorp.com/agent-inject-secret-mas-admin-client-runtime__client_secret: "kv/data/atlas/comms/mas-admin-client-runtime" + vault.hashicorp.com/agent-inject-template-mas-admin-client-runtime__client_secret: | + {{- with secret "kv/data/atlas/comms/mas-admin-client-runtime" -}}{{ .Data.data.client_secret }}{{- end -}} labels: app.kubernetes.io/name: matrix-guest-register spec: @@ -84,9 +89,6 @@ spec: mountPath: /app/server.py subPath: server.py readOnly: true - - name: vault-secrets - mountPath: /vault/secrets - readOnly: true 
command: - python - /app/server.py @@ -97,9 +99,3 @@ spec: items: - key: server.py path: server.py - - name: vault-secrets - csi: - driver: secrets-store.csi.k8s.io - readOnly: true - volumeAttributes: - secretProviderClass: comms-vault diff --git a/services/comms/livekit.yaml b/services/comms/livekit.yaml index adad92a..b204d6e 100644 --- a/services/comms/livekit.yaml +++ b/services/comms/livekit.yaml @@ -14,6 +14,42 @@ spec: metadata: annotations: checksum/config: livekit-config-v5 + vault.hashicorp.com/agent-inject: "true" + vault.hashicorp.com/agent-init-first: "true" + vault.hashicorp.com/role: "comms" + vault.hashicorp.com/agent-inject-secret-turn-shared-secret__TURN_STATIC_AUTH_SECRET: "kv/data/atlas/comms/turn-shared-secret" + vault.hashicorp.com/agent-inject-template-turn-shared-secret__TURN_STATIC_AUTH_SECRET: | + {{- with secret "kv/data/atlas/comms/turn-shared-secret" -}}{{ .Data.data.TURN_STATIC_AUTH_SECRET }}{{- end -}} + vault.hashicorp.com/agent-inject-secret-livekit-api__primary: "kv/data/atlas/comms/livekit-api" + vault.hashicorp.com/agent-inject-template-livekit-api__primary: | + {{- with secret "kv/data/atlas/comms/livekit-api" -}}{{ .Data.data.primary }}{{- end -}} + vault.hashicorp.com/agent-inject-secret-atlasbot-credentials-runtime__bot-password: "kv/data/atlas/comms/atlasbot-credentials-runtime" + vault.hashicorp.com/agent-inject-template-atlasbot-credentials-runtime__bot-password: | + {{- with secret "kv/data/atlas/comms/atlasbot-credentials-runtime" -}}{{ index .Data.data "bot-password" }}{{- end -}} + vault.hashicorp.com/agent-inject-secret-atlasbot-credentials-runtime__seeder-password: "kv/data/atlas/comms/atlasbot-credentials-runtime" + vault.hashicorp.com/agent-inject-template-atlasbot-credentials-runtime__seeder-password: | + {{- with secret "kv/data/atlas/comms/atlasbot-credentials-runtime" -}}{{ index .Data.data "seeder-password" }}{{- end -}} + vault.hashicorp.com/agent-inject-secret-chat-ai-keys-runtime__matrix: 
"kv/data/atlas/shared/chat-ai-keys-runtime" + vault.hashicorp.com/agent-inject-template-chat-ai-keys-runtime__matrix: | + {{- with secret "kv/data/atlas/shared/chat-ai-keys-runtime" -}}{{ .Data.data.matrix }}{{- end -}} + vault.hashicorp.com/agent-inject-secret-chat-ai-keys-runtime__homepage: "kv/data/atlas/shared/chat-ai-keys-runtime" + vault.hashicorp.com/agent-inject-template-chat-ai-keys-runtime__homepage: | + {{- with secret "kv/data/atlas/shared/chat-ai-keys-runtime" -}}{{ .Data.data.homepage }}{{- end -}} + vault.hashicorp.com/agent-inject-secret-mas-admin-client-runtime__client_secret: "kv/data/atlas/comms/mas-admin-client-runtime" + vault.hashicorp.com/agent-inject-template-mas-admin-client-runtime__client_secret: | + {{- with secret "kv/data/atlas/comms/mas-admin-client-runtime" -}}{{ .Data.data.client_secret }}{{- end -}} + vault.hashicorp.com/agent-inject-secret-synapse-db__POSTGRES_PASSWORD: "kv/data/atlas/comms/synapse-db" + vault.hashicorp.com/agent-inject-template-synapse-db__POSTGRES_PASSWORD: | + {{- with secret "kv/data/atlas/comms/synapse-db" -}}{{ .Data.data.POSTGRES_PASSWORD }}{{- end -}} + vault.hashicorp.com/agent-inject-secret-mas-db__password: "kv/data/atlas/comms/mas-db" + vault.hashicorp.com/agent-inject-template-mas-db__password: | + {{- with secret "kv/data/atlas/comms/mas-db" -}}{{ .Data.data.password }}{{- end -}} + vault.hashicorp.com/agent-inject-secret-mas-secrets-runtime__matrix_shared_secret: "kv/data/atlas/comms/mas-secrets-runtime" + vault.hashicorp.com/agent-inject-template-mas-secrets-runtime__matrix_shared_secret: | + {{- with secret "kv/data/atlas/comms/mas-secrets-runtime" -}}{{ .Data.data.matrix_shared_secret }}{{- end -}} + vault.hashicorp.com/agent-inject-secret-mas-secrets-runtime__keycloak_client_secret: "kv/data/atlas/comms/mas-secrets-runtime" + vault.hashicorp.com/agent-inject-template-mas-secrets-runtime__keycloak_client_secret: | + {{- with secret "kv/data/atlas/comms/mas-secrets-runtime" -}}{{ 
.Data.data.keycloak_client_secret }}{{- end -}} labels: app: livekit spec: @@ -49,9 +85,6 @@ spec: - name: config mountPath: /etc/livekit readOnly: false - - name: vault-secrets - mountPath: /vault/secrets - readOnly: true - name: vault-scripts mountPath: /vault/scripts readOnly: true @@ -90,9 +123,6 @@ spec: readOnly: true - name: runtime-keys mountPath: /var/run/livekit - - name: vault-secrets - mountPath: /vault/secrets - readOnly: true - name: vault-scripts mountPath: /vault/scripts readOnly: true @@ -114,12 +144,6 @@ spec: emptyDir: {} - name: runtime-keys emptyDir: {} - - name: vault-secrets - csi: - driver: secrets-store.csi.k8s.io - readOnly: true - volumeAttributes: - secretProviderClass: comms-vault - name: vault-scripts configMap: name: comms-vault-env diff --git a/services/comms/mas-deployment.yaml b/services/comms/mas-deployment.yaml index c7e6821..ef9a5ab 100644 --- a/services/comms/mas-deployment.yaml +++ b/services/comms/mas-deployment.yaml @@ -14,6 +14,48 @@ spec: metadata: annotations: checksum/config: v5-adminapi-7 + vault.hashicorp.com/agent-inject: "true" + vault.hashicorp.com/agent-init-first: "true" + vault.hashicorp.com/role: "comms" + vault.hashicorp.com/agent-inject-secret-turn-shared-secret__TURN_STATIC_AUTH_SECRET: "kv/data/atlas/comms/turn-shared-secret" + vault.hashicorp.com/agent-inject-template-turn-shared-secret__TURN_STATIC_AUTH_SECRET: | + {{- with secret "kv/data/atlas/comms/turn-shared-secret" -}}{{ .Data.data.TURN_STATIC_AUTH_SECRET }}{{- end -}} + vault.hashicorp.com/agent-inject-secret-livekit-api__primary: "kv/data/atlas/comms/livekit-api" + vault.hashicorp.com/agent-inject-template-livekit-api__primary: | + {{- with secret "kv/data/atlas/comms/livekit-api" -}}{{ .Data.data.primary }}{{- end -}} + vault.hashicorp.com/agent-inject-secret-atlasbot-credentials-runtime__bot-password: "kv/data/atlas/comms/atlasbot-credentials-runtime" + vault.hashicorp.com/agent-inject-template-atlasbot-credentials-runtime__bot-password: | + {{- 
with secret "kv/data/atlas/comms/atlasbot-credentials-runtime" -}}{{ index .Data.data "bot-password" }}{{- end -}} + vault.hashicorp.com/agent-inject-secret-atlasbot-credentials-runtime__seeder-password: "kv/data/atlas/comms/atlasbot-credentials-runtime" + vault.hashicorp.com/agent-inject-template-atlasbot-credentials-runtime__seeder-password: | + {{- with secret "kv/data/atlas/comms/atlasbot-credentials-runtime" -}}{{ index .Data.data "seeder-password" }}{{- end -}} + vault.hashicorp.com/agent-inject-secret-chat-ai-keys-runtime__matrix: "kv/data/atlas/shared/chat-ai-keys-runtime" + vault.hashicorp.com/agent-inject-template-chat-ai-keys-runtime__matrix: | + {{- with secret "kv/data/atlas/shared/chat-ai-keys-runtime" -}}{{ .Data.data.matrix }}{{- end -}} + vault.hashicorp.com/agent-inject-secret-chat-ai-keys-runtime__homepage: "kv/data/atlas/shared/chat-ai-keys-runtime" + vault.hashicorp.com/agent-inject-template-chat-ai-keys-runtime__homepage: | + {{- with secret "kv/data/atlas/shared/chat-ai-keys-runtime" -}}{{ .Data.data.homepage }}{{- end -}} + vault.hashicorp.com/agent-inject-secret-mas-admin-client-runtime__client_secret: "kv/data/atlas/comms/mas-admin-client-runtime" + vault.hashicorp.com/agent-inject-template-mas-admin-client-runtime__client_secret: | + {{- with secret "kv/data/atlas/comms/mas-admin-client-runtime" -}}{{ .Data.data.client_secret }}{{- end -}} + vault.hashicorp.com/agent-inject-secret-synapse-db__POSTGRES_PASSWORD: "kv/data/atlas/comms/synapse-db" + vault.hashicorp.com/agent-inject-template-synapse-db__POSTGRES_PASSWORD: | + {{- with secret "kv/data/atlas/comms/synapse-db" -}}{{ .Data.data.POSTGRES_PASSWORD }}{{- end -}} + vault.hashicorp.com/agent-inject-secret-mas-db__password: "kv/data/atlas/comms/mas-db" + vault.hashicorp.com/agent-inject-template-mas-db__password: | + {{- with secret "kv/data/atlas/comms/mas-db" -}}{{ .Data.data.password }}{{- end -}} + vault.hashicorp.com/agent-inject-secret-mas-secrets-runtime__matrix_shared_secret: 
"kv/data/atlas/comms/mas-secrets-runtime" + vault.hashicorp.com/agent-inject-template-mas-secrets-runtime__matrix_shared_secret: | + {{- with secret "kv/data/atlas/comms/mas-secrets-runtime" -}}{{ .Data.data.matrix_shared_secret }}{{- end -}} + vault.hashicorp.com/agent-inject-secret-mas-secrets-runtime__keycloak_client_secret: "kv/data/atlas/comms/mas-secrets-runtime" + vault.hashicorp.com/agent-inject-template-mas-secrets-runtime__keycloak_client_secret: | + {{- with secret "kv/data/atlas/comms/mas-secrets-runtime" -}}{{ .Data.data.keycloak_client_secret }}{{- end -}} + vault.hashicorp.com/agent-inject-secret-mas-secrets-runtime__encryption: "kv/data/atlas/comms/mas-secrets-runtime" + vault.hashicorp.com/agent-inject-template-mas-secrets-runtime__encryption: | + {{- with secret "kv/data/atlas/comms/mas-secrets-runtime" -}}{{ .Data.data.encryption }}{{- end -}} + vault.hashicorp.com/agent-inject-secret-mas-secrets-runtime__rsa_key: "kv/data/atlas/comms/mas-secrets-runtime" + vault.hashicorp.com/agent-inject-template-mas-secrets-runtime__rsa_key: | + {{- with secret "kv/data/atlas/comms/mas-secrets-runtime" -}}{{ .Data.data.rsa_key }}{{- end -}} labels: app: matrix-authentication-service spec: @@ -57,9 +99,6 @@ spec: - name: rendered mountPath: /rendered readOnly: false - - name: vault-secrets - mountPath: /vault/secrets - readOnly: true - name: vault-scripts mountPath: /vault/scripts readOnly: true @@ -114,12 +153,6 @@ spec: path: config.yaml - name: rendered emptyDir: {} - - name: vault-secrets - csi: - driver: secrets-store.csi.k8s.io - readOnly: true - volumeAttributes: - secretProviderClass: comms-vault - name: vault-scripts configMap: name: comms-vault-env diff --git a/services/comms/mas-local-users-ensure-job.yaml b/services/comms/mas-local-users-ensure-job.yaml index ab44505..3d7ef72 100644 --- a/services/comms/mas-local-users-ensure-job.yaml +++ b/services/comms/mas-local-users-ensure-job.yaml @@ -2,22 +2,53 @@ apiVersion: batch/v1 kind: Job metadata: - 
name: mas-local-users-ensure-7 + name: mas-local-users-ensure-8 namespace: comms spec: backoffLimit: 1 ttlSecondsAfterFinished: 3600 template: + metadata: + annotations: + vault.hashicorp.com/agent-inject: "true" + vault.hashicorp.com/role: "comms" + vault.hashicorp.com/agent-inject-secret-turn-shared-secret__TURN_STATIC_AUTH_SECRET: "kv/data/atlas/comms/turn-shared-secret" + vault.hashicorp.com/agent-inject-template-turn-shared-secret__TURN_STATIC_AUTH_SECRET: | + {{- with secret "kv/data/atlas/comms/turn-shared-secret" -}}{{ .Data.data.TURN_STATIC_AUTH_SECRET }}{{- end -}} + vault.hashicorp.com/agent-inject-secret-livekit-api__primary: "kv/data/atlas/comms/livekit-api" + vault.hashicorp.com/agent-inject-template-livekit-api__primary: | + {{- with secret "kv/data/atlas/comms/livekit-api" -}}{{ .Data.data.primary }}{{- end -}} + vault.hashicorp.com/agent-inject-secret-atlasbot-credentials-runtime__bot-password: "kv/data/atlas/comms/atlasbot-credentials-runtime" + vault.hashicorp.com/agent-inject-template-atlasbot-credentials-runtime__bot-password: | + {{- with secret "kv/data/atlas/comms/atlasbot-credentials-runtime" -}}{{ index .Data.data "bot-password" }}{{- end -}} + vault.hashicorp.com/agent-inject-secret-atlasbot-credentials-runtime__seeder-password: "kv/data/atlas/comms/atlasbot-credentials-runtime" + vault.hashicorp.com/agent-inject-template-atlasbot-credentials-runtime__seeder-password: | + {{- with secret "kv/data/atlas/comms/atlasbot-credentials-runtime" -}}{{ index .Data.data "seeder-password" }}{{- end -}} + vault.hashicorp.com/agent-inject-secret-chat-ai-keys-runtime__matrix: "kv/data/atlas/shared/chat-ai-keys-runtime" + vault.hashicorp.com/agent-inject-template-chat-ai-keys-runtime__matrix: | + {{- with secret "kv/data/atlas/shared/chat-ai-keys-runtime" -}}{{ .Data.data.matrix }}{{- end -}} + vault.hashicorp.com/agent-inject-secret-chat-ai-keys-runtime__homepage: "kv/data/atlas/shared/chat-ai-keys-runtime" + 
vault.hashicorp.com/agent-inject-template-chat-ai-keys-runtime__homepage: | + {{- with secret "kv/data/atlas/shared/chat-ai-keys-runtime" -}}{{ .Data.data.homepage }}{{- end -}} + vault.hashicorp.com/agent-inject-secret-mas-admin-client-runtime__client_secret: "kv/data/atlas/comms/mas-admin-client-runtime" + vault.hashicorp.com/agent-inject-template-mas-admin-client-runtime__client_secret: | + {{- with secret "kv/data/atlas/comms/mas-admin-client-runtime" -}}{{ .Data.data.client_secret }}{{- end -}} + vault.hashicorp.com/agent-inject-secret-synapse-db__POSTGRES_PASSWORD: "kv/data/atlas/comms/synapse-db" + vault.hashicorp.com/agent-inject-template-synapse-db__POSTGRES_PASSWORD: | + {{- with secret "kv/data/atlas/comms/synapse-db" -}}{{ .Data.data.POSTGRES_PASSWORD }}{{- end -}} + vault.hashicorp.com/agent-inject-secret-mas-db__password: "kv/data/atlas/comms/mas-db" + vault.hashicorp.com/agent-inject-template-mas-db__password: | + {{- with secret "kv/data/atlas/comms/mas-db" -}}{{ .Data.data.password }}{{- end -}} + vault.hashicorp.com/agent-inject-secret-mas-secrets-runtime__matrix_shared_secret: "kv/data/atlas/comms/mas-secrets-runtime" + vault.hashicorp.com/agent-inject-template-mas-secrets-runtime__matrix_shared_secret: | + {{- with secret "kv/data/atlas/comms/mas-secrets-runtime" -}}{{ .Data.data.matrix_shared_secret }}{{- end -}} + vault.hashicorp.com/agent-inject-secret-mas-secrets-runtime__keycloak_client_secret: "kv/data/atlas/comms/mas-secrets-runtime" + vault.hashicorp.com/agent-inject-template-mas-secrets-runtime__keycloak_client_secret: | + {{- with secret "kv/data/atlas/comms/mas-secrets-runtime" -}}{{ .Data.data.keycloak_client_secret }}{{- end -}} spec: restartPolicy: Never serviceAccountName: comms-vault volumes: - - name: vault-secrets - csi: - driver: secrets-store.csi.k8s.io - readOnly: true - volumeAttributes: - secretProviderClass: comms-vault - name: vault-scripts configMap: name: comms-vault-env @@ -26,9 +57,6 @@ spec: - name: ensure image: 
python:3.11-slim volumeMounts: - - name: vault-secrets - mountPath: /vault/secrets - readOnly: true - name: vault-scripts mountPath: /vault/scripts readOnly: true diff --git a/services/comms/othrys-kick-numeric-job.yaml b/services/comms/othrys-kick-numeric-job.yaml index 59ef560..979b670 100644 --- a/services/comms/othrys-kick-numeric-job.yaml +++ b/services/comms/othrys-kick-numeric-job.yaml @@ -2,11 +2,48 @@ apiVersion: batch/v1 kind: Job metadata: - name: othrys-kick-numeric-2 + name: othrys-kick-numeric-3 namespace: comms spec: backoffLimit: 0 template: + metadata: + annotations: + vault.hashicorp.com/agent-inject: "true" + vault.hashicorp.com/role: "comms" + vault.hashicorp.com/agent-inject-secret-turn-shared-secret__TURN_STATIC_AUTH_SECRET: "kv/data/atlas/comms/turn-shared-secret" + vault.hashicorp.com/agent-inject-template-turn-shared-secret__TURN_STATIC_AUTH_SECRET: | + {{- with secret "kv/data/atlas/comms/turn-shared-secret" -}}{{ .Data.data.TURN_STATIC_AUTH_SECRET }}{{- end -}} + vault.hashicorp.com/agent-inject-secret-livekit-api__primary: "kv/data/atlas/comms/livekit-api" + vault.hashicorp.com/agent-inject-template-livekit-api__primary: | + {{- with secret "kv/data/atlas/comms/livekit-api" -}}{{ .Data.data.primary }}{{- end -}} + vault.hashicorp.com/agent-inject-secret-atlasbot-credentials-runtime__bot-password: "kv/data/atlas/comms/atlasbot-credentials-runtime" + vault.hashicorp.com/agent-inject-template-atlasbot-credentials-runtime__bot-password: | + {{- with secret "kv/data/atlas/comms/atlasbot-credentials-runtime" -}}{{ index .Data.data "bot-password" }}{{- end -}} + vault.hashicorp.com/agent-inject-secret-atlasbot-credentials-runtime__seeder-password: "kv/data/atlas/comms/atlasbot-credentials-runtime" + vault.hashicorp.com/agent-inject-template-atlasbot-credentials-runtime__seeder-password: | + {{- with secret "kv/data/atlas/comms/atlasbot-credentials-runtime" -}}{{ index .Data.data "seeder-password" }}{{- end -}} + 
vault.hashicorp.com/agent-inject-secret-chat-ai-keys-runtime__matrix: "kv/data/atlas/shared/chat-ai-keys-runtime" + vault.hashicorp.com/agent-inject-template-chat-ai-keys-runtime__matrix: | + {{- with secret "kv/data/atlas/shared/chat-ai-keys-runtime" -}}{{ .Data.data.matrix }}{{- end -}} + vault.hashicorp.com/agent-inject-secret-chat-ai-keys-runtime__homepage: "kv/data/atlas/shared/chat-ai-keys-runtime" + vault.hashicorp.com/agent-inject-template-chat-ai-keys-runtime__homepage: | + {{- with secret "kv/data/atlas/shared/chat-ai-keys-runtime" -}}{{ .Data.data.homepage }}{{- end -}} + vault.hashicorp.com/agent-inject-secret-mas-admin-client-runtime__client_secret: "kv/data/atlas/comms/mas-admin-client-runtime" + vault.hashicorp.com/agent-inject-template-mas-admin-client-runtime__client_secret: | + {{- with secret "kv/data/atlas/comms/mas-admin-client-runtime" -}}{{ .Data.data.client_secret }}{{- end -}} + vault.hashicorp.com/agent-inject-secret-synapse-db__POSTGRES_PASSWORD: "kv/data/atlas/comms/synapse-db" + vault.hashicorp.com/agent-inject-template-synapse-db__POSTGRES_PASSWORD: | + {{- with secret "kv/data/atlas/comms/synapse-db" -}}{{ .Data.data.POSTGRES_PASSWORD }}{{- end -}} + vault.hashicorp.com/agent-inject-secret-mas-db__password: "kv/data/atlas/comms/mas-db" + vault.hashicorp.com/agent-inject-template-mas-db__password: | + {{- with secret "kv/data/atlas/comms/mas-db" -}}{{ .Data.data.password }}{{- end -}} + vault.hashicorp.com/agent-inject-secret-mas-secrets-runtime__matrix_shared_secret: "kv/data/atlas/comms/mas-secrets-runtime" + vault.hashicorp.com/agent-inject-template-mas-secrets-runtime__matrix_shared_secret: | + {{- with secret "kv/data/atlas/comms/mas-secrets-runtime" -}}{{ .Data.data.matrix_shared_secret }}{{- end -}} + vault.hashicorp.com/agent-inject-secret-mas-secrets-runtime__keycloak_client_secret: "kv/data/atlas/comms/mas-secrets-runtime" + vault.hashicorp.com/agent-inject-template-mas-secrets-runtime__keycloak_client_secret: | + {{- with 
secret "kv/data/atlas/comms/mas-secrets-runtime" -}}{{ .Data.data.keycloak_client_secret }}{{- end -}} spec: restartPolicy: Never serviceAccountName: comms-vault @@ -111,19 +148,10 @@ spec: kick(token, room_id, user_id) PY volumeMounts: - - name: vault-secrets - mountPath: /vault/secrets - readOnly: true - name: vault-scripts mountPath: /vault/scripts readOnly: true volumes: - - name: vault-secrets - csi: - driver: secrets-store.csi.k8s.io - readOnly: true - volumeAttributes: - secretProviderClass: comms-vault - name: vault-scripts configMap: name: comms-vault-env diff --git a/services/comms/pin-othrys-job.yaml b/services/comms/pin-othrys-job.yaml index babb6d1..a0699d6 100644 --- a/services/comms/pin-othrys-job.yaml +++ b/services/comms/pin-othrys-job.yaml @@ -14,6 +14,43 @@ spec: spec: backoffLimit: 0 template: + metadata: + annotations: + vault.hashicorp.com/agent-inject: "true" + vault.hashicorp.com/role: "comms" + vault.hashicorp.com/agent-inject-secret-turn-shared-secret__TURN_STATIC_AUTH_SECRET: "kv/data/atlas/comms/turn-shared-secret" + vault.hashicorp.com/agent-inject-template-turn-shared-secret__TURN_STATIC_AUTH_SECRET: | + {{- with secret "kv/data/atlas/comms/turn-shared-secret" -}}{{ .Data.data.TURN_STATIC_AUTH_SECRET }}{{- end -}} + vault.hashicorp.com/agent-inject-secret-livekit-api__primary: "kv/data/atlas/comms/livekit-api" + vault.hashicorp.com/agent-inject-template-livekit-api__primary: | + {{- with secret "kv/data/atlas/comms/livekit-api" -}}{{ .Data.data.primary }}{{- end -}} + vault.hashicorp.com/agent-inject-secret-atlasbot-credentials-runtime__bot-password: "kv/data/atlas/comms/atlasbot-credentials-runtime" + vault.hashicorp.com/agent-inject-template-atlasbot-credentials-runtime__bot-password: | + {{- with secret "kv/data/atlas/comms/atlasbot-credentials-runtime" -}}{{ index .Data.data "bot-password" }}{{- end -}} + vault.hashicorp.com/agent-inject-secret-atlasbot-credentials-runtime__seeder-password: 
"kv/data/atlas/comms/atlasbot-credentials-runtime" + vault.hashicorp.com/agent-inject-template-atlasbot-credentials-runtime__seeder-password: | + {{- with secret "kv/data/atlas/comms/atlasbot-credentials-runtime" -}}{{ index .Data.data "seeder-password" }}{{- end -}} + vault.hashicorp.com/agent-inject-secret-chat-ai-keys-runtime__matrix: "kv/data/atlas/shared/chat-ai-keys-runtime" + vault.hashicorp.com/agent-inject-template-chat-ai-keys-runtime__matrix: | + {{- with secret "kv/data/atlas/shared/chat-ai-keys-runtime" -}}{{ .Data.data.matrix }}{{- end -}} + vault.hashicorp.com/agent-inject-secret-chat-ai-keys-runtime__homepage: "kv/data/atlas/shared/chat-ai-keys-runtime" + vault.hashicorp.com/agent-inject-template-chat-ai-keys-runtime__homepage: | + {{- with secret "kv/data/atlas/shared/chat-ai-keys-runtime" -}}{{ .Data.data.homepage }}{{- end -}} + vault.hashicorp.com/agent-inject-secret-mas-admin-client-runtime__client_secret: "kv/data/atlas/comms/mas-admin-client-runtime" + vault.hashicorp.com/agent-inject-template-mas-admin-client-runtime__client_secret: | + {{- with secret "kv/data/atlas/comms/mas-admin-client-runtime" -}}{{ .Data.data.client_secret }}{{- end -}} + vault.hashicorp.com/agent-inject-secret-synapse-db__POSTGRES_PASSWORD: "kv/data/atlas/comms/synapse-db" + vault.hashicorp.com/agent-inject-template-synapse-db__POSTGRES_PASSWORD: | + {{- with secret "kv/data/atlas/comms/synapse-db" -}}{{ .Data.data.POSTGRES_PASSWORD }}{{- end -}} + vault.hashicorp.com/agent-inject-secret-mas-db__password: "kv/data/atlas/comms/mas-db" + vault.hashicorp.com/agent-inject-template-mas-db__password: | + {{- with secret "kv/data/atlas/comms/mas-db" -}}{{ .Data.data.password }}{{- end -}} + vault.hashicorp.com/agent-inject-secret-mas-secrets-runtime__matrix_shared_secret: "kv/data/atlas/comms/mas-secrets-runtime" + vault.hashicorp.com/agent-inject-template-mas-secrets-runtime__matrix_shared_secret: | + {{- with secret "kv/data/atlas/comms/mas-secrets-runtime" -}}{{ 
.Data.data.matrix_shared_secret }}{{- end -}} + vault.hashicorp.com/agent-inject-secret-mas-secrets-runtime__keycloak_client_secret: "kv/data/atlas/comms/mas-secrets-runtime" + vault.hashicorp.com/agent-inject-template-mas-secrets-runtime__keycloak_client_secret: | + {{- with secret "kv/data/atlas/comms/mas-secrets-runtime" -}}{{ .Data.data.keycloak_client_secret }}{{- end -}} spec: restartPolicy: Never serviceAccountName: comms-vault @@ -119,19 +156,10 @@ spec: pin(room_id, token, eid) PY volumeMounts: - - name: vault-secrets - mountPath: /vault/secrets - readOnly: true - name: vault-scripts mountPath: /vault/scripts readOnly: true volumes: - - name: vault-secrets - csi: - driver: secrets-store.csi.k8s.io - readOnly: true - volumeAttributes: - secretProviderClass: comms-vault - name: vault-scripts configMap: name: comms-vault-env diff --git a/services/comms/reset-othrys-room-job.yaml b/services/comms/reset-othrys-room-job.yaml index 6e20979..dfbad68 100644 --- a/services/comms/reset-othrys-room-job.yaml +++ b/services/comms/reset-othrys-room-job.yaml @@ -14,6 +14,43 @@ spec: spec: backoffLimit: 0 template: + metadata: + annotations: + vault.hashicorp.com/agent-inject: "true" + vault.hashicorp.com/role: "comms" + vault.hashicorp.com/agent-inject-secret-turn-shared-secret__TURN_STATIC_AUTH_SECRET: "kv/data/atlas/comms/turn-shared-secret" + vault.hashicorp.com/agent-inject-template-turn-shared-secret__TURN_STATIC_AUTH_SECRET: | + {{- with secret "kv/data/atlas/comms/turn-shared-secret" -}}{{ .Data.data.TURN_STATIC_AUTH_SECRET }}{{- end -}} + vault.hashicorp.com/agent-inject-secret-livekit-api__primary: "kv/data/atlas/comms/livekit-api" + vault.hashicorp.com/agent-inject-template-livekit-api__primary: | + {{- with secret "kv/data/atlas/comms/livekit-api" -}}{{ .Data.data.primary }}{{- end -}} + vault.hashicorp.com/agent-inject-secret-atlasbot-credentials-runtime__bot-password: "kv/data/atlas/comms/atlasbot-credentials-runtime" + 
vault.hashicorp.com/agent-inject-template-atlasbot-credentials-runtime__bot-password: | + {{- with secret "kv/data/atlas/comms/atlasbot-credentials-runtime" -}}{{ index .Data.data "bot-password" }}{{- end -}} + vault.hashicorp.com/agent-inject-secret-atlasbot-credentials-runtime__seeder-password: "kv/data/atlas/comms/atlasbot-credentials-runtime" + vault.hashicorp.com/agent-inject-template-atlasbot-credentials-runtime__seeder-password: | + {{- with secret "kv/data/atlas/comms/atlasbot-credentials-runtime" -}}{{ index .Data.data "seeder-password" }}{{- end -}} + vault.hashicorp.com/agent-inject-secret-chat-ai-keys-runtime__matrix: "kv/data/atlas/shared/chat-ai-keys-runtime" + vault.hashicorp.com/agent-inject-template-chat-ai-keys-runtime__matrix: | + {{- with secret "kv/data/atlas/shared/chat-ai-keys-runtime" -}}{{ .Data.data.matrix }}{{- end -}} + vault.hashicorp.com/agent-inject-secret-chat-ai-keys-runtime__homepage: "kv/data/atlas/shared/chat-ai-keys-runtime" + vault.hashicorp.com/agent-inject-template-chat-ai-keys-runtime__homepage: | + {{- with secret "kv/data/atlas/shared/chat-ai-keys-runtime" -}}{{ .Data.data.homepage }}{{- end -}} + vault.hashicorp.com/agent-inject-secret-mas-admin-client-runtime__client_secret: "kv/data/atlas/comms/mas-admin-client-runtime" + vault.hashicorp.com/agent-inject-template-mas-admin-client-runtime__client_secret: | + {{- with secret "kv/data/atlas/comms/mas-admin-client-runtime" -}}{{ .Data.data.client_secret }}{{- end -}} + vault.hashicorp.com/agent-inject-secret-synapse-db__POSTGRES_PASSWORD: "kv/data/atlas/comms/synapse-db" + vault.hashicorp.com/agent-inject-template-synapse-db__POSTGRES_PASSWORD: | + {{- with secret "kv/data/atlas/comms/synapse-db" -}}{{ .Data.data.POSTGRES_PASSWORD }}{{- end -}} + vault.hashicorp.com/agent-inject-secret-mas-db__password: "kv/data/atlas/comms/mas-db" + vault.hashicorp.com/agent-inject-template-mas-db__password: | + {{- with secret "kv/data/atlas/comms/mas-db" -}}{{ .Data.data.password }}{{- 
end -}} + vault.hashicorp.com/agent-inject-secret-mas-secrets-runtime__matrix_shared_secret: "kv/data/atlas/comms/mas-secrets-runtime" + vault.hashicorp.com/agent-inject-template-mas-secrets-runtime__matrix_shared_secret: | + {{- with secret "kv/data/atlas/comms/mas-secrets-runtime" -}}{{ .Data.data.matrix_shared_secret }}{{- end -}} + vault.hashicorp.com/agent-inject-secret-mas-secrets-runtime__keycloak_client_secret: "kv/data/atlas/comms/mas-secrets-runtime" + vault.hashicorp.com/agent-inject-template-mas-secrets-runtime__keycloak_client_secret: | + {{- with secret "kv/data/atlas/comms/mas-secrets-runtime" -}}{{ .Data.data.keycloak_client_secret }}{{- end -}} spec: restartPolicy: Never serviceAccountName: comms-vault @@ -262,19 +299,10 @@ spec: print(f"new_room_id={new_room_id}") PY volumeMounts: - - name: vault-secrets - mountPath: /vault/secrets - readOnly: true - name: vault-scripts mountPath: /vault/scripts readOnly: true volumes: - - name: vault-secrets - csi: - driver: secrets-store.csi.k8s.io - readOnly: true - volumeAttributes: - secretProviderClass: comms-vault - name: vault-scripts configMap: name: comms-vault-env diff --git a/services/comms/scripts/comms_vault_env.sh b/services/comms/scripts/comms_vault_env.sh index 98b3fc4..b14abdd 100644 --- a/services/comms/scripts/comms_vault_env.sh +++ b/services/comms/scripts/comms_vault_env.sh @@ -4,7 +4,7 @@ set -eu vault_dir="/vault/secrets" read_secret() { - cat "${vault_dir}/$1" + tr -d '\r\n' < "${vault_dir}/$1" } export TURN_STATIC_AUTH_SECRET="$(read_secret turn-shared-secret__TURN_STATIC_AUTH_SECRET)" diff --git a/services/comms/seed-othrys-room.yaml b/services/comms/seed-othrys-room.yaml index 0508e0e..2a926af 100644 --- a/services/comms/seed-othrys-room.yaml +++ b/services/comms/seed-othrys-room.yaml @@ -12,6 +12,43 @@ spec: spec: backoffLimit: 0 template: + metadata: + annotations: + vault.hashicorp.com/agent-inject: "true" + vault.hashicorp.com/role: "comms" + 
vault.hashicorp.com/agent-inject-secret-turn-shared-secret__TURN_STATIC_AUTH_SECRET: "kv/data/atlas/comms/turn-shared-secret" + vault.hashicorp.com/agent-inject-template-turn-shared-secret__TURN_STATIC_AUTH_SECRET: | + {{- with secret "kv/data/atlas/comms/turn-shared-secret" -}}{{ .Data.data.TURN_STATIC_AUTH_SECRET }}{{- end -}} + vault.hashicorp.com/agent-inject-secret-livekit-api__primary: "kv/data/atlas/comms/livekit-api" + vault.hashicorp.com/agent-inject-template-livekit-api__primary: | + {{- with secret "kv/data/atlas/comms/livekit-api" -}}{{ .Data.data.primary }}{{- end -}} + vault.hashicorp.com/agent-inject-secret-atlasbot-credentials-runtime__bot-password: "kv/data/atlas/comms/atlasbot-credentials-runtime" + vault.hashicorp.com/agent-inject-template-atlasbot-credentials-runtime__bot-password: | + {{- with secret "kv/data/atlas/comms/atlasbot-credentials-runtime" -}}{{ index .Data.data "bot-password" }}{{- end -}} + vault.hashicorp.com/agent-inject-secret-atlasbot-credentials-runtime__seeder-password: "kv/data/atlas/comms/atlasbot-credentials-runtime" + vault.hashicorp.com/agent-inject-template-atlasbot-credentials-runtime__seeder-password: | + {{- with secret "kv/data/atlas/comms/atlasbot-credentials-runtime" -}}{{ index .Data.data "seeder-password" }}{{- end -}} + vault.hashicorp.com/agent-inject-secret-chat-ai-keys-runtime__matrix: "kv/data/atlas/shared/chat-ai-keys-runtime" + vault.hashicorp.com/agent-inject-template-chat-ai-keys-runtime__matrix: | + {{- with secret "kv/data/atlas/shared/chat-ai-keys-runtime" -}}{{ .Data.data.matrix }}{{- end -}} + vault.hashicorp.com/agent-inject-secret-chat-ai-keys-runtime__homepage: "kv/data/atlas/shared/chat-ai-keys-runtime" + vault.hashicorp.com/agent-inject-template-chat-ai-keys-runtime__homepage: | + {{- with secret "kv/data/atlas/shared/chat-ai-keys-runtime" -}}{{ .Data.data.homepage }}{{- end -}} + vault.hashicorp.com/agent-inject-secret-mas-admin-client-runtime__client_secret: 
"kv/data/atlas/comms/mas-admin-client-runtime" + vault.hashicorp.com/agent-inject-template-mas-admin-client-runtime__client_secret: | + {{- with secret "kv/data/atlas/comms/mas-admin-client-runtime" -}}{{ .Data.data.client_secret }}{{- end -}} + vault.hashicorp.com/agent-inject-secret-synapse-db__POSTGRES_PASSWORD: "kv/data/atlas/comms/synapse-db" + vault.hashicorp.com/agent-inject-template-synapse-db__POSTGRES_PASSWORD: | + {{- with secret "kv/data/atlas/comms/synapse-db" -}}{{ .Data.data.POSTGRES_PASSWORD }}{{- end -}} + vault.hashicorp.com/agent-inject-secret-mas-db__password: "kv/data/atlas/comms/mas-db" + vault.hashicorp.com/agent-inject-template-mas-db__password: | + {{- with secret "kv/data/atlas/comms/mas-db" -}}{{ .Data.data.password }}{{- end -}} + vault.hashicorp.com/agent-inject-secret-mas-secrets-runtime__matrix_shared_secret: "kv/data/atlas/comms/mas-secrets-runtime" + vault.hashicorp.com/agent-inject-template-mas-secrets-runtime__matrix_shared_secret: | + {{- with secret "kv/data/atlas/comms/mas-secrets-runtime" -}}{{ .Data.data.matrix_shared_secret }}{{- end -}} + vault.hashicorp.com/agent-inject-secret-mas-secrets-runtime__keycloak_client_secret: "kv/data/atlas/comms/mas-secrets-runtime" + vault.hashicorp.com/agent-inject-template-mas-secrets-runtime__keycloak_client_secret: | + {{- with secret "kv/data/atlas/comms/mas-secrets-runtime" -}}{{ .Data.data.keycloak_client_secret }}{{- end -}} spec: restartPolicy: Never serviceAccountName: comms-vault @@ -132,9 +169,6 @@ spec: - name: synapse-config mountPath: /config readOnly: true - - name: vault-secrets - mountPath: /vault/secrets - readOnly: true - name: vault-scripts mountPath: /vault/scripts readOnly: true @@ -142,12 +176,6 @@ spec: - name: synapse-config secret: secretName: othrys-synapse-matrix-synapse - - name: vault-secrets - csi: - driver: secrets-store.csi.k8s.io - readOnly: true - volumeAttributes: - secretProviderClass: comms-vault - name: vault-scripts configMap: name: comms-vault-env 
diff --git a/services/comms/synapse-seeder-admin-ensure-job.yaml b/services/comms/synapse-seeder-admin-ensure-job.yaml index 3cccc5f..86068fd 100644 --- a/services/comms/synapse-seeder-admin-ensure-job.yaml +++ b/services/comms/synapse-seeder-admin-ensure-job.yaml @@ -2,11 +2,48 @@ apiVersion: batch/v1 kind: Job metadata: - name: synapse-seeder-admin-ensure-3 + name: synapse-seeder-admin-ensure-4 namespace: comms spec: backoffLimit: 2 template: + metadata: + annotations: + vault.hashicorp.com/agent-inject: "true" + vault.hashicorp.com/role: "comms" + vault.hashicorp.com/agent-inject-secret-turn-shared-secret__TURN_STATIC_AUTH_SECRET: "kv/data/atlas/comms/turn-shared-secret" + vault.hashicorp.com/agent-inject-template-turn-shared-secret__TURN_STATIC_AUTH_SECRET: | + {{- with secret "kv/data/atlas/comms/turn-shared-secret" -}}{{ .Data.data.TURN_STATIC_AUTH_SECRET }}{{- end -}} + vault.hashicorp.com/agent-inject-secret-livekit-api__primary: "kv/data/atlas/comms/livekit-api" + vault.hashicorp.com/agent-inject-template-livekit-api__primary: | + {{- with secret "kv/data/atlas/comms/livekit-api" -}}{{ .Data.data.primary }}{{- end -}} + vault.hashicorp.com/agent-inject-secret-atlasbot-credentials-runtime__bot-password: "kv/data/atlas/comms/atlasbot-credentials-runtime" + vault.hashicorp.com/agent-inject-template-atlasbot-credentials-runtime__bot-password: | + {{- with secret "kv/data/atlas/comms/atlasbot-credentials-runtime" -}}{{ index .Data.data "bot-password" }}{{- end -}} + vault.hashicorp.com/agent-inject-secret-atlasbot-credentials-runtime__seeder-password: "kv/data/atlas/comms/atlasbot-credentials-runtime" + vault.hashicorp.com/agent-inject-template-atlasbot-credentials-runtime__seeder-password: | + {{- with secret "kv/data/atlas/comms/atlasbot-credentials-runtime" -}}{{ index .Data.data "seeder-password" }}{{- end -}} + vault.hashicorp.com/agent-inject-secret-chat-ai-keys-runtime__matrix: "kv/data/atlas/shared/chat-ai-keys-runtime" + 
vault.hashicorp.com/agent-inject-template-chat-ai-keys-runtime__matrix: | + {{- with secret "kv/data/atlas/shared/chat-ai-keys-runtime" -}}{{ .Data.data.matrix }}{{- end -}} + vault.hashicorp.com/agent-inject-secret-chat-ai-keys-runtime__homepage: "kv/data/atlas/shared/chat-ai-keys-runtime" + vault.hashicorp.com/agent-inject-template-chat-ai-keys-runtime__homepage: | + {{- with secret "kv/data/atlas/shared/chat-ai-keys-runtime" -}}{{ .Data.data.homepage }}{{- end -}} + vault.hashicorp.com/agent-inject-secret-mas-admin-client-runtime__client_secret: "kv/data/atlas/comms/mas-admin-client-runtime" + vault.hashicorp.com/agent-inject-template-mas-admin-client-runtime__client_secret: | + {{- with secret "kv/data/atlas/comms/mas-admin-client-runtime" -}}{{ .Data.data.client_secret }}{{- end -}} + vault.hashicorp.com/agent-inject-secret-synapse-db__POSTGRES_PASSWORD: "kv/data/atlas/comms/synapse-db" + vault.hashicorp.com/agent-inject-template-synapse-db__POSTGRES_PASSWORD: | + {{- with secret "kv/data/atlas/comms/synapse-db" -}}{{ .Data.data.POSTGRES_PASSWORD }}{{- end -}} + vault.hashicorp.com/agent-inject-secret-mas-db__password: "kv/data/atlas/comms/mas-db" + vault.hashicorp.com/agent-inject-template-mas-db__password: | + {{- with secret "kv/data/atlas/comms/mas-db" -}}{{ .Data.data.password }}{{- end -}} + vault.hashicorp.com/agent-inject-secret-mas-secrets-runtime__matrix_shared_secret: "kv/data/atlas/comms/mas-secrets-runtime" + vault.hashicorp.com/agent-inject-template-mas-secrets-runtime__matrix_shared_secret: | + {{- with secret "kv/data/atlas/comms/mas-secrets-runtime" -}}{{ .Data.data.matrix_shared_secret }}{{- end -}} + vault.hashicorp.com/agent-inject-secret-mas-secrets-runtime__keycloak_client_secret: "kv/data/atlas/comms/mas-secrets-runtime" + vault.hashicorp.com/agent-inject-template-mas-secrets-runtime__keycloak_client_secret: | + {{- with secret "kv/data/atlas/comms/mas-secrets-runtime" -}}{{ .Data.data.keycloak_client_secret }}{{- end -}} spec: 
restartPolicy: OnFailure serviceAccountName: comms-vault @@ -32,19 +69,10 @@ spec: UPDATE users SET admin = 1 WHERE name = '@othrys-seeder:live.bstein.dev'; SQL volumeMounts: - - name: vault-secrets - mountPath: /vault/secrets - readOnly: true - name: vault-scripts mountPath: /vault/scripts readOnly: true volumes: - - name: vault-secrets - csi: - driver: secrets-store.csi.k8s.io - readOnly: true - volumeAttributes: - secretProviderClass: comms-vault - name: vault-scripts configMap: name: comms-vault-env diff --git a/services/comms/synapse-user-seed-job.yaml b/services/comms/synapse-user-seed-job.yaml index f895958..a85ba28 100644 --- a/services/comms/synapse-user-seed-job.yaml +++ b/services/comms/synapse-user-seed-job.yaml @@ -2,12 +2,49 @@ apiVersion: batch/v1 kind: Job metadata: - name: synapse-user-seed-3 + name: synapse-user-seed-4 namespace: comms spec: backoffLimit: 1 ttlSecondsAfterFinished: 3600 template: + metadata: + annotations: + vault.hashicorp.com/agent-inject: "true" + vault.hashicorp.com/role: "comms" + vault.hashicorp.com/agent-inject-secret-turn-shared-secret__TURN_STATIC_AUTH_SECRET: "kv/data/atlas/comms/turn-shared-secret" + vault.hashicorp.com/agent-inject-template-turn-shared-secret__TURN_STATIC_AUTH_SECRET: | + {{- with secret "kv/data/atlas/comms/turn-shared-secret" -}}{{ .Data.data.TURN_STATIC_AUTH_SECRET }}{{- end -}} + vault.hashicorp.com/agent-inject-secret-livekit-api__primary: "kv/data/atlas/comms/livekit-api" + vault.hashicorp.com/agent-inject-template-livekit-api__primary: | + {{- with secret "kv/data/atlas/comms/livekit-api" -}}{{ .Data.data.primary }}{{- end -}} + vault.hashicorp.com/agent-inject-secret-atlasbot-credentials-runtime__bot-password: "kv/data/atlas/comms/atlasbot-credentials-runtime" + vault.hashicorp.com/agent-inject-template-atlasbot-credentials-runtime__bot-password: | + {{- with secret "kv/data/atlas/comms/atlasbot-credentials-runtime" -}}{{ index .Data.data "bot-password" }}{{- end -}} + 
vault.hashicorp.com/agent-inject-secret-atlasbot-credentials-runtime__seeder-password: "kv/data/atlas/comms/atlasbot-credentials-runtime" + vault.hashicorp.com/agent-inject-template-atlasbot-credentials-runtime__seeder-password: | + {{- with secret "kv/data/atlas/comms/atlasbot-credentials-runtime" -}}{{ index .Data.data "seeder-password" }}{{- end -}} + vault.hashicorp.com/agent-inject-secret-chat-ai-keys-runtime__matrix: "kv/data/atlas/shared/chat-ai-keys-runtime" + vault.hashicorp.com/agent-inject-template-chat-ai-keys-runtime__matrix: | + {{- with secret "kv/data/atlas/shared/chat-ai-keys-runtime" -}}{{ .Data.data.matrix }}{{- end -}} + vault.hashicorp.com/agent-inject-secret-chat-ai-keys-runtime__homepage: "kv/data/atlas/shared/chat-ai-keys-runtime" + vault.hashicorp.com/agent-inject-template-chat-ai-keys-runtime__homepage: | + {{- with secret "kv/data/atlas/shared/chat-ai-keys-runtime" -}}{{ .Data.data.homepage }}{{- end -}} + vault.hashicorp.com/agent-inject-secret-mas-admin-client-runtime__client_secret: "kv/data/atlas/comms/mas-admin-client-runtime" + vault.hashicorp.com/agent-inject-template-mas-admin-client-runtime__client_secret: | + {{- with secret "kv/data/atlas/comms/mas-admin-client-runtime" -}}{{ .Data.data.client_secret }}{{- end -}} + vault.hashicorp.com/agent-inject-secret-synapse-db__POSTGRES_PASSWORD: "kv/data/atlas/comms/synapse-db" + vault.hashicorp.com/agent-inject-template-synapse-db__POSTGRES_PASSWORD: | + {{- with secret "kv/data/atlas/comms/synapse-db" -}}{{ .Data.data.POSTGRES_PASSWORD }}{{- end -}} + vault.hashicorp.com/agent-inject-secret-mas-db__password: "kv/data/atlas/comms/mas-db" + vault.hashicorp.com/agent-inject-template-mas-db__password: | + {{- with secret "kv/data/atlas/comms/mas-db" -}}{{ .Data.data.password }}{{- end -}} + vault.hashicorp.com/agent-inject-secret-mas-secrets-runtime__matrix_shared_secret: "kv/data/atlas/comms/mas-secrets-runtime" + 
vault.hashicorp.com/agent-inject-template-mas-secrets-runtime__matrix_shared_secret: | + {{- with secret "kv/data/atlas/comms/mas-secrets-runtime" -}}{{ .Data.data.matrix_shared_secret }}{{- end -}} + vault.hashicorp.com/agent-inject-secret-mas-secrets-runtime__keycloak_client_secret: "kv/data/atlas/comms/mas-secrets-runtime" + vault.hashicorp.com/agent-inject-template-mas-secrets-runtime__keycloak_client_secret: | + {{- with secret "kv/data/atlas/comms/mas-secrets-runtime" -}}{{ .Data.data.keycloak_client_secret }}{{- end -}} spec: restartPolicy: Never serviceAccountName: comms-vault @@ -106,19 +143,10 @@ spec: conn.close() PY volumeMounts: - - name: vault-secrets - mountPath: /vault/secrets - readOnly: true - name: vault-scripts mountPath: /vault/scripts readOnly: true volumes: - - name: vault-secrets - csi: - driver: secrets-store.csi.k8s.io - readOnly: true - volumeAttributes: - secretProviderClass: comms-vault - name: vault-scripts configMap: name: comms-vault-env diff --git a/services/mailu/mailu-sync-cronjob.yaml b/services/mailu/mailu-sync-cronjob.yaml index 4d73afa..e4ef9be 100644 --- a/services/mailu/mailu-sync-cronjob.yaml +++ b/services/mailu/mailu-sync-cronjob.yaml @@ -10,6 +10,25 @@ spec: jobTemplate: spec: template: + metadata: + annotations: + vault.hashicorp.com/agent-inject: "true" + vault.hashicorp.com/role: "mailu-mailserver" + vault.hashicorp.com/agent-inject-secret-mailu-db-secret__database: "kv/data/atlas/mailu/mailu-db-secret" + vault.hashicorp.com/agent-inject-template-mailu-db-secret__database: | + {{- with secret "kv/data/atlas/mailu/mailu-db-secret" -}}{{ .Data.data.database }}{{- end -}} + vault.hashicorp.com/agent-inject-secret-mailu-db-secret__username: "kv/data/atlas/mailu/mailu-db-secret" + vault.hashicorp.com/agent-inject-template-mailu-db-secret__username: | + {{- with secret "kv/data/atlas/mailu/mailu-db-secret" -}}{{ .Data.data.username }}{{- end -}} + vault.hashicorp.com/agent-inject-secret-mailu-db-secret__password: 
"kv/data/atlas/mailu/mailu-db-secret" + vault.hashicorp.com/agent-inject-template-mailu-db-secret__password: | + {{- with secret "kv/data/atlas/mailu/mailu-db-secret" -}}{{ .Data.data.password }}{{- end -}} + vault.hashicorp.com/agent-inject-secret-mailu-sync-credentials__client-id: "kv/data/atlas/mailu/mailu-sync-credentials" + vault.hashicorp.com/agent-inject-template-mailu-sync-credentials__client-id: | + {{- with secret "kv/data/atlas/mailu/mailu-sync-credentials" -}}{{ index .Data.data "client-id" }}{{- end -}} + vault.hashicorp.com/agent-inject-secret-mailu-sync-credentials__client-secret: "kv/data/atlas/mailu/mailu-sync-credentials" + vault.hashicorp.com/agent-inject-template-mailu-sync-credentials__client-secret: | + {{- with secret "kv/data/atlas/mailu/mailu-sync-credentials" -}}{{ index .Data.data "client-secret" }}{{- end -}} spec: restartPolicy: OnFailure serviceAccountName: mailu-vault-sync @@ -41,9 +60,6 @@ spec: - name: sync-script mountPath: /app/sync.py subPath: sync.py - - name: vault-secrets - mountPath: /vault/secrets - readOnly: true - name: vault-scripts mountPath: /vault/scripts readOnly: true @@ -59,12 +75,6 @@ spec: configMap: name: mailu-sync-script defaultMode: 0444 - - name: vault-secrets - csi: - driver: secrets-store.csi.k8s.io - readOnly: true - volumeAttributes: - secretProviderClass: mailu-vault - name: vault-scripts configMap: name: mailu-vault-env diff --git a/services/mailu/mailu-sync-job.yaml b/services/mailu/mailu-sync-job.yaml index 370f212..b1cee93 100644 --- a/services/mailu/mailu-sync-job.yaml +++ b/services/mailu/mailu-sync-job.yaml @@ -2,10 +2,29 @@ apiVersion: batch/v1 kind: Job metadata: - name: mailu-sync-2 + name: mailu-sync-3 namespace: mailu-mailserver spec: template: + metadata: + annotations: + vault.hashicorp.com/agent-inject: "true" + vault.hashicorp.com/role: "mailu-mailserver" + vault.hashicorp.com/agent-inject-secret-mailu-db-secret__database: "kv/data/atlas/mailu/mailu-db-secret" + 
vault.hashicorp.com/agent-inject-template-mailu-db-secret__database: | + {{- with secret "kv/data/atlas/mailu/mailu-db-secret" -}}{{ .Data.data.database }}{{- end -}} + vault.hashicorp.com/agent-inject-secret-mailu-db-secret__username: "kv/data/atlas/mailu/mailu-db-secret" + vault.hashicorp.com/agent-inject-template-mailu-db-secret__username: | + {{- with secret "kv/data/atlas/mailu/mailu-db-secret" -}}{{ .Data.data.username }}{{- end -}} + vault.hashicorp.com/agent-inject-secret-mailu-db-secret__password: "kv/data/atlas/mailu/mailu-db-secret" + vault.hashicorp.com/agent-inject-template-mailu-db-secret__password: | + {{- with secret "kv/data/atlas/mailu/mailu-db-secret" -}}{{ .Data.data.password }}{{- end -}} + vault.hashicorp.com/agent-inject-secret-mailu-sync-credentials__client-id: "kv/data/atlas/mailu/mailu-sync-credentials" + vault.hashicorp.com/agent-inject-template-mailu-sync-credentials__client-id: | + {{- with secret "kv/data/atlas/mailu/mailu-sync-credentials" -}}{{ index .Data.data "client-id" }}{{- end -}} + vault.hashicorp.com/agent-inject-secret-mailu-sync-credentials__client-secret: "kv/data/atlas/mailu/mailu-sync-credentials" + vault.hashicorp.com/agent-inject-template-mailu-sync-credentials__client-secret: | + {{- with secret "kv/data/atlas/mailu/mailu-sync-credentials" -}}{{ index .Data.data "client-secret" }}{{- end -}} spec: restartPolicy: OnFailure serviceAccountName: mailu-vault-sync @@ -37,9 +56,6 @@ spec: - name: sync-script mountPath: /app/sync.py subPath: sync.py - - name: vault-secrets - mountPath: /vault/secrets - readOnly: true - name: vault-scripts mountPath: /vault/scripts readOnly: true @@ -55,12 +71,6 @@ spec: configMap: name: mailu-sync-script defaultMode: 0444 - - name: vault-secrets - csi: - driver: secrets-store.csi.k8s.io - readOnly: true - volumeAttributes: - secretProviderClass: mailu-vault - name: vault-scripts configMap: name: mailu-vault-env diff --git a/services/mailu/mailu-sync-listener.yaml 
b/services/mailu/mailu-sync-listener.yaml index f90164c..cfc915f 100644 --- a/services/mailu/mailu-sync-listener.yaml +++ b/services/mailu/mailu-sync-listener.yaml @@ -28,6 +28,24 @@ spec: metadata: labels: app: mailu-sync-listener + annotations: + vault.hashicorp.com/agent-inject: "true" + vault.hashicorp.com/role: "mailu-mailserver" + vault.hashicorp.com/agent-inject-secret-mailu-db-secret__database: "kv/data/atlas/mailu/mailu-db-secret" + vault.hashicorp.com/agent-inject-template-mailu-db-secret__database: | + {{- with secret "kv/data/atlas/mailu/mailu-db-secret" -}}{{ .Data.data.database }}{{- end -}} + vault.hashicorp.com/agent-inject-secret-mailu-db-secret__username: "kv/data/atlas/mailu/mailu-db-secret" + vault.hashicorp.com/agent-inject-template-mailu-db-secret__username: | + {{- with secret "kv/data/atlas/mailu/mailu-db-secret" -}}{{ .Data.data.username }}{{- end -}} + vault.hashicorp.com/agent-inject-secret-mailu-db-secret__password: "kv/data/atlas/mailu/mailu-db-secret" + vault.hashicorp.com/agent-inject-template-mailu-db-secret__password: | + {{- with secret "kv/data/atlas/mailu/mailu-db-secret" -}}{{ .Data.data.password }}{{- end -}} + vault.hashicorp.com/agent-inject-secret-mailu-sync-credentials__client-id: "kv/data/atlas/mailu/mailu-sync-credentials" + vault.hashicorp.com/agent-inject-template-mailu-sync-credentials__client-id: | + {{- with secret "kv/data/atlas/mailu/mailu-sync-credentials" -}}{{ index .Data.data "client-id" }}{{- end -}} + vault.hashicorp.com/agent-inject-secret-mailu-sync-credentials__client-secret: "kv/data/atlas/mailu/mailu-sync-credentials" + vault.hashicorp.com/agent-inject-template-mailu-sync-credentials__client-secret: | + {{- with secret "kv/data/atlas/mailu/mailu-sync-credentials" -}}{{ index .Data.data "client-secret" }}{{- end -}} spec: restartPolicy: Always serviceAccountName: mailu-vault-sync @@ -62,9 +80,6 @@ spec: - name: listener-script mountPath: /app/listener.py subPath: listener.py - - name: vault-secrets - 
mountPath: /vault/secrets - readOnly: true - name: vault-scripts mountPath: /vault/scripts readOnly: true @@ -84,12 +99,6 @@ spec: configMap: name: mailu-sync-listener defaultMode: 0444 - - name: vault-secrets - csi: - driver: secrets-store.csi.k8s.io - readOnly: true - volumeAttributes: - secretProviderClass: mailu-vault - name: vault-scripts configMap: name: mailu-vault-env diff --git a/services/mailu/scripts/mailu_vault_env.sh b/services/mailu/scripts/mailu_vault_env.sh index 082a51a..1ba7dce 100644 --- a/services/mailu/scripts/mailu_vault_env.sh +++ b/services/mailu/scripts/mailu_vault_env.sh @@ -4,7 +4,7 @@ set -eu vault_dir="/vault/secrets" read_secret() { - cat "${vault_dir}/$1" + tr -d '\r\n' < "${vault_dir}/$1" } export MAILU_DB_NAME="$(read_secret mailu-db-secret__database)" From 393916ded953444555173990883a756ecad6fdca Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Wed, 14 Jan 2026 14:21:58 -0300 Subject: [PATCH 057/270] comms: shorten vault inject file names --- services/comms/atlasbot-deployment.yaml | 44 ++++++------- services/comms/bstein-force-leave-job.yaml | 6 +- services/comms/coturn.yaml | 44 ++++++------- services/comms/guest-name-job.yaml | 46 +++++++------- services/comms/guest-register-deployment.yaml | 6 +- services/comms/livekit.yaml | 44 ++++++------- services/comms/mas-deployment.yaml | 62 +++++++++---------- .../comms/mas-local-users-ensure-job.yaml | 46 +++++++------- services/comms/othrys-kick-numeric-job.yaml | 44 ++++++------- services/comms/pin-othrys-job.yaml | 44 ++++++------- services/comms/reset-othrys-room-job.yaml | 44 ++++++------- services/comms/scripts/comms_vault_env.sh | 22 +++---- services/comms/secretproviderclass.yaml | 52 ++++++++-------- services/comms/seed-othrys-room.yaml | 44 ++++++------- .../synapse-seeder-admin-ensure-job.yaml | 44 ++++++------- services/comms/synapse-user-seed-job.yaml | 44 ++++++------- 16 files changed, 318 insertions(+), 318 deletions(-) diff --git 
a/services/comms/atlasbot-deployment.yaml b/services/comms/atlasbot-deployment.yaml index 5aa433f..4618053 100644 --- a/services/comms/atlasbot-deployment.yaml +++ b/services/comms/atlasbot-deployment.yaml @@ -19,38 +19,38 @@ spec: checksum/atlasbot-configmap: manual-atlasbot-4 vault.hashicorp.com/agent-inject: "true" vault.hashicorp.com/role: "comms" - vault.hashicorp.com/agent-inject-secret-turn-shared-secret__TURN_STATIC_AUTH_SECRET: "kv/data/atlas/comms/turn-shared-secret" - vault.hashicorp.com/agent-inject-template-turn-shared-secret__TURN_STATIC_AUTH_SECRET: | + vault.hashicorp.com/agent-inject-secret-turn-secret: "kv/data/atlas/comms/turn-shared-secret" + vault.hashicorp.com/agent-inject-template-turn-secret: | {{- with secret "kv/data/atlas/comms/turn-shared-secret" -}}{{ .Data.data.TURN_STATIC_AUTH_SECRET }}{{- end -}} - vault.hashicorp.com/agent-inject-secret-livekit-api__primary: "kv/data/atlas/comms/livekit-api" - vault.hashicorp.com/agent-inject-template-livekit-api__primary: | + vault.hashicorp.com/agent-inject-secret-livekit-primary: "kv/data/atlas/comms/livekit-api" + vault.hashicorp.com/agent-inject-template-livekit-primary: | {{- with secret "kv/data/atlas/comms/livekit-api" -}}{{ .Data.data.primary }}{{- end -}} - vault.hashicorp.com/agent-inject-secret-atlasbot-credentials-runtime__bot-password: "kv/data/atlas/comms/atlasbot-credentials-runtime" - vault.hashicorp.com/agent-inject-template-atlasbot-credentials-runtime__bot-password: | + vault.hashicorp.com/agent-inject-secret-bot-pass: "kv/data/atlas/comms/atlasbot-credentials-runtime" + vault.hashicorp.com/agent-inject-template-bot-pass: | {{- with secret "kv/data/atlas/comms/atlasbot-credentials-runtime" -}}{{ index .Data.data "bot-password" }}{{- end -}} - vault.hashicorp.com/agent-inject-secret-atlasbot-credentials-runtime__seeder-password: "kv/data/atlas/comms/atlasbot-credentials-runtime" - vault.hashicorp.com/agent-inject-template-atlasbot-credentials-runtime__seeder-password: | + 
vault.hashicorp.com/agent-inject-secret-seeder-pass: "kv/data/atlas/comms/atlasbot-credentials-runtime" + vault.hashicorp.com/agent-inject-template-seeder-pass: | {{- with secret "kv/data/atlas/comms/atlasbot-credentials-runtime" -}}{{ index .Data.data "seeder-password" }}{{- end -}} - vault.hashicorp.com/agent-inject-secret-chat-ai-keys-runtime__matrix: "kv/data/atlas/shared/chat-ai-keys-runtime" - vault.hashicorp.com/agent-inject-template-chat-ai-keys-runtime__matrix: | + vault.hashicorp.com/agent-inject-secret-chat-matrix: "kv/data/atlas/shared/chat-ai-keys-runtime" + vault.hashicorp.com/agent-inject-template-chat-matrix: | {{- with secret "kv/data/atlas/shared/chat-ai-keys-runtime" -}}{{ .Data.data.matrix }}{{- end -}} - vault.hashicorp.com/agent-inject-secret-chat-ai-keys-runtime__homepage: "kv/data/atlas/shared/chat-ai-keys-runtime" - vault.hashicorp.com/agent-inject-template-chat-ai-keys-runtime__homepage: | + vault.hashicorp.com/agent-inject-secret-chat-homepage: "kv/data/atlas/shared/chat-ai-keys-runtime" + vault.hashicorp.com/agent-inject-template-chat-homepage: | {{- with secret "kv/data/atlas/shared/chat-ai-keys-runtime" -}}{{ .Data.data.homepage }}{{- end -}} - vault.hashicorp.com/agent-inject-secret-mas-admin-client-runtime__client_secret: "kv/data/atlas/comms/mas-admin-client-runtime" - vault.hashicorp.com/agent-inject-template-mas-admin-client-runtime__client_secret: | + vault.hashicorp.com/agent-inject-secret-mas-admin-secret: "kv/data/atlas/comms/mas-admin-client-runtime" + vault.hashicorp.com/agent-inject-template-mas-admin-secret: | {{- with secret "kv/data/atlas/comms/mas-admin-client-runtime" -}}{{ .Data.data.client_secret }}{{- end -}} - vault.hashicorp.com/agent-inject-secret-synapse-db__POSTGRES_PASSWORD: "kv/data/atlas/comms/synapse-db" - vault.hashicorp.com/agent-inject-template-synapse-db__POSTGRES_PASSWORD: | + vault.hashicorp.com/agent-inject-secret-synapse-db-pass: "kv/data/atlas/comms/synapse-db" + 
vault.hashicorp.com/agent-inject-template-synapse-db-pass: | {{- with secret "kv/data/atlas/comms/synapse-db" -}}{{ .Data.data.POSTGRES_PASSWORD }}{{- end -}} - vault.hashicorp.com/agent-inject-secret-mas-db__password: "kv/data/atlas/comms/mas-db" - vault.hashicorp.com/agent-inject-template-mas-db__password: | + vault.hashicorp.com/agent-inject-secret-mas-db-pass: "kv/data/atlas/comms/mas-db" + vault.hashicorp.com/agent-inject-template-mas-db-pass: | {{- with secret "kv/data/atlas/comms/mas-db" -}}{{ .Data.data.password }}{{- end -}} - vault.hashicorp.com/agent-inject-secret-mas-secrets-runtime__matrix_shared_secret: "kv/data/atlas/comms/mas-secrets-runtime" - vault.hashicorp.com/agent-inject-template-mas-secrets-runtime__matrix_shared_secret: | + vault.hashicorp.com/agent-inject-secret-mas-matrix-shared: "kv/data/atlas/comms/mas-secrets-runtime" + vault.hashicorp.com/agent-inject-template-mas-matrix-shared: | {{- with secret "kv/data/atlas/comms/mas-secrets-runtime" -}}{{ .Data.data.matrix_shared_secret }}{{- end -}} - vault.hashicorp.com/agent-inject-secret-mas-secrets-runtime__keycloak_client_secret: "kv/data/atlas/comms/mas-secrets-runtime" - vault.hashicorp.com/agent-inject-template-mas-secrets-runtime__keycloak_client_secret: | + vault.hashicorp.com/agent-inject-secret-mas-kc-secret: "kv/data/atlas/comms/mas-secrets-runtime" + vault.hashicorp.com/agent-inject-template-mas-kc-secret: | {{- with secret "kv/data/atlas/comms/mas-secrets-runtime" -}}{{ .Data.data.keycloak_client_secret }}{{- end -}} spec: serviceAccountName: atlasbot diff --git a/services/comms/bstein-force-leave-job.yaml b/services/comms/bstein-force-leave-job.yaml index e694127..4d38349 100644 --- a/services/comms/bstein-force-leave-job.yaml +++ b/services/comms/bstein-force-leave-job.yaml @@ -11,8 +11,8 @@ spec: annotations: vault.hashicorp.com/agent-inject: "true" vault.hashicorp.com/role: "comms" - vault.hashicorp.com/agent-inject-secret-mas-admin-client-runtime__client_secret: 
"kv/data/atlas/comms/mas-admin-client-runtime" - vault.hashicorp.com/agent-inject-template-mas-admin-client-runtime__client_secret: | + vault.hashicorp.com/agent-inject-secret-mas-admin-secret: "kv/data/atlas/comms/mas-admin-client-runtime" + vault.hashicorp.com/agent-inject-template-mas-admin-secret: | {{- with secret "kv/data/atlas/comms/mas-admin-client-runtime" -}}{{ .Data.data.client_secret }}{{- end -}} spec: restartPolicy: Never @@ -26,7 +26,7 @@ spec: - name: MAS_ADMIN_CLIENT_ID value: 01KDXMVQBQ5JNY6SEJPZW6Z8BM - name: MAS_ADMIN_CLIENT_SECRET_FILE - value: /vault/secrets/mas-admin-client-runtime__client_secret + value: /vault/secrets/mas-admin-secret - name: MAS_TOKEN_URL value: http://matrix-authentication-service:8080/oauth2/token - name: MAS_ADMIN_API_BASE diff --git a/services/comms/coturn.yaml b/services/comms/coturn.yaml index 6c3f61c..9f3c64f 100644 --- a/services/comms/coturn.yaml +++ b/services/comms/coturn.yaml @@ -17,38 +17,38 @@ spec: annotations: vault.hashicorp.com/agent-inject: "true" vault.hashicorp.com/role: "comms" - vault.hashicorp.com/agent-inject-secret-turn-shared-secret__TURN_STATIC_AUTH_SECRET: "kv/data/atlas/comms/turn-shared-secret" - vault.hashicorp.com/agent-inject-template-turn-shared-secret__TURN_STATIC_AUTH_SECRET: | + vault.hashicorp.com/agent-inject-secret-turn-secret: "kv/data/atlas/comms/turn-shared-secret" + vault.hashicorp.com/agent-inject-template-turn-secret: | {{- with secret "kv/data/atlas/comms/turn-shared-secret" -}}{{ .Data.data.TURN_STATIC_AUTH_SECRET }}{{- end -}} - vault.hashicorp.com/agent-inject-secret-livekit-api__primary: "kv/data/atlas/comms/livekit-api" - vault.hashicorp.com/agent-inject-template-livekit-api__primary: | + vault.hashicorp.com/agent-inject-secret-livekit-primary: "kv/data/atlas/comms/livekit-api" + vault.hashicorp.com/agent-inject-template-livekit-primary: | {{- with secret "kv/data/atlas/comms/livekit-api" -}}{{ .Data.data.primary }}{{- end -}} - 
vault.hashicorp.com/agent-inject-secret-atlasbot-credentials-runtime__bot-password: "kv/data/atlas/comms/atlasbot-credentials-runtime" - vault.hashicorp.com/agent-inject-template-atlasbot-credentials-runtime__bot-password: | + vault.hashicorp.com/agent-inject-secret-bot-pass: "kv/data/atlas/comms/atlasbot-credentials-runtime" + vault.hashicorp.com/agent-inject-template-bot-pass: | {{- with secret "kv/data/atlas/comms/atlasbot-credentials-runtime" -}}{{ index .Data.data "bot-password" }}{{- end -}} - vault.hashicorp.com/agent-inject-secret-atlasbot-credentials-runtime__seeder-password: "kv/data/atlas/comms/atlasbot-credentials-runtime" - vault.hashicorp.com/agent-inject-template-atlasbot-credentials-runtime__seeder-password: | + vault.hashicorp.com/agent-inject-secret-seeder-pass: "kv/data/atlas/comms/atlasbot-credentials-runtime" + vault.hashicorp.com/agent-inject-template-seeder-pass: | {{- with secret "kv/data/atlas/comms/atlasbot-credentials-runtime" -}}{{ index .Data.data "seeder-password" }}{{- end -}} - vault.hashicorp.com/agent-inject-secret-chat-ai-keys-runtime__matrix: "kv/data/atlas/shared/chat-ai-keys-runtime" - vault.hashicorp.com/agent-inject-template-chat-ai-keys-runtime__matrix: | + vault.hashicorp.com/agent-inject-secret-chat-matrix: "kv/data/atlas/shared/chat-ai-keys-runtime" + vault.hashicorp.com/agent-inject-template-chat-matrix: | {{- with secret "kv/data/atlas/shared/chat-ai-keys-runtime" -}}{{ .Data.data.matrix }}{{- end -}} - vault.hashicorp.com/agent-inject-secret-chat-ai-keys-runtime__homepage: "kv/data/atlas/shared/chat-ai-keys-runtime" - vault.hashicorp.com/agent-inject-template-chat-ai-keys-runtime__homepage: | + vault.hashicorp.com/agent-inject-secret-chat-homepage: "kv/data/atlas/shared/chat-ai-keys-runtime" + vault.hashicorp.com/agent-inject-template-chat-homepage: | {{- with secret "kv/data/atlas/shared/chat-ai-keys-runtime" -}}{{ .Data.data.homepage }}{{- end -}} - 
vault.hashicorp.com/agent-inject-secret-mas-admin-client-runtime__client_secret: "kv/data/atlas/comms/mas-admin-client-runtime" - vault.hashicorp.com/agent-inject-template-mas-admin-client-runtime__client_secret: | + vault.hashicorp.com/agent-inject-secret-mas-admin-secret: "kv/data/atlas/comms/mas-admin-client-runtime" + vault.hashicorp.com/agent-inject-template-mas-admin-secret: | {{- with secret "kv/data/atlas/comms/mas-admin-client-runtime" -}}{{ .Data.data.client_secret }}{{- end -}} - vault.hashicorp.com/agent-inject-secret-synapse-db__POSTGRES_PASSWORD: "kv/data/atlas/comms/synapse-db" - vault.hashicorp.com/agent-inject-template-synapse-db__POSTGRES_PASSWORD: | + vault.hashicorp.com/agent-inject-secret-synapse-db-pass: "kv/data/atlas/comms/synapse-db" + vault.hashicorp.com/agent-inject-template-synapse-db-pass: | {{- with secret "kv/data/atlas/comms/synapse-db" -}}{{ .Data.data.POSTGRES_PASSWORD }}{{- end -}} - vault.hashicorp.com/agent-inject-secret-mas-db__password: "kv/data/atlas/comms/mas-db" - vault.hashicorp.com/agent-inject-template-mas-db__password: | + vault.hashicorp.com/agent-inject-secret-mas-db-pass: "kv/data/atlas/comms/mas-db" + vault.hashicorp.com/agent-inject-template-mas-db-pass: | {{- with secret "kv/data/atlas/comms/mas-db" -}}{{ .Data.data.password }}{{- end -}} - vault.hashicorp.com/agent-inject-secret-mas-secrets-runtime__matrix_shared_secret: "kv/data/atlas/comms/mas-secrets-runtime" - vault.hashicorp.com/agent-inject-template-mas-secrets-runtime__matrix_shared_secret: | + vault.hashicorp.com/agent-inject-secret-mas-matrix-shared: "kv/data/atlas/comms/mas-secrets-runtime" + vault.hashicorp.com/agent-inject-template-mas-matrix-shared: | {{- with secret "kv/data/atlas/comms/mas-secrets-runtime" -}}{{ .Data.data.matrix_shared_secret }}{{- end -}} - vault.hashicorp.com/agent-inject-secret-mas-secrets-runtime__keycloak_client_secret: "kv/data/atlas/comms/mas-secrets-runtime" - 
vault.hashicorp.com/agent-inject-template-mas-secrets-runtime__keycloak_client_secret: | + vault.hashicorp.com/agent-inject-secret-mas-kc-secret: "kv/data/atlas/comms/mas-secrets-runtime" + vault.hashicorp.com/agent-inject-template-mas-kc-secret: | {{- with secret "kv/data/atlas/comms/mas-secrets-runtime" -}}{{ .Data.data.keycloak_client_secret }}{{- end -}} spec: serviceAccountName: comms-vault diff --git a/services/comms/guest-name-job.yaml b/services/comms/guest-name-job.yaml index 7e58e46..00a1e47 100644 --- a/services/comms/guest-name-job.yaml +++ b/services/comms/guest-name-job.yaml @@ -18,38 +18,38 @@ spec: annotations: vault.hashicorp.com/agent-inject: "true" vault.hashicorp.com/role: "comms" - vault.hashicorp.com/agent-inject-secret-turn-shared-secret__TURN_STATIC_AUTH_SECRET: "kv/data/atlas/comms/turn-shared-secret" - vault.hashicorp.com/agent-inject-template-turn-shared-secret__TURN_STATIC_AUTH_SECRET: | + vault.hashicorp.com/agent-inject-secret-turn-secret: "kv/data/atlas/comms/turn-shared-secret" + vault.hashicorp.com/agent-inject-template-turn-secret: | {{- with secret "kv/data/atlas/comms/turn-shared-secret" -}}{{ .Data.data.TURN_STATIC_AUTH_SECRET }}{{- end -}} - vault.hashicorp.com/agent-inject-secret-livekit-api__primary: "kv/data/atlas/comms/livekit-api" - vault.hashicorp.com/agent-inject-template-livekit-api__primary: | + vault.hashicorp.com/agent-inject-secret-livekit-primary: "kv/data/atlas/comms/livekit-api" + vault.hashicorp.com/agent-inject-template-livekit-primary: | {{- with secret "kv/data/atlas/comms/livekit-api" -}}{{ .Data.data.primary }}{{- end -}} - vault.hashicorp.com/agent-inject-secret-atlasbot-credentials-runtime__bot-password: "kv/data/atlas/comms/atlasbot-credentials-runtime" - vault.hashicorp.com/agent-inject-template-atlasbot-credentials-runtime__bot-password: | + vault.hashicorp.com/agent-inject-secret-bot-pass: "kv/data/atlas/comms/atlasbot-credentials-runtime" + vault.hashicorp.com/agent-inject-template-bot-pass: | {{- 
with secret "kv/data/atlas/comms/atlasbot-credentials-runtime" -}}{{ index .Data.data "bot-password" }}{{- end -}} - vault.hashicorp.com/agent-inject-secret-atlasbot-credentials-runtime__seeder-password: "kv/data/atlas/comms/atlasbot-credentials-runtime" - vault.hashicorp.com/agent-inject-template-atlasbot-credentials-runtime__seeder-password: | + vault.hashicorp.com/agent-inject-secret-seeder-pass: "kv/data/atlas/comms/atlasbot-credentials-runtime" + vault.hashicorp.com/agent-inject-template-seeder-pass: | {{- with secret "kv/data/atlas/comms/atlasbot-credentials-runtime" -}}{{ index .Data.data "seeder-password" }}{{- end -}} - vault.hashicorp.com/agent-inject-secret-chat-ai-keys-runtime__matrix: "kv/data/atlas/shared/chat-ai-keys-runtime" - vault.hashicorp.com/agent-inject-template-chat-ai-keys-runtime__matrix: | + vault.hashicorp.com/agent-inject-secret-chat-matrix: "kv/data/atlas/shared/chat-ai-keys-runtime" + vault.hashicorp.com/agent-inject-template-chat-matrix: | {{- with secret "kv/data/atlas/shared/chat-ai-keys-runtime" -}}{{ .Data.data.matrix }}{{- end -}} - vault.hashicorp.com/agent-inject-secret-chat-ai-keys-runtime__homepage: "kv/data/atlas/shared/chat-ai-keys-runtime" - vault.hashicorp.com/agent-inject-template-chat-ai-keys-runtime__homepage: | + vault.hashicorp.com/agent-inject-secret-chat-homepage: "kv/data/atlas/shared/chat-ai-keys-runtime" + vault.hashicorp.com/agent-inject-template-chat-homepage: | {{- with secret "kv/data/atlas/shared/chat-ai-keys-runtime" -}}{{ .Data.data.homepage }}{{- end -}} - vault.hashicorp.com/agent-inject-secret-mas-admin-client-runtime__client_secret: "kv/data/atlas/comms/mas-admin-client-runtime" - vault.hashicorp.com/agent-inject-template-mas-admin-client-runtime__client_secret: | + vault.hashicorp.com/agent-inject-secret-mas-admin-secret: "kv/data/atlas/comms/mas-admin-client-runtime" + vault.hashicorp.com/agent-inject-template-mas-admin-secret: | {{- with secret "kv/data/atlas/comms/mas-admin-client-runtime" -}}{{ 
.Data.data.client_secret }}{{- end -}} - vault.hashicorp.com/agent-inject-secret-synapse-db__POSTGRES_PASSWORD: "kv/data/atlas/comms/synapse-db" - vault.hashicorp.com/agent-inject-template-synapse-db__POSTGRES_PASSWORD: | + vault.hashicorp.com/agent-inject-secret-synapse-db-pass: "kv/data/atlas/comms/synapse-db" + vault.hashicorp.com/agent-inject-template-synapse-db-pass: | {{- with secret "kv/data/atlas/comms/synapse-db" -}}{{ .Data.data.POSTGRES_PASSWORD }}{{- end -}} - vault.hashicorp.com/agent-inject-secret-mas-db__password: "kv/data/atlas/comms/mas-db" - vault.hashicorp.com/agent-inject-template-mas-db__password: | + vault.hashicorp.com/agent-inject-secret-mas-db-pass: "kv/data/atlas/comms/mas-db" + vault.hashicorp.com/agent-inject-template-mas-db-pass: | {{- with secret "kv/data/atlas/comms/mas-db" -}}{{ .Data.data.password }}{{- end -}} - vault.hashicorp.com/agent-inject-secret-mas-secrets-runtime__matrix_shared_secret: "kv/data/atlas/comms/mas-secrets-runtime" - vault.hashicorp.com/agent-inject-template-mas-secrets-runtime__matrix_shared_secret: | + vault.hashicorp.com/agent-inject-secret-mas-matrix-shared: "kv/data/atlas/comms/mas-secrets-runtime" + vault.hashicorp.com/agent-inject-template-mas-matrix-shared: | {{- with secret "kv/data/atlas/comms/mas-secrets-runtime" -}}{{ .Data.data.matrix_shared_secret }}{{- end -}} - vault.hashicorp.com/agent-inject-secret-mas-secrets-runtime__keycloak_client_secret: "kv/data/atlas/comms/mas-secrets-runtime" - vault.hashicorp.com/agent-inject-template-mas-secrets-runtime__keycloak_client_secret: | + vault.hashicorp.com/agent-inject-secret-mas-kc-secret: "kv/data/atlas/comms/mas-secrets-runtime" + vault.hashicorp.com/agent-inject-template-mas-kc-secret: | {{- with secret "kv/data/atlas/comms/mas-secrets-runtime" -}}{{ .Data.data.keycloak_client_secret }}{{- end -}} spec: restartPolicy: Never @@ -72,7 +72,7 @@ spec: - name: MAS_ADMIN_CLIENT_ID value: 01KDXMVQBQ5JNY6SEJPZW6Z8BM - name: MAS_ADMIN_CLIENT_SECRET_FILE - 
value: /vault/secrets/mas-admin-client-runtime__client_secret + value: /vault/secrets/mas-admin-secret - name: MAS_ADMIN_API_BASE value: http://matrix-authentication-service:8081/api/admin/v1 - name: MAS_TOKEN_URL diff --git a/services/comms/guest-register-deployment.yaml b/services/comms/guest-register-deployment.yaml index d3e218c..2888033 100644 --- a/services/comms/guest-register-deployment.yaml +++ b/services/comms/guest-register-deployment.yaml @@ -16,8 +16,8 @@ spec: checksum/config: guest-register-proxy-5 vault.hashicorp.com/agent-inject: "true" vault.hashicorp.com/role: "comms" - vault.hashicorp.com/agent-inject-secret-mas-admin-client-runtime__client_secret: "kv/data/atlas/comms/mas-admin-client-runtime" - vault.hashicorp.com/agent-inject-template-mas-admin-client-runtime__client_secret: | + vault.hashicorp.com/agent-inject-secret-mas-admin-secret: "kv/data/atlas/comms/mas-admin-client-runtime" + vault.hashicorp.com/agent-inject-template-mas-admin-secret: | {{- with secret "kv/data/atlas/comms/mas-admin-client-runtime" -}}{{ .Data.data.client_secret }}{{- end -}} labels: app.kubernetes.io/name: matrix-guest-register @@ -48,7 +48,7 @@ spec: - name: MAS_ADMIN_CLIENT_ID value: 01KDXMVQBQ5JNY6SEJPZW6Z8BM - name: MAS_ADMIN_CLIENT_SECRET_FILE - value: /vault/secrets/mas-admin-client-runtime__client_secret + value: /vault/secrets/mas-admin-secret - name: MAS_ADMIN_API_BASE value: http://matrix-authentication-service:8081/api/admin/v1 - name: SYNAPSE_BASE diff --git a/services/comms/livekit.yaml b/services/comms/livekit.yaml index b204d6e..e7f7769 100644 --- a/services/comms/livekit.yaml +++ b/services/comms/livekit.yaml @@ -17,38 +17,38 @@ spec: vault.hashicorp.com/agent-inject: "true" vault.hashicorp.com/agent-init-first: "true" vault.hashicorp.com/role: "comms" - vault.hashicorp.com/agent-inject-secret-turn-shared-secret__TURN_STATIC_AUTH_SECRET: "kv/data/atlas/comms/turn-shared-secret" - 
vault.hashicorp.com/agent-inject-template-turn-shared-secret__TURN_STATIC_AUTH_SECRET: | + vault.hashicorp.com/agent-inject-secret-turn-secret: "kv/data/atlas/comms/turn-shared-secret" + vault.hashicorp.com/agent-inject-template-turn-secret: | {{- with secret "kv/data/atlas/comms/turn-shared-secret" -}}{{ .Data.data.TURN_STATIC_AUTH_SECRET }}{{- end -}} - vault.hashicorp.com/agent-inject-secret-livekit-api__primary: "kv/data/atlas/comms/livekit-api" - vault.hashicorp.com/agent-inject-template-livekit-api__primary: | + vault.hashicorp.com/agent-inject-secret-livekit-primary: "kv/data/atlas/comms/livekit-api" + vault.hashicorp.com/agent-inject-template-livekit-primary: | {{- with secret "kv/data/atlas/comms/livekit-api" -}}{{ .Data.data.primary }}{{- end -}} - vault.hashicorp.com/agent-inject-secret-atlasbot-credentials-runtime__bot-password: "kv/data/atlas/comms/atlasbot-credentials-runtime" - vault.hashicorp.com/agent-inject-template-atlasbot-credentials-runtime__bot-password: | + vault.hashicorp.com/agent-inject-secret-bot-pass: "kv/data/atlas/comms/atlasbot-credentials-runtime" + vault.hashicorp.com/agent-inject-template-bot-pass: | {{- with secret "kv/data/atlas/comms/atlasbot-credentials-runtime" -}}{{ index .Data.data "bot-password" }}{{- end -}} - vault.hashicorp.com/agent-inject-secret-atlasbot-credentials-runtime__seeder-password: "kv/data/atlas/comms/atlasbot-credentials-runtime" - vault.hashicorp.com/agent-inject-template-atlasbot-credentials-runtime__seeder-password: | + vault.hashicorp.com/agent-inject-secret-seeder-pass: "kv/data/atlas/comms/atlasbot-credentials-runtime" + vault.hashicorp.com/agent-inject-template-seeder-pass: | {{- with secret "kv/data/atlas/comms/atlasbot-credentials-runtime" -}}{{ index .Data.data "seeder-password" }}{{- end -}} - vault.hashicorp.com/agent-inject-secret-chat-ai-keys-runtime__matrix: "kv/data/atlas/shared/chat-ai-keys-runtime" - vault.hashicorp.com/agent-inject-template-chat-ai-keys-runtime__matrix: | + 
vault.hashicorp.com/agent-inject-secret-chat-matrix: "kv/data/atlas/shared/chat-ai-keys-runtime" + vault.hashicorp.com/agent-inject-template-chat-matrix: | {{- with secret "kv/data/atlas/shared/chat-ai-keys-runtime" -}}{{ .Data.data.matrix }}{{- end -}} - vault.hashicorp.com/agent-inject-secret-chat-ai-keys-runtime__homepage: "kv/data/atlas/shared/chat-ai-keys-runtime" - vault.hashicorp.com/agent-inject-template-chat-ai-keys-runtime__homepage: | + vault.hashicorp.com/agent-inject-secret-chat-homepage: "kv/data/atlas/shared/chat-ai-keys-runtime" + vault.hashicorp.com/agent-inject-template-chat-homepage: | {{- with secret "kv/data/atlas/shared/chat-ai-keys-runtime" -}}{{ .Data.data.homepage }}{{- end -}} - vault.hashicorp.com/agent-inject-secret-mas-admin-client-runtime__client_secret: "kv/data/atlas/comms/mas-admin-client-runtime" - vault.hashicorp.com/agent-inject-template-mas-admin-client-runtime__client_secret: | + vault.hashicorp.com/agent-inject-secret-mas-admin-secret: "kv/data/atlas/comms/mas-admin-client-runtime" + vault.hashicorp.com/agent-inject-template-mas-admin-secret: | {{- with secret "kv/data/atlas/comms/mas-admin-client-runtime" -}}{{ .Data.data.client_secret }}{{- end -}} - vault.hashicorp.com/agent-inject-secret-synapse-db__POSTGRES_PASSWORD: "kv/data/atlas/comms/synapse-db" - vault.hashicorp.com/agent-inject-template-synapse-db__POSTGRES_PASSWORD: | + vault.hashicorp.com/agent-inject-secret-synapse-db-pass: "kv/data/atlas/comms/synapse-db" + vault.hashicorp.com/agent-inject-template-synapse-db-pass: | {{- with secret "kv/data/atlas/comms/synapse-db" -}}{{ .Data.data.POSTGRES_PASSWORD }}{{- end -}} - vault.hashicorp.com/agent-inject-secret-mas-db__password: "kv/data/atlas/comms/mas-db" - vault.hashicorp.com/agent-inject-template-mas-db__password: | + vault.hashicorp.com/agent-inject-secret-mas-db-pass: "kv/data/atlas/comms/mas-db" + vault.hashicorp.com/agent-inject-template-mas-db-pass: | {{- with secret "kv/data/atlas/comms/mas-db" -}}{{ 
.Data.data.password }}{{- end -}} - vault.hashicorp.com/agent-inject-secret-mas-secrets-runtime__matrix_shared_secret: "kv/data/atlas/comms/mas-secrets-runtime" - vault.hashicorp.com/agent-inject-template-mas-secrets-runtime__matrix_shared_secret: | + vault.hashicorp.com/agent-inject-secret-mas-matrix-shared: "kv/data/atlas/comms/mas-secrets-runtime" + vault.hashicorp.com/agent-inject-template-mas-matrix-shared: | {{- with secret "kv/data/atlas/comms/mas-secrets-runtime" -}}{{ .Data.data.matrix_shared_secret }}{{- end -}} - vault.hashicorp.com/agent-inject-secret-mas-secrets-runtime__keycloak_client_secret: "kv/data/atlas/comms/mas-secrets-runtime" - vault.hashicorp.com/agent-inject-template-mas-secrets-runtime__keycloak_client_secret: | + vault.hashicorp.com/agent-inject-secret-mas-kc-secret: "kv/data/atlas/comms/mas-secrets-runtime" + vault.hashicorp.com/agent-inject-template-mas-kc-secret: | {{- with secret "kv/data/atlas/comms/mas-secrets-runtime" -}}{{ .Data.data.keycloak_client_secret }}{{- end -}} labels: app: livekit diff --git a/services/comms/mas-deployment.yaml b/services/comms/mas-deployment.yaml index ef9a5ab..d8d06d9 100644 --- a/services/comms/mas-deployment.yaml +++ b/services/comms/mas-deployment.yaml @@ -17,44 +17,44 @@ spec: vault.hashicorp.com/agent-inject: "true" vault.hashicorp.com/agent-init-first: "true" vault.hashicorp.com/role: "comms" - vault.hashicorp.com/agent-inject-secret-turn-shared-secret__TURN_STATIC_AUTH_SECRET: "kv/data/atlas/comms/turn-shared-secret" - vault.hashicorp.com/agent-inject-template-turn-shared-secret__TURN_STATIC_AUTH_SECRET: | + vault.hashicorp.com/agent-inject-secret-turn-secret: "kv/data/atlas/comms/turn-shared-secret" + vault.hashicorp.com/agent-inject-template-turn-secret: | {{- with secret "kv/data/atlas/comms/turn-shared-secret" -}}{{ .Data.data.TURN_STATIC_AUTH_SECRET }}{{- end -}} - vault.hashicorp.com/agent-inject-secret-livekit-api__primary: "kv/data/atlas/comms/livekit-api" - 
vault.hashicorp.com/agent-inject-template-livekit-api__primary: | + vault.hashicorp.com/agent-inject-secret-livekit-primary: "kv/data/atlas/comms/livekit-api" + vault.hashicorp.com/agent-inject-template-livekit-primary: | {{- with secret "kv/data/atlas/comms/livekit-api" -}}{{ .Data.data.primary }}{{- end -}} - vault.hashicorp.com/agent-inject-secret-atlasbot-credentials-runtime__bot-password: "kv/data/atlas/comms/atlasbot-credentials-runtime" - vault.hashicorp.com/agent-inject-template-atlasbot-credentials-runtime__bot-password: | + vault.hashicorp.com/agent-inject-secret-bot-pass: "kv/data/atlas/comms/atlasbot-credentials-runtime" + vault.hashicorp.com/agent-inject-template-bot-pass: | {{- with secret "kv/data/atlas/comms/atlasbot-credentials-runtime" -}}{{ index .Data.data "bot-password" }}{{- end -}} - vault.hashicorp.com/agent-inject-secret-atlasbot-credentials-runtime__seeder-password: "kv/data/atlas/comms/atlasbot-credentials-runtime" - vault.hashicorp.com/agent-inject-template-atlasbot-credentials-runtime__seeder-password: | + vault.hashicorp.com/agent-inject-secret-seeder-pass: "kv/data/atlas/comms/atlasbot-credentials-runtime" + vault.hashicorp.com/agent-inject-template-seeder-pass: | {{- with secret "kv/data/atlas/comms/atlasbot-credentials-runtime" -}}{{ index .Data.data "seeder-password" }}{{- end -}} - vault.hashicorp.com/agent-inject-secret-chat-ai-keys-runtime__matrix: "kv/data/atlas/shared/chat-ai-keys-runtime" - vault.hashicorp.com/agent-inject-template-chat-ai-keys-runtime__matrix: | + vault.hashicorp.com/agent-inject-secret-chat-matrix: "kv/data/atlas/shared/chat-ai-keys-runtime" + vault.hashicorp.com/agent-inject-template-chat-matrix: | {{- with secret "kv/data/atlas/shared/chat-ai-keys-runtime" -}}{{ .Data.data.matrix }}{{- end -}} - vault.hashicorp.com/agent-inject-secret-chat-ai-keys-runtime__homepage: "kv/data/atlas/shared/chat-ai-keys-runtime" - vault.hashicorp.com/agent-inject-template-chat-ai-keys-runtime__homepage: | + 
vault.hashicorp.com/agent-inject-secret-chat-homepage: "kv/data/atlas/shared/chat-ai-keys-runtime" + vault.hashicorp.com/agent-inject-template-chat-homepage: | {{- with secret "kv/data/atlas/shared/chat-ai-keys-runtime" -}}{{ .Data.data.homepage }}{{- end -}} - vault.hashicorp.com/agent-inject-secret-mas-admin-client-runtime__client_secret: "kv/data/atlas/comms/mas-admin-client-runtime" - vault.hashicorp.com/agent-inject-template-mas-admin-client-runtime__client_secret: | + vault.hashicorp.com/agent-inject-secret-mas-admin-secret: "kv/data/atlas/comms/mas-admin-client-runtime" + vault.hashicorp.com/agent-inject-template-mas-admin-secret: | {{- with secret "kv/data/atlas/comms/mas-admin-client-runtime" -}}{{ .Data.data.client_secret }}{{- end -}} - vault.hashicorp.com/agent-inject-secret-synapse-db__POSTGRES_PASSWORD: "kv/data/atlas/comms/synapse-db" - vault.hashicorp.com/agent-inject-template-synapse-db__POSTGRES_PASSWORD: | + vault.hashicorp.com/agent-inject-secret-synapse-db-pass: "kv/data/atlas/comms/synapse-db" + vault.hashicorp.com/agent-inject-template-synapse-db-pass: | {{- with secret "kv/data/atlas/comms/synapse-db" -}}{{ .Data.data.POSTGRES_PASSWORD }}{{- end -}} - vault.hashicorp.com/agent-inject-secret-mas-db__password: "kv/data/atlas/comms/mas-db" - vault.hashicorp.com/agent-inject-template-mas-db__password: | + vault.hashicorp.com/agent-inject-secret-mas-db-pass: "kv/data/atlas/comms/mas-db" + vault.hashicorp.com/agent-inject-template-mas-db-pass: | {{- with secret "kv/data/atlas/comms/mas-db" -}}{{ .Data.data.password }}{{- end -}} - vault.hashicorp.com/agent-inject-secret-mas-secrets-runtime__matrix_shared_secret: "kv/data/atlas/comms/mas-secrets-runtime" - vault.hashicorp.com/agent-inject-template-mas-secrets-runtime__matrix_shared_secret: | + vault.hashicorp.com/agent-inject-secret-mas-matrix-shared: "kv/data/atlas/comms/mas-secrets-runtime" + vault.hashicorp.com/agent-inject-template-mas-matrix-shared: | {{- with secret 
"kv/data/atlas/comms/mas-secrets-runtime" -}}{{ .Data.data.matrix_shared_secret }}{{- end -}} - vault.hashicorp.com/agent-inject-secret-mas-secrets-runtime__keycloak_client_secret: "kv/data/atlas/comms/mas-secrets-runtime" - vault.hashicorp.com/agent-inject-template-mas-secrets-runtime__keycloak_client_secret: | + vault.hashicorp.com/agent-inject-secret-mas-kc-secret: "kv/data/atlas/comms/mas-secrets-runtime" + vault.hashicorp.com/agent-inject-template-mas-kc-secret: | {{- with secret "kv/data/atlas/comms/mas-secrets-runtime" -}}{{ .Data.data.keycloak_client_secret }}{{- end -}} - vault.hashicorp.com/agent-inject-secret-mas-secrets-runtime__encryption: "kv/data/atlas/comms/mas-secrets-runtime" - vault.hashicorp.com/agent-inject-template-mas-secrets-runtime__encryption: | + vault.hashicorp.com/agent-inject-secret-mas-encryption: "kv/data/atlas/comms/mas-secrets-runtime" + vault.hashicorp.com/agent-inject-template-mas-encryption: | {{- with secret "kv/data/atlas/comms/mas-secrets-runtime" -}}{{ .Data.data.encryption }}{{- end -}} - vault.hashicorp.com/agent-inject-secret-mas-secrets-runtime__rsa_key: "kv/data/atlas/comms/mas-secrets-runtime" - vault.hashicorp.com/agent-inject-template-mas-secrets-runtime__rsa_key: | + vault.hashicorp.com/agent-inject-secret-mas-rsa-key: "kv/data/atlas/comms/mas-secrets-runtime" + vault.hashicorp.com/agent-inject-template-mas-rsa-key: | {{- with secret "kv/data/atlas/comms/mas-secrets-runtime" -}}{{ .Data.data.rsa_key }}{{- end -}} labels: app: matrix-authentication-service @@ -119,23 +119,23 @@ spec: readOnly: true - name: vault-secrets mountPath: /etc/mas/secrets/encryption - subPath: mas-secrets-runtime__encryption + subPath: mas-encryption readOnly: true - name: vault-secrets mountPath: /etc/mas/secrets/matrix_shared_secret - subPath: mas-secrets-runtime__matrix_shared_secret + subPath: mas-matrix-shared readOnly: true - name: vault-secrets mountPath: /etc/mas/secrets/keycloak_client_secret - subPath: 
mas-secrets-runtime__keycloak_client_secret + subPath: mas-kc-secret readOnly: true - name: vault-secrets mountPath: /etc/mas/keys/rsa_key - subPath: mas-secrets-runtime__rsa_key + subPath: mas-rsa-key readOnly: true - name: vault-secrets mountPath: /etc/mas/admin-client/client_secret - subPath: mas-admin-client-runtime__client_secret + subPath: mas-admin-secret readOnly: true resources: requests: diff --git a/services/comms/mas-local-users-ensure-job.yaml b/services/comms/mas-local-users-ensure-job.yaml index 3d7ef72..3cf24f9 100644 --- a/services/comms/mas-local-users-ensure-job.yaml +++ b/services/comms/mas-local-users-ensure-job.yaml @@ -12,38 +12,38 @@ spec: annotations: vault.hashicorp.com/agent-inject: "true" vault.hashicorp.com/role: "comms" - vault.hashicorp.com/agent-inject-secret-turn-shared-secret__TURN_STATIC_AUTH_SECRET: "kv/data/atlas/comms/turn-shared-secret" - vault.hashicorp.com/agent-inject-template-turn-shared-secret__TURN_STATIC_AUTH_SECRET: | + vault.hashicorp.com/agent-inject-secret-turn-secret: "kv/data/atlas/comms/turn-shared-secret" + vault.hashicorp.com/agent-inject-template-turn-secret: | {{- with secret "kv/data/atlas/comms/turn-shared-secret" -}}{{ .Data.data.TURN_STATIC_AUTH_SECRET }}{{- end -}} - vault.hashicorp.com/agent-inject-secret-livekit-api__primary: "kv/data/atlas/comms/livekit-api" - vault.hashicorp.com/agent-inject-template-livekit-api__primary: | + vault.hashicorp.com/agent-inject-secret-livekit-primary: "kv/data/atlas/comms/livekit-api" + vault.hashicorp.com/agent-inject-template-livekit-primary: | {{- with secret "kv/data/atlas/comms/livekit-api" -}}{{ .Data.data.primary }}{{- end -}} - vault.hashicorp.com/agent-inject-secret-atlasbot-credentials-runtime__bot-password: "kv/data/atlas/comms/atlasbot-credentials-runtime" - vault.hashicorp.com/agent-inject-template-atlasbot-credentials-runtime__bot-password: | + vault.hashicorp.com/agent-inject-secret-bot-pass: "kv/data/atlas/comms/atlasbot-credentials-runtime" + 
vault.hashicorp.com/agent-inject-template-bot-pass: | {{- with secret "kv/data/atlas/comms/atlasbot-credentials-runtime" -}}{{ index .Data.data "bot-password" }}{{- end -}} - vault.hashicorp.com/agent-inject-secret-atlasbot-credentials-runtime__seeder-password: "kv/data/atlas/comms/atlasbot-credentials-runtime" - vault.hashicorp.com/agent-inject-template-atlasbot-credentials-runtime__seeder-password: | + vault.hashicorp.com/agent-inject-secret-seeder-pass: "kv/data/atlas/comms/atlasbot-credentials-runtime" + vault.hashicorp.com/agent-inject-template-seeder-pass: | {{- with secret "kv/data/atlas/comms/atlasbot-credentials-runtime" -}}{{ index .Data.data "seeder-password" }}{{- end -}} - vault.hashicorp.com/agent-inject-secret-chat-ai-keys-runtime__matrix: "kv/data/atlas/shared/chat-ai-keys-runtime" - vault.hashicorp.com/agent-inject-template-chat-ai-keys-runtime__matrix: | + vault.hashicorp.com/agent-inject-secret-chat-matrix: "kv/data/atlas/shared/chat-ai-keys-runtime" + vault.hashicorp.com/agent-inject-template-chat-matrix: | {{- with secret "kv/data/atlas/shared/chat-ai-keys-runtime" -}}{{ .Data.data.matrix }}{{- end -}} - vault.hashicorp.com/agent-inject-secret-chat-ai-keys-runtime__homepage: "kv/data/atlas/shared/chat-ai-keys-runtime" - vault.hashicorp.com/agent-inject-template-chat-ai-keys-runtime__homepage: | + vault.hashicorp.com/agent-inject-secret-chat-homepage: "kv/data/atlas/shared/chat-ai-keys-runtime" + vault.hashicorp.com/agent-inject-template-chat-homepage: | {{- with secret "kv/data/atlas/shared/chat-ai-keys-runtime" -}}{{ .Data.data.homepage }}{{- end -}} - vault.hashicorp.com/agent-inject-secret-mas-admin-client-runtime__client_secret: "kv/data/atlas/comms/mas-admin-client-runtime" - vault.hashicorp.com/agent-inject-template-mas-admin-client-runtime__client_secret: | + vault.hashicorp.com/agent-inject-secret-mas-admin-secret: "kv/data/atlas/comms/mas-admin-client-runtime" + vault.hashicorp.com/agent-inject-template-mas-admin-secret: | {{- with 
secret "kv/data/atlas/comms/mas-admin-client-runtime" -}}{{ .Data.data.client_secret }}{{- end -}} - vault.hashicorp.com/agent-inject-secret-synapse-db__POSTGRES_PASSWORD: "kv/data/atlas/comms/synapse-db" - vault.hashicorp.com/agent-inject-template-synapse-db__POSTGRES_PASSWORD: | + vault.hashicorp.com/agent-inject-secret-synapse-db-pass: "kv/data/atlas/comms/synapse-db" + vault.hashicorp.com/agent-inject-template-synapse-db-pass: | {{- with secret "kv/data/atlas/comms/synapse-db" -}}{{ .Data.data.POSTGRES_PASSWORD }}{{- end -}} - vault.hashicorp.com/agent-inject-secret-mas-db__password: "kv/data/atlas/comms/mas-db" - vault.hashicorp.com/agent-inject-template-mas-db__password: | + vault.hashicorp.com/agent-inject-secret-mas-db-pass: "kv/data/atlas/comms/mas-db" + vault.hashicorp.com/agent-inject-template-mas-db-pass: | {{- with secret "kv/data/atlas/comms/mas-db" -}}{{ .Data.data.password }}{{- end -}} - vault.hashicorp.com/agent-inject-secret-mas-secrets-runtime__matrix_shared_secret: "kv/data/atlas/comms/mas-secrets-runtime" - vault.hashicorp.com/agent-inject-template-mas-secrets-runtime__matrix_shared_secret: | + vault.hashicorp.com/agent-inject-secret-mas-matrix-shared: "kv/data/atlas/comms/mas-secrets-runtime" + vault.hashicorp.com/agent-inject-template-mas-matrix-shared: | {{- with secret "kv/data/atlas/comms/mas-secrets-runtime" -}}{{ .Data.data.matrix_shared_secret }}{{- end -}} - vault.hashicorp.com/agent-inject-secret-mas-secrets-runtime__keycloak_client_secret: "kv/data/atlas/comms/mas-secrets-runtime" - vault.hashicorp.com/agent-inject-template-mas-secrets-runtime__keycloak_client_secret: | + vault.hashicorp.com/agent-inject-secret-mas-kc-secret: "kv/data/atlas/comms/mas-secrets-runtime" + vault.hashicorp.com/agent-inject-template-mas-kc-secret: | {{- with secret "kv/data/atlas/comms/mas-secrets-runtime" -}}{{ .Data.data.keycloak_client_secret }}{{- end -}} spec: restartPolicy: Never @@ -64,7 +64,7 @@ spec: - name: MAS_ADMIN_CLIENT_ID value: 
01KDXMVQBQ5JNY6SEJPZW6Z8BM - name: MAS_ADMIN_CLIENT_SECRET_FILE - value: /vault/secrets/mas-admin-client-runtime__client_secret + value: /vault/secrets/mas-admin-secret - name: MAS_TOKEN_URL value: http://matrix-authentication-service:8080/oauth2/token - name: MAS_ADMIN_API_BASE diff --git a/services/comms/othrys-kick-numeric-job.yaml b/services/comms/othrys-kick-numeric-job.yaml index 979b670..fa9d62d 100644 --- a/services/comms/othrys-kick-numeric-job.yaml +++ b/services/comms/othrys-kick-numeric-job.yaml @@ -11,38 +11,38 @@ spec: annotations: vault.hashicorp.com/agent-inject: "true" vault.hashicorp.com/role: "comms" - vault.hashicorp.com/agent-inject-secret-turn-shared-secret__TURN_STATIC_AUTH_SECRET: "kv/data/atlas/comms/turn-shared-secret" - vault.hashicorp.com/agent-inject-template-turn-shared-secret__TURN_STATIC_AUTH_SECRET: | + vault.hashicorp.com/agent-inject-secret-turn-secret: "kv/data/atlas/comms/turn-shared-secret" + vault.hashicorp.com/agent-inject-template-turn-secret: | {{- with secret "kv/data/atlas/comms/turn-shared-secret" -}}{{ .Data.data.TURN_STATIC_AUTH_SECRET }}{{- end -}} - vault.hashicorp.com/agent-inject-secret-livekit-api__primary: "kv/data/atlas/comms/livekit-api" - vault.hashicorp.com/agent-inject-template-livekit-api__primary: | + vault.hashicorp.com/agent-inject-secret-livekit-primary: "kv/data/atlas/comms/livekit-api" + vault.hashicorp.com/agent-inject-template-livekit-primary: | {{- with secret "kv/data/atlas/comms/livekit-api" -}}{{ .Data.data.primary }}{{- end -}} - vault.hashicorp.com/agent-inject-secret-atlasbot-credentials-runtime__bot-password: "kv/data/atlas/comms/atlasbot-credentials-runtime" - vault.hashicorp.com/agent-inject-template-atlasbot-credentials-runtime__bot-password: | + vault.hashicorp.com/agent-inject-secret-bot-pass: "kv/data/atlas/comms/atlasbot-credentials-runtime" + vault.hashicorp.com/agent-inject-template-bot-pass: | {{- with secret "kv/data/atlas/comms/atlasbot-credentials-runtime" -}}{{ index .Data.data 
"bot-password" }}{{- end -}} - vault.hashicorp.com/agent-inject-secret-atlasbot-credentials-runtime__seeder-password: "kv/data/atlas/comms/atlasbot-credentials-runtime" - vault.hashicorp.com/agent-inject-template-atlasbot-credentials-runtime__seeder-password: | + vault.hashicorp.com/agent-inject-secret-seeder-pass: "kv/data/atlas/comms/atlasbot-credentials-runtime" + vault.hashicorp.com/agent-inject-template-seeder-pass: | {{- with secret "kv/data/atlas/comms/atlasbot-credentials-runtime" -}}{{ index .Data.data "seeder-password" }}{{- end -}} - vault.hashicorp.com/agent-inject-secret-chat-ai-keys-runtime__matrix: "kv/data/atlas/shared/chat-ai-keys-runtime" - vault.hashicorp.com/agent-inject-template-chat-ai-keys-runtime__matrix: | + vault.hashicorp.com/agent-inject-secret-chat-matrix: "kv/data/atlas/shared/chat-ai-keys-runtime" + vault.hashicorp.com/agent-inject-template-chat-matrix: | {{- with secret "kv/data/atlas/shared/chat-ai-keys-runtime" -}}{{ .Data.data.matrix }}{{- end -}} - vault.hashicorp.com/agent-inject-secret-chat-ai-keys-runtime__homepage: "kv/data/atlas/shared/chat-ai-keys-runtime" - vault.hashicorp.com/agent-inject-template-chat-ai-keys-runtime__homepage: | + vault.hashicorp.com/agent-inject-secret-chat-homepage: "kv/data/atlas/shared/chat-ai-keys-runtime" + vault.hashicorp.com/agent-inject-template-chat-homepage: | {{- with secret "kv/data/atlas/shared/chat-ai-keys-runtime" -}}{{ .Data.data.homepage }}{{- end -}} - vault.hashicorp.com/agent-inject-secret-mas-admin-client-runtime__client_secret: "kv/data/atlas/comms/mas-admin-client-runtime" - vault.hashicorp.com/agent-inject-template-mas-admin-client-runtime__client_secret: | + vault.hashicorp.com/agent-inject-secret-mas-admin-secret: "kv/data/atlas/comms/mas-admin-client-runtime" + vault.hashicorp.com/agent-inject-template-mas-admin-secret: | {{- with secret "kv/data/atlas/comms/mas-admin-client-runtime" -}}{{ .Data.data.client_secret }}{{- end -}} - 
vault.hashicorp.com/agent-inject-secret-synapse-db__POSTGRES_PASSWORD: "kv/data/atlas/comms/synapse-db" - vault.hashicorp.com/agent-inject-template-synapse-db__POSTGRES_PASSWORD: | + vault.hashicorp.com/agent-inject-secret-synapse-db-pass: "kv/data/atlas/comms/synapse-db" + vault.hashicorp.com/agent-inject-template-synapse-db-pass: | {{- with secret "kv/data/atlas/comms/synapse-db" -}}{{ .Data.data.POSTGRES_PASSWORD }}{{- end -}} - vault.hashicorp.com/agent-inject-secret-mas-db__password: "kv/data/atlas/comms/mas-db" - vault.hashicorp.com/agent-inject-template-mas-db__password: | + vault.hashicorp.com/agent-inject-secret-mas-db-pass: "kv/data/atlas/comms/mas-db" + vault.hashicorp.com/agent-inject-template-mas-db-pass: | {{- with secret "kv/data/atlas/comms/mas-db" -}}{{ .Data.data.password }}{{- end -}} - vault.hashicorp.com/agent-inject-secret-mas-secrets-runtime__matrix_shared_secret: "kv/data/atlas/comms/mas-secrets-runtime" - vault.hashicorp.com/agent-inject-template-mas-secrets-runtime__matrix_shared_secret: | + vault.hashicorp.com/agent-inject-secret-mas-matrix-shared: "kv/data/atlas/comms/mas-secrets-runtime" + vault.hashicorp.com/agent-inject-template-mas-matrix-shared: | {{- with secret "kv/data/atlas/comms/mas-secrets-runtime" -}}{{ .Data.data.matrix_shared_secret }}{{- end -}} - vault.hashicorp.com/agent-inject-secret-mas-secrets-runtime__keycloak_client_secret: "kv/data/atlas/comms/mas-secrets-runtime" - vault.hashicorp.com/agent-inject-template-mas-secrets-runtime__keycloak_client_secret: | + vault.hashicorp.com/agent-inject-secret-mas-kc-secret: "kv/data/atlas/comms/mas-secrets-runtime" + vault.hashicorp.com/agent-inject-template-mas-kc-secret: | {{- with secret "kv/data/atlas/comms/mas-secrets-runtime" -}}{{ .Data.data.keycloak_client_secret }}{{- end -}} spec: restartPolicy: Never diff --git a/services/comms/pin-othrys-job.yaml b/services/comms/pin-othrys-job.yaml index a0699d6..e56a71f 100644 --- a/services/comms/pin-othrys-job.yaml +++ 
b/services/comms/pin-othrys-job.yaml @@ -18,38 +18,38 @@ spec: annotations: vault.hashicorp.com/agent-inject: "true" vault.hashicorp.com/role: "comms" - vault.hashicorp.com/agent-inject-secret-turn-shared-secret__TURN_STATIC_AUTH_SECRET: "kv/data/atlas/comms/turn-shared-secret" - vault.hashicorp.com/agent-inject-template-turn-shared-secret__TURN_STATIC_AUTH_SECRET: | + vault.hashicorp.com/agent-inject-secret-turn-secret: "kv/data/atlas/comms/turn-shared-secret" + vault.hashicorp.com/agent-inject-template-turn-secret: | {{- with secret "kv/data/atlas/comms/turn-shared-secret" -}}{{ .Data.data.TURN_STATIC_AUTH_SECRET }}{{- end -}} - vault.hashicorp.com/agent-inject-secret-livekit-api__primary: "kv/data/atlas/comms/livekit-api" - vault.hashicorp.com/agent-inject-template-livekit-api__primary: | + vault.hashicorp.com/agent-inject-secret-livekit-primary: "kv/data/atlas/comms/livekit-api" + vault.hashicorp.com/agent-inject-template-livekit-primary: | {{- with secret "kv/data/atlas/comms/livekit-api" -}}{{ .Data.data.primary }}{{- end -}} - vault.hashicorp.com/agent-inject-secret-atlasbot-credentials-runtime__bot-password: "kv/data/atlas/comms/atlasbot-credentials-runtime" - vault.hashicorp.com/agent-inject-template-atlasbot-credentials-runtime__bot-password: | + vault.hashicorp.com/agent-inject-secret-bot-pass: "kv/data/atlas/comms/atlasbot-credentials-runtime" + vault.hashicorp.com/agent-inject-template-bot-pass: | {{- with secret "kv/data/atlas/comms/atlasbot-credentials-runtime" -}}{{ index .Data.data "bot-password" }}{{- end -}} - vault.hashicorp.com/agent-inject-secret-atlasbot-credentials-runtime__seeder-password: "kv/data/atlas/comms/atlasbot-credentials-runtime" - vault.hashicorp.com/agent-inject-template-atlasbot-credentials-runtime__seeder-password: | + vault.hashicorp.com/agent-inject-secret-seeder-pass: "kv/data/atlas/comms/atlasbot-credentials-runtime" + vault.hashicorp.com/agent-inject-template-seeder-pass: | {{- with secret 
"kv/data/atlas/comms/atlasbot-credentials-runtime" -}}{{ index .Data.data "seeder-password" }}{{- end -}} - vault.hashicorp.com/agent-inject-secret-chat-ai-keys-runtime__matrix: "kv/data/atlas/shared/chat-ai-keys-runtime" - vault.hashicorp.com/agent-inject-template-chat-ai-keys-runtime__matrix: | + vault.hashicorp.com/agent-inject-secret-chat-matrix: "kv/data/atlas/shared/chat-ai-keys-runtime" + vault.hashicorp.com/agent-inject-template-chat-matrix: | {{- with secret "kv/data/atlas/shared/chat-ai-keys-runtime" -}}{{ .Data.data.matrix }}{{- end -}} - vault.hashicorp.com/agent-inject-secret-chat-ai-keys-runtime__homepage: "kv/data/atlas/shared/chat-ai-keys-runtime" - vault.hashicorp.com/agent-inject-template-chat-ai-keys-runtime__homepage: | + vault.hashicorp.com/agent-inject-secret-chat-homepage: "kv/data/atlas/shared/chat-ai-keys-runtime" + vault.hashicorp.com/agent-inject-template-chat-homepage: | {{- with secret "kv/data/atlas/shared/chat-ai-keys-runtime" -}}{{ .Data.data.homepage }}{{- end -}} - vault.hashicorp.com/agent-inject-secret-mas-admin-client-runtime__client_secret: "kv/data/atlas/comms/mas-admin-client-runtime" - vault.hashicorp.com/agent-inject-template-mas-admin-client-runtime__client_secret: | + vault.hashicorp.com/agent-inject-secret-mas-admin-secret: "kv/data/atlas/comms/mas-admin-client-runtime" + vault.hashicorp.com/agent-inject-template-mas-admin-secret: | {{- with secret "kv/data/atlas/comms/mas-admin-client-runtime" -}}{{ .Data.data.client_secret }}{{- end -}} - vault.hashicorp.com/agent-inject-secret-synapse-db__POSTGRES_PASSWORD: "kv/data/atlas/comms/synapse-db" - vault.hashicorp.com/agent-inject-template-synapse-db__POSTGRES_PASSWORD: | + vault.hashicorp.com/agent-inject-secret-synapse-db-pass: "kv/data/atlas/comms/synapse-db" + vault.hashicorp.com/agent-inject-template-synapse-db-pass: | {{- with secret "kv/data/atlas/comms/synapse-db" -}}{{ .Data.data.POSTGRES_PASSWORD }}{{- end -}} - 
vault.hashicorp.com/agent-inject-secret-mas-db__password: "kv/data/atlas/comms/mas-db" - vault.hashicorp.com/agent-inject-template-mas-db__password: | + vault.hashicorp.com/agent-inject-secret-mas-db-pass: "kv/data/atlas/comms/mas-db" + vault.hashicorp.com/agent-inject-template-mas-db-pass: | {{- with secret "kv/data/atlas/comms/mas-db" -}}{{ .Data.data.password }}{{- end -}} - vault.hashicorp.com/agent-inject-secret-mas-secrets-runtime__matrix_shared_secret: "kv/data/atlas/comms/mas-secrets-runtime" - vault.hashicorp.com/agent-inject-template-mas-secrets-runtime__matrix_shared_secret: | + vault.hashicorp.com/agent-inject-secret-mas-matrix-shared: "kv/data/atlas/comms/mas-secrets-runtime" + vault.hashicorp.com/agent-inject-template-mas-matrix-shared: | {{- with secret "kv/data/atlas/comms/mas-secrets-runtime" -}}{{ .Data.data.matrix_shared_secret }}{{- end -}} - vault.hashicorp.com/agent-inject-secret-mas-secrets-runtime__keycloak_client_secret: "kv/data/atlas/comms/mas-secrets-runtime" - vault.hashicorp.com/agent-inject-template-mas-secrets-runtime__keycloak_client_secret: | + vault.hashicorp.com/agent-inject-secret-mas-kc-secret: "kv/data/atlas/comms/mas-secrets-runtime" + vault.hashicorp.com/agent-inject-template-mas-kc-secret: | {{- with secret "kv/data/atlas/comms/mas-secrets-runtime" -}}{{ .Data.data.keycloak_client_secret }}{{- end -}} spec: restartPolicy: Never diff --git a/services/comms/reset-othrys-room-job.yaml b/services/comms/reset-othrys-room-job.yaml index dfbad68..319e0a7 100644 --- a/services/comms/reset-othrys-room-job.yaml +++ b/services/comms/reset-othrys-room-job.yaml @@ -18,38 +18,38 @@ spec: annotations: vault.hashicorp.com/agent-inject: "true" vault.hashicorp.com/role: "comms" - vault.hashicorp.com/agent-inject-secret-turn-shared-secret__TURN_STATIC_AUTH_SECRET: "kv/data/atlas/comms/turn-shared-secret" - vault.hashicorp.com/agent-inject-template-turn-shared-secret__TURN_STATIC_AUTH_SECRET: | + 
vault.hashicorp.com/agent-inject-secret-turn-secret: "kv/data/atlas/comms/turn-shared-secret" + vault.hashicorp.com/agent-inject-template-turn-secret: | {{- with secret "kv/data/atlas/comms/turn-shared-secret" -}}{{ .Data.data.TURN_STATIC_AUTH_SECRET }}{{- end -}} - vault.hashicorp.com/agent-inject-secret-livekit-api__primary: "kv/data/atlas/comms/livekit-api" - vault.hashicorp.com/agent-inject-template-livekit-api__primary: | + vault.hashicorp.com/agent-inject-secret-livekit-primary: "kv/data/atlas/comms/livekit-api" + vault.hashicorp.com/agent-inject-template-livekit-primary: | {{- with secret "kv/data/atlas/comms/livekit-api" -}}{{ .Data.data.primary }}{{- end -}} - vault.hashicorp.com/agent-inject-secret-atlasbot-credentials-runtime__bot-password: "kv/data/atlas/comms/atlasbot-credentials-runtime" - vault.hashicorp.com/agent-inject-template-atlasbot-credentials-runtime__bot-password: | + vault.hashicorp.com/agent-inject-secret-bot-pass: "kv/data/atlas/comms/atlasbot-credentials-runtime" + vault.hashicorp.com/agent-inject-template-bot-pass: | {{- with secret "kv/data/atlas/comms/atlasbot-credentials-runtime" -}}{{ index .Data.data "bot-password" }}{{- end -}} - vault.hashicorp.com/agent-inject-secret-atlasbot-credentials-runtime__seeder-password: "kv/data/atlas/comms/atlasbot-credentials-runtime" - vault.hashicorp.com/agent-inject-template-atlasbot-credentials-runtime__seeder-password: | + vault.hashicorp.com/agent-inject-secret-seeder-pass: "kv/data/atlas/comms/atlasbot-credentials-runtime" + vault.hashicorp.com/agent-inject-template-seeder-pass: | {{- with secret "kv/data/atlas/comms/atlasbot-credentials-runtime" -}}{{ index .Data.data "seeder-password" }}{{- end -}} - vault.hashicorp.com/agent-inject-secret-chat-ai-keys-runtime__matrix: "kv/data/atlas/shared/chat-ai-keys-runtime" - vault.hashicorp.com/agent-inject-template-chat-ai-keys-runtime__matrix: | + vault.hashicorp.com/agent-inject-secret-chat-matrix: "kv/data/atlas/shared/chat-ai-keys-runtime" + 
vault.hashicorp.com/agent-inject-template-chat-matrix: | {{- with secret "kv/data/atlas/shared/chat-ai-keys-runtime" -}}{{ .Data.data.matrix }}{{- end -}} - vault.hashicorp.com/agent-inject-secret-chat-ai-keys-runtime__homepage: "kv/data/atlas/shared/chat-ai-keys-runtime" - vault.hashicorp.com/agent-inject-template-chat-ai-keys-runtime__homepage: | + vault.hashicorp.com/agent-inject-secret-chat-homepage: "kv/data/atlas/shared/chat-ai-keys-runtime" + vault.hashicorp.com/agent-inject-template-chat-homepage: | {{- with secret "kv/data/atlas/shared/chat-ai-keys-runtime" -}}{{ .Data.data.homepage }}{{- end -}} - vault.hashicorp.com/agent-inject-secret-mas-admin-client-runtime__client_secret: "kv/data/atlas/comms/mas-admin-client-runtime" - vault.hashicorp.com/agent-inject-template-mas-admin-client-runtime__client_secret: | + vault.hashicorp.com/agent-inject-secret-mas-admin-secret: "kv/data/atlas/comms/mas-admin-client-runtime" + vault.hashicorp.com/agent-inject-template-mas-admin-secret: | {{- with secret "kv/data/atlas/comms/mas-admin-client-runtime" -}}{{ .Data.data.client_secret }}{{- end -}} - vault.hashicorp.com/agent-inject-secret-synapse-db__POSTGRES_PASSWORD: "kv/data/atlas/comms/synapse-db" - vault.hashicorp.com/agent-inject-template-synapse-db__POSTGRES_PASSWORD: | + vault.hashicorp.com/agent-inject-secret-synapse-db-pass: "kv/data/atlas/comms/synapse-db" + vault.hashicorp.com/agent-inject-template-synapse-db-pass: | {{- with secret "kv/data/atlas/comms/synapse-db" -}}{{ .Data.data.POSTGRES_PASSWORD }}{{- end -}} - vault.hashicorp.com/agent-inject-secret-mas-db__password: "kv/data/atlas/comms/mas-db" - vault.hashicorp.com/agent-inject-template-mas-db__password: | + vault.hashicorp.com/agent-inject-secret-mas-db-pass: "kv/data/atlas/comms/mas-db" + vault.hashicorp.com/agent-inject-template-mas-db-pass: | {{- with secret "kv/data/atlas/comms/mas-db" -}}{{ .Data.data.password }}{{- end -}} - 
vault.hashicorp.com/agent-inject-secret-mas-secrets-runtime__matrix_shared_secret: "kv/data/atlas/comms/mas-secrets-runtime" - vault.hashicorp.com/agent-inject-template-mas-secrets-runtime__matrix_shared_secret: | + vault.hashicorp.com/agent-inject-secret-mas-matrix-shared: "kv/data/atlas/comms/mas-secrets-runtime" + vault.hashicorp.com/agent-inject-template-mas-matrix-shared: | {{- with secret "kv/data/atlas/comms/mas-secrets-runtime" -}}{{ .Data.data.matrix_shared_secret }}{{- end -}} - vault.hashicorp.com/agent-inject-secret-mas-secrets-runtime__keycloak_client_secret: "kv/data/atlas/comms/mas-secrets-runtime" - vault.hashicorp.com/agent-inject-template-mas-secrets-runtime__keycloak_client_secret: | + vault.hashicorp.com/agent-inject-secret-mas-kc-secret: "kv/data/atlas/comms/mas-secrets-runtime" + vault.hashicorp.com/agent-inject-template-mas-kc-secret: | {{- with secret "kv/data/atlas/comms/mas-secrets-runtime" -}}{{ .Data.data.keycloak_client_secret }}{{- end -}} spec: restartPolicy: Never diff --git a/services/comms/scripts/comms_vault_env.sh b/services/comms/scripts/comms_vault_env.sh index b14abdd..72319bc 100644 --- a/services/comms/scripts/comms_vault_env.sh +++ b/services/comms/scripts/comms_vault_env.sh @@ -7,21 +7,21 @@ read_secret() { tr -d '\r\n' < "${vault_dir}/$1" } -export TURN_STATIC_AUTH_SECRET="$(read_secret turn-shared-secret__TURN_STATIC_AUTH_SECRET)" +export TURN_STATIC_AUTH_SECRET="$(read_secret turn-secret)" export TURN_PASSWORD="${TURN_STATIC_AUTH_SECRET}" -export LIVEKIT_API_SECRET="$(read_secret livekit-api__primary)" +export LIVEKIT_API_SECRET="$(read_secret livekit-primary)" export LIVEKIT_SECRET="${LIVEKIT_API_SECRET}" -export BOT_PASS="$(read_secret atlasbot-credentials-runtime__bot-password)" -export SEEDER_PASS="$(read_secret atlasbot-credentials-runtime__seeder-password)" +export BOT_PASS="$(read_secret bot-pass)" +export SEEDER_PASS="$(read_secret seeder-pass)" -export CHAT_API_KEY="$(read_secret chat-ai-keys-runtime__matrix)" 
-export CHAT_API_HOMEPAGE="$(read_secret chat-ai-keys-runtime__homepage)" +export CHAT_API_KEY="$(read_secret chat-matrix)" +export CHAT_API_HOMEPAGE="$(read_secret chat-homepage)" -export MAS_ADMIN_CLIENT_SECRET_FILE="${vault_dir}/mas-admin-client-runtime__client_secret" -export PGPASSWORD="$(read_secret synapse-db__POSTGRES_PASSWORD)" +export MAS_ADMIN_CLIENT_SECRET_FILE="${vault_dir}/mas-admin-secret" +export PGPASSWORD="$(read_secret synapse-db-pass)" -export MAS_DB_PASSWORD="$(read_secret mas-db__password)" -export MATRIX_SHARED_SECRET="$(read_secret mas-secrets-runtime__matrix_shared_secret)" -export KEYCLOAK_CLIENT_SECRET="$(read_secret mas-secrets-runtime__keycloak_client_secret)" +export MAS_DB_PASSWORD="$(read_secret mas-db-pass)" +export MATRIX_SHARED_SECRET="$(read_secret mas-matrix-shared)" +export KEYCLOAK_CLIENT_SECRET="$(read_secret mas-kc-secret)" diff --git a/services/comms/secretproviderclass.yaml b/services/comms/secretproviderclass.yaml index ff3767f..70ca9b4 100644 --- a/services/comms/secretproviderclass.yaml +++ b/services/comms/secretproviderclass.yaml @@ -10,13 +10,13 @@ spec: vaultAddress: "http://vault.vault.svc.cluster.local:8200" roleName: "comms" objects: | - - objectName: "turn-shared-secret__TURN_STATIC_AUTH_SECRET" + - objectName: "turn-secret" secretPath: "kv/data/atlas/comms/turn-shared-secret" secretKey: "TURN_STATIC_AUTH_SECRET" - - objectName: "livekit-api__primary" + - objectName: "livekit-primary" secretPath: "kv/data/atlas/comms/livekit-api" secretKey: "primary" - - objectName: "synapse-db__POSTGRES_PASSWORD" + - objectName: "synapse-db-pass" secretPath: "kv/data/atlas/comms/synapse-db" secretKey: "POSTGRES_PASSWORD" - objectName: "synapse-redis__redis-password" @@ -25,34 +25,34 @@ spec: - objectName: "synapse-macaroon__macaroon_secret_key" secretPath: "kv/data/atlas/comms/synapse-macaroon" secretKey: "macaroon_secret_key" - - objectName: "atlasbot-credentials-runtime__bot-password" + - objectName: "bot-pass" secretPath: 
"kv/data/atlas/comms/atlasbot-credentials-runtime" secretKey: "bot-password" - - objectName: "atlasbot-credentials-runtime__seeder-password" + - objectName: "seeder-pass" secretPath: "kv/data/atlas/comms/atlasbot-credentials-runtime" secretKey: "seeder-password" - - objectName: "chat-ai-keys-runtime__matrix" + - objectName: "chat-matrix" secretPath: "kv/data/atlas/shared/chat-ai-keys-runtime" secretKey: "matrix" - - objectName: "chat-ai-keys-runtime__homepage" + - objectName: "chat-homepage" secretPath: "kv/data/atlas/shared/chat-ai-keys-runtime" secretKey: "homepage" - - objectName: "mas-admin-client-runtime__client_secret" + - objectName: "mas-admin-secret" secretPath: "kv/data/atlas/comms/mas-admin-client-runtime" secretKey: "client_secret" - - objectName: "mas-db__password" + - objectName: "mas-db-pass" secretPath: "kv/data/atlas/comms/mas-db" secretKey: "password" - - objectName: "mas-secrets-runtime__encryption" + - objectName: "mas-encryption" secretPath: "kv/data/atlas/comms/mas-secrets-runtime" secretKey: "encryption" - - objectName: "mas-secrets-runtime__matrix_shared_secret" + - objectName: "mas-matrix-shared" secretPath: "kv/data/atlas/comms/mas-secrets-runtime" secretKey: "matrix_shared_secret" - - objectName: "mas-secrets-runtime__keycloak_client_secret" + - objectName: "mas-kc-secret" secretPath: "kv/data/atlas/comms/mas-secrets-runtime" secretKey: "keycloak_client_secret" - - objectName: "mas-secrets-runtime__rsa_key" + - objectName: "mas-rsa-key" secretPath: "kv/data/atlas/comms/mas-secrets-runtime" secretKey: "rsa_key" - objectName: "othrys-synapse-signingkey__signing.key" @@ -68,17 +68,17 @@ spec: - secretName: turn-shared-secret type: Opaque data: - - objectName: turn-shared-secret__TURN_STATIC_AUTH_SECRET + - objectName: turn-secret key: TURN_STATIC_AUTH_SECRET - secretName: livekit-api type: Opaque data: - - objectName: livekit-api__primary + - objectName: livekit-primary key: primary - secretName: synapse-db type: Opaque data: - - objectName: 
synapse-db__POSTGRES_PASSWORD + - objectName: synapse-db-pass key: POSTGRES_PASSWORD - secretName: synapse-redis type: Opaque @@ -93,37 +93,37 @@ spec: - secretName: atlasbot-credentials-runtime type: Opaque data: - - objectName: atlasbot-credentials-runtime__bot-password + - objectName: bot-pass key: bot-password - - objectName: atlasbot-credentials-runtime__seeder-password + - objectName: seeder-pass key: seeder-password - secretName: chat-ai-keys-runtime type: Opaque data: - - objectName: chat-ai-keys-runtime__matrix + - objectName: chat-matrix key: matrix - - objectName: chat-ai-keys-runtime__homepage + - objectName: chat-homepage key: homepage - secretName: mas-admin-client-runtime type: Opaque data: - - objectName: mas-admin-client-runtime__client_secret + - objectName: mas-admin-secret key: client_secret - secretName: mas-db type: Opaque data: - - objectName: mas-db__password + - objectName: mas-db-pass key: password - secretName: mas-secrets-runtime type: Opaque data: - - objectName: mas-secrets-runtime__encryption + - objectName: mas-encryption key: encryption - - objectName: mas-secrets-runtime__matrix_shared_secret + - objectName: mas-matrix-shared key: matrix_shared_secret - - objectName: mas-secrets-runtime__keycloak_client_secret + - objectName: mas-kc-secret key: keycloak_client_secret - - objectName: mas-secrets-runtime__rsa_key + - objectName: mas-rsa-key key: rsa_key - secretName: othrys-synapse-signingkey type: Opaque diff --git a/services/comms/seed-othrys-room.yaml b/services/comms/seed-othrys-room.yaml index 2a926af..333ff35 100644 --- a/services/comms/seed-othrys-room.yaml +++ b/services/comms/seed-othrys-room.yaml @@ -16,38 +16,38 @@ spec: annotations: vault.hashicorp.com/agent-inject: "true" vault.hashicorp.com/role: "comms" - vault.hashicorp.com/agent-inject-secret-turn-shared-secret__TURN_STATIC_AUTH_SECRET: "kv/data/atlas/comms/turn-shared-secret" - vault.hashicorp.com/agent-inject-template-turn-shared-secret__TURN_STATIC_AUTH_SECRET: | 
+ vault.hashicorp.com/agent-inject-secret-turn-secret: "kv/data/atlas/comms/turn-shared-secret" + vault.hashicorp.com/agent-inject-template-turn-secret: | {{- with secret "kv/data/atlas/comms/turn-shared-secret" -}}{{ .Data.data.TURN_STATIC_AUTH_SECRET }}{{- end -}} - vault.hashicorp.com/agent-inject-secret-livekit-api__primary: "kv/data/atlas/comms/livekit-api" - vault.hashicorp.com/agent-inject-template-livekit-api__primary: | + vault.hashicorp.com/agent-inject-secret-livekit-primary: "kv/data/atlas/comms/livekit-api" + vault.hashicorp.com/agent-inject-template-livekit-primary: | {{- with secret "kv/data/atlas/comms/livekit-api" -}}{{ .Data.data.primary }}{{- end -}} - vault.hashicorp.com/agent-inject-secret-atlasbot-credentials-runtime__bot-password: "kv/data/atlas/comms/atlasbot-credentials-runtime" - vault.hashicorp.com/agent-inject-template-atlasbot-credentials-runtime__bot-password: | + vault.hashicorp.com/agent-inject-secret-bot-pass: "kv/data/atlas/comms/atlasbot-credentials-runtime" + vault.hashicorp.com/agent-inject-template-bot-pass: | {{- with secret "kv/data/atlas/comms/atlasbot-credentials-runtime" -}}{{ index .Data.data "bot-password" }}{{- end -}} - vault.hashicorp.com/agent-inject-secret-atlasbot-credentials-runtime__seeder-password: "kv/data/atlas/comms/atlasbot-credentials-runtime" - vault.hashicorp.com/agent-inject-template-atlasbot-credentials-runtime__seeder-password: | + vault.hashicorp.com/agent-inject-secret-seeder-pass: "kv/data/atlas/comms/atlasbot-credentials-runtime" + vault.hashicorp.com/agent-inject-template-seeder-pass: | {{- with secret "kv/data/atlas/comms/atlasbot-credentials-runtime" -}}{{ index .Data.data "seeder-password" }}{{- end -}} - vault.hashicorp.com/agent-inject-secret-chat-ai-keys-runtime__matrix: "kv/data/atlas/shared/chat-ai-keys-runtime" - vault.hashicorp.com/agent-inject-template-chat-ai-keys-runtime__matrix: | + vault.hashicorp.com/agent-inject-secret-chat-matrix: "kv/data/atlas/shared/chat-ai-keys-runtime" + 
vault.hashicorp.com/agent-inject-template-chat-matrix: | {{- with secret "kv/data/atlas/shared/chat-ai-keys-runtime" -}}{{ .Data.data.matrix }}{{- end -}} - vault.hashicorp.com/agent-inject-secret-chat-ai-keys-runtime__homepage: "kv/data/atlas/shared/chat-ai-keys-runtime" - vault.hashicorp.com/agent-inject-template-chat-ai-keys-runtime__homepage: | + vault.hashicorp.com/agent-inject-secret-chat-homepage: "kv/data/atlas/shared/chat-ai-keys-runtime" + vault.hashicorp.com/agent-inject-template-chat-homepage: | {{- with secret "kv/data/atlas/shared/chat-ai-keys-runtime" -}}{{ .Data.data.homepage }}{{- end -}} - vault.hashicorp.com/agent-inject-secret-mas-admin-client-runtime__client_secret: "kv/data/atlas/comms/mas-admin-client-runtime" - vault.hashicorp.com/agent-inject-template-mas-admin-client-runtime__client_secret: | + vault.hashicorp.com/agent-inject-secret-mas-admin-secret: "kv/data/atlas/comms/mas-admin-client-runtime" + vault.hashicorp.com/agent-inject-template-mas-admin-secret: | {{- with secret "kv/data/atlas/comms/mas-admin-client-runtime" -}}{{ .Data.data.client_secret }}{{- end -}} - vault.hashicorp.com/agent-inject-secret-synapse-db__POSTGRES_PASSWORD: "kv/data/atlas/comms/synapse-db" - vault.hashicorp.com/agent-inject-template-synapse-db__POSTGRES_PASSWORD: | + vault.hashicorp.com/agent-inject-secret-synapse-db-pass: "kv/data/atlas/comms/synapse-db" + vault.hashicorp.com/agent-inject-template-synapse-db-pass: | {{- with secret "kv/data/atlas/comms/synapse-db" -}}{{ .Data.data.POSTGRES_PASSWORD }}{{- end -}} - vault.hashicorp.com/agent-inject-secret-mas-db__password: "kv/data/atlas/comms/mas-db" - vault.hashicorp.com/agent-inject-template-mas-db__password: | + vault.hashicorp.com/agent-inject-secret-mas-db-pass: "kv/data/atlas/comms/mas-db" + vault.hashicorp.com/agent-inject-template-mas-db-pass: | {{- with secret "kv/data/atlas/comms/mas-db" -}}{{ .Data.data.password }}{{- end -}} - 
vault.hashicorp.com/agent-inject-secret-mas-secrets-runtime__matrix_shared_secret: "kv/data/atlas/comms/mas-secrets-runtime" - vault.hashicorp.com/agent-inject-template-mas-secrets-runtime__matrix_shared_secret: | + vault.hashicorp.com/agent-inject-secret-mas-matrix-shared: "kv/data/atlas/comms/mas-secrets-runtime" + vault.hashicorp.com/agent-inject-template-mas-matrix-shared: | {{- with secret "kv/data/atlas/comms/mas-secrets-runtime" -}}{{ .Data.data.matrix_shared_secret }}{{- end -}} - vault.hashicorp.com/agent-inject-secret-mas-secrets-runtime__keycloak_client_secret: "kv/data/atlas/comms/mas-secrets-runtime" - vault.hashicorp.com/agent-inject-template-mas-secrets-runtime__keycloak_client_secret: | + vault.hashicorp.com/agent-inject-secret-mas-kc-secret: "kv/data/atlas/comms/mas-secrets-runtime" + vault.hashicorp.com/agent-inject-template-mas-kc-secret: | {{- with secret "kv/data/atlas/comms/mas-secrets-runtime" -}}{{ .Data.data.keycloak_client_secret }}{{- end -}} spec: restartPolicy: Never diff --git a/services/comms/synapse-seeder-admin-ensure-job.yaml b/services/comms/synapse-seeder-admin-ensure-job.yaml index 86068fd..450bdcd 100644 --- a/services/comms/synapse-seeder-admin-ensure-job.yaml +++ b/services/comms/synapse-seeder-admin-ensure-job.yaml @@ -11,38 +11,38 @@ spec: annotations: vault.hashicorp.com/agent-inject: "true" vault.hashicorp.com/role: "comms" - vault.hashicorp.com/agent-inject-secret-turn-shared-secret__TURN_STATIC_AUTH_SECRET: "kv/data/atlas/comms/turn-shared-secret" - vault.hashicorp.com/agent-inject-template-turn-shared-secret__TURN_STATIC_AUTH_SECRET: | + vault.hashicorp.com/agent-inject-secret-turn-secret: "kv/data/atlas/comms/turn-shared-secret" + vault.hashicorp.com/agent-inject-template-turn-secret: | {{- with secret "kv/data/atlas/comms/turn-shared-secret" -}}{{ .Data.data.TURN_STATIC_AUTH_SECRET }}{{- end -}} - vault.hashicorp.com/agent-inject-secret-livekit-api__primary: "kv/data/atlas/comms/livekit-api" - 
vault.hashicorp.com/agent-inject-template-livekit-api__primary: | + vault.hashicorp.com/agent-inject-secret-livekit-primary: "kv/data/atlas/comms/livekit-api" + vault.hashicorp.com/agent-inject-template-livekit-primary: | {{- with secret "kv/data/atlas/comms/livekit-api" -}}{{ .Data.data.primary }}{{- end -}} - vault.hashicorp.com/agent-inject-secret-atlasbot-credentials-runtime__bot-password: "kv/data/atlas/comms/atlasbot-credentials-runtime" - vault.hashicorp.com/agent-inject-template-atlasbot-credentials-runtime__bot-password: | + vault.hashicorp.com/agent-inject-secret-bot-pass: "kv/data/atlas/comms/atlasbot-credentials-runtime" + vault.hashicorp.com/agent-inject-template-bot-pass: | {{- with secret "kv/data/atlas/comms/atlasbot-credentials-runtime" -}}{{ index .Data.data "bot-password" }}{{- end -}} - vault.hashicorp.com/agent-inject-secret-atlasbot-credentials-runtime__seeder-password: "kv/data/atlas/comms/atlasbot-credentials-runtime" - vault.hashicorp.com/agent-inject-template-atlasbot-credentials-runtime__seeder-password: | + vault.hashicorp.com/agent-inject-secret-seeder-pass: "kv/data/atlas/comms/atlasbot-credentials-runtime" + vault.hashicorp.com/agent-inject-template-seeder-pass: | {{- with secret "kv/data/atlas/comms/atlasbot-credentials-runtime" -}}{{ index .Data.data "seeder-password" }}{{- end -}} - vault.hashicorp.com/agent-inject-secret-chat-ai-keys-runtime__matrix: "kv/data/atlas/shared/chat-ai-keys-runtime" - vault.hashicorp.com/agent-inject-template-chat-ai-keys-runtime__matrix: | + vault.hashicorp.com/agent-inject-secret-chat-matrix: "kv/data/atlas/shared/chat-ai-keys-runtime" + vault.hashicorp.com/agent-inject-template-chat-matrix: | {{- with secret "kv/data/atlas/shared/chat-ai-keys-runtime" -}}{{ .Data.data.matrix }}{{- end -}} - vault.hashicorp.com/agent-inject-secret-chat-ai-keys-runtime__homepage: "kv/data/atlas/shared/chat-ai-keys-runtime" - vault.hashicorp.com/agent-inject-template-chat-ai-keys-runtime__homepage: | + 
vault.hashicorp.com/agent-inject-secret-chat-homepage: "kv/data/atlas/shared/chat-ai-keys-runtime" + vault.hashicorp.com/agent-inject-template-chat-homepage: | {{- with secret "kv/data/atlas/shared/chat-ai-keys-runtime" -}}{{ .Data.data.homepage }}{{- end -}} - vault.hashicorp.com/agent-inject-secret-mas-admin-client-runtime__client_secret: "kv/data/atlas/comms/mas-admin-client-runtime" - vault.hashicorp.com/agent-inject-template-mas-admin-client-runtime__client_secret: | + vault.hashicorp.com/agent-inject-secret-mas-admin-secret: "kv/data/atlas/comms/mas-admin-client-runtime" + vault.hashicorp.com/agent-inject-template-mas-admin-secret: | {{- with secret "kv/data/atlas/comms/mas-admin-client-runtime" -}}{{ .Data.data.client_secret }}{{- end -}} - vault.hashicorp.com/agent-inject-secret-synapse-db__POSTGRES_PASSWORD: "kv/data/atlas/comms/synapse-db" - vault.hashicorp.com/agent-inject-template-synapse-db__POSTGRES_PASSWORD: | + vault.hashicorp.com/agent-inject-secret-synapse-db-pass: "kv/data/atlas/comms/synapse-db" + vault.hashicorp.com/agent-inject-template-synapse-db-pass: | {{- with secret "kv/data/atlas/comms/synapse-db" -}}{{ .Data.data.POSTGRES_PASSWORD }}{{- end -}} - vault.hashicorp.com/agent-inject-secret-mas-db__password: "kv/data/atlas/comms/mas-db" - vault.hashicorp.com/agent-inject-template-mas-db__password: | + vault.hashicorp.com/agent-inject-secret-mas-db-pass: "kv/data/atlas/comms/mas-db" + vault.hashicorp.com/agent-inject-template-mas-db-pass: | {{- with secret "kv/data/atlas/comms/mas-db" -}}{{ .Data.data.password }}{{- end -}} - vault.hashicorp.com/agent-inject-secret-mas-secrets-runtime__matrix_shared_secret: "kv/data/atlas/comms/mas-secrets-runtime" - vault.hashicorp.com/agent-inject-template-mas-secrets-runtime__matrix_shared_secret: | + vault.hashicorp.com/agent-inject-secret-mas-matrix-shared: "kv/data/atlas/comms/mas-secrets-runtime" + vault.hashicorp.com/agent-inject-template-mas-matrix-shared: | {{- with secret 
"kv/data/atlas/comms/mas-secrets-runtime" -}}{{ .Data.data.matrix_shared_secret }}{{- end -}} - vault.hashicorp.com/agent-inject-secret-mas-secrets-runtime__keycloak_client_secret: "kv/data/atlas/comms/mas-secrets-runtime" - vault.hashicorp.com/agent-inject-template-mas-secrets-runtime__keycloak_client_secret: | + vault.hashicorp.com/agent-inject-secret-mas-kc-secret: "kv/data/atlas/comms/mas-secrets-runtime" + vault.hashicorp.com/agent-inject-template-mas-kc-secret: | {{- with secret "kv/data/atlas/comms/mas-secrets-runtime" -}}{{ .Data.data.keycloak_client_secret }}{{- end -}} spec: restartPolicy: OnFailure diff --git a/services/comms/synapse-user-seed-job.yaml b/services/comms/synapse-user-seed-job.yaml index a85ba28..82b72e7 100644 --- a/services/comms/synapse-user-seed-job.yaml +++ b/services/comms/synapse-user-seed-job.yaml @@ -12,38 +12,38 @@ spec: annotations: vault.hashicorp.com/agent-inject: "true" vault.hashicorp.com/role: "comms" - vault.hashicorp.com/agent-inject-secret-turn-shared-secret__TURN_STATIC_AUTH_SECRET: "kv/data/atlas/comms/turn-shared-secret" - vault.hashicorp.com/agent-inject-template-turn-shared-secret__TURN_STATIC_AUTH_SECRET: | + vault.hashicorp.com/agent-inject-secret-turn-secret: "kv/data/atlas/comms/turn-shared-secret" + vault.hashicorp.com/agent-inject-template-turn-secret: | {{- with secret "kv/data/atlas/comms/turn-shared-secret" -}}{{ .Data.data.TURN_STATIC_AUTH_SECRET }}{{- end -}} - vault.hashicorp.com/agent-inject-secret-livekit-api__primary: "kv/data/atlas/comms/livekit-api" - vault.hashicorp.com/agent-inject-template-livekit-api__primary: | + vault.hashicorp.com/agent-inject-secret-livekit-primary: "kv/data/atlas/comms/livekit-api" + vault.hashicorp.com/agent-inject-template-livekit-primary: | {{- with secret "kv/data/atlas/comms/livekit-api" -}}{{ .Data.data.primary }}{{- end -}} - vault.hashicorp.com/agent-inject-secret-atlasbot-credentials-runtime__bot-password: "kv/data/atlas/comms/atlasbot-credentials-runtime" - 
vault.hashicorp.com/agent-inject-template-atlasbot-credentials-runtime__bot-password: | + vault.hashicorp.com/agent-inject-secret-bot-pass: "kv/data/atlas/comms/atlasbot-credentials-runtime" + vault.hashicorp.com/agent-inject-template-bot-pass: | {{- with secret "kv/data/atlas/comms/atlasbot-credentials-runtime" -}}{{ index .Data.data "bot-password" }}{{- end -}} - vault.hashicorp.com/agent-inject-secret-atlasbot-credentials-runtime__seeder-password: "kv/data/atlas/comms/atlasbot-credentials-runtime" - vault.hashicorp.com/agent-inject-template-atlasbot-credentials-runtime__seeder-password: | + vault.hashicorp.com/agent-inject-secret-seeder-pass: "kv/data/atlas/comms/atlasbot-credentials-runtime" + vault.hashicorp.com/agent-inject-template-seeder-pass: | {{- with secret "kv/data/atlas/comms/atlasbot-credentials-runtime" -}}{{ index .Data.data "seeder-password" }}{{- end -}} - vault.hashicorp.com/agent-inject-secret-chat-ai-keys-runtime__matrix: "kv/data/atlas/shared/chat-ai-keys-runtime" - vault.hashicorp.com/agent-inject-template-chat-ai-keys-runtime__matrix: | + vault.hashicorp.com/agent-inject-secret-chat-matrix: "kv/data/atlas/shared/chat-ai-keys-runtime" + vault.hashicorp.com/agent-inject-template-chat-matrix: | {{- with secret "kv/data/atlas/shared/chat-ai-keys-runtime" -}}{{ .Data.data.matrix }}{{- end -}} - vault.hashicorp.com/agent-inject-secret-chat-ai-keys-runtime__homepage: "kv/data/atlas/shared/chat-ai-keys-runtime" - vault.hashicorp.com/agent-inject-template-chat-ai-keys-runtime__homepage: | + vault.hashicorp.com/agent-inject-secret-chat-homepage: "kv/data/atlas/shared/chat-ai-keys-runtime" + vault.hashicorp.com/agent-inject-template-chat-homepage: | {{- with secret "kv/data/atlas/shared/chat-ai-keys-runtime" -}}{{ .Data.data.homepage }}{{- end -}} - vault.hashicorp.com/agent-inject-secret-mas-admin-client-runtime__client_secret: "kv/data/atlas/comms/mas-admin-client-runtime" - 
vault.hashicorp.com/agent-inject-template-mas-admin-client-runtime__client_secret: | + vault.hashicorp.com/agent-inject-secret-mas-admin-secret: "kv/data/atlas/comms/mas-admin-client-runtime" + vault.hashicorp.com/agent-inject-template-mas-admin-secret: | {{- with secret "kv/data/atlas/comms/mas-admin-client-runtime" -}}{{ .Data.data.client_secret }}{{- end -}} - vault.hashicorp.com/agent-inject-secret-synapse-db__POSTGRES_PASSWORD: "kv/data/atlas/comms/synapse-db" - vault.hashicorp.com/agent-inject-template-synapse-db__POSTGRES_PASSWORD: | + vault.hashicorp.com/agent-inject-secret-synapse-db-pass: "kv/data/atlas/comms/synapse-db" + vault.hashicorp.com/agent-inject-template-synapse-db-pass: | {{- with secret "kv/data/atlas/comms/synapse-db" -}}{{ .Data.data.POSTGRES_PASSWORD }}{{- end -}} - vault.hashicorp.com/agent-inject-secret-mas-db__password: "kv/data/atlas/comms/mas-db" - vault.hashicorp.com/agent-inject-template-mas-db__password: | + vault.hashicorp.com/agent-inject-secret-mas-db-pass: "kv/data/atlas/comms/mas-db" + vault.hashicorp.com/agent-inject-template-mas-db-pass: | {{- with secret "kv/data/atlas/comms/mas-db" -}}{{ .Data.data.password }}{{- end -}} - vault.hashicorp.com/agent-inject-secret-mas-secrets-runtime__matrix_shared_secret: "kv/data/atlas/comms/mas-secrets-runtime" - vault.hashicorp.com/agent-inject-template-mas-secrets-runtime__matrix_shared_secret: | + vault.hashicorp.com/agent-inject-secret-mas-matrix-shared: "kv/data/atlas/comms/mas-secrets-runtime" + vault.hashicorp.com/agent-inject-template-mas-matrix-shared: | {{- with secret "kv/data/atlas/comms/mas-secrets-runtime" -}}{{ .Data.data.matrix_shared_secret }}{{- end -}} - vault.hashicorp.com/agent-inject-secret-mas-secrets-runtime__keycloak_client_secret: "kv/data/atlas/comms/mas-secrets-runtime" - vault.hashicorp.com/agent-inject-template-mas-secrets-runtime__keycloak_client_secret: | + vault.hashicorp.com/agent-inject-secret-mas-kc-secret: "kv/data/atlas/comms/mas-secrets-runtime" + 
vault.hashicorp.com/agent-inject-template-mas-kc-secret: | {{- with secret "kv/data/atlas/comms/mas-secrets-runtime" -}}{{ .Data.data.keycloak_client_secret }}{{- end -}} spec: restartPolicy: Never From f6fc250fe10ff907f515c917f8a8f209fd3aab8b Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Wed, 14 Jan 2026 14:24:55 -0300 Subject: [PATCH 058/270] comms: add vault-secrets emptyDir for mas --- services/comms/mas-deployment.yaml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/services/comms/mas-deployment.yaml b/services/comms/mas-deployment.yaml index d8d06d9..532c9da 100644 --- a/services/comms/mas-deployment.yaml +++ b/services/comms/mas-deployment.yaml @@ -153,6 +153,8 @@ spec: path: config.yaml - name: rendered emptyDir: {} + - name: vault-secrets + emptyDir: {} - name: vault-scripts configMap: name: comms-vault-env From 98d67293bcee408b007cf884a2b004b79976ca16 Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Wed, 14 Jan 2026 14:29:29 -0300 Subject: [PATCH 059/270] vault: prepopulate injector for jobs --- services/bstein-dev-home/portal-onboarding-e2e-test-job.yaml | 3 ++- services/bstein-dev-home/vaultwarden-cred-sync-cronjob.yaml | 3 ++- services/comms/bstein-force-leave-job.yaml | 3 ++- services/comms/guest-name-job.yaml | 3 ++- services/comms/mas-local-users-ensure-job.yaml | 3 ++- services/comms/othrys-kick-numeric-job.yaml | 3 ++- services/comms/pin-othrys-job.yaml | 3 ++- services/comms/reset-othrys-room-job.yaml | 3 ++- services/comms/seed-othrys-room.yaml | 3 ++- services/comms/synapse-seeder-admin-ensure-job.yaml | 3 ++- services/comms/synapse-user-seed-job.yaml | 3 ++- services/keycloak/endurain-oidc-secret-ensure-job.yaml | 3 ++- services/keycloak/harbor-oidc-secret-ensure-job.yaml | 3 ++- services/keycloak/ldap-federation-job.yaml | 3 ++- services/keycloak/logs-oidc-secret-ensure-job.yaml | 3 ++- services/keycloak/mas-secrets-ensure-job.yaml | 3 ++- services/keycloak/portal-e2e-client-job.yaml | 3 ++- 
.../keycloak/portal-e2e-execute-actions-email-test-job.yaml | 3 ++- services/keycloak/portal-e2e-target-client-job.yaml | 3 ++- .../keycloak/portal-e2e-token-exchange-permissions-job.yaml | 3 ++- services/keycloak/portal-e2e-token-exchange-test-job.yaml | 3 ++- services/keycloak/realm-settings-job.yaml | 3 ++- services/keycloak/sparkyfitness-oidc-secret-ensure-job.yaml | 3 ++- services/keycloak/synapse-oidc-secret-ensure-job.yaml | 3 ++- services/keycloak/user-overrides-job.yaml | 3 ++- services/keycloak/vault-oidc-secret-ensure-job.yaml | 3 ++- services/mailu/mailu-sync-cronjob.yaml | 3 ++- services/mailu/mailu-sync-job.yaml | 3 ++- services/nextcloud-mail-sync/cronjob.yaml | 3 ++- services/nextcloud/maintenance-cronjob.yaml | 3 ++- 30 files changed, 60 insertions(+), 30 deletions(-) diff --git a/services/bstein-dev-home/portal-onboarding-e2e-test-job.yaml b/services/bstein-dev-home/portal-onboarding-e2e-test-job.yaml index 16de572..f8d27b3 100644 --- a/services/bstein-dev-home/portal-onboarding-e2e-test-job.yaml +++ b/services/bstein-dev-home/portal-onboarding-e2e-test-job.yaml @@ -10,6 +10,7 @@ spec: metadata: annotations: vault.hashicorp.com/agent-inject: "true" + vault.hashicorp.com/agent-pre-populate-only: "true" vault.hashicorp.com/role: "bstein-dev-home" vault.hashicorp.com/agent-inject-secret-portal-env.sh: "kv/data/atlas/portal/atlas-portal-db" vault.hashicorp.com/agent-inject-template-portal-env.sh: | @@ -70,4 +71,4 @@ spec: - name: tests configMap: name: portal-onboarding-e2e-tests - defaultMode: 0555 + defaultMode: 0555 \ No newline at end of file diff --git a/services/bstein-dev-home/vaultwarden-cred-sync-cronjob.yaml b/services/bstein-dev-home/vaultwarden-cred-sync-cronjob.yaml index efbab7e..bba2b1b 100644 --- a/services/bstein-dev-home/vaultwarden-cred-sync-cronjob.yaml +++ b/services/bstein-dev-home/vaultwarden-cred-sync-cronjob.yaml @@ -16,6 +16,7 @@ spec: metadata: annotations: vault.hashicorp.com/agent-inject: "true" + 
vault.hashicorp.com/agent-pre-populate-only: "true" vault.hashicorp.com/role: "bstein-dev-home" vault.hashicorp.com/agent-inject-secret-portal-env.sh: "kv/data/atlas/portal/atlas-portal-db" vault.hashicorp.com/agent-inject-template-portal-env.sh: | @@ -73,4 +74,4 @@ spec: - name: vaultwarden-cred-sync-script configMap: name: vaultwarden-cred-sync-script - defaultMode: 0555 + defaultMode: 0555 \ No newline at end of file diff --git a/services/comms/bstein-force-leave-job.yaml b/services/comms/bstein-force-leave-job.yaml index 4d38349..759f30b 100644 --- a/services/comms/bstein-force-leave-job.yaml +++ b/services/comms/bstein-force-leave-job.yaml @@ -10,6 +10,7 @@ spec: metadata: annotations: vault.hashicorp.com/agent-inject: "true" + vault.hashicorp.com/agent-pre-populate-only: "true" vault.hashicorp.com/role: "comms" vault.hashicorp.com/agent-inject-secret-mas-admin-secret: "kv/data/atlas/comms/mas-admin-client-runtime" vault.hashicorp.com/agent-inject-template-mas-admin-secret: | @@ -185,4 +186,4 @@ spec: print(json.dumps(results, indent=2, sort_keys=True)) if failures: raise SystemExit(f"failed to leave/forget rooms: {', '.join(failures)}") - PY + PY \ No newline at end of file diff --git a/services/comms/guest-name-job.yaml b/services/comms/guest-name-job.yaml index 00a1e47..0ba2f52 100644 --- a/services/comms/guest-name-job.yaml +++ b/services/comms/guest-name-job.yaml @@ -17,6 +17,7 @@ spec: metadata: annotations: vault.hashicorp.com/agent-inject: "true" + vault.hashicorp.com/agent-pre-populate-only: "true" vault.hashicorp.com/role: "comms" vault.hashicorp.com/agent-inject-secret-turn-secret: "kv/data/atlas/comms/turn-shared-secret" vault.hashicorp.com/agent-inject-template-turn-secret: | @@ -430,4 +431,4 @@ spec: db_rename_numeric(existing) finally: mas_revoke_session(admin_token, seeder_session) - PY + PY \ No newline at end of file diff --git a/services/comms/mas-local-users-ensure-job.yaml b/services/comms/mas-local-users-ensure-job.yaml index 
3cf24f9..fcb0faf 100644 --- a/services/comms/mas-local-users-ensure-job.yaml +++ b/services/comms/mas-local-users-ensure-job.yaml @@ -11,6 +11,7 @@ spec: metadata: annotations: vault.hashicorp.com/agent-inject: "true" + vault.hashicorp.com/agent-pre-populate-only: "true" vault.hashicorp.com/role: "comms" vault.hashicorp.com/agent-inject-secret-turn-secret: "kv/data/atlas/comms/turn-shared-secret" vault.hashicorp.com/agent-inject-template-turn-secret: | @@ -186,4 +187,4 @@ spec: token = admin_token() ensure_user(token, os.environ["SEEDER_USER"], os.environ["SEEDER_PASS"]) ensure_user(token, os.environ["BOT_USER"], os.environ["BOT_PASS"]) - PY + PY \ No newline at end of file diff --git a/services/comms/othrys-kick-numeric-job.yaml b/services/comms/othrys-kick-numeric-job.yaml index fa9d62d..4d9ad6d 100644 --- a/services/comms/othrys-kick-numeric-job.yaml +++ b/services/comms/othrys-kick-numeric-job.yaml @@ -10,6 +10,7 @@ spec: metadata: annotations: vault.hashicorp.com/agent-inject: "true" + vault.hashicorp.com/agent-pre-populate-only: "true" vault.hashicorp.com/role: "comms" vault.hashicorp.com/agent-inject-secret-turn-secret: "kv/data/atlas/comms/turn-shared-secret" vault.hashicorp.com/agent-inject-template-turn-secret: | @@ -155,4 +156,4 @@ spec: - name: vault-scripts configMap: name: comms-vault-env - defaultMode: 0555 + defaultMode: 0555 \ No newline at end of file diff --git a/services/comms/pin-othrys-job.yaml b/services/comms/pin-othrys-job.yaml index e56a71f..f25c18e 100644 --- a/services/comms/pin-othrys-job.yaml +++ b/services/comms/pin-othrys-job.yaml @@ -17,6 +17,7 @@ spec: metadata: annotations: vault.hashicorp.com/agent-inject: "true" + vault.hashicorp.com/agent-pre-populate-only: "true" vault.hashicorp.com/role: "comms" vault.hashicorp.com/agent-inject-secret-turn-secret: "kv/data/atlas/comms/turn-shared-secret" vault.hashicorp.com/agent-inject-template-turn-secret: | @@ -163,4 +164,4 @@ spec: - name: vault-scripts configMap: name: comms-vault-env - 
defaultMode: 0555 + defaultMode: 0555 \ No newline at end of file diff --git a/services/comms/reset-othrys-room-job.yaml b/services/comms/reset-othrys-room-job.yaml index 319e0a7..c0d941b 100644 --- a/services/comms/reset-othrys-room-job.yaml +++ b/services/comms/reset-othrys-room-job.yaml @@ -17,6 +17,7 @@ spec: metadata: annotations: vault.hashicorp.com/agent-inject: "true" + vault.hashicorp.com/agent-pre-populate-only: "true" vault.hashicorp.com/role: "comms" vault.hashicorp.com/agent-inject-secret-turn-secret: "kv/data/atlas/comms/turn-shared-secret" vault.hashicorp.com/agent-inject-template-turn-secret: | @@ -306,4 +307,4 @@ spec: - name: vault-scripts configMap: name: comms-vault-env - defaultMode: 0555 + defaultMode: 0555 \ No newline at end of file diff --git a/services/comms/seed-othrys-room.yaml b/services/comms/seed-othrys-room.yaml index 333ff35..ce87c85 100644 --- a/services/comms/seed-othrys-room.yaml +++ b/services/comms/seed-othrys-room.yaml @@ -15,6 +15,7 @@ spec: metadata: annotations: vault.hashicorp.com/agent-inject: "true" + vault.hashicorp.com/agent-pre-populate-only: "true" vault.hashicorp.com/role: "comms" vault.hashicorp.com/agent-inject-secret-turn-secret: "kv/data/atlas/comms/turn-shared-secret" vault.hashicorp.com/agent-inject-template-turn-secret: | @@ -179,4 +180,4 @@ spec: - name: vault-scripts configMap: name: comms-vault-env - defaultMode: 0555 + defaultMode: 0555 \ No newline at end of file diff --git a/services/comms/synapse-seeder-admin-ensure-job.yaml b/services/comms/synapse-seeder-admin-ensure-job.yaml index 450bdcd..073c28d 100644 --- a/services/comms/synapse-seeder-admin-ensure-job.yaml +++ b/services/comms/synapse-seeder-admin-ensure-job.yaml @@ -10,6 +10,7 @@ spec: metadata: annotations: vault.hashicorp.com/agent-inject: "true" + vault.hashicorp.com/agent-pre-populate-only: "true" vault.hashicorp.com/role: "comms" vault.hashicorp.com/agent-inject-secret-turn-secret: "kv/data/atlas/comms/turn-shared-secret" 
vault.hashicorp.com/agent-inject-template-turn-secret: | @@ -76,4 +77,4 @@ spec: - name: vault-scripts configMap: name: comms-vault-env - defaultMode: 0555 + defaultMode: 0555 \ No newline at end of file diff --git a/services/comms/synapse-user-seed-job.yaml b/services/comms/synapse-user-seed-job.yaml index 82b72e7..4117bff 100644 --- a/services/comms/synapse-user-seed-job.yaml +++ b/services/comms/synapse-user-seed-job.yaml @@ -11,6 +11,7 @@ spec: metadata: annotations: vault.hashicorp.com/agent-inject: "true" + vault.hashicorp.com/agent-pre-populate-only: "true" vault.hashicorp.com/role: "comms" vault.hashicorp.com/agent-inject-secret-turn-secret: "kv/data/atlas/comms/turn-shared-secret" vault.hashicorp.com/agent-inject-template-turn-secret: | @@ -150,4 +151,4 @@ spec: - name: vault-scripts configMap: name: comms-vault-env - defaultMode: 0555 + defaultMode: 0555 \ No newline at end of file diff --git a/services/keycloak/endurain-oidc-secret-ensure-job.yaml b/services/keycloak/endurain-oidc-secret-ensure-job.yaml index 386c663..2ce30b4 100644 --- a/services/keycloak/endurain-oidc-secret-ensure-job.yaml +++ b/services/keycloak/endurain-oidc-secret-ensure-job.yaml @@ -11,6 +11,7 @@ spec: metadata: annotations: vault.hashicorp.com/agent-inject: "true" + vault.hashicorp.com/agent-pre-populate-only: "true" vault.hashicorp.com/role: "sso-secrets" vault.hashicorp.com/agent-inject-secret-keycloak-admin-env.sh: "kv/data/atlas/shared/keycloak-admin" vault.hashicorp.com/agent-inject-template-keycloak-admin-env.sh: | @@ -49,4 +50,4 @@ spec: volumeMounts: - name: endurain-oidc-secret-ensure-script mountPath: /scripts - readOnly: true + readOnly: true \ No newline at end of file diff --git a/services/keycloak/harbor-oidc-secret-ensure-job.yaml b/services/keycloak/harbor-oidc-secret-ensure-job.yaml index 598b801..fc6dd7e 100644 --- a/services/keycloak/harbor-oidc-secret-ensure-job.yaml +++ b/services/keycloak/harbor-oidc-secret-ensure-job.yaml @@ -11,6 +11,7 @@ spec: metadata: 
annotations: vault.hashicorp.com/agent-inject: "true" + vault.hashicorp.com/agent-pre-populate-only: "true" vault.hashicorp.com/role: "sso-secrets" vault.hashicorp.com/agent-inject-secret-keycloak-admin-env.sh: "kv/data/atlas/shared/keycloak-admin" vault.hashicorp.com/agent-inject-template-keycloak-admin-env.sh: | @@ -44,4 +45,4 @@ spec: volumeMounts: - name: harbor-oidc-secret-ensure-script mountPath: /scripts - readOnly: true + readOnly: true \ No newline at end of file diff --git a/services/keycloak/ldap-federation-job.yaml b/services/keycloak/ldap-federation-job.yaml index 8dd62c9..783200c 100644 --- a/services/keycloak/ldap-federation-job.yaml +++ b/services/keycloak/ldap-federation-job.yaml @@ -10,6 +10,7 @@ spec: metadata: annotations: vault.hashicorp.com/agent-inject: "true" + vault.hashicorp.com/agent-pre-populate-only: "true" vault.hashicorp.com/role: "sso" vault.hashicorp.com/agent-inject-secret-keycloak-env.sh: "kv/data/atlas/shared/keycloak-admin" vault.hashicorp.com/agent-inject-template-keycloak-env.sh: | @@ -376,4 +377,4 @@ spec: except Exception as e: print(f"WARNING: LDAP cleanup failed (continuing): {e}") PY - volumeMounts: + volumeMounts: \ No newline at end of file diff --git a/services/keycloak/logs-oidc-secret-ensure-job.yaml b/services/keycloak/logs-oidc-secret-ensure-job.yaml index 5f9316f..67abdc9 100644 --- a/services/keycloak/logs-oidc-secret-ensure-job.yaml +++ b/services/keycloak/logs-oidc-secret-ensure-job.yaml @@ -11,6 +11,7 @@ spec: metadata: annotations: vault.hashicorp.com/agent-inject: "true" + vault.hashicorp.com/agent-pre-populate-only: "true" vault.hashicorp.com/role: "sso-secrets" vault.hashicorp.com/agent-inject-secret-keycloak-admin-env.sh: "kv/data/atlas/shared/keycloak-admin" vault.hashicorp.com/agent-inject-template-keycloak-admin-env.sh: | @@ -121,4 +122,4 @@ spec: --from-literal=cookie_secret="${COOKIE_SECRET}" \ --dry-run=client -o yaml | kubectl -n logging apply -f - >/dev/null volumeMounts: - volumes: + volumes: \ 
No newline at end of file diff --git a/services/keycloak/mas-secrets-ensure-job.yaml b/services/keycloak/mas-secrets-ensure-job.yaml index 330cb51..ff5f022 100644 --- a/services/keycloak/mas-secrets-ensure-job.yaml +++ b/services/keycloak/mas-secrets-ensure-job.yaml @@ -19,6 +19,7 @@ spec: metadata: annotations: vault.hashicorp.com/agent-inject: "true" + vault.hashicorp.com/agent-pre-populate-only: "true" vault.hashicorp.com/agent-init-first: "true" vault.hashicorp.com/role: "sso-secrets" vault.hashicorp.com/agent-inject-secret-keycloak-admin-env.sh: "kv/data/atlas/shared/keycloak-admin" @@ -123,4 +124,4 @@ spec: -d "${payload}" "${vault_addr}/v1/kv/data/atlas/comms/mas-secrets-runtime" >/dev/null volumeMounts: - name: work - mountPath: /work + mountPath: /work \ No newline at end of file diff --git a/services/keycloak/portal-e2e-client-job.yaml b/services/keycloak/portal-e2e-client-job.yaml index c3d996d..e54fdfa 100644 --- a/services/keycloak/portal-e2e-client-job.yaml +++ b/services/keycloak/portal-e2e-client-job.yaml @@ -10,6 +10,7 @@ spec: metadata: annotations: vault.hashicorp.com/agent-inject: "true" + vault.hashicorp.com/agent-pre-populate-only: "true" vault.hashicorp.com/role: "sso" vault.hashicorp.com/agent-inject-secret-keycloak-env.sh: "kv/data/atlas/shared/keycloak-admin" vault.hashicorp.com/agent-inject-template-keycloak-env.sh: | @@ -257,4 +258,4 @@ spec: raise SystemExit(f"Role mapping update failed (status={status}) resp={resp}") PY volumeMounts: - volumes: + volumes: \ No newline at end of file diff --git a/services/keycloak/portal-e2e-execute-actions-email-test-job.yaml b/services/keycloak/portal-e2e-execute-actions-email-test-job.yaml index aeb3a0d..cc23305 100644 --- a/services/keycloak/portal-e2e-execute-actions-email-test-job.yaml +++ b/services/keycloak/portal-e2e-execute-actions-email-test-job.yaml @@ -10,6 +10,7 @@ spec: metadata: annotations: vault.hashicorp.com/agent-inject: "true" + vault.hashicorp.com/agent-pre-populate-only: "true" 
vault.hashicorp.com/role: "sso" vault.hashicorp.com/agent-inject-secret-keycloak-env.sh: "kv/data/atlas/shared/keycloak-admin" vault.hashicorp.com/agent-inject-template-keycloak-env.sh: | @@ -69,4 +70,4 @@ spec: - name: tests configMap: name: portal-e2e-tests - defaultMode: 0555 + defaultMode: 0555 \ No newline at end of file diff --git a/services/keycloak/portal-e2e-target-client-job.yaml b/services/keycloak/portal-e2e-target-client-job.yaml index 2900ae9..6fee3e8 100644 --- a/services/keycloak/portal-e2e-target-client-job.yaml +++ b/services/keycloak/portal-e2e-target-client-job.yaml @@ -10,6 +10,7 @@ spec: metadata: annotations: vault.hashicorp.com/agent-inject: "true" + vault.hashicorp.com/agent-pre-populate-only: "true" vault.hashicorp.com/role: "sso" vault.hashicorp.com/agent-inject-secret-keycloak-env.sh: "kv/data/atlas/shared/keycloak-admin" vault.hashicorp.com/agent-inject-template-keycloak-env.sh: | @@ -158,4 +159,4 @@ spec: print(f"OK: ensured token exchange enabled on client {target_client_id}") PY volumeMounts: - volumes: + volumes: \ No newline at end of file diff --git a/services/keycloak/portal-e2e-token-exchange-permissions-job.yaml b/services/keycloak/portal-e2e-token-exchange-permissions-job.yaml index 026260a..9ef1a01 100644 --- a/services/keycloak/portal-e2e-token-exchange-permissions-job.yaml +++ b/services/keycloak/portal-e2e-token-exchange-permissions-job.yaml @@ -10,6 +10,7 @@ spec: metadata: annotations: vault.hashicorp.com/agent-inject: "true" + vault.hashicorp.com/agent-pre-populate-only: "true" vault.hashicorp.com/role: "sso" vault.hashicorp.com/agent-inject-secret-keycloak-env.sh: "kv/data/atlas/shared/keycloak-admin" vault.hashicorp.com/agent-inject-template-keycloak-env.sh: | @@ -290,4 +291,4 @@ spec: print("OK: configured token exchange permissions for portal E2E client") PY - volumeMounts: + volumeMounts: \ No newline at end of file diff --git a/services/keycloak/portal-e2e-token-exchange-test-job.yaml 
b/services/keycloak/portal-e2e-token-exchange-test-job.yaml index f32fa52..ae1c636 100644 --- a/services/keycloak/portal-e2e-token-exchange-test-job.yaml +++ b/services/keycloak/portal-e2e-token-exchange-test-job.yaml @@ -11,6 +11,7 @@ spec: metadata: annotations: vault.hashicorp.com/agent-inject: "true" + vault.hashicorp.com/agent-pre-populate-only: "true" vault.hashicorp.com/role: "sso" vault.hashicorp.com/agent-inject-secret-keycloak-env.sh: "kv/data/atlas/shared/keycloak-admin" vault.hashicorp.com/agent-inject-template-keycloak-env.sh: | @@ -70,4 +71,4 @@ spec: - name: tests configMap: name: portal-e2e-tests - defaultMode: 0555 + defaultMode: 0555 \ No newline at end of file diff --git a/services/keycloak/realm-settings-job.yaml b/services/keycloak/realm-settings-job.yaml index d26e199..926ebeb 100644 --- a/services/keycloak/realm-settings-job.yaml +++ b/services/keycloak/realm-settings-job.yaml @@ -10,6 +10,7 @@ spec: metadata: annotations: vault.hashicorp.com/agent-inject: "true" + vault.hashicorp.com/agent-pre-populate-only: "true" vault.hashicorp.com/role: "sso" vault.hashicorp.com/agent-inject-secret-keycloak-env.sh: "kv/data/atlas/shared/keycloak-admin" vault.hashicorp.com/agent-inject-template-keycloak-env.sh: | @@ -467,4 +468,4 @@ spec: f"Unexpected execution update response for identity-provider-redirector: {status}" ) PY - volumeMounts: + volumeMounts: \ No newline at end of file diff --git a/services/keycloak/sparkyfitness-oidc-secret-ensure-job.yaml b/services/keycloak/sparkyfitness-oidc-secret-ensure-job.yaml index 6405d81..ea38eec 100644 --- a/services/keycloak/sparkyfitness-oidc-secret-ensure-job.yaml +++ b/services/keycloak/sparkyfitness-oidc-secret-ensure-job.yaml @@ -11,6 +11,7 @@ spec: metadata: annotations: vault.hashicorp.com/agent-inject: "true" + vault.hashicorp.com/agent-pre-populate-only: "true" vault.hashicorp.com/role: "sso-secrets" vault.hashicorp.com/agent-inject-secret-keycloak-admin-env.sh: "kv/data/atlas/shared/keycloak-admin" 
vault.hashicorp.com/agent-inject-template-keycloak-admin-env.sh: | @@ -49,4 +50,4 @@ spec: volumeMounts: - name: sparkyfitness-oidc-secret-ensure-script mountPath: /scripts - readOnly: true + readOnly: true \ No newline at end of file diff --git a/services/keycloak/synapse-oidc-secret-ensure-job.yaml b/services/keycloak/synapse-oidc-secret-ensure-job.yaml index f4f0da4..9a5dd8e 100644 --- a/services/keycloak/synapse-oidc-secret-ensure-job.yaml +++ b/services/keycloak/synapse-oidc-secret-ensure-job.yaml @@ -11,6 +11,7 @@ spec: metadata: annotations: vault.hashicorp.com/agent-inject: "true" + vault.hashicorp.com/agent-pre-populate-only: "true" vault.hashicorp.com/role: "sso-secrets" vault.hashicorp.com/agent-inject-secret-keycloak-admin-env.sh: "kv/data/atlas/shared/keycloak-admin" vault.hashicorp.com/agent-inject-template-keycloak-admin-env.sh: | @@ -81,4 +82,4 @@ spec: curl -sS -X POST -H "X-Vault-Token: ${vault_token}" \ -d "${payload}" "${vault_addr}/v1/kv/data/atlas/comms/synapse-oidc" >/dev/null volumeMounts: - volumes: + volumes: \ No newline at end of file diff --git a/services/keycloak/user-overrides-job.yaml b/services/keycloak/user-overrides-job.yaml index d0063fb..431d4fe 100644 --- a/services/keycloak/user-overrides-job.yaml +++ b/services/keycloak/user-overrides-job.yaml @@ -10,6 +10,7 @@ spec: metadata: annotations: vault.hashicorp.com/agent-inject: "true" + vault.hashicorp.com/agent-pre-populate-only: "true" vault.hashicorp.com/role: "sso" vault.hashicorp.com/agent-inject-secret-keycloak-env.sh: "kv/data/atlas/shared/keycloak-admin" vault.hashicorp.com/agent-inject-template-keycloak-env.sh: | @@ -164,4 +165,4 @@ spec: if status not in (200, 204): raise SystemExit(f"Unexpected user update response: {status}") PY - volumeMounts: + volumeMounts: \ No newline at end of file diff --git a/services/keycloak/vault-oidc-secret-ensure-job.yaml b/services/keycloak/vault-oidc-secret-ensure-job.yaml index 982444f..29f69b7 100644 --- 
a/services/keycloak/vault-oidc-secret-ensure-job.yaml +++ b/services/keycloak/vault-oidc-secret-ensure-job.yaml @@ -11,6 +11,7 @@ spec: metadata: annotations: vault.hashicorp.com/agent-inject: "true" + vault.hashicorp.com/agent-pre-populate-only: "true" vault.hashicorp.com/role: "sso-secrets" vault.hashicorp.com/agent-inject-secret-keycloak-admin-env.sh: "kv/data/atlas/shared/keycloak-admin" vault.hashicorp.com/agent-inject-template-keycloak-admin-env.sh: | @@ -44,4 +45,4 @@ spec: volumeMounts: - name: vault-oidc-secret-ensure-script mountPath: /scripts - readOnly: true + readOnly: true \ No newline at end of file diff --git a/services/mailu/mailu-sync-cronjob.yaml b/services/mailu/mailu-sync-cronjob.yaml index e4ef9be..9e0e35c 100644 --- a/services/mailu/mailu-sync-cronjob.yaml +++ b/services/mailu/mailu-sync-cronjob.yaml @@ -13,6 +13,7 @@ spec: metadata: annotations: vault.hashicorp.com/agent-inject: "true" + vault.hashicorp.com/agent-pre-populate-only: "true" vault.hashicorp.com/role: "mailu-mailserver" vault.hashicorp.com/agent-inject-secret-mailu-db-secret__database: "kv/data/atlas/mailu/mailu-db-secret" vault.hashicorp.com/agent-inject-template-mailu-db-secret__database: | @@ -78,4 +79,4 @@ spec: - name: vault-scripts configMap: name: mailu-vault-env - defaultMode: 0555 + defaultMode: 0555 \ No newline at end of file diff --git a/services/mailu/mailu-sync-job.yaml b/services/mailu/mailu-sync-job.yaml index b1cee93..00c84c5 100644 --- a/services/mailu/mailu-sync-job.yaml +++ b/services/mailu/mailu-sync-job.yaml @@ -9,6 +9,7 @@ spec: metadata: annotations: vault.hashicorp.com/agent-inject: "true" + vault.hashicorp.com/agent-pre-populate-only: "true" vault.hashicorp.com/role: "mailu-mailserver" vault.hashicorp.com/agent-inject-secret-mailu-db-secret__database: "kv/data/atlas/mailu/mailu-db-secret" vault.hashicorp.com/agent-inject-template-mailu-db-secret__database: | @@ -74,4 +75,4 @@ spec: - name: vault-scripts configMap: name: mailu-vault-env - defaultMode: 
0555 + defaultMode: 0555 \ No newline at end of file diff --git a/services/nextcloud-mail-sync/cronjob.yaml b/services/nextcloud-mail-sync/cronjob.yaml index e6dcd37..6f38778 100644 --- a/services/nextcloud-mail-sync/cronjob.yaml +++ b/services/nextcloud-mail-sync/cronjob.yaml @@ -15,6 +15,7 @@ spec: metadata: annotations: vault.hashicorp.com/agent-inject: "true" + vault.hashicorp.com/agent-pre-populate-only: "true" vault.hashicorp.com/role: "nextcloud" vault.hashicorp.com/agent-inject-secret-nextcloud-env.sh: "kv/data/atlas/nextcloud/nextcloud-db" vault.hashicorp.com/agent-inject-template-nextcloud-env.sh: | @@ -103,4 +104,4 @@ spec: - name: sync-script configMap: name: nextcloud-mail-sync-script - defaultMode: 0755 + defaultMode: 0755 \ No newline at end of file diff --git a/services/nextcloud/maintenance-cronjob.yaml b/services/nextcloud/maintenance-cronjob.yaml index 8c92417..1ace3fc 100644 --- a/services/nextcloud/maintenance-cronjob.yaml +++ b/services/nextcloud/maintenance-cronjob.yaml @@ -13,6 +13,7 @@ spec: metadata: annotations: vault.hashicorp.com/agent-inject: "true" + vault.hashicorp.com/agent-pre-populate-only: "true" vault.hashicorp.com/role: "nextcloud" vault.hashicorp.com/agent-inject-secret-nextcloud-env.sh: "kv/data/atlas/nextcloud/nextcloud-db" vault.hashicorp.com/agent-inject-template-nextcloud-env.sh: | @@ -93,4 +94,4 @@ spec: - name: maintenance-script configMap: name: nextcloud-maintenance-script - defaultMode: 0755 + defaultMode: 0755 \ No newline at end of file From 4f1fb62ab345ce7e9e652558750635642f35d7f5 Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Wed, 14 Jan 2026 14:33:57 -0300 Subject: [PATCH 060/270] vault: bump job names for injector --- services/bstein-dev-home/portal-onboarding-e2e-test-job.yaml | 2 +- services/comms/bstein-force-leave-job.yaml | 2 +- services/comms/mas-local-users-ensure-job.yaml | 2 +- services/comms/othrys-kick-numeric-job.yaml | 2 +- services/comms/synapse-seeder-admin-ensure-job.yaml | 2 +- 
services/comms/synapse-user-seed-job.yaml | 2 +- services/keycloak/endurain-oidc-secret-ensure-job.yaml | 2 +- services/keycloak/harbor-oidc-secret-ensure-job.yaml | 2 +- services/keycloak/ldap-federation-job.yaml | 2 +- services/keycloak/logs-oidc-secret-ensure-job.yaml | 2 +- services/keycloak/mas-secrets-ensure-job.yaml | 2 +- services/keycloak/portal-e2e-client-job.yaml | 2 +- .../keycloak/portal-e2e-execute-actions-email-test-job.yaml | 2 +- services/keycloak/portal-e2e-target-client-job.yaml | 2 +- .../keycloak/portal-e2e-token-exchange-permissions-job.yaml | 2 +- services/keycloak/portal-e2e-token-exchange-test-job.yaml | 2 +- services/keycloak/realm-settings-job.yaml | 2 +- services/keycloak/sparkyfitness-oidc-secret-ensure-job.yaml | 2 +- services/keycloak/synapse-oidc-secret-ensure-job.yaml | 2 +- services/keycloak/user-overrides-job.yaml | 2 +- services/keycloak/vault-oidc-secret-ensure-job.yaml | 2 +- services/mailu/mailu-sync-job.yaml | 2 +- 22 files changed, 22 insertions(+), 22 deletions(-) diff --git a/services/bstein-dev-home/portal-onboarding-e2e-test-job.yaml b/services/bstein-dev-home/portal-onboarding-e2e-test-job.yaml index f8d27b3..cfe35a1 100644 --- a/services/bstein-dev-home/portal-onboarding-e2e-test-job.yaml +++ b/services/bstein-dev-home/portal-onboarding-e2e-test-job.yaml @@ -2,7 +2,7 @@ apiVersion: batch/v1 kind: Job metadata: - name: portal-onboarding-e2e-test-14 + name: portal-onboarding-e2e-test-16 namespace: bstein-dev-home spec: backoffLimit: 0 diff --git a/services/comms/bstein-force-leave-job.yaml b/services/comms/bstein-force-leave-job.yaml index 759f30b..172ffb4 100644 --- a/services/comms/bstein-force-leave-job.yaml +++ b/services/comms/bstein-force-leave-job.yaml @@ -2,7 +2,7 @@ apiVersion: batch/v1 kind: Job metadata: - name: bstein-leave-rooms-8 + name: bstein-leave-rooms-10 namespace: comms spec: backoffLimit: 0 diff --git a/services/comms/mas-local-users-ensure-job.yaml b/services/comms/mas-local-users-ensure-job.yaml 
index fcb0faf..ac3428c 100644 --- a/services/comms/mas-local-users-ensure-job.yaml +++ b/services/comms/mas-local-users-ensure-job.yaml @@ -2,7 +2,7 @@ apiVersion: batch/v1 kind: Job metadata: - name: mas-local-users-ensure-8 + name: mas-local-users-ensure-10 namespace: comms spec: backoffLimit: 1 diff --git a/services/comms/othrys-kick-numeric-job.yaml b/services/comms/othrys-kick-numeric-job.yaml index 4d9ad6d..637ad58 100644 --- a/services/comms/othrys-kick-numeric-job.yaml +++ b/services/comms/othrys-kick-numeric-job.yaml @@ -2,7 +2,7 @@ apiVersion: batch/v1 kind: Job metadata: - name: othrys-kick-numeric-3 + name: othrys-kick-numeric-5 namespace: comms spec: backoffLimit: 0 diff --git a/services/comms/synapse-seeder-admin-ensure-job.yaml b/services/comms/synapse-seeder-admin-ensure-job.yaml index 073c28d..ad22634 100644 --- a/services/comms/synapse-seeder-admin-ensure-job.yaml +++ b/services/comms/synapse-seeder-admin-ensure-job.yaml @@ -2,7 +2,7 @@ apiVersion: batch/v1 kind: Job metadata: - name: synapse-seeder-admin-ensure-4 + name: synapse-seeder-admin-ensure-6 namespace: comms spec: backoffLimit: 2 diff --git a/services/comms/synapse-user-seed-job.yaml b/services/comms/synapse-user-seed-job.yaml index 4117bff..9afe882 100644 --- a/services/comms/synapse-user-seed-job.yaml +++ b/services/comms/synapse-user-seed-job.yaml @@ -2,7 +2,7 @@ apiVersion: batch/v1 kind: Job metadata: - name: synapse-user-seed-4 + name: synapse-user-seed-6 namespace: comms spec: backoffLimit: 1 diff --git a/services/keycloak/endurain-oidc-secret-ensure-job.yaml b/services/keycloak/endurain-oidc-secret-ensure-job.yaml index 2ce30b4..53a31c6 100644 --- a/services/keycloak/endurain-oidc-secret-ensure-job.yaml +++ b/services/keycloak/endurain-oidc-secret-ensure-job.yaml @@ -2,7 +2,7 @@ apiVersion: batch/v1 kind: Job metadata: - name: endurain-oidc-secret-ensure-2 + name: endurain-oidc-secret-ensure-4 namespace: sso spec: backoffLimit: 0 diff --git 
a/services/keycloak/harbor-oidc-secret-ensure-job.yaml b/services/keycloak/harbor-oidc-secret-ensure-job.yaml index fc6dd7e..82c8097 100644 --- a/services/keycloak/harbor-oidc-secret-ensure-job.yaml +++ b/services/keycloak/harbor-oidc-secret-ensure-job.yaml @@ -2,7 +2,7 @@ apiVersion: batch/v1 kind: Job metadata: - name: harbor-oidc-secret-ensure-6 + name: harbor-oidc-secret-ensure-8 namespace: sso spec: backoffLimit: 0 diff --git a/services/keycloak/ldap-federation-job.yaml b/services/keycloak/ldap-federation-job.yaml index 783200c..2f911f1 100644 --- a/services/keycloak/ldap-federation-job.yaml +++ b/services/keycloak/ldap-federation-job.yaml @@ -2,7 +2,7 @@ apiVersion: batch/v1 kind: Job metadata: - name: keycloak-ldap-federation-8 + name: keycloak-ldap-federation-10 namespace: sso spec: backoffLimit: 2 diff --git a/services/keycloak/logs-oidc-secret-ensure-job.yaml b/services/keycloak/logs-oidc-secret-ensure-job.yaml index 67abdc9..f3fcaa3 100644 --- a/services/keycloak/logs-oidc-secret-ensure-job.yaml +++ b/services/keycloak/logs-oidc-secret-ensure-job.yaml @@ -2,7 +2,7 @@ apiVersion: batch/v1 kind: Job metadata: - name: logs-oidc-secret-ensure-5 + name: logs-oidc-secret-ensure-7 namespace: sso spec: backoffLimit: 0 diff --git a/services/keycloak/mas-secrets-ensure-job.yaml b/services/keycloak/mas-secrets-ensure-job.yaml index ff5f022..88e8177 100644 --- a/services/keycloak/mas-secrets-ensure-job.yaml +++ b/services/keycloak/mas-secrets-ensure-job.yaml @@ -10,7 +10,7 @@ imagePullSecrets: apiVersion: batch/v1 kind: Job metadata: - name: mas-secrets-ensure-17 + name: mas-secrets-ensure-18 namespace: sso spec: backoffLimit: 0 diff --git a/services/keycloak/portal-e2e-client-job.yaml b/services/keycloak/portal-e2e-client-job.yaml index e54fdfa..9c5229f 100644 --- a/services/keycloak/portal-e2e-client-job.yaml +++ b/services/keycloak/portal-e2e-client-job.yaml @@ -2,7 +2,7 @@ apiVersion: batch/v1 kind: Job metadata: - name: keycloak-portal-e2e-client-5 + name: 
keycloak-portal-e2e-client-6 namespace: sso spec: backoffLimit: 0 diff --git a/services/keycloak/portal-e2e-execute-actions-email-test-job.yaml b/services/keycloak/portal-e2e-execute-actions-email-test-job.yaml index cc23305..7ee4e20 100644 --- a/services/keycloak/portal-e2e-execute-actions-email-test-job.yaml +++ b/services/keycloak/portal-e2e-execute-actions-email-test-job.yaml @@ -2,7 +2,7 @@ apiVersion: batch/v1 kind: Job metadata: - name: keycloak-portal-e2e-execute-actions-email-8 + name: keycloak-portal-e2e-execute-actions-email-9 namespace: sso spec: backoffLimit: 3 diff --git a/services/keycloak/portal-e2e-target-client-job.yaml b/services/keycloak/portal-e2e-target-client-job.yaml index 6fee3e8..6c1086f 100644 --- a/services/keycloak/portal-e2e-target-client-job.yaml +++ b/services/keycloak/portal-e2e-target-client-job.yaml @@ -2,7 +2,7 @@ apiVersion: batch/v1 kind: Job metadata: - name: keycloak-portal-e2e-target-4 + name: keycloak-portal-e2e-target-5 namespace: sso spec: backoffLimit: 0 diff --git a/services/keycloak/portal-e2e-token-exchange-permissions-job.yaml b/services/keycloak/portal-e2e-token-exchange-permissions-job.yaml index 9ef1a01..9e3f11c 100644 --- a/services/keycloak/portal-e2e-token-exchange-permissions-job.yaml +++ b/services/keycloak/portal-e2e-token-exchange-permissions-job.yaml @@ -2,7 +2,7 @@ apiVersion: batch/v1 kind: Job metadata: - name: keycloak-portal-e2e-token-exchange-permissions-8 + name: keycloak-portal-e2e-token-exchange-permissions-9 namespace: sso spec: backoffLimit: 6 diff --git a/services/keycloak/portal-e2e-token-exchange-test-job.yaml b/services/keycloak/portal-e2e-token-exchange-test-job.yaml index ae1c636..4e6960d 100644 --- a/services/keycloak/portal-e2e-token-exchange-test-job.yaml +++ b/services/keycloak/portal-e2e-token-exchange-test-job.yaml @@ -2,7 +2,7 @@ apiVersion: batch/v1 kind: Job metadata: - name: keycloak-portal-e2e-token-exchange-test-4 + name: keycloak-portal-e2e-token-exchange-test-5 namespace: sso 
spec: backoffLimit: 6 diff --git a/services/keycloak/realm-settings-job.yaml b/services/keycloak/realm-settings-job.yaml index 926ebeb..76076e8 100644 --- a/services/keycloak/realm-settings-job.yaml +++ b/services/keycloak/realm-settings-job.yaml @@ -2,7 +2,7 @@ apiVersion: batch/v1 kind: Job metadata: - name: keycloak-realm-settings-20 + name: keycloak-realm-settings-21 namespace: sso spec: backoffLimit: 0 diff --git a/services/keycloak/sparkyfitness-oidc-secret-ensure-job.yaml b/services/keycloak/sparkyfitness-oidc-secret-ensure-job.yaml index ea38eec..3b16100 100644 --- a/services/keycloak/sparkyfitness-oidc-secret-ensure-job.yaml +++ b/services/keycloak/sparkyfitness-oidc-secret-ensure-job.yaml @@ -2,7 +2,7 @@ apiVersion: batch/v1 kind: Job metadata: - name: sparkyfitness-oidc-secret-ensure-2 + name: sparkyfitness-oidc-secret-ensure-3 namespace: sso spec: backoffLimit: 0 diff --git a/services/keycloak/synapse-oidc-secret-ensure-job.yaml b/services/keycloak/synapse-oidc-secret-ensure-job.yaml index 9a5dd8e..2368404 100644 --- a/services/keycloak/synapse-oidc-secret-ensure-job.yaml +++ b/services/keycloak/synapse-oidc-secret-ensure-job.yaml @@ -2,7 +2,7 @@ apiVersion: batch/v1 kind: Job metadata: - name: synapse-oidc-secret-ensure-7 + name: synapse-oidc-secret-ensure-8 namespace: sso spec: backoffLimit: 0 diff --git a/services/keycloak/user-overrides-job.yaml b/services/keycloak/user-overrides-job.yaml index 431d4fe..b865e5e 100644 --- a/services/keycloak/user-overrides-job.yaml +++ b/services/keycloak/user-overrides-job.yaml @@ -2,7 +2,7 @@ apiVersion: batch/v1 kind: Job metadata: - name: keycloak-user-overrides-4 + name: keycloak-user-overrides-5 namespace: sso spec: backoffLimit: 0 diff --git a/services/keycloak/vault-oidc-secret-ensure-job.yaml b/services/keycloak/vault-oidc-secret-ensure-job.yaml index 29f69b7..13c2571 100644 --- a/services/keycloak/vault-oidc-secret-ensure-job.yaml +++ b/services/keycloak/vault-oidc-secret-ensure-job.yaml @@ -2,7 +2,7 @@ 
apiVersion: batch/v1 kind: Job metadata: - name: vault-oidc-secret-ensure-4 + name: vault-oidc-secret-ensure-5 namespace: sso spec: backoffLimit: 0 diff --git a/services/mailu/mailu-sync-job.yaml b/services/mailu/mailu-sync-job.yaml index 00c84c5..38cea89 100644 --- a/services/mailu/mailu-sync-job.yaml +++ b/services/mailu/mailu-sync-job.yaml @@ -2,7 +2,7 @@ apiVersion: batch/v1 kind: Job metadata: - name: mailu-sync-3 + name: mailu-sync-4 namespace: mailu-mailserver spec: template: From fb9578b624b335fb583abeee12cd72ef712d0799 Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Wed, 14 Jan 2026 14:49:41 -0300 Subject: [PATCH 061/270] vault: inject monitoring exporter and health jobs --- .../health/endurain-oidc-config-cronjob.yaml | 42 ++++++++----------- .../sparkyfitness-oidc-config-cronjob.yaml | 42 ++++++++----------- .../monitoring/grafana-org-bootstrap.yaml | 25 ++++++----- .../postmark-exporter-deployment.yaml | 29 ++++++------- services/monitoring/secretproviderclass.yaml | 18 -------- 5 files changed, 61 insertions(+), 95 deletions(-) diff --git a/services/health/endurain-oidc-config-cronjob.yaml b/services/health/endurain-oidc-config-cronjob.yaml index 7930425..86b4d91 100644 --- a/services/health/endurain-oidc-config-cronjob.yaml +++ b/services/health/endurain-oidc-config-cronjob.yaml @@ -13,6 +13,22 @@ spec: spec: backoffLimit: 1 template: + metadata: + annotations: + vault.hashicorp.com/agent-inject: "true" + vault.hashicorp.com/agent-pre-populate-only: "true" + vault.hashicorp.com/role: "health" + vault.hashicorp.com/agent-inject-secret-endurain-oidc-env: "kv/data/atlas/health/endurain-admin" + vault.hashicorp.com/agent-inject-template-endurain-oidc-env: | + {{- with secret "kv/data/atlas/health/endurain-admin" -}} + export ENDURAIN_ADMIN_USERNAME="{{ .Data.data.username }}" + export ENDURAIN_ADMIN_PASSWORD="{{ .Data.data.password }}" + {{- end }} + {{- with secret "kv/data/atlas/health/endurain-oidc" -}} + export ENDURAIN_OIDC_CLIENT_ID="{{ 
.Data.data.client_id }}" + export ENDURAIN_OIDC_CLIENT_SECRET="{{ .Data.data.client_secret }}" + export ENDURAIN_OIDC_ISSUER_URL="{{ .Data.data.issuer_url }}" + {{- end -}} spec: serviceAccountName: health-vault-sync restartPolicy: Never @@ -47,35 +63,11 @@ spec: - | set -euo pipefail apk add --no-cache bash curl jq >/dev/null + . /vault/secrets/endurain-oidc-env exec /scripts/endurain_oidc_configure.sh env: - name: ENDURAIN_BASE_URL value: http://endurain.health.svc.cluster.local - - name: ENDURAIN_ADMIN_USERNAME - valueFrom: - secretKeyRef: - name: endurain-admin - key: username - - name: ENDURAIN_ADMIN_PASSWORD - valueFrom: - secretKeyRef: - name: endurain-admin - key: password - - name: ENDURAIN_OIDC_CLIENT_ID - valueFrom: - secretKeyRef: - name: endurain-oidc - key: client_id - - name: ENDURAIN_OIDC_CLIENT_SECRET - valueFrom: - secretKeyRef: - name: endurain-oidc - key: client_secret - - name: ENDURAIN_OIDC_ISSUER_URL - valueFrom: - secretKeyRef: - name: endurain-oidc - key: issuer_url volumeMounts: - name: endurain-oidc-config-script mountPath: /scripts diff --git a/services/health/sparkyfitness-oidc-config-cronjob.yaml b/services/health/sparkyfitness-oidc-config-cronjob.yaml index a20c1f1..b3d4c52 100644 --- a/services/health/sparkyfitness-oidc-config-cronjob.yaml +++ b/services/health/sparkyfitness-oidc-config-cronjob.yaml @@ -13,6 +13,22 @@ spec: spec: backoffLimit: 1 template: + metadata: + annotations: + vault.hashicorp.com/agent-inject: "true" + vault.hashicorp.com/agent-pre-populate-only: "true" + vault.hashicorp.com/role: "health" + vault.hashicorp.com/agent-inject-secret-sparky-oidc-env: "kv/data/atlas/health/sparkyfitness-admin" + vault.hashicorp.com/agent-inject-template-sparky-oidc-env: | + {{- with secret "kv/data/atlas/health/sparkyfitness-admin" -}} + export SPARKYFITNESS_ADMIN_EMAIL="{{ .Data.data.email }}" + export SPARKYFITNESS_ADMIN_PASSWORD="{{ .Data.data.password }}" + {{- end }} + {{- with secret "kv/data/atlas/health/sparkyfitness-oidc" 
-}} + export SPARKYFITNESS_OIDC_CLIENT_ID="{{ .Data.data.client_id }}" + export SPARKYFITNESS_OIDC_CLIENT_SECRET="{{ .Data.data.client_secret }}" + export SPARKYFITNESS_OIDC_ISSUER_URL="{{ .Data.data.issuer_url }}" + {{- end -}} spec: serviceAccountName: health-vault-sync restartPolicy: Never @@ -47,37 +63,13 @@ spec: - | set -euo pipefail apk add --no-cache bash curl jq >/dev/null + . /vault/secrets/sparky-oidc-env exec /scripts/sparkyfitness_oidc_configure.sh env: - name: SPARKYFITNESS_BASE_URL value: http://sparkyfitness-server.health.svc.cluster.local:3010 - name: SPARKYFITNESS_FRONTEND_URL value: https://sparkyfitness.bstein.dev - - name: SPARKYFITNESS_ADMIN_EMAIL - valueFrom: - secretKeyRef: - name: sparkyfitness-admin - key: email - - name: SPARKYFITNESS_ADMIN_PASSWORD - valueFrom: - secretKeyRef: - name: sparkyfitness-admin - key: password - - name: SPARKYFITNESS_OIDC_CLIENT_ID - valueFrom: - secretKeyRef: - name: sparkyfitness-oidc - key: client_id - - name: SPARKYFITNESS_OIDC_CLIENT_SECRET - valueFrom: - secretKeyRef: - name: sparkyfitness-oidc - key: client_secret - - name: SPARKYFITNESS_OIDC_ISSUER_URL - valueFrom: - secretKeyRef: - name: sparkyfitness-oidc - key: issuer_url volumeMounts: - name: sparkyfitness-oidc-config-script mountPath: /scripts diff --git a/services/monitoring/grafana-org-bootstrap.yaml b/services/monitoring/grafana-org-bootstrap.yaml index 0872f4a..a39d938 100644 --- a/services/monitoring/grafana-org-bootstrap.yaml +++ b/services/monitoring/grafana-org-bootstrap.yaml @@ -2,13 +2,25 @@ apiVersion: batch/v1 kind: Job metadata: - name: grafana-org-bootstrap-1 + name: grafana-org-bootstrap-2 namespace: monitoring spec: backoffLimit: 2 template: + metadata: + annotations: + vault.hashicorp.com/agent-inject: "true" + vault.hashicorp.com/agent-pre-populate-only: "true" + vault.hashicorp.com/role: "monitoring" + vault.hashicorp.com/agent-inject-secret-grafana-env: "kv/data/atlas/monitoring/grafana-admin" + 
vault.hashicorp.com/agent-inject-template-grafana-env: | + {{- with secret "kv/data/atlas/monitoring/grafana-admin" -}} + export GRAFANA_USER="{{ index .Data.data "admin-user" }}" + export GRAFANA_PASSWORD="{{ index .Data.data "admin-password" }}" + {{- end -}} spec: restartPolicy: OnFailure + serviceAccountName: monitoring-vault-sync containers: - name: bootstrap image: python:3.11-alpine @@ -17,20 +29,11 @@ spec: value: http://grafana - name: OVERVIEW_ORG_NAME value: Overview - - name: GRAFANA_USER - valueFrom: - secretKeyRef: - name: grafana-admin - key: admin-user - - name: GRAFANA_PASSWORD - valueFrom: - secretKeyRef: - name: grafana-admin - key: admin-password command: ["/bin/sh", "-c"] args: - | set -euo pipefail + . /vault/secrets/grafana-env python - <<'PY' import base64 import json diff --git a/services/monitoring/postmark-exporter-deployment.yaml b/services/monitoring/postmark-exporter-deployment.yaml index 646c455..5e6c837 100644 --- a/services/monitoring/postmark-exporter-deployment.yaml +++ b/services/monitoring/postmark-exporter-deployment.yaml @@ -16,8 +16,20 @@ spec: prometheus.io/scrape: "true" prometheus.io/port: "8000" prometheus.io/path: "/metrics" + vault.hashicorp.com/agent-inject: "true" + vault.hashicorp.com/role: "monitoring" + vault.hashicorp.com/agent-inject-secret-postmark-env: "kv/data/atlas/monitoring/postmark-exporter" + vault.hashicorp.com/agent-inject-template-postmark-env: | + {{- with secret "kv/data/atlas/monitoring/postmark-exporter" -}} + export POSTMARK_SERVER_TOKEN="{{ index .Data.data "relay-username" }}" + export POSTMARK_SERVER_TOKEN_FALLBACK="{{ index .Data.data "relay-password" }}" + {{- if index .Data.data "sending-limit" }} + export POSTMARK_SENDING_LIMIT="{{ index .Data.data "sending-limit" }}" + {{- end }} + {{- end -}} bstein.dev/restarted-at: "2026-01-06T00:00:00Z" spec: + serviceAccountName: monitoring-vault-sync containers: - name: exporter image: python:3.12-alpine @@ -26,25 +38,10 @@ spec: args: - | set -euo 
pipefail + . /vault/secrets/postmark-env pip install --no-cache-dir prometheus-client==0.22.1 requests==2.32.3 exec python /app/monitoring_postmark_exporter.py env: - - name: POSTMARK_SERVER_TOKEN - valueFrom: - secretKeyRef: - name: postmark-exporter - key: server-token - - name: POSTMARK_SERVER_TOKEN_FALLBACK - valueFrom: - secretKeyRef: - name: postmark-exporter - key: server-token-fallback - - name: POSTMARK_SENDING_LIMIT - valueFrom: - secretKeyRef: - name: postmark-exporter - key: sending-limit - optional: true - name: POSTMARK_SENDING_LIMIT_WINDOW value: "30d" - name: POLL_INTERVAL_SECONDS diff --git a/services/monitoring/secretproviderclass.yaml b/services/monitoring/secretproviderclass.yaml index 4f58ff0..3fab887 100644 --- a/services/monitoring/secretproviderclass.yaml +++ b/services/monitoring/secretproviderclass.yaml @@ -16,15 +16,6 @@ spec: - objectName: "grafana-admin__admin-password" secretPath: "kv/data/atlas/monitoring/grafana-admin" secretKey: "admin-password" - - objectName: "postmark-exporter__relay-username" - secretPath: "kv/data/atlas/monitoring/postmark-exporter" - secretKey: "relay-username" - - objectName: "postmark-exporter__relay-password" - secretPath: "kv/data/atlas/monitoring/postmark-exporter" - secretKey: "relay-password" - - objectName: "postmark-exporter__sending-limit" - secretPath: "kv/data/atlas/monitoring/postmark-exporter" - secretKey: "sending-limit" - objectName: "postmark-relay__relay-username" secretPath: "kv/data/atlas/shared/postmark-relay" secretKey: "relay-username" @@ -42,15 +33,6 @@ spec: key: admin-user - objectName: grafana-admin__admin-password key: admin-password - - secretName: postmark-exporter - type: Opaque - data: - - objectName: postmark-exporter__relay-username - key: server-token - - objectName: postmark-exporter__relay-password - key: server-token-fallback - - objectName: postmark-exporter__sending-limit - key: sending-limit - secretName: grafana-smtp type: Opaque data: From 
fb671865e58ecdd35479705d9da651df849364b9 Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Wed, 14 Jan 2026 17:29:09 -0300 Subject: [PATCH 062/270] vault: inject remaining services with wrappers --- .../applications/health/kustomization.yaml | 10 +- dockerfiles/Dockerfile.livekit-token-vault | 10 + dockerfiles/Dockerfile.oauth2-proxy-vault | 10 + dockerfiles/Dockerfile.pegasus-vault | 10 + dockerfiles/vault-entrypoint.sh | 14 ++ .../bstein-dev-home/backend-deployment.yaml | 6 + services/bstein-dev-home/rbac.yaml | 31 +++ services/comms/livekit-token-deployment.yaml | 19 +- services/comms/secretproviderclass.yaml | 8 - services/crypto/xmr-miner/deployment.yaml | 39 ++-- .../crypto/xmr-miner/secretproviderclass.yaml | 8 - services/health/config/nginx.conf | 30 +++ services/health/endurain-deployment.yaml | 147 ------------ .../health/endurain-oidc-config-cronjob.yaml | 79 ------- services/health/kustomization.yaml | 29 +-- .../health/scripts/endurain_oidc_configure.sh | 134 ----------- .../scripts/sparkyfitness_oidc_configure.sh | 134 ----------- services/health/scripts/wger_user_sync.py | 120 ++++++++++ services/health/secretproviderclass.yaml | 167 -------------- .../sparkyfitness-frontend-deployment.yaml | 81 ------- .../sparkyfitness-frontend-service.yaml | 15 -- services/health/sparkyfitness-ingress.yaml | 26 --- .../sparkyfitness-oidc-config-cronjob.yaml | 81 ------- .../sparkyfitness-server-deployment.yaml | 170 -------------- .../health/sparkyfitness-server-service.yaml | 15 -- services/health/vault-sync-deployment.yaml | 34 --- .../health/wger-admin-ensure-cronjob.yaml | 92 ++++++++ services/health/wger-deployment.yaml | 212 ++++++++++++++++++ ...ndurain-ingress.yaml => wger-ingress.yaml} | 12 +- ...rain-data-pvc.yaml => wger-media-pvc.yaml} | 6 +- ...ndurain-service.yaml => wger-service.yaml} | 8 +- ...ess-data-pvc.yaml => wger-static-pvc.yaml} | 6 +- services/health/wger-user-sync-cronjob.yaml | 89 ++++++++ services/jenkins/deployment.yaml | 78 +++---- 
services/jenkins/kustomization.yaml | 3 - services/jenkins/secretproviderclass.yaml | 72 ------ services/jenkins/vault-serviceaccount.yaml | 6 - services/jenkins/vault-sync-deployment.yaml | 34 --- .../endurain-oidc-secret-ensure-job.yaml | 53 ----- services/keycloak/kustomization.yaml | 8 - .../scripts/endurain_oidc_secret_ensure.sh | 87 ------- .../sparkyfitness_oidc_secret_ensure.sh | 87 ------- services/keycloak/secretproviderclass.yaml | 31 --- .../sparkyfitness-oidc-secret-ensure-job.yaml | 53 ----- services/logging/oauth2-proxy.yaml | 32 +-- services/logging/secretproviderclass.yaml | 18 -- services/oauth2-proxy/deployment.yaml | 32 +-- services/openldap/statefulset.yaml | 26 ++- services/pegasus/deployment.yaml | 25 ++- services/pegasus/image.yaml | 2 +- services/pegasus/secretproviderclass.yaml | 18 -- .../vault/scripts/vault_k8s_auth_configure.sh | 4 +- 52 files changed, 778 insertions(+), 1743 deletions(-) create mode 100644 dockerfiles/Dockerfile.livekit-token-vault create mode 100644 dockerfiles/Dockerfile.oauth2-proxy-vault create mode 100644 dockerfiles/Dockerfile.pegasus-vault create mode 100644 dockerfiles/vault-entrypoint.sh create mode 100644 services/health/config/nginx.conf delete mode 100644 services/health/endurain-deployment.yaml delete mode 100644 services/health/endurain-oidc-config-cronjob.yaml delete mode 100644 services/health/scripts/endurain_oidc_configure.sh delete mode 100644 services/health/scripts/sparkyfitness_oidc_configure.sh create mode 100644 services/health/scripts/wger_user_sync.py delete mode 100644 services/health/secretproviderclass.yaml delete mode 100644 services/health/sparkyfitness-frontend-deployment.yaml delete mode 100644 services/health/sparkyfitness-frontend-service.yaml delete mode 100644 services/health/sparkyfitness-ingress.yaml delete mode 100644 services/health/sparkyfitness-oidc-config-cronjob.yaml delete mode 100644 services/health/sparkyfitness-server-deployment.yaml delete mode 100644 
services/health/sparkyfitness-server-service.yaml delete mode 100644 services/health/vault-sync-deployment.yaml create mode 100644 services/health/wger-admin-ensure-cronjob.yaml create mode 100644 services/health/wger-deployment.yaml rename services/health/{endurain-ingress.yaml => wger-ingress.yaml} (72%) rename services/health/{endurain-data-pvc.yaml => wger-media-pvc.yaml} (66%) rename services/health/{endurain-service.yaml => wger-service.yaml} (57%) rename services/health/{sparkyfitness-data-pvc.yaml => wger-static-pvc.yaml} (64%) create mode 100644 services/health/wger-user-sync-cronjob.yaml delete mode 100644 services/jenkins/secretproviderclass.yaml delete mode 100644 services/jenkins/vault-serviceaccount.yaml delete mode 100644 services/jenkins/vault-sync-deployment.yaml delete mode 100644 services/keycloak/endurain-oidc-secret-ensure-job.yaml delete mode 100644 services/keycloak/scripts/endurain_oidc_secret_ensure.sh delete mode 100644 services/keycloak/scripts/sparkyfitness_oidc_secret_ensure.sh delete mode 100644 services/keycloak/sparkyfitness-oidc-secret-ensure-job.yaml diff --git a/clusters/atlas/flux-system/applications/health/kustomization.yaml b/clusters/atlas/flux-system/applications/health/kustomization.yaml index f666d39..f4a3d61 100644 --- a/clusters/atlas/flux-system/applications/health/kustomization.yaml +++ b/clusters/atlas/flux-system/applications/health/kustomization.yaml @@ -20,14 +20,6 @@ spec: healthChecks: - apiVersion: apps/v1 kind: Deployment - name: endurain - namespace: health - - apiVersion: apps/v1 - kind: Deployment - name: sparkyfitness-server - namespace: health - - apiVersion: apps/v1 - kind: Deployment - name: sparkyfitness-frontend + name: wger namespace: health wait: false diff --git a/dockerfiles/Dockerfile.livekit-token-vault b/dockerfiles/Dockerfile.livekit-token-vault new file mode 100644 index 0000000..cbe49b1 --- /dev/null +++ b/dockerfiles/Dockerfile.livekit-token-vault @@ -0,0 +1,10 @@ +FROM 
ghcr.io/element-hq/lk-jwt-service:0.3.0 AS base + +FROM alpine:3.20 +RUN apk add --no-cache ca-certificates +COPY --from=base /lk-jwt-service /lk-jwt-service +COPY dockerfiles/vault-entrypoint.sh /entrypoint.sh +RUN chmod 0755 /entrypoint.sh + +ENTRYPOINT ["/entrypoint.sh"] +CMD ["/lk-jwt-service"] diff --git a/dockerfiles/Dockerfile.oauth2-proxy-vault b/dockerfiles/Dockerfile.oauth2-proxy-vault new file mode 100644 index 0000000..71ce2a6 --- /dev/null +++ b/dockerfiles/Dockerfile.oauth2-proxy-vault @@ -0,0 +1,10 @@ +FROM quay.io/oauth2-proxy/oauth2-proxy:v7.6.0 AS base + +FROM alpine:3.20 +RUN apk add --no-cache ca-certificates +COPY --from=base /bin/oauth2-proxy /bin/oauth2-proxy +COPY dockerfiles/vault-entrypoint.sh /entrypoint.sh +RUN chmod 0755 /entrypoint.sh + +ENTRYPOINT ["/entrypoint.sh"] +CMD ["/bin/oauth2-proxy"] diff --git a/dockerfiles/Dockerfile.pegasus-vault b/dockerfiles/Dockerfile.pegasus-vault new file mode 100644 index 0000000..ac49095 --- /dev/null +++ b/dockerfiles/Dockerfile.pegasus-vault @@ -0,0 +1,10 @@ +FROM registry.bstein.dev/streaming/pegasus:1.2.32 AS base + +FROM alpine:3.20 +RUN apk add --no-cache ca-certificates +COPY --from=base /pegasus /pegasus +COPY dockerfiles/vault-entrypoint.sh /entrypoint.sh +RUN chmod 0755 /entrypoint.sh + +ENTRYPOINT ["/entrypoint.sh"] +CMD ["/pegasus"] diff --git a/dockerfiles/vault-entrypoint.sh b/dockerfiles/vault-entrypoint.sh new file mode 100644 index 0000000..3bacabd --- /dev/null +++ b/dockerfiles/vault-entrypoint.sh @@ -0,0 +1,14 @@ +#!/bin/sh +set -eu + +if [ -n "${VAULT_ENV_FILE:-}" ]; then + if [ -f "${VAULT_ENV_FILE}" ]; then + # shellcheck disable=SC1090 + . 
"${VAULT_ENV_FILE}" + else + echo "Vault env file not found: ${VAULT_ENV_FILE}" >&2 + exit 1 + fi +fi + +exec "$@" diff --git a/services/bstein-dev-home/backend-deployment.yaml b/services/bstein-dev-home/backend-deployment.yaml index e18a372..d4b037c 100644 --- a/services/bstein-dev-home/backend-deployment.yaml +++ b/services/bstein-dev-home/backend-deployment.yaml @@ -96,6 +96,12 @@ spec: value: "60" - name: ACCESS_REQUEST_INTERNAL_EMAIL_ALLOWLIST value: robotuser@bstein.dev + - name: WGER_NAMESPACE + value: health + - name: WGER_USER_SYNC_CRONJOB + value: wger-user-sync + - name: WGER_USER_SYNC_WAIT_TIMEOUT_SEC + value: "90" ports: - name: http containerPort: 8080 diff --git a/services/bstein-dev-home/rbac.yaml b/services/bstein-dev-home/rbac.yaml index f97ed24..7ce8fd8 100644 --- a/services/bstein-dev-home/rbac.yaml +++ b/services/bstein-dev-home/rbac.yaml @@ -106,3 +106,34 @@ subjects: - kind: ServiceAccount name: bstein-dev-home namespace: bstein-dev-home +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: bstein-dev-home-wger-user-sync + namespace: health +rules: + - apiGroups: ["batch"] + resources: ["cronjobs"] + verbs: ["get"] + resourceNames: ["wger-user-sync"] + - apiGroups: ["batch"] + resources: ["jobs"] + verbs: ["create", "get", "list", "watch"] + - apiGroups: [""] + resources: ["pods"] + verbs: ["get", "list"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: bstein-dev-home-wger-user-sync + namespace: health +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: bstein-dev-home-wger-user-sync +subjects: + - kind: ServiceAccount + name: bstein-dev-home + namespace: bstein-dev-home diff --git a/services/comms/livekit-token-deployment.yaml b/services/comms/livekit-token-deployment.yaml index 98c46e0..31213fd 100644 --- a/services/comms/livekit-token-deployment.yaml +++ b/services/comms/livekit-token-deployment.yaml @@ -14,8 +14,18 @@ spec: metadata: labels: app: 
livekit-token-service + annotations: + vault.hashicorp.com/agent-inject: "true" + vault.hashicorp.com/role: "comms" + vault.hashicorp.com/agent-inject-secret-livekit-env: "kv/data/atlas/comms/livekit-api" + vault.hashicorp.com/agent-inject-template-livekit-env: | + {{- with secret "kv/data/atlas/comms/livekit-api" -}} + export LIVEKIT_SECRET="{{ .Data.data.primary }}" + {{- end -}} spec: serviceAccountName: comms-vault + imagePullSecrets: + - name: harbor-regcred nodeSelector: hardware: rpi5 affinity: @@ -33,17 +43,14 @@ spec: - live.bstein.dev containers: - name: token-service - image: ghcr.io/element-hq/lk-jwt-service:0.3.0 + image: registry.bstein.dev/tools/lk-jwt-service-vault:0.3.0 env: - name: LIVEKIT_URL value: wss://kit.live.bstein.dev/livekit/sfu - name: LIVEKIT_KEY value: primary - - name: LIVEKIT_SECRET - valueFrom: - secretKeyRef: - name: livekit-api - key: primary + - name: VAULT_ENV_FILE + value: /vault/secrets/livekit-env - name: LIVEKIT_FULL_ACCESS_HOMESERVERS value: live.bstein.dev ports: diff --git a/services/comms/secretproviderclass.yaml b/services/comms/secretproviderclass.yaml index 70ca9b4..251173c 100644 --- a/services/comms/secretproviderclass.yaml +++ b/services/comms/secretproviderclass.yaml @@ -13,9 +13,6 @@ spec: - objectName: "turn-secret" secretPath: "kv/data/atlas/comms/turn-shared-secret" secretKey: "TURN_STATIC_AUTH_SECRET" - - objectName: "livekit-primary" - secretPath: "kv/data/atlas/comms/livekit-api" - secretKey: "primary" - objectName: "synapse-db-pass" secretPath: "kv/data/atlas/comms/synapse-db" secretKey: "POSTGRES_PASSWORD" @@ -70,11 +67,6 @@ spec: data: - objectName: turn-secret key: TURN_STATIC_AUTH_SECRET - - secretName: livekit-api - type: Opaque - data: - - objectName: livekit-primary - key: primary - secretName: synapse-db type: Opaque data: diff --git a/services/crypto/xmr-miner/deployment.yaml b/services/crypto/xmr-miner/deployment.yaml index efc00ca..820c2ce 100644 --- a/services/crypto/xmr-miner/deployment.yaml 
+++ b/services/crypto/xmr-miner/deployment.yaml @@ -12,9 +12,18 @@ spec: template: metadata: labels: { app: monero-p2pool } + annotations: + vault.hashicorp.com/agent-inject: "true" + vault.hashicorp.com/role: "crypto" + vault.hashicorp.com/agent-inject-secret-xmr-env: "kv/data/atlas/crypto/xmr-payout" + vault.hashicorp.com/agent-inject-template-xmr-env: | + {{- with secret "kv/data/atlas/crypto/xmr-payout" -}} + export XMR_ADDR="{{ .Data.data.address }}" + {{- end -}} spec: nodeSelector: node-role.kubernetes.io/worker: "true" + serviceAccountName: crypto-vault-sync affinity: nodeAffinity: requiredDuringSchedulingIgnoredDuringExecution: @@ -61,27 +70,17 @@ spec: - name: monero-p2pool image: debian:bookworm-slim imagePullPolicy: IfNotPresent - command: ["/opt/p2pool/p2pool"] + command: ["/bin/sh", "-c"] args: - - "--host" - - "monerod.crypto.svc.cluster.local" - - "--rpc-port" - - "18081" - - "--zmq-port" - - "18083" - - "--stratum" - - "0.0.0.0:3333" - - "--wallet" - - "$(XMR_ADDR)" - # - "--light-mode" - # - "--no-randomx" - # - "--no-cache" - env: - - name: XMR_ADDR - valueFrom: - secretKeyRef: - name: xmr-payout - key: address + - | + set -eu + . 
/vault/secrets/xmr-env + exec /opt/p2pool/p2pool \ + --host monerod.crypto.svc.cluster.local \ + --rpc-port 18081 \ + --zmq-port 18083 \ + --stratum 0.0.0.0:3333 \ + --wallet "${XMR_ADDR}" ports: - { name: stratum, containerPort: 3333, protocol: TCP } readinessProbe: diff --git a/services/crypto/xmr-miner/secretproviderclass.yaml b/services/crypto/xmr-miner/secretproviderclass.yaml index 00c72bd..a72097f 100644 --- a/services/crypto/xmr-miner/secretproviderclass.yaml +++ b/services/crypto/xmr-miner/secretproviderclass.yaml @@ -10,18 +10,10 @@ spec: vaultAddress: "http://vault.vault.svc.cluster.local:8200" roleName: "crypto" objects: | - - objectName: "xmr-payout__address" - secretPath: "kv/data/atlas/crypto/xmr-payout" - secretKey: "address" - objectName: "harbor-pull__dockerconfigjson" secretPath: "kv/data/atlas/harbor-pull/crypto" secretKey: "dockerconfigjson" secretObjects: - - secretName: xmr-payout - type: Opaque - data: - - objectName: xmr-payout__address - key: address - secretName: harbor-regcred type: kubernetes.io/dockerconfigjson data: diff --git a/services/health/config/nginx.conf b/services/health/config/nginx.conf new file mode 100644 index 0000000..26b1f74 --- /dev/null +++ b/services/health/config/nginx.conf @@ -0,0 +1,30 @@ +upstream wger { + server 127.0.0.1:8000; +} + +server { + listen 8080; + + location = /api/v2/register { + return 404; + } + + location / { + proxy_pass http://wger; + proxy_set_header Host $host; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; + proxy_set_header X-Forwarded-Host $host; + proxy_redirect off; + } + + location /static/ { + alias /wger/static/; + } + + location /media/ { + alias /wger/media/; + } + + client_max_body_size 100M; +} diff --git a/services/health/endurain-deployment.yaml b/services/health/endurain-deployment.yaml deleted file mode 100644 index 05608b1..0000000 --- a/services/health/endurain-deployment.yaml +++ /dev/null @@ -1,147 +0,0 @@ -# 
services/health/endurain-deployment.yaml -apiVersion: apps/v1 -kind: Deployment -metadata: - name: endurain - namespace: health - labels: - app: endurain -spec: - replicas: 1 - selector: - matchLabels: - app: endurain - strategy: - type: RollingUpdate - rollingUpdate: - maxSurge: 0 - maxUnavailable: 1 - template: - metadata: - labels: - app: endurain - spec: - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: hardware - operator: In - values: ["rpi5", "rpi4"] - - key: node-role.kubernetes.io/worker - operator: Exists - preferredDuringSchedulingIgnoredDuringExecution: - - weight: 90 - preference: - matchExpressions: - - key: hardware - operator: In - values: ["rpi5"] - - weight: 70 - preference: - matchExpressions: - - key: hardware - operator: In - values: ["rpi4"] - securityContext: - runAsUser: 1000 - runAsGroup: 1000 - fsGroup: 1000 - fsGroupChangePolicy: OnRootMismatch - initContainers: - - name: init-data - image: alpine:3.20 - command: ["/bin/sh", "-c"] - args: - - | - set -e - mkdir -p /data - chown -R 1000:1000 /data - securityContext: - runAsUser: 0 - runAsGroup: 0 - volumeMounts: - - name: endurain-data - mountPath: /data - containers: - - name: endurain - image: ghcr.io/endurain-project/endurain:v0.16.6 - imagePullPolicy: IfNotPresent - ports: - - name: http - containerPort: 8080 - env: - - name: ENDURAIN_HOST - value: https://endurain.bstein.dev - - name: BEHIND_PROXY - value: "true" - - name: LOG_LEVEL - value: info - - name: TZ - value: Etc/UTC - - name: DB_HOST - valueFrom: - secretKeyRef: - name: endurain-db - key: DB_HOST - - name: DB_PORT - valueFrom: - secretKeyRef: - name: endurain-db - key: DB_PORT - - name: DB_USER - valueFrom: - secretKeyRef: - name: endurain-db - key: DB_USER - - name: DB_PASSWORD - valueFrom: - secretKeyRef: - name: endurain-db - key: DB_PASSWORD - - name: DB_DATABASE - valueFrom: - secretKeyRef: - name: endurain-db - key: DB_DATABASE - - name: 
SECRET_KEY - valueFrom: - secretKeyRef: - name: endurain-secrets - key: SECRET_KEY - - name: FERNET_KEY - valueFrom: - secretKeyRef: - name: endurain-secrets - key: FERNET_KEY - volumeMounts: - - name: endurain-data - mountPath: /app/backend/data - readinessProbe: - httpGet: - path: /api/v1/about - port: http - initialDelaySeconds: 15 - periodSeconds: 10 - timeoutSeconds: 3 - failureThreshold: 6 - livenessProbe: - httpGet: - path: /api/v1/about - port: http - initialDelaySeconds: 30 - periodSeconds: 20 - timeoutSeconds: 3 - failureThreshold: 6 - resources: - requests: - cpu: 200m - memory: 512Mi - limits: - cpu: "1" - memory: 2Gi - volumes: - - name: endurain-data - persistentVolumeClaim: - claimName: endurain-data diff --git a/services/health/endurain-oidc-config-cronjob.yaml b/services/health/endurain-oidc-config-cronjob.yaml deleted file mode 100644 index 86b4d91..0000000 --- a/services/health/endurain-oidc-config-cronjob.yaml +++ /dev/null @@ -1,79 +0,0 @@ -# services/health/endurain-oidc-config-cronjob.yaml -apiVersion: batch/v1 -kind: CronJob -metadata: - name: endurain-oidc-config - namespace: health -spec: - schedule: "*/30 * * * *" - concurrencyPolicy: Forbid - successfulJobsHistoryLimit: 1 - failedJobsHistoryLimit: 3 - jobTemplate: - spec: - backoffLimit: 1 - template: - metadata: - annotations: - vault.hashicorp.com/agent-inject: "true" - vault.hashicorp.com/agent-pre-populate-only: "true" - vault.hashicorp.com/role: "health" - vault.hashicorp.com/agent-inject-secret-endurain-oidc-env: "kv/data/atlas/health/endurain-admin" - vault.hashicorp.com/agent-inject-template-endurain-oidc-env: | - {{- with secret "kv/data/atlas/health/endurain-admin" -}} - export ENDURAIN_ADMIN_USERNAME="{{ .Data.data.username }}" - export ENDURAIN_ADMIN_PASSWORD="{{ .Data.data.password }}" - {{- end }} - {{- with secret "kv/data/atlas/health/endurain-oidc" -}} - export ENDURAIN_OIDC_CLIENT_ID="{{ .Data.data.client_id }}" - export ENDURAIN_OIDC_CLIENT_SECRET="{{ 
.Data.data.client_secret }}" - export ENDURAIN_OIDC_ISSUER_URL="{{ .Data.data.issuer_url }}" - {{- end -}} - spec: - serviceAccountName: health-vault-sync - restartPolicy: Never - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: kubernetes.io/arch - operator: In - values: ["arm64"] - - key: node-role.kubernetes.io/worker - operator: Exists - preferredDuringSchedulingIgnoredDuringExecution: - - weight: 90 - preference: - matchExpressions: - - key: hardware - operator: In - values: ["rpi5"] - - weight: 70 - preference: - matchExpressions: - - key: hardware - operator: In - values: ["rpi4"] - containers: - - name: configure - image: alpine:3.20 - command: ["/bin/sh", "-c"] - args: - - | - set -euo pipefail - apk add --no-cache bash curl jq >/dev/null - . /vault/secrets/endurain-oidc-env - exec /scripts/endurain_oidc_configure.sh - env: - - name: ENDURAIN_BASE_URL - value: http://endurain.health.svc.cluster.local - volumeMounts: - - name: endurain-oidc-config-script - mountPath: /scripts - readOnly: true - volumes: - - name: endurain-oidc-config-script - configMap: - name: endurain-oidc-config-script - defaultMode: 0555 diff --git a/services/health/kustomization.yaml b/services/health/kustomization.yaml index 1690876..c4dd47e 100644 --- a/services/health/kustomization.yaml +++ b/services/health/kustomization.yaml @@ -5,26 +5,19 @@ namespace: health resources: - namespace.yaml - serviceaccount.yaml - - secretproviderclass.yaml - - vault-sync-deployment.yaml - - endurain-data-pvc.yaml - - sparkyfitness-data-pvc.yaml - - endurain-oidc-config-cronjob.yaml - - sparkyfitness-oidc-config-cronjob.yaml - - endurain-deployment.yaml - - endurain-service.yaml - - sparkyfitness-server-deployment.yaml - - sparkyfitness-server-service.yaml - - sparkyfitness-frontend-deployment.yaml - - sparkyfitness-frontend-service.yaml - - endurain-ingress.yaml - - sparkyfitness-ingress.yaml + - wger-media-pvc.yaml + - 
wger-static-pvc.yaml + - wger-admin-ensure-cronjob.yaml + - wger-user-sync-cronjob.yaml + - wger-deployment.yaml + - wger-service.yaml + - wger-ingress.yaml generatorOptions: disableNameSuffixHash: true configMapGenerator: - - name: endurain-oidc-config-script + - name: wger-nginx-config files: - - endurain_oidc_configure.sh=scripts/endurain_oidc_configure.sh - - name: sparkyfitness-oidc-config-script + - default.conf=config/nginx.conf + - name: wger-user-sync-script files: - - sparkyfitness_oidc_configure.sh=scripts/sparkyfitness_oidc_configure.sh + - wger_user_sync.py=scripts/wger_user_sync.py diff --git a/services/health/scripts/endurain_oidc_configure.sh b/services/health/scripts/endurain_oidc_configure.sh deleted file mode 100644 index 76ebc99..0000000 --- a/services/health/scripts/endurain_oidc_configure.sh +++ /dev/null @@ -1,134 +0,0 @@ -#!/usr/bin/env bash -set -euo pipefail - -base_url="${ENDURAIN_BASE_URL:-http://endurain.health.svc.cluster.local}" -admin_username="${ENDURAIN_ADMIN_USERNAME:-admin}" -admin_password="${ENDURAIN_ADMIN_PASSWORD:?ENDURAIN_ADMIN_PASSWORD is required}" -default_password="${ENDURAIN_DEFAULT_ADMIN_PASSWORD:-admin}" -oidc_client_id="${ENDURAIN_OIDC_CLIENT_ID:?ENDURAIN_OIDC_CLIENT_ID is required}" -oidc_client_secret="${ENDURAIN_OIDC_CLIENT_SECRET:?ENDURAIN_OIDC_CLIENT_SECRET is required}" -oidc_issuer_url="${ENDURAIN_OIDC_ISSUER_URL:?ENDURAIN_OIDC_ISSUER_URL is required}" - -wait_for_endurain() { - for attempt in 1 2 3 4 5 6 7 8 9 10; do - if curl -fsS "${base_url}/api/v1/about" >/dev/null 2>&1; then - return 0 - fi - sleep $((attempt * 3)) - done - return 1 -} - -login() { - local username="$1" - local password="$2" - local token - token="$(curl -sS -X POST "${base_url}/api/v1/auth/login" \ - -H "X-Client-Type: mobile" \ - -H "Content-Type: application/x-www-form-urlencoded" \ - --data-urlencode "grant_type=password" \ - --data-urlencode "username=${username}" \ - --data-urlencode "password=${password}" | jq -r '.access_token' 
2>/dev/null || true)" - if [ -n "${token}" ] && [ "${token}" != "null" ]; then - echo "${token}" - return 0 - fi - return 1 -} - -if ! wait_for_endurain; then - echo "Endurain is not responding at ${base_url}" >&2 - exit 1 -fi - -token="$(login "${admin_username}" "${admin_password}" || true)" -if [ -z "${token}" ]; then - token="$(login "${admin_username}" "${default_password}" || true)" - if [ -z "${token}" ]; then - echo "Failed to authenticate to Endurain as admin" >&2 - exit 1 - fi - if [ "${admin_password}" != "${default_password}" ]; then - user_id="$(curl -sS -H "Authorization: Bearer ${token}" -H "X-Client-Type: mobile" \ - "${base_url}/api/v1/users/username/${admin_username}" | jq -r '.id' 2>/dev/null || true)" - if [ -z "${user_id}" ] || [ "${user_id}" = "null" ]; then - echo "Admin user ${admin_username} not found" >&2 - exit 1 - fi - update_payload="$(jq -nc --arg password "${admin_password}" '{password:$password}')" - status="$(curl -sS -o /dev/null -w "%{http_code}" -X PUT \ - -H "Authorization: Bearer ${token}" \ - -H "X-Client-Type: mobile" \ - -H "Content-Type: application/json" \ - -d "${update_payload}" \ - "${base_url}/api/v1/users/${user_id}/password")" - if [ "${status}" != "200" ] && [ "${status}" != "201" ]; then - echo "Failed to rotate Endurain admin password (status ${status})" >&2 - exit 1 - fi - token="$(login "${admin_username}" "${admin_password}" || true)" - if [ -z "${token}" ]; then - echo "Failed to authenticate with rotated admin password" >&2 - exit 1 - fi - fi -fi - -idp_payload="$(jq -nc \ - --arg name "Keycloak" \ - --arg slug "keycloak" \ - --arg issuer_url "${oidc_issuer_url}" \ - --arg scopes "openid profile email" \ - --arg client_id "${oidc_client_id}" \ - --arg client_secret "${oidc_client_secret}" \ - --arg icon "keycloak" \ - --argjson enabled true \ - --argjson auto_create_users true \ - --argjson sync_user_info true \ - --argjson user_mapping 
'{"username":["preferred_username","username","email"],"email":["email","mail"],"name":["name","display_name","full_name"]}' \ - '{name:$name,slug:$slug,provider_type:"oidc",enabled:$enabled,issuer_url:$issuer_url,scopes:$scopes,icon:$icon,auto_create_users:$auto_create_users,sync_user_info:$sync_user_info,user_mapping:$user_mapping,client_id:$client_id,client_secret:$client_secret}')" - -idp_id="$(curl -sS -H "Authorization: Bearer ${token}" -H "X-Client-Type: mobile" \ - "${base_url}/api/v1/idp" | jq -r '.[] | select(.slug=="keycloak") | .id' 2>/dev/null | head -n1 || true)" - -if [ -n "${idp_id}" ] && [ "${idp_id}" != "null" ]; then - status="$(curl -sS -o /dev/null -w "%{http_code}" -X PUT \ - -H "Authorization: Bearer ${token}" \ - -H "X-Client-Type: mobile" \ - -H "Content-Type: application/json" \ - -d "${idp_payload}" \ - "${base_url}/api/v1/idp/${idp_id}")" -else - status="$(curl -sS -o /dev/null -w "%{http_code}" -X POST \ - -H "Authorization: Bearer ${token}" \ - -H "X-Client-Type: mobile" \ - -H "Content-Type: application/json" \ - -d "${idp_payload}" \ - "${base_url}/api/v1/idp")" -fi - -if [ "${status}" != "200" ] && [ "${status}" != "201" ] && [ "${status}" != "204" ]; then - echo "Failed to upsert Endurain OIDC provider (status ${status})" >&2 - exit 1 -fi - -settings_json="$(curl -sS -H "Authorization: Bearer ${token}" -H "X-Client-Type: mobile" \ - "${base_url}/api/v1/server_settings")" -if [ -z "${settings_json}" ]; then - echo "Failed to fetch Endurain server settings" >&2 - exit 1 -fi - -settings_payload="$(echo "${settings_json}" | jq \ - '.sso_enabled=true | .sso_auto_redirect=true | .signup_enabled=false | .local_login_enabled=true')" - -status="$(curl -sS -o /dev/null -w "%{http_code}" -X PUT \ - -H "Authorization: Bearer ${token}" \ - -H "X-Client-Type: mobile" \ - -H "Content-Type: application/json" \ - -d "${settings_payload}" \ - "${base_url}/api/v1/server_settings")" -if [ "${status}" != "200" ] && [ "${status}" != "201" ]; then - echo 
"Failed to update Endurain server settings (status ${status})" >&2 - exit 1 -fi diff --git a/services/health/scripts/sparkyfitness_oidc_configure.sh b/services/health/scripts/sparkyfitness_oidc_configure.sh deleted file mode 100644 index 98c6857..0000000 --- a/services/health/scripts/sparkyfitness_oidc_configure.sh +++ /dev/null @@ -1,134 +0,0 @@ -#!/usr/bin/env bash -set -euo pipefail - -base_url="${SPARKYFITNESS_BASE_URL:-http://sparkyfitness-server.health.svc.cluster.local:3010}" -frontend_url="${SPARKYFITNESS_FRONTEND_URL:?SPARKYFITNESS_FRONTEND_URL is required}" -admin_email="${SPARKYFITNESS_ADMIN_EMAIL:?SPARKYFITNESS_ADMIN_EMAIL is required}" -admin_password="${SPARKYFITNESS_ADMIN_PASSWORD:?SPARKYFITNESS_ADMIN_PASSWORD is required}" -oidc_client_id="${SPARKYFITNESS_OIDC_CLIENT_ID:?SPARKYFITNESS_OIDC_CLIENT_ID is required}" -oidc_client_secret="${SPARKYFITNESS_OIDC_CLIENT_SECRET:?SPARKYFITNESS_OIDC_CLIENT_SECRET is required}" -oidc_issuer_url="${SPARKYFITNESS_OIDC_ISSUER_URL:?SPARKYFITNESS_OIDC_ISSUER_URL is required}" - -wait_for_server() { - for attempt in 1 2 3 4 5 6 7 8 9 10; do - if curl -fsS "${base_url}/health" >/dev/null 2>&1; then - return 0 - fi - sleep $((attempt * 3)) - done - return 1 -} - -cookie_jar="$(mktemp)" -trap 'rm -f "${cookie_jar}"' EXIT - -auth_login() { - local payload - payload="$(jq -nc --arg email "${admin_email}" --arg password "${admin_password}" '{email:$email,password:$password}')" - local status - status="$(curl -sS -o /tmp/sparkyfitness_login.json -w "%{http_code}" \ - -c "${cookie_jar}" -b "${cookie_jar}" \ - -H "Content-Type: application/json" \ - -X POST "${base_url}/auth/login" \ - -d "${payload}")" - if [ "${status}" = "200" ]; then - return 0 - fi - return 1 -} - -auth_register() { - local payload - payload="$(jq -nc --arg email "${admin_email}" --arg password "${admin_password}" --arg full_name "Sparky Admin" '{email:$email,password:$password,full_name:$full_name}')" - curl -sS -o /tmp/sparkyfitness_register.json -w 
"%{http_code}" \ - -c "${cookie_jar}" -b "${cookie_jar}" \ - -H "Content-Type: application/json" \ - -X POST "${base_url}/auth/register" \ - -d "${payload}" -} - -if ! wait_for_server; then - echo "SparkyFitness is not responding at ${base_url}" >&2 - exit 1 -fi - -if ! auth_login; then - status="$(auth_register)" - if [ "${status}" = "409" ]; then - if ! auth_login; then - echo "Admin login failed after existing user detected" >&2 - exit 1 - fi - elif [ "${status}" = "201" ]; then - if ! auth_login; then - echo "Admin login failed after registration" >&2 - exit 1 - fi - elif [ "${status}" = "403" ]; then - echo "Registration disabled; unable to bootstrap admin user" >&2 - exit 1 - else - echo "Admin registration failed (status ${status})" >&2 - exit 1 - fi -fi - -settings_json="$(curl -sS -b "${cookie_jar}" "${base_url}/admin/global-settings")" -if [ -z "${settings_json}" ]; then - echo "Failed to fetch SparkyFitness global settings" >&2 - exit 1 -fi - -email_enabled="$(echo "${settings_json}" | jq -r '.enable_email_password_login // true')" -mfa_mandatory="$(echo "${settings_json}" | jq -r '.is_mfa_mandatory // .mfa_mandatory // false')" -settings_payload="$(jq -nc \ - --argjson enable_email_password_login "${email_enabled}" \ - --argjson is_oidc_active true \ - --argjson is_mfa_mandatory "${mfa_mandatory}" \ - '{enable_email_password_login:$enable_email_password_login,is_oidc_active:$is_oidc_active,is_mfa_mandatory:$is_mfa_mandatory}')" - -status="$(curl -sS -o /dev/null -w "%{http_code}" -b "${cookie_jar}" \ - -H "Content-Type: application/json" \ - -X PUT "${base_url}/admin/global-settings" \ - -d "${settings_payload}")" -if [ "${status}" != "200" ]; then - echo "Failed to update SparkyFitness global settings (status ${status})" >&2 - exit 1 -fi - -providers_json="$(curl -sS -b "${cookie_jar}" "${base_url}/admin/oidc-settings")" -provider_id="$(echo "${providers_json}" | jq -r --arg issuer "${oidc_issuer_url}" '.[] | select(.issuer_url==$issuer) | .id' 
2>/dev/null | head -n1 || true)" - -redirect_uri="${frontend_url%/}/oidc-callback" -provider_payload="$(jq -nc \ - --arg issuer_url "${oidc_issuer_url}" \ - --arg client_id "${oidc_client_id}" \ - --arg client_secret "${oidc_client_secret}" \ - --arg redirect_uri "${redirect_uri}" \ - --arg scope "openid profile email" \ - --arg token_endpoint_auth_method "client_secret_post" \ - --argjson response_types '["code"]' \ - --argjson is_active true \ - --arg display_name "Atlas SSO" \ - --argjson auto_register true \ - --arg signing_algorithm "RS256" \ - --arg profile_signing_algorithm "none" \ - --argjson timeout 30000 \ - '{issuer_url:$issuer_url,client_id:$client_id,client_secret:$client_secret,redirect_uris:[$redirect_uri],scope:$scope,token_endpoint_auth_method:$token_endpoint_auth_method,response_types:$response_types,is_active:$is_active,display_name:$display_name,auto_register:$auto_register,signing_algorithm:$signing_algorithm,profile_signing_algorithm:$profile_signing_algorithm,timeout:$timeout}')" - -if [ -n "${provider_id}" ] && [ "${provider_id}" != "null" ]; then - status="$(curl -sS -o /dev/null -w "%{http_code}" -b "${cookie_jar}" \ - -H "Content-Type: application/json" \ - -X PUT "${base_url}/admin/oidc-settings/${provider_id}" \ - -d "${provider_payload}")" -else - status="$(curl -sS -o /dev/null -w "%{http_code}" -b "${cookie_jar}" \ - -H "Content-Type: application/json" \ - -X POST "${base_url}/admin/oidc-settings" \ - -d "${provider_payload}")" -fi - -if [ "${status}" != "200" ] && [ "${status}" != "201" ]; then - echo "Failed to upsert SparkyFitness OIDC provider (status ${status})" >&2 - exit 1 -fi diff --git a/services/health/scripts/wger_user_sync.py b/services/health/scripts/wger_user_sync.py new file mode 100644 index 0000000..4963c79 --- /dev/null +++ b/services/health/scripts/wger_user_sync.py @@ -0,0 +1,120 @@ +#!/usr/bin/env python3 + +from __future__ import annotations + +import os +import sys + +import django + + +def _env(name: str, 
default: str = "") -> str: + value = os.getenv(name, default) + return value.strip() if isinstance(value, str) else "" + + +def _setup_django() -> None: + os.environ.setdefault("DJANGO_SETTINGS_MODULE", "settings.main") + django.setup() + + +def _set_default_gym(user) -> None: + try: + from wger.gym.models import GymConfig + except Exception: + return + + try: + config = GymConfig.objects.first() + except Exception: + return + + if not config or not getattr(config, "default_gym", None): + return + + profile = getattr(user, "userprofile", None) + if not profile or getattr(profile, "gym", None): + return + + profile.gym = config.default_gym + profile.save() + + +def _ensure_profile(user) -> None: + profile = getattr(user, "userprofile", None) + if not profile: + return + if hasattr(profile, "email_verified") and not profile.email_verified: + profile.email_verified = True + if hasattr(profile, "is_temporary") and profile.is_temporary: + profile.is_temporary = False + profile.save() + + +def _ensure_admin(username: str, password: str, email: str) -> None: + from django.contrib.auth.models import User + + if not username or not password: + raise RuntimeError("admin username/password missing") + + user, created = User.objects.get_or_create(username=username) + if created: + user.is_active = True + if not user.is_staff: + user.is_staff = True + if email: + user.email = email + user.set_password(password) + user.save() + + _ensure_profile(user) + _set_default_gym(user) + print(f"ensured admin user {username}") + + +def _ensure_user(username: str, password: str, email: str) -> None: + from django.contrib.auth.models import User + + if not username or not password: + raise RuntimeError("username/password missing") + + user, created = User.objects.get_or_create(username=username) + if created: + user.is_active = True + if email and user.email != email: + user.email = email + user.set_password(password) + user.save() + + _ensure_profile(user) + _set_default_gym(user) + action 
= "created" if created else "updated" + print(f"{action} user {username}") + + +def main() -> int: + admin_user = _env("WGER_ADMIN_USERNAME") + admin_password = _env("WGER_ADMIN_PASSWORD") + admin_email = _env("WGER_ADMIN_EMAIL") + + username = _env("WGER_USERNAME") or _env("ONLY_USERNAME") + password = _env("WGER_PASSWORD") + email = _env("WGER_EMAIL") + + if not any([admin_user and admin_password, username and password]): + print("no admin or user payload provided; exiting") + return 0 + + _setup_django() + + if admin_user and admin_password: + _ensure_admin(admin_user, admin_password, admin_email) + + if username and password: + _ensure_user(username, password, email) + + return 0 + + +if __name__ == "__main__": + sys.exit(main()) diff --git a/services/health/secretproviderclass.yaml b/services/health/secretproviderclass.yaml deleted file mode 100644 index c019c96..0000000 --- a/services/health/secretproviderclass.yaml +++ /dev/null @@ -1,167 +0,0 @@ -# services/health/secretproviderclass.yaml -apiVersion: secrets-store.csi.x-k8s.io/v1 -kind: SecretProviderClass -metadata: - name: health-vault - namespace: health -spec: - provider: vault - parameters: - vaultAddress: "http://vault.vault.svc.cluster.local:8200" - roleName: "health" - objects: | - - objectName: "endurain-db__DB_HOST" - secretPath: "kv/data/atlas/health/endurain-db" - secretKey: "DB_HOST" - - objectName: "endurain-db__DB_PORT" - secretPath: "kv/data/atlas/health/endurain-db" - secretKey: "DB_PORT" - - objectName: "endurain-db__DB_USER" - secretPath: "kv/data/atlas/health/endurain-db" - secretKey: "DB_USER" - - objectName: "endurain-db__DB_PASSWORD" - secretPath: "kv/data/atlas/health/endurain-db" - secretKey: "DB_PASSWORD" - - objectName: "endurain-db__DB_DATABASE" - secretPath: "kv/data/atlas/health/endurain-db" - secretKey: "DB_DATABASE" - - objectName: "endurain-secrets__SECRET_KEY" - secretPath: "kv/data/atlas/health/endurain-secrets" - secretKey: "SECRET_KEY" - - objectName: 
"endurain-secrets__FERNET_KEY" - secretPath: "kv/data/atlas/health/endurain-secrets" - secretKey: "FERNET_KEY" - - objectName: "endurain-admin__username" - secretPath: "kv/data/atlas/health/endurain-admin" - secretKey: "username" - - objectName: "endurain-admin__password" - secretPath: "kv/data/atlas/health/endurain-admin" - secretKey: "password" - - objectName: "endurain-oidc__client_id" - secretPath: "kv/data/atlas/health/endurain-oidc" - secretKey: "client_id" - - objectName: "endurain-oidc__client_secret" - secretPath: "kv/data/atlas/health/endurain-oidc" - secretKey: "client_secret" - - objectName: "endurain-oidc__issuer_url" - secretPath: "kv/data/atlas/health/endurain-oidc" - secretKey: "issuer_url" - - objectName: "sparkyfitness-db__SPARKY_FITNESS_DB_HOST" - secretPath: "kv/data/atlas/health/sparkyfitness-db" - secretKey: "SPARKY_FITNESS_DB_HOST" - - objectName: "sparkyfitness-db__SPARKY_FITNESS_DB_PORT" - secretPath: "kv/data/atlas/health/sparkyfitness-db" - secretKey: "SPARKY_FITNESS_DB_PORT" - - objectName: "sparkyfitness-db__SPARKY_FITNESS_DB_NAME" - secretPath: "kv/data/atlas/health/sparkyfitness-db" - secretKey: "SPARKY_FITNESS_DB_NAME" - - objectName: "sparkyfitness-db__SPARKY_FITNESS_DB_USER" - secretPath: "kv/data/atlas/health/sparkyfitness-db" - secretKey: "SPARKY_FITNESS_DB_USER" - - objectName: "sparkyfitness-db__SPARKY_FITNESS_DB_PASSWORD" - secretPath: "kv/data/atlas/health/sparkyfitness-db" - secretKey: "SPARKY_FITNESS_DB_PASSWORD" - - objectName: "sparkyfitness-db__SPARKY_FITNESS_APP_DB_USER" - secretPath: "kv/data/atlas/health/sparkyfitness-db" - secretKey: "SPARKY_FITNESS_APP_DB_USER" - - objectName: "sparkyfitness-db__SPARKY_FITNESS_APP_DB_PASSWORD" - secretPath: "kv/data/atlas/health/sparkyfitness-db" - secretKey: "SPARKY_FITNESS_APP_DB_PASSWORD" - - objectName: "sparkyfitness-secrets__JWT_SECRET" - secretPath: "kv/data/atlas/health/sparkyfitness-secrets" - secretKey: "JWT_SECRET" - - objectName: 
"sparkyfitness-secrets__SPARKY_FITNESS_API_ENCRYPTION_KEY" - secretPath: "kv/data/atlas/health/sparkyfitness-secrets" - secretKey: "SPARKY_FITNESS_API_ENCRYPTION_KEY" - - objectName: "sparkyfitness-admin__email" - secretPath: "kv/data/atlas/health/sparkyfitness-admin" - secretKey: "email" - - objectName: "sparkyfitness-admin__password" - secretPath: "kv/data/atlas/health/sparkyfitness-admin" - secretKey: "password" - - objectName: "sparkyfitness-oidc__client_id" - secretPath: "kv/data/atlas/health/sparkyfitness-oidc" - secretKey: "client_id" - - objectName: "sparkyfitness-oidc__client_secret" - secretPath: "kv/data/atlas/health/sparkyfitness-oidc" - secretKey: "client_secret" - - objectName: "sparkyfitness-oidc__issuer_url" - secretPath: "kv/data/atlas/health/sparkyfitness-oidc" - secretKey: "issuer_url" - secretObjects: - - secretName: endurain-db - type: Opaque - data: - - objectName: endurain-db__DB_HOST - key: DB_HOST - - objectName: endurain-db__DB_PORT - key: DB_PORT - - objectName: endurain-db__DB_USER - key: DB_USER - - objectName: endurain-db__DB_PASSWORD - key: DB_PASSWORD - - objectName: endurain-db__DB_DATABASE - key: DB_DATABASE - - secretName: endurain-secrets - type: Opaque - data: - - objectName: endurain-secrets__SECRET_KEY - key: SECRET_KEY - - objectName: endurain-secrets__FERNET_KEY - key: FERNET_KEY - - secretName: endurain-admin - type: Opaque - data: - - objectName: endurain-admin__username - key: username - - objectName: endurain-admin__password - key: password - - secretName: endurain-oidc - type: Opaque - data: - - objectName: endurain-oidc__client_id - key: client_id - - objectName: endurain-oidc__client_secret - key: client_secret - - objectName: endurain-oidc__issuer_url - key: issuer_url - - secretName: sparkyfitness-db - type: Opaque - data: - - objectName: sparkyfitness-db__SPARKY_FITNESS_DB_HOST - key: SPARKY_FITNESS_DB_HOST - - objectName: sparkyfitness-db__SPARKY_FITNESS_DB_PORT - key: SPARKY_FITNESS_DB_PORT - - objectName: 
sparkyfitness-db__SPARKY_FITNESS_DB_NAME - key: SPARKY_FITNESS_DB_NAME - - objectName: sparkyfitness-db__SPARKY_FITNESS_DB_USER - key: SPARKY_FITNESS_DB_USER - - objectName: sparkyfitness-db__SPARKY_FITNESS_DB_PASSWORD - key: SPARKY_FITNESS_DB_PASSWORD - - objectName: sparkyfitness-db__SPARKY_FITNESS_APP_DB_USER - key: SPARKY_FITNESS_APP_DB_USER - - objectName: sparkyfitness-db__SPARKY_FITNESS_APP_DB_PASSWORD - key: SPARKY_FITNESS_APP_DB_PASSWORD - - secretName: sparkyfitness-secrets - type: Opaque - data: - - objectName: sparkyfitness-secrets__JWT_SECRET - key: JWT_SECRET - - objectName: sparkyfitness-secrets__SPARKY_FITNESS_API_ENCRYPTION_KEY - key: SPARKY_FITNESS_API_ENCRYPTION_KEY - - secretName: sparkyfitness-admin - type: Opaque - data: - - objectName: sparkyfitness-admin__email - key: email - - objectName: sparkyfitness-admin__password - key: password - - secretName: sparkyfitness-oidc - type: Opaque - data: - - objectName: sparkyfitness-oidc__client_id - key: client_id - - objectName: sparkyfitness-oidc__client_secret - key: client_secret - - objectName: sparkyfitness-oidc__issuer_url - key: issuer_url diff --git a/services/health/sparkyfitness-frontend-deployment.yaml b/services/health/sparkyfitness-frontend-deployment.yaml deleted file mode 100644 index 38df36a..0000000 --- a/services/health/sparkyfitness-frontend-deployment.yaml +++ /dev/null @@ -1,81 +0,0 @@ -# services/health/sparkyfitness-frontend-deployment.yaml -apiVersion: apps/v1 -kind: Deployment -metadata: - name: sparkyfitness-frontend - namespace: health - labels: - app: sparkyfitness-frontend -spec: - replicas: 1 - selector: - matchLabels: - app: sparkyfitness-frontend - strategy: - type: RollingUpdate - rollingUpdate: - maxSurge: 0 - maxUnavailable: 1 - template: - metadata: - labels: - app: sparkyfitness-frontend - spec: - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: hardware - operator: In - values: ["rpi5", 
"rpi4"] - - key: node-role.kubernetes.io/worker - operator: Exists - preferredDuringSchedulingIgnoredDuringExecution: - - weight: 90 - preference: - matchExpressions: - - key: hardware - operator: In - values: ["rpi5"] - - weight: 70 - preference: - matchExpressions: - - key: hardware - operator: In - values: ["rpi4"] - containers: - - name: sparkyfitness-frontend - image: codewithcj/sparkyfitness:0.16.3.3 - imagePullPolicy: IfNotPresent - ports: - - name: http - containerPort: 80 - env: - - name: SPARKY_FITNESS_SERVER_HOST - value: sparkyfitness-server - - name: SPARKY_FITNESS_SERVER_PORT - value: "3010" - readinessProbe: - httpGet: - path: / - port: http - initialDelaySeconds: 10 - periodSeconds: 10 - timeoutSeconds: 3 - failureThreshold: 6 - livenessProbe: - httpGet: - path: / - port: http - initialDelaySeconds: 30 - periodSeconds: 20 - timeoutSeconds: 3 - failureThreshold: 6 - resources: - requests: - cpu: 100m - memory: 256Mi - limits: - cpu: 500m - memory: 512Mi diff --git a/services/health/sparkyfitness-frontend-service.yaml b/services/health/sparkyfitness-frontend-service.yaml deleted file mode 100644 index 0850d6c..0000000 --- a/services/health/sparkyfitness-frontend-service.yaml +++ /dev/null @@ -1,15 +0,0 @@ -# services/health/sparkyfitness-frontend-service.yaml -apiVersion: v1 -kind: Service -metadata: - name: sparkyfitness-frontend - namespace: health - labels: - app: sparkyfitness-frontend -spec: - selector: - app: sparkyfitness-frontend - ports: - - name: http - port: 80 - targetPort: http diff --git a/services/health/sparkyfitness-ingress.yaml b/services/health/sparkyfitness-ingress.yaml deleted file mode 100644 index b9d5758..0000000 --- a/services/health/sparkyfitness-ingress.yaml +++ /dev/null @@ -1,26 +0,0 @@ -# services/health/sparkyfitness-ingress.yaml -apiVersion: networking.k8s.io/v1 -kind: Ingress -metadata: - name: sparkyfitness - namespace: health - annotations: - kubernetes.io/ingress.class: traefik - 
traefik.ingress.kubernetes.io/router.entrypoints: websecure - traefik.ingress.kubernetes.io/router.tls: "true" - cert-manager.io/cluster-issuer: letsencrypt -spec: - tls: - - hosts: ["sparkyfitness.bstein.dev"] - secretName: sparkyfitness-tls - rules: - - host: sparkyfitness.bstein.dev - http: - paths: - - path: / - pathType: Prefix - backend: - service: - name: sparkyfitness-frontend - port: - number: 80 diff --git a/services/health/sparkyfitness-oidc-config-cronjob.yaml b/services/health/sparkyfitness-oidc-config-cronjob.yaml deleted file mode 100644 index b3d4c52..0000000 --- a/services/health/sparkyfitness-oidc-config-cronjob.yaml +++ /dev/null @@ -1,81 +0,0 @@ -# services/health/sparkyfitness-oidc-config-cronjob.yaml -apiVersion: batch/v1 -kind: CronJob -metadata: - name: sparkyfitness-oidc-config - namespace: health -spec: - schedule: "*/30 * * * *" - concurrencyPolicy: Forbid - successfulJobsHistoryLimit: 1 - failedJobsHistoryLimit: 3 - jobTemplate: - spec: - backoffLimit: 1 - template: - metadata: - annotations: - vault.hashicorp.com/agent-inject: "true" - vault.hashicorp.com/agent-pre-populate-only: "true" - vault.hashicorp.com/role: "health" - vault.hashicorp.com/agent-inject-secret-sparky-oidc-env: "kv/data/atlas/health/sparkyfitness-admin" - vault.hashicorp.com/agent-inject-template-sparky-oidc-env: | - {{- with secret "kv/data/atlas/health/sparkyfitness-admin" -}} - export SPARKYFITNESS_ADMIN_EMAIL="{{ .Data.data.email }}" - export SPARKYFITNESS_ADMIN_PASSWORD="{{ .Data.data.password }}" - {{- end }} - {{- with secret "kv/data/atlas/health/sparkyfitness-oidc" -}} - export SPARKYFITNESS_OIDC_CLIENT_ID="{{ .Data.data.client_id }}" - export SPARKYFITNESS_OIDC_CLIENT_SECRET="{{ .Data.data.client_secret }}" - export SPARKYFITNESS_OIDC_ISSUER_URL="{{ .Data.data.issuer_url }}" - {{- end -}} - spec: - serviceAccountName: health-vault-sync - restartPolicy: Never - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - 
- matchExpressions: - - key: kubernetes.io/arch - operator: In - values: ["arm64"] - - key: node-role.kubernetes.io/worker - operator: Exists - preferredDuringSchedulingIgnoredDuringExecution: - - weight: 90 - preference: - matchExpressions: - - key: hardware - operator: In - values: ["rpi5"] - - weight: 70 - preference: - matchExpressions: - - key: hardware - operator: In - values: ["rpi4"] - containers: - - name: configure - image: alpine:3.20 - command: ["/bin/sh", "-c"] - args: - - | - set -euo pipefail - apk add --no-cache bash curl jq >/dev/null - . /vault/secrets/sparky-oidc-env - exec /scripts/sparkyfitness_oidc_configure.sh - env: - - name: SPARKYFITNESS_BASE_URL - value: http://sparkyfitness-server.health.svc.cluster.local:3010 - - name: SPARKYFITNESS_FRONTEND_URL - value: https://sparkyfitness.bstein.dev - volumeMounts: - - name: sparkyfitness-oidc-config-script - mountPath: /scripts - readOnly: true - volumes: - - name: sparkyfitness-oidc-config-script - configMap: - name: sparkyfitness-oidc-config-script - defaultMode: 0555 diff --git a/services/health/sparkyfitness-server-deployment.yaml b/services/health/sparkyfitness-server-deployment.yaml deleted file mode 100644 index e920662..0000000 --- a/services/health/sparkyfitness-server-deployment.yaml +++ /dev/null @@ -1,170 +0,0 @@ -# services/health/sparkyfitness-server-deployment.yaml -apiVersion: apps/v1 -kind: Deployment -metadata: - name: sparkyfitness-server - namespace: health - labels: - app: sparkyfitness-server -spec: - replicas: 1 - selector: - matchLabels: - app: sparkyfitness-server - strategy: - type: RollingUpdate - rollingUpdate: - maxSurge: 0 - maxUnavailable: 1 - template: - metadata: - labels: - app: sparkyfitness-server - spec: - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: hardware - operator: In - values: ["rpi5", "rpi4"] - - key: node-role.kubernetes.io/worker - operator: Exists - 
preferredDuringSchedulingIgnoredDuringExecution: - - weight: 90 - preference: - matchExpressions: - - key: hardware - operator: In - values: ["rpi5"] - - weight: 70 - preference: - matchExpressions: - - key: hardware - operator: In - values: ["rpi4"] - securityContext: - runAsUser: 1000 - runAsGroup: 1000 - fsGroup: 1000 - fsGroupChangePolicy: OnRootMismatch - initContainers: - - name: init-data - image: alpine:3.20 - command: ["/bin/sh", "-c"] - args: - - | - set -e - mkdir -p /data/uploads /data/backup - chown -R 1000:1000 /data - securityContext: - runAsUser: 0 - runAsGroup: 0 - volumeMounts: - - name: sparkyfitness-data - mountPath: /data - containers: - - name: sparkyfitness-server - image: codewithcj/sparkyfitness_server:0.16.3.3 - imagePullPolicy: IfNotPresent - ports: - - name: http - containerPort: 3010 - env: - - name: SPARKY_FITNESS_SERVER_PORT - value: "3010" - - name: SPARKY_FITNESS_LOG_LEVEL - value: INFO - - name: NODE_ENV - value: production - - name: TZ - value: Etc/UTC - - name: SPARKY_FITNESS_FRONTEND_URL - value: https://sparkyfitness.bstein.dev - - name: SPARKY_FITNESS_DISABLE_SIGNUP - value: "false" - - name: SPARKY_FITNESS_DB_HOST - valueFrom: - secretKeyRef: - name: sparkyfitness-db - key: SPARKY_FITNESS_DB_HOST - - name: SPARKY_FITNESS_DB_PORT - valueFrom: - secretKeyRef: - name: sparkyfitness-db - key: SPARKY_FITNESS_DB_PORT - - name: SPARKY_FITNESS_DB_NAME - valueFrom: - secretKeyRef: - name: sparkyfitness-db - key: SPARKY_FITNESS_DB_NAME - - name: SPARKY_FITNESS_DB_USER - valueFrom: - secretKeyRef: - name: sparkyfitness-db - key: SPARKY_FITNESS_DB_USER - - name: SPARKY_FITNESS_DB_PASSWORD - valueFrom: - secretKeyRef: - name: sparkyfitness-db - key: SPARKY_FITNESS_DB_PASSWORD - - name: SPARKY_FITNESS_APP_DB_USER - valueFrom: - secretKeyRef: - name: sparkyfitness-db - key: SPARKY_FITNESS_APP_DB_USER - - name: SPARKY_FITNESS_APP_DB_PASSWORD - valueFrom: - secretKeyRef: - name: sparkyfitness-db - key: SPARKY_FITNESS_APP_DB_PASSWORD - - name: 
SPARKY_FITNESS_API_ENCRYPTION_KEY - valueFrom: - secretKeyRef: - name: sparkyfitness-secrets - key: SPARKY_FITNESS_API_ENCRYPTION_KEY - - name: JWT_SECRET - valueFrom: - secretKeyRef: - name: sparkyfitness-secrets - key: JWT_SECRET - - name: SPARKY_FITNESS_ADMIN_EMAIL - valueFrom: - secretKeyRef: - name: sparkyfitness-admin - key: email - volumeMounts: - - name: sparkyfitness-data - mountPath: /app/SparkyFitnessServer/uploads - subPath: uploads - - name: sparkyfitness-data - mountPath: /app/SparkyFitnessServer/backup - subPath: backup - readinessProbe: - httpGet: - path: /health - port: http - initialDelaySeconds: 15 - periodSeconds: 10 - timeoutSeconds: 3 - failureThreshold: 6 - livenessProbe: - httpGet: - path: /health - port: http - initialDelaySeconds: 30 - periodSeconds: 20 - timeoutSeconds: 3 - failureThreshold: 6 - resources: - requests: - cpu: 200m - memory: 512Mi - limits: - cpu: "1" - memory: 2Gi - volumes: - - name: sparkyfitness-data - persistentVolumeClaim: - claimName: sparkyfitness-data diff --git a/services/health/sparkyfitness-server-service.yaml b/services/health/sparkyfitness-server-service.yaml deleted file mode 100644 index 91220f9..0000000 --- a/services/health/sparkyfitness-server-service.yaml +++ /dev/null @@ -1,15 +0,0 @@ -# services/health/sparkyfitness-server-service.yaml -apiVersion: v1 -kind: Service -metadata: - name: sparkyfitness-server - namespace: health - labels: - app: sparkyfitness-server -spec: - selector: - app: sparkyfitness-server - ports: - - name: http - port: 3010 - targetPort: http diff --git a/services/health/vault-sync-deployment.yaml b/services/health/vault-sync-deployment.yaml deleted file mode 100644 index 7b4c08e..0000000 --- a/services/health/vault-sync-deployment.yaml +++ /dev/null @@ -1,34 +0,0 @@ -# services/health/vault-sync-deployment.yaml -apiVersion: apps/v1 -kind: Deployment -metadata: - name: health-vault-sync - namespace: health -spec: - replicas: 1 - selector: - matchLabels: - app: health-vault-sync - 
template: - metadata: - labels: - app: health-vault-sync - spec: - serviceAccountName: health-vault-sync - containers: - - name: sync - image: alpine:3.20 - command: ["/bin/sh", "-c"] - args: - - "sleep infinity" - volumeMounts: - - name: vault-secrets - mountPath: /vault/secrets - readOnly: true - volumes: - - name: vault-secrets - csi: - driver: secrets-store.csi.k8s.io - readOnly: true - volumeAttributes: - secretProviderClass: health-vault diff --git a/services/health/wger-admin-ensure-cronjob.yaml b/services/health/wger-admin-ensure-cronjob.yaml new file mode 100644 index 0000000..cc422e2 --- /dev/null +++ b/services/health/wger-admin-ensure-cronjob.yaml @@ -0,0 +1,92 @@ +# services/health/wger-admin-ensure-cronjob.yaml +apiVersion: batch/v1 +kind: CronJob +metadata: + name: wger-admin-ensure + namespace: health +spec: + schedule: "15 3 * * *" + concurrencyPolicy: Forbid + successfulJobsHistoryLimit: 1 + failedJobsHistoryLimit: 3 + jobTemplate: + spec: + backoffLimit: 1 + template: + metadata: + annotations: + vault.hashicorp.com/agent-inject: "true" + vault.hashicorp.com/agent-pre-populate-only: "true" + vault.hashicorp.com/role: "health" + vault.hashicorp.com/agent-inject-secret-wger-env: "kv/data/atlas/health/wger-db" + vault.hashicorp.com/agent-inject-template-wger-env: | + {{- with secret "kv/data/atlas/health/wger-db" -}} + export DJANGO_DB_HOST="{{ .Data.data.DJANGO_DB_HOST }}" + export DJANGO_DB_PORT="{{ .Data.data.DJANGO_DB_PORT }}" + export DJANGO_DB_DATABASE="{{ .Data.data.DJANGO_DB_DATABASE }}" + export DJANGO_DB_USER="{{ .Data.data.DJANGO_DB_USER }}" + export DJANGO_DB_PASSWORD="{{ .Data.data.DJANGO_DB_PASSWORD }}" + {{- end }} + {{- with secret "kv/data/atlas/health/wger-secrets" -}} + export SECRET_KEY="{{ .Data.data.SECRET_KEY }}" + export SIGNING_KEY="{{ .Data.data.SIGNING_KEY }}" + {{- end }} + {{- with secret "kv/data/atlas/health/wger-admin" -}} + export WGER_ADMIN_USERNAME="{{ .Data.data.username }}" + export WGER_ADMIN_PASSWORD="{{ 
.Data.data.password }}" + {{- end -}} + spec: + serviceAccountName: health-vault-sync + restartPolicy: Never + affinity: + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: hardware + operator: In + values: ["rpi5"] + - weight: 70 + preference: + matchExpressions: + - key: hardware + operator: In + values: ["rpi4"] + nodeSelector: + kubernetes.io/arch: arm64 + node-role.kubernetes.io/worker: "true" + containers: + - name: ensure + image: wger/server@sha256:710588b78af4e0aa0b4d8a8061e4563e16eae80eeaccfe7f9e0d9cbdd7f0cbc5 + imagePullPolicy: IfNotPresent + command: ["/bin/sh", "-c"] + args: + - | + set -eu + . /vault/secrets/wger-env + exec python /scripts/wger_user_sync.py + env: + - name: SITE_URL + value: https://health.bstein.dev + - name: TIME_ZONE + value: Etc/UTC + - name: TZ + value: Etc/UTC + - name: DJANGO_DEBUG + value: "False" + - name: DJANGO_DB_ENGINE + value: django.db.backends.postgresql + - name: DJANGO_CACHE_BACKEND + value: django.core.cache.backends.locmem.LocMemCache + - name: DJANGO_CACHE_LOCATION + value: wger-cache + volumeMounts: + - name: wger-user-sync-script + mountPath: /scripts + readOnly: true + volumes: + - name: wger-user-sync-script + configMap: + name: wger-user-sync-script + defaultMode: 0555 diff --git a/services/health/wger-deployment.yaml b/services/health/wger-deployment.yaml new file mode 100644 index 0000000..e39db5b --- /dev/null +++ b/services/health/wger-deployment.yaml @@ -0,0 +1,212 @@ +# services/health/wger-deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: wger + namespace: health +spec: + replicas: 1 + revisionHistoryLimit: 3 + selector: + matchLabels: + app: wger + template: + metadata: + labels: + app: wger + annotations: + vault.hashicorp.com/agent-inject: "true" + vault.hashicorp.com/role: "health" + vault.hashicorp.com/agent-inject-secret-wger-env: "kv/data/atlas/health/wger-db" + 
vault.hashicorp.com/agent-inject-template-wger-env: | + {{- with secret "kv/data/atlas/health/wger-db" -}} + export DJANGO_DB_HOST="{{ .Data.data.DJANGO_DB_HOST }}" + export DJANGO_DB_PORT="{{ .Data.data.DJANGO_DB_PORT }}" + export DJANGO_DB_DATABASE="{{ .Data.data.DJANGO_DB_DATABASE }}" + export DJANGO_DB_USER="{{ .Data.data.DJANGO_DB_USER }}" + export DJANGO_DB_PASSWORD="{{ .Data.data.DJANGO_DB_PASSWORD }}" + {{- end }} + {{- with secret "kv/data/atlas/health/wger-secrets" -}} + export SECRET_KEY="{{ .Data.data.SECRET_KEY }}" + export SIGNING_KEY="{{ .Data.data.SIGNING_KEY }}" + {{- end -}} + spec: + affinity: + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: hardware + operator: In + values: ["rpi5"] + - weight: 70 + preference: + matchExpressions: + - key: hardware + operator: In + values: ["rpi4"] + securityContext: + fsGroup: 1000 + fsGroupChangePolicy: OnRootMismatch + nodeSelector: + kubernetes.io/arch: arm64 + node-role.kubernetes.io/worker: "true" + serviceAccountName: health-vault-sync + initContainers: + - name: init-storage + image: alpine:3.20 + command: ["/bin/sh", "-c"] + args: + - | + set -e + mkdir -p /wger/static /wger/media + chown -R 1000:1000 /wger + securityContext: + runAsUser: 0 + runAsGroup: 0 + volumeMounts: + - name: wger-static + mountPath: /wger/static + - name: wger-media + mountPath: /wger/media + containers: + - name: wger + image: wger/server@sha256:710588b78af4e0aa0b4d8a8061e4563e16eae80eeaccfe7f9e0d9cbdd7f0cbc5 + imagePullPolicy: IfNotPresent + command: ["/bin/sh", "-c"] + args: + - | + set -eu + . 
/vault/secrets/wger-env + exec /home/wger/entrypoint.sh + ports: + - name: app + containerPort: 8000 + env: + - name: SITE_URL + value: https://health.bstein.dev + - name: CSRF_TRUSTED_ORIGINS + value: https://health.bstein.dev + - name: X_FORWARDED_PROTO_HEADER_SET + value: "true" + - name: NUMBER_OF_PROXIES + value: "1" + - name: TIME_ZONE + value: Etc/UTC + - name: TZ + value: Etc/UTC + - name: DJANGO_DEBUG + value: "False" + - name: DJANGO_PERFORM_MIGRATIONS + value: "True" + - name: DJANGO_DB_ENGINE + value: django.db.backends.postgresql + - name: DJANGO_CACHE_BACKEND + value: django.core.cache.backends.locmem.LocMemCache + - name: DJANGO_CACHE_LOCATION + value: wger-cache + - name: DJANGO_CACHE_TIMEOUT + value: "3600" + - name: ALLOW_REGISTRATION + value: "False" + - name: ALLOW_GUEST_USERS + value: "False" + - name: ALLOW_UPLOAD_VIDEOS + value: "False" + - name: USE_CELERY + value: "False" + - name: SYNC_EXERCISES_CELERY + value: "False" + - name: SYNC_INGREDIENTS_CELERY + value: "False" + - name: SYNC_EXERCISE_IMAGES_CELERY + value: "False" + - name: SYNC_EXERCISE_VIDEOS_CELERY + value: "False" + - name: CACHE_API_EXERCISES_CELERY + value: "False" + - name: DOWNLOAD_INGREDIENTS_FROM + value: "None" + - name: ENABLE_EMAIL + value: "False" + volumeMounts: + - name: wger-static + mountPath: /home/wger/static + - name: wger-media + mountPath: /home/wger/media + readinessProbe: + httpGet: + path: /api/v2/version/ + port: app + initialDelaySeconds: 20 + periodSeconds: 10 + timeoutSeconds: 3 + failureThreshold: 6 + livenessProbe: + httpGet: + path: /api/v2/version/ + port: app + initialDelaySeconds: 45 + periodSeconds: 20 + timeoutSeconds: 3 + failureThreshold: 6 + resources: + requests: + cpu: 200m + memory: 512Mi + limits: + cpu: "1" + memory: 2Gi + - name: nginx + image: nginx:1.27.5-alpine@sha256:65645c7bb6a0661892a8b03b89d0743208a18dd2f3f17a54ef4b76fb8e2f2a10 + imagePullPolicy: IfNotPresent + ports: + - name: http + containerPort: 8080 + securityContext: + 
runAsUser: 101 + runAsGroup: 101 + allowPrivilegeEscalation: false + volumeMounts: + - name: wger-nginx-config + mountPath: /etc/nginx/conf.d/default.conf + subPath: default.conf + - name: wger-static + mountPath: /wger/static + - name: wger-media + mountPath: /wger/media + readinessProbe: + httpGet: + path: /api/v2/version/ + port: http + initialDelaySeconds: 10 + periodSeconds: 10 + timeoutSeconds: 3 + failureThreshold: 6 + livenessProbe: + httpGet: + path: /api/v2/version/ + port: http + initialDelaySeconds: 30 + periodSeconds: 20 + timeoutSeconds: 3 + failureThreshold: 6 + resources: + requests: + cpu: 50m + memory: 64Mi + limits: + cpu: 200m + memory: 256Mi + volumes: + - name: wger-static + persistentVolumeClaim: + claimName: wger-static + - name: wger-media + persistentVolumeClaim: + claimName: wger-media + - name: wger-nginx-config + configMap: + name: wger-nginx-config + defaultMode: 0444 diff --git a/services/health/endurain-ingress.yaml b/services/health/wger-ingress.yaml similarity index 72% rename from services/health/endurain-ingress.yaml rename to services/health/wger-ingress.yaml index a7b2cc0..c868fbf 100644 --- a/services/health/endurain-ingress.yaml +++ b/services/health/wger-ingress.yaml @@ -1,8 +1,8 @@ -# services/health/endurain-ingress.yaml +# services/health/wger-ingress.yaml apiVersion: networking.k8s.io/v1 kind: Ingress metadata: - name: endurain + name: wger namespace: health annotations: kubernetes.io/ingress.class: traefik @@ -11,16 +11,16 @@ metadata: cert-manager.io/cluster-issuer: letsencrypt spec: tls: - - hosts: ["endurain.bstein.dev"] - secretName: endurain-tls + - hosts: ["health.bstein.dev"] + secretName: wger-tls rules: - - host: endurain.bstein.dev + - host: health.bstein.dev http: paths: - path: / pathType: Prefix backend: service: - name: endurain + name: wger port: number: 80 diff --git a/services/health/endurain-data-pvc.yaml b/services/health/wger-media-pvc.yaml similarity index 66% rename from 
services/health/endurain-data-pvc.yaml rename to services/health/wger-media-pvc.yaml index 6c8d244..c31d81b 100644 --- a/services/health/endurain-data-pvc.yaml +++ b/services/health/wger-media-pvc.yaml @@ -1,12 +1,12 @@ -# services/health/endurain-data-pvc.yaml +# services/health/wger-media-pvc.yaml apiVersion: v1 kind: PersistentVolumeClaim metadata: - name: endurain-data + name: wger-media namespace: health spec: accessModes: ["ReadWriteOnce"] storageClassName: asteria resources: requests: - storage: 10Gi + storage: 20Gi diff --git a/services/health/endurain-service.yaml b/services/health/wger-service.yaml similarity index 57% rename from services/health/endurain-service.yaml rename to services/health/wger-service.yaml index cffe116..d01101a 100644 --- a/services/health/endurain-service.yaml +++ b/services/health/wger-service.yaml @@ -1,14 +1,12 @@ -# services/health/endurain-service.yaml +# services/health/wger-service.yaml apiVersion: v1 kind: Service metadata: - name: endurain + name: wger namespace: health - labels: - app: endurain spec: selector: - app: endurain + app: wger ports: - name: http port: 80 diff --git a/services/health/sparkyfitness-data-pvc.yaml b/services/health/wger-static-pvc.yaml similarity index 64% rename from services/health/sparkyfitness-data-pvc.yaml rename to services/health/wger-static-pvc.yaml index 0fbcf7b..2c6506a 100644 --- a/services/health/sparkyfitness-data-pvc.yaml +++ b/services/health/wger-static-pvc.yaml @@ -1,12 +1,12 @@ -# services/health/sparkyfitness-data-pvc.yaml +# services/health/wger-static-pvc.yaml apiVersion: v1 kind: PersistentVolumeClaim metadata: - name: sparkyfitness-data + name: wger-static namespace: health spec: accessModes: ["ReadWriteOnce"] storageClassName: asteria resources: requests: - storage: 10Gi + storage: 5Gi diff --git a/services/health/wger-user-sync-cronjob.yaml b/services/health/wger-user-sync-cronjob.yaml new file mode 100644 index 0000000..5e23852 --- /dev/null +++ 
b/services/health/wger-user-sync-cronjob.yaml @@ -0,0 +1,89 @@ +# services/health/wger-user-sync-cronjob.yaml +apiVersion: batch/v1 +kind: CronJob +metadata: + name: wger-user-sync + namespace: health +spec: + schedule: "0 5 * * *" + suspend: true + concurrencyPolicy: Forbid + successfulJobsHistoryLimit: 1 + failedJobsHistoryLimit: 3 + jobTemplate: + spec: + backoffLimit: 0 + template: + metadata: + annotations: + vault.hashicorp.com/agent-inject: "true" + vault.hashicorp.com/agent-pre-populate-only: "true" + vault.hashicorp.com/role: "health" + vault.hashicorp.com/agent-inject-secret-wger-env: "kv/data/atlas/health/wger-db" + vault.hashicorp.com/agent-inject-template-wger-env: | + {{- with secret "kv/data/atlas/health/wger-db" -}} + export DJANGO_DB_HOST="{{ .Data.data.DJANGO_DB_HOST }}" + export DJANGO_DB_PORT="{{ .Data.data.DJANGO_DB_PORT }}" + export DJANGO_DB_DATABASE="{{ .Data.data.DJANGO_DB_DATABASE }}" + export DJANGO_DB_USER="{{ .Data.data.DJANGO_DB_USER }}" + export DJANGO_DB_PASSWORD="{{ .Data.data.DJANGO_DB_PASSWORD }}" + {{- end }} + {{- with secret "kv/data/atlas/health/wger-secrets" -}} + export SECRET_KEY="{{ .Data.data.SECRET_KEY }}" + export SIGNING_KEY="{{ .Data.data.SIGNING_KEY }}" + {{- end -}} + spec: + serviceAccountName: health-vault-sync + restartPolicy: Never + affinity: + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: hardware + operator: In + values: ["rpi5"] + - weight: 70 + preference: + matchExpressions: + - key: hardware + operator: In + values: ["rpi4"] + nodeSelector: + kubernetes.io/arch: arm64 + node-role.kubernetes.io/worker: "true" + containers: + - name: sync + image: wger/server@sha256:710588b78af4e0aa0b4d8a8061e4563e16eae80eeaccfe7f9e0d9cbdd7f0cbc5 + imagePullPolicy: IfNotPresent + command: ["/bin/sh", "-c"] + args: + - | + set -eu + . 
/vault/secrets/wger-env + exec python /scripts/wger_user_sync.py + env: + - name: SITE_URL + value: https://health.bstein.dev + - name: TIME_ZONE + value: Etc/UTC + - name: TZ + value: Etc/UTC + - name: DJANGO_DEBUG + value: "False" + - name: DJANGO_DB_ENGINE + value: django.db.backends.postgresql + - name: DJANGO_CACHE_BACKEND + value: django.core.cache.backends.locmem.LocMemCache + - name: DJANGO_CACHE_LOCATION + value: wger-cache + volumeMounts: + - name: wger-user-sync-script + mountPath: /scripts + readOnly: true + volumes: + - name: wger-user-sync-script + configMap: + name: wger-user-sync-script + defaultMode: 0555 diff --git a/services/jenkins/deployment.yaml b/services/jenkins/deployment.yaml index ec749e8..9ff7683 100644 --- a/services/jenkins/deployment.yaml +++ b/services/jenkins/deployment.yaml @@ -17,6 +17,27 @@ spec: metadata: labels: app: jenkins + annotations: + vault.hashicorp.com/agent-inject: "true" + vault.hashicorp.com/role: "jenkins" + vault.hashicorp.com/agent-inject-secret-jenkins-env: "kv/data/atlas/jenkins/jenkins-oidc" + vault.hashicorp.com/agent-inject-template-jenkins-env: | + {{- with secret "kv/data/atlas/jenkins/jenkins-oidc" -}} + export OIDC_CLIENT_ID="{{ .Data.data.clientId }}" + export OIDC_CLIENT_SECRET="{{ .Data.data.clientSecret }}" + export OIDC_AUTH_URL="{{ .Data.data.authorizationUrl }}" + export OIDC_TOKEN_URL="{{ .Data.data.tokenUrl }}" + export OIDC_USERINFO_URL="{{ .Data.data.userInfoUrl }}" + export OIDC_LOGOUT_URL="{{ .Data.data.logoutUrl }}" + {{- end }} + {{- with secret "kv/data/atlas/jenkins/harbor-robot-creds" -}} + export HARBOR_ROBOT_USERNAME="{{ .Data.data.username }}" + export HARBOR_ROBOT_PASSWORD="{{ .Data.data.password }}" + {{- end }} + {{- with secret "kv/data/atlas/jenkins/gitea-pat" -}} + export GITEA_PAT_USERNAME="{{ .Data.data.username }}" + export GITEA_PAT_TOKEN="{{ .Data.data.token }}" + {{- end -}} spec: serviceAccountName: jenkins nodeSelector: @@ -63,6 +84,13 @@ spec: - name: jenkins image: 
jenkins/jenkins:2.528.3-jdk21 imagePullPolicy: IfNotPresent + command: + - /bin/sh + - -c + - | + set -eu + . /vault/secrets/jenkins-env + exec /usr/bin/tini -- /usr/local/bin/jenkins.sh ports: - name: http containerPort: 8080 @@ -81,56 +109,6 @@ spec: value: "true" - name: OIDC_ISSUER value: "https://sso.bstein.dev/realms/atlas" - - name: OIDC_CLIENT_ID - valueFrom: - secretKeyRef: - name: jenkins-oidc - key: clientId - - name: OIDC_CLIENT_SECRET - valueFrom: - secretKeyRef: - name: jenkins-oidc - key: clientSecret - - name: OIDC_AUTH_URL - valueFrom: - secretKeyRef: - name: jenkins-oidc - key: authorizationUrl - - name: OIDC_TOKEN_URL - valueFrom: - secretKeyRef: - name: jenkins-oidc - key: tokenUrl - - name: OIDC_USERINFO_URL - valueFrom: - secretKeyRef: - name: jenkins-oidc - key: userInfoUrl - - name: OIDC_LOGOUT_URL - valueFrom: - secretKeyRef: - name: jenkins-oidc - key: logoutUrl - - name: HARBOR_ROBOT_USERNAME - valueFrom: - secretKeyRef: - name: harbor-robot-creds - key: username - - name: HARBOR_ROBOT_PASSWORD - valueFrom: - secretKeyRef: - name: harbor-robot-creds - key: password - - name: GITEA_PAT_USERNAME - valueFrom: - secretKeyRef: - name: gitea-pat - key: username - - name: GITEA_PAT_TOKEN - valueFrom: - secretKeyRef: - name: gitea-pat - key: token resources: requests: cpu: 750m diff --git a/services/jenkins/kustomization.yaml b/services/jenkins/kustomization.yaml index 809f308..acb6fb4 100644 --- a/services/jenkins/kustomization.yaml +++ b/services/jenkins/kustomization.yaml @@ -5,13 +5,10 @@ namespace: jenkins resources: - namespace.yaml - serviceaccount.yaml - - vault-serviceaccount.yaml - - secretproviderclass.yaml - pvc.yaml - configmap-jcasc.yaml - configmap-plugins.yaml - deployment.yaml - - vault-sync-deployment.yaml - service.yaml - ingress.yaml diff --git a/services/jenkins/secretproviderclass.yaml b/services/jenkins/secretproviderclass.yaml deleted file mode 100644 index 01cc66e..0000000 --- a/services/jenkins/secretproviderclass.yaml 
+++ /dev/null @@ -1,72 +0,0 @@ -# services/jenkins/secretproviderclass.yaml -apiVersion: secrets-store.csi.x-k8s.io/v1 -kind: SecretProviderClass -metadata: - name: jenkins-vault - namespace: jenkins -spec: - provider: vault - parameters: - vaultAddress: "http://vault.vault.svc.cluster.local:8200" - roleName: "jenkins" - objects: | - - objectName: "jenkins-oidc__clientId" - secretPath: "kv/data/atlas/jenkins/jenkins-oidc" - secretKey: "clientId" - - objectName: "jenkins-oidc__clientSecret" - secretPath: "kv/data/atlas/jenkins/jenkins-oidc" - secretKey: "clientSecret" - - objectName: "jenkins-oidc__authorizationUrl" - secretPath: "kv/data/atlas/jenkins/jenkins-oidc" - secretKey: "authorizationUrl" - - objectName: "jenkins-oidc__tokenUrl" - secretPath: "kv/data/atlas/jenkins/jenkins-oidc" - secretKey: "tokenUrl" - - objectName: "jenkins-oidc__userInfoUrl" - secretPath: "kv/data/atlas/jenkins/jenkins-oidc" - secretKey: "userInfoUrl" - - objectName: "jenkins-oidc__logoutUrl" - secretPath: "kv/data/atlas/jenkins/jenkins-oidc" - secretKey: "logoutUrl" - - objectName: "harbor-robot-creds__username" - secretPath: "kv/data/atlas/jenkins/harbor-robot-creds" - secretKey: "username" - - objectName: "harbor-robot-creds__password" - secretPath: "kv/data/atlas/jenkins/harbor-robot-creds" - secretKey: "password" - - objectName: "gitea-pat__username" - secretPath: "kv/data/atlas/jenkins/gitea-pat" - secretKey: "username" - - objectName: "gitea-pat__token" - secretPath: "kv/data/atlas/jenkins/gitea-pat" - secretKey: "token" - secretObjects: - - secretName: jenkins-oidc - type: Opaque - data: - - objectName: jenkins-oidc__clientId - key: clientId - - objectName: jenkins-oidc__clientSecret - key: clientSecret - - objectName: jenkins-oidc__authorizationUrl - key: authorizationUrl - - objectName: jenkins-oidc__tokenUrl - key: tokenUrl - - objectName: jenkins-oidc__userInfoUrl - key: userInfoUrl - - objectName: jenkins-oidc__logoutUrl - key: logoutUrl - - secretName: harbor-robot-creds - 
type: Opaque - data: - - objectName: harbor-robot-creds__username - key: username - - objectName: harbor-robot-creds__password - key: password - - secretName: gitea-pat - type: Opaque - data: - - objectName: gitea-pat__username - key: username - - objectName: gitea-pat__token - key: token diff --git a/services/jenkins/vault-serviceaccount.yaml b/services/jenkins/vault-serviceaccount.yaml deleted file mode 100644 index 8d31400..0000000 --- a/services/jenkins/vault-serviceaccount.yaml +++ /dev/null @@ -1,6 +0,0 @@ -# services/jenkins/vault-serviceaccount.yaml -apiVersion: v1 -kind: ServiceAccount -metadata: - name: jenkins-vault-sync - namespace: jenkins diff --git a/services/jenkins/vault-sync-deployment.yaml b/services/jenkins/vault-sync-deployment.yaml deleted file mode 100644 index 6de64f9..0000000 --- a/services/jenkins/vault-sync-deployment.yaml +++ /dev/null @@ -1,34 +0,0 @@ -# services/jenkins/vault-sync-deployment.yaml -apiVersion: apps/v1 -kind: Deployment -metadata: - name: jenkins-vault-sync - namespace: jenkins -spec: - replicas: 1 - selector: - matchLabels: - app: jenkins-vault-sync - template: - metadata: - labels: - app: jenkins-vault-sync - spec: - serviceAccountName: jenkins-vault-sync - containers: - - name: sync - image: alpine:3.20 - command: ["/bin/sh", "-c"] - args: - - "sleep infinity" - volumeMounts: - - name: vault-secrets - mountPath: /vault/secrets - readOnly: true - volumes: - - name: vault-secrets - csi: - driver: secrets-store.csi.k8s.io - readOnly: true - volumeAttributes: - secretProviderClass: jenkins-vault diff --git a/services/keycloak/endurain-oidc-secret-ensure-job.yaml b/services/keycloak/endurain-oidc-secret-ensure-job.yaml deleted file mode 100644 index 53a31c6..0000000 --- a/services/keycloak/endurain-oidc-secret-ensure-job.yaml +++ /dev/null @@ -1,53 +0,0 @@ -# services/keycloak/endurain-oidc-secret-ensure-job.yaml -apiVersion: batch/v1 -kind: Job -metadata: - name: endurain-oidc-secret-ensure-4 - namespace: sso -spec: - 
backoffLimit: 0 - ttlSecondsAfterFinished: 3600 - template: - metadata: - annotations: - vault.hashicorp.com/agent-inject: "true" - vault.hashicorp.com/agent-pre-populate-only: "true" - vault.hashicorp.com/role: "sso-secrets" - vault.hashicorp.com/agent-inject-secret-keycloak-admin-env.sh: "kv/data/atlas/shared/keycloak-admin" - vault.hashicorp.com/agent-inject-template-keycloak-admin-env.sh: | - {{ with secret "kv/data/atlas/shared/keycloak-admin" }} - export KEYCLOAK_ADMIN="{{ .Data.data.username }}" - export KEYCLOAK_ADMIN_USER="{{ .Data.data.username }}" - export KEYCLOAK_ADMIN_PASSWORD="{{ .Data.data.password }}" - {{ end }} - spec: - serviceAccountName: mas-secrets-ensure - restartPolicy: Never - volumes: - - name: endurain-oidc-secret-ensure-script - configMap: - name: endurain-oidc-secret-ensure-script - defaultMode: 0555 - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: kubernetes.io/arch - operator: In - values: ["arm64"] - - key: node-role.kubernetes.io/worker - operator: Exists - containers: - - name: apply - image: alpine:3.20 - command: ["/bin/sh", "-c"] - args: - - | - set -euo pipefail - apk add --no-cache bash curl jq >/dev/null - exec /scripts/endurain_oidc_secret_ensure.sh - volumeMounts: - - name: endurain-oidc-secret-ensure-script - mountPath: /scripts - readOnly: true \ No newline at end of file diff --git a/services/keycloak/kustomization.yaml b/services/keycloak/kustomization.yaml index 6eb2691..e141467 100644 --- a/services/keycloak/kustomization.yaml +++ b/services/keycloak/kustomization.yaml @@ -22,8 +22,6 @@ resources: - synapse-oidc-secret-ensure-job.yaml - logs-oidc-secret-ensure-job.yaml - harbor-oidc-secret-ensure-job.yaml - - endurain-oidc-secret-ensure-job.yaml - - sparkyfitness-oidc-secret-ensure-job.yaml - vault-oidc-secret-ensure-job.yaml - service.yaml - ingress.yaml @@ -37,12 +35,6 @@ configMapGenerator: - name: harbor-oidc-secret-ensure-script 
files: - harbor_oidc_secret_ensure.sh=scripts/harbor_oidc_secret_ensure.sh - - name: endurain-oidc-secret-ensure-script - files: - - endurain_oidc_secret_ensure.sh=scripts/endurain_oidc_secret_ensure.sh - - name: sparkyfitness-oidc-secret-ensure-script - files: - - sparkyfitness_oidc_secret_ensure.sh=scripts/sparkyfitness_oidc_secret_ensure.sh - name: vault-oidc-secret-ensure-script files: - vault_oidc_secret_ensure.sh=scripts/vault_oidc_secret_ensure.sh diff --git a/services/keycloak/scripts/endurain_oidc_secret_ensure.sh b/services/keycloak/scripts/endurain_oidc_secret_ensure.sh deleted file mode 100644 index 6b026b0..0000000 --- a/services/keycloak/scripts/endurain_oidc_secret_ensure.sh +++ /dev/null @@ -1,87 +0,0 @@ -#!/usr/bin/env bash -set -euo pipefail - -. /vault/secrets/keycloak-admin-env.sh - -KC_URL="http://keycloak.sso.svc.cluster.local" -REALM="atlas" -CLIENT_ID="endurain" -ROOT_URL="https://endurain.bstein.dev" -REDIRECT_URI="https://endurain.bstein.dev/api/v1/public/idp/callback/keycloak" -ISSUER_URL="https://sso.bstein.dev/realms/atlas" - -ACCESS_TOKEN="" -for attempt in 1 2 3 4 5; do - TOKEN_JSON="$(curl -sS -X POST "$KC_URL/realms/master/protocol/openid-connect/token" \ - -H 'Content-Type: application/x-www-form-urlencoded' \ - -d "grant_type=password" \ - -d "client_id=admin-cli" \ - -d "username=${KEYCLOAK_ADMIN}" \ - -d "password=${KEYCLOAK_ADMIN_PASSWORD}" || true)" - ACCESS_TOKEN="$(echo "$TOKEN_JSON" | jq -r '.access_token' 2>/dev/null || true)" - if [ -n "$ACCESS_TOKEN" ] && [ "$ACCESS_TOKEN" != "null" ]; then - break - fi - echo "Keycloak token request failed (attempt ${attempt})" >&2 - sleep $((attempt * 2)) -done -if [ -z "$ACCESS_TOKEN" ] || [ "$ACCESS_TOKEN" = "null" ]; then - echo "Failed to fetch Keycloak admin token" >&2 - exit 1 -fi - -CLIENT_QUERY="$(curl -sS -H "Authorization: Bearer ${ACCESS_TOKEN}" \ - "$KC_URL/admin/realms/${REALM}/clients?clientId=${CLIENT_ID}" || true)" -CLIENT_UUID="$(echo "$CLIENT_QUERY" | jq -r '.[0].id' 
2>/dev/null || true)" - -if [ -z "$CLIENT_UUID" ] || [ "$CLIENT_UUID" = "null" ]; then - create_payload="$(jq -nc \ - --arg client_id "${CLIENT_ID}" \ - --arg root_url "${ROOT_URL}" \ - --arg redirect_uri "${REDIRECT_URI}" \ - --arg web_origin "${ROOT_URL}" \ - '{clientId:$client_id,name:"Endurain",enabled:true,protocol:"openid-connect",publicClient:false,standardFlowEnabled:true,implicitFlowEnabled:false,directAccessGrantsEnabled:false,serviceAccountsEnabled:false,redirectUris:[$redirect_uri],webOrigins:[$web_origin],rootUrl:$root_url,baseUrl:"/"}')" - status="$(curl -sS -o /dev/null -w "%{http_code}" -X POST \ - -H "Authorization: Bearer ${ACCESS_TOKEN}" \ - -H 'Content-Type: application/json' \ - -d "${create_payload}" \ - "$KC_URL/admin/realms/${REALM}/clients")" - if [ "$status" != "201" ] && [ "$status" != "204" ]; then - echo "Keycloak client create failed (status ${status})" >&2 - exit 1 - fi - CLIENT_QUERY="$(curl -sS -H "Authorization: Bearer ${ACCESS_TOKEN}" \ - "$KC_URL/admin/realms/${REALM}/clients?clientId=${CLIENT_ID}" || true)" - CLIENT_UUID="$(echo "$CLIENT_QUERY" | jq -r '.[0].id' 2>/dev/null || true)" -fi - -if [ -z "$CLIENT_UUID" ] || [ "$CLIENT_UUID" = "null" ]; then - echo "Keycloak client ${CLIENT_ID} not found" >&2 - exit 1 -fi - -CLIENT_SECRET="$(curl -sS -H "Authorization: Bearer ${ACCESS_TOKEN}" \ - "$KC_URL/admin/realms/${REALM}/clients/${CLIENT_UUID}/client-secret" | jq -r '.value' 2>/dev/null || true)" -if [ -z "$CLIENT_SECRET" ] || [ "$CLIENT_SECRET" = "null" ]; then - echo "Keycloak client secret not found" >&2 - exit 1 -fi - -vault_addr="${VAULT_ADDR:-http://vault.vault.svc.cluster.local:8200}" -vault_role="${VAULT_ROLE:-sso-secrets}" -jwt="$(cat /var/run/secrets/kubernetes.io/serviceaccount/token)" -login_payload="$(jq -nc --arg jwt "${jwt}" --arg role "${vault_role}" '{jwt:$jwt, role:$role}')" -vault_token="$(curl -sS --request POST --data "${login_payload}" \ - "${vault_addr}/v1/auth/kubernetes/login" | jq -r 
'.auth.client_token')" -if [ -z "${vault_token}" ] || [ "${vault_token}" = "null" ]; then - echo "vault login failed" >&2 - exit 1 -fi - -payload="$(jq -nc \ - --arg client_id "${CLIENT_ID}" \ - --arg client_secret "${CLIENT_SECRET}" \ - --arg issuer_url "${ISSUER_URL}" \ - '{data:{client_id:$client_id,client_secret:$client_secret,issuer_url:$issuer_url}}')" -curl -sS -X POST -H "X-Vault-Token: ${vault_token}" \ - -d "${payload}" "${vault_addr}/v1/kv/data/atlas/health/endurain-oidc" >/dev/null diff --git a/services/keycloak/scripts/sparkyfitness_oidc_secret_ensure.sh b/services/keycloak/scripts/sparkyfitness_oidc_secret_ensure.sh deleted file mode 100644 index 449e81c..0000000 --- a/services/keycloak/scripts/sparkyfitness_oidc_secret_ensure.sh +++ /dev/null @@ -1,87 +0,0 @@ -#!/usr/bin/env bash -set -euo pipefail - -. /vault/secrets/keycloak-admin-env.sh - -KC_URL="http://keycloak.sso.svc.cluster.local" -REALM="atlas" -CLIENT_ID="sparkyfitness" -ROOT_URL="https://sparkyfitness.bstein.dev" -REDIRECT_URI="https://sparkyfitness.bstein.dev/oidc-callback" -ISSUER_URL="https://sso.bstein.dev/realms/atlas" - -ACCESS_TOKEN="" -for attempt in 1 2 3 4 5; do - TOKEN_JSON="$(curl -sS -X POST "$KC_URL/realms/master/protocol/openid-connect/token" \ - -H 'Content-Type: application/x-www-form-urlencoded' \ - -d "grant_type=password" \ - -d "client_id=admin-cli" \ - -d "username=${KEYCLOAK_ADMIN}" \ - -d "password=${KEYCLOAK_ADMIN_PASSWORD}" || true)" - ACCESS_TOKEN="$(echo "$TOKEN_JSON" | jq -r '.access_token' 2>/dev/null || true)" - if [ -n "$ACCESS_TOKEN" ] && [ "$ACCESS_TOKEN" != "null" ]; then - break - fi - echo "Keycloak token request failed (attempt ${attempt})" >&2 - sleep $((attempt * 2)) -done -if [ -z "$ACCESS_TOKEN" ] || [ "$ACCESS_TOKEN" = "null" ]; then - echo "Failed to fetch Keycloak admin token" >&2 - exit 1 -fi - -CLIENT_QUERY="$(curl -sS -H "Authorization: Bearer ${ACCESS_TOKEN}" \ - "$KC_URL/admin/realms/${REALM}/clients?clientId=${CLIENT_ID}" || true)" 
-CLIENT_UUID="$(echo "$CLIENT_QUERY" | jq -r '.[0].id' 2>/dev/null || true)" - -if [ -z "$CLIENT_UUID" ] || [ "$CLIENT_UUID" = "null" ]; then - create_payload="$(jq -nc \ - --arg client_id "${CLIENT_ID}" \ - --arg root_url "${ROOT_URL}" \ - --arg redirect_uri "${REDIRECT_URI}" \ - --arg web_origin "${ROOT_URL}" \ - '{clientId:$client_id,name:"SparkyFitness",enabled:true,protocol:"openid-connect",publicClient:false,standardFlowEnabled:true,implicitFlowEnabled:false,directAccessGrantsEnabled:false,serviceAccountsEnabled:false,redirectUris:[$redirect_uri],webOrigins:[$web_origin],rootUrl:$root_url,baseUrl:"/"}')" - status="$(curl -sS -o /dev/null -w "%{http_code}" -X POST \ - -H "Authorization: Bearer ${ACCESS_TOKEN}" \ - -H 'Content-Type: application/json' \ - -d "${create_payload}" \ - "$KC_URL/admin/realms/${REALM}/clients")" - if [ "$status" != "201" ] && [ "$status" != "204" ]; then - echo "Keycloak client create failed (status ${status})" >&2 - exit 1 - fi - CLIENT_QUERY="$(curl -sS -H "Authorization: Bearer ${ACCESS_TOKEN}" \ - "$KC_URL/admin/realms/${REALM}/clients?clientId=${CLIENT_ID}" || true)" - CLIENT_UUID="$(echo "$CLIENT_QUERY" | jq -r '.[0].id' 2>/dev/null || true)" -fi - -if [ -z "$CLIENT_UUID" ] || [ "$CLIENT_UUID" = "null" ]; then - echo "Keycloak client ${CLIENT_ID} not found" >&2 - exit 1 -fi - -CLIENT_SECRET="$(curl -sS -H "Authorization: Bearer ${ACCESS_TOKEN}" \ - "$KC_URL/admin/realms/${REALM}/clients/${CLIENT_UUID}/client-secret" | jq -r '.value' 2>/dev/null || true)" -if [ -z "$CLIENT_SECRET" ] || [ "$CLIENT_SECRET" = "null" ]; then - echo "Keycloak client secret not found" >&2 - exit 1 -fi - -vault_addr="${VAULT_ADDR:-http://vault.vault.svc.cluster.local:8200}" -vault_role="${VAULT_ROLE:-sso-secrets}" -jwt="$(cat /var/run/secrets/kubernetes.io/serviceaccount/token)" -login_payload="$(jq -nc --arg jwt "${jwt}" --arg role "${vault_role}" '{jwt:$jwt, role:$role}')" -vault_token="$(curl -sS --request POST --data "${login_payload}" \ - 
"${vault_addr}/v1/auth/kubernetes/login" | jq -r '.auth.client_token')" -if [ -z "${vault_token}" ] || [ "${vault_token}" = "null" ]; then - echo "vault login failed" >&2 - exit 1 -fi - -payload="$(jq -nc \ - --arg client_id "${CLIENT_ID}" \ - --arg client_secret "${CLIENT_SECRET}" \ - --arg issuer_url "${ISSUER_URL}" \ - '{data:{client_id:$client_id,client_secret:$client_secret,issuer_url:$issuer_url}}')" -curl -sS -X POST -H "X-Vault-Token: ${vault_token}" \ - -d "${payload}" "${vault_addr}/v1/kv/data/atlas/health/sparkyfitness-oidc" >/dev/null diff --git a/services/keycloak/secretproviderclass.yaml b/services/keycloak/secretproviderclass.yaml index 95e28be..86cebd2 100644 --- a/services/keycloak/secretproviderclass.yaml +++ b/services/keycloak/secretproviderclass.yaml @@ -10,41 +10,10 @@ spec: vaultAddress: "http://vault.vault.svc.cluster.local:8200" roleName: "sso" objects: | - - objectName: "openldap-admin__LDAP_ADMIN_PASSWORD" - secretPath: "kv/data/atlas/sso/openldap-admin" - secretKey: "LDAP_ADMIN_PASSWORD" - - objectName: "openldap-admin__LDAP_CONFIG_PASSWORD" - secretPath: "kv/data/atlas/sso/openldap-admin" - secretKey: "LDAP_CONFIG_PASSWORD" - - objectName: "oauth2-proxy-oidc__client_id" - secretPath: "kv/data/atlas/sso/oauth2-proxy-oidc" - secretKey: "client_id" - - objectName: "oauth2-proxy-oidc__client_secret" - secretPath: "kv/data/atlas/sso/oauth2-proxy-oidc" - secretKey: "client_secret" - - objectName: "oauth2-proxy-oidc__cookie_secret" - secretPath: "kv/data/atlas/sso/oauth2-proxy-oidc" - secretKey: "cookie_secret" - objectName: "harbor-pull__dockerconfigjson" secretPath: "kv/data/atlas/harbor-pull/sso" secretKey: "dockerconfigjson" secretObjects: - - secretName: openldap-admin - type: Opaque - data: - - objectName: openldap-admin__LDAP_ADMIN_PASSWORD - key: LDAP_ADMIN_PASSWORD - - objectName: openldap-admin__LDAP_CONFIG_PASSWORD - key: LDAP_CONFIG_PASSWORD - - secretName: oauth2-proxy-oidc - type: Opaque - data: - - objectName: 
oauth2-proxy-oidc__client_id - key: client_id - - objectName: oauth2-proxy-oidc__client_secret - key: client_secret - - objectName: oauth2-proxy-oidc__cookie_secret - key: cookie_secret - secretName: harbor-regcred type: kubernetes.io/dockerconfigjson data: diff --git a/services/keycloak/sparkyfitness-oidc-secret-ensure-job.yaml b/services/keycloak/sparkyfitness-oidc-secret-ensure-job.yaml deleted file mode 100644 index 3b16100..0000000 --- a/services/keycloak/sparkyfitness-oidc-secret-ensure-job.yaml +++ /dev/null @@ -1,53 +0,0 @@ -# services/keycloak/sparkyfitness-oidc-secret-ensure-job.yaml -apiVersion: batch/v1 -kind: Job -metadata: - name: sparkyfitness-oidc-secret-ensure-3 - namespace: sso -spec: - backoffLimit: 0 - ttlSecondsAfterFinished: 3600 - template: - metadata: - annotations: - vault.hashicorp.com/agent-inject: "true" - vault.hashicorp.com/agent-pre-populate-only: "true" - vault.hashicorp.com/role: "sso-secrets" - vault.hashicorp.com/agent-inject-secret-keycloak-admin-env.sh: "kv/data/atlas/shared/keycloak-admin" - vault.hashicorp.com/agent-inject-template-keycloak-admin-env.sh: | - {{ with secret "kv/data/atlas/shared/keycloak-admin" }} - export KEYCLOAK_ADMIN="{{ .Data.data.username }}" - export KEYCLOAK_ADMIN_USER="{{ .Data.data.username }}" - export KEYCLOAK_ADMIN_PASSWORD="{{ .Data.data.password }}" - {{ end }} - spec: - serviceAccountName: mas-secrets-ensure - restartPolicy: Never - volumes: - - name: sparkyfitness-oidc-secret-ensure-script - configMap: - name: sparkyfitness-oidc-secret-ensure-script - defaultMode: 0555 - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: kubernetes.io/arch - operator: In - values: ["arm64"] - - key: node-role.kubernetes.io/worker - operator: Exists - containers: - - name: apply - image: alpine:3.20 - command: ["/bin/sh", "-c"] - args: - - | - set -euo pipefail - apk add --no-cache bash curl jq >/dev/null - exec 
/scripts/sparkyfitness_oidc_secret_ensure.sh - volumeMounts: - - name: sparkyfitness-oidc-secret-ensure-script - mountPath: /scripts - readOnly: true \ No newline at end of file diff --git a/services/logging/oauth2-proxy.yaml b/services/logging/oauth2-proxy.yaml index ecebfa7..d7891da 100644 --- a/services/logging/oauth2-proxy.yaml +++ b/services/logging/oauth2-proxy.yaml @@ -32,7 +32,20 @@ spec: metadata: labels: app: oauth2-proxy-logs + annotations: + vault.hashicorp.com/agent-inject: "true" + vault.hashicorp.com/role: "logging" + vault.hashicorp.com/agent-inject-secret-oidc-env: "kv/data/atlas/logging/oauth2-proxy-logs-oidc" + vault.hashicorp.com/agent-inject-template-oidc-env: | + {{- with secret "kv/data/atlas/logging/oauth2-proxy-logs-oidc" -}} + export OAUTH2_PROXY_CLIENT_ID="{{ .Data.data.client_id }}" + export OAUTH2_PROXY_CLIENT_SECRET="{{ .Data.data.client_secret }}" + export OAUTH2_PROXY_COOKIE_SECRET="{{ .Data.data.cookie_secret }}" + {{- end -}} spec: + serviceAccountName: logging-vault-sync + imagePullSecrets: + - name: harbor-regcred nodeSelector: node-role.kubernetes.io/worker: "true" affinity: @@ -47,7 +60,7 @@ spec: - rpi4 containers: - name: oauth2-proxy - image: quay.io/oauth2-proxy/oauth2-proxy:v7.6.0 + image: registry.bstein.dev/tools/oauth2-proxy-vault:v7.6.0 imagePullPolicy: IfNotPresent args: - --provider=oidc @@ -70,21 +83,8 @@ spec: - --skip-jwt-bearer-tokens=true - --cookie-domain=logs.bstein.dev env: - - name: OAUTH2_PROXY_CLIENT_ID - valueFrom: - secretKeyRef: - name: oauth2-proxy-logs-oidc - key: client_id - - name: OAUTH2_PROXY_CLIENT_SECRET - valueFrom: - secretKeyRef: - name: oauth2-proxy-logs-oidc - key: client_secret - - name: OAUTH2_PROXY_COOKIE_SECRET - valueFrom: - secretKeyRef: - name: oauth2-proxy-logs-oidc - key: cookie_secret + - name: VAULT_ENV_FILE + value: /vault/secrets/oidc-env ports: - containerPort: 4180 name: http diff --git a/services/logging/secretproviderclass.yaml b/services/logging/secretproviderclass.yaml 
index bbe6cfd..f5db15e 100644 --- a/services/logging/secretproviderclass.yaml +++ b/services/logging/secretproviderclass.yaml @@ -10,28 +10,10 @@ spec: vaultAddress: "http://vault.vault.svc.cluster.local:8200" roleName: "logging" objects: | - - objectName: "oauth2-proxy-logs-oidc__client_id" - secretPath: "kv/data/atlas/logging/oauth2-proxy-logs-oidc" - secretKey: "client_id" - - objectName: "oauth2-proxy-logs-oidc__client_secret" - secretPath: "kv/data/atlas/logging/oauth2-proxy-logs-oidc" - secretKey: "client_secret" - - objectName: "oauth2-proxy-logs-oidc__cookie_secret" - secretPath: "kv/data/atlas/logging/oauth2-proxy-logs-oidc" - secretKey: "cookie_secret" - objectName: "harbor-pull__dockerconfigjson" secretPath: "kv/data/atlas/harbor-pull/logging" secretKey: "dockerconfigjson" secretObjects: - - secretName: oauth2-proxy-logs-oidc - type: Opaque - data: - - objectName: oauth2-proxy-logs-oidc__client_id - key: client_id - - objectName: oauth2-proxy-logs-oidc__client_secret - key: client_secret - - objectName: oauth2-proxy-logs-oidc__cookie_secret - key: cookie_secret - secretName: harbor-regcred type: kubernetes.io/dockerconfigjson data: diff --git a/services/oauth2-proxy/deployment.yaml b/services/oauth2-proxy/deployment.yaml index 7c22a93..64cdd0e 100644 --- a/services/oauth2-proxy/deployment.yaml +++ b/services/oauth2-proxy/deployment.yaml @@ -15,7 +15,20 @@ spec: metadata: labels: app: oauth2-proxy + annotations: + vault.hashicorp.com/agent-inject: "true" + vault.hashicorp.com/role: "sso" + vault.hashicorp.com/agent-inject-secret-oidc-env: "kv/data/atlas/sso/oauth2-proxy-oidc" + vault.hashicorp.com/agent-inject-template-oidc-env: | + {{- with secret "kv/data/atlas/sso/oauth2-proxy-oidc" -}} + export OAUTH2_PROXY_CLIENT_ID="{{ .Data.data.client_id }}" + export OAUTH2_PROXY_CLIENT_SECRET="{{ .Data.data.client_secret }}" + export OAUTH2_PROXY_COOKIE_SECRET="{{ .Data.data.cookie_secret }}" + {{- end -}} spec: + serviceAccountName: sso-vault + imagePullSecrets: 
+ - name: harbor-regcred nodeSelector: node-role.kubernetes.io/worker: "true" affinity: @@ -29,7 +42,7 @@ spec: values: ["rpi5","rpi4"] containers: - name: oauth2-proxy - image: quay.io/oauth2-proxy/oauth2-proxy:v7.6.0 + image: registry.bstein.dev/tools/oauth2-proxy-vault:v7.6.0 imagePullPolicy: IfNotPresent args: - --provider=oidc @@ -50,21 +63,8 @@ spec: - --skip-jwt-bearer-tokens=true - --oidc-groups-claim=groups env: - - name: OAUTH2_PROXY_CLIENT_ID - valueFrom: - secretKeyRef: - name: oauth2-proxy-oidc - key: client_id - - name: OAUTH2_PROXY_CLIENT_SECRET - valueFrom: - secretKeyRef: - name: oauth2-proxy-oidc - key: client_secret - - name: OAUTH2_PROXY_COOKIE_SECRET - valueFrom: - secretKeyRef: - name: oauth2-proxy-oidc - key: cookie_secret + - name: VAULT_ENV_FILE + value: /vault/secrets/oidc-env ports: - containerPort: 4180 name: http diff --git a/services/openldap/statefulset.yaml b/services/openldap/statefulset.yaml index ee8c792..210d16e 100644 --- a/services/openldap/statefulset.yaml +++ b/services/openldap/statefulset.yaml @@ -16,14 +16,30 @@ spec: metadata: labels: app: openldap + annotations: + vault.hashicorp.com/agent-inject: "true" + vault.hashicorp.com/role: "sso" + vault.hashicorp.com/agent-inject-secret-openldap-env: "kv/data/atlas/sso/openldap-admin" + vault.hashicorp.com/agent-inject-template-openldap-env: | + {{- with secret "kv/data/atlas/sso/openldap-admin" -}} + export LDAP_ADMIN_PASSWORD="{{ .Data.data.LDAP_ADMIN_PASSWORD }}" + export LDAP_CONFIG_PASSWORD="{{ .Data.data.LDAP_CONFIG_PASSWORD }}" + {{- end -}} spec: nodeSelector: kubernetes.io/arch: arm64 node-role.kubernetes.io/worker: "true" + serviceAccountName: sso-vault containers: - name: openldap image: docker.io/osixia/openldap:1.5.0 imagePullPolicy: IfNotPresent + command: ["/bin/sh", "-c"] + args: + - | + set -eu + . 
/vault/secrets/openldap-env + exec /usr/bin/python3 -u /container/tool/run ports: - name: ldap containerPort: 389 @@ -34,16 +50,6 @@ spec: value: Atlas - name: LDAP_DOMAIN value: bstein.dev - - name: LDAP_ADMIN_PASSWORD - valueFrom: - secretKeyRef: - name: openldap-admin - key: LDAP_ADMIN_PASSWORD - - name: LDAP_CONFIG_PASSWORD - valueFrom: - secretKeyRef: - name: openldap-admin - key: LDAP_CONFIG_PASSWORD readinessProbe: tcpSocket: port: ldap diff --git a/services/pegasus/deployment.yaml b/services/pegasus/deployment.yaml index 94d8dfb..bc3db70 100644 --- a/services/pegasus/deployment.yaml +++ b/services/pegasus/deployment.yaml @@ -14,11 +14,23 @@ spec: maxUnavailable: 1 selector: { matchLabels: { app: pegasus } } template: - metadata: { labels: { app: pegasus } } + metadata: + labels: { app: pegasus } + annotations: + vault.hashicorp.com/agent-inject: "true" + vault.hashicorp.com/role: "pegasus" + vault.hashicorp.com/agent-inject-secret-pegasus-env: "kv/data/atlas/pegasus/pegasus-secrets" + vault.hashicorp.com/agent-inject-template-pegasus-env: | + {{- with secret "kv/data/atlas/pegasus/pegasus-secrets" -}} + export PEGASUS_SESSION_KEY="{{ .Data.data.PEGASUS_SESSION_KEY }}" + export JELLYFIN_URL="{{ .Data.data.JELLYFIN_URL }}" + export JELLYFIN_API_KEY="{{ .Data.data.JELLYFIN_API_KEY }}" + {{- end -}} spec: nodeSelector: kubernetes.io/arch: arm64 node-role.kubernetes.io/worker: "true" + serviceAccountName: pegasus-vault-sync imagePullSecrets: - name: harbor-regcred securityContext: @@ -60,9 +72,8 @@ spec: containers: - name: pegasus - image: registry.bstein.dev/streaming/pegasus:1.2.32 # {"$imagepolicy": "jellyfin:pegasus"} + image: registry.bstein.dev/streaming/pegasus-vault:1.2.32 # {"$imagepolicy": "jellyfin:pegasus"} imagePullPolicy: Always - command: ["/pegasus"] env: - name: PEGASUS_MEDIA_ROOT valueFrom: { configMapKeyRef: { name: pegasus-config, key: PEGASUS_MEDIA_ROOT } } @@ -70,12 +81,8 @@ spec: valueFrom: { configMapKeyRef: { name: pegasus-config, key: 
PEGASUS_BIND } } - name: PEGASUS_USER_MAP_FILE value: "/config/user-map.yaml" - - name: PEGASUS_SESSION_KEY - valueFrom: { secretKeyRef: { name: pegasus-secrets, key: PEGASUS_SESSION_KEY } } - - name: JELLYFIN_URL - valueFrom: { secretKeyRef: { name: pegasus-secrets, key: JELLYFIN_URL } } - - name: JELLYFIN_API_KEY - valueFrom: { secretKeyRef: { name: pegasus-secrets, key: JELLYFIN_API_KEY } } + - name: VAULT_ENV_FILE + value: /vault/secrets/pegasus-env - name: PEGASUS_DEBUG value: "1" - name: PEGASUS_DRY_RUN diff --git a/services/pegasus/image.yaml b/services/pegasus/image.yaml index 682ec83..5987815 100644 --- a/services/pegasus/image.yaml +++ b/services/pegasus/image.yaml @@ -5,7 +5,7 @@ metadata: name: pegasus namespace: jellyfin spec: - image: registry.bstein.dev/streaming/pegasus + image: registry.bstein.dev/streaming/pegasus-vault interval: 1m0s --- diff --git a/services/pegasus/secretproviderclass.yaml b/services/pegasus/secretproviderclass.yaml index 7513eee..b4621a5 100644 --- a/services/pegasus/secretproviderclass.yaml +++ b/services/pegasus/secretproviderclass.yaml @@ -10,28 +10,10 @@ spec: vaultAddress: "http://vault.vault.svc.cluster.local:8200" roleName: "pegasus" objects: | - - objectName: "pegasus-secrets__PEGASUS_SESSION_KEY" - secretPath: "kv/data/atlas/pegasus/pegasus-secrets" - secretKey: "PEGASUS_SESSION_KEY" - - objectName: "pegasus-secrets__JELLYFIN_URL" - secretPath: "kv/data/atlas/pegasus/pegasus-secrets" - secretKey: "JELLYFIN_URL" - - objectName: "pegasus-secrets__JELLYFIN_API_KEY" - secretPath: "kv/data/atlas/pegasus/pegasus-secrets" - secretKey: "JELLYFIN_API_KEY" - objectName: "harbor-pull__dockerconfigjson" secretPath: "kv/data/atlas/harbor-pull/jellyfin" secretKey: "dockerconfigjson" secretObjects: - - secretName: pegasus-secrets - type: Opaque - data: - - objectName: pegasus-secrets__PEGASUS_SESSION_KEY - key: PEGASUS_SESSION_KEY - - objectName: pegasus-secrets__JELLYFIN_URL - key: JELLYFIN_URL - - objectName: 
pegasus-secrets__JELLYFIN_API_KEY - key: JELLYFIN_API_KEY - secretName: harbor-regcred type: kubernetes.io/dockerconfigjson data: diff --git a/services/vault/scripts/vault_k8s_auth_configure.sh b/services/vault/scripts/vault_k8s_auth_configure.sh index f96dd94..81cdc27 100644 --- a/services/vault/scripts/vault_k8s_auth_configure.sh +++ b/services/vault/scripts/vault_k8s_auth_configure.sh @@ -95,7 +95,7 @@ write_policy_and_role "nextcloud" "nextcloud" "nextcloud-vault" \ "nextcloud/* shared/keycloak-admin shared/postmark-relay" "" write_policy_and_role "comms" "comms" "comms-vault,atlasbot" \ "comms/* shared/chat-ai-keys-runtime harbor-pull/comms" "" -write_policy_and_role "jenkins" "jenkins" "jenkins-vault-sync" \ +write_policy_and_role "jenkins" "jenkins" "jenkins" \ "jenkins/*" "" write_policy_and_role "monitoring" "monitoring" "monitoring-vault-sync" \ "monitoring/* shared/postmark-relay harbor-pull/monitoring" "" @@ -110,7 +110,7 @@ write_policy_and_role "health" "health" "health-vault-sync" \ write_policy_and_role "sso-secrets" "sso" "mas-secrets-ensure" \ "shared/keycloak-admin" \ - "harbor/harbor-oidc vault/vault-oidc-config comms/synapse-oidc logging/oauth2-proxy-logs-oidc health/endurain-oidc health/sparkyfitness-oidc" + "harbor/harbor-oidc vault/vault-oidc-config comms/synapse-oidc logging/oauth2-proxy-logs-oidc" write_policy_and_role "comms-secrets" "comms" \ "comms-secrets-ensure,mas-db-ensure,mas-admin-client-secret-writer,othrys-synapse-signingkey-job" \ "" \ From bb9a4e6d8be64c4140d68ec4e69ae133b3d12362 Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Wed, 14 Jan 2026 17:48:12 -0300 Subject: [PATCH 063/270] longhorn: read oauth2-proxy secrets from vault --- .../longhorn/ui-ingress/kustomization.yaml | 3 +- .../ui-ingress/oauth2-proxy-longhorn.yaml | 28 ++++++++----------- .../longhorn/ui-ingress/serviceaccount.yaml | 6 ++++ .../vault/scripts/vault_k8s_auth_configure.sh | 2 ++ 4 files changed, 22 insertions(+), 17 deletions(-) create mode 100644 
infrastructure/longhorn/ui-ingress/serviceaccount.yaml diff --git a/infrastructure/longhorn/ui-ingress/kustomization.yaml b/infrastructure/longhorn/ui-ingress/kustomization.yaml index a2ae5f3..40b030c 100644 --- a/infrastructure/longhorn/ui-ingress/kustomization.yaml +++ b/infrastructure/longhorn/ui-ingress/kustomization.yaml @@ -2,6 +2,7 @@ apiVersion: kustomize.config.k8s.io/v1beta1 kind: Kustomization resources: + - serviceaccount.yaml + - oauth2-proxy-longhorn.yaml - middleware.yaml - ingress.yaml - - oauth2-proxy-longhorn.yaml diff --git a/infrastructure/longhorn/ui-ingress/oauth2-proxy-longhorn.yaml b/infrastructure/longhorn/ui-ingress/oauth2-proxy-longhorn.yaml index b8d4f34..a730e31 100644 --- a/infrastructure/longhorn/ui-ingress/oauth2-proxy-longhorn.yaml +++ b/infrastructure/longhorn/ui-ingress/oauth2-proxy-longhorn.yaml @@ -32,7 +32,18 @@ spec: metadata: labels: app: oauth2-proxy-longhorn + annotations: + vault.hashicorp.com/agent-inject: "true" + vault.hashicorp.com/role: "longhorn" + vault.hashicorp.com/agent-inject-secret-oidc-config: "kv/data/atlas/longhorn/oauth2-proxy" + vault.hashicorp.com/agent-inject-template-oidc-config: | + {{- with secret "kv/data/atlas/longhorn/oauth2-proxy" -}} + client_id = "{{ .Data.data.client_id }}" + client_secret = "{{ .Data.data.client_secret }}" + cookie_secret = "{{ .Data.data.cookie_secret }}" + {{- end -}} spec: + serviceAccountName: longhorn-vault nodeSelector: node-role.kubernetes.io/worker: "true" affinity: @@ -50,6 +61,7 @@ spec: imagePullPolicy: IfNotPresent args: - --provider=oidc + - --config=/vault/secrets/oidc-config - --redirect-url=https://longhorn.bstein.dev/oauth2/callback - --oidc-issuer-url=https://sso.bstein.dev/realms/atlas - --scope=openid profile email groups @@ -69,22 +81,6 @@ spec: - --skip-jwt-bearer-tokens=true - --oidc-groups-claim=groups - --cookie-domain=longhorn.bstein.dev - env: - - name: OAUTH2_PROXY_CLIENT_ID - valueFrom: - secretKeyRef: - name: oauth2-proxy-longhorn-oidc - key: 
client_id - - name: OAUTH2_PROXY_CLIENT_SECRET - valueFrom: - secretKeyRef: - name: oauth2-proxy-longhorn-oidc - key: client_secret - - name: OAUTH2_PROXY_COOKIE_SECRET - valueFrom: - secretKeyRef: - name: oauth2-proxy-longhorn-oidc - key: cookie_secret ports: - containerPort: 4180 name: http diff --git a/infrastructure/longhorn/ui-ingress/serviceaccount.yaml b/infrastructure/longhorn/ui-ingress/serviceaccount.yaml new file mode 100644 index 0000000..310cb8a --- /dev/null +++ b/infrastructure/longhorn/ui-ingress/serviceaccount.yaml @@ -0,0 +1,6 @@ +# infrastructure/longhorn/ui-ingress/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: longhorn-vault + namespace: longhorn-system diff --git a/services/vault/scripts/vault_k8s_auth_configure.sh b/services/vault/scripts/vault_k8s_auth_configure.sh index 81cdc27..eb78aed 100644 --- a/services/vault/scripts/vault_k8s_auth_configure.sh +++ b/services/vault/scripts/vault_k8s_auth_configure.sh @@ -107,6 +107,8 @@ write_policy_and_role "crypto" "crypto" "crypto-vault-sync" \ "crypto/* harbor-pull/crypto" "" write_policy_and_role "health" "health" "health-vault-sync" \ "health/*" "" +write_policy_and_role "longhorn" "longhorn-system" "longhorn-vault" \ + "longhorn/*" "" write_policy_and_role "sso-secrets" "sso" "mas-secrets-ensure" \ "shared/keycloak-admin" \ From 4ff2f3e8892b001a38879da126b8b60534c11d13 Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Wed, 14 Jan 2026 17:53:09 -0300 Subject: [PATCH 064/270] jenkins: escape vault env values --- services/jenkins/deployment.yaml | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/services/jenkins/deployment.yaml b/services/jenkins/deployment.yaml index 9ff7683..3c87349 100644 --- a/services/jenkins/deployment.yaml +++ b/services/jenkins/deployment.yaml @@ -23,20 +23,20 @@ spec: vault.hashicorp.com/agent-inject-secret-jenkins-env: "kv/data/atlas/jenkins/jenkins-oidc" 
vault.hashicorp.com/agent-inject-template-jenkins-env: | {{- with secret "kv/data/atlas/jenkins/jenkins-oidc" -}} - export OIDC_CLIENT_ID="{{ .Data.data.clientId }}" - export OIDC_CLIENT_SECRET="{{ .Data.data.clientSecret }}" - export OIDC_AUTH_URL="{{ .Data.data.authorizationUrl }}" - export OIDC_TOKEN_URL="{{ .Data.data.tokenUrl }}" - export OIDC_USERINFO_URL="{{ .Data.data.userInfoUrl }}" - export OIDC_LOGOUT_URL="{{ .Data.data.logoutUrl }}" + export OIDC_CLIENT_ID='{{ .Data.data.clientId | replace "'" "'\"'\"'" }}' + export OIDC_CLIENT_SECRET='{{ .Data.data.clientSecret | replace "'" "'\"'\"'" }}' + export OIDC_AUTH_URL='{{ .Data.data.authorizationUrl | replace "'" "'\"'\"'" }}' + export OIDC_TOKEN_URL='{{ .Data.data.tokenUrl | replace "'" "'\"'\"'" }}' + export OIDC_USERINFO_URL='{{ .Data.data.userInfoUrl | replace "'" "'\"'\"'" }}' + export OIDC_LOGOUT_URL='{{ .Data.data.logoutUrl | replace "'" "'\"'\"'" }}' {{- end }} {{- with secret "kv/data/atlas/jenkins/harbor-robot-creds" -}} - export HARBOR_ROBOT_USERNAME="{{ .Data.data.username }}" - export HARBOR_ROBOT_PASSWORD="{{ .Data.data.password }}" + export HARBOR_ROBOT_USERNAME='{{ .Data.data.username | replace "'" "'\"'\"'" }}' + export HARBOR_ROBOT_PASSWORD='{{ .Data.data.password | replace "'" "'\"'\"'" }}' {{- end }} {{- with secret "kv/data/atlas/jenkins/gitea-pat" -}} - export GITEA_PAT_USERNAME="{{ .Data.data.username }}" - export GITEA_PAT_TOKEN="{{ .Data.data.token }}" + export GITEA_PAT_USERNAME='{{ .Data.data.username | replace "'" "'\"'\"'" }}' + export GITEA_PAT_TOKEN='{{ .Data.data.token | replace "'" "'\"'\"'" }}' {{- end -}} spec: serviceAccountName: jenkins From c98d24e91e15b609779c3e54cbefe0b646ac6a18 Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Wed, 14 Jan 2026 17:57:10 -0300 Subject: [PATCH 065/270] jenkins: load vault env via env --- services/jenkins/deployment.yaml | 25 ++++++++++++------------- 1 file changed, 12 insertions(+), 13 deletions(-) diff --git 
a/services/jenkins/deployment.yaml b/services/jenkins/deployment.yaml index 3c87349..0320b02 100644 --- a/services/jenkins/deployment.yaml +++ b/services/jenkins/deployment.yaml @@ -23,20 +23,20 @@ spec: vault.hashicorp.com/agent-inject-secret-jenkins-env: "kv/data/atlas/jenkins/jenkins-oidc" vault.hashicorp.com/agent-inject-template-jenkins-env: | {{- with secret "kv/data/atlas/jenkins/jenkins-oidc" -}} - export OIDC_CLIENT_ID='{{ .Data.data.clientId | replace "'" "'\"'\"'" }}' - export OIDC_CLIENT_SECRET='{{ .Data.data.clientSecret | replace "'" "'\"'\"'" }}' - export OIDC_AUTH_URL='{{ .Data.data.authorizationUrl | replace "'" "'\"'\"'" }}' - export OIDC_TOKEN_URL='{{ .Data.data.tokenUrl | replace "'" "'\"'\"'" }}' - export OIDC_USERINFO_URL='{{ .Data.data.userInfoUrl | replace "'" "'\"'\"'" }}' - export OIDC_LOGOUT_URL='{{ .Data.data.logoutUrl | replace "'" "'\"'\"'" }}' + OIDC_CLIENT_ID={{ .Data.data.clientId }} + OIDC_CLIENT_SECRET={{ .Data.data.clientSecret }} + OIDC_AUTH_URL={{ .Data.data.authorizationUrl }} + OIDC_TOKEN_URL={{ .Data.data.tokenUrl }} + OIDC_USERINFO_URL={{ .Data.data.userInfoUrl }} + OIDC_LOGOUT_URL={{ .Data.data.logoutUrl }} {{- end }} {{- with secret "kv/data/atlas/jenkins/harbor-robot-creds" -}} - export HARBOR_ROBOT_USERNAME='{{ .Data.data.username | replace "'" "'\"'\"'" }}' - export HARBOR_ROBOT_PASSWORD='{{ .Data.data.password | replace "'" "'\"'\"'" }}' + HARBOR_ROBOT_USERNAME={{ .Data.data.username }} + HARBOR_ROBOT_PASSWORD={{ .Data.data.password }} {{- end }} {{- with secret "kv/data/atlas/jenkins/gitea-pat" -}} - export GITEA_PAT_USERNAME='{{ .Data.data.username | replace "'" "'\"'\"'" }}' - export GITEA_PAT_TOKEN='{{ .Data.data.token | replace "'" "'\"'\"'" }}' + GITEA_PAT_USERNAME={{ .Data.data.username }} + GITEA_PAT_TOKEN={{ .Data.data.token }} {{- end -}} spec: serviceAccountName: jenkins @@ -88,9 +88,8 @@ spec: - /bin/sh - -c - | - set -eu - . 
/vault/secrets/jenkins-env - exec /usr/bin/tini -- /usr/local/bin/jenkins.sh + set -e + exec env $(cat /vault/secrets/jenkins-env) /usr/bin/tini -- /usr/local/bin/jenkins.sh ports: - name: http containerPort: 8080 From 713fedfe73241195e53ce440fef19f0708b40022 Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Wed, 14 Jan 2026 20:46:46 -0300 Subject: [PATCH 066/270] harbor: move secrets to vault sidecars --- dockerfiles/Dockerfile.harbor-core-vault | 9 + .../Dockerfile.harbor-jobservice-vault | 9 + dockerfiles/Dockerfile.harbor-registry-vault | 9 + .../Dockerfile.harbor-registryctl-vault | 9 + dockerfiles/vault-entrypoint.sh | 21 ++ services/harbor/helmrelease.yaml | 294 +++++++++++++++++- services/harbor/image.yaml | 8 +- services/harbor/secretproviderclass.yaml | 74 ----- services/health/config/nginx.conf | 6 + 9 files changed, 349 insertions(+), 90 deletions(-) create mode 100644 dockerfiles/Dockerfile.harbor-core-vault create mode 100644 dockerfiles/Dockerfile.harbor-jobservice-vault create mode 100644 dockerfiles/Dockerfile.harbor-registry-vault create mode 100644 dockerfiles/Dockerfile.harbor-registryctl-vault diff --git a/dockerfiles/Dockerfile.harbor-core-vault b/dockerfiles/Dockerfile.harbor-core-vault new file mode 100644 index 0000000..b313647 --- /dev/null +++ b/dockerfiles/Dockerfile.harbor-core-vault @@ -0,0 +1,9 @@ +FROM registry.bstein.dev/infra/harbor-core:v2.14.1-arm64 + +USER root +COPY dockerfiles/vault-entrypoint.sh /entrypoint.sh +RUN chmod 0755 /entrypoint.sh +USER harbor + +ENTRYPOINT ["/entrypoint.sh"] +CMD ["/harbor/entrypoint.sh"] diff --git a/dockerfiles/Dockerfile.harbor-jobservice-vault b/dockerfiles/Dockerfile.harbor-jobservice-vault new file mode 100644 index 0000000..28a82d5 --- /dev/null +++ b/dockerfiles/Dockerfile.harbor-jobservice-vault @@ -0,0 +1,9 @@ +FROM registry.bstein.dev/infra/harbor-jobservice:v2.14.1-arm64 + +USER root +COPY dockerfiles/vault-entrypoint.sh /entrypoint.sh +RUN chmod 0755 /entrypoint.sh +USER harbor + 
+ENTRYPOINT ["/entrypoint.sh"] +CMD ["/harbor/entrypoint.sh"] diff --git a/dockerfiles/Dockerfile.harbor-registry-vault b/dockerfiles/Dockerfile.harbor-registry-vault new file mode 100644 index 0000000..608b6e5 --- /dev/null +++ b/dockerfiles/Dockerfile.harbor-registry-vault @@ -0,0 +1,9 @@ +FROM registry.bstein.dev/infra/harbor-registry:v2.14.1-arm64 + +USER root +COPY dockerfiles/vault-entrypoint.sh /entrypoint.sh +RUN chmod 0755 /entrypoint.sh +USER harbor + +ENTRYPOINT ["/entrypoint.sh"] +CMD ["/home/harbor/entrypoint.sh"] diff --git a/dockerfiles/Dockerfile.harbor-registryctl-vault b/dockerfiles/Dockerfile.harbor-registryctl-vault new file mode 100644 index 0000000..b9cf061 --- /dev/null +++ b/dockerfiles/Dockerfile.harbor-registryctl-vault @@ -0,0 +1,9 @@ +FROM registry.bstein.dev/infra/harbor-registryctl:v2.14.1-arm64 + +USER root +COPY dockerfiles/vault-entrypoint.sh /entrypoint.sh +RUN chmod 0755 /entrypoint.sh +USER harbor + +ENTRYPOINT ["/entrypoint.sh"] +CMD ["/home/harbor/start.sh"] diff --git a/dockerfiles/vault-entrypoint.sh b/dockerfiles/vault-entrypoint.sh index 3bacabd..8d6ea78 100644 --- a/dockerfiles/vault-entrypoint.sh +++ b/dockerfiles/vault-entrypoint.sh @@ -11,4 +11,25 @@ if [ -n "${VAULT_ENV_FILE:-}" ]; then fi fi +if [ -n "${VAULT_COPY_FILES:-}" ]; then + old_ifs="$IFS" + IFS=',' + set -- ${VAULT_COPY_FILES} + IFS="$old_ifs" + for pair in "$@"; do + src="${pair%%:*}" + dest="${pair#*:}" + if [ -z "${src}" ] || [ -z "${dest}" ]; then + echo "Vault copy entry malformed: ${pair}" >&2 + exit 1 + fi + if [ ! 
-f "${src}" ]; then + echo "Vault file not found: ${src}" >&2 + exit 1 + fi + mkdir -p "$(dirname "${dest}")" + cp "${src}" "${dest}" + done +fi + exec "$@" diff --git a/services/harbor/helmrelease.yaml b/services/harbor/helmrelease.yaml index 11244ff..95025f2 100644 --- a/services/harbor/helmrelease.yaml +++ b/services/harbor/helmrelease.yaml @@ -112,21 +112,46 @@ spec: existingSecretSecretKey: harbor-core core: image: - repository: registry.bstein.dev/infra/harbor-core + repository: registry.bstein.dev/infra/harbor-core-vault tag: v2.14.1-arm64 # {"$imagepolicy": "harbor:harbor-core:tag"} nodeSelector: kubernetes.io/hostname: titan-05 + serviceAccountName: harbor-vault-sync + automountServiceAccountToken: true existingSecret: harbor-core existingXsrfSecret: harbor-core existingXsrfSecretKey: CSRF_KEY - # OIDC config is injected via CONFIG_OVERWRITE_JSON from the harbor-oidc secret. - extraEnvVars: - - name: CONFIG_OVERWRITE_JSON - valueFrom: - secretKeyRef: - name: harbor-oidc - key: CONFIG_OVERWRITE_JSON - optional: true + secretName: harbor-core + podAnnotations: + vault.hashicorp.com/agent-inject: "true" + vault.hashicorp.com/role: "harbor" + vault.hashicorp.com/agent-inject-secret-harbor-core-env.sh: "kv/data/atlas/harbor/harbor-core" + vault.hashicorp.com/agent-inject-template-harbor-core-env.sh: | + {{- with secret "kv/data/atlas/harbor/harbor-core" -}} + export CORE_SECRET="{{ .Data.data.secret }}" + export CSRF_KEY="{{ .Data.data.CSRF_KEY }}" + export HARBOR_ADMIN_PASSWORD="{{ .Data.data.harbor_admin_password }}" + export REGISTRY_CREDENTIAL_PASSWORD="{{ .Data.data.REGISTRY_CREDENTIAL_PASSWORD }}" + {{- end }} + {{- with secret "kv/data/atlas/harbor/harbor-jobservice" -}} + export JOBSERVICE_SECRET="{{ .Data.data.JOBSERVICE_SECRET }}" + {{- end }} + {{- with secret "kv/data/atlas/harbor/harbor-db" -}} + export POSTGRESQL_PASSWORD="{{ .Data.data.password }}" + {{- end }} + {{- with secret "kv/data/atlas/harbor/harbor-oidc" -}} + export 
CONFIG_OVERWRITE_JSON='{{ .Data.data.CONFIG_OVERWRITE_JSON }}' + {{- end }} + vault.hashicorp.com/agent-inject-secret-harbor-core-secretKey: "kv/data/atlas/harbor/harbor-core" + vault.hashicorp.com/agent-inject-template-harbor-core-secretKey: | + {{- with secret "kv/data/atlas/harbor/harbor-core" -}} + {{ .Data.data.secretKey }} + {{- end }} + vault.hashicorp.com/agent-inject-secret-harbor-core-tls-key: "kv/data/atlas/harbor/harbor-core" + vault.hashicorp.com/agent-inject-template-harbor-core-tls-key: | + {{- with secret "kv/data/atlas/harbor/harbor-core" -}} + {{ index .Data.data "tls.key" }} + {{- end }} affinity: nodeAffinity: requiredDuringSchedulingIgnoredDuringExecution: @@ -150,10 +175,25 @@ spec: values: ["rpi4"] jobservice: image: - repository: registry.bstein.dev/infra/harbor-jobservice + repository: registry.bstein.dev/infra/harbor-jobservice-vault tag: v2.14.1-arm64 # {"$imagepolicy": "harbor:harbor-jobservice:tag"} nodeSelector: kubernetes.io/hostname: titan-05 + serviceAccountName: harbor-vault-sync + automountServiceAccountToken: true + existingSecret: harbor-jobservice + podAnnotations: + vault.hashicorp.com/agent-inject: "true" + vault.hashicorp.com/role: "harbor" + vault.hashicorp.com/agent-inject-secret-harbor-jobservice-env.sh: "kv/data/atlas/harbor/harbor-jobservice" + vault.hashicorp.com/agent-inject-template-harbor-jobservice-env.sh: | + {{- with secret "kv/data/atlas/harbor/harbor-core" -}} + export CORE_SECRET="{{ .Data.data.secret }}" + {{- end }} + {{- with secret "kv/data/atlas/harbor/harbor-jobservice" -}} + export JOBSERVICE_SECRET="{{ .Data.data.JOBSERVICE_SECRET }}" + export REGISTRY_CREDENTIAL_PASSWORD="{{ .Data.data.REGISTRY_CREDENTIAL_PASSWORD }}" + {{- end }} affinity: nodeAffinity: requiredDuringSchedulingIgnoredDuringExecution: @@ -205,12 +245,43 @@ spec: registry: registry: image: - repository: registry.bstein.dev/infra/harbor-registry + repository: registry.bstein.dev/infra/harbor-registry-vault tag: v2.14.1-arm64 # 
{"$imagepolicy": "harbor:harbor-registry:tag"} controller: image: - repository: registry.bstein.dev/infra/harbor-registryctl + repository: registry.bstein.dev/infra/harbor-registryctl-vault tag: v2.14.1-arm64 # {"$imagepolicy": "harbor:harbor-registryctl:tag"} + serviceAccountName: harbor-vault-sync + automountServiceAccountToken: true + existingSecret: harbor-registry + credentials: + existingSecret: harbor-registry + podAnnotations: + vault.hashicorp.com/agent-inject: "true" + vault.hashicorp.com/role: "harbor" + vault.hashicorp.com/agent-inject-secret-harbor-registry-env.sh: "kv/data/atlas/harbor/harbor-registry" + vault.hashicorp.com/agent-inject-template-harbor-registry-env.sh: | + {{- with secret "kv/data/atlas/harbor/harbor-registry" -}} + export REGISTRY_HTTP_SECRET="{{ .Data.data.REGISTRY_HTTP_SECRET }}" + export REGISTRY_REDIS_PASSWORD="{{ .Data.data.REGISTRY_REDIS_PASSWORD }}" + {{- end }} + vault.hashicorp.com/agent-inject-secret-harbor-registryctl-env.sh: "kv/data/atlas/harbor/harbor-registry" + vault.hashicorp.com/agent-inject-template-harbor-registryctl-env.sh: | + {{- with secret "kv/data/atlas/harbor/harbor-core" -}} + export CORE_SECRET="{{ .Data.data.secret }}" + {{- end }} + {{- with secret "kv/data/atlas/harbor/harbor-jobservice" -}} + export JOBSERVICE_SECRET="{{ .Data.data.JOBSERVICE_SECRET }}" + {{- end }} + {{- with secret "kv/data/atlas/harbor/harbor-registry" -}} + export REGISTRY_HTTP_SECRET="{{ .Data.data.REGISTRY_HTTP_SECRET }}" + export REGISTRY_REDIS_PASSWORD="{{ .Data.data.REGISTRY_REDIS_PASSWORD }}" + {{- end }} + vault.hashicorp.com/agent-inject-secret-harbor-registry-htpasswd: "kv/data/atlas/harbor/harbor-registry-htpasswd" + vault.hashicorp.com/agent-inject-template-harbor-registry-htpasswd: | + {{- with secret "kv/data/atlas/harbor/harbor-registry-htpasswd" -}} + {{ .Data.data.REGISTRY_HTPASSWD }} + {{- end }} nodeSelector: kubernetes.io/hostname: titan-05 affinity: @@ -267,3 +338,202 @@ spec: tag: v2.14.1-arm64 # 
{"$imagepolicy": "harbor:harbor-prepare:tag"} updateStrategy: type: Recreate + postRenderers: + - kustomize: + patches: + - target: + kind: Deployment + name: harbor-core + patch: |- + apiVersion: apps/v1 + kind: Deployment + metadata: + name: harbor-core + spec: + template: + spec: + containers: + - name: core + env: + - $patch: replace + - name: VAULT_ENV_FILE + value: /vault/secrets/harbor-core-env.sh + - name: VAULT_COPY_FILES + value: /vault/secrets/harbor-core-secretKey:/etc/core/key,/vault/secrets/harbor-core-tls-key:/etc/core/private_key.pem + envFrom: + - $patch: replace + - configMapRef: + name: harbor-core + volumeMounts: + - name: secret-key + $patch: delete + - name: token-service-private-key + $patch: delete + - name: core-writable + mountPath: /etc/core + volumes: + - name: secret-key + $patch: delete + - name: token-service-private-key + $patch: delete + - name: core-writable + emptyDir: {} + - target: + kind: Deployment + name: harbor-jobservice + patch: |- + apiVersion: apps/v1 + kind: Deployment + metadata: + name: harbor-jobservice + spec: + template: + spec: + containers: + - name: jobservice + env: + - $patch: replace + - name: VAULT_ENV_FILE + value: /vault/secrets/harbor-jobservice-env.sh + envFrom: + - $patch: replace + - configMapRef: + name: harbor-jobservice-env + - target: + kind: Deployment + name: harbor-registry + patch: |- + apiVersion: apps/v1 + kind: Deployment + metadata: + name: harbor-registry + spec: + template: + spec: + containers: + - name: registry + env: + - $patch: replace + - name: VAULT_ENV_FILE + value: /vault/secrets/harbor-registry-env.sh + - name: VAULT_COPY_FILES + value: /vault/secrets/harbor-registry-htpasswd:/etc/registry/passwd + envFrom: + - $patch: replace + volumeMounts: + - name: registry-htpasswd + $patch: delete + - name: registry-writable + mountPath: /etc/registry + - name: registryctl + env: + - $patch: replace + - name: VAULT_ENV_FILE + value: /vault/secrets/harbor-registryctl-env.sh + envFrom: + - 
$patch: replace + - configMapRef: + name: harbor-registryctl + volumes: + - name: registry-htpasswd + $patch: delete + - name: registry-writable + emptyDir: {} + - target: + kind: Job + name: migration-job + patch: |- + apiVersion: batch/v1 + kind: Job + metadata: + name: migration-job + spec: + template: + metadata: + annotations: + vault.hashicorp.com/agent-inject: "true" + vault.hashicorp.com/role: "harbor" + vault.hashicorp.com/agent-inject-secret-harbor-core-env.sh: "kv/data/atlas/harbor/harbor-core" + vault.hashicorp.com/agent-inject-template-harbor-core-env.sh: | + {{- with secret "kv/data/atlas/harbor/harbor-core" -}} + export CORE_SECRET="{{ .Data.data.secret }}" + export CSRF_KEY="{{ .Data.data.CSRF_KEY }}" + export HARBOR_ADMIN_PASSWORD="{{ .Data.data.harbor_admin_password }}" + export REGISTRY_CREDENTIAL_PASSWORD="{{ .Data.data.REGISTRY_CREDENTIAL_PASSWORD }}" + {{- end }} + {{- with secret "kv/data/atlas/harbor/harbor-jobservice" -}} + export JOBSERVICE_SECRET="{{ .Data.data.JOBSERVICE_SECRET }}" + {{- end }} + {{- with secret "kv/data/atlas/harbor/harbor-db" -}} + export POSTGRESQL_PASSWORD="{{ .Data.data.password }}" + {{- end }} + {{- with secret "kv/data/atlas/harbor/harbor-oidc" -}} + export CONFIG_OVERWRITE_JSON='{{ .Data.data.CONFIG_OVERWRITE_JSON }}' + {{- end }} + vault.hashicorp.com/agent-inject-secret-harbor-core-secretKey: "kv/data/atlas/harbor/harbor-core" + vault.hashicorp.com/agent-inject-template-harbor-core-secretKey: | + {{- with secret "kv/data/atlas/harbor/harbor-core" -}} + {{ .Data.data.secretKey }} + {{- end }} + vault.hashicorp.com/agent-inject-secret-harbor-core-tls-key: "kv/data/atlas/harbor/harbor-core" + vault.hashicorp.com/agent-inject-template-harbor-core-tls-key: | + {{- with secret "kv/data/atlas/harbor/harbor-core" -}} + {{ index .Data.data "tls.key" }} + {{- end }} + spec: + automountServiceAccountToken: true + containers: + - name: core-job + env: + - $patch: replace + - name: VAULT_ENV_FILE + value: 
/vault/secrets/harbor-core-env.sh + envFrom: + - $patch: replace + - configMapRef: + name: harbor-core + - target: + kind: Secret + name: harbor-core + patch: |- + apiVersion: v1 + kind: Secret + metadata: + name: harbor-core + $patch: delete + - target: + kind: Secret + name: harbor-jobservice + patch: |- + apiVersion: v1 + kind: Secret + metadata: + name: harbor-jobservice + $patch: delete + - target: + kind: Secret + name: harbor-registry + patch: |- + apiVersion: v1 + kind: Secret + metadata: + name: harbor-registry + $patch: delete + - target: + kind: Secret + name: harbor-registry-htpasswd + patch: |- + apiVersion: v1 + kind: Secret + metadata: + name: harbor-registry-htpasswd + $patch: delete + - target: + kind: Secret + name: harbor-registryctl + patch: |- + apiVersion: v1 + kind: Secret + metadata: + name: harbor-registryctl + $patch: delete diff --git a/services/harbor/image.yaml b/services/harbor/image.yaml index 2b25875..850926a 100644 --- a/services/harbor/image.yaml +++ b/services/harbor/image.yaml @@ -5,7 +5,7 @@ metadata: name: harbor-core namespace: harbor spec: - image: registry.bstein.dev/infra/harbor-core + image: registry.bstein.dev/infra/harbor-core-vault interval: 5m0s --- apiVersion: image.toolkit.fluxcd.io/v1beta2 @@ -29,7 +29,7 @@ metadata: name: harbor-jobservice namespace: harbor spec: - image: registry.bstein.dev/infra/harbor-jobservice + image: registry.bstein.dev/infra/harbor-jobservice-vault interval: 5m0s --- apiVersion: image.toolkit.fluxcd.io/v1beta2 @@ -77,7 +77,7 @@ metadata: name: harbor-registry namespace: harbor spec: - image: registry.bstein.dev/infra/harbor-registry + image: registry.bstein.dev/infra/harbor-registry-vault interval: 5m0s --- apiVersion: image.toolkit.fluxcd.io/v1beta2 @@ -101,7 +101,7 @@ metadata: name: harbor-registryctl namespace: harbor spec: - image: registry.bstein.dev/infra/harbor-registryctl + image: registry.bstein.dev/infra/harbor-registryctl-vault interval: 5m0s --- apiVersion: 
image.toolkit.fluxcd.io/v1beta2 diff --git a/services/harbor/secretproviderclass.yaml b/services/harbor/secretproviderclass.yaml index 90fc876..03fef95 100644 --- a/services/harbor/secretproviderclass.yaml +++ b/services/harbor/secretproviderclass.yaml @@ -10,84 +10,10 @@ spec: vaultAddress: "http://vault.vault.svc.cluster.local:8200" roleName: "harbor" objects: | - - objectName: "harbor-core__CSRF_KEY" - secretPath: "kv/data/atlas/harbor/harbor-core" - secretKey: "CSRF_KEY" - - objectName: "harbor-core__REGISTRY_CREDENTIAL_PASSWORD" - secretPath: "kv/data/atlas/harbor/harbor-core" - secretKey: "REGISTRY_CREDENTIAL_PASSWORD" - - objectName: "harbor-core__harbor_admin_password" - secretPath: "kv/data/atlas/harbor/harbor-core" - secretKey: "harbor_admin_password" - - objectName: "harbor-core__secret" - secretPath: "kv/data/atlas/harbor/harbor-core" - secretKey: "secret" - - objectName: "harbor-core__secretKey" - secretPath: "kv/data/atlas/harbor/harbor-core" - secretKey: "secretKey" - - objectName: "harbor-core__tls.crt" - secretPath: "kv/data/atlas/harbor/harbor-core" - secretKey: "tls.crt" - - objectName: "harbor-core__tls.key" - secretPath: "kv/data/atlas/harbor/harbor-core" - secretKey: "tls.key" - - objectName: "harbor-db__database" - secretPath: "kv/data/atlas/harbor/harbor-db" - secretKey: "database" - - objectName: "harbor-db__host" - secretPath: "kv/data/atlas/harbor/harbor-db" - secretKey: "host" - - objectName: "harbor-db__password" - secretPath: "kv/data/atlas/harbor/harbor-db" - secretKey: "password" - - objectName: "harbor-db__port" - secretPath: "kv/data/atlas/harbor/harbor-db" - secretKey: "port" - - objectName: "harbor-db__username" - secretPath: "kv/data/atlas/harbor/harbor-db" - secretKey: "username" - - objectName: "harbor-oidc__CONFIG_OVERWRITE_JSON" - secretPath: "kv/data/atlas/harbor/harbor-oidc" - secretKey: "CONFIG_OVERWRITE_JSON" - objectName: "harbor-pull__dockerconfigjson" secretPath: "kv/data/atlas/harbor-pull/harbor" secretKey: 
"dockerconfigjson" secretObjects: - - secretName: harbor-core - type: Opaque - data: - - objectName: harbor-core__CSRF_KEY - key: CSRF_KEY - - objectName: harbor-core__REGISTRY_CREDENTIAL_PASSWORD - key: REGISTRY_CREDENTIAL_PASSWORD - - objectName: harbor-core__harbor_admin_password - key: harbor_admin_password - - objectName: harbor-core__secret - key: secret - - objectName: harbor-core__secretKey - key: secretKey - - objectName: harbor-core__tls.crt - key: tls.crt - - objectName: harbor-core__tls.key - key: tls.key - - secretName: harbor-db - type: Opaque - data: - - objectName: harbor-db__database - key: database - - objectName: harbor-db__host - key: host - - objectName: harbor-db__password - key: password - - objectName: harbor-db__port - key: port - - objectName: harbor-db__username - key: username - - secretName: harbor-oidc - type: Opaque - data: - - objectName: harbor-oidc__CONFIG_OVERWRITE_JSON - key: CONFIG_OVERWRITE_JSON - secretName: harbor-regcred type: kubernetes.io/dockerconfigjson data: diff --git a/services/health/config/nginx.conf b/services/health/config/nginx.conf index 26b1f74..8508c38 100644 --- a/services/health/config/nginx.conf +++ b/services/health/config/nginx.conf @@ -5,6 +5,12 @@ upstream wger { server { listen 8080; + client_body_temp_path /tmp/nginx/client_body 1 2; + proxy_temp_path /tmp/nginx/proxy 1 2; + fastcgi_temp_path /tmp/nginx/fastcgi 1 2; + uwsgi_temp_path /tmp/nginx/uwsgi 1 2; + scgi_temp_path /tmp/nginx/scgi 1 2; + location = /api/v2/register { return 404; } From eeeb69fb7a2675bc8a4b53f19833558db88f1307 Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Wed, 14 Jan 2026 21:02:50 -0300 Subject: [PATCH 067/270] harbor: mount vault entrypoint script --- services/harbor/helmrelease.yaml | 65 +++++++++++++++++++-- services/harbor/image.yaml | 8 +-- services/harbor/kustomization.yaml | 6 ++ services/harbor/scripts/vault-entrypoint.sh | 35 +++++++++++ 4 files changed, 106 insertions(+), 8 deletions(-) create mode 100644 
services/harbor/scripts/vault-entrypoint.sh diff --git a/services/harbor/helmrelease.yaml b/services/harbor/helmrelease.yaml index 95025f2..45ca1d8 100644 --- a/services/harbor/helmrelease.yaml +++ b/services/harbor/helmrelease.yaml @@ -112,7 +112,7 @@ spec: existingSecretSecretKey: harbor-core core: image: - repository: registry.bstein.dev/infra/harbor-core-vault + repository: registry.bstein.dev/infra/harbor-core tag: v2.14.1-arm64 # {"$imagepolicy": "harbor:harbor-core:tag"} nodeSelector: kubernetes.io/hostname: titan-05 @@ -175,7 +175,7 @@ spec: values: ["rpi4"] jobservice: image: - repository: registry.bstein.dev/infra/harbor-jobservice-vault + repository: registry.bstein.dev/infra/harbor-jobservice tag: v2.14.1-arm64 # {"$imagepolicy": "harbor:harbor-jobservice:tag"} nodeSelector: kubernetes.io/hostname: titan-05 @@ -245,11 +245,11 @@ spec: registry: registry: image: - repository: registry.bstein.dev/infra/harbor-registry-vault + repository: registry.bstein.dev/infra/harbor-registry tag: v2.14.1-arm64 # {"$imagepolicy": "harbor:harbor-registry:tag"} controller: image: - repository: registry.bstein.dev/infra/harbor-registryctl-vault + repository: registry.bstein.dev/infra/harbor-registryctl tag: v2.14.1-arm64 # {"$imagepolicy": "harbor:harbor-registryctl:tag"} serviceAccountName: harbor-vault-sync automountServiceAccountToken: true @@ -354,6 +354,10 @@ spec: spec: containers: - name: core + command: + - /entrypoint.sh + args: + - /harbor/entrypoint.sh env: - $patch: replace - name: VAULT_ENV_FILE @@ -365,6 +369,9 @@ spec: - configMapRef: name: harbor-core volumeMounts: + - name: harbor-vault-entrypoint + mountPath: /entrypoint.sh + subPath: vault-entrypoint.sh - name: secret-key $patch: delete - name: token-service-private-key @@ -372,6 +379,10 @@ spec: - name: core-writable mountPath: /etc/core volumes: + - name: harbor-vault-entrypoint + configMap: + name: harbor-vault-entrypoint + defaultMode: 493 - name: secret-key $patch: delete - name: 
token-service-private-key @@ -391,6 +402,10 @@ spec: spec: containers: - name: jobservice + command: + - /entrypoint.sh + args: + - /harbor/entrypoint.sh env: - $patch: replace - name: VAULT_ENV_FILE @@ -399,6 +414,15 @@ spec: - $patch: replace - configMapRef: name: harbor-jobservice-env + volumeMounts: + - name: harbor-vault-entrypoint + mountPath: /entrypoint.sh + subPath: vault-entrypoint.sh + volumes: + - name: harbor-vault-entrypoint + configMap: + name: harbor-vault-entrypoint + defaultMode: 493 - target: kind: Deployment name: harbor-registry @@ -412,6 +436,10 @@ spec: spec: containers: - name: registry + command: + - /entrypoint.sh + args: + - /home/harbor/entrypoint.sh env: - $patch: replace - name: VAULT_ENV_FILE @@ -421,11 +449,18 @@ spec: envFrom: - $patch: replace volumeMounts: + - name: harbor-vault-entrypoint + mountPath: /entrypoint.sh + subPath: vault-entrypoint.sh - name: registry-htpasswd $patch: delete - name: registry-writable mountPath: /etc/registry - name: registryctl + command: + - /entrypoint.sh + args: + - /home/harbor/start.sh env: - $patch: replace - name: VAULT_ENV_FILE @@ -434,7 +469,15 @@ spec: - $patch: replace - configMapRef: name: harbor-registryctl + volumeMounts: + - name: harbor-vault-entrypoint + mountPath: /entrypoint.sh + subPath: vault-entrypoint.sh volumes: + - name: harbor-vault-entrypoint + configMap: + name: harbor-vault-entrypoint + defaultMode: 493 - name: registry-htpasswd $patch: delete - name: registry-writable @@ -484,6 +527,11 @@ spec: automountServiceAccountToken: true containers: - name: core-job + command: + - /entrypoint.sh + args: + - /harbor/harbor_core + - -mode=migrate env: - $patch: replace - name: VAULT_ENV_FILE @@ -492,6 +540,15 @@ spec: - $patch: replace - configMapRef: name: harbor-core + volumeMounts: + - name: harbor-vault-entrypoint + mountPath: /entrypoint.sh + subPath: vault-entrypoint.sh + volumes: + - name: harbor-vault-entrypoint + configMap: + name: harbor-vault-entrypoint + defaultMode: 493 
- target: kind: Secret name: harbor-core diff --git a/services/harbor/image.yaml b/services/harbor/image.yaml index 850926a..2b25875 100644 --- a/services/harbor/image.yaml +++ b/services/harbor/image.yaml @@ -5,7 +5,7 @@ metadata: name: harbor-core namespace: harbor spec: - image: registry.bstein.dev/infra/harbor-core-vault + image: registry.bstein.dev/infra/harbor-core interval: 5m0s --- apiVersion: image.toolkit.fluxcd.io/v1beta2 @@ -29,7 +29,7 @@ metadata: name: harbor-jobservice namespace: harbor spec: - image: registry.bstein.dev/infra/harbor-jobservice-vault + image: registry.bstein.dev/infra/harbor-jobservice interval: 5m0s --- apiVersion: image.toolkit.fluxcd.io/v1beta2 @@ -77,7 +77,7 @@ metadata: name: harbor-registry namespace: harbor spec: - image: registry.bstein.dev/infra/harbor-registry-vault + image: registry.bstein.dev/infra/harbor-registry interval: 5m0s --- apiVersion: image.toolkit.fluxcd.io/v1beta2 @@ -101,7 +101,7 @@ metadata: name: harbor-registryctl namespace: harbor spec: - image: registry.bstein.dev/infra/harbor-registryctl-vault + image: registry.bstein.dev/infra/harbor-registryctl interval: 5m0s --- apiVersion: image.toolkit.fluxcd.io/v1beta2 diff --git a/services/harbor/kustomization.yaml b/services/harbor/kustomization.yaml index 2a9cb9e..3018b3c 100644 --- a/services/harbor/kustomization.yaml +++ b/services/harbor/kustomization.yaml @@ -2,6 +2,8 @@ apiVersion: kustomize.config.k8s.io/v1beta1 kind: Kustomization namespace: harbor +generatorOptions: + disableNameSuffixHash: true resources: - namespace.yaml - serviceaccount.yaml @@ -11,3 +13,7 @@ resources: - helmrelease.yaml - vault-sync-deployment.yaml - image.yaml +configMapGenerator: + - name: harbor-vault-entrypoint + files: + - scripts/vault-entrypoint.sh diff --git a/services/harbor/scripts/vault-entrypoint.sh b/services/harbor/scripts/vault-entrypoint.sh new file mode 100644 index 0000000..8d6ea78 --- /dev/null +++ b/services/harbor/scripts/vault-entrypoint.sh @@ -0,0 +1,35 @@ 
+#!/bin/sh +set -eu + +if [ -n "${VAULT_ENV_FILE:-}" ]; then + if [ -f "${VAULT_ENV_FILE}" ]; then + # shellcheck disable=SC1090 + . "${VAULT_ENV_FILE}" + else + echo "Vault env file not found: ${VAULT_ENV_FILE}" >&2 + exit 1 + fi +fi + +if [ -n "${VAULT_COPY_FILES:-}" ]; then + old_ifs="$IFS" + IFS=',' + set -- ${VAULT_COPY_FILES} + IFS="$old_ifs" + for pair in "$@"; do + src="${pair%%:*}" + dest="${pair#*:}" + if [ -z "${src}" ] || [ -z "${dest}" ]; then + echo "Vault copy entry malformed: ${pair}" >&2 + exit 1 + fi + if [ ! -f "${src}" ]; then + echo "Vault file not found: ${src}" >&2 + exit 1 + fi + mkdir -p "$(dirname "${dest}")" + cp "${src}" "${dest}" + done +fi + +exec "$@" From 8a750ac3ab8f185f05079a18e4bb92af8a2bff58 Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Wed, 14 Jan 2026 21:17:05 -0300 Subject: [PATCH 068/270] harbor: fix vault secretKey file path --- services/harbor/helmrelease.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/services/harbor/helmrelease.yaml b/services/harbor/helmrelease.yaml index 45ca1d8..a3d041d 100644 --- a/services/harbor/helmrelease.yaml +++ b/services/harbor/helmrelease.yaml @@ -363,7 +363,7 @@ spec: - name: VAULT_ENV_FILE value: /vault/secrets/harbor-core-env.sh - name: VAULT_COPY_FILES - value: /vault/secrets/harbor-core-secretKey:/etc/core/key,/vault/secrets/harbor-core-tls-key:/etc/core/private_key.pem + value: /vault/secrets/harbor-core-secretkey:/etc/core/key,/vault/secrets/harbor-core-tls-key:/etc/core/private_key.pem envFrom: - $patch: replace - configMapRef: From 9743064ad350f42ee13e54e30ddee653652c7bcb Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Wed, 14 Jan 2026 21:24:16 -0300 Subject: [PATCH 069/270] vault: keep copy loop from clobbering args --- dockerfiles/vault-entrypoint.sh | 5 ++--- services/harbor/scripts/vault-entrypoint.sh | 5 ++--- 2 files changed, 4 insertions(+), 6 deletions(-) diff --git a/dockerfiles/vault-entrypoint.sh b/dockerfiles/vault-entrypoint.sh index 
8d6ea78..fa3b791 100644 --- a/dockerfiles/vault-entrypoint.sh +++ b/dockerfiles/vault-entrypoint.sh @@ -14,9 +14,7 @@ fi if [ -n "${VAULT_COPY_FILES:-}" ]; then old_ifs="$IFS" IFS=',' - set -- ${VAULT_COPY_FILES} - IFS="$old_ifs" - for pair in "$@"; do + for pair in ${VAULT_COPY_FILES}; do src="${pair%%:*}" dest="${pair#*:}" if [ -z "${src}" ] || [ -z "${dest}" ]; then @@ -30,6 +28,7 @@ if [ -n "${VAULT_COPY_FILES:-}" ]; then mkdir -p "$(dirname "${dest}")" cp "${src}" "${dest}" done + IFS="$old_ifs" fi exec "$@" diff --git a/services/harbor/scripts/vault-entrypoint.sh b/services/harbor/scripts/vault-entrypoint.sh index 8d6ea78..fa3b791 100644 --- a/services/harbor/scripts/vault-entrypoint.sh +++ b/services/harbor/scripts/vault-entrypoint.sh @@ -14,9 +14,7 @@ fi if [ -n "${VAULT_COPY_FILES:-}" ]; then old_ifs="$IFS" IFS=',' - set -- ${VAULT_COPY_FILES} - IFS="$old_ifs" - for pair in "$@"; do + for pair in ${VAULT_COPY_FILES}; do src="${pair%%:*}" dest="${pair#*:}" if [ -z "${src}" ] || [ -z "${dest}" ]; then @@ -30,6 +28,7 @@ if [ -n "${VAULT_COPY_FILES:-}" ]; then mkdir -p "$(dirname "${dest}")" cp "${src}" "${dest}" done + IFS="$old_ifs" fi exec "$@" From 22e3004b0a81769f5f75cdf5d6f57207c7643662 Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Wed, 14 Jan 2026 21:29:40 -0300 Subject: [PATCH 070/270] harbor: preserve required volume mounts --- services/harbor/helmrelease.yaml | 39 +++++++++++++++++++++++++++----- 1 file changed, 33 insertions(+), 6 deletions(-) diff --git a/services/harbor/helmrelease.yaml b/services/harbor/helmrelease.yaml index a3d041d..9c74f7c 100644 --- a/services/harbor/helmrelease.yaml +++ b/services/harbor/helmrelease.yaml @@ -369,15 +369,19 @@ spec: - configMapRef: name: harbor-core volumeMounts: + - $patch: replace - name: harbor-vault-entrypoint mountPath: /entrypoint.sh subPath: vault-entrypoint.sh - - name: secret-key - $patch: delete - - name: token-service-private-key - $patch: delete - name: core-writable mountPath: /etc/core + - 
name: config + mountPath: /etc/core/app.conf + subPath: app.conf + - name: ca-download + mountPath: /etc/core/ca + - name: psc + mountPath: /etc/core/token volumes: - name: harbor-vault-entrypoint configMap: @@ -415,9 +419,15 @@ spec: - configMapRef: name: harbor-jobservice-env volumeMounts: + - $patch: replace - name: harbor-vault-entrypoint mountPath: /entrypoint.sh subPath: vault-entrypoint.sh + - name: jobservice-config + mountPath: /etc/jobservice/config.yml + subPath: config.yml + - name: job-logs + mountPath: /var/log/jobs volumes: - name: harbor-vault-entrypoint configMap: @@ -449,13 +459,17 @@ spec: envFrom: - $patch: replace volumeMounts: + - $patch: replace - name: harbor-vault-entrypoint mountPath: /entrypoint.sh subPath: vault-entrypoint.sh - - name: registry-htpasswd - $patch: delete - name: registry-writable mountPath: /etc/registry + - name: registry-config + mountPath: /etc/registry/config.yml + subPath: config.yml + - name: registry-data + mountPath: /storage - name: registryctl command: - /entrypoint.sh @@ -470,9 +484,18 @@ spec: - configMapRef: name: harbor-registryctl volumeMounts: + - $patch: replace - name: harbor-vault-entrypoint mountPath: /entrypoint.sh subPath: vault-entrypoint.sh + - name: registry-config + mountPath: /etc/registry/config.yml + subPath: config.yml + - name: registry-config + mountPath: /etc/registryctl/config.yml + subPath: ctl-config.yml + - name: registry-data + mountPath: /storage volumes: - name: harbor-vault-entrypoint configMap: @@ -541,9 +564,13 @@ spec: - configMapRef: name: harbor-core volumeMounts: + - $patch: replace - name: harbor-vault-entrypoint mountPath: /entrypoint.sh subPath: vault-entrypoint.sh + - name: config + mountPath: /etc/core/app.conf + subPath: app.conf volumes: - name: harbor-vault-entrypoint configMap: From 9652d9d3cfc9e9f086ac6fc127250d9275976f67 Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Wed, 14 Jan 2026 22:03:40 -0300 Subject: [PATCH 071/270] health: escape wger env vars and fix 
nginx temp paths --- services/health/wger-admin-ensure-cronjob.yaml | 18 +++++++++--------- services/health/wger-deployment.yaml | 14 +++++++------- services/health/wger-user-sync-cronjob.yaml | 14 +++++++------- 3 files changed, 23 insertions(+), 23 deletions(-) diff --git a/services/health/wger-admin-ensure-cronjob.yaml b/services/health/wger-admin-ensure-cronjob.yaml index cc422e2..03757f6 100644 --- a/services/health/wger-admin-ensure-cronjob.yaml +++ b/services/health/wger-admin-ensure-cronjob.yaml @@ -21,19 +21,19 @@ spec: vault.hashicorp.com/agent-inject-secret-wger-env: "kv/data/atlas/health/wger-db" vault.hashicorp.com/agent-inject-template-wger-env: | {{- with secret "kv/data/atlas/health/wger-db" -}} - export DJANGO_DB_HOST="{{ .Data.data.DJANGO_DB_HOST }}" - export DJANGO_DB_PORT="{{ .Data.data.DJANGO_DB_PORT }}" - export DJANGO_DB_DATABASE="{{ .Data.data.DJANGO_DB_DATABASE }}" - export DJANGO_DB_USER="{{ .Data.data.DJANGO_DB_USER }}" - export DJANGO_DB_PASSWORD="{{ .Data.data.DJANGO_DB_PASSWORD }}" + export DJANGO_DB_HOST='{{ .Data.data.DJANGO_DB_HOST | replace "'" "'\"'\"'" }}' + export DJANGO_DB_PORT='{{ .Data.data.DJANGO_DB_PORT | replace "'" "'\"'\"'" }}' + export DJANGO_DB_DATABASE='{{ .Data.data.DJANGO_DB_DATABASE | replace "'" "'\"'\"'" }}' + export DJANGO_DB_USER='{{ .Data.data.DJANGO_DB_USER | replace "'" "'\"'\"'" }}' + export DJANGO_DB_PASSWORD='{{ .Data.data.DJANGO_DB_PASSWORD | replace "'" "'\"'\"'" }}' {{- end }} {{- with secret "kv/data/atlas/health/wger-secrets" -}} - export SECRET_KEY="{{ .Data.data.SECRET_KEY }}" - export SIGNING_KEY="{{ .Data.data.SIGNING_KEY }}" + export SECRET_KEY='{{ .Data.data.SECRET_KEY | replace "'" "'\"'\"'" }}' + export SIGNING_KEY='{{ .Data.data.SIGNING_KEY | replace "'" "'\"'\"'" }}' {{- end }} {{- with secret "kv/data/atlas/health/wger-admin" -}} - export WGER_ADMIN_USERNAME="{{ .Data.data.username }}" - export WGER_ADMIN_PASSWORD="{{ .Data.data.password }}" + export WGER_ADMIN_USERNAME='{{ 
.Data.data.username | replace "'" "'\"'\"'" }}' + export WGER_ADMIN_PASSWORD='{{ .Data.data.password | replace "'" "'\"'\"'" }}' {{- end -}} spec: serviceAccountName: health-vault-sync diff --git a/services/health/wger-deployment.yaml b/services/health/wger-deployment.yaml index e39db5b..546a81e 100644 --- a/services/health/wger-deployment.yaml +++ b/services/health/wger-deployment.yaml @@ -20,15 +20,15 @@ spec: vault.hashicorp.com/agent-inject-secret-wger-env: "kv/data/atlas/health/wger-db" vault.hashicorp.com/agent-inject-template-wger-env: | {{- with secret "kv/data/atlas/health/wger-db" -}} - export DJANGO_DB_HOST="{{ .Data.data.DJANGO_DB_HOST }}" - export DJANGO_DB_PORT="{{ .Data.data.DJANGO_DB_PORT }}" - export DJANGO_DB_DATABASE="{{ .Data.data.DJANGO_DB_DATABASE }}" - export DJANGO_DB_USER="{{ .Data.data.DJANGO_DB_USER }}" - export DJANGO_DB_PASSWORD="{{ .Data.data.DJANGO_DB_PASSWORD }}" + export DJANGO_DB_HOST='{{ .Data.data.DJANGO_DB_HOST | replace "'" "'\"'\"'" }}' + export DJANGO_DB_PORT='{{ .Data.data.DJANGO_DB_PORT | replace "'" "'\"'\"'" }}' + export DJANGO_DB_DATABASE='{{ .Data.data.DJANGO_DB_DATABASE | replace "'" "'\"'\"'" }}' + export DJANGO_DB_USER='{{ .Data.data.DJANGO_DB_USER | replace "'" "'\"'\"'" }}' + export DJANGO_DB_PASSWORD='{{ .Data.data.DJANGO_DB_PASSWORD | replace "'" "'\"'\"'" }}' {{- end }} {{- with secret "kv/data/atlas/health/wger-secrets" -}} - export SECRET_KEY="{{ .Data.data.SECRET_KEY }}" - export SIGNING_KEY="{{ .Data.data.SIGNING_KEY }}" + export SECRET_KEY='{{ .Data.data.SECRET_KEY | replace "'" "'\"'\"'" }}' + export SIGNING_KEY='{{ .Data.data.SIGNING_KEY | replace "'" "'\"'\"'" }}' {{- end -}} spec: affinity: diff --git a/services/health/wger-user-sync-cronjob.yaml b/services/health/wger-user-sync-cronjob.yaml index 5e23852..2ac85ea 100644 --- a/services/health/wger-user-sync-cronjob.yaml +++ b/services/health/wger-user-sync-cronjob.yaml @@ -22,15 +22,15 @@ spec: vault.hashicorp.com/agent-inject-secret-wger-env: 
"kv/data/atlas/health/wger-db" vault.hashicorp.com/agent-inject-template-wger-env: | {{- with secret "kv/data/atlas/health/wger-db" -}} - export DJANGO_DB_HOST="{{ .Data.data.DJANGO_DB_HOST }}" - export DJANGO_DB_PORT="{{ .Data.data.DJANGO_DB_PORT }}" - export DJANGO_DB_DATABASE="{{ .Data.data.DJANGO_DB_DATABASE }}" - export DJANGO_DB_USER="{{ .Data.data.DJANGO_DB_USER }}" - export DJANGO_DB_PASSWORD="{{ .Data.data.DJANGO_DB_PASSWORD }}" + export DJANGO_DB_HOST='{{ .Data.data.DJANGO_DB_HOST | replace "'" "'\"'\"'" }}' + export DJANGO_DB_PORT='{{ .Data.data.DJANGO_DB_PORT | replace "'" "'\"'\"'" }}' + export DJANGO_DB_DATABASE='{{ .Data.data.DJANGO_DB_DATABASE | replace "'" "'\"'\"'" }}' + export DJANGO_DB_USER='{{ .Data.data.DJANGO_DB_USER | replace "'" "'\"'\"'" }}' + export DJANGO_DB_PASSWORD='{{ .Data.data.DJANGO_DB_PASSWORD | replace "'" "'\"'\"'" }}' {{- end }} {{- with secret "kv/data/atlas/health/wger-secrets" -}} - export SECRET_KEY="{{ .Data.data.SECRET_KEY }}" - export SIGNING_KEY="{{ .Data.data.SIGNING_KEY }}" + export SECRET_KEY='{{ .Data.data.SECRET_KEY | replace "'" "'\"'\"'" }}' + export SIGNING_KEY='{{ .Data.data.SIGNING_KEY | replace "'" "'\"'\"'" }}' {{- end -}} spec: serviceAccountName: health-vault-sync From 71f533ca1f2aa7c1df3d97fc2d84ef2bbf9046e6 Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Wed, 14 Jan 2026 22:07:51 -0300 Subject: [PATCH 072/270] harbor: fix vault env templates --- services/harbor/helmrelease.yaml | 64 +++++++++++++++----------------- 1 file changed, 30 insertions(+), 34 deletions(-) diff --git a/services/harbor/helmrelease.yaml b/services/harbor/helmrelease.yaml index 9c74f7c..b0cbdbd 100644 --- a/services/harbor/helmrelease.yaml +++ b/services/harbor/helmrelease.yaml @@ -127,26 +127,24 @@ spec: vault.hashicorp.com/role: "harbor" vault.hashicorp.com/agent-inject-secret-harbor-core-env.sh: "kv/data/atlas/harbor/harbor-core" vault.hashicorp.com/agent-inject-template-harbor-core-env.sh: | - {{- with secret 
"kv/data/atlas/harbor/harbor-core" -}} + {{ with secret "kv/data/atlas/harbor/harbor-core" }} export CORE_SECRET="{{ .Data.data.secret }}" export CSRF_KEY="{{ .Data.data.CSRF_KEY }}" export HARBOR_ADMIN_PASSWORD="{{ .Data.data.harbor_admin_password }}" export REGISTRY_CREDENTIAL_PASSWORD="{{ .Data.data.REGISTRY_CREDENTIAL_PASSWORD }}" - {{- end }} - {{- with secret "kv/data/atlas/harbor/harbor-jobservice" -}} + {{ end }} + {{ with secret "kv/data/atlas/harbor/harbor-jobservice" }} export JOBSERVICE_SECRET="{{ .Data.data.JOBSERVICE_SECRET }}" - {{- end }} - {{- with secret "kv/data/atlas/harbor/harbor-db" -}} + {{ end }} + {{ with secret "kv/data/atlas/harbor/harbor-db" }} export POSTGRESQL_PASSWORD="{{ .Data.data.password }}" - {{- end }} - {{- with secret "kv/data/atlas/harbor/harbor-oidc" -}} + {{ end }} + {{ with secret "kv/data/atlas/harbor/harbor-oidc" }} export CONFIG_OVERWRITE_JSON='{{ .Data.data.CONFIG_OVERWRITE_JSON }}' - {{- end }} + {{ end }} vault.hashicorp.com/agent-inject-secret-harbor-core-secretKey: "kv/data/atlas/harbor/harbor-core" vault.hashicorp.com/agent-inject-template-harbor-core-secretKey: | - {{- with secret "kv/data/atlas/harbor/harbor-core" -}} - {{ .Data.data.secretKey }} - {{- end }} + {{- with secret "kv/data/atlas/harbor/harbor-core" -}}{{ .Data.data.secretKey }}{{- end -}} vault.hashicorp.com/agent-inject-secret-harbor-core-tls-key: "kv/data/atlas/harbor/harbor-core" vault.hashicorp.com/agent-inject-template-harbor-core-tls-key: | {{- with secret "kv/data/atlas/harbor/harbor-core" -}} @@ -187,13 +185,13 @@ spec: vault.hashicorp.com/role: "harbor" vault.hashicorp.com/agent-inject-secret-harbor-jobservice-env.sh: "kv/data/atlas/harbor/harbor-jobservice" vault.hashicorp.com/agent-inject-template-harbor-jobservice-env.sh: | - {{- with secret "kv/data/atlas/harbor/harbor-core" -}} + {{ with secret "kv/data/atlas/harbor/harbor-core" }} export CORE_SECRET="{{ .Data.data.secret }}" - {{- end }} - {{- with secret 
"kv/data/atlas/harbor/harbor-jobservice" -}} + {{ end }} + {{ with secret "kv/data/atlas/harbor/harbor-jobservice" }} export JOBSERVICE_SECRET="{{ .Data.data.JOBSERVICE_SECRET }}" export REGISTRY_CREDENTIAL_PASSWORD="{{ .Data.data.REGISTRY_CREDENTIAL_PASSWORD }}" - {{- end }} + {{ end }} affinity: nodeAffinity: requiredDuringSchedulingIgnoredDuringExecution: @@ -261,22 +259,22 @@ spec: vault.hashicorp.com/role: "harbor" vault.hashicorp.com/agent-inject-secret-harbor-registry-env.sh: "kv/data/atlas/harbor/harbor-registry" vault.hashicorp.com/agent-inject-template-harbor-registry-env.sh: | - {{- with secret "kv/data/atlas/harbor/harbor-registry" -}} + {{ with secret "kv/data/atlas/harbor/harbor-registry" }} export REGISTRY_HTTP_SECRET="{{ .Data.data.REGISTRY_HTTP_SECRET }}" export REGISTRY_REDIS_PASSWORD="{{ .Data.data.REGISTRY_REDIS_PASSWORD }}" - {{- end }} + {{ end }} vault.hashicorp.com/agent-inject-secret-harbor-registryctl-env.sh: "kv/data/atlas/harbor/harbor-registry" vault.hashicorp.com/agent-inject-template-harbor-registryctl-env.sh: | - {{- with secret "kv/data/atlas/harbor/harbor-core" -}} + {{ with secret "kv/data/atlas/harbor/harbor-core" }} export CORE_SECRET="{{ .Data.data.secret }}" - {{- end }} - {{- with secret "kv/data/atlas/harbor/harbor-jobservice" -}} + {{ end }} + {{ with secret "kv/data/atlas/harbor/harbor-jobservice" }} export JOBSERVICE_SECRET="{{ .Data.data.JOBSERVICE_SECRET }}" - {{- end }} - {{- with secret "kv/data/atlas/harbor/harbor-registry" -}} + {{ end }} + {{ with secret "kv/data/atlas/harbor/harbor-registry" }} export REGISTRY_HTTP_SECRET="{{ .Data.data.REGISTRY_HTTP_SECRET }}" export REGISTRY_REDIS_PASSWORD="{{ .Data.data.REGISTRY_REDIS_PASSWORD }}" - {{- end }} + {{ end }} vault.hashicorp.com/agent-inject-secret-harbor-registry-htpasswd: "kv/data/atlas/harbor/harbor-registry-htpasswd" vault.hashicorp.com/agent-inject-template-harbor-registry-htpasswd: | {{- with secret "kv/data/atlas/harbor/harbor-registry-htpasswd" -}} @@ 
-521,26 +519,24 @@ spec: vault.hashicorp.com/role: "harbor" vault.hashicorp.com/agent-inject-secret-harbor-core-env.sh: "kv/data/atlas/harbor/harbor-core" vault.hashicorp.com/agent-inject-template-harbor-core-env.sh: | - {{- with secret "kv/data/atlas/harbor/harbor-core" -}} + {{ with secret "kv/data/atlas/harbor/harbor-core" }} export CORE_SECRET="{{ .Data.data.secret }}" export CSRF_KEY="{{ .Data.data.CSRF_KEY }}" export HARBOR_ADMIN_PASSWORD="{{ .Data.data.harbor_admin_password }}" export REGISTRY_CREDENTIAL_PASSWORD="{{ .Data.data.REGISTRY_CREDENTIAL_PASSWORD }}" - {{- end }} - {{- with secret "kv/data/atlas/harbor/harbor-jobservice" -}} + {{ end }} + {{ with secret "kv/data/atlas/harbor/harbor-jobservice" }} export JOBSERVICE_SECRET="{{ .Data.data.JOBSERVICE_SECRET }}" - {{- end }} - {{- with secret "kv/data/atlas/harbor/harbor-db" -}} + {{ end }} + {{ with secret "kv/data/atlas/harbor/harbor-db" }} export POSTGRESQL_PASSWORD="{{ .Data.data.password }}" - {{- end }} - {{- with secret "kv/data/atlas/harbor/harbor-oidc" -}} + {{ end }} + {{ with secret "kv/data/atlas/harbor/harbor-oidc" }} export CONFIG_OVERWRITE_JSON='{{ .Data.data.CONFIG_OVERWRITE_JSON }}' - {{- end }} + {{ end }} vault.hashicorp.com/agent-inject-secret-harbor-core-secretKey: "kv/data/atlas/harbor/harbor-core" vault.hashicorp.com/agent-inject-template-harbor-core-secretKey: | - {{- with secret "kv/data/atlas/harbor/harbor-core" -}} - {{ .Data.data.secretKey }} - {{- end }} + {{- with secret "kv/data/atlas/harbor/harbor-core" -}}{{ .Data.data.secretKey }}{{- end -}} vault.hashicorp.com/agent-inject-secret-harbor-core-tls-key: "kv/data/atlas/harbor/harbor-core" vault.hashicorp.com/agent-inject-template-harbor-core-tls-key: | {{- with secret "kv/data/atlas/harbor/harbor-core" -}} From 349a6cca3b1ac1d03ea3ef1618f0bc5beef82e46 Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Wed, 14 Jan 2026 22:11:55 -0300 Subject: [PATCH 073/270] health: load wger secrets without shell expansion --- 
.../health/wger-admin-ensure-cronjob.yaml | 43 +++++++++++++++---- services/health/wger-deployment.yaml | 29 ++++++++++--- services/health/wger-user-sync-cronjob.yaml | 29 ++++++++++--- 3 files changed, 78 insertions(+), 23 deletions(-) diff --git a/services/health/wger-admin-ensure-cronjob.yaml b/services/health/wger-admin-ensure-cronjob.yaml index 03757f6..3205fd3 100644 --- a/services/health/wger-admin-ensure-cronjob.yaml +++ b/services/health/wger-admin-ensure-cronjob.yaml @@ -21,19 +21,44 @@ spec: vault.hashicorp.com/agent-inject-secret-wger-env: "kv/data/atlas/health/wger-db" vault.hashicorp.com/agent-inject-template-wger-env: | {{- with secret "kv/data/atlas/health/wger-db" -}} - export DJANGO_DB_HOST='{{ .Data.data.DJANGO_DB_HOST | replace "'" "'\"'\"'" }}' - export DJANGO_DB_PORT='{{ .Data.data.DJANGO_DB_PORT | replace "'" "'\"'\"'" }}' - export DJANGO_DB_DATABASE='{{ .Data.data.DJANGO_DB_DATABASE | replace "'" "'\"'\"'" }}' - export DJANGO_DB_USER='{{ .Data.data.DJANGO_DB_USER | replace "'" "'\"'\"'" }}' - export DJANGO_DB_PASSWORD='{{ .Data.data.DJANGO_DB_PASSWORD | replace "'" "'\"'\"'" }}' + export DJANGO_DB_HOST="{{ .Data.data.DJANGO_DB_HOST }}" + export DJANGO_DB_PORT="{{ .Data.data.DJANGO_DB_PORT }}" + export DJANGO_DB_DATABASE="{{ .Data.data.DJANGO_DB_DATABASE }}" + export DJANGO_DB_USER="{{ .Data.data.DJANGO_DB_USER }}" + export DJANGO_DB_PASSWORD="$(cat /vault/secrets/wger-db-password)" {{- end }} {{- with secret "kv/data/atlas/health/wger-secrets" -}} - export SECRET_KEY='{{ .Data.data.SECRET_KEY | replace "'" "'\"'\"'" }}' - export SIGNING_KEY='{{ .Data.data.SIGNING_KEY | replace "'" "'\"'\"'" }}' + export SECRET_KEY="$(cat /vault/secrets/wger-secret-key)" + export SIGNING_KEY="$(cat /vault/secrets/wger-signing-key)" {{- end }} {{- with secret "kv/data/atlas/health/wger-admin" -}} - export WGER_ADMIN_USERNAME='{{ .Data.data.username | replace "'" "'\"'\"'" }}' - export WGER_ADMIN_PASSWORD='{{ .Data.data.password | replace "'" "'\"'\"'" }}' + 
export WGER_ADMIN_USERNAME="$(cat /vault/secrets/wger-admin-username)" + export WGER_ADMIN_PASSWORD="$(cat /vault/secrets/wger-admin-password)" + {{- end -}} + vault.hashicorp.com/agent-inject-secret-wger-db-password: "kv/data/atlas/health/wger-db" + vault.hashicorp.com/agent-inject-template-wger-db-password: | + {{- with secret "kv/data/atlas/health/wger-db" -}} + {{ .Data.data.DJANGO_DB_PASSWORD }} + {{- end -}} + vault.hashicorp.com/agent-inject-secret-wger-secret-key: "kv/data/atlas/health/wger-secrets" + vault.hashicorp.com/agent-inject-template-wger-secret-key: | + {{- with secret "kv/data/atlas/health/wger-secrets" -}} + {{ .Data.data.SECRET_KEY }} + {{- end -}} + vault.hashicorp.com/agent-inject-secret-wger-signing-key: "kv/data/atlas/health/wger-secrets" + vault.hashicorp.com/agent-inject-template-wger-signing-key: | + {{- with secret "kv/data/atlas/health/wger-secrets" -}} + {{ .Data.data.SIGNING_KEY }} + {{- end -}} + vault.hashicorp.com/agent-inject-secret-wger-admin-username: "kv/data/atlas/health/wger-admin" + vault.hashicorp.com/agent-inject-template-wger-admin-username: | + {{- with secret "kv/data/atlas/health/wger-admin" -}} + {{ .Data.data.username }} + {{- end -}} + vault.hashicorp.com/agent-inject-secret-wger-admin-password: "kv/data/atlas/health/wger-admin" + vault.hashicorp.com/agent-inject-template-wger-admin-password: | + {{- with secret "kv/data/atlas/health/wger-admin" -}} + {{ .Data.data.password }} {{- end -}} spec: serviceAccountName: health-vault-sync diff --git a/services/health/wger-deployment.yaml b/services/health/wger-deployment.yaml index 546a81e..20b1337 100644 --- a/services/health/wger-deployment.yaml +++ b/services/health/wger-deployment.yaml @@ -20,15 +20,30 @@ spec: vault.hashicorp.com/agent-inject-secret-wger-env: "kv/data/atlas/health/wger-db" vault.hashicorp.com/agent-inject-template-wger-env: | {{- with secret "kv/data/atlas/health/wger-db" -}} - export DJANGO_DB_HOST='{{ .Data.data.DJANGO_DB_HOST | replace "'" 
"'\"'\"'" }}' - export DJANGO_DB_PORT='{{ .Data.data.DJANGO_DB_PORT | replace "'" "'\"'\"'" }}' - export DJANGO_DB_DATABASE='{{ .Data.data.DJANGO_DB_DATABASE | replace "'" "'\"'\"'" }}' - export DJANGO_DB_USER='{{ .Data.data.DJANGO_DB_USER | replace "'" "'\"'\"'" }}' - export DJANGO_DB_PASSWORD='{{ .Data.data.DJANGO_DB_PASSWORD | replace "'" "'\"'\"'" }}' + export DJANGO_DB_HOST="{{ .Data.data.DJANGO_DB_HOST }}" + export DJANGO_DB_PORT="{{ .Data.data.DJANGO_DB_PORT }}" + export DJANGO_DB_DATABASE="{{ .Data.data.DJANGO_DB_DATABASE }}" + export DJANGO_DB_USER="{{ .Data.data.DJANGO_DB_USER }}" + export DJANGO_DB_PASSWORD="$(cat /vault/secrets/wger-db-password)" {{- end }} {{- with secret "kv/data/atlas/health/wger-secrets" -}} - export SECRET_KEY='{{ .Data.data.SECRET_KEY | replace "'" "'\"'\"'" }}' - export SIGNING_KEY='{{ .Data.data.SIGNING_KEY | replace "'" "'\"'\"'" }}' + export SECRET_KEY="$(cat /vault/secrets/wger-secret-key)" + export SIGNING_KEY="$(cat /vault/secrets/wger-signing-key)" + {{- end -}} + vault.hashicorp.com/agent-inject-secret-wger-db-password: "kv/data/atlas/health/wger-db" + vault.hashicorp.com/agent-inject-template-wger-db-password: | + {{- with secret "kv/data/atlas/health/wger-db" -}} + {{ .Data.data.DJANGO_DB_PASSWORD }} + {{- end -}} + vault.hashicorp.com/agent-inject-secret-wger-secret-key: "kv/data/atlas/health/wger-secrets" + vault.hashicorp.com/agent-inject-template-wger-secret-key: | + {{- with secret "kv/data/atlas/health/wger-secrets" -}} + {{ .Data.data.SECRET_KEY }} + {{- end -}} + vault.hashicorp.com/agent-inject-secret-wger-signing-key: "kv/data/atlas/health/wger-secrets" + vault.hashicorp.com/agent-inject-template-wger-signing-key: | + {{- with secret "kv/data/atlas/health/wger-secrets" -}} + {{ .Data.data.SIGNING_KEY }} {{- end -}} spec: affinity: diff --git a/services/health/wger-user-sync-cronjob.yaml b/services/health/wger-user-sync-cronjob.yaml index 2ac85ea..f99afad 100644 --- a/services/health/wger-user-sync-cronjob.yaml 
+++ b/services/health/wger-user-sync-cronjob.yaml @@ -22,15 +22,30 @@ spec: vault.hashicorp.com/agent-inject-secret-wger-env: "kv/data/atlas/health/wger-db" vault.hashicorp.com/agent-inject-template-wger-env: | {{- with secret "kv/data/atlas/health/wger-db" -}} - export DJANGO_DB_HOST='{{ .Data.data.DJANGO_DB_HOST | replace "'" "'\"'\"'" }}' - export DJANGO_DB_PORT='{{ .Data.data.DJANGO_DB_PORT | replace "'" "'\"'\"'" }}' - export DJANGO_DB_DATABASE='{{ .Data.data.DJANGO_DB_DATABASE | replace "'" "'\"'\"'" }}' - export DJANGO_DB_USER='{{ .Data.data.DJANGO_DB_USER | replace "'" "'\"'\"'" }}' - export DJANGO_DB_PASSWORD='{{ .Data.data.DJANGO_DB_PASSWORD | replace "'" "'\"'\"'" }}' + export DJANGO_DB_HOST="{{ .Data.data.DJANGO_DB_HOST }}" + export DJANGO_DB_PORT="{{ .Data.data.DJANGO_DB_PORT }}" + export DJANGO_DB_DATABASE="{{ .Data.data.DJANGO_DB_DATABASE }}" + export DJANGO_DB_USER="{{ .Data.data.DJANGO_DB_USER }}" + export DJANGO_DB_PASSWORD="$(cat /vault/secrets/wger-db-password)" {{- end }} {{- with secret "kv/data/atlas/health/wger-secrets" -}} - export SECRET_KEY='{{ .Data.data.SECRET_KEY | replace "'" "'\"'\"'" }}' - export SIGNING_KEY='{{ .Data.data.SIGNING_KEY | replace "'" "'\"'\"'" }}' + export SECRET_KEY="$(cat /vault/secrets/wger-secret-key)" + export SIGNING_KEY="$(cat /vault/secrets/wger-signing-key)" + {{- end -}} + vault.hashicorp.com/agent-inject-secret-wger-db-password: "kv/data/atlas/health/wger-db" + vault.hashicorp.com/agent-inject-template-wger-db-password: | + {{- with secret "kv/data/atlas/health/wger-db" -}} + {{ .Data.data.DJANGO_DB_PASSWORD }} + {{- end -}} + vault.hashicorp.com/agent-inject-secret-wger-secret-key: "kv/data/atlas/health/wger-secrets" + vault.hashicorp.com/agent-inject-template-wger-secret-key: | + {{- with secret "kv/data/atlas/health/wger-secrets" -}} + {{ .Data.data.SECRET_KEY }} + {{- end -}} + vault.hashicorp.com/agent-inject-secret-wger-signing-key: "kv/data/atlas/health/wger-secrets" + 
vault.hashicorp.com/agent-inject-template-wger-signing-key: | + {{- with secret "kv/data/atlas/health/wger-secrets" -}} + {{ .Data.data.SIGNING_KEY }} {{- end -}} spec: serviceAccountName: health-vault-sync From e391a78f25e177d89f463022862daa4d589ad920 Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Wed, 14 Jan 2026 22:16:36 -0300 Subject: [PATCH 074/270] health: avoid surge rollout for wger --- services/health/wger-deployment.yaml | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/services/health/wger-deployment.yaml b/services/health/wger-deployment.yaml index 20b1337..cadab68 100644 --- a/services/health/wger-deployment.yaml +++ b/services/health/wger-deployment.yaml @@ -7,6 +7,11 @@ metadata: spec: replicas: 1 revisionHistoryLimit: 3 + strategy: + type: RollingUpdate + rollingUpdate: + maxSurge: 0 + maxUnavailable: 1 selector: matchLabels: app: wger From 4bb6c7e21236d98e5b6c14481ff03fb54f0729e3 Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Wed, 14 Jan 2026 22:23:48 -0300 Subject: [PATCH 075/270] health: fix wger env template newlines --- services/health/wger-admin-ensure-cronjob.yaml | 12 ++++++------ services/health/wger-user-sync-cronjob.yaml | 8 ++++---- 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/services/health/wger-admin-ensure-cronjob.yaml b/services/health/wger-admin-ensure-cronjob.yaml index 3205fd3..aba0fc4 100644 --- a/services/health/wger-admin-ensure-cronjob.yaml +++ b/services/health/wger-admin-ensure-cronjob.yaml @@ -20,21 +20,21 @@ spec: vault.hashicorp.com/role: "health" vault.hashicorp.com/agent-inject-secret-wger-env: "kv/data/atlas/health/wger-db" vault.hashicorp.com/agent-inject-template-wger-env: | - {{- with secret "kv/data/atlas/health/wger-db" -}} + {{ with secret "kv/data/atlas/health/wger-db" }} export DJANGO_DB_HOST="{{ .Data.data.DJANGO_DB_HOST }}" export DJANGO_DB_PORT="{{ .Data.data.DJANGO_DB_PORT }}" export DJANGO_DB_DATABASE="{{ .Data.data.DJANGO_DB_DATABASE }}" export DJANGO_DB_USER="{{ 
.Data.data.DJANGO_DB_USER }}" export DJANGO_DB_PASSWORD="$(cat /vault/secrets/wger-db-password)" - {{- end }} - {{- with secret "kv/data/atlas/health/wger-secrets" -}} + {{ end }} + {{ with secret "kv/data/atlas/health/wger-secrets" }} export SECRET_KEY="$(cat /vault/secrets/wger-secret-key)" export SIGNING_KEY="$(cat /vault/secrets/wger-signing-key)" - {{- end }} - {{- with secret "kv/data/atlas/health/wger-admin" -}} + {{ end }} + {{ with secret "kv/data/atlas/health/wger-admin" }} export WGER_ADMIN_USERNAME="$(cat /vault/secrets/wger-admin-username)" export WGER_ADMIN_PASSWORD="$(cat /vault/secrets/wger-admin-password)" - {{- end -}} + {{ end }} vault.hashicorp.com/agent-inject-secret-wger-db-password: "kv/data/atlas/health/wger-db" vault.hashicorp.com/agent-inject-template-wger-db-password: | {{- with secret "kv/data/atlas/health/wger-db" -}} diff --git a/services/health/wger-user-sync-cronjob.yaml b/services/health/wger-user-sync-cronjob.yaml index f99afad..8c846e2 100644 --- a/services/health/wger-user-sync-cronjob.yaml +++ b/services/health/wger-user-sync-cronjob.yaml @@ -21,17 +21,17 @@ spec: vault.hashicorp.com/role: "health" vault.hashicorp.com/agent-inject-secret-wger-env: "kv/data/atlas/health/wger-db" vault.hashicorp.com/agent-inject-template-wger-env: | - {{- with secret "kv/data/atlas/health/wger-db" -}} + {{ with secret "kv/data/atlas/health/wger-db" }} export DJANGO_DB_HOST="{{ .Data.data.DJANGO_DB_HOST }}" export DJANGO_DB_PORT="{{ .Data.data.DJANGO_DB_PORT }}" export DJANGO_DB_DATABASE="{{ .Data.data.DJANGO_DB_DATABASE }}" export DJANGO_DB_USER="{{ .Data.data.DJANGO_DB_USER }}" export DJANGO_DB_PASSWORD="$(cat /vault/secrets/wger-db-password)" - {{- end }} - {{- with secret "kv/data/atlas/health/wger-secrets" -}} + {{ end }} + {{ with secret "kv/data/atlas/health/wger-secrets" }} export SECRET_KEY="$(cat /vault/secrets/wger-secret-key)" export SIGNING_KEY="$(cat /vault/secrets/wger-signing-key)" - {{- end -}} + {{ end }} 
vault.hashicorp.com/agent-inject-secret-wger-db-password: "kv/data/atlas/health/wger-db" vault.hashicorp.com/agent-inject-template-wger-db-password: | {{- with secret "kv/data/atlas/health/wger-db" -}} From c38f77302fe307edd6494239a41121b9864bf245 Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Wed, 14 Jan 2026 22:29:27 -0300 Subject: [PATCH 076/270] vault: inject comms and grafana secrets --- services/comms/helmrelease.yaml | 152 ++++++++++++++++-- services/comms/kustomization.yaml | 5 + services/comms/scripts/vault-entrypoint.sh | 34 ++++ services/monitoring/helmrelease.yaml | 68 ++++++-- services/monitoring/kustomization.yaml | 6 + .../monitoring/scripts/vault-entrypoint.sh | 34 ++++ 6 files changed, 274 insertions(+), 25 deletions(-) create mode 100644 services/comms/scripts/vault-entrypoint.sh create mode 100644 services/monitoring/scripts/vault-entrypoint.sh diff --git a/services/comms/helmrelease.yaml b/services/comms/helmrelease.yaml index eaa7c20..37eaffb 100644 --- a/services/comms/helmrelease.yaml +++ b/services/comms/helmrelease.yaml @@ -30,6 +30,10 @@ spec: config: publicBaseurl: https://matrix.live.bstein.dev + serviceAccount: + create: false + name: comms-vault + externalPostgresql: host: postgres-service.postgres.svc.cluster.local port: 5432 @@ -71,22 +75,32 @@ spec: limits: cpu: "2" memory: 3Gi - extraEnv: - - name: TURN_SECRET - valueFrom: - secretKeyRef: - name: turn-shared-secret - key: TURN_STATIC_AUTH_SECRET - - name: MAS_SHARED_SECRET - valueFrom: - secretKeyRef: - name: mas-secrets-runtime - key: matrix_shared_secret - - name: MACAROON_SECRET_KEY - valueFrom: - secretKeyRef: - name: synapse-macaroon - key: macaroon_secret_key + annotations: + vault.hashicorp.com/agent-inject: "true" + vault.hashicorp.com/role: "comms" + vault.hashicorp.com/agent-inject-secret-synapse-env.sh: "kv/data/atlas/comms/synapse-db" + vault.hashicorp.com/agent-inject-template-synapse-env.sh: | + {{ with secret "kv/data/atlas/comms/synapse-db" }} + export 
POSTGRES_PASSWORD="{{ .Data.data.POSTGRES_PASSWORD }}" + {{ end }} + {{ with secret "kv/data/atlas/comms/synapse-redis" }} + export REDIS_PASSWORD="{{ .Data.data.redis-password }}" + {{ end }} + {{ with secret "kv/data/atlas/comms/turn-shared-secret" }} + export TURN_SECRET="{{ .Data.data.TURN_STATIC_AUTH_SECRET }}" + {{ end }} + {{ with secret "kv/data/atlas/comms/mas-secrets-runtime" }} + export MAS_SHARED_SECRET="{{ .Data.data.matrix_shared_secret }}" + {{ end }} + {{ with secret "kv/data/atlas/comms/synapse-macaroon" }} + export MACAROON_SECRET_KEY="{{ .Data.data.macaroon_secret_key }}" + {{ end }} + vault.hashicorp.com/agent-inject-secret-synapse-signingkey: "kv/data/atlas/comms/othrys-synapse-signingkey" + vault.hashicorp.com/agent-inject-template-synapse-signingkey: | + {{ with secret "kv/data/atlas/comms/othrys-synapse-signingkey" }} + {{ index .Data.data "signing.key" }} + {{ end }} + extraEnv: [] extraCommands: - >- esc() { printf "%s" "$1" | sed "s/'/''/g"; }; @@ -185,6 +199,112 @@ spec: enabled: false existingSecret: othrys-synapse-signingkey existingSecretKey: signing.key + postRenderers: + - kustomize: + patches: + - target: + kind: Deployment + name: othrys-synapse-matrix-synapse + patch: |- + apiVersion: apps/v1 + kind: Deployment + metadata: + name: othrys-synapse-matrix-synapse + spec: + template: + spec: + serviceAccountName: comms-vault + automountServiceAccountToken: true + containers: + - name: matrix-synapse + command: + - /entrypoint.sh + args: + - sh + - -c + - |- + export POSTGRES_PASSWORD=$(echo "${POSTGRES_PASSWORD:-}" | sed 's/\//\\\//g' | sed 's/\&/\\\&/g') + export REDIS_PASSWORD=$(echo "${REDIS_PASSWORD:-}" | sed 's/\//\\\//g' | sed 's/\&/\\\&/g') + cat /synapse/secrets/*.yaml | \ + sed -e "s/@@POSTGRES_PASSWORD@@/${POSTGRES_PASSWORD:-}/" \ + -e "s/@@REDIS_PASSWORD@@/${REDIS_PASSWORD:-}/" \ + > /synapse/config/conf.d/secrets.yaml + + esc() { printf "%s" "$1" | sed "s/'/''/g"; }; + printf '%s\n' \ + "matrix_authentication_service:" \ 
+ " enabled: true" \ + " endpoint: http://matrix-authentication-service:8080/" \ + " secret: '$(esc "${MAS_SHARED_SECRET:-}")'" \ + "turn_shared_secret: '$(esc "${TURN_SECRET:-}")'" \ + "macaroon_secret_key: '$(esc "${MACAROON_SECRET_KEY:-}")'" \ + > /synapse/config/conf.d/runtime-secrets.yaml + + exec python -B -m synapse.app.homeserver \ + -c /synapse/config/homeserver.yaml \ + -c /synapse/config/conf.d/ + env: + - $patch: replace + - name: VAULT_ENV_FILE + value: /vault/secrets/synapse-env.sh + - name: VAULT_COPY_FILES + value: /vault/secrets/synapse-signingkey:/synapse/keys/signing.key + volumeMounts: + - name: comms-vault-entrypoint + mountPath: /entrypoint.sh + subPath: vault-entrypoint.sh + volumes: + - name: comms-vault-entrypoint + configMap: + name: comms-vault-entrypoint + defaultMode: 493 + - name: signingkey + $patch: delete + - name: signingkey + emptyDir: {} + - target: + kind: Deployment + name: othrys-synapse-redis-master + patch: |- + apiVersion: apps/v1 + kind: Deployment + metadata: + name: othrys-synapse-redis-master + spec: + template: + metadata: + annotations: + vault.hashicorp.com/agent-inject: "true" + vault.hashicorp.com/role: "comms" + vault.hashicorp.com/agent-inject-secret-redis-env.sh: "kv/data/atlas/comms/synapse-redis" + vault.hashicorp.com/agent-inject-template-redis-env.sh: | + {{ with secret "kv/data/atlas/comms/synapse-redis" }} + export REDIS_PASSWORD="{{ .Data.data.redis-password }}" + {{ end }} + spec: + serviceAccountName: comms-vault + automountServiceAccountToken: true + containers: + - name: redis + command: + - /entrypoint.sh + args: + - /bin/bash + - -c + - /opt/bitnami/scripts/start-scripts/start-master.sh + env: + - $patch: replace + - name: VAULT_ENV_FILE + value: /vault/secrets/redis-env.sh + volumeMounts: + - name: comms-vault-entrypoint + mountPath: /entrypoint.sh + subPath: vault-entrypoint.sh + volumes: + - name: comms-vault-entrypoint + configMap: + name: comms-vault-entrypoint + defaultMode: 493 --- 
apiVersion: helm.toolkit.fluxcd.io/v2 kind: HelmRelease diff --git a/services/comms/kustomization.yaml b/services/comms/kustomization.yaml index b0cc0da..9171b6b 100644 --- a/services/comms/kustomization.yaml +++ b/services/comms/kustomization.yaml @@ -48,6 +48,11 @@ configMapGenerator: - comms_vault_env.sh=scripts/comms_vault_env.sh options: disableNameSuffixHash: true + - name: comms-vault-entrypoint + files: + - scripts/vault-entrypoint.sh + options: + disableNameSuffixHash: true - name: matrix-guest-register files: - server.py=scripts/guest-register/server.py diff --git a/services/comms/scripts/vault-entrypoint.sh b/services/comms/scripts/vault-entrypoint.sh new file mode 100644 index 0000000..fa3b791 --- /dev/null +++ b/services/comms/scripts/vault-entrypoint.sh @@ -0,0 +1,34 @@ +#!/bin/sh +set -eu + +if [ -n "${VAULT_ENV_FILE:-}" ]; then + if [ -f "${VAULT_ENV_FILE}" ]; then + # shellcheck disable=SC1090 + . "${VAULT_ENV_FILE}" + else + echo "Vault env file not found: ${VAULT_ENV_FILE}" >&2 + exit 1 + fi +fi + +if [ -n "${VAULT_COPY_FILES:-}" ]; then + old_ifs="$IFS" + IFS=',' + for pair in ${VAULT_COPY_FILES}; do + src="${pair%%:*}" + dest="${pair#*:}" + if [ -z "${src}" ] || [ -z "${dest}" ]; then + echo "Vault copy entry malformed: ${pair}" >&2 + exit 1 + fi + if [ ! 
-f "${src}" ]; then + echo "Vault file not found: ${src}" >&2 + exit 1 + fi + mkdir -p "$(dirname "${dest}")" + cp "${src}" "${dest}" + done + IFS="$old_ifs" +fi + +exec "$@" diff --git a/services/monitoring/helmrelease.yaml b/services/monitoring/helmrelease.yaml index dbb41ef..470784c 100644 --- a/services/monitoring/helmrelease.yaml +++ b/services/monitoring/helmrelease.yaml @@ -259,6 +259,23 @@ spec: existingSecret: grafana-admin userKey: admin-user passwordKey: admin-password + serviceAccount: + create: false + name: monitoring-vault-sync + automountServiceAccountToken: true + podAnnotations: + vault.hashicorp.com/agent-inject: "true" + vault.hashicorp.com/role: "monitoring" + vault.hashicorp.com/agent-inject-secret-grafana-env.sh: "kv/data/atlas/monitoring/grafana-admin" + vault.hashicorp.com/agent-inject-template-grafana-env.sh: | + {{ with secret "kv/data/atlas/monitoring/grafana-admin" }} + export GF_SECURITY_ADMIN_USER="{{ .Data.data.admin-user }}" + export GF_SECURITY_ADMIN_PASSWORD="{{ .Data.data.admin-password }}" + {{ end }} + {{ with secret "kv/data/atlas/shared/postmark-relay" }} + export GF_SMTP_USER="{{ .Data.data.relay-username }}" + export GF_SMTP_PASSWORD="{{ .Data.data.relay-password }}" + {{ end }} persistence: enabled: true size: 20Gi @@ -300,15 +317,6 @@ spec: hide_version: true users: default_theme: dark - envValueFrom: - GF_SMTP_USER: - secretKeyRef: - name: grafana-smtp - key: username - GF_SMTP_PASSWORD: - secretKeyRef: - name: grafana-smtp - key: password ingress: enabled: true ingressClassName: traefik @@ -429,6 +437,48 @@ spec: mountPath: /etc/grafana/provisioning/alerting configMap: grafana-alerting readOnly: true + postRenderers: + - kustomize: + patches: + - target: + kind: Deployment + name: grafana + patch: |- + apiVersion: apps/v1 + kind: Deployment + metadata: + name: grafana + spec: + template: + spec: + serviceAccountName: monitoring-vault-sync + automountServiceAccountToken: true + containers: + - name: grafana + command: + 
- /entrypoint.sh + args: + - /run.sh + env: + - name: GF_SECURITY_ADMIN_USER + $patch: delete + - name: GF_SECURITY_ADMIN_PASSWORD + $patch: delete + - name: GF_SMTP_USER + $patch: delete + - name: GF_SMTP_PASSWORD + $patch: delete + - name: VAULT_ENV_FILE + value: /vault/secrets/grafana-env.sh + volumeMounts: + - name: monitoring-vault-entrypoint + mountPath: /entrypoint.sh + subPath: vault-entrypoint.sh + volumes: + - name: monitoring-vault-entrypoint + configMap: + name: monitoring-vault-entrypoint + defaultMode: 493 --- diff --git a/services/monitoring/kustomization.yaml b/services/monitoring/kustomization.yaml index 6596a36..b12556e 100644 --- a/services/monitoring/kustomization.yaml +++ b/services/monitoring/kustomization.yaml @@ -37,3 +37,9 @@ configMapGenerator: - exporter.py=scripts/jetson_tegrastats_exporter.py options: disableNameSuffixHash: true + - name: monitoring-vault-entrypoint + namespace: monitoring + files: + - scripts/vault-entrypoint.sh + options: + disableNameSuffixHash: true diff --git a/services/monitoring/scripts/vault-entrypoint.sh b/services/monitoring/scripts/vault-entrypoint.sh new file mode 100644 index 0000000..fa3b791 --- /dev/null +++ b/services/monitoring/scripts/vault-entrypoint.sh @@ -0,0 +1,34 @@ +#!/bin/sh +set -eu + +if [ -n "${VAULT_ENV_FILE:-}" ]; then + if [ -f "${VAULT_ENV_FILE}" ]; then + # shellcheck disable=SC1090 + . "${VAULT_ENV_FILE}" + else + echo "Vault env file not found: ${VAULT_ENV_FILE}" >&2 + exit 1 + fi +fi + +if [ -n "${VAULT_COPY_FILES:-}" ]; then + old_ifs="$IFS" + IFS=',' + for pair in ${VAULT_COPY_FILES}; do + src="${pair%%:*}" + dest="${pair#*:}" + if [ -z "${src}" ] || [ -z "${dest}" ]; then + echo "Vault copy entry malformed: ${pair}" >&2 + exit 1 + fi + if [ ! 
-f "${src}" ]; then + echo "Vault file not found: ${src}" >&2 + exit 1 + fi + mkdir -p "$(dirname "${dest}")" + cp "${src}" "${dest}" + done + IFS="$old_ifs" +fi + +exec "$@" From e8d004c1b908a88a680de5216737a737bc4f90c9 Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Wed, 14 Jan 2026 22:34:02 -0300 Subject: [PATCH 077/270] comms: fix synapse vault patch --- services/comms/helmrelease.yaml | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/services/comms/helmrelease.yaml b/services/comms/helmrelease.yaml index 37eaffb..755adc4 100644 --- a/services/comms/helmrelease.yaml +++ b/services/comms/helmrelease.yaml @@ -216,7 +216,7 @@ spec: serviceAccountName: comms-vault automountServiceAccountToken: true containers: - - name: matrix-synapse + - name: synapse command: - /entrypoint.sh args: @@ -259,8 +259,7 @@ spec: name: comms-vault-entrypoint defaultMode: 493 - name: signingkey - $patch: delete - - name: signingkey + $patch: replace emptyDir: {} - target: kind: Deployment From 0b21c8f40d29f9eb401ef5079bec3a57789a4b3a Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Wed, 14 Jan 2026 22:37:18 -0300 Subject: [PATCH 078/270] vault: fix hyphenated key templates --- services/comms/helmrelease.yaml | 4 ++-- services/monitoring/helmrelease.yaml | 8 ++++---- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/services/comms/helmrelease.yaml b/services/comms/helmrelease.yaml index 755adc4..45185cc 100644 --- a/services/comms/helmrelease.yaml +++ b/services/comms/helmrelease.yaml @@ -84,7 +84,7 @@ spec: export POSTGRES_PASSWORD="{{ .Data.data.POSTGRES_PASSWORD }}" {{ end }} {{ with secret "kv/data/atlas/comms/synapse-redis" }} - export REDIS_PASSWORD="{{ .Data.data.redis-password }}" + export REDIS_PASSWORD="{{ index .Data.data "redis-password" }}" {{ end }} {{ with secret "kv/data/atlas/comms/turn-shared-secret" }} export TURN_SECRET="{{ .Data.data.TURN_STATIC_AUTH_SECRET }}" @@ -278,7 +278,7 @@ spec: 
vault.hashicorp.com/agent-inject-secret-redis-env.sh: "kv/data/atlas/comms/synapse-redis" vault.hashicorp.com/agent-inject-template-redis-env.sh: | {{ with secret "kv/data/atlas/comms/synapse-redis" }} - export REDIS_PASSWORD="{{ .Data.data.redis-password }}" + export REDIS_PASSWORD="{{ index .Data.data "redis-password" }}" {{ end }} spec: serviceAccountName: comms-vault diff --git a/services/monitoring/helmrelease.yaml b/services/monitoring/helmrelease.yaml index 470784c..d535ebe 100644 --- a/services/monitoring/helmrelease.yaml +++ b/services/monitoring/helmrelease.yaml @@ -269,12 +269,12 @@ spec: vault.hashicorp.com/agent-inject-secret-grafana-env.sh: "kv/data/atlas/monitoring/grafana-admin" vault.hashicorp.com/agent-inject-template-grafana-env.sh: | {{ with secret "kv/data/atlas/monitoring/grafana-admin" }} - export GF_SECURITY_ADMIN_USER="{{ .Data.data.admin-user }}" - export GF_SECURITY_ADMIN_PASSWORD="{{ .Data.data.admin-password }}" + export GF_SECURITY_ADMIN_USER="{{ index .Data.data "admin-user" }}" + export GF_SECURITY_ADMIN_PASSWORD="{{ index .Data.data "admin-password" }}" {{ end }} {{ with secret "kv/data/atlas/shared/postmark-relay" }} - export GF_SMTP_USER="{{ .Data.data.relay-username }}" - export GF_SMTP_PASSWORD="{{ .Data.data.relay-password }}" + export GF_SMTP_USER="{{ index .Data.data "relay-username" }}" + export GF_SMTP_PASSWORD="{{ index .Data.data "relay-password" }}" {{ end }} persistence: enabled: true From 98cdafb1626c98101ce5f46bff41fe6bb34d9e58 Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Wed, 14 Jan 2026 22:43:50 -0300 Subject: [PATCH 079/270] comms: keep redis env while injecting vault --- services/comms/helmrelease.yaml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/services/comms/helmrelease.yaml b/services/comms/helmrelease.yaml index 45185cc..c94f5a9 100644 --- a/services/comms/helmrelease.yaml +++ b/services/comms/helmrelease.yaml @@ -292,7 +292,8 @@ spec: - -c - 
/opt/bitnami/scripts/start-scripts/start-master.sh env: - - $patch: replace + - name: REDIS_PASSWORD + $patch: delete - name: VAULT_ENV_FILE value: /vault/secrets/redis-env.sh volumeMounts: From 52cc04dee92cad33f9d926b7553074bf9de5ef44 Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Wed, 14 Jan 2026 22:56:30 -0300 Subject: [PATCH 080/270] comms: mount vault signing key volume --- services/comms/helmrelease.yaml | 22 ++++++++++++++++++++-- 1 file changed, 20 insertions(+), 2 deletions(-) diff --git a/services/comms/helmrelease.yaml b/services/comms/helmrelease.yaml index c94f5a9..5492814 100644 --- a/services/comms/helmrelease.yaml +++ b/services/comms/helmrelease.yaml @@ -253,13 +253,19 @@ spec: - name: comms-vault-entrypoint mountPath: /entrypoint.sh subPath: vault-entrypoint.sh + - name: signingkey + mountPath: /synapse/keys + $patch: delete + - name: signingkey-writable + mountPath: /synapse/keys volumes: + - name: signingkey + $patch: delete - name: comms-vault-entrypoint configMap: name: comms-vault-entrypoint defaultMode: 493 - - name: signingkey - $patch: replace + - name: signingkey-writable emptyDir: {} - target: kind: Deployment @@ -296,6 +302,18 @@ spec: $patch: delete - name: VAULT_ENV_FILE value: /vault/secrets/redis-env.sh + livenessProbe: + exec: + command: + - sh + - -c + - . /vault/secrets/redis-env.sh && /health/ping_liveness_local.sh 5 + readinessProbe: + exec: + command: + - sh + - -c + - . 
/vault/secrets/redis-env.sh && /health/ping_readiness_local.sh 1 volumeMounts: - name: comms-vault-entrypoint mountPath: /entrypoint.sh From d898c71c0875fa782e4464c9d96bc6fc178790ee Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Wed, 14 Jan 2026 22:59:11 -0300 Subject: [PATCH 081/270] comms: mount synapse signing key --- services/comms/helmrelease.yaml | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/services/comms/helmrelease.yaml b/services/comms/helmrelease.yaml index 5492814..7c91e04 100644 --- a/services/comms/helmrelease.yaml +++ b/services/comms/helmrelease.yaml @@ -250,14 +250,22 @@ spec: - name: VAULT_COPY_FILES value: /vault/secrets/synapse-signingkey:/synapse/keys/signing.key volumeMounts: + - $patch: replace - name: comms-vault-entrypoint mountPath: /entrypoint.sh subPath: vault-entrypoint.sh - - name: signingkey - mountPath: /synapse/keys - $patch: delete + - name: config + mountPath: /synapse/config + - name: tmpconf + mountPath: /synapse/config/conf.d + - name: secrets + mountPath: /synapse/secrets - name: signingkey-writable mountPath: /synapse/keys + - name: media + mountPath: /synapse/data + - name: tmpdir + mountPath: /tmp volumes: - name: signingkey $patch: delete From 6c8d3b24f27d92213127695e4915cf2fb0f46914 Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Wed, 14 Jan 2026 23:15:19 -0300 Subject: [PATCH 082/270] jellyfin: read LDAP config from vault --- services/jellyfin/deployment.yaml | 32 +++++++++++------ services/jellyfin/kustomization.yaml | 6 ++++ services/jellyfin/scripts/vault-entrypoint.sh | 34 +++++++++++++++++++ 3 files changed, 62 insertions(+), 10 deletions(-) create mode 100644 services/jellyfin/scripts/vault-entrypoint.sh diff --git a/services/jellyfin/deployment.yaml b/services/jellyfin/deployment.yaml index 1177a06..4747417 100644 --- a/services/jellyfin/deployment.yaml +++ b/services/jellyfin/deployment.yaml @@ -20,7 +20,16 @@ spec: metadata: labels: app: jellyfin + annotations: + 
vault.hashicorp.com/agent-inject: "true" + vault.hashicorp.com/role: "pegasus" + vault.hashicorp.com/agent-inject-secret-ldap-config.xml: "kv/data/atlas/pegasus/jellyfin-ldap-config" + vault.hashicorp.com/agent-inject-template-ldap-config.xml: | + {{ with secret "kv/data/atlas/pegasus/jellyfin-ldap-config" }} + {{ index .Data.data "ldap-config.xml" }} + {{ end }} spec: + serviceAccountName: pegasus-vault-sync # Clean up any lingering OIDC artifacts and strip the injected script tag initContainers: - name: strip-oidc @@ -90,6 +99,10 @@ spec: - name: jellyfin image: docker.io/jellyfin/jellyfin:10.11.5 imagePullPolicy: IfNotPresent + command: + - /entrypoint.sh + args: + - /jellyfin/jellyfin ports: - name: http containerPort: 8096 @@ -104,6 +117,8 @@ spec: value: "65532" - name: UMASK value: "002" + - name: VAULT_COPY_FILES + value: /vault/secrets/ldap-config.xml:/config/plugins/configurations/LDAP-Auth.xml resources: limits: nvidia.com/gpu.shared: 1 @@ -114,12 +129,11 @@ spec: cpu: "500m" memory: 1Gi volumeMounts: + - name: jellyfin-vault-entrypoint + mountPath: /entrypoint.sh + subPath: vault-entrypoint.sh - name: config mountPath: /config - # Override LDAP plugin configuration from a secret to avoid embedding credentials in the PVC. 
- - name: ldap-config - mountPath: /config/plugins/configurations/LDAP-Auth.xml - subPath: ldap-config.xml - name: cache mountPath: /cache - name: media @@ -143,6 +157,10 @@ spec: allowPrivilegeEscalation: false readOnlyRootFilesystem: false volumes: + - name: jellyfin-vault-entrypoint + configMap: + name: jellyfin-vault-entrypoint + defaultMode: 493 - name: web-root emptyDir: {} - name: config @@ -154,9 +172,3 @@ spec: - name: media persistentVolumeClaim: claimName: jellyfin-media-asteria-new - - name: ldap-config - secret: - secretName: jellyfin-ldap-config - items: - - key: ldap-config.xml - path: ldap-config.xml diff --git a/services/jellyfin/kustomization.yaml b/services/jellyfin/kustomization.yaml index 51566b8..041b27c 100644 --- a/services/jellyfin/kustomization.yaml +++ b/services/jellyfin/kustomization.yaml @@ -7,3 +7,9 @@ resources: - service.yaml - deployment.yaml - ingress.yaml +generatorOptions: + disableNameSuffixHash: true +configMapGenerator: + - name: jellyfin-vault-entrypoint + files: + - vault-entrypoint.sh=scripts/vault-entrypoint.sh diff --git a/services/jellyfin/scripts/vault-entrypoint.sh b/services/jellyfin/scripts/vault-entrypoint.sh new file mode 100644 index 0000000..fa3b791 --- /dev/null +++ b/services/jellyfin/scripts/vault-entrypoint.sh @@ -0,0 +1,34 @@ +#!/bin/sh +set -eu + +if [ -n "${VAULT_ENV_FILE:-}" ]; then + if [ -f "${VAULT_ENV_FILE}" ]; then + # shellcheck disable=SC1090 + . "${VAULT_ENV_FILE}" + else + echo "Vault env file not found: ${VAULT_ENV_FILE}" >&2 + exit 1 + fi +fi + +if [ -n "${VAULT_COPY_FILES:-}" ]; then + old_ifs="$IFS" + IFS=',' + for pair in ${VAULT_COPY_FILES}; do + src="${pair%%:*}" + dest="${pair#*:}" + if [ -z "${src}" ] || [ -z "${dest}" ]; then + echo "Vault copy entry malformed: ${pair}" >&2 + exit 1 + fi + if [ ! 
-f "${src}" ]; then + echo "Vault file not found: ${src}" >&2 + exit 1 + fi + mkdir -p "$(dirname "${dest}")" + cp "${src}" "${dest}" + done + IFS="$old_ifs" +fi + +exec "$@" From 82090c1953c7a5960aa8a924c1ff7d699ddbc437 Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Wed, 14 Jan 2026 23:20:04 -0300 Subject: [PATCH 083/270] vault: read oidc config from vault --- .../scripts/vault_oidc_secret_ensure.sh | 18 --- services/vault/kustomization.yaml | 3 + services/vault/oidc-config-cronjob.yaml | 142 +++++------------- services/vault/scripts/vault-entrypoint.sh | 34 +++++ .../vault/scripts/vault_k8s_auth_configure.sh | 2 + 5 files changed, 76 insertions(+), 123 deletions(-) create mode 100644 services/vault/scripts/vault-entrypoint.sh diff --git a/services/keycloak/scripts/vault_oidc_secret_ensure.sh b/services/keycloak/scripts/vault_oidc_secret_ensure.sh index 20d39c1..3c7d4a5 100755 --- a/services/keycloak/scripts/vault_oidc_secret_ensure.sh +++ b/services/keycloak/scripts/vault_oidc_secret_ensure.sh @@ -116,21 +116,3 @@ payload="$(jq -nc \ '{data:{discovery_url:$discovery_url,client_id:$client_id,client_secret:$client_secret,default_role:$default_role,scopes:$scopes,user_claim:$user_claim,groups_claim:$groups_claim,redirect_uris:$redirect_uris,bound_audiences:$bound_audiences,admin_group:$admin_group,admin_policies:$admin_policies,dev_group:$dev_group,dev_policies:$dev_policies,user_group:$user_group,user_policies:$user_policies}}')" curl -sS -X POST -H "X-Vault-Token: ${vault_token}" \ -d "${payload}" "${vault_addr}/v1/kv/data/atlas/vault/vault-oidc-config" >/dev/null - -kubectl -n vault create secret generic vault-oidc-config \ - --from-literal=discovery_url="https://sso.bstein.dev/realms/atlas" \ - --from-literal=client_id="vault-oidc" \ - --from-literal=client_secret="${CLIENT_SECRET}" \ - --from-literal=default_role="admin" \ - --from-literal=scopes="openid profile email groups" \ - --from-literal=user_claim="preferred_username" \ - 
--from-literal=groups_claim="groups" \ - --from-literal=redirect_uris="https://secret.bstein.dev/ui/vault/auth/oidc/oidc/callback,http://localhost:8250/oidc/callback" \ - --from-literal=bound_audiences="vault-oidc" \ - --from-literal=admin_group="admin" \ - --from-literal=admin_policies="default,vault-admin" \ - --from-literal=dev_group="dev" \ - --from-literal=dev_policies="default,dev-kv" \ - --from-literal=user_group="dev" \ - --from-literal=user_policies="default,dev-kv" \ - --dry-run=client -o yaml | kubectl -n vault apply -f - >/dev/null diff --git a/services/vault/kustomization.yaml b/services/vault/kustomization.yaml index 9643894..6381404 100644 --- a/services/vault/kustomization.yaml +++ b/services/vault/kustomization.yaml @@ -23,3 +23,6 @@ configMapGenerator: - name: vault-k8s-auth-config-script files: - vault_k8s_auth_configure.sh=scripts/vault_k8s_auth_configure.sh + - name: vault-entrypoint + files: + - vault-entrypoint.sh=scripts/vault-entrypoint.sh diff --git a/services/vault/oidc-config-cronjob.yaml b/services/vault/oidc-config-cronjob.yaml index 3ea7b53..efe5fee 100644 --- a/services/vault/oidc-config-cronjob.yaml +++ b/services/vault/oidc-config-cronjob.yaml @@ -13,6 +13,32 @@ spec: spec: backoffLimit: 1 template: + metadata: + annotations: + vault.hashicorp.com/agent-inject: "true" + vault.hashicorp.com/role: "vault" + vault.hashicorp.com/agent-inject-secret-vault-oidc-env.sh: "kv/data/atlas/vault/vault-oidc-config" + vault.hashicorp.com/agent-inject-template-vault-oidc-env.sh: | + {{ with secret "kv/data/atlas/vault/vault-oidc-config" }} + export VAULT_OIDC_DISCOVERY_URL="{{ .Data.data.discovery_url }}" + export VAULT_OIDC_CLIENT_ID="{{ .Data.data.client_id }}" + export VAULT_OIDC_CLIENT_SECRET="{{ .Data.data.client_secret }}" + export VAULT_OIDC_DEFAULT_ROLE="{{ .Data.data.default_role }}" + export VAULT_OIDC_SCOPES="{{ .Data.data.scopes }}" + export VAULT_OIDC_USER_CLAIM="{{ .Data.data.user_claim }}" + export VAULT_OIDC_GROUPS_CLAIM="{{ 
.Data.data.groups_claim }}" + export VAULT_OIDC_TOKEN_POLICIES="{{ .Data.data.token_policies }}" + export VAULT_OIDC_ADMIN_GROUP="{{ .Data.data.admin_group }}" + export VAULT_OIDC_ADMIN_POLICIES="{{ .Data.data.admin_policies }}" + export VAULT_OIDC_DEV_GROUP="{{ .Data.data.dev_group }}" + export VAULT_OIDC_DEV_POLICIES="{{ .Data.data.dev_policies }}" + export VAULT_OIDC_USER_GROUP="{{ .Data.data.user_group }}" + export VAULT_OIDC_USER_POLICIES="{{ .Data.data.user_policies }}" + export VAULT_OIDC_REDIRECT_URIS="{{ .Data.data.redirect_uris }}" + export VAULT_OIDC_BOUND_AUDIENCES="{{ .Data.data.bound_audiences }}" + export VAULT_OIDC_BOUND_CLAIMS="{{ .Data.data.bound_claims }}" + export VAULT_OIDC_BOUND_CLAIMS_TYPE="{{ .Data.data.bound_claims_type }}" + {{ end }} spec: serviceAccountName: vault restartPolicy: Never @@ -24,6 +50,8 @@ spec: image: hashicorp/vault:1.17.6 imagePullPolicy: IfNotPresent command: + - /entrypoint.sh + args: - sh - /scripts/vault_oidc_configure.sh env: @@ -34,116 +62,20 @@ spec: secretKeyRef: name: vault-oidc-admin-token key: token - - name: VAULT_OIDC_DISCOVERY_URL - valueFrom: - secretKeyRef: - name: vault-oidc-config - key: discovery_url - - name: VAULT_OIDC_CLIENT_ID - valueFrom: - secretKeyRef: - name: vault-oidc-config - key: client_id - - name: VAULT_OIDC_CLIENT_SECRET - valueFrom: - secretKeyRef: - name: vault-oidc-config - key: client_secret - - name: VAULT_OIDC_DEFAULT_ROLE - valueFrom: - secretKeyRef: - name: vault-oidc-config - key: default_role - optional: true - - name: VAULT_OIDC_SCOPES - valueFrom: - secretKeyRef: - name: vault-oidc-config - key: scopes - optional: true - - name: VAULT_OIDC_USER_CLAIM - valueFrom: - secretKeyRef: - name: vault-oidc-config - key: user_claim - optional: true - - name: VAULT_OIDC_GROUPS_CLAIM - valueFrom: - secretKeyRef: - name: vault-oidc-config - key: groups_claim - optional: true - - name: VAULT_OIDC_TOKEN_POLICIES - valueFrom: - secretKeyRef: - name: vault-oidc-config - key: token_policies - 
optional: true - - name: VAULT_OIDC_ADMIN_GROUP - valueFrom: - secretKeyRef: - name: vault-oidc-config - key: admin_group - optional: true - - name: VAULT_OIDC_ADMIN_POLICIES - valueFrom: - secretKeyRef: - name: vault-oidc-config - key: admin_policies - optional: true - - name: VAULT_OIDC_DEV_GROUP - valueFrom: - secretKeyRef: - name: vault-oidc-config - key: dev_group - optional: true - - name: VAULT_OIDC_DEV_POLICIES - valueFrom: - secretKeyRef: - name: vault-oidc-config - key: dev_policies - optional: true - - name: VAULT_OIDC_USER_GROUP - valueFrom: - secretKeyRef: - name: vault-oidc-config - key: user_group - optional: true - - name: VAULT_OIDC_USER_POLICIES - valueFrom: - secretKeyRef: - name: vault-oidc-config - key: user_policies - optional: true - - name: VAULT_OIDC_REDIRECT_URIS - valueFrom: - secretKeyRef: - name: vault-oidc-config - key: redirect_uris - optional: true - - name: VAULT_OIDC_BOUND_AUDIENCES - valueFrom: - secretKeyRef: - name: vault-oidc-config - key: bound_audiences - optional: true - - name: VAULT_OIDC_BOUND_CLAIMS - valueFrom: - secretKeyRef: - name: vault-oidc-config - key: bound_claims - optional: true - - name: VAULT_OIDC_BOUND_CLAIMS_TYPE - valueFrom: - secretKeyRef: - name: vault-oidc-config - key: bound_claims_type - optional: true + - name: VAULT_ENV_FILE + value: /vault/secrets/vault-oidc-env.sh volumeMounts: + - name: vault-entrypoint + mountPath: /entrypoint.sh + subPath: vault-entrypoint.sh - name: oidc-config-script mountPath: /scripts readOnly: true volumes: + - name: vault-entrypoint + configMap: + name: vault-entrypoint + defaultMode: 493 - name: oidc-config-script configMap: name: vault-oidc-config-script diff --git a/services/vault/scripts/vault-entrypoint.sh b/services/vault/scripts/vault-entrypoint.sh new file mode 100644 index 0000000..fa3b791 --- /dev/null +++ b/services/vault/scripts/vault-entrypoint.sh @@ -0,0 +1,34 @@ +#!/bin/sh +set -eu + +if [ -n "${VAULT_ENV_FILE:-}" ]; then + if [ -f "${VAULT_ENV_FILE}" ]; 
then + # shellcheck disable=SC1090 + . "${VAULT_ENV_FILE}" + else + echo "Vault env file not found: ${VAULT_ENV_FILE}" >&2 + exit 1 + fi +fi + +if [ -n "${VAULT_COPY_FILES:-}" ]; then + old_ifs="$IFS" + IFS=',' + for pair in ${VAULT_COPY_FILES}; do + src="${pair%%:*}" + dest="${pair#*:}" + if [ -z "${src}" ] || [ -z "${dest}" ]; then + echo "Vault copy entry malformed: ${pair}" >&2 + exit 1 + fi + if [ ! -f "${src}" ]; then + echo "Vault file not found: ${src}" >&2 + exit 1 + fi + mkdir -p "$(dirname "${dest}")" + cp "${src}" "${dest}" + done + IFS="$old_ifs" +fi + +exec "$@" diff --git a/services/vault/scripts/vault_k8s_auth_configure.sh b/services/vault/scripts/vault_k8s_auth_configure.sh index eb78aed..f0d7833 100644 --- a/services/vault/scripts/vault_k8s_auth_configure.sh +++ b/services/vault/scripts/vault_k8s_auth_configure.sh @@ -109,6 +109,8 @@ write_policy_and_role "health" "health" "health-vault-sync" \ "health/*" "" write_policy_and_role "longhorn" "longhorn-system" "longhorn-vault" \ "longhorn/*" "" +write_policy_and_role "vault" "vault" "vault" \ + "vault/*" "" write_policy_and_role "sso-secrets" "sso" "mas-secrets-ensure" \ "shared/keycloak-admin" \ From 0733127039cbe0318bdf0a633b1d9ab15b120467 Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Wed, 14 Jan 2026 23:21:39 -0300 Subject: [PATCH 084/270] vault: sync oidc and wger env --- services/health/wger-deployment.yaml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/services/health/wger-deployment.yaml b/services/health/wger-deployment.yaml index cadab68..1664904 100644 --- a/services/health/wger-deployment.yaml +++ b/services/health/wger-deployment.yaml @@ -24,17 +24,17 @@ spec: vault.hashicorp.com/role: "health" vault.hashicorp.com/agent-inject-secret-wger-env: "kv/data/atlas/health/wger-db" vault.hashicorp.com/agent-inject-template-wger-env: | - {{- with secret "kv/data/atlas/health/wger-db" -}} + {{ with secret "kv/data/atlas/health/wger-db" }} export DJANGO_DB_HOST="{{ 
.Data.data.DJANGO_DB_HOST }}" export DJANGO_DB_PORT="{{ .Data.data.DJANGO_DB_PORT }}" export DJANGO_DB_DATABASE="{{ .Data.data.DJANGO_DB_DATABASE }}" export DJANGO_DB_USER="{{ .Data.data.DJANGO_DB_USER }}" export DJANGO_DB_PASSWORD="$(cat /vault/secrets/wger-db-password)" - {{- end }} - {{- with secret "kv/data/atlas/health/wger-secrets" -}} + {{ end }} + {{ with secret "kv/data/atlas/health/wger-secrets" }} export SECRET_KEY="$(cat /vault/secrets/wger-secret-key)" export SIGNING_KEY="$(cat /vault/secrets/wger-signing-key)" - {{- end -}} + {{ end }} vault.hashicorp.com/agent-inject-secret-wger-db-password: "kv/data/atlas/health/wger-db" vault.hashicorp.com/agent-inject-template-wger-db-password: | {{- with secret "kv/data/atlas/health/wger-db" -}} From 3af97973e04acfcf75688139a5b9a4533b872481 Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Wed, 14 Jan 2026 23:26:07 -0300 Subject: [PATCH 085/270] health: stabilize wger startup --- services/health/config/nginx.conf | 10 +++++----- services/health/wger-deployment.yaml | 14 ++++++++++++++ 2 files changed, 19 insertions(+), 5 deletions(-) diff --git a/services/health/config/nginx.conf b/services/health/config/nginx.conf index 8508c38..b1ee8d4 100644 --- a/services/health/config/nginx.conf +++ b/services/health/config/nginx.conf @@ -5,11 +5,11 @@ upstream wger { server { listen 8080; - client_body_temp_path /tmp/nginx/client_body 1 2; - proxy_temp_path /tmp/nginx/proxy 1 2; - fastcgi_temp_path /tmp/nginx/fastcgi 1 2; - uwsgi_temp_path /tmp/nginx/uwsgi 1 2; - scgi_temp_path /tmp/nginx/scgi 1 2; + client_body_temp_path /tmp/client_body 1 2; + proxy_temp_path /tmp/proxy 1 2; + fastcgi_temp_path /tmp/fastcgi 1 2; + uwsgi_temp_path /tmp/uwsgi 1 2; + scgi_temp_path /tmp/scgi 1 2; location = /api/v2/register { return 404; diff --git a/services/health/wger-deployment.yaml b/services/health/wger-deployment.yaml index 1664904..f372344 100644 --- a/services/health/wger-deployment.yaml +++ b/services/health/wger-deployment.yaml @@ 
-155,6 +155,13 @@ spec: mountPath: /home/wger/static - name: wger-media mountPath: /home/wger/media + startupProbe: + httpGet: + path: /api/v2/version/ + port: app + failureThreshold: 60 + periodSeconds: 10 + timeoutSeconds: 3 readinessProbe: httpGet: path: /api/v2/version/ @@ -196,6 +203,13 @@ spec: mountPath: /wger/static - name: wger-media mountPath: /wger/media + startupProbe: + httpGet: + path: /api/v2/version/ + port: http + failureThreshold: 60 + periodSeconds: 10 + timeoutSeconds: 3 readinessProbe: httpGet: path: /api/v2/version/ From 81e79fd19a75e498954e2de0ebf992d3d2e5c91d Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Wed, 14 Jan 2026 23:34:39 -0300 Subject: [PATCH 086/270] jellyfin: trim vault ldap template --- services/jellyfin/deployment.yaml | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/services/jellyfin/deployment.yaml b/services/jellyfin/deployment.yaml index 4747417..a5257f1 100644 --- a/services/jellyfin/deployment.yaml +++ b/services/jellyfin/deployment.yaml @@ -25,9 +25,7 @@ spec: vault.hashicorp.com/role: "pegasus" vault.hashicorp.com/agent-inject-secret-ldap-config.xml: "kv/data/atlas/pegasus/jellyfin-ldap-config" vault.hashicorp.com/agent-inject-template-ldap-config.xml: | - {{ with secret "kv/data/atlas/pegasus/jellyfin-ldap-config" }} - {{ index .Data.data "ldap-config.xml" }} - {{ end }} + {{- with secret "kv/data/atlas/pegasus/jellyfin-ldap-config" -}}{{ index .Data.data "ldap-config.xml" }}{{- end -}} spec: serviceAccountName: pegasus-vault-sync # Clean up any lingering OIDC artifacts and strip the injected script tag From e94ea272ce368ec61631d66b69168a079e97c83f Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Wed, 14 Jan 2026 23:35:07 -0300 Subject: [PATCH 087/270] health: fix nginx pid path --- services/health/wger-deployment.yaml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/services/health/wger-deployment.yaml b/services/health/wger-deployment.yaml index f372344..62a491e 100644 --- 
a/services/health/wger-deployment.yaml +++ b/services/health/wger-deployment.yaml @@ -188,6 +188,9 @@ spec: - name: nginx image: nginx:1.27.5-alpine@sha256:65645c7bb6a0661892a8b03b89d0743208a18dd2f3f17a54ef4b76fb8e2f2a10 imagePullPolicy: IfNotPresent + args: + - -g + - daemon off; pid /tmp/nginx.pid; ports: - name: http containerPort: 8080 From c1b771298a5f10f7e6a49c10bb20c65b05a793ca Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Wed, 14 Jan 2026 23:37:06 -0300 Subject: [PATCH 088/270] jellyfin: schedule on nvidia accelerators --- services/jellyfin/deployment.yaml | 15 +++------------ 1 file changed, 3 insertions(+), 12 deletions(-) diff --git a/services/jellyfin/deployment.yaml b/services/jellyfin/deployment.yaml index a5257f1..ab36a63 100644 --- a/services/jellyfin/deployment.yaml +++ b/services/jellyfin/deployment.yaml @@ -75,18 +75,9 @@ spec: volumeMounts: - name: config mountPath: /config - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: kubernetes.io/hostname - operator: In - values: - - titan-20 - - titan-21 - - titan-22 - - titan-24 + nodeSelector: + accelerator: nvidia + kubernetes.io/arch: arm64 securityContext: runAsUser: 1000 fsGroup: 65532 From 35dcc5d66c235a985bc7440f8887afb5b8071703 Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Wed, 14 Jan 2026 23:47:23 -0300 Subject: [PATCH 089/270] health: run nginx directly --- services/health/wger-deployment.yaml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/services/health/wger-deployment.yaml b/services/health/wger-deployment.yaml index 62a491e..c2d3de5 100644 --- a/services/health/wger-deployment.yaml +++ b/services/health/wger-deployment.yaml @@ -188,6 +188,8 @@ spec: - name: nginx image: nginx:1.27.5-alpine@sha256:65645c7bb6a0661892a8b03b89d0743208a18dd2f3f17a54ef4b76fb8e2f2a10 imagePullPolicy: IfNotPresent + command: + - nginx args: - -g - daemon off; pid /tmp/nginx.pid; From 88be97d860f3d73c923fe1a160c19386ec3e118c 
Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Wed, 14 Jan 2026 23:55:50 -0300 Subject: [PATCH 090/270] health: add nginx main config --- services/health/config/nginx-main.conf | 22 ++++++++++++++++++++++ services/health/kustomization.yaml | 1 + services/health/wger-deployment.yaml | 5 ++++- 3 files changed, 27 insertions(+), 1 deletion(-) create mode 100644 services/health/config/nginx-main.conf diff --git a/services/health/config/nginx-main.conf b/services/health/config/nginx-main.conf new file mode 100644 index 0000000..81a5e1f --- /dev/null +++ b/services/health/config/nginx-main.conf @@ -0,0 +1,22 @@ +worker_processes auto; +pid /tmp/nginx.pid; + +events { + worker_connections 1024; +} + +http { + include /etc/nginx/mime.types; + default_type application/octet-stream; + + log_format main '$remote_addr - $remote_user [$time_local] "$request" ' + '$status $body_bytes_sent "$http_referer" ' + '"$http_user_agent" "$http_x_forwarded_for"'; + access_log /dev/stdout main; + error_log /dev/stderr warn; + + sendfile on; + keepalive_timeout 65; + + include /etc/nginx/conf.d/*.conf; +} diff --git a/services/health/kustomization.yaml b/services/health/kustomization.yaml index c4dd47e..9d21923 100644 --- a/services/health/kustomization.yaml +++ b/services/health/kustomization.yaml @@ -18,6 +18,7 @@ configMapGenerator: - name: wger-nginx-config files: - default.conf=config/nginx.conf + - nginx.conf=config/nginx-main.conf - name: wger-user-sync-script files: - wger_user_sync.py=scripts/wger_user_sync.py diff --git a/services/health/wger-deployment.yaml b/services/health/wger-deployment.yaml index c2d3de5..56a29a4 100644 --- a/services/health/wger-deployment.yaml +++ b/services/health/wger-deployment.yaml @@ -192,7 +192,7 @@ spec: - nginx args: - -g - - daemon off; pid /tmp/nginx.pid; + - daemon off; ports: - name: http containerPort: 8080 @@ -204,6 +204,9 @@ spec: - name: wger-nginx-config mountPath: /etc/nginx/conf.d/default.conf subPath: default.conf + - name: 
wger-nginx-config + mountPath: /etc/nginx/nginx.conf + subPath: nginx.conf - name: wger-static mountPath: /wger/static - name: wger-media From 836ce605b6e335e26e4f7ddd6c0507688a119884 Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Wed, 14 Jan 2026 23:56:02 -0300 Subject: [PATCH 091/270] jellyfin: prefer gpu nodes by hostname --- services/jellyfin/deployment.yaml | 28 +++++++++++++++++++++++++--- 1 file changed, 25 insertions(+), 3 deletions(-) diff --git a/services/jellyfin/deployment.yaml b/services/jellyfin/deployment.yaml index ab36a63..e176931 100644 --- a/services/jellyfin/deployment.yaml +++ b/services/jellyfin/deployment.yaml @@ -75,9 +75,31 @@ spec: volumeMounts: - name: config mountPath: /config - nodeSelector: - accelerator: nvidia - kubernetes.io/arch: arm64 + affinity: + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: kubernetes.io/hostname + operator: In + values: + - titan-22 + - weight: 80 + preference: + matchExpressions: + - key: kubernetes.io/hostname + operator: In + values: + - titan-20 + - titan-21 + - weight: 60 + preference: + matchExpressions: + - key: kubernetes.io/hostname + operator: In + values: + - titan-24 securityContext: runAsUser: 1000 fsGroup: 65532 From 139ca78c3d1ee54f716fe3adba19572b7b13d4be Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Thu, 15 Jan 2026 00:28:15 -0300 Subject: [PATCH 092/270] bstein-dev-home: bump portal images --- services/bstein-dev-home/backend-deployment.yaml | 2 +- services/bstein-dev-home/frontend-deployment.yaml | 2 +- services/bstein-dev-home/vaultwarden-cred-sync-cronjob.yaml | 4 ++-- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/services/bstein-dev-home/backend-deployment.yaml b/services/bstein-dev-home/backend-deployment.yaml index d4b037c..94aae62 100644 --- a/services/bstein-dev-home/backend-deployment.yaml +++ b/services/bstein-dev-home/backend-deployment.yaml @@ -43,7 +43,7 @@ spec: - name: 
harbor-regcred containers: - name: backend - image: registry.bstein.dev/bstein/bstein-dev-home-backend:0.1.1-92 # {"$imagepolicy": "bstein-dev-home:bstein-dev-home-backend"} + image: registry.bstein.dev/bstein/bstein-dev-home-backend:0.1.1-94 # {"$imagepolicy": "bstein-dev-home:bstein-dev-home-backend"} imagePullPolicy: Always command: ["/bin/sh", "-c"] args: diff --git a/services/bstein-dev-home/frontend-deployment.yaml b/services/bstein-dev-home/frontend-deployment.yaml index 478ebf9..ef9e36d 100644 --- a/services/bstein-dev-home/frontend-deployment.yaml +++ b/services/bstein-dev-home/frontend-deployment.yaml @@ -22,7 +22,7 @@ spec: - name: harbor-regcred containers: - name: frontend - image: registry.bstein.dev/bstein/bstein-dev-home-frontend:0.1.1-92 # {"$imagepolicy": "bstein-dev-home:bstein-dev-home-frontend"} + image: registry.bstein.dev/bstein/bstein-dev-home-frontend:0.1.1-94 # {"$imagepolicy": "bstein-dev-home:bstein-dev-home-frontend"} imagePullPolicy: Always ports: - name: http diff --git a/services/bstein-dev-home/vaultwarden-cred-sync-cronjob.yaml b/services/bstein-dev-home/vaultwarden-cred-sync-cronjob.yaml index bba2b1b..fe9afea 100644 --- a/services/bstein-dev-home/vaultwarden-cred-sync-cronjob.yaml +++ b/services/bstein-dev-home/vaultwarden-cred-sync-cronjob.yaml @@ -44,7 +44,7 @@ spec: - name: harbor-regcred containers: - name: sync - image: registry.bstein.dev/bstein/bstein-dev-home-backend:0.1.1-92 # {"$imagepolicy": "bstein-dev-home:bstein-dev-home-backend"} + image: registry.bstein.dev/bstein/bstein-dev-home-backend:0.1.1-94 # {"$imagepolicy": "bstein-dev-home:bstein-dev-home-backend"} imagePullPolicy: Always command: ["/bin/sh", "-c"] args: @@ -74,4 +74,4 @@ spec: - name: vaultwarden-cred-sync-script configMap: name: vaultwarden-cred-sync-script - defaultMode: 0555 \ No newline at end of file + defaultMode: 0555 From 7f96daa7b825a78ae9510ca89678ed8c6eeba5c3 Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Thu, 15 Jan 2026 00:35:41 -0300 
Subject: [PATCH 093/270] comms: move synapse secrets to vault --- services/comms/helmrelease.yaml | 13 ++- services/comms/secretproviderclass.yaml | 113 ------------------------ 2 files changed, 9 insertions(+), 117 deletions(-) diff --git a/services/comms/helmrelease.yaml b/services/comms/helmrelease.yaml index 7c91e04..139ad25 100644 --- a/services/comms/helmrelease.yaml +++ b/services/comms/helmrelease.yaml @@ -29,6 +29,7 @@ spec: config: publicBaseurl: https://matrix.live.bstein.dev + registrationSharedSecret: "vault-managed" serviceAccount: create: false @@ -38,15 +39,15 @@ spec: host: postgres-service.postgres.svc.cluster.local port: 5432 username: synapse - existingSecret: synapse-db - existingSecretPasswordKey: POSTGRES_PASSWORD + existingSecret: vault-placeholder + existingSecretPasswordKey: postgres-password database: synapse redis: enabled: true auth: enabled: true - existingSecret: synapse-redis + existingSecret: vault-placeholder existingSecretPasswordKey: redis-password postgresql: @@ -92,6 +93,9 @@ spec: {{ with secret "kv/data/atlas/comms/mas-secrets-runtime" }} export MAS_SHARED_SECRET="{{ .Data.data.matrix_shared_secret }}" {{ end }} + {{ with secret "kv/data/atlas/comms/synapse-registration" }} + export REGISTRATION_SHARED_SECRET="{{ .Data.data.registration_shared_secret }}" + {{ end }} {{ with secret "kv/data/atlas/comms/synapse-macaroon" }} export MACAROON_SECRET_KEY="{{ .Data.data.macaroon_secret_key }}" {{ end }} @@ -109,6 +113,7 @@ spec: " enabled: true" " endpoint: http://matrix-authentication-service:8080/" " secret: '$(esc "${MAS_SHARED_SECRET:-}")'" + "registration_shared_secret: '$(esc "${REGISTRATION_SHARED_SECRET:-}")'" "turn_shared_secret: '$(esc "${TURN_SECRET:-}")'" "macaroon_secret_key: '$(esc "${MACAROON_SECRET_KEY:-}")'" > /synapse/config/conf.d/runtime-secrets.yaml @@ -197,7 +202,7 @@ spec: signingkey: job: enabled: false - existingSecret: othrys-synapse-signingkey + existingSecret: vault-placeholder existingSecretKey: 
signing.key postRenderers: - kustomize: diff --git a/services/comms/secretproviderclass.yaml b/services/comms/secretproviderclass.yaml index 251173c..69d4b2b 100644 --- a/services/comms/secretproviderclass.yaml +++ b/services/comms/secretproviderclass.yaml @@ -10,123 +10,10 @@ spec: vaultAddress: "http://vault.vault.svc.cluster.local:8200" roleName: "comms" objects: | - - objectName: "turn-secret" - secretPath: "kv/data/atlas/comms/turn-shared-secret" - secretKey: "TURN_STATIC_AUTH_SECRET" - - objectName: "synapse-db-pass" - secretPath: "kv/data/atlas/comms/synapse-db" - secretKey: "POSTGRES_PASSWORD" - - objectName: "synapse-redis__redis-password" - secretPath: "kv/data/atlas/comms/synapse-redis" - secretKey: "redis-password" - - objectName: "synapse-macaroon__macaroon_secret_key" - secretPath: "kv/data/atlas/comms/synapse-macaroon" - secretKey: "macaroon_secret_key" - - objectName: "bot-pass" - secretPath: "kv/data/atlas/comms/atlasbot-credentials-runtime" - secretKey: "bot-password" - - objectName: "seeder-pass" - secretPath: "kv/data/atlas/comms/atlasbot-credentials-runtime" - secretKey: "seeder-password" - - objectName: "chat-matrix" - secretPath: "kv/data/atlas/shared/chat-ai-keys-runtime" - secretKey: "matrix" - - objectName: "chat-homepage" - secretPath: "kv/data/atlas/shared/chat-ai-keys-runtime" - secretKey: "homepage" - - objectName: "mas-admin-secret" - secretPath: "kv/data/atlas/comms/mas-admin-client-runtime" - secretKey: "client_secret" - - objectName: "mas-db-pass" - secretPath: "kv/data/atlas/comms/mas-db" - secretKey: "password" - - objectName: "mas-encryption" - secretPath: "kv/data/atlas/comms/mas-secrets-runtime" - secretKey: "encryption" - - objectName: "mas-matrix-shared" - secretPath: "kv/data/atlas/comms/mas-secrets-runtime" - secretKey: "matrix_shared_secret" - - objectName: "mas-kc-secret" - secretPath: "kv/data/atlas/comms/mas-secrets-runtime" - secretKey: "keycloak_client_secret" - - objectName: "mas-rsa-key" - secretPath: 
"kv/data/atlas/comms/mas-secrets-runtime" - secretKey: "rsa_key" - - objectName: "othrys-synapse-signingkey__signing.key" - secretPath: "kv/data/atlas/comms/othrys-synapse-signingkey" - secretKey: "signing.key" - - objectName: "synapse-oidc__client-secret" - secretPath: "kv/data/atlas/comms/synapse-oidc" - secretKey: "client-secret" - objectName: "harbor-pull__dockerconfigjson" secretPath: "kv/data/atlas/harbor-pull/comms" secretKey: "dockerconfigjson" secretObjects: - - secretName: turn-shared-secret - type: Opaque - data: - - objectName: turn-secret - key: TURN_STATIC_AUTH_SECRET - - secretName: synapse-db - type: Opaque - data: - - objectName: synapse-db-pass - key: POSTGRES_PASSWORD - - secretName: synapse-redis - type: Opaque - data: - - objectName: synapse-redis__redis-password - key: redis-password - - secretName: synapse-macaroon - type: Opaque - data: - - objectName: synapse-macaroon__macaroon_secret_key - key: macaroon_secret_key - - secretName: atlasbot-credentials-runtime - type: Opaque - data: - - objectName: bot-pass - key: bot-password - - objectName: seeder-pass - key: seeder-password - - secretName: chat-ai-keys-runtime - type: Opaque - data: - - objectName: chat-matrix - key: matrix - - objectName: chat-homepage - key: homepage - - secretName: mas-admin-client-runtime - type: Opaque - data: - - objectName: mas-admin-secret - key: client_secret - - secretName: mas-db - type: Opaque - data: - - objectName: mas-db-pass - key: password - - secretName: mas-secrets-runtime - type: Opaque - data: - - objectName: mas-encryption - key: encryption - - objectName: mas-matrix-shared - key: matrix_shared_secret - - objectName: mas-kc-secret - key: keycloak_client_secret - - objectName: mas-rsa-key - key: rsa_key - - secretName: othrys-synapse-signingkey - type: Opaque - data: - - objectName: othrys-synapse-signingkey__signing.key - key: signing.key - - secretName: synapse-oidc - type: Opaque - data: - - objectName: synapse-oidc__client-secret - key: 
client-secret - secretName: harbor-regcred type: kubernetes.io/dockerconfigjson data: From 8fed4a08c51b164dc8def917d2810050a1d342e4 Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Thu, 15 Jan 2026 00:41:28 -0300 Subject: [PATCH 094/270] health: allow portal wger sync --- services/bstein-dev-home/rbac.yaml | 31 ------------------------------ services/health/kustomization.yaml | 1 + services/health/portal-rbac.yaml | 31 ++++++++++++++++++++++++++++++ 3 files changed, 32 insertions(+), 31 deletions(-) create mode 100644 services/health/portal-rbac.yaml diff --git a/services/bstein-dev-home/rbac.yaml b/services/bstein-dev-home/rbac.yaml index 7ce8fd8..f97ed24 100644 --- a/services/bstein-dev-home/rbac.yaml +++ b/services/bstein-dev-home/rbac.yaml @@ -106,34 +106,3 @@ subjects: - kind: ServiceAccount name: bstein-dev-home namespace: bstein-dev-home ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - name: bstein-dev-home-wger-user-sync - namespace: health -rules: - - apiGroups: ["batch"] - resources: ["cronjobs"] - verbs: ["get"] - resourceNames: ["wger-user-sync"] - - apiGroups: ["batch"] - resources: ["jobs"] - verbs: ["create", "get", "list", "watch"] - - apiGroups: [""] - resources: ["pods"] - verbs: ["get", "list"] ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - name: bstein-dev-home-wger-user-sync - namespace: health -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: bstein-dev-home-wger-user-sync -subjects: - - kind: ServiceAccount - name: bstein-dev-home - namespace: bstein-dev-home diff --git a/services/health/kustomization.yaml b/services/health/kustomization.yaml index 9d21923..4dccf8c 100644 --- a/services/health/kustomization.yaml +++ b/services/health/kustomization.yaml @@ -5,6 +5,7 @@ namespace: health resources: - namespace.yaml - serviceaccount.yaml + - portal-rbac.yaml - wger-media-pvc.yaml - wger-static-pvc.yaml - wger-admin-ensure-cronjob.yaml diff --git 
a/services/health/portal-rbac.yaml b/services/health/portal-rbac.yaml new file mode 100644 index 0000000..cd9acd1 --- /dev/null +++ b/services/health/portal-rbac.yaml @@ -0,0 +1,31 @@ +# services/health/portal-rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: bstein-dev-home-wger-user-sync + namespace: health +rules: + - apiGroups: ["batch"] + resources: ["cronjobs"] + verbs: ["get"] + resourceNames: ["wger-user-sync"] + - apiGroups: ["batch"] + resources: ["jobs"] + verbs: ["create", "get", "list", "watch"] + - apiGroups: [""] + resources: ["pods"] + verbs: ["get", "list"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: bstein-dev-home-wger-user-sync + namespace: health +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: bstein-dev-home-wger-user-sync +subjects: + - kind: ServiceAccount + name: bstein-dev-home + namespace: bstein-dev-home From 511403c4a6887e18c54fbe68a6e980a30069d6c4 Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Thu, 15 Jan 2026 00:47:51 -0300 Subject: [PATCH 095/270] bstein-dev-home: bump portal images --- services/bstein-dev-home/backend-deployment.yaml | 2 +- services/bstein-dev-home/frontend-deployment.yaml | 2 +- services/bstein-dev-home/vaultwarden-cred-sync-cronjob.yaml | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/services/bstein-dev-home/backend-deployment.yaml b/services/bstein-dev-home/backend-deployment.yaml index 94aae62..abf3034 100644 --- a/services/bstein-dev-home/backend-deployment.yaml +++ b/services/bstein-dev-home/backend-deployment.yaml @@ -43,7 +43,7 @@ spec: - name: harbor-regcred containers: - name: backend - image: registry.bstein.dev/bstein/bstein-dev-home-backend:0.1.1-94 # {"$imagepolicy": "bstein-dev-home:bstein-dev-home-backend"} + image: registry.bstein.dev/bstein/bstein-dev-home-backend:0.1.1-95 # {"$imagepolicy": "bstein-dev-home:bstein-dev-home-backend"} imagePullPolicy: Always command: ["/bin/sh", "-c"] 
args: diff --git a/services/bstein-dev-home/frontend-deployment.yaml b/services/bstein-dev-home/frontend-deployment.yaml index ef9e36d..642ca06 100644 --- a/services/bstein-dev-home/frontend-deployment.yaml +++ b/services/bstein-dev-home/frontend-deployment.yaml @@ -22,7 +22,7 @@ spec: - name: harbor-regcred containers: - name: frontend - image: registry.bstein.dev/bstein/bstein-dev-home-frontend:0.1.1-94 # {"$imagepolicy": "bstein-dev-home:bstein-dev-home-frontend"} + image: registry.bstein.dev/bstein/bstein-dev-home-frontend:0.1.1-95 # {"$imagepolicy": "bstein-dev-home:bstein-dev-home-frontend"} imagePullPolicy: Always ports: - name: http diff --git a/services/bstein-dev-home/vaultwarden-cred-sync-cronjob.yaml b/services/bstein-dev-home/vaultwarden-cred-sync-cronjob.yaml index fe9afea..8e835eb 100644 --- a/services/bstein-dev-home/vaultwarden-cred-sync-cronjob.yaml +++ b/services/bstein-dev-home/vaultwarden-cred-sync-cronjob.yaml @@ -44,7 +44,7 @@ spec: - name: harbor-regcred containers: - name: sync - image: registry.bstein.dev/bstein/bstein-dev-home-backend:0.1.1-94 # {"$imagepolicy": "bstein-dev-home:bstein-dev-home-backend"} + image: registry.bstein.dev/bstein/bstein-dev-home-backend:0.1.1-95 # {"$imagepolicy": "bstein-dev-home:bstein-dev-home-backend"} imagePullPolicy: Always command: ["/bin/sh", "-c"] args: From f5a3894c2b1788aaf439ec6af03d90d141448b1f Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Thu, 15 Jan 2026 01:02:41 -0300 Subject: [PATCH 096/270] mailu: use vault sidecar env --- services/comms/helmrelease.yaml | 1 + services/mailu/helmrelease.yaml | 423 +++++++++++++++++++++ services/mailu/kustomization.yaml | 6 + services/mailu/scripts/vault-entrypoint.sh | 34 ++ services/mailu/secretproviderclass.yaml | 65 ---- 5 files changed, 464 insertions(+), 65 deletions(-) create mode 100644 services/mailu/scripts/vault-entrypoint.sh diff --git a/services/comms/helmrelease.yaml b/services/comms/helmrelease.yaml index 139ad25..2b049c8 100644 --- 
a/services/comms/helmrelease.yaml +++ b/services/comms/helmrelease.yaml @@ -241,6 +241,7 @@ spec: " enabled: true" \ " endpoint: http://matrix-authentication-service:8080/" \ " secret: '$(esc "${MAS_SHARED_SECRET:-}")'" \ + "registration_shared_secret: '$(esc "${REGISTRATION_SHARED_SECRET:-}")'" \ "turn_shared_secret: '$(esc "${TURN_SECRET:-}")'" \ "macaroon_secret_key: '$(esc "${MACAROON_SECRET_KEY:-}")'" \ > /synapse/config/conf.d/runtime-secrets.yaml diff --git a/services/mailu/helmrelease.yaml b/services/mailu/helmrelease.yaml index e675961..ceb3e0c 100644 --- a/services/mailu/helmrelease.yaml +++ b/services/mailu/helmrelease.yaml @@ -305,3 +305,426 @@ spec: submission: port: 587 targetPort: 587 + postRenderers: + - kustomize: + patches: + - target: + kind: Deployment + name: mailu-admin + patch: |- + apiVersion: apps/v1 + kind: Deployment + metadata: + name: mailu-admin + spec: + template: + metadata: + annotations: + vault.hashicorp.com/agent-inject: "true" + vault.hashicorp.com/role: "mailu-mailserver" + vault.hashicorp.com/agent-inject-secret-mailu-env.sh: "kv/data/atlas/mailu/mailu-secret" + vault.hashicorp.com/agent-inject-template-mailu-env.sh: | + {{ with secret "kv/data/atlas/mailu/mailu-secret" }} + export SECRET_KEY="{{ index .Data.data "secret-key" }}" + {{ end }} + {{ with secret "kv/data/atlas/mailu/mailu-db-secret" }} + export DB_PW="{{ .Data.data.password }}" + export ROUNDCUBE_DB_PW="{{ .Data.data.password }}" + {{ end }} + {{ with secret "kv/data/atlas/mailu/mailu-initial-account-secret" }} + export INITIAL_ADMIN_PW="{{ .Data.data.password }}" + {{ end }} + {{ with secret "kv/data/atlas/shared/postmark-relay" }} + export RELAYUSER="{{ index .Data.data "relay-username" }}" + export RELAYPASSWORD="{{ index .Data.data "relay-password" }}" + {{ end }} + spec: + serviceAccountName: mailu-vault-sync + automountServiceAccountToken: true + containers: + - name: admin + command: + - /entrypoint.sh + args: + - python3 + - /start.py + env: + - name: 
SECRET_KEY + $patch: delete + - name: INITIAL_ADMIN_PW + $patch: delete + - name: DB_PW + $patch: delete + - name: RELAYUSER + $patch: delete + - name: RELAYPASSWORD + $patch: delete + - name: VAULT_ENV_FILE + value: /vault/secrets/mailu-env.sh + volumeMounts: + - name: mailu-vault-entrypoint + mountPath: /entrypoint.sh + subPath: vault-entrypoint.sh + volumes: + - name: mailu-vault-entrypoint + configMap: + name: mailu-vault-entrypoint + defaultMode: 493 + - target: + kind: Deployment + name: mailu-front + patch: |- + apiVersion: apps/v1 + kind: Deployment + metadata: + name: mailu-front + spec: + template: + metadata: + annotations: + vault.hashicorp.com/agent-inject: "true" + vault.hashicorp.com/role: "mailu-mailserver" + vault.hashicorp.com/agent-inject-secret-mailu-env.sh: "kv/data/atlas/mailu/mailu-secret" + vault.hashicorp.com/agent-inject-template-mailu-env.sh: | + {{ with secret "kv/data/atlas/mailu/mailu-secret" }} + export SECRET_KEY="{{ index .Data.data "secret-key" }}" + {{ end }} + {{ with secret "kv/data/atlas/mailu/mailu-db-secret" }} + export DB_PW="{{ .Data.data.password }}" + export ROUNDCUBE_DB_PW="{{ .Data.data.password }}" + {{ end }} + {{ with secret "kv/data/atlas/mailu/mailu-initial-account-secret" }} + export INITIAL_ADMIN_PW="{{ .Data.data.password }}" + {{ end }} + {{ with secret "kv/data/atlas/shared/postmark-relay" }} + export RELAYUSER="{{ index .Data.data "relay-username" }}" + export RELAYPASSWORD="{{ index .Data.data "relay-password" }}" + {{ end }} + spec: + serviceAccountName: mailu-vault-sync + automountServiceAccountToken: true + containers: + - name: front + command: + - /entrypoint.sh + args: + - python3 + - /start.py + env: + - name: SECRET_KEY + $patch: delete + - name: INITIAL_ADMIN_PW + $patch: delete + - name: DB_PW + $patch: delete + - name: RELAYUSER + $patch: delete + - name: RELAYPASSWORD + $patch: delete + - name: VAULT_ENV_FILE + value: /vault/secrets/mailu-env.sh + volumeMounts: + - name: mailu-vault-entrypoint + 
mountPath: /entrypoint.sh + subPath: vault-entrypoint.sh + volumes: + - name: mailu-vault-entrypoint + configMap: + name: mailu-vault-entrypoint + defaultMode: 493 + - target: + kind: Deployment + name: mailu-postfix + patch: |- + apiVersion: apps/v1 + kind: Deployment + metadata: + name: mailu-postfix + spec: + template: + metadata: + annotations: + vault.hashicorp.com/agent-inject: "true" + vault.hashicorp.com/role: "mailu-mailserver" + vault.hashicorp.com/agent-inject-secret-mailu-env.sh: "kv/data/atlas/mailu/mailu-secret" + vault.hashicorp.com/agent-inject-template-mailu-env.sh: | + {{ with secret "kv/data/atlas/mailu/mailu-secret" }} + export SECRET_KEY="{{ index .Data.data "secret-key" }}" + {{ end }} + {{ with secret "kv/data/atlas/mailu/mailu-db-secret" }} + export DB_PW="{{ .Data.data.password }}" + export ROUNDCUBE_DB_PW="{{ .Data.data.password }}" + {{ end }} + {{ with secret "kv/data/atlas/mailu/mailu-initial-account-secret" }} + export INITIAL_ADMIN_PW="{{ .Data.data.password }}" + {{ end }} + {{ with secret "kv/data/atlas/shared/postmark-relay" }} + export RELAYUSER="{{ index .Data.data "relay-username" }}" + export RELAYPASSWORD="{{ index .Data.data "relay-password" }}" + {{ end }} + spec: + serviceAccountName: mailu-vault-sync + automountServiceAccountToken: true + containers: + - name: postfix + command: + - /entrypoint.sh + args: + - python3 + - /start.py + env: + - name: SECRET_KEY + $patch: delete + - name: INITIAL_ADMIN_PW + $patch: delete + - name: DB_PW + $patch: delete + - name: RELAYUSER + $patch: delete + - name: RELAYPASSWORD + $patch: delete + - name: VAULT_ENV_FILE + value: /vault/secrets/mailu-env.sh + volumeMounts: + - name: mailu-vault-entrypoint + mountPath: /entrypoint.sh + subPath: vault-entrypoint.sh + volumes: + - name: mailu-vault-entrypoint + configMap: + name: mailu-vault-entrypoint + defaultMode: 493 + - target: + kind: Deployment + name: mailu-dovecot + patch: |- + apiVersion: apps/v1 + kind: Deployment + metadata: + name: 
mailu-dovecot + spec: + template: + metadata: + annotations: + vault.hashicorp.com/agent-inject: "true" + vault.hashicorp.com/role: "mailu-mailserver" + vault.hashicorp.com/agent-inject-secret-mailu-env.sh: "kv/data/atlas/mailu/mailu-secret" + vault.hashicorp.com/agent-inject-template-mailu-env.sh: | + {{ with secret "kv/data/atlas/mailu/mailu-secret" }} + export SECRET_KEY="{{ index .Data.data "secret-key" }}" + {{ end }} + {{ with secret "kv/data/atlas/mailu/mailu-db-secret" }} + export DB_PW="{{ .Data.data.password }}" + export ROUNDCUBE_DB_PW="{{ .Data.data.password }}" + {{ end }} + {{ with secret "kv/data/atlas/mailu/mailu-initial-account-secret" }} + export INITIAL_ADMIN_PW="{{ .Data.data.password }}" + {{ end }} + {{ with secret "kv/data/atlas/shared/postmark-relay" }} + export RELAYUSER="{{ index .Data.data "relay-username" }}" + export RELAYPASSWORD="{{ index .Data.data "relay-password" }}" + {{ end }} + spec: + serviceAccountName: mailu-vault-sync + automountServiceAccountToken: true + containers: + - name: dovecot + command: + - /entrypoint.sh + args: + - python3 + - /start.py + env: + - name: SECRET_KEY + $patch: delete + - name: INITIAL_ADMIN_PW + $patch: delete + - name: DB_PW + $patch: delete + - name: RELAYUSER + $patch: delete + - name: RELAYPASSWORD + $patch: delete + - name: VAULT_ENV_FILE + value: /vault/secrets/mailu-env.sh + volumeMounts: + - name: mailu-vault-entrypoint + mountPath: /entrypoint.sh + subPath: vault-entrypoint.sh + volumes: + - name: mailu-vault-entrypoint + configMap: + name: mailu-vault-entrypoint + defaultMode: 493 + - target: + kind: Deployment + name: mailu-rspamd + patch: |- + apiVersion: apps/v1 + kind: Deployment + metadata: + name: mailu-rspamd + spec: + template: + metadata: + annotations: + vault.hashicorp.com/agent-inject: "true" + vault.hashicorp.com/role: "mailu-mailserver" + vault.hashicorp.com/agent-inject-secret-mailu-env.sh: "kv/data/atlas/mailu/mailu-secret" + 
vault.hashicorp.com/agent-inject-template-mailu-env.sh: | + {{ with secret "kv/data/atlas/mailu/mailu-secret" }} + export SECRET_KEY="{{ index .Data.data "secret-key" }}" + {{ end }} + {{ with secret "kv/data/atlas/mailu/mailu-db-secret" }} + export DB_PW="{{ .Data.data.password }}" + export ROUNDCUBE_DB_PW="{{ .Data.data.password }}" + {{ end }} + {{ with secret "kv/data/atlas/mailu/mailu-initial-account-secret" }} + export INITIAL_ADMIN_PW="{{ .Data.data.password }}" + {{ end }} + {{ with secret "kv/data/atlas/shared/postmark-relay" }} + export RELAYUSER="{{ index .Data.data "relay-username" }}" + export RELAYPASSWORD="{{ index .Data.data "relay-password" }}" + {{ end }} + spec: + serviceAccountName: mailu-vault-sync + automountServiceAccountToken: true + containers: + - name: rspamd + command: + - /entrypoint.sh + args: + - python3 + - /start.py + env: + - name: SECRET_KEY + $patch: delete + - name: INITIAL_ADMIN_PW + $patch: delete + - name: DB_PW + $patch: delete + - name: RELAYUSER + $patch: delete + - name: RELAYPASSWORD + $patch: delete + - name: VAULT_ENV_FILE + value: /vault/secrets/mailu-env.sh + volumeMounts: + - name: mailu-vault-entrypoint + mountPath: /entrypoint.sh + subPath: vault-entrypoint.sh + volumes: + - name: mailu-vault-entrypoint + configMap: + name: mailu-vault-entrypoint + defaultMode: 493 + - target: + kind: Deployment + name: mailu-oletools + patch: |- + apiVersion: apps/v1 + kind: Deployment + metadata: + name: mailu-oletools + spec: + template: + metadata: + annotations: + vault.hashicorp.com/agent-inject: "true" + vault.hashicorp.com/role: "mailu-mailserver" + vault.hashicorp.com/agent-inject-secret-mailu-env.sh: "kv/data/atlas/mailu/mailu-secret" + vault.hashicorp.com/agent-inject-template-mailu-env.sh: | + {{ with secret "kv/data/atlas/mailu/mailu-secret" }} + export SECRET_KEY="{{ index .Data.data "secret-key" }}" + {{ end }} + {{ with secret "kv/data/atlas/mailu/mailu-db-secret" }} + export DB_PW="{{ .Data.data.password }}" + 
export ROUNDCUBE_DB_PW="{{ .Data.data.password }}" + {{ end }} + {{ with secret "kv/data/atlas/mailu/mailu-initial-account-secret" }} + export INITIAL_ADMIN_PW="{{ .Data.data.password }}" + {{ end }} + {{ with secret "kv/data/atlas/shared/postmark-relay" }} + export RELAYUSER="{{ index .Data.data "relay-username" }}" + export RELAYPASSWORD="{{ index .Data.data "relay-password" }}" + {{ end }} + spec: + serviceAccountName: mailu-vault-sync + automountServiceAccountToken: true + containers: + - name: oletools + command: + - /entrypoint.sh + args: + - python3 + - /start.py + env: + - name: SECRET_KEY + $patch: delete + - name: INITIAL_ADMIN_PW + $patch: delete + - name: DB_PW + $patch: delete + - name: RELAYUSER + $patch: delete + - name: RELAYPASSWORD + $patch: delete + - name: VAULT_ENV_FILE + value: /vault/secrets/mailu-env.sh + volumeMounts: + - name: mailu-vault-entrypoint + mountPath: /entrypoint.sh + subPath: vault-entrypoint.sh + volumes: + - name: mailu-vault-entrypoint + configMap: + name: mailu-vault-entrypoint + defaultMode: 493 + - target: + kind: StatefulSet + name: mailu-clamav + patch: |- + apiVersion: apps/v1 + kind: StatefulSet + metadata: + name: mailu-clamav + spec: + template: + spec: + containers: + - name: clamav + env: + - name: SECRET_KEY + $patch: delete + - name: INITIAL_ADMIN_PW + $patch: delete + - name: DB_PW + $patch: delete + - name: RELAYUSER + $patch: delete + - name: RELAYPASSWORD + $patch: delete + - target: + kind: Deployment + name: mailu-tika + patch: |- + apiVersion: apps/v1 + kind: Deployment + metadata: + name: mailu-tika + spec: + template: + spec: + containers: + - name: tika + env: + - name: SECRET_KEY + $patch: delete + - name: INITIAL_ADMIN_PW + $patch: delete + - name: DB_PW + $patch: delete + - name: RELAYUSER + $patch: delete + - name: RELAYPASSWORD + $patch: delete diff --git a/services/mailu/kustomization.yaml b/services/mailu/kustomization.yaml index 31b1cb9..5c111eb 100644 --- a/services/mailu/kustomization.yaml 
+++ b/services/mailu/kustomization.yaml @@ -35,3 +35,9 @@ configMapGenerator: namespace: mailu-mailserver files: - listener.py=scripts/mailu_sync_listener.py + - name: mailu-vault-entrypoint + namespace: mailu-mailserver + files: + - vault-entrypoint.sh=scripts/vault-entrypoint.sh + options: + disableNameSuffixHash: true diff --git a/services/mailu/scripts/vault-entrypoint.sh b/services/mailu/scripts/vault-entrypoint.sh new file mode 100644 index 0000000..fa3b791 --- /dev/null +++ b/services/mailu/scripts/vault-entrypoint.sh @@ -0,0 +1,34 @@ +#!/bin/sh +set -eu + +if [ -n "${VAULT_ENV_FILE:-}" ]; then + if [ -f "${VAULT_ENV_FILE}" ]; then + # shellcheck disable=SC1090 + . "${VAULT_ENV_FILE}" + else + echo "Vault env file not found: ${VAULT_ENV_FILE}" >&2 + exit 1 + fi +fi + +if [ -n "${VAULT_COPY_FILES:-}" ]; then + old_ifs="$IFS" + IFS=',' + for pair in ${VAULT_COPY_FILES}; do + src="${pair%%:*}" + dest="${pair#*:}" + if [ -z "${src}" ] || [ -z "${dest}" ]; then + echo "Vault copy entry malformed: ${pair}" >&2 + exit 1 + fi + if [ ! 
-f "${src}" ]; then + echo "Vault file not found: ${src}" >&2 + exit 1 + fi + mkdir -p "$(dirname "${dest}")" + cp "${src}" "${dest}" + done + IFS="$old_ifs" +fi + +exec "$@" diff --git a/services/mailu/secretproviderclass.yaml b/services/mailu/secretproviderclass.yaml index 11cc2fe..f58c69b 100644 --- a/services/mailu/secretproviderclass.yaml +++ b/services/mailu/secretproviderclass.yaml @@ -10,75 +10,10 @@ spec: vaultAddress: "http://vault.vault.svc.cluster.local:8200" roleName: "mailu-mailserver" objects: | - - objectName: "mailu-secret__secret-key" - secretPath: "kv/data/atlas/mailu/mailu-secret" - secretKey: "secret-key" - - objectName: "postmark-relay__relay-username" - secretPath: "kv/data/atlas/shared/postmark-relay" - secretKey: "relay-username" - - objectName: "postmark-relay__relay-password" - secretPath: "kv/data/atlas/shared/postmark-relay" - secretKey: "relay-password" - - objectName: "mailu-db-secret__database" - secretPath: "kv/data/atlas/mailu/mailu-db-secret" - secretKey: "database" - - objectName: "mailu-db-secret__username" - secretPath: "kv/data/atlas/mailu/mailu-db-secret" - secretKey: "username" - - objectName: "mailu-db-secret__password" - secretPath: "kv/data/atlas/mailu/mailu-db-secret" - secretKey: "password" - - objectName: "mailu-db-secret__url" - secretPath: "kv/data/atlas/mailu/mailu-db-secret" - secretKey: "url" - - objectName: "mailu-initial-account-secret__password" - secretPath: "kv/data/atlas/mailu/mailu-initial-account-secret" - secretKey: "password" - - objectName: "mailu-sync-credentials__client-id" - secretPath: "kv/data/atlas/mailu/mailu-sync-credentials" - secretKey: "client-id" - - objectName: "mailu-sync-credentials__client-secret" - secretPath: "kv/data/atlas/mailu/mailu-sync-credentials" - secretKey: "client-secret" - objectName: "harbor-pull__dockerconfigjson" secretPath: "kv/data/atlas/harbor-pull/mailu-mailserver" secretKey: "dockerconfigjson" secretObjects: - - secretName: mailu-secret - type: Opaque - data: - - 
objectName: mailu-secret__secret-key - key: secret-key - - secretName: mailu-postmark-relay - type: Opaque - data: - - objectName: postmark-relay__relay-username - key: relay-username - - objectName: postmark-relay__relay-password - key: relay-password - - secretName: mailu-db-secret - type: Opaque - data: - - objectName: mailu-db-secret__database - key: database - - objectName: mailu-db-secret__username - key: username - - objectName: mailu-db-secret__password - key: password - - objectName: mailu-db-secret__url - key: url - - secretName: mailu-initial-account-secret - type: Opaque - data: - - objectName: mailu-initial-account-secret__password - key: password - - secretName: mailu-sync-credentials - type: Opaque - data: - - objectName: mailu-sync-credentials__client-id - key: client-id - - objectName: mailu-sync-credentials__client-secret - key: client-secret - secretName: harbor-regcred type: kubernetes.io/dockerconfigjson data: From cd14e70d02da6aca36b16d6b580ab1588832f6b9 Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Thu, 15 Jan 2026 01:13:42 -0300 Subject: [PATCH 097/270] health: run wger sync with python3 --- services/health/wger-admin-ensure-cronjob.yaml | 2 +- services/health/wger-user-sync-cronjob.yaml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/services/health/wger-admin-ensure-cronjob.yaml b/services/health/wger-admin-ensure-cronjob.yaml index aba0fc4..fc18283 100644 --- a/services/health/wger-admin-ensure-cronjob.yaml +++ b/services/health/wger-admin-ensure-cronjob.yaml @@ -90,7 +90,7 @@ spec: - | set -eu . 
/vault/secrets/wger-env - exec python /scripts/wger_user_sync.py + exec python3 /scripts/wger_user_sync.py env: - name: SITE_URL value: https://health.bstein.dev diff --git a/services/health/wger-user-sync-cronjob.yaml b/services/health/wger-user-sync-cronjob.yaml index 8c846e2..1645256 100644 --- a/services/health/wger-user-sync-cronjob.yaml +++ b/services/health/wger-user-sync-cronjob.yaml @@ -77,7 +77,7 @@ spec: - | set -eu . /vault/secrets/wger-env - exec python /scripts/wger_user_sync.py + exec python3 /scripts/wger_user_sync.py env: - name: SITE_URL value: https://health.bstein.dev From 85c3d9c2f7889a95fb660de0a1ff8e0e5b0cca3e Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Thu, 15 Jan 2026 01:52:24 -0300 Subject: [PATCH 098/270] vault: finalize sidecar migration --- .../applications/kustomization.yaml | 1 + .../wallet-monero-temp/kustomization.yaml | 19 +++++ .../crypto/wallet-monero-temp/deployment.yaml | 82 +++++++++++++++++++ .../wallet-monero-temp/kustomization.yaml | 9 ++ services/crypto/wallet-monero-temp/pvc.yaml | 13 +++ .../secrets-ensure-job.yaml | 37 +++++++++ .../crypto/wallet-monero-temp/service.yaml | 16 ++++ .../wallet-monero-temp/serviceaccount.yaml | 6 ++ .../crypto/xmr-miner/xmrig-daemonset.yaml | 6 -- services/logging/oauth2-proxy.yaml | 2 + services/monitoring/secretproviderclass.yaml | 26 ------ services/oauth2-proxy/deployment.yaml | 2 + services/vault/k8s-auth-config-cronjob.yaml | 7 +- services/vault/oidc-config-cronjob.yaml | 7 +- .../vault/scripts/vault_k8s_auth_configure.sh | 18 ++++ .../vault/scripts/vault_oidc_configure.sh | 15 ++++ 16 files changed, 224 insertions(+), 42 deletions(-) create mode 100644 clusters/atlas/flux-system/applications/wallet-monero-temp/kustomization.yaml create mode 100644 services/crypto/wallet-monero-temp/deployment.yaml create mode 100644 services/crypto/wallet-monero-temp/kustomization.yaml create mode 100644 services/crypto/wallet-monero-temp/pvc.yaml create mode 100644 
services/crypto/wallet-monero-temp/secrets-ensure-job.yaml create mode 100644 services/crypto/wallet-monero-temp/service.yaml create mode 100644 services/crypto/wallet-monero-temp/serviceaccount.yaml diff --git a/clusters/atlas/flux-system/applications/kustomization.yaml b/clusters/atlas/flux-system/applications/kustomization.yaml index cc32c85..c73906e 100644 --- a/clusters/atlas/flux-system/applications/kustomization.yaml +++ b/clusters/atlas/flux-system/applications/kustomization.yaml @@ -16,6 +16,7 @@ resources: - harbor/image-automation.yaml - jellyfin/kustomization.yaml - xmr-miner/kustomization.yaml + - wallet-monero-temp/kustomization.yaml - sui-metrics/kustomization.yaml - openldap/kustomization.yaml - keycloak/kustomization.yaml diff --git a/clusters/atlas/flux-system/applications/wallet-monero-temp/kustomization.yaml b/clusters/atlas/flux-system/applications/wallet-monero-temp/kustomization.yaml new file mode 100644 index 0000000..700e17f --- /dev/null +++ b/clusters/atlas/flux-system/applications/wallet-monero-temp/kustomization.yaml @@ -0,0 +1,19 @@ +# clusters/atlas/flux-system/applications/wallet-monero-temp/kustomization.yaml +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: wallet-monero-temp + namespace: flux-system +spec: + interval: 10m + path: ./services/crypto/wallet-monero-temp + targetNamespace: crypto + prune: true + sourceRef: + kind: GitRepository + name: flux-system + namespace: flux-system + dependsOn: + - name: crypto + - name: xmr-miner + wait: true diff --git a/services/crypto/wallet-monero-temp/deployment.yaml b/services/crypto/wallet-monero-temp/deployment.yaml new file mode 100644 index 0000000..6ac5b62 --- /dev/null +++ b/services/crypto/wallet-monero-temp/deployment.yaml @@ -0,0 +1,82 @@ +# services/crypto/wallet-monero-temp/deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: wallet-monero-temp + namespace: crypto + labels: + app: wallet-monero-temp +spec: + replicas: 1 + 
strategy: + type: Recreate + selector: + matchLabels: + app: wallet-monero-temp + template: + metadata: + labels: + app: wallet-monero-temp + annotations: + vault.hashicorp.com/agent-inject: "true" + vault.hashicorp.com/role: "crypto" + vault.hashicorp.com/agent-inject-secret-wallet-rpc-env.sh: "kv/data/atlas/crypto/wallet-monero-temp-rpc-auth" + vault.hashicorp.com/agent-inject-template-wallet-rpc-env.sh: | + {{- with secret "kv/data/atlas/crypto/wallet-monero-temp-rpc-auth" -}} + export RPC_USER="{{ .Data.data.username }}" + export RPC_PASS="{{ .Data.data.password }}" + {{- end -}} + spec: + serviceAccountName: crypto-vault-sync + automountServiceAccountToken: true + nodeSelector: + node-role.kubernetes.io/worker: "true" + imagePullSecrets: + - name: harbor-regcred + securityContext: + fsGroup: 1000 + fsGroupChangePolicy: OnRootMismatch + initContainers: + - name: volume-permissions + image: busybox:1.36 + imagePullPolicy: IfNotPresent + command: ["/bin/sh", "-lc"] + args: + - chown :1000 /data && chmod 0770 /data + securityContext: + runAsUser: 0 + volumeMounts: + - name: data + mountPath: /data + containers: + - name: wallet-rpc + image: registry.bstein.dev/infra/monero-wallet-rpc:0.18.4.1 + imagePullPolicy: Always + command: ["/bin/sh", "-lc"] + args: + - | + set -eu + . 
/vault/secrets/wallet-rpc-env.sh + exec /usr/local/bin/monero-wallet-rpc \ + --wallet-dir /data \ + --daemon-address xmr-node.cakewallet.com:18081 \ + --rpc-bind-ip 0.0.0.0 --rpc-bind-port 18083 \ + --rpc-login "${RPC_USER}:${RPC_PASS}" \ + --confirm-external-bind + ports: + - containerPort: 18083 + name: rpc + resources: + requests: + cpu: 100m + memory: 128Mi + limits: + cpu: "1" + memory: 512Mi + volumeMounts: + - name: data + mountPath: /data + volumes: + - name: data + persistentVolumeClaim: + claimName: wallet-monero-temp diff --git a/services/crypto/wallet-monero-temp/kustomization.yaml b/services/crypto/wallet-monero-temp/kustomization.yaml new file mode 100644 index 0000000..005be27 --- /dev/null +++ b/services/crypto/wallet-monero-temp/kustomization.yaml @@ -0,0 +1,9 @@ +# services/crypto/wallet-monero-temp/kustomization.yaml +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - pvc.yaml + - serviceaccount.yaml + - secrets-ensure-job.yaml + - deployment.yaml + - service.yaml diff --git a/services/crypto/wallet-monero-temp/pvc.yaml b/services/crypto/wallet-monero-temp/pvc.yaml new file mode 100644 index 0000000..cf0c757 --- /dev/null +++ b/services/crypto/wallet-monero-temp/pvc.yaml @@ -0,0 +1,13 @@ +# services/crypto/wallet-monero-temp/pvc.yaml +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: wallet-monero-temp + namespace: crypto +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 5Gi + storageClassName: astreae diff --git a/services/crypto/wallet-monero-temp/secrets-ensure-job.yaml b/services/crypto/wallet-monero-temp/secrets-ensure-job.yaml new file mode 100644 index 0000000..7d0f25d --- /dev/null +++ b/services/crypto/wallet-monero-temp/secrets-ensure-job.yaml @@ -0,0 +1,37 @@ +# services/crypto/wallet-monero-temp/secrets-ensure-job.yaml +apiVersion: batch/v1 +kind: Job +metadata: + name: wallet-monero-temp-secrets-ensure + namespace: crypto +spec: + backoffLimit: 1 + template: + spec: 
+ serviceAccountName: crypto-secrets-ensure + restartPolicy: OnFailure + containers: + - name: vault-write + image: hashicorp/vault:1.17.6 + imagePullPolicy: IfNotPresent + command: ["/bin/sh", "-c"] + args: + - | + set -euo pipefail + export VAULT_ADDR=http://vault.vault.svc.cluster.local:8200 + VAULT_TOKEN="$(vault write -field=token auth/kubernetes/login role=crypto-secrets jwt=$(cat /var/run/secrets/kubernetes.io/serviceaccount/token))" + export VAULT_TOKEN + vault kv put kv/atlas/crypto/wallet-monero-temp-rpc-auth \ + username="${RPC_USER}" \ + password="${RPC_PASS}" + env: + - name: RPC_USER + valueFrom: + secretKeyRef: + name: wallet-monero-temp-rpc-auth + key: username + - name: RPC_PASS + valueFrom: + secretKeyRef: + name: wallet-monero-temp-rpc-auth + key: password diff --git a/services/crypto/wallet-monero-temp/service.yaml b/services/crypto/wallet-monero-temp/service.yaml new file mode 100644 index 0000000..4bf3566 --- /dev/null +++ b/services/crypto/wallet-monero-temp/service.yaml @@ -0,0 +1,16 @@ +# services/crypto/wallet-monero-temp/service.yaml +apiVersion: v1 +kind: Service +metadata: + name: wallet-monero-temp + namespace: crypto + labels: + app: wallet-monero-temp +spec: + type: ClusterIP + selector: + app: wallet-monero-temp + ports: + - name: rpc + port: 18083 + targetPort: 18083 diff --git a/services/crypto/wallet-monero-temp/serviceaccount.yaml b/services/crypto/wallet-monero-temp/serviceaccount.yaml new file mode 100644 index 0000000..f9ff1fc --- /dev/null +++ b/services/crypto/wallet-monero-temp/serviceaccount.yaml @@ -0,0 +1,6 @@ +# services/crypto/wallet-monero-temp/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: crypto-secrets-ensure + namespace: crypto diff --git a/services/crypto/xmr-miner/xmrig-daemonset.yaml b/services/crypto/xmr-miner/xmrig-daemonset.yaml index 089dcc4..a1ee2ae 100644 --- a/services/crypto/xmr-miner/xmrig-daemonset.yaml +++ b/services/crypto/xmr-miner/xmrig-daemonset.yaml @@ -24,10 +24,6 
@@ spec: - key: hardware operator: In values: ["rpi4","rpi5"] - volumes: - - name: payout - secret: - secretName: monero-payout containers: - name: xmrig image: ghcr.io/tari-project/xmrig@sha256:80defbfd0b640d604c91cb5101d3642db7928e1e68ee3c6b011289b3565a39d9 @@ -51,5 +47,3 @@ spec: --donate-level N \ --cpu-priority 1 \ --threads "${THR}" ${EXTRA} - volumeMounts: - - { name: payout, mountPath: /run/xmr, readOnly: true } diff --git a/services/logging/oauth2-proxy.yaml b/services/logging/oauth2-proxy.yaml index d7891da..104351a 100644 --- a/services/logging/oauth2-proxy.yaml +++ b/services/logging/oauth2-proxy.yaml @@ -62,7 +62,9 @@ spec: - name: oauth2-proxy image: registry.bstein.dev/tools/oauth2-proxy-vault:v7.6.0 imagePullPolicy: IfNotPresent + command: ["/entrypoint.sh"] args: + - /bin/oauth2-proxy - --provider=oidc - --redirect-url=https://logs.bstein.dev/oauth2/callback - --oidc-issuer-url=https://sso.bstein.dev/realms/atlas diff --git a/services/monitoring/secretproviderclass.yaml b/services/monitoring/secretproviderclass.yaml index 3fab887..8a6c5fb 100644 --- a/services/monitoring/secretproviderclass.yaml +++ b/services/monitoring/secretproviderclass.yaml @@ -10,36 +10,10 @@ spec: vaultAddress: "http://vault.vault.svc.cluster.local:8200" roleName: "monitoring" objects: | - - objectName: "grafana-admin__admin-user" - secretPath: "kv/data/atlas/monitoring/grafana-admin" - secretKey: "admin-user" - - objectName: "grafana-admin__admin-password" - secretPath: "kv/data/atlas/monitoring/grafana-admin" - secretKey: "admin-password" - - objectName: "postmark-relay__relay-username" - secretPath: "kv/data/atlas/shared/postmark-relay" - secretKey: "relay-username" - - objectName: "postmark-relay__relay-password" - secretPath: "kv/data/atlas/shared/postmark-relay" - secretKey: "relay-password" - objectName: "harbor-pull__dockerconfigjson" secretPath: "kv/data/atlas/harbor-pull/monitoring" secretKey: "dockerconfigjson" secretObjects: - - secretName: grafana-admin - type: 
Opaque - data: - - objectName: grafana-admin__admin-user - key: admin-user - - objectName: grafana-admin__admin-password - key: admin-password - - secretName: grafana-smtp - type: Opaque - data: - - objectName: postmark-relay__relay-username - key: username - - objectName: postmark-relay__relay-password - key: password - secretName: harbor-regcred type: kubernetes.io/dockerconfigjson data: diff --git a/services/oauth2-proxy/deployment.yaml b/services/oauth2-proxy/deployment.yaml index 64cdd0e..4af5ab1 100644 --- a/services/oauth2-proxy/deployment.yaml +++ b/services/oauth2-proxy/deployment.yaml @@ -44,7 +44,9 @@ spec: - name: oauth2-proxy image: registry.bstein.dev/tools/oauth2-proxy-vault:v7.6.0 imagePullPolicy: IfNotPresent + command: ["/entrypoint.sh"] args: + - /bin/oauth2-proxy - --provider=oidc - --redirect-url=https://auth.bstein.dev/oauth2/callback - --oidc-issuer-url=https://sso.bstein.dev/realms/atlas diff --git a/services/vault/k8s-auth-config-cronjob.yaml b/services/vault/k8s-auth-config-cronjob.yaml index 3b74932..e9ee3e9 100644 --- a/services/vault/k8s-auth-config-cronjob.yaml +++ b/services/vault/k8s-auth-config-cronjob.yaml @@ -29,11 +29,8 @@ spec: env: - name: VAULT_ADDR value: http://vault.vault.svc.cluster.local:8200 - - name: VAULT_TOKEN - valueFrom: - secretKeyRef: - name: vault-oidc-admin-token - key: token + - name: VAULT_K8S_ROLE + value: vault - name: VAULT_K8S_ROLE_TTL value: 1h volumeMounts: diff --git a/services/vault/oidc-config-cronjob.yaml b/services/vault/oidc-config-cronjob.yaml index efe5fee..b143d99 100644 --- a/services/vault/oidc-config-cronjob.yaml +++ b/services/vault/oidc-config-cronjob.yaml @@ -57,11 +57,8 @@ spec: env: - name: VAULT_ADDR value: http://vault.vault.svc.cluster.local:8200 - - name: VAULT_TOKEN - valueFrom: - secretKeyRef: - name: vault-oidc-admin-token - key: token + - name: VAULT_K8S_ROLE + value: vault - name: VAULT_ENV_FILE value: /vault/secrets/vault-oidc-env.sh volumeMounts: diff --git 
a/services/vault/scripts/vault_k8s_auth_configure.sh b/services/vault/scripts/vault_k8s_auth_configure.sh index f0d7833..ed67c9b 100644 --- a/services/vault/scripts/vault_k8s_auth_configure.sh +++ b/services/vault/scripts/vault_k8s_auth_configure.sh @@ -3,6 +3,19 @@ set -eu log() { echo "[vault-k8s-auth] $*"; } +ensure_token() { + if [ -n "${VAULT_TOKEN:-}" ]; then + return + fi + role="${VAULT_K8S_ROLE:-vault}" + jwt="$(cat /var/run/secrets/kubernetes.io/serviceaccount/token)" + if ! VAULT_TOKEN="$(vault write -field=token auth/kubernetes/login role="${role}" jwt="${jwt}")"; then + log "kubernetes auth login failed; set VAULT_TOKEN or fix role ${role}" + exit 1 + fi + export VAULT_TOKEN +} + status_json="$(vault status -format=json || true)" if [ -z "${status_json}" ]; then log "vault status failed; check VAULT_ADDR and VAULT_TOKEN" @@ -19,6 +32,8 @@ if printf '%s' "${status_json}" | grep -q '"sealed":[[:space:]]*true'; then exit 0 fi +ensure_token + k8s_host="https://${KUBERNETES_SERVICE_HOST}:443" k8s_ca="$(cat /var/run/secrets/kubernetes.io/serviceaccount/ca.crt)" k8s_token="$(cat /var/run/secrets/kubernetes.io/serviceaccount/token)" @@ -115,6 +130,9 @@ write_policy_and_role "vault" "vault" "vault" \ write_policy_and_role "sso-secrets" "sso" "mas-secrets-ensure" \ "shared/keycloak-admin" \ "harbor/harbor-oidc vault/vault-oidc-config comms/synapse-oidc logging/oauth2-proxy-logs-oidc" +write_policy_and_role "crypto-secrets" "crypto" "crypto-secrets-ensure" \ + "" \ + "crypto/wallet-monero-temp-rpc-auth" write_policy_and_role "comms-secrets" "comms" \ "comms-secrets-ensure,mas-db-ensure,mas-admin-client-secret-writer,othrys-synapse-signingkey-job" \ "" \ diff --git a/services/vault/scripts/vault_oidc_configure.sh b/services/vault/scripts/vault_oidc_configure.sh index 99f5fd6..4ee91b8 100644 --- a/services/vault/scripts/vault_oidc_configure.sh +++ b/services/vault/scripts/vault_oidc_configure.sh @@ -3,6 +3,19 @@ set -eu log() { echo "[vault-oidc] $*"; } 
+ensure_token() { + if [ -n "${VAULT_TOKEN:-}" ]; then + return + fi + role="${VAULT_K8S_ROLE:-vault}" + jwt="$(cat /var/run/secrets/kubernetes.io/serviceaccount/token)" + if ! VAULT_TOKEN="$(vault write -field=token auth/kubernetes/login role="${role}" jwt="${jwt}")"; then + log "kubernetes auth login failed; set VAULT_TOKEN or fix role ${role}" + exit 1 + fi + export VAULT_TOKEN +} + status_json="$(vault status -format=json || true)" if [ -z "${status_json}" ]; then log "vault status failed; check VAULT_ADDR and VAULT_TOKEN" @@ -19,6 +32,8 @@ if printf '%s' "${status_json}" | grep -q '"sealed":[[:space:]]*true'; then exit 0 fi +ensure_token + : "${VAULT_OIDC_DISCOVERY_URL:?set VAULT_OIDC_DISCOVERY_URL}" : "${VAULT_OIDC_CLIENT_ID:?set VAULT_OIDC_CLIENT_ID}" : "${VAULT_OIDC_CLIENT_SECRET:?set VAULT_OIDC_CLIENT_SECRET}" From 86c9951cc46d8f66bb6be8dd268cc9449f715f93 Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Thu, 15 Jan 2026 02:06:28 -0300 Subject: [PATCH 099/270] vault: add admin role for config jobs --- services/vault/k8s-auth-config-cronjob.yaml | 4 +- services/vault/kustomization.yaml | 1 + services/vault/oidc-config-cronjob.yaml | 6 +-- .../vault/scripts/vault_k8s_auth_configure.sh | 42 +++++++++++++++++++ services/vault/serviceaccount-admin.yaml | 6 +++ 5 files changed, 54 insertions(+), 5 deletions(-) create mode 100644 services/vault/serviceaccount-admin.yaml diff --git a/services/vault/k8s-auth-config-cronjob.yaml b/services/vault/k8s-auth-config-cronjob.yaml index e9ee3e9..e71570f 100644 --- a/services/vault/k8s-auth-config-cronjob.yaml +++ b/services/vault/k8s-auth-config-cronjob.yaml @@ -14,7 +14,7 @@ spec: backoffLimit: 1 template: spec: - serviceAccountName: vault + serviceAccountName: vault-admin restartPolicy: Never nodeSelector: kubernetes.io/arch: arm64 @@ -30,7 +30,7 @@ spec: - name: VAULT_ADDR value: http://vault.vault.svc.cluster.local:8200 - name: VAULT_K8S_ROLE - value: vault + value: vault-admin - name: VAULT_K8S_ROLE_TTL value: 1h 
volumeMounts: diff --git a/services/vault/kustomization.yaml b/services/vault/kustomization.yaml index 6381404..e9f15c1 100644 --- a/services/vault/kustomization.yaml +++ b/services/vault/kustomization.yaml @@ -5,6 +5,7 @@ namespace: vault resources: - namespace.yaml - serviceaccount.yaml + - serviceaccount-admin.yaml - rbac.yaml - configmap.yaml - statefulset.yaml diff --git a/services/vault/oidc-config-cronjob.yaml b/services/vault/oidc-config-cronjob.yaml index b143d99..4f879d0 100644 --- a/services/vault/oidc-config-cronjob.yaml +++ b/services/vault/oidc-config-cronjob.yaml @@ -16,7 +16,7 @@ spec: metadata: annotations: vault.hashicorp.com/agent-inject: "true" - vault.hashicorp.com/role: "vault" + vault.hashicorp.com/role: "vault-admin" vault.hashicorp.com/agent-inject-secret-vault-oidc-env.sh: "kv/data/atlas/vault/vault-oidc-config" vault.hashicorp.com/agent-inject-template-vault-oidc-env.sh: | {{ with secret "kv/data/atlas/vault/vault-oidc-config" }} @@ -40,7 +40,7 @@ spec: export VAULT_OIDC_BOUND_CLAIMS_TYPE="{{ .Data.data.bound_claims_type }}" {{ end }} spec: - serviceAccountName: vault + serviceAccountName: vault-admin restartPolicy: Never nodeSelector: kubernetes.io/arch: arm64 @@ -58,7 +58,7 @@ spec: - name: VAULT_ADDR value: http://vault.vault.svc.cluster.local:8200 - name: VAULT_K8S_ROLE - value: vault + value: vault-admin - name: VAULT_ENV_FILE value: /vault/secrets/vault-oidc-env.sh volumeMounts: diff --git a/services/vault/scripts/vault_k8s_auth_configure.sh b/services/vault/scripts/vault_k8s_auth_configure.sh index ed67c9b..2bc9166 100644 --- a/services/vault/scripts/vault_k8s_auth_configure.sh +++ b/services/vault/scripts/vault_k8s_auth_configure.sh @@ -50,6 +50,13 @@ vault write auth/kubernetes/config \ kubernetes_host="${k8s_host}" \ kubernetes_ca_cert="${k8s_ca}" +write_raw_policy() { + name="$1" + body="$2" + log "writing policy ${name}" + printf '%s\n' "${body}" | vault policy write "${name}" - +} + write_policy_and_role() { role="$1" 
namespace="$2" @@ -90,6 +97,41 @@ path \"kv/metadata/atlas/${path}\" { ttl="${role_ttl}" } +vault_admin_policy=' +path "sys/auth" { + capabilities = ["read"] +} +path "sys/auth/*" { + capabilities = ["create", "update", "delete", "sudo", "read"] +} +path "auth/kubernetes/*" { + capabilities = ["create", "update", "read"] +} +path "auth/oidc/*" { + capabilities = ["create", "update", "read"] +} +path "sys/policies/acl" { + capabilities = ["list"] +} +path "sys/policies/acl/*" { + capabilities = ["create", "update", "read"] +} +path "kv/data/atlas/vault/*" { + capabilities = ["read"] +} +path "kv/metadata/atlas/vault/*" { + capabilities = ["list"] +} +' + +write_raw_policy "vault-admin" "${vault_admin_policy}" +log "writing role vault-admin" +vault write "auth/kubernetes/role/vault-admin" \ + bound_service_account_names="vault-admin" \ + bound_service_account_namespaces="vault" \ + policies="vault-admin" \ + ttl="${role_ttl}" + write_policy_and_role "outline" "outline" "outline-vault" \ "outline/* shared/postmark-relay" "" write_policy_and_role "planka" "planka" "planka-vault" \ diff --git a/services/vault/serviceaccount-admin.yaml b/services/vault/serviceaccount-admin.yaml new file mode 100644 index 0000000..a9072bb --- /dev/null +++ b/services/vault/serviceaccount-admin.yaml @@ -0,0 +1,6 @@ +# services/vault/serviceaccount-admin.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: vault-admin + namespace: vault From e885c7d6ce926ffd8abfd9ff9485a119a48aecf3 Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Thu, 15 Jan 2026 02:09:34 -0300 Subject: [PATCH 100/270] vault: allow vault-admin token review --- services/vault/rbac.yaml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/services/vault/rbac.yaml b/services/vault/rbac.yaml index d1caa18..01dc405 100644 --- a/services/vault/rbac.yaml +++ b/services/vault/rbac.yaml @@ -11,3 +11,6 @@ subjects: - kind: ServiceAccount name: vault namespace: vault + - kind: ServiceAccount + name: vault-admin + namespace: 
vault From 84ccf35c44899b59a39eeb51d561eed0c402c1ec Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Thu, 15 Jan 2026 02:12:52 -0300 Subject: [PATCH 101/270] flux: auto-update portal images on feature branch --- .../applications/bstein-dev-home/image-automation.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/clusters/atlas/flux-system/applications/bstein-dev-home/image-automation.yaml b/clusters/atlas/flux-system/applications/bstein-dev-home/image-automation.yaml index e198db4..88dda40 100644 --- a/clusters/atlas/flux-system/applications/bstein-dev-home/image-automation.yaml +++ b/clusters/atlas/flux-system/applications/bstein-dev-home/image-automation.yaml @@ -13,14 +13,14 @@ spec: git: checkout: ref: - branch: main + branch: feature/vault-consumption commit: author: email: ops@bstein.dev name: flux-bot messageTemplate: "chore(bstein-dev-home): update images to {{range .Updated.Images}}{{.}}{{end}}" push: - branch: main + branch: feature/vault-consumption update: strategy: Setters path: services/bstein-dev-home From 74a2b3e28dc5e823e7673beb71dd3900eabf1e79 Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Thu, 15 Jan 2026 02:14:08 -0300 Subject: [PATCH 102/270] vault: use static token reviewer --- services/vault/k8s-auth-config-cronjob.yaml | 8 ++++++++ services/vault/kustomization.yaml | 1 + services/vault/scripts/vault_k8s_auth_configure.sh | 10 +++++++++- services/vault/token-reviewer-secret.yaml | 9 +++++++++ 4 files changed, 27 insertions(+), 1 deletion(-) create mode 100644 services/vault/token-reviewer-secret.yaml diff --git a/services/vault/k8s-auth-config-cronjob.yaml b/services/vault/k8s-auth-config-cronjob.yaml index e71570f..a49fe7d 100644 --- a/services/vault/k8s-auth-config-cronjob.yaml +++ b/services/vault/k8s-auth-config-cronjob.yaml @@ -31,14 +31,22 @@ spec: value: http://vault.vault.svc.cluster.local:8200 - name: VAULT_K8S_ROLE value: vault-admin + - name: VAULT_K8S_TOKEN_REVIEWER_JWT_FILE + value: 
/var/run/secrets/vault-token-reviewer/token - name: VAULT_K8S_ROLE_TTL value: 1h volumeMounts: - name: k8s-auth-config-script mountPath: /scripts readOnly: true + - name: token-reviewer + mountPath: /var/run/secrets/vault-token-reviewer + readOnly: true volumes: - name: k8s-auth-config-script configMap: name: vault-k8s-auth-config-script defaultMode: 0555 + - name: token-reviewer + secret: + secretName: vault-admin-token-reviewer diff --git a/services/vault/kustomization.yaml b/services/vault/kustomization.yaml index e9f15c1..060077b 100644 --- a/services/vault/kustomization.yaml +++ b/services/vault/kustomization.yaml @@ -6,6 +6,7 @@ resources: - namespace.yaml - serviceaccount.yaml - serviceaccount-admin.yaml + - token-reviewer-secret.yaml - rbac.yaml - configmap.yaml - statefulset.yaml diff --git a/services/vault/scripts/vault_k8s_auth_configure.sh b/services/vault/scripts/vault_k8s_auth_configure.sh index 2bc9166..ce9533c 100644 --- a/services/vault/scripts/vault_k8s_auth_configure.sh +++ b/services/vault/scripts/vault_k8s_auth_configure.sh @@ -38,6 +38,14 @@ k8s_host="https://${KUBERNETES_SERVICE_HOST}:443" k8s_ca="$(cat /var/run/secrets/kubernetes.io/serviceaccount/ca.crt)" k8s_token="$(cat /var/run/secrets/kubernetes.io/serviceaccount/token)" role_ttl="${VAULT_K8S_ROLE_TTL:-1h}" +token_reviewer_jwt="${VAULT_K8S_TOKEN_REVIEWER_JWT:-}" + +if [ -z "${token_reviewer_jwt}" ] && [ -n "${VAULT_K8S_TOKEN_REVIEWER_JWT_FILE:-}" ] && [ -r "${VAULT_K8S_TOKEN_REVIEWER_JWT_FILE}" ]; then + token_reviewer_jwt="$(cat "${VAULT_K8S_TOKEN_REVIEWER_JWT_FILE}")" +fi +if [ -z "${token_reviewer_jwt}" ]; then + token_reviewer_jwt="${k8s_token}" +fi if ! 
vault auth list -format=json | grep -q '"kubernetes/"'; then log "enabling kubernetes auth" @@ -46,7 +54,7 @@ fi log "configuring kubernetes auth" vault write auth/kubernetes/config \ - token_reviewer_jwt="${k8s_token}" \ + token_reviewer_jwt="${token_reviewer_jwt}" \ kubernetes_host="${k8s_host}" \ kubernetes_ca_cert="${k8s_ca}" diff --git a/services/vault/token-reviewer-secret.yaml b/services/vault/token-reviewer-secret.yaml new file mode 100644 index 0000000..db6bd34 --- /dev/null +++ b/services/vault/token-reviewer-secret.yaml @@ -0,0 +1,9 @@ +# services/vault/token-reviewer-secret.yaml +apiVersion: v1 +kind: Secret +metadata: + name: vault-admin-token-reviewer + namespace: vault + annotations: + kubernetes.io/service-account.name: vault-admin +type: kubernetes.io/service-account-token From 756a1af2e688029b3c4afd36fd0390538373620f Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Thu, 15 Jan 2026 02:16:55 -0300 Subject: [PATCH 103/270] vault: allow oidc tuning --- services/vault/scripts/vault_k8s_auth_configure.sh | 3 +++ 1 file changed, 3 insertions(+) diff --git a/services/vault/scripts/vault_k8s_auth_configure.sh b/services/vault/scripts/vault_k8s_auth_configure.sh index ce9533c..d47ebb5 100644 --- a/services/vault/scripts/vault_k8s_auth_configure.sh +++ b/services/vault/scripts/vault_k8s_auth_configure.sh @@ -124,6 +124,9 @@ path "sys/policies/acl" { path "sys/policies/acl/*" { capabilities = ["create", "update", "read"] } +path "sys/mounts/auth/*" { + capabilities = ["read", "update", "sudo"] +} path "kv/data/atlas/vault/*" { capabilities = ["read"] } From d69545cdb527411c38fa62209ab7c2f6aff965e5 Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Thu, 15 Jan 2026 02:18:50 -0300 Subject: [PATCH 104/270] vault: harden oidc claims type --- services/vault/scripts/vault_oidc_configure.sh | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/services/vault/scripts/vault_oidc_configure.sh b/services/vault/scripts/vault_oidc_configure.sh index 4ee91b8..d417af2 100644 
--- a/services/vault/scripts/vault_oidc_configure.sh +++ b/services/vault/scripts/vault_oidc_configure.sh @@ -45,6 +45,10 @@ groups_claim="${VAULT_OIDC_GROUPS_CLAIM:-groups}" redirect_uris="${VAULT_OIDC_REDIRECT_URIS:-https://secret.bstein.dev/ui/vault/auth/oidc/oidc/callback}" bound_audiences="${VAULT_OIDC_BOUND_AUDIENCES:-${VAULT_OIDC_CLIENT_ID}}" bound_claims_type="${VAULT_OIDC_BOUND_CLAIMS_TYPE:-string}" +bound_claims_type="$(printf '%s' "${bound_claims_type}" | tr -d '[:space:]')" +if [ -z "${bound_claims_type}" ]; then + bound_claims_type="string" +fi admin_group="${VAULT_OIDC_ADMIN_GROUP:-admin}" admin_policies="${VAULT_OIDC_ADMIN_POLICIES:-default,vault-admin}" From 9e6673d02ecf38b2981b740ea8addbdb16814371 Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Thu, 15 Jan 2026 02:20:53 -0300 Subject: [PATCH 105/270] vault: default oidc claims type --- services/vault/scripts/vault_oidc_configure.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/services/vault/scripts/vault_oidc_configure.sh b/services/vault/scripts/vault_oidc_configure.sh index d417af2..af74f60 100644 --- a/services/vault/scripts/vault_oidc_configure.sh +++ b/services/vault/scripts/vault_oidc_configure.sh @@ -46,7 +46,7 @@ redirect_uris="${VAULT_OIDC_REDIRECT_URIS:-https://secret.bstein.dev/ui/vault/au bound_audiences="${VAULT_OIDC_BOUND_AUDIENCES:-${VAULT_OIDC_CLIENT_ID}}" bound_claims_type="${VAULT_OIDC_BOUND_CLAIMS_TYPE:-string}" bound_claims_type="$(printf '%s' "${bound_claims_type}" | tr -d '[:space:]')" -if [ -z "${bound_claims_type}" ]; then +if [ -z "${bound_claims_type}" ] || [ "${bound_claims_type}" = "" ]; then bound_claims_type="string" fi From feb9d6997c71e9f90426561e50e5417e4f5270d2 Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Thu, 15 Jan 2026 02:22:52 -0300 Subject: [PATCH 106/270] vault: prepopulate oidc job --- services/vault/oidc-config-cronjob.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/services/vault/oidc-config-cronjob.yaml 
b/services/vault/oidc-config-cronjob.yaml index 4f879d0..6d98ecb 100644 --- a/services/vault/oidc-config-cronjob.yaml +++ b/services/vault/oidc-config-cronjob.yaml @@ -16,6 +16,7 @@ spec: metadata: annotations: vault.hashicorp.com/agent-inject: "true" + vault.hashicorp.com/agent-pre-populate-only: "true" vault.hashicorp.com/role: "vault-admin" vault.hashicorp.com/agent-inject-secret-vault-oidc-env.sh: "kv/data/atlas/vault/vault-oidc-config" vault.hashicorp.com/agent-inject-template-vault-oidc-env.sh: | From 2ecd274f280e16a924a0d69433a29d8f0be379e5 Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Thu, 15 Jan 2026 02:26:54 -0300 Subject: [PATCH 107/270] crypto: fix wallet rpc image --- services/crypto/wallet-monero-temp/deployment.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/services/crypto/wallet-monero-temp/deployment.yaml b/services/crypto/wallet-monero-temp/deployment.yaml index 6ac5b62..4b73f64 100644 --- a/services/crypto/wallet-monero-temp/deployment.yaml +++ b/services/crypto/wallet-monero-temp/deployment.yaml @@ -50,7 +50,7 @@ spec: mountPath: /data containers: - name: wallet-rpc - image: registry.bstein.dev/infra/monero-wallet-rpc:0.18.4.1 + image: registry.bstein.dev/crypto/monero-wallet-rpc:0.18.4.1 imagePullPolicy: Always command: ["/bin/sh", "-lc"] args: From f9fa6dcbb45007e446be3228afec291f9f83cf8d Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Thu, 15 Jan 2026 02:31:31 -0300 Subject: [PATCH 108/270] crypto: drop wallet rpc bootstrap job --- .../wallet-monero-temp/kustomization.yaml | 2 - .../secrets-ensure-job.yaml | 37 ------------------- .../wallet-monero-temp/serviceaccount.yaml | 6 --- 3 files changed, 45 deletions(-) delete mode 100644 services/crypto/wallet-monero-temp/secrets-ensure-job.yaml delete mode 100644 services/crypto/wallet-monero-temp/serviceaccount.yaml diff --git a/services/crypto/wallet-monero-temp/kustomization.yaml b/services/crypto/wallet-monero-temp/kustomization.yaml index 005be27..6236858 100644 --- 
a/services/crypto/wallet-monero-temp/kustomization.yaml +++ b/services/crypto/wallet-monero-temp/kustomization.yaml @@ -3,7 +3,5 @@ apiVersion: kustomize.config.k8s.io/v1beta1 kind: Kustomization resources: - pvc.yaml - - serviceaccount.yaml - - secrets-ensure-job.yaml - deployment.yaml - service.yaml diff --git a/services/crypto/wallet-monero-temp/secrets-ensure-job.yaml b/services/crypto/wallet-monero-temp/secrets-ensure-job.yaml deleted file mode 100644 index 7d0f25d..0000000 --- a/services/crypto/wallet-monero-temp/secrets-ensure-job.yaml +++ /dev/null @@ -1,37 +0,0 @@ -# services/crypto/wallet-monero-temp/secrets-ensure-job.yaml -apiVersion: batch/v1 -kind: Job -metadata: - name: wallet-monero-temp-secrets-ensure - namespace: crypto -spec: - backoffLimit: 1 - template: - spec: - serviceAccountName: crypto-secrets-ensure - restartPolicy: OnFailure - containers: - - name: vault-write - image: hashicorp/vault:1.17.6 - imagePullPolicy: IfNotPresent - command: ["/bin/sh", "-c"] - args: - - | - set -euo pipefail - export VAULT_ADDR=http://vault.vault.svc.cluster.local:8200 - VAULT_TOKEN="$(vault write -field=token auth/kubernetes/login role=crypto-secrets jwt=$(cat /var/run/secrets/kubernetes.io/serviceaccount/token))" - export VAULT_TOKEN - vault kv put kv/atlas/crypto/wallet-monero-temp-rpc-auth \ - username="${RPC_USER}" \ - password="${RPC_PASS}" - env: - - name: RPC_USER - valueFrom: - secretKeyRef: - name: wallet-monero-temp-rpc-auth - key: username - - name: RPC_PASS - valueFrom: - secretKeyRef: - name: wallet-monero-temp-rpc-auth - key: password diff --git a/services/crypto/wallet-monero-temp/serviceaccount.yaml b/services/crypto/wallet-monero-temp/serviceaccount.yaml deleted file mode 100644 index f9ff1fc..0000000 --- a/services/crypto/wallet-monero-temp/serviceaccount.yaml +++ /dev/null @@ -1,6 +0,0 @@ -# services/crypto/wallet-monero-temp/serviceaccount.yaml -apiVersion: v1 -kind: ServiceAccount -metadata: - name: crypto-secrets-ensure - namespace: crypto 
From 53da4c20ab75ec20472330864f6b43ef9403727b Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Thu, 15 Jan 2026 02:37:04 -0300 Subject: [PATCH 109/270] keycloak: stop writing oauth2-proxy secret --- services/keycloak/logs-oidc-secret-ensure-job.yaml | 11 +++-------- 1 file changed, 3 insertions(+), 8 deletions(-) diff --git a/services/keycloak/logs-oidc-secret-ensure-job.yaml b/services/keycloak/logs-oidc-secret-ensure-job.yaml index f3fcaa3..43177ff 100644 --- a/services/keycloak/logs-oidc-secret-ensure-job.yaml +++ b/services/keycloak/logs-oidc-secret-ensure-job.yaml @@ -2,7 +2,7 @@ apiVersion: batch/v1 kind: Job metadata: - name: logs-oidc-secret-ensure-7 + name: logs-oidc-secret-ensure-8 namespace: sso spec: backoffLimit: 0 @@ -31,7 +31,7 @@ spec: - | set -euo pipefail . /vault/secrets/keycloak-admin-env.sh - apk add --no-cache curl jq kubectl openssl >/dev/null + apk add --no-cache curl jq openssl >/dev/null KC_URL="http://keycloak.sso.svc.cluster.local" ACCESS_TOKEN="" @@ -116,10 +116,5 @@ spec: '{data:{client_id:$client_id,client_secret:$client_secret,cookie_secret:$cookie_secret}}')" curl -sS -X POST -H "X-Vault-Token: ${vault_token}" \ -d "${payload}" "${vault_addr}/v1/kv/data/atlas/logging/oauth2-proxy-logs-oidc" >/dev/null - kubectl -n logging create secret generic oauth2-proxy-logs-oidc \ - --from-literal=client_id="logs" \ - --from-literal=client_secret="${CLIENT_SECRET}" \ - --from-literal=cookie_secret="${COOKIE_SECRET}" \ - --dry-run=client -o yaml | kubectl -n logging apply -f - >/dev/null volumeMounts: - volumes: \ No newline at end of file + volumes: From fb992f0cff3b832329d716a834b5b04e4236ae5e Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Thu, 15 Jan 2026 02:59:21 -0300 Subject: [PATCH 110/270] logging: move data-prepper pipeline to configmap --- .../logging/data-prepper-helmrelease.yaml | 53 +++++++------------ .../data-prepper-pipeline-configmap.yaml | 39 ++++++++++++++ services/logging/kustomization.yaml | 1 + 3 files changed, 60 
insertions(+), 33 deletions(-) create mode 100644 services/logging/data-prepper-pipeline-configmap.yaml diff --git a/services/logging/data-prepper-helmrelease.yaml b/services/logging/data-prepper-helmrelease.yaml index 73984f5..fad02f7 100644 --- a/services/logging/data-prepper-helmrelease.yaml +++ b/services/logging/data-prepper-helmrelease.yaml @@ -27,39 +27,9 @@ spec: data-prepper-config.yaml: | ssl: false pipelineConfig: - enabled: true - config: - entry-pipeline: - delay: "100" - source: - otel_trace_source: - ssl: false - sink: - - pipeline: - name: "raw-pipeline" - - pipeline: - name: "service-map-pipeline" - raw-pipeline: - source: - pipeline: - name: "entry-pipeline" - processor: - - otel_traces: - sink: - - opensearch: - hosts: ["http://opensearch-master.logging.svc.cluster.local:9200"] - index_type: trace-analytics-raw - service-map-pipeline: - delay: "100" - source: - pipeline: - name: "entry-pipeline" - processor: - - service_map: - sink: - - opensearch: - hosts: ["http://opensearch-master.logging.svc.cluster.local:9200"] - index_type: trace-analytics-service-map + demoPipeline: false + enabled: false + existingSecret: data-prepper-pipeline resources: requests: cpu: "200m" @@ -78,3 +48,20 @@ spec: operator: In values: - rpi5 + postRenderers: + - kustomize: + patches: + - target: + kind: Deployment + name: data-prepper + namespace: logging + patch: |- + - op: replace + path: /spec/template/spec/volumes + value: + - name: data-prepper-config + configMap: + name: data-prepper-config + - name: data-prepper-pipelines + configMap: + name: data-prepper-pipeline diff --git a/services/logging/data-prepper-pipeline-configmap.yaml b/services/logging/data-prepper-pipeline-configmap.yaml new file mode 100644 index 0000000..13ca326 --- /dev/null +++ b/services/logging/data-prepper-pipeline-configmap.yaml @@ -0,0 +1,39 @@ +# services/logging/data-prepper-pipeline-configmap.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: data-prepper-pipeline + namespace: 
logging +data: + pipelines.yaml: | + entry-pipeline: + delay: "100" + source: + otel_trace_source: + ssl: false + sink: + - pipeline: + name: "raw-pipeline" + - pipeline: + name: "service-map-pipeline" + raw-pipeline: + source: + pipeline: + name: "entry-pipeline" + processor: + - otel_traces: + sink: + - opensearch: + hosts: ["http://opensearch-master.logging.svc.cluster.local:9200"] + index_type: trace-analytics-raw + service-map-pipeline: + delay: "100" + source: + pipeline: + name: "entry-pipeline" + processor: + - service_map: + sink: + - opensearch: + hosts: ["http://opensearch-master.logging.svc.cluster.local:9200"] + index_type: trace-analytics-service-map diff --git a/services/logging/kustomization.yaml b/services/logging/kustomization.yaml index d1c2852..394a248 100644 --- a/services/logging/kustomization.yaml +++ b/services/logging/kustomization.yaml @@ -10,6 +10,7 @@ resources: - node-image-prune-rpi5-serviceaccount.yaml - vault-serviceaccount.yaml - secretproviderclass.yaml + - data-prepper-pipeline-configmap.yaml - opensearch-pvc.yaml - opensearch-helmrelease.yaml - opensearch-dashboards-helmrelease.yaml From 72d49f88fec5668661c57f782e4473bd56292c60 Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Thu, 15 Jan 2026 03:08:01 -0300 Subject: [PATCH 111/270] nextcloud: fix cronjob shell flags --- services/nextcloud-mail-sync/cronjob.yaml | 4 ++-- services/nextcloud/maintenance-cronjob.yaml | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/services/nextcloud-mail-sync/cronjob.yaml b/services/nextcloud-mail-sync/cronjob.yaml index 6f38778..71aaef4 100644 --- a/services/nextcloud-mail-sync/cronjob.yaml +++ b/services/nextcloud-mail-sync/cronjob.yaml @@ -85,7 +85,7 @@ spec: subPath: sync.sh args: - | - set -euo pipefail + set -eu . 
/vault/secrets/nextcloud-env.sh exec /sync/sync.sh volumes: @@ -104,4 +104,4 @@ spec: - name: sync-script configMap: name: nextcloud-mail-sync-script - defaultMode: 0755 \ No newline at end of file + defaultMode: 0755 diff --git a/services/nextcloud/maintenance-cronjob.yaml b/services/nextcloud/maintenance-cronjob.yaml index 1ace3fc..cc9720f 100644 --- a/services/nextcloud/maintenance-cronjob.yaml +++ b/services/nextcloud/maintenance-cronjob.yaml @@ -53,7 +53,7 @@ spec: command: ["/bin/sh", "-c"] args: - | - set -euo pipefail + set -eu . /vault/secrets/nextcloud-env.sh exec /maintenance/maintenance.sh env: @@ -94,4 +94,4 @@ spec: - name: maintenance-script configMap: name: nextcloud-maintenance-script - defaultMode: 0755 \ No newline at end of file + defaultMode: 0755 From a7998fc0bfdd5ebf97782dc4571b030093ab6d2c Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Thu, 15 Jan 2026 03:11:57 -0300 Subject: [PATCH 112/270] bstein-dev-home: restore image automation setters --- services/bstein-dev-home/backend-deployment.yaml | 2 +- services/bstein-dev-home/frontend-deployment.yaml | 2 +- services/bstein-dev-home/kustomization.yaml | 5 +++++ services/bstein-dev-home/vaultwarden-cred-sync-cronjob.yaml | 2 +- 4 files changed, 8 insertions(+), 3 deletions(-) diff --git a/services/bstein-dev-home/backend-deployment.yaml b/services/bstein-dev-home/backend-deployment.yaml index abf3034..6e7b40f 100644 --- a/services/bstein-dev-home/backend-deployment.yaml +++ b/services/bstein-dev-home/backend-deployment.yaml @@ -43,7 +43,7 @@ spec: - name: harbor-regcred containers: - name: backend - image: registry.bstein.dev/bstein/bstein-dev-home-backend:0.1.1-95 # {"$imagepolicy": "bstein-dev-home:bstein-dev-home-backend"} + image: registry.bstein.dev/bstein/bstein-dev-home-backend:0.1.1-95 imagePullPolicy: Always command: ["/bin/sh", "-c"] args: diff --git a/services/bstein-dev-home/frontend-deployment.yaml b/services/bstein-dev-home/frontend-deployment.yaml index 642ca06..ef26e73 100644 
--- a/services/bstein-dev-home/frontend-deployment.yaml +++ b/services/bstein-dev-home/frontend-deployment.yaml @@ -22,7 +22,7 @@ spec: - name: harbor-regcred containers: - name: frontend - image: registry.bstein.dev/bstein/bstein-dev-home-frontend:0.1.1-95 # {"$imagepolicy": "bstein-dev-home:bstein-dev-home-frontend"} + image: registry.bstein.dev/bstein/bstein-dev-home-frontend:0.1.1-95 imagePullPolicy: Always ports: - name: http diff --git a/services/bstein-dev-home/kustomization.yaml b/services/bstein-dev-home/kustomization.yaml index 31e1d41..7efaa91 100644 --- a/services/bstein-dev-home/kustomization.yaml +++ b/services/bstein-dev-home/kustomization.yaml @@ -18,6 +18,11 @@ resources: - vaultwarden-cred-sync-cronjob.yaml - portal-onboarding-e2e-test-job.yaml - ingress.yaml +images: + - name: registry.bstein.dev/bstein/bstein-dev-home-frontend + newTag: 0.1.1-95 # {"$imagepolicy": "bstein-dev-home:bstein-dev-home-frontend"} + - name: registry.bstein.dev/bstein/bstein-dev-home-backend + newTag: 0.1.1-95 # {"$imagepolicy": "bstein-dev-home:bstein-dev-home-backend"} configMapGenerator: - name: chat-ai-gateway namespace: bstein-dev-home diff --git a/services/bstein-dev-home/vaultwarden-cred-sync-cronjob.yaml b/services/bstein-dev-home/vaultwarden-cred-sync-cronjob.yaml index 8e835eb..1960d11 100644 --- a/services/bstein-dev-home/vaultwarden-cred-sync-cronjob.yaml +++ b/services/bstein-dev-home/vaultwarden-cred-sync-cronjob.yaml @@ -44,7 +44,7 @@ spec: - name: harbor-regcred containers: - name: sync - image: registry.bstein.dev/bstein/bstein-dev-home-backend:0.1.1-95 # {"$imagepolicy": "bstein-dev-home:bstein-dev-home-backend"} + image: registry.bstein.dev/bstein/bstein-dev-home-backend:0.1.1-95 imagePullPolicy: Always command: ["/bin/sh", "-c"] args: From 9a9ecc49034b60db7a5f46cbe08d2f0e90b82e9e Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Thu, 15 Jan 2026 03:12:13 -0300 Subject: [PATCH 113/270] logging: patch data-prepper volume to configmap --- 
services/logging/data-prepper-helmrelease.yaml | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/services/logging/data-prepper-helmrelease.yaml b/services/logging/data-prepper-helmrelease.yaml index fad02f7..25359b9 100644 --- a/services/logging/data-prepper-helmrelease.yaml +++ b/services/logging/data-prepper-helmrelease.yaml @@ -50,11 +50,12 @@ spec: - rpi5 postRenderers: - kustomize: - patches: + patchesJson6902: - target: + group: apps + version: v1 kind: Deployment name: data-prepper - namespace: logging patch: |- - op: replace path: /spec/template/spec/volumes From 4dba510d6fe41c3e502188fef2f3a81a8372edb3 Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Thu, 15 Jan 2026 03:14:07 -0300 Subject: [PATCH 114/270] logging: replace pipeline volume with configmap --- .../logging/data-prepper-helmrelease.yaml | 29 +++++++++---------- 1 file changed, 14 insertions(+), 15 deletions(-) diff --git a/services/logging/data-prepper-helmrelease.yaml b/services/logging/data-prepper-helmrelease.yaml index 25359b9..aa83fa3 100644 --- a/services/logging/data-prepper-helmrelease.yaml +++ b/services/logging/data-prepper-helmrelease.yaml @@ -50,19 +50,18 @@ spec: - rpi5 postRenderers: - kustomize: - patchesJson6902: - - target: - group: apps - version: v1 - kind: Deployment + patchesStrategicMerge: + - | + apiVersion: apps/v1 + kind: Deployment + metadata: name: data-prepper - patch: |- - - op: replace - path: /spec/template/spec/volumes - value: - - name: data-prepper-config - configMap: - name: data-prepper-config - - name: data-prepper-pipelines - configMap: - name: data-prepper-pipeline + namespace: logging + spec: + template: + spec: + volumes: + - name: data-prepper-pipelines + $patch: replace + configMap: + name: data-prepper-pipeline From 88f862e18ade85bde3d609ef4eb13f79ce22fa92 Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Thu, 15 Jan 2026 03:17:07 -0300 Subject: [PATCH 115/270] logging: switch data-prepper volume to configmap --- 
.../logging/data-prepper-helmrelease.yaml | 25 +++++++++---------- 1 file changed, 12 insertions(+), 13 deletions(-) diff --git a/services/logging/data-prepper-helmrelease.yaml b/services/logging/data-prepper-helmrelease.yaml index aa83fa3..365534b 100644 --- a/services/logging/data-prepper-helmrelease.yaml +++ b/services/logging/data-prepper-helmrelease.yaml @@ -50,18 +50,17 @@ spec: - rpi5 postRenderers: - kustomize: - patchesStrategicMerge: - - | - apiVersion: apps/v1 - kind: Deployment - metadata: + patches: + - target: + group: apps + version: v1 + kind: Deployment name: data-prepper namespace: logging - spec: - template: - spec: - volumes: - - name: data-prepper-pipelines - $patch: replace - configMap: - name: data-prepper-pipeline + patch: |- + - op: remove + path: /spec/template/spec/volumes/1/secret + - op: add + path: /spec/template/spec/volumes/1/configMap + value: + name: data-prepper-pipeline From c7fa52ab2781178340ab75b8c848192d2aa20090 Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Thu, 15 Jan 2026 03:23:42 -0300 Subject: [PATCH 116/270] logging: use strategic patch for pipeline volume --- .../logging/data-prepper-helmrelease.yaml | 19 +++++++++++++------ 1 file changed, 13 insertions(+), 6 deletions(-) diff --git a/services/logging/data-prepper-helmrelease.yaml b/services/logging/data-prepper-helmrelease.yaml index 365534b..63296b2 100644 --- a/services/logging/data-prepper-helmrelease.yaml +++ b/services/logging/data-prepper-helmrelease.yaml @@ -58,9 +58,16 @@ spec: name: data-prepper namespace: logging patch: |- - - op: remove - path: /spec/template/spec/volumes/1/secret - - op: add - path: /spec/template/spec/volumes/1/configMap - value: - name: data-prepper-pipeline + apiVersion: apps/v1 + kind: Deployment + metadata: + name: data-prepper + namespace: logging + spec: + template: + spec: + volumes: + - name: data-prepper-pipelines + $patch: replace + configMap: + name: data-prepper-pipeline From 76151a082c961bac5a028cc37ac0ce5817070420 Mon Sep 
17 00:00:00 2001 From: Brad Stein Date: Thu, 15 Jan 2026 03:25:33 -0300 Subject: [PATCH 117/270] logging: simplify data-prepper patch --- services/logging/data-prepper-helmrelease.yaml | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/services/logging/data-prepper-helmrelease.yaml b/services/logging/data-prepper-helmrelease.yaml index 63296b2..eaeb683 100644 --- a/services/logging/data-prepper-helmrelease.yaml +++ b/services/logging/data-prepper-helmrelease.yaml @@ -51,13 +51,7 @@ spec: postRenderers: - kustomize: patches: - - target: - group: apps - version: v1 - kind: Deployment - name: data-prepper - namespace: logging - patch: |- + - patch: |- apiVersion: apps/v1 kind: Deployment metadata: From 760c9cbe6bc8e10fb788bf3a4f2eb6773de44282 Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Thu, 15 Jan 2026 03:27:36 -0300 Subject: [PATCH 118/270] logging: drop namespace from data-prepper patch --- services/logging/data-prepper-helmrelease.yaml | 1 - 1 file changed, 1 deletion(-) diff --git a/services/logging/data-prepper-helmrelease.yaml b/services/logging/data-prepper-helmrelease.yaml index eaeb683..9a52a71 100644 --- a/services/logging/data-prepper-helmrelease.yaml +++ b/services/logging/data-prepper-helmrelease.yaml @@ -56,7 +56,6 @@ spec: kind: Deployment metadata: name: data-prepper - namespace: logging spec: template: spec: From 2ccc33b105d777c0c81f63cd863be09accd0652e Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Thu, 15 Jan 2026 03:30:16 -0300 Subject: [PATCH 119/270] logging: patch data-prepper volume via json --- .../logging/data-prepper-helmrelease.yaml | 23 +++++++++---------- 1 file changed, 11 insertions(+), 12 deletions(-) diff --git a/services/logging/data-prepper-helmrelease.yaml b/services/logging/data-prepper-helmrelease.yaml index 9a52a71..9d932d5 100644 --- a/services/logging/data-prepper-helmrelease.yaml +++ b/services/logging/data-prepper-helmrelease.yaml @@ -51,16 +51,15 @@ spec: postRenderers: - kustomize: patches: - - 
patch: |- - apiVersion: apps/v1 + - target: + group: apps + version: v1 kind: Deployment - metadata: - name: data-prepper - spec: - template: - spec: - volumes: - - name: data-prepper-pipelines - $patch: replace - configMap: - name: data-prepper-pipeline + name: data-prepper + patch: |- + - op: remove + path: /spec/template/spec/volumes/1/secret + - op: add + path: /spec/template/spec/volumes/1/configMap + value: + name: data-prepper-pipeline From 69cee91dda43b5ce948d92baddd793e45cee572a Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Thu, 15 Jan 2026 03:42:57 -0300 Subject: [PATCH 120/270] vault: fix data-prepper pipeline and portal admin secret job --- .../portal-onboarding-e2e-test-job.yaml | 4 +- services/keycloak/kustomization.yaml | 1 + ...portal-admin-client-secret-ensure-job.yaml | 138 ++++++++++++++++++ ...al-e2e-execute-actions-email-test-job.yaml | 4 +- .../logging/data-prepper-helmrelease.yaml | 4 +- 5 files changed, 145 insertions(+), 6 deletions(-) create mode 100644 services/keycloak/portal-admin-client-secret-ensure-job.yaml diff --git a/services/bstein-dev-home/portal-onboarding-e2e-test-job.yaml b/services/bstein-dev-home/portal-onboarding-e2e-test-job.yaml index cfe35a1..17597df 100644 --- a/services/bstein-dev-home/portal-onboarding-e2e-test-job.yaml +++ b/services/bstein-dev-home/portal-onboarding-e2e-test-job.yaml @@ -60,7 +60,7 @@ spec: command: ["/bin/sh", "-c"] args: - | - set -euo pipefail + set -eu . 
/vault/secrets/portal-env.sh python /scripts/test_portal_onboarding_flow.py volumeMounts: @@ -71,4 +71,4 @@ spec: - name: tests configMap: name: portal-onboarding-e2e-tests - defaultMode: 0555 \ No newline at end of file + defaultMode: 0555 diff --git a/services/keycloak/kustomization.yaml b/services/keycloak/kustomization.yaml index e141467..316f447 100644 --- a/services/keycloak/kustomization.yaml +++ b/services/keycloak/kustomization.yaml @@ -11,6 +11,7 @@ resources: - vault-sync-deployment.yaml - deployment.yaml - realm-settings-job.yaml + - portal-admin-client-secret-ensure-job.yaml - portal-e2e-client-job.yaml - portal-e2e-target-client-job.yaml - portal-e2e-token-exchange-permissions-job.yaml diff --git a/services/keycloak/portal-admin-client-secret-ensure-job.yaml b/services/keycloak/portal-admin-client-secret-ensure-job.yaml new file mode 100644 index 0000000..350fc6e --- /dev/null +++ b/services/keycloak/portal-admin-client-secret-ensure-job.yaml @@ -0,0 +1,138 @@ +# services/keycloak/portal-admin-client-secret-ensure-job.yaml +apiVersion: batch/v1 +kind: Job +metadata: + name: keycloak-portal-admin-secret-ensure-1 + namespace: sso +spec: + backoffLimit: 0 + template: + metadata: + annotations: + vault.hashicorp.com/agent-inject: "true" + vault.hashicorp.com/agent-pre-populate-only: "true" + vault.hashicorp.com/role: "sso" + vault.hashicorp.com/agent-inject-secret-keycloak-env.sh: "kv/data/atlas/shared/keycloak-admin" + vault.hashicorp.com/agent-inject-template-keycloak-env.sh: | + {{ with secret "kv/data/atlas/shared/keycloak-admin" }} + export KEYCLOAK_ADMIN_USER="{{ .Data.data.username }}" + export KEYCLOAK_ADMIN_PASSWORD="{{ .Data.data.password }}" + {{ end }} + {{ with secret "kv/data/atlas/portal/bstein-dev-home-keycloak-admin" }} + export PORTAL_ADMIN_CLIENT_SECRET="{{ .Data.data.client_secret }}" + {{ end }} + spec: + restartPolicy: Never + serviceAccountName: sso-vault + containers: + - name: configure + image: python:3.11-alpine + env: + - name: 
KEYCLOAK_SERVER + value: http://keycloak.sso.svc.cluster.local + - name: KEYCLOAK_REALM + value: atlas + - name: PORTAL_ADMIN_CLIENT_ID + value: bstein-dev-home-admin + command: ["/bin/sh", "-c"] + args: + - | + set -eu + . /vault/secrets/keycloak-env.sh + python - <<'PY' + import json + import os + import urllib.parse + import urllib.error + import urllib.request + + base_url = os.environ["KEYCLOAK_SERVER"].rstrip("/") + realm = os.environ["KEYCLOAK_REALM"] + admin_user = os.environ["KEYCLOAK_ADMIN_USER"] + admin_password = os.environ["KEYCLOAK_ADMIN_PASSWORD"] + client_id = os.environ["PORTAL_ADMIN_CLIENT_ID"] + client_secret = os.environ["PORTAL_ADMIN_CLIENT_SECRET"] + + def http_json(method: str, url: str, token: str, payload=None): + data = None + headers = {"Authorization": f"Bearer {token}"} + if payload is not None: + data = json.dumps(payload).encode() + headers["Content-Type"] = "application/json" + req = urllib.request.Request(url, data=data, headers=headers, method=method) + try: + with urllib.request.urlopen(req, timeout=30) as resp: + body = resp.read() + if not body: + return resp.status, None + return resp.status, json.loads(body.decode()) + except urllib.error.HTTPError as exc: + raw = exc.read() + if not raw: + return exc.code, None + try: + return exc.code, json.loads(raw.decode()) + except Exception: + return exc.code, {"raw": raw.decode(errors="replace")} + + def get_admin_token() -> str: + token_data = urllib.parse.urlencode( + { + "grant_type": "password", + "client_id": "admin-cli", + "username": admin_user, + "password": admin_password, + } + ).encode() + req = urllib.request.Request( + f"{base_url}/realms/master/protocol/openid-connect/token", + data=token_data, + headers={"Content-Type": "application/x-www-form-urlencoded"}, + method="POST", + ) + try: + with urllib.request.urlopen(req, timeout=15) as resp: + body = json.loads(resp.read().decode()) + except urllib.error.HTTPError as exc: + raw = exc.read().decode(errors="replace") + raise 
SystemExit(f"Token request failed: status={exc.code} body={raw}") + return body["access_token"] + + token = get_admin_token() + status, clients = http_json( + "GET", + f"{base_url}/admin/realms/{realm}/clients?clientId={urllib.parse.quote(client_id)}", + token, + ) + if status != 200 or not isinstance(clients, list) or not clients: + raise SystemExit(f"Unable to find client {client_id!r} (status={status})") + + client_uuid = None + for item in clients: + if isinstance(item, dict) and item.get("clientId") == client_id: + client_uuid = item.get("id") + break + if not client_uuid: + raise SystemExit(f"Client {client_id!r} has no id") + + status, client_rep = http_json( + "GET", + f"{base_url}/admin/realms/{realm}/clients/{client_uuid}", + token, + ) + if status != 200 or not isinstance(client_rep, dict): + raise SystemExit(f"Unable to fetch client representation (status={status})") + + if client_rep.get("secret") != client_secret: + client_rep["secret"] = client_secret + status, resp = http_json( + "PUT", + f"{base_url}/admin/realms/{realm}/clients/{client_uuid}", + token, + client_rep, + ) + if status not in (200, 204): + raise SystemExit(f"Client update failed (status={status}) resp={resp}") + + print(f"OK: ensured secret for {client_id}") + PY diff --git a/services/keycloak/portal-e2e-execute-actions-email-test-job.yaml b/services/keycloak/portal-e2e-execute-actions-email-test-job.yaml index 7ee4e20..70c0a01 100644 --- a/services/keycloak/portal-e2e-execute-actions-email-test-job.yaml +++ b/services/keycloak/portal-e2e-execute-actions-email-test-job.yaml @@ -59,7 +59,7 @@ spec: command: ["/bin/sh", "-c"] args: - | - set -euo pipefail + set -eu . 
/vault/secrets/keycloak-env.sh python /scripts/test_keycloak_execute_actions_email.py volumeMounts: @@ -70,4 +70,4 @@ spec: - name: tests configMap: name: portal-e2e-tests - defaultMode: 0555 \ No newline at end of file + defaultMode: 0555 diff --git a/services/logging/data-prepper-helmrelease.yaml b/services/logging/data-prepper-helmrelease.yaml index 9d932d5..66ae3c5 100644 --- a/services/logging/data-prepper-helmrelease.yaml +++ b/services/logging/data-prepper-helmrelease.yaml @@ -58,8 +58,8 @@ spec: name: data-prepper patch: |- - op: remove - path: /spec/template/spec/volumes/1/secret + path: /spec/template/spec/volumes/0/secret - op: add - path: /spec/template/spec/volumes/1/configMap + path: /spec/template/spec/volumes/0/configMap value: name: data-prepper-pipeline From bf9a24681ce2c89b15bb5d6b508e5d1c2a486f01 Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Thu, 15 Jan 2026 03:44:27 -0300 Subject: [PATCH 121/270] fix: bump keycloak and portal e2e job names --- services/bstein-dev-home/portal-onboarding-e2e-test-job.yaml | 2 +- .../keycloak/portal-e2e-execute-actions-email-test-job.yaml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/services/bstein-dev-home/portal-onboarding-e2e-test-job.yaml b/services/bstein-dev-home/portal-onboarding-e2e-test-job.yaml index 17597df..92a7b2e 100644 --- a/services/bstein-dev-home/portal-onboarding-e2e-test-job.yaml +++ b/services/bstein-dev-home/portal-onboarding-e2e-test-job.yaml @@ -2,7 +2,7 @@ apiVersion: batch/v1 kind: Job metadata: - name: portal-onboarding-e2e-test-16 + name: portal-onboarding-e2e-test-17 namespace: bstein-dev-home spec: backoffLimit: 0 diff --git a/services/keycloak/portal-e2e-execute-actions-email-test-job.yaml b/services/keycloak/portal-e2e-execute-actions-email-test-job.yaml index 70c0a01..d8e2c0b 100644 --- a/services/keycloak/portal-e2e-execute-actions-email-test-job.yaml +++ b/services/keycloak/portal-e2e-execute-actions-email-test-job.yaml @@ -2,7 +2,7 @@ apiVersion: 
batch/v1 kind: Job metadata: - name: keycloak-portal-e2e-execute-actions-email-9 + name: keycloak-portal-e2e-execute-actions-email-10 namespace: sso spec: backoffLimit: 3 From c30f1fc587029fed928e8195d9f61f26c84b0f3d Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Thu, 15 Jan 2026 03:46:58 -0300 Subject: [PATCH 122/270] vault: allow sso role to read portal admin secret --- services/vault/scripts/vault_k8s_auth_configure.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/services/vault/scripts/vault_k8s_auth_configure.sh b/services/vault/scripts/vault_k8s_auth_configure.sh index d47ebb5..daf0214 100644 --- a/services/vault/scripts/vault_k8s_auth_configure.sh +++ b/services/vault/scripts/vault_k8s_auth_configure.sh @@ -154,7 +154,7 @@ write_policy_and_role "gitea" "gitea" "gitea-vault" \ write_policy_and_role "vaultwarden" "vaultwarden" "vaultwarden-vault" \ "vaultwarden/* shared/postmark-relay" "" write_policy_and_role "sso" "sso" "sso-vault,sso-vault-sync,mas-secrets-ensure" \ - "sso/* shared/keycloak-admin shared/portal-e2e-client shared/postmark-relay harbor-pull/sso" "" + "sso/* portal/bstein-dev-home-keycloak-admin shared/keycloak-admin shared/portal-e2e-client shared/postmark-relay harbor-pull/sso" "" write_policy_and_role "mailu-mailserver" "mailu-mailserver" "mailu-vault-sync" \ "mailu/* shared/postmark-relay harbor-pull/mailu-mailserver" "" write_policy_and_role "harbor" "harbor" "harbor-vault-sync" \ From e6210644c26a8f005aae915bbf76ce80df8472a8 Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Thu, 15 Jan 2026 03:58:03 -0300 Subject: [PATCH 123/270] smtp: point services at mailu relay --- services/keycloak/realm-settings-job.yaml | 6 +++--- services/monitoring/helmrelease.yaml | 2 +- services/nextcloud/configmap.yaml | 2 +- services/nextcloud/deployment.yaml | 2 +- services/vaultwarden/deployment.yaml | 2 +- 5 files changed, 7 insertions(+), 7 deletions(-) diff --git a/services/keycloak/realm-settings-job.yaml 
b/services/keycloak/realm-settings-job.yaml index 76076e8..adc6d24 100644 --- a/services/keycloak/realm-settings-job.yaml +++ b/services/keycloak/realm-settings-job.yaml @@ -2,7 +2,7 @@ apiVersion: batch/v1 kind: Job metadata: - name: keycloak-realm-settings-21 + name: keycloak-realm-settings-22 namespace: sso spec: backoffLimit: 0 @@ -59,7 +59,7 @@ spec: - name: KEYCLOAK_REALM value: atlas - name: KEYCLOAK_SMTP_HOST - value: smtp.postmarkapp.com + value: mailu-front.mailu-mailserver.svc.cluster.local - name: KEYCLOAK_SMTP_PORT value: "587" - name: KEYCLOAK_SMTP_FROM @@ -468,4 +468,4 @@ spec: f"Unexpected execution update response for identity-provider-redirector: {status}" ) PY - volumeMounts: \ No newline at end of file + volumeMounts: diff --git a/services/monitoring/helmrelease.yaml b/services/monitoring/helmrelease.yaml index d535ebe..d4b3565 100644 --- a/services/monitoring/helmrelease.yaml +++ b/services/monitoring/helmrelease.yaml @@ -291,7 +291,7 @@ spec: GF_AUTH_ANONYMOUS_ORG_NAME: "Overview" GF_AUTH_ANONYMOUS_ORG_ROLE: "Viewer" GF_SMTP_ENABLED: "true" - GF_SMTP_HOST: "smtp.postmarkapp.com:587" + GF_SMTP_HOST: "mailu-front.mailu-mailserver.svc.cluster.local:587" GF_SMTP_FROM: "no-reply-grafana@bstein.dev" GF_SMTP_FROM_NAME: "Atlas Grafana" GRAFANA_ALERT_EMAILS: "alerts@bstein.dev" diff --git a/services/nextcloud/configmap.yaml b/services/nextcloud/configmap.yaml index 7222320..6426656 100644 --- a/services/nextcloud/configmap.yaml +++ b/services/nextcloud/configmap.yaml @@ -18,7 +18,7 @@ data: 'default_phone_region' => 'US', 'mail_smtpmode' => 'smtp', 'mail_sendmailmode' => 'smtp', - 'mail_smtphost' => 'smtp.postmarkapp.com', + 'mail_smtphost' => 'mailu-front.mailu-mailserver.svc.cluster.local', 'mail_smtpport' => '587', 'mail_smtpsecure' => 'tls', 'mail_smtpauth' => true, diff --git a/services/nextcloud/deployment.yaml b/services/nextcloud/deployment.yaml index 9af1e00..b0d55bf 100644 --- a/services/nextcloud/deployment.yaml +++ 
b/services/nextcloud/deployment.yaml @@ -217,7 +217,7 @@ spec: value: https://cloud.bstein.dev # SMTP (external secret: nextcloud-smtp with keys username, password) - name: SMTP_HOST - value: smtp.postmarkapp.com + value: mailu-front.mailu-mailserver.svc.cluster.local - name: SMTP_PORT value: "587" - name: SMTP_SECURE diff --git a/services/vaultwarden/deployment.yaml b/services/vaultwarden/deployment.yaml index 2fde277..de8ea40 100644 --- a/services/vaultwarden/deployment.yaml +++ b/services/vaultwarden/deployment.yaml @@ -51,7 +51,7 @@ spec: - name: DOMAIN value: "https://vault.bstein.dev" - name: SMTP_HOST - value: "smtp.postmarkapp.com" + value: "mailu-front.mailu-mailserver.svc.cluster.local" - name: SMTP_PORT value: "587" - name: SMTP_SECURITY From de6665c450a66eaeae3552732350b489329ce8f8 Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Thu, 15 Jan 2026 04:04:50 -0300 Subject: [PATCH 124/270] smtp: use mail.bstein.dev for app relays --- .../bstein-dev-home/scripts/test_portal_onboarding_flow.py | 2 +- services/keycloak/realm-settings-job.yaml | 4 ++-- services/monitoring/helmrelease.yaml | 2 +- services/nextcloud/configmap.yaml | 2 +- services/nextcloud/deployment.yaml | 2 +- services/vaultwarden/deployment.yaml | 2 +- 6 files changed, 7 insertions(+), 7 deletions(-) diff --git a/services/bstein-dev-home/scripts/test_portal_onboarding_flow.py b/services/bstein-dev-home/scripts/test_portal_onboarding_flow.py index 9c5124a..ad86fe6 100644 --- a/services/bstein-dev-home/scripts/test_portal_onboarding_flow.py +++ b/services/bstein-dev-home/scripts/test_portal_onboarding_flow.py @@ -249,7 +249,7 @@ def main() -> int: if not contact_email: raise SystemExit("E2E_CONTACT_EMAIL must not be empty") - imap_host = os.environ.get("E2E_IMAP_HOST", "mailu-front.mailu-mailserver.svc.cluster.local").strip() + imap_host = os.environ.get("E2E_IMAP_HOST", "mail.bstein.dev").strip() imap_port = int(os.environ.get("E2E_IMAP_PORT", "993")) imap_keycloak_username = 
os.environ.get("E2E_IMAP_KEYCLOAK_USERNAME", "robotuser").strip() imap_wait_sec = int(os.environ.get("E2E_IMAP_WAIT_SECONDS", "90")) diff --git a/services/keycloak/realm-settings-job.yaml b/services/keycloak/realm-settings-job.yaml index adc6d24..3b77c19 100644 --- a/services/keycloak/realm-settings-job.yaml +++ b/services/keycloak/realm-settings-job.yaml @@ -2,7 +2,7 @@ apiVersion: batch/v1 kind: Job metadata: - name: keycloak-realm-settings-22 + name: keycloak-realm-settings-23 namespace: sso spec: backoffLimit: 0 @@ -59,7 +59,7 @@ spec: - name: KEYCLOAK_REALM value: atlas - name: KEYCLOAK_SMTP_HOST - value: mailu-front.mailu-mailserver.svc.cluster.local + value: mail.bstein.dev - name: KEYCLOAK_SMTP_PORT value: "587" - name: KEYCLOAK_SMTP_FROM diff --git a/services/monitoring/helmrelease.yaml b/services/monitoring/helmrelease.yaml index d4b3565..3ff3f0c 100644 --- a/services/monitoring/helmrelease.yaml +++ b/services/monitoring/helmrelease.yaml @@ -291,7 +291,7 @@ spec: GF_AUTH_ANONYMOUS_ORG_NAME: "Overview" GF_AUTH_ANONYMOUS_ORG_ROLE: "Viewer" GF_SMTP_ENABLED: "true" - GF_SMTP_HOST: "mailu-front.mailu-mailserver.svc.cluster.local:587" + GF_SMTP_HOST: "mail.bstein.dev:587" GF_SMTP_FROM: "no-reply-grafana@bstein.dev" GF_SMTP_FROM_NAME: "Atlas Grafana" GRAFANA_ALERT_EMAILS: "alerts@bstein.dev" diff --git a/services/nextcloud/configmap.yaml b/services/nextcloud/configmap.yaml index 6426656..7fd2ba9 100644 --- a/services/nextcloud/configmap.yaml +++ b/services/nextcloud/configmap.yaml @@ -18,7 +18,7 @@ data: 'default_phone_region' => 'US', 'mail_smtpmode' => 'smtp', 'mail_sendmailmode' => 'smtp', - 'mail_smtphost' => 'mailu-front.mailu-mailserver.svc.cluster.local', + 'mail_smtphost' => 'mail.bstein.dev', 'mail_smtpport' => '587', 'mail_smtpsecure' => 'tls', 'mail_smtpauth' => true, diff --git a/services/nextcloud/deployment.yaml b/services/nextcloud/deployment.yaml index b0d55bf..cfa91b2 100644 --- a/services/nextcloud/deployment.yaml +++ 
b/services/nextcloud/deployment.yaml @@ -217,7 +217,7 @@ spec: value: https://cloud.bstein.dev # SMTP (external secret: nextcloud-smtp with keys username, password) - name: SMTP_HOST - value: mailu-front.mailu-mailserver.svc.cluster.local + value: mail.bstein.dev - name: SMTP_PORT value: "587" - name: SMTP_SECURE diff --git a/services/vaultwarden/deployment.yaml b/services/vaultwarden/deployment.yaml index de8ea40..6125ad8 100644 --- a/services/vaultwarden/deployment.yaml +++ b/services/vaultwarden/deployment.yaml @@ -51,7 +51,7 @@ spec: - name: DOMAIN value: "https://vault.bstein.dev" - name: SMTP_HOST - value: "mailu-front.mailu-mailserver.svc.cluster.local" + value: "mail.bstein.dev" - name: SMTP_PORT value: "587" - name: SMTP_SECURITY From 5899c9acb3c83fada0f4c372c6115ab24c38772e Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Thu, 15 Jan 2026 04:17:14 -0300 Subject: [PATCH 125/270] vault: allow admin policy to update shared secrets --- services/vault/scripts/vault_k8s_auth_configure.sh | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/services/vault/scripts/vault_k8s_auth_configure.sh b/services/vault/scripts/vault_k8s_auth_configure.sh index daf0214..46086cf 100644 --- a/services/vault/scripts/vault_k8s_auth_configure.sh +++ b/services/vault/scripts/vault_k8s_auth_configure.sh @@ -133,6 +133,12 @@ path "kv/data/atlas/vault/*" { path "kv/metadata/atlas/vault/*" { capabilities = ["list"] } +path "kv/data/atlas/shared/*" { + capabilities = ["create", "update", "read", "patch"] +} +path "kv/metadata/atlas/shared/*" { + capabilities = ["list"] +} ' write_raw_policy "vault-admin" "${vault_admin_policy}" From c0d0e64bc6fb978d4d0c4cbdd289855a31030899 Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Thu, 15 Jan 2026 04:24:16 -0300 Subject: [PATCH 126/270] keycloak: rerun realm smtp config --- services/keycloak/realm-settings-job.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/services/keycloak/realm-settings-job.yaml 
b/services/keycloak/realm-settings-job.yaml index 3b77c19..e276c52 100644 --- a/services/keycloak/realm-settings-job.yaml +++ b/services/keycloak/realm-settings-job.yaml @@ -2,7 +2,7 @@ apiVersion: batch/v1 kind: Job metadata: - name: keycloak-realm-settings-23 + name: keycloak-realm-settings-24 namespace: sso spec: backoffLimit: 0 From c759fb1dbbf3283735d488be31fdec2e0f507c37 Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Thu, 15 Jan 2026 04:27:25 -0300 Subject: [PATCH 127/270] logging: fix data-prepper post-render patch --- services/logging/data-prepper-helmrelease.yaml | 18 ++++++++++++------ 1 file changed, 12 insertions(+), 6 deletions(-) diff --git a/services/logging/data-prepper-helmrelease.yaml b/services/logging/data-prepper-helmrelease.yaml index 66ae3c5..c7b0ffe 100644 --- a/services/logging/data-prepper-helmrelease.yaml +++ b/services/logging/data-prepper-helmrelease.yaml @@ -57,9 +57,15 @@ spec: kind: Deployment name: data-prepper patch: |- - - op: remove - path: /spec/template/spec/volumes/0/secret - - op: add - path: /spec/template/spec/volumes/0/configMap - value: - name: data-prepper-pipeline + apiVersion: apps/v1 + kind: Deployment + metadata: + name: data-prepper + spec: + template: + spec: + volumes: + - name: data-prepper-pipelines + $patch: replace + configMap: + name: data-prepper-pipeline From 70a52dec067d9b5ea0f5572eebe3dcc1929d0d62 Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Thu, 15 Jan 2026 04:35:06 -0300 Subject: [PATCH 128/270] bstein-dev-home: rerun onboarding e2e job --- services/bstein-dev-home/portal-onboarding-e2e-test-job.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/services/bstein-dev-home/portal-onboarding-e2e-test-job.yaml b/services/bstein-dev-home/portal-onboarding-e2e-test-job.yaml index 92a7b2e..e6e0baa 100644 --- a/services/bstein-dev-home/portal-onboarding-e2e-test-job.yaml +++ b/services/bstein-dev-home/portal-onboarding-e2e-test-job.yaml @@ -2,7 +2,7 @@ apiVersion: batch/v1 kind: Job 
metadata: - name: portal-onboarding-e2e-test-17 + name: portal-onboarding-e2e-test-18 namespace: bstein-dev-home spec: backoffLimit: 0 From 8db4b4f0b564c841b5427a504ae164ad9c0a14f0 Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Thu, 15 Jan 2026 04:37:12 -0300 Subject: [PATCH 129/270] keycloak: rerun execute-actions email e2e --- .../keycloak/portal-e2e-execute-actions-email-test-job.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/services/keycloak/portal-e2e-execute-actions-email-test-job.yaml b/services/keycloak/portal-e2e-execute-actions-email-test-job.yaml index d8e2c0b..ded9875 100644 --- a/services/keycloak/portal-e2e-execute-actions-email-test-job.yaml +++ b/services/keycloak/portal-e2e-execute-actions-email-test-job.yaml @@ -2,7 +2,7 @@ apiVersion: batch/v1 kind: Job metadata: - name: keycloak-portal-e2e-execute-actions-email-10 + name: keycloak-portal-e2e-execute-actions-email-11 namespace: sso spec: backoffLimit: 3 From a2b2c7db9de8af02211db45eec4a4ac4aa64edc8 Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Thu, 15 Jan 2026 04:44:35 -0300 Subject: [PATCH 130/270] keycloak: align smtp probe user --- .../keycloak/portal-e2e-execute-actions-email-test-job.yaml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/services/keycloak/portal-e2e-execute-actions-email-test-job.yaml b/services/keycloak/portal-e2e-execute-actions-email-test-job.yaml index ded9875..892d5aa 100644 --- a/services/keycloak/portal-e2e-execute-actions-email-test-job.yaml +++ b/services/keycloak/portal-e2e-execute-actions-email-test-job.yaml @@ -2,7 +2,7 @@ apiVersion: batch/v1 kind: Job metadata: - name: keycloak-portal-e2e-execute-actions-email-11 + name: keycloak-portal-e2e-execute-actions-email-12 namespace: sso spec: backoffLimit: 3 @@ -49,9 +49,9 @@ spec: - name: KEYCLOAK_REALM value: atlas - name: E2E_PROBE_USERNAME - value: e2e-smtp-probe + value: robotuser - name: E2E_PROBE_EMAIL - value: robot@bstein.dev + value: robotuser@bstein.dev - 
name: EXECUTE_ACTIONS_CLIENT_ID value: bstein-dev-home - name: EXECUTE_ACTIONS_REDIRECT_URI From 9993b501a64e4ec41d2bf36578dbdd6646ebb59f Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Thu, 15 Jan 2026 04:47:07 -0300 Subject: [PATCH 131/270] logging: disable wait for data-prepper helmrelease --- services/logging/data-prepper-helmrelease.yaml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/services/logging/data-prepper-helmrelease.yaml b/services/logging/data-prepper-helmrelease.yaml index c7b0ffe..ca3eba0 100644 --- a/services/logging/data-prepper-helmrelease.yaml +++ b/services/logging/data-prepper-helmrelease.yaml @@ -7,6 +7,10 @@ metadata: spec: interval: 15m timeout: 10m + install: + disableWait: true + upgrade: + disableWait: true chart: spec: chart: data-prepper From 7817248eb92e3331004a4618d78173dafecf5f3d Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Thu, 15 Jan 2026 11:26:46 -0300 Subject: [PATCH 132/270] traefik: wire LB service to custom deployment --- infrastructure/traefik/traefik-service-lb.yaml | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/infrastructure/traefik/traefik-service-lb.yaml b/infrastructure/traefik/traefik-service-lb.yaml index e4929f1..0280d9d 100644 --- a/infrastructure/traefik/traefik-service-lb.yaml +++ b/infrastructure/traefik/traefik-service-lb.yaml @@ -3,7 +3,7 @@ apiVersion: v1 kind: Service metadata: name: traefik - namespace: kube-system + namespace: traefik annotations: metallb.universe.tf/address-pool: communication-pool spec: @@ -20,5 +20,4 @@ spec: targetPort: websecure protocol: TCP selector: - app.kubernetes.io/instance: traefik-kube-system - app.kubernetes.io/name: traefik + app: traefik From 66e7e6acc5b255ba9ab00859fbf0620c2f1740a4 Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Thu, 15 Jan 2026 16:29:32 -0300 Subject: [PATCH 133/270] core: add bstein.dev coredns overrides --- infrastructure/core/coredns-custom.yaml | 42 +++++++++++++++++++++++++ infrastructure/core/kustomization.yaml | 
1 + infrastructure/traefik/deployment.yaml | 2 ++ services/gitea/ingress.yaml | 2 ++ 4 files changed, 47 insertions(+) create mode 100644 infrastructure/core/coredns-custom.yaml diff --git a/infrastructure/core/coredns-custom.yaml b/infrastructure/core/coredns-custom.yaml new file mode 100644 index 0000000..ad07d2a --- /dev/null +++ b/infrastructure/core/coredns-custom.yaml @@ -0,0 +1,42 @@ +# infrastructure/core/coredns-custom.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: coredns-custom + namespace: kube-system +data: + bstein-dev.server: | + bstein.dev:53 { + errors + cache 30 + hosts { + 192.168.22.9 alerts.bstein.dev + 192.168.22.9 auth.bstein.dev + 192.168.22.9 bstein.dev + 192.168.22.9 call.live.bstein.dev + 192.168.22.9 cd.bstein.dev + 192.168.22.9 chat.ai.bstein.dev + 192.168.22.9 ci.bstein.dev + 192.168.22.9 cloud.bstein.dev + 192.168.22.9 health.bstein.dev + 192.168.22.9 kit.live.bstein.dev + 192.168.22.9 live.bstein.dev + 192.168.22.9 logs.bstein.dev + 192.168.22.9 longhorn.bstein.dev + 192.168.22.9 mail.bstein.dev + 192.168.22.9 matrix.live.bstein.dev + 192.168.22.9 metrics.bstein.dev + 192.168.22.9 monero.bstein.dev + 192.168.22.9 notes.bstein.dev + 192.168.22.9 office.bstein.dev + 192.168.22.9 pegasus.bstein.dev + 192.168.22.9 registry.bstein.dev + 192.168.22.9 scm.bstein.dev + 192.168.22.9 secret.bstein.dev + 192.168.22.9 sso.bstein.dev + 192.168.22.9 stream.bstein.dev + 192.168.22.9 tasks.bstein.dev + 192.168.22.9 vault.bstein.dev + fallthrough + } + } diff --git a/infrastructure/core/kustomization.yaml b/infrastructure/core/kustomization.yaml index 14d6a02..5e74d81 100644 --- a/infrastructure/core/kustomization.yaml +++ b/infrastructure/core/kustomization.yaml @@ -4,5 +4,6 @@ kind: Kustomization resources: - ../modules/base - ../modules/profiles/atlas-ha + - coredns-custom.yaml - ../sources/cert-manager/letsencrypt.yaml - ../sources/cert-manager/letsencrypt-prod.yaml diff --git a/infrastructure/traefik/deployment.yaml 
b/infrastructure/traefik/deployment.yaml index a34307a..600a504 100644 --- a/infrastructure/traefik/deployment.yaml +++ b/infrastructure/traefik/deployment.yaml @@ -27,6 +27,8 @@ items: creationTimestamp: null labels: app: traefik + app.kubernetes.io/instance: traefik-kube-system + app.kubernetes.io/name: traefik spec: containers: - args: diff --git a/services/gitea/ingress.yaml b/services/gitea/ingress.yaml index 0077ba4..b3cd845 100644 --- a/services/gitea/ingress.yaml +++ b/services/gitea/ingress.yaml @@ -7,6 +7,8 @@ metadata: annotations: cert-manager.io/cluster-issuer: letsencrypt nginx.ingress.kubernetes.io/ssl-redirect: "true" + traefik.ingress.kubernetes.io/router.entrypoints: websecure + traefik.ingress.kubernetes.io/router.tls: "true" spec: tls: - hosts: From d90950b82e6f4a9d990b6004e7b96a66f5d3864c Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Thu, 15 Jan 2026 16:39:04 -0300 Subject: [PATCH 134/270] gitea: expose ssh via metallb shared IP --- infrastructure/traefik/traefik-service-lb.yaml | 1 + services/gitea/service.yaml | 8 ++++++-- 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/infrastructure/traefik/traefik-service-lb.yaml b/infrastructure/traefik/traefik-service-lb.yaml index 0280d9d..839a55e 100644 --- a/infrastructure/traefik/traefik-service-lb.yaml +++ b/infrastructure/traefik/traefik-service-lb.yaml @@ -6,6 +6,7 @@ metadata: namespace: traefik annotations: metallb.universe.tf/address-pool: communication-pool + metallb.universe.tf/allow-shared-ip: traefik spec: type: LoadBalancer loadBalancerClass: metallb diff --git a/services/gitea/service.yaml b/services/gitea/service.yaml index 5b4f8a0..66667bb 100644 --- a/services/gitea/service.yaml +++ b/services/gitea/service.yaml @@ -21,12 +21,16 @@ kind: Service metadata: name: gitea-ssh namespace: gitea + annotations: + metallb.universe.tf/address-pool: communication-pool + metallb.universe.tf/allow-shared-ip: traefik spec: - type: NodePort + type: LoadBalancer + loadBalancerClass: 
metallb + loadBalancerIP: 192.168.22.9 selector: app: gitea ports: - name: ssh port: 2242 targetPort: 2242 - nodePort: 32242 From 5816d4f39905f6ca2d5175d2b7897e464f581f83 Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Thu, 15 Jan 2026 23:56:32 -0300 Subject: [PATCH 135/270] comms: fix mas vault file paths --- services/comms/mas-configmap.yaml | 6 +++--- services/comms/mas-deployment.yaml | 22 ---------------------- services/keycloak/ingress.yaml | 2 ++ services/oauth2-proxy/ingress.yaml | 2 ++ 4 files changed, 7 insertions(+), 25 deletions(-) diff --git a/services/comms/mas-configmap.yaml b/services/comms/mas-configmap.yaml index a41ebeb..5e6cfdd 100644 --- a/services/comms/mas-configmap.yaml +++ b/services/comms/mas-configmap.yaml @@ -31,13 +31,13 @@ data: clients: - client_id: 01KDXMVQBQ5JNY6SEJPZW6Z8BM client_auth_method: client_secret_basic - client_secret_file: /etc/mas/admin-client/client_secret + client_secret_file: /vault/secrets/mas-admin-secret secrets: - encryption_file: /etc/mas/secrets/encryption + encryption_file: /vault/secrets/mas-encryption keys: - kid: "othrys-rsa-1" - key_file: /etc/mas/keys/rsa_key + key_file: /vault/secrets/mas-rsa-key passwords: enabled: true diff --git a/services/comms/mas-deployment.yaml b/services/comms/mas-deployment.yaml index 532c9da..afe6135 100644 --- a/services/comms/mas-deployment.yaml +++ b/services/comms/mas-deployment.yaml @@ -117,26 +117,6 @@ spec: - name: rendered mountPath: /rendered readOnly: true - - name: vault-secrets - mountPath: /etc/mas/secrets/encryption - subPath: mas-encryption - readOnly: true - - name: vault-secrets - mountPath: /etc/mas/secrets/matrix_shared_secret - subPath: mas-matrix-shared - readOnly: true - - name: vault-secrets - mountPath: /etc/mas/secrets/keycloak_client_secret - subPath: mas-kc-secret - readOnly: true - - name: vault-secrets - mountPath: /etc/mas/keys/rsa_key - subPath: mas-rsa-key - readOnly: true - - name: vault-secrets - mountPath: /etc/mas/admin-client/client_secret - 
subPath: mas-admin-secret - readOnly: true resources: requests: cpu: 200m @@ -153,8 +133,6 @@ spec: path: config.yaml - name: rendered emptyDir: {} - - name: vault-secrets - emptyDir: {} - name: vault-scripts configMap: name: comms-vault-env diff --git a/services/keycloak/ingress.yaml b/services/keycloak/ingress.yaml index 39f6cb0..9efb18e 100644 --- a/services/keycloak/ingress.yaml +++ b/services/keycloak/ingress.yaml @@ -6,6 +6,8 @@ metadata: namespace: sso annotations: cert-manager.io/cluster-issuer: letsencrypt + traefik.ingress.kubernetes.io/router.entrypoints: websecure + traefik.ingress.kubernetes.io/router.tls: "true" spec: ingressClassName: traefik rules: diff --git a/services/oauth2-proxy/ingress.yaml b/services/oauth2-proxy/ingress.yaml index 0f5830c..39f71da 100644 --- a/services/oauth2-proxy/ingress.yaml +++ b/services/oauth2-proxy/ingress.yaml @@ -7,6 +7,8 @@ metadata: annotations: cert-manager.io/cluster-issuer: letsencrypt traefik.ingress.kubernetes.io/router.middlewares: sso-oauth2-proxy-errors@kubernetescrd + traefik.ingress.kubernetes.io/router.entrypoints: websecure + traefik.ingress.kubernetes.io/router.tls: "true" spec: ingressClassName: traefik rules: From b1489a8dd9b492d7b7867285f8556989dc95523a Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Fri, 16 Jan 2026 00:15:58 -0300 Subject: [PATCH 136/270] fix logging pipeline secret and scheduling --- services/crypto/default-serviceaccount.yaml | 8 ++++ services/crypto/kustomization.yaml | 1 + .../logging/data-prepper-helmrelease.yaml | 21 ---------- .../data-prepper-pipeline-configmap.yaml | 39 ------------------- services/logging/kustomization.yaml | 9 ++++- .../scripts/data_prepper_pipelines.yaml | 31 +++++++++++++++ .../overlays/atlas/patch-node-selector.yaml | 2 +- 7 files changed, 49 insertions(+), 62 deletions(-) create mode 100644 services/crypto/default-serviceaccount.yaml delete mode 100644 services/logging/data-prepper-pipeline-configmap.yaml create mode 100644 
services/logging/scripts/data_prepper_pipelines.yaml diff --git a/services/crypto/default-serviceaccount.yaml b/services/crypto/default-serviceaccount.yaml new file mode 100644 index 0000000..fca7007 --- /dev/null +++ b/services/crypto/default-serviceaccount.yaml @@ -0,0 +1,8 @@ +# services/crypto/default-serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: default + namespace: crypto +imagePullSecrets: + - name: harbor-regcred diff --git a/services/crypto/kustomization.yaml b/services/crypto/kustomization.yaml index 4e6ee87..f31fc8a 100644 --- a/services/crypto/kustomization.yaml +++ b/services/crypto/kustomization.yaml @@ -3,3 +3,4 @@ apiVersion: kustomize.config.k8s.io/v1beta1 kind: Kustomization resources: - namespace.yaml + - default-serviceaccount.yaml diff --git a/services/logging/data-prepper-helmrelease.yaml b/services/logging/data-prepper-helmrelease.yaml index ca3eba0..1c0bc45 100644 --- a/services/logging/data-prepper-helmrelease.yaml +++ b/services/logging/data-prepper-helmrelease.yaml @@ -52,24 +52,3 @@ spec: operator: In values: - rpi5 - postRenderers: - - kustomize: - patches: - - target: - group: apps - version: v1 - kind: Deployment - name: data-prepper - patch: |- - apiVersion: apps/v1 - kind: Deployment - metadata: - name: data-prepper - spec: - template: - spec: - volumes: - - name: data-prepper-pipelines - $patch: replace - configMap: - name: data-prepper-pipeline diff --git a/services/logging/data-prepper-pipeline-configmap.yaml b/services/logging/data-prepper-pipeline-configmap.yaml deleted file mode 100644 index 13ca326..0000000 --- a/services/logging/data-prepper-pipeline-configmap.yaml +++ /dev/null @@ -1,39 +0,0 @@ -# services/logging/data-prepper-pipeline-configmap.yaml -apiVersion: v1 -kind: ConfigMap -metadata: - name: data-prepper-pipeline - namespace: logging -data: - pipelines.yaml: | - entry-pipeline: - delay: "100" - source: - otel_trace_source: - ssl: false - sink: - - pipeline: - name: "raw-pipeline" - - 
pipeline: - name: "service-map-pipeline" - raw-pipeline: - source: - pipeline: - name: "entry-pipeline" - processor: - - otel_traces: - sink: - - opensearch: - hosts: ["http://opensearch-master.logging.svc.cluster.local:9200"] - index_type: trace-analytics-raw - service-map-pipeline: - delay: "100" - source: - pipeline: - name: "entry-pipeline" - processor: - - service_map: - sink: - - opensearch: - hosts: ["http://opensearch-master.logging.svc.cluster.local:9200"] - index_type: trace-analytics-service-map diff --git a/services/logging/kustomization.yaml b/services/logging/kustomization.yaml index 394a248..08c73a8 100644 --- a/services/logging/kustomization.yaml +++ b/services/logging/kustomization.yaml @@ -10,7 +10,6 @@ resources: - node-image-prune-rpi5-serviceaccount.yaml - vault-serviceaccount.yaml - secretproviderclass.yaml - - data-prepper-pipeline-configmap.yaml - opensearch-pvc.yaml - opensearch-helmrelease.yaml - opensearch-dashboards-helmrelease.yaml @@ -59,3 +58,11 @@ configMapGenerator: - seed.py=scripts/opensearch_observability_seed.py options: disableNameSuffixHash: true + +secretGenerator: + - name: data-prepper-pipeline + namespace: logging + files: + - pipelines.yaml=scripts/data_prepper_pipelines.yaml + options: + disableNameSuffixHash: true diff --git a/services/logging/scripts/data_prepper_pipelines.yaml b/services/logging/scripts/data_prepper_pipelines.yaml new file mode 100644 index 0000000..5e244ff --- /dev/null +++ b/services/logging/scripts/data_prepper_pipelines.yaml @@ -0,0 +1,31 @@ +entry-pipeline: + delay: "100" + source: + otel_trace_source: + ssl: false + sink: + - pipeline: + name: "raw-pipeline" + - pipeline: + name: "service-map-pipeline" +raw-pipeline: + source: + pipeline: + name: "entry-pipeline" + processor: + - otel_traces: + sink: + - opensearch: + hosts: ["http://opensearch-master.logging.svc.cluster.local:9200"] + index_type: trace-analytics-raw +service-map-pipeline: + delay: "100" + source: + pipeline: + name: 
"entry-pipeline" + processor: + - service_map: + sink: + - opensearch: + hosts: ["http://opensearch-master.logging.svc.cluster.local:9200"] + index_type: trace-analytics-service-map diff --git a/services/sui-metrics/overlays/atlas/patch-node-selector.yaml b/services/sui-metrics/overlays/atlas/patch-node-selector.yaml index e97ccb7..d4f3360 100644 --- a/services/sui-metrics/overlays/atlas/patch-node-selector.yaml +++ b/services/sui-metrics/overlays/atlas/patch-node-selector.yaml @@ -8,4 +8,4 @@ spec: template: spec: nodeSelector: - kubernetes.io/hostname: titan-24 + hardware: rpi5 From bb1bf3c017d26070c1eb7f59f692f4851a620e86 Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Fri, 16 Jan 2026 01:40:50 -0300 Subject: [PATCH 137/270] fix ingress tls routing --- services/comms/helmrelease.yaml | 2 ++ services/gitops-ui/helmrelease.yaml | 1 + services/jenkins/ingress.yaml | 1 + services/logging/ingress.yaml | 2 ++ services/monitoring/helmrelease.yaml | 4 ++++ services/nextcloud/collabora.yaml | 1 + services/nextcloud/ingress.yaml | 1 + services/vault/ingress.yaml | 1 + 8 files changed, 13 insertions(+) diff --git a/services/comms/helmrelease.yaml b/services/comms/helmrelease.yaml index 2b049c8..bf45b21 100644 --- a/services/comms/helmrelease.yaml +++ b/services/comms/helmrelease.yaml @@ -135,6 +135,7 @@ spec: annotations: cert-manager.io/cluster-issuer: letsencrypt traefik.ingress.kubernetes.io/router.entrypoints: websecure + traefik.ingress.kubernetes.io/router.tls: "true" csHosts: - matrix.live.bstein.dev hosts: @@ -395,6 +396,7 @@ spec: annotations: cert-manager.io/cluster-issuer: letsencrypt traefik.ingress.kubernetes.io/router.entrypoints: websecure + traefik.ingress.kubernetes.io/router.tls: "true" hosts: - live.bstein.dev tls: diff --git a/services/gitops-ui/helmrelease.yaml b/services/gitops-ui/helmrelease.yaml index 86ae327..671dfe3 100644 --- a/services/gitops-ui/helmrelease.yaml +++ b/services/gitops-ui/helmrelease.yaml @@ -33,6 +33,7 @@ spec: annotations: 
cert-manager.io/cluster-issuer: letsencrypt traefik.ingress.kubernetes.io/router.entrypoints: websecure + traefik.ingress.kubernetes.io/router.tls: "true" hosts: - host: cd.bstein.dev paths: diff --git a/services/jenkins/ingress.yaml b/services/jenkins/ingress.yaml index e702c8c..611eae4 100644 --- a/services/jenkins/ingress.yaml +++ b/services/jenkins/ingress.yaml @@ -7,6 +7,7 @@ metadata: annotations: cert-manager.io/cluster-issuer: letsencrypt traefik.ingress.kubernetes.io/router.entrypoints: websecure + traefik.ingress.kubernetes.io/router.tls: "true" spec: ingressClassName: traefik tls: diff --git a/services/logging/ingress.yaml b/services/logging/ingress.yaml index 7beeb9a..eafeb5d 100644 --- a/services/logging/ingress.yaml +++ b/services/logging/ingress.yaml @@ -6,6 +6,8 @@ metadata: namespace: logging annotations: cert-manager.io/cluster-issuer: letsencrypt + traefik.ingress.kubernetes.io/router.entrypoints: websecure + traefik.ingress.kubernetes.io/router.tls: "true" spec: ingressClassName: traefik tls: diff --git a/services/monitoring/helmrelease.yaml b/services/monitoring/helmrelease.yaml index 3ff3f0c..c99a8ca 100644 --- a/services/monitoring/helmrelease.yaml +++ b/services/monitoring/helmrelease.yaml @@ -322,6 +322,8 @@ spec: ingressClassName: traefik annotations: cert-manager.io/cluster-issuer: letsencrypt + traefik.ingress.kubernetes.io/router.entrypoints: websecure + traefik.ingress.kubernetes.io/router.tls: "true" hosts: - metrics.bstein.dev path: / @@ -503,6 +505,8 @@ spec: ingressClassName: traefik annotations: cert-manager.io/cluster-issuer: letsencrypt + traefik.ingress.kubernetes.io/router.entrypoints: websecure + traefik.ingress.kubernetes.io/router.tls: "true" hosts: - host: alerts.bstein.dev paths: diff --git a/services/nextcloud/collabora.yaml b/services/nextcloud/collabora.yaml index 0f09c79..8a87821 100644 --- a/services/nextcloud/collabora.yaml +++ b/services/nextcloud/collabora.yaml @@ -61,6 +61,7 @@ metadata: annotations: 
cert-manager.io/cluster-issuer: letsencrypt-prod traefik.ingress.kubernetes.io/router.entrypoints: websecure + traefik.ingress.kubernetes.io/router.tls: "true" spec: tls: - hosts: diff --git a/services/nextcloud/ingress.yaml b/services/nextcloud/ingress.yaml index 1c60282..0df2660 100644 --- a/services/nextcloud/ingress.yaml +++ b/services/nextcloud/ingress.yaml @@ -7,6 +7,7 @@ metadata: annotations: cert-manager.io/cluster-issuer: letsencrypt-prod traefik.ingress.kubernetes.io/router.entrypoints: websecure + traefik.ingress.kubernetes.io/router.tls: "true" spec: tls: - hosts: diff --git a/services/vault/ingress.yaml b/services/vault/ingress.yaml index 1d9d523..b768381 100644 --- a/services/vault/ingress.yaml +++ b/services/vault/ingress.yaml @@ -7,6 +7,7 @@ metadata: annotations: kubernetes.io/ingress.class: traefik traefik.ingress.kubernetes.io/router.entrypoints: websecure + traefik.ingress.kubernetes.io/router.tls: "true" spec: ingressClassName: traefik tls: From f5231d282b6cd79860bddcb21f94bfa6c61cc744 Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Fri, 16 Jan 2026 02:06:31 -0300 Subject: [PATCH 138/270] vault: allow UI mount listing for admins --- services/vault/scripts/vault_k8s_auth_configure.sh | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/services/vault/scripts/vault_k8s_auth_configure.sh b/services/vault/scripts/vault_k8s_auth_configure.sh index 46086cf..daf48b3 100644 --- a/services/vault/scripts/vault_k8s_auth_configure.sh +++ b/services/vault/scripts/vault_k8s_auth_configure.sh @@ -124,6 +124,12 @@ path "sys/policies/acl" { path "sys/policies/acl/*" { capabilities = ["create", "update", "read"] } +path "sys/internal/ui/mounts" { + capabilities = ["read"] +} +path "sys/mounts" { + capabilities = ["read"] +} path "sys/mounts/auth/*" { capabilities = ["read", "update", "sudo"] } From 8ad9f0a664238ef7aca851e5c84faa9740b7ef4e Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Fri, 16 Jan 2026 03:20:32 -0300 Subject: [PATCH 139/270] vault: allow 
admin kv browse --- services/vault/scripts/vault_k8s_auth_configure.sh | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/services/vault/scripts/vault_k8s_auth_configure.sh b/services/vault/scripts/vault_k8s_auth_configure.sh index daf48b3..3a721c1 100644 --- a/services/vault/scripts/vault_k8s_auth_configure.sh +++ b/services/vault/scripts/vault_k8s_auth_configure.sh @@ -139,6 +139,12 @@ path "kv/data/atlas/vault/*" { path "kv/metadata/atlas/vault/*" { capabilities = ["list"] } +path "kv/data/*" { + capabilities = ["create", "update", "read", "delete", "patch"] +} +path "kv/metadata/*" { + capabilities = ["read", "list", "delete"] +} path "kv/data/atlas/shared/*" { capabilities = ["create", "update", "read", "patch"] } From 5cd196e043e8a05f3c07b1fd3dbaca6bf3748789 Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Fri, 16 Jan 2026 03:46:07 -0300 Subject: [PATCH 140/270] vault/keycloak: restore kv access and wger sync rbac --- services/bstein-dev-home/rbac.yaml | 31 +++++++++++++++ services/keycloak/user-overrides-job.yaml | 38 ++++++++++++++++++- .../vault/scripts/vault_k8s_auth_configure.sh | 21 ++++++++++ 3 files changed, 88 insertions(+), 2 deletions(-) diff --git a/services/bstein-dev-home/rbac.yaml b/services/bstein-dev-home/rbac.yaml index f97ed24..7ce8fd8 100644 --- a/services/bstein-dev-home/rbac.yaml +++ b/services/bstein-dev-home/rbac.yaml @@ -106,3 +106,34 @@ subjects: - kind: ServiceAccount name: bstein-dev-home namespace: bstein-dev-home +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: bstein-dev-home-wger-user-sync + namespace: health +rules: + - apiGroups: ["batch"] + resources: ["cronjobs"] + verbs: ["get"] + resourceNames: ["wger-user-sync"] + - apiGroups: ["batch"] + resources: ["jobs"] + verbs: ["create", "get", "list", "watch"] + - apiGroups: [""] + resources: ["pods"] + verbs: ["get", "list"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: bstein-dev-home-wger-user-sync + 
namespace: health +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: bstein-dev-home-wger-user-sync +subjects: + - kind: ServiceAccount + name: bstein-dev-home + namespace: bstein-dev-home diff --git a/services/keycloak/user-overrides-job.yaml b/services/keycloak/user-overrides-job.yaml index b865e5e..6b398dc 100644 --- a/services/keycloak/user-overrides-job.yaml +++ b/services/keycloak/user-overrides-job.yaml @@ -2,7 +2,7 @@ apiVersion: batch/v1 kind: Job metadata: - name: keycloak-user-overrides-5 + name: keycloak-user-overrides-6 namespace: sso spec: backoffLimit: 0 @@ -164,5 +164,39 @@ spec: ) if status not in (200, 204): raise SystemExit(f"Unexpected user update response: {status}") + + # Ensure the user is in the admin group for Vault access. + status, groups = http_json( + "GET", + f"{base_url}/admin/realms/{realm}/groups?search=admin", + access_token, + ) + if status != 200 or not isinstance(groups, list): + raise SystemExit("Unable to fetch groups") + group_id = "" + for item in groups: + if isinstance(item, dict) and item.get("name") == "admin": + group_id = item.get("id") or "" + break + if not group_id: + raise SystemExit("admin group not found") + status, memberships = http_json( + "GET", + f"{base_url}/admin/realms/{realm}/users/{user_id}/groups", + access_token, + ) + if status != 200 or not isinstance(memberships, list): + raise SystemExit("Unable to read user groups") + already = any( + isinstance(item, dict) and item.get("id") == group_id for item in memberships + ) + if not already: + status, _ = http_json( + "PUT", + f"{base_url}/admin/realms/{realm}/users/{user_id}/groups/{group_id}", + access_token, + ) + if status not in (200, 204): + raise SystemExit(f"Unexpected group update response: {status}") PY - volumeMounts: \ No newline at end of file + volumeMounts: diff --git a/services/vault/scripts/vault_k8s_auth_configure.sh b/services/vault/scripts/vault_k8s_auth_configure.sh index 3a721c1..325185d 100644 --- 
a/services/vault/scripts/vault_k8s_auth_configure.sh +++ b/services/vault/scripts/vault_k8s_auth_configure.sh @@ -142,6 +142,9 @@ path "kv/metadata/atlas/vault/*" { path "kv/data/*" { capabilities = ["create", "update", "read", "delete", "patch"] } +path "kv/metadata" { + capabilities = ["list"] +} path "kv/metadata/*" { capabilities = ["read", "list", "delete"] } @@ -154,6 +157,24 @@ path "kv/metadata/atlas/shared/*" { ' write_raw_policy "vault-admin" "${vault_admin_policy}" +dev_kv_policy=' +path "kv/metadata" { + capabilities = ["list"] +} +path "kv/metadata/atlas" { + capabilities = ["list"] +} +path "kv/metadata/atlas/shared" { + capabilities = ["list"] +} +path "kv/metadata/atlas/shared/*" { + capabilities = ["list"] +} +path "kv/data/atlas/shared/*" { + capabilities = ["read"] +} +' +write_raw_policy "dev-kv" "${dev_kv_policy}" log "writing role vault-admin" vault write "auth/kubernetes/role/vault-admin" \ bound_service_account_names="vault-admin" \ From cf5d7dfa009a4d98766807caf0629a06a96837c8 Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Fri, 16 Jan 2026 04:01:27 -0300 Subject: [PATCH 141/270] jellyfin: set traefik tls annotations --- services/jellyfin/ingress.yaml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/services/jellyfin/ingress.yaml b/services/jellyfin/ingress.yaml index 85a4d1d..9164b9f 100644 --- a/services/jellyfin/ingress.yaml +++ b/services/jellyfin/ingress.yaml @@ -6,6 +6,8 @@ metadata: namespace: jellyfin annotations: cert-manager.io/cluster-issuer: letsencrypt + traefik.ingress.kubernetes.io/router.entrypoints: websecure + traefik.ingress.kubernetes.io/router.tls: "true" spec: ingressClassName: traefik rules: From 9474ab97f27147d4792f9ac1da83c0e29a0e5207 Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Fri, 16 Jan 2026 07:53:04 -0300 Subject: [PATCH 142/270] maintenance: disable k3s traefik; keycloak portal admin roles --- ...portal-admin-client-secret-ensure-job.yaml | 67 ++++++++++++++++++- .../disable-k3s-traefik-daemonset.yaml | 
49 ++++++++++++++ .../disable-k3s-traefik-serviceaccount.yaml | 6 ++ services/maintenance/kustomization.yaml | 8 +++ .../scripts/disable_k3s_traefik.sh | 64 ++++++++++++++++++ 5 files changed, 193 insertions(+), 1 deletion(-) create mode 100644 services/maintenance/disable-k3s-traefik-daemonset.yaml create mode 100644 services/maintenance/disable-k3s-traefik-serviceaccount.yaml create mode 100644 services/maintenance/scripts/disable_k3s_traefik.sh diff --git a/services/keycloak/portal-admin-client-secret-ensure-job.yaml b/services/keycloak/portal-admin-client-secret-ensure-job.yaml index 350fc6e..af053a9 100644 --- a/services/keycloak/portal-admin-client-secret-ensure-job.yaml +++ b/services/keycloak/portal-admin-client-secret-ensure-job.yaml @@ -2,7 +2,7 @@ apiVersion: batch/v1 kind: Job metadata: - name: keycloak-portal-admin-secret-ensure-1 + name: keycloak-portal-admin-secret-ensure-2 namespace: sso spec: backoffLimit: 0 @@ -123,8 +123,18 @@ spec: if status != 200 or not isinstance(client_rep, dict): raise SystemExit(f"Unable to fetch client representation (status={status})") + updated = False + if client_rep.get("serviceAccountsEnabled") is not True: + client_rep["serviceAccountsEnabled"] = True + updated = True + if client_rep.get("publicClient") is not False: + client_rep["publicClient"] = False + updated = True if client_rep.get("secret") != client_secret: client_rep["secret"] = client_secret + updated = True + + if updated: status, resp = http_json( "PUT", f"{base_url}/admin/realms/{realm}/clients/{client_uuid}", @@ -134,5 +144,60 @@ spec: if status not in (200, 204): raise SystemExit(f"Client update failed (status={status}) resp={resp}") + # Ensure the portal admin service account can manage users. 
+ status, svc_user = http_json( + "GET", + f"{base_url}/admin/realms/{realm}/clients/{client_uuid}/service-account-user", + token, + ) + if status != 200 or not isinstance(svc_user, dict) or not svc_user.get("id"): + raise SystemExit(f"Unable to fetch service account user (status={status})") + svc_user_id = svc_user["id"] + + status, rm_clients = http_json( + "GET", + f"{base_url}/admin/realms/{realm}/clients?clientId=realm-management", + token, + ) + if status != 200 or not isinstance(rm_clients, list) or not rm_clients: + raise SystemExit("Unable to find realm-management client") + rm_uuid = rm_clients[0].get("id") + if not rm_uuid: + raise SystemExit("realm-management client has no id") + + wanted_roles = ("query-users", "view-users", "manage-users", "impersonation") + role_reps = [] + for role_name in wanted_roles: + status, role = http_json( + "GET", + f"{base_url}/admin/realms/{realm}/clients/{rm_uuid}/roles/{urllib.parse.quote(role_name)}", + token, + ) + if status != 200 or not isinstance(role, dict): + raise SystemExit(f"Unable to fetch role {role_name} (status={status})") + role_reps.append({"id": role.get("id"), "name": role.get("name")}) + + status, assigned = http_json( + "GET", + f"{base_url}/admin/realms/{realm}/users/{svc_user_id}/role-mappings/clients/{rm_uuid}", + token, + ) + assigned_names = set() + if status == 200 and isinstance(assigned, list): + for r in assigned: + if isinstance(r, dict) and r.get("name"): + assigned_names.add(r["name"]) + + missing = [r for r in role_reps if r.get("name") and r["name"] not in assigned_names] + if missing: + status, resp = http_json( + "POST", + f"{base_url}/admin/realms/{realm}/users/{svc_user_id}/role-mappings/clients/{rm_uuid}", + token, + missing, + ) + if status not in (200, 204): + raise SystemExit(f"Role mapping update failed (status={status}) resp={resp}") + print(f"OK: ensured secret for {client_id}") PY diff --git a/services/maintenance/disable-k3s-traefik-daemonset.yaml 
b/services/maintenance/disable-k3s-traefik-daemonset.yaml new file mode 100644 index 0000000..71f0ece --- /dev/null +++ b/services/maintenance/disable-k3s-traefik-daemonset.yaml @@ -0,0 +1,49 @@ +# services/maintenance/disable-k3s-traefik-daemonset.yaml +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: disable-k3s-traefik + namespace: maintenance +spec: + selector: + matchLabels: + app: disable-k3s-traefik + updateStrategy: + type: RollingUpdate + template: + metadata: + labels: + app: disable-k3s-traefik + spec: + serviceAccountName: disable-k3s-traefik + nodeSelector: + node-role.kubernetes.io/control-plane: "true" + tolerations: + - key: node-role.kubernetes.io/control-plane + operator: Exists + effect: NoSchedule + - key: node-role.kubernetes.io/master + operator: Exists + effect: NoSchedule + containers: + - name: disable-k3s-traefik + image: bitnami/kubectl@sha256:554ab88b1858e8424c55de37ad417b16f2a0e65d1607aa0f3fe3ce9b9f10b131 + command: ["/usr/bin/env", "bash"] + args: ["/scripts/disable_k3s_traefik.sh"] + securityContext: + privileged: true + runAsUser: 0 + volumeMounts: + - name: host-root + mountPath: /host + - name: script + mountPath: /scripts + readOnly: true + volumes: + - name: host-root + hostPath: + path: / + - name: script + configMap: + name: disable-k3s-traefik-script + defaultMode: 0555 diff --git a/services/maintenance/disable-k3s-traefik-serviceaccount.yaml b/services/maintenance/disable-k3s-traefik-serviceaccount.yaml new file mode 100644 index 0000000..37bf6dc --- /dev/null +++ b/services/maintenance/disable-k3s-traefik-serviceaccount.yaml @@ -0,0 +1,6 @@ +# services/maintenance/disable-k3s-traefik-serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: disable-k3s-traefik + namespace: maintenance diff --git a/services/maintenance/kustomization.yaml b/services/maintenance/kustomization.yaml index ce34afb..23d3f85 100644 --- a/services/maintenance/kustomization.yaml +++ b/services/maintenance/kustomization.yaml 
@@ -3,8 +3,10 @@ apiVersion: kustomize.config.k8s.io/v1beta1 kind: Kustomization resources: - namespace.yaml + - disable-k3s-traefik-serviceaccount.yaml - node-nofile-serviceaccount.yaml - pod-cleaner-rbac.yaml + - disable-k3s-traefik-daemonset.yaml - node-nofile-daemonset.yaml - pod-cleaner-cronjob.yaml - node-image-sweeper-serviceaccount.yaml @@ -12,6 +14,12 @@ resources: - image-sweeper-cronjob.yaml configMapGenerator: + - name: disable-k3s-traefik-script + namespace: maintenance + files: + - disable_k3s_traefik.sh=scripts/disable_k3s_traefik.sh + options: + disableNameSuffixHash: true - name: node-nofile-script namespace: maintenance files: diff --git a/services/maintenance/scripts/disable_k3s_traefik.sh b/services/maintenance/scripts/disable_k3s_traefik.sh new file mode 100644 index 0000000..7b8cebd --- /dev/null +++ b/services/maintenance/scripts/disable_k3s_traefik.sh @@ -0,0 +1,64 @@ +#!/usr/bin/env bash +set -euo pipefail + +host_root="/host" +env_file="${host_root}/etc/systemd/system/k3s.service.env" +manifest_dir="${host_root}/var/lib/rancher/k3s/server/manifests" + +changed=0 + +ensure_disable_flag() { + mkdir -p "$(dirname "${env_file}")" + if [ ! -f "${env_file}" ]; then + printf 'K3S_DISABLE=traefik\n' > "${env_file}" + changed=1 + return + fi + + if grep -q '^K3S_DISABLE=' "${env_file}"; then + current="$(grep '^K3S_DISABLE=' "${env_file}" | tail -n1 | cut -d= -f2-)" + current="$(printf '%s' "${current}" | sed 's/^\"//;s/\"$//' | tr -d ' ')" + if ! 
printf '%s' "${current}" | grep -qw "traefik"; then + if [ -z "${current}" ]; then + updated="traefik" + else + updated="${current},traefik" + fi + sed -i "s/^K3S_DISABLE=.*/K3S_DISABLE=${updated}/" "${env_file}" + changed=1 + fi + else + printf '\nK3S_DISABLE=traefik\n' >> "${env_file}" + changed=1 + fi +} + +remove_manifest() { + if [ -d "${manifest_dir}" ] && ls "${manifest_dir}"/traefik* >/dev/null 2>&1; then + rm -f "${manifest_dir}"/traefik*.yaml "${manifest_dir}"/traefik*.yml + changed=1 + fi +} + +restart_k3s() { + node_name="$(cat "${host_root}/etc/hostname" 2>/dev/null || hostname)" + delay=0 + case "${node_name}" in + *0b) delay=60 ;; + *0c) delay=120 ;; + esac + if [ "${delay}" -gt 0 ]; then + sleep "${delay}" + fi + chroot "${host_root}" /bin/systemctl daemon-reload || true + chroot "${host_root}" /bin/systemctl restart k3s +} + +ensure_disable_flag +remove_manifest + +if [ "${changed}" -eq 1 ]; then + restart_k3s +fi + +sleep infinity From 671d4d5dce5666f444861d4aaba5b3d4358a2ce8 Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Fri, 16 Jan 2026 09:27:22 -0300 Subject: [PATCH 143/270] maintenance: cleanup k3s traefik and wger attrs --- services/keycloak/realm-settings-job.yaml | 18 +++++++- .../maintenance/k3s-traefik-cleanup-job.yaml | 26 ++++++++++++ .../maintenance/k3s-traefik-cleanup-rbac.yaml | 41 +++++++++++++++++++ services/maintenance/kustomization.yaml | 8 ++++ .../scripts/k3s_traefik_cleanup.sh | 10 +++++ 5 files changed, 102 insertions(+), 1 deletion(-) create mode 100644 services/maintenance/k3s-traefik-cleanup-job.yaml create mode 100644 services/maintenance/k3s-traefik-cleanup-rbac.yaml create mode 100755 services/maintenance/scripts/k3s_traefik_cleanup.sh diff --git a/services/keycloak/realm-settings-job.yaml b/services/keycloak/realm-settings-job.yaml index e276c52..0bd78b5 100644 --- a/services/keycloak/realm-settings-job.yaml +++ b/services/keycloak/realm-settings-job.yaml @@ -2,7 +2,7 @@ apiVersion: batch/v1 kind: Job metadata: - 
name: keycloak-realm-settings-24 + name: keycloak-realm-settings-25 namespace: sso spec: backoffLimit: 0 @@ -234,6 +234,22 @@ spec: "permissions": {"view": ["admin"], "edit": ["admin"]}, "validations": {"length": {"max": 64}}, }, + { + "name": "wger_password", + "displayName": "Wger Password", + "multivalued": False, + "annotations": {"group": "user-metadata"}, + "permissions": {"view": ["admin"], "edit": ["admin"]}, + "validations": {"length": {"max": 255}}, + }, + { + "name": "wger_password_updated_at", + "displayName": "Wger Password Updated At", + "multivalued": False, + "annotations": {"group": "user-metadata"}, + "permissions": {"view": ["admin"], "edit": ["admin"]}, + "validations": {"length": {"max": 64}}, + }, ] def has_attr(name: str) -> bool: diff --git a/services/maintenance/k3s-traefik-cleanup-job.yaml b/services/maintenance/k3s-traefik-cleanup-job.yaml new file mode 100644 index 0000000..33fa7be --- /dev/null +++ b/services/maintenance/k3s-traefik-cleanup-job.yaml @@ -0,0 +1,26 @@ +# services/maintenance/k3s-traefik-cleanup-job.yaml +apiVersion: batch/v1 +kind: Job +metadata: + name: k3s-traefik-cleanup + namespace: maintenance +spec: + backoffLimit: 1 + template: + spec: + serviceAccountName: k3s-traefik-cleanup + restartPolicy: Never + containers: + - name: cleanup + image: bitnami/kubectl@sha256:554ab88b1858e8424c55de37ad417b16f2a0e65d1607aa0f3fe3ce9b9f10b131 + command: ["/usr/bin/env", "bash"] + args: ["/scripts/k3s_traefik_cleanup.sh"] + volumeMounts: + - name: script + mountPath: /scripts + readOnly: true + volumes: + - name: script + configMap: + name: k3s-traefik-cleanup-script + defaultMode: 0555 diff --git a/services/maintenance/k3s-traefik-cleanup-rbac.yaml b/services/maintenance/k3s-traefik-cleanup-rbac.yaml new file mode 100644 index 0000000..3b33da7 --- /dev/null +++ b/services/maintenance/k3s-traefik-cleanup-rbac.yaml @@ -0,0 +1,41 @@ +# services/maintenance/k3s-traefik-cleanup-rbac.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: 
+ name: k3s-traefik-cleanup + namespace: maintenance + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: k3s-traefik-cleanup +rules: + - apiGroups: ["helm.cattle.io"] + resources: ["helmcharts", "helmchartconfigs"] + verbs: ["get", "list", "delete"] + - apiGroups: [""] + resources: ["services", "serviceaccounts"] + verbs: ["get", "list", "delete"] + - apiGroups: ["apps"] + resources: ["deployments"] + verbs: ["get", "list", "delete"] + - apiGroups: ["rbac.authorization.k8s.io"] + resources: ["clusterroles", "clusterrolebindings"] + verbs: ["get", "list", "delete"] + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: k3s-traefik-cleanup +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: k3s-traefik-cleanup +subjects: + - kind: ServiceAccount + name: k3s-traefik-cleanup + namespace: maintenance diff --git a/services/maintenance/kustomization.yaml b/services/maintenance/kustomization.yaml index 23d3f85..8251b27 100644 --- a/services/maintenance/kustomization.yaml +++ b/services/maintenance/kustomization.yaml @@ -4,9 +4,11 @@ kind: Kustomization resources: - namespace.yaml - disable-k3s-traefik-serviceaccount.yaml + - k3s-traefik-cleanup-rbac.yaml - node-nofile-serviceaccount.yaml - pod-cleaner-rbac.yaml - disable-k3s-traefik-daemonset.yaml + - k3s-traefik-cleanup-job.yaml - node-nofile-daemonset.yaml - pod-cleaner-cronjob.yaml - node-image-sweeper-serviceaccount.yaml @@ -20,6 +22,12 @@ configMapGenerator: - disable_k3s_traefik.sh=scripts/disable_k3s_traefik.sh options: disableNameSuffixHash: true + - name: k3s-traefik-cleanup-script + namespace: maintenance + files: + - k3s_traefik_cleanup.sh=scripts/k3s_traefik_cleanup.sh + options: + disableNameSuffixHash: true - name: node-nofile-script namespace: maintenance files: diff --git a/services/maintenance/scripts/k3s_traefik_cleanup.sh b/services/maintenance/scripts/k3s_traefik_cleanup.sh new file mode 100755 index 
0000000..179d172 --- /dev/null +++ b/services/maintenance/scripts/k3s_traefik_cleanup.sh @@ -0,0 +1,10 @@ +#!/usr/bin/env bash +set -euo pipefail + +kubectl -n kube-system delete helmchart traefik traefik-crd --ignore-not-found +kubectl -n kube-system delete deployment traefik --ignore-not-found +kubectl -n kube-system delete service traefik --ignore-not-found +kubectl -n kube-system delete serviceaccount traefik helm-traefik helm-traefik-crd --ignore-not-found + +kubectl delete clusterrole traefik-ingress-controller traefik-kube-system --ignore-not-found +kubectl delete clusterrolebinding helm-kube-system-traefik helm-kube-system-traefik-crd traefik-ingress-controller traefik-kube-system --ignore-not-found From ef504eea8002b4ae76e2e40fdb5dafedd70f53da Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Fri, 16 Jan 2026 09:33:11 -0300 Subject: [PATCH 144/270] maintenance: allow traefik cleanup watch --- services/maintenance/k3s-traefik-cleanup-rbac.yaml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/services/maintenance/k3s-traefik-cleanup-rbac.yaml b/services/maintenance/k3s-traefik-cleanup-rbac.yaml index 3b33da7..45710c5 100644 --- a/services/maintenance/k3s-traefik-cleanup-rbac.yaml +++ b/services/maintenance/k3s-traefik-cleanup-rbac.yaml @@ -14,16 +14,16 @@ metadata: rules: - apiGroups: ["helm.cattle.io"] resources: ["helmcharts", "helmchartconfigs"] - verbs: ["get", "list", "delete"] + verbs: ["get", "list", "watch", "delete"] - apiGroups: [""] resources: ["services", "serviceaccounts"] - verbs: ["get", "list", "delete"] + verbs: ["get", "list", "watch", "delete"] - apiGroups: ["apps"] resources: ["deployments"] - verbs: ["get", "list", "delete"] + verbs: ["get", "list", "watch", "delete"] - apiGroups: ["rbac.authorization.k8s.io"] resources: ["clusterroles", "clusterrolebindings"] - verbs: ["get", "list", "delete"] + verbs: ["get", "list", "watch", "delete"] --- From 4faa039a8e60e4d5f0a6965b9b4ab9b09ed7eae4 Mon Sep 17 00:00:00 2001 
From: Brad Stein Date: Fri, 16 Jan 2026 09:38:14 -0300 Subject: [PATCH 145/270] maintenance: avoid blocking on k3s traefik cleanup --- services/maintenance/scripts/k3s_traefik_cleanup.sh | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/services/maintenance/scripts/k3s_traefik_cleanup.sh b/services/maintenance/scripts/k3s_traefik_cleanup.sh index 179d172..81ba337 100755 --- a/services/maintenance/scripts/k3s_traefik_cleanup.sh +++ b/services/maintenance/scripts/k3s_traefik_cleanup.sh @@ -1,10 +1,10 @@ #!/usr/bin/env bash set -euo pipefail -kubectl -n kube-system delete helmchart traefik traefik-crd --ignore-not-found -kubectl -n kube-system delete deployment traefik --ignore-not-found -kubectl -n kube-system delete service traefik --ignore-not-found -kubectl -n kube-system delete serviceaccount traefik helm-traefik helm-traefik-crd --ignore-not-found +kubectl -n kube-system delete helmchart traefik traefik-crd --ignore-not-found --wait=false +kubectl -n kube-system delete deployment traefik --ignore-not-found --wait=false +kubectl -n kube-system delete service traefik --ignore-not-found --wait=false +kubectl -n kube-system delete serviceaccount traefik helm-traefik helm-traefik-crd --ignore-not-found --wait=false -kubectl delete clusterrole traefik-ingress-controller traefik-kube-system --ignore-not-found -kubectl delete clusterrolebinding helm-kube-system-traefik helm-kube-system-traefik-crd traefik-ingress-controller traefik-kube-system --ignore-not-found +kubectl delete clusterrole traefik-ingress-controller traefik-kube-system --ignore-not-found --wait=false +kubectl delete clusterrolebinding helm-kube-system-traefik helm-kube-system-traefik-crd traefik-ingress-controller traefik-kube-system --ignore-not-found --wait=false From beb646f78f713745019944b447c2dc193934b6c1 Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Fri, 16 Jan 2026 09:43:01 -0300 Subject: [PATCH 146/270] jellyfin: move cache to emptyDir --- 
services/jellyfin/deployment.yaml | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/services/jellyfin/deployment.yaml b/services/jellyfin/deployment.yaml index e176931..fe84743 100644 --- a/services/jellyfin/deployment.yaml +++ b/services/jellyfin/deployment.yaml @@ -178,8 +178,7 @@ spec: persistentVolumeClaim: claimName: jellyfin-config-astreae - name: cache - persistentVolumeClaim: - claimName: jellyfin-cache-astreae + emptyDir: {} - name: media persistentVolumeClaim: claimName: jellyfin-media-asteria-new From 9f3d2db63d30700e0f6a8ede61bd4c1f1e1889ba Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Fri, 16 Jan 2026 11:14:48 -0300 Subject: [PATCH 147/270] platform: add cert-manager and align postgres vault path --- .../cert-manager-cleanup/kustomization.yaml | 17 ++++++ .../platform/cert-manager/kustomization.yaml | 20 +++++++ .../flux-system/platform/kustomization.yaml | 2 + .../cleanup/cert-manager-cleanup-job.yaml | 26 +++++++++ .../cleanup/cert-manager-cleanup-rbac.yaml | 58 +++++++++++++++++++ .../cert-manager/cleanup/kustomization.yaml | 15 +++++ .../cert-manager/cleanup/namespace.yaml | 5 ++ .../cleanup/scripts/cert_manager_cleanup.sh | 37 ++++++++++++ infrastructure/cert-manager/helmrelease.yaml | 41 +++++++++++++ .../cert-manager/kustomization.yaml | 6 ++ infrastructure/cert-manager/namespace.yaml | 5 ++ .../postgres/secretproviderclass.yaml | 2 +- .../vault/scripts/vault_k8s_auth_configure.sh | 2 + 13 files changed, 235 insertions(+), 1 deletion(-) create mode 100644 clusters/atlas/flux-system/platform/cert-manager-cleanup/kustomization.yaml create mode 100644 clusters/atlas/flux-system/platform/cert-manager/kustomization.yaml create mode 100644 infrastructure/cert-manager/cleanup/cert-manager-cleanup-job.yaml create mode 100644 infrastructure/cert-manager/cleanup/cert-manager-cleanup-rbac.yaml create mode 100644 infrastructure/cert-manager/cleanup/kustomization.yaml create mode 100644 infrastructure/cert-manager/cleanup/namespace.yaml 
create mode 100644 infrastructure/cert-manager/cleanup/scripts/cert_manager_cleanup.sh create mode 100644 infrastructure/cert-manager/helmrelease.yaml create mode 100644 infrastructure/cert-manager/kustomization.yaml create mode 100644 infrastructure/cert-manager/namespace.yaml diff --git a/clusters/atlas/flux-system/platform/cert-manager-cleanup/kustomization.yaml b/clusters/atlas/flux-system/platform/cert-manager-cleanup/kustomization.yaml new file mode 100644 index 0000000..230e22a --- /dev/null +++ b/clusters/atlas/flux-system/platform/cert-manager-cleanup/kustomization.yaml @@ -0,0 +1,17 @@ +# clusters/atlas/flux-system/platform/cert-manager-cleanup/kustomization.yaml +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: cert-manager-cleanup + namespace: flux-system +spec: + interval: 30m + path: ./infrastructure/cert-manager/cleanup + prune: true + force: true + sourceRef: + kind: GitRepository + name: flux-system + namespace: flux-system + targetNamespace: cert-manager + wait: true diff --git a/clusters/atlas/flux-system/platform/cert-manager/kustomization.yaml b/clusters/atlas/flux-system/platform/cert-manager/kustomization.yaml new file mode 100644 index 0000000..21a9dc9 --- /dev/null +++ b/clusters/atlas/flux-system/platform/cert-manager/kustomization.yaml @@ -0,0 +1,20 @@ +# clusters/atlas/flux-system/platform/cert-manager/kustomization.yaml +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: cert-manager + namespace: flux-system +spec: + interval: 30m + path: ./infrastructure/cert-manager + prune: true + force: true + sourceRef: + kind: GitRepository + name: flux-system + namespace: flux-system + targetNamespace: cert-manager + dependsOn: + - name: cert-manager-cleanup + - name: helm + wait: true diff --git a/clusters/atlas/flux-system/platform/kustomization.yaml b/clusters/atlas/flux-system/platform/kustomization.yaml index 83ca71e..03d9d43 100644 --- 
a/clusters/atlas/flux-system/platform/kustomization.yaml +++ b/clusters/atlas/flux-system/platform/kustomization.yaml @@ -4,6 +4,8 @@ kind: Kustomization resources: - core/kustomization.yaml - helm/kustomization.yaml + - cert-manager-cleanup/kustomization.yaml + - cert-manager/kustomization.yaml - metallb/kustomization.yaml - traefik/kustomization.yaml - gitops-ui/kustomization.yaml diff --git a/infrastructure/cert-manager/cleanup/cert-manager-cleanup-job.yaml b/infrastructure/cert-manager/cleanup/cert-manager-cleanup-job.yaml new file mode 100644 index 0000000..93cf53a --- /dev/null +++ b/infrastructure/cert-manager/cleanup/cert-manager-cleanup-job.yaml @@ -0,0 +1,26 @@ +# infrastructure/cert-manager/cleanup/cert-manager-cleanup-job.yaml +apiVersion: batch/v1 +kind: Job +metadata: + name: cert-manager-cleanup + namespace: cert-manager +spec: + backoffLimit: 1 + template: + spec: + serviceAccountName: cert-manager-cleanup + restartPolicy: Never + containers: + - name: cleanup + image: bitnami/kubectl@sha256:554ab88b1858e8424c55de37ad417b16f2a0e65d1607aa0f3fe3ce9b9f10b131 + command: ["/usr/bin/env", "bash"] + args: ["/scripts/cert_manager_cleanup.sh"] + volumeMounts: + - name: script + mountPath: /scripts + readOnly: true + volumes: + - name: script + configMap: + name: cert-manager-cleanup-script + defaultMode: 0555 diff --git a/infrastructure/cert-manager/cleanup/cert-manager-cleanup-rbac.yaml b/infrastructure/cert-manager/cleanup/cert-manager-cleanup-rbac.yaml new file mode 100644 index 0000000..ee275c5 --- /dev/null +++ b/infrastructure/cert-manager/cleanup/cert-manager-cleanup-rbac.yaml @@ -0,0 +1,58 @@ +# infrastructure/cert-manager/cleanup/cert-manager-cleanup-rbac.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: cert-manager-cleanup + namespace: cert-manager +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: cert-manager-cleanup +rules: + - apiGroups: [""] + resources: + - pods + - services + - endpoints + - 
configmaps + - secrets + - serviceaccounts + verbs: ["get", "list", "watch", "delete"] + - apiGroups: ["apps"] + resources: + - deployments + - daemonsets + - statefulsets + - replicasets + verbs: ["get", "list", "watch", "delete"] + - apiGroups: ["batch"] + resources: + - jobs + - cronjobs + verbs: ["get", "list", "watch", "delete"] + - apiGroups: ["rbac.authorization.k8s.io"] + resources: + - roles + - rolebindings + - clusterroles + - clusterrolebindings + verbs: ["get", "list", "watch", "delete"] + - apiGroups: ["admissionregistration.k8s.io"] + resources: + - validatingwebhookconfigurations + - mutatingwebhookconfigurations + verbs: ["get", "list", "watch", "delete"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: cert-manager-cleanup +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cert-manager-cleanup +subjects: + - kind: ServiceAccount + name: cert-manager-cleanup + namespace: cert-manager diff --git a/infrastructure/cert-manager/cleanup/kustomization.yaml b/infrastructure/cert-manager/cleanup/kustomization.yaml new file mode 100644 index 0000000..8aee369 --- /dev/null +++ b/infrastructure/cert-manager/cleanup/kustomization.yaml @@ -0,0 +1,15 @@ +# infrastructure/cert-manager/cleanup/kustomization.yaml +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - namespace.yaml + - cert-manager-cleanup-rbac.yaml + - cert-manager-cleanup-job.yaml + +configMapGenerator: + - name: cert-manager-cleanup-script + namespace: cert-manager + files: + - cert_manager_cleanup.sh=scripts/cert_manager_cleanup.sh + options: + disableNameSuffixHash: true diff --git a/infrastructure/cert-manager/cleanup/namespace.yaml b/infrastructure/cert-manager/cleanup/namespace.yaml new file mode 100644 index 0000000..762cc25 --- /dev/null +++ b/infrastructure/cert-manager/cleanup/namespace.yaml @@ -0,0 +1,5 @@ +# infrastructure/cert-manager/cleanup/namespace.yaml +apiVersion: v1 +kind: Namespace 
+metadata: + name: cert-manager diff --git a/infrastructure/cert-manager/cleanup/scripts/cert_manager_cleanup.sh b/infrastructure/cert-manager/cleanup/scripts/cert_manager_cleanup.sh new file mode 100644 index 0000000..9bdfc33 --- /dev/null +++ b/infrastructure/cert-manager/cleanup/scripts/cert_manager_cleanup.sh @@ -0,0 +1,37 @@ +#!/usr/bin/env bash +set -euo pipefail + +namespace="cert-manager" +selectors=( + "app.kubernetes.io/name=cert-manager" + "app.kubernetes.io/instance=cert-manager" + "app.kubernetes.io/instance=certmanager-prod" +) + +delete_namespaced() { + local selector="$1" + kubectl -n "${namespace}" delete deployment,daemonset,statefulset,replicaset \ + --selector "${selector}" --ignore-not-found --wait=false + kubectl -n "${namespace}" delete pod,service,endpoints,serviceaccount,configmap,secret \ + --selector "${selector}" --ignore-not-found --wait=false + kubectl -n "${namespace}" delete role,rolebinding \ + --selector "${selector}" --ignore-not-found --wait=false + kubectl -n "${namespace}" delete job,cronjob \ + --selector "${selector}" --ignore-not-found --wait=false +} + +delete_cluster_scoped() { + local selector="$1" + kubectl delete clusterrole,clusterrolebinding \ + --selector "${selector}" --ignore-not-found --wait=false + kubectl delete mutatingwebhookconfiguration,validatingwebhookconfiguration \ + --selector "${selector}" --ignore-not-found --wait=false +} + +for selector in "${selectors[@]}"; do + delete_namespaced "${selector}" + delete_cluster_scoped "${selector}" +done + +kubectl delete mutatingwebhookconfiguration cert-manager-webhook --ignore-not-found --wait=false +kubectl delete validatingwebhookconfiguration cert-manager-webhook --ignore-not-found --wait=false diff --git a/infrastructure/cert-manager/helmrelease.yaml b/infrastructure/cert-manager/helmrelease.yaml new file mode 100644 index 0000000..7fdf277 --- /dev/null +++ b/infrastructure/cert-manager/helmrelease.yaml @@ -0,0 +1,41 @@ +# 
infrastructure/cert-manager/helmrelease.yaml +apiVersion: helm.toolkit.fluxcd.io/v2 +kind: HelmRelease +metadata: + name: cert-manager + namespace: cert-manager +spec: + interval: 30m + chart: + spec: + chart: cert-manager + version: v1.17.0 + sourceRef: + kind: HelmRepository + name: jetstack + namespace: flux-system + install: + crds: CreateReplace + remediation: { retries: 3 } + timeout: 10m + upgrade: + crds: CreateReplace + remediation: + retries: 3 + remediateLastFailure: true + cleanupOnFail: true + timeout: 10m + values: + installCRDs: true + nodeSelector: + node-role.kubernetes.io/worker: "true" + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: hardware + operator: In + values: + - rpi5 + - rpi4 diff --git a/infrastructure/cert-manager/kustomization.yaml b/infrastructure/cert-manager/kustomization.yaml new file mode 100644 index 0000000..dc9d06d --- /dev/null +++ b/infrastructure/cert-manager/kustomization.yaml @@ -0,0 +1,6 @@ +# infrastructure/cert-manager/kustomization.yaml +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - namespace.yaml + - helmrelease.yaml diff --git a/infrastructure/cert-manager/namespace.yaml b/infrastructure/cert-manager/namespace.yaml new file mode 100644 index 0000000..8a43590 --- /dev/null +++ b/infrastructure/cert-manager/namespace.yaml @@ -0,0 +1,5 @@ +# infrastructure/cert-manager/namespace.yaml +apiVersion: v1 +kind: Namespace +metadata: + name: cert-manager diff --git a/infrastructure/postgres/secretproviderclass.yaml b/infrastructure/postgres/secretproviderclass.yaml index b9317a1..3a65075 100644 --- a/infrastructure/postgres/secretproviderclass.yaml +++ b/infrastructure/postgres/secretproviderclass.yaml @@ -11,5 +11,5 @@ spec: roleName: "postgres" objects: | - objectName: "postgres_password" - secretPath: "kv/data/postgres" + secretPath: "kv/data/atlas/postgres/postgres-db" secretKey: "POSTGRES_PASSWORD" diff 
--git a/services/vault/scripts/vault_k8s_auth_configure.sh b/services/vault/scripts/vault_k8s_auth_configure.sh index 325185d..0b2dca6 100644 --- a/services/vault/scripts/vault_k8s_auth_configure.sh +++ b/services/vault/scripts/vault_k8s_auth_configure.sh @@ -216,6 +216,8 @@ write_policy_and_role "health" "health" "health-vault-sync" \ "health/*" "" write_policy_and_role "longhorn" "longhorn-system" "longhorn-vault" \ "longhorn/*" "" +write_policy_and_role "postgres" "postgres" "postgres-vault" \ + "postgres/postgres-db" "" write_policy_and_role "vault" "vault" "vault" \ "vault/*" "" From 7c3006736c7e8207737f1f37c924c46d4e45d692 Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Fri, 16 Jan 2026 11:21:58 -0300 Subject: [PATCH 148/270] traefik: add CRDs --- infrastructure/traefik/crds.yaml | 3119 +++++++++++++++++++++ infrastructure/traefik/kustomization.yaml | 1 + 2 files changed, 3120 insertions(+) create mode 100644 infrastructure/traefik/crds.yaml diff --git a/infrastructure/traefik/crds.yaml b/infrastructure/traefik/crds.yaml new file mode 100644 index 0000000..21f26f9 --- /dev/null +++ b/infrastructure/traefik/crds.yaml @@ -0,0 +1,3119 @@ +# infrastructure/traefik/crds.yaml +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.1 + name: ingressroutes.traefik.io +spec: + group: traefik.io + names: + kind: IngressRoute + listKind: IngressRouteList + plural: ingressroutes + singular: ingressroute + scope: Namespaced + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + description: IngressRoute is the CRD implementation of a Traefik HTTP Router. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: IngressRouteSpec defines the desired state of IngressRoute. + properties: + entryPoints: + description: |- + EntryPoints defines the list of entry point names to bind to. + Entry points have to be configured in the static configuration. + More info: https://doc.traefik.io/traefik/v3.3/routing/entrypoints/ + Default: all. + items: + type: string + type: array + routes: + description: Routes defines the list of routes. + items: + description: Route holds the HTTP route configuration. + properties: + kind: + description: |- + Kind defines the kind of the route. + Rule is the only supported kind. + If not defined, defaults to Rule. + enum: + - Rule + type: string + match: + description: |- + Match defines the router's rule. + More info: https://doc.traefik.io/traefik/v3.3/routing/routers/#rule + type: string + middlewares: + description: |- + Middlewares defines the list of references to Middleware resources. + More info: https://doc.traefik.io/traefik/v3.3/routing/providers/kubernetes-crd/#kind-middleware + items: + description: MiddlewareRef is a reference to a Middleware + resource. + properties: + name: + description: Name defines the name of the referenced Middleware + resource. + type: string + namespace: + description: Namespace defines the namespace of the referenced + Middleware resource. + type: string + required: + - name + type: object + type: array + observability: + description: |- + Observability defines the observability configuration for a router. 
+ More info: https://doc.traefik.io/traefik/v3.2/routing/routers/#observability + properties: + accessLogs: + type: boolean + metrics: + type: boolean + tracing: + type: boolean + type: object + priority: + description: |- + Priority defines the router's priority. + More info: https://doc.traefik.io/traefik/v3.3/routing/routers/#priority + type: integer + services: + description: |- + Services defines the list of Service. + It can contain any combination of TraefikService and/or reference to a Kubernetes Service. + items: + description: Service defines an upstream HTTP service to proxy + traffic to. + properties: + healthCheck: + description: Healthcheck defines health checks for ExternalName + services. + properties: + followRedirects: + description: |- + FollowRedirects defines whether redirects should be followed during the health check calls. + Default: true + type: boolean + headers: + additionalProperties: + type: string + description: Headers defines custom headers to be + sent to the health check endpoint. + type: object + hostname: + description: Hostname defines the value of hostname + in the Host header of the health check request. + type: string + interval: + anyOf: + - type: integer + - type: string + description: |- + Interval defines the frequency of the health check calls. + Default: 30s + x-kubernetes-int-or-string: true + method: + description: Method defines the healthcheck method. + type: string + mode: + description: |- + Mode defines the health check mode. + If defined to grpc, will use the gRPC health check protocol to probe the server. + Default: http + type: string + path: + description: Path defines the server URL path for + the health check endpoint. + type: string + port: + description: Port defines the server URL port for + the health check endpoint. + type: integer + scheme: + description: Scheme replaces the server URL scheme + for the health check endpoint. 
+ type: string + status: + description: Status defines the expected HTTP status + code of the response to the health check request. + type: integer + timeout: + anyOf: + - type: integer + - type: string + description: |- + Timeout defines the maximum duration Traefik will wait for a health check request before considering the server unhealthy. + Default: 5s + x-kubernetes-int-or-string: true + type: object + kind: + description: Kind defines the kind of the Service. + enum: + - Service + - TraefikService + type: string + name: + description: |- + Name defines the name of the referenced Kubernetes Service or TraefikService. + The differentiation between the two is specified in the Kind field. + type: string + namespace: + description: Namespace defines the namespace of the referenced + Kubernetes Service or TraefikService. + type: string + nativeLB: + description: |- + NativeLB controls, when creating the load-balancer, + whether the LB's children are directly the pods IPs or if the only child is the Kubernetes Service clusterIP. + The Kubernetes Service itself does load-balance to the pods. + By default, NativeLB is false. + type: boolean + nodePortLB: + description: |- + NodePortLB controls, when creating the load-balancer, + whether the LB's children are directly the nodes internal IPs using the nodePort when the service type is NodePort. + It allows services to be reachable when Traefik runs externally from the Kubernetes cluster but within the same network of the nodes. + By default, NodePortLB is false. + type: boolean + passHostHeader: + description: |- + PassHostHeader defines whether the client Host header is forwarded to the upstream Kubernetes Service. + By default, passHostHeader is true. + type: boolean + port: + anyOf: + - type: integer + - type: string + description: |- + Port defines the port of a Kubernetes Service. + This can be a reference to a named port. 
+ x-kubernetes-int-or-string: true + responseForwarding: + description: ResponseForwarding defines how Traefik forwards + the response from the upstream Kubernetes Service to + the client. + properties: + flushInterval: + description: |- + FlushInterval defines the interval, in milliseconds, in between flushes to the client while copying the response body. + A negative value means to flush immediately after each write to the client. + This configuration is ignored when ReverseProxy recognizes a response as a streaming response; + for such responses, writes are flushed to the client immediately. + Default: 100ms + type: string + type: object + scheme: + description: |- + Scheme defines the scheme to use for the request to the upstream Kubernetes Service. + It defaults to https when Kubernetes Service port is 443, http otherwise. + type: string + serversTransport: + description: |- + ServersTransport defines the name of ServersTransport resource to use. + It allows to configure the transport between Traefik and your servers. + Can only be used on a Kubernetes Service. + type: string + sticky: + description: |- + Sticky defines the sticky sessions configuration. + More info: https://doc.traefik.io/traefik/v3.3/routing/services/#sticky-sessions + properties: + cookie: + description: Cookie defines the sticky cookie configuration. + properties: + httpOnly: + description: HTTPOnly defines whether the cookie + can be accessed by client-side APIs, such as + JavaScript. + type: boolean + maxAge: + description: |- + MaxAge defines the number of seconds until the cookie expires. + When set to a negative number, the cookie expires immediately. + When set to zero, the cookie never expires. + type: integer + name: + description: Name defines the Cookie name. + type: string + path: + description: |- + Path defines the path that must exist in the requested URL for the browser to send the Cookie header. + When not provided the cookie will be sent on every request to the domain. 
+ More info: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Set-Cookie#pathpath-value + type: string + sameSite: + description: |- + SameSite defines the same site policy. + More info: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Set-Cookie/SameSite + type: string + secure: + description: Secure defines whether the cookie + can only be transmitted over an encrypted connection + (i.e. HTTPS). + type: boolean + type: object + type: object + strategy: + description: |- + Strategy defines the load balancing strategy between the servers. + RoundRobin is the only supported value at the moment. + type: string + weight: + description: |- + Weight defines the weight and should only be specified when Name references a TraefikService object + (and to be precise, one that embeds a Weighted Round Robin). + type: integer + required: + - name + type: object + type: array + syntax: + description: |- + Syntax defines the router's rule syntax. + More info: https://doc.traefik.io/traefik/v3.3/routing/routers/#rulesyntax + type: string + required: + - match + type: object + type: array + tls: + description: |- + TLS defines the TLS configuration. + More info: https://doc.traefik.io/traefik/v3.3/routing/routers/#tls + properties: + certResolver: + description: |- + CertResolver defines the name of the certificate resolver to use. + Cert resolvers have to be configured in the static configuration. + More info: https://doc.traefik.io/traefik/v3.3/https/acme/#certificate-resolvers + type: string + domains: + description: |- + Domains defines the list of domains that will be used to issue certificates. + More info: https://doc.traefik.io/traefik/v3.3/routing/routers/#domains + items: + description: Domain holds a domain name with SANs. + properties: + main: + description: Main defines the main domain name. + type: string + sans: + description: SANs defines the subject alternative domain + names. 
+ items: + type: string + type: array + type: object + type: array + options: + description: |- + Options defines the reference to a TLSOption, that specifies the parameters of the TLS connection. + If not defined, the `default` TLSOption is used. + More info: https://doc.traefik.io/traefik/v3.3/https/tls/#tls-options + properties: + name: + description: |- + Name defines the name of the referenced TLSOption. + More info: https://doc.traefik.io/traefik/v3.3/routing/providers/kubernetes-crd/#kind-tlsoption + type: string + namespace: + description: |- + Namespace defines the namespace of the referenced TLSOption. + More info: https://doc.traefik.io/traefik/v3.3/routing/providers/kubernetes-crd/#kind-tlsoption + type: string + required: + - name + type: object + secretName: + description: SecretName is the name of the referenced Kubernetes + Secret to specify the certificate details. + type: string + store: + description: |- + Store defines the reference to the TLSStore, that will be used to store certificates. + Please note that only `default` TLSStore can be used. + properties: + name: + description: |- + Name defines the name of the referenced TLSStore. + More info: https://doc.traefik.io/traefik/v3.3/routing/providers/kubernetes-crd/#kind-tlsstore + type: string + namespace: + description: |- + Namespace defines the namespace of the referenced TLSStore. 
+ More info: https://doc.traefik.io/traefik/v3.3/routing/providers/kubernetes-crd/#kind-tlsstore + type: string + required: + - name + type: object + type: object + required: + - routes + type: object + required: + - metadata + - spec + type: object + served: true + storage: true +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.1 + name: ingressroutetcps.traefik.io +spec: + group: traefik.io + names: + kind: IngressRouteTCP + listKind: IngressRouteTCPList + plural: ingressroutetcps + singular: ingressroutetcp + scope: Namespaced + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + description: IngressRouteTCP is the CRD implementation of a Traefik TCP Router. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: IngressRouteTCPSpec defines the desired state of IngressRouteTCP. + properties: + entryPoints: + description: |- + EntryPoints defines the list of entry point names to bind to. + Entry points have to be configured in the static configuration. + More info: https://doc.traefik.io/traefik/v3.3/routing/entrypoints/ + Default: all. + items: + type: string + type: array + routes: + description: Routes defines the list of routes. 
+ items: + description: RouteTCP holds the TCP route configuration. + properties: + match: + description: |- + Match defines the router's rule. + More info: https://doc.traefik.io/traefik/v3.3/routing/routers/#rule_1 + type: string + middlewares: + description: Middlewares defines the list of references to MiddlewareTCP + resources. + items: + description: ObjectReference is a generic reference to a Traefik + resource. + properties: + name: + description: Name defines the name of the referenced Traefik + resource. + type: string + namespace: + description: Namespace defines the namespace of the referenced + Traefik resource. + type: string + required: + - name + type: object + type: array + priority: + description: |- + Priority defines the router's priority. + More info: https://doc.traefik.io/traefik/v3.3/routing/routers/#priority_1 + type: integer + services: + description: Services defines the list of TCP services. + items: + description: ServiceTCP defines an upstream TCP service to + proxy traffic to. + properties: + name: + description: Name defines the name of the referenced Kubernetes + Service. + type: string + namespace: + description: Namespace defines the namespace of the referenced + Kubernetes Service. + type: string + nativeLB: + description: |- + NativeLB controls, when creating the load-balancer, + whether the LB's children are directly the pods IPs or if the only child is the Kubernetes Service clusterIP. + The Kubernetes Service itself does load-balance to the pods. + By default, NativeLB is false. + type: boolean + nodePortLB: + description: |- + NodePortLB controls, when creating the load-balancer, + whether the LB's children are directly the nodes internal IPs using the nodePort when the service type is NodePort. + It allows services to be reachable when Traefik runs externally from the Kubernetes cluster but within the same network of the nodes. + By default, NodePortLB is false. 
+ type: boolean + port: + anyOf: + - type: integer + - type: string + description: |- + Port defines the port of a Kubernetes Service. + This can be a reference to a named port. + x-kubernetes-int-or-string: true + proxyProtocol: + description: |- + ProxyProtocol defines the PROXY protocol configuration. + More info: https://doc.traefik.io/traefik/v3.3/routing/services/#proxy-protocol + properties: + version: + description: Version defines the PROXY Protocol version + to use. + type: integer + type: object + serversTransport: + description: |- + ServersTransport defines the name of ServersTransportTCP resource to use. + It allows to configure the transport between Traefik and your servers. + Can only be used on a Kubernetes Service. + type: string + terminationDelay: + description: |- + TerminationDelay defines the deadline that the proxy sets, after one of its connected peers indicates + it has closed the writing capability of its connection, to close the reading capability as well, + hence fully terminating the connection. + It is a duration in milliseconds, defaulting to 100. + A negative value means an infinite deadline (i.e. the reading capability is never closed). + Deprecated: TerminationDelay will not be supported in future APIVersions, please use ServersTransport to configure the TerminationDelay instead. + type: integer + tls: + description: TLS determines whether to use TLS when dialing + with the backend. + type: boolean + weight: + description: Weight defines the weight used when balancing + requests between multiple Kubernetes Service. + type: integer + required: + - name + - port + type: object + type: array + syntax: + description: |- + Syntax defines the router's rule syntax. + More info: https://doc.traefik.io/traefik/v3.3/routing/routers/#rulesyntax_1 + type: string + required: + - match + type: object + type: array + tls: + description: |- + TLS defines the TLS configuration on a layer 4 / TCP Route. 
+ More info: https://doc.traefik.io/traefik/v3.3/routing/routers/#tls_1 + properties: + certResolver: + description: |- + CertResolver defines the name of the certificate resolver to use. + Cert resolvers have to be configured in the static configuration. + More info: https://doc.traefik.io/traefik/v3.3/https/acme/#certificate-resolvers + type: string + domains: + description: |- + Domains defines the list of domains that will be used to issue certificates. + More info: https://doc.traefik.io/traefik/v3.3/routing/routers/#domains + items: + description: Domain holds a domain name with SANs. + properties: + main: + description: Main defines the main domain name. + type: string + sans: + description: SANs defines the subject alternative domain + names. + items: + type: string + type: array + type: object + type: array + options: + description: |- + Options defines the reference to a TLSOption, that specifies the parameters of the TLS connection. + If not defined, the `default` TLSOption is used. + More info: https://doc.traefik.io/traefik/v3.3/https/tls/#tls-options + properties: + name: + description: Name defines the name of the referenced Traefik + resource. + type: string + namespace: + description: Namespace defines the namespace of the referenced + Traefik resource. + type: string + required: + - name + type: object + passthrough: + description: Passthrough defines whether a TLS router will terminate + the TLS connection. + type: boolean + secretName: + description: SecretName is the name of the referenced Kubernetes + Secret to specify the certificate details. + type: string + store: + description: |- + Store defines the reference to the TLSStore, that will be used to store certificates. + Please note that only `default` TLSStore can be used. + properties: + name: + description: Name defines the name of the referenced Traefik + resource. + type: string + namespace: + description: Namespace defines the namespace of the referenced + Traefik resource. 
+ type: string + required: + - name + type: object + type: object + required: + - routes + type: object + required: + - metadata + - spec + type: object + served: true + storage: true +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.1 + name: ingressrouteudps.traefik.io +spec: + group: traefik.io + names: + kind: IngressRouteUDP + listKind: IngressRouteUDPList + plural: ingressrouteudps + singular: ingressrouteudp + scope: Namespaced + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + description: IngressRouteUDP is a CRD implementation of a Traefik UDP Router. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: IngressRouteUDPSpec defines the desired state of a IngressRouteUDP. + properties: + entryPoints: + description: |- + EntryPoints defines the list of entry point names to bind to. + Entry points have to be configured in the static configuration. + More info: https://doc.traefik.io/traefik/v3.3/routing/entrypoints/ + Default: all. + items: + type: string + type: array + routes: + description: Routes defines the list of routes. + items: + description: RouteUDP holds the UDP route configuration. 
+ properties: + services: + description: Services defines the list of UDP services. + items: + description: ServiceUDP defines an upstream UDP service to + proxy traffic to. + properties: + name: + description: Name defines the name of the referenced Kubernetes + Service. + type: string + namespace: + description: Namespace defines the namespace of the referenced + Kubernetes Service. + type: string + nativeLB: + description: |- + NativeLB controls, when creating the load-balancer, + whether the LB's children are directly the pods IPs or if the only child is the Kubernetes Service clusterIP. + The Kubernetes Service itself does load-balance to the pods. + By default, NativeLB is false. + type: boolean + nodePortLB: + description: |- + NodePortLB controls, when creating the load-balancer, + whether the LB's children are directly the nodes internal IPs using the nodePort when the service type is NodePort. + It allows services to be reachable when Traefik runs externally from the Kubernetes cluster but within the same network of the nodes. + By default, NodePortLB is false. + type: boolean + port: + anyOf: + - type: integer + - type: string + description: |- + Port defines the port of a Kubernetes Service. + This can be a reference to a named port. + x-kubernetes-int-or-string: true + weight: + description: Weight defines the weight used when balancing + requests between multiple Kubernetes Service. 
+ type: integer + required: + - name + - port + type: object + type: array + type: object + type: array + required: + - routes + type: object + required: + - metadata + - spec + type: object + served: true + storage: true +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.1 + name: middlewares.traefik.io +spec: + group: traefik.io + names: + kind: Middleware + listKind: MiddlewareList + plural: middlewares + singular: middleware + scope: Namespaced + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + description: |- + Middleware is the CRD implementation of a Traefik Middleware. + More info: https://doc.traefik.io/traefik/v3.3/middlewares/http/overview/ + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: MiddlewareSpec defines the desired state of a Middleware. + properties: + addPrefix: + description: |- + AddPrefix holds the add prefix middleware configuration. + This middleware updates the path of a request before forwarding it. + More info: https://doc.traefik.io/traefik/v3.3/middlewares/http/addprefix/ + properties: + prefix: + description: |- + Prefix is the string to add before the current path in the requested URL. + It should include a leading slash (/). 
+ type: string + type: object + basicAuth: + description: |- + BasicAuth holds the basic auth middleware configuration. + This middleware restricts access to your services to known users. + More info: https://doc.traefik.io/traefik/v3.3/middlewares/http/basicauth/ + properties: + headerField: + description: |- + HeaderField defines a header field to store the authenticated user. + More info: https://doc.traefik.io/traefik/v3.3/middlewares/http/basicauth/#headerfield + type: string + realm: + description: |- + Realm allows the protected resources on a server to be partitioned into a set of protection spaces, each with its own authentication scheme. + Default: traefik. + type: string + removeHeader: + description: |- + RemoveHeader sets the removeHeader option to true to remove the authorization header before forwarding the request to your service. + Default: false. + type: boolean + secret: + description: Secret is the name of the referenced Kubernetes Secret + containing user credentials. + type: string + type: object + buffering: + description: |- + Buffering holds the buffering middleware configuration. + This middleware retries or limits the size of requests that can be forwarded to backends. + More info: https://doc.traefik.io/traefik/v3.3/middlewares/http/buffering/#maxrequestbodybytes + properties: + maxRequestBodyBytes: + description: |- + MaxRequestBodyBytes defines the maximum allowed body size for the request (in bytes). + If the request exceeds the allowed size, it is not forwarded to the service, and the client gets a 413 (Request Entity Too Large) response. + Default: 0 (no maximum). + format: int64 + type: integer + maxResponseBodyBytes: + description: |- + MaxResponseBodyBytes defines the maximum allowed response size from the service (in bytes). + If the response exceeds the allowed size, it is not forwarded to the client. The client gets a 500 (Internal Server Error) response instead. + Default: 0 (no maximum). 
+ format: int64 + type: integer + memRequestBodyBytes: + description: |- + MemRequestBodyBytes defines the threshold (in bytes) from which the request will be buffered on disk instead of in memory. + Default: 1048576 (1Mi). + format: int64 + type: integer + memResponseBodyBytes: + description: |- + MemResponseBodyBytes defines the threshold (in bytes) from which the response will be buffered on disk instead of in memory. + Default: 1048576 (1Mi). + format: int64 + type: integer + retryExpression: + description: |- + RetryExpression defines the retry conditions. + It is a logical combination of functions with operators AND (&&) and OR (||). + More info: https://doc.traefik.io/traefik/v3.3/middlewares/http/buffering/#retryexpression + type: string + type: object + chain: + description: |- + Chain holds the configuration of the chain middleware. + This middleware enables to define reusable combinations of other pieces of middleware. + More info: https://doc.traefik.io/traefik/v3.3/middlewares/http/chain/ + properties: + middlewares: + description: Middlewares is the list of MiddlewareRef which composes + the chain. + items: + description: MiddlewareRef is a reference to a Middleware resource. + properties: + name: + description: Name defines the name of the referenced Middleware + resource. + type: string + namespace: + description: Namespace defines the namespace of the referenced + Middleware resource. + type: string + required: + - name + type: object + type: array + type: object + circuitBreaker: + description: CircuitBreaker holds the circuit breaker configuration. + properties: + checkPeriod: + anyOf: + - type: integer + - type: string + description: CheckPeriod is the interval between successive checks + of the circuit breaker condition (when in standby state). + x-kubernetes-int-or-string: true + expression: + description: Expression is the condition that triggers the tripped + state. 
+ type: string + fallbackDuration: + anyOf: + - type: integer + - type: string + description: FallbackDuration is the duration for which the circuit + breaker will wait before trying to recover (from a tripped state). + x-kubernetes-int-or-string: true + recoveryDuration: + anyOf: + - type: integer + - type: string + description: RecoveryDuration is the duration for which the circuit + breaker will try to recover (as soon as it is in recovering + state). + x-kubernetes-int-or-string: true + responseCode: + description: ResponseCode is the status code that the circuit + breaker will return while it is in the open state. + type: integer + type: object + compress: + description: |- + Compress holds the compress middleware configuration. + This middleware compresses responses before sending them to the client, using gzip, brotli, or zstd compression. + More info: https://doc.traefik.io/traefik/v3.3/middlewares/http/compress/ + properties: + defaultEncoding: + description: DefaultEncoding specifies the default encoding if + the `Accept-Encoding` header is not in the request or contains + a wildcard (`*`). + type: string + encodings: + description: Encodings defines the list of supported compression + algorithms. + items: + type: string + type: array + excludedContentTypes: + description: |- + ExcludedContentTypes defines the list of content types to compare the Content-Type header of the incoming requests and responses before compressing. + `application/grpc` is always excluded. + items: + type: string + type: array + includedContentTypes: + description: IncludedContentTypes defines the list of content + types to compare the Content-Type header of the responses before + compressing. + items: + type: string + type: array + minResponseBodyBytes: + description: |- + MinResponseBodyBytes defines the minimum amount of bytes a response body must have to be compressed. + Default: 1024. 
+ type: integer + type: object + contentType: + description: |- + ContentType holds the content-type middleware configuration. + This middleware exists to enable the correct behavior until at least the default one can be changed in a future version. + properties: + autoDetect: + description: |- + AutoDetect specifies whether to let the `Content-Type` header, if it has not been set by the backend, + be automatically set to a value derived from the contents of the response. + Deprecated: AutoDetect option is deprecated, Content-Type middleware is only meant to be used to enable the content-type detection, please remove any usage of this option. + type: boolean + type: object + digestAuth: + description: |- + DigestAuth holds the digest auth middleware configuration. + This middleware restricts access to your services to known users. + More info: https://doc.traefik.io/traefik/v3.3/middlewares/http/digestauth/ + properties: + headerField: + description: |- + HeaderField defines a header field to store the authenticated user. + More info: https://doc.traefik.io/traefik/v3.3/middlewares/http/basicauth/#headerfield + type: string + realm: + description: |- + Realm allows the protected resources on a server to be partitioned into a set of protection spaces, each with its own authentication scheme. + Default: traefik. + type: string + removeHeader: + description: RemoveHeader defines whether to remove the authorization + header before forwarding the request to the backend. + type: boolean + secret: + description: Secret is the name of the referenced Kubernetes Secret + containing user credentials. + type: string + type: object + errors: + description: |- + ErrorPage holds the custom error middleware configuration. + This middleware returns a custom page in lieu of the default, according to configured ranges of HTTP Status codes. 
+ More info: https://doc.traefik.io/traefik/v3.3/middlewares/http/errorpages/ + properties: + query: + description: |- + Query defines the URL for the error page (hosted by service). + The {status} variable can be used in order to insert the status code in the URL. + type: string + service: + description: |- + Service defines the reference to a Kubernetes Service that will serve the error page. + More info: https://doc.traefik.io/traefik/v3.3/middlewares/http/errorpages/#service + properties: + healthCheck: + description: Healthcheck defines health checks for ExternalName + services. + properties: + followRedirects: + description: |- + FollowRedirects defines whether redirects should be followed during the health check calls. + Default: true + type: boolean + headers: + additionalProperties: + type: string + description: Headers defines custom headers to be sent + to the health check endpoint. + type: object + hostname: + description: Hostname defines the value of hostname in + the Host header of the health check request. + type: string + interval: + anyOf: + - type: integer + - type: string + description: |- + Interval defines the frequency of the health check calls. + Default: 30s + x-kubernetes-int-or-string: true + method: + description: Method defines the healthcheck method. + type: string + mode: + description: |- + Mode defines the health check mode. + If defined to grpc, will use the gRPC health check protocol to probe the server. + Default: http + type: string + path: + description: Path defines the server URL path for the + health check endpoint. + type: string + port: + description: Port defines the server URL port for the + health check endpoint. + type: integer + scheme: + description: Scheme replaces the server URL scheme for + the health check endpoint. + type: string + status: + description: Status defines the expected HTTP status code + of the response to the health check request. 
+ type: integer + timeout: + anyOf: + - type: integer + - type: string + description: |- + Timeout defines the maximum duration Traefik will wait for a health check request before considering the server unhealthy. + Default: 5s + x-kubernetes-int-or-string: true + type: object + kind: + description: Kind defines the kind of the Service. + enum: + - Service + - TraefikService + type: string + name: + description: |- + Name defines the name of the referenced Kubernetes Service or TraefikService. + The differentiation between the two is specified in the Kind field. + type: string + namespace: + description: Namespace defines the namespace of the referenced + Kubernetes Service or TraefikService. + type: string + nativeLB: + description: |- + NativeLB controls, when creating the load-balancer, + whether the LB's children are directly the pods IPs or if the only child is the Kubernetes Service clusterIP. + The Kubernetes Service itself does load-balance to the pods. + By default, NativeLB is false. + type: boolean + nodePortLB: + description: |- + NodePortLB controls, when creating the load-balancer, + whether the LB's children are directly the nodes internal IPs using the nodePort when the service type is NodePort. + It allows services to be reachable when Traefik runs externally from the Kubernetes cluster but within the same network of the nodes. + By default, NodePortLB is false. + type: boolean + passHostHeader: + description: |- + PassHostHeader defines whether the client Host header is forwarded to the upstream Kubernetes Service. + By default, passHostHeader is true. + type: boolean + port: + anyOf: + - type: integer + - type: string + description: |- + Port defines the port of a Kubernetes Service. + This can be a reference to a named port. + x-kubernetes-int-or-string: true + responseForwarding: + description: ResponseForwarding defines how Traefik forwards + the response from the upstream Kubernetes Service to the + client. 
+ properties: + flushInterval: + description: |- + FlushInterval defines the interval, in milliseconds, in between flushes to the client while copying the response body. + A negative value means to flush immediately after each write to the client. + This configuration is ignored when ReverseProxy recognizes a response as a streaming response; + for such responses, writes are flushed to the client immediately. + Default: 100ms + type: string + type: object + scheme: + description: |- + Scheme defines the scheme to use for the request to the upstream Kubernetes Service. + It defaults to https when Kubernetes Service port is 443, http otherwise. + type: string + serversTransport: + description: |- + ServersTransport defines the name of ServersTransport resource to use. + It allows to configure the transport between Traefik and your servers. + Can only be used on a Kubernetes Service. + type: string + sticky: + description: |- + Sticky defines the sticky sessions configuration. + More info: https://doc.traefik.io/traefik/v3.3/routing/services/#sticky-sessions + properties: + cookie: + description: Cookie defines the sticky cookie configuration. + properties: + httpOnly: + description: HTTPOnly defines whether the cookie can + be accessed by client-side APIs, such as JavaScript. + type: boolean + maxAge: + description: |- + MaxAge defines the number of seconds until the cookie expires. + When set to a negative number, the cookie expires immediately. + When set to zero, the cookie never expires. + type: integer + name: + description: Name defines the Cookie name. + type: string + path: + description: |- + Path defines the path that must exist in the requested URL for the browser to send the Cookie header. + When not provided the cookie will be sent on every request to the domain. + More info: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Set-Cookie#pathpath-value + type: string + sameSite: + description: |- + SameSite defines the same site policy. 
+ More info: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Set-Cookie/SameSite + type: string + secure: + description: Secure defines whether the cookie can + only be transmitted over an encrypted connection + (i.e. HTTPS). + type: boolean + type: object + type: object + strategy: + description: |- + Strategy defines the load balancing strategy between the servers. + RoundRobin is the only supported value at the moment. + type: string + weight: + description: |- + Weight defines the weight and should only be specified when Name references a TraefikService object + (and to be precise, one that embeds a Weighted Round Robin). + type: integer + required: + - name + type: object + status: + description: |- + Status defines which status or range of statuses should result in an error page. + It can be either a status code as a number (500), + as multiple comma-separated numbers (500,502), + as ranges by separating two codes with a dash (500-599), + or a combination of the two (404,418,500-599). + items: + type: string + type: array + type: object + forwardAuth: + description: |- + ForwardAuth holds the forward auth middleware configuration. + This middleware delegates the request authentication to a Service. + More info: https://doc.traefik.io/traefik/v3.3/middlewares/http/forwardauth/ + properties: + addAuthCookiesToResponse: + description: AddAuthCookiesToResponse defines the list of cookies + to copy from the authentication server response to the response. + items: + type: string + type: array + address: + description: Address defines the authentication server address. + type: string + authRequestHeaders: + description: |- + AuthRequestHeaders defines the list of the headers to copy from the request to the authentication server. + If not set or empty then all request headers are passed. 
+ items: + type: string + type: array + authResponseHeaders: + description: AuthResponseHeaders defines the list of headers to + copy from the authentication server response and set on forwarded + request, replacing any existing conflicting headers. + items: + type: string + type: array + authResponseHeadersRegex: + description: |- + AuthResponseHeadersRegex defines the regex to match headers to copy from the authentication server response and set on forwarded request, after stripping all headers that match the regex. + More info: https://doc.traefik.io/traefik/v3.3/middlewares/http/forwardauth/#authresponseheadersregex + type: string + forwardBody: + description: ForwardBody defines whether to send the request body + to the authentication server. + type: boolean + headerField: + description: |- + HeaderField defines a header field to store the authenticated user. + More info: https://doc.traefik.io/traefik/v3.3/middlewares/http/forwardauth/#headerfield + type: string + maxBodySize: + description: MaxBodySize defines the maximum body size in bytes + allowed to be forwarded to the authentication server. + format: int64 + type: integer + preserveLocationHeader: + description: PreserveLocationHeader defines whether to forward + the Location header to the client as is or prefix it with the + domain name of the authentication server. + type: boolean + tls: + description: TLS defines the configuration used to secure the + connection to the authentication server. + properties: + caOptional: + description: 'Deprecated: TLS client authentication is a server + side option (see https://github.com/golang/go/blob/740a490f71d026bb7d2d13cb8fa2d6d6e0572b70/src/crypto/tls/common.go#L634).' + type: boolean + caSecret: + description: |- + CASecret is the name of the referenced Kubernetes Secret containing the CA to validate the server certificate. + The CA certificate is extracted from key `tls.ca` or `ca.crt`. 
+ type: string + certSecret: + description: |- + CertSecret is the name of the referenced Kubernetes Secret containing the client certificate. + The client certificate is extracted from the keys `tls.crt` and `tls.key`. + type: string + insecureSkipVerify: + description: InsecureSkipVerify defines whether the server + certificates should be validated. + type: boolean + type: object + trustForwardHeader: + description: 'TrustForwardHeader defines whether to trust (ie: + forward) all X-Forwarded-* headers.' + type: boolean + type: object + grpcWeb: + description: |- + GrpcWeb holds the gRPC web middleware configuration. + This middleware converts a gRPC web request to an HTTP/2 gRPC request. + properties: + allowOrigins: + description: |- + AllowOrigins is a list of allowable origins. + Can also be a wildcard origin "*". + items: + type: string + type: array + type: object + headers: + description: |- + Headers holds the headers middleware configuration. + This middleware manages the requests and responses headers. + More info: https://doc.traefik.io/traefik/v3.3/middlewares/http/headers/#customrequestheaders + properties: + accessControlAllowCredentials: + description: AccessControlAllowCredentials defines whether the + request can include user credentials. + type: boolean + accessControlAllowHeaders: + description: AccessControlAllowHeaders defines the Access-Control-Request-Headers + values sent in preflight response. + items: + type: string + type: array + accessControlAllowMethods: + description: AccessControlAllowMethods defines the Access-Control-Request-Method + values sent in preflight response. + items: + type: string + type: array + accessControlAllowOriginList: + description: AccessControlAllowOriginList is a list of allowable + origins. Can also be a wildcard origin "*". 
+ items: + type: string + type: array + accessControlAllowOriginListRegex: + description: AccessControlAllowOriginListRegex is a list of allowable + origins written following the Regular Expression syntax (https://golang.org/pkg/regexp/). + items: + type: string + type: array + accessControlExposeHeaders: + description: AccessControlExposeHeaders defines the Access-Control-Expose-Headers + values sent in preflight response. + items: + type: string + type: array + accessControlMaxAge: + description: AccessControlMaxAge defines the time that a preflight + request may be cached. + format: int64 + type: integer + addVaryHeader: + description: AddVaryHeader defines whether the Vary header is + automatically added/updated when the AccessControlAllowOriginList + is set. + type: boolean + allowedHosts: + description: AllowedHosts defines the fully qualified list of + allowed domain names. + items: + type: string + type: array + browserXssFilter: + description: BrowserXSSFilter defines whether to add the X-XSS-Protection + header with the value 1; mode=block. + type: boolean + contentSecurityPolicy: + description: ContentSecurityPolicy defines the Content-Security-Policy + header value. + type: string + contentSecurityPolicyReportOnly: + description: ContentSecurityPolicyReportOnly defines the Content-Security-Policy-Report-Only + header value. + type: string + contentTypeNosniff: + description: ContentTypeNosniff defines whether to add the X-Content-Type-Options + header with the nosniff value. + type: boolean + customBrowserXSSValue: + description: |- + CustomBrowserXSSValue defines the X-XSS-Protection header value. + This overrides the BrowserXssFilter option. + type: string + customFrameOptionsValue: + description: |- + CustomFrameOptionsValue defines the X-Frame-Options header value. + This overrides the FrameDeny option. 
+ type: string + customRequestHeaders: + additionalProperties: + type: string + description: CustomRequestHeaders defines the header names and + values to apply to the request. + type: object + customResponseHeaders: + additionalProperties: + type: string + description: CustomResponseHeaders defines the header names and + values to apply to the response. + type: object + featurePolicy: + description: 'Deprecated: FeaturePolicy option is deprecated, + please use PermissionsPolicy instead.' + type: string + forceSTSHeader: + description: ForceSTSHeader defines whether to add the STS header + even when the connection is HTTP. + type: boolean + frameDeny: + description: FrameDeny defines whether to add the X-Frame-Options + header with the DENY value. + type: boolean + hostsProxyHeaders: + description: HostsProxyHeaders defines the header keys that may + hold a proxied hostname value for the request. + items: + type: string + type: array + isDevelopment: + description: |- + IsDevelopment defines whether to mitigate the unwanted effects of the AllowedHosts, SSL, and STS options when developing. + Usually testing takes place using HTTP, not HTTPS, and on localhost, not your production domain. + If you would like your development environment to mimic production with complete Host blocking, SSL redirects, + and STS headers, leave this as false. + type: boolean + permissionsPolicy: + description: |- + PermissionsPolicy defines the Permissions-Policy header value. + This allows sites to control browser features. + type: string + publicKey: + description: PublicKey is the public key that implements HPKP + to prevent MITM attacks with forged certificates. + type: string + referrerPolicy: + description: |- + ReferrerPolicy defines the Referrer-Policy header value. + This allows sites to control whether browsers forward the Referer header to other sites. 
+ type: string + sslForceHost: + description: 'Deprecated: SSLForceHost option is deprecated, please + use RedirectRegex instead.' + type: boolean + sslHost: + description: 'Deprecated: SSLHost option is deprecated, please + use RedirectRegex instead.' + type: string + sslProxyHeaders: + additionalProperties: + type: string + description: |- + SSLProxyHeaders defines the header keys with associated values that would indicate a valid HTTPS request. + It can be useful when using other proxies (example: "X-Forwarded-Proto": "https"). + type: object + sslRedirect: + description: 'Deprecated: SSLRedirect option is deprecated, please + use EntryPoint redirection or RedirectScheme instead.' + type: boolean + sslTemporaryRedirect: + description: 'Deprecated: SSLTemporaryRedirect option is deprecated, + please use EntryPoint redirection or RedirectScheme instead.' + type: boolean + stsIncludeSubdomains: + description: STSIncludeSubdomains defines whether the includeSubDomains + directive is appended to the Strict-Transport-Security header. + type: boolean + stsPreload: + description: STSPreload defines whether the preload flag is appended + to the Strict-Transport-Security header. + type: boolean + stsSeconds: + description: |- + STSSeconds defines the max-age of the Strict-Transport-Security header. + If set to 0, the header is not set. + format: int64 + type: integer + type: object + inFlightReq: + description: |- + InFlightReq holds the in-flight request middleware configuration. + This middleware limits the number of requests being processed and served concurrently. + More info: https://doc.traefik.io/traefik/v3.3/middlewares/http/inflightreq/ + properties: + amount: + description: |- + Amount defines the maximum amount of allowed simultaneous in-flight request. + The middleware responds with HTTP 429 Too Many Requests if there are already amount requests in progress (based on the same sourceCriterion strategy). 
+ format: int64 + type: integer + sourceCriterion: + description: |- + SourceCriterion defines what criterion is used to group requests as originating from a common source. + If several strategies are defined at the same time, an error will be raised. + If none are set, the default is to use the requestHost. + More info: https://doc.traefik.io/traefik/v3.3/middlewares/http/inflightreq/#sourcecriterion + properties: + ipStrategy: + description: |- + IPStrategy holds the IP strategy configuration used by Traefik to determine the client IP. + More info: https://doc.traefik.io/traefik/v3.3/middlewares/http/ipallowlist/#ipstrategy + properties: + depth: + description: Depth tells Traefik to use the X-Forwarded-For + header and take the IP located at the depth position + (starting from the right). + type: integer + excludedIPs: + description: ExcludedIPs configures Traefik to scan the + X-Forwarded-For header and select the first IP not in + the list. + items: + type: string + type: array + ipv6Subnet: + description: IPv6Subnet configures Traefik to consider + all IPv6 addresses from the defined subnet as originating + from the same IP. Applies to RemoteAddrStrategy and + DepthStrategy. + type: integer + type: object + requestHeaderName: + description: RequestHeaderName defines the name of the header + used to group incoming requests. + type: string + requestHost: + description: RequestHost defines whether to consider the request + Host as the source. + type: boolean + type: object + type: object + ipAllowList: + description: |- + IPAllowList holds the IP allowlist middleware configuration. + This middleware limits allowed requests based on the client IP. + More info: https://doc.traefik.io/traefik/v3.3/middlewares/http/ipallowlist/ + properties: + ipStrategy: + description: |- + IPStrategy holds the IP strategy configuration used by Traefik to determine the client IP. 
+ More info: https://doc.traefik.io/traefik/v3.3/middlewares/http/ipallowlist/#ipstrategy + properties: + depth: + description: Depth tells Traefik to use the X-Forwarded-For + header and take the IP located at the depth position (starting + from the right). + type: integer + excludedIPs: + description: ExcludedIPs configures Traefik to scan the X-Forwarded-For + header and select the first IP not in the list. + items: + type: string + type: array + ipv6Subnet: + description: IPv6Subnet configures Traefik to consider all + IPv6 addresses from the defined subnet as originating from + the same IP. Applies to RemoteAddrStrategy and DepthStrategy. + type: integer + type: object + rejectStatusCode: + description: |- + RejectStatusCode defines the HTTP status code used for refused requests. + If not set, the default is 403 (Forbidden). + type: integer + sourceRange: + description: SourceRange defines the set of allowed IPs (or ranges + of allowed IPs by using CIDR notation). + items: + type: string + type: array + type: object + ipWhiteList: + description: 'Deprecated: please use IPAllowList instead.' + properties: + ipStrategy: + description: |- + IPStrategy holds the IP strategy configuration used by Traefik to determine the client IP. + More info: https://doc.traefik.io/traefik/v3.3/middlewares/http/ipallowlist/#ipstrategy + properties: + depth: + description: Depth tells Traefik to use the X-Forwarded-For + header and take the IP located at the depth position (starting + from the right). + type: integer + excludedIPs: + description: ExcludedIPs configures Traefik to scan the X-Forwarded-For + header and select the first IP not in the list. + items: + type: string + type: array + ipv6Subnet: + description: IPv6Subnet configures Traefik to consider all + IPv6 addresses from the defined subnet as originating from + the same IP. Applies to RemoteAddrStrategy and DepthStrategy. 
+ type: integer + type: object + sourceRange: + description: SourceRange defines the set of allowed IPs (or ranges + of allowed IPs by using CIDR notation). Required. + items: + type: string + type: array + type: object + passTLSClientCert: + description: |- + PassTLSClientCert holds the pass TLS client cert middleware configuration. + This middleware adds the selected data from the passed client TLS certificate to a header. + More info: https://doc.traefik.io/traefik/v3.3/middlewares/http/passtlsclientcert/ + properties: + info: + description: Info selects the specific client certificate details + you want to add to the X-Forwarded-Tls-Client-Cert-Info header. + properties: + issuer: + description: Issuer defines the client certificate issuer + details to add to the X-Forwarded-Tls-Client-Cert-Info header. + properties: + commonName: + description: CommonName defines whether to add the organizationalUnit + information into the issuer. + type: boolean + country: + description: Country defines whether to add the country + information into the issuer. + type: boolean + domainComponent: + description: DomainComponent defines whether to add the + domainComponent information into the issuer. + type: boolean + locality: + description: Locality defines whether to add the locality + information into the issuer. + type: boolean + organization: + description: Organization defines whether to add the organization + information into the issuer. + type: boolean + province: + description: Province defines whether to add the province + information into the issuer. + type: boolean + serialNumber: + description: SerialNumber defines whether to add the serialNumber + information into the issuer. + type: boolean + type: object + notAfter: + description: NotAfter defines whether to add the Not After + information from the Validity part. + type: boolean + notBefore: + description: NotBefore defines whether to add the Not Before + information from the Validity part. 
+ type: boolean + sans: + description: Sans defines whether to add the Subject Alternative + Name information from the Subject Alternative Name part. + type: boolean + serialNumber: + description: SerialNumber defines whether to add the client + serialNumber information. + type: boolean + subject: + description: Subject defines the client certificate subject + details to add to the X-Forwarded-Tls-Client-Cert-Info header. + properties: + commonName: + description: CommonName defines whether to add the organizationalUnit + information into the subject. + type: boolean + country: + description: Country defines whether to add the country + information into the subject. + type: boolean + domainComponent: + description: DomainComponent defines whether to add the + domainComponent information into the subject. + type: boolean + locality: + description: Locality defines whether to add the locality + information into the subject. + type: boolean + organization: + description: Organization defines whether to add the organization + information into the subject. + type: boolean + organizationalUnit: + description: OrganizationalUnit defines whether to add + the organizationalUnit information into the subject. + type: boolean + province: + description: Province defines whether to add the province + information into the subject. + type: boolean + serialNumber: + description: SerialNumber defines whether to add the serialNumber + information into the subject. + type: boolean + type: object + type: object + pem: + description: PEM sets the X-Forwarded-Tls-Client-Cert header with + the certificate. + type: boolean + type: object + plugin: + additionalProperties: + x-kubernetes-preserve-unknown-fields: true + description: |- + Plugin defines the middleware plugin configuration. + More info: https://doc.traefik.io/traefik/plugins/ + type: object + rateLimit: + description: |- + RateLimit holds the rate limit configuration. 
+ This middleware ensures that services will receive a fair amount of requests, and allows one to define what fair is. + More info: https://doc.traefik.io/traefik/v3.3/middlewares/http/ratelimit/ + properties: + average: + description: |- + Average is the maximum rate, by default in requests/s, allowed for the given source. + It defaults to 0, which means no rate limiting. + The rate is actually defined by dividing Average by Period. So for a rate below 1req/s, + one needs to define a Period larger than a second. + format: int64 + type: integer + burst: + description: |- + Burst is the maximum number of requests allowed to arrive in the same arbitrarily small period of time. + It defaults to 1. + format: int64 + type: integer + period: + anyOf: + - type: integer + - type: string + description: |- + Period, in combination with Average, defines the actual maximum rate, such as: + r = Average / Period. It defaults to a second. + x-kubernetes-int-or-string: true + sourceCriterion: + description: |- + SourceCriterion defines what criterion is used to group requests as originating from a common source. + If several strategies are defined at the same time, an error will be raised. + If none are set, the default is to use the request's remote address field (as an ipStrategy). + properties: + ipStrategy: + description: |- + IPStrategy holds the IP strategy configuration used by Traefik to determine the client IP. + More info: https://doc.traefik.io/traefik/v3.3/middlewares/http/ipallowlist/#ipstrategy + properties: + depth: + description: Depth tells Traefik to use the X-Forwarded-For + header and take the IP located at the depth position + (starting from the right). + type: integer + excludedIPs: + description: ExcludedIPs configures Traefik to scan the + X-Forwarded-For header and select the first IP not in + the list. 
+ items: + type: string + type: array + ipv6Subnet: + description: IPv6Subnet configures Traefik to consider + all IPv6 addresses from the defined subnet as originating + from the same IP. Applies to RemoteAddrStrategy and + DepthStrategy. + type: integer + type: object + requestHeaderName: + description: RequestHeaderName defines the name of the header + used to group incoming requests. + type: string + requestHost: + description: RequestHost defines whether to consider the request + Host as the source. + type: boolean + type: object + type: object + redirectRegex: + description: |- + RedirectRegex holds the redirect regex middleware configuration. + This middleware redirects a request using regex matching and replacement. + More info: https://doc.traefik.io/traefik/v3.3/middlewares/http/redirectregex/#regex + properties: + permanent: + description: Permanent defines whether the redirection is permanent + (301). + type: boolean + regex: + description: Regex defines the regex used to match and capture + elements from the request URL. + type: string + replacement: + description: Replacement defines how to modify the URL to have + the new target URL. + type: string + type: object + redirectScheme: + description: |- + RedirectScheme holds the redirect scheme middleware configuration. + This middleware redirects requests from a scheme/port to another. + More info: https://doc.traefik.io/traefik/v3.3/middlewares/http/redirectscheme/ + properties: + permanent: + description: Permanent defines whether the redirection is permanent + (301). + type: boolean + port: + description: Port defines the port of the new URL. + type: string + scheme: + description: Scheme defines the scheme of the new URL. + type: string + type: object + replacePath: + description: |- + ReplacePath holds the replace path middleware configuration. + This middleware replaces the path of the request URL and store the original path in an X-Replaced-Path header. 
+ More info: https://doc.traefik.io/traefik/v3.3/middlewares/http/replacepath/ + properties: + path: + description: Path defines the path to use as replacement in the + request URL. + type: string + type: object + replacePathRegex: + description: |- + ReplacePathRegex holds the replace path regex middleware configuration. + This middleware replaces the path of a URL using regex matching and replacement. + More info: https://doc.traefik.io/traefik/v3.3/middlewares/http/replacepathregex/ + properties: + regex: + description: Regex defines the regular expression used to match + and capture the path from the request URL. + type: string + replacement: + description: Replacement defines the replacement path format, + which can include captured variables. + type: string + type: object + retry: + description: |- + Retry holds the retry middleware configuration. + This middleware reissues requests a given number of times to a backend server if that server does not reply. + As soon as the server answers, the middleware stops retrying, regardless of the response status. + More info: https://doc.traefik.io/traefik/v3.3/middlewares/http/retry/ + properties: + attempts: + description: Attempts defines how many times the request should + be retried. + type: integer + initialInterval: + anyOf: + - type: integer + - type: string + description: |- + InitialInterval defines the first wait time in the exponential backoff series. + The maximum interval is calculated as twice the initialInterval. + If unspecified, requests will be retried immediately. + The value of initialInterval should be provided in seconds or as a valid duration format, + see https://pkg.go.dev/time#ParseDuration. + x-kubernetes-int-or-string: true + type: object + stripPrefix: + description: |- + StripPrefix holds the strip prefix middleware configuration. + This middleware removes the specified prefixes from the URL path. 
+ More info: https://doc.traefik.io/traefik/v3.3/middlewares/http/stripprefix/ + properties: + forceSlash: + description: |- + Deprecated: ForceSlash option is deprecated, please remove any usage of this option. + ForceSlash ensures that the resulting stripped path is not the empty string, by replacing it with / when necessary. + Default: true. + type: boolean + prefixes: + description: Prefixes defines the prefixes to strip from the request + URL. + items: + type: string + type: array + type: object + stripPrefixRegex: + description: |- + StripPrefixRegex holds the strip prefix regex middleware configuration. + This middleware removes the matching prefixes from the URL path. + More info: https://doc.traefik.io/traefik/v3.3/middlewares/http/stripprefixregex/ + properties: + regex: + description: Regex defines the regular expression to match the + path prefix from the request URL. + items: + type: string + type: array + type: object + type: object + required: + - metadata + - spec + type: object + served: true + storage: true +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.1 + name: middlewaretcps.traefik.io +spec: + group: traefik.io + names: + kind: MiddlewareTCP + listKind: MiddlewareTCPList + plural: middlewaretcps + singular: middlewaretcp + scope: Namespaced + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + description: |- + MiddlewareTCP is the CRD implementation of a Traefik TCP middleware. + More info: https://doc.traefik.io/traefik/v3.3/middlewares/overview/ + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: MiddlewareTCPSpec defines the desired state of a MiddlewareTCP. + properties: + inFlightConn: + description: InFlightConn defines the InFlightConn middleware configuration. + properties: + amount: + description: |- + Amount defines the maximum amount of allowed simultaneous connections. + The middleware closes the connection if there are already amount connections opened. + format: int64 + type: integer + type: object + ipAllowList: + description: |- + IPAllowList defines the IPAllowList middleware configuration. + This middleware accepts/refuses connections based on the client IP. + More info: https://doc.traefik.io/traefik/v3.3/middlewares/tcp/ipallowlist/ + properties: + sourceRange: + description: SourceRange defines the allowed IPs (or ranges of + allowed IPs by using CIDR notation). + items: + type: string + type: array + type: object + ipWhiteList: + description: |- + IPWhiteList defines the IPWhiteList middleware configuration. + This middleware accepts/refuses connections based on the client IP. + Deprecated: please use IPAllowList instead. + More info: https://doc.traefik.io/traefik/v3.3/middlewares/tcp/ipwhitelist/ + properties: + sourceRange: + description: SourceRange defines the allowed IPs (or ranges of + allowed IPs by using CIDR notation). 
+ items: + type: string + type: array + type: object + type: object + required: + - metadata + - spec + type: object + served: true + storage: true +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.1 + name: serverstransports.traefik.io +spec: + group: traefik.io + names: + kind: ServersTransport + listKind: ServersTransportList + plural: serverstransports + singular: serverstransport + scope: Namespaced + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + description: |- + ServersTransport is the CRD implementation of a ServersTransport. + If no serversTransport is specified, the default@internal will be used. + The default@internal serversTransport is created from the static configuration. + More info: https://doc.traefik.io/traefik/v3.3/routing/services/#serverstransport_1 + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ServersTransportSpec defines the desired state of a ServersTransport. + properties: + certificatesSecrets: + description: CertificatesSecrets defines a list of secret storing + client certificates for mTLS. + items: + type: string + type: array + disableHTTP2: + description: DisableHTTP2 disables HTTP/2 for connections with backend + servers. 
+ type: boolean + forwardingTimeouts: + description: ForwardingTimeouts defines the timeouts for requests + forwarded to the backend servers. + properties: + dialTimeout: + anyOf: + - type: integer + - type: string + description: DialTimeout is the amount of time to wait until a + connection to a backend server can be established. + x-kubernetes-int-or-string: true + idleConnTimeout: + anyOf: + - type: integer + - type: string + description: IdleConnTimeout is the maximum period for which an + idle HTTP keep-alive connection will remain open before closing + itself. + x-kubernetes-int-or-string: true + pingTimeout: + anyOf: + - type: integer + - type: string + description: PingTimeout is the timeout after which the HTTP/2 + connection will be closed if a response to ping is not received. + x-kubernetes-int-or-string: true + readIdleTimeout: + anyOf: + - type: integer + - type: string + description: ReadIdleTimeout is the timeout after which a health + check using ping frame will be carried out if no frame is received + on the HTTP/2 connection. + x-kubernetes-int-or-string: true + responseHeaderTimeout: + anyOf: + - type: integer + - type: string + description: ResponseHeaderTimeout is the amount of time to wait + for a server's response headers after fully writing the request + (including its body, if any). + x-kubernetes-int-or-string: true + type: object + insecureSkipVerify: + description: InsecureSkipVerify disables SSL certificate verification. + type: boolean + maxIdleConnsPerHost: + description: MaxIdleConnsPerHost controls the maximum idle (keep-alive) + to keep per-host. + type: integer + peerCertURI: + description: PeerCertURI defines the peer cert URI used to match against + SAN URI during the peer certificate verification. + type: string + rootCAsSecrets: + description: RootCAsSecrets defines a list of CA secret used to validate + self-signed certificate. 
+ items: + type: string + type: array + serverName: + description: ServerName defines the server name used to contact the + server. + type: string + spiffe: + description: Spiffe defines the SPIFFE configuration. + properties: + ids: + description: IDs defines the allowed SPIFFE IDs (takes precedence + over the SPIFFE TrustDomain). + items: + type: string + type: array + trustDomain: + description: TrustDomain defines the allowed SPIFFE trust domain. + type: string + type: object + type: object + required: + - metadata + - spec + type: object + served: true + storage: true +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.1 + name: serverstransporttcps.traefik.io +spec: + group: traefik.io + names: + kind: ServersTransportTCP + listKind: ServersTransportTCPList + plural: serverstransporttcps + singular: serverstransporttcp + scope: Namespaced + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + description: |- + ServersTransportTCP is the CRD implementation of a TCPServersTransport. + If no tcpServersTransport is specified, a default one named default@internal will be used. + The default@internal tcpServersTransport can be configured in the static configuration. + More info: https://doc.traefik.io/traefik/v3.3/routing/services/#serverstransport_3 + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+              More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+            type: string
+          metadata:
+            type: object
+          spec:
+            description: ServersTransportTCPSpec defines the desired state of a ServersTransportTCP.
+            properties:
+              dialKeepAlive:
+                anyOf:
+                - type: integer
+                - type: string
+                description: DialKeepAlive is the interval between keep-alive probes
+                  for an active network connection. If zero, keep-alive probes are
+                  sent with a default value (currently 15 seconds), if supported by
+                  the protocol and operating system. Network protocols or operating
+                  systems that do not support keep-alives ignore this field. If negative,
+                  keep-alive probes are disabled.
+                x-kubernetes-int-or-string: true
+              dialTimeout:
+                anyOf:
+                - type: integer
+                - type: string
+                description: DialTimeout is the amount of time to wait until a connection
+                  to a backend server can be established.
+                x-kubernetes-int-or-string: true
+              terminationDelay:
+                anyOf:
+                - type: integer
+                - type: string
+                description: TerminationDelay defines the delay to wait before fully
+                  terminating the connection, after one connected peer has closed
+                  its writing capability.
+                x-kubernetes-int-or-string: true
+              tls:
+                description: TLS defines the TLS configuration
+                properties:
+                  certificatesSecrets:
+                    description: CertificatesSecrets defines a list of secret storing
+                      client certificates for mTLS.
+                    items:
+                      type: string
+                    type: array
+                  insecureSkipVerify:
+                    description: InsecureSkipVerify disables TLS certificate verification.
+                    type: boolean
+                  peerCertURI:
+                    description: |-
+                      PeerCertURI defines the peer cert URI used to match against SAN URI
+                      during the peer certificate verification.
+                    type: string
+                  rootCAsSecrets:
+                    description: RootCAsSecrets defines a list of CA secret used to
+                      validate self-signed certificates.
+ items: + type: string + type: array + serverName: + description: ServerName defines the server name used to contact + the server. + type: string + spiffe: + description: Spiffe defines the SPIFFE configuration. + properties: + ids: + description: IDs defines the allowed SPIFFE IDs (takes precedence + over the SPIFFE TrustDomain). + items: + type: string + type: array + trustDomain: + description: TrustDomain defines the allowed SPIFFE trust + domain. + type: string + type: object + type: object + type: object + required: + - metadata + - spec + type: object + served: true + storage: true +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.1 + name: tlsoptions.traefik.io +spec: + group: traefik.io + names: + kind: TLSOption + listKind: TLSOptionList + plural: tlsoptions + singular: tlsoption + scope: Namespaced + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + description: |- + TLSOption is the CRD implementation of a Traefik TLS Option, allowing to configure some parameters of the TLS connection. + More info: https://doc.traefik.io/traefik/v3.3/https/tls/#tls-options + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: TLSOptionSpec defines the desired state of a TLSOption. 
+ properties: + alpnProtocols: + description: |- + ALPNProtocols defines the list of supported application level protocols for the TLS handshake, in order of preference. + More info: https://doc.traefik.io/traefik/v3.3/https/tls/#alpn-protocols + items: + type: string + type: array + cipherSuites: + description: |- + CipherSuites defines the list of supported cipher suites for TLS versions up to TLS 1.2. + More info: https://doc.traefik.io/traefik/v3.3/https/tls/#cipher-suites + items: + type: string + type: array + clientAuth: + description: ClientAuth defines the server's policy for TLS Client + Authentication. + properties: + clientAuthType: + description: ClientAuthType defines the client authentication + type to apply. + enum: + - NoClientCert + - RequestClientCert + - RequireAnyClientCert + - VerifyClientCertIfGiven + - RequireAndVerifyClientCert + type: string + secretNames: + description: SecretNames defines the names of the referenced Kubernetes + Secret storing certificate details. + items: + type: string + type: array + type: object + curvePreferences: + description: |- + CurvePreferences defines the preferred elliptic curves in a specific order. + More info: https://doc.traefik.io/traefik/v3.3/https/tls/#curve-preferences + items: + type: string + type: array + maxVersion: + description: |- + MaxVersion defines the maximum TLS version that Traefik will accept. + Possible values: VersionTLS10, VersionTLS11, VersionTLS12, VersionTLS13. + Default: None. + type: string + minVersion: + description: |- + MinVersion defines the minimum TLS version that Traefik will accept. + Possible values: VersionTLS10, VersionTLS11, VersionTLS12, VersionTLS13. + Default: VersionTLS10. + type: string + preferServerCipherSuites: + description: |- + PreferServerCipherSuites defines whether the server chooses a cipher suite among his own instead of among the client's. + It is enabled automatically when minVersion or maxVersion is set. 
+                  Deprecated: https://github.com/golang/go/issues/45430
+                type: boolean
+              sniStrict:
+                description: SniStrict defines whether Traefik allows connections
+                  from clients that do not specify a server_name extension.
+                type: boolean
+            type: object
+        required:
+        - metadata
+        - spec
+        type: object
+    served: true
+    storage: true
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+  annotations:
+    controller-gen.kubebuilder.io/version: v0.16.1
+  name: tlsstores.traefik.io
+spec:
+  group: traefik.io
+  names:
+    kind: TLSStore
+    listKind: TLSStoreList
+    plural: tlsstores
+    singular: tlsstore
+  scope: Namespaced
+  versions:
+  - name: v1alpha1
+    schema:
+      openAPIV3Schema:
+        description: |-
+          TLSStore is the CRD implementation of a Traefik TLS Store.
+          For the time being, only the TLSStore named default is supported.
+          This means that you cannot have two stores that are named default in different Kubernetes namespaces.
+          More info: https://doc.traefik.io/traefik/v3.3/https/tls/#certificates-stores
+        properties:
+          apiVersion:
+            description: |-
+              APIVersion defines the versioned schema of this representation of an object.
+              Servers should convert recognized schemas to the latest internal value, and
+              may reject unrecognized values.
+              More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
+            type: string
+          kind:
+            description: |-
+              Kind is a string value representing the REST resource this object represents.
+              Servers may infer this from the endpoint the client submits requests to.
+              Cannot be updated.
+              In CamelCase.
+              More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+            type: string
+          metadata:
+            type: object
+          spec:
+            description: TLSStoreSpec defines the desired state of a TLSStore.
+            properties:
+              certificates:
+                description: Certificates is a list of secret names, each secret holding
+                  a key/certificate pair to add to the store.
+ items: + description: Certificate holds a secret name for the TLSStore resource. + properties: + secretName: + description: SecretName is the name of the referenced Kubernetes + Secret to specify the certificate details. + type: string + required: + - secretName + type: object + type: array + defaultCertificate: + description: DefaultCertificate defines the default certificate configuration. + properties: + secretName: + description: SecretName is the name of the referenced Kubernetes + Secret to specify the certificate details. + type: string + required: + - secretName + type: object + defaultGeneratedCert: + description: DefaultGeneratedCert defines the default generated certificate + configuration. + properties: + domain: + description: Domain is the domain definition for the DefaultCertificate. + properties: + main: + description: Main defines the main domain name. + type: string + sans: + description: SANs defines the subject alternative domain names. + items: + type: string + type: array + type: object + resolver: + description: Resolver is the name of the resolver that will be + used to issue the DefaultCertificate. + type: string + type: object + type: object + required: + - metadata + - spec + type: object + served: true + storage: true +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.1 + name: traefikservices.traefik.io +spec: + group: traefik.io + names: + kind: TraefikService + listKind: TraefikServiceList + plural: traefikservices + singular: traefikservice + scope: Namespaced + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + description: |- + TraefikService is the CRD implementation of a Traefik Service. 
+ TraefikService object allows to: + - Apply weight to Services on load-balancing + - Mirror traffic on services + More info: https://doc.traefik.io/traefik/v3.3/routing/providers/kubernetes-crd/#kind-traefikservice + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: TraefikServiceSpec defines the desired state of a TraefikService. + properties: + mirroring: + description: Mirroring defines the Mirroring service configuration. + properties: + healthCheck: + description: Healthcheck defines health checks for ExternalName + services. + properties: + followRedirects: + description: |- + FollowRedirects defines whether redirects should be followed during the health check calls. + Default: true + type: boolean + headers: + additionalProperties: + type: string + description: Headers defines custom headers to be sent to + the health check endpoint. + type: object + hostname: + description: Hostname defines the value of hostname in the + Host header of the health check request. + type: string + interval: + anyOf: + - type: integer + - type: string + description: |- + Interval defines the frequency of the health check calls. + Default: 30s + x-kubernetes-int-or-string: true + method: + description: Method defines the healthcheck method. 
+ type: string + mode: + description: |- + Mode defines the health check mode. + If defined to grpc, will use the gRPC health check protocol to probe the server. + Default: http + type: string + path: + description: Path defines the server URL path for the health + check endpoint. + type: string + port: + description: Port defines the server URL port for the health + check endpoint. + type: integer + scheme: + description: Scheme replaces the server URL scheme for the + health check endpoint. + type: string + status: + description: Status defines the expected HTTP status code + of the response to the health check request. + type: integer + timeout: + anyOf: + - type: integer + - type: string + description: |- + Timeout defines the maximum duration Traefik will wait for a health check request before considering the server unhealthy. + Default: 5s + x-kubernetes-int-or-string: true + type: object + kind: + description: Kind defines the kind of the Service. + enum: + - Service + - TraefikService + type: string + maxBodySize: + description: |- + MaxBodySize defines the maximum size allowed for the body of the request. + If the body is larger, the request is not mirrored. + Default value is -1, which means unlimited size. + format: int64 + type: integer + mirrorBody: + description: |- + MirrorBody defines whether the body of the request should be mirrored. + Default value is true. + type: boolean + mirrors: + description: Mirrors defines the list of mirrors where Traefik + will duplicate the traffic. + items: + description: MirrorService holds the mirror configuration. + properties: + healthCheck: + description: Healthcheck defines health checks for ExternalName + services. + properties: + followRedirects: + description: |- + FollowRedirects defines whether redirects should be followed during the health check calls. 
+ Default: true + type: boolean + headers: + additionalProperties: + type: string + description: Headers defines custom headers to be sent + to the health check endpoint. + type: object + hostname: + description: Hostname defines the value of hostname + in the Host header of the health check request. + type: string + interval: + anyOf: + - type: integer + - type: string + description: |- + Interval defines the frequency of the health check calls. + Default: 30s + x-kubernetes-int-or-string: true + method: + description: Method defines the healthcheck method. + type: string + mode: + description: |- + Mode defines the health check mode. + If defined to grpc, will use the gRPC health check protocol to probe the server. + Default: http + type: string + path: + description: Path defines the server URL path for the + health check endpoint. + type: string + port: + description: Port defines the server URL port for the + health check endpoint. + type: integer + scheme: + description: Scheme replaces the server URL scheme for + the health check endpoint. + type: string + status: + description: Status defines the expected HTTP status + code of the response to the health check request. + type: integer + timeout: + anyOf: + - type: integer + - type: string + description: |- + Timeout defines the maximum duration Traefik will wait for a health check request before considering the server unhealthy. + Default: 5s + x-kubernetes-int-or-string: true + type: object + kind: + description: Kind defines the kind of the Service. + enum: + - Service + - TraefikService + type: string + name: + description: |- + Name defines the name of the referenced Kubernetes Service or TraefikService. + The differentiation between the two is specified in the Kind field. + type: string + namespace: + description: Namespace defines the namespace of the referenced + Kubernetes Service or TraefikService. 
+ type: string + nativeLB: + description: |- + NativeLB controls, when creating the load-balancer, + whether the LB's children are directly the pods IPs or if the only child is the Kubernetes Service clusterIP. + The Kubernetes Service itself does load-balance to the pods. + By default, NativeLB is false. + type: boolean + nodePortLB: + description: |- + NodePortLB controls, when creating the load-balancer, + whether the LB's children are directly the nodes internal IPs using the nodePort when the service type is NodePort. + It allows services to be reachable when Traefik runs externally from the Kubernetes cluster but within the same network of the nodes. + By default, NodePortLB is false. + type: boolean + passHostHeader: + description: |- + PassHostHeader defines whether the client Host header is forwarded to the upstream Kubernetes Service. + By default, passHostHeader is true. + type: boolean + percent: + description: |- + Percent defines the part of the traffic to mirror. + Supported values: 0 to 100. + type: integer + port: + anyOf: + - type: integer + - type: string + description: |- + Port defines the port of a Kubernetes Service. + This can be a reference to a named port. + x-kubernetes-int-or-string: true + responseForwarding: + description: ResponseForwarding defines how Traefik forwards + the response from the upstream Kubernetes Service to the + client. + properties: + flushInterval: + description: |- + FlushInterval defines the interval, in milliseconds, in between flushes to the client while copying the response body. + A negative value means to flush immediately after each write to the client. + This configuration is ignored when ReverseProxy recognizes a response as a streaming response; + for such responses, writes are flushed to the client immediately. + Default: 100ms + type: string + type: object + scheme: + description: |- + Scheme defines the scheme to use for the request to the upstream Kubernetes Service. 
+ It defaults to https when Kubernetes Service port is 443, http otherwise. + type: string + serversTransport: + description: |- + ServersTransport defines the name of ServersTransport resource to use. + It allows to configure the transport between Traefik and your servers. + Can only be used on a Kubernetes Service. + type: string + sticky: + description: |- + Sticky defines the sticky sessions configuration. + More info: https://doc.traefik.io/traefik/v3.3/routing/services/#sticky-sessions + properties: + cookie: + description: Cookie defines the sticky cookie configuration. + properties: + httpOnly: + description: HTTPOnly defines whether the cookie + can be accessed by client-side APIs, such as JavaScript. + type: boolean + maxAge: + description: |- + MaxAge defines the number of seconds until the cookie expires. + When set to a negative number, the cookie expires immediately. + When set to zero, the cookie never expires. + type: integer + name: + description: Name defines the Cookie name. + type: string + path: + description: |- + Path defines the path that must exist in the requested URL for the browser to send the Cookie header. + When not provided the cookie will be sent on every request to the domain. + More info: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Set-Cookie#pathpath-value + type: string + sameSite: + description: |- + SameSite defines the same site policy. + More info: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Set-Cookie/SameSite + type: string + secure: + description: Secure defines whether the cookie can + only be transmitted over an encrypted connection + (i.e. HTTPS). + type: boolean + type: object + type: object + strategy: + description: |- + Strategy defines the load balancing strategy between the servers. + RoundRobin is the only supported value at the moment. 
+ type: string + weight: + description: |- + Weight defines the weight and should only be specified when Name references a TraefikService object + (and to be precise, one that embeds a Weighted Round Robin). + type: integer + required: + - name + type: object + type: array + name: + description: |- + Name defines the name of the referenced Kubernetes Service or TraefikService. + The differentiation between the two is specified in the Kind field. + type: string + namespace: + description: Namespace defines the namespace of the referenced + Kubernetes Service or TraefikService. + type: string + nativeLB: + description: |- + NativeLB controls, when creating the load-balancer, + whether the LB's children are directly the pods IPs or if the only child is the Kubernetes Service clusterIP. + The Kubernetes Service itself does load-balance to the pods. + By default, NativeLB is false. + type: boolean + nodePortLB: + description: |- + NodePortLB controls, when creating the load-balancer, + whether the LB's children are directly the nodes internal IPs using the nodePort when the service type is NodePort. + It allows services to be reachable when Traefik runs externally from the Kubernetes cluster but within the same network of the nodes. + By default, NodePortLB is false. + type: boolean + passHostHeader: + description: |- + PassHostHeader defines whether the client Host header is forwarded to the upstream Kubernetes Service. + By default, passHostHeader is true. + type: boolean + port: + anyOf: + - type: integer + - type: string + description: |- + Port defines the port of a Kubernetes Service. + This can be a reference to a named port. + x-kubernetes-int-or-string: true + responseForwarding: + description: ResponseForwarding defines how Traefik forwards the + response from the upstream Kubernetes Service to the client. 
+ properties: + flushInterval: + description: |- + FlushInterval defines the interval, in milliseconds, in between flushes to the client while copying the response body. + A negative value means to flush immediately after each write to the client. + This configuration is ignored when ReverseProxy recognizes a response as a streaming response; + for such responses, writes are flushed to the client immediately. + Default: 100ms + type: string + type: object + scheme: + description: |- + Scheme defines the scheme to use for the request to the upstream Kubernetes Service. + It defaults to https when Kubernetes Service port is 443, http otherwise. + type: string + serversTransport: + description: |- + ServersTransport defines the name of ServersTransport resource to use. + It allows to configure the transport between Traefik and your servers. + Can only be used on a Kubernetes Service. + type: string + sticky: + description: |- + Sticky defines the sticky sessions configuration. + More info: https://doc.traefik.io/traefik/v3.3/routing/services/#sticky-sessions + properties: + cookie: + description: Cookie defines the sticky cookie configuration. + properties: + httpOnly: + description: HTTPOnly defines whether the cookie can be + accessed by client-side APIs, such as JavaScript. + type: boolean + maxAge: + description: |- + MaxAge defines the number of seconds until the cookie expires. + When set to a negative number, the cookie expires immediately. + When set to zero, the cookie never expires. + type: integer + name: + description: Name defines the Cookie name. + type: string + path: + description: |- + Path defines the path that must exist in the requested URL for the browser to send the Cookie header. + When not provided the cookie will be sent on every request to the domain. + More info: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Set-Cookie#pathpath-value + type: string + sameSite: + description: |- + SameSite defines the same site policy. 
+ More info: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Set-Cookie/SameSite + type: string + secure: + description: Secure defines whether the cookie can only + be transmitted over an encrypted connection (i.e. HTTPS). + type: boolean + type: object + type: object + strategy: + description: |- + Strategy defines the load balancing strategy between the servers. + RoundRobin is the only supported value at the moment. + type: string + weight: + description: |- + Weight defines the weight and should only be specified when Name references a TraefikService object + (and to be precise, one that embeds a Weighted Round Robin). + type: integer + required: + - name + type: object + weighted: + description: Weighted defines the Weighted Round Robin configuration. + properties: + services: + description: Services defines the list of Kubernetes Service and/or + TraefikService to load-balance, with weight. + items: + description: Service defines an upstream HTTP service to proxy + traffic to. + properties: + healthCheck: + description: Healthcheck defines health checks for ExternalName + services. + properties: + followRedirects: + description: |- + FollowRedirects defines whether redirects should be followed during the health check calls. + Default: true + type: boolean + headers: + additionalProperties: + type: string + description: Headers defines custom headers to be sent + to the health check endpoint. + type: object + hostname: + description: Hostname defines the value of hostname + in the Host header of the health check request. + type: string + interval: + anyOf: + - type: integer + - type: string + description: |- + Interval defines the frequency of the health check calls. + Default: 30s + x-kubernetes-int-or-string: true + method: + description: Method defines the healthcheck method. + type: string + mode: + description: |- + Mode defines the health check mode. + If defined to grpc, will use the gRPC health check protocol to probe the server. 
+ Default: http + type: string + path: + description: Path defines the server URL path for the + health check endpoint. + type: string + port: + description: Port defines the server URL port for the + health check endpoint. + type: integer + scheme: + description: Scheme replaces the server URL scheme for + the health check endpoint. + type: string + status: + description: Status defines the expected HTTP status + code of the response to the health check request. + type: integer + timeout: + anyOf: + - type: integer + - type: string + description: |- + Timeout defines the maximum duration Traefik will wait for a health check request before considering the server unhealthy. + Default: 5s + x-kubernetes-int-or-string: true + type: object + kind: + description: Kind defines the kind of the Service. + enum: + - Service + - TraefikService + type: string + name: + description: |- + Name defines the name of the referenced Kubernetes Service or TraefikService. + The differentiation between the two is specified in the Kind field. + type: string + namespace: + description: Namespace defines the namespace of the referenced + Kubernetes Service or TraefikService. + type: string + nativeLB: + description: |- + NativeLB controls, when creating the load-balancer, + whether the LB's children are directly the pods IPs or if the only child is the Kubernetes Service clusterIP. + The Kubernetes Service itself does load-balance to the pods. + By default, NativeLB is false. + type: boolean + nodePortLB: + description: |- + NodePortLB controls, when creating the load-balancer, + whether the LB's children are directly the nodes internal IPs using the nodePort when the service type is NodePort. + It allows services to be reachable when Traefik runs externally from the Kubernetes cluster but within the same network of the nodes. + By default, NodePortLB is false. 
+ type: boolean + passHostHeader: + description: |- + PassHostHeader defines whether the client Host header is forwarded to the upstream Kubernetes Service. + By default, passHostHeader is true. + type: boolean + port: + anyOf: + - type: integer + - type: string + description: |- + Port defines the port of a Kubernetes Service. + This can be a reference to a named port. + x-kubernetes-int-or-string: true + responseForwarding: + description: ResponseForwarding defines how Traefik forwards + the response from the upstream Kubernetes Service to the + client. + properties: + flushInterval: + description: |- + FlushInterval defines the interval, in milliseconds, in between flushes to the client while copying the response body. + A negative value means to flush immediately after each write to the client. + This configuration is ignored when ReverseProxy recognizes a response as a streaming response; + for such responses, writes are flushed to the client immediately. + Default: 100ms + type: string + type: object + scheme: + description: |- + Scheme defines the scheme to use for the request to the upstream Kubernetes Service. + It defaults to https when Kubernetes Service port is 443, http otherwise. + type: string + serversTransport: + description: |- + ServersTransport defines the name of ServersTransport resource to use. + It allows to configure the transport between Traefik and your servers. + Can only be used on a Kubernetes Service. + type: string + sticky: + description: |- + Sticky defines the sticky sessions configuration. + More info: https://doc.traefik.io/traefik/v3.3/routing/services/#sticky-sessions + properties: + cookie: + description: Cookie defines the sticky cookie configuration. + properties: + httpOnly: + description: HTTPOnly defines whether the cookie + can be accessed by client-side APIs, such as JavaScript. + type: boolean + maxAge: + description: |- + MaxAge defines the number of seconds until the cookie expires. 
+ When set to a negative number, the cookie expires immediately. + When set to zero, the cookie never expires. + type: integer + name: + description: Name defines the Cookie name. + type: string + path: + description: |- + Path defines the path that must exist in the requested URL for the browser to send the Cookie header. + When not provided the cookie will be sent on every request to the domain. + More info: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Set-Cookie#pathpath-value + type: string + sameSite: + description: |- + SameSite defines the same site policy. + More info: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Set-Cookie/SameSite + type: string + secure: + description: Secure defines whether the cookie can + only be transmitted over an encrypted connection + (i.e. HTTPS). + type: boolean + type: object + type: object + strategy: + description: |- + Strategy defines the load balancing strategy between the servers. + RoundRobin is the only supported value at the moment. + type: string + weight: + description: |- + Weight defines the weight and should only be specified when Name references a TraefikService object + (and to be precise, one that embeds a Weighted Round Robin). + type: integer + required: + - name + type: object + type: array + sticky: + description: |- + Sticky defines whether sticky sessions are enabled. + More info: https://doc.traefik.io/traefik/v3.3/routing/providers/kubernetes-crd/#stickiness-and-load-balancing + properties: + cookie: + description: Cookie defines the sticky cookie configuration. + properties: + httpOnly: + description: HTTPOnly defines whether the cookie can be + accessed by client-side APIs, such as JavaScript. + type: boolean + maxAge: + description: |- + MaxAge defines the number of seconds until the cookie expires. + When set to a negative number, the cookie expires immediately. + When set to zero, the cookie never expires. 
+ type: integer + name: + description: Name defines the Cookie name. + type: string + path: + description: |- + Path defines the path that must exist in the requested URL for the browser to send the Cookie header. + When not provided the cookie will be sent on every request to the domain. + More info: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Set-Cookie#pathpath-value + type: string + sameSite: + description: |- + SameSite defines the same site policy. + More info: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Set-Cookie/SameSite + type: string + secure: + description: Secure defines whether the cookie can only + be transmitted over an encrypted connection (i.e. HTTPS). + type: boolean + type: object + type: object + type: object + type: object + required: + - metadata + - spec + type: object + served: true + storage: true diff --git a/infrastructure/traefik/kustomization.yaml b/infrastructure/traefik/kustomization.yaml index 4e36574..6abf485 100644 --- a/infrastructure/traefik/kustomization.yaml +++ b/infrastructure/traefik/kustomization.yaml @@ -5,6 +5,7 @@ metadata: name: traefik namespace: flux-system resources: + - crds.yaml - deployment.yaml - serviceaccount.yaml - clusterrole.yaml From 4406724da52b31210e63f74e34ca9cdfd4cad4eb Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Fri, 16 Jan 2026 16:25:40 -0300 Subject: [PATCH 149/270] longhorn: add helm repo and adopt workflow --- .../flux-system/platform/kustomization.yaml | 2 + .../longhorn-adopt/kustomization.yaml | 17 ++++++ .../platform/longhorn-ui/kustomization.yaml | 1 + .../platform/longhorn/kustomization.yaml | 20 +++++++ .../longhorn/adopt/kustomization.yaml | 15 +++++ .../longhorn/adopt/longhorn-adopt-rbac.yaml | 56 +++++++++++++++++++ .../adopt/longhorn-helm-adopt-job.yaml | 26 +++++++++ infrastructure/longhorn/adopt/namespace.yaml | 5 ++ .../adopt/scripts/longhorn_helm_adopt.sh | 52 +++++++++++++++++ infrastructure/longhorn/core/helmrelease.yaml | 32 +++++++++++ 
.../longhorn/core/kustomization.yaml | 6 ++ infrastructure/longhorn/core/namespace.yaml | 5 ++ .../sources/helm/kustomization.yaml | 1 + infrastructure/sources/helm/longhorn.yaml | 9 +++ 14 files changed, 247 insertions(+) create mode 100644 clusters/atlas/flux-system/platform/longhorn-adopt/kustomization.yaml create mode 100644 clusters/atlas/flux-system/platform/longhorn/kustomization.yaml create mode 100644 infrastructure/longhorn/adopt/kustomization.yaml create mode 100644 infrastructure/longhorn/adopt/longhorn-adopt-rbac.yaml create mode 100644 infrastructure/longhorn/adopt/longhorn-helm-adopt-job.yaml create mode 100644 infrastructure/longhorn/adopt/namespace.yaml create mode 100644 infrastructure/longhorn/adopt/scripts/longhorn_helm_adopt.sh create mode 100644 infrastructure/longhorn/core/helmrelease.yaml create mode 100644 infrastructure/longhorn/core/kustomization.yaml create mode 100644 infrastructure/longhorn/core/namespace.yaml create mode 100644 infrastructure/sources/helm/longhorn.yaml diff --git a/clusters/atlas/flux-system/platform/kustomization.yaml b/clusters/atlas/flux-system/platform/kustomization.yaml index 03d9d43..8ee08d7 100644 --- a/clusters/atlas/flux-system/platform/kustomization.yaml +++ b/clusters/atlas/flux-system/platform/kustomization.yaml @@ -12,6 +12,8 @@ resources: - monitoring/kustomization.yaml - logging/kustomization.yaml - maintenance/kustomization.yaml + - longhorn-adopt/kustomization.yaml + - longhorn/kustomization.yaml - longhorn-ui/kustomization.yaml - postgres/kustomization.yaml - ../platform/vault-csi/kustomization.yaml diff --git a/clusters/atlas/flux-system/platform/longhorn-adopt/kustomization.yaml b/clusters/atlas/flux-system/platform/longhorn-adopt/kustomization.yaml new file mode 100644 index 0000000..f568a5e --- /dev/null +++ b/clusters/atlas/flux-system/platform/longhorn-adopt/kustomization.yaml @@ -0,0 +1,17 @@ +# clusters/atlas/flux-system/platform/longhorn-adopt/kustomization.yaml +apiVersion: 
kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: longhorn-adopt + namespace: flux-system +spec: + interval: 30m + path: ./infrastructure/longhorn/adopt + prune: true + force: true + sourceRef: + kind: GitRepository + name: flux-system + namespace: flux-system + targetNamespace: longhorn-system + wait: true diff --git a/clusters/atlas/flux-system/platform/longhorn-ui/kustomization.yaml b/clusters/atlas/flux-system/platform/longhorn-ui/kustomization.yaml index fc6bd1f..4517728 100644 --- a/clusters/atlas/flux-system/platform/longhorn-ui/kustomization.yaml +++ b/clusters/atlas/flux-system/platform/longhorn-ui/kustomization.yaml @@ -15,4 +15,5 @@ spec: namespace: flux-system dependsOn: - name: core + - name: longhorn wait: true diff --git a/clusters/atlas/flux-system/platform/longhorn/kustomization.yaml b/clusters/atlas/flux-system/platform/longhorn/kustomization.yaml new file mode 100644 index 0000000..8805b5a --- /dev/null +++ b/clusters/atlas/flux-system/platform/longhorn/kustomization.yaml @@ -0,0 +1,20 @@ +# clusters/atlas/flux-system/platform/longhorn/kustomization.yaml +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: longhorn + namespace: flux-system +spec: + interval: 30m + path: ./infrastructure/longhorn/core + prune: true + force: true + sourceRef: + kind: GitRepository + name: flux-system + namespace: flux-system + targetNamespace: longhorn-system + dependsOn: + - name: helm + - name: longhorn-adopt + wait: true diff --git a/infrastructure/longhorn/adopt/kustomization.yaml b/infrastructure/longhorn/adopt/kustomization.yaml new file mode 100644 index 0000000..f70b223 --- /dev/null +++ b/infrastructure/longhorn/adopt/kustomization.yaml @@ -0,0 +1,15 @@ +# infrastructure/longhorn/adopt/kustomization.yaml +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - namespace.yaml + - longhorn-adopt-rbac.yaml + - longhorn-helm-adopt-job.yaml + +configMapGenerator: + - name: 
longhorn-helm-adopt-script + namespace: longhorn-system + files: + - longhorn_helm_adopt.sh=scripts/longhorn_helm_adopt.sh + options: + disableNameSuffixHash: true diff --git a/infrastructure/longhorn/adopt/longhorn-adopt-rbac.yaml b/infrastructure/longhorn/adopt/longhorn-adopt-rbac.yaml new file mode 100644 index 0000000..31ea73b --- /dev/null +++ b/infrastructure/longhorn/adopt/longhorn-adopt-rbac.yaml @@ -0,0 +1,56 @@ +# infrastructure/longhorn/adopt/longhorn-adopt-rbac.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: longhorn-helm-adopt + namespace: longhorn-system +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: longhorn-helm-adopt +rules: + - apiGroups: [""] + resources: + - configmaps + - services + - serviceaccounts + - secrets + verbs: ["get", "list", "watch", "patch", "update"] + - apiGroups: ["apps"] + resources: + - deployments + - daemonsets + verbs: ["get", "list", "watch", "patch", "update"] + - apiGroups: ["batch"] + resources: + - jobs + verbs: ["get", "list", "watch", "patch", "update"] + - apiGroups: ["rbac.authorization.k8s.io"] + resources: + - roles + - rolebindings + - clusterroles + - clusterrolebindings + verbs: ["get", "list", "watch", "patch", "update"] + - apiGroups: ["apiextensions.k8s.io"] + resources: + - customresourcedefinitions + verbs: ["get", "list", "watch", "patch", "update"] + - apiGroups: ["scheduling.k8s.io"] + resources: + - priorityclasses + verbs: ["get", "list", "watch", "patch", "update"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: longhorn-helm-adopt +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: longhorn-helm-adopt +subjects: + - kind: ServiceAccount + name: longhorn-helm-adopt + namespace: longhorn-system diff --git a/infrastructure/longhorn/adopt/longhorn-helm-adopt-job.yaml b/infrastructure/longhorn/adopt/longhorn-helm-adopt-job.yaml new file mode 100644 index 0000000..7484e47 --- /dev/null 
+++ b/infrastructure/longhorn/adopt/longhorn-helm-adopt-job.yaml @@ -0,0 +1,26 @@ +# infrastructure/longhorn/adopt/longhorn-helm-adopt-job.yaml +apiVersion: batch/v1 +kind: Job +metadata: + name: longhorn-helm-adopt + namespace: longhorn-system +spec: + backoffLimit: 1 + template: + spec: + serviceAccountName: longhorn-helm-adopt + restartPolicy: Never + containers: + - name: adopt + image: bitnami/kubectl@sha256:554ab88b1858e8424c55de37ad417b16f2a0e65d1607aa0f3fe3ce9b9f10b131 + command: ["/usr/bin/env", "bash"] + args: ["/scripts/longhorn_helm_adopt.sh"] + volumeMounts: + - name: script + mountPath: /scripts + readOnly: true + volumes: + - name: script + configMap: + name: longhorn-helm-adopt-script + defaultMode: 0555 diff --git a/infrastructure/longhorn/adopt/namespace.yaml b/infrastructure/longhorn/adopt/namespace.yaml new file mode 100644 index 0000000..8db20de --- /dev/null +++ b/infrastructure/longhorn/adopt/namespace.yaml @@ -0,0 +1,5 @@ +# infrastructure/longhorn/adopt/namespace.yaml +apiVersion: v1 +kind: Namespace +metadata: + name: longhorn-system diff --git a/infrastructure/longhorn/adopt/scripts/longhorn_helm_adopt.sh b/infrastructure/longhorn/adopt/scripts/longhorn_helm_adopt.sh new file mode 100644 index 0000000..343ade8 --- /dev/null +++ b/infrastructure/longhorn/adopt/scripts/longhorn_helm_adopt.sh @@ -0,0 +1,52 @@ +#!/usr/bin/env bash +set -euo pipefail + +release_name="longhorn" +release_namespace="longhorn-system" +selector="app.kubernetes.io/instance=${release_name}" + +annotate_and_label() { + local scope="$1" + local kind="$2" + if [ "${scope}" = "namespaced" ]; then + kubectl -n "${release_namespace}" annotate "${kind}" -l "${selector}" \ + meta.helm.sh/release-name="${release_name}" \ + meta.helm.sh/release-namespace="${release_namespace}" \ + --overwrite >/dev/null 2>&1 || true + kubectl -n "${release_namespace}" label "${kind}" -l "${selector}" \ + app.kubernetes.io/managed-by=Helm --overwrite >/dev/null 2>&1 || true + else + kubectl 
annotate "${kind}" -l "${selector}" \ + meta.helm.sh/release-name="${release_name}" \ + meta.helm.sh/release-namespace="${release_namespace}" \ + --overwrite >/dev/null 2>&1 || true + kubectl label "${kind}" -l "${selector}" \ + app.kubernetes.io/managed-by=Helm --overwrite >/dev/null 2>&1 || true + fi +} + +namespaced_kinds=( + configmap + service + serviceaccount + deployment + daemonset + job + role + rolebinding +) + +cluster_kinds=( + clusterrole + clusterrolebinding + customresourcedefinition + priorityclass +) + +for kind in "${namespaced_kinds[@]}"; do + annotate_and_label "namespaced" "${kind}" +done + +for kind in "${cluster_kinds[@]}"; do + annotate_and_label "cluster" "${kind}" +done diff --git a/infrastructure/longhorn/core/helmrelease.yaml b/infrastructure/longhorn/core/helmrelease.yaml new file mode 100644 index 0000000..521df04 --- /dev/null +++ b/infrastructure/longhorn/core/helmrelease.yaml @@ -0,0 +1,32 @@ +# infrastructure/longhorn/core/helmrelease.yaml +apiVersion: helm.toolkit.fluxcd.io/v2 +kind: HelmRelease +metadata: + name: longhorn + namespace: longhorn-system +spec: + interval: 30m + chart: + spec: + chart: longhorn + version: 1.8.2 + sourceRef: + kind: HelmRepository + name: longhorn + namespace: flux-system + install: + crds: Skip + remediation: { retries: 3 } + timeout: 15m + upgrade: + crds: Skip + remediation: + retries: 3 + remediateLastFailure: true + cleanupOnFail: true + timeout: 15m + values: + service: + ui: + type: NodePort + nodePort: 30824 diff --git a/infrastructure/longhorn/core/kustomization.yaml b/infrastructure/longhorn/core/kustomization.yaml new file mode 100644 index 0000000..47153c7 --- /dev/null +++ b/infrastructure/longhorn/core/kustomization.yaml @@ -0,0 +1,6 @@ +# infrastructure/longhorn/core/kustomization.yaml +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - namespace.yaml + - helmrelease.yaml diff --git a/infrastructure/longhorn/core/namespace.yaml 
b/infrastructure/longhorn/core/namespace.yaml new file mode 100644 index 0000000..6b794fd --- /dev/null +++ b/infrastructure/longhorn/core/namespace.yaml @@ -0,0 +1,5 @@ +# infrastructure/longhorn/core/namespace.yaml +apiVersion: v1 +kind: Namespace +metadata: + name: longhorn-system diff --git a/infrastructure/sources/helm/kustomization.yaml b/infrastructure/sources/helm/kustomization.yaml index 74ff668..e45f58f 100644 --- a/infrastructure/sources/helm/kustomization.yaml +++ b/infrastructure/sources/helm/kustomization.yaml @@ -13,6 +13,7 @@ resources: - opentelemetry.yaml - opensearch.yaml - harbor.yaml + - longhorn.yaml - prometheus.yaml - victoria-metrics.yaml - secrets-store-csi.yaml diff --git a/infrastructure/sources/helm/longhorn.yaml b/infrastructure/sources/helm/longhorn.yaml new file mode 100644 index 0000000..3a2d728 --- /dev/null +++ b/infrastructure/sources/helm/longhorn.yaml @@ -0,0 +1,9 @@ +# infrastructure/sources/helm/longhorn.yaml +apiVersion: source.toolkit.fluxcd.io/v1 +kind: HelmRepository +metadata: + name: longhorn + namespace: flux-system +spec: + interval: 30m + url: https://charts.longhorn.io From 401df4d68c24d0fb8a2b14ac560699e97772f279 Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Fri, 16 Jan 2026 17:31:29 -0300 Subject: [PATCH 150/270] longhorn: use harbor mirrors and vault pull secret --- infrastructure/longhorn/core/helmrelease.yaml | 45 +++++++++++++++++++ .../longhorn/core/kustomization.yaml | 3 ++ .../longhorn/core/secretproviderclass.yaml | 21 +++++++++ .../longhorn/core/vault-serviceaccount.yaml | 6 +++ .../longhorn/core/vault-sync-deployment.yaml | 34 ++++++++++++++ .../vault/scripts/vault_k8s_auth_configure.sh | 4 +- 6 files changed, 111 insertions(+), 2 deletions(-) create mode 100644 infrastructure/longhorn/core/secretproviderclass.yaml create mode 100644 infrastructure/longhorn/core/vault-serviceaccount.yaml create mode 100644 infrastructure/longhorn/core/vault-sync-deployment.yaml diff --git 
a/infrastructure/longhorn/core/helmrelease.yaml b/infrastructure/longhorn/core/helmrelease.yaml index 521df04..a5d44ad 100644 --- a/infrastructure/longhorn/core/helmrelease.yaml +++ b/infrastructure/longhorn/core/helmrelease.yaml @@ -30,3 +30,48 @@ spec: ui: type: NodePort nodePort: 30824 + privateRegistry: + createSecret: false + registrySecret: longhorn-registry + image: + longhorn: + engine: + repository: registry.bstein.dev/bstein/longhorn-engine + tag: v1.8.2 + manager: + repository: registry.bstein.dev/bstein/longhorn-manager + tag: v1.8.2 + ui: + repository: registry.bstein.dev/bstein/longhorn-ui + tag: v1.8.2 + instanceManager: + repository: registry.bstein.dev/bstein/longhorn-instance-manager + tag: v1.8.2 + shareManager: + repository: registry.bstein.dev/bstein/longhorn-share-manager + tag: v1.8.2 + backingImageManager: + repository: registry.bstein.dev/bstein/longhorn-backing-image-manager + tag: v1.8.2 + supportBundleKit: + repository: registry.bstein.dev/bstein/longhorn-support-bundle-kit + tag: v0.0.56 + csi: + attacher: + repository: registry.bstein.dev/bstein/longhorn-csi-attacher + tag: v4.9.0 + provisioner: + repository: registry.bstein.dev/bstein/longhorn-csi-provisioner + tag: v5.3.0 + nodeDriverRegistrar: + repository: registry.bstein.dev/bstein/longhorn-csi-node-driver-registrar + tag: v2.14.0 + resizer: + repository: registry.bstein.dev/bstein/longhorn-csi-resizer + tag: v1.13.2 + snapshotter: + repository: registry.bstein.dev/bstein/longhorn-csi-snapshotter + tag: v8.2.0 + livenessProbe: + repository: registry.bstein.dev/bstein/longhorn-livenessprobe + tag: v2.16.0 diff --git a/infrastructure/longhorn/core/kustomization.yaml b/infrastructure/longhorn/core/kustomization.yaml index 47153c7..e8320c7 100644 --- a/infrastructure/longhorn/core/kustomization.yaml +++ b/infrastructure/longhorn/core/kustomization.yaml @@ -3,4 +3,7 @@ apiVersion: kustomize.config.k8s.io/v1beta1 kind: Kustomization resources: - namespace.yaml + - 
vault-serviceaccount.yaml + - secretproviderclass.yaml + - vault-sync-deployment.yaml - helmrelease.yaml diff --git a/infrastructure/longhorn/core/secretproviderclass.yaml b/infrastructure/longhorn/core/secretproviderclass.yaml new file mode 100644 index 0000000..031d1d8 --- /dev/null +++ b/infrastructure/longhorn/core/secretproviderclass.yaml @@ -0,0 +1,21 @@ +# infrastructure/longhorn/core/secretproviderclass.yaml +apiVersion: secrets-store.csi.x-k8s.io/v1 +kind: SecretProviderClass +metadata: + name: longhorn-vault + namespace: longhorn-system +spec: + provider: vault + parameters: + vaultAddress: "http://vault.vault.svc.cluster.local:8200" + roleName: "longhorn" + objects: | + - objectName: "harbor-pull__dockerconfigjson" + secretPath: "kv/data/atlas/harbor-pull/longhorn" + secretKey: "dockerconfigjson" + secretObjects: + - secretName: longhorn-registry + type: kubernetes.io/dockerconfigjson + data: + - objectName: harbor-pull__dockerconfigjson + key: .dockerconfigjson diff --git a/infrastructure/longhorn/core/vault-serviceaccount.yaml b/infrastructure/longhorn/core/vault-serviceaccount.yaml new file mode 100644 index 0000000..17ccef8 --- /dev/null +++ b/infrastructure/longhorn/core/vault-serviceaccount.yaml @@ -0,0 +1,6 @@ +# infrastructure/longhorn/core/vault-serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: longhorn-vault-sync + namespace: longhorn-system diff --git a/infrastructure/longhorn/core/vault-sync-deployment.yaml b/infrastructure/longhorn/core/vault-sync-deployment.yaml new file mode 100644 index 0000000..cb04c39 --- /dev/null +++ b/infrastructure/longhorn/core/vault-sync-deployment.yaml @@ -0,0 +1,34 @@ +# infrastructure/longhorn/core/vault-sync-deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: longhorn-vault-sync + namespace: longhorn-system +spec: + replicas: 1 + selector: + matchLabels: + app: longhorn-vault-sync + template: + metadata: + labels: + app: longhorn-vault-sync + spec: + 
serviceAccountName: longhorn-vault-sync + containers: + - name: sync + image: alpine:3.20 + command: ["/bin/sh", "-c"] + args: + - "sleep infinity" + volumeMounts: + - name: vault-secrets + mountPath: /vault/secrets + readOnly: true + volumes: + - name: vault-secrets + csi: + driver: secrets-store.csi.k8s.io + readOnly: true + volumeAttributes: + secretProviderClass: longhorn-vault diff --git a/services/vault/scripts/vault_k8s_auth_configure.sh b/services/vault/scripts/vault_k8s_auth_configure.sh index 0b2dca6..2d2d4ba 100644 --- a/services/vault/scripts/vault_k8s_auth_configure.sh +++ b/services/vault/scripts/vault_k8s_auth_configure.sh @@ -214,8 +214,8 @@ write_policy_and_role "crypto" "crypto" "crypto-vault-sync" \ "crypto/* harbor-pull/crypto" "" write_policy_and_role "health" "health" "health-vault-sync" \ "health/*" "" -write_policy_and_role "longhorn" "longhorn-system" "longhorn-vault" \ - "longhorn/*" "" +write_policy_and_role "longhorn" "longhorn-system" "longhorn-vault,longhorn-vault-sync" \ + "longhorn/* harbor-pull/longhorn" "" write_policy_and_role "postgres" "postgres" "postgres-vault" \ "postgres/postgres-db" "" write_policy_and_role "vault" "vault" "vault" \ From 1eb7d5825927efbbba76822879870ad254e73dea Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Fri, 16 Jan 2026 17:36:07 -0300 Subject: [PATCH 151/270] keycloak: enforce bstein group membership --- services/keycloak/user-overrides-job.yaml | 87 +++++++++++++---------- 1 file changed, 48 insertions(+), 39 deletions(-) diff --git a/services/keycloak/user-overrides-job.yaml b/services/keycloak/user-overrides-job.yaml index 6b398dc..678f479 100644 --- a/services/keycloak/user-overrides-job.yaml +++ b/services/keycloak/user-overrides-job.yaml @@ -2,7 +2,7 @@ apiVersion: batch/v1 kind: Job metadata: - name: keycloak-user-overrides-6 + name: keycloak-user-overrides-7 namespace: sso spec: backoffLimit: 0 @@ -150,53 +150,62 @@ spec: if not isinstance(attrs, dict): attrs = {} existing = 
attrs.get("mailu_email") + needs_update = True if isinstance(existing, list) and existing and existing[0] == override_mailu_email: - raise SystemExit(0) + needs_update = False if isinstance(existing, str) and existing == override_mailu_email: - raise SystemExit(0) + needs_update = False - attrs["mailu_email"] = [override_mailu_email] - status, _ = http_json( - "PUT", - f"{base_url}/admin/realms/{realm}/users/{user_id}", - access_token, - {"attributes": attrs}, - ) - if status not in (200, 204): - raise SystemExit(f"Unexpected user update response: {status}") + if needs_update: + attrs["mailu_email"] = [override_mailu_email] + status, _ = http_json( + "PUT", + f"{base_url}/admin/realms/{realm}/users/{user_id}", + access_token, + {"attributes": attrs}, + ) + if status not in (200, 204): + raise SystemExit(f"Unexpected user update response: {status}") - # Ensure the user is in the admin group for Vault access. - status, groups = http_json( - "GET", - f"{base_url}/admin/realms/{realm}/groups?search=admin", - access_token, - ) - if status != 200 or not isinstance(groups, list): - raise SystemExit("Unable to fetch groups") - group_id = "" - for item in groups: - if isinstance(item, dict) and item.get("name") == "admin": - group_id = item.get("id") or "" - break - if not group_id: - raise SystemExit("admin group not found") - status, memberships = http_json( - "GET", - f"{base_url}/admin/realms/{realm}/users/{user_id}/groups", - access_token, - ) - if status != 200 or not isinstance(memberships, list): - raise SystemExit("Unable to read user groups") - already = any( - isinstance(item, dict) and item.get("id") == group_id for item in memberships - ) - if not already: + # Ensure the user is in the admin and planka-users groups. 
+ def ensure_group(group_name: str) -> None: + status, groups = http_json( + "GET", + f"{base_url}/admin/realms/{realm}/groups?search={urllib.parse.quote(group_name)}", + access_token, + ) + if status != 200 or not isinstance(groups, list): + raise SystemExit("Unable to fetch groups") + group_id = "" + for item in groups: + if isinstance(item, dict) and item.get("name") == group_name: + group_id = item.get("id") or "" + break + if not group_id: + raise SystemExit(f"{group_name} group not found") + status, memberships = http_json( + "GET", + f"{base_url}/admin/realms/{realm}/users/{user_id}/groups", + access_token, + ) + if status != 200 or not isinstance(memberships, list): + raise SystemExit("Unable to read user groups") + already = any( + isinstance(item, dict) and item.get("id") == group_id for item in memberships + ) + if already: + return status, _ = http_json( "PUT", f"{base_url}/admin/realms/{realm}/users/{user_id}/groups/{group_id}", access_token, ) if status not in (200, 204): - raise SystemExit(f"Unexpected group update response: {status}") + raise SystemExit( + f"Unexpected group update response for {group_name}: {status}" + ) + + for group in ("admin", "planka-users"): + ensure_group(group) PY volumeMounts: From d9d31f77015f844315555a8f1e56d1d5aed93656 Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Fri, 16 Jan 2026 17:39:37 -0300 Subject: [PATCH 152/270] longhorn: allow kustomization to apply without waiting --- clusters/atlas/flux-system/platform/longhorn/kustomization.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/clusters/atlas/flux-system/platform/longhorn/kustomization.yaml b/clusters/atlas/flux-system/platform/longhorn/kustomization.yaml index 8805b5a..1a51254 100644 --- a/clusters/atlas/flux-system/platform/longhorn/kustomization.yaml +++ b/clusters/atlas/flux-system/platform/longhorn/kustomization.yaml @@ -17,4 +17,4 @@ spec: dependsOn: - name: helm - name: longhorn-adopt - wait: true + wait: false From 
b07f32e7c8e834cd9963e616789e7babebc3d1dc Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Fri, 16 Jan 2026 17:45:29 -0300 Subject: [PATCH 153/270] longhorn: pin vault sync to rpi workers --- .../longhorn/core/vault-sync-deployment.yaml | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/infrastructure/longhorn/core/vault-sync-deployment.yaml b/infrastructure/longhorn/core/vault-sync-deployment.yaml index cb04c39..95b159c 100644 --- a/infrastructure/longhorn/core/vault-sync-deployment.yaml +++ b/infrastructure/longhorn/core/vault-sync-deployment.yaml @@ -15,6 +15,17 @@ spec: app: longhorn-vault-sync spec: serviceAccountName: longhorn-vault-sync + nodeSelector: + node-role.kubernetes.io/worker: "true" + affinity: + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 80 + preference: + matchExpressions: + - key: hardware + operator: In + values: ["rpi5", "rpi4"] containers: - name: sync image: alpine:3.20 From 1fb7b27de4c296762d5b5342b3f0d4dd82e79478 Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Fri, 16 Jan 2026 17:47:34 -0300 Subject: [PATCH 154/270] keycloak: rerun realm and user overrides --- services/keycloak/realm-settings-job.yaml | 2 +- services/keycloak/user-overrides-job.yaml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/services/keycloak/realm-settings-job.yaml b/services/keycloak/realm-settings-job.yaml index 0bd78b5..0def763 100644 --- a/services/keycloak/realm-settings-job.yaml +++ b/services/keycloak/realm-settings-job.yaml @@ -2,7 +2,7 @@ apiVersion: batch/v1 kind: Job metadata: - name: keycloak-realm-settings-25 + name: keycloak-realm-settings-26 namespace: sso spec: backoffLimit: 0 diff --git a/services/keycloak/user-overrides-job.yaml b/services/keycloak/user-overrides-job.yaml index 678f479..a81ea7c 100644 --- a/services/keycloak/user-overrides-job.yaml +++ b/services/keycloak/user-overrides-job.yaml @@ -2,7 +2,7 @@ apiVersion: batch/v1 kind: Job metadata: - name: keycloak-user-overrides-7 
+ name: keycloak-user-overrides-8 namespace: sso spec: backoffLimit: 0 From 2f176d5a36a23113adb268d84ccb170a29204c36 Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Fri, 16 Jan 2026 17:58:20 -0300 Subject: [PATCH 155/270] planka: allow project creation for all users --- services/planka/deployment.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/services/planka/deployment.yaml b/services/planka/deployment.yaml index 7d8a628..afda7fd 100644 --- a/services/planka/deployment.yaml +++ b/services/planka/deployment.yaml @@ -106,7 +106,7 @@ spec: - name: OIDC_ADMIN_ROLES value: admin - name: OIDC_PROJECT_OWNER_ROLES - value: planka-users + value: "*" - name: OIDC_ROLES_ATTRIBUTE value: groups volumeMounts: From dc62b4998b7b44a5efc067dbe8a14b06792d04d5 Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Fri, 16 Jan 2026 18:17:40 -0300 Subject: [PATCH 156/270] cert-manager: pin webhook and cainjector to rpi nodes --- infrastructure/cert-manager/helmrelease.yaml | 26 ++++++++++++++++++++ 1 file changed, 26 insertions(+) diff --git a/infrastructure/cert-manager/helmrelease.yaml b/infrastructure/cert-manager/helmrelease.yaml index 7fdf277..0a0ed22 100644 --- a/infrastructure/cert-manager/helmrelease.yaml +++ b/infrastructure/cert-manager/helmrelease.yaml @@ -39,3 +39,29 @@ spec: values: - rpi5 - rpi4 + webhook: + nodeSelector: + node-role.kubernetes.io/worker: "true" + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: hardware + operator: In + values: + - rpi5 + - rpi4 + cainjector: + nodeSelector: + node-role.kubernetes.io/worker: "true" + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: hardware + operator: In + values: + - rpi5 + - rpi4 From 8535d50faa1f61fd4046c75e2164295de646f7a9 Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Fri, 16 Jan 2026 18:26:29 -0300 Subject: [PATCH 157/270] longhorn: force 
image pulls during migration --- infrastructure/longhorn/core/helmrelease.yaml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/infrastructure/longhorn/core/helmrelease.yaml b/infrastructure/longhorn/core/helmrelease.yaml index a5d44ad..067bbd6 100644 --- a/infrastructure/longhorn/core/helmrelease.yaml +++ b/infrastructure/longhorn/core/helmrelease.yaml @@ -34,6 +34,7 @@ spec: createSecret: false registrySecret: longhorn-registry image: + pullPolicy: Always longhorn: engine: repository: registry.bstein.dev/bstein/longhorn-engine @@ -75,3 +76,5 @@ spec: livenessProbe: repository: registry.bstein.dev/bstein/longhorn-livenessprobe tag: v2.16.0 + defaultSettings: + systemManagedPodsImagePullPolicy: Always From f8ffa830b70e076a36a9d7ba33f09e2b1973ee4a Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Fri, 16 Jan 2026 20:00:17 -0300 Subject: [PATCH 158/270] longhorn: move images to infra project --- infrastructure/longhorn/core/helmrelease.yaml | 26 ++++++++--------- .../longhorn/core/kustomization.yaml | 1 + infrastructure/longhorn/core/settings.yaml | 28 +++++++++++++++++++ 3 files changed, 42 insertions(+), 13 deletions(-) create mode 100644 infrastructure/longhorn/core/settings.yaml diff --git a/infrastructure/longhorn/core/helmrelease.yaml b/infrastructure/longhorn/core/helmrelease.yaml index 067bbd6..575f4bd 100644 --- a/infrastructure/longhorn/core/helmrelease.yaml +++ b/infrastructure/longhorn/core/helmrelease.yaml @@ -37,44 +37,44 @@ spec: pullPolicy: Always longhorn: engine: - repository: registry.bstein.dev/bstein/longhorn-engine + repository: registry.bstein.dev/infra/longhorn-engine tag: v1.8.2 manager: - repository: registry.bstein.dev/bstein/longhorn-manager + repository: registry.bstein.dev/infra/longhorn-manager tag: v1.8.2 ui: - repository: registry.bstein.dev/bstein/longhorn-ui + repository: registry.bstein.dev/infra/longhorn-ui tag: v1.8.2 instanceManager: - repository: registry.bstein.dev/bstein/longhorn-instance-manager + repository: 
registry.bstein.dev/infra/longhorn-instance-manager tag: v1.8.2 shareManager: - repository: registry.bstein.dev/bstein/longhorn-share-manager + repository: registry.bstein.dev/infra/longhorn-share-manager tag: v1.8.2 backingImageManager: - repository: registry.bstein.dev/bstein/longhorn-backing-image-manager + repository: registry.bstein.dev/infra/longhorn-backing-image-manager tag: v1.8.2 supportBundleKit: - repository: registry.bstein.dev/bstein/longhorn-support-bundle-kit + repository: registry.bstein.dev/infra/longhorn-support-bundle-kit tag: v0.0.56 csi: attacher: - repository: registry.bstein.dev/bstein/longhorn-csi-attacher + repository: registry.bstein.dev/infra/longhorn-csi-attacher tag: v4.9.0 provisioner: - repository: registry.bstein.dev/bstein/longhorn-csi-provisioner + repository: registry.bstein.dev/infra/longhorn-csi-provisioner tag: v5.3.0 nodeDriverRegistrar: - repository: registry.bstein.dev/bstein/longhorn-csi-node-driver-registrar + repository: registry.bstein.dev/infra/longhorn-csi-node-driver-registrar tag: v2.14.0 resizer: - repository: registry.bstein.dev/bstein/longhorn-csi-resizer + repository: registry.bstein.dev/infra/longhorn-csi-resizer tag: v1.13.2 snapshotter: - repository: registry.bstein.dev/bstein/longhorn-csi-snapshotter + repository: registry.bstein.dev/infra/longhorn-csi-snapshotter tag: v8.2.0 livenessProbe: - repository: registry.bstein.dev/bstein/longhorn-livenessprobe + repository: registry.bstein.dev/infra/longhorn-livenessprobe tag: v2.16.0 defaultSettings: systemManagedPodsImagePullPolicy: Always diff --git a/infrastructure/longhorn/core/kustomization.yaml b/infrastructure/longhorn/core/kustomization.yaml index e8320c7..82ad411 100644 --- a/infrastructure/longhorn/core/kustomization.yaml +++ b/infrastructure/longhorn/core/kustomization.yaml @@ -7,3 +7,4 @@ resources: - secretproviderclass.yaml - vault-sync-deployment.yaml - helmrelease.yaml + - settings.yaml diff --git a/infrastructure/longhorn/core/settings.yaml 
b/infrastructure/longhorn/core/settings.yaml new file mode 100644 index 0000000..45b51d4 --- /dev/null +++ b/infrastructure/longhorn/core/settings.yaml @@ -0,0 +1,28 @@ +# infrastructure/longhorn/core/settings.yaml +apiVersion: longhorn.io/v1beta2 +kind: Setting +metadata: + name: default-engine-image + namespace: longhorn-system +value: registry.bstein.dev/infra/longhorn-engine:v1.8.2 +--- +apiVersion: longhorn.io/v1beta2 +kind: Setting +metadata: + name: default-instance-manager-image + namespace: longhorn-system +value: registry.bstein.dev/infra/longhorn-instance-manager:v1.8.2 +--- +apiVersion: longhorn.io/v1beta2 +kind: Setting +metadata: + name: default-backing-image-manager-image + namespace: longhorn-system +value: registry.bstein.dev/infra/longhorn-backing-image-manager:v1.8.2 +--- +apiVersion: longhorn.io/v1beta2 +kind: Setting +metadata: + name: support-bundle-manager-image + namespace: longhorn-system +value: registry.bstein.dev/infra/longhorn-support-bundle-kit:v0.0.56 From 71a1a55a01dc17a09baaeb8184e32a8dcefec5da Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Fri, 16 Jan 2026 20:05:36 -0300 Subject: [PATCH 159/270] longhorn: ensure settings via job --- .../longhorn/core/kustomization.yaml | 10 +++++- .../core/longhorn-settings-ensure-job.yaml | 36 +++++++++++++++++++ .../core/scripts/longhorn_settings_ensure.sh | 18 ++++++++++ infrastructure/longhorn/core/settings.yaml | 28 --------------- 4 files changed, 63 insertions(+), 29 deletions(-) create mode 100644 infrastructure/longhorn/core/longhorn-settings-ensure-job.yaml create mode 100644 infrastructure/longhorn/core/scripts/longhorn_settings_ensure.sh delete mode 100644 infrastructure/longhorn/core/settings.yaml diff --git a/infrastructure/longhorn/core/kustomization.yaml b/infrastructure/longhorn/core/kustomization.yaml index 82ad411..deb5308 100644 --- a/infrastructure/longhorn/core/kustomization.yaml +++ b/infrastructure/longhorn/core/kustomization.yaml @@ -7,4 +7,12 @@ resources: - 
secretproviderclass.yaml - vault-sync-deployment.yaml - helmrelease.yaml - - settings.yaml + - longhorn-settings-ensure-job.yaml + +configMapGenerator: + - name: longhorn-settings-ensure-script + files: + - longhorn_settings_ensure.sh=scripts/longhorn_settings_ensure.sh + +generatorOptions: + disableNameSuffixHash: true diff --git a/infrastructure/longhorn/core/longhorn-settings-ensure-job.yaml b/infrastructure/longhorn/core/longhorn-settings-ensure-job.yaml new file mode 100644 index 0000000..ba5927a --- /dev/null +++ b/infrastructure/longhorn/core/longhorn-settings-ensure-job.yaml @@ -0,0 +1,36 @@ +# infrastructure/longhorn/core/longhorn-settings-ensure-job.yaml +apiVersion: batch/v1 +kind: Job +metadata: + name: longhorn-settings-ensure-1 + namespace: longhorn-system +spec: + backoffLimit: 0 + ttlSecondsAfterFinished: 3600 + template: + spec: + serviceAccountName: longhorn-service-account + restartPolicy: Never + volumes: + - name: longhorn-settings-ensure-script + configMap: + name: longhorn-settings-ensure-script + defaultMode: 0555 + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/arch + operator: In + values: ["arm64"] + - key: node-role.kubernetes.io/worker + operator: Exists + containers: + - name: apply + image: bitnami/kubectl@sha256:554ab88b1858e8424c55de37ad417b16f2a0e65d1607aa0f3fe3ce9b9f10b131 + command: ["/scripts/longhorn_settings_ensure.sh"] + volumeMounts: + - name: longhorn-settings-ensure-script + mountPath: /scripts + readOnly: true diff --git a/infrastructure/longhorn/core/scripts/longhorn_settings_ensure.sh b/infrastructure/longhorn/core/scripts/longhorn_settings_ensure.sh new file mode 100644 index 0000000..0bc3446 --- /dev/null +++ b/infrastructure/longhorn/core/scripts/longhorn_settings_ensure.sh @@ -0,0 +1,18 @@ +#!/usr/bin/env bash +set -euo pipefail + +namespace="longhorn-system" + +patch_setting() { + local name="$1" + local value="$2" + + 
kubectl -n "${namespace}" patch setting.longhorn.io "${name}" \ + --type=merge \ + -p "{\"value\":\"${value}\"}" >/dev/null +} + +patch_setting default-engine-image "registry.bstein.dev/infra/longhorn-engine:v1.8.2" +patch_setting default-instance-manager-image "registry.bstein.dev/infra/longhorn-instance-manager:v1.8.2" +patch_setting default-backing-image-manager-image "registry.bstein.dev/infra/longhorn-backing-image-manager:v1.8.2" +patch_setting support-bundle-manager-image "registry.bstein.dev/infra/longhorn-support-bundle-kit:v0.0.56" diff --git a/infrastructure/longhorn/core/settings.yaml b/infrastructure/longhorn/core/settings.yaml deleted file mode 100644 index 45b51d4..0000000 --- a/infrastructure/longhorn/core/settings.yaml +++ /dev/null @@ -1,28 +0,0 @@ -# infrastructure/longhorn/core/settings.yaml -apiVersion: longhorn.io/v1beta2 -kind: Setting -metadata: - name: default-engine-image - namespace: longhorn-system -value: registry.bstein.dev/infra/longhorn-engine:v1.8.2 ---- -apiVersion: longhorn.io/v1beta2 -kind: Setting -metadata: - name: default-instance-manager-image - namespace: longhorn-system -value: registry.bstein.dev/infra/longhorn-instance-manager:v1.8.2 ---- -apiVersion: longhorn.io/v1beta2 -kind: Setting -metadata: - name: default-backing-image-manager-image - namespace: longhorn-system -value: registry.bstein.dev/infra/longhorn-backing-image-manager:v1.8.2 ---- -apiVersion: longhorn.io/v1beta2 -kind: Setting -metadata: - name: support-bundle-manager-image - namespace: longhorn-system -value: registry.bstein.dev/infra/longhorn-support-bundle-kit:v0.0.56 From 42e987f4ee60a228a7b5cc2499b322a7fd401ea8 Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Fri, 16 Jan 2026 20:11:22 -0300 Subject: [PATCH 160/270] longhorn: apply settings via api job --- .../core/longhorn-settings-ensure-job.yaml | 4 +- .../core/scripts/longhorn_settings_ensure.sh | 43 +++++++++++++------ 2 files changed, 32 insertions(+), 15 deletions(-) diff --git 
a/infrastructure/longhorn/core/longhorn-settings-ensure-job.yaml b/infrastructure/longhorn/core/longhorn-settings-ensure-job.yaml index ba5927a..7c40721 100644 --- a/infrastructure/longhorn/core/longhorn-settings-ensure-job.yaml +++ b/infrastructure/longhorn/core/longhorn-settings-ensure-job.yaml @@ -2,7 +2,7 @@ apiVersion: batch/v1 kind: Job metadata: - name: longhorn-settings-ensure-1 + name: longhorn-settings-ensure-2 namespace: longhorn-system spec: backoffLimit: 0 @@ -28,7 +28,7 @@ spec: operator: Exists containers: - name: apply - image: bitnami/kubectl@sha256:554ab88b1858e8424c55de37ad417b16f2a0e65d1607aa0f3fe3ce9b9f10b131 + image: docker.io/alpine:3.20 command: ["/scripts/longhorn_settings_ensure.sh"] volumeMounts: - name: longhorn-settings-ensure-script diff --git a/infrastructure/longhorn/core/scripts/longhorn_settings_ensure.sh b/infrastructure/longhorn/core/scripts/longhorn_settings_ensure.sh index 0bc3446..be188b0 100644 --- a/infrastructure/longhorn/core/scripts/longhorn_settings_ensure.sh +++ b/infrastructure/longhorn/core/scripts/longhorn_settings_ensure.sh @@ -1,18 +1,35 @@ -#!/usr/bin/env bash -set -euo pipefail +#!/usr/bin/env sh +set -eu -namespace="longhorn-system" +# Longhorn blocks direct CR patches for some settings; use the internal API instead. +apk add --no-cache curl >/dev/null -patch_setting() { - local name="$1" - local value="$2" +api_base="http://longhorn-backend.longhorn-system.svc:9500/v1/settings" - kubectl -n "${namespace}" patch setting.longhorn.io "${name}" \ - --type=merge \ - -p "{\"value\":\"${value}\"}" >/dev/null +wait_for_api() { + attempts=30 + while [ "${attempts}" -gt 0 ]; do + if curl -fsS "${api_base}" >/dev/null 2>&1; then + return 0 + fi + attempts=$((attempts - 1)) + sleep 2 + done + echo "Longhorn API not ready after retries." 
>&2 + return 1 } -patch_setting default-engine-image "registry.bstein.dev/infra/longhorn-engine:v1.8.2" -patch_setting default-instance-manager-image "registry.bstein.dev/infra/longhorn-instance-manager:v1.8.2" -patch_setting default-backing-image-manager-image "registry.bstein.dev/infra/longhorn-backing-image-manager:v1.8.2" -patch_setting support-bundle-manager-image "registry.bstein.dev/infra/longhorn-support-bundle-kit:v0.0.56" +update_setting() { + name="$1" + value="$2" + curl -fsS -X PUT \ + -H "Content-Type: application/json" \ + -d "{\"value\":\"${value}\"}" \ + "${api_base}/${name}" >/dev/null +} + +wait_for_api +update_setting default-engine-image "registry.bstein.dev/infra/longhorn-engine:v1.8.2" +update_setting default-instance-manager-image "registry.bstein.dev/infra/longhorn-instance-manager:v1.8.2" +update_setting default-backing-image-manager-image "registry.bstein.dev/infra/longhorn-backing-image-manager:v1.8.2" +update_setting support-bundle-manager-image "registry.bstein.dev/infra/longhorn-support-bundle-kit:v0.0.56" From 55992ea48f32e13c503027f82784b9d3830e8137 Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Fri, 16 Jan 2026 20:15:33 -0300 Subject: [PATCH 161/270] longhorn: make settings job idempotent --- .../longhorn/core/longhorn-settings-ensure-job.yaml | 2 +- .../longhorn/core/scripts/longhorn_settings_ensure.sh | 8 ++++++++ 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/infrastructure/longhorn/core/longhorn-settings-ensure-job.yaml b/infrastructure/longhorn/core/longhorn-settings-ensure-job.yaml index 7c40721..f8e56cb 100644 --- a/infrastructure/longhorn/core/longhorn-settings-ensure-job.yaml +++ b/infrastructure/longhorn/core/longhorn-settings-ensure-job.yaml @@ -2,7 +2,7 @@ apiVersion: batch/v1 kind: Job metadata: - name: longhorn-settings-ensure-2 + name: longhorn-settings-ensure-3 namespace: longhorn-system spec: backoffLimit: 0 diff --git a/infrastructure/longhorn/core/scripts/longhorn_settings_ensure.sh 
b/infrastructure/longhorn/core/scripts/longhorn_settings_ensure.sh index be188b0..f73a0cc 100644 --- a/infrastructure/longhorn/core/scripts/longhorn_settings_ensure.sh +++ b/infrastructure/longhorn/core/scripts/longhorn_settings_ensure.sh @@ -22,6 +22,14 @@ wait_for_api() { update_setting() { name="$1" value="$2" + + current="$(curl -fsS "${api_base}/${name}" || true)" + if echo "${current}" | grep -Fq "\"value\":\"${value}\""; then + echo "Setting ${name} already set." + return 0 + fi + + echo "Setting ${name} -> ${value}" curl -fsS -X PUT \ -H "Content-Type: application/json" \ -d "{\"value\":\"${value}\"}" \ From d9fabbf35360b45200dbfdd78b83179d32322a7c Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Fri, 16 Jan 2026 23:12:56 -0300 Subject: [PATCH 162/270] core: scale coredns replicas --- infrastructure/core/coredns-replicas.yaml | 8 ++++++++ infrastructure/core/kustomization.yaml | 1 + 2 files changed, 9 insertions(+) create mode 100644 infrastructure/core/coredns-replicas.yaml diff --git a/infrastructure/core/coredns-replicas.yaml b/infrastructure/core/coredns-replicas.yaml new file mode 100644 index 0000000..52b2aa8 --- /dev/null +++ b/infrastructure/core/coredns-replicas.yaml @@ -0,0 +1,8 @@ +# infrastructure/core/coredns-replicas.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: coredns + namespace: kube-system +spec: + replicas: 2 diff --git a/infrastructure/core/kustomization.yaml b/infrastructure/core/kustomization.yaml index 5e74d81..7a262ba 100644 --- a/infrastructure/core/kustomization.yaml +++ b/infrastructure/core/kustomization.yaml @@ -5,5 +5,6 @@ resources: - ../modules/base - ../modules/profiles/atlas-ha - coredns-custom.yaml + - coredns-replicas.yaml - ../sources/cert-manager/letsencrypt.yaml - ../sources/cert-manager/letsencrypt-prod.yaml From e1bd962956371894aa35b34a1feac818f76eb337 Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Fri, 16 Jan 2026 23:16:04 -0300 Subject: [PATCH 163/270] core: manage coredns deployment --- 
infrastructure/core/coredns-deployment.yaml | 127 ++++++++++++++++++++ infrastructure/core/coredns-replicas.yaml | 8 -- infrastructure/core/kustomization.yaml | 2 +- 3 files changed, 128 insertions(+), 9 deletions(-) create mode 100644 infrastructure/core/coredns-deployment.yaml delete mode 100644 infrastructure/core/coredns-replicas.yaml diff --git a/infrastructure/core/coredns-deployment.yaml b/infrastructure/core/coredns-deployment.yaml new file mode 100644 index 0000000..4b74159 --- /dev/null +++ b/infrastructure/core/coredns-deployment.yaml @@ -0,0 +1,127 @@ +# infrastructure/core/coredns-deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: coredns + namespace: kube-system + labels: + k8s-app: kube-dns + kubernetes.io/name: CoreDNS +spec: + progressDeadlineSeconds: 600 + replicas: 2 + revisionHistoryLimit: 0 + selector: + matchLabels: + k8s-app: kube-dns + strategy: + type: RollingUpdate + rollingUpdate: + maxSurge: 25% + maxUnavailable: 1 + template: + metadata: + labels: + k8s-app: kube-dns + spec: + containers: + - name: coredns + image: rancher/mirrored-coredns-coredns:1.12.1 + imagePullPolicy: IfNotPresent + args: + - -conf + - /etc/coredns/Corefile + ports: + - containerPort: 53 + name: dns + protocol: UDP + - containerPort: 53 + name: dns-tcp + protocol: TCP + - containerPort: 9153 + name: metrics + protocol: TCP + livenessProbe: + httpGet: + path: /health + port: 8080 + scheme: HTTP + initialDelaySeconds: 60 + periodSeconds: 10 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /ready + port: 8181 + scheme: HTTP + periodSeconds: 2 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 3 + resources: + limits: + memory: 170Mi + requests: + cpu: 100m + memory: 70Mi + securityContext: + allowPrivilegeEscalation: false + capabilities: + add: + - NET_BIND_SERVICE + drop: + - all + readOnlyRootFilesystem: true + volumeMounts: + - name: config-volume + mountPath: /etc/coredns + 
readOnly: true + - name: custom-config-volume + mountPath: /etc/coredns/custom + readOnly: true + dnsPolicy: Default + nodeSelector: + kubernetes.io/os: linux + priorityClassName: system-cluster-critical + restartPolicy: Always + schedulerName: default-scheduler + serviceAccountName: coredns + tolerations: + - key: CriticalAddonsOnly + operator: Exists + - key: node-role.kubernetes.io/control-plane + operator: Exists + effect: NoSchedule + - key: node-role.kubernetes.io/master + operator: Exists + effect: NoSchedule + topologySpreadConstraints: + - maxSkew: 1 + topologyKey: kubernetes.io/hostname + whenUnsatisfiable: DoNotSchedule + labelSelector: + matchLabels: + k8s-app: kube-dns + - maxSkew: 1 + topologyKey: topology.kubernetes.io/zone + whenUnsatisfiable: ScheduleAnyway + labelSelector: + matchLabels: + k8s-app: kube-dns + volumes: + - name: config-volume + configMap: + name: coredns + defaultMode: 420 + items: + - key: Corefile + path: Corefile + - key: NodeHosts + path: NodeHosts + - name: custom-config-volume + configMap: + name: coredns-custom + optional: true + defaultMode: 420 diff --git a/infrastructure/core/coredns-replicas.yaml b/infrastructure/core/coredns-replicas.yaml deleted file mode 100644 index 52b2aa8..0000000 --- a/infrastructure/core/coredns-replicas.yaml +++ /dev/null @@ -1,8 +0,0 @@ -# infrastructure/core/coredns-replicas.yaml -apiVersion: apps/v1 -kind: Deployment -metadata: - name: coredns - namespace: kube-system -spec: - replicas: 2 diff --git a/infrastructure/core/kustomization.yaml b/infrastructure/core/kustomization.yaml index 7a262ba..6286186 100644 --- a/infrastructure/core/kustomization.yaml +++ b/infrastructure/core/kustomization.yaml @@ -5,6 +5,6 @@ resources: - ../modules/base - ../modules/profiles/atlas-ha - coredns-custom.yaml - - coredns-replicas.yaml + - coredns-deployment.yaml - ../sources/cert-manager/letsencrypt.yaml - ../sources/cert-manager/letsencrypt-prod.yaml From 368dd81c5e595852fbbd241aca66b89c8410623c Mon Sep 17 
00:00:00 2001 From: Brad Stein Date: Fri, 16 Jan 2026 23:25:28 -0300 Subject: [PATCH 164/270] core: use harbor coredns image --- infrastructure/core/coredns-deployment.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/infrastructure/core/coredns-deployment.yaml b/infrastructure/core/coredns-deployment.yaml index 4b74159..8ef03bd 100644 --- a/infrastructure/core/coredns-deployment.yaml +++ b/infrastructure/core/coredns-deployment.yaml @@ -26,7 +26,7 @@ spec: spec: containers: - name: coredns - image: rancher/mirrored-coredns-coredns:1.12.1 + image: registry.bstein.dev/infra/coredns:v1.12.1 imagePullPolicy: IfNotPresent args: - -conf From 354a803ff44c09c73647fdf7349cf3f39829699c Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Fri, 16 Jan 2026 23:27:04 -0300 Subject: [PATCH 165/270] core: fix coredns tag --- infrastructure/core/coredns-deployment.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/infrastructure/core/coredns-deployment.yaml b/infrastructure/core/coredns-deployment.yaml index 8ef03bd..d3eb368 100644 --- a/infrastructure/core/coredns-deployment.yaml +++ b/infrastructure/core/coredns-deployment.yaml @@ -26,7 +26,7 @@ spec: spec: containers: - name: coredns - image: registry.bstein.dev/infra/coredns:v1.12.1 + image: registry.bstein.dev/infra/coredns:1.12.1 imagePullPolicy: IfNotPresent args: - -conf From 3e3061fe5b3d04fc8d3527f5ab598172ace0106d Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Fri, 16 Jan 2026 23:52:56 -0300 Subject: [PATCH 166/270] finance: add actual budget and firefly --- .../applications/finance/kustomization.yaml | 30 ++++ .../applications/kustomization.yaml | 1 + .../bstein-dev-home/backend-deployment.yaml | 6 + services/finance/actual-budget-data-pvc.yaml | 12 ++ .../finance/actual-budget-deployment.yaml | 156 +++++++++++++++++ services/finance/actual-budget-ingress.yaml | 26 +++ services/finance/actual-budget-service.yaml | 15 ++ services/finance/firefly-cronjob.yaml | 55 ++++++ 
services/finance/firefly-deployment.yaml | 164 ++++++++++++++++++ services/finance/firefly-ingress.yaml | 26 +++ services/finance/firefly-service.yaml | 15 ++ services/finance/firefly-storage-pvc.yaml | 12 ++ .../finance/firefly-user-sync-cronjob.yaml | 90 ++++++++++ services/finance/kustomization.yaml | 27 +++ services/finance/namespace.yaml | 5 + services/finance/portal-rbac.yaml | 31 ++++ .../scripts/actual_openid_bootstrap.mjs | 70 ++++++++ .../finance/scripts/firefly_user_sync.php | 107 ++++++++++++ services/finance/serviceaccount.yaml | 6 + .../actual-oidc-secret-ensure-job.yaml | 48 +++++ services/keycloak/kustomization.yaml | 4 + services/keycloak/realm-settings-job.yaml | 16 ++ .../scripts/actual_oidc_secret_ensure.sh | 78 +++++++++ .../vault/scripts/vault_k8s_auth_configure.sh | 4 +- 24 files changed, 1003 insertions(+), 1 deletion(-) create mode 100644 clusters/atlas/flux-system/applications/finance/kustomization.yaml create mode 100644 services/finance/actual-budget-data-pvc.yaml create mode 100644 services/finance/actual-budget-deployment.yaml create mode 100644 services/finance/actual-budget-ingress.yaml create mode 100644 services/finance/actual-budget-service.yaml create mode 100644 services/finance/firefly-cronjob.yaml create mode 100644 services/finance/firefly-deployment.yaml create mode 100644 services/finance/firefly-ingress.yaml create mode 100644 services/finance/firefly-service.yaml create mode 100644 services/finance/firefly-storage-pvc.yaml create mode 100644 services/finance/firefly-user-sync-cronjob.yaml create mode 100644 services/finance/kustomization.yaml create mode 100644 services/finance/namespace.yaml create mode 100644 services/finance/portal-rbac.yaml create mode 100644 services/finance/scripts/actual_openid_bootstrap.mjs create mode 100644 services/finance/scripts/firefly_user_sync.php create mode 100644 services/finance/serviceaccount.yaml create mode 100644 services/keycloak/actual-oidc-secret-ensure-job.yaml create mode 
100644 services/keycloak/scripts/actual_oidc_secret_ensure.sh diff --git a/clusters/atlas/flux-system/applications/finance/kustomization.yaml b/clusters/atlas/flux-system/applications/finance/kustomization.yaml new file mode 100644 index 0000000..a28b711 --- /dev/null +++ b/clusters/atlas/flux-system/applications/finance/kustomization.yaml @@ -0,0 +1,30 @@ +# clusters/atlas/flux-system/applications/finance/kustomization.yaml +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: finance + namespace: flux-system +spec: + interval: 10m + path: ./services/finance + prune: true + sourceRef: + kind: GitRepository + name: flux-system + targetNamespace: finance + dependsOn: + - name: keycloak + - name: postgres + - name: traefik + - name: vault + - name: mailu + healthChecks: + - apiVersion: apps/v1 + kind: Deployment + name: actual-budget + namespace: finance + - apiVersion: apps/v1 + kind: Deployment + name: firefly + namespace: finance + wait: false diff --git a/clusters/atlas/flux-system/applications/kustomization.yaml b/clusters/atlas/flux-system/applications/kustomization.yaml index c73906e..417a3ec 100644 --- a/clusters/atlas/flux-system/applications/kustomization.yaml +++ b/clusters/atlas/flux-system/applications/kustomization.yaml @@ -28,4 +28,5 @@ resources: - nextcloud-mail-sync/kustomization.yaml - outline/kustomization.yaml - planka/kustomization.yaml + - finance/kustomization.yaml - health/kustomization.yaml diff --git a/services/bstein-dev-home/backend-deployment.yaml b/services/bstein-dev-home/backend-deployment.yaml index 6e7b40f..7ccca82 100644 --- a/services/bstein-dev-home/backend-deployment.yaml +++ b/services/bstein-dev-home/backend-deployment.yaml @@ -102,6 +102,12 @@ spec: value: wger-user-sync - name: WGER_USER_SYNC_WAIT_TIMEOUT_SEC value: "90" + - name: FIREFLY_NAMESPACE + value: finance + - name: FIREFLY_USER_SYNC_CRONJOB + value: firefly-user-sync + - name: FIREFLY_USER_SYNC_WAIT_TIMEOUT_SEC + value: "90" ports: - 
name: http containerPort: 8080 diff --git a/services/finance/actual-budget-data-pvc.yaml b/services/finance/actual-budget-data-pvc.yaml new file mode 100644 index 0000000..7016cda --- /dev/null +++ b/services/finance/actual-budget-data-pvc.yaml @@ -0,0 +1,12 @@ +# services/finance/actual-budget-data-pvc.yaml +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: actual-budget-data + namespace: finance +spec: + accessModes: ["ReadWriteOnce"] + storageClassName: asteria + resources: + requests: + storage: 10Gi diff --git a/services/finance/actual-budget-deployment.yaml b/services/finance/actual-budget-deployment.yaml new file mode 100644 index 0000000..11b7e5c --- /dev/null +++ b/services/finance/actual-budget-deployment.yaml @@ -0,0 +1,156 @@ +# services/finance/actual-budget-deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: actual-budget + namespace: finance + labels: + app: actual-budget +spec: + replicas: 1 + selector: + matchLabels: + app: actual-budget + strategy: + type: RollingUpdate + rollingUpdate: + maxSurge: 0 + maxUnavailable: 1 + template: + metadata: + labels: + app: actual-budget + annotations: + vault.hashicorp.com/agent-inject: "true" + vault.hashicorp.com/role: "finance" + vault.hashicorp.com/agent-inject-secret-actual-env.sh: "kv/data/atlas/finance/actual-oidc" + vault.hashicorp.com/agent-inject-template-actual-env.sh: | + {{ with secret "kv/data/atlas/finance/actual-oidc" }} + export ACTUAL_OPENID_CLIENT_ID="{{ .Data.data.ACTUAL_OPENID_CLIENT_ID }}" + export ACTUAL_OPENID_CLIENT_SECRET="{{ .Data.data.ACTUAL_OPENID_CLIENT_SECRET }}" + {{ end }} + spec: + serviceAccountName: finance-vault + nodeSelector: + kubernetes.io/arch: arm64 + node-role.kubernetes.io/worker: "true" + affinity: + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: hardware + operator: In + values: ["rpi5"] + - weight: 70 + preference: + matchExpressions: + - key: hardware 
+ operator: In + values: ["rpi4"] + securityContext: + runAsUser: 1000 + runAsGroup: 1000 + fsGroup: 1000 + fsGroupChangePolicy: OnRootMismatch + initContainers: + - name: init-data-permissions + image: docker.io/alpine:3.20 + command: ["/bin/sh", "-c"] + args: + - | + set -e + mkdir -p /data + chown -R 1000:1000 /data + securityContext: + runAsUser: 0 + runAsGroup: 0 + volumeMounts: + - name: actual-data + mountPath: /data + - name: init-openid + image: actualbudget/actual-server:sha-b6452f9-alpine + command: ["/bin/sh", "-c"] + args: + - | + set -eu + . /vault/secrets/actual-env.sh + node /scripts/actual_openid_bootstrap.mjs + env: + - name: ACTUAL_DATA_DIR + value: /data + - name: ACTUAL_LOGIN_METHOD + value: openid + - name: ACTUAL_ALLOWED_LOGIN_METHODS + value: openid + - name: ACTUAL_MULTIUSER + value: "true" + - name: ACTUAL_OPENID_DISCOVERY_URL + value: https://sso.bstein.dev/realms/atlas + - name: ACTUAL_OPENID_SERVER_HOSTNAME + value: https://budget.bstein.dev + volumeMounts: + - name: actual-data + mountPath: /data + - name: actual-openid-bootstrap-script + mountPath: /scripts + readOnly: true + containers: + - name: actual-budget + image: actualbudget/actual-server:sha-b6452f9-alpine + command: ["/bin/sh", "-c"] + args: + - | + . 
/vault/secrets/actual-env.sh + exec node app + ports: + - name: http + containerPort: 5006 + env: + - name: ACTUAL_DATA_DIR + value: /data + - name: ACTUAL_LOGIN_METHOD + value: openid + - name: ACTUAL_ALLOWED_LOGIN_METHODS + value: openid + - name: ACTUAL_MULTIUSER + value: "true" + - name: ACTUAL_OPENID_DISCOVERY_URL + value: https://sso.bstein.dev/realms/atlas + - name: ACTUAL_OPENID_SERVER_HOSTNAME + value: https://budget.bstein.dev + volumeMounts: + - name: actual-data + mountPath: /data + readinessProbe: + httpGet: + path: /health + port: http + initialDelaySeconds: 10 + periodSeconds: 10 + timeoutSeconds: 3 + failureThreshold: 6 + livenessProbe: + httpGet: + path: /health + port: http + initialDelaySeconds: 30 + periodSeconds: 20 + timeoutSeconds: 3 + failureThreshold: 6 + resources: + requests: + cpu: 200m + memory: 512Mi + limits: + cpu: "1" + memory: 1Gi + volumes: + - name: actual-data + persistentVolumeClaim: + claimName: actual-budget-data + - name: actual-openid-bootstrap-script + configMap: + name: actual-openid-bootstrap-script + defaultMode: 0555 diff --git a/services/finance/actual-budget-ingress.yaml b/services/finance/actual-budget-ingress.yaml new file mode 100644 index 0000000..4cbc9e6 --- /dev/null +++ b/services/finance/actual-budget-ingress.yaml @@ -0,0 +1,26 @@ +# services/finance/actual-budget-ingress.yaml +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: actual-budget + namespace: finance + annotations: + kubernetes.io/ingress.class: traefik + traefik.ingress.kubernetes.io/router.entrypoints: websecure + traefik.ingress.kubernetes.io/router.tls: "true" + cert-manager.io/cluster-issuer: letsencrypt +spec: + tls: + - hosts: ["budget.bstein.dev"] + secretName: actual-budget-tls + rules: + - host: budget.bstein.dev + http: + paths: + - path: / + pathType: Prefix + backend: + service: + name: actual-budget + port: + number: 80 diff --git a/services/finance/actual-budget-service.yaml 
b/services/finance/actual-budget-service.yaml new file mode 100644 index 0000000..05213c4 --- /dev/null +++ b/services/finance/actual-budget-service.yaml @@ -0,0 +1,15 @@ +# services/finance/actual-budget-service.yaml +apiVersion: v1 +kind: Service +metadata: + name: actual-budget + namespace: finance + labels: + app: actual-budget +spec: + selector: + app: actual-budget + ports: + - name: http + port: 80 + targetPort: 5006 diff --git a/services/finance/firefly-cronjob.yaml b/services/finance/firefly-cronjob.yaml new file mode 100644 index 0000000..6c4d507 --- /dev/null +++ b/services/finance/firefly-cronjob.yaml @@ -0,0 +1,55 @@ +# services/finance/firefly-cronjob.yaml +apiVersion: batch/v1 +kind: CronJob +metadata: + name: firefly-cron + namespace: finance +spec: + schedule: "0 3 * * *" + concurrencyPolicy: Forbid + successfulJobsHistoryLimit: 1 + failedJobsHistoryLimit: 3 + jobTemplate: + spec: + backoffLimit: 1 + template: + metadata: + annotations: + vault.hashicorp.com/agent-inject: "true" + vault.hashicorp.com/agent-pre-populate-only: "true" + vault.hashicorp.com/role: "finance" + vault.hashicorp.com/agent-inject-secret-firefly-cron-token: "kv/data/atlas/finance/firefly-secrets" + vault.hashicorp.com/agent-inject-template-firefly-cron-token: | + {{- with secret "kv/data/atlas/finance/firefly-secrets" -}} + {{ .Data.data.STATIC_CRON_TOKEN }} + {{- end -}} + spec: + serviceAccountName: finance-vault + restartPolicy: Never + affinity: + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: hardware + operator: In + values: ["rpi5"] + - weight: 70 + preference: + matchExpressions: + - key: hardware + operator: In + values: ["rpi4"] + nodeSelector: + kubernetes.io/arch: arm64 + node-role.kubernetes.io/worker: "true" + containers: + - name: cron + image: curlimages/curl:8.5.0 + command: ["/bin/sh", "-c"] + args: + - | + set -eu + token="$(cat /vault/secrets/firefly-cron-token)" + curl -fsS 
"http://firefly.finance.svc.cluster.local/api/v1/cron/${token}" diff --git a/services/finance/firefly-deployment.yaml b/services/finance/firefly-deployment.yaml new file mode 100644 index 0000000..1b51a07 --- /dev/null +++ b/services/finance/firefly-deployment.yaml @@ -0,0 +1,164 @@ +# services/finance/firefly-deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: firefly + namespace: finance + labels: + app: firefly +spec: + replicas: 1 + selector: + matchLabels: + app: firefly + strategy: + type: RollingUpdate + rollingUpdate: + maxSurge: 0 + maxUnavailable: 1 + template: + metadata: + labels: + app: firefly + annotations: + vault.hashicorp.com/agent-inject: "true" + vault.hashicorp.com/role: "finance" + vault.hashicorp.com/agent-inject-secret-firefly-env.sh: "kv/data/atlas/finance/firefly-db" + vault.hashicorp.com/agent-inject-template-firefly-env.sh: | + {{ with secret "kv/data/atlas/finance/firefly-db" }} + export DB_CONNECTION="pgsql" + export DB_HOST="{{ .Data.data.DB_HOST }}" + export DB_PORT="{{ .Data.data.DB_PORT }}" + export DB_DATABASE="{{ .Data.data.DB_DATABASE }}" + export DB_USERNAME="{{ .Data.data.DB_USERNAME }}" + export DB_PASSWORD="$(cat /vault/secrets/firefly-db-password)" + {{ end }} + {{ with secret "kv/data/atlas/finance/firefly-secrets" }} + export APP_KEY="$(cat /vault/secrets/firefly-app-key)" + export STATIC_CRON_TOKEN="$(cat /vault/secrets/firefly-cron-token)" + {{ end }} + {{ with secret "kv/data/atlas/shared/postmark-relay" }} + export MAIL_USERNAME="{{ index .Data.data "relay-username" }}" + export MAIL_PASSWORD="{{ index .Data.data "relay-password" }}" + {{ end }} + vault.hashicorp.com/agent-inject-secret-firefly-db-password: "kv/data/atlas/finance/firefly-db" + vault.hashicorp.com/agent-inject-template-firefly-db-password: | + {{- with secret "kv/data/atlas/finance/firefly-db" -}} + {{ .Data.data.DB_PASSWORD }} + {{- end -}} + vault.hashicorp.com/agent-inject-secret-firefly-app-key: 
"kv/data/atlas/finance/firefly-secrets" + vault.hashicorp.com/agent-inject-template-firefly-app-key: | + {{- with secret "kv/data/atlas/finance/firefly-secrets" -}} + {{ .Data.data.APP_KEY }} + {{- end -}} + vault.hashicorp.com/agent-inject-secret-firefly-cron-token: "kv/data/atlas/finance/firefly-secrets" + vault.hashicorp.com/agent-inject-template-firefly-cron-token: | + {{- with secret "kv/data/atlas/finance/firefly-secrets" -}} + {{ .Data.data.STATIC_CRON_TOKEN }} + {{- end -}} + spec: + serviceAccountName: finance-vault + nodeSelector: + kubernetes.io/arch: arm64 + node-role.kubernetes.io/worker: "true" + affinity: + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: hardware + operator: In + values: ["rpi5"] + - weight: 70 + preference: + matchExpressions: + - key: hardware + operator: In + values: ["rpi4"] + securityContext: + runAsUser: 33 + runAsGroup: 33 + fsGroup: 33 + fsGroupChangePolicy: OnRootMismatch + initContainers: + - name: init-storage-permissions + image: docker.io/alpine:3.20 + command: ["/bin/sh", "-c"] + args: + - | + set -e + mkdir -p /var/www/html/storage + chown -R 33:33 /var/www/html/storage + securityContext: + runAsUser: 0 + runAsGroup: 0 + volumeMounts: + - name: firefly-storage + mountPath: /var/www/html/storage + containers: + - name: firefly + image: fireflyiii/core:version-6.4.15 + args: ["/bin/sh", "-c", ". 
/vault/secrets/firefly-env.sh && exec /init"] + env: + - name: APP_ENV + value: production + - name: APP_DEBUG + value: "false" + - name: APP_URL + value: https://money.bstein.dev + - name: SITE_OWNER + value: brad@bstein.dev + - name: TZ + value: Etc/UTC + - name: TRUSTED_PROXIES + value: "**" + - name: AUTHENTICATION_GUARD + value: web + - name: MAIL_MAILER + value: smtp + - name: MAIL_HOST + value: mail.bstein.dev + - name: MAIL_PORT + value: "587" + - name: MAIL_ENCRYPTION + value: tls + - name: MAIL_FROM + value: no-reply-firefly@bstein.dev + - name: CACHE_DRIVER + value: file + - name: SESSION_DRIVER + value: file + ports: + - name: http + containerPort: 8080 + volumeMounts: + - name: firefly-storage + mountPath: /var/www/html/storage + readinessProbe: + httpGet: + path: / + port: http + initialDelaySeconds: 20 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + livenessProbe: + httpGet: + path: / + port: http + initialDelaySeconds: 30 + periodSeconds: 20 + timeoutSeconds: 5 + failureThreshold: 6 + resources: + requests: + cpu: 200m + memory: 512Mi + limits: + cpu: "1" + memory: 1Gi + volumes: + - name: firefly-storage + persistentVolumeClaim: + claimName: firefly-storage diff --git a/services/finance/firefly-ingress.yaml b/services/finance/firefly-ingress.yaml new file mode 100644 index 0000000..bd01661 --- /dev/null +++ b/services/finance/firefly-ingress.yaml @@ -0,0 +1,26 @@ +# services/finance/firefly-ingress.yaml +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: firefly + namespace: finance + annotations: + kubernetes.io/ingress.class: traefik + traefik.ingress.kubernetes.io/router.entrypoints: websecure + traefik.ingress.kubernetes.io/router.tls: "true" + cert-manager.io/cluster-issuer: letsencrypt +spec: + tls: + - hosts: ["money.bstein.dev"] + secretName: firefly-tls + rules: + - host: money.bstein.dev + http: + paths: + - path: / + pathType: Prefix + backend: + service: + name: firefly + port: + number: 80 diff --git 
a/services/finance/firefly-service.yaml b/services/finance/firefly-service.yaml new file mode 100644 index 0000000..a66980b --- /dev/null +++ b/services/finance/firefly-service.yaml @@ -0,0 +1,15 @@ +# services/finance/firefly-service.yaml +apiVersion: v1 +kind: Service +metadata: + name: firefly + namespace: finance + labels: + app: firefly +spec: + selector: + app: firefly + ports: + - name: http + port: 80 + targetPort: 8080 diff --git a/services/finance/firefly-storage-pvc.yaml b/services/finance/firefly-storage-pvc.yaml new file mode 100644 index 0000000..835f827 --- /dev/null +++ b/services/finance/firefly-storage-pvc.yaml @@ -0,0 +1,12 @@ +# services/finance/firefly-storage-pvc.yaml +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: firefly-storage + namespace: finance +spec: + accessModes: ["ReadWriteOnce"] + storageClassName: asteria + resources: + requests: + storage: 10Gi diff --git a/services/finance/firefly-user-sync-cronjob.yaml b/services/finance/firefly-user-sync-cronjob.yaml new file mode 100644 index 0000000..dab7f31 --- /dev/null +++ b/services/finance/firefly-user-sync-cronjob.yaml @@ -0,0 +1,90 @@ +# services/finance/firefly-user-sync-cronjob.yaml +apiVersion: batch/v1 +kind: CronJob +metadata: + name: firefly-user-sync + namespace: finance +spec: + schedule: "0 6 * * *" + suspend: true + concurrencyPolicy: Forbid + successfulJobsHistoryLimit: 1 + failedJobsHistoryLimit: 3 + jobTemplate: + spec: + backoffLimit: 0 + template: + metadata: + annotations: + vault.hashicorp.com/agent-inject: "true" + vault.hashicorp.com/agent-pre-populate-only: "true" + vault.hashicorp.com/role: "finance" + vault.hashicorp.com/agent-inject-secret-firefly-env.sh: "kv/data/atlas/finance/firefly-db" + vault.hashicorp.com/agent-inject-template-firefly-env.sh: | + {{ with secret "kv/data/atlas/finance/firefly-db" }} + export DB_CONNECTION="pgsql" + export DB_HOST="{{ .Data.data.DB_HOST }}" + export DB_PORT="{{ .Data.data.DB_PORT }}" + export DB_DATABASE="{{ 
.Data.data.DB_DATABASE }}" + export DB_USERNAME="{{ .Data.data.DB_USERNAME }}" + export DB_PASSWORD="$(cat /vault/secrets/firefly-db-password)" + {{ end }} + {{ with secret "kv/data/atlas/finance/firefly-secrets" }} + export APP_KEY="$(cat /vault/secrets/firefly-app-key)" + {{ end }} + vault.hashicorp.com/agent-inject-secret-firefly-db-password: "kv/data/atlas/finance/firefly-db" + vault.hashicorp.com/agent-inject-template-firefly-db-password: | + {{- with secret "kv/data/atlas/finance/firefly-db" -}} + {{ .Data.data.DB_PASSWORD }} + {{- end -}} + vault.hashicorp.com/agent-inject-secret-firefly-app-key: "kv/data/atlas/finance/firefly-secrets" + vault.hashicorp.com/agent-inject-template-firefly-app-key: | + {{- with secret "kv/data/atlas/finance/firefly-secrets" -}} + {{ .Data.data.APP_KEY }} + {{- end -}} + spec: + serviceAccountName: finance-vault + restartPolicy: Never + affinity: + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: hardware + operator: In + values: ["rpi5"] + - weight: 70 + preference: + matchExpressions: + - key: hardware + operator: In + values: ["rpi4"] + nodeSelector: + kubernetes.io/arch: arm64 + node-role.kubernetes.io/worker: "true" + containers: + - name: sync + image: fireflyiii/core:version-6.4.15 + command: ["/bin/sh", "-c"] + args: + - | + set -eu + . 
/vault/secrets/firefly-env.sh + exec php /scripts/firefly_user_sync.php + env: + - name: APP_ENV + value: production + - name: APP_DEBUG + value: "false" + - name: TZ + value: Etc/UTC + volumeMounts: + - name: firefly-user-sync-script + mountPath: /scripts + readOnly: true + volumes: + - name: firefly-user-sync-script + configMap: + name: firefly-user-sync-script + defaultMode: 0555 diff --git a/services/finance/kustomization.yaml b/services/finance/kustomization.yaml new file mode 100644 index 0000000..8cde8ba --- /dev/null +++ b/services/finance/kustomization.yaml @@ -0,0 +1,27 @@ +# services/finance/kustomization.yaml +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +namespace: finance +resources: + - namespace.yaml + - serviceaccount.yaml + - portal-rbac.yaml + - actual-budget-data-pvc.yaml + - firefly-storage-pvc.yaml + - actual-budget-deployment.yaml + - firefly-deployment.yaml + - firefly-user-sync-cronjob.yaml + - firefly-cronjob.yaml + - actual-budget-service.yaml + - firefly-service.yaml + - actual-budget-ingress.yaml + - firefly-ingress.yaml +generatorOptions: + disableNameSuffixHash: true +configMapGenerator: + - name: actual-openid-bootstrap-script + files: + - actual_openid_bootstrap.mjs=scripts/actual_openid_bootstrap.mjs + - name: firefly-user-sync-script + files: + - firefly_user_sync.php=scripts/firefly_user_sync.php diff --git a/services/finance/namespace.yaml b/services/finance/namespace.yaml new file mode 100644 index 0000000..e262026 --- /dev/null +++ b/services/finance/namespace.yaml @@ -0,0 +1,5 @@ +# services/finance/namespace.yaml +apiVersion: v1 +kind: Namespace +metadata: + name: finance diff --git a/services/finance/portal-rbac.yaml b/services/finance/portal-rbac.yaml new file mode 100644 index 0000000..2fb7ede --- /dev/null +++ b/services/finance/portal-rbac.yaml @@ -0,0 +1,31 @@ +# services/finance/portal-rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: 
bstein-dev-home-firefly-user-sync + namespace: finance +rules: + - apiGroups: ["batch"] + resources: ["cronjobs"] + verbs: ["get"] + resourceNames: ["firefly-user-sync"] + - apiGroups: ["batch"] + resources: ["jobs"] + verbs: ["create", "get", "list", "watch"] + - apiGroups: [""] + resources: ["pods"] + verbs: ["get", "list"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: bstein-dev-home-firefly-user-sync + namespace: finance +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: bstein-dev-home-firefly-user-sync +subjects: + - kind: ServiceAccount + name: bstein-dev-home + namespace: bstein-dev-home diff --git a/services/finance/scripts/actual_openid_bootstrap.mjs b/services/finance/scripts/actual_openid_bootstrap.mjs new file mode 100644 index 0000000..af14524 --- /dev/null +++ b/services/finance/scripts/actual_openid_bootstrap.mjs @@ -0,0 +1,70 @@ +import fs from 'node:fs'; +import path from 'node:path'; +import { pathToFileURL } from 'node:url'; + +function findRoot() { + const candidates = []; + if (process.env.ACTUAL_SERVER_ROOT) { + candidates.push(process.env.ACTUAL_SERVER_ROOT); + } + candidates.push('/app'); + candidates.push('/usr/src/app'); + candidates.push('/srv/app'); + candidates.push('/opt/actual-server'); + + for (const base of candidates) { + if (!base) { + continue; + } + const accountDb = path.join(base, 'src', 'account-db.js'); + if (fs.existsSync(accountDb)) { + return base; + } + } + return ''; +} + +const root = findRoot(); +if (!root) { + console.error('actual server root not found'); + process.exit(1); +} + +const accountDbUrl = pathToFileURL(path.join(root, 'src', 'account-db.js')).href; +const loadConfigUrl = pathToFileURL(path.join(root, 'src', 'load-config.js')).href; + +const accountDb = await import(accountDbUrl); +const { default: finalConfig } = await import(loadConfigUrl); + +const openId = finalConfig?.openId; +if (!openId) { + console.error('missing openid configuration'); + 
process.exit(1); +} + +const active = accountDb.getActiveLoginMethod(); +if (active === 'openid') { + console.log('openid already enabled'); + process.exit(0); +} + +try { + if (accountDb.needsBootstrap()) { + const result = await accountDb.bootstrap({ openId }); + if (result?.error && result.error !== 'already-bootstrapped') { + console.error(`bootstrap failed: ${result.error}`); + process.exit(1); + } + } else { + const result = await accountDb.enableOpenID({ openId }); + if (result?.error) { + console.error(`enable openid failed: ${result.error}`); + process.exit(1); + } + } + + console.log('openid bootstrap complete'); +} catch (err) { + console.error('openid bootstrap error:', err); + process.exit(1); +} diff --git a/services/finance/scripts/firefly_user_sync.php b/services/finance/scripts/firefly_user_sync.php new file mode 100644 index 0000000..dcb78ea --- /dev/null +++ b/services/finance/scripts/firefly_user_sync.php @@ -0,0 +1,107 @@ +#!/usr/bin/env php +make(ConsoleKernel::class); +$kernel->bootstrap(); + +$repository = $app->make(UserRepositoryInterface::class); + +$existing_user = User::where('email', $email)->first(); +$first_user = User::count() == 0; + +if (!$existing_user) { + $existing_user = User::create( + [ + 'email' => $email, + 'password' => bcrypt($password), + 'blocked' => false, + 'blocked_code' => null, + ] + ); + + if ($first_user) { + $role = Role::where('name', 'owner')->first(); + if ($role) { + $existing_user->roles()->attach($role); + } + } + + log_line(sprintf('created firefly user %s', $email)); +} else { + log_line(sprintf('updating firefly user %s', $email)); +} + +$existing_user->blocked = false; +$existing_user->blocked_code = null; +$existing_user->save(); + +$repository->changePassword($existing_user, $password); +CreatesGroupMemberships::createGroupMembership($existing_user); + +log_line('firefly user sync complete'); diff --git a/services/finance/serviceaccount.yaml b/services/finance/serviceaccount.yaml new file mode 
100644 index 0000000..d57a3d2 --- /dev/null +++ b/services/finance/serviceaccount.yaml @@ -0,0 +1,6 @@ +# services/finance/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: finance-vault + namespace: finance diff --git a/services/keycloak/actual-oidc-secret-ensure-job.yaml b/services/keycloak/actual-oidc-secret-ensure-job.yaml new file mode 100644 index 0000000..0cb8aa8 --- /dev/null +++ b/services/keycloak/actual-oidc-secret-ensure-job.yaml @@ -0,0 +1,48 @@ +# services/keycloak/actual-oidc-secret-ensure-job.yaml +apiVersion: batch/v1 +kind: Job +metadata: + name: actual-oidc-secret-ensure-1 + namespace: sso +spec: + backoffLimit: 0 + ttlSecondsAfterFinished: 3600 + template: + metadata: + annotations: + vault.hashicorp.com/agent-inject: "true" + vault.hashicorp.com/agent-pre-populate-only: "true" + vault.hashicorp.com/role: "sso-secrets" + vault.hashicorp.com/agent-inject-secret-keycloak-admin-env.sh: "kv/data/atlas/shared/keycloak-admin" + vault.hashicorp.com/agent-inject-template-keycloak-admin-env.sh: | + {{ with secret "kv/data/atlas/shared/keycloak-admin" }} + export KEYCLOAK_ADMIN="{{ .Data.data.username }}" + export KEYCLOAK_ADMIN_USER="{{ .Data.data.username }}" + export KEYCLOAK_ADMIN_PASSWORD="{{ .Data.data.password }}" + {{ end }} + spec: + serviceAccountName: mas-secrets-ensure + restartPolicy: Never + volumes: + - name: actual-oidc-secret-ensure-script + configMap: + name: actual-oidc-secret-ensure-script + defaultMode: 0555 + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/arch + operator: In + values: ["arm64"] + - key: node-role.kubernetes.io/worker + operator: Exists + containers: + - name: apply + image: alpine:3.20 + command: ["/scripts/actual_oidc_secret_ensure.sh"] + volumeMounts: + - name: actual-oidc-secret-ensure-script + mountPath: /scripts + readOnly: true diff --git a/services/keycloak/kustomization.yaml 
b/services/keycloak/kustomization.yaml index 316f447..6030a82 100644 --- a/services/keycloak/kustomization.yaml +++ b/services/keycloak/kustomization.yaml @@ -24,6 +24,7 @@ resources: - logs-oidc-secret-ensure-job.yaml - harbor-oidc-secret-ensure-job.yaml - vault-oidc-secret-ensure-job.yaml + - actual-oidc-secret-ensure-job.yaml - service.yaml - ingress.yaml generatorOptions: @@ -39,3 +40,6 @@ configMapGenerator: - name: vault-oidc-secret-ensure-script files: - vault_oidc_secret_ensure.sh=scripts/vault_oidc_secret_ensure.sh + - name: actual-oidc-secret-ensure-script + files: + - actual_oidc_secret_ensure.sh=scripts/actual_oidc_secret_ensure.sh diff --git a/services/keycloak/realm-settings-job.yaml b/services/keycloak/realm-settings-job.yaml index 0def763..3bf726a 100644 --- a/services/keycloak/realm-settings-job.yaml +++ b/services/keycloak/realm-settings-job.yaml @@ -250,6 +250,22 @@ spec: "permissions": {"view": ["admin"], "edit": ["admin"]}, "validations": {"length": {"max": 64}}, }, + { + "name": "firefly_password", + "displayName": "Firefly Password", + "multivalued": False, + "annotations": {"group": "user-metadata"}, + "permissions": {"view": ["admin"], "edit": ["admin"]}, + "validations": {"length": {"max": 255}}, + }, + { + "name": "firefly_password_updated_at", + "displayName": "Firefly Password Updated At", + "multivalued": False, + "annotations": {"group": "user-metadata"}, + "permissions": {"view": ["admin"], "edit": ["admin"]}, + "validations": {"length": {"max": 64}}, + }, ] def has_attr(name: str) -> bool: diff --git a/services/keycloak/scripts/actual_oidc_secret_ensure.sh b/services/keycloak/scripts/actual_oidc_secret_ensure.sh new file mode 100644 index 0000000..c686c24 --- /dev/null +++ b/services/keycloak/scripts/actual_oidc_secret_ensure.sh @@ -0,0 +1,78 @@ +#!/usr/bin/env sh +set -euo pipefail + +apk add --no-cache curl jq >/dev/null + +. 
/vault/secrets/keycloak-admin-env.sh + +KC_URL="http://keycloak.sso.svc.cluster.local" +ACCESS_TOKEN="" +for attempt in 1 2 3 4 5; do + TOKEN_JSON="$(curl -sS -X POST "$KC_URL/realms/master/protocol/openid-connect/token" \ + -H 'Content-Type: application/x-www-form-urlencoded' \ + -d "grant_type=password" \ + -d "client_id=admin-cli" \ + -d "username=${KEYCLOAK_ADMIN}" \ + -d "password=${KEYCLOAK_ADMIN_PASSWORD}" || true)" + ACCESS_TOKEN="$(echo "$TOKEN_JSON" | jq -r '.access_token' 2>/dev/null || true)" + if [ -n "$ACCESS_TOKEN" ] && [ "$ACCESS_TOKEN" != "null" ]; then + break + fi + echo "Keycloak token request failed (attempt ${attempt})" >&2 + sleep $((attempt * 2)) +done +if [ -z "$ACCESS_TOKEN" ] || [ "$ACCESS_TOKEN" = "null" ]; then + echo "Failed to fetch Keycloak admin token" >&2 + exit 1 +fi + +CLIENT_QUERY="$(curl -sS -H "Authorization: Bearer ${ACCESS_TOKEN}" \ + "$KC_URL/admin/realms/atlas/clients?clientId=actual-budget" || true)" +CLIENT_ID="$(echo "$CLIENT_QUERY" | jq -r '.[0].id' 2>/dev/null || true)" + +if [ -z "$CLIENT_ID" ] || [ "$CLIENT_ID" = "null" ]; then + create_payload='{"clientId":"actual-budget","enabled":true,"protocol":"openid-connect","publicClient":false,"standardFlowEnabled":true,"implicitFlowEnabled":false,"directAccessGrantsEnabled":false,"serviceAccountsEnabled":false,"redirectUris":["https://budget.bstein.dev/openid/callback"],"webOrigins":["https://budget.bstein.dev"],"rootUrl":"https://budget.bstein.dev","baseUrl":"/"}' + status="$(curl -sS -o /dev/null -w "%{http_code}" -X POST \ + -H "Authorization: Bearer ${ACCESS_TOKEN}" \ + -H 'Content-Type: application/json' \ + -d "${create_payload}" \ + "$KC_URL/admin/realms/atlas/clients")" + if [ "$status" != "201" ] && [ "$status" != "204" ]; then + echo "Keycloak client create failed (status ${status})" >&2 + exit 1 + fi + CLIENT_QUERY="$(curl -sS -H "Authorization: Bearer ${ACCESS_TOKEN}" \ + "$KC_URL/admin/realms/atlas/clients?clientId=actual-budget" || true)" + CLIENT_ID="$(echo 
"$CLIENT_QUERY" | jq -r '.[0].id' 2>/dev/null || true)" +fi + +if [ -z "$CLIENT_ID" ] || [ "$CLIENT_ID" = "null" ]; then + echo "Keycloak client actual-budget not found" >&2 + exit 1 +fi + +CLIENT_SECRET="$(curl -sS -H "Authorization: Bearer ${ACCESS_TOKEN}" \ + "$KC_URL/admin/realms/atlas/clients/${CLIENT_ID}/client-secret" | jq -r '.value' 2>/dev/null || true)" +if [ -z "$CLIENT_SECRET" ] || [ "$CLIENT_SECRET" = "null" ]; then + echo "Keycloak client secret not found" >&2 + exit 1 +fi + +vault_addr="${VAULT_ADDR:-http://vault.vault.svc.cluster.local:8200}" +vault_role="${VAULT_ROLE:-sso-secrets}" +jwt="$(cat /var/run/secrets/kubernetes.io/serviceaccount/token)" +login_payload="$(jq -nc --arg jwt "${jwt}" --arg role "${vault_role}" '{jwt:$jwt, role:$role}')" +vault_token="$(curl -sS --request POST --data "${login_payload}" \ + "${vault_addr}/v1/auth/kubernetes/login" | jq -r '.auth.client_token')" +if [ -z "${vault_token}" ] || [ "${vault_token}" = "null" ]; then + echo "vault login failed" >&2 + exit 1 +fi + +payload="$(jq -nc \ + --arg client_id "actual-budget" \ + --arg client_secret "${CLIENT_SECRET}" \ + '{data:{ACTUAL_OPENID_CLIENT_ID:$client_id, ACTUAL_OPENID_CLIENT_SECRET:$client_secret}}')" + +curl -sS -X POST -H "X-Vault-Token: ${vault_token}" \ + -d "${payload}" "${vault_addr}/v1/kv/data/atlas/finance/actual-oidc" >/dev/null diff --git a/services/vault/scripts/vault_k8s_auth_configure.sh b/services/vault/scripts/vault_k8s_auth_configure.sh index 2d2d4ba..04114fc 100644 --- a/services/vault/scripts/vault_k8s_auth_configure.sh +++ b/services/vault/scripts/vault_k8s_auth_configure.sh @@ -214,6 +214,8 @@ write_policy_and_role "crypto" "crypto" "crypto-vault-sync" \ "crypto/* harbor-pull/crypto" "" write_policy_and_role "health" "health" "health-vault-sync" \ "health/*" "" +write_policy_and_role "finance" "finance" "finance-vault" \ + "finance/* shared/postmark-relay" "" write_policy_and_role "longhorn" "longhorn-system" "longhorn-vault,longhorn-vault-sync" 
\ "longhorn/* harbor-pull/longhorn" "" write_policy_and_role "postgres" "postgres" "postgres-vault" \ @@ -223,7 +225,7 @@ write_policy_and_role "vault" "vault" "vault" \ write_policy_and_role "sso-secrets" "sso" "mas-secrets-ensure" \ "shared/keycloak-admin" \ - "harbor/harbor-oidc vault/vault-oidc-config comms/synapse-oidc logging/oauth2-proxy-logs-oidc" + "harbor/harbor-oidc vault/vault-oidc-config comms/synapse-oidc logging/oauth2-proxy-logs-oidc finance/actual-oidc" write_policy_and_role "crypto-secrets" "crypto" "crypto-secrets-ensure" \ "" \ "crypto/wallet-monero-temp-rpc-auth" From 62fa6ef371e33d50c38cffc96a20425b416284a5 Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Sat, 17 Jan 2026 00:54:49 -0300 Subject: [PATCH 167/270] finance: seed vault secrets --- .../finance/actual-budget-deployment.yaml | 1 + .../finance/finance-secrets-ensure-job.yaml | 59 ++++++++ services/finance/firefly-deployment.yaml | 4 +- services/finance/kustomization.yaml | 4 + .../finance/scripts/finance_secrets_ensure.sh | 132 ++++++++++++++++++ services/finance/serviceaccount.yaml | 6 + .../vault/scripts/vault_k8s_auth_configure.sh | 3 + 7 files changed, 208 insertions(+), 1 deletion(-) create mode 100644 services/finance/finance-secrets-ensure-job.yaml create mode 100755 services/finance/scripts/finance_secrets_ensure.sh diff --git a/services/finance/actual-budget-deployment.yaml b/services/finance/actual-budget-deployment.yaml index 11b7e5c..8e76d3e 100644 --- a/services/finance/actual-budget-deployment.yaml +++ b/services/finance/actual-budget-deployment.yaml @@ -22,6 +22,7 @@ spec: app: actual-budget annotations: vault.hashicorp.com/agent-inject: "true" + vault.hashicorp.com/agent-pre-populate-only: "true" vault.hashicorp.com/role: "finance" vault.hashicorp.com/agent-inject-secret-actual-env.sh: "kv/data/atlas/finance/actual-oidc" vault.hashicorp.com/agent-inject-template-actual-env.sh: | diff --git a/services/finance/finance-secrets-ensure-job.yaml 
b/services/finance/finance-secrets-ensure-job.yaml new file mode 100644 index 0000000..1402d14 --- /dev/null +++ b/services/finance/finance-secrets-ensure-job.yaml @@ -0,0 +1,59 @@ +# services/finance/finance-secrets-ensure-job.yaml +apiVersion: batch/v1 +kind: Job +metadata: + name: finance-secrets-ensure-1 + namespace: finance +spec: + backoffLimit: 1 + ttlSecondsAfterFinished: 3600 + template: + spec: + serviceAccountName: finance-secrets-ensure + restartPolicy: Never + affinity: + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: hardware + operator: In + values: ["rpi5"] + - weight: 70 + preference: + matchExpressions: + - key: hardware + operator: In + values: ["rpi4"] + nodeSelector: + kubernetes.io/arch: arm64 + node-role.kubernetes.io/worker: "true" + containers: + - name: ensure + image: alpine:3.20 + command: ["/scripts/finance_secrets_ensure.sh"] + env: + - name: VAULT_ROLE + value: finance-secrets + volumeMounts: + - name: finance-secrets-ensure-script + mountPath: /scripts + readOnly: true + - name: firefly-db + mountPath: /secrets/firefly-db + readOnly: true + - name: actualbudget-db + mountPath: /secrets/actualbudget-db + readOnly: true + volumes: + - name: finance-secrets-ensure-script + configMap: + name: finance-secrets-ensure-script + defaultMode: 0555 + - name: firefly-db + secret: + secretName: firefly-db + - name: actualbudget-db + secret: + secretName: actualbudget-db diff --git a/services/finance/firefly-deployment.yaml b/services/finance/firefly-deployment.yaml index 1b51a07..ff95dad 100644 --- a/services/finance/firefly-deployment.yaml +++ b/services/finance/firefly-deployment.yaml @@ -123,8 +123,10 @@ spec: value: "587" - name: MAIL_ENCRYPTION value: tls - - name: MAIL_FROM + - name: MAIL_FROM_ADDRESS value: no-reply-firefly@bstein.dev + - name: MAIL_FROM_NAME + value: Firefly III - name: CACHE_DRIVER value: file - name: SESSION_DRIVER diff --git 
a/services/finance/kustomization.yaml b/services/finance/kustomization.yaml index 8cde8ba..2189834 100644 --- a/services/finance/kustomization.yaml +++ b/services/finance/kustomization.yaml @@ -8,6 +8,7 @@ resources: - portal-rbac.yaml - actual-budget-data-pvc.yaml - firefly-storage-pvc.yaml + - finance-secrets-ensure-job.yaml - actual-budget-deployment.yaml - firefly-deployment.yaml - firefly-user-sync-cronjob.yaml @@ -25,3 +26,6 @@ configMapGenerator: - name: firefly-user-sync-script files: - firefly_user_sync.php=scripts/firefly_user_sync.php + - name: finance-secrets-ensure-script + files: + - finance_secrets_ensure.sh=scripts/finance_secrets_ensure.sh diff --git a/services/finance/scripts/finance_secrets_ensure.sh b/services/finance/scripts/finance_secrets_ensure.sh new file mode 100755 index 0000000..a0dca4a --- /dev/null +++ b/services/finance/scripts/finance_secrets_ensure.sh @@ -0,0 +1,132 @@ +#!/usr/bin/env bash +set -euo pipefail + +apk add --no-cache curl jq >/dev/null + +vault_addr="${VAULT_ADDR:-http://vault.vault.svc.cluster.local:8200}" +vault_role="${VAULT_ROLE:-finance-secrets}" +jwt="$(cat /var/run/secrets/kubernetes.io/serviceaccount/token)" +login_payload="$(jq -nc --arg jwt "${jwt}" --arg role "${vault_role}" '{jwt:$jwt, role:$role}')" +vault_token="$(curl -sS --request POST --data "${login_payload}" \ + "${vault_addr}/v1/auth/kubernetes/login" | jq -r '.auth.client_token')" +if [ -z "${vault_token}" ] || [ "${vault_token}" = "null" ]; then + echo "vault login failed" >&2 + exit 1 +fi + +read_secret() { + path="$1" + if [ -f "${path}" ]; then + cat "${path}" + fi +} + +require_value() { + label="$1" + value="$2" + if [ -z "${value}" ]; then + echo "missing ${label}" >&2 + exit 1 + fi +} + +vault_read() { + path="$1" + key="$2" + curl -sS -H "X-Vault-Token: ${vault_token}" \ + "${vault_addr}/v1/kv/data/atlas/${path}" 2>/dev/null | \ + jq -r --arg key "${key}" '.data.data[$key] // empty' 2>/dev/null || true +} + +vault_write_json() { + path="$1" 
+ payload="$2" + curl -sS -X POST -H "X-Vault-Token: ${vault_token}" \ + -d "${payload}" "${vault_addr}/v1/kv/data/atlas/${path}" >/dev/null +} + +firefly_db_host="$(read_secret /secrets/firefly-db/DB_HOST)" +if [ -z "${firefly_db_host}" ]; then + firefly_db_host="$(read_secret /secrets/firefly-db/DB_HOSTNAME)" +fi +firefly_db_port="$(read_secret /secrets/firefly-db/DB_PORT)" +firefly_db_name="$(read_secret /secrets/firefly-db/DB_DATABASE)" +if [ -z "${firefly_db_name}" ]; then + firefly_db_name="$(read_secret /secrets/firefly-db/DB_NAME)" +fi +firefly_db_user="$(read_secret /secrets/firefly-db/DB_USERNAME)" +if [ -z "${firefly_db_user}" ]; then + firefly_db_user="$(read_secret /secrets/firefly-db/DB_USER)" +fi +firefly_db_pass="$(read_secret /secrets/firefly-db/DB_PASSWORD)" +if [ -z "${firefly_db_pass}" ]; then + firefly_db_pass="$(read_secret /secrets/firefly-db/DB_PASS)" +fi + +require_value "firefly-db/DB_HOST" "${firefly_db_host}" +require_value "firefly-db/DB_PORT" "${firefly_db_port}" +require_value "firefly-db/DB_DATABASE" "${firefly_db_name}" +require_value "firefly-db/DB_USERNAME" "${firefly_db_user}" +require_value "firefly-db/DB_PASSWORD" "${firefly_db_pass}" + +firefly_payload="$(jq -nc \ + --arg host "${firefly_db_host}" \ + --arg port "${firefly_db_port}" \ + --arg db "${firefly_db_name}" \ + --arg user "${firefly_db_user}" \ + --arg pass "${firefly_db_pass}" \ + '{data:{DB_HOST:$host, DB_PORT:$port, DB_DATABASE:$db, DB_USERNAME:$user, DB_PASSWORD:$pass}}')" +vault_write_json "finance/firefly-db" "${firefly_payload}" + +app_key="$(vault_read "finance/firefly-secrets" "APP_KEY")" +if [ -z "${app_key}" ]; then + app_key="base64:$(head -c 32 /dev/urandom | base64 | tr -d '\n')" +fi +cron_token="$(vault_read "finance/firefly-secrets" "STATIC_CRON_TOKEN")" +if [ -z "${cron_token}" ]; then + cron_token="$(head -c 32 /dev/urandom | base64 | tr -d '\n' | tr '+/' '-_' | tr -d '=')" +fi +firefly_secret_payload="$(jq -nc \ + --arg app_key "${app_key}" \ + 
--arg cron "${cron_token}" \ + '{data:{APP_KEY:$app_key, STATIC_CRON_TOKEN:$cron}}')" +vault_write_json "finance/firefly-secrets" "${firefly_secret_payload}" + +if [ -d /secrets/actualbudget-db ]; then + actual_db_host="$(read_secret /secrets/actualbudget-db/DB_HOST)" + if [ -z "${actual_db_host}" ]; then + actual_db_host="$(read_secret /secrets/actualbudget-db/DB_HOSTNAME)" + fi + actual_db_port="$(read_secret /secrets/actualbudget-db/DB_PORT)" + actual_db_name="$(read_secret /secrets/actualbudget-db/DB_DATABASE)" + if [ -z "${actual_db_name}" ]; then + actual_db_name="$(read_secret /secrets/actualbudget-db/DB_NAME)" + fi + actual_db_user="$(read_secret /secrets/actualbudget-db/DB_USERNAME)" + if [ -z "${actual_db_user}" ]; then + actual_db_user="$(read_secret /secrets/actualbudget-db/DB_USER)" + fi + actual_db_pass="$(read_secret /secrets/actualbudget-db/DB_PASSWORD)" + if [ -z "${actual_db_pass}" ]; then + actual_db_pass="$(read_secret /secrets/actualbudget-db/DB_PASS)" + fi + + if [ -n "${actual_db_host}${actual_db_port}${actual_db_name}${actual_db_user}${actual_db_pass}" ]; then + require_value "actualbudget-db/DB_HOST" "${actual_db_host}" + require_value "actualbudget-db/DB_PORT" "${actual_db_port}" + require_value "actualbudget-db/DB_DATABASE" "${actual_db_name}" + require_value "actualbudget-db/DB_USERNAME" "${actual_db_user}" + require_value "actualbudget-db/DB_PASSWORD" "${actual_db_pass}" + + actual_payload="$(jq -nc \ + --arg host "${actual_db_host}" \ + --arg port "${actual_db_port}" \ + --arg db "${actual_db_name}" \ + --arg user "${actual_db_user}" \ + --arg pass "${actual_db_pass}" \ + '{data:{DB_HOST:$host, DB_PORT:$port, DB_DATABASE:$db, DB_USERNAME:$user, DB_PASSWORD:$pass}}')" + vault_write_json "finance/actual-db" "${actual_payload}" + else + echo "actualbudget-db secret empty; skipping actual-db vault write" >&2 + fi +fi diff --git a/services/finance/serviceaccount.yaml b/services/finance/serviceaccount.yaml index d57a3d2..3d18681 100644 --- 
a/services/finance/serviceaccount.yaml +++ b/services/finance/serviceaccount.yaml @@ -4,3 +4,9 @@ kind: ServiceAccount metadata: name: finance-vault namespace: finance +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: finance-secrets-ensure + namespace: finance diff --git a/services/vault/scripts/vault_k8s_auth_configure.sh b/services/vault/scripts/vault_k8s_auth_configure.sh index 04114fc..140f1d4 100644 --- a/services/vault/scripts/vault_k8s_auth_configure.sh +++ b/services/vault/scripts/vault_k8s_auth_configure.sh @@ -216,6 +216,9 @@ write_policy_and_role "health" "health" "health-vault-sync" \ "health/*" "" write_policy_and_role "finance" "finance" "finance-vault" \ "finance/* shared/postmark-relay" "" +write_policy_and_role "finance-secrets" "finance" "finance-secrets-ensure" \ + "" \ + "finance/*" write_policy_and_role "longhorn" "longhorn-system" "longhorn-vault,longhorn-vault-sync" \ "longhorn/* harbor-pull/longhorn" "" write_policy_and_role "postgres" "postgres" "postgres-vault" \ From f4c6827c8ce5e4f2b0514958e476fb653b63f35d Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Sat, 17 Jan 2026 01:00:12 -0300 Subject: [PATCH 168/270] keycloak: bump realm settings job --- services/keycloak/realm-settings-job.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/services/keycloak/realm-settings-job.yaml b/services/keycloak/realm-settings-job.yaml index 3bf726a..0fb1882 100644 --- a/services/keycloak/realm-settings-job.yaml +++ b/services/keycloak/realm-settings-job.yaml @@ -2,7 +2,7 @@ apiVersion: batch/v1 kind: Job metadata: - name: keycloak-realm-settings-26 + name: keycloak-realm-settings-27 namespace: sso spec: backoffLimit: 0 From a9351bc737c893fbde1a45b20766c291fa0029ec Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Sat, 17 Jan 2026 01:02:58 -0300 Subject: [PATCH 169/270] jobs: drop apk installs and prefer arm64 --- .../longhorn/core/longhorn-settings-ensure-job.yaml | 4 ++-- 
.../longhorn/core/scripts/longhorn_settings_ensure.sh | 1 - services/keycloak/actual-oidc-secret-ensure-job.yaml | 4 ++-- services/keycloak/harbor-oidc-secret-ensure-job.yaml | 6 +++--- services/keycloak/logs-oidc-secret-ensure-job.yaml | 6 ++---- services/keycloak/mas-secrets-ensure-job.yaml | 10 ++++++---- services/keycloak/scripts/actual_oidc_secret_ensure.sh | 2 -- services/keycloak/scripts/harbor_oidc_secret_ensure.sh | 2 -- services/keycloak/scripts/vault_oidc_secret_ensure.sh | 2 -- services/keycloak/synapse-oidc-secret-ensure-job.yaml | 8 +++----- services/keycloak/vault-oidc-secret-ensure-job.yaml | 6 +++--- services/maintenance/image-sweeper-cronjob.yaml | 2 ++ services/maintenance/pod-cleaner-cronjob.yaml | 3 +++ 13 files changed, 26 insertions(+), 30 deletions(-) diff --git a/infrastructure/longhorn/core/longhorn-settings-ensure-job.yaml b/infrastructure/longhorn/core/longhorn-settings-ensure-job.yaml index f8e56cb..932c056 100644 --- a/infrastructure/longhorn/core/longhorn-settings-ensure-job.yaml +++ b/infrastructure/longhorn/core/longhorn-settings-ensure-job.yaml @@ -2,7 +2,7 @@ apiVersion: batch/v1 kind: Job metadata: - name: longhorn-settings-ensure-3 + name: longhorn-settings-ensure-4 namespace: longhorn-system spec: backoffLimit: 0 @@ -28,7 +28,7 @@ spec: operator: Exists containers: - name: apply - image: docker.io/alpine:3.20 + image: bitnami/kubectl@sha256:554ab88b1858e8424c55de37ad417b16f2a0e65d1607aa0f3fe3ce9b9f10b131 command: ["/scripts/longhorn_settings_ensure.sh"] volumeMounts: - name: longhorn-settings-ensure-script diff --git a/infrastructure/longhorn/core/scripts/longhorn_settings_ensure.sh b/infrastructure/longhorn/core/scripts/longhorn_settings_ensure.sh index f73a0cc..f13e87a 100644 --- a/infrastructure/longhorn/core/scripts/longhorn_settings_ensure.sh +++ b/infrastructure/longhorn/core/scripts/longhorn_settings_ensure.sh @@ -2,7 +2,6 @@ set -eu # Longhorn blocks direct CR patches for some settings; use the internal API instead. 
-apk add --no-cache curl >/dev/null api_base="http://longhorn-backend.longhorn-system.svc:9500/v1/settings" diff --git a/services/keycloak/actual-oidc-secret-ensure-job.yaml b/services/keycloak/actual-oidc-secret-ensure-job.yaml index 0cb8aa8..22ba34f 100644 --- a/services/keycloak/actual-oidc-secret-ensure-job.yaml +++ b/services/keycloak/actual-oidc-secret-ensure-job.yaml @@ -2,7 +2,7 @@ apiVersion: batch/v1 kind: Job metadata: - name: actual-oidc-secret-ensure-1 + name: actual-oidc-secret-ensure-2 namespace: sso spec: backoffLimit: 0 @@ -40,7 +40,7 @@ spec: operator: Exists containers: - name: apply - image: alpine:3.20 + image: bitnami/kubectl@sha256:554ab88b1858e8424c55de37ad417b16f2a0e65d1607aa0f3fe3ce9b9f10b131 command: ["/scripts/actual_oidc_secret_ensure.sh"] volumeMounts: - name: actual-oidc-secret-ensure-script diff --git a/services/keycloak/harbor-oidc-secret-ensure-job.yaml b/services/keycloak/harbor-oidc-secret-ensure-job.yaml index 82c8097..8eac50d 100644 --- a/services/keycloak/harbor-oidc-secret-ensure-job.yaml +++ b/services/keycloak/harbor-oidc-secret-ensure-job.yaml @@ -2,7 +2,7 @@ apiVersion: batch/v1 kind: Job metadata: - name: harbor-oidc-secret-ensure-8 + name: harbor-oidc-secret-ensure-9 namespace: sso spec: backoffLimit: 0 @@ -40,9 +40,9 @@ spec: operator: Exists containers: - name: apply - image: alpine:3.20 + image: bitnami/kubectl@sha256:554ab88b1858e8424c55de37ad417b16f2a0e65d1607aa0f3fe3ce9b9f10b131 command: ["/scripts/harbor_oidc_secret_ensure.sh"] volumeMounts: - name: harbor-oidc-secret-ensure-script mountPath: /scripts - readOnly: true \ No newline at end of file + readOnly: true diff --git a/services/keycloak/logs-oidc-secret-ensure-job.yaml b/services/keycloak/logs-oidc-secret-ensure-job.yaml index 43177ff..df89fa0 100644 --- a/services/keycloak/logs-oidc-secret-ensure-job.yaml +++ b/services/keycloak/logs-oidc-secret-ensure-job.yaml @@ -2,7 +2,7 @@ apiVersion: batch/v1 kind: Job metadata: - name: logs-oidc-secret-ensure-8 + 
name: logs-oidc-secret-ensure-9 namespace: sso spec: backoffLimit: 0 @@ -25,14 +25,12 @@ spec: restartPolicy: Never containers: - name: apply - image: alpine:3.20 + image: bitnami/kubectl@sha256:554ab88b1858e8424c55de37ad417b16f2a0e65d1607aa0f3fe3ce9b9f10b131 command: ["/bin/sh", "-c"] args: - | set -euo pipefail . /vault/secrets/keycloak-admin-env.sh - apk add --no-cache curl jq openssl >/dev/null - KC_URL="http://keycloak.sso.svc.cluster.local" ACCESS_TOKEN="" for attempt in 1 2 3 4 5; do diff --git a/services/keycloak/mas-secrets-ensure-job.yaml b/services/keycloak/mas-secrets-ensure-job.yaml index 88e8177..9d97f72 100644 --- a/services/keycloak/mas-secrets-ensure-job.yaml +++ b/services/keycloak/mas-secrets-ensure-job.yaml @@ -10,7 +10,7 @@ imagePullSecrets: apiVersion: batch/v1 kind: Job metadata: - name: mas-secrets-ensure-18 + name: mas-secrets-ensure-19 namespace: sso spec: backoffLimit: 0 @@ -32,19 +32,21 @@ spec: spec: serviceAccountName: mas-secrets-ensure restartPolicy: Never + nodeSelector: + kubernetes.io/arch: arm64 + node-role.kubernetes.io/worker: "true" volumes: - name: work emptyDir: {} initContainers: - name: generate - image: alpine:3.20 + image: bitnami/kubectl@sha256:554ab88b1858e8424c55de37ad417b16f2a0e65d1607aa0f3fe3ce9b9f10b131 command: ["/bin/sh", "-c"] args: - | set -euo pipefail . 
/vault/secrets/keycloak-admin-env.sh umask 077 - apk add --no-cache curl openssl jq >/dev/null KC_URL="http://keycloak.sso.svc.cluster.local" ACCESS_TOKEN="" @@ -124,4 +126,4 @@ spec: -d "${payload}" "${vault_addr}/v1/kv/data/atlas/comms/mas-secrets-runtime" >/dev/null volumeMounts: - name: work - mountPath: /work \ No newline at end of file + mountPath: /work diff --git a/services/keycloak/scripts/actual_oidc_secret_ensure.sh b/services/keycloak/scripts/actual_oidc_secret_ensure.sh index c686c24..3ed6e6a 100644 --- a/services/keycloak/scripts/actual_oidc_secret_ensure.sh +++ b/services/keycloak/scripts/actual_oidc_secret_ensure.sh @@ -1,8 +1,6 @@ #!/usr/bin/env sh set -euo pipefail -apk add --no-cache curl jq >/dev/null - . /vault/secrets/keycloak-admin-env.sh KC_URL="http://keycloak.sso.svc.cluster.local" diff --git a/services/keycloak/scripts/harbor_oidc_secret_ensure.sh b/services/keycloak/scripts/harbor_oidc_secret_ensure.sh index beef591..7187d34 100755 --- a/services/keycloak/scripts/harbor_oidc_secret_ensure.sh +++ b/services/keycloak/scripts/harbor_oidc_secret_ensure.sh @@ -1,8 +1,6 @@ #!/usr/bin/env sh set -euo pipefail -apk add --no-cache curl jq kubectl >/dev/null - . /vault/secrets/keycloak-admin-env.sh KC_URL="http://keycloak.sso.svc.cluster.local" diff --git a/services/keycloak/scripts/vault_oidc_secret_ensure.sh b/services/keycloak/scripts/vault_oidc_secret_ensure.sh index 3c7d4a5..e8cfe5b 100755 --- a/services/keycloak/scripts/vault_oidc_secret_ensure.sh +++ b/services/keycloak/scripts/vault_oidc_secret_ensure.sh @@ -1,8 +1,6 @@ #!/usr/bin/env sh set -euo pipefail -apk add --no-cache curl jq kubectl >/dev/null - . 
/vault/secrets/keycloak-admin-env.sh KC_URL="http://keycloak.sso.svc.cluster.local" diff --git a/services/keycloak/synapse-oidc-secret-ensure-job.yaml b/services/keycloak/synapse-oidc-secret-ensure-job.yaml index 2368404..07d1378 100644 --- a/services/keycloak/synapse-oidc-secret-ensure-job.yaml +++ b/services/keycloak/synapse-oidc-secret-ensure-job.yaml @@ -2,7 +2,7 @@ apiVersion: batch/v1 kind: Job metadata: - name: synapse-oidc-secret-ensure-8 + name: synapse-oidc-secret-ensure-9 namespace: sso spec: backoffLimit: 0 @@ -25,14 +25,12 @@ spec: restartPolicy: Never containers: - name: apply - image: alpine:3.20 + image: bitnami/kubectl@sha256:554ab88b1858e8424c55de37ad417b16f2a0e65d1607aa0f3fe3ce9b9f10b131 command: ["/bin/sh", "-c"] args: - | set -euo pipefail . /vault/secrets/keycloak-admin-env.sh - apk add --no-cache curl jq >/dev/null - KC_URL="http://keycloak.sso.svc.cluster.local" ACCESS_TOKEN="" for attempt in 1 2 3 4 5; do @@ -82,4 +80,4 @@ spec: curl -sS -X POST -H "X-Vault-Token: ${vault_token}" \ -d "${payload}" "${vault_addr}/v1/kv/data/atlas/comms/synapse-oidc" >/dev/null volumeMounts: - volumes: \ No newline at end of file + volumes: diff --git a/services/keycloak/vault-oidc-secret-ensure-job.yaml b/services/keycloak/vault-oidc-secret-ensure-job.yaml index 13c2571..e7e3b54 100644 --- a/services/keycloak/vault-oidc-secret-ensure-job.yaml +++ b/services/keycloak/vault-oidc-secret-ensure-job.yaml @@ -2,7 +2,7 @@ apiVersion: batch/v1 kind: Job metadata: - name: vault-oidc-secret-ensure-5 + name: vault-oidc-secret-ensure-6 namespace: sso spec: backoffLimit: 0 @@ -40,9 +40,9 @@ spec: operator: Exists containers: - name: apply - image: alpine:3.20 + image: bitnami/kubectl@sha256:554ab88b1858e8424c55de37ad417b16f2a0e65d1607aa0f3fe3ce9b9f10b131 command: ["/scripts/vault_oidc_secret_ensure.sh"] volumeMounts: - name: vault-oidc-secret-ensure-script mountPath: /scripts - readOnly: true \ No newline at end of file + readOnly: true diff --git 
a/services/maintenance/image-sweeper-cronjob.yaml b/services/maintenance/image-sweeper-cronjob.yaml index 08127bc..c94fcca 100644 --- a/services/maintenance/image-sweeper-cronjob.yaml +++ b/services/maintenance/image-sweeper-cronjob.yaml @@ -17,6 +17,8 @@ spec: restartPolicy: OnFailure nodeSelector: kubernetes.io/os: linux + kubernetes.io/arch: arm64 + node-role.kubernetes.io/worker: "true" tolerations: - key: node-role.kubernetes.io/control-plane operator: Exists diff --git a/services/maintenance/pod-cleaner-cronjob.yaml b/services/maintenance/pod-cleaner-cronjob.yaml index ffca7dd..e083c85 100644 --- a/services/maintenance/pod-cleaner-cronjob.yaml +++ b/services/maintenance/pod-cleaner-cronjob.yaml @@ -16,6 +16,9 @@ spec: spec: serviceAccountName: pod-cleaner restartPolicy: Never + nodeSelector: + kubernetes.io/arch: arm64 + node-role.kubernetes.io/worker: "true" containers: - name: cleaner image: bitnami/kubectl@sha256:554ab88b1858e8424c55de37ad417b16f2a0e65d1607aa0f3fe3ce9b9f10b131 From 8f990031f12b7f931224774fba20a242c55fd90b Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Sat, 17 Jan 2026 01:07:46 -0300 Subject: [PATCH 170/270] finance: fix vault seed job --- services/finance/finance-secrets-ensure-job.yaml | 9 +++++++-- services/finance/scripts/finance_secrets_ensure.sh | 2 -- 2 files changed, 7 insertions(+), 4 deletions(-) diff --git a/services/finance/finance-secrets-ensure-job.yaml b/services/finance/finance-secrets-ensure-job.yaml index 1402d14..396e16d 100644 --- a/services/finance/finance-secrets-ensure-job.yaml +++ b/services/finance/finance-secrets-ensure-job.yaml @@ -2,7 +2,7 @@ apiVersion: batch/v1 kind: Job metadata: - name: finance-secrets-ensure-1 + name: finance-secrets-ensure-2 namespace: finance spec: backoffLimit: 1 @@ -32,7 +32,12 @@ spec: containers: - name: ensure image: alpine:3.20 - command: ["/scripts/finance_secrets_ensure.sh"] + command: ["/bin/sh", "-c"] + args: + - | + set -e + apk add --no-cache bash curl jq >/dev/null + exec 
bash /scripts/finance_secrets_ensure.sh env: - name: VAULT_ROLE value: finance-secrets diff --git a/services/finance/scripts/finance_secrets_ensure.sh b/services/finance/scripts/finance_secrets_ensure.sh index a0dca4a..33a2d73 100755 --- a/services/finance/scripts/finance_secrets_ensure.sh +++ b/services/finance/scripts/finance_secrets_ensure.sh @@ -1,8 +1,6 @@ #!/usr/bin/env bash set -euo pipefail -apk add --no-cache curl jq >/dev/null - vault_addr="${VAULT_ADDR:-http://vault.vault.svc.cluster.local:8200}" vault_role="${VAULT_ROLE:-finance-secrets}" jwt="$(cat /var/run/secrets/kubernetes.io/serviceaccount/token)" From 622c7acaa4169086289155b5fb5363922fec029a Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Sat, 17 Jan 2026 01:11:45 -0300 Subject: [PATCH 171/270] jobs: rerun keycloak realm + mas db ensure --- services/comms/mas-db-ensure-job.yaml | 2 +- services/keycloak/realm-settings-job.yaml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/services/comms/mas-db-ensure-job.yaml b/services/comms/mas-db-ensure-job.yaml index 5894c0e..8aab110 100644 --- a/services/comms/mas-db-ensure-job.yaml +++ b/services/comms/mas-db-ensure-job.yaml @@ -2,7 +2,7 @@ apiVersion: batch/v1 kind: Job metadata: - name: mas-db-ensure-19 + name: mas-db-ensure-20 namespace: comms spec: backoffLimit: 1 diff --git a/services/keycloak/realm-settings-job.yaml b/services/keycloak/realm-settings-job.yaml index 0fb1882..2e4ef17 100644 --- a/services/keycloak/realm-settings-job.yaml +++ b/services/keycloak/realm-settings-job.yaml @@ -2,7 +2,7 @@ apiVersion: batch/v1 kind: Job metadata: - name: keycloak-realm-settings-27 + name: keycloak-realm-settings-28 namespace: sso spec: backoffLimit: 0 From 5eff31595e018f0895a921400d425b8e21f9b392 Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Sat, 17 Jan 2026 01:28:13 -0300 Subject: [PATCH 172/270] maintenance: add k3s agent restart daemonset --- .../k3s-agent-restart-daemonset.yaml | 49 +++++++++++++++++++ 
services/maintenance/kustomization.yaml | 1 + 2 files changed, 50 insertions(+) create mode 100644 services/maintenance/k3s-agent-restart-daemonset.yaml diff --git a/services/maintenance/k3s-agent-restart-daemonset.yaml b/services/maintenance/k3s-agent-restart-daemonset.yaml new file mode 100644 index 0000000..e91e348 --- /dev/null +++ b/services/maintenance/k3s-agent-restart-daemonset.yaml @@ -0,0 +1,49 @@ +# services/maintenance/k3s-agent-restart-daemonset.yaml +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: k3s-agent-restart + namespace: maintenance +spec: + selector: + matchLabels: + app: k3s-agent-restart + updateStrategy: + type: RollingUpdate + template: + metadata: + labels: + app: k3s-agent-restart + spec: + serviceAccountName: node-nofile + hostPID: true + hostNetwork: true + nodeSelector: + kubernetes.io/arch: arm64 + node-role.kubernetes.io/worker: "true" + tolerations: + - key: node-role.kubernetes.io/control-plane + operator: Exists + effect: NoSchedule + - key: node-role.kubernetes.io/master + operator: Exists + effect: NoSchedule + containers: + - name: restart + image: bitnami/kubectl@sha256:554ab88b1858e8424c55de37ad417b16f2a0e65d1607aa0f3fe3ce9b9f10b131 + command: + - /bin/sh + - -c + args: + - | + set -euo pipefail + if nsenter -t 1 -m -u -i -n -p -- /usr/bin/systemctl restart k3s-agent; then + echo "k3s-agent restarted" + else + nsenter -t 1 -m -u -i -n -p -- /bin/systemctl restart k3s-agent + echo "k3s-agent restarted via /bin/systemctl" + fi + sleep infinity + securityContext: + privileged: true + runAsUser: 0 diff --git a/services/maintenance/kustomization.yaml b/services/maintenance/kustomization.yaml index 8251b27..e53ed3c 100644 --- a/services/maintenance/kustomization.yaml +++ b/services/maintenance/kustomization.yaml @@ -10,6 +10,7 @@ resources: - disable-k3s-traefik-daemonset.yaml - k3s-traefik-cleanup-job.yaml - node-nofile-daemonset.yaml + - k3s-agent-restart-daemonset.yaml - pod-cleaner-cronjob.yaml - 
node-image-sweeper-serviceaccount.yaml - node-image-sweeper-daemonset.yaml From 1b4f46bb41d53eac4d787a32b00bde292c4f1854 Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Sat, 17 Jan 2026 01:36:48 -0300 Subject: [PATCH 173/270] sso: rerun realm settings and vault oidc job --- services/keycloak/realm-settings-job.yaml | 2 +- services/keycloak/vault-oidc-secret-ensure-job.yaml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/services/keycloak/realm-settings-job.yaml b/services/keycloak/realm-settings-job.yaml index 2e4ef17..2eb6771 100644 --- a/services/keycloak/realm-settings-job.yaml +++ b/services/keycloak/realm-settings-job.yaml @@ -2,7 +2,7 @@ apiVersion: batch/v1 kind: Job metadata: - name: keycloak-realm-settings-28 + name: keycloak-realm-settings-29 namespace: sso spec: backoffLimit: 0 diff --git a/services/keycloak/vault-oidc-secret-ensure-job.yaml b/services/keycloak/vault-oidc-secret-ensure-job.yaml index e7e3b54..3bd29c2 100644 --- a/services/keycloak/vault-oidc-secret-ensure-job.yaml +++ b/services/keycloak/vault-oidc-secret-ensure-job.yaml @@ -2,7 +2,7 @@ apiVersion: batch/v1 kind: Job metadata: - name: vault-oidc-secret-ensure-6 + name: vault-oidc-secret-ensure-7 namespace: sso spec: backoffLimit: 0 From 33e35193fbd58e32e77fd85ec961ea28e7839757 Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Sat, 17 Jan 2026 01:41:39 -0300 Subject: [PATCH 174/270] sso: harden keycloak jobs and rerun --- services/keycloak/realm-settings-job.yaml | 36 ++++++++++++------- .../scripts/vault_oidc_secret_ensure.sh | 9 ++++- .../vault-oidc-secret-ensure-job.yaml | 2 +- 3 files changed, 32 insertions(+), 15 deletions(-) diff --git a/services/keycloak/realm-settings-job.yaml b/services/keycloak/realm-settings-job.yaml index 2eb6771..483bc0c 100644 --- a/services/keycloak/realm-settings-job.yaml +++ b/services/keycloak/realm-settings-job.yaml @@ -2,7 +2,7 @@ apiVersion: batch/v1 kind: Job metadata: - name: keycloak-realm-settings-29 + name: 
keycloak-realm-settings-30 namespace: sso spec: backoffLimit: 0 @@ -78,6 +78,7 @@ spec: python - <<'PY' import json import os + import time import urllib.parse import urllib.error import urllib.request @@ -117,18 +118,27 @@ spec: "password": admin_password, } ).encode() - token_req = urllib.request.Request( - f"{base_url}/realms/master/protocol/openid-connect/token", - data=token_data, - headers={"Content-Type": "application/x-www-form-urlencoded"}, - method="POST", - ) - try: - with urllib.request.urlopen(token_req, timeout=10) as resp: - token_body = json.loads(resp.read().decode()) - except urllib.error.HTTPError as exc: - body = exc.read().decode(errors="replace") - raise SystemExit(f"Token request failed: status={exc.code} body={body}") + token_body = None + for attempt in range(1, 11): + token_req = urllib.request.Request( + f"{base_url}/realms/master/protocol/openid-connect/token", + data=token_data, + headers={"Content-Type": "application/x-www-form-urlencoded"}, + method="POST", + ) + try: + with urllib.request.urlopen(token_req, timeout=10) as resp: + token_body = json.loads(resp.read().decode()) + break + except urllib.error.HTTPError as exc: + body = exc.read().decode(errors="replace") + raise SystemExit(f"Token request failed: status={exc.code} body={body}") + except urllib.error.URLError as exc: + if attempt == 10: + raise SystemExit(f"Token request failed after retries: {exc}") + time.sleep(attempt * 2) + if not token_body: + raise SystemExit("Token request failed without response") access_token = token_body["access_token"] # Update realm settings safely by fetching the full realm representation first. 
diff --git a/services/keycloak/scripts/vault_oidc_secret_ensure.sh b/services/keycloak/scripts/vault_oidc_secret_ensure.sh index e8cfe5b..a951cfa 100755 --- a/services/keycloak/scripts/vault_oidc_secret_ensure.sh +++ b/services/keycloak/scripts/vault_oidc_secret_ensure.sh @@ -5,6 +5,13 @@ set -euo pipefail KC_URL="http://keycloak.sso.svc.cluster.local" ACCESS_TOKEN="" +for attempt in 1 2 3 4 5 6 7 8 9 10; do + if curl -fsS "${KC_URL}/realms/master" >/dev/null 2>&1; then + break + fi + echo "Waiting for Keycloak to be reachable (attempt ${attempt})" >&2 + sleep $((attempt * 2)) +done for attempt in 1 2 3 4 5; do TOKEN_JSON="$(curl -sS -X POST "$KC_URL/realms/master/protocol/openid-connect/token" \ -H 'Content-Type: application/x-www-form-urlencoded' \ @@ -35,7 +42,7 @@ if [ -z "$CLIENT_ID" ] || [ "$CLIENT_ID" = "null" ]; then -H 'Content-Type: application/json' \ -d "${create_payload}" \ "$KC_URL/admin/realms/atlas/clients")" - if [ "$status" != "201" ] && [ "$status" != "204" ]; then + if [ "$status" != "201" ] && [ "$status" != "204" ] && [ "$status" != "409" ]; then echo "Keycloak client create failed (status ${status})" >&2 exit 1 fi diff --git a/services/keycloak/vault-oidc-secret-ensure-job.yaml b/services/keycloak/vault-oidc-secret-ensure-job.yaml index 3bd29c2..3aa3ca5 100644 --- a/services/keycloak/vault-oidc-secret-ensure-job.yaml +++ b/services/keycloak/vault-oidc-secret-ensure-job.yaml @@ -2,7 +2,7 @@ apiVersion: batch/v1 kind: Job metadata: - name: vault-oidc-secret-ensure-7 + name: vault-oidc-secret-ensure-8 namespace: sso spec: backoffLimit: 0 From 6ec0414fcd741fdfe063eb256283fff403caee32 Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Sat, 17 Jan 2026 01:47:53 -0300 Subject: [PATCH 175/270] jobs: prefer arm64 workers --- .../cleanup/cert-manager-cleanup-job.yaml | 14 ++++++++++++++ .../longhorn/adopt/longhorn-helm-adopt-job.yaml | 14 ++++++++++++++ .../portal-onboarding-e2e-test-job.yaml | 14 ++++++++++++++ services/comms/bstein-force-leave-job.yaml 
| 16 +++++++++++++++- services/comms/comms-secrets-ensure-job.yaml | 14 ++++++++++++++ .../mas-admin-client-secret-ensure-job.yaml | 14 ++++++++++++++ services/comms/mas-db-ensure-job.yaml | 14 ++++++++++++++ services/comms/mas-local-users-ensure-job.yaml | 16 +++++++++++++++- services/comms/othrys-kick-numeric-job.yaml | 16 +++++++++++++++- .../comms/synapse-seeder-admin-ensure-job.yaml | 16 +++++++++++++++- .../comms/synapse-signingkey-ensure-job.yaml | 14 ++++++++++++++ services/comms/synapse-user-seed-job.yaml | 16 +++++++++++++++- .../keycloak/logs-oidc-secret-ensure-job.yaml | 14 ++++++++++++++ .../portal-admin-client-secret-ensure-job.yaml | 14 ++++++++++++++ services/keycloak/portal-e2e-client-job.yaml | 16 +++++++++++++++- ...ortal-e2e-execute-actions-email-test-job.yaml | 14 ++++++++++++++ .../keycloak/portal-e2e-target-client-job.yaml | 16 +++++++++++++++- ...ortal-e2e-token-exchange-permissions-job.yaml | 16 +++++++++++++++- .../portal-e2e-token-exchange-test-job.yaml | 16 +++++++++++++++- .../keycloak/synapse-oidc-secret-ensure-job.yaml | 14 ++++++++++++++ services/mailu/mailu-sync-job.yaml | 16 +++++++++++++++- .../maintenance/k3s-traefik-cleanup-job.yaml | 14 ++++++++++++++ services/monitoring/grafana-org-bootstrap.yaml | 14 ++++++++++++++ 23 files changed, 332 insertions(+), 10 deletions(-) diff --git a/infrastructure/cert-manager/cleanup/cert-manager-cleanup-job.yaml b/infrastructure/cert-manager/cleanup/cert-manager-cleanup-job.yaml index 93cf53a..5c6a07e 100644 --- a/infrastructure/cert-manager/cleanup/cert-manager-cleanup-job.yaml +++ b/infrastructure/cert-manager/cleanup/cert-manager-cleanup-job.yaml @@ -10,6 +10,20 @@ spec: spec: serviceAccountName: cert-manager-cleanup restartPolicy: Never + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: node-role.kubernetes.io/worker + operator: Exists + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + 
preference: + matchExpressions: + - key: kubernetes.io/arch + operator: In + values: ["arm64"] containers: - name: cleanup image: bitnami/kubectl@sha256:554ab88b1858e8424c55de37ad417b16f2a0e65d1607aa0f3fe3ce9b9f10b131 diff --git a/infrastructure/longhorn/adopt/longhorn-helm-adopt-job.yaml b/infrastructure/longhorn/adopt/longhorn-helm-adopt-job.yaml index 7484e47..580f5f6 100644 --- a/infrastructure/longhorn/adopt/longhorn-helm-adopt-job.yaml +++ b/infrastructure/longhorn/adopt/longhorn-helm-adopt-job.yaml @@ -10,6 +10,20 @@ spec: spec: serviceAccountName: longhorn-helm-adopt restartPolicy: Never + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: node-role.kubernetes.io/worker + operator: Exists + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: kubernetes.io/arch + operator: In + values: ["arm64"] containers: - name: adopt image: bitnami/kubectl@sha256:554ab88b1858e8424c55de37ad417b16f2a0e65d1607aa0f3fe3ce9b9f10b131 diff --git a/services/bstein-dev-home/portal-onboarding-e2e-test-job.yaml b/services/bstein-dev-home/portal-onboarding-e2e-test-job.yaml index e6e0baa..7661a31 100644 --- a/services/bstein-dev-home/portal-onboarding-e2e-test-job.yaml +++ b/services/bstein-dev-home/portal-onboarding-e2e-test-job.yaml @@ -30,6 +30,20 @@ spec: {{ end }} spec: restartPolicy: Never + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: node-role.kubernetes.io/worker + operator: Exists + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: kubernetes.io/arch + operator: In + values: ["arm64"] serviceAccountName: bstein-dev-home containers: - name: test diff --git a/services/comms/bstein-force-leave-job.yaml b/services/comms/bstein-force-leave-job.yaml index 172ffb4..07e7471 100644 --- 
a/services/comms/bstein-force-leave-job.yaml +++ b/services/comms/bstein-force-leave-job.yaml @@ -17,6 +17,20 @@ spec: {{- with secret "kv/data/atlas/comms/mas-admin-client-runtime" -}}{{ .Data.data.client_secret }}{{- end -}} spec: restartPolicy: Never + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: node-role.kubernetes.io/worker + operator: Exists + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: kubernetes.io/arch + operator: In + values: ["arm64"] serviceAccountName: comms-vault volumes: containers: @@ -186,4 +200,4 @@ spec: print(json.dumps(results, indent=2, sort_keys=True)) if failures: raise SystemExit(f"failed to leave/forget rooms: {', '.join(failures)}") - PY \ No newline at end of file + PY diff --git a/services/comms/comms-secrets-ensure-job.yaml b/services/comms/comms-secrets-ensure-job.yaml index 2dfcdf0..f95baa1 100644 --- a/services/comms/comms-secrets-ensure-job.yaml +++ b/services/comms/comms-secrets-ensure-job.yaml @@ -11,6 +11,20 @@ spec: spec: serviceAccountName: comms-secrets-ensure restartPolicy: Never + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: node-role.kubernetes.io/worker + operator: Exists + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: kubernetes.io/arch + operator: In + values: ["arm64"] containers: - name: ensure image: registry.bstein.dev/bstein/kubectl:1.35.0 diff --git a/services/comms/mas-admin-client-secret-ensure-job.yaml b/services/comms/mas-admin-client-secret-ensure-job.yaml index 07f59a6..19f2fdf 100644 --- a/services/comms/mas-admin-client-secret-ensure-job.yaml +++ b/services/comms/mas-admin-client-secret-ensure-job.yaml @@ -46,6 +46,20 @@ spec: spec: serviceAccountName: mas-admin-client-secret-writer restartPolicy: OnFailure + affinity: 
+ nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: node-role.kubernetes.io/worker + operator: Exists + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: kubernetes.io/arch + operator: In + values: ["arm64"] volumes: - name: work emptyDir: {} diff --git a/services/comms/mas-db-ensure-job.yaml b/services/comms/mas-db-ensure-job.yaml index 8aab110..b309fb3 100644 --- a/services/comms/mas-db-ensure-job.yaml +++ b/services/comms/mas-db-ensure-job.yaml @@ -11,6 +11,20 @@ spec: spec: serviceAccountName: mas-db-ensure restartPolicy: Never + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: node-role.kubernetes.io/worker + operator: Exists + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: kubernetes.io/arch + operator: In + values: ["arm64"] containers: - name: ensure image: registry.bstein.dev/bstein/kubectl:1.35.0 diff --git a/services/comms/mas-local-users-ensure-job.yaml b/services/comms/mas-local-users-ensure-job.yaml index ac3428c..db19be2 100644 --- a/services/comms/mas-local-users-ensure-job.yaml +++ b/services/comms/mas-local-users-ensure-job.yaml @@ -48,6 +48,20 @@ spec: {{- with secret "kv/data/atlas/comms/mas-secrets-runtime" -}}{{ .Data.data.keycloak_client_secret }}{{- end -}} spec: restartPolicy: Never + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: node-role.kubernetes.io/worker + operator: Exists + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: kubernetes.io/arch + operator: In + values: ["arm64"] serviceAccountName: comms-vault volumes: - name: vault-scripts @@ -187,4 +201,4 @@ spec: token = admin_token() ensure_user(token, os.environ["SEEDER_USER"], 
os.environ["SEEDER_PASS"]) ensure_user(token, os.environ["BOT_USER"], os.environ["BOT_PASS"]) - PY \ No newline at end of file + PY diff --git a/services/comms/othrys-kick-numeric-job.yaml b/services/comms/othrys-kick-numeric-job.yaml index 637ad58..213cc3a 100644 --- a/services/comms/othrys-kick-numeric-job.yaml +++ b/services/comms/othrys-kick-numeric-job.yaml @@ -47,6 +47,20 @@ spec: {{- with secret "kv/data/atlas/comms/mas-secrets-runtime" -}}{{ .Data.data.keycloak_client_secret }}{{- end -}} spec: restartPolicy: Never + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: node-role.kubernetes.io/worker + operator: Exists + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: kubernetes.io/arch + operator: In + values: ["arm64"] serviceAccountName: comms-vault containers: - name: kick @@ -156,4 +170,4 @@ spec: - name: vault-scripts configMap: name: comms-vault-env - defaultMode: 0555 \ No newline at end of file + defaultMode: 0555 diff --git a/services/comms/synapse-seeder-admin-ensure-job.yaml b/services/comms/synapse-seeder-admin-ensure-job.yaml index ad22634..6fe7d97 100644 --- a/services/comms/synapse-seeder-admin-ensure-job.yaml +++ b/services/comms/synapse-seeder-admin-ensure-job.yaml @@ -47,6 +47,20 @@ spec: {{- with secret "kv/data/atlas/comms/mas-secrets-runtime" -}}{{ .Data.data.keycloak_client_secret }}{{- end -}} spec: restartPolicy: OnFailure + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: node-role.kubernetes.io/worker + operator: Exists + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: kubernetes.io/arch + operator: In + values: ["arm64"] serviceAccountName: comms-vault containers: - name: psql @@ -77,4 +91,4 @@ spec: - name: vault-scripts configMap: name: comms-vault-env - 
defaultMode: 0555 \ No newline at end of file + defaultMode: 0555 diff --git a/services/comms/synapse-signingkey-ensure-job.yaml b/services/comms/synapse-signingkey-ensure-job.yaml index 3b87eb3..ee165f0 100644 --- a/services/comms/synapse-signingkey-ensure-job.yaml +++ b/services/comms/synapse-signingkey-ensure-job.yaml @@ -10,6 +10,20 @@ spec: spec: serviceAccountName: othrys-synapse-signingkey-job restartPolicy: OnFailure + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: node-role.kubernetes.io/worker + operator: Exists + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: kubernetes.io/arch + operator: In + values: ["arm64"] volumes: - name: work emptyDir: {} diff --git a/services/comms/synapse-user-seed-job.yaml b/services/comms/synapse-user-seed-job.yaml index 9afe882..7099e9c 100644 --- a/services/comms/synapse-user-seed-job.yaml +++ b/services/comms/synapse-user-seed-job.yaml @@ -48,6 +48,20 @@ spec: {{- with secret "kv/data/atlas/comms/mas-secrets-runtime" -}}{{ .Data.data.keycloak_client_secret }}{{- end -}} spec: restartPolicy: Never + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: node-role.kubernetes.io/worker + operator: Exists + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: kubernetes.io/arch + operator: In + values: ["arm64"] serviceAccountName: comms-vault containers: - name: seed @@ -151,4 +165,4 @@ spec: - name: vault-scripts configMap: name: comms-vault-env - defaultMode: 0555 \ No newline at end of file + defaultMode: 0555 diff --git a/services/keycloak/logs-oidc-secret-ensure-job.yaml b/services/keycloak/logs-oidc-secret-ensure-job.yaml index df89fa0..94191e8 100644 --- a/services/keycloak/logs-oidc-secret-ensure-job.yaml +++ 
b/services/keycloak/logs-oidc-secret-ensure-job.yaml @@ -23,6 +23,20 @@ spec: spec: serviceAccountName: mas-secrets-ensure restartPolicy: Never + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: node-role.kubernetes.io/worker + operator: Exists + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: kubernetes.io/arch + operator: In + values: ["arm64"] containers: - name: apply image: bitnami/kubectl@sha256:554ab88b1858e8424c55de37ad417b16f2a0e65d1607aa0f3fe3ce9b9f10b131 diff --git a/services/keycloak/portal-admin-client-secret-ensure-job.yaml b/services/keycloak/portal-admin-client-secret-ensure-job.yaml index af053a9..2eedb61 100644 --- a/services/keycloak/portal-admin-client-secret-ensure-job.yaml +++ b/services/keycloak/portal-admin-client-secret-ensure-job.yaml @@ -23,6 +23,20 @@ spec: {{ end }} spec: restartPolicy: Never + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: node-role.kubernetes.io/worker + operator: Exists + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: kubernetes.io/arch + operator: In + values: ["arm64"] serviceAccountName: sso-vault containers: - name: configure diff --git a/services/keycloak/portal-e2e-client-job.yaml b/services/keycloak/portal-e2e-client-job.yaml index 9c5229f..eb20440 100644 --- a/services/keycloak/portal-e2e-client-job.yaml +++ b/services/keycloak/portal-e2e-client-job.yaml @@ -39,6 +39,20 @@ spec: {{ end }} spec: restartPolicy: Never + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: node-role.kubernetes.io/worker + operator: Exists + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: kubernetes.io/arch + operator: In 
+ values: ["arm64"] serviceAccountName: sso-vault containers: - name: configure @@ -258,4 +272,4 @@ spec: raise SystemExit(f"Role mapping update failed (status={status}) resp={resp}") PY volumeMounts: - volumes: \ No newline at end of file + volumes: diff --git a/services/keycloak/portal-e2e-execute-actions-email-test-job.yaml b/services/keycloak/portal-e2e-execute-actions-email-test-job.yaml index 892d5aa..211bd3e 100644 --- a/services/keycloak/portal-e2e-execute-actions-email-test-job.yaml +++ b/services/keycloak/portal-e2e-execute-actions-email-test-job.yaml @@ -39,6 +39,20 @@ spec: {{ end }} spec: restartPolicy: Never + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: node-role.kubernetes.io/worker + operator: Exists + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: kubernetes.io/arch + operator: In + values: ["arm64"] serviceAccountName: sso-vault containers: - name: test diff --git a/services/keycloak/portal-e2e-target-client-job.yaml b/services/keycloak/portal-e2e-target-client-job.yaml index 6c1086f..5fc9b7f 100644 --- a/services/keycloak/portal-e2e-target-client-job.yaml +++ b/services/keycloak/portal-e2e-target-client-job.yaml @@ -39,6 +39,20 @@ spec: {{ end }} spec: restartPolicy: Never + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: node-role.kubernetes.io/worker + operator: Exists + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: kubernetes.io/arch + operator: In + values: ["arm64"] serviceAccountName: sso-vault containers: - name: configure @@ -159,4 +173,4 @@ spec: print(f"OK: ensured token exchange enabled on client {target_client_id}") PY volumeMounts: - volumes: \ No newline at end of file + volumes: diff --git 
a/services/keycloak/portal-e2e-token-exchange-permissions-job.yaml b/services/keycloak/portal-e2e-token-exchange-permissions-job.yaml index 9e3f11c..77828ab 100644 --- a/services/keycloak/portal-e2e-token-exchange-permissions-job.yaml +++ b/services/keycloak/portal-e2e-token-exchange-permissions-job.yaml @@ -39,6 +39,20 @@ spec: {{ end }} spec: restartPolicy: Never + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: node-role.kubernetes.io/worker + operator: Exists + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: kubernetes.io/arch + operator: In + values: ["arm64"] serviceAccountName: sso-vault containers: - name: configure @@ -291,4 +305,4 @@ spec: print("OK: configured token exchange permissions for portal E2E client") PY - volumeMounts: \ No newline at end of file + volumeMounts: diff --git a/services/keycloak/portal-e2e-token-exchange-test-job.yaml b/services/keycloak/portal-e2e-token-exchange-test-job.yaml index 4e6960d..21551e0 100644 --- a/services/keycloak/portal-e2e-token-exchange-test-job.yaml +++ b/services/keycloak/portal-e2e-token-exchange-test-job.yaml @@ -40,6 +40,20 @@ spec: {{ end }} spec: restartPolicy: Never + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: node-role.kubernetes.io/worker + operator: Exists + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: kubernetes.io/arch + operator: In + values: ["arm64"] serviceAccountName: sso-vault containers: - name: test @@ -71,4 +85,4 @@ spec: - name: tests configMap: name: portal-e2e-tests - defaultMode: 0555 \ No newline at end of file + defaultMode: 0555 diff --git a/services/keycloak/synapse-oidc-secret-ensure-job.yaml b/services/keycloak/synapse-oidc-secret-ensure-job.yaml index 07d1378..1780d2e 100644 --- 
a/services/keycloak/synapse-oidc-secret-ensure-job.yaml +++ b/services/keycloak/synapse-oidc-secret-ensure-job.yaml @@ -23,6 +23,20 @@ spec: spec: serviceAccountName: mas-secrets-ensure restartPolicy: Never + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: node-role.kubernetes.io/worker + operator: Exists + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: kubernetes.io/arch + operator: In + values: ["arm64"] containers: - name: apply image: bitnami/kubectl@sha256:554ab88b1858e8424c55de37ad417b16f2a0e65d1607aa0f3fe3ce9b9f10b131 diff --git a/services/mailu/mailu-sync-job.yaml b/services/mailu/mailu-sync-job.yaml index 38cea89..421dceb 100644 --- a/services/mailu/mailu-sync-job.yaml +++ b/services/mailu/mailu-sync-job.yaml @@ -28,6 +28,20 @@ spec: {{- with secret "kv/data/atlas/mailu/mailu-sync-credentials" -}}{{ index .Data.data "client-secret" }}{{- end -}} spec: restartPolicy: OnFailure + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: node-role.kubernetes.io/worker + operator: Exists + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: kubernetes.io/arch + operator: In + values: ["arm64"] serviceAccountName: mailu-vault-sync containers: - name: mailu-sync @@ -75,4 +89,4 @@ spec: - name: vault-scripts configMap: name: mailu-vault-env - defaultMode: 0555 \ No newline at end of file + defaultMode: 0555 diff --git a/services/maintenance/k3s-traefik-cleanup-job.yaml b/services/maintenance/k3s-traefik-cleanup-job.yaml index 33fa7be..5638e83 100644 --- a/services/maintenance/k3s-traefik-cleanup-job.yaml +++ b/services/maintenance/k3s-traefik-cleanup-job.yaml @@ -10,6 +10,20 @@ spec: spec: serviceAccountName: k3s-traefik-cleanup restartPolicy: Never + affinity: + nodeAffinity: + 
requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: node-role.kubernetes.io/worker + operator: Exists + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: kubernetes.io/arch + operator: In + values: ["arm64"] containers: - name: cleanup image: bitnami/kubectl@sha256:554ab88b1858e8424c55de37ad417b16f2a0e65d1607aa0f3fe3ce9b9f10b131 diff --git a/services/monitoring/grafana-org-bootstrap.yaml b/services/monitoring/grafana-org-bootstrap.yaml index a39d938..d0791f5 100644 --- a/services/monitoring/grafana-org-bootstrap.yaml +++ b/services/monitoring/grafana-org-bootstrap.yaml @@ -20,6 +20,20 @@ spec: {{- end -}} spec: restartPolicy: OnFailure + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: node-role.kubernetes.io/worker + operator: Exists + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: kubernetes.io/arch + operator: In + values: ["arm64"] serviceAccountName: monitoring-vault-sync containers: - name: bootstrap From 86ea701ff022ff6c4c37010a10e040f3783ee9f8 Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Sat, 17 Jan 2026 01:52:16 -0300 Subject: [PATCH 176/270] jobs: bump names after affinity update --- .../cert-manager/cleanup/cert-manager-cleanup-job.yaml | 2 +- infrastructure/longhorn/adopt/longhorn-helm-adopt-job.yaml | 2 +- services/bstein-dev-home/portal-onboarding-e2e-test-job.yaml | 2 +- services/comms/bstein-force-leave-job.yaml | 2 +- services/comms/comms-secrets-ensure-job.yaml | 2 +- services/comms/mas-admin-client-secret-ensure-job.yaml | 2 +- services/comms/mas-db-ensure-job.yaml | 2 +- services/comms/mas-local-users-ensure-job.yaml | 2 +- services/comms/othrys-kick-numeric-job.yaml | 2 +- services/comms/synapse-seeder-admin-ensure-job.yaml | 2 +- services/comms/synapse-signingkey-ensure-job.yaml | 2 +- 
services/comms/synapse-user-seed-job.yaml | 2 +- services/keycloak/logs-oidc-secret-ensure-job.yaml | 2 +- services/keycloak/portal-admin-client-secret-ensure-job.yaml | 2 +- services/keycloak/portal-e2e-client-job.yaml | 2 +- .../keycloak/portal-e2e-execute-actions-email-test-job.yaml | 2 +- services/keycloak/portal-e2e-target-client-job.yaml | 2 +- .../keycloak/portal-e2e-token-exchange-permissions-job.yaml | 2 +- services/keycloak/portal-e2e-token-exchange-test-job.yaml | 2 +- services/keycloak/synapse-oidc-secret-ensure-job.yaml | 2 +- services/mailu/mailu-sync-job.yaml | 2 +- services/maintenance/k3s-traefik-cleanup-job.yaml | 2 +- services/monitoring/grafana-org-bootstrap.yaml | 2 +- 23 files changed, 23 insertions(+), 23 deletions(-) diff --git a/infrastructure/cert-manager/cleanup/cert-manager-cleanup-job.yaml b/infrastructure/cert-manager/cleanup/cert-manager-cleanup-job.yaml index 5c6a07e..c1de1fc 100644 --- a/infrastructure/cert-manager/cleanup/cert-manager-cleanup-job.yaml +++ b/infrastructure/cert-manager/cleanup/cert-manager-cleanup-job.yaml @@ -2,7 +2,7 @@ apiVersion: batch/v1 kind: Job metadata: - name: cert-manager-cleanup + name: cert-manager-cleanup-2 namespace: cert-manager spec: backoffLimit: 1 diff --git a/infrastructure/longhorn/adopt/longhorn-helm-adopt-job.yaml b/infrastructure/longhorn/adopt/longhorn-helm-adopt-job.yaml index 580f5f6..e1a520a 100644 --- a/infrastructure/longhorn/adopt/longhorn-helm-adopt-job.yaml +++ b/infrastructure/longhorn/adopt/longhorn-helm-adopt-job.yaml @@ -2,7 +2,7 @@ apiVersion: batch/v1 kind: Job metadata: - name: longhorn-helm-adopt + name: longhorn-helm-adopt-2 namespace: longhorn-system spec: backoffLimit: 1 diff --git a/services/bstein-dev-home/portal-onboarding-e2e-test-job.yaml b/services/bstein-dev-home/portal-onboarding-e2e-test-job.yaml index 7661a31..f22272e 100644 --- a/services/bstein-dev-home/portal-onboarding-e2e-test-job.yaml +++ b/services/bstein-dev-home/portal-onboarding-e2e-test-job.yaml @@ 
-2,7 +2,7 @@ apiVersion: batch/v1 kind: Job metadata: - name: portal-onboarding-e2e-test-18 + name: portal-onboarding-e2e-test-19 namespace: bstein-dev-home spec: backoffLimit: 0 diff --git a/services/comms/bstein-force-leave-job.yaml b/services/comms/bstein-force-leave-job.yaml index 07e7471..a6105cc 100644 --- a/services/comms/bstein-force-leave-job.yaml +++ b/services/comms/bstein-force-leave-job.yaml @@ -2,7 +2,7 @@ apiVersion: batch/v1 kind: Job metadata: - name: bstein-leave-rooms-10 + name: bstein-leave-rooms-11 namespace: comms spec: backoffLimit: 0 diff --git a/services/comms/comms-secrets-ensure-job.yaml b/services/comms/comms-secrets-ensure-job.yaml index f95baa1..ab13671 100644 --- a/services/comms/comms-secrets-ensure-job.yaml +++ b/services/comms/comms-secrets-ensure-job.yaml @@ -2,7 +2,7 @@ apiVersion: batch/v1 kind: Job metadata: - name: comms-secrets-ensure-4 + name: comms-secrets-ensure-5 namespace: comms spec: backoffLimit: 1 diff --git a/services/comms/mas-admin-client-secret-ensure-job.yaml b/services/comms/mas-admin-client-secret-ensure-job.yaml index 19f2fdf..81f35d1 100644 --- a/services/comms/mas-admin-client-secret-ensure-job.yaml +++ b/services/comms/mas-admin-client-secret-ensure-job.yaml @@ -2,7 +2,7 @@ apiVersion: v1 kind: ServiceAccount metadata: - name: mas-admin-client-secret-writer + name: mas-admin-client-secret-writer-2 namespace: comms imagePullSecrets: - name: harbor-regcred diff --git a/services/comms/mas-db-ensure-job.yaml b/services/comms/mas-db-ensure-job.yaml index b309fb3..3ddb7e9 100644 --- a/services/comms/mas-db-ensure-job.yaml +++ b/services/comms/mas-db-ensure-job.yaml @@ -2,7 +2,7 @@ apiVersion: batch/v1 kind: Job metadata: - name: mas-db-ensure-20 + name: mas-db-ensure-21 namespace: comms spec: backoffLimit: 1 diff --git a/services/comms/mas-local-users-ensure-job.yaml b/services/comms/mas-local-users-ensure-job.yaml index db19be2..a998445 100644 --- a/services/comms/mas-local-users-ensure-job.yaml +++ 
b/services/comms/mas-local-users-ensure-job.yaml @@ -2,7 +2,7 @@ apiVersion: batch/v1 kind: Job metadata: - name: mas-local-users-ensure-10 + name: mas-local-users-ensure-11 namespace: comms spec: backoffLimit: 1 diff --git a/services/comms/othrys-kick-numeric-job.yaml b/services/comms/othrys-kick-numeric-job.yaml index 213cc3a..df22ae1 100644 --- a/services/comms/othrys-kick-numeric-job.yaml +++ b/services/comms/othrys-kick-numeric-job.yaml @@ -2,7 +2,7 @@ apiVersion: batch/v1 kind: Job metadata: - name: othrys-kick-numeric-5 + name: othrys-kick-numeric-6 namespace: comms spec: backoffLimit: 0 diff --git a/services/comms/synapse-seeder-admin-ensure-job.yaml b/services/comms/synapse-seeder-admin-ensure-job.yaml index 6fe7d97..9905658 100644 --- a/services/comms/synapse-seeder-admin-ensure-job.yaml +++ b/services/comms/synapse-seeder-admin-ensure-job.yaml @@ -2,7 +2,7 @@ apiVersion: batch/v1 kind: Job metadata: - name: synapse-seeder-admin-ensure-6 + name: synapse-seeder-admin-ensure-7 namespace: comms spec: backoffLimit: 2 diff --git a/services/comms/synapse-signingkey-ensure-job.yaml b/services/comms/synapse-signingkey-ensure-job.yaml index ee165f0..402a820 100644 --- a/services/comms/synapse-signingkey-ensure-job.yaml +++ b/services/comms/synapse-signingkey-ensure-job.yaml @@ -2,7 +2,7 @@ apiVersion: batch/v1 kind: Job metadata: - name: othrys-synapse-signingkey-ensure-6 + name: othrys-synapse-signingkey-ensure-7 namespace: comms spec: backoffLimit: 2 diff --git a/services/comms/synapse-user-seed-job.yaml b/services/comms/synapse-user-seed-job.yaml index 7099e9c..7fef796 100644 --- a/services/comms/synapse-user-seed-job.yaml +++ b/services/comms/synapse-user-seed-job.yaml @@ -2,7 +2,7 @@ apiVersion: batch/v1 kind: Job metadata: - name: synapse-user-seed-6 + name: synapse-user-seed-7 namespace: comms spec: backoffLimit: 1 diff --git a/services/keycloak/logs-oidc-secret-ensure-job.yaml b/services/keycloak/logs-oidc-secret-ensure-job.yaml index 94191e8..14e80df 
100644 --- a/services/keycloak/logs-oidc-secret-ensure-job.yaml +++ b/services/keycloak/logs-oidc-secret-ensure-job.yaml @@ -2,7 +2,7 @@ apiVersion: batch/v1 kind: Job metadata: - name: logs-oidc-secret-ensure-9 + name: logs-oidc-secret-ensure-10 namespace: sso spec: backoffLimit: 0 diff --git a/services/keycloak/portal-admin-client-secret-ensure-job.yaml b/services/keycloak/portal-admin-client-secret-ensure-job.yaml index 2eedb61..561ae00 100644 --- a/services/keycloak/portal-admin-client-secret-ensure-job.yaml +++ b/services/keycloak/portal-admin-client-secret-ensure-job.yaml @@ -2,7 +2,7 @@ apiVersion: batch/v1 kind: Job metadata: - name: keycloak-portal-admin-secret-ensure-2 + name: keycloak-portal-admin-secret-ensure-3 namespace: sso spec: backoffLimit: 0 diff --git a/services/keycloak/portal-e2e-client-job.yaml b/services/keycloak/portal-e2e-client-job.yaml index eb20440..399a32b 100644 --- a/services/keycloak/portal-e2e-client-job.yaml +++ b/services/keycloak/portal-e2e-client-job.yaml @@ -2,7 +2,7 @@ apiVersion: batch/v1 kind: Job metadata: - name: keycloak-portal-e2e-client-6 + name: keycloak-portal-e2e-client-7 namespace: sso spec: backoffLimit: 0 diff --git a/services/keycloak/portal-e2e-execute-actions-email-test-job.yaml b/services/keycloak/portal-e2e-execute-actions-email-test-job.yaml index 211bd3e..5653148 100644 --- a/services/keycloak/portal-e2e-execute-actions-email-test-job.yaml +++ b/services/keycloak/portal-e2e-execute-actions-email-test-job.yaml @@ -2,7 +2,7 @@ apiVersion: batch/v1 kind: Job metadata: - name: keycloak-portal-e2e-execute-actions-email-12 + name: keycloak-portal-e2e-execute-actions-email-13 namespace: sso spec: backoffLimit: 3 diff --git a/services/keycloak/portal-e2e-target-client-job.yaml b/services/keycloak/portal-e2e-target-client-job.yaml index 5fc9b7f..66426e0 100644 --- a/services/keycloak/portal-e2e-target-client-job.yaml +++ b/services/keycloak/portal-e2e-target-client-job.yaml @@ -2,7 +2,7 @@ apiVersion: batch/v1 
kind: Job metadata: - name: keycloak-portal-e2e-target-5 + name: keycloak-portal-e2e-target-6 namespace: sso spec: backoffLimit: 0 diff --git a/services/keycloak/portal-e2e-token-exchange-permissions-job.yaml b/services/keycloak/portal-e2e-token-exchange-permissions-job.yaml index 77828ab..a6dd621 100644 --- a/services/keycloak/portal-e2e-token-exchange-permissions-job.yaml +++ b/services/keycloak/portal-e2e-token-exchange-permissions-job.yaml @@ -2,7 +2,7 @@ apiVersion: batch/v1 kind: Job metadata: - name: keycloak-portal-e2e-token-exchange-permissions-9 + name: keycloak-portal-e2e-token-exchange-permissions-10 namespace: sso spec: backoffLimit: 6 diff --git a/services/keycloak/portal-e2e-token-exchange-test-job.yaml b/services/keycloak/portal-e2e-token-exchange-test-job.yaml index 21551e0..8b7beed 100644 --- a/services/keycloak/portal-e2e-token-exchange-test-job.yaml +++ b/services/keycloak/portal-e2e-token-exchange-test-job.yaml @@ -2,7 +2,7 @@ apiVersion: batch/v1 kind: Job metadata: - name: keycloak-portal-e2e-token-exchange-test-5 + name: keycloak-portal-e2e-token-exchange-test-6 namespace: sso spec: backoffLimit: 6 diff --git a/services/keycloak/synapse-oidc-secret-ensure-job.yaml b/services/keycloak/synapse-oidc-secret-ensure-job.yaml index 1780d2e..e808e7e 100644 --- a/services/keycloak/synapse-oidc-secret-ensure-job.yaml +++ b/services/keycloak/synapse-oidc-secret-ensure-job.yaml @@ -2,7 +2,7 @@ apiVersion: batch/v1 kind: Job metadata: - name: synapse-oidc-secret-ensure-9 + name: synapse-oidc-secret-ensure-10 namespace: sso spec: backoffLimit: 0 diff --git a/services/mailu/mailu-sync-job.yaml b/services/mailu/mailu-sync-job.yaml index 421dceb..5f0f777 100644 --- a/services/mailu/mailu-sync-job.yaml +++ b/services/mailu/mailu-sync-job.yaml @@ -2,7 +2,7 @@ apiVersion: batch/v1 kind: Job metadata: - name: mailu-sync-4 + name: mailu-sync-5 namespace: mailu-mailserver spec: template: diff --git a/services/maintenance/k3s-traefik-cleanup-job.yaml 
b/services/maintenance/k3s-traefik-cleanup-job.yaml index 5638e83..d5d12a6 100644 --- a/services/maintenance/k3s-traefik-cleanup-job.yaml +++ b/services/maintenance/k3s-traefik-cleanup-job.yaml @@ -2,7 +2,7 @@ apiVersion: batch/v1 kind: Job metadata: - name: k3s-traefik-cleanup + name: k3s-traefik-cleanup-2 namespace: maintenance spec: backoffLimit: 1 diff --git a/services/monitoring/grafana-org-bootstrap.yaml b/services/monitoring/grafana-org-bootstrap.yaml index d0791f5..f1d4075 100644 --- a/services/monitoring/grafana-org-bootstrap.yaml +++ b/services/monitoring/grafana-org-bootstrap.yaml @@ -2,7 +2,7 @@ apiVersion: batch/v1 kind: Job metadata: - name: grafana-org-bootstrap-2 + name: grafana-org-bootstrap-3 namespace: monitoring spec: backoffLimit: 2 From 7b009caf976048e3b8021db2a9b45f9ac6990e68 Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Sat, 17 Jan 2026 01:54:15 -0300 Subject: [PATCH 177/270] keycloak: bump portal admin secret job --- services/keycloak/portal-admin-client-secret-ensure-job.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/services/keycloak/portal-admin-client-secret-ensure-job.yaml b/services/keycloak/portal-admin-client-secret-ensure-job.yaml index 561ae00..90dd4b7 100644 --- a/services/keycloak/portal-admin-client-secret-ensure-job.yaml +++ b/services/keycloak/portal-admin-client-secret-ensure-job.yaml @@ -2,7 +2,7 @@ apiVersion: batch/v1 kind: Job metadata: - name: keycloak-portal-admin-secret-ensure-3 + name: keycloak-portal-admin-secret-ensure-4 namespace: sso spec: backoffLimit: 0 From 928b2a87065558d7b4e3a33cc268f573540b1691 Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Sat, 17 Jan 2026 02:00:14 -0300 Subject: [PATCH 178/270] comms: bump mas admin secret job --- services/comms/mas-admin-client-secret-ensure-job.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/services/comms/mas-admin-client-secret-ensure-job.yaml b/services/comms/mas-admin-client-secret-ensure-job.yaml index 
81f35d1..7b05cca 100644 --- a/services/comms/mas-admin-client-secret-ensure-job.yaml +++ b/services/comms/mas-admin-client-secret-ensure-job.yaml @@ -2,7 +2,7 @@ apiVersion: v1 kind: ServiceAccount metadata: - name: mas-admin-client-secret-writer-2 + name: mas-admin-client-secret-writer namespace: comms imagePullSecrets: - name: harbor-regcred @@ -38,7 +38,7 @@ subjects: apiVersion: batch/v1 kind: Job metadata: - name: mas-admin-client-secret-ensure-10 + name: mas-admin-client-secret-ensure-11 namespace: comms spec: backoffLimit: 2 From cd60ebc9825f9bcefe0fc089f9a117311b9554e0 Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Sat, 17 Jan 2026 02:01:53 -0300 Subject: [PATCH 179/270] mailu: bump sync job --- services/mailu/mailu-sync-job.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/services/mailu/mailu-sync-job.yaml b/services/mailu/mailu-sync-job.yaml index 5f0f777..6da615e 100644 --- a/services/mailu/mailu-sync-job.yaml +++ b/services/mailu/mailu-sync-job.yaml @@ -2,7 +2,7 @@ apiVersion: batch/v1 kind: Job metadata: - name: mailu-sync-5 + name: mailu-sync-6 namespace: mailu-mailserver spec: template: From 64d0a701918cf454b4e067333f368b057076e1ea Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Sat, 17 Jan 2026 02:06:55 -0300 Subject: [PATCH 180/270] finance: decouple from mailu readiness --- .../atlas/flux-system/applications/finance/kustomization.yaml | 1 - 1 file changed, 1 deletion(-) diff --git a/clusters/atlas/flux-system/applications/finance/kustomization.yaml b/clusters/atlas/flux-system/applications/finance/kustomization.yaml index a28b711..1bb87d0 100644 --- a/clusters/atlas/flux-system/applications/finance/kustomization.yaml +++ b/clusters/atlas/flux-system/applications/finance/kustomization.yaml @@ -17,7 +17,6 @@ spec: - name: postgres - name: traefik - name: vault - - name: mailu healthChecks: - apiVersion: apps/v1 kind: Deployment From 9eedcad520f898d1e27854a274158602a0a578f0 Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Sat, 
17 Jan 2026 02:10:28 -0300 Subject: [PATCH 181/270] finance: ensure vault init ordering --- services/finance/actual-budget-deployment.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/services/finance/actual-budget-deployment.yaml b/services/finance/actual-budget-deployment.yaml index 8e76d3e..67092a0 100644 --- a/services/finance/actual-budget-deployment.yaml +++ b/services/finance/actual-budget-deployment.yaml @@ -23,6 +23,7 @@ spec: annotations: vault.hashicorp.com/agent-inject: "true" vault.hashicorp.com/agent-pre-populate-only: "true" + vault.hashicorp.com/agent-init-first: "true" vault.hashicorp.com/role: "finance" vault.hashicorp.com/agent-inject-secret-actual-env.sh: "kv/data/atlas/finance/actual-oidc" vault.hashicorp.com/agent-inject-template-actual-env.sh: | From 9dd2a720636461b267b03ed4a8034b283ba8fc21 Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Sat, 17 Jan 2026 02:16:13 -0300 Subject: [PATCH 182/270] mailu: retry sync and rerun job --- services/mailu/mailu-sync-job.yaml | 2 +- services/mailu/scripts/mailu_sync.py | 28 +++++++++++++++++++++++++--- 2 files changed, 26 insertions(+), 4 deletions(-) diff --git a/services/mailu/mailu-sync-job.yaml b/services/mailu/mailu-sync-job.yaml index 6da615e..18aef7c 100644 --- a/services/mailu/mailu-sync-job.yaml +++ b/services/mailu/mailu-sync-job.yaml @@ -2,7 +2,7 @@ apiVersion: batch/v1 kind: Job metadata: - name: mailu-sync-6 + name: mailu-sync-7 namespace: mailu-mailserver spec: template: diff --git a/services/mailu/scripts/mailu_sync.py b/services/mailu/scripts/mailu_sync.py index 74b170a..d1754cb 100644 --- a/services/mailu/scripts/mailu_sync.py +++ b/services/mailu/scripts/mailu_sync.py @@ -42,6 +42,28 @@ def log(msg): sys.stdout.flush() +def retry_request(label, func, attempts=10): + for attempt in range(1, attempts + 1): + try: + return func() + except requests.RequestException as exc: + if attempt == attempts: + raise + log(f"{label} failed (attempt {attempt}/{attempts}): {exc}") + 
time.sleep(attempt * 2) + + +def retry_db_connect(attempts=10): + for attempt in range(1, attempts + 1): + try: + return psycopg2.connect(**DB_CONFIG) + except psycopg2.Error as exc: + if attempt == attempts: + raise + log(f"Database connection failed (attempt {attempt}/{attempts}): {exc}") + time.sleep(attempt * 2) + + def get_kc_token(): resp = SESSION.post( f"{KC_BASE}/realms/{KC_REALM}/protocol/openid-connect/token", @@ -175,13 +197,13 @@ def ensure_mailu_user(cursor, email, password, display_name): def main(): - token = get_kc_token() - users = kc_get_users(token) + token = retry_request("Keycloak token", get_kc_token) + users = retry_request("Keycloak user list", lambda: kc_get_users(token)) if not users: log("No users found; exiting.") return - conn = psycopg2.connect(**DB_CONFIG) + conn = retry_db_connect() conn.autocommit = True cursor = conn.cursor(cursor_factory=RealDictCursor) From 9047dfa3b5f29cdf744d38d3b98877a79a75b7db Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Sat, 17 Jan 2026 02:17:29 -0300 Subject: [PATCH 183/270] finance: rerun secrets seed job --- services/finance/finance-secrets-ensure-job.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/services/finance/finance-secrets-ensure-job.yaml b/services/finance/finance-secrets-ensure-job.yaml index 396e16d..103c876 100644 --- a/services/finance/finance-secrets-ensure-job.yaml +++ b/services/finance/finance-secrets-ensure-job.yaml @@ -2,7 +2,7 @@ apiVersion: batch/v1 kind: Job metadata: - name: finance-secrets-ensure-2 + name: finance-secrets-ensure-3 namespace: finance spec: backoffLimit: 1 From 1a3d35094e042c0d8b7d2a9644ec72ea592a4c06 Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Sat, 17 Jan 2026 02:22:59 -0300 Subject: [PATCH 184/270] finance: switch vault seed to python --- .../finance/finance-secrets-ensure-job.yaml | 7 +- services/finance/kustomization.yaml | 2 +- .../finance/scripts/finance_secrets_ensure.py | 175 ++++++++++++++++++ 
.../finance/scripts/finance_secrets_ensure.sh | 130 ------------- 4 files changed, 179 insertions(+), 135 deletions(-) create mode 100644 services/finance/scripts/finance_secrets_ensure.py delete mode 100755 services/finance/scripts/finance_secrets_ensure.sh diff --git a/services/finance/finance-secrets-ensure-job.yaml b/services/finance/finance-secrets-ensure-job.yaml index 103c876..5de20af 100644 --- a/services/finance/finance-secrets-ensure-job.yaml +++ b/services/finance/finance-secrets-ensure-job.yaml @@ -2,7 +2,7 @@ apiVersion: batch/v1 kind: Job metadata: - name: finance-secrets-ensure-3 + name: finance-secrets-ensure-4 namespace: finance spec: backoffLimit: 1 @@ -31,13 +31,12 @@ spec: node-role.kubernetes.io/worker: "true" containers: - name: ensure - image: alpine:3.20 + image: python:3.11-alpine command: ["/bin/sh", "-c"] args: - | set -e - apk add --no-cache bash curl jq >/dev/null - exec bash /scripts/finance_secrets_ensure.sh + exec python /scripts/finance_secrets_ensure.py env: - name: VAULT_ROLE value: finance-secrets diff --git a/services/finance/kustomization.yaml b/services/finance/kustomization.yaml index 2189834..11cb4ab 100644 --- a/services/finance/kustomization.yaml +++ b/services/finance/kustomization.yaml @@ -28,4 +28,4 @@ configMapGenerator: - firefly_user_sync.php=scripts/firefly_user_sync.php - name: finance-secrets-ensure-script files: - - finance_secrets_ensure.sh=scripts/finance_secrets_ensure.sh + - finance_secrets_ensure.py=scripts/finance_secrets_ensure.py diff --git a/services/finance/scripts/finance_secrets_ensure.py b/services/finance/scripts/finance_secrets_ensure.py new file mode 100644 index 0000000..9a04ad0 --- /dev/null +++ b/services/finance/scripts/finance_secrets_ensure.py @@ -0,0 +1,175 @@ +#!/usr/bin/env python3 +import base64 +import json +import os +import secrets +import sys +import urllib.error +import urllib.request +from pathlib import Path + + +def read_file(path: Path) -> str: + if not path.exists(): + return 
"" + return path.read_text(encoding="utf-8").strip() + + +def require_value(label: str, value: str) -> None: + if not value: + raise RuntimeError(f"missing {label}") + + +def http_json(method: str, url: str, headers=None, payload=None): + data = None + if payload is not None: + data = json.dumps(payload).encode() + req = urllib.request.Request(url, data=data, headers=headers or {}, method=method) + with urllib.request.urlopen(req, timeout=15) as resp: + body = resp.read() + if not body: + return resp.status, None + return resp.status, json.loads(body.decode()) + + +def vault_login(vault_addr: str, role: str, jwt: str) -> str: + status, body = http_json( + "POST", + f"{vault_addr}/v1/auth/kubernetes/login", + headers={"Content-Type": "application/json"}, + payload={"jwt": jwt, "role": role}, + ) + if status != 200 or not body: + raise RuntimeError("vault login failed") + token = body.get("auth", {}).get("client_token") + if not token: + raise RuntimeError("vault login returned no token") + return token + + +def vault_read(vault_addr: str, token: str, path: str): + try: + status, body = http_json( + "GET", + f"{vault_addr}/v1/kv/data/atlas/{path}", + headers={"X-Vault-Token": token}, + ) + except urllib.error.HTTPError as exc: + if exc.code == 404: + return {} + raise + if status != 200 or not body: + return {} + return body.get("data", {}).get("data", {}) or {} + + +def vault_write(vault_addr: str, token: str, path: str, data: dict): + payload = {"data": data} + status, _ = http_json( + "POST", + f"{vault_addr}/v1/kv/data/atlas/{path}", + headers={"X-Vault-Token": token, "Content-Type": "application/json"}, + payload=payload, + ) + if status not in (200, 204): + raise RuntimeError(f"vault write failed for {path} (status {status})") + + +def ensure_firefly_db(vault_addr: str, token: str): + base = Path("/secrets/firefly-db") + host = read_file(base / "DB_HOST") or read_file(base / "DB_HOSTNAME") + port = read_file(base / "DB_PORT") + db_name = read_file(base / 
"DB_DATABASE") or read_file(base / "DB_NAME") + user = read_file(base / "DB_USERNAME") or read_file(base / "DB_USER") + password = read_file(base / "DB_PASSWORD") or read_file(base / "DB_PASS") + + require_value("firefly-db/DB_HOST", host) + require_value("firefly-db/DB_PORT", port) + require_value("firefly-db/DB_DATABASE", db_name) + require_value("firefly-db/DB_USERNAME", user) + require_value("firefly-db/DB_PASSWORD", password) + + vault_write( + vault_addr, + token, + "finance/firefly-db", + { + "DB_HOST": host, + "DB_PORT": port, + "DB_DATABASE": db_name, + "DB_USERNAME": user, + "DB_PASSWORD": password, + }, + ) + + +def ensure_firefly_secrets(vault_addr: str, token: str): + current = vault_read(vault_addr, token, "finance/firefly-secrets") + app_key = current.get("APP_KEY") + if not app_key: + app_key = "base64:" + base64.b64encode(secrets.token_bytes(32)).decode() + cron_token = current.get("STATIC_CRON_TOKEN") + if not cron_token: + cron_token = secrets.token_urlsafe(32) + vault_write( + vault_addr, + token, + "finance/firefly-secrets", + {"APP_KEY": app_key, "STATIC_CRON_TOKEN": cron_token}, + ) + + +def ensure_actual_db(vault_addr: str, token: str): + base = Path("/secrets/actualbudget-db") + if not base.exists(): + return + host = read_file(base / "DB_HOST") or read_file(base / "DB_HOSTNAME") + port = read_file(base / "DB_PORT") + db_name = read_file(base / "DB_DATABASE") or read_file(base / "DB_NAME") + user = read_file(base / "DB_USERNAME") or read_file(base / "DB_USER") + password = read_file(base / "DB_PASSWORD") or read_file(base / "DB_PASS") + + if not any([host, port, db_name, user, password]): + return + + require_value("actualbudget-db/DB_HOST", host) + require_value("actualbudget-db/DB_PORT", port) + require_value("actualbudget-db/DB_DATABASE", db_name) + require_value("actualbudget-db/DB_USERNAME", user) + require_value("actualbudget-db/DB_PASSWORD", password) + + vault_write( + vault_addr, + token, + "finance/actual-db", + { + "DB_HOST": 
host, + "DB_PORT": port, + "DB_DATABASE": db_name, + "DB_USERNAME": user, + "DB_PASSWORD": password, + }, + ) + + +def main() -> int: + vault_addr = os.environ.get("VAULT_ADDR", "http://vault.vault.svc.cluster.local:8200") + vault_role = os.environ.get("VAULT_ROLE", "finance-secrets") + jwt = read_file(Path("/var/run/secrets/kubernetes.io/serviceaccount/token")) + if not jwt: + raise RuntimeError("missing service account token") + + token = vault_login(vault_addr, vault_role, jwt) + ensure_firefly_db(vault_addr, token) + ensure_firefly_secrets(vault_addr, token) + ensure_actual_db(vault_addr, token) + print("finance secrets ensured") + return 0 + + +if __name__ == "__main__": + try: + sys.exit(main()) + except Exception as exc: + print(f"finance secrets ensure failed: {exc}", file=sys.stderr) + sys.exit(1) diff --git a/services/finance/scripts/finance_secrets_ensure.sh b/services/finance/scripts/finance_secrets_ensure.sh deleted file mode 100755 index 33a2d73..0000000 --- a/services/finance/scripts/finance_secrets_ensure.sh +++ /dev/null @@ -1,130 +0,0 @@ -#!/usr/bin/env bash -set -euo pipefail - -vault_addr="${VAULT_ADDR:-http://vault.vault.svc.cluster.local:8200}" -vault_role="${VAULT_ROLE:-finance-secrets}" -jwt="$(cat /var/run/secrets/kubernetes.io/serviceaccount/token)" -login_payload="$(jq -nc --arg jwt "${jwt}" --arg role "${vault_role}" '{jwt:$jwt, role:$role}')" -vault_token="$(curl -sS --request POST --data "${login_payload}" \ - "${vault_addr}/v1/auth/kubernetes/login" | jq -r '.auth.client_token')" -if [ -z "${vault_token}" ] || [ "${vault_token}" = "null" ]; then - echo "vault login failed" >&2 - exit 1 -fi - -read_secret() { - path="$1" - if [ -f "${path}" ]; then - cat "${path}" - fi -} - -require_value() { - label="$1" - value="$2" - if [ -z "${value}" ]; then - echo "missing ${label}" >&2 - exit 1 - fi -} - -vault_read() { - path="$1" - key="$2" - curl -sS -H "X-Vault-Token: ${vault_token}" \ - "${vault_addr}/v1/kv/data/atlas/${path}" 2>/dev/null | 
\ - jq -r --arg key "${key}" '.data.data[$key] // empty' 2>/dev/null || true -} - -vault_write_json() { - path="$1" - payload="$2" - curl -sS -X POST -H "X-Vault-Token: ${vault_token}" \ - -d "${payload}" "${vault_addr}/v1/kv/data/atlas/${path}" >/dev/null -} - -firefly_db_host="$(read_secret /secrets/firefly-db/DB_HOST)" -if [ -z "${firefly_db_host}" ]; then - firefly_db_host="$(read_secret /secrets/firefly-db/DB_HOSTNAME)" -fi -firefly_db_port="$(read_secret /secrets/firefly-db/DB_PORT)" -firefly_db_name="$(read_secret /secrets/firefly-db/DB_DATABASE)" -if [ -z "${firefly_db_name}" ]; then - firefly_db_name="$(read_secret /secrets/firefly-db/DB_NAME)" -fi -firefly_db_user="$(read_secret /secrets/firefly-db/DB_USERNAME)" -if [ -z "${firefly_db_user}" ]; then - firefly_db_user="$(read_secret /secrets/firefly-db/DB_USER)" -fi -firefly_db_pass="$(read_secret /secrets/firefly-db/DB_PASSWORD)" -if [ -z "${firefly_db_pass}" ]; then - firefly_db_pass="$(read_secret /secrets/firefly-db/DB_PASS)" -fi - -require_value "firefly-db/DB_HOST" "${firefly_db_host}" -require_value "firefly-db/DB_PORT" "${firefly_db_port}" -require_value "firefly-db/DB_DATABASE" "${firefly_db_name}" -require_value "firefly-db/DB_USERNAME" "${firefly_db_user}" -require_value "firefly-db/DB_PASSWORD" "${firefly_db_pass}" - -firefly_payload="$(jq -nc \ - --arg host "${firefly_db_host}" \ - --arg port "${firefly_db_port}" \ - --arg db "${firefly_db_name}" \ - --arg user "${firefly_db_user}" \ - --arg pass "${firefly_db_pass}" \ - '{data:{DB_HOST:$host, DB_PORT:$port, DB_DATABASE:$db, DB_USERNAME:$user, DB_PASSWORD:$pass}}')" -vault_write_json "finance/firefly-db" "${firefly_payload}" - -app_key="$(vault_read "finance/firefly-secrets" "APP_KEY")" -if [ -z "${app_key}" ]; then - app_key="base64:$(head -c 32 /dev/urandom | base64 | tr -d '\n')" -fi -cron_token="$(vault_read "finance/firefly-secrets" "STATIC_CRON_TOKEN")" -if [ -z "${cron_token}" ]; then - cron_token="$(head -c 32 /dev/urandom | base64 | 
tr -d '\n' | tr '+/' '-_' | tr -d '=')" -fi -firefly_secret_payload="$(jq -nc \ - --arg app_key "${app_key}" \ - --arg cron "${cron_token}" \ - '{data:{APP_KEY:$app_key, STATIC_CRON_TOKEN:$cron}}')" -vault_write_json "finance/firefly-secrets" "${firefly_secret_payload}" - -if [ -d /secrets/actualbudget-db ]; then - actual_db_host="$(read_secret /secrets/actualbudget-db/DB_HOST)" - if [ -z "${actual_db_host}" ]; then - actual_db_host="$(read_secret /secrets/actualbudget-db/DB_HOSTNAME)" - fi - actual_db_port="$(read_secret /secrets/actualbudget-db/DB_PORT)" - actual_db_name="$(read_secret /secrets/actualbudget-db/DB_DATABASE)" - if [ -z "${actual_db_name}" ]; then - actual_db_name="$(read_secret /secrets/actualbudget-db/DB_NAME)" - fi - actual_db_user="$(read_secret /secrets/actualbudget-db/DB_USERNAME)" - if [ -z "${actual_db_user}" ]; then - actual_db_user="$(read_secret /secrets/actualbudget-db/DB_USER)" - fi - actual_db_pass="$(read_secret /secrets/actualbudget-db/DB_PASSWORD)" - if [ -z "${actual_db_pass}" ]; then - actual_db_pass="$(read_secret /secrets/actualbudget-db/DB_PASS)" - fi - - if [ -n "${actual_db_host}${actual_db_port}${actual_db_name}${actual_db_user}${actual_db_pass}" ]; then - require_value "actualbudget-db/DB_HOST" "${actual_db_host}" - require_value "actualbudget-db/DB_PORT" "${actual_db_port}" - require_value "actualbudget-db/DB_DATABASE" "${actual_db_name}" - require_value "actualbudget-db/DB_USERNAME" "${actual_db_user}" - require_value "actualbudget-db/DB_PASSWORD" "${actual_db_pass}" - - actual_payload="$(jq -nc \ - --arg host "${actual_db_host}" \ - --arg port "${actual_db_port}" \ - --arg db "${actual_db_name}" \ - --arg user "${actual_db_user}" \ - --arg pass "${actual_db_pass}" \ - '{data:{DB_HOST:$host, DB_PORT:$port, DB_DATABASE:$db, DB_USERNAME:$user, DB_PASSWORD:$pass}}')" - vault_write_json "finance/actual-db" "${actual_payload}" - else - echo "actualbudget-db secret empty; skipping actual-db vault write" >&2 - fi -fi From 
3274b9257c6df9eb9720d265aff20c3271678c7a Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Sat, 17 Jan 2026 02:24:50 -0300 Subject: [PATCH 185/270] comms: restart mas after db sync --- services/comms/mas-deployment.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/services/comms/mas-deployment.yaml b/services/comms/mas-deployment.yaml index afe6135..0ab2071 100644 --- a/services/comms/mas-deployment.yaml +++ b/services/comms/mas-deployment.yaml @@ -13,7 +13,7 @@ spec: template: metadata: annotations: - checksum/config: v5-adminapi-7 + checksum/config: v5-adminapi-8 vault.hashicorp.com/agent-inject: "true" vault.hashicorp.com/agent-init-first: "true" vault.hashicorp.com/role: "comms" From 1cc1b9bea5c923894f361272f644404be9fa6b66 Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Sat, 17 Jan 2026 02:28:21 -0300 Subject: [PATCH 186/270] comms: rerun mas-dependent jobs --- services/comms/bstein-force-leave-job.yaml | 2 +- services/comms/mas-local-users-ensure-job.yaml | 2 +- services/comms/othrys-kick-numeric-job.yaml | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/services/comms/bstein-force-leave-job.yaml b/services/comms/bstein-force-leave-job.yaml index a6105cc..0286f8c 100644 --- a/services/comms/bstein-force-leave-job.yaml +++ b/services/comms/bstein-force-leave-job.yaml @@ -2,7 +2,7 @@ apiVersion: batch/v1 kind: Job metadata: - name: bstein-leave-rooms-11 + name: bstein-leave-rooms-12 namespace: comms spec: backoffLimit: 0 diff --git a/services/comms/mas-local-users-ensure-job.yaml b/services/comms/mas-local-users-ensure-job.yaml index a998445..8dcf8cf 100644 --- a/services/comms/mas-local-users-ensure-job.yaml +++ b/services/comms/mas-local-users-ensure-job.yaml @@ -2,7 +2,7 @@ apiVersion: batch/v1 kind: Job metadata: - name: mas-local-users-ensure-11 + name: mas-local-users-ensure-12 namespace: comms spec: backoffLimit: 1 diff --git a/services/comms/othrys-kick-numeric-job.yaml b/services/comms/othrys-kick-numeric-job.yaml 
index df22ae1..ed25515 100644 --- a/services/comms/othrys-kick-numeric-job.yaml +++ b/services/comms/othrys-kick-numeric-job.yaml @@ -2,7 +2,7 @@ apiVersion: batch/v1 kind: Job metadata: - name: othrys-kick-numeric-6 + name: othrys-kick-numeric-7 namespace: comms spec: backoffLimit: 0 From 6028d82aa35fe2700b39405908c3e974d468c5db Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Sat, 17 Jan 2026 02:29:47 -0300 Subject: [PATCH 187/270] finance: expand actual openid env --- services/finance/actual-budget-deployment.yaml | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/services/finance/actual-budget-deployment.yaml b/services/finance/actual-budget-deployment.yaml index 67092a0..cb55d2f 100644 --- a/services/finance/actual-budget-deployment.yaml +++ b/services/finance/actual-budget-deployment.yaml @@ -90,6 +90,14 @@ spec: value: "true" - name: ACTUAL_OPENID_DISCOVERY_URL value: https://sso.bstein.dev/realms/atlas + - name: ACTUAL_OPENID_AUTHORIZATION_ENDPOINT + value: https://sso.bstein.dev/realms/atlas/protocol/openid-connect/auth + - name: ACTUAL_OPENID_TOKEN_ENDPOINT + value: https://sso.bstein.dev/realms/atlas/protocol/openid-connect/token + - name: ACTUAL_OPENID_USERINFO_ENDPOINT + value: https://sso.bstein.dev/realms/atlas/protocol/openid-connect/userinfo + - name: ACTUAL_OPENID_PROVIDER_NAME + value: Atlas SSO - name: ACTUAL_OPENID_SERVER_HOSTNAME value: https://budget.bstein.dev volumeMounts: @@ -120,6 +128,14 @@ spec: value: "true" - name: ACTUAL_OPENID_DISCOVERY_URL value: https://sso.bstein.dev/realms/atlas + - name: ACTUAL_OPENID_AUTHORIZATION_ENDPOINT + value: https://sso.bstein.dev/realms/atlas/protocol/openid-connect/auth + - name: ACTUAL_OPENID_TOKEN_ENDPOINT + value: https://sso.bstein.dev/realms/atlas/protocol/openid-connect/token + - name: ACTUAL_OPENID_USERINFO_ENDPOINT + value: https://sso.bstein.dev/realms/atlas/protocol/openid-connect/userinfo + - name: ACTUAL_OPENID_PROVIDER_NAME + value: Atlas SSO - name: 
ACTUAL_OPENID_SERVER_HOSTNAME value: https://budget.bstein.dev volumeMounts: From c5b8396bd89fb7f66a39f483643f2bf4068f0c8d Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Sat, 17 Jan 2026 02:34:36 -0300 Subject: [PATCH 188/270] comms: retry mas jobs and rerun --- services/comms/mas-local-users-ensure-job.yaml | 14 +++++++++++++- services/comms/othrys-kick-numeric-job.yaml | 14 +++++++++++++- 2 files changed, 26 insertions(+), 2 deletions(-) diff --git a/services/comms/mas-local-users-ensure-job.yaml b/services/comms/mas-local-users-ensure-job.yaml index 8dcf8cf..d5c8471 100644 --- a/services/comms/mas-local-users-ensure-job.yaml +++ b/services/comms/mas-local-users-ensure-job.yaml @@ -2,7 +2,7 @@ apiVersion: batch/v1 kind: Job metadata: - name: mas-local-users-ensure-12 + name: mas-local-users-ensure-13 namespace: comms spec: backoffLimit: 1 @@ -109,6 +109,17 @@ spec: AUTH_BASE = "http://matrix-authentication-service:8080" SERVER_NAME = "live.bstein.dev" + def wait_for_service(url): + last = None + for attempt in range(1, 11): + try: + requests.get(url, timeout=10) + return + except Exception as exc: # noqa: BLE001 + last = exc + time.sleep(attempt * 2) + raise RuntimeError(f"MAS service not reachable: {last}") + def admin_token(): with open(MAS_ADMIN_CLIENT_SECRET_FILE, "r", encoding="utf-8") as f: secret = f.read().strip() @@ -198,6 +209,7 @@ spec: if r.status_code != 200: raise RuntimeError(f"login failed for {username}: {r.status_code} {r.text}") + wait_for_service(MAS_ADMIN_API_BASE) token = admin_token() ensure_user(token, os.environ["SEEDER_USER"], os.environ["SEEDER_PASS"]) ensure_user(token, os.environ["BOT_USER"], os.environ["BOT_PASS"]) diff --git a/services/comms/othrys-kick-numeric-job.yaml b/services/comms/othrys-kick-numeric-job.yaml index ed25515..0d3914a 100644 --- a/services/comms/othrys-kick-numeric-job.yaml +++ b/services/comms/othrys-kick-numeric-job.yaml @@ -2,7 +2,7 @@ apiVersion: batch/v1 kind: Job metadata: - name: othrys-kick-numeric-7 + 
name: othrys-kick-numeric-8 namespace: comms spec: backoffLimit: 0 @@ -107,6 +107,17 @@ spec: def auth(token): return {"Authorization": f"Bearer {token}"} + def wait_for_service(url): + last = None + for attempt in range(1, 11): + try: + requests.get(url, timeout=10) + return + except Exception as exc: # noqa: BLE001 + last = exc + time.sleep(attempt * 2) + raise SystemExit(f"MAS service not reachable: {last}") + def login(user, password): r = requests.post( f"{AUTH_BASE}/_matrix/client/v3/login", @@ -154,6 +165,7 @@ spec: if r.status_code not in (200, 202): raise SystemExit(f"kick {user_id} failed: {r.status_code} {r.text}") + wait_for_service(f"{AUTH_BASE}/_matrix/client/versions") token = login(SEEDER_USER, SEEDER_PASS) room_id = resolve_alias(token, ROOM_ALIAS) for user_id in list_members(token, room_id): From 3cf34b53e9e7b8c3135d3ccbd9e6ab3145dade75 Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Sat, 17 Jan 2026 02:36:08 -0300 Subject: [PATCH 189/270] finance: bump actual server image --- services/finance/actual-budget-deployment.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/services/finance/actual-budget-deployment.yaml b/services/finance/actual-budget-deployment.yaml index cb55d2f..81699aa 100644 --- a/services/finance/actual-budget-deployment.yaml +++ b/services/finance/actual-budget-deployment.yaml @@ -72,7 +72,7 @@ spec: - name: actual-data mountPath: /data - name: init-openid - image: actualbudget/actual-server:sha-b6452f9-alpine + image: actualbudget/actual-server:26.1.0-alpine@sha256:34aae5813fdfee12af2a50c4d0667df68029f1d61b90f45f282473273eb70d0d command: ["/bin/sh", "-c"] args: - | @@ -108,7 +108,7 @@ spec: readOnly: true containers: - name: actual-budget - image: actualbudget/actual-server:sha-b6452f9-alpine + image: actualbudget/actual-server:26.1.0-alpine@sha256:34aae5813fdfee12af2a50c4d0667df68029f1d61b90f45f282473273eb70d0d command: ["/bin/sh", "-c"] args: - | From 343165b2fae6a47a1d2c04a6b65d579135aa7cb8 Mon Sep 17 
00:00:00 2001 From: Brad Stein Date: Sat, 17 Jan 2026 02:39:11 -0300 Subject: [PATCH 190/270] finance: drop dependency gating --- .../flux-system/applications/finance/kustomization.yaml | 5 ----- 1 file changed, 5 deletions(-) diff --git a/clusters/atlas/flux-system/applications/finance/kustomization.yaml b/clusters/atlas/flux-system/applications/finance/kustomization.yaml index 1bb87d0..370e2d1 100644 --- a/clusters/atlas/flux-system/applications/finance/kustomization.yaml +++ b/clusters/atlas/flux-system/applications/finance/kustomization.yaml @@ -12,11 +12,6 @@ spec: kind: GitRepository name: flux-system targetNamespace: finance - dependsOn: - - name: keycloak - - name: postgres - - name: traefik - - name: vault healthChecks: - apiVersion: apps/v1 kind: Deployment From b0ac30e719c72d325ee60359da7515210e52f2f4 Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Sat, 17 Jan 2026 02:43:15 -0300 Subject: [PATCH 191/270] comms: retry mas local users and rerun --- .../comms/mas-local-users-ensure-job.yaml | 24 +++++++++++++++---- 1 file changed, 19 insertions(+), 5 deletions(-) diff --git a/services/comms/mas-local-users-ensure-job.yaml b/services/comms/mas-local-users-ensure-job.yaml index d5c8471..c742d48 100644 --- a/services/comms/mas-local-users-ensure-job.yaml +++ b/services/comms/mas-local-users-ensure-job.yaml @@ -2,7 +2,7 @@ apiVersion: batch/v1 kind: Job metadata: - name: mas-local-users-ensure-13 + name: mas-local-users-ensure-14 namespace: comms spec: backoffLimit: 1 @@ -120,6 +120,16 @@ spec: time.sleep(attempt * 2) raise RuntimeError(f"MAS service not reachable: {last}") + def request_with_retry(method, url, attempts=6, **kwargs): + last = None + for attempt in range(1, attempts + 1): + try: + return requests.request(method, url, **kwargs) + except requests.RequestException as exc: + last = exc + time.sleep(attempt * 2) + raise RuntimeError(f"request failed for {url}: {last}") + def admin_token(): with open(MAS_ADMIN_CLIENT_SECRET_FILE, "r", 
encoding="utf-8") as f: secret = f.read().strip() @@ -141,7 +151,8 @@ spec: raise RuntimeError(f"MAS admin token request failed: {last}") def get_user(token, username): - r = requests.get( + r = request_with_retry( + "GET", f"{MAS_ADMIN_API_BASE}/users/by-username/{urllib.parse.quote(username)}", headers={"Authorization": f"Bearer {token}"}, timeout=30, @@ -165,7 +176,8 @@ spec: {"username": username, "password": password}, ] for payload in payloads: - r = requests.post( + r = request_with_retry( + "POST", f"{MAS_ADMIN_API_BASE}/users", headers={"Authorization": f"Bearer {token}"}, json=payload, @@ -178,7 +190,8 @@ spec: return None def update_password(token, user_id, password): - r = requests.post( + r = request_with_retry( + "POST", f"{MAS_ADMIN_API_BASE}/users/{urllib.parse.quote(user_id)}/set-password", headers={"Authorization": f"Bearer {token}"}, json={"password": password}, @@ -197,7 +210,8 @@ spec: login_name = username if not login_name.startswith("@"): login_name = f"@{login_name}:{SERVER_NAME}" - r = requests.post( + r = request_with_retry( + "POST", f"{AUTH_BASE}/_matrix/client/v3/login", json={ "type": "m.login.password", From cee565892be2bebe7b5c813e95c6a3649f9f83f1 Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Sat, 17 Jan 2026 02:43:25 -0300 Subject: [PATCH 192/270] finance: harden actual openid bootstrap --- .../scripts/actual_openid_bootstrap.mjs | 32 ++++++++++++++++++- 1 file changed, 31 insertions(+), 1 deletion(-) diff --git a/services/finance/scripts/actual_openid_bootstrap.mjs b/services/finance/scripts/actual_openid_bootstrap.mjs index af14524..3b66fc2 100644 --- a/services/finance/scripts/actual_openid_bootstrap.mjs +++ b/services/finance/scripts/actual_openid_bootstrap.mjs @@ -36,7 +36,37 @@ const loadConfigUrl = pathToFileURL(path.join(root, 'src', 'load-config.js')).hr const accountDb = await import(accountDbUrl); const { default: finalConfig } = await import(loadConfigUrl); -const openId = finalConfig?.openId; +const openIdEnv = (() 
=> { + if ( + !process.env.ACTUAL_OPENID_DISCOVERY_URL && + !process.env.ACTUAL_OPENID_AUTHORIZATION_ENDPOINT + ) { + return null; + } + + if (process.env.ACTUAL_OPENID_DISCOVERY_URL) { + return { + issuer: process.env.ACTUAL_OPENID_DISCOVERY_URL, + client_id: process.env.ACTUAL_OPENID_CLIENT_ID, + client_secret: process.env.ACTUAL_OPENID_CLIENT_SECRET, + server_hostname: process.env.ACTUAL_OPENID_SERVER_HOSTNAME, + }; + } + + return { + issuer: { + name: process.env.ACTUAL_OPENID_PROVIDER_NAME, + authorization_endpoint: process.env.ACTUAL_OPENID_AUTHORIZATION_ENDPOINT, + token_endpoint: process.env.ACTUAL_OPENID_TOKEN_ENDPOINT, + userinfo_endpoint: process.env.ACTUAL_OPENID_USERINFO_ENDPOINT, + }, + client_id: process.env.ACTUAL_OPENID_CLIENT_ID, + client_secret: process.env.ACTUAL_OPENID_CLIENT_SECRET, + server_hostname: process.env.ACTUAL_OPENID_SERVER_HOSTNAME, + }; +})(); + +const openId = finalConfig?.openId ?? openIdEnv; if (!openId) { console.error('missing openid configuration'); process.exit(1); From 36d0df817a551e30d4eb3e8d742871ab0426cb6b Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Sat, 17 Jan 2026 02:46:16 -0300 Subject: [PATCH 193/270] finance: roll actual bootstrap --- services/finance/actual-budget-deployment.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/services/finance/actual-budget-deployment.yaml b/services/finance/actual-budget-deployment.yaml index 81699aa..d14d197 100644 --- a/services/finance/actual-budget-deployment.yaml +++ b/services/finance/actual-budget-deployment.yaml @@ -31,6 +31,7 @@ spec: export ACTUAL_OPENID_CLIENT_ID="{{ .Data.data.ACTUAL_OPENID_CLIENT_ID }}" export ACTUAL_OPENID_CLIENT_SECRET="{{ .Data.data.ACTUAL_OPENID_CLIENT_SECRET }}" {{ end }} + actual.bstein.dev/bootstrap-rev: "2" spec: serviceAccountName: finance-vault nodeSelector: From 9a3c3a3d3edc7ded9003f8986845fe0afc0ca4b1 Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Sat, 17 Jan 2026 02:49:25 -0300 Subject: [PATCH 194/270] vault: retry status checks in 
config jobs --- services/vault/scripts/vault_k8s_auth_configure.sh | 10 +++++++++- services/vault/scripts/vault_oidc_configure.sh | 10 +++++++++- 2 files changed, 18 insertions(+), 2 deletions(-) diff --git a/services/vault/scripts/vault_k8s_auth_configure.sh b/services/vault/scripts/vault_k8s_auth_configure.sh index 140f1d4..48dfe78 100644 --- a/services/vault/scripts/vault_k8s_auth_configure.sh +++ b/services/vault/scripts/vault_k8s_auth_configure.sh @@ -16,7 +16,15 @@ ensure_token() { export VAULT_TOKEN } -status_json="$(vault status -format=json || true)" +status_json="" +for attempt in 1 2 3 4 5 6; do + status_json="$(vault status -format=json 2>/dev/null || true)" + if [ -n "${status_json}" ]; then + break + fi + log "vault status failed; retrying (${attempt}/6)" + sleep $((attempt * 2)) +done if [ -z "${status_json}" ]; then log "vault status failed; check VAULT_ADDR and VAULT_TOKEN" exit 1 diff --git a/services/vault/scripts/vault_oidc_configure.sh b/services/vault/scripts/vault_oidc_configure.sh index af74f60..0f569e8 100644 --- a/services/vault/scripts/vault_oidc_configure.sh +++ b/services/vault/scripts/vault_oidc_configure.sh @@ -16,7 +16,15 @@ ensure_token() { export VAULT_TOKEN } -status_json="$(vault status -format=json || true)" +status_json="" +for attempt in 1 2 3 4 5 6; do + status_json="$(vault status -format=json 2>/dev/null || true)" + if [ -n "${status_json}" ]; then + break + fi + log "vault status failed; retrying (${attempt}/6)" + sleep $((attempt * 2)) +done if [ -z "${status_json}" ]; then log "vault status failed; check VAULT_ADDR and VAULT_TOKEN" exit 1 From 84fa9e7dbce128246c274e52fb5a1782a23e7247 Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Sat, 17 Jan 2026 02:50:11 -0300 Subject: [PATCH 195/270] finance: prepare actual data dirs --- services/finance/actual-budget-deployment.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/services/finance/actual-budget-deployment.yaml 
b/services/finance/actual-budget-deployment.yaml index d14d197..299e941 100644 --- a/services/finance/actual-budget-deployment.yaml +++ b/services/finance/actual-budget-deployment.yaml @@ -64,7 +64,7 @@ spec: args: - | set -e - mkdir -p /data + mkdir -p /data /data/server-files /data/user-files chown -R 1000:1000 /data securityContext: runAsUser: 0 From ba546bf63f6f2f28a4ff04a4a064086f06fb811d Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Sat, 17 Jan 2026 02:54:38 -0300 Subject: [PATCH 196/270] portal: retry vaultwarden cred sync --- .../scripts/vaultwarden_cred_sync.py | 31 ++++++++++++++++--- 1 file changed, 26 insertions(+), 5 deletions(-) diff --git a/services/bstein-dev-home/scripts/vaultwarden_cred_sync.py b/services/bstein-dev-home/scripts/vaultwarden_cred_sync.py index d259b31..9ee4eeb 100644 --- a/services/bstein-dev-home/scripts/vaultwarden_cred_sync.py +++ b/services/bstein-dev-home/scripts/vaultwarden_cred_sync.py @@ -26,14 +26,22 @@ def _iter_keycloak_users(page_size: int = 200) -> Iterable[dict[str, Any]]: url = f"{settings.KEYCLOAK_ADMIN_URL}/admin/realms/{settings.KEYCLOAK_REALM}/users" first = 0 while True: - headers = client.headers() + headers = _headers_with_retry(client) # We need attributes for idempotency (vaultwarden_status/vaultwarden_email). Keycloak defaults to a # brief representation which may omit these. 
params = {"first": str(first), "max": str(page_size), "briefRepresentation": "false"} - with httpx.Client(timeout=settings.HTTP_CHECK_TIMEOUT_SEC) as http: - resp = http.get(url, params=params, headers=headers) - resp.raise_for_status() - payload = resp.json() + payload = None + for attempt in range(1, 6): + try: + with httpx.Client(timeout=settings.HTTP_CHECK_TIMEOUT_SEC) as http: + resp = http.get(url, params=params, headers=headers) + resp.raise_for_status() + payload = resp.json() + break + except httpx.HTTPError as exc: + if attempt == 5: + raise + time.sleep(attempt * 2) if not isinstance(payload, list) or not payload: return @@ -47,6 +55,19 @@ def _iter_keycloak_users(page_size: int = 200) -> Iterable[dict[str, Any]]: first += page_size +def _headers_with_retry(client, attempts: int = 6) -> dict[str, str]: + last_exc: Exception | None = None + for attempt in range(1, attempts + 1): + try: + return client.headers() + except Exception as exc: + last_exc = exc + time.sleep(attempt * 2) + if last_exc: + raise last_exc + raise RuntimeError("failed to fetch keycloak headers") + + def _extract_attr(attrs: Any, key: str) -> str: if not isinstance(attrs, dict): return "" From 376eae3fa1f561a935e594da1e27bc477f97b0da Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Sat, 17 Jan 2026 02:55:20 -0300 Subject: [PATCH 197/270] finance: migrate actual db before bootstrap --- services/finance/actual-budget-deployment.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/services/finance/actual-budget-deployment.yaml b/services/finance/actual-budget-deployment.yaml index 299e941..c4aeeaa 100644 --- a/services/finance/actual-budget-deployment.yaml +++ b/services/finance/actual-budget-deployment.yaml @@ -79,6 +79,7 @@ spec: - | set -eu . 
/vault/secrets/actual-env.sh + node /app/src/scripts/run-migrations.js node /scripts/actual_openid_bootstrap.mjs env: - name: ACTUAL_DATA_DIR From 008130f8d0ce2db8f7758539d8ce3bc27db5955a Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Sat, 17 Jan 2026 02:59:38 -0300 Subject: [PATCH 198/270] finance: roll firefly after secrets --- services/finance/firefly-deployment.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/services/finance/firefly-deployment.yaml b/services/finance/firefly-deployment.yaml index ff95dad..009f289 100644 --- a/services/finance/firefly-deployment.yaml +++ b/services/finance/firefly-deployment.yaml @@ -56,6 +56,7 @@ spec: {{- with secret "kv/data/atlas/finance/firefly-secrets" -}} {{ .Data.data.STATIC_CRON_TOKEN }} {{- end -}} + firefly.bstein.dev/restart-rev: "2" spec: serviceAccountName: finance-vault nodeSelector: From a9c2d3c5e85e8ad6544e0910583a57df1c2b5e21 Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Sat, 17 Jan 2026 03:00:25 -0300 Subject: [PATCH 199/270] vault: retry vault cli operations --- .../vault/scripts/vault_k8s_auth_configure.sh | 42 +++++++++++-------- .../vault/scripts/vault_oidc_configure.sh | 38 ++++++++++------- 2 files changed, 46 insertions(+), 34 deletions(-) diff --git a/services/vault/scripts/vault_k8s_auth_configure.sh b/services/vault/scripts/vault_k8s_auth_configure.sh index 48dfe78..bbb5e3a 100644 --- a/services/vault/scripts/vault_k8s_auth_configure.sh +++ b/services/vault/scripts/vault_k8s_auth_configure.sh @@ -3,29 +3,35 @@ set -eu log() { echo "[vault-k8s-auth] $*"; } +vault_cmd() { + for attempt in 1 2 3 4 5 6; do + output="$(vault "$@" 2>&1)" + status=$? 
+ if [ "${status}" -eq 0 ]; then + printf '%s' "${output}" + return 0 + fi + log "vault command failed; retrying (${attempt}/6)" + sleep $((attempt * 2)) + done + log "vault command failed; giving up" + return 1 +} + ensure_token() { if [ -n "${VAULT_TOKEN:-}" ]; then return fi role="${VAULT_K8S_ROLE:-vault}" jwt="$(cat /var/run/secrets/kubernetes.io/serviceaccount/token)" - if ! VAULT_TOKEN="$(vault write -field=token auth/kubernetes/login role="${role}" jwt="${jwt}")"; then + if ! VAULT_TOKEN="$(vault_cmd write -field=token auth/kubernetes/login role="${role}" jwt="${jwt}")"; then log "kubernetes auth login failed; set VAULT_TOKEN or fix role ${role}" exit 1 fi export VAULT_TOKEN } -status_json="" -for attempt in 1 2 3 4 5 6; do - status_json="$(vault status -format=json 2>/dev/null || true)" - if [ -n "${status_json}" ]; then - break - fi - log "vault status failed; retrying (${attempt}/6)" - sleep $((attempt * 2)) -done -if [ -z "${status_json}" ]; then +if ! status_json="$(vault_cmd status -format=json)"; then log "vault status failed; check VAULT_ADDR and VAULT_TOKEN" exit 1 fi @@ -55,13 +61,13 @@ if [ -z "${token_reviewer_jwt}" ]; then token_reviewer_jwt="${k8s_token}" fi -if ! vault auth list -format=json | grep -q '"kubernetes/"'; then +if ! 
vault_cmd auth list -format=json | grep -q '"kubernetes/"'; then log "enabling kubernetes auth" - vault auth enable kubernetes + vault_cmd auth enable kubernetes fi log "configuring kubernetes auth" -vault write auth/kubernetes/config \ +vault_cmd write auth/kubernetes/config \ token_reviewer_jwt="${token_reviewer_jwt}" \ kubernetes_host="${k8s_host}" \ kubernetes_ca_cert="${k8s_ca}" @@ -70,7 +76,7 @@ write_raw_policy() { name="$1" body="$2" log "writing policy ${name}" - printf '%s\n' "${body}" | vault policy write "${name}" - + printf '%s\n' "${body}" | vault_cmd policy write "${name}" - } write_policy_and_role() { @@ -103,10 +109,10 @@ path \"kv/metadata/atlas/${path}\" { done log "writing policy ${role}" - printf '%s\n' "${policy_body}" | vault policy write "${role}" - + printf '%s\n' "${policy_body}" | vault_cmd policy write "${role}" - log "writing role ${role}" - vault write "auth/kubernetes/role/${role}" \ + vault_cmd write "auth/kubernetes/role/${role}" \ bound_service_account_names="${service_accounts}" \ bound_service_account_namespaces="${namespace}" \ policies="${role}" \ @@ -184,7 +190,7 @@ path "kv/data/atlas/shared/*" { ' write_raw_policy "dev-kv" "${dev_kv_policy}" log "writing role vault-admin" -vault write "auth/kubernetes/role/vault-admin" \ +vault_cmd write "auth/kubernetes/role/vault-admin" \ bound_service_account_names="vault-admin" \ bound_service_account_namespaces="vault" \ policies="vault-admin" \ diff --git a/services/vault/scripts/vault_oidc_configure.sh b/services/vault/scripts/vault_oidc_configure.sh index 0f569e8..d703ed5 100644 --- a/services/vault/scripts/vault_oidc_configure.sh +++ b/services/vault/scripts/vault_oidc_configure.sh @@ -3,29 +3,35 @@ set -eu log() { echo "[vault-oidc] $*"; } +vault_cmd() { + for attempt in 1 2 3 4 5 6; do + output="$(vault "$@" 2>&1)" + status=$? 
+ if [ "${status}" -eq 0 ]; then + printf '%s' "${output}" + return 0 + fi + log "vault command failed; retrying (${attempt}/6)" + sleep $((attempt * 2)) + done + log "vault command failed; giving up" + return 1 +} + ensure_token() { if [ -n "${VAULT_TOKEN:-}" ]; then return fi role="${VAULT_K8S_ROLE:-vault}" jwt="$(cat /var/run/secrets/kubernetes.io/serviceaccount/token)" - if ! VAULT_TOKEN="$(vault write -field=token auth/kubernetes/login role="${role}" jwt="${jwt}")"; then + if ! VAULT_TOKEN="$(vault_cmd write -field=token auth/kubernetes/login role="${role}" jwt="${jwt}")"; then log "kubernetes auth login failed; set VAULT_TOKEN or fix role ${role}" exit 1 fi export VAULT_TOKEN } -status_json="" -for attempt in 1 2 3 4 5 6; do - status_json="$(vault status -format=json 2>/dev/null || true)" - if [ -n "${status_json}" ]; then - break - fi - log "vault status failed; retrying (${attempt}/6)" - sleep $((attempt * 2)) -done -if [ -z "${status_json}" ]; then +if ! status_json="$(vault_cmd status -format=json)"; then log "vault status failed; check VAULT_ADDR and VAULT_TOKEN" exit 1 fi @@ -65,19 +71,19 @@ dev_policies="${VAULT_OIDC_DEV_POLICIES:-default,dev-kv}" user_group="${VAULT_OIDC_USER_GROUP:-${dev_group}}" user_policies="${VAULT_OIDC_USER_POLICIES:-${VAULT_OIDC_TOKEN_POLICIES:-${dev_policies}}}" -if ! vault auth list -format=json | grep -q '"oidc/"'; then +if ! 
vault_cmd auth list -format=json | grep -q '"oidc/"'; then log "enabling oidc auth method" - vault auth enable oidc + vault_cmd auth enable oidc fi log "configuring oidc auth" -vault write auth/oidc/config \ +vault_cmd write auth/oidc/config \ oidc_discovery_url="${VAULT_OIDC_DISCOVERY_URL}" \ oidc_client_id="${VAULT_OIDC_CLIENT_ID}" \ oidc_client_secret="${VAULT_OIDC_CLIENT_SECRET}" \ default_role="${default_role}" -vault auth tune -listing-visibility=unauth oidc >/dev/null +vault_cmd auth tune -listing-visibility=unauth oidc >/dev/null build_bound_claims() { claim="$1" @@ -149,7 +155,7 @@ configure_role() { } EOF log "configuring oidc role ${role_name}" - vault write "auth/oidc/role/${role_name}" @"${payload_file}" + vault_cmd write "auth/oidc/role/${role_name}" @"${payload_file}" rm -f "${payload_file}" } From ee622cbb0b12c51956ab082bb74a8563a0751325 Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Sat, 17 Jan 2026 03:03:16 -0300 Subject: [PATCH 200/270] finance: source firefly env in shell --- services/finance/firefly-deployment.yaml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/services/finance/firefly-deployment.yaml b/services/finance/firefly-deployment.yaml index 009f289..c077f75 100644 --- a/services/finance/firefly-deployment.yaml +++ b/services/finance/firefly-deployment.yaml @@ -100,7 +100,8 @@ spec: containers: - name: firefly image: fireflyiii/core:version-6.4.15 - args: ["/bin/sh", "-c", ". /vault/secrets/firefly-env.sh && exec /init"] + command: ["/bin/sh", "-c"] + args: [". 
/vault/secrets/firefly-env.sh && exec /init"] env: - name: APP_ENV value: production From ef5ac62544740b4458296d3933459d1b93a977ca Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Sat, 17 Jan 2026 03:09:33 -0300 Subject: [PATCH 201/270] vault: make retry helper resilient --- services/vault/scripts/vault_k8s_auth_configure.sh | 2 ++ services/vault/scripts/vault_oidc_configure.sh | 2 ++ 2 files changed, 4 insertions(+) diff --git a/services/vault/scripts/vault_k8s_auth_configure.sh b/services/vault/scripts/vault_k8s_auth_configure.sh index bbb5e3a..7566866 100644 --- a/services/vault/scripts/vault_k8s_auth_configure.sh +++ b/services/vault/scripts/vault_k8s_auth_configure.sh @@ -5,8 +5,10 @@ log() { echo "[vault-k8s-auth] $*"; } vault_cmd() { for attempt in 1 2 3 4 5 6; do + set +e output="$(vault "$@" 2>&1)" status=$? + set -e if [ "${status}" -eq 0 ]; then printf '%s' "${output}" return 0 diff --git a/services/vault/scripts/vault_oidc_configure.sh b/services/vault/scripts/vault_oidc_configure.sh index d703ed5..70da3b7 100644 --- a/services/vault/scripts/vault_oidc_configure.sh +++ b/services/vault/scripts/vault_oidc_configure.sh @@ -5,8 +5,10 @@ log() { echo "[vault-oidc] $*"; } vault_cmd() { for attempt in 1 2 3 4 5 6; do + set +e output="$(vault "$@" 2>&1)" status=$? 
+ set -e if [ "${status}" -eq 0 ]; then printf '%s' "${output}" return 0 From 728f2cd2ee6ce387f8fef33d532465543b1b256c Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Sat, 17 Jan 2026 03:17:36 -0300 Subject: [PATCH 202/270] vault: pin cronjobs to service IP --- services/vault/k8s-auth-config-cronjob.yaml | 2 +- services/vault/oidc-config-cronjob.yaml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/services/vault/k8s-auth-config-cronjob.yaml b/services/vault/k8s-auth-config-cronjob.yaml index a49fe7d..6a644df 100644 --- a/services/vault/k8s-auth-config-cronjob.yaml +++ b/services/vault/k8s-auth-config-cronjob.yaml @@ -28,7 +28,7 @@ spec: - /scripts/vault_k8s_auth_configure.sh env: - name: VAULT_ADDR - value: http://vault.vault.svc.cluster.local:8200 + value: http://10.43.57.249:8200 - name: VAULT_K8S_ROLE value: vault-admin - name: VAULT_K8S_TOKEN_REVIEWER_JWT_FILE diff --git a/services/vault/oidc-config-cronjob.yaml b/services/vault/oidc-config-cronjob.yaml index 6d98ecb..3140073 100644 --- a/services/vault/oidc-config-cronjob.yaml +++ b/services/vault/oidc-config-cronjob.yaml @@ -57,7 +57,7 @@ spec: - /scripts/vault_oidc_configure.sh env: - name: VAULT_ADDR - value: http://vault.vault.svc.cluster.local:8200 + value: http://10.43.57.249:8200 - name: VAULT_K8S_ROLE value: vault-admin - name: VAULT_ENV_FILE From acfab6a15072d75f613344c8db5bbc68ebbde76c Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Sat, 17 Jan 2026 03:24:30 -0300 Subject: [PATCH 203/270] sso: retry keycloak secret jobs --- services/keycloak/actual-oidc-secret-ensure-job.yaml | 2 +- services/keycloak/mas-secrets-ensure-job.yaml | 9 ++++++++- services/keycloak/scripts/actual_oidc_secret_ensure.sh | 9 ++++++++- 3 files changed, 17 insertions(+), 3 deletions(-) diff --git a/services/keycloak/actual-oidc-secret-ensure-job.yaml b/services/keycloak/actual-oidc-secret-ensure-job.yaml index 22ba34f..3dadb52 100644 --- a/services/keycloak/actual-oidc-secret-ensure-job.yaml +++ 
b/services/keycloak/actual-oidc-secret-ensure-job.yaml @@ -2,7 +2,7 @@ apiVersion: batch/v1 kind: Job metadata: - name: actual-oidc-secret-ensure-2 + name: actual-oidc-secret-ensure-3 namespace: sso spec: backoffLimit: 0 diff --git a/services/keycloak/mas-secrets-ensure-job.yaml b/services/keycloak/mas-secrets-ensure-job.yaml index 9d97f72..f5679cb 100644 --- a/services/keycloak/mas-secrets-ensure-job.yaml +++ b/services/keycloak/mas-secrets-ensure-job.yaml @@ -10,7 +10,7 @@ imagePullSecrets: apiVersion: batch/v1 kind: Job metadata: - name: mas-secrets-ensure-19 + name: mas-secrets-ensure-20 namespace: sso spec: backoffLimit: 0 @@ -49,6 +49,13 @@ spec: umask 077 KC_URL="http://keycloak.sso.svc.cluster.local" + for attempt in 1 2 3 4 5 6 7 8 9 10; do + if curl -fsS "${KC_URL}/realms/master" >/dev/null 2>&1; then + break + fi + echo "Waiting for Keycloak to be reachable (attempt ${attempt})" >&2 + sleep $((attempt * 2)) + done ACCESS_TOKEN="" for attempt in 1 2 3 4 5; do TOKEN_JSON="$(curl -sS -X POST "$KC_URL/realms/master/protocol/openid-connect/token" \ diff --git a/services/keycloak/scripts/actual_oidc_secret_ensure.sh b/services/keycloak/scripts/actual_oidc_secret_ensure.sh index 3ed6e6a..deb019a 100644 --- a/services/keycloak/scripts/actual_oidc_secret_ensure.sh +++ b/services/keycloak/scripts/actual_oidc_secret_ensure.sh @@ -5,6 +5,13 @@ set -euo pipefail KC_URL="http://keycloak.sso.svc.cluster.local" ACCESS_TOKEN="" +for attempt in 1 2 3 4 5 6 7 8 9 10; do + if curl -fsS "${KC_URL}/realms/master" >/dev/null 2>&1; then + break + fi + echo "Waiting for Keycloak to be reachable (attempt ${attempt})" >&2 + sleep $((attempt * 2)) +done for attempt in 1 2 3 4 5; do TOKEN_JSON="$(curl -sS -X POST "$KC_URL/realms/master/protocol/openid-connect/token" \ -H 'Content-Type: application/x-www-form-urlencoded' \ @@ -35,7 +42,7 @@ if [ -z "$CLIENT_ID" ] || [ "$CLIENT_ID" = "null" ]; then -H 'Content-Type: application/json' \ -d "${create_payload}" \ 
"$KC_URL/admin/realms/atlas/clients")" - if [ "$status" != "201" ] && [ "$status" != "204" ]; then + if [ "$status" != "201" ] && [ "$status" != "204" ] && [ "$status" != "409" ]; then echo "Keycloak client create failed (status ${status})" >&2 exit 1 fi From 268a1d9449da0740fd7908a9ba4770d09d64ac34 Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Sat, 17 Jan 2026 03:29:36 -0300 Subject: [PATCH 204/270] sso: retry mas secret lookup --- services/keycloak/mas-secrets-ensure-job.yaml | 27 +++++++++++++++---- 1 file changed, 22 insertions(+), 5 deletions(-) diff --git a/services/keycloak/mas-secrets-ensure-job.yaml b/services/keycloak/mas-secrets-ensure-job.yaml index f5679cb..24c9e04 100644 --- a/services/keycloak/mas-secrets-ensure-job.yaml +++ b/services/keycloak/mas-secrets-ensure-job.yaml @@ -10,7 +10,7 @@ imagePullSecrets: apiVersion: batch/v1 kind: Job metadata: - name: mas-secrets-ensure-20 + name: mas-secrets-ensure-21 namespace: sso spec: backoffLimit: 0 @@ -75,14 +75,31 @@ spec: echo "Failed to fetch Keycloak admin token" >&2 exit 1 fi - CLIENT_ID="$(curl -sS -H "Authorization: Bearer ${ACCESS_TOKEN}" \ - "$KC_URL/admin/realms/atlas/clients?clientId=othrys-mas" | jq -r '.[0].id' 2>/dev/null || true)" + CLIENT_ID="" + for attempt in 1 2 3 4 5; do + CLIENT_QUERY="$(curl -sS -H "Authorization: Bearer ${ACCESS_TOKEN}" \ + "$KC_URL/admin/realms/atlas/clients?clientId=othrys-mas" || true)" + CLIENT_ID="$(echo "$CLIENT_QUERY" | jq -r '.[0].id' 2>/dev/null || true)" + if [ -n "$CLIENT_ID" ] && [ "$CLIENT_ID" != "null" ]; then + break + fi + echo "Keycloak client lookup failed (attempt ${attempt})" >&2 + sleep $((attempt * 2)) + done if [ -z "$CLIENT_ID" ] || [ "$CLIENT_ID" = "null" ]; then echo "Keycloak client othrys-mas not found" >&2 exit 1 fi - CLIENT_SECRET="$(curl -sS -H "Authorization: Bearer ${ACCESS_TOKEN}" \ - "$KC_URL/admin/realms/atlas/clients/${CLIENT_ID}/client-secret" | jq -r '.value' 2>/dev/null || true)" + CLIENT_SECRET="" + for attempt in 1 2 3 4 5; 
do + CLIENT_SECRET="$(curl -sS -H "Authorization: Bearer ${ACCESS_TOKEN}" \ + "$KC_URL/admin/realms/atlas/clients/${CLIENT_ID}/client-secret" | jq -r '.value' 2>/dev/null || true)" + if [ -n "$CLIENT_SECRET" ] && [ "$CLIENT_SECRET" != "null" ]; then + break + fi + echo "Keycloak client secret lookup failed (attempt ${attempt})" >&2 + sleep $((attempt * 2)) + done if [ -z "$CLIENT_SECRET" ] || [ "$CLIENT_SECRET" = "null" ]; then echo "Keycloak client secret not found" >&2 exit 1 From 356dba3a333aa7dae3bebf5684e7179bb811e123 Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Sat, 17 Jan 2026 06:56:45 -0300 Subject: [PATCH 205/270] core: add finance hosts to coredns --- infrastructure/core/coredns-custom.yaml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/infrastructure/core/coredns-custom.yaml b/infrastructure/core/coredns-custom.yaml index ad07d2a..4002849 100644 --- a/infrastructure/core/coredns-custom.yaml +++ b/infrastructure/core/coredns-custom.yaml @@ -13,6 +13,7 @@ data: 192.168.22.9 alerts.bstein.dev 192.168.22.9 auth.bstein.dev 192.168.22.9 bstein.dev + 192.168.22.9 budget.bstein.dev 192.168.22.9 call.live.bstein.dev 192.168.22.9 cd.bstein.dev 192.168.22.9 chat.ai.bstein.dev @@ -27,6 +28,7 @@ data: 192.168.22.9 matrix.live.bstein.dev 192.168.22.9 metrics.bstein.dev 192.168.22.9 monero.bstein.dev + 192.168.22.9 money.bstein.dev 192.168.22.9 notes.bstein.dev 192.168.22.9 office.bstein.dev 192.168.22.9 pegasus.bstein.dev From 71bab17665121bd6e2c5e2b5dd8b55ea6777b872 Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Sat, 17 Jan 2026 07:32:57 -0300 Subject: [PATCH 206/270] comms: fix matrix login routing and prune guests --- services/comms/guest-name-job.yaml | 37 +++++++++++++++++++++++++++++- services/comms/helmrelease.yaml | 19 +-------------- 2 files changed, 37 insertions(+), 19 deletions(-) diff --git a/services/comms/guest-name-job.yaml b/services/comms/guest-name-job.yaml index 0ba2f52..142dc73 100644 --- a/services/comms/guest-name-job.yaml +++ 
b/services/comms/guest-name-job.yaml @@ -123,6 +123,7 @@ spec: SEEDER_USER = os.environ["SEEDER_USER"] ROOM_ALIAS = "#othrys:live.bstein.dev" SERVER_NAME = "live.bstein.dev" + STALE_GUEST_MS = 7 * 24 * 60 * 60 * 1000 def mas_admin_token(): with open(MAS_ADMIN_CLIENT_SECRET_FILE, "r", encoding="utf-8") as f: @@ -235,6 +236,35 @@ spec: break return users + def should_prune_guest(entry, now_ms): + if not entry.get("is_guest"): + return False + last_seen = entry.get("last_seen_ts") + if last_seen is None: + return False + try: + last_seen = int(last_seen) + except (TypeError, ValueError): + return False + return now_ms - last_seen > STALE_GUEST_MS + + def prune_guest(token, user_id): + headers = {"Authorization": f"Bearer {token}"} + try: + r = requests.delete( + f"{BASE}/_synapse/admin/v2/users/{urllib.parse.quote(user_id)}", + headers=headers, + params={"erase": "true"}, + timeout=30, + ) + except Exception as exc: # noqa: BLE001 + print(f"guest prune failed for {user_id}: {exc}") + return False + if r.status_code in (200, 202, 204, 404): + return True + print(f"guest prune failed for {user_id}: {r.status_code} {r.text}") + return False + def user_id_for_username(username): return f"@{username}:live.bstein.dev" @@ -404,6 +434,7 @@ spec: except Exception as exc: # noqa: BLE001 print(f"synapse admin list skipped: {exc}") entries = [] + now_ms = int(time.time() * 1000) for entry in entries: user_id = entry.get("name") or "" if not user_id.startswith("@"): @@ -412,6 +443,9 @@ spec: if localpart in mas_usernames: continue is_guest = entry.get("is_guest") + if is_guest and should_prune_guest(entry, now_ms): + if prune_guest(seeder_token, user_id): + continue if not (is_guest or needs_rename_username(localpart)): continue display = get_displayname_admin(seeder_token, user_id) @@ -431,4 +465,5 @@ spec: db_rename_numeric(existing) finally: mas_revoke_session(admin_token, seeder_session) - PY \ No newline at end of file + PY + diff --git a/services/comms/helmrelease.yaml 
b/services/comms/helmrelease.yaml index bf45b21..e259c52 100644 --- a/services/comms/helmrelease.yaml +++ b/services/comms/helmrelease.yaml @@ -130,24 +130,7 @@ spec: values: ["rpi5", "rpi4"] ingress: - enabled: true - className: traefik - annotations: - cert-manager.io/cluster-issuer: letsencrypt - traefik.ingress.kubernetes.io/router.entrypoints: websecure - traefik.ingress.kubernetes.io/router.tls: "true" - csHosts: - - matrix.live.bstein.dev - hosts: - - matrix.live.bstein.dev - wkHosts: - - live.bstein.dev - - bstein.dev - tls: - - secretName: matrix-live-tls - hosts: - - matrix.live.bstein.dev - - live.bstein.dev + enabled: false extraConfig: allow_guest_access: true From 8192dfeebe85e662a281f25427b71aefd2021ced Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Sat, 17 Jan 2026 07:38:38 -0300 Subject: [PATCH 207/270] platform: restore cert-manager and encrypt budget storage --- .../platform/cert-manager/kustomization.yaml | 1 - .../flux-system/platform/kustomization.yaml | 1 - .../base/storageclass/asteria-encrypted.yaml | 24 ++++ .../base/storageclass/kustomization.yaml | 1 + services/finance/actual-budget-data-pvc.yaml | 4 +- .../finance/actual-budget-deployment.yaml | 2 +- services/finance/actual-budget-ingress.yaml | 2 +- .../finance/finance-secrets-ensure-job.yaml | 2 +- .../finance/finance-secrets-ensure-rbac.yaml | 24 ++++ services/finance/firefly-ingress.yaml | 2 +- services/finance/kustomization.yaml | 1 + .../finance/scripts/finance_secrets_ensure.py | 120 +++++++++++++++++- 12 files changed, 171 insertions(+), 13 deletions(-) create mode 100644 infrastructure/modules/base/storageclass/asteria-encrypted.yaml create mode 100644 services/finance/finance-secrets-ensure-rbac.yaml diff --git a/clusters/atlas/flux-system/platform/cert-manager/kustomization.yaml b/clusters/atlas/flux-system/platform/cert-manager/kustomization.yaml index 21a9dc9..63469af 100644 --- a/clusters/atlas/flux-system/platform/cert-manager/kustomization.yaml +++ 
b/clusters/atlas/flux-system/platform/cert-manager/kustomization.yaml @@ -15,6 +15,5 @@ spec: namespace: flux-system targetNamespace: cert-manager dependsOn: - - name: cert-manager-cleanup - name: helm wait: true diff --git a/clusters/atlas/flux-system/platform/kustomization.yaml b/clusters/atlas/flux-system/platform/kustomization.yaml index 8ee08d7..b689cc0 100644 --- a/clusters/atlas/flux-system/platform/kustomization.yaml +++ b/clusters/atlas/flux-system/platform/kustomization.yaml @@ -4,7 +4,6 @@ kind: Kustomization resources: - core/kustomization.yaml - helm/kustomization.yaml - - cert-manager-cleanup/kustomization.yaml - cert-manager/kustomization.yaml - metallb/kustomization.yaml - traefik/kustomization.yaml diff --git a/infrastructure/modules/base/storageclass/asteria-encrypted.yaml b/infrastructure/modules/base/storageclass/asteria-encrypted.yaml new file mode 100644 index 0000000..a6eb566 --- /dev/null +++ b/infrastructure/modules/base/storageclass/asteria-encrypted.yaml @@ -0,0 +1,24 @@ +# infrastructure/modules/base/storageclass/asteria-encrypted.yaml +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + name: asteria-encrypted +parameters: + diskSelector: asteria + fromBackup: "" + numberOfReplicas: "2" + staleReplicaTimeout: "30" + fsType: "ext4" + replicaAutoBalance: "least-effort" + dataLocality: "disabled" + encrypted: "true" + csi.storage.k8s.io/provisioner-secret-name: ${pvc.name} + csi.storage.k8s.io/provisioner-secret-namespace: ${pvc.namespace} + csi.storage.k8s.io/node-publish-secret-name: ${pvc.name} + csi.storage.k8s.io/node-publish-secret-namespace: ${pvc.namespace} + csi.storage.k8s.io/node-stage-secret-name: ${pvc.name} + csi.storage.k8s.io/node-stage-secret-namespace: ${pvc.namespace} +provisioner: driver.longhorn.io +reclaimPolicy: Retain +allowVolumeExpansion: true +volumeBindingMode: Immediate diff --git a/infrastructure/modules/base/storageclass/kustomization.yaml 
b/infrastructure/modules/base/storageclass/kustomization.yaml index 704dd73..44d79c7 100644 --- a/infrastructure/modules/base/storageclass/kustomization.yaml +++ b/infrastructure/modules/base/storageclass/kustomization.yaml @@ -3,4 +3,5 @@ apiVersion: kustomize.config.k8s.io/v1beta1 kind: Kustomization resources: - asteria.yaml + - asteria-encrypted.yaml - astreae.yaml diff --git a/services/finance/actual-budget-data-pvc.yaml b/services/finance/actual-budget-data-pvc.yaml index 7016cda..2da64a8 100644 --- a/services/finance/actual-budget-data-pvc.yaml +++ b/services/finance/actual-budget-data-pvc.yaml @@ -2,11 +2,11 @@ apiVersion: v1 kind: PersistentVolumeClaim metadata: - name: actual-budget-data + name: actual-budget-data-encrypted namespace: finance spec: accessModes: ["ReadWriteOnce"] - storageClassName: asteria + storageClassName: asteria-encrypted resources: requests: storage: 10Gi diff --git a/services/finance/actual-budget-deployment.yaml b/services/finance/actual-budget-deployment.yaml index c4aeeaa..55186b2 100644 --- a/services/finance/actual-budget-deployment.yaml +++ b/services/finance/actual-budget-deployment.yaml @@ -169,7 +169,7 @@ spec: volumes: - name: actual-data persistentVolumeClaim: - claimName: actual-budget-data + claimName: actual-budget-data-encrypted - name: actual-openid-bootstrap-script configMap: name: actual-openid-bootstrap-script diff --git a/services/finance/actual-budget-ingress.yaml b/services/finance/actual-budget-ingress.yaml index 4cbc9e6..c6eaee7 100644 --- a/services/finance/actual-budget-ingress.yaml +++ b/services/finance/actual-budget-ingress.yaml @@ -6,7 +6,7 @@ metadata: namespace: finance annotations: kubernetes.io/ingress.class: traefik - traefik.ingress.kubernetes.io/router.entrypoints: websecure + traefik.ingress.kubernetes.io/router.entrypoints: web,websecure traefik.ingress.kubernetes.io/router.tls: "true" cert-manager.io/cluster-issuer: letsencrypt spec: diff --git 
a/services/finance/finance-secrets-ensure-job.yaml b/services/finance/finance-secrets-ensure-job.yaml index 5de20af..67f06cb 100644 --- a/services/finance/finance-secrets-ensure-job.yaml +++ b/services/finance/finance-secrets-ensure-job.yaml @@ -2,7 +2,7 @@ apiVersion: batch/v1 kind: Job metadata: - name: finance-secrets-ensure-4 + name: finance-secrets-ensure-5 namespace: finance spec: backoffLimit: 1 diff --git a/services/finance/finance-secrets-ensure-rbac.yaml b/services/finance/finance-secrets-ensure-rbac.yaml new file mode 100644 index 0000000..5f70578 --- /dev/null +++ b/services/finance/finance-secrets-ensure-rbac.yaml @@ -0,0 +1,24 @@ +# services/finance/finance-secrets-ensure-rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: finance-secrets-ensure + namespace: finance +rules: + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "create"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: finance-secrets-ensure + namespace: finance +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: finance-secrets-ensure +subjects: + - kind: ServiceAccount + name: finance-secrets-ensure + namespace: finance diff --git a/services/finance/firefly-ingress.yaml b/services/finance/firefly-ingress.yaml index bd01661..40324a9 100644 --- a/services/finance/firefly-ingress.yaml +++ b/services/finance/firefly-ingress.yaml @@ -6,7 +6,7 @@ metadata: namespace: finance annotations: kubernetes.io/ingress.class: traefik - traefik.ingress.kubernetes.io/router.entrypoints: websecure + traefik.ingress.kubernetes.io/router.entrypoints: web,websecure traefik.ingress.kubernetes.io/router.tls: "true" cert-manager.io/cluster-issuer: letsencrypt spec: diff --git a/services/finance/kustomization.yaml b/services/finance/kustomization.yaml index 11cb4ab..e4c414f 100644 --- a/services/finance/kustomization.yaml +++ b/services/finance/kustomization.yaml @@ -6,6 +6,7 @@ resources: - namespace.yaml - 
serviceaccount.yaml - portal-rbac.yaml + - finance-secrets-ensure-rbac.yaml - actual-budget-data-pvc.yaml - firefly-storage-pvc.yaml - finance-secrets-ensure-job.yaml diff --git a/services/finance/scripts/finance_secrets_ensure.py b/services/finance/scripts/finance_secrets_ensure.py index 9a04ad0..198ffe6 100644 --- a/services/finance/scripts/finance_secrets_ensure.py +++ b/services/finance/scripts/finance_secrets_ensure.py @@ -3,6 +3,7 @@ import base64 import json import os import secrets +import ssl import sys import urllib.error import urllib.request @@ -20,18 +21,81 @@ def require_value(label: str, value: str) -> None: raise RuntimeError(f"missing {label}") -def http_json(method: str, url: str, headers=None, payload=None): +def http_json(method: str, url: str, headers=None, payload=None, context=None): data = None if payload is not None: data = json.dumps(payload).encode() req = urllib.request.Request(url, data=data, headers=headers or {}, method=method) - with urllib.request.urlopen(req, timeout=15) as resp: + with urllib.request.urlopen(req, timeout=15, context=context) as resp: body = resp.read() if not body: return resp.status, None return resp.status, json.loads(body.decode()) +def k8s_context() -> ssl.SSLContext: + ca_path = Path("/var/run/secrets/kubernetes.io/serviceaccount/ca.crt") + if ca_path.exists(): + return ssl.create_default_context(cafile=str(ca_path)) + return ssl.create_default_context() + + +def k8s_api_url(path: str) -> str: + host = os.environ.get("KUBERNETES_SERVICE_HOST") + port = os.environ.get("KUBERNETES_SERVICE_PORT", "443") + if not host: + raise RuntimeError("missing kubernetes service host") + return f"https://{host}:{port}{path}" + + +def k8s_get_secret(namespace: str, name: str, token: str): + try: + _, body = http_json( + "GET", + k8s_api_url(f"/api/v1/namespaces/{namespace}/secrets/{name}"), + headers={"Authorization": f"Bearer {token}"}, + context=k8s_context(), + ) + except urllib.error.HTTPError as exc: + if exc.code == 
404: + return None + raise + return body + + +def k8s_create_secret(namespace: str, name: str, token: str, string_data: dict): + payload = { + "apiVersion": "v1", + "kind": "Secret", + "metadata": {"name": name, "namespace": namespace}, + "type": "Opaque", + "stringData": string_data, + } + try: + status, _ = http_json( + "POST", + k8s_api_url(f"/api/v1/namespaces/{namespace}/secrets"), + headers={ + "Authorization": f"Bearer {token}", + "Content-Type": "application/json", + }, + payload=payload, + context=k8s_context(), + ) + except urllib.error.HTTPError as exc: + if exc.code == 409: + return + raise + if status not in (200, 201): + raise RuntimeError(f"k8s secret create failed for {name} (status {status})") + + +def decode_secret_value(value: str) -> str: + if not value: + return "" + return base64.b64decode(value.encode()).decode("utf-8") + + def vault_login(vault_addr: str, role: str, jwt: str) -> str: status, body = http_json( "POST", @@ -152,17 +216,63 @@ def ensure_actual_db(vault_addr: str, token: str): ) +def ensure_actual_encryption(vault_addr: str, token: str, sa_token: str): + namespace = os.environ.get("FINANCE_NAMESPACE", "finance") + secret_name = os.environ.get("ACTUAL_BUDGET_PVC_NAME", "actual-budget-data-encrypted") + if not sa_token: + raise RuntimeError("missing service account token for k8s") + + vault_data = vault_read(vault_addr, token, "finance/actual-encryption") + vault_key = vault_data.get("CRYPTO_KEY_VALUE", "") + + k8s_secret = k8s_get_secret(namespace, secret_name, sa_token) + k8s_key = "" + if k8s_secret: + data = k8s_secret.get("data", {}) or {} + k8s_key = decode_secret_value(data.get("CRYPTO_KEY_VALUE", "")) + + if vault_key and k8s_key and vault_key != k8s_key: + raise RuntimeError("actual encryption key mismatch between vault and k8s") + + key = vault_key or k8s_key + provider = "secret" + if not key: + key = secrets.token_urlsafe(48) + vault_write( + vault_addr, + token, + "finance/actual-encryption", + {"CRYPTO_KEY_VALUE": 
key, "CRYPTO_KEY_PROVIDER": provider}, + ) + elif not vault_key: + vault_write( + vault_addr, + token, + "finance/actual-encryption", + {"CRYPTO_KEY_VALUE": key, "CRYPTO_KEY_PROVIDER": provider}, + ) + + if not k8s_secret: + k8s_create_secret( + namespace, + secret_name, + sa_token, + {"CRYPTO_KEY_VALUE": key, "CRYPTO_KEY_PROVIDER": provider}, + ) + + def main() -> int: vault_addr = os.environ.get("VAULT_ADDR", "http://vault.vault.svc.cluster.local:8200") vault_role = os.environ.get("VAULT_ROLE", "finance-secrets") - jwt = read_file(Path("/var/run/secrets/kubernetes.io/serviceaccount/token")) - if not jwt: + sa_token = read_file(Path("/var/run/secrets/kubernetes.io/serviceaccount/token")) + if not sa_token: raise RuntimeError("missing service account token") - token = vault_login(vault_addr, vault_role, jwt) + token = vault_login(vault_addr, vault_role, sa_token) ensure_firefly_db(vault_addr, token) ensure_firefly_secrets(vault_addr, token) ensure_actual_db(vault_addr, token) + ensure_actual_encryption(vault_addr, token, sa_token) print("finance secrets ensured") return 0 From 16b7fcd120d98d4bb7321dd285307ccc1eacceeb Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Sat, 17 Jan 2026 07:54:27 -0300 Subject: [PATCH 208/270] finance: let firefly init nginx config --- services/finance/firefly-deployment.yaml | 2 -- 1 file changed, 2 deletions(-) diff --git a/services/finance/firefly-deployment.yaml b/services/finance/firefly-deployment.yaml index c077f75..3d69401 100644 --- a/services/finance/firefly-deployment.yaml +++ b/services/finance/firefly-deployment.yaml @@ -78,8 +78,6 @@ spec: operator: In values: ["rpi4"] securityContext: - runAsUser: 33 - runAsGroup: 33 fsGroup: 33 fsGroupChangePolicy: OnRootMismatch initContainers: From 6cf46cf7891f91e2cde2100f69f74730886f961f Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Sat, 17 Jan 2026 08:05:33 -0300 Subject: [PATCH 209/270] core: point internal dns at traefik service --- infrastructure/core/coredns-custom.yaml | 58 
++++++++++++------------- 1 file changed, 29 insertions(+), 29 deletions(-) diff --git a/infrastructure/core/coredns-custom.yaml b/infrastructure/core/coredns-custom.yaml index 4002849..0f3d93f 100644 --- a/infrastructure/core/coredns-custom.yaml +++ b/infrastructure/core/coredns-custom.yaml @@ -10,35 +10,35 @@ data: errors cache 30 hosts { - 192.168.22.9 alerts.bstein.dev - 192.168.22.9 auth.bstein.dev - 192.168.22.9 bstein.dev - 192.168.22.9 budget.bstein.dev - 192.168.22.9 call.live.bstein.dev - 192.168.22.9 cd.bstein.dev - 192.168.22.9 chat.ai.bstein.dev - 192.168.22.9 ci.bstein.dev - 192.168.22.9 cloud.bstein.dev - 192.168.22.9 health.bstein.dev - 192.168.22.9 kit.live.bstein.dev - 192.168.22.9 live.bstein.dev - 192.168.22.9 logs.bstein.dev - 192.168.22.9 longhorn.bstein.dev - 192.168.22.9 mail.bstein.dev - 192.168.22.9 matrix.live.bstein.dev - 192.168.22.9 metrics.bstein.dev - 192.168.22.9 monero.bstein.dev - 192.168.22.9 money.bstein.dev - 192.168.22.9 notes.bstein.dev - 192.168.22.9 office.bstein.dev - 192.168.22.9 pegasus.bstein.dev - 192.168.22.9 registry.bstein.dev - 192.168.22.9 scm.bstein.dev - 192.168.22.9 secret.bstein.dev - 192.168.22.9 sso.bstein.dev - 192.168.22.9 stream.bstein.dev - 192.168.22.9 tasks.bstein.dev - 192.168.22.9 vault.bstein.dev + 10.43.6.87 alerts.bstein.dev + 10.43.6.87 auth.bstein.dev + 10.43.6.87 bstein.dev + 10.43.6.87 budget.bstein.dev + 10.43.6.87 call.live.bstein.dev + 10.43.6.87 cd.bstein.dev + 10.43.6.87 chat.ai.bstein.dev + 10.43.6.87 ci.bstein.dev + 10.43.6.87 cloud.bstein.dev + 10.43.6.87 health.bstein.dev + 10.43.6.87 kit.live.bstein.dev + 10.43.6.87 live.bstein.dev + 10.43.6.87 logs.bstein.dev + 10.43.6.87 longhorn.bstein.dev + 10.43.6.87 mail.bstein.dev + 10.43.6.87 matrix.live.bstein.dev + 10.43.6.87 metrics.bstein.dev + 10.43.6.87 monero.bstein.dev + 10.43.6.87 money.bstein.dev + 10.43.6.87 notes.bstein.dev + 10.43.6.87 office.bstein.dev + 10.43.6.87 pegasus.bstein.dev + 10.43.6.87 registry.bstein.dev + 10.43.6.87 
scm.bstein.dev + 10.43.6.87 secret.bstein.dev + 10.43.6.87 sso.bstein.dev + 10.43.6.87 stream.bstein.dev + 10.43.6.87 tasks.bstein.dev + 10.43.6.87 vault.bstein.dev fallthrough } } From 309931f7a5d41cec40365fef8ca980ffafba5631 Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Sat, 17 Jan 2026 08:12:14 -0300 Subject: [PATCH 210/270] finance: run firefly entrypoint after vault env --- services/finance/firefly-deployment.yaml | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/services/finance/firefly-deployment.yaml b/services/finance/firefly-deployment.yaml index 3d69401..f43bdc3 100644 --- a/services/finance/firefly-deployment.yaml +++ b/services/finance/firefly-deployment.yaml @@ -99,7 +99,10 @@ spec: - name: firefly image: fireflyiii/core:version-6.4.15 command: ["/bin/sh", "-c"] - args: [". /vault/secrets/firefly-env.sh && exec /init"] + args: + - | + . /vault/secrets/firefly-env.sh + exec /usr/local/bin/docker-php-serversideup-entrypoint /init env: - name: APP_ENV value: production From df3a56656d75682993a4bf8f5b3450b95aff3d38 Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Sat, 17 Jan 2026 08:16:57 -0300 Subject: [PATCH 211/270] core: route budget and money to traefik --- infrastructure/core/coredns-custom.yaml | 54 ++++++++++++------------- 1 file changed, 27 insertions(+), 27 deletions(-) diff --git a/infrastructure/core/coredns-custom.yaml b/infrastructure/core/coredns-custom.yaml index 0f3d93f..ea28def 100644 --- a/infrastructure/core/coredns-custom.yaml +++ b/infrastructure/core/coredns-custom.yaml @@ -10,35 +10,35 @@ data: errors cache 30 hosts { - 10.43.6.87 alerts.bstein.dev - 10.43.6.87 auth.bstein.dev - 10.43.6.87 bstein.dev + 192.168.22.9 alerts.bstein.dev + 192.168.22.9 auth.bstein.dev + 192.168.22.9 bstein.dev 10.43.6.87 budget.bstein.dev - 10.43.6.87 call.live.bstein.dev - 10.43.6.87 cd.bstein.dev - 10.43.6.87 chat.ai.bstein.dev - 10.43.6.87 ci.bstein.dev - 10.43.6.87 cloud.bstein.dev - 10.43.6.87 health.bstein.dev - 10.43.6.87 
kit.live.bstein.dev - 10.43.6.87 live.bstein.dev - 10.43.6.87 logs.bstein.dev - 10.43.6.87 longhorn.bstein.dev - 10.43.6.87 mail.bstein.dev - 10.43.6.87 matrix.live.bstein.dev - 10.43.6.87 metrics.bstein.dev - 10.43.6.87 monero.bstein.dev + 192.168.22.9 call.live.bstein.dev + 192.168.22.9 cd.bstein.dev + 192.168.22.9 chat.ai.bstein.dev + 192.168.22.9 ci.bstein.dev + 192.168.22.9 cloud.bstein.dev + 192.168.22.9 health.bstein.dev + 192.168.22.9 kit.live.bstein.dev + 192.168.22.9 live.bstein.dev + 192.168.22.9 logs.bstein.dev + 192.168.22.9 longhorn.bstein.dev + 192.168.22.9 mail.bstein.dev + 192.168.22.9 matrix.live.bstein.dev + 192.168.22.9 metrics.bstein.dev + 192.168.22.9 monero.bstein.dev 10.43.6.87 money.bstein.dev - 10.43.6.87 notes.bstein.dev - 10.43.6.87 office.bstein.dev - 10.43.6.87 pegasus.bstein.dev - 10.43.6.87 registry.bstein.dev - 10.43.6.87 scm.bstein.dev - 10.43.6.87 secret.bstein.dev - 10.43.6.87 sso.bstein.dev - 10.43.6.87 stream.bstein.dev - 10.43.6.87 tasks.bstein.dev - 10.43.6.87 vault.bstein.dev + 192.168.22.9 notes.bstein.dev + 192.168.22.9 office.bstein.dev + 192.168.22.9 pegasus.bstein.dev + 192.168.22.9 registry.bstein.dev + 192.168.22.9 scm.bstein.dev + 192.168.22.9 secret.bstein.dev + 192.168.22.9 sso.bstein.dev + 192.168.22.9 stream.bstein.dev + 192.168.22.9 tasks.bstein.dev + 192.168.22.9 vault.bstein.dev fallthrough } } From f15b80872e1987c70bc20180e7f76ac70aec2d81 Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Sat, 17 Jan 2026 16:31:53 -0300 Subject: [PATCH 212/270] comms: add default server name to element config --- services/comms/helmrelease.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/services/comms/helmrelease.yaml b/services/comms/helmrelease.yaml index e259c52..90a20ae 100644 --- a/services/comms/helmrelease.yaml +++ b/services/comms/helmrelease.yaml @@ -354,6 +354,7 @@ spec: name: live.bstein.dev config: + default_server_name: live.bstein.dev default_theme: dark brand: Othrys disable_custom_urls: true From 
931e41a76fcd935770402340a278919b0e8f9c1f Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Sat, 17 Jan 2026 16:51:40 -0300 Subject: [PATCH 213/270] comms: harden guest register provisioning --- .../comms/scripts/guest-register/server.py | 115 ++++++++++++------ 1 file changed, 76 insertions(+), 39 deletions(-) diff --git a/services/comms/scripts/guest-register/server.py b/services/comms/scripts/guest-register/server.py index 0e1fb4c..9daa971 100644 --- a/services/comms/scripts/guest-register/server.py +++ b/services/comms/scripts/guest-register/server.py @@ -3,6 +3,7 @@ import json import os import random import secrets +import time from http.server import BaseHTTPRequestHandler, HTTPServer from urllib import error, parse, request @@ -29,6 +30,20 @@ NOUN = [ "pine","quartz","reef","ridge","sable","sage","shore","thunder","vale","zephyr", ] +def _open_with_retry(req, timeout, attempts=6): + last = None + for attempt in range(1, attempts + 1): + try: + return request.urlopen(req, timeout=timeout) + except error.HTTPError as e: + return e + except (error.URLError, TimeoutError, OSError) as e: + last = e + time.sleep(attempt * 2) + if last: + raise last + raise RuntimeError("request_failed") + def _json(method, url, *, headers=None, body=None, timeout=20): hdrs = {"Content-Type": "application/json"} if headers: @@ -37,18 +52,17 @@ def _json(method, url, *, headers=None, body=None, timeout=20): if body is not None: data = json.dumps(body).encode() req = request.Request(url, data=data, headers=hdrs, method=method) - try: - with request.urlopen(req, timeout=timeout) as resp: - raw = resp.read() - payload = json.loads(raw.decode()) if raw else {} - return resp.status, payload - except error.HTTPError as e: - raw = e.read() + resp = _open_with_retry(req, timeout) + if isinstance(resp, error.HTTPError): + raw = resp.read() try: payload = json.loads(raw.decode()) if raw else {} except Exception: payload = {} - return e.code, payload + return resp.code, payload + raw = 
resp.read() + payload = json.loads(raw.decode()) if raw else {} + return resp.status, payload def _form(method, url, *, headers=None, fields=None, timeout=20): hdrs = {"Content-Type": "application/x-www-form-urlencoded"} @@ -56,18 +70,17 @@ def _form(method, url, *, headers=None, fields=None, timeout=20): hdrs.update(headers) data = parse.urlencode(fields or {}).encode() req = request.Request(url, data=data, headers=hdrs, method=method) - try: - with request.urlopen(req, timeout=timeout) as resp: - raw = resp.read() - payload = json.loads(raw.decode()) if raw else {} - return resp.status, payload - except error.HTTPError as e: - raw = e.read() + resp = _open_with_retry(req, timeout) + if isinstance(resp, error.HTTPError): + raw = resp.read() try: payload = json.loads(raw.decode()) if raw else {} except Exception: payload = {} - return e.code, payload + return resp.code, payload + raw = resp.read() + payload = json.loads(raw.decode()) if raw else {} + return resp.status, payload _admin_token = None _admin_token_at = 0.0 @@ -110,12 +123,28 @@ def _admin_api(admin_token, method, path, body=None): timeout=20, ) -def _create_user(admin_token, username): - status, payload = _admin_api(admin_token, "POST", "/users", {"username": username}) - if status != 201: - return status, None - user = payload.get("data") or {} - return status, user.get("id") +def _create_user(admin_token, username, password): + payloads = [ + { + "data": { + "type": "user", + "attributes": { + "username": username, + "password": password, + }, + } + }, + {"username": username, "password": password}, + {"username": username}, + ] + for payload in payloads: + status, body = _admin_api(admin_token, "POST", "/users", payload) + if status in (200, 201): + user = body.get("data") or {} + return status, user.get("id") or user.get("id") + if status == 409: + return status, None + return status, None def _set_password(admin_token, user_id, password): status, _payload = _admin_api( @@ -127,20 +156,28 @@ def 
_set_password(admin_token, user_id, password): return status in (200, 204) def _login_password(username, password): - payload = { - "type": "m.login.password", - "identifier": {"type": "m.id.user", "user": f"@{username}:{SERVER_NAME}"}, - "password": password, - } - status, data = _json( - "POST", - f"{MAS_BASE}/_matrix/client/v3/login", - body=payload, - timeout=20, - ) - if status != 200: - return None, None - return data.get("access_token"), data.get("device_id") + payloads = [ + { + "type": "m.login.password", + "identifier": {"type": "m.id.user", "user": f"@{username}:{SERVER_NAME}"}, + "password": password, + }, + { + "type": "m.login.password", + "identifier": {"type": "m.id.user", "user": username}, + "password": password, + }, + ] + for payload in payloads: + status, data = _json( + "POST", + f"{MAS_BASE}/_matrix/client/v3/login", + body=payload, + timeout=20, + ) + if status == 200: + return data.get("access_token"), data.get("device_id") + return None, None def _set_display_name(access_token, user_id, displayname): _json( @@ -224,18 +261,18 @@ class Handler(BaseHTTPRequestHandler): admin_token = _mas_admin_access_token(now) displayname = _generate_displayname() + password = secrets.token_urlsafe(18) localpart = None mas_user_id = None for _ in range(5): localpart = _generate_localpart() - status, mas_user_id = _create_user(admin_token, localpart) - if status == 201 and mas_user_id: + status, mas_user_id = _create_user(admin_token, localpart, password) + if status in (200, 201) and mas_user_id: break mas_user_id = None if not mas_user_id or not localpart: raise RuntimeError("add_user_failed") - password = secrets.token_urlsafe(18) if not _set_password(admin_token, mas_user_id, password): raise RuntimeError("set_password_failed") access_token, device_id = _login_password(localpart, password) From 69d67b39a5fbcc0b4f9544a7ac879cdba6ce7241 Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Sat, 17 Jan 2026 16:59:57 -0300 Subject: [PATCH 214/270] comms: make 
guest register server threaded --- services/comms/scripts/guest-register/server.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/services/comms/scripts/guest-register/server.py b/services/comms/scripts/guest-register/server.py index 9daa971..b1f6490 100644 --- a/services/comms/scripts/guest-register/server.py +++ b/services/comms/scripts/guest-register/server.py @@ -141,7 +141,7 @@ def _create_user(admin_token, username, password): status, body = _admin_api(admin_token, "POST", "/users", payload) if status in (200, 201): user = body.get("data") or {} - return status, user.get("id") or user.get("id") + return status, user.get("id") if status == 409: return status, None return status, None @@ -295,7 +295,11 @@ class Handler(BaseHTTPRequestHandler): def main(): port = int(os.environ.get("PORT", "8080")) - HTTPServer(("0.0.0.0", port), Handler).serve_forever() + try: + from http.server import ThreadingHTTPServer as _Server + except Exception: + _Server = HTTPServer + _Server(("0.0.0.0", port), Handler).serve_forever() if __name__ == "__main__": main() From 1293ffe0a55583cbd9cd7efb9cc6af7956bc1eb6 Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Sat, 17 Jan 2026 17:21:46 -0300 Subject: [PATCH 215/270] comms: pin mas/synapse host aliases for DNS --- services/comms/guest-register-deployment.yaml | 9 +++++++++ services/comms/helmrelease.yaml | 9 +++++++++ services/comms/mas-deployment.yaml | 5 +++++ 3 files changed, 23 insertions(+) diff --git a/services/comms/guest-register-deployment.yaml b/services/comms/guest-register-deployment.yaml index 2888033..04a0018 100644 --- a/services/comms/guest-register-deployment.yaml +++ b/services/comms/guest-register-deployment.yaml @@ -23,6 +23,15 @@ spec: app.kubernetes.io/name: matrix-guest-register spec: serviceAccountName: comms-vault + hostAliases: + - ip: "10.43.36.27" + hostnames: + - "matrix-authentication-service" + - "matrix-authentication-service.comms.svc.cluster.local" + - ip: "10.43.216.45" + 
hostnames: + - "othrys-synapse-matrix-synapse" + - "othrys-synapse-matrix-synapse.comms.svc.cluster.local" securityContext: runAsNonRoot: true runAsUser: 10001 diff --git a/services/comms/helmrelease.yaml b/services/comms/helmrelease.yaml index 90a20ae..b907d38 100644 --- a/services/comms/helmrelease.yaml +++ b/services/comms/helmrelease.yaml @@ -204,6 +204,15 @@ spec: spec: serviceAccountName: comms-vault automountServiceAccountToken: true + hostAliases: + - ip: "10.43.150.98" + hostnames: + - "othrys-synapse-redis-master" + - "othrys-synapse-redis-master.comms.svc.cluster.local" + - ip: "10.43.36.27" + hostnames: + - "matrix-authentication-service" + - "matrix-authentication-service.comms.svc.cluster.local" containers: - name: synapse command: diff --git a/services/comms/mas-deployment.yaml b/services/comms/mas-deployment.yaml index 0ab2071..7c4e8e4 100644 --- a/services/comms/mas-deployment.yaml +++ b/services/comms/mas-deployment.yaml @@ -61,6 +61,11 @@ spec: spec: enableServiceLinks: false serviceAccountName: comms-vault + hostAliases: + - ip: "10.43.216.45" + hostnames: + - "othrys-synapse-matrix-synapse" + - "othrys-synapse-matrix-synapse.comms.svc.cluster.local" nodeSelector: hardware: rpi5 affinity: From f9830c6678027aff925f8f9e32dbc1c6e2c293c1 Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Sat, 17 Jan 2026 17:30:07 -0300 Subject: [PATCH 216/270] comms: prune stale guests after 14 days --- services/comms/guest-name-job.yaml | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/services/comms/guest-name-job.yaml b/services/comms/guest-name-job.yaml index 142dc73..bae2d49 100644 --- a/services/comms/guest-name-job.yaml +++ b/services/comms/guest-name-job.yaml @@ -123,7 +123,7 @@ spec: SEEDER_USER = os.environ["SEEDER_USER"] ROOM_ALIAS = "#othrys:live.bstein.dev" SERVER_NAME = "live.bstein.dev" - STALE_GUEST_MS = 7 * 24 * 60 * 60 * 1000 + STALE_GUEST_MS = 14 * 24 * 60 * 60 * 1000 def mas_admin_token(): with open(MAS_ADMIN_CLIENT_SECRET_FILE, 
"r", encoding="utf-8") as f: @@ -466,4 +466,3 @@ spec: finally: mas_revoke_session(admin_token, seeder_session) PY - From 6997d5e202c49ce6faa240f7d68f34c724a695f1 Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Sat, 17 Jan 2026 17:51:21 -0300 Subject: [PATCH 217/270] comms: use guest-tools image for guest rename --- dockerfiles/Dockerfile.comms-guest-tools | 5 +++++ services/comms/guest-name-job.yaml | 3 +-- 2 files changed, 6 insertions(+), 2 deletions(-) create mode 100644 dockerfiles/Dockerfile.comms-guest-tools diff --git a/dockerfiles/Dockerfile.comms-guest-tools b/dockerfiles/Dockerfile.comms-guest-tools new file mode 100644 index 0000000..2a18016 --- /dev/null +++ b/dockerfiles/Dockerfile.comms-guest-tools @@ -0,0 +1,5 @@ +FROM python:3.11-slim + +ENV PIP_DISABLE_PIP_VERSION_CHECK=1 + +RUN pip install --no-cache-dir requests psycopg2-binary diff --git a/services/comms/guest-name-job.yaml b/services/comms/guest-name-job.yaml index bae2d49..ef7b2e0 100644 --- a/services/comms/guest-name-job.yaml +++ b/services/comms/guest-name-job.yaml @@ -62,7 +62,7 @@ spec: defaultMode: 0555 containers: - name: rename - image: python:3.11-slim + image: registry.bstein.dev/bstein/comms-guest-tools:0.1.0 volumeMounts: - name: vault-scripts mountPath: /vault/scripts @@ -94,7 +94,6 @@ spec: - | set -euo pipefail . 
/vault/scripts/comms_vault_env.sh - pip install --no-cache-dir requests psycopg2-binary >/dev/null python - <<'PY' import base64 import os From 0e3c8ef9524d662fd15ff7c23a52038cc561c5db Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Sat, 17 Jan 2026 17:57:57 -0300 Subject: [PATCH 218/270] comms: add harbor pull secret to vault serviceaccount --- services/comms/serviceaccount.yaml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/services/comms/serviceaccount.yaml b/services/comms/serviceaccount.yaml index 1b975b8..ee5eabb 100644 --- a/services/comms/serviceaccount.yaml +++ b/services/comms/serviceaccount.yaml @@ -4,3 +4,5 @@ kind: ServiceAccount metadata: name: comms-vault namespace: comms +imagePullSecrets: + - name: harbor-regcred From 5f1b61d25e3d6f017e919d5931ed8ae28ee39bea Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Sat, 17 Jan 2026 18:04:53 -0300 Subject: [PATCH 219/270] comms: pin guest rename job to rpi5 nodes --- services/comms/guest-name-job.yaml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/services/comms/guest-name-job.yaml b/services/comms/guest-name-job.yaml index ef7b2e0..0797168 100644 --- a/services/comms/guest-name-job.yaml +++ b/services/comms/guest-name-job.yaml @@ -55,6 +55,8 @@ spec: spec: restartPolicy: Never serviceAccountName: comms-vault + nodeSelector: + hardware: rpi5 volumes: - name: vault-scripts configMap: From be10e01c2f80d321924a56e7ab9bb35933156f4b Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Sat, 17 Jan 2026 18:16:45 -0300 Subject: [PATCH 220/270] comms: serve host-specific Element config alias --- services/comms/helmrelease.yaml | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/services/comms/helmrelease.yaml b/services/comms/helmrelease.yaml index b907d38..406ab51 100644 --- a/services/comms/helmrelease.yaml +++ b/services/comms/helmrelease.yaml @@ -382,6 +382,14 @@ spec: url: https://call.live.bstein.dev participant_limit: 16 brand: Othrys Call + nginxConfig: |- + add_header X-Frame-Options SAMEORIGIN; + 
add_header X-Content-Type-Options nosniff; + add_header X-XSS-Protection "1; mode=block"; + add_header Content-Security-Policy "frame-ancestors 'self'"; + location = /config.live.bstein.dev.json { + try_files /config.json =404; + } ingress: enabled: true From ebb300b939b1a0460e0b2c90e3ecff9da1ff89f6 Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Sat, 17 Jan 2026 18:22:36 -0300 Subject: [PATCH 221/270] comms: mount host-specific Element config file --- services/comms/helmrelease.yaml | 20 ++++++++++++-------- 1 file changed, 12 insertions(+), 8 deletions(-) diff --git a/services/comms/helmrelease.yaml b/services/comms/helmrelease.yaml index 406ab51..10554b6 100644 --- a/services/comms/helmrelease.yaml +++ b/services/comms/helmrelease.yaml @@ -382,14 +382,18 @@ spec: url: https://call.live.bstein.dev participant_limit: 16 brand: Othrys Call - nginxConfig: |- - add_header X-Frame-Options SAMEORIGIN; - add_header X-Content-Type-Options nosniff; - add_header X-XSS-Protection "1; mode=block"; - add_header Content-Security-Policy "frame-ancestors 'self'"; - location = /config.live.bstein.dev.json { - try_files /config.json =404; - } + extraVolumes: + - name: element-config-host + configMap: + name: othrys-element-element-web + items: + - key: config.json + path: config.live.bstein.dev.json + extraVolumeMounts: + - name: element-config-host + mountPath: /tmp/element-web-config/config.live.bstein.dev.json + subPath: config.live.bstein.dev.json + readOnly: true ingress: enabled: true From 578ef5e8303022e0debe1693c06c473fc994b169 Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Sat, 17 Jan 2026 18:29:42 -0300 Subject: [PATCH 222/270] comms: add Element host-config entrypoint script --- services/comms/helmrelease.yaml | 14 ++++++-------- services/comms/kustomization.yaml | 5 +++++ services/comms/scripts/element-host-config.sh | 9 +++++++++ 3 files changed, 20 insertions(+), 8 deletions(-) create mode 100644 services/comms/scripts/element-host-config.sh diff --git 
a/services/comms/helmrelease.yaml b/services/comms/helmrelease.yaml index 10554b6..4456348 100644 --- a/services/comms/helmrelease.yaml +++ b/services/comms/helmrelease.yaml @@ -383,16 +383,14 @@ spec: participant_limit: 16 brand: Othrys Call extraVolumes: - - name: element-config-host + - name: element-host-config configMap: - name: othrys-element-element-web - items: - - key: config.json - path: config.live.bstein.dev.json + name: othrys-element-host-config + defaultMode: 0555 extraVolumeMounts: - - name: element-config-host - mountPath: /tmp/element-web-config/config.live.bstein.dev.json - subPath: config.live.bstein.dev.json + - name: element-host-config + mountPath: /docker-entrypoint.d/20-host-config.sh + subPath: 20-host-config.sh readOnly: true ingress: diff --git a/services/comms/kustomization.yaml b/services/comms/kustomization.yaml index 9171b6b..3360067 100644 --- a/services/comms/kustomization.yaml +++ b/services/comms/kustomization.yaml @@ -63,6 +63,11 @@ configMapGenerator: - bot.py=scripts/atlasbot/bot.py options: disableNameSuffixHash: true + - name: othrys-element-host-config + files: + - 20-host-config.sh=scripts/element-host-config.sh + options: + disableNameSuffixHash: true - name: atlas-kb files: - INDEX.md=knowledge/INDEX.md diff --git a/services/comms/scripts/element-host-config.sh b/services/comms/scripts/element-host-config.sh new file mode 100644 index 0000000..6f381a9 --- /dev/null +++ b/services/comms/scripts/element-host-config.sh @@ -0,0 +1,9 @@ +#!/usr/bin/env bash +set -euo pipefail + +HOST_CONFIG="/tmp/element-web-config/config.live.bstein.dev.json" +BASE_CONFIG="/tmp/element-web-config/config.json" + +if [[ -f "$BASE_CONFIG" ]]; then + cp -f "$BASE_CONFIG" "$HOST_CONFIG" +fi From 96b93a16875eb8e857c4b29b07451a8d837966e7 Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Sat, 17 Jan 2026 18:38:36 -0300 Subject: [PATCH 223/270] comms: use sh for Element host-config script --- services/comms/scripts/element-host-config.sh | 6 +++--- 1 
file changed, 3 insertions(+), 3 deletions(-) diff --git a/services/comms/scripts/element-host-config.sh b/services/comms/scripts/element-host-config.sh index 6f381a9..1c6a488 100644 --- a/services/comms/scripts/element-host-config.sh +++ b/services/comms/scripts/element-host-config.sh @@ -1,9 +1,9 @@ -#!/usr/bin/env bash -set -euo pipefail +#!/bin/sh +set -eu HOST_CONFIG="/tmp/element-web-config/config.live.bstein.dev.json" BASE_CONFIG="/tmp/element-web-config/config.json" -if [[ -f "$BASE_CONFIG" ]]; then +if [ -f "$BASE_CONFIG" ]; then cp -f "$BASE_CONFIG" "$HOST_CONFIG" fi From e729adc6efdbc0995801d6da0f49ede4d2054acb Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Sat, 17 Jan 2026 19:12:00 -0300 Subject: [PATCH 224/270] comms: drop livekit token host alias --- services/comms/livekit-token-deployment.yaml | 4 ---- 1 file changed, 4 deletions(-) diff --git a/services/comms/livekit-token-deployment.yaml b/services/comms/livekit-token-deployment.yaml index 31213fd..9911ecb 100644 --- a/services/comms/livekit-token-deployment.yaml +++ b/services/comms/livekit-token-deployment.yaml @@ -37,10 +37,6 @@ spec: - key: hardware operator: In values: ["rpi5","rpi4"] - hostAliases: - - ip: 10.43.60.6 - hostnames: - - live.bstein.dev containers: - name: token-service image: registry.bstein.dev/tools/lk-jwt-service-vault:0.3.0 From f91459e55a3cea7c4c703053d5f53c4dbe379acd Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Sat, 17 Jan 2026 19:32:04 -0300 Subject: [PATCH 225/270] comms: restart livekit to reload vault keys --- services/comms/livekit.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/services/comms/livekit.yaml b/services/comms/livekit.yaml index e7f7769..4a5eb8d 100644 --- a/services/comms/livekit.yaml +++ b/services/comms/livekit.yaml @@ -13,7 +13,7 @@ spec: template: metadata: annotations: - checksum/config: livekit-config-v5 + checksum/config: livekit-config-v6 vault.hashicorp.com/agent-inject: "true" vault.hashicorp.com/agent-init-first: 
"true" vault.hashicorp.com/role: "comms" From b576da53c2fac007413e29d5e470a580cfc2cb5b Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Sat, 17 Jan 2026 19:49:19 -0300 Subject: [PATCH 226/270] comms: pin livekit token hostnames --- services/comms/livekit-token-deployment.yaml | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/services/comms/livekit-token-deployment.yaml b/services/comms/livekit-token-deployment.yaml index 9911ecb..a23cf62 100644 --- a/services/comms/livekit-token-deployment.yaml +++ b/services/comms/livekit-token-deployment.yaml @@ -26,6 +26,12 @@ spec: serviceAccountName: comms-vault imagePullSecrets: - name: harbor-regcred + hostAliases: + - ip: "10.43.6.87" + hostnames: + - live.bstein.dev + - matrix.live.bstein.dev + - kit.live.bstein.dev nodeSelector: hardware: rpi5 affinity: From 758610dff08f9c0a7b3e6b488fd0c97323889b2c Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Sat, 17 Jan 2026 20:15:51 -0300 Subject: [PATCH 227/270] core: pin coredns to rpi workers --- infrastructure/core/coredns-deployment.yaml | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/infrastructure/core/coredns-deployment.yaml b/infrastructure/core/coredns-deployment.yaml index d3eb368..1e69eec 100644 --- a/infrastructure/core/coredns-deployment.yaml +++ b/infrastructure/core/coredns-deployment.yaml @@ -81,6 +81,20 @@ spec: - name: custom-config-volume mountPath: /etc/coredns/custom readOnly: true + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: hardware + operator: In + values: + - rpi5 + - rpi4 + - key: node-role.kubernetes.io/worker + operator: In + values: + - "true" dnsPolicy: Default nodeSelector: kubernetes.io/os: linux From e64ba4ca3ca0f9fe24aec2dffbcb8d7b03a5f098 Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Sat, 17 Jan 2026 20:23:32 -0300 Subject: [PATCH 228/270] comms: re-run mas db ensure --- services/comms/mas-db-ensure-job.yaml | 2 +- 1 file changed, 1 
insertion(+), 1 deletion(-) diff --git a/services/comms/mas-db-ensure-job.yaml b/services/comms/mas-db-ensure-job.yaml index 3ddb7e9..56707a9 100644 --- a/services/comms/mas-db-ensure-job.yaml +++ b/services/comms/mas-db-ensure-job.yaml @@ -2,7 +2,7 @@ apiVersion: batch/v1 kind: Job metadata: - name: mas-db-ensure-21 + name: mas-db-ensure-22 namespace: comms spec: backoffLimit: 1 From 0ddbb5ec79b6f6658b7ae2b4789110e38dc385ca Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Sat, 17 Jan 2026 20:27:11 -0300 Subject: [PATCH 229/270] comms: restart mas after db ensure --- services/comms/mas-deployment.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/services/comms/mas-deployment.yaml b/services/comms/mas-deployment.yaml index 7c4e8e4..e5387e2 100644 --- a/services/comms/mas-deployment.yaml +++ b/services/comms/mas-deployment.yaml @@ -13,7 +13,7 @@ spec: template: metadata: annotations: - checksum/config: v5-adminapi-8 + checksum/config: v5-adminapi-9 vault.hashicorp.com/agent-inject: "true" vault.hashicorp.com/agent-init-first: "true" vault.hashicorp.com/role: "comms" From 792b7b1417625a752910b68418b4be6d0c851d64 Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Sat, 17 Jan 2026 20:30:13 -0300 Subject: [PATCH 230/270] comms: rerun mas local users and secrets jobs --- services/comms/comms-secrets-ensure-job.yaml | 2 +- services/comms/mas-local-users-ensure-job.yaml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/services/comms/comms-secrets-ensure-job.yaml b/services/comms/comms-secrets-ensure-job.yaml index ab13671..b71dd40 100644 --- a/services/comms/comms-secrets-ensure-job.yaml +++ b/services/comms/comms-secrets-ensure-job.yaml @@ -2,7 +2,7 @@ apiVersion: batch/v1 kind: Job metadata: - name: comms-secrets-ensure-5 + name: comms-secrets-ensure-6 namespace: comms spec: backoffLimit: 1 diff --git a/services/comms/mas-local-users-ensure-job.yaml b/services/comms/mas-local-users-ensure-job.yaml index c742d48..5802009 100644 --- 
a/services/comms/mas-local-users-ensure-job.yaml +++ b/services/comms/mas-local-users-ensure-job.yaml @@ -2,7 +2,7 @@ apiVersion: batch/v1 kind: Job metadata: - name: mas-local-users-ensure-14 + name: mas-local-users-ensure-15 namespace: comms spec: backoffLimit: 1 From a9b94c87bea63a693bb6ba86fdc1b1757c4930c1 Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Sat, 17 Jan 2026 20:49:11 -0300 Subject: [PATCH 231/270] comms: route live host login to mas --- services/comms/matrix-ingress.yaml | 35 ++++++++++++++++++++++++++++++ 1 file changed, 35 insertions(+) diff --git a/services/comms/matrix-ingress.yaml b/services/comms/matrix-ingress.yaml index caaa593..cf3d198 100644 --- a/services/comms/matrix-ingress.yaml +++ b/services/comms/matrix-ingress.yaml @@ -81,6 +81,41 @@ spec: - host: live.bstein.dev http: paths: + - path: /_matrix/client/v3/register + pathType: Prefix + backend: + service: + name: matrix-guest-register + port: + number: 8080 + - path: /_matrix/client/r0/register + pathType: Prefix + backend: + service: + name: matrix-guest-register + port: + number: 8080 + - path: /_matrix/client/v3/login + pathType: Prefix + backend: + service: + name: matrix-authentication-service + port: + number: 8080 + - path: /_matrix/client/v3/logout + pathType: Exact + backend: + service: + name: matrix-authentication-service + port: + number: 8080 + - path: /_matrix/client/v3/refresh + pathType: Exact + backend: + service: + name: matrix-authentication-service + port: + number: 8080 - path: /_matrix pathType: Prefix backend: From 74f089dc21c5667ca5a4829712e273bc387c0ccd Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Sun, 18 Jan 2026 00:33:09 -0300 Subject: [PATCH 232/270] bstein-dev-home: bump images to 0.1.1-101 --- services/bstein-dev-home/kustomization.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/services/bstein-dev-home/kustomization.yaml b/services/bstein-dev-home/kustomization.yaml index 7efaa91..380141d 100644 --- 
a/services/bstein-dev-home/kustomization.yaml +++ b/services/bstein-dev-home/kustomization.yaml @@ -20,9 +20,9 @@ resources: - ingress.yaml images: - name: registry.bstein.dev/bstein/bstein-dev-home-frontend - newTag: 0.1.1-95 # {"$imagepolicy": "bstein-dev-home:bstein-dev-home-frontend"} + newTag: 0.1.1-101 # {"$imagepolicy": "bstein-dev-home:bstein-dev-home-frontend"} - name: registry.bstein.dev/bstein/bstein-dev-home-backend - newTag: 0.1.1-95 # {"$imagepolicy": "bstein-dev-home:bstein-dev-home-backend"} + newTag: 0.1.1-101 # {"$imagepolicy": "bstein-dev-home:bstein-dev-home-backend"} configMapGenerator: - name: chat-ai-gateway namespace: bstein-dev-home From f753f114c75c36ebcbd98a00f012d4578bc1517c Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Sun, 18 Jan 2026 00:44:11 -0300 Subject: [PATCH 233/270] bstein-dev-home: bump images to 0.1.1-102 --- services/bstein-dev-home/kustomization.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/services/bstein-dev-home/kustomization.yaml b/services/bstein-dev-home/kustomization.yaml index 380141d..bdcd593 100644 --- a/services/bstein-dev-home/kustomization.yaml +++ b/services/bstein-dev-home/kustomization.yaml @@ -20,9 +20,9 @@ resources: - ingress.yaml images: - name: registry.bstein.dev/bstein/bstein-dev-home-frontend - newTag: 0.1.1-101 # {"$imagepolicy": "bstein-dev-home:bstein-dev-home-frontend"} + newTag: 0.1.1-102 # {"$imagepolicy": "bstein-dev-home:bstein-dev-home-frontend"} - name: registry.bstein.dev/bstein/bstein-dev-home-backend - newTag: 0.1.1-101 # {"$imagepolicy": "bstein-dev-home:bstein-dev-home-backend"} + newTag: 0.1.1-102 # {"$imagepolicy": "bstein-dev-home:bstein-dev-home-backend"} configMapGenerator: - name: chat-ai-gateway namespace: bstein-dev-home From 418d201da066179a3d20f1dfb078bf9410e0c1be Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Sun, 18 Jan 2026 00:47:38 -0300 Subject: [PATCH 234/270] mailu: gate sync to approved users --- scripts/tests/test_mailu_sync.py | 20 
++++++++++++++----- .../finance/scripts/firefly_user_sync.php | 7 +++++++ services/keycloak/realm-settings-job.yaml | 8 ++++++++ services/mailu/scripts/mailu_sync.py | 12 +++++++++++ 4 files changed, 42 insertions(+), 5 deletions(-) diff --git a/scripts/tests/test_mailu_sync.py b/scripts/tests/test_mailu_sync.py index 49bd2e4..c12bc8d 100644 --- a/scripts/tests/test_mailu_sync.py +++ b/scripts/tests/test_mailu_sync.py @@ -144,8 +144,18 @@ def test_main_generates_password_and_upserts(monkeypatch): sync = load_sync_module(monkeypatch) monkeypatch.setattr(sync.bcrypt_sha256, "hash", lambda password: f"hash:{password}") users = [ - {"id": "u1", "username": "user1", "email": "user1@example.com", "attributes": {}}, - {"id": "u2", "username": "user2", "email": "user2@example.com", "attributes": {"mailu_app_password": ["keepme"]}}, + { + "id": "u1", + "username": "user1", + "email": "user1@example.com", + "attributes": {"mailu_enabled": ["true"]}, + }, + { + "id": "u2", + "username": "user2", + "email": "user2@example.com", + "attributes": {"mailu_app_password": ["keepme"], "mailu_enabled": ["true"]}, + }, {"id": "u3", "username": "user3", "email": "user3@other.com", "attributes": {}}, ] updated = [] @@ -185,6 +195,6 @@ def test_main_generates_password_and_upserts(monkeypatch): sync.main() - # Always backfill mailu_email, even if Keycloak recovery email is external. - assert len(updated) == 3 - assert conns and len(conns[0]._cursor.executions) == 3 + # Only mail-enabled users are synced and backfilled. 
+ assert len(updated) == 2 + assert conns and len(conns[0]._cursor.executions) == 2 diff --git a/services/finance/scripts/firefly_user_sync.php b/services/finance/scripts/firefly_user_sync.php index dcb78ea..4036c3d 100644 --- a/services/finance/scripts/firefly_user_sync.php +++ b/services/finance/scripts/firefly_user_sync.php @@ -6,6 +6,7 @@ declare(strict_types=1); use FireflyIII\Console\Commands\Correction\CreatesGroupMemberships; use FireflyIII\Models\Role; use FireflyIII\Repositories\User\UserRepositoryInterface; +use FireflyIII\Support\Facades\FireflyConfig; use FireflyIII\User; use Illuminate\Contracts\Console\Kernel as ConsoleKernel; @@ -70,6 +71,12 @@ $app = require $app_bootstrap; $kernel = $app->make(ConsoleKernel::class); $kernel->bootstrap(); +try { + FireflyConfig::set('single_user_mode', true); +} catch (Throwable $exc) { + error_line('failed to enforce single_user_mode: '.$exc->getMessage()); +} + $repository = $app->make(UserRepositoryInterface::class); $existing_user = User::where('email', $email)->first(); diff --git a/services/keycloak/realm-settings-job.yaml b/services/keycloak/realm-settings-job.yaml index 483bc0c..4259876 100644 --- a/services/keycloak/realm-settings-job.yaml +++ b/services/keycloak/realm-settings-job.yaml @@ -220,6 +220,14 @@ spec: "permissions": {"view": ["admin"], "edit": ["admin"]}, "validations": {"length": {"max": 255}}, }, + { + "name": "mailu_enabled", + "displayName": "Atlas Mailbox Enabled", + "multivalued": False, + "annotations": {"group": "user-metadata"}, + "permissions": {"view": ["admin"], "edit": ["admin"]}, + "validations": {"length": {"max": 16}}, + }, { "name": "nextcloud_mail_primary_email", "displayName": "Nextcloud Mail Primary Email", diff --git a/services/mailu/scripts/mailu_sync.py b/services/mailu/scripts/mailu_sync.py index d1754cb..8f2071a 100644 --- a/services/mailu/scripts/mailu_sync.py +++ b/services/mailu/scripts/mailu_sync.py @@ -25,6 +25,7 @@ KC_CLIENT_SECRET = 
os.environ["KEYCLOAK_CLIENT_SECRET"] MAILU_DOMAIN = os.environ["MAILU_DOMAIN"] MAILU_DEFAULT_QUOTA = int(os.environ.get("MAILU_DEFAULT_QUOTA", "20000000000")) +MAILU_ENABLED_ATTR = os.environ.get("MAILU_ENABLED_ATTR", "mailu_enabled") DB_CONFIG = { "host": os.environ["MAILU_DB_HOST"], @@ -141,6 +142,13 @@ def get_attribute_value(attributes, key): return None +def mailu_enabled(attributes) -> bool: + raw = get_attribute_value(attributes, MAILU_ENABLED_ATTR) + if raw is None: + return False + return str(raw).strip().lower() in {"1", "true", "yes", "y", "on"} + + def resolve_mailu_email(user, attributes): explicit = get_attribute_value(attributes, "mailu_email") if explicit: @@ -209,6 +217,10 @@ def main(): for user in users: attrs = user.get("attributes", {}) or {} + if user.get("enabled") is False: + continue + if not mailu_enabled(attrs): + continue app_pw = get_attribute_value(attrs, "mailu_app_password") mailu_email = resolve_mailu_email(user, attrs) From 9c2cb1b037ce82b6e942baeea98f6d3c3bb5598a Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Sun, 18 Jan 2026 01:08:31 -0300 Subject: [PATCH 235/270] mailu: preserve keycloak profile fields --- scripts/tests/test_mailu_sync.py | 20 +++++++++++-------- services/mailu/scripts/mailu_sync.py | 30 ++++++++++++++++++---------- 2 files changed, 31 insertions(+), 19 deletions(-) diff --git a/scripts/tests/test_mailu_sync.py b/scripts/tests/test_mailu_sync.py index c12bc8d..f3edbd1 100644 --- a/scripts/tests/test_mailu_sync.py +++ b/scripts/tests/test_mailu_sync.py @@ -55,11 +55,11 @@ class _FakeResponse: class _FakeSession: - def __init__(self, put_resp, get_resp): + def __init__(self, put_resp, get_resps): self.put_resp = put_resp - self.get_resp = get_resp + self.get_resps = list(get_resps) self.put_called = False - self.get_called = False + self.get_calls = 0 def post(self, *args, **kwargs): return _FakeResponse({"access_token": "dummy"}) @@ -69,22 +69,26 @@ class _FakeSession: return self.put_resp def get(self, *args, 
**kwargs): - self.get_called = True - return self.get_resp + self.get_calls += 1 + if self.get_resps: + return self.get_resps.pop(0) + return _FakeResponse({}) def test_kc_update_attributes_succeeds(monkeypatch): sync = load_sync_module(monkeypatch) + current_resp = _FakeResponse({"attributes": {}}) ok_resp = _FakeResponse({"attributes": {"mailu_app_password": ["abc"]}}) - sync.SESSION = _FakeSession(_FakeResponse({}), ok_resp) + sync.SESSION = _FakeSession(_FakeResponse({}), [current_resp, ok_resp]) sync.kc_update_attributes("token", {"id": "u1", "username": "u1"}, {"mailu_app_password": "abc"}) - assert sync.SESSION.put_called and sync.SESSION.get_called + assert sync.SESSION.put_called and sync.SESSION.get_calls == 2 def test_kc_update_attributes_raises_without_attribute(monkeypatch): sync = load_sync_module(monkeypatch) + current_resp = _FakeResponse({"attributes": {}}) missing_attr_resp = _FakeResponse({"attributes": {}}, status=200) - sync.SESSION = _FakeSession(_FakeResponse({}), missing_attr_resp) + sync.SESSION = _FakeSession(_FakeResponse({}), [current_resp, missing_attr_resp]) with pytest.raises(Exception): sync.kc_update_attributes("token", {"id": "u1", "username": "u1"}, {"mailu_app_password": "abc"}) diff --git a/services/mailu/scripts/mailu_sync.py b/services/mailu/scripts/mailu_sync.py index 8f2071a..afd1d49 100644 --- a/services/mailu/scripts/mailu_sync.py +++ b/services/mailu/scripts/mailu_sync.py @@ -87,7 +87,12 @@ def kc_get_users(token): while True: resp = SESSION.get( f"{KC_BASE}/admin/realms/{KC_REALM}/users", - params={"first": first, "max": max_results, "enabled": "true"}, + params={ + "first": first, + "max": max_results, + "enabled": "true", + "briefRepresentation": "false", + }, headers=headers, timeout=20, ) @@ -105,17 +110,20 @@ def kc_update_attributes(token, user, attributes): "Authorization": f"Bearer {token}", "Content-Type": "application/json", } - payload = { - "firstName": user.get("firstName"), - "lastName": 
user.get("lastName"), - "email": user.get("email"), - "enabled": user.get("enabled", True), - "username": user["username"], - "emailVerified": user.get("emailVerified", False), - "attributes": attributes, - } user_url = f"{KC_BASE}/admin/realms/{KC_REALM}/users/{user['id']}" - resp = SESSION.put(user_url, headers=headers, json=payload, timeout=20) + current = SESSION.get( + user_url, + headers={"Authorization": f"Bearer {token}"}, + params={"briefRepresentation": "false"}, + timeout=15, + ) + current.raise_for_status() + current_payload = current.json() + current_attrs = current_payload.get("attributes") if isinstance(current_payload, dict) else None + if not isinstance(current_attrs, dict): + current_attrs = {} + current_attrs.update(attributes) + resp = SESSION.put(user_url, headers=headers, json={"attributes": current_attrs}, timeout=20) resp.raise_for_status() verify = SESSION.get( user_url, From 4c4c0867a74ba4cc77910b4a0b499f7e84c38556 Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Sun, 18 Jan 2026 01:14:15 -0300 Subject: [PATCH 236/270] bstein-dev-home: add smtp env for access requests --- services/bstein-dev-home/backend-deployment.yaml | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/services/bstein-dev-home/backend-deployment.yaml b/services/bstein-dev-home/backend-deployment.yaml index 7ccca82..9d820b7 100644 --- a/services/bstein-dev-home/backend-deployment.yaml +++ b/services/bstein-dev-home/backend-deployment.yaml @@ -33,6 +33,15 @@ spec: export PORTAL_E2E_CLIENT_ID="{{ .Data.data.client_id }}" export PORTAL_E2E_CLIENT_SECRET="{{ .Data.data.client_secret }}" {{ end }} + {{ with secret "kv/data/atlas/shared/postmark-relay" }} + export SMTP_HOST="mail.bstein.dev" + export SMTP_PORT="587" + export SMTP_STARTTLS="true" + export SMTP_USE_TLS="false" + export SMTP_USERNAME="{{ index .Data.data "relay-username" }}" + export SMTP_PASSWORD="{{ index .Data.data "relay-password" }}" + export SMTP_FROM="no-reply-portal@bstein.dev" + {{ end }} spec: 
automountServiceAccountToken: true serviceAccountName: bstein-dev-home From 7cd2f3c587c60e6aadcbd41609bc261eb9098bcf Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Sun, 18 Jan 2026 01:17:52 -0300 Subject: [PATCH 237/270] vault: allow portal to read postmark relay --- services/vault/scripts/vault_k8s_auth_configure.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/services/vault/scripts/vault_k8s_auth_configure.sh b/services/vault/scripts/vault_k8s_auth_configure.sh index 7566866..c18a898 100644 --- a/services/vault/scripts/vault_k8s_auth_configure.sh +++ b/services/vault/scripts/vault_k8s_auth_configure.sh @@ -203,7 +203,7 @@ write_policy_and_role "outline" "outline" "outline-vault" \ write_policy_and_role "planka" "planka" "planka-vault" \ "planka/* shared/postmark-relay" "" write_policy_and_role "bstein-dev-home" "bstein-dev-home" "bstein-dev-home,bstein-dev-home-vault-sync" \ - "portal/* shared/chat-ai-keys-runtime shared/portal-e2e-client harbor-pull/bstein-dev-home" "" + "portal/* shared/chat-ai-keys-runtime shared/portal-e2e-client shared/postmark-relay harbor-pull/bstein-dev-home" "" write_policy_and_role "gitea" "gitea" "gitea-vault" \ "gitea/*" "" write_policy_and_role "vaultwarden" "vaultwarden" "vaultwarden-vault" \ From c9cb08819805918f5c6816d0f63509cf416459b4 Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Sun, 18 Jan 2026 01:58:17 -0300 Subject: [PATCH 238/270] keycloak: rerun realm settings job --- services/keycloak/realm-settings-job.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/services/keycloak/realm-settings-job.yaml b/services/keycloak/realm-settings-job.yaml index 4259876..0cef1cb 100644 --- a/services/keycloak/realm-settings-job.yaml +++ b/services/keycloak/realm-settings-job.yaml @@ -2,7 +2,7 @@ apiVersion: batch/v1 kind: Job metadata: - name: keycloak-realm-settings-30 + name: keycloak-realm-settings-31 namespace: sso spec: backoffLimit: 0 From 0d27107411d4af34c3e478c69b7559d4c416bd5a Mon Sep 17 
00:00:00 2001 From: Brad Stein Date: Sun, 18 Jan 2026 02:03:13 -0300 Subject: [PATCH 239/270] mailu: backfill mailu_enabled for legacy users --- scripts/tests/test_mailu_sync.py | 14 ++++++++++---- services/mailu/scripts/mailu_sync.py | 14 +++++++++----- 2 files changed, 19 insertions(+), 9 deletions(-) diff --git a/scripts/tests/test_mailu_sync.py b/scripts/tests/test_mailu_sync.py index f3edbd1..d5f9487 100644 --- a/scripts/tests/test_mailu_sync.py +++ b/scripts/tests/test_mailu_sync.py @@ -160,7 +160,13 @@ def test_main_generates_password_and_upserts(monkeypatch): "email": "user2@example.com", "attributes": {"mailu_app_password": ["keepme"], "mailu_enabled": ["true"]}, }, - {"id": "u3", "username": "user3", "email": "user3@other.com", "attributes": {}}, + { + "id": "u3", + "username": "user3", + "email": "user3@example.com", + "attributes": {"mailu_email": ["user3@example.com"]}, + }, + {"id": "u4", "username": "user4", "email": "user4@other.com", "attributes": {}}, ] updated = [] @@ -199,6 +205,6 @@ def test_main_generates_password_and_upserts(monkeypatch): sync.main() - # Only mail-enabled users are synced and backfilled. - assert len(updated) == 2 - assert conns and len(conns[0]._cursor.executions) == 2 + # Only mail-enabled users (or legacy users with a mailbox) are synced and backfilled. 
+ assert len(updated) == 3 + assert conns and len(conns[0]._cursor.executions) == 3 diff --git a/services/mailu/scripts/mailu_sync.py b/services/mailu/scripts/mailu_sync.py index afd1d49..7c5edda 100644 --- a/services/mailu/scripts/mailu_sync.py +++ b/services/mailu/scripts/mailu_sync.py @@ -26,6 +26,7 @@ KC_CLIENT_SECRET = os.environ["KEYCLOAK_CLIENT_SECRET"] MAILU_DOMAIN = os.environ["MAILU_DOMAIN"] MAILU_DEFAULT_QUOTA = int(os.environ.get("MAILU_DEFAULT_QUOTA", "20000000000")) MAILU_ENABLED_ATTR = os.environ.get("MAILU_ENABLED_ATTR", "mailu_enabled") +MAILU_EMAIL_ATTR = "mailu_email" DB_CONFIG = { "host": os.environ["MAILU_DB_HOST"], @@ -153,12 +154,12 @@ def get_attribute_value(attributes, key): def mailu_enabled(attributes) -> bool: raw = get_attribute_value(attributes, MAILU_ENABLED_ATTR) if raw is None: - return False + return bool(get_attribute_value(attributes, MAILU_EMAIL_ATTR)) return str(raw).strip().lower() in {"1", "true", "yes", "y", "on"} def resolve_mailu_email(user, attributes): - explicit = get_attribute_value(attributes, "mailu_email") + explicit = get_attribute_value(attributes, MAILU_EMAIL_ATTR) if explicit: return explicit @@ -227,14 +228,17 @@ def main(): attrs = user.get("attributes", {}) or {} if user.get("enabled") is False: continue + needs_update = False + if get_attribute_value(attrs, MAILU_ENABLED_ATTR) is None and get_attribute_value(attrs, MAILU_EMAIL_ATTR): + attrs[MAILU_ENABLED_ATTR] = ["true"] + needs_update = True if not mailu_enabled(attrs): continue app_pw = get_attribute_value(attrs, "mailu_app_password") mailu_email = resolve_mailu_email(user, attrs) - needs_update = False - if not get_attribute_value(attrs, "mailu_email"): - attrs["mailu_email"] = [mailu_email] + if not get_attribute_value(attrs, MAILU_EMAIL_ATTR): + attrs[MAILU_EMAIL_ATTR] = [mailu_email] needs_update = True if not app_pw: From a6ac0c363ef736fc60ec83e7e84c064d7906f358 Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Sun, 18 Jan 2026 02:37:26 -0300 Subject: 
[PATCH 240/270] nextcloud-mail-sync: harden keycloak fetch --- services/nextcloud-mail-sync/cronjob.yaml | 2 +- .../scripts/nextcloud-mail-sync.sh | 12 ++++++++---- 2 files changed, 9 insertions(+), 5 deletions(-) diff --git a/services/nextcloud-mail-sync/cronjob.yaml b/services/nextcloud-mail-sync/cronjob.yaml index 71aaef4..728bf98 100644 --- a/services/nextcloud-mail-sync/cronjob.yaml +++ b/services/nextcloud-mail-sync/cronjob.yaml @@ -57,7 +57,7 @@ spec: - -c env: - name: KC_BASE - value: https://sso.bstein.dev + value: http://keycloak.sso.svc.cluster.local - name: KC_REALM value: atlas - name: MAILU_DOMAIN diff --git a/services/nextcloud-mail-sync/scripts/nextcloud-mail-sync.sh b/services/nextcloud-mail-sync/scripts/nextcloud-mail-sync.sh index 6c883fc..681308e 100755 --- a/services/nextcloud-mail-sync/scripts/nextcloud-mail-sync.sh +++ b/services/nextcloud-mail-sync/scripts/nextcloud-mail-sync.sh @@ -81,7 +81,7 @@ list_mail_accounts() { } token=$( - curl -s -d "grant_type=password" \ + curl -fsS -d "grant_type=password" \ -d "client_id=admin-cli" \ -d "username=${KC_ADMIN_USER}" \ -d "password=${KC_ADMIN_PASS}" \ @@ -95,13 +95,17 @@ fi cd /var/www/html -kc_users_url="${KC_BASE}/admin/realms/${KC_REALM}/users?max=2000" +kc_users_url="${KC_BASE}/admin/realms/${KC_REALM}/users?max=2000&briefRepresentation=false" if [[ -n "${ONLY_USERNAME}" ]]; then username_q=$(jq -nr --arg v "${ONLY_USERNAME}" '$v|@uri') - kc_users_url="${KC_BASE}/admin/realms/${KC_REALM}/users?username=${username_q}&exact=true&max=1" + kc_users_url="${KC_BASE}/admin/realms/${KC_REALM}/users?username=${username_q}&exact=true&max=1&briefRepresentation=false" fi -users=$(curl -s -H "Authorization: Bearer ${token}" "${kc_users_url}") +users=$(curl -fsS -H "Authorization: Bearer ${token}" "${kc_users_url}") +if ! 
jq -e 'type == "array"' >/dev/null 2>&1 <<<"${users}"; then + echo "ERROR: Keycloak user list is not an array; aborting sync" >&2 + exit 1 +fi kc_set_user_mail_meta() { local user_id="${1}" From 343d41ecc74d64415859721a6981efeb0ee57ce2 Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Sun, 18 Jan 2026 02:50:07 -0300 Subject: [PATCH 241/270] monitoring: add glue dashboard and tag cronjobs --- scripts/dashboards_render_atlas.py | 150 +++++++- .../vaultwarden-cred-sync-cronjob.yaml | 2 + services/comms/guest-name-job.yaml | 2 + services/comms/pin-othrys-job.yaml | 4 +- services/comms/reset-othrys-room-job.yaml | 4 +- services/comms/seed-othrys-room.yaml | 4 +- .../finance/firefly-user-sync-cronjob.yaml | 2 + .../health/wger-admin-ensure-cronjob.yaml | 2 + services/health/wger-user-sync-cronjob.yaml | 2 + services/mailu/mailu-sync-cronjob.yaml | 4 +- .../monitoring/dashboards/atlas-glue.json | 339 +++++++++++++++++ services/monitoring/dashboards/atlas-gpu.json | 30 +- .../monitoring/dashboards/atlas-nodes.json | 2 +- .../monitoring/dashboards/atlas-overview.json | 133 +++++-- .../monitoring/dashboards/atlas-pods.json | 2 +- .../monitoring/grafana-dashboard-glue.yaml | 348 ++++++++++++++++++ .../monitoring/grafana-dashboard-gpu.yaml | 30 +- .../monitoring/grafana-dashboard-nodes.yaml | 2 +- .../grafana-dashboard-overview.yaml | 133 +++++-- .../monitoring/grafana-dashboard-pods.yaml | 2 +- services/monitoring/kustomization.yaml | 1 + services/nextcloud-mail-sync/cronjob.yaml | 2 + services/outline/deployment.yaml | 2 +- services/planka/deployment.yaml | 2 +- services/vault/k8s-auth-config-cronjob.yaml | 2 + services/vault/oidc-config-cronjob.yaml | 2 + 26 files changed, 1095 insertions(+), 113 deletions(-) create mode 100644 services/monitoring/dashboards/atlas-glue.json create mode 100644 services/monitoring/grafana-dashboard-glue.yaml diff --git a/scripts/dashboards_render_atlas.py b/scripts/dashboards_render_atlas.py index a5abfe8..4aa2908 100644 --- 
a/scripts/dashboards_render_atlas.py +++ b/scripts/dashboards_render_atlas.py @@ -85,19 +85,17 @@ WORKER_TOTAL = len(WORKER_NODES) CONTROL_SUFFIX = f"/{CONTROL_TOTAL}" WORKER_SUFFIX = f"/{WORKER_TOTAL}" # Namespaces considered infrastructure (excluded from workload counts) -INFRA_NAMESPACES = [ - "kube-system", - "longhorn-system", - "metallb-system", +INFRA_PATTERNS = [ + "kube-.*", + ".*-system", + "traefik", "monitoring", "logging", "cert-manager", - "flux-system", - "traefik", "maintenance", "postgres", ] -INFRA_REGEX = f"^({'|'.join(INFRA_NAMESPACES)})$" +INFRA_REGEX = f"^({'|'.join(INFRA_PATTERNS)})$" # Namespaces allowed on control plane without counting as workloads CP_ALLOWED_NS = INFRA_REGEX LONGHORN_NODE_REGEX = "titan-1[2-9]|titan-2[24]" @@ -319,6 +317,21 @@ NAMESPACE_SCOPE_WORKLOAD = f'namespace!~"{INFRA_REGEX}"' NAMESPACE_SCOPE_ALL = 'namespace=~".*"' NAMESPACE_SCOPE_INFRA = f'namespace=~"{INFRA_REGEX}"' NAMESPACE_SCOPE_VARS = ["namespace_scope_cpu", "namespace_scope_gpu", "namespace_scope_ram"] +GLUE_LABEL = 'label_atlas_bstein_dev_glue="true"' +GLUE_JOBS = f"kube_cronjob_labels{{{GLUE_LABEL}}}" +GLUE_LAST_SUCCESS = f"kube_cronjob_status_last_successful_time{{{GLUE_LABEL}}}" +GLUE_LAST_SCHEDULE = f"kube_cronjob_status_last_schedule_time{{{GLUE_LABEL}}}" +GLUE_SUSPENDED = f"kube_cronjob_spec_suspend{{{GLUE_LABEL}}} == 1" +GLUE_LAST_SUCCESS_AGE = f"(time() - {GLUE_LAST_SUCCESS})" +GLUE_LAST_SCHEDULE_AGE = f"(time() - {GLUE_LAST_SCHEDULE})" +GLUE_LAST_SUCCESS_AGE_HOURS = f"({GLUE_LAST_SUCCESS_AGE}) / 3600" +GLUE_LAST_SCHEDULE_AGE_HOURS = f"({GLUE_LAST_SCHEDULE_AGE}) / 3600" +GLUE_STALE_WINDOW_SEC = 36 * 3600 +GLUE_STALE = f"({GLUE_LAST_SUCCESS_AGE} > bool {GLUE_STALE_WINDOW_SEC})" +GLUE_MISSING = f"({GLUE_JOBS} unless {GLUE_LAST_SUCCESS})" +GLUE_STALE_ACTIVE = f"({GLUE_STALE} unless on(namespace,cronjob) {GLUE_SUSPENDED})" +GLUE_MISSING_ACTIVE = f"({GLUE_MISSING} unless on(namespace,cronjob) {GLUE_SUSPENDED})" +GLUE_STALE_COUNT = 
f"(sum({GLUE_STALE_ACTIVE}) + count({GLUE_MISSING_ACTIVE}))" GPU_NODES = ["titan-20", "titan-21", "titan-22", "titan-24"] GPU_NODE_REGEX = "|".join(GPU_NODES) TRAEFIK_ROUTER_EXPR = "sum by (router) (rate(traefik_router_requests_total[5m]))" @@ -965,7 +978,7 @@ def build_overview(): 30, "Mail Sent (1d)", 'max(postmark_outbound_sent{window="1d"})', - {"h": 2, "w": 6, "x": 0, "y": 8}, + {"h": 2, "w": 5, "x": 0, "y": 8}, unit="none", links=link_to("atlas-mail"), ) @@ -976,7 +989,7 @@ def build_overview(): "type": "stat", "title": "Mail Bounces (1d)", "datasource": PROM_DS, - "gridPos": {"h": 2, "w": 6, "x": 12, "y": 8}, + "gridPos": {"h": 2, "w": 5, "x": 10, "y": 8}, "targets": [ { "expr": 'max(postmark_outbound_bounce_rate{window="1d"})', @@ -1022,7 +1035,7 @@ def build_overview(): 32, "Mail Success Rate (1d)", 'clamp_min(100 - max(postmark_outbound_bounce_rate{window="1d"}), 0)', - {"h": 2, "w": 6, "x": 6, "y": 8}, + {"h": 2, "w": 5, "x": 5, "y": 8}, unit="percent", thresholds=mail_success_thresholds, decimals=1, @@ -1034,13 +1047,24 @@ def build_overview(): 33, "Mail Limit Used (30d)", "max(postmark_sending_limit_used_percent)", - {"h": 2, "w": 6, "x": 18, "y": 8}, + {"h": 2, "w": 5, "x": 15, "y": 8}, unit="percent", thresholds=mail_limit_thresholds, decimals=1, links=link_to("atlas-mail"), ) ) + panels.append( + stat_panel( + 34, + "Glue Jobs Stale", + GLUE_STALE_COUNT, + {"h": 2, "w": 4, "x": 20, "y": 8}, + unit="none", + thresholds=count_thresholds, + links=link_to("atlas-glue"), + ) + ) storage_panels = [ (23, "Astreae Usage", astreae_usage_expr("/mnt/astreae"), "percent"), @@ -1072,7 +1096,7 @@ def build_overview(): namespace_cpu_share_expr(cpu_scope), {"h": 9, "w": 8, "x": 0, "y": 16}, links=namespace_scope_links("namespace_scope_cpu"), - description="Values are normalized within the selected scope; use panel links to switch scope.", + description="Shares are normalized within the selected filter. 
Switching scope changes the denominator.", ) ) panels.append( @@ -1082,7 +1106,7 @@ def build_overview(): namespace_gpu_share_expr(gpu_scope), {"h": 9, "w": 8, "x": 8, "y": 16}, links=namespace_scope_links("namespace_scope_gpu"), - description="Values are normalized within the selected scope; use panel links to switch scope.", + description="Shares are normalized within the selected filter. Switching scope changes the denominator.", ) ) panels.append( @@ -1092,7 +1116,7 @@ def build_overview(): namespace_ram_share_expr(ram_scope), {"h": 9, "w": 8, "x": 16, "y": 16}, links=namespace_scope_links("namespace_scope_ram"), - description="Values are normalized within the selected scope; use panel links to switch scope.", + description="Shares are normalized within the selected filter. Switching scope changes the denominator.", ) ) @@ -2136,6 +2160,98 @@ def build_mail_dashboard(): } +def build_glue_dashboard(): + panels = [] + sort_desc = [{"id": "labelsToFields", "options": {}}, {"id": "sortBy", "options": {"fields": ["Value"], "order": "desc"}}] + + panels.append( + stat_panel( + 1, + "Glue Jobs Stale (>36h)", + GLUE_STALE_COUNT, + {"h": 4, "w": 6, "x": 0, "y": 0}, + unit="none", + thresholds={ + "mode": "absolute", + "steps": [ + {"color": "green", "value": None}, + {"color": "yellow", "value": 1}, + {"color": "orange", "value": 2}, + {"color": "red", "value": 3}, + ], + }, + ) + ) + panels.append( + table_panel( + 2, + "Glue Jobs Missing Success", + GLUE_MISSING, + {"h": 4, "w": 6, "x": 6, "y": 0}, + unit="none", + transformations=sort_desc, + instant=True, + ) + ) + panels.append( + table_panel( + 3, + "Glue Jobs Suspended", + f"kube_cronjob_spec_suspend{{{GLUE_LABEL}}} == 1", + {"h": 4, "w": 6, "x": 12, "y": 0}, + unit="none", + transformations=sort_desc, + instant=True, + ) + ) + panels.append( + table_panel( + 4, + "Glue Jobs Active Runs", + f"kube_cronjob_status_active{{{GLUE_LABEL}}}", + {"h": 4, "w": 6, "x": 18, "y": 0}, + unit="none", + 
transformations=sort_desc, + instant=True, + ) + ) + panels.append( + table_panel( + 5, + "Glue Jobs Last Success (hours ago)", + GLUE_LAST_SUCCESS_AGE_HOURS, + {"h": 8, "w": 12, "x": 0, "y": 4}, + unit="h", + transformations=sort_desc, + instant=True, + ) + ) + panels.append( + table_panel( + 6, + "Glue Jobs Last Schedule (hours ago)", + GLUE_LAST_SCHEDULE_AGE_HOURS, + {"h": 8, "w": 12, "x": 12, "y": 4}, + unit="h", + transformations=sort_desc, + instant=True, + ) + ) + + return { + "uid": "atlas-glue", + "title": "Atlas Glue", + "folderUid": PRIVATE_FOLDER, + "editable": True, + "panels": panels, + "time": {"from": "now-7d", "to": "now"}, + "annotations": {"list": []}, + "schemaVersion": 39, + "style": "dark", + "tags": ["atlas", "glue"], + } + + def build_gpu_dashboard(): panels = [] gpu_scope = "$namespace_scope_gpu" @@ -2146,7 +2262,7 @@ def build_gpu_dashboard(): namespace_gpu_share_expr(gpu_scope), {"h": 8, "w": 12, "x": 0, "y": 0}, links=namespace_scope_links("namespace_scope_gpu"), - description="Values are normalized within the selected scope; use panel links to switch scope.", + description="Shares are normalized within the selected filter. 
Switching scope changes the denominator.", ) ) panels.append( @@ -2229,6 +2345,10 @@ DASHBOARDS = { "builder": build_mail_dashboard, "configmap": ROOT / "services" / "monitoring" / "grafana-dashboard-mail.yaml", }, + "atlas-glue": { + "builder": build_glue_dashboard, + "configmap": ROOT / "services" / "monitoring" / "grafana-dashboard-glue.yaml", + }, "atlas-gpu": { "builder": build_gpu_dashboard, "configmap": ROOT / "services" / "monitoring" / "grafana-dashboard-gpu.yaml", diff --git a/services/bstein-dev-home/vaultwarden-cred-sync-cronjob.yaml b/services/bstein-dev-home/vaultwarden-cred-sync-cronjob.yaml index 1960d11..86eeaf1 100644 --- a/services/bstein-dev-home/vaultwarden-cred-sync-cronjob.yaml +++ b/services/bstein-dev-home/vaultwarden-cred-sync-cronjob.yaml @@ -4,6 +4,8 @@ kind: CronJob metadata: name: vaultwarden-cred-sync namespace: bstein-dev-home + labels: + atlas.bstein.dev/glue: "true" spec: schedule: "*/15 * * * *" concurrencyPolicy: Forbid diff --git a/services/comms/guest-name-job.yaml b/services/comms/guest-name-job.yaml index 0797168..21a8af5 100644 --- a/services/comms/guest-name-job.yaml +++ b/services/comms/guest-name-job.yaml @@ -4,6 +4,8 @@ kind: CronJob metadata: name: guest-name-randomizer namespace: comms + labels: + atlas.bstein.dev/glue: "true" spec: schedule: "*/1 * * * *" suspend: false diff --git a/services/comms/pin-othrys-job.yaml b/services/comms/pin-othrys-job.yaml index f25c18e..2b29ca3 100644 --- a/services/comms/pin-othrys-job.yaml +++ b/services/comms/pin-othrys-job.yaml @@ -4,6 +4,8 @@ kind: CronJob metadata: name: pin-othrys-invite namespace: comms + labels: + atlas.bstein.dev/glue: "true" spec: schedule: "*/30 * * * *" suspend: true @@ -164,4 +166,4 @@ spec: - name: vault-scripts configMap: name: comms-vault-env - defaultMode: 0555 \ No newline at end of file + defaultMode: 0555 diff --git a/services/comms/reset-othrys-room-job.yaml b/services/comms/reset-othrys-room-job.yaml index c0d941b..ae8585a 100644 --- 
a/services/comms/reset-othrys-room-job.yaml +++ b/services/comms/reset-othrys-room-job.yaml @@ -4,6 +4,8 @@ kind: CronJob metadata: name: othrys-room-reset namespace: comms + labels: + atlas.bstein.dev/glue: "true" spec: schedule: "0 0 1 1 *" suspend: true @@ -307,4 +309,4 @@ spec: - name: vault-scripts configMap: name: comms-vault-env - defaultMode: 0555 \ No newline at end of file + defaultMode: 0555 diff --git a/services/comms/seed-othrys-room.yaml b/services/comms/seed-othrys-room.yaml index ce87c85..804d330 100644 --- a/services/comms/seed-othrys-room.yaml +++ b/services/comms/seed-othrys-room.yaml @@ -4,6 +4,8 @@ kind: CronJob metadata: name: seed-othrys-room namespace: comms + labels: + atlas.bstein.dev/glue: "true" spec: schedule: "*/10 * * * *" suspend: true @@ -180,4 +182,4 @@ spec: - name: vault-scripts configMap: name: comms-vault-env - defaultMode: 0555 \ No newline at end of file + defaultMode: 0555 diff --git a/services/finance/firefly-user-sync-cronjob.yaml b/services/finance/firefly-user-sync-cronjob.yaml index dab7f31..aeadfad 100644 --- a/services/finance/firefly-user-sync-cronjob.yaml +++ b/services/finance/firefly-user-sync-cronjob.yaml @@ -4,6 +4,8 @@ kind: CronJob metadata: name: firefly-user-sync namespace: finance + labels: + atlas.bstein.dev/glue: "true" spec: schedule: "0 6 * * *" suspend: true diff --git a/services/health/wger-admin-ensure-cronjob.yaml b/services/health/wger-admin-ensure-cronjob.yaml index fc18283..db178a3 100644 --- a/services/health/wger-admin-ensure-cronjob.yaml +++ b/services/health/wger-admin-ensure-cronjob.yaml @@ -4,6 +4,8 @@ kind: CronJob metadata: name: wger-admin-ensure namespace: health + labels: + atlas.bstein.dev/glue: "true" spec: schedule: "15 3 * * *" concurrencyPolicy: Forbid diff --git a/services/health/wger-user-sync-cronjob.yaml b/services/health/wger-user-sync-cronjob.yaml index 1645256..de2dbb9 100644 --- a/services/health/wger-user-sync-cronjob.yaml +++ b/services/health/wger-user-sync-cronjob.yaml 
@@ -4,6 +4,8 @@ kind: CronJob metadata: name: wger-user-sync namespace: health + labels: + atlas.bstein.dev/glue: "true" spec: schedule: "0 5 * * *" suspend: true diff --git a/services/mailu/mailu-sync-cronjob.yaml b/services/mailu/mailu-sync-cronjob.yaml index 9e0e35c..57cbd0a 100644 --- a/services/mailu/mailu-sync-cronjob.yaml +++ b/services/mailu/mailu-sync-cronjob.yaml @@ -4,6 +4,8 @@ kind: CronJob metadata: name: mailu-sync-nightly namespace: mailu-mailserver + labels: + atlas.bstein.dev/glue: "true" spec: schedule: "30 4 * * *" concurrencyPolicy: Forbid @@ -79,4 +81,4 @@ spec: - name: vault-scripts configMap: name: mailu-vault-env - defaultMode: 0555 \ No newline at end of file + defaultMode: 0555 diff --git a/services/monitoring/dashboards/atlas-glue.json b/services/monitoring/dashboards/atlas-glue.json new file mode 100644 index 0000000..c836d18 --- /dev/null +++ b/services/monitoring/dashboards/atlas-glue.json @@ -0,0 +1,339 @@ +{ + "uid": "atlas-glue", + "title": "Atlas Glue", + "folderUid": "atlas-internal", + "editable": true, + "panels": [ + { + "id": 1, + "type": "stat", + "title": "Glue Jobs Stale (>36h)", + "datasource": { + "type": "prometheus", + "uid": "atlas-vm" + }, + "gridPos": { + "h": 4, + "w": 6, + "x": 0, + "y": 0 + }, + "targets": [ + { + "expr": "(sum((((time() - kube_cronjob_status_last_successful_time{label_atlas_bstein_dev_glue=\"true\"}) > bool 129600) unless on(namespace,cronjob) kube_cronjob_spec_suspend{label_atlas_bstein_dev_glue=\"true\"} == 1)) + count(((kube_cronjob_labels{label_atlas_bstein_dev_glue=\"true\"} unless kube_cronjob_status_last_successful_time{label_atlas_bstein_dev_glue=\"true\"}) unless on(namespace,cronjob) kube_cronjob_spec_suspend{label_atlas_bstein_dev_glue=\"true\"} == 1)))", + "refId": "A" + } + ], + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": 
"yellow", + "value": 1 + }, + { + "color": "orange", + "value": 2 + }, + { + "color": "red", + "value": 3 + } + ] + }, + "unit": "none", + "custom": { + "displayMode": "auto" + } + }, + "overrides": [] + }, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "center", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "textMode": "value" + } + }, + { + "id": 2, + "type": "table", + "title": "Glue Jobs Missing Success", + "datasource": { + "type": "prometheus", + "uid": "atlas-vm" + }, + "gridPos": { + "h": 4, + "w": 6, + "x": 6, + "y": 0 + }, + "targets": [ + { + "expr": "(kube_cronjob_labels{label_atlas_bstein_dev_glue=\"true\"} unless kube_cronjob_status_last_successful_time{label_atlas_bstein_dev_glue=\"true\"})", + "refId": "A", + "instant": true + } + ], + "fieldConfig": { + "defaults": { + "unit": "none", + "custom": { + "filterable": true + } + }, + "overrides": [] + }, + "options": { + "showHeader": true, + "columnFilters": false + }, + "transformations": [ + { + "id": "labelsToFields", + "options": {} + }, + { + "id": "sortBy", + "options": { + "fields": [ + "Value" + ], + "order": "desc" + } + } + ] + }, + { + "id": 3, + "type": "table", + "title": "Glue Jobs Suspended", + "datasource": { + "type": "prometheus", + "uid": "atlas-vm" + }, + "gridPos": { + "h": 4, + "w": 6, + "x": 12, + "y": 0 + }, + "targets": [ + { + "expr": "kube_cronjob_spec_suspend{label_atlas_bstein_dev_glue=\"true\"} == 1", + "refId": "A", + "instant": true + } + ], + "fieldConfig": { + "defaults": { + "unit": "none", + "custom": { + "filterable": true + } + }, + "overrides": [] + }, + "options": { + "showHeader": true, + "columnFilters": false + }, + "transformations": [ + { + "id": "labelsToFields", + "options": {} + }, + { + "id": "sortBy", + "options": { + "fields": [ + "Value" + ], + "order": "desc" + } + } + ] + }, + { + "id": 4, + "type": "table", + "title": "Glue Jobs Active Runs", + "datasource": { + 
"type": "prometheus", + "uid": "atlas-vm" + }, + "gridPos": { + "h": 4, + "w": 6, + "x": 18, + "y": 0 + }, + "targets": [ + { + "expr": "kube_cronjob_status_active{label_atlas_bstein_dev_glue=\"true\"}", + "refId": "A", + "instant": true + } + ], + "fieldConfig": { + "defaults": { + "unit": "none", + "custom": { + "filterable": true + } + }, + "overrides": [] + }, + "options": { + "showHeader": true, + "columnFilters": false + }, + "transformations": [ + { + "id": "labelsToFields", + "options": {} + }, + { + "id": "sortBy", + "options": { + "fields": [ + "Value" + ], + "order": "desc" + } + } + ] + }, + { + "id": 5, + "type": "table", + "title": "Glue Jobs Last Success (hours ago)", + "datasource": { + "type": "prometheus", + "uid": "atlas-vm" + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 4 + }, + "targets": [ + { + "expr": "((time() - kube_cronjob_status_last_successful_time{label_atlas_bstein_dev_glue=\"true\"})) / 3600", + "refId": "A", + "instant": true + } + ], + "fieldConfig": { + "defaults": { + "unit": "h", + "custom": { + "filterable": true + } + }, + "overrides": [] + }, + "options": { + "showHeader": true, + "columnFilters": false + }, + "transformations": [ + { + "id": "labelsToFields", + "options": {} + }, + { + "id": "sortBy", + "options": { + "fields": [ + "Value" + ], + "order": "desc" + } + } + ] + }, + { + "id": 6, + "type": "table", + "title": "Glue Jobs Last Schedule (hours ago)", + "datasource": { + "type": "prometheus", + "uid": "atlas-vm" + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 4 + }, + "targets": [ + { + "expr": "((time() - kube_cronjob_status_last_schedule_time{label_atlas_bstein_dev_glue=\"true\"})) / 3600", + "refId": "A", + "instant": true + } + ], + "fieldConfig": { + "defaults": { + "unit": "h", + "custom": { + "filterable": true + } + }, + "overrides": [] + }, + "options": { + "showHeader": true, + "columnFilters": false + }, + "transformations": [ + { + "id": "labelsToFields", + "options": {} + }, + { + 
"id": "sortBy", + "options": { + "fields": [ + "Value" + ], + "order": "desc" + } + } + ] + } + ], + "time": { + "from": "now-7d", + "to": "now" + }, + "annotations": { + "list": [] + }, + "schemaVersion": 39, + "style": "dark", + "tags": [ + "atlas", + "glue" + ] +} diff --git a/services/monitoring/dashboards/atlas-gpu.json b/services/monitoring/dashboards/atlas-gpu.json index fb1b216..af8a1c5 100644 --- a/services/monitoring/dashboards/atlas-gpu.json +++ b/services/monitoring/dashboards/atlas-gpu.json @@ -57,7 +57,7 @@ "links": [ { "title": "Workload namespaces only", - "url": "?var-namespace_scope_cpu=${namespace_scope_cpu}&var-namespace_scope_gpu=namespace%21~%22%5E%28kube-system%7Clonghorn-system%7Cmetallb-system%7Cmonitoring%7Clogging%7Ccert-manager%7Cflux-system%7Ctraefik%7Cmaintenance%7Cpostgres%29%24%22&var-namespace_scope_ram=${namespace_scope_ram}", + "url": "?var-namespace_scope_cpu=${namespace_scope_cpu}&var-namespace_scope_gpu=namespace%21~%22%5E%28kube-.%2A%7C.%2A-system%7Ctraefik%7Cmonitoring%7Clogging%7Ccert-manager%7Cmaintenance%7Cpostgres%29%24%22&var-namespace_scope_ram=${namespace_scope_ram}", "targetBlank": false }, { @@ -67,11 +67,11 @@ }, { "title": "Infrastructure namespaces only", - "url": "?var-namespace_scope_cpu=${namespace_scope_cpu}&var-namespace_scope_gpu=namespace%3D~%22%5E%28kube-system%7Clonghorn-system%7Cmetallb-system%7Cmonitoring%7Clogging%7Ccert-manager%7Cflux-system%7Ctraefik%7Cmaintenance%7Cpostgres%29%24%22&var-namespace_scope_ram=${namespace_scope_ram}", + "url": "?var-namespace_scope_cpu=${namespace_scope_cpu}&var-namespace_scope_gpu=namespace%3D~%22%5E%28kube-.%2A%7C.%2A-system%7Ctraefik%7Cmonitoring%7Clogging%7Ccert-manager%7Cmaintenance%7Cpostgres%29%24%22&var-namespace_scope_ram=${namespace_scope_ram}", "targetBlank": false } ], - "description": "Values are normalized within the selected scope; use panel links to switch scope." + "description": "Shares are normalized within the selected filter. 
Switching scope changes the denominator." }, { "id": 2, @@ -207,16 +207,16 @@ "name": "namespace_scope_cpu", "label": "CPU namespace filter", "type": "custom", - "query": "workload namespaces only : namespace!~\"^(kube-system|longhorn-system|metallb-system|monitoring|logging|cert-manager|flux-system|traefik|maintenance|postgres)$\",all namespaces : namespace=~\".*\",infrastructure namespaces only : namespace=~\"^(kube-system|longhorn-system|metallb-system|monitoring|logging|cert-manager|flux-system|traefik|maintenance|postgres)$\"", + "query": "workload namespaces only : namespace!~\"^(kube-.*|.*-system|traefik|monitoring|logging|cert-manager|maintenance|postgres)$\",all namespaces : namespace=~\".*\",infrastructure namespaces only : namespace=~\"^(kube-.*|.*-system|traefik|monitoring|logging|cert-manager|maintenance|postgres)$\"", "current": { "text": "workload namespaces only", - "value": "namespace!~\"^(kube-system|longhorn-system|metallb-system|monitoring|logging|cert-manager|flux-system|traefik|maintenance|postgres)$\"", + "value": "namespace!~\"^(kube-.*|.*-system|traefik|monitoring|logging|cert-manager|maintenance|postgres)$\"", "selected": true }, "options": [ { "text": "workload namespaces only", - "value": "namespace!~\"^(kube-system|longhorn-system|metallb-system|monitoring|logging|cert-manager|flux-system|traefik|maintenance|postgres)$\"", + "value": "namespace!~\"^(kube-.*|.*-system|traefik|monitoring|logging|cert-manager|maintenance|postgres)$\"", "selected": true }, { @@ -226,7 +226,7 @@ }, { "text": "infrastructure namespaces only", - "value": "namespace=~\"^(kube-system|longhorn-system|metallb-system|monitoring|logging|cert-manager|flux-system|traefik|maintenance|postgres)$\"", + "value": "namespace=~\"^(kube-.*|.*-system|traefik|monitoring|logging|cert-manager|maintenance|postgres)$\"", "selected": false } ], @@ -241,16 +241,16 @@ "name": "namespace_scope_gpu", "label": "GPU namespace filter", "type": "custom", - "query": "workload namespaces only 
: namespace!~\"^(kube-system|longhorn-system|metallb-system|monitoring|logging|cert-manager|flux-system|traefik|maintenance|postgres)$\",all namespaces : namespace=~\".*\",infrastructure namespaces only : namespace=~\"^(kube-system|longhorn-system|metallb-system|monitoring|logging|cert-manager|flux-system|traefik|maintenance|postgres)$\"", + "query": "workload namespaces only : namespace!~\"^(kube-.*|.*-system|traefik|monitoring|logging|cert-manager|maintenance|postgres)$\",all namespaces : namespace=~\".*\",infrastructure namespaces only : namespace=~\"^(kube-.*|.*-system|traefik|monitoring|logging|cert-manager|maintenance|postgres)$\"", "current": { "text": "workload namespaces only", - "value": "namespace!~\"^(kube-system|longhorn-system|metallb-system|monitoring|logging|cert-manager|flux-system|traefik|maintenance|postgres)$\"", + "value": "namespace!~\"^(kube-.*|.*-system|traefik|monitoring|logging|cert-manager|maintenance|postgres)$\"", "selected": true }, "options": [ { "text": "workload namespaces only", - "value": "namespace!~\"^(kube-system|longhorn-system|metallb-system|monitoring|logging|cert-manager|flux-system|traefik|maintenance|postgres)$\"", + "value": "namespace!~\"^(kube-.*|.*-system|traefik|monitoring|logging|cert-manager|maintenance|postgres)$\"", "selected": true }, { @@ -260,7 +260,7 @@ }, { "text": "infrastructure namespaces only", - "value": "namespace=~\"^(kube-system|longhorn-system|metallb-system|monitoring|logging|cert-manager|flux-system|traefik|maintenance|postgres)$\"", + "value": "namespace=~\"^(kube-.*|.*-system|traefik|monitoring|logging|cert-manager|maintenance|postgres)$\"", "selected": false } ], @@ -275,16 +275,16 @@ "name": "namespace_scope_ram", "label": "RAM namespace filter", "type": "custom", - "query": "workload namespaces only : namespace!~\"^(kube-system|longhorn-system|metallb-system|monitoring|logging|cert-manager|flux-system|traefik|maintenance|postgres)$\",all namespaces : namespace=~\".*\",infrastructure 
namespaces only : namespace=~\"^(kube-system|longhorn-system|metallb-system|monitoring|logging|cert-manager|flux-system|traefik|maintenance|postgres)$\"", + "query": "workload namespaces only : namespace!~\"^(kube-.*|.*-system|traefik|monitoring|logging|cert-manager|maintenance|postgres)$\",all namespaces : namespace=~\".*\",infrastructure namespaces only : namespace=~\"^(kube-.*|.*-system|traefik|monitoring|logging|cert-manager|maintenance|postgres)$\"", "current": { "text": "workload namespaces only", - "value": "namespace!~\"^(kube-system|longhorn-system|metallb-system|monitoring|logging|cert-manager|flux-system|traefik|maintenance|postgres)$\"", + "value": "namespace!~\"^(kube-.*|.*-system|traefik|monitoring|logging|cert-manager|maintenance|postgres)$\"", "selected": true }, "options": [ { "text": "workload namespaces only", - "value": "namespace!~\"^(kube-system|longhorn-system|metallb-system|monitoring|logging|cert-manager|flux-system|traefik|maintenance|postgres)$\"", + "value": "namespace!~\"^(kube-.*|.*-system|traefik|monitoring|logging|cert-manager|maintenance|postgres)$\"", "selected": true }, { @@ -294,7 +294,7 @@ }, { "text": "infrastructure namespaces only", - "value": "namespace=~\"^(kube-system|longhorn-system|metallb-system|monitoring|logging|cert-manager|flux-system|traefik|maintenance|postgres)$\"", + "value": "namespace=~\"^(kube-.*|.*-system|traefik|monitoring|logging|cert-manager|maintenance|postgres)$\"", "selected": false } ], diff --git a/services/monitoring/dashboards/atlas-nodes.json b/services/monitoring/dashboards/atlas-nodes.json index 0bfd639..2d60042 100644 --- a/services/monitoring/dashboards/atlas-nodes.json +++ b/services/monitoring/dashboards/atlas-nodes.json @@ -142,7 +142,7 @@ }, "targets": [ { - "expr": "sum(kube_pod_info{node=~\"titan-0a|titan-0b|titan-0c\",namespace!~\"^(kube-system|longhorn-system|metallb-system|monitoring|logging|cert-manager|flux-system|traefik|maintenance|postgres)$\"})", + "expr": 
"sum(kube_pod_info{node=~\"titan-0a|titan-0b|titan-0c\",namespace!~\"^(kube-.*|.*-system|traefik|monitoring|logging|cert-manager|maintenance|postgres)$\"})", "refId": "A" } ], diff --git a/services/monitoring/dashboards/atlas-overview.json b/services/monitoring/dashboards/atlas-overview.json index a113d22..e1c5d3a 100644 --- a/services/monitoring/dashboards/atlas-overview.json +++ b/services/monitoring/dashboards/atlas-overview.json @@ -76,7 +76,7 @@ }, "targets": [ { - "expr": "sum(kube_pod_info{node=~\"titan-0a|titan-0b|titan-0c\",namespace!~\"^(kube-system|longhorn-system|metallb-system|monitoring|logging|cert-manager|flux-system|traefik|maintenance|postgres)$\"}) or on() vector(0)", + "expr": "sum(kube_pod_info{node=~\"titan-0a|titan-0b|titan-0c\",namespace!~\"^(kube-.*|.*-system|traefik|monitoring|logging|cert-manager|maintenance|postgres)$\"}) or on() vector(0)", "refId": "A" } ], @@ -796,7 +796,7 @@ }, "gridPos": { "h": 2, - "w": 6, + "w": 5, "x": 0, "y": 8 }, @@ -863,8 +863,8 @@ }, "gridPos": { "h": 2, - "w": 6, - "x": 12, + "w": 5, + "x": 10, "y": 8 }, "targets": [ @@ -968,8 +968,8 @@ }, "gridPos": { "h": 2, - "w": 6, - "x": 6, + "w": 5, + "x": 5, "y": 8 }, "targets": [ @@ -1044,8 +1044,8 @@ }, "gridPos": { "h": 2, - "w": 6, - "x": 18, + "w": 5, + "x": 15, "y": 8 }, "targets": [ @@ -1110,6 +1110,81 @@ } ] }, + { + "id": 34, + "type": "stat", + "title": "Glue Jobs Stale", + "datasource": { + "type": "prometheus", + "uid": "atlas-vm" + }, + "gridPos": { + "h": 2, + "w": 4, + "x": 20, + "y": 8 + }, + "targets": [ + { + "expr": "(sum((((time() - kube_cronjob_status_last_successful_time{label_atlas_bstein_dev_glue=\"true\"}) > bool 129600) unless on(namespace,cronjob) kube_cronjob_spec_suspend{label_atlas_bstein_dev_glue=\"true\"} == 1)) + count(((kube_cronjob_labels{label_atlas_bstein_dev_glue=\"true\"} unless kube_cronjob_status_last_successful_time{label_atlas_bstein_dev_glue=\"true\"}) unless on(namespace,cronjob) 
kube_cronjob_spec_suspend{label_atlas_bstein_dev_glue=\"true\"} == 1)))", + "refId": "A" + } + ], + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "yellow", + "value": 1 + }, + { + "color": "orange", + "value": 2 + }, + { + "color": "red", + "value": 3 + } + ] + }, + "unit": "none", + "custom": { + "displayMode": "auto" + } + }, + "overrides": [] + }, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "center", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "textMode": "value" + }, + "links": [ + { + "title": "Open atlas-glue dashboard", + "url": "/d/atlas-glue", + "targetBlank": true + } + ] + }, { "id": 23, "type": "stat", @@ -1447,7 +1522,7 @@ "links": [ { "title": "Workload namespaces only", - "url": "?var-namespace_scope_cpu=namespace%21~%22%5E%28kube-system%7Clonghorn-system%7Cmetallb-system%7Cmonitoring%7Clogging%7Ccert-manager%7Cflux-system%7Ctraefik%7Cmaintenance%7Cpostgres%29%24%22&var-namespace_scope_gpu=${namespace_scope_gpu}&var-namespace_scope_ram=${namespace_scope_ram}", + "url": "?var-namespace_scope_cpu=namespace%21~%22%5E%28kube-.%2A%7C.%2A-system%7Ctraefik%7Cmonitoring%7Clogging%7Ccert-manager%7Cmaintenance%7Cpostgres%29%24%22&var-namespace_scope_gpu=${namespace_scope_gpu}&var-namespace_scope_ram=${namespace_scope_ram}", "targetBlank": false }, { @@ -1457,11 +1532,11 @@ }, { "title": "Infrastructure namespaces only", - "url": "?var-namespace_scope_cpu=namespace%3D~%22%5E%28kube-system%7Clonghorn-system%7Cmetallb-system%7Cmonitoring%7Clogging%7Ccert-manager%7Cflux-system%7Ctraefik%7Cmaintenance%7Cpostgres%29%24%22&var-namespace_scope_gpu=${namespace_scope_gpu}&var-namespace_scope_ram=${namespace_scope_ram}", + "url": 
"?var-namespace_scope_cpu=namespace%3D~%22%5E%28kube-.%2A%7C.%2A-system%7Ctraefik%7Cmonitoring%7Clogging%7Ccert-manager%7Cmaintenance%7Cpostgres%29%24%22&var-namespace_scope_gpu=${namespace_scope_gpu}&var-namespace_scope_ram=${namespace_scope_ram}", "targetBlank": false } ], - "description": "Values are normalized within the selected scope; use panel links to switch scope." + "description": "Shares are normalized within the selected filter. Switching scope changes the denominator." }, { "id": 12, @@ -1516,7 +1591,7 @@ "links": [ { "title": "Workload namespaces only", - "url": "?var-namespace_scope_cpu=${namespace_scope_cpu}&var-namespace_scope_gpu=namespace%21~%22%5E%28kube-system%7Clonghorn-system%7Cmetallb-system%7Cmonitoring%7Clogging%7Ccert-manager%7Cflux-system%7Ctraefik%7Cmaintenance%7Cpostgres%29%24%22&var-namespace_scope_ram=${namespace_scope_ram}", + "url": "?var-namespace_scope_cpu=${namespace_scope_cpu}&var-namespace_scope_gpu=namespace%21~%22%5E%28kube-.%2A%7C.%2A-system%7Ctraefik%7Cmonitoring%7Clogging%7Ccert-manager%7Cmaintenance%7Cpostgres%29%24%22&var-namespace_scope_ram=${namespace_scope_ram}", "targetBlank": false }, { @@ -1526,11 +1601,11 @@ }, { "title": "Infrastructure namespaces only", - "url": "?var-namespace_scope_cpu=${namespace_scope_cpu}&var-namespace_scope_gpu=namespace%3D~%22%5E%28kube-system%7Clonghorn-system%7Cmetallb-system%7Cmonitoring%7Clogging%7Ccert-manager%7Cflux-system%7Ctraefik%7Cmaintenance%7Cpostgres%29%24%22&var-namespace_scope_ram=${namespace_scope_ram}", + "url": "?var-namespace_scope_cpu=${namespace_scope_cpu}&var-namespace_scope_gpu=namespace%3D~%22%5E%28kube-.%2A%7C.%2A-system%7Ctraefik%7Cmonitoring%7Clogging%7Ccert-manager%7Cmaintenance%7Cpostgres%29%24%22&var-namespace_scope_ram=${namespace_scope_ram}", "targetBlank": false } ], - "description": "Values are normalized within the selected scope; use panel links to switch scope." + "description": "Shares are normalized within the selected filter. 
Switching scope changes the denominator." }, { "id": 13, @@ -1585,7 +1660,7 @@ "links": [ { "title": "Workload namespaces only", - "url": "?var-namespace_scope_cpu=${namespace_scope_cpu}&var-namespace_scope_gpu=${namespace_scope_gpu}&var-namespace_scope_ram=namespace%21~%22%5E%28kube-system%7Clonghorn-system%7Cmetallb-system%7Cmonitoring%7Clogging%7Ccert-manager%7Cflux-system%7Ctraefik%7Cmaintenance%7Cpostgres%29%24%22", + "url": "?var-namespace_scope_cpu=${namespace_scope_cpu}&var-namespace_scope_gpu=${namespace_scope_gpu}&var-namespace_scope_ram=namespace%21~%22%5E%28kube-.%2A%7C.%2A-system%7Ctraefik%7Cmonitoring%7Clogging%7Ccert-manager%7Cmaintenance%7Cpostgres%29%24%22", "targetBlank": false }, { @@ -1595,11 +1670,11 @@ }, { "title": "Infrastructure namespaces only", - "url": "?var-namespace_scope_cpu=${namespace_scope_cpu}&var-namespace_scope_gpu=${namespace_scope_gpu}&var-namespace_scope_ram=namespace%3D~%22%5E%28kube-system%7Clonghorn-system%7Cmetallb-system%7Cmonitoring%7Clogging%7Ccert-manager%7Cflux-system%7Ctraefik%7Cmaintenance%7Cpostgres%29%24%22", + "url": "?var-namespace_scope_cpu=${namespace_scope_cpu}&var-namespace_scope_gpu=${namespace_scope_gpu}&var-namespace_scope_ram=namespace%3D~%22%5E%28kube-.%2A%7C.%2A-system%7Ctraefik%7Cmonitoring%7Clogging%7Ccert-manager%7Cmaintenance%7Cpostgres%29%24%22", "targetBlank": false } ], - "description": "Values are normalized within the selected scope; use panel links to switch scope." + "description": "Shares are normalized within the selected filter. Switching scope changes the denominator." 
}, { "id": 14, @@ -2174,16 +2249,16 @@ "name": "namespace_scope_cpu", "label": "CPU namespace filter", "type": "custom", - "query": "workload namespaces only : namespace!~\"^(kube-system|longhorn-system|metallb-system|monitoring|logging|cert-manager|flux-system|traefik|maintenance|postgres)$\",all namespaces : namespace=~\".*\",infrastructure namespaces only : namespace=~\"^(kube-system|longhorn-system|metallb-system|monitoring|logging|cert-manager|flux-system|traefik|maintenance|postgres)$\"", + "query": "workload namespaces only : namespace!~\"^(kube-.*|.*-system|traefik|monitoring|logging|cert-manager|maintenance|postgres)$\",all namespaces : namespace=~\".*\",infrastructure namespaces only : namespace=~\"^(kube-.*|.*-system|traefik|monitoring|logging|cert-manager|maintenance|postgres)$\"", "current": { "text": "workload namespaces only", - "value": "namespace!~\"^(kube-system|longhorn-system|metallb-system|monitoring|logging|cert-manager|flux-system|traefik|maintenance|postgres)$\"", + "value": "namespace!~\"^(kube-.*|.*-system|traefik|monitoring|logging|cert-manager|maintenance|postgres)$\"", "selected": true }, "options": [ { "text": "workload namespaces only", - "value": "namespace!~\"^(kube-system|longhorn-system|metallb-system|monitoring|logging|cert-manager|flux-system|traefik|maintenance|postgres)$\"", + "value": "namespace!~\"^(kube-.*|.*-system|traefik|monitoring|logging|cert-manager|maintenance|postgres)$\"", "selected": true }, { @@ -2193,7 +2268,7 @@ }, { "text": "infrastructure namespaces only", - "value": "namespace=~\"^(kube-system|longhorn-system|metallb-system|monitoring|logging|cert-manager|flux-system|traefik|maintenance|postgres)$\"", + "value": "namespace=~\"^(kube-.*|.*-system|traefik|monitoring|logging|cert-manager|maintenance|postgres)$\"", "selected": false } ], @@ -2208,16 +2283,16 @@ "name": "namespace_scope_gpu", "label": "GPU namespace filter", "type": "custom", - "query": "workload namespaces only : 
namespace!~\"^(kube-system|longhorn-system|metallb-system|monitoring|logging|cert-manager|flux-system|traefik|maintenance|postgres)$\",all namespaces : namespace=~\".*\",infrastructure namespaces only : namespace=~\"^(kube-system|longhorn-system|metallb-system|monitoring|logging|cert-manager|flux-system|traefik|maintenance|postgres)$\"", + "query": "workload namespaces only : namespace!~\"^(kube-.*|.*-system|traefik|monitoring|logging|cert-manager|maintenance|postgres)$\",all namespaces : namespace=~\".*\",infrastructure namespaces only : namespace=~\"^(kube-.*|.*-system|traefik|monitoring|logging|cert-manager|maintenance|postgres)$\"", "current": { "text": "workload namespaces only", - "value": "namespace!~\"^(kube-system|longhorn-system|metallb-system|monitoring|logging|cert-manager|flux-system|traefik|maintenance|postgres)$\"", + "value": "namespace!~\"^(kube-.*|.*-system|traefik|monitoring|logging|cert-manager|maintenance|postgres)$\"", "selected": true }, "options": [ { "text": "workload namespaces only", - "value": "namespace!~\"^(kube-system|longhorn-system|metallb-system|monitoring|logging|cert-manager|flux-system|traefik|maintenance|postgres)$\"", + "value": "namespace!~\"^(kube-.*|.*-system|traefik|monitoring|logging|cert-manager|maintenance|postgres)$\"", "selected": true }, { @@ -2227,7 +2302,7 @@ }, { "text": "infrastructure namespaces only", - "value": "namespace=~\"^(kube-system|longhorn-system|metallb-system|monitoring|logging|cert-manager|flux-system|traefik|maintenance|postgres)$\"", + "value": "namespace=~\"^(kube-.*|.*-system|traefik|monitoring|logging|cert-manager|maintenance|postgres)$\"", "selected": false } ], @@ -2242,16 +2317,16 @@ "name": "namespace_scope_ram", "label": "RAM namespace filter", "type": "custom", - "query": "workload namespaces only : namespace!~\"^(kube-system|longhorn-system|metallb-system|monitoring|logging|cert-manager|flux-system|traefik|maintenance|postgres)$\",all namespaces : namespace=~\".*\",infrastructure 
namespaces only : namespace=~\"^(kube-system|longhorn-system|metallb-system|monitoring|logging|cert-manager|flux-system|traefik|maintenance|postgres)$\"", + "query": "workload namespaces only : namespace!~\"^(kube-.*|.*-system|traefik|monitoring|logging|cert-manager|maintenance|postgres)$\",all namespaces : namespace=~\".*\",infrastructure namespaces only : namespace=~\"^(kube-.*|.*-system|traefik|monitoring|logging|cert-manager|maintenance|postgres)$\"", "current": { "text": "workload namespaces only", - "value": "namespace!~\"^(kube-system|longhorn-system|metallb-system|monitoring|logging|cert-manager|flux-system|traefik|maintenance|postgres)$\"", + "value": "namespace!~\"^(kube-.*|.*-system|traefik|monitoring|logging|cert-manager|maintenance|postgres)$\"", "selected": true }, "options": [ { "text": "workload namespaces only", - "value": "namespace!~\"^(kube-system|longhorn-system|metallb-system|monitoring|logging|cert-manager|flux-system|traefik|maintenance|postgres)$\"", + "value": "namespace!~\"^(kube-.*|.*-system|traefik|monitoring|logging|cert-manager|maintenance|postgres)$\"", "selected": true }, { @@ -2261,7 +2336,7 @@ }, { "text": "infrastructure namespaces only", - "value": "namespace=~\"^(kube-system|longhorn-system|metallb-system|monitoring|logging|cert-manager|flux-system|traefik|maintenance|postgres)$\"", + "value": "namespace=~\"^(kube-.*|.*-system|traefik|monitoring|logging|cert-manager|maintenance|postgres)$\"", "selected": false } ], diff --git a/services/monitoring/dashboards/atlas-pods.json b/services/monitoring/dashboards/atlas-pods.json index ff2dbdd..adab84b 100644 --- a/services/monitoring/dashboards/atlas-pods.json +++ b/services/monitoring/dashboards/atlas-pods.json @@ -200,7 +200,7 @@ }, "targets": [ { - "expr": "sum(kube_pod_info{node=~\"titan-0a|titan-0b|titan-0c\",namespace!~\"^(kube-system|longhorn-system|metallb-system|monitoring|logging|cert-manager|flux-system|traefik|maintenance|postgres)$\"})", + "expr": 
"sum(kube_pod_info{node=~\"titan-0a|titan-0b|titan-0c\",namespace!~\"^(kube-.*|.*-system|traefik|monitoring|logging|cert-manager|maintenance|postgres)$\"})", "refId": "A" } ], diff --git a/services/monitoring/grafana-dashboard-glue.yaml b/services/monitoring/grafana-dashboard-glue.yaml new file mode 100644 index 0000000..0f8c0a1 --- /dev/null +++ b/services/monitoring/grafana-dashboard-glue.yaml @@ -0,0 +1,348 @@ +# services/monitoring/grafana-dashboard-glue.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: grafana-dashboard-glue + labels: + grafana_dashboard: "1" +data: + atlas-glue.json: | + { + "uid": "atlas-glue", + "title": "Atlas Glue", + "folderUid": "atlas-internal", + "editable": true, + "panels": [ + { + "id": 1, + "type": "stat", + "title": "Glue Jobs Stale (>36h)", + "datasource": { + "type": "prometheus", + "uid": "atlas-vm" + }, + "gridPos": { + "h": 4, + "w": 6, + "x": 0, + "y": 0 + }, + "targets": [ + { + "expr": "(sum((((time() - kube_cronjob_status_last_successful_time{label_atlas_bstein_dev_glue=\"true\"}) > bool 129600) unless on(namespace,cronjob) kube_cronjob_spec_suspend{label_atlas_bstein_dev_glue=\"true\"} == 1)) + count(((kube_cronjob_labels{label_atlas_bstein_dev_glue=\"true\"} unless kube_cronjob_status_last_successful_time{label_atlas_bstein_dev_glue=\"true\"}) unless on(namespace,cronjob) kube_cronjob_spec_suspend{label_atlas_bstein_dev_glue=\"true\"} == 1)))", + "refId": "A" + } + ], + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "yellow", + "value": 1 + }, + { + "color": "orange", + "value": 2 + }, + { + "color": "red", + "value": 3 + } + ] + }, + "unit": "none", + "custom": { + "displayMode": "auto" + } + }, + "overrides": [] + }, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "center", + "reduceOptions": { + "calcs": [ + "lastNotNull" + 
], + "fields": "", + "values": false + }, + "textMode": "value" + } + }, + { + "id": 2, + "type": "table", + "title": "Glue Jobs Missing Success", + "datasource": { + "type": "prometheus", + "uid": "atlas-vm" + }, + "gridPos": { + "h": 4, + "w": 6, + "x": 6, + "y": 0 + }, + "targets": [ + { + "expr": "(kube_cronjob_labels{label_atlas_bstein_dev_glue=\"true\"} unless kube_cronjob_status_last_successful_time{label_atlas_bstein_dev_glue=\"true\"})", + "refId": "A", + "instant": true + } + ], + "fieldConfig": { + "defaults": { + "unit": "none", + "custom": { + "filterable": true + } + }, + "overrides": [] + }, + "options": { + "showHeader": true, + "columnFilters": false + }, + "transformations": [ + { + "id": "labelsToFields", + "options": {} + }, + { + "id": "sortBy", + "options": { + "fields": [ + "Value" + ], + "order": "desc" + } + } + ] + }, + { + "id": 3, + "type": "table", + "title": "Glue Jobs Suspended", + "datasource": { + "type": "prometheus", + "uid": "atlas-vm" + }, + "gridPos": { + "h": 4, + "w": 6, + "x": 12, + "y": 0 + }, + "targets": [ + { + "expr": "kube_cronjob_spec_suspend{label_atlas_bstein_dev_glue=\"true\"} == 1", + "refId": "A", + "instant": true + } + ], + "fieldConfig": { + "defaults": { + "unit": "none", + "custom": { + "filterable": true + } + }, + "overrides": [] + }, + "options": { + "showHeader": true, + "columnFilters": false + }, + "transformations": [ + { + "id": "labelsToFields", + "options": {} + }, + { + "id": "sortBy", + "options": { + "fields": [ + "Value" + ], + "order": "desc" + } + } + ] + }, + { + "id": 4, + "type": "table", + "title": "Glue Jobs Active Runs", + "datasource": { + "type": "prometheus", + "uid": "atlas-vm" + }, + "gridPos": { + "h": 4, + "w": 6, + "x": 18, + "y": 0 + }, + "targets": [ + { + "expr": "kube_cronjob_status_active{label_atlas_bstein_dev_glue=\"true\"}", + "refId": "A", + "instant": true + } + ], + "fieldConfig": { + "defaults": { + "unit": "none", + "custom": { + "filterable": true + } + }, + 
"overrides": [] + }, + "options": { + "showHeader": true, + "columnFilters": false + }, + "transformations": [ + { + "id": "labelsToFields", + "options": {} + }, + { + "id": "sortBy", + "options": { + "fields": [ + "Value" + ], + "order": "desc" + } + } + ] + }, + { + "id": 5, + "type": "table", + "title": "Glue Jobs Last Success (hours ago)", + "datasource": { + "type": "prometheus", + "uid": "atlas-vm" + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 4 + }, + "targets": [ + { + "expr": "((time() - kube_cronjob_status_last_successful_time{label_atlas_bstein_dev_glue=\"true\"})) / 3600", + "refId": "A", + "instant": true + } + ], + "fieldConfig": { + "defaults": { + "unit": "h", + "custom": { + "filterable": true + } + }, + "overrides": [] + }, + "options": { + "showHeader": true, + "columnFilters": false + }, + "transformations": [ + { + "id": "labelsToFields", + "options": {} + }, + { + "id": "sortBy", + "options": { + "fields": [ + "Value" + ], + "order": "desc" + } + } + ] + }, + { + "id": 6, + "type": "table", + "title": "Glue Jobs Last Schedule (hours ago)", + "datasource": { + "type": "prometheus", + "uid": "atlas-vm" + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 4 + }, + "targets": [ + { + "expr": "((time() - kube_cronjob_status_last_schedule_time{label_atlas_bstein_dev_glue=\"true\"})) / 3600", + "refId": "A", + "instant": true + } + ], + "fieldConfig": { + "defaults": { + "unit": "h", + "custom": { + "filterable": true + } + }, + "overrides": [] + }, + "options": { + "showHeader": true, + "columnFilters": false + }, + "transformations": [ + { + "id": "labelsToFields", + "options": {} + }, + { + "id": "sortBy", + "options": { + "fields": [ + "Value" + ], + "order": "desc" + } + } + ] + } + ], + "time": { + "from": "now-7d", + "to": "now" + }, + "annotations": { + "list": [] + }, + "schemaVersion": 39, + "style": "dark", + "tags": [ + "atlas", + "glue" + ] + } diff --git a/services/monitoring/grafana-dashboard-gpu.yaml 
b/services/monitoring/grafana-dashboard-gpu.yaml index 49b5d39..d7950f2 100644 --- a/services/monitoring/grafana-dashboard-gpu.yaml +++ b/services/monitoring/grafana-dashboard-gpu.yaml @@ -66,7 +66,7 @@ data: "links": [ { "title": "Workload namespaces only", - "url": "?var-namespace_scope_cpu=${namespace_scope_cpu}&var-namespace_scope_gpu=namespace%21~%22%5E%28kube-system%7Clonghorn-system%7Cmetallb-system%7Cmonitoring%7Clogging%7Ccert-manager%7Cflux-system%7Ctraefik%7Cmaintenance%7Cpostgres%29%24%22&var-namespace_scope_ram=${namespace_scope_ram}", + "url": "?var-namespace_scope_cpu=${namespace_scope_cpu}&var-namespace_scope_gpu=namespace%21~%22%5E%28kube-.%2A%7C.%2A-system%7Ctraefik%7Cmonitoring%7Clogging%7Ccert-manager%7Cmaintenance%7Cpostgres%29%24%22&var-namespace_scope_ram=${namespace_scope_ram}", "targetBlank": false }, { @@ -76,11 +76,11 @@ data: }, { "title": "Infrastructure namespaces only", - "url": "?var-namespace_scope_cpu=${namespace_scope_cpu}&var-namespace_scope_gpu=namespace%3D~%22%5E%28kube-system%7Clonghorn-system%7Cmetallb-system%7Cmonitoring%7Clogging%7Ccert-manager%7Cflux-system%7Ctraefik%7Cmaintenance%7Cpostgres%29%24%22&var-namespace_scope_ram=${namespace_scope_ram}", + "url": "?var-namespace_scope_cpu=${namespace_scope_cpu}&var-namespace_scope_gpu=namespace%3D~%22%5E%28kube-.%2A%7C.%2A-system%7Ctraefik%7Cmonitoring%7Clogging%7Ccert-manager%7Cmaintenance%7Cpostgres%29%24%22&var-namespace_scope_ram=${namespace_scope_ram}", "targetBlank": false } ], - "description": "Values are normalized within the selected scope; use panel links to switch scope." + "description": "Shares are normalized within the selected filter. Switching scope changes the denominator." 
}, { "id": 2, @@ -216,16 +216,16 @@ data: "name": "namespace_scope_cpu", "label": "CPU namespace filter", "type": "custom", - "query": "workload namespaces only : namespace!~\"^(kube-system|longhorn-system|metallb-system|monitoring|logging|cert-manager|flux-system|traefik|maintenance|postgres)$\",all namespaces : namespace=~\".*\",infrastructure namespaces only : namespace=~\"^(kube-system|longhorn-system|metallb-system|monitoring|logging|cert-manager|flux-system|traefik|maintenance|postgres)$\"", + "query": "workload namespaces only : namespace!~\"^(kube-.*|.*-system|traefik|monitoring|logging|cert-manager|maintenance|postgres)$\",all namespaces : namespace=~\".*\",infrastructure namespaces only : namespace=~\"^(kube-.*|.*-system|traefik|monitoring|logging|cert-manager|maintenance|postgres)$\"", "current": { "text": "workload namespaces only", - "value": "namespace!~\"^(kube-system|longhorn-system|metallb-system|monitoring|logging|cert-manager|flux-system|traefik|maintenance|postgres)$\"", + "value": "namespace!~\"^(kube-.*|.*-system|traefik|monitoring|logging|cert-manager|maintenance|postgres)$\"", "selected": true }, "options": [ { "text": "workload namespaces only", - "value": "namespace!~\"^(kube-system|longhorn-system|metallb-system|monitoring|logging|cert-manager|flux-system|traefik|maintenance|postgres)$\"", + "value": "namespace!~\"^(kube-.*|.*-system|traefik|monitoring|logging|cert-manager|maintenance|postgres)$\"", "selected": true }, { @@ -235,7 +235,7 @@ data: }, { "text": "infrastructure namespaces only", - "value": "namespace=~\"^(kube-system|longhorn-system|metallb-system|monitoring|logging|cert-manager|flux-system|traefik|maintenance|postgres)$\"", + "value": "namespace=~\"^(kube-.*|.*-system|traefik|monitoring|logging|cert-manager|maintenance|postgres)$\"", "selected": false } ], @@ -250,16 +250,16 @@ data: "name": "namespace_scope_gpu", "label": "GPU namespace filter", "type": "custom", - "query": "workload namespaces only : 
namespace!~\"^(kube-system|longhorn-system|metallb-system|monitoring|logging|cert-manager|flux-system|traefik|maintenance|postgres)$\",all namespaces : namespace=~\".*\",infrastructure namespaces only : namespace=~\"^(kube-system|longhorn-system|metallb-system|monitoring|logging|cert-manager|flux-system|traefik|maintenance|postgres)$\"", + "query": "workload namespaces only : namespace!~\"^(kube-.*|.*-system|traefik|monitoring|logging|cert-manager|maintenance|postgres)$\",all namespaces : namespace=~\".*\",infrastructure namespaces only : namespace=~\"^(kube-.*|.*-system|traefik|monitoring|logging|cert-manager|maintenance|postgres)$\"", "current": { "text": "workload namespaces only", - "value": "namespace!~\"^(kube-system|longhorn-system|metallb-system|monitoring|logging|cert-manager|flux-system|traefik|maintenance|postgres)$\"", + "value": "namespace!~\"^(kube-.*|.*-system|traefik|monitoring|logging|cert-manager|maintenance|postgres)$\"", "selected": true }, "options": [ { "text": "workload namespaces only", - "value": "namespace!~\"^(kube-system|longhorn-system|metallb-system|monitoring|logging|cert-manager|flux-system|traefik|maintenance|postgres)$\"", + "value": "namespace!~\"^(kube-.*|.*-system|traefik|monitoring|logging|cert-manager|maintenance|postgres)$\"", "selected": true }, { @@ -269,7 +269,7 @@ data: }, { "text": "infrastructure namespaces only", - "value": "namespace=~\"^(kube-system|longhorn-system|metallb-system|monitoring|logging|cert-manager|flux-system|traefik|maintenance|postgres)$\"", + "value": "namespace=~\"^(kube-.*|.*-system|traefik|monitoring|logging|cert-manager|maintenance|postgres)$\"", "selected": false } ], @@ -284,16 +284,16 @@ data: "name": "namespace_scope_ram", "label": "RAM namespace filter", "type": "custom", - "query": "workload namespaces only : namespace!~\"^(kube-system|longhorn-system|metallb-system|monitoring|logging|cert-manager|flux-system|traefik|maintenance|postgres)$\",all namespaces : namespace=~\".*\",infrastructure 
namespaces only : namespace=~\"^(kube-system|longhorn-system|metallb-system|monitoring|logging|cert-manager|flux-system|traefik|maintenance|postgres)$\"", + "query": "workload namespaces only : namespace!~\"^(kube-.*|.*-system|traefik|monitoring|logging|cert-manager|maintenance|postgres)$\",all namespaces : namespace=~\".*\",infrastructure namespaces only : namespace=~\"^(kube-.*|.*-system|traefik|monitoring|logging|cert-manager|maintenance|postgres)$\"", "current": { "text": "workload namespaces only", - "value": "namespace!~\"^(kube-system|longhorn-system|metallb-system|monitoring|logging|cert-manager|flux-system|traefik|maintenance|postgres)$\"", + "value": "namespace!~\"^(kube-.*|.*-system|traefik|monitoring|logging|cert-manager|maintenance|postgres)$\"", "selected": true }, "options": [ { "text": "workload namespaces only", - "value": "namespace!~\"^(kube-system|longhorn-system|metallb-system|monitoring|logging|cert-manager|flux-system|traefik|maintenance|postgres)$\"", + "value": "namespace!~\"^(kube-.*|.*-system|traefik|monitoring|logging|cert-manager|maintenance|postgres)$\"", "selected": true }, { @@ -303,7 +303,7 @@ data: }, { "text": "infrastructure namespaces only", - "value": "namespace=~\"^(kube-system|longhorn-system|metallb-system|monitoring|logging|cert-manager|flux-system|traefik|maintenance|postgres)$\"", + "value": "namespace=~\"^(kube-.*|.*-system|traefik|monitoring|logging|cert-manager|maintenance|postgres)$\"", "selected": false } ], diff --git a/services/monitoring/grafana-dashboard-nodes.yaml b/services/monitoring/grafana-dashboard-nodes.yaml index 5e02c18..f0f1982 100644 --- a/services/monitoring/grafana-dashboard-nodes.yaml +++ b/services/monitoring/grafana-dashboard-nodes.yaml @@ -151,7 +151,7 @@ data: }, "targets": [ { - "expr": "sum(kube_pod_info{node=~\"titan-0a|titan-0b|titan-0c\",namespace!~\"^(kube-system|longhorn-system|metallb-system|monitoring|logging|cert-manager|flux-system|traefik|maintenance|postgres)$\"})", + "expr": 
"sum(kube_pod_info{node=~\"titan-0a|titan-0b|titan-0c\",namespace!~\"^(kube-.*|.*-system|traefik|monitoring|logging|cert-manager|maintenance|postgres)$\"})", "refId": "A" } ], diff --git a/services/monitoring/grafana-dashboard-overview.yaml b/services/monitoring/grafana-dashboard-overview.yaml index e627658..78d5566 100644 --- a/services/monitoring/grafana-dashboard-overview.yaml +++ b/services/monitoring/grafana-dashboard-overview.yaml @@ -85,7 +85,7 @@ data: }, "targets": [ { - "expr": "sum(kube_pod_info{node=~\"titan-0a|titan-0b|titan-0c\",namespace!~\"^(kube-system|longhorn-system|metallb-system|monitoring|logging|cert-manager|flux-system|traefik|maintenance|postgres)$\"}) or on() vector(0)", + "expr": "sum(kube_pod_info{node=~\"titan-0a|titan-0b|titan-0c\",namespace!~\"^(kube-.*|.*-system|traefik|monitoring|logging|cert-manager|maintenance|postgres)$\"}) or on() vector(0)", "refId": "A" } ], @@ -805,7 +805,7 @@ data: }, "gridPos": { "h": 2, - "w": 6, + "w": 5, "x": 0, "y": 8 }, @@ -872,8 +872,8 @@ data: }, "gridPos": { "h": 2, - "w": 6, - "x": 12, + "w": 5, + "x": 10, "y": 8 }, "targets": [ @@ -977,8 +977,8 @@ data: }, "gridPos": { "h": 2, - "w": 6, - "x": 6, + "w": 5, + "x": 5, "y": 8 }, "targets": [ @@ -1053,8 +1053,8 @@ data: }, "gridPos": { "h": 2, - "w": 6, - "x": 18, + "w": 5, + "x": 15, "y": 8 }, "targets": [ @@ -1119,6 +1119,81 @@ data: } ] }, + { + "id": 34, + "type": "stat", + "title": "Glue Jobs Stale", + "datasource": { + "type": "prometheus", + "uid": "atlas-vm" + }, + "gridPos": { + "h": 2, + "w": 4, + "x": 20, + "y": 8 + }, + "targets": [ + { + "expr": "(sum((((time() - kube_cronjob_status_last_successful_time{label_atlas_bstein_dev_glue=\"true\"}) > bool 129600) unless on(namespace,cronjob) kube_cronjob_spec_suspend{label_atlas_bstein_dev_glue=\"true\"} == 1)) + count(((kube_cronjob_labels{label_atlas_bstein_dev_glue=\"true\"} unless kube_cronjob_status_last_successful_time{label_atlas_bstein_dev_glue=\"true\"}) unless on(namespace,cronjob) 
kube_cronjob_spec_suspend{label_atlas_bstein_dev_glue=\"true\"} == 1)))", + "refId": "A" + } + ], + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "yellow", + "value": 1 + }, + { + "color": "orange", + "value": 2 + }, + { + "color": "red", + "value": 3 + } + ] + }, + "unit": "none", + "custom": { + "displayMode": "auto" + } + }, + "overrides": [] + }, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "center", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "textMode": "value" + }, + "links": [ + { + "title": "Open atlas-glue dashboard", + "url": "/d/atlas-glue", + "targetBlank": true + } + ] + }, { "id": 23, "type": "stat", @@ -1456,7 +1531,7 @@ data: "links": [ { "title": "Workload namespaces only", - "url": "?var-namespace_scope_cpu=namespace%21~%22%5E%28kube-system%7Clonghorn-system%7Cmetallb-system%7Cmonitoring%7Clogging%7Ccert-manager%7Cflux-system%7Ctraefik%7Cmaintenance%7Cpostgres%29%24%22&var-namespace_scope_gpu=${namespace_scope_gpu}&var-namespace_scope_ram=${namespace_scope_ram}", + "url": "?var-namespace_scope_cpu=namespace%21~%22%5E%28kube-.%2A%7C.%2A-system%7Ctraefik%7Cmonitoring%7Clogging%7Ccert-manager%7Cmaintenance%7Cpostgres%29%24%22&var-namespace_scope_gpu=${namespace_scope_gpu}&var-namespace_scope_ram=${namespace_scope_ram}", "targetBlank": false }, { @@ -1466,11 +1541,11 @@ data: }, { "title": "Infrastructure namespaces only", - "url": "?var-namespace_scope_cpu=namespace%3D~%22%5E%28kube-system%7Clonghorn-system%7Cmetallb-system%7Cmonitoring%7Clogging%7Ccert-manager%7Cflux-system%7Ctraefik%7Cmaintenance%7Cpostgres%29%24%22&var-namespace_scope_gpu=${namespace_scope_gpu}&var-namespace_scope_ram=${namespace_scope_ram}", + "url": 
"?var-namespace_scope_cpu=namespace%3D~%22%5E%28kube-.%2A%7C.%2A-system%7Ctraefik%7Cmonitoring%7Clogging%7Ccert-manager%7Cmaintenance%7Cpostgres%29%24%22&var-namespace_scope_gpu=${namespace_scope_gpu}&var-namespace_scope_ram=${namespace_scope_ram}", "targetBlank": false } ], - "description": "Values are normalized within the selected scope; use panel links to switch scope." + "description": "Shares are normalized within the selected filter. Switching scope changes the denominator." }, { "id": 12, @@ -1525,7 +1600,7 @@ data: "links": [ { "title": "Workload namespaces only", - "url": "?var-namespace_scope_cpu=${namespace_scope_cpu}&var-namespace_scope_gpu=namespace%21~%22%5E%28kube-system%7Clonghorn-system%7Cmetallb-system%7Cmonitoring%7Clogging%7Ccert-manager%7Cflux-system%7Ctraefik%7Cmaintenance%7Cpostgres%29%24%22&var-namespace_scope_ram=${namespace_scope_ram}", + "url": "?var-namespace_scope_cpu=${namespace_scope_cpu}&var-namespace_scope_gpu=namespace%21~%22%5E%28kube-.%2A%7C.%2A-system%7Ctraefik%7Cmonitoring%7Clogging%7Ccert-manager%7Cmaintenance%7Cpostgres%29%24%22&var-namespace_scope_ram=${namespace_scope_ram}", "targetBlank": false }, { @@ -1535,11 +1610,11 @@ data: }, { "title": "Infrastructure namespaces only", - "url": "?var-namespace_scope_cpu=${namespace_scope_cpu}&var-namespace_scope_gpu=namespace%3D~%22%5E%28kube-system%7Clonghorn-system%7Cmetallb-system%7Cmonitoring%7Clogging%7Ccert-manager%7Cflux-system%7Ctraefik%7Cmaintenance%7Cpostgres%29%24%22&var-namespace_scope_ram=${namespace_scope_ram}", + "url": "?var-namespace_scope_cpu=${namespace_scope_cpu}&var-namespace_scope_gpu=namespace%3D~%22%5E%28kube-.%2A%7C.%2A-system%7Ctraefik%7Cmonitoring%7Clogging%7Ccert-manager%7Cmaintenance%7Cpostgres%29%24%22&var-namespace_scope_ram=${namespace_scope_ram}", "targetBlank": false } ], - "description": "Values are normalized within the selected scope; use panel links to switch scope." + "description": "Shares are normalized within the selected filter. 
Switching scope changes the denominator." }, { "id": 13, @@ -1594,7 +1669,7 @@ data: "links": [ { "title": "Workload namespaces only", - "url": "?var-namespace_scope_cpu=${namespace_scope_cpu}&var-namespace_scope_gpu=${namespace_scope_gpu}&var-namespace_scope_ram=namespace%21~%22%5E%28kube-system%7Clonghorn-system%7Cmetallb-system%7Cmonitoring%7Clogging%7Ccert-manager%7Cflux-system%7Ctraefik%7Cmaintenance%7Cpostgres%29%24%22", + "url": "?var-namespace_scope_cpu=${namespace_scope_cpu}&var-namespace_scope_gpu=${namespace_scope_gpu}&var-namespace_scope_ram=namespace%21~%22%5E%28kube-.%2A%7C.%2A-system%7Ctraefik%7Cmonitoring%7Clogging%7Ccert-manager%7Cmaintenance%7Cpostgres%29%24%22", "targetBlank": false }, { @@ -1604,11 +1679,11 @@ data: }, { "title": "Infrastructure namespaces only", - "url": "?var-namespace_scope_cpu=${namespace_scope_cpu}&var-namespace_scope_gpu=${namespace_scope_gpu}&var-namespace_scope_ram=namespace%3D~%22%5E%28kube-system%7Clonghorn-system%7Cmetallb-system%7Cmonitoring%7Clogging%7Ccert-manager%7Cflux-system%7Ctraefik%7Cmaintenance%7Cpostgres%29%24%22", + "url": "?var-namespace_scope_cpu=${namespace_scope_cpu}&var-namespace_scope_gpu=${namespace_scope_gpu}&var-namespace_scope_ram=namespace%3D~%22%5E%28kube-.%2A%7C.%2A-system%7Ctraefik%7Cmonitoring%7Clogging%7Ccert-manager%7Cmaintenance%7Cpostgres%29%24%22", "targetBlank": false } ], - "description": "Values are normalized within the selected scope; use panel links to switch scope." + "description": "Shares are normalized within the selected filter. Switching scope changes the denominator." 
}, { "id": 14, @@ -2183,16 +2258,16 @@ data: "name": "namespace_scope_cpu", "label": "CPU namespace filter", "type": "custom", - "query": "workload namespaces only : namespace!~\"^(kube-system|longhorn-system|metallb-system|monitoring|logging|cert-manager|flux-system|traefik|maintenance|postgres)$\",all namespaces : namespace=~\".*\",infrastructure namespaces only : namespace=~\"^(kube-system|longhorn-system|metallb-system|monitoring|logging|cert-manager|flux-system|traefik|maintenance|postgres)$\"", + "query": "workload namespaces only : namespace!~\"^(kube-.*|.*-system|traefik|monitoring|logging|cert-manager|maintenance|postgres)$\",all namespaces : namespace=~\".*\",infrastructure namespaces only : namespace=~\"^(kube-.*|.*-system|traefik|monitoring|logging|cert-manager|maintenance|postgres)$\"", "current": { "text": "workload namespaces only", - "value": "namespace!~\"^(kube-system|longhorn-system|metallb-system|monitoring|logging|cert-manager|flux-system|traefik|maintenance|postgres)$\"", + "value": "namespace!~\"^(kube-.*|.*-system|traefik|monitoring|logging|cert-manager|maintenance|postgres)$\"", "selected": true }, "options": [ { "text": "workload namespaces only", - "value": "namespace!~\"^(kube-system|longhorn-system|metallb-system|monitoring|logging|cert-manager|flux-system|traefik|maintenance|postgres)$\"", + "value": "namespace!~\"^(kube-.*|.*-system|traefik|monitoring|logging|cert-manager|maintenance|postgres)$\"", "selected": true }, { @@ -2202,7 +2277,7 @@ data: }, { "text": "infrastructure namespaces only", - "value": "namespace=~\"^(kube-system|longhorn-system|metallb-system|monitoring|logging|cert-manager|flux-system|traefik|maintenance|postgres)$\"", + "value": "namespace=~\"^(kube-.*|.*-system|traefik|monitoring|logging|cert-manager|maintenance|postgres)$\"", "selected": false } ], @@ -2217,16 +2292,16 @@ data: "name": "namespace_scope_gpu", "label": "GPU namespace filter", "type": "custom", - "query": "workload namespaces only : 
namespace!~\"^(kube-system|longhorn-system|metallb-system|monitoring|logging|cert-manager|flux-system|traefik|maintenance|postgres)$\",all namespaces : namespace=~\".*\",infrastructure namespaces only : namespace=~\"^(kube-system|longhorn-system|metallb-system|monitoring|logging|cert-manager|flux-system|traefik|maintenance|postgres)$\"", + "query": "workload namespaces only : namespace!~\"^(kube-.*|.*-system|traefik|monitoring|logging|cert-manager|maintenance|postgres)$\",all namespaces : namespace=~\".*\",infrastructure namespaces only : namespace=~\"^(kube-.*|.*-system|traefik|monitoring|logging|cert-manager|maintenance|postgres)$\"", "current": { "text": "workload namespaces only", - "value": "namespace!~\"^(kube-system|longhorn-system|metallb-system|monitoring|logging|cert-manager|flux-system|traefik|maintenance|postgres)$\"", + "value": "namespace!~\"^(kube-.*|.*-system|traefik|monitoring|logging|cert-manager|maintenance|postgres)$\"", "selected": true }, "options": [ { "text": "workload namespaces only", - "value": "namespace!~\"^(kube-system|longhorn-system|metallb-system|monitoring|logging|cert-manager|flux-system|traefik|maintenance|postgres)$\"", + "value": "namespace!~\"^(kube-.*|.*-system|traefik|monitoring|logging|cert-manager|maintenance|postgres)$\"", "selected": true }, { @@ -2236,7 +2311,7 @@ data: }, { "text": "infrastructure namespaces only", - "value": "namespace=~\"^(kube-system|longhorn-system|metallb-system|monitoring|logging|cert-manager|flux-system|traefik|maintenance|postgres)$\"", + "value": "namespace=~\"^(kube-.*|.*-system|traefik|monitoring|logging|cert-manager|maintenance|postgres)$\"", "selected": false } ], @@ -2251,16 +2326,16 @@ data: "name": "namespace_scope_ram", "label": "RAM namespace filter", "type": "custom", - "query": "workload namespaces only : namespace!~\"^(kube-system|longhorn-system|metallb-system|monitoring|logging|cert-manager|flux-system|traefik|maintenance|postgres)$\",all namespaces : 
namespace=~\".*\",infrastructure namespaces only : namespace=~\"^(kube-system|longhorn-system|metallb-system|monitoring|logging|cert-manager|flux-system|traefik|maintenance|postgres)$\"", + "query": "workload namespaces only : namespace!~\"^(kube-.*|.*-system|traefik|monitoring|logging|cert-manager|maintenance|postgres)$\",all namespaces : namespace=~\".*\",infrastructure namespaces only : namespace=~\"^(kube-.*|.*-system|traefik|monitoring|logging|cert-manager|maintenance|postgres)$\"", "current": { "text": "workload namespaces only", - "value": "namespace!~\"^(kube-system|longhorn-system|metallb-system|monitoring|logging|cert-manager|flux-system|traefik|maintenance|postgres)$\"", + "value": "namespace!~\"^(kube-.*|.*-system|traefik|monitoring|logging|cert-manager|maintenance|postgres)$\"", "selected": true }, "options": [ { "text": "workload namespaces only", - "value": "namespace!~\"^(kube-system|longhorn-system|metallb-system|monitoring|logging|cert-manager|flux-system|traefik|maintenance|postgres)$\"", + "value": "namespace!~\"^(kube-.*|.*-system|traefik|monitoring|logging|cert-manager|maintenance|postgres)$\"", "selected": true }, { @@ -2270,7 +2345,7 @@ data: }, { "text": "infrastructure namespaces only", - "value": "namespace=~\"^(kube-system|longhorn-system|metallb-system|monitoring|logging|cert-manager|flux-system|traefik|maintenance|postgres)$\"", + "value": "namespace=~\"^(kube-.*|.*-system|traefik|monitoring|logging|cert-manager|maintenance|postgres)$\"", "selected": false } ], diff --git a/services/monitoring/grafana-dashboard-pods.yaml b/services/monitoring/grafana-dashboard-pods.yaml index 5ea8343..f537d4c 100644 --- a/services/monitoring/grafana-dashboard-pods.yaml +++ b/services/monitoring/grafana-dashboard-pods.yaml @@ -209,7 +209,7 @@ data: }, "targets": [ { - "expr": 
"sum(kube_pod_info{node=~\"titan-0a|titan-0b|titan-0c\",namespace!~\"^(kube-system|longhorn-system|metallb-system|monitoring|logging|cert-manager|flux-system|traefik|maintenance|postgres)$\"})", + "expr": "sum(kube_pod_info{node=~\"titan-0a|titan-0b|titan-0c\",namespace!~\"^(kube-.*|.*-system|traefik|monitoring|logging|cert-manager|maintenance|postgres)$\"})", "refId": "A" } ], diff --git a/services/monitoring/kustomization.yaml b/services/monitoring/kustomization.yaml index b12556e..69ad326 100644 --- a/services/monitoring/kustomization.yaml +++ b/services/monitoring/kustomization.yaml @@ -14,6 +14,7 @@ resources: - grafana-dashboard-network.yaml - grafana-dashboard-gpu.yaml - grafana-dashboard-mail.yaml + - grafana-dashboard-glue.yaml - dcgm-exporter.yaml - jetson-tegrastats-exporter.yaml - postmark-exporter-service.yaml diff --git a/services/nextcloud-mail-sync/cronjob.yaml b/services/nextcloud-mail-sync/cronjob.yaml index 728bf98..cb42d49 100644 --- a/services/nextcloud-mail-sync/cronjob.yaml +++ b/services/nextcloud-mail-sync/cronjob.yaml @@ -4,6 +4,8 @@ kind: CronJob metadata: name: nextcloud-mail-sync namespace: nextcloud + labels: + atlas.bstein.dev/glue: "true" spec: schedule: "0 5 * * *" concurrencyPolicy: Forbid diff --git a/services/outline/deployment.yaml b/services/outline/deployment.yaml index 471d185..cca3964 100644 --- a/services/outline/deployment.yaml +++ b/services/outline/deployment.yaml @@ -41,9 +41,9 @@ spec: export OIDC_USERINFO_URI="{{ .Data.data.OIDC_USERINFO_URI }}" {{ end }} {{ with secret "kv/data/atlas/outline/outline-smtp" }} - export SMTP_FROM_EMAIL="{{ .Data.data.SMTP_FROM_EMAIL }}" export SMTP_HOST="{{ .Data.data.SMTP_HOST }}" {{ end }} + export SMTP_FROM_EMAIL="no-reply-outline@bstein.dev" {{ with secret "kv/data/atlas/shared/postmark-relay" }} export SMTP_USERNAME="{{ index .Data.data "relay-username" }}" export SMTP_PASSWORD="{{ index .Data.data "relay-password" }}" diff --git a/services/planka/deployment.yaml 
b/services/planka/deployment.yaml index afda7fd..155ac80 100644 --- a/services/planka/deployment.yaml +++ b/services/planka/deployment.yaml @@ -41,12 +41,12 @@ spec: export OIDC_USE_OAUTH_CALLBACK="{{ .Data.data.OIDC_USE_OAUTH_CALLBACK }}" {{ end }} {{ with secret "kv/data/atlas/planka/planka-smtp" }} - export SMTP_FROM="{{ .Data.data.SMTP_FROM }}" export SMTP_HOST="{{ .Data.data.SMTP_HOST }}" export SMTP_PORT="{{ .Data.data.SMTP_PORT }}" export SMTP_SECURE="{{ .Data.data.SMTP_SECURE }}" export SMTP_TLS_REJECT_UNAUTHORIZED="{{ .Data.data.SMTP_TLS_REJECT_UNAUTHORIZED }}" {{ end }} + export SMTP_FROM="no-reply-planka@bstein.dev" {{ with secret "kv/data/atlas/shared/postmark-relay" }} export SMTP_USER="{{ index .Data.data "relay-username" }}" export SMTP_PASSWORD="{{ index .Data.data "relay-password" }}" diff --git a/services/vault/k8s-auth-config-cronjob.yaml b/services/vault/k8s-auth-config-cronjob.yaml index 6a644df..29e8e80 100644 --- a/services/vault/k8s-auth-config-cronjob.yaml +++ b/services/vault/k8s-auth-config-cronjob.yaml @@ -4,6 +4,8 @@ kind: CronJob metadata: name: vault-k8s-auth-config namespace: vault + labels: + atlas.bstein.dev/glue: "true" spec: schedule: "*/15 * * * *" concurrencyPolicy: Forbid diff --git a/services/vault/oidc-config-cronjob.yaml b/services/vault/oidc-config-cronjob.yaml index 3140073..013c9f3 100644 --- a/services/vault/oidc-config-cronjob.yaml +++ b/services/vault/oidc-config-cronjob.yaml @@ -4,6 +4,8 @@ kind: CronJob metadata: name: vault-oidc-config namespace: vault + labels: + atlas.bstein.dev/glue: "true" spec: schedule: "*/15 * * * *" concurrencyPolicy: Forbid From 8b8d2c4aa88a9435bfb63602404a10a5c3c33ac7 Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Sun, 18 Jan 2026 03:00:24 -0300 Subject: [PATCH 242/270] vaultwarden: add retry safeguards and db tuning --- .../scripts/vaultwarden_cred_sync.py | 31 +++++++++++++++++++ .../vaultwarden-cred-sync-cronjob.yaml | 6 ++++ services/vaultwarden/deployment.yaml | 10 ++++++ 3 files 
changed, 47 insertions(+) diff --git a/services/bstein-dev-home/scripts/vaultwarden_cred_sync.py b/services/bstein-dev-home/scripts/vaultwarden_cred_sync.py index 9ee4eeb..cb4f9c8 100644 --- a/services/bstein-dev-home/scripts/vaultwarden_cred_sync.py +++ b/services/bstein-dev-home/scripts/vaultwarden_cred_sync.py @@ -2,8 +2,10 @@ from __future__ import annotations +import os import sys import time +from datetime import datetime, timezone from typing import Any, Iterable import httpx @@ -16,6 +18,8 @@ from atlas_portal.vaultwarden import invite_user VAULTWARDEN_EMAIL_ATTR = "vaultwarden_email" VAULTWARDEN_STATUS_ATTR = "vaultwarden_status" VAULTWARDEN_SYNCED_AT_ATTR = "vaultwarden_synced_at" +VAULTWARDEN_RETRY_COOLDOWN_SEC = int(os.getenv("VAULTWARDEN_RETRY_COOLDOWN_SEC", "1800")) +VAULTWARDEN_FAILURE_BAILOUT = int(os.getenv("VAULTWARDEN_FAILURE_BAILOUT", "2")) def _iter_keycloak_users(page_size: int = 200) -> Iterable[dict[str, Any]]: @@ -82,6 +86,21 @@ def _extract_attr(attrs: Any, key: str) -> str: return "" +def _parse_synced_at(value: str) -> float | None: + value = (value or "").strip() + if not value: + return None + for fmt in ("%Y-%m-%dT%H:%M:%SZ", "%Y-%m-%dT%H:%M:%S%z"): + try: + parsed = datetime.strptime(value, fmt) + if parsed.tzinfo is None: + parsed = parsed.replace(tzinfo=timezone.utc) + return parsed.timestamp() + except ValueError: + continue + return None + + def _vaultwarden_email_for_user(user: dict[str, Any]) -> str: username = (user.get("username") if isinstance(user.get("username"), str) else "") or "" username = username.strip() @@ -129,6 +148,7 @@ def main() -> int: created = 0 skipped = 0 failures = 0 + consecutive_failures = 0 for user in _iter_keycloak_users(): username = (user.get("username") if isinstance(user.get("username"), str) else "") or "" @@ -158,6 +178,11 @@ def main() -> int: current_status = _extract_attr(full_user.get("attributes"), VAULTWARDEN_STATUS_ATTR) current_synced_at = _extract_attr(full_user.get("attributes"), 
VAULTWARDEN_SYNCED_AT_ATTR) + current_synced_ts = _parse_synced_at(current_synced_at) + if current_status in {"rate_limited", "error"} and current_synced_ts: + if time.time() - current_synced_ts < VAULTWARDEN_RETRY_COOLDOWN_SEC: + skipped += 1 + continue email = _vaultwarden_email_for_user(full_user) if not email: print(f"skip {username}: missing email", file=sys.stderr) @@ -188,6 +213,7 @@ def main() -> int: result = invite_user(email) if result.ok: created += 1 + consecutive_failures = 0 print(f"ok {username}: {result.status}") try: _set_user_attribute(username, VAULTWARDEN_STATUS_ATTR, result.status) @@ -196,12 +222,17 @@ def main() -> int: pass else: failures += 1 + if result.status in {"rate_limited", "error"}: + consecutive_failures += 1 print(f"err {username}: {result.status} {result.detail}", file=sys.stderr) try: _set_user_attribute(username, VAULTWARDEN_STATUS_ATTR, result.status) _set_user_attribute(username, VAULTWARDEN_SYNCED_AT_ATTR, time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime())) except Exception: pass + if consecutive_failures >= VAULTWARDEN_FAILURE_BAILOUT: + print("vaultwarden: too many consecutive failures; aborting run", file=sys.stderr) + break print( f"done processed={processed} created_or_present={created} skipped={skipped} failures={failures}", diff --git a/services/bstein-dev-home/vaultwarden-cred-sync-cronjob.yaml b/services/bstein-dev-home/vaultwarden-cred-sync-cronjob.yaml index 86eeaf1..29141fe 100644 --- a/services/bstein-dev-home/vaultwarden-cred-sync-cronjob.yaml +++ b/services/bstein-dev-home/vaultwarden-cred-sync-cronjob.yaml @@ -68,6 +68,12 @@ spec: value: bstein-dev-home-admin - name: HTTP_CHECK_TIMEOUT_SEC value: "20" + - name: VAULTWARDEN_ADMIN_SESSION_TTL_SEC + value: "900" + - name: VAULTWARDEN_RETRY_COOLDOWN_SEC + value: "1800" + - name: VAULTWARDEN_FAILURE_BAILOUT + value: "2" volumeMounts: - name: vaultwarden-cred-sync-script mountPath: /scripts diff --git a/services/vaultwarden/deployment.yaml 
b/services/vaultwarden/deployment.yaml index 6125ad8..219c402 100644 --- a/services/vaultwarden/deployment.yaml +++ b/services/vaultwarden/deployment.yaml @@ -50,6 +50,16 @@ spec: value: "true" - name: DOMAIN value: "https://vault.bstein.dev" + - name: DB_CONNECTION_RETRIES + value: "0" + - name: DATABASE_TIMEOUT + value: "60" + - name: DATABASE_MIN_CONNS + value: "2" + - name: DATABASE_MAX_CONNS + value: "20" + - name: DATABASE_IDLE_TIMEOUT + value: "600" - name: SMTP_HOST value: "mail.bstein.dev" - name: SMTP_PORT From 4874ccda4df1140e907c76ce574e74523473257b Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Sun, 18 Jan 2026 03:09:40 -0300 Subject: [PATCH 243/270] vaultwarden: pin to arm64 workers --- services/vaultwarden/deployment.yaml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/services/vaultwarden/deployment.yaml b/services/vaultwarden/deployment.yaml index 219c402..c3b7d27 100644 --- a/services/vaultwarden/deployment.yaml +++ b/services/vaultwarden/deployment.yaml @@ -35,6 +35,9 @@ spec: {{ end }} spec: serviceAccountName: vaultwarden-vault + nodeSelector: + kubernetes.io/arch: arm64 + node-role.kubernetes.io/worker: "true" containers: - name: vaultwarden image: vaultwarden/server:1.33.2 From d7812623cd429b72dc20f44abad9524bea3fcc59 Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Sun, 18 Jan 2026 08:12:06 -0300 Subject: [PATCH 244/270] monitoring: add glue row and fix mail dns --- infrastructure/core/coredns-custom.yaml | 2 +- scripts/dashboards_render_atlas.py | 59 +++- .../monitoring/dashboards/atlas-glue.json | 2 +- .../monitoring/dashboards/atlas-overview.json | 313 +++++++++++++----- .../monitoring/grafana-dashboard-glue.yaml | 2 +- .../grafana-dashboard-overview.yaml | 313 +++++++++++++----- 6 files changed, 526 insertions(+), 165 deletions(-) diff --git a/infrastructure/core/coredns-custom.yaml b/infrastructure/core/coredns-custom.yaml index ea28def..8aeff14 100644 --- a/infrastructure/core/coredns-custom.yaml +++ 
b/infrastructure/core/coredns-custom.yaml @@ -24,7 +24,7 @@ data: 192.168.22.9 live.bstein.dev 192.168.22.9 logs.bstein.dev 192.168.22.9 longhorn.bstein.dev - 192.168.22.9 mail.bstein.dev + 192.168.22.4 mail.bstein.dev 192.168.22.9 matrix.live.bstein.dev 192.168.22.9 metrics.bstein.dev 192.168.22.9 monero.bstein.dev diff --git a/scripts/dashboards_render_atlas.py b/scripts/dashboards_render_atlas.py index 4aa2908..179536e 100644 --- a/scripts/dashboards_render_atlas.py +++ b/scripts/dashboards_render_atlas.py @@ -332,6 +332,8 @@ GLUE_MISSING = f"({GLUE_JOBS} unless {GLUE_LAST_SUCCESS})" GLUE_STALE_ACTIVE = f"({GLUE_STALE} unless on(namespace,cronjob) {GLUE_SUSPENDED})" GLUE_MISSING_ACTIVE = f"({GLUE_MISSING} unless on(namespace,cronjob) {GLUE_SUSPENDED})" GLUE_STALE_COUNT = f"(sum({GLUE_STALE_ACTIVE}) + count({GLUE_MISSING_ACTIVE}))" +GLUE_MISSING_COUNT = f"count({GLUE_MISSING_ACTIVE})" +GLUE_SUSPENDED_COUNT = f"sum({GLUE_SUSPENDED})" GPU_NODES = ["titan-20", "titan-21", "titan-22", "titan-24"] GPU_NODE_REGEX = "|".join(GPU_NODES) TRAEFIK_ROUTER_EXPR = "sum by (router) (rate(traefik_router_requests_total[5m]))" @@ -1054,17 +1056,6 @@ def build_overview(): links=link_to("atlas-mail"), ) ) - panels.append( - stat_panel( - 34, - "Glue Jobs Stale", - GLUE_STALE_COUNT, - {"h": 2, "w": 4, "x": 20, "y": 8}, - unit="none", - thresholds=count_thresholds, - links=link_to("atlas-glue"), - ) - ) storage_panels = [ (23, "Astreae Usage", astreae_usage_expr("/mnt/astreae"), "percent"), @@ -1120,6 +1111,50 @@ def build_overview(): ) ) + panels.append( + { + "id": 34, + "type": "row", + "title": "Glue + Automation", + "gridPos": {"h": 1, "w": 24, "x": 0, "y": 25}, + "collapsed": False, + "panels": [], + } + ) + panels.append( + stat_panel( + 35, + "Glue Jobs Stale", + GLUE_STALE_COUNT, + {"h": 6, "w": 8, "x": 0, "y": 26}, + unit="none", + thresholds=count_thresholds, + links=link_to("atlas-glue"), + ) + ) + panels.append( + stat_panel( + 36, + "Glue Jobs Missing Success", + 
GLUE_MISSING_COUNT, + {"h": 6, "w": 8, "x": 8, "y": 26}, + unit="none", + thresholds=count_thresholds, + links=link_to("atlas-glue"), + ) + ) + panels.append( + stat_panel( + 37, + "Glue Jobs Suspended", + GLUE_SUSPENDED_COUNT, + {"h": 6, "w": 8, "x": 16, "y": 26}, + unit="none", + thresholds=count_thresholds, + links=link_to("atlas-glue"), + ) + ) + worker_filter = f"{WORKER_REGEX}" panels.append( timeseries_panel( @@ -2186,7 +2221,7 @@ def build_glue_dashboard(): table_panel( 2, "Glue Jobs Missing Success", - GLUE_MISSING, + GLUE_MISSING_ACTIVE, {"h": 4, "w": 6, "x": 6, "y": 0}, unit="none", transformations=sort_desc, diff --git a/services/monitoring/dashboards/atlas-glue.json b/services/monitoring/dashboards/atlas-glue.json index c836d18..732d36c 100644 --- a/services/monitoring/dashboards/atlas-glue.json +++ b/services/monitoring/dashboards/atlas-glue.json @@ -88,7 +88,7 @@ }, "targets": [ { - "expr": "(kube_cronjob_labels{label_atlas_bstein_dev_glue=\"true\"} unless kube_cronjob_status_last_successful_time{label_atlas_bstein_dev_glue=\"true\"})", + "expr": "((kube_cronjob_labels{label_atlas_bstein_dev_glue=\"true\"} unless kube_cronjob_status_last_successful_time{label_atlas_bstein_dev_glue=\"true\"}) unless on(namespace,cronjob) kube_cronjob_spec_suspend{label_atlas_bstein_dev_glue=\"true\"} == 1)", "refId": "A", "instant": true } diff --git a/services/monitoring/dashboards/atlas-overview.json b/services/monitoring/dashboards/atlas-overview.json index e1c5d3a..8732391 100644 --- a/services/monitoring/dashboards/atlas-overview.json +++ b/services/monitoring/dashboards/atlas-overview.json @@ -1110,81 +1110,6 @@ } ] }, - { - "id": 34, - "type": "stat", - "title": "Glue Jobs Stale", - "datasource": { - "type": "prometheus", - "uid": "atlas-vm" - }, - "gridPos": { - "h": 2, - "w": 4, - "x": 20, - "y": 8 - }, - "targets": [ - { - "expr": "(sum((((time() - kube_cronjob_status_last_successful_time{label_atlas_bstein_dev_glue=\"true\"}) > bool 129600) unless 
on(namespace,cronjob) kube_cronjob_spec_suspend{label_atlas_bstein_dev_glue=\"true\"} == 1)) + count(((kube_cronjob_labels{label_atlas_bstein_dev_glue=\"true\"} unless kube_cronjob_status_last_successful_time{label_atlas_bstein_dev_glue=\"true\"}) unless on(namespace,cronjob) kube_cronjob_spec_suspend{label_atlas_bstein_dev_glue=\"true\"} == 1)))", - "refId": "A" - } - ], - "fieldConfig": { - "defaults": { - "color": { - "mode": "thresholds" - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "yellow", - "value": 1 - }, - { - "color": "orange", - "value": 2 - }, - { - "color": "red", - "value": 3 - } - ] - }, - "unit": "none", - "custom": { - "displayMode": "auto" - } - }, - "overrides": [] - }, - "options": { - "colorMode": "value", - "graphMode": "area", - "justifyMode": "center", - "reduceOptions": { - "calcs": [ - "lastNotNull" - ], - "fields": "", - "values": false - }, - "textMode": "value" - }, - "links": [ - { - "title": "Open atlas-glue dashboard", - "url": "/d/atlas-glue", - "targetBlank": true - } - ] - }, { "id": 23, "type": "stat", @@ -1676,6 +1601,244 @@ ], "description": "Shares are normalized within the selected filter. Switching scope changes the denominator." 
}, + { + "id": 34, + "type": "row", + "title": "Glue + Automation", + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 25 + }, + "collapsed": false, + "panels": [] + }, + { + "id": 35, + "type": "stat", + "title": "Glue Jobs Stale", + "datasource": { + "type": "prometheus", + "uid": "atlas-vm" + }, + "gridPos": { + "h": 6, + "w": 8, + "x": 0, + "y": 26 + }, + "targets": [ + { + "expr": "(sum((((time() - kube_cronjob_status_last_successful_time{label_atlas_bstein_dev_glue=\"true\"}) > bool 129600) unless on(namespace,cronjob) kube_cronjob_spec_suspend{label_atlas_bstein_dev_glue=\"true\"} == 1)) + count(((kube_cronjob_labels{label_atlas_bstein_dev_glue=\"true\"} unless kube_cronjob_status_last_successful_time{label_atlas_bstein_dev_glue=\"true\"}) unless on(namespace,cronjob) kube_cronjob_spec_suspend{label_atlas_bstein_dev_glue=\"true\"} == 1)))", + "refId": "A" + } + ], + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "yellow", + "value": 1 + }, + { + "color": "orange", + "value": 2 + }, + { + "color": "red", + "value": 3 + } + ] + }, + "unit": "none", + "custom": { + "displayMode": "auto" + } + }, + "overrides": [] + }, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "center", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "textMode": "value" + }, + "links": [ + { + "title": "Open atlas-glue dashboard", + "url": "/d/atlas-glue", + "targetBlank": true + } + ] + }, + { + "id": 36, + "type": "stat", + "title": "Glue Jobs Missing Success", + "datasource": { + "type": "prometheus", + "uid": "atlas-vm" + }, + "gridPos": { + "h": 6, + "w": 8, + "x": 8, + "y": 26 + }, + "targets": [ + { + "expr": "count(((kube_cronjob_labels{label_atlas_bstein_dev_glue=\"true\"} unless 
kube_cronjob_status_last_successful_time{label_atlas_bstein_dev_glue=\"true\"}) unless on(namespace,cronjob) kube_cronjob_spec_suspend{label_atlas_bstein_dev_glue=\"true\"} == 1))", + "refId": "A" + } + ], + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "yellow", + "value": 1 + }, + { + "color": "orange", + "value": 2 + }, + { + "color": "red", + "value": 3 + } + ] + }, + "unit": "none", + "custom": { + "displayMode": "auto" + } + }, + "overrides": [] + }, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "center", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "textMode": "value" + }, + "links": [ + { + "title": "Open atlas-glue dashboard", + "url": "/d/atlas-glue", + "targetBlank": true + } + ] + }, + { + "id": 37, + "type": "stat", + "title": "Glue Jobs Suspended", + "datasource": { + "type": "prometheus", + "uid": "atlas-vm" + }, + "gridPos": { + "h": 6, + "w": 8, + "x": 16, + "y": 26 + }, + "targets": [ + { + "expr": "sum(kube_cronjob_spec_suspend{label_atlas_bstein_dev_glue=\"true\"} == 1)", + "refId": "A" + } + ], + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "yellow", + "value": 1 + }, + { + "color": "orange", + "value": 2 + }, + { + "color": "red", + "value": 3 + } + ] + }, + "unit": "none", + "custom": { + "displayMode": "auto" + } + }, + "overrides": [] + }, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "center", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "textMode": "value" + }, + "links": [ + { + "title": "Open atlas-glue dashboard", + "url": "/d/atlas-glue", + "targetBlank": 
true + } + ] + }, { "id": 14, "type": "timeseries", diff --git a/services/monitoring/grafana-dashboard-glue.yaml b/services/monitoring/grafana-dashboard-glue.yaml index 0f8c0a1..7aeec74 100644 --- a/services/monitoring/grafana-dashboard-glue.yaml +++ b/services/monitoring/grafana-dashboard-glue.yaml @@ -97,7 +97,7 @@ data: }, "targets": [ { - "expr": "(kube_cronjob_labels{label_atlas_bstein_dev_glue=\"true\"} unless kube_cronjob_status_last_successful_time{label_atlas_bstein_dev_glue=\"true\"})", + "expr": "((kube_cronjob_labels{label_atlas_bstein_dev_glue=\"true\"} unless kube_cronjob_status_last_successful_time{label_atlas_bstein_dev_glue=\"true\"}) unless on(namespace,cronjob) kube_cronjob_spec_suspend{label_atlas_bstein_dev_glue=\"true\"} == 1)", "refId": "A", "instant": true } diff --git a/services/monitoring/grafana-dashboard-overview.yaml b/services/monitoring/grafana-dashboard-overview.yaml index 78d5566..0f6cd72 100644 --- a/services/monitoring/grafana-dashboard-overview.yaml +++ b/services/monitoring/grafana-dashboard-overview.yaml @@ -1119,81 +1119,6 @@ data: } ] }, - { - "id": 34, - "type": "stat", - "title": "Glue Jobs Stale", - "datasource": { - "type": "prometheus", - "uid": "atlas-vm" - }, - "gridPos": { - "h": 2, - "w": 4, - "x": 20, - "y": 8 - }, - "targets": [ - { - "expr": "(sum((((time() - kube_cronjob_status_last_successful_time{label_atlas_bstein_dev_glue=\"true\"}) > bool 129600) unless on(namespace,cronjob) kube_cronjob_spec_suspend{label_atlas_bstein_dev_glue=\"true\"} == 1)) + count(((kube_cronjob_labels{label_atlas_bstein_dev_glue=\"true\"} unless kube_cronjob_status_last_successful_time{label_atlas_bstein_dev_glue=\"true\"}) unless on(namespace,cronjob) kube_cronjob_spec_suspend{label_atlas_bstein_dev_glue=\"true\"} == 1)))", - "refId": "A" - } - ], - "fieldConfig": { - "defaults": { - "color": { - "mode": "thresholds" - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - 
}, - { - "color": "yellow", - "value": 1 - }, - { - "color": "orange", - "value": 2 - }, - { - "color": "red", - "value": 3 - } - ] - }, - "unit": "none", - "custom": { - "displayMode": "auto" - } - }, - "overrides": [] - }, - "options": { - "colorMode": "value", - "graphMode": "area", - "justifyMode": "center", - "reduceOptions": { - "calcs": [ - "lastNotNull" - ], - "fields": "", - "values": false - }, - "textMode": "value" - }, - "links": [ - { - "title": "Open atlas-glue dashboard", - "url": "/d/atlas-glue", - "targetBlank": true - } - ] - }, { "id": 23, "type": "stat", @@ -1685,6 +1610,244 @@ data: ], "description": "Shares are normalized within the selected filter. Switching scope changes the denominator." }, + { + "id": 34, + "type": "row", + "title": "Glue + Automation", + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 25 + }, + "collapsed": false, + "panels": [] + }, + { + "id": 35, + "type": "stat", + "title": "Glue Jobs Stale", + "datasource": { + "type": "prometheus", + "uid": "atlas-vm" + }, + "gridPos": { + "h": 6, + "w": 8, + "x": 0, + "y": 26 + }, + "targets": [ + { + "expr": "(sum((((time() - kube_cronjob_status_last_successful_time{label_atlas_bstein_dev_glue=\"true\"}) > bool 129600) unless on(namespace,cronjob) kube_cronjob_spec_suspend{label_atlas_bstein_dev_glue=\"true\"} == 1)) + count(((kube_cronjob_labels{label_atlas_bstein_dev_glue=\"true\"} unless kube_cronjob_status_last_successful_time{label_atlas_bstein_dev_glue=\"true\"}) unless on(namespace,cronjob) kube_cronjob_spec_suspend{label_atlas_bstein_dev_glue=\"true\"} == 1)))", + "refId": "A" + } + ], + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "yellow", + "value": 1 + }, + { + "color": "orange", + "value": 2 + }, + { + "color": "red", + "value": 3 + } + ] + }, + "unit": "none", + "custom": { + "displayMode": "auto" + } + }, 
+ "overrides": [] + }, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "center", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "textMode": "value" + }, + "links": [ + { + "title": "Open atlas-glue dashboard", + "url": "/d/atlas-glue", + "targetBlank": true + } + ] + }, + { + "id": 36, + "type": "stat", + "title": "Glue Jobs Missing Success", + "datasource": { + "type": "prometheus", + "uid": "atlas-vm" + }, + "gridPos": { + "h": 6, + "w": 8, + "x": 8, + "y": 26 + }, + "targets": [ + { + "expr": "count(((kube_cronjob_labels{label_atlas_bstein_dev_glue=\"true\"} unless kube_cronjob_status_last_successful_time{label_atlas_bstein_dev_glue=\"true\"}) unless on(namespace,cronjob) kube_cronjob_spec_suspend{label_atlas_bstein_dev_glue=\"true\"} == 1))", + "refId": "A" + } + ], + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "yellow", + "value": 1 + }, + { + "color": "orange", + "value": 2 + }, + { + "color": "red", + "value": 3 + } + ] + }, + "unit": "none", + "custom": { + "displayMode": "auto" + } + }, + "overrides": [] + }, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "center", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "textMode": "value" + }, + "links": [ + { + "title": "Open atlas-glue dashboard", + "url": "/d/atlas-glue", + "targetBlank": true + } + ] + }, + { + "id": 37, + "type": "stat", + "title": "Glue Jobs Suspended", + "datasource": { + "type": "prometheus", + "uid": "atlas-vm" + }, + "gridPos": { + "h": 6, + "w": 8, + "x": 16, + "y": 26 + }, + "targets": [ + { + "expr": "sum(kube_cronjob_spec_suspend{label_atlas_bstein_dev_glue=\"true\"} == 1)", + "refId": "A" + } + ], + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" 
+ }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "yellow", + "value": 1 + }, + { + "color": "orange", + "value": 2 + }, + { + "color": "red", + "value": 3 + } + ] + }, + "unit": "none", + "custom": { + "displayMode": "auto" + } + }, + "overrides": [] + }, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "center", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "textMode": "value" + }, + "links": [ + { + "title": "Open atlas-glue dashboard", + "url": "/d/atlas-glue", + "targetBlank": true + } + ] + }, { "id": 14, "type": "timeseries", From 1fb3d179efc5a5e5b0e52409c0598b1ea204effb Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Sun, 18 Jan 2026 09:21:33 -0300 Subject: [PATCH 245/270] monitoring: add testing dashboard and switch postmark apikey --- scripts/dashboards_render_atlas.py | 58 +---- .../bstein-dev-home/backend-deployment.yaml | 4 +- services/finance/firefly-deployment.yaml | 4 +- services/keycloak/deployment.yaml | 4 +- services/keycloak/ldap-federation-job.yaml | 4 +- services/keycloak/portal-e2e-client-job.yaml | 4 +- ...al-e2e-execute-actions-email-test-job.yaml | 4 +- .../portal-e2e-target-client-job.yaml | 4 +- ...al-e2e-token-exchange-permissions-job.yaml | 4 +- .../portal-e2e-token-exchange-test-job.yaml | 4 +- services/keycloak/realm-settings-job.yaml | 4 +- services/keycloak/user-overrides-job.yaml | 4 +- services/mailu/helmrelease.yaml | 24 +- .../monitoring/dashboards/atlas-overview.json | 238 ------------------ .../{atlas-glue.json => atlas-testing.json} | 6 +- .../grafana-dashboard-overview.yaml | 238 ------------------ ...ue.yaml => grafana-dashboard-testing.yaml} | 12 +- services/monitoring/helmrelease.yaml | 4 +- services/monitoring/kustomization.yaml | 2 +- .../postmark-exporter-deployment.yaml | 4 +- services/nextcloud-mail-sync/cronjob.yaml | 4 +- 
services/nextcloud/deployment.yaml | 4 +- services/nextcloud/maintenance-cronjob.yaml | 4 +- services/outline/deployment.yaml | 4 +- services/planka/deployment.yaml | 4 +- services/vaultwarden/deployment.yaml | 4 +- 26 files changed, 67 insertions(+), 587 deletions(-) rename services/monitoring/dashboards/{atlas-glue.json => atlas-testing.json} (99%) rename services/monitoring/{grafana-dashboard-glue.yaml => grafana-dashboard-testing.yaml} (97%) diff --git a/scripts/dashboards_render_atlas.py b/scripts/dashboards_render_atlas.py index 179536e..5e3e7f0 100644 --- a/scripts/dashboards_render_atlas.py +++ b/scripts/dashboards_render_atlas.py @@ -1111,50 +1111,6 @@ def build_overview(): ) ) - panels.append( - { - "id": 34, - "type": "row", - "title": "Glue + Automation", - "gridPos": {"h": 1, "w": 24, "x": 0, "y": 25}, - "collapsed": False, - "panels": [], - } - ) - panels.append( - stat_panel( - 35, - "Glue Jobs Stale", - GLUE_STALE_COUNT, - {"h": 6, "w": 8, "x": 0, "y": 26}, - unit="none", - thresholds=count_thresholds, - links=link_to("atlas-glue"), - ) - ) - panels.append( - stat_panel( - 36, - "Glue Jobs Missing Success", - GLUE_MISSING_COUNT, - {"h": 6, "w": 8, "x": 8, "y": 26}, - unit="none", - thresholds=count_thresholds, - links=link_to("atlas-glue"), - ) - ) - panels.append( - stat_panel( - 37, - "Glue Jobs Suspended", - GLUE_SUSPENDED_COUNT, - {"h": 6, "w": 8, "x": 16, "y": 26}, - unit="none", - thresholds=count_thresholds, - links=link_to("atlas-glue"), - ) - ) - worker_filter = f"{WORKER_REGEX}" panels.append( timeseries_panel( @@ -2195,7 +2151,7 @@ def build_mail_dashboard(): } -def build_glue_dashboard(): +def build_testing_dashboard(): panels = [] sort_desc = [{"id": "labelsToFields", "options": {}}, {"id": "sortBy", "options": {"fields": ["Value"], "order": "desc"}}] @@ -2274,8 +2230,8 @@ def build_glue_dashboard(): ) return { - "uid": "atlas-glue", - "title": "Atlas Glue", + "uid": "atlas-testing", + "title": "Atlas Testing", "folderUid": 
PRIVATE_FOLDER, "editable": True, "panels": panels, @@ -2283,7 +2239,7 @@ def build_glue_dashboard(): "annotations": {"list": []}, "schemaVersion": 39, "style": "dark", - "tags": ["atlas", "glue"], + "tags": ["atlas", "testing"], } @@ -2380,9 +2336,9 @@ DASHBOARDS = { "builder": build_mail_dashboard, "configmap": ROOT / "services" / "monitoring" / "grafana-dashboard-mail.yaml", }, - "atlas-glue": { - "builder": build_glue_dashboard, - "configmap": ROOT / "services" / "monitoring" / "grafana-dashboard-glue.yaml", + "atlas-testing": { + "builder": build_testing_dashboard, + "configmap": ROOT / "services" / "monitoring" / "grafana-dashboard-testing.yaml", }, "atlas-gpu": { "builder": build_gpu_dashboard, diff --git a/services/bstein-dev-home/backend-deployment.yaml b/services/bstein-dev-home/backend-deployment.yaml index 9d820b7..ba6ca74 100644 --- a/services/bstein-dev-home/backend-deployment.yaml +++ b/services/bstein-dev-home/backend-deployment.yaml @@ -38,8 +38,8 @@ spec: export SMTP_PORT="587" export SMTP_STARTTLS="true" export SMTP_USE_TLS="false" - export SMTP_USERNAME="{{ index .Data.data "relay-username" }}" - export SMTP_PASSWORD="{{ index .Data.data "relay-password" }}" + export SMTP_USERNAME="{{ index .Data.data "apikey" }}" + export SMTP_PASSWORD="{{ index .Data.data "apikey" }}" export SMTP_FROM="no-reply-portal@bstein.dev" {{ end }} spec: diff --git a/services/finance/firefly-deployment.yaml b/services/finance/firefly-deployment.yaml index f43bdc3..9c684fe 100644 --- a/services/finance/firefly-deployment.yaml +++ b/services/finance/firefly-deployment.yaml @@ -38,8 +38,8 @@ spec: export STATIC_CRON_TOKEN="$(cat /vault/secrets/firefly-cron-token)" {{ end }} {{ with secret "kv/data/atlas/shared/postmark-relay" }} - export MAIL_USERNAME="{{ index .Data.data "relay-username" }}" - export MAIL_PASSWORD="{{ index .Data.data "relay-password" }}" + export MAIL_USERNAME="{{ index .Data.data "apikey" }}" + export MAIL_PASSWORD="{{ index .Data.data "apikey" }}" {{ 
end }} vault.hashicorp.com/agent-inject-secret-firefly-db-password: "kv/data/atlas/finance/firefly-db" vault.hashicorp.com/agent-inject-template-firefly-db-password: | diff --git a/services/keycloak/deployment.yaml b/services/keycloak/deployment.yaml index bf8a47c..3d241c9 100644 --- a/services/keycloak/deployment.yaml +++ b/services/keycloak/deployment.yaml @@ -45,8 +45,8 @@ spec: export LDAP_BIND_PASSWORD="${LDAP_ADMIN_PASSWORD}" {{ end }} {{ with secret "kv/data/atlas/shared/postmark-relay" }} - export KEYCLOAK_SMTP_USER="{{ index .Data.data "relay-username" }}" - export KEYCLOAK_SMTP_PASSWORD="{{ index .Data.data "relay-password" }}" + export KEYCLOAK_SMTP_USER="{{ index .Data.data "apikey" }}" + export KEYCLOAK_SMTP_PASSWORD="{{ index .Data.data "apikey" }}" {{ end }} spec: serviceAccountName: sso-vault diff --git a/services/keycloak/ldap-federation-job.yaml b/services/keycloak/ldap-federation-job.yaml index 2f911f1..184ad90 100644 --- a/services/keycloak/ldap-federation-job.yaml +++ b/services/keycloak/ldap-federation-job.yaml @@ -34,8 +34,8 @@ spec: export LDAP_BIND_PASSWORD="${LDAP_ADMIN_PASSWORD}" {{ end }} {{ with secret "kv/data/atlas/shared/postmark-relay" }} - export KEYCLOAK_SMTP_USER="{{ index .Data.data "relay-username" }}" - export KEYCLOAK_SMTP_PASSWORD="{{ index .Data.data "relay-password" }}" + export KEYCLOAK_SMTP_USER="{{ index .Data.data "apikey" }}" + export KEYCLOAK_SMTP_PASSWORD="{{ index .Data.data "apikey" }}" {{ end }} spec: affinity: diff --git a/services/keycloak/portal-e2e-client-job.yaml b/services/keycloak/portal-e2e-client-job.yaml index 399a32b..cc2c2b4 100644 --- a/services/keycloak/portal-e2e-client-job.yaml +++ b/services/keycloak/portal-e2e-client-job.yaml @@ -34,8 +34,8 @@ spec: export LDAP_BIND_PASSWORD="${LDAP_ADMIN_PASSWORD}" {{ end }} {{ with secret "kv/data/atlas/shared/postmark-relay" }} - export KEYCLOAK_SMTP_USER="{{ index .Data.data "relay-username" }}" - export KEYCLOAK_SMTP_PASSWORD="{{ index .Data.data 
"relay-password" }}" + export KEYCLOAK_SMTP_USER="{{ index .Data.data "apikey" }}" + export KEYCLOAK_SMTP_PASSWORD="{{ index .Data.data "apikey" }}" {{ end }} spec: restartPolicy: Never diff --git a/services/keycloak/portal-e2e-execute-actions-email-test-job.yaml b/services/keycloak/portal-e2e-execute-actions-email-test-job.yaml index 5653148..736ca8d 100644 --- a/services/keycloak/portal-e2e-execute-actions-email-test-job.yaml +++ b/services/keycloak/portal-e2e-execute-actions-email-test-job.yaml @@ -34,8 +34,8 @@ spec: export LDAP_BIND_PASSWORD="${LDAP_ADMIN_PASSWORD}" {{ end }} {{ with secret "kv/data/atlas/shared/postmark-relay" }} - export KEYCLOAK_SMTP_USER="{{ index .Data.data "relay-username" }}" - export KEYCLOAK_SMTP_PASSWORD="{{ index .Data.data "relay-password" }}" + export KEYCLOAK_SMTP_USER="{{ index .Data.data "apikey" }}" + export KEYCLOAK_SMTP_PASSWORD="{{ index .Data.data "apikey" }}" {{ end }} spec: restartPolicy: Never diff --git a/services/keycloak/portal-e2e-target-client-job.yaml b/services/keycloak/portal-e2e-target-client-job.yaml index 66426e0..64352bb 100644 --- a/services/keycloak/portal-e2e-target-client-job.yaml +++ b/services/keycloak/portal-e2e-target-client-job.yaml @@ -34,8 +34,8 @@ spec: export LDAP_BIND_PASSWORD="${LDAP_ADMIN_PASSWORD}" {{ end }} {{ with secret "kv/data/atlas/shared/postmark-relay" }} - export KEYCLOAK_SMTP_USER="{{ index .Data.data "relay-username" }}" - export KEYCLOAK_SMTP_PASSWORD="{{ index .Data.data "relay-password" }}" + export KEYCLOAK_SMTP_USER="{{ index .Data.data "apikey" }}" + export KEYCLOAK_SMTP_PASSWORD="{{ index .Data.data "apikey" }}" {{ end }} spec: restartPolicy: Never diff --git a/services/keycloak/portal-e2e-token-exchange-permissions-job.yaml b/services/keycloak/portal-e2e-token-exchange-permissions-job.yaml index a6dd621..597187c 100644 --- a/services/keycloak/portal-e2e-token-exchange-permissions-job.yaml +++ b/services/keycloak/portal-e2e-token-exchange-permissions-job.yaml @@ -34,8 +34,8 
@@ spec: export LDAP_BIND_PASSWORD="${LDAP_ADMIN_PASSWORD}" {{ end }} {{ with secret "kv/data/atlas/shared/postmark-relay" }} - export KEYCLOAK_SMTP_USER="{{ index .Data.data "relay-username" }}" - export KEYCLOAK_SMTP_PASSWORD="{{ index .Data.data "relay-password" }}" + export KEYCLOAK_SMTP_USER="{{ index .Data.data "apikey" }}" + export KEYCLOAK_SMTP_PASSWORD="{{ index .Data.data "apikey" }}" {{ end }} spec: restartPolicy: Never diff --git a/services/keycloak/portal-e2e-token-exchange-test-job.yaml b/services/keycloak/portal-e2e-token-exchange-test-job.yaml index 8b7beed..38547da 100644 --- a/services/keycloak/portal-e2e-token-exchange-test-job.yaml +++ b/services/keycloak/portal-e2e-token-exchange-test-job.yaml @@ -35,8 +35,8 @@ spec: export LDAP_BIND_PASSWORD="${LDAP_ADMIN_PASSWORD}" {{ end }} {{ with secret "kv/data/atlas/shared/postmark-relay" }} - export KEYCLOAK_SMTP_USER="{{ index .Data.data "relay-username" }}" - export KEYCLOAK_SMTP_PASSWORD="{{ index .Data.data "relay-password" }}" + export KEYCLOAK_SMTP_USER="{{ index .Data.data "apikey" }}" + export KEYCLOAK_SMTP_PASSWORD="{{ index .Data.data "apikey" }}" {{ end }} spec: restartPolicy: Never diff --git a/services/keycloak/realm-settings-job.yaml b/services/keycloak/realm-settings-job.yaml index 0cef1cb..c74537d 100644 --- a/services/keycloak/realm-settings-job.yaml +++ b/services/keycloak/realm-settings-job.yaml @@ -34,8 +34,8 @@ spec: export LDAP_BIND_PASSWORD="${LDAP_ADMIN_PASSWORD}" {{ end }} {{ with secret "kv/data/atlas/shared/postmark-relay" }} - export KEYCLOAK_SMTP_USER="{{ index .Data.data "relay-username" }}" - export KEYCLOAK_SMTP_PASSWORD="{{ index .Data.data "relay-password" }}" + export KEYCLOAK_SMTP_USER="{{ index .Data.data "apikey" }}" + export KEYCLOAK_SMTP_PASSWORD="{{ index .Data.data "apikey" }}" {{ end }} spec: affinity: diff --git a/services/keycloak/user-overrides-job.yaml b/services/keycloak/user-overrides-job.yaml index a81ea7c..ff2aa91 100644 --- 
a/services/keycloak/user-overrides-job.yaml +++ b/services/keycloak/user-overrides-job.yaml @@ -34,8 +34,8 @@ spec: export LDAP_BIND_PASSWORD="${LDAP_ADMIN_PASSWORD}" {{ end }} {{ with secret "kv/data/atlas/shared/postmark-relay" }} - export KEYCLOAK_SMTP_USER="{{ index .Data.data "relay-username" }}" - export KEYCLOAK_SMTP_PASSWORD="{{ index .Data.data "relay-password" }}" + export KEYCLOAK_SMTP_USER="{{ index .Data.data "apikey" }}" + export KEYCLOAK_SMTP_PASSWORD="{{ index .Data.data "apikey" }}" {{ end }} spec: affinity: diff --git a/services/mailu/helmrelease.yaml b/services/mailu/helmrelease.yaml index ceb3e0c..7342141 100644 --- a/services/mailu/helmrelease.yaml +++ b/services/mailu/helmrelease.yaml @@ -335,8 +335,8 @@ spec: export INITIAL_ADMIN_PW="{{ .Data.data.password }}" {{ end }} {{ with secret "kv/data/atlas/shared/postmark-relay" }} - export RELAYUSER="{{ index .Data.data "relay-username" }}" - export RELAYPASSWORD="{{ index .Data.data "relay-password" }}" + export RELAYUSER="{{ index .Data.data "apikey" }}" + export RELAYPASSWORD="{{ index .Data.data "apikey" }}" {{ end }} spec: serviceAccountName: mailu-vault-sync @@ -397,8 +397,8 @@ spec: export INITIAL_ADMIN_PW="{{ .Data.data.password }}" {{ end }} {{ with secret "kv/data/atlas/shared/postmark-relay" }} - export RELAYUSER="{{ index .Data.data "relay-username" }}" - export RELAYPASSWORD="{{ index .Data.data "relay-password" }}" + export RELAYUSER="{{ index .Data.data "apikey" }}" + export RELAYPASSWORD="{{ index .Data.data "apikey" }}" {{ end }} spec: serviceAccountName: mailu-vault-sync @@ -459,8 +459,8 @@ spec: export INITIAL_ADMIN_PW="{{ .Data.data.password }}" {{ end }} {{ with secret "kv/data/atlas/shared/postmark-relay" }} - export RELAYUSER="{{ index .Data.data "relay-username" }}" - export RELAYPASSWORD="{{ index .Data.data "relay-password" }}" + export RELAYUSER="{{ index .Data.data "apikey" }}" + export RELAYPASSWORD="{{ index .Data.data "apikey" }}" {{ end }} spec: serviceAccountName: 
mailu-vault-sync @@ -521,8 +521,8 @@ spec: export INITIAL_ADMIN_PW="{{ .Data.data.password }}" {{ end }} {{ with secret "kv/data/atlas/shared/postmark-relay" }} - export RELAYUSER="{{ index .Data.data "relay-username" }}" - export RELAYPASSWORD="{{ index .Data.data "relay-password" }}" + export RELAYUSER="{{ index .Data.data "apikey" }}" + export RELAYPASSWORD="{{ index .Data.data "apikey" }}" {{ end }} spec: serviceAccountName: mailu-vault-sync @@ -583,8 +583,8 @@ spec: export INITIAL_ADMIN_PW="{{ .Data.data.password }}" {{ end }} {{ with secret "kv/data/atlas/shared/postmark-relay" }} - export RELAYUSER="{{ index .Data.data "relay-username" }}" - export RELAYPASSWORD="{{ index .Data.data "relay-password" }}" + export RELAYUSER="{{ index .Data.data "apikey" }}" + export RELAYPASSWORD="{{ index .Data.data "apikey" }}" {{ end }} spec: serviceAccountName: mailu-vault-sync @@ -645,8 +645,8 @@ spec: export INITIAL_ADMIN_PW="{{ .Data.data.password }}" {{ end }} {{ with secret "kv/data/atlas/shared/postmark-relay" }} - export RELAYUSER="{{ index .Data.data "relay-username" }}" - export RELAYPASSWORD="{{ index .Data.data "relay-password" }}" + export RELAYUSER="{{ index .Data.data "apikey" }}" + export RELAYPASSWORD="{{ index .Data.data "apikey" }}" {{ end }} spec: serviceAccountName: mailu-vault-sync diff --git a/services/monitoring/dashboards/atlas-overview.json b/services/monitoring/dashboards/atlas-overview.json index 8732391..c5f30d1 100644 --- a/services/monitoring/dashboards/atlas-overview.json +++ b/services/monitoring/dashboards/atlas-overview.json @@ -1601,244 +1601,6 @@ ], "description": "Shares are normalized within the selected filter. Switching scope changes the denominator." 
}, - { - "id": 34, - "type": "row", - "title": "Glue + Automation", - "gridPos": { - "h": 1, - "w": 24, - "x": 0, - "y": 25 - }, - "collapsed": false, - "panels": [] - }, - { - "id": 35, - "type": "stat", - "title": "Glue Jobs Stale", - "datasource": { - "type": "prometheus", - "uid": "atlas-vm" - }, - "gridPos": { - "h": 6, - "w": 8, - "x": 0, - "y": 26 - }, - "targets": [ - { - "expr": "(sum((((time() - kube_cronjob_status_last_successful_time{label_atlas_bstein_dev_glue=\"true\"}) > bool 129600) unless on(namespace,cronjob) kube_cronjob_spec_suspend{label_atlas_bstein_dev_glue=\"true\"} == 1)) + count(((kube_cronjob_labels{label_atlas_bstein_dev_glue=\"true\"} unless kube_cronjob_status_last_successful_time{label_atlas_bstein_dev_glue=\"true\"}) unless on(namespace,cronjob) kube_cronjob_spec_suspend{label_atlas_bstein_dev_glue=\"true\"} == 1)))", - "refId": "A" - } - ], - "fieldConfig": { - "defaults": { - "color": { - "mode": "thresholds" - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "yellow", - "value": 1 - }, - { - "color": "orange", - "value": 2 - }, - { - "color": "red", - "value": 3 - } - ] - }, - "unit": "none", - "custom": { - "displayMode": "auto" - } - }, - "overrides": [] - }, - "options": { - "colorMode": "value", - "graphMode": "area", - "justifyMode": "center", - "reduceOptions": { - "calcs": [ - "lastNotNull" - ], - "fields": "", - "values": false - }, - "textMode": "value" - }, - "links": [ - { - "title": "Open atlas-glue dashboard", - "url": "/d/atlas-glue", - "targetBlank": true - } - ] - }, - { - "id": 36, - "type": "stat", - "title": "Glue Jobs Missing Success", - "datasource": { - "type": "prometheus", - "uid": "atlas-vm" - }, - "gridPos": { - "h": 6, - "w": 8, - "x": 8, - "y": 26 - }, - "targets": [ - { - "expr": "count(((kube_cronjob_labels{label_atlas_bstein_dev_glue=\"true\"} unless 
kube_cronjob_status_last_successful_time{label_atlas_bstein_dev_glue=\"true\"}) unless on(namespace,cronjob) kube_cronjob_spec_suspend{label_atlas_bstein_dev_glue=\"true\"} == 1))", - "refId": "A" - } - ], - "fieldConfig": { - "defaults": { - "color": { - "mode": "thresholds" - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "yellow", - "value": 1 - }, - { - "color": "orange", - "value": 2 - }, - { - "color": "red", - "value": 3 - } - ] - }, - "unit": "none", - "custom": { - "displayMode": "auto" - } - }, - "overrides": [] - }, - "options": { - "colorMode": "value", - "graphMode": "area", - "justifyMode": "center", - "reduceOptions": { - "calcs": [ - "lastNotNull" - ], - "fields": "", - "values": false - }, - "textMode": "value" - }, - "links": [ - { - "title": "Open atlas-glue dashboard", - "url": "/d/atlas-glue", - "targetBlank": true - } - ] - }, - { - "id": 37, - "type": "stat", - "title": "Glue Jobs Suspended", - "datasource": { - "type": "prometheus", - "uid": "atlas-vm" - }, - "gridPos": { - "h": 6, - "w": 8, - "x": 16, - "y": 26 - }, - "targets": [ - { - "expr": "sum(kube_cronjob_spec_suspend{label_atlas_bstein_dev_glue=\"true\"} == 1)", - "refId": "A" - } - ], - "fieldConfig": { - "defaults": { - "color": { - "mode": "thresholds" - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "yellow", - "value": 1 - }, - { - "color": "orange", - "value": 2 - }, - { - "color": "red", - "value": 3 - } - ] - }, - "unit": "none", - "custom": { - "displayMode": "auto" - } - }, - "overrides": [] - }, - "options": { - "colorMode": "value", - "graphMode": "area", - "justifyMode": "center", - "reduceOptions": { - "calcs": [ - "lastNotNull" - ], - "fields": "", - "values": false - }, - "textMode": "value" - }, - "links": [ - { - "title": "Open atlas-glue dashboard", - "url": "/d/atlas-glue", - "targetBlank": 
true - } - ] - }, { "id": 14, "type": "timeseries", diff --git a/services/monitoring/dashboards/atlas-glue.json b/services/monitoring/dashboards/atlas-testing.json similarity index 99% rename from services/monitoring/dashboards/atlas-glue.json rename to services/monitoring/dashboards/atlas-testing.json index 732d36c..831d960 100644 --- a/services/monitoring/dashboards/atlas-glue.json +++ b/services/monitoring/dashboards/atlas-testing.json @@ -1,6 +1,6 @@ { - "uid": "atlas-glue", - "title": "Atlas Glue", + "uid": "atlas-testing", + "title": "Atlas Testing", "folderUid": "atlas-internal", "editable": true, "panels": [ @@ -334,6 +334,6 @@ "style": "dark", "tags": [ "atlas", - "glue" + "testing" ] } diff --git a/services/monitoring/grafana-dashboard-overview.yaml b/services/monitoring/grafana-dashboard-overview.yaml index 0f6cd72..8ad7523 100644 --- a/services/monitoring/grafana-dashboard-overview.yaml +++ b/services/monitoring/grafana-dashboard-overview.yaml @@ -1610,244 +1610,6 @@ data: ], "description": "Shares are normalized within the selected filter. Switching scope changes the denominator." 
}, - { - "id": 34, - "type": "row", - "title": "Glue + Automation", - "gridPos": { - "h": 1, - "w": 24, - "x": 0, - "y": 25 - }, - "collapsed": false, - "panels": [] - }, - { - "id": 35, - "type": "stat", - "title": "Glue Jobs Stale", - "datasource": { - "type": "prometheus", - "uid": "atlas-vm" - }, - "gridPos": { - "h": 6, - "w": 8, - "x": 0, - "y": 26 - }, - "targets": [ - { - "expr": "(sum((((time() - kube_cronjob_status_last_successful_time{label_atlas_bstein_dev_glue=\"true\"}) > bool 129600) unless on(namespace,cronjob) kube_cronjob_spec_suspend{label_atlas_bstein_dev_glue=\"true\"} == 1)) + count(((kube_cronjob_labels{label_atlas_bstein_dev_glue=\"true\"} unless kube_cronjob_status_last_successful_time{label_atlas_bstein_dev_glue=\"true\"}) unless on(namespace,cronjob) kube_cronjob_spec_suspend{label_atlas_bstein_dev_glue=\"true\"} == 1)))", - "refId": "A" - } - ], - "fieldConfig": { - "defaults": { - "color": { - "mode": "thresholds" - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "yellow", - "value": 1 - }, - { - "color": "orange", - "value": 2 - }, - { - "color": "red", - "value": 3 - } - ] - }, - "unit": "none", - "custom": { - "displayMode": "auto" - } - }, - "overrides": [] - }, - "options": { - "colorMode": "value", - "graphMode": "area", - "justifyMode": "center", - "reduceOptions": { - "calcs": [ - "lastNotNull" - ], - "fields": "", - "values": false - }, - "textMode": "value" - }, - "links": [ - { - "title": "Open atlas-glue dashboard", - "url": "/d/atlas-glue", - "targetBlank": true - } - ] - }, - { - "id": 36, - "type": "stat", - "title": "Glue Jobs Missing Success", - "datasource": { - "type": "prometheus", - "uid": "atlas-vm" - }, - "gridPos": { - "h": 6, - "w": 8, - "x": 8, - "y": 26 - }, - "targets": [ - { - "expr": "count(((kube_cronjob_labels{label_atlas_bstein_dev_glue=\"true\"} unless 
kube_cronjob_status_last_successful_time{label_atlas_bstein_dev_glue=\"true\"}) unless on(namespace,cronjob) kube_cronjob_spec_suspend{label_atlas_bstein_dev_glue=\"true\"} == 1))", - "refId": "A" - } - ], - "fieldConfig": { - "defaults": { - "color": { - "mode": "thresholds" - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "yellow", - "value": 1 - }, - { - "color": "orange", - "value": 2 - }, - { - "color": "red", - "value": 3 - } - ] - }, - "unit": "none", - "custom": { - "displayMode": "auto" - } - }, - "overrides": [] - }, - "options": { - "colorMode": "value", - "graphMode": "area", - "justifyMode": "center", - "reduceOptions": { - "calcs": [ - "lastNotNull" - ], - "fields": "", - "values": false - }, - "textMode": "value" - }, - "links": [ - { - "title": "Open atlas-glue dashboard", - "url": "/d/atlas-glue", - "targetBlank": true - } - ] - }, - { - "id": 37, - "type": "stat", - "title": "Glue Jobs Suspended", - "datasource": { - "type": "prometheus", - "uid": "atlas-vm" - }, - "gridPos": { - "h": 6, - "w": 8, - "x": 16, - "y": 26 - }, - "targets": [ - { - "expr": "sum(kube_cronjob_spec_suspend{label_atlas_bstein_dev_glue=\"true\"} == 1)", - "refId": "A" - } - ], - "fieldConfig": { - "defaults": { - "color": { - "mode": "thresholds" - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "yellow", - "value": 1 - }, - { - "color": "orange", - "value": 2 - }, - { - "color": "red", - "value": 3 - } - ] - }, - "unit": "none", - "custom": { - "displayMode": "auto" - } - }, - "overrides": [] - }, - "options": { - "colorMode": "value", - "graphMode": "area", - "justifyMode": "center", - "reduceOptions": { - "calcs": [ - "lastNotNull" - ], - "fields": "", - "values": false - }, - "textMode": "value" - }, - "links": [ - { - "title": "Open atlas-glue dashboard", - "url": "/d/atlas-glue", - "targetBlank": 
true - } - ] - }, { "id": 14, "type": "timeseries", diff --git a/services/monitoring/grafana-dashboard-glue.yaml b/services/monitoring/grafana-dashboard-testing.yaml similarity index 97% rename from services/monitoring/grafana-dashboard-glue.yaml rename to services/monitoring/grafana-dashboard-testing.yaml index 7aeec74..2215a2d 100644 --- a/services/monitoring/grafana-dashboard-glue.yaml +++ b/services/monitoring/grafana-dashboard-testing.yaml @@ -1,15 +1,15 @@ -# services/monitoring/grafana-dashboard-glue.yaml +# services/monitoring/grafana-dashboard-testing.yaml apiVersion: v1 kind: ConfigMap metadata: - name: grafana-dashboard-glue + name: grafana-dashboard-testing labels: grafana_dashboard: "1" data: - atlas-glue.json: | + atlas-testing.json: | { - "uid": "atlas-glue", - "title": "Atlas Glue", + "uid": "atlas-testing", + "title": "Atlas Testing", "folderUid": "atlas-internal", "editable": true, "panels": [ @@ -343,6 +343,6 @@ data: "style": "dark", "tags": [ "atlas", - "glue" + "testing" ] } diff --git a/services/monitoring/helmrelease.yaml b/services/monitoring/helmrelease.yaml index c99a8ca..bc818da 100644 --- a/services/monitoring/helmrelease.yaml +++ b/services/monitoring/helmrelease.yaml @@ -273,8 +273,8 @@ spec: export GF_SECURITY_ADMIN_PASSWORD="{{ index .Data.data "admin-password" }}" {{ end }} {{ with secret "kv/data/atlas/shared/postmark-relay" }} - export GF_SMTP_USER="{{ index .Data.data "relay-username" }}" - export GF_SMTP_PASSWORD="{{ index .Data.data "relay-password" }}" + export GF_SMTP_USER="{{ index .Data.data "apikey" }}" + export GF_SMTP_PASSWORD="{{ index .Data.data "apikey" }}" {{ end }} persistence: enabled: true diff --git a/services/monitoring/kustomization.yaml b/services/monitoring/kustomization.yaml index 69ad326..7d0b01b 100644 --- a/services/monitoring/kustomization.yaml +++ b/services/monitoring/kustomization.yaml @@ -14,7 +14,7 @@ resources: - grafana-dashboard-network.yaml - grafana-dashboard-gpu.yaml - 
grafana-dashboard-mail.yaml - - grafana-dashboard-glue.yaml + - grafana-dashboard-testing.yaml - dcgm-exporter.yaml - jetson-tegrastats-exporter.yaml - postmark-exporter-service.yaml diff --git a/services/monitoring/postmark-exporter-deployment.yaml b/services/monitoring/postmark-exporter-deployment.yaml index 5e6c837..72972ed 100644 --- a/services/monitoring/postmark-exporter-deployment.yaml +++ b/services/monitoring/postmark-exporter-deployment.yaml @@ -21,8 +21,8 @@ spec: vault.hashicorp.com/agent-inject-secret-postmark-env: "kv/data/atlas/monitoring/postmark-exporter" vault.hashicorp.com/agent-inject-template-postmark-env: | {{- with secret "kv/data/atlas/monitoring/postmark-exporter" -}} - export POSTMARK_SERVER_TOKEN="{{ index .Data.data "relay-username" }}" - export POSTMARK_SERVER_TOKEN_FALLBACK="{{ index .Data.data "relay-password" }}" + export POSTMARK_SERVER_TOKEN="{{ index .Data.data "apikey" }}" + export POSTMARK_SERVER_TOKEN_FALLBACK="{{ index .Data.data "apikey" }}" {{- if index .Data.data "sending-limit" }} export POSTMARK_SENDING_LIMIT="{{ index .Data.data "sending-limit" }}" {{- end }} diff --git a/services/nextcloud-mail-sync/cronjob.yaml b/services/nextcloud-mail-sync/cronjob.yaml index cb42d49..b4baa9c 100644 --- a/services/nextcloud-mail-sync/cronjob.yaml +++ b/services/nextcloud-mail-sync/cronjob.yaml @@ -37,8 +37,8 @@ spec: export OIDC_CLIENT_SECRET="{{ index .Data.data "client-secret" }}" {{ end }} {{ with secret "kv/data/atlas/shared/postmark-relay" }} - export SMTP_NAME="{{ index .Data.data "relay-username" }}" - export SMTP_PASSWORD="{{ index .Data.data "relay-password" }}" + export SMTP_NAME="{{ index .Data.data "apikey" }}" + export SMTP_PASSWORD="{{ index .Data.data "apikey" }}" {{ end }} {{ with secret "kv/data/atlas/shared/keycloak-admin" }} export KC_ADMIN_USER="{{ .Data.data.username }}" diff --git a/services/nextcloud/deployment.yaml b/services/nextcloud/deployment.yaml index cfa91b2..82f7538 100644 --- 
a/services/nextcloud/deployment.yaml +++ b/services/nextcloud/deployment.yaml @@ -37,8 +37,8 @@ spec: export OIDC_CLIENT_SECRET="{{ index .Data.data "client-secret" }}" {{ end }} {{ with secret "kv/data/atlas/shared/postmark-relay" }} - export SMTP_NAME="{{ index .Data.data "relay-username" }}" - export SMTP_PASSWORD="{{ index .Data.data "relay-password" }}" + export SMTP_NAME="{{ index .Data.data "apikey" }}" + export SMTP_PASSWORD="{{ index .Data.data "apikey" }}" {{ end }} {{ with secret "kv/data/atlas/shared/keycloak-admin" }} export KC_ADMIN_USER="{{ .Data.data.username }}" diff --git a/services/nextcloud/maintenance-cronjob.yaml b/services/nextcloud/maintenance-cronjob.yaml index cc9720f..d4008c7 100644 --- a/services/nextcloud/maintenance-cronjob.yaml +++ b/services/nextcloud/maintenance-cronjob.yaml @@ -33,8 +33,8 @@ spec: export OIDC_CLIENT_SECRET="{{ index .Data.data "client-secret" }}" {{ end }} {{ with secret "kv/data/atlas/shared/postmark-relay" }} - export SMTP_NAME="{{ index .Data.data "relay-username" }}" - export SMTP_PASSWORD="{{ index .Data.data "relay-password" }}" + export SMTP_NAME="{{ index .Data.data "apikey" }}" + export SMTP_PASSWORD="{{ index .Data.data "apikey" }}" {{ end }} {{ with secret "kv/data/atlas/shared/keycloak-admin" }} export KC_ADMIN_USER="{{ .Data.data.username }}" diff --git a/services/outline/deployment.yaml b/services/outline/deployment.yaml index cca3964..80a81dd 100644 --- a/services/outline/deployment.yaml +++ b/services/outline/deployment.yaml @@ -45,8 +45,8 @@ spec: {{ end }} export SMTP_FROM_EMAIL="no-reply-outline@bstein.dev" {{ with secret "kv/data/atlas/shared/postmark-relay" }} - export SMTP_USERNAME="{{ index .Data.data "relay-username" }}" - export SMTP_PASSWORD="{{ index .Data.data "relay-password" }}" + export SMTP_USERNAME="{{ index .Data.data "apikey" }}" + export SMTP_PASSWORD="{{ index .Data.data "apikey" }}" {{ end }} spec: serviceAccountName: outline-vault diff --git a/services/planka/deployment.yaml 
b/services/planka/deployment.yaml index 155ac80..9750039 100644 --- a/services/planka/deployment.yaml +++ b/services/planka/deployment.yaml @@ -48,8 +48,8 @@ spec: {{ end }} export SMTP_FROM="no-reply-planka@bstein.dev" {{ with secret "kv/data/atlas/shared/postmark-relay" }} - export SMTP_USER="{{ index .Data.data "relay-username" }}" - export SMTP_PASSWORD="{{ index .Data.data "relay-password" }}" + export SMTP_USER="{{ index .Data.data "apikey" }}" + export SMTP_PASSWORD="{{ index .Data.data "apikey" }}" {{ end }} spec: serviceAccountName: planka-vault diff --git a/services/vaultwarden/deployment.yaml b/services/vaultwarden/deployment.yaml index c3b7d27..0567f46 100644 --- a/services/vaultwarden/deployment.yaml +++ b/services/vaultwarden/deployment.yaml @@ -30,8 +30,8 @@ spec: export ADMIN_TOKEN="{{ .Data.data.ADMIN_TOKEN }}" {{ end }} {{ with secret "kv/data/atlas/shared/postmark-relay" }} - export SMTP_USERNAME="{{ index .Data.data "relay-username" }}" - export SMTP_PASSWORD="{{ index .Data.data "relay-password" }}" + export SMTP_USERNAME="{{ index .Data.data "apikey" }}" + export SMTP_PASSWORD="{{ index .Data.data "apikey" }}" {{ end }} spec: serviceAccountName: vaultwarden-vault From 435ed5d4261f9b482a4d1c66688668fed79fbdcd Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Sun, 18 Jan 2026 09:27:18 -0300 Subject: [PATCH 246/270] keycloak: bump jobs for postmark change --- services/keycloak/ldap-federation-job.yaml | 4 ++-- services/keycloak/portal-e2e-client-job.yaml | 2 +- .../keycloak/portal-e2e-execute-actions-email-test-job.yaml | 2 +- services/keycloak/portal-e2e-target-client-job.yaml | 2 +- .../keycloak/portal-e2e-token-exchange-permissions-job.yaml | 2 +- services/keycloak/portal-e2e-token-exchange-test-job.yaml | 2 +- services/keycloak/realm-settings-job.yaml | 2 +- services/keycloak/user-overrides-job.yaml | 2 +- 8 files changed, 9 insertions(+), 9 deletions(-) diff --git a/services/keycloak/ldap-federation-job.yaml 
b/services/keycloak/ldap-federation-job.yaml index 184ad90..303fd9f 100644 --- a/services/keycloak/ldap-federation-job.yaml +++ b/services/keycloak/ldap-federation-job.yaml @@ -2,7 +2,7 @@ apiVersion: batch/v1 kind: Job metadata: - name: keycloak-ldap-federation-10 + name: keycloak-ldap-federation-11 namespace: sso spec: backoffLimit: 2 @@ -377,4 +377,4 @@ spec: except Exception as e: print(f"WARNING: LDAP cleanup failed (continuing): {e}") PY - volumeMounts: \ No newline at end of file + volumeMounts: diff --git a/services/keycloak/portal-e2e-client-job.yaml b/services/keycloak/portal-e2e-client-job.yaml index cc2c2b4..4e0c006 100644 --- a/services/keycloak/portal-e2e-client-job.yaml +++ b/services/keycloak/portal-e2e-client-job.yaml @@ -2,7 +2,7 @@ apiVersion: batch/v1 kind: Job metadata: - name: keycloak-portal-e2e-client-7 + name: keycloak-portal-e2e-client-8 namespace: sso spec: backoffLimit: 0 diff --git a/services/keycloak/portal-e2e-execute-actions-email-test-job.yaml b/services/keycloak/portal-e2e-execute-actions-email-test-job.yaml index 736ca8d..35f79a6 100644 --- a/services/keycloak/portal-e2e-execute-actions-email-test-job.yaml +++ b/services/keycloak/portal-e2e-execute-actions-email-test-job.yaml @@ -2,7 +2,7 @@ apiVersion: batch/v1 kind: Job metadata: - name: keycloak-portal-e2e-execute-actions-email-13 + name: keycloak-portal-e2e-execute-actions-email-14 namespace: sso spec: backoffLimit: 3 diff --git a/services/keycloak/portal-e2e-target-client-job.yaml b/services/keycloak/portal-e2e-target-client-job.yaml index 64352bb..196b48b 100644 --- a/services/keycloak/portal-e2e-target-client-job.yaml +++ b/services/keycloak/portal-e2e-target-client-job.yaml @@ -2,7 +2,7 @@ apiVersion: batch/v1 kind: Job metadata: - name: keycloak-portal-e2e-target-6 + name: keycloak-portal-e2e-target-7 namespace: sso spec: backoffLimit: 0 diff --git a/services/keycloak/portal-e2e-token-exchange-permissions-job.yaml 
b/services/keycloak/portal-e2e-token-exchange-permissions-job.yaml index 597187c..647b8f9 100644 --- a/services/keycloak/portal-e2e-token-exchange-permissions-job.yaml +++ b/services/keycloak/portal-e2e-token-exchange-permissions-job.yaml @@ -2,7 +2,7 @@ apiVersion: batch/v1 kind: Job metadata: - name: keycloak-portal-e2e-token-exchange-permissions-10 + name: keycloak-portal-e2e-token-exchange-permissions-11 namespace: sso spec: backoffLimit: 6 diff --git a/services/keycloak/portal-e2e-token-exchange-test-job.yaml b/services/keycloak/portal-e2e-token-exchange-test-job.yaml index 38547da..edd7555 100644 --- a/services/keycloak/portal-e2e-token-exchange-test-job.yaml +++ b/services/keycloak/portal-e2e-token-exchange-test-job.yaml @@ -2,7 +2,7 @@ apiVersion: batch/v1 kind: Job metadata: - name: keycloak-portal-e2e-token-exchange-test-6 + name: keycloak-portal-e2e-token-exchange-test-7 namespace: sso spec: backoffLimit: 6 diff --git a/services/keycloak/realm-settings-job.yaml b/services/keycloak/realm-settings-job.yaml index c74537d..f680200 100644 --- a/services/keycloak/realm-settings-job.yaml +++ b/services/keycloak/realm-settings-job.yaml @@ -2,7 +2,7 @@ apiVersion: batch/v1 kind: Job metadata: - name: keycloak-realm-settings-31 + name: keycloak-realm-settings-32 namespace: sso spec: backoffLimit: 0 diff --git a/services/keycloak/user-overrides-job.yaml b/services/keycloak/user-overrides-job.yaml index ff2aa91..7623c84 100644 --- a/services/keycloak/user-overrides-job.yaml +++ b/services/keycloak/user-overrides-job.yaml @@ -2,7 +2,7 @@ apiVersion: batch/v1 kind: Job metadata: - name: keycloak-user-overrides-8 + name: keycloak-user-overrides-9 namespace: sso spec: backoffLimit: 0 From 0b15007e2c89e0e3dd9c4985a393aaab52d80ec5 Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Sun, 18 Jan 2026 09:55:28 -0300 Subject: [PATCH 247/270] monitoring: disable grafana persistence to recover --- services/monitoring/helmrelease.yaml | 2 +- 1 file changed, 1 insertion(+), 1 
deletion(-) diff --git a/services/monitoring/helmrelease.yaml b/services/monitoring/helmrelease.yaml index bc818da..0a0e226 100644 --- a/services/monitoring/helmrelease.yaml +++ b/services/monitoring/helmrelease.yaml @@ -277,7 +277,7 @@ spec: export GF_SMTP_PASSWORD="{{ index .Data.data "apikey" }}" {{ end }} persistence: - enabled: true + enabled: false size: 20Gi storageClassName: astreae deploymentStrategy: From 6e3faeb9fd889842050068fb3f4efc2ad471aa30 Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Sun, 18 Jan 2026 11:37:01 -0300 Subject: [PATCH 248/270] monitoring: restore grafana persistence --- services/monitoring/helmrelease.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/services/monitoring/helmrelease.yaml b/services/monitoring/helmrelease.yaml index 0a0e226..bc818da 100644 --- a/services/monitoring/helmrelease.yaml +++ b/services/monitoring/helmrelease.yaml @@ -277,7 +277,7 @@ spec: export GF_SMTP_PASSWORD="{{ index .Data.data "apikey" }}" {{ end }} persistence: - enabled: false + enabled: true size: 20Gi storageClassName: astreae deploymentStrategy: From a5bec3e5436e67f50eab3de64c5f2b000888a03d Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Sun, 18 Jan 2026 11:43:28 -0300 Subject: [PATCH 249/270] monitoring: avoid titan-22 for core pods --- services/monitoring/helmrelease.yaml | 36 ++++++++++++++++++++++++++++ 1 file changed, 36 insertions(+) diff --git a/services/monitoring/helmrelease.yaml b/services/monitoring/helmrelease.yaml index bc818da..8ec0509 100644 --- a/services/monitoring/helmrelease.yaml +++ b/services/monitoring/helmrelease.yaml @@ -15,6 +15,15 @@ spec: name: prometheus namespace: flux-system values: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/hostname + operator: NotIn + values: + - titan-22 prometheusScrape: false --- @@ -72,6 +81,15 @@ spec: persistentVolume: enabled: true size: 100Gi + affinity: + nodeAffinity: + 
requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/hostname + operator: NotIn + values: + - titan-22 # Enable built-in Kubernetes scraping scrape: @@ -280,6 +298,15 @@ spec: enabled: true size: 20Gi storageClassName: astreae + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/hostname + operator: NotIn + values: + - titan-22 deploymentStrategy: type: Recreate service: @@ -500,6 +527,15 @@ spec: name: prometheus namespace: flux-system values: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/hostname + operator: NotIn + values: + - titan-22 ingress: enabled: true ingressClassName: traefik From 084242746e563073f790ee179045b961a12f0334 Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Sun, 18 Jan 2026 11:52:36 -0300 Subject: [PATCH 250/270] monitoring: keep postmark exporter off titan-22 --- services/monitoring/postmark-exporter-deployment.yaml | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/services/monitoring/postmark-exporter-deployment.yaml b/services/monitoring/postmark-exporter-deployment.yaml index 72972ed..6406224 100644 --- a/services/monitoring/postmark-exporter-deployment.yaml +++ b/services/monitoring/postmark-exporter-deployment.yaml @@ -29,6 +29,15 @@ spec: {{- end -}} bstein.dev/restarted-at: "2026-01-06T00:00:00Z" spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/hostname + operator: NotIn + values: + - titan-22 serviceAccountName: monitoring-vault-sync containers: - name: exporter From c70054a30e108d4e4488a0247b5b840c4ae767d4 Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Sun, 18 Jan 2026 12:07:45 -0300 Subject: [PATCH 251/270] monitoring: add atlas testing dashboard folder --- 
services/monitoring/grafana-folders.yaml | 7 +++++++ services/monitoring/helmrelease.yaml | 9 +++++++++ 2 files changed, 16 insertions(+) diff --git a/services/monitoring/grafana-folders.yaml b/services/monitoring/grafana-folders.yaml index 54b278f..35ab954 100644 --- a/services/monitoring/grafana-folders.yaml +++ b/services/monitoring/grafana-folders.yaml @@ -33,3 +33,10 @@ data: permission: View - role: Admin permission: Admin + - uid: atlas-testing + title: Atlas Testing + permissions: + - role: Editor + permission: View + - role: Admin + permission: Admin diff --git a/services/monitoring/helmrelease.yaml b/services/monitoring/helmrelease.yaml index 8ec0509..ae74b3e 100644 --- a/services/monitoring/helmrelease.yaml +++ b/services/monitoring/helmrelease.yaml @@ -448,6 +448,14 @@ spec: editable: true options: path: /var/lib/grafana/dashboards/mail + - name: testing + orgId: 1 + folder: Atlas Testing + type: file + disableDeletion: false + editable: true + options: + path: /var/lib/grafana/dashboards/testing dashboardsConfigMaps: overview: grafana-dashboard-overview overview-public: grafana-dashboard-overview @@ -457,6 +465,7 @@ spec: gpu: grafana-dashboard-gpu network: grafana-dashboard-network mail: grafana-dashboard-mail + testing: grafana-dashboard-testing extraConfigmapMounts: - name: grafana-folders mountPath: /etc/grafana/provisioning/folders From 0eb526c907740a4888861530c2d1cc02771757b3 Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Sun, 18 Jan 2026 12:20:45 -0300 Subject: [PATCH 252/270] monitoring: label cronjob metrics and move grafana to arm64 --- services/monitoring/grafana-folders.yaml | 7 ------ services/monitoring/helmrelease.yaml | 29 ++++++++++++++++++++---- 2 files changed, 25 insertions(+), 11 deletions(-) diff --git a/services/monitoring/grafana-folders.yaml b/services/monitoring/grafana-folders.yaml index 35ab954..54b278f 100644 --- a/services/monitoring/grafana-folders.yaml +++ b/services/monitoring/grafana-folders.yaml @@ -33,10 +33,3 @@ 
data: permission: View - role: Admin permission: Admin - - uid: atlas-testing - title: Atlas Testing - permissions: - - role: Editor - permission: View - - role: Admin - permission: Admin diff --git a/services/monitoring/helmrelease.yaml b/services/monitoring/helmrelease.yaml index ae74b3e..304de05 100644 --- a/services/monitoring/helmrelease.yaml +++ b/services/monitoring/helmrelease.yaml @@ -25,6 +25,8 @@ spec: values: - titan-22 prometheusScrape: false + metricLabelsAllowlist: + - cronjobs=[atlas.bstein.dev/glue] --- @@ -298,15 +300,34 @@ spec: enabled: true size: 20Gi storageClassName: astreae + nodeSelector: + kubernetes.io/arch: arm64 + node-role.kubernetes.io/worker: "true" affinity: nodeAffinity: requiredDuringSchedulingIgnoredDuringExecution: nodeSelectorTerms: - matchExpressions: - - key: kubernetes.io/hostname - operator: NotIn + - key: hardware + operator: In values: - - titan-22 + - rpi5 + - rpi4 + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: hardware + operator: In + values: + - rpi5 + - weight: 70 + preference: + matchExpressions: + - key: hardware + operator: In + values: + - rpi4 deploymentStrategy: type: Recreate service: @@ -450,7 +471,7 @@ spec: path: /var/lib/grafana/dashboards/mail - name: testing orgId: 1 - folder: Atlas Testing + folder: Atlas Internal type: file disableDeletion: false editable: true From ae3b0afbff65731c171b4b284505b945f4377fe7 Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Sun, 18 Jan 2026 12:23:50 -0300 Subject: [PATCH 253/270] nextcloud-mail-sync: harden auth, bump portal backend --- services/bstein-dev-home/kustomization.yaml | 2 +- .../scripts/nextcloud-mail-sync.sh | 38 +++++++------------ 2 files changed, 14 insertions(+), 26 deletions(-) diff --git a/services/bstein-dev-home/kustomization.yaml b/services/bstein-dev-home/kustomization.yaml index bdcd593..f9d3c87 100644 --- a/services/bstein-dev-home/kustomization.yaml +++ 
b/services/bstein-dev-home/kustomization.yaml @@ -22,7 +22,7 @@ images: - name: registry.bstein.dev/bstein/bstein-dev-home-frontend newTag: 0.1.1-102 # {"$imagepolicy": "bstein-dev-home:bstein-dev-home-frontend"} - name: registry.bstein.dev/bstein/bstein-dev-home-backend - newTag: 0.1.1-102 # {"$imagepolicy": "bstein-dev-home:bstein-dev-home-backend"} + newTag: 0.1.1-103 # {"$imagepolicy": "bstein-dev-home:bstein-dev-home-backend"} configMapGenerator: - name: chat-ai-gateway namespace: bstein-dev-home diff --git a/services/nextcloud-mail-sync/scripts/nextcloud-mail-sync.sh b/services/nextcloud-mail-sync/scripts/nextcloud-mail-sync.sh index 681308e..e75ec91 100755 --- a/services/nextcloud-mail-sync/scripts/nextcloud-mail-sync.sh +++ b/services/nextcloud-mail-sync/scripts/nextcloud-mail-sync.sh @@ -54,38 +54,26 @@ list_mail_accounts() { local export_out # Nextcloud Mail does not provide a list command; export is safe (does not print passwords). - # Some occ commands emit to stderr; capture both streams so we don't mis-detect "no accounts". - if ! export_out=$(/usr/sbin/runuser -u www-data -- php occ mail:account:export "${user_id}" 2>&1); then + if ! export_out=$(/usr/sbin/runuser -u www-data -- php occ mail:account:export --output json "${user_id}"); then echo "WARN: unable to export mail accounts for ${user_id}; skipping sync for safety" >&2 return 1 fi - # The export output is human-readable and includes blocks like: - # Account 10: - # - E-Mail: user@example.com - # Extract "account-id email" pairs. - awk ' - /^Account[[:space:]]+[0-9]+:/ { - id=$2; - sub(/:$/, "", id); - next; - } - id != "" && /@/ { - # Keep the regex simple (mawk does not support interval expressions like {2,}). - if (match($0, /[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+/)) { - printf("%s\t%s\n", id, substr($0, RSTART, RLENGTH)); - id=""; - } - } - ' <<<"${export_out}" | sort -u + if ! 
jq -e 'type == "array"' >/dev/null 2>&1 <<<"${export_out}"; then + echo "WARN: unexpected mail export output for ${user_id}; skipping sync for safety" >&2 + return 1 + fi + + jq -r '.[] | "\(.id)\t\(.email)"' <<<"${export_out}" | sort -u } token=$( - curl -fsS -d "grant_type=password" \ - -d "client_id=admin-cli" \ - -d "username=${KC_ADMIN_USER}" \ - -d "password=${KC_ADMIN_PASS}" \ - "${KC_BASE}/realms/master/protocol/openid-connect/token" | jq -r '.access_token' + curl -fsS \ + --data-urlencode "grant_type=password" \ + --data-urlencode "client_id=admin-cli" \ + --data-urlencode "username=${KC_ADMIN_USER}" \ + --data-urlencode "password=${KC_ADMIN_PASS}" \ + "${KC_BASE}/realms/master/protocol/openid-connect/token" | jq -r '.access_token // empty' ) if [[ -z "${token}" || "${token}" == "null" ]]; then From da200235bb349021aa7d10cafeaccbe7a435caab Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Sun, 18 Jan 2026 12:26:04 -0300 Subject: [PATCH 254/270] monitoring: fix glue dashboard queries --- scripts/dashboards_render_atlas.py | 14 ++++++++------ services/monitoring/dashboards/atlas-testing.json | 12 ++++++------ services/monitoring/grafana-dashboard-testing.yaml | 12 ++++++------ 3 files changed, 20 insertions(+), 18 deletions(-) diff --git a/scripts/dashboards_render_atlas.py b/scripts/dashboards_render_atlas.py index 5e3e7f0..0931b48 100644 --- a/scripts/dashboards_render_atlas.py +++ b/scripts/dashboards_render_atlas.py @@ -319,16 +319,18 @@ NAMESPACE_SCOPE_INFRA = f'namespace=~"{INFRA_REGEX}"' NAMESPACE_SCOPE_VARS = ["namespace_scope_cpu", "namespace_scope_gpu", "namespace_scope_ram"] GLUE_LABEL = 'label_atlas_bstein_dev_glue="true"' GLUE_JOBS = f"kube_cronjob_labels{{{GLUE_LABEL}}}" -GLUE_LAST_SUCCESS = f"kube_cronjob_status_last_successful_time{{{GLUE_LABEL}}}" -GLUE_LAST_SCHEDULE = f"kube_cronjob_status_last_schedule_time{{{GLUE_LABEL}}}" -GLUE_SUSPENDED = f"kube_cronjob_spec_suspend{{{GLUE_LABEL}}} == 1" +GLUE_FILTER = f"and on(namespace,cronjob) 
{GLUE_JOBS}" +GLUE_LAST_SUCCESS = f"(kube_cronjob_status_last_successful_time {GLUE_FILTER})" +GLUE_LAST_SCHEDULE = f"(kube_cronjob_status_last_schedule_time {GLUE_FILTER})" +GLUE_SUSPENDED = f"(kube_cronjob_spec_suspend {GLUE_FILTER}) == 1" +GLUE_ACTIVE = f"(kube_cronjob_status_active {GLUE_FILTER})" GLUE_LAST_SUCCESS_AGE = f"(time() - {GLUE_LAST_SUCCESS})" GLUE_LAST_SCHEDULE_AGE = f"(time() - {GLUE_LAST_SCHEDULE})" GLUE_LAST_SUCCESS_AGE_HOURS = f"({GLUE_LAST_SUCCESS_AGE}) / 3600" GLUE_LAST_SCHEDULE_AGE_HOURS = f"({GLUE_LAST_SCHEDULE_AGE}) / 3600" GLUE_STALE_WINDOW_SEC = 36 * 3600 GLUE_STALE = f"({GLUE_LAST_SUCCESS_AGE} > bool {GLUE_STALE_WINDOW_SEC})" -GLUE_MISSING = f"({GLUE_JOBS} unless {GLUE_LAST_SUCCESS})" +GLUE_MISSING = f"({GLUE_JOBS} unless on(namespace,cronjob) kube_cronjob_status_last_successful_time)" GLUE_STALE_ACTIVE = f"({GLUE_STALE} unless on(namespace,cronjob) {GLUE_SUSPENDED})" GLUE_MISSING_ACTIVE = f"({GLUE_MISSING} unless on(namespace,cronjob) {GLUE_SUSPENDED})" GLUE_STALE_COUNT = f"(sum({GLUE_STALE_ACTIVE}) + count({GLUE_MISSING_ACTIVE}))" @@ -2188,7 +2190,7 @@ def build_testing_dashboard(): table_panel( 3, "Glue Jobs Suspended", - f"kube_cronjob_spec_suspend{{{GLUE_LABEL}}} == 1", + GLUE_SUSPENDED, {"h": 4, "w": 6, "x": 12, "y": 0}, unit="none", transformations=sort_desc, @@ -2199,7 +2201,7 @@ def build_testing_dashboard(): table_panel( 4, "Glue Jobs Active Runs", - f"kube_cronjob_status_active{{{GLUE_LABEL}}}", + GLUE_ACTIVE, {"h": 4, "w": 6, "x": 18, "y": 0}, unit="none", transformations=sort_desc, diff --git a/services/monitoring/dashboards/atlas-testing.json b/services/monitoring/dashboards/atlas-testing.json index 831d960..25cf3f8 100644 --- a/services/monitoring/dashboards/atlas-testing.json +++ b/services/monitoring/dashboards/atlas-testing.json @@ -20,7 +20,7 @@ }, "targets": [ { - "expr": "(sum((((time() - kube_cronjob_status_last_successful_time{label_atlas_bstein_dev_glue=\"true\"}) > bool 129600) unless on(namespace,cronjob) 
kube_cronjob_spec_suspend{label_atlas_bstein_dev_glue=\"true\"} == 1)) + count(((kube_cronjob_labels{label_atlas_bstein_dev_glue=\"true\"} unless kube_cronjob_status_last_successful_time{label_atlas_bstein_dev_glue=\"true\"}) unless on(namespace,cronjob) kube_cronjob_spec_suspend{label_atlas_bstein_dev_glue=\"true\"} == 1)))", + "expr": "(sum((((time() - (kube_cronjob_status_last_successful_time and on(namespace,cronjob) kube_cronjob_labels{label_atlas_bstein_dev_glue=\"true\"})) > bool 129600) unless on(namespace,cronjob) (kube_cronjob_spec_suspend and on(namespace,cronjob) kube_cronjob_labels{label_atlas_bstein_dev_glue=\"true\"}) == 1)) + count(((kube_cronjob_labels{label_atlas_bstein_dev_glue=\"true\"} unless on(namespace,cronjob) kube_cronjob_status_last_successful_time) unless on(namespace,cronjob) (kube_cronjob_spec_suspend and on(namespace,cronjob) kube_cronjob_labels{label_atlas_bstein_dev_glue=\"true\"}) == 1)))", "refId": "A" } ], @@ -88,7 +88,7 @@ }, "targets": [ { - "expr": "((kube_cronjob_labels{label_atlas_bstein_dev_glue=\"true\"} unless kube_cronjob_status_last_successful_time{label_atlas_bstein_dev_glue=\"true\"}) unless on(namespace,cronjob) kube_cronjob_spec_suspend{label_atlas_bstein_dev_glue=\"true\"} == 1)", + "expr": "((kube_cronjob_labels{label_atlas_bstein_dev_glue=\"true\"} unless on(namespace,cronjob) kube_cronjob_status_last_successful_time) unless on(namespace,cronjob) (kube_cronjob_spec_suspend and on(namespace,cronjob) kube_cronjob_labels{label_atlas_bstein_dev_glue=\"true\"}) == 1)", "refId": "A", "instant": true } @@ -138,7 +138,7 @@ }, "targets": [ { - "expr": "kube_cronjob_spec_suspend{label_atlas_bstein_dev_glue=\"true\"} == 1", + "expr": "(kube_cronjob_spec_suspend and on(namespace,cronjob) kube_cronjob_labels{label_atlas_bstein_dev_glue=\"true\"}) == 1", "refId": "A", "instant": true } @@ -188,7 +188,7 @@ }, "targets": [ { - "expr": "kube_cronjob_status_active{label_atlas_bstein_dev_glue=\"true\"}", + "expr": 
"(kube_cronjob_status_active and on(namespace,cronjob) kube_cronjob_labels{label_atlas_bstein_dev_glue=\"true\"})", "refId": "A", "instant": true } @@ -238,7 +238,7 @@ }, "targets": [ { - "expr": "((time() - kube_cronjob_status_last_successful_time{label_atlas_bstein_dev_glue=\"true\"})) / 3600", + "expr": "((time() - (kube_cronjob_status_last_successful_time and on(namespace,cronjob) kube_cronjob_labels{label_atlas_bstein_dev_glue=\"true\"}))) / 3600", "refId": "A", "instant": true } @@ -288,7 +288,7 @@ }, "targets": [ { - "expr": "((time() - kube_cronjob_status_last_schedule_time{label_atlas_bstein_dev_glue=\"true\"})) / 3600", + "expr": "((time() - (kube_cronjob_status_last_schedule_time and on(namespace,cronjob) kube_cronjob_labels{label_atlas_bstein_dev_glue=\"true\"}))) / 3600", "refId": "A", "instant": true } diff --git a/services/monitoring/grafana-dashboard-testing.yaml b/services/monitoring/grafana-dashboard-testing.yaml index 2215a2d..80a7043 100644 --- a/services/monitoring/grafana-dashboard-testing.yaml +++ b/services/monitoring/grafana-dashboard-testing.yaml @@ -29,7 +29,7 @@ data: }, "targets": [ { - "expr": "(sum((((time() - kube_cronjob_status_last_successful_time{label_atlas_bstein_dev_glue=\"true\"}) > bool 129600) unless on(namespace,cronjob) kube_cronjob_spec_suspend{label_atlas_bstein_dev_glue=\"true\"} == 1)) + count(((kube_cronjob_labels{label_atlas_bstein_dev_glue=\"true\"} unless kube_cronjob_status_last_successful_time{label_atlas_bstein_dev_glue=\"true\"}) unless on(namespace,cronjob) kube_cronjob_spec_suspend{label_atlas_bstein_dev_glue=\"true\"} == 1)))", + "expr": "(sum((((time() - (kube_cronjob_status_last_successful_time and on(namespace,cronjob) kube_cronjob_labels{label_atlas_bstein_dev_glue=\"true\"})) > bool 129600) unless on(namespace,cronjob) (kube_cronjob_spec_suspend and on(namespace,cronjob) kube_cronjob_labels{label_atlas_bstein_dev_glue=\"true\"}) == 1)) + count(((kube_cronjob_labels{label_atlas_bstein_dev_glue=\"true\"} 
unless on(namespace,cronjob) kube_cronjob_status_last_successful_time) unless on(namespace,cronjob) (kube_cronjob_spec_suspend and on(namespace,cronjob) kube_cronjob_labels{label_atlas_bstein_dev_glue=\"true\"}) == 1)))", "refId": "A" } ], @@ -97,7 +97,7 @@ data: }, "targets": [ { - "expr": "((kube_cronjob_labels{label_atlas_bstein_dev_glue=\"true\"} unless kube_cronjob_status_last_successful_time{label_atlas_bstein_dev_glue=\"true\"}) unless on(namespace,cronjob) kube_cronjob_spec_suspend{label_atlas_bstein_dev_glue=\"true\"} == 1)", + "expr": "((kube_cronjob_labels{label_atlas_bstein_dev_glue=\"true\"} unless on(namespace,cronjob) kube_cronjob_status_last_successful_time) unless on(namespace,cronjob) (kube_cronjob_spec_suspend and on(namespace,cronjob) kube_cronjob_labels{label_atlas_bstein_dev_glue=\"true\"}) == 1)", "refId": "A", "instant": true } @@ -147,7 +147,7 @@ data: }, "targets": [ { - "expr": "kube_cronjob_spec_suspend{label_atlas_bstein_dev_glue=\"true\"} == 1", + "expr": "(kube_cronjob_spec_suspend and on(namespace,cronjob) kube_cronjob_labels{label_atlas_bstein_dev_glue=\"true\"}) == 1", "refId": "A", "instant": true } @@ -197,7 +197,7 @@ data: }, "targets": [ { - "expr": "kube_cronjob_status_active{label_atlas_bstein_dev_glue=\"true\"}", + "expr": "(kube_cronjob_status_active and on(namespace,cronjob) kube_cronjob_labels{label_atlas_bstein_dev_glue=\"true\"})", "refId": "A", "instant": true } @@ -247,7 +247,7 @@ data: }, "targets": [ { - "expr": "((time() - kube_cronjob_status_last_successful_time{label_atlas_bstein_dev_glue=\"true\"})) / 3600", + "expr": "((time() - (kube_cronjob_status_last_successful_time and on(namespace,cronjob) kube_cronjob_labels{label_atlas_bstein_dev_glue=\"true\"}))) / 3600", "refId": "A", "instant": true } @@ -297,7 +297,7 @@ data: }, "targets": [ { - "expr": "((time() - kube_cronjob_status_last_schedule_time{label_atlas_bstein_dev_glue=\"true\"})) / 3600", + "expr": "((time() - (kube_cronjob_status_last_schedule_time and 
on(namespace,cronjob) kube_cronjob_labels{label_atlas_bstein_dev_glue=\"true\"}))) / 3600", "refId": "A", "instant": true } From 979470eeb84a42767b644406bb919791c656826a Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Sun, 18 Jan 2026 21:23:11 -0300 Subject: [PATCH 255/270] ci: add glue tests and deploy gate --- ci/Jenkinsfile.titan-iac | 53 +++++++++++++++++++++++ ci/requirements.txt | 4 ++ ci/tests/glue/config.yaml | 7 +++ ci/tests/glue/test_glue_cronjobs.py | 46 ++++++++++++++++++++ ci/tests/glue/test_glue_metrics.py | 29 +++++++++++++ clusters/atlas/flux-system/gotk-sync.yaml | 2 +- services/jenkins/configmap-jcasc.yaml | 19 ++++++++ 7 files changed, 159 insertions(+), 1 deletion(-) create mode 100644 ci/Jenkinsfile.titan-iac create mode 100644 ci/requirements.txt create mode 100644 ci/tests/glue/config.yaml create mode 100644 ci/tests/glue/test_glue_cronjobs.py create mode 100644 ci/tests/glue/test_glue_metrics.py diff --git a/ci/Jenkinsfile.titan-iac b/ci/Jenkinsfile.titan-iac new file mode 100644 index 0000000..3b13eb0 --- /dev/null +++ b/ci/Jenkinsfile.titan-iac @@ -0,0 +1,53 @@ +pipeline { + agent { + kubernetes { + defaultContainer 'python' + yaml """ +apiVersion: v1 +kind: Pod +spec: + containers: + - name: python + image: python:3.12-slim + command: + - cat + tty: true +""" + } + } + environment { + PIP_DISABLE_PIP_VERSION_CHECK = '1' + PYTHONUNBUFFERED = '1' + DEPLOY_BRANCH = 'deploy' + } + stages { + stage('Checkout') { + steps { + checkout scm + } + } + stage('Install deps') { + steps { + sh 'pip install --no-cache-dir -r ci/requirements.txt' + } + } + stage('Glue tests') { + steps { + sh 'pytest -q ci/tests/glue' + } + } + stage('Promote') { + steps { + withCredentials([usernamePassword(credentialsId: 'gitea-pat', usernameVariable: 'GIT_USER', passwordVariable: 'GIT_TOKEN')]) { + sh ''' + set +x + git config user.email "jenkins@bstein.dev" + git config user.name "jenkins" + git remote set-url origin 
https://${GIT_USER}:${GIT_TOKEN}@scm.bstein.dev/bstein/titan-iac.git + git push origin HEAD:${DEPLOY_BRANCH} + ''' + } + } + } + } +} diff --git a/ci/requirements.txt b/ci/requirements.txt new file mode 100644 index 0000000..eaa21aa --- /dev/null +++ b/ci/requirements.txt @@ -0,0 +1,4 @@ +pytest==8.3.4 +kubernetes==30.1.0 +PyYAML==6.0.2 +requests==2.32.3 diff --git a/ci/tests/glue/config.yaml b/ci/tests/glue/config.yaml new file mode 100644 index 0000000..8adf4ca --- /dev/null +++ b/ci/tests/glue/config.yaml @@ -0,0 +1,7 @@ +max_success_age_hours: 48 +allow_suspended: + - comms/othrys-room-reset + - comms/pin-othrys-invite + - comms/seed-othrys-room + - finance/firefly-user-sync + - health/wger-user-sync diff --git a/ci/tests/glue/test_glue_cronjobs.py b/ci/tests/glue/test_glue_cronjobs.py new file mode 100644 index 0000000..ec6b620 --- /dev/null +++ b/ci/tests/glue/test_glue_cronjobs.py @@ -0,0 +1,46 @@ +from __future__ import annotations + +from datetime import datetime, timezone +from pathlib import Path + +import yaml +from kubernetes import client, config + + +CONFIG_PATH = Path(__file__).with_name("config.yaml") + + +def _load_config() -> dict: + with CONFIG_PATH.open("r", encoding="utf-8") as handle: + return yaml.safe_load(handle) or {} + + +def _load_kube(): + try: + config.load_incluster_config() + except config.ConfigException: + config.load_kube_config() + + +def test_glue_cronjobs_recent_success(): + cfg = _load_config() + max_age_hours = int(cfg.get("max_success_age_hours", 48)) + allow_suspended = set(cfg.get("allow_suspended", [])) + + _load_kube() + batch = client.BatchV1Api() + cronjobs = batch.list_cron_job_for_all_namespaces(label_selector="atlas.bstein.dev/glue=true").items + + assert cronjobs, "No glue cronjobs found with atlas.bstein.dev/glue=true" + + now = datetime.now(timezone.utc) + for cronjob in cronjobs: + name = f"{cronjob.metadata.namespace}/{cronjob.metadata.name}" + if cronjob.spec.suspend: + assert name in allow_suspended, 
f"{name} is suspended but not in allow_suspended" + continue + + last_success = cronjob.status.last_successful_time + assert last_success is not None, f"{name} has no lastSuccessfulTime" + age_hours = (now - last_success).total_seconds() / 3600 + assert age_hours <= max_age_hours, f"{name} last success {age_hours:.1f}h ago" diff --git a/ci/tests/glue/test_glue_metrics.py b/ci/tests/glue/test_glue_metrics.py new file mode 100644 index 0000000..16b01c7 --- /dev/null +++ b/ci/tests/glue/test_glue_metrics.py @@ -0,0 +1,29 @@ +from __future__ import annotations + +import os + +import requests + + +VM_URL = os.environ.get("VM_URL", "http://victoria-metrics-single-server:8428").rstrip("/") + + +def _query(promql: str) -> list[dict]: + response = requests.get(f"{VM_URL}/api/v1/query", params={"query": promql}, timeout=10) + response.raise_for_status() + payload = response.json() + return payload.get("data", {}).get("result", []) + + +def test_glue_metrics_present(): + series = _query('kube_cronjob_labels{label_atlas_bstein_dev_glue="true"}') + assert series, "No glue cronjob label series found" + + +def test_glue_metrics_success_join(): + query = ( + "kube_cronjob_status_last_successful_time " + 'and on(namespace,cronjob) kube_cronjob_labels{label_atlas_bstein_dev_glue="true"}' + ) + series = _query(query) + assert series, "No glue cronjob last success series found" diff --git a/clusters/atlas/flux-system/gotk-sync.yaml b/clusters/atlas/flux-system/gotk-sync.yaml index 53c0817..400c76d 100644 --- a/clusters/atlas/flux-system/gotk-sync.yaml +++ b/clusters/atlas/flux-system/gotk-sync.yaml @@ -9,7 +9,7 @@ metadata: spec: interval: 1m0s ref: - branch: feature/vault-consumption + branch: deploy secretRef: name: flux-system-gitea url: ssh://git@scm.bstein.dev:2242/bstein/titan-iac.git diff --git a/services/jenkins/configmap-jcasc.yaml b/services/jenkins/configmap-jcasc.yaml index 2c188db..ac26350 100644 --- a/services/jenkins/configmap-jcasc.yaml +++ 
b/services/jenkins/configmap-jcasc.yaml @@ -139,6 +139,25 @@ data: } } } + pipelineJob('titan-iac-quality-gate') { + triggers { + scm('H/5 * * * *') + } + definition { + cpsScm { + scm { + git { + remote { + url('https://scm.bstein.dev/bstein/titan-iac.git') + credentials('gitea-pat') + } + branches('*/feature/vault-consumption') + } + } + scriptPath('ci/Jenkinsfile.titan-iac') + } + } + } base.yaml: | jenkins: disableRememberMe: false From 73c829c81f34de2658a9a9a37fff555bd63fb908 Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Sun, 18 Jan 2026 21:26:05 -0300 Subject: [PATCH 256/270] jenkins: restart to load new jobs --- services/jenkins/deployment.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/services/jenkins/deployment.yaml b/services/jenkins/deployment.yaml index 0320b02..e846a8e 100644 --- a/services/jenkins/deployment.yaml +++ b/services/jenkins/deployment.yaml @@ -38,6 +38,7 @@ spec: GITEA_PAT_USERNAME={{ .Data.data.username }} GITEA_PAT_TOKEN={{ .Data.data.token }} {{- end -}} + bstein.dev/restarted-at: "2026-01-19T00:25:00Z" spec: serviceAccountName: jenkins nodeSelector: From b6609a97061cfbe63f2033d625df312bd6c77542 Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Mon, 19 Jan 2026 00:35:40 -0300 Subject: [PATCH 257/270] glue: fix portal smtp host and mail sync export --- .gitignore | 1 + services/bstein-dev-home/backend-deployment.yaml | 2 +- .../scripts/nextcloud-mail-sync.sh | 13 ++++++------- 3 files changed, 8 insertions(+), 8 deletions(-) diff --git a/.gitignore b/.gitignore index 8e09aa9..8d0ab1e 100644 --- a/.gitignore +++ b/.gitignore @@ -6,3 +6,4 @@ __pycache__/ *.py[cod] .pytest_cache .venv +tmp/ diff --git a/services/bstein-dev-home/backend-deployment.yaml b/services/bstein-dev-home/backend-deployment.yaml index ba6ca74..b65d477 100644 --- a/services/bstein-dev-home/backend-deployment.yaml +++ b/services/bstein-dev-home/backend-deployment.yaml @@ -34,7 +34,7 @@ spec: export PORTAL_E2E_CLIENT_SECRET="{{ .Data.data.client_secret }}" {{ end 
}} {{ with secret "kv/data/atlas/shared/postmark-relay" }} - export SMTP_HOST="mail.bstein.dev" + export SMTP_HOST="smtp.postmarkapp.com" export SMTP_PORT="587" export SMTP_STARTTLS="true" export SMTP_USE_TLS="false" diff --git a/services/nextcloud-mail-sync/scripts/nextcloud-mail-sync.sh b/services/nextcloud-mail-sync/scripts/nextcloud-mail-sync.sh index e75ec91..732b9fb 100755 --- a/services/nextcloud-mail-sync/scripts/nextcloud-mail-sync.sh +++ b/services/nextcloud-mail-sync/scripts/nextcloud-mail-sync.sh @@ -54,17 +54,16 @@ list_mail_accounts() { local export_out # Nextcloud Mail does not provide a list command; export is safe (does not print passwords). - if ! export_out=$(/usr/sbin/runuser -u www-data -- php occ mail:account:export --output json "${user_id}"); then + if ! export_out=$(/usr/sbin/runuser -u www-data -- php occ mail:account:export "${user_id}"); then echo "WARN: unable to export mail accounts for ${user_id}; skipping sync for safety" >&2 return 1 fi - if ! jq -e 'type == "array"' >/dev/null 2>&1 <<<"${export_out}"; then - echo "WARN: unexpected mail export output for ${user_id}; skipping sync for safety" >&2 - return 1 - fi - - jq -r '.[] | "\(.id)\t\(.email)"' <<<"${export_out}" | sort -u + awk -v OFS='\t' ' + BEGIN { IGNORECASE=1; id="" } + $1 == "Account" { id=$2; sub(":", "", id); next } + $1 == "-" && tolower($2) ~ /^e-?mail:$/ { if (id) print id, $3 } + ' <<<"${export_out}" | sort -u } token=$( From fe9132e45e4ec1a5c91c2dcc2a5b1bb5a4eced94 Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Mon, 19 Jan 2026 00:56:07 -0300 Subject: [PATCH 258/270] portal: use mailu smtp secret --- services/bstein-dev-home/backend-deployment.yaml | 8 ++++---- services/vault/scripts/vault_k8s_auth_configure.sh | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/services/bstein-dev-home/backend-deployment.yaml b/services/bstein-dev-home/backend-deployment.yaml index b65d477..4ea9fe2 100644 --- a/services/bstein-dev-home/backend-deployment.yaml +++ 
b/services/bstein-dev-home/backend-deployment.yaml @@ -33,13 +33,13 @@ spec: export PORTAL_E2E_CLIENT_ID="{{ .Data.data.client_id }}" export PORTAL_E2E_CLIENT_SECRET="{{ .Data.data.client_secret }}" {{ end }} - {{ with secret "kv/data/atlas/shared/postmark-relay" }} - export SMTP_HOST="smtp.postmarkapp.com" + {{ with secret "kv/data/atlas/mailu/mailu-initial-account-secret" }} + export SMTP_HOST="mailu-front.mailu-mailserver.svc.cluster.local" export SMTP_PORT="587" export SMTP_STARTTLS="true" export SMTP_USE_TLS="false" - export SMTP_USERNAME="{{ index .Data.data "apikey" }}" - export SMTP_PASSWORD="{{ index .Data.data "apikey" }}" + export SMTP_USERNAME="test@bstein.dev" + export SMTP_PASSWORD="{{ .Data.data.password }}" export SMTP_FROM="no-reply-portal@bstein.dev" {{ end }} spec: diff --git a/services/vault/scripts/vault_k8s_auth_configure.sh b/services/vault/scripts/vault_k8s_auth_configure.sh index c18a898..b37db42 100644 --- a/services/vault/scripts/vault_k8s_auth_configure.sh +++ b/services/vault/scripts/vault_k8s_auth_configure.sh @@ -203,7 +203,7 @@ write_policy_and_role "outline" "outline" "outline-vault" \ write_policy_and_role "planka" "planka" "planka-vault" \ "planka/* shared/postmark-relay" "" write_policy_and_role "bstein-dev-home" "bstein-dev-home" "bstein-dev-home,bstein-dev-home-vault-sync" \ - "portal/* shared/chat-ai-keys-runtime shared/portal-e2e-client shared/postmark-relay harbor-pull/bstein-dev-home" "" + "portal/* shared/chat-ai-keys-runtime shared/portal-e2e-client shared/postmark-relay mailu/mailu-initial-account-secret harbor-pull/bstein-dev-home" "" write_policy_and_role "gitea" "gitea" "gitea-vault" \ "gitea/*" "" write_policy_and_role "vaultwarden" "vaultwarden" "vaultwarden-vault" \ From 6935de7a6cda819aaf7b3be3d1385dd315934e34 Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Mon, 19 Jan 2026 01:04:08 -0300 Subject: [PATCH 259/270] portal: use mailu sender mailbox --- services/bstein-dev-home/backend-deployment.yaml | 2 +- 1 file 
changed, 1 insertion(+), 1 deletion(-) diff --git a/services/bstein-dev-home/backend-deployment.yaml b/services/bstein-dev-home/backend-deployment.yaml index 4ea9fe2..b4f426f 100644 --- a/services/bstein-dev-home/backend-deployment.yaml +++ b/services/bstein-dev-home/backend-deployment.yaml @@ -40,7 +40,7 @@ spec: export SMTP_USE_TLS="false" export SMTP_USERNAME="test@bstein.dev" export SMTP_PASSWORD="{{ .Data.data.password }}" - export SMTP_FROM="no-reply-portal@bstein.dev" + export SMTP_FROM="test@bstein.dev" {{ end }} spec: automountServiceAccountToken: true From 67203d114778e3b4c5ebaaa4ed123256c3931e32 Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Mon, 19 Jan 2026 01:14:29 -0300 Subject: [PATCH 260/270] nextcloud-mail-sync: pin to arm64 workers --- services/nextcloud-mail-sync/cronjob.yaml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/services/nextcloud-mail-sync/cronjob.yaml b/services/nextcloud-mail-sync/cronjob.yaml index b4baa9c..2073d76 100644 --- a/services/nextcloud-mail-sync/cronjob.yaml +++ b/services/nextcloud-mail-sync/cronjob.yaml @@ -45,6 +45,9 @@ spec: export KC_ADMIN_PASS="{{ .Data.data.password }}" {{ end }} spec: + nodeSelector: + kubernetes.io/arch: arm64 + node-role.kubernetes.io/worker: "true" restartPolicy: OnFailure securityContext: runAsUser: 0 From aaf7e236035e1ae2bd82f3944f3dfddbe062f59b Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Mon, 19 Jan 2026 01:21:56 -0300 Subject: [PATCH 261/270] portal: allow firefly sync jobs --- services/bstein-dev-home/rbac.yaml | 31 ++++++++++++++++++++++++++++++ 1 file changed, 31 insertions(+) diff --git a/services/bstein-dev-home/rbac.yaml b/services/bstein-dev-home/rbac.yaml index 7ce8fd8..5ff26eb 100644 --- a/services/bstein-dev-home/rbac.yaml +++ b/services/bstein-dev-home/rbac.yaml @@ -137,3 +137,34 @@ subjects: - kind: ServiceAccount name: bstein-dev-home namespace: bstein-dev-home +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: 
bstein-dev-home-firefly-user-sync + namespace: finance +rules: + - apiGroups: ["batch"] + resources: ["cronjobs"] + verbs: ["get"] + resourceNames: ["firefly-user-sync"] + - apiGroups: ["batch"] + resources: ["jobs"] + verbs: ["create", "get", "list", "watch"] + - apiGroups: [""] + resources: ["pods"] + verbs: ["get", "list"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: bstein-dev-home-firefly-user-sync + namespace: finance +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: bstein-dev-home-firefly-user-sync +subjects: + - kind: ServiceAccount + name: bstein-dev-home + namespace: bstein-dev-home From f4b08b93eb0cec24542b75fb8ff4a3a99f003b39 Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Mon, 19 Jan 2026 01:40:27 -0300 Subject: [PATCH 262/270] mailu: add portal sender mailbox --- .../bstein-dev-home/backend-deployment.yaml | 4 +-- services/mailu/mailu-sync-cronjob.yaml | 5 ++++ services/mailu/mailu-sync-job.yaml | 7 ++++- services/mailu/mailu-sync-listener.yaml | 5 ++++ services/mailu/scripts/mailu_sync.py | 26 ++++++++++++++++++- services/mailu/scripts/mailu_vault_env.sh | 1 + 6 files changed, 44 insertions(+), 4 deletions(-) diff --git a/services/bstein-dev-home/backend-deployment.yaml b/services/bstein-dev-home/backend-deployment.yaml index b4f426f..53fa4a0 100644 --- a/services/bstein-dev-home/backend-deployment.yaml +++ b/services/bstein-dev-home/backend-deployment.yaml @@ -38,9 +38,9 @@ spec: export SMTP_PORT="587" export SMTP_STARTTLS="true" export SMTP_USE_TLS="false" - export SMTP_USERNAME="test@bstein.dev" + export SMTP_USERNAME="no-reply-portal@bstein.dev" export SMTP_PASSWORD="{{ .Data.data.password }}" - export SMTP_FROM="test@bstein.dev" + export SMTP_FROM="no-reply-portal@bstein.dev" {{ end }} spec: automountServiceAccountToken: true diff --git a/services/mailu/mailu-sync-cronjob.yaml b/services/mailu/mailu-sync-cronjob.yaml index 57cbd0a..9f55f7b 100644 --- 
a/services/mailu/mailu-sync-cronjob.yaml +++ b/services/mailu/mailu-sync-cronjob.yaml @@ -32,6 +32,9 @@ spec: vault.hashicorp.com/agent-inject-secret-mailu-sync-credentials__client-secret: "kv/data/atlas/mailu/mailu-sync-credentials" vault.hashicorp.com/agent-inject-template-mailu-sync-credentials__client-secret: | {{- with secret "kv/data/atlas/mailu/mailu-sync-credentials" -}}{{ index .Data.data "client-secret" }}{{- end -}} + vault.hashicorp.com/agent-inject-secret-mailu-initial-account-secret__password: "kv/data/atlas/mailu/mailu-initial-account-secret" + vault.hashicorp.com/agent-inject-template-mailu-initial-account-secret__password: | + {{- with secret "kv/data/atlas/mailu/mailu-initial-account-secret" -}}{{ .Data.data.password }}{{- end -}} spec: restartPolicy: OnFailure serviceAccountName: mailu-vault-sync @@ -55,6 +58,8 @@ spec: value: bstein.dev - name: MAILU_DEFAULT_QUOTA value: "20000000000" + - name: MAILU_SYSTEM_USERS + value: no-reply-portal@bstein.dev - name: MAILU_DB_HOST value: postgres-service.postgres.svc.cluster.local - name: MAILU_DB_PORT diff --git a/services/mailu/mailu-sync-job.yaml b/services/mailu/mailu-sync-job.yaml index 18aef7c..0eaebe6 100644 --- a/services/mailu/mailu-sync-job.yaml +++ b/services/mailu/mailu-sync-job.yaml @@ -2,7 +2,7 @@ apiVersion: batch/v1 kind: Job metadata: - name: mailu-sync-7 + name: mailu-sync-8 namespace: mailu-mailserver spec: template: @@ -26,6 +26,9 @@ spec: vault.hashicorp.com/agent-inject-secret-mailu-sync-credentials__client-secret: "kv/data/atlas/mailu/mailu-sync-credentials" vault.hashicorp.com/agent-inject-template-mailu-sync-credentials__client-secret: | {{- with secret "kv/data/atlas/mailu/mailu-sync-credentials" -}}{{ index .Data.data "client-secret" }}{{- end -}} + vault.hashicorp.com/agent-inject-secret-mailu-initial-account-secret__password: "kv/data/atlas/mailu/mailu-initial-account-secret" + vault.hashicorp.com/agent-inject-template-mailu-initial-account-secret__password: | + {{- with secret 
"kv/data/atlas/mailu/mailu-initial-account-secret" -}}{{ .Data.data.password }}{{- end -}} spec: restartPolicy: OnFailure affinity: @@ -63,6 +66,8 @@ spec: value: bstein.dev - name: MAILU_DEFAULT_QUOTA value: "20000000000" + - name: MAILU_SYSTEM_USERS + value: no-reply-portal@bstein.dev - name: MAILU_DB_HOST value: postgres-service.postgres.svc.cluster.local - name: MAILU_DB_PORT diff --git a/services/mailu/mailu-sync-listener.yaml b/services/mailu/mailu-sync-listener.yaml index cfc915f..83b812f 100644 --- a/services/mailu/mailu-sync-listener.yaml +++ b/services/mailu/mailu-sync-listener.yaml @@ -46,6 +46,9 @@ spec: vault.hashicorp.com/agent-inject-secret-mailu-sync-credentials__client-secret: "kv/data/atlas/mailu/mailu-sync-credentials" vault.hashicorp.com/agent-inject-template-mailu-sync-credentials__client-secret: | {{- with secret "kv/data/atlas/mailu/mailu-sync-credentials" -}}{{ index .Data.data "client-secret" }}{{- end -}} + vault.hashicorp.com/agent-inject-secret-mailu-initial-account-secret__password: "kv/data/atlas/mailu/mailu-initial-account-secret" + vault.hashicorp.com/agent-inject-template-mailu-initial-account-secret__password: | + {{- with secret "kv/data/atlas/mailu/mailu-initial-account-secret" -}}{{ .Data.data.password }}{{- end -}} spec: restartPolicy: Always serviceAccountName: mailu-vault-sync @@ -69,6 +72,8 @@ spec: value: bstein.dev - name: MAILU_DEFAULT_QUOTA value: "20000000000" + - name: MAILU_SYSTEM_USERS + value: no-reply-portal@bstein.dev - name: MAILU_DB_HOST value: postgres-service.postgres.svc.cluster.local - name: MAILU_DB_PORT diff --git a/services/mailu/scripts/mailu_sync.py b/services/mailu/scripts/mailu_sync.py index 7c5edda..001917a 100644 --- a/services/mailu/scripts/mailu_sync.py +++ b/services/mailu/scripts/mailu_sync.py @@ -27,6 +27,12 @@ MAILU_DOMAIN = os.environ["MAILU_DOMAIN"] MAILU_DEFAULT_QUOTA = int(os.environ.get("MAILU_DEFAULT_QUOTA", "20000000000")) MAILU_ENABLED_ATTR = os.environ.get("MAILU_ENABLED_ATTR", 
"mailu_enabled") MAILU_EMAIL_ATTR = "mailu_email" +MAILU_SYSTEM_USERS = [ + item.strip() + for item in os.environ.get("MAILU_SYSTEM_USERS", "").split(",") + if item.strip() +] +MAILU_SYSTEM_PASSWORD = os.environ.get("MAILU_SYSTEM_PASSWORD", "").strip() DB_CONFIG = { "host": os.environ["MAILU_DB_HOST"], @@ -213,10 +219,26 @@ def ensure_mailu_user(cursor, email, password, display_name): ) +def ensure_system_mailboxes(cursor): + if not MAILU_SYSTEM_USERS: + return + if not MAILU_SYSTEM_PASSWORD: + log("MAILU_SYSTEM_USERS set but MAILU_SYSTEM_PASSWORD is missing; skipping system mailboxes") + return + + for email in MAILU_SYSTEM_USERS: + localpart = email.split("@", 1)[0] if "@" in email else email + try: + ensure_mailu_user(cursor, email, MAILU_SYSTEM_PASSWORD, localpart) + log(f"Ensured system mailbox for {email}") + except Exception as exc: + log(f"Failed to ensure system mailbox {email}: {exc}") + + def main(): token = retry_request("Keycloak token", get_kc_token) users = retry_request("Keycloak user list", lambda: kc_get_users(token)) - if not users: + if not users and not MAILU_SYSTEM_USERS: log("No users found; exiting.") return @@ -257,6 +279,8 @@ def main(): ensure_mailu_user(cursor, mailu_email, app_pw, display_name) log(f"Synced mailbox for {mailu_email}") + ensure_system_mailboxes(cursor) + cursor.close() conn.close() diff --git a/services/mailu/scripts/mailu_vault_env.sh b/services/mailu/scripts/mailu_vault_env.sh index 1ba7dce..fb8055b 100644 --- a/services/mailu/scripts/mailu_vault_env.sh +++ b/services/mailu/scripts/mailu_vault_env.sh @@ -12,3 +12,4 @@ export MAILU_DB_USER="$(read_secret mailu-db-secret__username)" export MAILU_DB_PASSWORD="$(read_secret mailu-db-secret__password)" export KEYCLOAK_CLIENT_ID="$(read_secret mailu-sync-credentials__client-id)" export KEYCLOAK_CLIENT_SECRET="$(read_secret mailu-sync-credentials__client-secret)" +export MAILU_SYSTEM_PASSWORD="$(read_secret mailu-initial-account-secret__password)" From 
d5a19ca9c3c17b8bfbfd9ca160654e9283cd122b Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Mon, 19 Jan 2026 01:40:42 -0300 Subject: [PATCH 263/270] portal-e2e: add readiness checks --- .../scripts/test_portal_onboarding_flow.py | 26 +++++++++++++++++++ 1 file changed, 26 insertions(+) diff --git a/services/bstein-dev-home/scripts/test_portal_onboarding_flow.py b/services/bstein-dev-home/scripts/test_portal_onboarding_flow.py index ad86fe6..2903216 100644 --- a/services/bstein-dev-home/scripts/test_portal_onboarding_flow.py +++ b/services/bstein-dev-home/scripts/test_portal_onboarding_flow.py @@ -65,6 +65,23 @@ def _get_json(url: str, headers: dict[str, str] | None = None, timeout_s: int = raise SystemExit(f"HTTP {exc.code} from {url}: {raw}") +def _wait_for_portal_ready(base_url: str, timeout_s: int = 60) -> None: + health_url = f"{base_url.rstrip('/')}/api/healthz" + deadline_at = time.monotonic() + timeout_s + last_error = None + while time.monotonic() < deadline_at: + try: + req = urllib.request.Request(health_url, method="GET") + with urllib.request.urlopen(req, timeout=10) as resp: + if resp.status == 200: + return + except Exception as exc: + last_error = str(exc) + time.sleep(2) + suffix = f" (last_error={last_error})" if last_error else "" + raise SystemExit(f"portal health check timed out{suffix}") + + def _request_json( method: str, url: str, @@ -235,6 +252,7 @@ def _imap_wait_for_verify_token( def main() -> int: portal_base = _env("PORTAL_BASE_URL").rstrip("/") + portal_ready_timeout = int(os.environ.get("E2E_PORTAL_READY_TIMEOUT_SECONDS", "60")) keycloak_base = _env("KEYCLOAK_ADMIN_URL").rstrip("/") realm = _env("KEYCLOAK_REALM", "atlas") @@ -274,6 +292,8 @@ def main() -> int: if not mailu_password: raise SystemExit(f"Keycloak user {imap_keycloak_username!r} missing mailu_app_password attribute") + _wait_for_portal_ready(portal_base, timeout_s=portal_ready_timeout) + username_prefix = os.environ.get("E2E_USERNAME_PREFIX", "e2e-user") now = 
int(time.time()) username = f"{username_prefix}-{now}" @@ -336,6 +356,8 @@ def main() -> int: except SystemExit as exc: raise SystemExit(f"failed to exchange token for portal approval as {portal_admin_username!r}: {exc}") + _wait_for_portal_ready(portal_base, timeout_s=portal_ready_timeout) + approve_url = f"{portal_base}/api/admin/access/requests/{urllib.parse.quote(username, safe='')}/approve" approve_timeout_s = int(os.environ.get("E2E_APPROVE_TIMEOUT_SECONDS", "180")) approve_attempts = int(os.environ.get("E2E_APPROVE_ATTEMPTS", "3")) @@ -348,6 +370,10 @@ def main() -> int: break except (http.client.RemoteDisconnected, TimeoutError, urllib.error.URLError) as exc: approve_error = str(exc) + try: + _wait_for_portal_ready(portal_base, timeout_s=min(30, portal_ready_timeout)) + except SystemExit: + pass if attempt == approve_attempts: break time.sleep(3) From bed3563ae64c746ff7f6282620ee8f73d26db894 Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Mon, 19 Jan 2026 01:53:13 -0300 Subject: [PATCH 264/270] mailu-sync: cap wait in listener --- services/mailu/scripts/mailu_sync_listener.py | 28 ++++++++++++------- 1 file changed, 18 insertions(+), 10 deletions(-) diff --git a/services/mailu/scripts/mailu_sync_listener.py b/services/mailu/scripts/mailu_sync_listener.py index 27070c0..6ac0da7 100644 --- a/services/mailu/scripts/mailu_sync_listener.py +++ b/services/mailu/scripts/mailu_sync_listener.py @@ -1,5 +1,6 @@ import http.server import json +import os import subprocess import threading @@ -7,15 +8,17 @@ from time import time # Simple debounce to avoid hammering on bursts MIN_INTERVAL_SECONDS = 10 +WAIT_TIMEOUT_SECONDS = float(os.environ.get("MAILU_SYNC_WAIT_TIMEOUT_SEC", "20")) last_run = 0.0 lock = threading.Lock() sync_done = threading.Event() sync_done.set() sync_running = False +last_rc = None def _run_sync_blocking() -> int: - global last_run, sync_running + global last_run, sync_running, last_rc with lock: if sync_running: return 0 @@ -27,6 +30,7 @@ def 
_run_sync_blocking() -> int: proc = subprocess.run(["python", "/app/sync.py"], check=False) rc = int(proc.returncode) print(f"mailu-sync-listener: sync completed rc={rc}", flush=True) + last_rc = rc return rc finally: with lock: @@ -66,16 +70,20 @@ class Handler(http.server.BaseHTTPRequestHandler): if wait: with lock: already_running = sync_running - if already_running: - sync_done.wait(timeout=120) - with lock: - still_running = sync_running - self.send_response(200 if not still_running else 503) - self.end_headers() - return - rc = _run_sync_blocking() - self.send_response(200 if rc == 0 else 500) + if not already_running: + _trigger_sync_async() + + sync_done.wait(timeout=WAIT_TIMEOUT_SECONDS) + with lock: + still_running = sync_running + rc = last_rc + + if still_running: + # Avoid blocking callers while a sync is in flight. + self.send_response(200) + else: + self.send_response(200 if rc == 0 else 500) self.end_headers() return From 21899b8a799106c36562067167566451a81b6c73 Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Mon, 19 Jan 2026 01:53:25 -0300 Subject: [PATCH 265/270] portal: tune vaultwarden backoff --- services/bstein-dev-home/backend-deployment.yaml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/services/bstein-dev-home/backend-deployment.yaml b/services/bstein-dev-home/backend-deployment.yaml index 53fa4a0..376622c 100644 --- a/services/bstein-dev-home/backend-deployment.yaml +++ b/services/bstein-dev-home/backend-deployment.yaml @@ -117,6 +117,10 @@ spec: value: firefly-user-sync - name: FIREFLY_USER_SYNC_WAIT_TIMEOUT_SEC value: "90" + - name: VAULTWARDEN_ADMIN_SESSION_TTL_SEC + value: "900" + - name: VAULTWARDEN_ADMIN_RATE_LIMIT_BACKOFF_SEC + value: "60" ports: - name: http containerPort: 8080 From d943359606d956c495b94ed44fc8d275ad07c431 Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Mon, 19 Jan 2026 01:57:49 -0300 Subject: [PATCH 266/270] mailu-sync: restart listener for update --- services/mailu/mailu-sync-listener.yaml | 1 + 1 file 
changed, 1 insertion(+) diff --git a/services/mailu/mailu-sync-listener.yaml b/services/mailu/mailu-sync-listener.yaml index 83b812f..5fb8bff 100644 --- a/services/mailu/mailu-sync-listener.yaml +++ b/services/mailu/mailu-sync-listener.yaml @@ -30,6 +30,7 @@ spec: app: mailu-sync-listener annotations: vault.hashicorp.com/agent-inject: "true" + atlas.bstein.dev/mailu-sync-rev: "2" vault.hashicorp.com/role: "mailu-mailserver" vault.hashicorp.com/agent-inject-secret-mailu-db-secret__database: "kv/data/atlas/mailu/mailu-db-secret" vault.hashicorp.com/agent-inject-template-mailu-db-secret__database: | From 2802c1e8b676666ef12f6ad26e05cb71507e3478 Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Mon, 19 Jan 2026 02:17:16 -0300 Subject: [PATCH 267/270] vaultwarden: use mailu smtp creds --- services/mailu/mailu-sync-cronjob.yaml | 2 +- services/mailu/mailu-sync-job.yaml | 2 +- services/mailu/mailu-sync-listener.yaml | 2 +- services/vaultwarden/deployment.yaml | 9 +++++---- 4 files changed, 8 insertions(+), 7 deletions(-) diff --git a/services/mailu/mailu-sync-cronjob.yaml b/services/mailu/mailu-sync-cronjob.yaml index 9f55f7b..1da1981 100644 --- a/services/mailu/mailu-sync-cronjob.yaml +++ b/services/mailu/mailu-sync-cronjob.yaml @@ -59,7 +59,7 @@ spec: - name: MAILU_DEFAULT_QUOTA value: "20000000000" - name: MAILU_SYSTEM_USERS - value: no-reply-portal@bstein.dev + value: "no-reply-portal@bstein.dev,no-reply-vaultwarden@bstein.dev" - name: MAILU_DB_HOST value: postgres-service.postgres.svc.cluster.local - name: MAILU_DB_PORT diff --git a/services/mailu/mailu-sync-job.yaml b/services/mailu/mailu-sync-job.yaml index 0eaebe6..ec45ad0 100644 --- a/services/mailu/mailu-sync-job.yaml +++ b/services/mailu/mailu-sync-job.yaml @@ -67,7 +67,7 @@ spec: - name: MAILU_DEFAULT_QUOTA value: "20000000000" - name: MAILU_SYSTEM_USERS - value: no-reply-portal@bstein.dev + value: no-reply-portal@bstein.dev,no-reply-vaultwarden@bstein.dev - name: MAILU_DB_HOST value: 
postgres-service.postgres.svc.cluster.local - name: MAILU_DB_PORT diff --git a/services/mailu/mailu-sync-listener.yaml b/services/mailu/mailu-sync-listener.yaml index 5fb8bff..cc98107 100644 --- a/services/mailu/mailu-sync-listener.yaml +++ b/services/mailu/mailu-sync-listener.yaml @@ -74,7 +74,7 @@ spec: - name: MAILU_DEFAULT_QUOTA value: "20000000000" - name: MAILU_SYSTEM_USERS - value: no-reply-portal@bstein.dev + value: no-reply-portal@bstein.dev,no-reply-vaultwarden@bstein.dev - name: MAILU_DB_HOST value: postgres-service.postgres.svc.cluster.local - name: MAILU_DB_PORT diff --git a/services/vaultwarden/deployment.yaml b/services/vaultwarden/deployment.yaml index 0567f46..8cfd32a 100644 --- a/services/vaultwarden/deployment.yaml +++ b/services/vaultwarden/deployment.yaml @@ -29,9 +29,8 @@ spec: {{ with secret "kv/data/atlas/vaultwarden/vaultwarden-admin" }} export ADMIN_TOKEN="{{ .Data.data.ADMIN_TOKEN }}" {{ end }} - {{ with secret "kv/data/atlas/shared/postmark-relay" }} - export SMTP_USERNAME="{{ index .Data.data "apikey" }}" - export SMTP_PASSWORD="{{ index .Data.data "apikey" }}" + {{ with secret "kv/data/atlas/mailu/mailu-initial-account-secret" }} + export SMTP_PASSWORD="{{ .Data.data.password }}" {{ end }} spec: serviceAccountName: vaultwarden-vault @@ -64,7 +63,7 @@ spec: - name: DATABASE_IDLE_TIMEOUT value: "600" - name: SMTP_HOST - value: "mail.bstein.dev" + value: "mailu-front.mailu-mailserver.svc.cluster.local" - name: SMTP_PORT value: "587" - name: SMTP_SECURITY @@ -73,6 +72,8 @@ spec: value: "false" - name: SMTP_ACCEPT_INVALID_CERTS value: "false" + - name: SMTP_USERNAME + value: "no-reply-vaultwarden@bstein.dev" - name: SMTP_FROM value: "no-reply-vaultwarden@bstein.dev" - name: SMTP_FROM_NAME From 35816115f8142bc146184d222bba98ad4cd33083 Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Mon, 19 Jan 2026 02:23:16 -0300 Subject: [PATCH 268/270] vault: allow vaultwarden mailu secret --- services/vault/scripts/vault_k8s_auth_configure.sh | 2 +- 1 
file changed, 1 insertion(+), 1 deletion(-) diff --git a/services/vault/scripts/vault_k8s_auth_configure.sh b/services/vault/scripts/vault_k8s_auth_configure.sh index b37db42..202879f 100644 --- a/services/vault/scripts/vault_k8s_auth_configure.sh +++ b/services/vault/scripts/vault_k8s_auth_configure.sh @@ -207,7 +207,7 @@ write_policy_and_role "bstein-dev-home" "bstein-dev-home" "bstein-dev-home,bstei write_policy_and_role "gitea" "gitea" "gitea-vault" \ "gitea/*" "" write_policy_and_role "vaultwarden" "vaultwarden" "vaultwarden-vault" \ - "vaultwarden/* shared/postmark-relay" "" + "vaultwarden/* mailu/mailu-initial-account-secret" "" write_policy_and_role "sso" "sso" "sso-vault,sso-vault-sync,mas-secrets-ensure" \ "sso/* portal/bstein-dev-home-keycloak-admin shared/keycloak-admin shared/portal-e2e-client shared/postmark-relay harbor-pull/sso" "" write_policy_and_role "mailu-mailserver" "mailu-mailserver" "mailu-vault-sync" \ From 89316a5901a8ded2fd4eafc92107c6b1d58defad Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Mon, 19 Jan 2026 02:31:41 -0300 Subject: [PATCH 269/270] vaultwarden: use mail hostname --- services/vaultwarden/deployment.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/services/vaultwarden/deployment.yaml b/services/vaultwarden/deployment.yaml index 8cfd32a..2893a92 100644 --- a/services/vaultwarden/deployment.yaml +++ b/services/vaultwarden/deployment.yaml @@ -63,7 +63,7 @@ spec: - name: DATABASE_IDLE_TIMEOUT value: "600" - name: SMTP_HOST - value: "mailu-front.mailu-mailserver.svc.cluster.local" + value: "mail.bstein.dev" - name: SMTP_PORT value: "587" - name: SMTP_SECURITY From b09679a8122f5398a3c765772bba73f7beb50c27 Mon Sep 17 00:00:00 2001 From: Brad Stein Date: Mon, 19 Jan 2026 02:45:19 -0300 Subject: [PATCH 270/270] mailu-sync: bump job --- services/mailu/mailu-sync-job.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/services/mailu/mailu-sync-job.yaml b/services/mailu/mailu-sync-job.yaml index 
ec45ad0..8589e9e 100644 --- a/services/mailu/mailu-sync-job.yaml +++ b/services/mailu/mailu-sync-job.yaml @@ -2,7 +2,7 @@ apiVersion: batch/v1 kind: Job metadata: - name: mailu-sync-8 + name: mailu-sync-9 namespace: mailu-mailserver spec: template: