vault: inject remaining services with wrappers

Brad Stein 2026-01-14 17:29:09 -03:00
parent fb9578b624
commit fb671865e5
52 changed files with 778 additions and 1743 deletions

View File

@ -20,14 +20,6 @@ spec:
healthChecks:
- apiVersion: apps/v1
kind: Deployment
name: endurain
namespace: health
- apiVersion: apps/v1
kind: Deployment
name: sparkyfitness-server
namespace: health
- apiVersion: apps/v1
kind: Deployment
name: sparkyfitness-frontend
name: wger
namespace: health
wait: false

View File

@ -0,0 +1,10 @@
FROM ghcr.io/element-hq/lk-jwt-service:0.3.0 AS base
FROM alpine:3.20
RUN apk add --no-cache ca-certificates
COPY --from=base /lk-jwt-service /lk-jwt-service
COPY dockerfiles/vault-entrypoint.sh /entrypoint.sh
RUN chmod 0755 /entrypoint.sh
ENTRYPOINT ["/entrypoint.sh"]
CMD ["/lk-jwt-service"]

View File

@ -0,0 +1,10 @@
FROM quay.io/oauth2-proxy/oauth2-proxy:v7.6.0 AS base
FROM alpine:3.20
RUN apk add --no-cache ca-certificates
COPY --from=base /bin/oauth2-proxy /bin/oauth2-proxy
COPY dockerfiles/vault-entrypoint.sh /entrypoint.sh
RUN chmod 0755 /entrypoint.sh
ENTRYPOINT ["/entrypoint.sh"]
CMD ["/bin/oauth2-proxy"]

View File

@ -0,0 +1,10 @@
FROM registry.bstein.dev/streaming/pegasus:1.2.32 AS base
FROM alpine:3.20
RUN apk add --no-cache ca-certificates
COPY --from=base /pegasus /pegasus
COPY dockerfiles/vault-entrypoint.sh /entrypoint.sh
RUN chmod 0755 /entrypoint.sh
ENTRYPOINT ["/entrypoint.sh"]
CMD ["/pegasus"]

View File

@ -0,0 +1,14 @@
#!/bin/sh
set -eu

if [ -n "${VAULT_ENV_FILE:-}" ]; then
    if [ -f "${VAULT_ENV_FILE}" ]; then
        # shellcheck disable=SC1090
        . "${VAULT_ENV_FILE}"
    else
        echo "Vault env file not found: ${VAULT_ENV_FILE}" >&2
        exit 1
    fi
fi

exec "$@"

View File

@ -96,6 +96,12 @@ spec:
value: "60"
- name: ACCESS_REQUEST_INTERNAL_EMAIL_ALLOWLIST
value: robotuser@bstein.dev
- name: WGER_NAMESPACE
value: health
- name: WGER_USER_SYNC_CRONJOB
value: wger-user-sync
- name: WGER_USER_SYNC_WAIT_TIMEOUT_SEC
value: "90"
ports:
- name: http
containerPort: 8080

View File

@ -106,3 +106,34 @@ subjects:
- kind: ServiceAccount
name: bstein-dev-home
namespace: bstein-dev-home
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: bstein-dev-home-wger-user-sync
namespace: health
rules:
- apiGroups: ["batch"]
resources: ["cronjobs"]
verbs: ["get"]
resourceNames: ["wger-user-sync"]
- apiGroups: ["batch"]
resources: ["jobs"]
verbs: ["create", "get", "list", "watch"]
- apiGroups: [""]
resources: ["pods"]
verbs: ["get", "list"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: bstein-dev-home-wger-user-sync
namespace: health
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: bstein-dev-home-wger-user-sync
subjects:
- kind: ServiceAccount
name: bstein-dev-home
namespace: bstein-dev-home
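
This Role is scoped tightly: the portal's service account may read the wger-user-sync CronJob, create and watch Jobs cut from its template, and list the resulting pods, matching the WGER_USER_SYNC_* settings added to the deployment above. The manual equivalent of what the portal presumably drives through the API:

    kubectl -n health create job wger-user-sync-manual --from=cronjob/wger-user-sync
    kubectl -n health wait --for=condition=complete job/wger-user-sync-manual --timeout=90s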

View File

@ -14,8 +14,18 @@ spec:
metadata:
labels:
app: livekit-token-service
annotations:
vault.hashicorp.com/agent-inject: "true"
vault.hashicorp.com/role: "comms"
vault.hashicorp.com/agent-inject-secret-livekit-env: "kv/data/atlas/comms/livekit-api"
vault.hashicorp.com/agent-inject-template-livekit-env: |
{{- with secret "kv/data/atlas/comms/livekit-api" -}}
export LIVEKIT_SECRET="{{ .Data.data.primary }}"
{{- end -}}
spec:
serviceAccountName: comms-vault
imagePullSecrets:
- name: harbor-regcred
nodeSelector:
hardware: rpi5
affinity:
@ -33,17 +43,14 @@ spec:
- live.bstein.dev
containers:
- name: token-service
image: ghcr.io/element-hq/lk-jwt-service:0.3.0
image: registry.bstein.dev/tools/lk-jwt-service-vault:0.3.0
env:
- name: LIVEKIT_URL
value: wss://kit.live.bstein.dev/livekit/sfu
- name: LIVEKIT_KEY
value: primary
- name: LIVEKIT_SECRET
valueFrom:
secretKeyRef:
name: livekit-api
key: primary
- name: VAULT_ENV_FILE
value: /vault/secrets/livekit-env
- name: LIVEKIT_FULL_ACCESS_HOMESERVERS
value: live.bstein.dev
ports:
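
With these annotations the agent sidecar renders /vault/secrets/livekit-env, and the wrapper image sources it via VAULT_ENV_FILE before exec'ing the token service, replacing the old secretKeyRef. One way to confirm the rendered file inside a running pod (namespace and deployment name assumed):

    kubectl -n comms exec deploy/livekit-token-service -c token-service -- \
        sh -c '. "$VAULT_ENV_FILE" && echo "LIVEKIT_SECRET length: ${#LIVEKIT_SECRET}"'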

View File

@ -13,9 +13,6 @@ spec:
- objectName: "turn-secret"
secretPath: "kv/data/atlas/comms/turn-shared-secret"
secretKey: "TURN_STATIC_AUTH_SECRET"
- objectName: "livekit-primary"
secretPath: "kv/data/atlas/comms/livekit-api"
secretKey: "primary"
- objectName: "synapse-db-pass"
secretPath: "kv/data/atlas/comms/synapse-db"
secretKey: "POSTGRES_PASSWORD"
@ -70,11 +67,6 @@ spec:
data:
- objectName: turn-secret
key: TURN_STATIC_AUTH_SECRET
- secretName: livekit-api
type: Opaque
data:
- objectName: livekit-primary
key: primary
- secretName: synapse-db
type: Opaque
data:

View File

@ -12,9 +12,18 @@ spec:
template:
metadata:
labels: { app: monero-p2pool }
annotations:
vault.hashicorp.com/agent-inject: "true"
vault.hashicorp.com/role: "crypto"
vault.hashicorp.com/agent-inject-secret-xmr-env: "kv/data/atlas/crypto/xmr-payout"
vault.hashicorp.com/agent-inject-template-xmr-env: |
{{- with secret "kv/data/atlas/crypto/xmr-payout" -}}
export XMR_ADDR="{{ .Data.data.address }}"
{{- end -}}
spec:
nodeSelector:
node-role.kubernetes.io/worker: "true"
serviceAccountName: crypto-vault-sync
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
@ -61,27 +70,17 @@ spec:
- name: monero-p2pool
image: debian:bookworm-slim
imagePullPolicy: IfNotPresent
command: ["/opt/p2pool/p2pool"]
command: ["/bin/sh", "-c"]
args:
- "--host"
- "monerod.crypto.svc.cluster.local"
- "--rpc-port"
- "18081"
- "--zmq-port"
- "18083"
- "--stratum"
- "0.0.0.0:3333"
- "--wallet"
- "$(XMR_ADDR)"
# - "--light-mode"
# - "--no-randomx"
# - "--no-cache"
env:
- name: XMR_ADDR
valueFrom:
secretKeyRef:
name: xmr-payout
key: address
- |
set -eu
. /vault/secrets/xmr-env
exec /opt/p2pool/p2pool \
--host monerod.crypto.svc.cluster.local \
--rpc-port 18081 \
--zmq-port 18083 \
--stratum 0.0.0.0:3333 \
--wallet "${XMR_ADDR}"
ports:
- { name: stratum, containerPort: 3333, protocol: TCP }
readinessProbe:

View File

@ -10,18 +10,10 @@ spec:
vaultAddress: "http://vault.vault.svc.cluster.local:8200"
roleName: "crypto"
objects: |
- objectName: "xmr-payout__address"
secretPath: "kv/data/atlas/crypto/xmr-payout"
secretKey: "address"
- objectName: "harbor-pull__dockerconfigjson"
secretPath: "kv/data/atlas/harbor-pull/crypto"
secretKey: "dockerconfigjson"
secretObjects:
- secretName: xmr-payout
type: Opaque
data:
- objectName: xmr-payout__address
key: address
- secretName: harbor-regcred
type: kubernetes.io/dockerconfigjson
data:

View File

@ -0,0 +1,30 @@
upstream wger {
    server 127.0.0.1:8000;
}

server {
    listen 8080;

    location = /api/v2/register {
        return 404;
    }

    location / {
        proxy_pass http://wger;
        proxy_set_header Host $host;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
        proxy_set_header X-Forwarded-Host $host;
        proxy_redirect off;
    }

    location /static/ {
        alias /wger/static/;
    }

    location /media/ {
        alias /wger/media/;
    }

    client_max_body_size 100M;
}
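
The exact-match location is what enforces SSO-only signup at the proxy even though wger itself also ships with ALLOW_REGISTRATION=False in this commit. After rollout, the block can be spot-checked against the ingress host configured later in the diff:

    curl -s -o /dev/null -w '%{http_code}\n' https://health.bstein.dev/api/v2/register   # expect 404
    curl -s -o /dev/null -w '%{http_code}\n' https://health.bstein.dev/api/v2/version/   # expect 200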

View File

@ -1,147 +0,0 @@
# services/health/endurain-deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: endurain
namespace: health
labels:
app: endurain
spec:
replicas: 1
selector:
matchLabels:
app: endurain
strategy:
type: RollingUpdate
rollingUpdate:
maxSurge: 0
maxUnavailable: 1
template:
metadata:
labels:
app: endurain
spec:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: hardware
operator: In
values: ["rpi5", "rpi4"]
- key: node-role.kubernetes.io/worker
operator: Exists
preferredDuringSchedulingIgnoredDuringExecution:
- weight: 90
preference:
matchExpressions:
- key: hardware
operator: In
values: ["rpi5"]
- weight: 70
preference:
matchExpressions:
- key: hardware
operator: In
values: ["rpi4"]
securityContext:
runAsUser: 1000
runAsGroup: 1000
fsGroup: 1000
fsGroupChangePolicy: OnRootMismatch
initContainers:
- name: init-data
image: alpine:3.20
command: ["/bin/sh", "-c"]
args:
- |
set -e
mkdir -p /data
chown -R 1000:1000 /data
securityContext:
runAsUser: 0
runAsGroup: 0
volumeMounts:
- name: endurain-data
mountPath: /data
containers:
- name: endurain
image: ghcr.io/endurain-project/endurain:v0.16.6
imagePullPolicy: IfNotPresent
ports:
- name: http
containerPort: 8080
env:
- name: ENDURAIN_HOST
value: https://endurain.bstein.dev
- name: BEHIND_PROXY
value: "true"
- name: LOG_LEVEL
value: info
- name: TZ
value: Etc/UTC
- name: DB_HOST
valueFrom:
secretKeyRef:
name: endurain-db
key: DB_HOST
- name: DB_PORT
valueFrom:
secretKeyRef:
name: endurain-db
key: DB_PORT
- name: DB_USER
valueFrom:
secretKeyRef:
name: endurain-db
key: DB_USER
- name: DB_PASSWORD
valueFrom:
secretKeyRef:
name: endurain-db
key: DB_PASSWORD
- name: DB_DATABASE
valueFrom:
secretKeyRef:
name: endurain-db
key: DB_DATABASE
- name: SECRET_KEY
valueFrom:
secretKeyRef:
name: endurain-secrets
key: SECRET_KEY
- name: FERNET_KEY
valueFrom:
secretKeyRef:
name: endurain-secrets
key: FERNET_KEY
volumeMounts:
- name: endurain-data
mountPath: /app/backend/data
readinessProbe:
httpGet:
path: /api/v1/about
port: http
initialDelaySeconds: 15
periodSeconds: 10
timeoutSeconds: 3
failureThreshold: 6
livenessProbe:
httpGet:
path: /api/v1/about
port: http
initialDelaySeconds: 30
periodSeconds: 20
timeoutSeconds: 3
failureThreshold: 6
resources:
requests:
cpu: 200m
memory: 512Mi
limits:
cpu: "1"
memory: 2Gi
volumes:
- name: endurain-data
persistentVolumeClaim:
claimName: endurain-data

View File

@ -1,79 +0,0 @@
# services/health/endurain-oidc-config-cronjob.yaml
apiVersion: batch/v1
kind: CronJob
metadata:
name: endurain-oidc-config
namespace: health
spec:
schedule: "*/30 * * * *"
concurrencyPolicy: Forbid
successfulJobsHistoryLimit: 1
failedJobsHistoryLimit: 3
jobTemplate:
spec:
backoffLimit: 1
template:
metadata:
annotations:
vault.hashicorp.com/agent-inject: "true"
vault.hashicorp.com/agent-pre-populate-only: "true"
vault.hashicorp.com/role: "health"
vault.hashicorp.com/agent-inject-secret-endurain-oidc-env: "kv/data/atlas/health/endurain-admin"
vault.hashicorp.com/agent-inject-template-endurain-oidc-env: |
{{- with secret "kv/data/atlas/health/endurain-admin" -}}
export ENDURAIN_ADMIN_USERNAME="{{ .Data.data.username }}"
export ENDURAIN_ADMIN_PASSWORD="{{ .Data.data.password }}"
{{- end }}
{{- with secret "kv/data/atlas/health/endurain-oidc" -}}
export ENDURAIN_OIDC_CLIENT_ID="{{ .Data.data.client_id }}"
export ENDURAIN_OIDC_CLIENT_SECRET="{{ .Data.data.client_secret }}"
export ENDURAIN_OIDC_ISSUER_URL="{{ .Data.data.issuer_url }}"
{{- end -}}
spec:
serviceAccountName: health-vault-sync
restartPolicy: Never
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: kubernetes.io/arch
operator: In
values: ["arm64"]
- key: node-role.kubernetes.io/worker
operator: Exists
preferredDuringSchedulingIgnoredDuringExecution:
- weight: 90
preference:
matchExpressions:
- key: hardware
operator: In
values: ["rpi5"]
- weight: 70
preference:
matchExpressions:
- key: hardware
operator: In
values: ["rpi4"]
containers:
- name: configure
image: alpine:3.20
command: ["/bin/sh", "-c"]
args:
- |
set -euo pipefail
apk add --no-cache bash curl jq >/dev/null
. /vault/secrets/endurain-oidc-env
exec /scripts/endurain_oidc_configure.sh
env:
- name: ENDURAIN_BASE_URL
value: http://endurain.health.svc.cluster.local
volumeMounts:
- name: endurain-oidc-config-script
mountPath: /scripts
readOnly: true
volumes:
- name: endurain-oidc-config-script
configMap:
name: endurain-oidc-config-script
defaultMode: 0555

View File

@ -5,26 +5,19 @@ namespace: health
resources:
- namespace.yaml
- serviceaccount.yaml
- secretproviderclass.yaml
- vault-sync-deployment.yaml
- endurain-data-pvc.yaml
- sparkyfitness-data-pvc.yaml
- endurain-oidc-config-cronjob.yaml
- sparkyfitness-oidc-config-cronjob.yaml
- endurain-deployment.yaml
- endurain-service.yaml
- sparkyfitness-server-deployment.yaml
- sparkyfitness-server-service.yaml
- sparkyfitness-frontend-deployment.yaml
- sparkyfitness-frontend-service.yaml
- endurain-ingress.yaml
- sparkyfitness-ingress.yaml
- wger-media-pvc.yaml
- wger-static-pvc.yaml
- wger-admin-ensure-cronjob.yaml
- wger-user-sync-cronjob.yaml
- wger-deployment.yaml
- wger-service.yaml
- wger-ingress.yaml
generatorOptions:
disableNameSuffixHash: true
configMapGenerator:
- name: endurain-oidc-config-script
- name: wger-nginx-config
files:
- endurain_oidc_configure.sh=scripts/endurain_oidc_configure.sh
- name: sparkyfitness-oidc-config-script
- default.conf=config/nginx.conf
- name: wger-user-sync-script
files:
- sparkyfitness_oidc_configure.sh=scripts/sparkyfitness_oidc_configure.sh
- wger_user_sync.py=scripts/wger_user_sync.py

View File

@ -1,134 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail
base_url="${ENDURAIN_BASE_URL:-http://endurain.health.svc.cluster.local}"
admin_username="${ENDURAIN_ADMIN_USERNAME:-admin}"
admin_password="${ENDURAIN_ADMIN_PASSWORD:?ENDURAIN_ADMIN_PASSWORD is required}"
default_password="${ENDURAIN_DEFAULT_ADMIN_PASSWORD:-admin}"
oidc_client_id="${ENDURAIN_OIDC_CLIENT_ID:?ENDURAIN_OIDC_CLIENT_ID is required}"
oidc_client_secret="${ENDURAIN_OIDC_CLIENT_SECRET:?ENDURAIN_OIDC_CLIENT_SECRET is required}"
oidc_issuer_url="${ENDURAIN_OIDC_ISSUER_URL:?ENDURAIN_OIDC_ISSUER_URL is required}"
wait_for_endurain() {
for attempt in 1 2 3 4 5 6 7 8 9 10; do
if curl -fsS "${base_url}/api/v1/about" >/dev/null 2>&1; then
return 0
fi
sleep $((attempt * 3))
done
return 1
}
login() {
local username="$1"
local password="$2"
local token
token="$(curl -sS -X POST "${base_url}/api/v1/auth/login" \
-H "X-Client-Type: mobile" \
-H "Content-Type: application/x-www-form-urlencoded" \
--data-urlencode "grant_type=password" \
--data-urlencode "username=${username}" \
--data-urlencode "password=${password}" | jq -r '.access_token' 2>/dev/null || true)"
if [ -n "${token}" ] && [ "${token}" != "null" ]; then
echo "${token}"
return 0
fi
return 1
}
if ! wait_for_endurain; then
echo "Endurain is not responding at ${base_url}" >&2
exit 1
fi
token="$(login "${admin_username}" "${admin_password}" || true)"
if [ -z "${token}" ]; then
token="$(login "${admin_username}" "${default_password}" || true)"
if [ -z "${token}" ]; then
echo "Failed to authenticate to Endurain as admin" >&2
exit 1
fi
if [ "${admin_password}" != "${default_password}" ]; then
user_id="$(curl -sS -H "Authorization: Bearer ${token}" -H "X-Client-Type: mobile" \
"${base_url}/api/v1/users/username/${admin_username}" | jq -r '.id' 2>/dev/null || true)"
if [ -z "${user_id}" ] || [ "${user_id}" = "null" ]; then
echo "Admin user ${admin_username} not found" >&2
exit 1
fi
update_payload="$(jq -nc --arg password "${admin_password}" '{password:$password}')"
status="$(curl -sS -o /dev/null -w "%{http_code}" -X PUT \
-H "Authorization: Bearer ${token}" \
-H "X-Client-Type: mobile" \
-H "Content-Type: application/json" \
-d "${update_payload}" \
"${base_url}/api/v1/users/${user_id}/password")"
if [ "${status}" != "200" ] && [ "${status}" != "201" ]; then
echo "Failed to rotate Endurain admin password (status ${status})" >&2
exit 1
fi
token="$(login "${admin_username}" "${admin_password}" || true)"
if [ -z "${token}" ]; then
echo "Failed to authenticate with rotated admin password" >&2
exit 1
fi
fi
fi
idp_payload="$(jq -nc \
--arg name "Keycloak" \
--arg slug "keycloak" \
--arg issuer_url "${oidc_issuer_url}" \
--arg scopes "openid profile email" \
--arg client_id "${oidc_client_id}" \
--arg client_secret "${oidc_client_secret}" \
--arg icon "keycloak" \
--argjson enabled true \
--argjson auto_create_users true \
--argjson sync_user_info true \
--argjson user_mapping '{"username":["preferred_username","username","email"],"email":["email","mail"],"name":["name","display_name","full_name"]}' \
'{name:$name,slug:$slug,provider_type:"oidc",enabled:$enabled,issuer_url:$issuer_url,scopes:$scopes,icon:$icon,auto_create_users:$auto_create_users,sync_user_info:$sync_user_info,user_mapping:$user_mapping,client_id:$client_id,client_secret:$client_secret}')"
idp_id="$(curl -sS -H "Authorization: Bearer ${token}" -H "X-Client-Type: mobile" \
"${base_url}/api/v1/idp" | jq -r '.[] | select(.slug=="keycloak") | .id' 2>/dev/null | head -n1 || true)"
if [ -n "${idp_id}" ] && [ "${idp_id}" != "null" ]; then
status="$(curl -sS -o /dev/null -w "%{http_code}" -X PUT \
-H "Authorization: Bearer ${token}" \
-H "X-Client-Type: mobile" \
-H "Content-Type: application/json" \
-d "${idp_payload}" \
"${base_url}/api/v1/idp/${idp_id}")"
else
status="$(curl -sS -o /dev/null -w "%{http_code}" -X POST \
-H "Authorization: Bearer ${token}" \
-H "X-Client-Type: mobile" \
-H "Content-Type: application/json" \
-d "${idp_payload}" \
"${base_url}/api/v1/idp")"
fi
if [ "${status}" != "200" ] && [ "${status}" != "201" ] && [ "${status}" != "204" ]; then
echo "Failed to upsert Endurain OIDC provider (status ${status})" >&2
exit 1
fi
settings_json="$(curl -sS -H "Authorization: Bearer ${token}" -H "X-Client-Type: mobile" \
"${base_url}/api/v1/server_settings")"
if [ -z "${settings_json}" ]; then
echo "Failed to fetch Endurain server settings" >&2
exit 1
fi
settings_payload="$(echo "${settings_json}" | jq \
'.sso_enabled=true | .sso_auto_redirect=true | .signup_enabled=false | .local_login_enabled=true')"
status="$(curl -sS -o /dev/null -w "%{http_code}" -X PUT \
-H "Authorization: Bearer ${token}" \
-H "X-Client-Type: mobile" \
-H "Content-Type: application/json" \
-d "${settings_payload}" \
"${base_url}/api/v1/server_settings")"
if [ "${status}" != "200" ] && [ "${status}" != "201" ]; then
echo "Failed to update Endurain server settings (status ${status})" >&2
exit 1
fi

View File

@ -1,134 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail
base_url="${SPARKYFITNESS_BASE_URL:-http://sparkyfitness-server.health.svc.cluster.local:3010}"
frontend_url="${SPARKYFITNESS_FRONTEND_URL:?SPARKYFITNESS_FRONTEND_URL is required}"
admin_email="${SPARKYFITNESS_ADMIN_EMAIL:?SPARKYFITNESS_ADMIN_EMAIL is required}"
admin_password="${SPARKYFITNESS_ADMIN_PASSWORD:?SPARKYFITNESS_ADMIN_PASSWORD is required}"
oidc_client_id="${SPARKYFITNESS_OIDC_CLIENT_ID:?SPARKYFITNESS_OIDC_CLIENT_ID is required}"
oidc_client_secret="${SPARKYFITNESS_OIDC_CLIENT_SECRET:?SPARKYFITNESS_OIDC_CLIENT_SECRET is required}"
oidc_issuer_url="${SPARKYFITNESS_OIDC_ISSUER_URL:?SPARKYFITNESS_OIDC_ISSUER_URL is required}"
wait_for_server() {
for attempt in 1 2 3 4 5 6 7 8 9 10; do
if curl -fsS "${base_url}/health" >/dev/null 2>&1; then
return 0
fi
sleep $((attempt * 3))
done
return 1
}
cookie_jar="$(mktemp)"
trap 'rm -f "${cookie_jar}"' EXIT
auth_login() {
local payload
payload="$(jq -nc --arg email "${admin_email}" --arg password "${admin_password}" '{email:$email,password:$password}')"
local status
status="$(curl -sS -o /tmp/sparkyfitness_login.json -w "%{http_code}" \
-c "${cookie_jar}" -b "${cookie_jar}" \
-H "Content-Type: application/json" \
-X POST "${base_url}/auth/login" \
-d "${payload}")"
if [ "${status}" = "200" ]; then
return 0
fi
return 1
}
auth_register() {
local payload
payload="$(jq -nc --arg email "${admin_email}" --arg password "${admin_password}" --arg full_name "Sparky Admin" '{email:$email,password:$password,full_name:$full_name}')"
curl -sS -o /tmp/sparkyfitness_register.json -w "%{http_code}" \
-c "${cookie_jar}" -b "${cookie_jar}" \
-H "Content-Type: application/json" \
-X POST "${base_url}/auth/register" \
-d "${payload}"
}
if ! wait_for_server; then
echo "SparkyFitness is not responding at ${base_url}" >&2
exit 1
fi
if ! auth_login; then
status="$(auth_register)"
if [ "${status}" = "409" ]; then
if ! auth_login; then
echo "Admin login failed after existing user detected" >&2
exit 1
fi
elif [ "${status}" = "201" ]; then
if ! auth_login; then
echo "Admin login failed after registration" >&2
exit 1
fi
elif [ "${status}" = "403" ]; then
echo "Registration disabled; unable to bootstrap admin user" >&2
exit 1
else
echo "Admin registration failed (status ${status})" >&2
exit 1
fi
fi
settings_json="$(curl -sS -b "${cookie_jar}" "${base_url}/admin/global-settings")"
if [ -z "${settings_json}" ]; then
echo "Failed to fetch SparkyFitness global settings" >&2
exit 1
fi
email_enabled="$(echo "${settings_json}" | jq -r '.enable_email_password_login // true')"
mfa_mandatory="$(echo "${settings_json}" | jq -r '.is_mfa_mandatory // .mfa_mandatory // false')"
settings_payload="$(jq -nc \
--argjson enable_email_password_login "${email_enabled}" \
--argjson is_oidc_active true \
--argjson is_mfa_mandatory "${mfa_mandatory}" \
'{enable_email_password_login:$enable_email_password_login,is_oidc_active:$is_oidc_active,is_mfa_mandatory:$is_mfa_mandatory}')"
status="$(curl -sS -o /dev/null -w "%{http_code}" -b "${cookie_jar}" \
-H "Content-Type: application/json" \
-X PUT "${base_url}/admin/global-settings" \
-d "${settings_payload}")"
if [ "${status}" != "200" ]; then
echo "Failed to update SparkyFitness global settings (status ${status})" >&2
exit 1
fi
providers_json="$(curl -sS -b "${cookie_jar}" "${base_url}/admin/oidc-settings")"
provider_id="$(echo "${providers_json}" | jq -r --arg issuer "${oidc_issuer_url}" '.[] | select(.issuer_url==$issuer) | .id' 2>/dev/null | head -n1 || true)"
redirect_uri="${frontend_url%/}/oidc-callback"
provider_payload="$(jq -nc \
--arg issuer_url "${oidc_issuer_url}" \
--arg client_id "${oidc_client_id}" \
--arg client_secret "${oidc_client_secret}" \
--arg redirect_uri "${redirect_uri}" \
--arg scope "openid profile email" \
--arg token_endpoint_auth_method "client_secret_post" \
--argjson response_types '["code"]' \
--argjson is_active true \
--arg display_name "Atlas SSO" \
--argjson auto_register true \
--arg signing_algorithm "RS256" \
--arg profile_signing_algorithm "none" \
--argjson timeout 30000 \
'{issuer_url:$issuer_url,client_id:$client_id,client_secret:$client_secret,redirect_uris:[$redirect_uri],scope:$scope,token_endpoint_auth_method:$token_endpoint_auth_method,response_types:$response_types,is_active:$is_active,display_name:$display_name,auto_register:$auto_register,signing_algorithm:$signing_algorithm,profile_signing_algorithm:$profile_signing_algorithm,timeout:$timeout}')"
if [ -n "${provider_id}" ] && [ "${provider_id}" != "null" ]; then
status="$(curl -sS -o /dev/null -w "%{http_code}" -b "${cookie_jar}" \
-H "Content-Type: application/json" \
-X PUT "${base_url}/admin/oidc-settings/${provider_id}" \
-d "${provider_payload}")"
else
status="$(curl -sS -o /dev/null -w "%{http_code}" -b "${cookie_jar}" \
-H "Content-Type: application/json" \
-X POST "${base_url}/admin/oidc-settings" \
-d "${provider_payload}")"
fi
if [ "${status}" != "200" ] && [ "${status}" != "201" ]; then
echo "Failed to upsert SparkyFitness OIDC provider (status ${status})" >&2
exit 1
fi

View File

@ -0,0 +1,120 @@
#!/usr/bin/env python3
from __future__ import annotations

import os
import sys

import django


def _env(name: str, default: str = "") -> str:
    value = os.getenv(name, default)
    return value.strip() if isinstance(value, str) else ""


def _setup_django() -> None:
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "settings.main")
    django.setup()


def _set_default_gym(user) -> None:
    try:
        from wger.gym.models import GymConfig
    except Exception:
        return
    try:
        config = GymConfig.objects.first()
    except Exception:
        return
    if not config or not getattr(config, "default_gym", None):
        return
    profile = getattr(user, "userprofile", None)
    if not profile or getattr(profile, "gym", None):
        return
    profile.gym = config.default_gym
    profile.save()


def _ensure_profile(user) -> None:
    profile = getattr(user, "userprofile", None)
    if not profile:
        return
    if hasattr(profile, "email_verified") and not profile.email_verified:
        profile.email_verified = True
    if hasattr(profile, "is_temporary") and profile.is_temporary:
        profile.is_temporary = False
    profile.save()


def _ensure_admin(username: str, password: str, email: str) -> None:
    from django.contrib.auth.models import User

    if not username or not password:
        raise RuntimeError("admin username/password missing")
    user, created = User.objects.get_or_create(username=username)
    if created:
        user.is_active = True
    if not user.is_staff:
        user.is_staff = True
    if email:
        user.email = email
    user.set_password(password)
    user.save()
    _ensure_profile(user)
    _set_default_gym(user)
    print(f"ensured admin user {username}")


def _ensure_user(username: str, password: str, email: str) -> None:
    from django.contrib.auth.models import User

    if not username or not password:
        raise RuntimeError("username/password missing")
    user, created = User.objects.get_or_create(username=username)
    if created:
        user.is_active = True
    if email and user.email != email:
        user.email = email
    user.set_password(password)
    user.save()
    _ensure_profile(user)
    _set_default_gym(user)
    action = "created" if created else "updated"
    print(f"{action} user {username}")


def main() -> int:
    admin_user = _env("WGER_ADMIN_USERNAME")
    admin_password = _env("WGER_ADMIN_PASSWORD")
    admin_email = _env("WGER_ADMIN_EMAIL")
    username = _env("WGER_USERNAME") or _env("ONLY_USERNAME")
    password = _env("WGER_PASSWORD")
    email = _env("WGER_EMAIL")
    if not any([admin_user and admin_password, username and password]):
        print("no admin or user payload provided; exiting")
        return 0
    _setup_django()
    if admin_user and admin_password:
        _ensure_admin(admin_user, admin_password, admin_email)
    if username and password:
        _ensure_user(username, password, email)
    return 0


if __name__ == "__main__":
    sys.exit(main())
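
The script is idempotent: get_or_create plus an unconditional set_password means repeated runs converge on the same state, which is why both CronJobs can reuse it. It assumes a configured wger Django environment; a standalone invocation would look something like this, with illustrative values:

    export DJANGO_SETTINGS_MODULE=settings.main
    . /vault/secrets/wger-env    # DB credentials and keys, as rendered by the agent
    WGER_USERNAME=jane WGER_PASSWORD=changeme WGER_EMAIL=jane@example.com \
        python /scripts/wger_user_sync.py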

View File

@ -1,167 +0,0 @@
# services/health/secretproviderclass.yaml
apiVersion: secrets-store.csi.x-k8s.io/v1
kind: SecretProviderClass
metadata:
name: health-vault
namespace: health
spec:
provider: vault
parameters:
vaultAddress: "http://vault.vault.svc.cluster.local:8200"
roleName: "health"
objects: |
- objectName: "endurain-db__DB_HOST"
secretPath: "kv/data/atlas/health/endurain-db"
secretKey: "DB_HOST"
- objectName: "endurain-db__DB_PORT"
secretPath: "kv/data/atlas/health/endurain-db"
secretKey: "DB_PORT"
- objectName: "endurain-db__DB_USER"
secretPath: "kv/data/atlas/health/endurain-db"
secretKey: "DB_USER"
- objectName: "endurain-db__DB_PASSWORD"
secretPath: "kv/data/atlas/health/endurain-db"
secretKey: "DB_PASSWORD"
- objectName: "endurain-db__DB_DATABASE"
secretPath: "kv/data/atlas/health/endurain-db"
secretKey: "DB_DATABASE"
- objectName: "endurain-secrets__SECRET_KEY"
secretPath: "kv/data/atlas/health/endurain-secrets"
secretKey: "SECRET_KEY"
- objectName: "endurain-secrets__FERNET_KEY"
secretPath: "kv/data/atlas/health/endurain-secrets"
secretKey: "FERNET_KEY"
- objectName: "endurain-admin__username"
secretPath: "kv/data/atlas/health/endurain-admin"
secretKey: "username"
- objectName: "endurain-admin__password"
secretPath: "kv/data/atlas/health/endurain-admin"
secretKey: "password"
- objectName: "endurain-oidc__client_id"
secretPath: "kv/data/atlas/health/endurain-oidc"
secretKey: "client_id"
- objectName: "endurain-oidc__client_secret"
secretPath: "kv/data/atlas/health/endurain-oidc"
secretKey: "client_secret"
- objectName: "endurain-oidc__issuer_url"
secretPath: "kv/data/atlas/health/endurain-oidc"
secretKey: "issuer_url"
- objectName: "sparkyfitness-db__SPARKY_FITNESS_DB_HOST"
secretPath: "kv/data/atlas/health/sparkyfitness-db"
secretKey: "SPARKY_FITNESS_DB_HOST"
- objectName: "sparkyfitness-db__SPARKY_FITNESS_DB_PORT"
secretPath: "kv/data/atlas/health/sparkyfitness-db"
secretKey: "SPARKY_FITNESS_DB_PORT"
- objectName: "sparkyfitness-db__SPARKY_FITNESS_DB_NAME"
secretPath: "kv/data/atlas/health/sparkyfitness-db"
secretKey: "SPARKY_FITNESS_DB_NAME"
- objectName: "sparkyfitness-db__SPARKY_FITNESS_DB_USER"
secretPath: "kv/data/atlas/health/sparkyfitness-db"
secretKey: "SPARKY_FITNESS_DB_USER"
- objectName: "sparkyfitness-db__SPARKY_FITNESS_DB_PASSWORD"
secretPath: "kv/data/atlas/health/sparkyfitness-db"
secretKey: "SPARKY_FITNESS_DB_PASSWORD"
- objectName: "sparkyfitness-db__SPARKY_FITNESS_APP_DB_USER"
secretPath: "kv/data/atlas/health/sparkyfitness-db"
secretKey: "SPARKY_FITNESS_APP_DB_USER"
- objectName: "sparkyfitness-db__SPARKY_FITNESS_APP_DB_PASSWORD"
secretPath: "kv/data/atlas/health/sparkyfitness-db"
secretKey: "SPARKY_FITNESS_APP_DB_PASSWORD"
- objectName: "sparkyfitness-secrets__JWT_SECRET"
secretPath: "kv/data/atlas/health/sparkyfitness-secrets"
secretKey: "JWT_SECRET"
- objectName: "sparkyfitness-secrets__SPARKY_FITNESS_API_ENCRYPTION_KEY"
secretPath: "kv/data/atlas/health/sparkyfitness-secrets"
secretKey: "SPARKY_FITNESS_API_ENCRYPTION_KEY"
- objectName: "sparkyfitness-admin__email"
secretPath: "kv/data/atlas/health/sparkyfitness-admin"
secretKey: "email"
- objectName: "sparkyfitness-admin__password"
secretPath: "kv/data/atlas/health/sparkyfitness-admin"
secretKey: "password"
- objectName: "sparkyfitness-oidc__client_id"
secretPath: "kv/data/atlas/health/sparkyfitness-oidc"
secretKey: "client_id"
- objectName: "sparkyfitness-oidc__client_secret"
secretPath: "kv/data/atlas/health/sparkyfitness-oidc"
secretKey: "client_secret"
- objectName: "sparkyfitness-oidc__issuer_url"
secretPath: "kv/data/atlas/health/sparkyfitness-oidc"
secretKey: "issuer_url"
secretObjects:
- secretName: endurain-db
type: Opaque
data:
- objectName: endurain-db__DB_HOST
key: DB_HOST
- objectName: endurain-db__DB_PORT
key: DB_PORT
- objectName: endurain-db__DB_USER
key: DB_USER
- objectName: endurain-db__DB_PASSWORD
key: DB_PASSWORD
- objectName: endurain-db__DB_DATABASE
key: DB_DATABASE
- secretName: endurain-secrets
type: Opaque
data:
- objectName: endurain-secrets__SECRET_KEY
key: SECRET_KEY
- objectName: endurain-secrets__FERNET_KEY
key: FERNET_KEY
- secretName: endurain-admin
type: Opaque
data:
- objectName: endurain-admin__username
key: username
- objectName: endurain-admin__password
key: password
- secretName: endurain-oidc
type: Opaque
data:
- objectName: endurain-oidc__client_id
key: client_id
- objectName: endurain-oidc__client_secret
key: client_secret
- objectName: endurain-oidc__issuer_url
key: issuer_url
- secretName: sparkyfitness-db
type: Opaque
data:
- objectName: sparkyfitness-db__SPARKY_FITNESS_DB_HOST
key: SPARKY_FITNESS_DB_HOST
- objectName: sparkyfitness-db__SPARKY_FITNESS_DB_PORT
key: SPARKY_FITNESS_DB_PORT
- objectName: sparkyfitness-db__SPARKY_FITNESS_DB_NAME
key: SPARKY_FITNESS_DB_NAME
- objectName: sparkyfitness-db__SPARKY_FITNESS_DB_USER
key: SPARKY_FITNESS_DB_USER
- objectName: sparkyfitness-db__SPARKY_FITNESS_DB_PASSWORD
key: SPARKY_FITNESS_DB_PASSWORD
- objectName: sparkyfitness-db__SPARKY_FITNESS_APP_DB_USER
key: SPARKY_FITNESS_APP_DB_USER
- objectName: sparkyfitness-db__SPARKY_FITNESS_APP_DB_PASSWORD
key: SPARKY_FITNESS_APP_DB_PASSWORD
- secretName: sparkyfitness-secrets
type: Opaque
data:
- objectName: sparkyfitness-secrets__JWT_SECRET
key: JWT_SECRET
- objectName: sparkyfitness-secrets__SPARKY_FITNESS_API_ENCRYPTION_KEY
key: SPARKY_FITNESS_API_ENCRYPTION_KEY
- secretName: sparkyfitness-admin
type: Opaque
data:
- objectName: sparkyfitness-admin__email
key: email
- objectName: sparkyfitness-admin__password
key: password
- secretName: sparkyfitness-oidc
type: Opaque
data:
- objectName: sparkyfitness-oidc__client_id
key: client_id
- objectName: sparkyfitness-oidc__client_secret
key: client_secret
- objectName: sparkyfitness-oidc__issuer_url
key: issuer_url

View File

@ -1,81 +0,0 @@
# services/health/sparkyfitness-frontend-deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: sparkyfitness-frontend
namespace: health
labels:
app: sparkyfitness-frontend
spec:
replicas: 1
selector:
matchLabels:
app: sparkyfitness-frontend
strategy:
type: RollingUpdate
rollingUpdate:
maxSurge: 0
maxUnavailable: 1
template:
metadata:
labels:
app: sparkyfitness-frontend
spec:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: hardware
operator: In
values: ["rpi5", "rpi4"]
- key: node-role.kubernetes.io/worker
operator: Exists
preferredDuringSchedulingIgnoredDuringExecution:
- weight: 90
preference:
matchExpressions:
- key: hardware
operator: In
values: ["rpi5"]
- weight: 70
preference:
matchExpressions:
- key: hardware
operator: In
values: ["rpi4"]
containers:
- name: sparkyfitness-frontend
image: codewithcj/sparkyfitness:0.16.3.3
imagePullPolicy: IfNotPresent
ports:
- name: http
containerPort: 80
env:
- name: SPARKY_FITNESS_SERVER_HOST
value: sparkyfitness-server
- name: SPARKY_FITNESS_SERVER_PORT
value: "3010"
readinessProbe:
httpGet:
path: /
port: http
initialDelaySeconds: 10
periodSeconds: 10
timeoutSeconds: 3
failureThreshold: 6
livenessProbe:
httpGet:
path: /
port: http
initialDelaySeconds: 30
periodSeconds: 20
timeoutSeconds: 3
failureThreshold: 6
resources:
requests:
cpu: 100m
memory: 256Mi
limits:
cpu: 500m
memory: 512Mi

View File

@ -1,15 +0,0 @@
# services/health/sparkyfitness-frontend-service.yaml
apiVersion: v1
kind: Service
metadata:
name: sparkyfitness-frontend
namespace: health
labels:
app: sparkyfitness-frontend
spec:
selector:
app: sparkyfitness-frontend
ports:
- name: http
port: 80
targetPort: http

View File

@ -1,26 +0,0 @@
# services/health/sparkyfitness-ingress.yaml
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: sparkyfitness
namespace: health
annotations:
kubernetes.io/ingress.class: traefik
traefik.ingress.kubernetes.io/router.entrypoints: websecure
traefik.ingress.kubernetes.io/router.tls: "true"
cert-manager.io/cluster-issuer: letsencrypt
spec:
tls:
- hosts: ["sparkyfitness.bstein.dev"]
secretName: sparkyfitness-tls
rules:
- host: sparkyfitness.bstein.dev
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: sparkyfitness-frontend
port:
number: 80

View File

@ -1,81 +0,0 @@
# services/health/sparkyfitness-oidc-config-cronjob.yaml
apiVersion: batch/v1
kind: CronJob
metadata:
name: sparkyfitness-oidc-config
namespace: health
spec:
schedule: "*/30 * * * *"
concurrencyPolicy: Forbid
successfulJobsHistoryLimit: 1
failedJobsHistoryLimit: 3
jobTemplate:
spec:
backoffLimit: 1
template:
metadata:
annotations:
vault.hashicorp.com/agent-inject: "true"
vault.hashicorp.com/agent-pre-populate-only: "true"
vault.hashicorp.com/role: "health"
vault.hashicorp.com/agent-inject-secret-sparky-oidc-env: "kv/data/atlas/health/sparkyfitness-admin"
vault.hashicorp.com/agent-inject-template-sparky-oidc-env: |
{{- with secret "kv/data/atlas/health/sparkyfitness-admin" -}}
export SPARKYFITNESS_ADMIN_EMAIL="{{ .Data.data.email }}"
export SPARKYFITNESS_ADMIN_PASSWORD="{{ .Data.data.password }}"
{{- end }}
{{- with secret "kv/data/atlas/health/sparkyfitness-oidc" -}}
export SPARKYFITNESS_OIDC_CLIENT_ID="{{ .Data.data.client_id }}"
export SPARKYFITNESS_OIDC_CLIENT_SECRET="{{ .Data.data.client_secret }}"
export SPARKYFITNESS_OIDC_ISSUER_URL="{{ .Data.data.issuer_url }}"
{{- end -}}
spec:
serviceAccountName: health-vault-sync
restartPolicy: Never
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: kubernetes.io/arch
operator: In
values: ["arm64"]
- key: node-role.kubernetes.io/worker
operator: Exists
preferredDuringSchedulingIgnoredDuringExecution:
- weight: 90
preference:
matchExpressions:
- key: hardware
operator: In
values: ["rpi5"]
- weight: 70
preference:
matchExpressions:
- key: hardware
operator: In
values: ["rpi4"]
containers:
- name: configure
image: alpine:3.20
command: ["/bin/sh", "-c"]
args:
- |
set -euo pipefail
apk add --no-cache bash curl jq >/dev/null
. /vault/secrets/sparky-oidc-env
exec /scripts/sparkyfitness_oidc_configure.sh
env:
- name: SPARKYFITNESS_BASE_URL
value: http://sparkyfitness-server.health.svc.cluster.local:3010
- name: SPARKYFITNESS_FRONTEND_URL
value: https://sparkyfitness.bstein.dev
volumeMounts:
- name: sparkyfitness-oidc-config-script
mountPath: /scripts
readOnly: true
volumes:
- name: sparkyfitness-oidc-config-script
configMap:
name: sparkyfitness-oidc-config-script
defaultMode: 0555

View File

@ -1,170 +0,0 @@
# services/health/sparkyfitness-server-deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: sparkyfitness-server
namespace: health
labels:
app: sparkyfitness-server
spec:
replicas: 1
selector:
matchLabels:
app: sparkyfitness-server
strategy:
type: RollingUpdate
rollingUpdate:
maxSurge: 0
maxUnavailable: 1
template:
metadata:
labels:
app: sparkyfitness-server
spec:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: hardware
operator: In
values: ["rpi5", "rpi4"]
- key: node-role.kubernetes.io/worker
operator: Exists
preferredDuringSchedulingIgnoredDuringExecution:
- weight: 90
preference:
matchExpressions:
- key: hardware
operator: In
values: ["rpi5"]
- weight: 70
preference:
matchExpressions:
- key: hardware
operator: In
values: ["rpi4"]
securityContext:
runAsUser: 1000
runAsGroup: 1000
fsGroup: 1000
fsGroupChangePolicy: OnRootMismatch
initContainers:
- name: init-data
image: alpine:3.20
command: ["/bin/sh", "-c"]
args:
- |
set -e
mkdir -p /data/uploads /data/backup
chown -R 1000:1000 /data
securityContext:
runAsUser: 0
runAsGroup: 0
volumeMounts:
- name: sparkyfitness-data
mountPath: /data
containers:
- name: sparkyfitness-server
image: codewithcj/sparkyfitness_server:0.16.3.3
imagePullPolicy: IfNotPresent
ports:
- name: http
containerPort: 3010
env:
- name: SPARKY_FITNESS_SERVER_PORT
value: "3010"
- name: SPARKY_FITNESS_LOG_LEVEL
value: INFO
- name: NODE_ENV
value: production
- name: TZ
value: Etc/UTC
- name: SPARKY_FITNESS_FRONTEND_URL
value: https://sparkyfitness.bstein.dev
- name: SPARKY_FITNESS_DISABLE_SIGNUP
value: "false"
- name: SPARKY_FITNESS_DB_HOST
valueFrom:
secretKeyRef:
name: sparkyfitness-db
key: SPARKY_FITNESS_DB_HOST
- name: SPARKY_FITNESS_DB_PORT
valueFrom:
secretKeyRef:
name: sparkyfitness-db
key: SPARKY_FITNESS_DB_PORT
- name: SPARKY_FITNESS_DB_NAME
valueFrom:
secretKeyRef:
name: sparkyfitness-db
key: SPARKY_FITNESS_DB_NAME
- name: SPARKY_FITNESS_DB_USER
valueFrom:
secretKeyRef:
name: sparkyfitness-db
key: SPARKY_FITNESS_DB_USER
- name: SPARKY_FITNESS_DB_PASSWORD
valueFrom:
secretKeyRef:
name: sparkyfitness-db
key: SPARKY_FITNESS_DB_PASSWORD
- name: SPARKY_FITNESS_APP_DB_USER
valueFrom:
secretKeyRef:
name: sparkyfitness-db
key: SPARKY_FITNESS_APP_DB_USER
- name: SPARKY_FITNESS_APP_DB_PASSWORD
valueFrom:
secretKeyRef:
name: sparkyfitness-db
key: SPARKY_FITNESS_APP_DB_PASSWORD
- name: SPARKY_FITNESS_API_ENCRYPTION_KEY
valueFrom:
secretKeyRef:
name: sparkyfitness-secrets
key: SPARKY_FITNESS_API_ENCRYPTION_KEY
- name: JWT_SECRET
valueFrom:
secretKeyRef:
name: sparkyfitness-secrets
key: JWT_SECRET
- name: SPARKY_FITNESS_ADMIN_EMAIL
valueFrom:
secretKeyRef:
name: sparkyfitness-admin
key: email
volumeMounts:
- name: sparkyfitness-data
mountPath: /app/SparkyFitnessServer/uploads
subPath: uploads
- name: sparkyfitness-data
mountPath: /app/SparkyFitnessServer/backup
subPath: backup
readinessProbe:
httpGet:
path: /health
port: http
initialDelaySeconds: 15
periodSeconds: 10
timeoutSeconds: 3
failureThreshold: 6
livenessProbe:
httpGet:
path: /health
port: http
initialDelaySeconds: 30
periodSeconds: 20
timeoutSeconds: 3
failureThreshold: 6
resources:
requests:
cpu: 200m
memory: 512Mi
limits:
cpu: "1"
memory: 2Gi
volumes:
- name: sparkyfitness-data
persistentVolumeClaim:
claimName: sparkyfitness-data

View File

@ -1,15 +0,0 @@
# services/health/sparkyfitness-server-service.yaml
apiVersion: v1
kind: Service
metadata:
name: sparkyfitness-server
namespace: health
labels:
app: sparkyfitness-server
spec:
selector:
app: sparkyfitness-server
ports:
- name: http
port: 3010
targetPort: http

View File

@ -1,34 +0,0 @@
# services/health/vault-sync-deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: health-vault-sync
namespace: health
spec:
replicas: 1
selector:
matchLabels:
app: health-vault-sync
template:
metadata:
labels:
app: health-vault-sync
spec:
serviceAccountName: health-vault-sync
containers:
- name: sync
image: alpine:3.20
command: ["/bin/sh", "-c"]
args:
- "sleep infinity"
volumeMounts:
- name: vault-secrets
mountPath: /vault/secrets
readOnly: true
volumes:
- name: vault-secrets
csi:
driver: secrets-store.csi.k8s.io
readOnly: true
volumeAttributes:
secretProviderClass: health-vault

View File

@ -0,0 +1,92 @@
# services/health/wger-admin-ensure-cronjob.yaml
apiVersion: batch/v1
kind: CronJob
metadata:
name: wger-admin-ensure
namespace: health
spec:
schedule: "15 3 * * *"
concurrencyPolicy: Forbid
successfulJobsHistoryLimit: 1
failedJobsHistoryLimit: 3
jobTemplate:
spec:
backoffLimit: 1
template:
metadata:
annotations:
vault.hashicorp.com/agent-inject: "true"
vault.hashicorp.com/agent-pre-populate-only: "true"
vault.hashicorp.com/role: "health"
vault.hashicorp.com/agent-inject-secret-wger-env: "kv/data/atlas/health/wger-db"
vault.hashicorp.com/agent-inject-template-wger-env: |
{{- with secret "kv/data/atlas/health/wger-db" -}}
export DJANGO_DB_HOST="{{ .Data.data.DJANGO_DB_HOST }}"
export DJANGO_DB_PORT="{{ .Data.data.DJANGO_DB_PORT }}"
export DJANGO_DB_DATABASE="{{ .Data.data.DJANGO_DB_DATABASE }}"
export DJANGO_DB_USER="{{ .Data.data.DJANGO_DB_USER }}"
export DJANGO_DB_PASSWORD="{{ .Data.data.DJANGO_DB_PASSWORD }}"
{{- end }}
{{- with secret "kv/data/atlas/health/wger-secrets" -}}
export SECRET_KEY="{{ .Data.data.SECRET_KEY }}"
export SIGNING_KEY="{{ .Data.data.SIGNING_KEY }}"
{{- end }}
{{- with secret "kv/data/atlas/health/wger-admin" -}}
export WGER_ADMIN_USERNAME="{{ .Data.data.username }}"
export WGER_ADMIN_PASSWORD="{{ .Data.data.password }}"
{{- end -}}
spec:
serviceAccountName: health-vault-sync
restartPolicy: Never
affinity:
nodeAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- weight: 100
preference:
matchExpressions:
- key: hardware
operator: In
values: ["rpi5"]
- weight: 70
preference:
matchExpressions:
- key: hardware
operator: In
values: ["rpi4"]
nodeSelector:
kubernetes.io/arch: arm64
node-role.kubernetes.io/worker: "true"
containers:
- name: ensure
image: wger/server@sha256:710588b78af4e0aa0b4d8a8061e4563e16eae80eeaccfe7f9e0d9cbdd7f0cbc5
imagePullPolicy: IfNotPresent
command: ["/bin/sh", "-c"]
args:
- |
set -eu
. /vault/secrets/wger-env
exec python /scripts/wger_user_sync.py
env:
- name: SITE_URL
value: https://health.bstein.dev
- name: TIME_ZONE
value: Etc/UTC
- name: TZ
value: Etc/UTC
- name: DJANGO_DEBUG
value: "False"
- name: DJANGO_DB_ENGINE
value: django.db.backends.postgresql
- name: DJANGO_CACHE_BACKEND
value: django.core.cache.backends.locmem.LocMemCache
- name: DJANGO_CACHE_LOCATION
value: wger-cache
volumeMounts:
- name: wger-user-sync-script
mountPath: /scripts
readOnly: true
volumes:
- name: wger-user-sync-script
configMap:
name: wger-user-sync-script
defaultMode: 0555
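
This nightly job runs the same sync script with only the WGER_ADMIN_* variables populated, so it upserts just the admin account. It can also be fired once right after a fresh deploy rather than waiting for the schedule:

    kubectl -n health create job wger-admin-ensure-now --from=cronjob/wger-admin-ensure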

View File

@ -0,0 +1,212 @@
# services/health/wger-deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: wger
namespace: health
spec:
replicas: 1
revisionHistoryLimit: 3
selector:
matchLabels:
app: wger
template:
metadata:
labels:
app: wger
annotations:
vault.hashicorp.com/agent-inject: "true"
vault.hashicorp.com/role: "health"
vault.hashicorp.com/agent-inject-secret-wger-env: "kv/data/atlas/health/wger-db"
vault.hashicorp.com/agent-inject-template-wger-env: |
{{- with secret "kv/data/atlas/health/wger-db" -}}
export DJANGO_DB_HOST="{{ .Data.data.DJANGO_DB_HOST }}"
export DJANGO_DB_PORT="{{ .Data.data.DJANGO_DB_PORT }}"
export DJANGO_DB_DATABASE="{{ .Data.data.DJANGO_DB_DATABASE }}"
export DJANGO_DB_USER="{{ .Data.data.DJANGO_DB_USER }}"
export DJANGO_DB_PASSWORD="{{ .Data.data.DJANGO_DB_PASSWORD }}"
{{- end }}
{{- with secret "kv/data/atlas/health/wger-secrets" -}}
export SECRET_KEY="{{ .Data.data.SECRET_KEY }}"
export SIGNING_KEY="{{ .Data.data.SIGNING_KEY }}"
{{- end -}}
spec:
affinity:
nodeAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- weight: 100
preference:
matchExpressions:
- key: hardware
operator: In
values: ["rpi5"]
- weight: 70
preference:
matchExpressions:
- key: hardware
operator: In
values: ["rpi4"]
securityContext:
fsGroup: 1000
fsGroupChangePolicy: OnRootMismatch
nodeSelector:
kubernetes.io/arch: arm64
node-role.kubernetes.io/worker: "true"
serviceAccountName: health-vault-sync
initContainers:
- name: init-storage
image: alpine:3.20
command: ["/bin/sh", "-c"]
args:
- |
set -e
mkdir -p /wger/static /wger/media
chown -R 1000:1000 /wger
securityContext:
runAsUser: 0
runAsGroup: 0
volumeMounts:
- name: wger-static
mountPath: /wger/static
- name: wger-media
mountPath: /wger/media
containers:
- name: wger
image: wger/server@sha256:710588b78af4e0aa0b4d8a8061e4563e16eae80eeaccfe7f9e0d9cbdd7f0cbc5
imagePullPolicy: IfNotPresent
command: ["/bin/sh", "-c"]
args:
- |
set -eu
. /vault/secrets/wger-env
exec /home/wger/entrypoint.sh
ports:
- name: app
containerPort: 8000
env:
- name: SITE_URL
value: https://health.bstein.dev
- name: CSRF_TRUSTED_ORIGINS
value: https://health.bstein.dev
- name: X_FORWARDED_PROTO_HEADER_SET
value: "true"
- name: NUMBER_OF_PROXIES
value: "1"
- name: TIME_ZONE
value: Etc/UTC
- name: TZ
value: Etc/UTC
- name: DJANGO_DEBUG
value: "False"
- name: DJANGO_PERFORM_MIGRATIONS
value: "True"
- name: DJANGO_DB_ENGINE
value: django.db.backends.postgresql
- name: DJANGO_CACHE_BACKEND
value: django.core.cache.backends.locmem.LocMemCache
- name: DJANGO_CACHE_LOCATION
value: wger-cache
- name: DJANGO_CACHE_TIMEOUT
value: "3600"
- name: ALLOW_REGISTRATION
value: "False"
- name: ALLOW_GUEST_USERS
value: "False"
- name: ALLOW_UPLOAD_VIDEOS
value: "False"
- name: USE_CELERY
value: "False"
- name: SYNC_EXERCISES_CELERY
value: "False"
- name: SYNC_INGREDIENTS_CELERY
value: "False"
- name: SYNC_EXERCISE_IMAGES_CELERY
value: "False"
- name: SYNC_EXERCISE_VIDEOS_CELERY
value: "False"
- name: CACHE_API_EXERCISES_CELERY
value: "False"
- name: DOWNLOAD_INGREDIENTS_FROM
value: "None"
- name: ENABLE_EMAIL
value: "False"
volumeMounts:
- name: wger-static
mountPath: /home/wger/static
- name: wger-media
mountPath: /home/wger/media
readinessProbe:
httpGet:
path: /api/v2/version/
port: app
initialDelaySeconds: 20
periodSeconds: 10
timeoutSeconds: 3
failureThreshold: 6
livenessProbe:
httpGet:
path: /api/v2/version/
port: app
initialDelaySeconds: 45
periodSeconds: 20
timeoutSeconds: 3
failureThreshold: 6
resources:
requests:
cpu: 200m
memory: 512Mi
limits:
cpu: "1"
memory: 2Gi
- name: nginx
image: nginx:1.27.5-alpine@sha256:65645c7bb6a0661892a8b03b89d0743208a18dd2f3f17a54ef4b76fb8e2f2a10
imagePullPolicy: IfNotPresent
ports:
- name: http
containerPort: 8080
securityContext:
runAsUser: 101
runAsGroup: 101
allowPrivilegeEscalation: false
volumeMounts:
- name: wger-nginx-config
mountPath: /etc/nginx/conf.d/default.conf
subPath: default.conf
- name: wger-static
mountPath: /wger/static
- name: wger-media
mountPath: /wger/media
readinessProbe:
httpGet:
path: /api/v2/version/
port: http
initialDelaySeconds: 10
periodSeconds: 10
timeoutSeconds: 3
failureThreshold: 6
livenessProbe:
httpGet:
path: /api/v2/version/
port: http
initialDelaySeconds: 30
periodSeconds: 20
timeoutSeconds: 3
failureThreshold: 6
resources:
requests:
cpu: 50m
memory: 64Mi
limits:
cpu: 200m
memory: 256Mi
volumes:
- name: wger-static
persistentVolumeClaim:
claimName: wger-static
- name: wger-media
persistentVolumeClaim:
claimName: wger-media
- name: wger-nginx-config
configMap:
name: wger-nginx-config
defaultMode: 0444
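
Note the mount-point split: the wger container writes collected static files and uploads under /home/wger/static and /home/wger/media, while the nginx sidecar mounts the same two PVCs at /wger/static and /wger/media, matching the alias directives in the ConfigMap above. A quick post-rollout sanity check (names assumed):

    kubectl -n health exec deploy/wger -c nginx -- ls /wger/static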

View File

@ -1,8 +1,8 @@
# services/health/endurain-ingress.yaml
# services/health/wger-ingress.yaml
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: endurain
name: wger
namespace: health
annotations:
kubernetes.io/ingress.class: traefik
@ -11,16 +11,16 @@ metadata:
cert-manager.io/cluster-issuer: letsencrypt
spec:
tls:
- hosts: ["endurain.bstein.dev"]
secretName: endurain-tls
- hosts: ["health.bstein.dev"]
secretName: wger-tls
rules:
- host: endurain.bstein.dev
- host: health.bstein.dev
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: endurain
name: wger
port:
number: 80

View File

@ -1,12 +1,12 @@
# services/health/endurain-data-pvc.yaml
# services/health/wger-media-pvc.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: endurain-data
name: wger-media
namespace: health
spec:
accessModes: ["ReadWriteOnce"]
storageClassName: asteria
resources:
requests:
storage: 10Gi
storage: 20Gi

View File

@ -1,14 +1,12 @@
# services/health/endurain-service.yaml
# services/health/wger-service.yaml
apiVersion: v1
kind: Service
metadata:
name: endurain
name: wger
namespace: health
labels:
app: endurain
spec:
selector:
app: endurain
app: wger
ports:
- name: http
port: 80

View File

@ -1,12 +1,12 @@
# services/health/sparkyfitness-data-pvc.yaml
# services/health/wger-static-pvc.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: sparkyfitness-data
name: wger-static
namespace: health
spec:
accessModes: ["ReadWriteOnce"]
storageClassName: asteria
resources:
requests:
storage: 10Gi
storage: 5Gi

View File

@ -0,0 +1,89 @@
# services/health/wger-user-sync-cronjob.yaml
apiVersion: batch/v1
kind: CronJob
metadata:
name: wger-user-sync
namespace: health
spec:
schedule: "0 5 * * *"
suspend: true
concurrencyPolicy: Forbid
successfulJobsHistoryLimit: 1
failedJobsHistoryLimit: 3
jobTemplate:
spec:
backoffLimit: 0
template:
metadata:
annotations:
vault.hashicorp.com/agent-inject: "true"
vault.hashicorp.com/agent-pre-populate-only: "true"
vault.hashicorp.com/role: "health"
vault.hashicorp.com/agent-inject-secret-wger-env: "kv/data/atlas/health/wger-db"
vault.hashicorp.com/agent-inject-template-wger-env: |
{{- with secret "kv/data/atlas/health/wger-db" -}}
export DJANGO_DB_HOST="{{ .Data.data.DJANGO_DB_HOST }}"
export DJANGO_DB_PORT="{{ .Data.data.DJANGO_DB_PORT }}"
export DJANGO_DB_DATABASE="{{ .Data.data.DJANGO_DB_DATABASE }}"
export DJANGO_DB_USER="{{ .Data.data.DJANGO_DB_USER }}"
export DJANGO_DB_PASSWORD="{{ .Data.data.DJANGO_DB_PASSWORD }}"
{{- end }}
{{- with secret "kv/data/atlas/health/wger-secrets" -}}
export SECRET_KEY="{{ .Data.data.SECRET_KEY }}"
export SIGNING_KEY="{{ .Data.data.SIGNING_KEY }}"
{{- end -}}
spec:
serviceAccountName: health-vault-sync
restartPolicy: Never
affinity:
nodeAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- weight: 100
preference:
matchExpressions:
- key: hardware
operator: In
values: ["rpi5"]
- weight: 70
preference:
matchExpressions:
- key: hardware
operator: In
values: ["rpi4"]
nodeSelector:
kubernetes.io/arch: arm64
node-role.kubernetes.io/worker: "true"
containers:
- name: sync
image: wger/server@sha256:710588b78af4e0aa0b4d8a8061e4563e16eae80eeaccfe7f9e0d9cbdd7f0cbc5
imagePullPolicy: IfNotPresent
command: ["/bin/sh", "-c"]
args:
- |
set -eu
. /vault/secrets/wger-env
exec python /scripts/wger_user_sync.py
env:
- name: SITE_URL
value: https://health.bstein.dev
- name: TIME_ZONE
value: Etc/UTC
- name: TZ
value: Etc/UTC
- name: DJANGO_DEBUG
value: "False"
- name: DJANGO_DB_ENGINE
value: django.db.backends.postgresql
- name: DJANGO_CACHE_BACKEND
value: django.core.cache.backends.locmem.LocMemCache
- name: DJANGO_CACHE_LOCATION
value: wger-cache
volumeMounts:
- name: wger-user-sync-script
mountPath: /scripts
readOnly: true
volumes:
- name: wger-user-sync-script
configMap:
name: wger-user-sync-script
defaultMode: 0555

View File

@ -17,6 +17,27 @@ spec:
metadata:
labels:
app: jenkins
annotations:
vault.hashicorp.com/agent-inject: "true"
vault.hashicorp.com/role: "jenkins"
vault.hashicorp.com/agent-inject-secret-jenkins-env: "kv/data/atlas/jenkins/jenkins-oidc"
vault.hashicorp.com/agent-inject-template-jenkins-env: |
{{- with secret "kv/data/atlas/jenkins/jenkins-oidc" -}}
export OIDC_CLIENT_ID="{{ .Data.data.clientId }}"
export OIDC_CLIENT_SECRET="{{ .Data.data.clientSecret }}"
export OIDC_AUTH_URL="{{ .Data.data.authorizationUrl }}"
export OIDC_TOKEN_URL="{{ .Data.data.tokenUrl }}"
export OIDC_USERINFO_URL="{{ .Data.data.userInfoUrl }}"
export OIDC_LOGOUT_URL="{{ .Data.data.logoutUrl }}"
{{- end }}
{{- with secret "kv/data/atlas/jenkins/harbor-robot-creds" -}}
export HARBOR_ROBOT_USERNAME="{{ .Data.data.username }}"
export HARBOR_ROBOT_PASSWORD="{{ .Data.data.password }}"
{{- end }}
{{- with secret "kv/data/atlas/jenkins/gitea-pat" -}}
export GITEA_PAT_USERNAME="{{ .Data.data.username }}"
export GITEA_PAT_TOKEN="{{ .Data.data.token }}"
{{- end -}}
spec:
serviceAccountName: jenkins
nodeSelector:
@ -63,6 +84,13 @@ spec:
- name: jenkins
image: jenkins/jenkins:2.528.3-jdk21
imagePullPolicy: IfNotPresent
command:
- /bin/sh
- -c
- |
set -eu
. /vault/secrets/jenkins-env
exec /usr/bin/tini -- /usr/local/bin/jenkins.sh
ports:
- name: http
containerPort: 8080
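
Jenkins follows the same pattern: source the agent-rendered file, then exec the stock launcher under tini so signal handling is unchanged, which lets the ten secretKeyRef env entries below collapse into one template. Since the file exists only inside the pod, a hedged verification is to re-source it from an exec'd shell (deployment name assumed):

    kubectl -n jenkins exec deploy/jenkins -c jenkins -- \
        sh -c '. /vault/secrets/jenkins-env && echo "OIDC client: $OIDC_CLIENT_ID"'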
@ -81,56 +109,6 @@ spec:
value: "true"
- name: OIDC_ISSUER
value: "https://sso.bstein.dev/realms/atlas"
- name: OIDC_CLIENT_ID
valueFrom:
secretKeyRef:
name: jenkins-oidc
key: clientId
- name: OIDC_CLIENT_SECRET
valueFrom:
secretKeyRef:
name: jenkins-oidc
key: clientSecret
- name: OIDC_AUTH_URL
valueFrom:
secretKeyRef:
name: jenkins-oidc
key: authorizationUrl
- name: OIDC_TOKEN_URL
valueFrom:
secretKeyRef:
name: jenkins-oidc
key: tokenUrl
- name: OIDC_USERINFO_URL
valueFrom:
secretKeyRef:
name: jenkins-oidc
key: userInfoUrl
- name: OIDC_LOGOUT_URL
valueFrom:
secretKeyRef:
name: jenkins-oidc
key: logoutUrl
- name: HARBOR_ROBOT_USERNAME
valueFrom:
secretKeyRef:
name: harbor-robot-creds
key: username
- name: HARBOR_ROBOT_PASSWORD
valueFrom:
secretKeyRef:
name: harbor-robot-creds
key: password
- name: GITEA_PAT_USERNAME
valueFrom:
secretKeyRef:
name: gitea-pat
key: username
- name: GITEA_PAT_TOKEN
valueFrom:
secretKeyRef:
name: gitea-pat
key: token
resources:
requests:
cpu: 750m

View File

@ -5,13 +5,10 @@ namespace: jenkins
resources:
- namespace.yaml
- serviceaccount.yaml
- vault-serviceaccount.yaml
- secretproviderclass.yaml
- pvc.yaml
- configmap-jcasc.yaml
- configmap-plugins.yaml
- deployment.yaml
- vault-sync-deployment.yaml
- service.yaml
- ingress.yaml

View File

@ -1,72 +0,0 @@
# services/jenkins/secretproviderclass.yaml
apiVersion: secrets-store.csi.x-k8s.io/v1
kind: SecretProviderClass
metadata:
name: jenkins-vault
namespace: jenkins
spec:
provider: vault
parameters:
vaultAddress: "http://vault.vault.svc.cluster.local:8200"
roleName: "jenkins"
objects: |
- objectName: "jenkins-oidc__clientId"
secretPath: "kv/data/atlas/jenkins/jenkins-oidc"
secretKey: "clientId"
- objectName: "jenkins-oidc__clientSecret"
secretPath: "kv/data/atlas/jenkins/jenkins-oidc"
secretKey: "clientSecret"
- objectName: "jenkins-oidc__authorizationUrl"
secretPath: "kv/data/atlas/jenkins/jenkins-oidc"
secretKey: "authorizationUrl"
- objectName: "jenkins-oidc__tokenUrl"
secretPath: "kv/data/atlas/jenkins/jenkins-oidc"
secretKey: "tokenUrl"
- objectName: "jenkins-oidc__userInfoUrl"
secretPath: "kv/data/atlas/jenkins/jenkins-oidc"
secretKey: "userInfoUrl"
- objectName: "jenkins-oidc__logoutUrl"
secretPath: "kv/data/atlas/jenkins/jenkins-oidc"
secretKey: "logoutUrl"
- objectName: "harbor-robot-creds__username"
secretPath: "kv/data/atlas/jenkins/harbor-robot-creds"
secretKey: "username"
- objectName: "harbor-robot-creds__password"
secretPath: "kv/data/atlas/jenkins/harbor-robot-creds"
secretKey: "password"
- objectName: "gitea-pat__username"
secretPath: "kv/data/atlas/jenkins/gitea-pat"
secretKey: "username"
- objectName: "gitea-pat__token"
secretPath: "kv/data/atlas/jenkins/gitea-pat"
secretKey: "token"
secretObjects:
- secretName: jenkins-oidc
type: Opaque
data:
- objectName: jenkins-oidc__clientId
key: clientId
- objectName: jenkins-oidc__clientSecret
key: clientSecret
- objectName: jenkins-oidc__authorizationUrl
key: authorizationUrl
- objectName: jenkins-oidc__tokenUrl
key: tokenUrl
- objectName: jenkins-oidc__userInfoUrl
key: userInfoUrl
- objectName: jenkins-oidc__logoutUrl
key: logoutUrl
- secretName: harbor-robot-creds
type: Opaque
data:
- objectName: harbor-robot-creds__username
key: username
- objectName: harbor-robot-creds__password
key: password
- secretName: gitea-pat
type: Opaque
data:
- objectName: gitea-pat__username
key: username
- objectName: gitea-pat__token
key: token

View File

@ -1,6 +0,0 @@
# services/jenkins/vault-serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: jenkins-vault-sync
namespace: jenkins

View File

@ -1,34 +0,0 @@
# services/jenkins/vault-sync-deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: jenkins-vault-sync
namespace: jenkins
spec:
replicas: 1
selector:
matchLabels:
app: jenkins-vault-sync
template:
metadata:
labels:
app: jenkins-vault-sync
spec:
serviceAccountName: jenkins-vault-sync
containers:
- name: sync
image: alpine:3.20
command: ["/bin/sh", "-c"]
args:
- "sleep infinity"
volumeMounts:
- name: vault-secrets
mountPath: /vault/secrets
readOnly: true
volumes:
- name: vault-secrets
csi:
driver: secrets-store.csi.k8s.io
readOnly: true
volumeAttributes:
secretProviderClass: jenkins-vault

View File

@ -1,53 +0,0 @@
# services/keycloak/endurain-oidc-secret-ensure-job.yaml
apiVersion: batch/v1
kind: Job
metadata:
name: endurain-oidc-secret-ensure-4
namespace: sso
spec:
backoffLimit: 0
ttlSecondsAfterFinished: 3600
template:
metadata:
annotations:
vault.hashicorp.com/agent-inject: "true"
vault.hashicorp.com/agent-pre-populate-only: "true"
vault.hashicorp.com/role: "sso-secrets"
vault.hashicorp.com/agent-inject-secret-keycloak-admin-env.sh: "kv/data/atlas/shared/keycloak-admin"
vault.hashicorp.com/agent-inject-template-keycloak-admin-env.sh: |
{{ with secret "kv/data/atlas/shared/keycloak-admin" }}
export KEYCLOAK_ADMIN="{{ .Data.data.username }}"
export KEYCLOAK_ADMIN_USER="{{ .Data.data.username }}"
export KEYCLOAK_ADMIN_PASSWORD="{{ .Data.data.password }}"
{{ end }}
spec:
serviceAccountName: mas-secrets-ensure
restartPolicy: Never
volumes:
- name: endurain-oidc-secret-ensure-script
configMap:
name: endurain-oidc-secret-ensure-script
defaultMode: 0555
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: kubernetes.io/arch
operator: In
values: ["arm64"]
- key: node-role.kubernetes.io/worker
operator: Exists
containers:
- name: apply
image: alpine:3.20
command: ["/bin/sh", "-c"]
args:
- |
set -euo pipefail
apk add --no-cache bash curl jq >/dev/null
exec /scripts/endurain_oidc_secret_ensure.sh
volumeMounts:
- name: endurain-oidc-secret-ensure-script
mountPath: /scripts
readOnly: true

View File

@ -22,8 +22,6 @@ resources:
- synapse-oidc-secret-ensure-job.yaml
- logs-oidc-secret-ensure-job.yaml
- harbor-oidc-secret-ensure-job.yaml
- endurain-oidc-secret-ensure-job.yaml
- sparkyfitness-oidc-secret-ensure-job.yaml
- vault-oidc-secret-ensure-job.yaml
- service.yaml
- ingress.yaml
@ -37,12 +35,6 @@ configMapGenerator:
- name: harbor-oidc-secret-ensure-script
files:
- harbor_oidc_secret_ensure.sh=scripts/harbor_oidc_secret_ensure.sh
- name: endurain-oidc-secret-ensure-script
files:
- endurain_oidc_secret_ensure.sh=scripts/endurain_oidc_secret_ensure.sh
- name: sparkyfitness-oidc-secret-ensure-script
files:
- sparkyfitness_oidc_secret_ensure.sh=scripts/sparkyfitness_oidc_secret_ensure.sh
- name: vault-oidc-secret-ensure-script
files:
- vault_oidc_secret_ensure.sh=scripts/vault_oidc_secret_ensure.sh

View File

@ -1,87 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail
. /vault/secrets/keycloak-admin-env.sh
KC_URL="http://keycloak.sso.svc.cluster.local"
REALM="atlas"
CLIENT_ID="endurain"
ROOT_URL="https://endurain.bstein.dev"
REDIRECT_URI="https://endurain.bstein.dev/api/v1/public/idp/callback/keycloak"
ISSUER_URL="https://sso.bstein.dev/realms/atlas"
ACCESS_TOKEN=""
for attempt in 1 2 3 4 5; do
TOKEN_JSON="$(curl -sS -X POST "$KC_URL/realms/master/protocol/openid-connect/token" \
-H 'Content-Type: application/x-www-form-urlencoded' \
-d "grant_type=password" \
-d "client_id=admin-cli" \
-d "username=${KEYCLOAK_ADMIN}" \
-d "password=${KEYCLOAK_ADMIN_PASSWORD}" || true)"
ACCESS_TOKEN="$(echo "$TOKEN_JSON" | jq -r '.access_token' 2>/dev/null || true)"
if [ -n "$ACCESS_TOKEN" ] && [ "$ACCESS_TOKEN" != "null" ]; then
break
fi
echo "Keycloak token request failed (attempt ${attempt})" >&2
sleep $((attempt * 2))
done
if [ -z "$ACCESS_TOKEN" ] || [ "$ACCESS_TOKEN" = "null" ]; then
echo "Failed to fetch Keycloak admin token" >&2
exit 1
fi
CLIENT_QUERY="$(curl -sS -H "Authorization: Bearer ${ACCESS_TOKEN}" \
"$KC_URL/admin/realms/${REALM}/clients?clientId=${CLIENT_ID}" || true)"
CLIENT_UUID="$(echo "$CLIENT_QUERY" | jq -r '.[0].id' 2>/dev/null || true)"
if [ -z "$CLIENT_UUID" ] || [ "$CLIENT_UUID" = "null" ]; then
create_payload="$(jq -nc \
--arg client_id "${CLIENT_ID}" \
--arg root_url "${ROOT_URL}" \
--arg redirect_uri "${REDIRECT_URI}" \
--arg web_origin "${ROOT_URL}" \
'{clientId:$client_id,name:"Endurain",enabled:true,protocol:"openid-connect",publicClient:false,standardFlowEnabled:true,implicitFlowEnabled:false,directAccessGrantsEnabled:false,serviceAccountsEnabled:false,redirectUris:[$redirect_uri],webOrigins:[$web_origin],rootUrl:$root_url,baseUrl:"/"}')"
status="$(curl -sS -o /dev/null -w "%{http_code}" -X POST \
-H "Authorization: Bearer ${ACCESS_TOKEN}" \
-H 'Content-Type: application/json' \
-d "${create_payload}" \
"$KC_URL/admin/realms/${REALM}/clients")"
if [ "$status" != "201" ] && [ "$status" != "204" ]; then
echo "Keycloak client create failed (status ${status})" >&2
exit 1
fi
CLIENT_QUERY="$(curl -sS -H "Authorization: Bearer ${ACCESS_TOKEN}" \
"$KC_URL/admin/realms/${REALM}/clients?clientId=${CLIENT_ID}" || true)"
CLIENT_UUID="$(echo "$CLIENT_QUERY" | jq -r '.[0].id' 2>/dev/null || true)"
fi
if [ -z "$CLIENT_UUID" ] || [ "$CLIENT_UUID" = "null" ]; then
echo "Keycloak client ${CLIENT_ID} not found" >&2
exit 1
fi
CLIENT_SECRET="$(curl -sS -H "Authorization: Bearer ${ACCESS_TOKEN}" \
"$KC_URL/admin/realms/${REALM}/clients/${CLIENT_UUID}/client-secret" | jq -r '.value' 2>/dev/null || true)"
if [ -z "$CLIENT_SECRET" ] || [ "$CLIENT_SECRET" = "null" ]; then
echo "Keycloak client secret not found" >&2
exit 1
fi
vault_addr="${VAULT_ADDR:-http://vault.vault.svc.cluster.local:8200}"
vault_role="${VAULT_ROLE:-sso-secrets}"
jwt="$(cat /var/run/secrets/kubernetes.io/serviceaccount/token)"
login_payload="$(jq -nc --arg jwt "${jwt}" --arg role "${vault_role}" '{jwt:$jwt, role:$role}')"
vault_token="$(curl -sS --request POST --data "${login_payload}" \
"${vault_addr}/v1/auth/kubernetes/login" | jq -r '.auth.client_token')"
if [ -z "${vault_token}" ] || [ "${vault_token}" = "null" ]; then
echo "vault login failed" >&2
exit 1
fi
payload="$(jq -nc \
--arg client_id "${CLIENT_ID}" \
--arg client_secret "${CLIENT_SECRET}" \
--arg issuer_url "${ISSUER_URL}" \
'{data:{client_id:$client_id,client_secret:$client_secret,issuer_url:$issuer_url}}')"
curl -sS -X POST -H "X-Vault-Token: ${vault_token}" \
-d "${payload}" "${vault_addr}/v1/kv/data/atlas/health/endurain-oidc" >/dev/null

View File

@ -1,87 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail
. /vault/secrets/keycloak-admin-env.sh
KC_URL="http://keycloak.sso.svc.cluster.local"
REALM="atlas"
CLIENT_ID="sparkyfitness"
ROOT_URL="https://sparkyfitness.bstein.dev"
REDIRECT_URI="https://sparkyfitness.bstein.dev/oidc-callback"
ISSUER_URL="https://sso.bstein.dev/realms/atlas"
ACCESS_TOKEN=""
for attempt in 1 2 3 4 5; do
TOKEN_JSON="$(curl -sS -X POST "$KC_URL/realms/master/protocol/openid-connect/token" \
-H 'Content-Type: application/x-www-form-urlencoded' \
-d "grant_type=password" \
-d "client_id=admin-cli" \
-d "username=${KEYCLOAK_ADMIN}" \
-d "password=${KEYCLOAK_ADMIN_PASSWORD}" || true)"
ACCESS_TOKEN="$(echo "$TOKEN_JSON" | jq -r '.access_token' 2>/dev/null || true)"
if [ -n "$ACCESS_TOKEN" ] && [ "$ACCESS_TOKEN" != "null" ]; then
break
fi
echo "Keycloak token request failed (attempt ${attempt})" >&2
sleep $((attempt * 2))
done
if [ -z "$ACCESS_TOKEN" ] || [ "$ACCESS_TOKEN" = "null" ]; then
echo "Failed to fetch Keycloak admin token" >&2
exit 1
fi
CLIENT_QUERY="$(curl -sS -H "Authorization: Bearer ${ACCESS_TOKEN}" \
"$KC_URL/admin/realms/${REALM}/clients?clientId=${CLIENT_ID}" || true)"
CLIENT_UUID="$(echo "$CLIENT_QUERY" | jq -r '.[0].id' 2>/dev/null || true)"
if [ -z "$CLIENT_UUID" ] || [ "$CLIENT_UUID" = "null" ]; then
create_payload="$(jq -nc \
--arg client_id "${CLIENT_ID}" \
--arg root_url "${ROOT_URL}" \
--arg redirect_uri "${REDIRECT_URI}" \
--arg web_origin "${ROOT_URL}" \
'{clientId:$client_id,name:"SparkyFitness",enabled:true,protocol:"openid-connect",publicClient:false,standardFlowEnabled:true,implicitFlowEnabled:false,directAccessGrantsEnabled:false,serviceAccountsEnabled:false,redirectUris:[$redirect_uri],webOrigins:[$web_origin],rootUrl:$root_url,baseUrl:"/"}')"
status="$(curl -sS -o /dev/null -w "%{http_code}" -X POST \
-H "Authorization: Bearer ${ACCESS_TOKEN}" \
-H 'Content-Type: application/json' \
-d "${create_payload}" \
"$KC_URL/admin/realms/${REALM}/clients")"
if [ "$status" != "201" ] && [ "$status" != "204" ]; then
echo "Keycloak client create failed (status ${status})" >&2
exit 1
fi
CLIENT_QUERY="$(curl -sS -H "Authorization: Bearer ${ACCESS_TOKEN}" \
"$KC_URL/admin/realms/${REALM}/clients?clientId=${CLIENT_ID}" || true)"
CLIENT_UUID="$(echo "$CLIENT_QUERY" | jq -r '.[0].id' 2>/dev/null || true)"
fi
if [ -z "$CLIENT_UUID" ] || [ "$CLIENT_UUID" = "null" ]; then
echo "Keycloak client ${CLIENT_ID} not found" >&2
exit 1
fi
CLIENT_SECRET="$(curl -sS -H "Authorization: Bearer ${ACCESS_TOKEN}" \
"$KC_URL/admin/realms/${REALM}/clients/${CLIENT_UUID}/client-secret" | jq -r '.value' 2>/dev/null || true)"
if [ -z "$CLIENT_SECRET" ] || [ "$CLIENT_SECRET" = "null" ]; then
echo "Keycloak client secret not found" >&2
exit 1
fi
vault_addr="${VAULT_ADDR:-http://vault.vault.svc.cluster.local:8200}"
vault_role="${VAULT_ROLE:-sso-secrets}"
jwt="$(cat /var/run/secrets/kubernetes.io/serviceaccount/token)"
login_payload="$(jq -nc --arg jwt "${jwt}" --arg role "${vault_role}" '{jwt:$jwt, role:$role}')"
vault_token="$(curl -sS --request POST --data "${login_payload}" \
"${vault_addr}/v1/auth/kubernetes/login" | jq -r '.auth.client_token')"
if [ -z "${vault_token}" ] || [ "${vault_token}" = "null" ]; then
echo "vault login failed" >&2
exit 1
fi
payload="$(jq -nc \
--arg client_id "${CLIENT_ID}" \
--arg client_secret "${CLIENT_SECRET}" \
--arg issuer_url "${ISSUER_URL}" \
'{data:{client_id:$client_id,client_secret:$client_secret,issuer_url:$issuer_url}}')"
curl -sS -X POST -H "X-Vault-Token: ${vault_token}" \
-d "${payload}" "${vault_addr}/v1/kv/data/atlas/health/sparkyfitness-oidc" >/dev/null

View File

@ -10,41 +10,10 @@ spec:
vaultAddress: "http://vault.vault.svc.cluster.local:8200"
roleName: "sso"
objects: |
- objectName: "openldap-admin__LDAP_ADMIN_PASSWORD"
secretPath: "kv/data/atlas/sso/openldap-admin"
secretKey: "LDAP_ADMIN_PASSWORD"
- objectName: "openldap-admin__LDAP_CONFIG_PASSWORD"
secretPath: "kv/data/atlas/sso/openldap-admin"
secretKey: "LDAP_CONFIG_PASSWORD"
- objectName: "oauth2-proxy-oidc__client_id"
secretPath: "kv/data/atlas/sso/oauth2-proxy-oidc"
secretKey: "client_id"
- objectName: "oauth2-proxy-oidc__client_secret"
secretPath: "kv/data/atlas/sso/oauth2-proxy-oidc"
secretKey: "client_secret"
- objectName: "oauth2-proxy-oidc__cookie_secret"
secretPath: "kv/data/atlas/sso/oauth2-proxy-oidc"
secretKey: "cookie_secret"
- objectName: "harbor-pull__dockerconfigjson"
secretPath: "kv/data/atlas/harbor-pull/sso"
secretKey: "dockerconfigjson"
secretObjects:
- secretName: openldap-admin
type: Opaque
data:
- objectName: openldap-admin__LDAP_ADMIN_PASSWORD
key: LDAP_ADMIN_PASSWORD
- objectName: openldap-admin__LDAP_CONFIG_PASSWORD
key: LDAP_CONFIG_PASSWORD
- secretName: oauth2-proxy-oidc
type: Opaque
data:
- objectName: oauth2-proxy-oidc__client_id
key: client_id
- objectName: oauth2-proxy-oidc__client_secret
key: client_secret
- objectName: oauth2-proxy-oidc__cookie_secret
key: cookie_secret
- secretName: harbor-regcred
type: kubernetes.io/dockerconfigjson
data:
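
After this trim the sso SecretProviderClass only syncs the Harbor pull secret; the app credentials it used to mirror now reach pods through agent-inject templates. Note that secrets-store CSI only creates the synced Secret while some pod actually mounts the volume. A quick smoke check, assuming these manifests live in the sso namespace:

# Hypothetical check; assumes the namespace is "sso".
kubectl -n sso get secret harbor-regcred -o jsonpath='{.type}'
# expected output: kubernetes.io/dockerconfigjson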

View File

@ -1,53 +0,0 @@
# services/keycloak/sparkyfitness-oidc-secret-ensure-job.yaml
apiVersion: batch/v1
kind: Job
metadata:
name: sparkyfitness-oidc-secret-ensure-3
namespace: sso
spec:
backoffLimit: 0
ttlSecondsAfterFinished: 3600
template:
metadata:
annotations:
vault.hashicorp.com/agent-inject: "true"
vault.hashicorp.com/agent-pre-populate-only: "true"
vault.hashicorp.com/role: "sso-secrets"
vault.hashicorp.com/agent-inject-secret-keycloak-admin-env.sh: "kv/data/atlas/shared/keycloak-admin"
vault.hashicorp.com/agent-inject-template-keycloak-admin-env.sh: |
{{ with secret "kv/data/atlas/shared/keycloak-admin" }}
export KEYCLOAK_ADMIN="{{ .Data.data.username }}"
export KEYCLOAK_ADMIN_USER="{{ .Data.data.username }}"
export KEYCLOAK_ADMIN_PASSWORD="{{ .Data.data.password }}"
{{ end }}
spec:
serviceAccountName: mas-secrets-ensure
restartPolicy: Never
volumes:
- name: sparkyfitness-oidc-secret-ensure-script
configMap:
name: sparkyfitness-oidc-secret-ensure-script
defaultMode: 0555
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: kubernetes.io/arch
operator: In
values: ["arm64"]
- key: node-role.kubernetes.io/worker
operator: Exists
containers:
- name: apply
image: alpine:3.20
command: ["/bin/sh", "-c"]
args:
- |
set -euo pipefail
apk add --no-cache bash curl jq >/dev/null
exec /scripts/sparkyfitness_oidc_secret_ensure.sh
volumeMounts:
- name: sparkyfitness-oidc-secret-ensure-script
mountPath: /scripts
readOnly: true

View File

@ -32,7 +32,20 @@ spec:
metadata:
labels:
app: oauth2-proxy-logs
annotations:
vault.hashicorp.com/agent-inject: "true"
vault.hashicorp.com/role: "logging"
vault.hashicorp.com/agent-inject-secret-oidc-env: "kv/data/atlas/logging/oauth2-proxy-logs-oidc"
vault.hashicorp.com/agent-inject-template-oidc-env: |
{{- with secret "kv/data/atlas/logging/oauth2-proxy-logs-oidc" -}}
export OAUTH2_PROXY_CLIENT_ID="{{ .Data.data.client_id }}"
export OAUTH2_PROXY_CLIENT_SECRET="{{ .Data.data.client_secret }}"
export OAUTH2_PROXY_COOKIE_SECRET="{{ .Data.data.cookie_secret }}"
{{- end -}}
spec:
serviceAccountName: logging-vault-sync
imagePullSecrets:
- name: harbor-regcred
nodeSelector:
node-role.kubernetes.io/worker: "true"
affinity:
@ -47,7 +60,7 @@ spec:
- rpi4
containers:
- name: oauth2-proxy
image: quay.io/oauth2-proxy/oauth2-proxy:v7.6.0
image: registry.bstein.dev/tools/oauth2-proxy-vault:v7.6.0
imagePullPolicy: IfNotPresent
args:
- --provider=oidc
@ -70,21 +83,8 @@ spec:
- --skip-jwt-bearer-tokens=true
- --cookie-domain=logs.bstein.dev
env:
- name: OAUTH2_PROXY_CLIENT_ID
valueFrom:
secretKeyRef:
name: oauth2-proxy-logs-oidc
key: client_id
- name: OAUTH2_PROXY_CLIENT_SECRET
valueFrom:
secretKeyRef:
name: oauth2-proxy-logs-oidc
key: client_secret
- name: OAUTH2_PROXY_COOKIE_SECRET
valueFrom:
secretKeyRef:
name: oauth2-proxy-logs-oidc
key: cookie_secret
- name: VAULT_ENV_FILE
value: /vault/secrets/oidc-env
ports:
- containerPort: 4180
name: http
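
oauth2-proxy-logs now receives its OIDC credentials through the agent-rendered file rather than secretKeyRef env vars; the wrapper image sources $VAULT_ENV_FILE before exec'ing the proxy. One hedged way to confirm the rendered file exports what the template promises, printing only variable names so no secret values leak (the Deployment name and the "logging" namespace are assumptions):

# Sketch: source the rendered env file and list the exported names.
kubectl -n logging exec deploy/oauth2-proxy-logs -c oauth2-proxy -- \
  sh -c '. "${VAULT_ENV_FILE}" && env | grep -o "^OAUTH2_PROXY_[A-Z_]*"'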

View File

@ -10,28 +10,10 @@ spec:
vaultAddress: "http://vault.vault.svc.cluster.local:8200"
roleName: "logging"
objects: |
- objectName: "oauth2-proxy-logs-oidc__client_id"
secretPath: "kv/data/atlas/logging/oauth2-proxy-logs-oidc"
secretKey: "client_id"
- objectName: "oauth2-proxy-logs-oidc__client_secret"
secretPath: "kv/data/atlas/logging/oauth2-proxy-logs-oidc"
secretKey: "client_secret"
- objectName: "oauth2-proxy-logs-oidc__cookie_secret"
secretPath: "kv/data/atlas/logging/oauth2-proxy-logs-oidc"
secretKey: "cookie_secret"
- objectName: "harbor-pull__dockerconfigjson"
secretPath: "kv/data/atlas/harbor-pull/logging"
secretKey: "dockerconfigjson"
secretObjects:
- secretName: oauth2-proxy-logs-oidc
type: Opaque
data:
- objectName: oauth2-proxy-logs-oidc__client_id
key: client_id
- objectName: oauth2-proxy-logs-oidc__client_secret
key: client_secret
- objectName: oauth2-proxy-logs-oidc__cookie_secret
key: cookie_secret
- secretName: harbor-regcred
type: kubernetes.io/dockerconfigjson
data:

View File

@ -15,7 +15,20 @@ spec:
metadata:
labels:
app: oauth2-proxy
annotations:
vault.hashicorp.com/agent-inject: "true"
vault.hashicorp.com/role: "sso"
vault.hashicorp.com/agent-inject-secret-oidc-env: "kv/data/atlas/sso/oauth2-proxy-oidc"
vault.hashicorp.com/agent-inject-template-oidc-env: |
{{- with secret "kv/data/atlas/sso/oauth2-proxy-oidc" -}}
export OAUTH2_PROXY_CLIENT_ID="{{ .Data.data.client_id }}"
export OAUTH2_PROXY_CLIENT_SECRET="{{ .Data.data.client_secret }}"
export OAUTH2_PROXY_COOKIE_SECRET="{{ .Data.data.cookie_secret }}"
{{- end -}}
spec:
serviceAccountName: sso-vault
imagePullSecrets:
- name: harbor-regcred
nodeSelector:
node-role.kubernetes.io/worker: "true"
affinity:
@ -29,7 +42,7 @@ spec:
values: ["rpi5","rpi4"]
containers:
- name: oauth2-proxy
image: quay.io/oauth2-proxy/oauth2-proxy:v7.6.0
image: registry.bstein.dev/tools/oauth2-proxy-vault:v7.6.0
imagePullPolicy: IfNotPresent
args:
- --provider=oidc
@ -50,21 +63,8 @@ spec:
- --skip-jwt-bearer-tokens=true
- --oidc-groups-claim=groups
env:
- name: OAUTH2_PROXY_CLIENT_ID
valueFrom:
secretKeyRef:
name: oauth2-proxy-oidc
key: client_id
- name: OAUTH2_PROXY_CLIENT_SECRET
valueFrom:
secretKeyRef:
name: oauth2-proxy-oidc
key: client_secret
- name: OAUTH2_PROXY_COOKIE_SECRET
valueFrom:
secretKeyRef:
name: oauth2-proxy-oidc
key: cookie_secret
- name: VAULT_ENV_FILE
value: /vault/secrets/oidc-env
ports:
- containerPort: 4180
name: http

View File

@ -16,14 +16,30 @@ spec:
metadata:
labels:
app: openldap
annotations:
vault.hashicorp.com/agent-inject: "true"
vault.hashicorp.com/role: "sso"
vault.hashicorp.com/agent-inject-secret-openldap-env: "kv/data/atlas/sso/openldap-admin"
vault.hashicorp.com/agent-inject-template-openldap-env: |
{{- with secret "kv/data/atlas/sso/openldap-admin" -}}
export LDAP_ADMIN_PASSWORD="{{ .Data.data.LDAP_ADMIN_PASSWORD }}"
export LDAP_CONFIG_PASSWORD="{{ .Data.data.LDAP_CONFIG_PASSWORD }}"
{{- end -}}
spec:
nodeSelector:
kubernetes.io/arch: arm64
node-role.kubernetes.io/worker: "true"
serviceAccountName: sso-vault
containers:
- name: openldap
image: docker.io/osixia/openldap:1.5.0
imagePullPolicy: IfNotPresent
command: ["/bin/sh", "-c"]
args:
- |
set -eu
. /vault/secrets/openldap-env
exec /usr/bin/python3 -u /container/tool/run
ports:
- name: ldap
containerPort: 389
@ -34,16 +50,6 @@ spec:
value: Atlas
- name: LDAP_DOMAIN
value: bstein.dev
- name: LDAP_ADMIN_PASSWORD
valueFrom:
secretKeyRef:
name: openldap-admin
key: LDAP_ADMIN_PASSWORD
- name: LDAP_CONFIG_PASSWORD
valueFrom:
secretKeyRef:
name: openldap-admin
key: LDAP_CONFIG_PASSWORD
readinessProbe:
tcpSocket:
port: ldap
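
Unlike the proxies, openldap keeps the upstream image and sources the rendered env file via a command override before exec'ing the stock entrypoint, so no wrapper image is needed. A hedged way to confirm the injected admin password took effect, binding as the DN that osixia/openldap conventionally derives from LDAP_DOMAIN=bstein.dev (the Deployment name, namespace, and admin DN are all assumptions):

# Sketch: re-source the env file in the exec'd shell, then do a simple bind.
kubectl -n sso exec deploy/openldap -c openldap -- sh -c \
  '. /vault/secrets/openldap-env && ldapwhoami -x -H ldap://localhost \
    -D "cn=admin,dc=bstein,dc=dev" -w "$LDAP_ADMIN_PASSWORD"'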

View File

@ -14,11 +14,23 @@ spec:
maxUnavailable: 1
selector: { matchLabels: { app: pegasus } }
template:
metadata: { labels: { app: pegasus } }
metadata:
labels: { app: pegasus }
annotations:
vault.hashicorp.com/agent-inject: "true"
vault.hashicorp.com/role: "pegasus"
vault.hashicorp.com/agent-inject-secret-pegasus-env: "kv/data/atlas/pegasus/pegasus-secrets"
vault.hashicorp.com/agent-inject-template-pegasus-env: |
{{- with secret "kv/data/atlas/pegasus/pegasus-secrets" -}}
export PEGASUS_SESSION_KEY="{{ .Data.data.PEGASUS_SESSION_KEY }}"
export JELLYFIN_URL="{{ .Data.data.JELLYFIN_URL }}"
export JELLYFIN_API_KEY="{{ .Data.data.JELLYFIN_API_KEY }}"
{{- end -}}
spec:
nodeSelector:
kubernetes.io/arch: arm64
node-role.kubernetes.io/worker: "true"
serviceAccountName: pegasus-vault-sync
imagePullSecrets:
- name: harbor-regcred
securityContext:
@ -60,9 +72,8 @@ spec:
containers:
- name: pegasus
image: registry.bstein.dev/streaming/pegasus:1.2.32 # {"$imagepolicy": "jellyfin:pegasus"}
image: registry.bstein.dev/streaming/pegasus-vault:1.2.32 # {"$imagepolicy": "jellyfin:pegasus"}
imagePullPolicy: Always
command: ["/pegasus"]
env:
- name: PEGASUS_MEDIA_ROOT
valueFrom: { configMapKeyRef: { name: pegasus-config, key: PEGASUS_MEDIA_ROOT } }
@ -70,12 +81,8 @@ spec:
valueFrom: { configMapKeyRef: { name: pegasus-config, key: PEGASUS_BIND } }
- name: PEGASUS_USER_MAP_FILE
value: "/config/user-map.yaml"
- name: PEGASUS_SESSION_KEY
valueFrom: { secretKeyRef: { name: pegasus-secrets, key: PEGASUS_SESSION_KEY } }
- name: JELLYFIN_URL
valueFrom: { secretKeyRef: { name: pegasus-secrets, key: JELLYFIN_URL } }
- name: JELLYFIN_API_KEY
valueFrom: { secretKeyRef: { name: pegasus-secrets, key: JELLYFIN_API_KEY } }
- name: VAULT_ENV_FILE
value: /vault/secrets/pegasus-env
- name: PEGASUS_DEBUG
value: "1"
- name: PEGASUS_DRY_RUN

View File

@ -5,7 +5,7 @@ metadata:
name: pegasus
namespace: jellyfin
spec:
image: registry.bstein.dev/streaming/pegasus
image: registry.bstein.dev/streaming/pegasus-vault
interval: 1m0s
---
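
Pegasus moves to the -vault wrapper image and drops its explicit command so the wrapper entrypoint runs first; retargeting the Flux ImageRepository keeps the $imagepolicy marker on the Deployment resolving against the new repo. A hedged check with the flux CLI (assumes flux is installed and the image objects keep the names shown above):

# Sketch: confirm Flux scans the new repository and the policy still resolves a tag.
flux get image repository pegasus -n jellyfin
flux get image policy pegasus -n jellyfin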

View File

@ -10,28 +10,10 @@ spec:
vaultAddress: "http://vault.vault.svc.cluster.local:8200"
roleName: "pegasus"
objects: |
- objectName: "pegasus-secrets__PEGASUS_SESSION_KEY"
secretPath: "kv/data/atlas/pegasus/pegasus-secrets"
secretKey: "PEGASUS_SESSION_KEY"
- objectName: "pegasus-secrets__JELLYFIN_URL"
secretPath: "kv/data/atlas/pegasus/pegasus-secrets"
secretKey: "JELLYFIN_URL"
- objectName: "pegasus-secrets__JELLYFIN_API_KEY"
secretPath: "kv/data/atlas/pegasus/pegasus-secrets"
secretKey: "JELLYFIN_API_KEY"
- objectName: "harbor-pull__dockerconfigjson"
secretPath: "kv/data/atlas/harbor-pull/jellyfin"
secretKey: "dockerconfigjson"
secretObjects:
- secretName: pegasus-secrets
type: Opaque
data:
- objectName: pegasus-secrets__PEGASUS_SESSION_KEY
key: PEGASUS_SESSION_KEY
- objectName: pegasus-secrets__JELLYFIN_URL
key: JELLYFIN_URL
- objectName: pegasus-secrets__JELLYFIN_API_KEY
key: JELLYFIN_API_KEY
- secretName: harbor-regcred
type: kubernetes.io/dockerconfigjson
data:

View File

@ -95,7 +95,7 @@ write_policy_and_role "nextcloud" "nextcloud" "nextcloud-vault" \
"nextcloud/* shared/keycloak-admin shared/postmark-relay" ""
write_policy_and_role "comms" "comms" "comms-vault,atlasbot" \
"comms/* shared/chat-ai-keys-runtime harbor-pull/comms" ""
write_policy_and_role "jenkins" "jenkins" "jenkins-vault-sync" \
write_policy_and_role "jenkins" "jenkins" "jenkins" \
"jenkins/*" ""
write_policy_and_role "monitoring" "monitoring" "monitoring-vault-sync" \
"monitoring/* shared/postmark-relay harbor-pull/monitoring" ""
@ -110,7 +110,7 @@ write_policy_and_role "health" "health" "health-vault-sync" \
write_policy_and_role "sso-secrets" "sso" "mas-secrets-ensure" \
"shared/keycloak-admin" \
"harbor/harbor-oidc vault/vault-oidc-config comms/synapse-oidc logging/oauth2-proxy-logs-oidc health/endurain-oidc health/sparkyfitness-oidc"
"harbor/harbor-oidc vault/vault-oidc-config comms/synapse-oidc logging/oauth2-proxy-logs-oidc"
write_policy_and_role "comms-secrets" "comms" \
"comms-secrets-ensure,mas-db-ensure,mas-admin-client-secret-writer,othrys-synapse-signingkey-job" \
"" \