vault: prep helm releases and image pins

This commit is contained in:
Brad Stein 2026-01-13 19:29:14 -03:00
parent 8ee7d046d2
commit 4602656578
39 changed files with 1011 additions and 4275 deletions

View File

@ -1,13 +0,0 @@
# clusters/atlas/applications/kustomization.yaml
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- ../../services/crypto
- ../../services/gitea
- ../../services/jellyfin
- ../../services/comms
- ../../services/monitoring
- ../../services/logging
- ../../services/pegasus
- ../../services/vault
- ../../services/bstein-dev-home

View File

@ -1,3 +1,4 @@
# clusters/atlas/flux-system/gotk-components.yaml
---
# This manifest was generated by flux. DO NOT EDIT.
# Flux Version: v2.7.5

View File

@ -1,8 +0,0 @@
# clusters/atlas/platform/kustomization.yaml
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- ../../../infrastructure/modules/base
- ../../../infrastructure/modules/profiles/atlas-ha
- ../../../infrastructure/sources/cert-manager/letsencrypt.yaml
- ../../../infrastructure/metallb

View File

@ -0,0 +1,47 @@
# infrastructure/metallb/helmrelease.yaml
apiVersion: helm.toolkit.fluxcd.io/v2
kind: HelmRelease
metadata:
name: metallb
namespace: metallb-system
spec:
interval: 30m
chart:
spec:
chart: metallb
version: 0.15.3
sourceRef:
kind: HelmRepository
name: metallb
namespace: flux-system
install:
crds: CreateReplace
remediation: { retries: 3 }
timeout: 10m
upgrade:
crds: CreateReplace
remediation:
retries: 3
remediateLastFailure: true
cleanupOnFail: true
timeout: 10m
values:
loadBalancerClass: metallb
prometheus:
metricsPort: 7472
controller:
logLevel: info
webhookMode: enabled
tlsMinVersion: VersionTLS12
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: hardware
operator: In
values:
- rpi4
- rpi5
speaker:
logLevel: info

View File

@ -3,8 +3,5 @@ apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- namespace.yaml
- metallb-rendered.yaml
- helmrelease.yaml
- ippool.yaml
patchesStrategicMerge:
- patches/node-placement.yaml
- patches/speaker-loglevel.yaml

File diff suppressed because it is too large Load Diff

View File

@ -1,27 +0,0 @@
# infrastructure/metallb/patches/node-placement.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: metallb-controller
namespace: metallb-system
spec:
template:
spec:
containers:
- name: controller
args:
- --port=7472
- --log-level=info
- --webhook-mode=enabled
- --tls-min-version=VersionTLS12
- --lb-class=metallb
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: hardware
operator: In
values:
- rpi4
- rpi5

View File

@ -1,15 +0,0 @@
# infrastructure/metallb/patches/speaker-loglevel.yaml
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: metallb-speaker
namespace: metallb-system
spec:
template:
spec:
containers:
- name: speaker
args:
- --port=7472
- --log-level=info
- --lb-class=metallb

View File

@ -1,3 +1,4 @@
# infrastructure/sources/cert-manager/letsencrypt-prod.yaml
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:

View File

@ -1,3 +1,4 @@
# infrastructure/sources/cert-manager/letsencrypt.yaml
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:

View File

@ -0,0 +1,9 @@
# infrastructure/sources/helm/ananace.yaml
apiVersion: source.toolkit.fluxcd.io/v1
kind: HelmRepository
metadata:
name: ananace
namespace: flux-system
spec:
interval: 1h
url: https://ananace.gitlab.io/charts

View File

@ -2,12 +2,14 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- ananace.yaml
- fluent-bit.yaml
- grafana.yaml
- hashicorp.yaml
- jetstack.yaml
- jenkins.yaml
- mailu.yaml
- metallb.yaml
- opentelemetry.yaml
- opensearch.yaml
- harbor.yaml

View File

@ -0,0 +1,9 @@
# infrastructure/sources/helm/metallb.yaml
apiVersion: source.toolkit.fluxcd.io/v1
kind: HelmRepository
metadata:
name: metallb
namespace: flux-system
spec:
interval: 1h
url: https://metallb.github.io/metallb

View File

@ -42,7 +42,7 @@ spec:
claimName: ollama-models
initContainers:
- name: warm-model
image: ollama/ollama:latest
image: ollama/ollama@sha256:2c9595c555fd70a28363489ac03bd5bf9e7c5bdf2890373c3a830ffd7252ce6d
env:
- name: OLLAMA_HOST
value: 0.0.0.0
@ -75,7 +75,7 @@ spec:
nvidia.com/gpu.shared: 1
containers:
- name: ollama
image: ollama/ollama:latest
image: ollama/ollama@sha256:2c9595c555fd70a28363489ac03bd5bf9e7c5bdf2890373c3a830ffd7252ce6d
imagePullPolicy: IfNotPresent
ports:
- name: http

View File

@ -1,3 +1,4 @@
# services/bstein-dev-home/backend-service.yaml
apiVersion: v1
kind: Service
metadata:

View File

@ -1,3 +1,4 @@
# services/bstein-dev-home/frontend-service.yaml
apiVersion: v1
kind: Service
metadata:

View File

@ -1,3 +1,4 @@
# services/bstein-dev-home/namespace.yaml
apiVersion: v1
kind: Namespace
metadata:

View File

@ -13,7 +13,7 @@ spec:
restartPolicy: Never
containers:
- name: ensure
image: bitnami/kubectl:latest
image: registry.bstein.dev/bstein/kubectl:1.35.0
command: ["/bin/sh", "-c"]
args:
- |

View File

@ -19,7 +19,7 @@ spec:
hardware: rpi5
containers:
- name: element-call
image: ghcr.io/element-hq/element-call:latest
image: ghcr.io/element-hq/element-call@sha256:e6897c7818331714eae19d83ef8ea94a8b41115f0d8d3f62c2fed2d02c65c9bc
ports:
- containerPort: 8080
name: http

View File

@ -1,202 +0,0 @@
---
# Source: element-web/templates/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: othrys-element-element-web
labels:
helm.sh/chart: element-web-1.4.26
app.kubernetes.io/name: element-web
app.kubernetes.io/instance: othrys-element
app.kubernetes.io/version: "1.12.6"
app.kubernetes.io/managed-by: Helm
---
# Source: element-web/templates/configuration-nginx.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: othrys-element-element-web-nginx
labels:
helm.sh/chart: element-web-1.4.26
app.kubernetes.io/name: element-web
app.kubernetes.io/instance: othrys-element
app.kubernetes.io/version: "1.12.6"
app.kubernetes.io/managed-by: Helm
data:
default.conf: |
server {
listen 8080;
listen [::]:8080;
server_name localhost;
root /usr/share/nginx/html;
index index.html;
add_header X-Frame-Options SAMEORIGIN;
add_header X-Content-Type-Options nosniff;
add_header X-XSS-Protection "1; mode=block";
add_header Content-Security-Policy "frame-ancestors 'self'";
# Set no-cache for the index.html only so that browsers always check for a new copy of Element Web.
location = /index.html {
add_header Cache-Control "no-cache";
}
# redirect server error pages to the static page /50x.html
#
error_page 500 502 503 504 /50x.html;
}
---
# Source: element-web/templates/configuration.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: othrys-element-element-web
labels:
helm.sh/chart: element-web-1.4.26
app.kubernetes.io/name: element-web
app.kubernetes.io/instance: othrys-element
app.kubernetes.io/version: "1.12.6"
app.kubernetes.io/managed-by: Helm
data:
config.json: |
{"brand":"Othrys","default_server_config":{"m.homeserver":{"base_url":"https://matrix.live.bstein.dev","server_name":"live.bstein.dev"},"m.identity_server":{"base_url":"https://vector.im"}},"default_theme":"dark","disable_custom_urls":true,"disable_login_language_selector":true,"disable_guests":false,"registration_url":"https://bstein.dev/request-access","show_labs_settings":true,"features":{"feature_group_calls":true,"feature_video_rooms":true,"feature_element_call_video_rooms":true},"room_directory":{"servers":["live.bstein.dev"]},"jitsi":{},"element_call":{"url":"https://call.live.bstein.dev","participant_limit":16,"brand":"Othrys Call"}}
---
# Source: element-web/templates/service.yaml
apiVersion: v1
kind: Service
metadata:
name: othrys-element-element-web
labels:
helm.sh/chart: element-web-1.4.26
app.kubernetes.io/name: element-web
app.kubernetes.io/instance: othrys-element
app.kubernetes.io/version: "1.12.6"
app.kubernetes.io/managed-by: Helm
spec:
type: ClusterIP
ports:
- port: 80
targetPort: http
protocol: TCP
name: http
selector:
app.kubernetes.io/name: element-web
app.kubernetes.io/instance: othrys-element
---
# Source: element-web/templates/deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: othrys-element-element-web
labels:
helm.sh/chart: element-web-1.4.26
app.kubernetes.io/name: element-web
app.kubernetes.io/instance: othrys-element
app.kubernetes.io/version: "1.12.6"
app.kubernetes.io/managed-by: Helm
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/name: element-web
app.kubernetes.io/instance: othrys-element
template:
metadata:
annotations:
checksum/config: manual-rtc-enable-1
checksum/config-nginx: 085061d0925f4840c3770233509dc0b00fe8fa1a5fef8bf282a514fd101c76fa
labels:
app.kubernetes.io/name: element-web
app.kubernetes.io/instance: othrys-element
spec:
serviceAccountName: othrys-element-element-web
securityContext:
{}
containers:
- name: element-web
securityContext:
{}
image: "ghcr.io/element-hq/element-web:v1.12.6"
imagePullPolicy: IfNotPresent
env:
- name: ELEMENT_WEB_PORT
value: '8080'
ports:
- name: http
containerPort: 8080
protocol: TCP
livenessProbe:
httpGet:
path: /
port: http
readinessProbe:
httpGet:
path: /
port: http
resources:
limits:
cpu: 500m
memory: 512Mi
requests:
cpu: 100m
memory: 256Mi
volumeMounts:
- mountPath: /app/config.json
name: config
subPath: config.json
- mountPath: /etc/nginx/conf.d/config.json
name: config-nginx
subPath: config.json
volumes:
- name: config
configMap:
name: othrys-element-element-web
- name: config-nginx
configMap:
name: othrys-element-element-web-nginx
nodeSelector:
hardware: rpi5
affinity:
nodeAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- preference:
matchExpressions:
- key: hardware
operator: In
values:
- rpi5
- rpi4
weight: 50
---
# Source: element-web/templates/ingress.yaml
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: othrys-element-element-web
labels:
helm.sh/chart: element-web-1.4.26
app.kubernetes.io/name: element-web
app.kubernetes.io/instance: othrys-element
app.kubernetes.io/version: "1.12.6"
app.kubernetes.io/managed-by: Helm
annotations:
cert-manager.io/cluster-issuer: letsencrypt
traefik.ingress.kubernetes.io/router.entrypoints: websecure
spec:
ingressClassName: traefik
tls:
- hosts:
- "live.bstein.dev"
secretName: live-othrys-tls
rules:
- host: "live.bstein.dev"
http:
paths:
- path: /
backend:
service:
name: othrys-element-element-web
port:
number: 80
pathType: Prefix

View File

@ -0,0 +1,255 @@
# services/comms/helmrelease.yaml
apiVersion: helm.toolkit.fluxcd.io/v2
kind: HelmRelease
metadata:
name: othrys-synapse
namespace: comms
spec:
interval: 30m
chart:
spec:
chart: matrix-synapse
version: 3.12.17
sourceRef:
kind: HelmRepository
name: ananace
namespace: flux-system
install:
remediation: { retries: 3 }
timeout: 15m
upgrade:
remediation:
retries: 3
remediateLastFailure: true
cleanupOnFail: true
timeout: 15m
values:
serverName: live.bstein.dev
publicServerName: matrix.live.bstein.dev
config:
publicBaseurl: https://matrix.live.bstein.dev
externalPostgresql:
host: postgres-service.postgres.svc.cluster.local
port: 5432
username: synapse
existingSecret: synapse-db
existingSecretPasswordKey: POSTGRES_PASSWORD
database: synapse
redis:
enabled: true
auth:
enabled: true
existingSecret: synapse-redis
existingSecretPasswordKey: redis-password
postgresql:
enabled: false
persistence:
enabled: true
storageClass: asteria
accessMode: ReadWriteOnce
size: 50Gi
synapse:
strategy:
type: RollingUpdate
rollingUpdate:
maxSurge: 0
maxUnavailable: 1
podSecurityContext:
fsGroup: 666
runAsUser: 666
runAsGroup: 666
resources:
requests:
cpu: 500m
memory: 1Gi
limits:
cpu: "2"
memory: 3Gi
nodeSelector:
hardware: rpi5
affinity:
nodeAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- weight: 50
preference:
matchExpressions:
- key: hardware
operator: In
values: ["rpi5", "rpi4"]
ingress:
enabled: true
className: traefik
annotations:
cert-manager.io/cluster-issuer: letsencrypt
traefik.ingress.kubernetes.io/router.entrypoints: websecure
csHosts:
- matrix.live.bstein.dev
hosts:
- matrix.live.bstein.dev
wkHosts:
- live.bstein.dev
- bstein.dev
tls:
- secretName: matrix-live-tls
hosts:
- matrix.live.bstein.dev
- live.bstein.dev
extraConfig:
allow_guest_access: true
allow_public_rooms_without_auth: true
auto_join_rooms:
- "#othrys:live.bstein.dev"
autocreate_auto_join_rooms: true
default_room_version: "11"
experimental_features:
msc3266_enabled: true
msc4143_enabled: true
msc4222_enabled: true
max_event_delay_duration: 24h
password_config:
enabled: true
oidc_enabled: true
oidc_providers:
- idp_id: keycloak
idp_name: Keycloak
issuer: https://sso.bstein.dev/realms/atlas
client_id: synapse
client_secret: "@@OIDC_CLIENT_SECRET@@"
client_auth_method: client_secret_post
scopes: ["openid", "profile", "email"]
authorization_endpoint: https://sso.bstein.dev/realms/atlas/protocol/openid-connect/auth
token_endpoint: https://sso.bstein.dev/realms/atlas/protocol/openid-connect/token
userinfo_endpoint: https://sso.bstein.dev/realms/atlas/protocol/openid-connect/userinfo
user_mapping_provider:
config:
localpart_template: "{{ user.preferred_username }}"
display_name_template: "{{ user.name }}"
allow_existing_users: true
rc_message:
per_second: 0.5
burst_count: 30
rc_delayed_event_mgmt:
per_second: 1
burst_count: 20
rc_login:
address:
burst_count: 20
per_second: 5
account:
burst_count: 20
per_second: 5
failed_attempts:
burst_count: 20
per_second: 5
room_list_publication_rules:
- action: allow
well_known_client:
"m.homeserver":
"base_url": "https://matrix.live.bstein.dev"
"org.matrix.msc4143.rtc_foci":
- type: "livekit"
livekit_service_url: "https://kit.live.bstein.dev/livekit/jwt"
worker:
enabled: false
signingkey:
job:
generateImage:
repository: matrixdotorg/synapse
tag: v1.144.0
publishImage:
repository: registry.bstein.dev/bstein/kubectl
tag: 1.35.0
---
apiVersion: helm.toolkit.fluxcd.io/v2
kind: HelmRelease
metadata:
name: othrys-element
namespace: comms
spec:
interval: 30m
chart:
spec:
chart: element-web
version: 1.4.26
sourceRef:
kind: HelmRepository
name: ananace
namespace: flux-system
install:
remediation: { retries: 3 }
timeout: 10m
upgrade:
remediation:
retries: 3
remediateLastFailure: true
cleanupOnFail: true
timeout: 10m
values:
replicaCount: 1
defaultServer:
url: https://matrix.live.bstein.dev
name: live.bstein.dev
config:
default_theme: dark
brand: Othrys
disable_custom_urls: true
disable_login_language_selector: true
disable_guests: false
show_labs_settings: true
features:
feature_group_calls: true
feature_video_rooms: true
feature_element_call_video_rooms: true
room_directory:
servers:
- live.bstein.dev
jitsi: {}
element_call:
url: https://call.live.bstein.dev
participant_limit: 16
brand: Othrys Call
ingress:
enabled: true
className: traefik
annotations:
cert-manager.io/cluster-issuer: letsencrypt
traefik.ingress.kubernetes.io/router.entrypoints: websecure
hosts:
- live.bstein.dev
tls:
- secretName: live-othrys-tls
hosts: [live.bstein.dev]
resources:
requests:
cpu: 100m
memory: 256Mi
limits:
cpu: 500m
memory: 512Mi
nodeSelector:
hardware: rpi5
affinity:
nodeAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- weight: 50
preference:
matchExpressions:
- key: hardware
operator: In
values: ["rpi5", "rpi4"]

View File

@ -1,8 +1,8 @@
{
"counts": {
"helmrelease_host_hints": 7,
"http_endpoints": 35,
"services": 44,
"workloads": 49
"helmrelease_host_hints": 18,
"http_endpoints": 37,
"services": 43,
"workloads": 54
}
}

View File

@ -12,12 +12,7 @@
"targetNamespace": "bstein-dev-home"
},
{
"name": "ci-demo",
"path": "services/ci-demo",
"targetNamespace": null
},
{
"name": "communication",
"name": "comms",
"path": "services/comms",
"targetNamespace": "comms"
},
@ -71,6 +66,11 @@
"path": "services/keycloak",
"targetNamespace": "sso"
},
{
"name": "logging",
"path": "services/logging",
"targetNamespace": null
},
{
"name": "longhorn-ui",
"path": "infrastructure/longhorn/ui-ingress",
@ -81,6 +81,11 @@
"path": "services/mailu",
"targetNamespace": "mailu-mailserver"
},
{
"name": "maintenance",
"path": "services/maintenance",
"targetNamespace": null
},
{
"name": "metallb",
"path": "infrastructure/metallb",
@ -116,11 +121,26 @@
"path": "services/openldap",
"targetNamespace": "sso"
},
{
"name": "outline",
"path": "services/outline",
"targetNamespace": "outline"
},
{
"name": "pegasus",
"path": "services/pegasus",
"targetNamespace": "jellyfin"
},
{
"name": "planka",
"path": "services/planka",
"targetNamespace": "planka"
},
{
"name": "postgres",
"path": "infrastructure/postgres",
"targetNamespace": "postgres"
},
{
"name": "sui-metrics",
"path": "services/sui-metrics/overlays/atlas",
@ -163,7 +183,7 @@
"serviceAccountName": null,
"nodeSelector": {},
"images": [
"ollama/ollama:latest"
"ollama/ollama@sha256:2c9595c555fd70a28363489ac03bd5bf9e7c5bdf2890373c3a830ffd7252ce6d"
]
},
{
@ -179,7 +199,7 @@
"node-role.kubernetes.io/worker": "true"
},
"images": [
"registry.bstein.dev/bstein/bstein-dev-home-backend:0.1.1-84"
"registry.bstein.dev/bstein/bstein-dev-home-backend:registry.bstein.dev/bstein/bstein-dev-home-backend:0.1.1-92"
]
},
{
@ -195,7 +215,7 @@
"node-role.kubernetes.io/worker": "true"
},
"images": [
"registry.bstein.dev/bstein/bstein-dev-home-frontend:0.1.1-84"
"registry.bstein.dev/bstein/bstein-dev-home-frontend:registry.bstein.dev/bstein/bstein-dev-home-frontend:0.1.1-92"
]
},
{
@ -214,21 +234,6 @@
"python:3.11-slim"
]
},
{
"kind": "Deployment",
"namespace": "ci-demo",
"name": "ci-demo",
"labels": {
"app.kubernetes.io/name": "ci-demo"
},
"serviceAccountName": null,
"nodeSelector": {
"hardware": "rpi4"
},
"images": [
"registry.bstein.dev/infra/ci-demo:v0.0.0-3"
]
},
{
"kind": "Deployment",
"namespace": "comms",
@ -271,7 +276,7 @@
"hardware": "rpi5"
},
"images": [
"ghcr.io/element-hq/element-call:latest"
"ghcr.io/element-hq/element-call@sha256:e6897c7818331714eae19d83ef8ea94a8b41115f0d8d3f62c2fed2d02c65c9bc"
]
},
{
@ -345,56 +350,6 @@
"nginx:1.27-alpine"
]
},
{
"kind": "Deployment",
"namespace": "comms",
"name": "othrys-element-element-web",
"labels": {
"app.kubernetes.io/instance": "othrys-element",
"app.kubernetes.io/name": "element-web"
},
"serviceAccountName": "othrys-element-element-web",
"nodeSelector": {
"hardware": "rpi5"
},
"images": [
"ghcr.io/element-hq/element-web:v1.12.6"
]
},
{
"kind": "Deployment",
"namespace": "comms",
"name": "othrys-synapse-matrix-synapse",
"labels": {
"app.kubernetes.io/component": "synapse",
"app.kubernetes.io/instance": "othrys-synapse",
"app.kubernetes.io/name": "matrix-synapse"
},
"serviceAccountName": "default",
"nodeSelector": {
"hardware": "rpi5"
},
"images": [
"ghcr.io/element-hq/synapse:v1.144.0"
]
},
{
"kind": "Deployment",
"namespace": "comms",
"name": "othrys-synapse-redis-master",
"labels": {
"app.kubernetes.io/component": "master",
"app.kubernetes.io/instance": "othrys-synapse",
"app.kubernetes.io/managed-by": "Helm",
"app.kubernetes.io/name": "redis",
"helm.sh/chart": "redis-17.17.1"
},
"serviceAccountName": "othrys-synapse-redis",
"nodeSelector": {},
"images": [
"docker.io/bitnamilegacy/redis:7.0.12-debian-11-r34"
]
},
{
"kind": "DaemonSet",
"namespace": "crypto",
@ -407,7 +362,7 @@
"node-role.kubernetes.io/worker": "true"
},
"images": [
"ghcr.io/tari-project/xmrig:latest"
"ghcr.io/tari-project/xmrig@sha256:80defbfd0b640d604c91cb5101d3642db7928e1e68ee3c6b011289b3565a39d9"
]
},
{
@ -681,6 +636,66 @@
"hashicorp/vault-csi-provider:1.7.0"
]
},
{
"kind": "DaemonSet",
"namespace": "logging",
"name": "node-image-gc-rpi4",
"labels": {
"app": "node-image-gc-rpi4"
},
"serviceAccountName": "node-image-gc-rpi4",
"nodeSelector": {
"hardware": "rpi4"
},
"images": [
"bitnami/kubectl@sha256:554ab88b1858e8424c55de37ad417b16f2a0e65d1607aa0f3fe3ce9b9f10b131"
]
},
{
"kind": "DaemonSet",
"namespace": "logging",
"name": "node-image-prune-rpi5",
"labels": {
"app": "node-image-prune-rpi5"
},
"serviceAccountName": "node-image-prune-rpi5",
"nodeSelector": {
"hardware": "rpi5"
},
"images": [
"bitnami/kubectl@sha256:554ab88b1858e8424c55de37ad417b16f2a0e65d1607aa0f3fe3ce9b9f10b131"
]
},
{
"kind": "DaemonSet",
"namespace": "logging",
"name": "node-log-rotation",
"labels": {
"app": "node-log-rotation"
},
"serviceAccountName": "node-log-rotation",
"nodeSelector": {
"hardware": "rpi5"
},
"images": [
"bitnami/kubectl@sha256:554ab88b1858e8424c55de37ad417b16f2a0e65d1607aa0f3fe3ce9b9f10b131"
]
},
{
"kind": "Deployment",
"namespace": "logging",
"name": "oauth2-proxy-logs",
"labels": {
"app": "oauth2-proxy-logs"
},
"serviceAccountName": null,
"nodeSelector": {
"node-role.kubernetes.io/worker": "true"
},
"images": [
"quay.io/oauth2-proxy/oauth2-proxy:v7.6.0"
]
},
{
"kind": "Deployment",
"namespace": "longhorn-system",
@ -708,7 +723,7 @@
"mailu.bstein.dev/vip": "true"
},
"images": [
"lachlanevenson/k8s-kubectl:latest"
"registry.bstein.dev/bstein/kubectl:1.35.0"
]
},
{
@ -726,37 +741,30 @@
},
{
"kind": "DaemonSet",
"namespace": "metallb-system",
"name": "metallb-speaker",
"namespace": "maintenance",
"name": "node-image-sweeper",
"labels": {
"app.kubernetes.io/component": "speaker",
"app.kubernetes.io/instance": "metallb",
"app.kubernetes.io/name": "metallb"
"app": "node-image-sweeper"
},
"serviceAccountName": "metallb-speaker",
"serviceAccountName": "node-image-sweeper",
"nodeSelector": {
"kubernetes.io/os": "linux"
},
"images": [
"quay.io/frrouting/frr:10.4.1",
"quay.io/metallb/speaker:v0.15.3"
"python:3.12.9-alpine3.20"
]
},
{
"kind": "Deployment",
"namespace": "metallb-system",
"name": "metallb-controller",
"kind": "DaemonSet",
"namespace": "maintenance",
"name": "node-nofile",
"labels": {
"app.kubernetes.io/component": "controller",
"app.kubernetes.io/instance": "metallb",
"app.kubernetes.io/name": "metallb"
},
"serviceAccountName": "metallb-controller",
"nodeSelector": {
"kubernetes.io/os": "linux"
"app": "node-nofile"
},
"serviceAccountName": "node-nofile",
"nodeSelector": {},
"images": [
"quay.io/metallb/controller:v0.15.3"
"bitnami/kubectl@sha256:554ab88b1858e8424c55de37ad417b16f2a0e65d1607aa0f3fe3ce9b9f10b131"
]
},
{
@ -772,6 +780,21 @@
"registry.bstein.dev/monitoring/dcgm-exporter:4.4.2-4.7.0-ubuntu22.04"
]
},
{
"kind": "DaemonSet",
"namespace": "monitoring",
"name": "jetson-tegrastats-exporter",
"labels": {
"app": "jetson-tegrastats-exporter"
},
"serviceAccountName": "default",
"nodeSelector": {
"jetson": "true"
},
"images": [
"python:3.10-slim"
]
},
{
"kind": "Deployment",
"namespace": "monitoring",
@ -797,7 +820,7 @@
"hardware": "rpi5"
},
"images": [
"collabora/code:latest"
"collabora/code@sha256:3c58d0e9bae75e4647467d0c7d91cb66f261d3e814709aed590b5c334a04db26"
]
},
{
@ -815,6 +838,66 @@
"nextcloud:29-apache"
]
},
{
"kind": "Deployment",
"namespace": "outline",
"name": "outline",
"labels": {
"app": "outline"
},
"serviceAccountName": null,
"nodeSelector": {
"node-role.kubernetes.io/worker": "true"
},
"images": [
"outlinewiki/outline:1.2.0"
]
},
{
"kind": "Deployment",
"namespace": "outline",
"name": "outline-redis",
"labels": {
"app": "outline-redis"
},
"serviceAccountName": null,
"nodeSelector": {
"node-role.kubernetes.io/worker": "true"
},
"images": [
"redis:7.4.1-alpine"
]
},
{
"kind": "Deployment",
"namespace": "planka",
"name": "planka",
"labels": {
"app": "planka"
},
"serviceAccountName": null,
"nodeSelector": {
"node-role.kubernetes.io/worker": "true"
},
"images": [
"ghcr.io/plankanban/planka:2.0.0-rc.4"
]
},
{
"kind": "StatefulSet",
"namespace": "postgres",
"name": "postgres",
"labels": {
"app": "postgres"
},
"serviceAccountName": "postgres-vault",
"nodeSelector": {
"node-role.kubernetes.io/worker": "true"
},
"images": [
"postgres:15"
]
},
{
"kind": "Deployment",
"namespace": "sso",
@ -984,22 +1067,6 @@
}
]
},
{
"namespace": "ci-demo",
"name": "ci-demo",
"type": "ClusterIP",
"selector": {
"app.kubernetes.io/name": "ci-demo"
},
"ports": [
{
"name": "http",
"port": 80,
"targetPort": "http",
"protocol": "TCP"
}
]
},
{
"namespace": "comms",
"name": "coturn",
@ -1454,94 +1521,6 @@
}
]
},
{
"namespace": "comms",
"name": "othrys-element-element-web",
"type": "ClusterIP",
"selector": {
"app.kubernetes.io/instance": "othrys-element",
"app.kubernetes.io/name": "element-web"
},
"ports": [
{
"name": "http",
"port": 80,
"targetPort": "http",
"protocol": "TCP"
}
]
},
{
"namespace": "comms",
"name": "othrys-synapse-matrix-synapse",
"type": "ClusterIP",
"selector": {
"app.kubernetes.io/component": "synapse",
"app.kubernetes.io/instance": "othrys-synapse",
"app.kubernetes.io/name": "matrix-synapse"
},
"ports": [
{
"name": "http",
"port": 8008,
"targetPort": "http",
"protocol": "TCP"
}
]
},
{
"namespace": "comms",
"name": "othrys-synapse-redis-headless",
"type": "ClusterIP",
"selector": {
"app.kubernetes.io/instance": "othrys-synapse",
"app.kubernetes.io/name": "redis"
},
"ports": [
{
"name": "tcp-redis",
"port": 6379,
"targetPort": "redis",
"protocol": "TCP"
}
]
},
{
"namespace": "comms",
"name": "othrys-synapse-redis-master",
"type": "ClusterIP",
"selector": {
"app.kubernetes.io/component": "master",
"app.kubernetes.io/instance": "othrys-synapse",
"app.kubernetes.io/name": "redis"
},
"ports": [
{
"name": "tcp-redis",
"port": 6379,
"targetPort": "redis",
"protocol": "TCP"
}
]
},
{
"namespace": "comms",
"name": "othrys-synapse-replication",
"type": "ClusterIP",
"selector": {
"app.kubernetes.io/component": "synapse",
"app.kubernetes.io/instance": "othrys-synapse",
"app.kubernetes.io/name": "matrix-synapse"
},
"ports": [
{
"name": "replication",
"port": 9093,
"targetPort": "replication",
"protocol": "TCP"
}
]
},
{
"namespace": "crypto",
"name": "monerod",
@ -1743,6 +1722,22 @@
}
]
},
{
"namespace": "logging",
"name": "oauth2-proxy-logs",
"type": "ClusterIP",
"selector": {
"app": "oauth2-proxy-logs"
},
"ports": [
{
"name": "http",
"port": 80,
"targetPort": 4180,
"protocol": "TCP"
}
]
},
{
"namespace": "longhorn-system",
"name": "oauth2-proxy-longhorn",
@ -1823,24 +1818,6 @@
}
]
},
{
"namespace": "metallb-system",
"name": "metallb-webhook-service",
"type": "ClusterIP",
"selector": {
"app.kubernetes.io/component": "controller",
"app.kubernetes.io/instance": "metallb",
"app.kubernetes.io/name": "metallb"
},
"ports": [
{
"name": null,
"port": 443,
"targetPort": 9443,
"protocol": "TCP"
}
]
},
{
"namespace": "monitoring",
"name": "dcgm-exporter",
@ -1857,6 +1834,22 @@
}
]
},
{
"namespace": "monitoring",
"name": "jetson-tegrastats-exporter",
"type": "ClusterIP",
"selector": {
"app": "jetson-tegrastats-exporter"
},
"ports": [
{
"name": "metrics",
"port": 9100,
"targetPort": "metrics",
"protocol": "TCP"
}
]
},
{
"namespace": "monitoring",
"name": "postmark-exporter",
@ -1905,6 +1898,70 @@
}
]
},
{
"namespace": "outline",
"name": "outline",
"type": "ClusterIP",
"selector": {
"app": "outline"
},
"ports": [
{
"name": "http",
"port": 80,
"targetPort": "http",
"protocol": "TCP"
}
]
},
{
"namespace": "outline",
"name": "outline-redis",
"type": "ClusterIP",
"selector": {
"app": "outline-redis"
},
"ports": [
{
"name": "redis",
"port": 6379,
"targetPort": "redis",
"protocol": "TCP"
}
]
},
{
"namespace": "planka",
"name": "planka",
"type": "ClusterIP",
"selector": {
"app": "planka"
},
"ports": [
{
"name": "http",
"port": 80,
"targetPort": "http",
"protocol": "TCP"
}
]
},
{
"namespace": "postgres",
"name": "postgres-service",
"type": "ClusterIP",
"selector": {
"app": "postgres"
},
"ports": [
{
"name": "postgres",
"port": 5432,
"targetPort": 5432,
"protocol": "TCP"
}
]
},
{
"namespace": "sso",
"name": "keycloak",
@ -2110,7 +2167,7 @@
"via": {
"kind": "Ingress",
"name": "matrix-wellknown-bstein-dev",
"source": "communication"
"source": "comms"
}
},
{
@ -2130,7 +2187,7 @@
"via": {
"kind": "Ingress",
"name": "matrix-wellknown-bstein-dev",
"source": "communication"
"source": "comms"
}
},
{
@ -2170,7 +2227,7 @@
"via": {
"kind": "Ingress",
"name": "element-call",
"source": "communication"
"source": "comms"
}
},
{
@ -2250,7 +2307,7 @@
"via": {
"kind": "Ingress",
"name": "livekit-jwt-ingress",
"source": "communication"
"source": "comms"
}
},
{
@ -2270,27 +2327,7 @@
"via": {
"kind": "Ingress",
"name": "livekit-ingress",
"source": "communication"
}
},
{
"host": "live.bstein.dev",
"path": "/",
"backend": {
"namespace": "comms",
"service": "othrys-element-element-web",
"port": 80,
"workloads": [
{
"kind": "Deployment",
"name": "othrys-element-element-web"
}
]
},
"via": {
"kind": "Ingress",
"name": "othrys-element-element-web",
"source": "communication"
"source": "comms"
}
},
{
@ -2310,7 +2347,7 @@
"via": {
"kind": "Ingress",
"name": "matrix-wellknown",
"source": "communication"
"source": "comms"
}
},
{
@ -2330,7 +2367,7 @@
"via": {
"kind": "Ingress",
"name": "matrix-wellknown",
"source": "communication"
"source": "comms"
}
},
{
@ -2340,17 +2377,32 @@
"namespace": "comms",
"service": "othrys-synapse-matrix-synapse",
"port": 8008,
"workloads": []
},
"via": {
"kind": "Ingress",
"name": "matrix-routing",
"source": "comms"
}
},
{
"host": "logs.bstein.dev",
"path": "/",
"backend": {
"namespace": "logging",
"service": "oauth2-proxy-logs",
"port": "http",
"workloads": [
{
"kind": "Deployment",
"name": "othrys-synapse-matrix-synapse"
"name": "oauth2-proxy-logs"
}
]
},
"via": {
"kind": "Ingress",
"name": "matrix-routing",
"source": "communication"
"name": "logs",
"source": "logging"
}
},
{
@ -2405,7 +2457,7 @@
"via": {
"kind": "Ingress",
"name": "matrix-routing",
"source": "communication"
"source": "comms"
}
},
{
@ -2425,7 +2477,7 @@
"via": {
"kind": "Ingress",
"name": "matrix-wellknown-matrix-live",
"source": "communication"
"source": "comms"
}
},
{
@ -2445,7 +2497,7 @@
"via": {
"kind": "Ingress",
"name": "matrix-wellknown-matrix-live",
"source": "communication"
"source": "comms"
}
},
{
@ -2455,17 +2507,12 @@
"namespace": "comms",
"service": "othrys-synapse-matrix-synapse",
"port": 8008,
"workloads": [
{
"kind": "Deployment",
"name": "othrys-synapse-matrix-synapse"
}
]
"workloads": []
},
"via": {
"kind": "Ingress",
"name": "matrix-routing",
"source": "communication"
"source": "comms"
}
},
{
@ -2485,7 +2532,7 @@
"via": {
"kind": "Ingress",
"name": "matrix-routing",
"source": "communication"
"source": "comms"
}
},
{
@ -2505,7 +2552,7 @@
"via": {
"kind": "Ingress",
"name": "matrix-routing",
"source": "communication"
"source": "comms"
}
},
{
@ -2525,7 +2572,7 @@
"via": {
"kind": "Ingress",
"name": "matrix-routing",
"source": "communication"
"source": "comms"
}
},
{
@ -2545,7 +2592,7 @@
"via": {
"kind": "Ingress",
"name": "matrix-routing",
"source": "communication"
"source": "comms"
}
},
{
@ -2565,7 +2612,7 @@
"via": {
"kind": "Ingress",
"name": "matrix-routing",
"source": "communication"
"source": "comms"
}
},
{
@ -2575,17 +2622,12 @@
"namespace": "comms",
"service": "othrys-synapse-matrix-synapse",
"port": 8008,
"workloads": [
{
"kind": "Deployment",
"name": "othrys-synapse-matrix-synapse"
}
]
"workloads": []
},
"via": {
"kind": "Ingress",
"name": "matrix-routing",
"source": "communication"
"source": "comms"
}
},
{
@ -2608,6 +2650,26 @@
"source": "monerod"
}
},
{
"host": "notes.bstein.dev",
"path": "/",
"backend": {
"namespace": "outline",
"service": "outline",
"port": 80,
"workloads": [
{
"kind": "Deployment",
"name": "outline"
}
]
},
"via": {
"kind": "Ingress",
"name": "outline",
"source": "outline"
}
},
{
"host": "office.bstein.dev",
"path": "/",
@ -2728,6 +2790,26 @@
"source": "jellyfin"
}
},
{
"host": "tasks.bstein.dev",
"path": "/",
"backend": {
"namespace": "planka",
"service": "planka",
"port": 80,
"workloads": [
{
"kind": "Deployment",
"name": "planka"
}
]
},
"via": {
"kind": "Ingress",
"name": "planka",
"source": "planka"
}
},
{
"host": "vault.bstein.dev",
"path": "/",
@ -2750,12 +2832,28 @@
}
],
"helmrelease_host_hints": {
"comms:comms/othrys-element": [
"call.live.bstein.dev",
"live.bstein.dev",
"matrix.live.bstein.dev"
],
"comms:comms/othrys-synapse": [
"bstein.dev",
"kit.live.bstein.dev",
"live.bstein.dev",
"matrix.live.bstein.dev",
"registry.bstein.dev",
"sso.bstein.dev"
],
"gitops-ui:flux-system/weave-gitops": [
"cd.bstein.dev"
],
"harbor:harbor/harbor": [
"registry.bstein.dev"
],
"logging:logging/data-prepper": [
"registry.bstein.dev"
],
"mailu:mailu-mailserver/mailu": [
"bstein.dev",
"mail.bstein.dev"
@ -2764,6 +2862,7 @@
"alerts.bstein.dev"
],
"monitoring:monitoring/grafana": [
"bstein.dev",
"metrics.bstein.dev",
"sso.bstein.dev"
]

View File

@ -7,10 +7,7 @@ sources:
- name: bstein-dev-home
path: services/bstein-dev-home
targetNamespace: bstein-dev-home
- name: ci-demo
path: services/ci-demo
targetNamespace: null
- name: communication
- name: comms
path: services/comms
targetNamespace: comms
- name: core
@ -43,12 +40,18 @@ sources:
- name: keycloak
path: services/keycloak
targetNamespace: sso
- name: logging
path: services/logging
targetNamespace: null
- name: longhorn-ui
path: infrastructure/longhorn/ui-ingress
targetNamespace: longhorn-system
- name: mailu
path: services/mailu
targetNamespace: mailu-mailserver
- name: maintenance
path: services/maintenance
targetNamespace: null
- name: metallb
path: infrastructure/metallb
targetNamespace: metallb-system
@ -70,9 +73,18 @@ sources:
- name: openldap
path: services/openldap
targetNamespace: sso
- name: outline
path: services/outline
targetNamespace: outline
- name: pegasus
path: services/pegasus
targetNamespace: jellyfin
- name: planka
path: services/planka
targetNamespace: planka
- name: postgres
path: infrastructure/postgres
targetNamespace: postgres
- name: sui-metrics
path: services/sui-metrics/overlays/atlas
targetNamespace: sui-metrics
@ -100,7 +112,7 @@ workloads:
serviceAccountName: null
nodeSelector: {}
images:
- ollama/ollama:latest
- ollama/ollama@sha256:2c9595c555fd70a28363489ac03bd5bf9e7c5bdf2890373c3a830ffd7252ce6d
- kind: Deployment
namespace: bstein-dev-home
name: bstein-dev-home-backend
@ -111,7 +123,7 @@ workloads:
kubernetes.io/arch: arm64
node-role.kubernetes.io/worker: 'true'
images:
- registry.bstein.dev/bstein/bstein-dev-home-backend:0.1.1-84
- registry.bstein.dev/bstein/bstein-dev-home-backend:0.1.1-92
- kind: Deployment
namespace: bstein-dev-home
name: bstein-dev-home-frontend
@ -122,7 +134,7 @@ workloads:
kubernetes.io/arch: arm64
node-role.kubernetes.io/worker: 'true'
images:
- registry.bstein.dev/bstein/bstein-dev-home-frontend:0.1.1-84
- registry.bstein.dev/bstein/bstein-dev-home-frontend:0.1.1-92
- kind: Deployment
namespace: bstein-dev-home
name: chat-ai-gateway
@ -134,16 +146,6 @@ workloads:
node-role.kubernetes.io/worker: 'true'
images:
- python:3.11-slim
- kind: Deployment
namespace: ci-demo
name: ci-demo
labels:
app.kubernetes.io/name: ci-demo
serviceAccountName: null
nodeSelector:
hardware: rpi4
images:
- registry.bstein.dev/infra/ci-demo:v0.0.0-3
- kind: Deployment
namespace: comms
name: atlasbot
@ -173,7 +175,7 @@ workloads:
nodeSelector:
hardware: rpi5
images:
- ghcr.io/element-hq/element-call:latest
- ghcr.io/element-hq/element-call@sha256:e6897c7818331714eae19d83ef8ea94a8b41115f0d8d3f62c2fed2d02c65c9bc
- kind: Deployment
namespace: comms
name: livekit
@ -222,42 +224,6 @@ workloads:
nodeSelector: {}
images:
- nginx:1.27-alpine
- kind: Deployment
namespace: comms
name: othrys-element-element-web
labels:
app.kubernetes.io/instance: othrys-element
app.kubernetes.io/name: element-web
serviceAccountName: othrys-element-element-web
nodeSelector:
hardware: rpi5
images:
- ghcr.io/element-hq/element-web:v1.12.6
- kind: Deployment
namespace: comms
name: othrys-synapse-matrix-synapse
labels:
app.kubernetes.io/component: synapse
app.kubernetes.io/instance: othrys-synapse
app.kubernetes.io/name: matrix-synapse
serviceAccountName: default
nodeSelector:
hardware: rpi5
images:
- ghcr.io/element-hq/synapse:v1.144.0
- kind: Deployment
namespace: comms
name: othrys-synapse-redis-master
labels:
app.kubernetes.io/component: master
app.kubernetes.io/instance: othrys-synapse
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: redis
helm.sh/chart: redis-17.17.1
serviceAccountName: othrys-synapse-redis
nodeSelector: {}
images:
- docker.io/bitnamilegacy/redis:7.0.12-debian-11-r34
- kind: DaemonSet
namespace: crypto
name: monero-xmrig
@ -267,7 +233,7 @@ workloads:
nodeSelector:
node-role.kubernetes.io/worker: 'true'
images:
- ghcr.io/tari-project/xmrig:latest
- ghcr.io/tari-project/xmrig@sha256:80defbfd0b640d604c91cb5101d3642db7928e1e68ee3c6b011289b3565a39d9
- kind: Deployment
namespace: crypto
name: monero-p2pool
@ -460,6 +426,46 @@ workloads:
kubernetes.io/os: linux
images:
- hashicorp/vault-csi-provider:1.7.0
- kind: DaemonSet
namespace: logging
name: node-image-gc-rpi4
labels:
app: node-image-gc-rpi4
serviceAccountName: node-image-gc-rpi4
nodeSelector:
hardware: rpi4
images:
- bitnami/kubectl@sha256:554ab88b1858e8424c55de37ad417b16f2a0e65d1607aa0f3fe3ce9b9f10b131
- kind: DaemonSet
namespace: logging
name: node-image-prune-rpi5
labels:
app: node-image-prune-rpi5
serviceAccountName: node-image-prune-rpi5
nodeSelector:
hardware: rpi5
images:
- bitnami/kubectl@sha256:554ab88b1858e8424c55de37ad417b16f2a0e65d1607aa0f3fe3ce9b9f10b131
- kind: DaemonSet
namespace: logging
name: node-log-rotation
labels:
app: node-log-rotation
serviceAccountName: node-log-rotation
nodeSelector:
hardware: rpi5
images:
- bitnami/kubectl@sha256:554ab88b1858e8424c55de37ad417b16f2a0e65d1607aa0f3fe3ce9b9f10b131
- kind: Deployment
namespace: logging
name: oauth2-proxy-logs
labels:
app: oauth2-proxy-logs
serviceAccountName: null
nodeSelector:
node-role.kubernetes.io/worker: 'true'
images:
- quay.io/oauth2-proxy/oauth2-proxy:v7.6.0
- kind: Deployment
namespace: longhorn-system
name: oauth2-proxy-longhorn
@ -479,7 +485,7 @@ workloads:
nodeSelector:
mailu.bstein.dev/vip: 'true'
images:
- lachlanevenson/k8s-kubectl:latest
- registry.bstein.dev/bstein/kubectl:1.35.0
- kind: Deployment
namespace: mailu-mailserver
name: mailu-sync-listener
@ -490,30 +496,24 @@ workloads:
images:
- python:3.11-alpine
- kind: DaemonSet
namespace: metallb-system
name: metallb-speaker
namespace: maintenance
name: node-image-sweeper
labels:
app.kubernetes.io/component: speaker
app.kubernetes.io/instance: metallb
app.kubernetes.io/name: metallb
serviceAccountName: metallb-speaker
app: node-image-sweeper
serviceAccountName: node-image-sweeper
nodeSelector:
kubernetes.io/os: linux
images:
- quay.io/frrouting/frr:10.4.1
- quay.io/metallb/speaker:v0.15.3
- kind: Deployment
namespace: metallb-system
name: metallb-controller
- python:3.12.9-alpine3.20
- kind: DaemonSet
namespace: maintenance
name: node-nofile
labels:
app.kubernetes.io/component: controller
app.kubernetes.io/instance: metallb
app.kubernetes.io/name: metallb
serviceAccountName: metallb-controller
nodeSelector:
kubernetes.io/os: linux
app: node-nofile
serviceAccountName: node-nofile
nodeSelector: {}
images:
- quay.io/metallb/controller:v0.15.3
- bitnami/kubectl@sha256:554ab88b1858e8424c55de37ad417b16f2a0e65d1607aa0f3fe3ce9b9f10b131
- kind: DaemonSet
namespace: monitoring
name: dcgm-exporter
@ -523,6 +523,16 @@ workloads:
nodeSelector: {}
images:
- registry.bstein.dev/monitoring/dcgm-exporter:4.4.2-4.7.0-ubuntu22.04
- kind: DaemonSet
namespace: monitoring
name: jetson-tegrastats-exporter
labels:
app: jetson-tegrastats-exporter
serviceAccountName: default
nodeSelector:
jetson: 'true'
images:
- python:3.10-slim
- kind: Deployment
namespace: monitoring
name: postmark-exporter
@ -541,7 +551,7 @@ workloads:
nodeSelector:
hardware: rpi5
images:
- collabora/code:latest
- collabora/code@sha256:3c58d0e9bae75e4647467d0c7d91cb66f261d3e814709aed590b5c334a04db26
- kind: Deployment
namespace: nextcloud
name: nextcloud
@ -552,6 +562,46 @@ workloads:
hardware: rpi5
images:
- nextcloud:29-apache
- kind: Deployment
namespace: outline
name: outline
labels:
app: outline
serviceAccountName: null
nodeSelector:
node-role.kubernetes.io/worker: 'true'
images:
- outlinewiki/outline:1.2.0
- kind: Deployment
namespace: outline
name: outline-redis
labels:
app: outline-redis
serviceAccountName: null
nodeSelector:
node-role.kubernetes.io/worker: 'true'
images:
- redis:7.4.1-alpine
- kind: Deployment
namespace: planka
name: planka
labels:
app: planka
serviceAccountName: null
nodeSelector:
node-role.kubernetes.io/worker: 'true'
images:
- ghcr.io/plankanban/planka:2.0.0-rc.4
- kind: StatefulSet
namespace: postgres
name: postgres
labels:
app: postgres
serviceAccountName: postgres-vault
nodeSelector:
node-role.kubernetes.io/worker: 'true'
images:
- postgres:15
- kind: Deployment
namespace: sso
name: keycloak
@ -663,16 +713,6 @@ services:
port: 80
targetPort: 8080
protocol: TCP
- namespace: ci-demo
name: ci-demo
type: ClusterIP
selector:
app.kubernetes.io/name: ci-demo
ports:
- name: http
port: 80
targetPort: http
protocol: TCP
- namespace: comms
name: coturn
type: LoadBalancer
@ -971,64 +1011,6 @@ services:
port: 80
targetPort: 80
protocol: TCP
- namespace: comms
name: othrys-element-element-web
type: ClusterIP
selector:
app.kubernetes.io/instance: othrys-element
app.kubernetes.io/name: element-web
ports:
- name: http
port: 80
targetPort: http
protocol: TCP
- namespace: comms
name: othrys-synapse-matrix-synapse
type: ClusterIP
selector:
app.kubernetes.io/component: synapse
app.kubernetes.io/instance: othrys-synapse
app.kubernetes.io/name: matrix-synapse
ports:
- name: http
port: 8008
targetPort: http
protocol: TCP
- namespace: comms
name: othrys-synapse-redis-headless
type: ClusterIP
selector:
app.kubernetes.io/instance: othrys-synapse
app.kubernetes.io/name: redis
ports:
- name: tcp-redis
port: 6379
targetPort: redis
protocol: TCP
- namespace: comms
name: othrys-synapse-redis-master
type: ClusterIP
selector:
app.kubernetes.io/component: master
app.kubernetes.io/instance: othrys-synapse
app.kubernetes.io/name: redis
ports:
- name: tcp-redis
port: 6379
targetPort: redis
protocol: TCP
- namespace: comms
name: othrys-synapse-replication
type: ClusterIP
selector:
app.kubernetes.io/component: synapse
app.kubernetes.io/instance: othrys-synapse
app.kubernetes.io/name: matrix-synapse
ports:
- name: replication
port: 9093
targetPort: replication
protocol: TCP
- namespace: crypto
name: monerod
type: ClusterIP
@ -1156,6 +1138,16 @@ services:
port: 443
targetPort: websecure
protocol: TCP
- namespace: logging
name: oauth2-proxy-logs
type: ClusterIP
selector:
app: oauth2-proxy-logs
ports:
- name: http
port: 80
targetPort: 4180
protocol: TCP
- namespace: longhorn-system
name: oauth2-proxy-longhorn
type: ClusterIP
@ -1208,18 +1200,6 @@ services:
port: 8080
targetPort: 8080
protocol: TCP
- namespace: metallb-system
name: metallb-webhook-service
type: ClusterIP
selector:
app.kubernetes.io/component: controller
app.kubernetes.io/instance: metallb
app.kubernetes.io/name: metallb
ports:
- name: null
port: 443
targetPort: 9443
protocol: TCP
- namespace: monitoring
name: dcgm-exporter
type: ClusterIP
@ -1230,6 +1210,16 @@ services:
port: 9400
targetPort: metrics
protocol: TCP
- namespace: monitoring
name: jetson-tegrastats-exporter
type: ClusterIP
selector:
app: jetson-tegrastats-exporter
ports:
- name: metrics
port: 9100
targetPort: metrics
protocol: TCP
- namespace: monitoring
name: postmark-exporter
type: ClusterIP
@ -1260,6 +1250,46 @@ services:
port: 80
targetPort: http
protocol: TCP
- namespace: outline
name: outline
type: ClusterIP
selector:
app: outline
ports:
- name: http
port: 80
targetPort: http
protocol: TCP
- namespace: outline
name: outline-redis
type: ClusterIP
selector:
app: outline-redis
ports:
- name: redis
port: 6379
targetPort: redis
protocol: TCP
- namespace: planka
name: planka
type: ClusterIP
selector:
app: planka
ports:
- name: http
port: 80
targetPort: http
protocol: TCP
- namespace: postgres
name: postgres-service
type: ClusterIP
selector:
app: postgres
ports:
- name: postgres
port: 5432
targetPort: 5432
protocol: TCP
- namespace: sso
name: keycloak
type: ClusterIP
@ -1391,7 +1421,7 @@ http_endpoints:
via:
kind: Ingress
name: matrix-wellknown-bstein-dev
source: communication
source: comms
- host: bstein.dev
path: /.well-known/matrix/server
backend:
@ -1402,7 +1432,7 @@ http_endpoints:
via:
kind: Ingress
name: matrix-wellknown-bstein-dev
source: communication
source: comms
- host: bstein.dev
path: /api
backend:
@ -1428,7 +1458,7 @@ http_endpoints:
via:
kind: Ingress
name: element-call
source: communication
source: comms
- host: chat.ai.bstein.dev
path: /
backend:
@ -1480,7 +1510,7 @@ http_endpoints:
via:
kind: Ingress
name: livekit-jwt-ingress
source: communication
source: comms
- host: kit.live.bstein.dev
path: /livekit/sfu
backend:
@ -1493,20 +1523,7 @@ http_endpoints:
via:
kind: Ingress
name: livekit-ingress
source: communication
- host: live.bstein.dev
path: /
backend:
namespace: comms
service: othrys-element-element-web
port: 80
workloads:
- kind: Deployment
name: othrys-element-element-web
via:
kind: Ingress
name: othrys-element-element-web
source: communication
source: comms
- host: live.bstein.dev
path: /.well-known/matrix/client
backend:
@ -1517,7 +1534,7 @@ http_endpoints:
via:
kind: Ingress
name: matrix-wellknown
source: communication
source: comms
- host: live.bstein.dev
path: /.well-known/matrix/server
backend:
@ -1528,20 +1545,31 @@ http_endpoints:
via:
kind: Ingress
name: matrix-wellknown
source: communication
source: comms
- host: live.bstein.dev
path: /_matrix
backend:
namespace: comms
service: othrys-synapse-matrix-synapse
port: 8008
workloads: &id002
- kind: Deployment
name: othrys-synapse-matrix-synapse
workloads: []
via:
kind: Ingress
name: matrix-routing
source: communication
source: comms
- host: logs.bstein.dev
path: /
backend:
namespace: logging
service: oauth2-proxy-logs
port: http
workloads:
- kind: Deployment
name: oauth2-proxy-logs
via:
kind: Ingress
name: logs
source: logging
- host: longhorn.bstein.dev
path: /
backend:
@ -1572,13 +1600,13 @@ http_endpoints:
namespace: comms
service: matrix-authentication-service
port: 8080
workloads: &id003
workloads: &id002
- kind: Deployment
name: matrix-authentication-service
via:
kind: Ingress
name: matrix-routing
source: communication
source: comms
- host: matrix.live.bstein.dev
path: /.well-known/matrix/client
backend:
@ -1589,7 +1617,7 @@ http_endpoints:
via:
kind: Ingress
name: matrix-wellknown-matrix-live
source: communication
source: comms
- host: matrix.live.bstein.dev
path: /.well-known/matrix/server
backend:
@ -1600,86 +1628,86 @@ http_endpoints:
via:
kind: Ingress
name: matrix-wellknown-matrix-live
source: communication
source: comms
- host: matrix.live.bstein.dev
path: /_matrix
backend:
namespace: comms
service: othrys-synapse-matrix-synapse
port: 8008
workloads: *id002
workloads: []
via:
kind: Ingress
name: matrix-routing
source: communication
source: comms
- host: matrix.live.bstein.dev
path: /_matrix/client/r0/register
backend:
namespace: comms
service: matrix-guest-register
port: 8080
workloads: &id004
workloads: &id003
- kind: Deployment
name: matrix-guest-register
via:
kind: Ingress
name: matrix-routing
source: communication
source: comms
- host: matrix.live.bstein.dev
path: /_matrix/client/v3/login
backend:
namespace: comms
service: matrix-authentication-service
port: 8080
workloads: *id003
workloads: *id002
via:
kind: Ingress
name: matrix-routing
source: communication
source: comms
- host: matrix.live.bstein.dev
path: /_matrix/client/v3/logout
backend:
namespace: comms
service: matrix-authentication-service
port: 8080
workloads: *id003
workloads: *id002
via:
kind: Ingress
name: matrix-routing
source: communication
source: comms
- host: matrix.live.bstein.dev
path: /_matrix/client/v3/refresh
backend:
namespace: comms
service: matrix-authentication-service
port: 8080
workloads: *id003
workloads: *id002
via:
kind: Ingress
name: matrix-routing
source: communication
source: comms
- host: matrix.live.bstein.dev
path: /_matrix/client/v3/register
backend:
namespace: comms
service: matrix-guest-register
port: 8080
workloads: *id004
workloads: *id003
via:
kind: Ingress
name: matrix-routing
source: communication
source: comms
- host: matrix.live.bstein.dev
path: /_synapse
backend:
namespace: comms
service: othrys-synapse-matrix-synapse
port: 8008
workloads: *id002
workloads: []
via:
kind: Ingress
name: matrix-routing
source: communication
source: comms
- host: monero.bstein.dev
path: /
backend:
@ -1693,6 +1721,19 @@ http_endpoints:
kind: Ingress
name: monerod
source: monerod
- host: notes.bstein.dev
path: /
backend:
namespace: outline
service: outline
port: 80
workloads:
- kind: Deployment
name: outline
via:
kind: Ingress
name: outline
source: outline
- host: office.bstein.dev
path: /
backend:
@ -1771,6 +1812,19 @@ http_endpoints:
kind: Ingress
name: jellyfin
source: jellyfin
- host: tasks.bstein.dev
path: /
backend:
namespace: planka
service: planka
port: 80
workloads:
- kind: Deployment
name: planka
via:
kind: Ingress
name: planka
source: planka
- host: vault.bstein.dev
path: /
backend:
@ -1785,15 +1839,29 @@ http_endpoints:
name: vaultwarden-ingress
source: vaultwarden
helmrelease_host_hints:
comms:comms/othrys-element:
- call.live.bstein.dev
- live.bstein.dev
- matrix.live.bstein.dev
comms:comms/othrys-synapse:
- bstein.dev
- kit.live.bstein.dev
- live.bstein.dev
- matrix.live.bstein.dev
- registry.bstein.dev
- sso.bstein.dev
gitops-ui:flux-system/weave-gitops:
- cd.bstein.dev
harbor:harbor/harbor:
- registry.bstein.dev
logging:logging/data-prepper:
- registry.bstein.dev
mailu:mailu-mailserver/mailu:
- bstein.dev
- mail.bstein.dev
monitoring:monitoring/alertmanager:
- alerts.bstein.dev
monitoring:monitoring/grafana:
- bstein.dev
- metrics.bstein.dev
- sso.bstein.dev

View File

@ -47,15 +47,14 @@ flowchart LR
wl_comms_livekit["comms/livekit (Deployment)"]
svc_comms_livekit --> wl_comms_livekit
host_live_bstein_dev["live.bstein.dev"]
svc_comms_othrys_element_element_web["comms/othrys-element-element-web (Service)"]
host_live_bstein_dev --> svc_comms_othrys_element_element_web
wl_comms_othrys_element_element_web["comms/othrys-element-element-web (Deployment)"]
svc_comms_othrys_element_element_web --> wl_comms_othrys_element_element_web
host_live_bstein_dev --> svc_comms_matrix_wellknown
svc_comms_othrys_synapse_matrix_synapse["comms/othrys-synapse-matrix-synapse (Service)"]
host_live_bstein_dev --> svc_comms_othrys_synapse_matrix_synapse
wl_comms_othrys_synapse_matrix_synapse["comms/othrys-synapse-matrix-synapse (Deployment)"]
svc_comms_othrys_synapse_matrix_synapse --> wl_comms_othrys_synapse_matrix_synapse
host_logs_bstein_dev["logs.bstein.dev"]
svc_logging_oauth2_proxy_logs["logging/oauth2-proxy-logs (Service)"]
host_logs_bstein_dev --> svc_logging_oauth2_proxy_logs
wl_logging_oauth2_proxy_logs["logging/oauth2-proxy-logs (Deployment)"]
svc_logging_oauth2_proxy_logs --> wl_logging_oauth2_proxy_logs
host_longhorn_bstein_dev["longhorn.bstein.dev"]
svc_longhorn_system_oauth2_proxy_longhorn["longhorn-system/oauth2-proxy-longhorn (Service)"]
host_longhorn_bstein_dev --> svc_longhorn_system_oauth2_proxy_longhorn
@ -80,6 +79,11 @@ flowchart LR
host_monero_bstein_dev --> svc_crypto_monerod
wl_crypto_monerod["crypto/monerod (Deployment)"]
svc_crypto_monerod --> wl_crypto_monerod
host_notes_bstein_dev["notes.bstein.dev"]
svc_outline_outline["outline/outline (Service)"]
host_notes_bstein_dev --> svc_outline_outline
wl_outline_outline["outline/outline (Deployment)"]
svc_outline_outline --> wl_outline_outline
host_office_bstein_dev["office.bstein.dev"]
svc_nextcloud_collabora["nextcloud/collabora (Service)"]
host_office_bstein_dev --> svc_nextcloud_collabora
@ -110,6 +114,11 @@ flowchart LR
host_stream_bstein_dev --> svc_jellyfin_jellyfin
wl_jellyfin_jellyfin["jellyfin/jellyfin (Deployment)"]
svc_jellyfin_jellyfin --> wl_jellyfin_jellyfin
host_tasks_bstein_dev["tasks.bstein.dev"]
svc_planka_planka["planka/planka (Service)"]
host_tasks_bstein_dev --> svc_planka_planka
wl_planka_planka["planka/planka (Deployment)"]
svc_planka_planka --> wl_planka_planka
host_vault_bstein_dev["vault.bstein.dev"]
svc_vaultwarden_vaultwarden_service["vaultwarden/vaultwarden-service (Service)"]
host_vault_bstein_dev --> svc_vaultwarden_vaultwarden_service
@ -133,10 +142,7 @@ flowchart LR
wl_comms_livekit_token_service
svc_comms_livekit
wl_comms_livekit
svc_comms_othrys_element_element_web
wl_comms_othrys_element_element_web
svc_comms_othrys_synapse_matrix_synapse
wl_comms_othrys_synapse_matrix_synapse
svc_comms_matrix_authentication_service
wl_comms_matrix_authentication_service
svc_comms_matrix_guest_register
@ -160,6 +166,10 @@ flowchart LR
svc_jenkins_jenkins
wl_jenkins_jenkins
end
subgraph logging[logging]
svc_logging_oauth2_proxy_logs
wl_logging_oauth2_proxy_logs
end
subgraph longhorn_system[longhorn-system]
svc_longhorn_system_oauth2_proxy_longhorn
wl_longhorn_system_oauth2_proxy_longhorn
@ -173,6 +183,14 @@ flowchart LR
svc_nextcloud_collabora
wl_nextcloud_collabora
end
subgraph outline[outline]
svc_outline_outline
wl_outline_outline
end
subgraph planka[planka]
svc_planka_planka
wl_planka_planka
end
subgraph sso[sso]
svc_sso_oauth2_proxy
wl_sso_oauth2_proxy

View File

@ -5,7 +5,7 @@ namespace: comms
resources:
- namespace.yaml
- mas-configmap.yaml
- element-rendered.yaml
- helmrelease.yaml
- livekit-config.yaml
- element-call-config.yaml
- element-call-deployment.yaml
@ -24,7 +24,6 @@ resources:
- synapse-seeder-admin-ensure-job.yaml
- synapse-user-seed-job.yaml
- mas-local-users-ensure-job.yaml
- synapse-rendered.yaml
- mas-deployment.yaml
- livekit-token-deployment.yaml
- livekit.yaml
@ -39,9 +38,6 @@ resources:
- livekit-middlewares.yaml
- matrix-ingress.yaml
patches:
- path: synapse-deployment-strategy-patch.yaml
configMapGenerator:
- name: matrix-guest-register
files:

View File

@ -62,7 +62,7 @@ spec:
mountPath: /work
containers:
- name: patch
image: bitnami/kubectl:latest
image: registry.bstein.dev/bstein/kubectl:1.35.0
command: ["/bin/sh", "-c"]
args:
- |

View File

@ -13,7 +13,7 @@ spec:
restartPolicy: Never
containers:
- name: ensure
image: bitnami/kubectl:latest
image: registry.bstein.dev/bstein/kubectl:1.35.0
command: ["/bin/sh", "-c"]
args:
- |

View File

@ -1,11 +0,0 @@
# services/comms/synapse-deployment-strategy-patch.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: othrys-synapse-matrix-synapse
spec:
strategy:
type: RollingUpdate
rollingUpdate:
maxSurge: 0
maxUnavailable: 1

View File

@ -1,895 +0,0 @@
---
# Source: matrix-synapse/charts/redis/templates/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
automountServiceAccountToken: true
metadata:
name: othrys-synapse-redis
labels:
app.kubernetes.io/instance: othrys-synapse
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: redis
helm.sh/chart: redis-17.17.1
---
# Source: matrix-synapse/templates/secrets.yaml
apiVersion: v1
kind: Secret
metadata:
name: othrys-synapse-matrix-synapse
labels:
helm.sh/chart: matrix-synapse-3.12.17
app.kubernetes.io/name: matrix-synapse
app.kubernetes.io/instance: othrys-synapse
app.kubernetes.io/version: "1.144.0"
app.kubernetes.io/managed-by: Helm
stringData:
config.yaml: |
## Registration ##
## API Configuration ##
## Database configuration ##
database:
name: "psycopg2"
args:
user: "synapse"
password: "@@POSTGRES_PASSWORD@@"
database: "synapse"
host: "postgres-service.postgres.svc.cluster.local"
port: 5432
sslmode: "prefer"
cp_min: 5
cp_max: 10
## Redis configuration ##
redis:
enabled: true
host: "othrys-synapse-redis-master"
port: 6379
password: "@@REDIS_PASSWORD@@"
---
# Source: matrix-synapse/charts/redis/templates/configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: othrys-synapse-redis-configuration
labels:
app.kubernetes.io/instance: othrys-synapse
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: redis
helm.sh/chart: redis-17.17.1
data:
redis.conf: |-
# User-supplied common configuration:
# Enable AOF https://redis.io/topics/persistence#append-only-file
appendonly yes
# Disable RDB persistence, AOF persistence already enabled.
save ""
# End of common configuration
master.conf: |-
dir /data
# User-supplied master configuration:
rename-command FLUSHDB ""
rename-command FLUSHALL ""
# End of master configuration
replica.conf: |-
dir /data
# User-supplied replica configuration:
rename-command FLUSHDB ""
rename-command FLUSHALL ""
# End of replica configuration
---
# Source: matrix-synapse/templates/configuration.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: othrys-synapse-matrix-synapse
labels:
helm.sh/chart: matrix-synapse-3.12.17
app.kubernetes.io/name: matrix-synapse
app.kubernetes.io/instance: othrys-synapse
app.kubernetes.io/version: "1.144.0"
app.kubernetes.io/managed-by: Helm
data:
log.yaml: |
version: 1
formatters:
precise:
format: '%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s- %(message)s'
filters:
context:
(): synapse.util.logcontext.LoggingContextFilter
request: ""
handlers:
console:
class: logging.StreamHandler
formatter: precise
filters: [context]
level: INFO
loggers:
synapse:
level: INFO
root:
level: INFO
handlers: [console]
homeserver.yaml: |
# NOTE:
# Secrets are stored in separate configs to better fit K8s concepts
## Server ##
server_name: "live.bstein.dev"
public_baseurl: "https://matrix.live.bstein.dev"
pid_file: /homeserver.pid
web_client: False
soft_file_limit: 0
log_config: "/synapse/config/log.yaml"
report_stats: false
instance_map:
main:
host: othrys-synapse-replication
port: 9093
## Ports ##
listeners:
- port: 8008
tls: false
bind_addresses: ["0.0.0.0"]
type: http
x_forwarded: true
resources:
- names:
- client
- federation
compress: false
- port: 9090
tls: false
bind_addresses: ["::"]
type: http
resources:
- names: [metrics]
compress: false
- port: 9093
tls: false
bind_addresses: ["::"]
type: http
resources:
- names: [replication]
compress: false
## Files ##
media_store_path: "/synapse/data/media"
uploads_path: "/synapse/data/uploads"
## Registration ##
enable_registration: false
## Metrics ###
enable_metrics: true
## Signing Keys ##
signing_key_path: "/synapse/keys/signing.key"
macaroon_secret_key: "@@MACAROON_SECRET_KEY@@"
# The trusted servers to download signing keys from.
trusted_key_servers:
- server_name: matrix.org
## Workers ##
## Extra config ##
allow_guest_access: true
allow_public_rooms_without_auth: true
auto_join_rooms:
- "#othrys:live.bstein.dev"
autocreate_auto_join_rooms: true
default_room_version: "11"
experimental_features:
msc3266_enabled: true
msc4108_enabled: true
msc4143_enabled: true
msc4222_enabled: true
max_event_delay_duration: 24h
password_config:
enabled: false
turn_uris:
- "turn:turn.live.bstein.dev:3478?transport=udp"
- "turn:turn.live.bstein.dev:3478?transport=tcp"
- "turns:turn.live.bstein.dev:5349?transport=tcp"
turn_shared_secret: "@@TURN_SECRET@@"
turn_allow_guests: true
turn_user_lifetime: 86400000
rc_login:
address:
burst_count: 20
per_second: 5
account:
burst_count: 20
per_second: 5
failed_attempts:
burst_count: 20
per_second: 5
rc_message:
per_second: 0.5
burst_count: 30
rc_delayed_event_mgmt:
per_second: 1
burst_count: 20
room_list_publication_rules:
- action: allow
well_known_client:
"m.homeserver":
"base_url": "https://matrix.live.bstein.dev"
"org.matrix.msc2965.authentication":
"issuer": "https://matrix.live.bstein.dev/"
"account": "https://matrix.live.bstein.dev/account/"
"org.matrix.msc4143.rtc_foci":
- type: "livekit"
livekit_service_url: "https://kit.live.bstein.dev/livekit/jwt"
matrix_authentication_service:
enabled: true
endpoint: http://matrix-authentication-service:8080/
secret: "@@MAS_SHARED_SECRET@@"
---
# Source: matrix-synapse/templates/pvc.yaml
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: othrys-synapse-matrix-synapse
labels:
helm.sh/chart: matrix-synapse-3.12.17
app.kubernetes.io/name: matrix-synapse
app.kubernetes.io/instance: othrys-synapse
app.kubernetes.io/version: "1.144.0"
app.kubernetes.io/managed-by: Helm
spec:
accessModes:
- "ReadWriteOnce"
resources:
requests:
storage: "50Gi"
storageClassName: "asteria"
---
# Source: matrix-synapse/charts/redis/templates/headless-svc.yaml
apiVersion: v1
kind: Service
metadata:
name: othrys-synapse-redis-headless
labels:
app.kubernetes.io/instance: othrys-synapse
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: redis
helm.sh/chart: redis-17.17.1
annotations:
spec:
type: ClusterIP
clusterIP: None
ports:
- name: tcp-redis
port: 6379
targetPort: redis
selector:
app.kubernetes.io/instance: othrys-synapse
app.kubernetes.io/name: redis
---
# Source: matrix-synapse/charts/redis/templates/master/service.yaml
apiVersion: v1
kind: Service
metadata:
name: othrys-synapse-redis-master
labels:
app.kubernetes.io/instance: othrys-synapse
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: redis
helm.sh/chart: redis-17.17.1
app.kubernetes.io/component: master
spec:
type: ClusterIP
internalTrafficPolicy: Cluster
sessionAffinity: None
ports:
- name: tcp-redis
port: 6379
targetPort: redis
nodePort: null
selector:
app.kubernetes.io/instance: othrys-synapse
app.kubernetes.io/name: redis
app.kubernetes.io/component: master
---
# Source: matrix-synapse/templates/service.yaml
apiVersion: v1
kind: Service
metadata:
name: othrys-synapse-matrix-synapse
labels:
helm.sh/chart: matrix-synapse-3.12.17
app.kubernetes.io/name: matrix-synapse
app.kubernetes.io/instance: othrys-synapse
app.kubernetes.io/version: "1.144.0"
app.kubernetes.io/managed-by: Helm
spec:
type: ClusterIP
ports:
- port: 8008
targetPort: http
protocol: TCP
name: http
selector:
app.kubernetes.io/component: synapse
app.kubernetes.io/name: matrix-synapse
app.kubernetes.io/instance: othrys-synapse
---
# Source: matrix-synapse/templates/service.yaml
apiVersion: v1
kind: Service
metadata:
name: othrys-synapse-replication
labels:
helm.sh/chart: matrix-synapse-3.12.17
app.kubernetes.io/name: matrix-synapse
app.kubernetes.io/instance: othrys-synapse
app.kubernetes.io/version: "1.144.0"
app.kubernetes.io/managed-by: Helm
spec:
type: ClusterIP
ports:
- port: 9093
targetPort: replication
protocol: TCP
name: replication
selector:
app.kubernetes.io/name: matrix-synapse
app.kubernetes.io/instance: othrys-synapse
app.kubernetes.io/component: synapse
---
# Source: matrix-synapse/charts/redis/templates/master/application.yaml
# Single-replica Bitnami Redis master used by Synapse (password auth, no TLS).
apiVersion: apps/v1
kind: Deployment
metadata:
  name: othrys-synapse-redis-master
  labels:
    app.kubernetes.io/instance: othrys-synapse
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/name: redis
    helm.sh/chart: redis-17.17.1
    app.kubernetes.io/component: master
spec:
  replicas: 1
  selector:
    matchLabels:
      app.kubernetes.io/instance: othrys-synapse
      app.kubernetes.io/name: redis
      app.kubernetes.io/component: master
  strategy:
    type: RollingUpdate
  template:
    metadata:
      labels:
        app.kubernetes.io/instance: othrys-synapse
        app.kubernetes.io/managed-by: Helm
        app.kubernetes.io/name: redis
        helm.sh/chart: redis-17.17.1
        app.kubernetes.io/component: master
      annotations:
        # Checksums of the referenced ConfigMaps/Secret: any change forces a rollout.
        checksum/configmap: 86bcc953bb473748a3d3dc60b7c11f34e60c93519234d4c37f42e22ada559d47
        checksum/health: aff24913d801436ea469d8d374b2ddb3ec4c43ee7ab24663d5f8ff1a1b6991a9
        checksum/scripts: 560c33ff34d845009b51830c332aa05fa211444d1877d3526d3599be7543aaa5
        checksum/secret: 44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a
    spec:
      securityContext:
        fsGroup: 1001
      serviceAccountName: othrys-synapse-redis
      automountServiceAccountToken: true
      affinity:
        # NOTE(review): podAffinity and nodeAffinity render as empty (null) keys —
        # accepted by the API server, but flagged by yamllint's empty-values rule.
        podAffinity:
        podAntiAffinity:
          # Soft anti-affinity: prefer spreading redis pods across nodes.
          preferredDuringSchedulingIgnoredDuringExecution:
            - podAffinityTerm:
                labelSelector:
                  matchLabels:
                    app.kubernetes.io/instance: othrys-synapse
                    app.kubernetes.io/name: redis
                    app.kubernetes.io/component: master
                topologyKey: kubernetes.io/hostname
              weight: 1
        nodeAffinity:
      enableServiceLinks: true
      terminationGracePeriodSeconds: 30
      containers:
        - name: redis
          # bitnamilegacy repo pin — presumably kept after Bitnami's catalog move;
          # TODO(review): confirm before upgrading this image.
          image: docker.io/bitnamilegacy/redis:7.0.12-debian-11-r34
          imagePullPolicy: "IfNotPresent"
          securityContext:
            allowPrivilegeEscalation: false
            capabilities:
              drop:
                - ALL
            runAsGroup: 0
            runAsNonRoot: true
            runAsUser: 1001
            seccompProfile:
              type: RuntimeDefault
          command:
            - /bin/bash
          args:
            - -c
            - /opt/bitnami/scripts/start-scripts/start-master.sh
          env:
            - name: BITNAMI_DEBUG
              value: "false"
            - name: REDIS_REPLICATION_MODE
              value: master
            # Password auth is mandatory; value comes from the synapse-redis Secret.
            - name: ALLOW_EMPTY_PASSWORD
              value: "no"
            - name: REDIS_PASSWORD
              valueFrom:
                secretKeyRef:
                  name: synapse-redis
                  key: redis-password
            - name: REDIS_TLS_ENABLED
              value: "no"
            - name: REDIS_PORT
              value: "6379"
          ports:
            - name: redis
              containerPort: 6379
          livenessProbe:
            initialDelaySeconds: 20
            periodSeconds: 5
            # One second longer than command timeout should prevent generation of zombie processes.
            timeoutSeconds: 6
            successThreshold: 1
            failureThreshold: 5
            exec:
              command:
                - sh
                - -c
                - /health/ping_liveness_local.sh 5
          readinessProbe:
            initialDelaySeconds: 20
            periodSeconds: 5
            timeoutSeconds: 2
            successThreshold: 1
            failureThreshold: 5
            exec:
              command:
                - sh
                - -c
                - /health/ping_readiness_local.sh 1
          # Empty requests/limits: the pod is scheduled with BestEffort QoS.
          resources:
            limits: {}
            requests: {}
          volumeMounts:
            - name: start-scripts
              mountPath: /opt/bitnami/scripts/start-scripts
            - name: health
              mountPath: /health
            - name: redis-data
              mountPath: /data
            - name: config
              mountPath: /opt/bitnami/redis/mounted-etc
            - name: redis-tmp-conf
              mountPath: /opt/bitnami/redis/etc/
            - name: tmp
              mountPath: /tmp
      volumes:
        - name: start-scripts
          configMap:
            name: othrys-synapse-redis-scripts
            defaultMode: 0755
        - name: health
          configMap:
            name: othrys-synapse-redis-health
            defaultMode: 0755
        - name: config
          configMap:
            name: othrys-synapse-redis-configuration
        - name: redis-tmp-conf
          emptyDir: {}
        - name: tmp
          emptyDir: {}
        # redis-data is an emptyDir: contents are lost when the pod is rescheduled.
        - name: redis-data
          emptyDir: {}
---
# Source: matrix-synapse/templates/deployment.yaml
# Server: live.bstein.dev
# Synapse homeserver main process. The container entrypoint templates secret
# placeholders into the config before exec'ing Synapse itself.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: othrys-synapse-matrix-synapse
  labels:
    helm.sh/chart: matrix-synapse-3.12.17
    app.kubernetes.io/name: matrix-synapse
    app.kubernetes.io/instance: othrys-synapse
    app.kubernetes.io/version: "1.144.0"
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/component: synapse
spec:
  replicas: 1
  strategy:
    type: RollingUpdate
  selector:
    matchLabels:
      app.kubernetes.io/name: matrix-synapse
      app.kubernetes.io/instance: othrys-synapse
      app.kubernetes.io/component: synapse
  template:
    metadata:
      annotations:
        # NOTE(review): not a real checksum — looks like a hand-bumped marker used
        # to force a rollout after config changes; confirm with the chart values.
        checksum/config: manual-rtc-enable-11
        checksum/secrets: ec9f3b254a562a0f0709461eb74a8cc91b8c1a2fb06be2594a131776c2541773
      labels:
        app.kubernetes.io/name: matrix-synapse
        app.kubernetes.io/instance: othrys-synapse
        app.kubernetes.io/component: synapse
    spec:
      serviceAccountName: default
      securityContext:
        fsGroup: 666
        runAsGroup: 666
        runAsUser: 666
      containers:
        - name: synapse
          # Startup shell: escape sed metacharacters in each secret, substitute the
          # @@PLACEHOLDER@@ tokens in the mounted config (secrets.yaml + a runtime
          # copy of homeserver.yaml), then exec Synapse with both config sources.
          command:
            - sh
            - -c
            - |
              export POSTGRES_PASSWORD=$(echo "${POSTGRES_PASSWORD:-}" | sed 's/\//\\\//g' | sed 's/\&/\\\&/g') && \
              export REDIS_PASSWORD=$(echo "${REDIS_PASSWORD:-}" | sed 's/\//\\\//g' | sed 's/\&/\\\&/g') && \
              export OIDC_CLIENT_SECRET_ESCAPED=$(echo "${OIDC_CLIENT_SECRET:-}" | sed 's/[\\/&]/\\&/g') && \
              export TURN_SECRET_ESCAPED=$(echo "${TURN_SECRET:-}" | sed 's/[\\/&]/\\&/g') && \
              export MAS_SHARED_SECRET_ESCAPED=$(echo "${MAS_SHARED_SECRET:-}" | sed 's/[\\/&]/\\&/g') && \
              export MACAROON_SECRET_KEY_ESCAPED=$(echo "${MACAROON_SECRET_KEY:-}" | sed 's/[\\/&]/\\&/g') && \
              cat /synapse/secrets/*.yaml | \
              sed -e "s/@@POSTGRES_PASSWORD@@/${POSTGRES_PASSWORD:-}/" \
              -e "s/@@REDIS_PASSWORD@@/${REDIS_PASSWORD:-}/" \
              > /synapse/config/conf.d/secrets.yaml
              cp /synapse/config/homeserver.yaml /synapse/runtime-config/homeserver.yaml && \
              if [ -n "${OIDC_CLIENT_SECRET_ESCAPED}" ]; then \
                sed -i "s/@@OIDC_CLIENT_SECRET@@/${OIDC_CLIENT_SECRET_ESCAPED}/g" /synapse/runtime-config/homeserver.yaml; \
              fi; \
              if [ -n "${TURN_SECRET_ESCAPED}" ]; then \
                sed -i "s/@@TURN_SECRET@@/${TURN_SECRET_ESCAPED}/g" /synapse/runtime-config/homeserver.yaml; \
              fi; \
              if [ -n "${MAS_SHARED_SECRET_ESCAPED}" ]; then \
                sed -i "s/@@MAS_SHARED_SECRET@@/${MAS_SHARED_SECRET_ESCAPED}/g" /synapse/runtime-config/homeserver.yaml; \
              fi; \
              if [ -n "${MACAROON_SECRET_KEY_ESCAPED}" ]; then \
                sed -i "s/@@MACAROON_SECRET_KEY@@/${MACAROON_SECRET_KEY_ESCAPED}/g" /synapse/runtime-config/homeserver.yaml; \
              fi
              exec python -B -m synapse.app.homeserver \
                -c /synapse/runtime-config/homeserver.yaml \
                -c /synapse/config/conf.d/
          # All secret material is injected via env from pre-existing Secrets.
          env:
            - name: POSTGRES_PASSWORD
              valueFrom:
                secretKeyRef:
                  name: synapse-db
                  key: POSTGRES_PASSWORD
            - name: REDIS_PASSWORD
              valueFrom:
                secretKeyRef:
                  name: synapse-redis
                  key: redis-password
            - name: OIDC_CLIENT_SECRET
              valueFrom:
                secretKeyRef:
                  name: synapse-oidc
                  key: client-secret
            - name: TURN_SECRET
              valueFrom:
                secretKeyRef:
                  name: turn-shared-secret
                  key: TURN_STATIC_AUTH_SECRET
            - name: MAS_SHARED_SECRET
              valueFrom:
                secretKeyRef:
                  name: mas-secrets-runtime
                  key: matrix_shared_secret
            - name: MACAROON_SECRET_KEY
              valueFrom:
                secretKeyRef:
                  name: synapse-macaroon
                  key: macaroon_secret_key
          image: "ghcr.io/element-hq/synapse:v1.144.0"
          imagePullPolicy: IfNotPresent
          # Container securityContext renders empty; the pod-level 666 uid/gid applies.
          securityContext:
            {}
          ports:
            - name: http
              containerPort: 8008
              protocol: TCP
            - name: replication
              containerPort: 9093
              protocol: TCP
            - name: metrics
              containerPort: 9090
              protocol: TCP
          livenessProbe:
            httpGet:
              path: /health
              port: http
          readinessProbe:
            httpGet:
              path: /health
              port: http
          startupProbe:
            failureThreshold: 12
            httpGet:
              path: /health
              port: http
          volumeMounts:
            - name: config
              mountPath: /synapse/config
            - name: runtime-config
              mountPath: /synapse/runtime-config
            - name: tmpconf
              mountPath: /synapse/config/conf.d
            - name: secrets
              mountPath: /synapse/secrets
            - name: signingkey
              mountPath: /synapse/keys
            - name: media
              mountPath: /synapse/data
            - name: tmpdir
              mountPath: /tmp
          resources:
            limits:
              cpu: "2"
              memory: 3Gi
            requests:
              cpu: 500m
              memory: 1Gi
      volumes:
        - name: config
          configMap:
            name: othrys-synapse-matrix-synapse
        - name: secrets
          secret:
            secretName: othrys-synapse-matrix-synapse
        # Federation signing key, populated by the pre-install signing-key Job.
        - name: signingkey
          secret:
            secretName: "othrys-synapse-signingkey"
            items:
              - key: "signing.key"
                path: signing.key
        - name: tmpconf
          emptyDir: {}
        - name: tmpdir
          emptyDir: {}
        - name: runtime-config
          emptyDir: {}
        # Media store persists on a PVC (asteria storage class per chart values).
        - name: media
          persistentVolumeClaim:
            claimName: othrys-synapse-matrix-synapse
      # NOTE(review): the nodeSelector hard-requires rpi5, which makes the rpi4
      # entry in the preferred affinity below unreachable — confirm intent.
      nodeSelector:
        hardware: rpi5
      affinity:
        nodeAffinity:
          preferredDuringSchedulingIgnoredDuringExecution:
            - preference:
                matchExpressions:
                  - key: hardware
                    operator: In
                    values:
                      - rpi5
                      - rpi4
              weight: 50
---
# Source: matrix-synapse/templates/signing-key-job.yaml
# Identity for the pre-install signing-key Job; deleted once the hook succeeds.
apiVersion: v1
kind: ServiceAccount
metadata:
  name: othrys-synapse-signingkey-job
  annotations:
    helm.sh/hook: pre-install
    helm.sh/hook-delete-policy: hook-succeeded
  labels:
    app.kubernetes.io/component: signingkey-job
    app.kubernetes.io/instance: othrys-synapse
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/name: matrix-synapse
    app.kubernetes.io/version: "1.144.0"
    helm.sh/chart: matrix-synapse-3.12.17
---
# Source: matrix-synapse/templates/signing-key-job.yaml
# Placeholder Secret the signing-key Job fills in; must outlive the release so
# the homeserver's federation identity is never regenerated by accident.
apiVersion: v1
kind: Secret
metadata:
  name: othrys-synapse-signingkey
  annotations:
    helm.sh/hook: pre-install
    # Never garbage-collect the signing key.
    helm.sh/hook-delete-policy: never
    helm.sh/resource-policy: keep
    # Extra guard for ArgoCD environments that were not auto-detected.
    argocd.argoproj.io/hook: Skip
  labels:
    app.kubernetes.io/component: signingkey-job
    app.kubernetes.io/instance: othrys-synapse
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/name: matrix-synapse
    app.kubernetes.io/version: "1.144.0"
    helm.sh/chart: matrix-synapse-3.12.17
---
# Source: matrix-synapse/templates/signing-key-job.yaml
# Minimal RBAC: allows the signing-key Job to write the generated key into
# exactly one named Secret, nothing else.
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: othrys-synapse-signingkey-job
  annotations:
    helm.sh/hook: pre-install
    helm.sh/hook-delete-policy: hook-succeeded
  labels:
    app.kubernetes.io/component: signingkey-job
    app.kubernetes.io/instance: othrys-synapse
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/name: matrix-synapse
    app.kubernetes.io/version: "1.144.0"
    helm.sh/chart: matrix-synapse-3.12.17
rules:
  - apiGroups: [""]
    resources: [secrets]
    resourceNames: [othrys-synapse-signingkey]
    verbs: [get, update, patch]
---
# Source: matrix-synapse/templates/signing-key-job.yaml
# Grants the job ServiceAccount the Secret-update Role for the install hook.
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: othrys-synapse-signingkey-job
  annotations:
    helm.sh/hook: pre-install
    helm.sh/hook-delete-policy: hook-succeeded
  labels:
    app.kubernetes.io/component: signingkey-job
    app.kubernetes.io/instance: othrys-synapse
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/name: matrix-synapse
    app.kubernetes.io/version: "1.144.0"
    helm.sh/chart: matrix-synapse-3.12.17
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: othrys-synapse-signingkey-job
subjects:
  - kind: ServiceAccount
    name: othrys-synapse-signingkey-job
    namespace: comms
---
# Source: matrix-synapse/templates/tests/test-connection.yaml
# Helm test hook: passes when the Synapse client API answers /versions.
apiVersion: v1
kind: Pod
metadata:
  name: "othrys-synapse-matrix-synapse-test-connection"
  labels:
    helm.sh/chart: matrix-synapse-3.12.17
    app.kubernetes.io/name: matrix-synapse
    app.kubernetes.io/instance: othrys-synapse
    app.kubernetes.io/version: "1.144.0"
    app.kubernetes.io/managed-by: Helm
  annotations:
    "helm.sh/hook": test-success
spec:
  containers:
    - name: wget
      # Pinned tag — the bare `busybox` reference resolved to the mutable
      # :latest tag, at odds with this repo's image-pinning convention.
      image: busybox:1.37
      command: ['wget']
      args: ['othrys-synapse-matrix-synapse:8008/_matrix/client/versions']
  restartPolicy: Never
---
# Source: matrix-synapse/templates/signing-key-job.yaml
# Pre-install hook: generates the federation signing key once and uploads it
# into the othrys-synapse-signingkey Secret via kubectl.
apiVersion: batch/v1
kind: Job
metadata:
  name: othrys-synapse-signingkey-job
  labels:
    helm.sh/chart: matrix-synapse-3.12.17
    app.kubernetes.io/name: matrix-synapse
    app.kubernetes.io/instance: othrys-synapse
    app.kubernetes.io/version: "1.144.0"
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/component: signingkey-job
  annotations:
    helm.sh/hook: pre-install
    helm.sh/hook-delete-policy: hook-succeeded
spec:
  ttlSecondsAfterFinished: 0
  template:
    metadata:
      labels:
        helm.sh/chart: matrix-synapse-3.12.17
        app.kubernetes.io/name: matrix-synapse
        app.kubernetes.io/instance: othrys-synapse
        app.kubernetes.io/version: "1.144.0"
        app.kubernetes.io/managed-by: Helm
        app.kubernetes.io/component: signingkey-job
    spec:
      containers:
        # Step 1: generate the key into the shared emptyDir.
        - command:
            - sh
            - -c
            - |
              echo "Generating signing key..."
              if which generate_signing_key.py >/dev/null; then
                generate_signing_key.py -o /synapse/keys/signing.key
              else
                generate_signing_key -o /synapse/keys/signing.key
              fi
          # Pinned to the same image/version as the main deployment instead of the
          # deprecated matrixdotorg/synapse:latest, so the generator tooling always
          # matches the running server.
          image: "ghcr.io/element-hq/synapse:v1.144.0"
          imagePullPolicy: IfNotPresent
          name: signing-key-generate
          resources:
            {}
          securityContext:
            {}
          volumeMounts:
            - mountPath: /synapse/keys
              name: matrix-synapse-keys
        # Step 2: push the key into the Secret using the job's RBAC.
        - command:
            - sh
            - -c
            - |
              printf "Checking rights to update secret... "
              kubectl auth can-i update secret/${SECRET_NAME}
              /scripts/signing-key.sh
          env:
            - name: SECRET_NAME
              value: othrys-synapse-signingkey
          # Internal kubectl image pin, consistent with the rest of this repo
          # (bitnami/kubectl:latest is being phased out everywhere).
          image: registry.bstein.dev/bstein/kubectl:1.35.0
          imagePullPolicy: IfNotPresent
          name: signing-key-upload
          resources:
            {}
          securityContext:
            {}
          volumeMounts:
            - mountPath: /scripts
              name: scripts
              readOnly: true
            - mountPath: /synapse/keys
              name: matrix-synapse-keys
              readOnly: true
      securityContext:
        {}
      restartPolicy: Never
      serviceAccount: othrys-synapse-signingkey-job
      volumes:
        - name: scripts
          configMap:
            name: othrys-synapse-matrix-synapse-scripts
            defaultMode: 0755
        - name: matrix-synapse-keys
          emptyDir: {}
  parallelism: 1
  completions: 1
  backoffLimit: 1

View File

@ -26,7 +26,7 @@ spec:
mountPath: /work
containers:
- name: patch
image: bitnami/kubectl:latest
image: registry.bstein.dev/bstein/kubectl:1.35.0
command: ["/bin/sh", "-c"]
args:
- |

View File

@ -1,59 +0,0 @@
# services/comms/values-element.yaml
replicaCount: 1
defaultServer:
url: https://matrix.live.bstein.dev
name: live.bstein.dev
config:
default_theme: dark
brand: Othrys
disable_custom_urls: true
disable_login_language_selector: true
disable_guests: false
show_labs_settings: true
features:
feature_group_calls: true
feature_video_rooms: true
feature_element_call_video_rooms: true
room_directory:
servers:
- live.bstein.dev
jitsi: {}
element_call:
url: https://call.live.bstein.dev
participant_limit: 16
brand: Othrys Call
ingress:
enabled: true
className: traefik
annotations:
cert-manager.io/cluster-issuer: letsencrypt
traefik.ingress.kubernetes.io/router.entrypoints: websecure
hosts:
- live.bstein.dev
tls:
- secretName: live-othrys-tls
hosts: [live.bstein.dev]
resources:
requests:
cpu: 100m
memory: 256Mi
limits:
cpu: 500m
memory: 512Mi
nodeSelector:
hardware: rpi5
affinity:
nodeAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- weight: 50
preference:
matchExpressions:
- key: hardware
operator: In
values: ["rpi5","rpi4"]

View File

@ -1,132 +0,0 @@
# services/comms/values-synapse.yaml
serverName: live.bstein.dev
publicServerName: matrix.live.bstein.dev
config:
publicBaseurl: https://matrix.live.bstein.dev
externalPostgresql:
host: postgres-service.postgres.svc.cluster.local
port: 5432
username: synapse
existingSecret: synapse-db
existingSecretPasswordKey: POSTGRES_PASSWORD
database: synapse
redis:
enabled: true
auth:
enabled: true
existingSecret: synapse-redis
existingSecretPasswordKey: redis-password
postgresql:
enabled: false
persistence:
enabled: true
storageClass: asteria
accessMode: ReadWriteOnce
size: 50Gi
synapse:
podSecurityContext:
fsGroup: 666
runAsUser: 666
runAsGroup: 666
resources:
requests:
cpu: 500m
memory: 1Gi
limits:
cpu: "2"
memory: 3Gi
nodeSelector:
hardware: rpi5
affinity:
nodeAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- weight: 50
preference:
matchExpressions:
- key: hardware
operator: In
values: ["rpi5","rpi4"]
ingress:
enabled: true
className: traefik
annotations:
cert-manager.io/cluster-issuer: letsencrypt
traefik.ingress.kubernetes.io/router.entrypoints: websecure
csHosts:
- matrix.live.bstein.dev
hosts:
- matrix.live.bstein.dev
wkHosts:
- live.bstein.dev
- bstein.dev
tls:
- secretName: matrix-live-tls
hosts:
- matrix.live.bstein.dev
- live.bstein.dev
extraConfig:
allow_guest_access: true
allow_public_rooms_without_auth: true
auto_join_rooms:
- "#othrys:live.bstein.dev"
autocreate_auto_join_rooms: true
default_room_version: "11"
experimental_features:
msc3266_enabled: true
msc4143_enabled: true
msc4222_enabled: true
max_event_delay_duration: 24h
password_config:
enabled: true
oidc_enabled: true
oidc_providers:
- idp_id: keycloak
idp_name: Keycloak
issuer: https://sso.bstein.dev/realms/atlas
client_id: synapse
client_secret: "@@OIDC_CLIENT_SECRET@@"
client_auth_method: client_secret_post
scopes: ["openid", "profile", "email"]
authorization_endpoint: https://sso.bstein.dev/realms/atlas/protocol/openid-connect/auth
token_endpoint: https://sso.bstein.dev/realms/atlas/protocol/openid-connect/token
userinfo_endpoint: https://sso.bstein.dev/realms/atlas/protocol/openid-connect/userinfo
user_mapping_provider:
config:
localpart_template: "{{ user.preferred_username }}"
display_name_template: "{{ user.name }}"
allow_existing_users: true
rc_message:
per_second: 0.5
burst_count: 30
rc_delayed_event_mgmt:
per_second: 1
burst_count: 20
rc_login:
address:
burst_count: 20
per_second: 5
account:
burst_count: 20
per_second: 5
failed_attempts:
burst_count: 20
per_second: 5
room_list_publication_rules:
- action: allow
well_known_client:
"m.homeserver":
"base_url": "https://matrix.live.bstein.dev"
"org.matrix.msc4143.rtc_foci":
- type: "livekit"
livekit_service_url: "https://kit.live.bstein.dev/livekit/jwt"
worker:
enabled: false

View File

@ -1,3 +1,4 @@
# services/crypto/xmr-miner/xmrig-daemonset.yaml
apiVersion: apps/v1
kind: DaemonSet
metadata:
@ -29,7 +30,7 @@ spec:
secretName: monero-payout
containers:
- name: xmrig
image: ghcr.io/tari-project/xmrig:latest
image: ghcr.io/tari-project/xmrig@sha256:80defbfd0b640d604c91cb5101d3642db7928e1e68ee3c6b011289b3565a39d9
imagePullPolicy: IfNotPresent
env:
- name: XMRIG_THREADS

View File

@ -84,7 +84,7 @@ spec:
mountPath: /work
containers:
- name: apply
image: bitnami/kubectl:latest
image: registry.bstein.dev/bstein/kubectl:1.35.0
command: ["/bin/sh", "-c"]
args:
- |

View File

@ -50,7 +50,7 @@ spec:
mailu.bstein.dev/vip: "true"
containers:
- name: vip-controller
image: lachlanevenson/k8s-kubectl:latest
image: registry.bstein.dev/bstein/kubectl:1.35.0
imagePullPolicy: IfNotPresent
command:
- /bin/sh

View File

@ -1,4 +1,5 @@
# services/monitoring/namespace.yaml
apiVersion: v1
kind: Namespace
metadata:
name: monitoring
name: monitoring

View File

@ -20,7 +20,7 @@ spec:
hardware: rpi5
containers:
- name: collabora
image: collabora/code:latest
image: collabora/code@sha256:3c58d0e9bae75e4647467d0c7d91cb66f261d3e814709aed590b5c334a04db26
imagePullPolicy: IfNotPresent
env:
- name: domain

View File

@ -1,3 +1,4 @@
# services/pegasus/deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata: