communication: add Othrys stack via Flux

This commit is contained in:
Brad Stein 2025-12-31 12:00:12 -03:00
parent c0a53e59b5
commit a6bd6b8cc8
39 changed files with 5494 additions and 12 deletions

View File

@ -5,7 +5,7 @@ resources:
- ../../services/crypto
- ../../services/gitea
- ../../services/jellyfin
- ../../services/jitsi
- ../../services/communication
- ../../services/monitoring
- ../../services/pegasus
- ../../services/vault

View File

@ -0,0 +1,17 @@
# clusters/atlas/flux-system/applications/communication/kustomization.yaml
# Flux Kustomization reconciling the Othrys communication stack
# (Synapse, Element, LiveKit, coturn) from ./services/communication.
apiVersion: kustomize.toolkit.fluxcd.io/v1
kind: Kustomization
metadata:
  name: communication
  namespace: flux-system
spec:
  # Traefik must be up first: the stack's ingresses and middlewares
  # are served by it.
  dependsOn:
    - name: traefik
  interval: 10m
  timeout: 2m
  prune: true
  sourceRef:
    kind: GitRepository
    name: flux-system
  path: ./services/communication
  targetNamespace: communication

View File

@ -4,7 +4,7 @@ kind: Kustomization
resources:
- gitea/kustomization.yaml
- vault/kustomization.yaml
- jitsi/kustomization.yaml
- communication/kustomization.yaml
- crypto/kustomization.yaml
- monerod/kustomization.yaml
- pegasus/kustomization.yaml

View File

@ -4,6 +4,7 @@ kind: Kustomization
resources:
- core/kustomization.yaml
- helm/kustomization.yaml
- metallb/kustomization.yaml
- traefik/kustomization.yaml
- gitops-ui/kustomization.yaml
- monitoring/kustomization.yaml

View File

@ -1,19 +1,16 @@
# clusters/atlas/flux-system/applications/jitsi/kustomization.yaml
# clusters/atlas/flux-system/platform/metallb/kustomization.yaml
apiVersion: kustomize.toolkit.fluxcd.io/v1
kind: Kustomization
metadata:
name: jitsi
name: metallb
namespace: flux-system
spec:
interval: 10m
path: ./services/jitsi
targetNamespace: jitsi
prune: true
interval: 30m
sourceRef:
kind: GitRepository
name: flux-system
namespace: flux-system
dependsOn:
- name: core
path: ./infrastructure/metallb
prune: true
wait: true
timeout: 5m
targetNamespace: metallb-system

View File

@ -15,4 +15,5 @@ spec:
namespace: flux-system
dependsOn:
- name: core
- name: metallb
wait: true

View File

@ -5,3 +5,4 @@ resources:
- ../../../infrastructure/modules/base
- ../../../infrastructure/modules/profiles/atlas-ha
- ../../../infrastructure/sources/cert-manager/letsencrypt.yaml
- ../../../infrastructure/metallb

View File

@ -0,0 +1,20 @@
# infrastructure/metallb/ippool.yaml
# MetalLB address pool for the communication stack.
apiVersion: metallb.io/v1beta1
kind: IPAddressPool
metadata:
  name: communication-pool
  namespace: metallb-system
spec:
  addresses:
    # .5 = coturn, .6 = livekit (pinned in services/communication);
    # .4 appears unassigned — presumably spare headroom, confirm.
    - 192.168.22.4-192.168.22.6
    # .9 = traefik LoadBalancer (infrastructure/traefik/traefik-service-lb.yaml)
    - 192.168.22.9-192.168.22.9
  autoAssign: true
---
# Announce the pool on the local L2 segment (ARP/NDP).
apiVersion: metallb.io/v1beta1
kind: L2Advertisement
metadata:
  name: communication-adv
  namespace: metallb-system
spec:
  ipAddressPools:
    - communication-pool

View File

@ -0,0 +1,9 @@
# infrastructure/metallb/kustomization.yaml
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
  - namespace.yaml
  - metallb-rendered.yaml
  - ippool.yaml
# `patchesStrategicMerge` is deprecated and removed in kustomize v5;
# the unified `patches` field handles strategic-merge patch files.
patches:
  - path: patches/node-placement.yaml

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,5 @@
# infrastructure/metallb/namespace.yaml
apiVersion: v1
kind: Namespace
metadata:
name: metallb-system

View File

@ -0,0 +1,30 @@
# infrastructure/metallb/patches/node-placement.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: metallb-controller
namespace: metallb-system
spec:
template:
spec:
containers:
- name: controller
args:
- --port=7472
- --log-level=info
- --webhook-mode=enabled
- --tls-min-version=VersionTLS12
- --lb-class=metallb
nodeSelector:
hardware: rpi5
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: metallb-speaker
namespace: metallb-system
spec:
template:
spec:
nodeSelector:
hardware: rpi5

View File

@ -71,9 +71,10 @@ rules:
- tlsoptions
- tlsstores
- serverstransports
- serverstransporttcps
- traefikservices
- middlewaretcps
verbs:
- get
- list
- watch

View File

@ -10,3 +10,4 @@ resources:
- clusterrole.yaml
- clusterrolebinding.yaml
- service.yaml
- traefik-service-lb.yaml

View File

@ -0,0 +1,24 @@
# infrastructure/traefik/traefik-service-lb.yaml
# Dedicated LoadBalancer Service exposing the kube-system Traefik on a
# fixed MetalLB address from the communication pool.
apiVersion: v1
kind: Service
metadata:
  name: traefik
  namespace: kube-system
  annotations:
    metallb.universe.tf/address-pool: communication-pool
    # `.spec.loadBalancerIP` is deprecated since Kubernetes 1.24;
    # MetalLB's documented replacement is this annotation.
    metallb.universe.tf/loadBalancerIPs: 192.168.22.9
spec:
  type: LoadBalancer
  loadBalancerClass: metallb
  ports:
    - name: web
      port: 80
      targetPort: web
      protocol: TCP
    - name: websecure
      port: 443
      targetPort: websecure
      protocol: TCP
  selector:
    # k3s's bundled Traefik Helm release labels its pods with
    # instance "traefik-kube-system".
    app.kubernetes.io/instance: traefik-kube-system
    app.kubernetes.io/name: traefik

View File

@ -0,0 +1,131 @@
# services/communication/atlasbot-configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: atlasbot
data:
bot.py: |
import json, os, time, collections
from urllib import request, parse, error
BASE = os.environ.get("MATRIX_BASE", "http://othrys-synapse-matrix-synapse:8008")
USER = os.environ["BOT_USER"]
PASSWORD = os.environ["BOT_PASS"]
ROOM_ALIAS = "#othrys:live.bstein.dev"
OLLAMA_URL = os.environ.get("OLLAMA_URL", "https://chat.ai.bstein.dev/")
MODEL = os.environ.get("OLLAMA_MODEL", "qwen2.5-coder:7b-instruct-q4_0")
API_KEY = os.environ.get("CHAT_API_KEY", "")
def req(method: str, path: str, token: str | None = None, body=None, timeout=60):
url = BASE + path
data = None
headers = {}
if body is not None:
data = json.dumps(body).encode()
headers["Content-Type"] = "application/json"
if token:
headers["Authorization"] = f"Bearer {token}"
r = request.Request(url, data=data, headers=headers, method=method)
with request.urlopen(r, timeout=timeout) as resp:
raw = resp.read()
return json.loads(raw.decode()) if raw else {}
def login() -> str:
payload = {
"type": "m.login.password",
"identifier": {"type": "m.id.user", "user": USER},
"password": PASSWORD,
}
res = req("POST", "/_matrix/client/v3/login", body=payload)
return res["access_token"]
def resolve_alias(token: str, alias: str) -> str:
enc = parse.quote(alias)
res = req("GET", f"/_matrix/client/v3/directory/room/{enc}", token)
return res["room_id"]
def join_room(token: str, room: str):
req("POST", f"/_matrix/client/v3/rooms/{parse.quote(room)}/join", token, body={})
def send_msg(token: str, room: str, text: str):
path = f"/_matrix/client/v3/rooms/{parse.quote(room)}/send/m.room.message"
req("POST", path, token, body={"msgtype": "m.text", "body": text})
history = collections.defaultdict(list) # room_id -> list of str (short transcript)
greeted = set()
def ollama_reply(room_id: str, prompt: str) -> str:
try:
# Keep short context as plain text transcript
transcript = "\n".join(history[room_id][-12:] + [f"User: {prompt}"])
payload = {"model": MODEL, "message": transcript}
headers = {"Content-Type": "application/json"}
if API_KEY:
headers["x-api-key"] = API_KEY
r = request.Request(OLLAMA_URL, data=json.dumps(payload).encode(), headers=headers)
with request.urlopen(r, timeout=15) as resp:
data = json.loads(resp.read().decode())
reply = data.get("message") or data.get("response") or data.get("reply") or "I'm here to help."
history[room_id].append(f"Atlas: {reply}")
return reply
except Exception:
return "Hi! I'm Atlas."
def sync_loop(token: str, room_id: str):
since = None
while True:
params = {"timeout": 30000}
if since:
params["since"] = since
query = parse.urlencode(params)
try:
res = req("GET", f"/_matrix/client/v3/sync?{query}", token, timeout=35)
except Exception:
time.sleep(5)
continue
since = res.get("next_batch", since)
# invites
for rid, data in res.get("rooms", {}).get("invite", {}).items():
try:
join_room(token, rid)
send_msg(token, rid, "Atlas online.")
except Exception:
pass
# messages
for rid, data in res.get("rooms", {}).get("join", {}).items():
if rid not in greeted and room_id and rid == room_id:
greeted.add(rid)
send_msg(token, rid, "Atlas online.")
timeline = data.get("timeline", {}).get("events", [])
for ev in timeline:
if ev.get("type") != "m.room.message":
continue
content = ev.get("content", {})
body = content.get("body", "")
if not body.strip():
continue
sender = ev.get("sender", "")
if sender == f"@{USER}:live.bstein.dev":
continue
# Only respond if bot is mentioned or in a DM
joined_count = data.get("summary", {}).get("m.joined_member_count")
is_dm = joined_count is not None and joined_count <= 2
mentioned = f"@{USER}" in body or "atlas" in body.lower()
history[rid].append(f"{sender}: {body}")
if is_dm or mentioned:
reply = ollama_reply(rid, body)
send_msg(token, rid, reply)
def main():
token = login()
try:
room_id = resolve_alias(token, ROOM_ALIAS)
join_room(token, room_id)
except Exception:
room_id = None
sync_loop(token, room_id)
if __name__ == "__main__":
main()

View File

@ -0,0 +1,9 @@
# services/communication/atlasbot-credentials.yaml
# SECURITY(review): these credentials are committed to git in plaintext.
# Anyone with repository access can log in as the bot/seeder accounts.
# Rotate both passwords and move this Secret to an encrypted workflow
# (SOPS, SealedSecrets, or external-secrets) before sharing the repo.
apiVersion: v1
kind: Secret
metadata:
  name: atlasbot-credentials
type: Opaque
stringData:
  bot-password: "x8eU9xwsjJ2S7Xv1G4mQ"
  seeder-password: "Qv5sjyH8nD6pPz7Lk3R0"

View File

@ -0,0 +1,61 @@
# services/communication/atlasbot-deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: atlasbot
namespace: communication
labels:
app: atlasbot
spec:
replicas: 1
selector:
matchLabels:
app: atlasbot
template:
metadata:
labels:
app: atlasbot
spec:
nodeSelector:
hardware: rpi5
containers:
- name: atlasbot
image: python:3.11-slim
command: ["/bin/sh","-c"]
args:
- |
python /app/bot.py
env:
- name: MATRIX_BASE
value: http://othrys-synapse-matrix-synapse:8008
- name: BOT_USER
value: atlasbot
- name: BOT_PASS
valueFrom:
secretKeyRef:
name: atlasbot-credentials
key: bot-password
- name: CHAT_API_KEY
valueFrom:
secretKeyRef:
name: chat-ai-keys
key: matrix
- name: OLLAMA_URL
value: https://chat.ai.bstein.dev/
- name: OLLAMA_MODEL
value: qwen2.5-coder:7b-instruct-q4_0
resources:
requests:
cpu: 100m
memory: 256Mi
limits:
cpu: 500m
memory: 512Mi
volumeMounts:
- name: code
mountPath: /app/bot.py
subPath: bot.py
volumes:
- name: code
configMap:
name: atlasbot

View File

@ -0,0 +1,9 @@
# services/communication/chat-ai-keys.yaml
# SECURITY(review): this API key is committed to git in plaintext.
# Rotate it and manage the Secret via an encrypted mechanism
# (SOPS, SealedSecrets, or external-secrets).
apiVersion: v1
kind: Secret
metadata:
  name: chat-ai-keys
  namespace: communication
type: Opaque
stringData:
  matrix: "3d9b1e5e80f146f2b3f6a9fbe01b7b77"

View File

@ -0,0 +1,323 @@
# services/communication/coturn.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: coturn
labels:
app: coturn
spec:
replicas: 1
selector:
matchLabels:
app: coturn
template:
metadata:
labels:
app: coturn
spec:
nodeSelector:
hardware: rpi5
affinity:
nodeAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- weight: 50
preference:
matchExpressions:
- key: hardware
operator: In
values: ["rpi5","rpi4"]
containers:
- name: coturn
image: ghcr.io/coturn/coturn:4.6.2
command:
- /bin/sh
- -c
- |
exec /usr/bin/turnserver \
--no-cli \
--fingerprint \
--lt-cred-mech \
--user=livekit:"${TURN_STATIC_AUTH_SECRET}" \
--realm=live.bstein.dev \
--listening-port=3478 \
--tls-listening-port=5349 \
--min-port=50000 \
--max-port=50050 \
--cert=/etc/coturn/tls/tls.crt \
--pkey=/etc/coturn/tls/tls.key \
--log-file=stdout \
--no-software-attribute
env:
- name: TURN_STATIC_AUTH_SECRET
valueFrom:
secretKeyRef:
name: turn-shared-secret
key: TURN_STATIC_AUTH_SECRET
ports:
- name: turn-udp
containerPort: 3478
protocol: UDP
- name: turn-tcp
containerPort: 3478
protocol: TCP
- name: turn-tls
containerPort: 5349
protocol: TCP
volumeMounts:
- name: tls
mountPath: /etc/coturn/tls
readOnly: true
resources:
requests:
cpu: 200m
memory: 256Mi
limits:
cpu: "2"
memory: 512Mi
volumes:
- name: tls
secret:
secretName: turn-live-tls
---
apiVersion: v1
kind: Service
metadata:
name: coturn
annotations:
metallb.universe.tf/address-pool: communication-pool
spec:
type: LoadBalancer
loadBalancerClass: metallb
loadBalancerIP: 192.168.22.5
selector:
app: coturn
ports:
- name: turn-udp
port: 3478
targetPort: 3478
protocol: UDP
- name: turn-tcp
port: 3478
targetPort: 3478
protocol: TCP
- name: turn-tls
port: 5349
targetPort: 5349
protocol: TCP
# Expose relay range for UDP media
- name: relay-50000
port: 50000
targetPort: 50000
protocol: UDP
- name: relay-50001
port: 50001
targetPort: 50001
protocol: UDP
- name: relay-50002
port: 50002
targetPort: 50002
protocol: UDP
- name: relay-50003
port: 50003
targetPort: 50003
protocol: UDP
- name: relay-50004
port: 50004
targetPort: 50004
protocol: UDP
- name: relay-50005
port: 50005
targetPort: 50005
protocol: UDP
- name: relay-50006
port: 50006
targetPort: 50006
protocol: UDP
- name: relay-50007
port: 50007
targetPort: 50007
protocol: UDP
- name: relay-50008
port: 50008
targetPort: 50008
protocol: UDP
- name: relay-50009
port: 50009
targetPort: 50009
protocol: UDP
- name: relay-50010
port: 50010
targetPort: 50010
protocol: UDP
- name: relay-50011
port: 50011
targetPort: 50011
protocol: UDP
- name: relay-50012
port: 50012
targetPort: 50012
protocol: UDP
- name: relay-50013
port: 50013
targetPort: 50013
protocol: UDP
- name: relay-50014
port: 50014
targetPort: 50014
protocol: UDP
- name: relay-50015
port: 50015
targetPort: 50015
protocol: UDP
- name: relay-50016
port: 50016
targetPort: 50016
protocol: UDP
- name: relay-50017
port: 50017
targetPort: 50017
protocol: UDP
- name: relay-50018
port: 50018
targetPort: 50018
protocol: UDP
- name: relay-50019
port: 50019
targetPort: 50019
protocol: UDP
- name: relay-50020
port: 50020
targetPort: 50020
protocol: UDP
- name: relay-50021
port: 50021
targetPort: 50021
protocol: UDP
- name: relay-50022
port: 50022
targetPort: 50022
protocol: UDP
- name: relay-50023
port: 50023
targetPort: 50023
protocol: UDP
- name: relay-50024
port: 50024
targetPort: 50024
protocol: UDP
- name: relay-50025
port: 50025
targetPort: 50025
protocol: UDP
- name: relay-50026
port: 50026
targetPort: 50026
protocol: UDP
- name: relay-50027
port: 50027
targetPort: 50027
protocol: UDP
- name: relay-50028
port: 50028
targetPort: 50028
protocol: UDP
- name: relay-50029
port: 50029
targetPort: 50029
protocol: UDP
- name: relay-50030
port: 50030
targetPort: 50030
protocol: UDP
- name: relay-50031
port: 50031
targetPort: 50031
protocol: UDP
- name: relay-50032
port: 50032
targetPort: 50032
protocol: UDP
- name: relay-50033
port: 50033
targetPort: 50033
protocol: UDP
- name: relay-50034
port: 50034
targetPort: 50034
protocol: UDP
- name: relay-50035
port: 50035
targetPort: 50035
protocol: UDP
- name: relay-50036
port: 50036
targetPort: 50036
protocol: UDP
- name: relay-50037
port: 50037
targetPort: 50037
protocol: UDP
- name: relay-50038
port: 50038
targetPort: 50038
protocol: UDP
- name: relay-50039
port: 50039
targetPort: 50039
protocol: UDP
- name: relay-50040
port: 50040
targetPort: 50040
protocol: UDP
- name: relay-50041
port: 50041
targetPort: 50041
protocol: UDP
- name: relay-50042
port: 50042
targetPort: 50042
protocol: UDP
- name: relay-50043
port: 50043
targetPort: 50043
protocol: UDP
- name: relay-50044
port: 50044
targetPort: 50044
protocol: UDP
- name: relay-50045
port: 50045
targetPort: 50045
protocol: UDP
- name: relay-50046
port: 50046
targetPort: 50046
protocol: UDP
- name: relay-50047
port: 50047
targetPort: 50047
protocol: UDP
- name: relay-50048
port: 50048
targetPort: 50048
protocol: UDP
- name: relay-50049
port: 50049
targetPort: 50049
protocol: UDP
- name: relay-50050
port: 50050
targetPort: 50050
protocol: UDP
---
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: turn-live-cert
spec:
secretName: turn-live-tls
issuerRef:
name: letsencrypt
kind: ClusterIssuer
dnsNames:
- turn.live.bstein.dev

View File

@ -0,0 +1,25 @@
# services/communication/element-call-config.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: element-call-config
namespace: communication
data:
config.json: |
{
"default_server_config": {
"m.homeserver": {
"base_url": "https://matrix.live.bstein.dev",
"server_name": "live.bstein.dev"
},
"m.identity_server": {
"base_url": "https://vector.im"
}
},
"livekit": {
"livekit_service_url": "https://kit.live.bstein.dev/livekit/jwt"
},
"branding": {
"app_name": "Othrys Call"
}
}

View File

@ -0,0 +1,78 @@
# services/communication/element-call-deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: element-call
namespace: communication
labels:
app: element-call
spec:
replicas: 1
selector:
matchLabels:
app: element-call
template:
metadata:
labels:
app: element-call
spec:
nodeSelector:
hardware: rpi5
containers:
- name: element-call
image: ghcr.io/element-hq/element-call:latest
ports:
- containerPort: 8080
name: http
volumeMounts:
- name: config
mountPath: /app/config.json
subPath: config.json
volumes:
- name: config
configMap:
name: element-call-config
items:
- key: config.json
path: config.json
optional: false
---
apiVersion: v1
kind: Service
metadata:
name: element-call
namespace: communication
spec:
selector:
app: element-call
ports:
- name: http
port: 80
targetPort: 8080
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: element-call
  namespace: communication
  annotations:
    traefik.ingress.kubernetes.io/router.entrypoints: websecure
    traefik.ingress.kubernetes.io/router.tls: "true"
    cert-manager.io/cluster-issuer: letsencrypt
spec:
  # The `kubernetes.io/ingress.class` annotation is deprecated; select
  # the controller via ingressClassName, as the element-web ingress
  # in this stack already does.
  ingressClassName: traefik
  tls:
    - hosts:
        - call.live.bstein.dev
      secretName: call-live-tls
  rules:
    - host: call.live.bstein.dev
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: element-call
                port:
                  number: 80

View File

@ -0,0 +1,223 @@
---
# Source: element-web/templates/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: othrys-element-element-web
labels:
helm.sh/chart: element-web-1.4.26
app.kubernetes.io/name: element-web
app.kubernetes.io/instance: othrys-element
app.kubernetes.io/version: "1.12.6"
app.kubernetes.io/managed-by: Helm
---
# Source: element-web/templates/configuration-nginx.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: othrys-element-element-web-nginx
labels:
helm.sh/chart: element-web-1.4.26
app.kubernetes.io/name: element-web
app.kubernetes.io/instance: othrys-element
app.kubernetes.io/version: "1.12.6"
app.kubernetes.io/managed-by: Helm
data:
default.conf: |
server {
listen 8080;
listen [::]:8080;
server_name localhost;
root /usr/share/nginx/html;
index index.html;
add_header X-Frame-Options SAMEORIGIN;
add_header X-Content-Type-Options nosniff;
add_header X-XSS-Protection "1; mode=block";
add_header Content-Security-Policy "frame-ancestors 'self'";
# Set no-cache for the index.html only so that browsers always check for a new copy of Element Web.
location = /index.html {
add_header Cache-Control "no-cache";
}
# redirect server error pages to the static page /50x.html
#
error_page 500 502 503 504 /50x.html;
}
---
# Source: element-web/templates/configuration.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: othrys-element-element-web
labels:
helm.sh/chart: element-web-1.4.26
app.kubernetes.io/name: element-web
app.kubernetes.io/instance: othrys-element
app.kubernetes.io/version: "1.12.6"
app.kubernetes.io/managed-by: Helm
data:
config.json: |
{"brand":"Othrys","default_server_config":{"m.homeserver":{"base_url":"https://matrix.live.bstein.dev","server_name":"live.bstein.dev"},"m.identity_server":{"base_url":"https://vector.im"}},"default_theme":"dark","disable_custom_urls":true,"disable_login_language_selector":true,"disable_guests":false,"show_labs_settings":true,"features":{"feature_group_calls":true,"feature_video_rooms":true,"feature_element_call_video_rooms":true},"room_directory":{"servers":["live.bstein.dev"]},"jitsi":{},"element_call":{"url":"https://call.live.bstein.dev","participant_limit":16,"brand":"Othrys Call"}}
---
# Source: element-web/templates/service.yaml
apiVersion: v1
kind: Service
metadata:
name: othrys-element-element-web
labels:
helm.sh/chart: element-web-1.4.26
app.kubernetes.io/name: element-web
app.kubernetes.io/instance: othrys-element
app.kubernetes.io/version: "1.12.6"
app.kubernetes.io/managed-by: Helm
spec:
type: ClusterIP
ports:
- port: 80
targetPort: http
protocol: TCP
name: http
selector:
app.kubernetes.io/name: element-web
app.kubernetes.io/instance: othrys-element
---
# Source: element-web/templates/deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: othrys-element-element-web
  labels:
    helm.sh/chart: element-web-1.4.26
    app.kubernetes.io/name: element-web
    app.kubernetes.io/instance: othrys-element
    app.kubernetes.io/version: "1.12.6"
    app.kubernetes.io/managed-by: Helm
spec:
  replicas: 1
  selector:
    matchLabels:
      app.kubernetes.io/name: element-web
      app.kubernetes.io/instance: othrys-element
  template:
    metadata:
      annotations:
        checksum/config: manual-rtc-enable-1
        checksum/config-nginx: 085061d0925f4840c3770233509dc0b00fe8fa1a5fef8bf282a514fd101c76fa
      labels:
        app.kubernetes.io/name: element-web
        app.kubernetes.io/instance: othrys-element
    spec:
      serviceAccountName: othrys-element-element-web
      securityContext: {}
      containers:
        - name: element-web
          securityContext: {}
          image: "ghcr.io/element-hq/element-web:v1.12.6"
          imagePullPolicy: IfNotPresent
          env:
            - name: ELEMENT_WEB_PORT
              value: "8080"
          ports:
            - name: http
              containerPort: 8080
              protocol: TCP
          livenessProbe:
            httpGet:
              path: /
              port: http
          readinessProbe:
            httpGet:
              path: /
              port: http
          resources:
            limits:
              cpu: 500m
              memory: 512Mi
            requests:
              cpu: 100m
              memory: 256Mi
          volumeMounts:
            - mountPath: /app/config.json
              name: config
              subPath: config.json
            # BUG FIX: the nginx ConfigMap's only data key is
            # `default.conf` (see configuration-nginx.yaml above);
            # mounting subPath `config.json` from it fails at pod start
            # because that key does not exist in the volume. Mount the
            # real key at the path nginx actually loads.
            - mountPath: /etc/nginx/conf.d/default.conf
              name: config-nginx
              subPath: default.conf
      volumes:
        - name: config
          configMap:
            name: othrys-element-element-web
        - name: config-nginx
          configMap:
            name: othrys-element-element-web-nginx
      nodeSelector:
        hardware: rpi5
      affinity:
        nodeAffinity:
          preferredDuringSchedulingIgnoredDuringExecution:
            - preference:
                matchExpressions:
                  - key: hardware
                    operator: In
                    values:
                      - rpi5
                      - rpi4
              weight: 50
---
# Source: element-web/templates/ingress.yaml
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: othrys-element-element-web
labels:
helm.sh/chart: element-web-1.4.26
app.kubernetes.io/name: element-web
app.kubernetes.io/instance: othrys-element
app.kubernetes.io/version: "1.12.6"
app.kubernetes.io/managed-by: Helm
annotations:
cert-manager.io/cluster-issuer: letsencrypt
traefik.ingress.kubernetes.io/router.entrypoints: websecure
spec:
ingressClassName: traefik
tls:
- hosts:
- "live.bstein.dev"
secretName: live-othrys-tls
rules:
- host: "live.bstein.dev"
http:
paths:
- path: /
backend:
service:
name: othrys-element-element-web
port:
number: 80
pathType: Prefix
---
# Source: element-web/templates/tests/test-connection.yaml
apiVersion: v1
kind: Pod
metadata:
name: "othrys-element-element-web-test-connection"
labels:
helm.sh/chart: element-web-1.4.26
app.kubernetes.io/name: element-web
app.kubernetes.io/instance: othrys-element
app.kubernetes.io/version: "1.12.6"
app.kubernetes.io/managed-by: Helm
annotations:
"helm.sh/hook": test-success
spec:
containers:
- name: wget
image: busybox
command: ['wget']
args: ['othrys-element-element-web:80']
restartPolicy: Never

View File

@ -0,0 +1,89 @@
# services/communication/guest-name-job.yaml
apiVersion: batch/v1
kind: CronJob
metadata:
name: guest-name-randomizer
namespace: communication
spec:
schedule: "*/1 * * * *"
jobTemplate:
spec:
template:
spec:
restartPolicy: OnFailure
containers:
- name: rename
image: python:3.11-slim
env:
- name: SYNAPSE_BASE
value: http://othrys-synapse-matrix-synapse:8008
- name: SEEDER_USER
value: othrys-seeder
- name: SEEDER_PASS
valueFrom:
secretKeyRef:
name: atlasbot-credentials
key: seeder-password
command:
- /bin/sh
- -c
- |
set -euo pipefail
pip install --no-cache-dir requests >/dev/null
python - <<'PY'
import os, random, requests, urllib.parse
ADJ = ["brisk","calm","eager","gentle","merry","nifty","rapid","sunny","witty","zesty"]
NOUN = ["otter","falcon","comet","ember","grove","harbor","meadow","raven","river","summit"]
BASE = os.environ["SYNAPSE_BASE"]
OTHRYS = "!orejZnVfvbAmwQDYba:live.bstein.dev"
def login(user, password):
r = requests.post(f"{BASE}/_matrix/client/v3/login", json={
"type": "m.login.password",
"identifier": {"type": "m.id.user", "user": user},
"password": password,
})
r.raise_for_status()
return r.json()["access_token"]
def list_guests(token):
headers = {"Authorization": f"Bearer {token}"}
users = []
from_token = None
while True:
url = f"{BASE}/_synapse/admin/v2/users?local=true&deactivated=false&limit=100"
if from_token:
url += f"&from={from_token}"
res = requests.get(url, headers=headers)
res.raise_for_status()
data = res.json()
for u in data.get("users", []):
disp = u.get("displayname", "")
if u.get("is_guest") and (not disp or disp.isdigit()):
users.append(u["name"])
from_token = data.get("next_token")
if not from_token:
break
return users
def set_displayname(token, user_id, name):
headers = {"Authorization": f"Bearer {token}"}
payload = {"displayname": name}
# Update global profile
r = requests.put(f"{BASE}/_matrix/client/v3/profile/{urllib.parse.quote(user_id)}/displayname", headers=headers, json=payload)
r.raise_for_status()
# Update Othrys member event so clients see the change quickly
state_url = f"{BASE}/_matrix/client/v3/rooms/{urllib.parse.quote(OTHRYS)}/state/m.room.member/{urllib.parse.quote(user_id)}"
r2 = requests.get(state_url, headers=headers)
content = r2.json() if r2.status_code == 200 else {"membership": "join"}
content["displayname"] = name
requests.put(state_url, headers=headers, json=content)
token = login(os.environ["SEEDER_USER"], os.environ["SEEDER_PASS"])
guests = list_guests(token)
for g in guests:
new = f"{random.choice(ADJ)}-{random.choice(NOUN)}"
set_displayname(token, g, new)
PY

View File

@ -0,0 +1,24 @@
# services/communication/kustomization.yaml
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: communication
resources:
- namespace.yaml
- synapse-rendered.yaml
- element-rendered.yaml
- livekit-config.yaml
- livekit.yaml
- coturn.yaml
- livekit-token-deployment.yaml
- livekit-ingress.yaml
- livekit-middlewares.yaml
- element-call-config.yaml
- element-call-deployment.yaml
- pin-othrys-job.yaml
- guest-name-job.yaml
- chat-ai-keys.yaml
- atlasbot-credentials.yaml
- atlasbot-configmap.yaml
- atlasbot-deployment.yaml
- seed-othrys-room.yaml
- wellknown.yaml

View File

@ -0,0 +1,21 @@
# services/communication/livekit-config.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: livekit-config
data:
livekit.yaml: |
port: 7880
rtc:
udp_port: 7882
tcp_port: 7881
use_external_ip: true
turn_servers:
- host: turn.live.bstein.dev
port: 5349
protocol: tls
- host: turn.live.bstein.dev
port: 3478
protocol: udp
room:
auto_create: true

View File

@ -0,0 +1,28 @@
# services/communication/livekit-ingress.yaml
# Routes the LiveKit SFU under /livekit/sfu on kit.live.bstein.dev,
# stripping the prefix via the livekit-sfu-strip middleware.
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: livekit-ingress
  namespace: communication
  annotations:
    traefik.ingress.kubernetes.io/router.entrypoints: websecure
    traefik.ingress.kubernetes.io/router.tls: "true"
    traefik.ingress.kubernetes.io/router.middlewares: communication-livekit-sfu-strip@kubernetescrd
    cert-manager.io/cluster-issuer: letsencrypt
spec:
  # Deprecated `kubernetes.io/ingress.class` annotation replaced by
  # ingressClassName (consistent with the element-web ingress).
  ingressClassName: traefik
  tls:
    - hosts:
        - kit.live.bstein.dev
      secretName: kit-live-tls
  rules:
    - host: kit.live.bstein.dev
      http:
        paths:
          - path: /livekit/sfu
            pathType: Prefix
            backend:
              service:
                name: livekit
                port:
                  number: 7880

View File

@ -0,0 +1,48 @@
# services/communication/livekit-middlewares.yaml
# Path-strip middlewares for LiveKit, plus the ingress routing the
# JWT token service under /livekit/jwt on kit.live.bstein.dev.
apiVersion: traefik.io/v1alpha1
kind: Middleware
metadata:
  name: livekit-sfu-strip
  namespace: communication
spec:
  stripPrefix:
    prefixes:
      - /livekit/sfu
---
apiVersion: traefik.io/v1alpha1
kind: Middleware
metadata:
  name: livekit-jwt-strip
  namespace: communication
spec:
  stripPrefix:
    prefixes:
      - /livekit/jwt
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: livekit-jwt-ingress
  namespace: communication
  annotations:
    traefik.ingress.kubernetes.io/router.entrypoints: websecure
    traefik.ingress.kubernetes.io/router.tls: "true"
    traefik.ingress.kubernetes.io/router.middlewares: communication-livekit-jwt-strip@kubernetescrd
    cert-manager.io/cluster-issuer: letsencrypt
spec:
  # Deprecated `kubernetes.io/ingress.class` annotation replaced by
  # ingressClassName (consistent with the element-web ingress).
  ingressClassName: traefik
  tls:
    - hosts:
        - kit.live.bstein.dev
      secretName: kit-live-tls
  rules:
    - host: kit.live.bstein.dev
      http:
        paths:
          - path: /livekit/jwt
            pathType: Prefix
            backend:
              service:
                name: livekit-token-service
                port:
                  number: 8080

View File

@ -0,0 +1,69 @@
# services/communication/livekit-token-deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: livekit-token-service
labels:
app: livekit-token-service
spec:
replicas: 1
selector:
matchLabels:
app: livekit-token-service
template:
metadata:
labels:
app: livekit-token-service
spec:
nodeSelector:
hardware: rpi5
affinity:
nodeAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- weight: 50
preference:
matchExpressions:
- key: hardware
operator: In
values: ["rpi5","rpi4"]
hostAliases:
- ip: 10.43.60.6
hostnames:
- live.bstein.dev
containers:
- name: token-service
image: ghcr.io/element-hq/lk-jwt-service:0.3.0
env:
- name: LIVEKIT_URL
value: wss://kit.live.bstein.dev/livekit/sfu
- name: LIVEKIT_KEY
value: primary
- name: LIVEKIT_SECRET
valueFrom:
secretKeyRef:
name: livekit-api
key: primary
- name: LIVEKIT_FULL_ACCESS_HOMESERVERS
value: live.bstein.dev
ports:
- containerPort: 8080
name: http
resources:
requests:
cpu: 50m
memory: 128Mi
limits:
cpu: 300m
memory: 256Mi
---
apiVersion: v1
kind: Service
metadata:
name: livekit-token-service
spec:
selector:
app: livekit-token-service
ports:
- name: http
port: 8080
targetPort: 8080

View File

@ -0,0 +1,120 @@
# services/communication/livekit.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: livekit
labels:
app: livekit
spec:
replicas: 1
selector:
matchLabels:
app: livekit
template:
metadata:
labels:
app: livekit
spec:
enableServiceLinks: false
nodeSelector:
hardware: rpi5
affinity:
nodeAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- weight: 50
preference:
matchExpressions:
- key: hardware
operator: In
values: ["rpi5","rpi4"]
containers:
- name: livekit
image: livekit/livekit-server:v1.9.0
command:
- /bin/sh
- -c
- |
set -euo pipefail
umask 077
printf "%s: %s\n" "${LIVEKIT_API_KEY_ID}" "${LIVEKIT_API_SECRET}" > /var/run/livekit/keys
chmod 600 /var/run/livekit/keys
exec /livekit-server --config /etc/livekit/livekit.yaml --key-file /var/run/livekit/keys
env:
- name: LIVEKIT_API_KEY_ID
value: primary
- name: LIVEKIT_API_SECRET
valueFrom:
secretKeyRef:
name: livekit-api
key: primary
- name: LIVEKIT_RTC__TURN_SERVERS_0__USERNAME
value: livekit
- name: LIVEKIT_RTC__TURN_SERVERS_0__CREDENTIAL
valueFrom:
secretKeyRef:
name: turn-shared-secret
key: TURN_STATIC_AUTH_SECRET
- name: LIVEKIT_RTC__TURN_SERVERS_1__USERNAME
value: livekit
- name: LIVEKIT_RTC__TURN_SERVERS_1__CREDENTIAL
valueFrom:
secretKeyRef:
name: turn-shared-secret
key: TURN_STATIC_AUTH_SECRET
ports:
- containerPort: 7880
name: http
protocol: TCP
- containerPort: 7881
name: tcp-media
protocol: TCP
- containerPort: 7882
name: udp-media
protocol: UDP
volumeMounts:
- name: config
mountPath: /etc/livekit
- name: runtime-keys
mountPath: /var/run/livekit
resources:
requests:
cpu: 500m
memory: 512Mi
limits:
cpu: "2"
memory: 1Gi
volumes:
- name: config
configMap:
name: livekit-config
items:
- key: livekit.yaml
path: livekit.yaml
- name: runtime-keys
emptyDir: {}
---
apiVersion: v1
kind: Service
metadata:
name: livekit
annotations:
metallb.universe.tf/address-pool: communication-pool
spec:
type: LoadBalancer
loadBalancerClass: metallb
loadBalancerIP: 192.168.22.6
selector:
app: livekit
ports:
- name: http
port: 7880
targetPort: 7880
protocol: TCP
- name: tcp-media
port: 7881
targetPort: 7881
protocol: TCP
- name: udp-media
port: 7882
targetPort: 7882
protocol: UDP

View File

@ -0,0 +1,5 @@
# services/communication/namespace.yaml
apiVersion: v1
kind: Namespace
metadata:
name: communication

View File

@ -0,0 +1,68 @@
# services/communication/pin-othrys-job.yaml
# One-shot Job: logs in as the seeder user, posts the guest-invite message to
# #othrys and pins it. Assumes the seed-othrys-room CronJob has already
# registered the seeder account and created the room alias.
apiVersion: batch/v1
kind: Job
metadata:
  name: pin-othrys-invite
  namespace: communication
spec:
  # Garbage-collect the finished Job an hour after completion.
  ttlSecondsAfterFinished: 3600
  template:
    spec:
      restartPolicy: OnFailure
      containers:
      - name: pin
        image: python:3.11-slim
        env:
        - name: SYNAPSE_BASE
          value: http://othrys-synapse-matrix-synapse:8008
        - name: SEEDER_USER
          value: othrys-seeder
        - name: SEEDER_PASS
          valueFrom:
            secretKeyRef:
              name: atlasbot-credentials
              key: seeder-password
        command:
        # bash, not sh: `set -o pipefail` is not POSIX, and /bin/sh in
        # Debian-based python:*-slim images is dash, which rejects it with
        # "Illegal option -o pipefail" and would abort the Job immediately.
        - /bin/bash
        - -c
        # NOTE(review): the Matrix spec sends messages via
        # PUT /rooms/{id}/send/{type}/{txnId}; confirm the deployed Synapse
        # accepts the txn-less POST form used below.
        - |
          set -euo pipefail
          pip install --no-cache-dir requests >/dev/null
          python - <<'PY'
          import requests, urllib.parse, os
          BASE = os.environ["SYNAPSE_BASE"]
          def login(user, password):
              r = requests.post(f"{BASE}/_matrix/client/v3/login", json={
                  "type": "m.login.password",
                  "identifier": {"type": "m.id.user", "user": user},
                  "password": password,
              })
              r.raise_for_status()
              return r.json()["access_token"]
          def resolve(alias, token):
              enc = urllib.parse.quote(alias)
              r = requests.get(f"{BASE}/_matrix/client/v3/directory/room/{enc}", headers={"Authorization": f"Bearer {token}"})
              r.raise_for_status()
              return r.json()["room_id"]
          def send(room_id, token, body):
              r = requests.post(f"{BASE}/_matrix/client/v3/rooms/{urllib.parse.quote(room_id)}/send/m.room.message",
                  headers={"Authorization": f"Bearer {token}"},
                  json={"msgtype": "m.text", "body": body})
              r.raise_for_status()
              return r.json()["event_id"]
          def pin(room_id, token, event_id):
              r = requests.put(f"{BASE}/_matrix/client/v3/rooms/{urllib.parse.quote(room_id)}/state/m.room.pinned_events",
                  headers={"Authorization": f"Bearer {token}"},
                  json={"pinned": [event_id]})
              r.raise_for_status()
          token = login(os.environ["SEEDER_USER"], os.environ["SEEDER_PASS"])
          room_id = resolve("#othrys:live.bstein.dev", token)
          msg = "Invite guests: share https://live.bstein.dev/#/room/#othrys:live.bstein.dev?action=join and choose 'Continue' -> 'Join as guest'."
          eid = send(room_id, token, msg)
          pin(room_id, token, eid)
          PY

View File

@ -0,0 +1,135 @@
# services/communication/seed-othrys-room.yaml
# Reconciliation CronJob: registers the seeder/bot accounts (tolerating
# "already exists"), ensures the public #othrys room with guest access, and
# force-joins every local non-deactivated user via the Synapse admin API.
apiVersion: batch/v1
kind: CronJob
metadata:
  name: seed-othrys-room
  namespace: communication
spec:
  schedule: "*/10 * * * *"
  # Never overlap runs; room/state writes are not safe to race.
  concurrencyPolicy: Forbid
  jobTemplate:
    spec:
      template:
        spec:
          restartPolicy: OnFailure
          containers:
          - name: seed
            image: python:3.11-slim
            env:
            - name: SYNAPSE_BASE
              value: http://othrys-synapse-matrix-synapse:8008
            # The registration shared secret is consumed from the Synapse
            # config mounted at /config (see volumeMounts below); the old
            # REG_SECRET env var duplicated the whole config secret into the
            # environment without ever being read, so it was dropped.
            - name: SEEDER_USER
              value: othrys-seeder
            - name: SEEDER_PASS
              valueFrom:
                secretKeyRef:
                  name: atlasbot-credentials
                  key: seeder-password
            - name: BOT_USER
              value: atlasbot
            - name: BOT_PASS
              valueFrom:
                secretKeyRef:
                  name: atlasbot-credentials
                  key: bot-password
            command:
            # bash, not sh: `set -o pipefail` is not POSIX, and /bin/sh in
            # Debian-based python:*-slim images is dash, which rejects it
            # with "Illegal option -o pipefail" before the script runs.
            - /bin/bash
            - -c
            # NOTE(review): matrix-synapse is installed only for the
            # register_new_matrix_user CLI — heavyweight on each run; a
            # prebuilt image would avoid ~minutes of pip install per tick.
            - |
              set -euo pipefail
              pip install --no-cache-dir requests pyyaml matrix-synapse >/dev/null
              python - <<'PY'
              import os, subprocess, requests, yaml
              BASE = os.environ["SYNAPSE_BASE"]
              CONFIG = "/config/config.yaml"
              def register(user, password, admin=False):
                  args = ["register_new_matrix_user", "-c", CONFIG, "-u", user, "-p", password]
                  if admin:
                      args.append("-a")
                  args.append(BASE)
                  res = subprocess.run(args, capture_output=True, text=True)
                  if res.returncode not in (0, 1): # 1 = already exists
                      raise SystemExit(f"register {user} failed: {res.returncode} {res.stderr}")
              def login(user, password):
                  r = requests.post(f"{BASE}/_matrix/client/v3/login", json={
                      "type": "m.login.password",
                      "identifier": {"type": "m.id.user", "user": user},
                      "password": password,
                  })
                  if r.status_code != 200:
                      raise SystemExit(f"login failed: {r.status_code} {r.text}")
                  return r.json()["access_token"]
              def ensure_room(token):
                  headers = {"Authorization": f"Bearer {token}"}
                  alias = "#othrys:live.bstein.dev"
                  alias_enc = "%23othrys%3Alive.bstein.dev"
                  exists = requests.get(f"{BASE}/_matrix/client/v3/directory/room/{alias_enc}", headers=headers)
                  if exists.status_code == 200:
                      room_id = exists.json()["room_id"]
                  else:
                      create = requests.post(f"{BASE}/_matrix/client/v3/createRoom", headers=headers, json={
                          "preset": "public_chat",
                          "name": "Othrys",
                          "room_alias_name": "othrys",
                          "initial_state": [],
                          "power_level_content_override": {"events_default": 0, "users_default": 0, "state_default": 50},
                      })
                      if create.status_code not in (200, 409):
                          raise SystemExit(f"create room failed: {create.status_code} {create.text}")
                      exists = requests.get(f"{BASE}/_matrix/client/v3/directory/room/{alias_enc}", headers=headers)
                      room_id = exists.json()["room_id"]
                  state_events = [
                      ("m.room.join_rules", {"join_rule": "public"}),
                      ("m.room.guest_access", {"guest_access": "can_join"}),
                      ("m.room.history_visibility", {"history_visibility": "shared"}),
                      ("m.room.canonical_alias", {"alias": alias}),
                  ]
                  for ev_type, content in state_events:
                      requests.put(f"{BASE}/_matrix/client/v3/rooms/{room_id}/state/{ev_type}", headers=headers, json=content)
                  requests.put(f"{BASE}/_matrix/client/v3/directory/list/room/{room_id}", headers=headers, json={"visibility": "public"})
                  return room_id
              def join_user(token, room_id, user_id):
                  headers = {"Authorization": f"Bearer {token}"}
                  requests.post(f"{BASE}/_synapse/admin/v1/join/{room_id}", headers=headers, json={"user_id": user_id})
              def join_all_locals(token, room_id):
                  headers = {"Authorization": f"Bearer {token}"}
                  users = []
                  from_token = None
                  while True:
                      url = f"{BASE}/_synapse/admin/v2/users?local=true&deactivated=false&limit=100"
                      if from_token:
                          url += f"&from={from_token}"
                      res = requests.get(url, headers=headers).json()
                      users.extend([u["name"] for u in res.get("users", [])])
                      from_token = res.get("next_token")
                      if not from_token:
                          break
                  for uid in users:
                      join_user(token, room_id, uid)
              register(os.environ["SEEDER_USER"], os.environ["SEEDER_PASS"], admin=True)
              register(os.environ["BOT_USER"], os.environ["BOT_PASS"], admin=False)
              token = login(os.environ["SEEDER_USER"], os.environ["SEEDER_PASS"])
              room_id = ensure_room(token)
              join_user(token, room_id, f"@{os.environ['BOT_USER']}:live.bstein.dev")
              join_all_locals(token, room_id)
              PY
            volumeMounts:
            - name: synapse-config
              mountPath: /config
              readOnly: true
          volumes:
          - name: synapse-config
            secret:
              secretName: othrys-synapse-matrix-synapse

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,59 @@
# services/communication/values-element.yaml
# Helm values for the Element Web client served at https://live.bstein.dev.
replicaCount: 1
# Homeserver the client signs in against by default.
defaultServer:
  url: https://matrix.live.bstein.dev
  name: live.bstein.dev
config:
  default_theme: dark
  brand: Othrys
  # Lock users to this deployment's homeserver and default language.
  disable_custom_urls: true
  disable_login_language_selector: true
  # Guests stay enabled: Synapse allows guest access for #othrys and the
  # pinned invite link relies on "Join as guest".
  disable_guests: false
  show_labs_settings: true
  features:
    # Element Call / video-room labs flags used by the Othrys call flow.
    feature_group_calls: true
    feature_video_rooms: true
    feature_element_call_video_rooms: true
  room_directory:
    servers:
      - live.bstein.dev
  # Empty mapping deliberately clears any chart-default Jitsi settings;
  # calls go through Element Call + LiveKit instead.
  jitsi: {}
  element_call:
    url: https://call.live.bstein.dev
    participant_limit: 16
    brand: Othrys Call
ingress:
  enabled: true
  className: traefik
  annotations:
    cert-manager.io/cluster-issuer: letsencrypt
    traefik.ingress.kubernetes.io/router.entrypoints: websecure
  hosts:
    - live.bstein.dev
  tls:
    - secretName: live-othrys-tls
      hosts: [live.bstein.dev]
resources:
  requests:
    cpu: 100m
    memory: 256Mi
  limits:
    cpu: 500m
    memory: 512Mi
# NOTE(review): the nodeSelector below hard-requires rpi5 nodes, so the
# preferred affinity can never place pods on rpi4 — confirm which is intended.
nodeSelector:
  hardware: rpi5
affinity:
  nodeAffinity:
    preferredDuringSchedulingIgnoredDuringExecution:
      - weight: 50
        preference:
          matchExpressions:
            - key: hardware
              operator: In
              values: ["rpi5","rpi4"]

View File

@ -0,0 +1,132 @@
# services/communication/values-synapse.yaml
# Helm values for the Othrys Synapse homeserver (server name live.bstein.dev,
# client API published at matrix.live.bstein.dev).
serverName: live.bstein.dev
publicServerName: matrix.live.bstein.dev
config:
  publicBaseurl: https://matrix.live.bstein.dev
# Database lives in the shared cluster Postgres; the chart's bundled
# postgresql subchart is disabled below.
externalPostgresql:
  host: postgres-service.postgres.svc.cluster.local
  port: 5432
  username: synapse
  existingSecret: synapse-db
  existingSecretPasswordKey: POSTGRES_PASSWORD
  database: synapse
redis:
  enabled: true
  auth:
    enabled: true
    existingSecret: synapse-redis
    existingSecretPasswordKey: redis-password
postgresql:
  enabled: false
persistence:
  enabled: true
  storageClass: asteria
  accessMode: ReadWriteOnce
  size: 50Gi
synapse:
  # 666 is the UID/GID the Synapse image runs as; fsGroup lets it write the PVC.
  podSecurityContext:
    fsGroup: 666
    runAsUser: 666
    runAsGroup: 666
  resources:
    requests:
      cpu: 500m
      memory: 1Gi
    limits:
      cpu: "2"
      memory: 3Gi
  # NOTE(review): nodeSelector hard-requires rpi5, so the rpi4 preference in
  # the affinity below is unreachable — confirm which is intended.
  nodeSelector:
    hardware: rpi5
  affinity:
    nodeAffinity:
      preferredDuringSchedulingIgnoredDuringExecution:
        - weight: 50
          preference:
            matchExpressions:
              - key: hardware
                operator: In
                values: ["rpi5","rpi4"]
ingress:
  enabled: true
  className: traefik
  annotations:
    cert-manager.io/cluster-issuer: letsencrypt
    traefik.ingress.kubernetes.io/router.entrypoints: websecure
  csHosts:
    - matrix.live.bstein.dev
  hosts:
    - matrix.live.bstein.dev
  # NOTE(review): wkHosts makes the chart serve /.well-known for
  # live.bstein.dev, which services/communication/wellknown.yaml also claims
  # with its own Ingress — verify the two do not conflict.
  wkHosts:
    - live.bstein.dev
    - bstein.dev
  tls:
    - secretName: matrix-live-tls
      hosts:
        - matrix.live.bstein.dev
        - live.bstein.dev
# Raw Synapse homeserver.yaml additions merged in by the chart.
extraConfig:
  # Guest + unauthenticated room-directory access back the #othrys invite flow.
  allow_guest_access: true
  allow_public_rooms_without_auth: true
  auto_join_rooms:
    - "#othrys:live.bstein.dev"
  autocreate_auto_join_rooms: true
  default_room_version: "11"
  # Experimental MSCs used by the Element Call / MatrixRTC stack —
  # TODO confirm these flags against the deployed Synapse version's docs.
  experimental_features:
    msc3266_enabled: true
    msc4143_enabled: true
    msc4222_enabled: true
  max_event_delay_duration: 24h
  # NOTE(review): `oidc_enabled` is not a documented key of Synapse's
  # password_config (OIDC is configured via oidc_providers) — verify the
  # chart/Synapse version actually consumes it.
  password_config:
    enabled: true
    oidc_enabled: true
  oidc_providers:
    - idp_id: keycloak
      idp_name: Keycloak
      issuer: https://sso.bstein.dev/realms/atlas
      client_id: synapse
      # Placeholder — presumably substituted at deploy time; verify the
      # substitution mechanism so the literal string never reaches Synapse.
      client_secret: "@@OIDC_CLIENT_SECRET@@"
      client_auth_method: client_secret_post
      scopes: ["openid", "profile", "email"]
      authorization_endpoint: https://sso.bstein.dev/realms/atlas/protocol/openid-connect/auth
      token_endpoint: https://sso.bstein.dev/realms/atlas/protocol/openid-connect/token
      userinfo_endpoint: https://sso.bstein.dev/realms/atlas/protocol/openid-connect/userinfo
      user_mapping_provider:
        config:
          localpart_template: "{{ user.preferred_username }}"
          display_name_template: "{{ user.name }}"
      allow_existing_users: true
  rc_message:
    per_second: 0.5
    burst_count: 30
  rc_delayed_event_mgmt:
    per_second: 1
    burst_count: 20
  rc_login:
    address:
      burst_count: 20
      per_second: 5
    account:
      burst_count: 20
      per_second: 5
    failed_attempts:
      burst_count: 20
      per_second: 5
  # Allow any user to publish rooms to the public directory.
  room_list_publication_rules:
    - action: allow
  # Served via the chart's wkHosts ingress; keep in sync with
  # services/communication/wellknown.yaml.
  well_known_client:
    "m.homeserver":
      "base_url": "https://matrix.live.bstein.dev"
    "org.matrix.msc4143.rtc_foci":
      - type: "livekit"
        livekit_service_url: "https://kit.live.bstein.dev/livekit/jwt"
worker:
  enabled: false

View File

@ -0,0 +1,109 @@
# services/communication/wellknown.yaml
# Static server for the Matrix discovery documents of live.bstein.dev:
# /.well-known/matrix/client (homeserver + MatrixRTC foci) and
# /.well-known/matrix/server (federation delegation).
apiVersion: v1
kind: ConfigMap
metadata:
  name: matrix-wellknown
  namespace: communication
data:
  client.json: |
    {
      "m.homeserver": {
        "base_url": "https://matrix.live.bstein.dev"
      },
      "org.matrix.msc4143.rtc_foci": [
        {
          "type": "livekit",
          "livekit_service_url": "https://kit.live.bstein.dev/livekit/jwt"
        }
      ]
    }
  # NOTE(review): this delegates federation to live.bstein.dev:443 — confirm
  # that host actually terminates the federation API (the Synapse client
  # ingress above is matrix.live.bstein.dev).
  server.json: |
    {
      "m.server": "live.bstein.dev:443"
    }
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: matrix-wellknown
  namespace: communication
  labels:
    app: matrix-wellknown
spec:
  replicas: 1
  selector:
    matchLabels:
      app: matrix-wellknown
  template:
    metadata:
      labels:
        app: matrix-wellknown
    spec:
      containers:
      - name: nginx
        # NOTE(review): stock nginx serves these extension-less files as
        # application/octet-stream and sends no Access-Control-Allow-Origin
        # header; the Matrix spec expects CORS "*" on the client well-known —
        # confirm clients cope or add a small nginx config.
        image: nginx:1.27-alpine
        ports:
        - containerPort: 80
        volumeMounts:
        # subPath mounts place each JSON document at the exact well-known URL
        # path under nginx's default docroot.
        - name: wellknown
          mountPath: /usr/share/nginx/html/.well-known/matrix/client
          subPath: client.json
        - name: wellknown
          mountPath: /usr/share/nginx/html/.well-known/matrix/server
          subPath: server.json
      volumes:
      - name: wellknown
        configMap:
          name: matrix-wellknown
          items:
          - key: client.json
            path: client.json
          - key: server.json
            path: server.json
---
apiVersion: v1
kind: Service
metadata:
  name: matrix-wellknown
  namespace: communication
spec:
  selector:
    app: matrix-wellknown
  ports:
  - name: http
    port: 80
    targetPort: 80
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: matrix-wellknown
  namespace: communication
  annotations:
    traefik.ingress.kubernetes.io/router.entrypoints: websecure
    traefik.ingress.kubernetes.io/router.tls: "true"
    cert-manager.io/cluster-issuer: letsencrypt
spec:
  # Modern class selection; the kubernetes.io/ingress.class annotation is
  # deprecated and ignored by newer controllers.
  ingressClassName: traefik
  tls:
  - hosts:
    - live.bstein.dev
    secretName: live-othrys-tls
  rules:
  - host: live.bstein.dev
    http:
      paths:
      - path: /.well-known/matrix/client
        pathType: Prefix
        backend:
          service:
            name: matrix-wellknown
            port:
              number: 80
      - path: /.well-known/matrix/server
        pathType: Prefix
        backend:
          service:
            name: matrix-wellknown
            port:
              number: 80

View File

@ -0,0 +1,42 @@
# services/mailu/front-lb.yaml
# Dedicated LoadBalancer giving Mailu's front proxy a stable external IP for
# the mail protocols (SMTP/submission/IMAPS/POP3S/Sieve).
apiVersion: v1
kind: Service
metadata:
  name: mailu-front-lb
  namespace: mailu-mailserver
  annotations:
    metallb.universe.tf/address-pool: communication-pool
    # Pin the external address via MetalLB's annotation; the legacy
    # spec.loadBalancerIP field is deprecated since Kubernetes v1.24.
    metallb.universe.tf/loadBalancerIPs: 192.168.22.4
spec:
  type: LoadBalancer
  loadBalancerClass: metallb
  # NOTE(review): Cluster policy SNATs traffic, so Mailu sees node IPs rather
  # than real client IPs (affects SPF/rate-limit logs); Local would preserve
  # them but requires endpoints on the node holding the LB IP — confirm.
  externalTrafficPolicy: Cluster
  selector:
    app.kubernetes.io/component: front
    app.kubernetes.io/instance: mailu
    app.kubernetes.io/name: mailu
  ports:
  - name: smtp
    port: 25
    targetPort: 25
    protocol: TCP
  - name: smtps
    port: 465
    targetPort: 465
    protocol: TCP
  - name: submission
    port: 587
    targetPort: 587
    protocol: TCP
  - name: imaps
    port: 993
    targetPort: 993
    protocol: TCP
  - name: pop3s
    port: 995
    targetPort: 995
    protocol: TCP
  - name: sieve
    port: 4190
    targetPort: 4190
    protocol: TCP

View File

@ -13,6 +13,7 @@ resources:
- mailu-sync-job.yaml
- mailu-sync-cronjob.yaml
- mailu-sync-listener.yaml
- front-lb.yaml
configMapGenerator:
- name: mailu-sync-script