titan-iac/services/communication/synapse-rendered.yaml

---
# Source: matrix-synapse/charts/redis/templates/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
automountServiceAccountToken: true
metadata:
  name: othrys-synapse-redis
  namespace: "communication"
  labels:
    app.kubernetes.io/instance: othrys-synapse
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/name: redis
    helm.sh/chart: redis-17.17.1
---
# Source: matrix-synapse/templates/secrets.yaml
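# NOTE: The @@POSTGRES_PASSWORD@@ and @@REDIS_PASSWORD@@ tokens below are not literal
# values; the Synapse Deployment's startup command renders this config.yaml through sed,
# filling them in from the synapse-db and synapse-redis Secrets before Synapse starts.
# Illustrative sketch of that substitution (the real command lives in the Deployment below):
#   cat /synapse/secrets/*.yaml \
#     | sed -e "s/@@POSTGRES_PASSWORD@@/${POSTGRES_PASSWORD:-}/" \
#           -e "s/@@REDIS_PASSWORD@@/${REDIS_PASSWORD:-}/" \
#     > /synapse/config/conf.d/secrets.yaml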
apiVersion: v1
kind: Secret
metadata:
  name: othrys-synapse-matrix-synapse
  labels:
    helm.sh/chart: matrix-synapse-3.12.17
    app.kubernetes.io/name: matrix-synapse
    app.kubernetes.io/instance: othrys-synapse
    app.kubernetes.io/version: "1.144.0"
    app.kubernetes.io/managed-by: Helm
stringData:
  config.yaml: |
    ## Registration ##
    ## API Configuration ##
    ## Database configuration ##
    database:
      name: "psycopg2"
      args:
        user: "synapse"
        password: "@@POSTGRES_PASSWORD@@"
        database: "synapse"
        host: "postgres-service.postgres.svc.cluster.local"
        port: 5432
        sslmode: "prefer"
        cp_min: 5
        cp_max: 10
    ## Redis configuration ##
    redis:
      enabled: true
      host: "othrys-synapse-redis-master"
      port: 6379
      password: "@@REDIS_PASSWORD@@"
---
# Source: matrix-synapse/charts/redis/templates/configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: othrys-synapse-redis-configuration
  namespace: "communication"
  labels:
    app.kubernetes.io/instance: othrys-synapse
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/name: redis
    helm.sh/chart: redis-17.17.1
data:
  redis.conf: |-
    # User-supplied common configuration:
    # Enable AOF https://redis.io/topics/persistence#append-only-file
    appendonly yes
    # Disable RDB persistence, AOF persistence already enabled.
    save ""
    # End of common configuration
  master.conf: |-
    dir /data
    # User-supplied master configuration:
    rename-command FLUSHDB ""
    rename-command FLUSHALL ""
    # End of master configuration
  replica.conf: |-
    dir /data
    # User-supplied replica configuration:
    rename-command FLUSHDB ""
    rename-command FLUSHALL ""
    # End of replica configuration
---
# Source: matrix-synapse/charts/redis/templates/health-configmap.yaml
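# NOTE: Each probe script below takes a timeout in seconds as $1; the Redis Deployment
# invokes them as "/health/ping_readiness_local.sh 1" and "/health/ping_liveness_local.sh 5",
# and an exit status of 124 from `timeout` is reported as "Timed out". Illustrative manual
# check (assumes kubectl access to the communication namespace):
#   kubectl -n communication exec deploy/othrys-synapse-redis-master -- /health/ping_readiness_local.sh 1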
apiVersion: v1
kind: ConfigMap
metadata:
  name: othrys-synapse-redis-health
  namespace: "communication"
  labels:
    app.kubernetes.io/instance: othrys-synapse
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/name: redis
    helm.sh/chart: redis-17.17.1
data:
  ping_readiness_local.sh: |-
    #!/bin/bash
    [[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")"
    [[ -n "$REDIS_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_PASSWORD"
    response=$(
      timeout -s 15 $1 \
      redis-cli \
        -h localhost \
        -p $REDIS_PORT \
        ping
    )
    if [ "$?" -eq "124" ]; then
      echo "Timed out"
      exit 1
    fi
    if [ "$response" != "PONG" ]; then
      echo "$response"
      exit 1
    fi
  ping_liveness_local.sh: |-
    #!/bin/bash
    [[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")"
    [[ -n "$REDIS_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_PASSWORD"
    response=$(
      timeout -s 15 $1 \
      redis-cli \
        -h localhost \
        -p $REDIS_PORT \
        ping
    )
    if [ "$?" -eq "124" ]; then
      echo "Timed out"
      exit 1
    fi
    responseFirstWord=$(echo $response | head -n1 | awk '{print $1;}')
    if [ "$response" != "PONG" ] && [ "$responseFirstWord" != "LOADING" ] && [ "$responseFirstWord" != "MASTERDOWN" ]; then
      echo "$response"
      exit 1
    fi
  ping_readiness_master.sh: |-
    #!/bin/bash
    [[ -f $REDIS_MASTER_PASSWORD_FILE ]] && export REDIS_MASTER_PASSWORD="$(< "${REDIS_MASTER_PASSWORD_FILE}")"
    [[ -n "$REDIS_MASTER_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_MASTER_PASSWORD"
    response=$(
      timeout -s 15 $1 \
      redis-cli \
        -h $REDIS_MASTER_HOST \
        -p $REDIS_MASTER_PORT_NUMBER \
        ping
    )
    if [ "$?" -eq "124" ]; then
      echo "Timed out"
      exit 1
    fi
    if [ "$response" != "PONG" ]; then
      echo "$response"
      exit 1
    fi
  ping_liveness_master.sh: |-
    #!/bin/bash
    [[ -f $REDIS_MASTER_PASSWORD_FILE ]] && export REDIS_MASTER_PASSWORD="$(< "${REDIS_MASTER_PASSWORD_FILE}")"
    [[ -n "$REDIS_MASTER_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_MASTER_PASSWORD"
    response=$(
      timeout -s 15 $1 \
      redis-cli \
        -h $REDIS_MASTER_HOST \
        -p $REDIS_MASTER_PORT_NUMBER \
        ping
    )
    if [ "$?" -eq "124" ]; then
      echo "Timed out"
      exit 1
    fi
    responseFirstWord=$(echo $response | head -n1 | awk '{print $1;}')
    if [ "$response" != "PONG" ] && [ "$responseFirstWord" != "LOADING" ]; then
      echo "$response"
      exit 1
    fi
  ping_readiness_local_and_master.sh: |-
    script_dir="$(dirname "$0")"
    exit_status=0
    "$script_dir/ping_readiness_local.sh" $1 || exit_status=$?
    "$script_dir/ping_readiness_master.sh" $1 || exit_status=$?
    exit $exit_status
  ping_liveness_local_and_master.sh: |-
    script_dir="$(dirname "$0")"
    exit_status=0
    "$script_dir/ping_liveness_local.sh" $1 || exit_status=$?
    "$script_dir/ping_liveness_master.sh" $1 || exit_status=$?
    exit $exit_status
---
# Source: matrix-synapse/charts/redis/templates/scripts-configmap.yaml
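# NOTE: start-master.sh copies the mounted redis.conf/master.conf into a writable etc
# directory and then execs redis-server with the password and config includes passed as
# arguments; the effective invocation is roughly:
#   redis-server --port 6379 --requirepass "$REDIS_PASSWORD" --masterauth "$REDIS_PASSWORD" \
#     --include /opt/bitnami/redis/etc/redis.conf --include /opt/bitnami/redis/etc/master.conf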
apiVersion: v1
kind: ConfigMap
metadata:
  name: othrys-synapse-redis-scripts
  namespace: "communication"
  labels:
    app.kubernetes.io/instance: othrys-synapse
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/name: redis
    helm.sh/chart: redis-17.17.1
data:
  start-master.sh: |
    #!/bin/bash
    [[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")"
    if [[ -f /opt/bitnami/redis/mounted-etc/master.conf ]];then
        cp /opt/bitnami/redis/mounted-etc/master.conf /opt/bitnami/redis/etc/master.conf
    fi
    if [[ -f /opt/bitnami/redis/mounted-etc/redis.conf ]];then
        cp /opt/bitnami/redis/mounted-etc/redis.conf /opt/bitnami/redis/etc/redis.conf
    fi
    ARGS=("--port" "${REDIS_PORT}")
    ARGS+=("--requirepass" "${REDIS_PASSWORD}")
    ARGS+=("--masterauth" "${REDIS_PASSWORD}")
    ARGS+=("--include" "/opt/bitnami/redis/etc/redis.conf")
    ARGS+=("--include" "/opt/bitnami/redis/etc/master.conf")
    exec redis-server "${ARGS[@]}"
---
# Source: matrix-synapse/templates/configuration.yaml
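# NOTE: homeserver.yaml below still contains @@TURN_SECRET@@, @@OIDC_CLIENT_SECRET@@ and
# @@MAS_SHARED_SECRET@@ placeholders. The Synapse Deployment copies it to
# /synapse/runtime-config/homeserver.yaml at startup, rewrites the placeholders in that
# copy, and starts Synapse against the runtime copy plus the rendered conf.d secrets.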
apiVersion: v1
kind: ConfigMap
metadata:
  name: othrys-synapse-matrix-synapse
  labels:
    helm.sh/chart: matrix-synapse-3.12.17
    app.kubernetes.io/name: matrix-synapse
    app.kubernetes.io/instance: othrys-synapse
    app.kubernetes.io/version: "1.144.0"
    app.kubernetes.io/managed-by: Helm
data:
  log.yaml: |
    version: 1
    formatters:
      precise:
        format: '%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s- %(message)s'
    filters:
      context:
        (): synapse.util.logcontext.LoggingContextFilter
        request: ""
    handlers:
      console:
        class: logging.StreamHandler
        formatter: precise
        filters: [context]
        level: INFO
    loggers:
      synapse:
        level: INFO
    root:
      level: INFO
      handlers: [console]
  homeserver.yaml: |
    # NOTE:
    # Secrets are stored in separate configs to better fit K8s concepts
    ## Server ##
    server_name: "live.bstein.dev"
    public_baseurl: "https://matrix.live.bstein.dev"
    pid_file: /homeserver.pid
    web_client: False
    soft_file_limit: 0
    log_config: "/synapse/config/log.yaml"
    report_stats: false
    instance_map:
      main:
        host: othrys-synapse-replication
        port: 9093
    ## Ports ##
    listeners:
      - port: 8008
        tls: false
        bind_addresses: ["::"]
        type: http
        x_forwarded: true
        resources:
          - names:
              - client
              - federation
            compress: false
      - port: 9090
        tls: false
        bind_addresses: ["::"]
        type: http
        resources:
          - names: [metrics]
            compress: false
      - port: 9093
        tls: false
        bind_addresses: ["::"]
        type: http
        resources:
          - names: [replication]
            compress: false
    ## Files ##
    media_store_path: "/synapse/data/media"
    uploads_path: "/synapse/data/uploads"
    ## Registration ##
    enable_registration: false
    ## Metrics ###
    enable_metrics: true
    ## Signing Keys ##
    signing_key_path: "/synapse/keys/signing.key"
    # The trusted servers to download signing keys from.
    trusted_key_servers:
      - server_name: matrix.org
    ## Workers ##
    ## Extra config ##
    allow_guest_access: true
    allow_public_rooms_without_auth: true
    auto_join_rooms:
      - "#othrys:live.bstein.dev"
    autocreate_auto_join_rooms: true
    default_room_version: "11"
    experimental_features:
      msc3266_enabled: true
      msc4143_enabled: true
      msc4222_enabled: true
    max_event_delay_duration: 24h
    password_config:
      enabled: true
    turn_uris:
      - "turn:turn.live.bstein.dev:3478?transport=udp"
      - "turn:turn.live.bstein.dev:3478?transport=tcp"
      - "turns:turn.live.bstein.dev:5349?transport=tcp"
    turn_shared_secret: "@@TURN_SECRET@@"
    turn_allow_guests: true
    turn_user_lifetime: 86400000
    rc_login:
      address:
        burst_count: 20
        per_second: 5
      account:
        burst_count: 20
        per_second: 5
      failed_attempts:
        burst_count: 20
        per_second: 5
    rc_message:
      per_second: 0.5
      burst_count: 30
    rc_delayed_event_mgmt:
      per_second: 1
      burst_count: 20
    room_list_publication_rules:
      - action: allow
    well_known_client:
      "m.homeserver":
        "base_url": "https://matrix.live.bstein.dev"
      "org.matrix.msc4143.rtc_foci":
        - type: "livekit"
          livekit_service_url: "https://kit.live.bstein.dev/livekit/jwt"
    oidc_enabled: true
    oidc_providers:
      - allow_existing_users: true
        authorization_endpoint: https://sso.bstein.dev/realms/atlas/protocol/openid-connect/auth
        client_auth_method: client_secret_post
        client_id: synapse
        client_secret: "@@OIDC_CLIENT_SECRET@@"
        idp_id: keycloak
        idp_name: Keycloak
        issuer: https://sso.bstein.dev/realms/atlas
        scopes:
          - openid
          - profile
          - email
        token_endpoint: https://sso.bstein.dev/realms/atlas/protocol/openid-connect/token
        user_mapping_provider:
          config:
            display_name_template: '{{ user.name }}'
            localpart_template: '{{ user.preferred_username }}'
        userinfo_endpoint: https://sso.bstein.dev/realms/atlas/protocol/openid-connect/userinfo
    matrix_authentication_service:
      enabled: true
      endpoint: http://matrix-authentication-service:8080/
      secret: "@@MAS_SHARED_SECRET@@"
---
# Source: matrix-synapse/templates/pvc.yaml
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: othrys-synapse-matrix-synapse
  labels:
    helm.sh/chart: matrix-synapse-3.12.17
    app.kubernetes.io/name: matrix-synapse
    app.kubernetes.io/instance: othrys-synapse
    app.kubernetes.io/version: "1.144.0"
    app.kubernetes.io/managed-by: Helm
spec:
  accessModes:
    - "ReadWriteOnce"
  resources:
    requests:
      storage: "50Gi"
  storageClassName: "asteria"
---
# Source: matrix-synapse/charts/redis/templates/headless-svc.yaml
apiVersion: v1
kind: Service
metadata:
  name: othrys-synapse-redis-headless
  namespace: "communication"
  labels:
    app.kubernetes.io/instance: othrys-synapse
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/name: redis
    helm.sh/chart: redis-17.17.1
  annotations:
spec:
  type: ClusterIP
  clusterIP: None
  ports:
    - name: tcp-redis
      port: 6379
      targetPort: redis
  selector:
    app.kubernetes.io/instance: othrys-synapse
    app.kubernetes.io/name: redis
---
# Source: matrix-synapse/charts/redis/templates/master/service.yaml
apiVersion: v1
kind: Service
metadata:
  name: othrys-synapse-redis-master
  namespace: "communication"
  labels:
    app.kubernetes.io/instance: othrys-synapse
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/name: redis
    helm.sh/chart: redis-17.17.1
    app.kubernetes.io/component: master
spec:
  type: ClusterIP
  internalTrafficPolicy: Cluster
  sessionAffinity: None
  ports:
    - name: tcp-redis
      port: 6379
      targetPort: redis
      nodePort: null
  selector:
    app.kubernetes.io/instance: othrys-synapse
    app.kubernetes.io/name: redis
    app.kubernetes.io/component: master
---
# Source: matrix-synapse/templates/service.yaml
apiVersion: v1
kind: Service
metadata:
  name: othrys-synapse-matrix-synapse
  labels:
    helm.sh/chart: matrix-synapse-3.12.17
    app.kubernetes.io/name: matrix-synapse
    app.kubernetes.io/instance: othrys-synapse
    app.kubernetes.io/version: "1.144.0"
    app.kubernetes.io/managed-by: Helm
spec:
  type: ClusterIP
  ports:
    - port: 8008
      targetPort: http
      protocol: TCP
      name: http
  selector:
    app.kubernetes.io/component: synapse
    app.kubernetes.io/name: matrix-synapse
    app.kubernetes.io/instance: othrys-synapse
---
# Source: matrix-synapse/templates/service.yaml
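# NOTE: This Service backs instance_map.main in homeserver.yaml
# (othrys-synapse-replication:9093), i.e. the replication listener of the main process.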
apiVersion: v1
kind: Service
metadata:
  name: othrys-synapse-replication
  labels:
    helm.sh/chart: matrix-synapse-3.12.17
    app.kubernetes.io/name: matrix-synapse
    app.kubernetes.io/instance: othrys-synapse
    app.kubernetes.io/version: "1.144.0"
    app.kubernetes.io/managed-by: Helm
spec:
  type: ClusterIP
  ports:
    - port: 9093
      targetPort: replication
      protocol: TCP
      name: replication
  selector:
    app.kubernetes.io/name: matrix-synapse
    app.kubernetes.io/instance: othrys-synapse
    app.kubernetes.io/component: synapse
---
# Source: matrix-synapse/charts/redis/templates/master/application.yaml
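# NOTE: redis-data is an emptyDir, so the AOF written under /data does not survive pod
# rescheduling. Synapse uses this Redis as a replication/pub-sub bus rather than a system
# of record, so losing it on reschedule is generally tolerable.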
apiVersion: apps/v1
kind: Deployment
metadata:
  name: othrys-synapse-redis-master
  namespace: "communication"
  labels:
    app.kubernetes.io/instance: othrys-synapse
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/name: redis
    helm.sh/chart: redis-17.17.1
    app.kubernetes.io/component: master
spec:
  replicas: 1
  selector:
    matchLabels:
      app.kubernetes.io/instance: othrys-synapse
      app.kubernetes.io/name: redis
      app.kubernetes.io/component: master
  strategy:
    type: RollingUpdate
  template:
    metadata:
      labels:
        app.kubernetes.io/instance: othrys-synapse
        app.kubernetes.io/managed-by: Helm
        app.kubernetes.io/name: redis
        helm.sh/chart: redis-17.17.1
        app.kubernetes.io/component: master
      annotations:
        checksum/configmap: 86bcc953bb473748a3d3dc60b7c11f34e60c93519234d4c37f42e22ada559d47
        checksum/health: aff24913d801436ea469d8d374b2ddb3ec4c43ee7ab24663d5f8ff1a1b6991a9
        checksum/scripts: 560c33ff34d845009b51830c332aa05fa211444d1877d3526d3599be7543aaa5
        checksum/secret: 44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a
    spec:
      securityContext:
        fsGroup: 1001
      serviceAccountName: othrys-synapse-redis
      automountServiceAccountToken: true
      affinity:
        podAffinity:
        podAntiAffinity:
          preferredDuringSchedulingIgnoredDuringExecution:
            - podAffinityTerm:
                labelSelector:
                  matchLabels:
                    app.kubernetes.io/instance: othrys-synapse
                    app.kubernetes.io/name: redis
                    app.kubernetes.io/component: master
                topologyKey: kubernetes.io/hostname
              weight: 1
        nodeAffinity:
      enableServiceLinks: true
      terminationGracePeriodSeconds: 30
      containers:
        - name: redis
          image: docker.io/bitnamilegacy/redis:7.0.12-debian-11-r34
          imagePullPolicy: "IfNotPresent"
          securityContext:
            allowPrivilegeEscalation: false
            capabilities:
              drop:
                - ALL
            runAsGroup: 0
            runAsNonRoot: true
            runAsUser: 1001
            seccompProfile:
              type: RuntimeDefault
          command:
            - /bin/bash
          args:
            - -c
            - /opt/bitnami/scripts/start-scripts/start-master.sh
          env:
            - name: BITNAMI_DEBUG
              value: "false"
            - name: REDIS_REPLICATION_MODE
              value: master
            - name: ALLOW_EMPTY_PASSWORD
              value: "no"
            - name: REDIS_PASSWORD
              valueFrom:
                secretKeyRef:
                  name: synapse-redis
                  key: redis-password
            - name: REDIS_TLS_ENABLED
              value: "no"
            - name: REDIS_PORT
              value: "6379"
          ports:
            - name: redis
              containerPort: 6379
          livenessProbe:
            initialDelaySeconds: 20
            periodSeconds: 5
            # One second longer than command timeout should prevent generation of zombie processes.
            timeoutSeconds: 6
            successThreshold: 1
            failureThreshold: 5
            exec:
              command:
                - sh
                - -c
                - /health/ping_liveness_local.sh 5
          readinessProbe:
            initialDelaySeconds: 20
            periodSeconds: 5
            timeoutSeconds: 2
            successThreshold: 1
            failureThreshold: 5
            exec:
              command:
                - sh
                - -c
                - /health/ping_readiness_local.sh 1
          resources:
            limits: {}
            requests: {}
          volumeMounts:
            - name: start-scripts
              mountPath: /opt/bitnami/scripts/start-scripts
            - name: health
              mountPath: /health
            - name: redis-data
              mountPath: /data
            - name: config
              mountPath: /opt/bitnami/redis/mounted-etc
            - name: redis-tmp-conf
              mountPath: /opt/bitnami/redis/etc/
            - name: tmp
              mountPath: /tmp
      volumes:
        - name: start-scripts
          configMap:
            name: othrys-synapse-redis-scripts
            defaultMode: 0755
        - name: health
          configMap:
            name: othrys-synapse-redis-health
            defaultMode: 0755
        - name: config
          configMap:
            name: othrys-synapse-redis-configuration
        - name: redis-tmp-conf
          emptyDir: {}
        - name: tmp
          emptyDir: {}
        - name: redis-data
          emptyDir: {}
---
# Source: matrix-synapse/templates/deployment.yaml
# Server: live.bstein.dev
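# NOTE: The synapse container's sh -c entrypoint below does four things before starting
# the homeserver: (1) escape the secret values for safe use in sed replacements,
# (2) render /synapse/secrets/*.yaml into /synapse/config/conf.d/secrets.yaml with the
# Postgres/Redis passwords filled in, (3) copy homeserver.yaml to /synapse/runtime-config/
# and rewrite the TURN/OIDC/MAS placeholders there, and (4) exec
# `python -B -m synapse.app.homeserver` against the runtime config plus conf.d.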
apiVersion: apps/v1
kind: Deployment
metadata:
  name: othrys-synapse-matrix-synapse
  labels:
    helm.sh/chart: matrix-synapse-3.12.17
    app.kubernetes.io/name: matrix-synapse
    app.kubernetes.io/instance: othrys-synapse
    app.kubernetes.io/version: "1.144.0"
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/component: synapse
spec:
  replicas: 1
  strategy:
    type: RollingUpdate
  selector:
    matchLabels:
      app.kubernetes.io/name: matrix-synapse
      app.kubernetes.io/instance: othrys-synapse
      app.kubernetes.io/component: synapse
  template:
    metadata:
      annotations:
        checksum/config: manual-rtc-enable-1
        checksum/secrets: ec9f3b254a562a0f0709461eb74a8cc91b8c1a2fb06be2594a131776c2541773
      labels:
        app.kubernetes.io/name: matrix-synapse
        app.kubernetes.io/instance: othrys-synapse
        app.kubernetes.io/component: synapse
    spec:
      serviceAccountName: default
      securityContext:
        fsGroup: 666
        runAsGroup: 666
        runAsUser: 666
      containers:
        - name: synapse
          command:
            - sh
            - -c
            - |
              export POSTGRES_PASSWORD=$(echo "${POSTGRES_PASSWORD:-}" | sed 's/\//\\\//g' | sed 's/\&/\\\&/g') && \
              export REDIS_PASSWORD=$(echo "${REDIS_PASSWORD:-}" | sed 's/\//\\\//g' | sed 's/\&/\\\&/g') && \
              export OIDC_CLIENT_SECRET_ESCAPED=$(echo "${OIDC_CLIENT_SECRET:-}" | sed 's/[\\/&]/\\&/g') && \
              export TURN_SECRET_ESCAPED=$(echo "${TURN_SECRET:-}" | sed 's/[\\/&]/\\&/g') && \
              export MAS_SHARED_SECRET_ESCAPED=$(echo "${MAS_SHARED_SECRET:-}" | sed 's/[\\/&]/\\&/g') && \
              cat /synapse/secrets/*.yaml | \
                sed -e "s/@@POSTGRES_PASSWORD@@/${POSTGRES_PASSWORD:-}/" \
                    -e "s/@@REDIS_PASSWORD@@/${REDIS_PASSWORD:-}/" \
                > /synapse/config/conf.d/secrets.yaml
              cp /synapse/config/homeserver.yaml /synapse/runtime-config/homeserver.yaml && \
              if [ -n "${OIDC_CLIENT_SECRET_ESCAPED}" ]; then \
                sed -i "s/@@OIDC_CLIENT_SECRET@@/${OIDC_CLIENT_SECRET_ESCAPED}/g" /synapse/runtime-config/homeserver.yaml; \
              fi; \
              if [ -n "${TURN_SECRET_ESCAPED}" ]; then \
                sed -i "s/@@TURN_SECRET@@/${TURN_SECRET_ESCAPED}/g" /synapse/runtime-config/homeserver.yaml; \
              fi; \
              if [ -n "${MAS_SHARED_SECRET_ESCAPED}" ]; then \
                sed -i "s/@@MAS_SHARED_SECRET@@/${MAS_SHARED_SECRET_ESCAPED}/g" /synapse/runtime-config/homeserver.yaml; \
              fi
              exec python -B -m synapse.app.homeserver \
                -c /synapse/runtime-config/homeserver.yaml \
                -c /synapse/config/conf.d/
          env:
            - name: POSTGRES_PASSWORD
              valueFrom:
                secretKeyRef:
                  name: synapse-db
                  key: POSTGRES_PASSWORD
            - name: REDIS_PASSWORD
              valueFrom:
                secretKeyRef:
                  name: synapse-redis
                  key: redis-password
            - name: OIDC_CLIENT_SECRET
              valueFrom:
                secretKeyRef:
                  name: synapse-oidc
                  key: client-secret
            - name: TURN_SECRET
              valueFrom:
                secretKeyRef:
                  name: turn-shared-secret
                  key: TURN_STATIC_AUTH_SECRET
            - name: MAS_SHARED_SECRET
              valueFrom:
                secretKeyRef:
                  name: mas-secrets-runtime
                  key: matrix_shared_secret
          image: "ghcr.io/element-hq/synapse:v1.144.0"
          imagePullPolicy: IfNotPresent
          securityContext:
            {}
          ports:
            - name: http
              containerPort: 8008
              protocol: TCP
            - name: replication
              containerPort: 9093
              protocol: TCP
            - name: metrics
              containerPort: 9090
              protocol: TCP
          livenessProbe:
            httpGet:
              path: /health
              port: http
          readinessProbe:
            httpGet:
              path: /health
              port: http
          startupProbe:
            failureThreshold: 12
            httpGet:
              path: /health
              port: http
          volumeMounts:
            - name: config
              mountPath: /synapse/config
            - name: runtime-config
              mountPath: /synapse/runtime-config
            - name: tmpconf
              mountPath: /synapse/config/conf.d
            - name: secrets
              mountPath: /synapse/secrets
            - name: signingkey
              mountPath: /synapse/keys
            - name: media
              mountPath: /synapse/data
            - name: tmpdir
              mountPath: /tmp
          resources:
            limits:
              cpu: "2"
              memory: 3Gi
            requests:
              cpu: 500m
              memory: 1Gi
      volumes:
        - name: config
          configMap:
            name: othrys-synapse-matrix-synapse
        - name: secrets
          secret:
            secretName: othrys-synapse-matrix-synapse
        - name: signingkey
          secret:
            secretName: "othrys-synapse-signingkey"
            items:
              - key: "signing.key"
                path: signing.key
        - name: tmpconf
          emptyDir: {}
        - name: tmpdir
          emptyDir: {}
        - name: runtime-config
          emptyDir: {}
        - name: media
          persistentVolumeClaim:
            claimName: othrys-synapse-matrix-synapse
      nodeSelector:
        hardware: rpi5
      affinity:
        nodeAffinity:
          preferredDuringSchedulingIgnoredDuringExecution:
            - preference:
                matchExpressions:
                  - key: hardware
                    operator: In
                    values:
                      - rpi5
                      - rpi4
              weight: 50
---
# Source: matrix-synapse/templates/ingress.yaml
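# NOTE: /_matrix and /.well-known/matrix are routed on live.bstein.dev (the well-known
# path is also served on bstein.dev), while matrix.live.bstein.dev additionally exposes
# /_synapse (e.g. the OIDC callback under /_synapse/client). Illustrative smoke test
# against the public route, mirroring the helm test pod at the end of this file:
#   curl -fsS https://matrix.live.bstein.dev/_matrix/client/versions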
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: othrys-synapse-matrix-synapse
  labels:
    helm.sh/chart: matrix-synapse-3.12.17
    app.kubernetes.io/name: matrix-synapse
    app.kubernetes.io/instance: othrys-synapse
    app.kubernetes.io/version: "1.144.0"
    app.kubernetes.io/managed-by: Helm
  annotations:
    cert-manager.io/cluster-issuer: letsencrypt
    traefik.ingress.kubernetes.io/router.entrypoints: websecure
spec:
  ingressClassName: traefik
  tls:
    - hosts:
        - "matrix.live.bstein.dev"
        - "live.bstein.dev"
      secretName: matrix-live-tls
  rules:
    - host: "live.bstein.dev"
      http:
        paths:
          - path: /_matrix
            backend:
              service:
                name: othrys-synapse-matrix-synapse
                port:
                  number: 8008
            pathType: Prefix
          - path: /.well-known/matrix
            backend:
              service:
                name: othrys-synapse-matrix-synapse
                port:
                  number: 8008
            pathType: Prefix
    - host: "matrix.live.bstein.dev"
      http:
        paths:
          - path: /_matrix
            backend:
              service:
                name: othrys-synapse-matrix-synapse
                port:
                  number: 8008
            pathType: Prefix
          - path: /_synapse
            backend:
              service:
                name: othrys-synapse-matrix-synapse
                port:
                  number: 8008
            pathType: Prefix
    - host: "bstein.dev"
      http:
        paths:
          - path: /.well-known/matrix
            backend:
              service:
                name: othrys-synapse-matrix-synapse
                port:
                  number: 8008
            pathType: Prefix
---
# Source: matrix-synapse/templates/signing-key-job.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: othrys-synapse-signingkey-job
  labels:
    helm.sh/chart: matrix-synapse-3.12.17
    app.kubernetes.io/name: matrix-synapse
    app.kubernetes.io/instance: othrys-synapse
    app.kubernetes.io/version: "1.144.0"
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/component: signingkey-job
  annotations:
    helm.sh/hook: pre-install
    helm.sh/hook-delete-policy: hook-succeeded
---
# Source: matrix-synapse/templates/signing-key-job.yaml
# Create secret if signing key job is enabled, or if we're running in ArgoCD and we don't have an existing secret
apiVersion: v1
kind: Secret
metadata:
  annotations:
    helm.sh/hook: pre-install
    helm.sh/hook-delete-policy: never
    helm.sh/resource-policy: keep
    # If for some reason we didn't detect ArgoCD, but are running in it, we want to make sure we don't delete the secret
    argocd.argoproj.io/hook: Skip
  name: othrys-synapse-signingkey
  labels:
    helm.sh/chart: matrix-synapse-3.12.17
    app.kubernetes.io/name: matrix-synapse
    app.kubernetes.io/instance: othrys-synapse
    app.kubernetes.io/version: "1.144.0"
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/component: signingkey-job
---
# Source: matrix-synapse/templates/signing-key-job.yaml
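# NOTE: signing-key.sh is the upload half of the pre-install Job at the bottom of this
# file: the first Job container generates /synapse/keys/signing.key into a shared emptyDir,
# while this script polls for the file (up to 5 minutes) and then patches it into the
# othrys-synapse-signingkey Secret via kubectl.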
apiVersion: v1
kind: ConfigMap
metadata:
  name: othrys-synapse-matrix-synapse-scripts
  labels:
    helm.sh/chart: matrix-synapse-3.12.17
    app.kubernetes.io/name: matrix-synapse
    app.kubernetes.io/instance: othrys-synapse
    app.kubernetes.io/version: "1.144.0"
    app.kubernetes.io/managed-by: Helm
  annotations:
    helm.sh/hook: pre-install
    helm.sh/hook-delete-policy: hook-succeeded
data:
  signing-key.sh: |
    #!/bin/sh
    set -eu
    check_key() {
      set +e
      echo "Checking for existing signing key..."
      key="$(kubectl get secret "$SECRET_NAME" -o jsonpath="{.data['signing\.key']}" 2> /dev/null)"
      [ $? -ne 0 ] && return 1
      [ -z "$key" ] && return 2
      return 0
    }
    create_key() {
      echo "Waiting for new signing key to be generated..."
      begin=$(date +%s)
      end=$((begin + 300)) # 5 minutes
      while true; do
        [ -f /synapse/keys/signing.key ] && return 0
        [ "$(date +%s)" -gt $end ] && return 1
        sleep 5
      done
    }
    store_key() {
      echo "Storing signing key in Kubernetes secret..."
      kubectl patch secret "$SECRET_NAME" -p "{\"data\":{\"signing.key\":\"$(base64 /synapse/keys/signing.key | tr -d '\n')\"}}"
    }
    if check_key; then
      echo "Key already in place, exiting."
      exit
    fi
    if ! create_key; then
      echo "Timed out waiting for a signing key to appear."
      exit 1
    fi
    store_key
---
# Source: matrix-synapse/templates/signing-key-job.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: othrys-synapse-signingkey-job
  labels:
    helm.sh/chart: matrix-synapse-3.12.17
    app.kubernetes.io/name: matrix-synapse
    app.kubernetes.io/instance: othrys-synapse
    app.kubernetes.io/version: "1.144.0"
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/component: signingkey-job
  annotations:
    helm.sh/hook: pre-install
    helm.sh/hook-delete-policy: hook-succeeded
rules:
  - apiGroups:
      - ""
    resources:
      - secrets
    resourceNames:
      - othrys-synapse-signingkey
    verbs:
      - get
      - update
      - patch
---
# Source: matrix-synapse/templates/signing-key-job.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: othrys-synapse-signingkey-job
  labels:
    helm.sh/chart: matrix-synapse-3.12.17
    app.kubernetes.io/name: matrix-synapse
    app.kubernetes.io/instance: othrys-synapse
    app.kubernetes.io/version: "1.144.0"
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/component: signingkey-job
  annotations:
    helm.sh/hook: pre-install
    helm.sh/hook-delete-policy: hook-succeeded
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: othrys-synapse-signingkey-job
subjects:
  - kind: ServiceAccount
    name: othrys-synapse-signingkey-job
    namespace: communication
---
# Source: matrix-synapse/templates/tests/test-connection.yaml
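# NOTE: This pod is run by `helm test`; it fetches /_matrix/client/versions from the
# in-cluster Service. Roughly equivalent manual check (illustrative):
#   kubectl -n communication run synapse-smoke --rm -i --restart=Never --image=busybox -- \
#     wget -qO- http://othrys-synapse-matrix-synapse:8008/_matrix/client/versions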
apiVersion: v1
kind: Pod
metadata:
  name: "othrys-synapse-matrix-synapse-test-connection"
  labels:
    helm.sh/chart: matrix-synapse-3.12.17
    app.kubernetes.io/name: matrix-synapse
    app.kubernetes.io/instance: othrys-synapse
    app.kubernetes.io/version: "1.144.0"
    app.kubernetes.io/managed-by: Helm
  annotations:
    "helm.sh/hook": test-success
spec:
  containers:
    - name: wget
      image: busybox
      command: ['wget']
      args: ['othrys-synapse-matrix-synapse:8008/_matrix/client/versions']
  restartPolicy: Never
---
# Source: matrix-synapse/templates/signing-key-job.yaml
apiVersion: batch/v1
kind: Job
metadata:
  name: othrys-synapse-signingkey-job
  labels:
    helm.sh/chart: matrix-synapse-3.12.17
    app.kubernetes.io/name: matrix-synapse
    app.kubernetes.io/instance: othrys-synapse
    app.kubernetes.io/version: "1.144.0"
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/component: signingkey-job
  annotations:
    helm.sh/hook: pre-install
    helm.sh/hook-delete-policy: hook-succeeded
spec:
  ttlSecondsAfterFinished: 0
  template:
    metadata:
      labels:
        helm.sh/chart: matrix-synapse-3.12.17
        app.kubernetes.io/name: matrix-synapse
        app.kubernetes.io/instance: othrys-synapse
        app.kubernetes.io/version: "1.144.0"
        app.kubernetes.io/managed-by: Helm
        app.kubernetes.io/component: signingkey-job
    spec:
      containers:
        - command:
            - sh
            - -c
            - |
              echo "Generating signing key..."
              if which generate_signing_key.py >/dev/null; then
                generate_signing_key.py -o /synapse/keys/signing.key
              else
                generate_signing_key -o /synapse/keys/signing.key
              fi
          image: "matrixdotorg/synapse:latest"
          imagePullPolicy: IfNotPresent
          name: signing-key-generate
          resources:
            {}
          securityContext:
            {}
          volumeMounts:
            - mountPath: /synapse/keys
              name: matrix-synapse-keys
        - command:
            - sh
            - -c
            - |
              printf "Checking rights to update secret... "
              kubectl auth can-i update secret/${SECRET_NAME}
              /scripts/signing-key.sh
          env:
            - name: SECRET_NAME
              value: othrys-synapse-signingkey
          image: "bitnami/kubectl:latest"
          imagePullPolicy: IfNotPresent
          name: signing-key-upload
          resources:
            {}
          securityContext:
            {}
          volumeMounts:
            - mountPath: /scripts
              name: scripts
              readOnly: true
            - mountPath: /synapse/keys
              name: matrix-synapse-keys
              readOnly: true
      securityContext:
        {}
      restartPolicy: Never
      serviceAccount: othrys-synapse-signingkey-job
      volumes:
        - name: scripts
          configMap:
            name: othrys-synapse-matrix-synapse-scripts
            defaultMode: 0755
        - name: matrix-synapse-keys
          emptyDir: {}
  parallelism: 1
  completions: 1
  backoffLimit: 1