Compare commits: feature/pi...main (508 commits)
Jenkinsfile (vendored), 333 changed lines
@@ -11,9 +11,47 @@ spec:
        hardware: rpi5
        kubernetes.io/arch: arm64
        node-role.kubernetes.io/worker: "true"
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
            - matchExpressions:
              - key: kubernetes.io/hostname
                operator: NotIn
                values:
                - titan-06
          preferredDuringSchedulingIgnoredDuringExecution:
          - weight: 100
            preference:
              matchExpressions:
              - key: kubernetes.io/hostname
                operator: NotIn
                values:
                - titan-13
                - titan-15
                - titan-17
                - titan-19
      topologySpreadConstraints:
      - maxSkew: 1
        topologyKey: kubernetes.io/hostname
        whenUnsatisfiable: ScheduleAnyway
        labelSelector:
          matchLabels:
            jenkins/jenkins-jenkins-agent: "true"
      containers:
      - name: jnlp
        image: jenkins/inbound-agent:3355.v388858a_47b_33-2-jdk21
        resources:
          requests:
            cpu: "25m"
            memory: "256Mi"
      - name: python
-       image: python:3.12-slim
        image: registry.bstein.dev/bstein/python:3.12-slim
        command:
        - cat
        tty: true
      - name: quality-tools
        image: registry.bstein.dev/bstein/quality-tools:sonar8.0.1-trivy0.70.0-db20260422-arm64
        command:
        - cat
        tty: true
@@ -23,8 +61,21 @@ spec:
  environment {
    PIP_DISABLE_PIP_VERSION_CHECK = '1'
    PYTHONUNBUFFERED = '1'
-   SUITE_NAME = 'titan-iac'
    SUITE_NAME = 'titan_iac'
    PUSHGATEWAY_URL = 'http://platform-quality-gateway.monitoring.svc.cluster.local:9091'
    SONARQUBE_HOST_URL = 'http://sonarqube.quality.svc.cluster.local:9000'
    SONARQUBE_PROJECT_KEY = 'titan_iac'
    SONARQUBE_TOKEN = credentials('sonarqube-token')
    VM_URL = 'http://victoria-metrics-single-server.monitoring.svc.cluster.local:8428'
    QUALITY_GATE_SONARQUBE_ENFORCE = '1'
    QUALITY_GATE_SONARQUBE_REPORT = 'build/sonarqube-quality-gate.json'
    QUALITY_GATE_IRONBANK_ENFORCE = '1'
    QUALITY_GATE_IRONBANK_REQUIRED = '0'
    QUALITY_GATE_IRONBANK_REPORT = 'build/ironbank-compliance.json'
  }
  options {
    disableConcurrentBuilds()
    buildDiscarder(logRotator(daysToKeepStr: '30', numToKeepStr: '200', artifactDaysToKeepStr: '30', artifactNumToKeepStr: '120'))
  }
  stages {
    stage('Checkout') {
@@ -34,7 +85,175 @@ spec:
    }
    stage('Install deps') {
      steps {
-       sh 'pip install --no-cache-dir -r ci/requirements.txt'
        sh '''
          set -eu
          if ! command -v git >/dev/null 2>&1; then
            apt-get update
            apt-get install -y --no-install-recommends git ca-certificates
            rm -rf /var/lib/apt/lists/*
          fi
          pip install --no-cache-dir -r ci/requirements.txt
        '''
      }
    }
    stage('Prepare local quality evidence') {
      steps {
        sh '''
          set -eu
          mkdir -p build
          set +e
          python3 -m testing.quality_gate --profile local --build-dir build
          local_quality_rc=$?
          set -e
          printf '%s\n' "${local_quality_rc}" > build/local-quality-gate.rc
        '''
      }
    }
    stage('Collect SonarQube evidence') {
      steps {
        container('quality-tools') {
          sh '''#!/usr/bin/env bash
            set -euo pipefail
            mkdir -p build
            args=(
              "-Dsonar.host.url=${SONARQUBE_HOST_URL}"
              "-Dsonar.login=${SONARQUBE_TOKEN}"
              "-Dsonar.projectKey=${SONARQUBE_PROJECT_KEY}"
              "-Dsonar.projectName=${SONARQUBE_PROJECT_KEY}"
              "-Dsonar.sources=."
              "-Dsonar.exclusions=**/.git/**,**/build/**,**/dist/**,**/node_modules/**,**/.venv/**,**/__pycache__/**,**/coverage/**,**/test-results/**,**/playwright-report/**,services/monitoring/dashboards/**,services/monitoring/grafana-dashboard-*.yaml"
              "-Dsonar.test.inclusions=**/tests/**,**/testing/**,**/*_test.go,**/*.test.ts,**/*.test.tsx,**/*.spec.ts,**/*.spec.tsx"
            )
            [ -f build/coverage-unit.xml ] && args+=("-Dsonar.python.coverage.reportPaths=build/coverage-unit.xml")
            set +e
            sonar-scanner "${args[@]}" | tee build/sonar-scanner.log
            rc=${PIPESTATUS[0]}
            set -e
            printf '%s\n' "${rc}" > build/sonarqube-analysis.rc
          '''
        }
        sh '''
          set -eu
          mkdir -p build
          python3 - <<'PY'
import base64
import json
import os
import time
import urllib.parse
import urllib.request
from pathlib import Path

host = os.getenv('SONARQUBE_HOST_URL', '').strip().rstrip('/')
project_key = os.getenv('SONARQUBE_PROJECT_KEY', '').strip()
token = os.getenv('SONARQUBE_TOKEN', '').strip()
report_path = os.getenv('QUALITY_GATE_SONARQUBE_REPORT', 'build/sonarqube-quality-gate.json')

payload = {
    "status": "ERROR",
    "note": "missing SONARQUBE_HOST_URL and/or SONARQUBE_PROJECT_KEY",
}
if host and project_key:
    task_file = Path('.scannerwork/report-task.txt')
    task_id = ''
    if task_file.exists():
        for line in task_file.read_text(encoding='utf-8').splitlines():
            key, _, value = line.partition('=')
            if key == 'ceTaskId':
                task_id = value.strip()
                break
    if task_id:
        ce_query = urllib.parse.urlencode({"id": task_id})
        deadline = time.monotonic() + 180
        while time.monotonic() < deadline:
            ce_request = urllib.request.Request(f"{host}/api/ce/task?{ce_query}", method="GET")
            if token:
                encoded = base64.b64encode(f"{token}:".encode("utf-8")).decode("utf-8")
                ce_request.add_header("Authorization", f"Basic {encoded}")
            try:
                with urllib.request.urlopen(ce_request, timeout=12) as response:
                    ce_payload = json.loads(response.read().decode("utf-8"))
            except Exception:
                time.sleep(3)
                continue
            status = str(ce_payload.get("task", {}).get("status", "")).upper()
            if status in {"SUCCESS", "FAILED", "CANCELED"}:
                break
            time.sleep(3)

    query = urllib.parse.urlencode({"projectKey": project_key})
    request = urllib.request.Request(
        f"{host}/api/qualitygates/project_status?{query}",
        method="GET",
    )
    if token:
        encoded = base64.b64encode(f"{token}:".encode("utf-8")).decode("utf-8")
        request.add_header("Authorization", f"Basic {encoded}")
    try:
        with urllib.request.urlopen(request, timeout=12) as response:
            payload = json.loads(response.read().decode("utf-8"))
    except Exception as exc:  # noqa: BLE001
        payload = {"status": "ERROR", "error": str(exc)}

with open(report_path, "w", encoding="utf-8") as handle:
    json.dump(payload, handle, indent=2, sort_keys=True)
    handle.write("\\n")
PY
        '''
      }
    }
    stage('Collect IronBank evidence') {
      steps {
        container('quality-tools') {
          sh '''#!/usr/bin/env bash
            set -euo pipefail
            mkdir -p build
            set +e
            trivy fs --cache-dir "${TRIVY_CACHE_DIR}" --skip-db-update --skip-files clusters/atlas/flux-system/gotk-components.yaml --timeout 5m --no-progress --format json --output build/trivy-fs.json --scanners vuln,secret,misconfig --severity HIGH,CRITICAL .
            trivy_rc=$?
            set -e
            if [ ! -s build/trivy-fs.json ]; then
              cat > build/ironbank-compliance.json <<EOF
{"status":"failed","compliant":false,"scanner":"trivy","scan_type":"filesystem","error":"trivy did not produce JSON output","trivy_rc":${trivy_rc}}
EOF
              exit 0
            fi
          '''
        }
        sh '''
          set -eu
          mkdir -p build
          if [ -s build/trivy-fs.json ]; then
            python3 ci/scripts/supply_chain_report.py --trivy-json build/trivy-fs.json --waivers ci/titan-iac-trivy-waivers.json --output build/ironbank-compliance.json
            exit 0
          fi
          python3 - <<'PY'
import json
import os
from pathlib import Path

report_path = Path(os.getenv('QUALITY_GATE_IRONBANK_REPORT', 'build/ironbank-compliance.json'))
if report_path.exists():
    raise SystemExit(0)

status = os.getenv('IRONBANK_COMPLIANCE_STATUS', '').strip()
compliant = os.getenv('IRONBANK_COMPLIANT', '').strip().lower()
payload = {
    "status": status or "unknown",
    "compliant": compliant in {"1", "true", "yes", "on"} if compliant else None,
}
payload = {k: v for k, v in payload.items() if v is not None}
if "status" not in payload:
    payload["status"] = "unknown"
payload["note"] = (
    "Set IRONBANK_COMPLIANCE_STATUS/IRONBANK_COMPLIANT "
    "or write build/ironbank-compliance.json in image-building repos."
)

report_path.parent.mkdir(parents=True, exist_ok=True)
report_path.write_text(json.dumps(payload, indent=2, sort_keys=True) + "\\n", encoding="utf-8")
PY
        '''
      }
    }
    stage('Run quality gate') {
@@ -64,8 +283,96 @@ spec:
    stage('Enforce quality gate') {
      steps {
        sh '''
-         set -eu
-         test "$(cat build/quality-gate.rc 2>/dev/null || echo 1)" -eq 0
          set -euo pipefail
          gate_rc="$(cat build/quality-gate.rc 2>/dev/null || echo 1)"
          fail=0
          if [ "${gate_rc}" -ne 0 ]; then
            echo "quality gate failed with rc=${gate_rc}" >&2
            fail=1
          fi

          enabled() {
            case "$(printf '%s' "${1:-}" | tr '[:upper:]' '[:lower:]')" in
              1|true|yes|on) return 0 ;;
              *) return 1 ;;
            esac
          }

          if enabled "${QUALITY_GATE_SONARQUBE_ENFORCE:-1}"; then
            sonar_status="$(python3 - <<'PY'
import json
from pathlib import Path

path = Path("build/sonarqube-quality-gate.json")
if not path.exists():
    print("missing")
    raise SystemExit(0)
try:
    payload = json.loads(path.read_text(encoding="utf-8"))
except Exception:  # noqa: BLE001
    print("error")
    raise SystemExit(0)
status = (payload.get("status") or payload.get("projectStatus", {}).get("status") or payload.get("qualityGate", {}).get("status") or "").strip().lower()
print(status or "missing")
PY
            )"
            case "${sonar_status}" in
              ok|pass|passed|success) ;;
              *)
                echo "sonarqube gate failed: ${sonar_status}" >&2
                fail=1
                ;;
            esac
          fi

          ironbank_required="${QUALITY_GATE_IRONBANK_REQUIRED:-0}"
          if [ "${PUBLISH_IMAGES:-false}" = "true" ]; then
            ironbank_required=1
          fi
          if enabled "${QUALITY_GATE_IRONBANK_ENFORCE:-1}"; then
            supply_status="$(python3 - <<'PY'
import json
from pathlib import Path

path = Path("build/ironbank-compliance.json")
if not path.exists():
    print("missing")
    raise SystemExit(0)
try:
    payload = json.loads(path.read_text(encoding="utf-8"))
except Exception:  # noqa: BLE001
    print("error")
    raise SystemExit(0)
compliant = payload.get("compliant")
if compliant is True:
    print("ok")
elif compliant is False:
    print("failed")
else:
    status = str(payload.get("status") or payload.get("result") or payload.get("compliance") or "").strip().lower()
    print(status or "missing")
PY
            )"
            case "${supply_status}" in
              ok|pass|passed|success|compliant) ;;
              not_applicable|na|n/a)
                if enabled "${ironbank_required}"; then
                  echo "supply chain gate required but status=${supply_status}" >&2
                  fail=1
                fi
                ;;
              *)
                if enabled "${ironbank_required}"; then
                  echo "supply chain gate failed: ${supply_status}" >&2
                  fail=1
                else
                  echo "supply chain gate not passing (${supply_status}) but not required for this run" >&2
                fi
                ;;
            esac
          fi

          exit "${fail}"
        '''
      }
    }
@@ -74,7 +381,7 @@ spec:
      script {
        env.FLUX_BRANCH = sh(
          returnStdout: true,
-         script: "awk '/branch:/{print $2; exit}' clusters/atlas/flux-system/gotk-sync.yaml"
          script: "grep -m1 '^\\s*branch:' clusters/atlas/flux-system/gotk-sync.yaml | sed 's/^\\s*branch:\\s*//'"
        ).trim()
        if (!env.FLUX_BRANCH) {
          error('Flux branch not found in gotk-sync.yaml')
@@ -93,6 +400,20 @@ spec:
      steps {
        withCredentials([usernamePassword(credentialsId: 'gitea-pat', usernameVariable: 'GIT_USER', passwordVariable: 'GIT_TOKEN')]) {
          sh '''
            set -euo pipefail
            if ! command -v git >/dev/null 2>&1; then
              if command -v apk >/dev/null 2>&1; then
                apk add --no-cache git >/dev/null
              elif command -v apt-get >/dev/null 2>&1; then
                apt-get update >/dev/null
                apt-get install -y git >/dev/null
              fi
            fi
            cd "${WORKSPACE:-$PWD}"
            if ! git rev-parse --is-inside-work-tree >/dev/null 2>&1; then
              echo "workspace is not a git checkout; skipping promote"
              exit 0
            fi
            set +x
            git config user.email "jenkins@bstein.dev"
            git config user.name "jenkins"
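A pattern worth noting in this Jenkinsfile: each evidence stage deliberately swallows its own failure (`set +e` ... `set -e`) and records the exit status in a small `build/*.rc` file, so that the single 'Enforce quality gate' stage can read all the evidence and decide pass/fail in one place. A minimal sketch of that read-back convention in Python (the helper name `read_rc` is illustrative, not part of the repo):

from pathlib import Path

def read_rc(path: str, default: int = 1) -> int:
    """Read a saved exit code, treating a missing or garbled file as failure."""
    try:
        return int(Path(path).read_text(encoding="utf-8").strip())
    except (OSError, ValueError):
        return default

# Mirrors `gate_rc="$(cat build/quality-gate.rc 2>/dev/null || echo 1)"`:
if read_rc("build/quality-gate.rc") != 0:
    raise SystemExit("quality gate failed")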
@@ -6,10 +6,14 @@ from __future__ import annotations
import json
import os
from glob import glob
from pathlib import Path
import sys
import urllib.error
import urllib.request
import xml.etree.ElementTree as ET

sys.path.insert(0, str(Path(__file__).resolve().parents[2]))

from ci.scripts import publish_test_metrics_quality as _quality_helpers

CANONICAL_CHECKS = _quality_helpers.CANONICAL_CHECKS
@@ -183,8 +187,10 @@ def _build_payload(
    failed_count: int,
    branch: str,
    build_number: str,
    jenkins_job: str,
    summary: dict | None = None,
    workspace_line_coverage_percent: float = 0.0,
    source_files_total: int = 0,
    source_lines_over_500: int = 0,
    check_statuses: dict[str, str] | None = None,
) -> str:
@@ -195,8 +201,15 @@
            "suite": suite,
            "branch": branch or "unknown",
            "build_number": build_number or "unknown",
            "jenkins_job": jenkins_job or suite,
        }
    )
    test_case_base_labels = {
        "suite": suite,
        "branch": branch or "unknown",
        "build_number": build_number or "unknown",
        "jenkins_job": jenkins_job or suite,
    }
    lines = [
        "# TYPE platform_quality_gate_runs_total counter",
        f'platform_quality_gate_runs_total{{suite="{suite}",status="ok"}} {ok_count}',
@@ -209,10 +222,14 @@
        "# TYPE titan_iac_quality_gate_run_status gauge",
        f'titan_iac_quality_gate_run_status{{suite="{suite}",status="ok"}} {1 if status == "ok" else 0}',
        f'titan_iac_quality_gate_run_status{{suite="{suite}",status="failed"}} {1 if status == "failed" else 0}',
        "# TYPE platform_quality_gate_build_info gauge",
        f"platform_quality_gate_build_info{build_labels} 1",
        "# TYPE titan_iac_quality_gate_build_info gauge",
        f"titan_iac_quality_gate_build_info{build_labels} 1",
        "# TYPE platform_quality_gate_workspace_line_coverage_percent gauge",
        f'platform_quality_gate_workspace_line_coverage_percent{{suite="{suite}"}} {workspace_line_coverage_percent:.3f}',
        "# TYPE platform_quality_gate_source_files_total gauge",
        f'platform_quality_gate_source_files_total{{suite="{suite}"}} {source_files_total}',
        "# TYPE platform_quality_gate_source_lines_over_500_total gauge",
        f'platform_quality_gate_source_lines_over_500_total{{suite="{suite}"}} {source_lines_over_500}',
    ]
@@ -226,12 +243,18 @@
    lines.append("# TYPE platform_quality_gate_test_case_result gauge")
    if test_cases:
        for test_name, test_status in test_cases:
            labels = {
                **test_case_base_labels,
                "test": test_name,
                "status": test_status,
            }
            lines.append(
-               f'platform_quality_gate_test_case_result{{suite="{suite}",test="{_escape_label(test_name)}",status="{_escape_label(test_status)}"}} 1'
                f"platform_quality_gate_test_case_result{_label_str(labels)} 1"
            )
    else:
        labels = {**test_case_base_labels, "test": "__no_test_cases__", "status": "skipped"}
        lines.append(
-           f'platform_quality_gate_test_case_result{{suite="{suite}",test="__no_test_cases__",status="skipped"}} 1'
            f"platform_quality_gate_test_case_result{_label_str(labels)} 1"
        )
    return "\n".join(lines) + "\n"
@@ -244,8 +267,11 @@ def main() -> int:
    junit_glob = os.getenv("JUNIT_GLOB", os.getenv("JUNIT_PATH", "build/junit-*.xml"))
    exit_code_path = os.getenv("QUALITY_GATE_EXIT_CODE_PATH", os.getenv("GLUE_EXIT_CODE_PATH", "build/quality-gate.rc"))
    summary_path = os.getenv("QUALITY_GATE_SUMMARY_PATH", "build/quality-gate-summary.json")
-   branch = os.getenv("BRANCH_NAME", os.getenv("GIT_BRANCH", ""))
    branch = os.getenv("BRANCH_NAME") or os.getenv("GIT_BRANCH") or "unknown"
    if branch.startswith("origin/"):
        branch = branch[len("origin/") :]
    build_number = os.getenv("BUILD_NUMBER", "")
    jenkins_job = os.getenv("JOB_NAME", "titan-iac")

    tests = _collect_junit_totals(junit_glob)
    test_cases = _collect_junit_cases(junit_glob)
@@ -255,6 +281,7 @@ def main() -> int:
    workspace_line_coverage_percent = _summary_float(summary, "workspace_line_coverage_percent")
    if workspace_line_coverage_percent <= 0:
        workspace_line_coverage_percent = _infer_workspace_coverage_percent(summary, "build/coverage-unit.xml")
    source_files_total = _summary_int(summary, "source_files_total")
    source_lines_over_500 = _summary_int(summary, "source_lines_over_500")
    if source_lines_over_500 <= 0:
        source_lines_over_500 = _infer_source_lines_over_500(summary)
@@ -299,8 +326,10 @@ def main() -> int:
        failed_count=failed_count,
        branch=branch,
        build_number=build_number,
        jenkins_job=jenkins_job,
        summary=summary,
        workspace_line_coverage_percent=workspace_line_coverage_percent,
        source_files_total=source_files_total,
        source_lines_over_500=source_lines_over_500,
        check_statuses=check_statuses,
    )
@@ -318,6 +347,7 @@ def main() -> int:
        "failed_count": failed_count,
        "checks_recorded": len(check_statuses),
        "workspace_line_coverage_percent": workspace_line_coverage_percent,
        "source_files_total": source_files_total,
        "source_lines_over_500": source_lines_over_500,
    }
    print(json.dumps(summary, sort_keys=True))
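This hunk switches the per-test metric lines from hand-built label strings to a shared `_label_str(labels)` helper. The helper itself is not shown in the diff; given how it is used (`f"platform_quality_gate_test_case_result{_label_str(labels)} 1"`), a plausible implementation, assuming standard Prometheus exposition-format escaping, would be:

def _label_str(labels: dict[str, str]) -> str:
    """Render {'suite': 'titan_iac'} as {suite="titan_iac"} with value escaping.

    Hypothetical sketch: the real helper lives elsewhere in the repo.
    """
    def esc(value: str) -> str:
        # Escape backslashes first, then quotes and newlines, per the
        # Prometheus text exposition format.
        return value.replace("\\", "\\\\").replace('"', '\\"').replace("\n", "\\n")

    body = ",".join(f'{key}="{esc(str(value))}"' for key, value in labels.items())
    return "{" + body + "}"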
ci/scripts/supply_chain_report.py (new file, 173 lines)
@@ -0,0 +1,173 @@
"""Build a titan-iac supply-chain compliance report from Trivy evidence."""

from __future__ import annotations

import argparse
import datetime as dt
import json
from pathlib import Path
from typing import Any


FAIL_SEVERITIES = {"HIGH", "CRITICAL"}


def _read_json(path: Path) -> dict[str, Any]:
    """Read a JSON object from disk for use as pipeline evidence."""
    payload = json.loads(path.read_text(encoding="utf-8"))
    if not isinstance(payload, dict):
        raise ValueError(f"{path} must contain a JSON object")
    return payload


def _parse_day(raw: str | None) -> dt.date | None:
    """Parse an ISO day while letting optional waiver dates stay optional."""
    if not raw:
        return None
    return dt.date.fromisoformat(raw)


def _today(override: str | None = None) -> dt.date:
    """Return the policy day so tests can pin expiry behavior."""
    return _parse_day(override) or dt.date.today()


def _load_waiver_pairs(path: Path | None, policy_day: dt.date) -> tuple[set[tuple[str, str]], int]:
    """Return active ``(misconfiguration id, target)`` waivers and expired count."""
    if path is None or not path.exists():
        return set(), 0

    payload = _read_json(path)
    default_expires_at = payload.get("default_expires_at")
    active: set[tuple[str, str]] = set()
    expired = 0

    for entry in payload.get("misconfigurations", []):
        if not isinstance(entry, dict):
            continue
        misconfiguration_id = str(entry.get("id") or "").strip()
        if not misconfiguration_id:
            continue
        expires_at = _parse_day(str(entry.get("expires_at") or default_expires_at or ""))
        targets = entry.get("targets", [])
        if not isinstance(targets, list):
            continue

        if expires_at and expires_at < policy_day:
            expired += len(targets)
            continue

        # Waivers are target-specific so a new unsafe manifest fails until it is
        # either fixed or deliberately accepted with a fresh expiration.
        for target in targets:
            if isinstance(target, str) and target:
                active.add((misconfiguration_id, target))

    return active, expired


def _iter_failed_misconfigurations(payload: dict[str, Any]):
    """Yield failed high/critical Trivy misconfiguration records."""
    for result in payload.get("Results", []):
        if not isinstance(result, dict):
            continue
        target = str(result.get("Target") or "")
        for item in result.get("Misconfigurations") or []:
            if not isinstance(item, dict):
                continue
            if item.get("Status") != "FAIL":
                continue
            if str(item.get("Severity") or "").upper() not in FAIL_SEVERITIES:
                continue
            yield target, item


def _count_vulnerabilities(payload: dict[str, Any], severity: str) -> int:
    """Count Trivy vulnerabilities at a specific severity."""
    count = 0
    for result in payload.get("Results", []):
        if not isinstance(result, dict):
            continue
        for item in result.get("Vulnerabilities") or []:
            if isinstance(item, dict) and str(item.get("Severity") or "").upper() == severity:
                count += 1
    return count


def _count_secrets(payload: dict[str, Any]) -> int:
    """Count detected secrets in the Trivy filesystem report."""
    count = 0
    for result in payload.get("Results", []):
        if isinstance(result, dict):
            count += len(result.get("Secrets") or [])
    return count


def build_report(
    trivy_payload: dict[str, Any],
    waiver_path: Path | None = None,
    today_override: str | None = None,
) -> dict[str, Any]:
    """Build the compliance summary consumed by the quality gate."""
    policy_day = _today(today_override)
    active_waivers, expired_waivers = _load_waiver_pairs(waiver_path, policy_day)

    open_misconfigs: list[dict[str, str]] = []
    waived_misconfigs = 0
    for target, item in _iter_failed_misconfigurations(trivy_payload):
        misconfiguration_id = str(item.get("ID") or "")
        if (misconfiguration_id, target) in active_waivers:
            waived_misconfigs += 1
            continue
        open_misconfigs.append(
            {
                "id": misconfiguration_id,
                "target": target,
                "severity": str(item.get("Severity") or ""),
                "title": str(item.get("Title") or ""),
            }
        )

    critical = _count_vulnerabilities(trivy_payload, "CRITICAL")
    high = _count_vulnerabilities(trivy_payload, "HIGH")
    secrets = _count_secrets(trivy_payload)
    status = "ok" if critical == 0 and secrets == 0 and not open_misconfigs else "failed"

    return {
        "status": status,
        "compliant": status == "ok",
        "category": "artifact_security",
        "scan_type": "filesystem",
        "scanner": "trivy",
        "critical_vulnerabilities": critical,
        "high_vulnerabilities": high,
        "high_vulnerability_policy": "observe",
        "secrets": secrets,
        "high_or_critical_misconfigurations": len(open_misconfigs),
        "waived_misconfigurations": waived_misconfigs,
        "expired_waivers": expired_waivers,
        "waiver_file": str(waiver_path) if waiver_path else "",
        "open_misconfiguration_examples": open_misconfigs[:20],
    }


def main(argv: list[str] | None = None) -> int:
    """CLI entrypoint used by Jenkins after the Trivy scan completes."""
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument("--trivy-json", required=True)
    parser.add_argument("--waivers")
    parser.add_argument("--output", required=True)
    parser.add_argument("--today")
    args = parser.parse_args(argv)

    trivy_payload = _read_json(Path(args.trivy_json))
    waiver_path = Path(args.waivers) if args.waivers else None
    report = build_report(trivy_payload, waiver_path=waiver_path, today_override=args.today)
    output_path = Path(args.output)
    output_path.parent.mkdir(parents=True, exist_ok=True)
    output_path.write_text(json.dumps(report, indent=2, sort_keys=True) + "\n", encoding="utf-8")
    return 0


if __name__ == "__main__":  # pragma: no cover
    raise SystemExit(main())
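For reference, a minimal way to exercise `build_report` outside Jenkins, run from the repository root so the `ci` package is importable; the inline Trivy payload here is fabricated purely for illustration:

from pathlib import Path

from ci.scripts.supply_chain_report import build_report

# A tiny, made-up Trivy filesystem report: one critical CVE, no secrets,
# no misconfigurations.
trivy_payload = {
    "Results": [
        {
            "Target": "dockerfiles/Dockerfile.example",  # hypothetical path
            "Vulnerabilities": [{"Severity": "CRITICAL"}],
        }
    ]
}
report = build_report(trivy_payload, waiver_path=Path("ci/titan-iac-trivy-waivers.json"))
# Waivers only apply to misconfigurations, so a critical vulnerability
# always fails the report regardless of the waiver file.
assert report["status"] == "failed"
assert report["critical_vulnerabilities"] == 1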
@@ -1,52 +1,18 @@
 max_success_age_hours: 48
+allow_suspended:
+- bstein-dev-home/vaultwarden-cred-sync
+- comms/guest-name-randomizer
+- comms/othrys-room-reset
+- comms/pin-othrys-invite
+- comms/seed-othrys-room
+- finance/firefly-user-sync
+- health/wger-admin-ensure
+- health/wger-user-sync
+- mailu-mailserver/mailu-sync-nightly
+- nextcloud/nextcloud-mail-sync
+- vault/vault-oidc-config
 ariadne_schedule_tasks:
-- task: schedule.mailu_sync
-  check_last_success: false
-- task: schedule.nextcloud_sync
-  check_last_success: true
-  max_success_age_hours: 48
-- task: schedule.nextcloud_cron
-  check_last_success: true
-  max_success_age_hours: 48
-- task: schedule.nextcloud_maintenance
-  check_last_success: false
-- task: schedule.vaultwarden_sync
-  check_last_success: true
-  max_success_age_hours: 48
-- task: schedule.wger_user_sync
-  check_last_success: true
-  max_success_age_hours: 48
-- task: schedule.wger_admin
-  check_last_success: false
-- task: schedule.firefly_user_sync
-  check_last_success: true
-  max_success_age_hours: 48
-- task: schedule.firefly_cron
-  check_last_success: false
-- task: schedule.vault_k8s_auth
-  check_last_success: false
-- task: schedule.vault_oidc
-  check_last_success: false
-- task: schedule.comms_guest_name
-  check_last_success: true
-  max_success_age_hours: 48
-- task: schedule.comms_pin_invite
-  check_last_success: false
-- task: schedule.comms_reset_room
-  check_last_success: false
-- task: schedule.comms_seed_room
-  check_last_success: true
-  max_success_age_hours: 48
-- task: schedule.pod_cleaner
-  check_last_success: true
-  max_success_age_hours: 6
-- task: schedule.opensearch_prune
-  check_last_success: false
-- task: schedule.image_sweeper
-  check_last_success: true
-  max_success_age_hours: 18
-- task: schedule.metis_k3s_token_sync
-  check_last_success: true
-  max_success_age_hours: 12
-- task: schedule.platform_quality_suite_probe
-  check_last_success: true
-  max_success_age_hours: 2
+- schedule.mailu_sync
+- schedule.nextcloud_sync
+- schedule.vaultwarden_sync
+- schedule.wger_admin
@@ -1,3 +1,5 @@
"""Glue checks for Ariadne schedules exported to VictoriaMetrics."""

from __future__ import annotations

import os
@@ -26,11 +28,29 @@ def _query(promql: str) -> list[dict]:
def _expected_tasks() -> list[dict]:
    cfg = _load_config()
-   tasks = cfg.get("ariadne_schedule_tasks", [])
    tasks = [
        _normalize_task(item, cfg)
        for item in cfg.get("ariadne_schedule_tasks", [])
    ]
    assert tasks, "No Ariadne schedule tasks configured"
    return tasks


def _normalize_task(item: object, cfg: dict) -> dict:
    if isinstance(item, str):
        return {
            "task": item,
            "check_last_success": True,
            "max_success_age_hours": cfg.get("max_success_age_hours", 48),
        }
    if isinstance(item, dict):
        normalized = dict(item)
        normalized.setdefault("check_last_success", True)
        normalized.setdefault("max_success_age_hours", cfg.get("max_success_age_hours", 48))
        return normalized
    raise TypeError(f"Unsupported Ariadne schedule task config entry: {item!r}")


def _tracked_tasks(tasks: list[dict]) -> list[dict]:
    tracked = [item for item in tasks if item.get("check_last_success")]
    assert tracked, "No Ariadne schedule tasks are marked for success tracking"
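The normalization above is what lets the simplified config keep working: a bare string and an explicit mapping collapse to the same shape, with config-level defaults filled in. For example:

cfg = {"max_success_age_hours": 48}

_normalize_task("schedule.mailu_sync", cfg)
# -> {"task": "schedule.mailu_sync", "check_last_success": True,
#     "max_success_age_hours": 48}

_normalize_task({"task": "schedule.pod_cleaner", "max_success_age_hours": 6}, cfg)
# -> {"task": "schedule.pod_cleaner", "max_success_age_hours": 6,
#     "check_last_success": True}  (per-task override preserved)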
ci/tests/glue/test_glue_cronjobs.py (new file, 46 lines)
@@ -0,0 +1,46 @@
from __future__ import annotations

from datetime import datetime, timezone
from pathlib import Path

import yaml
from kubernetes import client, config


CONFIG_PATH = Path(__file__).with_name("config.yaml")


def _load_config() -> dict:
    with CONFIG_PATH.open("r", encoding="utf-8") as handle:
        return yaml.safe_load(handle) or {}


def _load_kube():
    try:
        config.load_incluster_config()
    except config.ConfigException:
        config.load_kube_config()


def test_glue_cronjobs_recent_success():
    cfg = _load_config()
    max_age_hours = int(cfg.get("max_success_age_hours", 48))
    allow_suspended = set(cfg.get("allow_suspended", []))

    _load_kube()
    batch = client.BatchV1Api()
    cronjobs = batch.list_cron_job_for_all_namespaces(label_selector="atlas.bstein.dev/glue=true").items

    assert cronjobs, "No glue cronjobs found with atlas.bstein.dev/glue=true"

    now = datetime.now(timezone.utc)
    for cronjob in cronjobs:
        name = f"{cronjob.metadata.namespace}/{cronjob.metadata.name}"
        if cronjob.spec.suspend:
            assert name in allow_suspended, f"{name} is suspended but not in allow_suspended"
            continue

        last_success = cronjob.status.last_successful_time
        assert last_success is not None, f"{name} has no lastSuccessfulTime"
        age_hours = (now - last_success).total_seconds() / 3600
        assert age_hours <= max_age_hours, f"{name} last success {age_hours:.1f}h ago"
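One detail worth noting in the age check above: the Kubernetes Python client returns `last_successful_time` as a timezone-aware datetime, which is why subtracting it from `datetime.now(timezone.utc)` works without a naive/aware mismatch. A standalone sketch of the same arithmetic, with a made-up timestamp:

from datetime import datetime, timezone

last_success = datetime(2026, 4, 20, 3, 0, tzinfo=timezone.utc)  # example value
now = datetime.now(timezone.utc)
age_hours = (now - last_success).total_seconds() / 3600  # hours since success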
@@ -1,3 +1,5 @@
"""Glue checks for the metrics the quality-gate publishes."""

from __future__ import annotations

import os
@@ -25,11 +27,29 @@ def _query(promql: str) -> list[dict]:
def _expected_tasks() -> list[dict]:
    cfg = _load_config()
-   tasks = cfg.get("ariadne_schedule_tasks", [])
    tasks = [
        _normalize_task(item, cfg)
        for item in cfg.get("ariadne_schedule_tasks", [])
    ]
    assert tasks, "No Ariadne schedule tasks configured"
    return tasks


def _normalize_task(item: object, cfg: dict) -> dict:
    if isinstance(item, str):
        return {
            "task": item,
            "check_last_success": True,
            "max_success_age_hours": cfg.get("max_success_age_hours", 48),
        }
    if isinstance(item, dict):
        normalized = dict(item)
        normalized.setdefault("check_last_success", True)
        normalized.setdefault("max_success_age_hours", cfg.get("max_success_age_hours", 48))
        return normalized
    raise TypeError(f"Unsupported Ariadne schedule task config entry: {item!r}")


def _tracked_tasks(tasks: list[dict]) -> list[dict]:
    tracked = [item for item in tasks if item.get("check_last_success")]
    assert tracked, "No Ariadne schedule tasks are marked for success tracking"
ci/titan-iac-trivy-waivers.json (new file, 407 lines)
@ -0,0 +1,407 @@
{
  "version": 1,
  "generated_from": "Jenkins titan-iac build 225 Trivy filesystem scan",
  "default_expires_at": "2026-05-22",
  "ticket": "atlas-quality-wave-k8s-hardening",
  "default_reason": "Existing Kubernetes manifest hardening baseline accepted only for the first quality-gate rollout; fix or renew explicitly before expiry.",
  "misconfigurations": [
    {
      "id": "DS-0002",
      "targets": [
        "dockerfiles/Dockerfile.ananke-node-helper"
      ]
    },
    {
      "id": "KSV-0009",
      "targets": [
        "services/mailu/vip-controller.yaml",
        "services/maintenance/k3s-agent-restart-daemonset.yaml"
      ]
    },
    {
      "id": "KSV-0010",
      "targets": [
        "services/maintenance/k3s-agent-restart-daemonset.yaml",
        "services/maintenance/metis-sentinel-amd64-daemonset.yaml",
        "services/maintenance/metis-sentinel-arm64-daemonset.yaml",
        "services/monitoring/jetson-tegrastats-exporter.yaml"
      ]
    },
    {
      "id": "KSV-0014",
      "targets": [
        "infrastructure/cert-manager/cleanup/cert-manager-cleanup-job.yaml",
        "infrastructure/core/node-prefer-noschedule-cronjob.yaml",
        "infrastructure/core/ntp-sync-daemonset.yaml",
        "infrastructure/longhorn/adopt/longhorn-helm-adopt-job.yaml",
        "infrastructure/longhorn/core/longhorn-disk-tags-ensure-job.yaml",
        "infrastructure/longhorn/core/longhorn-settings-ensure-job.yaml",
        "infrastructure/longhorn/core/vault-sync-deployment.yaml",
        "infrastructure/longhorn/ui-ingress/oauth2-proxy-longhorn.yaml",
        "infrastructure/modules/profiles/components/device-plugin-jetson/daemonset.yaml",
        "infrastructure/modules/profiles/components/device-plugin-minipc/daemonset.yaml",
        "infrastructure/modules/profiles/components/device-plugin-tethys/daemonset.yaml",
        "infrastructure/postgres/statefulset.yaml",
        "infrastructure/vault-csi/vault-csi-provider.yaml",
        "services/ai-llm/deployment.yaml",
        "services/bstein-dev-home/backend-deployment.yaml",
        "services/bstein-dev-home/chat-ai-gateway-deployment.yaml",
        "services/bstein-dev-home/frontend-deployment.yaml",
        "services/bstein-dev-home/oneoffs/migrations/portal-migrate-job.yaml",
        "services/bstein-dev-home/oneoffs/portal-onboarding-e2e-test-job.yaml",
        "services/bstein-dev-home/vault-sync-deployment.yaml",
        "services/bstein-dev-home/vaultwarden-cred-sync-cronjob.yaml",
        "services/comms/atlasbot-deployment.yaml",
        "services/comms/coturn.yaml",
        "services/comms/element-call-deployment.yaml",
        "services/comms/guest-name-job.yaml",
        "services/comms/guest-register-deployment.yaml",
        "services/comms/livekit-token-deployment.yaml",
        "services/comms/livekit.yaml",
        "services/comms/mas-deployment.yaml",
        "services/comms/oneoffs/bstein-force-leave-job.yaml",
        "services/comms/oneoffs/comms-secrets-ensure-job.yaml",
        "services/comms/oneoffs/mas-admin-client-secret-ensure-job.yaml",
        "services/comms/oneoffs/mas-db-ensure-job.yaml",
        "services/comms/oneoffs/mas-local-users-ensure-job.yaml",
        "services/comms/oneoffs/othrys-kick-numeric-job.yaml",
        "services/comms/oneoffs/synapse-admin-ensure-job.yaml",
        "services/comms/oneoffs/synapse-seeder-admin-ensure-job.yaml",
        "services/comms/oneoffs/synapse-signingkey-ensure-job.yaml",
        "services/comms/oneoffs/synapse-user-seed-job.yaml",
        "services/comms/pin-othrys-job.yaml",
        "services/comms/reset-othrys-room-job.yaml",
        "services/comms/seed-othrys-room.yaml",
        "services/comms/vault-sync-deployment.yaml",
        "services/comms/wellknown.yaml",
        "services/crypto/monerod/deployment.yaml",
        "services/crypto/wallet-monero-temp/deployment.yaml",
        "services/crypto/xmr-miner/deployment.yaml",
        "services/crypto/xmr-miner/vault-sync-deployment.yaml",
        "services/crypto/xmr-miner/xmrig-daemonset.yaml",
        "services/finance/actual-budget-deployment.yaml",
        "services/finance/firefly-cronjob.yaml",
        "services/finance/firefly-deployment.yaml",
        "services/finance/firefly-user-sync-cronjob.yaml",
        "services/finance/oneoffs/finance-secrets-ensure-job.yaml",
        "services/gitea/deployment.yaml",
        "services/harbor/vault-sync-deployment.yaml",
        "services/health/wger-admin-ensure-cronjob.yaml",
        "services/health/wger-deployment.yaml",
        "services/health/wger-user-sync-cronjob.yaml",
        "services/jellyfin/deployment.yaml",
        "services/jellyfin/loader.yaml",
        "services/jenkins/deployment.yaml",
        "services/jenkins/vault-sync-deployment.yaml",
        "services/keycloak/deployment.yaml",
        "services/keycloak/oneoffs/actual-oidc-secret-ensure-job.yaml",
        "services/keycloak/oneoffs/harbor-oidc-secret-ensure-job.yaml",
        "services/keycloak/oneoffs/ldap-federation-job.yaml",
        "services/keycloak/oneoffs/logs-oidc-secret-ensure-job.yaml",
        "services/keycloak/oneoffs/mas-secrets-ensure-job.yaml",
        "services/keycloak/oneoffs/metis-node-passwords-secret-ensure-job.yaml",
        "services/keycloak/oneoffs/metis-oidc-secret-ensure-job.yaml",
        "services/keycloak/oneoffs/metis-ssh-keys-secret-ensure-job.yaml",
        "services/keycloak/oneoffs/portal-admin-client-secret-ensure-job.yaml",
        "services/keycloak/oneoffs/portal-e2e-client-job.yaml",
        "services/keycloak/oneoffs/portal-e2e-execute-actions-email-test-job.yaml",
        "services/keycloak/oneoffs/portal-e2e-target-client-job.yaml",
        "services/keycloak/oneoffs/portal-e2e-token-exchange-permissions-job.yaml",
        "services/keycloak/oneoffs/portal-e2e-token-exchange-test-job.yaml",
        "services/keycloak/oneoffs/quality-oidc-secret-ensure-job.yaml",
        "services/keycloak/oneoffs/realm-settings-job.yaml",
        "services/keycloak/oneoffs/soteria-oidc-secret-ensure-job.yaml",
        "services/keycloak/oneoffs/synapse-oidc-secret-ensure-job.yaml",
        "services/keycloak/oneoffs/user-overrides-job.yaml",
        "services/keycloak/oneoffs/vault-oidc-secret-ensure-job.yaml",
        "services/keycloak/vault-sync-deployment.yaml",
        "services/logging/node-image-gc-rpi4-daemonset.yaml",
        "services/logging/node-image-prune-rpi5-daemonset.yaml",
        "services/logging/node-log-rotation-daemonset.yaml",
        "services/logging/oauth2-proxy.yaml",
        "services/logging/oneoffs/opensearch-dashboards-setup-job.yaml",
        "services/logging/oneoffs/opensearch-ism-job.yaml",
        "services/logging/oneoffs/opensearch-observability-setup-job.yaml",
        "services/logging/opensearch-prune-cronjob.yaml",
        "services/logging/vault-sync-deployment.yaml",
        "services/mailu/mailu-sync-cronjob.yaml",
        "services/mailu/mailu-sync-listener.yaml",
        "services/mailu/oneoffs/mailu-sync-job.yaml",
        "services/mailu/vault-sync-deployment.yaml",
        "services/mailu/vip-controller.yaml",
        "services/maintenance/ariadne-deployment.yaml",
        "services/maintenance/disable-k3s-traefik-daemonset.yaml",
        "services/maintenance/image-sweeper-cronjob.yaml",
        "services/maintenance/k3s-agent-restart-daemonset.yaml",
        "services/maintenance/metis-deployment.yaml",
        "services/maintenance/metis-k3s-token-sync-cronjob.yaml",
        "services/maintenance/metis-sentinel-amd64-daemonset.yaml",
        "services/maintenance/metis-sentinel-arm64-daemonset.yaml",
        "services/maintenance/node-image-sweeper-daemonset.yaml",
        "services/maintenance/node-nofile-daemonset.yaml",
        "services/maintenance/oauth2-proxy-metis.yaml",
        "services/maintenance/oauth2-proxy-soteria.yaml",
        "services/maintenance/oneoffs/ariadne-migrate-job.yaml",
        "services/maintenance/oneoffs/k3s-traefik-cleanup-job.yaml",
        "services/maintenance/oneoffs/titan-24-rootfs-sweep-job.yaml",
        "services/maintenance/pod-cleaner-cronjob.yaml",
        "services/maintenance/soteria-deployment.yaml",
        "services/maintenance/vault-sync-deployment.yaml",
        "services/monitoring/dcgm-exporter.yaml",
        "services/monitoring/jetson-tegrastats-exporter.yaml",
        "services/monitoring/oneoffs/grafana-org-bootstrap.yaml",
        "services/monitoring/oneoffs/grafana-user-dedupe-job.yaml",
        "services/monitoring/platform-quality-gateway-deployment.yaml",
        "services/monitoring/platform-quality-suite-probe-cronjob.yaml",
        "services/monitoring/postmark-exporter-deployment.yaml",
        "services/monitoring/vmalert-atlas-availability.yaml",
        "services/monitoring/vault-sync-deployment.yaml",
        "services/nextcloud-mail-sync/cronjob.yaml",
        "services/nextcloud/collabora.yaml",
        "services/nextcloud/cronjob.yaml",
        "services/nextcloud/deployment.yaml",
        "services/nextcloud/maintenance-cronjob.yaml",
        "services/oauth2-proxy/deployment.yaml",
        "services/openldap/statefulset.yaml",
        "services/outline/deployment.yaml",
        "services/outline/redis-deployment.yaml",
        "services/pegasus/deployment.yaml",
        "services/pegasus/vault-sync-deployment.yaml",
        "services/planka/deployment.yaml",
        "services/quality/oauth2-proxy-sonarqube.yaml",
        "services/quality/sonarqube-deployment.yaml",
        "services/quality/sonarqube-exporter-deployment.yaml",
        "services/sui-metrics/base/deployment.yaml",
        "services/typhon/vault-sync-deployment.yaml",
        "services/vault/k8s-auth-config-cronjob.yaml",
        "services/vault/oidc-config-cronjob.yaml",
        "services/vault/statefulset.yaml",
        "services/vaultwarden/deployment.yaml"
      ]
    },
    {
      "id": "KSV-0017",
      "targets": [
        "infrastructure/modules/profiles/components/device-plugin-jetson/daemonset.yaml",
        "infrastructure/modules/profiles/components/device-plugin-minipc/daemonset.yaml",
        "infrastructure/modules/profiles/components/device-plugin-tethys/daemonset.yaml",
        "services/logging/node-image-gc-rpi4-daemonset.yaml",
        "services/logging/node-image-prune-rpi5-daemonset.yaml",
        "services/logging/node-log-rotation-daemonset.yaml",
        "services/maintenance/disable-k3s-traefik-daemonset.yaml",
        "services/maintenance/image-sweeper-cronjob.yaml",
        "services/maintenance/k3s-agent-restart-daemonset.yaml",
        "services/maintenance/metis-deployment.yaml",
        "services/maintenance/metis-sentinel-amd64-daemonset.yaml",
        "services/maintenance/metis-sentinel-arm64-daemonset.yaml",
        "services/maintenance/node-image-sweeper-daemonset.yaml",
        "services/maintenance/node-nofile-daemonset.yaml",
        "services/maintenance/oneoffs/titan-24-rootfs-sweep-job.yaml",
        "services/monitoring/dcgm-exporter.yaml",
        "services/monitoring/jetson-tegrastats-exporter.yaml"
      ]
    },
    {
      "id": "KSV-0041",
      "targets": [
        "infrastructure/cert-manager/cleanup/cert-manager-cleanup-rbac.yaml",
        "infrastructure/longhorn/adopt/longhorn-adopt-rbac.yaml",
        "infrastructure/traefik/clusterrole.yaml",
        "services/bstein-dev-home/rbac.yaml",
        "services/comms/comms-secrets-ensure-rbac.yaml",
        "services/comms/mas-db-ensure-rbac.yaml",
        "services/comms/mas-secrets-ensure-rbac.yaml",
        "services/maintenance/soteria-rbac.yaml"
      ]
    },
    {
      "id": "KSV-0047",
      "targets": [
        "services/monitoring/rbac.yaml"
      ]
    },
    {
      "id": "KSV-0053",
      "targets": [
        "services/comms/comms-secrets-ensure-rbac.yaml",
        "services/comms/mas-db-ensure-rbac.yaml",
        "services/jenkins/serviceaccount.yaml",
        "services/maintenance/ariadne-rbac.yaml"
      ]
    },
    {
      "id": "KSV-0056",
      "targets": [
        "infrastructure/cert-manager/cleanup/cert-manager-cleanup-rbac.yaml",
        "infrastructure/longhorn/adopt/longhorn-adopt-rbac.yaml",
        "services/jenkins/serviceaccount.yaml",
        "services/maintenance/disable-k3s-traefik-rbac.yaml",
        "services/maintenance/k3s-traefik-cleanup-rbac.yaml"
      ]
    },
    {
      "id": "KSV-0114",
      "targets": [
        "infrastructure/cert-manager/cleanup/cert-manager-cleanup-rbac.yaml"
      ]
    },
    {
      "id": "KSV-0118",
      "targets": [
        "infrastructure/cert-manager/cleanup/cert-manager-cleanup-job.yaml",
        "infrastructure/core/coredns-deployment.yaml",
        "infrastructure/core/node-prefer-noschedule-cronjob.yaml",
        "infrastructure/core/ntp-sync-daemonset.yaml",
        "infrastructure/longhorn/adopt/longhorn-helm-adopt-job.yaml",
        "infrastructure/longhorn/core/longhorn-disk-tags-ensure-job.yaml",
        "infrastructure/longhorn/core/longhorn-settings-ensure-job.yaml",
        "infrastructure/longhorn/core/vault-sync-deployment.yaml",
        "infrastructure/longhorn/ui-ingress/oauth2-proxy-longhorn.yaml",
        "infrastructure/modules/profiles/components/device-plugin-jetson/daemonset.yaml",
        "infrastructure/modules/profiles/components/device-plugin-minipc/daemonset.yaml",
        "infrastructure/modules/profiles/components/device-plugin-tethys/daemonset.yaml",
        "infrastructure/postgres/statefulset.yaml",
        "infrastructure/vault-csi/vault-csi-provider.yaml",
        "services/ai-llm/deployment.yaml",
        "services/bstein-dev-home/backend-deployment.yaml",
        "services/bstein-dev-home/chat-ai-gateway-deployment.yaml",
        "services/bstein-dev-home/frontend-deployment.yaml",
        "services/bstein-dev-home/oneoffs/migrations/portal-migrate-job.yaml",
        "services/bstein-dev-home/oneoffs/portal-onboarding-e2e-test-job.yaml",
        "services/bstein-dev-home/vault-sync-deployment.yaml",
        "services/bstein-dev-home/vaultwarden-cred-sync-cronjob.yaml",
        "services/comms/atlasbot-deployment.yaml",
        "services/comms/coturn.yaml",
        "services/comms/element-call-deployment.yaml",
        "services/comms/guest-name-job.yaml",
        "services/comms/livekit-token-deployment.yaml",
        "services/comms/livekit.yaml",
        "services/comms/mas-deployment.yaml",
        "services/comms/oneoffs/bstein-force-leave-job.yaml",
        "services/comms/oneoffs/comms-secrets-ensure-job.yaml",
        "services/comms/oneoffs/mas-admin-client-secret-ensure-job.yaml",
        "services/comms/oneoffs/mas-db-ensure-job.yaml",
        "services/comms/oneoffs/mas-local-users-ensure-job.yaml",
        "services/comms/oneoffs/othrys-kick-numeric-job.yaml",
        "services/comms/oneoffs/synapse-admin-ensure-job.yaml",
        "services/comms/oneoffs/synapse-seeder-admin-ensure-job.yaml",
        "services/comms/oneoffs/synapse-signingkey-ensure-job.yaml",
        "services/comms/oneoffs/synapse-user-seed-job.yaml",
        "services/comms/pin-othrys-job.yaml",
        "services/comms/reset-othrys-room-job.yaml",
        "services/comms/seed-othrys-room.yaml",
        "services/comms/vault-sync-deployment.yaml",
        "services/comms/wellknown.yaml",
        "services/crypto/monerod/deployment.yaml",
        "services/crypto/wallet-monero-temp/deployment.yaml",
        "services/crypto/xmr-miner/deployment.yaml",
        "services/crypto/xmr-miner/vault-sync-deployment.yaml",
        "services/crypto/xmr-miner/xmrig-daemonset.yaml",
        "services/finance/firefly-cronjob.yaml",
        "services/finance/firefly-deployment.yaml",
        "services/finance/firefly-user-sync-cronjob.yaml",
        "services/finance/oneoffs/finance-secrets-ensure-job.yaml",
        "services/gitea/deployment.yaml",
        "services/harbor/vault-sync-deployment.yaml",
        "services/health/wger-admin-ensure-cronjob.yaml",
        "services/health/wger-deployment.yaml",
        "services/health/wger-user-sync-cronjob.yaml",
        "services/jellyfin/loader.yaml",
        "services/jenkins/deployment.yaml",
        "services/jenkins/vault-sync-deployment.yaml",
        "services/keycloak/oneoffs/actual-oidc-secret-ensure-job.yaml",
        "services/keycloak/oneoffs/harbor-oidc-secret-ensure-job.yaml",
        "services/keycloak/oneoffs/ldap-federation-job.yaml",
        "services/keycloak/oneoffs/logs-oidc-secret-ensure-job.yaml",
        "services/keycloak/oneoffs/mas-secrets-ensure-job.yaml",
        "services/keycloak/oneoffs/metis-node-passwords-secret-ensure-job.yaml",
        "services/keycloak/oneoffs/metis-oidc-secret-ensure-job.yaml",
        "services/keycloak/oneoffs/metis-ssh-keys-secret-ensure-job.yaml",
        "services/keycloak/oneoffs/portal-admin-client-secret-ensure-job.yaml",
        "services/keycloak/oneoffs/portal-e2e-client-job.yaml",
        "services/keycloak/oneoffs/portal-e2e-execute-actions-email-test-job.yaml",
        "services/keycloak/oneoffs/portal-e2e-target-client-job.yaml",
        "services/keycloak/oneoffs/portal-e2e-token-exchange-permissions-job.yaml",
        "services/keycloak/oneoffs/portal-e2e-token-exchange-test-job.yaml",
        "services/keycloak/oneoffs/quality-oidc-secret-ensure-job.yaml",
        "services/keycloak/oneoffs/realm-settings-job.yaml",
        "services/keycloak/oneoffs/soteria-oidc-secret-ensure-job.yaml",
        "services/keycloak/oneoffs/synapse-oidc-secret-ensure-job.yaml",
        "services/keycloak/oneoffs/user-overrides-job.yaml",
        "services/keycloak/oneoffs/vault-oidc-secret-ensure-job.yaml",
        "services/keycloak/vault-sync-deployment.yaml",
        "services/logging/node-image-gc-rpi4-daemonset.yaml",
        "services/logging/node-image-prune-rpi5-daemonset.yaml",
        "services/logging/node-log-rotation-daemonset.yaml",
        "services/logging/oauth2-proxy.yaml",
        "services/logging/oneoffs/opensearch-dashboards-setup-job.yaml",
        "services/logging/oneoffs/opensearch-ism-job.yaml",
        "services/logging/oneoffs/opensearch-observability-setup-job.yaml",
        "services/logging/opensearch-prune-cronjob.yaml",
        "services/logging/vault-sync-deployment.yaml",
        "services/mailu/mailu-sync-cronjob.yaml",
        "services/mailu/mailu-sync-listener.yaml",
        "services/mailu/oneoffs/mailu-sync-job.yaml",
        "services/mailu/vault-sync-deployment.yaml",
        "services/mailu/vip-controller.yaml",
        "services/maintenance/ariadne-deployment.yaml",
        "services/maintenance/disable-k3s-traefik-daemonset.yaml",
        "services/maintenance/image-sweeper-cronjob.yaml",
        "services/maintenance/k3s-agent-restart-daemonset.yaml",
        "services/maintenance/metis-deployment.yaml",
        "services/maintenance/metis-k3s-token-sync-cronjob.yaml",
        "services/maintenance/metis-sentinel-amd64-daemonset.yaml",
        "services/maintenance/metis-sentinel-arm64-daemonset.yaml",
        "services/maintenance/node-image-sweeper-daemonset.yaml",
        "services/maintenance/node-nofile-daemonset.yaml",
        "services/maintenance/oauth2-proxy-metis.yaml",
        "services/maintenance/oauth2-proxy-soteria.yaml",
        "services/maintenance/oneoffs/ariadne-migrate-job.yaml",
        "services/maintenance/oneoffs/k3s-traefik-cleanup-job.yaml",
        "services/maintenance/oneoffs/titan-24-rootfs-sweep-job.yaml",
        "services/maintenance/pod-cleaner-cronjob.yaml",
        "services/maintenance/soteria-deployment.yaml",
        "services/maintenance/vault-sync-deployment.yaml",
        "services/monitoring/dcgm-exporter.yaml",
        "services/monitoring/jetson-tegrastats-exporter.yaml",
        "services/monitoring/oneoffs/grafana-org-bootstrap.yaml",
        "services/monitoring/oneoffs/grafana-user-dedupe-job.yaml",
        "services/monitoring/platform-quality-gateway-deployment.yaml",
        "services/monitoring/platform-quality-suite-probe-cronjob.yaml",
        "services/monitoring/postmark-exporter-deployment.yaml",
        "services/monitoring/vmalert-atlas-availability.yaml",
        "services/monitoring/vault-sync-deployment.yaml",
        "services/nextcloud/collabora.yaml",
        "services/oauth2-proxy/deployment.yaml",
        "services/openldap/statefulset.yaml",
        "services/outline/deployment.yaml",
        "services/outline/redis-deployment.yaml",
        "services/pegasus/vault-sync-deployment.yaml",
        "services/quality/oauth2-proxy-sonarqube.yaml",
        "services/quality/sonarqube-deployment.yaml",
        "services/quality/sonarqube-exporter-deployment.yaml",
        "services/sui-metrics/base/deployment.yaml",
        "services/sui-metrics/overlays/atlas/patch-node-selector.yaml",
        "services/typhon/deployment.yaml",
        "services/typhon/vault-sync-deployment.yaml",
        "services/vault/k8s-auth-config-cronjob.yaml",
        "services/vault/oidc-config-cronjob.yaml",
        "services/vaultwarden/deployment.yaml"
      ]
    },
    {
      "id": "KSV-0121",
      "targets": [
        "services/logging/node-image-gc-rpi4-daemonset.yaml",
        "services/logging/node-image-prune-rpi5-daemonset.yaml",
        "services/logging/node-log-rotation-daemonset.yaml",
        "services/maintenance/disable-k3s-traefik-daemonset.yaml",
        "services/maintenance/image-sweeper-cronjob.yaml",
        "services/maintenance/metis-deployment.yaml",
        "services/maintenance/node-image-sweeper-daemonset.yaml",
        "services/maintenance/node-nofile-daemonset.yaml",
        "services/maintenance/oneoffs/titan-24-rootfs-sweep-job.yaml"
      ]
    }
  ]
}
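A minimal sketch (not in the repo) of how a CI step might apply these waivers to Trivy's JSON report. The report shape (`Results[].Target` and `Results[].Misconfigurations[].ID`) follows Trivy's documented output; the `report.json` path is an assumption, and any normalization between Trivy's native IDs and the `KSV-xxxx` form used here is assumed to happen elsewhere:

import datetime as dt
import json

def active_waivers(path: str, today: dt.date) -> set[tuple[str, str]]:
    # Return (id, target) pairs; once default_expires_at passes, nothing is waived.
    doc = json.load(open(path))
    if today > dt.date.fromisoformat(doc["default_expires_at"]):
        return set()
    return {
        (entry["id"], target)
        for entry in doc["misconfigurations"]
        for target in entry["targets"]
    }

def unwaived_findings(report: dict, waivers: set[tuple[str, str]]) -> list[tuple[str, str]]:
    # Collect every misconfiguration that is not covered by a waiver.
    findings = []
    for result in report.get("Results", []):
        for misconf in result.get("Misconfigurations", []):
            key = (misconf["ID"], result["Target"])
            if key not in waivers:
                findings.append(key)
    return findings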
@@ -4,6 +4,8 @@ kind: Kustomization
metadata:
  name: ai-llm
  namespace: flux-system
  annotations:
    kustomize.toolkit.fluxcd.io/ssa: IfNotPresent
spec:
  interval: 10m
  path: ./services/ai-llm

@@ -4,6 +4,8 @@ kind: Kustomization
metadata:
  name: bstein-dev-home-migrations
  namespace: flux-system
  annotations:
    kustomize.toolkit.fluxcd.io/ssa: IfNotPresent
spec:
  interval: 10m
  path: ./services/bstein-dev-home/oneoffs/migrations

@@ -13,14 +13,14 @@ spec:
  git:
    checkout:
      ref:
        branch: feature/ariadne
        branch: main
    commit:
      author:
        email: ops@bstein.dev
        name: flux-bot
      messageTemplate: "chore(bstein-dev-home): automated image update"
    push:
      branch: feature/ariadne
      branch: main
  update:
    strategy: Setters
    path: services/bstein-dev-home

@@ -4,6 +4,8 @@ kind: Kustomization
metadata:
  name: bstein-dev-home
  namespace: flux-system
  annotations:
    kustomize.toolkit.fluxcd.io/ssa: IfNotPresent
spec:
  interval: 10m
  path: ./services/bstein-dev-home

@@ -4,6 +4,8 @@ kind: Kustomization
metadata:
  name: comms
  namespace: flux-system
  annotations:
    kustomize.toolkit.fluxcd.io/ssa: IfNotPresent
spec:
  interval: 10m
  prune: true
@@ -13,5 +15,3 @@ spec:
  path: ./services/comms
  targetNamespace: comms
  timeout: 2m
  dependsOn:
    - name: traefik

@@ -4,6 +4,8 @@ kind: Kustomization
metadata:
  name: crypto
  namespace: flux-system
  annotations:
    kustomize.toolkit.fluxcd.io/ssa: IfNotPresent
spec:
  interval: 10m
  path: ./services/crypto

@@ -4,6 +4,8 @@ kind: Kustomization
metadata:
  name: finance
  namespace: flux-system
  annotations:
    kustomize.toolkit.fluxcd.io/ssa: IfNotPresent
spec:
  interval: 10m
  path: ./services/finance

@@ -4,6 +4,8 @@ kind: Kustomization
metadata:
  name: gitea
  namespace: flux-system
  annotations:
    kustomize.toolkit.fluxcd.io/ssa: IfNotPresent
spec:
  interval: 10m
  path: ./services/gitea

@@ -4,6 +4,8 @@ kind: Kustomization
metadata:
  name: harbor
  namespace: flux-system
  annotations:
    kustomize.toolkit.fluxcd.io/ssa: IfNotPresent
spec:
  interval: 10m
  path: ./services/harbor

@@ -4,6 +4,8 @@ kind: Kustomization
metadata:
  name: health
  namespace: flux-system
  annotations:
    kustomize.toolkit.fluxcd.io/ssa: IfNotPresent
spec:
  interval: 10m
  path: ./services/health
@@ -15,7 +17,6 @@ spec:
  dependsOn:
    - name: keycloak
    - name: postgres
    - name: traefik
    - name: vault
  healthChecks:
    - apiVersion: apps/v1

@@ -4,6 +4,8 @@ kind: Kustomization
metadata:
  name: jellyfin
  namespace: flux-system
  annotations:
    kustomize.toolkit.fluxcd.io/ssa: IfNotPresent
spec:
  interval: 10m
  path: ./services/jellyfin

@@ -4,6 +4,8 @@ kind: Kustomization
metadata:
  name: jenkins
  namespace: flux-system
  annotations:
    kustomize.toolkit.fluxcd.io/ssa: IfNotPresent
spec:
  interval: 10m
  path: ./services/jenkins
@@ -14,7 +16,6 @@ spec:
  targetNamespace: jenkins
  dependsOn:
    - name: helm
    - name: traefik
  healthChecks:
    - apiVersion: apps/v1
      kind: Deployment

@@ -4,6 +4,8 @@ kind: Kustomization
metadata:
  name: keycloak
  namespace: flux-system
  annotations:
    kustomize.toolkit.fluxcd.io/ssa: IfNotPresent
spec:
  interval: 10m
  prune: true

@@ -21,6 +21,7 @@ resources:
  - sui-metrics/kustomization.yaml
  - openldap/kustomization.yaml
  - keycloak/kustomization.yaml
  - quality/kustomization.yaml
  - oauth2-proxy/kustomization.yaml
  - mailu/kustomization.yaml
  - jenkins/kustomization.yaml

@@ -4,6 +4,8 @@ kind: Kustomization
metadata:
  name: mailu
  namespace: flux-system
  annotations:
    kustomize.toolkit.fluxcd.io/ssa: IfNotPresent
spec:
  interval: 10m
  sourceRef:

@@ -4,6 +4,8 @@ kind: Kustomization
metadata:
  name: monerod
  namespace: flux-system
  annotations:
    kustomize.toolkit.fluxcd.io/ssa: IfNotPresent
spec:
  interval: 10m
  path: ./services/crypto/monerod

@@ -4,6 +4,8 @@ kind: Kustomization
metadata:
  name: nextcloud-mail-sync
  namespace: flux-system
  annotations:
    kustomize.toolkit.fluxcd.io/ssa: IfNotPresent
spec:
  interval: 10m
  prune: true

@@ -4,6 +4,8 @@ kind: Kustomization
metadata:
  name: nextcloud
  namespace: flux-system
  annotations:
    kustomize.toolkit.fluxcd.io/ssa: IfNotPresent
spec:
  interval: 10m
  path: ./services/nextcloud

@@ -4,6 +4,8 @@ kind: Kustomization
metadata:
  name: oauth2-proxy
  namespace: flux-system
  annotations:
    kustomize.toolkit.fluxcd.io/ssa: IfNotPresent
spec:
  interval: 10m
  prune: true

@@ -4,6 +4,8 @@ kind: Kustomization
metadata:
  name: openldap
  namespace: flux-system
  annotations:
    kustomize.toolkit.fluxcd.io/ssa: IfNotPresent
spec:
  interval: 10m
  prune: true

@@ -4,6 +4,8 @@ kind: Kustomization
metadata:
  name: outline
  namespace: flux-system
  annotations:
    kustomize.toolkit.fluxcd.io/ssa: IfNotPresent
spec:
  interval: 10m
  path: ./services/outline
@@ -15,7 +17,6 @@ spec:
  dependsOn:
    - name: keycloak
    - name: mailu
    - name: traefik
  healthChecks:
    - apiVersion: apps/v1
      kind: Deployment

@@ -4,6 +4,8 @@ kind: Kustomization
metadata:
  name: pegasus
  namespace: flux-system
  annotations:
    kustomize.toolkit.fluxcd.io/ssa: IfNotPresent
spec:
  interval: 10m
  path: ./services/pegasus

@@ -4,6 +4,8 @@ kind: Kustomization
metadata:
  name: planka
  namespace: flux-system
  annotations:
    kustomize.toolkit.fluxcd.io/ssa: IfNotPresent
spec:
  interval: 10m
  path: ./services/planka
@@ -15,7 +17,6 @@ spec:
  dependsOn:
    - name: keycloak
    - name: mailu
    - name: traefik
  healthChecks:
    - apiVersion: apps/v1
      kind: Deployment

@@ -0,0 +1,36 @@
# clusters/atlas/flux-system/applications/quality/kustomization.yaml
apiVersion: kustomize.toolkit.fluxcd.io/v1
kind: Kustomization
metadata:
  name: quality
  namespace: flux-system
  annotations:
    kustomize.toolkit.fluxcd.io/ssa: IfNotPresent
spec:
  interval: 10m
  path: ./services/quality
  prune: true
  sourceRef:
    kind: GitRepository
    name: flux-system
  targetNamespace: quality
  dependsOn:
    - name: cert-manager
    - name: keycloak
    - name: vault
    - name: postgres
  healthChecks:
    - apiVersion: apps/v1
      kind: Deployment
      name: sonarqube
      namespace: quality
    - apiVersion: apps/v1
      kind: Deployment
      name: sonarqube-exporter
      namespace: quality
    - apiVersion: apps/v1
      kind: Deployment
      name: oauth2-proxy-sonarqube
      namespace: quality
  wait: false
  timeout: 20m

@@ -4,6 +4,8 @@ kind: Kustomization
metadata:
  name: sui-metrics
  namespace: flux-system
  annotations:
    kustomize.toolkit.fluxcd.io/ssa: IfNotPresent
spec:
  interval: 10m
  path: ./services/sui-metrics/overlays/atlas

@@ -4,6 +4,8 @@ kind: Kustomization
metadata:
  name: typhon
  namespace: flux-system
  annotations:
    kustomize.toolkit.fluxcd.io/ssa: IfNotPresent
spec:
  interval: 10m
  path: ./services/typhon

@@ -4,6 +4,8 @@ kind: Kustomization
metadata:
  name: vault
  namespace: flux-system
  annotations:
    kustomize.toolkit.fluxcd.io/ssa: IfNotPresent
spec:
  interval: 10m
  sourceRef:

@@ -4,6 +4,8 @@ kind: Kustomization
metadata:
  name: vaultwarden
  namespace: flux-system
  annotations:
    kustomize.toolkit.fluxcd.io/ssa: IfNotPresent
spec:
  interval: 10m
  suspend: false
@@ -17,4 +19,3 @@ spec:
  wait: true
  dependsOn:
    - name: helm
    - name: traefik

@@ -4,6 +4,8 @@ kind: Kustomization
metadata:
  name: wallet-monero-temp
  namespace: flux-system
  annotations:
    kustomize.toolkit.fluxcd.io/ssa: IfNotPresent
spec:
  interval: 10m
  path: ./services/crypto/wallet-monero-temp

@@ -4,6 +4,8 @@ kind: Kustomization
metadata:
  name: xmr-miner
  namespace: flux-system
  annotations:
    kustomize.toolkit.fluxcd.io/ssa: IfNotPresent
spec:
  interval: 10m
  path: ./services/crypto/xmr-miner
@@ -5966,6 +5966,9 @@ spec:
      - args:
        - --events-addr=http://notification-controller.$(RUNTIME_NAMESPACE).svc.cluster.local./
        - --watch-all-namespaces=true
        - --concurrent=1
        - --requeue-dependency=5s
        - --interval-jitter-percentage=30
        - --log-level=info
        - --log-encoding=json
        - --enable-leader-election
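For intuition (my illustration, not repo code): `--interval-jitter-percentage=30` spreads reconcile wake-ups so controllers do not all fire at once. A rough sketch of that effect, assuming symmetric uniform jitter around the configured interval; the exact distribution Flux uses is an implementation detail:

import random

def jittered(interval_seconds: float, jitter_percentage: float = 30.0) -> float:
    # Pick the next reconcile delay within +/- jitter_percentage of the interval.
    spread = interval_seconds * jitter_percentage / 100.0
    return interval_seconds + random.uniform(-spread, spread)

# e.g. a 10m Kustomization interval lands somewhere in a roughly 7m..13m window
print(jittered(600))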
@@ -7,7 +7,7 @@ metadata:
  name: flux-system
  namespace: flux-system
spec:
  interval: 1m0s
  interval: 15m0s
  ref:
    branch: main
  secretRef:
@@ -20,7 +20,7 @@ metadata:
  name: flux-system
  namespace: flux-system
spec:
  interval: 10m0s
  interval: 1h0m0s
  path: ./clusters/atlas/flux-system
  prune: true
  sourceRef:

@@ -4,6 +4,8 @@ kind: Kustomization
metadata:
  name: cert-manager-cleanup
  namespace: flux-system
  annotations:
    kustomize.toolkit.fluxcd.io/ssa: IfNotPresent
spec:
  interval: 30m
  path: ./infrastructure/cert-manager/cleanup

@@ -4,6 +4,8 @@ kind: Kustomization
metadata:
  name: cert-manager
  namespace: flux-system
  annotations:
    kustomize.toolkit.fluxcd.io/ssa: IfNotPresent
spec:
  interval: 30m
  path: ./infrastructure/cert-manager

@@ -4,6 +4,8 @@ kind: Kustomization
metadata:
  name: core
  namespace: flux-system
  annotations:
    kustomize.toolkit.fluxcd.io/ssa: IfNotPresent
spec:
  interval: 10m
  path: ./infrastructure/core

@@ -4,6 +4,8 @@ kind: Kustomization
metadata:
  name: gitops-ui
  namespace: flux-system
  annotations:
    kustomize.toolkit.fluxcd.io/ssa: IfNotPresent
spec:
  interval: 10m
  timeout: 10m
@@ -16,5 +18,4 @@ spec:
  targetNamespace: flux-system
  dependsOn:
    - name: helm
    - name: traefik
  wait: true

@@ -4,6 +4,8 @@ kind: Kustomization
metadata:
  name: helm
  namespace: flux-system
  annotations:
    kustomize.toolkit.fluxcd.io/ssa: IfNotPresent
spec:
  interval: 30m
  sourceRef:

@@ -4,6 +4,8 @@ kind: Kustomization
metadata:
  name: logging
  namespace: flux-system
  annotations:
    kustomize.toolkit.fluxcd.io/ssa: IfNotPresent
spec:
  interval: 10m
  path: ./services/logging

@@ -4,6 +4,8 @@ kind: Kustomization
metadata:
  name: longhorn-adopt
  namespace: flux-system
  annotations:
    kustomize.toolkit.fluxcd.io/ssa: IfNotPresent
spec:
  interval: 30m
  path: ./infrastructure/longhorn/adopt

@@ -4,6 +4,8 @@ kind: Kustomization
metadata:
  name: longhorn-ui
  namespace: flux-system
  annotations:
    kustomize.toolkit.fluxcd.io/ssa: IfNotPresent
spec:
  interval: 10m
  path: ./infrastructure/longhorn/ui-ingress

@@ -4,6 +4,8 @@ kind: Kustomization
metadata:
  name: longhorn
  namespace: flux-system
  annotations:
    kustomize.toolkit.fluxcd.io/ssa: IfNotPresent
spec:
  interval: 30m
  path: ./infrastructure/longhorn/core

@@ -13,14 +13,14 @@ spec:
  git:
    checkout:
      ref:
        branch: feature/ariadne
        branch: main
    commit:
      author:
        email: ops@bstein.dev
        name: flux-bot
      messageTemplate: "chore(maintenance): automated image update"
    push:
      branch: feature/ariadne
      branch: main
  update:
    strategy: Setters
    path: services/maintenance

@@ -4,6 +4,8 @@ kind: Kustomization
metadata:
  name: maintenance
  namespace: flux-system
  annotations:
    kustomize.toolkit.fluxcd.io/ssa: IfNotPresent
spec:
  interval: 10m
  path: ./services/maintenance

@@ -4,6 +4,8 @@ kind: Kustomization
metadata:
  name: metallb
  namespace: flux-system
  annotations:
    kustomize.toolkit.fluxcd.io/ssa: IfNotPresent
spec:
  interval: 30m
  sourceRef:

@@ -4,6 +4,8 @@ kind: Kustomization
metadata:
  name: monitoring
  namespace: flux-system
  annotations:
    kustomize.toolkit.fluxcd.io/ssa: IfNotPresent
spec:
  interval: 10m
  path: ./services/monitoring

@@ -4,6 +4,8 @@ kind: Kustomization
metadata:
  name: postgres
  namespace: flux-system
  annotations:
    kustomize.toolkit.fluxcd.io/ssa: IfNotPresent
spec:
  interval: 10m
  path: ./infrastructure/postgres

@@ -4,6 +4,8 @@ kind: Kustomization
metadata:
  name: traefik
  namespace: flux-system
  annotations:
    kustomize.toolkit.fluxcd.io/ssa: IfNotPresent
spec:
  interval: 10m
  path: ./infrastructure/traefik

@@ -4,6 +4,8 @@ kind: Kustomization
metadata:
  name: vault-csi
  namespace: flux-system
  annotations:
    kustomize.toolkit.fluxcd.io/ssa: IfNotPresent
spec:
  interval: 30m
  sourceRef:

@@ -4,6 +4,8 @@ kind: Kustomization
metadata:
  name: vault-injector
  namespace: flux-system
  annotations:
    kustomize.toolkit.fluxcd.io/ssa: IfNotPresent
spec:
  interval: 30m
  path: ./infrastructure/vault-injector
@@ -2,4 +2,8 @@ FROM python:3.11-slim

ENV PIP_DISABLE_PIP_VERSION_CHECK=1

RUN pip install --no-cache-dir requests psycopg2-binary
RUN pip install --no-cache-dir requests psycopg2-binary \
    && groupadd --system guest-tools \
    && useradd --system --uid 65532 --gid guest-tools --home-dir /nonexistent --shell /usr/sbin/nologin guest-tools

USER guest-tools

@@ -1,16 +1,8 @@
FROM --platform=$BUILDPLATFORM opensearchproject/data-prepper:2.8.0 AS source

FROM --platform=$TARGETPLATFORM eclipse-temurin:17-jre
# Use the mirrored Harbor artifact so CI does not depend on Docker Hub egress.
FROM registry.bstein.dev/streaming/data-prepper@sha256:32ac6ad42e0f12da08bebee307e290b17d127b30def9b06eeaffbcbbc5033e83

ENV DATA_PREPPER_PATH=/usr/share/data-prepper

RUN useradd -u 10001 -M -U -d / -s /usr/sbin/nologin data_prepper \
    && mkdir -p /var/log/data-prepper

COPY --from=source /usr/share/data-prepper /usr/share/data-prepper

RUN chown -R 10001:10001 /usr/share/data-prepper /var/log/data-prepper

USER 10001
WORKDIR /usr/share/data-prepper
CMD ["bin/data-prepper"]

@@ -1,10 +1,13 @@
FROM ghcr.io/element-hq/lk-jwt-service:0.3.0 AS base

FROM alpine:3.20
RUN apk add --no-cache ca-certificates
RUN apk add --no-cache ca-certificates \
    && addgroup -S livekit-token \
    && adduser -S -D -H -u 65532 -G livekit-token livekit-token
COPY --from=base /lk-jwt-service /lk-jwt-service
COPY dockerfiles/vault-entrypoint.sh /entrypoint.sh
RUN chmod 0755 /entrypoint.sh

USER livekit-token
ENTRYPOINT ["/entrypoint.sh"]
CMD ["/lk-jwt-service"]

@@ -29,10 +29,12 @@ FROM ${DEBIAN_IMAGE}
RUN set -eux; \
    apt-get update; \
    apt-get install -y --no-install-recommends ca-certificates; \
    update-ca-certificates; rm -rf /var/lib/apt/lists/*
    update-ca-certificates; rm -rf /var/lib/apt/lists/*; \
    groupadd --system p2pool; \
    useradd --system --uid 65532 --gid p2pool --home-dir /nonexistent --shell /usr/sbin/nologin p2pool
COPY --from=fetch /out/p2pool /usr/local/bin/p2pool

RUN /usr/local/bin/p2pool --version || true
EXPOSE 3333
USER p2pool
ENTRYPOINT ["/usr/local/bin/p2pool"]

@@ -26,9 +26,12 @@ RUN set -eux; \
    curl -fsSL "$URL" -o /opt/monero/monero.tar.bz2; \
    tar -xjf /opt/monero/monero.tar.bz2 -C /opt/monero --strip-components=1; \
    install -m 0755 /opt/monero/monero-wallet-rpc /usr/local/bin/monero-wallet-rpc; \
    rm -f /opt/monero/monero.tar.bz2
    rm -f /opt/monero/monero.tar.bz2; \
    groupadd --system monero; \
    useradd --system --uid 1000 --gid monero --home-dir /nonexistent --shell /usr/sbin/nologin monero

ENV PATH="/usr/local/bin:/usr/bin:/bin"
RUN /usr/local/bin/monero-wallet-rpc --version || true

EXPOSE 18083
USER monero

@@ -23,10 +23,14 @@ RUN set -eux; \
    mkdir -p /opt/monero; \
    tar -xjf /tmp/monero.tar.bz2 -C /opt/monero --strip-components=1; \
    rm -f /tmp/monero.tar.bz2; \
    groupadd --system monero; \
    useradd --system --uid 1000 --gid monero --home-dir /nonexistent --shell /usr/sbin/nologin monero; \
    mkdir -p /data; \
    chown monero:monero /data; \
    chmod 0770 /data

ENV LD_LIBRARY_PATH=/opt/monero:/opt/monero/lib \
    PATH="/opt/monero:${PATH}"

USER monero
CMD ["/opt/monero/monerod", "--version"]

@@ -1,10 +1,13 @@
FROM quay.io/oauth2-proxy/oauth2-proxy:v7.6.0 AS base

FROM alpine:3.20
RUN apk add --no-cache ca-certificates
RUN apk add --no-cache ca-certificates \
    && addgroup -S oauth2-proxy \
    && adduser -S -D -H -u 65532 -G oauth2-proxy oauth2-proxy
COPY --from=base /bin/oauth2-proxy /bin/oauth2-proxy
COPY dockerfiles/vault-entrypoint.sh /entrypoint.sh
RUN chmod 0755 /entrypoint.sh

USER oauth2-proxy
ENTRYPOINT ["/entrypoint.sh"]
CMD ["/bin/oauth2-proxy"]

@@ -1,10 +1,13 @@
FROM registry.bstein.dev/streaming/pegasus:1.2.32 AS base

FROM alpine:3.20
RUN apk add --no-cache ca-certificates
RUN apk add --no-cache ca-certificates \
    && addgroup -S pegasus \
    && adduser -S -D -H -u 65532 -G pegasus pegasus
COPY --from=base /pegasus /pegasus
COPY dockerfiles/vault-entrypoint.sh /entrypoint.sh
RUN chmod 0755 /entrypoint.sh

USER pegasus
ENTRYPOINT ["/entrypoint.sh"]
CMD ["/pegasus"]

dockerfiles/Dockerfile.quality-tools (new file, 48 lines)
@@ -0,0 +1,48 @@
# dockerfiles/Dockerfile.quality-tools
FROM debian:bookworm-slim

ARG SONAR_SCANNER_VERSION=8.0.1.6346
ARG TRIVY_VERSION=0.70.0
ENV TRIVY_CACHE_DIR=/opt/trivy-cache

SHELL ["/bin/bash", "-o", "pipefail", "-c"]

RUN apt-get update \
    && apt-get install -y --no-install-recommends \
        bash \
        ca-certificates \
        curl \
        git \
        jq \
        unzip \
    && rm -rf /var/lib/apt/lists/* \
    && groupadd --system quality-tools \
    && useradd --system --uid 65532 --gid quality-tools --home-dir /nonexistent --shell /usr/sbin/nologin quality-tools

RUN set -eux; \
    scanner_zip="sonar-scanner-cli-${SONAR_SCANNER_VERSION}-linux-aarch64.zip"; \
    base_url="https://binaries.sonarsource.com/Distribution/sonar-scanner-cli"; \
    curl -fsSL "${base_url}/${scanner_zip}" -o "/tmp/${scanner_zip}"; \
    curl -fsSL "${base_url}/${scanner_zip}.sha256" -o "/tmp/${scanner_zip}.sha256"; \
    printf '%s %s\n' "$(cat "/tmp/${scanner_zip}.sha256")" "/tmp/${scanner_zip}" | sha256sum -c -; \
    unzip -q "/tmp/${scanner_zip}" -d /opt; \
    ln -s "/opt/sonar-scanner-${SONAR_SCANNER_VERSION}-linux-aarch64/bin/sonar-scanner" /usr/local/bin/sonar-scanner; \
    rm -f "/tmp/${scanner_zip}" "/tmp/${scanner_zip}.sha256"

RUN set -eux; \
    trivy_tgz="trivy_${TRIVY_VERSION}_Linux-ARM64.tar.gz"; \
    curl -fsSL "https://github.com/aquasecurity/trivy/releases/download/v${TRIVY_VERSION}/${trivy_tgz}" -o "/tmp/${trivy_tgz}"; \
    tar -C /usr/local/bin -xzf "/tmp/${trivy_tgz}" trivy; \
    rm -f "/tmp/${trivy_tgz}"; \
    trivy --version; \
    sonar-scanner -v

RUN set -eux; \
    mkdir -p "${TRIVY_CACHE_DIR}"; \
    trivy image --download-db-only --cache-dir "${TRIVY_CACHE_DIR}"; \
    chmod -R a+rX "${TRIVY_CACHE_DIR}"; \
    mkdir -p /workspace; \
    chown quality-tools:quality-tools /workspace

WORKDIR /workspace
USER quality-tools
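The sonar-scanner install above downloads a companion `.sha256` file and verifies the archive before unpacking. The same pattern in Python, as a sketch only, assuming the checksum file contains a bare hex digest as its first token; the function name is mine:

import hashlib
import urllib.request

def download_verified(url: str, dest: str) -> None:
    # Fetch the artifact, then the published digest, and refuse to proceed on mismatch.
    urllib.request.urlretrieve(url, dest)
    expected = urllib.request.urlopen(url + ".sha256").read().decode().split()[0]
    actual = hashlib.sha256(open(dest, "rb").read()).hexdigest()
    if actual != expected:
        raise RuntimeError(f"checksum mismatch for {dest}: {actual} != {expected}")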
@@ -33,6 +33,36 @@ spec:
        node-role.kubernetes.io/worker: "true"
      affinity:
        nodeAffinity:
          preferredDuringSchedulingIgnoredDuringExecution:
            - weight: 100
              preference:
                matchExpressions:
                  - key: atlas.bstein.dev/spillover
                    operator: DoesNotExist
            - weight: 95
              preference:
                matchExpressions:
                  - key: kubernetes.io/hostname
                    operator: NotIn
                    values:
                      - titan-13
                      - titan-15
                      - titan-17
                      - titan-19
            - weight: 90
              preference:
                matchExpressions:
                  - key: hardware
                    operator: In
                    values:
                      - rpi5
            - weight: 50
              preference:
                matchExpressions:
                  - key: hardware
                    operator: In
                    values:
                      - rpi4
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
              - matchExpressions:
@@ -46,6 +76,36 @@ spec:
        node-role.kubernetes.io/worker: "true"
      affinity:
        nodeAffinity:
          preferredDuringSchedulingIgnoredDuringExecution:
            - weight: 100
              preference:
                matchExpressions:
                  - key: atlas.bstein.dev/spillover
                    operator: DoesNotExist
            - weight: 95
              preference:
                matchExpressions:
                  - key: kubernetes.io/hostname
                    operator: NotIn
                    values:
                      - titan-13
                      - titan-15
                      - titan-17
                      - titan-19
            - weight: 90
              preference:
                matchExpressions:
                  - key: hardware
                    operator: In
                    values:
                      - rpi5
            - weight: 50
              preference:
                matchExpressions:
                  - key: hardware
                    operator: In
                    values:
                      - rpi4
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
              - matchExpressions:
@@ -59,6 +119,36 @@ spec:
        node-role.kubernetes.io/worker: "true"
      affinity:
        nodeAffinity:
          preferredDuringSchedulingIgnoredDuringExecution:
            - weight: 100
              preference:
                matchExpressions:
                  - key: atlas.bstein.dev/spillover
                    operator: DoesNotExist
            - weight: 95
              preference:
                matchExpressions:
                  - key: kubernetes.io/hostname
                    operator: NotIn
                    values:
                      - titan-13
                      - titan-15
                      - titan-17
                      - titan-19
            - weight: 90
              preference:
                matchExpressions:
                  - key: hardware
                    operator: In
                    values:
                      - rpi5
            - weight: 50
              preference:
                matchExpressions:
                  - key: hardware
                    operator: In
                    values:
                      - rpi4
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
              - matchExpressions:
|
||||
resources:
|
||||
- ../modules/base
|
||||
- ../modules/profiles/atlas-ha
|
||||
- node-prefer-noschedule-serviceaccount.yaml
|
||||
- node-prefer-noschedule-rbac.yaml
|
||||
- node-prefer-noschedule-cronjob.yaml
|
||||
- coredns-custom.yaml
|
||||
- coredns-deployment.yaml
|
||||
- ntp-sync-daemonset.yaml
|
||||
|
||||
35
infrastructure/core/node-prefer-noschedule-cronjob.yaml
Normal file
35
infrastructure/core/node-prefer-noschedule-cronjob.yaml
Normal file
@ -0,0 +1,35 @@
|
||||
# infrastructure/core/node-prefer-noschedule-cronjob.yaml
|
||||
apiVersion: batch/v1
|
||||
kind: CronJob
|
||||
metadata:
|
||||
name: node-prefer-noschedule
|
||||
namespace: kube-system
|
||||
spec:
|
||||
schedule: "*/20 * * * *"
|
||||
concurrencyPolicy: Forbid
|
||||
successfulJobsHistoryLimit: 1
|
||||
failedJobsHistoryLimit: 3
|
||||
jobTemplate:
|
||||
spec:
|
||||
backoffLimit: 1
|
||||
template:
|
||||
spec:
|
||||
serviceAccountName: node-prefer-noschedule
|
||||
restartPolicy: OnFailure
|
||||
containers:
|
||||
- name: taint
|
||||
image: bitnami/kubectl@sha256:554ab88b1858e8424c55de37ad417b16f2a0e65d1607aa0f3fe3ce9b9f10b131
|
||||
command:
|
||||
- /usr/bin/env
|
||||
- bash
|
||||
- -ceu
|
||||
- |
|
||||
for node in titan-13 titan-15 titan-17 titan-19; do
|
||||
if kubectl get node "${node}" >/dev/null 2>&1; then
|
||||
kubectl label node "${node}" atlas.bstein.dev/spillover=true --overwrite=true
|
||||
kubectl taint node "${node}" longhorn=true:PreferNoSchedule --overwrite=true
|
||||
kubectl taint node "${node}" atlas.bstein.dev/spillover=true:PreferNoSchedule --overwrite=true
|
||||
else
|
||||
echo "skipping missing node ${node}"
|
||||
fi
|
||||
done
|
||||
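Equivalent logic with the official Kubernetes Python client, sketched as an illustration only (the label and taint keys match the script above; note the caveat in the comments):

from kubernetes import client, config

SPILLOVER_NODES = ["titan-13", "titan-15", "titan-17", "titan-19"]

def mark_spillover() -> None:
    config.load_incluster_config()
    v1 = client.CoreV1Api()
    existing = {node.metadata.name for node in v1.list_node().items}
    for name in SPILLOVER_NODES:
        if name not in existing:
            print(f"skipping missing node {name}")
            continue
        # Caveat: this patch replaces spec.taints wholesale, unlike `kubectl taint`,
        # which merges with any taints already present on the node.
        v1.patch_node(name, {
            "metadata": {"labels": {"atlas.bstein.dev/spillover": "true"}},
            "spec": {"taints": [
                {"key": "longhorn", "value": "true", "effect": "PreferNoSchedule"},
                {"key": "atlas.bstein.dev/spillover", "value": "true", "effect": "PreferNoSchedule"},
            ]},
        })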
infrastructure/core/node-prefer-noschedule-rbac.yaml (new file, 22 lines)
@@ -0,0 +1,22 @@
# infrastructure/core/node-prefer-noschedule-rbac.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: node-prefer-noschedule
rules:
  - apiGroups: [""]
    resources: ["nodes"]
    verbs: ["get", "list", "patch"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: node-prefer-noschedule
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: node-prefer-noschedule
subjects:
  - kind: ServiceAccount
    name: node-prefer-noschedule
    namespace: kube-system

@@ -0,0 +1,6 @@
# infrastructure/core/node-prefer-noschedule-serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: node-prefer-noschedule
  namespace: kube-system
@@ -26,6 +26,9 @@ spec:
  cleanupOnFail: true
  timeout: 15m
  values:
    global:
      nodeSelector:
        longhorn-host: "true"
    service:
      ui:
        type: NodePort
@@ -78,3 +81,12 @@ spec:
      tag: v2.16.0
    defaultSettings:
      systemManagedPodsImagePullPolicy: Always
    longhornManager:
      nodeSelector:
        longhorn-host: "true"
    longhornDriver:
      nodeSelector:
        longhorn-host: "true"
    longhornUI:
      nodeSelector:
        longhorn-host: "true"

@@ -2,10 +2,11 @@
apiVersion: batch/v1
kind: Job
metadata:
  name: longhorn-settings-ensure-4
  name: longhorn-settings-ensure-7
  namespace: longhorn-system
spec:
  backoffLimit: 0
  activeDeadlineSeconds: 240
  ttlSecondsAfterFinished: 3600
  template:
    spec:
@@ -4,11 +4,12 @@ set -eu
# Longhorn blocks direct CR patches for some settings; use the internal API instead.

api_base="http://longhorn-backend.longhorn-system.svc:9500/v1/settings"
curl_opts="-fsS --connect-timeout 3 --max-time 15"

wait_for_api() {
  attempts=30
  while [ "${attempts}" -gt 0 ]; do
    if curl -fsS "${api_base}" >/dev/null 2>&1; then
    if curl ${curl_opts} "${api_base}" >/dev/null 2>&1; then
      return 0
    fi
    attempts=$((attempts - 1))
@@ -22,14 +23,14 @@ update_setting() {
  name="$1"
  value="$2"

  current="$(curl -fsS "${api_base}/${name}" || true)"
  current="$(curl ${curl_opts} "${api_base}/${name}" || true)"
  if echo "${current}" | grep -Fq "\"value\":\"${value}\""; then
    echo "Setting ${name} already set."
    return 0
  fi

  echo "Setting ${name} -> ${value}"
  curl -fsS -X PUT \
  curl ${curl_opts} -X PUT \
    -H "Content-Type: application/json" \
    -d "{\"value\":\"${value}\"}" \
    "${api_base}/${name}" >/dev/null
@@ -40,3 +41,7 @@ update_setting default-engine-image "registry.bstein.dev/infra/longhorn-engine:v
update_setting default-instance-manager-image "registry.bstein.dev/infra/longhorn-instance-manager:v1.8.2"
update_setting default-backing-image-manager-image "registry.bstein.dev/infra/longhorn-backing-image-manager:v1.8.2"
update_setting support-bundle-manager-image "registry.bstein.dev/infra/longhorn-support-bundle-kit:v0.0.56"
# Keep storage-heavy nodes from getting hammered by rebuild storms and skew.
update_setting replica-auto-balance "best-effort"
update_setting concurrent-replica-rebuild-per-node-limit "2"
update_setting node-down-pod-deletion-policy "delete-both-statefulset-and-deployment-pod"
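The same read-compare-PUT pattern in Python, as a sketch only; the `/v1/settings` endpoint is the one the script above targets, and error handling is deliberately minimal:

import requests

API_BASE = "http://longhorn-backend.longhorn-system.svc:9500/v1/settings"

def update_setting(name: str, value: str) -> None:
    # Skip the PUT when the setting already holds the desired value (idempotent).
    current = requests.get(f"{API_BASE}/{name}", timeout=15).json()
    if current.get("value") == value:
        print(f"Setting {name} already set.")
        return
    print(f"Setting {name} -> {value}")
    requests.put(f"{API_BASE}/{name}", json={"value": value}, timeout=15).raise_for_status()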
@@ -13,9 +13,27 @@ spec:
      - objectName: "harbor-pull__dockerconfigjson"
        secretPath: "kv/data/atlas/shared/harbor-pull"
        secretKey: "dockerconfigjson"
      - objectName: "longhorn-backup-b2__AWS_ACCESS_KEY_ID"
        secretPath: "kv/data/atlas/longhorn/backup-b2"
        secretKey: "AWS_ACCESS_KEY_ID"
      - objectName: "longhorn-backup-b2__AWS_SECRET_ACCESS_KEY"
        secretPath: "kv/data/atlas/longhorn/backup-b2"
        secretKey: "AWS_SECRET_ACCESS_KEY"
      - objectName: "longhorn-backup-b2__AWS_ENDPOINTS"
        secretPath: "kv/data/atlas/longhorn/backup-b2"
        secretKey: "AWS_ENDPOINTS"
  secretObjects:
    - secretName: longhorn-registry
      type: kubernetes.io/dockerconfigjson
      data:
        - objectName: harbor-pull__dockerconfigjson
          key: .dockerconfigjson
    - secretName: longhorn-backup-b2
      type: Opaque
      data:
        - objectName: longhorn-backup-b2__AWS_ACCESS_KEY_ID
          key: AWS_ACCESS_KEY_ID
        - objectName: longhorn-backup-b2__AWS_SECRET_ACCESS_KEY
          key: AWS_SECRET_ACCESS_KEY
        - objectName: longhorn-backup-b2__AWS_ENDPOINTS
          key: AWS_ENDPOINTS
@@ -26,6 +26,16 @@ spec:
                  - key: hardware
                    operator: In
                    values: ["rpi5", "rpi4"]
            - weight: 90
              preference:
                matchExpressions:
                  - key: kubernetes.io/hostname
                    operator: NotIn
                    values:
                      - titan-13
                      - titan-15
                      - titan-17
                      - titan-19
      containers:
        - name: sync
          image: alpine:3.20

@@ -25,6 +25,7 @@ spec:
      serviceAccountName: postgres-vault
      nodeSelector:
        node-role.kubernetes.io/worker: "true"
        hardware: rpi5
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
@@ -35,7 +36,17 @@ spec:
                    values: ["true"]
                  - key: hardware
                    operator: In
                    values: ["rpi4", "rpi5"]
                    values: ["rpi5"]
                  - key: kubernetes.io/hostname
                    operator: NotIn
                    values: ["titan-06"]
            preferredDuringSchedulingIgnoredDuringExecution:
              - weight: 100
                preference:
                  matchExpressions:
                    - key: kubernetes.io/hostname
                      operator: In
                      values: ["titan-05", "titan-07", "titan-08", "titan-11"]
      containers:
        - name: postgres
          image: postgres:15
@@ -70,6 +70,38 @@ items:
          dnsPolicy: ClusterFirst
          nodeSelector:
            node-role.kubernetes.io/worker: "true"
          affinity:
            nodeAffinity:
              preferredDuringSchedulingIgnoredDuringExecution:
                - weight: 100
                  preference:
                    matchExpressions:
                      - key: atlas.bstein.dev/spillover
                        operator: DoesNotExist
                - weight: 95
                  preference:
                    matchExpressions:
                      - key: kubernetes.io/hostname
                        operator: NotIn
                        values:
                          - titan-13
                          - titan-15
                          - titan-17
                          - titan-19
                - weight: 90
                  preference:
                    matchExpressions:
                      - key: hardware
                        operator: In
                        values:
                          - rpi5
                - weight: 50
                  preference:
                    matchExpressions:
                      - key: hardware
                        operator: In
                        values:
                          - rpi4
          restartPolicy: Always
          schedulerName: default-scheduler
          serviceAccount: atlas-traefik-ingress-controller

@@ -41,3 +41,12 @@ spec:
  failurePolicy: Ignore
  nodeSelector:
    node-role.kubernetes.io/worker: "true"
  affinity:
    nodeAffinity:
      preferredDuringSchedulingIgnoredDuringExecution:
        - weight: 100
          preference:
            matchExpressions:
              - key: kubernetes.io/hostname
                operator: NotIn
                values: ["titan-13", "titan-15", "titan-17", "titan-19"]
File diff suppressed because it is too large
@ -4,13 +4,21 @@ import pathlib
|
||||
|
||||
def load_module():
|
||||
path = pathlib.Path(__file__).resolve().parents[1] / "dashboards_render_atlas.py"
|
||||
spec = importlib.util.spec_from_file_location("dashboards_render_atlas", path)
|
||||
spec = importlib.util.spec_from_file_location("scripts.dashboards_render_atlas", path)
|
||||
module = importlib.util.module_from_spec(spec)
|
||||
assert spec.loader is not None
|
||||
spec.loader.exec_module(module)
|
||||
return module
|
||||
|
||||
|
||||
def flatten_panels(panels):
|
||||
flat = []
|
||||
for panel in panels:
|
||||
flat.append(panel)
|
||||
flat.extend(panel.get("panels", []))
|
||||
return flat
|
||||
|
||||
|
||||
def test_table_panel_options_and_filterable():
|
||||
mod = load_module()
|
||||
panel = mod.table_panel(
|
||||
@@ -42,6 +50,114 @@ def test_node_filter_and_expr_helpers():
    assert "node_memory_MemAvailable_bytes" in mem_expr


def test_overview_availability_panel_uses_recorded_365d_rollup():
    mod = load_module()
    dashboard = mod.build_overview()
    panel = next(panel for panel in flatten_panels(dashboard["panels"]) if panel["id"] == 27)

    assert panel["title"] == "Atlas Availability (365d)"
    assert panel["targets"][0]["expr"] == 'last_over_time(atlas:availability:ratio_365d{scope="atlas"}[24h])'
    assert panel["targets"][0]["instant"] is True
    assert "precomputed" in panel["description"]
    assert "last successful rollup for up to 24h" in panel["description"]


def test_overview_uses_readable_quality_power_and_gitops_panels():
    mod = load_module()
    dashboard = mod.build_overview()
    panels_by_title = {panel["title"]: panel for panel in flatten_panels(dashboard["panels"])}

    assert dashboard["links"] == [
        {"title": "Atlas Testing", "url": "/d/atlas-testing", "targetBlank": True}
    ]
    assert "atlas-jobs" not in repr(dashboard)
    assert "Platform Test Success Rate" not in panels_by_title
    assert panels_by_title["Test Category Pass Rate"]["type"] == "state-timeline"
    assert panels_by_title["Test Category Pass Rate"]["gridPos"] == {"h": 6, "w": 6, "x": 15, "y": 13}
    assert panels_by_title["Test Category Pass Rate"]["targets"][0]["legendFormat"] == "{{category}}"
    assert "${overview_suite:regex}" in panels_by_title["Test Category Pass Rate"]["targets"][0]["expr"]
    assert panels_by_title["UPS History (Power Draw)"]["gridPos"] == {"h": 6, "w": 6, "x": 3, "y": 7}
    assert panels_by_title["Ariadne Run Volume"]["gridPos"] == {"h": 6, "w": 6, "x": 9, "y": 7}
    assert panels_by_title["Pyrphoros UPS Current"]["gridPos"]["w"] == 3
    assert panels_by_title["Current Enclosure Climate"]["gridPos"]["w"] == 3
    assert panels_by_title["UPS History (Power Draw)"]["options"]["legend"]["placement"] == "bottom"
    assert panels_by_title["UPS History (Power Draw)"]["options"]["legend"]["displayMode"] == "list"
    assert panels_by_title["UPS History (Power Draw)"]["fieldConfig"]["defaults"]["custom"]["drawStyle"] == "line"
    assert panels_by_title["UPS History (Power Draw)"]["fieldConfig"]["defaults"]["custom"]["fillOpacity"] == 22
    assert all(target["expr"].startswith("max(") for target in panels_by_title["UPS History (Power Draw)"]["targets"])
    ups_overrides = panels_by_title["UPS History (Power Draw)"]["fieldConfig"]["overrides"]
    ups_override_by_name = {override["matcher"]["options"]: override for override in ups_overrides}
    assert ups_override_by_name["Pyrphoros"]["properties"] == [
        {"id": "color", "value": {"mode": "fixed", "fixedColor": "dark-blue"}},
    ]
    assert ups_override_by_name["Statera"]["properties"] == [
        {"id": "color", "value": {"mode": "fixed", "fixedColor": "dark-yellow"}},
    ]
    assert panels_by_title["Ariadne Run Volume"]["fieldConfig"]["defaults"]["custom"]["drawStyle"] == "bars"
    assert panels_by_title["Ariadne Run Volume"]["options"]["legend"]["placement"] == "bottom"
    assert panels_by_title["Ariadne Run Volume"]["options"]["legend"]["displayMode"] == "list"
    assert "Fan History (0-10)" not in panels_by_title
    assert panels_by_title["Fan Intensity History"]["type"] == "state-timeline"
    assert panels_by_title["Fan Intensity History"]["gridPos"] == {"h": 6, "w": 6, "x": 9, "y": 13}
    assert panels_by_title["Fan Intensity History"]["fieldConfig"]["defaults"]["max"] == 10
    assert panels_by_title["Fan Intensity History"]["targets"][0]["legendFormat"] == "{{fan}}"
    fan_steps = panels_by_title["Fan Intensity History"]["fieldConfig"]["defaults"]["thresholds"]["steps"]
    assert [step["value"] for step in fan_steps] == [None, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
    assert fan_steps[0]["color"] == "#8f1d1d"
    assert fan_steps[5]["color"] == "#d4b106"
    assert fan_steps[-1]["color"] == "#1f60c4"
    fan_mappings = panels_by_title["Fan Intensity History"]["fieldConfig"]["defaults"]["mappings"][0]["options"]
    assert fan_mappings["0"]["text"] == "Off"
    assert fan_mappings["10"]["text"] == "10"
    assert "Tent Interior" in panels_by_title["Fan Intensity History"]["targets"][0]["expr"]
    assert panels_by_title["Fan Intensity History"]["options"]["legend"]["displayMode"] == "list"
    assert panels_by_title["Fan Intensity History"]["options"]["legend"]["showLegend"] is False
    assert panels_by_title["Fan Intensity History"]["options"]["mergeValues"] is False
    assert panels_by_title["Fan Intensity History"]["options"]["showValue"] == "auto"

    assert panels_by_title["Flux Source"]["type"] == "stat"
    assert panels_by_title["Flux Source"]["gridPos"] == {"h": 2, "w": 3, "x": 21, "y": 7}
    assert panels_by_title["Flux Source"]["targets"][0]["legendFormat"] == "{{branch}}"
    assert panels_by_title["Run Reliability (24h)"]["gridPos"] == {"h": 2, "w": 3, "x": 21, "y": 9}
    assert panels_by_title["Suites With Runs (24h)"]["gridPos"] == {"h": 2, "w": 3, "x": 21, "y": 13}
    suites_reporting_expr = panels_by_title["Suites With Runs (24h)"]["targets"][0]["expr"]
    assert "> bool 0" in suites_reporting_expr
    assert mod.PLATFORM_TEST_SUITE_CANONICAL_MATCHER in suites_reporting_expr
    assert "bstein-home" not in suites_reporting_expr
    assert "published quality-gate run" in panels_by_title["Suites With Runs (24h)"]["description"]
    assert panels_by_title["LOC Clean Suites"]["gridPos"] == {"h": 2, "w": 3, "x": 21, "y": 17}
    assert panels_by_title["GitOps Health"]["type"] == "state-timeline"
    assert panels_by_title["GitOps Health"]["gridPos"] == {"h": 6, "w": 6, "x": 15, "y": 7}
    gitops_expr = panels_by_title["GitOps Health"]["targets"][0]["expr"]
    assert "Kustomizations Not Suspended" in gitops_expr
    assert "HelmReleases Not Suspended" in gitops_expr
    assert panels_by_title["Test Category Pass Rate"]["options"]["legend"]["displayMode"] == "list"
    assert panels_by_title["Test Category Pass Rate"]["options"]["legend"]["placement"] == "bottom"
    assert panels_by_title["Test Category Pass Rate"]["options"]["mergeValues"] is False
    assert panels_by_title["Test Category Pass Rate"]["options"]["showValue"] == "auto"
    assert "rowHeight" not in panels_by_title["Test Category Pass Rate"]["options"]
    assert any(variable["name"] == "overview_suite" for variable in dashboard["templating"]["list"])

    pvc_backup_expr = panels_by_title["PVC Backup Health / Age"]["targets"][0]["expr"]
    assert "backup-telemetry-missing" in pvc_backup_expr
    assert 'pvc_backup_(count|last_success_timestamp_seconds|health_reason)' in pvc_backup_expr

    gpu_expr = panels_by_title["Namespace GPU Share"]["targets"][0]["expr"]
    assert 'resource=~"nvidia(_com_|[.]com/)gpu.*"' in gpu_expr
    assert "/ on(node) group_left() clamp_min" in gpu_expr
    assert "kube_node_status_allocatable" in gpu_expr
    assert "kube_node_labels" not in gpu_expr


def test_overview_and_testing_panels_all_have_concise_descriptions():
    mod = load_module()

    for dashboard in [mod.build_overview(), mod.build_jobs_dashboard(), mod.build_testing_dashboard()]:
        panels = flatten_panels(dashboard["panels"])
        assert all(panel.get("description") for panel in panels if panel["type"] != "row")
        assert "85.7" not in repr(dashboard)


def test_render_configmap_writes(tmp_path):
    mod = load_module()
    mod.DASHBOARD_DIR = tmp_path / "dash"
@@ -56,3 +172,220 @@ def test_render_configmap_writes(tmp_path):
    content = (tmp_path / "cm.yaml").read_text()
    assert "kind: ConfigMap" in content
    assert f"{uid}.json" in content


def test_testing_suite_variable_uses_canonical_values_only():
    mod = load_module()
    variable = mod.testing_suite_variable()
    canonical_matcher = "|".join(mod.PLATFORM_TEST_SUITE_NAMES)
    legacy_names = {"bstein-home", "data-prepper", "titan-iac", "pegasus-health"}

    assert variable["allValue"] == canonical_matcher
    assert not any(alias in variable["query"] for alias in legacy_names)
    assert not any(alias in variable["allValue"] for alias in legacy_names)
    assert [option["value"] for option in variable["options"]] == mod.PLATFORM_TEST_SUITE_NAMES
    assert "lesavka" in variable["allValue"]
    assert "typhon" in variable["allValue"]


def test_testing_dashboard_is_public_but_jobs_dashboard_remains_internal():
    mod = load_module()
    jobs = mod.build_jobs_dashboard()
    testing = mod.build_testing_dashboard()

    assert jobs["folderUid"] == mod.PRIVATE_FOLDER
    assert jobs["editable"] is True
    assert testing["uid"] == "atlas-testing"
    assert testing["folderUid"] == mod.PUBLIC_DASHBOARD_FOLDER
    assert testing["editable"] is False


def test_jobs_dashboard_separates_current_gate_health_from_reliability():
    mod = load_module()
    dashboard = mod.build_jobs_dashboard()
    panels_by_title = {panel["title"]: panel for panel in flatten_panels(dashboard["panels"])}

    assert "Current Gate Health by Suite" in panels_by_title
    assert "Run Reliability by Suite (24h)" in panels_by_title
    assert "Run Reliability by Suite (7d rolling)" in panels_by_title
    assert "Daily Run Volume (Selected Scope)" in panels_by_title
    assert "Coverage History by Suite" in panels_by_title
    assert "Files <=500 LOC History by Suite" in panels_by_title
    assert "Run Reliability History by Suite" not in panels_by_title
    assert "Coverage & LOC Compliance History" not in panels_by_title
    assert "Run Status Mix (30d)" not in panels_by_title
    assert "Failures by Suite (24h)" not in panels_by_title
    assert "Success Rate by Suite (24h)" not in panels_by_title

    current_gate_expr = panels_by_title["Current Gate Health by Suite"]["targets"][0]["expr"]
    assert 'check)' in current_gate_expr
    assert "platform_quality:check_status:present_1h" in current_gate_expr
    assert "platform_quality_gate_checks_total" not in current_gate_expr
    assert 'status=~"ok|passed|success|not_applicable|skipped|na|n/a"' in current_gate_expr
    assert panels_by_title["Current Gate Health by Suite"]["gridPos"]["w"] == 6
    assert panels_by_title["Run Reliability by Suite (24h)"]["gridPos"]["w"] == 6
    assert panels_by_title["Coverage by Suite (Latest, gate 95)"]["gridPos"] == {"h": 7, "w": 6, "x": 12, "y": 4}
    assert panels_by_title["Files <=500 LOC by Suite (Latest)"]["gridPos"] == {"h": 7, "w": 6, "x": 18, "y": 4}

    reliability_panel = panels_by_title["Run Reliability by Suite (24h)"]
    reliability_expr = reliability_panel["targets"][0]["expr"]
    assert "platform_quality_gate_runs_total" in reliability_expr
    assert "> 0" in reliability_expr
    assert "- 1" in reliability_expr
    assert reliability_expr.startswith("sort(")
    assert reliability_panel["fieldConfig"]["defaults"]["mappings"] == [
        {"type": "value", "options": {"-1": {"text": "no runs"}}}
    ]

    rolling_panel = panels_by_title["Run Reliability by Suite (7d rolling)"]
    assert rolling_panel["type"] == "state-timeline"
    assert "[7d]" in rolling_panel["targets"][0]["expr"]
    category_panel = panels_by_title["Test Category Pass Rate History"]
    assert category_panel["type"] == "state-timeline"
    assert "category" in category_panel["targets"][0]["expr"]
    assert 'category!~"fixtures|golden|helpers"' in category_panel["targets"][0]["expr"]
    assert "Use the Suite filter" in category_panel["description"]
    assert "category-aware results" in category_panel["description"]

    coverage_panel = panels_by_title["Coverage History by Suite"]
    loc_panel = panels_by_title["Files <=500 LOC History by Suite"]
    assert coverage_panel["type"] == "state-timeline"
    assert loc_panel["type"] == "state-timeline"
    assert coverage_panel["targets"][0]["expr"] != loc_panel["targets"][0]["expr"]
    assert panels_by_title["Daily Run Volume (Selected Scope)"]["gridPos"] == {"h": 8, "w": 8, "x": 0, "y": 27}
    assert coverage_panel["gridPos"] == {"h": 8, "w": 8, "x": 8, "y": 27}
    assert loc_panel["gridPos"] == {"h": 8, "w": 8, "x": 16, "y": 27}

    run_volume_panel = panels_by_title["Daily Run Volume (Selected Scope)"]
    assert run_volume_panel["fieldConfig"]["defaults"]["custom"]["drawStyle"] == "bars"
    assert "[$__interval]" not in run_volume_panel["targets"][0]["expr"]


def test_jobs_dashboard_bar_gauges_use_solid_threshold_colors():
    mod = load_module()
    dashboard = mod.build_jobs_dashboard()
    panels = flatten_panels(dashboard["panels"])
    bar_gauges = [panel for panel in panels if panel["type"] == "bargauge"]

    assert bar_gauges
    assert all(panel["options"]["displayMode"] == "basic" for panel in bar_gauges)
    assert all(
        panel["fieldConfig"]["defaults"]["color"]["mode"] == "thresholds"
        for panel in bar_gauges
    )

    reliability_panel = next(
        panel for panel in panels if panel["title"] == "Run Reliability by Suite (24h)"
    )
    threshold_steps = reliability_panel["fieldConfig"]["defaults"]["thresholds"]["steps"]

    assert {"color": "dark-yellow", "value": 93} in threshold_steps
    assert {"color": "dark-blue", "value": 100} in threshold_steps


def test_jobs_dashboard_collapses_heavy_drilldowns_for_light_first_paint():
    mod = load_module()
    dashboard = mod.build_jobs_dashboard()
    panels = dashboard["panels"]
    rows = [panel for panel in panels if panel["type"] == "row"]
    visible_query_panels = [panel for panel in panels if panel["type"] != "row"]
    nested_panels_by_title = {
        child["title"]: child
        for row in rows
        for child in row.get("panels", [])
    }

    assert len(panels) == 16
    assert len(visible_query_panels) == 10
    assert sum(len(panel.get("targets", [])) for panel in visible_query_panels) == 10
    assert all(
        panel["title"] != "Coverage Gap to 95% by Suite"
        for panel in visible_query_panels
    )
    assert [row["title"] for row in rows] == [
        "Reliability And Run History",
        "Check Failure Rates By Suite",
        "Check Healthy Rates By Suite",
        "Test Drilldowns And Problem Tests",
        "Telemetry Completeness And Branches",
        "SonarQube Project Health",
    ]
    assert all(row["collapsed"] for row in rows)

    assert "Coverage Failure Rate" in nested_panels_by_title
    assert "Supply Chain Healthy Rate" in nested_panels_by_title
    assert "Test Category Pass Rate History" in nested_panels_by_title
    assert "Selected Test Pass Rate History" in nested_panels_by_title
    assert "Coverage Metrics Present by Suite" in nested_panels_by_title
    assert "SonarQube API Up" in nested_panels_by_title

    failure_rate_panel = nested_panels_by_title["Coverage Failure Rate"]
    assert failure_rate_panel["type"] == "state-timeline"
    assert failure_rate_panel["fieldConfig"]["defaults"]["unit"] == "percent"
    assert failure_rate_panel["fieldConfig"]["defaults"]["max"] == 100
    assert failure_rate_panel["fieldConfig"]["defaults"]["thresholds"]["steps"][0]["color"] == "dark-blue"
    assert "increase(" not in failure_rate_panel["targets"][0]["expr"]
    assert "platform_quality:check_status:present_1h" in failure_rate_panel["targets"][0]["expr"]
    assert "platform_quality_gate_checks_total" not in failure_rate_panel["targets"][0]["expr"]
    assert "0 *" in failure_rate_panel["targets"][0]["expr"]
    assert "and on(suite)" not in failure_rate_panel["targets"][0]["expr"]

    pass_rate_panel = nested_panels_by_title["Selected Test Pass Rate History"]
    assert pass_rate_panel["type"] == "state-timeline"
    assert "platform_quality:test_case_pass_rate:percent_1h" in pass_rate_panel["targets"][0]["expr"]
    assert "platform_quality_gate_test_case_result" not in pass_rate_panel["targets"][0]["expr"]

    pass_fail_panel = nested_panels_by_title["Selected Test Pass/Fail History"]
    assert pass_fail_panel["fieldConfig"]["defaults"]["custom"]["drawStyle"] == "bars"
    assert all(
        "platform_quality:test_case_status:count_1h" in target["expr"]
        for target in pass_fail_panel["targets"]
    )

    problematic_panel = nested_panels_by_title["Problematic Tests Over Time (Top failures)"]
    assert problematic_panel["type"] == "state-timeline"
    assert problematic_panel["gridPos"]["w"] == 24
    assert 'test!=""' in problematic_panel["targets"][0]["expr"]
    assert "vector(0)" not in problematic_panel["targets"][0]["expr"]
    assert problematic_panel["fieldConfig"]["defaults"]["thresholds"]["steps"] == [
        {"color": "dark-blue", "value": None},
        {"color": "dark-green", "value": 2},
        {"color": "dark-yellow", "value": 3},
        {"color": "dark-orange", "value": 5},
        {"color": "dark-red", "value": 8},
    ]
    assert "hourly bucket" in problematic_panel["description"]

    sonar_mix_panel = nested_panels_by_title["Sonar Gate Status Mix (Selected)"]
    sonar_health_panel = nested_panels_by_title["Sonar Gate Health by Project"]
    assert sonar_mix_panel["gridPos"]["w"] == 4
    assert sonar_health_panel["gridPos"]["w"] == 8
    assert sonar_health_panel["type"] == "state-timeline"
    assert "100 * max by (project_key)" in sonar_health_panel["targets"][0]["expr"]

    branch_panel = nested_panels_by_title["Primary Branch Clean by Suite (30d)"]
    recent_branch_panel = nested_panels_by_title["Recent Branch Evidence by Suite (30d)"]
    assert branch_panel["gridPos"]["x"] == 12
    assert recent_branch_panel["gridPos"]["x"] == 18
    assert branch_panel["fieldConfig"]["defaults"]["unit"] == "percent"
    assert "unless on(suite)" in branch_panel["targets"][0]["expr"]
    assert "> bool 0" in branch_panel["targets"][0]["expr"]
    assert branch_panel["targets"][0]["expr"].startswith("sort(")


def test_lesavka_jenkins_job_has_daily_refresh_trigger():
    casc = pathlib.Path("services/jenkins/configmap-jcasc.yaml").read_text()
    lesavka_block = casc.split("pipelineJob('lesavka')", 1)[1].split("pipelineJob(", 1)[0]

    assert "scmpoll_spec('H/5 * * * *')" in lesavka_block
    assert "cron" in lesavka_block
    assert "spec('H H * * *')" in lesavka_block


def test_typhon_jenkins_job_has_daily_refresh_trigger():
    casc = pathlib.Path("services/jenkins/configmap-jcasc.yaml").read_text()
    typhon_block = casc.split("pipelineJob('typhon')", 1)[1].split("pipelineJob(", 1)[0]

    assert "scmpoll_spec('H/5 * * * *')" in typhon_block
    assert "cron" in typhon_block
    assert "spec('H H * * *')" in typhon_block
@@ -138,6 +138,100 @@ def test_kc_get_users_paginates(monkeypatch):
    assert sync.SESSION.calls == 1


def test_kc_get_users_fetches_second_page_after_full_batch(monkeypatch):
    sync = load_sync_module(monkeypatch)

    class _PagedSession:
        def __init__(self):
            self.calls = 0
            self.first_params = []

        def get(self, *_, **kwargs):
            self.calls += 1
            self.first_params.append(kwargs["params"]["first"])
            if self.calls == 1:
                return _FakeResponse([{"id": f"u{i}"} for i in range(200)])
            return _FakeResponse([{"id": "last"}])

    sync.SESSION = _PagedSession()

    users = sync.kc_get_users("tok")

    assert len(users) == 201
    assert sync.SESSION.first_params == [0, 200]


def test_get_kc_token_posts_client_credentials(monkeypatch):
    sync = load_sync_module(monkeypatch)
    calls = []

    class _TokenSession:
        def post(self, url, data, timeout):
            calls.append((url, data, timeout))
            return _FakeResponse({"access_token": "tok"})

    sync.SESSION = _TokenSession()

    assert sync.get_kc_token() == "tok"
    assert calls[0][1]["grant_type"] == "client_credentials"


def test_retry_request_retries_then_succeeds(monkeypatch):
    sync = load_sync_module(monkeypatch)
    attempts = []
    sleeps = []

    def _flaky():
        attempts.append(1)
        if len(attempts) == 1:
            raise sync.requests.RequestException("temporary")
        return "ok"

    monkeypatch.setattr(sync.time, "sleep", lambda seconds: sleeps.append(seconds))

    assert sync.retry_request("request", _flaky, attempts=2) == "ok"
    assert sleeps == [2]


def test_retry_request_reraises_final_error(monkeypatch):
    sync = load_sync_module(monkeypatch)
    monkeypatch.setattr(sync.time, "sleep", lambda seconds: None)

    with pytest.raises(sync.requests.RequestException):
        sync.retry_request(
            "request",
            lambda: (_ for _ in ()).throw(sync.requests.RequestException("nope")),
            attempts=1,
        )


def test_retry_db_connect_retries_then_succeeds(monkeypatch):
    sync = load_sync_module(monkeypatch)
    attempts = []
    sleeps = []

    def _connect(**kwargs):
        attempts.append(kwargs)
        if len(attempts) == 1:
            raise sync.psycopg2.Error("not yet")
        return "conn"

    monkeypatch.setattr(sync.psycopg2, "connect", _connect)
    monkeypatch.setattr(sync.time, "sleep", lambda seconds: sleeps.append(seconds))

    assert sync.retry_db_connect(attempts=2) == "conn"
    assert sleeps == [2]


def test_retry_db_connect_reraises_final_error(monkeypatch):
    sync = load_sync_module(monkeypatch)
    monkeypatch.setattr(sync.psycopg2, "connect", lambda **kwargs: (_ for _ in ()).throw(sync.psycopg2.Error("down")))
    monkeypatch.setattr(sync.time, "sleep", lambda seconds: None)

    with pytest.raises(sync.psycopg2.Error):
        sync.retry_db_connect(attempts=1)
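Aside: the retry semantics these tests pin down (a 2-second pause after the first failure, the final error re-raised once the attempt budget is spent) fit a very small helper. A minimal sketch consistent with the assertions above, not the shipped implementation; the name and signature are taken from the tests:

import time
import requests


def retry_request(label, fn, attempts=3):
    # Retry `fn` up to `attempts` times, sleeping 2s * attempt-number between
    # tries; re-raise the last RequestException once the budget runs out.
    for attempt in range(1, attempts + 1):
        try:
            return fn()
        except requests.RequestException:
            if attempt == attempts:
                raise
            time.sleep(2 * attempt)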
def test_ensure_mailu_user_skips_foreign_domain(monkeypatch):
    sync = load_sync_module(monkeypatch)
    executed = []
@@ -166,6 +260,87 @@ def test_ensure_mailu_user_upserts(monkeypatch):
    assert captured["password"] != "pw"


def test_attribute_and_email_helpers(monkeypatch):
    sync = load_sync_module(monkeypatch)

    assert sync.get_attribute_value({"x": ["first", "second"]}, "x") == "first"
    assert sync.get_attribute_value({"x": []}, "x") is None
    assert sync.get_attribute_value({"x": "value"}, "x") == "value"
    assert sync.mailu_enabled({"mailu_email": ["legacy@example.com"]}) is True
    assert sync.mailu_enabled({"mailu_enabled": ["off"]}) is False
    assert sync.resolve_mailu_email({"username": "fallback", "email": "user@example.com"}, {}) == "user@example.com"
    assert sync.resolve_mailu_email({"username": "fallback", "email": "user@other.com"}, {}) == "fallback@example.com"


def test_safe_update_payload_filters_fields(monkeypatch):
    sync = load_sync_module(monkeypatch)

    payload = sync._safe_update_payload(
        {
            "username": "user",
            "enabled": True,
            "email": "user@example.com",
            "emailVerified": False,
            "firstName": "User",
            "lastName": "Example",
            "requiredActions": ["UPDATE_PASSWORD", 7],
            "attributes": "not-a-dict",
            "ignored": "value",
        }
    )

    assert payload == {
        "username": "user",
        "enabled": True,
        "email": "user@example.com",
        "emailVerified": False,
        "firstName": "User",
        "lastName": "Example",
        "requiredActions": ["UPDATE_PASSWORD"],
        "attributes": {},
    }


def test_ensure_system_mailboxes_handles_configurations(monkeypatch, capsys):
    sync = load_sync_module(monkeypatch)
    ensured = []
    monkeypatch.setattr(sync, "MAILU_SYSTEM_USERS", ["postmaster@example.com", "abuse"])
    monkeypatch.setattr(sync, "MAILU_SYSTEM_PASSWORD", "")

    sync.ensure_system_mailboxes(object())

    assert "MAILU_SYSTEM_PASSWORD is missing" in capsys.readouterr().out

    def _ensure(cursor, email, password, display_name):
        ensured.append((email, password, display_name))
        if email == "abuse":
            raise RuntimeError("boom")

    monkeypatch.setattr(sync, "MAILU_SYSTEM_PASSWORD", "pw")
    monkeypatch.setattr(sync, "ensure_mailu_user", _ensure)

    sync.ensure_system_mailboxes(object())

    out = capsys.readouterr().out
    assert ensured == [
        ("postmaster@example.com", "pw", "postmaster"),
        ("abuse", "pw", "abuse"),
    ]
    assert "Ensured system mailbox for postmaster@example.com" in out
    assert "Failed to ensure system mailbox abuse" in out


def test_main_exits_without_users_or_system_mailboxes(monkeypatch, capsys):
    sync = load_sync_module(monkeypatch)
    monkeypatch.setattr(sync, "MAILU_SYSTEM_USERS", [])
    monkeypatch.setattr(sync, "get_kc_token", lambda: "tok")
    monkeypatch.setattr(sync, "kc_get_users", lambda token: [])

    sync.main()

    assert "No users found; exiting." in capsys.readouterr().out


def test_main_generates_password_and_upserts(monkeypatch):
    sync = load_sync_module(monkeypatch)
    monkeypatch.setattr(sync.bcrypt_sha256, "hash", lambda password: f"hash:{password}")
scripts/tests/test_mailu_sync_listener.py (new file, 134 lines)
@@ -0,0 +1,134 @@
import importlib.util
import io
import pathlib
import types


def load_listener_module(monkeypatch):
    monkeypatch.setenv("MAILU_SYNC_WAIT_TIMEOUT_SEC", "0")
    module_path = (
        pathlib.Path(__file__).resolve().parents[2]
        / "services"
        / "mailu"
        / "scripts"
        / "mailu_sync_listener.py"
    )
    spec = importlib.util.spec_from_file_location("mailu_sync_listener_testmod", module_path)
    module = importlib.util.module_from_spec(spec)
    assert spec.loader is not None
    spec.loader.exec_module(module)
    return module


def _handler_for(listener, body):
    handler = listener.Handler.__new__(listener.Handler)
    raw = body if isinstance(body, bytes) else body.encode()
    handler.headers = {"Content-Length": str(len(raw))}
    handler.rfile = io.BytesIO(raw)
    handler.responses = []
    handler.headers_ended = 0
    handler.send_response = lambda code: handler.responses.append(code)
    handler.end_headers = lambda: setattr(handler, "headers_ended", handler.headers_ended + 1)
    return handler


def test_listener_run_sync_blocking_updates_state(monkeypatch):
    listener = load_listener_module(monkeypatch)
    monkeypatch.setattr(listener, "time", lambda: 42.0)
    monkeypatch.setattr(
        listener.subprocess,
        "run",
        lambda command, check: types.SimpleNamespace(returncode=3),
    )

    assert listener._run_sync_blocking() == 3
    assert listener.last_rc == 3
    assert listener.last_run == 42.0
    assert listener.sync_done.is_set()

    listener.sync_running = True
    assert listener._run_sync_blocking() == 0


def test_listener_trigger_sync_async_honors_running_and_debounce(monkeypatch):
    listener = load_listener_module(monkeypatch)
    starts = []

    class _Thread:
        def __init__(self, target, daemon):
            self.target = target
            self.daemon = daemon

        def start(self):
            starts.append((self.target, self.daemon))

    monkeypatch.setattr(listener.threading, "Thread", _Thread)
    monkeypatch.setattr(listener, "time", lambda: 100.0)

    listener.sync_running = True
    assert listener._trigger_sync_async() is False

    listener.sync_running = False
    listener.last_run = 95.0
    assert listener._trigger_sync_async() is False

    assert listener._trigger_sync_async(force=True) is True
    assert starts and starts[0][1] is True


def test_listener_post_rejects_invalid_json(monkeypatch):
    listener = load_listener_module(monkeypatch)
    handler = _handler_for(listener, b"{not-json")

    handler.do_POST()

    assert handler.responses == [400]
    assert handler.headers_ended == 1


def test_listener_post_triggers_async_without_wait(monkeypatch):
    listener = load_listener_module(monkeypatch)
    called = []
    monkeypatch.setattr(listener, "_trigger_sync_async", lambda force=False: called.append(force) or True)
    handler = _handler_for(listener, '{"force": true}')

    handler.do_POST()

    assert called == [True]
    assert handler.responses == [202]


def test_listener_post_wait_returns_success_or_failure(monkeypatch):
    listener = load_listener_module(monkeypatch)
    called = []
    monkeypatch.setattr(listener, "_trigger_sync_async", lambda force=False: called.append(force) or True)
    listener.sync_running = False
    listener.last_rc = 0
    handler = _handler_for(listener, '{"wait": true, "force": true}')

    handler.do_POST()

    assert called == [True]
    assert handler.responses == [200]

    listener.last_rc = 2
    handler = _handler_for(listener, '{"wait": true}')
    handler.do_POST()
    assert handler.responses == [500]


def test_listener_post_wait_keeps_running_request_successful(monkeypatch):
    listener = load_listener_module(monkeypatch)
    listener.sync_running = True
    handler = _handler_for(listener, '{"wait": true}')

    handler.do_POST()

    assert handler.responses == [200]


def test_listener_log_message_is_quiet(monkeypatch):
    listener = load_listener_module(monkeypatch)
    handler = listener.Handler.__new__(listener.Handler)

    assert handler.log_message("ignored %s", "value") is None
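Aside: the request shape these tests exercise is plain JSON over HTTP. A sketch of poking the listener by hand; the service hostname and port below are assumptions for illustration (neither appears in this diff), and only the body fields and the 202/200/400/500 status codes are pinned down by the tests above:

import json
import urllib.request

# Hypothetical in-cluster address; substitute the real Service name and port.
req = urllib.request.Request(
    "http://mailu-sync-listener.mailu.svc.cluster.local:8080/",
    data=json.dumps({"wait": True, "force": True}).encode(),
    headers={"Content-Type": "application/json"},
    method="POST",
)
with urllib.request.urlopen(req) as resp:
    print(resp.status)  # 200 when the awaited sync succeeded, 500 otherwise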
scripts/verify_jenkins_workspace_cleanup_rollout.sh (new executable file, 73 lines)
@@ -0,0 +1,73 @@
#!/usr/bin/env bash
set -euo pipefail

MODE="${1:-dry-run}"
if [[ "$MODE" != "dry-run" && "$MODE" != "active" ]]; then
  echo "usage: $0 [dry-run|active]" >&2
  exit 2
fi

EXPECTED_DRY_RUN="true"
PROM_MODE="dry_run"
if [[ "$MODE" == "active" ]]; then
  EXPECTED_DRY_RUN="false"
  PROM_MODE="delete"
fi

KUSTOMIZATION="${KUSTOMIZATION:-maintenance}"
NAMESPACE="${NAMESPACE:-maintenance}"
DEPLOYMENT="${DEPLOYMENT:-ariadne}"
LOCAL_METRICS_PORT="${LOCAL_METRICS_PORT:-18080}"

for cmd in flux kubectl curl grep awk; do
  if ! command -v "$cmd" >/dev/null 2>&1; then
    echo "missing required command: $cmd" >&2
    exit 2
  fi
done

echo "[1/5] reconcile Flux kustomization: ${KUSTOMIZATION}"
flux reconcile kustomization "$KUSTOMIZATION" --namespace flux-system --with-source

echo "[2/5] wait for deployment rollout"
kubectl -n "$NAMESPACE" rollout status "deployment/$DEPLOYMENT" --timeout=5m

echo "[3/5] verify ariadne env wiring"
ENV_DUMP="$(kubectl -n "$NAMESPACE" get deployment "$DEPLOYMENT" -o jsonpath='{range .spec.template.spec.containers[0].env[*]}{.name}={.value}{"\n"}{end}')"
echo "$ENV_DUMP" | grep -F "ARIADNE_SCHEDULE_JENKINS_WORKSPACE_CLEANUP=45 */6 * * *"
echo "$ENV_DUMP" | grep -F "JENKINS_WORKSPACE_NAMESPACE=jenkins"
echo "$ENV_DUMP" | grep -F "JENKINS_WORKSPACE_PVC_PREFIX=pvc-workspace-"
echo "$ENV_DUMP" | grep -F "JENKINS_WORKSPACE_CLEANUP_MIN_AGE_HOURS=24"
echo "$ENV_DUMP" | grep -F "JENKINS_WORKSPACE_CLEANUP_DRY_RUN=${EXPECTED_DRY_RUN}"
echo "$ENV_DUMP" | grep -F "JENKINS_WORKSPACE_CLEANUP_MAX_DELETIONS_PER_RUN=20"

echo "[4/5] scrape /metrics and confirm cleanup metrics are exported"
PF_LOG="$(mktemp)"
METRICS_FILE="$(mktemp)"
cleanup() {
  if [[ -n "${PF_PID:-}" ]]; then
    kill "$PF_PID" >/dev/null 2>&1 || true
    wait "$PF_PID" 2>/dev/null || true
  fi
  rm -f "$PF_LOG" "$METRICS_FILE"
}
trap cleanup EXIT

kubectl -n "$NAMESPACE" port-forward "deployment/$DEPLOYMENT" "${LOCAL_METRICS_PORT}:8080" >"$PF_LOG" 2>&1 &
PF_PID=$!
sleep 2
curl -fsS "http://127.0.0.1:${LOCAL_METRICS_PORT}/metrics" >"$METRICS_FILE"
grep -F "# HELP ariadne_jenkins_workspace_cleanup_runs_total" "$METRICS_FILE"
grep -F "# HELP ariadne_jenkins_workspace_cleanup_objects_total" "$METRICS_FILE"

echo "[5/5] show recent cleanup signal"
if grep -q "ariadne_jenkins_workspace_cleanup_runs_total" "$METRICS_FILE"; then
  grep "ariadne_jenkins_workspace_cleanup_runs_total" "$METRICS_FILE" | grep "mode=\"${PROM_MODE}\"" || true
else
  echo "No run counter sample yet for mode=${PROM_MODE}; wait for schedule window and re-run." >&2
fi

echo "Recent cleanup logs (if any):"
kubectl -n "$NAMESPACE" logs "deployment/$DEPLOYMENT" --tail=500 | grep -i "jenkins workspace cleanup" | tail -n 20 || true

echo "verification complete for mode=${MODE}"
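Aside: step [4/5] can be reproduced from Python while the script's port-forward is still running. A minimal sketch, assuming the default LOCAL_METRICS_PORT of 18080:

import urllib.request

# Re-check the exporter through the existing port-forward on 127.0.0.1:18080.
metrics = urllib.request.urlopen("http://127.0.0.1:18080/metrics").read().decode()
assert "# HELP ariadne_jenkins_workspace_cleanup_runs_total" in metrics
assert "# HELP ariadne_jenkins_workspace_cleanup_objects_total" in metrics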
@@ -5,7 +5,7 @@ metadata:
  name: ollama
  namespace: ai
spec:
-  replicas: 1
+  replicas: 0
  revisionHistoryLimit: 2
  strategy:
    type: RollingUpdate
@@ -21,7 +21,7 @@ spec:
        app: ollama
      annotations:
        ai.bstein.dev/model: qwen2.5:14b-instruct-q4_0
-        ai.bstein.dev/gpu: GPU pool (titan-22/24)
+        ai.bstein.dev/gpu: GPU pool (titan-20/21)
        ai.bstein.dev/restartedAt: "2026-01-26T12:00:00Z"
    spec:
      affinity:
@@ -32,13 +32,13 @@ spec:
              - key: kubernetes.io/hostname
                operator: In
                values:
-               - titan-22
-               - titan-24
+               - titan-20
+               - titan-21
      runtimeClassName: nvidia
      volumes:
      - name: models
        persistentVolumeClaim:
-          claimName: ollama-models
+          claimName: ollama-models-asteria
      initContainers:
      - name: warm-model
        image: ollama/ollama@sha256:2c9595c555fd70a28363489ac03bd5bf9e7c5bdf2890373c3a830ffd7252ce6d
@@ -2,12 +2,12 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
-  name: ollama-models
+  name: ollama-models-asteria
  namespace: ai
spec:
  accessModes:
-  - ReadWriteOnce
+  - ReadWriteMany
  resources:
    requests:
      storage: 30Gi
-  storageClassName: astreae
+  storageClassName: asteria
@@ -49,6 +49,15 @@ spec:
      nodeSelector:
        kubernetes.io/arch: arm64
        node-role.kubernetes.io/worker: "true"
      affinity:
        nodeAffinity:
          preferredDuringSchedulingIgnoredDuringExecution:
          - weight: 100
            preference:
              matchExpressions:
              - key: kubernetes.io/hostname
                operator: NotIn
                values: ["titan-13", "titan-15", "titan-17", "titan-19"]
      imagePullSecrets:
      - name: harbor-regcred
      containers:
@@ -38,6 +38,36 @@ spec:
      nodeSelector:
        kubernetes.io/arch: arm64
        node-role.kubernetes.io/worker: "true"
      affinity:
        nodeAffinity:
          preferredDuringSchedulingIgnoredDuringExecution:
          - weight: 100
            preference:
              matchExpressions:
              - key: atlas.bstein.dev/spillover
                operator: DoesNotExist
          - weight: 95
            preference:
              matchExpressions:
              - key: kubernetes.io/hostname
                operator: NotIn
                values:
                - titan-13
                - titan-15
                - titan-17
                - titan-19
          - weight: 90
            preference:
              matchExpressions:
              - key: hardware
                operator: In
                values: ["rpi5"]
          - weight: 50
            preference:
              matchExpressions:
              - key: hardware
                operator: In
                values: ["rpi4"]
      containers:
      - name: gateway
        image: python:3.11-slim
@@ -26,7 +26,7 @@ spec:
          imagePullPolicy: Always
          ports:
          - name: http
-           containerPort: 80
+           containerPort: 8080
          readinessProbe:
            httpGet:
              path: /
@@ -10,4 +10,4 @@ spec:
  ports:
  - name: http
    port: 80
-    targetPort: 80
+    targetPort: 8080
@@ -15,13 +15,14 @@ resources:
- frontend-service.yaml
- backend-deployment.yaml
- backend-service.yaml
- vaultwarden-cred-sync-cronjob.yaml
- oneoffs/portal-onboarding-e2e-test-job.yaml
- ingress.yaml
images:
- name: registry.bstein.dev/bstein/bstein-dev-home-frontend
-  newTag: 0.1.1-120 # {"$imagepolicy": "bstein-dev-home:bstein-dev-home-frontend:tag"}
+  newTag: 0.1.1-281 # {"$imagepolicy": "bstein-dev-home:bstein-dev-home-frontend:tag"}
- name: registry.bstein.dev/bstein/bstein-dev-home-backend
-  newTag: 0.1.1-123 # {"$imagepolicy": "bstein-dev-home:bstein-dev-home-backend:tag"}
+  newTag: 0.1.1-281 # {"$imagepolicy": "bstein-dev-home:bstein-dev-home-backend:tag"}
configMapGenerator:
- name: chat-ai-gateway
  namespace: bstein-dev-home
@@ -29,6 +30,12 @@ configMapGenerator:
  - gateway.py=scripts/gateway.py
  options:
    disableNameSuffixHash: true
- name: vaultwarden-cred-sync-script
  namespace: bstein-dev-home
  files:
  - vaultwarden_cred_sync.py=scripts/vaultwarden_cred_sync.py
  options:
    disableNameSuffixHash: true
- name: portal-onboarding-e2e-tests
  namespace: bstein-dev-home
  files:
services/bstein-dev-home/scripts/vaultwarden_cred_sync.py (new file, 245 lines)
@@ -0,0 +1,245 @@
#!/usr/bin/env python3

from __future__ import annotations

import os
import sys
import time
from datetime import datetime, timezone
from typing import Any, Iterable

import httpx

from atlas_portal import settings
from atlas_portal.keycloak import admin_client
from atlas_portal.vaultwarden import invite_user


VAULTWARDEN_EMAIL_ATTR = "vaultwarden_email"
VAULTWARDEN_STATUS_ATTR = "vaultwarden_status"
VAULTWARDEN_SYNCED_AT_ATTR = "vaultwarden_synced_at"
VAULTWARDEN_RETRY_COOLDOWN_SEC = int(os.getenv("VAULTWARDEN_RETRY_COOLDOWN_SEC", "1800"))
VAULTWARDEN_FAILURE_BAILOUT = int(os.getenv("VAULTWARDEN_FAILURE_BAILOUT", "2"))


def _iter_keycloak_users(page_size: int = 200) -> Iterable[dict[str, Any]]:
    client = admin_client()
    if not client.ready():
        raise RuntimeError("keycloak admin client not configured")

    url = f"{settings.KEYCLOAK_ADMIN_URL}/admin/realms/{settings.KEYCLOAK_REALM}/users"
    first = 0
    while True:
        headers = _headers_with_retry(client)
        # We need attributes for idempotency (vaultwarden_status/vaultwarden_email). Keycloak defaults to a
        # brief representation which may omit these.
        params = {"first": str(first), "max": str(page_size), "briefRepresentation": "false"}
        payload = None
        for attempt in range(1, 6):
            try:
                with httpx.Client(timeout=settings.HTTP_CHECK_TIMEOUT_SEC) as http:
                    resp = http.get(url, params=params, headers=headers)
                    resp.raise_for_status()
                    payload = resp.json()
                break
            except httpx.HTTPError as exc:
                if attempt == 5:
                    raise
                time.sleep(attempt * 2)

        if not isinstance(payload, list) or not payload:
            return

        for item in payload:
            if isinstance(item, dict):
                yield item

        if len(payload) < page_size:
            return
        first += page_size


def _headers_with_retry(client, attempts: int = 6) -> dict[str, str]:
    last_exc: Exception | None = None
    for attempt in range(1, attempts + 1):
        try:
            return client.headers()
        except Exception as exc:
            last_exc = exc
            time.sleep(attempt * 2)
    if last_exc:
        raise last_exc
    raise RuntimeError("failed to fetch keycloak headers")


def _extract_attr(attrs: Any, key: str) -> str:
    if not isinstance(attrs, dict):
        return ""
    raw = attrs.get(key)
    if isinstance(raw, list):
        for item in raw:
            if isinstance(item, str) and item.strip():
                return item.strip()
        return ""
    if isinstance(raw, str) and raw.strip():
        return raw.strip()
    return ""


def _parse_synced_at(value: str) -> float | None:
    value = (value or "").strip()
    if not value:
        return None
    for fmt in ("%Y-%m-%dT%H:%M:%SZ", "%Y-%m-%dT%H:%M:%S%z"):
        try:
            parsed = datetime.strptime(value, fmt)
            if parsed.tzinfo is None:
                parsed = parsed.replace(tzinfo=timezone.utc)
            return parsed.timestamp()
        except ValueError:
            continue
    return None


def _vaultwarden_email_for_user(user: dict[str, Any]) -> str:
    username = (user.get("username") if isinstance(user.get("username"), str) else "") or ""
    username = username.strip()
    if not username:
        return ""

    attrs = user.get("attributes")
    vaultwarden_email = _extract_attr(attrs, VAULTWARDEN_EMAIL_ATTR)
    if vaultwarden_email:
        return vaultwarden_email

    mailu_email = _extract_attr(attrs, "mailu_email")
    if mailu_email:
        return mailu_email

    email = (user.get("email") if isinstance(user.get("email"), str) else "") or ""
    email = email.strip()
    if email and email.lower().endswith(f"@{settings.MAILU_DOMAIN.lower()}"):
        return email

    # Don't guess an internal mailbox address until Mailu sync has run and stored mailu_email.
    # This avoids spamming Vaultwarden invites that can never be delivered (unknown recipient).
    return ""


def _set_user_attribute_if_missing(username: str, user: dict[str, Any], key: str, value: str) -> None:
    value = (value or "").strip()
    if not value:
        return
    existing = _extract_attr(user.get("attributes"), key)
    if existing:
        return
    admin_client().set_user_attribute(username, key, value)


def _set_user_attribute(username: str, key: str, value: str) -> None:
    value = (value or "").strip()
    if not value:
        return
    admin_client().set_user_attribute(username, key, value)


def main() -> int:
    processed = 0
    created = 0
    skipped = 0
    failures = 0
    consecutive_failures = 0

    for user in _iter_keycloak_users():
        username = (user.get("username") if isinstance(user.get("username"), str) else "") or ""
        username = username.strip()
        if not username:
            skipped += 1
            continue

        enabled = user.get("enabled")
        if enabled is False:
            skipped += 1
            continue

        if user.get("serviceAccountClientId") or username.startswith("service-account-"):
            skipped += 1
            continue

        # Fetch the full user payload so we can reliably read attributes (and skip re-invites).
        user_id = (user.get("id") if isinstance(user.get("id"), str) else "") or ""
        user_id = user_id.strip()
        full_user = user
        if user_id:
            try:
                full_user = admin_client().get_user(user_id)
            except Exception:
                full_user = user

        current_status = _extract_attr(full_user.get("attributes"), VAULTWARDEN_STATUS_ATTR)
        current_synced_at = _extract_attr(full_user.get("attributes"), VAULTWARDEN_SYNCED_AT_ATTR)
        current_synced_ts = _parse_synced_at(current_synced_at)
        if current_status in {"rate_limited", "error"} and current_synced_ts:
            if time.time() - current_synced_ts < VAULTWARDEN_RETRY_COOLDOWN_SEC:
                skipped += 1
                continue

        email = _vaultwarden_email_for_user(full_user)
        if not email:
            print(f"skip {username}: missing email", file=sys.stderr)
            skipped += 1
            continue

        try:
            _set_user_attribute_if_missing(username, full_user, VAULTWARDEN_EMAIL_ATTR, email)
        except Exception:
            pass

        # If we've already successfully invited or confirmed presence, do not re-invite on every cron run.
        # Vaultwarden returns 409 for "already exists", which is idempotent but noisy and can trigger rate limits.
        if current_status in {"invited", "already_present"}:
            if not current_synced_at:
                try:
                    _set_user_attribute(
                        username,
                        VAULTWARDEN_SYNCED_AT_ATTR,
                        time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime()),
                    )
                except Exception:
                    pass
            skipped += 1
            continue

        processed += 1
        result = invite_user(email)
        if result.ok:
            created += 1
            consecutive_failures = 0
            print(f"ok {username}: {result.status}")
            try:
                _set_user_attribute(username, VAULTWARDEN_STATUS_ATTR, result.status)
                _set_user_attribute(username, VAULTWARDEN_SYNCED_AT_ATTR, time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime()))
            except Exception:
                pass
        else:
            failures += 1
            if result.status in {"rate_limited", "error"}:
                consecutive_failures += 1
            print(f"err {username}: {result.status} {result.detail}", file=sys.stderr)
            try:
                _set_user_attribute(username, VAULTWARDEN_STATUS_ATTR, result.status)
                _set_user_attribute(username, VAULTWARDEN_SYNCED_AT_ATTR, time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime()))
            except Exception:
                pass
            if consecutive_failures >= VAULTWARDEN_FAILURE_BAILOUT:
                print("vaultwarden: too many consecutive failures; aborting run", file=sys.stderr)
                break

    print(
        f"done processed={processed} created_or_present={created} skipped={skipped} failures={failures}",
        file=sys.stderr,
    )
    return 0 if failures == 0 else 2


if __name__ == "__main__":
    raise SystemExit(main())
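Aside: the rate-limit cooldown is the piece of main() most worth sanity-checking in isolation. An illustrative sketch test, not part of the diff; it assumes the module has been loaded as sync_mod via importlib, the way the other test modules in this diff load scripts:

import time

def test_recent_failure_is_inside_cooldown(sync_mod):
    # A user stamped "rate_limited" moments ago must still be inside the
    # default 1800s window, so main() skips it instead of re-inviting.
    stamp = time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime())
    synced_ts = sync_mod._parse_synced_at(stamp)
    assert synced_ts is not None
    assert time.time() - synced_ts < sync_mod.VAULTWARDEN_RETRY_COOLDOWN_SEC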
services/bstein-dev-home/vaultwarden-cred-sync-cronjob.yaml (new file, 86 lines)
@@ -0,0 +1,86 @@
# services/bstein-dev-home/vaultwarden-cred-sync-cronjob.yaml
apiVersion: batch/v1
kind: CronJob
metadata:
  name: vaultwarden-cred-sync
  namespace: bstein-dev-home
  labels:
    atlas.bstein.dev/glue: "true"
spec:
  schedule: "*/15 * * * *"
  suspend: true
  concurrencyPolicy: Forbid
  successfulJobsHistoryLimit: 1
  failedJobsHistoryLimit: 3
  jobTemplate:
    spec:
      backoffLimit: 0
      template:
        metadata:
          annotations:
            vault.hashicorp.com/agent-inject: "true"
            vault.hashicorp.com/agent-pre-populate-only: "true"
            vault.hashicorp.com/role: "bstein-dev-home"
            vault.hashicorp.com/agent-inject-secret-portal-env.sh: "kv/data/atlas/portal/atlas-portal-db"
            vault.hashicorp.com/agent-inject-template-portal-env.sh: |
              {{ with secret "kv/data/atlas/portal/atlas-portal-db" }}
              export PORTAL_DATABASE_URL="{{ .Data.data.PORTAL_DATABASE_URL }}"
              {{ end }}
              {{ with secret "kv/data/atlas/portal/bstein-dev-home-keycloak-admin" }}
              export KEYCLOAK_ADMIN_CLIENT_SECRET="{{ .Data.data.client_secret }}"
              {{ end }}
              {{ with secret "kv/data/atlas/shared/chat-ai-keys-runtime" }}
              export CHAT_KEY_MATRIX="{{ .Data.data.matrix }}"
              export CHAT_KEY_HOMEPAGE="{{ .Data.data.homepage }}"
              {{ end }}
              {{ with secret "kv/data/atlas/shared/portal-e2e-client" }}
              export PORTAL_E2E_CLIENT_ID="{{ .Data.data.client_id }}"
              export PORTAL_E2E_CLIENT_SECRET="{{ .Data.data.client_secret }}"
              {{ end }}
        spec:
          serviceAccountName: bstein-dev-home
          restartPolicy: Never
          nodeSelector:
            kubernetes.io/arch: arm64
            node-role.kubernetes.io/worker: "true"
          imagePullSecrets:
          - name: harbor-regcred
          containers:
          - name: sync
            image: registry.bstein.dev/bstein/bstein-dev-home-backend:0.1.1-95
            imagePullPolicy: Always
            command: ["/bin/sh", "-c"]
            args:
            - >-
              . /vault/secrets/portal-env.sh
              && exec python /scripts/vaultwarden_cred_sync.py
            env:
            - name: PYTHONPATH
              value: /app
            - name: KEYCLOAK_ENABLED
              value: "true"
            - name: KEYCLOAK_REALM
              value: atlas
            - name: KEYCLOAK_ADMIN_URL
              value: http://keycloak.sso.svc.cluster.local
            - name: KEYCLOAK_ADMIN_REALM
              value: atlas
            - name: KEYCLOAK_ADMIN_CLIENT_ID
              value: bstein-dev-home-admin
            - name: HTTP_CHECK_TIMEOUT_SEC
              value: "20"
            - name: VAULTWARDEN_ADMIN_SESSION_TTL_SEC
              value: "900"
            - name: VAULTWARDEN_RETRY_COOLDOWN_SEC
              value: "1800"
            - name: VAULTWARDEN_FAILURE_BAILOUT
              value: "2"
            volumeMounts:
            - name: vaultwarden-cred-sync-script
              mountPath: /scripts
              readOnly: true
          volumes:
          - name: vaultwarden-cred-sync-script
            configMap:
              name: vaultwarden-cred-sync-script
              defaultMode: 0555
services/comms/guest-name-job.yaml (new file, 471 lines)
@@ -0,0 +1,471 @@
# services/comms/guest-name-job.yaml
apiVersion: batch/v1
kind: CronJob
metadata:
  name: guest-name-randomizer
  namespace: comms
  labels:
    atlas.bstein.dev/glue: "true"
spec:
  schedule: "*/1 * * * *"
  suspend: true
  concurrencyPolicy: Forbid
  successfulJobsHistoryLimit: 1
  failedJobsHistoryLimit: 1
  jobTemplate:
    spec:
      backoffLimit: 0
      template:
        metadata:
          annotations:
            vault.hashicorp.com/agent-inject: "true"
            vault.hashicorp.com/agent-pre-populate-only: "true"
            vault.hashicorp.com/role: "comms"
            vault.hashicorp.com/agent-inject-secret-turn-secret: "kv/data/atlas/comms/turn-shared-secret"
            vault.hashicorp.com/agent-inject-template-turn-secret: |
              {{- with secret "kv/data/atlas/comms/turn-shared-secret" -}}{{ .Data.data.TURN_STATIC_AUTH_SECRET }}{{- end -}}
            vault.hashicorp.com/agent-inject-secret-livekit-primary: "kv/data/atlas/comms/livekit-api"
            vault.hashicorp.com/agent-inject-template-livekit-primary: |
              {{- with secret "kv/data/atlas/comms/livekit-api" -}}{{ .Data.data.primary }}{{- end -}}
            vault.hashicorp.com/agent-inject-secret-bot-pass: "kv/data/atlas/comms/atlasbot-credentials-runtime"
            vault.hashicorp.com/agent-inject-template-bot-pass: |
              {{- with secret "kv/data/atlas/comms/atlasbot-credentials-runtime" -}}{{ index .Data.data "bot-password" }}{{- end -}}
            vault.hashicorp.com/agent-inject-secret-seeder-pass: "kv/data/atlas/comms/atlasbot-credentials-runtime"
            vault.hashicorp.com/agent-inject-template-seeder-pass: |
              {{- with secret "kv/data/atlas/comms/atlasbot-credentials-runtime" -}}{{ index .Data.data "seeder-password" }}{{- end -}}
            vault.hashicorp.com/agent-inject-secret-chat-matrix: "kv/data/atlas/shared/chat-ai-keys-runtime"
            vault.hashicorp.com/agent-inject-template-chat-matrix: |
              {{- with secret "kv/data/atlas/shared/chat-ai-keys-runtime" -}}{{ .Data.data.matrix }}{{- end -}}
            vault.hashicorp.com/agent-inject-secret-chat-homepage: "kv/data/atlas/shared/chat-ai-keys-runtime"
            vault.hashicorp.com/agent-inject-template-chat-homepage: |
              {{- with secret "kv/data/atlas/shared/chat-ai-keys-runtime" -}}{{ .Data.data.homepage }}{{- end -}}
            vault.hashicorp.com/agent-inject-secret-mas-admin-secret: "kv/data/atlas/comms/mas-admin-client-runtime"
            vault.hashicorp.com/agent-inject-template-mas-admin-secret: |
              {{- with secret "kv/data/atlas/comms/mas-admin-client-runtime" -}}{{ .Data.data.client_secret }}{{- end -}}
            vault.hashicorp.com/agent-inject-secret-synapse-db-pass: "kv/data/atlas/comms/synapse-db"
            vault.hashicorp.com/agent-inject-template-synapse-db-pass: |
              {{- with secret "kv/data/atlas/comms/synapse-db" -}}{{ .Data.data.POSTGRES_PASSWORD }}{{- end -}}
            vault.hashicorp.com/agent-inject-secret-mas-db-pass: "kv/data/atlas/comms/mas-db"
            vault.hashicorp.com/agent-inject-template-mas-db-pass: |
              {{- with secret "kv/data/atlas/comms/mas-db" -}}{{ .Data.data.password }}{{- end -}}
            vault.hashicorp.com/agent-inject-secret-mas-matrix-shared: "kv/data/atlas/comms/mas-secrets-runtime"
            vault.hashicorp.com/agent-inject-template-mas-matrix-shared: |
              {{- with secret "kv/data/atlas/comms/mas-secrets-runtime" -}}{{ .Data.data.matrix_shared_secret }}{{- end -}}
            vault.hashicorp.com/agent-inject-secret-mas-kc-secret: "kv/data/atlas/comms/mas-secrets-runtime"
            vault.hashicorp.com/agent-inject-template-mas-kc-secret: |
              {{- with secret "kv/data/atlas/comms/mas-secrets-runtime" -}}{{ .Data.data.keycloak_client_secret }}{{- end -}}
        spec:
          restartPolicy: Never
          serviceAccountName: comms-vault
          nodeSelector:
            hardware: rpi5
          volumes:
          - name: vault-scripts
            configMap:
              name: comms-vault-env
              defaultMode: 0555
          containers:
          - name: rename
            image: registry.bstein.dev/bstein/comms-guest-tools:0.1.0
            volumeMounts:
            - name: vault-scripts
              mountPath: /vault/scripts
              readOnly: true
            env:
            - name: SYNAPSE_BASE
              value: http://othrys-synapse-matrix-synapse:8008
            - name: MAS_ADMIN_CLIENT_ID
              value: 01KDXMVQBQ5JNY6SEJPZW6Z8BM
            - name: MAS_ADMIN_CLIENT_SECRET_FILE
              value: /vault/secrets/mas-admin-secret
            - name: MAS_ADMIN_API_BASE
              value: http://matrix-authentication-service:8081/api/admin/v1
            - name: MAS_TOKEN_URL
              value: http://matrix-authentication-service:8080/oauth2/token
            - name: SEEDER_USER
              value: othrys-seeder
            - name: PGHOST
              value: postgres-service.postgres.svc.cluster.local
            - name: PGPORT
              value: "5432"
            - name: PGDATABASE
              value: synapse
            - name: PGUSER
              value: synapse
            command:
            - /bin/sh
            - -c
            - |
              set -euo pipefail
              . /vault/scripts/comms_vault_env.sh
              python - <<'PY'
              import base64
              import os
              import random
              import requests
              import time
              import urllib.parse
              import psycopg2

              ADJ = [
                  "brisk","calm","eager","gentle","merry","nifty","rapid","sunny","witty","zesty",
                  "amber","bold","bright","crisp","daring","frosty","glad","jolly","lively","mellow",
                  "quiet","ripe","serene","spry","tidy","vivid","warm","wild","clever","kind",
              ]
              NOUN = [
                  "otter","falcon","comet","ember","grove","harbor","meadow","raven","river","summit",
                  "breeze","cedar","cinder","cove","delta","forest","glade","lark","marsh","peak",
                  "pine","quartz","reef","ridge","sable","sage","shore","thunder","vale","zephyr",
              ]

              BASE = os.environ["SYNAPSE_BASE"]
              MAS_ADMIN_CLIENT_ID = os.environ["MAS_ADMIN_CLIENT_ID"]
              MAS_ADMIN_CLIENT_SECRET_FILE = os.environ["MAS_ADMIN_CLIENT_SECRET_FILE"]
              MAS_ADMIN_API_BASE = os.environ["MAS_ADMIN_API_BASE"].rstrip("/")
              MAS_TOKEN_URL = os.environ["MAS_TOKEN_URL"]
              SEEDER_USER = os.environ["SEEDER_USER"]
              ROOM_ALIAS = "#othrys:live.bstein.dev"
              SERVER_NAME = "live.bstein.dev"
              STALE_GUEST_MS = 14 * 24 * 60 * 60 * 1000

              def mas_admin_token():
                  with open(MAS_ADMIN_CLIENT_SECRET_FILE, "r", encoding="utf-8") as f:
                      secret = f.read().strip()
                  basic = base64.b64encode(f"{MAS_ADMIN_CLIENT_ID}:{secret}".encode()).decode()
                  last_err = None
                  for attempt in range(5):
                      try:
                          r = requests.post(
                              MAS_TOKEN_URL,
                              headers={"Authorization": f"Basic {basic}"},
                              data={"grant_type": "client_credentials", "scope": "urn:mas:admin"},
                              timeout=30,
                          )
                          r.raise_for_status()
                          return r.json()["access_token"]
                      except Exception as exc:  # noqa: BLE001
                          last_err = exc
                          time.sleep(2 ** attempt)
                  raise last_err

              def mas_user_id(token, username):
                  r = requests.get(
                      f"{MAS_ADMIN_API_BASE}/users/by-username/{urllib.parse.quote(username)}",
                      headers={"Authorization": f"Bearer {token}"},
                      timeout=30,
                  )
                  r.raise_for_status()
                  return r.json()["data"]["id"]

              def mas_personal_session(token, user_id):
                  r = requests.post(
                      f"{MAS_ADMIN_API_BASE}/personal-sessions",
                      headers={"Authorization": f"Bearer {token}"},
                      json={
                          "actor_user_id": user_id,
                          "human_name": "guest-name-randomizer",
                          "scope": "urn:matrix:client:api:*",
                          "expires_in": 300,
                      },
                      timeout=30,
                  )
                  r.raise_for_status()
                  data = r.json().get("data", {}).get("attributes", {}) or {}
                  return data["access_token"], r.json()["data"]["id"]

              def mas_revoke_session(token, session_id):
                  requests.post(
                      f"{MAS_ADMIN_API_BASE}/personal-sessions/{urllib.parse.quote(session_id)}/revoke",
                      headers={"Authorization": f"Bearer {token}"},
                      json={},
                      timeout=30,
                  )

              def resolve_alias(token, alias):
                  headers = {"Authorization": f"Bearer {token}"}
                  enc = urllib.parse.quote(alias)
                  r = requests.get(f"{BASE}/_matrix/client/v3/directory/room/{enc}", headers=headers)
                  r.raise_for_status()
                  return r.json()["room_id"]

              def room_members(token, room_id):
                  headers = {"Authorization": f"Bearer {token}"}
                  r = requests.get(f"{BASE}/_matrix/client/v3/rooms/{urllib.parse.quote(room_id)}/members", headers=headers)
                  r.raise_for_status()
                  members = set()
                  existing_names = set()
                  for ev in r.json().get("chunk", []):
                      user_id = ev.get("state_key")
                      if user_id:
                          members.add(user_id)
                      disp = (ev.get("content") or {}).get("displayname")
                      if disp:
                          existing_names.add(disp)
                  return members, existing_names

              def mas_list_users(token):
                  headers = {"Authorization": f"Bearer {token}"}
                  users = []
                  cursor = None
                  while True:
                      url = f"{MAS_ADMIN_API_BASE}/users?page[size]=100"
                      if cursor:
                          url += f"&page[after]={urllib.parse.quote(cursor)}"
                      r = requests.get(url, headers=headers, timeout=30)
                      r.raise_for_status()
                      data = r.json().get("data", [])
                      if not data:
                          break
                      users.extend(data)
                      cursor = data[-1].get("meta", {}).get("page", {}).get("cursor")
                      if not cursor:
                          break
                  return users

              def synapse_list_users(token):
                  headers = {"Authorization": f"Bearer {token}"}
                  users = []
                  from_token = None
                  while True:
                      url = f"{BASE}/_synapse/admin/v2/users?local=true&deactivated=false&limit=100"
                      if from_token:
                          url += f"&from={urllib.parse.quote(from_token)}"
                      r = requests.get(url, headers=headers, timeout=30)
                      r.raise_for_status()
                      payload = r.json()
                      users.extend(payload.get("users", []))
                      from_token = payload.get("next_token")
                      if not from_token:
                          break
                  return users

              def should_prune_guest(entry, now_ms):
                  if not entry.get("is_guest"):
                      return False
                  last_seen = entry.get("last_seen_ts")
                  if last_seen is None:
                      return False
                  try:
                      last_seen = int(last_seen)
                  except (TypeError, ValueError):
                      return False
                  return now_ms - last_seen > STALE_GUEST_MS

              def prune_guest(token, user_id):
                  headers = {"Authorization": f"Bearer {token}"}
                  try:
                      r = requests.delete(
                          f"{BASE}/_synapse/admin/v2/users/{urllib.parse.quote(user_id)}",
                          headers=headers,
                          params={"erase": "true",
|
||||
timeout=30,
|
||||
)
|
||||
except Exception as exc: # noqa: BLE001
|
||||
print(f"guest prune failed for {user_id}: {exc}")
|
||||
return False
|
||||
if r.status_code in (200, 202, 204, 404):
|
||||
return True
|
||||
print(f"guest prune failed for {user_id}: {r.status_code} {r.text}")
|
||||
return False
|
||||
|
||||
def user_id_for_username(username):
|
||||
return f"@{username}:live.bstein.dev"
|
||||
|
||||
def get_displayname(token, user_id):
|
||||
headers = {"Authorization": f"Bearer {token}"}
|
||||
r = requests.get(f"{BASE}/_matrix/client/v3/profile/{urllib.parse.quote(user_id)}", headers=headers)
|
||||
r.raise_for_status()
|
||||
return r.json().get("displayname")
|
||||
|
||||
def get_displayname_admin(token, user_id):
|
||||
headers = {"Authorization": f"Bearer {token}"}
|
||||
r = requests.get(
|
||||
f"{BASE}/_synapse/admin/v2/users/{urllib.parse.quote(user_id)}",
|
||||
headers=headers,
|
||||
timeout=30,
|
||||
)
|
||||
if r.status_code == 404:
|
||||
return None
|
||||
r.raise_for_status()
|
||||
return r.json().get("displayname")
|
||||
|
||||
def set_displayname(token, room_id, user_id, name, in_room):
|
||||
headers = {"Authorization": f"Bearer {token}"}
|
||||
payload = {"displayname": name}
|
||||
r = requests.put(
|
||||
f"{BASE}/_matrix/client/v3/profile/{urllib.parse.quote(user_id)}/displayname",
|
||||
headers=headers,
|
||||
json=payload,
|
||||
)
|
||||
r.raise_for_status()
|
||||
if not in_room:
|
||||
return
|
||||
state_url = f"{BASE}/_matrix/client/v3/rooms/{urllib.parse.quote(room_id)}/state/m.room.member/{urllib.parse.quote(user_id)}"
|
||||
content = {"membership": "join", "displayname": name}
|
||||
requests.put(state_url, headers=headers, json=content, timeout=30)
|
||||
|
||||
def set_displayname_admin(token, user_id, name):
|
||||
headers = {"Authorization": f"Bearer {token}"}
|
||||
payload = {"displayname": name}
|
||||
r = requests.put(
|
||||
f"{BASE}/_synapse/admin/v2/users/{urllib.parse.quote(user_id)}",
|
||||
headers=headers,
|
||||
json=payload,
|
||||
timeout=30,
|
||||
)
|
||||
if r.status_code in (200, 201, 204):
|
||||
return True
|
||||
return False
|
||||
|
||||
def needs_rename_username(username):
|
||||
return username.isdigit() or username.startswith("guest-")
|
||||
|
||||
def needs_rename_display(display):
|
||||
return not display or display.isdigit() or display.startswith("guest-")
|
||||
|
||||
def db_rename_numeric(existing_names):
|
||||
profile_rows = []
|
||||
profile_index = {}
|
||||
users = []
|
||||
conn = psycopg2.connect(
|
||||
host=os.environ["PGHOST"],
|
||||
port=int(os.environ["PGPORT"]),
|
||||
dbname=os.environ["PGDATABASE"],
|
||||
user=os.environ["PGUSER"],
|
||||
password=os.environ["PGPASSWORD"],
|
||||
)
|
||||
try:
|
||||
with conn:
|
||||
with conn.cursor() as cur:
|
||||
cur.execute(
|
||||
"SELECT user_id, full_user_id, displayname FROM profiles WHERE full_user_id ~ %s",
|
||||
(f"^@\\d+:{SERVER_NAME}$",),
|
||||
)
|
||||
profile_rows = cur.fetchall()
|
||||
profile_index = {row[1]: row for row in profile_rows}
|
||||
for user_id, full_user_id, display in profile_rows:
|
||||
if display and not needs_rename_display(display):
|
||||
continue
|
||||
new = None
|
||||
for _ in range(30):
|
||||
candidate = f"{random.choice(ADJ)}-{random.choice(NOUN)}"
|
||||
if candidate not in existing_names:
|
||||
new = candidate
|
||||
existing_names.add(candidate)
|
||||
break
|
||||
if not new:
|
||||
continue
|
||||
cur.execute(
|
||||
"UPDATE profiles SET displayname = %s WHERE full_user_id = %s",
|
||||
(new, full_user_id),
|
||||
)
|
||||
|
||||
cur.execute(
|
||||
"SELECT name FROM users WHERE name ~ %s",
|
||||
(f"^@\\d+:{SERVER_NAME}$",),
|
||||
)
|
||||
users = [row[0] for row in cur.fetchall()]
|
||||
if not users:
|
||||
return
|
||||
cur.execute(
|
||||
"SELECT user_id, full_user_id FROM profiles WHERE full_user_id = ANY(%s)",
|
||||
(users,),
|
||||
)
|
||||
for existing_full in cur.fetchall():
|
||||
profile_index.setdefault(existing_full[1], existing_full)
|
||||
|
||||
for full_user_id in users:
|
||||
if full_user_id in profile_index:
|
||||
continue
|
||||
localpart = full_user_id.split(":", 1)[0].lstrip("@")
|
||||
new = None
|
||||
for _ in range(30):
|
||||
candidate = f"{random.choice(ADJ)}-{random.choice(NOUN)}"
|
||||
if candidate not in existing_names:
|
||||
new = candidate
|
||||
existing_names.add(candidate)
|
||||
break
|
||||
if not new:
|
||||
continue
|
||||
cur.execute(
|
||||
"INSERT INTO profiles (user_id, displayname, full_user_id) VALUES (%s, %s, %s) "
|
||||
"ON CONFLICT (full_user_id) DO UPDATE SET displayname = EXCLUDED.displayname",
|
||||
(localpart, new, full_user_id),
|
||||
)
|
||||
finally:
|
||||
conn.close()
|
||||
|
||||
admin_token = mas_admin_token()
|
||||
seeder_id = mas_user_id(admin_token, SEEDER_USER)
|
||||
seeder_token, seeder_session = mas_personal_session(admin_token, seeder_id)
|
||||
try:
|
||||
room_id = resolve_alias(seeder_token, ROOM_ALIAS)
|
||||
members, existing = room_members(seeder_token, room_id)
|
||||
users = mas_list_users(admin_token)
|
||||
mas_usernames = set()
|
||||
for user in users:
|
||||
attrs = user.get("attributes") or {}
|
||||
username = attrs.get("username") or ""
|
||||
if username:
|
||||
mas_usernames.add(username)
|
||||
legacy_guest = attrs.get("legacy_guest")
|
||||
if not username:
|
||||
continue
|
||||
if not (legacy_guest or needs_rename_username(username)):
|
||||
continue
|
||||
user_id = user_id_for_username(username)
|
||||
access_token, session_id = mas_personal_session(admin_token, user["id"])
|
||||
try:
|
||||
display = get_displayname(access_token, user_id)
|
||||
if display and not needs_rename_display(display):
|
||||
continue
|
||||
new = None
|
||||
for _ in range(30):
|
||||
candidate = f"{random.choice(ADJ)}-{random.choice(NOUN)}"
|
||||
if candidate not in existing:
|
||||
new = candidate
|
||||
existing.add(candidate)
|
||||
break
|
||||
if not new:
|
||||
continue
|
||||
set_displayname(access_token, room_id, user_id, new, user_id in members)
|
||||
finally:
|
||||
mas_revoke_session(admin_token, session_id)
|
||||
|
||||
try:
|
||||
entries = synapse_list_users(seeder_token)
|
||||
except Exception as exc: # noqa: BLE001
|
||||
print(f"synapse admin list skipped: {exc}")
|
||||
entries = []
|
||||
now_ms = int(time.time() * 1000)
|
||||
for entry in entries:
|
||||
user_id = entry.get("name") or ""
|
||||
if not user_id.startswith("@"):
|
||||
continue
|
||||
localpart = user_id.split(":", 1)[0].lstrip("@")
|
||||
if localpart in mas_usernames:
|
||||
continue
|
||||
is_guest = entry.get("is_guest")
|
||||
if is_guest and should_prune_guest(entry, now_ms):
|
||||
if prune_guest(seeder_token, user_id):
|
||||
continue
|
||||
if not (is_guest or needs_rename_username(localpart)):
|
||||
continue
|
||||
display = get_displayname_admin(seeder_token, user_id)
|
||||
if display and not needs_rename_display(display):
|
||||
continue
|
||||
new = None
|
||||
for _ in range(30):
|
||||
candidate = f"{random.choice(ADJ)}-{random.choice(NOUN)}"
|
||||
if candidate not in existing:
|
||||
new = candidate
|
||||
existing.add(candidate)
|
||||
break
|
||||
if not new:
|
||||
continue
|
||||
if not set_displayname_admin(seeder_token, user_id, new):
|
||||
continue
|
||||
db_rename_numeric(existing)
|
||||
finally:
|
||||
mas_revoke_session(admin_token, seeder_session)
|
||||
PY
|
||||
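Aside on the rename loop above: ADJ and NOUN each carry 30 words, so the pool is 900 distinct adj-noun display names, and each rename makes at most 30 random draws before giving up and skipping that user (presumably to be retried on a later run). A minimal standalone sketch of that retry behavior, with the word lists truncated here for brevity:

    import random

    ADJ = ["brisk", "calm", "eager"]     # truncated; the job ships 30 adjectives
    NOUN = ["otter", "falcon", "comet"]  # truncated; the job ships 30 nouns

    def pick_name(existing, attempts=30):
        # Mirrors the job's loop: draw random candidates, claim the first
        # unused one, and give up (return None) after `attempts` collisions.
        for _ in range(attempts):
            candidate = f"{random.choice(ADJ)}-{random.choice(NOUN)}"
            if candidate not in existing:
                existing.add(candidate)
                return candidate
        return None

    taken = set()
    while pick_name(taken) is not None:
        pass
    print(f"minted {len(taken)} unique names before a 30-draw dry spell")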
@@ -34,7 +34,11 @@ resources:
  - livekit-token-deployment.yaml
  - livekit.yaml
  - coturn.yaml
  - seed-othrys-room.yaml
  - guest-name-job.yaml
  - oneoffs/othrys-kick-numeric-job.yaml
  - pin-othrys-job.yaml
  - reset-othrys-room-job.yaml
  - oneoffs/bstein-force-leave-job.yaml
  - livekit-ingress.yaml
  - livekit-middlewares.yaml
services/comms/pin-othrys-job.yaml (169 lines, new file)
@@ -0,0 +1,169 @@
# services/comms/pin-othrys-job.yaml
apiVersion: batch/v1
kind: CronJob
metadata:
  name: pin-othrys-invite
  namespace: comms
  labels:
    atlas.bstein.dev/glue: "true"
spec:
  schedule: "*/30 * * * *"
  suspend: true
  concurrencyPolicy: Forbid
  successfulJobsHistoryLimit: 1
  failedJobsHistoryLimit: 1
  jobTemplate:
    spec:
      backoffLimit: 0
      template:
        metadata:
          annotations:
            vault.hashicorp.com/agent-inject: "true"
            vault.hashicorp.com/agent-pre-populate-only: "true"
            vault.hashicorp.com/role: "comms"
            vault.hashicorp.com/agent-inject-secret-turn-secret: "kv/data/atlas/comms/turn-shared-secret"
            vault.hashicorp.com/agent-inject-template-turn-secret: |
              {{- with secret "kv/data/atlas/comms/turn-shared-secret" -}}{{ .Data.data.TURN_STATIC_AUTH_SECRET }}{{- end -}}
            vault.hashicorp.com/agent-inject-secret-livekit-primary: "kv/data/atlas/comms/livekit-api"
            vault.hashicorp.com/agent-inject-template-livekit-primary: |
              {{- with secret "kv/data/atlas/comms/livekit-api" -}}{{ .Data.data.primary }}{{- end -}}
            vault.hashicorp.com/agent-inject-secret-bot-pass: "kv/data/atlas/comms/atlasbot-credentials-runtime"
            vault.hashicorp.com/agent-inject-template-bot-pass: |
              {{- with secret "kv/data/atlas/comms/atlasbot-credentials-runtime" -}}{{ index .Data.data "bot-password" }}{{- end -}}
            vault.hashicorp.com/agent-inject-secret-seeder-pass: "kv/data/atlas/comms/atlasbot-credentials-runtime"
            vault.hashicorp.com/agent-inject-template-seeder-pass: |
              {{- with secret "kv/data/atlas/comms/atlasbot-credentials-runtime" -}}{{ index .Data.data "seeder-password" }}{{- end -}}
            vault.hashicorp.com/agent-inject-secret-chat-matrix: "kv/data/atlas/shared/chat-ai-keys-runtime"
            vault.hashicorp.com/agent-inject-template-chat-matrix: |
              {{- with secret "kv/data/atlas/shared/chat-ai-keys-runtime" -}}{{ .Data.data.matrix }}{{- end -}}
            vault.hashicorp.com/agent-inject-secret-chat-homepage: "kv/data/atlas/shared/chat-ai-keys-runtime"
            vault.hashicorp.com/agent-inject-template-chat-homepage: |
              {{- with secret "kv/data/atlas/shared/chat-ai-keys-runtime" -}}{{ .Data.data.homepage }}{{- end -}}
            vault.hashicorp.com/agent-inject-secret-mas-admin-secret: "kv/data/atlas/comms/mas-admin-client-runtime"
            vault.hashicorp.com/agent-inject-template-mas-admin-secret: |
              {{- with secret "kv/data/atlas/comms/mas-admin-client-runtime" -}}{{ .Data.data.client_secret }}{{- end -}}
            vault.hashicorp.com/agent-inject-secret-synapse-db-pass: "kv/data/atlas/comms/synapse-db"
            vault.hashicorp.com/agent-inject-template-synapse-db-pass: |
              {{- with secret "kv/data/atlas/comms/synapse-db" -}}{{ .Data.data.POSTGRES_PASSWORD }}{{- end -}}
            vault.hashicorp.com/agent-inject-secret-mas-db-pass: "kv/data/atlas/comms/mas-db"
            vault.hashicorp.com/agent-inject-template-mas-db-pass: |
              {{- with secret "kv/data/atlas/comms/mas-db" -}}{{ .Data.data.password }}{{- end -}}
            vault.hashicorp.com/agent-inject-secret-mas-matrix-shared: "kv/data/atlas/comms/mas-secrets-runtime"
            vault.hashicorp.com/agent-inject-template-mas-matrix-shared: |
              {{- with secret "kv/data/atlas/comms/mas-secrets-runtime" -}}{{ .Data.data.matrix_shared_secret }}{{- end -}}
            vault.hashicorp.com/agent-inject-secret-mas-kc-secret: "kv/data/atlas/comms/mas-secrets-runtime"
            vault.hashicorp.com/agent-inject-template-mas-kc-secret: |
              {{- with secret "kv/data/atlas/comms/mas-secrets-runtime" -}}{{ .Data.data.keycloak_client_secret }}{{- end -}}
        spec:
          restartPolicy: Never
          serviceAccountName: comms-vault
          containers:
            - name: pin
              image: python:3.11-slim
              env:
                - name: SYNAPSE_BASE
                  value: http://othrys-synapse-matrix-synapse:8008
                - name: AUTH_BASE
                  value: http://matrix-authentication-service:8080
                - name: SEEDER_USER
                  value: othrys-seeder
              command:
                - /bin/sh
                - -c
                - |
                  set -euo pipefail
                  . /vault/scripts/comms_vault_env.sh
                  pip install --no-cache-dir requests >/dev/null
                  python - <<'PY'
                  import os, requests, urllib.parse

                  BASE = os.environ["SYNAPSE_BASE"]
                  AUTH_BASE = os.environ.get("AUTH_BASE", BASE)
                  ROOM_ALIAS = "#othrys:live.bstein.dev"
                  MESSAGE = (
                      "Invite guests: share https://live.bstein.dev/#/room/#othrys:live.bstein.dev?action=join "
                      "and choose 'Continue' -> 'Join as guest'."
                  )

                  def auth(token): return {"Authorization": f"Bearer {token}"}

                  def canon_user(user):
                      u = (user or "").strip()
                      if u.startswith("@") and ":" in u:
                          return u
                      u = u.lstrip("@")
                      if ":" in u:
                          return f"@{u}"
                      return f"@{u}:live.bstein.dev"

                  def login(user, password):
                      r = requests.post(f"{AUTH_BASE}/_matrix/client/v3/login", json={
                          "type": "m.login.password",
                          "identifier": {"type": "m.id.user", "user": canon_user(user)},
                          "password": password,
                      })
                      r.raise_for_status()
                      return r.json()["access_token"]

                  def resolve(alias, token):
                      enc = urllib.parse.quote(alias)
                      r = requests.get(f"{BASE}/_matrix/client/v3/directory/room/{enc}", headers=auth(token))
                      r.raise_for_status()
                      return r.json()["room_id"]

                  def get_pinned(room_id, token):
                      r = requests.get(
                          f"{BASE}/_matrix/client/v3/rooms/{urllib.parse.quote(room_id)}/state/m.room.pinned_events",
                          headers=auth(token),
                      )
                      if r.status_code == 404:
                          return []
                      r.raise_for_status()
                      return r.json().get("pinned", [])

                  def get_event(room_id, event_id, token):
                      r = requests.get(
                          f"{BASE}/_matrix/client/v3/rooms/{urllib.parse.quote(room_id)}/event/{urllib.parse.quote(event_id)}",
                          headers=auth(token),
                      )
                      if r.status_code == 404:
                          return None
                      r.raise_for_status()
                      return r.json()

                  def send(room_id, token, body):
                      r = requests.post(
                          f"{BASE}/_matrix/client/v3/rooms/{urllib.parse.quote(room_id)}/send/m.room.message",
                          headers=auth(token),
                          json={"msgtype": "m.text", "body": body},
                      )
                      r.raise_for_status()
                      return r.json()["event_id"]

                  def pin(room_id, token, event_id):
                      r = requests.put(
                          f"{BASE}/_matrix/client/v3/rooms/{urllib.parse.quote(room_id)}/state/m.room.pinned_events",
                          headers=auth(token),
                          json={"pinned": [event_id]},
                      )
                      r.raise_for_status()

                  token = login(os.environ["SEEDER_USER"], os.environ["SEEDER_PASS"])
                  room_id = resolve(ROOM_ALIAS, token)
                  for event_id in get_pinned(room_id, token):
                      ev = get_event(room_id, event_id, token)
                      if ev and ev.get("content", {}).get("body") == MESSAGE:
                          raise SystemExit(0)

                  eid = send(room_id, token, MESSAGE)
                  pin(room_id, token, eid)
                  PY
              volumeMounts:
                - name: vault-scripts
                  mountPath: /vault/scripts
                  readOnly: true
          volumes:
            - name: vault-scripts
              configMap:
                name: comms-vault-env
                defaultMode: 0555
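The pin job is idempotent by exact string match: it walks the currently pinned events and exits 0 as soon as one of them carries the MESSAGE body verbatim, so re-runs never duplicate the pin. A condensed sketch of that check (fetch_event stands in for the get_event call above):

    def already_pinned(pinned_event_ids, fetch_event, message):
        # True when some pinned event's plain-text body equals `message` exactly.
        for event_id in pinned_event_ids:
            ev = fetch_event(event_id)  # event JSON dict, or None on 404
            if ev and ev.get("content", {}).get("body") == message:
                return True
        return False

One consequence of the exact match: any edit to MESSAGE causes one re-post and re-pin on the next run, and pin() replaces the entire pinned list with the single new event rather than appending to it.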
services/comms/reset-othrys-room-job.yaml (312 lines, new file)
@@ -0,0 +1,312 @@
# services/comms/reset-othrys-room-job.yaml
apiVersion: batch/v1
kind: CronJob
metadata:
  name: othrys-room-reset
  namespace: comms
  labels:
    atlas.bstein.dev/glue: "true"
spec:
  schedule: "0 0 1 1 *"
  suspend: true
  concurrencyPolicy: Forbid
  successfulJobsHistoryLimit: 1
  failedJobsHistoryLimit: 1
  jobTemplate:
    spec:
      backoffLimit: 0
      template:
        metadata:
          annotations:
            vault.hashicorp.com/agent-inject: "true"
            vault.hashicorp.com/agent-pre-populate-only: "true"
            vault.hashicorp.com/role: "comms"
            vault.hashicorp.com/agent-inject-secret-turn-secret: "kv/data/atlas/comms/turn-shared-secret"
            vault.hashicorp.com/agent-inject-template-turn-secret: |
              {{- with secret "kv/data/atlas/comms/turn-shared-secret" -}}{{ .Data.data.TURN_STATIC_AUTH_SECRET }}{{- end -}}
            vault.hashicorp.com/agent-inject-secret-livekit-primary: "kv/data/atlas/comms/livekit-api"
            vault.hashicorp.com/agent-inject-template-livekit-primary: |
              {{- with secret "kv/data/atlas/comms/livekit-api" -}}{{ .Data.data.primary }}{{- end -}}
            vault.hashicorp.com/agent-inject-secret-bot-pass: "kv/data/atlas/comms/atlasbot-credentials-runtime"
            vault.hashicorp.com/agent-inject-template-bot-pass: |
              {{- with secret "kv/data/atlas/comms/atlasbot-credentials-runtime" -}}{{ index .Data.data "bot-password" }}{{- end -}}
            vault.hashicorp.com/agent-inject-secret-seeder-pass: "kv/data/atlas/comms/atlasbot-credentials-runtime"
            vault.hashicorp.com/agent-inject-template-seeder-pass: |
              {{- with secret "kv/data/atlas/comms/atlasbot-credentials-runtime" -}}{{ index .Data.data "seeder-password" }}{{- end -}}
            vault.hashicorp.com/agent-inject-secret-chat-matrix: "kv/data/atlas/shared/chat-ai-keys-runtime"
            vault.hashicorp.com/agent-inject-template-chat-matrix: |
              {{- with secret "kv/data/atlas/shared/chat-ai-keys-runtime" -}}{{ .Data.data.matrix }}{{- end -}}
            vault.hashicorp.com/agent-inject-secret-chat-homepage: "kv/data/atlas/shared/chat-ai-keys-runtime"
            vault.hashicorp.com/agent-inject-template-chat-homepage: |
              {{- with secret "kv/data/atlas/shared/chat-ai-keys-runtime" -}}{{ .Data.data.homepage }}{{- end -}}
            vault.hashicorp.com/agent-inject-secret-mas-admin-secret: "kv/data/atlas/comms/mas-admin-client-runtime"
            vault.hashicorp.com/agent-inject-template-mas-admin-secret: |
              {{- with secret "kv/data/atlas/comms/mas-admin-client-runtime" -}}{{ .Data.data.client_secret }}{{- end -}}
            vault.hashicorp.com/agent-inject-secret-synapse-db-pass: "kv/data/atlas/comms/synapse-db"
            vault.hashicorp.com/agent-inject-template-synapse-db-pass: |
              {{- with secret "kv/data/atlas/comms/synapse-db" -}}{{ .Data.data.POSTGRES_PASSWORD }}{{- end -}}
            vault.hashicorp.com/agent-inject-secret-mas-db-pass: "kv/data/atlas/comms/mas-db"
            vault.hashicorp.com/agent-inject-template-mas-db-pass: |
              {{- with secret "kv/data/atlas/comms/mas-db" -}}{{ .Data.data.password }}{{- end -}}
            vault.hashicorp.com/agent-inject-secret-mas-matrix-shared: "kv/data/atlas/comms/mas-secrets-runtime"
            vault.hashicorp.com/agent-inject-template-mas-matrix-shared: |
              {{- with secret "kv/data/atlas/comms/mas-secrets-runtime" -}}{{ .Data.data.matrix_shared_secret }}{{- end -}}
            vault.hashicorp.com/agent-inject-secret-mas-kc-secret: "kv/data/atlas/comms/mas-secrets-runtime"
            vault.hashicorp.com/agent-inject-template-mas-kc-secret: |
              {{- with secret "kv/data/atlas/comms/mas-secrets-runtime" -}}{{ .Data.data.keycloak_client_secret }}{{- end -}}
        spec:
          restartPolicy: Never
          serviceAccountName: comms-vault
          containers:
            - name: reset
              image: python:3.11-slim
              env:
                - name: SYNAPSE_BASE
                  value: http://othrys-synapse-matrix-synapse:8008
                - name: AUTH_BASE
                  value: http://matrix-authentication-service:8080
                - name: SERVER_NAME
                  value: live.bstein.dev
                - name: ROOM_ALIAS
                  value: "#othrys:live.bstein.dev"
                - name: ROOM_NAME
                  value: Othrys
                - name: PIN_MESSAGE
                  value: "Invite guests: share https://live.bstein.dev/#/room/#othrys:live.bstein.dev?action=join and choose 'Continue' -> 'Join as guest'."
                - name: SEEDER_USER
                  value: othrys-seeder
                - name: BOT_USER
                  value: atlasbot
              command:
                - /bin/sh
                - -c
                - |
                  set -euo pipefail
                  . /vault/scripts/comms_vault_env.sh
                  pip install --no-cache-dir requests >/dev/null
                  python - <<'PY'
                  import os
                  import time
                  import urllib.parse
                  import requests

                  BASE = os.environ["SYNAPSE_BASE"]
                  AUTH_BASE = os.environ.get("AUTH_BASE", BASE)
                  SERVER_NAME = os.environ.get("SERVER_NAME", "live.bstein.dev")
                  ROOM_ALIAS = os.environ.get("ROOM_ALIAS", "#othrys:live.bstein.dev")
                  ROOM_NAME = os.environ.get("ROOM_NAME", "Othrys")
                  PIN_MESSAGE = os.environ["PIN_MESSAGE"]
                  SEEDER_USER = os.environ["SEEDER_USER"]
                  SEEDER_PASS = os.environ["SEEDER_PASS"]
                  BOT_USER = os.environ["BOT_USER"]

                  POWER_LEVELS = {
                      "ban": 50,
                      "events": {
                          "m.room.avatar": 50,
                          "m.room.canonical_alias": 50,
                          "m.room.encryption": 100,
                          "m.room.history_visibility": 100,
                          "m.room.name": 50,
                          "m.room.power_levels": 100,
                          "m.room.server_acl": 100,
                          "m.room.tombstone": 100,
                      },
                      "events_default": 0,
                      "historical": 100,
                      "invite": 50,
                      "kick": 50,
                      "m.call.invite": 50,
                      "redact": 50,
                      "state_default": 50,
                      "users": {f"@{SEEDER_USER}:{SERVER_NAME}": 100},
                      "users_default": 0,
                  }

                  def auth(token):
                      return {"Authorization": f"Bearer {token}"}

                  def canon_user(user):
                      u = (user or "").strip()
                      if u.startswith("@") and ":" in u:
                          return u
                      u = u.lstrip("@")
                      if ":" in u:
                          return f"@{u}"
                      return f"@{u}:{SERVER_NAME}"

                  def login(user, password):
                      r = requests.post(
                          f"{AUTH_BASE}/_matrix/client/v3/login",
                          json={
                              "type": "m.login.password",
                              "identifier": {"type": "m.id.user", "user": canon_user(user)},
                              "password": password,
                          },
                      )
                      if r.status_code != 200:
                          raise SystemExit(f"login failed: {r.status_code} {r.text}")
                      return r.json()["access_token"]

                  def resolve_alias(token, alias):
                      enc = urllib.parse.quote(alias)
                      r = requests.get(f"{BASE}/_matrix/client/v3/directory/room/{enc}", headers=auth(token))
                      if r.status_code == 404:
                          return None
                      r.raise_for_status()
                      return r.json()["room_id"]

                  def create_room(token):
                      r = requests.post(
                          f"{BASE}/_matrix/client/v3/createRoom",
                          headers=auth(token),
                          json={
                              "preset": "public_chat",
                              "name": ROOM_NAME,
                              "room_version": "11",
                          },
                      )
                      r.raise_for_status()
                      return r.json()["room_id"]

                  def put_state(token, room_id, ev_type, content):
                      r = requests.put(
                          f"{BASE}/_matrix/client/v3/rooms/{urllib.parse.quote(room_id)}/state/{ev_type}",
                          headers=auth(token),
                          json=content,
                      )
                      r.raise_for_status()

                  def set_directory_visibility(token, room_id, visibility):
                      r = requests.put(
                          f"{BASE}/_matrix/client/v3/directory/list/room/{urllib.parse.quote(room_id)}",
                          headers=auth(token),
                          json={"visibility": visibility},
                      )
                      r.raise_for_status()

                  def delete_alias(token, alias):
                      enc = urllib.parse.quote(alias)
                      r = requests.delete(f"{BASE}/_matrix/client/v3/directory/room/{enc}", headers=auth(token))
                      if r.status_code in (200, 202, 404):
                          return
                      r.raise_for_status()

                  def put_alias(token, alias, room_id):
                      enc = urllib.parse.quote(alias)
                      r = requests.put(
                          f"{BASE}/_matrix/client/v3/directory/room/{enc}",
                          headers=auth(token),
                          json={"room_id": room_id},
                      )
                      r.raise_for_status()

                  def list_joined_members(token, room_id):
                      r = requests.get(
                          f"{BASE}/_matrix/client/v3/rooms/{urllib.parse.quote(room_id)}/members?membership=join",
                          headers=auth(token),
                      )
                      r.raise_for_status()
                      members = []
                      for ev in r.json().get("chunk", []):
                          if ev.get("type") != "m.room.member":
                              continue
                          uid = ev.get("state_key")
                          if not isinstance(uid, str) or not uid.startswith("@"):
                              continue
                          members.append(uid)
                      return members

                  def invite_user(token, room_id, user_id):
                      r = requests.post(
                          f"{BASE}/_matrix/client/v3/rooms/{urllib.parse.quote(room_id)}/invite",
                          headers=auth(token),
                          json={"user_id": user_id},
                      )
                      if r.status_code in (200, 202):
                          return
                      r.raise_for_status()

                  def send_message(token, room_id, body):
                      r = requests.post(
                          f"{BASE}/_matrix/client/v3/rooms/{urllib.parse.quote(room_id)}/send/m.room.message",
                          headers=auth(token),
                          json={"msgtype": "m.text", "body": body},
                      )
                      r.raise_for_status()
                      return r.json()["event_id"]

                  def login_with_retry():
                      last = None
                      for attempt in range(1, 6):
                          try:
                              return login(SEEDER_USER, SEEDER_PASS)
                          except Exception as exc:  # noqa: BLE001
                              last = exc
                              time.sleep(attempt * 2)
                      raise last

                  token = login_with_retry()

                  old_room_id = resolve_alias(token, ROOM_ALIAS)
                  if not old_room_id:
                      raise SystemExit(f"alias {ROOM_ALIAS} not found; refusing to proceed")

                  new_room_id = create_room(token)

                  # Configure the new room.
                  put_state(token, new_room_id, "m.room.join_rules", {"join_rule": "public"})
                  put_state(token, new_room_id, "m.room.guest_access", {"guest_access": "can_join"})
                  put_state(token, new_room_id, "m.room.history_visibility", {"history_visibility": "shared"})
                  put_state(token, new_room_id, "m.room.power_levels", POWER_LEVELS)

                  # Move the alias.
                  delete_alias(token, ROOM_ALIAS)
                  put_alias(token, ROOM_ALIAS, new_room_id)
                  put_state(token, new_room_id, "m.room.canonical_alias", {"alias": ROOM_ALIAS})

                  set_directory_visibility(token, new_room_id, "public")

                  # Invite the bot and all joined members of the old room.
                  bot_user_id = f"@{BOT_USER}:{SERVER_NAME}"
                  invite_user(token, new_room_id, bot_user_id)
                  for uid in list_joined_members(token, old_room_id):
                      if uid == f"@{SEEDER_USER}:{SERVER_NAME}":
                          continue
                      localpart = uid.split(":", 1)[0].lstrip("@")
                      if localpart.isdigit():
                          continue
                      invite_user(token, new_room_id, uid)

                  # Pin the guest invite message in the new room.
                  event_id = send_message(token, new_room_id, PIN_MESSAGE)
                  put_state(token, new_room_id, "m.room.pinned_events", {"pinned": [event_id]})

                  # De-list and tombstone the old room.
                  set_directory_visibility(token, old_room_id, "private")
                  put_state(token, old_room_id, "m.room.join_rules", {"join_rule": "invite"})
                  put_state(token, old_room_id, "m.room.guest_access", {"guest_access": "forbidden"})
                  put_state(
                      token,
                      old_room_id,
                      "m.room.tombstone",
                      {"body": "Othrys has been reset. Please join the new room.", "replacement_room": new_room_id},
                  )
                  send_message(
                      token,
                      old_room_id,
                      "Othrys was reset. Join the new room at https://live.bstein.dev/#/room/#othrys:live.bstein.dev?action=join",
                  )

                  print(f"old_room_id={old_room_id}")
                  print(f"new_room_id={new_room_id}")
                  PY
              volumeMounts:
                - name: vault-scripts
                  mountPath: /vault/scripts
                  readOnly: true
          volumes:
            - name: vault-scripts
              configMap:
                name: comms-vault-env
                defaultMode: 0555
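Because the reset is destructive (the old room is de-listed, locked to invite-only, and tombstoned), it can help to preview what it would touch first. A hypothetical read-only helper, not part of the job, built on the same two GET endpoints the job already calls; SEEDER_TOKEN here is an assumed pre-obtained access token:

    import os
    import urllib.parse
    import requests

    BASE = os.environ["SYNAPSE_BASE"]
    HEADERS = {"Authorization": f"Bearer {os.environ['SEEDER_TOKEN']}"}  # assumption

    alias = urllib.parse.quote("#othrys:live.bstein.dev")
    room_id = requests.get(
        f"{BASE}/_matrix/client/v3/directory/room/{alias}", headers=HEADERS
    ).json()["room_id"]
    chunk = requests.get(
        f"{BASE}/_matrix/client/v3/rooms/{urllib.parse.quote(room_id)}/members?membership=join",
        headers=HEADERS,
    ).json().get("chunk", [])
    print(f"{room_id}: {len(chunk)} joined members would be considered for re-invite")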
services/comms/seed-othrys-room.yaml (185 lines, new file)
@@ -0,0 +1,185 @@
# services/comms/seed-othrys-room.yaml
apiVersion: batch/v1
kind: CronJob
metadata:
  name: seed-othrys-room
  namespace: comms
  labels:
    atlas.bstein.dev/glue: "true"
spec:
  schedule: "*/10 * * * *"
  suspend: true
  concurrencyPolicy: Forbid
  jobTemplate:
    spec:
      backoffLimit: 0
      template:
        metadata:
          annotations:
            vault.hashicorp.com/agent-inject: "true"
            vault.hashicorp.com/agent-pre-populate-only: "true"
            vault.hashicorp.com/role: "comms"
            vault.hashicorp.com/agent-inject-secret-turn-secret: "kv/data/atlas/comms/turn-shared-secret"
            vault.hashicorp.com/agent-inject-template-turn-secret: |
              {{- with secret "kv/data/atlas/comms/turn-shared-secret" -}}{{ .Data.data.TURN_STATIC_AUTH_SECRET }}{{- end -}}
            vault.hashicorp.com/agent-inject-secret-livekit-primary: "kv/data/atlas/comms/livekit-api"
            vault.hashicorp.com/agent-inject-template-livekit-primary: |
              {{- with secret "kv/data/atlas/comms/livekit-api" -}}{{ .Data.data.primary }}{{- end -}}
            vault.hashicorp.com/agent-inject-secret-bot-pass: "kv/data/atlas/comms/atlasbot-credentials-runtime"
            vault.hashicorp.com/agent-inject-template-bot-pass: |
              {{- with secret "kv/data/atlas/comms/atlasbot-credentials-runtime" -}}{{ index .Data.data "bot-password" }}{{- end -}}
            vault.hashicorp.com/agent-inject-secret-seeder-pass: "kv/data/atlas/comms/atlasbot-credentials-runtime"
            vault.hashicorp.com/agent-inject-template-seeder-pass: |
              {{- with secret "kv/data/atlas/comms/atlasbot-credentials-runtime" -}}{{ index .Data.data "seeder-password" }}{{- end -}}
            vault.hashicorp.com/agent-inject-secret-chat-matrix: "kv/data/atlas/shared/chat-ai-keys-runtime"
            vault.hashicorp.com/agent-inject-template-chat-matrix: |
              {{- with secret "kv/data/atlas/shared/chat-ai-keys-runtime" -}}{{ .Data.data.matrix }}{{- end -}}
            vault.hashicorp.com/agent-inject-secret-chat-homepage: "kv/data/atlas/shared/chat-ai-keys-runtime"
            vault.hashicorp.com/agent-inject-template-chat-homepage: |
              {{- with secret "kv/data/atlas/shared/chat-ai-keys-runtime" -}}{{ .Data.data.homepage }}{{- end -}}
            vault.hashicorp.com/agent-inject-secret-mas-admin-secret: "kv/data/atlas/comms/mas-admin-client-runtime"
            vault.hashicorp.com/agent-inject-template-mas-admin-secret: |
              {{- with secret "kv/data/atlas/comms/mas-admin-client-runtime" -}}{{ .Data.data.client_secret }}{{- end -}}
            vault.hashicorp.com/agent-inject-secret-synapse-db-pass: "kv/data/atlas/comms/synapse-db"
            vault.hashicorp.com/agent-inject-template-synapse-db-pass: |
              {{- with secret "kv/data/atlas/comms/synapse-db" -}}{{ .Data.data.POSTGRES_PASSWORD }}{{- end -}}
            vault.hashicorp.com/agent-inject-secret-mas-db-pass: "kv/data/atlas/comms/mas-db"
            vault.hashicorp.com/agent-inject-template-mas-db-pass: |
              {{- with secret "kv/data/atlas/comms/mas-db" -}}{{ .Data.data.password }}{{- end -}}
            vault.hashicorp.com/agent-inject-secret-mas-matrix-shared: "kv/data/atlas/comms/mas-secrets-runtime"
            vault.hashicorp.com/agent-inject-template-mas-matrix-shared: |
              {{- with secret "kv/data/atlas/comms/mas-secrets-runtime" -}}{{ .Data.data.matrix_shared_secret }}{{- end -}}
            vault.hashicorp.com/agent-inject-secret-mas-kc-secret: "kv/data/atlas/comms/mas-secrets-runtime"
            vault.hashicorp.com/agent-inject-template-mas-kc-secret: |
              {{- with secret "kv/data/atlas/comms/mas-secrets-runtime" -}}{{ .Data.data.keycloak_client_secret }}{{- end -}}
        spec:
          restartPolicy: Never
          serviceAccountName: comms-vault
          containers:
            - name: seed
              image: python:3.11-slim
              env:
                - name: SYNAPSE_BASE
                  value: http://othrys-synapse-matrix-synapse:8008
                - name: AUTH_BASE
                  value: http://matrix-authentication-service:8080
                - name: SEEDER_USER
                  value: othrys-seeder
                - name: BOT_USER
                  value: atlasbot
              command:
                - /bin/sh
                - -c
                - |
                  set -euo pipefail
                  . /vault/scripts/comms_vault_env.sh
                  pip install --no-cache-dir requests pyyaml >/dev/null
                  python - <<'PY'
                  import os, requests, urllib.parse

                  BASE = os.environ["SYNAPSE_BASE"]
                  AUTH_BASE = os.environ.get("AUTH_BASE", BASE)

                  def canon_user(user):
                      u = (user or "").strip()
                      if u.startswith("@") and ":" in u:
                          return u
                      u = u.lstrip("@")
                      if ":" in u:
                          return f"@{u}"
                      return f"@{u}:live.bstein.dev"

                  def login(user, password):
                      r = requests.post(f"{AUTH_BASE}/_matrix/client/v3/login", json={
                          "type": "m.login.password",
                          "identifier": {"type": "m.id.user", "user": canon_user(user)},
                          "password": password,
                      })
                      if r.status_code != 200:
                          raise SystemExit(f"login failed: {r.status_code} {r.text}")
                      return r.json()["access_token"]

                  def ensure_user(token, localpart, password, admin):
                      headers = {"Authorization": f"Bearer {token}"}
                      user_id = f"@{localpart}:live.bstein.dev"
                      url = f"{BASE}/_synapse/admin/v2/users/{urllib.parse.quote(user_id)}"
                      res = requests.get(url, headers=headers)
                      if res.status_code == 200:
                          return
                      payload = {"password": password, "admin": admin, "deactivated": False}
                      create = requests.put(url, headers=headers, json=payload)
                      if create.status_code not in (200, 201):
                          raise SystemExit(f"create user {user_id} failed: {create.status_code} {create.text}")

                  def ensure_room(token):
                      headers = {"Authorization": f"Bearer {token}"}
                      alias = "#othrys:live.bstein.dev"
                      alias_enc = "%23othrys%3Alive.bstein.dev"
                      exists = requests.get(f"{BASE}/_matrix/client/v3/directory/room/{alias_enc}", headers=headers)
                      if exists.status_code == 200:
                          room_id = exists.json()["room_id"]
                      else:
                          create = requests.post(f"{BASE}/_matrix/client/v3/createRoom", headers=headers, json={
                              "preset": "public_chat",
                              "name": "Othrys",
                              "room_alias_name": "othrys",
                              "initial_state": [],
                              "power_level_content_override": {"events_default": 0, "users_default": 0, "state_default": 50},
                          })
                          if create.status_code not in (200, 409):
                              raise SystemExit(f"create room failed: {create.status_code} {create.text}")
                          exists = requests.get(f"{BASE}/_matrix/client/v3/directory/room/{alias_enc}", headers=headers)
                          room_id = exists.json()["room_id"]
                      state_events = [
                          ("m.room.join_rules", {"join_rule": "public"}),
                          ("m.room.guest_access", {"guest_access": "can_join"}),
                          ("m.room.history_visibility", {"history_visibility": "shared"}),
                          ("m.room.canonical_alias", {"alias": alias}),
                      ]
                      for ev_type, content in state_events:
                          requests.put(f"{BASE}/_matrix/client/v3/rooms/{room_id}/state/{ev_type}", headers=headers, json=content)
                      requests.put(f"{BASE}/_matrix/client/v3/directory/list/room/{room_id}", headers=headers, json={"visibility": "public"})
                      return room_id

                  def join_user(token, room_id, user_id):
                      headers = {"Authorization": f"Bearer {token}"}
                      requests.post(f"{BASE}/_synapse/admin/v1/join/{urllib.parse.quote(room_id)}", headers=headers, json={"user_id": user_id})

                  def join_all_locals(token, room_id):
                      headers = {"Authorization": f"Bearer {token}"}
                      users = []
                      from_token = None
                      while True:
                          url = f"{BASE}/_synapse/admin/v2/users?local=true&deactivated=false&limit=100"
                          if from_token:
                              url += f"&from={from_token}"
                          res = requests.get(url, headers=headers).json()
                          users.extend([u["name"] for u in res.get("users", [])])
                          from_token = res.get("next_token")
                          if not from_token:
                              break
                      for uid in users:
                          join_user(token, room_id, uid)

                  token = login(os.environ["SEEDER_USER"], os.environ["SEEDER_PASS"])
                  ensure_user(token, os.environ["SEEDER_USER"], os.environ["SEEDER_PASS"], admin=True)
                  ensure_user(token, os.environ["BOT_USER"], os.environ["BOT_PASS"], admin=False)
                  room_id = ensure_room(token)
                  join_user(token, room_id, f"@{os.environ['BOT_USER']}:live.bstein.dev")
                  join_all_locals(token, room_id)
                  PY
              volumeMounts:
                - name: synapse-config
                  mountPath: /config
                  readOnly: true
                - name: vault-scripts
                  mountPath: /vault/scripts
                  readOnly: true
          volumes:
            - name: synapse-config
              secret:
                secretName: othrys-synapse-matrix-synapse
            - name: vault-scripts
              configMap:
                name: comms-vault-env
                defaultMode: 0555
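The canon_user helper shared by these jobs accepts a bare localpart, a user:domain pair, or a full Matrix ID and normalizes all three to the @local:domain form. A condensed, behavior-equivalent version with a quick self-check (condensation is mine, not the job's exact code):

    def canon_user(user, server="live.bstein.dev"):
        # Equivalent to the jobs' helper: strip any leading "@", then qualify
        # with the homeserver unless a domain is already present.
        u = (user or "").strip().lstrip("@")
        return f"@{u}" if ":" in u else f"@{u}:{server}"

    for raw in ("alice", "@alice", "alice:live.bstein.dev", "@alice:live.bstein.dev"):
        assert canon_user(raw) == "@alice:live.bstein.dev"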
services/finance/firefly-cronjob.yaml (56 lines, new file)
@@ -0,0 +1,56 @@
# services/finance/firefly-cronjob.yaml
apiVersion: batch/v1
kind: CronJob
metadata:
  name: firefly-cron
  namespace: finance
spec:
  schedule: "0 3 * * *"
  suspend: true
  concurrencyPolicy: Forbid
  successfulJobsHistoryLimit: 1
  failedJobsHistoryLimit: 3
  jobTemplate:
    spec:
      backoffLimit: 1
      template:
        metadata:
          annotations:
            vault.hashicorp.com/agent-inject: "true"
            vault.hashicorp.com/agent-pre-populate-only: "true"
            vault.hashicorp.com/role: "finance"
            vault.hashicorp.com/agent-inject-secret-firefly-cron-token: "kv/data/atlas/finance/firefly-secrets"
            vault.hashicorp.com/agent-inject-template-firefly-cron-token: |
              {{- with secret "kv/data/atlas/finance/firefly-secrets" -}}
              {{ .Data.data.STATIC_CRON_TOKEN }}
              {{- end -}}
        spec:
          serviceAccountName: finance-vault
          restartPolicy: Never
          affinity:
            nodeAffinity:
              preferredDuringSchedulingIgnoredDuringExecution:
                - weight: 100
                  preference:
                    matchExpressions:
                      - key: hardware
                        operator: In
                        values: ["rpi5"]
                - weight: 70
                  preference:
                    matchExpressions:
                      - key: hardware
                        operator: In
                        values: ["rpi4"]
          nodeSelector:
            kubernetes.io/arch: arm64
            node-role.kubernetes.io/worker: "true"
          containers:
            - name: cron
              image: curlimages/curl:8.5.0
              command: ["/bin/sh", "-c"]
              args:
                - |
                  set -eu
                  token="$(cat /vault/secrets/firefly-cron-token)"
                  curl -fsS "http://firefly.finance.svc.cluster.local/api/v1/cron/${token}"
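For parity with the comms jobs, the same trigger can be expressed in Python; this is only a sketch of the single call the curl container makes, reading the token from the Vault-injected file path used above:

    import requests

    # The static cron token is injected by the Vault agent at this path.
    with open("/vault/secrets/firefly-cron-token", encoding="utf-8") as f:
        token = f.read().strip()

    # Firefly III's cron endpoint is keyed by the token in the URL path.
    r = requests.get(
        f"http://firefly.finance.svc.cluster.local/api/v1/cron/{token}", timeout=60
    )
    r.raise_for_status()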
Some files were not shown because too many files have changed in this diff.