Compare commits

291 Commits

codex/tita...main
| Author | SHA1 | Date |
|---|---|---|
|  | b794e3b514 |  |
|  | 3a39d37995 |  |
|  | 8d8b3fc821 |  |
|  | b18df4caad |  |
|  | cf20efed66 |  |
|  | 6adbe457c4 |  |
|  | 0c11a64d25 |  |
|  | c79489d0b8 |  |
|  | 67253315f0 |  |
|  | fa8ab0840b |  |
|  | bf5550762e |  |
|  | 39e023e8f3 |  |
|  | fd0d748c33 |  |
|  | 77956ab811 |  |
|  | 3ea233abcb |  |
|  | 93bc3dfbe5 |  |
|  | 4ca62f6fb5 |  |
|  | 6914b92e67 |  |
|  | 613d496491 |  |
|  | 570c077190 |  |
|  | b401a4e49f |  |
|  | 559bdf2a72 |  |
|  | f3a7fe58c4 |  |
|  | 46ab392e97 |  |
|  | 352e136621 |  |
|  | 1b265f43d5 |  |
|  | ecfead7193 |  |
|  | 53f5968f8f |  |
|  | 8dadb36b97 |  |
|  | 74668938cc |  |
|  | 9def813324 |  |
|  | 6811958b52 |  |
|  | d1cdb4fd13 |  |
|  | 50580623db |  |
|  | 7340762622 |  |
|  | 2102a5ec76 |  |
|  | 850ed8abf6 |  |
|  | 05ba76ecaa |  |
|  | 6db1c3f5da |  |
|  | 63429fff1d |  |
|  | de3f7fea69 |  |
|  | 1e5ef8dbd1 |  |
|  | 385d21056a |  |
|  | 3c5fa4bbe2 |  |
|  | 58adb757c4 |  |
|  | d01cfe9066 |  |
|  | d522af7bb7 |  |
|  | e6dd39b4c7 |  |
|  | 4404454cb9 |  |
|  | 59613d500f |  |
|  | afeae15443 |  |
|  | ba0155ad3b |  |
|  | 2c048cdeda |  |
|  | f307c7f2af |  |
|  | a90d84f796 |  |
|  | dad9e4e8f2 |  |
|  | eb57c1fe0f |  |
|  | e7213d9d1c |  |
|  | 7b656dbaeb |  |
|  | 01af181442 |  |
|  | 192a36cf8a |  |
|  | 7f7dde01de |  |
|  | 32ffe30145 |  |
|  | 521eda1c00 |  |
|  | 49948621d0 |  |
|  | 28b77781d1 |  |
|  | adfbe4ed64 |  |
|  | 92fbe0ebdf |  |
|  | b0bd29696e |  |
|  | 496b933c65 |  |
|  | da7ee45366 |  |
|  | ffdc4bef36 |  |
|  | 3aaa96a673 |  |
|  | 2f1eb38551 |  |
|  | cdda5be827 |  |
|  | 52682b98f5 |  |
|  | 749fa16fca |  |
|  | b0372c41c2 |  |
|  | e96e8943c9 |  |
|  | acfaa2c3c0 |  |
|  | 7fb0be3487 |  |
|  | fd91537982 |  |
|  | a64d4cee56 |  |
|  | ba3e24548a |  |
|  | 4beb08f1cf |  |
|  | e2cbbd6963 |  |
|  | c46764e80c |  |
|  | b81053aaec |  |
|  | 9e659b790b |  |
|  | c07220253e |  |
|  | 39fb0e91e0 |  |
|  | 6243021ade |  |
|  | 4a6b54b4c3 |  |
|  | 6c816e9fad |  |
|  | 2b5c7ca10b |  |
|  | 45b145667a |  |
|  | 9fb8dd4839 |  |
|  | 6352e0d976 |  |
|  | d4ff5d482e |  |
|  | b303add71c |  |
|  | a42e61de61 |  |
|  | 6eb0158c6c |  |
|  | 0171ffad38 |  |
|  | 84934a6d1c |  |
|  | 98a2ade86d |  |
|  | 738a5184cb |  |
|  | 488c2694e3 |  |
|  | 015d99dc5f |  |
|  | b80745dc2d |  |
|  | 0fa1b38f95 |  |
|  | 49e714c88c |  |
|  | ff0b9762b1 |  |
|  | ce36ff099b |  |
|  | 6c4a7dea29 |  |
|  | 04a80c1168 |  |
|  | 8179bd85db |  |
|  | c08499b52d |  |
|  | eca9e494ad |  |
|  | ab0e68f9f3 |  |
|  | 0566a47e35 |  |
|  | 133597bfd0 |  |
|  | ccf318f977 |  |
|  | 8affc052bf |  |
|  | 0cf5043977 |  |
|  | f2ffc6c1ef |  |
|  | e7c770b10b |  |
|  | 0ac3c97f90 |  |
|  | 3e5e37d65a |  |
|  | 2acbcbff51 |  |
|  | 70b382bc80 |  |
|  | d0191361d4 |  |
|  | 59bb0bef78 |  |
|  | 4b456cf54a |  |
|  | 91c6023d25 |  |
|  | 85d15cd3e1 |  |
|  | c0a4cbf03e |  |
|  | fad895efbb |  |
|  | 47b31ebcf4 |  |
|  | 88d2225774 |  |
|  | a1f6758b95 |  |
|  | 23146aaa8a |  |
|  | cc757ba082 |  |
|  | c3c8b60671 |  |
|  | 15792b1cf3 |  |
|  | e75a5d5675 |  |
|  | 4282810602 |  |
|  | 8a58132dd4 |  |
|  | be0d3e4300 |  |
|  | ba6848a67a |  |
|  | 23beb08e5e |  |
|  | 5d560d962d |  |
|  | 51ade59a46 |  |
|  | 7f91be27f9 |  |
|  | 63cd159151 |  |
|  | 443c70d01b |  |
|  | 9f0ea1683a |  |
|  | 55df293e00 |  |
|  | 3168ffe027 |  |
|  | abdefbbd05 |  |
|  | ead503d71e |  |
|  | f54bdf8483 |  |
|  | 80cb4c257f |  |
|  | 228e8a9772 |  |
|  | 15c798b915 |  |
|  | 2ded2eb23d |  |
|  | e6bb015ef2 |  |
|  | ead7c276b4 |  |
|  | bfad9c19c5 |  |
|  | 439a44bc85 |  |
|  | 13f179d842 |  |
|  | c0e5df30d5 |  |
|  | 79fbf2644b |  |
|  | 0eca6adbbb |  |
|  | 5801633b30 |  |
|  | fac139fd0e |  |
|  | 2df830f01b |  |
|  | 26fab34de5 |  |
|  | e29d0fe349 |  |
|  | 77f7620eca |  |
|  | fb0dd60954 |  |
|  | 4401c26496 |  |
|  | 9682a17a82 |  |
|  | 55d87c0c14 |  |
|  | 379f20efc5 |  |
|  | 7883593166 |  |
|  | 5509dd86d5 |  |
|  | 06b27c9b9a |  |
|  | a927affb1f |  |
|  | fab182e91e |  |
|  | d5be9e1ae9 |  |
|  | fb48d473d2 |  |
|  | 5e5cffbdc7 |  |
|  | e1d804dbb0 |  |
|  | 2086427b72 |  |
|  | e811c0cabf |  |
|  | b68c002e2d |  |
|  | cb7e0238dc |  |
|  | 043a2e75c8 |  |
|  | 6ac375f82e |  |
|  | 8c1a26ead6 |  |
|  | d119f838e9 |  |
|  | ae2356de6a |  |
|  | c1ac36df17 |  |
|  | cc79f3ebcd |  |
|  | 1f991fc43d |  |
|  | b62980b76d |  |
|  | 26da4945ea |  |
|  | d599a162a9 |  |
|  | e53adc17b3 |  |
|  | 7cd40d457d |  |
|  | d559d03bea |  |
|  | 691dc3c71b |  |
|  | e81ecdd716 |  |
|  | 74e385ad8b |  |
|  | fecd095717 |  |
|  | caa02806c0 |  |
|  | c6c6f90d26 |  |
|  | e4efb89466 |  |
|  | 8584885ddd |  |
|  | 6aeacaf872 |  |
|  | 0146b92cc1 |  |
|  | 981fca6cb4 |  |
|  | 6dab28081d |  |
|  | 6ebc475da2 |  |
|  | fff26ebacb |  |
|  | e3bebaa10b |  |
|  | df16f03e46 |  |
|  | b5243e8566 |  |
|  | 4501bbf8f0 |  |
|  | 5331d7149a |  |
|  | c4b0389892 |  |
|  | 387e104359 |  |
|  | 5ebc320843 |  |
|  | 006f79658f |  |
|  | 9451bb9c61 |  |
|  | 655c26c589 |  |
|  | 607d8c21fa |  |
|  | b7f6cbd87c |  |
|  | a07b49a05f |  |
|  | 1d4227beec |  |
|  | 57306201cf |  |
|  | 7437ec5929 |  |
|  | 710ec96990 |  |
|  | cb1c41c6ea |  |
|  | e8823197f8 |  |
|  | c5b1302ff6 |  |
|  | f02db9801c |  |
|  | 7d113291c9 |  |
|  | 47d5416dde |  |
|  | f2c4204bab |  |
|  | 71cfdce862 |  |
|  | d4112e5a74 |  |
|  | 6d2c72ff98 |  |
|  | c8f7cd6ec2 |  |
|  | bd85143aa0 |  |
|  | cb992d1c53 |  |
|  | 7be6cfb9cb |  |
|  | b848e6b6d8 |  |
|  | 849bba8f5d |  |
|  | 86c492d8c1 |  |
|  | 1ed8b7233d |  |
|  | ddabda06bf |  |
|  | 881c724725 |  |
|  | 2db4952c39 |  |
|  | 57432e01a3 |  |
|  | 97bc0cea8c |  |
|  | e930aac039 |  |
|  | 13ec9b2d7d |  |
|  | d8f07c2b70 |  |
|  | 20a255252c |  |
|  | 376e68ec31 |  |
|  | 7497f8d4e0 |  |
|  | b3270e7231 |  |
|  | 1dce63fb9b |  |
|  | 96f3844677 |  |
|  | 65edbd9ed9 |  |
|  | 29138b8a51 |  |
|  | aede5aa899 |  |
|  | 12293c9d11 |  |
|  | 2d0360be3b |  |
|  | f9d7694f25 |  |
|  | 9e3cc0f760 |  |
|  | 32410555cd |  |
|  | 347e7ccc84 |  |
|  | e47a877169 |  |
|  | 592d037522 |  |
|  | 3ccc2a1100 |  |
|  | 9a20f4f854 |  |
|  | 9a8c454123 |  |
|  | e1f430455d |  |
|  | 01fe20fe68 |  |
Jenkinsfile (vendored) · 335
```diff
@@ -7,14 +7,51 @@ pipeline {
 apiVersion: v1
 kind: Pod
 spec:
-  serviceAccountName: "jenkins"
   nodeSelector:
     hardware: rpi5
     kubernetes.io/arch: arm64
     node-role.kubernetes.io/worker: "true"
+  affinity:
+    nodeAffinity:
+      requiredDuringSchedulingIgnoredDuringExecution:
+        nodeSelectorTerms:
+          - matchExpressions:
+              - key: kubernetes.io/hostname
+                operator: NotIn
+                values:
+                  - titan-06
+      preferredDuringSchedulingIgnoredDuringExecution:
+        - weight: 100
+          preference:
+            matchExpressions:
+              - key: kubernetes.io/hostname
+                operator: NotIn
+                values:
+                  - titan-13
+                  - titan-15
+                  - titan-17
+                  - titan-19
+  topologySpreadConstraints:
+    - maxSkew: 1
+      topologyKey: kubernetes.io/hostname
+      whenUnsatisfiable: ScheduleAnyway
+      labelSelector:
+        matchLabels:
+          jenkins/jenkins-jenkins-agent: "true"
   containers:
+    - name: jnlp
+      image: jenkins/inbound-agent:3355.v388858a_47b_33-2-jdk21
+      resources:
+        requests:
+          cpu: "25m"
+          memory: "256Mi"
     - name: python
-      image: python:3.12-slim
+      image: registry.bstein.dev/bstein/python:3.12-slim
+      command:
+        - cat
+      tty: true
+    - name: quality-tools
+      image: registry.bstein.dev/bstein/quality-tools:sonar8.0.1-trivy0.70.0-db20260422-arm64
       command:
         - cat
       tty: true
@@ -24,9 +61,21 @@ spec:
   environment {
     PIP_DISABLE_PIP_VERSION_CHECK = '1'
     PYTHONUNBUFFERED = '1'
-    SUITE_NAME = 'titan-iac'
+    SUITE_NAME = 'titan_iac'
     PUSHGATEWAY_URL = 'http://platform-quality-gateway.monitoring.svc.cluster.local:9091'
+    SONARQUBE_HOST_URL = 'http://sonarqube.quality.svc.cluster.local:9000'
+    SONARQUBE_PROJECT_KEY = 'titan_iac'
+    SONARQUBE_TOKEN = credentials('sonarqube-token')
     VM_URL = 'http://victoria-metrics-single-server.monitoring.svc.cluster.local:8428'
+    QUALITY_GATE_SONARQUBE_ENFORCE = '1'
+    QUALITY_GATE_SONARQUBE_REPORT = 'build/sonarqube-quality-gate.json'
+    QUALITY_GATE_IRONBANK_ENFORCE = '1'
+    QUALITY_GATE_IRONBANK_REQUIRED = '0'
+    QUALITY_GATE_IRONBANK_REPORT = 'build/ironbank-compliance.json'
+  }
+  options {
+    disableConcurrentBuilds()
+    buildDiscarder(logRotator(daysToKeepStr: '30', numToKeepStr: '200', artifactDaysToKeepStr: '30', artifactNumToKeepStr: '120'))
   }
   stages {
     stage('Checkout') {
@@ -36,7 +85,175 @@ spec:
     }
     stage('Install deps') {
       steps {
-        sh 'pip install --no-cache-dir -r ci/requirements.txt'
+        sh '''
+          set -eu
+          if ! command -v git >/dev/null 2>&1; then
+            apt-get update
+            apt-get install -y --no-install-recommends git ca-certificates
+            rm -rf /var/lib/apt/lists/*
+          fi
+          pip install --no-cache-dir -r ci/requirements.txt
+        '''
+      }
+    }
+    stage('Prepare local quality evidence') {
+      steps {
+        sh '''
+          set -eu
+          mkdir -p build
+          set +e
+          python3 -m testing.quality_gate --profile local --build-dir build
+          local_quality_rc=$?
+          set -e
+          printf '%s\n' "${local_quality_rc}" > build/local-quality-gate.rc
+        '''
+      }
+    }
+    stage('Collect SonarQube evidence') {
+      steps {
+        container('quality-tools') {
+          sh '''#!/usr/bin/env bash
+            set -euo pipefail
+            mkdir -p build
+            args=(
+              "-Dsonar.host.url=${SONARQUBE_HOST_URL}"
+              "-Dsonar.login=${SONARQUBE_TOKEN}"
+              "-Dsonar.projectKey=${SONARQUBE_PROJECT_KEY}"
+              "-Dsonar.projectName=${SONARQUBE_PROJECT_KEY}"
+              "-Dsonar.sources=."
+              "-Dsonar.exclusions=**/.git/**,**/build/**,**/dist/**,**/node_modules/**,**/.venv/**,**/__pycache__/**,**/coverage/**,**/test-results/**,**/playwright-report/**,services/monitoring/dashboards/**,services/monitoring/grafana-dashboard-*.yaml"
+              "-Dsonar.test.inclusions=**/tests/**,**/testing/**,**/*_test.go,**/*.test.ts,**/*.test.tsx,**/*.spec.ts,**/*.spec.tsx"
+            )
+            [ -f build/coverage-unit.xml ] && args+=("-Dsonar.python.coverage.reportPaths=build/coverage-unit.xml")
+            set +e
+            sonar-scanner "${args[@]}" | tee build/sonar-scanner.log
+            rc=${PIPESTATUS[0]}
+            set -e
+            printf '%s\n' "${rc}" > build/sonarqube-analysis.rc
+          '''
+        }
+        sh '''
+          set -eu
+          mkdir -p build
+          python3 - <<'PY'
+import base64
+import json
+import os
+import time
+import urllib.parse
+import urllib.request
+from pathlib import Path
+
+host = os.getenv('SONARQUBE_HOST_URL', '').strip().rstrip('/')
+project_key = os.getenv('SONARQUBE_PROJECT_KEY', '').strip()
+token = os.getenv('SONARQUBE_TOKEN', '').strip()
+report_path = os.getenv('QUALITY_GATE_SONARQUBE_REPORT', 'build/sonarqube-quality-gate.json')
+
+payload = {
+    "status": "ERROR",
+    "note": "missing SONARQUBE_HOST_URL and/or SONARQUBE_PROJECT_KEY",
+}
+if host and project_key:
+    task_file = Path('.scannerwork/report-task.txt')
+    task_id = ''
+    if task_file.exists():
+        for line in task_file.read_text(encoding='utf-8').splitlines():
+            key, _, value = line.partition('=')
+            if key == 'ceTaskId':
+                task_id = value.strip()
+                break
+    if task_id:
+        ce_query = urllib.parse.urlencode({"id": task_id})
+        deadline = time.monotonic() + 180
+        while time.monotonic() < deadline:
+            ce_request = urllib.request.Request(f"{host}/api/ce/task?{ce_query}", method="GET")
+            if token:
+                encoded = base64.b64encode(f"{token}:".encode("utf-8")).decode("utf-8")
+                ce_request.add_header("Authorization", f"Basic {encoded}")
+            try:
+                with urllib.request.urlopen(ce_request, timeout=12) as response:
+                    ce_payload = json.loads(response.read().decode("utf-8"))
+            except Exception:
+                time.sleep(3)
+                continue
+            status = str(ce_payload.get("task", {}).get("status", "")).upper()
+            if status in {"SUCCESS", "FAILED", "CANCELED"}:
+                break
+            time.sleep(3)
+
+    query = urllib.parse.urlencode({"projectKey": project_key})
+    request = urllib.request.Request(
+        f"{host}/api/qualitygates/project_status?{query}",
+        method="GET",
+    )
+    if token:
+        encoded = base64.b64encode(f"{token}:".encode("utf-8")).decode("utf-8")
+        request.add_header("Authorization", f"Basic {encoded}")
+    try:
+        with urllib.request.urlopen(request, timeout=12) as response:
+            payload = json.loads(response.read().decode("utf-8"))
+    except Exception as exc:  # noqa: BLE001
+        payload = {"status": "ERROR", "error": str(exc)}
+
+with open(report_path, "w", encoding="utf-8") as handle:
+    json.dump(payload, handle, indent=2, sort_keys=True)
+    handle.write("\\n")
+PY
+        '''
+      }
+    }
+    stage('Collect IronBank evidence') {
+      steps {
+        container('quality-tools') {
+          sh '''#!/usr/bin/env bash
+            set -euo pipefail
+            mkdir -p build
+            set +e
+            trivy fs --cache-dir "${TRIVY_CACHE_DIR}" --skip-db-update --skip-files clusters/atlas/flux-system/gotk-components.yaml --timeout 5m --no-progress --format json --output build/trivy-fs.json --scanners vuln,secret,misconfig --severity HIGH,CRITICAL .
+            trivy_rc=$?
+            set -e
+            if [ ! -s build/trivy-fs.json ]; then
+              cat > build/ironbank-compliance.json <<EOF
+{"status":"failed","compliant":false,"scanner":"trivy","scan_type":"filesystem","error":"trivy did not produce JSON output","trivy_rc":${trivy_rc}}
+EOF
+              exit 0
+            fi
+          '''
+        }
+        sh '''
+          set -eu
+          mkdir -p build
+          if [ -s build/trivy-fs.json ]; then
+            python3 ci/scripts/supply_chain_report.py --trivy-json build/trivy-fs.json --waivers ci/titan-iac-trivy-waivers.json --output build/ironbank-compliance.json
+            exit 0
+          fi
+          python3 - <<'PY'
+import json
+import os
+from pathlib import Path
+
+report_path = Path(os.getenv('QUALITY_GATE_IRONBANK_REPORT', 'build/ironbank-compliance.json'))
+if report_path.exists():
+    raise SystemExit(0)
+
+status = os.getenv('IRONBANK_COMPLIANCE_STATUS', '').strip()
+compliant = os.getenv('IRONBANK_COMPLIANT', '').strip().lower()
+payload = {
+    "status": status or "unknown",
+    "compliant": compliant in {"1", "true", "yes", "on"} if compliant else None,
+}
+payload = {k: v for k, v in payload.items() if v is not None}
+if "status" not in payload:
+    payload["status"] = "unknown"
+    payload["note"] = (
+        "Set IRONBANK_COMPLIANCE_STATUS/IRONBANK_COMPLIANT "
+        "or write build/ironbank-compliance.json in image-building repos."
+    )
+
+report_path.parent.mkdir(parents=True, exist_ok=True)
+report_path.write_text(json.dumps(payload, indent=2, sort_keys=True) + "\\n", encoding="utf-8")
+PY
+        '''
       }
     }
     stage('Run quality gate') {
@@ -66,8 +283,96 @@ spec:
     stage('Enforce quality gate') {
      steps {
        sh '''
-          set -eu
-          test "$(cat build/quality-gate.rc 2>/dev/null || echo 1)" -eq 0
+          set -euo pipefail
+          gate_rc="$(cat build/quality-gate.rc 2>/dev/null || echo 1)"
+          fail=0
+          if [ "${gate_rc}" -ne 0 ]; then
+            echo "quality gate failed with rc=${gate_rc}" >&2
+            fail=1
+          fi
+
+          enabled() {
+            case "$(printf '%s' "${1:-}" | tr '[:upper:]' '[:lower:]')" in
+              1|true|yes|on) return 0 ;;
+              *) return 1 ;;
+            esac
+          }
+
+          if enabled "${QUALITY_GATE_SONARQUBE_ENFORCE:-1}"; then
+            sonar_status="$(python3 - <<'PY'
+import json
+from pathlib import Path
+
+path = Path("build/sonarqube-quality-gate.json")
+if not path.exists():
+    print("missing")
+    raise SystemExit(0)
+try:
+    payload = json.loads(path.read_text(encoding="utf-8"))
+except Exception:  # noqa: BLE001
+    print("error")
+    raise SystemExit(0)
+status = (payload.get("status") or payload.get("projectStatus", {}).get("status") or payload.get("qualityGate", {}).get("status") or "").strip().lower()
+print(status or "missing")
+PY
+)"
+            case "${sonar_status}" in
+              ok|pass|passed|success) ;;
+              *)
+                echo "sonarqube gate failed: ${sonar_status}" >&2
+                fail=1
+                ;;
+            esac
+          fi
+
+          ironbank_required="${QUALITY_GATE_IRONBANK_REQUIRED:-0}"
+          if [ "${PUBLISH_IMAGES:-false}" = "true" ]; then
+            ironbank_required=1
+          fi
+          if enabled "${QUALITY_GATE_IRONBANK_ENFORCE:-1}"; then
+            supply_status="$(python3 - <<'PY'
+import json
+from pathlib import Path
+
+path = Path("build/ironbank-compliance.json")
+if not path.exists():
+    print("missing")
+    raise SystemExit(0)
+try:
+    payload = json.loads(path.read_text(encoding="utf-8"))
+except Exception:  # noqa: BLE001
+    print("error")
+    raise SystemExit(0)
+compliant = payload.get("compliant")
+if compliant is True:
+    print("ok")
+elif compliant is False:
+    print("failed")
+else:
+    status = str(payload.get("status") or payload.get("result") or payload.get("compliance") or "").strip().lower()
+    print(status or "missing")
+PY
+)"
+            case "${supply_status}" in
+              ok|pass|passed|success|compliant) ;;
+              not_applicable|na|n/a)
+                if enabled "${ironbank_required}"; then
+                  echo "supply chain gate required but status=${supply_status}" >&2
+                  fail=1
+                fi
+                ;;
+              *)
+                if enabled "${ironbank_required}"; then
+                  echo "supply chain gate failed: ${supply_status}" >&2
+                  fail=1
+                else
+                  echo "supply chain gate not passing (${supply_status}) but not required for this run" >&2
+                fi
+                ;;
+            esac
+          fi
+
+          exit "${fail}"
        '''
      }
    }
@@ -76,7 +381,7 @@ spec:
        script {
          env.FLUX_BRANCH = sh(
            returnStdout: true,
-            script: '''awk '/branch:/{print $2; exit}' clusters/atlas/flux-system/gotk-sync.yaml'''
+            script: "grep -m1 '^\\s*branch:' clusters/atlas/flux-system/gotk-sync.yaml | sed 's/^\\s*branch:\\s*//'"
          ).trim()
          if (!env.FLUX_BRANCH) {
            error('Flux branch not found in gotk-sync.yaml')
@@ -93,9 +398,22 @@ spec:
        }
      }
      steps {
-        container('jnlp') {
        withCredentials([usernamePassword(credentialsId: 'gitea-pat', usernameVariable: 'GIT_USER', passwordVariable: 'GIT_TOKEN')]) {
          sh '''
+            set -euo pipefail
+            if ! command -v git >/dev/null 2>&1; then
+              if command -v apk >/dev/null 2>&1; then
+                apk add --no-cache git >/dev/null
+              elif command -v apt-get >/dev/null 2>&1; then
+                apt-get update >/dev/null
+                apt-get install -y git >/dev/null
+              fi
+            fi
+            cd "${WORKSPACE:-$PWD}"
+            if ! git rev-parse --is-inside-work-tree >/dev/null 2>&1; then
+              echo "workspace is not a git checkout; skipping promote"
+              exit 0
+            fi
            set +x
            git config user.email "jenkins@bstein.dev"
            git config user.name "jenkins"
@@ -106,7 +424,6 @@ spec:
        }
      }
    }
-  }
  post {
    always {
      script {
```
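A note on the SonarQube calls embedded above: the heredoc authenticates by sending the token as the HTTP Basic username with an empty password. A minimal standalone sketch of that header construction, standard library only; the URL and token below are placeholders, not values from this pipeline:

```python
import base64
import urllib.request


def sonar_request(url: str, token: str) -> urllib.request.Request:
    """Build a GET request using SonarQube token auth (token as Basic username)."""
    request = urllib.request.Request(url, method="GET")
    if token:
        # SonarQube accepts "<token>:" as Basic credentials, i.e. an empty password.
        encoded = base64.b64encode(f"{token}:".encode("utf-8")).decode("utf-8")
        request.add_header("Authorization", f"Basic {encoded}")
    return request


# Placeholder values for illustration only.
req = sonar_request("http://sonarqube.example.invalid/api/ce/task?id=abc123", "squ_example")
```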
```diff
@@ -6,14 +6,51 @@ pipeline {
 apiVersion: v1
 kind: Pod
 spec:
-  serviceAccountName: "jenkins"
   nodeSelector:
     hardware: rpi5
     kubernetes.io/arch: arm64
     node-role.kubernetes.io/worker: "true"
+  affinity:
+    nodeAffinity:
+      requiredDuringSchedulingIgnoredDuringExecution:
+        nodeSelectorTerms:
+          - matchExpressions:
+              - key: kubernetes.io/hostname
+                operator: NotIn
+                values:
+                  - titan-06
+      preferredDuringSchedulingIgnoredDuringExecution:
+        - weight: 100
+          preference:
+            matchExpressions:
+              - key: kubernetes.io/hostname
+                operator: NotIn
+                values:
+                  - titan-13
+                  - titan-15
+                  - titan-17
+                  - titan-19
+  topologySpreadConstraints:
+    - maxSkew: 1
+      topologyKey: kubernetes.io/hostname
+      whenUnsatisfiable: ScheduleAnyway
+      labelSelector:
+        matchLabels:
+          jenkins/jenkins-jenkins-agent: "true"
   containers:
+    - name: jnlp
+      image: jenkins/inbound-agent:3355.v388858a_47b_33-2-jdk21
+      resources:
+        requests:
+          cpu: "25m"
+          memory: "256Mi"
     - name: python
-      image: python:3.12-slim
+      image: registry.bstein.dev/bstein/python:3.12-slim
+      command:
+        - cat
+      tty: true
+    - name: quality-tools
+      image: registry.bstein.dev/bstein/quality-tools:sonar8.0.1-trivy0.70.0-db20260422-arm64
       command:
         - cat
       tty: true
@@ -23,9 +60,21 @@ spec:
   environment {
     PIP_DISABLE_PIP_VERSION_CHECK = '1'
     PYTHONUNBUFFERED = '1'
-    SUITE_NAME = 'titan-iac'
+    SUITE_NAME = 'titan_iac'
     PUSHGATEWAY_URL = 'http://platform-quality-gateway.monitoring.svc.cluster.local:9091'
+    SONARQUBE_HOST_URL = 'http://sonarqube.quality.svc.cluster.local:9000'
+    SONARQUBE_PROJECT_KEY = 'titan_iac'
+    SONARQUBE_TOKEN = credentials('sonarqube-token')
     VM_URL = 'http://victoria-metrics-single-server.monitoring.svc.cluster.local:8428'
+    QUALITY_GATE_SONARQUBE_ENFORCE = '1'
+    QUALITY_GATE_SONARQUBE_REPORT = 'build/sonarqube-quality-gate.json'
+    QUALITY_GATE_IRONBANK_ENFORCE = '1'
+    QUALITY_GATE_IRONBANK_REQUIRED = '0'
+    QUALITY_GATE_IRONBANK_REPORT = 'build/ironbank-compliance.json'
+  }
+  options {
+    disableConcurrentBuilds()
+    buildDiscarder(logRotator(daysToKeepStr: '30', numToKeepStr: '200', artifactDaysToKeepStr: '30', artifactNumToKeepStr: '120'))
   }
   stages {
     stage('Checkout') {
@@ -35,7 +84,175 @@ spec:
     }
     stage('Install deps') {
       steps {
-        sh 'pip install --no-cache-dir -r ci/requirements.txt'
+        sh '''
+          set -eu
+          if ! command -v git >/dev/null 2>&1; then
+            apt-get update
+            apt-get install -y --no-install-recommends git ca-certificates
+            rm -rf /var/lib/apt/lists/*
+          fi
+          pip install --no-cache-dir -r ci/requirements.txt
+        '''
+      }
+    }
+    stage('Prepare local quality evidence') {
+      steps {
+        sh '''
+          set -eu
+          mkdir -p build
+          set +e
+          python3 -m testing.quality_gate --profile local --build-dir build
+          local_quality_rc=$?
+          set -e
+          printf '%s\n' "${local_quality_rc}" > build/local-quality-gate.rc
+        '''
+      }
+    }
+    stage('Collect SonarQube evidence') {
+      steps {
+        container('quality-tools') {
+          sh '''#!/usr/bin/env bash
+            set -euo pipefail
+            mkdir -p build
+            args=(
+              "-Dsonar.host.url=${SONARQUBE_HOST_URL}"
+              "-Dsonar.login=${SONARQUBE_TOKEN}"
+              "-Dsonar.projectKey=${SONARQUBE_PROJECT_KEY}"
+              "-Dsonar.projectName=${SONARQUBE_PROJECT_KEY}"
+              "-Dsonar.sources=."
+              "-Dsonar.exclusions=**/.git/**,**/build/**,**/dist/**,**/node_modules/**,**/.venv/**,**/__pycache__/**,**/coverage/**,**/test-results/**,**/playwright-report/**,services/monitoring/dashboards/**,services/monitoring/grafana-dashboard-*.yaml"
+              "-Dsonar.test.inclusions=**/tests/**,**/testing/**,**/*_test.go,**/*.test.ts,**/*.test.tsx,**/*.spec.ts,**/*.spec.tsx"
+            )
+            [ -f build/coverage-unit.xml ] && args+=("-Dsonar.python.coverage.reportPaths=build/coverage-unit.xml")
+            set +e
+            sonar-scanner "${args[@]}" | tee build/sonar-scanner.log
+            rc=${PIPESTATUS[0]}
+            set -e
+            printf '%s\n' "${rc}" > build/sonarqube-analysis.rc
+          '''
+        }
+        sh '''
+          set -eu
+          mkdir -p build
+          python3 - <<'PY'
+import base64
+import json
+import os
+import time
+import urllib.parse
+import urllib.request
+from pathlib import Path
+
+host = os.getenv('SONARQUBE_HOST_URL', '').strip().rstrip('/')
+project_key = os.getenv('SONARQUBE_PROJECT_KEY', '').strip()
+token = os.getenv('SONARQUBE_TOKEN', '').strip()
+report_path = os.getenv('QUALITY_GATE_SONARQUBE_REPORT', 'build/sonarqube-quality-gate.json')
+
+payload = {
+    "status": "ERROR",
+    "note": "missing SONARQUBE_HOST_URL and/or SONARQUBE_PROJECT_KEY",
+}
+if host and project_key:
+    task_file = Path('.scannerwork/report-task.txt')
+    task_id = ''
+    if task_file.exists():
+        for line in task_file.read_text(encoding='utf-8').splitlines():
+            key, _, value = line.partition('=')
+            if key == 'ceTaskId':
+                task_id = value.strip()
+                break
+    if task_id:
+        ce_query = urllib.parse.urlencode({"id": task_id})
+        deadline = time.monotonic() + 180
+        while time.monotonic() < deadline:
+            ce_request = urllib.request.Request(f"{host}/api/ce/task?{ce_query}", method="GET")
+            if token:
+                encoded = base64.b64encode(f"{token}:".encode("utf-8")).decode("utf-8")
+                ce_request.add_header("Authorization", f"Basic {encoded}")
+            try:
+                with urllib.request.urlopen(ce_request, timeout=12) as response:
+                    ce_payload = json.loads(response.read().decode("utf-8"))
+            except Exception:
+                time.sleep(3)
+                continue
+            status = str(ce_payload.get("task", {}).get("status", "")).upper()
+            if status in {"SUCCESS", "FAILED", "CANCELED"}:
+                break
+            time.sleep(3)
+
+    query = urllib.parse.urlencode({"projectKey": project_key})
+    request = urllib.request.Request(
+        f"{host}/api/qualitygates/project_status?{query}",
+        method="GET",
+    )
+    if token:
+        encoded = base64.b64encode(f"{token}:".encode("utf-8")).decode("utf-8")
+        request.add_header("Authorization", f"Basic {encoded}")
+    try:
+        with urllib.request.urlopen(request, timeout=12) as response:
+            payload = json.loads(response.read().decode("utf-8"))
+    except Exception as exc:  # noqa: BLE001
+        payload = {"status": "ERROR", "error": str(exc)}
+
+with open(report_path, "w", encoding="utf-8") as handle:
+    json.dump(payload, handle, indent=2, sort_keys=True)
+    handle.write("\\n")
+PY
+        '''
+      }
+    }
+    stage('Collect IronBank evidence') {
+      steps {
+        container('quality-tools') {
+          sh '''#!/usr/bin/env bash
+            set -euo pipefail
+            mkdir -p build
+            set +e
+            trivy fs --cache-dir "${TRIVY_CACHE_DIR}" --skip-db-update --skip-files clusters/atlas/flux-system/gotk-components.yaml --timeout 5m --no-progress --format json --output build/trivy-fs.json --scanners vuln,secret,misconfig --severity HIGH,CRITICAL .
+            trivy_rc=$?
+            set -e
+            if [ ! -s build/trivy-fs.json ]; then
+              cat > build/ironbank-compliance.json <<EOF
+{"status":"failed","compliant":false,"scanner":"trivy","scan_type":"filesystem","error":"trivy did not produce JSON output","trivy_rc":${trivy_rc}}
+EOF
+              exit 0
+            fi
+          '''
+        }
+        sh '''
+          set -eu
+          mkdir -p build
+          if [ -s build/trivy-fs.json ]; then
+            python3 ci/scripts/supply_chain_report.py --trivy-json build/trivy-fs.json --waivers ci/titan-iac-trivy-waivers.json --output build/ironbank-compliance.json
+            exit 0
+          fi
+          python3 - <<'PY'
+import json
+import os
+from pathlib import Path
+
+report_path = Path(os.getenv('QUALITY_GATE_IRONBANK_REPORT', 'build/ironbank-compliance.json'))
+if report_path.exists():
+    raise SystemExit(0)
+
+status = os.getenv('IRONBANK_COMPLIANCE_STATUS', '').strip()
+compliant = os.getenv('IRONBANK_COMPLIANT', '').strip().lower()
+payload = {
+    "status": status or "unknown",
+    "compliant": compliant in {"1", "true", "yes", "on"} if compliant else None,
+}
+payload = {k: v for k, v in payload.items() if v is not None}
+if "status" not in payload:
+    payload["status"] = "unknown"
+    payload["note"] = (
+        "Set IRONBANK_COMPLIANCE_STATUS/IRONBANK_COMPLIANT "
+        "or write build/ironbank-compliance.json in image-building repos."
+    )
+
+report_path.parent.mkdir(parents=True, exist_ok=True)
+report_path.write_text(json.dumps(payload, indent=2, sort_keys=True) + "\\n", encoding="utf-8")
+PY
+        '''
      }
    }
    stage('Run quality gate') {
@@ -65,8 +282,96 @@ spec:
    stage('Enforce quality gate') {
      steps {
        sh '''
-          set -eu
-          test "$(cat build/quality-gate.rc 2>/dev/null || echo 1)" -eq 0
+          set -euo pipefail
+          gate_rc="$(cat build/quality-gate.rc 2>/dev/null || echo 1)"
+          fail=0
+          if [ "${gate_rc}" -ne 0 ]; then
+            echo "quality gate failed with rc=${gate_rc}" >&2
+            fail=1
+          fi
+
+          enabled() {
+            case "$(printf '%s' "${1:-}" | tr '[:upper:]' '[:lower:]')" in
+              1|true|yes|on) return 0 ;;
+              *) return 1 ;;
+            esac
+          }
+
+          if enabled "${QUALITY_GATE_SONARQUBE_ENFORCE:-1}"; then
+            sonar_status="$(python3 - <<'PY'
+import json
+from pathlib import Path
+
+path = Path("build/sonarqube-quality-gate.json")
+if not path.exists():
+    print("missing")
+    raise SystemExit(0)
+try:
+    payload = json.loads(path.read_text(encoding="utf-8"))
+except Exception:  # noqa: BLE001
+    print("error")
+    raise SystemExit(0)
+status = (payload.get("status") or payload.get("projectStatus", {}).get("status") or payload.get("qualityGate", {}).get("status") or "").strip().lower()
+print(status or "missing")
+PY
+)"
+            case "${sonar_status}" in
+              ok|pass|passed|success) ;;
+              *)
+                echo "sonarqube gate failed: ${sonar_status}" >&2
+                fail=1
+                ;;
+            esac
+          fi
+
+          ironbank_required="${QUALITY_GATE_IRONBANK_REQUIRED:-0}"
+          if [ "${PUBLISH_IMAGES:-false}" = "true" ]; then
+            ironbank_required=1
+          fi
+          if enabled "${QUALITY_GATE_IRONBANK_ENFORCE:-1}"; then
+            supply_status="$(python3 - <<'PY'
+import json
+from pathlib import Path
+
+path = Path("build/ironbank-compliance.json")
+if not path.exists():
+    print("missing")
+    raise SystemExit(0)
+try:
+    payload = json.loads(path.read_text(encoding="utf-8"))
+except Exception:  # noqa: BLE001
+    print("error")
+    raise SystemExit(0)
+compliant = payload.get("compliant")
+if compliant is True:
+    print("ok")
+elif compliant is False:
+    print("failed")
+else:
+    status = str(payload.get("status") or payload.get("result") or payload.get("compliance") or "").strip().lower()
+    print(status or "missing")
+PY
+)"
+            case "${supply_status}" in
+              ok|pass|passed|success|compliant) ;;
+              not_applicable|na|n/a)
+                if enabled "${ironbank_required}"; then
+                  echo "supply chain gate required but status=${supply_status}" >&2
+                  fail=1
+                fi
+                ;;
+              *)
+                if enabled "${ironbank_required}"; then
+                  echo "supply chain gate failed: ${supply_status}" >&2
+                  fail=1
+                else
+                  echo "supply chain gate not passing (${supply_status}) but not required for this run" >&2
+                fi
+                ;;
+            esac
+          fi
+
+          exit "${fail}"
        '''
      }
    }
@@ -75,7 +380,7 @@ spec:
        script {
          env.FLUX_BRANCH = sh(
            returnStdout: true,
-            script: '''awk '/branch:/{print $2; exit}' clusters/atlas/flux-system/gotk-sync.yaml'''
+            script: "awk '/branch:/{print $2; exit}' clusters/atlas/flux-system/gotk-sync.yaml"
          ).trim()
          if (!env.FLUX_BRANCH) {
            error('Flux branch not found in gotk-sync.yaml')
@@ -92,9 +397,22 @@ spec:
        }
      }
      steps {
-        container('jnlp') {
        withCredentials([usernamePassword(credentialsId: 'gitea-pat', usernameVariable: 'GIT_USER', passwordVariable: 'GIT_TOKEN')]) {
          sh '''
+            set -euo pipefail
+            if ! command -v git >/dev/null 2>&1; then
+              if command -v apk >/dev/null 2>&1; then
+                apk add --no-cache git >/dev/null
+              elif command -v apt-get >/dev/null 2>&1; then
+                apt-get update >/dev/null
+                apt-get install -y git >/dev/null
+              fi
+            fi
+            cd "${WORKSPACE:-$PWD}"
+            if ! git rev-parse --is-inside-work-tree >/dev/null 2>&1; then
+              echo "workspace is not a git checkout; skipping promote"
+              exit 0
+            fi
            set +x
            git config user.email "jenkins@bstein.dev"
            git config user.name "jenkins"
@@ -105,7 +423,6 @@ spec:
        }
      }
    }
-  }
  post {
    always {
      script {
```
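Both FLUX_BRANCH extractions above, the grep|sed pipeline in the first Jenkinsfile and the re-quoted awk one-liner in this one, read the first `branch:` value out of clusters/atlas/flux-system/gotk-sync.yaml. A rough Python equivalent for local experimentation; the SAMPLE document is a hypothetical gotk-sync.yaml excerpt, not the repo's actual file:

```python
import re

# Hypothetical gotk-sync.yaml excerpt; the real file lives at
# clusters/atlas/flux-system/gotk-sync.yaml.
SAMPLE = """\
spec:
  ref:
    branch: main
  url: ssh://git@example.invalid/titan-iac.git
"""


def flux_branch(text: str) -> str:
    """Return the first 'branch:' value, mirroring awk '/branch:/{print $2; exit}'."""
    match = re.search(r"^\s*branch:\s*(\S+)", text, flags=re.MULTILINE)
    return match.group(1) if match else ""


assert flux_branch(SAMPLE) == "main"
```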
|
|||||||
@ -6,30 +6,50 @@ from __future__ import annotations
|
|||||||
import json
|
import json
|
||||||
import os
|
import os
|
||||||
from glob import glob
|
from glob import glob
|
||||||
|
from pathlib import Path
|
||||||
|
import sys
|
||||||
import urllib.error
|
import urllib.error
|
||||||
import urllib.request
|
import urllib.request
|
||||||
import xml.etree.ElementTree as ET
|
import xml.etree.ElementTree as ET
|
||||||
|
|
||||||
|
sys.path.insert(0, str(Path(__file__).resolve().parents[2]))
|
||||||
|
|
||||||
|
from ci.scripts import publish_test_metrics_quality as _quality_helpers
|
||||||
|
|
||||||
|
CANONICAL_CHECKS = _quality_helpers.CANONICAL_CHECKS
|
||||||
|
_build_check_statuses = _quality_helpers._build_check_statuses
|
||||||
|
_combine_statuses = _quality_helpers._combine_statuses
|
||||||
|
_infer_sonarqube_status = _quality_helpers._infer_sonarqube_status
|
||||||
|
_infer_source_lines_over_500 = _quality_helpers._infer_source_lines_over_500
|
||||||
|
_infer_supply_chain_status = _quality_helpers._infer_supply_chain_status
|
||||||
|
_infer_workspace_coverage_percent = _quality_helpers._infer_workspace_coverage_percent
|
||||||
|
_load_optional_json = _quality_helpers._load_optional_json
|
||||||
|
_normalize_result_status = _quality_helpers._normalize_result_status
|
||||||
|
|
||||||
|
|
||||||
def _escape_label(value: str) -> str:
|
def _escape_label(value: str) -> str:
|
||||||
|
"""Escape a Prometheus label value without changing its content."""
|
||||||
return value.replace("\\", "\\\\").replace("\n", "\\n").replace('"', '\\"')
|
return value.replace("\\", "\\\\").replace("\n", "\\n").replace('"', '\\"')
|
||||||
|
|
||||||
|
|
||||||
def _label_str(labels: dict[str, str]) -> str:
|
def _label_str(labels: dict[str, str]) -> str:
|
||||||
|
"""Render a stable Prometheus label set from a mapping."""
|
||||||
parts = [f'{key}="{_escape_label(val)}"' for key, val in labels.items() if val]
|
parts = [f'{key}="{_escape_label(val)}"' for key, val in labels.items() if val]
|
||||||
return "{" + ",".join(parts) + "}" if parts else ""
|
return "{" + ",".join(parts) + "}" if parts else ""
|
||||||
|
|
||||||
|
|
||||||
def _read_text(url: str) -> str:
|
def _read_text(url: str) -> str:
|
||||||
|
"""Fetch a plain-text response body from the given URL."""
|
||||||
with urllib.request.urlopen(url, timeout=10) as response:
|
with urllib.request.urlopen(url, timeout=10) as response:
|
||||||
return response.read().decode("utf-8")
|
return response.read().decode("utf-8")
|
||||||
|
|
||||||
|
|
||||||
def _post_text(url: str, payload: str) -> None:
|
def _post_text(url: str, payload: str) -> None:
|
||||||
|
"""PUT a plain-text payload and fail on any 4xx/5xx response."""
|
||||||
request = urllib.request.Request(
|
request = urllib.request.Request(
|
||||||
url,
|
url,
|
||||||
data=payload.encode("utf-8"),
|
data=payload.encode("utf-8"),
|
||||||
method="POST",
|
method="PUT",
|
||||||
headers={"Content-Type": "text/plain"},
|
headers={"Content-Type": "text/plain"},
|
||||||
)
|
)
|
||||||
with urllib.request.urlopen(request, timeout=10) as response:
|
with urllib.request.urlopen(request, timeout=10) as response:
|
||||||
@ -38,6 +58,7 @@ def _post_text(url: str, payload: str) -> None:
|
|||||||
|
|
||||||
|
|
||||||
def _parse_junit(path: str) -> dict[str, int]:
|
def _parse_junit(path: str) -> dict[str, int]:
|
||||||
|
"""Parse a JUnit XML file into aggregate test counters."""
|
||||||
if not os.path.exists(path):
|
if not os.path.exists(path):
|
||||||
return {"tests": 0, "failures": 0, "errors": 0, "skipped": 0}
|
return {"tests": 0, "failures": 0, "errors": 0, "skipped": 0}
|
||||||
|
|
||||||
@ -64,6 +85,7 @@ def _parse_junit(path: str) -> dict[str, int]:
|
|||||||
|
|
||||||
|
|
||||||
def _collect_junit_totals(pattern: str) -> dict[str, int]:
|
def _collect_junit_totals(pattern: str) -> dict[str, int]:
|
||||||
|
"""Sum JUnit counters across every XML file matching the pattern."""
|
||||||
totals = {"tests": 0, "failures": 0, "errors": 0, "skipped": 0}
|
totals = {"tests": 0, "failures": 0, "errors": 0, "skipped": 0}
|
||||||
for path in sorted(glob(pattern)):
|
for path in sorted(glob(pattern)):
|
||||||
parsed = _parse_junit(path)
|
parsed = _parse_junit(path)
|
||||||
@ -72,7 +94,38 @@ def _collect_junit_totals(pattern: str) -> dict[str, int]:
|
|||||||
return totals
|
return totals
|
||||||
|
|
||||||
|
|
||||||
|
def _collect_junit_cases(pattern: str) -> list[tuple[str, str]]:
|
||||||
|
"""Collect individual JUnit test-case statuses for flaky-test trend panels."""
|
||||||
|
cases: list[tuple[str, str]] = []
|
||||||
|
for path in sorted(glob(pattern)):
|
||||||
|
if not os.path.exists(path):
|
||||||
|
continue
|
||||||
|
root = ET.parse(path).getroot()
|
||||||
|
suites: list[ET.Element]
|
||||||
|
if root.tag == "testsuite":
|
||||||
|
suites = [root]
|
||||||
|
elif root.tag == "testsuites":
|
||||||
|
suites = [elem for elem in root if elem.tag == "testsuite"]
|
||||||
|
else:
|
||||||
|
suites = []
|
||||||
|
for suite in suites:
|
||||||
|
for test_case in suite.findall("testcase"):
|
||||||
|
case_name = test_case.attrib.get("name", "").strip()
|
||||||
|
class_name = test_case.attrib.get("classname", "").strip()
|
||||||
|
if not case_name:
|
||||||
|
continue
|
||||||
|
full_name = f"{class_name}.{case_name}" if class_name else case_name
|
||||||
|
status = "passed"
|
||||||
|
if test_case.find("failure") is not None or test_case.find("error") is not None:
|
||||||
|
status = "failed"
|
||||||
|
elif test_case.find("skipped") is not None:
|
||||||
|
status = "skipped"
|
||||||
|
cases.append((full_name, status))
|
||||||
|
return cases
|
||||||
|
|
||||||
|
|
||||||
def _read_exit_code(path: str) -> int:
|
def _read_exit_code(path: str) -> int:
|
||||||
|
"""Read the quality-gate exit code, defaulting to failure if missing."""
|
||||||
try:
|
try:
|
||||||
with open(path, "r", encoding="utf-8") as handle:
|
with open(path, "r", encoding="utf-8") as handle:
|
||||||
return int(handle.read().strip())
|
return int(handle.read().strip())
|
||||||
@ -81,6 +134,7 @@ def _read_exit_code(path: str) -> int:
|
|||||||
|
|
||||||
|
|
||||||
def _load_summary(path: str) -> dict:
|
def _load_summary(path: str) -> dict:
|
||||||
|
"""Load the JSON quality-gate summary, returning an empty mapping on error."""
|
||||||
try:
|
try:
|
||||||
with open(path, "r", encoding="utf-8") as handle:
|
with open(path, "r", encoding="utf-8") as handle:
|
||||||
return json.load(handle)
|
return json.load(handle)
|
||||||
@ -89,6 +143,7 @@ def _load_summary(path: str) -> dict:
|
|||||||
|
|
||||||
|
|
||||||
def _summary_float(summary: dict, key: str) -> float:
|
def _summary_float(summary: dict, key: str) -> float:
|
||||||
|
"""Extract a float-like value from the summary, defaulting to 0.0."""
|
||||||
value = summary.get(key)
|
value = summary.get(key)
|
||||||
if isinstance(value, (int, float)):
|
if isinstance(value, (int, float)):
|
||||||
return float(value)
|
return float(value)
|
||||||
@ -96,6 +151,7 @@ def _summary_float(summary: dict, key: str) -> float:
|
|||||||
|
|
||||||
|
|
||||||
def _summary_int(summary: dict, key: str) -> int:
|
def _summary_int(summary: dict, key: str) -> int:
|
||||||
|
"""Extract an int-like value from the summary, defaulting to 0."""
|
||||||
value = summary.get(key)
|
value = summary.get(key)
|
||||||
if isinstance(value, int):
|
if isinstance(value, int):
|
||||||
return value
|
return value
|
||||||
@ -105,6 +161,7 @@ def _summary_int(summary: dict, key: str) -> int:
|
|||||||
|
|
||||||
|
|
||||||
def _fetch_existing_counter(pushgateway_url: str, metric: str, labels: dict[str, str]) -> float:
|
def _fetch_existing_counter(pushgateway_url: str, metric: str, labels: dict[str, str]) -> float:
|
||||||
|
"""Return the current counter value for a labeled metric if present."""
|
||||||
text = _read_text(f"{pushgateway_url.rstrip('/')}/metrics")
|
text = _read_text(f"{pushgateway_url.rstrip('/')}/metrics")
|
||||||
for line in text.splitlines():
|
for line in text.splitlines():
|
||||||
if not line.startswith(metric + "{"):
|
if not line.startswith(metric + "{"):
|
||||||
@ -125,22 +182,34 @@ def _build_payload(
|
|||||||
suite: str,
|
suite: str,
|
||||||
status: str,
|
status: str,
|
||||||
tests: dict[str, int],
|
tests: dict[str, int],
|
||||||
|
test_cases: list[tuple[str, str]],
|
||||||
ok_count: int,
|
ok_count: int,
|
||||||
failed_count: int,
|
failed_count: int,
|
||||||
branch: str,
|
branch: str,
|
||||||
build_number: str,
|
build_number: str,
|
||||||
|
jenkins_job: str,
|
||||||
summary: dict | None = None,
|
summary: dict | None = None,
|
||||||
workspace_line_coverage_percent: float = 0.0,
|
workspace_line_coverage_percent: float = 0.0,
|
||||||
|
source_files_total: int = 0,
|
||||||
source_lines_over_500: int = 0,
|
source_lines_over_500: int = 0,
|
||||||
|
check_statuses: dict[str, str] | None = None,
|
||||||
) -> str:
|
) -> str:
|
||||||
|
"""Build the Pushgateway payload for the current suite run."""
|
||||||
passed = max(tests["tests"] - tests["failures"] - tests["errors"] - tests["skipped"], 0)
|
passed = max(tests["tests"] - tests["failures"] - tests["errors"] - tests["skipped"], 0)
|
||||||
build_labels = _label_str(
|
build_labels = _label_str(
|
||||||
{
|
{
|
||||||
"suite": suite,
|
"suite": suite,
|
||||||
"branch": branch or "unknown",
|
"branch": branch or "unknown",
|
||||||
"build_number": build_number or "unknown",
|
"build_number": build_number or "unknown",
|
||||||
|
"jenkins_job": jenkins_job or suite,
|
||||||
}
|
}
|
||||||
)
|
)
|
||||||
|
test_case_base_labels = {
|
||||||
|
"suite": suite,
|
||||||
|
"branch": branch or "unknown",
|
||||||
|
"build_number": build_number or "unknown",
|
||||||
|
"jenkins_job": jenkins_job or suite,
|
||||||
|
}
|
||||||
lines = [
|
lines = [
|
||||||
"# TYPE platform_quality_gate_runs_total counter",
|
"# TYPE platform_quality_gate_runs_total counter",
|
||||||
f'platform_quality_gate_runs_total{{suite="{suite}",status="ok"}} {ok_count}',
|
f'platform_quality_gate_runs_total{{suite="{suite}",status="ok"}} {ok_count}',
|
||||||
@ -153,43 +222,81 @@ def _build_payload(
|
|||||||
"# TYPE titan_iac_quality_gate_run_status gauge",
|
"# TYPE titan_iac_quality_gate_run_status gauge",
|
||||||
f'titan_iac_quality_gate_run_status{{suite="{suite}",status="ok"}} {1 if status == "ok" else 0}',
|
f'titan_iac_quality_gate_run_status{{suite="{suite}",status="ok"}} {1 if status == "ok" else 0}',
|
||||||
f'titan_iac_quality_gate_run_status{{suite="{suite}",status="failed"}} {1 if status == "failed" else 0}',
|
f'titan_iac_quality_gate_run_status{{suite="{suite}",status="failed"}} {1 if status == "failed" else 0}',
|
||||||
|
"# TYPE platform_quality_gate_build_info gauge",
|
||||||
|
f"platform_quality_gate_build_info{build_labels} 1",
|
||||||
"# TYPE titan_iac_quality_gate_build_info gauge",
|
"# TYPE titan_iac_quality_gate_build_info gauge",
|
||||||
f"titan_iac_quality_gate_build_info{build_labels} 1",
|
f"titan_iac_quality_gate_build_info{build_labels} 1",
|
||||||
"# TYPE platform_quality_gate_workspace_line_coverage_percent gauge",
|
"# TYPE platform_quality_gate_workspace_line_coverage_percent gauge",
|
||||||
f'platform_quality_gate_workspace_line_coverage_percent{{suite="{suite}"}} {workspace_line_coverage_percent:.3f}',
|
f'platform_quality_gate_workspace_line_coverage_percent{{suite="{suite}"}} {workspace_line_coverage_percent:.3f}',
|
||||||
|
"# TYPE platform_quality_gate_source_files_total gauge",
|
||||||
|
f'platform_quality_gate_source_files_total{{suite="{suite}"}} {source_files_total}',
|
||||||
"# TYPE platform_quality_gate_source_lines_over_500_total gauge",
|
"# TYPE platform_quality_gate_source_lines_over_500_total gauge",
|
||||||
f'platform_quality_gate_source_lines_over_500_total{{suite="{suite}"}} {source_lines_over_500}',
|
f'platform_quality_gate_source_lines_over_500_total{{suite="{suite}"}} {source_lines_over_500}',
|
||||||
]
|
]
|
||||||
results = summary.get("results", []) if isinstance(summary, dict) else []
|
if check_statuses:
|
||||||
if results:
|
|
||||||
lines.append("# TYPE titan_iac_quality_gate_checks_total gauge")
|
lines.append("# TYPE titan_iac_quality_gate_checks_total gauge")
|
||||||
for result in results:
|
for check_name in CANONICAL_CHECKS:
|
||||||
check_name = result.get("name")
|
check_status = check_statuses.get(check_name, "not_applicable")
|
||||||
check_status = result.get("status")
|
|
||||||
if not check_name or not check_status:
|
|
||||||
continue
|
|
||||||
lines.append(
|
lines.append(
|
||||||
f'titan_iac_quality_gate_checks_total{{suite="{suite}",check="{_escape_label(str(check_name))}",result="{_escape_label(str(check_status))}"}} 1'
|
f'titan_iac_quality_gate_checks_total{{suite="{suite}",check="{_escape_label(check_name)}",result="{_escape_label(check_status)}"}} 1'
|
||||||
|
)
|
||||||
|
lines.append("# TYPE platform_quality_gate_test_case_result gauge")
|
||||||
|
if test_cases:
|
||||||
|
for test_name, test_status in test_cases:
|
||||||
|
labels = {
|
||||||
|
**test_case_base_labels,
|
||||||
|
"test": test_name,
|
||||||
|
"status": test_status,
|
||||||
|
}
|
||||||
|
lines.append(
|
||||||
|
f"platform_quality_gate_test_case_result{_label_str(labels)} 1"
|
||||||
|
)
|
||||||
|
else:
|
||||||
|
labels = {**test_case_base_labels, "test": "__no_test_cases__", "status": "skipped"}
|
||||||
|
lines.append(
|
||||||
|
f"platform_quality_gate_test_case_result{_label_str(labels)} 1"
|
||||||
)
|
)
|
||||||
return "\n".join(lines) + "\n"
|
return "\n".join(lines) + "\n"
|
||||||
|
|
||||||
|
|
||||||
def main() -> int:
|
def main() -> int:
|
||||||
suite = os.getenv("SUITE_NAME", "titan-iac")
|
"""Publish the quality-gate metrics and print a compact run summary."""
|
||||||
|
suite = os.getenv("SUITE_NAME", "titan_iac")
|
||||||
pushgateway_url = os.getenv("PUSHGATEWAY_URL", "http://platform-quality-gateway.monitoring.svc.cluster.local:9091")
|
pushgateway_url = os.getenv("PUSHGATEWAY_URL", "http://platform-quality-gateway.monitoring.svc.cluster.local:9091")
|
||||||
job_name = os.getenv("QUALITY_GATE_JOB_NAME", "platform-quality-ci")
|
job_name = os.getenv("QUALITY_GATE_JOB_NAME", "platform-quality-ci")
|
||||||
junit_glob = os.getenv("JUNIT_GLOB", os.getenv("JUNIT_PATH", "build/junit-*.xml"))
|
junit_glob = os.getenv("JUNIT_GLOB", os.getenv("JUNIT_PATH", "build/junit-*.xml"))
|
||||||
exit_code_path = os.getenv("QUALITY_GATE_EXIT_CODE_PATH", os.getenv("GLUE_EXIT_CODE_PATH", "build/quality-gate.rc"))
|
exit_code_path = os.getenv("QUALITY_GATE_EXIT_CODE_PATH", os.getenv("GLUE_EXIT_CODE_PATH", "build/quality-gate.rc"))
|
||||||
summary_path = os.getenv("QUALITY_GATE_SUMMARY_PATH", "build/quality-gate-summary.json")
|
summary_path = os.getenv("QUALITY_GATE_SUMMARY_PATH", "build/quality-gate-summary.json")
|
||||||
branch = os.getenv("BRANCH_NAME", os.getenv("GIT_BRANCH", ""))
|
branch = os.getenv("BRANCH_NAME") or os.getenv("GIT_BRANCH") or "unknown"
|
||||||
|
if branch.startswith("origin/"):
|
||||||
|
branch = branch[len("origin/") :]
|
||||||
build_number = os.getenv("BUILD_NUMBER", "")
|
build_number = os.getenv("BUILD_NUMBER", "")
|
||||||
|
jenkins_job = os.getenv("JOB_NAME", "titan-iac")
|
||||||
|
|
||||||
tests = _collect_junit_totals(junit_glob)
|
     tests = _collect_junit_totals(junit_glob)
+    test_cases = _collect_junit_cases(junit_glob)
     exit_code = _read_exit_code(exit_code_path)
     status = "ok" if exit_code == 0 else "failed"
     summary = _load_summary(summary_path)
     workspace_line_coverage_percent = _summary_float(summary, "workspace_line_coverage_percent")
+    if workspace_line_coverage_percent <= 0:
+        workspace_line_coverage_percent = _infer_workspace_coverage_percent(summary, "build/coverage-unit.xml")
+    source_files_total = _summary_int(summary, "source_files_total")
     source_lines_over_500 = _summary_int(summary, "source_lines_over_500")
+    if source_lines_over_500 <= 0:
+        source_lines_over_500 = _infer_source_lines_over_500(summary)
+    sonarqube_report = _load_optional_json(os.getenv("QUALITY_GATE_SONARQUBE_REPORT", "build/sonarqube-quality-gate.json"))
+    supply_chain_report = _load_optional_json(os.getenv("QUALITY_GATE_IRONBANK_REPORT", "build/ironbank-compliance.json"))
+    supply_chain_required = os.getenv("QUALITY_GATE_IRONBANK_REQUIRED", "0").strip().lower() in {"1", "true", "yes", "on"}
+    check_statuses = _build_check_statuses(
+        summary=summary,
+        tests=tests,
+        workspace_line_coverage_percent=workspace_line_coverage_percent,
+        source_lines_over_500=source_lines_over_500,
+        sonarqube_report=sonarqube_report,
+        supply_chain_report=supply_chain_report,
+        supply_chain_required=supply_chain_required,
+    )
+
     ok_count = int(
         _fetch_existing_counter(
@@ -214,13 +321,17 @@ def main() -> int:
         suite=suite,
         status=status,
         tests=tests,
+        test_cases=test_cases,
         ok_count=ok_count,
         failed_count=failed_count,
         branch=branch,
         build_number=build_number,
+        jenkins_job=jenkins_job,
         summary=summary,
         workspace_line_coverage_percent=workspace_line_coverage_percent,
+        source_files_total=source_files_total,
         source_lines_over_500=source_lines_over_500,
+        check_statuses=check_statuses,
     )
     push_url = f"{pushgateway_url.rstrip('/')}/metrics/job/{job_name}/suite/{suite}"
     _post_text(push_url, payload)
@@ -234,13 +345,14 @@ def main() -> int:
         "tests_skipped": tests["skipped"],
         "ok_count": ok_count,
         "failed_count": failed_count,
-        "checks_recorded": len(summary.get("results", [])) if isinstance(summary, dict) else 0,
+        "checks_recorded": len(check_statuses),
         "workspace_line_coverage_percent": workspace_line_coverage_percent,
+        "source_files_total": source_files_total,
         "source_lines_over_500": source_lines_over_500,
     }
     print(json.dumps(summary, sort_keys=True))
     return 0


-if __name__ == "__main__":
+if __name__ == "__main__":  # pragma: no cover
     raise SystemExit(main())
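For reference, the hunks above consume the JUnit totals mapping through four keys: tests["tests"], tests["failures"], tests["errors"], tests["skipped"]. A minimal sketch of that assumed shape (the counts below are illustrative, not from a real build):

    # Hypothetical totals dict; only the four keys read above matter.
    tests = {"tests": 128, "failures": 0, "errors": 0, "skipped": 3}
    # Mirrors the fallback in _build_check_statuses when no named "tests" check exists.
    tests_status = "ok" if (tests["failures"] + tests["errors"]) == 0 else "failed"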
ci/scripts/publish_test_metrics_quality.py (new file, 200 lines)
@@ -0,0 +1,200 @@
#!/usr/bin/env python3
"""Quality/status helpers for publish_test_metrics."""

from __future__ import annotations

import json
from pathlib import Path
import xml.etree.ElementTree as ET

SUCCESS_STATUSES = {"ok", "pass", "passed", "success", "compliant"}
NOT_APPLICABLE_STATUSES = {"not_applicable", "n/a", "na", "none", "skipped"}
FAILED_STATUSES = {"failed", "fail", "error", "errors", "warn", "warning", "red"}

CANONICAL_CHECKS = [
    "tests",
    "coverage",
    "loc",
    "docs_naming",
    "gate_glue",
    "sonarqube",
    "supply_chain",
]


def _infer_workspace_coverage_percent(summary: dict, default_xml: str) -> float:
    """Infer workspace line coverage from quality summary coverage XML metadata."""
    results = summary.get("results", []) if isinstance(summary, dict) else []
    coverage_xml = default_xml
    for result in results:
        if not isinstance(result, dict):
            continue
        if str(result.get("name") or "").strip().lower() != "coverage":
            continue
        candidate = str(result.get("coverage_xml") or "").strip()
        if candidate:
            coverage_xml = candidate
            break
    xml_path = Path(coverage_xml)
    if not xml_path.exists():
        return 0.0
    try:
        root = ET.parse(xml_path).getroot()
        line_rate = root.attrib.get("line-rate")
        if line_rate is None:
            return 0.0
        return float(line_rate) * 100.0
    except (ET.ParseError, OSError, ValueError):
        return 0.0


def _infer_source_lines_over_500(summary: dict) -> int:
    """Infer over-limit source file count from hygiene issue payloads."""
    results = summary.get("results", []) if isinstance(summary, dict) else []
    for result in results:
        if not isinstance(result, dict):
            continue
        if str(result.get("name") or "").strip().lower() not in {"hygiene", "loc", "smell"}:
            continue
        issues = result.get("issues")
        if not isinstance(issues, list):
            continue
        return sum(1 for item in issues if isinstance(item, str) and item.startswith("file exceeds"))
    return 0


def _normalize_result_status(value: str | None, default: str = "failed") -> str:
    """Map arbitrary check status text into canonical check result buckets."""
    if not value:
        return default
    normalized = value.strip().lower()
    if normalized in SUCCESS_STATUSES:
        return "ok"
    if normalized in NOT_APPLICABLE_STATUSES:
        return "not_applicable"
    if normalized in FAILED_STATUSES:
        return "failed"
    return default


def _load_optional_json(path: str | None) -> dict:
    """Load an optional JSON report file, returning an empty object when absent."""
    if not path:
        return {}
    candidate = Path(path)
    if not candidate.exists():
        return {}
    try:
        return json.loads(candidate.read_text(encoding="utf-8"))
    except json.JSONDecodeError:
        return {}


def _combine_statuses(statuses: list[str]) -> str:
    """Roll up many check statuses into one canonical result."""
    if not statuses:
        return "not_applicable"
    if any(status == "failed" for status in statuses):
        return "failed"
    if all(status == "not_applicable" for status in statuses):
        return "not_applicable"
    if all(status in {"ok", "not_applicable"} for status in statuses):
        return "ok"
    return "failed"


def _infer_sonarqube_status(report: dict) -> str:
    """Infer canonical SonarQube check status from its JSON report payload."""
    if not report:
        return "not_applicable"
    status = (
        report.get("projectStatus", {}).get("status")
        or report.get("qualityGate", {}).get("status")
        or report.get("status")
    )
    return _normalize_result_status(str(status) if status is not None else None, default="failed")


def _infer_supply_chain_status(report: dict, required: bool) -> str:
    """Infer canonical supply-chain status from IronBank/artifact report payload."""
    if not report:
        return "failed" if required else "not_applicable"
    compliant = report.get("compliant")
    if isinstance(compliant, bool):
        return "ok" if compliant else "failed"
    status = report.get("status")
    if status is None:
        return "failed" if required else "not_applicable"
    normalized = _normalize_result_status(str(status), default="failed")
    if normalized == "not_applicable" and required:
        return "failed"
    return normalized


def _build_check_statuses(
    summary: dict | None,
    tests: dict[str, int],
    workspace_line_coverage_percent: float,
    source_lines_over_500: int,
    sonarqube_report: dict,
    supply_chain_report: dict,
    supply_chain_required: bool,
) -> dict[str, str]:
    """Generate the canonical quality-check status map for dashboarding."""
    raw_results = summary.get("results", []) if isinstance(summary, dict) else []
    status_by_name: dict[str, str] = {}
    for result in raw_results:
        if not isinstance(result, dict):
            continue
        check_name = str(result.get("name") or "").strip().lower()
        if not check_name:
            continue
        status_by_name[check_name] = _normalize_result_status(result.get("status"), default="failed")

    tests_status = status_by_name.get("tests")
    if not tests_status:
        candidate_keys = ["unit", "integration", "e2e", "pytest", "test", "tests"]
        candidates = [status_by_name[key] for key in candidate_keys if key in status_by_name]
        if candidates:
            tests_status = _combine_statuses(candidates)
        elif tests["tests"] > 0:
            tests_status = "ok" if (tests["failures"] + tests["errors"]) == 0 else "failed"
        else:
            tests_status = "not_applicable"

    coverage_status = status_by_name.get("coverage")
    if not coverage_status:
        if workspace_line_coverage_percent > 0:
            coverage_status = "ok" if workspace_line_coverage_percent >= 95.0 else "failed"
        else:
            coverage_status = "not_applicable"

    loc_status = status_by_name.get("loc")
    if not loc_status:
        loc_status = "ok" if source_lines_over_500 == 0 else "failed"

    docs_naming_status = status_by_name.get("docs_naming")
    if not docs_naming_status:
        candidates = [status_by_name[key] for key in ["docs", "hygiene", "smell", "lint", "naming"] if key in status_by_name]
        docs_naming_status = _combine_statuses(candidates) if candidates else "not_applicable"

    gate_glue_status = status_by_name.get("gate_glue")
    if not gate_glue_status:
        candidates = [status_by_name[key] for key in ["gate_glue", "glue", "gate"] if key in status_by_name]
        gate_glue_status = _combine_statuses(candidates) if candidates else "not_applicable"

    sonarqube_status = status_by_name.get("sonarqube") or _infer_sonarqube_status(sonarqube_report)
    supply_chain_status = status_by_name.get("supply_chain") or _infer_supply_chain_status(
        supply_chain_report,
        required=supply_chain_required,
    )

    return {
        "tests": tests_status,
        "coverage": coverage_status,
        "loc": loc_status,
        "docs_naming": docs_naming_status,
        "gate_glue": gate_glue_status,
        "sonarqube": sonarqube_status,
        "supply_chain": supply_chain_status,
    }
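A small usage sketch of the rollup helpers above; it assumes the file is importable as publish_test_metrics_quality (matching its filename):

    import publish_test_metrics_quality as q

    # Arbitrary status text is normalized into three buckets first.
    assert q._normalize_result_status("PASS") == "ok"
    assert q._normalize_result_status("warning") == "failed"

    # Rollup: any failure wins, all-n/a stays n/a, ok plus n/a is ok.
    assert q._combine_statuses(["ok", "failed"]) == "failed"
    assert q._combine_statuses(["not_applicable", "not_applicable"]) == "not_applicable"
    assert q._combine_statuses(["ok", "not_applicable"]) == "ok"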
ci/scripts/supply_chain_report.py (new file, 173 lines)
@@ -0,0 +1,173 @@
"""Build a titan-iac supply-chain compliance report from Trivy evidence."""

from __future__ import annotations

import argparse
import datetime as dt
import json
from pathlib import Path
from typing import Any


FAIL_SEVERITIES = {"HIGH", "CRITICAL"}


def _read_json(path: Path) -> dict[str, Any]:
    """Read a JSON object from disk for use as pipeline evidence."""
    payload = json.loads(path.read_text(encoding="utf-8"))
    if not isinstance(payload, dict):
        raise ValueError(f"{path} must contain a JSON object")
    return payload


def _parse_day(raw: str | None) -> dt.date | None:
    """Parse an ISO day while letting optional waiver dates stay optional."""
    if not raw:
        return None
    return dt.date.fromisoformat(raw)


def _today(override: str | None = None) -> dt.date:
    """Return the policy day so tests can pin expiry behavior."""
    return _parse_day(override) or dt.date.today()


def _load_waiver_pairs(path: Path | None, policy_day: dt.date) -> tuple[set[tuple[str, str]], int]:
    """Return active ``(misconfiguration id, target)`` waivers and expired count."""
    if path is None or not path.exists():
        return set(), 0

    payload = _read_json(path)
    default_expires_at = payload.get("default_expires_at")
    active: set[tuple[str, str]] = set()
    expired = 0

    for entry in payload.get("misconfigurations", []):
        if not isinstance(entry, dict):
            continue
        misconfiguration_id = str(entry.get("id") or "").strip()
        if not misconfiguration_id:
            continue
        expires_at = _parse_day(str(entry.get("expires_at") or default_expires_at or ""))
        targets = entry.get("targets", [])
        if not isinstance(targets, list):
            continue

        if expires_at and expires_at < policy_day:
            expired += len(targets)
            continue

        # Waivers are target-specific so a new unsafe manifest fails until it is
        # either fixed or deliberately accepted with a fresh expiration.
        for target in targets:
            if isinstance(target, str) and target:
                active.add((misconfiguration_id, target))

    return active, expired


def _iter_failed_misconfigurations(payload: dict[str, Any]):
    """Yield failed high/critical Trivy misconfiguration records."""
    for result in payload.get("Results", []):
        if not isinstance(result, dict):
            continue
        target = str(result.get("Target") or "")
        for item in result.get("Misconfigurations") or []:
            if not isinstance(item, dict):
                continue
            if item.get("Status") != "FAIL":
                continue
            if str(item.get("Severity") or "").upper() not in FAIL_SEVERITIES:
                continue
            yield target, item


def _count_vulnerabilities(payload: dict[str, Any], severity: str) -> int:
    """Count Trivy vulnerabilities at a specific severity."""
    count = 0
    for result in payload.get("Results", []):
        if not isinstance(result, dict):
            continue
        for item in result.get("Vulnerabilities") or []:
            if isinstance(item, dict) and str(item.get("Severity") or "").upper() == severity:
                count += 1
    return count


def _count_secrets(payload: dict[str, Any]) -> int:
    """Count detected secrets in the Trivy filesystem report."""
    count = 0
    for result in payload.get("Results", []):
        if isinstance(result, dict):
            count += len(result.get("Secrets") or [])
    return count


def build_report(
    trivy_payload: dict[str, Any],
    waiver_path: Path | None = None,
    today_override: str | None = None,
) -> dict[str, Any]:
    """Build the compliance summary consumed by the quality gate."""
    policy_day = _today(today_override)
    active_waivers, expired_waivers = _load_waiver_pairs(waiver_path, policy_day)

    open_misconfigs: list[dict[str, str]] = []
    waived_misconfigs = 0
    for target, item in _iter_failed_misconfigurations(trivy_payload):
        misconfiguration_id = str(item.get("ID") or "")
        if (misconfiguration_id, target) in active_waivers:
            waived_misconfigs += 1
            continue
        open_misconfigs.append(
            {
                "id": misconfiguration_id,
                "target": target,
                "severity": str(item.get("Severity") or ""),
                "title": str(item.get("Title") or ""),
            }
        )

    critical = _count_vulnerabilities(trivy_payload, "CRITICAL")
    high = _count_vulnerabilities(trivy_payload, "HIGH")
    secrets = _count_secrets(trivy_payload)
    status = "ok" if critical == 0 and secrets == 0 and not open_misconfigs else "failed"

    return {
        "status": status,
        "compliant": status == "ok",
        "category": "artifact_security",
        "scan_type": "filesystem",
        "scanner": "trivy",
        "critical_vulnerabilities": critical,
        "high_vulnerabilities": high,
        "high_vulnerability_policy": "observe",
        "secrets": secrets,
        "high_or_critical_misconfigurations": len(open_misconfigs),
        "waived_misconfigurations": waived_misconfigs,
        "expired_waivers": expired_waivers,
        "waiver_file": str(waiver_path) if waiver_path else "",
        "open_misconfiguration_examples": open_misconfigs[:20],
    }


def main(argv: list[str] | None = None) -> int:
    """CLI entrypoint used by Jenkins after the Trivy scan completes."""
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument("--trivy-json", required=True)
    parser.add_argument("--waivers")
    parser.add_argument("--output", required=True)
    parser.add_argument("--today")
    args = parser.parse_args(argv)

    trivy_payload = _read_json(Path(args.trivy_json))
    waiver_path = Path(args.waivers) if args.waivers else None
    report = build_report(trivy_payload, waiver_path=waiver_path, today_override=args.today)
    output_path = Path(args.output)
    output_path.parent.mkdir(parents=True, exist_ok=True)
    output_path.write_text(json.dumps(report, indent=2, sort_keys=True) + "\n", encoding="utf-8")
    return 0


if __name__ == "__main__":  # pragma: no cover
    raise SystemExit(main())
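A hedged sketch of build_report with a hand-written, Trivy-shaped payload (the target and finding below are illustrative, not real scan output); it assumes the file is importable as supply_chain_report:

    from supply_chain_report import build_report

    payload = {
        "Results": [
            {
                "Target": "services/example/deployment.yaml",  # hypothetical target
                "Misconfigurations": [
                    {"ID": "KSV-0014", "Status": "FAIL", "Severity": "HIGH", "Title": "example finding"},
                ],
            }
        ]
    }

    # Without a waiver file the HIGH misconfiguration stays open, so the report fails.
    report = build_report(payload)
    assert report["status"] == "failed"
    assert report["high_or_critical_misconfigurations"] == 1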
ci/tests/glue/test_ariadne_schedules.py (new file, 108 lines)
@@ -0,0 +1,108 @@
"""Glue checks for Ariadne schedules exported to VictoriaMetrics."""

from __future__ import annotations

import os
from datetime import datetime, timezone
from pathlib import Path

import requests
import yaml


CONFIG_PATH = Path(__file__).with_name("config.yaml")


def _load_config() -> dict:
    with CONFIG_PATH.open("r", encoding="utf-8") as handle:
        return yaml.safe_load(handle) or {}


def _query(promql: str) -> list[dict]:
    vm_url = os.environ.get("VM_URL", "http://victoria-metrics-single-server:8428").rstrip("/")
    response = requests.get(f"{vm_url}/api/v1/query", params={"query": promql}, timeout=10)
    response.raise_for_status()
    payload = response.json()
    return payload.get("data", {}).get("result", [])


def _expected_tasks() -> list[dict]:
    cfg = _load_config()
    tasks = [
        _normalize_task(item, cfg)
        for item in cfg.get("ariadne_schedule_tasks", [])
    ]
    assert tasks, "No Ariadne schedule tasks configured"
    return tasks


def _normalize_task(item: object, cfg: dict) -> dict:
    if isinstance(item, str):
        return {
            "task": item,
            "check_last_success": True,
            "max_success_age_hours": cfg.get("max_success_age_hours", 48),
        }
    if isinstance(item, dict):
        normalized = dict(item)
        normalized.setdefault("check_last_success", True)
        normalized.setdefault("max_success_age_hours", cfg.get("max_success_age_hours", 48))
        return normalized
    raise TypeError(f"Unsupported Ariadne schedule task config entry: {item!r}")


def _tracked_tasks(tasks: list[dict]) -> list[dict]:
    tracked = [item for item in tasks if item.get("check_last_success")]
    assert tracked, "No Ariadne schedule tasks are marked for success tracking"
    return tracked


def _task_regex(tasks: list[dict]) -> str:
    return "|".join(item["task"] for item in tasks)


def test_ariadne_schedule_series_exist():
    tasks = _expected_tasks()
    selector = _task_regex(tasks)
    series = _query(f'ariadne_schedule_next_run_timestamp_seconds{{task=~"{selector}"}}')
    seen = {item.get("metric", {}).get("task") for item in series}
    missing = [item["task"] for item in tasks if item["task"] not in seen]
    assert not missing, f"Missing next-run metrics for: {', '.join(missing)}"


def test_ariadne_schedule_recent_success():
    tasks = _tracked_tasks(_expected_tasks())
    selector = _task_regex(tasks)
    series = _query(f'ariadne_schedule_last_success_timestamp_seconds{{task=~"{selector}"}}')
    seen = {item.get("metric", {}).get("task") for item in series}
    missing = [item["task"] for item in tasks if item["task"] not in seen]
    assert not missing, f"Missing last-success metrics for: {', '.join(missing)}"

    now = datetime.now(timezone.utc)
    age_by_task = {
        item.get("metric", {}).get("task"): (now - datetime.fromtimestamp(float(item["value"][1]), tz=timezone.utc)).total_seconds() / 3600
        for item in series
    }
    too_old = [
        f"{task} ({age_by_task[task]:.1f}h > {item['max_success_age_hours']}h)"
        for item in tasks
        if (task := item["task"]) in age_by_task and age_by_task[task] > float(item["max_success_age_hours"])
    ]
    assert not too_old, "Ariadne schedules are stale: " + ", ".join(too_old)


def test_ariadne_schedule_last_status_present_and_boolean():
    tasks = _tracked_tasks(_expected_tasks())
    selector = _task_regex(tasks)
    series = _query(f'ariadne_schedule_last_status{{task=~"{selector}"}}')
    seen = {item.get("metric", {}).get("task") for item in series}
    missing = [item["task"] for item in tasks if item["task"] not in seen]
    assert not missing, f"Missing last-status metrics for: {', '.join(missing)}"

    invalid = []
    for item in series:
        task = item.get("metric", {}).get("task")
        value = float(item["value"][1])
        if value not in (0.0, 1.0):
            invalid.append(f"{task}={value}")
    assert not invalid, f"Unexpected Ariadne last-status values: {', '.join(invalid)}"
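The config entries may be bare task names or dicts; a short sketch of how _normalize_task folds both shapes, with the helpers above in scope (task names here are hypothetical):

    cfg = {"max_success_age_hours": 24}

    # A bare string picks up the config-level default age.
    assert _normalize_task("nightly-backup", cfg) == {
        "task": "nightly-backup",
        "check_last_success": True,
        "max_success_age_hours": 24,
    }

    # A dict keeps its own overrides and only fills in what is missing.
    assert _normalize_task({"task": "report-job", "max_success_age_hours": 6}, cfg) == {
        "task": "report-job",
        "check_last_success": True,
        "max_success_age_hours": 6,
    }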
@@ -1,3 +1,5 @@
+"""Glue checks for the metrics the quality-gate publishes."""
+
 from __future__ import annotations

 import os
@@ -23,26 +25,63 @@ def _query(promql: str) -> list[dict]:
     return payload.get("data", {}).get("result", [])


-def test_glue_metrics_present():
-    series = _query('kube_cronjob_labels{label_atlas_bstein_dev_glue="true"}')
-    assert series, "No glue cronjob label series found"
-
-
-def test_glue_metrics_success_join():
-    query = (
-        "kube_cronjob_status_last_successful_time "
-        'and on(namespace,cronjob) kube_cronjob_labels{label_atlas_bstein_dev_glue="true"}'
-    )
-    series = _query(query)
-    assert series, "No glue cronjob last success series found"
+def _expected_tasks() -> list[dict]:
+    cfg = _load_config()
+    tasks = [
+        _normalize_task(item, cfg)
+        for item in cfg.get("ariadne_schedule_tasks", [])
+    ]
+    assert tasks, "No Ariadne schedule tasks configured"
+    return tasks
+
+
+def _normalize_task(item: object, cfg: dict) -> dict:
+    if isinstance(item, str):
+        return {
+            "task": item,
+            "check_last_success": True,
+            "max_success_age_hours": cfg.get("max_success_age_hours", 48),
+        }
+    if isinstance(item, dict):
+        normalized = dict(item)
+        normalized.setdefault("check_last_success", True)
+        normalized.setdefault("max_success_age_hours", cfg.get("max_success_age_hours", 48))
+        return normalized
+    raise TypeError(f"Unsupported Ariadne schedule task config entry: {item!r}")
+
+
+def _tracked_tasks(tasks: list[dict]) -> list[dict]:
+    tracked = [item for item in tasks if item.get("check_last_success")]
+    assert tracked, "No Ariadne schedule tasks are marked for success tracking"
+    return tracked
+
+
+def _task_regex(tasks: list[dict]) -> str:
+    return "|".join(item["task"] for item in tasks)


 def test_ariadne_schedule_metrics_present():
-    cfg = _load_config()
-    expected = cfg.get("ariadne_schedule_tasks", [])
-    if not expected:
-        return
-    series = _query("ariadne_schedule_next_run_timestamp_seconds")
-    tasks = {item.get("metric", {}).get("task") for item in series}
-    missing = [task for task in expected if task not in tasks]
+    tasks = _expected_tasks()
+    selector = _task_regex(tasks)
+    series = _query(f'ariadne_schedule_next_run_timestamp_seconds{{task=~"{selector}"}}')
+    seen = {item.get("metric", {}).get("task") for item in series}
+    missing = [item["task"] for item in tasks if item["task"] not in seen]
     assert not missing, f"Missing Ariadne schedule metrics for: {', '.join(missing)}"
+
+
+def test_ariadne_schedule_success_and_status_metrics_present():
+    tasks = _tracked_tasks(_expected_tasks())
+    selector = _task_regex(tasks)
+
+    success = _query(f'ariadne_schedule_last_success_timestamp_seconds{{task=~"{selector}"}}')
+    status = _query(f'ariadne_schedule_last_status{{task=~"{selector}"}}')
+
+    success_tasks = {item.get("metric", {}).get("task") for item in success}
+    status_tasks = {item.get("metric", {}).get("task") for item in status}
+    expected = {item["task"] for item in tasks}
+
+    missing_success = sorted(expected - success_tasks)
+    missing_status = sorted(expected - status_tasks)
+
+    assert not missing_success, f"Missing Ariadne success metrics for: {', '.join(missing_success)}"
+    assert not missing_status, f"Missing Ariadne status metrics for: {', '.join(missing_status)}"
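The selector built by _task_regex is a plain alternation, so one query covers every configured task; with hypothetical tasks "backup" and "report" the rendered PromQL looks like this:

    selector = "|".join(["backup", "report"])  # -> "backup|report"
    promql = f'ariadne_schedule_next_run_timestamp_seconds{{task=~"{selector}"}}'
    # ariadne_schedule_next_run_timestamp_seconds{task=~"backup|report"}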
ci/titan-iac-trivy-waivers.json (new file, 407 lines)
@@ -0,0 +1,407 @@
{
  "version": 1,
  "generated_from": "Jenkins titan-iac build 225 Trivy filesystem scan",
  "default_expires_at": "2026-05-22",
  "ticket": "atlas-quality-wave-k8s-hardening",
  "default_reason": "Existing Kubernetes manifest hardening baseline accepted only for the first quality-gate rollout; fix or renew explicitly before expiry.",
  "misconfigurations": [
    {
      "id": "DS-0002",
      "targets": [
        "dockerfiles/Dockerfile.ananke-node-helper"
      ]
    },
    {
      "id": "KSV-0009",
      "targets": [
        "services/mailu/vip-controller.yaml",
        "services/maintenance/k3s-agent-restart-daemonset.yaml"
      ]
    },
    {
      "id": "KSV-0010",
      "targets": [
        "services/maintenance/k3s-agent-restart-daemonset.yaml",
        "services/maintenance/metis-sentinel-amd64-daemonset.yaml",
        "services/maintenance/metis-sentinel-arm64-daemonset.yaml",
        "services/monitoring/jetson-tegrastats-exporter.yaml"
      ]
    },
    {
      "id": "KSV-0014",
      "targets": [
        "infrastructure/cert-manager/cleanup/cert-manager-cleanup-job.yaml",
        "infrastructure/core/node-prefer-noschedule-cronjob.yaml",
        "infrastructure/core/ntp-sync-daemonset.yaml",
        "infrastructure/longhorn/adopt/longhorn-helm-adopt-job.yaml",
        "infrastructure/longhorn/core/longhorn-disk-tags-ensure-job.yaml",
        "infrastructure/longhorn/core/longhorn-settings-ensure-job.yaml",
        "infrastructure/longhorn/core/vault-sync-deployment.yaml",
        "infrastructure/longhorn/ui-ingress/oauth2-proxy-longhorn.yaml",
        "infrastructure/modules/profiles/components/device-plugin-jetson/daemonset.yaml",
        "infrastructure/modules/profiles/components/device-plugin-minipc/daemonset.yaml",
        "infrastructure/modules/profiles/components/device-plugin-tethys/daemonset.yaml",
        "infrastructure/postgres/statefulset.yaml",
        "infrastructure/vault-csi/vault-csi-provider.yaml",
        "services/ai-llm/deployment.yaml",
        "services/bstein-dev-home/backend-deployment.yaml",
        "services/bstein-dev-home/chat-ai-gateway-deployment.yaml",
        "services/bstein-dev-home/frontend-deployment.yaml",
        "services/bstein-dev-home/oneoffs/migrations/portal-migrate-job.yaml",
        "services/bstein-dev-home/oneoffs/portal-onboarding-e2e-test-job.yaml",
        "services/bstein-dev-home/vault-sync-deployment.yaml",
        "services/bstein-dev-home/vaultwarden-cred-sync-cronjob.yaml",
        "services/comms/atlasbot-deployment.yaml",
        "services/comms/coturn.yaml",
        "services/comms/element-call-deployment.yaml",
        "services/comms/guest-name-job.yaml",
        "services/comms/guest-register-deployment.yaml",
        "services/comms/livekit-token-deployment.yaml",
        "services/comms/livekit.yaml",
        "services/comms/mas-deployment.yaml",
        "services/comms/oneoffs/bstein-force-leave-job.yaml",
        "services/comms/oneoffs/comms-secrets-ensure-job.yaml",
        "services/comms/oneoffs/mas-admin-client-secret-ensure-job.yaml",
        "services/comms/oneoffs/mas-db-ensure-job.yaml",
        "services/comms/oneoffs/mas-local-users-ensure-job.yaml",
        "services/comms/oneoffs/othrys-kick-numeric-job.yaml",
        "services/comms/oneoffs/synapse-admin-ensure-job.yaml",
        "services/comms/oneoffs/synapse-seeder-admin-ensure-job.yaml",
        "services/comms/oneoffs/synapse-signingkey-ensure-job.yaml",
        "services/comms/oneoffs/synapse-user-seed-job.yaml",
        "services/comms/pin-othrys-job.yaml",
        "services/comms/reset-othrys-room-job.yaml",
        "services/comms/seed-othrys-room.yaml",
        "services/comms/vault-sync-deployment.yaml",
        "services/comms/wellknown.yaml",
        "services/crypto/monerod/deployment.yaml",
        "services/crypto/wallet-monero-temp/deployment.yaml",
        "services/crypto/xmr-miner/deployment.yaml",
        "services/crypto/xmr-miner/vault-sync-deployment.yaml",
        "services/crypto/xmr-miner/xmrig-daemonset.yaml",
        "services/finance/actual-budget-deployment.yaml",
        "services/finance/firefly-cronjob.yaml",
        "services/finance/firefly-deployment.yaml",
        "services/finance/firefly-user-sync-cronjob.yaml",
        "services/finance/oneoffs/finance-secrets-ensure-job.yaml",
        "services/gitea/deployment.yaml",
        "services/harbor/vault-sync-deployment.yaml",
        "services/health/wger-admin-ensure-cronjob.yaml",
        "services/health/wger-deployment.yaml",
        "services/health/wger-user-sync-cronjob.yaml",
        "services/jellyfin/deployment.yaml",
        "services/jellyfin/loader.yaml",
        "services/jenkins/deployment.yaml",
        "services/jenkins/vault-sync-deployment.yaml",
        "services/keycloak/deployment.yaml",
        "services/keycloak/oneoffs/actual-oidc-secret-ensure-job.yaml",
        "services/keycloak/oneoffs/harbor-oidc-secret-ensure-job.yaml",
        "services/keycloak/oneoffs/ldap-federation-job.yaml",
        "services/keycloak/oneoffs/logs-oidc-secret-ensure-job.yaml",
        "services/keycloak/oneoffs/mas-secrets-ensure-job.yaml",
        "services/keycloak/oneoffs/metis-node-passwords-secret-ensure-job.yaml",
        "services/keycloak/oneoffs/metis-oidc-secret-ensure-job.yaml",
        "services/keycloak/oneoffs/metis-ssh-keys-secret-ensure-job.yaml",
        "services/keycloak/oneoffs/portal-admin-client-secret-ensure-job.yaml",
        "services/keycloak/oneoffs/portal-e2e-client-job.yaml",
        "services/keycloak/oneoffs/portal-e2e-execute-actions-email-test-job.yaml",
        "services/keycloak/oneoffs/portal-e2e-target-client-job.yaml",
        "services/keycloak/oneoffs/portal-e2e-token-exchange-permissions-job.yaml",
        "services/keycloak/oneoffs/portal-e2e-token-exchange-test-job.yaml",
        "services/keycloak/oneoffs/quality-oidc-secret-ensure-job.yaml",
        "services/keycloak/oneoffs/realm-settings-job.yaml",
        "services/keycloak/oneoffs/soteria-oidc-secret-ensure-job.yaml",
        "services/keycloak/oneoffs/synapse-oidc-secret-ensure-job.yaml",
        "services/keycloak/oneoffs/user-overrides-job.yaml",
        "services/keycloak/oneoffs/vault-oidc-secret-ensure-job.yaml",
        "services/keycloak/vault-sync-deployment.yaml",
        "services/logging/node-image-gc-rpi4-daemonset.yaml",
        "services/logging/node-image-prune-rpi5-daemonset.yaml",
        "services/logging/node-log-rotation-daemonset.yaml",
        "services/logging/oauth2-proxy.yaml",
        "services/logging/oneoffs/opensearch-dashboards-setup-job.yaml",
        "services/logging/oneoffs/opensearch-ism-job.yaml",
        "services/logging/oneoffs/opensearch-observability-setup-job.yaml",
        "services/logging/opensearch-prune-cronjob.yaml",
        "services/logging/vault-sync-deployment.yaml",
        "services/mailu/mailu-sync-cronjob.yaml",
        "services/mailu/mailu-sync-listener.yaml",
        "services/mailu/oneoffs/mailu-sync-job.yaml",
        "services/mailu/vault-sync-deployment.yaml",
        "services/mailu/vip-controller.yaml",
        "services/maintenance/ariadne-deployment.yaml",
        "services/maintenance/disable-k3s-traefik-daemonset.yaml",
        "services/maintenance/image-sweeper-cronjob.yaml",
        "services/maintenance/k3s-agent-restart-daemonset.yaml",
        "services/maintenance/metis-deployment.yaml",
        "services/maintenance/metis-k3s-token-sync-cronjob.yaml",
        "services/maintenance/metis-sentinel-amd64-daemonset.yaml",
        "services/maintenance/metis-sentinel-arm64-daemonset.yaml",
        "services/maintenance/node-image-sweeper-daemonset.yaml",
        "services/maintenance/node-nofile-daemonset.yaml",
        "services/maintenance/oauth2-proxy-metis.yaml",
        "services/maintenance/oauth2-proxy-soteria.yaml",
        "services/maintenance/oneoffs/ariadne-migrate-job.yaml",
        "services/maintenance/oneoffs/k3s-traefik-cleanup-job.yaml",
        "services/maintenance/oneoffs/titan-24-rootfs-sweep-job.yaml",
        "services/maintenance/pod-cleaner-cronjob.yaml",
        "services/maintenance/soteria-deployment.yaml",
        "services/maintenance/vault-sync-deployment.yaml",
        "services/monitoring/dcgm-exporter.yaml",
        "services/monitoring/jetson-tegrastats-exporter.yaml",
        "services/monitoring/oneoffs/grafana-org-bootstrap.yaml",
        "services/monitoring/oneoffs/grafana-user-dedupe-job.yaml",
        "services/monitoring/platform-quality-gateway-deployment.yaml",
        "services/monitoring/platform-quality-suite-probe-cronjob.yaml",
        "services/monitoring/postmark-exporter-deployment.yaml",
        "services/monitoring/vmalert-atlas-availability.yaml",
        "services/monitoring/vault-sync-deployment.yaml",
        "services/nextcloud-mail-sync/cronjob.yaml",
        "services/nextcloud/collabora.yaml",
        "services/nextcloud/cronjob.yaml",
        "services/nextcloud/deployment.yaml",
        "services/nextcloud/maintenance-cronjob.yaml",
        "services/oauth2-proxy/deployment.yaml",
        "services/openldap/statefulset.yaml",
        "services/outline/deployment.yaml",
        "services/outline/redis-deployment.yaml",
        "services/pegasus/deployment.yaml",
        "services/pegasus/vault-sync-deployment.yaml",
        "services/planka/deployment.yaml",
        "services/quality/oauth2-proxy-sonarqube.yaml",
        "services/quality/sonarqube-deployment.yaml",
        "services/quality/sonarqube-exporter-deployment.yaml",
        "services/sui-metrics/base/deployment.yaml",
        "services/typhon/vault-sync-deployment.yaml",
        "services/vault/k8s-auth-config-cronjob.yaml",
        "services/vault/oidc-config-cronjob.yaml",
        "services/vault/statefulset.yaml",
        "services/vaultwarden/deployment.yaml"
      ]
    },
    {
      "id": "KSV-0017",
      "targets": [
        "infrastructure/modules/profiles/components/device-plugin-jetson/daemonset.yaml",
        "infrastructure/modules/profiles/components/device-plugin-minipc/daemonset.yaml",
        "infrastructure/modules/profiles/components/device-plugin-tethys/daemonset.yaml",
        "services/logging/node-image-gc-rpi4-daemonset.yaml",
        "services/logging/node-image-prune-rpi5-daemonset.yaml",
        "services/logging/node-log-rotation-daemonset.yaml",
        "services/maintenance/disable-k3s-traefik-daemonset.yaml",
        "services/maintenance/image-sweeper-cronjob.yaml",
        "services/maintenance/k3s-agent-restart-daemonset.yaml",
        "services/maintenance/metis-deployment.yaml",
        "services/maintenance/metis-sentinel-amd64-daemonset.yaml",
        "services/maintenance/metis-sentinel-arm64-daemonset.yaml",
        "services/maintenance/node-image-sweeper-daemonset.yaml",
        "services/maintenance/node-nofile-daemonset.yaml",
        "services/maintenance/oneoffs/titan-24-rootfs-sweep-job.yaml",
        "services/monitoring/dcgm-exporter.yaml",
        "services/monitoring/jetson-tegrastats-exporter.yaml"
      ]
    },
    {
      "id": "KSV-0041",
      "targets": [
        "infrastructure/cert-manager/cleanup/cert-manager-cleanup-rbac.yaml",
        "infrastructure/longhorn/adopt/longhorn-adopt-rbac.yaml",
        "infrastructure/traefik/clusterrole.yaml",
        "services/bstein-dev-home/rbac.yaml",
        "services/comms/comms-secrets-ensure-rbac.yaml",
        "services/comms/mas-db-ensure-rbac.yaml",
        "services/comms/mas-secrets-ensure-rbac.yaml",
        "services/maintenance/soteria-rbac.yaml"
      ]
    },
    {
      "id": "KSV-0047",
      "targets": [
        "services/monitoring/rbac.yaml"
      ]
    },
    {
      "id": "KSV-0053",
      "targets": [
        "services/comms/comms-secrets-ensure-rbac.yaml",
        "services/comms/mas-db-ensure-rbac.yaml",
        "services/jenkins/serviceaccount.yaml",
        "services/maintenance/ariadne-rbac.yaml"
      ]
    },
    {
      "id": "KSV-0056",
      "targets": [
        "infrastructure/cert-manager/cleanup/cert-manager-cleanup-rbac.yaml",
        "infrastructure/longhorn/adopt/longhorn-adopt-rbac.yaml",
        "services/jenkins/serviceaccount.yaml",
        "services/maintenance/disable-k3s-traefik-rbac.yaml",
        "services/maintenance/k3s-traefik-cleanup-rbac.yaml"
      ]
    },
    {
      "id": "KSV-0114",
      "targets": [
        "infrastructure/cert-manager/cleanup/cert-manager-cleanup-rbac.yaml"
      ]
    },
    {
      "id": "KSV-0118",
      "targets": [
        "infrastructure/cert-manager/cleanup/cert-manager-cleanup-job.yaml",
        "infrastructure/core/coredns-deployment.yaml",
        "infrastructure/core/node-prefer-noschedule-cronjob.yaml",
        "infrastructure/core/ntp-sync-daemonset.yaml",
        "infrastructure/longhorn/adopt/longhorn-helm-adopt-job.yaml",
        "infrastructure/longhorn/core/longhorn-disk-tags-ensure-job.yaml",
        "infrastructure/longhorn/core/longhorn-settings-ensure-job.yaml",
        "infrastructure/longhorn/core/vault-sync-deployment.yaml",
        "infrastructure/longhorn/ui-ingress/oauth2-proxy-longhorn.yaml",
        "infrastructure/modules/profiles/components/device-plugin-jetson/daemonset.yaml",
        "infrastructure/modules/profiles/components/device-plugin-minipc/daemonset.yaml",
        "infrastructure/modules/profiles/components/device-plugin-tethys/daemonset.yaml",
        "infrastructure/postgres/statefulset.yaml",
        "infrastructure/vault-csi/vault-csi-provider.yaml",
        "services/ai-llm/deployment.yaml",
        "services/bstein-dev-home/backend-deployment.yaml",
        "services/bstein-dev-home/chat-ai-gateway-deployment.yaml",
        "services/bstein-dev-home/frontend-deployment.yaml",
        "services/bstein-dev-home/oneoffs/migrations/portal-migrate-job.yaml",
        "services/bstein-dev-home/oneoffs/portal-onboarding-e2e-test-job.yaml",
        "services/bstein-dev-home/vault-sync-deployment.yaml",
        "services/bstein-dev-home/vaultwarden-cred-sync-cronjob.yaml",
        "services/comms/atlasbot-deployment.yaml",
        "services/comms/coturn.yaml",
        "services/comms/element-call-deployment.yaml",
        "services/comms/guest-name-job.yaml",
        "services/comms/livekit-token-deployment.yaml",
        "services/comms/livekit.yaml",
        "services/comms/mas-deployment.yaml",
        "services/comms/oneoffs/bstein-force-leave-job.yaml",
        "services/comms/oneoffs/comms-secrets-ensure-job.yaml",
        "services/comms/oneoffs/mas-admin-client-secret-ensure-job.yaml",
        "services/comms/oneoffs/mas-db-ensure-job.yaml",
        "services/comms/oneoffs/mas-local-users-ensure-job.yaml",
        "services/comms/oneoffs/othrys-kick-numeric-job.yaml",
        "services/comms/oneoffs/synapse-admin-ensure-job.yaml",
        "services/comms/oneoffs/synapse-seeder-admin-ensure-job.yaml",
        "services/comms/oneoffs/synapse-signingkey-ensure-job.yaml",
        "services/comms/oneoffs/synapse-user-seed-job.yaml",
        "services/comms/pin-othrys-job.yaml",
        "services/comms/reset-othrys-room-job.yaml",
        "services/comms/seed-othrys-room.yaml",
        "services/comms/vault-sync-deployment.yaml",
        "services/comms/wellknown.yaml",
        "services/crypto/monerod/deployment.yaml",
        "services/crypto/wallet-monero-temp/deployment.yaml",
        "services/crypto/xmr-miner/deployment.yaml",
        "services/crypto/xmr-miner/vault-sync-deployment.yaml",
        "services/crypto/xmr-miner/xmrig-daemonset.yaml",
        "services/finance/firefly-cronjob.yaml",
        "services/finance/firefly-deployment.yaml",
        "services/finance/firefly-user-sync-cronjob.yaml",
        "services/finance/oneoffs/finance-secrets-ensure-job.yaml",
        "services/gitea/deployment.yaml",
        "services/harbor/vault-sync-deployment.yaml",
        "services/health/wger-admin-ensure-cronjob.yaml",
        "services/health/wger-deployment.yaml",
        "services/health/wger-user-sync-cronjob.yaml",
        "services/jellyfin/loader.yaml",
        "services/jenkins/deployment.yaml",
        "services/jenkins/vault-sync-deployment.yaml",
        "services/keycloak/oneoffs/actual-oidc-secret-ensure-job.yaml",
        "services/keycloak/oneoffs/harbor-oidc-secret-ensure-job.yaml",
        "services/keycloak/oneoffs/ldap-federation-job.yaml",
        "services/keycloak/oneoffs/logs-oidc-secret-ensure-job.yaml",
        "services/keycloak/oneoffs/mas-secrets-ensure-job.yaml",
        "services/keycloak/oneoffs/metis-node-passwords-secret-ensure-job.yaml",
        "services/keycloak/oneoffs/metis-oidc-secret-ensure-job.yaml",
        "services/keycloak/oneoffs/metis-ssh-keys-secret-ensure-job.yaml",
        "services/keycloak/oneoffs/portal-admin-client-secret-ensure-job.yaml",
        "services/keycloak/oneoffs/portal-e2e-client-job.yaml",
        "services/keycloak/oneoffs/portal-e2e-execute-actions-email-test-job.yaml",
        "services/keycloak/oneoffs/portal-e2e-target-client-job.yaml",
        "services/keycloak/oneoffs/portal-e2e-token-exchange-permissions-job.yaml",
        "services/keycloak/oneoffs/portal-e2e-token-exchange-test-job.yaml",
        "services/keycloak/oneoffs/quality-oidc-secret-ensure-job.yaml",
        "services/keycloak/oneoffs/realm-settings-job.yaml",
        "services/keycloak/oneoffs/soteria-oidc-secret-ensure-job.yaml",
        "services/keycloak/oneoffs/synapse-oidc-secret-ensure-job.yaml",
        "services/keycloak/oneoffs/user-overrides-job.yaml",
        "services/keycloak/oneoffs/vault-oidc-secret-ensure-job.yaml",
        "services/keycloak/vault-sync-deployment.yaml",
        "services/logging/node-image-gc-rpi4-daemonset.yaml",
        "services/logging/node-image-prune-rpi5-daemonset.yaml",
        "services/logging/node-log-rotation-daemonset.yaml",
        "services/logging/oauth2-proxy.yaml",
        "services/logging/oneoffs/opensearch-dashboards-setup-job.yaml",
        "services/logging/oneoffs/opensearch-ism-job.yaml",
        "services/logging/oneoffs/opensearch-observability-setup-job.yaml",
        "services/logging/opensearch-prune-cronjob.yaml",
        "services/logging/vault-sync-deployment.yaml",
        "services/mailu/mailu-sync-cronjob.yaml",
        "services/mailu/mailu-sync-listener.yaml",
        "services/mailu/oneoffs/mailu-sync-job.yaml",
        "services/mailu/vault-sync-deployment.yaml",
        "services/mailu/vip-controller.yaml",
        "services/maintenance/ariadne-deployment.yaml",
        "services/maintenance/disable-k3s-traefik-daemonset.yaml",
        "services/maintenance/image-sweeper-cronjob.yaml",
        "services/maintenance/k3s-agent-restart-daemonset.yaml",
        "services/maintenance/metis-deployment.yaml",
        "services/maintenance/metis-k3s-token-sync-cronjob.yaml",
        "services/maintenance/metis-sentinel-amd64-daemonset.yaml",
        "services/maintenance/metis-sentinel-arm64-daemonset.yaml",
        "services/maintenance/node-image-sweeper-daemonset.yaml",
        "services/maintenance/node-nofile-daemonset.yaml",
        "services/maintenance/oauth2-proxy-metis.yaml",
        "services/maintenance/oauth2-proxy-soteria.yaml",
        "services/maintenance/oneoffs/ariadne-migrate-job.yaml",
        "services/maintenance/oneoffs/k3s-traefik-cleanup-job.yaml",
        "services/maintenance/oneoffs/titan-24-rootfs-sweep-job.yaml",
        "services/maintenance/pod-cleaner-cronjob.yaml",
        "services/maintenance/soteria-deployment.yaml",
        "services/maintenance/vault-sync-deployment.yaml",
        "services/monitoring/dcgm-exporter.yaml",
        "services/monitoring/jetson-tegrastats-exporter.yaml",
        "services/monitoring/oneoffs/grafana-org-bootstrap.yaml",
        "services/monitoring/oneoffs/grafana-user-dedupe-job.yaml",
        "services/monitoring/platform-quality-gateway-deployment.yaml",
        "services/monitoring/platform-quality-suite-probe-cronjob.yaml",
        "services/monitoring/postmark-exporter-deployment.yaml",
        "services/monitoring/vmalert-atlas-availability.yaml",
        "services/monitoring/vault-sync-deployment.yaml",
        "services/nextcloud/collabora.yaml",
        "services/oauth2-proxy/deployment.yaml",
        "services/openldap/statefulset.yaml",
        "services/outline/deployment.yaml",
        "services/outline/redis-deployment.yaml",
        "services/pegasus/vault-sync-deployment.yaml",
        "services/quality/oauth2-proxy-sonarqube.yaml",
        "services/quality/sonarqube-deployment.yaml",
        "services/quality/sonarqube-exporter-deployment.yaml",
        "services/sui-metrics/base/deployment.yaml",
        "services/sui-metrics/overlays/atlas/patch-node-selector.yaml",
        "services/typhon/deployment.yaml",
        "services/typhon/vault-sync-deployment.yaml",
        "services/vault/k8s-auth-config-cronjob.yaml",
        "services/vault/oidc-config-cronjob.yaml",
        "services/vaultwarden/deployment.yaml"
      ]
    },
    {
      "id": "KSV-0121",
      "targets": [
        "services/logging/node-image-gc-rpi4-daemonset.yaml",
        "services/logging/node-image-prune-rpi5-daemonset.yaml",
        "services/logging/node-log-rotation-daemonset.yaml",
        "services/maintenance/disable-k3s-traefik-daemonset.yaml",
        "services/maintenance/image-sweeper-cronjob.yaml",
        "services/maintenance/metis-deployment.yaml",
        "services/maintenance/node-image-sweeper-daemonset.yaml",
        "services/maintenance/node-nofile-daemonset.yaml",
        "services/maintenance/oneoffs/titan-24-rootfs-sweep-job.yaml"
      ]
    }
  ]
}
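Waivers are matched on exact (id, target) pairs and every entry above inherits default_expires_at; a sketch of how _load_waiver_pairs from ci/scripts/supply_chain_report.py reads this file for two policy days on either side of the expiry:

    import datetime as dt
    from pathlib import Path
    from supply_chain_report import _load_waiver_pairs

    waivers = Path("ci/titan-iac-trivy-waivers.json")

    # Before 2026-05-22 every listed (id, target) pair is active.
    active, expired = _load_waiver_pairs(waivers, dt.date(2026, 1, 1))
    assert ("DS-0002", "dockerfiles/Dockerfile.ananke-node-helper") in active
    assert expired == 0

    # After the default expiry the same entries count as expired instead.
    active, expired = _load_waiver_pairs(waivers, dt.date(2026, 6, 1))
    assert not active and expired > 0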
@@ -4,6 +4,8 @@ kind: Kustomization
 metadata:
   name: ai-llm
   namespace: flux-system
+  annotations:
+    kustomize.toolkit.fluxcd.io/ssa: IfNotPresent
 spec:
   interval: 10m
   path: ./services/ai-llm

@@ -4,6 +4,8 @@ kind: Kustomization
 metadata:
   name: bstein-dev-home-migrations
   namespace: flux-system
+  annotations:
+    kustomize.toolkit.fluxcd.io/ssa: IfNotPresent
 spec:
   interval: 10m
   path: ./services/bstein-dev-home/oneoffs/migrations
@@ -13,14 +13,14 @@ spec:
   git:
     checkout:
       ref:
-        branch: feature/ariadne
+        branch: main
     commit:
       author:
         email: ops@bstein.dev
         name: flux-bot
       messageTemplate: "chore(bstein-dev-home): automated image update"
     push:
-      branch: feature/ariadne
+      branch: main
   update:
     strategy: Setters
     path: services/bstein-dev-home
@@ -4,6 +4,8 @@ kind: Kustomization
 metadata:
   name: bstein-dev-home
   namespace: flux-system
+  annotations:
+    kustomize.toolkit.fluxcd.io/ssa: IfNotPresent
 spec:
   interval: 10m
   path: ./services/bstein-dev-home

@@ -4,6 +4,8 @@ kind: Kustomization
 metadata:
   name: comms
   namespace: flux-system
+  annotations:
+    kustomize.toolkit.fluxcd.io/ssa: IfNotPresent
 spec:
   interval: 10m
   prune: true

@@ -4,6 +4,8 @@ kind: Kustomization
 metadata:
   name: crypto
   namespace: flux-system
+  annotations:
+    kustomize.toolkit.fluxcd.io/ssa: IfNotPresent
 spec:
   interval: 10m
   path: ./services/crypto

@@ -4,6 +4,8 @@ kind: Kustomization
 metadata:
   name: finance
   namespace: flux-system
+  annotations:
+    kustomize.toolkit.fluxcd.io/ssa: IfNotPresent
 spec:
   interval: 10m
   path: ./services/finance

@@ -4,6 +4,8 @@ kind: Kustomization
 metadata:
   name: gitea
   namespace: flux-system
+  annotations:
+    kustomize.toolkit.fluxcd.io/ssa: IfNotPresent
 spec:
   interval: 10m
   path: ./services/gitea

@@ -4,6 +4,8 @@ kind: Kustomization
 metadata:
   name: harbor
   namespace: flux-system
+  annotations:
+    kustomize.toolkit.fluxcd.io/ssa: IfNotPresent
 spec:
   interval: 10m
   path: ./services/harbor

@@ -4,6 +4,8 @@ kind: Kustomization
 metadata:
   name: health
   namespace: flux-system
+  annotations:
+    kustomize.toolkit.fluxcd.io/ssa: IfNotPresent
 spec:
   interval: 10m
   path: ./services/health

@@ -4,6 +4,8 @@ kind: Kustomization
 metadata:
   name: jellyfin
   namespace: flux-system
+  annotations:
+    kustomize.toolkit.fluxcd.io/ssa: IfNotPresent
 spec:
   interval: 10m
   path: ./services/jellyfin

@@ -4,6 +4,8 @@ kind: Kustomization
 metadata:
   name: jenkins
   namespace: flux-system
+  annotations:
+    kustomize.toolkit.fluxcd.io/ssa: IfNotPresent
 spec:
   interval: 10m
   path: ./services/jenkins

@@ -4,6 +4,8 @@ kind: Kustomization
 metadata:
   name: keycloak
   namespace: flux-system
+  annotations:
+    kustomize.toolkit.fluxcd.io/ssa: IfNotPresent
 spec:
   interval: 10m
   prune: true
@@ -21,6 +21,7 @@ resources:
   - sui-metrics/kustomization.yaml
   - openldap/kustomization.yaml
   - keycloak/kustomization.yaml
+  - quality/kustomization.yaml
   - oauth2-proxy/kustomization.yaml
   - mailu/kustomization.yaml
   - jenkins/kustomization.yaml
@@ -4,6 +4,8 @@ kind: Kustomization
 metadata:
   name: mailu
   namespace: flux-system
+  annotations:
+    kustomize.toolkit.fluxcd.io/ssa: IfNotPresent
 spec:
   interval: 10m
   sourceRef:

@@ -4,6 +4,8 @@ kind: Kustomization
 metadata:
   name: monerod
   namespace: flux-system
+  annotations:
+    kustomize.toolkit.fluxcd.io/ssa: IfNotPresent
 spec:
   interval: 10m
   path: ./services/crypto/monerod

@@ -4,6 +4,8 @@ kind: Kustomization
 metadata:
   name: nextcloud-mail-sync
   namespace: flux-system
+  annotations:
+    kustomize.toolkit.fluxcd.io/ssa: IfNotPresent
 spec:
   interval: 10m
   prune: true

@@ -4,6 +4,8 @@ kind: Kustomization
 metadata:
   name: nextcloud
   namespace: flux-system
+  annotations:
+    kustomize.toolkit.fluxcd.io/ssa: IfNotPresent
 spec:
   interval: 10m
   path: ./services/nextcloud

@@ -4,6 +4,8 @@ kind: Kustomization
 metadata:
   name: oauth2-proxy
   namespace: flux-system
+  annotations:
+    kustomize.toolkit.fluxcd.io/ssa: IfNotPresent
 spec:
   interval: 10m
   prune: true

@@ -4,6 +4,8 @@ kind: Kustomization
 metadata:
   name: openldap
   namespace: flux-system
+  annotations:
+    kustomize.toolkit.fluxcd.io/ssa: IfNotPresent
 spec:
   interval: 10m
   prune: true

@@ -4,6 +4,8 @@ kind: Kustomization
 metadata:
   name: outline
   namespace: flux-system
+  annotations:
+    kustomize.toolkit.fluxcd.io/ssa: IfNotPresent
 spec:
   interval: 10m
   path: ./services/outline

@@ -4,6 +4,8 @@ kind: Kustomization
 metadata:
   name: pegasus
   namespace: flux-system
+  annotations:
+    kustomize.toolkit.fluxcd.io/ssa: IfNotPresent
 spec:
   interval: 10m
   path: ./services/pegasus

@@ -4,6 +4,8 @@ kind: Kustomization
 metadata:
   name: planka
   namespace: flux-system
+  annotations:
+    kustomize.toolkit.fluxcd.io/ssa: IfNotPresent
 spec:
   interval: 10m
   path: ./services/planka
@@ -0,0 +1,37 @@
+# clusters/atlas/flux-system/applications/quality/kustomization.yaml
+apiVersion: kustomize.toolkit.fluxcd.io/v1
+kind: Kustomization
+metadata:
+  name: quality
+  namespace: flux-system
+  annotations:
+    kustomize.toolkit.fluxcd.io/ssa: IfNotPresent
+spec:
+  interval: 10m
+  path: ./services/quality
+  prune: true
+  sourceRef:
+    kind: GitRepository
+    name: flux-system
+  targetNamespace: quality
+  dependsOn:
+    - name: traefik
+    - name: cert-manager
+    - name: keycloak
+    - name: vault
+    - name: postgres
+  healthChecks:
+    - apiVersion: apps/v1
+      kind: Deployment
+      name: sonarqube
+      namespace: quality
+    - apiVersion: apps/v1
+      kind: Deployment
+      name: sonarqube-exporter
+      namespace: quality
+    - apiVersion: apps/v1
+      kind: Deployment
+      name: oauth2-proxy-sonarqube
+      namespace: quality
+  wait: false
+  timeout: 20m
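Note: once this lands, reconciliation and the dependsOn chain can be spot-checked with the flux CLI (a sketch, assuming the CLI is installed and has access to this cluster):

    $ flux get kustomizations quality -n flux-system
    $ flux reconcile kustomization quality -n flux-system --with-source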
@@ -4,6 +4,8 @@ kind: Kustomization
 metadata:
   name: sui-metrics
   namespace: flux-system
+  annotations:
+    kustomize.toolkit.fluxcd.io/ssa: IfNotPresent
 spec:
   interval: 10m
   path: ./services/sui-metrics/overlays/atlas

@@ -4,6 +4,8 @@ kind: Kustomization
 metadata:
   name: typhon
   namespace: flux-system
+  annotations:
+    kustomize.toolkit.fluxcd.io/ssa: IfNotPresent
 spec:
   interval: 10m
   path: ./services/typhon

@@ -4,6 +4,8 @@ kind: Kustomization
 metadata:
   name: vault
   namespace: flux-system
+  annotations:
+    kustomize.toolkit.fluxcd.io/ssa: IfNotPresent
 spec:
   interval: 10m
   sourceRef:

@@ -4,6 +4,8 @@ kind: Kustomization
 metadata:
   name: vaultwarden
   namespace: flux-system
+  annotations:
+    kustomize.toolkit.fluxcd.io/ssa: IfNotPresent
 spec:
   interval: 10m
   suspend: false

@@ -4,6 +4,8 @@ kind: Kustomization
 metadata:
   name: wallet-monero-temp
   namespace: flux-system
+  annotations:
+    kustomize.toolkit.fluxcd.io/ssa: IfNotPresent
 spec:
   interval: 10m
   path: ./services/crypto/wallet-monero-temp

@@ -4,6 +4,8 @@ kind: Kustomization
 metadata:
   name: xmr-miner
   namespace: flux-system
+  annotations:
+    kustomize.toolkit.fluxcd.io/ssa: IfNotPresent
 spec:
   interval: 10m
   path: ./services/crypto/xmr-miner

@@ -5966,6 +5966,9 @@ spec:
       - args:
         - --events-addr=http://notification-controller.$(RUNTIME_NAMESPACE).svc.cluster.local./
        - --watch-all-namespaces=true
+        - --concurrent=1
+        - --requeue-dependency=5s
+        - --interval-jitter-percentage=30
         - --log-level=info
         - --log-encoding=json
         - --enable-leader-election

@@ -7,7 +7,7 @@ metadata:
   name: flux-system
   namespace: flux-system
 spec:
-  interval: 1m0s
+  interval: 15m0s
   ref:
     branch: main
   secretRef:

@@ -20,7 +20,7 @@ metadata:
   name: flux-system
   namespace: flux-system
 spec:
-  interval: 10m0s
+  interval: 1h0m0s
   path: ./clusters/atlas/flux-system
   prune: true
   sourceRef:
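With Git polling relaxed from 1m to 15m (and the self-managing Kustomization from 10m to 1h), out-of-band pushes can still be applied immediately on demand (a sketch, flux CLI assumed):

    $ flux reconcile source git flux-system -n flux-system
    $ flux reconcile kustomization flux-system -n flux-system --with-source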
@@ -4,6 +4,8 @@ kind: Kustomization
 metadata:
   name: cert-manager-cleanup
   namespace: flux-system
+  annotations:
+    kustomize.toolkit.fluxcd.io/ssa: IfNotPresent
 spec:
   interval: 30m
   path: ./infrastructure/cert-manager/cleanup

@@ -4,6 +4,8 @@ kind: Kustomization
 metadata:
   name: cert-manager
   namespace: flux-system
+  annotations:
+    kustomize.toolkit.fluxcd.io/ssa: IfNotPresent
 spec:
   interval: 30m
   path: ./infrastructure/cert-manager

@@ -4,6 +4,8 @@ kind: Kustomization
 metadata:
   name: core
   namespace: flux-system
+  annotations:
+    kustomize.toolkit.fluxcd.io/ssa: IfNotPresent
 spec:
   interval: 10m
   path: ./infrastructure/core

@@ -4,6 +4,8 @@ kind: Kustomization
 metadata:
   name: gitops-ui
   namespace: flux-system
+  annotations:
+    kustomize.toolkit.fluxcd.io/ssa: IfNotPresent
 spec:
   interval: 10m
   timeout: 10m

@@ -4,6 +4,8 @@ kind: Kustomization
 metadata:
   name: helm
   namespace: flux-system
+  annotations:
+    kustomize.toolkit.fluxcd.io/ssa: IfNotPresent
 spec:
   interval: 30m
   sourceRef:

@@ -4,6 +4,8 @@ kind: Kustomization
 metadata:
   name: logging
   namespace: flux-system
+  annotations:
+    kustomize.toolkit.fluxcd.io/ssa: IfNotPresent
 spec:
   interval: 10m
   path: ./services/logging

@@ -4,6 +4,8 @@ kind: Kustomization
 metadata:
   name: longhorn-adopt
   namespace: flux-system
+  annotations:
+    kustomize.toolkit.fluxcd.io/ssa: IfNotPresent
 spec:
   interval: 30m
   path: ./infrastructure/longhorn/adopt

@@ -4,6 +4,8 @@ kind: Kustomization
 metadata:
   name: longhorn-ui
   namespace: flux-system
+  annotations:
+    kustomize.toolkit.fluxcd.io/ssa: IfNotPresent
 spec:
   interval: 10m
   path: ./infrastructure/longhorn/ui-ingress

@@ -4,6 +4,8 @@ kind: Kustomization
 metadata:
   name: longhorn
   namespace: flux-system
+  annotations:
+    kustomize.toolkit.fluxcd.io/ssa: IfNotPresent
 spec:
   interval: 30m
   path: ./infrastructure/longhorn/core

@@ -4,6 +4,8 @@ kind: Kustomization
 metadata:
   name: maintenance
   namespace: flux-system
+  annotations:
+    kustomize.toolkit.fluxcd.io/ssa: IfNotPresent
 spec:
   interval: 10m
   path: ./services/maintenance

@@ -4,6 +4,8 @@ kind: Kustomization
 metadata:
   name: metallb
   namespace: flux-system
+  annotations:
+    kustomize.toolkit.fluxcd.io/ssa: IfNotPresent
 spec:
   interval: 30m
   sourceRef:

@@ -4,6 +4,8 @@ kind: Kustomization
 metadata:
   name: monitoring
   namespace: flux-system
+  annotations:
+    kustomize.toolkit.fluxcd.io/ssa: IfNotPresent
 spec:
   interval: 10m
   path: ./services/monitoring

@@ -4,6 +4,8 @@ kind: Kustomization
 metadata:
   name: postgres
   namespace: flux-system
+  annotations:
+    kustomize.toolkit.fluxcd.io/ssa: IfNotPresent
 spec:
   interval: 10m
   path: ./infrastructure/postgres

@@ -4,6 +4,8 @@ kind: Kustomization
 metadata:
   name: traefik
   namespace: flux-system
+  annotations:
+    kustomize.toolkit.fluxcd.io/ssa: IfNotPresent
 spec:
   interval: 10m
   path: ./infrastructure/traefik

@@ -4,6 +4,8 @@ kind: Kustomization
 metadata:
   name: vault-csi
   namespace: flux-system
+  annotations:
+    kustomize.toolkit.fluxcd.io/ssa: IfNotPresent
 spec:
   interval: 30m
   sourceRef:

@@ -4,6 +4,8 @@ kind: Kustomization
 metadata:
   name: vault-injector
   namespace: flux-system
+  annotations:
+    kustomize.toolkit.fluxcd.io/ssa: IfNotPresent
 spec:
   interval: 30m
   path: ./infrastructure/vault-injector
@@ -2,4 +2,8 @@ FROM python:3.11-slim

 ENV PIP_DISABLE_PIP_VERSION_CHECK=1

-RUN pip install --no-cache-dir requests psycopg2-binary
+RUN pip install --no-cache-dir requests psycopg2-binary \
+    && groupadd --system guest-tools \
+    && useradd --system --uid 65532 --gid guest-tools --home-dir /nonexistent --shell /usr/sbin/nologin guest-tools
+
+USER guest-tools

@@ -1,16 +1,8 @@
-FROM --platform=$BUILDPLATFORM opensearchproject/data-prepper:2.8.0 AS source
-FROM --platform=$TARGETPLATFORM eclipse-temurin:17-jre
+# Use the mirrored Harbor artifact so CI does not depend on Docker Hub egress.
+FROM registry.bstein.dev/streaming/data-prepper@sha256:32ac6ad42e0f12da08bebee307e290b17d127b30def9b06eeaffbcbbc5033e83

 ENV DATA_PREPPER_PATH=/usr/share/data-prepper

-RUN useradd -u 10001 -M -U -d / -s /usr/sbin/nologin data_prepper \
-    && mkdir -p /var/log/data-prepper
-
-COPY --from=source /usr/share/data-prepper /usr/share/data-prepper
-
-RUN chown -R 10001:10001 /usr/share/data-prepper /var/log/data-prepper
-
 USER 10001
 WORKDIR /usr/share/data-prepper
 CMD ["bin/data-prepper"]

@@ -1,10 +1,13 @@
 FROM ghcr.io/element-hq/lk-jwt-service:0.3.0 AS base

 FROM alpine:3.20
-RUN apk add --no-cache ca-certificates
+RUN apk add --no-cache ca-certificates \
+    && addgroup -S livekit-token \
+    && adduser -S -D -H -u 65532 -G livekit-token livekit-token
 COPY --from=base /lk-jwt-service /lk-jwt-service
 COPY dockerfiles/vault-entrypoint.sh /entrypoint.sh
 RUN chmod 0755 /entrypoint.sh

+USER livekit-token
 ENTRYPOINT ["/entrypoint.sh"]
 CMD ["/lk-jwt-service"]

@@ -29,10 +29,12 @@ FROM ${DEBIAN_IMAGE}
 RUN set -eux; \
     apt-get update; \
     apt-get install -y --no-install-recommends ca-certificates; \
-    update-ca-certificates; rm -rf /var/lib/apt/lists/*
+    update-ca-certificates; rm -rf /var/lib/apt/lists/*; \
+    groupadd --system p2pool; \
+    useradd --system --uid 65532 --gid p2pool --home-dir /nonexistent --shell /usr/sbin/nologin p2pool
 COPY --from=fetch /out/p2pool /usr/local/bin/p2pool

 RUN /usr/local/bin/p2pool --version || true
 EXPOSE 3333
+USER p2pool
 ENTRYPOINT ["/usr/local/bin/p2pool"]

@@ -26,9 +26,12 @@ RUN set -eux; \
     curl -fsSL "$URL" -o /opt/monero/monero.tar.bz2; \
     tar -xjf /opt/monero/monero.tar.bz2 -C /opt/monero --strip-components=1; \
     install -m 0755 /opt/monero/monero-wallet-rpc /usr/local/bin/monero-wallet-rpc; \
-    rm -f /opt/monero/monero.tar.bz2
+    rm -f /opt/monero/monero.tar.bz2; \
+    groupadd --system monero; \
+    useradd --system --uid 1000 --gid monero --home-dir /nonexistent --shell /usr/sbin/nologin monero

 ENV PATH="/usr/local/bin:/usr/bin:/bin"
 RUN /usr/local/bin/monero-wallet-rpc --version || true

 EXPOSE 18083
+USER monero

@@ -23,10 +23,14 @@ RUN set -eux; \
     mkdir -p /opt/monero; \
     tar -xjf /tmp/monero.tar.bz2 -C /opt/monero --strip-components=1; \
     rm -f /tmp/monero.tar.bz2; \
+    groupadd --system monero; \
+    useradd --system --uid 1000 --gid monero --home-dir /nonexistent --shell /usr/sbin/nologin monero; \
     mkdir -p /data; \
+    chown monero:monero /data; \
     chmod 0770 /data

 ENV LD_LIBRARY_PATH=/opt/monero:/opt/monero/lib \
     PATH="/opt/monero:${PATH}"

+USER monero
 CMD ["/opt/monero/monerod", "--version"]

@@ -1,10 +1,13 @@
 FROM quay.io/oauth2-proxy/oauth2-proxy:v7.6.0 AS base

 FROM alpine:3.20
-RUN apk add --no-cache ca-certificates
+RUN apk add --no-cache ca-certificates \
+    && addgroup -S oauth2-proxy \
+    && adduser -S -D -H -u 65532 -G oauth2-proxy oauth2-proxy
 COPY --from=base /bin/oauth2-proxy /bin/oauth2-proxy
 COPY dockerfiles/vault-entrypoint.sh /entrypoint.sh
 RUN chmod 0755 /entrypoint.sh

+USER oauth2-proxy
 ENTRYPOINT ["/entrypoint.sh"]
 CMD ["/bin/oauth2-proxy"]

@@ -1,10 +1,13 @@
 FROM registry.bstein.dev/streaming/pegasus:1.2.32 AS base

 FROM alpine:3.20
-RUN apk add --no-cache ca-certificates
+RUN apk add --no-cache ca-certificates \
+    && addgroup -S pegasus \
+    && adduser -S -D -H -u 65532 -G pegasus pegasus
 COPY --from=base /pegasus /pegasus
 COPY dockerfiles/vault-entrypoint.sh /entrypoint.sh
 RUN chmod 0755 /entrypoint.sh

+USER pegasus
 ENTRYPOINT ["/entrypoint.sh"]
 CMD ["/pegasus"]
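Each of these images now drops to a dedicated non-root user (uid 65532 for the Alpine-based wrappers, uid 1000 for the monero images). A quick spot-check after a local build (a sketch; the Dockerfile name and tag below are hypothetical):

    $ docker build -f dockerfiles/Dockerfile.oauth2-proxy -t oauth2-proxy:nonroot .   # hypothetical file name
    $ docker run --rm --entrypoint id oauth2-proxy:nonroot                            # expect uid=65532, not root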
dockerfiles/Dockerfile.quality-tools (new file, 48 lines)
@@ -0,0 +1,48 @@
+# dockerfiles/Dockerfile.quality-tools
+FROM debian:bookworm-slim
+
+ARG SONAR_SCANNER_VERSION=8.0.1.6346
+ARG TRIVY_VERSION=0.70.0
+ENV TRIVY_CACHE_DIR=/opt/trivy-cache
+
+SHELL ["/bin/bash", "-o", "pipefail", "-c"]
+
+RUN apt-get update \
+    && apt-get install -y --no-install-recommends \
+        bash \
+        ca-certificates \
+        curl \
+        git \
+        jq \
+        unzip \
+    && rm -rf /var/lib/apt/lists/* \
+    && groupadd --system quality-tools \
+    && useradd --system --uid 65532 --gid quality-tools --home-dir /nonexistent --shell /usr/sbin/nologin quality-tools
+
+RUN set -eux; \
+    scanner_zip="sonar-scanner-cli-${SONAR_SCANNER_VERSION}-linux-aarch64.zip"; \
+    base_url="https://binaries.sonarsource.com/Distribution/sonar-scanner-cli"; \
+    curl -fsSL "${base_url}/${scanner_zip}" -o "/tmp/${scanner_zip}"; \
+    curl -fsSL "${base_url}/${scanner_zip}.sha256" -o "/tmp/${scanner_zip}.sha256"; \
+    printf '%s %s\n' "$(cat "/tmp/${scanner_zip}.sha256")" "/tmp/${scanner_zip}" | sha256sum -c -; \
+    unzip -q "/tmp/${scanner_zip}" -d /opt; \
+    ln -s "/opt/sonar-scanner-${SONAR_SCANNER_VERSION}-linux-aarch64/bin/sonar-scanner" /usr/local/bin/sonar-scanner; \
+    rm -f "/tmp/${scanner_zip}" "/tmp/${scanner_zip}.sha256"
+
+RUN set -eux; \
+    trivy_tgz="trivy_${TRIVY_VERSION}_Linux-ARM64.tar.gz"; \
+    curl -fsSL "https://github.com/aquasecurity/trivy/releases/download/v${TRIVY_VERSION}/${trivy_tgz}" -o "/tmp/${trivy_tgz}"; \
+    tar -C /usr/local/bin -xzf "/tmp/${trivy_tgz}" trivy; \
+    rm -f "/tmp/${trivy_tgz}"; \
+    trivy --version; \
+    sonar-scanner -v
+
+RUN set -eux; \
+    mkdir -p "${TRIVY_CACHE_DIR}"; \
+    trivy image --download-db-only --cache-dir "${TRIVY_CACHE_DIR}"; \
+    chmod -R a+rX "${TRIVY_CACHE_DIR}"; \
+    mkdir -p /workspace; \
+    chown quality-tools:quality-tools /workspace
+
+WORKDIR /workspace
+USER quality-tools
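A plausible local smoke test of this toolbox image (a sketch; the Dockerfile downloads aarch64/ARM64 binaries, so an arm64 build host is assumed, and the tag is illustrative):

    $ docker build -f dockerfiles/Dockerfile.quality-tools -t quality-tools:local .
    $ docker run --rm quality-tools:local sonar-scanner -v
    $ docker run --rm quality-tools:local trivy --version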
@@ -33,6 +33,36 @@ spec:
         node-role.kubernetes.io/worker: "true"
       affinity:
         nodeAffinity:
+          preferredDuringSchedulingIgnoredDuringExecution:
+            - weight: 100
+              preference:
+                matchExpressions:
+                  - key: atlas.bstein.dev/spillover
+                    operator: DoesNotExist
+            - weight: 95
+              preference:
+                matchExpressions:
+                  - key: kubernetes.io/hostname
+                    operator: NotIn
+                    values:
+                      - titan-13
+                      - titan-15
+                      - titan-17
+                      - titan-19
+            - weight: 90
+              preference:
+                matchExpressions:
+                  - key: hardware
+                    operator: In
+                    values:
+                      - rpi5
+            - weight: 50
+              preference:
+                matchExpressions:
+                  - key: hardware
+                    operator: In
+                    values:
+                      - rpi4
           requiredDuringSchedulingIgnoredDuringExecution:
             nodeSelectorTerms:
               - matchExpressions:

@@ -46,6 +76,36 @@ spec:
         node-role.kubernetes.io/worker: "true"
       affinity:
         nodeAffinity:
+          preferredDuringSchedulingIgnoredDuringExecution:
+            - weight: 100
+              preference:
+                matchExpressions:
+                  - key: atlas.bstein.dev/spillover
+                    operator: DoesNotExist
+            - weight: 95
+              preference:
+                matchExpressions:
+                  - key: kubernetes.io/hostname
+                    operator: NotIn
+                    values:
+                      - titan-13
+                      - titan-15
+                      - titan-17
+                      - titan-19
+            - weight: 90
+              preference:
+                matchExpressions:
+                  - key: hardware
+                    operator: In
+                    values:
+                      - rpi5
+            - weight: 50
+              preference:
+                matchExpressions:
+                  - key: hardware
+                    operator: In
+                    values:
+                      - rpi4
           requiredDuringSchedulingIgnoredDuringExecution:
             nodeSelectorTerms:
               - matchExpressions:

@@ -59,6 +119,36 @@ spec:
         node-role.kubernetes.io/worker: "true"
       affinity:
         nodeAffinity:
+          preferredDuringSchedulingIgnoredDuringExecution:
+            - weight: 100
+              preference:
+                matchExpressions:
+                  - key: atlas.bstein.dev/spillover
+                    operator: DoesNotExist
+            - weight: 95
+              preference:
+                matchExpressions:
+                  - key: kubernetes.io/hostname
+                    operator: NotIn
+                    values:
+                      - titan-13
+                      - titan-15
+                      - titan-17
+                      - titan-19
+            - weight: 90
+              preference:
+                matchExpressions:
+                  - key: hardware
+                    operator: In
+                    values:
+                      - rpi5
+            - weight: 50
+              preference:
+                matchExpressions:
+                  - key: hardware
+                    operator: In
+                    values:
+                      - rpi4
           requiredDuringSchedulingIgnoredDuringExecution:
             nodeSelectorTerms:
               - matchExpressions:

@@ -4,6 +4,9 @@ kind: Kustomization
 resources:
 - ../modules/base
 - ../modules/profiles/atlas-ha
+- node-prefer-noschedule-serviceaccount.yaml
+- node-prefer-noschedule-rbac.yaml
+- node-prefer-noschedule-cronjob.yaml
 - coredns-custom.yaml
 - coredns-deployment.yaml
 - ntp-sync-daemonset.yaml

infrastructure/core/node-prefer-noschedule-cronjob.yaml (new file, 35 lines)
@@ -0,0 +1,35 @@
+# infrastructure/core/node-prefer-noschedule-cronjob.yaml
+apiVersion: batch/v1
+kind: CronJob
+metadata:
+  name: node-prefer-noschedule
+  namespace: kube-system
+spec:
+  schedule: "*/20 * * * *"
+  concurrencyPolicy: Forbid
+  successfulJobsHistoryLimit: 1
+  failedJobsHistoryLimit: 3
+  jobTemplate:
+    spec:
+      backoffLimit: 1
+      template:
+        spec:
+          serviceAccountName: node-prefer-noschedule
+          restartPolicy: OnFailure
+          containers:
+            - name: taint
+              image: bitnami/kubectl@sha256:554ab88b1858e8424c55de37ad417b16f2a0e65d1607aa0f3fe3ce9b9f10b131
+              command:
+                - /usr/bin/env
+                - bash
+                - -ceu
+                - |
+                  for node in titan-13 titan-15 titan-17 titan-19; do
+                    if kubectl get node "${node}" >/dev/null 2>&1; then
+                      kubectl label node "${node}" atlas.bstein.dev/spillover=true --overwrite=true
+                      kubectl taint node "${node}" longhorn=true:PreferNoSchedule --overwrite=true
+                      kubectl taint node "${node}" atlas.bstein.dev/spillover=true:PreferNoSchedule --overwrite=true
+                    else
+                      echo "skipping missing node ${node}"
+                    fi
+                  done
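After the first CronJob run, the spillover label and PreferNoSchedule taints should be visible (a sketch, assuming kubectl access):

    $ kubectl get nodes -L atlas.bstein.dev/spillover
    $ kubectl describe node titan-13 | grep -A3 Taints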
infrastructure/core/node-prefer-noschedule-rbac.yaml (new file, 22 lines)
@@ -0,0 +1,22 @@
+# infrastructure/core/node-prefer-noschedule-rbac.yaml
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  name: node-prefer-noschedule
+rules:
+  - apiGroups: [""]
+    resources: ["nodes"]
+    verbs: ["get", "list", "patch"]
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  name: node-prefer-noschedule
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: node-prefer-noschedule
+subjects:
+  - kind: ServiceAccount
+    name: node-prefer-noschedule
+    namespace: kube-system

@@ -0,0 +1,6 @@
+# infrastructure/core/node-prefer-noschedule-serviceaccount.yaml
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: node-prefer-noschedule
+  namespace: kube-system

@@ -26,6 +26,9 @@ spec:
     cleanupOnFail: true
   timeout: 15m
   values:
+    global:
+      nodeSelector:
+        longhorn-host: "true"
     service:
       ui:
         type: NodePort

@@ -78,3 +81,12 @@ spec:
         tag: v2.16.0
     defaultSettings:
       systemManagedPodsImagePullPolicy: Always
+    longhornManager:
+      nodeSelector:
+        longhorn-host: "true"
+    longhornDriver:
+      nodeSelector:
+        longhorn-host: "true"
+    longhornUI:
+      nodeSelector:
+        longhorn-host: "true"
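These selectors assume the storage nodes already carry the longhorn-host label; if not, labeling is a one-liner (a sketch; the node name is illustrative):

    $ kubectl label node <storage-node> longhorn-host=true --overwrite
    $ kubectl get nodes -l longhorn-host=true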
@@ -2,10 +2,11 @@
 apiVersion: batch/v1
 kind: Job
 metadata:
-  name: longhorn-settings-ensure-4
+  name: longhorn-settings-ensure-7
   namespace: longhorn-system
 spec:
   backoffLimit: 0
+  activeDeadlineSeconds: 240
   ttlSecondsAfterFinished: 3600
   template:
     spec:

@@ -4,11 +4,12 @@ set -eu
 # Longhorn blocks direct CR patches for some settings; use the internal API instead.

 api_base="http://longhorn-backend.longhorn-system.svc:9500/v1/settings"
+curl_opts="-fsS --connect-timeout 3 --max-time 15"

 wait_for_api() {
   attempts=30
   while [ "${attempts}" -gt 0 ]; do
-    if curl -fsS "${api_base}" >/dev/null 2>&1; then
+    if curl ${curl_opts} "${api_base}" >/dev/null 2>&1; then
       return 0
     fi
     attempts=$((attempts - 1))

@@ -22,14 +23,14 @@ update_setting() {
   name="$1"
   value="$2"

-  current="$(curl -fsS "${api_base}/${name}" || true)"
+  current="$(curl ${curl_opts} "${api_base}/${name}" || true)"
   if echo "${current}" | grep -Fq "\"value\":\"${value}\""; then
     echo "Setting ${name} already set."
     return 0
   fi

   echo "Setting ${name} -> ${value}"
-  curl -fsS -X PUT \
+  curl ${curl_opts} -X PUT \
     -H "Content-Type: application/json" \
     -d "{\"value\":\"${value}\"}" \
     "${api_base}/${name}" >/dev/null

@@ -40,3 +41,7 @@ update_setting default-engine-image "registry.bstein.dev/infra/longhorn-engine:v
 update_setting default-instance-manager-image "registry.bstein.dev/infra/longhorn-instance-manager:v1.8.2"
 update_setting default-backing-image-manager-image "registry.bstein.dev/infra/longhorn-backing-image-manager:v1.8.2"
 update_setting support-bundle-manager-image "registry.bstein.dev/infra/longhorn-support-bundle-kit:v0.0.56"
+# Keep storage-heavy nodes from getting hammered by rebuild storms and skew.
+update_setting replica-auto-balance "best-effort"
+update_setting concurrent-replica-rebuild-per-node-limit "2"
+update_setting node-down-pod-deletion-policy "delete-both-statefulset-and-deployment-pod"
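To confirm a setting took effect, the same internal API the script uses can be read back from any pod in the cluster (a sketch, assuming curl and jq are available in that pod):

    $ curl -fsS http://longhorn-backend.longhorn-system.svc:9500/v1/settings/replica-auto-balance | jq -r .value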
@@ -26,6 +26,16 @@ spec:
               - key: hardware
                 operator: In
                 values: ["rpi5", "rpi4"]
+            - weight: 90
+              preference:
+                matchExpressions:
+                  - key: kubernetes.io/hostname
+                    operator: NotIn
+                    values:
+                      - titan-13
+                      - titan-15
+                      - titan-17
+                      - titan-19
       containers:
         - name: sync
           image: alpine:3.20

@@ -25,6 +25,7 @@ spec:
       serviceAccountName: postgres-vault
       nodeSelector:
         node-role.kubernetes.io/worker: "true"
+        hardware: rpi5
       affinity:
         nodeAffinity:
           requiredDuringSchedulingIgnoredDuringExecution:

@@ -35,7 +36,17 @@ spec:
                 values: ["true"]
               - key: hardware
                 operator: In
-                values: ["rpi4", "rpi5"]
+                values: ["rpi5"]
+              - key: kubernetes.io/hostname
+                operator: NotIn
+                values: ["titan-06"]
+          preferredDuringSchedulingIgnoredDuringExecution:
+            - weight: 100
+              preference:
+                matchExpressions:
+                  - key: kubernetes.io/hostname
+                    operator: In
+                    values: ["titan-05", "titan-07", "titan-08", "titan-11"]
       containers:
         - name: postgres
           image: postgres:15

@@ -70,6 +70,38 @@ items:
       dnsPolicy: ClusterFirst
       nodeSelector:
         node-role.kubernetes.io/worker: "true"
+      affinity:
+        nodeAffinity:
+          preferredDuringSchedulingIgnoredDuringExecution:
+            - weight: 100
+              preference:
+                matchExpressions:
+                  - key: atlas.bstein.dev/spillover
+                    operator: DoesNotExist
+            - weight: 95
+              preference:
+                matchExpressions:
+                  - key: kubernetes.io/hostname
+                    operator: NotIn
+                    values:
+                      - titan-13
+                      - titan-15
+                      - titan-17
+                      - titan-19
+            - weight: 90
+              preference:
+                matchExpressions:
+                  - key: hardware
+                    operator: In
+                    values:
+                      - rpi5
+            - weight: 50
+              preference:
+                matchExpressions:
+                  - key: hardware
+                    operator: In
+                    values:
+                      - rpi4
       restartPolicy: Always
       schedulerName: default-scheduler
       serviceAccount: atlas-traefik-ingress-controller

@@ -41,3 +41,12 @@ spec:
   failurePolicy: Ignore
   nodeSelector:
     node-role.kubernetes.io/worker: "true"
+  affinity:
+    nodeAffinity:
+      preferredDuringSchedulingIgnoredDuringExecution:
+        - weight: 100
+          preference:
+            matchExpressions:
+              - key: kubernetes.io/hostname
+                operator: NotIn
+                values: ["titan-13", "titan-15", "titan-17", "titan-19"]
[diff for one file suppressed: too large to display]
@ -4,13 +4,21 @@ import pathlib
|
|||||||
|
|
||||||
def load_module():
|
def load_module():
|
||||||
path = pathlib.Path(__file__).resolve().parents[1] / "dashboards_render_atlas.py"
|
path = pathlib.Path(__file__).resolve().parents[1] / "dashboards_render_atlas.py"
|
||||||
spec = importlib.util.spec_from_file_location("dashboards_render_atlas", path)
|
spec = importlib.util.spec_from_file_location("scripts.dashboards_render_atlas", path)
|
||||||
module = importlib.util.module_from_spec(spec)
|
module = importlib.util.module_from_spec(spec)
|
||||||
assert spec.loader is not None
|
assert spec.loader is not None
|
||||||
spec.loader.exec_module(module)
|
spec.loader.exec_module(module)
|
||||||
return module
|
return module
|
||||||
|
|
||||||
|
|
||||||
|
def flatten_panels(panels):
|
||||||
|
flat = []
|
||||||
|
for panel in panels:
|
||||||
|
flat.append(panel)
|
||||||
|
flat.extend(panel.get("panels", []))
|
||||||
|
return flat
|
||||||
|
|
||||||
|
|
||||||
def test_table_panel_options_and_filterable():
|
def test_table_panel_options_and_filterable():
|
||||||
mod = load_module()
|
mod = load_module()
|
||||||
panel = mod.table_panel(
|
panel = mod.table_panel(
|
||||||
@ -42,6 +50,18 @@ def test_node_filter_and_expr_helpers():
|
|||||||
assert "node_memory_MemAvailable_bytes" in mem_expr
|
assert "node_memory_MemAvailable_bytes" in mem_expr
|
||||||
|
|
||||||
|
|
||||||
|
def test_overview_availability_panel_uses_recorded_365d_rollup():
|
||||||
|
mod = load_module()
|
||||||
|
dashboard = mod.build_overview()
|
||||||
|
panel = next(panel for panel in flatten_panels(dashboard["panels"]) if panel["id"] == 27)
|
||||||
|
|
||||||
|
assert panel["title"] == "Atlas Availability (365d)"
|
||||||
|
assert panel["targets"][0]["expr"] == 'last_over_time(atlas:availability:ratio_365d{scope="atlas"}[30m])'
|
||||||
|
assert panel["targets"][0]["instant"] is True
|
||||||
|
assert "precomputed" in panel["description"]
|
||||||
|
assert "scrape gaps are ignored" in panel["description"]
|
||||||
|
|
||||||
|
|
||||||
def test_render_configmap_writes(tmp_path):
|
def test_render_configmap_writes(tmp_path):
|
||||||
mod = load_module()
|
mod = load_module()
|
||||||
mod.DASHBOARD_DIR = tmp_path / "dash"
|
mod.DASHBOARD_DIR = tmp_path / "dash"
|
||||||
@ -56,3 +76,98 @@ def test_render_configmap_writes(tmp_path):
|
|||||||
content = (tmp_path / "cm.yaml").read_text()
|
content = (tmp_path / "cm.yaml").read_text()
|
||||||
assert "kind: ConfigMap" in content
|
assert "kind: ConfigMap" in content
|
||||||
assert f"{uid}.json" in content
|
assert f"{uid}.json" in content
|
||||||
|
|
||||||
|
|
||||||
|
def test_testing_suite_variable_uses_canonical_values_only():
|
||||||
|
mod = load_module()
|
||||||
|
variable = mod.testing_suite_variable()
|
||||||
|
canonical_matcher = "|".join(mod.PLATFORM_TEST_SUITE_NAMES)
|
||||||
|
legacy_names = {"bstein-home", "data-prepper", "titan-iac", "pegasus-health"}
|
||||||
|
|
||||||
|
assert variable["allValue"] == canonical_matcher
|
||||||
|
assert not any(alias in variable["query"] for alias in legacy_names)
|
||||||
|
assert not any(alias in variable["allValue"] for alias in legacy_names)
|
||||||
|
assert [option["value"] for option in variable["options"]] == mod.PLATFORM_TEST_SUITE_NAMES
|
||||||
|
|
||||||
|
|
||||||
|
def test_jobs_dashboard_separates_current_gate_health_from_reliability():
|
||||||
|
mod = load_module()
|
||||||
|
dashboard = mod.build_jobs_dashboard()
|
||||||
|
panels_by_title = {panel["title"]: panel for panel in flatten_panels(dashboard["panels"])}
|
||||||
|
|
||||||
|
assert "Current Gate Health by Suite" in panels_by_title
|
||||||
|
assert "Run Reliability by Suite (24h)" in panels_by_title
|
||||||
|
assert "Run Reliability History by Suite" in panels_by_title
|
||||||
|
assert "Failures by Suite (24h)" not in panels_by_title
|
||||||
|
assert "Success Rate by Suite (24h)" not in panels_by_title
|
||||||
|
|
||||||
|
current_gate_expr = panels_by_title["Current Gate Health by Suite"]["targets"][0]["expr"]
|
||||||
|
assert 'check)' in current_gate_expr
|
||||||
|
assert 'result=~"ok|passed|success|not_applicable|skipped|na|n/a"' in current_gate_expr
|
||||||
|
|
||||||
|
reliability_panel = panels_by_title["Run Reliability by Suite (24h)"]
|
||||||
|
reliability_expr = reliability_panel["targets"][0]["expr"]
|
||||||
|
assert "platform_quality_gate_runs_total" in reliability_expr
|
||||||
|
assert "> 0" in reliability_expr
|
||||||
|
assert "- 1" in reliability_expr
|
||||||
|
assert reliability_panel["fieldConfig"]["defaults"]["mappings"] == [
|
||||||
|
{"type": "value", "options": {"-1": {"text": "no runs"}}}
|
||||||
|
]
|
||||||
|
|
||||||
|
|
||||||
|
def test_jobs_dashboard_bar_gauges_use_solid_threshold_colors():
|
||||||
|
mod = load_module()
|
||||||
|
dashboard = mod.build_jobs_dashboard()
|
||||||
|
panels = flatten_panels(dashboard["panels"])
|
||||||
|
bar_gauges = [panel for panel in panels if panel["type"] == "bargauge"]
|
||||||
|
|
||||||
|
assert bar_gauges
|
||||||
|
assert all(panel["options"]["displayMode"] == "basic" for panel in bar_gauges)
|
||||||
|
assert all(
|
||||||
|
panel["fieldConfig"]["defaults"]["color"]["mode"] == "thresholds"
|
||||||
|
for panel in bar_gauges
|
||||||
|
)
|
||||||
|
|
||||||
|
reliability_panel = next(
|
||||||
|
panel for panel in panels if panel["title"] == "Run Reliability by Suite (24h)"
|
||||||
|
)
|
||||||
|
threshold_steps = reliability_panel["fieldConfig"]["defaults"]["thresholds"]["steps"]
|
||||||
|
|
||||||
|
assert {"color": "dark-yellow", "value": 93} in threshold_steps
|
||||||
|
assert {"color": "dark-blue", "value": 100} in threshold_steps
|
||||||
|
|
||||||
|
|
||||||
|
def test_jobs_dashboard_collapses_heavy_drilldowns_for_light_first_paint():
|
||||||
|
mod = load_module()
|
||||||
|
dashboard = mod.build_jobs_dashboard()
|
||||||
|
panels = dashboard["panels"]
|
||||||
|
rows = [panel for panel in panels if panel["type"] == "row"]
|
||||||
|
visible_query_panels = [panel for panel in panels if panel["type"] != "row"]
|
||||||
|
nested_panels_by_title = {
|
||||||
|
child["title"]: child
|
||||||
|
for row in rows
|
||||||
|
for child in row.get("panels", [])
|
||||||
|
}
|
||||||
|
|
||||||
|
assert len(panels) == 16
|
||||||
|
assert len(visible_query_panels) == 10
|
||||||
|
assert sum(len(panel.get("targets", [])) for panel in visible_query_panels) == 10
|
||||||
|
assert all(
|
||||||
|
panel["title"] != "Coverage Gap to 95% by Suite"
|
||||||
|
for panel in visible_query_panels
|
||||||
|
)
|
||||||
|
assert [row["title"] for row in rows] == [
|
||||||
|
"Reliability And Run History",
|
||||||
|
"Failure Trends By Check",
|
||||||
|
"Success Trends By Check",
|
||||||
|
"Test Drilldowns And Problem Tests",
|
||||||
|
"Telemetry Completeness And Branches",
|
||||||
|
"SonarQube Project Health",
|
||||||
|
]
|
||||||
|
assert all(row["collapsed"] for row in rows)
|
||||||
|
|
||||||
|
assert "Failure Trend: Coverage" in nested_panels_by_title
|
||||||
|
assert "Success Trend: Supply Chain" in nested_panels_by_title
|
||||||
|
assert "Selected Test Pass Rate History" in nested_panels_by_title
|
||||||
|
assert "Coverage Metrics Present by Suite" in nested_panels_by_title
|
||||||
|
assert "SonarQube API Up" in nested_panels_by_title
|
||||||
|
|||||||
@ -138,6 +138,100 @@ def test_kc_get_users_paginates(monkeypatch):
|
|||||||
assert sync.SESSION.calls == 1
|
assert sync.SESSION.calls == 1
|
||||||
|
|
||||||
|
|
||||||
|
def test_kc_get_users_fetches_second_page_after_full_batch(monkeypatch):
|
||||||
|
sync = load_sync_module(monkeypatch)
|
||||||
|
|
||||||
|
class _PagedSession:
|
||||||
|
def __init__(self):
|
||||||
|
self.calls = 0
|
||||||
|
self.first_params = []
|
||||||
|
|
||||||
|
def get(self, *_, **kwargs):
|
||||||
|
self.calls += 1
|
||||||
|
self.first_params.append(kwargs["params"]["first"])
|
||||||
|
if self.calls == 1:
|
||||||
|
return _FakeResponse([{"id": f"u{i}"} for i in range(200)])
|
||||||
|
return _FakeResponse([{"id": "last"}])
|
||||||
|
|
||||||
|
sync.SESSION = _PagedSession()
|
||||||
|
|
||||||
|
users = sync.kc_get_users("tok")
|
||||||
|
|
||||||
|
assert len(users) == 201
|
||||||
|
assert sync.SESSION.first_params == [0, 200]
|
||||||
|
|
||||||
|
|
||||||
|
def test_get_kc_token_posts_client_credentials(monkeypatch):
|
||||||
|
sync = load_sync_module(monkeypatch)
|
||||||
|
calls = []
|
||||||
|
|
||||||
|
class _TokenSession:
|
||||||
|
def post(self, url, data, timeout):
|
||||||
|
calls.append((url, data, timeout))
|
||||||
|
return _FakeResponse({"access_token": "tok"})
|
||||||
|
|
||||||
|
sync.SESSION = _TokenSession()
|
||||||
|
|
||||||
|
assert sync.get_kc_token() == "tok"
|
||||||
|
assert calls[0][1]["grant_type"] == "client_credentials"
|
||||||
|
|
||||||
|
|
||||||
|
def test_retry_request_retries_then_succeeds(monkeypatch):
|
||||||
|
sync = load_sync_module(monkeypatch)
|
||||||
|
attempts = []
|
||||||
|
sleeps = []
|
||||||
|
|
||||||
|
def _flaky():
|
||||||
|
attempts.append(1)
|
||||||
|
if len(attempts) == 1:
|
||||||
|
raise sync.requests.RequestException("temporary")
|
||||||
|
return "ok"
|
||||||
|
|
||||||
|
monkeypatch.setattr(sync.time, "sleep", lambda seconds: sleeps.append(seconds))
|
||||||
|
|
||||||
|
assert sync.retry_request("request", _flaky, attempts=2) == "ok"
|
||||||
|
assert sleeps == [2]
|
||||||
|
|
||||||
|
|
||||||
|
def test_retry_request_reraises_final_error(monkeypatch):
|
||||||
|
sync = load_sync_module(monkeypatch)
|
||||||
|
monkeypatch.setattr(sync.time, "sleep", lambda seconds: None)
|
||||||
|
|
||||||
|
with pytest.raises(sync.requests.RequestException):
|
||||||
|
sync.retry_request(
|
||||||
|
"request",
|
||||||
|
lambda: (_ for _ in ()).throw(sync.requests.RequestException("nope")),
|
||||||
|
attempts=1,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
def test_retry_db_connect_retries_then_succeeds(monkeypatch):
|
||||||
|
sync = load_sync_module(monkeypatch)
|
||||||
|
attempts = []
|
||||||
|
sleeps = []
|
||||||
|
|
||||||
|
def _connect(**kwargs):
|
||||||
|
attempts.append(kwargs)
|
||||||
|
if len(attempts) == 1:
|
||||||
|
raise sync.psycopg2.Error("not yet")
|
||||||
|
return "conn"
|
||||||
|
|
||||||
|
monkeypatch.setattr(sync.psycopg2, "connect", _connect)
|
||||||
|
monkeypatch.setattr(sync.time, "sleep", lambda seconds: sleeps.append(seconds))
|
||||||
|
|
||||||
|
assert sync.retry_db_connect(attempts=2) == "conn"
|
||||||
|
assert sleeps == [2]
|
||||||
|
|
||||||
|
|
||||||
|
def test_retry_db_connect_reraises_final_error(monkeypatch):
|
||||||
|
sync = load_sync_module(monkeypatch)
|
||||||
|
monkeypatch.setattr(sync.psycopg2, "connect", lambda **kwargs: (_ for _ in ()).throw(sync.psycopg2.Error("down")))
|
||||||
|
monkeypatch.setattr(sync.time, "sleep", lambda seconds: None)
|
||||||
|
|
||||||
|
with pytest.raises(sync.psycopg2.Error):
|
||||||
|
sync.retry_db_connect(attempts=1)
|
||||||
|
|
||||||
|
|
||||||
def test_ensure_mailu_user_skips_foreign_domain(monkeypatch):
|
def test_ensure_mailu_user_skips_foreign_domain(monkeypatch):
|
||||||
sync = load_sync_module(monkeypatch)
|
sync = load_sync_module(monkeypatch)
|
||||||
executed = []
|
executed = []
|
||||||
@ -166,6 +260,87 @@ def test_ensure_mailu_user_upserts(monkeypatch):
|
|||||||
assert captured["password"] != "pw"
|
assert captured["password"] != "pw"
|
||||||
|
|
||||||
|
|
||||||
|
def test_attribute_and_email_helpers(monkeypatch):
|
||||||
|
sync = load_sync_module(monkeypatch)
|
||||||
|
|
||||||
|
assert sync.get_attribute_value({"x": ["first", "second"]}, "x") == "first"
|
||||||
|
assert sync.get_attribute_value({"x": []}, "x") is None
|
||||||
|
assert sync.get_attribute_value({"x": "value"}, "x") == "value"
|
||||||
|
assert sync.mailu_enabled({"mailu_email": ["legacy@example.com"]}) is True
|
||||||
|
assert sync.mailu_enabled({"mailu_enabled": ["off"]}) is False
|
||||||
|
assert sync.resolve_mailu_email({"username": "fallback", "email": "user@example.com"}, {}) == "user@example.com"
|
||||||
|
assert sync.resolve_mailu_email({"username": "fallback", "email": "user@other.com"}, {}) == "fallback@example.com"
|
||||||
|
|
||||||
|
|
||||||
|
def test_safe_update_payload_filters_fields(monkeypatch):
|
||||||
|
sync = load_sync_module(monkeypatch)
|
||||||
|
|
||||||
|
payload = sync._safe_update_payload(
|
||||||
|
{
|
||||||
|
"username": "user",
|
||||||
|
"enabled": True,
|
||||||
|
"email": "user@example.com",
|
||||||
|
"emailVerified": False,
|
||||||
|
"firstName": "User",
|
||||||
|
"lastName": "Example",
|
||||||
|
"requiredActions": ["UPDATE_PASSWORD", 7],
|
||||||
|
"attributes": "not-a-dict",
|
||||||
|
"ignored": "value",
|
||||||
|
}
|
||||||
|
)
|
||||||
|
|
||||||
|
assert payload == {
|
||||||
|
"username": "user",
|
||||||
|
"enabled": True,
|
||||||
|
"email": "user@example.com",
|
||||||
|
"emailVerified": False,
|
||||||
|
"firstName": "User",
|
||||||
|
"lastName": "Example",
|
||||||
|
"requiredActions": ["UPDATE_PASSWORD"],
|
||||||
|
"attributes": {},
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
def test_ensure_system_mailboxes_handles_configurations(monkeypatch, capsys):
|
||||||
|
sync = load_sync_module(monkeypatch)
|
||||||
|
ensured = []
|
||||||
|
monkeypatch.setattr(sync, "MAILU_SYSTEM_USERS", ["postmaster@example.com", "abuse"])
|
||||||
|
monkeypatch.setattr(sync, "MAILU_SYSTEM_PASSWORD", "")
|
||||||
|
|
||||||
|
sync.ensure_system_mailboxes(object())
|
||||||
|
|
||||||
|
assert "MAILU_SYSTEM_PASSWORD is missing" in capsys.readouterr().out
|
||||||
|
|
||||||
|
def _ensure(cursor, email, password, display_name):
|
||||||
|
ensured.append((email, password, display_name))
|
||||||
|
if email == "abuse":
|
||||||
|
raise RuntimeError("boom")
|
||||||
|
|
||||||
|
monkeypatch.setattr(sync, "MAILU_SYSTEM_PASSWORD", "pw")
|
||||||
|
monkeypatch.setattr(sync, "ensure_mailu_user", _ensure)
|
||||||
|
|
||||||
|
sync.ensure_system_mailboxes(object())
|
||||||
|
|
||||||
|
out = capsys.readouterr().out
|
||||||
|
assert ensured == [
|
||||||
|
("postmaster@example.com", "pw", "postmaster"),
|
||||||
|
("abuse", "pw", "abuse"),
|
||||||
|
]
|
||||||
|
assert "Ensured system mailbox for postmaster@example.com" in out
|
||||||
|
assert "Failed to ensure system mailbox abuse" in out
|
||||||
|
|
||||||
|
|
||||||
|
def test_main_exits_without_users_or_system_mailboxes(monkeypatch, capsys):
|
||||||
|
sync = load_sync_module(monkeypatch)
|
||||||
|
monkeypatch.setattr(sync, "MAILU_SYSTEM_USERS", [])
|
||||||
|
monkeypatch.setattr(sync, "get_kc_token", lambda: "tok")
|
||||||
|
monkeypatch.setattr(sync, "kc_get_users", lambda token: [])
|
||||||
|
|
||||||
|
sync.main()
|
||||||
|
|
||||||
|
assert "No users found; exiting." in capsys.readouterr().out
|
||||||
|
|
||||||
|
|
||||||
def test_main_generates_password_and_upserts(monkeypatch):
|
def test_main_generates_password_and_upserts(monkeypatch):
|
||||||
sync = load_sync_module(monkeypatch)
|
sync = load_sync_module(monkeypatch)
|
||||||
monkeypatch.setattr(sync.bcrypt_sha256, "hash", lambda password: f"hash:{password}")
|
monkeypatch.setattr(sync.bcrypt_sha256, "hash", lambda password: f"hash:{password}")
|
||||||
|
|||||||
134
scripts/tests/test_mailu_sync_listener.py
Normal file
134
scripts/tests/test_mailu_sync_listener.py
Normal file
@ -0,0 +1,134 @@
|
|||||||
|
import importlib.util
|
||||||
|
import io
|
||||||
|
import pathlib
|
||||||
|
import types
|
||||||
|
|
||||||
|
|
||||||
|
def load_listener_module(monkeypatch):
|
||||||
|
monkeypatch.setenv("MAILU_SYNC_WAIT_TIMEOUT_SEC", "0")
|
||||||
|
module_path = (
|
||||||
|
pathlib.Path(__file__).resolve().parents[2]
|
||||||
|
/ "services"
|
||||||
|
/ "mailu"
|
||||||
|
/ "scripts"
|
||||||
|
/ "mailu_sync_listener.py"
|
||||||
|
)
|
||||||
|
spec = importlib.util.spec_from_file_location("mailu_sync_listener_testmod", module_path)
|
||||||
|
module = importlib.util.module_from_spec(spec)
|
||||||
|
assert spec.loader is not None
|
||||||
|
spec.loader.exec_module(module)
|
||||||
|
return module
|
||||||
|
|
||||||
|
|
||||||
|
def _handler_for(listener, body):
|
||||||
|
handler = listener.Handler.__new__(listener.Handler)
|
||||||
|
raw = body if isinstance(body, bytes) else body.encode()
|
||||||
|
handler.headers = {"Content-Length": str(len(raw))}
|
||||||
|
handler.rfile = io.BytesIO(raw)
|
||||||
|
handler.responses = []
|
||||||
|
handler.headers_ended = 0
|
||||||
|
handler.send_response = lambda code: handler.responses.append(code)
|
||||||
|
handler.end_headers = lambda: setattr(handler, "headers_ended", handler.headers_ended + 1)
|
||||||
|
return handler
|
||||||
|
|
||||||
|
|
||||||
def test_listener_run_sync_blocking_updates_state(monkeypatch):
    listener = load_listener_module(monkeypatch)
    monkeypatch.setattr(listener, "time", lambda: 42.0)
    monkeypatch.setattr(
        listener.subprocess,
        "run",
        lambda command, check: types.SimpleNamespace(returncode=3),
    )

    assert listener._run_sync_blocking() == 3
    assert listener.last_rc == 3
    assert listener.last_run == 42.0
    assert listener.sync_done.is_set()

    listener.sync_running = True
    assert listener._run_sync_blocking() == 0


def test_listener_trigger_sync_async_honors_running_and_debounce(monkeypatch):
    listener = load_listener_module(monkeypatch)
    starts = []

    class _Thread:
        def __init__(self, target, daemon):
            self.target = target
            self.daemon = daemon

        def start(self):
            starts.append((self.target, self.daemon))

    monkeypatch.setattr(listener.threading, "Thread", _Thread)
    monkeypatch.setattr(listener, "time", lambda: 100.0)

    listener.sync_running = True
    assert listener._trigger_sync_async() is False

    listener.sync_running = False
    listener.last_run = 95.0
    assert listener._trigger_sync_async() is False

    assert listener._trigger_sync_async(force=True) is True
    assert starts and starts[0][1] is True


def test_listener_post_rejects_invalid_json(monkeypatch):
    listener = load_listener_module(monkeypatch)
    handler = _handler_for(listener, b"{not-json")

    handler.do_POST()

    assert handler.responses == [400]
    assert handler.headers_ended == 1


def test_listener_post_triggers_async_without_wait(monkeypatch):
    listener = load_listener_module(monkeypatch)
    called = []
    monkeypatch.setattr(listener, "_trigger_sync_async", lambda force=False: called.append(force) or True)
    handler = _handler_for(listener, '{"force": true}')

    handler.do_POST()

    assert called == [True]
    assert handler.responses == [202]


def test_listener_post_wait_returns_success_or_failure(monkeypatch):
    listener = load_listener_module(monkeypatch)
    called = []
    monkeypatch.setattr(listener, "_trigger_sync_async", lambda force=False: called.append(force) or True)
    listener.sync_running = False
    listener.last_rc = 0
    handler = _handler_for(listener, '{"wait": true, "force": true}')

    handler.do_POST()

    assert called == [True]
    assert handler.responses == [200]

    listener.last_rc = 2
    handler = _handler_for(listener, '{"wait": true}')
    handler.do_POST()
    assert handler.responses == [500]


def test_listener_post_wait_keeps_running_request_successful(monkeypatch):
    listener = load_listener_module(monkeypatch)
    listener.sync_running = True
    handler = _handler_for(listener, '{"wait": true}')

    handler.do_POST()

    assert handler.responses == [200]


def test_listener_log_message_is_quiet(monkeypatch):
    listener = load_listener_module(monkeypatch)
    handler = listener.Handler.__new__(listener.Handler)

    assert handler.log_message("ignored %s", "value") is None
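The handler tests above lean on two module-level helpers defined earlier in the test file, load_listener_module and _handler_for, whose bodies fall outside this diff. As a rough sketch of what a _handler_for-style helper typically looks like (illustrative only; the attribute names responses and headers_ended mirror the assertions above, everything else here is an assumption, not the repository's actual implementation):

import io

def _handler_for_sketch(listener, body):
    # Hypothetical stand-in: build a Handler instance without opening a socket.
    if isinstance(body, str):
        body = body.encode("utf-8")
    handler = listener.Handler.__new__(listener.Handler)  # skip __init__/socket setup
    handler.rfile = io.BytesIO(body)
    handler.wfile = io.BytesIO()
    handler.headers = {"Content-Length": str(len(body))}
    handler.responses = []      # status codes recorded via send_response
    handler.headers_ended = 0   # how many times end_headers ran
    handler.send_response = handler.responses.append
    handler.send_header = lambda name, value: None

    def _end_headers():
        handler.headers_ended += 1

    handler.end_headers = _end_headers
    return handler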
@@ -5,7 +5,7 @@ metadata:
   name: ollama
   namespace: ai
 spec:
-  replicas: 1
+  replicas: 0
   revisionHistoryLimit: 2
   strategy:
     type: RollingUpdate
@@ -21,7 +21,7 @@ spec:
         app: ollama
       annotations:
         ai.bstein.dev/model: qwen2.5:14b-instruct-q4_0
-        ai.bstein.dev/gpu: GPU pool (titan-22/24)
+        ai.bstein.dev/gpu: GPU pool (titan-20/21)
         ai.bstein.dev/restartedAt: "2026-01-26T12:00:00Z"
     spec:
       affinity:
@@ -32,13 +32,13 @@ spec:
                 - key: kubernetes.io/hostname
                   operator: In
                   values:
-                    - titan-22
-                    - titan-24
+                    - titan-20
+                    - titan-21
       runtimeClassName: nvidia
       volumes:
         - name: models
           persistentVolumeClaim:
-            claimName: ollama-models
+            claimName: ollama-models-asteria
       initContainers:
         - name: warm-model
           image: ollama/ollama@sha256:2c9595c555fd70a28363489ac03bd5bf9e7c5bdf2890373c3a830ffd7252ce6d
@@ -2,12 +2,12 @@
 apiVersion: v1
 kind: PersistentVolumeClaim
 metadata:
-  name: ollama-models
+  name: ollama-models-asteria
   namespace: ai
 spec:
   accessModes:
-    - ReadWriteOnce
+    - ReadWriteMany
   resources:
     requests:
       storage: 30Gi
-  storageClassName: astreae
+  storageClassName: asteria
@@ -49,6 +49,15 @@ spec:
       nodeSelector:
         kubernetes.io/arch: arm64
         node-role.kubernetes.io/worker: "true"
+      affinity:
+        nodeAffinity:
+          preferredDuringSchedulingIgnoredDuringExecution:
+            - weight: 100
+              preference:
+                matchExpressions:
+                  - key: kubernetes.io/hostname
+                    operator: NotIn
+                    values: ["titan-13", "titan-15", "titan-17", "titan-19"]
       imagePullSecrets:
         - name: harbor-regcred
       containers:
@@ -38,6 +38,36 @@ spec:
       nodeSelector:
         kubernetes.io/arch: arm64
         node-role.kubernetes.io/worker: "true"
+      affinity:
+        nodeAffinity:
+          preferredDuringSchedulingIgnoredDuringExecution:
+            - weight: 100
+              preference:
+                matchExpressions:
+                  - key: atlas.bstein.dev/spillover
+                    operator: DoesNotExist
+            - weight: 95
+              preference:
+                matchExpressions:
+                  - key: kubernetes.io/hostname
+                    operator: NotIn
+                    values:
+                      - titan-13
+                      - titan-15
+                      - titan-17
+                      - titan-19
+            - weight: 90
+              preference:
+                matchExpressions:
+                  - key: hardware
+                    operator: In
+                    values: ["rpi5"]
+            - weight: 50
+              preference:
+                matchExpressions:
+                  - key: hardware
+                    operator: In
+                    values: ["rpi4"]
       containers:
         - name: gateway
           image: python:3.11-slim
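All of these preferred rules are soft constraints: for each candidate node the scheduler sums the weights of the preferences that node satisfies and favors the highest total, so spillover nodes and the titan-13/15/17/19 group are deprioritized rather than excluded. A small illustration of that scoring, assuming standard Kubernetes semantics for preferredDuringSchedulingIgnoredDuringExecution (the node labels below are hypothetical):

# Weights mirror the gateway affinity added above; labels are made up.
PREFS = [
    (100, lambda labels: "atlas.bstein.dev/spillover" not in labels),
    (95, lambda labels: labels.get("kubernetes.io/hostname")
         not in {"titan-13", "titan-15", "titan-17", "titan-19"}),
    (90, lambda labels: labels.get("hardware") == "rpi5"),
    (50, lambda labels: labels.get("hardware") == "rpi4"),
]

def affinity_score(labels):
    # Sum the weights of every satisfied preference (soft rules only).
    return sum(weight for weight, matches in PREFS if matches(labels))

# A non-spillover rpi5 node scores 100 + 95 + 90 = 285; a spillover
# titan-13 rpi4 node scores only 50, so it is picked last, not never.
assert affinity_score({"kubernetes.io/hostname": "titan-05", "hardware": "rpi5"}) == 285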
@@ -26,7 +26,7 @@ spec:
           imagePullPolicy: Always
           ports:
             - name: http
-              containerPort: 80
+              containerPort: 8080
           readinessProbe:
             httpGet:
               path: /
@@ -10,4 +10,4 @@ spec:
   ports:
     - name: http
       port: 80
-      targetPort: 80
+      targetPort: 8080
@@ -20,9 +20,9 @@ resources:
   - ingress.yaml
 images:
   - name: registry.bstein.dev/bstein/bstein-dev-home-frontend
-    newTag: 0.1.1-120 # {"$imagepolicy": "bstein-dev-home:bstein-dev-home-frontend:tag"}
+    newTag: 0.1.1-280 # {"$imagepolicy": "bstein-dev-home:bstein-dev-home-frontend:tag"}
   - name: registry.bstein.dev/bstein/bstein-dev-home-backend
-    newTag: 0.1.1-123 # {"$imagepolicy": "bstein-dev-home:bstein-dev-home-backend:tag"}
+    newTag: 0.1.1-280 # {"$imagepolicy": "bstein-dev-home:bstein-dev-home-backend:tag"}
 configMapGenerator:
   - name: chat-ai-gateway
     namespace: bstein-dev-home
@@ -53,7 +53,7 @@ spec:
     registry:
       existingClaim: harbor-registry
       accessMode: ReadWriteOnce
-      size: 50Gi
+      size: 100Gi
     jobservice:
       jobLog:
         existingClaim: harbor-jobservice-logs
@@ -77,6 +77,7 @@ spec:
     internal:
       nodeSelector:
         ananke.bstein.dev/harbor-bootstrap: "true"
+        kubernetes.io/hostname: titan-11
       image:
         repository: registry.bstein.dev/infra/harbor-redis
         tag: v2.14.1-arm64 # {"$imagepolicy": "harbor:harbor-redis:tag"}
@@ -113,6 +114,7 @@ spec:
     core:
       nodeSelector:
         ananke.bstein.dev/harbor-bootstrap: "true"
+        kubernetes.io/hostname: titan-11
       image:
         repository: registry.bstein.dev/infra/harbor-core
         tag: v2.14.1-arm64 # {"$imagepolicy": "harbor:harbor-core:tag"}
@@ -125,6 +127,10 @@ spec:
       podAnnotations:
         vault.hashicorp.com/agent-inject: "true"
         vault.hashicorp.com/role: "harbor"
+        vault.hashicorp.com/agent-requests-cpu: "25m"
+        vault.hashicorp.com/agent-limits-cpu: "100m"
+        vault.hashicorp.com/agent-requests-mem: "32Mi"
+        vault.hashicorp.com/agent-limits-mem: "128Mi"
         vault.hashicorp.com/agent-inject-secret-harbor-core-env.sh: "kv/data/atlas/harbor/harbor-core"
         vault.hashicorp.com/agent-inject-template-harbor-core-env.sh: |
           {{ with secret "kv/data/atlas/harbor/harbor-core" }}
@@ -174,6 +180,7 @@ spec:
     jobservice:
       nodeSelector:
         ananke.bstein.dev/harbor-bootstrap: "true"
+        kubernetes.io/hostname: titan-11
       image:
         repository: registry.bstein.dev/infra/harbor-jobservice
         tag: v2.14.1-arm64 # {"$imagepolicy": "harbor:harbor-jobservice:tag"}
@@ -183,6 +190,10 @@ spec:
       podAnnotations:
         vault.hashicorp.com/agent-inject: "true"
        vault.hashicorp.com/role: "harbor"
+        vault.hashicorp.com/agent-requests-cpu: "25m"
+        vault.hashicorp.com/agent-limits-cpu: "100m"
+        vault.hashicorp.com/agent-requests-mem: "32Mi"
+        vault.hashicorp.com/agent-limits-mem: "128Mi"
         vault.hashicorp.com/agent-inject-secret-harbor-jobservice-env.sh: "kv/data/atlas/harbor/harbor-jobservice"
         vault.hashicorp.com/agent-inject-template-harbor-jobservice-env.sh: |
           {{ with secret "kv/data/atlas/harbor/harbor-core" }}
@@ -216,6 +227,7 @@ spec:
     portal:
       nodeSelector:
         ananke.bstein.dev/harbor-bootstrap: "true"
+        kubernetes.io/hostname: titan-11
       image:
         repository: registry.bstein.dev/infra/harbor-portal
         tag: v2.14.1-arm64 # {"$imagepolicy": "harbor:harbor-portal:tag"}
@@ -243,6 +255,7 @@ spec:
     registry:
       nodeSelector:
         ananke.bstein.dev/harbor-bootstrap: "true"
+        kubernetes.io/hostname: titan-11
       registry:
         image:
           repository: registry.bstein.dev/infra/harbor-registry
@@ -270,6 +283,10 @@ spec:
       podAnnotations:
         vault.hashicorp.com/agent-inject: "true"
         vault.hashicorp.com/role: "harbor"
+        vault.hashicorp.com/agent-requests-cpu: "25m"
+        vault.hashicorp.com/agent-limits-cpu: "100m"
+        vault.hashicorp.com/agent-requests-mem: "32Mi"
+        vault.hashicorp.com/agent-limits-mem: "128Mi"
         vault.hashicorp.com/agent-inject-secret-harbor-registry-env.sh: "kv/data/atlas/harbor/harbor-registry"
         vault.hashicorp.com/agent-inject-template-harbor-registry-env.sh: |
           {{ with secret "kv/data/atlas/harbor/harbor-registry" }}
@@ -321,6 +338,7 @@ spec:
     nginx:
       nodeSelector:
         ananke.bstein.dev/harbor-bootstrap: "true"
+        kubernetes.io/hostname: titan-11
       image:
         repository: registry.bstein.dev/infra/harbor-nginx
         tag: v2.14.1-arm64 # {"$imagepolicy": "harbor:harbor-nginx:tag"}
@@ -8,7 +8,7 @@ spec:
   accessModes: [ "ReadWriteOnce" ]
   resources:
     requests:
-      storage: 50Gi
+      storage: 100Gi
   storageClassName: astreae
 ---
 apiVersion: v1
@@ -77,23 +77,26 @@ spec:
             mountPath: /config
       affinity:
         nodeAffinity:
+          requiredDuringSchedulingIgnoredDuringExecution:
+            nodeSelectorTerms:
+              - matchExpressions:
+                  - key: longhorn-host
+                    operator: In
+                    values:
+                      - "true"
+                  - key: node-role.kubernetes.io/worker
+                    operator: In
+                    values:
+                      - "true"
           preferredDuringSchedulingIgnoredDuringExecution:
             - weight: 100
               preference:
                 matchExpressions:
-                  - key: kubernetes.io/hostname
+                  - key: hardware
                     operator: In
                     values:
-                      - titan-22
+                      - rpi5
             - weight: 80
-              preference:
-                matchExpressions:
-                  - key: kubernetes.io/hostname
-                    operator: In
-                    values:
-                      - titan-20
-                      - titan-21
-            - weight: 60
               preference:
                 matchExpressions:
                   - key: kubernetes.io/hostname
@@ -105,7 +108,6 @@ spec:
         fsGroup: 65532
         fsGroupChangePolicy: OnRootMismatch
         runAsGroup: 65532
-      runtimeClassName: nvidia
       containers:
         - name: jellyfin
           image: docker.io/jellyfin/jellyfin:10.11.5
@@ -118,8 +120,6 @@ spec:
             - name: http
               containerPort: 8096
           env:
-            - name: NVIDIA_DRIVER_CAPABILITIES
-              value: "compute,video,utility"
             - name: JELLYFIN_PublishedServerUrl
               value: "https://stream.bstein.dev"
             - name: PUID
@@ -131,12 +131,7 @@ spec:
             - name: VAULT_COPY_FILES
               value: /vault/secrets/ldap-config.xml:/config/plugins/configurations/LDAP-Auth.xml
           resources:
-            limits:
-              nvidia.com/gpu.shared: 1
-              # cpu: "4"
-              # memory: 8Gi
             requests:
-              nvidia.com/gpu.shared: 1
               cpu: "500m"
               memory: 1Gi
           volumeMounts:
@@ -45,6 +45,17 @@ data:
           username: "${HARBOR_ROBOT_USERNAME}"
           password: "${HARBOR_ROBOT_PASSWORD}"
           description: "Harbor robot for pipelines"
+      - usernamePassword:
+          scope: GLOBAL
+          id: harbor-robot-streaming
+          username: "${HARBOR_STREAMING_ROBOT_USERNAME}"
+          password: "${HARBOR_STREAMING_ROBOT_PASSWORD}"
+          description: "Harbor robot for streaming pushes"
+      - string:
+          scope: GLOBAL
+          id: sonarqube-token
+          secret: "${SONARQUBE_TOKEN}"
+          description: "SonarQube token for quality-gate evidence collection"
   jobs.yaml: |
     jobs:
       - script: |
@@ -81,6 +92,9 @@ data:
                 scmpoll_spec('H/2 * * * *')
                 ignorePostCommitHooks(false)
               }
+              cron {
+                spec('H H * * *')
+              }
             }
           }
         }
@@ -107,6 +121,9 @@ data:
                 scmpoll_spec('H/2 * * * *')
                 ignorePostCommitHooks(false)
               }
+              cron {
+                spec('H H * * *')
+              }
             }
           }
         }
@@ -133,6 +150,9 @@ data:
                 scmpoll_spec('H/5 * * * *')
                 ignorePostCommitHooks(false)
               }
+              cron {
+                spec('H H * * *')
+              }
             }
           }
         }
@@ -159,6 +179,9 @@ data:
                 scmpoll_spec('H/5 * * * *')
                 ignorePostCommitHooks(false)
               }
+              cron {
+                spec('H H * * *')
+              }
             }
           }
         }
@@ -203,6 +226,32 @@ data:
             }
           }
         }
+        pipelineJob('arcanagon') {
+          properties {
+            pipelineTriggers {
+              triggers {
+                scmTrigger {
+                  scmpoll_spec('H/5 * * * *')
+                  ignorePostCommitHooks(false)
+                }
+              }
+            }
+          }
+          definition {
+            cpsScm {
+              scm {
+                git {
+                  remote {
+                    url('https://scm.bstein.dev/bstein/arcanagon.git')
+                    credentials('gitea-pat')
+                  }
+                  branches('*/master')
+                }
+              }
+              scriptPath('Jenkinsfile')
+            }
+          }
+        }
         pipelineJob('pegasus') {
           properties {
             pipelineTriggers {
@@ -211,6 +260,9 @@ data:
                 scmpoll_spec('H/5 * * * *')
                 ignorePostCommitHooks(false)
               }
+              cron {
+                spec('H H * * *')
+              }
             }
           }
         }
@@ -237,6 +289,9 @@ data:
                 scmpoll_spec('H/5 * * * *')
                 ignorePostCommitHooks(false)
               }
+              cron {
+                spec('H H * * *')
+              }
             }
           }
         }
@@ -263,6 +318,9 @@ data:
                 scmpoll_spec('H/5 * * * *')
                 ignorePostCommitHooks(false)
               }
+              cron {
+                spec('H H * * *')
+              }
             }
           }
         }
@@ -289,6 +347,9 @@ data:
                 scmpoll_spec('H/5 * * * *')
                 ignorePostCommitHooks(false)
               }
+              cron {
+                spec('H H * * *')
+              }
             }
           }
         }
@@ -315,6 +376,9 @@ data:
                 scmpoll_spec('H/5 * * * *')
                 ignorePostCommitHooks(false)
               }
+              cron {
+                spec('H H * * *')
+              }
             }
           }
         }
@@ -404,9 +468,9 @@ data:
           plainText
     clouds:
       - kubernetes:
-          containerCapStr: "10"
-          connectTimeout: "5"
-          readTimeout: "15"
+          containerCapStr: "4"
+          connectTimeout: "20"
+          readTimeout: "90"
           jenkinsUrl: "http://jenkins.jenkins.svc.cluster.local:8080"
           jenkinsTunnel: "jenkins.jenkins.svc.cluster.local:50000"
           skipTlsVerify: false
@@ -425,8 +489,10 @@ data:
             - name: "default"
               namespace: "jenkins"
              workspaceVolume:
-                emptyDirWorkspaceVolume:
-                  memory: false
+                dynamicPVC:
+                  accessModes: "ReadWriteOnce"
+                  requestsSize: "20Gi"
+                  storageClassName: "astreae"
               containers:
                 - name: "jnlp"
                   args: "^${computer.jnlpmac} ^${computer.name}"
@@ -444,11 +510,56 @@ data:
                   workingDir: /home/jenkins/agent
               idleMinutes: 0
               instanceCap: 2147483647
-              label: "jenkins-jenkins-agent"
+              label: "jenkins-jenkins-agent "
               nodeUsageMode: "NORMAL"
               podRetention: Never
               serviceAccount: "jenkins"
               slaveConnectTimeoutStr: "100"
+              yaml: |
+                spec:
+                  nodeSelector:
+                    hardware: rpi5
+                    kubernetes.io/arch: arm64
+                    node-role.kubernetes.io/worker: "true"
+                  affinity:
+                    nodeAffinity:
+                      requiredDuringSchedulingIgnoredDuringExecution:
+                        nodeSelectorTerms:
+                          - matchExpressions:
+                              - key: kubernetes.io/hostname
+                                operator: NotIn
+                                values:
+                                  - titan-06
+                      preferredDuringSchedulingIgnoredDuringExecution:
+                        - weight: 100
+                          preference:
+                            matchExpressions:
+                              - key: atlas.bstein.dev/spillover
+                                operator: DoesNotExist
+                        - weight: 95
+                          preference:
+                            matchExpressions:
+                              - key: kubernetes.io/hostname
+                                operator: NotIn
+                                values:
+                                  - titan-13
+                                  - titan-15
+                                  - titan-17
+                                  - titan-19
+                        - weight: 85
+                          preference:
+                            matchExpressions:
+                              - key: hardware
+                                operator: In
+                                values:
+                                  - rpi5
+                  topologySpreadConstraints:
+                    - maxSkew: 1
+                      topologyKey: kubernetes.io/hostname
+                      whenUnsatisfiable: ScheduleAnyway
+                      labelSelector:
+                        matchLabels:
+                          jenkins/jenkins-jenkins-agent: "true"
               yamlMergeStrategy: override
               inheritYamlMergeStrategy: false
       slaveAgentPort: 50000
@@ -33,17 +33,26 @@ spec:
           {{ with secret "kv/data/atlas/jenkins/harbor-robot-creds" }}
           HARBOR_ROBOT_USERNAME={{ .Data.data.username }}
           HARBOR_ROBOT_PASSWORD={{ .Data.data.password }}
+          HARBOR_STREAMING_ROBOT_USERNAME={{ .Data.data.username }}
+          HARBOR_STREAMING_ROBOT_PASSWORD={{ .Data.data.password }}
+          {{ end }}
+          {{ with secret "kv/data/atlas/jenkins/harbor-streaming-robot-creds" }}
+          HARBOR_STREAMING_ROBOT_USERNAME={{ .Data.data.username }}
+          HARBOR_STREAMING_ROBOT_PASSWORD={{ .Data.data.password }}
           {{ end }}
           {{ with secret "kv/data/atlas/shared/harbor-pull" }}
           {{- if and .Data.data.username .Data.data.password }}
-          HARBOR_ROBOT_USERNAME={{ .Data.data.username }}
-          HARBOR_ROBOT_PASSWORD={{ .Data.data.password }}
+          HARBOR_PULL_USERNAME={{ .Data.data.username }}
+          HARBOR_PULL_PASSWORD={{ .Data.data.password }}
           {{- end }}
           {{ end }}
           {{ with secret "kv/data/atlas/jenkins/gitea-pat" }}
           GITEA_PAT_USERNAME={{ .Data.data.username }}
           GITEA_PAT_TOKEN={{ .Data.data.token }}
           {{ end }}
+          {{ with secret "kv/data/atlas/quality/sonarqube-oidc" }}
+          SONARQUBE_TOKEN={{ .Data.data.sonarqube_exporter_token }}
+          {{ end }}
           {{ with secret "kv/data/atlas/jenkins/webhook-tokens" }}
           TITAN_IAC_WEBHOOK_TOKEN={{ .Data.data.titan_iac_quality_gate }}
           GIT_NOTIFY_TOKEN_BSTEIN_DEV_HOME={{ .Data.data.git_notify_bstein_dev_home }}
@@ -61,6 +70,21 @@ spec:
       affinity:
         nodeAffinity:
           preferredDuringSchedulingIgnoredDuringExecution:
+            - weight: 100
+              preference:
+                matchExpressions:
+                  - key: atlas.bstein.dev/spillover
+                    operator: DoesNotExist
+            - weight: 95
+              preference:
+                matchExpressions:
+                  - key: kubernetes.io/hostname
+                    operator: NotIn
+                    values:
+                      - titan-13
+                      - titan-15
+                      - titan-17
+                      - titan-19
             - weight: 90
               preference:
                 matchExpressions:
@@ -79,6 +103,7 @@ spec:
             - sso.bstein.dev
       securityContext:
         fsGroup: 1000
+        fsGroupChangePolicy: OnRootMismatch
       initContainers:
         - name: install-plugins
           image: jenkins/jenkins:2.528.3-jdk21
@@ -155,7 +180,8 @@ spec:
               port: http
             initialDelaySeconds: 30
             periodSeconds: 10
-            failureThreshold: 20
+            timeoutSeconds: 5
+            failureThreshold: 60
           volumeMounts:
             - name: jenkins-home
               mountPath: /var/jenkins_home
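The probe tuning in the last hunk mostly buys the controller a longer startup window: with periodSeconds: 10, raising failureThreshold from 20 to 60 moves the worst-case budget from roughly 30 + 20*10 = 230s to 30 + 60*10 = 630s of consecutive failures before the kubelet acts on the probe, while the new timeoutSeconds: 5 caps each individual HTTP check. A quick sanity check of that arithmetic (assuming the usual kubelet semantics; whether sustained failure restarts the pod or only marks it unready depends on the probe's liveness/readiness kind, which this hunk does not show):

def probe_budget(initial_delay, period, failure_threshold):
    # Worst-case seconds from container start until the probe has
    # failed failure_threshold times in a row.
    return initial_delay + period * failure_threshold

assert probe_budget(30, 10, 20) == 230   # old settings
assert probe_budget(30, 10, 60) == 630   # new settings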
@@ -35,6 +35,9 @@ subjects:
   - kind: ServiceAccount
     name: jenkins
     namespace: jenkins
+  - kind: ServiceAccount
+    name: default
+    namespace: jenkins
 roleRef:
   apiGroup: rbac.authorization.k8s.io
   kind: Role
@@ -60,6 +63,9 @@ subjects:
   - kind: ServiceAccount
     name: jenkins
     namespace: jenkins
+  - kind: ServiceAccount
+    name: default
+    namespace: jenkins
 roleRef:
   apiGroup: rbac.authorization.k8s.io
   kind: ClusterRole
@@ -18,6 +18,15 @@ spec:
       nodeSelector:
         kubernetes.io/arch: arm64
         node-role.kubernetes.io/worker: "true"
+      affinity:
+        nodeAffinity:
+          preferredDuringSchedulingIgnoredDuringExecution:
+            - weight: 100
+              preference:
+                matchExpressions:
+                  - key: kubernetes.io/hostname
+                    operator: NotIn
+                    values: ["titan-13", "titan-15", "titan-17", "titan-19"]
       containers:
         - name: sync
           image: alpine:3.20
@@ -24,7 +24,9 @@ resources:
   - oneoffs/logs-oidc-secret-ensure-job.yaml
   - oneoffs/metis-oidc-secret-ensure-job.yaml
   - oneoffs/soteria-oidc-secret-ensure-job.yaml
+  - oneoffs/quality-oidc-secret-ensure-job.yaml
   - oneoffs/metis-ssh-keys-secret-ensure-job.yaml
+  - oneoffs/metis-node-passwords-secret-ensure-job.yaml
   - oneoffs/harbor-oidc-secret-ensure-job.yaml
   - oneoffs/vault-oidc-secret-ensure-job.yaml
   - oneoffs/actual-oidc-secret-ensure-job.yaml
@@ -0,0 +1,110 @@
+# services/keycloak/oneoffs/metis-node-passwords-secret-ensure-job.yaml
+# One-off job for sso/metis-node-passwords-secret-ensure-4.
+# Purpose: ensure per-node Metis recovery placeholders exist in Vault.
+# Atlas/root values are preserved while intranet IPs are standardized per node.
+apiVersion: batch/v1
+kind: Job
+metadata:
+  name: metis-node-passwords-secret-ensure-4
+  namespace: sso
+spec:
+  backoffLimit: 0
+  ttlSecondsAfterFinished: 3600
+  template:
+    spec:
+      serviceAccountName: mas-secrets-ensure
+      restartPolicy: Never
+      affinity:
+        nodeAffinity:
+          requiredDuringSchedulingIgnoredDuringExecution:
+            nodeSelectorTerms:
+              - matchExpressions:
+                  - key: node-role.kubernetes.io/worker
+                    operator: Exists
+          preferredDuringSchedulingIgnoredDuringExecution:
+            - weight: 100
+              preference:
+                matchExpressions:
+                  - key: kubernetes.io/arch
+                    operator: In
+                    values: ["arm64"]
+      containers:
+        - name: apply
+          image: registry.bstein.dev/bstein/kubectl:1.35.0
+          command: ["/bin/sh", "-c"]
+          args:
+            - |
+              set -eu
+
+              vault_addr="${VAULT_ADDR:-http://vault.vault.svc.cluster.local:8200}"
+              vault_role="${VAULT_ROLE:-sso-secrets}"
+
+              jwt="$(cat /var/run/secrets/kubernetes.io/serviceaccount/token)"
+              login_payload="$(jq -nc --arg jwt "${jwt}" --arg role "${vault_role}" '{jwt:$jwt, role:$role}')"
+              vault_token="$(curl -sS --request POST --data "${login_payload}" "${vault_addr}/v1/auth/kubernetes/login" | jq -r '.auth.client_token')"
+              if [ -z "${vault_token}" ] || [ "${vault_token}" = "null" ]; then
+                echo "vault login failed" >&2
+                exit 1
+              fi
+
+              ensured=0
+              while read -r node intranet_ip; do
+                if [ -z "${node}" ] || [ -z "${intranet_ip}" ]; then
+                  continue
+                fi
+
+                secret_path="kv/data/atlas/nodes/${node}"
+                read_status="$(curl -sS -o /tmp/node-read.json -w "%{http_code}" -H "X-Vault-Token: ${vault_token}" "${vault_addr}/v1/${secret_path}" || true)"
+                if [ "${read_status}" = "200" ]; then
+                  atlas_password="$(jq -r '.data.data.atlas_password // empty' /tmp/node-read.json)"
+                  root_password="$(jq -r '.data.data.root_password // empty' /tmp/node-read.json)"
+                elif [ "${read_status}" = "404" ]; then
+                  atlas_password=""
+                  root_password=""
+                else
+                  echo "Vault read failed for ${node} (status ${read_status})" >&2
+                  cat /tmp/node-read.json >&2 || true
+                  exit 1
+                fi
+
+                payload="$(jq -nc --arg atlas_password "${atlas_password}" --arg root_password "${root_password}" --arg intranet_ip "${intranet_ip}" '{data:{atlas_password:$atlas_password,root_password:$root_password,intranet_ip:$intranet_ip}}')"
+
+                write_status="$(curl -sS -o /tmp/node-write.json -w "%{http_code}" -X POST -H "X-Vault-Token: ${vault_token}" -H 'Content-Type: application/json' -d "${payload}" "${vault_addr}/v1/${secret_path}")"
+                if [ "${write_status}" != "200" ] && [ "${write_status}" != "204" ]; then
+                  echo "Vault write failed for ${node} (status ${write_status})" >&2
+                  cat /tmp/node-write.json >&2 || true
+                  exit 1
+                fi
+
+                ensured=$((ensured + 1))
+                echo "Ensured node secret placeholder for ${node} (${intranet_ip})"
+              done <<'EOF_NODES'
+              titan-jh 192.168.22.8
+              titan-db 192.168.22.10
+              titan-0a 192.168.22.11
+              titan-0b 192.168.22.12
+              titan-0c 192.168.22.13
+              titan-20 192.168.22.20
+              titan-21 192.168.22.21
+              titan-22 192.168.22.22
+              titan-23 192.168.22.23
+              titan-24 192.168.22.26
+              titan-04 192.168.22.30
+              titan-05 192.168.22.31
+              titan-06 192.168.22.32
+              titan-07 192.168.22.33
+              titan-08 192.168.22.34
+              titan-09 192.168.22.35
+              titan-10 192.168.22.36
+              titan-11 192.168.22.37
+              titan-12 192.168.22.40
+              titan-13 192.168.22.41
+              titan-14 192.168.22.42
+              titan-15 192.168.22.43
+              titan-16 192.168.22.44
+              titan-17 192.168.22.45
+              titan-18 192.168.22.46
+              titan-19 192.168.22.47
+              EOF_NODES
+
+              echo "Ensured ${ensured} Metis node placeholders in Vault"
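For readers unfamiliar with Vault's KV v2 HTTP API, the loop above is a read-preserve-write: GET the versioned secret (the payload comes back nested under data.data), keep any existing passwords, and POST the document back with the standardized intranet_ip. The same dance, sketched in Python with requests (an illustrative rewrite under the same kv mount and paths as the job, not code from the repository; it assumes a token already obtained via the Kubernetes auth login shown above):

import requests

VAULT_ADDR = "http://vault.vault.svc.cluster.local:8200"  # the job's default

def ensure_node_secret(token, node, intranet_ip):
    # KV v2 wraps reads as {"data": {"data": {...}}} and writes as {"data": {...}}.
    url = f"{VAULT_ADDR}/v1/kv/data/atlas/nodes/{node}"
    headers = {"X-Vault-Token": token}
    resp = requests.get(url, headers=headers)
    if resp.status_code == 200:
        current = resp.json()["data"]["data"]
    elif resp.status_code == 404:
        current = {}  # placeholder does not exist yet
    else:
        resp.raise_for_status()
        return
    payload = {"data": {
        "atlas_password": current.get("atlas_password", ""),
        "root_password": current.get("root_password", ""),
        "intranet_ip": intranet_ip,  # standardized value always wins
    }}
    requests.post(url, headers=headers, json=payload).raise_for_status()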
@@ -73,7 +73,7 @@ spec:
              CLIENT_ID="$(echo "$CLIENT_QUERY" | jq -r '.[0].id' 2>/dev/null || true)"

              if [ -z "$CLIENT_ID" ] || [ "$CLIENT_ID" = "null" ]; then
-                create_payload='{"clientId":"metis","enabled":true,"protocol":"openid-connect","publicClient":false,"standardFlowEnabled":true,"implicitFlowEnabled":false,"directAccessGrantsEnabled":false,"serviceAccountsEnabled":false,"redirectUris":["https://sentinel.bstein.dev/oauth2/callback"],"webOrigins":["https://sentinel.bstein.dev"],"rootUrl":"https://sentinel.bstein.dev","baseUrl":"/"}'
+                create_payload='{"clientId":"metis","enabled":true,"protocol":"openid-connect","publicClient":false,"standardFlowEnabled":true,"implicitFlowEnabled":false,"directAccessGrantsEnabled":false,"serviceAccountsEnabled":false,"redirectUris":["https://recovery.bstein.dev/oauth2/callback"],"webOrigins":["https://recovery.bstein.dev"],"rootUrl":"https://recovery.bstein.dev","baseUrl":"/"}'
                status="$(curl -sS -o /dev/null -w "%{http_code}" -X POST \
                  -H "Authorization: Bearer ${ACCESS_TOKEN}" \
                  -H 'Content-Type: application/json' \
@@ -121,7 +121,7 @@ spec:
                fi
              fi

-              update_payload='{"enabled":true,"clientId":"metis","protocol":"openid-connect","publicClient":false,"standardFlowEnabled":true,"implicitFlowEnabled":false,"directAccessGrantsEnabled":false,"serviceAccountsEnabled":false,"redirectUris":["https://sentinel.bstein.dev/oauth2/callback"],"webOrigins":["https://sentinel.bstein.dev"],"rootUrl":"https://sentinel.bstein.dev","baseUrl":"/"}'
+              update_payload='{"enabled":true,"clientId":"metis","protocol":"openid-connect","publicClient":false,"standardFlowEnabled":true,"implicitFlowEnabled":false,"directAccessGrantsEnabled":false,"serviceAccountsEnabled":false,"redirectUris":["https://recovery.bstein.dev/oauth2/callback"],"webOrigins":["https://recovery.bstein.dev"],"rootUrl":"https://recovery.bstein.dev","baseUrl":"/"}'
              status="$(curl -sS -o /dev/null -w "%{http_code}" -X PUT \
                -H "Authorization: Bearer ${ACCESS_TOKEN}" \
                -H 'Content-Type: application/json' \
services/keycloak/oneoffs/quality-oidc-secret-ensure-job.yaml (new file, 198 lines)
@@ -0,0 +1,198 @@
+# services/keycloak/oneoffs/quality-oidc-secret-ensure-job.yaml
+# One-off job for sso/quality-oidc-secret-ensure-1.
+# Purpose: ensure the SonarQube oauth2-proxy OIDC client and Vault secret exist.
+# Keep this completed Job around; bump the suffix if it ever needs to be rerun.
+apiVersion: batch/v1
+kind: Job
+metadata:
+  name: quality-oidc-secret-ensure-1
+  namespace: sso
+spec:
+  backoffLimit: 0
+  template:
+    metadata:
+      annotations:
+        vault.hashicorp.com/agent-inject: "true"
+        vault.hashicorp.com/agent-pre-populate-only: "true"
+        vault.hashicorp.com/role: "sso-secrets"
+        vault.hashicorp.com/agent-inject-secret-keycloak-admin-env.sh: "kv/data/atlas/shared/keycloak-admin"
+        vault.hashicorp.com/agent-inject-template-keycloak-admin-env.sh: |
+          {{ with secret "kv/data/atlas/shared/keycloak-admin" }}
+          export KEYCLOAK_ADMIN="{{ .Data.data.username }}"
+          export KEYCLOAK_ADMIN_USER="{{ .Data.data.username }}"
+          export KEYCLOAK_ADMIN_PASSWORD="{{ .Data.data.password }}"
+          {{ end }}
+    spec:
+      serviceAccountName: mas-secrets-ensure
+      restartPolicy: Never
+      affinity:
+        nodeAffinity:
+          requiredDuringSchedulingIgnoredDuringExecution:
+            nodeSelectorTerms:
+              - matchExpressions:
+                  - key: node-role.kubernetes.io/worker
+                    operator: Exists
+          preferredDuringSchedulingIgnoredDuringExecution:
+            - weight: 100
+              preference:
+                matchExpressions:
+                  - key: kubernetes.io/arch
+                    operator: In
+                    values: ["arm64"]
+      containers:
+        - name: apply
+          image: bitnami/kubectl@sha256:554ab88b1858e8424c55de37ad417b16f2a0e65d1607aa0f3fe3ce9b9f10b131
+          command: ["/bin/sh", "-c"]
+          args:
+            - |
+              set -euo pipefail
+              . /vault/secrets/keycloak-admin-env.sh
+              KC_URL="http://keycloak.sso.svc.cluster.local"
+              ACCESS_TOKEN=""
+              for attempt in 1 2 3 4 5; do
+                TOKEN_JSON="$(curl -sS -X POST "$KC_URL/realms/master/protocol/openid-connect/token" \
+                  -H 'Content-Type: application/x-www-form-urlencoded' \
+                  -d "grant_type=password" \
+                  -d "client_id=admin-cli" \
+                  -d "username=${KEYCLOAK_ADMIN}" \
+                  -d "password=${KEYCLOAK_ADMIN_PASSWORD}" || true)"
+                ACCESS_TOKEN="$(echo "$TOKEN_JSON" | jq -r '.access_token' 2>/dev/null || true)"
+                if [ -n "$ACCESS_TOKEN" ] && [ "$ACCESS_TOKEN" != "null" ]; then
+                  break
+                fi
+                echo "Keycloak token request failed (attempt ${attempt})" >&2
+                sleep $((attempt * 2))
+              done
+              if [ -z "$ACCESS_TOKEN" ] || [ "$ACCESS_TOKEN" = "null" ]; then
+                echo "Failed to fetch Keycloak admin token" >&2
+                exit 1
+              fi
+
+              CLIENT_QUERY="$(curl -sS -H "Authorization: Bearer ${ACCESS_TOKEN}" \
+                "$KC_URL/admin/realms/atlas/clients?clientId=sonarqube" || true)"
+              CLIENT_ID="$(echo "$CLIENT_QUERY" | jq -r '.[0].id' 2>/dev/null || true)"
+
+              if [ -z "$CLIENT_ID" ] || [ "$CLIENT_ID" = "null" ]; then
+                create_payload='{"clientId":"sonarqube","enabled":true,"protocol":"openid-connect","publicClient":false,"standardFlowEnabled":true,"implicitFlowEnabled":false,"directAccessGrantsEnabled":false,"serviceAccountsEnabled":false,"redirectUris":["https://quality.bstein.dev/oauth2/callback"],"webOrigins":["https://quality.bstein.dev"],"rootUrl":"https://quality.bstein.dev","baseUrl":"/"}'
+                status="$(curl -sS -o /dev/null -w "%{http_code}" -X POST \
+                  -H "Authorization: Bearer ${ACCESS_TOKEN}" \
+                  -H 'Content-Type: application/json' \
+                  -d "${create_payload}" \
+                  "$KC_URL/admin/realms/atlas/clients")"
+                if [ "$status" != "201" ] && [ "$status" != "204" ] && [ "$status" != "409" ]; then
+                  echo "Keycloak client create failed (status ${status})" >&2
+                  exit 1
+                fi
+                CLIENT_QUERY="$(curl -sS -H "Authorization: Bearer ${ACCESS_TOKEN}" \
+                  "$KC_URL/admin/realms/atlas/clients?clientId=sonarqube" || true)"
+                CLIENT_ID="$(echo "$CLIENT_QUERY" | jq -r '.[0].id' 2>/dev/null || true)"
+              fi
+
+              if [ -z "$CLIENT_ID" ] || [ "$CLIENT_ID" = "null" ]; then
+                echo "Keycloak client sonarqube not found" >&2
+                exit 1
+              fi
+
+              SCOPE_ID="$(curl -sS -H "Authorization: Bearer ${ACCESS_TOKEN}" \
+                "$KC_URL/admin/realms/atlas/client-scopes?search=groups" | jq -r '.[] | select(.name=="groups") | .id' 2>/dev/null | head -n1 || true)"
+              if [ -z "$SCOPE_ID" ] || [ "$SCOPE_ID" = "null" ]; then
+                echo "Keycloak client scope groups not found" >&2
+                exit 1
+              fi
+
+              DEFAULT_SCOPES="$(curl -sS -H "Authorization: Bearer ${ACCESS_TOKEN}" \
+                "$KC_URL/admin/realms/atlas/clients/${CLIENT_ID}/default-client-scopes" || true)"
+              OPTIONAL_SCOPES="$(curl -sS -H "Authorization: Bearer ${ACCESS_TOKEN}" \
+                "$KC_URL/admin/realms/atlas/clients/${CLIENT_ID}/optional-client-scopes" || true)"
+
+              if ! echo "$DEFAULT_SCOPES" | jq -e '.[] | select(.name=="groups")' >/dev/null 2>&1 \
+                && ! echo "$OPTIONAL_SCOPES" | jq -e '.[] | select(.name=="groups")' >/dev/null 2>&1; then
+                status="$(curl -sS -o /dev/null -w "%{http_code}" -X PUT \
+                  -H "Authorization: Bearer ${ACCESS_TOKEN}" \
+                  "$KC_URL/admin/realms/atlas/clients/${CLIENT_ID}/optional-client-scopes/${SCOPE_ID}")"
+                if [ "$status" != "200" ] && [ "$status" != "201" ] && [ "$status" != "204" ]; then
+                  status="$(curl -sS -o /dev/null -w "%{http_code}" -X POST \
+                    -H "Authorization: Bearer ${ACCESS_TOKEN}" \
+                    "$KC_URL/admin/realms/atlas/clients/${CLIENT_ID}/optional-client-scopes/${SCOPE_ID}")"
+                  if [ "$status" != "200" ] && [ "$status" != "201" ] && [ "$status" != "204" ]; then
+                    echo "Failed to attach groups client scope to sonarqube (status ${status})" >&2
+                    exit 1
+                  fi
+                fi
+              fi
+
+              update_payload='{"enabled":true,"clientId":"sonarqube","protocol":"openid-connect","publicClient":false,"standardFlowEnabled":true,"implicitFlowEnabled":false,"directAccessGrantsEnabled":false,"serviceAccountsEnabled":false,"redirectUris":["https://quality.bstein.dev/oauth2/callback"],"webOrigins":["https://quality.bstein.dev"],"rootUrl":"https://quality.bstein.dev","baseUrl":"/"}'
+              status="$(curl -sS -o /dev/null -w "%{http_code}" -X PUT \
+                -H "Authorization: Bearer ${ACCESS_TOKEN}" \
+                -H 'Content-Type: application/json' \
+                -d "${update_payload}" \
+                "$KC_URL/admin/realms/atlas/clients/${CLIENT_ID}")"
+              if [ "$status" != "204" ]; then
+                echo "Keycloak client update failed (status ${status})" >&2
+                exit 1
+              fi
+
+              CLIENT_SECRET="$(curl -sS -H "Authorization: Bearer ${ACCESS_TOKEN}" \
+                "$KC_URL/admin/realms/atlas/clients/${CLIENT_ID}/client-secret" | jq -r '.value' 2>/dev/null || true)"
+              if [ -z "$CLIENT_SECRET" ] || [ "$CLIENT_SECRET" = "null" ]; then
+                echo "Keycloak client secret not found" >&2
+                exit 1
+              fi
+
+              vault_addr="${VAULT_ADDR:-http://vault.vault.svc.cluster.local:8200}"
+              vault_role="${VAULT_ROLE:-sso-secrets}"
+              jwt="$(cat /var/run/secrets/kubernetes.io/serviceaccount/token)"
+              login_payload="$(jq -nc --arg jwt "${jwt}" --arg role "${vault_role}" '{jwt:$jwt, role:$role}')"
+              vault_token="$(curl -sS --request POST --data "${login_payload}" \
+                "${vault_addr}/v1/auth/kubernetes/login" | jq -r '.auth.client_token')"
+              if [ -z "${vault_token}" ] || [ "${vault_token}" = "null" ]; then
+                echo "vault login failed" >&2
+                exit 1
+              fi
+
+              read_status="$(curl -sS -o /tmp/sonarqube-oidc-read.json -w "%{http_code}" \
+                -H "X-Vault-Token: ${vault_token}" \
+                "${vault_addr}/v1/kv/data/atlas/quality/sonarqube-oidc" || true)"
+              COOKIE_SECRET=""
+              if [ "${read_status}" = "200" ]; then
+                COOKIE_SECRET="$(jq -r '.data.data.cookie_secret // empty' /tmp/sonarqube-oidc-read.json)"
+              elif [ "${read_status}" != "404" ]; then
+                echo "Vault read failed (status ${read_status})" >&2
+                cat /tmp/sonarqube-oidc-read.json >&2 || true
+                exit 1
+              fi
+              if [ -n "${COOKIE_SECRET}" ]; then
+                length="$(printf '%s' "${COOKIE_SECRET}" | wc -c | tr -d ' ')"
+                if [ "${length}" != "16" ] && [ "${length}" != "24" ] && [ "${length}" != "32" ]; then
+                  COOKIE_SECRET=""
+                fi
+              fi
+              if [ -z "${COOKIE_SECRET}" ]; then
+                COOKIE_SECRET="$(openssl rand -hex 16 | tr -d '\n')"
+              fi
+
+              payload="$(jq -nc \
+                --arg client_id "sonarqube" \
+                --arg client_secret "${CLIENT_SECRET}" \
+                --arg cookie_secret "${COOKIE_SECRET}" \
+                '{data:{client_id:$client_id,client_secret:$client_secret,cookie_secret:$cookie_secret}}')"
+              write_status="$(curl -sS -o /tmp/sonarqube-oidc-write.json -w "%{http_code}" -X POST \
+                -H "X-Vault-Token: ${vault_token}" \
+                -H 'Content-Type: application/json' \
+                -d "${payload}" "${vault_addr}/v1/kv/data/atlas/quality/sonarqube-oidc")"
+              if [ "${write_status}" != "200" ] && [ "${write_status}" != "204" ]; then
+                echo "Vault write failed (status ${write_status})" >&2
+                cat /tmp/sonarqube-oidc-write.json >&2 || true
+                exit 1
+              fi
+
+              verify_status="$(curl -sS -o /tmp/sonarqube-oidc-verify.json -w "%{http_code}" \
+                -H "X-Vault-Token: ${vault_token}" \
+                "${vault_addr}/v1/kv/data/atlas/quality/sonarqube-oidc" || true)"
+              if [ "${verify_status}" != "200" ]; then
+                echo "Vault verify failed (status ${verify_status})" >&2
+                cat /tmp/sonarqube-oidc-verify.json >&2 || true
+                exit 1
+              fi
+
+              echo "SonarQube OIDC secret ready in Vault"
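The length gate in the job above reflects oauth2-proxy's requirement that the cookie secret be exactly 16, 24, or 32 bytes (valid AES key sizes); anything else is discarded and regenerated. Note that openssl rand -hex 16 emits 32 hex characters, so a freshly generated value passes the 32-byte branch of the check. Restated as a minimal Python check (illustrative only, not repository code):

import secrets

VALID_LENGTHS = {16, 24, 32}  # AES-128/192/256 key sizes

def usable_cookie_secret(existing):
    # Keep the stored secret only if its byte length is a valid AES key size.
    if len(existing.encode("utf-8")) in VALID_LENGTHS:
        return existing
    return secrets.token_hex(16)  # 32 hex chars, like `openssl rand -hex 16`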
@@ -107,17 +107,22 @@ payload="$(jq -nc \
   --arg client_id "vault-oidc" \
   --arg client_secret "${CLIENT_SECRET}" \
   --arg default_role "admin" \
+  --arg token_policies "default" \
   --arg scopes "openid profile email groups" \
   --arg user_claim "preferred_username" \
   --arg groups_claim "groups" \
   --arg redirect_uris "https://secret.bstein.dev/ui/vault/auth/oidc/oidc/callback,http://localhost:8250/oidc/callback" \
   --arg bound_audiences "vault-oidc" \
+  --arg bound_claims_type "string" \
   --arg admin_group "admin" \
   --arg admin_policies "default,vault-admin" \
+  --arg admin_bound_claims '{"groups":"admin"}' \
   --arg dev_group "dev" \
   --arg dev_policies "default,dev-kv" \
   --arg user_group "dev" \
   --arg user_policies "default,dev-kv" \
-  '{data:{discovery_url:$discovery_url,client_id:$client_id,client_secret:$client_secret,default_role:$default_role,scopes:$scopes,user_claim:$user_claim,groups_claim:$groups_claim,redirect_uris:$redirect_uris,bound_audiences:$bound_audiences,admin_group:$admin_group,admin_policies:$admin_policies,dev_group:$dev_group,dev_policies:$dev_policies,user_group:$user_group,user_policies:$user_policies}}')"
+  --arg ui_default_auth_method "oidc" \
+  --arg ui_default_auth_path "oidc" \
+  '{data:{discovery_url:$discovery_url,client_id:$client_id,client_secret:$client_secret,default_role:$default_role,token_policies:$token_policies,scopes:$scopes,user_claim:$user_claim,groups_claim:$groups_claim,redirect_uris:$redirect_uris,bound_audiences:$bound_audiences,bound_claims_type:$bound_claims_type,admin_group:$admin_group,admin_policies:$admin_policies,admin_bound_claims:$admin_bound_claims,dev_group:$dev_group,dev_policies:$dev_policies,user_group:$user_group,user_policies:$user_policies,ui_default_auth_method:$ui_default_auth_method,ui_default_auth_path:$ui_default_auth_path}}')"
 curl -sS -X POST -H "X-Vault-Token: ${vault_token}" \
   -d "${payload}" "${vault_addr}/v1/kv/data/atlas/vault/vault-oidc-config" >/dev/null
Some files were not shown because too many files have changed in this diff.