Compare commits
No commits in common. "main" and "restructure/hybrid-clusters" have entirely different histories.
main
...
restructur
11
.gitignore
vendored
11
.gitignore
vendored
@ -1,10 +1 @@
|
|||||||
*.md
|
AGENTS.md
|
||||||
!README.md
|
|
||||||
!knowledge/**/*.md
|
|
||||||
!services/comms/knowledge/**/*.md
|
|
||||||
__pycache__/
|
|
||||||
*.py[cod]
|
|
||||||
.pytest_cache
|
|
||||||
.venv
|
|
||||||
.venv-ci
|
|
||||||
tmp/
|
|
||||||
|
|||||||
414
Jenkinsfile
vendored
414
Jenkinsfile
vendored
@ -1,414 +0,0 @@
|
|||||||
// Mirror of ci/Jenkinsfile.titan-iac for multibranch discovery.
|
|
||||||
pipeline {
|
|
||||||
agent {
|
|
||||||
kubernetes {
|
|
||||||
defaultContainer 'python'
|
|
||||||
yaml """
|
|
||||||
apiVersion: v1
|
|
||||||
kind: Pod
|
|
||||||
spec:
|
|
||||||
nodeSelector:
|
|
||||||
hardware: rpi5
|
|
||||||
kubernetes.io/arch: arm64
|
|
||||||
node-role.kubernetes.io/worker: "true"
|
|
||||||
containers:
|
|
||||||
- name: jnlp
|
|
||||||
image: jenkins/inbound-agent:3355.v388858a_47b_33-2-jdk21
|
|
||||||
resources:
|
|
||||||
requests:
|
|
||||||
cpu: "25m"
|
|
||||||
memory: "256Mi"
|
|
||||||
- name: python
|
|
||||||
image: registry.bstein.dev/bstein/python:3.12-slim
|
|
||||||
command:
|
|
||||||
- cat
|
|
||||||
tty: true
|
|
||||||
- name: quality-tools
|
|
||||||
image: registry.bstein.dev/bstein/quality-tools:sonar8.0.1-trivy0.70.0-db20260422-arm64
|
|
||||||
command:
|
|
||||||
- cat
|
|
||||||
tty: true
|
|
||||||
"""
|
|
||||||
}
|
|
||||||
}
|
|
||||||
environment {
|
|
||||||
PIP_DISABLE_PIP_VERSION_CHECK = '1'
|
|
||||||
PYTHONUNBUFFERED = '1'
|
|
||||||
SUITE_NAME = 'titan_iac'
|
|
||||||
PUSHGATEWAY_URL = 'http://platform-quality-gateway.monitoring.svc.cluster.local:9091'
|
|
||||||
SONARQUBE_HOST_URL = 'http://sonarqube.quality.svc.cluster.local:9000'
|
|
||||||
SONARQUBE_PROJECT_KEY = 'titan_iac'
|
|
||||||
SONARQUBE_TOKEN = credentials('sonarqube-token')
|
|
||||||
VM_URL = 'http://victoria-metrics-single-server.monitoring.svc.cluster.local:8428'
|
|
||||||
QUALITY_GATE_SONARQUBE_ENFORCE = '1'
|
|
||||||
QUALITY_GATE_SONARQUBE_REPORT = 'build/sonarqube-quality-gate.json'
|
|
||||||
QUALITY_GATE_IRONBANK_ENFORCE = '1'
|
|
||||||
QUALITY_GATE_IRONBANK_REQUIRED = '0'
|
|
||||||
QUALITY_GATE_IRONBANK_REPORT = 'build/ironbank-compliance.json'
|
|
||||||
}
|
|
||||||
options {
|
|
||||||
disableConcurrentBuilds()
|
|
||||||
buildDiscarder(logRotator(daysToKeepStr: '30', numToKeepStr: '200', artifactDaysToKeepStr: '30', artifactNumToKeepStr: '120'))
|
|
||||||
}
|
|
||||||
stages {
|
|
||||||
stage('Checkout') {
|
|
||||||
steps {
|
|
||||||
checkout scm
|
|
||||||
}
|
|
||||||
}
|
|
||||||
stage('Install deps') {
|
|
||||||
steps {
|
|
||||||
sh '''
|
|
||||||
set -eu
|
|
||||||
if ! command -v git >/dev/null 2>&1; then
|
|
||||||
apt-get update
|
|
||||||
apt-get install -y --no-install-recommends git ca-certificates
|
|
||||||
rm -rf /var/lib/apt/lists/*
|
|
||||||
fi
|
|
||||||
pip install --no-cache-dir -r ci/requirements.txt
|
|
||||||
'''
|
|
||||||
}
|
|
||||||
}
|
|
||||||
stage('Prepare local quality evidence') {
|
|
||||||
steps {
|
|
||||||
sh '''
|
|
||||||
set -eu
|
|
||||||
mkdir -p build
|
|
||||||
set +e
|
|
||||||
python3 -m testing.quality_gate --profile local --build-dir build
|
|
||||||
local_quality_rc=$?
|
|
||||||
set -e
|
|
||||||
printf '%s\n' "${local_quality_rc}" > build/local-quality-gate.rc
|
|
||||||
'''
|
|
||||||
}
|
|
||||||
}
|
|
||||||
stage('Collect SonarQube evidence') {
|
|
||||||
steps {
|
|
||||||
container('quality-tools') {
|
|
||||||
sh '''#!/usr/bin/env bash
|
|
||||||
set -euo pipefail
|
|
||||||
mkdir -p build
|
|
||||||
args=(
|
|
||||||
"-Dsonar.host.url=${SONARQUBE_HOST_URL}"
|
|
||||||
"-Dsonar.login=${SONARQUBE_TOKEN}"
|
|
||||||
"-Dsonar.projectKey=${SONARQUBE_PROJECT_KEY}"
|
|
||||||
"-Dsonar.projectName=${SONARQUBE_PROJECT_KEY}"
|
|
||||||
"-Dsonar.sources=."
|
|
||||||
"-Dsonar.exclusions=**/.git/**,**/build/**,**/dist/**,**/node_modules/**,**/.venv/**,**/__pycache__/**,**/coverage/**,**/test-results/**,**/playwright-report/**,services/monitoring/dashboards/**,services/monitoring/grafana-dashboard-*.yaml"
|
|
||||||
"-Dsonar.test.inclusions=**/tests/**,**/testing/**,**/*_test.go,**/*.test.ts,**/*.test.tsx,**/*.spec.ts,**/*.spec.tsx"
|
|
||||||
)
|
|
||||||
[ -f build/coverage-unit.xml ] && args+=("-Dsonar.python.coverage.reportPaths=build/coverage-unit.xml")
|
|
||||||
set +e
|
|
||||||
sonar-scanner "${args[@]}" | tee build/sonar-scanner.log
|
|
||||||
rc=${PIPESTATUS[0]}
|
|
||||||
set -e
|
|
||||||
printf '%s\n' "${rc}" > build/sonarqube-analysis.rc
|
|
||||||
'''
|
|
||||||
}
|
|
||||||
sh '''
|
|
||||||
set -eu
|
|
||||||
mkdir -p build
|
|
||||||
python3 - <<'PY'
|
|
||||||
import base64
|
|
||||||
import json
|
|
||||||
import os
|
|
||||||
import time
|
|
||||||
import urllib.parse
|
|
||||||
import urllib.request
|
|
||||||
from pathlib import Path
|
|
||||||
|
|
||||||
host = os.getenv('SONARQUBE_HOST_URL', '').strip().rstrip('/')
|
|
||||||
project_key = os.getenv('SONARQUBE_PROJECT_KEY', '').strip()
|
|
||||||
token = os.getenv('SONARQUBE_TOKEN', '').strip()
|
|
||||||
report_path = os.getenv('QUALITY_GATE_SONARQUBE_REPORT', 'build/sonarqube-quality-gate.json')
|
|
||||||
|
|
||||||
payload = {
|
|
||||||
"status": "ERROR",
|
|
||||||
"note": "missing SONARQUBE_HOST_URL and/or SONARQUBE_PROJECT_KEY",
|
|
||||||
}
|
|
||||||
if host and project_key:
|
|
||||||
task_file = Path('.scannerwork/report-task.txt')
|
|
||||||
task_id = ''
|
|
||||||
if task_file.exists():
|
|
||||||
for line in task_file.read_text(encoding='utf-8').splitlines():
|
|
||||||
key, _, value = line.partition('=')
|
|
||||||
if key == 'ceTaskId':
|
|
||||||
task_id = value.strip()
|
|
||||||
break
|
|
||||||
if task_id:
|
|
||||||
ce_query = urllib.parse.urlencode({"id": task_id})
|
|
||||||
deadline = time.monotonic() + 180
|
|
||||||
while time.monotonic() < deadline:
|
|
||||||
ce_request = urllib.request.Request(f"{host}/api/ce/task?{ce_query}", method="GET")
|
|
||||||
if token:
|
|
||||||
encoded = base64.b64encode(f"{token}:".encode("utf-8")).decode("utf-8")
|
|
||||||
ce_request.add_header("Authorization", f"Basic {encoded}")
|
|
||||||
try:
|
|
||||||
with urllib.request.urlopen(ce_request, timeout=12) as response:
|
|
||||||
ce_payload = json.loads(response.read().decode("utf-8"))
|
|
||||||
except Exception:
|
|
||||||
time.sleep(3)
|
|
||||||
continue
|
|
||||||
status = str(ce_payload.get("task", {}).get("status", "")).upper()
|
|
||||||
if status in {"SUCCESS", "FAILED", "CANCELED"}:
|
|
||||||
break
|
|
||||||
time.sleep(3)
|
|
||||||
|
|
||||||
query = urllib.parse.urlencode({"projectKey": project_key})
|
|
||||||
request = urllib.request.Request(
|
|
||||||
f"{host}/api/qualitygates/project_status?{query}",
|
|
||||||
method="GET",
|
|
||||||
)
|
|
||||||
if token:
|
|
||||||
encoded = base64.b64encode(f"{token}:".encode("utf-8")).decode("utf-8")
|
|
||||||
request.add_header("Authorization", f"Basic {encoded}")
|
|
||||||
try:
|
|
||||||
with urllib.request.urlopen(request, timeout=12) as response:
|
|
||||||
payload = json.loads(response.read().decode("utf-8"))
|
|
||||||
except Exception as exc: # noqa: BLE001
|
|
||||||
payload = {"status": "ERROR", "error": str(exc)}
|
|
||||||
|
|
||||||
with open(report_path, "w", encoding="utf-8") as handle:
|
|
||||||
json.dump(payload, handle, indent=2, sort_keys=True)
|
|
||||||
handle.write("\\n")
|
|
||||||
PY
|
|
||||||
'''
|
|
||||||
}
|
|
||||||
}
|
|
||||||
stage('Collect IronBank evidence') {
|
|
||||||
steps {
|
|
||||||
container('quality-tools') {
|
|
||||||
sh '''#!/usr/bin/env bash
|
|
||||||
set -euo pipefail
|
|
||||||
mkdir -p build
|
|
||||||
set +e
|
|
||||||
trivy fs --cache-dir "${TRIVY_CACHE_DIR}" --skip-db-update --skip-files clusters/atlas/flux-system/gotk-components.yaml --timeout 5m --no-progress --format json --output build/trivy-fs.json --scanners vuln,secret,misconfig --severity HIGH,CRITICAL .
|
|
||||||
trivy_rc=$?
|
|
||||||
set -e
|
|
||||||
if [ ! -s build/trivy-fs.json ]; then
|
|
||||||
cat > build/ironbank-compliance.json <<EOF
|
|
||||||
{"status":"failed","compliant":false,"scanner":"trivy","scan_type":"filesystem","error":"trivy did not produce JSON output","trivy_rc":${trivy_rc}}
|
|
||||||
EOF
|
|
||||||
exit 0
|
|
||||||
fi
|
|
||||||
'''
|
|
||||||
}
|
|
||||||
sh '''
|
|
||||||
set -eu
|
|
||||||
mkdir -p build
|
|
||||||
if [ -s build/trivy-fs.json ]; then
|
|
||||||
python3 ci/scripts/supply_chain_report.py --trivy-json build/trivy-fs.json --waivers ci/titan-iac-trivy-waivers.json --output build/ironbank-compliance.json
|
|
||||||
exit 0
|
|
||||||
fi
|
|
||||||
python3 - <<'PY'
|
|
||||||
import json
|
|
||||||
import os
|
|
||||||
from pathlib import Path
|
|
||||||
|
|
||||||
report_path = Path(os.getenv('QUALITY_GATE_IRONBANK_REPORT', 'build/ironbank-compliance.json'))
|
|
||||||
if report_path.exists():
|
|
||||||
raise SystemExit(0)
|
|
||||||
|
|
||||||
status = os.getenv('IRONBANK_COMPLIANCE_STATUS', '').strip()
|
|
||||||
compliant = os.getenv('IRONBANK_COMPLIANT', '').strip().lower()
|
|
||||||
payload = {
|
|
||||||
"status": status or "unknown",
|
|
||||||
"compliant": compliant in {"1", "true", "yes", "on"} if compliant else None,
|
|
||||||
}
|
|
||||||
payload = {k: v for k, v in payload.items() if v is not None}
|
|
||||||
if "status" not in payload:
|
|
||||||
payload["status"] = "unknown"
|
|
||||||
payload["note"] = (
|
|
||||||
"Set IRONBANK_COMPLIANCE_STATUS/IRONBANK_COMPLIANT "
|
|
||||||
"or write build/ironbank-compliance.json in image-building repos."
|
|
||||||
)
|
|
||||||
|
|
||||||
report_path.parent.mkdir(parents=True, exist_ok=True)
|
|
||||||
report_path.write_text(json.dumps(payload, indent=2, sort_keys=True) + "\\n", encoding="utf-8")
|
|
||||||
PY
|
|
||||||
'''
|
|
||||||
}
|
|
||||||
}
|
|
||||||
stage('Run quality gate') {
|
|
||||||
steps {
|
|
||||||
sh '''
|
|
||||||
set -eu
|
|
||||||
mkdir -p build
|
|
||||||
set +e
|
|
||||||
python3 -m testing.quality_gate --profile jenkins --build-dir build
|
|
||||||
quality_gate_rc=$?
|
|
||||||
set -e
|
|
||||||
printf '%s\n' "${quality_gate_rc}" > build/quality-gate.rc
|
|
||||||
'''
|
|
||||||
}
|
|
||||||
}
|
|
||||||
stage('Publish test metrics') {
|
|
||||||
steps {
|
|
||||||
sh '''
|
|
||||||
set -eu
|
|
||||||
export JUNIT_GLOB='build/junit-*.xml'
|
|
||||||
export QUALITY_GATE_EXIT_CODE_PATH='build/quality-gate.rc'
|
|
||||||
export QUALITY_GATE_SUMMARY_PATH='build/quality-gate-summary.json'
|
|
||||||
python3 ci/scripts/publish_test_metrics.py
|
|
||||||
'''
|
|
||||||
}
|
|
||||||
}
|
|
||||||
stage('Enforce quality gate') {
|
|
||||||
steps {
|
|
||||||
sh '''
|
|
||||||
set -euo pipefail
|
|
||||||
gate_rc="$(cat build/quality-gate.rc 2>/dev/null || echo 1)"
|
|
||||||
fail=0
|
|
||||||
if [ "${gate_rc}" -ne 0 ]; then
|
|
||||||
echo "quality gate failed with rc=${gate_rc}" >&2
|
|
||||||
fail=1
|
|
||||||
fi
|
|
||||||
|
|
||||||
enabled() {
|
|
||||||
case "$(printf '%s' "${1:-}" | tr '[:upper:]' '[:lower:]')" in
|
|
||||||
1|true|yes|on) return 0 ;;
|
|
||||||
*) return 1 ;;
|
|
||||||
esac
|
|
||||||
}
|
|
||||||
|
|
||||||
if enabled "${QUALITY_GATE_SONARQUBE_ENFORCE:-1}"; then
|
|
||||||
sonar_status="$(python3 - <<'PY'
|
|
||||||
import json
|
|
||||||
from pathlib import Path
|
|
||||||
|
|
||||||
path = Path("build/sonarqube-quality-gate.json")
|
|
||||||
if not path.exists():
|
|
||||||
print("missing")
|
|
||||||
raise SystemExit(0)
|
|
||||||
try:
|
|
||||||
payload = json.loads(path.read_text(encoding="utf-8"))
|
|
||||||
except Exception: # noqa: BLE001
|
|
||||||
print("error")
|
|
||||||
raise SystemExit(0)
|
|
||||||
status = (payload.get("status") or payload.get("projectStatus", {}).get("status") or payload.get("qualityGate", {}).get("status") or "").strip().lower()
|
|
||||||
print(status or "missing")
|
|
||||||
PY
|
|
||||||
)"
|
|
||||||
case "${sonar_status}" in
|
|
||||||
ok|pass|passed|success) ;;
|
|
||||||
*)
|
|
||||||
echo "sonarqube gate failed: ${sonar_status}" >&2
|
|
||||||
fail=1
|
|
||||||
;;
|
|
||||||
esac
|
|
||||||
fi
|
|
||||||
|
|
||||||
ironbank_required="${QUALITY_GATE_IRONBANK_REQUIRED:-0}"
|
|
||||||
if [ "${PUBLISH_IMAGES:-false}" = "true" ]; then
|
|
||||||
ironbank_required=1
|
|
||||||
fi
|
|
||||||
if enabled "${QUALITY_GATE_IRONBANK_ENFORCE:-1}"; then
|
|
||||||
supply_status="$(python3 - <<'PY'
|
|
||||||
import json
|
|
||||||
from pathlib import Path
|
|
||||||
|
|
||||||
path = Path("build/ironbank-compliance.json")
|
|
||||||
if not path.exists():
|
|
||||||
print("missing")
|
|
||||||
raise SystemExit(0)
|
|
||||||
try:
|
|
||||||
payload = json.loads(path.read_text(encoding="utf-8"))
|
|
||||||
except Exception: # noqa: BLE001
|
|
||||||
print("error")
|
|
||||||
raise SystemExit(0)
|
|
||||||
compliant = payload.get("compliant")
|
|
||||||
if compliant is True:
|
|
||||||
print("ok")
|
|
||||||
elif compliant is False:
|
|
||||||
print("failed")
|
|
||||||
else:
|
|
||||||
status = str(payload.get("status") or payload.get("result") or payload.get("compliance") or "").strip().lower()
|
|
||||||
print(status or "missing")
|
|
||||||
PY
|
|
||||||
)"
|
|
||||||
case "${supply_status}" in
|
|
||||||
ok|pass|passed|success|compliant) ;;
|
|
||||||
not_applicable|na|n/a)
|
|
||||||
if enabled "${ironbank_required}"; then
|
|
||||||
echo "supply chain gate required but status=${supply_status}" >&2
|
|
||||||
fail=1
|
|
||||||
fi
|
|
||||||
;;
|
|
||||||
*)
|
|
||||||
if enabled "${ironbank_required}"; then
|
|
||||||
echo "supply chain gate failed: ${supply_status}" >&2
|
|
||||||
fail=1
|
|
||||||
else
|
|
||||||
echo "supply chain gate not passing (${supply_status}) but not required for this run" >&2
|
|
||||||
fi
|
|
||||||
;;
|
|
||||||
esac
|
|
||||||
fi
|
|
||||||
|
|
||||||
exit "${fail}"
|
|
||||||
'''
|
|
||||||
}
|
|
||||||
}
|
|
||||||
stage('Resolve Flux branch') {
|
|
||||||
steps {
|
|
||||||
script {
|
|
||||||
env.FLUX_BRANCH = sh(
|
|
||||||
returnStdout: true,
|
|
||||||
script: "grep -m1 '^\\s*branch:' clusters/atlas/flux-system/gotk-sync.yaml | sed 's/^\\s*branch:\\s*//'"
|
|
||||||
).trim()
|
|
||||||
if (!env.FLUX_BRANCH) {
|
|
||||||
error('Flux branch not found in gotk-sync.yaml')
|
|
||||||
}
|
|
||||||
echo "Flux branch: ${env.FLUX_BRANCH}"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
stage('Promote') {
|
|
||||||
when {
|
|
||||||
expression {
|
|
||||||
def branch = env.BRANCH_NAME ?: (env.GIT_BRANCH ?: '').replaceFirst('origin/', '')
|
|
||||||
return env.FLUX_BRANCH && branch == env.FLUX_BRANCH
|
|
||||||
}
|
|
||||||
}
|
|
||||||
steps {
|
|
||||||
withCredentials([usernamePassword(credentialsId: 'gitea-pat', usernameVariable: 'GIT_USER', passwordVariable: 'GIT_TOKEN')]) {
|
|
||||||
sh '''
|
|
||||||
set -euo pipefail
|
|
||||||
if ! command -v git >/dev/null 2>&1; then
|
|
||||||
if command -v apk >/dev/null 2>&1; then
|
|
||||||
apk add --no-cache git >/dev/null
|
|
||||||
elif command -v apt-get >/dev/null 2>&1; then
|
|
||||||
apt-get update >/dev/null
|
|
||||||
apt-get install -y git >/dev/null
|
|
||||||
fi
|
|
||||||
fi
|
|
||||||
cd "${WORKSPACE:-$PWD}"
|
|
||||||
if ! git rev-parse --is-inside-work-tree >/dev/null 2>&1; then
|
|
||||||
echo "workspace is not a git checkout; skipping promote"
|
|
||||||
exit 0
|
|
||||||
fi
|
|
||||||
set +x
|
|
||||||
git config user.email "jenkins@bstein.dev"
|
|
||||||
git config user.name "jenkins"
|
|
||||||
git remote set-url origin https://${GIT_USER}:${GIT_TOKEN}@scm.bstein.dev/bstein/titan-iac.git
|
|
||||||
git push origin HEAD:${FLUX_BRANCH}
|
|
||||||
'''
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
post {
|
|
||||||
always {
|
|
||||||
script {
|
|
||||||
if (fileExists('build/junit-unit.xml') || fileExists('build/junit-glue.xml')) {
|
|
||||||
try {
|
|
||||||
junit allowEmptyResults: true, testResults: 'build/junit-*.xml'
|
|
||||||
} catch (Throwable err) {
|
|
||||||
echo "junit step unavailable: ${err.class.simpleName}"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
archiveArtifacts artifacts: 'build/**', allowEmptyArchive: true, fingerprint: true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
29
README.md
29
README.md
@ -1,29 +0,0 @@
|
|||||||
# titan-iac
|
|
||||||
|
|
||||||
Flux-managed Kubernetes desired-state config for `bstein.dev`.
|
|
||||||
|
|
||||||
Canonical source URL:
|
|
||||||
- `ssh://git@scm.bstein.dev:2242/bstein/titan-iac.git`
|
|
||||||
|
|
||||||
## Scope
|
|
||||||
|
|
||||||
This repo contains cluster configuration consumed by Flux:
|
|
||||||
- platform/infrastructure manifests
|
|
||||||
- service manifests and kustomizations
|
|
||||||
- operational scripts for render/reconcile workflows
|
|
||||||
|
|
||||||
This repo is **not** the Ananke application source repo.
|
|
||||||
Ananke lives in `bstein/ananke` and orchestrates host-side shutdown/startup behavior around this desired state.
|
|
||||||
|
|
||||||
## Validation workflow
|
|
||||||
|
|
||||||
```bash
|
|
||||||
kustomize build services/<app>
|
|
||||||
kubectl apply --server-side --dry-run=client -k services/<app>
|
|
||||||
flux reconcile kustomization <name> --namespace flux-system --with-source
|
|
||||||
```
|
|
||||||
|
|
||||||
## Apply model
|
|
||||||
|
|
||||||
Use Git + Flux as the source of truth.
|
|
||||||
Avoid manual in-cluster edits for durable changes.
|
|
||||||
@ -1,413 +0,0 @@
|
|||||||
pipeline {
|
|
||||||
agent {
|
|
||||||
kubernetes {
|
|
||||||
defaultContainer 'python'
|
|
||||||
yaml """
|
|
||||||
apiVersion: v1
|
|
||||||
kind: Pod
|
|
||||||
spec:
|
|
||||||
nodeSelector:
|
|
||||||
hardware: rpi5
|
|
||||||
kubernetes.io/arch: arm64
|
|
||||||
node-role.kubernetes.io/worker: "true"
|
|
||||||
containers:
|
|
||||||
- name: jnlp
|
|
||||||
image: jenkins/inbound-agent:3355.v388858a_47b_33-2-jdk21
|
|
||||||
resources:
|
|
||||||
requests:
|
|
||||||
cpu: "25m"
|
|
||||||
memory: "256Mi"
|
|
||||||
- name: python
|
|
||||||
image: registry.bstein.dev/bstein/python:3.12-slim
|
|
||||||
command:
|
|
||||||
- cat
|
|
||||||
tty: true
|
|
||||||
- name: quality-tools
|
|
||||||
image: registry.bstein.dev/bstein/quality-tools:sonar8.0.1-trivy0.70.0-db20260422-arm64
|
|
||||||
command:
|
|
||||||
- cat
|
|
||||||
tty: true
|
|
||||||
"""
|
|
||||||
}
|
|
||||||
}
|
|
||||||
environment {
|
|
||||||
PIP_DISABLE_PIP_VERSION_CHECK = '1'
|
|
||||||
PYTHONUNBUFFERED = '1'
|
|
||||||
SUITE_NAME = 'titan_iac'
|
|
||||||
PUSHGATEWAY_URL = 'http://platform-quality-gateway.monitoring.svc.cluster.local:9091'
|
|
||||||
SONARQUBE_HOST_URL = 'http://sonarqube.quality.svc.cluster.local:9000'
|
|
||||||
SONARQUBE_PROJECT_KEY = 'titan_iac'
|
|
||||||
SONARQUBE_TOKEN = credentials('sonarqube-token')
|
|
||||||
VM_URL = 'http://victoria-metrics-single-server.monitoring.svc.cluster.local:8428'
|
|
||||||
QUALITY_GATE_SONARQUBE_ENFORCE = '1'
|
|
||||||
QUALITY_GATE_SONARQUBE_REPORT = 'build/sonarqube-quality-gate.json'
|
|
||||||
QUALITY_GATE_IRONBANK_ENFORCE = '1'
|
|
||||||
QUALITY_GATE_IRONBANK_REQUIRED = '0'
|
|
||||||
QUALITY_GATE_IRONBANK_REPORT = 'build/ironbank-compliance.json'
|
|
||||||
}
|
|
||||||
options {
|
|
||||||
disableConcurrentBuilds()
|
|
||||||
buildDiscarder(logRotator(daysToKeepStr: '30', numToKeepStr: '200', artifactDaysToKeepStr: '30', artifactNumToKeepStr: '120'))
|
|
||||||
}
|
|
||||||
stages {
|
|
||||||
stage('Checkout') {
|
|
||||||
steps {
|
|
||||||
checkout scm
|
|
||||||
}
|
|
||||||
}
|
|
||||||
stage('Install deps') {
|
|
||||||
steps {
|
|
||||||
sh '''
|
|
||||||
set -eu
|
|
||||||
if ! command -v git >/dev/null 2>&1; then
|
|
||||||
apt-get update
|
|
||||||
apt-get install -y --no-install-recommends git ca-certificates
|
|
||||||
rm -rf /var/lib/apt/lists/*
|
|
||||||
fi
|
|
||||||
pip install --no-cache-dir -r ci/requirements.txt
|
|
||||||
'''
|
|
||||||
}
|
|
||||||
}
|
|
||||||
stage('Prepare local quality evidence') {
|
|
||||||
steps {
|
|
||||||
sh '''
|
|
||||||
set -eu
|
|
||||||
mkdir -p build
|
|
||||||
set +e
|
|
||||||
python3 -m testing.quality_gate --profile local --build-dir build
|
|
||||||
local_quality_rc=$?
|
|
||||||
set -e
|
|
||||||
printf '%s\n' "${local_quality_rc}" > build/local-quality-gate.rc
|
|
||||||
'''
|
|
||||||
}
|
|
||||||
}
|
|
||||||
stage('Collect SonarQube evidence') {
|
|
||||||
steps {
|
|
||||||
container('quality-tools') {
|
|
||||||
sh '''#!/usr/bin/env bash
|
|
||||||
set -euo pipefail
|
|
||||||
mkdir -p build
|
|
||||||
args=(
|
|
||||||
"-Dsonar.host.url=${SONARQUBE_HOST_URL}"
|
|
||||||
"-Dsonar.login=${SONARQUBE_TOKEN}"
|
|
||||||
"-Dsonar.projectKey=${SONARQUBE_PROJECT_KEY}"
|
|
||||||
"-Dsonar.projectName=${SONARQUBE_PROJECT_KEY}"
|
|
||||||
"-Dsonar.sources=."
|
|
||||||
"-Dsonar.exclusions=**/.git/**,**/build/**,**/dist/**,**/node_modules/**,**/.venv/**,**/__pycache__/**,**/coverage/**,**/test-results/**,**/playwright-report/**,services/monitoring/dashboards/**,services/monitoring/grafana-dashboard-*.yaml"
|
|
||||||
"-Dsonar.test.inclusions=**/tests/**,**/testing/**,**/*_test.go,**/*.test.ts,**/*.test.tsx,**/*.spec.ts,**/*.spec.tsx"
|
|
||||||
)
|
|
||||||
[ -f build/coverage-unit.xml ] && args+=("-Dsonar.python.coverage.reportPaths=build/coverage-unit.xml")
|
|
||||||
set +e
|
|
||||||
sonar-scanner "${args[@]}" | tee build/sonar-scanner.log
|
|
||||||
rc=${PIPESTATUS[0]}
|
|
||||||
set -e
|
|
||||||
printf '%s\n' "${rc}" > build/sonarqube-analysis.rc
|
|
||||||
'''
|
|
||||||
}
|
|
||||||
sh '''
|
|
||||||
set -eu
|
|
||||||
mkdir -p build
|
|
||||||
python3 - <<'PY'
|
|
||||||
import base64
|
|
||||||
import json
|
|
||||||
import os
|
|
||||||
import time
|
|
||||||
import urllib.parse
|
|
||||||
import urllib.request
|
|
||||||
from pathlib import Path
|
|
||||||
|
|
||||||
host = os.getenv('SONARQUBE_HOST_URL', '').strip().rstrip('/')
|
|
||||||
project_key = os.getenv('SONARQUBE_PROJECT_KEY', '').strip()
|
|
||||||
token = os.getenv('SONARQUBE_TOKEN', '').strip()
|
|
||||||
report_path = os.getenv('QUALITY_GATE_SONARQUBE_REPORT', 'build/sonarqube-quality-gate.json')
|
|
||||||
|
|
||||||
payload = {
|
|
||||||
"status": "ERROR",
|
|
||||||
"note": "missing SONARQUBE_HOST_URL and/or SONARQUBE_PROJECT_KEY",
|
|
||||||
}
|
|
||||||
if host and project_key:
|
|
||||||
task_file = Path('.scannerwork/report-task.txt')
|
|
||||||
task_id = ''
|
|
||||||
if task_file.exists():
|
|
||||||
for line in task_file.read_text(encoding='utf-8').splitlines():
|
|
||||||
key, _, value = line.partition('=')
|
|
||||||
if key == 'ceTaskId':
|
|
||||||
task_id = value.strip()
|
|
||||||
break
|
|
||||||
if task_id:
|
|
||||||
ce_query = urllib.parse.urlencode({"id": task_id})
|
|
||||||
deadline = time.monotonic() + 180
|
|
||||||
while time.monotonic() < deadline:
|
|
||||||
ce_request = urllib.request.Request(f"{host}/api/ce/task?{ce_query}", method="GET")
|
|
||||||
if token:
|
|
||||||
encoded = base64.b64encode(f"{token}:".encode("utf-8")).decode("utf-8")
|
|
||||||
ce_request.add_header("Authorization", f"Basic {encoded}")
|
|
||||||
try:
|
|
||||||
with urllib.request.urlopen(ce_request, timeout=12) as response:
|
|
||||||
ce_payload = json.loads(response.read().decode("utf-8"))
|
|
||||||
except Exception:
|
|
||||||
time.sleep(3)
|
|
||||||
continue
|
|
||||||
status = str(ce_payload.get("task", {}).get("status", "")).upper()
|
|
||||||
if status in {"SUCCESS", "FAILED", "CANCELED"}:
|
|
||||||
break
|
|
||||||
time.sleep(3)
|
|
||||||
|
|
||||||
query = urllib.parse.urlencode({"projectKey": project_key})
|
|
||||||
request = urllib.request.Request(
|
|
||||||
f"{host}/api/qualitygates/project_status?{query}",
|
|
||||||
method="GET",
|
|
||||||
)
|
|
||||||
if token:
|
|
||||||
encoded = base64.b64encode(f"{token}:".encode("utf-8")).decode("utf-8")
|
|
||||||
request.add_header("Authorization", f"Basic {encoded}")
|
|
||||||
try:
|
|
||||||
with urllib.request.urlopen(request, timeout=12) as response:
|
|
||||||
payload = json.loads(response.read().decode("utf-8"))
|
|
||||||
except Exception as exc: # noqa: BLE001
|
|
||||||
payload = {"status": "ERROR", "error": str(exc)}
|
|
||||||
|
|
||||||
with open(report_path, "w", encoding="utf-8") as handle:
|
|
||||||
json.dump(payload, handle, indent=2, sort_keys=True)
|
|
||||||
handle.write("\\n")
|
|
||||||
PY
|
|
||||||
'''
|
|
||||||
}
|
|
||||||
}
|
|
||||||
stage('Collect IronBank evidence') {
|
|
||||||
steps {
|
|
||||||
container('quality-tools') {
|
|
||||||
sh '''#!/usr/bin/env bash
|
|
||||||
set -euo pipefail
|
|
||||||
mkdir -p build
|
|
||||||
set +e
|
|
||||||
trivy fs --cache-dir "${TRIVY_CACHE_DIR}" --skip-db-update --skip-files clusters/atlas/flux-system/gotk-components.yaml --timeout 5m --no-progress --format json --output build/trivy-fs.json --scanners vuln,secret,misconfig --severity HIGH,CRITICAL .
|
|
||||||
trivy_rc=$?
|
|
||||||
set -e
|
|
||||||
if [ ! -s build/trivy-fs.json ]; then
|
|
||||||
cat > build/ironbank-compliance.json <<EOF
|
|
||||||
{"status":"failed","compliant":false,"scanner":"trivy","scan_type":"filesystem","error":"trivy did not produce JSON output","trivy_rc":${trivy_rc}}
|
|
||||||
EOF
|
|
||||||
exit 0
|
|
||||||
fi
|
|
||||||
'''
|
|
||||||
}
|
|
||||||
sh '''
|
|
||||||
set -eu
|
|
||||||
mkdir -p build
|
|
||||||
if [ -s build/trivy-fs.json ]; then
|
|
||||||
python3 ci/scripts/supply_chain_report.py --trivy-json build/trivy-fs.json --waivers ci/titan-iac-trivy-waivers.json --output build/ironbank-compliance.json
|
|
||||||
exit 0
|
|
||||||
fi
|
|
||||||
python3 - <<'PY'
|
|
||||||
import json
|
|
||||||
import os
|
|
||||||
from pathlib import Path
|
|
||||||
|
|
||||||
report_path = Path(os.getenv('QUALITY_GATE_IRONBANK_REPORT', 'build/ironbank-compliance.json'))
|
|
||||||
if report_path.exists():
|
|
||||||
raise SystemExit(0)
|
|
||||||
|
|
||||||
status = os.getenv('IRONBANK_COMPLIANCE_STATUS', '').strip()
|
|
||||||
compliant = os.getenv('IRONBANK_COMPLIANT', '').strip().lower()
|
|
||||||
payload = {
|
|
||||||
"status": status or "unknown",
|
|
||||||
"compliant": compliant in {"1", "true", "yes", "on"} if compliant else None,
|
|
||||||
}
|
|
||||||
payload = {k: v for k, v in payload.items() if v is not None}
|
|
||||||
if "status" not in payload:
|
|
||||||
payload["status"] = "unknown"
|
|
||||||
payload["note"] = (
|
|
||||||
"Set IRONBANK_COMPLIANCE_STATUS/IRONBANK_COMPLIANT "
|
|
||||||
"or write build/ironbank-compliance.json in image-building repos."
|
|
||||||
)
|
|
||||||
|
|
||||||
report_path.parent.mkdir(parents=True, exist_ok=True)
|
|
||||||
report_path.write_text(json.dumps(payload, indent=2, sort_keys=True) + "\\n", encoding="utf-8")
|
|
||||||
PY
|
|
||||||
'''
|
|
||||||
}
|
|
||||||
}
|
|
||||||
stage('Run quality gate') {
|
|
||||||
steps {
|
|
||||||
sh '''
|
|
||||||
set -eu
|
|
||||||
mkdir -p build
|
|
||||||
set +e
|
|
||||||
python3 -m testing.quality_gate --profile jenkins --build-dir build
|
|
||||||
quality_gate_rc=$?
|
|
||||||
set -e
|
|
||||||
printf '%s\n' "${quality_gate_rc}" > build/quality-gate.rc
|
|
||||||
'''
|
|
||||||
}
|
|
||||||
}
|
|
||||||
stage('Publish test metrics') {
|
|
||||||
steps {
|
|
||||||
sh '''
|
|
||||||
set -eu
|
|
||||||
export JUNIT_GLOB='build/junit-*.xml'
|
|
||||||
export QUALITY_GATE_EXIT_CODE_PATH='build/quality-gate.rc'
|
|
||||||
export QUALITY_GATE_SUMMARY_PATH='build/quality-gate-summary.json'
|
|
||||||
python3 ci/scripts/publish_test_metrics.py
|
|
||||||
'''
|
|
||||||
}
|
|
||||||
}
|
|
||||||
stage('Enforce quality gate') {
|
|
||||||
steps {
|
|
||||||
sh '''
|
|
||||||
set -euo pipefail
|
|
||||||
gate_rc="$(cat build/quality-gate.rc 2>/dev/null || echo 1)"
|
|
||||||
fail=0
|
|
||||||
if [ "${gate_rc}" -ne 0 ]; then
|
|
||||||
echo "quality gate failed with rc=${gate_rc}" >&2
|
|
||||||
fail=1
|
|
||||||
fi
|
|
||||||
|
|
||||||
enabled() {
|
|
||||||
case "$(printf '%s' "${1:-}" | tr '[:upper:]' '[:lower:]')" in
|
|
||||||
1|true|yes|on) return 0 ;;
|
|
||||||
*) return 1 ;;
|
|
||||||
esac
|
|
||||||
}
|
|
||||||
|
|
||||||
if enabled "${QUALITY_GATE_SONARQUBE_ENFORCE:-1}"; then
|
|
||||||
sonar_status="$(python3 - <<'PY'
|
|
||||||
import json
|
|
||||||
from pathlib import Path
|
|
||||||
|
|
||||||
path = Path("build/sonarqube-quality-gate.json")
|
|
||||||
if not path.exists():
|
|
||||||
print("missing")
|
|
||||||
raise SystemExit(0)
|
|
||||||
try:
|
|
||||||
payload = json.loads(path.read_text(encoding="utf-8"))
|
|
||||||
except Exception: # noqa: BLE001
|
|
||||||
print("error")
|
|
||||||
raise SystemExit(0)
|
|
||||||
status = (payload.get("status") or payload.get("projectStatus", {}).get("status") or payload.get("qualityGate", {}).get("status") or "").strip().lower()
|
|
||||||
print(status or "missing")
|
|
||||||
PY
|
|
||||||
)"
|
|
||||||
case "${sonar_status}" in
|
|
||||||
ok|pass|passed|success) ;;
|
|
||||||
*)
|
|
||||||
echo "sonarqube gate failed: ${sonar_status}" >&2
|
|
||||||
fail=1
|
|
||||||
;;
|
|
||||||
esac
|
|
||||||
fi
|
|
||||||
|
|
||||||
ironbank_required="${QUALITY_GATE_IRONBANK_REQUIRED:-0}"
|
|
||||||
if [ "${PUBLISH_IMAGES:-false}" = "true" ]; then
|
|
||||||
ironbank_required=1
|
|
||||||
fi
|
|
||||||
if enabled "${QUALITY_GATE_IRONBANK_ENFORCE:-1}"; then
|
|
||||||
supply_status="$(python3 - <<'PY'
|
|
||||||
import json
|
|
||||||
from pathlib import Path
|
|
||||||
|
|
||||||
path = Path("build/ironbank-compliance.json")
|
|
||||||
if not path.exists():
|
|
||||||
print("missing")
|
|
||||||
raise SystemExit(0)
|
|
||||||
try:
|
|
||||||
payload = json.loads(path.read_text(encoding="utf-8"))
|
|
||||||
except Exception: # noqa: BLE001
|
|
||||||
print("error")
|
|
||||||
raise SystemExit(0)
|
|
||||||
compliant = payload.get("compliant")
|
|
||||||
if compliant is True:
|
|
||||||
print("ok")
|
|
||||||
elif compliant is False:
|
|
||||||
print("failed")
|
|
||||||
else:
|
|
||||||
status = str(payload.get("status") or payload.get("result") or payload.get("compliance") or "").strip().lower()
|
|
||||||
print(status or "missing")
|
|
||||||
PY
|
|
||||||
)"
|
|
||||||
case "${supply_status}" in
|
|
||||||
ok|pass|passed|success|compliant) ;;
|
|
||||||
not_applicable|na|n/a)
|
|
||||||
if enabled "${ironbank_required}"; then
|
|
||||||
echo "supply chain gate required but status=${supply_status}" >&2
|
|
||||||
fail=1
|
|
||||||
fi
|
|
||||||
;;
|
|
||||||
*)
|
|
||||||
if enabled "${ironbank_required}"; then
|
|
||||||
echo "supply chain gate failed: ${supply_status}" >&2
|
|
||||||
fail=1
|
|
||||||
else
|
|
||||||
echo "supply chain gate not passing (${supply_status}) but not required for this run" >&2
|
|
||||||
fi
|
|
||||||
;;
|
|
||||||
esac
|
|
||||||
fi
|
|
||||||
|
|
||||||
exit "${fail}"
|
|
||||||
'''
|
|
||||||
}
|
|
||||||
}
|
|
||||||
stage('Resolve Flux branch') {
|
|
||||||
steps {
|
|
||||||
script {
|
|
||||||
env.FLUX_BRANCH = sh(
|
|
||||||
returnStdout: true,
|
|
||||||
script: "awk '/branch:/{print $2; exit}' clusters/atlas/flux-system/gotk-sync.yaml"
|
|
||||||
).trim()
|
|
||||||
if (!env.FLUX_BRANCH) {
|
|
||||||
error('Flux branch not found in gotk-sync.yaml')
|
|
||||||
}
|
|
||||||
echo "Flux branch: ${env.FLUX_BRANCH}"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
stage('Promote') {
    // Fast-forward the Flux-reconciled branch only when this build already
    // runs on that branch (resolved by the previous stage).
    when {
        expression {
            def branch = env.BRANCH_NAME ?: (env.GIT_BRANCH ?: '').replaceFirst('origin/', '')
            return env.FLUX_BRANCH && branch == env.FLUX_BRANCH
        }
    }
    steps {
        withCredentials([usernamePassword(credentialsId: 'gitea-pat', usernameVariable: 'GIT_USER', passwordVariable: 'GIT_TOKEN')]) {
            sh '''
set -euo pipefail
if ! command -v git >/dev/null 2>&1; then
    if command -v apk >/dev/null 2>&1; then
        apk add --no-cache git >/dev/null
    elif command -v apt-get >/dev/null 2>&1; then
        apt-get update >/dev/null
        apt-get install -y git >/dev/null
    fi
fi
cd "${WORKSPACE:-$PWD}"
if ! git rev-parse --is-inside-work-tree >/dev/null 2>&1; then
    echo "workspace is not a git checkout; skipping promote"
    exit 0
fi
set +x
git config user.email "jenkins@bstein.dev"
git config user.name "jenkins"
git remote set-url origin https://${GIT_USER}:${GIT_TOKEN}@scm.bstein.dev/bstein/titan-iac.git
git push origin HEAD:${FLUX_BRANCH}
'''
        }
    }
}
|
|
||||||
}
|
|
||||||
post {
    always {
        script {
            // Publish JUnit results when any report exists; tolerate a
            // controller without the junit plugin by catching the
            // missing-step Throwable instead of failing the build.
            if (fileExists('build/junit-unit.xml') || fileExists('build/junit-glue.xml')) {
                try {
                    junit allowEmptyResults: true, testResults: 'build/junit-*.xml'
                } catch (Throwable err) {
                    echo "junit step unavailable: ${err.class.simpleName}"
                }
            }
        }
        archiveArtifacts artifacts: 'build/**', allowEmptyArchive: true, fingerprint: true
    }
}
|
|
||||||
}
|
|
||||||
@ -1,7 +0,0 @@
|
|||||||
pytest==8.3.4
|
|
||||||
pytest-cov==6.0.0
|
|
||||||
coverage==7.6.10
|
|
||||||
kubernetes==30.1.0
|
|
||||||
PyYAML==6.0.2
|
|
||||||
requests==2.32.3
|
|
||||||
ruff==0.8.4
|
|
||||||
@ -1,352 +0,0 @@
|
|||||||
#!/usr/bin/env python3
|
|
||||||
"""Publish titan-iac quality-gate results to Pushgateway."""
|
|
||||||
|
|
||||||
from __future__ import annotations
|
|
||||||
|
|
||||||
import json
|
|
||||||
import os
|
|
||||||
from glob import glob
|
|
||||||
from pathlib import Path
|
|
||||||
import sys
|
|
||||||
import urllib.error
|
|
||||||
import urllib.request
|
|
||||||
import xml.etree.ElementTree as ET
|
|
||||||
|
|
||||||
sys.path.insert(0, str(Path(__file__).resolve().parents[2]))
|
|
||||||
|
|
||||||
from ci.scripts import publish_test_metrics_quality as _quality_helpers
|
|
||||||
|
|
||||||
CANONICAL_CHECKS = _quality_helpers.CANONICAL_CHECKS
|
|
||||||
_build_check_statuses = _quality_helpers._build_check_statuses
|
|
||||||
_combine_statuses = _quality_helpers._combine_statuses
|
|
||||||
_infer_sonarqube_status = _quality_helpers._infer_sonarqube_status
|
|
||||||
_infer_source_lines_over_500 = _quality_helpers._infer_source_lines_over_500
|
|
||||||
_infer_supply_chain_status = _quality_helpers._infer_supply_chain_status
|
|
||||||
_infer_workspace_coverage_percent = _quality_helpers._infer_workspace_coverage_percent
|
|
||||||
_load_optional_json = _quality_helpers._load_optional_json
|
|
||||||
_normalize_result_status = _quality_helpers._normalize_result_status
|
|
||||||
|
|
||||||
|
|
||||||
def _escape_label(value: str) -> str:
|
|
||||||
"""Escape a Prometheus label value without changing its content."""
|
|
||||||
return value.replace("\\", "\\\\").replace("\n", "\\n").replace('"', '\\"')
|
|
||||||
|
|
||||||
|
|
||||||
def _label_str(labels: dict[str, str]) -> str:
    """Render a stable Prometheus label block, dropping empty values.

    Returns ``""`` when no label survives filtering so callers can append
    the result directly after a metric name.
    """
    rendered = ",".join(
        f'{name}="{_escape_label(text)}"' for name, text in labels.items() if text
    )
    return f"{{{rendered}}}" if rendered else ""
|
|
||||||
|
|
||||||
|
|
||||||
def _read_text(url: str) -> str:
    """Fetch the response body from *url* and decode it as UTF-8 (10s timeout)."""
    with urllib.request.urlopen(url, timeout=10) as handle:
        body = handle.read()
    return body.decode("utf-8")
|
|
||||||
|
|
||||||
|
|
||||||
def _post_text(url: str, payload: str) -> None:
    """PUT *payload* as ``text/plain`` to *url*; raise on a 4xx/5xx status.

    NOTE: urllib already raises HTTPError for most error statuses; the
    explicit check is a belt-and-braces guard for non-raising transports.
    """
    request = urllib.request.Request(
        url,
        method="PUT",
        data=payload.encode("utf-8"),
        headers={"Content-Type": "text/plain"},
    )
    with urllib.request.urlopen(request, timeout=10) as response:
        if response.status >= 400:
            raise RuntimeError(f"push failed with status={response.status}")
|
|
||||||
|
|
||||||
|
|
||||||
def _parse_junit(path: str) -> dict[str, int]:
|
|
||||||
"""Parse a JUnit XML file into aggregate test counters."""
|
|
||||||
if not os.path.exists(path):
|
|
||||||
return {"tests": 0, "failures": 0, "errors": 0, "skipped": 0}
|
|
||||||
|
|
||||||
tree = ET.parse(path)
|
|
||||||
root = tree.getroot()
|
|
||||||
totals = {"tests": 0, "failures": 0, "errors": 0, "skipped": 0}
|
|
||||||
|
|
||||||
suites: list[ET.Element]
|
|
||||||
if root.tag == "testsuite":
|
|
||||||
suites = [root]
|
|
||||||
elif root.tag == "testsuites":
|
|
||||||
suites = [elem for elem in root if elem.tag == "testsuite"]
|
|
||||||
else:
|
|
||||||
suites = []
|
|
||||||
|
|
||||||
for suite in suites:
|
|
||||||
for key in totals:
|
|
||||||
raw_value = suite.attrib.get(key, "0")
|
|
||||||
try:
|
|
||||||
totals[key] += int(float(raw_value))
|
|
||||||
except ValueError:
|
|
||||||
totals[key] += 0
|
|
||||||
return totals
|
|
||||||
|
|
||||||
|
|
||||||
def _collect_junit_totals(pattern: str) -> dict[str, int]:
    """Sum JUnit counters across every XML file matching *pattern*."""
    combined = {"tests": 0, "failures": 0, "errors": 0, "skipped": 0}
    for report_path in sorted(glob(pattern)):
        for counter_name, value in _parse_junit(report_path).items():
            combined[counter_name] += value
    return combined
|
|
||||||
|
|
||||||
|
|
||||||
def _collect_junit_cases(pattern: str) -> list[tuple[str, str]]:
|
|
||||||
"""Collect individual JUnit test-case statuses for flaky-test trend panels."""
|
|
||||||
cases: list[tuple[str, str]] = []
|
|
||||||
for path in sorted(glob(pattern)):
|
|
||||||
if not os.path.exists(path):
|
|
||||||
continue
|
|
||||||
root = ET.parse(path).getroot()
|
|
||||||
suites: list[ET.Element]
|
|
||||||
if root.tag == "testsuite":
|
|
||||||
suites = [root]
|
|
||||||
elif root.tag == "testsuites":
|
|
||||||
suites = [elem for elem in root if elem.tag == "testsuite"]
|
|
||||||
else:
|
|
||||||
suites = []
|
|
||||||
for suite in suites:
|
|
||||||
for test_case in suite.findall("testcase"):
|
|
||||||
case_name = test_case.attrib.get("name", "").strip()
|
|
||||||
class_name = test_case.attrib.get("classname", "").strip()
|
|
||||||
if not case_name:
|
|
||||||
continue
|
|
||||||
full_name = f"{class_name}.{case_name}" if class_name else case_name
|
|
||||||
status = "passed"
|
|
||||||
if test_case.find("failure") is not None or test_case.find("error") is not None:
|
|
||||||
status = "failed"
|
|
||||||
elif test_case.find("skipped") is not None:
|
|
||||||
status = "skipped"
|
|
||||||
cases.append((full_name, status))
|
|
||||||
return cases
|
|
||||||
|
|
||||||
|
|
||||||
def _read_exit_code(path: str) -> int:
|
|
||||||
"""Read the quality-gate exit code, defaulting to failure if missing."""
|
|
||||||
try:
|
|
||||||
with open(path, "r", encoding="utf-8") as handle:
|
|
||||||
return int(handle.read().strip())
|
|
||||||
except (FileNotFoundError, ValueError):
|
|
||||||
return 1
|
|
||||||
|
|
||||||
|
|
||||||
def _load_summary(path: str) -> dict:
|
|
||||||
"""Load the JSON quality-gate summary, returning an empty mapping on error."""
|
|
||||||
try:
|
|
||||||
with open(path, "r", encoding="utf-8") as handle:
|
|
||||||
return json.load(handle)
|
|
||||||
except (FileNotFoundError, json.JSONDecodeError):
|
|
||||||
return {}
|
|
||||||
|
|
||||||
|
|
||||||
def _summary_float(summary: dict, key: str) -> float:
|
|
||||||
"""Extract a float-like value from the summary, defaulting to 0.0."""
|
|
||||||
value = summary.get(key)
|
|
||||||
if isinstance(value, (int, float)):
|
|
||||||
return float(value)
|
|
||||||
return 0.0
|
|
||||||
|
|
||||||
|
|
||||||
def _summary_int(summary: dict, key: str) -> int:
|
|
||||||
"""Extract an int-like value from the summary, defaulting to 0."""
|
|
||||||
value = summary.get(key)
|
|
||||||
if isinstance(value, int):
|
|
||||||
return value
|
|
||||||
if isinstance(value, float):
|
|
||||||
return int(value)
|
|
||||||
return 0
|
|
||||||
|
|
||||||
|
|
||||||
def _fetch_existing_counter(pushgateway_url: str, metric: str, labels: dict[str, str]) -> float:
    """Return the current counter value for a labeled metric if present.

    Scans the Pushgateway's ``/metrics`` text for the first line whose name
    and label fragments all match; returns 0.0 when absent or unparseable.
    """
    metrics_text = _read_text(f"{pushgateway_url.rstrip('/')}/metrics")
    prefix = metric + "{"
    wanted = [f'{name}="{value}"' for name, value in labels.items()]
    for raw_line in metrics_text.splitlines():
        if not raw_line.startswith(prefix):
            continue
        # Substring label matching — values here are simple identifiers.
        if not all(fragment in raw_line for fragment in wanted):
            continue
        fields = raw_line.split()
        if len(fields) < 2:
            continue
        try:
            return float(fields[1])
        except ValueError:
            return 0.0
    return 0.0
|
|
||||||
|
|
||||||
|
|
||||||
def _build_payload(
    suite: str,
    status: str,
    tests: dict[str, int],
    test_cases: list[tuple[str, str]],
    ok_count: int,
    failed_count: int,
    branch: str,
    build_number: str,
    jenkins_job: str,
    summary: dict | None = None,
    workspace_line_coverage_percent: float = 0.0,
    source_lines_over_500: int = 0,
    check_statuses: dict[str, str] | None = None,
) -> str:
    """Build the Pushgateway payload for the current suite run.

    Emits Prometheus text-exposition lines: run counters, per-result test
    totals, run status, build-info, coverage and LOC gauges, optional
    per-check gauges, and one gauge per individual test case.

    NOTE(review): *summary* is accepted but never referenced in this body;
    it appears to be kept for interface parity with the caller.
    """
    # Passed = total minus failures/errors/skips, clamped at zero in case
    # the JUnit counters are inconsistent.
    passed = max(tests["tests"] - tests["failures"] - tests["errors"] - tests["skipped"], 0)
    build_labels = _label_str(
        {
            "suite": suite,
            "branch": branch or "unknown",
            "build_number": build_number or "unknown",
            "jenkins_job": jenkins_job or suite,
        }
    )
    # Same label set, kept as a dict so per-case labels can extend it.
    test_case_base_labels = {
        "suite": suite,
        "branch": branch or "unknown",
        "build_number": build_number or "unknown",
        "jenkins_job": jenkins_job or suite,
    }
    lines = [
        "# TYPE platform_quality_gate_runs_total counter",
        f'platform_quality_gate_runs_total{{suite="{suite}",status="ok"}} {ok_count}',
        f'platform_quality_gate_runs_total{{suite="{suite}",status="failed"}} {failed_count}',
        "# TYPE titan_iac_quality_gate_tests_total gauge",
        f'titan_iac_quality_gate_tests_total{{suite="{suite}",result="passed"}} {passed}',
        f'titan_iac_quality_gate_tests_total{{suite="{suite}",result="failed"}} {tests["failures"]}',
        f'titan_iac_quality_gate_tests_total{{suite="{suite}",result="error"}} {tests["errors"]}',
        f'titan_iac_quality_gate_tests_total{{suite="{suite}",result="skipped"}} {tests["skipped"]}',
        "# TYPE titan_iac_quality_gate_run_status gauge",
        f'titan_iac_quality_gate_run_status{{suite="{suite}",status="ok"}} {1 if status == "ok" else 0}',
        f'titan_iac_quality_gate_run_status{{suite="{suite}",status="failed"}} {1 if status == "failed" else 0}',
        "# TYPE platform_quality_gate_build_info gauge",
        f"platform_quality_gate_build_info{build_labels} 1",
        "# TYPE titan_iac_quality_gate_build_info gauge",
        f"titan_iac_quality_gate_build_info{build_labels} 1",
        "# TYPE platform_quality_gate_workspace_line_coverage_percent gauge",
        f'platform_quality_gate_workspace_line_coverage_percent{{suite="{suite}"}} {workspace_line_coverage_percent:.3f}',
        "# TYPE platform_quality_gate_source_lines_over_500_total gauge",
        f'platform_quality_gate_source_lines_over_500_total{{suite="{suite}"}} {source_lines_over_500}',
    ]
    if check_statuses:
        # One gauge per canonical check; absent checks report not_applicable.
        lines.append("# TYPE titan_iac_quality_gate_checks_total gauge")
        for check_name in CANONICAL_CHECKS:
            check_status = check_statuses.get(check_name, "not_applicable")
            lines.append(
                f'titan_iac_quality_gate_checks_total{{suite="{suite}",check="{_escape_label(check_name)}",result="{_escape_label(check_status)}"}} 1'
            )
    lines.append("# TYPE platform_quality_gate_test_case_result gauge")
    if test_cases:
        for test_name, test_status in test_cases:
            labels = {
                **test_case_base_labels,
                "test": test_name,
                "status": test_status,
            }
            lines.append(
                f"platform_quality_gate_test_case_result{_label_str(labels)} 1"
            )
    else:
        # Placeholder series so dashboards always find at least one sample.
        labels = {**test_case_base_labels, "test": "__no_test_cases__", "status": "skipped"}
        lines.append(
            f"platform_quality_gate_test_case_result{_label_str(labels)} 1"
        )
    return "\n".join(lines) + "\n"
|
|
||||||
|
|
||||||
|
|
||||||
def main() -> int:
    """Publish the quality-gate metrics and print a compact run summary."""
    # --- configuration from environment -------------------------------------
    suite = os.getenv("SUITE_NAME", "titan_iac")
    pushgateway_url = os.getenv("PUSHGATEWAY_URL", "http://platform-quality-gateway.monitoring.svc.cluster.local:9091")
    job_name = os.getenv("QUALITY_GATE_JOB_NAME", "platform-quality-ci")
    junit_glob = os.getenv("JUNIT_GLOB", os.getenv("JUNIT_PATH", "build/junit-*.xml"))
    exit_code_path = os.getenv("QUALITY_GATE_EXIT_CODE_PATH", os.getenv("GLUE_EXIT_CODE_PATH", "build/quality-gate.rc"))
    summary_path = os.getenv("QUALITY_GATE_SUMMARY_PATH", "build/quality-gate-summary.json")
    branch = os.getenv("BRANCH_NAME") or os.getenv("GIT_BRANCH") or "unknown"
    if branch.startswith("origin/"):
        # Multibranch jobs report "origin/<branch>"; strip the remote prefix.
        branch = branch[len("origin/") :]
    build_number = os.getenv("BUILD_NUMBER", "")
    jenkins_job = os.getenv("JOB_NAME", "titan-iac")

    # --- collect evidence ----------------------------------------------------
    tests = _collect_junit_totals(junit_glob)
    test_cases = _collect_junit_cases(junit_glob)
    exit_code = _read_exit_code(exit_code_path)
    status = "ok" if exit_code == 0 else "failed"
    summary = _load_summary(summary_path)
    workspace_line_coverage_percent = _summary_float(summary, "workspace_line_coverage_percent")
    if workspace_line_coverage_percent <= 0:
        # Fall back to parsing the coverage XML when the summary lacks it.
        workspace_line_coverage_percent = _infer_workspace_coverage_percent(summary, "build/coverage-unit.xml")
    source_lines_over_500 = _summary_int(summary, "source_lines_over_500")
    if source_lines_over_500 <= 0:
        source_lines_over_500 = _infer_source_lines_over_500(summary)
    sonarqube_report = _load_optional_json(os.getenv("QUALITY_GATE_SONARQUBE_REPORT", "build/sonarqube-quality-gate.json"))
    supply_chain_report = _load_optional_json(os.getenv("QUALITY_GATE_IRONBANK_REPORT", "build/ironbank-compliance.json"))
    supply_chain_required = os.getenv("QUALITY_GATE_IRONBANK_REQUIRED", "0").strip().lower() in {"1", "true", "yes", "on"}
    check_statuses = _build_check_statuses(
        summary=summary,
        tests=tests,
        workspace_line_coverage_percent=workspace_line_coverage_percent,
        source_lines_over_500=source_lines_over_500,
        sonarqube_report=sonarqube_report,
        supply_chain_report=supply_chain_report,
        supply_chain_required=supply_chain_required,
    )

    # --- cumulative run counters held in the Pushgateway ---------------------
    # Read the existing ok/failed counters and bump the bucket for this run.
    ok_count = int(
        _fetch_existing_counter(
            pushgateway_url,
            "platform_quality_gate_runs_total",
            {"job": job_name, "suite": suite, "status": "ok"},
        )
    )
    failed_count = int(
        _fetch_existing_counter(
            pushgateway_url,
            "platform_quality_gate_runs_total",
            {"job": job_name, "suite": suite, "status": "failed"},
        )
    )
    if status == "ok":
        ok_count += 1
    else:
        failed_count += 1

    payload = _build_payload(
        suite=suite,
        status=status,
        tests=tests,
        test_cases=test_cases,
        ok_count=ok_count,
        failed_count=failed_count,
        branch=branch,
        build_number=build_number,
        jenkins_job=jenkins_job,
        summary=summary,
        workspace_line_coverage_percent=workspace_line_coverage_percent,
        source_lines_over_500=source_lines_over_500,
        check_statuses=check_statuses,
    )
    push_url = f"{pushgateway_url.rstrip('/')}/metrics/job/{job_name}/suite/{suite}"
    _post_text(push_url, payload)

    # NOTE(review): this rebinds *summary* from the loaded gate summary to the
    # printed run summary; the original value is no longer needed here.
    summary = {
        "suite": suite,
        "status": status,
        "tests_total": tests["tests"],
        "tests_failed": tests["failures"],
        "tests_error": tests["errors"],
        "tests_skipped": tests["skipped"],
        "ok_count": ok_count,
        "failed_count": failed_count,
        "checks_recorded": len(check_statuses),
        "workspace_line_coverage_percent": workspace_line_coverage_percent,
        "source_lines_over_500": source_lines_over_500,
    }
    print(json.dumps(summary, sort_keys=True))
    return 0
|
|
||||||
|
|
||||||
|
|
||||||
if __name__ == "__main__": # pragma: no cover
|
|
||||||
raise SystemExit(main())
|
|
||||||
@ -1,200 +0,0 @@
|
|||||||
#!/usr/bin/env python3
|
|
||||||
"""Quality/status helpers for publish_test_metrics."""
|
|
||||||
|
|
||||||
from __future__ import annotations
|
|
||||||
|
|
||||||
import json
|
|
||||||
from pathlib import Path
|
|
||||||
import xml.etree.ElementTree as ET
|
|
||||||
|
|
||||||
# Raw status strings accepted as a passing result.
SUCCESS_STATUSES = {"ok", "pass", "passed", "success", "compliant"}
# Raw status strings treated as "check did not apply to this run".
NOT_APPLICABLE_STATUSES = {"not_applicable", "n/a", "na", "none", "skipped"}
# Raw status strings treated as a failing result (warnings count as failures).
FAILED_STATUSES = {"failed", "fail", "error", "errors", "warn", "warning", "red"}

# The fixed set of quality checks every suite reports, in dashboard order.
CANONICAL_CHECKS = [
    "tests",
    "coverage",
    "loc",
    "docs_naming",
    "gate_glue",
    "sonarqube",
    "supply_chain",
]
|
|
||||||
|
|
||||||
|
|
||||||
def _infer_workspace_coverage_percent(summary: dict, default_xml: str) -> float:
|
|
||||||
"""Infer workspace line coverage from quality summary coverage XML metadata."""
|
|
||||||
results = summary.get("results", []) if isinstance(summary, dict) else []
|
|
||||||
coverage_xml = default_xml
|
|
||||||
for result in results:
|
|
||||||
if not isinstance(result, dict):
|
|
||||||
continue
|
|
||||||
if str(result.get("name") or "").strip().lower() != "coverage":
|
|
||||||
continue
|
|
||||||
candidate = str(result.get("coverage_xml") or "").strip()
|
|
||||||
if candidate:
|
|
||||||
coverage_xml = candidate
|
|
||||||
break
|
|
||||||
xml_path = Path(coverage_xml)
|
|
||||||
if not xml_path.exists():
|
|
||||||
return 0.0
|
|
||||||
try:
|
|
||||||
root = ET.parse(xml_path).getroot()
|
|
||||||
line_rate = root.attrib.get("line-rate")
|
|
||||||
if line_rate is None:
|
|
||||||
return 0.0
|
|
||||||
return float(line_rate) * 100.0
|
|
||||||
except (ET.ParseError, OSError, ValueError):
|
|
||||||
return 0.0
|
|
||||||
|
|
||||||
|
|
||||||
def _infer_source_lines_over_500(summary: dict) -> int:
|
|
||||||
"""Infer over-limit source file count from hygiene issue payloads."""
|
|
||||||
results = summary.get("results", []) if isinstance(summary, dict) else []
|
|
||||||
for result in results:
|
|
||||||
if not isinstance(result, dict):
|
|
||||||
continue
|
|
||||||
if str(result.get("name") or "").strip().lower() not in {"hygiene", "loc", "smell"}:
|
|
||||||
continue
|
|
||||||
issues = result.get("issues")
|
|
||||||
if not isinstance(issues, list):
|
|
||||||
continue
|
|
||||||
return sum(1 for item in issues if isinstance(item, str) and item.startswith("file exceeds"))
|
|
||||||
return 0
|
|
||||||
|
|
||||||
|
|
||||||
def _normalize_result_status(value: str | None, default: str = "failed") -> str:
    """Map arbitrary check status text into canonical result buckets.

    Empty/None input and unknown strings fall back to *default*.
    """
    if not value:
        return default
    lowered = value.strip().lower()
    for bucket, names in (
        ("ok", SUCCESS_STATUSES),
        ("not_applicable", NOT_APPLICABLE_STATUSES),
        ("failed", FAILED_STATUSES),
    ):
        if lowered in names:
            return bucket
    return default
|
|
||||||
|
|
||||||
|
|
||||||
def _load_optional_json(path: str | None) -> dict:
|
|
||||||
"""Load an optional JSON report file, returning an empty object when absent."""
|
|
||||||
if not path:
|
|
||||||
return {}
|
|
||||||
candidate = Path(path)
|
|
||||||
if not candidate.exists():
|
|
||||||
return {}
|
|
||||||
try:
|
|
||||||
return json.loads(candidate.read_text(encoding="utf-8"))
|
|
||||||
except json.JSONDecodeError:
|
|
||||||
return {}
|
|
||||||
|
|
||||||
|
|
||||||
def _combine_statuses(statuses: list[str]) -> str:
|
|
||||||
"""Roll up many check statuses into one canonical result."""
|
|
||||||
if not statuses:
|
|
||||||
return "not_applicable"
|
|
||||||
if any(status == "failed" for status in statuses):
|
|
||||||
return "failed"
|
|
||||||
if all(status == "not_applicable" for status in statuses):
|
|
||||||
return "not_applicable"
|
|
||||||
if all(status in {"ok", "not_applicable"} for status in statuses):
|
|
||||||
return "ok"
|
|
||||||
return "failed"
|
|
||||||
|
|
||||||
|
|
||||||
def _infer_sonarqube_status(report: dict) -> str:
    """Infer canonical SonarQube check status from its JSON report payload.

    Tries the common report shapes in order (``projectStatus.status``,
    ``qualityGate.status``, top-level ``status``); an empty report is N/A.
    """
    if not report:
        return "not_applicable"
    raw_status = (
        report.get("projectStatus", {}).get("status")
        or report.get("qualityGate", {}).get("status")
        or report.get("status")
    )
    text = str(raw_status) if raw_status is not None else None
    return _normalize_result_status(text, default="failed")
|
|
||||||
|
|
||||||
|
|
||||||
def _infer_supply_chain_status(report: dict, required: bool) -> str:
    """Infer canonical supply-chain status from the IronBank report payload.

    A boolean ``compliant`` flag wins outright; otherwise the textual
    ``status`` is normalized. Missing evidence fails only when the gate is
    *required*, and a required gate never accepts ``not_applicable``.
    """
    missing_result = "failed" if required else "not_applicable"
    if not report:
        return missing_result
    compliant_flag = report.get("compliant")
    if isinstance(compliant_flag, bool):
        return "ok" if compliant_flag else "failed"
    raw_status = report.get("status")
    if raw_status is None:
        return missing_result
    canonical = _normalize_result_status(str(raw_status), default="failed")
    if required and canonical == "not_applicable":
        return "failed"
    return canonical
|
|
||||||
|
|
||||||
|
|
||||||
def _build_check_statuses(
    summary: dict | None,
    tests: dict[str, int],
    workspace_line_coverage_percent: float,
    source_lines_over_500: int,
    sonarqube_report: dict,
    supply_chain_report: dict,
    supply_chain_required: bool,
) -> dict[str, str]:
    """Generate the canonical quality-check status map for dashboarding.

    Explicit per-check statuses in the summary win; each canonical check
    otherwise falls back to inference from the raw evidence passed in.
    """
    # Normalize every named result reported by the gate summary first.
    raw_results = summary.get("results", []) if isinstance(summary, dict) else []
    status_by_name: dict[str, str] = {}
    for result in raw_results:
        if not isinstance(result, dict):
            continue
        check_name = str(result.get("name") or "").strip().lower()
        if not check_name:
            continue
        status_by_name[check_name] = _normalize_result_status(result.get("status"), default="failed")

    # tests: explicit result, then any test-flavored result names, then the
    # aggregated JUnit counters, finally not_applicable when nothing ran.
    tests_status = status_by_name.get("tests")
    if not tests_status:
        candidate_keys = ["unit", "integration", "e2e", "pytest", "test", "tests"]
        candidates = [status_by_name[key] for key in candidate_keys if key in status_by_name]
        if candidates:
            tests_status = _combine_statuses(candidates)
        elif tests["tests"] > 0:
            tests_status = "ok" if (tests["failures"] + tests["errors"]) == 0 else "failed"
        else:
            tests_status = "not_applicable"

    # coverage: 95% is the workspace line-coverage bar enforced here.
    coverage_status = status_by_name.get("coverage")
    if not coverage_status:
        if workspace_line_coverage_percent > 0:
            coverage_status = "ok" if workspace_line_coverage_percent >= 95.0 else "failed"
        else:
            coverage_status = "not_applicable"

    # loc: any file over the 500-line limit fails the check.
    loc_status = status_by_name.get("loc")
    if not loc_status:
        loc_status = "ok" if source_lines_over_500 == 0 else "failed"

    # docs_naming: roll up hygiene/lint-flavored results when present.
    docs_naming_status = status_by_name.get("docs_naming")
    if not docs_naming_status:
        candidates = [status_by_name[key] for key in ["docs", "hygiene", "smell", "lint", "naming"] if key in status_by_name]
        docs_naming_status = _combine_statuses(candidates) if candidates else "not_applicable"

    # gate_glue: roll up glue/gate-flavored results when present.
    gate_glue_status = status_by_name.get("gate_glue")
    if not gate_glue_status:
        candidates = [status_by_name[key] for key in ["gate_glue", "glue", "gate"] if key in status_by_name]
        gate_glue_status = _combine_statuses(candidates) if candidates else "not_applicable"

    # External-report checks fall back to their dedicated inference helpers.
    sonarqube_status = status_by_name.get("sonarqube") or _infer_sonarqube_status(sonarqube_report)
    supply_chain_status = status_by_name.get("supply_chain") or _infer_supply_chain_status(
        supply_chain_report,
        required=supply_chain_required,
    )

    return {
        "tests": tests_status,
        "coverage": coverage_status,
        "loc": loc_status,
        "docs_naming": docs_naming_status,
        "gate_glue": gate_glue_status,
        "sonarqube": sonarqube_status,
        "supply_chain": supply_chain_status,
    }
|
|
||||||
@ -1,173 +0,0 @@
|
|||||||
"""Build a titan-iac supply-chain compliance report from Trivy evidence."""
|
|
||||||
|
|
||||||
from __future__ import annotations
|
|
||||||
|
|
||||||
import argparse
|
|
||||||
import datetime as dt
|
|
||||||
import json
|
|
||||||
from pathlib import Path
|
|
||||||
from typing import Any
|
|
||||||
|
|
||||||
|
|
||||||
FAIL_SEVERITIES = {"HIGH", "CRITICAL"}
|
|
||||||
|
|
||||||
|
|
||||||
def _read_json(path: Path) -> dict[str, Any]:
|
|
||||||
"""Read a JSON object from disk for use as pipeline evidence."""
|
|
||||||
payload = json.loads(path.read_text(encoding="utf-8"))
|
|
||||||
if not isinstance(payload, dict):
|
|
||||||
raise ValueError(f"{path} must contain a JSON object")
|
|
||||||
return payload
|
|
||||||
|
|
||||||
|
|
||||||
def _parse_day(raw: str | None) -> dt.date | None:
|
|
||||||
"""Parse an ISO day while letting optional waiver dates stay optional."""
|
|
||||||
if not raw:
|
|
||||||
return None
|
|
||||||
return dt.date.fromisoformat(raw)
|
|
||||||
|
|
||||||
|
|
||||||
def _today(override: str | None = None) -> dt.date:
    """Return the policy day so tests can pin expiry behavior."""
    parsed = _parse_day(override)
    return parsed if parsed is not None else dt.date.today()
|
|
||||||
|
|
||||||
|
|
||||||
def _load_waiver_pairs(path: Path | None, policy_day: dt.date) -> tuple[set[tuple[str, str]], int]:
    """Return active ``(misconfiguration id, target)`` waivers and expired count.

    Entries without an id or with a malformed target list are skipped; an
    entry's expiry falls back to the file-level ``default_expires_at``.
    """
    if path is None or not path.exists():
        return set(), 0

    payload = _read_json(path)
    fallback_expiry = payload.get("default_expires_at")
    active_pairs: set[tuple[str, str]] = set()
    expired_count = 0

    for record in payload.get("misconfigurations", []):
        if not isinstance(record, dict):
            continue
        waiver_id = str(record.get("id") or "").strip()
        if not waiver_id:
            continue
        expiry = _parse_day(str(record.get("expires_at") or fallback_expiry or ""))
        target_list = record.get("targets", [])
        if not isinstance(target_list, list):
            continue

        if expiry and expiry < policy_day:
            expired_count += len(target_list)
            continue

        # Waivers are target-specific so a new unsafe manifest fails until it is
        # either fixed or deliberately accepted with a fresh expiration.
        for candidate in target_list:
            if isinstance(candidate, str) and candidate:
                active_pairs.add((waiver_id, candidate))

    return active_pairs, expired_count
|
|
||||||
|
|
||||||
|
|
||||||
def _iter_failed_misconfigurations(payload: dict[str, Any]):
    """Yield ``(target, record)`` for failed high/critical Trivy misconfigurations."""
    for scan_result in payload.get("Results", []):
        if not isinstance(scan_result, dict):
            continue
        target_name = str(scan_result.get("Target") or "")
        for finding in scan_result.get("Misconfigurations") or []:
            if not isinstance(finding, dict):
                continue
            if finding.get("Status") != "FAIL":
                continue
            severity = str(finding.get("Severity") or "").upper()
            if severity not in FAIL_SEVERITIES:
                continue
            yield target_name, finding
|
|
||||||
|
|
||||||
|
|
||||||
def _count_vulnerabilities(payload: dict[str, Any], severity: str) -> int:
|
|
||||||
"""Count Trivy vulnerabilities at a specific severity."""
|
|
||||||
count = 0
|
|
||||||
for result in payload.get("Results", []):
|
|
||||||
if not isinstance(result, dict):
|
|
||||||
continue
|
|
||||||
for item in result.get("Vulnerabilities") or []:
|
|
||||||
if isinstance(item, dict) and str(item.get("Severity") or "").upper() == severity:
|
|
||||||
count += 1
|
|
||||||
return count
|
|
||||||
|
|
||||||
|
|
||||||
def _count_secrets(payload: dict[str, Any]) -> int:
|
|
||||||
"""Count detected secrets in the Trivy filesystem report."""
|
|
||||||
count = 0
|
|
||||||
for result in payload.get("Results", []):
|
|
||||||
if isinstance(result, dict):
|
|
||||||
count += len(result.get("Secrets") or [])
|
|
||||||
return count
|
|
||||||
|
|
||||||
|
|
||||||
def build_report(
    trivy_payload: dict[str, Any],
    waiver_path: Path | None = None,
    today_override: str | None = None,
) -> dict[str, Any]:
    """Build the compliance summary consumed by the quality gate.

    Failed high/critical misconfigurations not covered by an active waiver,
    any critical vulnerability, or any detected secret fail the gate; high
    vulnerabilities are reported under an observe-only policy.
    """
    policy_day = _today(today_override)
    active_waivers, expired_waivers = _load_waiver_pairs(waiver_path, policy_day)

    open_findings: list[dict[str, str]] = []
    waived_findings = 0
    for target_name, finding in _iter_failed_misconfigurations(trivy_payload):
        finding_id = str(finding.get("ID") or "")
        if (finding_id, target_name) in active_waivers:
            waived_findings += 1
            continue
        open_findings.append(
            {
                "id": finding_id,
                "target": target_name,
                "severity": str(finding.get("Severity") or ""),
                "title": str(finding.get("Title") or ""),
            }
        )

    critical_count = _count_vulnerabilities(trivy_payload, "CRITICAL")
    high_count = _count_vulnerabilities(trivy_payload, "HIGH")
    secret_count = _count_secrets(trivy_payload)
    gate_ok = critical_count == 0 and secret_count == 0 and not open_findings
    status = "ok" if gate_ok else "failed"

    return {
        "status": status,
        "compliant": status == "ok",
        "category": "artifact_security",
        "scan_type": "filesystem",
        "scanner": "trivy",
        "critical_vulnerabilities": critical_count,
        "high_vulnerabilities": high_count,
        "high_vulnerability_policy": "observe",
        "secrets": secret_count,
        "high_or_critical_misconfigurations": len(open_findings),
        "waived_misconfigurations": waived_findings,
        "expired_waivers": expired_waivers,
        "waiver_file": str(waiver_path) if waiver_path else "",
        "open_misconfiguration_examples": open_findings[:20],
    }
|
|
||||||
|
|
||||||
|
|
||||||
def main(argv: list[str] | None = None) -> int:
|
|
||||||
"""CLI entrypoint used by Jenkins after the Trivy scan completes."""
|
|
||||||
parser = argparse.ArgumentParser(description=__doc__)
|
|
||||||
parser.add_argument("--trivy-json", required=True)
|
|
||||||
parser.add_argument("--waivers")
|
|
||||||
parser.add_argument("--output", required=True)
|
|
||||||
parser.add_argument("--today")
|
|
||||||
args = parser.parse_args(argv)
|
|
||||||
|
|
||||||
trivy_payload = _read_json(Path(args.trivy_json))
|
|
||||||
waiver_path = Path(args.waivers) if args.waivers else None
|
|
||||||
report = build_report(trivy_payload, waiver_path=waiver_path, today_override=args.today)
|
|
||||||
output_path = Path(args.output)
|
|
||||||
output_path.parent.mkdir(parents=True, exist_ok=True)
|
|
||||||
output_path.write_text(json.dumps(report, indent=2, sort_keys=True) + "\n", encoding="utf-8")
|
|
||||||
return 0
|
|
||||||
|
|
||||||
|
|
||||||
if __name__ == "__main__": # pragma: no cover
|
|
||||||
raise SystemExit(main())
|
|
||||||
@ -1,18 +0,0 @@
|
|||||||
max_success_age_hours: 48
|
|
||||||
allow_suspended:
|
|
||||||
- bstein-dev-home/vaultwarden-cred-sync
|
|
||||||
- comms/guest-name-randomizer
|
|
||||||
- comms/othrys-room-reset
|
|
||||||
- comms/pin-othrys-invite
|
|
||||||
- comms/seed-othrys-room
|
|
||||||
- finance/firefly-user-sync
|
|
||||||
- health/wger-admin-ensure
|
|
||||||
- health/wger-user-sync
|
|
||||||
- mailu-mailserver/mailu-sync-nightly
|
|
||||||
- nextcloud/nextcloud-mail-sync
|
|
||||||
- vault/vault-oidc-config
|
|
||||||
ariadne_schedule_tasks:
|
|
||||||
- schedule.mailu_sync
|
|
||||||
- schedule.nextcloud_sync
|
|
||||||
- schedule.vaultwarden_sync
|
|
||||||
- schedule.wger_admin
|
|
||||||
@ -1,108 +0,0 @@
|
|||||||
"""Glue checks for Ariadne schedules exported to VictoriaMetrics."""
|
|
||||||
|
|
||||||
from __future__ import annotations
|
|
||||||
|
|
||||||
import os
|
|
||||||
from datetime import datetime, timezone
|
|
||||||
from pathlib import Path
|
|
||||||
|
|
||||||
import requests
|
|
||||||
import yaml
|
|
||||||
|
|
||||||
|
|
||||||
CONFIG_PATH = Path(__file__).with_name("config.yaml")
|
|
||||||
|
|
||||||
|
|
||||||
def _load_config() -> dict:
|
|
||||||
with CONFIG_PATH.open("r", encoding="utf-8") as handle:
|
|
||||||
return yaml.safe_load(handle) or {}
|
|
||||||
|
|
||||||
|
|
||||||
def _query(promql: str) -> list[dict]:
|
|
||||||
vm_url = os.environ.get("VM_URL", "http://victoria-metrics-single-server:8428").rstrip("/")
|
|
||||||
response = requests.get(f"{vm_url}/api/v1/query", params={"query": promql}, timeout=10)
|
|
||||||
response.raise_for_status()
|
|
||||||
payload = response.json()
|
|
||||||
return payload.get("data", {}).get("result", [])
|
|
||||||
|
|
||||||
|
|
||||||
def _expected_tasks() -> list[dict]:
|
|
||||||
cfg = _load_config()
|
|
||||||
tasks = [
|
|
||||||
_normalize_task(item, cfg)
|
|
||||||
for item in cfg.get("ariadne_schedule_tasks", [])
|
|
||||||
]
|
|
||||||
assert tasks, "No Ariadne schedule tasks configured"
|
|
||||||
return tasks
|
|
||||||
|
|
||||||
|
|
||||||
def _normalize_task(item: object, cfg: dict) -> dict:
|
|
||||||
if isinstance(item, str):
|
|
||||||
return {
|
|
||||||
"task": item,
|
|
||||||
"check_last_success": True,
|
|
||||||
"max_success_age_hours": cfg.get("max_success_age_hours", 48),
|
|
||||||
}
|
|
||||||
if isinstance(item, dict):
|
|
||||||
normalized = dict(item)
|
|
||||||
normalized.setdefault("check_last_success", True)
|
|
||||||
normalized.setdefault("max_success_age_hours", cfg.get("max_success_age_hours", 48))
|
|
||||||
return normalized
|
|
||||||
raise TypeError(f"Unsupported Ariadne schedule task config entry: {item!r}")
|
|
||||||
|
|
||||||
|
|
||||||
def _tracked_tasks(tasks: list[dict]) -> list[dict]:
|
|
||||||
tracked = [item for item in tasks if item.get("check_last_success")]
|
|
||||||
assert tracked, "No Ariadne schedule tasks are marked for success tracking"
|
|
||||||
return tracked
|
|
||||||
|
|
||||||
|
|
||||||
def _task_regex(tasks: list[dict]) -> str:
|
|
||||||
return "|".join(item["task"] for item in tasks)
|
|
||||||
|
|
||||||
|
|
||||||
def test_ariadne_schedule_series_exist():
|
|
||||||
tasks = _expected_tasks()
|
|
||||||
selector = _task_regex(tasks)
|
|
||||||
series = _query(f'ariadne_schedule_next_run_timestamp_seconds{{task=~"{selector}"}}')
|
|
||||||
seen = {item.get("metric", {}).get("task") for item in series}
|
|
||||||
missing = [item["task"] for item in tasks if item["task"] not in seen]
|
|
||||||
assert not missing, f"Missing next-run metrics for: {', '.join(missing)}"
|
|
||||||
|
|
||||||
|
|
||||||
def test_ariadne_schedule_recent_success():
|
|
||||||
tasks = _tracked_tasks(_expected_tasks())
|
|
||||||
selector = _task_regex(tasks)
|
|
||||||
series = _query(f'ariadne_schedule_last_success_timestamp_seconds{{task=~"{selector}"}}')
|
|
||||||
seen = {item.get("metric", {}).get("task") for item in series}
|
|
||||||
missing = [item["task"] for item in tasks if item["task"] not in seen]
|
|
||||||
assert not missing, f"Missing last-success metrics for: {', '.join(missing)}"
|
|
||||||
|
|
||||||
now = datetime.now(timezone.utc)
|
|
||||||
age_by_task = {
|
|
||||||
item.get("metric", {}).get("task"): (now - datetime.fromtimestamp(float(item["value"][1]), tz=timezone.utc)).total_seconds() / 3600
|
|
||||||
for item in series
|
|
||||||
}
|
|
||||||
too_old = [
|
|
||||||
f"{task} ({age_by_task[task]:.1f}h > {item['max_success_age_hours']}h)"
|
|
||||||
for item in tasks
|
|
||||||
if (task := item["task"]) in age_by_task and age_by_task[task] > float(item["max_success_age_hours"])
|
|
||||||
]
|
|
||||||
assert not too_old, "Ariadne schedules are stale: " + ", ".join(too_old)
|
|
||||||
|
|
||||||
|
|
||||||
def test_ariadne_schedule_last_status_present_and_boolean():
|
|
||||||
tasks = _tracked_tasks(_expected_tasks())
|
|
||||||
selector = _task_regex(tasks)
|
|
||||||
series = _query(f'ariadne_schedule_last_status{{task=~"{selector}"}}')
|
|
||||||
seen = {item.get("metric", {}).get("task") for item in series}
|
|
||||||
missing = [item["task"] for item in tasks if item["task"] not in seen]
|
|
||||||
assert not missing, f"Missing last-status metrics for: {', '.join(missing)}"
|
|
||||||
|
|
||||||
invalid = []
|
|
||||||
for item in series:
|
|
||||||
task = item.get("metric", {}).get("task")
|
|
||||||
value = float(item["value"][1])
|
|
||||||
if value not in (0.0, 1.0):
|
|
||||||
invalid.append(f"{task}={value}")
|
|
||||||
assert not invalid, f"Unexpected Ariadne last-status values: {', '.join(invalid)}"
|
|
||||||
@ -1,46 +0,0 @@
|
|||||||
from __future__ import annotations
|
|
||||||
|
|
||||||
from datetime import datetime, timezone
|
|
||||||
from pathlib import Path
|
|
||||||
|
|
||||||
import yaml
|
|
||||||
from kubernetes import client, config
|
|
||||||
|
|
||||||
|
|
||||||
CONFIG_PATH = Path(__file__).with_name("config.yaml")
|
|
||||||
|
|
||||||
|
|
||||||
def _load_config() -> dict:
|
|
||||||
with CONFIG_PATH.open("r", encoding="utf-8") as handle:
|
|
||||||
return yaml.safe_load(handle) or {}
|
|
||||||
|
|
||||||
|
|
||||||
def _load_kube():
|
|
||||||
try:
|
|
||||||
config.load_incluster_config()
|
|
||||||
except config.ConfigException:
|
|
||||||
config.load_kube_config()
|
|
||||||
|
|
||||||
|
|
||||||
def test_glue_cronjobs_recent_success():
|
|
||||||
cfg = _load_config()
|
|
||||||
max_age_hours = int(cfg.get("max_success_age_hours", 48))
|
|
||||||
allow_suspended = set(cfg.get("allow_suspended", []))
|
|
||||||
|
|
||||||
_load_kube()
|
|
||||||
batch = client.BatchV1Api()
|
|
||||||
cronjobs = batch.list_cron_job_for_all_namespaces(label_selector="atlas.bstein.dev/glue=true").items
|
|
||||||
|
|
||||||
assert cronjobs, "No glue cronjobs found with atlas.bstein.dev/glue=true"
|
|
||||||
|
|
||||||
now = datetime.now(timezone.utc)
|
|
||||||
for cronjob in cronjobs:
|
|
||||||
name = f"{cronjob.metadata.namespace}/{cronjob.metadata.name}"
|
|
||||||
if cronjob.spec.suspend:
|
|
||||||
assert name in allow_suspended, f"{name} is suspended but not in allow_suspended"
|
|
||||||
continue
|
|
||||||
|
|
||||||
last_success = cronjob.status.last_successful_time
|
|
||||||
assert last_success is not None, f"{name} has no lastSuccessfulTime"
|
|
||||||
age_hours = (now - last_success).total_seconds() / 3600
|
|
||||||
assert age_hours <= max_age_hours, f"{name} last success {age_hours:.1f}h ago"
|
|
||||||
@ -1,87 +0,0 @@
|
|||||||
"""Glue checks for the metrics the quality-gate publishes."""
|
|
||||||
|
|
||||||
from __future__ import annotations
|
|
||||||
|
|
||||||
import os
|
|
||||||
from pathlib import Path
|
|
||||||
|
|
||||||
import requests
|
|
||||||
import yaml
|
|
||||||
|
|
||||||
|
|
||||||
VM_URL = os.environ.get("VM_URL", "http://victoria-metrics-single-server:8428").rstrip("/")
|
|
||||||
CONFIG_PATH = Path(__file__).with_name("config.yaml")
|
|
||||||
|
|
||||||
|
|
||||||
def _load_config() -> dict:
|
|
||||||
with CONFIG_PATH.open("r", encoding="utf-8") as handle:
|
|
||||||
return yaml.safe_load(handle) or {}
|
|
||||||
|
|
||||||
|
|
||||||
def _query(promql: str) -> list[dict]:
|
|
||||||
response = requests.get(f"{VM_URL}/api/v1/query", params={"query": promql}, timeout=10)
|
|
||||||
response.raise_for_status()
|
|
||||||
payload = response.json()
|
|
||||||
return payload.get("data", {}).get("result", [])
|
|
||||||
|
|
||||||
|
|
||||||
def _expected_tasks() -> list[dict]:
|
|
||||||
cfg = _load_config()
|
|
||||||
tasks = [
|
|
||||||
_normalize_task(item, cfg)
|
|
||||||
for item in cfg.get("ariadne_schedule_tasks", [])
|
|
||||||
]
|
|
||||||
assert tasks, "No Ariadne schedule tasks configured"
|
|
||||||
return tasks
|
|
||||||
|
|
||||||
|
|
||||||
def _normalize_task(item: object, cfg: dict) -> dict:
|
|
||||||
if isinstance(item, str):
|
|
||||||
return {
|
|
||||||
"task": item,
|
|
||||||
"check_last_success": True,
|
|
||||||
"max_success_age_hours": cfg.get("max_success_age_hours", 48),
|
|
||||||
}
|
|
||||||
if isinstance(item, dict):
|
|
||||||
normalized = dict(item)
|
|
||||||
normalized.setdefault("check_last_success", True)
|
|
||||||
normalized.setdefault("max_success_age_hours", cfg.get("max_success_age_hours", 48))
|
|
||||||
return normalized
|
|
||||||
raise TypeError(f"Unsupported Ariadne schedule task config entry: {item!r}")
|
|
||||||
|
|
||||||
|
|
||||||
def _tracked_tasks(tasks: list[dict]) -> list[dict]:
|
|
||||||
tracked = [item for item in tasks if item.get("check_last_success")]
|
|
||||||
assert tracked, "No Ariadne schedule tasks are marked for success tracking"
|
|
||||||
return tracked
|
|
||||||
|
|
||||||
|
|
||||||
def _task_regex(tasks: list[dict]) -> str:
|
|
||||||
return "|".join(item["task"] for item in tasks)
|
|
||||||
|
|
||||||
|
|
||||||
def test_ariadne_schedule_metrics_present():
|
|
||||||
tasks = _expected_tasks()
|
|
||||||
selector = _task_regex(tasks)
|
|
||||||
series = _query(f'ariadne_schedule_next_run_timestamp_seconds{{task=~"{selector}"}}')
|
|
||||||
seen = {item.get("metric", {}).get("task") for item in series}
|
|
||||||
missing = [item["task"] for item in tasks if item["task"] not in seen]
|
|
||||||
assert not missing, f"Missing Ariadne schedule metrics for: {', '.join(missing)}"
|
|
||||||
|
|
||||||
|
|
||||||
def test_ariadne_schedule_success_and_status_metrics_present():
|
|
||||||
tasks = _tracked_tasks(_expected_tasks())
|
|
||||||
selector = _task_regex(tasks)
|
|
||||||
|
|
||||||
success = _query(f'ariadne_schedule_last_success_timestamp_seconds{{task=~"{selector}"}}')
|
|
||||||
status = _query(f'ariadne_schedule_last_status{{task=~"{selector}"}}')
|
|
||||||
|
|
||||||
success_tasks = {item.get("metric", {}).get("task") for item in success}
|
|
||||||
status_tasks = {item.get("metric", {}).get("task") for item in status}
|
|
||||||
expected = {item["task"] for item in tasks}
|
|
||||||
|
|
||||||
missing_success = sorted(expected - success_tasks)
|
|
||||||
missing_status = sorted(expected - status_tasks)
|
|
||||||
|
|
||||||
assert not missing_success, f"Missing Ariadne success metrics for: {', '.join(missing_success)}"
|
|
||||||
assert not missing_status, f"Missing Ariadne status metrics for: {', '.join(missing_status)}"
|
|
||||||
@ -1,401 +0,0 @@
|
|||||||
{
|
|
||||||
"version": 1,
|
|
||||||
"generated_from": "Jenkins titan-iac build 225 Trivy filesystem scan",
|
|
||||||
"default_expires_at": "2026-05-22",
|
|
||||||
"ticket": "atlas-quality-wave-k8s-hardening",
|
|
||||||
"default_reason": "Existing Kubernetes manifest hardening baseline accepted only for the first quality-gate rollout; fix or renew explicitly before expiry.",
|
|
||||||
"misconfigurations": [
|
|
||||||
{
|
|
||||||
"id": "DS-0002",
|
|
||||||
"targets": [
|
|
||||||
"dockerfiles/Dockerfile.ananke-node-helper"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"id": "KSV-0009",
|
|
||||||
"targets": [
|
|
||||||
"services/mailu/vip-controller.yaml",
|
|
||||||
"services/maintenance/k3s-agent-restart-daemonset.yaml"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"id": "KSV-0010",
|
|
||||||
"targets": [
|
|
||||||
"services/maintenance/k3s-agent-restart-daemonset.yaml",
|
|
||||||
"services/maintenance/metis-sentinel-amd64-daemonset.yaml",
|
|
||||||
"services/maintenance/metis-sentinel-arm64-daemonset.yaml",
|
|
||||||
"services/monitoring/jetson-tegrastats-exporter.yaml"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"id": "KSV-0014",
|
|
||||||
"targets": [
|
|
||||||
"infrastructure/cert-manager/cleanup/cert-manager-cleanup-job.yaml",
|
|
||||||
"infrastructure/core/ntp-sync-daemonset.yaml",
|
|
||||||
"infrastructure/longhorn/adopt/longhorn-helm-adopt-job.yaml",
|
|
||||||
"infrastructure/longhorn/core/longhorn-disk-tags-ensure-job.yaml",
|
|
||||||
"infrastructure/longhorn/core/longhorn-settings-ensure-job.yaml",
|
|
||||||
"infrastructure/longhorn/core/vault-sync-deployment.yaml",
|
|
||||||
"infrastructure/longhorn/ui-ingress/oauth2-proxy-longhorn.yaml",
|
|
||||||
"infrastructure/modules/profiles/components/device-plugin-jetson/daemonset.yaml",
|
|
||||||
"infrastructure/modules/profiles/components/device-plugin-minipc/daemonset.yaml",
|
|
||||||
"infrastructure/modules/profiles/components/device-plugin-tethys/daemonset.yaml",
|
|
||||||
"infrastructure/postgres/statefulset.yaml",
|
|
||||||
"infrastructure/vault-csi/vault-csi-provider.yaml",
|
|
||||||
"services/ai-llm/deployment.yaml",
|
|
||||||
"services/bstein-dev-home/backend-deployment.yaml",
|
|
||||||
"services/bstein-dev-home/chat-ai-gateway-deployment.yaml",
|
|
||||||
"services/bstein-dev-home/frontend-deployment.yaml",
|
|
||||||
"services/bstein-dev-home/oneoffs/migrations/portal-migrate-job.yaml",
|
|
||||||
"services/bstein-dev-home/oneoffs/portal-onboarding-e2e-test-job.yaml",
|
|
||||||
"services/bstein-dev-home/vault-sync-deployment.yaml",
|
|
||||||
"services/bstein-dev-home/vaultwarden-cred-sync-cronjob.yaml",
|
|
||||||
"services/comms/atlasbot-deployment.yaml",
|
|
||||||
"services/comms/coturn.yaml",
|
|
||||||
"services/comms/element-call-deployment.yaml",
|
|
||||||
"services/comms/guest-name-job.yaml",
|
|
||||||
"services/comms/guest-register-deployment.yaml",
|
|
||||||
"services/comms/livekit-token-deployment.yaml",
|
|
||||||
"services/comms/livekit.yaml",
|
|
||||||
"services/comms/mas-deployment.yaml",
|
|
||||||
"services/comms/oneoffs/bstein-force-leave-job.yaml",
|
|
||||||
"services/comms/oneoffs/comms-secrets-ensure-job.yaml",
|
|
||||||
"services/comms/oneoffs/mas-admin-client-secret-ensure-job.yaml",
|
|
||||||
"services/comms/oneoffs/mas-db-ensure-job.yaml",
|
|
||||||
"services/comms/oneoffs/mas-local-users-ensure-job.yaml",
|
|
||||||
"services/comms/oneoffs/othrys-kick-numeric-job.yaml",
|
|
||||||
"services/comms/oneoffs/synapse-admin-ensure-job.yaml",
|
|
||||||
"services/comms/oneoffs/synapse-seeder-admin-ensure-job.yaml",
|
|
||||||
"services/comms/oneoffs/synapse-signingkey-ensure-job.yaml",
|
|
||||||
"services/comms/oneoffs/synapse-user-seed-job.yaml",
|
|
||||||
"services/comms/pin-othrys-job.yaml",
|
|
||||||
"services/comms/reset-othrys-room-job.yaml",
|
|
||||||
"services/comms/seed-othrys-room.yaml",
|
|
||||||
"services/comms/vault-sync-deployment.yaml",
|
|
||||||
"services/comms/wellknown.yaml",
|
|
||||||
"services/crypto/monerod/deployment.yaml",
|
|
||||||
"services/crypto/wallet-monero-temp/deployment.yaml",
|
|
||||||
"services/crypto/xmr-miner/deployment.yaml",
|
|
||||||
"services/crypto/xmr-miner/vault-sync-deployment.yaml",
|
|
||||||
"services/crypto/xmr-miner/xmrig-daemonset.yaml",
|
|
||||||
"services/finance/actual-budget-deployment.yaml",
|
|
||||||
"services/finance/firefly-cronjob.yaml",
|
|
||||||
"services/finance/firefly-deployment.yaml",
|
|
||||||
"services/finance/firefly-user-sync-cronjob.yaml",
|
|
||||||
"services/finance/oneoffs/finance-secrets-ensure-job.yaml",
|
|
||||||
"services/gitea/deployment.yaml",
|
|
||||||
"services/harbor/vault-sync-deployment.yaml",
|
|
||||||
"services/health/wger-admin-ensure-cronjob.yaml",
|
|
||||||
"services/health/wger-deployment.yaml",
|
|
||||||
"services/health/wger-user-sync-cronjob.yaml",
|
|
||||||
"services/jellyfin/deployment.yaml",
|
|
||||||
"services/jellyfin/loader.yaml",
|
|
||||||
"services/jenkins/deployment.yaml",
|
|
||||||
"services/jenkins/vault-sync-deployment.yaml",
|
|
||||||
"services/keycloak/deployment.yaml",
|
|
||||||
"services/keycloak/oneoffs/actual-oidc-secret-ensure-job.yaml",
|
|
||||||
"services/keycloak/oneoffs/harbor-oidc-secret-ensure-job.yaml",
|
|
||||||
"services/keycloak/oneoffs/ldap-federation-job.yaml",
|
|
||||||
"services/keycloak/oneoffs/logs-oidc-secret-ensure-job.yaml",
|
|
||||||
"services/keycloak/oneoffs/mas-secrets-ensure-job.yaml",
|
|
||||||
"services/keycloak/oneoffs/metis-oidc-secret-ensure-job.yaml",
|
|
||||||
"services/keycloak/oneoffs/metis-ssh-keys-secret-ensure-job.yaml",
|
|
||||||
"services/keycloak/oneoffs/portal-admin-client-secret-ensure-job.yaml",
|
|
||||||
"services/keycloak/oneoffs/portal-e2e-client-job.yaml",
|
|
||||||
"services/keycloak/oneoffs/portal-e2e-execute-actions-email-test-job.yaml",
|
|
||||||
"services/keycloak/oneoffs/portal-e2e-target-client-job.yaml",
|
|
||||||
"services/keycloak/oneoffs/portal-e2e-token-exchange-permissions-job.yaml",
|
|
||||||
"services/keycloak/oneoffs/portal-e2e-token-exchange-test-job.yaml",
|
|
||||||
"services/keycloak/oneoffs/quality-oidc-secret-ensure-job.yaml",
|
|
||||||
"services/keycloak/oneoffs/realm-settings-job.yaml",
|
|
||||||
"services/keycloak/oneoffs/soteria-oidc-secret-ensure-job.yaml",
|
|
||||||
"services/keycloak/oneoffs/synapse-oidc-secret-ensure-job.yaml",
|
|
||||||
"services/keycloak/oneoffs/user-overrides-job.yaml",
|
|
||||||
"services/keycloak/oneoffs/vault-oidc-secret-ensure-job.yaml",
|
|
||||||
"services/keycloak/vault-sync-deployment.yaml",
|
|
||||||
"services/logging/node-image-gc-rpi4-daemonset.yaml",
|
|
||||||
"services/logging/node-image-prune-rpi5-daemonset.yaml",
|
|
||||||
"services/logging/node-log-rotation-daemonset.yaml",
|
|
||||||
"services/logging/oauth2-proxy.yaml",
|
|
||||||
"services/logging/oneoffs/opensearch-dashboards-setup-job.yaml",
|
|
||||||
"services/logging/oneoffs/opensearch-ism-job.yaml",
|
|
||||||
"services/logging/oneoffs/opensearch-observability-setup-job.yaml",
|
|
||||||
"services/logging/opensearch-prune-cronjob.yaml",
|
|
||||||
"services/logging/vault-sync-deployment.yaml",
|
|
||||||
"services/mailu/mailu-sync-cronjob.yaml",
|
|
||||||
"services/mailu/mailu-sync-listener.yaml",
|
|
||||||
"services/mailu/oneoffs/mailu-sync-job.yaml",
|
|
||||||
"services/mailu/vault-sync-deployment.yaml",
|
|
||||||
"services/mailu/vip-controller.yaml",
|
|
||||||
"services/maintenance/ariadne-deployment.yaml",
|
|
||||||
"services/maintenance/disable-k3s-traefik-daemonset.yaml",
|
|
||||||
"services/maintenance/image-sweeper-cronjob.yaml",
|
|
||||||
"services/maintenance/k3s-agent-restart-daemonset.yaml",
|
|
||||||
"services/maintenance/metis-deployment.yaml",
|
|
||||||
"services/maintenance/metis-k3s-token-sync-cronjob.yaml",
|
|
||||||
"services/maintenance/metis-sentinel-amd64-daemonset.yaml",
|
|
||||||
"services/maintenance/metis-sentinel-arm64-daemonset.yaml",
|
|
||||||
"services/maintenance/node-image-sweeper-daemonset.yaml",
|
|
||||||
"services/maintenance/node-nofile-daemonset.yaml",
|
|
||||||
"services/maintenance/oauth2-proxy-metis.yaml",
|
|
||||||
"services/maintenance/oauth2-proxy-soteria.yaml",
|
|
||||||
"services/maintenance/oneoffs/ariadne-migrate-job.yaml",
|
|
||||||
"services/maintenance/oneoffs/k3s-traefik-cleanup-job.yaml",
|
|
||||||
"services/maintenance/oneoffs/titan-24-rootfs-sweep-job.yaml",
|
|
||||||
"services/maintenance/pod-cleaner-cronjob.yaml",
|
|
||||||
"services/maintenance/soteria-deployment.yaml",
|
|
||||||
"services/maintenance/vault-sync-deployment.yaml",
|
|
||||||
"services/monitoring/dcgm-exporter.yaml",
|
|
||||||
"services/monitoring/jetson-tegrastats-exporter.yaml",
|
|
||||||
"services/monitoring/oneoffs/grafana-org-bootstrap.yaml",
|
|
||||||
"services/monitoring/oneoffs/grafana-user-dedupe-job.yaml",
|
|
||||||
"services/monitoring/platform-quality-gateway-deployment.yaml",
|
|
||||||
"services/monitoring/platform-quality-suite-probe-cronjob.yaml",
|
|
||||||
"services/monitoring/postmark-exporter-deployment.yaml",
|
|
||||||
"services/monitoring/vault-sync-deployment.yaml",
|
|
||||||
"services/nextcloud-mail-sync/cronjob.yaml",
|
|
||||||
"services/nextcloud/collabora.yaml",
|
|
||||||
"services/nextcloud/cronjob.yaml",
|
|
||||||
"services/nextcloud/deployment.yaml",
|
|
||||||
"services/nextcloud/maintenance-cronjob.yaml",
|
|
||||||
"services/oauth2-proxy/deployment.yaml",
|
|
||||||
"services/openldap/statefulset.yaml",
|
|
||||||
"services/outline/deployment.yaml",
|
|
||||||
"services/outline/redis-deployment.yaml",
|
|
||||||
"services/pegasus/deployment.yaml",
|
|
||||||
"services/pegasus/vault-sync-deployment.yaml",
|
|
||||||
"services/planka/deployment.yaml",
|
|
||||||
"services/quality/oauth2-proxy-sonarqube.yaml",
|
|
||||||
"services/quality/sonarqube-deployment.yaml",
|
|
||||||
"services/quality/sonarqube-exporter-deployment.yaml",
|
|
||||||
"services/sui-metrics/base/deployment.yaml",
|
|
||||||
"services/typhon/vault-sync-deployment.yaml",
|
|
||||||
"services/vault/k8s-auth-config-cronjob.yaml",
|
|
||||||
"services/vault/oidc-config-cronjob.yaml",
|
|
||||||
"services/vault/statefulset.yaml",
|
|
||||||
"services/vaultwarden/deployment.yaml"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"id": "KSV-0017",
|
|
||||||
"targets": [
|
|
||||||
"infrastructure/modules/profiles/components/device-plugin-jetson/daemonset.yaml",
|
|
||||||
"infrastructure/modules/profiles/components/device-plugin-minipc/daemonset.yaml",
|
|
||||||
"infrastructure/modules/profiles/components/device-plugin-tethys/daemonset.yaml",
|
|
||||||
"services/logging/node-image-gc-rpi4-daemonset.yaml",
|
|
||||||
"services/logging/node-image-prune-rpi5-daemonset.yaml",
|
|
||||||
"services/logging/node-log-rotation-daemonset.yaml",
|
|
||||||
"services/maintenance/disable-k3s-traefik-daemonset.yaml",
|
|
||||||
"services/maintenance/image-sweeper-cronjob.yaml",
|
|
||||||
"services/maintenance/k3s-agent-restart-daemonset.yaml",
|
|
||||||
"services/maintenance/metis-deployment.yaml",
|
|
||||||
"services/maintenance/metis-sentinel-amd64-daemonset.yaml",
|
|
||||||
"services/maintenance/metis-sentinel-arm64-daemonset.yaml",
|
|
||||||
"services/maintenance/node-image-sweeper-daemonset.yaml",
|
|
||||||
"services/maintenance/node-nofile-daemonset.yaml",
|
|
||||||
"services/maintenance/oneoffs/titan-24-rootfs-sweep-job.yaml",
|
|
||||||
"services/monitoring/dcgm-exporter.yaml",
|
|
||||||
"services/monitoring/jetson-tegrastats-exporter.yaml"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"id": "KSV-0041",
|
|
||||||
"targets": [
|
|
||||||
"infrastructure/cert-manager/cleanup/cert-manager-cleanup-rbac.yaml",
|
|
||||||
"infrastructure/longhorn/adopt/longhorn-adopt-rbac.yaml",
|
|
||||||
"infrastructure/traefik/clusterrole.yaml",
|
|
||||||
"services/bstein-dev-home/rbac.yaml",
|
|
||||||
"services/comms/comms-secrets-ensure-rbac.yaml",
|
|
||||||
"services/comms/mas-db-ensure-rbac.yaml",
|
|
||||||
"services/comms/mas-secrets-ensure-rbac.yaml",
|
|
||||||
"services/maintenance/soteria-rbac.yaml"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"id": "KSV-0047",
|
|
||||||
"targets": [
|
|
||||||
"services/monitoring/rbac.yaml"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"id": "KSV-0053",
|
|
||||||
"targets": [
|
|
||||||
"services/comms/comms-secrets-ensure-rbac.yaml",
|
|
||||||
"services/comms/mas-db-ensure-rbac.yaml",
|
|
||||||
"services/jenkins/serviceaccount.yaml",
|
|
||||||
"services/maintenance/ariadne-rbac.yaml"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"id": "KSV-0056",
|
|
||||||
"targets": [
|
|
||||||
"infrastructure/cert-manager/cleanup/cert-manager-cleanup-rbac.yaml",
|
|
||||||
"infrastructure/longhorn/adopt/longhorn-adopt-rbac.yaml",
|
|
||||||
"services/jenkins/serviceaccount.yaml",
|
|
||||||
"services/maintenance/disable-k3s-traefik-rbac.yaml",
|
|
||||||
"services/maintenance/k3s-traefik-cleanup-rbac.yaml"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"id": "KSV-0114",
|
|
||||||
"targets": [
|
|
||||||
"infrastructure/cert-manager/cleanup/cert-manager-cleanup-rbac.yaml"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"id": "KSV-0118",
|
|
||||||
"targets": [
|
|
||||||
"infrastructure/cert-manager/cleanup/cert-manager-cleanup-job.yaml",
|
|
||||||
"infrastructure/core/coredns-deployment.yaml",
|
|
||||||
"infrastructure/core/ntp-sync-daemonset.yaml",
|
|
||||||
"infrastructure/longhorn/adopt/longhorn-helm-adopt-job.yaml",
|
|
||||||
"infrastructure/longhorn/core/longhorn-disk-tags-ensure-job.yaml",
|
|
||||||
"infrastructure/longhorn/core/longhorn-settings-ensure-job.yaml",
|
|
||||||
"infrastructure/longhorn/core/vault-sync-deployment.yaml",
|
|
||||||
"infrastructure/longhorn/ui-ingress/oauth2-proxy-longhorn.yaml",
|
|
||||||
"infrastructure/modules/profiles/components/device-plugin-jetson/daemonset.yaml",
|
|
||||||
"infrastructure/modules/profiles/components/device-plugin-minipc/daemonset.yaml",
|
|
||||||
"infrastructure/modules/profiles/components/device-plugin-tethys/daemonset.yaml",
|
|
||||||
"infrastructure/postgres/statefulset.yaml",
|
|
||||||
"infrastructure/vault-csi/vault-csi-provider.yaml",
|
|
||||||
"services/ai-llm/deployment.yaml",
|
|
||||||
"services/bstein-dev-home/backend-deployment.yaml",
|
|
||||||
"services/bstein-dev-home/chat-ai-gateway-deployment.yaml",
|
|
||||||
"services/bstein-dev-home/frontend-deployment.yaml",
|
|
||||||
"services/bstein-dev-home/oneoffs/migrations/portal-migrate-job.yaml",
|
|
||||||
"services/bstein-dev-home/oneoffs/portal-onboarding-e2e-test-job.yaml",
|
|
||||||
"services/bstein-dev-home/vault-sync-deployment.yaml",
|
|
||||||
"services/bstein-dev-home/vaultwarden-cred-sync-cronjob.yaml",
|
|
||||||
"services/comms/atlasbot-deployment.yaml",
|
|
||||||
"services/comms/coturn.yaml",
|
|
||||||
"services/comms/element-call-deployment.yaml",
|
|
||||||
"services/comms/guest-name-job.yaml",
|
|
||||||
"services/comms/livekit-token-deployment.yaml",
|
|
||||||
"services/comms/livekit.yaml",
|
|
||||||
"services/comms/mas-deployment.yaml",
|
|
||||||
"services/comms/oneoffs/bstein-force-leave-job.yaml",
|
|
||||||
"services/comms/oneoffs/comms-secrets-ensure-job.yaml",
|
|
||||||
"services/comms/oneoffs/mas-admin-client-secret-ensure-job.yaml",
|
|
||||||
"services/comms/oneoffs/mas-db-ensure-job.yaml",
|
|
||||||
"services/comms/oneoffs/mas-local-users-ensure-job.yaml",
|
|
||||||
"services/comms/oneoffs/othrys-kick-numeric-job.yaml",
|
|
||||||
"services/comms/oneoffs/synapse-admin-ensure-job.yaml",
|
|
||||||
"services/comms/oneoffs/synapse-seeder-admin-ensure-job.yaml",
|
|
||||||
"services/comms/oneoffs/synapse-signingkey-ensure-job.yaml",
|
|
||||||
"services/comms/oneoffs/synapse-user-seed-job.yaml",
|
|
||||||
"services/comms/pin-othrys-job.yaml",
|
|
||||||
"services/comms/reset-othrys-room-job.yaml",
|
|
||||||
"services/comms/seed-othrys-room.yaml",
|
|
||||||
"services/comms/vault-sync-deployment.yaml",
|
|
||||||
"services/comms/wellknown.yaml",
|
|
||||||
"services/crypto/monerod/deployment.yaml",
|
|
||||||
"services/crypto/wallet-monero-temp/deployment.yaml",
|
|
||||||
"services/crypto/xmr-miner/deployment.yaml",
|
|
||||||
"services/crypto/xmr-miner/vault-sync-deployment.yaml",
|
|
||||||
"services/crypto/xmr-miner/xmrig-daemonset.yaml",
|
|
||||||
"services/finance/firefly-cronjob.yaml",
|
|
||||||
"services/finance/firefly-deployment.yaml",
|
|
||||||
"services/finance/firefly-user-sync-cronjob.yaml",
|
|
||||||
"services/finance/oneoffs/finance-secrets-ensure-job.yaml",
|
|
||||||
"services/gitea/deployment.yaml",
|
|
||||||
"services/harbor/vault-sync-deployment.yaml",
|
|
||||||
"services/health/wger-admin-ensure-cronjob.yaml",
|
|
||||||
"services/health/wger-deployment.yaml",
|
|
||||||
"services/health/wger-user-sync-cronjob.yaml",
|
|
||||||
"services/jellyfin/loader.yaml",
|
|
||||||
"services/jenkins/deployment.yaml",
|
|
||||||
"services/jenkins/vault-sync-deployment.yaml",
|
|
||||||
"services/keycloak/oneoffs/actual-oidc-secret-ensure-job.yaml",
|
|
||||||
"services/keycloak/oneoffs/harbor-oidc-secret-ensure-job.yaml",
|
|
||||||
"services/keycloak/oneoffs/ldap-federation-job.yaml",
|
|
||||||
"services/keycloak/oneoffs/logs-oidc-secret-ensure-job.yaml",
|
|
||||||
"services/keycloak/oneoffs/mas-secrets-ensure-job.yaml",
|
|
||||||
"services/keycloak/oneoffs/metis-oidc-secret-ensure-job.yaml",
|
|
||||||
"services/keycloak/oneoffs/metis-ssh-keys-secret-ensure-job.yaml",
|
|
||||||
"services/keycloak/oneoffs/portal-admin-client-secret-ensure-job.yaml",
|
|
||||||
"services/keycloak/oneoffs/portal-e2e-client-job.yaml",
|
|
||||||
"services/keycloak/oneoffs/portal-e2e-execute-actions-email-test-job.yaml",
|
|
||||||
"services/keycloak/oneoffs/portal-e2e-target-client-job.yaml",
|
|
||||||
"services/keycloak/oneoffs/portal-e2e-token-exchange-permissions-job.yaml",
|
|
||||||
"services/keycloak/oneoffs/portal-e2e-token-exchange-test-job.yaml",
|
|
||||||
"services/keycloak/oneoffs/quality-oidc-secret-ensure-job.yaml",
|
|
||||||
"services/keycloak/oneoffs/realm-settings-job.yaml",
|
|
||||||
"services/keycloak/oneoffs/soteria-oidc-secret-ensure-job.yaml",
|
|
||||||
"services/keycloak/oneoffs/synapse-oidc-secret-ensure-job.yaml",
|
|
||||||
"services/keycloak/oneoffs/user-overrides-job.yaml",
|
|
||||||
"services/keycloak/oneoffs/vault-oidc-secret-ensure-job.yaml",
|
|
||||||
"services/keycloak/vault-sync-deployment.yaml",
|
|
||||||
"services/logging/node-image-gc-rpi4-daemonset.yaml",
|
|
||||||
"services/logging/node-image-prune-rpi5-daemonset.yaml",
|
|
||||||
"services/logging/node-log-rotation-daemonset.yaml",
|
|
||||||
"services/logging/oauth2-proxy.yaml",
|
|
||||||
"services/logging/oneoffs/opensearch-dashboards-setup-job.yaml",
|
|
||||||
"services/logging/oneoffs/opensearch-ism-job.yaml",
|
|
||||||
"services/logging/oneoffs/opensearch-observability-setup-job.yaml",
|
|
||||||
"services/logging/opensearch-prune-cronjob.yaml",
|
|
||||||
"services/logging/vault-sync-deployment.yaml",
|
|
||||||
"services/mailu/mailu-sync-cronjob.yaml",
|
|
||||||
"services/mailu/mailu-sync-listener.yaml",
|
|
||||||
"services/mailu/oneoffs/mailu-sync-job.yaml",
|
|
||||||
"services/mailu/vault-sync-deployment.yaml",
|
|
||||||
"services/mailu/vip-controller.yaml",
|
|
||||||
"services/maintenance/ariadne-deployment.yaml",
|
|
||||||
"services/maintenance/disable-k3s-traefik-daemonset.yaml",
|
|
||||||
"services/maintenance/image-sweeper-cronjob.yaml",
|
|
||||||
"services/maintenance/k3s-agent-restart-daemonset.yaml",
|
|
||||||
"services/maintenance/metis-deployment.yaml",
|
|
||||||
"services/maintenance/metis-k3s-token-sync-cronjob.yaml",
|
|
||||||
"services/maintenance/metis-sentinel-amd64-daemonset.yaml",
|
|
||||||
"services/maintenance/metis-sentinel-arm64-daemonset.yaml",
|
|
||||||
"services/maintenance/node-image-sweeper-daemonset.yaml",
|
|
||||||
"services/maintenance/node-nofile-daemonset.yaml",
|
|
||||||
"services/maintenance/oauth2-proxy-metis.yaml",
|
|
||||||
"services/maintenance/oauth2-proxy-soteria.yaml",
|
|
||||||
"services/maintenance/oneoffs/ariadne-migrate-job.yaml",
|
|
||||||
"services/maintenance/oneoffs/k3s-traefik-cleanup-job.yaml",
|
|
||||||
"services/maintenance/oneoffs/titan-24-rootfs-sweep-job.yaml",
|
|
||||||
"services/maintenance/pod-cleaner-cronjob.yaml",
|
|
||||||
"services/maintenance/soteria-deployment.yaml",
|
|
||||||
"services/maintenance/vault-sync-deployment.yaml",
|
|
||||||
"services/monitoring/dcgm-exporter.yaml",
|
|
||||||
"services/monitoring/jetson-tegrastats-exporter.yaml",
|
|
||||||
"services/monitoring/oneoffs/grafana-org-bootstrap.yaml",
|
|
||||||
"services/monitoring/oneoffs/grafana-user-dedupe-job.yaml",
|
|
||||||
"services/monitoring/platform-quality-gateway-deployment.yaml",
|
|
||||||
"services/monitoring/platform-quality-suite-probe-cronjob.yaml",
|
|
||||||
"services/monitoring/postmark-exporter-deployment.yaml",
|
|
||||||
"services/monitoring/vault-sync-deployment.yaml",
|
|
||||||
"services/nextcloud/collabora.yaml",
|
|
||||||
"services/oauth2-proxy/deployment.yaml",
|
|
||||||
"services/openldap/statefulset.yaml",
|
|
||||||
"services/outline/deployment.yaml",
|
|
||||||
"services/outline/redis-deployment.yaml",
|
|
||||||
"services/pegasus/vault-sync-deployment.yaml",
|
|
||||||
"services/quality/oauth2-proxy-sonarqube.yaml",
|
|
||||||
"services/quality/sonarqube-deployment.yaml",
|
|
||||||
"services/quality/sonarqube-exporter-deployment.yaml",
|
|
||||||
"services/sui-metrics/base/deployment.yaml",
|
|
||||||
"services/sui-metrics/overlays/atlas/patch-node-selector.yaml",
|
|
||||||
"services/typhon/deployment.yaml",
|
|
||||||
"services/typhon/vault-sync-deployment.yaml",
|
|
||||||
"services/vault/k8s-auth-config-cronjob.yaml",
|
|
||||||
"services/vault/oidc-config-cronjob.yaml",
|
|
||||||
"services/vaultwarden/deployment.yaml"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"id": "KSV-0121",
|
|
||||||
"targets": [
|
|
||||||
"services/logging/node-image-gc-rpi4-daemonset.yaml",
|
|
||||||
"services/logging/node-image-prune-rpi5-daemonset.yaml",
|
|
||||||
"services/logging/node-log-rotation-daemonset.yaml",
|
|
||||||
"services/maintenance/disable-k3s-traefik-daemonset.yaml",
|
|
||||||
"services/maintenance/image-sweeper-cronjob.yaml",
|
|
||||||
"services/maintenance/metis-deployment.yaml",
|
|
||||||
"services/maintenance/node-image-sweeper-daemonset.yaml",
|
|
||||||
"services/maintenance/node-nofile-daemonset.yaml",
|
|
||||||
"services/maintenance/oneoffs/titan-24-rootfs-sweep-job.yaml"
|
|
||||||
]
|
|
||||||
}
|
|
||||||
]
|
|
||||||
}
|
|
||||||
12
clusters/atlas/applications/kustomization.yaml
Normal file
12
clusters/atlas/applications/kustomization.yaml
Normal file
@ -0,0 +1,12 @@
|
|||||||
|
# clusters/atlas/applications/kustomization.yaml
|
||||||
|
apiVersion: kustomize.config.k8s.io/v1beta1
|
||||||
|
kind: Kustomization
|
||||||
|
resources:
|
||||||
|
- ../../services/crypto
|
||||||
|
- ../../services/gitea
|
||||||
|
- ../../services/jellyfin
|
||||||
|
- ../../services/jitsi
|
||||||
|
- ../../services/monitoring
|
||||||
|
- ../../services/pegasus
|
||||||
|
- ../../services/vault
|
||||||
|
- ../../services/zot
|
||||||
@ -1,23 +0,0 @@
|
|||||||
# clusters/atlas/flux-system/applications/ai-llm/kustomization.yaml
|
|
||||||
apiVersion: kustomize.toolkit.fluxcd.io/v1
|
|
||||||
kind: Kustomization
|
|
||||||
metadata:
|
|
||||||
name: ai-llm
|
|
||||||
namespace: flux-system
|
|
||||||
spec:
|
|
||||||
interval: 10m
|
|
||||||
path: ./services/ai-llm
|
|
||||||
targetNamespace: ai
|
|
||||||
prune: true
|
|
||||||
sourceRef:
|
|
||||||
kind: GitRepository
|
|
||||||
name: flux-system
|
|
||||||
namespace: flux-system
|
|
||||||
wait: true
|
|
||||||
healthChecks:
|
|
||||||
- apiVersion: apps/v1
|
|
||||||
kind: Deployment
|
|
||||||
name: ollama
|
|
||||||
namespace: ai
|
|
||||||
dependsOn:
|
|
||||||
- name: core
|
|
||||||
@ -1,17 +0,0 @@
|
|||||||
# clusters/atlas/flux-system/applications/bstein-dev-home-migrations/kustomization.yaml
|
|
||||||
apiVersion: kustomize.toolkit.fluxcd.io/v1
|
|
||||||
kind: Kustomization
|
|
||||||
metadata:
|
|
||||||
name: bstein-dev-home-migrations
|
|
||||||
namespace: flux-system
|
|
||||||
spec:
|
|
||||||
interval: 10m
|
|
||||||
path: ./services/bstein-dev-home/oneoffs/migrations
|
|
||||||
prune: true
|
|
||||||
force: true
|
|
||||||
sourceRef:
|
|
||||||
kind: GitRepository
|
|
||||||
name: flux-system
|
|
||||||
targetNamespace: bstein-dev-home
|
|
||||||
wait: false
|
|
||||||
suspend: true
|
|
||||||
@ -1,26 +0,0 @@
|
|||||||
# clusters/atlas/flux-system/applications/bstein-dev-home/image-automation.yaml
|
|
||||||
apiVersion: image.toolkit.fluxcd.io/v1
|
|
||||||
kind: ImageUpdateAutomation
|
|
||||||
metadata:
|
|
||||||
name: bstein-dev-home
|
|
||||||
namespace: bstein-dev-home
|
|
||||||
spec:
|
|
||||||
interval: 1m0s
|
|
||||||
sourceRef:
|
|
||||||
kind: GitRepository
|
|
||||||
name: flux-system
|
|
||||||
namespace: flux-system
|
|
||||||
git:
|
|
||||||
checkout:
|
|
||||||
ref:
|
|
||||||
branch: main
|
|
||||||
commit:
|
|
||||||
author:
|
|
||||||
email: ops@bstein.dev
|
|
||||||
name: flux-bot
|
|
||||||
messageTemplate: "chore(bstein-dev-home): automated image update"
|
|
||||||
push:
|
|
||||||
branch: main
|
|
||||||
update:
|
|
||||||
strategy: Setters
|
|
||||||
path: services/bstein-dev-home
|
|
||||||
@ -1,15 +0,0 @@
|
|||||||
# clusters/atlas/flux-system/applications/bstein-dev-home/kustomization.yaml
|
|
||||||
apiVersion: kustomize.toolkit.fluxcd.io/v1
|
|
||||||
kind: Kustomization
|
|
||||||
metadata:
|
|
||||||
name: bstein-dev-home
|
|
||||||
namespace: flux-system
|
|
||||||
spec:
|
|
||||||
interval: 10m
|
|
||||||
path: ./services/bstein-dev-home
|
|
||||||
prune: true
|
|
||||||
sourceRef:
|
|
||||||
kind: GitRepository
|
|
||||||
name: flux-system
|
|
||||||
targetNamespace: bstein-dev-home
|
|
||||||
wait: false
|
|
||||||
@ -1,17 +0,0 @@
|
|||||||
# clusters/atlas/flux-system/applications/comms/kustomization.yaml
|
|
||||||
apiVersion: kustomize.toolkit.fluxcd.io/v1
|
|
||||||
kind: Kustomization
|
|
||||||
metadata:
|
|
||||||
name: comms
|
|
||||||
namespace: flux-system
|
|
||||||
spec:
|
|
||||||
interval: 10m
|
|
||||||
prune: true
|
|
||||||
sourceRef:
|
|
||||||
kind: GitRepository
|
|
||||||
name: flux-system
|
|
||||||
path: ./services/comms
|
|
||||||
targetNamespace: comms
|
|
||||||
timeout: 2m
|
|
||||||
dependsOn:
|
|
||||||
- name: traefik
|
|
||||||
@ -1,24 +0,0 @@
|
|||||||
# clusters/atlas/flux-system/applications/finance/kustomization.yaml
|
|
||||||
apiVersion: kustomize.toolkit.fluxcd.io/v1
|
|
||||||
kind: Kustomization
|
|
||||||
metadata:
|
|
||||||
name: finance
|
|
||||||
namespace: flux-system
|
|
||||||
spec:
|
|
||||||
interval: 10m
|
|
||||||
path: ./services/finance
|
|
||||||
prune: true
|
|
||||||
sourceRef:
|
|
||||||
kind: GitRepository
|
|
||||||
name: flux-system
|
|
||||||
targetNamespace: finance
|
|
||||||
healthChecks:
|
|
||||||
- apiVersion: apps/v1
|
|
||||||
kind: Deployment
|
|
||||||
name: actual-budget
|
|
||||||
namespace: finance
|
|
||||||
- apiVersion: apps/v1
|
|
||||||
kind: Deployment
|
|
||||||
name: firefly
|
|
||||||
namespace: finance
|
|
||||||
wait: false
|
|
||||||
@ -13,8 +13,4 @@ spec:
|
|||||||
kind: GitRepository
|
kind: GitRepository
|
||||||
name: flux-system
|
name: flux-system
|
||||||
namespace: flux-system
|
namespace: flux-system
|
||||||
dependsOn:
|
|
||||||
- name: longhorn
|
|
||||||
- name: vault
|
|
||||||
- name: postgres
|
|
||||||
wait: true
|
wait: true
|
||||||
|
|||||||
@ -1,27 +0,0 @@
|
|||||||
# clusters/atlas/flux-system/applications/harbor/image-automation.yaml
|
|
||||||
apiVersion: image.toolkit.fluxcd.io/v1
|
|
||||||
kind: ImageUpdateAutomation
|
|
||||||
metadata:
|
|
||||||
name: harbor
|
|
||||||
namespace: harbor
|
|
||||||
spec:
|
|
||||||
suspend: true
|
|
||||||
interval: 5m0s
|
|
||||||
sourceRef:
|
|
||||||
kind: GitRepository
|
|
||||||
name: flux-system
|
|
||||||
namespace: flux-system
|
|
||||||
git:
|
|
||||||
checkout:
|
|
||||||
ref:
|
|
||||||
branch: feature/ci-gitops
|
|
||||||
commit:
|
|
||||||
author:
|
|
||||||
email: ops@bstein.dev
|
|
||||||
name: flux-bot
|
|
||||||
messageTemplate: "chore(harbor): apply image updates"
|
|
||||||
push:
|
|
||||||
branch: feature/ci-gitops
|
|
||||||
update:
|
|
||||||
strategy: Setters
|
|
||||||
path: ./services/harbor
|
|
||||||
@ -1,21 +0,0 @@
|
|||||||
# clusters/atlas/flux-system/applications/harbor/kustomization.yaml
|
|
||||||
apiVersion: kustomize.toolkit.fluxcd.io/v1
|
|
||||||
kind: Kustomization
|
|
||||||
metadata:
|
|
||||||
name: harbor
|
|
||||||
namespace: flux-system
|
|
||||||
spec:
|
|
||||||
interval: 10m
|
|
||||||
path: ./services/harbor
|
|
||||||
targetNamespace: harbor
|
|
||||||
prune: false
|
|
||||||
sourceRef:
|
|
||||||
kind: GitRepository
|
|
||||||
name: flux-system
|
|
||||||
namespace: flux-system
|
|
||||||
wait: false
|
|
||||||
dependsOn:
|
|
||||||
- name: core
|
|
||||||
- name: longhorn
|
|
||||||
- name: vault
|
|
||||||
- name: postgres
|
|
||||||
@ -1,25 +0,0 @@
|
|||||||
# clusters/atlas/flux-system/applications/health/kustomization.yaml
|
|
||||||
apiVersion: kustomize.toolkit.fluxcd.io/v1
|
|
||||||
kind: Kustomization
|
|
||||||
metadata:
|
|
||||||
name: health
|
|
||||||
namespace: flux-system
|
|
||||||
spec:
|
|
||||||
interval: 10m
|
|
||||||
path: ./services/health
|
|
||||||
prune: true
|
|
||||||
sourceRef:
|
|
||||||
kind: GitRepository
|
|
||||||
name: flux-system
|
|
||||||
targetNamespace: health
|
|
||||||
dependsOn:
|
|
||||||
- name: keycloak
|
|
||||||
- name: postgres
|
|
||||||
- name: traefik
|
|
||||||
- name: vault
|
|
||||||
healthChecks:
|
|
||||||
- apiVersion: apps/v1
|
|
||||||
kind: Deployment
|
|
||||||
name: wger
|
|
||||||
namespace: health
|
|
||||||
wait: false
|
|
||||||
@ -15,6 +15,5 @@ spec:
|
|||||||
namespace: flux-system
|
namespace: flux-system
|
||||||
dependsOn:
|
dependsOn:
|
||||||
- name: core
|
- name: core
|
||||||
- name: openldap
|
|
||||||
wait: true
|
wait: true
|
||||||
timeout: 5m
|
timeout: 5m
|
||||||
|
|||||||
@ -1,28 +0,0 @@
|
|||||||
# clusters/atlas/flux-system/applications/jenkins/kustomization.yaml
|
|
||||||
apiVersion: kustomize.toolkit.fluxcd.io/v1
|
|
||||||
kind: Kustomization
|
|
||||||
metadata:
|
|
||||||
name: jenkins
|
|
||||||
namespace: flux-system
|
|
||||||
spec:
|
|
||||||
interval: 10m
|
|
||||||
path: ./services/jenkins
|
|
||||||
prune: true
|
|
||||||
sourceRef:
|
|
||||||
kind: GitRepository
|
|
||||||
name: flux-system
|
|
||||||
targetNamespace: jenkins
|
|
||||||
dependsOn:
|
|
||||||
- name: helm
|
|
||||||
- name: traefik
|
|
||||||
healthChecks:
|
|
||||||
- apiVersion: apps/v1
|
|
||||||
kind: Deployment
|
|
||||||
name: jenkins
|
|
||||||
namespace: jenkins
|
|
||||||
- apiVersion: v1
|
|
||||||
kind: Service
|
|
||||||
name: jenkins
|
|
||||||
namespace: jenkins
|
|
||||||
wait: false
|
|
||||||
timeout: 20m
|
|
||||||
@ -1,18 +1,18 @@
|
|||||||
# clusters/atlas/flux-system/applications/openldap/kustomization.yaml
|
# clusters/atlas/flux-system/applications/jitsi/kustomization.yaml
|
||||||
apiVersion: kustomize.toolkit.fluxcd.io/v1
|
apiVersion: kustomize.toolkit.fluxcd.io/v1
|
||||||
kind: Kustomization
|
kind: Kustomization
|
||||||
metadata:
|
metadata:
|
||||||
name: openldap
|
name: jitsi
|
||||||
namespace: flux-system
|
namespace: flux-system
|
||||||
spec:
|
spec:
|
||||||
interval: 10m
|
interval: 10m
|
||||||
|
path: ./services/jitsi
|
||||||
|
targetNamespace: jitsi
|
||||||
prune: true
|
prune: true
|
||||||
sourceRef:
|
sourceRef:
|
||||||
kind: GitRepository
|
kind: GitRepository
|
||||||
name: flux-system
|
name: flux-system
|
||||||
namespace: flux-system
|
namespace: flux-system
|
||||||
path: ./services/openldap
|
|
||||||
targetNamespace: sso
|
|
||||||
dependsOn:
|
dependsOn:
|
||||||
- name: core
|
- name: core
|
||||||
wait: true
|
wait: true
|
||||||
@ -1,19 +0,0 @@
|
|||||||
# clusters/atlas/flux-system/applications/keycloak/kustomization.yaml
|
|
||||||
apiVersion: kustomize.toolkit.fluxcd.io/v1
|
|
||||||
kind: Kustomization
|
|
||||||
metadata:
|
|
||||||
name: keycloak
|
|
||||||
namespace: flux-system
|
|
||||||
spec:
|
|
||||||
interval: 10m
|
|
||||||
prune: true
|
|
||||||
sourceRef:
|
|
||||||
kind: GitRepository
|
|
||||||
name: flux-system
|
|
||||||
path: ./services/keycloak
|
|
||||||
targetNamespace: sso
|
|
||||||
dependsOn:
|
|
||||||
- name: longhorn
|
|
||||||
- name: vault
|
|
||||||
- name: postgres
|
|
||||||
timeout: 2m
|
|
||||||
@ -2,34 +2,14 @@
|
|||||||
apiVersion: kustomize.config.k8s.io/v1beta1
|
apiVersion: kustomize.config.k8s.io/v1beta1
|
||||||
kind: Kustomization
|
kind: Kustomization
|
||||||
resources:
|
resources:
|
||||||
|
- zot/kustomization.yaml
|
||||||
- gitea/kustomization.yaml
|
- gitea/kustomization.yaml
|
||||||
- vault/kustomization.yaml
|
- vault/kustomization.yaml
|
||||||
- vaultwarden/kustomization.yaml
|
- jitsi/kustomization.yaml
|
||||||
- comms/kustomization.yaml
|
|
||||||
- crypto/kustomization.yaml
|
- crypto/kustomization.yaml
|
||||||
- monerod/kustomization.yaml
|
- monerod/kustomization.yaml
|
||||||
- pegasus/kustomization.yaml
|
- pegasus/kustomization.yaml
|
||||||
- pegasus/image-automation.yaml
|
- pegasus/image-automation.yaml
|
||||||
- bstein-dev-home/kustomization.yaml
|
|
||||||
- bstein-dev-home/image-automation.yaml
|
|
||||||
- bstein-dev-home-migrations/kustomization.yaml
|
|
||||||
- harbor/kustomization.yaml
|
|
||||||
- harbor/image-automation.yaml
|
|
||||||
- jellyfin/kustomization.yaml
|
- jellyfin/kustomization.yaml
|
||||||
- xmr-miner/kustomization.yaml
|
- xmr-miner/kustomization.yaml
|
||||||
- wallet-monero-temp/kustomization.yaml
|
|
||||||
- sui-metrics/kustomization.yaml
|
- sui-metrics/kustomization.yaml
|
||||||
- openldap/kustomization.yaml
|
|
||||||
- keycloak/kustomization.yaml
|
|
||||||
- quality/kustomization.yaml
|
|
||||||
- oauth2-proxy/kustomization.yaml
|
|
||||||
- mailu/kustomization.yaml
|
|
||||||
- jenkins/kustomization.yaml
|
|
||||||
- ai-llm/kustomization.yaml
|
|
||||||
- typhon/kustomization.yaml
|
|
||||||
- nextcloud/kustomization.yaml
|
|
||||||
- nextcloud-mail-sync/kustomization.yaml
|
|
||||||
- outline/kustomization.yaml
|
|
||||||
- planka/kustomization.yaml
|
|
||||||
- finance/kustomization.yaml
|
|
||||||
- health/kustomization.yaml
|
|
||||||
|
|||||||
@ -16,4 +16,4 @@ spec:
|
|||||||
dependsOn:
|
dependsOn:
|
||||||
- name: crypto
|
- name: crypto
|
||||||
wait: true
|
wait: true
|
||||||
timeout: 15m
|
timeout: 5m
|
||||||
|
|||||||
@ -1,17 +0,0 @@
|
|||||||
# clusters/atlas/flux-system/applications/nextcloud-mail-sync/kustomization.yaml
|
|
||||||
apiVersion: kustomize.toolkit.fluxcd.io/v1
|
|
||||||
kind: Kustomization
|
|
||||||
metadata:
|
|
||||||
name: nextcloud-mail-sync
|
|
||||||
namespace: flux-system
|
|
||||||
spec:
|
|
||||||
interval: 10m
|
|
||||||
prune: true
|
|
||||||
sourceRef:
|
|
||||||
kind: GitRepository
|
|
||||||
name: flux-system
|
|
||||||
path: ./services/nextcloud-mail-sync
|
|
||||||
targetNamespace: nextcloud
|
|
||||||
timeout: 2m
|
|
||||||
dependsOn:
|
|
||||||
- name: keycloak
|
|
||||||
@ -1,16 +0,0 @@
|
|||||||
# clusters/atlas/flux-system/applications/nextcloud/kustomization.yaml
|
|
||||||
apiVersion: kustomize.toolkit.fluxcd.io/v1beta2
|
|
||||||
kind: Kustomization
|
|
||||||
metadata:
|
|
||||||
name: nextcloud
|
|
||||||
namespace: flux-system
|
|
||||||
spec:
|
|
||||||
interval: 10m
|
|
||||||
path: ./services/nextcloud
|
|
||||||
targetNamespace: nextcloud
|
|
||||||
prune: true
|
|
||||||
sourceRef:
|
|
||||||
kind: GitRepository
|
|
||||||
name: flux-system
|
|
||||||
namespace: flux-system
|
|
||||||
wait: true
|
|
||||||
@ -1,15 +0,0 @@
|
|||||||
# clusters/atlas/flux-system/applications/oauth2-proxy/kustomization.yaml
|
|
||||||
apiVersion: kustomize.toolkit.fluxcd.io/v1
|
|
||||||
kind: Kustomization
|
|
||||||
metadata:
|
|
||||||
name: oauth2-proxy
|
|
||||||
namespace: flux-system
|
|
||||||
spec:
|
|
||||||
interval: 10m
|
|
||||||
prune: true
|
|
||||||
sourceRef:
|
|
||||||
kind: GitRepository
|
|
||||||
name: flux-system
|
|
||||||
path: ./services/oauth2-proxy
|
|
||||||
targetNamespace: sso
|
|
||||||
timeout: 2m
|
|
||||||
@ -1,28 +0,0 @@
|
|||||||
# clusters/atlas/flux-system/applications/outline/kustomization.yaml
|
|
||||||
apiVersion: kustomize.toolkit.fluxcd.io/v1
|
|
||||||
kind: Kustomization
|
|
||||||
metadata:
|
|
||||||
name: outline
|
|
||||||
namespace: flux-system
|
|
||||||
spec:
|
|
||||||
interval: 10m
|
|
||||||
path: ./services/outline
|
|
||||||
prune: true
|
|
||||||
sourceRef:
|
|
||||||
kind: GitRepository
|
|
||||||
name: flux-system
|
|
||||||
targetNamespace: outline
|
|
||||||
dependsOn:
|
|
||||||
- name: keycloak
|
|
||||||
- name: mailu
|
|
||||||
- name: traefik
|
|
||||||
healthChecks:
|
|
||||||
- apiVersion: apps/v1
|
|
||||||
kind: Deployment
|
|
||||||
name: outline
|
|
||||||
namespace: outline
|
|
||||||
- apiVersion: v1
|
|
||||||
kind: Service
|
|
||||||
name: outline
|
|
||||||
namespace: outline
|
|
||||||
wait: false
|
|
||||||
@ -1,26 +1,20 @@
|
|||||||
# clusters/atlas/flux-system/applications/pegasus/image-automation.yaml
|
# clusters/atlas/flux-system/applications/pegasus/image-automation.yaml
|
||||||
apiVersion: image.toolkit.fluxcd.io/v1
|
apiVersion: image.toolkit.fluxcd.io/v1beta1
|
||||||
kind: ImageUpdateAutomation
|
kind: ImageUpdateAutomation
|
||||||
metadata:
|
metadata:
|
||||||
name: pegasus
|
name: pegasus
|
||||||
namespace: jellyfin
|
namespace: flux-system
|
||||||
spec:
|
spec:
|
||||||
interval: 1m0s
|
interval: 1m0s
|
||||||
sourceRef:
|
sourceRef:
|
||||||
kind: GitRepository
|
kind: GitRepository
|
||||||
name: flux-system
|
name: flux-system
|
||||||
namespace: flux-system
|
|
||||||
git:
|
git:
|
||||||
checkout:
|
|
||||||
ref:
|
|
||||||
branch: feature/ci-gitops
|
|
||||||
commit:
|
commit:
|
||||||
author:
|
author:
|
||||||
email: ops@bstein.dev
|
email: ops@bstein.dev
|
||||||
name: flux-bot
|
name: flux-bot
|
||||||
messageTemplate: "chore(pegasus): apply image updates"
|
messageTemplate: "chore(pegasus): update image to {{range .Updated.Images}}{{.}}{{end}}"
|
||||||
push:
|
|
||||||
branch: feature/ci-gitops
|
|
||||||
update:
|
update:
|
||||||
strategy: Setters
|
strategy: Setters
|
||||||
path: services/pegasus
|
path: ./services/pegasus
|
||||||
|
|||||||
@ -1,28 +0,0 @@
|
|||||||
# clusters/atlas/flux-system/applications/planka/kustomization.yaml
|
|
||||||
apiVersion: kustomize.toolkit.fluxcd.io/v1
|
|
||||||
kind: Kustomization
|
|
||||||
metadata:
|
|
||||||
name: planka
|
|
||||||
namespace: flux-system
|
|
||||||
spec:
|
|
||||||
interval: 10m
|
|
||||||
path: ./services/planka
|
|
||||||
prune: true
|
|
||||||
sourceRef:
|
|
||||||
kind: GitRepository
|
|
||||||
name: flux-system
|
|
||||||
targetNamespace: planka
|
|
||||||
dependsOn:
|
|
||||||
- name: keycloak
|
|
||||||
- name: mailu
|
|
||||||
- name: traefik
|
|
||||||
healthChecks:
|
|
||||||
- apiVersion: apps/v1
|
|
||||||
kind: Deployment
|
|
||||||
name: planka
|
|
||||||
namespace: planka
|
|
||||||
- apiVersion: v1
|
|
||||||
kind: Service
|
|
||||||
name: planka
|
|
||||||
namespace: planka
|
|
||||||
wait: false
|
|
||||||
@ -1,35 +0,0 @@
|
|||||||
# clusters/atlas/flux-system/applications/quality/kustomization.yaml
|
|
||||||
apiVersion: kustomize.toolkit.fluxcd.io/v1
|
|
||||||
kind: Kustomization
|
|
||||||
metadata:
|
|
||||||
name: quality
|
|
||||||
namespace: flux-system
|
|
||||||
spec:
|
|
||||||
interval: 10m
|
|
||||||
path: ./services/quality
|
|
||||||
prune: true
|
|
||||||
sourceRef:
|
|
||||||
kind: GitRepository
|
|
||||||
name: flux-system
|
|
||||||
targetNamespace: quality
|
|
||||||
dependsOn:
|
|
||||||
- name: traefik
|
|
||||||
- name: cert-manager
|
|
||||||
- name: keycloak
|
|
||||||
- name: vault
|
|
||||||
- name: postgres
|
|
||||||
healthChecks:
|
|
||||||
- apiVersion: apps/v1
|
|
||||||
kind: Deployment
|
|
||||||
name: sonarqube
|
|
||||||
namespace: quality
|
|
||||||
- apiVersion: apps/v1
|
|
||||||
kind: Deployment
|
|
||||||
name: sonarqube-exporter
|
|
||||||
namespace: quality
|
|
||||||
- apiVersion: apps/v1
|
|
||||||
kind: Deployment
|
|
||||||
name: oauth2-proxy-sonarqube
|
|
||||||
namespace: quality
|
|
||||||
wait: false
|
|
||||||
timeout: 20m
|
|
||||||
@ -1,29 +0,0 @@
|
|||||||
# clusters/atlas/flux-system/applications/typhon/kustomization.yaml
|
|
||||||
apiVersion: kustomize.toolkit.fluxcd.io/v1
|
|
||||||
kind: Kustomization
|
|
||||||
metadata:
|
|
||||||
name: typhon
|
|
||||||
namespace: flux-system
|
|
||||||
spec:
|
|
||||||
interval: 10m
|
|
||||||
path: ./services/typhon
|
|
||||||
prune: true
|
|
||||||
sourceRef:
|
|
||||||
kind: GitRepository
|
|
||||||
name: flux-system
|
|
||||||
targetNamespace: climate
|
|
||||||
dependsOn:
|
|
||||||
- name: vault
|
|
||||||
- name: vault-csi
|
|
||||||
- name: monitoring
|
|
||||||
healthChecks:
|
|
||||||
- apiVersion: apps/v1
|
|
||||||
kind: Deployment
|
|
||||||
name: typhon
|
|
||||||
namespace: climate
|
|
||||||
- apiVersion: v1
|
|
||||||
kind: Service
|
|
||||||
name: typhon
|
|
||||||
namespace: climate
|
|
||||||
wait: false
|
|
||||||
timeout: 20m
|
|
||||||
@ -15,5 +15,4 @@ spec:
|
|||||||
prune: true
|
prune: true
|
||||||
wait: true
|
wait: true
|
||||||
dependsOn:
|
dependsOn:
|
||||||
- name: longhorn
|
|
||||||
- name: helm
|
- name: helm
|
||||||
|
|||||||
@ -1,20 +0,0 @@
|
|||||||
# clusters/atlas/flux-system/applications/vaultwarden/kustomization.yaml
|
|
||||||
apiVersion: kustomize.toolkit.fluxcd.io/v1
|
|
||||||
kind: Kustomization
|
|
||||||
metadata:
|
|
||||||
name: vaultwarden
|
|
||||||
namespace: flux-system
|
|
||||||
spec:
|
|
||||||
interval: 10m
|
|
||||||
suspend: false
|
|
||||||
sourceRef:
|
|
||||||
kind: GitRepository
|
|
||||||
name: flux-system
|
|
||||||
namespace: flux-system
|
|
||||||
path: ./services/vaultwarden
|
|
||||||
targetNamespace: vaultwarden
|
|
||||||
prune: true
|
|
||||||
wait: true
|
|
||||||
dependsOn:
|
|
||||||
- name: helm
|
|
||||||
- name: traefik
|
|
||||||
@ -1,19 +0,0 @@
|
|||||||
# clusters/atlas/flux-system/applications/wallet-monero-temp/kustomization.yaml
|
|
||||||
apiVersion: kustomize.toolkit.fluxcd.io/v1
|
|
||||||
kind: Kustomization
|
|
||||||
metadata:
|
|
||||||
name: wallet-monero-temp
|
|
||||||
namespace: flux-system
|
|
||||||
spec:
|
|
||||||
interval: 10m
|
|
||||||
path: ./services/crypto/wallet-monero-temp
|
|
||||||
targetNamespace: crypto
|
|
||||||
prune: true
|
|
||||||
sourceRef:
|
|
||||||
kind: GitRepository
|
|
||||||
name: flux-system
|
|
||||||
namespace: flux-system
|
|
||||||
dependsOn:
|
|
||||||
- name: crypto
|
|
||||||
- name: xmr-miner
|
|
||||||
wait: true
|
|
||||||
@ -17,4 +17,3 @@ spec:
|
|||||||
- name: crypto
|
- name: crypto
|
||||||
- name: monerod
|
- name: monerod
|
||||||
wait: true
|
wait: true
|
||||||
timeout: 30m
|
|
||||||
|
|||||||
@ -1,18 +1,18 @@
|
|||||||
# clusters/atlas/flux-system/applications/mailu/kustomization.yaml
|
# clusters/atlas/flux-system/applications/zot/kustomization.yaml
|
||||||
apiVersion: kustomize.toolkit.fluxcd.io/v1
|
apiVersion: kustomize.toolkit.fluxcd.io/v1
|
||||||
kind: Kustomization
|
kind: Kustomization
|
||||||
metadata:
|
metadata:
|
||||||
name: mailu
|
name: zot
|
||||||
namespace: flux-system
|
namespace: flux-system
|
||||||
spec:
|
spec:
|
||||||
interval: 10m
|
interval: 10m
|
||||||
|
path: ./services/zot
|
||||||
|
targetNamespace: zot
|
||||||
|
prune: false
|
||||||
sourceRef:
|
sourceRef:
|
||||||
kind: GitRepository
|
kind: GitRepository
|
||||||
name: flux-system
|
name: flux-system
|
||||||
namespace: flux-system
|
namespace: flux-system
|
||||||
path: ./services/mailu
|
|
||||||
targetNamespace: mailu-mailserver
|
|
||||||
prune: true
|
|
||||||
wait: true
|
wait: true
|
||||||
dependsOn:
|
dependsOn:
|
||||||
- name: helm
|
- name: core
|
||||||
File diff suppressed because it is too large
Load Diff
@ -1,4 +1,3 @@
|
|||||||
# clusters/atlas/flux-system/gotk-sync.yaml
|
|
||||||
# This manifest was generated by flux. DO NOT EDIT.
|
# This manifest was generated by flux. DO NOT EDIT.
|
||||||
---
|
---
|
||||||
apiVersion: source.toolkit.fluxcd.io/v1
|
apiVersion: source.toolkit.fluxcd.io/v1
|
||||||
@ -9,7 +8,7 @@ metadata:
|
|||||||
spec:
|
spec:
|
||||||
interval: 1m0s
|
interval: 1m0s
|
||||||
ref:
|
ref:
|
||||||
branch: main
|
branch: restructure/hybrid-clusters
|
||||||
secretRef:
|
secretRef:
|
||||||
name: flux-system-gitea
|
name: flux-system-gitea
|
||||||
url: ssh://git@scm.bstein.dev:2242/bstein/titan-iac.git
|
url: ssh://git@scm.bstein.dev:2242/bstein/titan-iac.git
|
||||||
|
|||||||
@ -1,17 +0,0 @@
|
|||||||
# clusters/atlas/flux-system/platform/cert-manager-cleanup/kustomization.yaml
|
|
||||||
apiVersion: kustomize.toolkit.fluxcd.io/v1
|
|
||||||
kind: Kustomization
|
|
||||||
metadata:
|
|
||||||
name: cert-manager-cleanup
|
|
||||||
namespace: flux-system
|
|
||||||
spec:
|
|
||||||
interval: 30m
|
|
||||||
path: ./infrastructure/cert-manager/cleanup
|
|
||||||
prune: true
|
|
||||||
force: true
|
|
||||||
sourceRef:
|
|
||||||
kind: GitRepository
|
|
||||||
name: flux-system
|
|
||||||
namespace: flux-system
|
|
||||||
targetNamespace: cert-manager
|
|
||||||
wait: true
|
|
||||||
@ -1,19 +0,0 @@
|
|||||||
# clusters/atlas/flux-system/platform/cert-manager/kustomization.yaml
|
|
||||||
apiVersion: kustomize.toolkit.fluxcd.io/v1
|
|
||||||
kind: Kustomization
|
|
||||||
metadata:
|
|
||||||
name: cert-manager
|
|
||||||
namespace: flux-system
|
|
||||||
spec:
|
|
||||||
interval: 30m
|
|
||||||
path: ./infrastructure/cert-manager
|
|
||||||
prune: true
|
|
||||||
force: true
|
|
||||||
sourceRef:
|
|
||||||
kind: GitRepository
|
|
||||||
name: flux-system
|
|
||||||
namespace: flux-system
|
|
||||||
targetNamespace: cert-manager
|
|
||||||
dependsOn:
|
|
||||||
- name: helm
|
|
||||||
wait: true
|
|
||||||
@ -1,20 +0,0 @@
|
|||||||
# clusters/atlas/flux-system/platform/gitops-ui/kustomization.yaml
|
|
||||||
apiVersion: kustomize.toolkit.fluxcd.io/v1
|
|
||||||
kind: Kustomization
|
|
||||||
metadata:
|
|
||||||
name: gitops-ui
|
|
||||||
namespace: flux-system
|
|
||||||
spec:
|
|
||||||
interval: 10m
|
|
||||||
timeout: 10m
|
|
||||||
path: ./services/gitops-ui
|
|
||||||
prune: true
|
|
||||||
sourceRef:
|
|
||||||
kind: GitRepository
|
|
||||||
name: flux-system
|
|
||||||
namespace: flux-system
|
|
||||||
targetNamespace: flux-system
|
|
||||||
dependsOn:
|
|
||||||
- name: helm
|
|
||||||
- name: traefik
|
|
||||||
wait: true
|
|
||||||
@ -4,17 +4,6 @@ kind: Kustomization
|
|||||||
resources:
|
resources:
|
||||||
- core/kustomization.yaml
|
- core/kustomization.yaml
|
||||||
- helm/kustomization.yaml
|
- helm/kustomization.yaml
|
||||||
- cert-manager/kustomization.yaml
|
|
||||||
- metallb/kustomization.yaml
|
|
||||||
- traefik/kustomization.yaml
|
- traefik/kustomization.yaml
|
||||||
- gitops-ui/kustomization.yaml
|
|
||||||
- monitoring/kustomization.yaml
|
- monitoring/kustomization.yaml
|
||||||
- logging/kustomization.yaml
|
|
||||||
- maintenance/kustomization.yaml
|
|
||||||
- maintenance/image-automation.yaml
|
|
||||||
- longhorn-adopt/kustomization.yaml
|
|
||||||
- longhorn/kustomization.yaml
|
|
||||||
- longhorn-ui/kustomization.yaml
|
- longhorn-ui/kustomization.yaml
|
||||||
- postgres/kustomization.yaml
|
|
||||||
- ../platform/vault-csi/kustomization.yaml
|
|
||||||
- ../platform/vault-injector/kustomization.yaml
|
|
||||||
|
|||||||
@ -1,14 +0,0 @@
|
|||||||
# clusters/atlas/flux-system/platform/logging/kustomization.yaml
|
|
||||||
apiVersion: kustomize.toolkit.fluxcd.io/v1
|
|
||||||
kind: Kustomization
|
|
||||||
metadata:
|
|
||||||
name: logging
|
|
||||||
namespace: flux-system
|
|
||||||
spec:
|
|
||||||
interval: 10m
|
|
||||||
path: ./services/logging
|
|
||||||
prune: true
|
|
||||||
sourceRef:
|
|
||||||
kind: GitRepository
|
|
||||||
name: flux-system
|
|
||||||
wait: false
|
|
||||||
@ -1,17 +0,0 @@
|
|||||||
# clusters/atlas/flux-system/platform/longhorn-adopt/kustomization.yaml
|
|
||||||
apiVersion: kustomize.toolkit.fluxcd.io/v1
|
|
||||||
kind: Kustomization
|
|
||||||
metadata:
|
|
||||||
name: longhorn-adopt
|
|
||||||
namespace: flux-system
|
|
||||||
spec:
|
|
||||||
interval: 30m
|
|
||||||
path: ./infrastructure/longhorn/adopt
|
|
||||||
prune: true
|
|
||||||
force: true
|
|
||||||
sourceRef:
|
|
||||||
kind: GitRepository
|
|
||||||
name: flux-system
|
|
||||||
namespace: flux-system
|
|
||||||
targetNamespace: longhorn-system
|
|
||||||
wait: true
|
|
||||||
@ -15,5 +15,4 @@ spec:
|
|||||||
namespace: flux-system
|
namespace: flux-system
|
||||||
dependsOn:
|
dependsOn:
|
||||||
- name: core
|
- name: core
|
||||||
- name: longhorn
|
|
||||||
wait: true
|
wait: true
|
||||||
|
|||||||
@ -1,20 +0,0 @@
|
|||||||
# clusters/atlas/flux-system/platform/longhorn/kustomization.yaml
|
|
||||||
apiVersion: kustomize.toolkit.fluxcd.io/v1
|
|
||||||
kind: Kustomization
|
|
||||||
metadata:
|
|
||||||
name: longhorn
|
|
||||||
namespace: flux-system
|
|
||||||
spec:
|
|
||||||
interval: 30m
|
|
||||||
path: ./infrastructure/longhorn/core
|
|
||||||
prune: true
|
|
||||||
force: true
|
|
||||||
sourceRef:
|
|
||||||
kind: GitRepository
|
|
||||||
name: flux-system
|
|
||||||
namespace: flux-system
|
|
||||||
targetNamespace: longhorn-system
|
|
||||||
dependsOn:
|
|
||||||
- name: helm
|
|
||||||
- name: longhorn-adopt
|
|
||||||
wait: false
|
|
||||||
@ -1,26 +0,0 @@
|
|||||||
# clusters/atlas/flux-system/platform/maintenance/image-automation.yaml
|
|
||||||
apiVersion: image.toolkit.fluxcd.io/v1
|
|
||||||
kind: ImageUpdateAutomation
|
|
||||||
metadata:
|
|
||||||
name: maintenance
|
|
||||||
namespace: maintenance
|
|
||||||
spec:
|
|
||||||
interval: 1m0s
|
|
||||||
sourceRef:
|
|
||||||
kind: GitRepository
|
|
||||||
name: flux-system
|
|
||||||
namespace: flux-system
|
|
||||||
git:
|
|
||||||
checkout:
|
|
||||||
ref:
|
|
||||||
branch: main
|
|
||||||
commit:
|
|
||||||
author:
|
|
||||||
email: ops@bstein.dev
|
|
||||||
name: flux-bot
|
|
||||||
messageTemplate: "chore(maintenance): automated image update"
|
|
||||||
push:
|
|
||||||
branch: main
|
|
||||||
update:
|
|
||||||
strategy: Setters
|
|
||||||
path: services/maintenance
|
|
||||||
@ -1,15 +0,0 @@
|
|||||||
# clusters/atlas/flux-system/platform/maintenance/kustomization.yaml
|
|
||||||
apiVersion: kustomize.toolkit.fluxcd.io/v1
|
|
||||||
kind: Kustomization
|
|
||||||
metadata:
|
|
||||||
name: maintenance
|
|
||||||
namespace: flux-system
|
|
||||||
spec:
|
|
||||||
interval: 10m
|
|
||||||
path: ./services/maintenance
|
|
||||||
prune: true
|
|
||||||
force: true
|
|
||||||
sourceRef:
|
|
||||||
kind: GitRepository
|
|
||||||
name: flux-system
|
|
||||||
wait: false
|
|
||||||
@ -1,16 +0,0 @@
|
|||||||
# clusters/atlas/flux-system/platform/metallb/kustomization.yaml
|
|
||||||
apiVersion: kustomize.toolkit.fluxcd.io/v1
|
|
||||||
kind: Kustomization
|
|
||||||
metadata:
|
|
||||||
name: metallb
|
|
||||||
namespace: flux-system
|
|
||||||
spec:
|
|
||||||
interval: 30m
|
|
||||||
sourceRef:
|
|
||||||
kind: GitRepository
|
|
||||||
name: flux-system
|
|
||||||
namespace: flux-system
|
|
||||||
path: ./infrastructure/metallb
|
|
||||||
prune: true
|
|
||||||
wait: true
|
|
||||||
targetNamespace: metallb-system
|
|
||||||
@ -11,4 +11,4 @@ spec:
|
|||||||
sourceRef:
|
sourceRef:
|
||||||
kind: GitRepository
|
kind: GitRepository
|
||||||
name: flux-system
|
name: flux-system
|
||||||
wait: false
|
wait: true
|
||||||
|
|||||||
@ -1,25 +0,0 @@
|
|||||||
# clusters/atlas/flux-system/platform/postgres/kustomization.yaml
|
|
||||||
apiVersion: kustomize.toolkit.fluxcd.io/v1
|
|
||||||
kind: Kustomization
|
|
||||||
metadata:
|
|
||||||
name: postgres
|
|
||||||
namespace: flux-system
|
|
||||||
spec:
|
|
||||||
interval: 10m
|
|
||||||
path: ./infrastructure/postgres
|
|
||||||
prune: true
|
|
||||||
force: true
|
|
||||||
sourceRef:
|
|
||||||
kind: GitRepository
|
|
||||||
name: flux-system
|
|
||||||
targetNamespace: postgres
|
|
||||||
dependsOn:
|
|
||||||
- name: longhorn
|
|
||||||
- name: vault
|
|
||||||
- name: vault-csi
|
|
||||||
healthChecks:
|
|
||||||
- apiVersion: apps/v1
|
|
||||||
kind: StatefulSet
|
|
||||||
name: postgres
|
|
||||||
namespace: postgres
|
|
||||||
wait: true
|
|
||||||
@ -15,5 +15,4 @@ spec:
|
|||||||
namespace: flux-system
|
namespace: flux-system
|
||||||
dependsOn:
|
dependsOn:
|
||||||
- name: core
|
- name: core
|
||||||
- name: metallb
|
|
||||||
wait: true
|
wait: true
|
||||||
|
|||||||
@ -1,16 +0,0 @@
|
|||||||
# clusters/atlas/flux-system/platform/vault-csi/kustomization.yaml
|
|
||||||
apiVersion: kustomize.toolkit.fluxcd.io/v1
|
|
||||||
kind: Kustomization
|
|
||||||
metadata:
|
|
||||||
name: vault-csi
|
|
||||||
namespace: flux-system
|
|
||||||
spec:
|
|
||||||
interval: 30m
|
|
||||||
sourceRef:
|
|
||||||
kind: GitRepository
|
|
||||||
name: flux-system
|
|
||||||
namespace: flux-system
|
|
||||||
path: ./infrastructure/vault-csi
|
|
||||||
prune: true
|
|
||||||
wait: true
|
|
||||||
targetNamespace: kube-system
|
|
||||||
@ -1,16 +0,0 @@
|
|||||||
# clusters/atlas/flux-system/platform/vault-injector/kustomization.yaml
|
|
||||||
apiVersion: kustomize.toolkit.fluxcd.io/v1
|
|
||||||
kind: Kustomization
|
|
||||||
metadata:
|
|
||||||
name: vault-injector
|
|
||||||
namespace: flux-system
|
|
||||||
spec:
|
|
||||||
interval: 30m
|
|
||||||
path: ./infrastructure/vault-injector
|
|
||||||
targetNamespace: vault
|
|
||||||
prune: true
|
|
||||||
sourceRef:
|
|
||||||
kind: GitRepository
|
|
||||||
name: flux-system
|
|
||||||
namespace: flux-system
|
|
||||||
wait: true
|
|
||||||
7
clusters/atlas/platform/kustomization.yaml
Normal file
7
clusters/atlas/platform/kustomization.yaml
Normal file
@ -0,0 +1,7 @@
|
|||||||
|
# clusters/atlas/platform/kustomization.yaml
|
||||||
|
apiVersion: kustomize.config.k8s.io/v1beta1
|
||||||
|
kind: Kustomization
|
||||||
|
resources:
|
||||||
|
- ../../../infrastructure/modules/base
|
||||||
|
- ../../../infrastructure/modules/profiles/atlas-ha
|
||||||
|
- ../../../infrastructure/sources/cert-manager/letsencrypt.yaml
|
||||||
5
clusters/oceanus/README.md
Normal file
5
clusters/oceanus/README.md
Normal file
@ -0,0 +1,5 @@
|
|||||||
|
# Oceanus Cluster Scaffold
|
||||||
|
|
||||||
|
This directory prepares the Flux and Kustomize layout for a future Oceanus-managed cluster.
|
||||||
|
Populate `flux-system/` with `gotk-components.yaml` and related manifests after running `flux bootstrap`.
|
||||||
|
Define node-specific resources under `infrastructure/modules/profiles/oceanus-validator/` and reference workloads in `applications/` as they come online.
|
||||||
@ -1,12 +0,0 @@
|
|||||||
FROM debian:bookworm-slim
|
|
||||||
|
|
||||||
RUN apt-get update \
|
|
||||||
&& apt-get install -y --no-install-recommends \
|
|
||||||
bash \
|
|
||||||
ca-certificates \
|
|
||||||
curl \
|
|
||||||
util-linux \
|
|
||||||
zstd \
|
|
||||||
&& rm -rf /var/lib/apt/lists/*
|
|
||||||
|
|
||||||
CMD ["/bin/sh"]
|
|
||||||
@ -1,9 +0,0 @@
|
|||||||
FROM python:3.11-slim
|
|
||||||
|
|
||||||
ENV PIP_DISABLE_PIP_VERSION_CHECK=1
|
|
||||||
|
|
||||||
RUN pip install --no-cache-dir requests psycopg2-binary \
|
|
||||||
&& groupadd --system guest-tools \
|
|
||||||
&& useradd --system --uid 65532 --gid guest-tools --home-dir /nonexistent --shell /usr/sbin/nologin guest-tools
|
|
||||||
|
|
||||||
USER guest-tools
|
|
||||||
@ -1,8 +0,0 @@
|
|||||||
# Use the mirrored Harbor artifact so CI does not depend on Docker Hub egress.
|
|
||||||
FROM registry.bstein.dev/streaming/data-prepper@sha256:32ac6ad42e0f12da08bebee307e290b17d127b30def9b06eeaffbcbbc5033e83
|
|
||||||
|
|
||||||
ENV DATA_PREPPER_PATH=/usr/share/data-prepper
|
|
||||||
|
|
||||||
USER 10001
|
|
||||||
WORKDIR /usr/share/data-prepper
|
|
||||||
CMD ["bin/data-prepper"]
|
|
||||||
@ -1,9 +0,0 @@
|
|||||||
FROM registry.bstein.dev/infra/harbor-core:v2.14.1-arm64
|
|
||||||
|
|
||||||
USER root
|
|
||||||
COPY dockerfiles/vault-entrypoint.sh /entrypoint.sh
|
|
||||||
RUN chmod 0755 /entrypoint.sh
|
|
||||||
USER harbor
|
|
||||||
|
|
||||||
ENTRYPOINT ["/entrypoint.sh"]
|
|
||||||
CMD ["/harbor/entrypoint.sh"]
|
|
||||||
@ -1,9 +0,0 @@
|
|||||||
FROM registry.bstein.dev/infra/harbor-jobservice:v2.14.1-arm64
|
|
||||||
|
|
||||||
USER root
|
|
||||||
COPY dockerfiles/vault-entrypoint.sh /entrypoint.sh
|
|
||||||
RUN chmod 0755 /entrypoint.sh
|
|
||||||
USER harbor
|
|
||||||
|
|
||||||
ENTRYPOINT ["/entrypoint.sh"]
|
|
||||||
CMD ["/harbor/entrypoint.sh"]
|
|
||||||
@ -1,9 +0,0 @@
|
|||||||
FROM registry.bstein.dev/infra/harbor-registry:v2.14.1-arm64
|
|
||||||
|
|
||||||
USER root
|
|
||||||
COPY dockerfiles/vault-entrypoint.sh /entrypoint.sh
|
|
||||||
RUN chmod 0755 /entrypoint.sh
|
|
||||||
USER harbor
|
|
||||||
|
|
||||||
ENTRYPOINT ["/entrypoint.sh"]
|
|
||||||
CMD ["/home/harbor/entrypoint.sh"]
|
|
||||||
@ -1,9 +0,0 @@
|
|||||||
FROM registry.bstein.dev/infra/harbor-registryctl:v2.14.1-arm64
|
|
||||||
|
|
||||||
USER root
|
|
||||||
COPY dockerfiles/vault-entrypoint.sh /entrypoint.sh
|
|
||||||
RUN chmod 0755 /entrypoint.sh
|
|
||||||
USER harbor
|
|
||||||
|
|
||||||
ENTRYPOINT ["/entrypoint.sh"]
|
|
||||||
CMD ["/home/harbor/start.sh"]
|
|
||||||
@ -1,13 +0,0 @@
|
|||||||
FROM ghcr.io/element-hq/lk-jwt-service:0.3.0 AS base
|
|
||||||
|
|
||||||
FROM alpine:3.20
|
|
||||||
RUN apk add --no-cache ca-certificates \
|
|
||||||
&& addgroup -S livekit-token \
|
|
||||||
&& adduser -S -D -H -u 65532 -G livekit-token livekit-token
|
|
||||||
COPY --from=base /lk-jwt-service /lk-jwt-service
|
|
||||||
COPY dockerfiles/vault-entrypoint.sh /entrypoint.sh
|
|
||||||
RUN chmod 0755 /entrypoint.sh
|
|
||||||
|
|
||||||
USER livekit-token
|
|
||||||
ENTRYPOINT ["/entrypoint.sh"]
|
|
||||||
CMD ["/lk-jwt-service"]
|
|
||||||
@ -29,12 +29,10 @@ FROM ${DEBIAN_IMAGE}
|
|||||||
RUN set -eux; \
|
RUN set -eux; \
|
||||||
apt-get update; \
|
apt-get update; \
|
||||||
apt-get install -y --no-install-recommends ca-certificates; \
|
apt-get install -y --no-install-recommends ca-certificates; \
|
||||||
update-ca-certificates; rm -rf /var/lib/apt/lists/*; \
|
update-ca-certificates; rm -rf /var/lib/apt/lists/*
|
||||||
groupadd --system p2pool; \
|
|
||||||
useradd --system --uid 65532 --gid p2pool --home-dir /nonexistent --shell /usr/sbin/nologin p2pool
|
|
||||||
COPY --from=fetch /out/p2pool /usr/local/bin/p2pool
|
COPY --from=fetch /out/p2pool /usr/local/bin/p2pool
|
||||||
|
|
||||||
RUN /usr/local/bin/p2pool --version || true
|
RUN /usr/local/bin/p2pool --version || true
|
||||||
EXPOSE 3333
|
EXPOSE 3333
|
||||||
USER p2pool
|
|
||||||
ENTRYPOINT ["/usr/local/bin/p2pool"]
|
ENTRYPOINT ["/usr/local/bin/p2pool"]
|
||||||
|
|
||||||
|
|||||||
@ -26,12 +26,9 @@ RUN set -eux; \
|
|||||||
curl -fsSL "$URL" -o /opt/monero/monero.tar.bz2; \
|
curl -fsSL "$URL" -o /opt/monero/monero.tar.bz2; \
|
||||||
tar -xjf /opt/monero/monero.tar.bz2 -C /opt/monero --strip-components=1; \
|
tar -xjf /opt/monero/monero.tar.bz2 -C /opt/monero --strip-components=1; \
|
||||||
install -m 0755 /opt/monero/monero-wallet-rpc /usr/local/bin/monero-wallet-rpc; \
|
install -m 0755 /opt/monero/monero-wallet-rpc /usr/local/bin/monero-wallet-rpc; \
|
||||||
rm -f /opt/monero/monero.tar.bz2; \
|
rm -f /opt/monero/monero.tar.bz2
|
||||||
groupadd --system monero; \
|
|
||||||
useradd --system --uid 1000 --gid monero --home-dir /nonexistent --shell /usr/sbin/nologin monero
|
|
||||||
|
|
||||||
ENV PATH="/usr/local/bin:/usr/bin:/bin"
|
ENV PATH="/usr/local/bin:/usr/bin:/bin"
|
||||||
RUN /usr/local/bin/monero-wallet-rpc --version || true
|
RUN /usr/local/bin/monero-wallet-rpc --version || true
|
||||||
|
|
||||||
EXPOSE 18083
|
EXPOSE 18083
|
||||||
USER monero
|
|
||||||
|
|||||||
@ -23,14 +23,10 @@ RUN set -eux; \
|
|||||||
mkdir -p /opt/monero; \
|
mkdir -p /opt/monero; \
|
||||||
tar -xjf /tmp/monero.tar.bz2 -C /opt/monero --strip-components=1; \
|
tar -xjf /tmp/monero.tar.bz2 -C /opt/monero --strip-components=1; \
|
||||||
rm -f /tmp/monero.tar.bz2; \
|
rm -f /tmp/monero.tar.bz2; \
|
||||||
groupadd --system monero; \
|
|
||||||
useradd --system --uid 1000 --gid monero --home-dir /nonexistent --shell /usr/sbin/nologin monero; \
|
|
||||||
mkdir -p /data; \
|
mkdir -p /data; \
|
||||||
chown monero:monero /data; \
|
|
||||||
chmod 0770 /data
|
chmod 0770 /data
|
||||||
|
|
||||||
ENV LD_LIBRARY_PATH=/opt/monero:/opt/monero/lib \
|
ENV LD_LIBRARY_PATH=/opt/monero:/opt/monero/lib \
|
||||||
PATH="/opt/monero:${PATH}"
|
PATH="/opt/monero:${PATH}"
|
||||||
|
|
||||||
USER monero
|
|
||||||
CMD ["/opt/monero/monerod", "--version"]
|
CMD ["/opt/monero/monerod", "--version"]
|
||||||
|
|||||||
@ -1,13 +0,0 @@
|
|||||||
FROM quay.io/oauth2-proxy/oauth2-proxy:v7.6.0 AS base
|
|
||||||
|
|
||||||
FROM alpine:3.20
|
|
||||||
RUN apk add --no-cache ca-certificates \
|
|
||||||
&& addgroup -S oauth2-proxy \
|
|
||||||
&& adduser -S -D -H -u 65532 -G oauth2-proxy oauth2-proxy
|
|
||||||
COPY --from=base /bin/oauth2-proxy /bin/oauth2-proxy
|
|
||||||
COPY dockerfiles/vault-entrypoint.sh /entrypoint.sh
|
|
||||||
RUN chmod 0755 /entrypoint.sh
|
|
||||||
|
|
||||||
USER oauth2-proxy
|
|
||||||
ENTRYPOINT ["/entrypoint.sh"]
|
|
||||||
CMD ["/bin/oauth2-proxy"]
|
|
||||||
@ -1,13 +0,0 @@
|
|||||||
FROM registry.bstein.dev/streaming/pegasus:1.2.32 AS base
|
|
||||||
|
|
||||||
FROM alpine:3.20
|
|
||||||
RUN apk add --no-cache ca-certificates \
|
|
||||||
&& addgroup -S pegasus \
|
|
||||||
&& adduser -S -D -H -u 65532 -G pegasus pegasus
|
|
||||||
COPY --from=base /pegasus /pegasus
|
|
||||||
COPY dockerfiles/vault-entrypoint.sh /entrypoint.sh
|
|
||||||
RUN chmod 0755 /entrypoint.sh
|
|
||||||
|
|
||||||
USER pegasus
|
|
||||||
ENTRYPOINT ["/entrypoint.sh"]
|
|
||||||
CMD ["/pegasus"]
|
|
||||||
@ -1,48 +0,0 @@
|
|||||||
# dockerfiles/Dockerfile.quality-tools
|
|
||||||
FROM debian:bookworm-slim
|
|
||||||
|
|
||||||
ARG SONAR_SCANNER_VERSION=8.0.1.6346
|
|
||||||
ARG TRIVY_VERSION=0.70.0
|
|
||||||
ENV TRIVY_CACHE_DIR=/opt/trivy-cache
|
|
||||||
|
|
||||||
SHELL ["/bin/bash", "-o", "pipefail", "-c"]
|
|
||||||
|
|
||||||
RUN apt-get update \
|
|
||||||
&& apt-get install -y --no-install-recommends \
|
|
||||||
bash \
|
|
||||||
ca-certificates \
|
|
||||||
curl \
|
|
||||||
git \
|
|
||||||
jq \
|
|
||||||
unzip \
|
|
||||||
&& rm -rf /var/lib/apt/lists/* \
|
|
||||||
&& groupadd --system quality-tools \
|
|
||||||
&& useradd --system --uid 65532 --gid quality-tools --home-dir /nonexistent --shell /usr/sbin/nologin quality-tools
|
|
||||||
|
|
||||||
RUN set -eux; \
|
|
||||||
scanner_zip="sonar-scanner-cli-${SONAR_SCANNER_VERSION}-linux-aarch64.zip"; \
|
|
||||||
base_url="https://binaries.sonarsource.com/Distribution/sonar-scanner-cli"; \
|
|
||||||
curl -fsSL "${base_url}/${scanner_zip}" -o "/tmp/${scanner_zip}"; \
|
|
||||||
curl -fsSL "${base_url}/${scanner_zip}.sha256" -o "/tmp/${scanner_zip}.sha256"; \
|
|
||||||
printf '%s %s\n' "$(cat "/tmp/${scanner_zip}.sha256")" "/tmp/${scanner_zip}" | sha256sum -c -; \
|
|
||||||
unzip -q "/tmp/${scanner_zip}" -d /opt; \
|
|
||||||
ln -s "/opt/sonar-scanner-${SONAR_SCANNER_VERSION}-linux-aarch64/bin/sonar-scanner" /usr/local/bin/sonar-scanner; \
|
|
||||||
rm -f "/tmp/${scanner_zip}" "/tmp/${scanner_zip}.sha256"
|
|
||||||
|
|
||||||
RUN set -eux; \
|
|
||||||
trivy_tgz="trivy_${TRIVY_VERSION}_Linux-ARM64.tar.gz"; \
|
|
||||||
curl -fsSL "https://github.com/aquasecurity/trivy/releases/download/v${TRIVY_VERSION}/${trivy_tgz}" -o "/tmp/${trivy_tgz}"; \
|
|
||||||
tar -C /usr/local/bin -xzf "/tmp/${trivy_tgz}" trivy; \
|
|
||||||
rm -f "/tmp/${trivy_tgz}"; \
|
|
||||||
trivy --version; \
|
|
||||||
sonar-scanner -v
|
|
||||||
|
|
||||||
RUN set -eux; \
|
|
||||||
mkdir -p "${TRIVY_CACHE_DIR}"; \
|
|
||||||
trivy image --download-db-only --cache-dir "${TRIVY_CACHE_DIR}"; \
|
|
||||||
chmod -R a+rX "${TRIVY_CACHE_DIR}"; \
|
|
||||||
mkdir -p /workspace; \
|
|
||||||
chown quality-tools:quality-tools /workspace
|
|
||||||
|
|
||||||
WORKDIR /workspace
|
|
||||||
USER quality-tools
|
|
||||||
@ -1,34 +0,0 @@
|
|||||||
#!/bin/sh
|
|
||||||
set -eu
|
|
||||||
|
|
||||||
if [ -n "${VAULT_ENV_FILE:-}" ]; then
|
|
||||||
if [ -f "${VAULT_ENV_FILE}" ]; then
|
|
||||||
# shellcheck disable=SC1090
|
|
||||||
. "${VAULT_ENV_FILE}"
|
|
||||||
else
|
|
||||||
echo "Vault env file not found: ${VAULT_ENV_FILE}" >&2
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
fi
|
|
||||||
|
|
||||||
if [ -n "${VAULT_COPY_FILES:-}" ]; then
|
|
||||||
old_ifs="$IFS"
|
|
||||||
IFS=','
|
|
||||||
for pair in ${VAULT_COPY_FILES}; do
|
|
||||||
src="${pair%%:*}"
|
|
||||||
dest="${pair#*:}"
|
|
||||||
if [ -z "${src}" ] || [ -z "${dest}" ]; then
|
|
||||||
echo "Vault copy entry malformed: ${pair}" >&2
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
if [ ! -f "${src}" ]; then
|
|
||||||
echo "Vault file not found: ${src}" >&2
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
mkdir -p "$(dirname "${dest}")"
|
|
||||||
cp "${src}" "${dest}"
|
|
||||||
done
|
|
||||||
IFS="$old_ifs"
|
|
||||||
fi
|
|
||||||
|
|
||||||
exec "$@"
|
|
||||||
16
docs/topology.md
Normal file
16
docs/topology.md
Normal file
@ -0,0 +1,16 @@
|
|||||||
|
# Titan Homelab Topology
|
||||||
|
|
||||||
|
| Hostname | Role / Function | Managed By | Notes |
|
||||||
|
|------------|--------------------------------|---------------------|-------|
|
||||||
|
| titan-0a | Kubernetes control-plane | Flux (atlas cluster)| HA leader, tainted for control only |
|
||||||
|
| titan-0b | Kubernetes control-plane | Flux (atlas cluster)| Standby control node |
|
||||||
|
| titan-0c | Kubernetes control-plane | Flux (atlas cluster)| Standby control node |
|
||||||
|
| titan-04-19| Raspberry Pi workers | Flux (atlas cluster)| Workload nodes, labelled per hardware |
|
||||||
|
| titan-22 | GPU mini-PC (Jellyfin) | Flux + Ansible | NVIDIA runtime managed via `modules/profiles/atlas-ha` |
|
||||||
|
| titan-24 | Tethys hybrid node | Flux + Ansible | Runs SUI metrics via K8s, validator via Ansible |
|
||||||
|
| titan-db | HA control plane database | Ansible | PostgreSQL / etcd backing services |
|
||||||
|
| titan-jh | Jumphost & bastion | Ansible | Entry point / future KVM services |
|
||||||
|
| oceanus | Dedicated SUI validator host | Ansible / Flux prep | Baremetal validator workloads, exposes metrics to atlas; Kustomize scaffold under `clusters/oceanus/` |
|
||||||
|
| styx | Air-gapped workstation | Manual / Scripts | Remains isolated, scripts tracked in `hosts/styx` |
|
||||||
|
|
||||||
|
Use the `clusters/` directory for cluster-scoped state and the `hosts/` directory for baremetal orchestration.
|
||||||
@ -1,18 +1,5 @@
|
|||||||
# hosts/roles/titan_jh/tasks/main.yaml
|
# hosts/roles/titan_jh/tasks/main.yaml
|
||||||
---
|
---
|
||||||
- name: Install node exporter
|
|
||||||
ansible.builtin.package:
|
|
||||||
name: prometheus-node-exporter
|
|
||||||
state: present
|
|
||||||
tags: ['jumphost', 'monitoring']
|
|
||||||
|
|
||||||
- name: Enable node exporter
|
|
||||||
ansible.builtin.service:
|
|
||||||
name: prometheus-node-exporter
|
|
||||||
enabled: true
|
|
||||||
state: started
|
|
||||||
tags: ['jumphost', 'monitoring']
|
|
||||||
|
|
||||||
- name: Placeholder for jumphost hardening
|
- name: Placeholder for jumphost hardening
|
||||||
ansible.builtin.debug:
|
ansible.builtin.debug:
|
||||||
msg: "Harden SSH, manage bastion tooling, and configure audit logging here."
|
msg: "Harden SSH, manage bastion tooling, and configure audit logging here."
|
||||||
|
|||||||
2
hosts/styx/README.md
Normal file
2
hosts/styx/README.md
Normal file
@ -0,0 +1,2 @@
|
|||||||
|
# hosts/styx/README.md
|
||||||
|
Styx is air-gapped; provisioning scripts live under `scripts/`.
|
||||||
@ -1,40 +0,0 @@
|
|||||||
# infrastructure/cert-manager/cleanup/cert-manager-cleanup-job.yaml
|
|
||||||
apiVersion: batch/v1
|
|
||||||
kind: Job
|
|
||||||
metadata:
|
|
||||||
name: cert-manager-cleanup-2
|
|
||||||
namespace: cert-manager
|
|
||||||
spec:
|
|
||||||
backoffLimit: 1
|
|
||||||
template:
|
|
||||||
spec:
|
|
||||||
serviceAccountName: cert-manager-cleanup
|
|
||||||
restartPolicy: Never
|
|
||||||
affinity:
|
|
||||||
nodeAffinity:
|
|
||||||
requiredDuringSchedulingIgnoredDuringExecution:
|
|
||||||
nodeSelectorTerms:
|
|
||||||
- matchExpressions:
|
|
||||||
- key: node-role.kubernetes.io/worker
|
|
||||||
operator: Exists
|
|
||||||
preferredDuringSchedulingIgnoredDuringExecution:
|
|
||||||
- weight: 100
|
|
||||||
preference:
|
|
||||||
matchExpressions:
|
|
||||||
- key: kubernetes.io/arch
|
|
||||||
operator: In
|
|
||||||
values: ["arm64"]
|
|
||||||
containers:
|
|
||||||
- name: cleanup
|
|
||||||
image: bitnami/kubectl@sha256:554ab88b1858e8424c55de37ad417b16f2a0e65d1607aa0f3fe3ce9b9f10b131
|
|
||||||
command: ["/usr/bin/env", "bash"]
|
|
||||||
args: ["/scripts/cert_manager_cleanup.sh"]
|
|
||||||
volumeMounts:
|
|
||||||
- name: script
|
|
||||||
mountPath: /scripts
|
|
||||||
readOnly: true
|
|
||||||
volumes:
|
|
||||||
- name: script
|
|
||||||
configMap:
|
|
||||||
name: cert-manager-cleanup-script
|
|
||||||
defaultMode: 0555
|
|
||||||
@ -1,58 +0,0 @@
|
|||||||
# infrastructure/cert-manager/cleanup/cert-manager-cleanup-rbac.yaml
|
|
||||||
apiVersion: v1
|
|
||||||
kind: ServiceAccount
|
|
||||||
metadata:
|
|
||||||
name: cert-manager-cleanup
|
|
||||||
namespace: cert-manager
|
|
||||||
---
|
|
||||||
apiVersion: rbac.authorization.k8s.io/v1
|
|
||||||
kind: ClusterRole
|
|
||||||
metadata:
|
|
||||||
name: cert-manager-cleanup
|
|
||||||
rules:
|
|
||||||
- apiGroups: [""]
|
|
||||||
resources:
|
|
||||||
- pods
|
|
||||||
- services
|
|
||||||
- endpoints
|
|
||||||
- configmaps
|
|
||||||
- secrets
|
|
||||||
- serviceaccounts
|
|
||||||
verbs: ["get", "list", "watch", "delete"]
|
|
||||||
- apiGroups: ["apps"]
|
|
||||||
resources:
|
|
||||||
- deployments
|
|
||||||
- daemonsets
|
|
||||||
- statefulsets
|
|
||||||
- replicasets
|
|
||||||
verbs: ["get", "list", "watch", "delete"]
|
|
||||||
- apiGroups: ["batch"]
|
|
||||||
resources:
|
|
||||||
- jobs
|
|
||||||
- cronjobs
|
|
||||||
verbs: ["get", "list", "watch", "delete"]
|
|
||||||
- apiGroups: ["rbac.authorization.k8s.io"]
|
|
||||||
resources:
|
|
||||||
- roles
|
|
||||||
- rolebindings
|
|
||||||
- clusterroles
|
|
||||||
- clusterrolebindings
|
|
||||||
verbs: ["get", "list", "watch", "delete"]
|
|
||||||
- apiGroups: ["admissionregistration.k8s.io"]
|
|
||||||
resources:
|
|
||||||
- validatingwebhookconfigurations
|
|
||||||
- mutatingwebhookconfigurations
|
|
||||||
verbs: ["get", "list", "watch", "delete"]
|
|
||||||
---
|
|
||||||
apiVersion: rbac.authorization.k8s.io/v1
|
|
||||||
kind: ClusterRoleBinding
|
|
||||||
metadata:
|
|
||||||
name: cert-manager-cleanup
|
|
||||||
roleRef:
|
|
||||||
apiGroup: rbac.authorization.k8s.io
|
|
||||||
kind: ClusterRole
|
|
||||||
name: cert-manager-cleanup
|
|
||||||
subjects:
|
|
||||||
- kind: ServiceAccount
|
|
||||||
name: cert-manager-cleanup
|
|
||||||
namespace: cert-manager
|
|
||||||
@ -1,15 +0,0 @@
|
|||||||
# infrastructure/cert-manager/cleanup/kustomization.yaml
|
|
||||||
apiVersion: kustomize.config.k8s.io/v1beta1
|
|
||||||
kind: Kustomization
|
|
||||||
resources:
|
|
||||||
- namespace.yaml
|
|
||||||
- cert-manager-cleanup-rbac.yaml
|
|
||||||
- cert-manager-cleanup-job.yaml
|
|
||||||
|
|
||||||
configMapGenerator:
|
|
||||||
- name: cert-manager-cleanup-script
|
|
||||||
namespace: cert-manager
|
|
||||||
files:
|
|
||||||
- cert_manager_cleanup.sh=scripts/cert_manager_cleanup.sh
|
|
||||||
options:
|
|
||||||
disableNameSuffixHash: true
|
|
||||||
@ -1,5 +0,0 @@
|
|||||||
# infrastructure/cert-manager/cleanup/namespace.yaml
|
|
||||||
apiVersion: v1
|
|
||||||
kind: Namespace
|
|
||||||
metadata:
|
|
||||||
name: cert-manager
|
|
||||||
@ -1,37 +0,0 @@
|
|||||||
#!/usr/bin/env bash
|
|
||||||
set -euo pipefail
|
|
||||||
|
|
||||||
namespace="cert-manager"
|
|
||||||
selectors=(
|
|
||||||
"app.kubernetes.io/name=cert-manager"
|
|
||||||
"app.kubernetes.io/instance=cert-manager"
|
|
||||||
"app.kubernetes.io/instance=certmanager-prod"
|
|
||||||
)
|
|
||||||
|
|
||||||
delete_namespaced() {
|
|
||||||
local selector="$1"
|
|
||||||
kubectl -n "${namespace}" delete deployment,daemonset,statefulset,replicaset \
|
|
||||||
--selector "${selector}" --ignore-not-found --wait=false
|
|
||||||
kubectl -n "${namespace}" delete pod,service,endpoints,serviceaccount,configmap,secret \
|
|
||||||
--selector "${selector}" --ignore-not-found --wait=false
|
|
||||||
kubectl -n "${namespace}" delete role,rolebinding \
|
|
||||||
--selector "${selector}" --ignore-not-found --wait=false
|
|
||||||
kubectl -n "${namespace}" delete job,cronjob \
|
|
||||||
--selector "${selector}" --ignore-not-found --wait=false
|
|
||||||
}
|
|
||||||
|
|
||||||
delete_cluster_scoped() {
|
|
||||||
local selector="$1"
|
|
||||||
kubectl delete clusterrole,clusterrolebinding \
|
|
||||||
--selector "${selector}" --ignore-not-found --wait=false
|
|
||||||
kubectl delete mutatingwebhookconfiguration,validatingwebhookconfiguration \
|
|
||||||
--selector "${selector}" --ignore-not-found --wait=false
|
|
||||||
}
|
|
||||||
|
|
||||||
for selector in "${selectors[@]}"; do
|
|
||||||
delete_namespaced "${selector}"
|
|
||||||
delete_cluster_scoped "${selector}"
|
|
||||||
done
|
|
||||||
|
|
||||||
kubectl delete mutatingwebhookconfiguration cert-manager-webhook --ignore-not-found --wait=false
|
|
||||||
kubectl delete validatingwebhookconfiguration cert-manager-webhook --ignore-not-found --wait=false
|
|
||||||
@ -1,159 +0,0 @@
|
|||||||
# infrastructure/cert-manager/helmrelease.yaml
|
|
||||||
apiVersion: helm.toolkit.fluxcd.io/v2
|
|
||||||
kind: HelmRelease
|
|
||||||
metadata:
|
|
||||||
name: cert-manager
|
|
||||||
namespace: cert-manager
|
|
||||||
spec:
|
|
||||||
interval: 30m
|
|
||||||
chart:
|
|
||||||
spec:
|
|
||||||
chart: cert-manager
|
|
||||||
version: v1.17.0
|
|
||||||
sourceRef:
|
|
||||||
kind: HelmRepository
|
|
||||||
name: jetstack
|
|
||||||
namespace: flux-system
|
|
||||||
install:
|
|
||||||
crds: CreateReplace
|
|
||||||
remediation: { retries: 3 }
|
|
||||||
timeout: 10m
|
|
||||||
upgrade:
|
|
||||||
crds: CreateReplace
|
|
||||||
remediation:
|
|
||||||
retries: 3
|
|
||||||
remediateLastFailure: true
|
|
||||||
cleanupOnFail: true
|
|
||||||
timeout: 10m
|
|
||||||
values:
|
|
||||||
installCRDs: true
|
|
||||||
extraArgs:
|
|
||||||
- --acme-http01-solver-nameservers=1.1.1.1:53,8.8.8.8:53
|
|
||||||
nodeSelector:
|
|
||||||
node-role.kubernetes.io/worker: "true"
|
|
||||||
affinity:
|
|
||||||
nodeAffinity:
|
|
||||||
preferredDuringSchedulingIgnoredDuringExecution:
|
|
||||||
- weight: 100
|
|
||||||
preference:
|
|
||||||
matchExpressions:
|
|
||||||
- key: atlas.bstein.dev/spillover
|
|
||||||
operator: DoesNotExist
|
|
||||||
- weight: 95
|
|
||||||
preference:
|
|
||||||
matchExpressions:
|
|
||||||
- key: kubernetes.io/hostname
|
|
||||||
operator: NotIn
|
|
||||||
values:
|
|
||||||
- titan-13
|
|
||||||
- titan-15
|
|
||||||
- titan-17
|
|
||||||
- titan-19
|
|
||||||
- weight: 90
|
|
||||||
preference:
|
|
||||||
matchExpressions:
|
|
||||||
- key: hardware
|
|
||||||
operator: In
|
|
||||||
values:
|
|
||||||
- rpi5
|
|
||||||
- weight: 50
|
|
||||||
preference:
|
|
||||||
matchExpressions:
|
|
||||||
- key: hardware
|
|
||||||
operator: In
|
|
||||||
values:
|
|
||||||
- rpi4
|
|
||||||
requiredDuringSchedulingIgnoredDuringExecution:
|
|
||||||
nodeSelectorTerms:
|
|
||||||
- matchExpressions:
|
|
||||||
- key: hardware
|
|
||||||
operator: In
|
|
||||||
values:
|
|
||||||
- rpi5
|
|
||||||
- rpi4
|
|
||||||
webhook:
|
|
||||||
nodeSelector:
|
|
||||||
node-role.kubernetes.io/worker: "true"
|
|
||||||
affinity:
|
|
||||||
nodeAffinity:
|
|
||||||
preferredDuringSchedulingIgnoredDuringExecution:
|
|
||||||
- weight: 100
|
|
||||||
preference:
|
|
||||||
matchExpressions:
|
|
||||||
- key: atlas.bstein.dev/spillover
|
|
||||||
operator: DoesNotExist
|
|
||||||
- weight: 95
|
|
||||||
preference:
|
|
||||||
matchExpressions:
|
|
||||||
- key: kubernetes.io/hostname
|
|
||||||
operator: NotIn
|
|
||||||
values:
|
|
||||||
- titan-13
|
|
||||||
- titan-15
|
|
||||||
- titan-17
|
|
||||||
- titan-19
|
|
||||||
- weight: 90
|
|
||||||
preference:
|
|
||||||
matchExpressions:
|
|
||||||
- key: hardware
|
|
||||||
operator: In
|
|
||||||
values:
|
|
||||||
- rpi5
|
|
||||||
- weight: 50
|
|
||||||
preference:
|
|
||||||
matchExpressions:
|
|
||||||
- key: hardware
|
|
||||||
operator: In
|
|
||||||
values:
|
|
||||||
- rpi4
|
|
||||||
requiredDuringSchedulingIgnoredDuringExecution:
|
|
||||||
nodeSelectorTerms:
|
|
||||||
- matchExpressions:
|
|
||||||
- key: hardware
|
|
||||||
operator: In
|
|
||||||
values:
|
|
||||||
- rpi5
|
|
||||||
- rpi4
|
|
||||||
cainjector:
|
|
||||||
nodeSelector:
|
|
||||||
node-role.kubernetes.io/worker: "true"
|
|
||||||
affinity:
|
|
||||||
nodeAffinity:
|
|
||||||
preferredDuringSchedulingIgnoredDuringExecution:
|
|
||||||
- weight: 100
|
|
||||||
preference:
|
|
||||||
matchExpressions:
|
|
||||||
- key: atlas.bstein.dev/spillover
|
|
||||||
operator: DoesNotExist
|
|
||||||
- weight: 95
|
|
||||||
preference:
|
|
||||||
matchExpressions:
|
|
||||||
- key: kubernetes.io/hostname
|
|
||||||
operator: NotIn
|
|
||||||
values:
|
|
||||||
- titan-13
|
|
||||||
- titan-15
|
|
||||||
- titan-17
|
|
||||||
- titan-19
|
|
||||||
- weight: 90
|
|
||||||
preference:
|
|
||||||
matchExpressions:
|
|
||||||
- key: hardware
|
|
||||||
operator: In
|
|
||||||
values:
|
|
||||||
- rpi5
|
|
||||||
- weight: 50
|
|
||||||
preference:
|
|
||||||
matchExpressions:
|
|
||||||
- key: hardware
|
|
||||||
operator: In
|
|
||||||
values:
|
|
||||||
- rpi4
|
|
||||||
requiredDuringSchedulingIgnoredDuringExecution:
|
|
||||||
nodeSelectorTerms:
|
|
||||||
- matchExpressions:
|
|
||||||
- key: hardware
|
|
||||||
operator: In
|
|
||||||
values:
|
|
||||||
- rpi5
|
|
||||||
- rpi4
|
|
||||||
@ -1,6 +0,0 @@
|
|||||||
# infrastructure/cert-manager/kustomization.yaml
|
|
||||||
apiVersion: kustomize.config.k8s.io/v1beta1
|
|
||||||
kind: Kustomization
|
|
||||||
resources:
|
|
||||||
- namespace.yaml
|
|
||||||
- helmrelease.yaml
|
|
||||||
@ -1,5 +0,0 @@
|
|||||||
# infrastructure/cert-manager/namespace.yaml
|
|
||||||
apiVersion: v1
|
|
||||||
kind: Namespace
|
|
||||||
metadata:
|
|
||||||
name: cert-manager
|
|
||||||
@ -1,47 +0,0 @@
|
|||||||
# infrastructure/core/coredns-custom.yaml
|
|
||||||
apiVersion: v1
|
|
||||||
kind: ConfigMap
|
|
||||||
metadata:
|
|
||||||
name: coredns-custom
|
|
||||||
namespace: kube-system
|
|
||||||
data:
|
|
||||||
bstein-dev.server: |
|
|
||||||
bstein.dev:53 {
|
|
||||||
errors
|
|
||||||
cache 30
|
|
||||||
hosts {
|
|
||||||
192.168.22.9 alerts.bstein.dev
|
|
||||||
192.168.22.9 auth.bstein.dev
|
|
||||||
192.168.22.9 bstein.dev
|
|
||||||
10.43.6.87 budget.bstein.dev
|
|
||||||
192.168.22.9 call.live.bstein.dev
|
|
||||||
192.168.22.9 cd.bstein.dev
|
|
||||||
192.168.22.9 chat.ai.bstein.dev
|
|
||||||
192.168.22.9 ci.bstein.dev
|
|
||||||
192.168.22.9 cloud.bstein.dev
|
|
||||||
192.168.22.9 health.bstein.dev
|
|
||||||
192.168.22.9 kit.live.bstein.dev
|
|
||||||
192.168.22.9 live.bstein.dev
|
|
||||||
192.168.22.9 logs.bstein.dev
|
|
||||||
192.168.22.9 longhorn.bstein.dev
|
|
||||||
192.168.22.4 mail.bstein.dev
|
|
||||||
192.168.22.9 matrix.live.bstein.dev
|
|
||||||
192.168.22.9 metrics.bstein.dev
|
|
||||||
192.168.22.9 monero.bstein.dev
|
|
||||||
10.43.6.87 money.bstein.dev
|
|
||||||
192.168.22.9 notes.bstein.dev
|
|
||||||
192.168.22.9 office.bstein.dev
|
|
||||||
192.168.22.9 pegasus.bstein.dev
|
|
||||||
3.136.224.193 pm-bounces.bstein.dev
|
|
||||||
3.150.68.49 pm-bounces.bstein.dev
|
|
||||||
18.189.137.81 pm-bounces.bstein.dev
|
|
||||||
192.168.22.9 registry.bstein.dev
|
|
||||||
192.168.22.9 scm.bstein.dev
|
|
||||||
192.168.22.9 secret.bstein.dev
|
|
||||||
192.168.22.9 sso.bstein.dev
|
|
||||||
192.168.22.9 stream.bstein.dev
|
|
||||||
192.168.22.9 tasks.bstein.dev
|
|
||||||
192.168.22.9 vault.bstein.dev
|
|
||||||
fallthrough
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@ -1,141 +0,0 @@
|
|||||||
# infrastructure/core/coredns-deployment.yaml
|
|
||||||
apiVersion: apps/v1
|
|
||||||
kind: Deployment
|
|
||||||
metadata:
|
|
||||||
name: coredns
|
|
||||||
namespace: kube-system
|
|
||||||
labels:
|
|
||||||
k8s-app: kube-dns
|
|
||||||
kubernetes.io/name: CoreDNS
|
|
||||||
spec:
|
|
||||||
progressDeadlineSeconds: 600
|
|
||||||
replicas: 2
|
|
||||||
revisionHistoryLimit: 0
|
|
||||||
selector:
|
|
||||||
matchLabels:
|
|
||||||
k8s-app: kube-dns
|
|
||||||
strategy:
|
|
||||||
type: RollingUpdate
|
|
||||||
rollingUpdate:
|
|
||||||
maxSurge: 25%
|
|
||||||
maxUnavailable: 1
|
|
||||||
template:
|
|
||||||
metadata:
|
|
||||||
labels:
|
|
||||||
k8s-app: kube-dns
|
|
||||||
spec:
|
|
||||||
containers:
|
|
||||||
- name: coredns
|
|
||||||
image: registry.k8s.io/coredns/coredns:v1.12.1
|
|
||||||
imagePullPolicy: IfNotPresent
|
|
||||||
args:
|
|
||||||
- -conf
|
|
||||||
- /etc/coredns/Corefile
|
|
||||||
ports:
|
|
||||||
- containerPort: 53
|
|
||||||
name: dns
|
|
||||||
protocol: UDP
|
|
||||||
- containerPort: 53
|
|
||||||
name: dns-tcp
|
|
||||||
protocol: TCP
|
|
||||||
- containerPort: 9153
|
|
||||||
name: metrics
|
|
||||||
protocol: TCP
|
|
||||||
livenessProbe:
|
|
||||||
httpGet:
|
|
||||||
path: /health
|
|
||||||
port: 8080
|
|
||||||
scheme: HTTP
|
|
||||||
initialDelaySeconds: 60
|
|
||||||
periodSeconds: 10
|
|
||||||
timeoutSeconds: 1
|
|
||||||
successThreshold: 1
|
|
||||||
failureThreshold: 3
|
|
||||||
readinessProbe:
|
|
||||||
httpGet:
|
|
||||||
path: /ready
|
|
||||||
port: 8181
|
|
||||||
scheme: HTTP
|
|
||||||
periodSeconds: 2
|
|
||||||
timeoutSeconds: 1
|
|
||||||
successThreshold: 1
|
|
||||||
failureThreshold: 3
|
|
||||||
resources:
|
|
||||||
limits:
|
|
||||||
memory: 170Mi
|
|
||||||
requests:
|
|
||||||
cpu: 100m
|
|
||||||
memory: 70Mi
|
|
||||||
securityContext:
|
|
||||||
allowPrivilegeEscalation: false
|
|
||||||
capabilities:
|
|
||||||
add:
|
|
||||||
- NET_BIND_SERVICE
|
|
||||||
drop:
|
|
||||||
- all
|
|
||||||
readOnlyRootFilesystem: true
|
|
||||||
volumeMounts:
|
|
||||||
- name: config-volume
|
|
||||||
mountPath: /etc/coredns
|
|
||||||
readOnly: true
|
|
||||||
- name: custom-config-volume
|
|
||||||
mountPath: /etc/coredns/custom
|
|
||||||
readOnly: true
|
|
||||||
affinity:
|
|
||||||
nodeAffinity:
|
|
||||||
requiredDuringSchedulingIgnoredDuringExecution:
|
|
||||||
nodeSelectorTerms:
|
|
||||||
- matchExpressions:
|
|
||||||
- key: hardware
|
|
||||||
operator: In
|
|
||||||
values:
|
|
||||||
- rpi5
|
|
||||||
- rpi4
|
|
||||||
- key: node-role.kubernetes.io/worker
|
|
||||||
operator: In
|
|
||||||
values:
|
|
||||||
- "true"
|
|
||||||
dnsPolicy: Default
|
|
||||||
nodeSelector:
|
|
||||||
kubernetes.io/os: linux
|
|
||||||
priorityClassName: system-cluster-critical
|
|
||||||
restartPolicy: Always
|
|
||||||
schedulerName: default-scheduler
|
|
||||||
serviceAccountName: coredns
|
|
||||||
tolerations:
|
|
||||||
- key: CriticalAddonsOnly
|
|
||||||
operator: Exists
|
|
||||||
- key: node-role.kubernetes.io/control-plane
|
|
||||||
operator: Exists
|
|
||||||
effect: NoSchedule
|
|
||||||
- key: node-role.kubernetes.io/master
|
|
||||||
operator: Exists
|
|
||||||
effect: NoSchedule
|
|
||||||
topologySpreadConstraints:
|
|
||||||
- maxSkew: 1
|
|
||||||
topologyKey: kubernetes.io/hostname
|
|
||||||
whenUnsatisfiable: DoNotSchedule
|
|
||||||
labelSelector:
|
|
||||||
matchLabels:
|
|
||||||
k8s-app: kube-dns
|
|
||||||
- maxSkew: 1
|
|
||||||
topologyKey: topology.kubernetes.io/zone
|
|
||||||
whenUnsatisfiable: ScheduleAnyway
|
|
||||||
labelSelector:
|
|
||||||
matchLabels:
|
|
||||||
k8s-app: kube-dns
|
|
||||||
volumes:
|
|
||||||
- name: config-volume
|
|
||||||
configMap:
|
|
||||||
name: coredns
|
|
||||||
defaultMode: 420
|
|
||||||
items:
|
|
||||||
- key: Corefile
|
|
||||||
path: Corefile
|
|
||||||
- key: NodeHosts
|
|
||||||
path: NodeHosts
|
|
||||||
- name: custom-config-volume
|
|
||||||
configMap:
|
|
||||||
name: coredns-custom
|
|
||||||
optional: true
|
|
||||||
defaultMode: 420
|
|
||||||
@ -4,8 +4,4 @@ kind: Kustomization
|
|||||||
resources:
|
resources:
|
||||||
- ../modules/base
|
- ../modules/base
|
||||||
- ../modules/profiles/atlas-ha
|
- ../modules/profiles/atlas-ha
|
||||||
- coredns-custom.yaml
|
|
||||||
- coredns-deployment.yaml
|
|
||||||
- ntp-sync-daemonset.yaml
|
|
||||||
- ../sources/cert-manager/letsencrypt.yaml
|
- ../sources/cert-manager/letsencrypt.yaml
|
||||||
- ../sources/cert-manager/letsencrypt-prod.yaml
|
|
||||||
|
|||||||
@ -1,50 +0,0 @@
|
|||||||
# infrastructure/core/ntp-sync-daemonset.yaml
|
|
||||||
apiVersion: apps/v1
|
|
||||||
kind: DaemonSet
|
|
||||||
metadata:
|
|
||||||
name: ntp-sync
|
|
||||||
namespace: kube-system
|
|
||||||
labels:
|
|
||||||
app: ntp-sync
|
|
||||||
spec:
|
|
||||||
selector:
|
|
||||||
matchLabels:
|
|
||||||
app: ntp-sync
|
|
||||||
template:
|
|
||||||
metadata:
|
|
||||||
labels:
|
|
||||||
app: ntp-sync
|
|
||||||
spec:
|
|
||||||
affinity:
|
|
||||||
nodeAffinity:
|
|
||||||
requiredDuringSchedulingIgnoredDuringExecution:
|
|
||||||
nodeSelectorTerms:
|
|
||||||
- matchExpressions:
|
|
||||||
- key: node-role.kubernetes.io/control-plane
|
|
||||||
operator: DoesNotExist
|
|
||||||
- key: node-role.kubernetes.io/master
|
|
||||||
operator: DoesNotExist
|
|
||||||
containers:
|
|
||||||
- name: ntp-sync
|
|
||||||
image: public.ecr.aws/docker/library/busybox:1.36.1
|
|
||||||
imagePullPolicy: IfNotPresent
|
|
||||||
command: ["/bin/sh", "-c"]
|
|
||||||
args:
|
|
||||||
- |
|
|
||||||
set -eu
|
|
||||||
while true; do
|
|
||||||
ntpd -q -p pool.ntp.org || true
|
|
||||||
sleep 300
|
|
||||||
done
|
|
||||||
securityContext:
|
|
||||||
capabilities:
|
|
||||||
add: ["SYS_TIME"]
|
|
||||||
runAsUser: 0
|
|
||||||
runAsGroup: 0
|
|
||||||
resources:
|
|
||||||
requests:
|
|
||||||
cpu: 10m
|
|
||||||
memory: 16Mi
|
|
||||||
limits:
|
|
||||||
cpu: 50m
|
|
||||||
memory: 64Mi
|
|
||||||
@ -1,15 +0,0 @@
|
|||||||
# infrastructure/longhorn/adopt/kustomization.yaml
|
|
||||||
apiVersion: kustomize.config.k8s.io/v1beta1
|
|
||||||
kind: Kustomization
|
|
||||||
resources:
|
|
||||||
- namespace.yaml
|
|
||||||
- longhorn-adopt-rbac.yaml
|
|
||||||
- longhorn-helm-adopt-job.yaml
|
|
||||||
|
|
||||||
configMapGenerator:
|
|
||||||
- name: longhorn-helm-adopt-script
|
|
||||||
namespace: longhorn-system
|
|
||||||
files:
|
|
||||||
- longhorn_helm_adopt.sh=scripts/longhorn_helm_adopt.sh
|
|
||||||
options:
|
|
||||||
disableNameSuffixHash: true
|
|
||||||
@ -1,56 +0,0 @@
|
|||||||
# infrastructure/longhorn/adopt/longhorn-adopt-rbac.yaml
|
|
||||||
apiVersion: v1
|
|
||||||
kind: ServiceAccount
|
|
||||||
metadata:
|
|
||||||
name: longhorn-helm-adopt
|
|
||||||
namespace: longhorn-system
|
|
||||||
---
|
|
||||||
apiVersion: rbac.authorization.k8s.io/v1
|
|
||||||
kind: ClusterRole
|
|
||||||
metadata:
|
|
||||||
name: longhorn-helm-adopt
|
|
||||||
rules:
|
|
||||||
- apiGroups: [""]
|
|
||||||
resources:
|
|
||||||
- configmaps
|
|
||||||
- services
|
|
||||||
- serviceaccounts
|
|
||||||
- secrets
|
|
||||||
verbs: ["get", "list", "watch", "patch", "update"]
|
|
||||||
- apiGroups: ["apps"]
|
|
||||||
resources:
|
|
||||||
- deployments
|
|
||||||
- daemonsets
|
|
||||||
verbs: ["get", "list", "watch", "patch", "update"]
|
|
||||||
- apiGroups: ["batch"]
|
|
||||||
resources:
|
|
||||||
- jobs
|
|
||||||
verbs: ["get", "list", "watch", "patch", "update"]
|
|
||||||
- apiGroups: ["rbac.authorization.k8s.io"]
|
|
||||||
resources:
|
|
||||||
- roles
|
|
||||||
- rolebindings
|
|
||||||
- clusterroles
|
|
||||||
- clusterrolebindings
|
|
||||||
verbs: ["get", "list", "watch", "patch", "update"]
|
|
||||||
- apiGroups: ["apiextensions.k8s.io"]
|
|
||||||
resources:
|
|
||||||
- customresourcedefinitions
|
|
||||||
verbs: ["get", "list", "watch", "patch", "update"]
|
|
||||||
- apiGroups: ["scheduling.k8s.io"]
|
|
||||||
resources:
|
|
||||||
- priorityclasses
|
|
||||||
verbs: ["get", "list", "watch", "patch", "update"]
|
|
||||||
---
|
|
||||||
apiVersion: rbac.authorization.k8s.io/v1
|
|
||||||
kind: ClusterRoleBinding
|
|
||||||
metadata:
|
|
||||||
name: longhorn-helm-adopt
|
|
||||||
roleRef:
|
|
||||||
apiGroup: rbac.authorization.k8s.io
|
|
||||||
kind: ClusterRole
|
|
||||||
name: longhorn-helm-adopt
|
|
||||||
subjects:
|
|
||||||
- kind: ServiceAccount
|
|
||||||
name: longhorn-helm-adopt
|
|
||||||
namespace: longhorn-system
|
|
||||||
@ -1,40 +0,0 @@
|
|||||||
# infrastructure/longhorn/adopt/longhorn-helm-adopt-job.yaml
|
|
||||||
apiVersion: batch/v1
|
|
||||||
kind: Job
|
|
||||||
metadata:
|
|
||||||
name: longhorn-helm-adopt-2
|
|
||||||
namespace: longhorn-system
|
|
||||||
spec:
|
|
||||||
backoffLimit: 1
|
|
||||||
template:
|
|
||||||
spec:
|
|
||||||
serviceAccountName: longhorn-helm-adopt
|
|
||||||
restartPolicy: Never
|
|
||||||
affinity:
|
|
||||||
nodeAffinity:
|
|
||||||
requiredDuringSchedulingIgnoredDuringExecution:
|
|
||||||
nodeSelectorTerms:
|
|
||||||
- matchExpressions:
|
|
||||||
- key: node-role.kubernetes.io/worker
|
|
||||||
operator: Exists
|
|
||||||
preferredDuringSchedulingIgnoredDuringExecution:
|
|
||||||
- weight: 100
|
|
||||||
preference:
|
|
||||||
matchExpressions:
|
|
||||||
- key: kubernetes.io/arch
|
|
||||||
operator: In
|
|
||||||
values: ["arm64"]
|
|
||||||
containers:
|
|
||||||
- name: adopt
|
|
||||||
image: bitnami/kubectl@sha256:554ab88b1858e8424c55de37ad417b16f2a0e65d1607aa0f3fe3ce9b9f10b131
|
|
||||||
command: ["/usr/bin/env", "bash"]
|
|
||||||
args: ["/scripts/longhorn_helm_adopt.sh"]
|
|
||||||
volumeMounts:
|
|
||||||
- name: script
|
|
||||||
mountPath: /scripts
|
|
||||||
readOnly: true
|
|
||||||
volumes:
|
|
||||||
- name: script
|
|
||||||
configMap:
|
|
||||||
name: longhorn-helm-adopt-script
|
|
||||||
defaultMode: 0555
|
|
||||||
@ -1,5 +0,0 @@
|
|||||||
# infrastructure/longhorn/adopt/namespace.yaml
|
|
||||||
apiVersion: v1
|
|
||||||
kind: Namespace
|
|
||||||
metadata:
|
|
||||||
name: longhorn-system
|
|
||||||
@ -1,52 +0,0 @@
|
|||||||
#!/usr/bin/env bash
|
|
||||||
set -euo pipefail
|
|
||||||
|
|
||||||
release_name="longhorn"
|
|
||||||
release_namespace="longhorn-system"
|
|
||||||
selector="app.kubernetes.io/instance=${release_name}"
|
|
||||||
|
|
||||||
annotate_and_label() {
|
|
||||||
local scope="$1"
|
|
||||||
local kind="$2"
|
|
||||||
if [ "${scope}" = "namespaced" ]; then
|
|
||||||
kubectl -n "${release_namespace}" annotate "${kind}" -l "${selector}" \
|
|
||||||
meta.helm.sh/release-name="${release_name}" \
|
|
||||||
meta.helm.sh/release-namespace="${release_namespace}" \
|
|
||||||
--overwrite >/dev/null 2>&1 || true
|
|
||||||
kubectl -n "${release_namespace}" label "${kind}" -l "${selector}" \
|
|
||||||
app.kubernetes.io/managed-by=Helm --overwrite >/dev/null 2>&1 || true
|
|
||||||
else
|
|
||||||
kubectl annotate "${kind}" -l "${selector}" \
|
|
||||||
meta.helm.sh/release-name="${release_name}" \
|
|
||||||
meta.helm.sh/release-namespace="${release_namespace}" \
|
|
||||||
--overwrite >/dev/null 2>&1 || true
|
|
||||||
kubectl label "${kind}" -l "${selector}" \
|
|
||||||
app.kubernetes.io/managed-by=Helm --overwrite >/dev/null 2>&1 || true
|
|
||||||
fi
|
|
||||||
}
|
|
||||||
|
|
||||||
namespaced_kinds=(
|
|
||||||
configmap
|
|
||||||
service
|
|
||||||
serviceaccount
|
|
||||||
deployment
|
|
||||||
daemonset
|
|
||||||
job
|
|
||||||
role
|
|
||||||
rolebinding
|
|
||||||
)
|
|
||||||
|
|
||||||
cluster_kinds=(
|
|
||||||
clusterrole
|
|
||||||
clusterrolebinding
|
|
||||||
customresourcedefinition
|
|
||||||
priorityclass
|
|
||||||
)
|
|
||||||
|
|
||||||
for kind in "${namespaced_kinds[@]}"; do
|
|
||||||
annotate_and_label "namespaced" "${kind}"
|
|
||||||
done
|
|
||||||
|
|
||||||
for kind in "${cluster_kinds[@]}"; do
|
|
||||||
annotate_and_label "cluster" "${kind}"
|
|
||||||
done
|
|
||||||
@ -1,92 +0,0 @@
|
|||||||
# infrastructure/longhorn/core/helmrelease.yaml
|
|
||||||
apiVersion: helm.toolkit.fluxcd.io/v2
|
|
||||||
kind: HelmRelease
|
|
||||||
metadata:
|
|
||||||
name: longhorn
|
|
||||||
namespace: longhorn-system
|
|
||||||
spec:
|
|
||||||
interval: 30m
|
|
||||||
chart:
|
|
||||||
spec:
|
|
||||||
chart: longhorn
|
|
||||||
version: 1.8.2
|
|
||||||
sourceRef:
|
|
||||||
kind: HelmRepository
|
|
||||||
name: longhorn
|
|
||||||
namespace: flux-system
|
|
||||||
install:
|
|
||||||
crds: Skip
|
|
||||||
remediation: { retries: 3 }
|
|
||||||
timeout: 15m
|
|
||||||
upgrade:
|
|
||||||
crds: Skip
|
|
||||||
remediation:
|
|
||||||
retries: 3
|
|
||||||
remediateLastFailure: true
|
|
||||||
cleanupOnFail: true
|
|
||||||
timeout: 15m
|
|
||||||
values:
|
|
||||||
global:
|
|
||||||
nodeSelector:
|
|
||||||
longhorn-host: "true"
|
|
||||||
service:
|
|
||||||
ui:
|
|
||||||
type: NodePort
|
|
||||||
nodePort: 30824
|
|
||||||
privateRegistry:
|
|
||||||
createSecret: false
|
|
||||||
registrySecret: longhorn-registry
|
|
||||||
image:
|
|
||||||
pullPolicy: Always
|
|
||||||
longhorn:
|
|
||||||
engine:
|
|
||||||
repository: registry.bstein.dev/infra/longhorn-engine
|
|
||||||
tag: v1.8.2
|
|
||||||
manager:
|
|
||||||
repository: registry.bstein.dev/infra/longhorn-manager
|
|
||||||
tag: v1.8.2
|
|
||||||
ui:
|
|
||||||
repository: registry.bstein.dev/infra/longhorn-ui
|
|
||||||
tag: v1.8.2
|
|
||||||
instanceManager:
|
|
||||||
repository: registry.bstein.dev/infra/longhorn-instance-manager
|
|
||||||
tag: v1.8.2
|
|
||||||
shareManager:
|
|
||||||
repository: registry.bstein.dev/infra/longhorn-share-manager
|
|
||||||
tag: v1.8.2
|
|
||||||
backingImageManager:
|
|
||||||
repository: registry.bstein.dev/infra/longhorn-backing-image-manager
|
|
||||||
tag: v1.8.2
|
|
||||||
supportBundleKit:
|
|
||||||
repository: registry.bstein.dev/infra/longhorn-support-bundle-kit
|
|
||||||
tag: v0.0.56
|
|
||||||
csi:
|
|
||||||
attacher:
|
|
||||||
repository: registry.bstein.dev/infra/longhorn-csi-attacher
|
|
||||||
tag: v4.9.0
|
|
||||||
provisioner:
|
|
||||||
repository: registry.bstein.dev/infra/longhorn-csi-provisioner
|
|
||||||
tag: v5.3.0
|
|
||||||
nodeDriverRegistrar:
|
|
||||||
repository: registry.bstein.dev/infra/longhorn-csi-node-driver-registrar
|
|
||||||
tag: v2.14.0
|
|
||||||
resizer:
|
|
||||||
repository: registry.bstein.dev/infra/longhorn-csi-resizer
|
|
||||||
tag: v1.13.2
|
|
||||||
snapshotter:
|
|
||||||
repository: registry.bstein.dev/infra/longhorn-csi-snapshotter
|
|
||||||
tag: v8.2.0
|
|
||||||
livenessProbe:
|
|
||||||
repository: registry.bstein.dev/infra/longhorn-livenessprobe
|
|
||||||
tag: v2.16.0
|
|
||||||
defaultSettings:
|
|
||||||
systemManagedPodsImagePullPolicy: Always
|
|
||||||
longhornManager:
|
|
||||||
nodeSelector:
|
|
||||||
longhorn-host: "true"
|
|
||||||
longhornDriver:
|
|
||||||
nodeSelector:
|
|
||||||
longhorn-host: "true"
|
|
||||||
longhornUI:
|
|
||||||
nodeSelector:
|
|
||||||
longhorn-host: "true"
|
|
||||||
@ -1,22 +0,0 @@
|
|||||||
# infrastructure/longhorn/core/kustomization.yaml
|
|
||||||
apiVersion: kustomize.config.k8s.io/v1beta1
|
|
||||||
kind: Kustomization
|
|
||||||
resources:
|
|
||||||
- namespace.yaml
|
|
||||||
- vault-serviceaccount.yaml
|
|
||||||
- secretproviderclass.yaml
|
|
||||||
- vault-sync-deployment.yaml
|
|
||||||
- helmrelease.yaml
|
|
||||||
- longhorn-settings-ensure-job.yaml
|
|
||||||
- longhorn-disk-tags-ensure-job.yaml
|
|
||||||
|
|
||||||
configMapGenerator:
|
|
||||||
- name: longhorn-settings-ensure-script
|
|
||||||
files:
|
|
||||||
- longhorn_settings_ensure.sh=scripts/longhorn_settings_ensure.sh
|
|
||||||
- name: longhorn-disk-tags-ensure-script
|
|
||||||
files:
|
|
||||||
- longhorn_disk_tags_ensure.py=scripts/longhorn_disk_tags_ensure.py
|
|
||||||
|
|
||||||
generatorOptions:
|
|
||||||
disableNameSuffixHash: true
|
|
||||||
Some files were not shown because too many files have changed in this diff Show More
Loading…
x
Reference in New Issue
Block a user