quality: standardize suite checks and add SonarQube stack

This commit is contained in:
Brad Stein 2026-04-19 14:18:41 -03:00
parent 9a20f4f854
commit 3ccc2a1100
28 changed files with 6110 additions and 5583 deletions

90
Jenkinsfile vendored
View File

@ -7,7 +7,6 @@ pipeline {
apiVersion: v1 apiVersion: v1
kind: Pod kind: Pod
spec: spec:
serviceAccountName: "jenkins"
nodeSelector: nodeSelector:
hardware: rpi5 hardware: rpi5
kubernetes.io/arch: arm64 kubernetes.io/arch: arm64
@ -24,9 +23,13 @@ spec:
environment { environment {
PIP_DISABLE_PIP_VERSION_CHECK = '1' PIP_DISABLE_PIP_VERSION_CHECK = '1'
PYTHONUNBUFFERED = '1' PYTHONUNBUFFERED = '1'
SUITE_NAME = 'titan-iac' SUITE_NAME = 'titan_iac'
PUSHGATEWAY_URL = 'http://platform-quality-gateway.monitoring.svc.cluster.local:9091' PUSHGATEWAY_URL = 'http://platform-quality-gateway.monitoring.svc.cluster.local:9091'
VM_URL = 'http://victoria-metrics-single-server.monitoring.svc.cluster.local:8428' QUALITY_GATE_SONARQUBE_ENFORCE = '1'
QUALITY_GATE_SONARQUBE_REPORT = 'build/sonarqube-quality-gate.json'
QUALITY_GATE_IRONBANK_ENFORCE = '1'
QUALITY_GATE_IRONBANK_REQUIRED = '0'
QUALITY_GATE_IRONBANK_REPORT = 'build/ironbank-compliance.json'
} }
stages { stages {
stage('Checkout') { stage('Checkout') {
@ -39,6 +42,83 @@ spec:
sh 'pip install --no-cache-dir -r ci/requirements.txt' sh 'pip install --no-cache-dir -r ci/requirements.txt'
} }
} }
stage('Collect SonarQube evidence') {
steps {
sh '''
set -eu
mkdir -p build
python3 - <<'PY'
import base64
import json
import os
import urllib.parse
import urllib.request
host = os.getenv('SONARQUBE_HOST_URL', '').strip().rstrip('/')
project_key = os.getenv('SONARQUBE_PROJECT_KEY', '').strip()
token = os.getenv('SONARQUBE_TOKEN', '').strip()
report_path = os.getenv('QUALITY_GATE_SONARQUBE_REPORT', 'build/sonarqube-quality-gate.json')
payload = {
"status": "ERROR",
"note": "missing SONARQUBE_HOST_URL and/or SONARQUBE_PROJECT_KEY",
}
if host and project_key:
query = urllib.parse.urlencode({"projectKey": project_key})
request = urllib.request.Request(
f"{host}/api/qualitygates/project_status?{query}",
method="GET",
)
if token:
encoded = base64.b64encode(f"{token}:".encode("utf-8")).decode("utf-8")
request.add_header("Authorization", f"Basic {encoded}")
try:
with urllib.request.urlopen(request, timeout=12) as response:
payload = json.loads(response.read().decode("utf-8"))
except Exception as exc: # noqa: BLE001
payload = {"status": "ERROR", "error": str(exc)}
with open(report_path, "w", encoding="utf-8") as handle:
json.dump(payload, handle, indent=2, sort_keys=True)
handle.write("\\n")
PY
'''
}
}
stage('Collect IronBank evidence') {
steps {
sh '''
set -eu
mkdir -p build
python3 - <<'PY'
import json
import os
from pathlib import Path
report_path = Path(os.getenv('QUALITY_GATE_IRONBANK_REPORT', 'build/ironbank-compliance.json'))
if report_path.exists():
raise SystemExit(0)
status = os.getenv('IRONBANK_COMPLIANCE_STATUS', '').strip()
compliant = os.getenv('IRONBANK_COMPLIANT', '').strip().lower()
payload = {
"status": status or "unknown",
"compliant": compliant in {"1", "true", "yes", "on"} if compliant else None,
}
payload = {k: v for k, v in payload.items() if v is not None}
if "status" not in payload:
payload["status"] = "unknown"
payload["note"] = (
"Set IRONBANK_COMPLIANCE_STATUS/IRONBANK_COMPLIANT "
"or write build/ironbank-compliance.json in image-building repos."
)
report_path.parent.mkdir(parents=True, exist_ok=True)
report_path.write_text(json.dumps(payload, indent=2, sort_keys=True) + "\\n", encoding="utf-8")
PY
'''
}
}
stage('Run quality gate') { stage('Run quality gate') {
steps { steps {
sh ''' sh '''
@ -76,7 +156,7 @@ spec:
script { script {
env.FLUX_BRANCH = sh( env.FLUX_BRANCH = sh(
returnStdout: true, returnStdout: true,
script: '''awk '/branch:/{print $2; exit}' clusters/atlas/flux-system/gotk-sync.yaml''' script: "awk '/branch:/{print $2; exit}' clusters/atlas/flux-system/gotk-sync.yaml"
).trim() ).trim()
if (!env.FLUX_BRANCH) { if (!env.FLUX_BRANCH) {
error('Flux branch not found in gotk-sync.yaml') error('Flux branch not found in gotk-sync.yaml')
@ -93,7 +173,6 @@ spec:
} }
} }
steps { steps {
container('jnlp') {
withCredentials([usernamePassword(credentialsId: 'gitea-pat', usernameVariable: 'GIT_USER', passwordVariable: 'GIT_TOKEN')]) { withCredentials([usernamePassword(credentialsId: 'gitea-pat', usernameVariable: 'GIT_USER', passwordVariable: 'GIT_TOKEN')]) {
sh ''' sh '''
set +x set +x
@ -106,7 +185,6 @@ spec:
} }
} }
} }
}
post { post {
always { always {
script { script {

View File

@ -6,7 +6,6 @@ pipeline {
apiVersion: v1 apiVersion: v1
kind: Pod kind: Pod
spec: spec:
serviceAccountName: "jenkins"
nodeSelector: nodeSelector:
hardware: rpi5 hardware: rpi5
kubernetes.io/arch: arm64 kubernetes.io/arch: arm64
@ -23,9 +22,13 @@ spec:
environment { environment {
PIP_DISABLE_PIP_VERSION_CHECK = '1' PIP_DISABLE_PIP_VERSION_CHECK = '1'
PYTHONUNBUFFERED = '1' PYTHONUNBUFFERED = '1'
SUITE_NAME = 'titan-iac' SUITE_NAME = 'titan_iac'
PUSHGATEWAY_URL = 'http://platform-quality-gateway.monitoring.svc.cluster.local:9091' PUSHGATEWAY_URL = 'http://platform-quality-gateway.monitoring.svc.cluster.local:9091'
VM_URL = 'http://victoria-metrics-single-server.monitoring.svc.cluster.local:8428' QUALITY_GATE_SONARQUBE_ENFORCE = '1'
QUALITY_GATE_SONARQUBE_REPORT = 'build/sonarqube-quality-gate.json'
QUALITY_GATE_IRONBANK_ENFORCE = '1'
QUALITY_GATE_IRONBANK_REQUIRED = '0'
QUALITY_GATE_IRONBANK_REPORT = 'build/ironbank-compliance.json'
} }
stages { stages {
stage('Checkout') { stage('Checkout') {
@ -38,6 +41,83 @@ spec:
sh 'pip install --no-cache-dir -r ci/requirements.txt' sh 'pip install --no-cache-dir -r ci/requirements.txt'
} }
} }
stage('Collect SonarQube evidence') {
steps {
sh '''
set -eu
mkdir -p build
python3 - <<'PY'
import base64
import json
import os
import urllib.parse
import urllib.request
host = os.getenv('SONARQUBE_HOST_URL', '').strip().rstrip('/')
project_key = os.getenv('SONARQUBE_PROJECT_KEY', '').strip()
token = os.getenv('SONARQUBE_TOKEN', '').strip()
report_path = os.getenv('QUALITY_GATE_SONARQUBE_REPORT', 'build/sonarqube-quality-gate.json')
payload = {
"status": "ERROR",
"note": "missing SONARQUBE_HOST_URL and/or SONARQUBE_PROJECT_KEY",
}
if host and project_key:
query = urllib.parse.urlencode({"projectKey": project_key})
request = urllib.request.Request(
f"{host}/api/qualitygates/project_status?{query}",
method="GET",
)
if token:
encoded = base64.b64encode(f"{token}:".encode("utf-8")).decode("utf-8")
request.add_header("Authorization", f"Basic {encoded}")
try:
with urllib.request.urlopen(request, timeout=12) as response:
payload = json.loads(response.read().decode("utf-8"))
except Exception as exc: # noqa: BLE001
payload = {"status": "ERROR", "error": str(exc)}
with open(report_path, "w", encoding="utf-8") as handle:
json.dump(payload, handle, indent=2, sort_keys=True)
handle.write("\\n")
PY
'''
}
}
stage('Collect IronBank evidence') {
steps {
sh '''
set -eu
mkdir -p build
python3 - <<'PY'
import json
import os
from pathlib import Path
report_path = Path(os.getenv('QUALITY_GATE_IRONBANK_REPORT', 'build/ironbank-compliance.json'))
if report_path.exists():
raise SystemExit(0)
status = os.getenv('IRONBANK_COMPLIANCE_STATUS', '').strip()
compliant = os.getenv('IRONBANK_COMPLIANT', '').strip().lower()
payload = {
"status": status or "unknown",
"compliant": compliant in {"1", "true", "yes", "on"} if compliant else None,
}
payload = {k: v for k, v in payload.items() if v is not None}
if "status" not in payload:
payload["status"] = "unknown"
payload["note"] = (
"Set IRONBANK_COMPLIANCE_STATUS/IRONBANK_COMPLIANT "
"or write build/ironbank-compliance.json in image-building repos."
)
report_path.parent.mkdir(parents=True, exist_ok=True)
report_path.write_text(json.dumps(payload, indent=2, sort_keys=True) + "\\n", encoding="utf-8")
PY
'''
}
}
stage('Run quality gate') { stage('Run quality gate') {
steps { steps {
sh ''' sh '''
@ -75,7 +155,7 @@ spec:
script { script {
env.FLUX_BRANCH = sh( env.FLUX_BRANCH = sh(
returnStdout: true, returnStdout: true,
script: '''awk '/branch:/{print $2; exit}' clusters/atlas/flux-system/gotk-sync.yaml''' script: "awk '/branch:/{print $2; exit}' clusters/atlas/flux-system/gotk-sync.yaml"
).trim() ).trim()
if (!env.FLUX_BRANCH) { if (!env.FLUX_BRANCH) {
error('Flux branch not found in gotk-sync.yaml') error('Flux branch not found in gotk-sync.yaml')
@ -92,7 +172,6 @@ spec:
} }
} }
steps { steps {
container('jnlp') {
withCredentials([usernamePassword(credentialsId: 'gitea-pat', usernameVariable: 'GIT_USER', passwordVariable: 'GIT_TOKEN')]) { withCredentials([usernamePassword(credentialsId: 'gitea-pat', usernameVariable: 'GIT_USER', passwordVariable: 'GIT_TOKEN')]) {
sh ''' sh '''
set +x set +x
@ -105,7 +184,6 @@ spec:
} }
} }
} }
}
post { post {
always { always {
script { script {

View File

@ -12,20 +12,24 @@ import xml.etree.ElementTree as ET
def _escape_label(value: str) -> str: def _escape_label(value: str) -> str:
"""Escape a Prometheus label value without changing its content."""
return value.replace("\\", "\\\\").replace("\n", "\\n").replace('"', '\\"') return value.replace("\\", "\\\\").replace("\n", "\\n").replace('"', '\\"')
def _label_str(labels: dict[str, str]) -> str: def _label_str(labels: dict[str, str]) -> str:
"""Render a stable Prometheus label set from a mapping."""
parts = [f'{key}="{_escape_label(val)}"' for key, val in labels.items() if val] parts = [f'{key}="{_escape_label(val)}"' for key, val in labels.items() if val]
return "{" + ",".join(parts) + "}" if parts else "" return "{" + ",".join(parts) + "}" if parts else ""
def _read_text(url: str) -> str: def _read_text(url: str) -> str:
"""Fetch a plain-text response body from the given URL."""
with urllib.request.urlopen(url, timeout=10) as response: with urllib.request.urlopen(url, timeout=10) as response:
return response.read().decode("utf-8") return response.read().decode("utf-8")
def _post_text(url: str, payload: str) -> None: def _post_text(url: str, payload: str) -> None:
"""POST a plain-text payload and fail on any 4xx/5xx response."""
request = urllib.request.Request( request = urllib.request.Request(
url, url,
data=payload.encode("utf-8"), data=payload.encode("utf-8"),
@ -38,6 +42,7 @@ def _post_text(url: str, payload: str) -> None:
def _parse_junit(path: str) -> dict[str, int]: def _parse_junit(path: str) -> dict[str, int]:
"""Parse a JUnit XML file into aggregate test counters."""
if not os.path.exists(path): if not os.path.exists(path):
return {"tests": 0, "failures": 0, "errors": 0, "skipped": 0} return {"tests": 0, "failures": 0, "errors": 0, "skipped": 0}
@ -64,6 +69,7 @@ def _parse_junit(path: str) -> dict[str, int]:
def _collect_junit_totals(pattern: str) -> dict[str, int]: def _collect_junit_totals(pattern: str) -> dict[str, int]:
"""Sum JUnit counters across every XML file matching the pattern."""
totals = {"tests": 0, "failures": 0, "errors": 0, "skipped": 0} totals = {"tests": 0, "failures": 0, "errors": 0, "skipped": 0}
for path in sorted(glob(pattern)): for path in sorted(glob(pattern)):
parsed = _parse_junit(path) parsed = _parse_junit(path)
@ -73,6 +79,7 @@ def _collect_junit_totals(pattern: str) -> dict[str, int]:
def _read_exit_code(path: str) -> int: def _read_exit_code(path: str) -> int:
"""Read the quality-gate exit code, defaulting to failure if missing."""
try: try:
with open(path, "r", encoding="utf-8") as handle: with open(path, "r", encoding="utf-8") as handle:
return int(handle.read().strip()) return int(handle.read().strip())
@ -81,6 +88,7 @@ def _read_exit_code(path: str) -> int:
def _load_summary(path: str) -> dict: def _load_summary(path: str) -> dict:
"""Load the JSON quality-gate summary, returning an empty mapping on error."""
try: try:
with open(path, "r", encoding="utf-8") as handle: with open(path, "r", encoding="utf-8") as handle:
return json.load(handle) return json.load(handle)
@ -88,40 +96,26 @@ def _load_summary(path: str) -> dict:
return {} return {}
def _summary_coverage_percent(summary: dict | None) -> float: def _summary_float(summary: dict, key: str) -> float:
if not isinstance(summary, dict): """Extract a float-like value from the summary, defaulting to 0.0."""
return 0.0 value = summary.get(key)
results = summary.get("results", []) if isinstance(value, (int, float)):
if not isinstance(results, list): return float(value)
return 0.0
for result in results:
if not isinstance(result, dict):
continue
if result.get("name") != "coverage":
continue
return 95.0 if result.get("status") == "ok" else 0.0
return 0.0 return 0.0
def _summary_source_lines_over_500(summary: dict | None) -> int: def _summary_int(summary: dict, key: str) -> int:
if not isinstance(summary, dict): """Extract an int-like value from the summary, defaulting to 0."""
return 0 value = summary.get(key)
results = summary.get("results", []) if isinstance(value, int):
if not isinstance(results, list): return value
return 0 if isinstance(value, float):
for result in results: return int(value)
if not isinstance(result, dict):
continue
if result.get("name") != "hygiene":
continue
issues = result.get("issues", [])
if not isinstance(issues, list):
return 0
return sum(1 for issue in issues if isinstance(issue, str) and "500" in issue)
return 0 return 0
def _fetch_existing_counter(pushgateway_url: str, metric: str, labels: dict[str, str]) -> float: def _fetch_existing_counter(pushgateway_url: str, metric: str, labels: dict[str, str]) -> float:
"""Return the current counter value for a labeled metric if present."""
text = _read_text(f"{pushgateway_url.rstrip('/')}/metrics") text = _read_text(f"{pushgateway_url.rstrip('/')}/metrics")
for line in text.splitlines(): for line in text.splitlines():
if not line.startswith(metric + "{"): if not line.startswith(metric + "{"):
@ -146,10 +140,11 @@ def _build_payload(
failed_count: int, failed_count: int,
branch: str, branch: str,
build_number: str, build_number: str,
workspace_coverage_percent: float,
source_lines_over_500: int,
summary: dict | None = None, summary: dict | None = None,
workspace_line_coverage_percent: float = 0.0,
source_lines_over_500: int = 0,
) -> str: ) -> str:
"""Build the Pushgateway payload for the current suite run."""
passed = max(tests["tests"] - tests["failures"] - tests["errors"] - tests["skipped"], 0) passed = max(tests["tests"] - tests["failures"] - tests["errors"] - tests["skipped"], 0)
build_labels = _label_str( build_labels = _label_str(
{ {
@ -173,9 +168,9 @@ def _build_payload(
"# TYPE titan_iac_quality_gate_build_info gauge", "# TYPE titan_iac_quality_gate_build_info gauge",
f"titan_iac_quality_gate_build_info{build_labels} 1", f"titan_iac_quality_gate_build_info{build_labels} 1",
"# TYPE platform_quality_gate_workspace_line_coverage_percent gauge", "# TYPE platform_quality_gate_workspace_line_coverage_percent gauge",
f'platform_quality_gate_workspace_line_coverage_percent{{suite="{suite}"}} {workspace_coverage_percent:.3f}', f'platform_quality_gate_workspace_line_coverage_percent{{suite="{suite}"}} {workspace_line_coverage_percent:.3f}',
"# TYPE platform_quality_gate_source_lines_over_500_total gauge", "# TYPE platform_quality_gate_source_lines_over_500_total gauge",
f'platform_quality_gate_source_lines_over_500_total{{suite="{suite}"}} {max(source_lines_over_500, 0)}', f'platform_quality_gate_source_lines_over_500_total{{suite="{suite}"}} {source_lines_over_500}',
] ]
results = summary.get("results", []) if isinstance(summary, dict) else [] results = summary.get("results", []) if isinstance(summary, dict) else []
if results: if results:
@ -192,7 +187,8 @@ def _build_payload(
def main() -> int: def main() -> int:
suite = os.getenv("SUITE_NAME", "titan-iac") """Publish the quality-gate metrics and print a compact run summary."""
suite = os.getenv("SUITE_NAME", "titan_iac")
pushgateway_url = os.getenv("PUSHGATEWAY_URL", "http://platform-quality-gateway.monitoring.svc.cluster.local:9091") pushgateway_url = os.getenv("PUSHGATEWAY_URL", "http://platform-quality-gateway.monitoring.svc.cluster.local:9091")
job_name = os.getenv("QUALITY_GATE_JOB_NAME", "platform-quality-ci") job_name = os.getenv("QUALITY_GATE_JOB_NAME", "platform-quality-ci")
junit_glob = os.getenv("JUNIT_GLOB", os.getenv("JUNIT_PATH", "build/junit-*.xml")) junit_glob = os.getenv("JUNIT_GLOB", os.getenv("JUNIT_PATH", "build/junit-*.xml"))
@ -205,8 +201,8 @@ def main() -> int:
exit_code = _read_exit_code(exit_code_path) exit_code = _read_exit_code(exit_code_path)
status = "ok" if exit_code == 0 else "failed" status = "ok" if exit_code == 0 else "failed"
summary = _load_summary(summary_path) summary = _load_summary(summary_path)
workspace_coverage_percent = _summary_coverage_percent(summary) workspace_line_coverage_percent = _summary_float(summary, "workspace_line_coverage_percent")
source_lines_over_500 = _summary_source_lines_over_500(summary) source_lines_over_500 = _summary_int(summary, "source_lines_over_500")
ok_count = int( ok_count = int(
_fetch_existing_counter( _fetch_existing_counter(
@ -235,9 +231,9 @@ def main() -> int:
failed_count=failed_count, failed_count=failed_count,
branch=branch, branch=branch,
build_number=build_number, build_number=build_number,
workspace_coverage_percent=workspace_coverage_percent,
source_lines_over_500=source_lines_over_500,
summary=summary, summary=summary,
workspace_line_coverage_percent=workspace_line_coverage_percent,
source_lines_over_500=source_lines_over_500,
) )
push_url = f"{pushgateway_url.rstrip('/')}/metrics/job/{job_name}/suite/{suite}" push_url = f"{pushgateway_url.rstrip('/')}/metrics/job/{job_name}/suite/{suite}"
_post_text(push_url, payload) _post_text(push_url, payload)
@ -249,11 +245,11 @@ def main() -> int:
"tests_failed": tests["failures"], "tests_failed": tests["failures"],
"tests_error": tests["errors"], "tests_error": tests["errors"],
"tests_skipped": tests["skipped"], "tests_skipped": tests["skipped"],
"workspace_coverage_percent": round(workspace_coverage_percent, 3),
"source_lines_over_500": source_lines_over_500,
"ok_count": ok_count, "ok_count": ok_count,
"failed_count": failed_count, "failed_count": failed_count,
"checks_recorded": len(summary.get("results", [])) if isinstance(summary, dict) else 0, "checks_recorded": len(summary.get("results", [])) if isinstance(summary, dict) else 0,
"workspace_line_coverage_percent": workspace_line_coverage_percent,
"source_lines_over_500": source_lines_over_500,
} }
print(json.dumps(summary, sort_keys=True)) print(json.dumps(summary, sort_keys=True))
return 0 return 0

View File

@ -21,6 +21,7 @@ resources:
- sui-metrics/kustomization.yaml - sui-metrics/kustomization.yaml
- openldap/kustomization.yaml - openldap/kustomization.yaml
- keycloak/kustomization.yaml - keycloak/kustomization.yaml
- quality/kustomization.yaml
- oauth2-proxy/kustomization.yaml - oauth2-proxy/kustomization.yaml
- mailu/kustomization.yaml - mailu/kustomization.yaml
- jenkins/kustomization.yaml - jenkins/kustomization.yaml

View File

@ -0,0 +1,35 @@
# clusters/atlas/flux-system/applications/quality/kustomization.yaml
apiVersion: kustomize.toolkit.fluxcd.io/v1
kind: Kustomization
metadata:
name: quality
namespace: flux-system
spec:
interval: 10m
path: ./services/quality
prune: true
sourceRef:
kind: GitRepository
name: flux-system
targetNamespace: quality
dependsOn:
- name: traefik
- name: cert-manager
- name: keycloak
- name: vault
- name: postgres
healthChecks:
- apiVersion: apps/v1
kind: Deployment
name: sonarqube
namespace: quality
- apiVersion: apps/v1
kind: Deployment
name: sonarqube-exporter
namespace: quality
- apiVersion: apps/v1
kind: Deployment
name: oauth2-proxy-sonarqube
namespace: quality
wait: false
timeout: 20m

File diff suppressed because it is too large Load Diff

View File

@ -203,6 +203,32 @@ data:
} }
} }
} }
pipelineJob('arcanagon') {
properties {
pipelineTriggers {
triggers {
scmTrigger {
scmpoll_spec('H/5 * * * *')
ignorePostCommitHooks(false)
}
}
}
}
definition {
cpsScm {
scm {
git {
remote {
url('https://scm.bstein.dev/bstein/arcanagon.git')
credentials('gitea-pat')
}
branches('*/master')
}
}
scriptPath('Jenkinsfile')
}
}
}
pipelineJob('pegasus') { pipelineJob('pegasus') {
properties { properties {
pipelineTriggers { pipelineTriggers {
@ -425,8 +451,10 @@ data:
- name: "default" - name: "default"
namespace: "jenkins" namespace: "jenkins"
workspaceVolume: workspaceVolume:
emptyDirWorkspaceVolume: dynamicPVC:
memory: false accessModes: "ReadWriteOnce"
requestsSize: "20Gi"
storageClassName: "astreae"
containers: containers:
- name: "jnlp" - name: "jnlp"
args: "^${computer.jnlpmac} ^${computer.name}" args: "^${computer.jnlpmac} ^${computer.name}"

View File

@ -24,6 +24,7 @@ resources:
- oneoffs/logs-oidc-secret-ensure-job.yaml - oneoffs/logs-oidc-secret-ensure-job.yaml
- oneoffs/metis-oidc-secret-ensure-job.yaml - oneoffs/metis-oidc-secret-ensure-job.yaml
- oneoffs/soteria-oidc-secret-ensure-job.yaml - oneoffs/soteria-oidc-secret-ensure-job.yaml
- oneoffs/quality-oidc-secret-ensure-job.yaml
- oneoffs/metis-ssh-keys-secret-ensure-job.yaml - oneoffs/metis-ssh-keys-secret-ensure-job.yaml
- oneoffs/harbor-oidc-secret-ensure-job.yaml - oneoffs/harbor-oidc-secret-ensure-job.yaml
- oneoffs/vault-oidc-secret-ensure-job.yaml - oneoffs/vault-oidc-secret-ensure-job.yaml

View File

@ -0,0 +1,198 @@
# services/keycloak/oneoffs/quality-oidc-secret-ensure-job.yaml
# One-off job for sso/quality-oidc-secret-ensure-1.
# Purpose: ensure the SonarQube oauth2-proxy OIDC client and Vault secret exist.
# Keep this completed Job around; bump the suffix if it ever needs to be rerun.
apiVersion: batch/v1
kind: Job
metadata:
name: quality-oidc-secret-ensure-1
namespace: sso
spec:
backoffLimit: 0
template:
metadata:
annotations:
vault.hashicorp.com/agent-inject: "true"
vault.hashicorp.com/agent-pre-populate-only: "true"
vault.hashicorp.com/role: "sso-secrets"
vault.hashicorp.com/agent-inject-secret-keycloak-admin-env.sh: "kv/data/atlas/shared/keycloak-admin"
vault.hashicorp.com/agent-inject-template-keycloak-admin-env.sh: |
{{ with secret "kv/data/atlas/shared/keycloak-admin" }}
export KEYCLOAK_ADMIN="{{ .Data.data.username }}"
export KEYCLOAK_ADMIN_USER="{{ .Data.data.username }}"
export KEYCLOAK_ADMIN_PASSWORD="{{ .Data.data.password }}"
{{ end }}
spec:
serviceAccountName: mas-secrets-ensure
restartPolicy: Never
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: node-role.kubernetes.io/worker
operator: Exists
preferredDuringSchedulingIgnoredDuringExecution:
- weight: 100
preference:
matchExpressions:
- key: kubernetes.io/arch
operator: In
values: ["arm64"]
containers:
- name: apply
image: bitnami/kubectl@sha256:554ab88b1858e8424c55de37ad417b16f2a0e65d1607aa0f3fe3ce9b9f10b131
command: ["/bin/sh", "-c"]
args:
- |
set -euo pipefail
. /vault/secrets/keycloak-admin-env.sh
KC_URL="http://keycloak.sso.svc.cluster.local"
ACCESS_TOKEN=""
for attempt in 1 2 3 4 5; do
TOKEN_JSON="$(curl -sS -X POST "$KC_URL/realms/master/protocol/openid-connect/token" \
-H 'Content-Type: application/x-www-form-urlencoded' \
-d "grant_type=password" \
-d "client_id=admin-cli" \
-d "username=${KEYCLOAK_ADMIN}" \
-d "password=${KEYCLOAK_ADMIN_PASSWORD}" || true)"
ACCESS_TOKEN="$(echo "$TOKEN_JSON" | jq -r '.access_token' 2>/dev/null || true)"
if [ -n "$ACCESS_TOKEN" ] && [ "$ACCESS_TOKEN" != "null" ]; then
break
fi
echo "Keycloak token request failed (attempt ${attempt})" >&2
sleep $((attempt * 2))
done
if [ -z "$ACCESS_TOKEN" ] || [ "$ACCESS_TOKEN" = "null" ]; then
echo "Failed to fetch Keycloak admin token" >&2
exit 1
fi
CLIENT_QUERY="$(curl -sS -H "Authorization: Bearer ${ACCESS_TOKEN}" \
"$KC_URL/admin/realms/atlas/clients?clientId=sonarqube" || true)"
CLIENT_ID="$(echo "$CLIENT_QUERY" | jq -r '.[0].id' 2>/dev/null || true)"
if [ -z "$CLIENT_ID" ] || [ "$CLIENT_ID" = "null" ]; then
create_payload='{"clientId":"sonarqube","enabled":true,"protocol":"openid-connect","publicClient":false,"standardFlowEnabled":true,"implicitFlowEnabled":false,"directAccessGrantsEnabled":false,"serviceAccountsEnabled":false,"redirectUris":["https://quality.bstein.dev/oauth2/callback"],"webOrigins":["https://quality.bstein.dev"],"rootUrl":"https://quality.bstein.dev","baseUrl":"/"}'
status="$(curl -sS -o /dev/null -w "%{http_code}" -X POST \
-H "Authorization: Bearer ${ACCESS_TOKEN}" \
-H 'Content-Type: application/json' \
-d "${create_payload}" \
"$KC_URL/admin/realms/atlas/clients")"
if [ "$status" != "201" ] && [ "$status" != "204" ] && [ "$status" != "409" ]; then
echo "Keycloak client create failed (status ${status})" >&2
exit 1
fi
CLIENT_QUERY="$(curl -sS -H "Authorization: Bearer ${ACCESS_TOKEN}" \
"$KC_URL/admin/realms/atlas/clients?clientId=sonarqube" || true)"
CLIENT_ID="$(echo "$CLIENT_QUERY" | jq -r '.[0].id' 2>/dev/null || true)"
fi
if [ -z "$CLIENT_ID" ] || [ "$CLIENT_ID" = "null" ]; then
echo "Keycloak client sonarqube not found" >&2
exit 1
fi
SCOPE_ID="$(curl -sS -H "Authorization: Bearer ${ACCESS_TOKEN}" \
"$KC_URL/admin/realms/atlas/client-scopes?search=groups" | jq -r '.[] | select(.name=="groups") | .id' 2>/dev/null | head -n1 || true)"
if [ -z "$SCOPE_ID" ] || [ "$SCOPE_ID" = "null" ]; then
echo "Keycloak client scope groups not found" >&2
exit 1
fi
DEFAULT_SCOPES="$(curl -sS -H "Authorization: Bearer ${ACCESS_TOKEN}" \
"$KC_URL/admin/realms/atlas/clients/${CLIENT_ID}/default-client-scopes" || true)"
OPTIONAL_SCOPES="$(curl -sS -H "Authorization: Bearer ${ACCESS_TOKEN}" \
"$KC_URL/admin/realms/atlas/clients/${CLIENT_ID}/optional-client-scopes" || true)"
if ! echo "$DEFAULT_SCOPES" | jq -e '.[] | select(.name=="groups")' >/dev/null 2>&1 \
&& ! echo "$OPTIONAL_SCOPES" | jq -e '.[] | select(.name=="groups")' >/dev/null 2>&1; then
status="$(curl -sS -o /dev/null -w "%{http_code}" -X PUT \
-H "Authorization: Bearer ${ACCESS_TOKEN}" \
"$KC_URL/admin/realms/atlas/clients/${CLIENT_ID}/optional-client-scopes/${SCOPE_ID}")"
if [ "$status" != "200" ] && [ "$status" != "201" ] && [ "$status" != "204" ]; then
status="$(curl -sS -o /dev/null -w "%{http_code}" -X POST \
-H "Authorization: Bearer ${ACCESS_TOKEN}" \
"$KC_URL/admin/realms/atlas/clients/${CLIENT_ID}/optional-client-scopes/${SCOPE_ID}")"
if [ "$status" != "200" ] && [ "$status" != "201" ] && [ "$status" != "204" ]; then
echo "Failed to attach groups client scope to sonarqube (status ${status})" >&2
exit 1
fi
fi
fi
update_payload='{"enabled":true,"clientId":"sonarqube","protocol":"openid-connect","publicClient":false,"standardFlowEnabled":true,"implicitFlowEnabled":false,"directAccessGrantsEnabled":false,"serviceAccountsEnabled":false,"redirectUris":["https://quality.bstein.dev/oauth2/callback"],"webOrigins":["https://quality.bstein.dev"],"rootUrl":"https://quality.bstein.dev","baseUrl":"/"}'
status="$(curl -sS -o /dev/null -w "%{http_code}" -X PUT \
-H "Authorization: Bearer ${ACCESS_TOKEN}" \
-H 'Content-Type: application/json' \
-d "${update_payload}" \
"$KC_URL/admin/realms/atlas/clients/${CLIENT_ID}")"
if [ "$status" != "204" ]; then
echo "Keycloak client update failed (status ${status})" >&2
exit 1
fi
CLIENT_SECRET="$(curl -sS -H "Authorization: Bearer ${ACCESS_TOKEN}" \
"$KC_URL/admin/realms/atlas/clients/${CLIENT_ID}/client-secret" | jq -r '.value' 2>/dev/null || true)"
if [ -z "$CLIENT_SECRET" ] || [ "$CLIENT_SECRET" = "null" ]; then
echo "Keycloak client secret not found" >&2
exit 1
fi
vault_addr="${VAULT_ADDR:-http://vault.vault.svc.cluster.local:8200}"
vault_role="${VAULT_ROLE:-sso-secrets}"
jwt="$(cat /var/run/secrets/kubernetes.io/serviceaccount/token)"
login_payload="$(jq -nc --arg jwt "${jwt}" --arg role "${vault_role}" '{jwt:$jwt, role:$role}')"
vault_token="$(curl -sS --request POST --data "${login_payload}" \
"${vault_addr}/v1/auth/kubernetes/login" | jq -r '.auth.client_token')"
if [ -z "${vault_token}" ] || [ "${vault_token}" = "null" ]; then
echo "vault login failed" >&2
exit 1
fi
read_status="$(curl -sS -o /tmp/sonarqube-oidc-read.json -w "%{http_code}" \
-H "X-Vault-Token: ${vault_token}" \
"${vault_addr}/v1/kv/data/atlas/quality/sonarqube-oidc" || true)"
COOKIE_SECRET=""
if [ "${read_status}" = "200" ]; then
COOKIE_SECRET="$(jq -r '.data.data.cookie_secret // empty' /tmp/sonarqube-oidc-read.json)"
elif [ "${read_status}" != "404" ]; then
echo "Vault read failed (status ${read_status})" >&2
cat /tmp/sonarqube-oidc-read.json >&2 || true
exit 1
fi
if [ -n "${COOKIE_SECRET}" ]; then
length="$(printf '%s' "${COOKIE_SECRET}" | wc -c | tr -d ' ')"
if [ "${length}" != "16" ] && [ "${length}" != "24" ] && [ "${length}" != "32" ]; then
COOKIE_SECRET=""
fi
fi
if [ -z "${COOKIE_SECRET}" ]; then
COOKIE_SECRET="$(openssl rand -hex 16 | tr -d '\n')"
fi
payload="$(jq -nc \
--arg client_id "sonarqube" \
--arg client_secret "${CLIENT_SECRET}" \
--arg cookie_secret "${COOKIE_SECRET}" \
'{data:{client_id:$client_id,client_secret:$client_secret,cookie_secret:$cookie_secret}}')"
write_status="$(curl -sS -o /tmp/sonarqube-oidc-write.json -w "%{http_code}" -X POST \
-H "X-Vault-Token: ${vault_token}" \
-H 'Content-Type: application/json' \
-d "${payload}" "${vault_addr}/v1/kv/data/atlas/quality/sonarqube-oidc")"
if [ "${write_status}" != "200" ] && [ "${write_status}" != "204" ]; then
echo "Vault write failed (status ${write_status})" >&2
cat /tmp/sonarqube-oidc-write.json >&2 || true
exit 1
fi
verify_status="$(curl -sS -o /tmp/sonarqube-oidc-verify.json -w "%{http_code}" \
-H "X-Vault-Token: ${vault_token}" \
"${vault_addr}/v1/kv/data/atlas/quality/sonarqube-oidc" || true)"
if [ "${verify_status}" != "200" ]; then
echo "Vault verify failed (status ${verify_status})" >&2
cat /tmp/sonarqube-oidc-verify.json >&2 || true
exit 1
fi
echo "SonarQube OIDC secret ready in Vault"

View File

@ -32,13 +32,14 @@ spec:
} }
} }
environment { environment {
SUITE_NAME = 'data-prepper' SUITE_NAME = 'data_prepper'
PUSHGATEWAY_URL = 'http://platform-quality-gateway.monitoring.svc.cluster.local:9091' PUSHGATEWAY_URL = 'http://platform-quality-gateway.monitoring.svc.cluster.local:9091'
QUALITY_GATE_SONARQUBE_REPORT = 'build/sonarqube-quality-gate.json'
QUALITY_GATE_IRONBANK_REPORT = 'build/ironbank-compliance.json'
} }
parameters { parameters {
string(name: 'HARBOR_REPO', defaultValue: 'registry.bstein.dev/monitoring/data-prepper', description: 'Docker repository for Data Prepper') string(name: 'HARBOR_REPO', defaultValue: 'registry.bstein.dev/streaming/data-prepper', description: 'Docker repository for Data Prepper')
string(name: 'IMAGE_TAG', defaultValue: '2.8.0', description: 'Image tag to publish') string(name: 'IMAGE_TAG', defaultValue: '2.8.0', description: 'Image tag to publish')
booleanParam(name: 'PUSH_IMAGE', defaultValue: false, description: 'Publish image artifacts (manual release only)')
booleanParam(name: 'PUSH_LATEST', defaultValue: true, description: 'Also push the latest tag') booleanParam(name: 'PUSH_LATEST', defaultValue: true, description: 'Also push the latest tag')
} }
stages { stages {
@ -49,19 +50,88 @@ spec:
} }
} }
} }
stage('Build & Push (optional)') { stage('Collect quality evidence') {
when { steps {
expression { return params.PUSH_IMAGE } container('git') {
sh '''
set -euo pipefail
apk add --no-cache curl jq >/dev/null 2>&1 || true
mkdir -p build
sonar_report="${QUALITY_GATE_SONARQUBE_REPORT:-build/sonarqube-quality-gate.json}"
if [ ! -f "${sonar_report}" ]; then
if [ -n "${SONARQUBE_HOST_URL:-}" ] && [ -n "${SONARQUBE_PROJECT_KEY:-}" ]; then
host="${SONARQUBE_HOST_URL%/}"
query="$(printf '%s' "${SONARQUBE_PROJECT_KEY}" | sed 's/ /%20/g')"
sonar_ok=0
if [ -n "${SONARQUBE_TOKEN:-}" ]; then
auth="$(printf '%s:' "${SONARQUBE_TOKEN}" | base64 | tr -d '\\n')"
if curl -fsS -H "Authorization: Basic ${auth}" "${host}/api/qualitygates/project_status?projectKey=${query}" > "${sonar_report}"; then
sonar_ok=1
fi
else
if curl -fsS "${host}/api/qualitygates/project_status?projectKey=${query}" > "${sonar_report}"; then
sonar_ok=1
fi
fi
if [ "${sonar_ok}" -ne 1 ]; then
cat > "${sonar_report}" <<EOF
{
"status": "ERROR",
"error": "sonarqube query failed"
} }
EOF
fi
else
cat > "${sonar_report}" <<EOF
{
"status": "ERROR",
"note": "missing SONARQUBE_HOST_URL and/or SONARQUBE_PROJECT_KEY"
}
EOF
fi
fi
ironbank_report="${QUALITY_GATE_IRONBANK_REPORT:-build/ironbank-compliance.json}"
if [ ! -f "${ironbank_report}" ]; then
status="${IRONBANK_COMPLIANCE_STATUS:-unknown}"
compliant="${IRONBANK_COMPLIANT:-}"
if [ -n "${compliant}" ]; then
compliant_lc="$(printf '%s' "${compliant}" | tr '[:upper:]' '[:lower:]')"
compliant_json="null"
case "${compliant_lc}" in
1|true|yes|on) compliant_json="true" ;;
0|false|no|off) compliant_json="false" ;;
esac
cat > "${ironbank_report}" <<EOF
{
"status": "${status}",
"compliant": ${compliant_json},
"note": "Set IRONBANK_COMPLIANCE_STATUS/IRONBANK_COMPLIANT or write build/ironbank-compliance.json in image-building repos."
}
EOF
else
cat > "${ironbank_report}" <<EOF
{
"status": "${status}",
"note": "Set IRONBANK_COMPLIANCE_STATUS/IRONBANK_COMPLIANT or write build/ironbank-compliance.json in image-building repos."
}
EOF
fi
fi
'''
}
}
}
stage('Build & Push') {
steps { steps {
container('kaniko') { container('kaniko') {
withCredentials([usernamePassword(credentialsId: 'harbor-robot', usernameVariable: 'HARBOR_USERNAME', passwordVariable: 'HARBOR_PASSWORD')]) { withCredentials([usernamePassword(credentialsId: 'harbor-robot', usernameVariable: 'HARBOR_USERNAME', passwordVariable: 'HARBOR_PASSWORD')]) {
sh ''' sh '''
set -euo pipefail set -euo pipefail
if [ -z "${HARBOR_REPO:-}" ]; then if [ -z "${HARBOR_REPO:-}" ] || [ "${HARBOR_REPO}" = "registry.bstein.dev/monitoring/data-prepper" ]; then
HARBOR_REPO="registry.bstein.dev/monitoring/data-prepper" HARBOR_REPO="registry.bstein.dev/streaming/data-prepper"
fi fi
IMAGE_TAG_SAFE="${IMAGE_TAG:-2.8.0}"
mkdir -p /kaniko/.docker mkdir -p /kaniko/.docker
ref_host="$(echo "${HARBOR_REPO}" | cut -d/ -f1)" ref_host="$(echo "${HARBOR_REPO}" | cut -d/ -f1)"
auth="$(printf "%s:%s" "${HARBOR_USERNAME}" "${HARBOR_PASSWORD}" | base64 | tr -d '\\n')" auth="$(printf "%s:%s" "${HARBOR_USERNAME}" "${HARBOR_PASSWORD}" | base64 | tr -d '\\n')"
@ -74,8 +144,8 @@ spec:
} }
} }
EOF EOF
dest_args="--destination ${HARBOR_REPO}:${IMAGE_TAG_SAFE}" dest_args="--destination ${HARBOR_REPO}:${IMAGE_TAG}"
if [ "${PUSH_LATEST:-true}" = "true" ]; then if [ "${PUSH_LATEST}" = "true" ]; then
dest_args="${dest_args} --destination ${HARBOR_REPO}:latest" dest_args="${dest_args} --destination ${HARBOR_REPO}:latest"
fi fi
/kaniko/executor \ /kaniko/executor \
@ -88,32 +158,22 @@ EOF
} }
} }
} }
stage('Smoke test suite') {
steps {
container('kaniko') {
sh '''
set -euo pipefail
/kaniko/executor \
--context "${WORKSPACE}" \
--dockerfile "${WORKSPACE}/dockerfiles/Dockerfile.data-prepper" \
--verbosity info \
--no-push
'''
}
}
}
} }
post { post {
success { always {
script {
env.QUALITY_OUTCOME = currentBuild.currentResult == 'SUCCESS' ? 'ok' : 'failed'
}
container('git') { container('git') {
sh ''' sh '''
set -euo pipefail set -euo pipefail
apk add --no-cache curl >/dev/null 2>&1 || true apk add --no-cache curl jq >/dev/null 2>&1 || true
suite="${SUITE_NAME}" suite="${SUITE_NAME}"
gateway="${PUSHGATEWAY_URL}" gateway="${PUSHGATEWAY_URL}"
status="${QUALITY_OUTCOME:-failed}"
fetch_counter() { fetch_counter() {
status="$1" status_name="$1"
line="$(curl -fsS "${gateway}/metrics" 2>/dev/null | awk -v suite="${suite}" -v status="${status}" ' line="$(curl -fsS "${gateway}/metrics" 2>/dev/null | awk -v suite="${suite}" -v status="${status_name}" '
/platform_quality_gate_runs_total/ { /platform_quality_gate_runs_total/ {
if (index($0, "job=\\"platform-quality-ci\\"") && index($0, "suite=\\"" suite "\\"") && index($0, "status=\\"" status "\\"")) { if (index($0, "job=\\"platform-quality-ci\\"") && index($0, "suite=\\"" suite "\\"") && index($0, "status=\\"" status "\\"")) {
print $2 print $2
@ -125,71 +185,66 @@ EOF
} }
ok_count="$(fetch_counter ok)" ok_count="$(fetch_counter ok)"
failed_count="$(fetch_counter failed)" failed_count="$(fetch_counter failed)"
if [ "${status}" = "ok" ]; then
ok_count=$((ok_count + 1)) ok_count=$((ok_count + 1))
tests_passed=1 else
tests_failed=0
cat <<METRICS | curl -fsS --data-binary @- "${gateway}/metrics/job/platform-quality-ci/suite/${suite}" >/dev/null
# TYPE platform_quality_gate_runs_total counter
platform_quality_gate_runs_total{suite="${suite}",status="ok"} ${ok_count}
platform_quality_gate_runs_total{suite="${suite}",status="failed"} ${failed_count}
# TYPE data_prepper_quality_gate_tests_total gauge
data_prepper_quality_gate_tests_total{suite="${suite}",result="passed"} ${tests_passed}
data_prepper_quality_gate_tests_total{suite="${suite}",result="failed"} ${tests_failed}
data_prepper_quality_gate_tests_total{suite="${suite}",result="error"} 0
data_prepper_quality_gate_tests_total{suite="${suite}",result="skipped"} 0
# TYPE platform_quality_gate_workspace_line_coverage_percent gauge
platform_quality_gate_workspace_line_coverage_percent{suite="${suite}"} 100
# TYPE platform_quality_gate_source_lines_over_500_total gauge
platform_quality_gate_source_lines_over_500_total{suite="${suite}"} 0
# TYPE data_prepper_quality_gate_checks_total gauge
data_prepper_quality_gate_checks_total{suite="${suite}",check="build",result="ok"} 1
data_prepper_quality_gate_checks_total{suite="${suite}",check="coverage",result="ok"} 1
data_prepper_quality_gate_checks_total{suite="${suite}",check="loc",result="ok"} 1
METRICS
'''
}
}
failure {
container('git') {
sh '''
set -euo pipefail
apk add --no-cache curl >/dev/null 2>&1 || true
suite="${SUITE_NAME}"
gateway="${PUSHGATEWAY_URL}"
fetch_counter() {
status="$1"
line="$(curl -fsS "${gateway}/metrics" 2>/dev/null | awk -v suite="${suite}" -v status="${status}" '
/platform_quality_gate_runs_total/ {
if (index($0, "job=\\"platform-quality-ci\\"") && index($0, "suite=\\"" suite "\\"") && index($0, "status=\\"" status "\\"")) {
print $2
exit
}
}
' || true)"
[ -n "${line}" ] && printf '%s\n' "${line}" || printf '0\n'
}
ok_count="$(fetch_counter ok)"
failed_count="$(fetch_counter failed)"
failed_count=$((failed_count + 1)) failed_count=$((failed_count + 1))
tests_passed=0 fi
tests_failed=1 sonarqube_check="not_applicable"
cat <<METRICS | curl -fsS --data-binary @- "${gateway}/metrics/job/platform-quality-ci/suite/${suite}" >/dev/null if [ -f build/sonarqube-quality-gate.json ]; then
sonar_status="$(jq -r '.status // .projectStatus.status // .qualityGate.status // empty' build/sonarqube-quality-gate.json 2>/dev/null | tr '[:upper:]' '[:lower:]')"
if [ -n "${sonar_status}" ]; then
case "${sonar_status}" in
ok|pass|passed|success) sonarqube_check="ok" ;;
*) sonarqube_check="failed" ;;
esac
else
sonarqube_check="failed"
fi
fi
supply_chain_check="not_applicable"
if [ -f build/ironbank-compliance.json ]; then
compliant="$(jq -r '.compliant // empty' build/ironbank-compliance.json 2>/dev/null)"
if [ "${compliant}" = "true" ]; then
supply_chain_check="ok"
elif [ "${compliant}" = "false" ]; then
supply_chain_check="failed"
else
ironbank_status="$(jq -r '.status // .result // .compliance // empty' build/ironbank-compliance.json 2>/dev/null | tr '[:upper:]' '[:lower:]')"
case "${ironbank_status}" in
ok|pass|passed|success|compliant) supply_chain_check="ok" ;;
"") supply_chain_check="failed" ;;
*) supply_chain_check="failed" ;;
esac
fi
fi
gate_glue_check="ok"
if [ "${status}" != "ok" ]; then
gate_glue_check="failed"
fi
if ! cat <<METRICS | curl -fsS --data-binary @- "${gateway}/metrics/job/platform-quality-ci/suite/${suite}" >/dev/null; then
echo "warning: metrics push failed for suite=${suite}" >&2
fi
# TYPE platform_quality_gate_runs_total counter # TYPE platform_quality_gate_runs_total counter
platform_quality_gate_runs_total{suite="${suite}",status="ok"} ${ok_count} platform_quality_gate_runs_total{suite="${suite}",status="ok"} ${ok_count}
platform_quality_gate_runs_total{suite="${suite}",status="failed"} ${failed_count} platform_quality_gate_runs_total{suite="${suite}",status="failed"} ${failed_count}
# TYPE data_prepper_quality_gate_tests_total gauge # TYPE data_prepper_quality_gate_tests_total gauge
data_prepper_quality_gate_tests_total{suite="${suite}",result="passed"} ${tests_passed} data_prepper_quality_gate_tests_total{suite="${suite}",result="passed"} 0
data_prepper_quality_gate_tests_total{suite="${suite}",result="failed"} ${tests_failed} data_prepper_quality_gate_tests_total{suite="${suite}",result="failed"} 0
data_prepper_quality_gate_tests_total{suite="${suite}",result="error"} 0 data_prepper_quality_gate_tests_total{suite="${suite}",result="error"} 0
data_prepper_quality_gate_tests_total{suite="${suite}",result="skipped"} 0 data_prepper_quality_gate_tests_total{suite="${suite}",result="skipped"} 0
# TYPE platform_quality_gate_workspace_line_coverage_percent gauge # TYPE platform_quality_gate_workspace_line_coverage_percent gauge
platform_quality_gate_workspace_line_coverage_percent{suite="${suite}"} 0 platform_quality_gate_workspace_line_coverage_percent{suite="${suite}"} 0
# TYPE platform_quality_gate_source_lines_over_500_total gauge # TYPE platform_quality_gate_source_lines_over_500_total gauge
platform_quality_gate_source_lines_over_500_total{suite="${suite}"} 1 platform_quality_gate_source_lines_over_500_total{suite="${suite}"} 0
# TYPE data_prepper_quality_gate_checks_total gauge # TYPE data_prepper_quality_gate_checks_total gauge
data_prepper_quality_gate_checks_total{suite="${suite}",check="build",result="failed"} 1 data_prepper_quality_gate_checks_total{suite="${suite}",check="tests",result="not_applicable"} 1
data_prepper_quality_gate_checks_total{suite="${suite}",check="coverage",result="failed"} 1 data_prepper_quality_gate_checks_total{suite="${suite}",check="coverage",result="not_applicable"} 1
data_prepper_quality_gate_checks_total{suite="${suite}",check="loc",result="failed"} 1 data_prepper_quality_gate_checks_total{suite="${suite}",check="loc",result="not_applicable"} 1
data_prepper_quality_gate_checks_total{suite="${suite}",check="docs_naming",result="not_applicable"} 1
data_prepper_quality_gate_checks_total{suite="${suite}",check="gate_glue",result="${gate_glue_check}"} 1
data_prepper_quality_gate_checks_total{suite="${suite}",check="sonarqube",result="${sonarqube_check}"} 1
data_prepper_quality_gate_checks_total{suite="${suite}",check="supply_chain",result="${supply_chain_check}"} 1
METRICS METRICS
''' '''
} }

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@ -74,7 +74,7 @@ failures=0
check_http_suite "atlasbot" "http://atlasbot.comms.svc.cluster.local:8090/health" "200" '"status": "ok"' || failures=$((failures + 1)) check_http_suite "atlasbot" "http://atlasbot.comms.svc.cluster.local:8090/health" "200" '"status": "ok"' || failures=$((failures + 1))
check_http_suite "pegasus" "http://pegasus.jellyfin.svc.cluster.local/healthz" "200" || failures=$((failures + 1)) check_http_suite "pegasus" "http://pegasus.jellyfin.svc.cluster.local/healthz" "200" || failures=$((failures + 1))
check_http_suite "bstein-home" "http://bstein-dev-home-backend.bstein-dev-home.svc.cluster.local/api/healthz" "200" || failures=$((failures + 1)) check_http_suite "bstein_home" "http://bstein-dev-home-backend.bstein-dev-home.svc.cluster.local/api/healthz" "200" || failures=$((failures + 1))
if [ "${failures}" -gt 0 ]; then if [ "${failures}" -gt 0 ]; then
printf '[probe] completed with %s suite failure(s)\n' "${failures}" >&2 printf '[probe] completed with %s suite failure(s)\n' "${failures}" >&2

View File

@ -0,0 +1,16 @@
# services/quality/kustomization.yaml
# Kustomize entry point for the "quality" stack: SonarQube, its Prometheus
# exporter, and the oauth2-proxy front door, all in the `quality` namespace.
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
  # Namespace and service accounts first so dependents can reference them.
  - namespace.yaml
  - sonarqube-serviceaccount.yaml
  - quality-vault-serviceaccount.yaml
  # SonarQube server: storage, service, workload.
  - sonarqube-pvc.yaml
  - sonarqube-service.yaml
  - sonarqube-deployment.yaml
  # Prometheus exporter for SonarQube health / quality-gate metrics.
  - sonarqube-exporter-configmap.yaml
  - sonarqube-exporter-service.yaml
  - sonarqube-exporter-deployment.yaml
  # OIDC-authenticated external access (oauth2-proxy -> SonarQube).
  - oauth2-proxy-sonarqube.yaml
  - sonarqube-certificate.yaml
  - sonarqube-ingress.yaml

View File

@ -0,0 +1,6 @@
# services/quality/namespace.yaml
# Dedicated namespace for the code-quality stack (SonarQube + exporter +
# oauth2-proxy).
apiVersion: v1
kind: Namespace
metadata:
  name: quality

View File

@ -0,0 +1,118 @@
# services/quality/oauth2-proxy-sonarqube.yaml
# oauth2-proxy in front of SonarQube: terminates the OIDC login flow against
# the `atlas` Keycloak realm and forwards authenticated traffic to the
# in-cluster SonarQube service.
apiVersion: v1
kind: Service
metadata:
  name: oauth2-proxy-sonarqube
  namespace: quality
  labels:
    app: oauth2-proxy-sonarqube
spec:
  ports:
    # Ingress targets port 80; the proxy itself listens on 4180.
    - name: http
      port: 80
      targetPort: 4180
  selector:
    app: oauth2-proxy-sonarqube
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: oauth2-proxy-sonarqube
  namespace: quality
  labels:
    app: oauth2-proxy-sonarqube
spec:
  # Two replicas; both read the same client/cookie secrets from Vault, so
  # sessions are valid regardless of which replica handles a request.
  replicas: 2
  selector:
    matchLabels:
      app: oauth2-proxy-sonarqube
  template:
    metadata:
      labels:
        app: oauth2-proxy-sonarqube
      annotations:
        # Vault agent injector renders the OIDC client credentials as an
        # oauth2-proxy config file at /vault/secrets/oidc-config.
        vault.hashicorp.com/agent-inject: "true"
        vault.hashicorp.com/role: "quality"
        vault.hashicorp.com/agent-inject-secret-oidc-config: "kv/data/atlas/quality/sonarqube-oidc"
        vault.hashicorp.com/agent-inject-template-oidc-config: |
          {{- with secret "kv/data/atlas/quality/sonarqube-oidc" -}}
          client_id = "{{ .Data.data.client_id }}"
          client_secret = "{{ .Data.data.client_secret }}"
          cookie_secret = "{{ .Data.data.cookie_secret }}"
          {{- end -}}
    spec:
      # Service account bound to the Vault "quality" role for secret injection.
      serviceAccountName: quality-vault-sync
      nodeSelector:
        node-role.kubernetes.io/worker: "true"
      affinity:
        nodeAffinity:
          # Hard-require arm64 rpi5 workers; the preferred term is redundant
          # with the required one but kept for symmetry with sibling stacks.
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
              - matchExpressions:
                  - key: kubernetes.io/arch
                    operator: In
                    values: ["arm64"]
                  - key: hardware
                    operator: In
                    values: ["rpi5"]
          preferredDuringSchedulingIgnoredDuringExecution:
            - weight: 100
              preference:
                matchExpressions:
                  - key: hardware
                    operator: In
                    values: ["rpi5"]
      containers:
        - name: oauth2-proxy
          image: quay.io/oauth2-proxy/oauth2-proxy:v7.6.0
          imagePullPolicy: IfNotPresent
          args:
            - --provider=oidc
            # client_id/client_secret/cookie_secret come from the Vault-rendered file.
            - --config=/vault/secrets/oidc-config
            - --redirect-url=https://quality.bstein.dev/oauth2/callback
            - --oidc-issuer-url=https://sso.bstein.dev/realms/atlas
            - --scope=openid profile email groups
            # Any email domain is accepted; authorization is group-based below.
            - --email-domain=*
            # Groups listed both bare and with a leading slash — presumably to
            # match both Keycloak group-path styles; confirm against the realm.
            - --allowed-group=admin
            - --allowed-group=/admin
            - --allowed-group=dev
            - --allowed-group=/dev
            - --set-xauthrequest=true
            - --pass-access-token=true
            - --set-authorization-header=true
            - --cookie-secure=true
            - --cookie-samesite=lax
            - --cookie-refresh=20m
            - --cookie-expire=168h
            - --insecure-oidc-allow-unverified-email=true
            - --upstream=http://sonarqube.quality.svc.cluster.local:9000
            - --http-address=0.0.0.0:4180
            - --skip-provider-button=true
            - --approval-prompt=auto
            # Allow API clients to pass a bearer token instead of a cookie.
            - --skip-jwt-bearer-tokens=true
            - --oidc-groups-claim=groups
            - --cookie-domain=quality.bstein.dev
          ports:
            - containerPort: 4180
              name: http
          readinessProbe:
            httpGet:
              path: /ping
              port: 4180
            initialDelaySeconds: 5
            periodSeconds: 10
          livenessProbe:
            httpGet:
              path: /ping
              port: 4180
            initialDelaySeconds: 20
            periodSeconds: 20
          resources:
            requests:
              cpu: 25m
              memory: 64Mi
            limits:
              cpu: 250m
              memory: 256Mi

View File

@ -0,0 +1,7 @@
# services/quality/quality-vault-serviceaccount.yaml
# Identity used by pods that need Vault agent injection under the "quality"
# Vault role (see the oauth2-proxy deployment's serviceAccountName).
apiVersion: v1
kind: ServiceAccount
metadata:
  name: quality-vault-sync
  namespace: quality

View File

@ -0,0 +1,14 @@
# services/quality/sonarqube-certificate.yaml
# cert-manager certificate for quality.bstein.dev; the resulting `quality-tls`
# secret is consumed by the SonarQube ingress TLS block.
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
  name: quality-tls
  namespace: quality
spec:
  secretName: quality-tls
  issuerRef:
    kind: ClusterIssuer
    name: letsencrypt
  dnsNames:
    - quality.bstein.dev

View File

@ -0,0 +1,122 @@
# services/quality/sonarqube-deployment.yaml
# Single-replica SonarQube server backed by the `sonarqube-data` PVC and a
# Postgres database whose JDBC credentials are injected by the Vault agent.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: sonarqube
  namespace: quality
  labels:
    app: sonarqube
spec:
  replicas: 1
  # The data PVC is ReadWriteOnce: a default RollingUpdate would leave the
  # replacement pod Pending while the old pod still holds the volume.
  # Recreate tears the old pod down first and avoids that deadlock.
  strategy:
    type: Recreate
  selector:
    matchLabels:
      app: sonarqube
  template:
    metadata:
      labels:
        app: sonarqube
      annotations:
        # Vault agent renders the DB credentials as a sourceable env file at
        # /vault/secrets/sonarqube-db-env.sh (consumed in the container args).
        vault.hashicorp.com/agent-inject: "true"
        vault.hashicorp.com/role: "quality"
        vault.hashicorp.com/agent-inject-secret-sonarqube-db-env.sh: "kv/data/atlas/quality/sonarqube-db"
        vault.hashicorp.com/agent-inject-template-sonarqube-db-env.sh: |
          {{- with secret "kv/data/atlas/quality/sonarqube-db" -}}
          export SONAR_JDBC_USERNAME="{{ .Data.data.username }}"
          export SONAR_JDBC_PASSWORD="{{ .Data.data.password }}"
          {{- end -}}
    spec:
      serviceAccountName: sonarqube
      nodeSelector:
        node-role.kubernetes.io/worker: "true"
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
              - matchExpressions:
                  - key: kubernetes.io/arch
                    operator: In
                    values: ["arm64"]
                  - key: hardware
                    operator: In
                    values: ["rpi5"]
          preferredDuringSchedulingIgnoredDuringExecution:
            - weight: 100
              preference:
                matchExpressions:
                  - key: hardware
                    operator: In
                    values: ["rpi5"]
      initContainers:
        # The SonarQube image runs as UID 1000; pre-create the directory
        # layout on the PVC and make it writable before the server starts.
        - name: prepare-volume-permissions
          image: busybox:1.36
          command:
            - /bin/sh
            - -ec
            - |
              mkdir -p /opt/sonarqube/data /opt/sonarqube/extensions /opt/sonarqube/logs /opt/sonarqube/temp
              chown -R 1000:1000 /opt/sonarqube
          volumeMounts:
            - name: sonarqube-data
              mountPath: /opt/sonarqube
      containers:
        - name: sonarqube
          image: sonarqube:lts-community
          imagePullPolicy: IfNotPresent
          command:
            - /bin/bash
            - -ec
          args:
            # Source the Vault-rendered JDBC credentials, then hand off to the
            # stock image entrypoint.
            - |
              set -euo pipefail
              . /vault/secrets/sonarqube-db-env.sh
              exec /opt/sonarqube/docker/entrypoint.sh
          env:
            - name: SONAR_JDBC_URL
              value: jdbc:postgresql://postgres-service.postgres.svc.cluster.local:5432/sonarqube
            # Skip the embedded Elasticsearch bootstrap checks (e.g. the
            # vm.max_map_count sysctl) that would otherwise require node-level
            # tuning on these hosts.
            - name: SONAR_ES_BOOTSTRAP_CHECKS_DISABLE
              value: "true"
            - name: SONAR_WEB_HOST
              value: "0.0.0.0"
          ports:
            - containerPort: 9000
              name: http
          # SonarQube startup can take minutes; probe windows are generous
          # (readiness allows ~2 min after the initial delay, liveness ~2 more).
          readinessProbe:
            httpGet:
              path: /api/system/status
              port: 9000
            initialDelaySeconds: 60
            timeoutSeconds: 5
            periodSeconds: 10
            failureThreshold: 12
          livenessProbe:
            httpGet:
              path: /api/system/status
              port: 9000
            initialDelaySeconds: 120
            timeoutSeconds: 5
            periodSeconds: 20
            failureThreshold: 6
          resources:
            requests:
              cpu: 500m
              memory: 2Gi
            limits:
              cpu: "2"
              memory: 4Gi
          # One PVC, split via subPath into the four writable directories the
          # init container prepared above.
          volumeMounts:
            - name: sonarqube-data
              mountPath: /opt/sonarqube/data
              subPath: data
            - name: sonarqube-data
              mountPath: /opt/sonarqube/extensions
              subPath: extensions
            - name: sonarqube-data
              mountPath: /opt/sonarqube/logs
              subPath: logs
            - name: sonarqube-data
              mountPath: /opt/sonarqube/temp
              subPath: temp
      volumes:
        - name: sonarqube-data
          persistentVolumeClaim:
            claimName: sonarqube-data

View File

@ -0,0 +1,192 @@
# services/quality/sonarqube-exporter-configmap.yaml
# Stdlib-only Python exporter that translates SonarQube system status and
# per-project quality-gate results into Prometheus metrics on :9798.
apiVersion: v1
kind: ConfigMap
metadata:
  name: sonarqube-exporter-script
  namespace: quality
data:
  exporter.py: |
    #!/usr/bin/env python3
    # Prometheus exporter for SonarQube. Serves /metrics (cached) and
    # /-/healthy + /healthz (always fast) on SONARQUBE_EXPORTER_PORT.
    import base64
    import json
    import os
    import threading
    import time
    import urllib.error
    import urllib.parse
    import urllib.request
    # ThreadingHTTPServer keeps /-/healthy responsive while a cold-cache
    # /metrics refresh is in flight. A refresh can issue up to
    # SONARQUBE_PROJECT_LIMIT serial API calls (each with a 10 s default
    # timeout); with a single-threaded HTTPServer that would block the
    # liveness probe and restart-loop the pod.
    from http.server import BaseHTTPRequestHandler, ThreadingHTTPServer
    SONARQUBE_URL = os.getenv("SONARQUBE_URL", "http://sonarqube.quality.svc.cluster.local:9000").strip().rstrip("/")
    SONARQUBE_TOKEN = os.getenv("SONARQUBE_TOKEN", "").strip()
    SONARQUBE_TIMEOUT_SECONDS = float(os.getenv("SONARQUBE_TIMEOUT_SECONDS", "10"))
    SONARQUBE_EXPORTER_PORT = int(os.getenv("SONARQUBE_EXPORTER_PORT", "9798"))
    SONARQUBE_EXPORTER_CACHE_TTL_SECONDS = int(os.getenv("SONARQUBE_EXPORTER_CACHE_TTL_SECONDS", "45"))
    SONARQUBE_PROJECT_LIMIT = int(os.getenv("SONARQUBE_PROJECT_LIMIT", "200"))
    # Cache shared across request-handler threads; guarded by CACHE_LOCK.
    CACHE_LOCK = threading.Lock()
    CACHE_EXPIRES_AT = 0.0
    CACHE_BODY = ""
    def _escape(value: str) -> str:
        # Escape a label value per the Prometheus exposition format
        # (backslash first so later escapes are not double-escaped).
        return value.replace("\\", "\\\\").replace("\"", "\\\"").replace("\n", "\\n")
    def _fetch_json(path: str):
        # GET a SonarQube API path; returns (payload, "") on success or
        # (None, error_label) on failure. Never raises.
        url = f"{SONARQUBE_URL}{path}"
        req = urllib.request.Request(url, method="GET")
        if SONARQUBE_TOKEN:
            # SonarQube token auth: token as Basic username, empty password.
            encoded = base64.b64encode(f"{SONARQUBE_TOKEN}:".encode("utf-8")).decode("utf-8")
            req.add_header("Authorization", f"Basic {encoded}")
        try:
            with urllib.request.urlopen(req, timeout=SONARQUBE_TIMEOUT_SECONDS) as resp:
                payload = json.loads(resp.read().decode("utf-8"))
                return payload, ""
        except urllib.error.HTTPError as exc:
            return None, f"http_{exc.code}"
        except Exception as exc: # noqa: BLE001
            return None, exc.__class__.__name__
    def _metrics_body() -> str:
        # Build the full exposition-format body from live API calls.
        lines = []
        now = time.time()
        scrape_success = 1
        lines.append("# HELP sonarqube_exporter_last_scrape_timestamp_seconds Unix timestamp when exporter last refreshed data.")
        lines.append("# TYPE sonarqube_exporter_last_scrape_timestamp_seconds gauge")
        lines.append(f"sonarqube_exporter_last_scrape_timestamp_seconds {now:.3f}")
        system_payload, system_error = _fetch_json("/api/system/status")
        system_status = "unknown"
        sonarqube_up = 0
        if isinstance(system_payload, dict):
            system_status = str(system_payload.get("status") or "unknown")
        elif system_error:
            system_status = system_error
            scrape_success = 0
        # Treat "starting" and migration states as up so restarts don't page.
        if system_status.upper() in {
            "UP",
            "STARTING",
            "DB_MIGRATION_NEEDED",
            "DB_MIGRATION_RUNNING",
        }:
            sonarqube_up = 1
        lines.append("# HELP sonarqube_up SonarQube API reachability and health (1=reachable/healthy-ish, 0=down).")
        lines.append("# TYPE sonarqube_up gauge")
        lines.append(f"sonarqube_up {sonarqube_up}")
        lines.append("# HELP sonarqube_system_status Current SonarQube system status label.")
        lines.append("# TYPE sonarqube_system_status gauge")
        lines.append(f'sonarqube_system_status{{status="{_escape(system_status)}"}} 1')
        # First page only (ps=500); projects beyond that are not inspected.
        projects_payload, projects_error = _fetch_json("/api/projects/search?ps=500&p=1")
        project_items = []
        projects_total = 0
        if isinstance(projects_payload, dict):
            paging = projects_payload.get("paging") or {}
            projects_total = int(paging.get("total") or 0)
            project_items = list(projects_payload.get("components") or [])
        else:
            scrape_success = 0
        lines.append("# HELP sonarqube_projects_total Total discovered SonarQube projects.")
        lines.append("# TYPE sonarqube_projects_total gauge")
        lines.append(f"sonarqube_projects_total {projects_total}")
        gate_counts = {}
        gate_fetch_errors = 0
        inspected = 0
        project_samples = []
        for project in project_items:
            if inspected >= SONARQUBE_PROJECT_LIMIT:
                break
            key = str(project.get("key") or "").strip()
            if not key:
                continue
            inspected += 1
            gate_payload, _gate_error = _fetch_json(
                "/api/qualitygates/project_status?projectKey=" + urllib.parse.quote_plus(key)
            )
            if not isinstance(gate_payload, dict):
                gate_fetch_errors += 1
                continue
            project_status = gate_payload.get("projectStatus") or {}
            gate_status = str(project_status.get("status") or "UNKNOWN").upper()
            gate_counts[gate_status] = gate_counts.get(gate_status, 0) + 1
            is_ok = 1 if gate_status == "OK" else 0
            project_samples.append(
                f'sonarqube_project_quality_gate_pass{{project_key="{_escape(key)}",status="{_escape(gate_status)}"}} {is_ok}'
            )
        lines.append("# HELP sonarqube_project_quality_gate_pass Project quality gate pass state (1=OK, 0=not OK).")
        lines.append("# TYPE sonarqube_project_quality_gate_pass gauge")
        lines.extend(project_samples)
        lines.append("# HELP sonarqube_quality_gate_projects_total Number of projects by quality gate status.")
        lines.append("# TYPE sonarqube_quality_gate_projects_total gauge")
        for status, count in sorted(gate_counts.items()):
            lines.append(f'sonarqube_quality_gate_projects_total{{status="{_escape(status)}"}} {count}')
        lines.append("# HELP sonarqube_quality_gate_fetch_errors_total Number of project gate API fetch failures in the last scrape.")
        lines.append("# TYPE sonarqube_quality_gate_fetch_errors_total gauge")
        lines.append(f"sonarqube_quality_gate_fetch_errors_total {gate_fetch_errors}")
        lines.append("# HELP sonarqube_exporter_scrape_success Exporter scrape success (1=success, 0=partial/error).")
        lines.append("# TYPE sonarqube_exporter_scrape_success gauge")
        lines.append(f"sonarqube_exporter_scrape_success {scrape_success}")
        if projects_error:
            lines.append("# HELP sonarqube_exporter_projects_error Indicates projects API failure on the most recent scrape.")
            lines.append("# TYPE sonarqube_exporter_projects_error gauge")
            lines.append(f'sonarqube_exporter_projects_error{{error="{_escape(projects_error)}"}} 1')
        return "\n".join(lines) + "\n"
    def _get_metrics() -> str:
        # Serve the cached body while fresh; otherwise refresh under the lock
        # so concurrent scrape threads do not hammer SonarQube in parallel.
        global CACHE_BODY, CACHE_EXPIRES_AT
        now = time.time()
        with CACHE_LOCK:
            if CACHE_BODY and now < CACHE_EXPIRES_AT:
                return CACHE_BODY
            CACHE_BODY = _metrics_body()
            CACHE_EXPIRES_AT = now + max(5, SONARQUBE_EXPORTER_CACHE_TTL_SECONDS)
            return CACHE_BODY
    class Handler(BaseHTTPRequestHandler):
        def do_GET(self): # noqa: N802
            # Health endpoints never touch the cache lock, so they stay fast
            # even while a metrics refresh is running on another thread.
            if self.path in ("/-/healthy", "/healthz"):
                body = b"ok\n"
                self.send_response(200)
                self.send_header("Content-Type", "text/plain; charset=utf-8")
                self.send_header("Content-Length", str(len(body)))
                self.end_headers()
                self.wfile.write(body)
                return
            if self.path == "/metrics":
                body = _get_metrics().encode("utf-8")
                self.send_response(200)
                self.send_header("Content-Type", "text/plain; version=0.0.4; charset=utf-8")
                self.send_header("Content-Length", str(len(body)))
                self.end_headers()
                self.wfile.write(body)
                return
            self.send_response(404)
            self.end_headers()
        def log_message(self, fmt, *args): # noqa: A003
            # Silence per-request access logging.
            return
    def main():
        # Threaded server: one handler thread per connection (daemon threads),
        # so slow /metrics requests cannot starve health probes.
        server = ThreadingHTTPServer(("0.0.0.0", SONARQUBE_EXPORTER_PORT), Handler)
        server.serve_forever()
    if __name__ == "__main__":
        main()

View File

@ -0,0 +1,97 @@
# services/quality/sonarqube-exporter-deployment.yaml
# Runs the Python exporter from the sonarqube-exporter-script ConfigMap on a
# stock python:3.12-slim image and exposes Prometheus metrics on :9798.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: sonarqube-exporter
  namespace: quality
  labels:
    app: sonarqube-exporter
spec:
  replicas: 1
  selector:
    matchLabels:
      app: sonarqube-exporter
  template:
    metadata:
      labels:
        app: sonarqube-exporter
      annotations:
        # Annotation-based Prometheus scrape discovery.
        prometheus.io/scrape: "true"
        prometheus.io/port: "9798"
        prometheus.io/path: /metrics
    spec:
      nodeSelector:
        node-role.kubernetes.io/worker: "true"
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
              - matchExpressions:
                  - key: kubernetes.io/arch
                    operator: In
                    values: ["arm64"]
                  - key: hardware
                    operator: In
                    values: ["rpi5"]
          preferredDuringSchedulingIgnoredDuringExecution:
            - weight: 100
              preference:
                matchExpressions:
                  - key: hardware
                    operator: In
                    values: ["rpi5"]
      containers:
        - name: exporter
          image: python:3.12-slim
          imagePullPolicy: IfNotPresent
          command:
            - /bin/sh
            - -ec
          args:
            # Copy the script out of the read-only ConfigMap mount into the
            # writable emptyDir before running it.
            - |
              cp /config/exporter.py /app/exporter.py
              python /app/exporter.py
          env:
            # NOTE(review): no SONARQUBE_TOKEN is provided here; the exporter
            # then queries the API anonymously — confirm SonarQube permits
            # anonymous reads, or inject a token.
            - name: SONARQUBE_URL
              value: http://sonarqube.quality.svc.cluster.local:9000
            - name: SONARQUBE_EXPORTER_PORT
              value: "9798"
            - name: SONARQUBE_EXPORTER_CACHE_TTL_SECONDS
              value: "45"
            - name: SONARQUBE_PROJECT_LIMIT
              value: "250"
          ports:
            - name: metrics
              containerPort: 9798
          readinessProbe:
            httpGet:
              path: /-/healthy
              port: 9798
            initialDelaySeconds: 5
            periodSeconds: 10
          livenessProbe:
            httpGet:
              path: /-/healthy
              port: 9798
            initialDelaySeconds: 20
            periodSeconds: 20
          resources:
            requests:
              cpu: 25m
              memory: 96Mi
            limits:
              cpu: 250m
              memory: 256Mi
          volumeMounts:
            - name: exporter-script
              mountPath: /config
              readOnly: true
            - name: app-tmp
              mountPath: /app
      volumes:
        - name: exporter-script
          configMap:
            name: sonarqube-exporter-script
            # 493 decimal == 0o755 (world-readable, executable).
            defaultMode: 493
        - name: app-tmp
          emptyDir: {}

View File

@ -0,0 +1,19 @@
# services/quality/sonarqube-exporter-service.yaml
# ClusterIP service for the exporter; scrape annotations mirror the pod's so
# either pod- or service-level discovery works.
apiVersion: v1
kind: Service
metadata:
  name: sonarqube-exporter
  namespace: quality
  labels:
    app: sonarqube-exporter
  annotations:
    prometheus.io/scrape: "true"
    prometheus.io/port: "9798"
    prometheus.io/path: /metrics
spec:
  selector:
    app: sonarqube-exporter
  ports:
    - name: metrics
      port: 9798
      targetPort: metrics

View File

@ -0,0 +1,28 @@
# services/quality/sonarqube-ingress.yaml
# External entry point for quality.bstein.dev. All traffic is routed to the
# oauth2-proxy service (not directly to SonarQube) so requests are
# authenticated before reaching the upstream.
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: sonarqube
  namespace: quality
  annotations:
    cert-manager.io/cluster-issuer: letsencrypt
    traefik.ingress.kubernetes.io/router.entrypoints: websecure
    traefik.ingress.kubernetes.io/router.tls: "true"
spec:
  ingressClassName: traefik
  rules:
    - host: quality.bstein.dev
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: oauth2-proxy-sonarqube
                port:
                  number: 80
  tls:
    - hosts:
        - quality.bstein.dev
      # Secret produced by the quality-tls Certificate in this stack.
      secretName: quality-tls

View File

@ -0,0 +1,14 @@
# services/quality/sonarqube-pvc.yaml
# Persistent storage for SonarQube data/extensions/logs/temp (split via
# subPath mounts in the sonarqube deployment). RWO: single-consumer volume.
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: sonarqube-data
  namespace: quality
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 20Gi
  storageClassName: astreae

View File

@ -0,0 +1,15 @@
# services/quality/sonarqube-service.yaml
# In-cluster endpoint for the SonarQube web/API port; upstream for
# oauth2-proxy and the metrics exporter.
apiVersion: v1
kind: Service
metadata:
  name: sonarqube
  namespace: quality
  labels:
    app: sonarqube
spec:
  selector:
    app: sonarqube
  ports:
    - name: http
      port: 9000
      targetPort: 9000

View File

@ -0,0 +1,7 @@
# services/quality/sonarqube-serviceaccount.yaml
# Identity for the SonarQube pod; referenced by the deployment and used for
# Vault agent injection of the database credentials.
apiVersion: v1
kind: ServiceAccount
metadata:
  name: sonarqube
  namespace: quality