diff --git a/Jenkinsfile b/Jenkinsfile
index 4d6b23e6..fe976922 100644
--- a/Jenkinsfile
+++ b/Jenkinsfile
@@ -7,6 +7,7 @@ pipeline {
apiVersion: v1
kind: Pod
spec:
+ serviceAccountName: "jenkins"
nodeSelector:
hardware: rpi5
kubernetes.io/arch: arm64
@@ -23,6 +24,9 @@ spec:
environment {
PIP_DISABLE_PIP_VERSION_CHECK = '1'
PYTHONUNBUFFERED = '1'
+ SUITE_NAME = 'titan-iac'
+ PUSHGATEWAY_URL = 'http://platform-quality-gateway.monitoring.svc.cluster.local:9091'
+ VM_URL = 'http://victoria-metrics-single-server.monitoring.svc.cluster.local:8428'
}
stages {
stage('Checkout') {
@@ -35,9 +39,36 @@ spec:
sh 'pip install --no-cache-dir -r ci/requirements.txt'
}
}
- stage('Glue tests') {
+ stage('Run quality gate') {
steps {
- sh 'pytest -q ci/tests/glue'
+ sh '''
+ set -eu
+ mkdir -p build
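+                        # capture the gate's exit code instead of aborting; the 'Enforce quality gate' stage fails the build after metrics are published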
+ set +e
+ python3 -m testing.quality_gate --profile jenkins --build-dir build
+ quality_gate_rc=$?
+ set -e
+ printf '%s\n' "${quality_gate_rc}" > build/quality-gate.rc
+ '''
+ }
+ }
+ stage('Publish test metrics') {
+ steps {
+ sh '''
+ set -eu
+ export JUNIT_GLOB='build/junit-*.xml'
+ export QUALITY_GATE_EXIT_CODE_PATH='build/quality-gate.rc'
+ export QUALITY_GATE_SUMMARY_PATH='build/quality-gate-summary.json'
+ python3 ci/scripts/publish_test_metrics.py
+ '''
+ }
+ }
+ stage('Enforce quality gate') {
+ steps {
+ sh '''
+ set -eu
+ test "$(cat build/quality-gate.rc 2>/dev/null || echo 1)" -eq 0
+ '''
}
}
stage('Resolve Flux branch') {
@@ -45,7 +76,7 @@ spec:
script {
env.FLUX_BRANCH = sh(
returnStdout: true,
- script: "awk '/branch:/{print $2; exit}' clusters/atlas/flux-system/gotk-sync.yaml"
+ script: '''awk '/branch:/{print $2; exit}' clusters/atlas/flux-system/gotk-sync.yaml'''
).trim()
if (!env.FLUX_BRANCH) {
error('Flux branch not found in gotk-sync.yaml')
@@ -62,16 +93,32 @@ spec:
}
}
steps {
- withCredentials([usernamePassword(credentialsId: 'gitea-pat', usernameVariable: 'GIT_USER', passwordVariable: 'GIT_TOKEN')]) {
- sh '''
- set +x
- git config user.email "jenkins@bstein.dev"
- git config user.name "jenkins"
- git remote set-url origin https://${GIT_USER}:${GIT_TOKEN}@scm.bstein.dev/bstein/titan-iac.git
- git push origin HEAD:${FLUX_BRANCH}
- '''
+ container('jnlp') {
+ withCredentials([usernamePassword(credentialsId: 'gitea-pat', usernameVariable: 'GIT_USER', passwordVariable: 'GIT_TOKEN')]) {
+ sh '''
+ set +x
+ git config user.email "jenkins@bstein.dev"
+ git config user.name "jenkins"
+ git remote set-url origin https://${GIT_USER}:${GIT_TOKEN}@scm.bstein.dev/bstein/titan-iac.git
+ git push origin HEAD:${FLUX_BRANCH}
+ '''
+ }
}
}
}
}
+ post {
+ always {
+ script {
+ if (fileExists('build/junit-unit.xml') || fileExists('build/junit-glue.xml')) {
+ try {
+ junit allowEmptyResults: true, testResults: 'build/junit-*.xml'
+ } catch (Throwable err) {
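+                            // the junit step is plugin-provided; when missing it surfaces as an Error ("No such DSL method"), hence Throwable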
+ echo "junit step unavailable: ${err.class.simpleName}"
+ }
+ }
+ }
+ archiveArtifacts artifacts: 'build/**', allowEmptyArchive: true, fingerprint: true
+ }
+ }
}
diff --git a/ci/Jenkinsfile.titan-iac b/ci/Jenkinsfile.titan-iac
index 77990d77..bf639480 100644
--- a/ci/Jenkinsfile.titan-iac
+++ b/ci/Jenkinsfile.titan-iac
@@ -6,6 +6,7 @@ pipeline {
apiVersion: v1
kind: Pod
spec:
+ serviceAccountName: "jenkins"
nodeSelector:
hardware: rpi5
kubernetes.io/arch: arm64
@@ -22,6 +23,9 @@ spec:
environment {
PIP_DISABLE_PIP_VERSION_CHECK = '1'
PYTHONUNBUFFERED = '1'
+ SUITE_NAME = 'titan-iac'
+ PUSHGATEWAY_URL = 'http://platform-quality-gateway.monitoring.svc.cluster.local:9091'
+ VM_URL = 'http://victoria-metrics-single-server.monitoring.svc.cluster.local:8428'
}
stages {
stage('Checkout') {
@@ -34,9 +38,36 @@ spec:
sh 'pip install --no-cache-dir -r ci/requirements.txt'
}
}
- stage('Glue tests') {
+ stage('Run quality gate') {
steps {
- sh 'pytest -q ci/tests/glue'
+ sh '''
+ set -eu
+ mkdir -p build
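+                        # capture the gate's exit code instead of aborting; the 'Enforce quality gate' stage fails the build after metrics are published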
+ set +e
+ python3 -m testing.quality_gate --profile jenkins --build-dir build
+ quality_gate_rc=$?
+ set -e
+ printf '%s\n' "${quality_gate_rc}" > build/quality-gate.rc
+ '''
+ }
+ }
+ stage('Publish test metrics') {
+ steps {
+ sh '''
+ set -eu
+ export JUNIT_GLOB='build/junit-*.xml'
+ export QUALITY_GATE_EXIT_CODE_PATH='build/quality-gate.rc'
+ export QUALITY_GATE_SUMMARY_PATH='build/quality-gate-summary.json'
+ python3 ci/scripts/publish_test_metrics.py
+ '''
+ }
+ }
+ stage('Enforce quality gate') {
+ steps {
+ sh '''
+ set -eu
+ test "$(cat build/quality-gate.rc 2>/dev/null || echo 1)" -eq 0
+ '''
}
}
stage('Resolve Flux branch') {
@@ -44,7 +75,7 @@ spec:
script {
env.FLUX_BRANCH = sh(
returnStdout: true,
- script: "awk '/branch:/{print $2; exit}' clusters/atlas/flux-system/gotk-sync.yaml"
+ script: '''awk '/branch:/{print $2; exit}' clusters/atlas/flux-system/gotk-sync.yaml'''
).trim()
if (!env.FLUX_BRANCH) {
error('Flux branch not found in gotk-sync.yaml')
@@ -61,16 +92,32 @@ spec:
}
}
steps {
- withCredentials([usernamePassword(credentialsId: 'gitea-pat', usernameVariable: 'GIT_USER', passwordVariable: 'GIT_TOKEN')]) {
- sh '''
- set +x
- git config user.email "jenkins@bstein.dev"
- git config user.name "jenkins"
- git remote set-url origin https://${GIT_USER}:${GIT_TOKEN}@scm.bstein.dev/bstein/titan-iac.git
- git push origin HEAD:${FLUX_BRANCH}
- '''
+ container('jnlp') {
+ withCredentials([usernamePassword(credentialsId: 'gitea-pat', usernameVariable: 'GIT_USER', passwordVariable: 'GIT_TOKEN')]) {
+ sh '''
+ set +x
+ git config user.email "jenkins@bstein.dev"
+ git config user.name "jenkins"
+ git remote set-url origin https://${GIT_USER}:${GIT_TOKEN}@scm.bstein.dev/bstein/titan-iac.git
+ git push origin HEAD:${FLUX_BRANCH}
+ '''
+ }
}
}
}
}
+ post {
+ always {
+ script {
+ if (fileExists('build/junit-unit.xml') || fileExists('build/junit-glue.xml')) {
+ try {
+ junit allowEmptyResults: true, testResults: 'build/junit-*.xml'
+ } catch (Throwable err) {
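+                            // the junit step is plugin-provided; when missing it surfaces as an Error ("No such DSL method"), hence Throwable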
+ echo "junit step unavailable: ${err.class.simpleName}"
+ }
+ }
+ }
+ archiveArtifacts artifacts: 'build/**', allowEmptyArchive: true, fingerprint: true
+ }
+ }
}
diff --git a/ci/requirements.txt b/ci/requirements.txt
index eaa21aac..d4ba9a53 100644
--- a/ci/requirements.txt
+++ b/ci/requirements.txt
@@ -1,4 +1,7 @@
pytest==8.3.4
+pytest-cov==6.0.0
+coverage==7.6.10
kubernetes==30.1.0
PyYAML==6.0.2
requests==2.32.3
+ruff==0.8.4
diff --git a/ci/scripts/publish_test_metrics.py b/ci/scripts/publish_test_metrics.py
new file mode 100644
index 00000000..95a74b8e
--- /dev/null
+++ b/ci/scripts/publish_test_metrics.py
@@ -0,0 +1,246 @@
+#!/usr/bin/env python3
+"""Publish titan-iac quality-gate results to Pushgateway."""
+
+from __future__ import annotations
+
+import json
+import os
+from glob import glob
+import urllib.error
+import urllib.request
+import xml.etree.ElementTree as ET
+
+
+def _escape_label(value: str) -> str:
+ return value.replace("\\", "\\\\").replace("\n", "\\n").replace('"', '\\"')
+
+
+def _label_str(labels: dict[str, str]) -> str:
+ parts = [f'{key}="{_escape_label(val)}"' for key, val in labels.items() if val]
+ return "{" + ",".join(parts) + "}" if parts else ""
+
+
+def _read_text(url: str) -> str:
+ with urllib.request.urlopen(url, timeout=10) as response:
+ return response.read().decode("utf-8")
+
+
+def _post_text(url: str, payload: str) -> None:
+ request = urllib.request.Request(
+ url,
+ data=payload.encode("utf-8"),
+ method="POST",
+ headers={"Content-Type": "text/plain"},
+ )
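+    # urlopen raises HTTPError for 4xx/5xx responses; the explicit status check
+    # also guards transports that report errors without raising.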
+ with urllib.request.urlopen(request, timeout=10) as response:
+ if response.status >= 400:
+ raise RuntimeError(f"push failed with status={response.status}")
+
+
+def _parse_junit(path: str) -> dict[str, int]:
+ if not os.path.exists(path):
+ return {"tests": 0, "failures": 0, "errors": 0, "skipped": 0}
+
+ tree = ET.parse(path)
+ root = tree.getroot()
+ totals = {"tests": 0, "failures": 0, "errors": 0, "skipped": 0}
+
+ suites: list[ET.Element]
+ if root.tag == "testsuite":
+ suites = [root]
+ elif root.tag == "testsuites":
+ suites = [elem for elem in root if elem.tag == "testsuite"]
+ else:
+ suites = []
+
+ for suite in suites:
+ for key in totals:
+ raw_value = suite.attrib.get(key, "0")
+ try:
+ totals[key] += int(float(raw_value))
+ except ValueError:
+ totals[key] += 0
+ return totals
+
+
+def _collect_junit_totals(pattern: str) -> dict[str, int]:
+ totals = {"tests": 0, "failures": 0, "errors": 0, "skipped": 0}
+ for path in sorted(glob(pattern)):
+ parsed = _parse_junit(path)
+ for key in totals:
+ totals[key] += parsed[key]
+ return totals
+
+
+def _read_exit_code(path: str) -> int:
+ try:
+ with open(path, "r", encoding="utf-8") as handle:
+ return int(handle.read().strip())
+ except (FileNotFoundError, ValueError):
+ return 1
+
+
+def _load_summary(path: str) -> dict:
+ try:
+ with open(path, "r", encoding="utf-8") as handle:
+ return json.load(handle)
+ except (FileNotFoundError, json.JSONDecodeError):
+ return {}
+
+
+def _summary_float(summary: dict, key: str) -> float:
+ value = summary.get(key)
+ if isinstance(value, (int, float)):
+ return float(value)
+ return 0.0
+
+
+def _summary_int(summary: dict, key: str) -> int:
+ value = summary.get(key)
+ if isinstance(value, int):
+ return value
+ if isinstance(value, float):
+ return int(value)
+ return 0
+
+
+def _fetch_existing_counter(pushgateway_url: str, metric: str, labels: dict[str, str]) -> float:
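+    # Pushgateway overwrites a grouping's metrics on every push, so counters are
+    # emulated: read the previously pushed value here, then push the incremented
+    # total from main().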
+ text = _read_text(f"{pushgateway_url.rstrip('/')}/metrics")
+ for line in text.splitlines():
+ if not line.startswith(metric + "{"):
+ continue
+ if any(f'{key}="{value}"' not in line for key, value in labels.items()):
+ continue
+ parts = line.split()
+ if len(parts) < 2:
+ continue
+ try:
+ return float(parts[1])
+ except ValueError:
+ return 0.0
+ return 0.0
+
+
+def _build_payload(
+ suite: str,
+ status: str,
+ tests: dict[str, int],
+ ok_count: int,
+ failed_count: int,
+ branch: str,
+ build_number: str,
+ summary: dict | None = None,
+ workspace_line_coverage_percent: float = 0.0,
+ source_lines_over_500: int = 0,
+) -> str:
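+    # JUnit's "tests" attribute counts every case; passed is derived by
+    # subtracting failures, errors, and skips, floored at zero.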
+ passed = max(tests["tests"] - tests["failures"] - tests["errors"] - tests["skipped"], 0)
+ build_labels = _label_str(
+ {
+ "suite": suite,
+ "branch": branch or "unknown",
+ "build_number": build_number or "unknown",
+ }
+ )
+ lines = [
+ "# TYPE platform_quality_gate_runs_total counter",
+ f'platform_quality_gate_runs_total{{suite="{suite}",status="ok"}} {ok_count}',
+ f'platform_quality_gate_runs_total{{suite="{suite}",status="failed"}} {failed_count}',
+ "# TYPE titan_iac_quality_gate_tests_total gauge",
+ f'titan_iac_quality_gate_tests_total{{suite="{suite}",result="passed"}} {passed}',
+ f'titan_iac_quality_gate_tests_total{{suite="{suite}",result="failed"}} {tests["failures"]}',
+ f'titan_iac_quality_gate_tests_total{{suite="{suite}",result="error"}} {tests["errors"]}',
+ f'titan_iac_quality_gate_tests_total{{suite="{suite}",result="skipped"}} {tests["skipped"]}',
+ "# TYPE titan_iac_quality_gate_run_status gauge",
+ f'titan_iac_quality_gate_run_status{{suite="{suite}",status="ok"}} {1 if status == "ok" else 0}',
+ f'titan_iac_quality_gate_run_status{{suite="{suite}",status="failed"}} {1 if status == "failed" else 0}',
+ "# TYPE titan_iac_quality_gate_build_info gauge",
+ f"titan_iac_quality_gate_build_info{build_labels} 1",
+ "# TYPE platform_quality_gate_workspace_line_coverage_percent gauge",
+ f'platform_quality_gate_workspace_line_coverage_percent{{suite="{suite}"}} {workspace_line_coverage_percent:.3f}',
+ "# TYPE platform_quality_gate_source_lines_over_500_total gauge",
+ f'platform_quality_gate_source_lines_over_500_total{{suite="{suite}"}} {source_lines_over_500}',
+ ]
+ results = summary.get("results", []) if isinstance(summary, dict) else []
+ if results:
+ lines.append("# TYPE titan_iac_quality_gate_checks_total gauge")
+ for result in results:
+ check_name = result.get("name")
+ check_status = result.get("status")
+ if not check_name or not check_status:
+ continue
+ lines.append(
+ f'titan_iac_quality_gate_checks_total{{suite="{suite}",check="{_escape_label(str(check_name))}",result="{_escape_label(str(check_status))}"}} 1'
+ )
+ return "\n".join(lines) + "\n"
+
+
+def main() -> int:
+ suite = os.getenv("SUITE_NAME", "titan-iac")
+ pushgateway_url = os.getenv("PUSHGATEWAY_URL", "http://platform-quality-gateway.monitoring.svc.cluster.local:9091")
+ job_name = os.getenv("QUALITY_GATE_JOB_NAME", "platform-quality-ci")
+ junit_glob = os.getenv("JUNIT_GLOB", os.getenv("JUNIT_PATH", "build/junit-*.xml"))
+ exit_code_path = os.getenv("QUALITY_GATE_EXIT_CODE_PATH", os.getenv("GLUE_EXIT_CODE_PATH", "build/quality-gate.rc"))
+ summary_path = os.getenv("QUALITY_GATE_SUMMARY_PATH", "build/quality-gate-summary.json")
+ branch = os.getenv("BRANCH_NAME", os.getenv("GIT_BRANCH", ""))
+ build_number = os.getenv("BUILD_NUMBER", "")
+
+ tests = _collect_junit_totals(junit_glob)
+ exit_code = _read_exit_code(exit_code_path)
+ status = "ok" if exit_code == 0 else "failed"
+ summary = _load_summary(summary_path)
+ workspace_line_coverage_percent = _summary_float(summary, "workspace_line_coverage_percent")
+ source_lines_over_500 = _summary_int(summary, "source_lines_over_500")
+
+ ok_count = int(
+ _fetch_existing_counter(
+ pushgateway_url,
+ "platform_quality_gate_runs_total",
+ {"job": job_name, "suite": suite, "status": "ok"},
+ )
+ )
+ failed_count = int(
+ _fetch_existing_counter(
+ pushgateway_url,
+ "platform_quality_gate_runs_total",
+ {"job": job_name, "suite": suite, "status": "failed"},
+ )
+ )
+ if status == "ok":
+ ok_count += 1
+ else:
+ failed_count += 1
+
+ payload = _build_payload(
+ suite=suite,
+ status=status,
+ tests=tests,
+ ok_count=ok_count,
+ failed_count=failed_count,
+ branch=branch,
+ build_number=build_number,
+ summary=summary,
+ workspace_line_coverage_percent=workspace_line_coverage_percent,
+ source_lines_over_500=source_lines_over_500,
+ )
+ push_url = f"{pushgateway_url.rstrip('/')}/metrics/job/{job_name}/suite/{suite}"
+ _post_text(push_url, payload)
+
+    report = {
+ "suite": suite,
+ "status": status,
+ "tests_total": tests["tests"],
+ "tests_failed": tests["failures"],
+ "tests_error": tests["errors"],
+ "tests_skipped": tests["skipped"],
+ "ok_count": ok_count,
+ "failed_count": failed_count,
+ "checks_recorded": len(summary.get("results", [])) if isinstance(summary, dict) else 0,
+ "workspace_line_coverage_percent": workspace_line_coverage_percent,
+ "source_lines_over_500": source_lines_over_500,
+ }
+    print(json.dumps(report, sort_keys=True))
+ return 0
+
+
+if __name__ == "__main__":
+ raise SystemExit(main())
diff --git a/testing/__init__.py b/testing/__init__.py
new file mode 100644
index 00000000..89fa0565
--- /dev/null
+++ b/testing/__init__.py
@@ -0,0 +1 @@
+"""Top-level testing contract and quality-gate tooling for titan-iac."""
diff --git a/testing/quality_contract.json b/testing/quality_contract.json
new file mode 100644
index 00000000..06741e18
--- /dev/null
+++ b/testing/quality_contract.json
@@ -0,0 +1,161 @@
+{
+ "scope_note": "Quality-gate LOC/naming/coverage checks apply to managed automation and testing modules only, not broad Flux/Kubernetes manifest trees.",
+ "required_docs": [
+ {
+ "path": "README.md",
+ "description": "Top-level repository handbook."
+ },
+ {
+ "path": "Jenkinsfile",
+ "description": "Top-level Jenkins mirror for multibranch discovery."
+ },
+ {
+ "path": "ci/Jenkinsfile.titan-iac",
+ "description": "Canonical titan-iac Jenkins pipeline definition."
+ }
+ ],
+ "managed_modules": [
+ "ci/scripts/publish_test_metrics.py",
+ "services/mailu/scripts/mailu_sync.py",
+ "testing/__init__.py",
+ "testing/quality_contract.py",
+ "testing/quality_docs.py",
+ "testing/quality_hygiene.py",
+ "testing/quality_coverage.py",
+ "testing/quality_gate.py"
+ ],
+ "lint_paths": [
+ "ci/scripts/publish_test_metrics.py",
+ "ci/tests/glue",
+ "scripts/tests",
+ "services/comms/scripts/tests",
+ "services/mailu/scripts/mailu_sync.py",
+ "testing"
+ ],
+ "pytest_suites": {
+ "unit": {
+ "description": "Fast unit and contract tests for repo automation.",
+ "paths": [
+ "scripts/tests",
+ "services/comms/scripts/tests",
+ "testing/tests"
+ ],
+ "junit": "build/junit-unit.xml",
+ "coverage_sources": [
+ "ci/scripts",
+ "services/mailu/scripts",
+ "testing"
+ ],
+ "coverage_xml": "build/coverage-unit.xml"
+ },
+ "glue": {
+ "description": "Cluster-live glue checks that validate CronJobs and exported metrics.",
+ "paths": [
+ "ci/tests/glue"
+ ],
+ "junit": "build/junit-glue.xml"
+ }
+ },
+ "profiles": {
+ "local": [
+ "docs",
+ "smell",
+ "hygiene",
+ "unit",
+ "coverage"
+ ],
+ "jenkins": [
+ "docs",
+ "smell",
+ "hygiene",
+ "unit",
+ "coverage",
+ "glue"
+ ]
+ },
+ "manual_scripts": [
+ {
+ "path": "scripts/test_atlas_user_cleanup.py",
+ "description": "Manual cleanup validation for Atlas user lifecycle automation."
+ },
+ {
+ "path": "scripts/test_user_cleanup.py",
+ "description": "Manual cleanup validation for shared user lifecycle automation."
+ },
+ {
+ "path": "scripts/test_vaultwarden_user_cleanup.py",
+ "description": "Manual cleanup validation for Vaultwarden user lifecycle automation."
+ },
+ {
+ "path": "services/bstein-dev-home/scripts/test_portal_onboarding_flow.py",
+ "description": "Portal onboarding end-to-end flow validation with mail delivery checks."
+ },
+ {
+ "path": "services/keycloak/scripts/tests/test_keycloak_execute_actions_email.py",
+ "description": "Standalone Keycloak SMTP execute-actions-email validation script."
+ },
+ {
+ "path": "services/keycloak/scripts/tests/test_portal_token_exchange.py",
+ "description": "Standalone Keycloak token-exchange validation script."
+ }
+ ],
+ "hygiene": {
+ "max_lines": 500,
+ "line_limit_globs": [
+ "testing/**/*.py",
+ "ci/scripts/*.py",
+ "ci/tests/**/*.py",
+ "scripts/tests/**/*.py",
+ "services/*/scripts/tests/**/*.py",
+ "services/mailu/scripts/mailu_sync.py"
+ ],
+ "naming_rules": [
+ {
+ "glob": "testing/*.py",
+ "pattern": "^(?:__init__|quality_[a-z0-9_]+)\\.py$",
+ "description": "Top-level testing helpers use quality_* module names."
+ },
+ {
+ "glob": "testing/tests/*.py",
+ "pattern": "^test_[a-z0-9_]+\\.py$",
+ "description": "Top-level pytest files use test_*.py names."
+ },
+ {
+ "glob": "ci/tests/**/*.py",
+ "pattern": "^test_[a-z0-9_]+\\.py$",
+ "description": "CI pytest files use test_*.py names."
+ },
+ {
+ "glob": "scripts/tests/**/*.py",
+ "pattern": "^test_[a-z0-9_]+\\.py$",
+ "description": "Script pytest files use test_*.py names."
+ },
+ {
+ "glob": "scripts/test_*.py",
+ "pattern": "^test_[a-z0-9_]+\\.py$",
+ "description": "Standalone script tests use test_*.py names."
+ },
+ {
+ "glob": "services/*/scripts/tests/**/*.py",
+ "pattern": "^test_[a-z0-9_]+\\.py$",
+ "description": "Service pytest files use test_*.py names."
+ },
+ {
+ "glob": "services/*/scripts/test_*.py",
+ "pattern": "^test_[a-z0-9_]+\\.py$",
+ "description": "Standalone service test scripts use test_*.py names."
+ }
+ ]
+ },
+ "coverage": {
+ "minimum_percent": 95.0,
+ "tracked_files": [
+ "ci/scripts/publish_test_metrics.py",
+ "testing/quality_contract.py",
+ "testing/quality_docs.py",
+ "testing/quality_hygiene.py",
+ "testing/quality_coverage.py",
+ "testing/quality_gate.py"
+ ]
+ }
+}
diff --git a/testing/quality_contract.py b/testing/quality_contract.py
new file mode 100644
index 00000000..69026e70
--- /dev/null
+++ b/testing/quality_contract.py
@@ -0,0 +1,17 @@
+"""Helpers for loading the repository testing contract."""
+
+from __future__ import annotations
+
+import json
+from pathlib import Path
+from typing import Any
+
+
+CONTRACT_PATH = Path(__file__).with_name("quality_contract.json")
+
+
+def load_contract(contract_path: Path | None = None) -> dict[str, Any]:
+ """Return the parsed testing contract."""
+ path = contract_path or CONTRACT_PATH
+ with path.open("r", encoding="utf-8") as handle:
+ return json.load(handle)
diff --git a/testing/quality_coverage.py b/testing/quality_coverage.py
new file mode 100644
index 00000000..f4910258
--- /dev/null
+++ b/testing/quality_coverage.py
@@ -0,0 +1,80 @@
+"""Per-file coverage threshold validation for quality-managed modules."""
+
+from __future__ import annotations
+
+import xml.etree.ElementTree as ET
+from pathlib import Path
+from typing import Any
+
+
+def _load_percentages(xml_path: Path, root: Path) -> dict[str, float]:
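+    # Cobertura reports may list filenames as absolute paths, repo-relative
+    # paths, or paths relative to a <source> root; normalize each form to a
+    # repo-relative POSIX key.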
+ tree = ET.parse(xml_path)
+ xml_root = tree.getroot()
+ source_roots = [
+ Path(node.text)
+ for node in xml_root.findall("./sources/source")
+ if node.text
+ ]
+ percentages: dict[str, float] = {}
+ for class_node in xml_root.findall(".//class"):
+ filename = class_node.attrib.get("filename")
+ line_rate = class_node.attrib.get("line-rate")
+ if not filename or line_rate is None:
+ continue
+ normalized = filename.replace("\\", "/")
+ if normalized.startswith("/"):
+ key = Path(normalized).relative_to(root).as_posix()
+ else:
+ key = normalized
+ for source_root in source_roots:
+ candidate = source_root / filename
+ if candidate.exists():
+ key = candidate.relative_to(root).as_posix()
+ break
+ percentages[key] = float(line_rate) * 100.0
+ return percentages
+
+
+def run_check(contract: dict[str, Any], root: Path, xml_path: Path) -> list[str]:
+ """Return human-readable issues for tracked files below the coverage floor."""
+ if not xml_path.exists():
+ return [f"coverage xml missing: {xml_path.relative_to(root)}"]
+
+ percentages = _load_percentages(xml_path, root)
+ minimum = float(contract.get("coverage", {}).get("minimum_percent", 95.0))
+ issues: list[str] = []
+
+ for relative_path in contract.get("coverage", {}).get("tracked_files", []):
+ normalized = relative_path.replace("\\", "/")
+ percent = percentages.get(normalized)
+ if percent is None:
+ issues.append(f"coverage missing for tracked file: {relative_path}")
+ continue
+ if percent + 1e-9 < minimum:
+ issues.append(
+ f"coverage below {minimum:.1f}%: {relative_path} ({percent:.1f}%)"
+ )
+
+ return issues
+
+
+def compute_workspace_line_coverage(
+ contract: dict[str, Any],
+ root: Path,
+ xml_path: Path,
+) -> float:
+ """Compute mean line coverage across tracked files present in the XML."""
+
+ if not xml_path.exists():
+ return 0.0
+
+ percentages = _load_percentages(xml_path, root)
+ samples: list[float] = []
+ for relative_path in contract.get("coverage", {}).get("tracked_files", []):
+ normalized = relative_path.replace("\\", "/")
+ percent = percentages.get(normalized)
+ if percent is not None:
+ samples.append(percent)
+ if not samples:
+ return 0.0
+ return round(sum(samples) / len(samples), 3)
diff --git a/testing/quality_docs.py b/testing/quality_docs.py
new file mode 100644
index 00000000..1b57def4
--- /dev/null
+++ b/testing/quality_docs.py
@@ -0,0 +1,59 @@
+"""Documentation-oriented validation for the testing contract."""
+
+from __future__ import annotations
+
+import ast
+from pathlib import Path
+from typing import Any
+
+
+def _module_has_docstring(path: Path) -> bool:
+ source = path.read_text(encoding="utf-8")
+ return ast.get_docstring(ast.parse(source)) is not None
+
+
+def _iter_contract_paths(contract: dict[str, Any]) -> list[str]:
+ paths: list[str] = []
+ for item in contract.get("required_docs", []):
+ paths.append(item["path"])
+ paths.extend(contract.get("managed_modules", []))
+ paths.extend(contract.get("lint_paths", []))
+ for suite in contract.get("pytest_suites", {}).values():
+ paths.extend(suite.get("paths", []))
+ for item in contract.get("manual_scripts", []):
+ paths.append(item["path"])
+ return paths
+
+
+def run_check(contract: dict[str, Any], root: Path) -> list[str]:
+ """Return human-readable issues for contract/documentation violations."""
+ issues: list[str] = []
+
+ for item in contract.get("required_docs", []):
+ path = root / item["path"]
+ if not path.exists():
+ issues.append(f"required doc missing: {item['path']}")
+ continue
+ if path.is_file() and not path.read_text(encoding="utf-8").strip():
+ issues.append(f"required doc empty: {item['path']}")
+ if not item.get("description", "").strip():
+ issues.append(f"required doc missing description: {item['path']}")
+
+ for relative_path in sorted(set(_iter_contract_paths(contract))):
+ if not (root / relative_path).exists():
+ issues.append(f"contract path missing: {relative_path}")
+
+ for suite_name, suite in contract.get("pytest_suites", {}).items():
+ if not suite.get("description", "").strip():
+ issues.append(f"pytest suite missing description: {suite_name}")
+
+ for item in contract.get("manual_scripts", []):
+ if not item.get("description", "").strip():
+ issues.append(f"manual script missing description: {item['path']}")
+
+ for relative_path in contract.get("managed_modules", []):
+ path = root / relative_path
+ if path.exists() and path.suffix == ".py" and not _module_has_docstring(path):
+ issues.append(f"module docstring missing: {relative_path}")
+
+ return issues
diff --git a/testing/quality_gate.py b/testing/quality_gate.py
new file mode 100644
index 00000000..3db27119
--- /dev/null
+++ b/testing/quality_gate.py
@@ -0,0 +1,193 @@
+"""Source-of-truth quality-gate runner for titan-iac."""
+
+from __future__ import annotations
+
+import argparse
+import json
+import subprocess
+import sys
+import time
+from pathlib import Path
+from typing import Any
+
+from testing.quality_contract import load_contract
+from testing.quality_coverage import (
+ compute_workspace_line_coverage,
+ run_check as run_coverage_check,
+)
+from testing.quality_docs import run_check as run_docs_check
+from testing.quality_hygiene import (
+ count_files_over_line_limit,
+ run_check as run_hygiene_check,
+)
+
+
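+# Ruff rule families for the "smell" check: F (pyflakes), B (bugbear),
+# SIM (simplify), C4 (comprehensions), UP (pyupgrade).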
+RUFF_SELECT = ["F", "B", "SIM", "C4", "UP"]
+RUFF_IGNORE = ["B017", "UP015", "UP035"]
+
+
+def _status_from_issues(issues: list[str]) -> str:
+ return "ok" if not issues else "failed"
+
+
+def _result(name: str, description: str, status: str, **extra: Any) -> dict[str, Any]:
+ return {"name": name, "description": description, "status": status, **extra}
+
+
+def _run_ruff(contract: dict[str, Any], root: Path) -> dict[str, Any]:
+ command = [
+ sys.executable,
+ "-m",
+ "ruff",
+ "check",
+ "--select",
+ ",".join(RUFF_SELECT),
+ "--ignore",
+ ",".join(RUFF_IGNORE),
+ *contract.get("lint_paths", []),
+ ]
+ started_at = time.monotonic()
+ completed = subprocess.run(command, cwd=root, check=False)
+ return _result(
+ "smell",
+ "Code-smell lint for managed Python automation.",
+ "ok" if completed.returncode == 0 else "failed",
+ returncode=completed.returncode,
+ command=command,
+ duration_seconds=round(time.monotonic() - started_at, 3),
+ )
+
+
+def _run_pytest_suite(root: Path, suite_name: str, suite: dict[str, Any]) -> dict[str, Any]:
+ junit_path = root / suite["junit"]
+ junit_path.parent.mkdir(parents=True, exist_ok=True)
+ command = [
+ sys.executable,
+ "-m",
+ "pytest",
+ "-q",
+ *suite.get("paths", []),
+ f"--junitxml={junit_path}",
+ ]
+ coverage_xml = suite.get("coverage_xml")
+ if coverage_xml:
+ for source in suite.get("coverage_sources", []):
+ command.append(f"--cov={source}")
+ command.extend(
+ [
+ "--cov-branch",
+ f"--cov-report=xml:{root / coverage_xml}",
+ ]
+ )
+ started_at = time.monotonic()
+ completed = subprocess.run(command, cwd=root, check=False)
+ return _result(
+ suite_name,
+ suite["description"],
+ "ok" if completed.returncode == 0 else "failed",
+ returncode=completed.returncode,
+ command=command,
+ junit=str(junit_path.relative_to(root)),
+ coverage_xml=coverage_xml,
+ duration_seconds=round(time.monotonic() - started_at, 3),
+ )
+
+
+def run_profile(
+ contract: dict[str, Any],
+ root: Path,
+ profile_name: str,
+ build_dir: Path,
+) -> dict[str, Any]:
+ """Execute the configured profile and return a JSON-serializable summary."""
+ build_dir.mkdir(parents=True, exist_ok=True)
+ results: list[dict[str, Any]] = []
+ profiles = contract.get("profiles", {})
+ if profile_name not in profiles:
+ raise SystemExit(f"unknown profile: {profile_name}")
+
+ for check_name in profiles[profile_name]:
+ if check_name == "docs":
+ issues = run_docs_check(contract, root)
+ results.append(
+ _result(
+ "docs",
+ "Required docs, contract descriptions, and module docstrings.",
+ _status_from_issues(issues),
+ issues=issues,
+ )
+ )
+ continue
+ if check_name == "smell":
+ results.append(_run_ruff(contract, root))
+ continue
+ if check_name == "hygiene":
+ issues = run_hygiene_check(contract, root)
+ results.append(
+ _result(
+ "hygiene",
+ "500 LOC hygiene and naming rules for managed test automation.",
+ _status_from_issues(issues),
+ issues=issues,
+ )
+ )
+ continue
+ if check_name == "coverage":
+ unit_suite = contract.get("pytest_suites", {}).get("unit", {})
+ coverage_xml = root / unit_suite.get("coverage_xml", "build/coverage-unit.xml")
+ issues = run_coverage_check(contract, root, coverage_xml)
+ results.append(
+ _result(
+ "coverage",
+ "Per-file 95% coverage floor for tracked quality-managed modules.",
+ _status_from_issues(issues),
+ issues=issues,
+ coverage_xml=str(coverage_xml.relative_to(root)),
+ )
+ )
+ continue
+ suite = contract.get("pytest_suites", {}).get(check_name)
+ if suite is None:
+ raise SystemExit(f"profile {profile_name} references unknown check: {check_name}")
+ results.append(_run_pytest_suite(root, check_name, suite))
+
+ status = "ok" if all(item["status"] == "ok" for item in results) else "failed"
+ workspace_line_coverage_percent = 0.0
+ if "coverage" in profiles[profile_name]:
+ unit_suite = contract.get("pytest_suites", {}).get("unit", {})
+ coverage_xml_rel = unit_suite.get("coverage_xml")
+ if coverage_xml_rel:
+ workspace_line_coverage_percent = compute_workspace_line_coverage(
+ contract,
+ root,
+ root / coverage_xml_rel,
+ )
+ return {
+ "profile": profile_name,
+ "status": status,
+ "results": results,
+ "manual_scripts": contract.get("manual_scripts", []),
+ "workspace_line_coverage_percent": workspace_line_coverage_percent,
+ "source_lines_over_500": count_files_over_line_limit(contract, root),
+ }
+
+
+def main(argv: list[str] | None = None) -> int:
+ """CLI entrypoint for the quality gate."""
+ parser = argparse.ArgumentParser(description=__doc__)
+ parser.add_argument("--profile", default="local")
+ parser.add_argument("--build-dir", default="build")
+ args = parser.parse_args(argv)
+
+ root = Path.cwd()
+ build_dir = root / args.build_dir
+ build_dir.mkdir(parents=True, exist_ok=True)
+ contract = load_contract()
+ summary = run_profile(contract, root, args.profile, build_dir)
+ summary_path = build_dir / "quality-gate-summary.json"
+ summary_path.write_text(json.dumps(summary, indent=2, sort_keys=True) + "\n", encoding="utf-8")
+ return 0 if summary["status"] == "ok" else 1
+
+
+if __name__ == "__main__":
+ raise SystemExit(main())
diff --git a/testing/quality_hygiene.py b/testing/quality_hygiene.py
new file mode 100644
index 00000000..e4a8605d
--- /dev/null
+++ b/testing/quality_hygiene.py
@@ -0,0 +1,50 @@
+"""File-size and naming validation for the managed testing surface."""
+
+from __future__ import annotations
+
+import re
+from collections.abc import Iterable
+from pathlib import Path
+from typing import Any
+
+
+def _expand_globs(root: Path, patterns: Iterable[str]) -> list[Path]:
+ matched: set[Path] = set()
+ for pattern in patterns:
+ matched.update(path for path in root.glob(pattern) if path.is_file())
+ return sorted(matched)
+
+
+def run_check(contract: dict[str, Any], root: Path) -> list[str]:
+ """Return human-readable issues for naming and file-size rules."""
+ config = contract.get("hygiene", {})
+ max_lines = int(config.get("max_lines", 500))
+ issues: list[str] = []
+
+ for path in _expand_globs(root, config.get("line_limit_globs", [])):
+        line_count = len(path.read_text(encoding="utf-8").splitlines())
+ if line_count > max_lines:
+ issues.append(f"file exceeds {max_lines} LOC: {path.relative_to(root)} ({line_count})")
+
+ for rule in config.get("naming_rules", []):
+ pattern = re.compile(rule["pattern"])
+ for path in _expand_globs(root, [rule["glob"]]):
+ if not pattern.match(path.name):
+ issues.append(
+ f"naming rule failed ({rule['description']}): {path.relative_to(root)}"
+ )
+
+ return issues
+
+
+def count_files_over_line_limit(contract: dict[str, Any], root: Path) -> int:
+ """Return the number of managed files that exceed the configured LOC cap."""
+
+ config = contract.get("hygiene", {})
+ max_lines = int(config.get("max_lines", 500))
+ count = 0
+ for path in _expand_globs(root, config.get("line_limit_globs", [])):
+        line_count = len(path.read_text(encoding="utf-8").splitlines())
+ if line_count > max_lines:
+ count += 1
+ return count
diff --git a/testing/tests/test_publish_test_metrics.py b/testing/tests/test_publish_test_metrics.py
new file mode 100644
index 00000000..b590860a
--- /dev/null
+++ b/testing/tests/test_publish_test_metrics.py
@@ -0,0 +1,278 @@
+from __future__ import annotations
+
+import json
+from pathlib import Path
+
+from ci.scripts import publish_test_metrics
+
+
+def test_parse_junit_supports_testsuite_and_missing_file(tmp_path: Path):
+ junit_path = tmp_path / "suite.xml"
+ junit_path.write_text(
+        '<testsuite tests="3" failures="1" errors="0" skipped="1"/>',
+ encoding="utf-8",
+ )
+
+ assert publish_test_metrics._parse_junit(str(junit_path)) == {
+ "tests": 3,
+ "failures": 1,
+ "errors": 0,
+ "skipped": 1,
+ }
+ assert publish_test_metrics._parse_junit(str(tmp_path / "missing.xml")) == {
+ "tests": 0,
+ "failures": 0,
+ "errors": 0,
+ "skipped": 0,
+ }
+
+
+def test_collect_junit_totals_sums_multiple_files(tmp_path: Path):
+ first = tmp_path / "junit-a.xml"
+ second = tmp_path / "junit-b.xml"
+    first.write_text('<testsuite tests="2" failures="1" errors="0" skipped="0"/>', encoding="utf-8")
+    second.write_text('<testsuite tests="3" failures="0" errors="1" skipped="1"/>', encoding="utf-8")
+
+ totals = publish_test_metrics._collect_junit_totals(str(tmp_path / "junit-*.xml"))
+
+ assert totals == {"tests": 5, "failures": 1, "errors": 1, "skipped": 1}
+
+
+def test_parse_junit_handles_testsuites_and_invalid_counts(tmp_path: Path):
+ junit_path = tmp_path / "suite.xml"
+ junit_path.write_text(
+ (
+            "<testsuites>"
+            '<testsuite tests="1" failures="1" errors="0" skipped="0"/>'
+            '<testsuite tests="1.0" failures="nope" errors="0" skipped="0"/>'
+            "</testsuites>"
+ ),
+ encoding="utf-8",
+ )
+
+ assert publish_test_metrics._parse_junit(str(junit_path)) == {
+ "tests": 2,
+ "failures": 1,
+ "errors": 0,
+ "skipped": 0,
+ }
+
+
+def test_read_exit_code_and_summary_fallbacks(tmp_path: Path):
+ rc_path = tmp_path / "rc.txt"
+ rc_path.write_text("0\n", encoding="utf-8")
+ summary_path = tmp_path / "summary.json"
+ summary_path.write_text("{bad json", encoding="utf-8")
+
+ assert publish_test_metrics._read_exit_code(str(rc_path)) == 0
+ assert publish_test_metrics._read_exit_code(str(tmp_path / "missing.rc")) == 1
+ assert publish_test_metrics._load_summary(str(summary_path)) == {}
+ assert publish_test_metrics._load_summary(str(tmp_path / "missing.json")) == {}
+
+
+def test_read_text_post_text_and_fetch_existing_counter(monkeypatch):
+ class _FakeResponse:
+ def __init__(self, payload: str, status: int = 200):
+ self.payload = payload
+ self.status = status
+
+ def read(self):
+ return self.payload.encode("utf-8")
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, exc_type, exc, tb):
+ return None
+
+ responses = iter(
+ [
+ _FakeResponse("alpha"),
+ _FakeResponse("", status=202),
+ _FakeResponse(
+ "\n".join(
+ [
+ 'platform_quality_gate_runs_total{job="platform-quality-ci",suite="titan-iac",status="ok"} 7',
+ 'platform_quality_gate_runs_total{job="other",suite="titan-iac",status="ok"} 1',
+ ]
+ )
+ ),
+ ]
+ )
+ monkeypatch.setattr(
+ publish_test_metrics.urllib.request,
+ "urlopen",
+ lambda *args, **kwargs: next(responses),
+ )
+
+ assert publish_test_metrics._read_text("http://example.invalid") == "alpha"
+ publish_test_metrics._post_text("http://example.invalid", "payload")
+ assert (
+ publish_test_metrics._fetch_existing_counter(
+ "http://push.invalid",
+ "platform_quality_gate_runs_total",
+ {"job": "platform-quality-ci", "suite": "titan-iac", "status": "ok"},
+ )
+ == 7.0
+ )
+
+
+def test_post_text_raises_and_counter_handles_bad_metric_lines(monkeypatch):
+ class _FakeResponse:
+ def __init__(self, payload: str, status: int = 200):
+ self.payload = payload
+ self.status = status
+
+ def read(self):
+ return self.payload.encode("utf-8")
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, exc_type, exc, tb):
+ return None
+
+ monkeypatch.setattr(
+ publish_test_metrics.urllib.request,
+ "urlopen",
+ lambda *args, **kwargs: _FakeResponse("", status=500),
+ )
+ try:
+ publish_test_metrics._post_text("http://example.invalid", "payload")
+ except RuntimeError as exc:
+ assert "push failed" in str(exc)
+ else:
+ raise AssertionError("expected RuntimeError for failing push")
+
+ monkeypatch.setattr(
+ publish_test_metrics,
+ "_read_text",
+ lambda url: "\n".join(
+ [
+ 'platform_quality_gate_runs_total{job="platform-quality-ci",suite="titan-iac",status="ok"}',
+ 'platform_quality_gate_runs_total{job="platform-quality-ci",suite="titan-iac",status="ok"} nope',
+ ]
+ ),
+ )
+ assert (
+ publish_test_metrics._fetch_existing_counter(
+ "http://push.invalid",
+ "platform_quality_gate_runs_total",
+ {"job": "platform-quality-ci", "suite": "titan-iac", "status": "ok"},
+ )
+ == 0.0
+ )
+
+
+def test_build_payload_includes_summary_metrics():
+ payload = publish_test_metrics._build_payload(
+ suite="titan-iac",
+ status="ok",
+ tests={"tests": 4, "failures": 1, "errors": 0, "skipped": 1},
+ ok_count=7,
+ failed_count=2,
+ branch="main",
+ build_number="42",
+ summary={
+ "results": [
+ {"name": "docs", "status": "ok"},
+ {"name": "unit", "status": "failed"},
+ ]
+ },
+ workspace_line_coverage_percent=97.125,
+ source_lines_over_500=3,
+ )
+
+ assert 'platform_quality_gate_runs_total{suite="titan-iac",status="ok"} 7' in payload
+ assert 'titan_iac_quality_gate_checks_total{suite="titan-iac",check="docs",result="ok"} 1' in payload
+ assert 'titan_iac_quality_gate_checks_total{suite="titan-iac",check="unit",result="failed"} 1' in payload
+ assert 'platform_quality_gate_workspace_line_coverage_percent{suite="titan-iac"} 97.125' in payload
+ assert 'platform_quality_gate_source_lines_over_500_total{suite="titan-iac"} 3' in payload
+
+
+def test_build_payload_skips_incomplete_results():
+ payload = publish_test_metrics._build_payload(
+ suite="titan-iac",
+ status="failed",
+ tests={"tests": 0, "failures": 0, "errors": 0, "skipped": 0},
+ ok_count=1,
+ failed_count=2,
+ branch="",
+ build_number="",
+ summary={"results": [{"name": "docs"}, {"status": "ok"}]},
+ )
+
+ assert "titan_iac_quality_gate_checks_total" in payload
+ assert 'check="docs"' not in payload
+
+
+def test_main_uses_quality_gate_summary_and_junit_glob(tmp_path: Path, monkeypatch):
+ build_dir = tmp_path / "build"
+ build_dir.mkdir()
+ (build_dir / "junit-unit.xml").write_text(
+        '<testsuite tests="2" failures="0" errors="0" skipped="0"/>',
+ encoding="utf-8",
+ )
+ (build_dir / "junit-glue.xml").write_text(
+        '<testsuite tests="1" failures="1" errors="0" skipped="0"/>',
+ encoding="utf-8",
+ )
+ (build_dir / "quality-gate.rc").write_text("1\n", encoding="utf-8")
+ (build_dir / "quality-gate-summary.json").write_text(
+ json.dumps(
+ {
+ "results": [{"name": "docs", "status": "ok"}, {"name": "glue", "status": "failed"}],
+ "workspace_line_coverage_percent": 96.4321,
+ "source_lines_over_500": 2,
+ }
+ ),
+ encoding="utf-8",
+ )
+
+ posted = {}
+
+ monkeypatch.setenv("SUITE_NAME", "titan-iac")
+ monkeypatch.setenv("PUSHGATEWAY_URL", "http://pushgateway.invalid")
+ monkeypatch.setenv("QUALITY_GATE_JOB_NAME", "platform-quality-ci")
+ monkeypatch.setenv("JUNIT_GLOB", str(build_dir / "junit-*.xml"))
+ monkeypatch.setenv("QUALITY_GATE_EXIT_CODE_PATH", str(build_dir / "quality-gate.rc"))
+ monkeypatch.setenv("QUALITY_GATE_SUMMARY_PATH", str(build_dir / "quality-gate-summary.json"))
+ monkeypatch.setenv("BRANCH_NAME", "main")
+ monkeypatch.setenv("BUILD_NUMBER", "88")
+
+ monkeypatch.setattr(publish_test_metrics, "_fetch_existing_counter", lambda *args, **kwargs: 5)
+ monkeypatch.setattr(publish_test_metrics, "_post_text", lambda url, payload: posted.update({"url": url, "payload": payload}))
+
+ rc = publish_test_metrics.main()
+
+ assert rc == 0
+ assert posted["url"].endswith("/metrics/job/platform-quality-ci/suite/titan-iac")
+ assert 'titan_iac_quality_gate_tests_total{suite="titan-iac",result="failed"} 1' in posted["payload"]
+ assert 'titan_iac_quality_gate_checks_total{suite="titan-iac",check="glue",result="failed"} 1' in posted["payload"]
+ assert 'platform_quality_gate_workspace_line_coverage_percent{suite="titan-iac"} 96.432' in posted["payload"]
+ assert 'platform_quality_gate_source_lines_over_500_total{suite="titan-iac"} 2' in posted["payload"]
+
+
+def test_main_marks_successful_run(tmp_path: Path, monkeypatch, capsys):
+ build_dir = tmp_path / "build"
+ build_dir.mkdir()
+ (build_dir / "junit.xml").write_text(
+        '<testsuite tests="1" failures="0" errors="0" skipped="0"/>',
+ encoding="utf-8",
+ )
+ (build_dir / "quality-gate.rc").write_text("0\n", encoding="utf-8")
+
+ monkeypatch.setenv("JUNIT_GLOB", str(build_dir / "*.xml"))
+ monkeypatch.setenv("QUALITY_GATE_EXIT_CODE_PATH", str(build_dir / "quality-gate.rc"))
+ monkeypatch.setenv("QUALITY_GATE_SUMMARY_PATH", str(build_dir / "missing-summary.json"))
+ monkeypatch.setattr(publish_test_metrics, "_fetch_existing_counter", lambda *args, **kwargs: 0)
+ monkeypatch.setattr(publish_test_metrics, "_post_text", lambda *args, **kwargs: None)
+
+ rc = publish_test_metrics.main()
+
+ summary = json.loads(capsys.readouterr().out)
+ assert rc == 0
+ assert summary["status"] == "ok"
+ assert summary["checks_recorded"] == 0
+ assert summary["workspace_line_coverage_percent"] == 0.0
+ assert summary["source_lines_over_500"] == 0
diff --git a/testing/tests/test_quality_contract.py b/testing/tests/test_quality_contract.py
new file mode 100644
index 00000000..e5f97d00
--- /dev/null
+++ b/testing/tests/test_quality_contract.py
@@ -0,0 +1,179 @@
+from __future__ import annotations
+
+from pathlib import Path
+import textwrap
+
+from testing.quality_contract import load_contract
+from testing.quality_coverage import run_check as run_coverage_check
+from testing.quality_docs import run_check as run_docs_check
+from testing.quality_hygiene import run_check as run_hygiene_check
+
+
+def test_bundled_contract_exposes_local_and_jenkins_profiles():
+ contract = load_contract()
+ assert "local" in contract["profiles"]
+ assert "jenkins" in contract["profiles"]
+ assert contract["pytest_suites"]["unit"]["paths"]
+
+
+def test_bundled_contract_keeps_monorepo_manifest_trees_out_of_hygiene_scope():
+ contract = load_contract()
+ required_doc_paths = {item["path"] for item in contract.get("required_docs", [])}
+ assert "AGENTS.md" not in required_doc_paths
+
+ globs = contract.get("hygiene", {}).get("line_limit_globs", [])
+ assert globs
+ for entry in globs:
+ assert entry.startswith(("testing/", "ci/", "scripts/tests/", "services/"))
+ assert "/scripts/" in entry or not entry.startswith("services/")
+
+
+def test_docs_check_reports_missing_docstring_and_missing_path(tmp_path: Path):
+ module_path = tmp_path / "managed.py"
+ module_path.write_text("value = 1\n", encoding="utf-8")
+ (tmp_path / "README.md").write_text("repo docs\n", encoding="utf-8")
+
+ contract = {
+ "required_docs": [{"path": "README.md", "description": "Docs"}],
+ "managed_modules": ["managed.py"],
+ "lint_paths": ["missing-dir"],
+ "pytest_suites": {"unit": {"description": "Unit", "paths": ["missing-tests"]}},
+ "manual_scripts": [{"path": "missing-script.py", "description": "Manual"}],
+ }
+
+ issues = run_docs_check(contract, tmp_path)
+
+ assert "module docstring missing: managed.py" in issues
+ assert "contract path missing: missing-dir" in issues
+ assert "contract path missing: missing-tests" in issues
+ assert "contract path missing: missing-script.py" in issues
+
+
+def test_docs_check_reports_missing_required_doc_metadata(tmp_path: Path):
+ (tmp_path / "README.md").write_text("", encoding="utf-8")
+
+ contract = {
+ "required_docs": [{"path": "README.md", "description": ""}, {"path": "missing.md", "description": "Missing"}],
+ "managed_modules": [],
+ "lint_paths": [],
+ "pytest_suites": {"unit": {"description": "", "paths": []}},
+ "manual_scripts": [{"path": "manual.py", "description": ""}],
+ }
+
+ issues = run_docs_check(contract, tmp_path)
+
+ assert "required doc empty: README.md" in issues
+ assert "required doc missing description: README.md" in issues
+ assert "required doc missing: missing.md" in issues
+ assert "pytest suite missing description: unit" in issues
+ assert "manual script missing description: manual.py" in issues
+
+
+def test_hygiene_check_enforces_line_limit_and_name_rules(tmp_path: Path):
+ tests_dir = tmp_path / "tests"
+ tests_dir.mkdir()
+ bad_name = tests_dir / "bad-name.py"
+ bad_name.write_text("x = 1\n", encoding="utf-8")
+ long_file = tests_dir / "test_too_long.py"
+ long_file.write_text("line\n" * 4, encoding="utf-8")
+
+ contract = {
+ "hygiene": {
+ "max_lines": 3,
+ "line_limit_globs": ["tests/*.py"],
+ "naming_rules": [
+ {
+ "glob": "tests/*.py",
+ "pattern": r"^test_[a-z0-9_]+\.py$",
+ "description": "pytest files use test_*.py names.",
+ }
+ ],
+ }
+ }
+
+ issues = run_hygiene_check(contract, tmp_path)
+
+ assert any("file exceeds 3 LOC" in issue for issue in issues)
+ assert any("naming rule failed" in issue and "bad-name.py" in issue for issue in issues)
+
+
+def test_coverage_check_enforces_per_file_floor(tmp_path: Path):
+ build_dir = tmp_path / "build"
+ build_dir.mkdir()
+ coverage_xml = build_dir / "coverage.xml"
+ coverage_xml.write_text(
+ textwrap.dedent(
+ """\
+            <coverage>
+              <packages>
+                <package>
+                  <classes>
+                    <class filename="ok.py" line-rate="1.0"/>
+                    <class filename="low.py" line-rate="0.9"/>
+                  </classes>
+                </package>
+              </packages>
+            </coverage>
+            """
+ ),
+ encoding="utf-8",
+ )
+
+ contract = {
+ "coverage": {
+ "minimum_percent": 95.0,
+ "tracked_files": ["ok.py", "low.py", "missing.py"],
+ }
+ }
+
+ issues = run_coverage_check(contract, tmp_path, coverage_xml)
+
+ assert "coverage below 95.0%: low.py (90.0%)" in issues
+ assert "coverage missing for tracked file: missing.py" in issues
+
+
+def test_coverage_check_handles_missing_xml_and_source_root_mapping(tmp_path: Path):
+ missing_xml = tmp_path / "missing.xml"
+ assert run_coverage_check({"coverage": {"tracked_files": []}}, tmp_path, missing_xml) == [
+ "coverage xml missing: missing.xml"
+ ]
+
+ source_dir = tmp_path / "pkg"
+ source_dir.mkdir()
+ (source_dir / "mapped.py").write_text("value = 1\n", encoding="utf-8")
+ coverage_xml = tmp_path / "coverage.xml"
+ coverage_xml.write_text(
+ textwrap.dedent(
+ f"""\
+            <coverage>
+              <sources>
+                <source>{source_dir}</source>
+              </sources>
+              <packages>
+                <package>
+                  <classes>
+                    <class filename="mapped.py" line-rate="0.98"/>
+                    <class filename="{tmp_path / 'absolute.py'}" line-rate="1.0"/>
+                  </classes>
+                </package>
+              </packages>
+            </coverage>
+            """
+ ),
+ encoding="utf-8",
+ )
+ (tmp_path / "absolute.py").write_text("value = 2\n", encoding="utf-8")
+
+ issues = run_coverage_check(
+ {
+ "coverage": {
+ "minimum_percent": 95.0,
+ "tracked_files": ["pkg/mapped.py", "absolute.py"],
+ }
+ },
+ tmp_path,
+ coverage_xml,
+ )
+
+ assert issues == []
diff --git a/testing/tests/test_quality_gate.py b/testing/tests/test_quality_gate.py
new file mode 100644
index 00000000..8642da0d
--- /dev/null
+++ b/testing/tests/test_quality_gate.py
@@ -0,0 +1,70 @@
+from __future__ import annotations
+
+from pathlib import Path
+
+from testing import quality_gate
+
+
+def test_run_profile_aggregates_internal_and_pytest_results(tmp_path: Path, monkeypatch):
+ build_dir = tmp_path / "build"
+ unit_test = tmp_path / "test_sample.py"
+ unit_test.write_text("def test_ok():\n assert True\n", encoding="utf-8")
+
+ contract = {
+ "profiles": {"local": ["docs", "smell", "hygiene", "unit", "coverage"]},
+ "pytest_suites": {
+ "unit": {
+ "description": "Unit suite",
+ "paths": [str(unit_test.relative_to(tmp_path))],
+ "junit": "build/junit-unit.xml",
+ "coverage_xml": "build/coverage-unit.xml",
+ "coverage_sources": [],
+ }
+ },
+ "manual_scripts": [{"path": "manual.py", "description": "Manual"}],
+ }
+
+ monkeypatch.setattr(quality_gate, "run_docs_check", lambda *_: [])
+ monkeypatch.setattr(quality_gate, "run_hygiene_check", lambda *_: [])
+ monkeypatch.setattr(quality_gate, "run_coverage_check", lambda *_: [])
+
+ calls = []
+
+ def fake_run(command, cwd, check):
+ calls.append((command, cwd, check))
+ if "--junitxml=" in " ".join(command):
+ (build_dir / "junit-unit.xml").write_text(
+                '<testsuite tests="1" failures="0" errors="0" skipped="0"/>',
+ encoding="utf-8",
+ )
+        (build_dir / "coverage-unit.xml").write_text("<coverage/>", encoding="utf-8")
+ return type("Completed", (), {"returncode": 0})()
+
+ monkeypatch.setattr(quality_gate.subprocess, "run", fake_run)
+
+ summary = quality_gate.run_profile(contract, tmp_path, "local", build_dir)
+
+ assert summary["status"] == "ok"
+ assert [result["name"] for result in summary["results"]] == [
+ "docs",
+ "smell",
+ "hygiene",
+ "unit",
+ "coverage",
+ ]
+ assert summary["workspace_line_coverage_percent"] == 0.0
+ assert summary["source_lines_over_500"] == 0
+ assert calls[0][0][:3] == [quality_gate.sys.executable, "-m", "ruff"]
+ assert any(result.get("junit") == "build/junit-unit.xml" for result in summary["results"])
+
+
+def test_main_writes_summary_file(tmp_path: Path, monkeypatch):
+ summary = {"status": "ok", "profile": "local", "results": [], "manual_scripts": []}
+ monkeypatch.chdir(tmp_path)
+ monkeypatch.setattr(quality_gate, "load_contract", lambda: {"profiles": {"local": []}, "pytest_suites": {}})
+ monkeypatch.setattr(quality_gate, "run_profile", lambda *args, **kwargs: summary)
+
+ rc = quality_gate.main(["--profile", "local", "--build-dir", "build"])
+
+ assert rc == 0
+ assert (tmp_path / "build" / "quality-gate-summary.json").exists()