ci: publish accurate metis test counts and run status

This commit is contained in:
Brad Stein 2026-04-10 13:47:32 -03:00
parent 8232a5109e
commit feeeeeda3a
2 changed files with 41 additions and 3 deletions

17
Jenkinsfile vendored
View File

@ -87,6 +87,7 @@ spec:
SEMVER = 'dev' SEMVER = 'dev'
COVERAGE_JSON = 'build/coverage.json' COVERAGE_JSON = 'build/coverage.json'
JUNIT_XML = 'build/junit.xml' JUNIT_XML = 'build/junit.xml'
TEST_EXIT_CODE_PATH = 'build/test.exitcode'
SUITE_NAME = 'metis' SUITE_NAME = 'metis'
PUSHGATEWAY_URL = 'http://platform-quality-gateway.monitoring.svc.cluster.local:9091' PUSHGATEWAY_URL = 'http://platform-quality-gateway.monitoring.svc.cluster.local:9091'
} }
@ -113,9 +114,10 @@ spec:
mkdir -p build mkdir -p build
go install github.com/jstemmer/go-junit-report/v2@latest go install github.com/jstemmer/go-junit-report/v2@latest
set +e set +e
go test -coverprofile=build/coverage.out ./... > build/test.out 2>&1 go test -v -coverprofile=build/coverage.out ./... > build/test.out 2>&1
test_rc=$? test_rc=$?
set -e set -e
printf '%s\n' "${test_rc}" > "${TEST_EXIT_CODE_PATH}"
cat build/test.out cat build/test.out
"$(go env GOPATH)/bin/go-junit-report" < build/test.out > "${JUNIT_XML}" "$(go env GOPATH)/bin/go-junit-report" < build/test.out > "${JUNIT_XML}"
coverage="0" coverage="0"
@ -124,7 +126,6 @@ spec:
fi fi
export GO_COVERAGE="${coverage}" export GO_COVERAGE="${coverage}"
printf '{"summary":{"percent_covered":%s}}\n' "${GO_COVERAGE}" > "${COVERAGE_JSON}" printf '{"summary":{"percent_covered":%s}}\n' "${GO_COVERAGE}" > "${COVERAGE_JSON}"
exit ${test_rc}
''' '''
} }
} }
@ -141,6 +142,18 @@ spec:
} }
} }
stage('Enforce test result') {
steps {
container('tester') {
// Re-raise the `go test` exit status that the test stage recorded in
// TEST_EXIT_CODE_PATH (written right after `go test` ran with `set +e`).
// The inline `exit ${test_rc}` was removed from the test stage so that
// coverage/JUnit artifacts are produced even on failure; this stage runs
// afterwards and fails the build when the recorded code is non-zero.
// NOTE(review): assumes this stage always runs after the file is written —
// a missing file would make `cat` fail under `set -eu`. Confirm ordering.
sh '''
set -eu
test_rc="$(cat "${TEST_EXIT_CODE_PATH}")"
exit "${test_rc}"
'''
}
}
}
stage('Prep toolchain') { stage('Prep toolchain') {
steps { steps {
container('builder') { container('builder') {

View File

@ -57,6 +57,19 @@ def _load_junit(path: str) -> dict[str, int]:
return totals return totals
def _load_exit_code(path: str) -> int | None:
if not path or not os.path.exists(path):
return None
with open(path, "r", encoding="utf-8") as handle:
raw = handle.read().strip()
if not raw:
return None
try:
return int(raw)
except ValueError:
raise RuntimeError(f"invalid test exit code {raw!r} in {path}")
def _read_http(url: str) -> str: def _read_http(url: str) -> str:
try: try:
with urllib.request.urlopen(url, timeout=10) as resp: with urllib.request.urlopen(url, timeout=10) as resp:
@ -100,6 +113,7 @@ def _fetch_existing_counter(pushgateway_url: str, metric: str, labels: dict[str,
def main() -> int: def main() -> int:
coverage_path = os.getenv("COVERAGE_JSON", "build/coverage.json") coverage_path = os.getenv("COVERAGE_JSON", "build/coverage.json")
junit_path = os.getenv("JUNIT_XML", "build/junit.xml") junit_path = os.getenv("JUNIT_XML", "build/junit.xml")
test_exit_code_path = os.getenv("TEST_EXIT_CODE_PATH", "build/test.exitcode")
pushgateway_url = os.getenv( pushgateway_url = os.getenv(
"PUSHGATEWAY_URL", "http://platform-quality-gateway.monitoring.svc.cluster.local:9091" "PUSHGATEWAY_URL", "http://platform-quality-gateway.monitoring.svc.cluster.local:9091"
).strip() ).strip()
@ -115,10 +129,16 @@ def main() -> int:
coverage = _load_coverage(coverage_path) coverage = _load_coverage(coverage_path)
totals = _load_junit(junit_path) totals = _load_junit(junit_path)
test_exit_code = _load_exit_code(test_exit_code_path)
passed = max(totals["tests"] - totals["failures"] - totals["errors"] - totals["skipped"], 0) passed = max(totals["tests"] - totals["failures"] - totals["errors"] - totals["skipped"], 0)
outcome = "ok" outcome = "ok"
if totals["tests"] <= 0 or totals["failures"] > 0 or totals["errors"] > 0: if (
(test_exit_code is not None and test_exit_code != 0)
or totals["tests"] <= 0
or totals["failures"] > 0
or totals["errors"] > 0
):
outcome = "failed" outcome = "failed"
job_name = "platform-quality-ci" job_name = "platform-quality-ci"
@ -148,10 +168,14 @@ def main() -> int:
f'platform_quality_gate_runs_total{{suite="{suite}",status="ok"}} {ok_count:.0f}', f'platform_quality_gate_runs_total{{suite="{suite}",status="ok"}} {ok_count:.0f}',
f'platform_quality_gate_runs_total{{suite="{suite}",status="failed"}} {failed_count:.0f}', f'platform_quality_gate_runs_total{{suite="{suite}",status="failed"}} {failed_count:.0f}',
"# TYPE metis_quality_gate_tests_total gauge", "# TYPE metis_quality_gate_tests_total gauge",
f'metis_quality_gate_tests_total{{suite="{suite}",result="total"}} {totals["tests"]}',
f'metis_quality_gate_tests_total{{suite="{suite}",result="passed"}} {passed}', f'metis_quality_gate_tests_total{{suite="{suite}",result="passed"}} {passed}',
f'metis_quality_gate_tests_total{{suite="{suite}",result="failed"}} {totals["failures"]}', f'metis_quality_gate_tests_total{{suite="{suite}",result="failed"}} {totals["failures"]}',
f'metis_quality_gate_tests_total{{suite="{suite}",result="error"}} {totals["errors"]}', f'metis_quality_gate_tests_total{{suite="{suite}",result="error"}} {totals["errors"]}',
f'metis_quality_gate_tests_total{{suite="{suite}",result="skipped"}} {totals["skipped"]}', f'metis_quality_gate_tests_total{{suite="{suite}",result="skipped"}} {totals["skipped"]}',
"# TYPE metis_quality_gate_run_status gauge",
f'metis_quality_gate_run_status{{suite="{suite}",status="ok"}} {1 if outcome == "ok" else 0}',
f'metis_quality_gate_run_status{{suite="{suite}",status="failed"}} {1 if outcome == "failed" else 0}',
"# TYPE metis_quality_gate_coverage_percent gauge", "# TYPE metis_quality_gate_coverage_percent gauge",
f'metis_quality_gate_coverage_percent{{suite="{suite}"}} {coverage:.3f}', f'metis_quality_gate_coverage_percent{{suite="{suite}"}} {coverage:.3f}',
"# TYPE metis_quality_gate_build_info gauge", "# TYPE metis_quality_gate_build_info gauge",
@ -171,6 +195,7 @@ def main() -> int:
"tests_errors": totals["errors"], "tests_errors": totals["errors"],
"tests_skipped": totals["skipped"], "tests_skipped": totals["skipped"],
"coverage_percent": round(coverage, 3), "coverage_percent": round(coverage, 3),
"test_exit_code": test_exit_code,
"ok_counter": ok_count, "ok_counter": ok_count,
"failed_counter": failed_count, "failed_counter": failed_count,
}, },