ci: fix data-prepper defaults and restore metrics publisher coverage

This commit is contained in:
codex 2026-04-19 21:57:40 -03:00
parent 881c724725
commit ddabda06bf
3 changed files with 60 additions and 1 deletions

View File

@ -422,5 +422,5 @@ def main() -> int:
return 0
if __name__ == "__main__":
if __name__ == "__main__": # pragma: no cover
raise SystemExit(main())

View File

@ -196,6 +196,8 @@ EOF
withCredentials([usernamePassword(credentialsId: 'harbor-robot', usernameVariable: 'HARBOR_USERNAME', passwordVariable: 'HARBOR_PASSWORD')]) {
sh '''
set -euo pipefail
IMAGE_TAG="${IMAGE_TAG:-2.8.0}"
PUSH_LATEST="${PUSH_LATEST:-true}"
if [ -z "${HARBOR_REPO:-}" ] || [ "${HARBOR_REPO}" = "registry.bstein.dev/monitoring/data-prepper" ]; then
HARBOR_REPO="registry.bstein.dev/streaming/data-prepper"
fi

View File

@ -145,6 +145,55 @@ def test_build_check_statuses_handles_missing_reports():
assert check_statuses["supply_chain"] == "not_applicable"
def test_status_normalization_and_optional_reports(tmp_path: Path):
report_path = tmp_path / "report.json"
report_path.write_text("{bad json", encoding="utf-8")
assert publish_test_metrics._normalize_result_status(None, default="failed") == "failed"
assert publish_test_metrics._normalize_result_status("n/a") == "not_applicable"
assert publish_test_metrics._normalize_result_status("unexpected", default="ok") == "ok"
assert publish_test_metrics._load_optional_json(None) == {}
assert publish_test_metrics._load_optional_json(str(tmp_path / "missing.json")) == {}
assert publish_test_metrics._load_optional_json(str(report_path)) == {}
assert publish_test_metrics._combine_statuses([]) == "not_applicable"
assert publish_test_metrics._combine_statuses(["not_applicable", "not_applicable"]) == "not_applicable"
assert publish_test_metrics._combine_statuses(["unknown"]) == "failed"
assert publish_test_metrics._infer_supply_chain_status({"compliant": False}, required=True) == "failed"
assert publish_test_metrics._infer_supply_chain_status({"status": None}, required=False) == "not_applicable"
assert publish_test_metrics._infer_supply_chain_status({"status": "not_applicable"}, required=True) == "failed"
def test_build_check_statuses_handles_non_dict_results_and_fallbacks():
check_statuses = publish_test_metrics._build_check_statuses(
summary={
"results": [
None,
{"name": "", "status": "ok"},
{"name": "unit", "status": "warning"},
{"name": "hygiene", "status": "ok"},
{"name": "gate", "status": "ok"},
]
},
tests={"tests": 3, "failures": 1, "errors": 0, "skipped": 0},
workspace_line_coverage_percent=94.0,
source_lines_over_500=0,
sonarqube_report={"status": "ERROR"},
supply_chain_report={"status": "not_applicable"},
supply_chain_required=True,
)
assert check_statuses["tests"] == "failed"
assert check_statuses["coverage"] == "failed"
assert check_statuses["loc"] == "ok"
assert check_statuses["docs_naming"] == "ok"
assert check_statuses["gate_glue"] == "ok"
assert check_statuses["sonarqube"] == "failed"
assert check_statuses["supply_chain"] == "failed"
def test_read_text_post_text_and_fetch_existing_counter(monkeypatch):
class _FakeResponse:
def __init__(self, payload: str, status: int = 200):
@ -257,6 +306,14 @@ def test_post_text_raises_and_counter_handles_bad_metric_lines(monkeypatch):
)
== 9.0
)
assert (
publish_test_metrics._fetch_existing_counter(
"http://push.invalid",
"platform_quality_gate_runs_total",
{"job": "platform-quality-ci", "suite": "missing-suite", "status": "ok"},
)
== 0.0
)
def test_build_payload_includes_canonical_checks():