# titan-iac/testing/tests/test_publish_test_metrics.py
# (365 lines, 13 KiB, Python)
from __future__ import annotations
import json
from pathlib import Path
from ci.scripts import publish_test_metrics
def test_parse_junit_supports_testsuite_and_missing_file(tmp_path: Path):
    """A bare <testsuite> root is parsed; a missing file yields zeroed counts."""
    junit_file = tmp_path / "suite.xml"
    junit_file.write_text(
        '<testsuite tests="3" failures="1" errors="0" skipped="1" />',
        encoding="utf-8",
    )
    parsed = publish_test_metrics._parse_junit(str(junit_file))
    assert parsed == {"tests": 3, "failures": 1, "errors": 0, "skipped": 1}
    # A nonexistent path must fall back to all-zero counts, not raise.
    missing = publish_test_metrics._parse_junit(str(tmp_path / "missing.xml"))
    assert missing == {"tests": 0, "failures": 0, "errors": 0, "skipped": 0}
def test_collect_junit_totals_sums_multiple_files(tmp_path: Path):
    """Counts are summed across every junit file matched by the glob."""
    reports = {
        "junit-a.xml": '<testsuite tests="2" failures="1" errors="0" skipped="0" />',
        "junit-b.xml": '<testsuite tests="3" failures="0" errors="1" skipped="1" />',
    }
    for filename, xml in reports.items():
        (tmp_path / filename).write_text(xml, encoding="utf-8")
    totals = publish_test_metrics._collect_junit_totals(str(tmp_path / "junit-*.xml"))
    assert totals == {"tests": 5, "failures": 1, "errors": 1, "skipped": 1}
def test_parse_junit_handles_testsuites_and_invalid_counts(tmp_path: Path):
    """A <testsuites> wrapper is supported and non-numeric counts are skipped."""
    junit_file = tmp_path / "suite.xml"
    junit_file.write_text(
        "<testsuites>"
        '<testsuite tests="2" failures="1" errors="0" skipped="0" />'
        '<testsuite tests="bad" failures="0" errors="0" skipped="0" />'
        "</testsuites>",
        encoding="utf-8",
    )
    # Only the suite with valid integer attributes contributes to the totals.
    parsed = publish_test_metrics._parse_junit(str(junit_file))
    assert parsed == {"tests": 2, "failures": 1, "errors": 0, "skipped": 0}
def test_parse_junit_handles_unknown_root(tmp_path: Path):
    """An unrecognized XML root element falls back to all-zero counts."""
    junit_file = tmp_path / "suite.xml"
    junit_file.write_text("<root><item /></root>", encoding="utf-8")
    parsed = publish_test_metrics._parse_junit(str(junit_file))
    assert parsed == {"tests": 0, "failures": 0, "errors": 0, "skipped": 0}
def test_read_exit_code_and_summary_fallbacks(tmp_path: Path):
    """Missing rc files default to 1; bad or missing summaries yield {}."""
    rc_file = tmp_path / "rc.txt"
    rc_file.write_text("0\n", encoding="utf-8")
    broken_summary = tmp_path / "summary.json"
    broken_summary.write_text("{bad json", encoding="utf-8")
    assert publish_test_metrics._read_exit_code(str(rc_file)) == 0
    # A missing exit-code file is treated as a failure (rc 1).
    assert publish_test_metrics._read_exit_code(str(tmp_path / "missing.rc")) == 1
    assert publish_test_metrics._load_summary(str(broken_summary)) == {}
    assert publish_test_metrics._load_summary(str(tmp_path / "missing.json")) == {}
def test_summary_extractors_handle_invalid_shapes_and_values():
    """_summary_float/_summary_int coerce valid values and default bad ones."""
    coverage_key = "workspace_line_coverage_percent"
    loc_key = "source_lines_over_500"
    float_cases = [
        ({}, 0.0),                       # key absent
        ({coverage_key: "bad"}, 0.0),    # non-numeric value
        ({coverage_key: 95}, 95.0),      # int coerced to float
        ({coverage_key: 97.5}, 97.5),    # float passed through
    ]
    for summary, expected in float_cases:
        assert publish_test_metrics._summary_float(summary, coverage_key) == expected
    int_cases = [
        ({}, 0),                 # key absent
        ({loc_key: "bad"}, 0),   # non-numeric value
        ({loc_key: 2}, 2),       # int passed through
        ({loc_key: 2.9}, 2),     # float truncated toward zero
    ]
    for summary, expected in int_cases:
        assert publish_test_metrics._summary_int(summary, loc_key) == expected
def test_build_check_statuses_maps_legacy_names_and_reports():
    """Legacy result names map onto canonical checks; reports set the rest."""
    legacy_results = [
        {"name": "unit", "status": "ok"},
        {"name": "coverage", "status": "failed"},
        {"name": "hygiene", "status": "ok"},
        {"name": "smell", "status": "ok"},
        {"name": "docs", "status": "ok"},
        {"name": "glue", "status": "failed"},
    ]
    check_statuses = publish_test_metrics._build_check_statuses(
        summary={"results": legacy_results},
        tests={"tests": 4, "failures": 0, "errors": 0, "skipped": 0},
        workspace_line_coverage_percent=97.0,
        source_lines_over_500=0,
        sonarqube_report={"projectStatus": {"status": "OK"}},
        supply_chain_report={"status": "compliant"},
        supply_chain_required=True,
    )
    assert check_statuses == {
        "tests": "ok",
        "coverage": "failed",
        "loc": "ok",
        "docs_naming": "ok",
        "gate_glue": "failed",
        "sonarqube": "ok",
        "supply_chain": "ok",
    }
def test_build_check_statuses_handles_missing_reports():
    """Empty inputs become not_applicable, except loc which can still fail."""
    check_statuses = publish_test_metrics._build_check_statuses(
        summary={"results": []},
        tests={"tests": 0, "failures": 0, "errors": 0, "skipped": 0},
        workspace_line_coverage_percent=0.0,
        source_lines_over_500=2,
        sonarqube_report={},
        supply_chain_report={},
        supply_chain_required=False,
    )
    expected = {
        "tests": "not_applicable",
        "coverage": "not_applicable",
        "loc": "failed",  # source_lines_over_500 > 0 still fails the loc check
        "docs_naming": "not_applicable",
        "gate_glue": "not_applicable",
        "sonarqube": "not_applicable",
        "supply_chain": "not_applicable",
    }
    for check, status in expected.items():
        assert check_statuses[check] == status
def test_read_text_post_text_and_fetch_existing_counter(monkeypatch):
    """_read_text, _post_text, and _fetch_existing_counter over fake urlopen."""

    class _FakeResponse:
        """Minimal context-manager stand-in for urlopen's response object."""

        def __init__(self, payload: str, status: int = 200):
            self.payload = payload
            self.status = status

        def read(self):
            return self.payload.encode("utf-8")

        def __enter__(self):
            return self

        def __exit__(self, exc_type, exc, tb):
            return None

    metrics_body = "\n".join(
        [
            'platform_quality_gate_runs_total{job="platform-quality-ci",suite="titan-iac",status="ok"} 7',
            'platform_quality_gate_runs_total{job="other",suite="titan-iac",status="ok"} 1',
        ]
    )
    # One response per call: _read_text, _post_text, then the counter fetch.
    responses = iter(
        [_FakeResponse("alpha"), _FakeResponse("", status=202), _FakeResponse(metrics_body)]
    )
    monkeypatch.setattr(
        publish_test_metrics.urllib.request,
        "urlopen",
        lambda *args, **kwargs: next(responses),
    )
    assert publish_test_metrics._read_text("http://example.invalid") == "alpha"
    publish_test_metrics._post_text("http://example.invalid", "payload")
    existing = publish_test_metrics._fetch_existing_counter(
        "http://push.invalid",
        "platform_quality_gate_runs_total",
        {"job": "platform-quality-ci", "suite": "titan-iac", "status": "ok"},
    )
    # Only the line whose labels match exactly should be picked up.
    assert existing == 7.0
def test_post_text_raises_and_counter_handles_bad_metric_lines(monkeypatch):
    """_post_text raises on non-2xx; malformed metric lines are skipped."""

    class _FakeResponse:
        """Minimal context-manager stand-in for urlopen's response object."""

        def __init__(self, payload: str, status: int = 200):
            self.payload = payload
            self.status = status

        def read(self):
            return self.payload.encode("utf-8")

        def __enter__(self):
            return self

        def __exit__(self, exc_type, exc, tb):
            return None

    monkeypatch.setattr(
        publish_test_metrics.urllib.request,
        "urlopen",
        lambda *args, **kwargs: _FakeResponse("", status=500),
    )
    raised = False
    try:
        publish_test_metrics._post_text("http://example.invalid", "payload")
    except RuntimeError as exc:
        raised = True
        assert "push failed" in str(exc)
    if not raised:
        raise AssertionError("expected RuntimeError for failing push")

    labels = {"job": "platform-quality-ci", "suite": "titan-iac", "status": "ok"}
    # Lines with a missing or non-numeric sample value contribute nothing.
    bad_value_body = "\n".join(
        [
            'platform_quality_gate_runs_total{job="platform-quality-ci",suite="titan-iac",status="ok"}',
            'platform_quality_gate_runs_total{job="platform-quality-ci",suite="titan-iac",status="ok"} nope',
        ]
    )
    monkeypatch.setattr(publish_test_metrics, "_read_text", lambda url: bad_value_body)
    assert (
        publish_test_metrics._fetch_existing_counter(
            "http://push.invalid", "platform_quality_gate_runs_total", labels
        )
        == 0.0
    )
    # Only the metric with both the right name and matching labels counts.
    mixed_body = "\n".join(
        [
            'different_metric{job="platform-quality-ci",suite="titan-iac",status="ok"} 8',
            'platform_quality_gate_runs_total{job="platform-quality-ci",suite="other",status="ok"} 8',
            'platform_quality_gate_runs_total{job="platform-quality-ci",suite="titan-iac",status="ok"} 9',
        ]
    )
    monkeypatch.setattr(publish_test_metrics, "_read_text", lambda url: mixed_body)
    assert (
        publish_test_metrics._fetch_existing_counter(
            "http://push.invalid", "platform_quality_gate_runs_total", labels
        )
        == 9.0
    )
def test_build_payload_includes_canonical_checks():
    """Payload carries the run counter and one line per canonical check."""
    payload = publish_test_metrics._build_payload(
        suite="titan-iac",
        status="ok",
        tests={"tests": 4, "failures": 1, "errors": 0, "skipped": 1},
        ok_count=7,
        failed_count=2,
        branch="main",
        build_number="42",
        workspace_line_coverage_percent=95.0,
        source_lines_over_500=0,
        check_statuses={
            "tests": "failed",
            "coverage": "ok",
            "loc": "ok",
            "docs_naming": "ok",
            "gate_glue": "ok",
            "sonarqube": "failed",
            "supply_chain": "failed",
        },
    )
    expected_lines = [
        'platform_quality_gate_runs_total{suite="titan-iac",status="ok"} 7',
        'titan_iac_quality_gate_checks_total{suite="titan-iac",check="docs_naming",result="ok"} 1',
        'titan_iac_quality_gate_checks_total{suite="titan-iac",check="tests",result="failed"} 1',
    ]
    for line in expected_lines:
        assert line in payload
def test_build_payload_omits_checks_block_without_check_statuses():
    """No per-check metric lines are emitted when check_statuses is omitted."""
    payload = publish_test_metrics._build_payload(
        suite="titan-iac",
        status="failed",
        tests={"tests": 0, "failures": 0, "errors": 0, "skipped": 0},
        ok_count=1,
        failed_count=2,
        branch="",
        build_number="",
        workspace_line_coverage_percent=0.0,
        source_lines_over_500=1,
    )
    assert "titan_iac_quality_gate_checks_total" not in payload
def test_main_uses_quality_gate_summary_and_junit_glob(tmp_path: Path, monkeypatch):
    """main() aggregates junit files via the glob and reads summary/rc files."""
    build_dir = tmp_path / "build"
    build_dir.mkdir()
    junit_reports = {
        "junit-unit.xml": '<testsuite tests="2" failures="0" errors="0" skipped="0" />',
        "junit-glue.xml": '<testsuite tests="3" failures="1" errors="0" skipped="0" />',
    }
    for filename, xml in junit_reports.items():
        (build_dir / filename).write_text(xml, encoding="utf-8")
    (build_dir / "quality-gate.rc").write_text("1\n", encoding="utf-8")
    summary = {"results": [{"name": "docs", "status": "ok"}, {"name": "glue", "status": "failed"}]}
    (build_dir / "quality-gate-summary.json").write_text(json.dumps(summary), encoding="utf-8")
    env = {
        "SUITE_NAME": "titan-iac",
        "PUSHGATEWAY_URL": "http://pushgateway.invalid",
        "QUALITY_GATE_JOB_NAME": "platform-quality-ci",
        "JUNIT_GLOB": str(build_dir / "junit-*.xml"),
        "QUALITY_GATE_EXIT_CODE_PATH": str(build_dir / "quality-gate.rc"),
        "QUALITY_GATE_SUMMARY_PATH": str(build_dir / "quality-gate-summary.json"),
        "BRANCH_NAME": "main",
        "BUILD_NUMBER": "88",
    }
    for key, value in env.items():
        monkeypatch.setenv(key, value)
    posted = {}
    monkeypatch.setattr(publish_test_metrics, "_fetch_existing_counter", lambda *args, **kwargs: 5)
    monkeypatch.setattr(
        publish_test_metrics,
        "_post_text",
        lambda url, payload: posted.update({"url": url, "payload": payload}),
    )
    assert publish_test_metrics.main() == 0
    assert posted["url"].endswith("/metrics/job/platform-quality-ci/suite/titan-iac")
    assert 'titan_iac_quality_gate_tests_total{suite="titan-iac",result="failed"} 1' in posted["payload"]
    assert (
        'titan_iac_quality_gate_checks_total{suite="titan-iac",check="gate_glue",result="failed"} 1'
        in posted["payload"]
    )
def test_main_marks_successful_run(tmp_path: Path, monkeypatch, capsys):
    """A zero rc and clean junit produce an ok JSON summary on stdout."""
    build_dir = tmp_path / "build"
    build_dir.mkdir()
    (build_dir / "junit.xml").write_text(
        '<testsuite tests="1" failures="0" errors="0" skipped="0" />',
        encoding="utf-8",
    )
    (build_dir / "quality-gate.rc").write_text("0\n", encoding="utf-8")
    for key, value in {
        "JUNIT_GLOB": str(build_dir / "*.xml"),
        "QUALITY_GATE_EXIT_CODE_PATH": str(build_dir / "quality-gate.rc"),
        "QUALITY_GATE_SUMMARY_PATH": str(build_dir / "missing-summary.json"),
    }.items():
        monkeypatch.setenv(key, value)
    monkeypatch.setattr(publish_test_metrics, "_fetch_existing_counter", lambda *args, **kwargs: 0)
    monkeypatch.setattr(publish_test_metrics, "_post_text", lambda *args, **kwargs: None)
    rc = publish_test_metrics.main()
    printed_summary = json.loads(capsys.readouterr().out)
    assert rc == 0
    assert printed_summary["status"] == "ok"
    assert printed_summary["checks_recorded"] == 7