# titan-iac/testing/tests/test_publish_test_metrics.py
from __future__ import annotations
import json
from pathlib import Path
from ci.scripts import publish_test_metrics
def test_parse_junit_supports_testsuite_and_missing_file(tmp_path: Path):
    """A bare <testsuite> element is parsed; a missing file yields zero counts."""
    junit_path = tmp_path / "suite.xml"
    junit_path.write_text(
        '<testsuite tests="3" failures="1" errors="0" skipped="1" />',
        encoding="utf-8",
    )
    parsed = publish_test_metrics._parse_junit(str(junit_path))
    assert parsed == {"tests": 3, "failures": 1, "errors": 0, "skipped": 1}
    # A nonexistent report file falls back to all-zero totals instead of raising.
    absent = publish_test_metrics._parse_junit(str(tmp_path / "missing.xml"))
    assert absent == {"tests": 0, "failures": 0, "errors": 0, "skipped": 0}
def test_collect_junit_totals_sums_multiple_files(tmp_path: Path):
    """Counts are accumulated across every report that matches the glob."""
    reports = {
        "junit-a.xml": '<testsuite tests="2" failures="1" errors="0" skipped="0" />',
        "junit-b.xml": '<testsuite tests="3" failures="0" errors="1" skipped="1" />',
    }
    for filename, xml in reports.items():
        (tmp_path / filename).write_text(xml, encoding="utf-8")
    totals = publish_test_metrics._collect_junit_totals(str(tmp_path / "junit-*.xml"))
    assert totals == {"tests": 5, "failures": 1, "errors": 1, "skipped": 1}
def test_parse_junit_handles_testsuites_and_invalid_counts(tmp_path: Path):
    """A <testsuites> wrapper is supported and non-numeric counts are skipped."""
    xml = (
        "<testsuites>"
        '<testsuite tests="2" failures="1" errors="0" skipped="0" />'
        '<testsuite tests="bad" failures="0" errors="0" skipped="0" />'
        "</testsuites>"
    )
    junit_path = tmp_path / "suite.xml"
    junit_path.write_text(xml, encoding="utf-8")
    # Only the suite with parseable counts contributes to the result.
    parsed = publish_test_metrics._parse_junit(str(junit_path))
    assert parsed == {"tests": 2, "failures": 1, "errors": 0, "skipped": 0}
def test_read_exit_code_and_summary_fallbacks(tmp_path: Path):
    """Exit codes and summaries degrade safely for missing or corrupt files."""
    rc_path = tmp_path / "rc.txt"
    summary_path = tmp_path / "summary.json"
    rc_path.write_text("0\n", encoding="utf-8")
    summary_path.write_text("{bad json", encoding="utf-8")
    assert publish_test_metrics._read_exit_code(str(rc_path)) == 0
    # A missing exit-code file is treated as a failed run (rc 1).
    assert publish_test_metrics._read_exit_code(str(tmp_path / "missing.rc")) == 1
    # Unparseable and absent summaries both collapse to an empty dict.
    assert publish_test_metrics._load_summary(str(summary_path)) == {}
    assert publish_test_metrics._load_summary(str(tmp_path / "missing.json")) == {}
def test_read_text_post_text_and_fetch_existing_counter(monkeypatch):
    """_read_text, _post_text and _fetch_existing_counter share one urlopen stub."""

    class _FakeResponse:
        def __init__(self, payload: str, status: int = 200):
            self.payload = payload
            self.status = status

        def read(self):
            return self.payload.encode("utf-8")

        def __enter__(self):
            return self

        def __exit__(self, exc_type, exc, tb):
            return None

    metrics_body = "\n".join(
        [
            'platform_quality_gate_runs_total{job="platform-quality-ci",suite="titan-iac",status="ok"} 7',
            'platform_quality_gate_runs_total{job="other",suite="titan-iac",status="ok"} 1',
        ]
    )
    responses = iter(
        [
            _FakeResponse("alpha"),
            _FakeResponse("", status=202),
            _FakeResponse(metrics_body),
        ]
    )
    monkeypatch.setattr(
        publish_test_metrics.urllib.request,
        "urlopen",
        lambda *args, **kwargs: next(responses),
    )
    assert publish_test_metrics._read_text("http://example.invalid") == "alpha"
    # A 2xx status is accepted by _post_text without raising.
    publish_test_metrics._post_text("http://example.invalid", "payload")
    # Only the sample whose labels match exactly feeds the existing counter.
    existing = publish_test_metrics._fetch_existing_counter(
        "http://push.invalid",
        "platform_quality_gate_runs_total",
        {"job": "platform-quality-ci", "suite": "titan-iac", "status": "ok"},
    )
    assert existing == 7.0
def test_post_text_raises_and_counter_handles_bad_metric_lines(monkeypatch):
    """A 5xx push raises RuntimeError; malformed metric lines count as 0.0."""

    class _FakeResponse:
        def __init__(self, payload: str, status: int = 200):
            self.payload = payload
            self.status = status

        def read(self):
            return self.payload.encode("utf-8")

        def __enter__(self):
            return self

        def __exit__(self, exc_type, exc, tb):
            return None

    monkeypatch.setattr(
        publish_test_metrics.urllib.request,
        "urlopen",
        lambda *args, **kwargs: _FakeResponse("", status=500),
    )
    try:
        publish_test_metrics._post_text("http://example.invalid", "payload")
    except RuntimeError as exc:
        assert "push failed" in str(exc)
    else:
        raise AssertionError("expected RuntimeError for failing push")
    # One line with no value, one with a non-numeric value: both are ignored.
    bad_lines = [
        'platform_quality_gate_runs_total{job="platform-quality-ci",suite="titan-iac",status="ok"}',
        'platform_quality_gate_runs_total{job="platform-quality-ci",suite="titan-iac",status="ok"} nope',
    ]
    monkeypatch.setattr(
        publish_test_metrics,
        "_read_text",
        lambda url: "\n".join(bad_lines),
    )
    counter = publish_test_metrics._fetch_existing_counter(
        "http://push.invalid",
        "platform_quality_gate_runs_total",
        {"job": "platform-quality-ci", "suite": "titan-iac", "status": "ok"},
    )
    assert counter == 0.0
def test_build_payload_includes_summary_metrics():
    """The payload carries run counts, per-check counters and summary gauges."""
    summary = {
        "results": [
            {"name": "docs", "status": "ok"},
            {"name": "unit", "status": "failed"},
        ]
    }
    payload = publish_test_metrics._build_payload(
        suite="titan-iac",
        status="ok",
        tests={"tests": 4, "failures": 1, "errors": 0, "skipped": 1},
        ok_count=7,
        failed_count=2,
        branch="main",
        build_number="42",
        summary=summary,
        workspace_line_coverage_percent=97.125,
        source_lines_over_500=3,
    )
    expected_lines = [
        'platform_quality_gate_runs_total{suite="titan-iac",status="ok"} 7',
        'titan_iac_quality_gate_checks_total{suite="titan-iac",check="docs",result="ok"} 1',
        'titan_iac_quality_gate_checks_total{suite="titan-iac",check="unit",result="failed"} 1',
        'platform_quality_gate_workspace_line_coverage_percent{suite="titan-iac"} 97.125',
        'platform_quality_gate_source_lines_over_500_total{suite="titan-iac"} 3',
    ]
    for line in expected_lines:
        assert line in payload
def test_build_payload_skips_incomplete_results():
    """Summary entries missing a name or a status never become check samples."""
    payload = publish_test_metrics._build_payload(
        suite="titan-iac",
        status="failed",
        tests={"tests": 0, "failures": 0, "errors": 0, "skipped": 0},
        ok_count=1,
        failed_count=2,
        branch="",
        build_number="",
        summary={"results": [{"name": "docs"}, {"status": "ok"}]},
    )
    # The metric family name still appears even though no sample is emitted.
    assert "titan_iac_quality_gate_checks_total" in payload
    assert 'check="docs"' not in payload
def test_main_uses_quality_gate_summary_and_junit_glob(tmp_path: Path, monkeypatch):
    """main() folds JUnit globs and the gate summary into one pushed payload."""
    build_dir = tmp_path / "build"
    build_dir.mkdir()
    junit_reports = {
        "junit-unit.xml": '<testsuite tests="2" failures="0" errors="0" skipped="0" />',
        "junit-glue.xml": '<testsuite tests="3" failures="1" errors="0" skipped="0" />',
    }
    for filename, xml in junit_reports.items():
        (build_dir / filename).write_text(xml, encoding="utf-8")
    (build_dir / "quality-gate.rc").write_text("1\n", encoding="utf-8")
    summary = {
        "results": [{"name": "docs", "status": "ok"}, {"name": "glue", "status": "failed"}],
        "workspace_line_coverage_percent": 96.4321,
        "source_lines_over_500": 2,
    }
    (build_dir / "quality-gate-summary.json").write_text(json.dumps(summary), encoding="utf-8")
    env = {
        "SUITE_NAME": "titan-iac",
        "PUSHGATEWAY_URL": "http://pushgateway.invalid",
        "QUALITY_GATE_JOB_NAME": "platform-quality-ci",
        "JUNIT_GLOB": str(build_dir / "junit-*.xml"),
        "QUALITY_GATE_EXIT_CODE_PATH": str(build_dir / "quality-gate.rc"),
        "QUALITY_GATE_SUMMARY_PATH": str(build_dir / "quality-gate-summary.json"),
        "BRANCH_NAME": "main",
        "BUILD_NUMBER": "88",
    }
    for key, value in env.items():
        monkeypatch.setenv(key, value)
    posted = {}
    monkeypatch.setattr(publish_test_metrics, "_fetch_existing_counter", lambda *args, **kwargs: 5)
    monkeypatch.setattr(
        publish_test_metrics,
        "_post_text",
        lambda url, payload: posted.update({"url": url, "payload": payload}),
    )
    assert publish_test_metrics.main() == 0
    assert posted["url"].endswith("/metrics/job/platform-quality-ci/suite/titan-iac")
    for expected in [
        'titan_iac_quality_gate_tests_total{suite="titan-iac",result="failed"} 1',
        'titan_iac_quality_gate_checks_total{suite="titan-iac",check="glue",result="failed"} 1',
        'platform_quality_gate_workspace_line_coverage_percent{suite="titan-iac"} 96.432',
        'platform_quality_gate_source_lines_over_500_total{suite="titan-iac"} 2',
    ]:
        assert expected in posted["payload"]
def test_main_marks_successful_run(tmp_path: Path, monkeypatch, capsys):
    """With a zero exit code and no summary file, main() reports an ok run."""
    build_dir = tmp_path / "build"
    build_dir.mkdir()
    (build_dir / "junit.xml").write_text(
        '<testsuite tests="1" failures="0" errors="0" skipped="0" />',
        encoding="utf-8",
    )
    (build_dir / "quality-gate.rc").write_text("0\n", encoding="utf-8")
    monkeypatch.setenv("JUNIT_GLOB", str(build_dir / "*.xml"))
    monkeypatch.setenv("QUALITY_GATE_EXIT_CODE_PATH", str(build_dir / "quality-gate.rc"))
    monkeypatch.setenv("QUALITY_GATE_SUMMARY_PATH", str(build_dir / "missing-summary.json"))
    monkeypatch.setattr(publish_test_metrics, "_fetch_existing_counter", lambda *args, **kwargs: 0)
    monkeypatch.setattr(publish_test_metrics, "_post_text", lambda *args, **kwargs: None)
    rc = publish_test_metrics.main()
    # main() prints a JSON report to stdout; capture and decode it.
    report = json.loads(capsys.readouterr().out)
    assert rc == 0
    assert report["status"] == "ok"
    assert report["checks_recorded"] == 0
    assert report["workspace_line_coverage_percent"] == 0.0
    assert report["source_lines_over_500"] == 0