monitoring(testing): split quality trend panels

This commit is contained in:
jenkins 2026-04-22 12:42:33 -03:00
parent 5d560d962d
commit 23beb08e5e
6 changed files with 2571 additions and 4048 deletions

View File

@ -3088,6 +3088,10 @@ def build_jobs_dashboard():
smell_selector = f'suite=~"{suite_var}",{exported}'
test_case_selector = f'suite=~"{suite_var}",branch=~"{branch_var}",test=~"{test_var}",test!="__no_test_cases__",{exported}'
build_info_selector = f'suite=~"{suite_var}",branch=~"{branch_var}",{exported}'
selected_suite_universe = (
f'(sum by (suite) (increase(platform_quality_gate_runs_total{{{runs_selector}}}[30d])) >= bool 0)'
)
selected_suite_zero = f"(0 * {selected_suite_universe})"
suite_universe = " or ".join(
f'label_replace(vector(1), "suite", "{suite}", "__name__", ".*")'
@ -3169,7 +3173,7 @@ def build_jobs_dashboard():
core = (
f'sum by (suite) (max_over_time(({{{checks_selector},check=~"{regex}",{state}}})[$__interval]))'
)
return f'({core}) or on(suite) (0 * ({suite_universe}))'
return f'({core}) or on(suite) ({selected_suite_zero})'
problematic_tests_history_core = (
f'topk(12, sum by (suite, test, jenkins_job) (increase(platform_quality_gate_test_case_result{{suite=~"{suite_var}",branch=~"{branch_var}",test!="__no_test_cases__",status="failed",{exported}}}[$__interval])))'
@ -3196,6 +3200,10 @@ def build_jobs_dashboard():
"legendFormat": "skipped · {{suite}} · #{{build_number}}",
},
]
selected_test_pass_rate = (
f'100 * (sum by (suite, test) (max_over_time(platform_quality_gate_test_case_result{{{test_case_selector},status="passed"}}[$__interval]))) '
f'/ clamp_min((sum by (suite, test) (max_over_time(platform_quality_gate_test_case_result{{{test_case_selector},status=~"passed|failed|error|skipped"}}[$__interval]))), 1)'
)
recent_branch_evidence = (
f'sort_desc(count by (suite, branch) (max_over_time(platform_quality_gate_build_info{{{build_info_selector}}}[30d])))'
)
@ -3222,16 +3230,21 @@ def build_jobs_dashboard():
missing_test_case_by_suite = _missing_suite_series(
f"count by (suite) (platform_quality_gate_test_case_result{{{exported}}})"
)
placeholder_test_case_by_suite = _missing_suite_series(
f'count by (suite) (platform_quality_gate_test_case_result{{{exported},test!="__no_test_cases__"}})'
)
success_thresholds = {
"mode": "absolute",
"steps": [
{"color": "red", "value": None},
{"color": "orange", "value": 80},
{"color": "yellow", "value": 95},
{"color": "green", "value": 99},
{"color": "orange", "value": 90},
{"color": "yellow", "value": 93},
{"color": "green", "value": 95},
{"color": "blue", "value": 100},
],
}
coverage_thresholds = success_thresholds
failures_thresholds = {
"mode": "absolute",
"steps": [
@ -3341,101 +3354,12 @@ def build_jobs_dashboard():
)
)
panels.append(
stat_panel(
19,
"Failing Tests",
checks_failed_tests,
{"h": 4, "w": 3, "x": 0, "y": 5},
unit="none",
instant=True,
thresholds=failures_thresholds,
)
)
panels.append(
stat_panel(
20,
"Failing Coverage",
checks_failed_coverage,
{"h": 4, "w": 3, "x": 3, "y": 5},
unit="none",
instant=True,
thresholds=failures_thresholds,
)
)
panels.append(
stat_panel(
21,
"Failing LOC",
checks_failed_loc,
{"h": 4, "w": 3, "x": 6, "y": 5},
unit="none",
instant=True,
thresholds=failures_thresholds,
)
)
panels.append(
stat_panel(
22,
"Failing Docs/Naming",
checks_failed_docs,
{"h": 4, "w": 3, "x": 9, "y": 5},
unit="none",
instant=True,
thresholds=failures_thresholds,
)
)
panels.append(
stat_panel(
23,
"Failing Gate/Glue",
checks_failed_gate,
{"h": 4, "w": 3, "x": 12, "y": 5},
unit="none",
instant=True,
thresholds=failures_thresholds,
)
)
panels.append(
stat_panel(
24,
"Failing SonarQube",
checks_failed_sonarqube,
{"h": 4, "w": 3, "x": 15, "y": 5},
unit="none",
instant=True,
thresholds=failures_thresholds,
)
)
panels.append(
stat_panel(
25,
"Failing Supply Chain",
checks_failed_supply_chain,
{"h": 4, "w": 3, "x": 18, "y": 5},
unit="none",
instant=True,
thresholds=failures_thresholds,
)
)
panels.append(
stat_panel(
26,
"Total Failing Checks",
checks_failed_total,
{"h": 4, "w": 3, "x": 21, "y": 5},
unit="none",
instant=True,
thresholds=failures_thresholds,
)
)
panels.append(
bargauge_panel(
8,
"Failures by Suite (24h)",
failures_by_suite_24h,
{"h": 8, "w": 8, "x": 0, "y": 9},
{"h": 8, "w": 8, "x": 0, "y": 5},
unit="none",
instant=True,
legend="{{suite}}",
@ -3447,7 +3371,7 @@ def build_jobs_dashboard():
9,
"Success Rate by Suite (24h)",
success_rate_by_suite_24h,
{"h": 8, "w": 8, "x": 8, "y": 9},
{"h": 8, "w": 8, "x": 8, "y": 5},
unit="percent",
instant=True,
legend="{{suite}}",
@ -3460,7 +3384,7 @@ def build_jobs_dashboard():
10,
"Coverage Gap to 95% by Suite",
coverage_gap,
{"h": 8, "w": 8, "x": 16, "y": 9},
{"h": 8, "w": 8, "x": 16, "y": 5},
unit="percent",
instant=True,
legend="{{suite}}",
@ -3475,7 +3399,7 @@ def build_jobs_dashboard():
11,
"Success History by Suite",
success_history_by_suite,
{"h": 8, "w": 24, "x": 0, "y": 17},
{"h": 8, "w": 24, "x": 0, "y": 13},
unit="percent",
legend="{{suite}}",
legend_display="list",
@ -3499,7 +3423,7 @@ def build_jobs_dashboard():
12,
"Run Outcomes (Selected Scope)",
None,
{"h": 8, "w": 8, "x": 0, "y": 25},
{"h": 8, "w": 8, "x": 0, "y": 21},
unit="none",
targets=[
{
@ -3528,7 +3452,7 @@ def build_jobs_dashboard():
13,
"Coverage & LOC History (Selected Scope)",
None,
{"h": 8, "w": 8, "x": 8, "y": 25},
{"h": 8, "w": 8, "x": 8, "y": 21},
unit="none",
targets=[
{
@ -3551,213 +3475,66 @@ def build_jobs_dashboard():
14,
"Run Status Mix (30d)",
f'sum by (status) (increase(platform_quality_gate_runs_total{{{runs_selector}}}[30d]))',
{"h": 8, "w": 8, "x": 16, "y": 25},
{"h": 8, "w": 8, "x": 16, "y": 21},
)
run_mix_panel["targets"][0]["legendFormat"] = "{{status}}"
run_mix_panel["fieldConfig"]["defaults"]["unit"] = "none"
panels.append(run_mix_panel)
panels.append(
timeseries_panel(
130,
"Fail Trend: Tests",
_check_state_series(check_regex_tests, True),
{"h": 6, "w": 3, "x": 0, "y": 33},
unit="none",
legend="{{suite}}",
legend_display="list",
legend_placement="bottom",
legend_calcs=["lastNotNull", "max"],
)
)
panels.append(
timeseries_panel(
131,
"Fail Trend: Coverage",
_check_state_series(check_regex_coverage, True),
{"h": 6, "w": 3, "x": 3, "y": 33},
unit="none",
legend="{{suite}}",
legend_display="list",
legend_placement="bottom",
legend_calcs=["lastNotNull", "max"],
)
)
panels.append(
timeseries_panel(
132,
"Fail Trend: LOC",
_check_state_series(check_regex_loc, True),
{"h": 6, "w": 3, "x": 6, "y": 33},
unit="none",
legend="{{suite}}",
legend_display="list",
legend_placement="bottom",
legend_calcs=["lastNotNull", "max"],
)
)
panels.append(
timeseries_panel(
133,
"Fail Trend: Style",
_check_state_series(check_regex_style, True),
{"h": 6, "w": 3, "x": 9, "y": 33},
unit="none",
legend="{{suite}}",
legend_display="list",
legend_placement="bottom",
legend_calcs=["lastNotNull", "max"],
)
)
panels.append(
timeseries_panel(
134,
"Fail Trend: Gate Glue",
_check_state_series(check_regex_gate_glue, True),
{"h": 6, "w": 3, "x": 12, "y": 33},
unit="none",
legend="{{suite}}",
legend_display="list",
legend_placement="bottom",
legend_calcs=["lastNotNull", "max"],
)
)
panels.append(
timeseries_panel(
135,
"Fail Trend: SonarQube",
_check_state_series(check_regex_sonarqube, True),
{"h": 6, "w": 3, "x": 15, "y": 33},
unit="none",
legend="{{suite}}",
legend_display="list",
legend_placement="bottom",
legend_calcs=["lastNotNull", "max"],
)
)
panels.append(
timeseries_panel(
136,
"Fail Trend: Supply Chain",
_check_state_series(check_regex_supply_chain, True),
{"h": 6, "w": 3, "x": 18, "y": 33},
unit="none",
legend="{{suite}}",
legend_display="list",
legend_placement="bottom",
legend_calcs=["lastNotNull", "max"],
)
)
panels.append(
timeseries_panel(
138,
"Pass Trend: Tests",
_check_state_series(check_regex_tests, False),
{"h": 6, "w": 3, "x": 0, "y": 39},
unit="none",
legend="{{suite}}",
legend_display="list",
legend_placement="bottom",
legend_calcs=["lastNotNull", "max"],
)
)
panels.append(
timeseries_panel(
139,
"Pass Trend: Coverage",
_check_state_series(check_regex_coverage, False),
{"h": 6, "w": 3, "x": 3, "y": 39},
unit="none",
legend="{{suite}}",
legend_display="list",
legend_placement="bottom",
legend_calcs=["lastNotNull", "max"],
)
)
panels.append(
timeseries_panel(
140,
"Pass Trend: LOC",
_check_state_series(check_regex_loc, False),
{"h": 6, "w": 3, "x": 6, "y": 39},
unit="none",
legend="{{suite}}",
legend_display="list",
legend_placement="bottom",
legend_calcs=["lastNotNull", "max"],
)
)
panels.append(
timeseries_panel(
141,
"Pass Trend: Style",
_check_state_series(check_regex_style, False),
{"h": 6, "w": 3, "x": 9, "y": 39},
unit="none",
legend="{{suite}}",
legend_display="list",
legend_placement="bottom",
legend_calcs=["lastNotNull", "max"],
)
)
panels.append(
timeseries_panel(
142,
"Pass Trend: Gate Glue",
_check_state_series(check_regex_gate_glue, False),
{"h": 6, "w": 3, "x": 12, "y": 39},
unit="none",
legend="{{suite}}",
legend_display="list",
legend_placement="bottom",
legend_calcs=["lastNotNull", "max"],
)
)
panels.append(
timeseries_panel(
143,
"Pass Trend: SonarQube",
_check_state_series(check_regex_sonarqube, False),
{"h": 6, "w": 3, "x": 15, "y": 39},
unit="none",
legend="{{suite}}",
legend_display="list",
legend_placement="bottom",
legend_calcs=["lastNotNull", "max"],
)
)
panels.append(
timeseries_panel(
144,
"Pass Trend: Supply Chain",
_check_state_series(check_regex_supply_chain, False),
{"h": 6, "w": 3, "x": 18, "y": 39},
unit="none",
legend="{{suite}}",
legend_display="list",
legend_placement="bottom",
legend_calcs=["lastNotNull", "max"],
)
)
panels.append(
bargauge_panel(
15,
"Latest Test Counters (Suite + Result)",
f'sum by (suite, result) ({{{tests_selector}}})',
{"h": 6, "w": 3, "x": 21, "y": 39},
unit="none",
instant=True,
legend="{{suite}} · {{result}}",
sort_order="desc",
limit=24,
)
)
check_dimensions = [
("Tests", check_regex_tests),
("Coverage", check_regex_coverage),
("LOC", check_regex_loc),
("Style", check_regex_style),
("Gate Glue", check_regex_gate_glue),
("SonarQube", check_regex_sonarqube),
("Supply Chain", check_regex_supply_chain),
]
def _append_check_trends(start_id: int, title_prefix: str, failed: bool, y: int) -> None:
    """Append one per-suite trend panel for every check dimension.

    The first four dimensions form a top row of four 6-wide panels; the
    remaining dimensions form a second row of 8-wide panels directly below.
    Panel ids run contiguously from ``start_id`` in dimension order.
    """
    shared_description = (
        "One line per selected suite. 1 means this check dimension was in that state during the bucket; "
        "0 means the suite reported the dimension and it was not in that state."
    )
    for offset, (label, regex) in enumerate(check_dimensions):
        # Top row: 4 panels x 6 units wide; second row: remaining panels x 8 wide.
        if offset < 4:
            geometry = {"h": 7, "w": 6, "x": offset * 6, "y": y}
        else:
            geometry = {"h": 7, "w": 8, "x": (offset - 4) * 8, "y": y + 7}
        trend_panel = timeseries_panel(
            start_id + offset,
            f"{title_prefix}: {label}",
            _check_state_series(regex, failed),
            geometry,
            unit="none",
            legend="{{suite}}",
            legend_display="list",
            legend_placement="bottom",
            legend_calcs=["lastNotNull", "max"],
        )
        trend_panel["description"] = shared_description
        panels.append(trend_panel)
_append_check_trends(130, "Failure Trend", True, 29)
_append_check_trends(138, "Success Trend", False, 43)
panels.append(
timeseries_panel(
145,
"Problematic Tests Over Time (Top failures)",
problematic_tests_history,
{"h": 8, "w": 12, "x": 0, "y": 45},
{"h": 8, "w": 12, "x": 0, "y": 57},
unit="none",
legend="{{suite}} · {{test}}",
legend_display="list",
@ -3767,27 +3544,12 @@ def build_jobs_dashboard():
data_links=jenkins_latest_artifact_data_links(),
)
)
panels.append(
timeseries_panel(
146,
"Selected Test Pass/Fail History",
None,
{"h": 8, "w": 8, "x": 12, "y": 45},
unit="none",
targets=selected_test_pass_fail,
legend_display="list",
legend_placement="bottom",
legend_calcs=["lastNotNull", "sum"],
links=jenkins_suite_links(),
data_links=jenkins_artifact_data_links(),
)
)
panels.append(
bargauge_panel(
147,
"Most Problematic Test by Suite (30d)",
worst_test_per_suite,
{"h": 8, "w": 4, "x": 20, "y": 45},
{"h": 8, "w": 12, "x": 12, "y": 57},
unit="none",
instant=True,
legend="{{suite}} · {{test}}",
@ -3798,17 +3560,49 @@ def build_jobs_dashboard():
data_links=jenkins_latest_artifact_data_links(),
)
)
panels.append(
timeseries_panel(
146,
"Selected Test Pass/Fail History",
None,
{"h": 8, "w": 12, "x": 0, "y": 65},
unit="none",
targets=selected_test_pass_fail,
legend_display="list",
legend_placement="bottom",
legend_calcs=["lastNotNull", "sum"],
links=jenkins_suite_links(),
data_links=jenkins_artifact_data_links(),
)
)
selected_pass_rate_panel = timeseries_panel(
152,
"Selected Test Pass Rate History",
selected_test_pass_rate,
{"h": 8, "w": 12, "x": 12, "y": 65},
unit="percent",
legend="{{suite}} · {{test}}",
legend_display="list",
legend_placement="bottom",
legend_calcs=["lastNotNull", "min"],
links=jenkins_suite_links(),
data_links=jenkins_artifact_data_links(),
)
selected_pass_rate_panel["fieldConfig"]["defaults"]["min"] = 0
selected_pass_rate_panel["fieldConfig"]["defaults"]["max"] = 100
selected_pass_rate_panel["fieldConfig"]["defaults"]["thresholds"] = success_thresholds
panels.append(selected_pass_rate_panel)
coverage_panel = bargauge_panel(
17,
"Coverage by Suite (Latest, gate 95)",
coverage_with_missing,
{"h": 8, "w": 12, "x": 0, "y": 53},
{"h": 8, "w": 12, "x": 0, "y": 73},
unit="percent",
instant=True,
legend="{{suite}}",
sort_order="asc",
thresholds=success_thresholds,
thresholds=coverage_thresholds,
decimals=2,
)
coverage_panel["fieldConfig"]["defaults"]["mappings"] = [
@ -3820,7 +3614,7 @@ def build_jobs_dashboard():
18,
"Files >500 LOC by Suite (Latest)",
smell_with_missing,
{"h": 8, "w": 12, "x": 12, "y": 53},
{"h": 8, "w": 12, "x": 12, "y": 73},
unit="none",
instant=True,
legend="{{suite}}",
@ -3837,7 +3631,7 @@ def build_jobs_dashboard():
27,
"Missing Tests Metrics by Suite",
missing_tests_by_suite,
{"h": 7, "w": 6, "x": 0, "y": 61},
{"h": 7, "w": 6, "x": 0, "y": 81},
unit="none",
instant=True,
legend="{{suite}}",
@ -3851,7 +3645,7 @@ def build_jobs_dashboard():
28,
"Missing Checks Metrics by Suite",
missing_checks_by_suite,
{"h": 7, "w": 6, "x": 6, "y": 61},
{"h": 7, "w": 6, "x": 6, "y": 81},
unit="none",
instant=True,
legend="{{suite}}",
@ -3865,7 +3659,7 @@ def build_jobs_dashboard():
29,
"Missing Coverage Metrics by Suite",
missing_coverage_by_suite,
{"h": 7, "w": 6, "x": 12, "y": 61},
{"h": 7, "w": 6, "x": 12, "y": 81},
unit="none",
instant=True,
legend="{{suite}}",
@ -3879,7 +3673,7 @@ def build_jobs_dashboard():
30,
"Missing LOC Metrics by Suite",
missing_loc_by_suite,
{"h": 7, "w": 6, "x": 18, "y": 61},
{"h": 7, "w": 6, "x": 18, "y": 81},
unit="none",
instant=True,
legend="{{suite}}",
@ -3893,7 +3687,7 @@ def build_jobs_dashboard():
31,
"SonarQube API Up",
"(max(sonarqube_up) or on() vector(0))",
{"h": 6, "w": 4, "x": 0, "y": 68},
{"h": 6, "w": 4, "x": 0, "y": 88},
unit="none",
instant=True,
thresholds={
@ -3910,7 +3704,7 @@ def build_jobs_dashboard():
32,
"Sonar Projects (Selected)",
f'(count(sonarqube_project_quality_gate_pass{{project_key=~"{suite_var}"}}) or on() vector(0))',
{"h": 6, "w": 4, "x": 4, "y": 68},
{"h": 6, "w": 4, "x": 4, "y": 88},
unit="none",
instant=True,
thresholds=failures_thresholds,
@ -3921,7 +3715,7 @@ def build_jobs_dashboard():
33,
"Sonar Gate Fetch Errors",
"(max(sonarqube_quality_gate_fetch_errors_total) or on() vector(0))",
{"h": 6, "w": 4, "x": 8, "y": 68},
{"h": 6, "w": 4, "x": 8, "y": 88},
unit="none",
instant=True,
thresholds=failures_thresholds,
@ -3931,7 +3725,7 @@ def build_jobs_dashboard():
34,
"Sonar Gate Status Mix (Selected)",
f'count by (status) (sonarqube_project_quality_gate_pass{{project_key=~"{suite_var}"}})',
{"h": 6, "w": 6, "x": 12, "y": 68},
{"h": 6, "w": 6, "x": 12, "y": 88},
)
sonar_status_mix_panel["targets"][0]["legendFormat"] = "{{status}}"
panels.append(sonar_status_mix_panel)
@ -3941,7 +3735,7 @@ def build_jobs_dashboard():
"Projects Failing Sonar Gate",
f'(sort_desc(count by (project_key) (sonarqube_project_quality_gate_pass{{project_key=~"{suite_var}",status!~"OK|ok"}})) '
f'or on() label_replace(vector(0), "project_key", "none", "__name__", ".*"))',
{"h": 6, "w": 6, "x": 18, "y": 68},
{"h": 6, "w": 6, "x": 18, "y": 88},
unit="none",
instant=True,
legend="{{project_key}}",
@ -3954,7 +3748,21 @@ def build_jobs_dashboard():
148,
"Missing Test-Case Metrics by Suite",
missing_test_case_by_suite,
{"h": 6, "w": 24, "x": 0, "y": 74},
{"h": 6, "w": 12, "x": 0, "y": 94},
unit="none",
instant=True,
legend="{{suite}}",
sort_order="desc",
thresholds=missing_thresholds,
decimals=0,
)
)
panels.append(
bargauge_panel(
151,
"No Real Test Cases by Suite",
placeholder_test_case_by_suite,
{"h": 6, "w": 12, "x": 12, "y": 94},
unit="none",
instant=True,
legend="{{suite}}",
@ -3968,7 +3776,7 @@ def build_jobs_dashboard():
149,
"Recent Branch Evidence by Suite (30d)",
recent_branch_evidence,
{"h": 7, "w": 12, "x": 0, "y": 80},
{"h": 7, "w": 12, "x": 0, "y": 100},
unit="none",
instant=True,
legend="{{suite}} · {{branch}}",
@ -3983,7 +3791,7 @@ def build_jobs_dashboard():
150,
"Non-Primary Branch Evidence (30d)",
non_primary_branch_evidence,
{"h": 7, "w": 12, "x": 12, "y": 80},
{"h": 7, "w": 12, "x": 12, "y": 100},
unit="none",
instant=True,
legend="{{suite}} · {{branch}}",

View File

@ -182,6 +182,56 @@ EOF
}
}
}
stage('Validation tests') {
    steps {
        container('git') {
            sh '''#!/usr/bin/env sh
set -eu
mkdir -p build
failures=0
total=0
cases=""
# add_case NAME MESSAGE
# Records one JUnit <testcase>; a non-empty MESSAGE marks it as a failure.
# Also counts every case so the <testsuite tests="..."> attribute stays in
# sync when cases are added or removed (previously hard-coded to 3).
add_case() {
name="$1"
message="$2"
total=$((total + 1))
if [ -n "${message}" ]; then
failures=$((failures + 1))
cases="${cases}<testcase classname=\"data_prepper.packaging\" name=\"${name}\"><failure message=\"${message}\" /></testcase>"
else
cases="${cases}<testcase classname=\"data_prepper.packaging\" name=\"${name}\" />"
fi
}
# Packaging checks: each required artifact must exist and be non-empty.
if [ -s dockerfiles/Dockerfile.data-prepper ]; then
add_case "dockerfile_present" ""
else
add_case "dockerfile_present" "dockerfiles/Dockerfile.data-prepper is missing or empty"
fi
if [ -s services/logging/scripts/data_prepper_pipelines.yaml ]; then
add_case "pipeline_config_present" ""
else
add_case "pipeline_config_present" "data_prepper_pipelines.yaml is missing or empty"
fi
if grep -q 'data-prepper-helmrelease.yaml' services/logging/kustomization.yaml; then
add_case "logging_kustomization_includes_data_prepper" ""
else
add_case "logging_kustomization_includes_data_prepper" "services/logging/kustomization.yaml does not include data-prepper HelmRelease"
fi
# Emit a JUnit report consumed by the junit step and the metrics push.
cat > build/junit-data-prepper.xml <<EOF
<testsuite name="data_prepper.packaging" tests="${total}" failures="${failures}" errors="0" skipped="0">
${cases}
</testsuite>
EOF
if [ "${failures}" -ne 0 ]; then
exit 1
fi
'''
        }
    }
}
stage('Enforce quality gate') {
steps {
container('git') {
@ -290,7 +340,7 @@ EOF
container('git') {
sh '''
set -euo pipefail
apk add --no-cache curl jq >/dev/null 2>&1 || true
apk add --no-cache curl jq python3 >/dev/null 2>&1 || true
suite="${SUITE_NAME}"
gateway="${PUSHGATEWAY_URL}"
status="${QUALITY_OUTCOME:-failed}"
@ -350,16 +400,90 @@ EOF
metric_branch="$(printf '%s' "${metric_branch_raw}" | jq -Rsa . | sed -e 's/^"//' -e 's/"$//')"
metric_build_number="$(printf '%s' "${BUILD_NUMBER:-unknown}" | jq -Rsa . | sed -e 's/^"//' -e 's/"$//')"
metric_jenkins_job="$(printf '%s' "${JOB_NAME:-data-prepper}" | jq -Rsa . | sed -e 's/^"//' -e 's/"$//')"
cat <<METRICS | curl -fsS -X PUT --data-binary @- "${gateway}/metrics/job/platform-quality-ci/suite/${suite}" >/dev/null || \
echo "warning: metrics push failed for suite=${suite}" >&2
export METRIC_SUITE="${suite}"
export METRIC_BRANCH_RAW="${metric_branch_raw}"
export METRIC_BUILD_NUMBER_RAW="${BUILD_NUMBER:-unknown}"
export METRIC_JENKINS_JOB_RAW="${JOB_NAME:-data-prepper}"
python3 - <<'PY'
import glob
import os
import xml.etree.ElementTree as ET
from pathlib import Path


def label_value(value: str) -> str:
    # Escape backslash first, then newline and double quote, following the
    # Prometheus text exposition rules for label values.
    return value.replace("\\", "\\\\").replace("\n", "\\n").replace('"', '\\"')


def _metric_line(labels: dict) -> str:
    # Render one gauge sample; label insertion order is preserved.
    rendered = ",".join(f'{key}="{label_value(value)}"' for key, value in labels.items())
    return f"platform_quality_gate_test_case_result{{{rendered}}} 1"


# Labels shared by every per-test-case sample, taken from the CI environment.
common_labels = {
    "suite": os.environ["METRIC_SUITE"],
    "branch": os.environ["METRIC_BRANCH_RAW"],
    "build_number": os.environ["METRIC_BUILD_NUMBER_RAW"],
    "jenkins_job": os.environ["METRIC_JENKINS_JOB_RAW"],
}
totals = {"passed": 0, "failed": 0, "error": 0, "skipped": 0}
case_lines = []
for report_path in glob.glob("build/junit-*.xml"):
    try:
        document = ET.parse(report_path).getroot()
    except ET.ParseError:
        # An unreadable report counts as one error so broken XML stays visible.
        totals["error"] += 1
        continue
    for case in document.findall(".//testcase"):
        case_name = case.get("name") or "unnamed"
        group = case.get("classname") or Path(report_path).stem
        full_name = f"{group}::{case_name}" if group else case_name
        # JUnit child-element precedence: error beats failure beats skipped.
        if case.find("error") is not None:
            outcome = "error"
        elif case.find("failure") is not None:
            outcome = "failed"
        elif case.find("skipped") is not None:
            outcome = "skipped"
        else:
            outcome = "passed"
        totals[outcome] += 1
        case_lines.append(_metric_line({**common_labels, "test": full_name, "status": outcome}))
if not case_lines:
    # No cases parsed at all: emit a sentinel series so dashboards can tell
    # "no test cases" apart from "no data".
    totals["skipped"] += 1
    case_lines.append(
        _metric_line({**common_labels, "test": "__no_test_cases__", "status": "skipped"})
    )
Path("build/test-counts.env").write_text(
    "\n".join(f"test_{key}_count={value}" for key, value in totals.items()) + "\n",
    encoding="utf-8",
)
Path("build/testcase-metrics.prom").write_text("\n".join(case_lines) + "\n", encoding="utf-8")
PY
. build/test-counts.env
tests_check="ok"
if [ "$((test_failed_count + test_error_count))" -gt 0 ]; then
tests_check="failed"
fi
cat > build/platform-quality-metrics.prom <<METRICS
# TYPE platform_quality_gate_runs_total counter
platform_quality_gate_runs_total{suite="${suite}",status="ok"} ${ok_count}
platform_quality_gate_runs_total{suite="${suite}",status="failed"} ${failed_count}
# TYPE data_prepper_quality_gate_tests_total gauge
data_prepper_quality_gate_tests_total{suite="${suite}",result="passed"} 0
data_prepper_quality_gate_tests_total{suite="${suite}",result="failed"} 0
data_prepper_quality_gate_tests_total{suite="${suite}",result="error"} 0
data_prepper_quality_gate_tests_total{suite="${suite}",result="skipped"} 0
data_prepper_quality_gate_tests_total{suite="${suite}",result="passed"} ${test_passed_count}
data_prepper_quality_gate_tests_total{suite="${suite}",result="failed"} ${test_failed_count}
data_prepper_quality_gate_tests_total{suite="${suite}",result="error"} ${test_error_count}
data_prepper_quality_gate_tests_total{suite="${suite}",result="skipped"} ${test_skipped_count}
# TYPE platform_quality_gate_workspace_line_coverage_percent gauge
# No coverable project source is present in this packaging suite; report full
# non-applicable coverage so rollups do not confuse N/A with uncovered code.
@ -369,7 +493,7 @@ platform_quality_gate_source_lines_over_500_total{suite="${suite}"} 0
# TYPE platform_quality_gate_build_info gauge
platform_quality_gate_build_info{suite="${suite}",branch="${metric_branch}",build_number="${metric_build_number}",jenkins_job="${metric_jenkins_job}"} 1
# TYPE data_prepper_quality_gate_checks_total gauge
data_prepper_quality_gate_checks_total{suite="${suite}",check="tests",result="not_applicable"} 1
data_prepper_quality_gate_checks_total{suite="${suite}",check="tests",result="${tests_check}"} 1
data_prepper_quality_gate_checks_total{suite="${suite}",check="coverage",result="not_applicable"} 1
data_prepper_quality_gate_checks_total{suite="${suite}",check="loc",result="not_applicable"} 1
data_prepper_quality_gate_checks_total{suite="${suite}",check="docs_naming",result="not_applicable"} 1
@ -377,10 +501,17 @@ data_prepper_quality_gate_checks_total{suite="${suite}",check="gate_glue",result
data_prepper_quality_gate_checks_total{suite="${suite}",check="sonarqube",result="${sonarqube_check}"} 1
data_prepper_quality_gate_checks_total{suite="${suite}",check="supply_chain",result="${supply_chain_check}"} 1
# TYPE platform_quality_gate_test_case_result gauge
platform_quality_gate_test_case_result{suite="${suite}",branch="${metric_branch}",build_number="${metric_build_number}",jenkins_job="${metric_jenkins_job}",test="__no_test_cases__",status="skipped"} 1
METRICS
cat build/testcase-metrics.prom >> build/platform-quality-metrics.prom
curl -fsS -X PUT --data-binary @build/platform-quality-metrics.prom "${gateway}/metrics/job/platform-quality-ci/suite/${suite}" >/dev/null || \
echo "warning: metrics push failed for suite=${suite}" >&2
'''
}
script {
if (fileExists('build/junit-data-prepper.xml')) {
junit allowEmptyResults: true, testResults: 'build/junit-*.xml'
}
}
archiveArtifacts artifacts: 'build/**', allowEmptyArchive: true, fingerprint: true
}
}

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff