#!/usr/bin/env bash
# Run the Rust test suite, publish CI test metrics when configured, and retain artifacts.
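#
# Example invocation (the URL and the script path here are placeholders, not
# values defined by this repo; the pushgateway variable may be left unset):
#   QUALITY_GATE_PUSHGATEWAY_URL=http://pushgateway.example:9091 \
#     bash path/to/this/script.sh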
set -euo pipefail

ROOT_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd)
REPORT_DIR="${ROOT_DIR}/target/test-gate"
TEST_LOG="${REPORT_DIR}/cargo-test.log"
SUMMARY_JSON="${REPORT_DIR}/summary.json"
SUMMARY_TXT="${REPORT_DIR}/summary.txt"
METRICS_FILE="${REPORT_DIR}/metrics.prom"
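# Publishing is optional: when QUALITY_GATE_PUSHGATEWAY_URL is unset or empty,
# publish_metrics() below becomes a logged no-op.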
PUSHGATEWAY_URL=${QUALITY_GATE_PUSHGATEWAY_URL:-}
PUSHGATEWAY_JOB=${LESAVKA_TEST_GATE_PUSHGATEWAY_JOB:-lesavka-test-gate}

mkdir -p "${REPORT_DIR}"
cd "${ROOT_DIR}"
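# Resolve build metadata from CI-provided env vars, falling back to git.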
branch=${BRANCH_NAME:-${GIT_BRANCH:-}}
if [[ -z "${branch}" ]]; then
  branch=$(git rev-parse --abbrev-ref HEAD 2>/dev/null || echo unknown)
fi
commit=${GIT_COMMIT:-}
if [[ -z "${commit}" ]]; then
  commit=$(git rev-parse --short HEAD 2>/dev/null || echo unknown)
fi
build_url=${BUILD_URL:-}
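# Build first so compile errors fail fast, then run the tests single-threaded
# by default (RUST_TEST_THREADS=1). `set +e` keeps a failing cargo pipeline
# from aborting the script under pipefail, and PIPESTATUS[0] records cargo's
# exit code rather than tee's.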
start_seconds=$(date +%s)
status=0
set +e
cargo build --workspace --bins --color never 2>&1 | tee "${TEST_LOG}"
build_status=${PIPESTATUS[0]}
if [[ "${build_status}" -eq 0 ]]; then
  RUST_TEST_THREADS="${RUST_TEST_THREADS:-1}" cargo test --workspace --all-targets --color never 2>&1 | tee -a "${TEST_LOG}"
  status=${PIPESTATUS[0]}
else
  status=${build_status}
fi
set -e
end_seconds=$(date +%s)
duration_seconds=$((end_seconds - start_seconds))
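# `python3 -` reads the program from the quoted heredoc on stdin; the shell
# values below arrive as sys.argv[1:], so nothing is interpolated into the
# Python source itself.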
python3 - \
  "${TEST_LOG}" \
  "${SUMMARY_JSON}" \
  "${SUMMARY_TXT}" \
  "${METRICS_FILE}" \
  "${status}" \
  "${duration_seconds}" \
  "${branch}" \
  "${commit}" \
  "${build_url}" <<'PY'
import json
import pathlib
import re
import sys
from datetime import datetime, timezone

log_path = pathlib.Path(sys.argv[1])
summary_json_path = pathlib.Path(sys.argv[2])
summary_txt_path = pathlib.Path(sys.argv[3])
metrics_path = pathlib.Path(sys.argv[4])
status = int(sys.argv[5])
duration_seconds = int(sys.argv[6])
branch = sys.argv[7] or 'unknown'
commit = sys.argv[8] or 'unknown'
build_url = sys.argv[9]
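# Cargo prints one summary line per test binary, e.g.
# "test result: ok. 12 passed; 0 failed; 0 ignored; 0 measured; 0 filtered out; ...";
# the named groups below let those counts be summed across binaries.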
result_re = re.compile(
    r'test result: (?:ok|FAILED)\. '
    r'(?P<passed>\d+) passed; '
    r'(?P<failed>\d+) failed; '
    r'(?P<ignored>\d+) ignored; '
    r'(?P<measured>\d+) measured; '
    r'(?P<filtered>\d+) filtered out;'
)

counts = {'passed': 0, 'failed': 0, 'ignored': 0, 'measured': 0, 'filtered': 0}
for raw in log_path.read_text(encoding='utf-8', errors='replace').splitlines():
    match = result_re.search(raw)
    if not match:
        continue
    for key in counts:
        counts[key] += int(match.group(key))
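# Persist a machine-readable JSON summary and a human-readable text summary
# as CI artifacts.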
outcome = 'ok' if status == 0 else 'failed'
summary = {
    'suite': 'lesavka',
    'branch': branch,
    'commit': commit,
    'build_url': build_url,
    'outcome': outcome,
    'exit_code': status,
    'duration_seconds': duration_seconds,
    'generated_at': datetime.now(timezone.utc).isoformat(),
    'tests': counts,
}
summary_json_path.write_text(json.dumps(summary, indent=2, sort_keys=True) + '\n', encoding='utf-8')
summary_txt_path.write_text(
    '\n'.join([
        f"lesavka test gate: {outcome}",
        f"branch: {branch}",
        f"commit: {commit}",
        f"duration: {duration_seconds}s",
        f"passed: {counts['passed']}",
        f"failed: {counts['failed']}",
        f"ignored: {counts['ignored']}",
        f"filtered: {counts['filtered']}",
    ]) + '\n',
    encoding='utf-8',
)
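# Escape label values per the Prometheus text exposition format, which
# requires backslash, double-quote, and newline to be escaped.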
def label_value(value: str) -> str:
    return value.replace('\\', '\\\\').replace('"', '\\"').replace('\n', '\\n')

labels = f'suite="lesavka",branch="{label_value(branch)}"'
success = 1 if outcome == 'ok' else 0
failure = 1 - success
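# Each entry becomes one exposition-format line, e.g.
# lesavka_test_gate_last_run_success{suite="lesavka",branch="main"} 1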
lines = [
    '# HELP lesavka_test_gate_last_run_success Whether the latest lesavka cargo test gate run succeeded.',
    '# TYPE lesavka_test_gate_last_run_success gauge',
    f'lesavka_test_gate_last_run_success{{{labels}}} {success}',
    '# HELP lesavka_test_gate_duration_seconds Duration of the latest lesavka cargo test gate run.',
    '# TYPE lesavka_test_gate_duration_seconds gauge',
    f'lesavka_test_gate_duration_seconds{{{labels}}} {duration_seconds}',
    '# HELP lesavka_test_gate_tests Number of Rust tests reported by the latest lesavka test gate run.',
    '# TYPE lesavka_test_gate_tests gauge',
]
for result, value in counts.items():
    lines.append(f'lesavka_test_gate_tests{{{labels},result="{result}"}} {value}')
lines.extend([
    '# HELP platform_quality_gate_tests_total Test result counts from the latest lesavka gate run.',
    '# TYPE platform_quality_gate_tests_total gauge',
    f'platform_quality_gate_tests_total{{{labels},result="passed"}} {counts["passed"]}',
    f'platform_quality_gate_tests_total{{{labels},result="failed"}} {counts["failed"]}',
    f'platform_quality_gate_tests_total{{{labels},result="ignored"}} {counts["ignored"]}',
    '# HELP platform_quality_gate_checks_total Check outcomes from the latest lesavka gate run.',
    '# TYPE platform_quality_gate_checks_total gauge',
    f'platform_quality_gate_checks_total{{{labels},check="tests",status="ok"}} {success}',
    f'platform_quality_gate_checks_total{{{labels},check="tests",status="failed"}} {failure}',
])
metrics_path.write_text('\n'.join(lines) + '\n', encoding='utf-8')
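# Downstream dashboards or alerts can key off these gauges; a plausible alert
# expression (an assumption, nothing in this script installs it) would be:
#   lesavka_test_gate_last_run_success == 0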
PY
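# curl POSTs the exposition file to the Pushgateway grouping key
# /metrics/job/<job>/suite/lesavka, so each suite keeps its own metric group.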
publish_metrics() {
  if [[ -z "${PUSHGATEWAY_URL}" ]]; then
    echo "Skipping test metrics publish: QUALITY_GATE_PUSHGATEWAY_URL is not set"
    return 0
  fi

  curl --fail --silent --show-error \
    --data-binary @"${METRICS_FILE}" \
    "${PUSHGATEWAY_URL%/}/metrics/job/${PUSHGATEWAY_JOB}/suite/lesavka"
}
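# A test failure takes precedence; a publish failure only fails the gate when
# the tests themselves passed.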
publish_status=0
publish_metrics || publish_status=$?

if [[ "${status}" -ne 0 ]]; then
  exit "${status}"
fi
exit "${publish_status}"