#!/usr/bin/env bash
#
# Quality-gate runner: collects workspace line coverage with cargo-llvm-cov,
# enforces per-file coverage/LOC baselines, writes Prometheus metrics and a
# text summary, and optionally publishes the metrics to a Pushgateway.
#
# Required tools: git, awk, sed, mktemp; curl only when
# QUALITY_GATE_PUSHGATEWAY_URL is set.
set -euo pipefail

ROOT_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd)
REPORT_DIR="${ROOT_DIR}/target/quality-gate"
COVERAGE_LCOV="${REPORT_DIR}/coverage.lcov"
SUMMARY_TXT="${REPORT_DIR}/summary.txt"
METRICS_FILE="${REPORT_DIR}/metrics.prom"
BASELINE_JSON="${ROOT_DIR}/scripts/ci/quality_gate_baseline.json"
COVERAGE_CONTRACT_JSON="${ROOT_DIR}/testing/coverage_contract.json"
PUSHGATEWAY_URL=${QUALITY_GATE_PUSHGATEWAY_URL:-}

mkdir -p "${REPORT_DIR}"

# Resolve branch/commit labels: prefer CI-provided env vars, fall back to
# git, and finally to "unknown" so metric labels are always populated.
branch=${BRANCH_NAME:-${GIT_BRANCH:-}}
if [[ -z "${branch}" ]]; then
  branch=$(git -C "${ROOT_DIR}" rev-parse --abbrev-ref HEAD 2>/dev/null || echo unknown)
fi
commit=${GIT_COMMIT:-}
if [[ -z "${commit}" ]]; then
  commit=$(git -C "${ROOT_DIR}" rev-parse --short HEAD 2>/dev/null || echo unknown)
fi

# Start from an empty metrics file so a run that dies before the Python
# report step is still classified as "failed" by refresh_counter_metrics.
cat >"${METRICS_FILE}" </dev/null

#######################################
# Fetch the current cumulative runs_total counter for one status from the
# Pushgateway so the new run can be added on top of it.
# NOTE(review): the original definition of this function was garbled in the
# source; the curl endpoint below is reconstructed from the awk filter
# (which matches the job="platform-quality-ci" group) — confirm against the
# actual Pushgateway deployment.
# Globals:   PUSHGATEWAY_URL (read)
# Arguments: $1 - status label, "ok" or "failed"
# Outputs:   the counter value (0 when unset/unreachable) on stdout
#######################################
fetch_remote_counter() {
  local status="$1"
  if [[ -z "${PUSHGATEWAY_URL}" ]]; then
    echo 0
    return 0
  fi
  # "|| true": an unreachable gateway must degrade to 0, not abort under -e.
  curl --fail --silent "${PUSHGATEWAY_URL%/}/metrics" 2>/dev/null | awk -v suite="lesavka" -v status="${status}" '
    /^platform_quality_gate_runs_total\{/ {
      if (index($0, "job=\"platform-quality-ci\"") == 0) next
      if (index($0, "suite=\"" suite "\"") == 0) next
      if (index($0, "status=\"" status "\"") == 0) next
      print int($2)
      found = 1
      exit
    }
    END { if (!found) print 0 }' || true
}

#######################################
# Rewrite the runs_total counter lines in METRICS_FILE: read the local run
# outcome, add 1 to the matching cumulative remote counter, and splice the
# refreshed counter block in front of the remaining metrics.
# Globals:   METRICS_FILE, REPORT_DIR, branch, commit (read)
#######################################
refresh_counter_metrics() {
  local outcome status_line ok_count failed_count tmp_file
  status_line="$(awk '/^platform_quality_gate_runs_total\{suite="lesavka"/ {print; exit}' "${METRICS_FILE}" 2>/dev/null || true)"
  outcome="$(printf '%s' "${status_line}" | sed -n 's/.*status="\([^"]*\)".*/\1/p')"
  # No parsable status line (e.g. the gate crashed early) counts as failed.
  [[ -n "${outcome}" ]] || outcome="failed"
  ok_count="$(fetch_remote_counter ok)"
  failed_count="$(fetch_remote_counter failed)"
  if [[ "${outcome}" == "ok" ]]; then
    ok_count=$((ok_count + 1))
  else
    failed_count=$((failed_count + 1))
  fi
  tmp_file="$(mktemp "${REPORT_DIR}/metrics.prom.XXXXXX")"
  {
    echo '# HELP platform_quality_gate_runs_total Number of quality gate runs by result.'
    echo '# TYPE platform_quality_gate_runs_total counter'
    echo "platform_quality_gate_runs_total{suite=\"lesavka\",branch=\"${branch}\",commit=\"${commit}\",status=\"ok\"} ${ok_count}"
    echo "platform_quality_gate_runs_total{suite=\"lesavka\",branch=\"${branch}\",commit=\"${commit}\",status=\"failed\"} ${failed_count}"
    # Keep every other metric; drop only the counter lines just regenerated.
    awk '
      /^# HELP platform_quality_gate_runs_total / {next}
      /^# TYPE platform_quality_gate_runs_total / {next}
      /^platform_quality_gate_runs_total\{suite="lesavka"/ {next}
      {print}
    ' "${METRICS_FILE}"
  } >"${tmp_file}"
  mv "${tmp_file}" "${METRICS_FILE}"
}

#######################################
# Push METRICS_FILE to the Pushgateway; a logged no-op when no URL is set.
# Globals:   PUSHGATEWAY_URL, METRICS_FILE (read)
# Returns:   curl's exit status, or 0 when publishing is skipped
#######################################
publish_metrics() {
  if [[ -z "${PUSHGATEWAY_URL}" ]]; then
    echo "Skipping Pushgateway publish: QUALITY_GATE_PUSHGATEWAY_URL is not set"
    return 0
  fi
  curl --fail --silent --show-error \
    --data-binary @"${METRICS_FILE}" \
    "${PUSHGATEWAY_URL%/}/metrics/job/platform-quality-ci/suite/lesavka"
}

status=0
# Several integration contracts intentionally mutate process environment and
# probe singleton runtime state. Keep coverage collection serial so per-file
# percentages stay stable enough to serve as a baseline gate.
# Run the coverage collection and, on success, the Python gate below.
# Both failure paths capture the exit code instead of aborting so the
# metrics are still refreshed and published afterwards.
if RUST_TEST_THREADS="${RUST_TEST_THREADS:-1}" cargo llvm-cov --workspace --all-targets --lcov --output-path "${COVERAGE_LCOV}"; then
  if python3 - "${COVERAGE_LCOV}" "${BASELINE_JSON}" "${METRICS_FILE}" "${SUMMARY_TXT}" "${ROOT_DIR}" "${COVERAGE_CONTRACT_JSON}" "${branch}" "${commit}" <<'PY'
"""Quality gate analyzer.

Parses the lcov report, enforces per-file baseline / contract / LOC rules,
writes Prometheus metrics and a human-readable summary, and exits non-zero
when any gate fails.
"""
import json
import pathlib
import subprocess
import sys
from datetime import datetime, timezone

coverage_path = pathlib.Path(sys.argv[1])
baseline_path = pathlib.Path(sys.argv[2])
metrics_path = pathlib.Path(sys.argv[3])
summary_path = pathlib.Path(sys.argv[4])
root = pathlib.Path(sys.argv[5])
contract_path = pathlib.Path(sys.argv[6])
branch = sys.argv[7]
commit = sys.argv[8]


def run_git(*args: str) -> list[str]:
    """Run git in the repo root and return its non-empty stdout lines."""
    proc = subprocess.run(
        ['git', '-C', str(root), *args],
        check=True,
        text=True,
        capture_output=True,
    )
    return [line for line in proc.stdout.splitlines() if line]


def repo_files() -> list[str]:
    """All working-tree files: tracked plus untracked-but-not-ignored."""
    tracked = run_git('ls-files')
    untracked = run_git('ls-files', '--others', '--exclude-standard')
    return sorted(set(tracked + untracked))


def is_test_path(rel: str) -> bool:
    """True when any path component is a 'tests' directory."""
    return 'tests' in pathlib.Path(rel).parts


# Parse the lcov report: map each in-repo, non-test src/*.rs file to its
# (line_number, hit_count) pairs from the DA: records.
lcov_counts: dict[str, list[tuple[int, int]]] = {}
current_file: str | None = None
for raw in coverage_path.read_text(encoding='utf-8').splitlines():
    if raw.startswith('SF:'):
        filename = pathlib.Path(raw[3:])
        try:
            rel = filename.relative_to(root).as_posix()
        except ValueError:
            # Out-of-repo file (e.g. a vendored dependency): skip its DA records.
            current_file = None
            continue
        if is_test_path(rel) or '/src/' not in rel or not rel.endswith('.rs'):
            current_file = None
            continue
        current_file = rel
        lcov_counts.setdefault(current_file, [])
        continue
    if current_file is None or not raw.startswith('DA:'):
        continue
    fields = raw[3:].split(',')
    if len(fields) < 2:
        continue
    try:
        line_number = int(fields[0])
        hit_count = int(fields[1])
    except ValueError:
        continue
    lcov_counts[current_file].append((line_number, hit_count))

# Aggregate per-file and workspace-wide coverage.
files = []
workspace_line_count = 0
workspace_covered_count = 0
for rel, counts in sorted(lcov_counts.items()):
    path = root / rel
    if not path.exists():
        continue
    loc = sum(1 for _ in path.open('r', encoding='utf-8'))
    executable_lines = len(counts)
    covered_lines = sum(1 for _line, hits in counts if hits > 0)
    line_percent = 100.0 if executable_lines == 0 else covered_lines * 100.0 / executable_lines
    workspace_line_count += executable_lines
    workspace_covered_count += covered_lines
    files.append({
        'path': rel,
        'loc': loc,
        'line_percent': line_percent,
    })
files.sort(key=lambda item: item['path'])

# Hard LOC cap scans the whole working tree (including untracked files), not
# just the files present in the lcov report.
source_loc_over_500 = []
for rel in repo_files():
    if not rel.endswith('.rs') or '/src/' not in rel:
        continue
    if is_test_path(rel):
        continue
    path = root / rel
    if not path.exists() or path.is_dir():
        continue
    loc = sum(1 for _ in path.open('r', encoding='utf-8'))
    if loc > 500:
        source_loc_over_500.append(f'{rel}: source file exceeds 500 LOC ({loc})')

baseline = {'files': {}}
if baseline_path.exists():
    with baseline_path.open('r', encoding='utf-8') as fh:
        baseline = json.load(fh)
baseline_files = baseline.get('files', {})

# Compare against the committed baseline: LOC may not grow, coverage may not
# fall. The 0.01 tolerance absorbs float rounding in stored percentages.
regressions = []
current_by_path = {item['path']: item for item in files}
missing_from_baseline = [path for path in current_by_path if path not in baseline_files]
for path, current in current_by_path.items():
    baseline_entry = baseline_files.get(path)
    if baseline_entry is None:
        continue
    if current['loc'] > int(baseline_entry['loc']):
        regressions.append(f"{path}: loc grew from {baseline_entry['loc']} to {current['loc']}")
    if current['line_percent'] + 0.01 < float(baseline_entry['line_percent']):
        regressions.append(
            f"{path}: line coverage fell from {baseline_entry['line_percent']:.2f}% to {current['line_percent']:.2f}%"
        )

# Strict coverage contract: an explicit file list that must meet the minimum
# line-coverage target and the 500 LOC cap.
coverage_contract = {'minimum_line_percent': 95.0, 'files': []}
if contract_path.exists():
    with contract_path.open('r', encoding='utf-8') as fh:
        coverage_contract = json.load(fh)
contract_min = float(coverage_contract.get('minimum_line_percent', 95.0))
contract_files = list(dict.fromkeys(coverage_contract.get('files', [])))
contract_failures = []
contract_files_at_target = 0
for path in contract_files:
    current = current_by_path.get(path)
    if current is None:
        contract_failures.append(f'{path}: missing from coverage report')
        continue
    if current['line_percent'] + 0.01 < contract_min:
        contract_failures.append(
            f'{path}: contract requires >= {contract_min:.2f}% line coverage, found {current["line_percent"]:.2f}%'
        )
    else:
        contract_files_at_target += 1
    if current['loc'] > 500:
        contract_failures.append(f'{path}: contract requires <= 500 LOC, found {current["loc"]}')

workspace_lines = (
    100.0
    if workspace_line_count == 0
    else workspace_covered_count * 100.0 / workspace_line_count
)
files_at_95 = sum(1 for item in files if item['line_percent'] >= 95.0)
files_below_95 = len(files) - files_at_95
over_500 = sum(1 for item in files if item['loc'] > 500)
all_file_failures = [
    f'{item["path"]}: requires >= {contract_min:.2f}% line coverage, found {item["line_percent"]:.2f}%'
    for item in files
    if item['line_percent'] + 0.01 < contract_min
]


def esc(value: str) -> str:
    """Escape a Prometheus label value: backslash, newline, double quote.

    BUG FIX: newlines were previously emitted as a literal double backslash
    followed by 'n' (r'\\\\n'); the exposition format expects a single
    backslash-n escape.
    """
    return value.replace('\\', '\\\\').replace('\n', '\\n').replace('"', '\\"')


labels = f'suite="lesavka",branch="{esc(branch)}",commit="{esc(commit)}"'
metrics = []
metrics.append('# HELP platform_quality_gate_runs_total Number of quality gate runs by result.')
metrics.append('# TYPE platform_quality_gate_runs_total counter')
status_label = 'ok' if not regressions and not contract_failures and not all_file_failures and not source_loc_over_500 else 'failed'
ok_value = 1 if status_label == 'ok' else 0
failed_value = 1 if status_label == 'failed' else 0
# Single sample for this run; the wrapping shell script folds it into the
# cumulative ok/failed counters before publishing.
metrics.append(f'platform_quality_gate_runs_total{{{labels},status="{status_label}"}} 1')
metrics.append('# HELP platform_quality_gate_checks_total Check outcomes from the latest lesavka gate run.')
metrics.append('# TYPE platform_quality_gate_checks_total gauge')
metrics.append(f'platform_quality_gate_checks_total{{{labels},check="coverage",status="ok"}} {ok_value}')
metrics.append(f'platform_quality_gate_checks_total{{{labels},check="coverage",status="failed"}} {failed_value}')
loc_ok_value = 0 if source_loc_over_500 else 1
loc_failed_value = 1 if source_loc_over_500 else 0
metrics.append(f'platform_quality_gate_checks_total{{{labels},check="loc",status="ok"}} {loc_ok_value}')
metrics.append(f'platform_quality_gate_checks_total{{{labels},check="loc",status="failed"}} {loc_failed_value}')
# Checks handled by other pipelines are reported as not_applicable here so
# dashboards see a complete label set.
for check in ('tests', 'style', 'media_reliability', 'gate_glue', 'sonarqube', 'supply_chain'):
    metrics.append(f'platform_quality_gate_checks_total{{{labels},check="{check}",status="not_applicable"}} 1')
metrics.append('# HELP platform_quality_gate_workspace_line_coverage_percent Workspace line coverage percent.')
metrics.append('# TYPE platform_quality_gate_workspace_line_coverage_percent gauge')
metrics.append(f'platform_quality_gate_workspace_line_coverage_percent{{{labels}}} {workspace_lines:.2f}')
metrics.append('# HELP platform_quality_gate_files_total Count of tracked source files in the quality gate.')
metrics.append('# TYPE platform_quality_gate_files_total gauge')
metrics.append(f'platform_quality_gate_files_total{{{labels}}} {len(files)}')
metrics.append('# HELP platform_quality_gate_files_at_or_above_95_total Count of files at or above the 95 percent line target.')
metrics.append('# TYPE platform_quality_gate_files_at_or_above_95_total gauge')
metrics.append(f'platform_quality_gate_files_at_or_above_95_total{{{labels}}} {files_at_95}')
metrics.append('# HELP platform_quality_gate_files_below_95_total Count of files below the 95 percent line target.')
metrics.append('# TYPE platform_quality_gate_files_below_95_total gauge')
metrics.append(f'platform_quality_gate_files_below_95_total{{{labels}}} {files_below_95}')
metrics.append('# HELP platform_quality_gate_source_lines_over_500_total Count of tracked source files over 500 LOC.')
metrics.append('# TYPE platform_quality_gate_source_lines_over_500_total gauge')
# BUG FIX: this previously emitted len(source_loc_over_500) — the repo-wide
# count including untracked files — duplicating the repo_ metric below. The
# tracked metric now reports over_500, the count among coverage-tracked files.
metrics.append(f'platform_quality_gate_source_lines_over_500_total{{{labels}}} {over_500}')
metrics.append('# HELP platform_quality_gate_repo_source_lines_over_500_total Count of repo source files over 500 LOC, including untracked working-tree files.')
metrics.append('# TYPE platform_quality_gate_repo_source_lines_over_500_total gauge')
metrics.append(f'platform_quality_gate_repo_source_lines_over_500_total{{{labels}}} {len(source_loc_over_500)}')
metrics.append('# HELP platform_quality_gate_contract_files_total Count of files covered by the strict testing coverage contract.')
metrics.append('# TYPE platform_quality_gate_contract_files_total gauge')
metrics.append(f'platform_quality_gate_contract_files_total{{{labels}}} {len(contract_files)}')
metrics.append('# HELP platform_quality_gate_contract_files_at_target_total Count of strict contract files meeting the line coverage target.')
metrics.append('# TYPE platform_quality_gate_contract_files_at_target_total gauge')
metrics.append(f'platform_quality_gate_contract_files_at_target_total{{{labels}}} {contract_files_at_target}')
metrics.append('# HELP platform_quality_gate_contract_files_below_target_total Count of strict contract files missing the line coverage target or LOC cap.')
metrics.append('# TYPE platform_quality_gate_contract_files_below_target_total gauge')
metrics.append(
    f'platform_quality_gate_contract_files_below_target_total{{{labels}}} {len(contract_failures)}'
)
metrics.append('# HELP platform_quality_gate_file_line_coverage_percent Per-file line coverage percent.')
metrics.append('# TYPE platform_quality_gate_file_line_coverage_percent gauge')
metrics.append('# HELP platform_quality_gate_file_source_lines Per-file source line count.')
metrics.append('# TYPE platform_quality_gate_file_source_lines gauge')
for item in files:
    label = esc(item['path'])
    metrics.append(
        f'platform_quality_gate_file_line_coverage_percent{{{labels},file="{label}"}} {item["line_percent"]:.2f}'
    )
    metrics.append(
        f'platform_quality_gate_file_source_lines{{{labels},file="{label}"}} {item["loc"]}'
    )
metrics_path.write_text('\n'.join(metrics) + '\n', encoding='utf-8')

# Human-readable summary mirroring the metrics above.
lines = []
lines.append(f'quality gate report generated at {datetime.now(timezone.utc).isoformat()}')
lines.append(f'workspace line coverage: {workspace_lines:.2f}%')
lines.append(f'source files tracked: {len(files)}')
lines.append(f'files >= 95% line coverage: {files_at_95}')
lines.append(f'files < 95% line coverage: {files_below_95}')
lines.append(f'files over 500 LOC: {over_500}')
lines.append(f'strict contract files at target: {contract_files_at_target}/{len(contract_files)} (>= {contract_min:.2f}% and <= 500 LOC)')
lines.append('')
lines.append('path | loc | line coverage | baseline loc | baseline coverage | status')
lines.append('-' * 86)
for item in files:
    baseline_entry = baseline_files.get(item['path'])
    if baseline_entry is None:
        baseline_loc = 'n/a'
        baseline_cov = 'n/a'
        status = 'new'
    else:
        baseline_loc = str(baseline_entry['loc'])
        baseline_cov = f"{float(baseline_entry['line_percent']):.2f}%"
        status = 'ok'
        if item['loc'] > int(baseline_entry['loc']) or item['line_percent'] + 0.01 < float(baseline_entry['line_percent']):
            status = 'regressed'
    lines.append(
        f"{item['path']} | {item['loc']} | {item['line_percent']:.2f}% | {baseline_loc} | {baseline_cov} | {status}"
    )
if contract_files:
    lines.append('')
    lines.append('strict testing coverage contract')
    lines.append('-' * 86)
    for path in contract_files:
        current = current_by_path.get(path)
        if current is None:
            lines.append(f'{path} | missing')
        else:
            lines.append(f'{path} | {current["loc"]} LOC | {current["line_percent"]:.2f}%')
if all_file_failures:
    lines.append('')
    lines.append(f'all-file coverage failures (< {contract_min:.2f}%)')
    lines.append('-' * 86)
    lines.extend(all_file_failures)
if source_loc_over_500:
    lines.append('')
    lines.append('source LOC hard-limit failures')
    lines.append('-' * 86)
    lines.extend(source_loc_over_500)
summary_path.write_text('\n'.join(lines) + '\n', encoding='utf-8')
print(summary_path.read_text(encoding='utf-8'))

# Missing baseline entries are a warning, not a failure.
if missing_from_baseline:
    print('missing baseline entries:', ', '.join(missing_from_baseline), file=sys.stderr)
if regressions or contract_failures or all_file_failures or source_loc_over_500:
    for line in regressions:
        print(line, file=sys.stderr)
    for line in contract_failures:
        print(line, file=sys.stderr)
    for line in all_file_failures:
        print(line, file=sys.stderr)
    for line in source_loc_over_500:
        print(line, file=sys.stderr)
    raise SystemExit(1)
PY
  then
    :
  else
    status=$?
  fi
else
  status=$?
fi

# Always refresh the run counters and attempt to publish, even after a
# failure, so the dashboard sees the failed run. A publish failure only
# surfaces when the gate itself succeeded.
publish_status=0
refresh_counter_metrics
if publish_metrics; then
  :
else
  publish_status=$?
fi
if [[ ${status} -eq 0 && ${publish_status} -ne 0 ]]; then
  status=${publish_status}
fi
exit ${status}