#!/usr/bin/env bash
# Quality gate runner: collects workspace line coverage with cargo-llvm-cov,
# checks it against a per-file baseline and a strict coverage contract, and
# publishes Prometheus metrics to an optional Pushgateway.
set -euo pipefail

# Resolve the repository root relative to this script (scripts/ci/ -> root).
ROOT_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd)
REPORT_DIR="${ROOT_DIR}/target/quality-gate"
COVERAGE_JSON="${REPORT_DIR}/coverage.json"
SUMMARY_TXT="${REPORT_DIR}/summary.txt"
METRICS_FILE="${REPORT_DIR}/metrics.prom"
BASELINE_JSON="${ROOT_DIR}/scripts/ci/quality_gate_baseline.json"
COVERAGE_CONTRACT_JSON="${ROOT_DIR}/testing/coverage_contract.json"
# Optional; when unset, metric publishing is skipped (see publish_metrics).
PUSHGATEWAY_URL=${QUALITY_GATE_PUSHGATEWAY_URL:-}

mkdir -p "${REPORT_DIR}"
# Resolve branch/commit metric labels: prefer CI-provided env vars, fall back
# to git, then to "unknown" so the labels are always populated.
branch=${BRANCH_NAME:-${GIT_BRANCH:-}}
if [[ -z "${branch}" ]]; then
  branch=$(git -C "${ROOT_DIR}" rev-parse --abbrev-ref HEAD 2>/dev/null || echo unknown)
fi
commit=${GIT_COMMIT:-}
if [[ -z "${commit}" ]]; then
  commit=$(git -C "${ROOT_DIR}" rev-parse --short HEAD 2>/dev/null || echo unknown)
fi
# Seed the metrics file with a pessimistic run counter: the run is recorded as
# "failed" until the Python gate rewrites the status line, and
# refresh_counter_metrics later folds in the cumulative remote totals.
cat >"${METRICS_FILE}" <<METRICS
# HELP platform_quality_gate_runs_total Number of quality gate runs by result.
# TYPE platform_quality_gate_runs_total counter
platform_quality_gate_runs_total{suite="lesavka",branch="${branch}",commit="${commit}",status="ok"} 0
platform_quality_gate_runs_total{suite="lesavka",branch="${branch}",commit="${commit}",status="failed"} 1
METRICS
#######################################
# Read the cumulative run counter for one status from the Pushgateway.
# Globals:   PUSHGATEWAY_URL (read)
# Arguments: $1 - status label ("ok" or "failed")
# Outputs:   counter value on stdout; 0 when the gateway is unset,
#            unreachable, or has no matching sample.
#######################################
fetch_remote_counter() {
  local status="$1"
  if [[ -z "${PUSHGATEWAY_URL}" ]]; then
    echo 0
    return 0
  fi
  # Guard curl with "|| true": under `set -o pipefail` an unreachable gateway
  # would otherwise fail the whole pipeline (and the script); with the guard,
  # awk sees empty input and the END block prints 0.
  { curl --silent --show-error --fail "${PUSHGATEWAY_URL%/}/metrics" 2>/dev/null || true; } | awk -v suite="lesavka" -v status="${status}" '
    /^platform_quality_gate_runs_total\{/ {
      if (index($0, "job=\"platform-quality-ci\"") == 0) next
      if (index($0, "suite=\"" suite "\"") == 0) next
      if (index($0, "status=\"" status "\"") == 0) next
      print int($2)
      found = 1
      exit
    }
    END {
      if (!found) print 0
    }'
}
#######################################
# Rewrite the run-counter lines in METRICS_FILE with cumulative totals.
# Reads the outcome recorded by the gate (the status label of the first
# runs_total sample), increments the matching remote counter by one, and
# preserves every other metric line unchanged.
# Globals:   METRICS_FILE, REPORT_DIR, branch, commit (read)
#######################################
refresh_counter_metrics() {
  local outcome status_line ok_count failed_count tmp_file
  status_line="$(awk '/^platform_quality_gate_runs_total\{suite="lesavka"/ {print; exit}' "${METRICS_FILE}" 2>/dev/null || true)"
  outcome="$(printf '%s' "${status_line}" | sed -n 's/.*status="\([^"]*\)".*/\1/p')"
  # Treat a missing/unparseable status line as a failed run (fail-safe).
  [[ -n "${outcome}" ]] || outcome="failed"
  ok_count="$(fetch_remote_counter ok)"
  failed_count="$(fetch_remote_counter failed)"
  if [[ "${outcome}" == "ok" ]]; then
    ok_count=$((ok_count + 1))
  else
    failed_count=$((failed_count + 1))
  fi
  tmp_file="$(mktemp "${REPORT_DIR}/metrics.prom.XXXXXX")"
  {
    echo '# HELP platform_quality_gate_runs_total Number of quality gate runs by result.'
    echo '# TYPE platform_quality_gate_runs_total counter'
    echo "platform_quality_gate_runs_total{suite=\"lesavka\",branch=\"${branch}\",commit=\"${commit}\",status=\"ok\"} ${ok_count}"
    echo "platform_quality_gate_runs_total{suite=\"lesavka\",branch=\"${branch}\",commit=\"${commit}\",status=\"failed\"} ${failed_count}"
    # Keep everything except the old runs_total header and samples.
    awk '
      /^# HELP platform_quality_gate_runs_total / {next}
      /^# TYPE platform_quality_gate_runs_total / {next}
      /^platform_quality_gate_runs_total\{suite="lesavka"/ {next}
      {print}
    ' "${METRICS_FILE}"
  } >"${tmp_file}"
  mv "${tmp_file}" "${METRICS_FILE}"
}
#######################################
# Push METRICS_FILE to the Pushgateway, if one is configured.
# Globals:   PUSHGATEWAY_URL, METRICS_FILE (read)
# Returns:   0 when skipped or pushed successfully; curl's status otherwise.
#######################################
publish_metrics() {
  if [[ -z "${PUSHGATEWAY_URL}" ]]; then
    echo "Skipping Pushgateway publish: QUALITY_GATE_PUSHGATEWAY_URL is not set"
    return 0
  fi
  curl --fail --silent --show-error \
    --data-binary @"${METRICS_FILE}" \
    "${PUSHGATEWAY_URL%/}/metrics/job/platform-quality-ci/suite/lesavka"
}
status=0

# Several integration contracts intentionally mutate process environment and
# probe singleton runtime state. Keep coverage collection serial so per-file
# percentages stay stable enough to serve as a baseline gate.
if RUST_TEST_THREADS="${RUST_TEST_THREADS:-1}" cargo llvm-cov --workspace --all-targets --summary-only --json --output-path "${COVERAGE_JSON}"; then
  # The Python gate parses the coverage JSON, enforces the baseline and the
  # strict contract, and writes metrics + a human-readable summary.
  if python3 - "${COVERAGE_JSON}" "${BASELINE_JSON}" "${METRICS_FILE}" "${SUMMARY_TXT}" "${ROOT_DIR}" "${COVERAGE_CONTRACT_JSON}" "${branch}" "${commit}" <<'PY'
import json
import pathlib
import sys
from datetime import datetime, timezone

coverage_path = pathlib.Path(sys.argv[1])
baseline_path = pathlib.Path(sys.argv[2])
metrics_path = pathlib.Path(sys.argv[3])
summary_path = pathlib.Path(sys.argv[4])
root = pathlib.Path(sys.argv[5])
contract_path = pathlib.Path(sys.argv[6])
branch = sys.argv[7]
commit = sys.argv[8]

with coverage_path.open('r', encoding='utf-8') as fh:
    report = json.load(fh)
# cargo-llvm-cov JSON: data[0] holds per-file entries plus workspace totals.
coverage_data = report['data'][0]
coverage_totals = coverage_data['totals']

# Collect tracked source files: under src/, excluding src/tests/.
files = []
for entry in coverage_data['files']:
    filename = pathlib.Path(entry['filename'])
    rel = filename.relative_to(root).as_posix()
    if '/src/tests/' in rel:
        continue
    if '/src/' not in rel:
        continue
    loc = sum(1 for _ in filename.open('r', encoding='utf-8'))
    line_percent = float(entry['summary']['lines']['percent'])
    files.append({
        'path': rel,
        'loc': loc,
        'line_percent': line_percent,
    })
files.sort(key=lambda item: item['path'])

baseline = {'files': {}}
if baseline_path.exists():
    with baseline_path.open('r', encoding='utf-8') as fh:
        baseline = json.load(fh)
baseline_files = baseline.get('files', {})

# Baseline gate: LOC may not grow and coverage may not fall (0.01 tolerance
# absorbs float jitter in the reported percentages).
regressions = []
current_by_path = {item['path']: item for item in files}
missing_from_baseline = [path for path in current_by_path if path not in baseline_files]
for path, current in current_by_path.items():
    baseline_entry = baseline_files.get(path)
    if baseline_entry is None:
        continue
    if current['loc'] > int(baseline_entry['loc']):
        regressions.append(f"{path}: loc grew from {baseline_entry['loc']} to {current['loc']}")
    if current['line_percent'] + 0.01 < float(baseline_entry['line_percent']):
        regressions.append(
            f"{path}: line coverage fell from {baseline_entry['line_percent']:.2f}% to {current['line_percent']:.2f}%"
        )

# Strict contract gate: listed files must meet the minimum coverage and stay
# under the 500 LOC cap.
coverage_contract = {'minimum_line_percent': 95.0, 'files': []}
if contract_path.exists():
    with contract_path.open('r', encoding='utf-8') as fh:
        coverage_contract = json.load(fh)
contract_min = float(coverage_contract.get('minimum_line_percent', 95.0))
contract_files = list(dict.fromkeys(coverage_contract.get('files', [])))
contract_failures = []
contract_files_at_target = 0
for path in contract_files:
    current = current_by_path.get(path)
    if current is None:
        contract_failures.append(f'{path}: missing from coverage report')
        continue
    if current['line_percent'] + 0.01 < contract_min:
        contract_failures.append(
            f'{path}: contract requires >= {contract_min:.2f}% line coverage, found {current["line_percent"]:.2f}%'
        )
    else:
        contract_files_at_target += 1
    if current['loc'] > 500:
        contract_failures.append(f'{path}: contract requires <= 500 LOC, found {current["loc"]}')

workspace_lines = float(coverage_totals['lines']['percent'])
files_at_95 = sum(1 for item in files if item['line_percent'] >= 95.0)
files_below_95 = len(files) - files_at_95
over_500 = sum(1 for item in files if item['loc'] > 500)

# All-file gate: every tracked file must meet the contract minimum.
all_file_failures = [
    f'{item["path"]}: requires >= {contract_min:.2f}% line coverage, found {item["line_percent"]:.2f}%'
    for item in files
    if item['line_percent'] + 0.01 < contract_min
]


def esc(value: str) -> str:
    # Prometheus label-value escaping: backslash first, then newline -> \n,
    # then double quote -> \".
    return value.replace('\\', r'\\').replace('\n', r'\n').replace('"', r'\"')


labels = f'suite="lesavka",branch="{esc(branch)}",commit="{esc(commit)}"'

metrics = []
metrics.append('# HELP platform_quality_gate_runs_total Number of quality gate runs by result.')
metrics.append('# TYPE platform_quality_gate_runs_total counter')
status_label = 'ok' if not regressions and not missing_from_baseline and not contract_failures and not all_file_failures else 'failed'
ok_value = 1 if status_label == 'ok' else 0
failed_value = 1 if status_label == 'failed' else 0
# Single-run sample; refresh_counter_metrics converts it to cumulative totals.
metrics.append(f'platform_quality_gate_runs_total{{{labels},status="{status_label}"}} 1')
metrics.append('# HELP platform_quality_gate_checks_total Check outcomes from the latest lesavka gate run.')
metrics.append('# TYPE platform_quality_gate_checks_total gauge')
metrics.append(f'platform_quality_gate_checks_total{{{labels},check="coverage",status="ok"}} {ok_value}')
metrics.append(f'platform_quality_gate_checks_total{{{labels},check="coverage",status="failed"}} {failed_value}')
metrics.append('# HELP platform_quality_gate_workspace_line_coverage_percent Workspace line coverage percent.')
metrics.append('# TYPE platform_quality_gate_workspace_line_coverage_percent gauge')
metrics.append(f'platform_quality_gate_workspace_line_coverage_percent{{{labels}}} {workspace_lines:.2f}')
metrics.append('# HELP platform_quality_gate_files_total Count of tracked source files in the quality gate.')
metrics.append('# TYPE platform_quality_gate_files_total gauge')
metrics.append(f'platform_quality_gate_files_total{{{labels}}} {len(files)}')
metrics.append('# HELP platform_quality_gate_files_at_or_above_95_total Count of files at or above the 95 percent line target.')
metrics.append('# TYPE platform_quality_gate_files_at_or_above_95_total gauge')
metrics.append(f'platform_quality_gate_files_at_or_above_95_total{{{labels}}} {files_at_95}')
metrics.append('# HELP platform_quality_gate_files_below_95_total Count of files below the 95 percent line target.')
metrics.append('# TYPE platform_quality_gate_files_below_95_total gauge')
metrics.append(f'platform_quality_gate_files_below_95_total{{{labels}}} {files_below_95}')
metrics.append('# HELP platform_quality_gate_source_lines_over_500_total Count of tracked source files over 500 LOC.')
metrics.append('# TYPE platform_quality_gate_source_lines_over_500_total gauge')
metrics.append(f'platform_quality_gate_source_lines_over_500_total{{{labels}}} {over_500}')
metrics.append('# HELP platform_quality_gate_contract_files_total Count of files covered by the strict testing coverage contract.')
metrics.append('# TYPE platform_quality_gate_contract_files_total gauge')
metrics.append(f'platform_quality_gate_contract_files_total{{{labels}}} {len(contract_files)}')
metrics.append('# HELP platform_quality_gate_contract_files_at_target_total Count of strict contract files meeting the line coverage target.')
metrics.append('# TYPE platform_quality_gate_contract_files_at_target_total gauge')
metrics.append(f'platform_quality_gate_contract_files_at_target_total{{{labels}}} {contract_files_at_target}')
metrics.append('# HELP platform_quality_gate_contract_files_below_target_total Count of strict contract files missing the line coverage target or LOC cap.')
metrics.append('# TYPE platform_quality_gate_contract_files_below_target_total gauge')
metrics.append(
    f'platform_quality_gate_contract_files_below_target_total{{{labels}}} {len(contract_failures)}'
)
metrics.append('# HELP platform_quality_gate_file_line_coverage_percent Per-file line coverage percent.')
metrics.append('# TYPE platform_quality_gate_file_line_coverage_percent gauge')
metrics.append('# HELP platform_quality_gate_file_source_lines Per-file source line count.')
metrics.append('# TYPE platform_quality_gate_file_source_lines gauge')
for item in files:
    label = esc(item['path'])
    metrics.append(
        f'platform_quality_gate_file_line_coverage_percent{{{labels},file="{label}"}} {item["line_percent"]:.2f}'
    )
    metrics.append(
        f'platform_quality_gate_file_source_lines{{{labels},file="{label}"}} {item["loc"]}'
    )
metrics_path.write_text('\n'.join(metrics) + '\n', encoding='utf-8')

# Human-readable summary, printed below and kept as an artifact.
lines = []
lines.append(f'quality gate report generated at {datetime.now(timezone.utc).isoformat()}')
lines.append(f'workspace line coverage: {workspace_lines:.2f}%')
lines.append(f'source files tracked: {len(files)}')
lines.append(f'files >= 95% line coverage: {files_at_95}')
lines.append(f'files < 95% line coverage: {files_below_95}')
lines.append(f'files over 500 LOC: {over_500}')
lines.append(f'strict contract files at target: {contract_files_at_target}/{len(contract_files)} (>= {contract_min:.2f}% and <= 500 LOC)')
lines.append('')
lines.append('path | loc | line coverage | baseline loc | baseline coverage | status')
lines.append('-' * 86)
for item in files:
    baseline_entry = baseline_files.get(item['path'])
    if baseline_entry is None:
        baseline_loc = 'n/a'
        baseline_cov = 'n/a'
        status = 'new'
    else:
        baseline_loc = str(baseline_entry['loc'])
        baseline_cov = f"{float(baseline_entry['line_percent']):.2f}%"
        status = 'ok'
        if item['loc'] > int(baseline_entry['loc']) or item['line_percent'] + 0.01 < float(baseline_entry['line_percent']):
            status = 'regressed'
    lines.append(
        f"{item['path']} | {item['loc']} | {item['line_percent']:.2f}% | {baseline_loc} | {baseline_cov} | {status}"
    )
if contract_files:
    lines.append('')
    lines.append('strict testing coverage contract')
    lines.append('-' * 86)
    for path in contract_files:
        current = current_by_path.get(path)
        if current is None:
            lines.append(f'{path} | missing')
        else:
            lines.append(f'{path} | {current["loc"]} LOC | {current["line_percent"]:.2f}%')
if all_file_failures:
    lines.append('')
    lines.append(f'all-file coverage failures (< {contract_min:.2f}%)')
    lines.append('-' * 86)
    lines.extend(all_file_failures)
summary_path.write_text('\n'.join(lines) + '\n', encoding='utf-8')
print(summary_path.read_text(encoding='utf-8'))

if missing_from_baseline:
    print('missing baseline entries:', ', '.join(missing_from_baseline), file=sys.stderr)
if regressions or missing_from_baseline or contract_failures or all_file_failures:
    for line in regressions:
        print(line, file=sys.stderr)
    for line in contract_failures:
        print(line, file=sys.stderr)
    for line in all_file_failures:
        print(line, file=sys.stderr)
    raise SystemExit(1)
PY
  then
    :
  else
    status=$?
  fi
else
  status=$?
fi
publish_status=0
# Always refresh the cumulative counters and attempt a publish, even when the
# gate failed, so dashboards also record failed runs.
refresh_counter_metrics
if publish_metrics; then
  :
else
  publish_status=$?
fi
# A publish failure only fails the job when the gate itself passed; a gate
# failure always wins.
if [[ ${status} -eq 0 && ${publish_status} -ne 0 ]]; then
  status=${publish_status}
fi
exit ${status}