ci: add wave-1 quality and hygiene gate checks
This commit is contained in:
parent
8161a4dae8
commit
3635fd36fb
1
.gitignore
vendored
1
.gitignore
vendored
@ -6,3 +6,4 @@
|
||||
.DS_Store
|
||||
/web/node_modules/
|
||||
/web/dist/
|
||||
/build/
|
||||
|
||||
33
Jenkinsfile
vendored
33
Jenkinsfile
vendored
@ -108,30 +108,37 @@ spec:
|
||||
sh '''
|
||||
set -eu
|
||||
apt-get update >/dev/null
|
||||
apt-get install -y --no-install-recommends jq >/dev/null
|
||||
apt-get install -y --no-install-recommends jq python3 >/dev/null
|
||||
mkdir -p build
|
||||
set +e
|
||||
go test -json ./... > build/go-test.json
|
||||
test_rc=$?
|
||||
bash scripts/check.sh
|
||||
gate_rc=$?
|
||||
set -e
|
||||
tests_total="$(jq -s '[.[] | select(.Test != null and (.Action=="pass" or .Action=="fail" or .Action=="skip"))] | length' build/go-test.json)"
|
||||
tests_failed="$(jq -s '[.[] | select(.Test != null and .Action=="fail")] | length' build/go-test.json)"
|
||||
tests_skipped="$(jq -s '[.[] | select(.Test != null and .Action=="skip")] | length' build/go-test.json)"
|
||||
tests_errors="$(jq -s '[.[] | select(.Test == null and .Action=="fail")] | length' build/go-test.json)"
|
||||
if [ ! -f build/go-test.json ]; then
|
||||
: > build/go-test.json
|
||||
fi
|
||||
tests_total="$(jq -s '[.[] | select(.Test != null and (.Action=="pass" or .Action=="fail" or .Action=="skip"))] | length' build/go-test.json 2>/dev/null || echo 0)"
|
||||
tests_failed="$(jq -s '[.[] | select(.Test != null and .Action=="fail")] | length' build/go-test.json 2>/dev/null || echo 0)"
|
||||
tests_skipped="$(jq -s '[.[] | select(.Test != null and .Action=="skip")] | length' build/go-test.json 2>/dev/null || echo 0)"
|
||||
tests_errors="$(jq -s '[.[] | select(.Test == null and .Action=="fail")] | length' build/go-test.json 2>/dev/null || echo 0)"
|
||||
tests_passed=$((tests_total - tests_failed - tests_skipped))
|
||||
if [ "${tests_passed}" -lt 0 ]; then
|
||||
tests_passed=0
|
||||
fi
|
||||
coverage_percent="$(jq -r '.coverage_percent // 0' build/quality-summary.json 2>/dev/null || echo 0)"
|
||||
over_500="$(jq -r '.source_lines_over_500 // 0' build/quality-summary.json 2>/dev/null || echo 0)"
|
||||
cat > build/test-summary.json <<EOF
|
||||
{
|
||||
"tests": ${tests_total},
|
||||
"passed": ${tests_passed},
|
||||
"failed": ${tests_failed},
|
||||
"errors": ${tests_errors},
|
||||
"skipped": ${tests_skipped}
|
||||
"skipped": ${tests_skipped},
|
||||
"coverage_percent": ${coverage_percent},
|
||||
"source_lines_over_500": ${over_500}
|
||||
}
|
||||
EOF
|
||||
printf '%s\n' "${test_rc}" > build/test.exitcode
|
||||
printf '%s\n' "${gate_rc}" > build/test.exitcode
|
||||
'''
|
||||
}
|
||||
}
|
||||
@ -173,6 +180,8 @@ EOF
|
||||
tests_failed="$(jq -r '.failed // 0' build/test-summary.json 2>/dev/null || echo 0)"
|
||||
tests_errors="$(jq -r '.errors // 0' build/test-summary.json 2>/dev/null || echo 0)"
|
||||
tests_skipped="$(jq -r '.skipped // 0' build/test-summary.json 2>/dev/null || echo 0)"
|
||||
coverage_percent="$(jq -r '.coverage_percent // 0' build/test-summary.json 2>/dev/null || echo 0)"
|
||||
over_500="$(jq -r '.source_lines_over_500 // 0' build/test-summary.json 2>/dev/null || echo 0)"
|
||||
cat <<METRICS | curl -fsS --data-binary @- "${gateway}/metrics/job/platform-quality-ci/suite/${suite}" >/dev/null
|
||||
# TYPE platform_quality_gate_runs_total counter
|
||||
platform_quality_gate_runs_total{suite="${suite}",status="ok"} ${ok_count}
|
||||
@ -182,6 +191,12 @@ soteria_quality_gate_tests_total{suite="${suite}",result="passed"} ${tests_passe
|
||||
soteria_quality_gate_tests_total{suite="${suite}",result="failed"} ${tests_failed}
|
||||
soteria_quality_gate_tests_total{suite="${suite}",result="error"} ${tests_errors}
|
||||
soteria_quality_gate_tests_total{suite="${suite}",result="skipped"} ${tests_skipped}
|
||||
# TYPE soteria_quality_gate_coverage_percent gauge
|
||||
soteria_quality_gate_coverage_percent{suite="${suite}"} ${coverage_percent}
|
||||
# TYPE platform_quality_gate_workspace_line_coverage_percent gauge
|
||||
platform_quality_gate_workspace_line_coverage_percent{suite="${suite}"} ${coverage_percent}
|
||||
# TYPE platform_quality_gate_source_lines_over_500_total gauge
|
||||
platform_quality_gate_source_lines_over_500_total{suite="${suite}"} ${over_500}
|
||||
METRICS
|
||||
'''
|
||||
}
|
||||
|
||||
80
scripts/check.sh
Normal file
80
scripts/check.sh
Normal file
@ -0,0 +1,80 @@
|
||||
#!/usr/bin/env bash
# Wave-1 quality gate: gofmt, structure/doc/LOC hygiene, code smells, UI test
# framework policy, go vet, unit tests + coverage, coverage hygiene.
# Writes machine-readable summaries under build/ for the CI pipeline and
# finally exits with the unit-test exit code so test failures propagate.
set -euo pipefail

# Repo root is the parent of the scripts/ directory containing this file.
ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
BUILD_DIR="${ROOT_DIR}/build"
mkdir -p "${BUILD_DIR}"

cd "${ROOT_DIR}"

# gofmt: fail if any non-test, non-embedded Go source is unformatted.
echo "[quality] gofmt"
mapfile -t go_files < <(find cmd internal -type f -name '*.go' ! -name '*_test.go' ! -path '*/ui-dist/*' | sort)
if ((${#go_files[@]})); then
    gofmt_diff="$(gofmt -l "${go_files[@]}")"
    if [[ -n "${gofmt_diff}" ]]; then
        echo "gofmt check failed. Run: gofmt -w <files>"
        echo "${gofmt_diff}"
        exit 1
    fi
fi

# Each hygiene checker exits non-zero on violation; set -e aborts the gate.
echo "[quality] structure hygiene"
python3 "${ROOT_DIR}/scripts/structure_hygiene_check.py" --root "${ROOT_DIR}"

echo "[quality] doc hygiene"
python3 "${ROOT_DIR}/scripts/doc_hygiene_check.py" \
    --root "${ROOT_DIR}" \
    --waivers "${ROOT_DIR}/scripts/doc_hygiene_waivers.tsv"

echo "[quality] LOC hygiene"
python3 "${ROOT_DIR}/scripts/loc_hygiene_check.py" \
    --root "${ROOT_DIR}" \
    --max-lines 500 \
    --waivers "${ROOT_DIR}/scripts/loc_hygiene_waivers.tsv" \
    --summary-json "${BUILD_DIR}/loc-summary.json"

echo "[quality] code smell"
bash "${ROOT_DIR}/scripts/code_smell_check.sh"

echo "[quality] ui test framework policy"
python3 "${ROOT_DIR}/scripts/ui_test_framework_check.py" --root "${ROOT_DIR}"

echo "[quality] go vet"
go vet ./...

# Tests: capture the exit code instead of aborting (set -e suspended) so the
# coverage and summary steps below still run; the code is re-raised at the end.
echo "[quality] unit tests + coverage"
set +e
go test -json -coverprofile="${BUILD_DIR}/coverage.out" ./... > "${BUILD_DIR}/go-test.json"
test_rc=$?
set -e

# NOTE(review): if the test run crashed before writing coverage.out, this
# check fails with its own exit 1, masking test_rc — confirm that is intended.
echo "[quality] coverage hygiene"
python3 "${ROOT_DIR}/scripts/coverage_hygiene_check.py" \
    --root "${ROOT_DIR}" \
    --coverprofile "${BUILD_DIR}/coverage.out" \
    --min-total 39.5 \
    --baseline "${ROOT_DIR}/scripts/coverage_hygiene_baseline.tsv" \
    --summary-json "${BUILD_DIR}/coverage-summary.json"

# Merge per-check summaries into build/quality-summary.json (read by the CI
# pipeline for metrics). Best-effort: missing/malformed inputs fall back to
# zeros rather than failing the gate here.
python3 - <<'PY'
import json
from pathlib import Path

build = Path("build")
loc_path = build / "loc-summary.json"
cov_path = build / "coverage-summary.json"
summary = {"coverage_percent": 0.0, "source_lines_over_500": 0}
if loc_path.exists():
    try:
        summary["source_lines_over_500"] = int(json.loads(loc_path.read_text()).get("over_500", 0))
    except Exception:
        pass
if cov_path.exists():
    try:
        summary["coverage_percent"] = float(json.loads(cov_path.read_text()).get("total_percent", 0.0))
    except Exception:
        pass
(build / "quality-summary.json").write_text(json.dumps(summary, indent=2), encoding="utf-8")
PY

# Propagate the unit-test result as the script's exit code.
exit "${test_rc}"
|
||||
42
scripts/code_smell_check.sh
Normal file
42
scripts/code_smell_check.sh
Normal file
@ -0,0 +1,42 @@
|
||||
#!/usr/bin/env bash
# Grep-based code-smell gate: bans fmt.Print*, panic(), and stray log.Fatalf
# in production Go sources. Exits 1 if any pattern matches.
set -euo pipefail

ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
FAILED=0

# Fix: use a private mktemp file instead of the fixed, world-shared
# /tmp/soteria_smell_hits.txt — the fixed path is clobbered by concurrent CI
# runs and is a predictable-path (symlink) hazard. Cleaned up on exit.
HITS_FILE="$(mktemp)"
trap 'rm -f "${HITS_FILE}"' EXIT

# check_pattern PATTERN SCOPE LABEL [extra rg args...]
# Records a failure when PATTERN matches anywhere under SCOPE.
check_pattern() {
    local pattern="$1"
    local scope="$2"
    local label="$3"
    shift 3
    # rg exit codes: 0 = matches found (violation), 1 = no matches (ok),
    # 2 = error (e.g. invalid regex). Suspend -e to inspect the code.
    set +e
    rg -n "${pattern}" "${scope}" "$@" >"${HITS_FILE}"
    local status=$?
    set -e
    if [[ ${status} -eq 0 ]]; then
        echo "Code smell check failed: ${label}"
        cat "${HITS_FILE}"
        FAILED=1
    elif [[ ${status} -eq 2 ]]; then
        echo "Code smell check failed: invalid regex for ${label}"
        FAILED=1
    fi
}

check_pattern 'fmt\.Print(f|ln)?\(' "${ROOT_DIR}/cmd" \
    "use structured logging instead of fmt.Print* in cmd code" --glob '!**/*_test.go'
check_pattern 'fmt\.Print(f|ln)?\(' "${ROOT_DIR}/internal" \
    "use structured logging instead of fmt.Print* in internal code" --glob '!**/*_test.go'
check_pattern 'panic\(' "${ROOT_DIR}/cmd" \
    "avoid panic in production cmd code" --glob '!**/*_test.go'
check_pattern 'panic\(' "${ROOT_DIR}/internal" \
    "avoid panic in production internal code" --glob '!**/*_test.go'
check_pattern 'log\.Fatalf\(' "${ROOT_DIR}" \
    "keep log.Fatalf limited to cmd/soteria/main.go" \
    --glob '!cmd/soteria/main.go' --glob '!**/*_test.go'

if [[ "${FAILED}" -ne 0 ]]; then
    exit 1
fi

echo "Code smell checks: ok"
|
||||
12
scripts/coverage_hygiene_baseline.tsv
Normal file
12
scripts/coverage_hygiene_baseline.tsv
Normal file
@ -0,0 +1,12 @@
|
||||
# relative_path min_percent reason
|
||||
cmd/soteria/main.go 0.00 baseline
|
||||
internal/config/config.go 0.00 baseline
|
||||
internal/k8s/client.go 0.00 baseline
|
||||
internal/k8s/jobs.go 0.00 baseline
|
||||
internal/k8s/state.go 0.00 baseline
|
||||
internal/k8s/volumes.go 0.00 baseline
|
||||
internal/longhorn/client.go 0.00 baseline
|
||||
internal/server/b2.go 0.00 baseline
|
||||
internal/server/metrics.go 58.48 baseline
|
||||
internal/server/server.go 63.65 baseline
|
||||
internal/server/ui_renderer.go 64.32 baseline
|
||||
|
117
scripts/coverage_hygiene_check.py
Normal file
117
scripts/coverage_hygiene_check.py
Normal file
@ -0,0 +1,117 @@
|
||||
#!/usr/bin/env python3
|
||||
"""Enforce non-regressing Go coverage against a checked-in baseline."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import argparse
|
||||
import json
|
||||
import re
|
||||
import sys
|
||||
from collections import defaultdict
|
||||
from pathlib import Path
|
||||
|
||||
COVER_RE = re.compile(
|
||||
r"^(?P<path>.+?):(?P<start>\d+)\.(?P<start_col>\d+),(?P<end>\d+)\.(?P<end_col>\d+)\s+(?P<stmts>\d+)\s+(?P<count>\d+)$"
|
||||
)
|
||||
|
||||
|
||||
def parse_args() -> argparse.Namespace:
|
||||
parser = argparse.ArgumentParser()
|
||||
parser.add_argument("--root", default=".")
|
||||
parser.add_argument("--coverprofile", required=True)
|
||||
parser.add_argument("--min-total", type=float, default=0.0)
|
||||
parser.add_argument("--baseline", required=True)
|
||||
parser.add_argument("--summary-json", default="")
|
||||
return parser.parse_args()
|
||||
|
||||
|
||||
def normalize_path(raw: str, root_name: str) -> str:
|
||||
marker = f"/{root_name}/"
|
||||
if marker in raw:
|
||||
return raw.split(marker, 1)[1]
|
||||
path = Path(raw)
|
||||
return path.as_posix()
|
||||
|
||||
|
||||
def parse_coverprofile(path: Path, root_name: str) -> tuple[dict[str, tuple[int, int]], float]:
|
||||
per_file: dict[str, list[int]] = defaultdict(lambda: [0, 0])
|
||||
total_statements = 0
|
||||
total_covered = 0
|
||||
for line in path.read_text(encoding="utf-8").splitlines():
|
||||
if not line or line.startswith("mode:"):
|
||||
continue
|
||||
match = COVER_RE.match(line)
|
||||
if not match:
|
||||
continue
|
||||
rel = normalize_path(match.group("path"), root_name)
|
||||
stmts = int(match.group("stmts"))
|
||||
count = int(match.group("count"))
|
||||
per_file[rel][0] += stmts
|
||||
total_statements += stmts
|
||||
if count > 0:
|
||||
per_file[rel][1] += stmts
|
||||
total_covered += stmts
|
||||
normalized = {k: (v[0], v[1]) for k, v in per_file.items()}
|
||||
total_pct = 100.0 if total_statements == 0 else (total_covered * 100.0 / total_statements)
|
||||
return normalized, total_pct
|
||||
|
||||
|
||||
def load_baseline(path: Path) -> dict[str, float]:
|
||||
baseline: dict[str, float] = {}
|
||||
for raw in path.read_text(encoding="utf-8").splitlines():
|
||||
line = raw.strip()
|
||||
if not line or line.startswith("#"):
|
||||
continue
|
||||
parts = line.split("\t")
|
||||
if len(parts) < 2:
|
||||
continue
|
||||
rel = parts[0]
|
||||
try:
|
||||
pct = float(parts[1])
|
||||
except ValueError:
|
||||
continue
|
||||
baseline[rel] = pct
|
||||
return baseline
|
||||
|
||||
|
||||
def main() -> int:
|
||||
args = parse_args()
|
||||
root = Path(args.root).resolve()
|
||||
coverprofile = Path(args.coverprofile).resolve()
|
||||
baseline = load_baseline(Path(args.baseline).resolve())
|
||||
|
||||
if not coverprofile.exists():
|
||||
print(f"Coverage hygiene check failed: missing coverprofile {coverprofile}")
|
||||
return 1
|
||||
|
||||
per_file, total_pct = parse_coverprofile(coverprofile, root.name)
|
||||
violations: list[str] = []
|
||||
for rel, floor in sorted(baseline.items()):
|
||||
stmts, covered = per_file.get(rel, (0, 0))
|
||||
pct = 100.0 if stmts == 0 else (covered * 100.0 / stmts)
|
||||
if pct + 1e-9 < floor:
|
||||
violations.append(f"{rel}: {pct:.2f}% < baseline {floor:.2f}%")
|
||||
|
||||
if total_pct + 1e-9 < args.min_total:
|
||||
violations.append(f"total coverage {total_pct:.2f}% < floor {args.min_total:.2f}%")
|
||||
|
||||
summary = {
|
||||
"total_percent": round(total_pct, 3),
|
||||
"checked_files": len(baseline),
|
||||
"violations": len(violations),
|
||||
}
|
||||
if args.summary_json:
|
||||
Path(args.summary_json).write_text(json.dumps(summary, indent=2), encoding="utf-8")
|
||||
|
||||
if violations:
|
||||
print("Coverage hygiene check failed:")
|
||||
for item in violations:
|
||||
print(item)
|
||||
return 1
|
||||
|
||||
print(f"Coverage hygiene checks: ok (total {total_pct:.2f}%)")
|
||||
return 0
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
sys.exit(main())
|
||||
119
scripts/doc_hygiene_check.py
Normal file
119
scripts/doc_hygiene_check.py
Normal file
@ -0,0 +1,119 @@
|
||||
#!/usr/bin/env python3
|
||||
"""Require docs for exported Go types/functions, with explicit legacy waivers."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import argparse
|
||||
import re
|
||||
import sys
|
||||
from pathlib import Path
|
||||
|
||||
TYPE_RE = re.compile(r"^\s*type\s+([A-Z][A-Za-z0-9_]*)\b")
|
||||
FUNC_RE = re.compile(r"^\s*func\s*(?:\([^)]*\)\s*)?([A-Z][A-Za-z0-9_]*)\s*\(")
|
||||
|
||||
|
||||
def parse_args() -> argparse.Namespace:
|
||||
parser = argparse.ArgumentParser()
|
||||
parser.add_argument("--root", default=".")
|
||||
parser.add_argument("--waivers", required=True)
|
||||
return parser.parse_args()
|
||||
|
||||
|
||||
def iter_go_sources(root: Path) -> list[Path]:
|
||||
files: list[Path] = []
|
||||
for rel_root in ("cmd", "internal"):
|
||||
base = root / rel_root
|
||||
if not base.exists():
|
||||
continue
|
||||
for path in sorted(base.rglob("*.go")):
|
||||
rel = path.relative_to(root).as_posix()
|
||||
if rel.endswith("_test.go"):
|
||||
continue
|
||||
if rel.startswith("internal/server/ui-dist/"):
|
||||
continue
|
||||
files.append(path)
|
||||
return files
|
||||
|
||||
|
||||
def has_leading_comment(lines: list[str], idx: int) -> bool:
|
||||
cursor = idx - 1
|
||||
while cursor >= 0 and lines[cursor].strip() == "":
|
||||
cursor -= 1
|
||||
if cursor < 0:
|
||||
return False
|
||||
line = lines[cursor].lstrip()
|
||||
if line.startswith("//"):
|
||||
return True
|
||||
if "*/" in line:
|
||||
while cursor >= 0:
|
||||
if "/*" in lines[cursor]:
|
||||
return True
|
||||
cursor -= 1
|
||||
return False
|
||||
|
||||
|
||||
def load_waivers(path: Path) -> set[tuple[str, str, str]]:
|
||||
waivers: set[tuple[str, str, str]] = set()
|
||||
if not path.exists():
|
||||
return waivers
|
||||
for raw in path.read_text(encoding="utf-8").splitlines():
|
||||
line = raw.strip()
|
||||
if not line or line.startswith("#"):
|
||||
continue
|
||||
parts = line.split("\t")
|
||||
if len(parts) < 3:
|
||||
continue
|
||||
waivers.add((parts[0], parts[1], parts[2]))
|
||||
return waivers
|
||||
|
||||
|
||||
def main() -> int:
|
||||
args = parse_args()
|
||||
root = Path(args.root).resolve()
|
||||
waiver_path = Path(args.waivers).resolve()
|
||||
waivers = load_waivers(waiver_path)
|
||||
|
||||
violations: list[str] = []
|
||||
seen: set[tuple[str, str, str]] = set()
|
||||
for path in iter_go_sources(root):
|
||||
rel = path.relative_to(root).as_posix()
|
||||
lines = path.read_text(encoding="utf-8", errors="ignore").splitlines()
|
||||
for idx, line in enumerate(lines):
|
||||
kind = ""
|
||||
name = ""
|
||||
type_match = TYPE_RE.match(line)
|
||||
if type_match:
|
||||
kind = "type"
|
||||
name = type_match.group(1)
|
||||
else:
|
||||
func_match = FUNC_RE.match(line)
|
||||
if func_match:
|
||||
kind = "func"
|
||||
name = func_match.group(1)
|
||||
if not kind:
|
||||
continue
|
||||
if has_leading_comment(lines, idx):
|
||||
continue
|
||||
key = (rel, kind, name)
|
||||
seen.add(key)
|
||||
if key not in waivers:
|
||||
violations.append(f"{rel}:{idx + 1}: missing doc comment for {kind} {name}")
|
||||
|
||||
stale_waivers = sorted(waivers - seen)
|
||||
if stale_waivers:
|
||||
print("Doc hygiene warning: stale waivers present (safe to remove):")
|
||||
for rel, kind, name in stale_waivers:
|
||||
print(f" {rel}\t{kind}\t{name}")
|
||||
|
||||
if violations:
|
||||
print("Doc hygiene check failed:")
|
||||
for item in violations:
|
||||
print(item)
|
||||
return 1
|
||||
|
||||
print("Doc hygiene checks: ok")
|
||||
return 0
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
sys.exit(main())
|
||||
71
scripts/doc_hygiene_waivers.tsv
Normal file
71
scripts/doc_hygiene_waivers.tsv
Normal file
@ -0,0 +1,71 @@
|
||||
# relative_path kind name reason
|
||||
internal/api/types.go type BackupRequest legacy-no-doc
|
||||
internal/api/types.go type BackupResponse legacy-no-doc
|
||||
internal/api/types.go type RestoreTestRequest legacy-no-doc
|
||||
internal/api/types.go type RestoreTestResponse legacy-no-doc
|
||||
internal/api/types.go type InventoryResponse legacy-no-doc
|
||||
internal/api/types.go type NamespaceInventory legacy-no-doc
|
||||
internal/api/types.go type PVCInventory legacy-no-doc
|
||||
internal/api/types.go type BackupListResponse legacy-no-doc
|
||||
internal/api/types.go type BackupRecord legacy-no-doc
|
||||
internal/api/types.go type AuthInfoResponse legacy-no-doc
|
||||
internal/api/types.go type BackupPolicy legacy-no-doc
|
||||
internal/api/types.go type BackupPolicyUpsertRequest legacy-no-doc
|
||||
internal/api/types.go type BackupPolicyListResponse legacy-no-doc
|
||||
internal/api/types.go type NamespaceBackupRequest legacy-no-doc
|
||||
internal/api/types.go type NamespaceBackupResult legacy-no-doc
|
||||
internal/api/types.go type NamespaceBackupResponse legacy-no-doc
|
||||
internal/api/types.go type NamespaceRestoreRequest legacy-no-doc
|
||||
internal/api/types.go type NamespaceRestoreResult legacy-no-doc
|
||||
internal/api/types.go type NamespaceRestoreResponse legacy-no-doc
|
||||
internal/api/types.go type B2UsageResponse legacy-no-doc
|
||||
internal/api/types.go type B2BucketUsage legacy-no-doc
|
||||
internal/config/config.go type Config legacy-no-doc
|
||||
internal/config/config.go func Load legacy-no-doc
|
||||
internal/k8s/client.go type Client legacy-no-doc
|
||||
internal/k8s/client.go func New legacy-no-doc
|
||||
internal/k8s/jobs.go type BackupJobSummary legacy-no-doc
|
||||
internal/k8s/jobs.go func ListBackupJobs legacy-no-doc
|
||||
internal/k8s/jobs.go func ReadBackupJobLog legacy-no-doc
|
||||
internal/k8s/jobs.go func ListBackupJobsForPVC legacy-no-doc
|
||||
internal/k8s/jobs.go func CreateBackupJob legacy-no-doc
|
||||
internal/k8s/jobs.go func CreateRestoreJob legacy-no-doc
|
||||
internal/k8s/state.go func LoadSecretData legacy-no-doc
|
||||
internal/k8s/state.go func SaveSecretData legacy-no-doc
|
||||
internal/k8s/volumes.go type PVCSummary legacy-no-doc
|
||||
internal/k8s/volumes.go func ResolvePVCVolume legacy-no-doc
|
||||
internal/k8s/volumes.go func ListBoundPVCs legacy-no-doc
|
||||
internal/k8s/volumes.go func PersistentVolumeClaimExists legacy-no-doc
|
||||
internal/longhorn/client.go type Client legacy-no-doc
|
||||
internal/longhorn/client.go func New legacy-no-doc
|
||||
internal/longhorn/client.go type APIError legacy-no-doc
|
||||
internal/longhorn/client.go func Error legacy-no-doc
|
||||
internal/longhorn/client.go type Volume legacy-no-doc
|
||||
internal/longhorn/client.go type BackupStatus legacy-no-doc
|
||||
internal/longhorn/client.go type BackupVolume legacy-no-doc
|
||||
internal/longhorn/client.go type Backup legacy-no-doc
|
||||
internal/longhorn/client.go func CreateSnapshot legacy-no-doc
|
||||
internal/longhorn/client.go func SnapshotBackup legacy-no-doc
|
||||
internal/longhorn/client.go func GetVolume legacy-no-doc
|
||||
internal/longhorn/client.go func CreateVolumeFromBackup legacy-no-doc
|
||||
internal/longhorn/client.go func CreatePVC legacy-no-doc
|
||||
internal/longhorn/client.go func DeleteVolume legacy-no-doc
|
||||
internal/longhorn/client.go func GetBackupVolume legacy-no-doc
|
||||
internal/longhorn/client.go func ListBackups legacy-no-doc
|
||||
internal/longhorn/client.go func FindBackup legacy-no-doc
|
||||
internal/server/metrics.go func Handler legacy-no-doc
|
||||
internal/server/metrics.go func RecordBackupRequest legacy-no-doc
|
||||
internal/server/metrics.go func RecordRestoreRequest legacy-no-doc
|
||||
internal/server/metrics.go func RecordPolicyBackup legacy-no-doc
|
||||
internal/server/metrics.go func RecordNamespaceBackupRequest legacy-no-doc
|
||||
internal/server/metrics.go func RecordNamespaceRestoreRequest legacy-no-doc
|
||||
internal/server/metrics.go func RecordAuthzDenied legacy-no-doc
|
||||
internal/server/metrics.go func RecordInventoryFailure legacy-no-doc
|
||||
internal/server/metrics.go func RecordInventory legacy-no-doc
|
||||
internal/server/metrics.go func RecordB2Usage legacy-no-doc
|
||||
internal/server/server.go type Server legacy-no-doc
|
||||
internal/server/server.go func New legacy-no-doc
|
||||
internal/server/server.go func Start legacy-no-doc
|
||||
internal/server/server.go func Handler legacy-no-doc
|
||||
internal/server/ui_renderer.go func ServeIndex legacy-no-doc
|
||||
internal/server/ui_renderer.go func ServeAsset legacy-no-doc
|
||||
|
97
scripts/loc_hygiene_check.py
Normal file
97
scripts/loc_hygiene_check.py
Normal file
@ -0,0 +1,97 @@
|
||||
#!/usr/bin/env python3
|
||||
"""Enforce source-file LOC budgets with explicit per-file waivers."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import argparse
|
||||
import json
|
||||
import sys
|
||||
from pathlib import Path
|
||||
|
||||
|
||||
def parse_args() -> argparse.Namespace:
|
||||
parser = argparse.ArgumentParser()
|
||||
parser.add_argument("--root", default=".")
|
||||
parser.add_argument("--max-lines", type=int, default=500)
|
||||
parser.add_argument("--waivers", required=True)
|
||||
parser.add_argument("--summary-json", default="")
|
||||
return parser.parse_args()
|
||||
|
||||
|
||||
def iter_source_files(root: Path) -> list[Path]:
|
||||
files: list[Path] = []
|
||||
for rel_root, suffixes in (
|
||||
("cmd", {".go"}),
|
||||
("internal", {".go"}),
|
||||
("web/src", {".ts", ".tsx", ".css"}),
|
||||
):
|
||||
base = root / rel_root
|
||||
if not base.exists():
|
||||
continue
|
||||
for path in sorted(base.rglob("*")):
|
||||
if not path.is_file():
|
||||
continue
|
||||
if path.suffix not in suffixes:
|
||||
continue
|
||||
rel = path.relative_to(root).as_posix()
|
||||
if rel.endswith("_test.go"):
|
||||
continue
|
||||
if rel.startswith("internal/server/ui-dist/"):
|
||||
continue
|
||||
files.append(path)
|
||||
return files
|
||||
|
||||
|
||||
def load_waivers(path: Path) -> dict[str, int]:
|
||||
waivers: dict[str, int] = {}
|
||||
if not path.exists():
|
||||
return waivers
|
||||
for raw in path.read_text(encoding="utf-8").splitlines():
|
||||
line = raw.strip()
|
||||
if not line or line.startswith("#"):
|
||||
continue
|
||||
parts = line.split("\t")
|
||||
if len(parts) < 2:
|
||||
continue
|
||||
rel = parts[0]
|
||||
try:
|
||||
max_lines = int(parts[1])
|
||||
except ValueError:
|
||||
continue
|
||||
waivers[rel] = max_lines
|
||||
return waivers
|
||||
|
||||
|
||||
def main() -> int:
|
||||
args = parse_args()
|
||||
root = Path(args.root).resolve()
|
||||
waivers = load_waivers(Path(args.waivers).resolve())
|
||||
|
||||
violations: list[str] = []
|
||||
over_500 = 0
|
||||
|
||||
for path in iter_source_files(root):
|
||||
rel = path.relative_to(root).as_posix()
|
||||
line_count = len(path.read_text(encoding="utf-8", errors="ignore").splitlines())
|
||||
if line_count > 500:
|
||||
over_500 += 1
|
||||
limit = waivers.get(rel, args.max_lines)
|
||||
if line_count > limit:
|
||||
violations.append(f"{rel}: {line_count} lines (limit {limit})")
|
||||
|
||||
summary = {"over_500": over_500, "violations": len(violations)}
|
||||
if args.summary_json:
|
||||
Path(args.summary_json).write_text(json.dumps(summary, indent=2), encoding="utf-8")
|
||||
|
||||
if violations:
|
||||
print("LOC hygiene check failed:")
|
||||
for item in violations:
|
||||
print(item)
|
||||
return 1
|
||||
|
||||
print("LOC hygiene checks: ok")
|
||||
return 0
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
sys.exit(main())
|
||||
4
scripts/loc_hygiene_waivers.tsv
Normal file
4
scripts/loc_hygiene_waivers.tsv
Normal file
@ -0,0 +1,4 @@
|
||||
# relative_path max_lines reason
|
||||
internal/k8s/jobs.go 670 legacy-oversize
|
||||
internal/server/server.go 2203 legacy-oversize
|
||||
web/src/App.tsx 918 legacy-oversize
|
||||
|
89
scripts/structure_hygiene_check.py
Normal file
89
scripts/structure_hygiene_check.py
Normal file
@ -0,0 +1,89 @@
|
||||
#!/usr/bin/env python3
|
||||
"""Enforce lightweight naming/layout hygiene for Soteria sources."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import argparse
|
||||
import re
|
||||
import sys
|
||||
from pathlib import Path
|
||||
|
||||
GENERIC_TOKENS = {
|
||||
"tmp",
|
||||
"temp",
|
||||
"foo",
|
||||
"bar",
|
||||
"baz",
|
||||
"misc",
|
||||
"new",
|
||||
"old",
|
||||
"final",
|
||||
"wip",
|
||||
}
|
||||
MAX_DEPTH = 10
|
||||
|
||||
|
||||
def parse_args() -> argparse.Namespace:
|
||||
parser = argparse.ArgumentParser()
|
||||
parser.add_argument("--root", default=".")
|
||||
return parser.parse_args()
|
||||
|
||||
|
||||
def iter_source_files(root: Path) -> list[Path]:
|
||||
files: list[Path] = []
|
||||
for rel_root, suffixes in (
|
||||
("cmd", {".go"}),
|
||||
("internal", {".go"}),
|
||||
("web/src", {".ts", ".tsx", ".css"}),
|
||||
):
|
||||
base = root / rel_root
|
||||
if not base.exists():
|
||||
continue
|
||||
for path in sorted(base.rglob("*")):
|
||||
if not path.is_file():
|
||||
continue
|
||||
if path.suffix not in suffixes:
|
||||
continue
|
||||
rel = path.relative_to(root).as_posix()
|
||||
if rel.endswith("_test.go"):
|
||||
continue
|
||||
if rel.startswith("internal/server/ui-dist/"):
|
||||
continue
|
||||
files.append(path)
|
||||
return files
|
||||
|
||||
|
||||
def filename_tokens(path: Path) -> list[str]:
|
||||
stem = path.stem
|
||||
return [tok for tok in re.split(r"[-_]", stem) if tok]
|
||||
|
||||
|
||||
def main() -> int:
|
||||
args = parse_args()
|
||||
root = Path(args.root).resolve()
|
||||
violations: list[str] = []
|
||||
files = iter_source_files(root)
|
||||
|
||||
for path in files:
|
||||
rel = path.resolve().relative_to(root).as_posix()
|
||||
depth = len(Path(rel).parts)
|
||||
if depth > MAX_DEPTH:
|
||||
violations.append(f"{rel}: depth {depth} exceeds {MAX_DEPTH}")
|
||||
|
||||
tokens = {tok.lower() for tok in filename_tokens(path)}
|
||||
bad = sorted(tokens.intersection(GENERIC_TOKENS))
|
||||
if bad:
|
||||
violations.append(f"{rel}: non-descriptive filename token(s): {', '.join(bad)}")
|
||||
|
||||
if violations:
|
||||
print("Structure hygiene check failed:")
|
||||
for item in violations:
|
||||
print(item)
|
||||
return 1
|
||||
|
||||
print("Structure hygiene checks: ok")
|
||||
return 0
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
sys.exit(main())
|
||||
53
scripts/ui_test_framework_check.py
Normal file
53
scripts/ui_test_framework_check.py
Normal file
@ -0,0 +1,53 @@
|
||||
#!/usr/bin/env python3
|
||||
"""Enforce frontend test framework policy: Jest + Playwright only."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import argparse
|
||||
import json
|
||||
import sys
|
||||
from pathlib import Path
|
||||
|
||||
BANNED_FRAMEWORKS = {"vitest", "cypress", "mocha", "ava", "karma", "qunit"}
|
||||
ALLOWED_TEST_FRAMEWORKS = {"jest", "@playwright/test", "playwright"}
|
||||
|
||||
|
||||
def parse_args() -> argparse.Namespace:
|
||||
parser = argparse.ArgumentParser()
|
||||
parser.add_argument("--root", default=".")
|
||||
return parser.parse_args()
|
||||
|
||||
|
||||
def main() -> int:
|
||||
args = parse_args()
|
||||
root = Path(args.root).resolve()
|
||||
package_json = root / "web" / "package.json"
|
||||
if not package_json.exists():
|
||||
print("UI framework check: skipped (web/package.json not found)")
|
||||
return 0
|
||||
|
||||
payload = json.loads(package_json.read_text(encoding="utf-8"))
|
||||
deps = payload.get("dependencies", {}) or {}
|
||||
dev_deps = payload.get("devDependencies", {}) or {}
|
||||
scripts = payload.get("scripts", {}) or {}
|
||||
all_pkgs = set(deps.keys()) | set(dev_deps.keys())
|
||||
|
||||
banned = sorted(BANNED_FRAMEWORKS.intersection(all_pkgs))
|
||||
if banned:
|
||||
print("UI framework check failed: banned test framework(s) detected:")
|
||||
for pkg in banned:
|
||||
print(f" {pkg}")
|
||||
return 1
|
||||
|
||||
has_test_script = any(name.startswith("test") for name in scripts.keys())
|
||||
has_allowed_framework = bool(ALLOWED_TEST_FRAMEWORKS.intersection(all_pkgs))
|
||||
if has_test_script and not has_allowed_framework:
|
||||
print("UI framework check failed: test scripts exist but Jest/Playwright dependency is missing")
|
||||
return 1
|
||||
|
||||
print("UI framework checks: ok")
|
||||
return 0
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
sys.exit(main())
|
||||
Loading…
x
Reference in New Issue
Block a user