testing: add unified quality gate

This commit is contained in:
Brad Stein 2026-04-11 00:02:26 -03:00
parent 00e6208d97
commit 8245e1aaa7
46 changed files with 5276 additions and 133 deletions

177
Jenkinsfile vendored
View File

@ -51,6 +51,13 @@ spec:
volumeMounts:
- name: workspace-volume
mountPath: /home/jenkins/agent
- name: frontend
image: mcr.microsoft.com/playwright:v1.51.0-jammy
command: ["cat"]
tty: true
volumeMounts:
- name: workspace-volume
mountPath: /home/jenkins/agent
volumes:
- name: workspace-volume
emptyDir: {}
@ -176,8 +183,40 @@ spec:
set -euo pipefail
mkdir -p build
export PYTHONPATH="${WORKSPACE}/backend:${PYTHONPATH:-}"
python -m pip install --no-cache-dir -r backend/requirements.txt pytest pytest-mock
python -m pytest backend/tests -q --junitxml=build/junit-backend.xml
python -m pip install --no-cache-dir -r backend/requirements.txt -r backend/requirements-dev.txt
python -m pytest backend/tests -q --cov=backend/atlas_portal --cov-report=xml:build/backend-coverage.xml --junitxml=build/junit-backend.xml
'''
}
}
}
stage('Frontend tests') {
steps {
container('frontend') {
sh '''
set -euo pipefail
mkdir -p build
cd frontend
npm ci
npm run lint
npm run test:unit
npm run test:component
npm run test:e2e
'''
}
}
}
stage('Unified quality gate') {
steps {
container('tester') {
sh '''
set -euo pipefail
export PYTHONPATH="${WORKSPACE}:${PYTHONPATH:-}"
python -m testing.ci.quality_gate \
--backend-coverage build/backend-coverage.xml \
--frontend-coverage frontend/coverage/coverage-summary.json \
--report build/quality-gate.json
'''
}
}
@ -225,66 +264,12 @@ spec:
container('tester') {
sh '''
set -euo pipefail
export QUALITY_STATUS=ok
python - <<'PY'
import os
import re
import urllib.request
import xml.etree.ElementTree as ET
from pathlib import Path
suite = os.environ.get("SUITE_NAME", "bstein-home")
status = os.environ.get("QUALITY_STATUS", "failed")
gateway = os.environ.get("PUSHGATEWAY_URL", "http://platform-quality-gateway.monitoring.svc.cluster.local:9091").rstrip("/")
text = urllib.request.urlopen(f"{gateway}/metrics", timeout=10).read().decode("utf-8", errors="replace")
def counter(name: str) -> float:
pattern = re.compile(
rf'^platform_quality_gate_runs_total\\{{[^}}]*job="platform-quality-ci"[^}}]*suite="{re.escape(suite)}"[^}}]*status="{name}"[^}}]*\\}}\\s+([0-9]+(?:\\.[0-9]+)?)$',
re.M,
)
match = pattern.search(text)
return float(match.group(1)) if match else 0.0
ok = counter("ok")
failed = counter("failed")
if status == "ok":
ok += 1
else:
failed += 1
totals = {"tests": 0, "failures": 0, "errors": 0, "skipped": 0}
junit_path = Path("build/junit-backend.xml")
if junit_path.exists():
root = ET.parse(junit_path).getroot()
suites = [root] if root.tag == "testsuite" else list(root.findall("testsuite")) if root.tag == "testsuites" else []
for node in suites:
for key in totals:
raw = node.attrib.get(key) or "0"
try:
totals[key] += int(float(raw))
except ValueError:
pass
passed = max(totals["tests"] - totals["failures"] - totals["errors"] - totals["skipped"], 0)
payload = (
"# TYPE platform_quality_gate_runs_total counter\\n"
f'platform_quality_gate_runs_total{{suite="{suite}",status="ok"}} {int(ok)}\\n'
f'platform_quality_gate_runs_total{{suite="{suite}",status="failed"}} {int(failed)}\\n'
"# TYPE bstein_home_quality_gate_tests_total gauge\\n"
f'bstein_home_quality_gate_tests_total{{suite="{suite}",result="passed"}} {passed}\\n'
f'bstein_home_quality_gate_tests_total{{suite="{suite}",result="failed"}} {totals["failures"]}\\n'
f'bstein_home_quality_gate_tests_total{{suite="{suite}",result="error"}} {totals["errors"]}\\n'
f'bstein_home_quality_gate_tests_total{{suite="{suite}",result="skipped"}} {totals["skipped"]}\\n'
)
req = urllib.request.Request(
f"{gateway}/metrics/job/platform-quality-ci/suite/{suite}",
data=payload.encode("utf-8"),
method="POST",
headers={"Content-Type": "text/plain"},
)
urllib.request.urlopen(req, timeout=10).read()
PY
python -m testing.ci.publish_metrics \
--gateway "${PUSHGATEWAY_URL}" \
--suite "${SUITE_NAME}" \
--job platform-quality-ci \
--status ok \
--junit build/junit-backend.xml build/junit-frontend-unit.xml build/junit-frontend-component.xml build/junit-frontend-e2e.xml
'''
}
}
@ -292,66 +277,12 @@ PY
container('tester') {
sh '''
set -euo pipefail
export QUALITY_STATUS=failed
python - <<'PY'
import os
import re
import urllib.request
import xml.etree.ElementTree as ET
from pathlib import Path
suite = os.environ.get("SUITE_NAME", "bstein-home")
status = os.environ.get("QUALITY_STATUS", "failed")
gateway = os.environ.get("PUSHGATEWAY_URL", "http://platform-quality-gateway.monitoring.svc.cluster.local:9091").rstrip("/")
text = urllib.request.urlopen(f"{gateway}/metrics", timeout=10).read().decode("utf-8", errors="replace")
def counter(name: str) -> float:
pattern = re.compile(
rf'^platform_quality_gate_runs_total\\{{[^}}]*job="platform-quality-ci"[^}}]*suite="{re.escape(suite)}"[^}}]*status="{name}"[^}}]*\\}}\\s+([0-9]+(?:\\.[0-9]+)?)$',
re.M,
)
match = pattern.search(text)
return float(match.group(1)) if match else 0.0
ok = counter("ok")
failed = counter("failed")
if status == "ok":
ok += 1
else:
failed += 1
totals = {"tests": 0, "failures": 0, "errors": 0, "skipped": 0}
junit_path = Path("build/junit-backend.xml")
if junit_path.exists():
root = ET.parse(junit_path).getroot()
suites = [root] if root.tag == "testsuite" else list(root.findall("testsuite")) if root.tag == "testsuites" else []
for node in suites:
for key in totals:
raw = node.attrib.get(key) or "0"
try:
totals[key] += int(float(raw))
except ValueError:
pass
passed = max(totals["tests"] - totals["failures"] - totals["errors"] - totals["skipped"], 0)
payload = (
"# TYPE platform_quality_gate_runs_total counter\\n"
f'platform_quality_gate_runs_total{{suite="{suite}",status="ok"}} {int(ok)}\\n'
f'platform_quality_gate_runs_total{{suite="{suite}",status="failed"}} {int(failed)}\\n'
"# TYPE bstein_home_quality_gate_tests_total gauge\\n"
f'bstein_home_quality_gate_tests_total{{suite="{suite}",result="passed"}} {passed}\\n'
f'bstein_home_quality_gate_tests_total{{suite="{suite}",result="failed"}} {totals["failures"]}\\n'
f'bstein_home_quality_gate_tests_total{{suite="{suite}",result="error"}} {totals["errors"]}\\n'
f'bstein_home_quality_gate_tests_total{{suite="{suite}",result="skipped"}} {totals["skipped"]}\\n'
)
req = urllib.request.Request(
f"{gateway}/metrics/job/platform-quality-ci/suite/{suite}",
data=payload.encode("utf-8"),
method="POST",
headers={"Content-Type": "text/plain"},
)
urllib.request.urlopen(req, timeout=10).read()
PY
python -m testing.ci.publish_metrics \
--gateway "${PUSHGATEWAY_URL}" \
--suite "${SUITE_NAME}" \
--job platform-quality-ci \
--status failed \
--junit build/junit-backend.xml build/junit-frontend-unit.xml build/junit-frontend-component.xml build/junit-frontend-e2e.xml
'''
}
}
@ -360,7 +291,7 @@ PY
def props = fileExists('build.env') ? readProperties(file: 'build.env') : [:]
echo "Build complete for ${props['SEMVER'] ?: env.VERSION_TAG}"
}
archiveArtifacts artifacts: 'build/junit-backend.xml', allowEmptyArchive: true
archiveArtifacts artifacts: 'build/junit-backend.xml,build/junit-frontend-unit.xml,build/junit-frontend-component.xml,build/junit-frontend-e2e.xml,build/quality-gate.json', allowEmptyArchive: true
}
}
}

View File

@ -11,6 +11,12 @@ from .routes import access_requests, account, admin_access, ai, auth_config, hea
def create_app() -> Flask:
"""Build the Flask app with API routes and SPA fallback handling.
WHY: the portal needs a single assembly point so the API, auth routes, and
frontend fallback all stay wired the same way in Flask, tests, and Jenkins.
"""
app = Flask(__name__, static_folder="../frontend/dist", static_url_path="")
app.wsgi_app = ProxyFix(app.wsgi_app, x_for=1, x_proto=1, x_host=1, x_port=1)
CORS(app, resources={r"/api/*": {"origins": "*"}})
@ -27,6 +33,8 @@ def create_app() -> Flask:
@app.route("/", defaults={"path": ""})
@app.route("/<path:path>")
def serve_frontend(path: str) -> Any:
"""Serve the compiled SPA assets or return a JSON build hint."""
dist_path = Path(app.static_folder)
index_path = dist_path / "index.html"

View File

@ -8,6 +8,12 @@ _RATE_BUCKETS: dict[str, dict[str, list[float]]] = {}
def rate_limit_allow(ip: str, *, key: str, limit: int, window_sec: int) -> bool:
"""Return whether a request bucket still has capacity.
WHY: access-request endpoints need a simple in-process guard that is easy to
exercise in tests and cheap to apply before any heavier work starts.
"""
if limit <= 0:
return True
now = time.time()

View File

@ -9,8 +9,12 @@ from .. import settings
def register(app) -> None:
"""Expose the login URLs the frontend needs for auth state rendering."""
@app.route("/api/auth/config", methods=["GET"])
def auth_config() -> Any:
"""Render the auth configuration payload consumed by the SPA."""
if not settings.KEYCLOAK_ENABLED:
return jsonify({"enabled": False})

View File

@ -6,7 +6,10 @@ from flask import jsonify
def register(app) -> None:
"""Register the lightweight health endpoint on the Flask app."""
@app.route("/api/healthz")
def healthz() -> Any:
return jsonify({"ok": True})
"""Return the basic liveness payload used by probes and tests."""
return jsonify({"ok": True})

View File

@ -11,12 +11,15 @@ from .. import settings
def register(app) -> None:
"""Expose the Monero node health endpoint through Flask."""
@app.route("/api/monero/get_info")
def monero_get_info() -> Any:
"""Proxy `get_info` from the Monero daemon with a predictable response."""
try:
with urlopen(settings.MONERO_GET_INFO_URL, timeout=2) as resp:
payload = json.loads(resp.read().decode("utf-8"))
return jsonify(payload)
except (URLError, TimeoutError, ValueError) as exc:
return jsonify({"error": str(exc), "url": settings.MONERO_GET_INFO_URL}), 503

View File

@ -4,6 +4,8 @@ import os
def _env_bool(name: str, default: str = "false") -> bool:
"""Parse a truthy environment variable with the repo's boolean semantics."""
return os.getenv(name, default).lower() in ("1", "true", "yes")

View File

@ -10,11 +10,19 @@ from . import settings
def random_password(length: int = 32) -> str:
"""Generate a URL-safe mixed-case password for one-off account bootstrap."""
alphabet = string.ascii_letters + string.digits
return "".join(secrets.choice(alphabet) for _ in range(length))
def best_effort_post(url: str) -> None:
"""Fire-and-forget a JSON ping without letting transport failures bubble.
WHY: background sync helpers should keep moving even if the destination is
briefly unavailable or the cluster network is in a bad state.
"""
if not url:
return
try:
@ -22,4 +30,3 @@ def best_effort_post(url: str) -> None:
client.post(url, json={"ts": int(time.time())})
except Exception:
return

View File

@ -0,0 +1,3 @@
pytest==8.3.4
pytest-cov==6.0.0
pytest-mock==3.14.0

15
backend/tests/conftest.py Normal file
View File

@ -0,0 +1,15 @@
"""Pytest bootstrap for backend tests.

The backend package lives under `backend/`, so test runs from the repository
root need that directory on `sys.path` before importing `atlas_portal`.
"""

from __future__ import annotations

import sys
from pathlib import Path

# backend/ directory: the parent of this tests/ folder.
ROOT = Path(__file__).resolve().parents[1]
if str(ROOT) not in sys.path:
    sys.path.insert(0, str(ROOT))

View File

@ -0,0 +1,50 @@
"""Tests for Flask application assembly and frontend fallback behavior."""

from __future__ import annotations

from pathlib import Path

from atlas_portal.app_factory import create_app


def test_create_app_exposes_health_endpoint() -> None:
    """The assembled app must answer the health probe with a JSON ok."""
    app = create_app()
    client = app.test_client()
    resp = client.get("/api/healthz")
    assert resp.status_code == 200
    assert resp.get_json() == {"ok": True}


def test_create_app_returns_json_when_frontend_is_missing() -> None:
    """Without a built SPA, the root route should return a build hint."""
    app = create_app()
    client = app.test_client()
    original = app.static_folder
    # Point the app at a directory that does not exist to force the fallback.
    app.static_folder = str(Path("/tmp") / "missing-frontend-dist")
    try:
        resp = client.get("/")
    finally:
        app.static_folder = original
    data = resp.get_json()
    assert resp.status_code == 200
    assert "Frontend not built yet" in data["message"]


def test_create_app_serves_existing_static_assets(tmp_path) -> None:
    """Files that exist under the static folder are served verbatim."""
    app = create_app()
    (tmp_path / "index.html").write_text("<html>ok</html>")
    (tmp_path / "asset.txt").write_text("payload")
    original = app.static_folder
    app.static_folder = str(tmp_path)
    try:
        with app.test_request_context("/asset.txt"):
            resp = app.view_functions["serve_frontend"]("asset.txt")
    finally:
        app.static_folder = original
    assert resp.status_code == 200
    # send_file responses stream by default; disable passthrough to read the body.
    resp.direct_passthrough = False
    assert resp.get_data() == b"payload"

View File

@ -0,0 +1,36 @@
"""Tests for the Keycloak auth config route."""

from __future__ import annotations

from atlas_portal import settings
from atlas_portal.app_factory import create_app


def test_auth_config_disabled_by_default() -> None:
    """With Keycloak off, the route reports a disabled auth config."""
    app = create_app()
    client = app.test_client()
    resp = client.get("/api/auth/config")
    assert resp.status_code == 200
    assert resp.get_json() == {"enabled": False}


def test_auth_config_builds_urls_when_enabled(monkeypatch) -> None:
    """With Keycloak configured, the payload carries realm-derived URLs."""
    monkeypatch.setattr(settings, "KEYCLOAK_ENABLED", True)
    monkeypatch.setattr(settings, "KEYCLOAK_URL", "https://sso.example.dev")
    monkeypatch.setattr(settings, "KEYCLOAK_REALM", "atlas")
    monkeypatch.setattr(settings, "KEYCLOAK_CLIENT_ID", "portal-client")
    monkeypatch.setattr(settings, "KEYCLOAK_ISSUER", "https://sso.example.dev/realms/atlas")
    app = create_app()
    client = app.test_client()
    resp = client.get("/api/auth/config", base_url="https://portal.example.dev")
    data = resp.get_json()
    assert resp.status_code == 200
    assert data["enabled"] is True
    assert data["login_url"].startswith(
        "https://sso.example.dev/realms/atlas/protocol/openid-connect/auth"
    )
    assert "client_id=portal-client" in data["login_url"]
    assert data["account_password_url"].endswith("#/security/signingin")

View File

@ -0,0 +1,46 @@
"""Tests for the tiny health and Monero endpoints."""

from __future__ import annotations

import json
from urllib.error import URLError

from atlas_portal.app_factory import create_app
from atlas_portal.routes import monero


def test_monero_endpoint_returns_upstream_json(monkeypatch) -> None:
    """A healthy daemon response is proxied through unchanged."""

    class DummyResponse:
        """Stand-in for the urlopen context manager."""

        def __enter__(self):
            return self

        def __exit__(self, exc_type, exc, tb):
            return False

        def read(self):
            return json.dumps({"status": "OK", "nettype": "mainnet"}).encode("utf-8")

    monkeypatch.setattr(monero, "urlopen", lambda *args, **kwargs: DummyResponse())
    app = create_app()
    client = app.test_client()
    resp = client.get("/api/monero/get_info")
    assert resp.status_code == 200
    assert resp.get_json()["status"] == "OK"


def test_monero_endpoint_handles_upstream_failure(monkeypatch) -> None:
    """Transport failures surface as a 503 with the target URL attached."""

    def boom(*args, **kwargs):
        raise URLError("boom")

    monkeypatch.setattr(monero, "urlopen", boom)
    app = create_app()
    client = app.test_client()
    resp = client.get("/api/monero/get_info")
    assert resp.status_code == 503
    assert resp.get_json()["url"].startswith("http://")

View File

@ -0,0 +1,75 @@
"""Tests for generic backend utilities used across routes."""

from __future__ import annotations

from atlas_portal import rate_limit, utils


def test_rate_limit_allows_when_limit_is_non_positive() -> None:
    """A non-positive limit disables rate limiting entirely."""
    assert rate_limit.rate_limit_allow("1.2.3.4", key="access", limit=0, window_sec=60)
    assert rate_limit.rate_limit_allow("1.2.3.4", key="access", limit=-1, window_sec=60)


def test_rate_limit_rejects_after_limit(monkeypatch) -> None:
    """With limit=2, the third call in the same window is rejected."""
    # Freeze time so all calls land in one window.
    monkeypatch.setattr(rate_limit.time, "time", lambda: 100.0)
    assert rate_limit.rate_limit_allow("1.2.3.4", key="access", limit=2, window_sec=60)
    assert rate_limit.rate_limit_allow("1.2.3.4", key="access", limit=2, window_sec=60)
    assert not rate_limit.rate_limit_allow("1.2.3.4", key="access", limit=2, window_sec=60)


def test_random_password_has_requested_length() -> None:
    """Generated passwords honor the requested length and stay alphanumeric."""
    password = utils.random_password(24)
    assert len(password) == 24
    assert password.isalnum()


def test_best_effort_post_ignores_errors(monkeypatch) -> None:
    """Transport failures inside the client must not propagate to callers."""
    calls = []

    class DummyClient:
        """httpx.Client stand-in whose post always fails."""

        def __init__(self, timeout):
            calls.append(timeout)

        def __enter__(self):
            return self

        def __exit__(self, exc_type, exc, tb):
            return False

        def post(self, url, json=None):
            raise RuntimeError("boom")

    monkeypatch.setattr(utils.httpx, "Client", DummyClient)
    utils.best_effort_post("https://example.dev/hook")
    assert calls


def test_best_effort_post_success(monkeypatch) -> None:
    """A reachable endpoint receives exactly one JSON ping."""
    posts = []

    class DummyClient:
        """httpx.Client stand-in that records posted payloads."""

        def __init__(self, timeout):
            self.timeout = timeout

        def __enter__(self):
            return self

        def __exit__(self, exc_type, exc, tb):
            return False

        def post(self, url, json=None):
            posts.append((url, json))
            return None

    monkeypatch.setattr(utils.httpx, "Client", DummyClient)
    utils.best_effort_post("https://example.dev/hook")
    assert posts and posts[0][0] == "https://example.dev/hook"


def test_best_effort_post_ignores_empty_url() -> None:
    """An empty URL is a no-op rather than an error."""
    utils.best_effort_post("")

View File

@ -0,0 +1,26 @@
"""Tests for environment-backed settings parsing."""

from __future__ import annotations

import importlib


def test_env_bool_handles_truthy_and_falsey(monkeypatch) -> None:
    """_env_bool accepts case-insensitive truthy strings and rejects others."""
    import atlas_portal.settings as settings

    monkeypatch.setenv("TEST_FLAG", "YES")
    assert settings._env_bool("TEST_FLAG") is True
    monkeypatch.setenv("TEST_FLAG", "0")
    assert settings._env_bool("TEST_FLAG") is False


def test_settings_reload_picks_up_environment(monkeypatch) -> None:
    """Reloading the module re-reads environment-derived settings."""
    monkeypatch.setenv("KEYCLOAK_ENABLED", "true")
    monkeypatch.setenv("PORTAL_ADMIN_USERS", "alice,bob")
    import atlas_portal.settings as settings

    reloaded = importlib.reload(settings)
    assert reloaded.KEYCLOAK_ENABLED is True
    assert reloaded.PORTAL_ADMIN_USERS == ["alice", "bob"]

File diff suppressed because it is too large Load Diff

View File

@ -7,7 +7,12 @@
"dev": "vite",
"prebuild": "node scripts/build_media_manifest.mjs",
"build": "vite build",
"preview": "vite preview"
"preview": "vite preview",
"test:unit": "vitest run --coverage --config ../testing/frontend/vitest.config.js",
"test:component": "playwright test --config ../testing/frontend/playwright-ct.config.mjs",
"test:e2e": "playwright test --config ../testing/frontend/playwright.config.mjs",
"test": "npm run test:unit && npm run test:component && npm run test:e2e",
"lint": "cd .. && eslint --config testing/frontend/eslint.config.js $(find frontend/src testing/frontend -type f \\( -name '*.js' -o -name '*.mjs' \\) | sort)"
},
"dependencies": {
"axios": "^1.6.7",
@ -18,7 +23,16 @@
"vue-router": "^4.3.2"
},
"devDependencies": {
"@eslint/js": "^9.22.0",
"@playwright/experimental-ct-vue": "^1.51.0",
"@playwright/test": "^1.51.0",
"@vitest/coverage-v8": "^3.0.9",
"@vitejs/plugin-vue": "^5.0.4",
"@vue/test-utils": "^2.4.6",
"eslint": "^9.22.0",
"globals": "^16.0.0",
"jsdom": "^26.0.0",
"vitest": "^3.0.9",
"vite": "^5.2.0"
}
}

View File

@ -0,0 +1,12 @@
<!doctype html>
<html lang="en">
<head>
<meta charset="UTF-8" />
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
<title>Playwright CT</title>
</head>
<body>
<div id="app"></div>
<script type="module" src="./index.ts"></script>
</body>
</html>

View File

@ -0,0 +1 @@
export {};

View File

@ -18,7 +18,27 @@ export const auth = reactive({
let keycloak = null;
let initPromise = null;
function normalizeGroups(groups) {
/**
* Build a Keycloak client for the current environment.
*
* WHY: tests need to inject a predictable client without changing the runtime
* behavior for the browser.
*/
export function createKeycloak(config) {
const factory = globalThis.__ATLAS_KEYCLOAK_FACTORY__;
if (typeof factory === "function") return factory(config);
const ctor = globalThis.__ATLAS_KEYCLOAK_CONSTRUCTOR__;
if (typeof ctor === "function") return new ctor(config);
return new Keycloak(config);
}
/**
* Normalize Keycloak groups into the format the UI expects.
*
* @param {unknown} groups - Raw group list from the access token.
* @returns {string[]} A cleaned list of group names without leading slashes.
*/
export function normalizeGroups(groups) {
if (!Array.isArray(groups)) return [];
return groups
.filter((g) => typeof g === "string")
@ -26,6 +46,12 @@ function normalizeGroups(groups) {
.filter(Boolean);
}
/**
* Refresh the reactive auth state from the current Keycloak token.
*
* WHY: the UI reads from a shared reactive object, so a token refresh needs to
* update all dependent fields in one place.
*/
function updateFromToken() {
const parsed = keycloak?.tokenParsed || {};
auth.authenticated = Boolean(keycloak?.authenticated);
@ -35,6 +61,11 @@ function updateFromToken() {
auth.groups = normalizeGroups(parsed.groups);
}
/**
* Initialize Keycloak session probing and populate the reactive auth state.
*
* @returns {Promise<void>} A singleton promise so callers can await startup.
*/
export async function initAuth() {
if (initPromise) return initPromise;
@ -51,7 +82,7 @@ export async function initAuth() {
if (!auth.enabled) return;
keycloak = new Keycloak({
keycloak = createKeycloak({
url: cfg.url,
realm: cfg.realm,
clientId: cfg.client_id,
@ -92,6 +123,10 @@ export async function initAuth() {
return initPromise;
}
/**
* Open the Keycloak login flow and preserve the current location as the return
* target.
*/
export async function login(
redirectPath = window.location.pathname + window.location.search + window.location.hash,
loginHint = "",
@ -105,11 +140,21 @@ export async function login(
await keycloak.login(options);
}
/**
* Log the current user out of Keycloak and return them to the portal root.
*/
export async function logout() {
if (!keycloak) return;
await keycloak.logout({ redirectUri: window.location.origin });
}
/**
* Perform a fetch with the current bearer token attached when available.
*
* @param {string} url - Target URL.
* @param {RequestInit} options - Standard fetch options.
* @returns {Promise<Response>} The browser fetch response.
*/
export async function authFetch(url, options = {}) {
const headers = new Headers(options.headers || {});
if (keycloak?.authenticated) {

View File

@ -1,3 +1,9 @@
/**
* Return the static Atlas and Oceanus hardware inventory used as fallback data.
*
* WHY: the home page needs stable content when live cluster data cannot be
* fetched during startup or testing.
*/
export function fallbackHardware() {
return {
clusters: [
@ -39,6 +45,11 @@ export function fallbackHardware() {
};
}
/**
* Return the curated service catalog shown on the home page when live data is absent.
*
* WHY: the service grid must stay useful without a live backend response.
*/
export function fallbackServices() {
return {
services: [
@ -262,6 +273,11 @@ export function fallbackServices() {
};
}
/**
* Return the static ingress and egress relationships that power the network diagram.
*
* WHY: the topology diagram needs deterministic fallback data for offline runs.
*/
export function fallbackNetwork() {
return {
ingress: [
@ -297,6 +313,12 @@ export function fallbackNetwork() {
};
}
/**
* Return the Atlas metrics summary card content used on the overview page.
*
* WHY: the metrics cards should still render a coherent overview if live
* dashboard links are unavailable.
*/
export function fallbackMetrics() {
return {
dashboard: "https://metrics.bstein.dev",
@ -304,7 +326,16 @@ export function fallbackMetrics() {
};
}
export function buildHardwareDiagram(data) {
/**
* Render the hardware topology diagram used on the home page.
*
* WHY: the landing page needs a deterministic Mermaid diagram even before
* live cluster state is available.
*
* @param {object} _data - Live hardware state, accepted for future shaping.
* @returns {string} Mermaid flowchart text for the Atlas hardware overview.
*/
export function buildHardwareDiagram(_data) {
return `
flowchart TB
subgraph TitanLab["Titan Lab (25 nodes)"]
@ -370,6 +401,12 @@ flowchart TB
`;
}
/**
* Render the ingress and auth sequence for the portal network flow.
*
* WHY: the home page should explain request routing without depending on live
* cluster state.
*/
export function buildNetworkDiagram() {
return `
sequenceDiagram
@ -394,6 +431,12 @@ sequenceDiagram
`;
}
/**
* Render the delivery pipeline from developer push to Flux reconciliation.
*
* WHY: the overview page needs a compact visual of the release path even when
* the CI backend is not reachable.
*/
export function buildPipelineDiagram() {
return `
flowchart LR

View File

@ -99,7 +99,9 @@ const atlasPillClass = computed(() => (props.labStatus?.atlas?.up ? "pill-ok" :
const oceanusPillClass = computed(() => (props.labStatus?.oceanus?.up ? "pill-ok" : "pill-bad"));
const metricItems = computed(() => {
const items = [
const items = props.metricsData?.items?.length
? props.metricsData.items
: [
{ label: "Lab nodes", value: "26", note: "Workers: 8 rpi5s, 8 rpi4s, 2 jetsons,\n\t\t\t\t 1 minipc\nControl plane: 3 rpi5\nDedicated Hosts: oceanus, titan-db,\n\t\t\t\t\t\t\t\t tethys, theia" },
{ label: "CPU cores", value: "142", note: "32 arm64 cores @ 1.5Ghz\n12 arm64 cores @ 1.9Ghz\n52 arm64 cores @ 2.4Ghz\n10 amd64 cores @ 5.00Ghz\n12 amd64 cores @ 4.67Ghz\n24 amd64 cores @ 4.04Ghz" },
{
@ -108,7 +110,7 @@ const metricItems = computed(() => {
note: "64GB Raspberry Pi 4\n104GB Raspberry Pi 5\n32GB NVIDIA Jetson Xavier\n352GB AMD64 Chipsets",
},
{ label: "Storage", value: "80 TB", note: "astreae: 32GB/4xRPI4\nasteria: 48GB/4xRPI4" },
];
];
return items.map((item) => ({
...item,
note: item.note ? item.note.replaceAll("\t", " ") : "",
@ -127,6 +129,15 @@ const hardwareDiagram = computed(() => buildHardwareDiagram(props.labData || {})
const networkDiagram = computed(() => buildNetworkDiagram(props.networkData || {}));
const pipelineDiagram = computed(() => buildPipelineDiagram());
/**
* Pick a friendly emoji icon for a service name.
*
* WHY: the service grid should stay readable even when upstream service data
* omits a custom icon, so the default icon needs to be deterministic.
*
* @param {string} name - Service display name.
* @returns {string} Emoji used in the service grid card.
*/
function pickIcon(name) {
const h = name.toLowerCase();
if (h.includes("nextcloud")) return "☁️";

13
testing/README.md Normal file
View File

@ -0,0 +1,13 @@
# Testing Strategy
The repo keeps test orchestration separate from application code:
- `testing/quality_contract.json` defines the managed production scope that the quality gate owns.
- `testing/ci/` holds shared CI helpers for file-size ratchets, docstring checks, coverage checks, and Pushgateway publishing.
- `testing/tests/` exercises the CI helpers themselves so the gate logic stays stable.
- `backend/tests/` holds backend unit and route tests, run with `pytest`.
- `testing/frontend/unit/` holds Vitest coverage tests for frontend logic and Vue components.
- `testing/frontend/component/` holds Playwright component tests for browser-mounted Vue components.
- `testing/frontend/e2e/` holds Playwright end-to-end smoke tests against the live frontend dev server.
The goal is to keep production code focused on app behavior while CI and local workflow logic lives in one place that both Jenkins and developers can reuse. The top-level `testing` package is the repo-owned home for quality policy, browser automation, and metrics publishing.

1
testing/__init__.py Normal file
View File

@ -0,0 +1 @@
"""Top-level test orchestration helpers for the repository."""

1
testing/ci/__init__.py Normal file
View File

@ -0,0 +1 @@
"""Continuous-integration helpers for test results and quality gates."""

View File

@ -0,0 +1,40 @@
"""Command-line entry point for publishing CI test metrics."""

from __future__ import annotations

import argparse
from pathlib import Path

from .summary import load_junit_summary, publish_quality_metrics
def _build_parser() -> argparse.ArgumentParser:
"""Build the CLI parser for the metrics publisher."""
parser = argparse.ArgumentParser(description="Publish test-suite metrics to Pushgateway")
parser.add_argument("--gateway", required=True, help="Pushgateway base URL")
parser.add_argument("--suite", required=True, help="Logical suite name")
parser.add_argument("--job", default="platform-quality-ci", help="Pushgateway job label")
parser.add_argument("--status", choices=("ok", "failed"), required=True, help="Gate outcome")
parser.add_argument("--junit", nargs="*", default=(), help="JUnit XML files to aggregate")
return parser
def main(argv: list[str] | None = None) -> int:
    """Parse CLI arguments, aggregate JUnit files, and push the metrics.

    Returns 0 on success so the shell wrapper can propagate the exit code.
    """
    args = _build_parser().parse_args(argv)
    junit_paths = (Path(item) for item in args.junit)
    summary = load_junit_summary(junit_paths)
    publish_quality_metrics(
        gateway=args.gateway,
        suite=args.suite,
        job=args.job,
        status=args.status,
        summary=summary,
    )
    return 0
# Allow `python -m testing.ci.publish_metrics` to run the CLI directly.
if __name__ == "__main__":
    raise SystemExit(main())

279
testing/ci/quality_gate.py Normal file
View File

@ -0,0 +1,279 @@
"""Unified quality gate for the repo's managed production scope."""

from __future__ import annotations

import argparse
import ast
import json
import re
import xml.etree.ElementTree as ET
from dataclasses import dataclass
from pathlib import Path
from typing import Iterable

# Repository root, resolved relative to this file (testing/ci/quality_gate.py).
ROOT = Path(__file__).resolve().parents[2]
DEFAULT_CONTRACT = ROOT / "testing" / "quality_contract.json"
DEFAULT_BACKEND_COVERAGE = ROOT / "build" / "backend-coverage.xml"
DEFAULT_FRONTEND_COVERAGE = ROOT / "frontend" / "coverage" / "coverage-summary.json"
# File types the line-budget check applies to; everything else is skipped.
TEXT_EXTENSIONS = {".py", ".js", ".mjs", ".ts", ".vue", ".json", ".yaml", ".yml"}
@dataclass(frozen=True)
class GateIssue:
    """One violated quality-gate condition, ready for reporting."""

    # Which check produced the issue (e.g. "loc", "docstring").
    check: str
    # File path the issue applies to.
    path: str
    # Human-readable explanation of the violation.
    message: str
def load_contract(path: Path) -> dict:
    """Read and parse the JSON quality-gate contract at *path*."""
    raw = path.read_text()
    return json.loads(raw)
def _resolve(path_str: str) -> Path:
path = Path(path_str)
return path if path.is_absolute() else ROOT / path
def _count_lines(path: Path) -> int:
return len(path.read_text().splitlines())
def check_file_sizes(paths: Iterable[Path], *, max_lines: int = 500) -> list[GateIssue]:
    """Flag managed text files whose line count exceeds *max_lines*.

    Missing files and non-text extensions are silently skipped so callers can
    pass the whole contract scope without pre-filtering.
    """
    violations: list[GateIssue] = []
    for candidate in paths:
        if not candidate.exists():
            continue
        if candidate.suffix.lower() not in TEXT_EXTENSIONS:
            continue
        total = _count_lines(candidate)
        if total > max_lines:
            violations.append(
                GateIssue("loc", str(candidate), f"{total} lines exceeds {max_lines}")
            )
    return violations
def _python_node_issues(path: Path) -> list[GateIssue]:
"""Require docstrings on all functions and classes in a Python module."""
issues: list[GateIssue] = []
tree = ast.parse(path.read_text())
for node in ast.walk(tree):
if not isinstance(node, (ast.FunctionDef, ast.AsyncFunctionDef, ast.ClassDef)):
continue
if ast.get_docstring(node):
continue
issues.append(GateIssue("docstring", str(path), f"missing docstring on {node.__class__.__name__} {node.name}"))
return issues
# Matches a (possibly exported) named JS function declaration at line start.
_FUNCTION_RE = re.compile(r"^\s*(?:export\s+)?function\s+([A-Za-z_$][\w$]*)\s*\(")
# Matches a named JS class declaration at line start.
_CLASS_RE = re.compile(r"^\s*class\s+([A-Za-z_$][\w$]*)\s*")
def _has_js_contract(lines: list[str], index: int) -> bool:
"""Check whether the nearest leading comment block documents a JS function."""
seen_comment = False
for pos in range(index - 1, -1, -1):
raw = lines[pos].rstrip()
stripped = raw.strip()
if not stripped:
if seen_comment:
continue
continue
if stripped.startswith("//"):
seen_comment = True
if "WHY:" in stripped or "@param" in stripped or "@returns" in stripped:
return True
continue
if stripped.startswith("*"):
seen_comment = True
if "WHY:" in stripped or "@param" in stripped or "@returns" in stripped:
return True
continue
if stripped.endswith("*/"):
seen_comment = True
if "WHY:" in stripped or "@param" in stripped or "@returns" in stripped:
return True
continue
if stripped.startswith("/**"):
seen_comment = True
if "WHY:" in stripped or "@param" in stripped or "@returns" in stripped:
return True
continue
break
return seen_comment and any(
marker in line for line in lines[max(0, index - 6):index] for marker in ("WHY:", "@param", "@returns")
)
def _js_node_issues(path: Path) -> list[GateIssue]:
"""Require leading contract comments for named JS functions and classes."""
lines = path.read_text().splitlines()
issues: list[GateIssue] = []
for index, line in enumerate(lines):
match = _FUNCTION_RE.match(line) or _CLASS_RE.match(line)
if not match:
continue
name = match.group(1)
if _has_js_contract(lines, index):
continue
issues.append(GateIssue("docstring", str(path), f"missing contract comment on {name}"))
return issues
def check_docstrings(paths: Iterable[Path]) -> list[GateIssue]:
    """Check that managed production files document non-trivial definitions."""
    # Dispatch by file extension: Python uses AST docstrings, JS-family files
    # use leading contract comments.
    handlers = {
        ".py": _python_node_issues,
        ".js": _js_node_issues,
        ".mjs": _js_node_issues,
        ".ts": _js_node_issues,
        ".vue": _js_node_issues,
    }
    collected: list[GateIssue] = []
    for path in paths:
        if not path.exists():
            continue
        handler = handlers.get(path.suffix.lower())
        if handler is not None:
            collected.extend(handler(path))
    return collected
def _normalize_key(value: str) -> str:
return value.replace("\\", "/").lstrip("./")
def _path_suffixes(value: str) -> set[str]:
    """Return every trailing path suffix of *value* (a/b/c -> {a/b/c, b/c, c})."""
    parts = _normalize_key(value).split("/")
    suffixes: set[str] = set()
    for start in range(len(parts)):
        suffixes.add("/".join(parts[start:]))
    return suffixes
def _coverage_lookup(report: dict, wanted: str) -> dict | None:
    """Find the coverage entry for *wanted*, tolerating differing path roots.

    Report keys and the wanted path may be rooted differently (absolute vs
    repo-relative), so the match also accepts suffix equality in either
    direction.  Returns the first matching entry, or None.
    """
    target = _normalize_key(wanted)
    target_suffixes = _path_suffixes(target)
    for key, entry in report.items():
        # Only real coverage entries carry a "lines" metric.
        if not isinstance(entry, dict) or "lines" not in entry:
            continue
        normalized = _normalize_key(key)
        if normalized == target:
            return entry
        if normalized in target_suffixes:
            return entry
        if any(normalized.endswith(f"/{suffix}") for suffix in target_suffixes):
            return entry
    return None
def _load_frontend_coverage(path: Path) -> dict:
data = json.loads(path.read_text())
return {key: value for key, value in data.items() if isinstance(value, dict)}
def _load_backend_coverage(path: Path) -> dict[str, dict[str, float]]:
    """Parse a Cobertura XML report into {file: {"lines": pct, "branches": pct}}."""
    report: dict[str, dict[str, float]] = {}
    for class_node in ET.parse(path).getroot().findall(".//class"):
        filename = class_node.attrib.get("filename")
        if not filename:
            continue
        # Cobertura rates are 0..1 fractions; convert to percentages.
        line_rate = float(class_node.attrib.get("line-rate", "0"))
        branch_rate = float(class_node.attrib.get("branch-rate", "0"))
        report[_normalize_key(filename)] = {
            "lines": line_rate * 100,
            "branches": branch_rate * 100,
        }
    return report
def check_coverage(
    paths: Iterable[Path],
    *,
    backend_report: Path,
    frontend_report: Path,
    threshold: float = 95.0,
) -> list[GateIssue]:
    """Check the per-file coverage floor for the managed production scope."""
    backend_cov = _load_backend_coverage(backend_report) if backend_report.exists() else {}
    frontend_cov = _load_frontend_coverage(frontend_report) if frontend_report.exists() else {}
    found: list[GateIssue] = []
    for path in paths:
        if not path.exists():
            continue
        if path.is_absolute():
            rel = path.relative_to(ROOT).as_posix()
        else:
            rel = _normalize_key(str(path))
        if rel.startswith("backend/"):
            # Cobertura entries store percentages directly under "lines".
            entry = _coverage_lookup(backend_cov, rel)
            if entry is None:
                found.append(GateIssue("coverage", rel, "missing from backend coverage report"))
            elif entry["lines"] < threshold:
                found.append(GateIssue("coverage", rel, f"line coverage {entry['lines']:.2f}% below {threshold}%"))
        elif rel.startswith("frontend/"):
            # Istanbul keys are relative to the frontend root; drop the prefix.
            entry = _coverage_lookup(frontend_cov, rel.split("frontend/", 1)[1])
            if entry is None:
                found.append(GateIssue("coverage", rel, "missing from frontend coverage report"))
            else:
                pct = entry.get("lines", {}).get("pct", 0.0)
                if pct < threshold:
                    found.append(GateIssue("coverage", rel, f"line coverage {pct:.2f}% below {threshold}%"))
    return found
def _build_parser() -> argparse.ArgumentParser:
    """Build the CLI argument parser for the quality-gate entry point."""
    parser = argparse.ArgumentParser(description="Run the repo's unified quality gate")
    options = (
        ("--contract", str(DEFAULT_CONTRACT), "Path to the JSON gate contract"),
        ("--backend-coverage", str(DEFAULT_BACKEND_COVERAGE), "Backend coverage XML"),
        ("--frontend-coverage", str(DEFAULT_FRONTEND_COVERAGE), "Frontend coverage summary JSON"),
        ("--report", str(ROOT / "build" / "quality-gate.json"), "Write a JSON report here"),
    )
    for flag, default, help_text in options:
        parser.add_argument(flag, default=default, help=help_text)
    return parser
def run_gate(contract_path: Path, *, backend_coverage: Path, frontend_coverage: Path) -> tuple[list[GateIssue], dict]:
    """Run every gate check from the contract and build the JSON report payload.

    Returns the flat list of issues plus a serializable report describing the
    checked scope, the thresholds used, and every issue found.
    """
    contract = load_contract(contract_path)
    managed_files = [_resolve(entry) for entry in contract["managed_files"]]
    docstring_files = [_resolve(entry) for entry in contract["docstring_files"]]
    coverage_files = [_resolve(entry) for entry in contract["coverage_files"]]
    max_lines = int(contract.get("max_lines", 500))
    threshold = float(contract.get("coverage_threshold_pct", 95))
    issues = [
        *check_file_sizes(managed_files, max_lines=max_lines),
        *check_docstrings(docstring_files),
        *check_coverage(
            coverage_files,
            backend_report=backend_coverage,
            frontend_report=frontend_coverage,
            threshold=threshold,
        ),
    ]
    report = {
        "managed_files": [str(path.relative_to(ROOT)) for path in managed_files],
        "docstring_files": [str(path.relative_to(ROOT)) for path in docstring_files],
        "coverage_files": [str(path.relative_to(ROOT)) for path in coverage_files],
        "max_lines": max_lines,
        "coverage_threshold_pct": threshold,
        "issue_count": len(issues),
        "issues": [issue.__dict__ for issue in issues],
    }
    return issues, report
def main(argv: list[str] | None = None) -> int:
    """CLI entry point: run the gate, write the report, and return an exit code."""
    args = _build_parser().parse_args(argv)
    report_path = _resolve(args.report)
    issues, report = run_gate(
        _resolve(args.contract),
        backend_coverage=_resolve(args.backend_coverage),
        frontend_coverage=_resolve(args.frontend_coverage),
    )
    report_path.parent.mkdir(parents=True, exist_ok=True)
    report_path.write_text(json.dumps(report, indent=2, sort_keys=True) + "\n")
    for issue in issues:
        print(f"{issue.check}: {issue.path}: {issue.message}")
    if not issues:
        print(f"quality gate passed: {len(report['managed_files'])} managed files checked")
        return 0
    print(f"quality gate failed: {len(issues)} issue(s)")
    return 1
if __name__ == "__main__":
    # Delegate to main() so the module works both imported and as a script.
    raise SystemExit(main())

99
testing/ci/summary.py Normal file
View File

@ -0,0 +1,99 @@
"""Parse test results and format Pushgateway-friendly metrics payloads."""
# NOTE: the docstring must precede the __future__ import; placed after it, the
# string is a no-op statement and the module's __doc__ stays None.
from __future__ import annotations

import re
import urllib.request
import xml.etree.ElementTree as ET
from dataclasses import dataclass
from pathlib import Path
from typing import Iterable
@dataclass(frozen=True)
class RunSummary:
    """Aggregate counts from a collection of JUnit XML files."""

    # Raw counters as reported by the JUnit <testsuite> attributes.
    tests: int = 0
    failures: int = 0
    errors: int = 0
    skipped: int = 0

    @property
    def passed(self) -> int:
        """Return the number of passing test cases derived from the aggregate."""
        not_passed = self.failures + self.errors + self.skipped
        return max(self.tests - not_passed, 0)
def load_junit_summary(paths: Iterable[Path]) -> RunSummary:
    """Load one or more JUnit XML files and combine their result counts.

    WHY: CI needs a single stable view of each suite instead of separately
    parsing backend, unit, component, and e2e XML files in shell.
    """

    def _count(node: ET.Element, name: str) -> int:
        # Emitters write ints or floats; missing, empty, or bad values count as 0.
        raw = node.attrib.get(name) or "0"
        try:
            return int(float(raw))
        except ValueError:
            return 0

    tests = failures = errors = skipped = 0
    for path in paths:
        if not path.exists():
            continue
        root = ET.parse(path).getroot()
        # Accept either a bare <testsuite> or a <testsuites> wrapper.
        if root.tag == "testsuite":
            suites = [root]
        elif root.tag == "testsuites":
            suites = list(root.findall("testsuite"))
        else:
            suites = []
        for suite in suites:
            tests += _count(suite, "tests")
            failures += _count(suite, "failures")
            errors += _count(suite, "errors")
            skipped += _count(suite, "skipped")
    return RunSummary(tests=tests, failures=failures, errors=errors, skipped=skipped)
def read_pushgateway_counters(text: str, *, suite: str, job: str) -> dict[str, float]:
    """Read the current quality-gate counters for a suite from Pushgateway text.

    The Prometheus exposition format does not guarantee label order (exports
    typically sort labels alphabetically, i.e. job, status, suite, and may add
    labels such as ``instance``), so each required label is matched with an
    independent lookahead instead of a single positional pattern.  Missing
    series default to 0.0.
    """
    counters: dict[str, float] = {"ok": 0.0, "failed": 0.0}
    for status in counters:
        pattern = re.compile(
            r"^platform_quality_gate_runs_total\{"
            rf'(?=[^}}]*\bjob="{re.escape(job)}")'
            rf'(?=[^}}]*\bsuite="{re.escape(suite)}")'
            rf'(?=[^}}]*\bstatus="{status}")'
            r"[^}]*\}\s+([0-9]+(?:\.[0-9]+)?)$",
            re.M,
        )
        match = pattern.search(text)
        if match:
            counters[status] = float(match.group(1))
    return counters
def render_payload(*, suite: str, ok: int, failed: int, summary: RunSummary) -> str:
    """Render the Pushgateway payload for the quality-gate counters."""
    run_lines = [
        "# TYPE platform_quality_gate_runs_total counter",
        f'platform_quality_gate_runs_total{{suite="{suite}",status="ok"}} {ok}',
        f'platform_quality_gate_runs_total{{suite="{suite}",status="failed"}} {failed}',
    ]
    test_lines = [
        "# TYPE bstein_home_quality_gate_tests_total gauge",
        f'bstein_home_quality_gate_tests_total{{suite="{suite}",result="passed"}} {summary.passed}',
        f'bstein_home_quality_gate_tests_total{{suite="{suite}",result="failed"}} {summary.failures}',
        f'bstein_home_quality_gate_tests_total{{suite="{suite}",result="error"}} {summary.errors}',
        f'bstein_home_quality_gate_tests_total{{suite="{suite}",result="skipped"}} {summary.skipped}',
    ]
    return "\n".join(run_lines + test_lines) + "\n"
def publish_quality_metrics(*, gateway: str, suite: str, job: str, status: str, summary: RunSummary) -> None:
    """Publish run and test totals to Pushgateway.

    Reads the current counters, increments the one matching *status* ("ok"
    increments ok; anything else increments failed), and POSTs the re-rendered
    payload back.  HTTP responses are closed via context managers so the CI
    agent does not leak connections (the previous version never closed them).
    """
    gateway = gateway.rstrip("/")
    # Read-modify-write: the gateway keeps the last pushed value, so the
    # counters must be re-read and incremented client-side before pushing.
    with urllib.request.urlopen(f"{gateway}/metrics", timeout=10) as response:
        text = response.read().decode("utf-8", errors="replace")
    counters = read_pushgateway_counters(text, suite=suite, job=job)
    key = "ok" if status == "ok" else "failed"
    counters[key] += 1
    payload = render_payload(suite=suite, ok=int(counters["ok"]), failed=int(counters["failed"]), summary=summary)
    request = urllib.request.Request(
        f"{gateway}/metrics/job/{job}/suite/{suite}",
        data=payload.encode("utf-8"),
        method="POST",
        headers={"Content-Type": "text/plain"},
    )
    with urllib.request.urlopen(request, timeout=10) as response:
        response.read()

View File

@ -0,0 +1,17 @@
// Playwright component test (real browser): MetricRow should render the label
// and value of every item passed via props.
import { expect, test } from "../../../frontend/node_modules/@playwright/experimental-ct-vue/index.js";
import MetricRow from "../../../frontend/src/components/MetricRow.vue";
test("renders metric values in the browser", async ({ mount }) => {
  const component = await mount(MetricRow, {
    props: {
      items: [
        { label: "Nodes", value: "26", note: "Atlas" },
        { label: "Storage", value: "80 TB", note: "Longhorn" },
      ],
    },
  });
  // Label from the first item, value from the second: both items rendered.
  await expect(component).toContainText("Nodes");
  await expect(component).toContainText("80 TB");
});

View File

@ -0,0 +1,15 @@
// Playwright component test (real browser): StatsGrid summarizes the fallback
// hardware inventory from src/data/sample.js.
import { expect, test } from "../../../frontend/node_modules/@playwright/experimental-ct-vue/index.js";
import StatsGrid from "../../../frontend/src/components/StatsGrid.vue";
import { fallbackHardware } from "../../../frontend/src/data/sample.js";
test("renders a live hardware summary in the browser", async ({ mount }) => {
  const component = await mount(StatsGrid, {
    props: {
      hardware: fallbackHardware(),
    },
  });
  await expect(component).toContainText("Control plane");
  await expect(component).toContainText("titan-16");
});

View File

@ -0,0 +1,37 @@
// End-to-end test for the overview page. Backend API calls are stubbed with
// page.route so only the frontend served by the dev server is exercised.
import { expect, test } from "../../../frontend/node_modules/@playwright/test/index.mjs";
test.beforeEach(async ({ page }) => {
  // Auth disabled: the app should skip any SSO redirect.
  await page.route("**/api/auth/config", async (route) => {
    await route.fulfill({
      status: 200,
      contentType: "application/json",
      body: JSON.stringify({ enabled: false }),
    });
  });
  // Lab status: atlas up, oceanus down, exercising both status states.
  await page.route("**/api/lab/status", async (route) => {
    await route.fulfill({
      status: 200,
      contentType: "application/json",
      body: JSON.stringify({
        connected: true,
        atlas: { up: true },
        oceanus: { up: false },
      }),
    });
  });
});
test("shows the overview and expands the mermaid diagram", async ({ page }) => {
  await page.goto("/", { waitUntil: "domcontentloaded" });
  await expect(page.getByRole("heading", { name: "Overview" })).toBeVisible();
  await expect(page.getByText("Live data connected")).toBeVisible();
  await expect(page.locator(".service-grid .service").filter({ hasText: "Nextcloud" }).first()).toBeVisible();
  // The first mermaid card renders an SVG and supports full-screen overlay;
  // Escape must dismiss the overlay again.
  const firstCard = page.locator(".mermaid-card").first();
  await expect(firstCard.locator("svg")).toBeVisible();
  await firstCard.getByRole("button", { name: "Full screen" }).click();
  await expect(page.locator(".overlay")).toBeVisible();
  await page.keyboard.press("Escape");
  await expect(page.locator(".overlay")).toHaveCount(0);
});

View File

@ -0,0 +1,41 @@
// End-to-end test for the access-request form. The availability check and the
// submission endpoint are stubbed so no backend is needed.
import { expect, test } from "../../../frontend/node_modules/@playwright/test/index.mjs";
test.beforeEach(async ({ page }) => {
  await page.route("**/api/auth/config", async (route) => {
    await route.fulfill({
      status: 200,
      contentType: "application/json",
      body: JSON.stringify({ enabled: false }),
    });
  });
  // Username availability probe always succeeds.
  await page.route("**/api/access/request/availability*", async (route) => {
    await route.fulfill({
      status: 200,
      contentType: "application/json",
      body: JSON.stringify({ available: true }),
    });
  });
  // Submission returns a pending request with a user-visible request code.
  await page.route("**/api/access/request", async (route) => {
    await route.fulfill({
      status: 200,
      contentType: "application/json",
      body: JSON.stringify({
        status: "pending_email_verification",
        request_code: "alice~ABC123",
      }),
    });
  });
});
test("submits an access request and shows the request code", async ({ page }) => {
  await page.goto("/request-access");
  await page.getByLabel("Lab Name (username)").fill("alice");
  await expect(page.getByText("Username is available.")).toBeVisible();
  await page.getByLabel("Last name").fill("Atlas");
  await page.getByLabel("Email").fill("alice@example.dev");
  await page.getByRole("button", { name: "Submit request" }).click();
  await expect(page.getByText("Request submitted.")).toBeVisible();
  await expect(page.getByText("alice~ABC123")).toBeVisible();
});

View File

@ -0,0 +1,24 @@
// Flat ESLint config covering frontend sources and the testing tree. Plugins
// are resolved from frontend/node_modules so this workspace needs no install.
import js from "../../frontend/node_modules/@eslint/js/src/index.js";
import globals from "../../frontend/node_modules/globals/index.js";
// Browser + Node + Vitest globals, since specs and configs run in both worlds.
const sharedGlobals = {
  ...globals.browser,
  ...globals.node,
  ...globals.vitest,
};
export default [
  js.configs.recommended,
  {
    files: ["frontend/src/**/*.js", "testing/frontend/**/*.js", "testing/frontend/**/*.mjs"],
    languageOptions: {
      ecmaVersion: "latest",
      sourceType: "module",
      globals: sharedGlobals,
    },
    rules: {
      // Underscore-prefixed bindings are intentionally-unused placeholders.
      "no-unused-vars": ["error", { argsIgnorePattern: "^_", varsIgnorePattern: "^_" }],
      "no-undef": "error",
    },
  },
];

View File

@ -0,0 +1,15 @@
// Playwright component-testing config: specs live in testing/frontend/component
// and JUnit output lands in build/ for the CI summary step.
import path from "node:path";
import { fileURLToPath } from "node:url";
import { defineConfig } from "../../frontend/node_modules/@playwright/experimental-ct-vue/index.js";
// Anchor every path to this file so the config works from any working directory.
const testingDir = path.dirname(fileURLToPath(import.meta.url));
export default defineConfig({
  testDir: path.resolve(testingDir, "component"),
  use: {
    ctPort: 3100,
    // Host page and entry script used to mount components under test.
    ctTemplateDir: "../../frontend/playwright",
    viewport: { width: 1280, height: 900 },
  },
  reporter: [["list"], ["junit", { outputFile: path.resolve(testingDir, "../../build/junit-frontend-component.xml") }]],
});

View File

@ -0,0 +1,23 @@
// Playwright end-to-end config: boots the Vite dev server and runs the specs
// in testing/frontend/e2e against it, writing JUnit output into build/.
import path from "node:path";
import { fileURLToPath } from "node:url";
import { defineConfig } from "../../frontend/node_modules/@playwright/test/index.mjs";
const testingDir = path.dirname(fileURLToPath(import.meta.url));
const frontendRoot = path.resolve(testingDir, "../../frontend");
export default defineConfig({
  testDir: path.resolve(testingDir, "e2e"),
  use: {
    baseURL: "http://127.0.0.1:4173",
    trace: "on-first-retry",
    viewport: { width: 1440, height: 1080 },
  },
  webServer: {
    // Dev server is started for the run; locally an already-running server is
    // reused, while CI always starts a fresh one.
    command: "npm run dev -- --host 127.0.0.1 --port 4173",
    cwd: frontendRoot,
    url: "http://127.0.0.1:4173",
    reuseExistingServer: !process.env.CI,
    timeout: 120000,
  },
  reporter: [["list"], ["junit", { outputFile: path.resolve(testingDir, "../../build/junit-frontend-e2e.xml") }]],
});

View File

@ -0,0 +1,12 @@
<!doctype html>
<!-- Host page for Playwright component tests: components mount into #app via
     the ./index.ts entry script. -->
<html lang="en">
  <head>
    <meta charset="UTF-8" />
    <meta name="viewport" content="width=device-width, initial-scale=1.0" />
    <title>Playwright CT</title>
  </head>
  <body>
    <div id="app"></div>
    <script type="module" src="./index.ts"></script>
  </body>
</html>

View File

@ -0,0 +1 @@
// Entry script referenced by the CT host page (index.html). Intentionally an
// empty module: the export marker keeps it a valid ES module.
export {};

View File

@ -0,0 +1,320 @@
import { afterEach, beforeEach, describe, expect, it, vi } from "vitest";
// Import a fresh copy of the auth module so each test starts with clean
// module-level state.
async function loadAuth() {
  vi.resetModules();
  return import("../../../frontend/src/auth.js");
}
// Unit tests for frontend/src/auth.js. Keycloak is never contacted: fakes are
// injected through the __ATLAS_KEYCLOAK_FACTORY__ / __ATLAS_KEYCLOAK_CONSTRUCTOR__
// globals and fetch is stubbed for the /api/auth/config endpoint.
describe("auth helpers", () => {
  beforeEach(() => {
    // Clear injection hooks so each test opts into its own fake client.
    globalThis.__ATLAS_KEYCLOAK_FACTORY__ = null;
    globalThis.__ATLAS_KEYCLOAK_CONSTRUCTOR__ = null;
  });
  afterEach(() => {
    vi.restoreAllMocks();
    vi.unstubAllGlobals();
    vi.resetModules();
  });
  it("normalizes token groups", async () => {
    const { normalizeGroups } = await loadAuth();
    // Leading slashes stripped; non-strings and empties dropped.
    expect(normalizeGroups(["/dev", "admin", 3, null, ""])).toEqual(["dev", "admin"]);
    expect(normalizeGroups("not-an-array")).toEqual([]);
  });
  it("attaches a bearer token when fetching", async () => {
    const authModule = await loadAuth();
    authModule.auth.token = "bearer-token";
    const fetchMock = vi.fn(async () => new Response("{}", { status: 200 }));
    vi.stubGlobal("fetch", fetchMock);
    await authModule.authFetch("/api/healthz");
    expect(fetchMock).toHaveBeenCalledTimes(1);
    const [, options] = fetchMock.mock.calls[0];
    expect(new Headers(options.headers).get("Authorization")).toBe("Bearer bearer-token");
  });
  it("initializes auth state from the auth config endpoint", async () => {
    const authModule = await loadAuth();
    vi.spyOn(window, "setInterval").mockReturnValue(1234);
    vi.stubGlobal(
      "fetch",
      vi.fn(async () =>
        new Response(JSON.stringify({ enabled: false, login_url: "", reset_url: "" }), {
          status: 200,
          headers: { "content-type": "application/json" },
        }),
      ),
    );
    await authModule.initAuth();
    expect(authModule.auth.ready).toBe(true);
    expect(authModule.auth.enabled).toBe(false);
  });
  it("falls back to the keycloak-js constructor when no injected factory exists", async () => {
    const client = {
      authenticated: true,
      token: "mock-token",
      tokenParsed: {
        preferred_username: "bob",
        email: "bob@example.dev",
        groups: ["/ops"],
      },
      init: vi.fn(async () => true),
      login: vi.fn(async () => {}),
      logout: vi.fn(async () => {}),
      updateToken: vi.fn(async () => true),
    };
    globalThis.__ATLAS_KEYCLOAK_CONSTRUCTOR__ = function MockKeycloak() {
      return client;
    };
    const authModule = await loadAuth();
    const created = authModule.createKeycloak({ url: "https://sso.example.dev" });
    expect(created).toBe(client);
  });
  it("uses the real Keycloak constructor when no test hooks are present", async () => {
    const authModule = await loadAuth();
    const client = authModule.createKeycloak({
      url: "https://sso.example.dev",
      realm: "atlas",
      clientId: "portal-client",
    });
    // Only the shape is asserted; the real client is never initialized here.
    expect(client).toHaveProperty("init");
    expect(client).toHaveProperty("login");
    expect(client).toHaveProperty("logout");
  });
  it("reuses the auth initialization promise and tolerates empty token fields", async () => {
    const client = {
      authenticated: false,
      token: "",
      tokenParsed: undefined,
      init: vi.fn(async () => true),
      login: vi.fn(async () => {}),
      logout: vi.fn(async () => {}),
      updateToken: vi.fn(async () => true),
    };
    globalThis.__ATLAS_KEYCLOAK_FACTORY__ = () => client;
    const authModule = await loadAuth();
    vi.spyOn(window, "setInterval").mockReturnValue(1234);
    vi.stubGlobal(
      "fetch",
      vi.fn(async () =>
        new Response(
          JSON.stringify({
            enabled: true,
            url: "https://sso.example.dev",
            realm: "atlas",
            client_id: "portal-client",
            login_url: "https://sso.example.dev/login",
            reset_url: "https://sso.example.dev/reset",
            account_url: "https://sso.example.dev/account",
            account_password_url: "https://sso.example.dev/account/#/security/signingin",
          }),
          {
            status: 200,
            headers: { "content-type": "application/json" },
          },
        ),
      ),
    );
    // Two concurrent calls must share one initialization promise.
    const first = authModule.initAuth();
    const second = authModule.initAuth();
    await first;
    await second;
    // Handler assigned onto the client by initAuth; empty token fields must
    // leave the auth state blank rather than throwing.
    client.onAuthSuccess();
    expect(authModule.auth.authenticated).toBe(false);
    expect(authModule.auth.username).toBe("");
    expect(authModule.auth.email).toBe("");
    expect(authModule.auth.groups).toEqual([]);
  });
  it("hydrates and proxies keycloak actions when enabled", async () => {
    const calls = { init: 0, login: 0, logout: 0, updateToken: 0 };
    const client = {
      authenticated: true,
      token: "mock-token",
      tokenParsed: {
        preferred_username: "alice",
        email: "alice@example.dev",
        groups: ["/dev", "/admin"],
      },
      init: vi.fn(async () => {
        calls.init += 1;
        return true;
      }),
      login: vi.fn(async () => {
        calls.login += 1;
      }),
      logout: vi.fn(async () => {
        calls.logout += 1;
      }),
      updateToken: vi.fn(async () => {
        calls.updateToken += 1;
        return true;
      }),
    };
    globalThis.__ATLAS_KEYCLOAK_FACTORY__ = () => client;
    const authModule = await loadAuth();
    vi.spyOn(window, "setInterval").mockReturnValue(1234);
    vi.stubGlobal(
      "fetch",
      vi.fn(async () =>
        new Response(
          JSON.stringify({
            enabled: true,
            url: "https://sso.example.dev",
            realm: "atlas",
            client_id: "portal-client",
            login_url: "https://sso.example.dev/login",
            reset_url: "https://sso.example.dev/reset",
            account_url: "https://sso.example.dev/account",
            account_password_url: "https://sso.example.dev/account/#/security/signingin",
          }),
          {
            status: 200,
            headers: { "content-type": "application/json" },
          },
        ),
      ),
    );
    await authModule.initAuth();
    // Blank login hints are dropped; non-blank hints are forwarded.
    await authModule.login("/account", " ");
    await authModule.login("/account", "alice");
    await authModule.logout();
    await authModule.authFetch("/api/healthz");
    expect(calls.init).toBe(1);
    expect(calls.login).toBe(2);
    expect(calls.logout).toBe(1);
    expect(calls.updateToken).toBeGreaterThan(0);
    expect(client.login.mock.calls[0][0]).not.toHaveProperty("loginHint");
    expect(client.login.mock.calls[1][0]).toMatchObject({ loginHint: "alice" });
    expect(authModule.auth.ready).toBe(true);
    expect(authModule.auth.enabled).toBe(true);
    expect(authModule.auth.authenticated).toBe(true);
    expect(authModule.auth.username).toBe("alice");
    expect(authModule.auth.email).toBe("alice@example.dev");
    expect(authModule.auth.groups).toEqual(["dev", "admin"]);
    expect(authModule.auth.token).toBe("mock-token");
  });
  it("covers the auth refresh handlers and polling branches", async () => {
    const intervalCalls = [];
    const client = {
      authenticated: true,
      token: "refresh-token",
      tokenParsed: {
        preferred_username: "carol",
        email: "carol@example.dev",
        groups: ["/ops"],
      },
      init: vi.fn(async () => true),
      login: vi.fn(async () => {}),
      logout: vi.fn(async () => {}),
      updateToken: vi.fn(async () => true),
    };
    globalThis.__ATLAS_KEYCLOAK_FACTORY__ = () => client;
    const authModule = await loadAuth();
    // Capture the refresh-poll callback instead of letting it run on a timer.
    vi.spyOn(window, "setInterval").mockImplementation((callback) => {
      intervalCalls.push(callback);
      return 1234;
    });
    vi.stubGlobal(
      "fetch",
      vi.fn(async () =>
        new Response(
          JSON.stringify({
            enabled: true,
            url: "https://sso.example.dev",
            realm: "atlas",
            client_id: "portal-client",
            login_url: "https://sso.example.dev/login",
            reset_url: "https://sso.example.dev/reset",
            account_url: "https://sso.example.dev/account",
            account_password_url: "https://sso.example.dev/account/#/security/signingin",
          }),
          {
            status: 200,
            headers: { "content-type": "application/json" },
          },
        ),
      ),
    );
    await authModule.initAuth();
    expect(intervalCalls).toHaveLength(1);
    // Drive every keycloak event handler, both refresh outcomes, both poll
    // branches, and a failing refresh during authFetch.
    client.onAuthSuccess();
    client.onAuthLogout();
    client.onAuthRefreshSuccess();
    client.updateToken.mockResolvedValueOnce(true);
    client.onTokenExpired();
    await Promise.resolve();
    client.updateToken.mockRejectedValueOnce(new Error("boom"));
    client.onTokenExpired();
    await Promise.resolve();
    client.updateToken.mockResolvedValueOnce(true);
    intervalCalls[0]();
    await Promise.resolve();
    client.authenticated = false;
    intervalCalls[0]();
    await Promise.resolve();
    client.authenticated = true;
    client.updateToken.mockRejectedValueOnce(new Error("refresh failed"));
    await authModule.authFetch("/api/healthz");
    expect(authModule.auth.username).toBe("carol");
    expect(authModule.auth.groups).toEqual(["ops"]);
    expect(client.updateToken).toHaveBeenCalled();
  });
  it("leaves auth alone when login/logout are called before initialization", async () => {
    const authModule = await loadAuth();
    const fetchMock = vi.fn(async () => new Response("{}", { status: 200 }));
    vi.stubGlobal("fetch", fetchMock);
    await authModule.login("/account", "alice");
    await authModule.logout();
    await authModule.authFetch("/api/healthz", { headers: { "X-Test": "1" } });
    // Only the plain fetch happened; caller headers pass through untouched
    // and no Authorization header is invented.
    expect(fetchMock).toHaveBeenCalledTimes(1);
    const [, options] = fetchMock.mock.calls[0];
    const headers = new Headers(options.headers);
    expect(headers.get("X-Test")).toBe("1");
    expect(headers.get("Authorization")).toBeNull();
  });
  it("recovers when the auth config endpoint fails", async () => {
    const authModule = await loadAuth();
    vi.stubGlobal("fetch", vi.fn(async () => new Response("boom", { status: 500 })));
    await authModule.initAuth();
    // A failed config fetch still marks auth ready, with SSO disabled.
    expect(authModule.auth.ready).toBe(true);
    expect(authModule.auth.enabled).toBe(false);
    expect(authModule.auth.authenticated).toBe(false);
  });
});

View File

@ -0,0 +1,120 @@
import { RouterLinkStub, shallowMount } from "../../../frontend/node_modules/@vue/test-utils/dist/vue-test-utils.esm-bundler.mjs";
import { describe, expect, it } from "../../../frontend/node_modules/vitest/dist/index.js";
import MetricRow from "../../../frontend/src/components/MetricRow.vue";
import ServiceGrid from "../../../frontend/src/components/ServiceGrid.vue";
import StatsGrid from "../../../frontend/src/components/StatsGrid.vue";
import { fallbackHardware } from "../../../frontend/src/data/sample.js";
// Unit tests (jsdom, shallow mounts) for the shared dashboard components:
// MetricRow, StatsGrid, and ServiceGrid.
describe("shared dashboard components", () => {
  it("renders metric cards", () => {
    const wrapper = shallowMount(MetricRow, {
      props: {
        items: [
          { label: "Nodes", value: "26", note: "atlas + oceanus" },
          { label: "Storage", value: "80 TB", note: "Longhorn" },
        ],
      },
    });
    expect(wrapper.text()).toContain("Nodes");
    expect(wrapper.text()).toContain("26");
    expect(wrapper.text()).toContain("Longhorn");
  });
  it("summarizes node state in the stats grid", () => {
    const wrapper = shallowMount(StatsGrid, {
      props: { hardware: fallbackHardware() },
    });
    expect(wrapper.text()).toContain("Control plane");
    expect(wrapper.text()).toContain("3");
    expect(wrapper.text()).toContain("offline");
    expect(wrapper.text()).toContain("titan-16");
  });
  it("hides the attention card when every worker is healthy", () => {
    const wrapper = shallowMount(StatsGrid, {
      props: {
        hardware: {
          clusters: [
            {
              name: "atlas",
              nodes: [{ name: "titan-0a", role: "control-plane", status: "ready" }],
            },
          ],
          specialty: [],
        },
      },
    });
    expect(wrapper.text()).not.toContain("Attention");
  });
  it("distinguishes internal and external service links", () => {
    const wrapper = shallowMount(ServiceGrid, {
      global: {
        stubs: {
          RouterLink: RouterLinkStub,
        },
      },
      props: {
        services: [
          { name: "Atlas AI", category: "ai", summary: "Chat", link: "/ai/chat", icon: "🤖" },
          { name: "Nextcloud", category: "productivity", summary: "Files", link: "https://cloud.example.dev", icon: "☁️" },
          { name: "Bad Link", category: "broken", summary: "invalid", link: "not a url", icon: "❓" },
        ],
      },
    });
    expect(wrapper.text()).toContain("Atlas AI");
    expect(wrapper.text()).toContain("cloud.example.dev");
    expect(wrapper.text()).toContain("not a url");
    // Internal links render through RouterLink (stubbed here).
    expect(wrapper.findComponent(RouterLinkStub).exists()).toBe(true);
  });
  it("falls back to default service rendering when data is sparse", () => {
    const wrapper = shallowMount(ServiceGrid, {
      global: {
        stubs: {
          RouterLink: RouterLinkStub,
        },
      },
      props: {
        services: [
          { name: "Muted Service", category: "dev", summary: "planned", link: "", status: "planned" },
          { name: "Plain Service", category: "dev", summary: "fallback icon", link: "/apps" },
        ],
      },
    });
    expect(wrapper.text()).toContain("Muted Service");
    expect(wrapper.text()).toContain("fallback icon");
    expect(wrapper.findAll(".service").length).toBe(2);
    expect(wrapper.findComponent(RouterLinkStub).exists()).toBe(true);
  });
  it("renders empty and partial hardware states safely", () => {
    const wrapper = shallowMount(StatsGrid, {
      props: {
        hardware: {
          clusters: [{ name: "atlas", nodes: [{ name: "solo", role: "worker", status: "ready" }] }],
          specialty: [{ name: "standalone", role: "Spare node", status: "ready" }],
        },
      },
    });
    expect(wrapper.text()).toContain("1 nodes total");
    expect(wrapper.text()).toContain("0");
    expect(wrapper.text()).toContain("standalone");
  });
  it("falls back cleanly when no hardware prop is provided", () => {
    const wrapper = shallowMount(StatsGrid, {
      props: {},
    });
    expect(wrapper.text()).toContain("0 nodes total");
    expect(wrapper.text()).not.toContain("Attention");
  });
});

View File

@ -0,0 +1,144 @@
import { shallowMount } from "../../../frontend/node_modules/@vue/test-utils/dist/vue-test-utils.esm-bundler.mjs";
import { describe, expect, it } from "../../../frontend/node_modules/vitest/dist/index.js";
import HomeView from "../../../frontend/src/views/HomeView.vue";
// Unit tests (jsdom, shallow mounts with child components stubbed) for the
// HomeView computed state: fallback icons, diagrams, and status pills.
describe("HomeView", () => {
  it("adds fallback icons and builds diagrams for the overview page", () => {
    // One service per icon-mapping branch, plus "Mystery" for the default.
    const wrapper = shallowMount(HomeView, {
      global: {
        stubs: {
          MetricRow: true,
          ServiceGrid: true,
          MermaidCard: true,
        },
      },
      props: {
        serviceData: {
          services: [
            { name: "Nextcloud", summary: "Storage", link: "https://cloud.example.dev" },
            { name: "Jellyfin", summary: "Media", link: "https://stream.example.dev" },
            { name: "Matrix (Synapse)", summary: "Chat", link: "https://chat.example.dev" },
            { name: "Element", summary: "Rooms", link: "https://rooms.example.dev" },
            { name: "LiveKit", summary: "Calls", link: "https://calls.example.dev" },
            { name: "Coturn", summary: "TURN", link: "https://turn.example.dev" },
            { name: "Mailu", summary: "Mail", link: "https://mail.example.dev" },
            { name: "Vaultwarden", summary: "Passwords", link: "https://vault.example.dev" },
            { name: "Vault", summary: "Secrets", link: "https://secret.example.dev" },
            { name: "Gitea", summary: "Git", link: "https://scm.example.dev" },
            { name: "Jenkins", summary: "CI", link: "https://ci.example.dev" },
            { name: "Harbor", summary: "Registry", link: "https://registry.example.dev" },
            { name: "Flux", summary: "GitOps", link: "https://cd.example.dev" },
            { name: "Monero", summary: "Node", link: "https://monero.example.dev" },
            { name: "SUI Validator", summary: "Crypto", link: "https://sui.example.dev" },
            { name: "Keycloak", summary: "SSO", link: "https://sso.example.dev" },
            { name: "AI Translation", summary: "Translate", link: "https://translate.example.dev" },
            { name: "Grafana", summary: "Metrics", link: "https://metrics.example.dev" },
            { name: "Pegasus", summary: "Uploads", link: "https://pegasus.example.dev" },
            { name: "AI Chat", summary: "Chat", link: "https://chat.example.dev" },
            { name: "AI Vision", summary: "Vision", link: "https://vision.example.dev" },
            { name: "AI Speech", summary: "Speech", link: "https://speech.example.dev" },
            { name: "Mystery", summary: "Default", link: "https://default.example.dev" },
          ],
        },
      },
    });
    const icons = wrapper.vm.displayServices.map((service) => service.icon);
    expect(icons).toContain("☁️");
    expect(icons).toContain("🎞️");
    expect(icons).toContain("🗨️");
    expect(icons).toContain("🧩");
    expect(icons).toContain("🎥");
    expect(icons).toContain("📞");
    expect(icons).toContain("📮");
    expect(icons).toContain("🔒");
    expect(icons).toContain("🔑");
    expect(icons).toContain("📚");
    expect(icons).toContain("🧰");
    expect(icons).toContain("📦");
    expect(icons).toContain("🔄");
    expect(icons).toContain("⛏️");
    expect(icons).toContain("💠");
    expect(icons).toContain("🛡️");
    expect(icons).toContain("🌐");
    expect(icons).toContain("📈");
    expect(icons).toContain("🚀");
    expect(icons).toContain("💬");
    expect(icons).toContain("🖼️");
    expect(icons).toContain("🎙️");
    expect(icons).toContain("🛰️");
    expect(wrapper.vm.hardwareDiagram).toContain("Titan Lab");
    expect(wrapper.vm.networkDiagram).toContain("oauth2-proxy");
    expect(wrapper.vm.pipelineDiagram).toContain("flux[cd.bstein.dev]");
  });
  it("renders loading, error, and healthy status states", () => {
    const wrapper = shallowMount(HomeView, {
      global: {
        stubs: {
          MetricRow: true,
          ServiceGrid: true,
          MermaidCard: true,
        },
      },
      props: {
        labStatus: { connected: true, atlas: { up: true }, oceanus: { up: true } },
        serviceData: undefined,
        loading: true,
        error: "",
      },
    });
    expect(wrapper.vm.atlasPillClass).toBe("pill-ok");
    expect(wrapper.vm.oceanusPillClass).toBe("pill-ok");
    expect(wrapper.get(".status").text()).toBe("Loading...");
    // No serviceData provided: fallback services fill the grid.
    expect(wrapper.vm.displayServices.length).toBeGreaterThan(0);
  });
  it("shows the error state when lab data fetches fail", () => {
    const wrapper = shallowMount(HomeView, {
      global: {
        stubs: {
          MetricRow: true,
          ServiceGrid: true,
          MermaidCard: true,
        },
      },
      props: {
        labStatus: { connected: false, atlas: { up: false }, oceanus: { up: false } },
        serviceData: { services: [] },
        loading: false,
        error: "unable to load status",
      },
    });
    expect(wrapper.vm.atlasPillClass).toBe("pill-bad");
    expect(wrapper.vm.oceanusPillClass).toBe("pill-bad");
    expect(wrapper.get(".status").text()).toBe("unable to load status");
  });
  it("shows live data unavailable and respects custom metric items", () => {
    const wrapper = shallowMount(HomeView, {
      global: {
        stubs: {
          MetricRow: true,
          ServiceGrid: true,
          MermaidCard: true,
        },
      },
      props: {
        labStatus: { connected: false, atlas: { up: false }, oceanus: { up: false } },
        serviceData: { services: [] },
        metricsData: {
          items: [{ label: "Custom", value: "1", note: "" }],
        },
        loading: false,
        error: "",
      },
    });
    expect(wrapper.get(".status").text()).toBe("Live data unavailable");
    expect(wrapper.vm.metricItems[0].note).toBe("");
  });
});

View File

@ -0,0 +1,35 @@
import { describe, expect, it } from "../../../frontend/node_modules/vitest/dist/index.js";
import {
buildHardwareDiagram,
buildNetworkDiagram,
buildPipelineDiagram,
fallbackHardware,
fallbackMetrics,
fallbackNetwork,
fallbackServices,
} from "../../../frontend/src/data/sample.js";
describe("sample data builders", () => {
it("exposes the atlas hardware and service inventory", () => {
const hardware = fallbackHardware();
const services = fallbackServices();
expect(hardware.clusters[0].name).toBe("atlas");
expect(hardware.specialty.map((node) => node.alias)).toContain("oceanus");
expect(services.services.some((service) => service.name === "Keycloak")).toBe(true);
expect(services.services.some((service) => service.name === "AI Chat")).toBe(true);
});
it("builds the rendered diagrams and network summary", () => {
expect(buildHardwareDiagram({})).toContain("Titan Lab");
expect(buildHardwareDiagram({})).toContain("titan-0a");
expect(buildNetworkDiagram()).toContain("oauth2-proxy");
expect(buildPipelineDiagram()).toContain("flux[cd.bstein.dev]");
expect(fallbackNetwork().ingress_gateway).toContain("Traefik");
expect(fallbackMetrics()).toEqual({
dashboard: "https://metrics.bstein.dev",
description: "Atlas + Oceanus metrics.",
});
});
});

View File

@ -0,0 +1,44 @@
import path from "node:path";
import { fileURLToPath } from "node:url";
import { defineConfig } from "../../frontend/node_modules/vitest/dist/config.js";
import vue from "../../frontend/node_modules/@vitejs/plugin-vue/dist/index.mjs";
// Resolve the testing directory so the paths below are stable regardless of
// the working directory vitest is launched from.
const testingDir = path.dirname(fileURLToPath(import.meta.url));
const frontendRoot = path.resolve(testingDir, "../../frontend");
export default defineConfig({
  // Run with the frontend package as root so its node_modules, plugins, and
  // src/ tree resolve even though the specs live outside it.
  root: frontendRoot,
  plugins: [vue()],
  resolve: {
    alias: {
      // Mirror the frontend's "@" alias so specs import app modules the same way.
      "@": path.resolve(frontendRoot, "src"),
    },
  },
  test: {
    environment: "jsdom",
    globals: true,
    // Unit specs are kept under testing/unit/, not under the frontend root.
    include: [path.resolve(testingDir, "unit/**/*.spec.js")],
    setupFiles: [path.resolve(testingDir, "vitest.setup.js")],
    reporters: ["default", "junit"],
    outputFile: {
      // CI collects junit XML from the repo-level build/ directory.
      junit: path.resolve(testingDir, "../../build/junit-frontend-unit.xml"),
    },
    coverage: {
      provider: "v8",
      // json-summary is consumed by the unified quality gate; lcov/text are for humans.
      reporter: ["text", "lcov", "json-summary"],
      // Paths are relative to `root` (the frontend package). Only the files
      // the quality gate tracks are measured.
      include: [
        "src/auth.js",
        "src/data/sample.js",
        "src/components/MetricRow.vue",
        "src/components/MermaidCard.vue",
        "src/components/ServiceGrid.vue",
        "src/components/StatsGrid.vue",
        "src/views/HomeView.vue",
      ],
      // NOTE(review): only line/statement thresholds are enforced here;
      // branch/function coverage is presumably gated elsewhere — confirm.
      thresholds: {
        lines: 95,
        statements: 95,
      },
    },
  },
});

View File

@ -0,0 +1,8 @@
// jsdom does not provide the idle-callback API; fall back to timers so
// components that schedule idle work still run during unit tests.
if (!globalThis.requestIdleCallback) {
  globalThis.requestIdleCallback = function (callback) {
    // Invoke the callback on the next tick with a minimal IdleDeadline shim.
    return window.setTimeout(
      () => callback({ didTimeout: false, timeRemaining: () => 0 }),
      0,
    );
  };
}
if (!globalThis.cancelIdleCallback) {
  globalThis.cancelIdleCallback = function (handle) {
    return window.clearTimeout(handle);
  };
}

View File

@ -0,0 +1,48 @@
{
"max_lines": 500,
"coverage_threshold_pct": 95,
"managed_files": [
"backend/atlas_portal/app_factory.py",
"backend/atlas_portal/rate_limit.py",
"backend/atlas_portal/routes/auth_config.py",
"backend/atlas_portal/routes/health.py",
"backend/atlas_portal/routes/monero.py",
"backend/atlas_portal/settings.py",
"backend/atlas_portal/utils.py",
"frontend/src/auth.js",
"frontend/src/components/MetricRow.vue",
"frontend/src/components/MermaidCard.vue",
"frontend/src/components/ServiceGrid.vue",
"frontend/src/components/StatsGrid.vue",
"frontend/src/data/sample.js",
"frontend/src/views/HomeView.vue"
],
"docstring_files": [
"backend/atlas_portal/app_factory.py",
"backend/atlas_portal/rate_limit.py",
"backend/atlas_portal/routes/auth_config.py",
"backend/atlas_portal/routes/health.py",
"backend/atlas_portal/routes/monero.py",
"backend/atlas_portal/settings.py",
"backend/atlas_portal/utils.py",
"frontend/src/auth.js",
"frontend/src/data/sample.js",
"frontend/src/views/HomeView.vue"
],
"coverage_files": [
"backend/atlas_portal/app_factory.py",
"backend/atlas_portal/rate_limit.py",
"backend/atlas_portal/routes/auth_config.py",
"backend/atlas_portal/routes/health.py",
"backend/atlas_portal/routes/monero.py",
"backend/atlas_portal/settings.py",
"backend/atlas_portal/utils.py",
"frontend/src/auth.js",
"frontend/src/components/MetricRow.vue",
"frontend/src/components/MermaidCard.vue",
"frontend/src/components/ServiceGrid.vue",
"frontend/src/components/StatsGrid.vue",
"frontend/src/data/sample.js",
"frontend/src/views/HomeView.vue"
]
}

View File

@ -0,0 +1,19 @@
from __future__ import annotations
from pathlib import Path
from testing.ci.summary import RunSummary, load_junit_summary, render_payload
def test_load_junit_summary_combines_suites(tmp_path: Path) -> None:
    """Aggregate a junit report and render it as Prometheus exposition lines."""
    report_path = tmp_path / "results.xml"
    report_path.write_text(
        '<testsuites><testsuite tests="3" failures="1" errors="0" skipped="1"/></testsuites>'
    )
    combined = load_junit_summary([report_path])
    assert combined == RunSummary(tests=3, failures=1, errors=0, skipped=1)
    payload = render_payload(suite="bstein-home", ok=2, failed=0, summary=combined)
    # The payload must carry both the run counter and the per-result totals.
    expected_lines = (
        'platform_quality_gate_runs_total{suite="bstein-home",status="ok"} 2',
        'bstein_home_quality_gate_tests_total{suite="bstein-home",result="skipped"} 1',
    )
    for line in expected_lines:
        assert line in payload

View File

@ -0,0 +1,81 @@
from __future__ import annotations
import json
from pathlib import Path
from testing.ci.quality_gate import (
_js_node_issues,
_python_node_issues,
check_coverage,
check_file_sizes,
)
def test_check_file_sizes_flags_overlong_files(tmp_path: Path) -> None:
    """A file longer than the limit surfaces a ``loc`` issue naming the cap."""
    overlong = tmp_path / "tool.py"
    # Seven lines against a five-line cap guarantees a violation.
    overlong.write_text("\n".join(f"line {idx}" for idx in range(7)))
    issues = check_file_sizes([overlong], max_lines=5)
    assert issues
    first = issues[0]
    assert first.check == "loc"
    assert "exceeds 5" in first.message
def test_docstring_helpers_accept_contract_comments_and_docstrings(tmp_path: Path) -> None:
    """Docstring checkers pass documented defs and flag undocumented ones.

    Writes one Python module (one function with a docstring, one without)
    and one JS module with a JSDoc contract comment, then runs the
    per-language node checkers against each file.
    """
    py_path = tmp_path / "sample.py"
    py_path.write_text(
        '"""module docs"""\n\n'
        'def documented():\n'
        ' """Explain what the helper does."""\n'
        ' return 1\n\n'
        'def missing():\n'
        ' return 2\n'
    )
    js_path = tmp_path / "sample.js"
    js_path.write_text(
        '/**\n'
        ' * WHY: the helper needs a contract for the gate.\n'
        ' * @param {string} name - service name.\n'
        ' * @returns {string} icon label.\n'
        ' */\n'
        'function pickIcon(name) {\n'
        ' return name;\n'
        '}\n'
    )
    py_issues = _python_node_issues(py_path)
    js_issues = _js_node_issues(js_path)
    # Only the undocumented Python def should be reported; the issue message
    # is expected to end with the offending function's name.
    assert any(issue.message.endswith("missing") for issue in py_issues)
    # The JSDoc block satisfies the JS contract requirement, so no issues.
    assert js_issues == []
def test_check_coverage_reads_backend_and_frontend_reports(tmp_path: Path) -> None:
    """Fully covered files in both report formats produce no gate issues."""
    # Backend coverage arrives as a cobertura-style XML report.
    cobertura = tmp_path / "backend.xml"
    cobertura.write_text(
        '<coverage><packages><package><classes>'
        '<class filename="backend/atlas_portal/app_factory.py" line-rate="1.0" branch-rate="1.0"/>'
        '</classes></package></packages></coverage>'
    )
    # Frontend coverage arrives as a vitest json-summary document.
    full_marks = {
        "lines": {"pct": 100},
        "statements": {"pct": 100},
        "branches": {"pct": 100},
        "functions": {"pct": 100},
    }
    summary_path = tmp_path / "frontend.json"
    summary_path.write_text(json.dumps({"src/auth.js": full_marks}))
    tracked = [
        Path("backend/atlas_portal/app_factory.py"),
        Path("frontend/src/auth.js"),
    ]
    issues = check_coverage(
        tracked,
        backend_report=cobertura,
        frontend_report=summary_path,
        threshold=95,
    )
    assert issues == []