ci: centralize integration tests under testing

This commit is contained in:
Brad Stein 2026-04-10 17:00:33 -03:00
parent 5cd4ec05dd
commit 82bfff20ac
14 changed files with 420 additions and 33 deletions

View File

@ -3,6 +3,7 @@ members = [
"common", "common",
"server", "server",
"client", "client",
"testing",
] ]
resolver = "3" resolver = "3"

10
Jenkinsfile vendored
View File

@ -28,9 +28,7 @@ pipeline {
stage('Format') { stage('Format') {
steps { steps {
sh 'cargo fmt --all --manifest-path common/Cargo.toml -- --check' sh 'cargo fmt --all -- --check'
sh 'cargo fmt --all --manifest-path server/Cargo.toml -- --check'
sh 'cargo fmt --all --manifest-path client/Cargo.toml -- --check'
} }
} }
@ -40,6 +38,12 @@ pipeline {
} }
} }
stage('Testing') {
steps {
sh 'cargo test -p lesavka_testing'
}
}
stage('Quality Gate') { stage('Quality Gate') {
steps { steps {
sh 'QUALITY_GATE_PUSHGATEWAY_URL="${QUALITY_GATE_PUSHGATEWAY_URL}" scripts/ci/quality_gate.sh' sh 'QUALITY_GATE_PUSHGATEWAY_URL="${QUALITY_GATE_PUSHGATEWAY_URL}" scripts/ci/quality_gate.sh'

View File

@ -10,7 +10,4 @@ pub mod layout;
pub mod output; pub mod output;
pub mod paste; pub mod paste;
#[cfg(test)]
mod tests;
pub use app::LesavkaClientApp; pub use app::LesavkaClientApp;

View File

@ -1,19 +0,0 @@
// client/tests/integration.rs
// Deleted by this commit: this package-local smoke test is superseded by the
// exhaustive contract tests added under testing/tests/ in the same change.
mod tests {
    use crate::input::keymap::{char_to_usage, is_modifier, keycode_to_usage};
    use evdev::KeyCode;

    // Single smoke test touching one representative entry from each mapping
    // family: letters, function keys, keypad, modifiers, and shared chars.
    #[test]
    fn keymap_smoke_test_hits_letter_number_modifier_and_char_paths() {
        assert_eq!(keycode_to_usage(KeyCode::KEY_A), Some(0x04));
        assert_eq!(keycode_to_usage(KeyCode::KEY_Z), Some(0x1D));
        assert_eq!(keycode_to_usage(KeyCode::KEY_F1), Some(0x3A));
        assert_eq!(keycode_to_usage(KeyCode::KEY_KPENTER), Some(0x58));
        // Modifiers intentionally have no plain usage; they go through
        // is_modifier instead.
        assert_eq!(keycode_to_usage(KeyCode::KEY_LEFTCTRL), None);
        assert!(is_modifier(KeyCode::KEY_LEFTCTRL).is_some());
        assert!(is_modifier(KeyCode::KEY_RIGHTMETA).is_some());
        assert_eq!(char_to_usage('!'), Some((0x1E, 0x02)));
        assert_eq!(char_to_usage(' '), Some((0x2C, 0x00)));
    }
}

View File

@ -1,3 +0,0 @@
// client/src/tests/mod.rs
pub mod integration_test;

View File

@ -16,7 +16,7 @@ import json
import pathlib import pathlib
import re import re
import sys import sys
from collections import Counter, defaultdict from collections import defaultdict
clippy_path = pathlib.Path(sys.argv[1]) clippy_path = pathlib.Path(sys.argv[1])
baseline_path = pathlib.Path(sys.argv[2]) baseline_path = pathlib.Path(sys.argv[2])
@ -126,6 +126,54 @@ def source_loc_counts() -> dict[str, int]:
counts[rel] = sum(1 for _ in file.open('r', encoding='utf-8')) counts[rel] = sum(1 for _ in file.open('r', encoding='utf-8'))
return dict(sorted(counts.items())) return dict(sorted(counts.items()))
def integration_layout_violations() -> list[str]:
    """Flag Rust files that still use package-local integration-test layouts.

    Scans every ``*.rs`` file in the repo (skipping build output and the
    sanctioned ``testing/`` crate) and reports paths living in a member's
    ``src/tests/`` or ``tests/`` directory.  Returns human-readable violation
    strings; an empty list means the layout is clean.
    """
    found: list[str] = []
    for rust_file in sorted(root.rglob('*.rs')):
        relative = repo_relative(str(rust_file))
        if relative is None:
            continue
        # target/ is build output; testing/ is the one allowed test home.
        if relative.startswith('target/') or relative.startswith('testing/'):
            continue
        segments = pathlib.Path(relative).parts
        in_src_tests = len(segments) >= 3 and segments[1] == 'src' and segments[2] == 'tests'
        in_pkg_tests = len(segments) >= 2 and segments[1] == 'tests'
        if in_src_tests:
            found.append(
                f'{relative}: integration tests must live under testing/tests/ instead of package-local src/tests/'
            )
        elif in_pkg_tests:
            found.append(
                f'{relative}: integration tests must live under testing/tests/ instead of package-local tests/'
            )
    return found
def testing_contract_violations() -> list[str]:
    """Check every ``testing/tests/*.rs`` file against the module contract.

    The contract requires each file to stay within 500 LOC, use a snake_case
    filename, carry ``Scope:``/``Targets:``/``Why:`` markers within its first
    20 lines, and declare at least one test entrypoint.
    Returns human-readable violation strings; empty when everything passes.
    """
    violations: list[str] = []
    contract_dir = root / 'testing' / 'tests'
    if not contract_dir.exists():
        return ['testing/tests: missing dedicated top-level integration test directory']
    test_files = sorted(contract_dir.rglob('*.rs'))
    if not test_files:
        return ['testing/tests: no integration test files found']
    filename_re = re.compile(r'^[a-z0-9_]+\.rs$')
    required_markers = ('Scope:', 'Targets:', 'Why:')
    for file in test_files:
        rel = repo_relative(str(file))
        if rel is None:
            continue
        # Read each file once; reuse the text for the LOC count, the header
        # scan, and the entrypoint check (previously the file was opened for
        # the LOC count and then read a second time via read_text).
        text = file.read_text(encoding='utf-8')
        file_lines = text.splitlines()
        loc = len(file_lines)
        if loc > 500:
            violations.append(f'{rel}: exceeds 500 LOC contract ({loc})')
        if not filename_re.match(file.name):
            violations.append(f'{rel}: filename must use snake_case for meaningful modularization')
        # Markers must appear near the top so reviewers see the contract first.
        header = '\n'.join(file_lines[:20])
        for marker in required_markers:
            if marker not in header:
                violations.append(f'{rel}: missing required module contract marker {marker}')
        if '#[test]' not in text and '#[tokio::test]' not in text:
            violations.append(f'{rel}: missing test entrypoints')
    return violations
current = {} current = {}
for path, loc in source_loc_counts().items(): for path, loc in source_loc_counts().items():
current[path] = {'loc': loc} current[path] = {'loc': loc}
@ -154,6 +202,9 @@ for path, current_entry in current.items():
f'{path}: {key} grew from {baseline_value} to {current_value}' f'{path}: {key} grew from {baseline_value} to {current_value}'
) )
layout_violations = integration_layout_violations()
testing_violations = testing_contract_violations()
totals = { totals = {
'files': len(current), 'files': len(current),
'over_500': sum(1 for entry in current.values() if int(entry.get('loc', 0)) > 500), 'over_500': sum(1 for entry in current.values() if int(entry.get('loc', 0)) > 500),
@ -167,6 +218,8 @@ lines.append(f"files tracked: {totals['files']}")
lines.append(f"files over 500 LOC: {totals['over_500']}") lines.append(f"files over 500 LOC: {totals['over_500']}")
lines.append(f"clippy warnings tracked: {totals['clippy_warnings']}") lines.append(f"clippy warnings tracked: {totals['clippy_warnings']}")
lines.append(f"non-trivial undocumented functions tracked: {totals['doc_debt']}") lines.append(f"non-trivial undocumented functions tracked: {totals['doc_debt']}")
lines.append(f'legacy integration-test layout violations: {len(layout_violations)}')
lines.append(f'testing module contract violations: {len(testing_violations)}')
lines.append('') lines.append('')
lines.append('path | loc | clippy warnings | doc debt | baseline status') lines.append('path | loc | clippy warnings | doc debt | baseline status')
lines.append('-' * 78) lines.append('-' * 78)
@ -193,11 +246,27 @@ for path in sorted(current):
f"{path} | {entry.get('loc', 0)} | {entry.get('clippy_warnings', 0)} | {entry.get('doc_debt', 0)} | {baseline_loc}/{baseline_clippy}/{baseline_doc} | {status}" f"{path} | {entry.get('loc', 0)} | {entry.get('clippy_warnings', 0)} | {entry.get('doc_debt', 0)} | {baseline_loc}/{baseline_clippy}/{baseline_doc} | {status}"
) )
if layout_violations:
lines.append('')
lines.append('layout violations')
lines.append('-' * 78)
lines.extend(layout_violations)
if testing_violations:
lines.append('')
lines.append('testing module contract violations')
lines.append('-' * 78)
lines.extend(testing_violations)
summary_path.write_text('\n'.join(lines) + '\n', encoding='utf-8') summary_path.write_text('\n'.join(lines) + '\n', encoding='utf-8')
print(summary_path.read_text(encoding='utf-8')) print(summary_path.read_text(encoding='utf-8'))
if regressions: if regressions or layout_violations or testing_violations:
for line in regressions: for line in regressions:
print(line, file=sys.stderr) print(line, file=sys.stderr)
for line in layout_violations:
print(line, file=sys.stderr)
for line in testing_violations:
print(line, file=sys.stderr)
raise SystemExit(1) raise SystemExit(1)
PY PY

View File

@ -186,6 +186,10 @@
"loc": 236, "loc": 236,
"clippy_warnings": 8, "clippy_warnings": 8,
"doc_debt": 6 "doc_debt": 6
},
"testing/src/lib.rs": {
"loc": 10,
"doc_debt": 0
} }
} }
} }

View File

@ -7,6 +7,7 @@ COVERAGE_JSON="${REPORT_DIR}/coverage.json"
SUMMARY_TXT="${REPORT_DIR}/summary.txt" SUMMARY_TXT="${REPORT_DIR}/summary.txt"
METRICS_FILE="${REPORT_DIR}/metrics.prom" METRICS_FILE="${REPORT_DIR}/metrics.prom"
BASELINE_JSON="${ROOT_DIR}/scripts/ci/quality_gate_baseline.json" BASELINE_JSON="${ROOT_DIR}/scripts/ci/quality_gate_baseline.json"
COVERAGE_CONTRACT_JSON="${ROOT_DIR}/testing/coverage_contract.json"
PUSHGATEWAY_URL=${QUALITY_GATE_PUSHGATEWAY_URL:-} PUSHGATEWAY_URL=${QUALITY_GATE_PUSHGATEWAY_URL:-}
mkdir -p "${REPORT_DIR}" mkdir -p "${REPORT_DIR}"
@ -82,7 +83,7 @@ publish_metrics() {
status=0 status=0
if cargo llvm-cov --workspace --all-targets --summary-only --json --output-path "${COVERAGE_JSON}"; then if cargo llvm-cov --workspace --all-targets --summary-only --json --output-path "${COVERAGE_JSON}"; then
if python3 - "${COVERAGE_JSON}" "${BASELINE_JSON}" "${METRICS_FILE}" "${SUMMARY_TXT}" "${ROOT_DIR}" <<'PY' if python3 - "${COVERAGE_JSON}" "${BASELINE_JSON}" "${METRICS_FILE}" "${SUMMARY_TXT}" "${ROOT_DIR}" "${COVERAGE_CONTRACT_JSON}" <<'PY'
import json import json
import pathlib import pathlib
import sys import sys
@ -93,6 +94,7 @@ baseline_path = pathlib.Path(sys.argv[2])
metrics_path = pathlib.Path(sys.argv[3]) metrics_path = pathlib.Path(sys.argv[3])
summary_path = pathlib.Path(sys.argv[4]) summary_path = pathlib.Path(sys.argv[4])
root = pathlib.Path(sys.argv[5]) root = pathlib.Path(sys.argv[5])
contract_path = pathlib.Path(sys.argv[6])
with coverage_path.open('r', encoding='utf-8') as fh: with coverage_path.open('r', encoding='utf-8') as fh:
report = json.load(fh) report = json.load(fh)
@ -138,6 +140,29 @@ for path, current in current_by_path.items():
f"{path}: line coverage fell from {baseline_entry['line_percent']:.2f}% to {current['line_percent']:.2f}%" f"{path}: line coverage fell from {baseline_entry['line_percent']:.2f}% to {current['line_percent']:.2f}%"
) )
# Strict per-file testing contract: files listed in testing/coverage_contract.json
# must meet the contract's minimum line coverage AND stay within the 500 LOC cap.
# Defaults keep the gate working when the contract file is absent.
coverage_contract = {'minimum_line_percent': 95.0, 'files': []}
if contract_path.exists():
    with contract_path.open('r', encoding='utf-8') as fh:
        coverage_contract = json.load(fh)
contract_min = float(coverage_contract.get('minimum_line_percent', 95.0))
# dict.fromkeys dedupes while preserving the contract's declared order.
contract_files = list(dict.fromkeys(coverage_contract.get('files', [])))
contract_failures = []
contract_files_at_target = 0
for path in contract_files:
    # 'entry' avoids shadowing the 'current' loop variable used earlier in
    # this script for the baseline comparison.
    entry = current_by_path.get(path)
    if entry is None:
        contract_failures.append(f'{path}: missing from coverage report')
        continue
    file_ok = True
    # The 0.01 tolerance absorbs float rounding in llvm-cov percentages.
    if entry['line_percent'] + 0.01 < contract_min:
        file_ok = False
        contract_failures.append(
            f'{path}: contract requires >= {contract_min:.2f}% line coverage, found {entry["line_percent"]:.2f}%'
        )
    # Check the LOC cap independently of coverage so an under-covered file
    # that is also oversized reports both violations.
    if entry['loc'] > 500:
        file_ok = False
        contract_failures.append(f'{path}: contract requires <= 500 LOC, found {entry["loc"]}')
    # "At target" means the file satisfies the full contract, matching the
    # summary line "(>= X% and <= 500 LOC)" printed later.
    if file_ok:
        contract_files_at_target += 1
workspace_lines = float(coverage_totals['lines']['percent']) workspace_lines = float(coverage_totals['lines']['percent'])
files_at_95 = sum(1 for item in files if item['line_percent'] >= 95.0) files_at_95 = sum(1 for item in files if item['line_percent'] >= 95.0)
files_below_95 = len(files) - files_at_95 files_below_95 = len(files) - files_at_95
@ -146,7 +171,7 @@ over_500 = sum(1 for item in files if item['loc'] > 500)
metrics = [] metrics = []
metrics.append('# HELP platform_quality_gate_runs_total Number of quality gate runs by result.') metrics.append('# HELP platform_quality_gate_runs_total Number of quality gate runs by result.')
metrics.append('# TYPE platform_quality_gate_runs_total counter') metrics.append('# TYPE platform_quality_gate_runs_total counter')
status_label = 'ok' if not regressions and not missing_from_baseline else 'failed' status_label = 'ok' if not regressions and not missing_from_baseline and not contract_failures else 'failed'
metrics.append(f'platform_quality_gate_runs_total{{suite="lesavka",status="{status_label}"}} 1') metrics.append(f'platform_quality_gate_runs_total{{suite="lesavka",status="{status_label}"}} 1')
metrics.append('# HELP platform_quality_gate_workspace_line_coverage_percent Workspace line coverage percent.') metrics.append('# HELP platform_quality_gate_workspace_line_coverage_percent Workspace line coverage percent.')
metrics.append('# TYPE platform_quality_gate_workspace_line_coverage_percent gauge') metrics.append('# TYPE platform_quality_gate_workspace_line_coverage_percent gauge')
@ -163,6 +188,17 @@ metrics.append(f'platform_quality_gate_files_below_95_total{{suite="lesavka"}} {
metrics.append('# HELP platform_quality_gate_source_lines_over_500_total Count of tracked source files over 500 LOC.') metrics.append('# HELP platform_quality_gate_source_lines_over_500_total Count of tracked source files over 500 LOC.')
metrics.append('# TYPE platform_quality_gate_source_lines_over_500_total gauge') metrics.append('# TYPE platform_quality_gate_source_lines_over_500_total gauge')
metrics.append(f'platform_quality_gate_source_lines_over_500_total{{suite="lesavka"}} {over_500}') metrics.append(f'platform_quality_gate_source_lines_over_500_total{{suite="lesavka"}} {over_500}')
metrics.append('# HELP platform_quality_gate_contract_files_total Count of files covered by the strict testing coverage contract.')
metrics.append('# TYPE platform_quality_gate_contract_files_total gauge')
metrics.append(f'platform_quality_gate_contract_files_total{{suite="lesavka"}} {len(contract_files)}')
metrics.append('# HELP platform_quality_gate_contract_files_at_target_total Count of strict contract files meeting the line coverage target.')
metrics.append('# TYPE platform_quality_gate_contract_files_at_target_total gauge')
metrics.append(f'platform_quality_gate_contract_files_at_target_total{{suite="lesavka"}} {contract_files_at_target}')
metrics.append('# HELP platform_quality_gate_contract_files_below_target_total Count of strict contract files missing the line coverage target or LOC cap.')
metrics.append('# TYPE platform_quality_gate_contract_files_below_target_total gauge')
metrics.append(
f'platform_quality_gate_contract_files_below_target_total{{suite="lesavka"}} {len(contract_failures)}'
)
metrics.append('# HELP platform_quality_gate_file_line_coverage_percent Per-file line coverage percent.') metrics.append('# HELP platform_quality_gate_file_line_coverage_percent Per-file line coverage percent.')
metrics.append('# TYPE platform_quality_gate_file_line_coverage_percent gauge') metrics.append('# TYPE platform_quality_gate_file_line_coverage_percent gauge')
metrics.append('# HELP platform_quality_gate_file_source_lines Per-file source line count.') metrics.append('# HELP platform_quality_gate_file_source_lines Per-file source line count.')
@ -189,6 +225,7 @@ lines.append(f'source files tracked: {len(files)}')
lines.append(f'files >= 95% line coverage: {files_at_95}') lines.append(f'files >= 95% line coverage: {files_at_95}')
lines.append(f'files < 95% line coverage: {files_below_95}') lines.append(f'files < 95% line coverage: {files_below_95}')
lines.append(f'files over 500 LOC: {over_500}') lines.append(f'files over 500 LOC: {over_500}')
lines.append(f'strict contract files at target: {contract_files_at_target}/{len(contract_files)} (>= {contract_min:.2f}% and <= 500 LOC)')
lines.append('') lines.append('')
lines.append('path | loc | line coverage | baseline loc | baseline coverage | status') lines.append('path | loc | line coverage | baseline loc | baseline coverage | status')
lines.append('-' * 86) lines.append('-' * 86)
@ -208,6 +245,17 @@ for item in files:
f"{item['path']} | {item['loc']} | {item['line_percent']:.2f}% | {baseline_loc} | {baseline_cov} | {status}" f"{item['path']} | {item['loc']} | {item['line_percent']:.2f}% | {baseline_loc} | {baseline_cov} | {status}"
) )
if contract_files:
lines.append('')
lines.append('strict testing coverage contract')
lines.append('-' * 86)
for path in contract_files:
current = current_by_path.get(path)
if current is None:
lines.append(f'{path} | missing')
else:
lines.append(f'{path} | {current["loc"]} LOC | {current["line_percent"]:.2f}%')
summary_path.write_text('\n'.join(lines) + '\n', encoding='utf-8') summary_path.write_text('\n'.join(lines) + '\n', encoding='utf-8')
print(summary_path.read_text(encoding='utf-8')) print(summary_path.read_text(encoding='utf-8'))
@ -215,9 +263,11 @@ print(summary_path.read_text(encoding='utf-8'))
if missing_from_baseline: if missing_from_baseline:
print('missing baseline entries:', ', '.join(missing_from_baseline), file=sys.stderr) print('missing baseline entries:', ', '.join(missing_from_baseline), file=sys.stderr)
if regressions or missing_from_baseline: if regressions or missing_from_baseline or contract_failures:
for line in regressions: for line in regressions:
print(line, file=sys.stderr) print(line, file=sys.stderr)
for line in contract_failures:
print(line, file=sys.stderr)
raise SystemExit(1) raise SystemExit(1)
PY PY
then then

16
testing/Cargo.toml Normal file
View File

@ -0,0 +1,16 @@
# Dedicated workspace member that owns all cross-crate integration tests.
[package]
name = "lesavka_testing"
version = "0.1.0"
edition = "2024"
# Internal test harness only; must never be published to a registry.
publish = false
# An explicit (near-empty) lib target gives `cargo test -p lesavka_testing`
# a stable anchor for the tests/ directory.
[lib]
name = "lesavka_testing"
path = "src/lib.rs"
# Everything is dev-only: the crate exists solely to compile its test suite.
[dev-dependencies]
evdev = "0.13"
libc = "0.2"
lesavka_client = { path = "../client" }
lesavka_common = { path = "../common" }
lesavka_server = { path = "../server" }

View File

@ -0,0 +1,10 @@
{
"minimum_line_percent": 95.0,
"files": [
"client/src/app_support.rs",
"client/src/input/keymap.rs",
"common/src/hid.rs",
"common/src/paste.rs",
"server/src/paste.rs"
]
}

10
testing/src/lib.rs Normal file
View File

@ -0,0 +1,10 @@
//! Top-level integration testing crate for the lesavka workspace.
//!
//! Scope: keep cross-crate and contract-style tests out of package-local
//! `src/tests` and `tests` folders so CI has one integration test entrypoint.
//! Targets: the `testing/tests` suite plus coverage contracts consumed by the
//! Jenkins quality gate.
//! Why: a single top-level testing module is easier to scale, review, and
//! ratchet than ad-hoc integration tests spread across workspace members.

// The library body is intentionally empty — all value lives under tests/.
// Forbidding unsafe keeps this test-support surface trivially auditable.
#![forbid(unsafe_code)]

View File

@ -0,0 +1,161 @@
//! Integration coverage for the client HID keymap contract.
//!
//! Scope: exercise the public keycode and modifier helpers through the
//! top-level testing crate instead of package-local integration tests.
//! Targets: `client/src/input/keymap.rs` and its shared-character delegation.
//! Why: the full keyboard map is large enough that one dedicated contract test
//! is clearer than scattering smoke coverage across unrelated crates.
use evdev::KeyCode;
use lesavka_client::input::keymap::{char_to_usage, is_modifier, keycode_to_usage};
/// Sweeps every `(KeyCode, usage)` pair the client keymap translates, then
/// confirms that modifier and reserved keys deliberately map to `None`.
///
/// NOTE(review): the expected values appear to follow the USB HID keyboard
/// usage page (0x07) — confirm against `client/src/input/keymap.rs` before
/// editing entries.
#[test]
fn keycode_to_usage_covers_each_supported_keyboard_block() {
    let cases = [
        // Letters A-Z -> contiguous usages 0x04..=0x1D.
        (KeyCode::KEY_A, 0x04),
        (KeyCode::KEY_B, 0x05),
        (KeyCode::KEY_C, 0x06),
        (KeyCode::KEY_D, 0x07),
        (KeyCode::KEY_E, 0x08),
        (KeyCode::KEY_F, 0x09),
        (KeyCode::KEY_G, 0x0A),
        (KeyCode::KEY_H, 0x0B),
        (KeyCode::KEY_I, 0x0C),
        (KeyCode::KEY_J, 0x0D),
        (KeyCode::KEY_K, 0x0E),
        (KeyCode::KEY_L, 0x0F),
        (KeyCode::KEY_M, 0x10),
        (KeyCode::KEY_N, 0x11),
        (KeyCode::KEY_O, 0x12),
        (KeyCode::KEY_P, 0x13),
        (KeyCode::KEY_Q, 0x14),
        (KeyCode::KEY_R, 0x15),
        (KeyCode::KEY_S, 0x16),
        (KeyCode::KEY_T, 0x17),
        (KeyCode::KEY_U, 0x18),
        (KeyCode::KEY_V, 0x19),
        (KeyCode::KEY_W, 0x1A),
        (KeyCode::KEY_X, 0x1B),
        (KeyCode::KEY_Y, 0x1C),
        (KeyCode::KEY_Z, 0x1D),
        // Digits 1-9 then 0 -> 0x1E..=0x27.
        (KeyCode::KEY_1, 0x1E),
        (KeyCode::KEY_2, 0x1F),
        (KeyCode::KEY_3, 0x20),
        (KeyCode::KEY_4, 0x21),
        (KeyCode::KEY_5, 0x22),
        (KeyCode::KEY_6, 0x23),
        (KeyCode::KEY_7, 0x24),
        (KeyCode::KEY_8, 0x25),
        (KeyCode::KEY_9, 0x26),
        (KeyCode::KEY_0, 0x27),
        // Editing, whitespace, and punctuation keys.
        (KeyCode::KEY_ENTER, 0x28),
        (KeyCode::KEY_ESC, 0x29),
        (KeyCode::KEY_BACKSPACE, 0x2A),
        (KeyCode::KEY_TAB, 0x2B),
        (KeyCode::KEY_SPACE, 0x2C),
        (KeyCode::KEY_MINUS, 0x2D),
        (KeyCode::KEY_EQUAL, 0x2E),
        (KeyCode::KEY_LEFTBRACE, 0x2F),
        (KeyCode::KEY_RIGHTBRACE, 0x30),
        (KeyCode::KEY_BACKSLASH, 0x31),
        (KeyCode::KEY_SEMICOLON, 0x33),
        (KeyCode::KEY_APOSTROPHE, 0x34),
        (KeyCode::KEY_GRAVE, 0x35),
        (KeyCode::KEY_COMMA, 0x36),
        (KeyCode::KEY_DOT, 0x37),
        (KeyCode::KEY_SLASH, 0x38),
        (KeyCode::KEY_CAPSLOCK, 0x39),
        // Function keys F1-F12 -> 0x3A..=0x45.
        (KeyCode::KEY_F1, 0x3A),
        (KeyCode::KEY_F2, 0x3B),
        (KeyCode::KEY_F3, 0x3C),
        (KeyCode::KEY_F4, 0x3D),
        (KeyCode::KEY_F5, 0x3E),
        (KeyCode::KEY_F6, 0x3F),
        (KeyCode::KEY_F7, 0x40),
        (KeyCode::KEY_F8, 0x41),
        (KeyCode::KEY_F9, 0x42),
        (KeyCode::KEY_F10, 0x43),
        (KeyCode::KEY_F11, 0x44),
        (KeyCode::KEY_F12, 0x45),
        // System and navigation keys.
        (KeyCode::KEY_SYSRQ, 0x46),
        (KeyCode::KEY_SCROLLLOCK, 0x47),
        (KeyCode::KEY_PAUSE, 0x48),
        (KeyCode::KEY_INSERT, 0x49),
        (KeyCode::KEY_HOME, 0x4A),
        (KeyCode::KEY_PAGEUP, 0x4B),
        (KeyCode::KEY_DELETE, 0x4C),
        (KeyCode::KEY_END, 0x4D),
        (KeyCode::KEY_PAGEDOWN, 0x4E),
        (KeyCode::KEY_RIGHT, 0x4F),
        (KeyCode::KEY_LEFT, 0x50),
        (KeyCode::KEY_DOWN, 0x51),
        (KeyCode::KEY_UP, 0x52),
        // Keypad block.
        (KeyCode::KEY_NUMLOCK, 0x53),
        (KeyCode::KEY_KPSLASH, 0x54),
        (KeyCode::KEY_KPASTERISK, 0x55),
        (KeyCode::KEY_KPMINUS, 0x56),
        (KeyCode::KEY_KPPLUS, 0x57),
        (KeyCode::KEY_KPENTER, 0x58),
        (KeyCode::KEY_KP1, 0x59),
        (KeyCode::KEY_KP2, 0x5A),
        (KeyCode::KEY_KP3, 0x5B),
        (KeyCode::KEY_KP4, 0x5C),
        (KeyCode::KEY_KP5, 0x5D),
        (KeyCode::KEY_KP6, 0x5E),
        (KeyCode::KEY_KP7, 0x5F),
        (KeyCode::KEY_KP8, 0x60),
        (KeyCode::KEY_KP9, 0x61),
        (KeyCode::KEY_KP0, 0x62),
        (KeyCode::KEY_KPDOT, 0x63),
        (KeyCode::KEY_KPEQUAL, 0x67),
        // International and menu keys.
        (KeyCode::KEY_102ND, 0x64),
        (KeyCode::KEY_MENU, 0x65),
    ];
    for (key, usage) in cases {
        assert_eq!(
            keycode_to_usage(key),
            Some(usage),
            "unexpected mapping for {key:?}"
        );
    }
    // Modifiers are reported through is_modifier, never as plain usages,
    // and KEY_RESERVED is outside the supported map entirely.
    assert_eq!(keycode_to_usage(KeyCode::KEY_LEFTCTRL), None);
    assert_eq!(keycode_to_usage(KeyCode::KEY_RESERVED), None);
}
/// Every HID modifier key must report its own bit of the modifier byte, in
/// LSB-to-MSB order; an ordinary letter key must report no modifier at all.
#[test]
fn is_modifier_covers_each_supported_modifier_bit() {
    // Listed in bit order, so the expected mask is simply 1 << index.
    let ordered_modifiers = [
        KeyCode::KEY_LEFTCTRL,
        KeyCode::KEY_LEFTSHIFT,
        KeyCode::KEY_LEFTALT,
        KeyCode::KEY_LEFTMETA,
        KeyCode::KEY_RIGHTCTRL,
        KeyCode::KEY_RIGHTSHIFT,
        KeyCode::KEY_RIGHTALT,
        KeyCode::KEY_RIGHTMETA,
    ];
    for (bit, key) in ordered_modifiers.into_iter().enumerate() {
        assert_eq!(
            is_modifier(key),
            Some(1u8 << bit),
            "unexpected modifier for {key:?}"
        );
    }
    assert_eq!(is_modifier(KeyCode::KEY_A), None);
}
/// The client re-export must hand back exactly the shared HID table's answers
/// for representative printable, shifted, whitespace, and non-ASCII inputs.
#[test]
fn char_to_usage_exposes_shared_hid_mapping_from_the_client_surface() {
    let expectations = [
        ('a', Some((0x04, 0x00))),
        ('A', Some((0x04, 0x02))),
        ('0', Some((0x27, 0x00))),
        ('!', Some((0x1E, 0x02))),
        ('?', Some((0x38, 0x02))),
        ('\t', Some((0x2B, 0x00))),
        ('\n', Some((0x28, 0x00))),
        // Non-ASCII characters are outside the contract.
        ('é', None),
    ];
    for (ch, expected) in expectations {
        assert_eq!(char_to_usage(ch), expected);
    }
}

View File

@ -1,3 +1,11 @@
//! Integration coverage for public server runtime helpers.
//!
//! Scope: keep public helper smoke tests in the shared top-level testing
//! module rather than the server crate's local `tests/` folder.
//! Targets: `server/src/runtime_support.rs` public pure helpers.
//! Why: these helpers are part of a cross-crate contract surface and fit the
//! centralized testing layout better than package-scoped integration tests.
use lesavka_server::runtime_support::{next_stream_id, should_recover_hid_error}; use lesavka_server::runtime_support::{next_stream_id, should_recover_hid_error};
#[test] #[test]

View File

@ -0,0 +1,79 @@
//! Integration coverage for the shared printable-character HID contract.
//!
//! Scope: verify the shared character-to-HID table from the top-level testing
//! module so both client and server rely on the same canonical assertions.
//! Targets: `common/src/hid.rs`.
//! Why: this mapping is a cross-crate contract and should live in one obvious
//! place instead of being duplicated by package-local test suites.
use lesavka_common::hid::char_to_usage;
/// Exhaustively checks the printable-ASCII contract of the shared table:
/// lowercase and uppercase letters, digits, shifted and unshifted symbols,
/// and the whitespace characters.
#[test]
fn char_to_usage_covers_letters_digits_symbols_and_whitespace() {
    // Lowercase letters occupy the contiguous usage block starting at 0x04.
    for (usage, ch) in (0x04u8..).zip('a'..='z') {
        assert_eq!(char_to_usage(ch), Some((usage, 0x00)));
    }
    // Uppercase letters reuse the same usages with the shift modifier (0x02).
    for (usage, ch) in (0x04u8..).zip('A'..='Z') {
        assert_eq!(char_to_usage(ch), Some((usage, 0x02)));
    }
    // Digits 1-9 start at 0x1E; '0' closes the block at 0x27.
    for (usage, ch) in (0x1Eu8..).zip('1'..='9') {
        assert_eq!(char_to_usage(ch), Some((usage, 0x00)));
    }
    assert_eq!(char_to_usage('0'), Some((0x27, 0x00)));
    // Symbols and whitespace as (character, (usage, modifier)) pairs.
    let cases = [
        ('!', (0x1E, 0x02)),
        ('@', (0x1F, 0x02)),
        ('#', (0x20, 0x02)),
        ('$', (0x21, 0x02)),
        ('%', (0x22, 0x02)),
        ('^', (0x23, 0x02)),
        ('&', (0x24, 0x02)),
        ('*', (0x25, 0x02)),
        ('(', (0x26, 0x02)),
        (')', (0x27, 0x02)),
        ('-', (0x2D, 0x00)),
        ('_', (0x2D, 0x02)),
        ('=', (0x2E, 0x00)),
        ('+', (0x2E, 0x02)),
        ('[', (0x2F, 0x00)),
        ('{', (0x2F, 0x02)),
        (']', (0x30, 0x00)),
        ('}', (0x30, 0x02)),
        ('\\', (0x31, 0x00)),
        ('|', (0x31, 0x02)),
        (';', (0x33, 0x00)),
        (':', (0x33, 0x02)),
        ('\'', (0x34, 0x00)),
        ('"', (0x34, 0x02)),
        ('`', (0x35, 0x00)),
        ('~', (0x35, 0x02)),
        (',', (0x36, 0x00)),
        ('<', (0x36, 0x02)),
        ('.', (0x37, 0x00)),
        ('>', (0x37, 0x02)),
        ('/', (0x38, 0x00)),
        ('?', (0x38, 0x02)),
        (' ', (0x2C, 0x00)),
        // '\n' and '\r' both land on Enter; '\t' on Tab.
        ('\n', (0x28, 0x00)),
        ('\r', (0x28, 0x00)),
        ('\t', (0x2B, 0x00)),
    ];
    for (ch, expected) in cases {
        assert_eq!(
            char_to_usage(ch),
            Some(expected),
            "unexpected mapping for {ch:?}"
        );
    }
}
/// Characters beyond the ASCII contract (accented letters, emoji, other
/// Unicode symbols) must have no HID mapping at all.
#[test]
fn char_to_usage_rejects_characters_outside_the_ascii_contract() {
    let unsupported = ['é', '🙂', '\u{2603}'];
    for ch in unsupported {
        assert_eq!(char_to_usage(ch), None, "unexpected support for {ch:?}");
    }
}