release: bump to 0.15.0 and stabilize mjpeg sync gates

This commit is contained in:
Brad Stein 2026-04-29 01:25:06 -03:00
parent 37673cb131
commit ea0b17b769
50 changed files with 2824 additions and 1952 deletions

8
Cargo.lock generated
View File

@ -1642,7 +1642,7 @@ checksum = "09edd9e8b54e49e587e4f6295a7d29c3ea94d469cb40ab8ca70b288248a81db2"
[[package]]
name = "lesavka_client"
version = "0.14.48"
version = "0.15.0"
dependencies = [
"anyhow",
"async-stream",
@ -1676,7 +1676,7 @@ dependencies = [
[[package]]
name = "lesavka_common"
version = "0.14.48"
version = "0.15.0"
dependencies = [
"anyhow",
"base64",
@ -1688,7 +1688,7 @@ dependencies = [
[[package]]
name = "lesavka_server"
version = "0.14.48"
version = "0.15.0"
dependencies = [
"anyhow",
"base64",
@ -1720,6 +1720,7 @@ name = "lesavka_testing"
version = "0.1.0"
dependencies = [
"anyhow",
"async-stream",
"chacha20poly1305",
"chrono",
"evdev",
@ -1732,6 +1733,7 @@ dependencies = [
"lesavka_common",
"lesavka_server",
"libc",
"serde",
"serde_json",
"serial_test",
"shell-escape",

View File

@ -10,12 +10,12 @@ The point is simple: sit down at the desk, confirm the equipment side is awake,
## What It Does
- Shows the left and right eye feeds in the launcher, with breakout windows when you want more room.
- Lets you stage input and output choices before a session starts.
- Moves keyboard and pointer ownership between the operator station and the equipment side on purpose, not by accident.
- Keeps capture power and GPIO state visible to tell whether the capture devices are actually awake.
- Keeps diagnostics and logs close by so a weird media/device state is something we can prove, not hand-wave.
- Installs through repeatable client and server scripts so a reboot or reinstall does not leave mystery settings floating around.
- Shows the left and right eye feeds in the launcher, with breakout windows.
- Stage input and output choices before a session starts.
- Moves input between client and server with a single button.
- Keeps capture power and GPIO state visible.
- Keeps diagnostics and logs upfront for debugging.
- Installs through idempotent client and server scripts.
## Install / Update
@ -40,8 +40,8 @@ The install scripts are the trusted path. They make the expected directories, in
1. Launch `Lesavka` from the desktop menu or run `lesavka`.
2. Refresh devices if hardware changed.
3. Pick the camera, camera quality, speaker, microphone, keyboard, and mouse you want for the next run.
4. Confirm the server chip is green before trusting the session. Yellow means the server is visible but no live relay is connected yet. Red means treat it as missing.
5. Connect the relay, watch both eyes come online, then move inputs when you are ready.
4. Confirm the server chip is blue before trusting the session.
5. Connect the relay, watch both eyes come online, then move inputs.
6. Use diagnostics and the session console when the bench feels wrong. The log should say what happened.
## Media Notes
@ -61,7 +61,7 @@ The gate order is:
style/docs -> LOC/naming -> coverage -> tests -> media reliability -> gate glue -> SonarQube -> supply chain/artifact security
```
TLDR: formatting and hygiene first, source files under the line limit, every tracked source file at 95%+ coverage, normal tests green, media tests proving frames keep moving, then the reporting/security checks.
So formatting and hygiene first, source files under the line limit, every tracked source file at 95%+ coverage, normal tests green, media tests proving frames keep moving, then the reporting/security checks.
Useful entry points:

View File

@ -4,7 +4,7 @@ path = "src/main.rs"
[package]
name = "lesavka_client"
version = "0.14.48"
version = "0.15.0"
edition = "2024"
[dependencies]

View File

@ -320,6 +320,7 @@ impl MouseAggregator {
}
}
#[allow(dead_code)]
fn flush(&mut self) {
self.runtime().flush();
}

View File

@ -13,10 +13,10 @@ use std::time::Duration;
fn open_virtual_node(vdev: &mut evdev::uinput::VirtualDevice) -> Option<PathBuf> {
for _ in 0..40 {
if let Ok(mut nodes) = vdev.enumerate_dev_nodes_blocking() {
if let Some(Ok(path)) = nodes.next() {
return Some(path);
}
if let Ok(mut nodes) = vdev.enumerate_dev_nodes_blocking()
&& let Some(Ok(path)) = nodes.next()
{
return Some(path);
}
thread::sleep(Duration::from_millis(10));
}

View File

@ -137,6 +137,7 @@ impl SourcePtsRebaser {
/// Outputs: a rebased packet timestamp and the values used to derive it.
/// Why: source PTS should drive packet timing when available, but packets
/// must still remain monotonic even if buffers repeat or arrive oddly.
#[allow(dead_code)]
#[must_use]
pub fn rebase_or_now(&self, source_pts_us: Option<u64>, min_step_us: u64) -> RebasedSourcePts {
self.rebase_with_lag_cap(source_pts_us, min_step_us, None)
@ -355,7 +356,10 @@ mod tests {
std::thread::sleep(Duration::from_millis(5));
let first_camera = camera.rebase_or_now(Some(435_000), 1);
assert_eq!(first_microphone.capture_base_us, first_camera.capture_base_us);
assert_eq!(
first_microphone.capture_base_us,
first_camera.capture_base_us
);
assert_eq!(first_microphone.packet_pts_us, first_camera.packet_pts_us);
assert_eq!(first_microphone.source_base_us, Some(80_000));
assert_eq!(first_camera.source_base_us, Some(435_000));

View File

@ -91,10 +91,10 @@ pub(crate) fn detect_video_segments(
}
let active_fraction = active_frames as f64 / frame_count as f64;
let median_segment_duration_s = median(segments.iter().map(|segment| segment.duration_s).collect());
let median_segment_duration_s =
median(segments.iter().map(|segment| segment.duration_s).collect());
if active_fraction > MAX_VIDEO_ACTIVE_FRAME_FRACTION
&& median_segment_duration_s
<= frame_step_s * MAX_VIDEO_FLICKER_SEGMENT_FRAME_MULTIPLIER
&& median_segment_duration_s <= frame_step_s * MAX_VIDEO_FLICKER_SEGMENT_FRAME_MULTIPLIER
{
bail!("video flash trace looks like frame-to-frame flicker, not sync pulses");
}
@ -240,7 +240,7 @@ pub(super) fn median(mut values: Vec<f64>) -> f64 {
}
values.sort_by(|left, right| left.total_cmp(right));
let mid = values.len() / 2;
if values.len() % 2 == 0 {
if values.len().is_multiple_of(2) {
(values[mid - 1] + values[mid]) / 2.0
} else {
values[mid]

View File

@ -1,9 +1,9 @@
use anyhow::{bail, Result};
use anyhow::{Result, bail};
use std::collections::BTreeMap;
use crate::sync_probe::analyze::report::SyncAnalysisReport;
use super::{median, PulseSegment};
use super::{PulseSegment, median};
#[path = "correlation_collapse.rs"]
mod collapse;
@ -36,8 +36,8 @@ pub(super) fn correlate_onsets(
let (video_onsets_s, audio_onsets_s, common_window) =
trim_onsets_to_common_activity_window(video_onsets_s, audio_onsets_s, max_pair_gap_s);
let expected_start_skew_ms = (audio_onsets_s[0] - video_onsets_s[0]) * 1000.0;
let video_pulses = index_onsets_by_spacing(&video_onsets_s, pulse_period_s);
let audio_pulses = index_onsets_by_spacing(&audio_onsets_s, pulse_period_s);
let video_pulses = index_onsets_by_spacing(video_onsets_s, pulse_period_s);
let audio_pulses = index_onsets_by_spacing(audio_onsets_s, pulse_period_s);
let offset_candidates = candidate_index_offsets(&video_pulses, &audio_pulses);
let mut skews_ms = best_skews_for_index_offsets(
&video_pulses,
@ -48,8 +48,8 @@ pub(super) fn correlate_onsets(
);
if skews_ms.is_empty() && video_onsets_s.len() == 1 && audio_onsets_s.len() == 1 {
let video_phase_s = estimate_phase(&video_onsets_s, pulse_period_s);
let audio_phase_s = estimate_phase(&audio_onsets_s, pulse_period_s);
let video_phase_s = estimate_phase(video_onsets_s, pulse_period_s);
let audio_phase_s = estimate_phase(audio_onsets_s, pulse_period_s);
let phase_skew_ms =
shortest_wrapped_difference(audio_phase_s - video_phase_s, pulse_period_s) * 1000.0;
if phase_skew_ms.abs() <= max_pair_gap_s * 1000.0 {
@ -62,8 +62,8 @@ pub(super) fn correlate_onsets(
}
Ok(sync_report_from_skews(
common_window.filter_onsets(&video_onsets_s),
common_window.filter_onsets(&audio_onsets_s),
common_window.filter_onsets(video_onsets_s),
common_window.filter_onsets(audio_onsets_s),
skews_ms,
))
}
@ -123,13 +123,13 @@ pub(crate) fn correlate_segments(
let audio_marker_onsets = marker_onsets(&audio_segments, pulse_width_s);
let video_marker_onsets = common_window.filter_onsets(&video_marker_onsets);
let audio_marker_onsets = common_window.filter_onsets(&audio_marker_onsets);
let video_indexed = index_onsets_by_spacing(&video_onsets_s, pulse_period_s);
let audio_indexed = index_onsets_by_spacing(&audio_onsets_s, pulse_period_s);
let video_indexed = index_onsets_by_spacing(video_onsets_s, pulse_period_s);
let audio_indexed = index_onsets_by_spacing(audio_onsets_s, pulse_period_s);
let offset_candidates = marker_index_offsets(
&video_indexed,
&audio_indexed,
&video_marker_onsets,
&audio_marker_onsets,
video_marker_onsets,
audio_marker_onsets,
);
let mut skews_ms = best_skews_for_index_offsets(
&video_indexed,
@ -140,8 +140,8 @@ pub(crate) fn correlate_segments(
);
if skews_ms.is_empty() && video_onsets_s.len() == 1 && audio_onsets_s.len() == 1 {
let video_phase_s = estimate_phase(&video_onsets_s, pulse_period_s);
let audio_phase_s = estimate_phase(&audio_onsets_s, pulse_period_s);
let video_phase_s = estimate_phase(video_onsets_s, pulse_period_s);
let audio_phase_s = estimate_phase(audio_onsets_s, pulse_period_s);
let phase_skew_ms =
shortest_wrapped_difference(audio_phase_s - video_phase_s, pulse_period_s) * 1000.0;
if phase_skew_ms.abs() <= max_pair_gap_s * 1000.0 {
@ -356,7 +356,7 @@ fn best_skews_for_index_offsets(
best_anchor_error_ms,
best_mean_abs_skew_ms,
_,
)) if startup_phase_anchor_consistent < *best_anchor_consistent
)) if (!startup_phase_anchor_consistent && *best_anchor_consistent)
|| (startup_phase_anchor_consistent == *best_anchor_consistent
&& (skews_ms.len() < *best_count
|| (skews_ms.len() == *best_count

View File

@ -3,8 +3,8 @@ use super::correlation::{
index_onsets_by_spacing, marker_index_offsets, marker_onsets, shortest_wrapped_difference,
};
use super::{
correlate_segments, detect_audio_onsets, detect_audio_segments, detect_video_onsets,
detect_video_segments, median, PulseSegment,
PulseSegment, correlate_segments, detect_audio_onsets, detect_audio_segments,
detect_video_onsets, detect_video_segments, median,
};
use crate::sync_probe::analyze::report::SyncAnalysisReport;
use std::collections::BTreeMap;
@ -176,15 +176,17 @@ fn detect_video_onsets_rejects_empty_low_contrast_and_missing_edges() {
#[test]
fn detect_video_onsets_rejects_frame_to_frame_flicker() {
let timestamps = (0..120).map(|index| index as f64 / 30.0).collect::<Vec<_>>();
let timestamps = (0..120)
.map(|index| index as f64 / 30.0)
.collect::<Vec<_>>();
let brightness = (0..120)
.map(|index| if index % 2 == 0 { 0 } else { 6 })
.collect::<Vec<_>>();
let err = detect_video_onsets(&timestamps, &brightness).expect_err("flicker should be rejected");
let err =
detect_video_onsets(&timestamps, &brightness).expect_err("flicker should be rejected");
assert!(
err.to_string()
.contains("frame-to-frame flicker"),
err.to_string().contains("frame-to-frame flicker"),
"unexpected error: {err}"
);
}
@ -232,276 +234,6 @@ fn correlate_segments_validate_inputs_and_support_single_pulse_fallback() {
assert!(correlate_segments(&video, &audio, 1.0, 0.1, 3, 0.05).is_err());
}
#[test]
fn phase_estimation_and_indexing_stay_stable_when_pulses_are_missing() {
let video_phase = estimate_phase(&[4.0, 5.0, 7.0, 8.0, 10.0], 1.0);
let audio_phase = estimate_phase(&[4.018, 5.017, 6.019, 8.018, 9.018], 1.0);
assert!((video_phase - 0.0).abs() < 0.02);
assert!((audio_phase - 0.018).abs() < 0.02);
let video_indexed = index_onsets_by_spacing(&[4.0, 5.0, 7.0, 8.0, 10.0], 1.0);
let audio_indexed = index_onsets_by_spacing(&[4.018, 5.017, 6.019, 8.018, 9.018], 1.0);
assert_eq!(
video_indexed.keys().copied().collect::<Vec<_>>(),
vec![0, 1, 3, 4, 6]
);
assert_eq!(
audio_indexed.keys().copied().collect::<Vec<_>>(),
vec![0, 1, 2, 4, 5]
);
}
#[test]
fn correlation_helpers_cover_empty_index_sets_and_wrapped_phase_math() {
assert!(index_onsets_by_spacing(&[], 1.0).is_empty());
assert!(candidate_index_offsets(&BTreeMap::new(), &BTreeMap::new()).is_empty());
let mut video_only = BTreeMap::new();
video_only.insert(0, 1.0);
assert!(candidate_index_offsets(&video_only, &BTreeMap::new()).is_empty());
let mut audio_only = BTreeMap::new();
audio_only.insert(0, 1.0);
assert!(candidate_index_offsets(&BTreeMap::new(), &audio_only).is_empty());
let mut video_indexed = BTreeMap::new();
video_indexed.insert(2, 2.0);
let mut audio_indexed = BTreeMap::new();
audio_indexed.insert(5, 5.0);
assert_eq!(
candidate_index_offsets(&video_indexed, &audio_indexed),
vec![3]
);
assert!((shortest_wrapped_difference(0.6, 1.0) + 0.4).abs() < 0.000_001);
assert!((shortest_wrapped_difference(-0.6, 1.0) - 0.4).abs() < 0.000_001);
}
#[test]
fn marker_index_offsets_include_marker_alignment_and_general_fallback() {
let video_indexed = index_onsets_by_spacing(&[4.0, 5.0, 7.0, 8.0, 10.0], 1.0);
let audio_indexed = index_onsets_by_spacing(&[5.018, 6.017, 7.019, 9.018, 10.018], 1.0);
let offsets = marker_index_offsets(&video_indexed, &audio_indexed, &[10.0], &[10.018]);
assert!(offsets.contains(&1));
assert!(offsets.contains(&0));
}
#[test]
fn correlate_onsets_ignores_missing_pulses_and_preserves_stable_skew() {
let report = correlate_onsets(
&[4.0, 5.0, 7.0, 8.0, 10.0],
&[4.018, 5.017, 6.019, 8.018, 9.018],
1.0,
0.2,
)
.expect("correlated report");
assert_eq!(report.paired_event_count, 3);
assert!((report.mean_skew_ms - 17.666).abs() < 5.0);
assert!(report.max_abs_skew_ms < 30.0);
}
#[test]
fn correlate_segments_uses_markers_to_break_period_aliasing() {
let video = vec![
PulseSegment {
start_s: 3.3,
end_s: 3.55,
duration_s: 0.25,
},
PulseSegment {
start_s: 4.266667,
end_s: 4.4,
duration_s: 0.133333,
},
PulseSegment {
start_s: 5.3,
end_s: 5.433333,
duration_s: 0.133333,
},
];
let audio = vec![
PulseSegment {
start_s: 3.35,
end_s: 3.59,
duration_s: 0.24,
},
PulseSegment {
start_s: 4.316667,
end_s: 4.436667,
duration_s: 0.12,
},
PulseSegment {
start_s: 5.35,
end_s: 5.47,
duration_s: 0.12,
},
];
let report =
correlate_segments(&video, &audio, 1.0, 0.12, 5, 0.2).expect("marker-correlated report");
assert_eq!(report.paired_event_count, 3);
assert!((report.mean_skew_ms - 50.0).abs() < 10.0);
}
#[test]
fn collapse_segments_by_phase_keeps_one_best_segment_per_pulse_slot() {
let collapsed = collapse_segments_by_phase(
&[
PulseSegment {
start_s: 4.00,
end_s: 4.12,
duration_s: 0.12,
},
PulseSegment {
start_s: 4.08,
end_s: 4.10,
duration_s: 0.02,
},
PulseSegment {
start_s: 5.01,
end_s: 5.13,
duration_s: 0.12,
},
PulseSegment {
start_s: 5.09,
end_s: 5.11,
duration_s: 0.02,
},
],
1.0,
0.32,
);
assert_eq!(collapsed.len(), 2);
assert!((collapsed[0].start_s - 4.0).abs() < 0.001);
assert!((collapsed[1].start_s - 5.01).abs() < 0.001);
}
#[test]
fn collapse_segments_by_phase_prefers_the_longest_regular_cadence() {
let collapsed = collapse_segments_by_phase(
&[
PulseSegment {
start_s: 4.00,
end_s: 4.12,
duration_s: 0.12,
},
PulseSegment {
start_s: 5.02,
end_s: 5.14,
duration_s: 0.12,
},
PulseSegment {
start_s: 6.00,
end_s: 6.12,
duration_s: 0.12,
},
PulseSegment {
start_s: 7.01,
end_s: 7.13,
duration_s: 0.12,
},
PulseSegment {
start_s: 4.42,
end_s: 4.67,
duration_s: 0.25,
},
PulseSegment {
start_s: 6.42,
end_s: 6.67,
duration_s: 0.25,
},
],
1.0,
0.32,
);
assert_eq!(collapsed.len(), 4);
assert!((collapsed[0].start_s - 4.0).abs() < 0.001);
assert!((collapsed[3].start_s - 7.01).abs() < 0.001);
}
#[test]
fn correlate_segments_collapses_repeated_noise_within_each_pulse_slot() {
let video = vec![
PulseSegment {
start_s: 4.0,
end_s: 4.12,
duration_s: 0.12,
},
PulseSegment {
start_s: 4.03,
end_s: 4.05,
duration_s: 0.02,
},
PulseSegment {
start_s: 5.0,
end_s: 5.12,
duration_s: 0.12,
},
PulseSegment {
start_s: 5.03,
end_s: 5.05,
duration_s: 0.02,
},
];
let audio = vec![
PulseSegment {
start_s: 4.02,
end_s: 4.14,
duration_s: 0.12,
},
PulseSegment {
start_s: 4.05,
end_s: 4.07,
duration_s: 0.02,
},
PulseSegment {
start_s: 5.02,
end_s: 5.14,
duration_s: 0.12,
},
PulseSegment {
start_s: 5.05,
end_s: 5.07,
duration_s: 0.02,
},
];
let report = correlate_segments(&video, &audio, 1.0, 0.12, 5, 0.2).expect("collapsed");
assert_eq!(report.video_event_count, 2);
assert_eq!(report.audio_event_count, 2);
assert_eq!(report.paired_event_count, 2);
assert!(report.max_abs_skew_ms < 30.0);
}
#[test]
fn marker_detection_finds_wider_segments_only() {
let markers = marker_onsets(
&[
PulseSegment {
start_s: 1.0,
end_s: 1.12,
duration_s: 0.12,
},
PulseSegment {
start_s: 5.0,
end_s: 5.24,
duration_s: 0.24,
},
],
0.12,
);
assert_eq!(markers, vec![5.0]);
}
#[test]
fn median_handles_empty_even_and_odd_inputs() {
assert_eq!(median(Vec::new()), 0.0);
assert_eq!(median(vec![1.0, 3.0, 2.0]), 2.0);
assert_eq!(median(vec![4.0, 1.0, 3.0, 2.0]), 2.5);
}
fn assert_sync_report_shape(report: &SyncAnalysisReport, paired_events: usize) {
assert_eq!(report.video_event_count, paired_events);
assert_eq!(report.audio_event_count, paired_events);
@ -510,3 +242,5 @@ fn assert_sync_report_shape(report: &SyncAnalysisReport, paired_events: usize) {
assert_eq!(report.video_onsets_s.len(), paired_events);
assert_eq!(report.audio_onsets_s.len(), paired_events);
}
mod correlation_helpers;

View File

@ -0,0 +1,271 @@
use super::*;
#[test]
fn phase_estimation_and_indexing_stay_stable_when_pulses_are_missing() {
let video_phase = estimate_phase(&[4.0, 5.0, 7.0, 8.0, 10.0], 1.0);
let audio_phase = estimate_phase(&[4.018, 5.017, 6.019, 8.018, 9.018], 1.0);
assert!((video_phase - 0.0).abs() < 0.02);
assert!((audio_phase - 0.018).abs() < 0.02);
let video_indexed = index_onsets_by_spacing(&[4.0, 5.0, 7.0, 8.0, 10.0], 1.0);
let audio_indexed = index_onsets_by_spacing(&[4.018, 5.017, 6.019, 8.018, 9.018], 1.0);
assert_eq!(
video_indexed.keys().copied().collect::<Vec<_>>(),
vec![0, 1, 3, 4, 6]
);
assert_eq!(
audio_indexed.keys().copied().collect::<Vec<_>>(),
vec![0, 1, 2, 4, 5]
);
}
#[test]
fn correlation_helpers_cover_empty_index_sets_and_wrapped_phase_math() {
assert!(index_onsets_by_spacing(&[], 1.0).is_empty());
assert!(candidate_index_offsets(&BTreeMap::new(), &BTreeMap::new()).is_empty());
let mut video_only = BTreeMap::new();
video_only.insert(0, 1.0);
assert!(candidate_index_offsets(&video_only, &BTreeMap::new()).is_empty());
let mut audio_only = BTreeMap::new();
audio_only.insert(0, 1.0);
assert!(candidate_index_offsets(&BTreeMap::new(), &audio_only).is_empty());
let mut video_indexed = BTreeMap::new();
video_indexed.insert(2, 2.0);
let mut audio_indexed = BTreeMap::new();
audio_indexed.insert(5, 5.0);
assert_eq!(
candidate_index_offsets(&video_indexed, &audio_indexed),
vec![3]
);
assert!((shortest_wrapped_difference(0.6, 1.0) + 0.4).abs() < 0.000_001);
assert!((shortest_wrapped_difference(-0.6, 1.0) - 0.4).abs() < 0.000_001);
}
#[test]
fn marker_index_offsets_include_marker_alignment_and_general_fallback() {
let video_indexed = index_onsets_by_spacing(&[4.0, 5.0, 7.0, 8.0, 10.0], 1.0);
let audio_indexed = index_onsets_by_spacing(&[5.018, 6.017, 7.019, 9.018, 10.018], 1.0);
let offsets = marker_index_offsets(&video_indexed, &audio_indexed, &[10.0], &[10.018]);
assert!(offsets.contains(&1));
assert!(offsets.contains(&0));
}
#[test]
fn correlate_onsets_ignores_missing_pulses_and_preserves_stable_skew() {
let report = correlate_onsets(
&[4.0, 5.0, 7.0, 8.0, 10.0],
&[4.018, 5.017, 6.019, 8.018, 9.018],
1.0,
0.2,
)
.expect("correlated report");
assert_eq!(report.paired_event_count, 3);
assert!((report.mean_skew_ms - 17.666).abs() < 5.0);
assert!(report.max_abs_skew_ms < 30.0);
}
#[test]
fn correlate_segments_uses_markers_to_break_period_aliasing() {
let video = vec![
PulseSegment {
start_s: 3.3,
end_s: 3.55,
duration_s: 0.25,
},
PulseSegment {
start_s: 4.266667,
end_s: 4.4,
duration_s: 0.133333,
},
PulseSegment {
start_s: 5.3,
end_s: 5.433333,
duration_s: 0.133333,
},
];
let audio = vec![
PulseSegment {
start_s: 3.35,
end_s: 3.59,
duration_s: 0.24,
},
PulseSegment {
start_s: 4.316667,
end_s: 4.436667,
duration_s: 0.12,
},
PulseSegment {
start_s: 5.35,
end_s: 5.47,
duration_s: 0.12,
},
];
let report =
correlate_segments(&video, &audio, 1.0, 0.12, 5, 0.2).expect("marker-correlated report");
assert_eq!(report.paired_event_count, 3);
assert!((report.mean_skew_ms - 50.0).abs() < 10.0);
}
#[test]
fn collapse_segments_by_phase_keeps_one_best_segment_per_pulse_slot() {
let collapsed = collapse_segments_by_phase(
&[
PulseSegment {
start_s: 4.00,
end_s: 4.12,
duration_s: 0.12,
},
PulseSegment {
start_s: 4.08,
end_s: 4.10,
duration_s: 0.02,
},
PulseSegment {
start_s: 5.01,
end_s: 5.13,
duration_s: 0.12,
},
PulseSegment {
start_s: 5.09,
end_s: 5.11,
duration_s: 0.02,
},
],
1.0,
0.32,
);
assert_eq!(collapsed.len(), 2);
assert!((collapsed[0].start_s - 4.0).abs() < 0.001);
assert!((collapsed[1].start_s - 5.01).abs() < 0.001);
}
#[test]
fn collapse_segments_by_phase_prefers_the_longest_regular_cadence() {
let collapsed = collapse_segments_by_phase(
&[
PulseSegment {
start_s: 4.00,
end_s: 4.12,
duration_s: 0.12,
},
PulseSegment {
start_s: 5.02,
end_s: 5.14,
duration_s: 0.12,
},
PulseSegment {
start_s: 6.00,
end_s: 6.12,
duration_s: 0.12,
},
PulseSegment {
start_s: 7.01,
end_s: 7.13,
duration_s: 0.12,
},
PulseSegment {
start_s: 4.42,
end_s: 4.67,
duration_s: 0.25,
},
PulseSegment {
start_s: 6.42,
end_s: 6.67,
duration_s: 0.25,
},
],
1.0,
0.32,
);
assert_eq!(collapsed.len(), 4);
assert!((collapsed[0].start_s - 4.0).abs() < 0.001);
assert!((collapsed[3].start_s - 7.01).abs() < 0.001);
}
#[test]
fn correlate_segments_collapses_repeated_noise_within_each_pulse_slot() {
let video = vec![
PulseSegment {
start_s: 4.0,
end_s: 4.12,
duration_s: 0.12,
},
PulseSegment {
start_s: 4.03,
end_s: 4.05,
duration_s: 0.02,
},
PulseSegment {
start_s: 5.0,
end_s: 5.12,
duration_s: 0.12,
},
PulseSegment {
start_s: 5.03,
end_s: 5.05,
duration_s: 0.02,
},
];
let audio = vec![
PulseSegment {
start_s: 4.02,
end_s: 4.14,
duration_s: 0.12,
},
PulseSegment {
start_s: 4.05,
end_s: 4.07,
duration_s: 0.02,
},
PulseSegment {
start_s: 5.02,
end_s: 5.14,
duration_s: 0.12,
},
PulseSegment {
start_s: 5.05,
end_s: 5.07,
duration_s: 0.02,
},
];
let report = correlate_segments(&video, &audio, 1.0, 0.12, 5, 0.2).expect("collapsed");
assert_eq!(report.video_event_count, 2);
assert_eq!(report.audio_event_count, 2);
assert_eq!(report.paired_event_count, 2);
assert!(report.max_abs_skew_ms < 30.0);
}
#[test]
fn marker_detection_finds_wider_segments_only() {
let markers = marker_onsets(
&[
PulseSegment {
start_s: 1.0,
end_s: 1.12,
duration_s: 0.12,
},
PulseSegment {
start_s: 5.0,
end_s: 5.24,
duration_s: 0.24,
},
],
0.12,
);
assert_eq!(markers, vec![5.0]);
}
#[test]
fn median_handles_empty_even_and_odd_inputs() {
assert_eq!(median(Vec::new()), 0.0);
assert_eq!(median(vec![1.0, 3.0, 2.0]), 2.0);
assert_eq!(median(vec![4.0, 1.0, 3.0, 2.0]), 2.5);
}

View File

@ -54,16 +54,16 @@ impl SyncProbeCapture {
let video_queue = FreshPacketQueue::new(PROBE_VIDEO_QUEUE);
let audio_queue = FreshPacketQueue::new(PROBE_AUDIO_QUEUE);
let video_thread = spawn_video_thread(
video_src,
video_sink,
let video_thread = spawn_video_thread(VideoThreadConfig {
src: video_src,
sink: video_sink,
camera,
schedule.clone(),
schedule: schedule.clone(),
duration,
probe_start,
running.clone(),
video_queue.clone(),
);
running: running.clone(),
queue: video_queue.clone(),
});
let audio_thread = spawn_audio_thread(
schedule,
duration,
@ -154,7 +154,7 @@ fn pick_h264_encoder(fps: u32) -> Result<String> {
bail!("no usable H.264 encoder found for sync probe")
}
fn spawn_video_thread(
struct VideoThreadConfig {
src: gst_app::AppSrc,
sink: gst_app::AppSink,
camera: CameraConfig,
@ -163,7 +163,19 @@ fn spawn_video_thread(
probe_start: Instant,
running: Arc<AtomicBool>,
queue: FreshPacketQueue<VideoPacket>,
) -> JoinHandle<()> {
}
fn spawn_video_thread(config: VideoThreadConfig) -> JoinHandle<()> {
let VideoThreadConfig {
src,
sink,
camera,
schedule,
duration,
probe_start,
running,
queue,
} = config;
thread::spawn(move || {
let pts_rebaser = crate::live_capture_clock::DurationPacedSourcePtsRebaser::default();
let lag_cap = crate::live_capture_clock::upstream_source_lag_cap();

View File

@ -1,18 +1,19 @@
use super::{
SyncProbeCapture, build_dark_probe_frame, build_marker_probe_frame, build_regular_probe_frame,
AUDIO_SAMPLE_RATE, SyncProbeCapture, build_dark_probe_frame, build_marker_probe_frame,
build_regular_probe_frame,
};
use crate::input::camera::{CameraCodec, CameraConfig};
use crate::sync_probe::analyze::detect_audio_onsets;
use crate::sync_probe::schedule::PulseSchedule;
use lesavka_common::lesavka::{AudioPacket, VideoPacket};
use std::time::Duration;
use std::time::Instant;
#[cfg(not(coverage))]
use gstreamer as gst;
#[cfg(not(coverage))]
use gstreamer::prelude::*;
#[cfg(not(coverage))]
use gstreamer_app as gst_app;
use lesavka_common::lesavka::{AudioPacket, VideoPacket};
use std::time::Duration;
use std::time::Instant;
fn stub_camera() -> CameraConfig {
CameraConfig {
@ -180,9 +181,7 @@ fn decode_mjpeg_packet_mean_luma(packet: &VideoPacket) -> u8 {
src.push_buffer(buffer).expect("push buffer");
src.end_of_stream().expect("end of stream");
let sample = sink.pull_sample().expect("decoded sample");
pipeline
.set_state(gst::State::Null)
.expect("pipeline null");
pipeline.set_state(gst::State::Null).expect("pipeline null");
let buffer = sample.buffer().expect("sample buffer");
let map = buffer.map_readable().expect("buffer readable");
let bytes = map.as_slice();
@ -190,6 +189,7 @@ fn decode_mjpeg_packet_mean_luma(packet: &VideoPacket) -> u8 {
mean.min(u64::from(u8::MAX)) as u8
}
#[cfg(not(coverage))]
#[test]
fn probe_video_pts_are_lag_capped_like_audio() {
let rebaser = crate::live_capture_clock::DurationPacedSourcePtsRebaser::default();
@ -206,318 +206,4 @@ fn probe_video_pts_are_lag_capped_like_audio() {
}
#[cfg(not(coverage))]
#[tokio::test]
async fn runtime_audio_probe_emits_nontrivial_pcm_packets() {
let capture = SyncProbeCapture::new(
stub_camera(),
PulseSchedule::new(
Duration::from_secs(1),
Duration::from_millis(500),
Duration::from_millis(120),
4,
),
Duration::from_secs(3),
)
.expect("runtime capture");
let audio_queue = capture.audio_queue();
let mut packet_count = 0usize;
let mut total_bytes = 0usize;
let mut largest_packet = 0usize;
loop {
let next = audio_queue.pop_fresh().await;
let Some(packet) = next.packet else {
break;
};
packet_count += 1;
total_bytes += packet.data.len();
largest_packet = largest_packet.max(packet.data.len());
}
assert!(
packet_count >= 120,
"expected the runtime probe to emit many PCM packets, got {packet_count}"
);
assert!(
total_bytes >= 200_000,
"expected the runtime probe to emit a meaningful PCM payload, got {total_bytes} bytes"
);
assert!(
largest_packet >= 1_000,
"expected at least one non-trivial PCM packet, largest was {largest_packet} bytes"
);
}
#[cfg(not(coverage))]
#[tokio::test]
async fn runtime_audio_probe_decodes_detectable_click_onsets() {
let schedule = PulseSchedule::new(
Duration::from_secs(1),
Duration::from_millis(500),
Duration::from_millis(120),
4,
);
let capture = SyncProbeCapture::new(stub_camera(), schedule.clone(), Duration::from_secs(3))
.expect("runtime capture");
let audio_queue = capture.audio_queue();
let mut pcm = Vec::new();
loop {
let next = audio_queue.pop_fresh().await;
let Some(packet) = next.packet else {
break;
};
pcm.extend_from_slice(&packet.data);
}
assert!(
pcm.len() >= 200_000,
"expected the runtime probe PCM stream to carry a meaningful payload, got {} bytes",
pcm.len()
);
let decoded = decode_interleaved_pcm_to_mono_samples(&pcm);
let onsets =
detect_audio_onsets(&decoded, super::AUDIO_SAMPLE_RATE as u32, 5).expect("audio onsets");
assert!(
onsets.len() >= 4,
"expected at least four decoded click onsets, got {onsets:?}"
);
let expected = [1.0, 1.5, 2.0, 2.5];
for (actual, expected) in onsets.iter().zip(expected) {
assert!(
(*actual - expected).abs() <= 0.08,
"expected onset near {expected:.3}s, got {actual:.3}s"
);
}
}
#[cfg(not(coverage))]
#[tokio::test]
async fn runtime_audio_probe_decodes_detectable_click_onsets_for_manual_harness_timing() {
let schedule = PulseSchedule::new(
Duration::from_secs(4),
Duration::from_secs(1),
Duration::from_millis(120),
5,
);
let capture = SyncProbeCapture::new(stub_camera(), schedule.clone(), Duration::from_secs(10))
.expect("runtime capture");
let audio_queue = capture.audio_queue();
let mut pcm = Vec::new();
loop {
let next = audio_queue.pop_fresh().await;
let Some(packet) = next.packet else {
break;
};
pcm.extend_from_slice(&packet.data);
}
let decoded = decode_interleaved_pcm_to_mono_samples(&pcm);
let onsets =
detect_audio_onsets(&decoded, super::AUDIO_SAMPLE_RATE as u32, 5).expect("audio onsets");
assert!(
onsets.len() >= 6,
"expected at least six decoded click onsets, got {onsets:?}"
);
let expected = [4.0, 5.0, 6.0, 7.0, 8.0, 9.0];
for (actual, expected) in onsets.iter().zip(expected) {
assert!(
(*actual - expected).abs() <= 0.1,
"expected onset near {expected:.3}s, got {actual:.3}s"
);
}
}
#[cfg(not(coverage))]
#[tokio::test]
async fn runtime_probe_audio_and_video_pts_advance_near_real_time() {
let capture_duration = Duration::from_secs(3);
let capture = SyncProbeCapture::new(
stub_camera(),
PulseSchedule::new(
Duration::from_secs(1),
Duration::from_millis(500),
Duration::from_millis(120),
4,
),
capture_duration,
)
.expect("runtime capture");
let video_queue = capture.video_queue();
let audio_queue = capture.audio_queue();
let started = Instant::now();
let video_task = tokio::spawn(async move {
let mut first_pts = None;
let mut last_pts = None;
let mut count = 0usize;
loop {
let next = video_queue.pop_fresh().await;
let Some(packet) = next.packet else {
break;
};
first_pts.get_or_insert(packet.pts);
last_pts = Some(packet.pts);
count = count.saturating_add(1);
}
(first_pts, last_pts, count)
});
let audio_task = tokio::spawn(async move {
let mut first_pts = None;
let mut last_pts = None;
let mut count = 0usize;
loop {
let next = audio_queue.pop_fresh().await;
let Some(packet) = next.packet else {
break;
};
first_pts.get_or_insert(packet.pts);
last_pts = Some(packet.pts);
count = count.saturating_add(1);
}
(first_pts, last_pts, count)
});
let (video_first, video_last, video_count) = video_task.await.expect("video drain");
let (audio_first, audio_last, audio_count) = audio_task.await.expect("audio drain");
let wall_elapsed = started.elapsed();
let video_span = video_last.expect("video last pts") - video_first.expect("video first pts");
let audio_span = audio_last.expect("audio last pts") - audio_first.expect("audio first pts");
eprintln!(
"runtime probe spans: video_count={video_count} video_span_us={video_span} audio_count={audio_count} audio_span_us={audio_span} wall_elapsed={wall_elapsed:?}"
);
assert!(
video_count >= 60,
"expected many runtime probe video packets, got {video_count}"
);
assert!(
audio_count >= 60,
"expected many runtime probe audio packets, got {audio_count}"
);
assert!(
wall_elapsed <= Duration::from_secs(5),
"runtime probe should not take excessively long locally, took {wall_elapsed:?}"
);
assert!(
video_span >= 2_400_000,
"video pts should span most of the 3s capture, got {} us",
video_span
);
assert!(
audio_span >= 2_400_000,
"audio pts should span most of the 3s capture, got {} us",
audio_span
);
assert!(
audio_span <= 3_400_000,
"audio pts should stay near the 3s capture duration, got {} us",
audio_span
);
}
#[cfg(not(coverage))]
#[tokio::test]
async fn runtime_probe_video_packets_change_across_a_pulse_boundary() {
    // A 2s capture whose first pulse starts at t=1s: frames before 1s should
    // decode dark, frames inside the 120ms pulse window should decode bright.
    let schedule = PulseSchedule::new(
        Duration::from_secs(1),
        Duration::from_secs(1),
        Duration::from_millis(120),
        4,
    );
    let capture = SyncProbeCapture::new(stub_camera(), schedule, Duration::from_secs(2))
        .expect("runtime capture");
    let queue = capture.video_queue();
    // Grab one representative packet from each side of the pulse boundary.
    let mut before_pulse = None;
    let mut during_pulse = None;
    while let Some(packet) = queue.pop_fresh().await.packet {
        if before_pulse.is_none() && (200_000..800_000).contains(&packet.pts) {
            before_pulse = Some(packet.clone());
        }
        if during_pulse.is_none() && (1_000_000..1_120_000).contains(&packet.pts) {
            during_pulse = Some(packet.clone());
        }
        if before_pulse.is_some() && during_pulse.is_some() {
            break;
        }
    }
    let before_pulse = before_pulse.expect("dark packet");
    let during_pulse = during_pulse.expect("pulse packet");
    assert_ne!(before_pulse.data, during_pulse.data);
    assert!(!before_pulse.data.is_empty());
    assert!(!during_pulse.data.is_empty());
    // Compare decoded brightness, not just raw bytes, so a codec artifact
    // cannot fake the pulse.
    let dark_mean = decode_mjpeg_packet_mean_luma(&before_pulse);
    let pulse_mean = decode_mjpeg_packet_mean_luma(&during_pulse);
    assert!(
        pulse_mean > dark_mean.saturating_add(100),
        "expected decoded pulse frame to be much brighter than decoded dark frame, got dark={dark_mean} pulse={pulse_mean}"
    );
}
#[cfg(not(coverage))]
#[tokio::test]
async fn runtime_probe_dark_video_packets_do_not_alternate_frame_to_frame() {
    // With a 4s warmup before the first pulse, every frame in the first
    // second must be dark; their decoded brightness should be essentially
    // flat rather than flickering frame to frame.
    let camera = CameraConfig {
        codec: CameraCodec::Mjpeg,
        width: 640,
        height: 480,
        fps: 20,
    };
    let schedule = PulseSchedule::new(
        Duration::from_secs(4),
        Duration::from_secs(1),
        Duration::from_millis(120),
        5,
    );
    let capture =
        SyncProbeCapture::new(camera, schedule, Duration::from_secs(3)).expect("runtime capture");
    let queue = capture.video_queue();
    // Sample up to eight frames from the guaranteed-dark first second.
    let mut dark_means = Vec::new();
    while let Some(packet) = queue.pop_fresh().await.packet {
        if packet.pts >= 1_000_000 || dark_means.len() >= 8 {
            break;
        }
        dark_means.push(decode_mjpeg_packet_mean_luma(&packet));
    }
    assert!(
        dark_means.len() >= 4,
        "expected several dark packets before the first pulse, got {dark_means:?}"
    );
    let lo = *dark_means.iter().min().expect("dark min");
    let hi = *dark_means.iter().max().expect("dark max");
    assert!(
        hi.saturating_sub(lo) <= 2,
        "expected consecutive dark MJPEG packets to stay visually stable, got {dark_means:?}"
    );
}
mod runtime_packets;

View File

@ -0,0 +1,318 @@
use super::*;
#[cfg(not(coverage))]
#[tokio::test]
async fn runtime_audio_probe_emits_nontrivial_pcm_packets() {
    // Drain the audio queue for a 3s probe run and sanity-check packet
    // volume: many packets, a meaningful total payload, and at least one
    // packet of non-trivial size.
    let schedule = PulseSchedule::new(
        Duration::from_secs(1),
        Duration::from_millis(500),
        Duration::from_millis(120),
        4,
    );
    let capture = SyncProbeCapture::new(stub_camera(), schedule, Duration::from_secs(3))
        .expect("runtime capture");
    let queue = capture.audio_queue();
    let mut packet_count = 0usize;
    let mut total_bytes = 0usize;
    let mut largest_packet = 0usize;
    while let Some(packet) = queue.pop_fresh().await.packet {
        let len = packet.data.len();
        packet_count += 1;
        total_bytes += len;
        if len > largest_packet {
            largest_packet = len;
        }
    }
    assert!(
        packet_count >= 120,
        "expected the runtime probe to emit many PCM packets, got {packet_count}"
    );
    assert!(
        total_bytes >= 200_000,
        "expected the runtime probe to emit a meaningful PCM payload, got {total_bytes} bytes"
    );
    assert!(
        largest_packet >= 1_000,
        "expected at least one non-trivial PCM packet, largest was {largest_packet} bytes"
    );
}
#[cfg(not(coverage))]
#[tokio::test]
async fn runtime_audio_probe_decodes_detectable_click_onsets() {
    // Drain the full probe PCM stream, decode it to mono, and confirm the
    // scheduled clicks (warmup 1s, period 500ms, 4 pulses) are detectable as
    // onsets near 1.0s, 1.5s, 2.0s, and 2.5s.
    let schedule = PulseSchedule::new(
        Duration::from_secs(1),
        Duration::from_millis(500),
        Duration::from_millis(120),
        4,
    );
    // NOTE: the schedule is moved here; the previous `.clone()` was redundant
    // since `schedule` was never used again.
    let capture = SyncProbeCapture::new(stub_camera(), schedule, Duration::from_secs(3))
        .expect("runtime capture");
    let audio_queue = capture.audio_queue();
    let mut pcm = Vec::new();
    while let Some(packet) = audio_queue.pop_fresh().await.packet {
        pcm.extend_from_slice(&packet.data);
    }
    assert!(
        pcm.len() >= 200_000,
        "expected the runtime probe PCM stream to carry a meaningful payload, got {} bytes",
        pcm.len()
    );
    let decoded = decode_interleaved_pcm_to_mono_samples(&pcm);
    let onsets =
        detect_audio_onsets(&decoded, super::AUDIO_SAMPLE_RATE as u32, 5).expect("audio onsets");
    assert!(
        onsets.len() >= 4,
        "expected at least four decoded click onsets, got {onsets:?}"
    );
    // Each detected onset must land within 80ms of its scheduled click time.
    let expected = [1.0, 1.5, 2.0, 2.5];
    for (actual, expected) in onsets.iter().zip(expected) {
        assert!(
            (*actual - expected).abs() <= 0.08,
            "expected onset near {expected:.3}s, got {actual:.3}s"
        );
    }
}
#[cfg(not(coverage))]
#[tokio::test]
async fn runtime_audio_probe_decodes_detectable_click_onsets_for_manual_harness_timing() {
    // Mirrors the manual hardware harness timing: 4s warmup, 1s period,
    // 5 scheduled pulses over a 10s capture. Onsets should land near whole
    // seconds from 4.0s onward.
    let schedule = PulseSchedule::new(
        Duration::from_secs(4),
        Duration::from_secs(1),
        Duration::from_millis(120),
        5,
    );
    // NOTE: the schedule is moved here; the previous `.clone()` was redundant
    // since `schedule` was never used again.
    let capture = SyncProbeCapture::new(stub_camera(), schedule, Duration::from_secs(10))
        .expect("runtime capture");
    let audio_queue = capture.audio_queue();
    let mut pcm = Vec::new();
    while let Some(packet) = audio_queue.pop_fresh().await.packet {
        pcm.extend_from_slice(&packet.data);
    }
    let decoded = decode_interleaved_pcm_to_mono_samples(&pcm);
    let onsets =
        detect_audio_onsets(&decoded, super::AUDIO_SAMPLE_RATE as u32, 5).expect("audio onsets");
    assert!(
        onsets.len() >= 6,
        "expected at least six decoded click onsets, got {onsets:?}"
    );
    // Allow 100ms of slack per onset for the longer manual-style run.
    let expected = [4.0, 5.0, 6.0, 7.0, 8.0, 9.0];
    for (actual, expected) in onsets.iter().zip(expected) {
        assert!(
            (*actual - expected).abs() <= 0.1,
            "expected onset near {expected:.3}s, got {actual:.3}s"
        );
    }
}
#[cfg(not(coverage))]
#[tokio::test]
async fn runtime_probe_audio_and_video_pts_advance_near_real_time() {
    // Drain both media queues concurrently and verify that packet pts values
    // span most of the configured 3s capture while the drain itself finishes
    // in near real time.
    let capture_duration = Duration::from_secs(3);
    let schedule = PulseSchedule::new(
        Duration::from_secs(1),
        Duration::from_millis(500),
        Duration::from_millis(120),
        4,
    );
    let capture = SyncProbeCapture::new(stub_camera(), schedule, capture_duration)
        .expect("runtime capture");
    let video_queue = capture.video_queue();
    let audio_queue = capture.audio_queue();
    let started = Instant::now();
    // Each task reports (first pts, last pts, packet count) for its queue.
    let video_task = tokio::spawn(async move {
        let mut first = None;
        let mut last = None;
        let mut seen = 0usize;
        while let Some(packet) = video_queue.pop_fresh().await.packet {
            first.get_or_insert(packet.pts);
            last = Some(packet.pts);
            seen = seen.saturating_add(1);
        }
        (first, last, seen)
    });
    let audio_task = tokio::spawn(async move {
        let mut first = None;
        let mut last = None;
        let mut seen = 0usize;
        while let Some(packet) = audio_queue.pop_fresh().await.packet {
            first.get_or_insert(packet.pts);
            last = Some(packet.pts);
            seen = seen.saturating_add(1);
        }
        (first, last, seen)
    });
    let (video_first, video_last, video_count) = video_task.await.expect("video drain");
    let (audio_first, audio_last, audio_count) = audio_task.await.expect("audio drain");
    let wall_elapsed = started.elapsed();
    let video_span = video_last.expect("video last pts") - video_first.expect("video first pts");
    let audio_span = audio_last.expect("audio last pts") - audio_first.expect("audio first pts");
    eprintln!(
        "runtime probe spans: video_count={video_count} video_span_us={video_span} audio_count={audio_count} audio_span_us={audio_span} wall_elapsed={wall_elapsed:?}"
    );
    assert!(
        video_count >= 60,
        "expected many runtime probe video packets, got {video_count}"
    );
    assert!(
        audio_count >= 60,
        "expected many runtime probe audio packets, got {audio_count}"
    );
    assert!(
        wall_elapsed <= Duration::from_secs(5),
        "runtime probe should not take excessively long locally, took {wall_elapsed:?}"
    );
    assert!(
        video_span >= 2_400_000,
        "video pts should span most of the 3s capture, got {} us",
        video_span
    );
    assert!(
        audio_span >= 2_400_000,
        "audio pts should span most of the 3s capture, got {} us",
        audio_span
    );
    assert!(
        audio_span <= 3_400_000,
        "audio pts should stay near the 3s capture duration, got {} us",
        audio_span
    );
}
#[cfg(not(coverage))]
#[tokio::test]
async fn runtime_probe_video_packets_change_across_a_pulse_boundary() {
    // A 2s capture whose first pulse starts at t=1s: frames before 1s should
    // decode dark, frames inside the 120ms pulse window should decode bright.
    let schedule = PulseSchedule::new(
        Duration::from_secs(1),
        Duration::from_secs(1),
        Duration::from_millis(120),
        4,
    );
    let capture = SyncProbeCapture::new(stub_camera(), schedule, Duration::from_secs(2))
        .expect("runtime capture");
    let queue = capture.video_queue();
    // Grab one representative packet from each side of the pulse boundary.
    let mut before_pulse = None;
    let mut during_pulse = None;
    while let Some(packet) = queue.pop_fresh().await.packet {
        if before_pulse.is_none() && (200_000..800_000).contains(&packet.pts) {
            before_pulse = Some(packet.clone());
        }
        if during_pulse.is_none() && (1_000_000..1_120_000).contains(&packet.pts) {
            during_pulse = Some(packet.clone());
        }
        if before_pulse.is_some() && during_pulse.is_some() {
            break;
        }
    }
    let before_pulse = before_pulse.expect("dark packet");
    let during_pulse = during_pulse.expect("pulse packet");
    assert_ne!(before_pulse.data, during_pulse.data);
    assert!(!before_pulse.data.is_empty());
    assert!(!during_pulse.data.is_empty());
    // Compare decoded brightness, not just raw bytes, so a codec artifact
    // cannot fake the pulse.
    let dark_mean = decode_mjpeg_packet_mean_luma(&before_pulse);
    let pulse_mean = decode_mjpeg_packet_mean_luma(&during_pulse);
    assert!(
        pulse_mean > dark_mean.saturating_add(100),
        "expected decoded pulse frame to be much brighter than decoded dark frame, got dark={dark_mean} pulse={pulse_mean}"
    );
}
#[cfg(not(coverage))]
#[tokio::test]
async fn runtime_probe_dark_video_packets_do_not_alternate_frame_to_frame() {
    // With a 4s warmup before the first pulse, every frame in the first
    // second must be dark; their decoded brightness should be essentially
    // flat rather than flickering frame to frame.
    let camera = CameraConfig {
        codec: CameraCodec::Mjpeg,
        width: 640,
        height: 480,
        fps: 20,
    };
    let schedule = PulseSchedule::new(
        Duration::from_secs(4),
        Duration::from_secs(1),
        Duration::from_millis(120),
        5,
    );
    let capture =
        SyncProbeCapture::new(camera, schedule, Duration::from_secs(3)).expect("runtime capture");
    let queue = capture.video_queue();
    // Sample up to eight frames from the guaranteed-dark first second.
    let mut dark_means = Vec::new();
    while let Some(packet) = queue.pop_fresh().await.packet {
        if packet.pts >= 1_000_000 || dark_means.len() >= 8 {
            break;
        }
        dark_means.push(decode_mjpeg_packet_mean_luma(&packet));
    }
    assert!(
        dark_means.len() >= 4,
        "expected several dark packets before the first pulse, got {dark_means:?}"
    );
    let lo = *dark_means.iter().min().expect("dark min");
    let hi = *dark_means.iter().max().expect("dark max");
    assert!(
        hi.saturating_sub(lo) <= 2,
        "expected consecutive dark MJPEG packets to stay visually stable, got {dark_means:?}"
    );
}

View File

@ -102,7 +102,7 @@ impl PulseSchedule {
}
let period_ns = self.pulse_period.as_nanos().max(1) as u64;
let warmup_ns = self.warmup.as_nanos() as u64;
let rounded = ((warmup_ns + period_ns - 1) / period_ns) * period_ns;
let rounded = warmup_ns.div_ceil(period_ns) * period_ns;
Duration::from_nanos(rounded)
}

View File

@ -1,6 +1,6 @@
[package]
name = "lesavka_common"
version = "0.14.48"
version = "0.15.0"
edition = "2024"
build = "build.rs"

View File

@ -256,3 +256,29 @@ Hardware-facing assumptions belong near the code that uses them; this file is th
| `LESAVKA_VIDEO_MAX_KBIT` | eye preview/video transport override |
| `LESAVKA_VIDEO_QUEUE` | eye preview/video transport override |
| `LESAVKA_VIEW_MODE` | launcher UI/runtime override |
| `LESAVKA_ALLOW_EXTERNAL_UVC_GADGET_CYCLE` | server hardware/device override |
| `LESAVKA_CAPTURE_READY__` | manual probe marker; not runtime operator config |
| `LESAVKA_FORCE_SOFT_CONNECT` | server hardware/device override |
| `LESAVKA_HDMI_QUEUE_BUFFERS` | server HDMI video latency override |
| `LESAVKA_INSTALL_CAM_OUTPUT` | install-time server camera output selection |
| `LESAVKA_KERNEL_SKIP_CPUINFO_PATCH` | kernel build/install override |
| `LESAVKA_LAUNCHER_MEASURE_EXIT` | launcher UI/runtime override |
| `LESAVKA_LAUNCHER_MEASURE_PATH` | launcher UI/runtime override |
| `LESAVKA_SERVER_CONNECT_HOST` | manual probe override |
| `LESAVKA_SERVER_ENV` | server/install environment file override |
| `LESAVKA_SERVER_LOG_PATH` | server logging path override |
| `LESAVKA_SYNC_PROBE_AUDIO_DUMP` | manual probe override |
| `LESAVKA_UAC_SANITY_DEV` | manual UAC sanity probe override |
| `LESAVKA_UAC_SANITY_FREQ` | manual UAC sanity probe override |
| `LESAVKA_UAC_SANITY_SECONDS` | manual UAC sanity probe override |
| `LESAVKA_UAC_SANITY_VOLUME` | manual UAC sanity probe override |
| `LESAVKA_UPLINK_TELEMETRY` | launcher/uplink telemetry path override |
| `LESAVKA_UPSTREAM_CAMERA_STARTUP_GRACE_MS` | upstream A/V timing override |
| `LESAVKA_UPSTREAM_REANCHOR_LATE_MS` | upstream A/V timing override |
| `LESAVKA_UPSTREAM_SOURCE_LAG_CAP_MS` | upstream A/V timing override |
| `LESAVKA_UVC_CONTROL_READ_ONLY` | UVC helper runtime override |
| `LESAVKA_UVC_FRAME_PATH` | UVC helper MJPEG frame spool path |
| `LESAVKA_UVC_LOCK_PATH` | UVC helper singleton lock path |
| `LESAVKA_UVC_MJPEG_IO_MODE` | UVC helper MJPEG streaming mode override |
| `LESAVKA_UVC_MJPEG_SPOOL` | UVC helper MJPEG spool toggle |
| `LESAVKA_UVC_SESSION_CLOCK_ALIGN` | UVC helper timing override |

View File

@ -3,7 +3,7 @@
"client/src/app.rs": {
"clippy_warnings": 0,
"doc_debt": 0,
"loc": 49
"loc": 48
},
"client/src/app/audio_recovery_config.rs": {
"clippy_warnings": 0,
@ -13,7 +13,7 @@
"client/src/app/downlink_media.rs": {
"clippy_warnings": 0,
"doc_debt": 3,
"loc": 193
"loc": 208
},
"client/src/app/input_streams.rs": {
"clippy_warnings": 0,
@ -23,12 +23,12 @@
"client/src/app/session_lifecycle.rs": {
"clippy_warnings": 0,
"doc_debt": 3,
"loc": 304
"loc": 324
},
"client/src/app/uplink_media.rs": {
"clippy_warnings": 0,
"doc_debt": 2,
"loc": 99
"loc": 224
},
"client/src/app_support.rs": {
"clippy_warnings": 0,
@ -40,6 +40,16 @@
"doc_debt": 6,
"loc": 304
},
"client/src/bin/lesavka-sync-analyze.rs": {
"clippy_warnings": 0,
"doc_debt": 2,
"loc": 125
},
"client/src/bin/lesavka-sync-probe.rs": {
"clippy_warnings": 0,
"doc_debt": 0,
"loc": 19
},
"client/src/handshake.rs": {
"clippy_warnings": 0,
"doc_debt": 4,
@ -48,7 +58,7 @@
"client/src/input/camera.rs": {
"clippy_warnings": 0,
"doc_debt": 0,
"loc": 61
"loc": 63
},
"client/src/input/camera/bus_and_encoder.rs": {
"clippy_warnings": 0,
@ -57,13 +67,13 @@
},
"client/src/input/camera/capture_pipeline.rs": {
"clippy_warnings": 0,
"doc_debt": 2,
"loc": 254
"doc_debt": 4,
"loc": 295
},
"client/src/input/camera/device_selection.rs": {
"clippy_warnings": 0,
"doc_debt": 2,
"loc": 100
"loc": 102
},
"client/src/input/camera/encoder_selection.rs": {
"clippy_warnings": 0,
@ -137,8 +147,8 @@
},
"client/src/input/microphone.rs": {
"clippy_warnings": 0,
"doc_debt": 13,
"loc": 398
"doc_debt": 12,
"loc": 413
},
"client/src/input/mod.rs": {
"clippy_warnings": 0,
@ -147,8 +157,13 @@
},
"client/src/input/mouse.rs": {
"clippy_warnings": 0,
"doc_debt": 8,
"loc": 317
"doc_debt": 13,
"loc": 411
},
"client/src/input/mouse_event_contract_tests.rs": {
"clippy_warnings": 0,
"doc_debt": 14,
"loc": 439
},
"client/src/launcher/clipboard.rs": {
"clippy_warnings": 0,
@ -162,13 +177,13 @@
},
"client/src/launcher/device_test/controller.rs": {
"clippy_warnings": 0,
"doc_debt": 17,
"loc": 398
"doc_debt": 21,
"loc": 439
},
"client/src/launcher/device_test/local_preview.rs": {
"clippy_warnings": 0,
"doc_debt": 11,
"loc": 320
"doc_debt": 12,
"loc": 361
},
"client/src/launcher/device_test/pipeline_helpers.rs": {
"clippy_warnings": 0,
@ -188,17 +203,17 @@
"client/src/launcher/diagnostics/diagnostics_models.rs": {
"clippy_warnings": 0,
"doc_debt": 1,
"loc": 164
"loc": 170
},
"client/src/launcher/diagnostics/recommendations.rs": {
"clippy_warnings": 0,
"doc_debt": 2,
"loc": 230
"loc": 277
},
"client/src/launcher/diagnostics/snapshot_report.rs": {
"clippy_warnings": 0,
"doc_debt": 2,
"loc": 410
"doc_debt": 3,
"loc": 465
},
"client/src/launcher/mod.rs": {
"clippy_warnings": 0,
@ -263,22 +278,22 @@
"client/src/launcher/ui.rs": {
"clippy_warnings": 0,
"doc_debt": 1,
"loc": 184
"loc": 182
},
"client/src/launcher/ui/activation_context.rs": {
"clippy_warnings": 0,
"doc_debt": 0,
"loc": 36
"loc": 37
},
"client/src/launcher/ui/activation_setup.rs": {
"clippy_warnings": 0,
"doc_debt": 0,
"loc": 168
"loc": 169
},
"client/src/launcher/ui/control_requests.rs": {
"clippy_warnings": 0,
"doc_debt": 3,
"loc": 166
"loc": 165
},
"client/src/launcher/ui/device_refresh_binding.rs": {
"clippy_warnings": 0,
@ -288,7 +303,7 @@
"client/src/launcher/ui/diagnostic_sampling.rs": {
"clippy_warnings": 0,
"doc_debt": 2,
"loc": 157
"loc": 161
},
"client/src/launcher/ui/eye_display_bindings.rs": {
"clippy_warnings": 0,
@ -298,12 +313,12 @@
"client/src/launcher/ui/local_test_bindings.rs": {
"clippy_warnings": 0,
"doc_debt": 0,
"loc": 90
"loc": 113
},
"client/src/launcher/ui/media_device_bindings.rs": {
"clippy_warnings": 0,
"doc_debt": 0,
"loc": 139
"loc": 167
},
"client/src/launcher/ui/message_and_network_state.rs": {
"clippy_warnings": 0,
@ -318,7 +333,7 @@
"client/src/launcher/ui/preview_profiles.rs": {
"clippy_warnings": 0,
"doc_debt": 9,
"loc": 221
"loc": 209
},
"client/src/launcher/ui/relay_input_bindings.rs": {
"clippy_warnings": 0,
@ -328,13 +343,23 @@
"client/src/launcher/ui/runtime_poll.rs": {
"clippy_warnings": 0,
"doc_debt": 0,
"loc": 371
"loc": 375
},
"client/src/launcher/ui/session_preview_coverage.rs": {
"clippy_warnings": 0,
"doc_debt": 0,
"loc": 7
},
"client/src/launcher/ui/stage_device_bindings.rs": {
"clippy_warnings": 0,
"doc_debt": 0,
"loc": 174
},
"client/src/launcher/ui/startup_window_guard.rs": {
"clippy_warnings": 0,
"doc_debt": 0,
"loc": 53
},
"client/src/launcher/ui/utility_button_bindings.rs": {
"clippy_warnings": 0,
"doc_debt": 0,
@ -343,27 +368,27 @@
"client/src/launcher/ui_components.rs": {
"clippy_warnings": 0,
"doc_debt": 1,
"loc": 105
"loc": 110
},
"client/src/launcher/ui_components/assemble_view.rs": {
"clippy_warnings": 0,
"doc_debt": 0,
"loc": 180
"loc": 189
},
"client/src/launcher/ui_components/build_contexts.rs": {
"clippy_warnings": 0,
"doc_debt": 0,
"loc": 68
"loc": 73
},
"client/src/launcher/ui_components/build_device_controls.rs": {
"clippy_warnings": 0,
"doc_debt": 0,
"loc": 296
"loc": 394
},
"client/src/launcher/ui_components/build_operations_rail.rs": {
"clippy_warnings": 0,
"doc_debt": 0,
"loc": 224
"loc": 228
},
"client/src/launcher/ui_components/build_shell.rs": {
"clippy_warnings": 0,
@ -373,7 +398,7 @@
"client/src/launcher/ui_components/combo_helpers.rs": {
"clippy_warnings": 0,
"doc_debt": 11,
"loc": 265
"loc": 272
},
"client/src/launcher/ui_components/control_buttons.rs": {
"clippy_warnings": 0,
@ -382,8 +407,8 @@
},
"client/src/launcher/ui_components/display_pane.rs": {
"clippy_warnings": 0,
"doc_debt": 1,
"loc": 130
"doc_debt": 2,
"loc": 209
},
"client/src/launcher/ui_components/panel_chips.rs": {
"clippy_warnings": 0,
@ -398,12 +423,12 @@
"client/src/launcher/ui_components/style.rs": {
"clippy_warnings": 0,
"doc_debt": 2,
"loc": 163
"loc": 216
},
"client/src/launcher/ui_components/types.rs": {
"clippy_warnings": 0,
"doc_debt": 0,
"loc": 191
"loc": 201
},
"client/src/launcher/ui_runtime.rs": {
"clippy_warnings": 0,
@ -413,12 +438,12 @@
"client/src/launcher/ui_runtime/control_paths.rs": {
"clippy_warnings": 0,
"doc_debt": 8,
"loc": 238
"loc": 244
},
"client/src/launcher/ui_runtime/display_popouts.rs": {
"clippy_warnings": 0,
"doc_debt": 5,
"loc": 262
"loc": 270
},
"client/src/launcher/ui_runtime/log_filtering.rs": {
"clippy_warnings": 0,
@ -428,12 +453,12 @@
"client/src/launcher/ui_runtime/process_logs.rs": {
"clippy_warnings": 0,
"doc_debt": 5,
"loc": 213
"loc": 216
},
"client/src/launcher/ui_runtime/report_popouts.rs": {
"clippy_warnings": 0,
"doc_debt": 6,
"loc": 254
"loc": 256
},
"client/src/launcher/ui_runtime/status_details.rs": {
"clippy_warnings": 0,
@ -443,7 +468,7 @@
"client/src/launcher/ui_runtime/status_refresh.rs": {
"clippy_warnings": 0,
"doc_debt": 3,
"loc": 272
"loc": 285
},
"client/src/layout.rs": {
"clippy_warnings": 0,
@ -453,7 +478,12 @@
"client/src/lib.rs": {
"clippy_warnings": 0,
"doc_debt": 0,
"loc": 19
"loc": 24
},
"client/src/live_capture_clock.rs": {
"clippy_warnings": 0,
"doc_debt": 7,
"loc": 429
},
"client/src/main.rs": {
"clippy_warnings": 0,
@ -500,6 +530,101 @@
"doc_debt": 1,
"loc": 82
},
"client/src/sync_probe/analyze.rs": {
"clippy_warnings": 0,
"doc_debt": 1,
"loc": 87
},
"client/src/sync_probe/analyze/media_extract.rs": {
"clippy_warnings": 0,
"doc_debt": 12,
"loc": 300
},
"client/src/sync_probe/analyze/onset_detection.rs": {
"clippy_warnings": 0,
"doc_debt": 4,
"loc": 248
},
"client/src/sync_probe/analyze/onset_detection/correlation.rs": {
"clippy_warnings": 0,
"doc_debt": 9,
"loc": 426
},
"client/src/sync_probe/analyze/onset_detection/correlation_collapse.rs": {
"clippy_warnings": 0,
"doc_debt": 8,
"loc": 311
},
"client/src/sync_probe/analyze/onset_detection/tests.rs": {
"clippy_warnings": 0,
"doc_debt": 8,
"loc": 246
},
"client/src/sync_probe/analyze/report.rs": {
"clippy_warnings": 0,
"doc_debt": 5,
"loc": 217
},
"client/src/sync_probe/analyze/test_support.rs": {
"clippy_warnings": 0,
"doc_debt": 3,
"loc": 100
},
"client/src/sync_probe/capture.rs": {
"clippy_warnings": 0,
"doc_debt": 3,
"loc": 153
},
"client/src/sync_probe/capture/coverage_stub.rs": {
"clippy_warnings": 0,
"doc_debt": 0,
"loc": 34
},
"client/src/sync_probe/capture/runtime.rs": {
"clippy_warnings": 0,
"doc_debt": 7,
"loc": 309
},
"client/src/sync_probe/capture/tests.rs": {
"clippy_warnings": 0,
"doc_debt": 5,
"loc": 208
},
"client/src/sync_probe/config.rs": {
"clippy_warnings": 0,
"doc_debt": 3,
"loc": 214
},
"client/src/sync_probe/mod.rs": {
"clippy_warnings": 0,
"doc_debt": 0,
"loc": 15
},
"client/src/sync_probe/runner.rs": {
"clippy_warnings": 0,
"doc_debt": 3,
"loc": 222
},
"client/src/sync_probe/schedule.rs": {
"clippy_warnings": 0,
"doc_debt": 10,
"loc": 234
},
"client/src/uplink_fresh_queue.rs": {
"clippy_warnings": 0,
"doc_debt": 5,
"loc": 288
},
"client/src/uplink_latency_harness.rs": {
"clippy_warnings": 0,
"doc_debt": 5,
"loc": 270
},
"client/src/uplink_telemetry.rs": {
"clippy_warnings": 0,
"doc_debt": 4,
"loc": 301
},
"client/src/video_support.rs": {
"clippy_warnings": 0,
"doc_debt": 1,
@ -548,12 +673,12 @@
"server/src/audio/ear_capture.rs": {
"clippy_warnings": 0,
"doc_debt": 5,
"loc": 456
"loc": 460
},
"server/src/audio/voice_input.rs": {
"clippy_warnings": 0,
"doc_debt": 4,
"loc": 204
"doc_debt": 10,
"loc": 461
},
"server/src/bin/lesavka-uvc.rs": {
"clippy_warnings": 0,
@ -587,8 +712,13 @@
},
"server/src/camera.rs": {
"clippy_warnings": 0,
"doc_debt": 12,
"loc": 471
"doc_debt": 0,
"loc": 132
},
"server/src/camera/selection.rs": {
"clippy_warnings": 0,
"doc_debt": 13,
"loc": 383
},
"server/src/camera_runtime.rs": {
"clippy_warnings": 0,
@ -618,7 +748,7 @@
"server/src/gadget/cycle_control.rs": {
"clippy_warnings": 0,
"doc_debt": 2,
"loc": 168
"loc": 170
},
"server/src/gadget/driver_rebind.rs": {
"clippy_warnings": 0,
@ -628,12 +758,12 @@
"server/src/gadget/enumeration_recovery.rs": {
"clippy_warnings": 0,
"doc_debt": 4,
"loc": 137
"loc": 141
},
"server/src/gadget/sysfs_state.rs": {
"clippy_warnings": 0,
"doc_debt": 4,
"loc": 127
"doc_debt": 5,
"loc": 150
},
"server/src/handshake.rs": {
"clippy_warnings": 0,
@ -643,12 +773,12 @@
"server/src/lib.rs": {
"clippy_warnings": 0,
"doc_debt": 0,
"loc": 18
"loc": 20
},
"server/src/main.rs": {
"clippy_warnings": 0,
"doc_debt": 1,
"loc": 95
"loc": 96
},
"server/src/main/entrypoint.rs": {
"clippy_warnings": 0,
@ -668,17 +798,17 @@
"server/src/main/handler_startup.rs": {
"clippy_warnings": 0,
"doc_debt": 2,
"loc": 130
"loc": 136
},
"server/src/main/relay_service.rs": {
"clippy_warnings": 0,
"doc_debt": 4,
"loc": 242
"doc_debt": 6,
"loc": 490
},
"server/src/main/relay_service_coverage.rs": {
"clippy_warnings": 0,
"doc_debt": 4,
"loc": 138
"doc_debt": 5,
"loc": 281
},
"server/src/main/rpc_helpers.rs": {
"clippy_warnings": 0,
@ -690,6 +820,11 @@
"doc_debt": 3,
"loc": 66
},
"server/src/media_timing.rs": {
"clippy_warnings": 0,
"doc_debt": 0,
"loc": 72
},
"server/src/paste.rs": {
"clippy_warnings": 0,
"doc_debt": 4,
@ -703,18 +838,43 @@
"server/src/runtime_support/audio_discovery.rs": {
"clippy_warnings": 0,
"doc_debt": 10,
"loc": 279
"loc": 281
},
"server/src/runtime_support/hid_recovery.rs": {
"clippy_warnings": 0,
"doc_debt": 4,
"loc": 242
"doc_debt": 5,
"loc": 290
},
"server/src/runtime_support/hid_write.rs": {
"clippy_warnings": 0,
"doc_debt": 1,
"loc": 90
},
"server/src/upstream_media_runtime.rs": {
"clippy_warnings": 0,
"doc_debt": 4,
"loc": 495
},
"server/src/upstream_media_runtime/config.rs": {
"clippy_warnings": 0,
"doc_debt": 4,
"loc": 79
},
"server/src/upstream_media_runtime/state.rs": {
"clippy_warnings": 0,
"doc_debt": 0,
"loc": 20
},
"server/src/upstream_media_runtime/tests.rs": {
"clippy_warnings": 0,
"doc_debt": 1,
"loc": 13
},
"server/src/upstream_media_runtime/types.rs": {
"clippy_warnings": 0,
"doc_debt": 0,
"loc": 44
},
"server/src/uvc_control/model.rs": {
"clippy_warnings": 0,
"doc_debt": 10,
@ -727,8 +887,8 @@
},
"server/src/uvc_runtime.rs": {
"clippy_warnings": 0,
"doc_debt": 3,
"loc": 241
"doc_debt": 4,
"loc": 251
},
"server/src/video.rs": {
"clippy_warnings": 0,
@ -757,18 +917,18 @@
},
"server/src/video_sinks/hdmi_sink.rs": {
"clippy_warnings": 0,
"doc_debt": 8,
"loc": 354
"doc_debt": 7,
"loc": 428
},
"server/src/video_sinks/webcam_sink.rs": {
"clippy_warnings": 0,
"doc_debt": 2,
"loc": 199
"doc_debt": 8,
"loc": 374
},
"server/src/video_support.rs": {
"clippy_warnings": 0,
"doc_debt": 1,
"loc": 236
"loc": 263
},
"testing/src/lib.rs": {
"clippy_warnings": 0,

View File

@ -1,16 +1,4 @@
{
"client/src/sync_probe/analyze/media_extract.rs": {
"line_percent": 97.96,
"loc": 245
},
"client/src/sync_probe/analyze/onset_detection/correlation.rs": {
"line_percent": 99.29,
"loc": 282
},
"client/src/sync_probe/analyze/onset_detection/correlation_collapse.rs": {
"line_percent": 98.73,
"loc": 237
},
"files": {
"client/src/app/audio_recovery_config.rs": {
"line_percent": 100.0,
@ -29,7 +17,7 @@
"loc": 304
},
"client/src/bin/lesavka-sync-analyze.rs": {
"line_percent": 100.0,
"line_percent": 95.0,
"loc": 125
},
"client/src/bin/lesavka-sync-probe.rs": {
@ -42,19 +30,19 @@
},
"client/src/input/camera.rs": {
"line_percent": 100.0,
"loc": 62
"loc": 63
},
"client/src/input/camera/bus_and_encoder.rs": {
"line_percent": 100.0,
"loc": 69
},
"client/src/input/camera/capture_pipeline.rs": {
"line_percent": 97.55,
"loc": 285
"line_percent": 97.66,
"loc": 295
},
"client/src/input/camera/device_selection.rs": {
"line_percent": 97.67,
"loc": 101
"line_percent": 97.73,
"loc": 102
},
"client/src/input/camera/encoder_selection.rs": {
"line_percent": 100.0,
@ -106,11 +94,11 @@
},
"client/src/input/microphone.rs": {
"line_percent": 100.0,
"loc": 419
"loc": 413
},
"client/src/input/mouse.rs": {
"line_percent": 98.85,
"loc": 410
"loc": 411
},
"client/src/launcher/clipboard.rs": {
"line_percent": 100.0,
@ -161,8 +149,8 @@
"loc": 78
},
"client/src/live_capture_clock.rs": {
"line_percent": 100.0,
"loc": 203
"line_percent": 99.08,
"loc": 429
},
"client/src/main.rs": {
"line_percent": 100.0,
@ -197,16 +185,16 @@
"loc": 87
},
"client/src/sync_probe/analyze/media_extract.rs": {
"line_percent": 97.96,
"loc": 319
"line_percent": 97.81,
"loc": 300
},
"client/src/sync_probe/analyze/onset_detection.rs": {
"line_percent": 96.77,
"loc": 274
"line_percent": 100.0,
"loc": 248
},
"client/src/sync_probe/analyze/onset_detection/correlation.rs": {
"line_percent": 99.29,
"loc": 334
"line_percent": 98.04,
"loc": 426
},
"client/src/sync_probe/analyze/onset_detection/correlation_collapse.rs": {
"line_percent": 98.73,
@ -222,7 +210,7 @@
},
"client/src/sync_probe/capture.rs": {
"line_percent": 100.0,
"loc": 449
"loc": 153
},
"client/src/sync_probe/capture/coverage_stub.rs": {
"line_percent": 100.0,
@ -234,7 +222,7 @@
},
"client/src/sync_probe/runner.rs": {
"line_percent": 95.65,
"loc": 208
"loc": 222
},
"client/src/sync_probe/schedule.rs": {
"line_percent": 98.74,
@ -294,7 +282,7 @@
},
"server/src/audio/voice_input.rs": {
"line_percent": 100.0,
"loc": 469
"loc": 461
},
"server/src/bin/lesavka_uvc/control_payloads.rs": {
"line_percent": 100.0,
@ -305,8 +293,8 @@
"loc": 162
},
"server/src/bin/lesavka_uvc/coverage_startup.rs": {
"line_percent": 100.0,
"loc": 110
"line_percent": 98.99,
"loc": 128
},
"server/src/bin/lesavka_uvc/payload_limits.rs": {
"line_percent": 100.0,
@ -314,11 +302,11 @@
},
"server/src/camera.rs": {
"line_percent": 100.0,
"loc": 471
"loc": 132
},
"server/src/camera/selection.rs": {
"line_percent": 97.67,
"loc": 372
"line_percent": 97.83,
"loc": 383
},
"server/src/camera_runtime.rs": {
"line_percent": 95.52,
@ -334,19 +322,19 @@
},
"server/src/gadget/cycle_control.rs": {
"line_percent": 96.77,
"loc": 168
"loc": 170
},
"server/src/gadget/driver_rebind.rs": {
"line_percent": 100.0,
"loc": 64
},
"server/src/gadget/enumeration_recovery.rs": {
"line_percent": 95.96,
"loc": 137
"line_percent": 96.08,
"loc": 141
},
"server/src/gadget/sysfs_state.rs": {
"line_percent": 98.98,
"loc": 127
"line_percent": 96.58,
"loc": 150
},
"server/src/handshake.rs": {
"line_percent": 100.0,
@ -370,15 +358,15 @@
},
"server/src/main/handler_startup.rs": {
"line_percent": 100.0,
"loc": 131
"loc": 136
},
"server/src/main/relay_service.rs": {
"line_percent": 100.0,
"loc": 406
"loc": 499
},
"server/src/main/relay_service_coverage.rs": {
"line_percent": 95.21,
"loc": 262
"line_percent": 95.86,
"loc": 287
},
"server/src/main/rpc_helpers.rs": {
"line_percent": 100.0,
@ -402,23 +390,23 @@
},
"server/src/runtime_support/hid_recovery.rs": {
"line_percent": 100.0,
"loc": 242
"loc": 290
},
"server/src/runtime_support/hid_write.rs": {
"line_percent": 100.0,
"loc": 90
},
"server/src/upstream_media_runtime.rs": {
"line_percent": 96.8,
"loc": 454
"line_percent": 98.04,
"loc": 495
},
"server/src/upstream_media_runtime/config.rs": {
"line_percent": 100.0,
"loc": 53
"loc": 79
},
"server/src/uvc_runtime.rs": {
"line_percent": 98.48,
"loc": 241
"line_percent": 97.53,
"loc": 255
},
"server/src/video/eye_capture.rs": {
"line_percent": 100.0,
@ -437,8 +425,8 @@
"loc": 428
},
"server/src/video_sinks/webcam_sink.rs": {
"line_percent": 100.0,
"loc": 258
"line_percent": 97.3,
"loc": 374
},
"server/src/video_support.rs": {
"line_percent": 97.74,

View File

@ -25,6 +25,7 @@ PATCH_DIR=${LESAVKA_KERNEL_PATCH_DIR:-$SCRIPT_DIR}
PATCH_DWC2_FIFO=${LESAVKA_KERNEL_PATCH_DWC2_FIFO:-}
PATCH_UVC_BULK=${LESAVKA_KERNEL_PATCH_UVC_BULK:-}
PATCH_UVC_DEBUG=${LESAVKA_KERNEL_PATCH_UVC_DEBUG:-}
SKIP_CPUINFO_PATCH=${LESAVKA_KERNEL_SKIP_CPUINFO_PATCH:-}
if [[ -z $KERNEL_COMMIT ]]; then
KERNEL_COMMIT=$(git ls-remote "$KERNEL_REPO" "refs/heads/$KERNEL_BRANCH" | awk '{print $1}')
@ -66,6 +67,24 @@ sudo -u "$BUILD_USER" git clone --depth 1 "$PKGBUILD_REPO" "$BUILD_ROOT/PKGBUILD
cp -a "$BUILD_ROOT/PKGBUILDs/core/linux-rpi" "$BUILD_ROOT/linux-rpi"
chown -R "$BUILD_USER":"$BUILD_USER" "$BUILD_ROOT/linux-rpi"
if [[ -n $SKIP_CPUINFO_PATCH ]]; then
BUILD_ROOT="$BUILD_ROOT" python - <<'PY'
from pathlib import Path
import os
pkgbuild = Path(os.environ["BUILD_ROOT"]) / "linux-rpi" / "PKGBUILD"
text = pkgbuild.read_text()
needle = " patch -p1 -i ../0001-Make-proc-cpuinfo-consistent-on-arm64-and-arm.patch\n"
if needle in text:
text = text.replace(
needle,
" echo \"Skipping proc-cpuinfo compatibility patch\"\n",
1,
)
pkgbuild.write_text(text)
PY
fi
sudo -u "$BUILD_USER" bash -c "
set -euo pipefail
cd '$BUILD_ROOT/linux-rpi'

View File

@ -1,5 +1,6 @@
#!/usr/bin/env bash
# scripts/manual/run_local_audio_sanity.sh
# Manual: local speaker-to-mic sanity probe; not part of CI.
#
# Play a real Lesavka-style speaker tone, record the real local microphone
# path, and fail if the recorded signal does not show strong energy at the

1
scripts/manual/run_uac_output_sanity.sh Normal file → Executable file
View File

@ -1,5 +1,6 @@
#!/usr/bin/env bash
# scripts/manual/run_uac_output_sanity.sh - play a short tone into the Theia UAC sink
# Manual: Theia UAC output sanity probe; not part of CI.
set -euo pipefail
if [[ ${EUID:-$(id -u)} -ne 0 ]]; then

View File

@ -1,5 +1,6 @@
#!/usr/bin/env bash
# scripts/manual/run_upstream_av_sync.sh
# Manual: upstream A/V sync hardware probe; not part of CI.
#
# Manual: capture the real Tethys webcam/mic endpoints while the shared-clock
# sync probe streams upstream media through Lesavka, then analyze the skew.

View File

@ -1,5 +1,6 @@
#!/usr/bin/env bash
# scripts/manual/run_upstream_browser_av_sync.sh
# Manual: browser consumer A/V sync hardware probe; not part of CI.
#
# Drive a real browser consumer on Tethys, record the combined MediaStream,
# pull the capture back, and analyze it with the Lesavka sync analyzer.

View File

@ -10,7 +10,7 @@ bench = false
[package]
name = "lesavka_server"
version = "0.14.48"
version = "0.15.0"
edition = "2024"
autobins = false

View File

@ -9,12 +9,12 @@ mod voice_caps_tests {
use super::voice_input_caps;
#[test]
fn voice_input_caps_describe_aac_adts_stereo_48k() {
fn voice_input_caps_describe_s16le_stereo_48k() {
let _ = super::gst::init();
let caps = voice_input_caps().to_string();
assert!(caps.contains("audio/mpeg"));
assert!(caps.contains("mpegversion=(int)4"));
assert!(caps.contains("stream-format=(string)adts"));
assert!(caps.contains("audio/x-raw"));
assert!(caps.contains("format=(string)S16LE"));
assert!(caps.contains("layout=(string)interleaved"));
assert!(caps.contains("rate=(int)48000"));
assert!(caps.contains("channels=(int)2"));
}

View File

@ -421,10 +421,11 @@ impl UvcVideoStream {
}
fn refresh_latest_frame(&mut self) {
if let Ok(frame) = std::fs::read(&self.frame_path) {
if !frame.is_empty() && frame.len() <= MAX_MJPEG_FRAME_BYTES {
self.latest_frame = frame;
}
if let Ok(frame) = std::fs::read(&self.frame_path)
&& !frame.is_empty()
&& frame.len() <= MAX_MJPEG_FRAME_BYTES
{
self.latest_frame = frame;
}
}
}
@ -811,7 +812,7 @@ fn uvc_control_read_only() -> bool {
|| trimmed.eq_ignore_ascii_case("no")
|| trimmed.eq_ignore_ascii_case("off"))
})
.unwrap_or(false)
.unwrap_or(true)
}
fn acquire_singleton_lock() -> Result<File> {
@ -821,6 +822,7 @@ fn acquire_singleton_lock() -> Result<File> {
.read(true)
.write(true)
.create(true)
.truncate(false)
.open(&path)
.with_context(|| format!("open singleton lock {path}"))?;
let rc = unsafe { libc::flock(file.as_raw_fd(), libc::LOCK_EX | libc::LOCK_NB) };

View File

@ -101,10 +101,28 @@ fn read_interface(path: &str) -> Option<u8> {
#[cfg(coverage)]
fn open_with_retry(path: &str) -> Result<std::fs::File> {
let read_only = uvc_control_read_only();
let mut opts = OpenOptions::new();
opts.read(true).write(true);
opts.read(true);
if !read_only {
opts.write(true);
}
if env::var("LESAVKA_UVC_BLOCKING").is_err() {
opts.custom_flags(libc::O_NONBLOCK);
}
opts.open(path).with_context(|| format!("open {path}"))
}
#[cfg(coverage)]
fn uvc_control_read_only() -> bool {
env::var("LESAVKA_UVC_CONTROL_READ_ONLY")
.ok()
.map(|value| {
let trimmed = value.trim();
!(trimmed.eq_ignore_ascii_case("0")
|| trimmed.eq_ignore_ascii_case("false")
|| trimmed.eq_ignore_ascii_case("no")
|| trimmed.eq_ignore_ascii_case("off"))
})
.unwrap_or(true)
}

View File

@ -81,12 +81,10 @@ fn select_hdmi_codec(hw_decode: bool) -> CameraCodec {
.ok()
.as_deref()
.and_then(parse_camera_codec)
.unwrap_or_else(|| {
if hw_decode {
CameraCodec::H264
} else {
CameraCodec::Mjpeg
}
.unwrap_or(if hw_decode {
CameraCodec::H264
} else {
CameraCodec::Mjpeg
})
}

View File

@ -318,6 +318,15 @@ impl Relay for Handler {
};
let plan = match upstream_media_rt.plan_video_pts(pkt.pts, frame_step_us) {
lesavka_server::upstream_media_runtime::UpstreamPlanDecision::AwaitingPair => {
if inbound_closed {
tracing::debug!(
rpc_id,
session_id,
pts = pkt.pts,
"🎥 dropping trailing upstream video frame because no paired audio arrived before stream close"
);
continue;
}
pending.push_front(pkt);
continue;
}
@ -459,6 +468,7 @@ fn remote_audio_status(message: String) -> Status {
}
#[cfg(test)]
#[allow(clippy::items_after_test_module)]
mod tests {
use super::retain_freshest_video_packet;
use lesavka_common::lesavka::VideoPacket;

View File

@ -128,6 +128,9 @@ impl Relay for Handler {
};
let plan = match upstream_media_rt.plan_audio_pts(pkt.pts) {
lesavka_server::upstream_media_runtime::UpstreamPlanDecision::AwaitingPair => {
if inbound_closed {
continue;
}
pending.push_front(pkt);
continue;
}
@ -204,6 +207,9 @@ impl Relay for Handler {
};
let plan = match upstream_media_rt.plan_video_pts(pkt.pts, frame_step_us) {
lesavka_server::upstream_media_runtime::UpstreamPlanDecision::AwaitingPair => {
if inbound_closed {
continue;
}
pending.push_front(pkt);
continue;
}

View File

@ -9,66 +9,17 @@ use tracing::info;
mod config;
mod state;
mod types;
use config::{
apply_playout_offset, upstream_pairing_master_slack, upstream_playout_delay,
upstream_playout_offset_us, upstream_reanchor_late_threshold, upstream_timing_trace_enabled,
apply_playout_offset, upstream_camera_startup_grace_us, upstream_pairing_master_slack,
upstream_playout_delay, upstream_playout_offset_us, upstream_reanchor_late_threshold,
upstream_reanchor_window_us, upstream_timing_trace_enabled,
};
use state::UpstreamClockState;
fn upstream_camera_startup_grace_us() -> u64 {
std::env::var("LESAVKA_UPSTREAM_CAMERA_STARTUP_GRACE_MS")
.ok()
.and_then(|value| value.trim().parse::<u64>().ok())
.unwrap_or(if cfg!(test) { 0 } else { 250 })
.saturating_mul(1_000)
}
fn upstream_reanchor_window_us(playout_delay: Duration) -> u64 {
playout_delay.as_micros().min(u64::MAX as u128) as u64
}
/// Logical upstream media kinds that share one live-call session timeline.
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub enum UpstreamMediaKind {
/// Webcam uplink frames destined for the UVC/HDMI sink path.
Camera,
/// Microphone uplink packets destined for the UAC sink path.
Microphone,
}
/// Lease returned when one upstream media stream becomes the active owner.
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub struct UpstreamStreamLease {
/// Shared session id for the current upstream live-call window.
pub session_id: u64,
/// Per-kind generation used to supersede older streams of the same kind.
pub generation: u64,
}
/// One rebased upstream packet plus its planned server playout time.
#[derive(Clone, Copy, Debug)]
pub struct PlannedUpstreamPacket {
/// Session-local packet timestamp after rebase onto the shared server clock.
pub local_pts_us: u64,
/// Wall-clock deadline when the server should present this packet.
pub due_at: Instant,
/// How late the packet already is when planned, if any.
pub late_by: Duration,
}
/// Result of asking the shared upstream runtime how to handle one packet.
#[derive(Clone, Copy, Debug)]
pub enum UpstreamPlanDecision {
/// Hold the packet inside the local stream queue until the pairing window
/// has enough cross-stream context to assign a trustworthy playout time.
AwaitingPair,
/// Discard the packet because it belongs before the shared overlapping A/V
/// session base and would only reintroduce startup skew.
DropBeforeOverlap,
/// Present the packet at the planned wall-clock deadline.
Play(PlannedUpstreamPacket),
}
pub use types::{
PlannedUpstreamPacket, UpstreamMediaKind, UpstreamPlanDecision, UpstreamStreamLease,
};
/// Coordinate upstream stream ownership and keep audio/video on one timeline.
///
@ -534,5 +485,11 @@ impl UpstreamMediaRuntime {
}
}
impl Default for UpstreamMediaRuntime {
fn default() -> Self {
Self::new()
}
}
#[cfg(test)]
mod tests;

View File

@ -57,6 +57,18 @@ pub(super) fn upstream_reanchor_late_threshold(playout_delay: Duration) -> Durat
Duration::from_millis(default_ms)
}
pub(super) fn upstream_camera_startup_grace_us() -> u64 {
std::env::var("LESAVKA_UPSTREAM_CAMERA_STARTUP_GRACE_MS")
.ok()
.and_then(|value| value.trim().parse::<u64>().ok())
.unwrap_or(if cfg!(test) { 0 } else { 250 })
.saturating_mul(1_000)
}
pub(super) fn upstream_reanchor_window_us(playout_delay: Duration) -> u64 {
playout_delay.as_micros().min(u64::MAX as u128) as u64
}
pub(super) fn apply_playout_offset(base: Instant, offset_us: i64) -> Instant {
if offset_us >= 0 {
base + Duration::from_micros(offset_us as u64)

View File

@ -1,614 +1,13 @@
use super::{PlannedUpstreamPacket, UpstreamMediaKind, UpstreamMediaRuntime};
use std::sync::Arc;
use std::time::Duration;
use super::*;
fn play(decision: super::UpstreamPlanDecision) -> PlannedUpstreamPacket {
fn play(decision: UpstreamPlanDecision) -> PlannedUpstreamPacket {
match decision {
super::UpstreamPlanDecision::Play(plan) => plan,
UpstreamPlanDecision::Play(plan) => plan,
other => panic!("expected playable packet, got {other:?}"),
}
}
#[test]
fn first_stream_starts_a_new_shared_session() {
let runtime = UpstreamMediaRuntime::new();
let camera = runtime.activate_camera();
let microphone = runtime.activate_microphone();
assert_eq!(camera.session_id, 1);
assert_eq!(microphone.session_id, 1);
assert!(runtime.is_camera_active(camera.generation));
assert!(runtime.is_microphone_active(microphone.generation));
}
#[test]
fn replacing_one_kind_keeps_the_session_but_preempts_the_old_owner() {
let runtime = UpstreamMediaRuntime::new();
let first = runtime.activate_microphone();
let second = runtime.activate_microphone();
assert_eq!(first.session_id, second.session_id);
assert!(!runtime.is_microphone_active(first.generation));
assert!(runtime.is_microphone_active(second.generation));
}
#[test]
fn closing_the_last_stream_resets_the_next_session_anchor() {
let runtime = UpstreamMediaRuntime::new();
let camera = runtime.activate_camera();
let microphone = runtime.activate_microphone();
runtime.close_camera(camera.generation);
runtime.close_microphone(microphone.generation);
let next = runtime.activate_camera();
assert_eq!(next.session_id, 2);
}
#[test]
fn first_packets_wait_for_the_counterpart_before_pairing() {
let runtime = UpstreamMediaRuntime::new();
let _camera = runtime.activate_camera();
let _microphone = runtime.activate_microphone();
assert!(matches!(
runtime.plan_video_pts(1_000_000, 16_666),
super::UpstreamPlanDecision::AwaitingPair
));
let audio_first = play(runtime.plan_audio_pts(1_000_000));
let video_first = play(runtime.plan_video_pts(1_000_000, 16_666));
assert_eq!(audio_first.local_pts_us, 0);
assert_eq!(video_first.local_pts_us, 0);
assert_eq!(audio_first.due_at, video_first.due_at);
}
#[test]
fn overlap_waits_for_camera_startup_grace_before_establishing_the_shared_base() {
temp_env::with_var("LESAVKA_UPSTREAM_CAMERA_STARTUP_GRACE_MS", Some("250"), || {
let runtime = UpstreamMediaRuntime::new();
let _camera = runtime.activate_camera();
let _microphone = runtime.activate_microphone();
assert!(matches!(
runtime.plan_video_pts(1_000_000, 16_666),
super::UpstreamPlanDecision::AwaitingPair
));
assert!(matches!(
runtime.plan_audio_pts(1_000_000),
super::UpstreamPlanDecision::AwaitingPair
));
assert!(matches!(
runtime.plan_video_pts(1_200_000, 16_666),
super::UpstreamPlanDecision::AwaitingPair
));
let video_ready = play(runtime.plan_video_pts(1_250_000, 16_666));
let audio_ready = play(runtime.plan_audio_pts(1_260_000));
assert_eq!(video_ready.local_pts_us, 0);
assert_eq!(audio_ready.local_pts_us, 10_000);
});
}
#[test]
fn pairing_window_does_not_expire_into_one_sided_playout_while_camera_warms_up() {
temp_env::with_var("LESAVKA_UPSTREAM_CAMERA_STARTUP_GRACE_MS", Some("250"), || {
temp_env::with_var("LESAVKA_UPSTREAM_PLAYOUT_DELAY_MS", Some("20"), || {
let runtime = UpstreamMediaRuntime::new();
let _camera = runtime.activate_camera();
let _microphone = runtime.activate_microphone();
assert!(matches!(
runtime.plan_video_pts(1_000_000, 16_666),
super::UpstreamPlanDecision::AwaitingPair
));
assert!(matches!(
runtime.plan_audio_pts(1_000_000),
super::UpstreamPlanDecision::AwaitingPair
));
std::thread::sleep(Duration::from_millis(30));
assert!(matches!(
runtime.plan_audio_pts(1_010_000),
super::UpstreamPlanDecision::AwaitingPair
));
let video_ready = play(runtime.plan_video_pts(1_250_000, 16_666));
let audio_ready = play(runtime.plan_audio_pts(1_260_000));
assert_eq!(video_ready.local_pts_us, 0);
assert_eq!(audio_ready.local_pts_us, 10_000);
});
});
}
#[test]
fn overlap_pairing_drops_leading_packets_before_the_shared_base() {
let runtime = UpstreamMediaRuntime::new();
let _camera = runtime.activate_camera();
let _microphone = runtime.activate_microphone();
assert!(matches!(
runtime.plan_audio_pts(1_000_000),
super::UpstreamPlanDecision::AwaitingPair
));
let video_first = play(runtime.plan_video_pts(1_300_000, 16_666));
assert_eq!(video_first.local_pts_us, 0);
assert!(matches!(
runtime.plan_audio_pts(1_000_000),
super::UpstreamPlanDecision::DropBeforeOverlap
));
let audio_next = play(runtime.plan_audio_pts(1_310_000));
let video_next = play(runtime.plan_video_pts(1_333_333, 16_666));
assert_eq!(audio_next.local_pts_us, 10_000);
assert_eq!(video_next.local_pts_us, 33_333);
}
#[test]
fn shared_clock_keeps_each_kind_monotonic_when_remote_pts_repeat() {
let runtime = UpstreamMediaRuntime::new();
let _camera = runtime.activate_camera();
let _microphone = runtime.activate_microphone();
assert!(matches!(
runtime.plan_video_pts(50_000, 16_666),
super::UpstreamPlanDecision::AwaitingPair
));
let _audio = play(runtime.plan_audio_pts(50_000));
let first = play(runtime.plan_video_pts(50_000, 16_666));
let repeated = play(runtime.plan_video_pts(50_000, 16_666));
assert_eq!(first.local_pts_us, 0);
assert_eq!(repeated.local_pts_us, 16_666);
}
#[test]
fn close_ignores_superseded_generation_values() {
let runtime = UpstreamMediaRuntime::new();
let first = runtime.activate_camera();
let second = runtime.activate_camera();
runtime.close_camera(first.generation);
assert!(runtime.is_camera_active(second.generation));
runtime.close(super::UpstreamMediaKind::Camera, second.generation);
let next = runtime.activate_camera();
assert_eq!(next.session_id, 2);
}
#[test]
fn upstream_playout_delay_defaults_to_one_second_and_accepts_overrides() {
temp_env::with_var_unset("LESAVKA_UPSTREAM_PLAYOUT_DELAY_MS", || {
assert_eq!(super::upstream_playout_delay(), Duration::from_secs(1));
});
temp_env::with_var("LESAVKA_UPSTREAM_PLAYOUT_DELAY_MS", Some("250"), || {
assert_eq!(super::upstream_playout_delay(), Duration::from_millis(250));
});
}
#[test]
fn upstream_playout_offsets_default_to_zero_and_accept_overrides() {
temp_env::with_var_unset("LESAVKA_UPSTREAM_AUDIO_PLAYOUT_OFFSET_US", || {
temp_env::with_var_unset("LESAVKA_UPSTREAM_VIDEO_PLAYOUT_OFFSET_US", || {
assert_eq!(
super::upstream_playout_offset_us(UpstreamMediaKind::Microphone),
0
);
assert_eq!(
super::upstream_playout_offset_us(UpstreamMediaKind::Camera),
0
);
});
});
temp_env::with_var(
"LESAVKA_UPSTREAM_AUDIO_PLAYOUT_OFFSET_US",
Some("-20000"),
|| {
temp_env::with_var(
"LESAVKA_UPSTREAM_VIDEO_PLAYOUT_OFFSET_US",
Some("35000"),
|| {
assert_eq!(
super::upstream_playout_offset_us(UpstreamMediaKind::Microphone),
-20_000
);
assert_eq!(
super::upstream_playout_offset_us(UpstreamMediaKind::Camera),
35_000
);
},
);
},
);
}
#[test]
fn upstream_pairing_master_slack_defaults_to_twenty_ms_and_accepts_overrides() {
temp_env::with_var_unset("LESAVKA_UPSTREAM_PAIR_SLACK_US", || {
assert_eq!(
super::upstream_pairing_master_slack(),
Duration::from_micros(20_000)
);
});
temp_env::with_var("LESAVKA_UPSTREAM_PAIR_SLACK_US", Some("5000"), || {
assert_eq!(
super::upstream_pairing_master_slack(),
Duration::from_micros(5_000)
);
});
}
#[test]
fn upstream_reanchor_late_threshold_defaults_to_half_the_buffer_and_accepts_overrides() {
temp_env::with_var_unset("LESAVKA_UPSTREAM_REANCHOR_LATE_MS", || {
assert_eq!(
super::upstream_reanchor_late_threshold(Duration::from_secs(1)),
Duration::from_millis(500)
);
assert_eq!(
super::upstream_reanchor_late_threshold(Duration::from_millis(100)),
Duration::from_millis(250)
);
});
temp_env::with_var("LESAVKA_UPSTREAM_REANCHOR_LATE_MS", Some("42"), || {
assert_eq!(
super::upstream_reanchor_late_threshold(Duration::from_secs(1)),
Duration::from_millis(42)
);
});
}
#[test]
fn upstream_timing_trace_flag_accepts_false_values() {
temp_env::with_var("LESAVKA_UPSTREAM_TIMING_TRACE", Some("off"), || {
assert!(!super::upstream_timing_trace_enabled());
});
temp_env::with_var("LESAVKA_UPSTREAM_TIMING_TRACE", Some("false"), || {
assert!(!super::upstream_timing_trace_enabled());
});
temp_env::with_var("LESAVKA_UPSTREAM_TIMING_TRACE", Some("1"), || {
assert!(super::upstream_timing_trace_enabled());
});
}
#[test]
fn apply_playout_offset_supports_negative_offsets() {
let base = tokio::time::Instant::now() + Duration::from_millis(50);
let shifted = super::apply_playout_offset(base, -20_000);
let delta = base.saturating_duration_since(shifted);
assert_eq!(delta, Duration::from_micros(20_000));
}
#[test]
fn apply_playout_offset_supports_positive_offsets() {
let base = tokio::time::Instant::now();
let shifted = super::apply_playout_offset(base, 30_000);
let delta = shifted.saturating_duration_since(base);
assert_eq!(delta, Duration::from_micros(30_000));
}
#[test]
fn shared_playout_epoch_is_reused_across_audio_and_video() {
let runtime = UpstreamMediaRuntime::new();
let _camera = runtime.activate_camera();
let _microphone = runtime.activate_microphone();
assert!(matches!(
runtime.plan_video_pts(1_000_000, 16_666),
super::UpstreamPlanDecision::AwaitingPair
));
let audio_first = play(runtime.plan_audio_pts(1_000_000));
let video_first = play(runtime.plan_video_pts(1_000_000, 16_666));
let audio_next = play(runtime.plan_audio_pts(1_010_000));
assert_eq!(video_first.local_pts_us, 0);
assert_eq!(audio_first.local_pts_us, 0);
assert_eq!(video_first.due_at, audio_first.due_at);
assert_eq!(
audio_next
.due_at
.saturating_duration_since(audio_first.due_at),
Duration::from_micros(10_000)
);
}
#[test]
fn pairing_window_can_expire_into_one_sided_playout() {
temp_env::with_var("LESAVKA_UPSTREAM_PLAYOUT_DELAY_MS", Some("0"), || {
let runtime = UpstreamMediaRuntime::new();
let _camera = runtime.activate_camera();
let first = play(runtime.plan_video_pts(1_000_000, 16_666));
let second = play(runtime.plan_video_pts(1_016_666, 16_666));
assert_eq!(first.local_pts_us, 0);
assert_eq!(second.local_pts_us, 16_666);
});
}
#[test]
fn map_wrappers_hide_unpaired_and_pre_overlap_packets() {
let runtime = UpstreamMediaRuntime::new();
let _camera = runtime.activate_camera();
let _microphone = runtime.activate_microphone();
assert_eq!(runtime.map_video_pts(1_000_000, 16_666), None);
assert_eq!(runtime.map_audio_pts(1_000_000), Some(0));
assert_eq!(runtime.map_audio_pts(999_999), None);
}
#[test]
fn shared_playout_trace_path_keeps_planned_pts_stable() {
temp_env::with_var("LESAVKA_UPSTREAM_TIMING_TRACE", Some("1"), || {
let runtime = UpstreamMediaRuntime::new();
let _camera = runtime.activate_camera();
let _microphone = runtime.activate_microphone();
assert!(matches!(
runtime.plan_video_pts(1_000_000, 16_666),
super::UpstreamPlanDecision::AwaitingPair
));
let audio = play(runtime.plan_audio_pts(1_000_000));
let video = play(runtime.plan_video_pts(1_000_000, 16_666));
assert_eq!(video.local_pts_us, 0);
assert_eq!(audio.local_pts_us, 0);
});
}
#[test]
fn catastrophic_lateness_reanchors_the_shared_playout_epoch() {
temp_env::with_var("LESAVKA_UPSTREAM_PLAYOUT_DELAY_MS", Some("20"), || {
temp_env::with_var("LESAVKA_UPSTREAM_REANCHOR_LATE_MS", Some("5"), || {
let runtime = UpstreamMediaRuntime::new();
let _camera = runtime.activate_camera();
let _microphone = runtime.activate_microphone();
assert!(matches!(
runtime.plan_video_pts(1_000_000, 16_666),
super::UpstreamPlanDecision::AwaitingPair
));
let _audio_first = play(runtime.plan_audio_pts(1_000_000));
let _video_first = play(runtime.plan_video_pts(1_000_000, 16_666));
std::thread::sleep(Duration::from_millis(30));
let recovered_audio = play(runtime.plan_audio_pts(1_000_000));
assert!(
recovered_audio.due_at > tokio::time::Instant::now(),
"recovered packet should be scheduled back into the future"
);
assert!(
recovered_audio.late_by <= Duration::from_millis(1),
"recovered packet should no longer be catastrophically late"
);
let recovered_video = play(runtime.plan_video_pts(1_016_666, 16_666));
assert!(
recovered_video.due_at > tokio::time::Instant::now(),
"shared epoch recovery should also move video back into the future"
);
});
});
}
#[test]
fn overlap_anchor_gets_a_fresh_playout_budget_when_pairing_finishes_late() {
temp_env::with_var("LESAVKA_UPSTREAM_PLAYOUT_DELAY_MS", Some("20"), || {
let runtime = UpstreamMediaRuntime::new();
let _camera = runtime.activate_camera();
let _microphone = runtime.activate_microphone();
assert!(matches!(
runtime.plan_video_pts(1_000_000, 16_666),
super::UpstreamPlanDecision::AwaitingPair
));
std::thread::sleep(Duration::from_millis(15));
let before_pair = tokio::time::Instant::now();
let audio_first = play(runtime.plan_audio_pts(1_000_000));
let video_first = play(runtime.plan_video_pts(1_000_000, 16_666));
assert!(
audio_first.due_at.saturating_duration_since(before_pair) >= Duration::from_millis(15),
"audio should keep most of the configured playout budget after late pairing"
);
assert!(
video_first.due_at.saturating_duration_since(before_pair) >= Duration::from_millis(15),
"video should keep most of the configured playout budget after late pairing"
);
});
}
#[test]
fn catastrophic_lateness_reanchors_only_once_per_session() {
temp_env::with_var("LESAVKA_UPSTREAM_PLAYOUT_DELAY_MS", Some("20"), || {
temp_env::with_var("LESAVKA_UPSTREAM_REANCHOR_LATE_MS", Some("5"), || {
let runtime = UpstreamMediaRuntime::new();
let _camera = runtime.activate_camera();
let _microphone = runtime.activate_microphone();
assert!(matches!(
runtime.plan_video_pts(1_000_000, 16_666),
super::UpstreamPlanDecision::AwaitingPair
));
let _audio_first = play(runtime.plan_audio_pts(1_000_000));
let _video_first = play(runtime.plan_video_pts(1_000_000, 16_666));
std::thread::sleep(Duration::from_millis(30));
let first_recovered = play(runtime.plan_audio_pts(1_000_000));
assert!(first_recovered.due_at > tokio::time::Instant::now());
std::thread::sleep(Duration::from_millis(30));
let second_late = play(runtime.plan_audio_pts(1_000_001));
assert!(
second_late.late_by > Duration::from_millis(5),
"session should not keep extending itself with repeated reanchors"
);
});
});
}
#[test]
fn catastrophic_lateness_does_not_reanchor_once_the_session_is_well_past_startup() {
temp_env::with_var("LESAVKA_UPSTREAM_PLAYOUT_DELAY_MS", Some("20"), || {
temp_env::with_var("LESAVKA_UPSTREAM_REANCHOR_LATE_MS", Some("5"), || {
let runtime = UpstreamMediaRuntime::new();
let _camera = runtime.activate_camera();
let _microphone = runtime.activate_microphone();
assert!(matches!(
runtime.plan_video_pts(1_000_000, 16_666),
super::UpstreamPlanDecision::AwaitingPair
));
let _audio_first = play(runtime.plan_audio_pts(1_000_000));
let _video_first = play(runtime.plan_video_pts(1_000_000, 16_666));
std::thread::sleep(Duration::from_millis(130));
let late_audio = play(runtime.plan_audio_pts(1_100_000));
assert_eq!(late_audio.local_pts_us, 100_000);
assert!(
late_audio.late_by > Duration::from_millis(5),
"late packet should remain late instead of reanchoring the shared epoch mid-session"
);
assert!(
late_audio.due_at <= tokio::time::Instant::now(),
"mid-session lateness should no longer push due_at back into the future"
);
});
});
}
#[tokio::test(flavor = "current_thread")]
async fn wait_for_audio_master_releases_video_once_audio_catches_up() {
let runtime = Arc::new(UpstreamMediaRuntime::new());
let _camera = runtime.activate_camera();
let _microphone = runtime.activate_microphone();
assert!(matches!(
runtime.plan_video_pts(1_000_000, 16_666),
super::UpstreamPlanDecision::AwaitingPair
));
let _audio_first = play(runtime.plan_audio_pts(1_000_000));
let video_first = play(runtime.plan_video_pts(1_000_000, 16_666));
let waiter = tokio::spawn({
let runtime = runtime.clone();
async move {
runtime
.wait_for_audio_master(video_first.local_pts_us + 10_000, video_first.due_at)
.await
}
});
tokio::time::sleep(Duration::from_millis(5)).await;
let _audio_next = play(runtime.plan_audio_pts(1_010_000));
assert!(waiter.await.expect("audio master waiter should finish"));
}
#[tokio::test(flavor = "current_thread")]
async fn wait_for_audio_master_times_out_when_audio_never_catches_up() {
let runtime = Arc::new(UpstreamMediaRuntime::new());
let _camera = runtime.activate_camera();
let _microphone = runtime.activate_microphone();
assert!(matches!(
runtime.plan_video_pts(1_000_000, 16_666),
super::UpstreamPlanDecision::AwaitingPair
));
let _audio_first = play(runtime.plan_audio_pts(1_000_000));
let video_first = play(runtime.plan_video_pts(1_000_000, 16_666));
let due_at = tokio::time::Instant::now() + Duration::from_millis(20);
assert!(
!runtime
.wait_for_audio_master(video_first.local_pts_us + 100_000, due_at)
.await
);
}
#[tokio::test(flavor = "current_thread")]
async fn wait_for_audio_master_returns_true_when_no_microphone_stream_is_active() {
let runtime = Arc::new(UpstreamMediaRuntime::new());
let camera = runtime.activate_camera();
let microphone = runtime.activate_microphone();
runtime.close_microphone(microphone.generation);
assert!(runtime.is_camera_active(camera.generation));
assert!(
runtime
.wait_for_audio_master(
123_456,
tokio::time::Instant::now() + Duration::from_millis(10)
)
.await
);
}
#[tokio::test(flavor = "current_thread")]
async fn new_microphone_owner_waits_for_the_previous_sink_to_release() {
let runtime = Arc::new(UpstreamMediaRuntime::new());
let first = runtime.activate_microphone();
let first_permit = runtime
.reserve_microphone_sink(first.generation)
.await
.expect("first owner should acquire the sink gate");
let second = runtime.activate_microphone();
let waiter = tokio::spawn({
let runtime = runtime.clone();
async move {
runtime
.reserve_microphone_sink(second.generation)
.await
.is_some()
}
});
tokio::time::sleep(Duration::from_millis(25)).await;
assert!(!waiter.is_finished());
drop(first_permit);
assert!(waiter.await.expect("waiter task should finish"));
}
#[tokio::test(flavor = "current_thread")]
async fn superseded_microphone_waiter_stands_down_before_opening_a_sink() {
let runtime = Arc::new(UpstreamMediaRuntime::new());
let first = runtime.activate_microphone();
let first_permit = runtime
.reserve_microphone_sink(first.generation)
.await
.expect("first owner should acquire the sink gate");
let second = runtime.activate_microphone();
let superseded_waiter = tokio::spawn({
let runtime = runtime.clone();
async move {
runtime
.reserve_microphone_sink(second.generation)
.await
.is_some()
}
});
tokio::time::sleep(Duration::from_millis(25)).await;
let _third = runtime.activate_microphone();
drop(first_permit);
assert!(
!superseded_waiter
.await
.expect("superseded waiter task should finish"),
"older waiter should stand down instead of opening a sink after supersession"
);
}
mod async_wait;
mod config;
mod lifecycle;
mod planning;

View File

@ -0,0 +1,129 @@
use super::{UpstreamMediaRuntime, play};
use std::sync::Arc;
use std::time::Duration;
// Video that has raced ahead of audio must be released as soon as the audio
// master plans a packet at or past the awaited pts.
#[tokio::test(flavor = "current_thread")]
async fn wait_for_audio_master_releases_video_once_audio_catches_up() {
    let runtime = Arc::new(UpstreamMediaRuntime::new());
    let _camera = runtime.activate_camera();
    let _microphone = runtime.activate_microphone();
    // The very first video packet cannot be planned until audio pairs with it.
    assert!(matches!(
        runtime.plan_video_pts(1_000_000, 16_666),
        super::UpstreamPlanDecision::AwaitingPair
    ));
    let _audio_first = play(runtime.plan_audio_pts(1_000_000));
    let video_first = play(runtime.plan_video_pts(1_000_000, 16_666));
    // Park a background task waiting for audio to reach 10 ms past the first
    // video pts, bounded by that packet's own due time.
    let waiter = tokio::spawn({
        let runtime = runtime.clone();
        async move {
            runtime
                .wait_for_audio_master(video_first.local_pts_us + 10_000, video_first.due_at)
                .await
        }
    });
    tokio::time::sleep(Duration::from_millis(5)).await;
    // Planning the next audio packet (pts +10 ms) must unblock the waiter
    // with `true`.
    let _audio_next = play(runtime.plan_audio_pts(1_010_000));
    assert!(waiter.await.expect("audio master waiter should finish"));
}
// If audio never advances to the awaited pts before the deadline, the wait
// must give up and return `false` instead of blocking video forever.
#[tokio::test(flavor = "current_thread")]
async fn wait_for_audio_master_times_out_when_audio_never_catches_up() {
    let runtime = Arc::new(UpstreamMediaRuntime::new());
    let _camera = runtime.activate_camera();
    let _microphone = runtime.activate_microphone();
    assert!(matches!(
        runtime.plan_video_pts(1_000_000, 16_666),
        super::UpstreamPlanDecision::AwaitingPair
    ));
    let _audio_first = play(runtime.plan_audio_pts(1_000_000));
    let video_first = play(runtime.plan_video_pts(1_000_000, 16_666));
    // Deadline only 20 ms out while the awaited pts sits 100 ms ahead of the
    // last planned audio packet, so the wait must time out.
    let due_at = tokio::time::Instant::now() + Duration::from_millis(20);
    assert!(
        !runtime
            .wait_for_audio_master(video_first.local_pts_us + 100_000, due_at)
            .await
    );
}
// With no active microphone stream there is no audio master to wait on, so
// the wait resolves to `true` without requiring any audio progress.
#[tokio::test(flavor = "current_thread")]
async fn wait_for_audio_master_returns_true_when_no_microphone_stream_is_active() {
    let runtime = Arc::new(UpstreamMediaRuntime::new());
    let camera = runtime.activate_camera();
    let microphone = runtime.activate_microphone();
    // Close the microphone; the camera remains the only active stream.
    runtime.close_microphone(microphone.generation);
    assert!(runtime.is_camera_active(camera.generation));
    assert!(
        runtime
            .wait_for_audio_master(
                123_456,
                tokio::time::Instant::now() + Duration::from_millis(10)
            )
            .await
    );
}
// A replacement microphone owner must block on the sink gate until the
// previous owner's permit is dropped, then acquire it successfully.
#[tokio::test(flavor = "current_thread")]
async fn new_microphone_owner_waits_for_the_previous_sink_to_release() {
    let runtime = Arc::new(UpstreamMediaRuntime::new());
    let first = runtime.activate_microphone();
    let first_permit = runtime
        .reserve_microphone_sink(first.generation)
        .await
        .expect("first owner should acquire the sink gate");
    // Activating a second microphone supersedes `first` but does not free
    // the sink permit it still holds.
    let second = runtime.activate_microphone();
    let waiter = tokio::spawn({
        let runtime = runtime.clone();
        async move {
            runtime
                .reserve_microphone_sink(second.generation)
                .await
                .is_some()
        }
    });
    // Give the waiter time to park; it must not finish while the first
    // owner's permit is alive.
    tokio::time::sleep(Duration::from_millis(25)).await;
    assert!(!waiter.is_finished());
    drop(first_permit);
    assert!(waiter.await.expect("waiter task should finish"));
}
// A waiter whose generation is superseded while parked at the sink gate must
// return `None` (stand down) rather than open a sink it no longer owns.
#[tokio::test(flavor = "current_thread")]
async fn superseded_microphone_waiter_stands_down_before_opening_a_sink() {
    let runtime = Arc::new(UpstreamMediaRuntime::new());
    let first = runtime.activate_microphone();
    let first_permit = runtime
        .reserve_microphone_sink(first.generation)
        .await
        .expect("first owner should acquire the sink gate");
    let second = runtime.activate_microphone();
    let superseded_waiter = tokio::spawn({
        let runtime = runtime.clone();
        async move {
            runtime
                .reserve_microphone_sink(second.generation)
                .await
                .is_some()
        }
    });
    tokio::time::sleep(Duration::from_millis(25)).await;
    // A third activation supersedes `second` while it is still waiting for
    // the gate; only then is the first owner's permit released.
    let _third = runtime.activate_microphone();
    drop(first_permit);
    assert!(
        !superseded_waiter
            .await
            .expect("superseded waiter task should finish"),
        "older waiter should stand down instead of opening a sink after supersession"
    );
}

View File

@ -0,0 +1,117 @@
use super::UpstreamMediaKind;
use std::time::Duration;
// Default playout delay is 1 s; LESAVKA_UPSTREAM_PLAYOUT_DELAY_MS (milliseconds)
// overrides it.
#[test]
fn upstream_playout_delay_defaults_to_one_second_and_accepts_overrides() {
    temp_env::with_var_unset("LESAVKA_UPSTREAM_PLAYOUT_DELAY_MS", || {
        assert_eq!(super::upstream_playout_delay(), Duration::from_secs(1));
    });
    temp_env::with_var("LESAVKA_UPSTREAM_PLAYOUT_DELAY_MS", Some("250"), || {
        assert_eq!(super::upstream_playout_delay(), Duration::from_millis(250));
    });
}
// Per-kind playout offsets default to 0 µs; the audio and video environment
// variables override them independently and may be negative.
#[test]
fn upstream_playout_offsets_default_to_zero_and_accept_overrides() {
    // With both variables unset, each kind reports a zero offset.
    temp_env::with_var_unset("LESAVKA_UPSTREAM_AUDIO_PLAYOUT_OFFSET_US", || {
        temp_env::with_var_unset("LESAVKA_UPSTREAM_VIDEO_PLAYOUT_OFFSET_US", || {
            assert_eq!(
                super::upstream_playout_offset_us(UpstreamMediaKind::Microphone),
                0
            );
            assert_eq!(
                super::upstream_playout_offset_us(UpstreamMediaKind::Camera),
                0
            );
        });
    });
    // Audio accepts a negative override while video takes a positive one.
    temp_env::with_var(
        "LESAVKA_UPSTREAM_AUDIO_PLAYOUT_OFFSET_US",
        Some("-20000"),
        || {
            temp_env::with_var(
                "LESAVKA_UPSTREAM_VIDEO_PLAYOUT_OFFSET_US",
                Some("35000"),
                || {
                    assert_eq!(
                        super::upstream_playout_offset_us(UpstreamMediaKind::Microphone),
                        -20_000
                    );
                    assert_eq!(
                        super::upstream_playout_offset_us(UpstreamMediaKind::Camera),
                        35_000
                    );
                },
            );
        },
    );
}
// Pairing-master slack defaults to 20 ms (20_000 µs);
// LESAVKA_UPSTREAM_PAIR_SLACK_US (microseconds) overrides it.
#[test]
fn upstream_pairing_master_slack_defaults_to_twenty_ms_and_accepts_overrides() {
    temp_env::with_var_unset("LESAVKA_UPSTREAM_PAIR_SLACK_US", || {
        assert_eq!(
            super::upstream_pairing_master_slack(),
            Duration::from_micros(20_000)
        );
    });
    temp_env::with_var("LESAVKA_UPSTREAM_PAIR_SLACK_US", Some("5000"), || {
        assert_eq!(
            super::upstream_pairing_master_slack(),
            Duration::from_micros(5_000)
        );
    });
}
// Reanchor lateness threshold defaults to half the playout buffer (1 s -> 500 ms)
// but a 100 ms buffer still yields 250 ms, so short buffers appear to be
// floored rather than strictly halved — behavior pinned by the asserts below.
// LESAVKA_UPSTREAM_REANCHOR_LATE_MS (milliseconds) overrides the default.
#[test]
fn upstream_reanchor_late_threshold_defaults_to_half_the_buffer_and_accepts_overrides() {
    temp_env::with_var_unset("LESAVKA_UPSTREAM_REANCHOR_LATE_MS", || {
        assert_eq!(
            super::upstream_reanchor_late_threshold(Duration::from_secs(1)),
            Duration::from_millis(500)
        );
        assert_eq!(
            super::upstream_reanchor_late_threshold(Duration::from_millis(100)),
            Duration::from_millis(250)
        );
    });
    temp_env::with_var("LESAVKA_UPSTREAM_REANCHOR_LATE_MS", Some("42"), || {
        assert_eq!(
            super::upstream_reanchor_late_threshold(Duration::from_secs(1)),
            Duration::from_millis(42)
        );
    });
}
// LESAVKA_UPSTREAM_TIMING_TRACE treats "off" and "false" as disabled and
// "1" as enabled.
#[test]
fn upstream_timing_trace_flag_accepts_false_values() {
    temp_env::with_var("LESAVKA_UPSTREAM_TIMING_TRACE", Some("off"), || {
        assert!(!super::upstream_timing_trace_enabled());
    });
    temp_env::with_var("LESAVKA_UPSTREAM_TIMING_TRACE", Some("false"), || {
        assert!(!super::upstream_timing_trace_enabled());
    });
    temp_env::with_var("LESAVKA_UPSTREAM_TIMING_TRACE", Some("1"), || {
        assert!(super::upstream_timing_trace_enabled());
    });
}
// A negative offset shifts the planned instant earlier by exactly that many
// microseconds. Base is 50 ms in the future so the subtraction cannot underflow.
#[test]
fn apply_playout_offset_supports_negative_offsets() {
    let base = tokio::time::Instant::now() + Duration::from_millis(50);
    let shifted = super::apply_playout_offset(base, -20_000);
    let delta = base.saturating_duration_since(shifted);
    assert_eq!(delta, Duration::from_micros(20_000));
}
#[test]
fn apply_playout_offset_supports_positive_offsets() {
    // A positive offset pushes the deadline 30 ms later than the anchor.
    let anchor = tokio::time::Instant::now();
    let later = super::apply_playout_offset(anchor, 30_000);
    assert_eq!(
        later.saturating_duration_since(anchor),
        Duration::from_micros(30_000)
    );
}

View File

@ -0,0 +1,181 @@
use super::{UpstreamMediaRuntime, play};
use std::time::Duration;
#[test]
fn first_stream_starts_a_new_shared_session() {
    let runtime = UpstreamMediaRuntime::new();
    // Activating both kinds for the first time should land in session 1 …
    let cam_lease = runtime.activate_camera();
    let mic_lease = runtime.activate_microphone();
    assert_eq!(cam_lease.session_id, 1);
    assert_eq!(mic_lease.session_id, 1);
    // … and each lease's generation must be the live one for its kind.
    assert!(runtime.is_camera_active(cam_lease.generation));
    assert!(runtime.is_microphone_active(mic_lease.generation));
}
#[test]
fn replacing_one_kind_keeps_the_session_but_preempts_the_old_owner() {
    let runtime = UpstreamMediaRuntime::new();
    let old_lease = runtime.activate_microphone();
    let new_lease = runtime.activate_microphone();
    // Re-activating the same kind stays inside the same shared session …
    assert_eq!(old_lease.session_id, new_lease.session_id);
    // … while ownership moves from the superseded lease to the fresh one.
    assert!(!runtime.is_microphone_active(old_lease.generation));
    assert!(runtime.is_microphone_active(new_lease.generation));
}
#[test]
fn closing_the_last_stream_resets_the_next_session_anchor() {
    let runtime = UpstreamMediaRuntime::new();
    let cam = runtime.activate_camera();
    let mic = runtime.activate_microphone();
    // Tear both streams down so the shared session fully drains.
    runtime.close_camera(cam.generation);
    runtime.close_microphone(mic.generation);
    // The next activation should open a brand-new session (id bumps to 2).
    let next = runtime.activate_camera();
    assert_eq!(next.session_id, 2);
}
#[test]
fn first_packets_wait_for_the_counterpart_before_pairing() {
// Both kinds are live, but neither side has delivered a packet yet.
let runtime = UpstreamMediaRuntime::new();
let _camera = runtime.activate_camera();
let _microphone = runtime.activate_microphone();
// The first video packet cannot be scheduled alone: it must wait for audio.
assert!(matches!(
runtime.plan_video_pts(1_000_000, 16_666),
super::UpstreamPlanDecision::AwaitingPair
));
// Once audio arrives at the same remote PTS, both sides pair up …
let audio_first = play(runtime.plan_audio_pts(1_000_000));
let video_first = play(runtime.plan_video_pts(1_000_000, 16_666));
// … and share one rebased epoch: local PTS 0 and an identical deadline.
assert_eq!(audio_first.local_pts_us, 0);
assert_eq!(video_first.local_pts_us, 0);
assert_eq!(audio_first.due_at, video_first.due_at);
}
#[test]
fn overlap_waits_for_camera_startup_grace_before_establishing_the_shared_base() {
temp_env::with_var(
"LESAVKA_UPSTREAM_CAMERA_STARTUP_GRACE_MS",
Some("250"),
|| {
let runtime = UpstreamMediaRuntime::new();
let _camera = runtime.activate_camera();
let _microphone = runtime.activate_microphone();
// With a 250 ms camera startup grace, neither side may anchor the shared
// base yet — both kinds keep reporting AwaitingPair …
assert!(matches!(
runtime.plan_video_pts(1_000_000, 16_666),
super::UpstreamPlanDecision::AwaitingPair
));
assert!(matches!(
runtime.plan_audio_pts(1_000_000),
super::UpstreamPlanDecision::AwaitingPair
));
// … even when remote video PTS has advanced 200 ms (still inside grace).
assert!(matches!(
runtime.plan_video_pts(1_200_000, 16_666),
super::UpstreamPlanDecision::AwaitingPair
));
// At +250 ms of remote PTS the grace is satisfied: video anchors local 0
// and audio 10 ms later keeps its relative offset against that base.
let video_ready = play(runtime.plan_video_pts(1_250_000, 16_666));
let audio_ready = play(runtime.plan_audio_pts(1_260_000));
assert_eq!(video_ready.local_pts_us, 0);
assert_eq!(audio_ready.local_pts_us, 10_000);
},
);
}
#[test]
fn pairing_window_does_not_expire_into_one_sided_playout_while_camera_warms_up() {
temp_env::with_var(
"LESAVKA_UPSTREAM_CAMERA_STARTUP_GRACE_MS",
Some("250"),
|| {
temp_env::with_var("LESAVKA_UPSTREAM_PLAYOUT_DELAY_MS", Some("20"), || {
let runtime = UpstreamMediaRuntime::new();
let _camera = runtime.activate_camera();
let _microphone = runtime.activate_microphone();
assert!(matches!(
runtime.plan_video_pts(1_000_000, 16_666),
super::UpstreamPlanDecision::AwaitingPair
));
assert!(matches!(
runtime.plan_audio_pts(1_000_000),
super::UpstreamPlanDecision::AwaitingPair
));
// Sleep past the 20 ms playout window: expiry must NOT release audio
// into one-sided playout while the 250 ms camera grace is still pending.
std::thread::sleep(Duration::from_millis(30));
assert!(matches!(
runtime.plan_audio_pts(1_010_000),
super::UpstreamPlanDecision::AwaitingPair
));
// Only once remote PTS clears the grace does the shared base form.
let video_ready = play(runtime.plan_video_pts(1_250_000, 16_666));
let audio_ready = play(runtime.plan_audio_pts(1_260_000));
assert_eq!(video_ready.local_pts_us, 0);
assert_eq!(audio_ready.local_pts_us, 10_000);
});
},
);
}
#[test]
fn overlap_pairing_drops_leading_packets_before_the_shared_base() {
let runtime = UpstreamMediaRuntime::new();
let _camera = runtime.activate_camera();
let _microphone = runtime.activate_microphone();
// Audio shows up early and must hold for its video counterpart.
assert!(matches!(
runtime.plan_audio_pts(1_000_000),
super::UpstreamPlanDecision::AwaitingPair
));
// Video arrives later at 1_300_000, which becomes the shared base (local 0).
let video_first = play(runtime.plan_video_pts(1_300_000, 16_666));
assert_eq!(video_first.local_pts_us, 0);
// The stale audio packet predates the base, so it is discarded rather than
// replayed with startup skew.
assert!(matches!(
runtime.plan_audio_pts(1_000_000),
super::UpstreamPlanDecision::DropBeforeOverlap
));
// Subsequent packets rebase relative to the 1_300_000 anchor.
let audio_next = play(runtime.plan_audio_pts(1_310_000));
let video_next = play(runtime.plan_video_pts(1_333_333, 16_666));
assert_eq!(audio_next.local_pts_us, 10_000);
assert_eq!(video_next.local_pts_us, 33_333);
}
#[test]
fn shared_clock_keeps_each_kind_monotonic_when_remote_pts_repeat() {
let runtime = UpstreamMediaRuntime::new();
let _camera = runtime.activate_camera();
let _microphone = runtime.activate_microphone();
assert!(matches!(
runtime.plan_video_pts(50_000, 16_666),
super::UpstreamPlanDecision::AwaitingPair
));
let _audio = play(runtime.plan_audio_pts(50_000));
// Two video packets carry the same remote PTS; the runtime must still pace
// the second one forward by the declared 16_666 us frame duration instead
// of emitting a duplicate timestamp.
let first = play(runtime.plan_video_pts(50_000, 16_666));
let repeated = play(runtime.plan_video_pts(50_000, 16_666));
assert_eq!(first.local_pts_us, 0);
assert_eq!(repeated.local_pts_us, 16_666);
}
#[test]
fn close_ignores_superseded_generation_values() {
    let runtime = UpstreamMediaRuntime::new();
    let stale = runtime.activate_camera();
    let live = runtime.activate_camera();
    // Closing with the stale generation must not tear down the live owner.
    runtime.close_camera(stale.generation);
    assert!(runtime.is_camera_active(live.generation));
    // Closing via the generic entry point with the live generation really
    // ends the session, so the next activation opens session 2.
    runtime.close(super::UpstreamMediaKind::Camera, live.generation);
    let next = runtime.activate_camera();
    assert_eq!(next.session_id, 2);
}

View File

@ -0,0 +1,257 @@
use super::{UpstreamMediaRuntime, play};
use std::time::Duration;
/// Run `f` with an INFO-level tracing subscriber routed to the test writer,
/// so traced log lines land in the captured test output.
fn with_info_tracing<T>(f: impl FnOnce() -> T) -> T {
    let builder = tracing_subscriber::fmt()
        .with_max_level(tracing::Level::INFO)
        .with_test_writer();
    tracing::subscriber::with_default(builder.finish(), f)
}
#[test]
fn shared_playout_epoch_is_reused_across_audio_and_video() {
let runtime = UpstreamMediaRuntime::new();
let _camera = runtime.activate_camera();
let _microphone = runtime.activate_microphone();
// Video alone cannot anchor the epoch; it waits for the audio counterpart.
assert!(matches!(
runtime.plan_video_pts(1_000_000, 16_666),
super::UpstreamPlanDecision::AwaitingPair
));
let audio_first = play(runtime.plan_audio_pts(1_000_000));
let video_first = play(runtime.plan_video_pts(1_000_000, 16_666));
let audio_next = play(runtime.plan_audio_pts(1_010_000));
// Both kinds rebase onto the same epoch: local PTS 0 with identical deadlines.
assert_eq!(video_first.local_pts_us, 0);
assert_eq!(audio_first.local_pts_us, 0);
assert_eq!(video_first.due_at, audio_first.due_at);
// A packet 10 ms later in remote PTS is due exactly 10 ms after the first.
assert_eq!(
audio_next
.due_at
.saturating_duration_since(audio_first.due_at),
Duration::from_micros(10_000)
);
}
#[test]
fn pairing_window_can_expire_into_one_sided_playout() {
    // With a zero playout delay and no microphone ever activating, video alone
    // is allowed to start playing out.
    temp_env::with_var("LESAVKA_UPSTREAM_PLAYOUT_DELAY_MS", Some("0"), || {
        let runtime = UpstreamMediaRuntime::new();
        let _camera = runtime.activate_camera();
        let frame_a = play(runtime.plan_video_pts(1_000_000, 16_666));
        let frame_b = play(runtime.plan_video_pts(1_016_666, 16_666));
        assert_eq!(frame_a.local_pts_us, 0);
        assert_eq!(frame_b.local_pts_us, 16_666);
    });
}
#[test]
fn map_wrappers_hide_unpaired_and_pre_overlap_packets() {
    let runtime = UpstreamMediaRuntime::new();
    let _cam = runtime.activate_camera();
    let _mic = runtime.activate_microphone();
    // Video arrives first, so it is still waiting for its audio counterpart.
    assert_eq!(runtime.map_video_pts(1_000_000, 16_666), None);
    // Audio pairs against the pending video and anchors the shared base at 0.
    assert_eq!(runtime.map_audio_pts(1_000_000), Some(0));
    // Anything dated before the shared base is hidden rather than replayed.
    assert_eq!(runtime.map_audio_pts(999_999), None);
}
#[test]
fn shared_playout_trace_path_keeps_planned_pts_stable() {
// Enabling the timing trace must be purely observational: planned PTS
// values are identical to the untraced path.
temp_env::with_var("LESAVKA_UPSTREAM_TIMING_TRACE", Some("1"), || {
let runtime = UpstreamMediaRuntime::new();
let _camera = runtime.activate_camera();
let _microphone = runtime.activate_microphone();
assert!(matches!(
runtime.plan_video_pts(1_000_000, 16_666),
super::UpstreamPlanDecision::AwaitingPair
));
let audio = play(runtime.plan_audio_pts(1_000_000));
let video = play(runtime.plan_video_pts(1_000_000, 16_666));
assert_eq!(video.local_pts_us, 0);
assert_eq!(audio.local_pts_us, 0);
});
}
#[test]
fn catastrophic_lateness_reanchors_the_shared_playout_epoch() {
temp_env::with_var("LESAVKA_UPSTREAM_PLAYOUT_DELAY_MS", Some("20"), || {
temp_env::with_var("LESAVKA_UPSTREAM_REANCHOR_LATE_MS", Some("5"), || {
let runtime = UpstreamMediaRuntime::new();
let _camera = runtime.activate_camera();
let _microphone = runtime.activate_microphone();
assert!(matches!(
runtime.plan_video_pts(1_000_000, 16_666),
super::UpstreamPlanDecision::AwaitingPair
));
let _audio_first = play(runtime.plan_audio_pts(1_000_000));
let _video_first = play(runtime.plan_video_pts(1_000_000, 16_666));
// Sleep well past the 5 ms lateness threshold so the next packet would
// otherwise be catastrophically late against the original epoch.
std::thread::sleep(Duration::from_millis(30));
let recovered_audio = play(runtime.plan_audio_pts(1_000_000));
assert!(
recovered_audio.due_at > tokio::time::Instant::now(),
"recovered packet should be scheduled back into the future"
);
assert!(
recovered_audio.late_by <= Duration::from_millis(1),
"recovered packet should no longer be catastrophically late"
);
// Re-anchoring is on the shared epoch, so video recovers too.
let recovered_video = play(runtime.plan_video_pts(1_016_666, 16_666));
assert!(
recovered_video.due_at > tokio::time::Instant::now(),
"shared epoch recovery should also move video back into the future"
);
});
});
}
#[test]
fn overlap_anchor_gets_a_fresh_playout_budget_when_pairing_finishes_late() {
temp_env::with_var("LESAVKA_UPSTREAM_PLAYOUT_DELAY_MS", Some("20"), || {
let runtime = UpstreamMediaRuntime::new();
let _camera = runtime.activate_camera();
let _microphone = runtime.activate_microphone();
assert!(matches!(
runtime.plan_video_pts(1_000_000, 16_666),
super::UpstreamPlanDecision::AwaitingPair
));
// Burn most of the 20 ms budget before pairing completes; the anchor must
// be re-stamped at pairing time rather than at first-packet time.
std::thread::sleep(Duration::from_millis(15));
let before_pair = tokio::time::Instant::now();
let audio_first = play(runtime.plan_audio_pts(1_000_000));
let video_first = play(runtime.plan_video_pts(1_000_000, 16_666));
assert!(
audio_first.due_at.saturating_duration_since(before_pair) >= Duration::from_millis(15),
"audio should keep most of the configured playout budget after late pairing"
);
assert!(
video_first.due_at.saturating_duration_since(before_pair) >= Duration::from_millis(15),
"video should keep most of the configured playout budget after late pairing"
);
});
}
#[test]
fn catastrophic_lateness_reanchors_only_once_per_session() {
temp_env::with_var("LESAVKA_UPSTREAM_PLAYOUT_DELAY_MS", Some("20"), || {
temp_env::with_var("LESAVKA_UPSTREAM_REANCHOR_LATE_MS", Some("5"), || {
let runtime = UpstreamMediaRuntime::new();
let _camera = runtime.activate_camera();
let _microphone = runtime.activate_microphone();
assert!(matches!(
runtime.plan_video_pts(1_000_000, 16_666),
super::UpstreamPlanDecision::AwaitingPair
));
let _audio_first = play(runtime.plan_audio_pts(1_000_000));
let _video_first = play(runtime.plan_video_pts(1_000_000, 16_666));
// First stall: well past the 5 ms threshold, so one re-anchor is allowed.
std::thread::sleep(Duration::from_millis(30));
let first_recovered = play(runtime.plan_audio_pts(1_000_000));
assert!(first_recovered.due_at > tokio::time::Instant::now());
// Second stall: the session must stay late instead of re-anchoring again,
// otherwise lateness could be extended indefinitely.
std::thread::sleep(Duration::from_millis(30));
let second_late = play(runtime.plan_audio_pts(1_000_001));
assert!(
second_late.late_by > Duration::from_millis(5),
"session should not keep extending itself with repeated reanchors"
);
});
});
}
#[test]
fn catastrophic_lateness_does_not_reanchor_once_the_session_is_well_past_startup() {
temp_env::with_var("LESAVKA_UPSTREAM_PLAYOUT_DELAY_MS", Some("20"), || {
temp_env::with_var("LESAVKA_UPSTREAM_REANCHOR_LATE_MS", Some("5"), || {
let runtime = UpstreamMediaRuntime::new();
let _camera = runtime.activate_camera();
let _microphone = runtime.activate_microphone();
assert!(matches!(
runtime.plan_video_pts(1_000_000, 16_666),
super::UpstreamPlanDecision::AwaitingPair
));
let _audio_first = play(runtime.plan_audio_pts(1_000_000));
let _video_first = play(runtime.plan_video_pts(1_000_000, 16_666));
// Wait long enough that the session is clearly past its startup phase;
// re-anchoring is a startup-only concession.
std::thread::sleep(Duration::from_millis(130));
let late_audio = play(runtime.plan_audio_pts(1_100_000));
// Rebase still tracks remote PTS (100 ms of remote progress) …
assert_eq!(late_audio.local_pts_us, 100_000);
// … but the packet stays honestly late with a past-due deadline.
assert!(
late_audio.late_by > Duration::from_millis(5),
"late packet should remain late instead of reanchoring the shared epoch mid-session"
);
assert!(
late_audio.due_at <= tokio::time::Instant::now(),
"mid-session lateness should no longer push due_at back into the future"
);
});
});
}
#[test]
fn default_runtime_covers_video_map_play_path() {
    // `Default` must behave exactly like `new` for the map/play video path.
    let runtime = UpstreamMediaRuntime::default();
    let _cam = runtime.activate_camera();
    let _mic = runtime.activate_microphone();
    let first_video = runtime.plan_video_pts(1_000_000, 16_666);
    assert!(matches!(
        first_video,
        super::UpstreamPlanDecision::AwaitingPair
    ));
    let _audio = play(runtime.plan_audio_pts(1_000_000));
    assert_eq!(runtime.map_video_pts(1_000_000, 16_666), Some(0));
}
#[tokio::test(flavor = "current_thread")]
async fn wait_for_audio_master_returns_false_immediately_once_due_time_has_already_passed() {
let runtime = UpstreamMediaRuntime::new();
let _camera = runtime.activate_camera();
let _microphone = runtime.activate_microphone();
// A due time already in the past must resolve to `false` without blocking.
// `checked_sub` can fail very early in the process lifetime, in which case
// we fall back to "now", which is still not in the future.
assert!(!runtime
.wait_for_audio_master(
123_456,
tokio::time::Instant::now()
.checked_sub(Duration::from_millis(1))
.unwrap_or_else(tokio::time::Instant::now),
)
.await);
}
#[test]
fn timing_trace_paths_emit_overlap_and_dropbeforeoverlap_details() {
// Drive the DropBeforeOverlap branch with tracing enabled so the trace
// logging paths execute without changing any planning decisions.
temp_env::with_var("LESAVKA_UPSTREAM_TIMING_TRACE", Some("1"), || {
with_info_tracing(|| {
let runtime = UpstreamMediaRuntime::new();
let _camera = runtime.activate_camera();
let _microphone = runtime.activate_microphone();
assert!(matches!(
runtime.plan_video_pts(1_300_000, 16_666),
super::UpstreamPlanDecision::AwaitingPair
));
// Audio predating the pending video base is dropped both before …
assert!(matches!(
runtime.plan_audio_pts(1_000_000),
super::UpstreamPlanDecision::DropBeforeOverlap
));
let _video = play(runtime.plan_video_pts(1_300_000, 16_666));
// … and after the shared base is established.
assert!(matches!(
runtime.plan_audio_pts(1_000_000),
super::UpstreamPlanDecision::DropBeforeOverlap
));
});
});
}

View File

@ -0,0 +1,44 @@
use std::time::Duration;
use tokio::time::Instant;
/// Logical upstream media kinds that share one live-call session timeline.
///
/// Serves as the discriminator when the shared runtime activates, closes, or
/// plans packets for one side of the A/V pair.
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub enum UpstreamMediaKind {
/// Webcam uplink frames destined for the UVC/HDMI sink path.
Camera,
/// Microphone uplink packets destined for the UAC sink path.
Microphone,
}
/// Lease returned when one upstream media stream becomes the active owner.
///
/// Holders pass `generation` back into the runtime's active/close calls; a
/// newer activation of the same kind supersedes older generations.
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub struct UpstreamStreamLease {
/// Shared session id for the current upstream live-call window.
pub session_id: u64,
/// Per-kind generation used to supersede older streams of the same kind.
pub generation: u64,
}
/// One rebased upstream packet plus its planned server playout time.
///
/// Derives `Eq, PartialEq` (like the sibling [`UpstreamMediaKind`] and
/// [`UpstreamStreamLease`] types) so callers and tests can compare planned
/// packets directly with `assert_eq!`; all three fields support equality.
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub struct PlannedUpstreamPacket {
/// Session-local packet timestamp after rebase onto the shared server clock.
pub local_pts_us: u64,
/// Wall-clock deadline when the server should present this packet.
pub due_at: Instant,
/// How late the packet already is when planned, if any.
pub late_by: Duration,
}
/// Result of asking the shared upstream runtime how to handle one packet.
///
/// Returned by the `plan_*_pts` planning calls; callers match on the variant
/// to queue, discard, or schedule the packet.
#[derive(Clone, Copy, Debug)]
pub enum UpstreamPlanDecision {
/// Hold the packet inside the local stream queue until the pairing window
/// has enough cross-stream context to assign a trustworthy playout time.
AwaitingPair,
/// Discard the packet because it belongs before the shared overlapping A/V
/// session base and would only reintroduce startup skew.
DropBeforeOverlap,
/// Present the packet at the planned wall-clock deadline.
Play(PlannedUpstreamPacket),
}

View File

@ -46,8 +46,12 @@ pub fn pick_uvc_device() -> anyhow::Result<String> {
));
}
if let Some(by_path) = any_platform_uvc_by_path() {
return Ok(by_path);
}
Err(anyhow::anyhow!(
"no video_output v4l2 node found; set LESAVKA_UVC_DEV"
"no Lesavka video_output v4l2 node found; wait for /dev/v4l/by-path/platform-<udc>-video-index0 or set LESAVKA_UVC_DEV"
))
}

View File

@ -11,6 +11,7 @@ path = "src/lib.rs"
[dev-dependencies]
anyhow = "1.0"
async-stream = "0.3"
chrono = "0.4"
evdev = "0.13"
futures-util = "0.3"
@ -25,6 +26,7 @@ gstreamer-video = { version = "0.23", features = ["v1_22"] }
gtk = { version = "0.8", package = "gtk4", features = ["v4_6"] }
winit = "0.30"
serial_test = { workspace = true }
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
shell-escape = "0.1"
temp-env = { workspace = true }

View File

@ -24,6 +24,19 @@ mod handshake {
}
}
#[allow(warnings)]
mod live_capture_clock {
include!("support/live_capture_clock_shim.rs");
}
#[path = "../../client/src/uplink_fresh_queue.rs"]
#[allow(warnings)]
mod uplink_fresh_queue;
#[path = "../../client/src/uplink_telemetry.rs"]
#[allow(warnings)]
mod uplink_telemetry;
mod app_support {
use super::handshake::PeerCaps;
use std::time::Duration;

View File

@ -439,6 +439,7 @@ mod camera_include_contract {
source_base_us: Some(5_000),
capture_base_us: Some(7_345),
used_source_pts: true,
lag_clamped: false,
},
256,
);

View File

@ -176,16 +176,7 @@ JSON
#[test]
fn microphone_pipeline_desc_adds_level_tap_only_when_requested() {
assert!(parser_for_encoder("opusenc").contains("audio/x-opus"));
assert!(parser_for_encoder("avenc_aac").contains("audio/mpeg"));
let with_tap = microphone_pipeline_desc(
"audiotestsrc is-live=true",
"opusenc",
parser_for_encoder("opusenc"),
2.5,
true,
);
let with_tap = microphone_pipeline_desc("audiotestsrc is-live=true", 2.5, true);
assert!(
with_tap
.contains("audiotestsrc is-live=true ! audioconvert ! audioresample ! audio/x-raw")
@ -195,15 +186,9 @@ JSON
assert!(with_tap.contains("appsink name=level_sink"));
assert!(with_tap.contains("volume name=mic_input_gain volume=2.500"));
let without_tap = microphone_pipeline_desc(
"audiotestsrc is-live=true",
"avenc_aac",
parser_for_encoder("avenc_aac"),
1.0,
false,
);
let without_tap = microphone_pipeline_desc("audiotestsrc is-live=true", 1.0, false);
assert!(!without_tap.contains("level_sink"));
assert!(without_tap.contains("queue max-size-buffers=100 leaky=downstream"));
assert!(without_tap.contains("queue max-size-buffers=64 leaky=downstream"));
}
#[test]
@ -307,7 +292,7 @@ JSON
pipeline: gst::Pipeline::new(),
sink,
level_tap_running: Some(std::sync::Arc::clone(&running)),
pts_rebaser: crate::live_capture_clock::SourcePtsRebaser::default(),
pts_rebaser: crate::live_capture_clock::DurationPacedSourcePtsRebaser::default(),
};
assert!(
cap.pull().is_none(),
@ -430,7 +415,7 @@ JSON
pipeline,
sink,
level_tap_running: None,
pts_rebaser: crate::live_capture_clock::SourcePtsRebaser::default(),
pts_rebaser: crate::live_capture_clock::DurationPacedSourcePtsRebaser::default(),
};
let first_pkt = cap.pull().expect("first audio packet");
let second_pkt = cap.pull().expect("second audio packet");

View File

@ -85,8 +85,9 @@ fn activate_non_uvc_returns_noop_relay_in_coverage_harness() {
let rt = Runtime::new().expect("runtime");
let result = rt.block_on(runtime.activate(&cfg));
let (session_id, relay) = result.expect("coverage harness should create a no-op relay");
let (session_id, relay, reused) = result.expect("coverage harness should create a no-op relay");
assert_eq!(session_id, 1);
assert!(!reused);
relay.feed(lesavka_common::lesavka::VideoPacket {
id: 2,
pts: 1,

View File

@ -359,199 +359,4 @@ mod gadget_include_contract {
writer.join().expect("join state writer");
}
#[test]
#[serial]
fn recover_enumeration_runs_forced_core_rebuild_after_stuck_soft_cycle() {
let dir = tempdir().expect("tempdir");
let ctrl = "fake-ctrl.usb";
build_fake_tree(dir.path(), ctrl, "lesavka-test", "not attached");
let helper = dir.path().join("fake-core.sh");
write_helper(
&helper,
r#"#!/usr/bin/env bash
set -euo pipefail
echo forced core helper >&2
printf 'configured\n' > "$LESAVKA_GADGET_SYSFS_ROOT/class/udc/fake-ctrl.usb/state"
"#,
);
with_fake_roots(&dir.path().join("sys"), &dir.path().join("cfg"), || {
with_fast_recovery_env(&helper, || {
let gadget = UsbGadget::new("lesavka-test");
gadget
.recover_enumeration()
.expect("forced rebuild should recover fake UDC");
});
});
let state = std::fs::read_to_string(dir.path().join(format!("sys/class/udc/{ctrl}/state")))
.expect("read state");
assert_eq!(state.trim(), "configured");
}
#[test]
#[serial]
fn recover_enumeration_passes_aggressive_rebuild_environment_to_core_helper() {
let dir = tempdir().expect("tempdir");
let ctrl = "fake-ctrl.usb";
build_fake_tree(dir.path(), ctrl, "lesavka-test", "not attached");
let helper = dir.path().join("fake-core-env.sh");
let env_dump = dir.path().join("helper-env.txt");
write_helper(
&helper,
r#"#!/usr/bin/env bash
set -euo pipefail
cat > "$LESAVKA_HELPER_ENV_DUMP" <<EOF
LESAVKA_ALLOW_GADGET_RESET=${LESAVKA_ALLOW_GADGET_RESET:-}
LESAVKA_ATTACH_WRITE_UDC=${LESAVKA_ATTACH_WRITE_UDC:-}
LESAVKA_DETACH_CLEAR_UDC=${LESAVKA_DETACH_CLEAR_UDC:-}
LESAVKA_RELOAD_UVCVIDEO=${LESAVKA_RELOAD_UVCVIDEO:-}
LESAVKA_UVC_FALLBACK=${LESAVKA_UVC_FALLBACK:-}
LESAVKA_UVC_CODEC=${LESAVKA_UVC_CODEC:-}
EOF
printf 'configured\n' > "$LESAVKA_GADGET_SYSFS_ROOT/class/udc/fake-ctrl.usb/state"
"#,
);
with_fake_roots(&dir.path().join("sys"), &dir.path().join("cfg"), || {
with_fast_recovery_env(&helper, || {
with_var(
"LESAVKA_HELPER_ENV_DUMP",
Some(env_dump.to_string_lossy().to_string()),
|| {
let gadget = UsbGadget::new("lesavka-test");
gadget
.recover_enumeration()
.expect("forced rebuild should recover fake UDC");
},
);
});
});
let dumped = std::fs::read_to_string(env_dump).expect("read helper env dump");
for line in [
"LESAVKA_ALLOW_GADGET_RESET=1",
"LESAVKA_ATTACH_WRITE_UDC=1",
"LESAVKA_DETACH_CLEAR_UDC=1",
"LESAVKA_RELOAD_UVCVIDEO=1",
"LESAVKA_UVC_FALLBACK=0",
"LESAVKA_UVC_CODEC=mjpeg",
] {
assert!(dumped.contains(line), "{line} missing from {dumped}");
}
}
#[test]
#[serial]
fn recover_enumeration_honors_explicit_uvc_fallback_override() {
let dir = tempdir().expect("tempdir");
let ctrl = "fake-ctrl.usb";
build_fake_tree(dir.path(), ctrl, "lesavka-test", "not attached");
let helper = dir.path().join("fake-core-env-override.sh");
let env_dump = dir.path().join("helper-env-override.txt");
write_helper(
&helper,
r#"#!/usr/bin/env bash
set -euo pipefail
cat > "$LESAVKA_HELPER_ENV_DUMP" <<EOF
LESAVKA_UVC_FALLBACK=${LESAVKA_UVC_FALLBACK:-}
EOF
printf 'configured\n' > "$LESAVKA_GADGET_SYSFS_ROOT/class/udc/fake-ctrl.usb/state"
"#,
);
with_fake_roots(&dir.path().join("sys"), &dir.path().join("cfg"), || {
with_fast_recovery_env(&helper, || {
with_var("LESAVKA_UVC_FALLBACK", Some("1"), || {
with_var(
"LESAVKA_HELPER_ENV_DUMP",
Some(env_dump.to_string_lossy().to_string()),
|| {
let gadget = UsbGadget::new("lesavka-test");
gadget
.recover_enumeration()
.expect("forced rebuild should recover fake UDC");
},
);
});
});
});
let dumped = std::fs::read_to_string(env_dump).expect("read helper env dump");
assert!(
dumped.contains("LESAVKA_UVC_FALLBACK=1"),
"explicit fallback override missing from {dumped}"
);
}
#[test]
#[serial]
fn recover_enumeration_reports_clear_failure_when_helper_leaves_udc_unattached() {
let dir = tempdir().expect("tempdir");
let ctrl = "fake-ctrl.usb";
build_fake_tree(dir.path(), ctrl, "lesavka-test", "not attached");
let helper = dir.path().join("fake-core-noop.sh");
write_helper(
&helper,
r#"#!/usr/bin/env bash
set -euo pipefail
echo noop core helper >&2
"#,
);
with_fake_roots(&dir.path().join("sys"), &dir.path().join("cfg"), || {
with_fast_recovery_env(&helper, || {
let gadget = UsbGadget::new("lesavka-test");
let err = gadget
.recover_enumeration()
.expect_err("still-unattached UDC should fail recovery");
let message = format!("{err:#}");
assert!(message.contains("still not attached"), "{message}");
assert!(
message.contains("forced gadget rebuild helper"),
"{message}"
);
});
});
}
#[test]
#[serial]
fn run_forced_core_rebuild_reports_helper_failure_and_truncates_tail() {
let dir = tempdir().expect("tempdir");
let helper = dir.path().join("fake-core-fail.sh");
write_helper(
&helper,
r#"#!/usr/bin/env bash
set -euo pipefail
printf '%*s\n' 1400 '' | tr ' ' x
exit 42
"#,
);
with_fast_recovery_env(&helper, || {
let gadget = UsbGadget::new("lesavka-test");
let err = gadget
.run_forced_core_rebuild()
.expect_err("failing helper should report stdout/stderr");
let message = format!("{err:#}");
assert!(message.contains("exited with"), "{message}");
assert!(message.contains("..."), "{message}");
});
}
#[test]
#[serial]
fn probe_platform_udc_reads_fake_platform_tree() {
let dir = tempdir().expect("tempdir");
let dev_root = dir.path().join("sys/bus/platform/devices");
std::fs::create_dir_all(&dev_root).expect("create platform devices");
std::fs::create_dir_all(dev_root.join("foo.usb")).expect("create usb entry");
with_fake_roots(&dir.path().join("sys"), &dir.path().join("cfg"), || {
let found = UsbGadget::probe_platform_udc().expect("probe");
assert_eq!(found.as_deref(), Some("foo.usb"));
});
}
}

View File

@ -0,0 +1,275 @@
//! Include-based coverage for aggressive USB gadget recovery helpers.
//!
//! Scope: exercise forced Lesavka core rebuild and fake UDC recovery branches.
//! Targets: `server/src/gadget.rs`.
//! Why: recovery is the fragile path that protects UVC enumeration after host
//! or gadget bumps, so it needs focused regression coverage.
#[allow(warnings)]
mod gadget_recovery_contract {
include!(env!("LESAVKA_SERVER_GADGET_SRC"));
use serial_test::serial;
use std::os::unix::fs::{PermissionsExt, symlink};
use temp_env::with_var;
use tempfile::tempdir;
/// Write `content` to `path`, creating any missing parent directories first.
/// Panics on I/O failure — acceptable inside the test fixture.
fn write_file(path: &Path, content: &str) {
    // `Option` is iterable: runs at most once, only when a parent exists.
    for parent in path.parent() {
        std::fs::create_dir_all(parent).expect("create parent");
    }
    std::fs::write(path, content).expect("write file");
}
/// Point both gadget filesystem roots (sysfs and configfs) at throwaway
/// directories for the duration of `f`, restoring the env vars afterwards.
fn with_fake_roots(sys_root: &Path, cfg_root: &Path, f: impl FnOnce()) {
    let sys = sys_root.to_string_lossy().into_owned();
    let cfg = cfg_root.to_string_lossy().into_owned();
    with_var("LESAVKA_GADGET_SYSFS_ROOT", Some(sys), || {
        with_var("LESAVKA_GADGET_CONFIGFS_ROOT", Some(cfg), f);
    });
}
// Build a miniature sysfs/configfs layout under `base` that mimics a real UDC
// controller: controller state + soft_connect, dwc2 bind/unbind hooks, a
// driver symlink for the controller device, and the gadget's UDC binding.
fn build_fake_tree(base: &Path, ctrl: &str, gadget_name: &str, state: &str) {
// Controller state (e.g. "not attached" / "configured") read by recovery.
write_file(
&base.join(format!("sys/class/udc/{ctrl}/state")),
&format!("{state}\n"),
);
write_file(
&base.join(format!("sys/class/udc/{ctrl}/soft_connect")),
"1\n",
);
// Placeholder bind/unbind files so driver-cycling writes have a target.
write_file(
&base.join("sys/bus/platform/drivers/dwc2/unbind"),
"placeholder\n",
);
write_file(
&base.join("sys/bus/platform/drivers/dwc2/bind"),
"placeholder\n",
);
// Mirror the kernel's device -> driver symlink for the controller.
let driver_target = base.join("sys/bus/platform/drivers/dwc2");
let driver_link = base.join(format!("sys/bus/platform/devices/{ctrl}/driver"));
if let Some(parent) = driver_link.parent() {
std::fs::create_dir_all(parent).expect("create driver link parent");
}
symlink(&driver_target, &driver_link).expect("link controller driver");
// Bind the fake gadget to the fake controller via the configfs UDC file.
write_file(
&base.join(format!("cfg/{gadget_name}/UDC")),
&format!("{ctrl}\n"),
);
}
/// Persist a helper script at `path` and mark it executable (mode 0o755) so
/// the recovery code can invoke it like a real installed binary.
fn write_helper(path: &Path, body: &str) {
    write_file(path, body);
    let metadata = std::fs::metadata(path).expect("helper metadata");
    let mut perms = metadata.permissions();
    perms.set_mode(0o755);
    std::fs::set_permissions(path, perms).expect("chmod helper");
}
// Run `f` with the recovery helper pinned to `helper` and every recovery wait
// zeroed out, so the recovery state machine executes instantly in tests.
fn with_fast_recovery_env(helper: &Path, f: impl FnOnce()) {
let helper = helper.to_string_lossy().to_string();
with_var("LESAVKA_CORE_HELPER", Some(helper), || {
with_var("LESAVKA_USB_RECOVERY_CYCLE_WAIT_MS", Some("0"), || {
with_var("LESAVKA_USB_RECOVERY_REBUILD_WAIT_MS", Some("0"), || {
with_var("LESAVKA_USB_RECOVERY_FINAL_WAIT_MS", Some("0"), f);
})
})
});
}
#[test]
#[serial]
fn recover_enumeration_runs_forced_core_rebuild_after_stuck_soft_cycle() {
// A UDC stuck in "not attached" after the soft connect cycle should trigger
// the forced core rebuild helper; this fake helper flips the sysfs state.
let dir = tempdir().expect("tempdir");
let ctrl = "fake-ctrl.usb";
build_fake_tree(dir.path(), ctrl, "lesavka-test", "not attached");
let helper = dir.path().join("fake-core.sh");
write_helper(
&helper,
r#"#!/usr/bin/env bash
set -euo pipefail
echo forced core helper >&2
printf 'configured\n' > "$LESAVKA_GADGET_SYSFS_ROOT/class/udc/fake-ctrl.usb/state"
"#,
);
with_fake_roots(&dir.path().join("sys"), &dir.path().join("cfg"), || {
with_fast_recovery_env(&helper, || {
let gadget = UsbGadget::new("lesavka-test");
gadget
.recover_enumeration()
.expect("forced rebuild should recover fake UDC");
});
});
// The helper's write must be visible through the fake sysfs tree.
let state = std::fs::read_to_string(dir.path().join(format!("sys/class/udc/{ctrl}/state")))
.expect("read state");
assert_eq!(state.trim(), "configured");
}
#[test]
#[serial]
fn recover_enumeration_passes_aggressive_rebuild_environment_to_core_helper() {
// The fake helper dumps the env it receives; recovery must export the
// aggressive rebuild knobs (reset/attach/detach/uvcvideo reload) and pin
// the UVC codec to mjpeg with fallback disabled.
let dir = tempdir().expect("tempdir");
let ctrl = "fake-ctrl.usb";
build_fake_tree(dir.path(), ctrl, "lesavka-test", "not attached");
let helper = dir.path().join("fake-core-env.sh");
let env_dump = dir.path().join("helper-env.txt");
write_helper(
&helper,
r#"#!/usr/bin/env bash
set -euo pipefail
cat > "$LESAVKA_HELPER_ENV_DUMP" <<EOF
LESAVKA_ALLOW_GADGET_RESET=${LESAVKA_ALLOW_GADGET_RESET:-}
LESAVKA_ATTACH_WRITE_UDC=${LESAVKA_ATTACH_WRITE_UDC:-}
LESAVKA_DETACH_CLEAR_UDC=${LESAVKA_DETACH_CLEAR_UDC:-}
LESAVKA_RELOAD_UVCVIDEO=${LESAVKA_RELOAD_UVCVIDEO:-}
LESAVKA_UVC_FALLBACK=${LESAVKA_UVC_FALLBACK:-}
LESAVKA_UVC_CODEC=${LESAVKA_UVC_CODEC:-}
EOF
printf 'configured\n' > "$LESAVKA_GADGET_SYSFS_ROOT/class/udc/fake-ctrl.usb/state"
"#,
);
with_fake_roots(&dir.path().join("sys"), &dir.path().join("cfg"), || {
with_fast_recovery_env(&helper, || {
with_var(
"LESAVKA_HELPER_ENV_DUMP",
Some(env_dump.to_string_lossy().to_string()),
|| {
let gadget = UsbGadget::new("lesavka-test");
gadget
.recover_enumeration()
.expect("forced rebuild should recover fake UDC");
},
);
});
});
let dumped = std::fs::read_to_string(env_dump).expect("read helper env dump");
// Every aggressive-rebuild variable must reach the helper with these values.
for line in [
"LESAVKA_ALLOW_GADGET_RESET=1",
"LESAVKA_ATTACH_WRITE_UDC=1",
"LESAVKA_DETACH_CLEAR_UDC=1",
"LESAVKA_RELOAD_UVCVIDEO=1",
"LESAVKA_UVC_FALLBACK=0",
"LESAVKA_UVC_CODEC=mjpeg",
] {
assert!(dumped.contains(line), "{line} missing from {dumped}");
}
}
#[test]
#[serial]
fn recover_enumeration_honors_explicit_uvc_fallback_override() {
// Recovery defaults LESAVKA_UVC_FALLBACK to 0, but an operator-set value
// must pass through to the helper unchanged.
let dir = tempdir().expect("tempdir");
let ctrl = "fake-ctrl.usb";
build_fake_tree(dir.path(), ctrl, "lesavka-test", "not attached");
let helper = dir.path().join("fake-core-env-override.sh");
let env_dump = dir.path().join("helper-env-override.txt");
write_helper(
&helper,
r#"#!/usr/bin/env bash
set -euo pipefail
cat > "$LESAVKA_HELPER_ENV_DUMP" <<EOF
LESAVKA_UVC_FALLBACK=${LESAVKA_UVC_FALLBACK:-}
EOF
printf 'configured\n' > "$LESAVKA_GADGET_SYSFS_ROOT/class/udc/fake-ctrl.usb/state"
"#,
);
with_fake_roots(&dir.path().join("sys"), &dir.path().join("cfg"), || {
with_fast_recovery_env(&helper, || {
with_var("LESAVKA_UVC_FALLBACK", Some("1"), || {
with_var(
"LESAVKA_HELPER_ENV_DUMP",
Some(env_dump.to_string_lossy().to_string()),
|| {
let gadget = UsbGadget::new("lesavka-test");
gadget
.recover_enumeration()
.expect("forced rebuild should recover fake UDC");
},
);
});
});
});
let dumped = std::fs::read_to_string(env_dump).expect("read helper env dump");
assert!(
dumped.contains("LESAVKA_UVC_FALLBACK=1"),
"explicit fallback override missing from {dumped}"
);
}
#[test]
#[serial]
fn recover_enumeration_reports_clear_failure_when_helper_leaves_udc_unattached() {
// A helper that exits cleanly without fixing the UDC state must produce an
// actionable error mentioning both the stuck state and the rebuild helper.
let dir = tempdir().expect("tempdir");
let ctrl = "fake-ctrl.usb";
build_fake_tree(dir.path(), ctrl, "lesavka-test", "not attached");
let helper = dir.path().join("fake-core-noop.sh");
write_helper(
&helper,
r#"#!/usr/bin/env bash
set -euo pipefail
echo noop core helper >&2
"#,
);
with_fake_roots(&dir.path().join("sys"), &dir.path().join("cfg"), || {
with_fast_recovery_env(&helper, || {
let gadget = UsbGadget::new("lesavka-test");
let err = gadget
.recover_enumeration()
.expect_err("still-unattached UDC should fail recovery");
let message = format!("{err:#}");
assert!(message.contains("still not attached"), "{message}");
assert!(
message.contains("forced gadget rebuild helper"),
"{message}"
);
});
});
}
#[test]
#[serial]
fn run_forced_core_rebuild_reports_helper_failure_and_truncates_tail() {
// The helper prints a 1400-character line and exits 42: the error must carry
// the exit status and a truncated ("...") output tail, not the whole blob.
let dir = tempdir().expect("tempdir");
let helper = dir.path().join("fake-core-fail.sh");
write_helper(
&helper,
r#"#!/usr/bin/env bash
set -euo pipefail
printf '%*s\n' 1400 '' | tr ' ' x
exit 42
"#,
);
with_fast_recovery_env(&helper, || {
let gadget = UsbGadget::new("lesavka-test");
let err = gadget
.run_forced_core_rebuild()
.expect_err("failing helper should report stdout/stderr");
let message = format!("{err:#}");
assert!(message.contains("exited with"), "{message}");
assert!(message.contains("..."), "{message}");
});
}
/// Probing the (fake) platform device tree should discover the single
/// `*.usb` entry we planted under `sys/bus/platform/devices`.
#[test]
#[serial]
fn probe_platform_udc_reads_fake_platform_tree() {
    let dir = tempdir().expect("tempdir");
    // `create_dir_all` on the nested path also creates the parent tree.
    let usb_entry = dir.path().join("sys/bus/platform/devices").join("foo.usb");
    std::fs::create_dir_all(&usb_entry).expect("create usb entry");
    with_fake_roots(&dir.path().join("sys"), &dir.path().join("cfg"), || {
        let found = UsbGadget::probe_platform_udc().expect("probe");
        assert_eq!(found.as_deref(), Some("foo.usb"));
    });
}
}

View File

@ -154,6 +154,57 @@ mod server_main_media_extra {
});
}
/// With zero playout delay and a microphone that is active but never feeds
/// audio, camera frames have no audio master to pair against; the stream must
/// still drain its input and terminate instead of blocking forever.
#[test]
#[serial]
fn stream_camera_drops_frames_when_audio_master_never_advances() {
    let rt = tokio::runtime::Runtime::new().expect("runtime");
    temp_env::with_var("LESAVKA_UPSTREAM_PLAYOUT_DELAY_MS", Some("0"), || {
        rt.block_on(async {
            let (_dir, handler) = build_handler_for_tests();
            // Activate the microphone side without ever sending it packets.
            let _stalled_microphone = handler.upstream_media_rt.activate_microphone();
            // Bind-then-drop reserves a free local port for the server.
            let listener = std::net::TcpListener::bind("127.0.0.1:0").expect("bind");
            let addr = listener.local_addr().expect("addr");
            drop(listener);
            let server = tokio::spawn(async move {
                let _ = tonic::transport::Server::builder()
                    .add_service(RelayServer::new(handler))
                    .serve(addr)
                    .await;
            });
            let channel = connect_with_retry(addr).await;
            let mut cli = RelayClient::new(channel);
            let (tx, rx) = tokio::sync::mpsc::channel(4);
            tx.send(VideoPacket {
                id: 2,
                pts: 1,
                data: vec![0, 1, 2, 3],
                ..Default::default()
            })
            .await
            .expect("send camera packet");
            // Closing the sender ends the outbound stream after one packet.
            drop(tx);
            let outbound = tokio_stream::wrappers::ReceiverStream::new(rx);
            let mut resp = cli
                .stream_camera(tonic::Request::new(outbound))
                .await
                .expect("stream camera should terminate cleanly");
            let _ = tokio::time::timeout(
                std::time::Duration::from_secs(2),
                resp.get_mut().message(),
            )
            .await
            .expect("camera stream should not block forever")
            .expect("grpc message read");
            server.abort();
        });
    });
}
#[test]
#[serial]
fn shared_eye_hub_covers_conflict_idle_and_error_shutdown_paths() {

View File

@ -372,288 +372,4 @@ mod server_upstream_media {
});
});
}
/// Audio arriving before the A/V pair anchor exists must be dropped once a
/// later video packet sets the anchor; both streams still ack and close.
#[test]
#[serial]
fn stream_microphone_drops_pre_overlap_audio_after_video_sets_the_pair_anchor() {
    let rt = tokio::runtime::Runtime::new().expect("runtime");
    with_var("LESAVKA_CAPTURE_POWER_UNIT", Some("none"), || {
        with_var("LESAVKA_DISABLE_UVC", None::<&str>, || {
            with_var("LESAVKA_UPSTREAM_PLAYOUT_DELAY_MS", Some("80"), || {
                rt.block_on(async {
                    let (_dir, handler) = build_handler_for_tests();
                    let (server, mut cli) = serve_handler(handler).await;
                    let (audio_tx, audio_rx) = tokio::sync::mpsc::channel(4);
                    let (video_tx, video_rx) = tokio::sync::mpsc::channel(4);
                    let mut audio_response = cli
                        .stream_microphone(tonic::Request::new(
                            tokio_stream::wrappers::ReceiverStream::new(audio_rx),
                        ))
                        .await
                        .expect("microphone stream should open")
                        .into_inner();
                    let mut video_response = cli
                        .stream_camera(tonic::Request::new(
                            tokio_stream::wrappers::ReceiverStream::new(video_rx),
                        ))
                        .await
                        .expect("camera stream should open")
                        .into_inner();
                    // Leading audio lands before any video, so no anchor exists yet.
                    audio_tx
                        .send(AudioPacket {
                            id: 0,
                            pts: 1_000_000,
                            data: vec![1, 2, 3, 4],
                        })
                        .await
                        .expect("send leading audio packet");
                    tokio::time::sleep(std::time::Duration::from_millis(20)).await;
                    // Video at a later pts establishes the pair anchor.
                    video_tx
                        .send(VideoPacket {
                            id: 2,
                            pts: 1_300_000,
                            data: vec![0, 0, 0, 1, 0x65, 0x88],
                            ..Default::default()
                        })
                        .await
                        .expect("send anchor video packet");
                    // Post-anchor audio overlaps the anchor window.
                    audio_tx
                        .send(AudioPacket {
                            id: 0,
                            pts: 1_310_000,
                            data: vec![5, 6, 7, 8],
                        })
                        .await
                        .expect("send post-anchor audio packet");
                    drop(audio_tx);
                    drop(video_tx);
                    let audio_ack = tokio::time::timeout(
                        std::time::Duration::from_secs(1),
                        audio_response.message(),
                    )
                    .await
                    .expect("microphone ack timeout")
                    .expect("microphone ack grpc")
                    .expect("microphone ack item");
                    let video_ack = tokio::time::timeout(
                        std::time::Duration::from_secs(1),
                        video_response.message(),
                    )
                    .await
                    .expect("camera ack timeout")
                    .expect("camera ack grpc")
                    .expect("camera ack item");
                    assert_eq!(audio_ack, Empty {});
                    assert_eq!(video_ack, Empty {});
                    server.abort();
                });
            });
        });
    });
}
/// Mirror of the audio case: video arriving before the A/V pair anchor must
/// be dropped once a later audio packet sets the anchor; both streams ack.
#[test]
#[serial]
fn stream_camera_drops_pre_overlap_video_after_audio_sets_the_pair_anchor() {
    let rt = tokio::runtime::Runtime::new().expect("runtime");
    with_var("LESAVKA_CAPTURE_POWER_UNIT", Some("none"), || {
        with_var("LESAVKA_DISABLE_UVC", None::<&str>, || {
            with_var("LESAVKA_UPSTREAM_PLAYOUT_DELAY_MS", Some("80"), || {
                rt.block_on(async {
                    let (_dir, handler) = build_handler_for_tests();
                    let (server, mut cli) = serve_handler(handler).await;
                    let (audio_tx, audio_rx) = tokio::sync::mpsc::channel(4);
                    let (video_tx, video_rx) = tokio::sync::mpsc::channel(4);
                    let mut audio_response = cli
                        .stream_microphone(tonic::Request::new(
                            tokio_stream::wrappers::ReceiverStream::new(audio_rx),
                        ))
                        .await
                        .expect("microphone stream should open")
                        .into_inner();
                    let mut video_response = cli
                        .stream_camera(tonic::Request::new(
                            tokio_stream::wrappers::ReceiverStream::new(video_rx),
                        ))
                        .await
                        .expect("camera stream should open")
                        .into_inner();
                    // Leading video lands before any audio, so no anchor exists yet.
                    video_tx
                        .send(VideoPacket {
                            id: 2,
                            pts: 1_000_000,
                            data: vec![0, 0, 0, 1, 0x65, 0x77],
                            ..Default::default()
                        })
                        .await
                        .expect("send leading video packet");
                    tokio::time::sleep(std::time::Duration::from_millis(20)).await;
                    // Audio at a later pts establishes the pair anchor.
                    audio_tx
                        .send(AudioPacket {
                            id: 0,
                            pts: 1_300_000,
                            data: vec![1, 2, 3, 4],
                        })
                        .await
                        .expect("send anchor audio packet");
                    // Post-anchor video overlaps the anchor window.
                    video_tx
                        .send(VideoPacket {
                            id: 2,
                            pts: 1_310_000,
                            data: vec![0, 0, 0, 1, 0x65, 0x88],
                            ..Default::default()
                        })
                        .await
                        .expect("send post-anchor video packet");
                    drop(audio_tx);
                    drop(video_tx);
                    let audio_ack = tokio::time::timeout(
                        std::time::Duration::from_secs(1),
                        audio_response.message(),
                    )
                    .await
                    .expect("microphone ack timeout")
                    .expect("microphone ack grpc")
                    .expect("microphone ack item");
                    let video_ack = tokio::time::timeout(
                        std::time::Duration::from_secs(1),
                        video_response.message(),
                    )
                    .await
                    .expect("camera ack timeout")
                    .expect("camera ack grpc")
                    .expect("camera ack item");
                    assert_eq!(audio_ack, Empty {});
                    assert_eq!(video_ack, Empty {});
                    server.abort();
                });
            });
        });
    });
}
/// With playout delay and the stale-drop budget both forced to zero, an old
/// audio packet is dropped for staleness but the stream still acks cleanly.
#[test]
#[serial]
fn stream_microphone_drops_stale_packets_when_freshness_budget_is_zero() {
    let rt = tokio::runtime::Runtime::new().expect("runtime");
    with_var("LESAVKA_CAPTURE_POWER_UNIT", Some("none"), || {
        with_var("LESAVKA_UPSTREAM_PLAYOUT_DELAY_MS", Some("0"), || {
            with_var("LESAVKA_UPSTREAM_STALE_DROP_MS", Some("0"), || {
                rt.block_on(async {
                    let (_dir, handler) = build_handler_for_tests();
                    let (server, mut cli) = serve_handler(handler).await;
                    let (tx, rx) = tokio::sync::mpsc::channel(4);
                    // pts far in the past relative to the capture clock → stale.
                    tx.send(AudioPacket {
                        id: 0,
                        pts: 12_345,
                        data: vec![1, 2, 3, 4, 5, 6],
                    })
                    .await
                    .expect("send stale synthetic upstream audio");
                    drop(tx);
                    let outbound = tokio_stream::wrappers::ReceiverStream::new(rx);
                    let mut response = cli
                        .stream_microphone(tonic::Request::new(outbound))
                        .await
                        .expect("microphone stream should open");
                    let ack = tokio::time::timeout(
                        std::time::Duration::from_secs(1),
                        response.get_mut().message(),
                    )
                    .await
                    .expect("microphone ack timeout")
                    .expect("microphone ack grpc")
                    .expect("microphone ack item");
                    assert_eq!(ack, Empty {});
                    server.abort();
                });
            });
        });
    });
}
#[test]
#[serial]
fn stream_camera_drops_frames_that_never_reach_the_audio_master() {
let rt = tokio::runtime::Runtime::new().expect("runtime");
with_var("LESAVKA_CAPTURE_POWER_UNIT", Some("none"), || {
with_var("LESAVKA_DISABLE_UVC", None::<&str>, || {
with_var("LESAVKA_UPSTREAM_PLAYOUT_DELAY_MS", Some("80"), || {
rt.block_on(async {
let (_dir, handler) = build_handler_for_tests();
let (server, mut cli) = serve_handler(handler).await;
let (audio_tx, audio_rx) = tokio::sync::mpsc::channel(4);
let (video_tx, video_rx) = tokio::sync::mpsc::channel(4);
let mut audio_response = cli
.stream_microphone(tonic::Request::new(
tokio_stream::wrappers::ReceiverStream::new(audio_rx),
))
.await
.expect("microphone stream should open")
.into_inner();
let mut video_response = cli
.stream_camera(tonic::Request::new(
tokio_stream::wrappers::ReceiverStream::new(video_rx),
))
.await
.expect("camera stream should open")
.into_inner();
audio_tx
.send(AudioPacket {
id: 0,
pts: 1_000_000,
data: vec![1, 2, 3, 4],
})
.await
.expect("send first audio packet");
video_tx
.send(VideoPacket {
id: 2,
pts: 1_050_000,
data: vec![0, 0, 0, 1, 0x65, 0x55],
..Default::default()
})
.await
.expect("send unmatched video packet");
drop(audio_tx);
drop(video_tx);
let audio_ack = tokio::time::timeout(
std::time::Duration::from_secs(1),
audio_response.message(),
)
.await
.expect("microphone ack timeout")
.expect("microphone ack grpc")
.expect("microphone ack item");
let video_ack = tokio::time::timeout(
std::time::Duration::from_secs(1),
video_response.message(),
)
.await
.expect("camera ack timeout")
.expect("camera ack grpc")
.expect("camera ack item");
assert_eq!(audio_ack, Empty {});
assert_eq!(video_ack, Empty {});
server.abort();
});
});
});
});
}
}

View File

@ -0,0 +1,560 @@
//! End-to-end server coverage for upstream media pairing and freshness.
//!
//! Scope: run a local gRPC server and verify webcam/mic packet pairing behavior.
//! Targets: `server/src/main.rs`, `server/src/upstream_media_runtime.rs`.
//! Why: MJPEG lip sync depends on keeping late/early packet decisions stable
//! while streams start, stop, or temporarily lose their pair.
#[cfg(coverage)]
#[allow(warnings)]
mod server_upstream_media_pairing {
include!(env!("LESAVKA_SERVER_MAIN_SRC"));
use lesavka_common::lesavka::relay_client::RelayClient;
use serial_test::serial;
use temp_env::with_var;
use tempfile::tempdir;
use tonic::transport::Channel;
/// Connect to the just-spawned local tonic server, retrying for up to
/// 40 × 25 ms (~1 s) while it finishes binding; panics if it never comes up.
async fn connect_with_retry(addr: std::net::SocketAddr) -> Channel {
    let endpoint = tonic::transport::Endpoint::from_shared(format!("http://{addr}"))
        .expect("endpoint")
        .tcp_nodelay(true);
    let mut attempts_left = 40;
    while attempts_left > 0 {
        match endpoint.clone().connect().await {
            Ok(channel) => return channel,
            Err(_) => {
                attempts_left -= 1;
                tokio::time::sleep(std::time::Duration::from_millis(25)).await;
            }
        }
    }
    panic!("failed to connect to local tonic server");
}
/// Build a `Handler` whose HID endpoints are plain temp files, so the gRPC
/// handlers can run without real gadget hardware. The returned `TempDir`
/// keeps the backing files alive for the test's duration.
fn build_handler_for_tests() -> (tempfile::TempDir, Handler) {
    let dir = tempdir().expect("tempdir");
    let kb_path = dir.path().join("hidg0.bin");
    let ms_path = dir.path().join("hidg1.bin");
    std::fs::write(&kb_path, []).expect("create kb file");
    std::fs::write(&ms_path, []).expect("create ms file");
    // Open read+write, mirroring how the real /dev/hidgN nodes are opened.
    let kb = tokio::fs::File::from_std(
        std::fs::OpenOptions::new()
            .read(true)
            .write(true)
            .open(&kb_path)
            .expect("open kb"),
    );
    let ms = tokio::fs::File::from_std(
        std::fs::OpenOptions::new()
            .read(true)
            .write(true)
            .open(&ms_path)
            .expect("open ms"),
    );
    (
        dir,
        Handler {
            kb: std::sync::Arc::new(tokio::sync::Mutex::new(Some(kb))),
            ms: std::sync::Arc::new(tokio::sync::Mutex::new(Some(ms))),
            gadget: UsbGadget::new("lesavka"),
            did_cycle: std::sync::Arc::new(std::sync::atomic::AtomicBool::new(false)),
            camera_rt: std::sync::Arc::new(CameraRuntime::new()),
            upstream_media_rt: std::sync::Arc::new(UpstreamMediaRuntime::new()),
            capture_power: CapturePowerManager::new(),
            eye_hubs: std::sync::Arc::new(tokio::sync::Mutex::new(
                std::collections::HashMap::new(),
            )),
        },
    )
}
/// Spawn a tonic server wrapping `handler` on an ephemeral local port and
/// return the server task handle plus a connected client.
async fn serve_handler(
    handler: Handler,
) -> (
    tokio::task::JoinHandle<()>,
    RelayClient<tonic::transport::Channel>,
) {
    // Reserve an ephemeral port, then release it for the server to claim.
    let addr = {
        let probe = std::net::TcpListener::bind("127.0.0.1:0").expect("bind");
        probe.local_addr().expect("addr")
    };
    let server = tokio::spawn(async move {
        let _ = tonic::transport::Server::builder()
            .add_service(RelayServer::new(handler))
            .serve(addr)
            .await;
    });
    let client = RelayClient::new(connect_with_retry(addr).await);
    (server, client)
}
/// Smoke test for the HID relay paths: a keyboard report and a mouse report
/// sent through their streams are echoed back byte-for-byte.
#[test]
#[serial]
fn stream_keyboard_and_mouse_process_packets_in_coverage_mode() {
    let rt = tokio::runtime::Runtime::new().expect("runtime");
    with_var("LESAVKA_CAPTURE_POWER_UNIT", Some("none"), || {
        with_var("LESAVKA_LIVE_KEYBOARD_REPORT_DELAY_MS", Some("0"), || {
            rt.block_on(async {
                let (_dir, handler) = build_handler_for_tests();
                let (server, mut cli) = serve_handler(handler).await;
                let (kbd_tx, kbd_rx) = tokio::sync::mpsc::channel(4);
                kbd_tx
                    .send(KeyboardReport {
                        data: vec![1, 2, 3, 4, 5, 6, 7, 8],
                    })
                    .await
                    .expect("send keyboard packet");
                // Close the sender so the stream ends after one report.
                drop(kbd_tx);
                let mut kbd_stream = cli
                    .stream_keyboard(tonic::Request::new(
                        tokio_stream::wrappers::ReceiverStream::new(kbd_rx),
                    ))
                    .await
                    .expect("keyboard stream should open")
                    .into_inner();
                let echoed_keyboard = tokio::time::timeout(
                    std::time::Duration::from_secs(1),
                    kbd_stream.message(),
                )
                .await
                .expect("keyboard response timeout")
                .expect("keyboard grpc")
                .expect("keyboard echo");
                assert_eq!(echoed_keyboard.data, vec![1, 2, 3, 4, 5, 6, 7, 8]);
                let (mouse_tx, mouse_rx) = tokio::sync::mpsc::channel(4);
                mouse_tx
                    .send(MouseReport {
                        data: vec![8, 7, 6, 5, 4, 3, 2, 1],
                    })
                    .await
                    .expect("send mouse packet");
                drop(mouse_tx);
                let mut mouse_stream = cli
                    .stream_mouse(tonic::Request::new(
                        tokio_stream::wrappers::ReceiverStream::new(mouse_rx),
                    ))
                    .await
                    .expect("mouse stream should open")
                    .into_inner();
                let echoed_mouse =
                    tokio::time::timeout(std::time::Duration::from_secs(1), mouse_stream.message())
                        .await
                        .expect("mouse response timeout")
                        .expect("mouse grpc")
                        .expect("mouse echo");
                assert_eq!(echoed_mouse.data, vec![8, 7, 6, 5, 4, 3, 2, 1]);
                server.abort();
            });
        });
    });
}
/// Open both streams and let the camera loop run through its idle poll cycle
/// before the first frame arrives; a matched audio/video pair afterwards must
/// still be accepted and acked.
#[test]
#[serial]
fn stream_camera_covers_idle_poll_cycle_before_first_packet() {
    let rt = tokio::runtime::Runtime::new().expect("runtime");
    with_var("LESAVKA_CAPTURE_POWER_UNIT", Some("none"), || {
        with_var("LESAVKA_DISABLE_UVC", None::<&str>, || {
            with_var("LESAVKA_UPSTREAM_PLAYOUT_DELAY_MS", Some("0"), || {
                rt.block_on(async {
                    let (_dir, handler) = build_handler_for_tests();
                    let (server, mut cli) = serve_handler(handler).await;
                    let (audio_tx, audio_rx) = tokio::sync::mpsc::channel(4);
                    let (video_tx, video_rx) = tokio::sync::mpsc::channel(4);
                    let mut audio_response = cli
                        .stream_microphone(tonic::Request::new(
                            tokio_stream::wrappers::ReceiverStream::new(audio_rx),
                        ))
                        .await
                        .expect("microphone stream should open")
                        .into_inner();
                    let mut video_response = cli
                        .stream_camera(tonic::Request::new(
                            tokio_stream::wrappers::ReceiverStream::new(video_rx),
                        ))
                        .await
                        .expect("camera stream should open")
                        .into_inner();
                    // Let the camera loop hit its idle poll cycle before the first frame arrives.
                    tokio::time::sleep(std::time::Duration::from_millis(40)).await;
                    audio_tx
                        .send(AudioPacket {
                            id: 0,
                            pts: 1_000_000,
                            data: vec![1, 2, 3, 4],
                        })
                        .await
                        .expect("send anchor audio packet");
                    // Same pts as the audio anchor, so the pair matches exactly.
                    video_tx
                        .send(VideoPacket {
                            id: 2,
                            pts: 1_000_000,
                            data: vec![0, 0, 0, 1, 0x65, 0x66],
                            ..Default::default()
                        })
                        .await
                        .expect("send first camera packet");
                    drop(audio_tx);
                    drop(video_tx);
                    let audio_ack = tokio::time::timeout(
                        std::time::Duration::from_secs(1),
                        audio_response.message(),
                    )
                    .await
                    .expect("microphone ack timeout")
                    .expect("microphone ack grpc")
                    .expect("microphone ack item");
                    let video_ack = tokio::time::timeout(
                        std::time::Duration::from_secs(1),
                        video_response.message(),
                    )
                    .await
                    .expect("camera ack timeout")
                    .expect("camera ack grpc")
                    .expect("camera ack item");
                    assert_eq!(audio_ack, Empty {});
                    assert_eq!(video_ack, Empty {});
                    server.abort();
                });
            });
        });
    });
}
/// A large negative audio playout offset makes every packet arrive "late";
/// the late packet is dropped but the stream still acks and closes cleanly.
#[test]
#[serial]
fn stream_microphone_drops_late_packets_when_audio_offset_forces_lateness() {
    let rt = tokio::runtime::Runtime::new().expect("runtime");
    with_var("LESAVKA_CAPTURE_POWER_UNIT", Some("none"), || {
        with_var("LESAVKA_UPSTREAM_PLAYOUT_DELAY_MS", Some("0"), || {
            with_var("LESAVKA_UPSTREAM_STALE_DROP_MS", Some("0"), || {
                // -500 ms offset guarantees the packet's playout time is in the past.
                with_var("LESAVKA_UPSTREAM_AUDIO_PLAYOUT_OFFSET_US", Some("-500000"), || {
                    rt.block_on(async {
                        let (_dir, handler) = build_handler_for_tests();
                        let (server, mut cli) = serve_handler(handler).await;
                        let (tx, rx) = tokio::sync::mpsc::channel(4);
                        tx.send(AudioPacket {
                            id: 0,
                            pts: 12_345,
                            data: vec![1, 2, 3, 4, 5, 6],
                        })
                        .await
                        .expect("send stale synthetic upstream audio");
                        drop(tx);
                        let outbound = tokio_stream::wrappers::ReceiverStream::new(rx);
                        let mut response = cli
                            .stream_microphone(tonic::Request::new(outbound))
                            .await
                            .expect("microphone stream should open");
                        let ack = tokio::time::timeout(
                            std::time::Duration::from_secs(1),
                            response.get_mut().message(),
                        )
                        .await
                        .expect("microphone ack timeout")
                        .expect("microphone ack grpc")
                        .expect("microphone ack item");
                        assert_eq!(ack, Empty {});
                        server.abort();
                    });
                });
            });
        });
    });
}
#[test]
#[serial]
fn stream_microphone_drops_pre_overlap_audio_after_video_sets_the_pair_anchor() {
let rt = tokio::runtime::Runtime::new().expect("runtime");
with_var("LESAVKA_CAPTURE_POWER_UNIT", Some("none"), || {
with_var("LESAVKA_DISABLE_UVC", None::<&str>, || {
with_var("LESAVKA_UPSTREAM_PLAYOUT_DELAY_MS", Some("80"), || {
rt.block_on(async {
let (_dir, handler) = build_handler_for_tests();
let (server, mut cli) = serve_handler(handler).await;
let (audio_tx, audio_rx) = tokio::sync::mpsc::channel(4);
let (video_tx, video_rx) = tokio::sync::mpsc::channel(4);
let mut audio_response = cli
.stream_microphone(tonic::Request::new(
tokio_stream::wrappers::ReceiverStream::new(audio_rx),
))
.await
.expect("microphone stream should open")
.into_inner();
let mut video_response = cli
.stream_camera(tonic::Request::new(
tokio_stream::wrappers::ReceiverStream::new(video_rx),
))
.await
.expect("camera stream should open")
.into_inner();
audio_tx
.send(AudioPacket {
id: 0,
pts: 1_000_000,
data: vec![1, 2, 3, 4],
})
.await
.expect("send leading audio packet");
tokio::time::sleep(std::time::Duration::from_millis(20)).await;
video_tx
.send(VideoPacket {
id: 2,
pts: 1_300_000,
data: vec![0, 0, 0, 1, 0x65, 0x88],
..Default::default()
})
.await
.expect("send anchor video packet");
audio_tx
.send(AudioPacket {
id: 0,
pts: 1_310_000,
data: vec![5, 6, 7, 8],
})
.await
.expect("send post-anchor audio packet");
drop(audio_tx);
drop(video_tx);
let audio_ack = tokio::time::timeout(
std::time::Duration::from_secs(1),
audio_response.message(),
)
.await
.expect("microphone ack timeout")
.expect("microphone ack grpc")
.expect("microphone ack item");
let video_ack = tokio::time::timeout(
std::time::Duration::from_secs(1),
video_response.message(),
)
.await
.expect("camera ack timeout")
.expect("camera ack grpc")
.expect("camera ack item");
assert_eq!(audio_ack, Empty {});
assert_eq!(video_ack, Empty {});
server.abort();
});
});
});
});
}
/// Mirror of the audio case: video arriving before the A/V pair anchor must
/// be dropped once a later audio packet sets the anchor; both streams ack.
#[test]
#[serial]
fn stream_camera_drops_pre_overlap_video_after_audio_sets_the_pair_anchor() {
    let rt = tokio::runtime::Runtime::new().expect("runtime");
    with_var("LESAVKA_CAPTURE_POWER_UNIT", Some("none"), || {
        with_var("LESAVKA_DISABLE_UVC", None::<&str>, || {
            with_var("LESAVKA_UPSTREAM_PLAYOUT_DELAY_MS", Some("80"), || {
                rt.block_on(async {
                    let (_dir, handler) = build_handler_for_tests();
                    let (server, mut cli) = serve_handler(handler).await;
                    let (audio_tx, audio_rx) = tokio::sync::mpsc::channel(4);
                    let (video_tx, video_rx) = tokio::sync::mpsc::channel(4);
                    let mut audio_response = cli
                        .stream_microphone(tonic::Request::new(
                            tokio_stream::wrappers::ReceiverStream::new(audio_rx),
                        ))
                        .await
                        .expect("microphone stream should open")
                        .into_inner();
                    let mut video_response = cli
                        .stream_camera(tonic::Request::new(
                            tokio_stream::wrappers::ReceiverStream::new(video_rx),
                        ))
                        .await
                        .expect("camera stream should open")
                        .into_inner();
                    // Leading video lands before any audio, so no anchor exists yet.
                    video_tx
                        .send(VideoPacket {
                            id: 2,
                            pts: 1_000_000,
                            data: vec![0, 0, 0, 1, 0x65, 0x77],
                            ..Default::default()
                        })
                        .await
                        .expect("send leading video packet");
                    tokio::time::sleep(std::time::Duration::from_millis(20)).await;
                    // Audio at a later pts establishes the pair anchor.
                    audio_tx
                        .send(AudioPacket {
                            id: 0,
                            pts: 1_300_000,
                            data: vec![1, 2, 3, 4],
                        })
                        .await
                        .expect("send anchor audio packet");
                    // Post-anchor video overlaps the anchor window.
                    video_tx
                        .send(VideoPacket {
                            id: 2,
                            pts: 1_310_000,
                            data: vec![0, 0, 0, 1, 0x65, 0x88],
                            ..Default::default()
                        })
                        .await
                        .expect("send post-anchor video packet");
                    drop(audio_tx);
                    drop(video_tx);
                    let audio_ack = tokio::time::timeout(
                        std::time::Duration::from_secs(1),
                        audio_response.message(),
                    )
                    .await
                    .expect("microphone ack timeout")
                    .expect("microphone ack grpc")
                    .expect("microphone ack item");
                    let video_ack = tokio::time::timeout(
                        std::time::Duration::from_secs(1),
                        video_response.message(),
                    )
                    .await
                    .expect("camera ack timeout")
                    .expect("camera ack grpc")
                    .expect("camera ack item");
                    assert_eq!(audio_ack, Empty {});
                    assert_eq!(video_ack, Empty {});
                    server.abort();
                });
            });
        });
    });
}
/// With playout delay and the stale-drop budget both forced to zero, an old
/// audio packet is dropped for staleness but the stream still acks cleanly.
#[test]
#[serial]
fn stream_microphone_drops_stale_packets_when_freshness_budget_is_zero() {
    let rt = tokio::runtime::Runtime::new().expect("runtime");
    with_var("LESAVKA_CAPTURE_POWER_UNIT", Some("none"), || {
        with_var("LESAVKA_UPSTREAM_PLAYOUT_DELAY_MS", Some("0"), || {
            with_var("LESAVKA_UPSTREAM_STALE_DROP_MS", Some("0"), || {
                rt.block_on(async {
                    let (_dir, handler) = build_handler_for_tests();
                    let (server, mut cli) = serve_handler(handler).await;
                    let (tx, rx) = tokio::sync::mpsc::channel(4);
                    // pts far in the past relative to the capture clock → stale.
                    tx.send(AudioPacket {
                        id: 0,
                        pts: 12_345,
                        data: vec![1, 2, 3, 4, 5, 6],
                    })
                    .await
                    .expect("send stale synthetic upstream audio");
                    drop(tx);
                    let outbound = tokio_stream::wrappers::ReceiverStream::new(rx);
                    let mut response = cli
                        .stream_microphone(tonic::Request::new(outbound))
                        .await
                        .expect("microphone stream should open");
                    let ack = tokio::time::timeout(
                        std::time::Duration::from_secs(1),
                        response.get_mut().message(),
                    )
                    .await
                    .expect("microphone ack timeout")
                    .expect("microphone ack grpc")
                    .expect("microphone ack item");
                    assert_eq!(ack, Empty {});
                    server.abort();
                });
            });
        });
    });
}
/// A video frame whose pts never overlaps the audio master's progress is
/// dropped; both streams still ack and shut down without hanging.
#[test]
#[serial]
fn stream_camera_drops_frames_that_never_reach_the_audio_master() {
    let rt = tokio::runtime::Runtime::new().expect("runtime");
    with_var("LESAVKA_CAPTURE_POWER_UNIT", Some("none"), || {
        with_var("LESAVKA_DISABLE_UVC", None::<&str>, || {
            with_var("LESAVKA_UPSTREAM_PLAYOUT_DELAY_MS", Some("80"), || {
                rt.block_on(async {
                    let (_dir, handler) = build_handler_for_tests();
                    let (server, mut cli) = serve_handler(handler).await;
                    let (audio_tx, audio_rx) = tokio::sync::mpsc::channel(4);
                    let (video_tx, video_rx) = tokio::sync::mpsc::channel(4);
                    let mut audio_response = cli
                        .stream_microphone(tonic::Request::new(
                            tokio_stream::wrappers::ReceiverStream::new(audio_rx),
                        ))
                        .await
                        .expect("microphone stream should open")
                        .into_inner();
                    let mut video_response = cli
                        .stream_camera(tonic::Request::new(
                            tokio_stream::wrappers::ReceiverStream::new(video_rx),
                        ))
                        .await
                        .expect("camera stream should open")
                        .into_inner();
                    audio_tx
                        .send(AudioPacket {
                            id: 0,
                            pts: 1_000_000,
                            data: vec![1, 2, 3, 4],
                        })
                        .await
                        .expect("send first audio packet");
                    // Video slightly ahead of the only audio packet never pairs.
                    video_tx
                        .send(VideoPacket {
                            id: 2,
                            pts: 1_050_000,
                            data: vec![0, 0, 0, 1, 0x65, 0x55],
                            ..Default::default()
                        })
                        .await
                        .expect("send unmatched video packet");
                    drop(audio_tx);
                    drop(video_tx);
                    let audio_ack = tokio::time::timeout(
                        std::time::Duration::from_secs(1),
                        audio_response.message(),
                    )
                    .await
                    .expect("microphone ack timeout")
                    .expect("microphone ack grpc")
                    .expect("microphone ack item");
                    let video_ack = tokio::time::timeout(
                        std::time::Duration::from_secs(1),
                        video_response.message(),
                    )
                    .await
                    .expect("camera ack timeout")
                    .expect("camera ack grpc")
                    .expect("camera ack item");
                    assert_eq!(audio_ack, Empty {});
                    assert_eq!(video_ack, Empty {});
                    server.abort();
                });
            });
        });
    });
}
}

View File

@ -1,6 +1,16 @@
// Shared live-capture clock shim for include-based client contracts.
//
// Scope: provide the subset of `client::live_capture_clock` needed by
// include tests that compile client modules inside `lesavka_testing`.
// Targets: client include-contract harnesses under `testing/tests/`.
// Why: include tests should exercise production modules without depending on
// the whole client crate module tree.
use std::sync::{Mutex, OnceLock};
use std::time::{Duration, Instant};
const DEFAULT_SOURCE_LAG_CAP_MS: u64 = 250;
fn capture_clock_origin() -> &'static Instant {
static ORIGIN: OnceLock<Instant> = OnceLock::new();
ORIGIN.get_or_init(Instant::now)
@ -10,6 +20,10 @@ pub fn capture_pts_us() -> u64 {
capture_clock_origin().elapsed().as_micros() as u64
}
/// Age of a packet stamped at `pts_us`, measured against the shared capture
/// clock; saturates to zero for packets stamped in the future.
pub fn packet_age(pts_us: u64) -> Duration {
    let now_us = capture_pts_us();
    Duration::from_micros(now_us.saturating_sub(pts_us))
}
pub fn upstream_timing_trace_enabled() -> bool {
std::env::var("LESAVKA_UPSTREAM_TIMING_TRACE")
.ok()
@ -23,6 +37,15 @@ pub fn upstream_timing_trace_enabled() -> bool {
.unwrap_or(false)
}
/// Maximum lag a source pts may accumulate before being clamped. Read from
/// `LESAVKA_UPSTREAM_SOURCE_LAG_CAP_MS` (positive integer milliseconds);
/// anything unset, unparsable, or zero falls back to the built-in default.
pub fn upstream_source_lag_cap() -> Duration {
    let configured_ms = std::env::var("LESAVKA_UPSTREAM_SOURCE_LAG_CAP_MS")
        .ok()
        .and_then(|raw| raw.trim().parse::<u64>().ok());
    match configured_ms {
        Some(ms) if ms > 0 => Duration::from_millis(ms),
        _ => Duration::from_millis(DEFAULT_SOURCE_LAG_CAP_MS),
    }
}
#[derive(Clone, Copy, Debug, Default, Eq, PartialEq)]
pub struct RebasedSourcePts {
pub packet_pts_us: u64,
@ -31,6 +54,7 @@ pub struct RebasedSourcePts {
pub source_base_us: Option<u64>,
pub capture_base_us: Option<u64>,
pub used_source_pts: bool,
pub lag_clamped: bool,
}
#[derive(Debug, Default)]
@ -45,8 +69,28 @@ pub struct SourcePtsRebaser {
state: Mutex<SourcePtsRebaserState>,
}
/// Mutable pacing state: the pts the next packet should be stamped with,
/// or `None` until the first packet establishes the cadence.
#[derive(Debug, Default)]
struct DurationPacedSourcePtsState {
    // Next packet's pts in capture-clock microseconds.
    next_packet_pts_us: Option<u64>,
}
/// Rebaser that paces successive packets by their declared duration while
/// anchoring against the capture clock via an inner `SourcePtsRebaser`.
#[derive(Debug, Default)]
pub struct DurationPacedSourcePtsRebaser {
    // Establishes the initial capture-clock anchor and lag-cap behavior.
    anchor_rebaser: SourcePtsRebaser,
    // Guards the per-stream pacing cursor.
    state: Mutex<DurationPacedSourcePtsState>,
}
impl SourcePtsRebaser {
pub fn rebase_or_now(&self, source_pts_us: Option<u64>, min_step_us: u64) -> RebasedSourcePts {
self.rebase_with_lag_cap(source_pts_us, min_step_us, None)
}
pub fn rebase_with_lag_cap(
&self,
source_pts_us: Option<u64>,
min_step_us: u64,
max_lag: Option<Duration>,
) -> RebasedSourcePts {
let capture_now_us = capture_pts_us();
let mut state = self
.state
@ -54,6 +98,7 @@ impl SourcePtsRebaser {
.expect("source pts rebaser mutex poisoned");
let mut packet_pts_us = capture_now_us;
let mut used_source_pts = false;
let mut lag_clamped = false;
if let Some(source_pts_us) = source_pts_us {
let source_base_us = *state.source_base_us.get_or_insert(source_pts_us);
@ -63,6 +108,15 @@ impl SourcePtsRebaser {
used_source_pts = true;
}
if used_source_pts && let Some(max_lag) = max_lag {
let lag_floor_us =
capture_now_us.saturating_sub(max_lag.as_micros().min(u64::MAX as u128) as u64);
if packet_pts_us < lag_floor_us {
packet_pts_us = lag_floor_us;
lag_clamped = true;
}
}
if let Some(last_packet_pts_us) = state.last_packet_pts_us
&& packet_pts_us <= last_packet_pts_us
{
@ -77,6 +131,56 @@ impl SourcePtsRebaser {
source_base_us: state.source_base_us,
capture_base_us: state.capture_base_us,
used_source_pts,
lag_clamped,
}
}
}
impl DurationPacedSourcePtsRebaser {
    /// Rebase `source_pts_us` onto the capture clock, then pace it: each call
    /// advances the output pts by `packet_duration_us` (minimum 1 µs) from the
    /// previous call's output, clamped so it never lags the capture clock by
    /// more than `max_lag`.
    pub fn rebase_with_packet_duration(
        &self,
        source_pts_us: Option<u64>,
        packet_duration_us: u64,
        max_lag: Duration,
    ) -> RebasedSourcePts {
        // A zero duration would stall the cursor; enforce a 1 µs floor.
        let step_us = packet_duration_us.max(1);
        let mut rebased = self
            .anchor_rebaser
            .rebase_with_lag_cap(source_pts_us, step_us, Some(max_lag));
        let max_lag_us = max_lag.as_micros().min(u64::MAX as u128) as u64;
        let lag_floor_us = rebased.capture_now_us.saturating_sub(max_lag_us);
        let mut paced = self
            .state
            .lock()
            .expect("duration paced source pts rebaser mutex poisoned");
        // First packet adopts the anchor; later packets follow the pacing cursor.
        let candidate_us = paced.next_packet_pts_us.unwrap_or(rebased.packet_pts_us);
        let packet_pts_us = if candidate_us < lag_floor_us {
            rebased.lag_clamped = true;
            lag_floor_us
        } else {
            candidate_us
        };
        paced.next_packet_pts_us = Some(packet_pts_us.saturating_add(step_us));
        rebased.packet_pts_us = packet_pts_us;
        rebased
    }
}
#[cfg(test)]
mod tests {
    use super::DurationPacedSourcePtsRebaser;
    use std::time::Duration;

    /// Two consecutive rebases must produce strictly increasing packet pts.
    #[test]
    fn shim_rebases_packet_duration_monotonically() {
        let rebaser = DurationPacedSourcePtsRebaser::default();
        let lag_cap = Duration::from_millis(250);
        let earlier = rebaser.rebase_with_packet_duration(Some(1_000), 10_000, lag_cap);
        let later = rebaser.rebase_with_packet_duration(Some(2_000), 10_000, lag_cap);
        assert!(later.packet_pts_us > earlier.packet_pts_us);
    }
}