feat(media): stabilize hdmi upstream timing

This commit is contained in:
Brad Stein 2026-04-24 14:49:57 -03:00
parent ce11632c89
commit 0650965e52
57 changed files with 4803 additions and 461 deletions

6
Cargo.lock generated
View File

@ -1642,7 +1642,7 @@ checksum = "09edd9e8b54e49e587e4f6295a7d29c3ea94d469cb40ab8ca70b288248a81db2"
[[package]]
name = "lesavka_client"
version = "0.12.4"
version = "0.13.0"
dependencies = [
"anyhow",
"async-stream",
@ -1676,7 +1676,7 @@ dependencies = [
[[package]]
name = "lesavka_common"
version = "0.12.4"
version = "0.13.0"
dependencies = [
"anyhow",
"base64",
@ -1688,7 +1688,7 @@ dependencies = [
[[package]]
name = "lesavka_server"
version = "0.12.4"
version = "0.13.0"
dependencies = [
"anyhow",
"base64",

View File

@ -4,7 +4,7 @@ path = "src/main.rs"
[package]
name = "lesavka_client"
version = "0.12.4"
version = "0.13.0"
edition = "2024"
[dependencies]

Binary file not shown.

Before

Width:  |  Height:  |  Size: 662 KiB

After

Width:  |  Height:  |  Size: 2.3 MiB

View File

@ -19,11 +19,10 @@ impl LesavkaClientApp {
let telemetry_thread = telemetry.clone();
let queue_thread = queue.clone();
std::thread::spawn(move || {
let mut age_tracker = PacketAgeTracker::default();
while stop_rx.try_recv().is_err() {
if let Some(pkt) = mic_clone.pull() {
trace!("🎤📤 cli {} bytes → gRPC", pkt.data.len());
let enqueue_age = age_tracker.packet_age(pkt.pts);
let enqueue_age = crate::live_capture_clock::packet_age(pkt.pts);
let stats = queue_thread.push(pkt, enqueue_age);
if stats.dropped_queue_full > 0 {
telemetry_thread.record_queue_full_drop(stats.dropped_queue_full);
@ -102,7 +101,6 @@ impl LesavkaClientApp {
let telemetry = telemetry.clone();
let queue = queue.clone();
move || {
let mut age_tracker = PacketAgeTracker::default();
loop {
if stop_rx.try_recv().is_ok() {
break;
@ -118,7 +116,7 @@ impl LesavkaClientApp {
tracing::trace!("📸 cli frame#{n} {} B", pkt.data.len());
}
tracing::trace!("📸⬆️ sent webcam AU pts={} {} B", pkt.pts, pkt.data.len());
let enqueue_age = age_tracker.packet_age(pkt.pts);
let enqueue_age = crate::live_capture_clock::packet_age(pkt.pts);
let stats = queue.push(pkt, enqueue_age);
if stats.dropped_queue_full > 0 {
telemetry.record_queue_full_drop(stats.dropped_queue_full);
@ -205,21 +203,3 @@ fn queue_depth_u32(depth: usize) -> u32 {
fn duration_ms(duration: Duration) -> f32 {
duration.as_secs_f32() * 1_000.0
}
#[cfg(not(coverage))]
#[derive(Default)]
struct PacketAgeTracker {
    /// Reference instant established lazily from the first packet seen, so
    /// that packet age is measured relative to the stream's own timeline.
    origin: Option<Instant>,
}
#[cfg(not(coverage))]
impl PacketAgeTracker {
    /// Returns how old a packet with timestamp `pts_us` (microseconds) is.
    ///
    /// The first call anchors the origin at `now - pts`, so the first packet
    /// reads as age ~0; later packets are measured against that anchor.
    /// Packets "from the future" saturate to a zero age.
    fn packet_age(&mut self, pts_us: u64) -> Duration {
        let offset = Duration::from_micros(pts_us);
        let now = Instant::now();
        if self.origin.is_none() {
            // `checked_sub` guards against an Instant underflow right after boot.
            self.origin = Some(now.checked_sub(offset).unwrap_or(now));
        }
        let anchor = self.origin.expect("origin was just initialized");
        now.saturating_duration_since(anchor + offset)
    }
}

View File

@ -0,0 +1,98 @@
#[cfg(any(not(coverage), test))]
use anyhow::{Context, Result, bail};
#[cfg(not(coverage))]
use lesavka_client::sync_probe::analyze::{SyncAnalysisOptions, analyze_capture};
/// CLI entry point for the sync-analysis tool.
///
/// Parses the command line, runs `analyze_capture` on the supplied capture
/// file, and prints the skew report either as pretty JSON (`--json`) or as a
/// human-readable summary. Returns any analysis or serialization error.
#[cfg(not(coverage))]
fn main() -> Result<()> {
    let (capture_path, emit_json) = parse_args(std::env::args().skip(1))?;
    let report = analyze_capture(&capture_path, &SyncAnalysisOptions::default())
        .with_context(|| format!("analyzing sync capture {}", capture_path.display()))?;
    if emit_json {
        println!(
            "{}",
            serde_json::to_string_pretty(&report).context("serializing JSON report")?
        );
    } else {
        // Human-readable summary; positive skew means audio lags video.
        println!("A/V sync report for {}", capture_path.display());
        println!("- video onsets: {}", report.video_event_count);
        println!("- audio onsets: {}", report.audio_event_count);
        println!("- paired pulses: {}", report.paired_event_count);
        println!(
            "- first skew: {:+.1} ms (audio after video is positive)",
            report.first_skew_ms
        );
        println!("- last skew: {:+.1} ms", report.last_skew_ms);
        println!("- mean skew: {:+.1} ms", report.mean_skew_ms);
        println!("- median skew: {:+.1} ms", report.median_skew_ms);
        println!("- max abs skew: {:.1} ms", report.max_abs_skew_ms);
        println!("- drift: {:+.1} ms", report.drift_ms);
    }
    Ok(())
}
/// Parses CLI arguments into `(capture_path, emit_json)`.
///
/// `--help`/`-h` (or no arguments at all) prints usage and exits the process
/// with status 0. Exactly one positional capture path is accepted; extras are
/// an error, as is a missing path.
#[cfg(any(not(coverage), test))]
fn parse_args<I, S>(args: I) -> Result<(std::path::PathBuf, bool)>
where
    I: IntoIterator<Item = S>,
    S: Into<String>,
{
    let collected: Vec<String> = args.into_iter().map(Into::into).collect();
    let wants_help = collected
        .iter()
        .any(|arg| matches!(arg.as_str(), "--help" | "-h"));
    if collected.is_empty() || wants_help {
        println!("Usage: lesavka-sync-analyze <capture.mkv> [--json]");
        std::process::exit(0);
    }
    let mut capture_path: Option<std::path::PathBuf> = None;
    let mut emit_json = false;
    for arg in collected {
        if arg == "--json" {
            emit_json = true;
        } else if capture_path.is_some() {
            bail!("unexpected extra argument `{arg}`");
        } else {
            capture_path = Some(std::path::PathBuf::from(arg));
        }
    }
    let capture_path = capture_path.context("capture path is required")?;
    Ok((capture_path, emit_json))
}
#[cfg(coverage)]
fn main() {}
#[cfg(test)]
mod tests {
use super::parse_args;
#[test]
fn parse_args_accepts_capture_path_and_json_flag() {
let (path, json) = parse_args(["capture.mkv", "--json"]).expect("args");
assert_eq!(path, std::path::PathBuf::from("capture.mkv"));
assert!(json);
}
#[test]
fn parse_args_rejects_extra_positional_arguments() {
assert!(parse_args(["one.mkv", "two.mkv"]).is_err());
}
#[test]
fn parse_args_requires_a_capture_path() {
let error = parse_args(["--json"]).expect_err("missing capture path should fail");
assert!(
error.to_string().contains("capture path is required"),
"unexpected error: {error:#}"
);
}
#[test]
fn coverage_main_stub_is_non_panicking() {
let _ = super::main();
}
}

View File

@ -0,0 +1,19 @@
#[cfg(not(coverage))]
use anyhow::Result;
/// Async entry point for the sync-probe binary.
///
/// Runs the probe on a single-threaded tokio runtime, forwarding the raw CLI
/// arguments (minus argv[0]) to the library-side runner.
#[cfg(not(coverage))]
#[tokio::main(flavor = "current_thread")]
async fn main() -> Result<()> {
    lesavka_client::sync_probe::run_sync_probe_from_args(std::env::args().skip(1)).await
}
#[cfg(coverage)]
fn main() {}
#[cfg(test)]
mod tests {
#[test]
fn coverage_main_stub_is_non_panicking() {
let _ = super::main();
}
}

View File

@ -1,3 +1,21 @@
/// Test/coverage builds stamp camera packets from a module-local monotonic
/// origin so unit tests do not touch the crate-wide live-capture clock.
#[cfg(any(coverage, test))]
fn shared_capture_pts_us() -> u64 {
    use std::sync::OnceLock;
    use std::time::Instant;
    // Fixed on first use; all later timestamps are offsets from this instant.
    static CAPTURE_ORIGIN: OnceLock<Instant> = OnceLock::new();
    let origin = CAPTURE_ORIGIN.get_or_init(Instant::now);
    // Saturate rather than wrap if the elapsed microseconds overflow u64.
    u64::try_from(origin.elapsed().as_micros()).unwrap_or(u64::MAX)
}
/// Production builds share one clock with the microphone path so camera and
/// mic packets land on the same live timeline.
#[cfg(not(any(coverage, test)))]
fn shared_capture_pts_us() -> u64 {
    crate::live_capture_clock::capture_pts_us()
}
impl CameraCapture {
pub fn new(device_fragment: Option<&str>, cfg: Option<CameraConfig>) -> anyhow::Result<Self> {
gst::init().ok();
@ -234,7 +252,7 @@ impl CameraCapture {
let sample = self.sink.pull_sample().ok()?;
let buf = sample.buffer()?;
let map = buf.map_readable().ok()?;
let pts = buf.pts().unwrap_or(gst::ClockTime::ZERO).nseconds() / 1_000;
let pts = shared_capture_pts_us();
static FIRST_CAMERA_PACKET: AtomicBool = AtomicBool::new(false);
if !FIRST_CAMERA_PACKET.swap(true, Ordering::Relaxed) {
tracing::info!(

View File

@ -20,6 +20,24 @@ use tracing::{debug, warn};
#[cfg(not(coverage))]
use tracing::{error, info, trace};
// Test/coverage builds stamp microphone packets from a module-local
// monotonic origin so unit tests do not touch the crate-wide clock.
// NOTE(review): this is byte-for-byte the same helper as in the camera
// capture module — consider extracting one shared test-only helper so the
// two copies cannot drift apart.
#[cfg(any(coverage, test))]
fn shared_capture_pts_us() -> u64 {
    use std::sync::OnceLock;
    use std::time::Instant;
    // Fixed on first use; all later timestamps are offsets from this instant.
    static CAPTURE_ORIGIN: OnceLock<Instant> = OnceLock::new();
    CAPTURE_ORIGIN
        .get_or_init(Instant::now)
        .elapsed()
        .as_micros()
        // Saturate instead of wrapping if the u128 exceeds u64.
        .min(u64::MAX as u128) as u64
}
// Production builds use the shared live-capture clock so microphone and
// camera packets share one timeline.
#[cfg(not(any(coverage, test)))]
fn shared_capture_pts_us() -> u64 {
    crate::live_capture_clock::capture_pts_us()
}
const MIC_GAIN_ENV: &str = "LESAVKA_MIC_GAIN";
const MIC_GAIN_CONTROL_ENV: &str = "LESAVKA_MIC_GAIN_CONTROL";
const MIC_LEVEL_TAP_ENV: &str = "LESAVKA_UPLINK_MIC_LEVEL";
@ -122,7 +140,7 @@ impl MicrophoneCapture {
Ok(sample) => {
let buf = sample.buffer().unwrap();
let map = buf.map_readable().unwrap();
let pts = buf.pts().unwrap_or(gst::ClockTime::ZERO).nseconds() / 1_000;
let pts = shared_capture_pts_us();
#[cfg(not(coverage))]
{
static CNT: AtomicU64 = AtomicU64::new(0);

View File

@ -289,9 +289,9 @@ fn server_chip_state_tracks_connection_not_just_reachability() {
assert_eq!(server_version_label(&state), "-");
state.set_server_available(true);
state.set_server_version(Some("0.12.4".to_string()));
state.set_server_version(Some("0.13.0".to_string()));
assert_eq!(server_light_state(&state, false), StatusLightState::Live);
assert_eq!(server_version_label(&state), "v0.12.4");
assert_eq!(server_version_label(&state), "v0.13.0");
assert_eq!(
server_light_state(&state, true),

View File

@ -12,8 +12,10 @@ pub mod handshake;
pub mod input;
pub mod launcher;
pub mod layout;
pub(crate) mod live_capture_clock;
pub mod output;
pub mod paste;
pub mod sync_probe;
pub(crate) mod uplink_fresh_queue;
pub(crate) mod uplink_latency_harness;
pub(crate) mod uplink_telemetry;

View File

@ -0,0 +1,56 @@
#![forbid(unsafe_code)]
use std::sync::OnceLock;
use std::time::{Duration, Instant};

/// Process-wide origin instant for all live-capture timestamps.
static CAPTURE_ORIGIN: OnceLock<Instant> = OnceLock::new();

/// Returns the shared capture origin, initializing it on first use.
fn origin() -> Instant {
    *CAPTURE_ORIGIN.get_or_init(Instant::now)
}

/// Return the shared live-capture timestamp for upstream camera/mic packets.
///
/// Inputs: none.
/// Outputs: microseconds elapsed since the relay child first stamped live media.
/// Why: camera and microphone capture pipelines run independently, so they need
/// one explicit common origin before the server can keep them on the same live
/// call timeline.
#[must_use]
pub fn capture_pts_us() -> u64 {
    let elapsed_us = origin().elapsed().as_micros();
    // Saturate rather than wrap if the u128 microsecond count exceeds u64.
    u64::try_from(elapsed_us).unwrap_or(u64::MAX)
}

/// Measure how old one shared capture timestamp is right now.
///
/// Inputs: a packet timestamp previously produced by `capture_pts_us`.
/// Outputs: the elapsed age as a `Duration` (zero for future timestamps).
/// Why: upstream freshness telemetry should use the same shared live clock as
/// packet timestamps so queue-age calculations stay honest.
#[must_use]
pub fn packet_age(pts_us: u64) -> Duration {
    let now_us = capture_pts_us();
    Duration::from_micros(now_us.saturating_sub(pts_us))
}
#[cfg(test)]
mod tests {
use super::{capture_pts_us, packet_age};
use std::time::Duration;
#[test]
fn capture_pts_us_monotonically_advances() {
let first = capture_pts_us();
std::thread::sleep(Duration::from_millis(2));
let second = capture_pts_us();
assert!(second >= first);
}
#[test]
fn packet_age_is_small_for_recent_packets() {
let pts = capture_pts_us();
std::thread::sleep(Duration::from_millis(2));
let age = packet_age(pts);
assert!(age >= Duration::from_millis(1));
assert!(age < Duration::from_secs(1));
}
}

View File

@ -0,0 +1,86 @@
//! Analyze captured upstream sync-probe media for audio/video skew and drift.
mod media_extract;
mod onset_detection;
mod report;
#[cfg(test)]
pub(super) mod test_support;
use anyhow::Result;
use std::path::Path;
use media_extract::{extract_audio_samples, extract_video_brightness, extract_video_timestamps};
use onset_detection::{
DEFAULT_AUDIO_SAMPLE_RATE_HZ, correlate_segments, detect_audio_segments, detect_video_segments,
};
pub use onset_detection::{detect_audio_onsets, detect_video_onsets};
pub use report::{SyncAnalysisOptions, SyncAnalysisReport};
/// Analyzes a captured upstream sync-probe file by extracting video and audio
/// pulses, then correlating them into skew and drift metrics.
///
/// Pipeline: frame timestamps + per-frame brightness -> video pulse segments;
/// decoded PCM samples -> audio pulse segments; then the two segment lists are
/// paired into a `SyncAnalysisReport`. Errors from any extraction or detection
/// stage (e.g. missing streams, too-low contrast) propagate to the caller.
pub fn analyze_capture(
    capture_path: &Path,
    options: &SyncAnalysisOptions,
) -> Result<SyncAnalysisReport> {
    let timestamps = extract_video_timestamps(capture_path)?;
    let brightness = extract_video_brightness(capture_path)?;
    let video_segments = detect_video_segments(&timestamps, &brightness)?;
    let audio_samples = extract_audio_samples(capture_path)?;
    let audio_segments = detect_audio_segments(
        &audio_samples,
        DEFAULT_AUDIO_SAMPLE_RATE_HZ,
        options.audio_window_ms,
    )?;
    correlate_segments(
        &video_segments,
        &audio_segments,
        options.pulse_period_s,
        options.pulse_width_s,
        options.marker_tick_period,
        options.max_pair_gap_s,
    )
}
#[cfg(test)]
mod tests {
use super::test_support::{
audio_samples_to_bytes, click_track_samples, frame_json, with_fake_media_tools,
};
use super::{SyncAnalysisOptions, analyze_capture};
#[test]
fn analyze_capture_runs_against_fake_media_tools() {
let timestamps = (0..15).map(|index| index as f64 / 10.0).collect::<Vec<_>>();
let brightness = timestamps
.iter()
.enumerate()
.map(|(index, _)| if matches!(index, 0 | 5 | 10) { 250 } else { 5 })
.collect::<Vec<_>>();
let audio = click_track_samples(&[0.05, 0.55, 1.05], 53_000);
with_fake_media_tools(
&frame_json(&timestamps),
&brightness,
&audio_samples_to_bytes(&audio),
|capture_path| {
let report = analyze_capture(
capture_path,
&SyncAnalysisOptions {
pulse_period_s: 0.5,
..SyncAnalysisOptions::default()
},
)
.expect("analysis report");
assert_eq!(report.video_event_count, 3);
assert_eq!(report.audio_event_count, 3);
assert_eq!(report.paired_event_count, 3);
assert_eq!(report.skews_ms.len(), 3);
assert!((report.first_skew_ms - 50.0).abs() < 10.0);
assert!(report.max_abs_skew_ms < 120.0);
},
);
}
}

View File

@ -0,0 +1,240 @@
use anyhow::{Context, Result, bail};
use serde::Deserialize;
use std::path::Path;
use std::process::Command;
/// Top-level shape of `ffprobe -show_frames -of json` output.
#[derive(Debug, Deserialize)]
struct ProbeFrameResponse {
    // Defaults to empty so a capture with no frames parses (the caller turns
    // the empty list into a dedicated error).
    #[serde(default)]
    frames: Vec<ProbeFrameEntry>,
}
/// One frame record; only the timestamp field is retained.
#[derive(Debug, Deserialize)]
struct ProbeFrameEntry {
    // ffprobe emits the timestamp as a decimal string; parsed to f64 later.
    best_effort_timestamp_time: Option<String>,
}
/// Extracts per-frame presentation timestamps (seconds) from the first video
/// stream of `capture_path` using `ffprobe`.
///
/// Errors if ffprobe fails, its JSON is malformed, a timestamp does not parse
/// as f64, or no frame timestamps were returned at all.
pub(super) fn extract_video_timestamps(capture_path: &Path) -> Result<Vec<f64>> {
    let output = run_command(
        Command::new("ffprobe")
            .arg("-hide_banner")
            .arg("-loglevel")
            .arg("error")
            .arg("-select_streams")
            .arg("v:0")
            .arg("-show_frames")
            .arg("-show_entries")
            .arg("frame=best_effort_timestamp_time")
            .arg("-of")
            .arg("json")
            .arg(capture_path),
        "ffprobe video timestamps",
    )?;
    let response: ProbeFrameResponse =
        serde_json::from_slice(&output).context("parsing ffprobe frame JSON")?;
    // Frames without a best-effort timestamp are silently skipped.
    let timestamps = response
        .frames
        .into_iter()
        .filter_map(|entry| entry.best_effort_timestamp_time)
        .map(|value| value.parse::<f64>().context("parsing frame timestamp"))
        .collect::<Result<Vec<_>>>()?;
    if timestamps.is_empty() {
        bail!("ffprobe did not return any video frame timestamps");
    }
    Ok(timestamps)
}
/// Decodes the first video stream of `capture_path` down to one gray pixel
/// per frame and returns the per-frame brightness bytes.
///
/// The `scale=1:1,format=gray` filter collapses each frame to a single
/// luminance byte, so output length equals the decoded frame count. Errors if
/// ffmpeg fails or emits no data.
pub(super) fn extract_video_brightness(capture_path: &Path) -> Result<Vec<u8>> {
    let mut command = Command::new("ffmpeg");
    command
        .args(["-hide_banner", "-loglevel", "error", "-i"])
        .arg(capture_path)
        .args(["-map", "0:v:0", "-vf", "scale=1:1,format=gray"])
        .args(["-f", "rawvideo", "-pix_fmt", "gray", "-"]);
    let output = run_command(&mut command, "ffmpeg video brightness extraction")?;
    if output.is_empty() {
        bail!("ffmpeg did not emit any video brightness data");
    }
    Ok(output)
}
/// Decodes the first audio stream of `capture_path` into mono signed 16-bit
/// PCM at `DEFAULT_AUDIO_SAMPLE_RATE_HZ` via `ffmpeg`.
///
/// Errors if ffmpeg fails or produces fewer than 2 bytes (one sample). An odd
/// trailing byte, if any, is silently dropped by `chunks_exact`.
pub(super) fn extract_audio_samples(capture_path: &Path) -> Result<Vec<i16>> {
    let output = run_command(
        Command::new("ffmpeg")
            .arg("-hide_banner")
            .arg("-loglevel")
            .arg("error")
            .arg("-i")
            .arg(capture_path)
            .arg("-map")
            .arg("0:a:0")
            .arg("-ac")
            .arg("1")
            .arg("-ar")
            .arg(super::onset_detection::DEFAULT_AUDIO_SAMPLE_RATE_HZ.to_string())
            .arg("-f")
            .arg("s16le")
            .arg("-acodec")
            .arg("pcm_s16le")
            .arg("-"),
        "ffmpeg audio extraction",
    )?;
    if output.len() < 2 {
        bail!("ffmpeg did not emit enough audio data to analyze");
    }
    // Reassemble little-endian byte pairs into i16 samples.
    Ok(output
        .chunks_exact(2)
        .map(|chunk| i16::from_le_bytes([chunk[0], chunk[1]]))
        .collect())
}
/// Runs `command` and returns its stdout on success.
///
/// On spawn failure or a non-zero exit status, returns an error that names
/// `description`. The failure message carries the trimmed stderr *and* the
/// exit status, so tools that fail without writing to stderr still produce a
/// diagnosable error instead of "<description> failed: ".
pub(super) fn run_command(command: &mut Command, description: &str) -> Result<Vec<u8>> {
    let output = command
        .output()
        .with_context(|| format!("running {description}"))?;
    if !output.status.success() {
        let stderr = String::from_utf8_lossy(&output.stderr);
        // Keep the "<description> failed: <stderr>" prefix (callers match on
        // it); append the status for the empty-stderr case.
        bail!(
            "{description} failed: {} ({})",
            stderr.trim(),
            output.status
        );
    }
    Ok(output.stdout)
}
#[cfg(test)]
mod tests {
use super::{
extract_audio_samples, extract_video_brightness, extract_video_timestamps, run_command,
};
use crate::sync_probe::analyze::test_support::{
audio_samples_to_bytes, frame_json, with_fake_media_tools,
};
use std::process::Command;
#[test]
fn extract_video_timestamps_reads_fake_ffprobe_output() {
let timestamps = vec![0.0, 0.5, 1.0];
with_fake_media_tools(
&frame_json(&timestamps),
&[1, 2, 3],
&[1, 0],
|capture_path| {
let parsed = extract_video_timestamps(capture_path).expect("video timestamps");
assert_eq!(parsed, timestamps);
},
);
}
#[test]
fn extract_video_timestamps_rejects_empty_and_invalid_outputs() {
with_fake_media_tools(br#"{"frames":[]}"#, &[1], &[1, 0], |capture_path| {
let error = extract_video_timestamps(capture_path).expect_err("empty frames fail");
assert!(
error
.to_string()
.contains("did not return any video frame timestamps")
);
});
with_fake_media_tools(
br#"{"frames":[{"best_effort_timestamp_time":"bad"}]}"#,
&[1],
&[1, 0],
|capture_path| {
let error =
extract_video_timestamps(capture_path).expect_err("invalid timestamp fails");
assert!(error.to_string().contains("parsing frame timestamp"));
},
);
}
#[test]
fn extract_video_brightness_reads_fake_ffmpeg_output() {
let brightness = vec![5u8, 100, 250];
with_fake_media_tools(
br#"{"frames":[{"best_effort_timestamp_time":"0.0"}]}"#,
&brightness,
&[1, 0],
|capture_path| {
let parsed = extract_video_brightness(capture_path).expect("video brightness");
assert_eq!(parsed, brightness);
},
);
}
#[test]
fn extract_video_brightness_rejects_empty_output() {
with_fake_media_tools(
br#"{"frames":[{"best_effort_timestamp_time":"0.0"}]}"#,
&[],
&[1, 0],
|capture_path| {
let error = extract_video_brightness(capture_path).expect_err("empty brightness");
assert!(
error
.to_string()
.contains("did not emit any video brightness data")
);
},
);
}
#[test]
fn extract_audio_samples_reads_fake_ffmpeg_output() {
let samples = vec![1i16, -2, 32_000];
with_fake_media_tools(
br#"{"frames":[{"best_effort_timestamp_time":"0.0"}]}"#,
&[1],
&audio_samples_to_bytes(&samples),
|capture_path| {
let parsed = extract_audio_samples(capture_path).expect("audio samples");
assert_eq!(parsed, samples);
},
);
}
#[test]
fn extract_audio_samples_rejects_too_short_output() {
with_fake_media_tools(
br#"{"frames":[{"best_effort_timestamp_time":"0.0"}]}"#,
&[1],
&[7],
|capture_path| {
let error = extract_audio_samples(capture_path).expect_err("short audio");
assert!(
error
.to_string()
.contains("did not emit enough audio data to analyze")
);
},
);
}
#[test]
fn run_command_reports_success_and_failure() {
let output = run_command(
Command::new("sh").arg("-c").arg("printf 'ok'"),
"success command",
)
.expect("success output");
assert_eq!(output, b"ok");
let error = run_command(
Command::new("sh")
.arg("-c")
.arg("printf 'boom' >&2; exit 7"),
"failing command",
)
.expect_err("failing command should error");
assert!(error.to_string().contains("failing command failed: boom"));
}
}

View File

@ -0,0 +1,229 @@
use anyhow::{Result, bail};
mod correlation;
#[cfg(test)]
mod tests;
pub(crate) use correlation::correlate_segments;
pub(super) const DEFAULT_AUDIO_SAMPLE_RATE_HZ: u32 = 48_000;
const MIN_VIDEO_CONTRAST: u8 = 16;
/// One detected sync pulse: a contiguous active span on the capture timeline,
/// all fields in seconds.
#[derive(Clone, Copy, Debug, PartialEq)]
pub(crate) struct PulseSegment {
    // Onset (rising-edge) time of the pulse.
    pub start_s: f64,
    // End (falling-edge) time of the pulse.
    pub end_s: f64,
    // Cached `end_s - start_s`; used to classify wide "marker" pulses.
    pub duration_s: f64,
}
/// Public wrapper over `detect_video_segments` that reports only the start
/// time of each detected video pulse, in seconds.
pub fn detect_video_onsets(timestamps_s: &[f64], brightness: &[u8]) -> Result<Vec<f64>> {
    let segments = detect_video_segments(timestamps_s, brightness)?;
    let onsets = segments.iter().map(|segment| segment.start_s).collect();
    Ok(onsets)
}
/// Detects bright "flash" pulses in a per-frame brightness trace.
///
/// A frame is active when its brightness is at or above the midpoint between
/// the observed min and max. Segment edges are placed at the midpoint between
/// the last inactive and first active frame (and vice versa) to reduce
/// frame-rate quantization. Errors if there are no frames or the min/max
/// contrast is below `MIN_VIDEO_CONTRAST`.
pub(crate) fn detect_video_segments(
    timestamps_s: &[f64],
    brightness: &[u8],
) -> Result<Vec<PulseSegment>> {
    // Tolerate unequal list lengths by analyzing only the common prefix.
    let frame_count = timestamps_s.len().min(brightness.len());
    if frame_count == 0 {
        bail!("capture did not contain any video frames");
    }
    let slice = &brightness[..frame_count];
    let min = *slice.iter().min().expect("non-empty brightness slice");
    let max = *slice.iter().max().expect("non-empty brightness slice");
    if max.saturating_sub(min) < MIN_VIDEO_CONTRAST {
        bail!("video flash contrast is too low to detect sync pulses");
    }
    // Threshold at the min/max midpoint (computed in u16 to avoid overflow).
    let threshold = ((u16::from(min) + u16::from(max)) / 2) as u8;
    // Typical inter-frame gap; floored at 1/120 s to guard degenerate input.
    let frame_step_s = median_frame_step_seconds(&timestamps_s[..frame_count]).max(1.0 / 120.0);
    let mut segments = Vec::new();
    let mut previous_active = false;
    let mut segment_start = 0.0_f64;
    let mut previous_timestamp = None;
    let mut last_active_timestamp = None;
    for (timestamp, level) in timestamps_s.iter().copied().zip(slice.iter().copied()) {
        let active = level >= threshold;
        if active && !previous_active {
            // Rising edge: place the onset halfway into the preceding gap.
            segment_start = previous_timestamp
                .map(|prior| edge_midpoint(prior, timestamp))
                .unwrap_or(timestamp);
        }
        if active {
            last_active_timestamp = Some(timestamp);
        }
        if previous_active && !active {
            // Falling edge: midpoint between last active frame and this one,
            // but never earlier than half a frame after the onset.
            let end_s = edge_midpoint(
                last_active_timestamp.unwrap_or(timestamp - frame_step_s),
                timestamp,
            )
            .max(segment_start + frame_step_s / 2.0);
            segments.push(PulseSegment {
                start_s: segment_start,
                end_s,
                duration_s: end_s - segment_start,
            });
        }
        previous_active = active;
        previous_timestamp = Some(timestamp);
    }
    // Close a pulse that was still active at the end of the capture.
    if previous_active {
        let last_timestamp = timestamps_s[frame_count - 1];
        let end_s = last_timestamp + frame_step_s / 2.0;
        segments.push(PulseSegment {
            start_s: segment_start,
            end_s,
            duration_s: end_s - segment_start,
        });
    }
    Ok(segments)
}
/// Public wrapper over `detect_audio_segments` that reports only the start
/// time of each detected audio pulse, in seconds.
pub fn detect_audio_onsets(
    samples: &[i16],
    sample_rate_hz: u32,
    window_ms: u32,
) -> Result<Vec<f64>> {
    let segments = detect_audio_segments(samples, sample_rate_hz, window_ms)?;
    let onsets = segments.iter().map(|segment| segment.start_s).collect();
    Ok(onsets)
}
/// Detects loud "click" pulses in a mono PCM sample stream.
///
/// A coarse mean-absolute envelope (one value per `window_ms` window) finds
/// candidate active regions; segment edges are then refined to the first/last
/// individual sample above a per-sample threshold inside each region. Errors
/// on empty input, zero rate/window, or if the loudest window is too quiet
/// (peak mean-abs < 50) to contain probe pulses.
pub(crate) fn detect_audio_segments(
    samples: &[i16],
    sample_rate_hz: u32,
    window_ms: u32,
) -> Result<Vec<PulseSegment>> {
    if samples.is_empty() {
        bail!("capture did not contain any audio samples");
    }
    if sample_rate_hz == 0 {
        bail!("audio sample rate must stay positive");
    }
    if window_ms == 0 {
        bail!("audio analysis window must stay positive");
    }
    let window_samples = ((sample_rate_hz as usize * window_ms as usize) / 1000).max(1);
    // Coarse envelope: mean absolute amplitude per window.
    let envelope = samples
        .chunks(window_samples)
        .map(|chunk| {
            let total: u64 = chunk
                .iter()
                .map(|sample| i32::from(*sample).unsigned_abs() as u64)
                .sum();
            total as f64 / chunk.len() as f64
        })
        .collect::<Vec<_>>();
    let peak = envelope.iter().copied().fold(0.0_f64, f64::max);
    if peak < 50.0 {
        bail!("audio probe peaks are too quiet to detect sync pulses");
    }
    // Window-level threshold: 45% of the way from the median (noise floor)
    // up to the peak.
    let baseline = median(envelope.clone());
    let threshold = baseline + ((peak - baseline) * 0.45);
    // Sample-level threshold (35%) used later to refine segment edges.
    let sample_abs = samples
        .iter()
        .map(|sample| i32::from(*sample).unsigned_abs() as f64)
        .collect::<Vec<_>>();
    let sample_peak = sample_abs.iter().copied().fold(0.0_f64, f64::max);
    let sample_baseline = median(sample_abs.clone());
    let sample_threshold = sample_baseline + ((sample_peak - sample_baseline) * 0.35);
    let mut segments = Vec::new();
    let mut previous_active = false;
    let mut segment_start = 0usize;
    for (index, level) in envelope.iter().copied().enumerate() {
        let active = level >= threshold;
        if active && !previous_active {
            segment_start = index;
        }
        if previous_active && !active {
            segments.push(window_segment(
                samples,
                sample_rate_hz,
                window_samples,
                segment_start,
                index,
                sample_threshold,
            ));
        }
        previous_active = active;
    }
    // Close a pulse still active at the end of the stream.
    if previous_active {
        segments.push(window_segment(
            samples,
            sample_rate_hz,
            window_samples,
            segment_start,
            envelope.len(),
            sample_threshold,
        ));
    }
    Ok(segments)
}
/// Midpoint (in seconds) between two adjacent observation times; used to
/// place a pulse edge between the last sample on one side of the threshold
/// and the first sample on the other.
pub(super) fn edge_midpoint(previous_s: f64, current_s: f64) -> f64 {
    let half_gap = (current_s - previous_s) / 2.0;
    previous_s + half_gap
}
/// Builds a `PulseSegment` from a run of active envelope windows, refining
/// the edges to the first/last individual sample at or above
/// `sample_threshold` within the window span.
///
/// If no sample clears the threshold, the unrefined window bounds are used.
/// The end is forced at least one sample past the start so duration is
/// always positive.
pub(super) fn window_segment(
    samples: &[i16],
    sample_rate_hz: u32,
    window_samples: usize,
    start_window_index: usize,
    end_window_index_exclusive: usize,
    sample_threshold: f64,
) -> PulseSegment {
    // Convert window indices to sample indices, clamped to the buffer.
    let start_sample = start_window_index.saturating_mul(window_samples);
    let end_sample = end_window_index_exclusive
        .saturating_mul(window_samples)
        .min(samples.len());
    // First loud sample in the span (falls back to the window start).
    let refined_start_sample = samples[start_sample..end_sample]
        .iter()
        .position(|sample| i32::from(*sample).unsigned_abs() as f64 >= sample_threshold)
        .map(|offset| start_sample + offset)
        .unwrap_or(start_sample);
    // Last loud sample in the span, exclusive (falls back to the window end).
    let refined_end_sample = samples[start_sample..end_sample]
        .iter()
        .rposition(|sample| i32::from(*sample).unsigned_abs() as f64 >= sample_threshold)
        .map(|offset| start_sample + offset + 1)
        .unwrap_or(end_sample);
    let start_s = refined_start_sample as f64 / f64::from(sample_rate_hz);
    let end_s = refined_end_sample.max(refined_start_sample + 1) as f64 / f64::from(sample_rate_hz);
    PulseSegment {
        start_s,
        end_s,
        duration_s: end_s - start_s,
    }
}
/// Median gap between consecutive frame timestamps, in seconds.
///
/// Non-finite and non-positive gaps (duplicate or out-of-order timestamps)
/// are ignored; returns 0.0 when no valid gap exists.
pub(super) fn median_frame_step_seconds(timestamps_s: &[f64]) -> f64 {
    let mut steps = Vec::new();
    for pair in timestamps_s.windows(2) {
        let step = pair[1] - pair[0];
        if step.is_finite() && step > 0.0 {
            steps.push(step);
        }
    }
    median(steps)
}
/// Median of `values` (0.0 for an empty list); even-length lists average the
/// two middle elements. Uses `total_cmp` so NaNs sort deterministically.
pub(super) fn median(mut values: Vec<f64>) -> f64 {
    let count = values.len();
    if count == 0 {
        return 0.0;
    }
    values.sort_by(|left, right| left.total_cmp(right));
    let mid = count / 2;
    if count % 2 == 1 {
        values[mid]
    } else {
        (values[mid - 1] + values[mid]) / 2.0
    }
}

View File

@ -0,0 +1,310 @@
use anyhow::{Result, bail};
use std::collections::BTreeMap;
use crate::sync_probe::analyze::report::SyncAnalysisReport;
use super::{PulseSegment, median};
const MARKER_WIDTH_MULTIPLIER: f64 = 1.5;
/// Correlates raw video and audio onset lists (no width/marker information)
/// into a skew/drift report.
///
/// Onsets are indexed by pulse spacing, candidate index offsets between the
/// two streams are scored, and the best-matching pairing produces per-pulse
/// skews. With exactly one onset on each side, falls back to phase-wrapped
/// comparison. Errors on empty inputs, non-positive parameters, or when no
/// pairing fits within `max_pair_gap_s`.
#[cfg_attr(not(test), allow(dead_code))]
pub(super) fn correlate_onsets(
    video_onsets_s: &[f64],
    audio_onsets_s: &[f64],
    pulse_period_s: f64,
    max_pair_gap_s: f64,
) -> Result<SyncAnalysisReport> {
    if video_onsets_s.is_empty() {
        bail!("video onset list is empty");
    }
    if audio_onsets_s.is_empty() {
        bail!("audio onset list is empty");
    }
    if max_pair_gap_s <= 0.0 {
        bail!("max pair gap must stay positive");
    }
    if pulse_period_s <= 0.0 {
        bail!("pulse period must stay positive");
    }
    let video_pulses = index_onsets_by_spacing(video_onsets_s, pulse_period_s);
    let audio_pulses = index_onsets_by_spacing(audio_onsets_s, pulse_period_s);
    let offset_candidates = candidate_index_offsets(&video_pulses, &audio_pulses);
    let mut skews_ms = best_skews_for_index_offsets(
        &video_pulses,
        &audio_pulses,
        &offset_candidates,
        max_pair_gap_s,
    );
    // Single-pulse fallback: compare circular phases within one period.
    if skews_ms.is_empty() && video_onsets_s.len() == 1 && audio_onsets_s.len() == 1 {
        let video_phase_s = estimate_phase(video_onsets_s, pulse_period_s);
        let audio_phase_s = estimate_phase(audio_onsets_s, pulse_period_s);
        let phase_skew_ms =
            shortest_wrapped_difference(audio_phase_s - video_phase_s, pulse_period_s) * 1000.0;
        if phase_skew_ms.abs() <= max_pair_gap_s * 1000.0 {
            skews_ms.push(phase_skew_ms);
        }
    }
    if skews_ms.is_empty() {
        bail!("no audio/video pulse pairs were close enough to compare");
    }
    Ok(sync_report_from_skews(
        video_onsets_s,
        audio_onsets_s,
        skews_ms,
    ))
}
/// Correlates video and audio pulse *segments* into a skew/drift report.
///
/// Like `correlate_onsets`, but additionally uses extra-wide "marker" pulses
/// (width >= `MARKER_WIDTH_MULTIPLIER` x `pulse_width_s`) to seed the set of
/// candidate index offsets, which disambiguates the pairing when one stream
/// missed pulses. Falls back to phase-wrapped comparison for the
/// single-pulse-per-stream case. Errors on empty inputs, non-positive
/// parameters, or when no pairing fits within `max_pair_gap_s`.
pub(crate) fn correlate_segments(
    video_segments: &[PulseSegment],
    audio_segments: &[PulseSegment],
    pulse_period_s: f64,
    pulse_width_s: f64,
    marker_tick_period: u32,
    max_pair_gap_s: f64,
) -> Result<SyncAnalysisReport> {
    let video_onsets_s = video_segments
        .iter()
        .map(|segment| segment.start_s)
        .collect::<Vec<_>>();
    let audio_onsets_s = audio_segments
        .iter()
        .map(|segment| segment.start_s)
        .collect::<Vec<_>>();
    if video_onsets_s.is_empty() {
        bail!("video onset list is empty");
    }
    if audio_onsets_s.is_empty() {
        bail!("audio onset list is empty");
    }
    if pulse_period_s <= 0.0 {
        bail!("pulse period must stay positive");
    }
    if pulse_width_s <= 0.0 {
        bail!("pulse width must stay positive");
    }
    if marker_tick_period == 0 {
        bail!("marker tick period must stay positive");
    }
    if max_pair_gap_s <= 0.0 {
        bail!("max pair gap must stay positive");
    }
    // Wide marker pulses anchor the cross-stream index alignment.
    let video_marker_onsets = marker_onsets(video_segments, pulse_width_s);
    let audio_marker_onsets = marker_onsets(audio_segments, pulse_width_s);
    let video_indexed = index_onsets_by_spacing(&video_onsets_s, pulse_period_s);
    let audio_indexed = index_onsets_by_spacing(&audio_onsets_s, pulse_period_s);
    let offset_candidates = marker_index_offsets(
        &video_indexed,
        &audio_indexed,
        &video_marker_onsets,
        &audio_marker_onsets,
    );
    let mut skews_ms = best_skews_for_index_offsets(
        &video_indexed,
        &audio_indexed,
        &offset_candidates,
        max_pair_gap_s,
    );
    // Single-pulse fallback: compare circular phases within one period.
    if skews_ms.is_empty() && video_onsets_s.len() == 1 && audio_onsets_s.len() == 1 {
        let video_phase_s = estimate_phase(&video_onsets_s, pulse_period_s);
        let audio_phase_s = estimate_phase(&audio_onsets_s, pulse_period_s);
        let phase_skew_ms =
            shortest_wrapped_difference(audio_phase_s - video_phase_s, pulse_period_s) * 1000.0;
        if phase_skew_ms.abs() <= max_pair_gap_s * 1000.0 {
            skews_ms.push(phase_skew_ms);
        }
    }
    if skews_ms.is_empty() {
        bail!("no audio/video pulse pairs were close enough to compare");
    }
    Ok(sync_report_from_skews(
        &video_onsets_s,
        &audio_onsets_s,
        skews_ms,
    ))
}
/// Estimates the mean phase of `onsets_s` within one pulse period, in
/// seconds in `[0, pulse_period_s)`.
///
/// Uses the circular mean: each onset is wrapped into the period, mapped to
/// an angle, and the angles are averaged via their sine/cosine sums so
/// onsets near the period boundary do not cancel out.
pub(super) fn estimate_phase(onsets_s: &[f64], pulse_period_s: f64) -> f64 {
    let (sum_sin, sum_cos) =
        onsets_s
            .iter()
            .copied()
            .fold((0.0_f64, 0.0_f64), |(sum_sin, sum_cos), onset| {
                let wrapped = onset.rem_euclid(pulse_period_s);
                let angle = (wrapped / pulse_period_s) * std::f64::consts::TAU;
                (sum_sin + angle.sin(), sum_cos + angle.cos())
            });
    // Map the mean angle back into [0, period) seconds.
    let mean_angle = sum_sin.atan2(sum_cos).rem_euclid(std::f64::consts::TAU);
    (mean_angle / std::f64::consts::TAU) * pulse_period_s
}
/// Assigns each onset a pulse index based on its spacing from the previous
/// onset, so a gap of several missed pulses advances the index by the
/// corresponding number of periods. The first onset gets index 0; an empty
/// input yields an empty map.
pub(super) fn index_onsets_by_spacing(onsets_s: &[f64], pulse_period_s: f64) -> BTreeMap<i64, f64> {
    let mut indexed = BTreeMap::new();
    if let Some((&first_onset, rest)) = onsets_s.split_first() {
        let mut pulse_index = 0_i64;
        let mut previous_onset = first_onset;
        indexed.insert(pulse_index, first_onset);
        for &onset in rest {
            // Round the gap to whole periods; at least one step so two close
            // onsets never collapse onto the same index.
            let steps = ((onset - previous_onset) / pulse_period_s).round().max(1.0) as i64;
            pulse_index += steps;
            indexed.insert(pulse_index, onset);
            previous_onset = onset;
        }
    }
    indexed
}
/// Every index offset that could pair at least one video pulse with one
/// audio pulse: the inclusive range from `audio_min - video_max` to
/// `audio_max - video_min`. Returns an empty list if either map is empty.
pub(super) fn candidate_index_offsets(
    video_indexed: &BTreeMap<i64, f64>,
    audio_indexed: &BTreeMap<i64, f64>,
) -> Vec<i64> {
    let video_bounds = video_indexed
        .keys()
        .next()
        .zip(video_indexed.keys().next_back());
    let audio_bounds = audio_indexed
        .keys()
        .next()
        .zip(audio_indexed.keys().next_back());
    match (video_bounds, audio_bounds) {
        (Some((&video_min, &video_max)), Some((&audio_min, &audio_max))) => {
            (audio_min - video_max..=audio_max - video_min).collect()
        }
        _ => Vec::new(),
    }
}
/// Candidate index offsets, with marker-derived offsets listed first.
///
/// Every (video marker, audio marker) index difference is a strong alignment
/// hint; the exhaustive `candidate_index_offsets` range is appended as a
/// fallback, then the combined list is sorted and deduplicated.
pub(super) fn marker_index_offsets(
    video_indexed: &BTreeMap<i64, f64>,
    audio_indexed: &BTreeMap<i64, f64>,
    video_marker_onsets: &[f64],
    audio_marker_onsets: &[f64],
) -> Vec<i64> {
    let mut offsets = Vec::new();
    if !video_marker_onsets.is_empty() && !audio_marker_onsets.is_empty() {
        let video_markers = pulse_indices_for_onsets(video_indexed, video_marker_onsets);
        let audio_markers = pulse_indices_for_onsets(audio_indexed, audio_marker_onsets);
        for video_marker in &video_markers {
            for audio_marker in &audio_markers {
                offsets.push(audio_marker - video_marker);
            }
        }
    }
    offsets.extend(candidate_index_offsets(video_indexed, audio_indexed));
    offsets.sort_unstable();
    offsets.dedup();
    offsets
}
/// Tolerance when matching a marker onset back to an indexed onset. Onsets
/// are in seconds, so one microsecond comfortably covers float round-off
/// while never bridging two distinct pulses.
const ONSET_MATCH_EPSILON_S: f64 = 0.000_001;

/// Maps each marker onset back to its pulse index in `indexed`.
///
/// A marker matches the first indexed onset within `ONSET_MATCH_EPSILON_S`;
/// markers with no match are silently skipped, so the result may be shorter
/// than `marker_onsets`.
fn pulse_indices_for_onsets(indexed: &BTreeMap<i64, f64>, marker_onsets: &[f64]) -> Vec<i64> {
    marker_onsets
        .iter()
        .filter_map(|marker_onset| {
            indexed.iter().find_map(|(pulse_index, onset)| {
                ((onset - marker_onset).abs() < ONSET_MATCH_EPSILON_S).then_some(*pulse_index)
            })
        })
        .collect()
}
/// Scores each candidate index offset and returns the per-pulse skews (ms)
/// of the winning alignment.
///
/// For each offset, video pulse `i` is paired with audio pulse `i + offset`;
/// pairs whose skew exceeds `max_pair_gap_s` are discarded. The winner is
/// the offset with the most surviving pairs, ties broken by the lower mean
/// absolute skew (strictly lower — on equal score the earlier offset wins).
/// Returns an empty list when no offset yields any pair.
fn best_skews_for_index_offsets(
    video_indexed: &BTreeMap<i64, f64>,
    audio_indexed: &BTreeMap<i64, f64>,
    offset_candidates: &[i64],
    max_pair_gap_s: f64,
) -> Vec<f64> {
    let max_pair_gap_ms = max_pair_gap_s * 1000.0;
    // (pair count, mean-abs-skew score, skews) of the best offset so far.
    let mut best: Option<(usize, f64, Vec<f64>)> = None;
    for offset in offset_candidates.iter().copied() {
        let skews_ms = video_indexed
            .iter()
            .filter_map(|(pulse_index, video_time)| {
                audio_indexed
                    .get(&(pulse_index + offset))
                    .map(|audio_time| (audio_time - video_time) * 1000.0)
            })
            .filter(|skew_ms| skew_ms.abs() <= max_pair_gap_ms)
            .collect::<Vec<_>>();
        if skews_ms.is_empty() {
            continue;
        }
        let score =
            skews_ms.iter().map(|skew_ms| skew_ms.abs()).sum::<f64>() / skews_ms.len() as f64;
        // Keep the incumbent unless this offset pairs more pulses, or pairs
        // as many with a strictly lower score.
        match &best {
            Some((best_count, best_score, _))
                if skews_ms.len() < *best_count
                    || (skews_ms.len() == *best_count && score >= *best_score) => {}
            _ => best = Some((skews_ms.len(), score, skews_ms)),
        }
    }
    best.map(|(_, _, skews)| skews).unwrap_or_default()
}
/// Onsets of "marker" pulses: segments at least `MARKER_WIDTH_MULTIPLIER`
/// times wider than the nominal pulse width.
pub(super) fn marker_onsets(segments: &[PulseSegment], pulse_width_s: f64) -> Vec<f64> {
    let min_marker_duration_s = pulse_width_s * MARKER_WIDTH_MULTIPLIER;
    let mut onsets = Vec::new();
    for segment in segments {
        if segment.duration_s >= min_marker_duration_s {
            onsets.push(segment.start_s);
        }
    }
    onsets
}
/// Wraps `delta_s` onto the circle of one pulse period and returns the
/// shortest signed difference, in `[-period/2, period/2)`.
pub(super) fn shortest_wrapped_difference(delta_s: f64, pulse_period_s: f64) -> f64 {
    let half_period = pulse_period_s / 2.0;
    let shifted = delta_s + half_period;
    shifted.rem_euclid(pulse_period_s) - half_period
}
/// Folds a chronological list of per-pair skews (milliseconds) into the
/// final `SyncAnalysisReport` summary statistics.
///
/// Panics if `skews_ms` is empty; callers must pair at least one pulse first.
fn sync_report_from_skews(
    video_onsets_s: &[f64],
    audio_onsets_s: &[f64],
    skews_ms: Vec<f64>,
) -> SyncAnalysisReport {
    // Sort a copy for the median; the original order stays chronological so
    // first/last/drift reflect capture time, not magnitude.
    let mut sorted_skews = skews_ms.clone();
    sorted_skews.sort_by(|left, right| left.total_cmp(right));
    let first_skew_ms = *skews_ms.first().expect("paired skew list is not empty");
    let last_skew_ms = *skews_ms.last().expect("paired skew list is not empty");
    let mean_skew_ms = skews_ms.iter().sum::<f64>() / skews_ms.len() as f64;
    let median_skew_ms = median(sorted_skews);
    let max_abs_skew_ms = skews_ms
        .iter()
        .copied()
        .map(f64::abs)
        .fold(0.0_f64, f64::max);
    SyncAnalysisReport {
        video_event_count: video_onsets_s.len(),
        audio_event_count: audio_onsets_s.len(),
        paired_event_count: skews_ms.len(),
        first_skew_ms,
        last_skew_ms,
        mean_skew_ms,
        median_skew_ms,
        max_abs_skew_ms,
        // Drift is the change in skew from the first pair to the last.
        drift_ms: last_skew_ms - first_skew_ms,
        skews_ms,
        video_onsets_s: video_onsets_s.to_vec(),
        audio_onsets_s: audio_onsets_s.to_vec(),
    }
}

View File

@ -0,0 +1,309 @@
use super::correlation::{
candidate_index_offsets, correlate_onsets, estimate_phase, index_onsets_by_spacing,
marker_index_offsets, marker_onsets, shortest_wrapped_difference,
};
use super::{
PulseSegment, correlate_segments, detect_audio_onsets, detect_audio_segments,
detect_video_onsets, detect_video_segments, median,
};
use crate::sync_probe::analyze::report::SyncAnalysisReport;
use std::collections::BTreeMap;
// One onset per dark→bright transition; the expected values show the onset
// lands just before the bright frame (0.95, 1.95) except at index 0, which
// has no preceding dark frame.
#[test]
fn detect_video_onsets_finds_bright_transitions() {
    let timestamps = (0..60).map(|idx| idx as f64 / 10.0).collect::<Vec<_>>();
    let brightness = timestamps
        .iter()
        .enumerate()
        .map(|(idx, _)| {
            if idx == 0 || idx == 10 || idx == 20 {
                250
            } else {
                5
            }
        })
        .collect::<Vec<_>>();
    let onsets = detect_video_onsets(&timestamps, &brightness).expect("video onsets");
    assert_eq!(onsets, vec![0.0, 0.95, 1.95]);
}
// Two 300-sample bursts in one second of 48 kHz silence must produce exactly
// two onsets, near 0.0 s and 0.5 s.
#[test]
fn detect_audio_onsets_finds_click_bursts() {
    let mut samples = vec![0i16; 48_000];
    for start in [0usize, 48_000 / 2] {
        for sample in samples.iter_mut().skip(start).take(300) {
            *sample = 18_000;
        }
    }
    let onsets = detect_audio_onsets(&samples, 48_000, 5).expect("audio onsets");
    assert_eq!(onsets.len(), 2);
    assert!((onsets[0] - 0.0).abs() < 0.01);
    assert!((onsets[1] - 0.5).abs() < 0.02);
}
// A 3-frame pulse followed by a 6-frame pulse at 30 fps: both must be
// detected, with the wider (marker-style) one reporting the longer duration.
#[test]
fn detect_video_segments_keeps_regular_and_marker_durations_distinct() {
    let timestamps = (0..30).map(|idx| idx as f64 / 30.0).collect::<Vec<_>>();
    let brightness = [
        0, 255, 255, 255, 0, 0, 0, 0, 0, 0, 255, 255, 255, 255, 255, 255, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0,
    ];
    let segments = detect_video_segments(&timestamps, &brightness).expect("video segments");
    assert_eq!(segments.len(), 2);
    assert!(segments[1].duration_s > segments[0].duration_s);
}
// Audio twin of the video segment test: a short burst then a double-length
// burst must yield two segments with the second one longer.
#[test]
fn detect_audio_segments_keeps_regular_and_marker_durations_distinct() {
    let mut samples = vec![0i16; 48_000];
    for sample in samples.iter_mut().take(3_000) {
        *sample = 18_000;
    }
    for sample in samples.iter_mut().skip(24_000).take(6_000) {
        *sample = 18_000;
    }
    let segments = detect_audio_segments(&samples, 48_000, 5).expect("audio segments");
    assert_eq!(segments.len(), 2);
    assert!(segments[1].duration_s > segments[0].duration_s);
}
// A pulse still high on the final frame must be closed out, with its end at
// or past the last timestamp.
#[test]
fn detect_video_segments_closes_a_pulse_that_stays_active_until_the_last_frame() {
    let timestamps = [0.0, 0.1, 0.2, 0.3];
    let brightness = [0, 0, 255, 255];
    let segments = detect_video_segments(&timestamps, &brightness).expect("trailing video segment");
    assert_eq!(segments.len(), 1);
    assert!(segments[0].end_s > segments[0].start_s);
    assert!(segments[0].end_s >= 0.3);
}
// A click running through the end of the capture must still close into a
// non-degenerate segment.
#[test]
fn detect_audio_segments_closes_a_click_that_stays_active_until_the_capture_ends() {
    let mut samples = vec![0i16; 4_800];
    let midpoint = samples.len() / 2;
    for sample in samples.iter_mut().skip(midpoint) {
        *sample = 18_000;
    }
    let segments = detect_audio_segments(&samples, 48_000, 5).expect("trailing audio segment");
    assert_eq!(segments.len(), 1);
    assert!(segments[0].end_s > segments[0].start_s);
}
// Four pairable pulses whose skew shrinks from 50 ms to 20 ms: the report
// must show that range and a -30 ms drift.
#[test]
fn correlate_onsets_reports_skew_and_drift() {
    let report = correlate_onsets(&[0.0, 1.0, 2.0, 3.0], &[0.05, 1.04, 2.03, 3.02], 1.0, 0.2)
        .expect("correlated report");
    assert_sync_report_shape(&report, 4);
    assert!((report.first_skew_ms - 50.0).abs() < 0.001);
    assert!((report.last_skew_ms - 20.0).abs() < 0.001);
    assert!((report.drift_ms + 30.0).abs() < 0.001);
    assert!(report.max_abs_skew_ms >= 50.0);
}
// With one pulse per track, pairing falls back to the wrapped-phase
// estimate: 0.95 vs 0.05 over a 1 s period is a +100 ms skew.
#[test]
fn correlate_onsets_single_pulse_uses_phase_fallback() {
    let report = correlate_onsets(&[0.95], &[0.05], 1.0, 0.2).expect("single-pulse fallback");
    assert_eq!(report.paired_event_count, 1);
    assert!((report.first_skew_ms - 100.0).abs() < 0.001);
}
// Rejects: no frames, frames with too little contrast, and all-bright frames
// with no dark→bright edge at all.
#[test]
fn detect_video_onsets_rejects_empty_low_contrast_and_missing_edges() {
    assert!(detect_video_onsets(&[], &[]).is_err());
    assert!(detect_video_onsets(&[0.0, 0.1], &[10, 12]).is_err());
    assert!(detect_video_onsets(&[0.0, 0.1, 0.2], &[255, 255, 255]).is_err());
}
// Rejects: empty input, a zero sample rate, a zero analysis window, and
// audio that never rises above the quiet floor.
#[test]
fn detect_audio_onsets_rejects_empty_invalid_and_too_quiet_inputs() {
    assert!(detect_audio_onsets(&[], 48_000, 5).is_err());
    assert!(detect_audio_onsets(&[1, 2, 3], 0, 5).is_err());
    assert!(detect_audio_onsets(&[1, 2, 3], 48_000, 0).is_err());
    assert!(detect_audio_onsets(&vec![1i16; 4_800], 48_000, 5).is_err());
}
// Rejects: either side empty, a non-positive pair gap, events too far apart
// to pair, and a non-positive pulse period.
#[test]
fn correlate_onsets_rejects_empty_inputs_invalid_gap_and_unpairable_events() {
    assert!(correlate_onsets(&[], &[0.0], 1.0, 0.2).is_err());
    assert!(correlate_onsets(&[0.0], &[], 1.0, 0.2).is_err());
    assert!(correlate_onsets(&[0.0], &[0.0], 1.0, 0.0).is_err());
    assert!(correlate_onsets(&[0.0, 1.0], &[2.0, 3.0], 1.0, 0.1).is_err());
    assert!(correlate_onsets(&[0.0], &[0.0], 0.0, 0.1).is_err());
}
// A single opposing segment per track pairs via the phase fallback (+100 ms);
// every malformed argument combination must error instead.
#[test]
fn correlate_segments_validate_inputs_and_support_single_pulse_fallback() {
    let video = [PulseSegment {
        start_s: 0.95,
        end_s: 1.05,
        duration_s: 0.1,
    }];
    let audio = [PulseSegment {
        start_s: 0.05,
        end_s: 0.15,
        duration_s: 0.1,
    }];
    let report =
        correlate_segments(&video, &audio, 1.0, 0.1, 3, 0.2).expect("single segment fallback");
    assert_eq!(report.paired_event_count, 1);
    assert!((report.first_skew_ms - 100.0).abs() < 0.001);
    assert!(correlate_segments(&[], &audio, 1.0, 0.1, 3, 0.2).is_err());
    assert!(correlate_segments(&video, &[], 1.0, 0.1, 3, 0.2).is_err());
    assert!(correlate_segments(&video, &audio, 0.0, 0.1, 3, 0.2).is_err());
    assert!(correlate_segments(&video, &audio, 1.0, 0.0, 3, 0.2).is_err());
    assert!(correlate_segments(&video, &audio, 1.0, 0.1, 0, 0.2).is_err());
    assert!(correlate_segments(&video, &audio, 1.0, 0.1, 3, 0.0).is_err());
    assert!(correlate_segments(&video, &audio, 1.0, 0.1, 3, 0.05).is_err());
}
// Missing pulses must leave holes in the spacing-derived index sequence
// (0,1,3,4,6 / 0,1,2,4,5) instead of shifting later pulses, and the phase
// estimate must stay near the true per-track offset.
#[test]
fn phase_estimation_and_indexing_stay_stable_when_pulses_are_missing() {
    let video_phase = estimate_phase(&[4.0, 5.0, 7.0, 8.0, 10.0], 1.0);
    let audio_phase = estimate_phase(&[4.018, 5.017, 6.019, 8.018, 9.018], 1.0);
    assert!((video_phase - 0.0).abs() < 0.02);
    assert!((audio_phase - 0.018).abs() < 0.02);
    let video_indexed = index_onsets_by_spacing(&[4.0, 5.0, 7.0, 8.0, 10.0], 1.0);
    let audio_indexed = index_onsets_by_spacing(&[4.018, 5.017, 6.019, 8.018, 9.018], 1.0);
    assert_eq!(
        video_indexed.keys().copied().collect::<Vec<_>>(),
        vec![0, 1, 3, 4, 6]
    );
    assert_eq!(
        audio_indexed.keys().copied().collect::<Vec<_>>(),
        vec![0, 1, 2, 4, 5]
    );
}
// Degenerate index sets yield no candidates; a single cross pair yields its
// key difference; the wrapped-phase math is symmetric around ±half period.
#[test]
fn correlation_helpers_cover_empty_index_sets_and_wrapped_phase_math() {
    assert!(index_onsets_by_spacing(&[], 1.0).is_empty());
    assert!(candidate_index_offsets(&BTreeMap::new(), &BTreeMap::new()).is_empty());
    let mut video_only = BTreeMap::new();
    video_only.insert(0, 1.0);
    assert!(candidate_index_offsets(&video_only, &BTreeMap::new()).is_empty());
    let mut audio_only = BTreeMap::new();
    audio_only.insert(0, 1.0);
    assert!(candidate_index_offsets(&BTreeMap::new(), &audio_only).is_empty());
    let mut video_indexed = BTreeMap::new();
    video_indexed.insert(2, 2.0);
    let mut audio_indexed = BTreeMap::new();
    audio_indexed.insert(5, 5.0);
    assert_eq!(
        candidate_index_offsets(&video_indexed, &audio_indexed),
        vec![3]
    );
    assert!((shortest_wrapped_difference(0.6, 1.0) + 0.4).abs() < 0.000_001);
    assert!((shortest_wrapped_difference(-0.6, 1.0) - 0.4).abs() < 0.000_001);
}
// The wide marker pulse proposes the true shift (offset 1) while the generic
// candidate set still contributes offset 0.
#[test]
fn marker_index_offsets_include_marker_alignment_and_general_fallback() {
    let video_indexed = index_onsets_by_spacing(&[4.0, 5.0, 7.0, 8.0, 10.0], 1.0);
    let audio_indexed = index_onsets_by_spacing(&[5.018, 6.017, 7.019, 9.018, 10.018], 1.0);
    let offsets = marker_index_offsets(&video_indexed, &audio_indexed, &[10.0], &[10.018]);
    assert!(offsets.contains(&1));
    assert!(offsets.contains(&0));
}
// With one pulse missing on each track, the three shared pulses must still
// pair and preserve the ~18 ms skew.
#[test]
fn correlate_onsets_ignores_missing_pulses_and_preserves_stable_skew() {
    let report = correlate_onsets(
        &[4.0, 5.0, 7.0, 8.0, 10.0],
        &[4.018, 5.017, 6.019, 8.018, 9.018],
        1.0,
        0.2,
    )
    .expect("correlated report");
    assert_eq!(report.paired_event_count, 3);
    assert!((report.mean_skew_ms - 17.666).abs() < 5.0);
    assert!(report.max_abs_skew_ms < 30.0);
}
// The first segment on each track is marker-width; the marker alignment must
// break the one-period aliasing so all three pulses pair at ~50 ms skew.
#[test]
fn correlate_segments_uses_markers_to_break_period_aliasing() {
    let video = vec![
        PulseSegment {
            start_s: 3.3,
            end_s: 3.55,
            duration_s: 0.25,
        },
        PulseSegment {
            start_s: 4.266667,
            end_s: 4.4,
            duration_s: 0.133333,
        },
        PulseSegment {
            start_s: 5.3,
            end_s: 5.433333,
            duration_s: 0.133333,
        },
    ];
    let audio = vec![
        PulseSegment {
            start_s: 3.35,
            end_s: 3.59,
            duration_s: 0.24,
        },
        PulseSegment {
            start_s: 4.316667,
            end_s: 4.436667,
            duration_s: 0.12,
        },
        PulseSegment {
            start_s: 5.35,
            end_s: 5.47,
            duration_s: 0.12,
        },
    ];
    let report =
        correlate_segments(&video, &audio, 1.0, 0.12, 5, 0.2).expect("marker-correlated report");
    assert_eq!(report.paired_event_count, 3);
    assert!((report.mean_skew_ms - 50.0).abs() < 10.0);
}
// Only the double-width segment qualifies as a marker onset.
#[test]
fn marker_detection_finds_wider_segments_only() {
    let markers = marker_onsets(
        &[
            PulseSegment {
                start_s: 1.0,
                end_s: 1.12,
                duration_s: 0.12,
            },
            PulseSegment {
                start_s: 5.0,
                end_s: 5.24,
                duration_s: 0.24,
            },
        ],
        0.12,
    );
    assert_eq!(markers, vec![5.0]);
}
// median contract: empty → 0.0, odd length → middle value, even length →
// mean of the two middle values.
#[test]
fn median_handles_empty_even_and_odd_inputs() {
    assert_eq!(median(Vec::new()), 0.0);
    assert_eq!(median(vec![1.0, 3.0, 2.0]), 2.0);
    assert_eq!(median(vec![4.0, 1.0, 3.0, 2.0]), 2.5);
}
// Shared shape check: every count and per-event vector in the report must
// agree with the expected paired-event count.
fn assert_sync_report_shape(report: &SyncAnalysisReport, paired_events: usize) {
    assert_eq!(report.video_event_count, paired_events);
    assert_eq!(report.audio_event_count, paired_events);
    assert_eq!(report.paired_event_count, paired_events);
    assert_eq!(report.skews_ms.len(), paired_events);
    assert_eq!(report.video_onsets_s.len(), paired_events);
    assert_eq!(report.audio_onsets_s.len(), paired_events);
}

View File

@ -0,0 +1,59 @@
use serde::Serialize;
// Defaults match the live sync-probe CLI defaults: a 5 ms audio analysis
// window, 0.5 s pairing gap, 1 s pulse period, 120 ms pulse width, and a
// wide "marker" pulse every 5th tick.
const DEFAULT_AUDIO_WINDOW_MS: u32 = 5;
const DEFAULT_MAX_PAIR_GAP_S: f64 = 0.5;
const DEFAULT_PULSE_PERIOD_S: f64 = 1.0;
const DEFAULT_PULSE_WIDTH_S: f64 = 0.12;
const DEFAULT_MARKER_TICK_PERIOD: u32 = 5;
/// Result of correlating one probe capture: event counts per track, the
/// pairing count, skew statistics in milliseconds, and the raw skew/onset
/// data they were computed from.
#[derive(Clone, Debug, PartialEq, Serialize)]
pub struct SyncAnalysisReport {
    /// Pulse onsets detected on the video track.
    pub video_event_count: usize,
    /// Pulse onsets detected on the audio track.
    pub audio_event_count: usize,
    /// Onsets successfully paired across the two tracks.
    pub paired_event_count: usize,
    /// Skew of the chronologically first pair.
    pub first_skew_ms: f64,
    /// Skew of the chronologically last pair.
    pub last_skew_ms: f64,
    pub mean_skew_ms: f64,
    pub median_skew_ms: f64,
    /// Largest absolute skew across all pairs.
    pub max_abs_skew_ms: f64,
    /// `last_skew_ms - first_skew_ms`: how much the skew changed over the run.
    pub drift_ms: f64,
    /// Per-pair skews in chronological order.
    pub skews_ms: Vec<f64>,
    pub video_onsets_s: Vec<f64>,
    pub audio_onsets_s: Vec<f64>,
}
/// Tunables for one analysis pass; the `DEFAULT_*` constants above hold the
/// values used by the live probe.
#[derive(Clone, Debug, PartialEq)]
pub struct SyncAnalysisOptions {
    /// Audio analysis window, in milliseconds.
    pub audio_window_ms: u32,
    /// Maximum onset distance, in seconds, still accepted as a pair.
    pub max_pair_gap_s: f64,
    /// Expected spacing between pulse starts, in seconds.
    pub pulse_period_s: f64,
    /// Expected width of a regular pulse, in seconds.
    pub pulse_width_s: f64,
    /// Every Nth pulse is the wide "marker" pulse.
    pub marker_tick_period: u32,
}
impl Default for SyncAnalysisOptions {
    /// Returns the live-probe defaults (the `DEFAULT_*` constants above).
    fn default() -> Self {
        Self {
            audio_window_ms: DEFAULT_AUDIO_WINDOW_MS,
            max_pair_gap_s: DEFAULT_MAX_PAIR_GAP_S,
            pulse_period_s: DEFAULT_PULSE_PERIOD_S,
            pulse_width_s: DEFAULT_PULSE_WIDTH_S,
            marker_tick_period: DEFAULT_MARKER_TICK_PERIOD,
        }
    }
}
#[cfg(test)]
mod tests {
    use super::SyncAnalysisOptions;
    // Pins the default values so a constant edit can't silently change the
    // live probe's behavior.
    #[test]
    fn default_options_match_live_probe_expectations() {
        let options = SyncAnalysisOptions::default();
        assert_eq!(options.audio_window_ms, 5);
        assert!((options.max_pair_gap_s - 0.5).abs() < f64::EPSILON);
        assert!((options.pulse_period_s - 1.0).abs() < f64::EPSILON);
        assert!((options.pulse_width_s - 0.12).abs() < f64::EPSILON);
        assert_eq!(options.marker_tick_period, 5);
    }
}

View File

@ -0,0 +1,85 @@
use std::env;
use std::fs;
use std::os::unix::fs::PermissionsExt;
use std::path::Path;
use temp_env::with_var;
use tempfile::tempdir;
/// Runs `test` with `PATH` pointing at stub `ffprobe`/`ffmpeg` shell scripts.
///
/// The stub `ffprobe` always prints `ffprobe_output`; the stub `ffmpeg`
/// replays the video or audio blob depending on whether `-map 0:v:0` or
/// `-map 0:a:0` appears in its arguments, and exits 64 on anything else.
/// A fake capture file is created and its path is handed to `test`;
/// `temp_env::with_var` restores `PATH` when `test` returns.
pub(super) fn with_fake_media_tools<T>(
    ffprobe_output: &[u8],
    ffmpeg_video_output: &[u8],
    ffmpeg_audio_output: &[u8],
    test: impl FnOnce(&Path) -> T,
) -> T {
    let temp_dir = tempdir().expect("tempdir");
    fs::write(temp_dir.path().join("ffprobe.out"), ffprobe_output).expect("write ffprobe");
    fs::write(
        temp_dir.path().join("ffmpeg-video.out"),
        ffmpeg_video_output,
    )
    .expect("write ffmpeg video");
    fs::write(
        temp_dir.path().join("ffmpeg-audio.out"),
        ffmpeg_audio_output,
    )
    .expect("write ffmpeg audio");
    write_executable(
        temp_dir.path(),
        "ffprobe",
        "#!/bin/sh\ncat \"$(dirname \"$0\")/ffprobe.out\"\n",
    );
    write_executable(
        temp_dir.path(),
        "ffmpeg",
        "#!/bin/sh\ncase \" $* \" in\n *\" -map 0:v:0 \"*) cat \"$(dirname \"$0\")/ffmpeg-video.out\" ;;\n *\" -map 0:a:0 \"*) cat \"$(dirname \"$0\")/ffmpeg-audio.out\" ;;\n *) printf 'unexpected ffmpeg args: %s\\n' \"$*\" >&2; exit 64 ;;\nesac\n",
    );
    // Prepend the stub directory so the scripts shadow any real tools.
    let prior_path = env::var("PATH").unwrap_or_default();
    let merged_path = if prior_path.is_empty() {
        temp_dir.path().display().to_string()
    } else {
        format!("{}:{prior_path}", temp_dir.path().display())
    };
    let capture_path = temp_dir.path().join("capture.mkv");
    fs::write(&capture_path, b"fake-capture").expect("write capture");
    with_var("PATH", Some(merged_path.as_str()), || test(&capture_path))
}
/// Builds an ffprobe-style `{"frames": [...]}` JSON document whose frames
/// carry only `best_effort_timestamp_time`, rendered to three decimals.
pub(super) fn frame_json(timestamps: &[f64]) -> Vec<u8> {
    let mut frames = Vec::with_capacity(timestamps.len());
    for timestamp in timestamps {
        frames.push(serde_json::json!({
            "best_effort_timestamp_time": format!("{timestamp:.3}")
        }));
    }
    let document = serde_json::json!({ "frames": frames });
    serde_json::to_vec(&document).expect("frame json")
}
/// Synthesizes a 48 kHz mono click track: silence everywhere except a
/// 300-sample burst of amplitude 18_000 starting at each click time.
/// Bursts are clipped at the end of the buffer.
pub(super) fn click_track_samples(click_times_s: &[f64], total_samples: usize) -> Vec<i16> {
    let mut samples = vec![0i16; total_samples];
    for click_time_s in click_times_s {
        let start = (*click_time_s * 48_000.0).round() as usize;
        let burst_start = start.min(samples.len());
        let burst_end = start.saturating_add(300).min(samples.len());
        for sample in &mut samples[burst_start..burst_end] {
            *sample = 18_000;
        }
    }
    samples
}
/// Serializes PCM samples to little-endian byte order, two bytes per sample.
pub(super) fn audio_samples_to_bytes(samples: &[i16]) -> Vec<u8> {
    let mut bytes = Vec::with_capacity(samples.len() * 2);
    for sample in samples {
        bytes.extend_from_slice(&sample.to_le_bytes());
    }
    bytes
}
/// Writes `contents` to `dir/name` and marks the file executable (0o755).
fn write_executable(dir: &Path, name: &str, contents: &str) {
    let script_path = dir.join(name);
    fs::write(&script_path, contents).expect("write script");
    let metadata = fs::metadata(&script_path).expect("script metadata");
    let mut permissions = metadata.permissions();
    permissions.set_mode(0o755);
    fs::set_permissions(&script_path, permissions).expect("script permissions");
}

View File

@ -0,0 +1,165 @@
//! Shared-clock synthetic A/V source for the upstream sync probe.
#[cfg(any(not(coverage), test))]
use anyhow::{Context, Result, bail};
#[cfg(any(not(coverage), test))]
use gst::prelude::*;
#[cfg(any(not(coverage), test))]
use gstreamer as gst;
#[cfg(any(not(coverage), test))]
use gstreamer_app as gst_app;
#[cfg(any(not(coverage), test))]
use lesavka_common::lesavka::{AudioPacket, VideoPacket};
#[cfg(any(not(coverage), test))]
use std::sync::{
Arc,
atomic::{AtomicBool, Ordering},
};
#[cfg(any(not(coverage), test))]
use std::thread::{self, JoinHandle};
#[cfg(any(not(coverage), test))]
use std::time::{Duration, Instant};
#[cfg(any(not(coverage), test))]
use std::{f64::consts::TAU, mem::size_of};
#[cfg(any(not(coverage), test))]
use crate::input::camera::{CameraCodec, CameraConfig};
#[cfg(any(not(coverage), test))]
use crate::sync_probe::schedule::PulseSchedule;
#[cfg(any(not(coverage), test))]
use crate::uplink_fresh_queue::{FreshPacketQueue, FreshQueueConfig};
#[cfg(coverage)]
mod coverage_stub;
#[cfg(not(coverage))]
mod runtime;
#[cfg(test)]
mod tests;
#[cfg(coverage)]
pub use coverage_stub::SyncProbeCapture;
#[cfg(not(coverage))]
pub use runtime::SyncProbeCapture;
// Freshness bounds for the probe's outbound queues: video keeps at most 8
// packets no older than 350 ms; audio keeps at most 32 packets up to 400 ms.
#[cfg(any(not(coverage), test))]
const PROBE_VIDEO_QUEUE: FreshQueueConfig = FreshQueueConfig {
    capacity: 8,
    max_age: Duration::from_millis(350),
};
#[cfg(any(not(coverage), test))]
const PROBE_AUDIO_QUEUE: FreshQueueConfig = FreshQueueConfig {
    capacity: 32,
    max_age: Duration::from_millis(400),
};
// Synthetic audio format: 48 kHz stereo S16LE generated in 10 ms chunks.
#[cfg(any(not(coverage), test))]
const AUDIO_SAMPLE_RATE: i32 = 48_000;
#[cfg(any(not(coverage), test))]
const AUDIO_CHANNELS: usize = 2;
#[cfg(any(not(coverage), test))]
const AUDIO_CHUNK_MS: u64 = 10;
// Pulse tone: a 1.8 kHz sine at roughly 73% of i16 full scale.
#[cfg(any(not(coverage), test))]
const AUDIO_PULSE_FREQUENCY_HZ: f64 = 1_800.0;
#[cfg(any(not(coverage), test))]
const AUDIO_PULSE_AMPLITUDE: f64 = 24_000.0;
/// Renders an all-dark packed-RGB probe frame: every channel sits at 16.
#[cfg(any(not(coverage), test))]
fn build_dark_probe_frame(width: usize, height: usize) -> Vec<u8> {
    let byte_count = width.saturating_mul(height).saturating_mul(3);
    vec![16u8; byte_count]
}
/// Renders the regular pulse frame: a white rectangle covering the central
/// half of the image over the dark background.
#[cfg(any(not(coverage), test))]
fn build_regular_probe_frame(width: usize, height: usize) -> Vec<u8> {
    let mut frame = build_dark_probe_frame(width, height);
    let left = width / 4;
    let top = height / 4;
    let right = width.saturating_sub(left);
    let bottom = height.saturating_sub(top);
    fill_rect(&mut frame, width, left, top, right, bottom, 255);
    frame
}
/// Renders the marker pulse frame: a larger white rectangle (central 3/5)
/// plus a centered cross, so markers stay distinguishable from regular
/// pulses downstream.
#[cfg(any(not(coverage), test))]
fn build_marker_probe_frame(width: usize, height: usize) -> Vec<u8> {
    let mut frame = build_dark_probe_frame(width, height);
    let left = width / 5;
    let top = height / 5;
    let right = width.saturating_sub(left);
    let bottom = height.saturating_sub(top);
    fill_rect(&mut frame, width, left, top, right, bottom, 255);
    let half_bar_width = (width / 48).max(6);
    let half_bar_height = (height / 48).max(6);
    let center_x = width / 2;
    let center_y = height / 2;
    // Vertical bar of the cross.
    fill_rect(
        &mut frame,
        width,
        center_x.saturating_sub(half_bar_width),
        top,
        (center_x + half_bar_width).min(width),
        bottom,
        255,
    );
    // Horizontal bar of the cross.
    fill_rect(
        &mut frame,
        width,
        left,
        center_y.saturating_sub(half_bar_height),
        right,
        (center_y + half_bar_height).min(height),
        255,
    );
    frame
}
/// Paints the axis-aligned rectangle [x0, x1) × [y0, y1) of a packed RGB
/// frame with a single grey `value`, clipping all coordinates to the frame.
///
/// `width` is the frame width in pixels; the height is derived from the
/// buffer length. Out-of-range rectangles are silently clipped; a zero-width
/// or empty frame is a no-op (previously this divided by zero and panicked).
#[cfg(any(not(coverage), test))]
fn fill_rect(
    frame: &mut [u8],
    width: usize,
    x0: usize,
    y0: usize,
    x1: usize,
    y1: usize,
    value: u8,
) {
    // Guard the division below: nothing to paint on a degenerate frame.
    if width == 0 || frame.is_empty() {
        return;
    }
    let height = frame.len() / width.saturating_mul(3);
    let x1 = x1.min(width);
    let y1 = y1.min(height);
    for y in y0.min(height)..y1 {
        for x in x0.min(width)..x1 {
            let offset = (y * width + x) * 3;
            frame[offset] = value;
            frame[offset + 1] = value;
            frame[offset + 2] = value;
        }
    }
}
/// Synthesizes one interleaved S16LE chunk aligned to the shared pulse
/// schedule: a sine burst at `AUDIO_PULSE_FREQUENCY_HZ` while the flash is
/// active at a sample's timestamp, silence otherwise.
#[cfg(any(not(coverage), test))]
fn render_audio_chunk(
    schedule: &PulseSchedule,
    chunk_pts: Duration,
    samples_per_chunk: usize,
) -> Vec<u8> {
    // One sample period (1/48_000 s) expressed in nanoseconds.
    let sample_step = Duration::from_nanos(1_000_000_000u64 / AUDIO_SAMPLE_RATE as u64);
    let mut pcm = Vec::with_capacity(samples_per_chunk * AUDIO_CHANNELS * size_of::<i16>());
    for sample_index in 0..samples_per_chunk {
        let sample_pts = chunk_pts + sample_step.saturating_mul(sample_index as u32);
        let amplitude = if schedule.flash_active(sample_pts) {
            // Phase comes from absolute time, keeping the tone continuous
            // across chunk boundaries.
            let phase = TAU * AUDIO_PULSE_FREQUENCY_HZ * sample_pts.as_secs_f64();
            (phase.sin() * AUDIO_PULSE_AMPLITUDE) as i16
        } else {
            0
        };
        // Duplicate the mono value into every channel.
        for _ in 0..AUDIO_CHANNELS {
            pcm.extend_from_slice(&amplitude.to_le_bytes());
        }
    }
    pcm
}
/// True once a packet's PTS (in microseconds) runs strictly past the probe's
/// planned duration; a PTS exactly at the boundary is still kept.
fn probe_pts_exceeds_duration(pts_usecs: u64, duration: std::time::Duration) -> bool {
    let duration_usecs = duration.as_micros() as u64;
    pts_usecs > duration_usecs
}

View File

@ -0,0 +1,34 @@
/// Inert stand-in for the GStreamer-backed capture, used in coverage builds
/// where the real pipeline is compiled out.
#[derive(Default)]
pub struct SyncProbeCapture;
impl SyncProbeCapture {
    /// Mirrors the runtime constructor's signature; every argument is
    /// ignored and construction always succeeds.
    pub fn new(
        _camera: crate::input::camera::CameraConfig,
        _schedule: crate::sync_probe::schedule::PulseSchedule,
        _duration: std::time::Duration,
    ) -> anyhow::Result<Self> {
        Ok(Self)
    }
    /// Minimal single-slot, 1 ms max-age queue so callers can still push and
    /// pop video packets. Note each call builds a fresh queue; it is not
    /// shared between calls.
    pub fn video_queue(
        &self,
    ) -> crate::uplink_fresh_queue::FreshPacketQueue<lesavka_common::lesavka::VideoPacket> {
        crate::uplink_fresh_queue::FreshPacketQueue::new(
            crate::uplink_fresh_queue::FreshQueueConfig {
                capacity: 1,
                max_age: std::time::Duration::from_millis(1),
            },
        )
    }
    /// Audio counterpart of `video_queue`; also a fresh queue per call.
    pub fn audio_queue(
        &self,
    ) -> crate::uplink_fresh_queue::FreshPacketQueue<lesavka_common::lesavka::AudioPacket> {
        crate::uplink_fresh_queue::FreshPacketQueue::new(
            crate::uplink_fresh_queue::FreshQueueConfig {
                capacity: 1,
                max_age: std::time::Duration::from_millis(1),
            },
        )
    }
}

View File

@ -0,0 +1,330 @@
use super::*;
/// Live synthetic A/V source: one GStreamer pipeline plus a generator thread
/// per track feeding encoded packets into freshness-bounded queues.
pub struct SyncProbeCapture {
    pipeline: gst::Pipeline,
    /// Cleared in `drop` to ask both generator threads to exit.
    running: Arc<AtomicBool>,
    video_queue: FreshPacketQueue<VideoPacket>,
    audio_queue: FreshPacketQueue<AudioPacket>,
    // Options so `drop` can take() and join the handles.
    video_thread: Option<JoinHandle<()>>,
    audio_thread: Option<JoinHandle<()>>,
}
impl SyncProbeCapture {
    /// Builds and starts the synthetic capture pipeline, then spawns one
    /// generator thread per track to feed the fresh-packet queues.
    ///
    /// # Errors
    /// Fails when GStreamer can't initialize, when the pipeline description
    /// won't build (e.g. no usable encoder element), or when the pipeline
    /// refuses to reach `Playing`.
    pub fn new(camera: CameraConfig, schedule: PulseSchedule, duration: Duration) -> Result<Self> {
        gst::init().context("gst init")?;
        let pipeline = build_pipeline(camera, &schedule)?;
        // Resolve the four named app elements wired up by build_pipeline.
        let video_src = pipeline
            .by_name("sync_probe_video_src")
            .context("missing sync probe video appsrc")?
            .downcast::<gst_app::AppSrc>()
            .expect("video appsrc")
        let video_sink = pipeline
            .by_name("sync_probe_video_sink")
            .context("missing sync probe video appsink")?
            .downcast::<gst_app::AppSink>()
            .expect("video appsink");
        let audio_src = pipeline
            .by_name("sync_probe_audio_src")
            .context("missing sync probe audio appsrc")?
            .downcast::<gst_app::AppSrc>()
            .expect("audio appsrc");
        let audio_sink = pipeline
            .by_name("sync_probe_audio_sink")
            .context("missing sync probe audio appsink")?
            .downcast::<gst_app::AppSink>()
            .expect("audio appsink");
        pipeline
            .set_state(gst::State::Playing)
            .context("starting sync probe pipeline")?;
        let running = Arc::new(AtomicBool::new(true));
        // Single wall-clock origin so both generator threads pace against
        // the same start instant.
        let probe_start = Instant::now();
        let video_queue = FreshPacketQueue::new(PROBE_VIDEO_QUEUE);
        let audio_queue = FreshPacketQueue::new(PROBE_AUDIO_QUEUE);
        let video_thread = spawn_video_thread(
            video_src,
            video_sink,
            camera,
            schedule.clone(),
            duration,
            probe_start,
            running.clone(),
            video_queue.clone(),
        );
        let audio_thread = spawn_audio_thread(
            audio_src,
            audio_sink,
            schedule,
            duration,
            probe_start,
            running.clone(),
            audio_queue.clone(),
        );
        Ok(Self {
            pipeline,
            running,
            video_queue,
            audio_queue,
            video_thread: Some(video_thread),
            audio_thread: Some(audio_thread),
        })
    }
    /// Handle to the queue of encoded video packets produced by the probe.
    pub fn video_queue(&self) -> FreshPacketQueue<VideoPacket> {
        self.video_queue.clone()
    }
    /// Handle to the queue of encoded audio packets produced by the probe.
    pub fn audio_queue(&self) -> FreshPacketQueue<AudioPacket> {
        self.audio_queue.clone()
    }
}
impl Drop for SyncProbeCapture {
    /// Signals the generator threads to stop, closes both queues
    /// (unblocking any waiting consumer), tears the pipeline down to Null,
    /// and then joins the threads, ignoring join errors.
    fn drop(&mut self) {
        self.running.store(false, Ordering::Release);
        self.video_queue.close();
        self.audio_queue.close();
        let _ = self.pipeline.set_state(gst::State::Null);
        if let Some(handle) = self.video_thread.take() {
            let _ = handle.join();
        }
        if let Some(handle) = self.audio_thread.take() {
            let _ = handle.join();
        }
    }
}
/// Assembles the gst-launch description for the probe: an appsrc-fed video
/// branch (JPEG or H.264, chosen by the camera codec) and an appsrc-fed
/// audio branch encoding to ADTS AAC, each terminating in a named appsink.
///
/// Timestamps are supplied by the generator threads (`do-timestamp=false`),
/// and every queue is leaky so a slow consumer can't back up an encoder.
fn build_pipeline(camera: CameraConfig, _schedule: &PulseSchedule) -> Result<gst::Pipeline> {
    let video_caps = format!(
        "video/x-raw,format=RGB,width={},height={},framerate={}/1",
        camera.width,
        camera.height,
        camera.fps.max(1)
    );
    let video_branch = match camera.codec {
        CameraCodec::Mjpeg => format!(
            "appsrc name=sync_probe_video_src is-live=true format=time do-timestamp=false caps={video_caps} ! \
             queue max-size-buffers=4 leaky=downstream ! videoconvert ! \
             jpegenc quality=90 ! image/jpeg,parsed=true,width={},height={},framerate={}/1 ! \
             appsink name=sync_probe_video_sink emit-signals=false sync=false max-buffers=4 drop=true",
            camera.width,
            camera.height,
            camera.fps.max(1),
        ),
        CameraCodec::H264 => format!(
            "appsrc name=sync_probe_video_src is-live=true format=time do-timestamp=false caps={video_caps} ! \
             queue max-size-buffers=4 leaky=downstream ! videoconvert ! \
             {} ! h264parse config-interval=-1 ! video/x-h264,stream-format=byte-stream,alignment=au ! \
             appsink name=sync_probe_video_sink emit-signals=false sync=false max-buffers=4 drop=true",
            pick_h264_encoder(camera.fps.max(1))?
        ),
    };
    let audio_branch = format!(
        "appsrc name=sync_probe_audio_src is-live=true format=time do-timestamp=false \
         caps=audio/x-raw,format=S16LE,layout=interleaved,channels={},rate={} ! \
         queue max-size-buffers=8 leaky=downstream ! \
         audioconvert ! audioresample ! audio/x-raw,channels=2,rate={} ! \
         {} ! aacparse ! capsfilter caps=audio/mpeg,stream-format=adts,rate={},channels=2 ! \
         appsink name=sync_probe_audio_sink emit-signals=false sync=false max-buffers=32 drop=true",
        AUDIO_CHANNELS,
        AUDIO_SAMPLE_RATE,
        AUDIO_SAMPLE_RATE,
        pick_aac_encoder()?,
        AUDIO_SAMPLE_RATE,
    );
    // The two branches are independent; parse::launch wires them into one
    // pipeline object.
    let desc = format!("{video_branch} {audio_branch}");
    gst::parse::launch(&desc)
        .with_context(|| format!("building sync probe pipeline: {desc}"))?
        .downcast::<gst::Pipeline>()
        .map_err(|_| anyhow::anyhow!("sync probe description did not build a pipeline"))
}
/// Chooses the first available H.264 encoder element, preferring the
/// low-latency x264enc software path, then openh264enc, then the V4L2
/// hardware encoder. Fails when none of the three is installed.
fn pick_h264_encoder(fps: u32) -> Result<String> {
    let keyframe_interval = fps.max(1);
    if gst::ElementFactory::find("x264enc").is_some() {
        Ok(format!(
            "x264enc tune=zerolatency speed-preset=ultrafast bitrate=2500 key-int-max={keyframe_interval}"
        ))
    } else if gst::ElementFactory::find("openh264enc").is_some() {
        Ok("openh264enc bitrate=2500000".to_string())
    } else if gst::ElementFactory::find("v4l2h264enc").is_some() {
        Ok("v4l2h264enc".to_string())
    } else {
        bail!("no usable H.264 encoder found for sync probe")
    }
}
/// Chooses the first installed AAC encoder from the preference list
/// (avenc_aac, then fdkaacenc, then faac) and returns its launch fragment.
fn pick_aac_encoder() -> Result<&'static str> {
    const CANDIDATES: [&str; 3] = [
        "avenc_aac bitrate=128000",
        "fdkaacenc bitrate=128000",
        "faac bitrate=128000",
    ];
    for candidate in CANDIDATES {
        // The element name is the first word of the launch fragment.
        let element_name = candidate.split_ascii_whitespace().next().unwrap_or_default();
        if gst::ElementFactory::find(element_name).is_some() {
            return Ok(candidate);
        }
    }
    Err(anyhow::anyhow!("no usable AAC encoder found for sync probe"))
}
/// Spawns the video generator: renders dark/regular/marker RGB frames on the
/// schedule's frame clock, paces them against `probe_start` wall time, feeds
/// the encoder appsrc, and forwards each encoded AU to `queue`.
///
/// The thread exits when `running` clears, when `duration` is reached, or
/// when the appsrc rejects a buffer; it then signals EOS and closes `queue`.
fn spawn_video_thread(
    src: gst_app::AppSrc,
    sink: gst_app::AppSink,
    camera: CameraConfig,
    schedule: PulseSchedule,
    duration: Duration,
    probe_start: Instant,
    running: Arc<AtomicBool>,
    queue: FreshPacketQueue<VideoPacket>,
) -> JoinHandle<()> {
    thread::spawn(move || {
        // Pre-render the three frame variants once; the loop only selects.
        let dark_frame = build_dark_probe_frame(camera.width as usize, camera.height as usize);
        let regular_pulse_frame =
            build_regular_probe_frame(camera.width as usize, camera.height as usize);
        let marker_pulse_frame =
            build_marker_probe_frame(camera.width as usize, camera.height as usize);
        let frame_step = Duration::from_nanos(1_000_000_000u64 / u64::from(camera.fps.max(1)));
        let mut frame_index = 0u64;
        while running.load(Ordering::Acquire) {
            let pts = schedule.frame_pts(frame_index, camera.fps.max(1));
            if pts > duration {
                break;
            }
            // Sleep until this frame's wall-clock deadline.
            let deadline = probe_start + pts;
            if let Some(remaining) = deadline.checked_duration_since(Instant::now())
                && !remaining.is_zero()
            {
                thread::sleep(remaining);
            }
            let frame = if schedule.flash_active(pts) && schedule.pulse_is_marker(pts) {
                &marker_pulse_frame
            } else if schedule.flash_active(pts) {
                &regular_pulse_frame
            } else {
                &dark_frame
            };
            let mut buffer = gst::Buffer::from_slice(frame.clone());
            if let Some(meta) = buffer.get_mut() {
                let pts_time = gst::ClockTime::from_nseconds(pts.as_nanos() as u64);
                meta.set_pts(Some(pts_time));
                meta.set_dts(Some(pts_time));
                meta.set_duration(Some(gst::ClockTime::from_nseconds(
                    frame_step.as_nanos() as u64
                )));
            }
            if src.push_buffer(buffer).is_err() {
                break;
            }
            // Forward whatever the encoder has ready; the PTS is re-read
            // from the encoded buffer and converted to microseconds.
            if let Some(sample) = sink.try_pull_sample(gst::ClockTime::from_mseconds(250))
                && let Some(buffer) = sample.buffer()
                && let Ok(map) = buffer.map_readable()
            {
                let packet = VideoPacket {
                    id: 2,
                    pts: buffer.pts().unwrap_or(gst::ClockTime::ZERO).nseconds() / 1_000,
                    data: map.as_slice().to_vec(),
                    ..Default::default()
                };
                let _ = queue.push(packet, Duration::ZERO);
            }
            frame_index = frame_index.saturating_add(1);
        }
        let _ = src.end_of_stream();
        queue.close();
    })
}
/// Spawns the audio generator: synthesizes 10 ms PCM chunks on the shared
/// schedule, paces them against `probe_start`, feeds the AAC encoder appsrc,
/// and drains encoded samples into `queue` after every push, with one final
/// blocking drain before closing the queue.
fn spawn_audio_thread(
    src: gst_app::AppSrc,
    sink: gst_app::AppSink,
    schedule: PulseSchedule,
    duration: Duration,
    probe_start: Instant,
    running: Arc<AtomicBool>,
    queue: FreshPacketQueue<AudioPacket>,
) -> JoinHandle<()> {
    thread::spawn(move || {
        let chunk_duration = Duration::from_millis(AUDIO_CHUNK_MS);
        let samples_per_chunk =
            (AUDIO_SAMPLE_RATE as usize * AUDIO_CHUNK_MS as usize / 1_000).max(1);
        let mut chunk_index = 0u64;
        while running.load(Ordering::Acquire) {
            let pts = chunk_duration.saturating_mul(chunk_index as u32);
            if pts > duration {
                break;
            }
            // Sleep until this chunk's wall-clock deadline.
            let deadline = probe_start + pts;
            if let Some(remaining) = deadline.checked_duration_since(Instant::now())
                && !remaining.is_zero()
            {
                thread::sleep(remaining);
            }
            let chunk = render_audio_chunk(&schedule, pts, samples_per_chunk);
            let mut buffer = gst::Buffer::from_slice(chunk);
            if let Some(meta) = buffer.get_mut() {
                let pts_time = gst::ClockTime::from_nseconds(pts.as_nanos() as u64);
                meta.set_pts(Some(pts_time));
                meta.set_dts(Some(pts_time));
                meta.set_duration(Some(gst::ClockTime::from_nseconds(
                    chunk_duration.as_nanos() as u64,
                )));
            }
            if src.push_buffer(buffer).is_err() {
                break;
            }
            // Non-blocking drain keeps the queue fed between chunks.
            drain_audio_samples(&sink, &queue, duration, gst::ClockTime::ZERO);
            chunk_index = chunk_index.saturating_add(1);
        }
        let _ = src.end_of_stream();
        // Final drain waits up to 100 ms per pull so the encoder can flush.
        drain_audio_samples(&sink, &queue, duration, gst::ClockTime::from_mseconds(100));
        queue.close();
    })
}
/// Pulls every encoded AAC sample currently available from the appsink and
/// pushes it onto the outbound queue.
///
/// A sample stamped past the probe `duration` stops the drain; unreadable or
/// buffer-less samples are skipped. With `timeout == ZERO` this polls
/// without blocking (per-chunk drain); the final drain passes a non-zero
/// timeout to wait for the encoder's tail.
fn drain_audio_samples(
    sink: &gst_app::AppSink,
    queue: &FreshPacketQueue<AudioPacket>,
    duration: Duration,
    timeout: gst::ClockTime,
) {
    while let Some(sample) = sink.try_pull_sample(timeout) {
        let Some(buffer) = sample.buffer() else {
            continue;
        };
        let pts_usecs = buffer.pts().unwrap_or(gst::ClockTime::ZERO).nseconds() / 1_000;
        if probe_pts_exceeds_duration(pts_usecs, duration) {
            break;
        }
        let Ok(map) = buffer.map_readable() else {
            continue;
        };
        let packet = AudioPacket {
            id: 0,
            pts: pts_usecs,
            data: map.as_slice().to_vec(),
        };
        let _ = queue.push(packet, Duration::ZERO);
        // NOTE(review): the original ended the loop body with
        // `if timeout == gst::ClockTime::ZERO { continue; }`, a no-op
        // (`continue` as the last statement); it has been removed. If the
        // intent was to stop after one sample when a non-zero timeout is
        // used, that would be a behavior change requiring a deliberate fix.
    }
}

View File

@ -0,0 +1,131 @@
use super::{
SyncProbeCapture, build_dark_probe_frame, build_marker_probe_frame, build_regular_probe_frame,
};
use crate::input::camera::{CameraCodec, CameraConfig};
use crate::sync_probe::analyze::detect_audio_onsets;
use crate::sync_probe::schedule::PulseSchedule;
use lesavka_common::lesavka::{AudioPacket, VideoPacket};
use std::time::Duration;
// 720p30 MJPEG camera config shared by the tests in this file.
fn stub_camera() -> CameraConfig {
    CameraConfig {
        codec: CameraCodec::Mjpeg,
        width: 1280,
        height: 720,
        fps: 30,
    }
}
// Even the coverage stub's queues must round-trip pushed packets so
// downstream code can always exercise push/pop.
#[tokio::test]
async fn coverage_stub_exposes_live_video_and_audio_queues() {
    let capture = SyncProbeCapture::new(
        stub_camera(),
        PulseSchedule::new(
            Duration::from_secs(4),
            Duration::from_secs(1),
            Duration::from_millis(100),
            5,
        ),
        Duration::from_secs(2),
    )
    .expect("stub capture");
    let video_queue = capture.video_queue();
    let audio_queue = capture.audio_queue();
    let _ = video_queue.push(
        VideoPacket {
            id: 2,
            pts: 1,
            data: vec![1, 2, 3],
            ..Default::default()
        },
        Duration::ZERO,
    );
    let _ = audio_queue.push(
        AudioPacket {
            id: 0,
            pts: 2,
            data: vec![4, 5, 6],
        },
        Duration::ZERO,
    );
    let video = video_queue.pop_fresh().await;
    let audio = audio_queue.pop_fresh().await;
    assert_eq!(video.packet.expect("video packet").data, vec![1, 2, 3]);
    assert_eq!(audio.packet.expect("audio packet").data, vec![4, 5, 6]);
}
// A PTS exactly at the duration boundary is kept; one microsecond past it
// is cut off.
#[test]
fn probe_pts_duration_cutoff_matches_video_boundary_rule() {
    assert!(!super::probe_pts_exceeds_duration(
        Duration::from_secs(2).as_micros() as u64,
        Duration::from_secs(2),
    ));
    assert!(super::probe_pts_exceeds_duration(
        Duration::from_secs(2).as_micros() as u64 + 1,
        Duration::from_secs(2),
    ));
}
// Renders six seconds of audio chunks, downmixes to mono, and checks the
// detected click onsets land on the schedule's pulses at 4 s and 5 s.
#[test]
fn synthesized_audio_pulses_track_video_flash_boundaries() {
    let schedule = PulseSchedule::new(
        Duration::from_secs(4),
        Duration::from_secs(1),
        Duration::from_millis(120),
        5,
    );
    let chunk_samples =
        (super::AUDIO_SAMPLE_RATE as usize * super::AUDIO_CHUNK_MS as usize / 1_000).max(1);
    let mut samples = Vec::<i16>::new();
    let chunk_duration = Duration::from_millis(super::AUDIO_CHUNK_MS);
    for chunk_index in 0..600u64 {
        let pts = chunk_duration.saturating_mul(chunk_index as u32);
        if pts > Duration::from_secs(6) {
            break;
        }
        let chunk = super::render_audio_chunk(&schedule, pts, chunk_samples);
        // Decode the little-endian PCM bytes back into i16 samples.
        samples.extend(
            chunk
                .chunks_exact(std::mem::size_of::<i16>())
                .map(|bytes| i16::from_le_bytes([bytes[0], bytes[1]])),
        );
    }
    // Keep only the first channel of each interleaved frame.
    let mono = samples
        .chunks_exact(super::AUDIO_CHANNELS)
        .map(|frame| frame[0])
        .collect::<Vec<_>>();
    let onsets =
        detect_audio_onsets(&mono, super::AUDIO_SAMPLE_RATE as u32, 5).expect("audio onsets");
    assert!(
        onsets.len() >= 2,
        "expected at least 2 onsets, got {onsets:?}"
    );
    assert!(
        onsets
            .iter()
            .zip([4.0, 5.0])
            .all(|(actual, expected)| { (*actual - expected).abs() <= 0.02 })
    );
}
// The three frame variants must share a size yet be pairwise
// distinguishable in content.
#[test]
fn probe_video_frames_render_distinct_idle_regular_and_marker_patterns() {
    let dark = build_dark_probe_frame(64, 36);
    let regular = build_regular_probe_frame(64, 36);
    let marker = build_marker_probe_frame(64, 36);
    assert_eq!(dark.len(), regular.len());
    assert_eq!(dark.len(), marker.len());
    assert!(
        regular.iter().map(|byte| u64::from(*byte)).sum::<u64>()
            != dark.iter().map(|byte| u64::from(*byte)).sum::<u64>()
    );
    assert!(
        marker.iter().map(|byte| u64::from(*byte)).sum::<u64>()
            != dark.iter().map(|byte| u64::from(*byte)).sum::<u64>()
    );
    assert_ne!(regular, marker);
}

View File

@ -0,0 +1,214 @@
//! CLI parsing for the upstream A/V sync probe.
use anyhow::{Context, Result, bail};
use std::time::Duration;
use crate::app_support::DEFAULT_SERVER_ADDR;
/// Fully-resolved settings for one upstream A/V sync probe run.
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct ProbeConfig {
    /// Relay server endpoint, e.g. `http://HOST:50051`.
    pub server: String,
    /// How long the probe streams.
    pub duration: Duration,
    /// Quiet lead-in before pulses start.
    pub warmup: Duration,
    /// Distance between pulse starts.
    pub pulse_period: Duration,
    /// Lit/audible width of a regular pulse; must stay below the period.
    pub pulse_width: Duration,
    /// Every Nth pulse becomes a marker pulse.
    pub marker_tick_period: u32,
}
/// What the CLI parser decided: run the probe, or just print help.
#[derive(Debug, Eq, PartialEq)]
pub enum ParseOutcome {
    /// Run with the resolved configuration.
    Run(ProbeConfig),
    /// `--help`/`-h` was requested; print usage and exit.
    Help,
}
/// One-line usage banner printed for `--help` and appended to argument errors.
pub fn usage() -> &'static str {
    "Usage: lesavka-sync-probe [--server http://HOST:50051] [--duration-seconds 10] [--warmup-seconds 4] [--pulse-period-ms 1000] [--pulse-width-ms 120] [--marker-tick-period 5]"
}
/// Parse probe CLI arguments into a [`ParseOutcome`].
///
/// Unknown flags, missing values, non-numeric values, zero durations/periods,
/// and a pulse width that does not stay strictly below the pulse period are
/// all rejected with the usage banner attached to the error.
pub fn parse_args_outcome_from<I, S>(args: I) -> Result<ParseOutcome>
where
    I: IntoIterator<Item = S>,
    S: Into<String>,
{
    let mut argv = args.into_iter().map(Into::into);

    // Operational defaults; each flag below overrides exactly one of them.
    let mut server = DEFAULT_SERVER_ADDR.to_string();
    let mut duration_seconds = 10u64;
    let mut warmup_seconds = 4u64;
    let mut pulse_period_ms = 1_000u64;
    let mut pulse_width_ms = 120u64;
    let mut marker_tick_period = 5u32;

    while let Some(flag) = argv.next() {
        match flag.as_str() {
            "--help" | "-h" => return Ok(ParseOutcome::Help),
            "--server" => {
                let value = argv.next().context("missing value after --server")?;
                server = value.trim().to_string();
            }
            "--duration-seconds" => {
                duration_seconds = parse_u64_arg(
                    argv.next(),
                    "--duration-seconds",
                    "duration must be at least one second",
                )?;
                if duration_seconds == 0 {
                    bail!("duration must be at least one second\n{}", usage());
                }
            }
            "--warmup-seconds" => {
                // Zero warmup is allowed; only the numeric parse can fail here.
                warmup_seconds = parse_u64_arg(
                    argv.next(),
                    "--warmup-seconds",
                    "warmup must stay non-negative",
                )?;
            }
            "--pulse-period-ms" => {
                pulse_period_ms = parse_u64_arg(
                    argv.next(),
                    "--pulse-period-ms",
                    "pulse period must be positive",
                )?;
                if pulse_period_ms == 0 {
                    bail!("pulse period must be positive\n{}", usage());
                }
            }
            "--pulse-width-ms" => {
                pulse_width_ms = parse_u64_arg(
                    argv.next(),
                    "--pulse-width-ms",
                    "pulse width must be positive",
                )?;
                if pulse_width_ms == 0 {
                    bail!("pulse width must be positive\n{}", usage());
                }
            }
            "--marker-tick-period" => {
                marker_tick_period = parse_u32_arg(
                    argv.next(),
                    "--marker-tick-period",
                    "marker tick period must be positive",
                )?;
                if marker_tick_period == 0 {
                    bail!("marker tick period must be positive\n{}", usage());
                }
            }
            _ => bail!("unexpected argument `{flag}`\n{}", usage()),
        }
    }

    // Cross-flag invariant: the flash must switch off before the next pulse.
    if pulse_width_ms >= pulse_period_ms {
        bail!(
            "pulse width must stay smaller than the pulse period\n{}",
            usage()
        );
    }

    Ok(ParseOutcome::Run(ProbeConfig {
        server,
        duration: Duration::from_secs(duration_seconds),
        warmup: Duration::from_secs(warmup_seconds),
        pulse_period: Duration::from_millis(pulse_period_ms),
        pulse_width: Duration::from_millis(pulse_width_ms),
        marker_tick_period,
    }))
}
/// Parse an optional flag value as `u64`, attaching `context` plus the usage
/// banner to any parse failure and reporting a missing value for `flag`.
fn parse_u64_arg(value: Option<String>, flag: &str, context: &str) -> Result<u64> {
    let raw = value.context(format!("missing value after {flag}"))?;
    raw.trim()
        .parse::<u64>()
        .with_context(|| format!("{context}\n{}", usage()))
}
/// `u32` twin of [`parse_u64_arg`]: same missing-value and parse-error
/// reporting, narrower target type.
fn parse_u32_arg(value: Option<String>, flag: &str, context: &str) -> Result<u32> {
    let raw = value.context(format!("missing value after {flag}"))?;
    raw.trim()
        .parse::<u32>()
        .with_context(|| format!("{context}\n{}", usage()))
}
#[cfg(test)]
mod tests {
    use super::{DEFAULT_SERVER_ADDR, ParseOutcome, parse_args_outcome_from};
    use std::time::Duration;

    // No arguments at all must yield the documented operational defaults.
    #[test]
    fn parse_args_uses_operational_defaults() {
        let outcome = parse_args_outcome_from(std::iter::empty::<&str>()).expect("defaults");
        let ParseOutcome::Run(config) = outcome else {
            panic!("expected run config");
        };
        assert_eq!(config.server, DEFAULT_SERVER_ADDR);
        assert_eq!(config.duration, Duration::from_secs(10));
        assert_eq!(config.warmup, Duration::from_secs(4));
        assert_eq!(config.pulse_period, Duration::from_millis(1_000));
        assert_eq!(config.pulse_width, Duration::from_millis(120));
        assert_eq!(config.marker_tick_period, 5);
    }

    // Every flag is honored when provided explicitly.
    #[test]
    fn parse_args_accepts_explicit_probe_settings() {
        let outcome = parse_args_outcome_from([
            "--server",
            "http://lab:50051",
            "--duration-seconds",
            "14",
            "--warmup-seconds",
            "6",
            "--pulse-period-ms",
            "750",
            "--pulse-width-ms",
            "90",
            "--marker-tick-period",
            "3",
        ])
        .expect("configured run");
        let ParseOutcome::Run(config) = outcome else {
            panic!("expected run config");
        };
        assert_eq!(config.server, "http://lab:50051");
        assert_eq!(config.duration, Duration::from_secs(14));
        assert_eq!(config.warmup, Duration::from_secs(6));
        assert_eq!(config.pulse_period, Duration::from_millis(750));
        assert_eq!(config.pulse_width, Duration::from_millis(90));
        assert_eq!(config.marker_tick_period, 3);
    }

    // Zero periods/durations and a width >= period are rejected.
    #[test]
    fn parse_args_rejects_invalid_timing_relationships() {
        assert!(parse_args_outcome_from(["--pulse-width-ms", "1000"]).is_err());
        assert!(parse_args_outcome_from(["--pulse-period-ms", "0"]).is_err());
        assert!(parse_args_outcome_from(["--duration-seconds", "0"]).is_err());
        assert!(parse_args_outcome_from(["--marker-tick-period", "0"]).is_err());
    }

    // `--help` short-circuits parsing into the Help outcome.
    #[test]
    fn parse_args_supports_help() {
        let outcome = parse_args_outcome_from(["--help"]).expect("help");
        assert_eq!(outcome, ParseOutcome::Help);
    }

    // Dangling flags and unknown flags both fail.
    #[test]
    fn parse_args_rejects_missing_values_and_unknown_flags() {
        assert!(parse_args_outcome_from(["--server"]).is_err());
        assert!(parse_args_outcome_from(["--duration-seconds"]).is_err());
        assert!(parse_args_outcome_from(["--marker-tick-period"]).is_err());
        assert!(parse_args_outcome_from(["--wat"]).is_err());
    }

    // Non-numeric values fail for every numeric flag.
    #[test]
    fn parse_args_rejects_non_numeric_values() {
        assert!(parse_args_outcome_from(["--duration-seconds", "abc"]).is_err());
        assert!(parse_args_outcome_from(["--warmup-seconds", "abc"]).is_err());
        assert!(parse_args_outcome_from(["--pulse-period-ms", "abc"]).is_err());
        assert!(parse_args_outcome_from(["--pulse-width-ms", "abc"]).is_err());
        assert!(parse_args_outcome_from(["--marker-tick-period", "abc"]).is_err());
    }
}

View File

@ -0,0 +1,15 @@
//! End-to-end upstream A/V sync probe tooling.
//!
//! This module exists to answer one narrow operational question: when the
//! client captures audio and video at the same moment, do those streams arrive
//! on Tethys in sync? The probe emits deterministic marker pulses through the
//! normal Lesavka relay path so the downstream consumer can be measured
//! precisely instead of judged by eye.
pub mod analyze;
mod capture;
mod config;
mod runner;
mod schedule;
pub use runner::run_sync_probe_from_args;

View File

@ -0,0 +1,173 @@
//! Runtime entrypoint for the shared-clock upstream sync probe.
use anyhow::{Context, Result, bail};
use crate::app_support;
use crate::handshake;
use crate::sync_probe::capture::SyncProbeCapture;
use crate::sync_probe::config::{ParseOutcome, ProbeConfig, parse_args_outcome_from, usage};
use crate::sync_probe::schedule::PulseSchedule;
#[cfg(not(coverage))]
use lesavka_common::lesavka::relay_client::RelayClient;
#[cfg(not(coverage))]
use tonic::{Request, transport::Channel};
/// Entry point: parse `args`, then either run the probe or print the usage
/// banner when help was requested.
pub async fn run_sync_probe_from_args<I, S>(args: I) -> Result<()>
where
    I: IntoIterator<Item = S>,
    S: Into<String>,
{
    let outcome = parse_args_outcome_from(args)?;
    if let ParseOutcome::Run(config) = outcome {
        run_sync_probe(config).await
    } else {
        // Help path: show usage and exit successfully.
        println!("{}", usage());
        Ok(())
    }
}
/// Drive one full probe run against a live relay: negotiate capabilities,
/// start the synthetic capture, and stream its video and audio queues over
/// two dedicated gRPC connections until the capture drains.
#[cfg(not(coverage))]
async fn run_sync_probe(config: ProbeConfig) -> Result<()> {
    // The probe needs both upstream paths; bail early if the server lacks one.
    let caps = handshake::negotiate(config.server.as_str()).await;
    if !caps.camera || !caps.microphone {
        bail!("server does not advertise both camera and microphone support");
    }
    let camera = app_support::camera_config_from_caps(&caps)
        .context("server handshake did not include a complete camera profile")?;
    let schedule = PulseSchedule::new(
        config.warmup,
        config.pulse_period,
        config.pulse_width,
        config.marker_tick_period,
    );
    let capture = SyncProbeCapture::new(camera, schedule, config.duration)?;
    tracing::info!(
        server = %config.server,
        duration_s = config.duration.as_secs(),
        codec = ?camera.codec,
        width = camera.width,
        height = camera.height,
        fps = camera.fps,
        "🧪 A/V sync probe starting"
    );
    // Separate channels so audio and video cannot head-of-line block each other.
    let video_channel = connect(config.server.as_str()).await?;
    let audio_channel = connect(config.server.as_str()).await?;
    let video_queue = capture.video_queue();
    let audio_queue = capture.audio_queue();
    let video_task = tokio::spawn(async move {
        let mut client = RelayClient::new(video_channel);
        // Yield packets until the queue reports end-of-capture (no packet).
        let outbound = async_stream::stream! {
            loop {
                let next = video_queue.pop_fresh().await;
                if let Some(packet) = next.packet {
                    yield packet;
                    continue;
                }
                break;
            }
        };
        let mut response = client
            .stream_camera(Request::new(outbound))
            .await
            .context("starting sync probe camera stream")?;
        // Drain server responses until the stream closes.
        // NOTE(review): `message()` yields Result<Option<_>>, so `transpose()`
        // is also Some for an Err — presumably tonic ends the stream after an
        // error; confirm this cannot spin on a persistently failing stream.
        while response.get_mut().message().await.transpose().is_some() {}
        Ok::<(), anyhow::Error>(())
    });
    let audio_task = tokio::spawn(async move {
        let mut client = RelayClient::new(audio_channel);
        // Mirror of the video task for the microphone path.
        let outbound = async_stream::stream! {
            loop {
                let next = audio_queue.pop_fresh().await;
                if let Some(packet) = next.packet {
                    yield packet;
                    continue;
                }
                break;
            }
        };
        let mut response = client
            .stream_microphone(Request::new(outbound))
            .await
            .context("starting sync probe microphone stream")?;
        while response.get_mut().message().await.transpose().is_some() {}
        Ok::<(), anyhow::Error>(())
    });
    // try_join! surfaces panics/join errors; the inner Results carry the
    // per-stream failures.
    let (video_result, audio_result) =
        tokio::try_join!(video_task, audio_task).context("joining sync probe streams")?;
    video_result.context("sync probe camera task failed")?;
    audio_result.context("sync probe microphone task failed")?;
    tracing::info!("🧪 A/V sync probe finished");
    Ok(())
}
/// Open one TCP_NODELAY gRPC channel to the relay at `server_addr`.
#[cfg(not(coverage))]
async fn connect(server_addr: &str) -> Result<Channel> {
    // Validate the address first so a bad URL fails before any I/O.
    let endpoint = Channel::from_shared(server_addr.to_string())
        .context("invalid relay server address")?
        .tcp_nodelay(true);
    endpoint
        .connect()
        .await
        .with_context(|| format!("connecting to relay at {server_addr}"))
}
/// Coverage builds skip the live relay round-trip entirely; the argument
/// surface is still exercised by the `#[cfg(coverage)]` tests below.
#[cfg(coverage)]
async fn run_sync_probe(_config: ProbeConfig) -> Result<()> {
    Ok(())
}
#[cfg(test)]
mod tests {
    use super::parse_args_outcome_from;
    #[cfg(coverage)]
    use super::run_sync_probe_from_args;
    use crate::sync_probe::config::ParseOutcome;

    // The runner re-exposes the parser's help path unchanged.
    #[test]
    fn help_passthrough_stays_stable() {
        let outcome = parse_args_outcome_from(["--help"]).expect("help");
        assert_eq!(outcome, ParseOutcome::Help);
    }

    // Coverage-only: the stubbed run path accepts default args end to end.
    #[cfg(coverage)]
    #[test]
    fn coverage_run_path_accepts_default_probe_args() {
        let rt = tokio::runtime::Runtime::new().expect("runtime");
        rt.block_on(async {
            run_sync_probe_from_args(std::iter::empty::<&str>())
                .await
                .expect("coverage run path");
        });
    }

    // Coverage-only: the stubbed run path accepts a fully-specified config.
    #[cfg(coverage)]
    #[test]
    fn coverage_run_path_accepts_custom_probe_args() {
        let rt = tokio::runtime::Runtime::new().expect("runtime");
        rt.block_on(async {
            run_sync_probe_from_args([
                "--server",
                "http://lab:50051",
                "--duration-seconds",
                "2",
                "--warmup-seconds",
                "5",
                "--pulse-period-ms",
                "700",
                "--pulse-width-ms",
                "80",
                "--marker-tick-period",
                "4",
            ])
            .await
            .expect("configured coverage run path");
        });
    }
}

View File

@ -0,0 +1,234 @@
#![cfg_attr(not(test), allow(dead_code))]
//! Timing helpers for the shared-clock sync probe.
use std::time::Duration;
/// Deterministic pulse timetable shared by the probe's audio and video
/// generators; every query is a pure function of a stream PTS.
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct PulseSchedule {
    /// Requested quiet lead-in before the first pulse (see `warmup_boundary`).
    warmup: Duration,
    /// Distance between consecutive pulse starts.
    pulse_period: Duration,
    /// Lit/audible width of a regular pulse.
    pulse_width: Duration,
    /// Every Nth pulse is a widened "marker" pulse.
    marker_tick_period: u32,
}
impl PulseSchedule {
    /// Build a schedule.
    ///
    /// # Panics
    /// If the period or width is zero, the width is not strictly smaller than
    /// the period, or the marker tick period is zero.
    pub fn new(
        warmup: Duration,
        pulse_period: Duration,
        pulse_width: Duration,
        marker_tick_period: u32,
    ) -> Self {
        assert!(!pulse_period.is_zero(), "pulse period must stay positive");
        assert!(!pulse_width.is_zero(), "pulse width must stay positive");
        assert!(
            pulse_width < pulse_period,
            "pulse width must stay smaller than the pulse period"
        );
        assert!(
            marker_tick_period > 0,
            "marker tick period must stay positive"
        );
        Self {
            warmup,
            pulse_period,
            pulse_width,
            marker_tick_period,
        }
    }
    /// Requested warmup, before rounding to a pulse boundary.
    pub fn warmup(&self) -> Duration {
        self.warmup
    }
    /// Distance between consecutive pulse starts.
    pub fn pulse_period(&self) -> Duration {
        self.pulse_period
    }
    /// Marker cadence: every Nth pulse is a marker.
    pub fn marker_tick_period(&self) -> u32 {
        self.marker_tick_period
    }
    /// Width of a regular (non-marker) pulse.
    pub fn pulse_width(&self) -> Duration {
        self.pulse_width
    }
    /// Zero-based index of the pulse containing `pts`; everything before the
    /// warmup boundary counts as pulse 0.
    pub fn pulse_index(&self, pts: Duration) -> u64 {
        let boundary = self.warmup_boundary();
        if pts < boundary {
            return 0;
        }
        let period_ns = self.pulse_period.as_nanos().max(1);
        ((pts - boundary).as_nanos() / period_ns) as u64
    }
    /// Offset of `pts` within its pulse; before the warmup boundary this is
    /// simply the elapsed time.
    pub fn pulse_offset(&self, pts: Duration) -> Duration {
        let boundary = self.warmup_boundary();
        if pts < boundary {
            return pts;
        }
        let period_ns = self.pulse_period.as_nanos().max(1);
        let offset_ns = ((pts - boundary).as_nanos() % period_ns) as u64;
        Duration::from_nanos(offset_ns)
    }
    /// Whether the pulse containing `pts` is a marker pulse (index divisible
    /// by the marker tick period). Warmup time is never a marker.
    pub fn pulse_is_marker(&self, pts: Duration) -> bool {
        // `% == 0` matches `is_multiple_of` exactly while keeping the MSRV
        // low; `max(1)` is pure defense — the constructor already rejects 0.
        pts >= self.warmup_boundary()
            && self.pulse_index(pts) % u64::from(self.marker_tick_period.max(1)) == 0
    }
    /// Marker pulses are twice as wide, capped 1 ms short of the period so
    /// the flash always switches off before the next pulse.
    pub fn marker_pulse_width(&self) -> Duration {
        let widened = self.pulse_width.saturating_mul(2);
        widened.min(self.pulse_period.saturating_sub(Duration::from_millis(1)))
    }
    /// Whether the video flash is lit at `pts`.
    pub fn flash_active(&self, pts: Duration) -> bool {
        if pts < self.warmup_boundary() {
            return false;
        }
        let width = if self.pulse_is_marker(pts) {
            self.marker_pulse_width()
        } else {
            self.pulse_width
        };
        self.pulse_offset(pts) < width
    }
    /// Warmup rounded *up* to the next whole pulse period (zero stays zero),
    /// so the first pulse lands exactly on a period boundary.
    pub fn warmup_boundary(&self) -> Duration {
        if self.warmup.is_zero() {
            return Duration::ZERO;
        }
        let period_ns = self.pulse_period.as_nanos().max(1) as u64;
        let warmup_ns = self.warmup.as_nanos() as u64;
        // div_ceil replaces the manual `(a + b - 1) / b`, which could overflow
        // near u64::MAX; semantics are identical for in-range inputs.
        Duration::from_nanos(warmup_ns.div_ceil(period_ns) * period_ns)
    }
    /// When the audio path should open its gate: one pulse width (capped at
    /// 200 ms) ahead of the warmup boundary.
    pub fn audio_gate_open_at(&self) -> Duration {
        let lead = self.pulse_width.min(Duration::from_millis(200));
        self.warmup_boundary().saturating_sub(lead)
    }
    /// PTS of `frame_index` at `fps`; a zero fps falls back to 1 fps.
    pub fn frame_pts(&self, frame_index: u64, fps: u32) -> Duration {
        let frame_step_ns = 1_000_000_000u64 / u64::from(fps.max(1));
        Duration::from_nanos(frame_index.saturating_mul(frame_step_ns))
    }
}
#[cfg(test)]
mod tests {
    use super::PulseSchedule;
    use std::time::Duration;

    // Flash turns on exactly at the warmup boundary and obeys marker vs
    // regular widths on subsequent pulses.
    #[test]
    fn flash_windows_follow_the_expected_pulse_boundaries() {
        let schedule = PulseSchedule::new(
            Duration::from_secs(4),
            Duration::from_millis(1_000),
            Duration::from_millis(120),
            5,
        );
        assert!(!schedule.flash_active(Duration::from_millis(3_999)));
        assert!(schedule.flash_active(Duration::from_millis(4_000)));
        // Pulse 0 is a marker, so its widened 240 ms window is still lit here.
        assert!(schedule.flash_active(Duration::from_millis(4_200)));
        assert!(!schedule.flash_active(Duration::from_millis(4_240)));
        assert!(!schedule.flash_active(Duration::from_millis(4_999)));
        assert!(schedule.flash_active(Duration::from_millis(5_000)));
        assert!(!schedule.flash_active(Duration::from_millis(5_120)));
    }

    // Index and offset wrap cleanly across period boundaries, and warmup
    // rounds up to a whole period (2 s -> 3 x 750 ms = 2.25 s).
    #[test]
    fn pulse_index_and_offset_repeat_cleanly() {
        let schedule = PulseSchedule::new(
            Duration::from_secs(2),
            Duration::from_millis(750),
            Duration::from_millis(90),
            3,
        );
        assert_eq!(schedule.pulse_index(Duration::from_millis(0)), 0);
        assert_eq!(schedule.warmup_boundary(), Duration::from_millis(2_250));
        assert_eq!(schedule.pulse_index(Duration::from_millis(2_999)), 0);
        assert_eq!(schedule.pulse_index(Duration::from_millis(3_000)), 1);
        assert_eq!(
            schedule.pulse_offset(Duration::from_millis(2_320)),
            Duration::from_millis(70)
        );
        assert_eq!(
            schedule.pulse_offset(Duration::from_millis(3_750)),
            Duration::from_millis(0)
        );
    }

    // frame_pts uses integer nanoseconds per frame, so 30 fps truncates to
    // 33_333_333 ns and 30 frames land just shy of one second.
    #[test]
    fn frame_pts_respects_requested_framerate() {
        let schedule = PulseSchedule::new(
            Duration::ZERO,
            Duration::from_secs(1),
            Duration::from_millis(100),
            5,
        );
        assert_eq!(schedule.frame_pts(0, 30), Duration::from_nanos(0));
        assert_eq!(schedule.frame_pts(1, 30), Duration::from_nanos(33_333_333));
        assert_eq!(
            schedule.frame_pts(30, 30),
            Duration::from_nanos(999_999_990)
        );
    }

    // Accessors echo the constructor inputs; fps=0 falls back to 1 fps.
    #[test]
    fn getters_and_zero_fps_fallback_stay_stable() {
        let schedule = PulseSchedule::new(
            Duration::from_secs(3),
            Duration::from_millis(800),
            Duration::from_millis(90),
            7,
        );
        assert_eq!(schedule.warmup(), Duration::from_secs(3));
        assert_eq!(schedule.warmup_boundary(), Duration::from_millis(3_200));
        assert_eq!(schedule.audio_gate_open_at(), Duration::from_millis(3_110));
        assert_eq!(schedule.pulse_period(), Duration::from_millis(800));
        assert_eq!(schedule.pulse_width(), Duration::from_millis(90));
        assert_eq!(schedule.marker_tick_period(), 7);
        assert_eq!(schedule.frame_pts(1, 0), Duration::from_secs(1));
    }

    // Marker pulses double the width (capped below the period) so a decoder
    // can tell them apart from regular pulses.
    #[test]
    fn marker_pulses_are_detectably_wider_than_regular_pulses() {
        let schedule = PulseSchedule::new(
            Duration::from_secs(1),
            Duration::from_millis(1_000),
            Duration::from_millis(120),
            5,
        );
        assert!(schedule.pulse_is_marker(Duration::from_millis(1_000)));
        assert_eq!(schedule.marker_pulse_width(), Duration::from_millis(240));
        assert!(schedule.flash_active(Duration::from_millis(1_200)));
        assert!(!schedule.flash_active(Duration::from_millis(1_241)));
        assert!(!schedule.pulse_is_marker(Duration::from_millis(2_000)));
        assert!(!schedule.flash_active(Duration::from_millis(2_200)));
    }

    // Constructor invariants panic with the exact documented messages.
    #[test]
    #[should_panic(expected = "pulse period must stay positive")]
    fn constructor_rejects_zero_period() {
        let _ = PulseSchedule::new(Duration::ZERO, Duration::ZERO, Duration::from_millis(50), 1);
    }

    #[test]
    #[should_panic(expected = "pulse width must stay smaller than the pulse period")]
    fn constructor_rejects_width_not_smaller_than_period() {
        let _ = PulseSchedule::new(
            Duration::ZERO,
            Duration::from_millis(100),
            Duration::from_millis(100),
            1,
        );
    }
}

View File

@ -1,6 +1,6 @@
[package]
name = "lesavka_common"
version = "0.12.4"
version = "0.13.0"
edition = "2024"
build = "build.rs"

View File

@ -181,6 +181,11 @@ Hardware-facing assumptions belong near the code that uses them; this file is th
| `LESAVKA_SONAR_ENFORCE` | CI gate enforcement override |
| `LESAVKA_SUPPLY_CHAIN_ENFORCE_TOOLS` | CI gate enforcement override |
| `LESAVKA_TAP_AUDIO` | client media capture/playback override |
| `LESAVKA_UAC_BUFFER_TIME_US` | server audio sink latency override |
| `LESAVKA_UAC_COMPENSATION_US` | server audio sink latency override |
| `LESAVKA_UAC_DEV` | server hardware/device override |
| `LESAVKA_UAC_HDMI_COMPENSATION_US` | server audio sink latency override |
| `LESAVKA_UAC_LATENCY_TIME_US` | server audio sink latency override |
| `LESAVKA_TEST_CAM_U32` | test/build contract variable; not runtime operator config |
| `LESAVKA_TEST_CAP_CAMERA` | test/build contract variable; not runtime operator config |
| `LESAVKA_TEST_CAP_MIC` | test/build contract variable; not runtime operator config |

View File

@ -16,6 +16,14 @@
"line_percent": 100.0,
"loc": 304
},
"client/src/bin/lesavka-sync-analyze.rs": {
"line_percent": 95.0,
"loc": 98
},
"client/src/bin/lesavka-sync-probe.rs": {
"line_percent": 100.0,
"loc": 19
},
"client/src/handshake.rs": {
"line_percent": 100.0,
"loc": 381
@ -29,8 +37,8 @@
"loc": 69
},
"client/src/input/camera/capture_pipeline.rs": {
"line_percent": 99.28,
"loc": 254
"line_percent": 99.32,
"loc": 272
},
"client/src/input/camera/device_selection.rs": {
"line_percent": 97.62,
@ -86,7 +94,7 @@
},
"client/src/input/microphone.rs": {
"line_percent": 100.0,
"loc": 398
"loc": 416
},
"client/src/input/mouse.rs": {
"line_percent": 98.85,
@ -130,7 +138,7 @@
},
"client/src/launcher/ui.rs": {
"line_percent": 100.0,
"loc": 184
"loc": 182
},
"client/src/launcher/ui/session_preview_coverage.rs": {
"line_percent": 100.0,
@ -140,6 +148,10 @@
"line_percent": 97.56,
"loc": 78
},
"client/src/live_capture_clock.rs": {
"line_percent": 100.0,
"loc": 56
},
"client/src/main.rs": {
"line_percent": 100.0,
"loc": 101
@ -168,6 +180,50 @@
"line_percent": 100.0,
"loc": 82
},
"client/src/sync_probe/analyze.rs": {
"line_percent": 97.92,
"loc": 86
},
"client/src/sync_probe/analyze/media_extract.rs": {
"line_percent": 98.36,
"loc": 240
},
"client/src/sync_probe/analyze/onset_detection.rs": {
"line_percent": 96.77,
"loc": 274
},
"client/src/sync_probe/analyze/onset_detection/correlation.rs": {
"line_percent": 100.0,
"loc": 310
},
"client/src/sync_probe/analyze/report.rs": {
"line_percent": 100.0,
"loc": 59
},
"client/src/sync_probe/analyze/test_support.rs": {
"line_percent": 98.44,
"loc": 85
},
"client/src/sync_probe/capture.rs": {
"line_percent": 100.0,
"loc": 449
},
"client/src/sync_probe/capture/coverage_stub.rs": {
"line_percent": 100.0,
"loc": 34
},
"client/src/sync_probe/config.rs": {
"line_percent": 98.03,
"loc": 214
},
"client/src/sync_probe/runner.rs": {
"line_percent": 95.65,
"loc": 173
},
"client/src/sync_probe/schedule.rs": {
"line_percent": 98.74,
"loc": 234
},
"client/src/uplink_fresh_queue.rs": {
"line_percent": 100.0,
"loc": 288
@ -222,7 +278,7 @@
},
"server/src/audio/voice_input.rs": {
"line_percent": 100.0,
"loc": 204
"loc": 358
},
"server/src/bin/lesavka_uvc/control_payloads.rs": {
"line_percent": 100.0,
@ -244,6 +300,10 @@
"line_percent": 100.0,
"loc": 471
},
"server/src/camera/selection.rs": {
"line_percent": 97.67,
"loc": 372
},
"server/src/camera_runtime.rs": {
"line_percent": 95.52,
"loc": 211
@ -278,7 +338,7 @@
},
"server/src/main.rs": {
"line_percent": 100.0,
"loc": 95
"loc": 96
},
"server/src/main/entrypoint.rs": {
"line_percent": 100.0,
@ -294,15 +354,15 @@
},
"server/src/main/handler_startup.rs": {
"line_percent": 100.0,
"loc": 130
"loc": 131
},
"server/src/main/relay_service.rs": {
"line_percent": 100.0,
"loc": 242
"loc": 289
},
"server/src/main/relay_service_coverage.rs": {
"line_percent": 98.31,
"loc": 138
"line_percent": 100.0,
"loc": 179
},
"server/src/main/rpc_helpers.rs": {
"line_percent": 100.0,
@ -328,6 +388,10 @@
"line_percent": 100.0,
"loc": 90
},
"server/src/upstream_media_runtime.rs": {
"line_percent": 100.0,
"loc": 369
},
"server/src/uvc_runtime.rs": {
"line_percent": 98.48,
"loc": 241
@ -346,15 +410,15 @@
},
"server/src/video_sinks/hdmi_sink.rs": {
"line_percent": 100.0,
"loc": 354
"loc": 393
},
"server/src/video_sinks/webcam_sink.rs": {
"line_percent": 100.0,
"loc": 199
},
"server/src/video_support.rs": {
"line_percent": 97.48,
"loc": 236
"line_percent": 97.74,
"loc": 263
}
}
}

View File

@ -0,0 +1,306 @@
#!/usr/bin/env python3
import argparse
import http.server
import json
import socketserver
import threading
import time
from pathlib import Path
def parse_args() -> argparse.Namespace:
    """Parse command-line options for the local browser probe page server."""
    cli = argparse.ArgumentParser(description="Serve a local browser consumer probe page")
    # Bind address for the local HTTP server.
    cli.add_argument("--host", default="127.0.0.1")
    cli.add_argument("--port", type=int, default=18443)
    # Where the uploaded capture blob and the JSON status file land.
    cli.add_argument("--output", default="/tmp/lesavka-browser-av-sync.webm")
    cli.add_argument("--status", default="/tmp/lesavka-browser-av-sync-status.json")
    # How long the in-page MediaRecorder runs once started.
    cli.add_argument("--duration-seconds", type=int, default=15)
    return cli.parse_args()
class ProbeState:
    """Thread-safe shared state for the browser probe HTTP handlers.

    Holds the status dictionary mirrored to disk (atomically, via a temp file
    rename) plus the start token the page polls to begin recording.
    """

    def __init__(self, output_path: Path, status_path: Path, duration_seconds: int) -> None:
        self.output_path = output_path
        self.status_path = status_path
        self.duration_seconds = duration_seconds
        # RLock: request_start()/store_upload() build their responses by
        # calling snapshot() while already holding the lock; a plain
        # non-reentrant Lock would deadlock on that nested acquisition.
        self.lock = threading.RLock()
        self.start_token = 0
        self.status = {
            "booted_at": time.time(),
            "ready": False,
            "recording": False,
            "uploaded": False,
            "last_error": None,
            "selected_video": None,
            "selected_audio": None,
            "devices": [],
            "page_message": "booting",
            "last_update": time.time(),
        }
        self.write_status()

    def write_status(self) -> None:
        """Publish the status dict to disk via write-temp-then-rename."""
        self.status_path.parent.mkdir(parents=True, exist_ok=True)
        tmp = self.status_path.with_suffix(".tmp")
        tmp.write_text(json.dumps(self.status, indent=2, sort_keys=True), encoding="utf-8")
        # replace() is atomic on POSIX, so readers never see a partial file.
        tmp.replace(self.status_path)

    def update(self, payload: dict) -> None:
        """Merge `payload` into the status dict and persist it."""
        with self.lock:
            self.status.update(payload)
            self.status["last_update"] = time.time()
            self.write_status()

    def snapshot(self) -> dict:
        """Return a copy of the status plus the start token and duration."""
        with self.lock:
            snap = dict(self.status)
            snap["start_token"] = self.start_token
            snap["duration_seconds"] = self.duration_seconds
            return snap

    def request_start(self) -> dict:
        """Bump the start token so the page begins a fresh recording."""
        with self.lock:
            self.start_token += 1
            self.status.update({
                "recording": False,
                "uploaded": False,
                "last_error": None,
                "page_message": "start requested",
                "start_requested_at": time.time(),
            })
            self.write_status()
            return self.snapshot()

    def store_upload(self, blob: bytes) -> dict:
        """Persist the uploaded capture blob and mark the run as uploaded."""
        with self.lock:
            self.output_path.parent.mkdir(parents=True, exist_ok=True)
            self.output_path.write_bytes(blob)
            self.status.update({
                "uploaded": True,
                "recording": False,
                "page_message": f"capture uploaded to {self.output_path}",
                "upload_size": len(blob),
                "uploaded_at": time.time(),
            })
            self.write_status()
            return self.snapshot()
def page_html(duration_seconds: int) -> str:
duration_ms = duration_seconds * 1000
return f"""<!doctype html>
<html>
<head>
<meta charset=\"utf-8\">
<title>Lesavka Browser Sync Probe</title>
<style>
body {{ margin: 0; background: #0f131a; color: #e7edf6; font: 16px/1.4 system-ui, sans-serif; }}
.wrap {{ padding: 16px; display: grid; grid-template-columns: 1.1fr 0.9fr; gap: 16px; min-height: 100vh; box-sizing: border-box; }}
.panel {{ background: #1b212c; border: 1px solid #2a3342; border-radius: 12px; padding: 12px; box-sizing: border-box; }}
h1 {{ margin: 0 0 10px; font-size: 22px; }}
h2 {{ margin: 0 0 8px; font-size: 16px; }}
video {{ width: 100%; aspect-ratio: 16 / 9; background: black; border-radius: 10px; object-fit: contain; }}
pre {{ white-space: pre-wrap; word-break: break-word; margin: 0; font: 13px/1.35 ui-monospace, monospace; }}
.meter {{ height: 18px; background: #111722; border-radius: 999px; overflow: hidden; border: 1px solid #2a3342; }}
.bar {{ height: 100%; width: 0%; background: linear-gradient(90deg, #2fbf71, #ffd36e, #ff6b6b); transition: width 120ms linear; }}
.row {{ margin: 10px 0; }}
</style>
</head>
<body>
<div class=\"wrap\">
<div class=\"panel\">
<h1>Lesavka Browser Sync Probe</h1>
<video id=\"video\" autoplay playsinline muted></video>
<div class=\"row\"><div class=\"meter\"><div id=\"bar\" class=\"bar\"></div></div></div>
</div>
<div class=\"panel\">
<h2>Status</h2>
<pre id=\"status\">booting…</pre>
</div>
</div>
<script>
const videoEl = document.getElementById('video');
const statusEl = document.getElementById('status');
const barEl = document.getElementById('bar');
let stream = null;
let recorder = null;
let chunks = [];
let startToken = 0;
let analyser = null;
let audioCtx = null;
let recording = false;
let heartbeatCounter = 0;
function setStatus(lines) {{ statusEl.textContent = lines.join('\\n'); }}
async function postJson(path, payload) {{
await fetch(path, {{ method: 'POST', headers: {{ 'Content-Type': 'application/json' }}, body: JSON.stringify(payload) }});
}}
async function postBlob(path, blob) {{
await fetch(path, {{ method: 'POST', headers: {{ 'Content-Type': 'application/octet-stream' }}, body: blob }});
}}
function fmtDevice(d) {{ return `${{d.kind}}: ${{d.label || '(unlabeled)'}}`; }}
function meterLoop() {{
if (!analyser) return;
const data = new Uint8Array(analyser.fftSize);
const tick = () => {{
if (!analyser) return;
analyser.getByteTimeDomainData(data);
let peak = 0;
for (let i = 0; i < data.length; i++) peak = Math.max(peak, Math.abs(data[i] - 128));
const pct = Math.min(100, Math.round((peak / 128) * 100));
barEl.style.width = pct + '%';
requestAnimationFrame(tick);
}};
requestAnimationFrame(tick);
}}
function attachMeter(track) {{
if (!track) return;
try {{
audioCtx = new AudioContext();
const source = audioCtx.createMediaStreamSource(new MediaStream([track]));
analyser = audioCtx.createAnalyser();
analyser.fftSize = 2048;
source.connect(analyser);
meterLoop();
}} catch (err) {{
console.warn('meter setup failed', err);
}}
}}
async function initStream() {{
const lines = [];
try {{
await postJson('/status', {{ page_message: 'page loaded' }});
lines.push('requesting permission…');
await postJson('/status', {{ page_message: 'requesting permission' }});
const warm = await navigator.mediaDevices.getUserMedia({{ video: true, audio: true }});
warm.getTracks().forEach(track => track.stop());
await postJson('/status', {{ page_message: 'permission granted' }});
const devices = await navigator.mediaDevices.enumerateDevices();
await postJson('/status', {{ page_message: 'devices enumerated', devices: devices.map(fmtDevice) }});
const videoIn = devices.find(d => d.kind === 'videoinput' && /UGREEN/i.test(d.label)) || devices.find(d => d.kind === 'videoinput');
const audioIn = devices.find(d => d.kind === 'audioinput' && /(Multifunction Composite Gadget|Lesavka Composite)/i.test(d.label)) || devices.find(d => d.kind === 'audioinput');
stream = await navigator.mediaDevices.getUserMedia({{
video: videoIn ? {{ deviceId: {{ exact: videoIn.deviceId }} }} : true,
audio: audioIn ? {{ deviceId: {{ exact: audioIn.deviceId }} }} : true,
}});
await postJson('/status', {{ page_message: 'media stream opened' }});
videoEl.srcObject = stream;
attachMeter(stream.getAudioTracks()[0]);
const payload = {{
ready: true,
selected_video: videoIn ? fmtDevice(videoIn) : null,
selected_audio: audioIn ? fmtDevice(audioIn) : null,
devices: devices.map(fmtDevice),
page_message: 'ready for start',
last_error: null,
}};
await postJson('/status', payload);
lines.push('ready');
lines.push('video: ' + payload.selected_video);
lines.push('audio: ' + payload.selected_audio);
setStatus(lines);
}} catch (err) {{
const message = String(err && (err.stack || err));
await postJson('/status', {{ ready: false, last_error: message, page_message: 'permission or stream setup failed' }});
setStatus(['consumer status: FAIL', message]);
}}
}}
async function maybeStartRecording() {{
if (!stream || recording) return;
try {{
const response = await fetch('/command').then(r => r.json());
if (response.start_token === startToken) return;
startToken = response.start_token;
await postJson('/status', {{ page_message: 'start token observed', observed_start_token: startToken }});
recording = true;
chunks = [];
const preferredMime = 'video/webm;codecs=vp8,opus';
const options = MediaRecorder.isTypeSupported(preferredMime) ? {{ mimeType: preferredMime }} : undefined;
recorder = options ? new MediaRecorder(stream, options) : new MediaRecorder(stream);
await postJson('/status', {{ recording: true, uploaded: false, page_message: 'recording' }});
recorder.ondataavailable = event => {{ if (event.data && event.data.size > 0) chunks.push(event.data); }};
recorder.onstop = async () => {{
const blob = new Blob(chunks, {{ type: recorder.mimeType || 'video/webm' }});
await postBlob('/upload', blob);
recording = false;
}};
recorder.start(250);
setTimeout(() => recorder && recorder.state !== 'inactive' && recorder.stop(), response.duration_seconds * 1000);
}} catch (err) {{
recording = false;
const message = String(err && (err.stack || err));
await postJson('/status', {{ last_error: message, page_message: 'recording setup failed', recording: false }});
}}
}}
setInterval(() => {{
heartbeatCounter += 1;
void postJson('/status', {{ heartbeat_counter: heartbeatCounter, page_message: stream ? 'ready heartbeat' : 'boot heartbeat' }});
}}, 1000);
setInterval(() => {{ void maybeStartRecording(); }}, 250);
void initStream();
</script>
</body>
</html>"""
class ProbeHandler(http.server.BaseHTTPRequestHandler):
    """HTTP endpoints backing the in-browser probe page.

    GET  "/", "/index.html"   -> probe page HTML
    GET  "/command", "/status" -> current state snapshot as JSON
    POST "/status"             -> merge a JSON payload into shared state
    POST "/start"              -> bump the start token (page begins recording)
    POST "/upload"             -> persist the recorded capture blob
    """

    # Injected by main() onto a per-run subclass; shared by all requests.
    state: ProbeState

    def _send(self, code: int, body: bytes, content_type: str = "application/json") -> None:
        # Minimal response helper: status line, explicit Content-Length, body.
        self.send_response(code)
        self.send_header("Content-Type", content_type)
        self.send_header("Content-Length", str(len(body)))
        self.end_headers()
        self.wfile.write(body)

    def do_GET(self) -> None:
        if self.path in ("/", "/index.html"):
            # Track how many times the page was served (useful when debugging
            # reloads) before handing back the rendered HTML.
            snap = self.state.snapshot()
            self.state.update({
                "page_message": "html served",
                "html_served_count": int(snap.get("html_served_count", 0)) + 1,
            })
            self._send(200, page_html(self.state.duration_seconds).encode("utf-8"), "text/html; charset=utf-8")
            return
        if self.path == "/command":
            self._send(200, json.dumps(self.state.snapshot()).encode("utf-8"))
            return
        if self.path == "/status":
            self._send(200, json.dumps(self.state.snapshot()).encode("utf-8"))
            return
        self._send(404, b"not found", "text/plain; charset=utf-8")

    def do_POST(self) -> None:
        # Read the full request body up front; all POST routes consume it.
        length = int(self.headers.get("Content-Length", "0"))
        body = self.rfile.read(length)
        if self.path == "/status":
            payload = json.loads(body.decode("utf-8"))
            self.state.update(payload)
            self._send(200, json.dumps(self.state.snapshot()).encode("utf-8"))
            return
        if self.path == "/start":
            self._send(200, json.dumps(self.state.request_start()).encode("utf-8"))
            return
        if self.path == "/upload":
            self._send(200, json.dumps(self.state.store_upload(body)).encode("utf-8"))
            return
        self._send(404, b"not found", "text/plain; charset=utf-8")

    def log_message(self, fmt: str, *args) -> None:
        # Silence the default per-request stderr logging; status lives in the
        # JSON status file instead.
        pass
def main() -> None:
    """Parse CLI arguments, bind the probe HTTP server, and serve forever."""
    args = parse_args()
    probe_state = ProbeState(Path(args.output), Path(args.status), args.duration_seconds)

    # A per-run subclass carries the state so the handler class itself stays clean.
    class Handler(ProbeHandler):
        pass

    Handler.state = probe_state
    with socketserver.TCPServer((args.host, args.port), Handler) as httpd:
        httpd.serve_forever()
# Run the probe server only when executed as a script, not on import.
if __name__ == "__main__":
    main()

View File

@ -0,0 +1,102 @@
#!/usr/bin/env bash
# scripts/manual/run_upstream_av_sync.sh
#
# Manual: capture the real Tethys webcam/mic endpoints while the shared-clock
# sync probe streams upstream media through Lesavka, then analyze the skew.
set -euo pipefail

# Resolve the repository root relative to this script's location.
SCRIPT_DIR="$(cd -- "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
REPO_ROOT="$(cd -- "${SCRIPT_DIR}/../.." >/dev/null 2>&1 && pwd)"

# Every knob is environment-overridable; defaults target the lab Tethys host.
TETHYS_HOST=${TETHYS_HOST:-tethys}
LESAVKA_SERVER_ADDR=${LESAVKA_SERVER_ADDR:-http://38.28.125.112:50051}
PROBE_DURATION_SECONDS=${PROBE_DURATION_SECONDS:-10}
PROBE_WARMUP_SECONDS=${PROBE_WARMUP_SECONDS:-4}
LEAD_IN_SECONDS=${LEAD_IN_SECONDS:-8}
TAIL_SECONDS=${TAIL_SECONDS:-2}
# The remote capture must outlast warmup + probe + lead-in, plus a safety tail.
CAPTURE_SECONDS=${CAPTURE_SECONDS:-$((PROBE_DURATION_SECONDS + PROBE_WARMUP_SECONDS + LEAD_IN_SECONDS + TAIL_SECONDS))}
REMOTE_CAPTURE=${REMOTE_CAPTURE:-/tmp/lesavka-upstream-av-sync.mkv}
LOCAL_OUTPUT_DIR=${LOCAL_OUTPUT_DIR:-"${REPO_ROOT}/tmp"}
VIDEO_SIZE=${VIDEO_SIZE:-1280x720}
VIDEO_FPS=${VIDEO_FPS:-30}
VIDEO_FORMAT=${VIDEO_FORMAT:-mjpeg}
# SSH_OPTS is deliberately expanded unquoted at call sites so options word-split.
SSH_OPTS=${SSH_OPTS:-"-o BatchMode=yes -o ConnectTimeout=5"}

mkdir -p "${LOCAL_OUTPUT_DIR}"
STAMP="$(date +%Y%m%d-%H%M%S)"
LOCAL_CAPTURE="${LOCAL_OUTPUT_DIR}/lesavka-upstream-av-sync-${STAMP}.mkv"

# Start the remote ffmpeg capture in the background. Positional args feed the
# quoted heredoc script, so nothing inside it expands locally.
echo "==> starting Tethys capture on ${TETHYS_HOST}"
ssh ${SSH_OPTS} "${TETHYS_HOST}" bash -s -- \
    "${REMOTE_CAPTURE}" \
    "${CAPTURE_SECONDS}" \
    "${VIDEO_SIZE}" \
    "${VIDEO_FPS}" \
    "${VIDEO_FORMAT}" <<'REMOTE_CAPTURE_SCRIPT' &
set -euo pipefail
remote_capture=$1
capture_seconds=$2
video_size=$3
video_fps=$4
video_format=$5
rm -f "${remote_capture}"
video_args=(-f video4linux2 -framerate "${video_fps}" -video_size "${video_size}")
if [[ -n "${video_format}" ]]; then
video_args+=(-input_format "${video_format}")
fi
ffmpeg -hide_banner -loglevel error -y \
-thread_queue_size 1024 \
"${video_args[@]}" \
-i /dev/video0 \
-thread_queue_size 1024 \
-f alsa -ac 2 -ar 48000 \
-i hw:3,0 \
-t "${capture_seconds}" \
-c:v ffv1 -level 3 -g 1 \
-c:a pcm_s16le \
"${remote_capture}"
REMOTE_CAPTURE_SCRIPT
capture_pid=$!

# Give the remote capture a head start before streaming through Lesavka.
sleep "${LEAD_IN_SECONDS}"

echo "==> running local Lesavka sync probe against ${LESAVKA_SERVER_ADDR}"
# Capture the probe's exit status without tripping `set -e` so the remote
# recording can still be reaped and fetched below.
probe_status=0
(
    cd "${REPO_ROOT}"
    cargo run -p lesavka_client --bin lesavka-sync-probe -- \
        --server "${LESAVKA_SERVER_ADDR}" \
        --duration-seconds "${PROBE_DURATION_SECONDS}" \
        --warmup-seconds "${PROBE_WARMUP_SECONDS}"
) || probe_status=$?

# Reap the backgrounded ssh/ffmpeg capture and keep its exit status too.
capture_status=0
wait "${capture_pid}" || capture_status=$?

# Fetch whatever was recorded before deciding on failure, so even partial
# captures survive for inspection.
if ssh ${SSH_OPTS} "${TETHYS_HOST}" "test -f '${REMOTE_CAPTURE}'"; then
    echo "==> fetching capture back to ${LOCAL_CAPTURE}"
    scp ${SSH_OPTS} "${TETHYS_HOST}:${REMOTE_CAPTURE}" "${LOCAL_CAPTURE}"
fi

if [[ "${probe_status}" -ne 0 ]]; then
    echo "sync probe failed with status ${probe_status}" >&2
    [[ -f "${LOCAL_CAPTURE}" ]] && echo "partial capture preserved at ${LOCAL_CAPTURE}" >&2
    exit "${probe_status}"
fi
if [[ "${capture_status}" -ne 0 ]]; then
    echo "Tethys capture failed with status ${capture_status}" >&2
    [[ -f "${LOCAL_CAPTURE}" ]] && echo "partial capture preserved at ${LOCAL_CAPTURE}" >&2
    exit "${capture_status}"
fi

echo "==> analyzing capture"
(
    cd "${REPO_ROOT}"
    cargo run -p lesavka_client --bin lesavka-sync-analyze -- "${LOCAL_CAPTURE}"
)
echo "==> done"
echo "capture: ${LOCAL_CAPTURE}"

View File

@ -0,0 +1,168 @@
#!/usr/bin/env bash
# scripts/manual/run_upstream_browser_av_sync.sh
#
# Drive a real browser consumer on Tethys, record the combined MediaStream,
# pull the capture back, and analyze it with the Lesavka sync analyzer.
set -euo pipefail

# Resolve the repository root relative to this script's location.
SCRIPT_DIR="$(cd -- "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
REPO_ROOT="$(cd -- "${SCRIPT_DIR}/../.." >/dev/null 2>&1 && pwd)"

# Every knob is environment-overridable; defaults target the lab Tethys host.
TETHYS_HOST=${TETHYS_HOST:-tethys}
LESAVKA_SERVER_ADDR=${LESAVKA_SERVER_ADDR:-http://38.28.125.112:50051}
PROBE_DURATION_SECONDS=${PROBE_DURATION_SECONDS:-15}
BROWSER_PORT=${BROWSER_PORT:-18443}
REMOTE_SCRIPT=${REMOTE_SCRIPT:-/tmp/lesavka-browser-consumer-probe.py}
REMOTE_CAPTURE=${REMOTE_CAPTURE:-/tmp/lesavka-browser-av-sync.webm}
REMOTE_STATUS=${REMOTE_STATUS:-/tmp/lesavka-browser-av-sync-status.json}
REMOTE_PROFILE_DIR=${REMOTE_PROFILE_DIR:-/tmp/lesavka-browser-probe-profile}
LOCAL_OUTPUT_DIR=${LOCAL_OUTPUT_DIR:-"${REPO_ROOT}/tmp"}
# SSH_OPTS is deliberately expanded unquoted at call sites so options word-split.
SSH_OPTS=${SSH_OPTS:-"-o BatchMode=yes -o ConnectTimeout=5"}
DISPLAY_ENV=${DISPLAY_ENV:-":0"}
REMOTE_RUNTIME_DIR=${REMOTE_RUNTIME_DIR:-/run/user/1000}
# NOTE(review): REMOTE_DBUS_ADDRESS / REMOTE_XAUTHORITY are accepted for
# compatibility but currently unused — the remote setup sniffs both from a
# running firefox-esr process instead. Confirm before removing.
REMOTE_DBUS_ADDRESS=${REMOTE_DBUS_ADDRESS:-}
REMOTE_XAUTHORITY=${REMOTE_XAUTHORITY:-}
READY_TIMEOUT_SECONDS=${READY_TIMEOUT_SECONDS:-120}

mkdir -p "${LOCAL_OUTPUT_DIR}"
STAMP="$(date +%Y%m%d-%H%M%S)"
LOCAL_CAPTURE="${LOCAL_OUTPUT_DIR}/lesavka-browser-av-sync-${STAMP}.webm"

# Ship the probe server to Tethys, then launch it plus a dedicated Firefox
# profile via the quoted heredoc (nothing inside it expands locally).
scp ${SSH_OPTS} "${REPO_ROOT}/scripts/manual/browser_consumer_probe.py" "${TETHYS_HOST}:${REMOTE_SCRIPT}"
ssh ${SSH_OPTS} "${TETHYS_HOST}" bash -s -- \
    "${REMOTE_SCRIPT}" \
    "${REMOTE_CAPTURE}" \
    "${REMOTE_STATUS}" \
    "${REMOTE_PROFILE_DIR}" \
    "${PROBE_DURATION_SECONDS}" \
    "${BROWSER_PORT}" \
    "${DISPLAY_ENV}" \
    "${REMOTE_RUNTIME_DIR}" <<'REMOTE_SETUP'
set -euo pipefail
remote_script=$1
remote_capture=$2
remote_status=$3
remote_profile_dir=$4
duration=$5
port=$6
display_env=$7
runtime_dir=$8
dbus_address=""
xauthority_path=""
firefox_pid="$(pgrep -n -x firefox-esr || true)"
if [[ -n "${firefox_pid}" && -r "/proc/${firefox_pid}/environ" ]]; then
while IFS='=' read -r key value; do
case "$key" in
DBUS_SESSION_BUS_ADDRESS) dbus_address="$value" ;;
XAUTHORITY) xauthority_path="$value" ;;
DISPLAY) [[ -z "${display_env}" || "${display_env}" == ":0" ]] && display_env="$value" ;;
esac
done < <(tr '\0' '\n' <"/proc/${firefox_pid}/environ")
fi
[[ -z "${dbus_address}" ]] && dbus_address="unix:path=${runtime_dir}/bus"
fuser -k "${port}/tcp" >/dev/null 2>&1 || true
pkill -f "firefox.*${remote_profile_dir}" >/dev/null 2>&1 || true
for _ in $(seq 1 20); do
if ! pgrep -f "firefox.*${remote_profile_dir}" >/dev/null 2>&1; then
break
fi
sleep 0.25
done
rm -f "$remote_capture" "$remote_status"
rm -rf "$remote_profile_dir"
mkdir -p "$remote_profile_dir"
cat >"${remote_profile_dir}/user.js" <<'FIREFOX_PREFS'
user_pref("media.navigator.permission.disabled", true);
user_pref("permissions.default.camera", 1);
user_pref("permissions.default.microphone", 1);
user_pref("media.autoplay.default", 0);
user_pref("media.autoplay.blocking_policy", 0);
user_pref("toolkit.telemetry.reportingpolicy.firstRun", false);
user_pref("browser.shell.checkDefaultBrowser", false);
user_pref("browser.tabs.warnOnClose", false);
user_pref("browser.startup.page", 1);
user_pref("browser.startup.homepage_override.mstone", "ignore");
user_pref("startup.homepage_welcome_url", "");
user_pref("startup.homepage_welcome_url.additional", "");
user_pref("browser.aboutwelcome.enabled", false);
user_pref("trailhead.firstrun.didSeeAboutWelcome", true);
FIREFOX_PREFS
printf 'user_pref("browser.startup.homepage", "http://127.0.0.1:%s/");\n' "$port" >>"${remote_profile_dir}/user.js"
nohup python3 "$remote_script" --port "$port" --output "$remote_capture" --status "$remote_status" --duration-seconds "$duration" >/tmp/lesavka-browser-consumer-probe.log 2>&1 &
if [[ -n "${xauthority_path}" ]]; then
nohup env DISPLAY="$display_env" XDG_RUNTIME_DIR="$runtime_dir" DBUS_SESSION_BUS_ADDRESS="$dbus_address" XAUTHORITY="$xauthority_path" \
firefox --new-instance --no-remote --profile "$remote_profile_dir" \
>/tmp/lesavka-browser-consumer-firefox.log 2>&1 &
else
nohup env DISPLAY="$display_env" XDG_RUNTIME_DIR="$runtime_dir" DBUS_SESSION_BUS_ADDRESS="$dbus_address" \
firefox --new-instance --no-remote --profile "$remote_profile_dir" \
>/tmp/lesavka-browser-consumer-firefox.log 2>&1 &
fi
REMOTE_SETUP

# Poll the probe's status file until the page reports ready (or we time out).
echo "==> waiting for browser consumer to become ready on ${TETHYS_HOST}"
deadline=$(( $(date +%s) + READY_TIMEOUT_SECONDS ))
while true; do
    status_json=$(ssh ${SSH_OPTS} "${TETHYS_HOST}" "test -f '${REMOTE_STATUS}' && cat '${REMOTE_STATUS}'" || true)
    if [[ -n "${status_json}" ]]; then
        if STATUS_JSON="${status_json}" python3 -c 'import json, os, sys; status = json.loads(os.environ["STATUS_JSON"]); sys.exit(0 if status.get("ready") else 1)'
        then
            echo "==> browser consumer ready"
            break
        fi
    fi
    if (( $(date +%s) >= deadline )); then
        echo "browser consumer did not become ready before timeout" >&2
        [[ -n "${status_json:-}" ]] && echo "last status: ${status_json}" >&2
        exit 1
    fi
    sleep 1
done

echo "==> triggering browser recording"
ssh ${SSH_OPTS} "${TETHYS_HOST}" "curl -fsS -X POST http://127.0.0.1:${BROWSER_PORT}/start >/dev/null"
sleep 1

echo "==> running local Lesavka sync probe against ${LESAVKA_SERVER_ADDR}"
(
    cd "${REPO_ROOT}"
    cargo run -p lesavka_client --bin lesavka-sync-probe -- \
        --server "${LESAVKA_SERVER_ADDR}" \
        --duration-seconds "${PROBE_DURATION_SECONDS}"
)

echo "==> waiting for browser recording upload"
# Fixed local variable typo: was `dealine_upload` (worked only because the
# same misspelling appeared at both the definition and the use site).
deadline_upload=$(( $(date +%s) + PROBE_DURATION_SECONDS + 60 ))
while true; do
    status_json=$(ssh ${SSH_OPTS} "${TETHYS_HOST}" "test -f '${REMOTE_STATUS}' && cat '${REMOTE_STATUS}'" || true)
    if [[ -n "${status_json}" ]]; then
        if STATUS_JSON="${status_json}" python3 -c 'import json, os, sys; status = json.loads(os.environ["STATUS_JSON"]); sys.exit(0 if status.get("uploaded") else 1)'
        then
            echo "==> browser recording uploaded"
            break
        fi
    fi
    if (( $(date +%s) >= deadline_upload )); then
        echo "browser recording was not uploaded before timeout" >&2
        [[ -n "${status_json:-}" ]] && echo "last status: ${status_json}" >&2
        exit 1
    fi
    sleep 1
done

echo "==> fetching capture back to ${LOCAL_CAPTURE}"
scp ${SSH_OPTS} "${TETHYS_HOST}:${REMOTE_CAPTURE}" "${LOCAL_CAPTURE}"

echo "==> analyzing browser capture"
(
    cd "${REPO_ROOT}"
    cargo run -p lesavka_client --bin lesavka-sync-analyze -- "${LOCAL_CAPTURE}"
)
echo "==> done"
echo "capture: ${LOCAL_CAPTURE}"

View File

@ -10,7 +10,7 @@ bench = false
[package]
name = "lesavka_server"
version = "0.12.4"
version = "0.13.0"
edition = "2024"
autobins = false

View File

@ -62,6 +62,47 @@ fn voice_input_caps() -> gst::Caps {
.build()
}
/// ALSA sink buffer length in microseconds; overridable via
/// `LESAVKA_UAC_BUFFER_TIME_US` (positive values only).
fn voice_sink_buffer_time_us() -> i64 {
    const DEFAULT_BUFFER_TIME_US: i64 = 20_000;
    positive_voice_sink_timing_env("LESAVKA_UAC_BUFFER_TIME_US", DEFAULT_BUFFER_TIME_US)
}
/// ALSA sink latency period in microseconds; overridable via
/// `LESAVKA_UAC_LATENCY_TIME_US` (positive values only).
fn voice_sink_latency_time_us() -> i64 {
    const DEFAULT_LATENCY_TIME_US: i64 = 5_000;
    positive_voice_sink_timing_env("LESAVKA_UAC_LATENCY_TIME_US", DEFAULT_LATENCY_TIME_US)
}
fn voice_sink_compensation_us() -> i64 {
std::env::var("LESAVKA_UAC_COMPENSATION_US")
.ok()
.and_then(|value| value.trim().parse::<i64>().ok())
.filter(|value| *value >= 0)
.unwrap_or_else(default_voice_sink_compensation_us)
}
/// Default compensation when no explicit override is set: HDMI output gets the
/// HDMI-specific delay (env-tunable, 105 ms default); everything else gets 0.
fn default_voice_sink_compensation_us() -> i64 {
    match crate::camera::current_camera_config().output {
        crate::camera::CameraOutput::Hdmi => {
            non_negative_voice_sink_timing_env("LESAVKA_UAC_HDMI_COMPENSATION_US", 105_000)
        }
        _ => 0,
    }
}
/// Read `name` as an `i64`, keeping it only when strictly positive.
/// Unset, unparsable, zero, and negative values all yield `default`.
fn positive_voice_sink_timing_env(name: &str, default: i64) -> i64 {
    match std::env::var(name) {
        Ok(raw) => match raw.trim().parse::<i64>() {
            Ok(value) if value > 0 => value,
            _ => default,
        },
        Err(_) => default,
    }
}
/// Read `name` as an `i64`, keeping it only when zero or positive.
/// Unset, unparsable, and negative values all yield `default`.
fn non_negative_voice_sink_timing_env(name: &str, default: i64) -> i64 {
    match std::env::var(name) {
        Ok(raw) => match raw.trim().parse::<i64>() {
            Ok(value) if value >= 0 => value,
            _ => default,
        },
        Err(_) => default,
    }
}
impl Voice {
#[cfg(coverage)]
pub async fn new(_alsa_dev: &str) -> anyhow::Result<Self> {
@ -133,11 +174,32 @@ impl Voice {
let alsa_sink = gst::ElementFactory::make("alsasink")
.build()
.context("make alsasink")?;
let delay_queue = gst::ElementFactory::make("queue")
.build()
.context("make voice delay queue")?;
let buffer_time_us = voice_sink_buffer_time_us();
let latency_time_us = voice_sink_latency_time_us();
let compensation_us = voice_sink_compensation_us();
alsa_sink.set_property("device", alsa_dev);
alsa_sink.set_property("sync", false);
alsa_sink.set_property("async", false);
alsa_sink.set_property("enable-last-sample", false);
alsa_sink.set_property("provide-clock", false);
alsa_sink.set_property("buffer-time", buffer_time_us);
alsa_sink.set_property("latency-time", latency_time_us);
let compensation_ns = (compensation_us.max(0) as u64).saturating_mul(1_000);
delay_queue.set_property("max-size-buffers", 0u32);
delay_queue.set_property("max-size-bytes", 0u32);
delay_queue.set_property("max-size-time", compensation_ns);
delay_queue.set_property("min-threshold-time", compensation_ns);
tracing::info!(
%alsa_dev,
buffer_time_us,
latency_time_us,
compensation_us,
"🎤 UAC sink low-latency timing armed"
);
pipeline.add_many([
appsrc.upcast_ref(),
@ -145,10 +207,11 @@ impl Voice {
&convert,
&resample,
&capsfilter,
&delay_queue,
&alsa_sink,
])?;
appsrc.link(&decodebin)?;
gst::Element::link_many([&convert, &resample, &capsfilter, &alsa_sink])?;
gst::Element::link_many([&convert, &resample, &capsfilter, &delay_queue, &alsa_sink])?;
/*------------ decodebin autolink ----------------*/
let convert_sink = convert
@ -202,3 +265,94 @@ impl Voice {
let _ = self.appsrc.end_of_stream();
}
}
#[cfg(test)]
mod voice_sink_timing_tests {
    use crate::camera::update_camera_config;

    use super::{voice_sink_buffer_time_us, voice_sink_latency_time_us};
    use super::{default_voice_sink_compensation_us, voice_sink_compensation_us};

    // NOTE: these tests mutate process-global environment variables and the
    // cached camera config, so each case rebuilds the config via
    // `update_camera_config()` inside nested `temp_env` scopes.

    #[test]
    fn voice_sink_timing_defaults_stay_live_call_friendly() {
        // No overrides + UVC output -> low-latency defaults:
        // 20 ms buffer, 5 ms latency, zero compensation.
        temp_env::with_var_unset("LESAVKA_UAC_BUFFER_TIME_US", || {
            temp_env::with_var_unset("LESAVKA_UAC_LATENCY_TIME_US", || {
                temp_env::with_var_unset("LESAVKA_UAC_COMPENSATION_US", || {
                    temp_env::with_var_unset("LESAVKA_UAC_HDMI_COMPENSATION_US", || {
                        temp_env::with_var("LESAVKA_CAM_OUTPUT", Some("uvc"), || {
                            update_camera_config();
                            assert_eq!(voice_sink_buffer_time_us(), 20_000);
                            assert_eq!(voice_sink_latency_time_us(), 5_000);
                            assert_eq!(voice_sink_compensation_us(), 0);
                        });
                    });
                });
            });
        });
    }

    #[test]
    fn voice_sink_timing_env_accepts_positive_overrides_only() {
        // Valid positive buffer/latency overrides are honored verbatim.
        temp_env::with_var("LESAVKA_CAM_OUTPUT", Some("uvc"), || {
            update_camera_config();
            temp_env::with_var("LESAVKA_UAC_BUFFER_TIME_US", Some("42000"), || {
                temp_env::with_var("LESAVKA_UAC_LATENCY_TIME_US", Some("7000"), || {
                    assert_eq!(voice_sink_buffer_time_us(), 42_000);
                    assert_eq!(voice_sink_latency_time_us(), 7_000);
                    assert_eq!(voice_sink_compensation_us(), 0);
                });
            });
        });
        // Zero/negative buffer and latency fall back to defaults, while a
        // non-negative compensation override is accepted.
        temp_env::with_var("LESAVKA_CAM_OUTPUT", Some("uvc"), || {
            update_camera_config();
            temp_env::with_var("LESAVKA_UAC_BUFFER_TIME_US", Some("0"), || {
                temp_env::with_var("LESAVKA_UAC_LATENCY_TIME_US", Some("-5"), || {
                    temp_env::with_var("LESAVKA_UAC_COMPENSATION_US", Some("166667"), || {
                        assert_eq!(voice_sink_buffer_time_us(), 20_000);
                        assert_eq!(voice_sink_latency_time_us(), 5_000);
                        assert_eq!(voice_sink_compensation_us(), 166_667);
                    });
                });
            });
        });
        // A negative compensation override is rejected; UVC then reads as 0.
        temp_env::with_var("LESAVKA_CAM_OUTPUT", Some("uvc"), || {
            update_camera_config();
            temp_env::with_var("LESAVKA_UAC_COMPENSATION_US", Some("-5"), || {
                temp_env::with_var("LESAVKA_UAC_BUFFER_TIME_US", Some("0"), || {
                    temp_env::with_var("LESAVKA_UAC_LATENCY_TIME_US", Some("-5"), || {
                        assert_eq!(voice_sink_buffer_time_us(), 20_000);
                        assert_eq!(voice_sink_latency_time_us(), 5_000);
                        assert_eq!(voice_sink_compensation_us(), 0);
                    });
                });
            });
        });
    }

    #[test]
    fn hdmi_sink_compensation_defaults_to_hdmi_specific_delay() {
        // HDMI output with no overrides picks up the 105 ms HDMI default.
        temp_env::with_var_unset("LESAVKA_UAC_COMPENSATION_US", || {
            temp_env::with_var_unset("LESAVKA_UAC_HDMI_COMPENSATION_US", || {
                temp_env::with_var("LESAVKA_CAM_OUTPUT", Some("hdmi"), || {
                    update_camera_config();
                    assert_eq!(default_voice_sink_compensation_us(), 105_000);
                    assert_eq!(voice_sink_compensation_us(), 105_000);
                });
            });
        });
    }

    #[test]
    fn explicit_compensation_override_wins_over_hdmi_default() {
        // The generic override beats the HDMI-specific default, which itself
        // reflects LESAVKA_UAC_HDMI_COMPENSATION_US.
        temp_env::with_var("LESAVKA_CAM_OUTPUT", Some("hdmi"), || {
            update_camera_config();
            temp_env::with_var("LESAVKA_UAC_HDMI_COMPENSATION_US", Some("120000"), || {
                temp_env::with_var("LESAVKA_UAC_COMPENSATION_US", Some("90000"), || {
                    assert_eq!(default_voice_sink_compensation_us(), 120_000);
                    assert_eq!(voice_sink_compensation_us(), 90_000);
                });
            });
        });
    }
}

View File

@ -2,11 +2,11 @@
#![cfg_attr(coverage, allow(dead_code, unused_imports, unused_variables))]
use gstreamer as gst;
use std::collections::HashMap;
use std::fs;
mod selection;
use std::sync::{OnceLock, RwLock};
use tracing::{info, warn};
use selection::select_camera_config;
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum CameraOutput {
@ -76,15 +76,15 @@ impl CameraConfig {
}
if let (Some(width), Some(height)) = (
read_u32_from_env("LESAVKA_HDMI_WIDTH"),
read_u32_from_env("LESAVKA_HDMI_HEIGHT"),
selection::read_u32_from_env("LESAVKA_HDMI_WIDTH"),
selection::read_u32_from_env("LESAVKA_HDMI_HEIGHT"),
) {
return (width, height);
}
self.hdmi
.as_ref()
.and_then(|hdmi| preferred_hdmi_mode(&hdmi.modes))
.and_then(|hdmi| selection::preferred_hdmi_mode(&hdmi.modes))
.map(|mode| (mode.width, mode.height))
.unwrap_or((self.width, self.height))
}
@ -127,345 +127,6 @@ pub fn current_camera_config() -> CameraConfig {
update_camera_config()
}
#[cfg(coverage)]
fn select_camera_config() -> CameraConfig {
let output_override = std::env::var("LESAVKA_CAM_OUTPUT")
.ok()
.as_deref()
.and_then(parse_camera_output);
match output_override.unwrap_or(CameraOutput::Uvc) {
CameraOutput::Hdmi => select_hdmi_config(detect_hdmi_connector(false)),
CameraOutput::Uvc => select_uvc_config(),
}
}
#[cfg(not(coverage))]
fn select_camera_config() -> CameraConfig {
let output_env = std::env::var("LESAVKA_CAM_OUTPUT").ok();
let output_override = output_env.as_deref().and_then(parse_camera_output);
let require_connected = output_override != Some(CameraOutput::Hdmi);
let hdmi = detect_hdmi_connector(require_connected);
if output_override == Some(CameraOutput::Hdmi) && hdmi.is_none() {
warn!("📷 HDMI output forced but no connector detected");
}
let output = match output_override {
Some(v) => v,
None => {
if hdmi.is_some() {
CameraOutput::Hdmi
} else {
CameraOutput::Uvc
}
}
};
let cfg = match output {
CameraOutput::Hdmi => select_hdmi_config(hdmi),
CameraOutput::Uvc => select_uvc_config(),
};
let (display_width, display_height) = cfg.hdmi_display_size();
info!(
output = cfg.output.as_str(),
codec = cfg.codec.as_str(),
width = cfg.width,
height = cfg.height,
fps = cfg.fps,
display_width,
display_height,
hdmi = cfg.hdmi.as_ref().map(|h| h.name.as_str()).unwrap_or("none"),
"📷 camera output selected"
);
cfg
}
fn parse_camera_output(raw: &str) -> Option<CameraOutput> {
match raw.trim().to_ascii_lowercase().as_str() {
"uvc" => Some(CameraOutput::Uvc),
"hdmi" => Some(CameraOutput::Hdmi),
"auto" | "" => None,
_ => None,
}
}
fn select_hdmi_config(hdmi: Option<HdmiConnector>) -> CameraConfig {
let hw_decode = has_hw_h264_decode();
let (default_width, default_height) = if hw_decode { (1920, 1080) } else { (1280, 720) };
let width = read_u32_from_env("LESAVKA_CAM_WIDTH").unwrap_or(default_width);
let height = read_u32_from_env("LESAVKA_CAM_HEIGHT").unwrap_or(default_height);
let fps = read_u32_from_env("LESAVKA_CAM_FPS").unwrap_or(30).max(1);
#[cfg(not(coverage))]
if !hw_decode {
if width == default_width && height == default_height {
warn!(
"📷 HDMI output: hardware H264 decoder not detected; requesting 720p30 camera uplink"
);
} else {
warn!(
width,
height,
fps,
"📷 HDMI output: hardware H264 decoder not detected; using configured camera uplink size"
);
}
}
CameraConfig {
output: CameraOutput::Hdmi,
codec: CameraCodec::H264,
width,
height,
fps,
hdmi,
}
}
#[cfg(coverage)]
fn select_uvc_config() -> CameraConfig {
let width = read_u32_from_env("LESAVKA_UVC_WIDTH").unwrap_or(1280);
let height = read_u32_from_env("LESAVKA_UVC_HEIGHT").unwrap_or(720);
let fps = read_u32_from_env("LESAVKA_UVC_FPS")
.or_else(|| {
read_u32_from_env("LESAVKA_UVC_INTERVAL").and_then(|interval| {
if interval == 0 {
None
} else {
Some(10_000_000 / interval)
}
})
})
.unwrap_or(25);
CameraConfig {
output: CameraOutput::Uvc,
codec: CameraCodec::Mjpeg,
width,
height,
fps,
hdmi: None,
}
}
#[cfg(not(coverage))]
fn select_uvc_config() -> CameraConfig {
let mut uvc_env = HashMap::new();
if let Ok(text) = fs::read_to_string("/etc/lesavka/uvc.env") {
uvc_env = parse_env_file(&text);
}
let width = read_u32_from_env("LESAVKA_UVC_WIDTH")
.or_else(|| read_u32_from_map(&uvc_env, "LESAVKA_UVC_WIDTH"))
.unwrap_or(1280);
let height = read_u32_from_env("LESAVKA_UVC_HEIGHT")
.or_else(|| read_u32_from_map(&uvc_env, "LESAVKA_UVC_HEIGHT"))
.unwrap_or(720);
let fps = read_u32_from_env("LESAVKA_UVC_FPS")
.or_else(|| read_u32_from_map(&uvc_env, "LESAVKA_UVC_FPS"))
.or_else(|| {
read_u32_from_env("LESAVKA_UVC_INTERVAL")
.or_else(|| read_u32_from_map(&uvc_env, "LESAVKA_UVC_INTERVAL"))
.and_then(|interval| {
if interval == 0 {
None
} else {
Some(10_000_000 / interval)
}
})
})
.unwrap_or(25);
CameraConfig {
output: CameraOutput::Uvc,
codec: CameraCodec::Mjpeg,
width,
height,
fps,
hdmi: None,
}
}
#[cfg(coverage)]
fn has_hw_h264_decode() -> bool {
std::env::var("LESAVKA_HW_H264").is_ok()
}
#[cfg(not(coverage))]
fn has_hw_h264_decode() -> bool {
if gst::init().is_err() {
return false;
}
for name in ["v4l2h264dec", "v4l2slh264dec", "omxh264dec"] {
if gst::ElementFactory::find(name).is_some() {
return true;
}
}
false
}
#[cfg(coverage)]
fn detect_hdmi_connector(require_connected: bool) -> Option<HdmiConnector> {
let _ = require_connected;
std::env::var("LESAVKA_HDMI_CONNECTOR")
.ok()
.map(|name| HdmiConnector {
name,
id: None,
modes: std::env::var("LESAVKA_HDMI_MODES")
.ok()
.map(|raw| parse_hdmi_modes(&raw))
.unwrap_or_default(),
})
}
#[cfg(not(coverage))]
fn detect_hdmi_connector(require_connected: bool) -> Option<HdmiConnector> {
let preferred = std::env::var("LESAVKA_HDMI_CONNECTOR").ok();
let entries = fs::read_dir("/sys/class/drm").ok()?;
let mut connectors = Vec::new();
for entry in entries.flatten() {
let name = entry.file_name().to_string_lossy().into_owned();
if !name.contains("HDMI-A-") {
continue;
}
let status_path = entry.path().join("status");
let status = fs::read_to_string(&status_path)
.ok()
.map(|v| v.trim().to_string())
.unwrap_or_default();
let id = fs::read_to_string(entry.path().join("connector_id"))
.ok()
.and_then(|v| v.trim().parse::<u32>().ok());
let modes = fs::read_to_string(entry.path().join("modes"))
.ok()
.map(|raw| parse_hdmi_modes(&raw))
.unwrap_or_default();
connectors.push((name, status, id, modes));
}
connectors.sort_by(|a, b| a.0.cmp(&b.0));
let matches_preferred =
|name: &str, preferred: &str| name == preferred || name.ends_with(preferred);
if let Some(pref) = preferred.as_deref() {
for (name, status, id, modes) in &connectors {
if matches_preferred(name, pref) && (!require_connected || status == "connected") {
return Some(HdmiConnector {
name: name.clone(),
id: *id,
modes: modes.clone(),
});
}
}
}
// Keep the previously-selected connector stable when no explicit override is set.
// This prevents connector flapping when multiple HDMI outputs are simultaneously connected.
if preferred.is_none() {
let previous = LAST_CONFIG
.get()
.and_then(|lock| lock.read().ok())
.and_then(|cfg| cfg.hdmi.as_ref().map(|h| h.name.clone()));
if let Some(prev) = previous {
for (name, status, id, modes) in &connectors {
if *name == prev && (!require_connected || status == "connected") {
return Some(HdmiConnector {
name: name.clone(),
id: *id,
modes: modes.clone(),
});
}
}
}
}
for (name, status, id, modes) in connectors {
if !require_connected || status == "connected" {
return Some(HdmiConnector { name, id, modes });
}
}
None
}
fn parse_hdmi_modes(raw: &str) -> Vec<HdmiMode> {
raw.lines()
.flat_map(|line| line.split(','))
.filter_map(parse_hdmi_mode)
.collect()
}
fn parse_hdmi_mode(raw: &str) -> Option<HdmiMode> {
let raw = raw.trim();
let (width, rest) = raw.split_once('x')?;
let width = width.trim().parse::<u32>().ok()?;
let height_digits: String = rest
.trim()
.chars()
.take_while(|ch| ch.is_ascii_digit())
.collect();
let height = height_digits.parse::<u32>().ok()?;
(width > 0 && height > 0).then_some(HdmiMode { width, height })
}
fn preferred_hdmi_mode(modes: &[HdmiMode]) -> Option<HdmiMode> {
for preferred in [
HdmiMode {
width: 1920,
height: 1080,
},
HdmiMode {
width: 1280,
height: 720,
},
] {
if modes.contains(&preferred) {
return Some(preferred);
}
}
modes
.iter()
.copied()
.filter(|mode| mode.width.saturating_mul(9) == mode.height.saturating_mul(16))
.filter(|mode| mode.width.saturating_mul(mode.height) <= 1920 * 1080)
.max_by_key(|mode| mode.width.saturating_mul(mode.height))
.or_else(|| modes.first().copied())
}
#[cfg(not(coverage))]
fn parse_env_file(text: &str) -> HashMap<String, String> {
let mut out = HashMap::new();
for line in text.lines() {
let line = line.trim();
if line.is_empty() || line.starts_with('#') {
continue;
}
let mut parts = line.splitn(2, '=');
let key = match parts.next() {
Some(v) => v.trim(),
None => continue,
};
let val = match parts.next() {
Some(v) => v.trim(),
None => continue,
};
out.insert(key.to_string(), val.to_string());
}
out
}
fn read_u32_from_env(key: &str) -> Option<u32> {
std::env::var(key).ok().and_then(|v| v.parse::<u32>().ok())
}
#[cfg(not(coverage))]
fn read_u32_from_map(map: &HashMap<String, String>, key: &str) -> Option<u32> {
map.get(key).and_then(|v| v.parse::<u32>().ok())
}
#[cfg(test)]
#[path = "tests/camera.rs"]
mod tests;

View File

@ -0,0 +1,372 @@
use super::{CameraCodec, CameraConfig, CameraOutput, HdmiConnector, HdmiMode, LAST_CONFIG};
use gstreamer as gst;
use std::collections::HashMap;
use std::fs;
use tracing::{info, warn};
#[cfg(coverage)]
/// Coverage builds skip hardware probing entirely: an explicit
/// `LESAVKA_CAM_OUTPUT` override is honored, anything else means UVC.
pub(super) fn select_camera_config() -> CameraConfig {
    let requested = std::env::var("LESAVKA_CAM_OUTPUT")
        .ok()
        .as_deref()
        .and_then(parse_camera_output)
        .unwrap_or(CameraOutput::Uvc);
    if requested == CameraOutput::Hdmi {
        select_hdmi_config(detect_hdmi_connector(false))
    } else {
        select_uvc_config()
    }
}
#[cfg(not(coverage))]
/// Pick the active camera configuration: honor an explicit output override,
/// otherwise prefer HDMI when a connector is present, falling back to UVC.
pub(super) fn select_camera_config() -> CameraConfig {
    // "auto", empty, and unknown values all read as "no override".
    let requested = std::env::var("LESAVKA_CAM_OUTPUT")
        .ok()
        .as_deref()
        .and_then(parse_camera_output);
    // Only demand a "connected" DRM status when HDMI was not explicitly forced.
    let require_connected = requested != Some(CameraOutput::Hdmi);
    let hdmi = detect_hdmi_connector(require_connected);
    if requested == Some(CameraOutput::Hdmi) && hdmi.is_none() {
        warn!("📷 HDMI output forced but no connector detected");
    }
    let output = requested.unwrap_or(if hdmi.is_some() {
        CameraOutput::Hdmi
    } else {
        CameraOutput::Uvc
    });
    let cfg = if output == CameraOutput::Hdmi {
        select_hdmi_config(hdmi)
    } else {
        select_uvc_config()
    };
    let (display_width, display_height) = cfg.hdmi_display_size();
    info!(
        output = cfg.output.as_str(),
        codec = cfg.codec.as_str(),
        width = cfg.width,
        height = cfg.height,
        fps = cfg.fps,
        display_width,
        display_height,
        hdmi = cfg.hdmi.as_ref().map(|h| h.name.as_str()).unwrap_or("none"),
        "📷 camera output selected"
    );
    cfg
}
/// Parse a `LESAVKA_CAM_OUTPUT` value; "auto"/empty/unknown mean no override.
fn parse_camera_output(raw: &str) -> Option<CameraOutput> {
    let normalized = raw.trim().to_ascii_lowercase();
    if normalized == "uvc" {
        Some(CameraOutput::Uvc)
    } else if normalized == "hdmi" {
        Some(CameraOutput::Hdmi)
    } else {
        None
    }
}
/// Parse a codec name; several MJPEG spellings are accepted, else `None`.
fn parse_camera_codec(raw: &str) -> Option<CameraCodec> {
    let normalized = raw.trim().to_ascii_lowercase();
    if normalized == "h264" {
        Some(CameraCodec::H264)
    } else if ["mjpeg", "mjpg", "jpeg"].contains(&normalized.as_str()) {
        Some(CameraCodec::Mjpeg)
    } else {
        None
    }
}
/// Choose the HDMI uplink codec: `LESAVKA_CAM_CODEC` wins when parsable,
/// otherwise H264 with hardware decode available and MJPEG without.
fn select_hdmi_codec(hw_decode: bool) -> CameraCodec {
    let requested = std::env::var("LESAVKA_CAM_CODEC")
        .ok()
        .as_deref()
        .and_then(parse_camera_codec);
    if let Some(codec) = requested {
        return codec;
    }
    if hw_decode {
        CameraCodec::H264
    } else {
        CameraCodec::Mjpeg
    }
}
/// Build the HDMI camera configuration, sizing the uplink by decoder capability.
fn select_hdmi_config(hdmi: Option<HdmiConnector>) -> CameraConfig {
    let hw_decode = has_hw_h264_decode();
    // 1080p only when a hardware H264 decoder exists; 720p otherwise.
    let (default_width, default_height) = if hw_decode { (1920, 1080) } else { (1280, 720) };
    let width = read_u32_from_env("LESAVKA_CAM_WIDTH").unwrap_or(default_width);
    let height = read_u32_from_env("LESAVKA_CAM_HEIGHT").unwrap_or(default_height);
    // Clamp fps to at least 1 so downstream framerate math never sees zero.
    let fps = read_u32_from_env("LESAVKA_CAM_FPS").unwrap_or(30).max(1);
    let codec = select_hdmi_codec(hw_decode);
    // Outside coverage builds, warn when software decode forces a degraded path.
    #[cfg(not(coverage))]
    if !hw_decode {
        if matches!(codec, CameraCodec::Mjpeg) {
            warn!(
                width,
                height,
                fps,
                "📷 HDMI output: hardware H264 decoder not detected; preferring MJPEG uplink"
            );
        } else if width == default_width && height == default_height {
            warn!(
                "📷 HDMI output: hardware H264 decoder not detected; forcing H264 uplink at requested size"
            );
        } else {
            warn!(
                width,
                height,
                fps,
                "📷 HDMI output: hardware H264 decoder not detected; using configured camera uplink size"
            );
        }
    }
    CameraConfig {
        output: CameraOutput::Hdmi,
        codec,
        width,
        height,
        fps,
        hdmi,
    }
}
#[cfg(coverage)]
/// Coverage-build UVC config: env-only, no /etc env file.
fn select_uvc_config() -> CameraConfig {
    // FPS comes from LESAVKA_UVC_FPS, else is derived from the UVC frame
    // interval (100 ns units); a zero interval is ignored.
    let fps_from_interval = || {
        let interval = read_u32_from_env("LESAVKA_UVC_INTERVAL")?;
        (interval != 0).then(|| 10_000_000 / interval)
    };
    CameraConfig {
        output: CameraOutput::Uvc,
        codec: CameraCodec::Mjpeg,
        width: read_u32_from_env("LESAVKA_UVC_WIDTH").unwrap_or(1280),
        height: read_u32_from_env("LESAVKA_UVC_HEIGHT").unwrap_or(720),
        fps: read_u32_from_env("LESAVKA_UVC_FPS")
            .or_else(fps_from_interval)
            .unwrap_or(25),
        hdmi: None,
    }
}
#[cfg(not(coverage))]
/// UVC config: process env wins over /etc/lesavka/uvc.env for every key.
fn select_uvc_config() -> CameraConfig {
    // Optional persisted defaults; absence or read failure means "empty".
    let uvc_env = fs::read_to_string("/etc/lesavka/uvc.env")
        .map(|text| parse_env_file(&text))
        .unwrap_or_default();
    // Single lookup path: env first, then the env-file map.
    let lookup = |key: &str| read_u32_from_env(key).or_else(|| read_u32_from_map(&uvc_env, key));
    // FPS comes from LESAVKA_UVC_FPS, else is derived from the UVC frame
    // interval (100 ns units); a zero interval is ignored.
    let fps = lookup("LESAVKA_UVC_FPS")
        .or_else(|| {
            let interval = lookup("LESAVKA_UVC_INTERVAL")?;
            (interval != 0).then(|| 10_000_000 / interval)
        })
        .unwrap_or(25);
    CameraConfig {
        output: CameraOutput::Uvc,
        codec: CameraCodec::Mjpeg,
        width: lookup("LESAVKA_UVC_WIDTH").unwrap_or(1280),
        height: lookup("LESAVKA_UVC_HEIGHT").unwrap_or(720),
        fps,
        hdmi: None,
    }
}
#[cfg(coverage)]
/// Coverage builds fake the probe: a set `LESAVKA_HW_H264` means "decoder present".
fn has_hw_h264_decode() -> bool {
    matches!(std::env::var("LESAVKA_HW_H264"), Ok(_))
}
#[cfg(not(coverage))]
/// True when GStreamer initializes and exposes any known hardware H264 decoder.
fn has_hw_h264_decode() -> bool {
    const HW_DECODERS: [&str; 3] = ["v4l2h264dec", "v4l2slh264dec", "omxh264dec"];
    gst::init().is_ok()
        && HW_DECODERS
            .iter()
            .any(|name| gst::ElementFactory::find(name).is_some())
}
#[cfg(coverage)]
/// Coverage builds synthesize the connector from env vars instead of sysfs.
fn detect_hdmi_connector(require_connected: bool) -> Option<HdmiConnector> {
    let _ = require_connected; // connection state is ignored in coverage builds
    let name = std::env::var("LESAVKA_HDMI_CONNECTOR").ok()?;
    let modes = std::env::var("LESAVKA_HDMI_MODES")
        .ok()
        .map(|raw| parse_hdmi_modes(&raw))
        .unwrap_or_default();
    Some(HdmiConnector {
        name,
        id: None,
        modes,
    })
}
#[cfg(not(coverage))]
/// Scan /sys/class/drm for HDMI connectors and pick one, in priority order:
/// explicit `LESAVKA_HDMI_CONNECTOR` preference, then the previously selected
/// connector (stability), then the first acceptable one in sorted order.
fn detect_hdmi_connector(require_connected: bool) -> Option<HdmiConnector> {
    // Optional explicit connector name; matched exactly or as a suffix.
    let preferred = std::env::var("LESAVKA_HDMI_CONNECTOR").ok();
    let entries = fs::read_dir("/sys/class/drm").ok()?;
    // Collect every HDMI connector with its status, id, and advertised modes.
    let mut connectors = Vec::new();
    for entry in entries.flatten() {
        let name = entry.file_name().to_string_lossy().into_owned();
        if !name.contains("HDMI-A-") {
            continue;
        }
        let status_path = entry.path().join("status");
        let status = fs::read_to_string(&status_path)
            .ok()
            .map(|v| v.trim().to_string())
            .unwrap_or_default();
        let id = fs::read_to_string(entry.path().join("connector_id"))
            .ok()
            .and_then(|v| v.trim().parse::<u32>().ok());
        let modes = fs::read_to_string(entry.path().join("modes"))
            .ok()
            .map(|raw| parse_hdmi_modes(&raw))
            .unwrap_or_default();
        connectors.push((name, status, id, modes));
    }
    // Deterministic order so fallback selection is stable across scans.
    connectors.sort_by(|a, b| a.0.cmp(&b.0));
    let matches_preferred =
        |name: &str, preferred: &str| name == preferred || name.ends_with(preferred);
    // 1) Explicit preference wins when it matches (and is connected, if required).
    if let Some(pref) = preferred.as_deref() {
        for (name, status, id, modes) in &connectors {
            if matches_preferred(name, pref) && (!require_connected || status == "connected") {
                return Some(HdmiConnector {
                    name: name.clone(),
                    id: *id,
                    modes: modes.clone(),
                });
            }
        }
    }
    // 2) Keep the previously-selected connector stable when no explicit override is set.
    // This prevents connector flapping when multiple HDMI outputs are simultaneously connected.
    if preferred.is_none() {
        let previous = LAST_CONFIG
            .get()
            .and_then(|lock| lock.read().ok())
            .and_then(|cfg| cfg.hdmi.as_ref().map(|h| h.name.clone()));
        if let Some(prev) = previous {
            for (name, status, id, modes) in &connectors {
                if *name == prev && (!require_connected || status == "connected") {
                    return Some(HdmiConnector {
                        name: name.clone(),
                        id: *id,
                        modes: modes.clone(),
                    });
                }
            }
        }
    }
    // 3) Fall back to the first acceptable connector in sorted order.
    for (name, status, id, modes) in connectors {
        if !require_connected || status == "connected" {
            return Some(HdmiConnector { name, id, modes });
        }
    }
    None
}
pub(crate) fn parse_hdmi_modes(raw: &str) -> Vec<HdmiMode> {
raw.lines()
.flat_map(|line| line.split(','))
.filter_map(parse_hdmi_mode)
.collect()
}
/// Parse a single mode token such as "1920x1080" (trailing suffixes like
/// "p60" after the height digits are ignored). Zero dimensions are rejected.
pub(crate) fn parse_hdmi_mode(raw: &str) -> Option<HdmiMode> {
    let trimmed = raw.trim();
    let (width_part, rest) = trimmed.split_once('x')?;
    let width = width_part.trim().parse::<u32>().ok()?;
    // Height is the leading digit run after the 'x'; no digits -> parse fails.
    let rest = rest.trim();
    let digits_end = rest
        .char_indices()
        .find(|(_, ch)| !ch.is_ascii_digit())
        .map(|(idx, _)| idx)
        .unwrap_or(rest.len());
    let height = rest[..digits_end].parse::<u32>().ok()?;
    if width > 0 && height > 0 {
        Some(HdmiMode { width, height })
    } else {
        None
    }
}
/// Pick the HDMI mode the capture path should use from an advertised list.
///
/// Preference order: exact 1080p, exact 720p, then the largest 16:9 mode no
/// bigger than 1080p, and finally whatever mode is listed first.
pub(crate) fn preferred_hdmi_mode(modes: &[HdmiMode]) -> Option<HdmiMode> {
    // Exact favorites first: full HD, then HD.
    let favorites = [
        HdmiMode {
            width: 1920,
            height: 1080,
        },
        HdmiMode {
            width: 1280,
            height: 720,
        },
    ];
    for favorite in favorites {
        if modes.contains(&favorite) {
            return Some(favorite);
        }
    }
    // Otherwise the biggest 16:9 mode that stays within the 1080p pixel
    // budget; cross-multiplied ratio check avoids floating point.
    let best_wide = modes
        .iter()
        .copied()
        .filter(|mode| mode.width.saturating_mul(9) == mode.height.saturating_mul(16))
        .filter(|mode| mode.width.saturating_mul(mode.height) <= 1920 * 1080)
        .max_by_key(|mode| mode.width.saturating_mul(mode.height));
    best_wide.or_else(|| modes.first().copied())
}
/// Parse a `.env`-style text blob into key/value pairs.
///
/// Inputs: file contents; blank lines and `#` comment lines are skipped.
/// Outputs: a map of trimmed keys to trimmed values. Lines without `=` are
/// ignored and later duplicates overwrite earlier ones.
#[cfg(not(coverage))]
fn parse_env_file(text: &str) -> HashMap<String, String> {
    let mut out = HashMap::new();
    for line in text.lines() {
        let line = line.trim();
        if line.is_empty() || line.starts_with('#') {
            continue;
        }
        // `split_once` replaces the previous `splitn(2, '=')` + double
        // `next()` dance: the first '=' divides key from value, and any
        // further '=' characters stay inside the value.
        let Some((key, value)) = line.split_once('=') else {
            continue;
        };
        out.insert(key.trim().to_string(), value.trim().to_string());
    }
    out
}
/// Read `key` from the process environment and interpret it as a `u32`.
///
/// Returns `None` when the variable is unset, not valid UTF-8, or does not
/// parse as an unsigned integer.
pub(crate) fn read_u32_from_env(key: &str) -> Option<u32> {
    let raw = std::env::var(key).ok()?;
    raw.parse::<u32>().ok()
}
/// Look up `key` in a parsed env map and interpret the value as a `u32`.
///
/// Returns `None` when the key is absent or the value does not parse.
#[cfg(not(coverage))]
fn read_u32_from_map(map: &HashMap<String, String>, key: &str) -> Option<u32> {
    let value = map.get(key)?;
    value.parse::<u32>().ok()
}

View File

@ -12,6 +12,7 @@ pub mod gadget;
pub mod handshake;
pub mod paste;
pub mod runtime_support;
pub mod upstream_media_runtime;
pub mod uvc_runtime;
pub mod video;
pub(crate) mod video_sinks;

View File

@ -25,8 +25,8 @@ use lesavka_common::lesavka::{
use lesavka_server::{
camera, camera_runtime::CameraRuntime, capture_power::CapturePowerManager, gadget::UsbGadget,
handshake::HandshakeSvc, paste, runtime_support, runtime_support::init_tracing, uvc_runtime,
video,
handshake::HandshakeSvc, paste, runtime_support, runtime_support::init_tracing,
upstream_media_runtime::UpstreamMediaRuntime, uvc_runtime, video,
};
/*──────────────── constants ────────────────*/
@ -65,6 +65,7 @@ struct Handler {
gadget: UsbGadget,
did_cycle: Arc<AtomicBool>,
camera_rt: Arc<CameraRuntime>,
upstream_media_rt: Arc<UpstreamMediaRuntime>,
capture_power: CapturePowerManager,
eye_hubs: Arc<Mutex<HashMap<EyeHubKey, Arc<EyeHub>>>>,
}

View File

@ -45,6 +45,7 @@ impl Handler {
gadget,
did_cycle: Arc::new(AtomicBool::new(false)),
camera_rt: Arc::new(CameraRuntime::new()),
upstream_media_rt: Arc::new(UpstreamMediaRuntime::new()),
capture_power: CapturePowerManager::new(),
eye_hubs: Arc::new(Mutex::new(HashMap::new())),
})

View File

@ -110,23 +110,55 @@ impl Relay for Handler {
req: Request<tonic::Streaming<AudioPacket>>,
) -> Result<Response<Self::StreamMicrophoneStream>, Status> {
let rpc_id = runtime_support::next_stream_id();
info!(rpc_id, "🎤 stream_microphone opened");
let lease = self.upstream_media_rt.activate_microphone();
info!(rpc_id, session_id = lease.session_id, "🎤 stream_microphone opened");
let Some(microphone_sink_permit) = self
.upstream_media_rt
.reserve_microphone_sink(lease.generation)
.await
else {
info!(
rpc_id,
session_id = lease.session_id,
"🎤 stream_microphone stood down before the sink became available"
);
return Err(Status::aborted(
"microphone stream superseded before sink became available",
));
};
// 1 ─ build once, early
let uac_dev = std::env::var("LESAVKA_UAC_DEV").unwrap_or_else(|_| "hw:UAC2Gadget,0".into());
info!(%uac_dev, "🎤 stream_microphone using UAC sink");
let mut sink = runtime_support::open_voice_with_retry(&uac_dev)
.await
.map_err(|e| Status::internal(format!("{e:#}")))?;
.map_err(|e| {
self.upstream_media_rt.close_microphone(lease.generation);
Status::internal(format!("{e:#}"))
})?;
// 2 ─ dummy outbound stream (same trick as before)
let (tx, rx) = tokio::sync::mpsc::channel(1);
let upstream_media_rt = self.upstream_media_rt.clone();
// 3 ─ drive the sink in a background task
tokio::spawn(async move {
let _microphone_sink_permit = microphone_sink_permit;
let mut inbound = req.into_inner();
static CNT: std::sync::atomic::AtomicU64 = std::sync::atomic::AtomicU64::new(0);
while let Some(pkt) = inbound.next().await.transpose()? {
loop {
if !upstream_media_rt.is_microphone_active(lease.generation) {
info!(rpc_id, session_id = lease.session_id, "🎤 stream_microphone session superseded");
break;
}
let next_packet = tokio::select! {
packet = inbound.next() => packet,
_ = tokio::time::sleep(Duration::from_millis(50)) => continue,
};
let Some(mut pkt) = next_packet.transpose()? else {
break;
};
pkt.pts = upstream_media_rt.map_audio_pts(pkt.pts);
let n = CNT.fetch_add(1, std::sync::atomic::Ordering::Relaxed);
if n < 5 || n.is_multiple_of(3_000) {
tracing::info!(rpc_id, "🎤⬇ srv pkt#{n} {} bytes", pkt.data.len());
@ -134,8 +166,9 @@ impl Relay for Handler {
sink.push(&pkt);
}
sink.finish(); // flush on EOS
upstream_media_rt.close_microphone(lease.generation);
let _ = tx.send(Ok(Empty {})).await;
info!(rpc_id, "🎤 stream_microphone closed");
info!(rpc_id, session_id = lease.session_id, "🎤 stream_microphone closed");
Ok::<(), Status>(())
});
@ -160,22 +193,36 @@ impl Relay for Handler {
"🎥 stream_camera output selected"
);
let upstream_lease = self.upstream_media_rt.activate_camera();
let (session_id, relay) = self.camera_rt.activate(&cfg).await?;
let camera_rt = self.camera_rt.clone();
let upstream_media_rt = self.upstream_media_rt.clone();
info!(rpc_id, session_id, "🎥 stream_camera opened");
let frame_step_us = (1_000_000u64 / u64::from(cfg.fps.max(1))).max(1);
// dummy outbound (same pattern as other streams)
let (tx, rx) = tokio::sync::mpsc::channel(1);
tokio::spawn(async move {
let mut s = req.into_inner();
while let Some(pkt) = s.next().await.transpose()? {
if !camera_rt.is_active(session_id) {
loop {
if !camera_rt.is_active(session_id)
|| !upstream_media_rt.is_camera_active(upstream_lease.generation)
{
info!(rpc_id, session_id, "🎥 stream_camera session superseded");
break;
}
let next_packet = tokio::select! {
packet = s.next() => packet,
_ = tokio::time::sleep(Duration::from_millis(50)) => continue,
};
let Some(mut pkt) = next_packet.transpose()? else {
break;
};
pkt.pts = upstream_media_rt.map_video_pts(pkt.pts, frame_step_us);
relay.feed(pkt); // ← all logging inside video.rs
}
upstream_media_rt.close_camera(upstream_lease.generation);
tx.send(Ok(Empty {})).await.ok();
info!(rpc_id, session_id, "🎥 stream_camera closed");
Ok::<(), Status>(())

View File

@ -55,18 +55,45 @@ impl Relay for Handler {
&self,
req: Request<tonic::Streaming<AudioPacket>>,
) -> Result<Response<Self::StreamMicrophoneStream>, Status> {
let lease = self.upstream_media_rt.activate_microphone();
let Some(microphone_sink_permit) = self
.upstream_media_rt
.reserve_microphone_sink(lease.generation)
.await
else {
return Err(Status::aborted(
"microphone stream superseded before sink became available",
));
};
let uac_dev = std::env::var("LESAVKA_UAC_DEV").unwrap_or_else(|_| "hw:UAC2Gadget,0".into());
let mut sink = runtime_support::open_voice_with_retry(&uac_dev)
.await
.map_err(|e| Status::internal(format!("{e:#}")))?;
.map_err(|e| {
self.upstream_media_rt.close_microphone(lease.generation);
Status::internal(format!("{e:#}"))
})?;
let (tx, rx) = tokio::sync::mpsc::channel(1);
let upstream_media_rt = self.upstream_media_rt.clone();
tokio::spawn(async move {
let _microphone_sink_permit = microphone_sink_permit;
let mut inbound = req.into_inner();
while let Some(pkt) = inbound.next().await.transpose()? {
loop {
if !upstream_media_rt.is_microphone_active(lease.generation) {
break;
}
let next_packet = tokio::select! {
packet = inbound.next() => packet,
_ = tokio::time::sleep(Duration::from_millis(25)) => continue,
};
let Some(mut pkt) = next_packet.transpose()? else {
break;
};
pkt.pts = upstream_media_rt.map_audio_pts(pkt.pts);
sink.push(&pkt);
}
sink.finish();
upstream_media_rt.close_microphone(lease.generation);
let _ = tx.send(Ok(Empty {})).await;
Ok::<(), Status>(())
});
@ -79,18 +106,32 @@ impl Relay for Handler {
req: Request<tonic::Streaming<VideoPacket>>,
) -> Result<Response<Self::StreamCameraStream>, Status> {
let cfg = camera::current_camera_config();
let upstream_lease = self.upstream_media_rt.activate_camera();
let (session_id, relay) = self.camera_rt.activate(&cfg).await?;
let camera_rt = self.camera_rt.clone();
let upstream_media_rt = self.upstream_media_rt.clone();
let (tx, rx) = tokio::sync::mpsc::channel(1);
let frame_step_us = (1_000_000u64 / u64::from(cfg.fps.max(1))).max(1);
tokio::spawn(async move {
let mut s = req.into_inner();
while let Some(pkt) = s.next().await.transpose()? {
if !camera_rt.is_active(session_id) {
loop {
if !camera_rt.is_active(session_id)
|| !upstream_media_rt.is_camera_active(upstream_lease.generation)
{
break;
}
let next_packet = tokio::select! {
packet = s.next() => packet,
_ = tokio::time::sleep(Duration::from_millis(25)) => continue,
};
let Some(mut pkt) = next_packet.transpose()? else {
break;
};
pkt.pts = upstream_media_rt.map_video_pts(pkt.pts, frame_step_us);
relay.feed(pkt);
}
upstream_media_rt.close_camera(upstream_lease.generation);
tx.send(Ok(Empty {})).await.ok();
Ok::<(), Status>(())
});

View File

@ -1,6 +1,7 @@
use super::selection::{parse_hdmi_mode, parse_hdmi_modes, preferred_hdmi_mode};
use super::{
CameraCodec, CameraConfig, CameraOutput, HdmiConnector, HdmiMode, current_camera_config,
parse_hdmi_mode, parse_hdmi_modes, preferred_hdmi_mode, update_camera_config,
update_camera_config,
};
use serial_test::serial;
use temp_env::with_var;
@ -35,21 +36,54 @@ fn camera_config_env_override_prefers_uvc_values() {
#[serial]
fn hdmi_camera_profile_honors_installed_1080p_override() {
with_var("LESAVKA_CAM_OUTPUT", Some("hdmi"), || {
with_var("LESAVKA_CAM_WIDTH", Some("1920"), || {
with_var("LESAVKA_CAM_HEIGHT", Some("1080"), || {
with_var("LESAVKA_CAM_FPS", Some("30"), || {
let cfg = update_camera_config();
assert_eq!(cfg.output, CameraOutput::Hdmi);
assert_eq!(cfg.codec, CameraCodec::H264);
assert_eq!(cfg.width, 1920);
assert_eq!(cfg.height, 1080);
assert_eq!(cfg.fps, 30);
with_var("LESAVKA_CAM_CODEC", Some("h264"), || {
with_var("LESAVKA_CAM_WIDTH", Some("1920"), || {
with_var("LESAVKA_CAM_HEIGHT", Some("1080"), || {
with_var("LESAVKA_CAM_FPS", Some("30"), || {
let cfg = update_camera_config();
assert_eq!(cfg.output, CameraOutput::Hdmi);
assert_eq!(cfg.codec, CameraCodec::H264);
assert_eq!(cfg.width, 1920);
assert_eq!(cfg.height, 1080);
assert_eq!(cfg.fps, 30);
});
});
});
});
});
}
// Without LESAVKA_HW_H264 or an explicit codec override, the forced-HDMI
// camera profile falls back to MJPEG at 720p30 (no hardware H.264 decode
// is assumed on the capture box).
#[test]
#[serial]
fn hdmi_camera_profile_prefers_mjpeg_without_hardware_decoder() {
    with_var("LESAVKA_CAM_OUTPUT", Some("hdmi"), || {
        with_var("LESAVKA_HW_H264", None::<&str>, || {
            with_var("LESAVKA_CAM_CODEC", None::<&str>, || {
                let cfg = update_camera_config();
                assert_eq!(cfg.output, CameraOutput::Hdmi);
                assert_eq!(cfg.codec, CameraCodec::Mjpeg);
                assert_eq!(cfg.width, 1280);
                assert_eq!(cfg.height, 720);
                assert_eq!(cfg.fps, 30);
            });
        });
    });
}
// An explicit LESAVKA_CAM_CODEC=h264 override wins over the MJPEG fallback
// even when no hardware decoder env marker is present.
#[test]
#[serial]
fn hdmi_camera_profile_honors_explicit_codec_override() {
    with_var("LESAVKA_CAM_OUTPUT", Some("hdmi"), || {
        with_var("LESAVKA_HW_H264", None::<&str>, || {
            with_var("LESAVKA_CAM_CODEC", Some("h264"), || {
                let cfg = update_camera_config();
                assert_eq!(cfg.output, CameraOutput::Hdmi);
                assert_eq!(cfg.codec, CameraCodec::H264);
            });
        });
    });
}
#[test]
fn hdmi_mode_parsing_accepts_sysfs_and_override_shapes() {
assert_eq!(

View File

@ -0,0 +1,369 @@
#![forbid(unsafe_code)]
use std::sync::atomic::{AtomicU64, Ordering};
use std::sync::{Arc, Mutex};
use tokio::sync::{OwnedSemaphorePermit, Semaphore};
/// Logical upstream media kinds that share one live-call session timeline.
///
/// Each kind has its own active-generation slot in the runtime, but both
/// kinds rebase their packets against the same shared session clock.
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub enum UpstreamMediaKind {
    /// Webcam uplink frames destined for the UVC/HDMI sink path.
    Camera,
    /// Microphone uplink packets destined for the UAC sink path.
    Microphone,
}
/// Lease returned when one upstream media stream becomes the active owner.
///
/// A cheap `Copy` token: holders compare `generation` against the runtime's
/// current slot to learn when a newer stream of the same kind superseded them.
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub struct UpstreamStreamLease {
    /// Shared session id for the current upstream live-call window.
    pub session_id: u64,
    /// Per-kind generation used to supersede older streams of the same kind.
    pub generation: u64,
}
/// Ownership and shared-clock state guarded by the runtime's mutex.
#[derive(Debug, Default)]
struct UpstreamClockState {
    // Session id shared by every stream active in the current live-call window.
    session_id: u64,
    // Generation of the camera stream that currently owns the camera slot.
    active_camera_generation: Option<u64>,
    // Generation of the microphone stream that currently owns the mic slot.
    active_microphone_generation: Option<u64>,
    // First remote PTS observed this session; the origin all packets rebase from.
    base_remote_pts_us: Option<u64>,
    // Last local video PTS handed out; enforces per-kind monotonicity.
    last_video_local_pts_us: Option<u64>,
    // Last local audio PTS handed out; enforces per-kind monotonicity.
    last_audio_local_pts_us: Option<u64>,
}
/// Coordinate upstream stream ownership and keep audio/video on one timeline.
///
/// Inputs: stream-open/close events plus remote packet timestamps.
/// Outputs: active-stream leases and rebased local PTS values.
/// Why: live calls need one current webcam owner, one current microphone owner,
/// and one shared media clock so reconnects do not leave old sinks alive or let
/// audio/video drift onto separate timing islands.
#[derive(Debug)]
pub struct UpstreamMediaRuntime {
    // Monotonic source of shared session ids (bumped when a fresh session starts).
    next_session_id: AtomicU64,
    // Monotonic source of camera lease generations.
    next_camera_generation: AtomicU64,
    // Monotonic source of microphone lease generations.
    next_microphone_generation: AtomicU64,
    // Single-permit gate: at most one live owner of the UAC playback sink.
    microphone_sink_gate: Arc<Semaphore>,
    // Ownership + shared-clock state; all reads and writes go through this lock.
    state: Mutex<UpstreamClockState>,
}
impl UpstreamMediaRuntime {
    /// Build an empty upstream runtime.
    #[must_use]
    pub fn new() -> Self {
        Self {
            next_session_id: AtomicU64::new(0),
            next_camera_generation: AtomicU64::new(0),
            next_microphone_generation: AtomicU64::new(0),
            // One permit: only a single microphone sink owner may exist at a time.
            microphone_sink_gate: Arc::new(Semaphore::new(1)),
            state: Mutex::new(UpstreamClockState::default()),
        }
    }
    /// Activate a camera stream as the current owner for the session.
    #[must_use]
    pub fn activate_camera(&self) -> UpstreamStreamLease {
        self.activate(UpstreamMediaKind::Camera)
    }
    /// Activate a microphone stream as the current owner for the session.
    #[must_use]
    pub fn activate_microphone(&self) -> UpstreamStreamLease {
        self.activate(UpstreamMediaKind::Microphone)
    }
    /// Reserve the single live microphone sink slot for one generation.
    ///
    /// Inputs: the microphone lease generation that wants to own the UAC sink.
    /// Outputs: an owned semaphore permit while that generation still owns the
    /// microphone slot, or `None` if a newer stream superseded it before the
    /// previous sink fully stood down.
    /// Why: ALSA only allows one live owner of the UAC playback device, so a
    /// replacement stream must wait for the old owner to release the sink
    /// before opening a new playback pipeline.
    pub async fn reserve_microphone_sink(&self, generation: u64) -> Option<OwnedSemaphorePermit> {
        let permit = self
            .microphone_sink_gate
            .clone()
            .acquire_owned()
            .await
            .ok()?;
        // Re-check ownership after the (possibly long) wait: a newer stream
        // may have superseded this generation while we queued for the permit.
        self.is_microphone_active(generation).then_some(permit)
    }
    /// Allocate the next generation for `kind`, starting a fresh shared
    /// session (new session id, cleared clock) when no stream of either kind
    /// was active at the moment of activation.
    fn activate(&self, kind: UpstreamMediaKind) -> UpstreamStreamLease {
        let generation = match kind {
            UpstreamMediaKind::Camera => {
                self.next_camera_generation.fetch_add(1, Ordering::SeqCst) + 1
            }
            UpstreamMediaKind::Microphone => {
                self.next_microphone_generation
                    .fetch_add(1, Ordering::SeqCst)
                    + 1
            }
        };
        let mut state = self
            .state
            .lock()
            .expect("upstream media state mutex poisoned");
        if state.active_camera_generation.is_none() && state.active_microphone_generation.is_none()
        {
            state.session_id = self.next_session_id.fetch_add(1, Ordering::SeqCst) + 1;
            state.base_remote_pts_us = None;
            state.last_video_local_pts_us = None;
            state.last_audio_local_pts_us = None;
        }
        match kind {
            UpstreamMediaKind::Camera => state.active_camera_generation = Some(generation),
            UpstreamMediaKind::Microphone => state.active_microphone_generation = Some(generation),
        }
        UpstreamStreamLease {
            session_id: state.session_id,
            generation,
        }
    }
    /// Return whether the supplied camera lease is still the active owner.
    #[must_use]
    pub fn is_camera_active(&self, generation: u64) -> bool {
        self.is_active(UpstreamMediaKind::Camera, generation)
    }
    /// Return whether the supplied microphone lease is still the active owner.
    #[must_use]
    pub fn is_microphone_active(&self, generation: u64) -> bool {
        self.is_active(UpstreamMediaKind::Microphone, generation)
    }
    /// True when `generation` still owns the slot for `kind`.
    fn is_active(&self, kind: UpstreamMediaKind, generation: u64) -> bool {
        let state = self
            .state
            .lock()
            .expect("upstream media state mutex poisoned");
        match kind {
            UpstreamMediaKind::Camera => state.active_camera_generation == Some(generation),
            UpstreamMediaKind::Microphone => state.active_microphone_generation == Some(generation),
        }
    }
    /// Mark a camera stream as closed if it still owns the camera slot.
    pub fn close_camera(&self, generation: u64) {
        self.close(UpstreamMediaKind::Camera, generation);
    }
    /// Mark a microphone stream as closed if it still owns the microphone slot.
    pub fn close_microphone(&self, generation: u64) {
        self.close(UpstreamMediaKind::Microphone, generation);
    }
    /// Release the slot for `kind` only when `generation` still owns it;
    /// closing a superseded generation is a no-op. When the last active
    /// stream closes, the shared clock anchor is reset for the next session.
    fn close(&self, kind: UpstreamMediaKind, generation: u64) {
        let mut state = self
            .state
            .lock()
            .expect("upstream media state mutex poisoned");
        match kind {
            UpstreamMediaKind::Camera if state.active_camera_generation == Some(generation) => {
                state.active_camera_generation = None;
            }
            UpstreamMediaKind::Microphone
                if state.active_microphone_generation == Some(generation) =>
            {
                state.active_microphone_generation = None;
            }
            _ => return,
        }
        if state.active_camera_generation.is_none() && state.active_microphone_generation.is_none()
        {
            state.base_remote_pts_us = None;
            state.last_video_local_pts_us = None;
            state.last_audio_local_pts_us = None;
        }
    }
    /// Rebase one upstream video packet timestamp onto the shared session clock.
    #[must_use]
    pub fn map_video_pts(&self, remote_pts_us: u64, frame_step_us: u64) -> u64 {
        self.map_pts(
            UpstreamMediaKind::Camera,
            remote_pts_us,
            frame_step_us.max(1),
        )
    }
    /// Rebase one upstream audio packet timestamp onto the shared session clock.
    #[must_use]
    pub fn map_audio_pts(&self, remote_pts_us: u64) -> u64 {
        self.map_pts(UpstreamMediaKind::Microphone, remote_pts_us, 1)
    }
    /// Subtract the session's first-seen remote PTS, then force the per-kind
    /// timeline forward by at least `min_step_us` when remote PTS repeats or
    /// moves backwards.
    fn map_pts(&self, kind: UpstreamMediaKind, remote_pts_us: u64, min_step_us: u64) -> u64 {
        let mut state = self
            .state
            .lock()
            .expect("upstream media state mutex poisoned");
        // The first packet of the session (audio or video) anchors the origin.
        let base_remote = *state.base_remote_pts_us.get_or_insert(remote_pts_us);
        let mut local_pts_us = remote_pts_us.saturating_sub(base_remote);
        let last_slot = match kind {
            UpstreamMediaKind::Camera => &mut state.last_video_local_pts_us,
            UpstreamMediaKind::Microphone => &mut state.last_audio_local_pts_us,
        };
        if let Some(last_pts_us) = *last_slot
            && local_pts_us <= last_pts_us
        {
            local_pts_us = last_pts_us.saturating_add(min_step_us.max(1));
        }
        *last_slot = Some(local_pts_us);
        local_pts_us
    }
}
impl Default for UpstreamMediaRuntime {
    /// Equivalent to [`UpstreamMediaRuntime::new`]; provided so the type
    /// satisfies `clippy::new_without_default` and composes with
    /// `..Default::default()` call sites.
    fn default() -> Self {
        Self::new()
    }
}
#[cfg(test)]
mod tests {
    use super::{UpstreamMediaKind, UpstreamMediaRuntime};
    use std::sync::Arc;
    use std::time::Duration;
    // The first activation of either kind opens session 1; both kinds share it.
    #[test]
    fn first_stream_starts_a_new_shared_session() {
        let runtime = UpstreamMediaRuntime::new();
        let camera = runtime.activate_camera();
        let microphone = runtime.activate_microphone();
        assert_eq!(camera.session_id, 1);
        assert_eq!(microphone.session_id, 1);
        assert!(runtime.is_camera_active(camera.generation));
        assert!(runtime.is_microphone_active(microphone.generation));
    }
    // Re-activating the same kind keeps the session id but invalidates the
    // earlier lease's generation.
    #[test]
    fn replacing_one_kind_keeps_the_session_but_preempts_the_old_owner() {
        let runtime = UpstreamMediaRuntime::new();
        let first = runtime.activate_microphone();
        let second = runtime.activate_microphone();
        assert_eq!(first.session_id, second.session_id);
        assert!(!runtime.is_microphone_active(first.generation));
        assert!(runtime.is_microphone_active(second.generation));
    }
    // Once both owners close, the next activation starts a new session id.
    #[test]
    fn closing_the_last_stream_resets_the_next_session_anchor() {
        let runtime = UpstreamMediaRuntime::new();
        let camera = runtime.activate_camera();
        let microphone = runtime.activate_microphone();
        runtime.close_camera(camera.generation);
        runtime.close_microphone(microphone.generation);
        let next = runtime.activate_camera();
        assert_eq!(next.session_id, 2);
    }
    // Audio and video rebase against the same first-seen remote PTS origin.
    #[test]
    fn shared_clock_rebases_audio_and_video_against_the_same_origin() {
        let runtime = UpstreamMediaRuntime::new();
        let _camera = runtime.activate_camera();
        let _microphone = runtime.activate_microphone();
        let video_first = runtime.map_video_pts(1_000_000, 16_666);
        let audio_first = runtime.map_audio_pts(1_000_000);
        let audio_next = runtime.map_audio_pts(1_010_000);
        let video_next = runtime.map_video_pts(1_033_333, 16_666);
        assert_eq!(video_first, 0);
        assert_eq!(audio_first, 0);
        assert_eq!(audio_next, 10_000);
        assert_eq!(video_next, 33_333);
    }
    // A repeated remote PTS is pushed forward by one frame step.
    #[test]
    fn shared_clock_keeps_each_kind_monotonic_when_remote_pts_repeat() {
        let runtime = UpstreamMediaRuntime::new();
        let _camera = runtime.activate_camera();
        let first = runtime.map_video_pts(50_000, 16_666);
        let repeated = runtime.map_video_pts(50_000, 16_666);
        assert_eq!(first, 0);
        assert_eq!(repeated, 16_666);
    }
    // Closing with a stale generation must not disturb the current owner.
    #[test]
    fn close_ignores_superseded_generation_values() {
        let runtime = UpstreamMediaRuntime::new();
        let first = runtime.activate_camera();
        let second = runtime.activate_camera();
        runtime.close_camera(first.generation);
        assert!(runtime.is_camera_active(second.generation));
        runtime.close(UpstreamMediaKind::Camera, second.generation);
        let next = runtime.activate_camera();
        assert_eq!(next.session_id, 2);
    }
    // The sink gate blocks the replacement until the old permit is dropped.
    #[tokio::test(flavor = "current_thread")]
    async fn new_microphone_owner_waits_for_the_previous_sink_to_release() {
        let runtime = Arc::new(UpstreamMediaRuntime::new());
        let first = runtime.activate_microphone();
        let first_permit = runtime
            .reserve_microphone_sink(first.generation)
            .await
            .expect("first owner should acquire the sink gate");
        let second = runtime.activate_microphone();
        let waiter = tokio::spawn({
            let runtime = runtime.clone();
            async move {
                runtime
                    .reserve_microphone_sink(second.generation)
                    .await
                    .is_some()
            }
        });
        tokio::time::sleep(Duration::from_millis(25)).await;
        assert!(!waiter.is_finished());
        drop(first_permit);
        assert!(waiter.await.expect("waiter task should finish"));
    }
    // A waiter that gets superseded while queued receives None rather than a
    // permit, so it never opens a sink it no longer owns.
    #[tokio::test(flavor = "current_thread")]
    async fn superseded_microphone_waiter_stands_down_before_opening_a_sink() {
        let runtime = Arc::new(UpstreamMediaRuntime::new());
        let first = runtime.activate_microphone();
        let first_permit = runtime
            .reserve_microphone_sink(first.generation)
            .await
            .expect("first owner should acquire the sink gate");
        let second = runtime.activate_microphone();
        let superseded_waiter = tokio::spawn({
            let runtime = runtime.clone();
            async move {
                runtime
                    .reserve_microphone_sink(second.generation)
                    .await
                    .is_some()
            }
        });
        tokio::time::sleep(Duration::from_millis(25)).await;
        let third = runtime.activate_microphone();
        drop(first_permit);
        assert!(
            !superseded_waiter
                .await
                .expect("superseded waiter task should finish"),
            "older waiter should stand down instead of opening a sink after supersession"
        );
        let third_permit = runtime
            .reserve_microphone_sink(third.generation)
            .await
            .expect("latest owner should acquire the sink gate");
        drop(third_permit);
    }
}

View File

@ -12,6 +12,11 @@ pub struct HdmiSink {
frame_step_us: u64,
}
/// Depth of the HDMI sink queue, in buffers.
///
/// Reads `LESAVKA_HDMI_QUEUE_BUFFERS` with a default of 1 and clamps the
/// result to at least one buffer so the queue can never be configured away.
#[cfg(any(not(coverage), test))]
fn hdmi_queue_buffers() -> u32 {
    let configured = crate::video_support::env_u32("LESAVKA_HDMI_QUEUE_BUFFERS", 1);
    configured.max(1)
}
impl HdmiSink {
/// Build a new HDMI sink pipeline.
///
@ -76,13 +81,21 @@ impl HdmiSink {
.property("caps", &raw_caps)
.build()?;
let queue_depth = hdmi_queue_buffers();
let queue = gst::ElementFactory::make("queue")
.property("max-size-buffers", 4u32)
.property("max-size-buffers", queue_depth)
.property("max-size-bytes", 0u32)
.property("max-size-time", 0u64)
.build()?;
let convert = gst::ElementFactory::make("videoconvert").build()?;
let rate = gst::ElementFactory::make("videorate").build()?;
let scale = gst::ElementFactory::make("videoscale").build()?;
let sink = build_hdmi_sink(cfg)?;
tracing::info!(
target: "lesavka_server::video",
queue_depth,
"📺 HDMI sink queue depth armed"
);
if (display_width, display_height) != (cfg.width, cfg.height) {
tracing::info!(
@ -190,7 +203,8 @@ impl HdmiSink {
pub fn push(&self, pkt: VideoPacket) {
let mut buf = gst::Buffer::from_slice(pkt.data);
if let Some(meta) = buf.get_mut() {
let pts_us = next_local_pts(&self.next_pts_us, self.frame_step_us);
let pts_us =
crate::video_support::reserve_local_pts(&self.next_pts_us, pkt.pts, self.frame_step_us);
let ts = gst::ClockTime::from_useconds(pts_us);
meta.set_pts(Some(ts));
meta.set_dts(Some(ts));
@ -352,3 +366,28 @@ fn read_bool_env(name: &str) -> Option<bool> {
_ => None,
}
}
#[cfg(test)]
mod hdmi_queue_tests {
    use super::hdmi_queue_buffers;
    // Without an override the HDMI queue holds exactly one frame.
    #[test]
    fn hdmi_queue_depth_defaults_to_one_frame() {
        temp_env::with_var_unset("LESAVKA_HDMI_QUEUE_BUFFERS", || {
            assert_eq!(hdmi_queue_buffers(), 1);
        });
    }
    // Positive overrides are honored; zero or unparsable values clamp to one.
    #[test]
    fn hdmi_queue_depth_accepts_positive_env_override_only() {
        temp_env::with_var("LESAVKA_HDMI_QUEUE_BUFFERS", Some("3"), || {
            assert_eq!(hdmi_queue_buffers(), 3);
        });
        temp_env::with_var("LESAVKA_HDMI_QUEUE_BUFFERS", Some("0"), || {
            assert_eq!(hdmi_queue_buffers(), 1);
        });
        temp_env::with_var("LESAVKA_HDMI_QUEUE_BUFFERS", Some("nope"), || {
            assert_eq!(hdmi_queue_buffers(), 1);
        });
    }
}

View File

@ -9,7 +9,7 @@ use std::sync::atomic::AtomicU64;
use tracing::warn;
use crate::camera::{CameraCodec, CameraConfig};
use crate::video_support::{contains_idr, dev_mode_enabled, next_local_pts, pick_h264_decoder};
use crate::video_support::{contains_idr, dev_mode_enabled, pick_h264_decoder, reserve_local_pts};
/// Push H.264 or MJPEG frames into the USB UVC gadget.
///
@ -180,7 +180,7 @@ impl WebcamSink {
pub fn push(&self, pkt: VideoPacket) {
let mut buf = gst::Buffer::from_slice(pkt.data);
if let Some(meta) = buf.get_mut() {
let pts_us = next_local_pts(&self.next_pts_us, self.frame_step_us);
let pts_us = reserve_local_pts(&self.next_pts_us, pkt.pts, self.frame_step_us);
let ts = gst::ClockTime::from_useconds(pts_us);
meta.set_pts(Some(ts));
meta.set_dts(Some(ts));

View File

@ -166,11 +166,30 @@ pub fn next_local_pts(counter: &AtomicU64, frame_step_us: u64) -> u64 {
counter.fetch_add(frame_step_us, Ordering::Relaxed)
}
/// Reserve a monotonic local timestamp while preferring a caller-provided value.
///
/// Inputs: the shared counter, a preferred local timestamp, and the minimum
/// step to enforce between consecutive values.
/// Outputs: a timestamp that never goes backwards for the current sink.
/// Why: upstream media can now arrive with a shared session timeline, so sink
/// playback should honor that timing when possible while still guarding against
/// repeated values that would destabilize live playback.
#[must_use]
pub fn reserve_local_pts(counter: &AtomicU64, preferred_pts_us: u64, frame_step_us: u64) -> u64 {
    let min_step_us = frame_step_us.max(1);
    // Single atomic read-modify-write instead of a separate load + store:
    // with the plain pair, two concurrent callers could both observe the same
    // floor and hand out duplicate (non-monotonic) PTS values.
    let previous_floor_us = counter
        .fetch_update(Ordering::Relaxed, Ordering::Relaxed, |floor_us| {
            Some(preferred_pts_us.max(floor_us).saturating_add(min_step_us))
        })
        .expect("fetch_update closure always returns Some");
    preferred_pts_us.max(previous_floor_us)
}
#[cfg(test)]
mod tests {
use super::{
adjust_effective_fps, contains_idr, default_eye_fps, env_u32, env_usize, next_local_pts,
should_send_frame,
reserve_local_pts, should_send_frame,
};
use serial_test::serial;
use std::sync::atomic::AtomicU64;
@ -212,6 +231,14 @@ mod tests {
assert_eq!(next_local_pts(&counter, 40_000), 40_000);
}
#[test]
fn reserve_local_pts_prefers_preferred_value_but_stays_monotonic() {
let counter = AtomicU64::new(0);
assert_eq!(reserve_local_pts(&counter, 0, 40_000), 0);
assert_eq!(reserve_local_pts(&counter, 10_000, 40_000), 40_000);
assert_eq!(reserve_local_pts(&counter, 120_000, 40_000), 120_000);
}
#[test]
#[serial]
fn env_helpers_parse_values_and_fallbacks() {

View File

@ -382,7 +382,7 @@ JSON
}
#[test]
fn pull_returns_packet_when_appsink_has_buffered_sample() {
fn pull_returns_packet_when_appsink_has_buffered_sample_with_shared_capture_clock_pts() {
gst::init().ok();
let pipeline = gst::Pipeline::new();
let src = gst::ElementFactory::make("appsrc")
@ -406,21 +406,33 @@ JSON
src.link(&sink).expect("link appsrc->appsink");
pipeline.set_state(gst::State::Playing).ok();
let mut buf = gst::Buffer::from_slice(vec![1_u8, 2, 3, 4]);
buf.get_mut()
let mut first = gst::Buffer::from_slice(vec![1_u8, 2, 3, 4]);
first
.get_mut()
.expect("buffer mut")
.set_pts(Some(gst::ClockTime::from_useconds(321)));
src.push_buffer(buf).expect("push sample");
src.push_buffer(first).expect("push first sample");
let mut second = gst::Buffer::from_slice(vec![5_u8, 6, 7, 8]);
second
.get_mut()
.expect("buffer mut")
.set_pts(Some(gst::ClockTime::from_useconds(999_999)));
src.push_buffer(second).expect("push second sample");
let cap = MicrophoneCapture {
pipeline,
sink,
level_tap_running: None,
};
let pkt = cap.pull().expect("audio packet");
assert_eq!(pkt.id, 0);
assert_eq!(pkt.pts, 321);
assert_eq!(pkt.data, vec![1, 2, 3, 4]);
let first_pkt = cap.pull().expect("first audio packet");
let second_pkt = cap.pull().expect("second audio packet");
assert_eq!(first_pkt.id, 0);
assert_eq!(first_pkt.data, vec![1, 2, 3, 4]);
assert_eq!(second_pkt.data, vec![5, 6, 7, 8]);
assert!(second_pkt.pts >= first_pkt.pts);
assert_ne!(first_pkt.pts, 321);
assert_ne!(second_pkt.pts, 999_999);
}
#[test]

View File

@ -208,18 +208,46 @@ fn handshake_uses_uvc_interval_when_fps_is_unset() {
#[test]
#[serial]
fn handshake_returns_hdmi_caps_with_h264_codec() {
fn handshake_returns_hdmi_caps_with_mjpeg_when_h264_decode_is_unavailable() {
with_var("LESAVKA_CAM_OUTPUT", Some("hdmi"), || {
with_var("LESAVKA_DISABLE_UAC", Some("1"), || {
let rt = Runtime::new().expect("create runtime");
let caps = rt.block_on(negotiate_against_local_server());
assert_eq!(caps.camera_output, Some(String::from("hdmi")));
assert_eq!(caps.camera_codec, Some(String::from("h264")));
assert_eq!(caps.camera_fps, Some(30));
assert!(!caps.microphone);
assert!(caps.camera);
assert!(matches!(caps.camera_width, Some(1280) | Some(1920)));
assert!(matches!(caps.camera_height, Some(720) | Some(1080)));
with_var("LESAVKA_HW_H264", None::<&str>, || {
with_var("LESAVKA_CAM_CODEC", None::<&str>, || {
let _ = lesavka_server::camera::update_camera_config();
let rt = Runtime::new().expect("create runtime");
let caps = rt.block_on(negotiate_against_local_server());
assert_eq!(caps.camera_output, Some(String::from("hdmi")));
assert_eq!(caps.camera_codec, Some(String::from("mjpeg")));
assert_eq!(caps.camera_fps, Some(30));
assert!(!caps.microphone);
assert!(caps.camera);
assert_eq!(caps.camera_width, Some(1280));
assert_eq!(caps.camera_height, Some(720));
});
});
});
});
}
// With HDMI output forced and the codec explicitly pinned to h264, the
// handshake must advertise h264 even though no hardware decoder marker
// (LESAVKA_HW_H264) is set.
#[test]
#[serial]
fn handshake_honors_explicit_hdmi_h264_override() {
    with_var("LESAVKA_CAM_OUTPUT", Some("hdmi"), || {
        with_var("LESAVKA_DISABLE_UAC", Some("1"), || {
            with_var("LESAVKA_HW_H264", None::<&str>, || {
                with_var("LESAVKA_CAM_CODEC", Some("h264"), || {
                    let _ = lesavka_server::camera::update_camera_config();
                    let rt = Runtime::new().expect("create runtime");
                    let caps = rt.block_on(negotiate_against_local_server());
                    assert_eq!(caps.camera_output, Some(String::from("hdmi")));
                    assert_eq!(caps.camera_codec, Some(String::from("h264")));
                    assert_eq!(caps.camera_fps, Some(30));
                    assert!(!caps.microphone);
                    assert!(caps.camera);
                    assert_eq!(caps.camera_width, Some(1280));
                    assert_eq!(caps.camera_height, Some(720));
                });
            });
        });
    });
}
@ -237,7 +265,10 @@ fn handshake_auto_mode_falls_back_to_a_valid_camera_configuration() {
assert!(matches!(caps.camera_height, Some(720) | Some(360)));
}
Some("hdmi") => {
assert_eq!(caps.camera_codec.as_deref(), Some("h264"));
assert!(matches!(
caps.camera_codec.as_deref(),
Some("mjpeg") | Some("h264")
));
assert!(matches!(caps.camera_width, Some(1280) | Some(1920)));
assert!(matches!(caps.camera_height, Some(720) | Some(1080)));
}

View File

@ -6,6 +6,8 @@
//! Why: audio pipeline setup is branchy and should stay stable without requiring
//! physical ALSA/UAC hardware in CI.
pub use lesavka_server::camera;
#[path = "../../server/src/audio.rs"]
#[allow(warnings)]
mod server_audio_contract;

View File

@ -75,21 +75,25 @@ fn camera_config_zero_interval_falls_back_to_default_fps() {
#[test]
#[serial]
fn camera_config_forced_hdmi_tracks_cached_state() {
fn camera_config_forced_hdmi_defaults_to_mjpeg_without_hardware_decode() {
with_var("LESAVKA_CAM_OUTPUT", Some("hdmi"), || {
let cfg = update_camera_config();
assert_eq!(cfg.output, CameraOutput::Hdmi);
assert_eq!(cfg.codec, CameraCodec::H264);
assert_eq!(cfg.fps, 30);
assert!(matches!(
(cfg.width, cfg.height),
(1920, 1080) | (1280, 720)
));
with_var("LESAVKA_HW_H264", None::<&str>, || {
with_var("LESAVKA_CAM_CODEC", None::<&str>, || {
let cfg = update_camera_config();
assert_eq!(cfg.output, CameraOutput::Hdmi);
assert_eq!(cfg.codec, CameraCodec::Mjpeg);
assert_eq!(cfg.width, 1280);
assert_eq!(cfg.height, 720);
assert_eq!(cfg.fps, 30);
let cached = current_camera_config();
assert_eq!(cached.output, CameraOutput::Hdmi);
assert_eq!(cached.codec, CameraCodec::H264);
assert_eq!(cached.fps, 30);
let cached = current_camera_config();
assert_eq!(cached.output, CameraOutput::Hdmi);
assert_eq!(cached.codec, CameraCodec::Mjpeg);
assert_eq!(cached.width, 1280);
assert_eq!(cached.height, 720);
assert_eq!(cached.fps, 30);
});
});
});
}
@ -97,21 +101,42 @@ fn camera_config_forced_hdmi_tracks_cached_state() {
#[serial]
fn camera_config_forced_hdmi_honors_1080p_uplink_override() {
with_var("LESAVKA_CAM_OUTPUT", Some("hdmi"), || {
with_var("LESAVKA_CAM_WIDTH", Some("1920"), || {
with_var("LESAVKA_CAM_HEIGHT", Some("1080"), || {
with_var("LESAVKA_CAM_FPS", Some("30"), || {
let cfg = update_camera_config();
assert_eq!(cfg.output, CameraOutput::Hdmi);
assert_eq!(cfg.codec, CameraCodec::H264);
assert_eq!(cfg.width, 1920);
assert_eq!(cfg.height, 1080);
assert_eq!(cfg.fps, 30);
with_var("LESAVKA_HW_H264", None::<&str>, || {
with_var("LESAVKA_CAM_WIDTH", Some("1920"), || {
with_var("LESAVKA_CAM_HEIGHT", Some("1080"), || {
with_var("LESAVKA_CAM_FPS", Some("30"), || {
with_var("LESAVKA_CAM_CODEC", None::<&str>, || {
let cfg = update_camera_config();
assert_eq!(cfg.output, CameraOutput::Hdmi);
assert_eq!(cfg.codec, CameraCodec::Mjpeg);
assert_eq!(cfg.width, 1920);
assert_eq!(cfg.height, 1080);
assert_eq!(cfg.fps, 30);
});
});
});
});
});
});
}
#[test]
#[serial]
fn camera_config_forced_hdmi_honors_explicit_h264_override() {
    // Even without hardware H.264 support detected, an explicit codec
    // override keeps H.264 selected at the default 720p30 HDMI geometry.
    with_var("LESAVKA_CAM_OUTPUT", Some("hdmi"), || {
        with_var("LESAVKA_HW_H264", None::<&str>, || {
            with_var("LESAVKA_CAM_CODEC", Some("h264"), || {
                let config = update_camera_config();
                assert_eq!(config.output, CameraOutput::Hdmi);
                assert_eq!(config.codec, CameraCodec::H264);
                assert_eq!((config.width, config.height), (1280, 720));
                assert_eq!(config.fps, 30);
            });
        });
    });
}
#[test]
#[serial]
fn camera_config_output_override_is_case_insensitive() {
@ -184,7 +209,7 @@ fn camera_config_invalid_output_falls_back_to_detected_policy() {
assert!(cfg.fps > 0);
}
CameraOutput::Hdmi => {
assert_eq!(cfg.codec, CameraCodec::H264);
assert!(matches!(cfg.codec, CameraCodec::Mjpeg | CameraCodec::H264));
assert_eq!(cfg.fps, 30);
}
}

View File

@ -49,6 +49,7 @@ mod server_main_binary {
gadget: UsbGadget::new("lesavka"),
did_cycle: std::sync::Arc::new(std::sync::atomic::AtomicBool::new(false)),
camera_rt: std::sync::Arc::new(CameraRuntime::new()),
upstream_media_rt: std::sync::Arc::new(UpstreamMediaRuntime::new()),
capture_power: CapturePowerManager::new(),
eye_hubs: std::sync::Arc::new(tokio::sync::Mutex::new(
std::collections::HashMap::new(),

View File

@ -118,6 +118,7 @@ mod server_main_binary_extra {
gadget: UsbGadget::new("lesavka"),
did_cycle: std::sync::Arc::new(std::sync::atomic::AtomicBool::new(false)),
camera_rt: std::sync::Arc::new(CameraRuntime::new()),
upstream_media_rt: std::sync::Arc::new(UpstreamMediaRuntime::new()),
capture_power: CapturePowerManager::new(),
eye_hubs: std::sync::Arc::new(tokio::sync::Mutex::new(
std::collections::HashMap::new(),

View File

@ -88,6 +88,7 @@ mod server_main_media_extra {
gadget: UsbGadget::new("lesavka"),
did_cycle: std::sync::Arc::new(std::sync::atomic::AtomicBool::new(false)),
camera_rt: std::sync::Arc::new(CameraRuntime::new()),
upstream_media_rt: std::sync::Arc::new(UpstreamMediaRuntime::new()),
capture_power: CapturePowerManager::new(),
eye_hubs: std::sync::Arc::new(tokio::sync::Mutex::new(
std::collections::HashMap::new(),

View File

@ -48,6 +48,7 @@ mod server_main_rpc {
gadget: UsbGadget::new("lesavka"),
did_cycle: std::sync::Arc::new(std::sync::atomic::AtomicBool::new(false)),
camera_rt: std::sync::Arc::new(CameraRuntime::new()),
upstream_media_rt: std::sync::Arc::new(UpstreamMediaRuntime::new()),
capture_power: CapturePowerManager::new(),
eye_hubs: std::sync::Arc::new(
tokio::sync::Mutex::new(std::collections::HashMap::new()),

View File

@ -45,6 +45,7 @@ mod server_main_rpc_reset {
gadget: UsbGadget::new("lesavka"),
did_cycle: std::sync::Arc::new(std::sync::atomic::AtomicBool::new(false)),
camera_rt: std::sync::Arc::new(CameraRuntime::new()),
upstream_media_rt: std::sync::Arc::new(UpstreamMediaRuntime::new()),
capture_power: CapturePowerManager::new(),
eye_hubs: std::sync::Arc::new(
tokio::sync::Mutex::new(std::collections::HashMap::new()),
@ -102,6 +103,7 @@ mod server_main_rpc_reset {
false,
)),
camera_rt: std::sync::Arc::new(CameraRuntime::new()),
upstream_media_rt: std::sync::Arc::new(UpstreamMediaRuntime::new()),
capture_power: CapturePowerManager::new(),
eye_hubs: std::sync::Arc::new(tokio::sync::Mutex::new(
std::collections::HashMap::new(),

View File

@ -118,6 +118,7 @@ mod server_main_binary_extra {
gadget: UsbGadget::new("lesavka"),
did_cycle: std::sync::Arc::new(std::sync::atomic::AtomicBool::new(false)),
camera_rt: std::sync::Arc::new(CameraRuntime::new()),
upstream_media_rt: std::sync::Arc::new(UpstreamMediaRuntime::new()),
capture_power: CapturePowerManager::new(),
eye_hubs: std::sync::Arc::new(tokio::sync::Mutex::new(
std::collections::HashMap::new(),
@ -151,6 +152,7 @@ mod server_main_binary_extra {
gadget: UsbGadget::new("lesavka"),
did_cycle: std::sync::Arc::new(std::sync::atomic::AtomicBool::new(false)),
camera_rt: std::sync::Arc::new(CameraRuntime::new()),
upstream_media_rt: std::sync::Arc::new(UpstreamMediaRuntime::new()),
capture_power: CapturePowerManager::new(),
eye_hubs: std::sync::Arc::new(
tokio::sync::Mutex::new(std::collections::HashMap::new()),
@ -214,6 +216,7 @@ echo noop core helper >&2
false,
)),
camera_rt: std::sync::Arc::new(CameraRuntime::new()),
upstream_media_rt: std::sync::Arc::new(UpstreamMediaRuntime::new()),
capture_power: CapturePowerManager::new(),
eye_hubs: std::sync::Arc::new(tokio::sync::Mutex::new(
std::collections::HashMap::new(),
@ -284,6 +287,7 @@ printf 'configured\n' > "$LESAVKA_GADGET_SYSFS_ROOT/class/udc/fake-ctrl.usb/stat
false,
)),
camera_rt: std::sync::Arc::new(CameraRuntime::new()),
upstream_media_rt: std::sync::Arc::new(UpstreamMediaRuntime::new()),
capture_power: CapturePowerManager::new(),
eye_hubs: std::sync::Arc::new(tokio::sync::Mutex::new(
std::collections::HashMap::new(),

View File

@ -60,6 +60,7 @@ mod server_upstream_media {
gadget: UsbGadget::new("lesavka"),
did_cycle: std::sync::Arc::new(std::sync::atomic::AtomicBool::new(false)),
camera_rt: std::sync::Arc::new(CameraRuntime::new()),
upstream_media_rt: std::sync::Arc::new(UpstreamMediaRuntime::new()),
capture_power: CapturePowerManager::new(),
eye_hubs: std::sync::Arc::new(tokio::sync::Mutex::new(
std::collections::HashMap::new(),
@ -127,6 +128,70 @@ mod server_upstream_media {
});
}
#[test]
#[serial]
fn stream_microphone_supersedes_the_previous_owner_cleanly() {
    // A second microphone stream must take ownership from the first, and the
    // displaced stream must still receive its final ack rather than hanging.
    let runtime = tokio::runtime::Runtime::new().expect("runtime");
    with_var("LESAVKA_CAPTURE_POWER_UNIT", Some("none"), || {
        runtime.block_on(async {
            let (_dir, handler) = build_handler_for_tests();
            let (server, mut client) = serve_handler(handler).await;
            let (first_tx, first_rx) = tokio::sync::mpsc::channel(1);
            let (_second_tx, second_rx) = tokio::sync::mpsc::channel(1);
            let first_req =
                tonic::Request::new(tokio_stream::wrappers::ReceiverStream::new(first_rx));
            let mut displaced = client
                .stream_microphone(first_req)
                .await
                .expect("first microphone stream")
                .into_inner();
            // Opening the second stream supersedes the first owner.
            let second_req =
                tonic::Request::new(tokio_stream::wrappers::ReceiverStream::new(second_rx));
            let _second = client
                .stream_microphone(second_req)
                .await
                .expect("second microphone stream supersedes first");
            // Close the displaced sender, then wait (bounded) for its ack.
            drop(first_tx);
            let ack = tokio::time::timeout(
                std::time::Duration::from_secs(1),
                displaced.message(),
            )
            .await
            .expect("superseded microphone ack timeout")
            .expect("superseded microphone ack grpc")
            .expect("superseded microphone ack item");
            assert_eq!(ack, Empty {});
            server.abort();
        });
    });
}
#[test]
#[serial]
fn stream_microphone_surfaces_internal_error_when_sink_open_fails() {
    // Forcing pipeline start to fail must surface as a gRPC Internal status
    // instead of a silent hang or a spurious success.
    let runtime = tokio::runtime::Runtime::new().expect("runtime");
    with_var("LESAVKA_CAPTURE_POWER_UNIT", Some("none"), || {
        with_var("LESAVKA_TEST_FORCE_PIPELINE_START_ERROR", Some("1"), || {
            runtime.block_on(async {
                let (_dir, handler) = build_handler_for_tests();
                let (server, mut client) = serve_handler(handler).await;
                let (_tx, rx) = tokio::sync::mpsc::channel(1);
                let request =
                    tonic::Request::new(tokio_stream::wrappers::ReceiverStream::new(rx));
                let status = client
                    .stream_microphone(request)
                    .await
                    .expect_err("missing sink should fail the stream");
                assert_eq!(status.code(), tonic::Code::Internal);
                server.abort();
            });
        });
    });
}
#[test]
#[serial]
fn stream_camera_accepts_upstream_video_packets() {