media: add HEVC upstream transport calibration
This commit is contained in:
parent
f900d7e582
commit
dee9466e6e
2
.gitignore
vendored
2
.gitignore
vendored
@ -4,6 +4,8 @@ coverage/
|
||||
logs/
|
||||
captures/
|
||||
tmp/
|
||||
*.profraw
|
||||
__pycache__/
|
||||
override.toml
|
||||
.cache/sccache/
|
||||
/unit-graph.json
|
||||
|
||||
6
Cargo.lock
generated
6
Cargo.lock
generated
@ -1652,7 +1652,7 @@ checksum = "09edd9e8b54e49e587e4f6295a7d29c3ea94d469cb40ab8ca70b288248a81db2"
|
||||
|
||||
[[package]]
|
||||
name = "lesavka_client"
|
||||
version = "0.20.0"
|
||||
version = "0.21.9"
|
||||
dependencies = [
|
||||
"anyhow",
|
||||
"async-stream",
|
||||
@ -1686,7 +1686,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "lesavka_common"
|
||||
version = "0.20.0"
|
||||
version = "0.21.9"
|
||||
dependencies = [
|
||||
"anyhow",
|
||||
"base64",
|
||||
@ -1698,7 +1698,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "lesavka_server"
|
||||
version = "0.20.0"
|
||||
version = "0.21.9"
|
||||
dependencies = [
|
||||
"anyhow",
|
||||
"base64",
|
||||
|
||||
@ -4,7 +4,7 @@ path = "src/main.rs"
|
||||
|
||||
[package]
|
||||
name = "lesavka_client"
|
||||
version = "0.20.0"
|
||||
version = "0.21.9"
|
||||
edition = "2024"
|
||||
|
||||
[dependencies]
|
||||
|
||||
@ -119,3 +119,93 @@ use super::*;
|
||||
"video capture timestamp should not resurrect stale source timing"
|
||||
);
|
||||
}
|
||||
|
||||
/// Verifies the live uplink queue emits one physically bundled HEVC frame and PCM span.
|
||||
///
|
||||
/// Inputs: pre-stamped HEVC video plus two nearby audio packets, exactly as
|
||||
/// physical capture workers would hand them to the bundler. Outputs:
|
||||
/// assertions over the queued `UpstreamMediaBundle`. Why: the real
|
||||
/// client-to-server stream should preserve the same pairing contract as the
|
||||
/// synthetic probe before gRPC can add network timing noise.
|
||||
#[cfg(not(coverage))]
|
||||
#[tokio::test]
|
||||
async fn hevc_video_and_nearby_audio_leave_live_uplink_as_one_bundle() {
|
||||
let temp_dir = tempfile::tempdir().expect("tempdir");
|
||||
let telemetry_path = temp_dir.path().join("uplink.json");
|
||||
let telemetry =
|
||||
crate::uplink_telemetry::UplinkTelemetryPublisher::new(telemetry_path, true, true);
|
||||
let camera_telemetry =
|
||||
telemetry.handle(crate::uplink_telemetry::UpstreamStreamKind::Camera);
|
||||
let microphone_telemetry =
|
||||
telemetry.handle(crate::uplink_telemetry::UpstreamStreamKind::Microphone);
|
||||
let queue: crate::uplink_fresh_queue::FreshPacketQueue<UpstreamMediaBundle> =
|
||||
crate::uplink_fresh_queue::FreshPacketQueue::new(BUNDLED_MEDIA_UPLINK_QUEUE);
|
||||
let drop_log = std::sync::Arc::new(std::sync::Mutex::new(UplinkDropLogLimiter::new(
|
||||
"test-bundled-hevc",
|
||||
"test",
|
||||
)));
|
||||
let mut bundle_seq = 0_u64;
|
||||
let video = VideoPacket {
|
||||
pts: 1_000_000,
|
||||
data: vec![0, 0, 0, 1, 0x26, 0x01, 0xaa],
|
||||
seq: 10,
|
||||
effective_fps: 30,
|
||||
client_capture_pts_us: 1_000_000,
|
||||
client_send_pts_us: 1_005_000,
|
||||
client_queue_depth: 1,
|
||||
client_queue_age_ms: 5,
|
||||
..Default::default()
|
||||
};
|
||||
let audio = vec![
|
||||
AudioPacket {
|
||||
pts: 980_000,
|
||||
data: vec![0x11; 1_920],
|
||||
seq: 20,
|
||||
client_capture_pts_us: 980_000,
|
||||
client_send_pts_us: 1_005_000,
|
||||
client_queue_depth: 1,
|
||||
client_queue_age_ms: 25,
|
||||
..Default::default()
|
||||
},
|
||||
AudioPacket {
|
||||
pts: 1_010_000,
|
||||
data: vec![0x22; 1_920],
|
||||
seq: 21,
|
||||
client_capture_pts_us: 1_010_000,
|
||||
client_send_pts_us: 1_015_000,
|
||||
client_queue_depth: 1,
|
||||
client_queue_age_ms: 5,
|
||||
..Default::default()
|
||||
},
|
||||
];
|
||||
|
||||
emit_bundled_media(
|
||||
42,
|
||||
&mut bundle_seq,
|
||||
Some(video),
|
||||
audio,
|
||||
&queue,
|
||||
&camera_telemetry,
|
||||
µphone_telemetry,
|
||||
&drop_log,
|
||||
);
|
||||
|
||||
let popped = queue.pop_fresh().await;
|
||||
let bundle = popped.packet.expect("queued bundled media");
|
||||
let video = bundle.video.as_ref().expect("bundled video");
|
||||
assert_eq!(bundle.session_id, 42);
|
||||
assert_eq!(bundle.seq, 1);
|
||||
assert_eq!(bundle.capture_start_us, 980_000);
|
||||
assert_eq!(bundle.capture_end_us, 1_010_000);
|
||||
assert_eq!(bundle.audio.len(), 2);
|
||||
assert!(video.data.windows(4).any(|window| window == [0, 0, 0, 1]));
|
||||
assert_eq!(video.client_capture_pts_us, 1_000_000);
|
||||
assert_eq!(video.client_send_pts_us, 1_005_000);
|
||||
assert_eq!(video.client_queue_depth, popped.queue_depth as u32);
|
||||
assert!(bundle.audio.iter().all(|packet| {
|
||||
packet.client_capture_pts_us >= bundle.capture_start_us
|
||||
&& packet.client_capture_pts_us <= bundle.capture_end_us
|
||||
&& packet.client_send_pts_us >= packet.client_capture_pts_us
|
||||
&& packet.client_queue_depth == popped.queue_depth as u32
|
||||
}));
|
||||
}
|
||||
|
||||
@ -114,7 +114,6 @@ fn packet_video_capture_pts_us(packet: &VideoPacket) -> u64 {
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(not(coverage))]
|
||||
fn queue_depth_u32(depth: usize) -> u32 {
|
||||
depth.try_into().unwrap_or(u32::MAX)
|
||||
}
|
||||
@ -124,17 +123,14 @@ fn duration_ms(duration: Duration) -> f32 {
|
||||
duration.as_secs_f32() * 1_000.0
|
||||
}
|
||||
|
||||
#[cfg(not(coverage))]
|
||||
fn duration_ms_u32(duration: Duration) -> u32 {
|
||||
duration.as_millis().min(u128::from(u32::MAX)) as u32
|
||||
}
|
||||
|
||||
#[cfg(not(coverage))]
|
||||
fn age_between_capture_and_enqueue(capture_pts_us: u64, enqueue_pts_us: u64) -> Duration {
|
||||
Duration::from_micros(enqueue_pts_us.saturating_sub(capture_pts_us))
|
||||
}
|
||||
|
||||
#[cfg(not(coverage))]
|
||||
fn stamp_audio_timing_metadata_at_enqueue(packet: &mut AudioPacket) -> Duration {
|
||||
static AUDIO_SEQUENCE: AtomicU64 = AtomicU64::new(0);
|
||||
let enqueue_pts_us = crate::live_capture_clock::capture_pts_us();
|
||||
@ -146,7 +142,6 @@ fn stamp_audio_timing_metadata_at_enqueue(packet: &mut AudioPacket) -> Duration
|
||||
age_between_capture_and_enqueue(capture_pts_us, enqueue_pts_us)
|
||||
}
|
||||
|
||||
#[cfg(not(coverage))]
|
||||
fn stamp_video_timing_metadata_at_enqueue(packet: &mut VideoPacket) -> Duration {
|
||||
static VIDEO_SEQUENCE: AtomicU64 = AtomicU64::new(0);
|
||||
let enqueue_pts_us = crate::live_capture_clock::capture_pts_us();
|
||||
@ -158,7 +153,6 @@ fn stamp_video_timing_metadata_at_enqueue(packet: &mut VideoPacket) -> Duration
|
||||
age_between_capture_and_enqueue(capture_pts_us, enqueue_pts_us)
|
||||
}
|
||||
|
||||
#[cfg(not(coverage))]
|
||||
/// Keeps `sanitized_capture_pts_us` explicit because it sits on the live uplink path, where stale media must be dropped instead of queued into latency.
|
||||
/// Inputs are the typed parameters; output is the return value or side effect.
|
||||
fn sanitized_capture_pts_us(packet_pts_us: u64, enqueue_pts_us: u64) -> u64 {
|
||||
@ -173,7 +167,6 @@ fn sanitized_capture_pts_us(packet_pts_us: u64, enqueue_pts_us: u64) -> u64 {
|
||||
capture_pts_us
|
||||
}
|
||||
|
||||
#[cfg(not(coverage))]
|
||||
/// Keeps `attach_audio_queue_metadata` explicit because it sits on the live uplink path, where stale media must be dropped instead of queued into latency.
|
||||
/// Inputs are the typed parameters; output is the return value or side effect.
|
||||
fn attach_audio_queue_metadata(
|
||||
@ -188,7 +181,6 @@ fn attach_audio_queue_metadata(
|
||||
packet.client_queue_age_ms = duration_ms_u32(delivery_age);
|
||||
}
|
||||
|
||||
#[cfg(not(coverage))]
|
||||
/// Keeps `attach_video_queue_metadata` explicit because it sits on the live uplink path, where stale media must be dropped instead of queued into latency.
|
||||
/// Inputs are the typed parameters; output is the return value or side effect.
|
||||
fn attach_video_queue_metadata(
|
||||
|
||||
@ -50,6 +50,7 @@ fn parse_camera_codec(raw: &str) -> Option<CameraCodec> {
|
||||
match raw.trim().to_ascii_lowercase().as_str() {
|
||||
"mjpeg" | "mjpg" | "jpeg" => Some(CameraCodec::Mjpeg),
|
||||
"h264" => Some(CameraCodec::H264),
|
||||
"hevc" | "h265" | "h.265" => Some(CameraCodec::Hevc),
|
||||
_ => None,
|
||||
}
|
||||
}
|
||||
@ -113,6 +114,10 @@ mod tests {
|
||||
assert!(matches!(config.codec, CameraCodec::Mjpeg));
|
||||
assert_eq!(config.width, 1280);
|
||||
|
||||
caps.camera_codec = Some(String::from("h265"));
|
||||
let config = camera_config_from_caps(&caps).expect("h265 alias should map");
|
||||
assert!(matches!(config.codec, CameraCodec::Hevc));
|
||||
|
||||
caps.camera_codec = Some(String::from("vp9"));
|
||||
assert!(camera_config_from_caps(&caps).is_none());
|
||||
}
|
||||
|
||||
@ -1,16 +1,19 @@
|
||||
#![cfg_attr(coverage, allow(dead_code))]
|
||||
|
||||
#[cfg(any(not(coverage), test))]
|
||||
use anyhow::{Context, Result, bail};
|
||||
#[cfg(not(coverage))]
|
||||
use serde::Serialize;
|
||||
#[cfg(not(coverage))]
|
||||
#[cfg(any(not(coverage), test))]
|
||||
use std::collections::BTreeSet;
|
||||
#[cfg(any(not(coverage), test))]
|
||||
use std::path::PathBuf;
|
||||
|
||||
#[cfg(not(coverage))]
|
||||
use lesavka_client::sync_probe::analyze::analyze_capture;
|
||||
#[cfg(any(not(coverage), test))]
|
||||
use lesavka_client::sync_probe::analyze::{
|
||||
SyncAnalysisOptions, SyncAnalysisReport, SyncAnalysisVerdict, SyncCalibrationRecommendation,
|
||||
analyze_capture,
|
||||
};
|
||||
|
||||
#[cfg(not(coverage))]
|
||||
@ -24,8 +27,8 @@ struct SyncAnalyzeOutput<'a> {
|
||||
verdict: SyncAnalysisVerdict,
|
||||
}
|
||||
|
||||
#[cfg(not(coverage))]
|
||||
#[derive(Serialize)]
|
||||
#[cfg(any(not(coverage), test))]
|
||||
#[cfg_attr(not(coverage), derive(Serialize))]
|
||||
struct SignatureCoverage {
|
||||
expected_event_count: usize,
|
||||
expected_codes: Vec<u32>,
|
||||
@ -258,7 +261,7 @@ fn parse_analysis_seconds(raw: &str, label: &str) -> Result<f64> {
|
||||
}
|
||||
|
||||
include!("lesavka_sync_analyze/human_report.rs");
|
||||
#[cfg(not(coverage))]
|
||||
#[cfg(any(not(coverage), test))]
|
||||
/// Keeps `signature_coverage` explicit because it sits on CLI orchestration, where operators need deterministic exits and artifact paths.
|
||||
/// Inputs are the typed parameters; output is the return value or side effect.
|
||||
fn signature_coverage(
|
||||
@ -305,7 +308,7 @@ fn signature_coverage(
|
||||
})
|
||||
}
|
||||
|
||||
#[cfg(not(coverage))]
|
||||
#[cfg(any(not(coverage), test))]
|
||||
/// Keeps `format_signature_coverage` explicit because it sits on CLI orchestration, where operators need deterministic exits and artifact paths.
|
||||
/// Inputs are the typed parameters; output is the return value or side effect.
|
||||
fn format_signature_coverage(coverage: Option<&SignatureCoverage>) -> String {
|
||||
@ -325,7 +328,7 @@ fn format_signature_coverage(coverage: Option<&SignatureCoverage>) -> String {
|
||||
)
|
||||
}
|
||||
|
||||
#[cfg(not(coverage))]
|
||||
#[cfg(any(not(coverage), test))]
|
||||
fn unpaired_video_onsets(report: &SyncAnalysisReport) -> Vec<f64> {
|
||||
unpaired_onsets(
|
||||
&report.video_onsets_s,
|
||||
@ -337,7 +340,7 @@ fn unpaired_video_onsets(report: &SyncAnalysisReport) -> Vec<f64> {
|
||||
)
|
||||
}
|
||||
|
||||
#[cfg(not(coverage))]
|
||||
#[cfg(any(not(coverage), test))]
|
||||
fn unpaired_audio_onsets(report: &SyncAnalysisReport) -> Vec<f64> {
|
||||
unpaired_onsets(
|
||||
&report.audio_onsets_s,
|
||||
@ -349,7 +352,7 @@ fn unpaired_audio_onsets(report: &SyncAnalysisReport) -> Vec<f64> {
|
||||
)
|
||||
}
|
||||
|
||||
#[cfg(not(coverage))]
|
||||
#[cfg(any(not(coverage), test))]
|
||||
/// Keeps `unpaired_onsets` explicit because it sits on CLI orchestration, where operators need deterministic exits and artifact paths.
|
||||
/// Inputs are the typed parameters; output is the return value or side effect.
|
||||
fn unpaired_onsets(all_onsets: &[f64], paired_onsets: &[f64]) -> Vec<f64> {
|
||||
@ -365,7 +368,7 @@ fn unpaired_onsets(all_onsets: &[f64], paired_onsets: &[f64]) -> Vec<f64> {
|
||||
.collect()
|
||||
}
|
||||
|
||||
#[cfg(not(coverage))]
|
||||
#[cfg(any(not(coverage), test))]
|
||||
/// Keeps `format_onset_list` explicit because it sits on CLI orchestration, where operators need deterministic exits and artifact paths.
|
||||
/// Inputs are the typed parameters; output is the return value or side effect.
|
||||
fn format_onset_list(onsets: &[f64]) -> String {
|
||||
@ -384,7 +387,7 @@ fn format_onset_list(onsets: &[f64]) -> String {
|
||||
formatted.join(", ")
|
||||
}
|
||||
|
||||
#[cfg(not(coverage))]
|
||||
#[cfg(any(not(coverage), test))]
|
||||
/// Keeps `format_usize_list` explicit because it sits on CLI orchestration, where operators need deterministic exits and artifact paths.
|
||||
/// Inputs are the typed parameters; output is the return value or side effect.
|
||||
fn format_usize_list(values: &[usize]) -> String {
|
||||
@ -398,7 +401,7 @@ fn format_usize_list(values: &[usize]) -> String {
|
||||
.join(", ")
|
||||
}
|
||||
|
||||
#[cfg(not(coverage))]
|
||||
#[cfg(any(not(coverage), test))]
|
||||
/// Keeps `format_u32_list` explicit because it sits on CLI orchestration, where operators need deterministic exits and artifact paths.
|
||||
/// Inputs are the typed parameters; output is the return value or side effect.
|
||||
fn format_u32_list(values: &[u32]) -> String {
|
||||
|
||||
@ -3,6 +3,34 @@
|
||||
fn print_upstream_sync(state: lesavka_common::lesavka::UpstreamSyncState) {
|
||||
println!("planner_session_id={}", state.session_id);
|
||||
println!("planner_phase={}", state.phase);
|
||||
println!(
|
||||
"planner_latest_camera_remote_pts_us={}",
|
||||
state
|
||||
.latest_camera_remote_pts_us
|
||||
.map(|value| value.to_string())
|
||||
.unwrap_or_else(|| "pending".to_string())
|
||||
);
|
||||
println!(
|
||||
"planner_latest_microphone_remote_pts_us={}",
|
||||
state
|
||||
.latest_microphone_remote_pts_us
|
||||
.map(|value| value.to_string())
|
||||
.unwrap_or_else(|| "pending".to_string())
|
||||
);
|
||||
println!(
|
||||
"planner_last_video_presented_pts_us={}",
|
||||
state
|
||||
.last_video_presented_pts_us
|
||||
.map(|value| value.to_string())
|
||||
.unwrap_or_else(|| "pending".to_string())
|
||||
);
|
||||
println!(
|
||||
"planner_last_audio_presented_pts_us={}",
|
||||
state
|
||||
.last_audio_presented_pts_us
|
||||
.map(|value| value.to_string())
|
||||
.unwrap_or_else(|| "pending".to_string())
|
||||
);
|
||||
println!(
|
||||
"planner_live_lag_ms={}",
|
||||
state
|
||||
|
||||
@ -1,4 +1,4 @@
|
||||
#[cfg(not(coverage))]
|
||||
#[cfg(any(not(coverage), test))]
|
||||
/// Keeps `format_human_report` explicit because it sits on CLI orchestration, where operators need deterministic exits and artifact paths.
|
||||
/// Inputs are the typed parameters; output is the return value or side effect.
|
||||
fn format_human_report(
|
||||
|
||||
@ -2,7 +2,7 @@ use super::{
|
||||
CalibrationAction, CapturePowerCommand, CommandKind, Config, ParseOutcome,
|
||||
calibration_request_for, capture_power_request, parse_args_from, parse_args_outcome_from,
|
||||
};
|
||||
use lesavka_common::lesavka::CapturePowerState;
|
||||
use lesavka_common::lesavka::{CapturePowerState, UpstreamSyncState};
|
||||
|
||||
#[test]
|
||||
/// Verifies safe recovery commands stay separate from explicit hard reset.
|
||||
@ -229,6 +229,36 @@ fn print_state_accepts_full_capture_power_payload() {
|
||||
});
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn print_versions_accepts_unknown_and_reported_server_identity() {
|
||||
super::print_versions(
|
||||
"https://lab:50051",
|
||||
&lesavka_common::lesavka::HandshakeSet {
|
||||
camera_output: "uvc".to_string(),
|
||||
camera_codec: "mjpeg".to_string(),
|
||||
camera_width: 1280,
|
||||
camera_height: 720,
|
||||
camera_fps: 30,
|
||||
bundled_webcam_media: true,
|
||||
..Default::default()
|
||||
},
|
||||
);
|
||||
super::print_versions(
|
||||
"https://lab:50051",
|
||||
&lesavka_common::lesavka::HandshakeSet {
|
||||
server_version: "0.21.1".to_string(),
|
||||
server_revision: "abc1234".to_string(),
|
||||
camera_output: "uvc".to_string(),
|
||||
camera_codec: "hevc".to_string(),
|
||||
camera_width: 1920,
|
||||
camera_height: 1080,
|
||||
camera_fps: 30,
|
||||
bundled_webcam_media: true,
|
||||
..Default::default()
|
||||
},
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
/// Keeps `print_calibration_accepts_full_payload` explicit because it sits on CLI orchestration, where operators need deterministic exits and artifact paths.
|
||||
/// Inputs are the typed parameters; output is the return value or side effect.
|
||||
@ -248,6 +278,54 @@ fn print_calibration_accepts_full_payload() {
|
||||
});
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn print_upstream_sync_accepts_complete_and_pending_payloads() {
|
||||
super::print_upstream_sync(UpstreamSyncState {
|
||||
session_id: 7,
|
||||
phase: "locked".to_string(),
|
||||
latest_camera_remote_pts_us: Some(1_000),
|
||||
latest_microphone_remote_pts_us: Some(1_010),
|
||||
last_video_presented_pts_us: Some(2_000),
|
||||
last_audio_presented_pts_us: Some(2_010),
|
||||
live_lag_ms: Some(44.5),
|
||||
planner_skew_ms: Some(-3.25),
|
||||
stale_audio_drops: 1,
|
||||
stale_video_drops: 2,
|
||||
skew_video_drops: 3,
|
||||
freshness_reanchors: 4,
|
||||
startup_timeouts: 5,
|
||||
video_freezes: 6,
|
||||
last_reason: "healthy".to_string(),
|
||||
client_capture_skew_ms: Some(1.5),
|
||||
client_send_skew_ms: Some(-2.5),
|
||||
server_receive_skew_ms: Some(3.5),
|
||||
camera_client_queue_age_ms: Some(4.5),
|
||||
microphone_client_queue_age_ms: Some(5.5),
|
||||
camera_server_receive_age_ms: Some(6.5),
|
||||
microphone_server_receive_age_ms: Some(7.5),
|
||||
client_capture_abs_skew_p95_ms: Some(8.5),
|
||||
client_send_abs_skew_p95_ms: Some(9.5),
|
||||
server_receive_abs_skew_p95_ms: Some(10.5),
|
||||
camera_client_queue_age_p95_ms: Some(11.5),
|
||||
microphone_client_queue_age_p95_ms: Some(12.5),
|
||||
sink_handoff_skew_ms: Some(-13.5),
|
||||
sink_handoff_abs_skew_p95_ms: Some(14.5),
|
||||
camera_sink_late_ms: Some(15.5),
|
||||
microphone_sink_late_ms: Some(-16.5),
|
||||
camera_sink_late_p95_ms: Some(17.5),
|
||||
microphone_sink_late_p95_ms: Some(18.5),
|
||||
client_timing_window_samples: 19,
|
||||
sink_handoff_window_samples: 20,
|
||||
});
|
||||
|
||||
super::print_upstream_sync(UpstreamSyncState {
|
||||
session_id: 0,
|
||||
phase: "acquiring".to_string(),
|
||||
last_reason: "waiting".to_string(),
|
||||
..UpstreamSyncState::default()
|
||||
});
|
||||
}
|
||||
|
||||
#[test]
|
||||
/// Keeps `calibration_requests_are_only_built_for_calibration_mutations` explicit because it sits on CLI orchestration, where operators need deterministic exits and artifact paths.
|
||||
/// Inputs are the typed parameters; output is the return value or side effect.
|
||||
@ -272,6 +350,38 @@ fn calibration_requests_are_only_built_for_calibration_mutations() {
|
||||
assert_eq!(request.video_delta_us, 71_600);
|
||||
assert_eq!(request.note, "probe");
|
||||
|
||||
for (command, action) in [
|
||||
(
|
||||
CommandKind::CalibrationRestoreDefault,
|
||||
CalibrationAction::RestoreDefault,
|
||||
),
|
||||
(
|
||||
CommandKind::CalibrationRestoreFactory,
|
||||
CalibrationAction::RestoreFactory,
|
||||
),
|
||||
(
|
||||
CommandKind::CalibrationSaveDefault,
|
||||
CalibrationAction::SaveActiveAsDefault,
|
||||
),
|
||||
] {
|
||||
let mutation = Config {
|
||||
server: config.server.clone(),
|
||||
command,
|
||||
audio_delta_us: config.audio_delta_us,
|
||||
video_delta_us: config.video_delta_us,
|
||||
note: config.note.clone(),
|
||||
probe_duration_seconds: config.probe_duration_seconds,
|
||||
probe_warmup_seconds: config.probe_warmup_seconds,
|
||||
probe_pulse_period_ms: config.probe_pulse_period_ms,
|
||||
probe_pulse_width_ms: config.probe_pulse_width_ms,
|
||||
probe_event_width_codes: config.probe_event_width_codes.clone(),
|
||||
probe_audio_delay_us: config.probe_audio_delay_us,
|
||||
probe_video_delay_us: config.probe_video_delay_us,
|
||||
};
|
||||
let request = calibration_request_for(&mutation).expect("calibration mutation");
|
||||
assert_eq!(request.action, action as i32);
|
||||
}
|
||||
|
||||
let status = Config {
|
||||
command: CommandKind::CalibrationStatus,
|
||||
..config
|
||||
|
||||
@ -34,13 +34,57 @@ fn parse_args_accepts_analysis_window() {
|
||||
assert_eq!(args.options.analysis_end_s, Some(26.5));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn parse_args_accepts_inline_options_and_open_ended_windows() {
|
||||
let args = parse_args([
|
||||
"capture.mkv",
|
||||
"--report-dir=/tmp/probe",
|
||||
"--event-width-codes=1, 2, 3",
|
||||
"--analysis-window-s=:26.5",
|
||||
])
|
||||
.expect("args");
|
||||
assert_eq!(
|
||||
args.report_dir,
|
||||
Some(std::path::PathBuf::from("/tmp/probe"))
|
||||
);
|
||||
assert_eq!(args.options.event_width_codes, vec![1, 2, 3]);
|
||||
assert_eq!(args.options.analysis_start_s, None);
|
||||
assert_eq!(args.options.analysis_end_s, Some(26.5));
|
||||
|
||||
let args = parse_args(["capture.mkv", "--analysis-window-s=8.25:"]).expect("args");
|
||||
assert_eq!(args.options.analysis_start_s, Some(8.25));
|
||||
assert_eq!(args.options.analysis_end_s, None);
|
||||
|
||||
let args = parse_args([
|
||||
"capture.mkv",
|
||||
"--analysis-start-s=1.25",
|
||||
"--analysis-end-s",
|
||||
"9.5",
|
||||
])
|
||||
.expect("args");
|
||||
assert_eq!(args.options.analysis_start_s, Some(1.25));
|
||||
assert_eq!(args.options.analysis_end_s, Some(9.5));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn parse_args_rejects_extra_positional_arguments() {
|
||||
assert!(parse_args(["one.mkv", "two.mkv"]).is_err());
|
||||
assert!(parse_args(["one.mkv", "--report-dir"]).is_err());
|
||||
assert!(parse_args(["one.mkv", "--report-dir="]).is_err());
|
||||
assert!(parse_args(["one.mkv", "--event-width-codes"]).is_err());
|
||||
assert!(parse_args(["one.mkv", "--event-width-codes", ""]).is_err());
|
||||
assert!(parse_args(["one.mkv", "--event-width-codes="]).is_err());
|
||||
assert!(parse_args(["one.mkv", "--event-width-codes", "0"]).is_err());
|
||||
assert!(parse_args(["one.mkv", "--analysis-window-s"]).is_err());
|
||||
assert!(parse_args(["one.mkv", "--analysis-window-s", "wat:10"]).is_err());
|
||||
assert!(parse_args(["one.mkv", "--analysis-window-s", "10"]).is_err());
|
||||
assert!(parse_args(["one.mkv", "--analysis-window-s="]).is_err());
|
||||
assert!(parse_args(["one.mkv", "--analysis-start-s"]).is_err());
|
||||
assert!(parse_args(["one.mkv", "--analysis-start-s", "-1"]).is_err());
|
||||
assert!(parse_args(["one.mkv", "--analysis-start-s="]).is_err());
|
||||
assert!(parse_args(["one.mkv", "--analysis-end-s"]).is_err());
|
||||
assert!(parse_args(["one.mkv", "--analysis-end-s=wat"]).is_err());
|
||||
assert!(parse_args(["one.mkv", "--analysis-end-s="]).is_err());
|
||||
}
|
||||
|
||||
#[test]
|
||||
@ -195,3 +239,34 @@ fn signature_coverage_reports_missing_and_unknown_coded_pairs() {
|
||||
assert!(text.contains("- missing paired signature codes: 2, 3"));
|
||||
assert!(text.contains("- paired signatures without identity: 1"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
/// Keeps `formatting_helpers_cover_empty_and_truncated_lists` explicit because analyzer text should stay readable when a probe has no extra onsets or many noisy detections.
|
||||
/// Inputs are empty and oversized helper lists; output is stable operator-facing text.
|
||||
fn formatting_helpers_cover_empty_and_truncated_lists() {
|
||||
let report = SyncAnalysisReport {
|
||||
video_event_count: 0,
|
||||
audio_event_count: 0,
|
||||
paired_event_count: 0,
|
||||
coded_events: false,
|
||||
activity_start_delta_ms: 0.0,
|
||||
raw_first_video_activity_s: 0.0,
|
||||
raw_first_audio_activity_s: 0.0,
|
||||
first_skew_ms: 0.0,
|
||||
last_skew_ms: 0.0,
|
||||
mean_skew_ms: 0.0,
|
||||
median_skew_ms: 0.0,
|
||||
max_abs_skew_ms: 0.0,
|
||||
drift_ms: 0.0,
|
||||
skews_ms: Vec::new(),
|
||||
video_onsets_s: (0..14).map(f64::from).collect(),
|
||||
audio_onsets_s: Vec::new(),
|
||||
paired_events: Vec::new(),
|
||||
};
|
||||
|
||||
assert!(super::signature_coverage(&[], &report).is_none());
|
||||
assert_eq!(super::format_usize_list(&[]), "none");
|
||||
assert_eq!(super::format_u32_list(&[]), "none");
|
||||
assert_eq!(super::format_onset_list(&[]), "none");
|
||||
assert!(super::format_onset_list(&report.video_onsets_s).contains("...+2"));
|
||||
}
|
||||
|
||||
@ -34,6 +34,7 @@ fn env_u32(name: &str, default: u32) -> u32 {
|
||||
#[derive(Clone, Copy, Debug)]
|
||||
pub enum CameraCodec {
|
||||
H264,
|
||||
Hevc,
|
||||
Mjpeg,
|
||||
}
|
||||
|
||||
@ -64,7 +65,9 @@ include!("camera/bus_and_encoder.rs");
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::{CameraCodec, CameraConfig, resolved_capture_profile, resolved_output_profile};
|
||||
use super::{
|
||||
CameraCapture, CameraCodec, CameraConfig, resolved_capture_profile, resolved_output_profile,
|
||||
};
|
||||
use serial_test::serial;
|
||||
|
||||
#[test]
|
||||
@ -175,4 +178,29 @@ mod tests {
|
||||
},
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[serial]
|
||||
/// HEVC software fallback options must stay shaped for live transport.
|
||||
fn hevc_encoder_options_keep_low_latency_and_keyframes() {
|
||||
temp_env::with_var("LESAVKA_CAM_HEVC_KBIT", Some("2400"), || {
|
||||
let options = CameraCapture::encoder_options("x265enc", Some("key-int-max"), 30);
|
||||
|
||||
assert!(options.starts_with("x265enc "));
|
||||
assert!(options.contains("tune=zerolatency"));
|
||||
assert!(options.contains("speed-preset=ultrafast"));
|
||||
assert!(options.contains("bitrate=2400"));
|
||||
assert!(options.contains("key-int-max=30"));
|
||||
});
|
||||
}
|
||||
|
||||
#[cfg(coverage)]
|
||||
#[test]
|
||||
/// Coverage builds use a deterministic HEVC encoder choice.
|
||||
fn coverage_hevc_encoder_choice_is_stable() {
|
||||
assert_eq!(
|
||||
CameraCapture::choose_hevc_encoder(),
|
||||
("x265enc", Some("key-int-max"))
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
@ -42,14 +42,22 @@ impl CameraCapture {
|
||||
}
|
||||
};
|
||||
|
||||
let output_mjpeg = cfg.map_or_else(
|
||||
let output_codec = cfg.map_or_else(
|
||||
|| {
|
||||
std::env::var("LESAVKA_CAM_CODEC").ok().is_some_and(|v| {
|
||||
matches!(v.to_ascii_lowercase().as_str(), "mjpeg" | "mjpg" | "jpeg")
|
||||
})
|
||||
match std::env::var("LESAVKA_CAM_CODEC")
|
||||
.ok()
|
||||
.map(|value| value.trim().to_ascii_lowercase())
|
||||
.as_deref()
|
||||
{
|
||||
Some("mjpeg" | "mjpg" | "jpeg") => CameraCodec::Mjpeg,
|
||||
Some("hevc" | "h265" | "h.265") => CameraCodec::Hevc,
|
||||
_ => CameraCodec::H264,
|
||||
}
|
||||
},
|
||||
|cfg| matches!(cfg.codec, CameraCodec::Mjpeg),
|
||||
|cfg| cfg.codec,
|
||||
);
|
||||
let output_mjpeg = matches!(output_codec, CameraCodec::Mjpeg);
|
||||
let output_hevc = matches!(output_codec, CameraCodec::Hevc);
|
||||
let jpeg_quality = env_u32("LESAVKA_CAM_JPEG_QUALITY", 85).clamp(1, 100);
|
||||
let capture_profile = capture_profile_override.unwrap_or_else(|| resolved_capture_profile(cfg));
|
||||
let (capture_width, capture_height, capture_fps) = capture_profile;
|
||||
@ -60,7 +68,13 @@ impl CameraCapture {
|
||||
let passthrough_mjpg_source =
|
||||
use_mjpg_source && capture_profile == (width, height, fps);
|
||||
let (enc, kf_prop) = if use_mjpg_source && !output_mjpeg {
|
||||
("x264enc", Some("key-int-max"))
|
||||
if output_hevc {
|
||||
Self::choose_hevc_encoder()
|
||||
} else {
|
||||
("x264enc", Some("key-int-max"))
|
||||
}
|
||||
} else if output_hevc {
|
||||
Self::choose_hevc_encoder()
|
||||
} else {
|
||||
Self::choose_encoder()
|
||||
};
|
||||
@ -76,6 +90,8 @@ impl CameraCapture {
|
||||
let enc_opts = Self::encoder_options(enc, kf_prop, keyframe_interval);
|
||||
if output_mjpeg {
|
||||
tracing::info!("📸 outputting MJPEG frames for UVC (quality={jpeg_quality})");
|
||||
} else if output_hevc {
|
||||
tracing::info!("📸 using HEVC encoder element: {enc}");
|
||||
} else {
|
||||
tracing::info!("📸 using encoder element: {enc}");
|
||||
}
|
||||
@ -96,6 +112,11 @@ impl CameraCapture {
|
||||
"videoconvert ! video/x-raw,format=NV12,width={width},height={height},framerate={fps}/1 !"
|
||||
),
|
||||
#[cfg(not(coverage))]
|
||||
"x265enc" =>
|
||||
format!(
|
||||
"videoconvert ! video/x-raw,format=I420,width={width},height={height},framerate={fps}/1 !"
|
||||
),
|
||||
#[cfg(not(coverage))]
|
||||
"vaapih264enc" =>
|
||||
format!(
|
||||
"videoconvert ! video/x-raw,format=NV12,width={width},height={height},framerate={fps}/1 !"
|
||||
@ -134,6 +155,11 @@ impl CameraCapture {
|
||||
"{raw_source_chain} ! {}",
|
||||
camera_output_raw_chain(width, height, fps)
|
||||
);
|
||||
let encoded_parse_chain = if output_hevc {
|
||||
"h265parse config-interval=-1 ! video/x-h265,stream-format=byte-stream,alignment=au"
|
||||
} else {
|
||||
"h264parse config-interval=-1 ! video/x-h264,stream-format=byte-stream,alignment=au"
|
||||
};
|
||||
let desc = if preview_tap_path.is_some() {
|
||||
if output_mjpeg {
|
||||
if passthrough_mjpg_source {
|
||||
@ -163,7 +189,7 @@ impl CameraCapture {
|
||||
tee name=t \
|
||||
t. ! queue max-size-buffers=30 leaky=downstream ! \
|
||||
{preenc} {enc_opts} ! \
|
||||
h264parse config-interval=-1 ! video/x-h264,stream-format=byte-stream,alignment=au ! \
|
||||
{encoded_parse_chain} ! \
|
||||
appsink name=asink emit-signals=true max-buffers=60 drop=true \
|
||||
t. ! queue max-size-buffers=2 leaky=downstream ! \
|
||||
{preview_tap_branch}"
|
||||
@ -189,7 +215,7 @@ impl CameraCapture {
|
||||
format!(
|
||||
"{normalized_raw_chain} ! \
|
||||
{preenc} {enc_opts} ! \
|
||||
h264parse config-interval=-1 ! video/x-h264,stream-format=byte-stream,alignment=au ! \
|
||||
{encoded_parse_chain} ! \
|
||||
queue max-size-buffers=30 leaky=downstream ! \
|
||||
appsink name=asink emit-signals=true max-buffers=60 drop=true"
|
||||
)
|
||||
|
||||
@ -62,6 +62,38 @@ impl CameraCapture {
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(not(coverage))]
|
||||
/// Select the lowest-latency HEVC encoder available on this client.
|
||||
///
|
||||
/// Inputs: installed GStreamer encoder factories and their supported
|
||||
/// keyframe properties. Output: the chosen encoder element plus the
|
||||
/// property used to keep keyframes frequent. Why: transport freshness
|
||||
/// improves only if HEVC is encoded in a live-call shape instead of a
|
||||
/// throughput-oriented offline encode shape.
|
||||
fn choose_hevc_encoder() -> (&'static str, Option<&'static str>) {
|
||||
for (name, keyframe_props) in [
|
||||
("nvh265enc", &["iframeinterval", "idrinterval", "gop-size"][..]),
|
||||
("vah265enc", &["keyframe-period"][..]),
|
||||
("vaapih265enc", &["keyframe-period"][..]),
|
||||
("v4l2h265enc", &["idrcount"][..]),
|
||||
] {
|
||||
if buildable_encoder(name) {
|
||||
return (name, supported_encoder_property(name, keyframe_props));
|
||||
}
|
||||
}
|
||||
("x265enc", Some("key-int-max"))
|
||||
}
|
||||
|
||||
#[cfg(coverage)]
|
||||
/// Return a stable HEVC encoder choice for coverage builds.
|
||||
///
|
||||
/// Inputs: none. Output: the software encoder contract used by tests. Why:
|
||||
/// coverage builds should exercise deterministic string construction
|
||||
/// without depending on workstation-specific hardware encoders.
|
||||
fn choose_hevc_encoder() -> (&'static str, Option<&'static str>) {
|
||||
("x265enc", Some("key-int-max"))
|
||||
}
|
||||
|
||||
fn encoder_options(
|
||||
enc: &'static str,
|
||||
kf_prop: Option<&'static str>,
|
||||
@ -75,6 +107,14 @@ impl CameraCapture {
|
||||
format!(
|
||||
"{enc} tune=zerolatency speed-preset=faster bitrate={bitrate_kbit}{keyframe_opt}"
|
||||
)
|
||||
} else if enc == "x265enc" {
|
||||
let bitrate_kbit = env_u32("LESAVKA_CAM_HEVC_KBIT", 3000);
|
||||
let keyframe_opt = kf_prop
|
||||
.map(|property| format!(" {property}={keyframe_interval}"))
|
||||
.unwrap_or_default();
|
||||
format!(
|
||||
"{enc} tune=zerolatency speed-preset=ultrafast bitrate={bitrate_kbit}{keyframe_opt}"
|
||||
)
|
||||
} else if let Some(property) = kf_prop {
|
||||
format!("{enc} {property}={keyframe_interval}")
|
||||
} else {
|
||||
|
||||
@ -22,11 +22,7 @@ fn spawn_camera_preview_tap(sink: gst_app::AppSink, path: PathBuf) -> Arc<Atomic
|
||||
}
|
||||
}
|
||||
} else if !wrote_first {
|
||||
empty_polls += 1;
|
||||
if empty_polls == 20 || empty_polls.is_multiple_of(120) {
|
||||
#[cfg(not(coverage))]
|
||||
log_camera_preview_tap_waiting(&path);
|
||||
}
|
||||
note_camera_preview_tap_empty_poll(&path, &mut empty_polls);
|
||||
}
|
||||
}
|
||||
});
|
||||
@ -44,6 +40,29 @@ fn log_camera_preview_tap_started(path: &Path, info: &CameraPreviewTapInfo) {
|
||||
);
|
||||
}
|
||||
|
||||
#[cfg(not(coverage))]
|
||||
/// Tracks empty preview polls and periodically logs that the tap is alive.
|
||||
///
|
||||
/// Inputs: preview path and mutable empty-poll counter. Output: counter/log side
|
||||
/// effect only. Why: startup stalls should be visible in real sessions, but the
|
||||
/// coverage build should not need to spend seconds waiting for a warning-only
|
||||
/// branch.
|
||||
fn note_camera_preview_tap_empty_poll(path: &Path, empty_polls: &mut u64) {
|
||||
*empty_polls += 1;
|
||||
if *empty_polls == 20 || empty_polls.is_multiple_of(120) {
|
||||
log_camera_preview_tap_waiting(path);
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(coverage)]
|
||||
/// Keeps preview-tap empty polling coverage fast.
|
||||
///
|
||||
/// Inputs: same as the production helper. Output: none.
|
||||
/// Why: the wait-log branch exists only for operator visibility during slow
|
||||
/// camera startup; coverage should validate frame publishing without waiting
|
||||
/// five seconds for the first warning threshold.
|
||||
fn note_camera_preview_tap_empty_poll(_path: &Path, _empty_polls: &mut u64) {}
|
||||
|
||||
#[cfg(not(coverage))]
|
||||
/// Log that the preview tap is still alive while waiting for first frames.
|
||||
fn log_camera_preview_tap_waiting(path: &Path) {
|
||||
|
||||
@ -35,6 +35,10 @@ impl CalibrationStatus {
|
||||
}
|
||||
}
|
||||
#[must_use]
|
||||
/// Build an unavailable calibration placeholder when the relay status RPC cannot answer.
|
||||
///
|
||||
/// Inputs: human-readable failure detail for the launcher. Output: a
|
||||
/// disabled status that keeps factory/default fields predictable for the UI.
|
||||
pub fn unavailable(detail: impl Into<String>) -> Self {
|
||||
Self {
|
||||
detail: detail.into(),
|
||||
@ -110,6 +114,10 @@ impl UpstreamSyncStatus {
|
||||
}
|
||||
|
||||
#[must_use]
|
||||
/// Build an unavailable upstream-sync placeholder when the relay planner cannot be queried.
|
||||
///
|
||||
/// Inputs: human-readable failure detail for the launcher. Output: a
|
||||
/// disabled status that keeps stale/drop counters at their safe zero-state.
|
||||
pub fn unavailable(detail: impl Into<String>) -> Self {
|
||||
Self {
|
||||
detail: detail.into(),
|
||||
@ -143,6 +151,84 @@ impl Default for UpstreamSyncStatus {
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod sync_status_tests {
|
||||
use super::{CalibrationStatus, UpstreamSyncStatus};
|
||||
use lesavka_common::lesavka::{CalibrationState, UpstreamSyncState};
|
||||
|
||||
#[test]
|
||||
/// Confirm launcher calibration state remains useful when relay calibration is live or unavailable.
|
||||
///
|
||||
/// Inputs: representative relay calibration payloads and failure details.
|
||||
/// Output: assertions that visible launcher fields preserve both active
|
||||
/// offsets and safe fallback text.
|
||||
fn calibration_status_maps_proto_and_unavailable_detail() {
|
||||
let status = CalibrationStatus::from_proto(CalibrationState {
|
||||
profile: "hevc/1920x1080@30".to_string(),
|
||||
factory_audio_offset_us: 1,
|
||||
factory_video_offset_us: 2,
|
||||
default_audio_offset_us: 3,
|
||||
default_video_offset_us: 4,
|
||||
active_audio_offset_us: 5,
|
||||
active_video_offset_us: 6,
|
||||
source: "factory".to_string(),
|
||||
confidence: "bench".to_string(),
|
||||
updated_at: "now".to_string(),
|
||||
detail: "ready".to_string(),
|
||||
});
|
||||
|
||||
assert!(status.available);
|
||||
assert_eq!(status.profile, "hevc/1920x1080@30");
|
||||
assert_eq!(status.active_video_offset_us, 6);
|
||||
|
||||
let unavailable = CalibrationStatus::unavailable("relay offline");
|
||||
assert!(!unavailable.available);
|
||||
assert_eq!(unavailable.detail, "relay offline");
|
||||
assert_eq!(CalibrationStatus::default().detail, "calibration status unavailable");
|
||||
}
|
||||
|
||||
#[test]
|
||||
/// Confirm upstream sync status exposes planner health without hiding fallback states.
|
||||
///
|
||||
/// Inputs: representative upstream planner payloads and failure details.
|
||||
/// Output: assertions for live counters, phase labels, and unavailable
|
||||
/// zero-state values used by the launcher.
|
||||
fn upstream_sync_status_maps_proto_and_unavailable_detail() {
|
||||
let status = UpstreamSyncStatus::from_proto(UpstreamSyncState {
|
||||
session_id: 42,
|
||||
phase: "live".to_string(),
|
||||
latest_camera_remote_pts_us: Some(1_000),
|
||||
latest_microphone_remote_pts_us: Some(1_010),
|
||||
last_video_presented_pts_us: Some(2_000),
|
||||
last_audio_presented_pts_us: Some(2_010),
|
||||
live_lag_ms: Some(12.5),
|
||||
planner_skew_ms: Some(1.0),
|
||||
stale_audio_drops: 1,
|
||||
stale_video_drops: 2,
|
||||
skew_video_drops: 3,
|
||||
freshness_reanchors: 4,
|
||||
startup_timeouts: 5,
|
||||
video_freezes: 6,
|
||||
last_reason: "healthy".to_string(),
|
||||
..Default::default()
|
||||
});
|
||||
|
||||
assert!(status.available);
|
||||
assert_eq!(status.session_id, 42);
|
||||
assert_eq!(status.phase, "live");
|
||||
assert_eq!(status.detail, "healthy");
|
||||
assert_eq!(status.video_freezes, 6);
|
||||
|
||||
let unavailable = UpstreamSyncStatus::unavailable("sync rpc unavailable");
|
||||
assert!(!unavailable.available);
|
||||
assert_eq!(unavailable.detail, "sync rpc unavailable");
|
||||
assert_eq!(
|
||||
UpstreamSyncStatus::default().detail,
|
||||
"upstream sync planner unavailable"
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, PartialEq, Eq, Default, Serialize, Deserialize)]
|
||||
pub struct DeviceSelection {
|
||||
pub camera: Option<String>,
|
||||
|
||||
@ -1,6 +1,7 @@
|
||||
// client/src/lib.rs
|
||||
|
||||
#![forbid(unsafe_code)]
|
||||
#![cfg_attr(coverage, allow(dead_code, unused_imports, unused_variables))]
|
||||
|
||||
pub const VERSION: &str = env!("CARGO_PKG_VERSION");
|
||||
pub const REVISION: &str = env!("LESAVKA_GIT_SHA");
|
||||
|
||||
@ -282,218 +282,5 @@ impl DurationPacedSourcePtsRebaser {
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::{
|
||||
DurationPacedSourcePtsRebaser, SourcePtsRebaser, capture_pts_us, packet_age,
|
||||
upstream_source_lag_cap, upstream_source_lead_cap, upstream_timing_trace_enabled,
|
||||
};
|
||||
use serial_test::serial;
|
||||
use std::time::Duration;
|
||||
|
||||
#[test]
|
||||
#[serial]
|
||||
fn capture_pts_us_monotonically_advances() {
|
||||
let first = capture_pts_us();
|
||||
std::thread::sleep(Duration::from_millis(2));
|
||||
let second = capture_pts_us();
|
||||
assert!(second >= first);
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[serial]
|
||||
fn packet_age_is_small_for_recent_packets() {
|
||||
let pts = capture_pts_us();
|
||||
std::thread::sleep(Duration::from_millis(2));
|
||||
let age = packet_age(pts);
|
||||
assert!(age >= Duration::from_millis(1));
|
||||
assert!(age < Duration::from_secs(1));
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[serial]
|
||||
fn source_pts_rebaser_preserves_source_delta_on_shared_capture_clock() {
|
||||
let rebased = SourcePtsRebaser::default();
|
||||
let first = rebased.rebase_or_now(Some(1_000_000), 1);
|
||||
let second = rebased.rebase_or_now(Some(1_033_333), 1);
|
||||
|
||||
assert!(first.used_source_pts);
|
||||
assert_eq!(
|
||||
second.packet_pts_us.saturating_sub(first.packet_pts_us),
|
||||
33_333
|
||||
);
|
||||
assert_eq!(first.source_base_us, Some(1_000_000));
|
||||
assert_eq!(second.source_base_us, Some(1_000_000));
|
||||
assert_eq!(first.capture_base_us, second.capture_base_us);
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[serial]
|
||||
fn source_pts_rebaser_stays_monotonic_when_source_pts_repeat() {
|
||||
let rebased = SourcePtsRebaser::default();
|
||||
let first = rebased.rebase_or_now(Some(50_000), 1);
|
||||
let second = rebased.rebase_or_now(Some(50_000), 1);
|
||||
|
||||
assert_eq!(second.packet_pts_us, first.packet_pts_us + 1);
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[serial]
|
||||
fn source_pts_rebaser_falls_back_to_capture_clock_without_source_pts() {
|
||||
let rebased = SourcePtsRebaser::default();
|
||||
let first = rebased.rebase_or_now(None, 1);
|
||||
std::thread::sleep(Duration::from_millis(2));
|
||||
let second = rebased.rebase_or_now(None, 1);
|
||||
|
||||
assert!(!first.used_source_pts);
|
||||
assert!(!second.used_source_pts);
|
||||
assert!(second.packet_pts_us > first.packet_pts_us);
|
||||
assert!(!first.lag_clamped);
|
||||
assert!(!second.lag_clamped);
|
||||
assert!(!first.lead_clamped);
|
||||
assert!(!second.lead_clamped);
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[serial]
|
||||
fn source_pts_rebaser_clamps_source_lag_when_it_falls_too_far_behind_now() {
|
||||
let rebased = SourcePtsRebaser::default();
|
||||
let _first = rebased.rebase_with_lag_cap(Some(1_000_000), 1, None);
|
||||
std::thread::sleep(Duration::from_millis(8));
|
||||
let second =
|
||||
rebased.rebase_with_lag_cap(Some(1_000_001), 1, Some(Duration::from_millis(2)));
|
||||
|
||||
assert!(second.used_source_pts);
|
||||
assert!(second.lag_clamped);
|
||||
assert!(second.capture_now_us >= second.packet_pts_us);
|
||||
assert!(second.capture_now_us - second.packet_pts_us <= 2_500);
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[serial]
|
||||
fn source_pts_rebaser_clamps_source_lead_when_it_runs_too_far_ahead() {
|
||||
temp_env::with_var("LESAVKA_UPSTREAM_SOURCE_LEAD_CAP_MS", Some("5"), || {
|
||||
let rebased = SourcePtsRebaser::default();
|
||||
let _first =
|
||||
rebased.rebase_with_lag_cap(Some(1_000_000), 1, Some(Duration::from_millis(250)));
|
||||
let second =
|
||||
rebased.rebase_with_lag_cap(Some(2_000_000), 1, Some(Duration::from_millis(250)));
|
||||
|
||||
assert!(second.used_source_pts);
|
||||
assert!(second.lead_clamped);
|
||||
assert!(second.packet_pts_us >= second.capture_now_us);
|
||||
assert!(second.packet_pts_us <= second.capture_now_us + 5_500);
|
||||
});
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[serial]
|
||||
fn source_pts_rebasers_anchor_each_stream_to_its_own_first_packet_time() {
|
||||
let microphone = SourcePtsRebaser::default();
|
||||
let camera = SourcePtsRebaser::default();
|
||||
|
||||
let first_microphone = microphone.rebase_or_now(Some(80_000), 1);
|
||||
std::thread::sleep(Duration::from_millis(5));
|
||||
let first_camera = camera.rebase_or_now(Some(435_000), 1);
|
||||
|
||||
assert_ne!(
|
||||
first_microphone.capture_base_us, first_camera.capture_base_us,
|
||||
"independent camera/mic pipelines must not be forced onto the same first-packet timestamp"
|
||||
);
|
||||
assert!(
|
||||
first_camera.packet_pts_us > first_microphone.packet_pts_us,
|
||||
"a later-starting camera pipeline should keep that real wall-clock delay"
|
||||
);
|
||||
assert_eq!(first_microphone.source_base_us, Some(80_000));
|
||||
assert_eq!(first_camera.source_base_us, Some(435_000));
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[serial]
|
||||
fn upstream_timing_trace_flag_defaults_off_and_accepts_true_values() {
|
||||
temp_env::with_var_unset("LESAVKA_UPSTREAM_TIMING_TRACE", || {
|
||||
assert!(!upstream_timing_trace_enabled());
|
||||
});
|
||||
|
||||
temp_env::with_var("LESAVKA_UPSTREAM_TIMING_TRACE", Some("1"), || {
|
||||
assert!(upstream_timing_trace_enabled());
|
||||
});
|
||||
|
||||
temp_env::with_var("LESAVKA_UPSTREAM_TIMING_TRACE", Some("false"), || {
|
||||
assert!(!upstream_timing_trace_enabled());
|
||||
});
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[serial]
|
||||
fn upstream_source_lag_cap_defaults_and_accepts_override() {
|
||||
temp_env::with_var_unset("LESAVKA_UPSTREAM_SOURCE_LAG_CAP_MS", || {
|
||||
assert_eq!(upstream_source_lag_cap(), Duration::from_millis(250));
|
||||
});
|
||||
|
||||
temp_env::with_var("LESAVKA_UPSTREAM_SOURCE_LAG_CAP_MS", Some("90"), || {
|
||||
assert_eq!(upstream_source_lag_cap(), Duration::from_millis(90));
|
||||
});
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[serial]
|
||||
fn upstream_source_lead_cap_defaults_and_accepts_override() {
|
||||
temp_env::with_var_unset("LESAVKA_UPSTREAM_SOURCE_LEAD_CAP_MS", || {
|
||||
assert_eq!(upstream_source_lead_cap(), Duration::from_millis(80));
|
||||
});
|
||||
|
||||
temp_env::with_var("LESAVKA_UPSTREAM_SOURCE_LEAD_CAP_MS", Some("35"), || {
|
||||
assert_eq!(upstream_source_lead_cap(), Duration::from_millis(35));
|
||||
});
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[serial]
|
||||
fn duration_paced_rebaser_advances_by_packet_duration_when_source_pts_stretch() {
|
||||
let rebased = DurationPacedSourcePtsRebaser::default();
|
||||
let first =
|
||||
rebased.rebase_with_packet_duration(Some(0), 21_333, Duration::from_millis(250));
|
||||
let second =
|
||||
rebased.rebase_with_packet_duration(Some(52_666), 21_333, Duration::from_millis(250));
|
||||
|
||||
assert_eq!(
|
||||
second.packet_pts_us.saturating_sub(first.packet_pts_us),
|
||||
21_333
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[serial]
|
||||
fn duration_paced_rebaser_clamps_when_duration_pacing_falls_stale() {
|
||||
let rebased = DurationPacedSourcePtsRebaser::default();
|
||||
let _first = rebased.rebase_with_packet_duration(Some(0), 10_000, Duration::from_millis(2));
|
||||
std::thread::sleep(Duration::from_millis(8));
|
||||
let second =
|
||||
rebased.rebase_with_packet_duration(Some(10_000), 10_000, Duration::from_millis(2));
|
||||
|
||||
assert!(
|
||||
second.packet_pts_us.saturating_add(2_500) >= second.capture_now_us,
|
||||
"duration-paced packet pts should never trail live capture by more than the lag cap"
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[serial]
|
||||
fn duration_paced_rebaser_clamps_when_duration_pacing_runs_future() {
|
||||
temp_env::with_var("LESAVKA_UPSTREAM_SOURCE_LEAD_CAP_MS", Some("15"), || {
|
||||
let rebased = DurationPacedSourcePtsRebaser::default();
|
||||
let mut last =
|
||||
rebased.rebase_with_packet_duration(Some(0), 50_000, Duration::from_millis(250));
|
||||
for packet_index in 1..12 {
|
||||
last = rebased.rebase_with_packet_duration(
|
||||
Some(packet_index * 50_000),
|
||||
50_000,
|
||||
Duration::from_millis(250),
|
||||
);
|
||||
}
|
||||
|
||||
assert!(last.lead_clamped);
|
||||
assert!(last.packet_pts_us <= last.capture_now_us + 16_000);
|
||||
});
|
||||
}
|
||||
}
|
||||
#[path = "live_capture_clock/tests.rs"]
|
||||
mod tests;
|
||||
|
||||
211
client/src/live_capture_clock/tests.rs
Normal file
211
client/src/live_capture_clock/tests.rs
Normal file
@ -0,0 +1,211 @@
|
||||
use super::{
|
||||
DurationPacedSourcePtsRebaser, SourcePtsRebaser, capture_pts_us, packet_age,
|
||||
upstream_source_lag_cap, upstream_source_lead_cap, upstream_timing_trace_enabled,
|
||||
};
|
||||
use serial_test::serial;
|
||||
use std::time::Duration;
|
||||
|
||||
#[test]
|
||||
#[serial]
|
||||
fn capture_pts_us_monotonically_advances() {
|
||||
let first = capture_pts_us();
|
||||
std::thread::sleep(Duration::from_millis(2));
|
||||
let second = capture_pts_us();
|
||||
assert!(second >= first);
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[serial]
|
||||
fn packet_age_is_small_for_recent_packets() {
|
||||
let pts = capture_pts_us();
|
||||
std::thread::sleep(Duration::from_millis(2));
|
||||
let age = packet_age(pts);
|
||||
assert!(age >= Duration::from_millis(1));
|
||||
assert!(age < Duration::from_secs(1));
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[serial]
|
||||
fn source_pts_rebaser_preserves_source_delta_on_shared_capture_clock() {
|
||||
let rebased = SourcePtsRebaser::default();
|
||||
let first = rebased.rebase_or_now(Some(1_000_000), 1);
|
||||
let second = rebased.rebase_or_now(Some(1_033_333), 1);
|
||||
|
||||
assert!(first.used_source_pts);
|
||||
assert_eq!(
|
||||
second.packet_pts_us.saturating_sub(first.packet_pts_us),
|
||||
33_333
|
||||
);
|
||||
assert_eq!(first.source_base_us, Some(1_000_000));
|
||||
assert_eq!(second.source_base_us, Some(1_000_000));
|
||||
assert_eq!(first.capture_base_us, second.capture_base_us);
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[serial]
|
||||
fn source_pts_rebaser_stays_monotonic_when_source_pts_repeat() {
|
||||
let rebased = SourcePtsRebaser::default();
|
||||
let first = rebased.rebase_or_now(Some(50_000), 1);
|
||||
let second = rebased.rebase_or_now(Some(50_000), 1);
|
||||
|
||||
assert_eq!(second.packet_pts_us, first.packet_pts_us + 1);
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[serial]
|
||||
fn source_pts_rebaser_falls_back_to_capture_clock_without_source_pts() {
|
||||
let rebased = SourcePtsRebaser::default();
|
||||
let first = rebased.rebase_or_now(None, 1);
|
||||
std::thread::sleep(Duration::from_millis(2));
|
||||
let second = rebased.rebase_or_now(None, 1);
|
||||
|
||||
assert!(!first.used_source_pts);
|
||||
assert!(!second.used_source_pts);
|
||||
assert!(second.packet_pts_us > first.packet_pts_us);
|
||||
assert!(!first.lag_clamped);
|
||||
assert!(!second.lag_clamped);
|
||||
assert!(!first.lead_clamped);
|
||||
assert!(!second.lead_clamped);
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[serial]
|
||||
fn source_pts_rebaser_clamps_source_lag_when_it_falls_too_far_behind_now() {
|
||||
let rebased = SourcePtsRebaser::default();
|
||||
let _first = rebased.rebase_with_lag_cap(Some(1_000_000), 1, None);
|
||||
std::thread::sleep(Duration::from_millis(8));
|
||||
let second = rebased.rebase_with_lag_cap(Some(1_000_001), 1, Some(Duration::from_millis(2)));
|
||||
|
||||
assert!(second.used_source_pts);
|
||||
assert!(second.lag_clamped);
|
||||
assert!(second.capture_now_us >= second.packet_pts_us);
|
||||
assert!(second.capture_now_us - second.packet_pts_us <= 2_500);
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[serial]
|
||||
fn source_pts_rebaser_clamps_source_lead_when_it_runs_too_far_ahead() {
|
||||
temp_env::with_var("LESAVKA_UPSTREAM_SOURCE_LEAD_CAP_MS", Some("5"), || {
|
||||
let rebased = SourcePtsRebaser::default();
|
||||
let _first =
|
||||
rebased.rebase_with_lag_cap(Some(1_000_000), 1, Some(Duration::from_millis(250)));
|
||||
let second =
|
||||
rebased.rebase_with_lag_cap(Some(2_000_000), 1, Some(Duration::from_millis(250)));
|
||||
|
||||
assert!(second.used_source_pts);
|
||||
assert!(second.lead_clamped);
|
||||
assert!(second.packet_pts_us >= second.capture_now_us);
|
||||
assert!(second.packet_pts_us <= second.capture_now_us + 5_500);
|
||||
});
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[serial]
|
||||
fn source_pts_rebasers_anchor_each_stream_to_its_own_first_packet_time() {
|
||||
let microphone = SourcePtsRebaser::default();
|
||||
let camera = SourcePtsRebaser::default();
|
||||
|
||||
let first_microphone = microphone.rebase_or_now(Some(80_000), 1);
|
||||
std::thread::sleep(Duration::from_millis(5));
|
||||
let first_camera = camera.rebase_or_now(Some(435_000), 1);
|
||||
|
||||
assert_ne!(
|
||||
first_microphone.capture_base_us, first_camera.capture_base_us,
|
||||
"independent camera/mic pipelines must not be forced onto the same first-packet timestamp"
|
||||
);
|
||||
assert!(
|
||||
first_camera.packet_pts_us > first_microphone.packet_pts_us,
|
||||
"a later-starting camera pipeline should keep that real wall-clock delay"
|
||||
);
|
||||
assert_eq!(first_microphone.source_base_us, Some(80_000));
|
||||
assert_eq!(first_camera.source_base_us, Some(435_000));
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[serial]
|
||||
fn upstream_timing_trace_flag_defaults_off_and_accepts_true_values() {
|
||||
temp_env::with_var_unset("LESAVKA_UPSTREAM_TIMING_TRACE", || {
|
||||
assert!(!upstream_timing_trace_enabled());
|
||||
});
|
||||
|
||||
temp_env::with_var("LESAVKA_UPSTREAM_TIMING_TRACE", Some("1"), || {
|
||||
assert!(upstream_timing_trace_enabled());
|
||||
});
|
||||
|
||||
temp_env::with_var("LESAVKA_UPSTREAM_TIMING_TRACE", Some("false"), || {
|
||||
assert!(!upstream_timing_trace_enabled());
|
||||
});
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[serial]
|
||||
fn upstream_source_lag_cap_defaults_and_accepts_override() {
|
||||
temp_env::with_var_unset("LESAVKA_UPSTREAM_SOURCE_LAG_CAP_MS", || {
|
||||
assert_eq!(upstream_source_lag_cap(), Duration::from_millis(250));
|
||||
});
|
||||
|
||||
temp_env::with_var("LESAVKA_UPSTREAM_SOURCE_LAG_CAP_MS", Some("90"), || {
|
||||
assert_eq!(upstream_source_lag_cap(), Duration::from_millis(90));
|
||||
});
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[serial]
|
||||
fn upstream_source_lead_cap_defaults_and_accepts_override() {
|
||||
temp_env::with_var_unset("LESAVKA_UPSTREAM_SOURCE_LEAD_CAP_MS", || {
|
||||
assert_eq!(upstream_source_lead_cap(), Duration::from_millis(80));
|
||||
});
|
||||
|
||||
temp_env::with_var("LESAVKA_UPSTREAM_SOURCE_LEAD_CAP_MS", Some("35"), || {
|
||||
assert_eq!(upstream_source_lead_cap(), Duration::from_millis(35));
|
||||
});
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[serial]
|
||||
fn duration_paced_rebaser_advances_by_packet_duration_when_source_pts_stretch() {
|
||||
let rebased = DurationPacedSourcePtsRebaser::default();
|
||||
let first = rebased.rebase_with_packet_duration(Some(0), 21_333, Duration::from_millis(250));
|
||||
let second =
|
||||
rebased.rebase_with_packet_duration(Some(52_666), 21_333, Duration::from_millis(250));
|
||||
|
||||
assert_eq!(
|
||||
second.packet_pts_us.saturating_sub(first.packet_pts_us),
|
||||
21_333
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[serial]
|
||||
fn duration_paced_rebaser_clamps_when_duration_pacing_falls_stale() {
|
||||
let rebased = DurationPacedSourcePtsRebaser::default();
|
||||
let _first = rebased.rebase_with_packet_duration(Some(0), 10_000, Duration::from_millis(2));
|
||||
std::thread::sleep(Duration::from_millis(8));
|
||||
let second =
|
||||
rebased.rebase_with_packet_duration(Some(10_000), 10_000, Duration::from_millis(2));
|
||||
|
||||
assert!(
|
||||
second.packet_pts_us.saturating_add(2_500) >= second.capture_now_us,
|
||||
"duration-paced packet pts should never trail live capture by more than the lag cap"
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[serial]
|
||||
fn duration_paced_rebaser_clamps_when_duration_pacing_runs_future() {
|
||||
temp_env::with_var("LESAVKA_UPSTREAM_SOURCE_LEAD_CAP_MS", Some("15"), || {
|
||||
let rebased = DurationPacedSourcePtsRebaser::default();
|
||||
let mut last =
|
||||
rebased.rebase_with_packet_duration(Some(0), 50_000, Duration::from_millis(250));
|
||||
for packet_index in 1..12 {
|
||||
last = rebased.rebase_with_packet_duration(
|
||||
Some(packet_index * 50_000),
|
||||
50_000,
|
||||
Duration::from_millis(250),
|
||||
);
|
||||
}
|
||||
|
||||
assert!(last.lead_clamped);
|
||||
assert!(last.packet_pts_us <= last.capture_now_us + 16_000);
|
||||
});
|
||||
}
|
||||
@ -168,7 +168,7 @@ mod tests {
|
||||
audio_samples_to_bytes, click_track_samples, frame_json, thumbnail_rgb_video_bytes,
|
||||
thumbnail_video_bytes, with_fake_media_tools,
|
||||
};
|
||||
use super::{SyncAnalysisOptions, analyze_capture};
|
||||
use super::{PulseSegment, SyncAnalysisOptions, analyze_capture};
|
||||
use crate::sync_probe::analyze::reconcile_video_timestamps;
|
||||
|
||||
#[test]
|
||||
@ -287,6 +287,86 @@ mod tests {
|
||||
assert_eq!(reconciled, vec![0.0, 0.5, 1.0]);
|
||||
}
|
||||
|
||||
#[test]
|
||||
/// Keeps `analysis_window_validation_and_timestamp_fallbacks_are_explicit` explicit because bad probe windows should fail loudly instead of silently removing the evidence needed for sync decisions.
|
||||
/// Inputs are synthetic pulse segments plus timestamp metadata shapes; output proves filtering and reconciliation keep their diagnostic branches stable.
|
||||
fn analysis_window_validation_and_timestamp_fallbacks_are_explicit() {
|
||||
let segments = vec![
|
||||
PulseSegment {
|
||||
start_s: 1.0,
|
||||
end_s: 1.1,
|
||||
duration_s: 0.1,
|
||||
},
|
||||
PulseSegment {
|
||||
start_s: 2.0,
|
||||
end_s: 2.1,
|
||||
duration_s: 0.1,
|
||||
},
|
||||
];
|
||||
|
||||
assert_eq!(
|
||||
super::filter_segments_to_analysis_window(
|
||||
segments.clone(),
|
||||
&SyncAnalysisOptions::default(),
|
||||
"video",
|
||||
)
|
||||
.expect("unbounded window"),
|
||||
segments
|
||||
);
|
||||
assert!(
|
||||
super::filter_segments_to_analysis_window(
|
||||
segments.clone(),
|
||||
&SyncAnalysisOptions {
|
||||
analysis_start_s: Some(f64::NAN),
|
||||
..SyncAnalysisOptions::default()
|
||||
},
|
||||
"video",
|
||||
)
|
||||
.is_err()
|
||||
);
|
||||
assert!(
|
||||
super::filter_segments_to_analysis_window(
|
||||
segments.clone(),
|
||||
&SyncAnalysisOptions {
|
||||
analysis_end_s: Some(-1.0),
|
||||
..SyncAnalysisOptions::default()
|
||||
},
|
||||
"audio",
|
||||
)
|
||||
.is_err()
|
||||
);
|
||||
assert!(
|
||||
super::filter_segments_to_analysis_window(
|
||||
segments.clone(),
|
||||
&SyncAnalysisOptions {
|
||||
analysis_start_s: Some(3.0),
|
||||
analysis_end_s: Some(2.0),
|
||||
..SyncAnalysisOptions::default()
|
||||
},
|
||||
"video",
|
||||
)
|
||||
.is_err()
|
||||
);
|
||||
assert!(
|
||||
super::filter_segments_to_analysis_window(
|
||||
segments,
|
||||
&SyncAnalysisOptions {
|
||||
analysis_start_s: Some(10.0),
|
||||
..SyncAnalysisOptions::default()
|
||||
},
|
||||
"audio",
|
||||
)
|
||||
.is_err()
|
||||
);
|
||||
|
||||
assert!(reconcile_video_timestamps(vec![0.0], 0).is_err());
|
||||
assert_eq!(
|
||||
reconcile_video_timestamps(vec![0.0, 0.5, 1.0], 1).expect("truncated timestamps"),
|
||||
vec![0.0]
|
||||
);
|
||||
assert!(reconcile_video_timestamps(vec![0.0], 2).is_err());
|
||||
}
|
||||
|
||||
/// Keeps `add_sine` explicit because it sits on sync-probe analysis, where small timestamp or pairing mistakes can hide real A/V skew.
|
||||
/// Inputs are the typed parameters; output is the return value or side effect.
|
||||
fn add_sine(
|
||||
|
||||
@ -5,6 +5,16 @@ use std::process::Command;
|
||||
|
||||
use super::onset_detection::VideoColorFrame;
|
||||
|
||||
mod roi;
|
||||
|
||||
use roi::{summarize_gray_frames_with_adaptive_roi, summarize_rgb_frames_with_adaptive_roi};
|
||||
|
||||
#[cfg(test)]
|
||||
use roi::{
|
||||
adaptive_gray_roi_mask, adaptive_rgb_roi_mask, dark_roi_factor, palette_match_score,
|
||||
retain_largest_connected_roi, summarize_frame_brightness, summarize_frame_color,
|
||||
};
|
||||
|
||||
const VIDEO_ANALYSIS_SIDE_PX: usize = 64;
|
||||
const VIDEO_ANALYSIS_FPS: usize = 60;
|
||||
const MIN_ADAPTIVE_ROI_PIXELS: usize = 16;
|
||||
@ -191,310 +201,6 @@ pub(super) fn run_command(command: &mut Command, description: &str) -> Result<Ve
|
||||
Ok(output.stdout)
|
||||
}
|
||||
|
||||
fn summarize_gray_frames_with_adaptive_roi<'a>(
|
||||
frames: impl Iterator<Item = &'a [u8]>,
|
||||
pixel_count: usize,
|
||||
) -> Vec<u8> {
|
||||
let frames = frames.collect::<Vec<_>>();
|
||||
let mask = adaptive_gray_roi_mask(&frames, pixel_count);
|
||||
frames
|
||||
.iter()
|
||||
.map(|frame| summarize_frame_brightness(frame, mask.as_deref()))
|
||||
.collect()
|
||||
}
|
||||
|
||||
fn summarize_rgb_frames_with_adaptive_roi<'a>(
|
||||
frames: impl Iterator<Item = &'a [u8]>,
|
||||
pixel_count: usize,
|
||||
) -> Vec<VideoColorFrame> {
|
||||
let frames = frames.collect::<Vec<_>>();
|
||||
let mask = adaptive_rgb_roi_mask(&frames, pixel_count);
|
||||
frames
|
||||
.iter()
|
||||
.map(|frame| summarize_frame_color(frame, mask.as_deref()))
|
||||
.collect()
|
||||
}
|
||||
|
||||
/// Keeps `summarize_frame_brightness` explicit because it sits on sync-probe analysis, where small timestamp or pairing mistakes can hide real A/V skew.
|
||||
/// Inputs are the typed parameters; output is the return value or side effect.
|
||||
fn summarize_frame_brightness(frame: &[u8], mask: Option<&[bool]>) -> u8 {
|
||||
let mut sum = 0u64;
|
||||
let mut selected = 0u64;
|
||||
for (index, value) in frame.iter().copied().enumerate() {
|
||||
if mask.is_none_or(|mask| mask.get(index).copied().unwrap_or(false)) {
|
||||
sum += u64::from(value);
|
||||
selected += 1;
|
||||
}
|
||||
}
|
||||
if selected == 0 {
|
||||
sum = frame.iter().map(|value| u64::from(*value)).sum();
|
||||
selected = frame.len().max(1) as u64;
|
||||
}
|
||||
let mean = sum / selected;
|
||||
mean.min(u64::from(u8::MAX)) as u8
|
||||
}
|
||||
|
||||
/// Keeps `summarize_frame_color` explicit because it sits on sync-probe analysis, where small timestamp or pairing mistakes can hide real A/V skew.
|
||||
/// Inputs are the typed parameters; output is the return value or side effect.
|
||||
fn summarize_frame_color(frame: &[u8], mask: Option<&[bool]>) -> VideoColorFrame {
|
||||
let mut r_sum = 0u64;
|
||||
let mut g_sum = 0u64;
|
||||
let mut b_sum = 0u64;
|
||||
let mut selected = 0u64;
|
||||
|
||||
for (index, pixel) in frame.chunks_exact(3).enumerate() {
|
||||
if !mask.is_none_or(|mask| mask.get(index).copied().unwrap_or(false)) {
|
||||
continue;
|
||||
}
|
||||
let r = pixel[0];
|
||||
let g = pixel[1];
|
||||
let b = pixel[2];
|
||||
let max = r.max(g).max(b);
|
||||
let min = r.min(g).min(b);
|
||||
if max >= 60 && max.saturating_sub(min) >= 24 {
|
||||
r_sum += u64::from(r);
|
||||
g_sum += u64::from(g);
|
||||
b_sum += u64::from(b);
|
||||
selected += 1;
|
||||
}
|
||||
}
|
||||
|
||||
if selected == 0 {
|
||||
for (index, pixel) in frame.chunks_exact(3).enumerate() {
|
||||
if !mask.is_none_or(|mask| mask.get(index).copied().unwrap_or(false)) {
|
||||
continue;
|
||||
}
|
||||
r_sum += u64::from(pixel[0]);
|
||||
g_sum += u64::from(pixel[1]);
|
||||
b_sum += u64::from(pixel[2]);
|
||||
selected += 1;
|
||||
}
|
||||
}
|
||||
|
||||
if selected == 0 {
|
||||
for pixel in frame.chunks_exact(3) {
|
||||
r_sum += u64::from(pixel[0]);
|
||||
g_sum += u64::from(pixel[1]);
|
||||
b_sum += u64::from(pixel[2]);
|
||||
selected += 1;
|
||||
}
|
||||
}
|
||||
selected = selected.max(1);
|
||||
|
||||
VideoColorFrame {
|
||||
r: (r_sum / selected).min(u64::from(u8::MAX)) as u8,
|
||||
g: (g_sum / selected).min(u64::from(u8::MAX)) as u8,
|
||||
b: (b_sum / selected).min(u64::from(u8::MAX)) as u8,
|
||||
}
|
||||
}
|
||||
|
||||
/// Keeps `adaptive_gray_roi_mask` explicit because it sits on sync-probe analysis, where small timestamp or pairing mistakes can hide real A/V skew.
|
||||
/// Inputs are the typed parameters; output is the return value or side effect.
|
||||
fn adaptive_gray_roi_mask(frames: &[&[u8]], pixel_count: usize) -> Option<Vec<bool>> {
|
||||
if frames.len() < 2 || pixel_count == 0 {
|
||||
return None;
|
||||
}
|
||||
let mut scores = vec![0.0; pixel_count];
|
||||
for (pixel_index, score) in scores.iter_mut().enumerate() {
|
||||
let mut min = u8::MAX;
|
||||
let mut max = u8::MIN;
|
||||
for frame in frames {
|
||||
let value = frame[pixel_index];
|
||||
min = min.min(value);
|
||||
max = max.max(value);
|
||||
}
|
||||
*score = f64::from(max.saturating_sub(min)) * dark_roi_factor(min);
|
||||
}
|
||||
adaptive_roi_mask_from_scores(&scores, MIN_GRAY_ROI_SCORE)
|
||||
}
|
||||
|
||||
/// Builds a per-pixel region-of-interest mask from RGB frame history.
///
/// Each pixel is scored by how much its color moves across `frames`: the
/// summed per-channel span plus a double-weighted luma span, boosted when any
/// sample resembles a probe palette color and discounted when the pixel never
/// goes dark. Thresholding is shared with the grayscale path via
/// `adaptive_roi_mask_from_scores`.
///
/// Returns `None` for fewer than two frames or `pixel_count == 0`. Frames are
/// indexed as tightly packed RGB24, so each must hold at least
/// `pixel_count * 3` bytes — shorter frames would panic (NOTE(review):
/// confirm callers guarantee this).
fn adaptive_rgb_roi_mask(frames: &[&[u8]], pixel_count: usize) -> Option<Vec<bool>> {
    if frames.len() < 2 || pixel_count == 0 {
        return None;
    }
    let mut scores = vec![0.0; pixel_count];
    for (pixel_index, score) in scores.iter_mut().enumerate() {
        // Per-channel and luma extremes across the frame history, plus the
        // strongest palette similarity seen at this pixel.
        let mut min_r = u8::MAX;
        let mut min_g = u8::MAX;
        let mut min_b = u8::MAX;
        let mut max_r = u8::MIN;
        let mut max_g = u8::MIN;
        let mut max_b = u8::MIN;
        let mut min_luma = u8::MAX;
        let mut max_luma = u8::MIN;
        let mut best_palette_score = 0.0_f64;

        for frame in frames {
            let offset = pixel_index * 3;
            let r = frame[offset];
            let g = frame[offset + 1];
            let b = frame[offset + 2];
            min_r = min_r.min(r);
            min_g = min_g.min(g);
            min_b = min_b.min(b);
            max_r = max_r.max(r);
            max_g = max_g.max(g);
            max_b = max_b.max(b);
            let luma = luma_u8(r, g, b);
            min_luma = min_luma.min(luma);
            max_luma = max_luma.max(luma);
            best_palette_score = best_palette_score.max(palette_match_score(r, g, b));
        }

        let rgb_span = f64::from(max_r.saturating_sub(min_r))
            + f64::from(max_g.saturating_sub(min_g))
            + f64::from(max_b.saturating_sub(min_b));
        let luma_span = f64::from(max_luma.saturating_sub(min_luma));
        // Luma change counts double, palette resemblance scales the score up
        // to 2x, and pixels whose darkest luma stays bright are discounted.
        *score =
            (rgb_span + (2.0 * luma_span)) * (1.0 + best_palette_score) * dark_roi_factor(min_luma);
    }
    adaptive_roi_mask_from_scores(&scores, MIN_RGB_ROI_SCORE)
}
|
||||
|
||||
/// Converts per-pixel activity scores into a boolean region-of-interest mask.
///
/// Ranks positive, finite scores from strongest to weakest, keeps at most
/// `MAX_ADAPTIVE_ROI_FRACTION` of all pixels (but never fewer than
/// `MIN_ADAPTIVE_ROI_PIXELS` candidates), stops once scores drop under an
/// adaptive floor, and finally retains only the largest connected region.
/// Returns `None` when no score reaches `min_score` or the surviving region
/// is smaller than `MIN_ADAPTIVE_ROI_PIXELS`.
fn adaptive_roi_mask_from_scores(scores: &[f64], min_score: f64) -> Option<Vec<bool>> {
    // f64::max returns the non-NaN operand, so NaN scores cannot poison the fold.
    let max_score = scores.iter().copied().fold(0.0_f64, f64::max);
    if max_score < min_score {
        return None;
    }

    let mut ranked = scores
        .iter()
        .copied()
        .enumerate()
        .filter(|(_, score)| score.is_finite() && *score > 0.0)
        .collect::<Vec<_>>();
    // Strongest first; total_cmp imposes a total order on f64 keys.
    ranked.sort_by(|left, right| right.1.total_cmp(&left.1));

    let max_selected = ((scores.len() as f64 * MAX_ADAPTIVE_ROI_FRACTION).round() as usize)
        .max(MIN_ADAPTIVE_ROI_PIXELS)
        .min(scores.len());
    // Dynamic floor: a fraction of the best score, never below the caller's minimum.
    let score_floor = (max_score * ADAPTIVE_ROI_SCORE_FRACTION).max(min_score);
    let mut mask = vec![false; scores.len()];
    for (selected, (index, score)) in ranked.into_iter().take(max_selected).enumerate() {
        // Stop at the floor, but only once the minimum pixel budget is met.
        if score < score_floor && selected >= MIN_ADAPTIVE_ROI_PIXELS {
            break;
        }
        mask[index] = true;
    }

    // Drop scattered outliers: only the biggest connected blob survives.
    let mask = retain_largest_connected_roi(mask);
    let selected = mask.iter().filter(|selected| **selected).count();
    (selected >= MIN_ADAPTIVE_ROI_PIXELS).then_some(mask)
}
|
||||
|
||||
/// Keeps only the largest 4-connected component of a square ROI mask.
///
/// The mask is interpreted as a `side x side` grid with `side = sqrt(len)`;
/// a mask whose length is not a perfect square has no grid interpretation
/// and is returned unchanged. If the biggest component is still smaller than
/// `MIN_ADAPTIVE_ROI_PIXELS`, the original mask is returned so the caller's
/// own size check decides the outcome.
fn retain_largest_connected_roi(mask: Vec<bool>) -> Vec<bool> {
    let side = (mask.len() as f64).sqrt().round() as usize;
    if side == 0 || side * side != mask.len() {
        return mask;
    }

    let mut visited = vec![false; mask.len()];
    let mut best_component = Vec::<usize>::new();
    for start in 0..mask.len() {
        if !mask[start] || visited[start] {
            continue;
        }
        // Iterative depth-first flood fill; an explicit stack avoids
        // recursion-depth limits on large masks.
        let mut stack = vec![start];
        let mut component = Vec::new();
        visited[start] = true;
        while let Some(index) = stack.pop() {
            component.push(index);
            let x = index % side;
            let y = index / side;
            let mut push_neighbor = |neighbor: usize| {
                if mask[neighbor] && !visited[neighbor] {
                    visited[neighbor] = true;
                    stack.push(neighbor);
                }
            };
            // 4-connectivity: left, right, up, down, guarded at grid edges.
            if x > 0 {
                push_neighbor(index - 1);
            }
            if x + 1 < side {
                push_neighbor(index + 1);
            }
            if y > 0 {
                push_neighbor(index - side);
            }
            if y + 1 < side {
                push_neighbor(index + side);
            }
        }
        if component.len() > best_component.len() {
            best_component = component;
        }
    }

    if best_component.len() < MIN_ADAPTIVE_ROI_PIXELS {
        return mask;
    }
    let mut retained = vec![false; mask.len()];
    for index in best_component {
        retained[index] = true;
    }
    retained
}
|
||||
|
||||
/// Fixed-point (x/256) luma approximation from RGB; the 77/150/29 weights
/// sum to 256, so white maps to 255 and the result always fits in a `u8`.
fn luma_u8(r: u8, g: u8, b: u8) -> u8 {
    let weighted = 77 * u16::from(r) + 150 * u16::from(g) + 29 * u16::from(b);
    (weighted >> 8) as u8
}
|
||||
|
||||
/// Weight applied to a pixel's ROI score based on the darkest luma it ever
/// shows: pixels that flash against a dark baseline keep full weight, while
/// pixels whose baseline stays bright are progressively discounted.
fn dark_roi_factor(min_luma: u8) -> f64 {
    if min_luma <= 80 {
        1.0
    } else if min_luma <= 120 {
        0.55
    } else if min_luma <= 160 {
        0.25
    } else {
        0.10
    }
}
|
||||
|
||||
/// Scores how closely one RGB sample matches any of the 16 probe palette
/// colors, in `[0.0, 1.0]`.
///
/// Samples too dim (`max < 50`) or too close to gray (chroma span `< 20`)
/// score 0.0 outright; otherwise the squared distance to the nearest palette
/// entry is mapped linearly onto the score, with 65_025 (= 255^2) of error
/// driving it to zero.
fn palette_match_score(r: u8, g: u8, b: u8) -> f64 {
    const PALETTE: [(u8, u8, u8); 16] = [
        (255, 45, 45),
        (0, 230, 118),
        (41, 121, 255),
        (255, 179, 0),
        (216, 27, 96),
        (0, 188, 212),
        (205, 220, 57),
        (126, 87, 194),
        (255, 112, 67),
        (38, 166, 154),
        (255, 64, 129),
        (92, 107, 192),
        (255, 235, 59),
        (105, 240, 174),
        (171, 71, 188),
        (3, 169, 244),
    ];

    let brightest = r.max(g).max(b);
    let dimmest = r.min(g).min(b);
    // Too dim or too gray to carry a palette code.
    if brightest < 50 || brightest.saturating_sub(dimmest) < 20 {
        return 0.0;
    }

    let mut best_distance = f64::INFINITY;
    for (pr, pg, pb) in PALETTE {
        let dr = f64::from(r) - f64::from(pr);
        let dg = f64::from(g) - f64::from(pg);
        let db = f64::from(b) - f64::from(pb);
        best_distance = best_distance.min(dr * dr + dg * dg + db * db);
    }
    (1.0 - (best_distance / 65_025.0)).clamp(0.0, 1.0)
}
|
||||
|
||||
#[cfg(test)]
|
||||
#[path = "media_extract/tests/mod.rs"]
|
||||
mod tests;
|
||||
|
||||
308
client/src/sync_probe/analyze/media_extract/roi.rs
Normal file
308
client/src/sync_probe/analyze/media_extract/roi.rs
Normal file
@ -0,0 +1,308 @@
|
||||
use super::{
|
||||
ADAPTIVE_ROI_SCORE_FRACTION, MAX_ADAPTIVE_ROI_FRACTION, MIN_ADAPTIVE_ROI_PIXELS,
|
||||
MIN_GRAY_ROI_SCORE, MIN_RGB_ROI_SCORE, VideoColorFrame,
|
||||
};
|
||||
|
||||
pub(super) fn summarize_gray_frames_with_adaptive_roi<'a>(
|
||||
frames: impl Iterator<Item = &'a [u8]>,
|
||||
pixel_count: usize,
|
||||
) -> Vec<u8> {
|
||||
let frames = frames.collect::<Vec<_>>();
|
||||
let mask = adaptive_gray_roi_mask(&frames, pixel_count);
|
||||
frames
|
||||
.iter()
|
||||
.map(|frame| summarize_frame_brightness(frame, mask.as_deref()))
|
||||
.collect()
|
||||
}
|
||||
|
||||
pub(super) fn summarize_rgb_frames_with_adaptive_roi<'a>(
|
||||
frames: impl Iterator<Item = &'a [u8]>,
|
||||
pixel_count: usize,
|
||||
) -> Vec<VideoColorFrame> {
|
||||
let frames = frames.collect::<Vec<_>>();
|
||||
let mask = adaptive_rgb_roi_mask(&frames, pixel_count);
|
||||
frames
|
||||
.iter()
|
||||
.map(|frame| summarize_frame_color(frame, mask.as_deref()))
|
||||
.collect()
|
||||
}
|
||||
|
||||
/// Keeps `summarize_frame_brightness` explicit because it sits on sync-probe analysis, where small timestamp or pairing mistakes can hide real A/V skew.
|
||||
/// Inputs are the typed parameters; output is the return value or side effect.
|
||||
pub(super) fn summarize_frame_brightness(frame: &[u8], mask: Option<&[bool]>) -> u8 {
|
||||
let mut sum = 0u64;
|
||||
let mut selected = 0u64;
|
||||
for (index, value) in frame.iter().copied().enumerate() {
|
||||
if mask.is_none_or(|mask| mask.get(index).copied().unwrap_or(false)) {
|
||||
sum += u64::from(value);
|
||||
selected += 1;
|
||||
}
|
||||
}
|
||||
if selected == 0 {
|
||||
sum = frame.iter().map(|value| u64::from(*value)).sum();
|
||||
selected = frame.len().max(1) as u64;
|
||||
}
|
||||
let mean = sum / selected;
|
||||
mean.min(u64::from(u8::MAX)) as u8
|
||||
}
|
||||
|
||||
/// Averages an RGB24 frame into one representative color.
///
/// Three passes, each a fallback for the previous:
/// 1. average masked (or all, when `mask` is `None`) pixels that are bright
///    and saturated enough to look like probe color;
/// 2. if none qualified, average all masked pixels regardless of saturation;
/// 3. if the mask itself selected nothing, average the whole frame.
/// `selected.max(1)` guards the division so an empty frame yields black.
pub(super) fn summarize_frame_color(frame: &[u8], mask: Option<&[bool]>) -> VideoColorFrame {
    let mut r_sum = 0u64;
    let mut g_sum = 0u64;
    let mut b_sum = 0u64;
    let mut selected = 0u64;

    // Pass 1: masked pixels with enough brightness and chroma span.
    for (index, pixel) in frame.chunks_exact(3).enumerate() {
        if !mask.is_none_or(|mask| mask.get(index).copied().unwrap_or(false)) {
            continue;
        }
        let r = pixel[0];
        let g = pixel[1];
        let b = pixel[2];
        let max = r.max(g).max(b);
        let min = r.min(g).min(b);
        if max >= 60 && max.saturating_sub(min) >= 24 {
            r_sum += u64::from(r);
            g_sum += u64::from(g);
            b_sum += u64::from(b);
            selected += 1;
        }
    }

    // Pass 2: no saturated pixel qualified — take every masked pixel.
    if selected == 0 {
        for (index, pixel) in frame.chunks_exact(3).enumerate() {
            if !mask.is_none_or(|mask| mask.get(index).copied().unwrap_or(false)) {
                continue;
            }
            r_sum += u64::from(pixel[0]);
            g_sum += u64::from(pixel[1]);
            b_sum += u64::from(pixel[2]);
            selected += 1;
        }
    }

    // Pass 3: the mask selected nothing — average the whole frame.
    if selected == 0 {
        for pixel in frame.chunks_exact(3) {
            r_sum += u64::from(pixel[0]);
            g_sum += u64::from(pixel[1]);
            b_sum += u64::from(pixel[2]);
            selected += 1;
        }
    }
    selected = selected.max(1);

    VideoColorFrame {
        r: (r_sum / selected).min(u64::from(u8::MAX)) as u8,
        g: (g_sum / selected).min(u64::from(u8::MAX)) as u8,
        b: (b_sum / selected).min(u64::from(u8::MAX)) as u8,
    }
}
|
||||
|
||||
/// Keeps `adaptive_gray_roi_mask` explicit because it sits on sync-probe analysis, where small timestamp or pairing mistakes can hide real A/V skew.
|
||||
/// Inputs are the typed parameters; output is the return value or side effect.
|
||||
pub(super) fn adaptive_gray_roi_mask(frames: &[&[u8]], pixel_count: usize) -> Option<Vec<bool>> {
|
||||
if frames.len() < 2 || pixel_count == 0 {
|
||||
return None;
|
||||
}
|
||||
let mut scores = vec![0.0; pixel_count];
|
||||
for (pixel_index, score) in scores.iter_mut().enumerate() {
|
||||
let mut min = u8::MAX;
|
||||
let mut max = u8::MIN;
|
||||
for frame in frames {
|
||||
let value = frame[pixel_index];
|
||||
min = min.min(value);
|
||||
max = max.max(value);
|
||||
}
|
||||
*score = f64::from(max.saturating_sub(min)) * dark_roi_factor(min);
|
||||
}
|
||||
adaptive_roi_mask_from_scores(&scores, MIN_GRAY_ROI_SCORE)
|
||||
}
|
||||
|
||||
/// Builds a per-pixel region-of-interest mask from RGB frame history.
///
/// Each pixel is scored by how much its color moves across `frames`: the
/// summed per-channel span plus a double-weighted luma span, boosted when any
/// sample resembles a probe palette color and discounted when the pixel never
/// goes dark. Thresholding is shared with the grayscale path via
/// `adaptive_roi_mask_from_scores`.
///
/// Returns `None` for fewer than two frames or `pixel_count == 0`. Frames are
/// indexed as tightly packed RGB24, so each must hold at least
/// `pixel_count * 3` bytes — shorter frames would panic (NOTE(review):
/// confirm callers guarantee this).
pub(super) fn adaptive_rgb_roi_mask(frames: &[&[u8]], pixel_count: usize) -> Option<Vec<bool>> {
    if frames.len() < 2 || pixel_count == 0 {
        return None;
    }
    let mut scores = vec![0.0; pixel_count];
    for (pixel_index, score) in scores.iter_mut().enumerate() {
        // Per-channel and luma extremes across the frame history, plus the
        // strongest palette similarity seen at this pixel.
        let mut min_r = u8::MAX;
        let mut min_g = u8::MAX;
        let mut min_b = u8::MAX;
        let mut max_r = u8::MIN;
        let mut max_g = u8::MIN;
        let mut max_b = u8::MIN;
        let mut min_luma = u8::MAX;
        let mut max_luma = u8::MIN;
        let mut best_palette_score = 0.0_f64;

        for frame in frames {
            let offset = pixel_index * 3;
            let r = frame[offset];
            let g = frame[offset + 1];
            let b = frame[offset + 2];
            min_r = min_r.min(r);
            min_g = min_g.min(g);
            min_b = min_b.min(b);
            max_r = max_r.max(r);
            max_g = max_g.max(g);
            max_b = max_b.max(b);
            let luma = luma_u8(r, g, b);
            min_luma = min_luma.min(luma);
            max_luma = max_luma.max(luma);
            best_palette_score = best_palette_score.max(palette_match_score(r, g, b));
        }

        let rgb_span = f64::from(max_r.saturating_sub(min_r))
            + f64::from(max_g.saturating_sub(min_g))
            + f64::from(max_b.saturating_sub(min_b));
        let luma_span = f64::from(max_luma.saturating_sub(min_luma));
        // Luma change counts double, palette resemblance scales the score up
        // to 2x, and pixels whose darkest luma stays bright are discounted.
        *score =
            (rgb_span + (2.0 * luma_span)) * (1.0 + best_palette_score) * dark_roi_factor(min_luma);
    }
    adaptive_roi_mask_from_scores(&scores, MIN_RGB_ROI_SCORE)
}
|
||||
|
||||
/// Converts per-pixel activity scores into a boolean region-of-interest mask.
///
/// Ranks positive, finite scores from strongest to weakest, keeps at most
/// `MAX_ADAPTIVE_ROI_FRACTION` of all pixels (but never fewer than
/// `MIN_ADAPTIVE_ROI_PIXELS` candidates), stops once scores drop under an
/// adaptive floor, and finally retains only the largest connected region.
/// Returns `None` when no score reaches `min_score` or the surviving region
/// is smaller than `MIN_ADAPTIVE_ROI_PIXELS`.
pub(super) fn adaptive_roi_mask_from_scores(scores: &[f64], min_score: f64) -> Option<Vec<bool>> {
    // f64::max returns the non-NaN operand, so NaN scores cannot poison the fold.
    let max_score = scores.iter().copied().fold(0.0_f64, f64::max);
    if max_score < min_score {
        return None;
    }

    let mut ranked = scores
        .iter()
        .copied()
        .enumerate()
        .filter(|(_, score)| score.is_finite() && *score > 0.0)
        .collect::<Vec<_>>();
    // Strongest first; total_cmp imposes a total order on f64 keys.
    ranked.sort_by(|left, right| right.1.total_cmp(&left.1));

    let max_selected = ((scores.len() as f64 * MAX_ADAPTIVE_ROI_FRACTION).round() as usize)
        .max(MIN_ADAPTIVE_ROI_PIXELS)
        .min(scores.len());
    // Dynamic floor: a fraction of the best score, never below the caller's minimum.
    let score_floor = (max_score * ADAPTIVE_ROI_SCORE_FRACTION).max(min_score);
    let mut mask = vec![false; scores.len()];
    for (selected, (index, score)) in ranked.into_iter().take(max_selected).enumerate() {
        // Stop at the floor, but only once the minimum pixel budget is met.
        if score < score_floor && selected >= MIN_ADAPTIVE_ROI_PIXELS {
            break;
        }
        mask[index] = true;
    }

    // Drop scattered outliers: only the biggest connected blob survives.
    let mask = retain_largest_connected_roi(mask);
    let selected = mask.iter().filter(|selected| **selected).count();
    (selected >= MIN_ADAPTIVE_ROI_PIXELS).then_some(mask)
}
|
||||
|
||||
/// Keeps only the largest 4-connected component of a square ROI mask.
///
/// The mask is interpreted as a `side x side` grid with `side = sqrt(len)`;
/// a mask whose length is not a perfect square has no grid interpretation
/// and is returned unchanged. If the biggest component is still smaller than
/// `MIN_ADAPTIVE_ROI_PIXELS`, the original mask is returned so the caller's
/// own size check decides the outcome.
pub(super) fn retain_largest_connected_roi(mask: Vec<bool>) -> Vec<bool> {
    let side = (mask.len() as f64).sqrt().round() as usize;
    if side == 0 || side * side != mask.len() {
        return mask;
    }

    let mut visited = vec![false; mask.len()];
    let mut best_component = Vec::<usize>::new();
    for start in 0..mask.len() {
        if !mask[start] || visited[start] {
            continue;
        }
        // Iterative depth-first flood fill; an explicit stack avoids
        // recursion-depth limits on large masks.
        let mut stack = vec![start];
        let mut component = Vec::new();
        visited[start] = true;
        while let Some(index) = stack.pop() {
            component.push(index);
            let x = index % side;
            let y = index / side;
            let mut push_neighbor = |neighbor: usize| {
                if mask[neighbor] && !visited[neighbor] {
                    visited[neighbor] = true;
                    stack.push(neighbor);
                }
            };
            // 4-connectivity: left, right, up, down, guarded at grid edges.
            if x > 0 {
                push_neighbor(index - 1);
            }
            if x + 1 < side {
                push_neighbor(index + 1);
            }
            if y > 0 {
                push_neighbor(index - side);
            }
            if y + 1 < side {
                push_neighbor(index + side);
            }
        }
        if component.len() > best_component.len() {
            best_component = component;
        }
    }

    if best_component.len() < MIN_ADAPTIVE_ROI_PIXELS {
        return mask;
    }
    let mut retained = vec![false; mask.len()];
    for index in best_component {
        retained[index] = true;
    }
    retained
}
|
||||
|
||||
/// Fixed-point (x/256) luma approximation from RGB; the 77/150/29 weights
/// sum to 256, so white maps to 255 and the result always fits in a `u8`.
fn luma_u8(r: u8, g: u8, b: u8) -> u8 {
    let weighted = 77 * u16::from(r) + 150 * u16::from(g) + 29 * u16::from(b);
    (weighted >> 8) as u8
}
|
||||
|
||||
/// Keeps `dark_roi_factor` explicit because it sits on sync-probe analysis, where small timestamp or pairing mistakes can hide real A/V skew.
|
||||
/// Inputs are the typed parameters; output is the return value or side effect.
|
||||
pub(super) fn dark_roi_factor(min_luma: u8) -> f64 {
|
||||
match min_luma {
|
||||
0..=80 => 1.0,
|
||||
81..=120 => 0.55,
|
||||
121..=160 => 0.25,
|
||||
_ => 0.10,
|
||||
}
|
||||
}
|
||||
|
||||
/// Keeps `palette_match_score` explicit because it sits on sync-probe analysis, where small timestamp or pairing mistakes can hide real A/V skew.
|
||||
/// Inputs are the typed parameters; output is the return value or side effect.
|
||||
pub(super) fn palette_match_score(r: u8, g: u8, b: u8) -> f64 {
|
||||
let max = r.max(g).max(b);
|
||||
let min = r.min(g).min(b);
|
||||
if max < 50 || max.saturating_sub(min) < 20 {
|
||||
return 0.0;
|
||||
}
|
||||
|
||||
const PALETTE: [(u8, u8, u8); 16] = [
|
||||
(255, 45, 45),
|
||||
(0, 230, 118),
|
||||
(41, 121, 255),
|
||||
(255, 179, 0),
|
||||
(216, 27, 96),
|
||||
(0, 188, 212),
|
||||
(205, 220, 57),
|
||||
(126, 87, 194),
|
||||
(255, 112, 67),
|
||||
(38, 166, 154),
|
||||
(255, 64, 129),
|
||||
(92, 107, 192),
|
||||
(255, 235, 59),
|
||||
(105, 240, 174),
|
||||
(171, 71, 188),
|
||||
(3, 169, 244),
|
||||
];
|
||||
let best_distance = PALETTE
|
||||
.into_iter()
|
||||
.map(|(pr, pg, pb)| {
|
||||
let dr = f64::from(r) - f64::from(pr);
|
||||
let dg = f64::from(g) - f64::from(pg);
|
||||
let db = f64::from(b) - f64::from(pb);
|
||||
dr * dr + dg * dg + db * db
|
||||
})
|
||||
.fold(f64::INFINITY, f64::min);
|
||||
(1.0 - (best_distance / 65_025.0)).clamp(0.0, 1.0)
|
||||
}
|
||||
@ -1,6 +1,7 @@
|
||||
use super::{
|
||||
extract_audio_samples, extract_video_brightness, extract_video_colors,
|
||||
extract_video_timestamps, run_command,
|
||||
adaptive_gray_roi_mask, adaptive_rgb_roi_mask, dark_roi_factor, extract_audio_samples,
|
||||
extract_video_brightness, extract_video_colors, extract_video_timestamps, palette_match_score,
|
||||
retain_largest_connected_roi, run_command, summarize_frame_brightness, summarize_frame_color,
|
||||
};
|
||||
use crate::sync_probe::analyze::test_support::{
|
||||
audio_samples_to_bytes, frame_json, thumbnail_rgb_video_bytes, thumbnail_video_bytes,
|
||||
@ -126,6 +127,28 @@ fn extract_video_colors_reads_fake_ffmpeg_output() {
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
/// Proves `extract_video_colors` fails loudly on unusable ffmpeg output.
///
/// Inputs: a fake ffmpeg that emits (a) a frame timestamp with zero color
/// bytes and (b) a color payload whose byte count is not a multiple of a
/// whole frame. Output: both cases surface descriptive errors instead of
/// silently producing default colors.
fn extract_video_colors_rejects_empty_and_truncated_frame_data() {
    // One timestamped frame but no color payload at all.
    with_fake_media_tools(
        br#"{"frames":[{"best_effort_timestamp_time":"0.0"}]}"#,
        &[],
        &[1, 0],
        |capture_path| {
            let error = extract_video_colors(capture_path).expect_err("empty colors");
            assert!(
                error
                    .to_string()
                    .contains("did not emit any video color data")
            );
        },
    );

    // Three stray bytes cannot split evenly into frames — expect the
    // divisibility error rather than a partially decoded frame.
    with_fake_media_tools(&frame_json(&[0.0]), &[1, 2, 3], &[1, 0], |capture_path| {
        let error = extract_video_colors(capture_path).expect_err("truncated color bytes");
        assert!(error.to_string().contains("not divisible"));
    });
}
|
||||
|
||||
#[test]
|
||||
/// Keeps `extract_video_colors_tracks_small_flashing_screen_region` explicit because it sits on sync-probe analysis, where small timestamp or pairing mistakes can hide real A/V skew.
|
||||
/// Inputs are the typed parameters; output is the return value or side effect.
|
||||
@ -220,3 +243,42 @@ fn run_command_reports_success_and_failure() {
|
||||
.expect_err("failing command should error");
|
||||
assert!(error.to_string().contains("failing command failed: boom"));
|
||||
}
|
||||
|
||||
#[test]
/// Verifies adaptive ROI helpers have explicit fallback behavior.
///
/// Inputs: tiny masks and frames that cannot produce a stable ROI plus one
/// connected flashing region. Outputs: helper-level assertions. Why: analyzer
/// robustness depends on falling back to whole-frame summaries when the
/// capture has too little color/brightness evidence for a reliable mask.
fn adaptive_roi_helpers_cover_fallbacks_and_connected_region_retention() {
    // Too little frame history (or none) must never yield a mask.
    assert!(adaptive_gray_roi_mask(&[], 4).is_none());
    assert!(adaptive_rgb_roi_mask(&[], 4).is_none());
    assert!(adaptive_gray_roi_mask(&[&[1, 2, 3, 4]], 4).is_none());
    assert!(adaptive_rgb_roi_mask(&[&[1, 2, 3, 4, 5, 6]], 2).is_none());

    // An all-false mask falls back to whole-frame averaging.
    assert_eq!(
        summarize_frame_brightness(&[10, 30], Some(&[false, false])),
        20
    );

    let color = summarize_frame_color(&[10, 20, 30, 40, 50, 60], Some(&[false, false]));
    assert_eq!((color.r, color.g, color.b), (25, 35, 45));

    // Brighter baselines are discounted; gray samples never match the palette.
    assert_eq!(dark_roi_factor(130), 0.25);
    assert_eq!(dark_roi_factor(200), 0.10);
    assert_eq!(palette_match_score(10, 10, 10), 0.0);
    assert!(palette_match_score(255, 45, 45) > 0.95);

    // Non-square masks have no grid interpretation and pass through untouched.
    let non_square = vec![true, false, true];
    assert_eq!(retain_largest_connected_roi(non_square.clone()), non_square);

    // A 6x6 grid: a 20-pixel blob plus one isolated corner pixel; only the
    // blob survives connected-region retention.
    let mut mask = vec![false; 36];
    for selected in mask.iter_mut().take(20) {
        *selected = true;
    }
    mask[35] = true;
    let retained = retain_largest_connected_roi(mask);
    assert_eq!(retained.iter().filter(|selected| **selected).count(), 20);
    assert!(!retained[35]);
}
|
||||
|
||||
@ -1,5 +1,9 @@
|
||||
use anyhow::{Result, bail};
|
||||
|
||||
use crate::sync_probe::signature::{
|
||||
MAX_EVENT_CODE, probe_audio_frequency_for_event_code, probe_color_for_event_code,
|
||||
};
|
||||
|
||||
mod correlation;
|
||||
#[cfg(test)]
|
||||
mod tests;
|
||||
@ -23,10 +27,6 @@ const MAX_AUDIO_PULSE_INTERNAL_GAP_S: f64 = 0.16;
|
||||
const MIN_AUDIO_PROBE_PEAK: f64 = 25.0;
|
||||
const AUDIO_ENVELOPE_THRESHOLD_FRACTION: f64 = 0.30;
|
||||
const AUDIO_SAMPLE_THRESHOLD_FRACTION: f64 = 0.22;
|
||||
const AUDIO_TONE_FREQUENCIES_HZ: [f64; 16] = [
|
||||
620.0, 780.0, 940.0, 1120.0, 1320.0, 1540.0, 1780.0, 2040.0, 2320.0, 2620.0, 2960.0, 3340.0,
|
||||
3760.0, 4220.0, 4740.0, 5320.0,
|
||||
];
|
||||
const MIN_TONE_ENVELOPE_PEAK: f64 = 18.0;
|
||||
const MIN_TONE_CONTRAST_FRACTION_OF_AMPLITUDE: f64 = 0.12;
|
||||
const MIN_TONE_CODE_DOMINANCE_RATIO: f64 = 1.35;
|
||||
|
||||
@ -187,7 +187,7 @@ fn strongest_probe_tone_window(
|
||||
event_codes: &[u32],
|
||||
) -> ProbeToneWindow {
|
||||
let code_iter: Box<dyn Iterator<Item = u32> + '_> = if event_codes.is_empty() {
|
||||
Box::new(1..=AUDIO_TONE_FREQUENCIES_HZ.len() as u32)
|
||||
Box::new(1..=MAX_EVENT_CODE)
|
||||
} else {
|
||||
Box::new(event_codes.iter().copied())
|
||||
};
|
||||
@ -215,9 +215,7 @@ fn strongest_probe_tone_window(
|
||||
}
|
||||
|
||||
fn audio_frequency_for_event_code(code: u32) -> Option<f64> {
|
||||
AUDIO_TONE_FREQUENCIES_HZ
|
||||
.get(code.checked_sub(1)? as usize)
|
||||
.copied()
|
||||
probe_audio_frequency_for_event_code(code)
|
||||
}
|
||||
|
||||
/// Keeps `goertzel_level` explicit because it sits on sync-probe analysis, where small timestamp or pairing mistakes can hide real A/V skew.
|
||||
|
||||
@ -122,13 +122,6 @@ pub(crate) fn correlate_segments(
|
||||
.iter()
|
||||
.map(|segment| segment.start_s)
|
||||
.collect::<Vec<_>>();
|
||||
if video_onsets_s.is_empty() {
|
||||
bail!("video onset list is empty");
|
||||
}
|
||||
if audio_onsets_s.is_empty() {
|
||||
bail!("audio onset list is empty");
|
||||
}
|
||||
|
||||
let (video_onsets_s, audio_onsets_s, common_window) =
|
||||
trim_onsets_to_common_activity_window(&video_onsets_s, &audio_onsets_s, max_pair_gap_s);
|
||||
let expected_start_skew_ms = (audio_onsets_s[0] - video_onsets_s[0]) * 1000.0;
|
||||
@ -226,13 +219,6 @@ pub(crate) fn correlate_coded_segments(
|
||||
collapse_segments_by_phase(video_segments, pulse_period_s, phase_tolerance_s);
|
||||
let audio_segments =
|
||||
collapse_segments_by_phase(audio_segments, pulse_period_s, phase_tolerance_s);
|
||||
if video_segments.is_empty() {
|
||||
bail!("video onset list is empty");
|
||||
}
|
||||
if audio_segments.is_empty() {
|
||||
bail!("audio onset list is empty");
|
||||
}
|
||||
|
||||
let video_onsets_s = video_segments
|
||||
.iter()
|
||||
.map(|segment| segment.start_s)
|
||||
|
||||
@ -173,6 +173,101 @@ fn detect_color_coded_video_segments_accepts_camera_washed_palette() {
|
||||
assert!((segments[3].duration_s - 0.48).abs() < 0.001);
|
||||
}
|
||||
|
||||
#[test]
/// Keeps `color_coded_video_validation_and_fallback_branches_are_explicit` explicit because weak or malformed coded-video evidence should fail before it can steer sync calibration.
/// Inputs are direct color segment helper cases plus malformed detector requests; output proves validation, dominant-color fallback, final-segment close, and overlong rejection are stable.
fn color_coded_video_validation_and_fallback_branches_are_explicit() {
    // Malformed requests: empty inputs, zero unit width, empty code list, and
    // a code outside the 16-color palette must all be rejected.
    assert!(detect_color_coded_video_segments(&[], &[], &[1], 0.12).is_err());
    assert!(
        detect_color_coded_video_segments(
            &[0.0],
            &[VideoColorFrame { r: 255, g: 0, b: 0 }],
            &[1],
            0.0,
        )
        .is_err()
    );
    assert!(
        detect_color_coded_video_segments(
            &[0.0],
            &[VideoColorFrame { r: 255, g: 0, b: 0 }],
            &[],
            0.12,
        )
        .is_err()
    );
    assert!(
        detect_color_coded_video_segments(
            &[0.0],
            &[VideoColorFrame { r: 255, g: 0, b: 0 }],
            &[17],
            0.12,
        )
        .is_err()
    );

    // A segment still active at the last frame must be closed, not dropped.
    let timestamps = [0.0, 0.1, 0.2, 0.3];
    let trailing_frames = [
        VideoColorFrame { r: 0, g: 0, b: 0 },
        VideoColorFrame { r: 0, g: 0, b: 0 },
        VideoColorFrame { r: 255, g: 0, b: 0 },
        VideoColorFrame { r: 255, g: 0, b: 0 },
    ];
    let trailing = detect_color_coded_video_segments(&timestamps, &trailing_frames, &[1], 0.12)
        .expect("trailing segment");
    assert_eq!(trailing.len(), 1);
    assert!(trailing[0].end_s > trailing[0].start_s);

    // push_color_segment: no codes or an overlong run is dropped; a short run
    // is snapped to the expected coded duration.
    let mut segments = Vec::new();
    super::push_color_segment(&mut segments, 0.0, 0.1, 0.12, &[], 0.033);
    assert!(segments.is_empty());
    super::push_color_segment(&mut segments, 0.0, 1.0, 0.12, &[1], 0.033);
    assert!(segments.is_empty());
    super::push_color_segment(&mut segments, 0.0, 0.02, 0.12, &[2, 1, 2, 1], 0.033);
    assert_eq!(segments[0].duration_s, 0.12);

    // Dominant-color fallback: channel-dominance maps onto codes 1..=4, and a
    // pure gray sample has no dominant channel.
    assert_eq!(
        super::dominant_color_event_code(VideoColorFrame {
            r: 180,
            g: 170,
            b: 80,
        }),
        Some(4)
    );
    assert_eq!(
        super::dominant_color_event_code(VideoColorFrame {
            r: 180,
            g: 70,
            b: 60
        }),
        Some(1)
    );
    assert_eq!(
        super::dominant_color_event_code(VideoColorFrame {
            r: 60,
            g: 180,
            b: 70
        }),
        Some(2)
    );
    assert_eq!(
        super::dominant_color_event_code(VideoColorFrame {
            r: 60,
            g: 70,
            b: 180
        }),
        Some(3)
    );
    assert_eq!(
        super::dominant_color_event_code(VideoColorFrame {
            r: 100,
            g: 100,
            b: 100,
        }),
        None
    );
}
|
||||
|
||||
#[test]
|
||||
/// Keeps `detect_audio_segments_keeps_regular_and_marker_durations_distinct` explicit because it sits on sync-probe analysis, where small timestamp or pairing mistakes can hide real A/V skew.
|
||||
/// Inputs are the typed parameters; output is the return value or side effect.
|
||||
@ -469,6 +564,23 @@ fn correlate_segments_validate_inputs_and_support_single_pulse_fallback() {
|
||||
assert!(correlate_segments(&video, &audio, 1.0, 0.1, 3, 0.05).is_err());
|
||||
}
|
||||
|
||||
#[test]
/// Proves `correlate_coded_segments` rejects malformed coded-probe setups.
///
/// Inputs: one matching video/audio pulse pair plus an invalid value in each
/// configuration position. Output: every call errors, so bad configuration
/// cannot silently yield a sync report.
fn correlate_coded_segments_rejects_invalid_coded_probe_configuration() {
    let segment = PulseSegment {
        start_s: 1.0,
        end_s: 1.12,
        duration_s: 0.12,
    };
    let video = [segment];
    let audio = [segment];

    // Each numeric/list argument is exercised with an empty or zero value.
    assert!(correlate_coded_segments(&video, &audio, 1.0, 0.12, &[], 0.2).is_err());
    assert!(correlate_coded_segments(&video, &audio, 1.0, 0.12, &[0], 0.2).is_err());
    assert!(correlate_coded_segments(&video, &audio, 0.0, 0.12, &[1], 0.2).is_err());
    assert!(correlate_coded_segments(&video, &audio, 1.0, 0.0, &[1], 0.2).is_err());
    assert!(correlate_coded_segments(&video, &audio, 1.0, 0.12, &[1], 0.0).is_err());
}
|
||||
|
||||
#[test]
|
||||
/// Keeps `correlate_segments_preserves_whole_period_delay_evidence` explicit because it sits on sync-probe analysis, where small timestamp or pairing mistakes can hide real A/V skew.
|
||||
/// Inputs are the typed parameters; output is the return value or side effect.
|
||||
@ -607,6 +719,44 @@ fn correlate_coded_segments_matches_preserved_event_width_codes() {
|
||||
assert!(report.max_abs_skew_ms < 50.0);
|
||||
}
|
||||
|
||||
#[test]
/// Keeps `correlate_coded_segments_uses_time_fallback_when_extra_audio_breaks_indexing` explicit because coded pulse identity is the strongest sync proof when transport adds one-off detections that would otherwise confuse cadence indexing.
/// Inputs are coded video/audio pulse segments with one extra audio detection; output proves the analyzer keeps the three valid flash/tone pairs.
fn correlate_coded_segments_uses_time_fallback_when_extra_audio_breaks_indexing() {
    // Builds a pulse whose duration encodes its event code (0.12 s per unit).
    fn segment(start_s: f64, code: u32) -> PulseSegment {
        let duration_s = 0.12 * f64::from(code);
        PulseSegment {
            start_s,
            end_s: start_s + duration_s,
            duration_s,
        }
    }

    let codes = [1, 2, 3, 4];
    let video = [segment(0.0, 1), segment(1.0, 2), segment(2.0, 3)];
    // The 0.50 s code-4 pulse is a spurious extra detection with no video twin.
    let audio = [
        segment(0.04, 1),
        segment(0.50, 4),
        segment(1.04, 2),
        segment(2.04, 3),
    ];

    let report =
        correlate_coded_segments(&video, &audio, 1.0, 0.12, &codes, 0.2).expect("coded report");

    // All three genuine pairs survive; audio trails video by 40 ms in each.
    assert_eq!(report.paired_event_count, 3);
    assert_eq!(
        report
            .paired_events
            .iter()
            .map(|event| event.event_code)
            .collect::<Vec<_>>(),
        vec![Some(1), Some(2), Some(3)]
    );
    assert!((report.median_skew_ms - 40.0).abs() < 1.0);
    assert!(report.max_abs_skew_ms < 50.0);
}
|
||||
|
||||
#[test]
|
||||
/// Keeps `correlate_coded_segments_recovers_when_extra_video_detections_win_phase_collapse` explicit because it sits on sync-probe analysis, where small timestamp or pairing mistakes can hide real A/V skew.
|
||||
/// Inputs are the typed parameters; output is the return value or side effect.
|
||||
|
||||
@ -268,144 +268,11 @@ fn dominant_color_event_code(frame: VideoColorFrame) -> Option<u32> {
|
||||
}
|
||||
|
||||
fn color_for_event_code(code: u32) -> Option<VideoColorFrame> {
|
||||
color_palette()
|
||||
.into_iter()
|
||||
.find_map(|(palette_code, color)| (palette_code == code).then_some(color))
|
||||
}
|
||||
|
||||
/// Keeps `color_palette` explicit because it sits on sync-probe analysis, where small timestamp or pairing mistakes can hide real A/V skew.
|
||||
/// Inputs are the typed parameters; output is the return value or side effect.
|
||||
fn color_palette() -> [(u32, VideoColorFrame); 16] {
|
||||
[
|
||||
(
|
||||
1,
|
||||
VideoColorFrame {
|
||||
r: 255,
|
||||
g: 45,
|
||||
b: 45,
|
||||
},
|
||||
),
|
||||
(
|
||||
2,
|
||||
VideoColorFrame {
|
||||
r: 0,
|
||||
g: 230,
|
||||
b: 118,
|
||||
},
|
||||
),
|
||||
(
|
||||
3,
|
||||
VideoColorFrame {
|
||||
r: 41,
|
||||
g: 121,
|
||||
b: 255,
|
||||
},
|
||||
),
|
||||
(
|
||||
4,
|
||||
VideoColorFrame {
|
||||
r: 255,
|
||||
g: 179,
|
||||
b: 0,
|
||||
},
|
||||
),
|
||||
(
|
||||
5,
|
||||
VideoColorFrame {
|
||||
r: 216,
|
||||
g: 27,
|
||||
b: 96,
|
||||
},
|
||||
),
|
||||
(
|
||||
6,
|
||||
VideoColorFrame {
|
||||
r: 0,
|
||||
g: 188,
|
||||
b: 212,
|
||||
},
|
||||
),
|
||||
(
|
||||
7,
|
||||
VideoColorFrame {
|
||||
r: 205,
|
||||
g: 220,
|
||||
b: 57,
|
||||
},
|
||||
),
|
||||
(
|
||||
8,
|
||||
VideoColorFrame {
|
||||
r: 126,
|
||||
g: 87,
|
||||
b: 194,
|
||||
},
|
||||
),
|
||||
(
|
||||
9,
|
||||
VideoColorFrame {
|
||||
r: 255,
|
||||
g: 112,
|
||||
b: 67,
|
||||
},
|
||||
),
|
||||
(
|
||||
10,
|
||||
VideoColorFrame {
|
||||
r: 38,
|
||||
g: 166,
|
||||
b: 154,
|
||||
},
|
||||
),
|
||||
(
|
||||
11,
|
||||
VideoColorFrame {
|
||||
r: 255,
|
||||
g: 64,
|
||||
b: 129,
|
||||
},
|
||||
),
|
||||
(
|
||||
12,
|
||||
VideoColorFrame {
|
||||
r: 92,
|
||||
g: 107,
|
||||
b: 192,
|
||||
},
|
||||
),
|
||||
(
|
||||
13,
|
||||
VideoColorFrame {
|
||||
r: 255,
|
||||
g: 235,
|
||||
b: 59,
|
||||
},
|
||||
),
|
||||
(
|
||||
14,
|
||||
VideoColorFrame {
|
||||
r: 105,
|
||||
g: 240,
|
||||
b: 174,
|
||||
},
|
||||
),
|
||||
(
|
||||
15,
|
||||
VideoColorFrame {
|
||||
r: 171,
|
||||
g: 71,
|
||||
b: 188,
|
||||
},
|
||||
),
|
||||
(
|
||||
16,
|
||||
VideoColorFrame {
|
||||
r: 3,
|
||||
g: 169,
|
||||
b: 244,
|
||||
},
|
||||
),
|
||||
]
|
||||
probe_color_for_event_code(code).map(|color| VideoColorFrame {
|
||||
r: color.r,
|
||||
g: color.g,
|
||||
b: color.b,
|
||||
})
|
||||
}
|
||||
|
||||
fn color_distance_squared(left: VideoColorFrame, right: VideoColorFrame) -> u32 {
|
||||
@ -414,4 +281,3 @@ fn color_distance_squared(left: VideoColorFrame, right: VideoColorFrame) -> u32
|
||||
let db = i32::from(left.b) - i32::from(right.b);
|
||||
(dr * dr + dg * dg + db * db) as u32
|
||||
}
|
||||
|
||||
|
||||
@ -196,6 +196,35 @@ fn calibration_recommendation_uses_coded_pairs_when_raw_activity_disagrees() {
|
||||
assert!(recommendation.note.contains("paired pulses disagree"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
/// Keeps `calibration_recommendation_rejects_raw_activity_that_disagrees_too_much` explicit because calibration must not bake in a delay from aliased cadence evidence.
|
||||
/// Inputs are a stable paired report with non-coded raw activity far away from the median; output is a not-ready recommendation.
|
||||
fn calibration_recommendation_rejects_raw_activity_that_disagrees_too_much() {
|
||||
let report = SyncAnalysisReport {
|
||||
video_event_count: 16,
|
||||
audio_event_count: 16,
|
||||
paired_event_count: 16,
|
||||
coded_events: false,
|
||||
activity_start_delta_ms: 2_000.0,
|
||||
raw_first_video_activity_s: 0.0,
|
||||
raw_first_audio_activity_s: 2.0,
|
||||
first_skew_ms: 10.0,
|
||||
last_skew_ms: 12.0,
|
||||
mean_skew_ms: 11.0,
|
||||
median_skew_ms: 11.0,
|
||||
max_abs_skew_ms: 12.0,
|
||||
drift_ms: 2.0,
|
||||
skews_ms: vec![10.0, 11.0, 12.0],
|
||||
video_onsets_s: vec![],
|
||||
audio_onsets_s: vec![],
|
||||
paired_events: vec![],
|
||||
};
|
||||
|
||||
let recommendation = report.calibration_recommendation();
|
||||
assert!(!recommendation.ready);
|
||||
assert!(recommendation.note.contains("calibration-safe band"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
/// Keeps `calibration_recommendation_reports_when_skew_is_already_settled` explicit because it sits on sync-probe analysis, where small timestamp or pairing mistakes can hide real A/V skew.
|
||||
/// Inputs are the typed parameters; output is the return value or side effect.
|
||||
@ -255,6 +284,94 @@ fn verdict_passes_preferred_skew_band() {
|
||||
assert_eq!(verdict.status, "preferred");
|
||||
}
|
||||
|
||||
#[test]
|
||||
/// Keeps `verdict_requires_enough_pairs_before_trusting_skew` explicit because a few lucky pulse matches can hide a broken client transport train.
|
||||
/// Inputs are fewer than the minimum paired events; output is an insufficient-data verdict.
|
||||
fn verdict_requires_enough_pairs_before_trusting_skew() {
|
||||
let report = SyncAnalysisReport {
|
||||
video_event_count: 2,
|
||||
audio_event_count: 2,
|
||||
paired_event_count: 2,
|
||||
coded_events: true,
|
||||
activity_start_delta_ms: 0.0,
|
||||
raw_first_video_activity_s: 0.0,
|
||||
raw_first_audio_activity_s: 0.0,
|
||||
first_skew_ms: 1.0,
|
||||
last_skew_ms: 2.0,
|
||||
mean_skew_ms: 1.5,
|
||||
median_skew_ms: 1.5,
|
||||
max_abs_skew_ms: 2.0,
|
||||
drift_ms: 1.0,
|
||||
skews_ms: vec![1.0, 2.0],
|
||||
video_onsets_s: vec![],
|
||||
audio_onsets_s: vec![],
|
||||
paired_events: vec![],
|
||||
};
|
||||
|
||||
let verdict = report.verdict();
|
||||
assert!(!verdict.passed);
|
||||
assert_eq!(verdict.status, "insufficient_data");
|
||||
}
|
||||
|
||||
#[test]
|
||||
/// Keeps `verdict_passes_acceptable_skew_band` explicit because acceptable-but-not-preferred sync is still useful evidence while tuning transport.
|
||||
/// Inputs are paired skews below the acceptable band; output is a passing acceptable verdict.
|
||||
fn verdict_passes_acceptable_skew_band() {
|
||||
let report = SyncAnalysisReport {
|
||||
video_event_count: 5,
|
||||
audio_event_count: 5,
|
||||
paired_event_count: 5,
|
||||
coded_events: false,
|
||||
activity_start_delta_ms: 0.0,
|
||||
raw_first_video_activity_s: 0.0,
|
||||
raw_first_audio_activity_s: 0.0,
|
||||
first_skew_ms: 45.0,
|
||||
last_skew_ms: 70.0,
|
||||
mean_skew_ms: 55.0,
|
||||
median_skew_ms: 55.0,
|
||||
max_abs_skew_ms: 70.0,
|
||||
drift_ms: 25.0,
|
||||
skews_ms: vec![45.0, 50.0, 55.0, 60.0, 70.0],
|
||||
video_onsets_s: vec![],
|
||||
audio_onsets_s: vec![],
|
||||
paired_events: vec![],
|
||||
};
|
||||
|
||||
let verdict = report.verdict();
|
||||
assert!(verdict.passed);
|
||||
assert_eq!(verdict.status, "acceptable");
|
||||
}
|
||||
|
||||
#[test]
|
||||
/// Keeps `verdict_reports_gross_failure_between_acceptable_and_catastrophic` explicit because transport tuning needs a middle failure class before declaring total loss.
|
||||
/// Inputs are paired skews beyond the acceptable band but below catastrophic; output is a gross-failure verdict.
|
||||
fn verdict_reports_gross_failure_between_acceptable_and_catastrophic() {
|
||||
let report = SyncAnalysisReport {
|
||||
video_event_count: 5,
|
||||
audio_event_count: 5,
|
||||
paired_event_count: 5,
|
||||
coded_events: false,
|
||||
activity_start_delta_ms: 0.0,
|
||||
raw_first_video_activity_s: 0.0,
|
||||
raw_first_audio_activity_s: 0.0,
|
||||
first_skew_ms: 90.0,
|
||||
last_skew_ms: 120.0,
|
||||
mean_skew_ms: 105.0,
|
||||
median_skew_ms: 105.0,
|
||||
max_abs_skew_ms: 120.0,
|
||||
drift_ms: 30.0,
|
||||
skews_ms: vec![90.0, 100.0, 105.0, 110.0, 120.0],
|
||||
video_onsets_s: vec![],
|
||||
audio_onsets_s: vec![],
|
||||
paired_events: vec![],
|
||||
};
|
||||
|
||||
let verdict = report.verdict();
|
||||
assert!(!verdict.passed);
|
||||
assert_eq!(verdict.status, "gross_failure");
|
||||
assert!(verdict.reason.contains("acceptable band"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
/// Keeps `verdict_flags_catastrophic_desync` explicit because it sits on sync-probe analysis, where small timestamp or pairing mistakes can hide real A/V skew.
|
||||
/// Inputs are the typed parameters; output is the return value or side effect.
|
||||
|
||||
@ -11,6 +11,8 @@ use gstreamer_app as gst_app;
|
||||
#[cfg(any(not(coverage), test))]
|
||||
use lesavka_common::lesavka::{AudioPacket, VideoPacket};
|
||||
#[cfg(any(not(coverage), test))]
|
||||
use std::collections::BTreeMap;
|
||||
#[cfg(any(not(coverage), test))]
|
||||
use std::sync::{
|
||||
Arc,
|
||||
atomic::{AtomicBool, Ordering},
|
||||
@ -18,7 +20,7 @@ use std::sync::{
|
||||
#[cfg(any(not(coverage), test))]
|
||||
use std::thread::{self, JoinHandle};
|
||||
#[cfg(any(not(coverage), test))]
|
||||
use std::time::{Duration, Instant};
|
||||
use std::time::{Duration, Instant, SystemTime, UNIX_EPOCH};
|
||||
#[cfg(any(not(coverage), test))]
|
||||
use std::{f64::consts::TAU, mem::size_of};
|
||||
|
||||
@ -27,6 +29,10 @@ use crate::input::camera::{CameraCodec, CameraConfig};
|
||||
#[cfg(any(not(coverage), test))]
|
||||
use crate::sync_probe::schedule::PulseSchedule;
|
||||
#[cfg(any(not(coverage), test))]
|
||||
use crate::sync_probe::signature::{
|
||||
ProbeColor, probe_audio_frequency_for_event_code, probe_color_for_event_code,
|
||||
};
|
||||
#[cfg(any(not(coverage), test))]
|
||||
use crate::uplink_fresh_queue::{FreshPacketQueue, FreshQueueConfig};
|
||||
|
||||
#[cfg(coverage)]
|
||||
@ -35,6 +41,8 @@ mod coverage_stub;
|
||||
mod runtime;
|
||||
#[cfg(test)]
|
||||
mod tests;
|
||||
#[cfg(not(coverage))]
|
||||
mod video_packets;
|
||||
|
||||
#[cfg(coverage)]
|
||||
pub use coverage_stub::SyncProbeCapture;
|
||||
@ -67,16 +75,84 @@ const AUDIO_PULSE_FREQUENCY_HZ: f64 = 1_800.0;
|
||||
const AUDIO_PULSE_AMPLITUDE: f64 = 24_000.0;
|
||||
|
||||
#[cfg(any(not(coverage), test))]
|
||||
/// Build the dark RGB frame used outside active probe pulses.
|
||||
///
|
||||
/// Inputs: frame width and height in pixels.
|
||||
/// Outputs: raw RGB bytes.
|
||||
/// Why: idle frames need stable low luma so the RCT analyzer can separate
|
||||
/// transport pulses from background video.
|
||||
fn build_dark_probe_frame(width: usize, height: usize) -> Vec<u8> {
|
||||
vec![16u8; width.saturating_mul(height)]
|
||||
build_solid_rgb_probe_frame(
|
||||
width,
|
||||
height,
|
||||
ProbeColor {
|
||||
r: 16,
|
||||
g: 16,
|
||||
b: 16,
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
#[cfg(any(not(coverage), test))]
|
||||
/// Build the uncoded bright RGB pulse frame.
|
||||
///
|
||||
/// Inputs: frame width and height in pixels.
|
||||
/// Outputs: raw RGB bytes.
|
||||
/// Why: the legacy marker-mode probe still needs a high-contrast frame when
|
||||
/// event identity is not being carried by color.
|
||||
fn build_regular_probe_frame(width: usize, height: usize) -> Vec<u8> {
|
||||
vec![240u8; width.saturating_mul(height)]
|
||||
build_solid_rgb_probe_frame(
|
||||
width,
|
||||
height,
|
||||
ProbeColor {
|
||||
r: 240,
|
||||
g: 240,
|
||||
b: 240,
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
#[cfg(any(not(coverage), test))]
|
||||
/// Build a color-coded RGB pulse frame for one event identity.
|
||||
///
|
||||
/// Inputs: frame dimensions and one-based event code.
|
||||
/// Outputs: raw RGB bytes using the shared probe palette.
|
||||
/// Why: client-to-RCT transport tests can lose startup cadence, so each flash
|
||||
/// carries identity in color before it is bundled for upstream transport.
|
||||
fn build_coded_probe_frame(width: usize, height: usize, code: u32) -> Vec<u8> {
|
||||
build_solid_rgb_probe_frame(
|
||||
width,
|
||||
height,
|
||||
probe_color_for_event_code(code).unwrap_or(ProbeColor {
|
||||
r: 240,
|
||||
g: 240,
|
||||
b: 240,
|
||||
}),
|
||||
)
|
||||
}
|
||||
|
||||
#[cfg(any(not(coverage), test))]
|
||||
/// Fill a whole raw RGB frame with one color.
|
||||
///
|
||||
/// Inputs: frame dimensions and RGB color.
|
||||
/// Outputs: raw RGB bytes.
|
||||
/// Why: generating simple solid frames keeps the synthetic source deterministic
|
||||
/// and cheap to compare in tests.
|
||||
fn build_solid_rgb_probe_frame(width: usize, height: usize, color: ProbeColor) -> Vec<u8> {
|
||||
let mut frame = Vec::with_capacity(width.saturating_mul(height).saturating_mul(3));
|
||||
for _ in 0..width.saturating_mul(height) {
|
||||
frame.extend_from_slice(&[color.r, color.g, color.b]);
|
||||
}
|
||||
frame
|
||||
}
|
||||
|
||||
#[cfg(any(not(coverage), test))]
|
||||
/// Build the old marker pulse frame with a visible cross.
|
||||
///
|
||||
/// Inputs: frame width and height in pixels.
|
||||
/// Outputs: raw RGB bytes.
|
||||
/// Why: marker-mode analysis uses pulse width and shape to break cadence
|
||||
/// aliases when coded event identity is disabled.
|
||||
fn build_marker_probe_frame(width: usize, height: usize) -> Vec<u8> {
|
||||
let mut frame = build_regular_probe_frame(width, height);
|
||||
|
||||
@ -91,7 +167,11 @@ fn build_marker_probe_frame(width: usize, height: usize) -> Vec<u8> {
|
||||
0,
|
||||
(cx + cross_half_w).min(width),
|
||||
height,
|
||||
16,
|
||||
ProbeColor {
|
||||
r: 16,
|
||||
g: 16,
|
||||
b: 16,
|
||||
},
|
||||
);
|
||||
fill_rect(
|
||||
&mut frame,
|
||||
@ -100,12 +180,22 @@ fn build_marker_probe_frame(width: usize, height: usize) -> Vec<u8> {
|
||||
cy.saturating_sub(cross_half_h),
|
||||
width,
|
||||
(cy + cross_half_h).min(height),
|
||||
16,
|
||||
ProbeColor {
|
||||
r: 16,
|
||||
g: 16,
|
||||
b: 16,
|
||||
},
|
||||
);
|
||||
frame
|
||||
}
|
||||
|
||||
#[cfg(any(not(coverage), test))]
|
||||
/// Fill a rectangular region in a raw RGB frame.
|
||||
///
|
||||
/// Inputs: mutable frame bytes, image width, rectangle bounds, and RGB color.
|
||||
/// Outputs: in-place mutation of the requested region.
|
||||
/// Why: marker frames need a deterministic visual feature without pulling in a
|
||||
/// heavier image dependency.
|
||||
fn fill_rect(
|
||||
frame: &mut [u8],
|
||||
width: usize,
|
||||
@ -113,20 +203,26 @@ fn fill_rect(
|
||||
y0: usize,
|
||||
x1: usize,
|
||||
y1: usize,
|
||||
value: u8,
|
||||
color: ProbeColor,
|
||||
) {
|
||||
let height = frame.len() / width.max(1);
|
||||
let height = frame.len() / width.max(1) / 3;
|
||||
let x1 = x1.min(width);
|
||||
let y1 = y1.min(height);
|
||||
for y in y0.min(height)..y1 {
|
||||
for x in x0.min(width)..x1 {
|
||||
let offset = y * width + x;
|
||||
frame[offset] = value;
|
||||
let offset = (y * width + x) * 3;
|
||||
frame[offset..offset + 3].copy_from_slice(&[color.r, color.g, color.b]);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(any(not(coverage), test))]
|
||||
/// Render one interleaved PCM audio chunk for the synthetic probe.
|
||||
///
|
||||
/// Inputs: pulse schedule, chunk PTS, and mono samples per channel.
|
||||
/// Outputs: stereo little-endian PCM bytes.
|
||||
/// Why: the client transport test must inject audio exactly where physical
|
||||
/// capture would feed the uplink, but with deterministic tone identities.
|
||||
fn render_audio_chunk(
|
||||
schedule: &PulseSchedule,
|
||||
chunk_pts: Duration,
|
||||
@ -136,8 +232,8 @@ fn render_audio_chunk(
|
||||
let mut pcm = Vec::with_capacity(samples_per_chunk * AUDIO_CHANNELS * size_of::<i16>());
|
||||
for sample_index in 0..samples_per_chunk {
|
||||
let sample_pts = chunk_pts + sample_step.saturating_mul(sample_index as u32);
|
||||
let amplitude = if schedule.flash_active(sample_pts) {
|
||||
let phase = TAU * AUDIO_PULSE_FREQUENCY_HZ * sample_pts.as_secs_f64();
|
||||
let amplitude = if let Some(frequency_hz) = probe_audio_frequency(schedule, sample_pts) {
|
||||
let phase = TAU * frequency_hz * sample_pts.as_secs_f64();
|
||||
(phase.sin() * AUDIO_PULSE_AMPLITUDE) as i16
|
||||
} else {
|
||||
0
|
||||
@ -149,7 +245,39 @@ fn render_audio_chunk(
|
||||
pcm
|
||||
}
|
||||
|
||||
#[cfg(any(not(coverage), test))]
|
||||
/// Select the tone frequency active at a sample timestamp.
|
||||
///
|
||||
/// Inputs: pulse schedule and sample PTS.
|
||||
/// Outputs: tone frequency when the pulse gate is active, otherwise `None`.
|
||||
/// Why: coded probes need audio identity to survive transport separately from
|
||||
/// the video color identity, while legacy probes keep the original single tone.
|
||||
fn probe_audio_frequency(schedule: &PulseSchedule, sample_pts: Duration) -> Option<f64> {
|
||||
if !schedule.flash_active(sample_pts) {
|
||||
return None;
|
||||
}
|
||||
schedule
|
||||
.event_code(sample_pts)
|
||||
.and_then(probe_audio_frequency_for_event_code)
|
||||
.or(Some(AUDIO_PULSE_FREQUENCY_HZ))
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
fn probe_pts_exceeds_duration(pts_usecs: u64, duration: std::time::Duration) -> bool {
|
||||
pts_usecs > duration.as_micros() as u64
|
||||
}
|
||||
|
||||
#[cfg(any(not(coverage), test))]
|
||||
/// Capture the local Unix clock in nanoseconds.
|
||||
///
|
||||
/// Inputs: none.
|
||||
/// Outputs: best-effort Unix timestamp.
|
||||
/// Why: the RCT freshness summary needs a client-origin timestamp aligned with
|
||||
/// synthetic PTS zero before media enters the bundled transport path.
|
||||
fn unix_now_ns() -> u64 {
|
||||
SystemTime::now()
|
||||
.duration_since(UNIX_EPOCH)
|
||||
.unwrap_or_default()
|
||||
.as_nanos()
|
||||
.min(u64::MAX as u128) as u64
|
||||
}
|
||||
|
||||
@ -33,4 +33,8 @@ impl SyncProbeCapture {
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
pub fn start_unix_ns(&self) -> u64 {
|
||||
1
|
||||
}
|
||||
}
|
||||
|
||||
@ -1,14 +1,9 @@
|
||||
use super::video_packets::{
|
||||
ProbeFrameKind, VideoPacketSource, build_video_packet_source, probe_frame_kind,
|
||||
stop_video_packet_source, video_packet_data,
|
||||
};
|
||||
use super::*;
|
||||
|
||||
fn rebase_probe_audio_packet_pts(
|
||||
pts_rebaser: &crate::live_capture_clock::DurationPacedSourcePtsRebaser,
|
||||
source_pts_us: u64,
|
||||
packet_duration_us: u64,
|
||||
lag_cap: Duration,
|
||||
) -> crate::live_capture_clock::RebasedSourcePts {
|
||||
pts_rebaser.rebase_with_packet_duration(Some(source_pts_us), packet_duration_us, lag_cap)
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
pub(super) fn rebase_probe_packet_pts(
|
||||
pts_rebaser: &crate::live_capture_clock::DurationPacedSourcePtsRebaser,
|
||||
@ -21,8 +16,9 @@ pub(super) fn rebase_probe_packet_pts(
|
||||
}
|
||||
|
||||
pub struct SyncProbeCapture {
|
||||
pipeline: gst::Pipeline,
|
||||
running: Arc<AtomicBool>,
|
||||
probe_start: Instant,
|
||||
start_unix_ns: u64,
|
||||
video_queue: FreshPacketQueue<VideoPacket>,
|
||||
audio_queue: FreshPacketQueue<AudioPacket>,
|
||||
video_thread: Option<JoinHandle<()>>,
|
||||
@ -33,30 +29,15 @@ impl SyncProbeCapture {
|
||||
pub fn new(camera: CameraConfig, schedule: PulseSchedule, duration: Duration) -> Result<Self> {
|
||||
gst::init().context("gst init")?;
|
||||
|
||||
let pipeline = build_pipeline(camera, &schedule)?;
|
||||
let video_src = pipeline
|
||||
.by_name("sync_probe_video_src")
|
||||
.context("missing sync probe video appsrc")?
|
||||
.downcast::<gst_app::AppSrc>()
|
||||
.expect("video appsrc");
|
||||
let video_sink = pipeline
|
||||
.by_name("sync_probe_video_sink")
|
||||
.context("missing sync probe video appsink")?
|
||||
.downcast::<gst_app::AppSink>()
|
||||
.expect("video appsink");
|
||||
|
||||
pipeline
|
||||
.set_state(gst::State::Playing)
|
||||
.context("starting sync probe pipeline")?;
|
||||
|
||||
let running = Arc::new(AtomicBool::new(true));
|
||||
let probe_start = Instant::now();
|
||||
let video_queue = FreshPacketQueue::new(PROBE_VIDEO_QUEUE);
|
||||
let audio_queue = FreshPacketQueue::new(PROBE_AUDIO_QUEUE);
|
||||
let packet_source = build_video_packet_source(camera, &schedule)?;
|
||||
let start_unix_ns = super::unix_now_ns();
|
||||
let probe_start = Instant::now();
|
||||
|
||||
let video_thread = spawn_video_thread(VideoThreadConfig {
|
||||
src: video_src,
|
||||
sink: video_sink,
|
||||
packet_source,
|
||||
camera,
|
||||
schedule: schedule.clone(),
|
||||
duration,
|
||||
@ -73,8 +54,9 @@ impl SyncProbeCapture {
|
||||
);
|
||||
|
||||
Ok(Self {
|
||||
pipeline,
|
||||
running,
|
||||
probe_start,
|
||||
start_unix_ns,
|
||||
video_queue,
|
||||
audio_queue,
|
||||
video_thread: Some(video_thread),
|
||||
@ -89,6 +71,20 @@ impl SyncProbeCapture {
|
||||
pub fn audio_queue(&self) -> FreshPacketQueue<AudioPacket> {
|
||||
self.audio_queue.clone()
|
||||
}
|
||||
|
||||
pub fn start_unix_ns(&self) -> u64 {
|
||||
self.start_unix_ns
|
||||
}
|
||||
|
||||
/// Return the local monotonic instant associated with synthetic PTS zero.
|
||||
///
|
||||
/// Inputs: none.
|
||||
/// Outputs: the `Instant` used to pace the probe's capture threads.
|
||||
/// Why: the transport sender needs send-age telemetry on the same clock as
|
||||
/// synthetic capture PTS, not the global physical-capture clock.
|
||||
pub fn probe_start(&self) -> Instant {
|
||||
self.probe_start
|
||||
}
|
||||
}
|
||||
|
||||
impl Drop for SyncProbeCapture {
|
||||
@ -96,7 +92,6 @@ impl Drop for SyncProbeCapture {
|
||||
self.running.store(false, Ordering::Release);
|
||||
self.video_queue.close();
|
||||
self.audio_queue.close();
|
||||
let _ = self.pipeline.set_state(gst::State::Null);
|
||||
if let Some(handle) = self.video_thread.take() {
|
||||
let _ = handle.join();
|
||||
}
|
||||
@ -106,57 +101,8 @@ impl Drop for SyncProbeCapture {
|
||||
}
|
||||
}
|
||||
|
||||
fn build_pipeline(camera: CameraConfig, _schedule: &PulseSchedule) -> Result<gst::Pipeline> {
|
||||
let video_caps = format!(
|
||||
"video/x-raw,format=GRAY8,width={},height={},framerate={}/1",
|
||||
camera.width,
|
||||
camera.height,
|
||||
camera.fps.max(1)
|
||||
);
|
||||
let video_branch = match camera.codec {
|
||||
CameraCodec::Mjpeg => format!(
|
||||
"appsrc name=sync_probe_video_src is-live=true format=time do-timestamp=false caps={video_caps} ! \
|
||||
queue max-size-buffers=4 leaky=downstream ! videoconvert ! \
|
||||
jpegenc quality=90 ! image/jpeg,parsed=true,width={},height={},framerate={}/1 ! \
|
||||
appsink name=sync_probe_video_sink emit-signals=false sync=false max-buffers=4 drop=true",
|
||||
camera.width,
|
||||
camera.height,
|
||||
camera.fps.max(1),
|
||||
),
|
||||
CameraCodec::H264 => format!(
|
||||
"appsrc name=sync_probe_video_src is-live=true format=time do-timestamp=false caps={video_caps} ! \
|
||||
queue max-size-buffers=4 leaky=downstream ! videoconvert ! \
|
||||
{} ! h264parse config-interval=-1 ! video/x-h264,stream-format=byte-stream,alignment=au ! \
|
||||
appsink name=sync_probe_video_sink emit-signals=false sync=false max-buffers=4 drop=true",
|
||||
pick_h264_encoder(camera.fps.max(1))?
|
||||
),
|
||||
};
|
||||
|
||||
gst::parse::launch(&video_branch)
|
||||
.with_context(|| format!("building sync probe pipeline: {video_branch}"))?
|
||||
.downcast::<gst::Pipeline>()
|
||||
.map_err(|_| anyhow::anyhow!("sync probe description did not build a pipeline"))
|
||||
}
|
||||
|
||||
fn pick_h264_encoder(fps: u32) -> Result<String> {
|
||||
if gst::ElementFactory::find("x264enc").is_some() {
|
||||
return Ok(format!(
|
||||
"x264enc tune=zerolatency speed-preset=ultrafast bitrate=2500 key-int-max={}",
|
||||
fps.max(1)
|
||||
));
|
||||
}
|
||||
if gst::ElementFactory::find("openh264enc").is_some() {
|
||||
return Ok("openh264enc bitrate=2500000".to_string());
|
||||
}
|
||||
if gst::ElementFactory::find("v4l2h264enc").is_some() {
|
||||
return Ok("v4l2h264enc".to_string());
|
||||
}
|
||||
bail!("no usable H.264 encoder found for sync probe")
|
||||
}
|
||||
|
||||
struct VideoThreadConfig {
|
||||
src: gst_app::AppSrc,
|
||||
sink: gst_app::AppSink,
|
||||
packet_source: VideoPacketSource,
|
||||
camera: CameraConfig,
|
||||
schedule: PulseSchedule,
|
||||
duration: Duration,
|
||||
@ -165,10 +111,84 @@ struct VideoThreadConfig {
|
||||
queue: FreshPacketQueue<VideoPacket>,
|
||||
}
|
||||
|
||||
/// Raw RGB signatures needed only by live encoder probe paths.
|
||||
///
|
||||
/// Inputs: camera dimensions and event-code schedule.
|
||||
/// Outputs: reusable frame buffers for H.264 encoding.
|
||||
/// Why: MJPEG probes already carry pre-encoded signatures, so building these
|
||||
/// large raw frames for MJPEG would delay the synthetic capture clock.
|
||||
struct RawProbeFrames {
|
||||
dark_frame: Vec<u8>,
|
||||
regular_pulse_frame: Vec<u8>,
|
||||
marker_pulse_frame: Vec<u8>,
|
||||
coded_pulse_frames: BTreeMap<u32, Vec<u8>>,
|
||||
}
|
||||
|
||||
impl RawProbeFrames {
|
||||
/// Build raw frames only when the packet source still needs them.
|
||||
///
|
||||
/// Inputs: packet source, camera profile, and pulse schedule.
|
||||
/// Outputs: raw RGB frames for live encoders or `None` for pre-encoded MJPEG.
|
||||
/// Why: client-origin freshness must not include one-time test-pattern
|
||||
/// setup work that would never happen inside physical camera capture.
|
||||
fn maybe_new(
|
||||
packet_source: &VideoPacketSource,
|
||||
camera: CameraConfig,
|
||||
schedule: &PulseSchedule,
|
||||
) -> Option<Self> {
|
||||
if !matches!(packet_source, VideoPacketSource::Pipeline { .. }) {
|
||||
return None;
|
||||
}
|
||||
Some(Self {
|
||||
dark_frame: build_dark_probe_frame(camera.width as usize, camera.height as usize),
|
||||
regular_pulse_frame: build_regular_probe_frame(
|
||||
camera.width as usize,
|
||||
camera.height as usize,
|
||||
),
|
||||
marker_pulse_frame: build_marker_probe_frame(
|
||||
camera.width as usize,
|
||||
camera.height as usize,
|
||||
),
|
||||
coded_pulse_frames: schedule
|
||||
.event_width_codes()
|
||||
.iter()
|
||||
.copied()
|
||||
.map(|code| {
|
||||
(
|
||||
code,
|
||||
build_coded_probe_frame(
|
||||
camera.width as usize,
|
||||
camera.height as usize,
|
||||
code,
|
||||
),
|
||||
)
|
||||
})
|
||||
.collect::<BTreeMap<_, _>>(),
|
||||
})
|
||||
}
|
||||
|
||||
/// Return the raw RGB frame for one probe frame kind.
|
||||
///
|
||||
/// Inputs: selected frame kind from the shared pulse schedule.
|
||||
/// Outputs: borrowed raw frame bytes.
|
||||
/// Why: H.264 encoding still needs the same visual signatures as MJPEG so
|
||||
/// analyzer identity stays comparable across codecs.
|
||||
fn frame(&self, frame_kind: ProbeFrameKind) -> &[u8] {
|
||||
match frame_kind {
|
||||
ProbeFrameKind::Dark => &self.dark_frame,
|
||||
ProbeFrameKind::RegularPulse => &self.regular_pulse_frame,
|
||||
ProbeFrameKind::MarkerPulse => &self.marker_pulse_frame,
|
||||
ProbeFrameKind::Coded(code) => self
|
||||
.coded_pulse_frames
|
||||
.get(&code)
|
||||
.unwrap_or(&self.regular_pulse_frame),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn spawn_video_thread(config: VideoThreadConfig) -> JoinHandle<()> {
|
||||
let VideoThreadConfig {
|
||||
src,
|
||||
sink,
|
||||
mut packet_source,
|
||||
camera,
|
||||
schedule,
|
||||
duration,
|
||||
@ -177,13 +197,7 @@ fn spawn_video_thread(config: VideoThreadConfig) -> JoinHandle<()> {
|
||||
queue,
|
||||
} = config;
|
||||
thread::spawn(move || {
|
||||
let pts_rebaser = crate::live_capture_clock::DurationPacedSourcePtsRebaser::default();
|
||||
let lag_cap = crate::live_capture_clock::upstream_source_lag_cap();
|
||||
let dark_frame = build_dark_probe_frame(camera.width as usize, camera.height as usize);
|
||||
let regular_pulse_frame =
|
||||
build_regular_probe_frame(camera.width as usize, camera.height as usize);
|
||||
let marker_pulse_frame =
|
||||
build_marker_probe_frame(camera.width as usize, camera.height as usize);
|
||||
let raw_frames = RawProbeFrames::maybe_new(&packet_source, camera, &schedule);
|
||||
let frame_step = Duration::from_nanos(1_000_000_000u64 / u64::from(camera.fps.max(1)));
|
||||
let mut frame_index = 0u64;
|
||||
|
||||
@ -200,45 +214,19 @@ fn spawn_video_thread(config: VideoThreadConfig) -> JoinHandle<()> {
|
||||
thread::sleep(remaining);
|
||||
}
|
||||
|
||||
let frame = if schedule.flash_active(pts) && schedule.pulse_is_marker(pts) {
|
||||
&marker_pulse_frame
|
||||
} else if schedule.flash_active(pts) {
|
||||
®ular_pulse_frame
|
||||
} else {
|
||||
&dark_frame
|
||||
};
|
||||
let mut buffer = gst::Buffer::from_slice(frame.clone());
|
||||
if let Some(meta) = buffer.get_mut() {
|
||||
let pts_time = gst::ClockTime::from_nseconds(pts.as_nanos() as u64);
|
||||
meta.set_pts(Some(pts_time));
|
||||
meta.set_dts(Some(pts_time));
|
||||
meta.set_duration(Some(gst::ClockTime::from_nseconds(
|
||||
frame_step.as_nanos() as u64
|
||||
)));
|
||||
}
|
||||
if src.push_buffer(buffer).is_err() {
|
||||
break;
|
||||
}
|
||||
|
||||
if let Some(sample) = freshest_probe_video_sample(&sink)
|
||||
&& let Some(buffer) = sample.buffer()
|
||||
&& let Ok(map) = buffer.map_readable()
|
||||
let frame_kind = probe_frame_kind(&schedule, pts);
|
||||
let raw_frame = raw_frames
|
||||
.as_ref()
|
||||
.map(|frames| frames.frame(frame_kind))
|
||||
.unwrap_or(&[]);
|
||||
if let Some(encoded) =
|
||||
video_packet_data(&mut packet_source, frame_kind, raw_frame, pts, frame_step)
|
||||
{
|
||||
let source_pts_us = buffer.pts().unwrap_or(gst::ClockTime::ZERO).nseconds() / 1_000;
|
||||
let packet_duration_us = buffer
|
||||
.duration()
|
||||
.map(|ts| (ts.nseconds() / 1_000).max(1))
|
||||
.unwrap_or(frame_step.as_micros().min(u64::MAX as u128) as u64);
|
||||
let source_pts_us = encoded.pts.as_micros().min(u64::MAX as u128) as u64;
|
||||
let packet = VideoPacket {
|
||||
id: 2,
|
||||
pts: pts_rebaser
|
||||
.rebase_with_packet_duration(
|
||||
Some(source_pts_us),
|
||||
packet_duration_us,
|
||||
lag_cap,
|
||||
)
|
||||
.packet_pts_us,
|
||||
data: map.as_slice().to_vec(),
|
||||
pts: source_pts_us,
|
||||
data: encoded.data,
|
||||
..Default::default()
|
||||
};
|
||||
let _ = queue.push(packet, Duration::ZERO);
|
||||
@ -247,19 +235,11 @@ fn spawn_video_thread(config: VideoThreadConfig) -> JoinHandle<()> {
|
||||
frame_index = frame_index.saturating_add(1);
|
||||
}
|
||||
|
||||
let _ = src.end_of_stream();
|
||||
stop_video_packet_source(packet_source);
|
||||
queue.close();
|
||||
})
|
||||
}
|
||||
|
||||
fn freshest_probe_video_sample(sink: &gst_app::AppSink) -> Option<gst::Sample> {
|
||||
let mut newest = sink.try_pull_sample(gst::ClockTime::from_mseconds(250));
|
||||
while let Some(sample) = sink.try_pull_sample(gst::ClockTime::ZERO) {
|
||||
newest = Some(sample);
|
||||
}
|
||||
newest
|
||||
}
|
||||
|
||||
fn spawn_audio_thread(
|
||||
schedule: PulseSchedule,
|
||||
duration: Duration,
|
||||
@ -268,8 +248,6 @@ fn spawn_audio_thread(
|
||||
queue: FreshPacketQueue<AudioPacket>,
|
||||
) -> JoinHandle<()> {
|
||||
thread::spawn(move || {
|
||||
let pts_rebaser = crate::live_capture_clock::DurationPacedSourcePtsRebaser::default();
|
||||
let lag_cap = crate::live_capture_clock::upstream_source_lag_cap();
|
||||
let chunk_duration = Duration::from_millis(AUDIO_CHUNK_MS);
|
||||
let samples_per_chunk =
|
||||
(AUDIO_SAMPLE_RATE as usize * AUDIO_CHUNK_MS as usize / 1_000).max(1);
|
||||
@ -289,15 +267,10 @@ fn spawn_audio_thread(
|
||||
}
|
||||
|
||||
let chunk = render_audio_chunk(&schedule, pts, samples_per_chunk);
|
||||
let timing = rebase_probe_audio_packet_pts(
|
||||
&pts_rebaser,
|
||||
pts.as_micros().min(u64::MAX as u128) as u64,
|
||||
chunk_duration.as_micros().min(u64::MAX as u128) as u64,
|
||||
lag_cap,
|
||||
);
|
||||
let source_pts_us = pts.as_micros().min(u64::MAX as u128) as u64;
|
||||
let packet = AudioPacket {
|
||||
id: 0,
|
||||
pts: timing.packet_pts_us,
|
||||
pts: source_pts_us,
|
||||
data: chunk,
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
@ -1,6 +1,6 @@
|
||||
use super::{
|
||||
AUDIO_SAMPLE_RATE, SyncProbeCapture, build_dark_probe_frame, build_marker_probe_frame,
|
||||
build_regular_probe_frame,
|
||||
AUDIO_SAMPLE_RATE, SyncProbeCapture, build_coded_probe_frame, build_dark_probe_frame,
|
||||
build_marker_probe_frame, build_regular_probe_frame,
|
||||
};
|
||||
use crate::input::camera::{CameraCodec, CameraConfig};
|
||||
use crate::sync_probe::analyze::detect_audio_onsets;
|
||||
@ -11,7 +11,9 @@ use gstreamer as gst;
|
||||
use gstreamer::prelude::*;
|
||||
#[cfg(not(coverage))]
|
||||
use gstreamer_app as gst_app;
|
||||
use lesavka_common::lesavka::{AudioPacket, VideoPacket};
|
||||
#[cfg(coverage)]
|
||||
use lesavka_common::lesavka::AudioPacket;
|
||||
use lesavka_common::lesavka::VideoPacket;
|
||||
use std::time::Duration;
|
||||
use std::time::Instant;
|
||||
|
||||
@ -35,6 +37,7 @@ fn decode_interleaved_pcm_to_mono_samples(pcm_bytes: &[u8]) -> Vec<i16> {
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
#[cfg(coverage)]
|
||||
async fn coverage_stub_exposes_live_video_and_audio_queues() {
|
||||
let capture = SyncProbeCapture::new(
|
||||
stub_camera(),
|
||||
@ -73,6 +76,7 @@ async fn coverage_stub_exposes_live_video_and_audio_queues() {
|
||||
let audio = audio_queue.pop_fresh().await;
|
||||
assert_eq!(video.packet.expect("video packet").data, vec![1, 2, 3]);
|
||||
assert_eq!(audio.packet.expect("audio packet").data, vec![4, 5, 6]);
|
||||
assert!(capture.start_unix_ns() > 0);
|
||||
}
|
||||
|
||||
#[test]
|
||||
@ -135,9 +139,11 @@ fn probe_video_frames_render_distinct_idle_regular_and_marker_patterns() {
|
||||
let dark = build_dark_probe_frame(64, 36);
|
||||
let regular = build_regular_probe_frame(64, 36);
|
||||
let marker = build_marker_probe_frame(64, 36);
|
||||
let coded = build_coded_probe_frame(64, 36, 3);
|
||||
|
||||
assert_eq!(dark.len(), regular.len());
|
||||
assert_eq!(dark.len(), marker.len());
|
||||
assert_eq!(dark.len(), coded.len());
|
||||
assert!(
|
||||
regular.iter().map(|byte| u64::from(*byte)).sum::<u64>()
|
||||
!= dark.iter().map(|byte| u64::from(*byte)).sum::<u64>()
|
||||
@ -147,6 +153,26 @@ fn probe_video_frames_render_distinct_idle_regular_and_marker_patterns() {
|
||||
!= dark.iter().map(|byte| u64::from(*byte)).sum::<u64>()
|
||||
);
|
||||
assert_ne!(regular, marker);
|
||||
assert_ne!(coded, regular);
|
||||
assert_ne!(coded, dark);
|
||||
}
|
||||
|
||||
#[test]
|
||||
/// Keeps `probe_origin_timestamp_is_unix_epoch_based` explicit because client-to-RCT freshness depends on comparing client-origin media timestamps with Tethys observations.
|
||||
/// Inputs are none; output is a current Unix nanosecond timestamp.
|
||||
fn probe_origin_timestamp_is_unix_epoch_based() {
|
||||
let before = std::time::SystemTime::now()
|
||||
.duration_since(std::time::UNIX_EPOCH)
|
||||
.expect("system clock after epoch")
|
||||
.as_nanos() as u64;
|
||||
let captured = super::unix_now_ns();
|
||||
let after = std::time::SystemTime::now()
|
||||
.duration_since(std::time::UNIX_EPOCH)
|
||||
.expect("system clock after epoch")
|
||||
.as_nanos() as u64;
|
||||
|
||||
assert!(captured >= before);
|
||||
assert!(captured <= after);
|
||||
}
|
||||
|
||||
#[cfg(not(coverage))]
|
||||
|
||||
@ -184,8 +184,10 @@ async fn runtime_probe_audio_and_video_pts_advance_near_real_time() {
|
||||
let (audio_first, audio_last, audio_count) = audio_task.await.expect("audio drain");
|
||||
let wall_elapsed = started.elapsed();
|
||||
|
||||
let video_span = video_last.expect("video last pts") - video_first.expect("video first pts");
|
||||
let audio_span = audio_last.expect("audio last pts") - audio_first.expect("audio first pts");
|
||||
let video_first = video_first.expect("video first pts");
|
||||
let audio_first = audio_first.expect("audio first pts");
|
||||
let video_span = video_last.expect("video last pts") - video_first;
|
||||
let audio_span = audio_last.expect("audio last pts") - audio_first;
|
||||
eprintln!(
|
||||
"runtime probe spans: video_count={video_count} video_span_us={video_span} audio_count={audio_count} audio_span_us={audio_span} wall_elapsed={wall_elapsed:?}"
|
||||
);
|
||||
@ -202,6 +204,14 @@ async fn runtime_probe_audio_and_video_pts_advance_near_real_time() {
|
||||
wall_elapsed <= Duration::from_secs(5),
|
||||
"runtime probe should not take excessively long locally, took {wall_elapsed:?}"
|
||||
);
|
||||
assert!(
|
||||
video_first <= 50_000,
|
||||
"video PTS should stay anchored to synthetic capture start, got {video_first} us"
|
||||
);
|
||||
assert!(
|
||||
audio_first <= 50_000,
|
||||
"audio PTS should stay anchored to synthetic capture start, got {audio_first} us"
|
||||
);
|
||||
assert!(
|
||||
video_span >= 2_400_000,
|
||||
"video pts should span most of the 3s capture, got {} us",
|
||||
@ -219,6 +229,139 @@ async fn runtime_probe_audio_and_video_pts_advance_near_real_time() {
|
||||
);
|
||||
}
|
||||
|
||||
#[cfg(not(coverage))]
|
||||
#[tokio::test]
|
||||
async fn runtime_probe_1080p_mjpeg_packets_are_clock_paced() {
|
||||
let capture = SyncProbeCapture::new(
|
||||
CameraConfig {
|
||||
codec: CameraCodec::Mjpeg,
|
||||
width: 1920,
|
||||
height: 1080,
|
||||
fps: 30,
|
||||
},
|
||||
PulseSchedule::new(
|
||||
Duration::from_secs(1),
|
||||
Duration::from_millis(500),
|
||||
Duration::from_millis(120),
|
||||
4,
|
||||
),
|
||||
Duration::from_secs(2),
|
||||
)
|
||||
.expect("runtime capture");
|
||||
|
||||
let started = Instant::now();
|
||||
let video_queue = capture.video_queue();
|
||||
let mut count = 0usize;
|
||||
loop {
|
||||
let next = video_queue.pop_fresh().await;
|
||||
let Some(packet) = next.packet else {
|
||||
break;
|
||||
};
|
||||
assert!(!packet.data.is_empty());
|
||||
count += 1;
|
||||
}
|
||||
|
||||
assert!(count >= 45, "expected real-time 1080p packets, got {count}");
|
||||
assert!(
|
||||
started.elapsed() <= Duration::from_secs(4),
|
||||
"1080p MJPEG probe should be packet-paced, not encoder-paced"
|
||||
);
|
||||
}
|
||||
|
||||
#[cfg(not(coverage))]
|
||||
#[tokio::test]
|
||||
/// Verifies local synthetic HEVC video and PCM audio can be paired before transport.
|
||||
///
|
||||
/// Inputs: a tiny HEVC sync-probe camera profile and the normal synthetic audio
|
||||
/// schedule. Outputs: assertions over locally generated packets only. Why: the
|
||||
/// server can only use the HEVC ingress path optimally if the client leaves the
|
||||
/// machine with HEVC access units and nearby audio on the same capture clock.
|
||||
async fn runtime_probe_hevc_video_and_audio_can_form_one_local_bundle() {
|
||||
gst::init().expect("gst init");
|
||||
if !hevc_encoder_available() {
|
||||
eprintln!("skipping local HEVC bundle contract because no HEVC encoder is installed");
|
||||
return;
|
||||
}
|
||||
|
||||
let capture = SyncProbeCapture::new(
|
||||
CameraConfig {
|
||||
codec: CameraCodec::Hevc,
|
||||
width: 640,
|
||||
height: 360,
|
||||
fps: 10,
|
||||
},
|
||||
PulseSchedule::with_event_width_codes(
|
||||
Duration::from_millis(500),
|
||||
Duration::from_millis(500),
|
||||
Duration::from_millis(120),
|
||||
4,
|
||||
vec![1, 2, 3],
|
||||
),
|
||||
Duration::from_secs(2),
|
||||
)
|
||||
.expect("runtime HEVC capture");
|
||||
|
||||
let video_queue = capture.video_queue();
|
||||
let audio_queue = capture.audio_queue();
|
||||
let video_task = tokio::spawn(async move {
|
||||
let mut packets = Vec::new();
|
||||
loop {
|
||||
let next = video_queue.pop_fresh().await;
|
||||
let Some(packet) = next.packet else {
|
||||
break;
|
||||
};
|
||||
packets.push(packet);
|
||||
}
|
||||
packets
|
||||
});
|
||||
let audio_task = tokio::spawn(async move {
|
||||
let mut packets = Vec::new();
|
||||
loop {
|
||||
let next = audio_queue.pop_fresh().await;
|
||||
let Some(packet) = next.packet else {
|
||||
break;
|
||||
};
|
||||
packets.push(packet);
|
||||
}
|
||||
packets
|
||||
});
|
||||
|
||||
let videos = video_task.await.expect("video drain");
|
||||
let audios = audio_task.await.expect("audio drain");
|
||||
assert!(
|
||||
videos.len() >= 12,
|
||||
"expected local HEVC probe video packets, got {}",
|
||||
videos.len()
|
||||
);
|
||||
assert!(
|
||||
audios.len() >= 120,
|
||||
"expected local PCM audio packets, got {}",
|
||||
audios.len()
|
||||
);
|
||||
|
||||
let video = videos
|
||||
.iter()
|
||||
.find(|packet| packet_has_annex_b_start_code(&packet.data))
|
||||
.expect("HEVC Annex-B video packet");
|
||||
let paired_audio = audios
|
||||
.iter()
|
||||
.filter(|packet| {
|
||||
packet.pts >= video.pts.saturating_sub(120_000)
|
||||
&& packet.pts <= video.pts.saturating_add(40_000)
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
assert!(
|
||||
!paired_audio.is_empty(),
|
||||
"expected at least one PCM packet close to HEVC video pts {}",
|
||||
video.pts
|
||||
);
|
||||
assert!(
|
||||
paired_audio.iter().any(|packet| packet.data.len() >= 1_920),
|
||||
"expected paired audio to carry full 10ms stereo PCM packets"
|
||||
);
|
||||
}
|
||||
|
||||
#[cfg(not(coverage))]
|
||||
#[tokio::test]
|
||||
async fn runtime_probe_video_packets_change_across_a_pulse_boundary() {
|
||||
@ -312,3 +455,33 @@ async fn runtime_probe_dark_video_packets_do_not_alternate_frame_to_frame() {
|
||||
"expected consecutive dark MJPEG packets to stay visually stable, got {dark_means:?}"
|
||||
);
|
||||
}
|
||||
|
||||
#[cfg(not(coverage))]
|
||||
/// Returns whether this developer host can encode local HEVC probe media.
|
||||
///
|
||||
/// Inputs: installed GStreamer plugin registry. Outputs: true when one of the
|
||||
/// supported HEVC encoders is available. Why: the local bundle contract should
|
||||
/// prove HEVC behavior when the host can do it without making unrelated CI
|
||||
/// workers fail just because they lack codec plugins.
|
||||
fn hevc_encoder_available() -> bool {
|
||||
[
|
||||
"x265enc",
|
||||
"nvh265enc",
|
||||
"vah265enc",
|
||||
"vaapih265enc",
|
||||
"v4l2h265enc",
|
||||
]
|
||||
.iter()
|
||||
.any(|encoder| gst::ElementFactory::find(encoder).is_some())
|
||||
}
|
||||
|
||||
#[cfg(not(coverage))]
|
||||
/// Detects Annex-B framing in an encoded video packet.
|
||||
///
|
||||
/// Inputs: encoded video bytes. Outputs: true when a 3- or 4-byte start code is
|
||||
/// present. Why: the server-side HEVC decoder expects byte-stream access units,
|
||||
/// not an opaque test payload that only looks non-empty.
|
||||
fn packet_has_annex_b_start_code(data: &[u8]) -> bool {
|
||||
data.windows(4).any(|window| window == [0, 0, 0, 1])
|
||||
|| data.windows(3).any(|window| window == [0, 0, 1])
|
||||
}
|
||||
|
||||
463
client/src/sync_probe/capture/video_packets.rs
Normal file
463
client/src/sync_probe/capture/video_packets.rs
Normal file
@ -0,0 +1,463 @@
|
||||
use super::*;
|
||||
|
||||
pub(super) struct MjpegProbeFrames {
|
||||
dark: Vec<u8>,
|
||||
regular_pulse: Vec<u8>,
|
||||
marker_pulse: Vec<u8>,
|
||||
coded_pulses: BTreeMap<u32, Vec<u8>>,
|
||||
}
|
||||
|
||||
pub(super) enum VideoPacketSource {
|
||||
Mjpeg(MjpegProbeFrames),
|
||||
Pipeline {
|
||||
pipeline: gst::Pipeline,
|
||||
src: gst_app::AppSrc,
|
||||
sink: gst_app::AppSink,
|
||||
first_sample_pts: Option<Duration>,
|
||||
},
|
||||
}
|
||||
|
||||
#[derive(Clone, Copy)]
|
||||
pub(super) enum ProbeFrameKind {
|
||||
Dark,
|
||||
RegularPulse,
|
||||
MarkerPulse,
|
||||
Coded(u32),
|
||||
}
|
||||
|
||||
pub(super) struct EncodedVideoData {
|
||||
pub data: Vec<u8>,
|
||||
pub pts: Duration,
|
||||
}
|
||||
|
||||
/// Build the encoded video source used by the synthetic client transport probe.
|
||||
///
|
||||
/// Inputs: negotiated camera profile and pulse schedule.
|
||||
/// Outputs: either pre-encoded MJPEG signature frames or a live H.264 encoder.
|
||||
/// Why: MJPEG encoding 1080p frames in the hot loop caused transport jitter, so
|
||||
/// deterministic signature frames are encoded once before capture starts.
|
||||
pub(super) fn build_video_packet_source(
|
||||
camera: CameraConfig,
|
||||
schedule: &PulseSchedule,
|
||||
) -> Result<VideoPacketSource> {
|
||||
match camera.codec {
|
||||
CameraCodec::Mjpeg => {
|
||||
let coded_pulses = schedule
|
||||
.event_width_codes()
|
||||
.iter()
|
||||
.copied()
|
||||
.map(|code| {
|
||||
encode_mjpeg_probe_frame(
|
||||
camera,
|
||||
&build_coded_probe_frame(
|
||||
camera.width as usize,
|
||||
camera.height as usize,
|
||||
code,
|
||||
),
|
||||
)
|
||||
.map(|frame| (code, frame))
|
||||
})
|
||||
.collect::<Result<BTreeMap<_, _>>>()?;
|
||||
Ok(VideoPacketSource::Mjpeg(MjpegProbeFrames {
|
||||
dark: encode_mjpeg_probe_frame(
|
||||
camera,
|
||||
&build_dark_probe_frame(camera.width as usize, camera.height as usize),
|
||||
)?,
|
||||
regular_pulse: encode_mjpeg_probe_frame(
|
||||
camera,
|
||||
&build_regular_probe_frame(camera.width as usize, camera.height as usize),
|
||||
)?,
|
||||
marker_pulse: encode_mjpeg_probe_frame(
|
||||
camera,
|
||||
&build_marker_probe_frame(camera.width as usize, camera.height as usize),
|
||||
)?,
|
||||
coded_pulses,
|
||||
}))
|
||||
}
|
||||
CameraCodec::H264 | CameraCodec::Hevc => {
|
||||
let pipeline = build_encoded_pipeline(camera)?;
|
||||
let src = pipeline
|
||||
.by_name("sync_probe_video_src")
|
||||
.context("missing sync probe video appsrc")?
|
||||
.downcast::<gst_app::AppSrc>()
|
||||
.expect("video appsrc");
|
||||
let sink = pipeline
|
||||
.by_name("sync_probe_video_sink")
|
||||
.context("missing sync probe video appsink")?
|
||||
.downcast::<gst_app::AppSink>()
|
||||
.expect("video appsink");
|
||||
pipeline
|
||||
.set_state(gst::State::Playing)
|
||||
.context("starting sync probe pipeline")?;
|
||||
Ok(VideoPacketSource::Pipeline {
|
||||
pipeline,
|
||||
src,
|
||||
sink,
|
||||
first_sample_pts: None,
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Stop any live encoder held by a video packet source.
|
||||
///
|
||||
/// Inputs: packet source being consumed by the video thread.
|
||||
/// Outputs: GStreamer shutdown side effects only.
|
||||
/// Why: the probe should release local encoder resources even when the RCT
|
||||
/// capture or upstream transport test exits early.
|
||||
pub(super) fn stop_video_packet_source(packet_source: VideoPacketSource) {
|
||||
if let VideoPacketSource::Pipeline { pipeline, src, .. } = packet_source {
|
||||
let _ = src.end_of_stream();
|
||||
let _ = pipeline.set_state(gst::State::Null);
|
||||
}
|
||||
}
|
||||
|
||||
/// Encode one RGB probe frame as MJPEG.
|
||||
///
|
||||
/// Inputs: camera profile and raw RGB frame bytes.
|
||||
/// Outputs: JPEG packet payload bytes.
|
||||
/// Why: pre-encoding still frames keeps the client transport test focused on
|
||||
/// bundled network timing rather than local software JPEG throughput.
|
||||
fn encode_mjpeg_probe_frame(camera: CameraConfig, frame: &[u8]) -> Result<Vec<u8>> {
|
||||
let video_caps = format!(
|
||||
"video/x-raw,format=RGB,width={},height={},framerate={}/1",
|
||||
camera.width,
|
||||
camera.height,
|
||||
camera.fps.max(1)
|
||||
);
|
||||
let desc = format!(
|
||||
"appsrc name=sync_probe_still_src is-live=false format=time do-timestamp=false caps={video_caps} ! \
|
||||
videoconvert ! jpegenc quality=90 ! image/jpeg,parsed=true,width={},height={},framerate={}/1 ! \
|
||||
appsink name=sync_probe_still_sink emit-signals=false sync=false max-buffers=1 drop=false",
|
||||
camera.width,
|
||||
camera.height,
|
||||
camera.fps.max(1),
|
||||
);
|
||||
let pipeline = gst::parse::launch(&desc)
|
||||
.with_context(|| format!("building still MJPEG encoder: {desc}"))?
|
||||
.downcast::<gst::Pipeline>()
|
||||
.map_err(|_| anyhow::anyhow!("still MJPEG encoder did not build a pipeline"))?;
|
||||
let src = pipeline
|
||||
.by_name("sync_probe_still_src")
|
||||
.context("missing still MJPEG appsrc")?
|
||||
.downcast::<gst_app::AppSrc>()
|
||||
.expect("still appsrc");
|
||||
let sink = pipeline
|
||||
.by_name("sync_probe_still_sink")
|
||||
.context("missing still MJPEG appsink")?
|
||||
.downcast::<gst_app::AppSink>()
|
||||
.expect("still appsink");
|
||||
pipeline
|
||||
.set_state(gst::State::Playing)
|
||||
.context("starting still MJPEG encoder")?;
|
||||
let frame_step = Duration::from_nanos(1_000_000_000u64 / u64::from(camera.fps.max(1)));
|
||||
let mut buffer = gst::Buffer::from_slice(frame.to_vec());
|
||||
if let Some(meta) = buffer.get_mut() {
|
||||
meta.set_pts(Some(gst::ClockTime::ZERO));
|
||||
meta.set_dts(Some(gst::ClockTime::ZERO));
|
||||
meta.set_duration(Some(gst::ClockTime::from_nseconds(
|
||||
frame_step.as_nanos() as u64
|
||||
)));
|
||||
}
|
||||
src.push_buffer(buffer)
|
||||
.map_err(|err| anyhow::anyhow!("pushing still MJPEG frame failed: {err:?}"))?;
|
||||
let _ = src.end_of_stream();
|
||||
let sample = sink
|
||||
.try_pull_sample(gst::ClockTime::from_seconds(2))
|
||||
.context("still MJPEG encoder produced no sample")?;
|
||||
let data = sample
|
||||
.buffer()
|
||||
.context("still MJPEG sample had no buffer")?
|
||||
.map_readable()
|
||||
.context("mapping still MJPEG sample")?
|
||||
.as_slice()
|
||||
.to_vec();
|
||||
let _ = pipeline.set_state(gst::State::Null);
|
||||
Ok(data)
|
||||
}
|
||||
|
||||
/// Build the live encoder pipeline for non-MJPEG negotiated profiles.
|
||||
///
|
||||
/// Inputs: camera profile.
|
||||
/// Outputs: an appsrc-to-appsink GStreamer pipeline.
|
||||
/// Why: inter-frame codecs cannot reuse still JPEG packets, but they still need
|
||||
/// the same RGB signature frames so analyzer identity remains comparable.
|
||||
fn build_encoded_pipeline(camera: CameraConfig) -> Result<gst::Pipeline> {
|
||||
let video_caps = format!(
|
||||
"video/x-raw,format=RGB,width={},height={},framerate={}/1",
|
||||
camera.width,
|
||||
camera.height,
|
||||
camera.fps.max(1)
|
||||
);
|
||||
let (encoder, parse_chain) = match camera.codec {
|
||||
CameraCodec::H264 => (
|
||||
pick_h264_encoder(camera.fps.max(1))?,
|
||||
"h264parse config-interval=-1 ! video/x-h264,stream-format=byte-stream,alignment=au",
|
||||
),
|
||||
CameraCodec::Hevc => (
|
||||
pick_hevc_encoder(camera.fps.max(1))?,
|
||||
"h265parse config-interval=-1 ! video/x-h265,stream-format=byte-stream,alignment=au",
|
||||
),
|
||||
CameraCodec::Mjpeg => unreachable!("MJPEG uses pre-encoded still frames"),
|
||||
};
|
||||
let video_branch = format!(
|
||||
"appsrc name=sync_probe_video_src is-live=true format=time do-timestamp=false caps={video_caps} ! \
|
||||
queue max-size-buffers=4 leaky=downstream ! videoconvert ! \
|
||||
{encoder} ! {parse_chain} ! \
|
||||
appsink name=sync_probe_video_sink emit-signals=false sync=false max-buffers=4 drop=true",
|
||||
);
|
||||
|
||||
gst::parse::launch(&video_branch)
|
||||
.with_context(|| format!("building sync probe pipeline: {video_branch}"))?
|
||||
.downcast::<gst::Pipeline>()
|
||||
.map_err(|_| anyhow::anyhow!("sync probe description did not build a pipeline"))
|
||||
}
|
||||
|
||||
/// Choose an available low-latency H.264 encoder.
|
||||
///
|
||||
/// Inputs: target frame rate, used for GOP sizing where the encoder supports it.
|
||||
/// Outputs: a GStreamer encoder element description.
|
||||
/// Why: this probe should run on different developer hosts without hardcoding a
|
||||
/// single hardware encoder, while still preferring low-latency behavior.
|
||||
fn pick_h264_encoder(fps: u32) -> Result<String> {
|
||||
if gst::ElementFactory::find("x264enc").is_some() {
|
||||
return Ok(format!(
|
||||
"x264enc tune=zerolatency speed-preset=ultrafast bitrate=2500 key-int-max={}",
|
||||
fps.max(1)
|
||||
));
|
||||
}
|
||||
if gst::ElementFactory::find("openh264enc").is_some() {
|
||||
return Ok("openh264enc bitrate=2500000".to_string());
|
||||
}
|
||||
if gst::ElementFactory::find("v4l2h264enc").is_some() {
|
||||
return Ok("v4l2h264enc".to_string());
|
||||
}
|
||||
bail!("no usable H.264 encoder found for sync probe")
|
||||
}
|
||||
|
||||
/// Choose an available low-latency HEVC encoder.
|
||||
///
|
||||
/// Inputs: target frame rate, used for GOP sizing where the encoder supports it.
|
||||
/// Outputs: a GStreamer encoder element description.
|
||||
/// Why: the client-to-server probe should exercise the same HEVC transport
|
||||
/// shape as real webcam uplink without requiring a specific GPU encoder.
|
||||
fn pick_hevc_encoder(fps: u32) -> Result<String> {
|
||||
if gst::ElementFactory::find("x265enc").is_some() {
|
||||
let keyframe_interval = low_latency_hevc_keyframe_interval(fps);
|
||||
return Ok(format!(
|
||||
"x265enc tune=zerolatency speed-preset=ultrafast bitrate=2500 key-int-max={}",
|
||||
keyframe_interval
|
||||
));
|
||||
}
|
||||
for encoder in ["nvh265enc", "vah265enc", "vaapih265enc", "v4l2h265enc"] {
|
||||
if gst::ElementFactory::find(encoder).is_some() {
|
||||
return Ok(encoder.to_string());
|
||||
}
|
||||
}
|
||||
bail!("no usable HEVC encoder found for sync probe")
|
||||
}
|
||||
|
||||
/// Match the real webcam HEVC keyframe cadence in synthetic transport probes.
|
||||
///
|
||||
/// Inputs: target frame rate. Output: low-latency keyframe interval in frames.
|
||||
/// Why: the client-to-RCT probe should stress the same inter-frame shape as
|
||||
/// real webcam uplink; a one-second GOP made coded flashes less representative
|
||||
/// than Lesavka's default live-call HEVC pipeline.
|
||||
fn low_latency_hevc_keyframe_interval(fps: u32) -> u32 {
|
||||
fps.clamp(1, 5)
|
||||
}
|
||||
|
||||
/// Select the visual signature for a video timestamp.
|
||||
///
|
||||
/// Inputs: deterministic pulse schedule and current video PTS.
|
||||
/// Outputs: frame kind used by packet encoding.
|
||||
/// Why: the frame decision must be shared by MJPEG and H.264 so both codecs
|
||||
/// carry the same event identity to the RCT analyzer.
|
||||
pub(super) fn probe_frame_kind(schedule: &PulseSchedule, pts: Duration) -> ProbeFrameKind {
|
||||
if !schedule.flash_active(pts) {
|
||||
return ProbeFrameKind::Dark;
|
||||
}
|
||||
if let Some(code) = schedule.event_code(pts) {
|
||||
return ProbeFrameKind::Coded(code);
|
||||
}
|
||||
if schedule.pulse_is_marker(pts) {
|
||||
ProbeFrameKind::MarkerPulse
|
||||
} else {
|
||||
ProbeFrameKind::RegularPulse
|
||||
}
|
||||
}
|
||||
|
||||
/// Produce an encoded video payload for a probe frame.
|
||||
///
|
||||
/// Inputs: packet source, frame kind, raw RGB frame, and timing metadata.
|
||||
/// Outputs: encoded video bytes, or `None` when the live encoder is drained.
|
||||
/// Why: bundled transport tests need fresh video packets paced from local PTS,
|
||||
/// but MJPEG and H.264 require different packet-production paths.
|
||||
pub(super) fn video_packet_data(
|
||||
packet_source: &mut VideoPacketSource,
|
||||
frame_kind: ProbeFrameKind,
|
||||
raw_frame: &[u8],
|
||||
pts: Duration,
|
||||
frame_step: Duration,
|
||||
) -> Option<EncodedVideoData> {
|
||||
match packet_source {
|
||||
VideoPacketSource::Mjpeg(frames) => match frame_kind {
|
||||
ProbeFrameKind::Dark => Some(EncodedVideoData {
|
||||
data: frames.dark.clone(),
|
||||
pts,
|
||||
}),
|
||||
ProbeFrameKind::RegularPulse => Some(EncodedVideoData {
|
||||
data: frames.regular_pulse.clone(),
|
||||
pts,
|
||||
}),
|
||||
ProbeFrameKind::MarkerPulse => Some(EncodedVideoData {
|
||||
data: frames.marker_pulse.clone(),
|
||||
pts,
|
||||
}),
|
||||
ProbeFrameKind::Coded(code) => frames
|
||||
.coded_pulses
|
||||
.get(&code)
|
||||
.cloned()
|
||||
.map(|data| EncodedVideoData { data, pts }),
|
||||
},
|
||||
VideoPacketSource::Pipeline {
|
||||
src,
|
||||
sink,
|
||||
first_sample_pts,
|
||||
..
|
||||
} => {
|
||||
let mut buffer = gst::Buffer::from_slice(raw_frame.to_vec());
|
||||
if let Some(meta) = buffer.get_mut() {
|
||||
let pts_time = gst::ClockTime::from_nseconds(pts.as_nanos() as u64);
|
||||
meta.set_pts(Some(pts_time));
|
||||
meta.set_dts(Some(pts_time));
|
||||
meta.set_duration(Some(gst::ClockTime::from_nseconds(
|
||||
frame_step.as_nanos() as u64
|
||||
)));
|
||||
}
|
||||
if src.push_buffer(buffer).is_err() {
|
||||
return None;
|
||||
}
|
||||
freshest_probe_video_sample(sink).and_then(|sample| {
|
||||
let buffer = sample.buffer()?;
|
||||
let sample_pts =
|
||||
normalized_sample_pts_duration(buffer, first_sample_pts).unwrap_or(pts);
|
||||
let map = buffer.map_readable().ok()?;
|
||||
Some(EncodedVideoData {
|
||||
data: map.as_slice().to_vec(),
|
||||
pts: sample_pts,
|
||||
})
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Read the encoder output timestamp for one sample buffer.
|
||||
///
|
||||
/// Inputs: encoded sample buffer. Output: packet PTS as a `Duration` when the
|
||||
/// encoder preserved it. Why: inter-frame encoders may return an older access
|
||||
/// unit than the frame just pushed, so transport packets must use the actual
|
||||
/// output PTS instead of the current input-loop PTS.
|
||||
fn sample_pts_duration(buffer: &gst::BufferRef) -> Option<Duration> {
|
||||
buffer.pts().map(|pts| Duration::from_nanos(pts.nseconds()))
|
||||
}
|
||||
|
||||
/// Rebase encoder output timestamps onto the probe's zero-based timeline.
|
||||
///
|
||||
/// Inputs: encoded sample buffer and mutable first-sample timestamp.
|
||||
/// Output: normalized sample PTS.
|
||||
/// Why: some GStreamer encoders emit a segment-offset PTS while still preserving
|
||||
/// correct sample-to-sample cadence, so the probe keeps the cadence and drops
|
||||
/// the absolute segment origin before bundling media for transport.
|
||||
fn normalized_sample_pts_duration(
|
||||
buffer: &gst::BufferRef,
|
||||
first_sample_pts: &mut Option<Duration>,
|
||||
) -> Option<Duration> {
|
||||
let sample_pts = sample_pts_duration(buffer)?;
|
||||
let first = first_sample_pts.get_or_insert(sample_pts);
|
||||
Some(sample_pts.saturating_sub(*first))
|
||||
}
|
||||
|
||||
/// Drain a live appsink and return the newest encoded sample.
|
||||
///
|
||||
/// Inputs: GStreamer appsink for the H.264 probe pipeline.
|
||||
/// Outputs: most recent sample if one was produced.
|
||||
/// Why: the transport probe should prefer freshness over preserving an encoder
|
||||
/// backlog that would make client-origin media look older than it really is.
|
||||
fn freshest_probe_video_sample(sink: &gst_app::AppSink) -> Option<gst::Sample> {
|
||||
let mut newest = sink.try_pull_sample(gst::ClockTime::from_mseconds(250));
|
||||
while let Some(sample) = sink.try_pull_sample(gst::ClockTime::ZERO) {
|
||||
newest = Some(sample);
|
||||
}
|
||||
newest
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use gstreamer as gst;
|
||||
|
||||
/// Verifies synthetic HEVC probes use the same short GOP shape as live
|
||||
/// camera transport.
|
||||
///
|
||||
/// Input: representative target frame rates. Output: bounded keyframe
|
||||
/// interval. Why: coded flash recovery should fail for real transport
|
||||
/// reasons, not because the probe used an easier one-second GOP.
|
||||
#[test]
|
||||
fn low_latency_hevc_keyframe_interval_matches_live_camera_default() {
|
||||
assert_eq!(super::low_latency_hevc_keyframe_interval(0), 1);
|
||||
assert_eq!(super::low_latency_hevc_keyframe_interval(1), 1);
|
||||
assert_eq!(super::low_latency_hevc_keyframe_interval(5), 5);
|
||||
assert_eq!(super::low_latency_hevc_keyframe_interval(20), 5);
|
||||
assert_eq!(super::low_latency_hevc_keyframe_interval(30), 5);
|
||||
}
|
||||
|
||||
/// Verifies encoded packet timestamps come from the encoder output sample.
|
||||
///
|
||||
/// Input: one encoded GStreamer buffer with explicit PTS. Output: matching
|
||||
/// `Duration`. Why: HEVC encoders may return a delayed access unit, so the
|
||||
/// bundle must carry the timestamp of what actually left the encoder.
|
||||
#[test]
|
||||
fn sample_pts_duration_uses_encoder_output_pts() {
|
||||
gst::init().expect("gst init");
|
||||
let mut buffer = gst::Buffer::with_size(4).expect("buffer");
|
||||
{
|
||||
let meta = buffer.get_mut().expect("mutable buffer");
|
||||
meta.set_pts(Some(gst::ClockTime::from_mseconds(123)));
|
||||
}
|
||||
assert_eq!(
|
||||
super::sample_pts_duration(buffer.as_ref()),
|
||||
Some(std::time::Duration::from_millis(123))
|
||||
);
|
||||
}
|
||||
|
||||
/// Verifies encoder segment origins are removed while cadence is retained.
|
||||
///
|
||||
/// Input: two encoded buffers whose PTS starts far from zero. Output:
|
||||
/// zero-based probe timestamps. Why: the server analyzer compares client
|
||||
/// media against the synthetic probe timeline, not GStreamer segment
|
||||
/// wall-clock origins.
|
||||
#[test]
|
||||
fn normalized_sample_pts_duration_preserves_cadence_without_segment_origin() {
|
||||
gst::init().expect("gst init");
|
||||
let mut first_sample_pts = None;
|
||||
|
||||
let mut first = gst::Buffer::with_size(4).expect("first");
|
||||
first
|
||||
.get_mut()
|
||||
.expect("first mutable")
|
||||
.set_pts(Some(gst::ClockTime::from_seconds(3_600)));
|
||||
assert_eq!(
|
||||
super::normalized_sample_pts_duration(first.as_ref(), &mut first_sample_pts),
|
||||
Some(std::time::Duration::ZERO)
|
||||
);
|
||||
|
||||
let mut second = gst::Buffer::with_size(4).expect("second");
|
||||
second.get_mut().expect("second mutable").set_pts(Some(
|
||||
gst::ClockTime::from_seconds(3_600) + gst::ClockTime::from_mseconds(33),
|
||||
));
|
||||
assert_eq!(
|
||||
super::normalized_sample_pts_duration(second.as_ref(), &mut first_sample_pts),
|
||||
Some(std::time::Duration::from_millis(33))
|
||||
);
|
||||
}
|
||||
}
|
||||
@ -1,9 +1,11 @@
|
||||
//! CLI parsing for the upstream A/V sync probe.
|
||||
|
||||
use anyhow::{Context, Result, bail};
|
||||
use std::path::PathBuf;
|
||||
use std::time::Duration;
|
||||
|
||||
use crate::app_support::DEFAULT_SERVER_ADDR;
|
||||
use crate::sync_probe::signature::first_unsupported_event_code;
|
||||
|
||||
#[derive(Clone, Debug, Eq, PartialEq)]
|
||||
pub struct ProbeConfig {
|
||||
@ -13,6 +15,8 @@ pub struct ProbeConfig {
|
||||
pub pulse_period: Duration,
|
||||
pub pulse_width: Duration,
|
||||
pub marker_tick_period: u32,
|
||||
pub event_width_codes: Vec<u32>,
|
||||
pub timeline_json: Option<PathBuf>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Eq, PartialEq)]
|
||||
@ -22,7 +26,7 @@ pub enum ParseOutcome {
|
||||
}
|
||||
|
||||
pub fn usage() -> &'static str {
|
||||
"Usage: lesavka-sync-probe [--server http://HOST:50051] [--duration-seconds 10] [--warmup-seconds 4] [--pulse-period-ms 1000] [--pulse-width-ms 120] [--marker-tick-period 5]"
|
||||
"Usage: lesavka-sync-probe [--server http://HOST:50051] [--duration-seconds 10] [--warmup-seconds 4] [--pulse-period-ms 1000] [--pulse-width-ms 120] [--marker-tick-period 5] [--event-width-codes 1,2,3] [--timeline-json PATH]"
|
||||
}
|
||||
|
||||
pub fn parse_args_outcome_from<I, S>(args: I) -> Result<ParseOutcome>
|
||||
@ -37,6 +41,8 @@ where
|
||||
let mut pulse_period_ms = 1_000u64;
|
||||
let mut pulse_width_ms = 120u64;
|
||||
let mut marker_tick_period = 5u32;
|
||||
let mut event_width_codes = Vec::<u32>::new();
|
||||
let mut timeline_json = None::<PathBuf>;
|
||||
|
||||
while let Some(arg) = args.next() {
|
||||
match arg.as_str() {
|
||||
@ -94,6 +100,20 @@ where
|
||||
bail!("marker tick period must be positive\n{}", usage());
|
||||
}
|
||||
}
|
||||
"--event-width-codes" => {
|
||||
event_width_codes = parse_event_width_codes(args.next())?;
|
||||
}
|
||||
"--timeline-json" => {
|
||||
let path = args
|
||||
.next()
|
||||
.context("missing value after --timeline-json")?
|
||||
.trim()
|
||||
.to_string();
|
||||
if path.is_empty() {
|
||||
bail!("timeline JSON path must not be empty\n{}", usage());
|
||||
}
|
||||
timeline_json = Some(PathBuf::from(path));
|
||||
}
|
||||
"--help" | "-h" => return Ok(ParseOutcome::Help),
|
||||
_ => bail!("unexpected argument `{arg}`\n{}", usage()),
|
||||
}
|
||||
@ -105,7 +125,6 @@ where
|
||||
usage()
|
||||
);
|
||||
}
|
||||
|
||||
Ok(ParseOutcome::Run(ProbeConfig {
|
||||
server,
|
||||
duration: Duration::from_secs(duration_seconds),
|
||||
@ -113,6 +132,8 @@ where
|
||||
pulse_period: Duration::from_millis(pulse_period_ms),
|
||||
pulse_width: Duration::from_millis(pulse_width_ms),
|
||||
marker_tick_period,
|
||||
event_width_codes,
|
||||
timeline_json,
|
||||
}))
|
||||
}
|
||||
|
||||
@ -132,6 +153,45 @@ fn parse_u32_arg(value: Option<String>, flag: &str, context: &str) -> Result<u32
|
||||
.with_context(|| format!("{context}\n{}", usage()))
|
||||
}
|
||||
|
||||
/// Parse the event identity sequence for coded synthetic probes.
|
||||
///
|
||||
/// Inputs: optional raw CLI value after `--event-width-codes`.
|
||||
/// Outputs: validated one-based event code vector.
|
||||
/// Why: invalid event signatures should fail before transport starts, otherwise
|
||||
/// the final RCT capture would look like missing media rather than bad config.
|
||||
fn parse_event_width_codes(value: Option<String>) -> Result<Vec<u32>> {
|
||||
let raw = value
|
||||
.context("missing value after --event-width-codes")?
|
||||
.trim()
|
||||
.to_string();
|
||||
let codes = raw
|
||||
.split(',')
|
||||
.filter_map(|part| {
|
||||
let trimmed = part.trim();
|
||||
(!trimmed.is_empty()).then_some(trimmed)
|
||||
})
|
||||
.map(|part| {
|
||||
let code = part
|
||||
.parse::<u32>()
|
||||
.with_context(|| format!("parsing event width code `{part}`"))?;
|
||||
if code == 0 {
|
||||
bail!("event width codes must be positive\n{}", usage());
|
||||
}
|
||||
Ok(code)
|
||||
})
|
||||
.collect::<Result<Vec<_>>>()?;
|
||||
if codes.is_empty() {
|
||||
bail!("event width code list must not be empty\n{}", usage());
|
||||
}
|
||||
if let Some(code) = first_unsupported_event_code(&codes) {
|
||||
bail!(
|
||||
"event width code {code} has no probe signature\n{}",
|
||||
usage()
|
||||
);
|
||||
}
|
||||
Ok(codes)
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::{DEFAULT_SERVER_ADDR, ParseOutcome, parse_args_outcome_from};
|
||||
@ -150,6 +210,8 @@ mod tests {
|
||||
assert_eq!(config.pulse_period, Duration::from_millis(1_000));
|
||||
assert_eq!(config.pulse_width, Duration::from_millis(120));
|
||||
assert_eq!(config.marker_tick_period, 5);
|
||||
assert!(config.event_width_codes.is_empty());
|
||||
assert_eq!(config.timeline_json, None);
|
||||
}
|
||||
|
||||
#[test]
|
||||
@ -167,6 +229,10 @@ mod tests {
|
||||
"90",
|
||||
"--marker-tick-period",
|
||||
"3",
|
||||
"--event-width-codes",
|
||||
"1,2,3",
|
||||
"--timeline-json",
|
||||
"/tmp/client-timeline.json",
|
||||
])
|
||||
.expect("configured run");
|
||||
let ParseOutcome::Run(config) = outcome else {
|
||||
@ -179,6 +245,11 @@ mod tests {
|
||||
assert_eq!(config.pulse_period, Duration::from_millis(750));
|
||||
assert_eq!(config.pulse_width, Duration::from_millis(90));
|
||||
assert_eq!(config.marker_tick_period, 3);
|
||||
assert_eq!(config.event_width_codes, vec![1, 2, 3]);
|
||||
assert_eq!(
|
||||
config.timeline_json,
|
||||
Some("/tmp/client-timeline.json".into())
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
@ -187,6 +258,8 @@ mod tests {
|
||||
assert!(parse_args_outcome_from(["--pulse-period-ms", "0"]).is_err());
|
||||
assert!(parse_args_outcome_from(["--duration-seconds", "0"]).is_err());
|
||||
assert!(parse_args_outcome_from(["--marker-tick-period", "0"]).is_err());
|
||||
assert!(parse_args_outcome_from(["--event-width-codes", "0"]).is_err());
|
||||
assert!(parse_args_outcome_from(["--event-width-codes", "17"]).is_err());
|
||||
}
|
||||
|
||||
#[test]
|
||||
@ -200,6 +273,7 @@ mod tests {
|
||||
assert!(parse_args_outcome_from(["--server"]).is_err());
|
||||
assert!(parse_args_outcome_from(["--duration-seconds"]).is_err());
|
||||
assert!(parse_args_outcome_from(["--marker-tick-period"]).is_err());
|
||||
assert!(parse_args_outcome_from(["--timeline-json"]).is_err());
|
||||
assert!(parse_args_outcome_from(["--wat"]).is_err());
|
||||
}
|
||||
|
||||
|
||||
@ -11,5 +11,7 @@ mod capture;
|
||||
mod config;
|
||||
mod runner;
|
||||
mod schedule;
|
||||
mod signature;
|
||||
mod timeline;
|
||||
|
||||
pub use runner::run_sync_probe_from_args;
|
||||
|
||||
@ -7,30 +7,14 @@ use crate::handshake;
|
||||
use crate::sync_probe::capture::SyncProbeCapture;
|
||||
use crate::sync_probe::config::{ParseOutcome, ProbeConfig, parse_args_outcome_from, usage};
|
||||
use crate::sync_probe::schedule::PulseSchedule;
|
||||
use crate::sync_probe::timeline::ProbeTimeline;
|
||||
#[cfg(not(coverage))]
|
||||
use std::fs::File;
|
||||
#[cfg(not(coverage))]
|
||||
use std::io::Write;
|
||||
#[cfg(not(coverage))]
|
||||
use std::path::PathBuf;
|
||||
mod bundled_transport;
|
||||
|
||||
#[cfg(not(coverage))]
|
||||
use lesavka_common::lesavka::{
|
||||
AudioPacket, UpstreamMediaBundle, VideoPacket, relay_client::RelayClient,
|
||||
};
|
||||
use bundled_transport::run_bundled_probe_stream;
|
||||
#[cfg(not(coverage))]
|
||||
use tonic::{Request, transport::Channel};
|
||||
|
||||
#[cfg(not(coverage))]
|
||||
const PROBE_BUNDLE_AUDIO_GRACE: std::time::Duration = std::time::Duration::from_millis(30);
|
||||
#[cfg(not(coverage))]
|
||||
const PROBE_BUNDLE_AUDIO_WINDOW_BEFORE_US: u64 = 120_000;
|
||||
#[cfg(not(coverage))]
|
||||
const PROBE_BUNDLE_AUDIO_WINDOW_AFTER_US: u64 = 40_000;
|
||||
#[cfg(not(coverage))]
|
||||
const PROBE_BUNDLE_MAX_AUDIO_PACKETS: usize = 16;
|
||||
#[cfg(not(coverage))]
|
||||
const PROBE_BUNDLE_SESSION_ID: u64 = 1;
|
||||
use tonic::transport::Channel;
|
||||
|
||||
/// Keeps `run_sync_probe_from_args` explicit because it sits on this module contract, where hidden behavior would make regressions difficult to diagnose.
|
||||
/// Inputs are the typed parameters; output is the return value or side effect.
|
||||
@ -62,12 +46,22 @@ async fn run_sync_probe(config: ProbeConfig) -> Result<()> {
|
||||
let camera = app_support::camera_config_from_caps(&caps)
|
||||
.context("server handshake did not include a complete camera profile")?;
|
||||
|
||||
let schedule = PulseSchedule::new(
|
||||
config.warmup,
|
||||
config.pulse_period,
|
||||
config.pulse_width,
|
||||
config.marker_tick_period,
|
||||
);
|
||||
let schedule = if config.event_width_codes.is_empty() {
|
||||
PulseSchedule::new(
|
||||
config.warmup,
|
||||
config.pulse_period,
|
||||
config.pulse_width,
|
||||
config.marker_tick_period,
|
||||
)
|
||||
} else {
|
||||
PulseSchedule::with_event_width_codes(
|
||||
config.warmup,
|
||||
config.pulse_period,
|
||||
config.pulse_width,
|
||||
config.marker_tick_period,
|
||||
config.event_width_codes.clone(),
|
||||
)
|
||||
};
|
||||
|
||||
tracing::info!(
|
||||
server = %config.server,
|
||||
@ -80,249 +74,30 @@ async fn run_sync_probe(config: ProbeConfig) -> Result<()> {
|
||||
);
|
||||
|
||||
let bundled_channel = connect(config.server.as_str()).await?;
|
||||
let capture = SyncProbeCapture::new(camera, schedule, config.duration)?;
|
||||
let capture = SyncProbeCapture::new(camera, schedule.clone(), config.duration)?;
|
||||
let client_start_unix_ns = capture.start_unix_ns();
|
||||
let probe_start = capture.probe_start();
|
||||
if let Some(path) = &config.timeline_json {
|
||||
ProbeTimeline::new(camera, &schedule, config.duration, client_start_unix_ns)
|
||||
.write_to(path)
|
||||
.with_context(|| format!("writing client sync probe timeline {}", path.display()))?;
|
||||
}
|
||||
let video_queue = capture.video_queue();
|
||||
let audio_queue = capture.audio_queue();
|
||||
|
||||
let bundled_task = tokio::spawn(async move {
|
||||
let mut client = RelayClient::new(bundled_channel);
|
||||
let mut audio_dump = open_debug_dump("LESAVKA_SYNC_PROBE_AUDIO_DUMP")
|
||||
.context("opening sync probe audio dump")?;
|
||||
let outbound = async_stream::stream! {
|
||||
let mut pending_audio = Vec::<AudioPacket>::new();
|
||||
let mut audio_done = false;
|
||||
let mut video_done = false;
|
||||
let mut bundle_seq = 0_u64;
|
||||
let mut audio_seq = 0_u64;
|
||||
let mut video_seq = 0_u64;
|
||||
|
||||
loop {
|
||||
if video_done && audio_done {
|
||||
break;
|
||||
}
|
||||
|
||||
tokio::select! {
|
||||
next = video_queue.pop_fresh(), if !video_done => {
|
||||
if next.dropped_stale > 0 {
|
||||
tracing::warn!(
|
||||
dropped_stale = next.dropped_stale,
|
||||
queue_depth = next.queue_depth,
|
||||
"🧪 sync probe video queue dropped stale packets"
|
||||
);
|
||||
}
|
||||
if let Some(mut video) = next.packet {
|
||||
stamp_probe_video_packet(&mut video, &mut video_seq, next.queue_depth, camera.fps);
|
||||
retain_probe_audio_for_video(&mut pending_audio, packet_video_capture_pts_us(&video));
|
||||
collect_probe_audio_grace(
|
||||
&audio_queue,
|
||||
&mut pending_audio,
|
||||
&mut audio_done,
|
||||
&mut audio_seq,
|
||||
audio_dump.as_mut(),
|
||||
).await;
|
||||
retain_probe_audio_for_video(&mut pending_audio, packet_video_capture_pts_us(&video));
|
||||
if pending_audio.is_empty() {
|
||||
tracing::warn!(
|
||||
video_pts = packet_video_capture_pts_us(&video),
|
||||
"🧪 sync probe skipped video-only bundle while measuring bundled output delay"
|
||||
);
|
||||
continue;
|
||||
}
|
||||
bundle_seq = bundle_seq.saturating_add(1);
|
||||
let audio = std::mem::take(&mut pending_audio);
|
||||
let (capture_start_us, capture_end_us) =
|
||||
probe_bundle_capture_bounds(Some(&video), &audio);
|
||||
yield UpstreamMediaBundle {
|
||||
session_id: PROBE_BUNDLE_SESSION_ID,
|
||||
seq: bundle_seq,
|
||||
capture_start_us,
|
||||
capture_end_us,
|
||||
video: Some(video),
|
||||
audio,
|
||||
audio_sample_rate: 48_000,
|
||||
audio_channels: 2,
|
||||
video_width: camera.width,
|
||||
video_height: camera.height,
|
||||
video_fps: camera.fps,
|
||||
};
|
||||
} else if next.closed {
|
||||
video_done = true;
|
||||
}
|
||||
}
|
||||
next = audio_queue.pop_fresh(), if !audio_done => {
|
||||
if next.dropped_stale > 0 {
|
||||
tracing::warn!(
|
||||
dropped_stale = next.dropped_stale,
|
||||
queue_depth = next.queue_depth,
|
||||
"🧪 sync probe audio queue dropped stale packets"
|
||||
);
|
||||
}
|
||||
if let Some(mut packet) = next.packet {
|
||||
stamp_probe_audio_packet(&mut packet, &mut audio_seq, next.queue_depth);
|
||||
write_probe_audio_dump(audio_dump.as_mut(), &packet);
|
||||
pending_audio.push(packet);
|
||||
retain_newest_probe_audio(&mut pending_audio);
|
||||
} else if next.closed {
|
||||
audio_done = true;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
if let Some(file) = audio_dump.as_mut() {
|
||||
let _ = file.flush();
|
||||
}
|
||||
};
|
||||
let mut response = client
|
||||
.stream_webcam_media(Request::new(outbound))
|
||||
.await
|
||||
.context("starting bundled sync probe webcam stream")?;
|
||||
while response.get_mut().message().await.transpose().is_some() {}
|
||||
Ok::<(), anyhow::Error>(())
|
||||
});
|
||||
|
||||
bundled_task
|
||||
.await
|
||||
.context("joining bundled sync probe stream")?
|
||||
.context("bundled sync probe task failed")?;
|
||||
run_bundled_probe_stream(
|
||||
bundled_channel,
|
||||
camera,
|
||||
video_queue,
|
||||
audio_queue,
|
||||
probe_start,
|
||||
)
|
||||
.await?;
|
||||
|
||||
tracing::info!("🧪 A/V sync probe finished");
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[cfg(not(coverage))]
|
||||
/// Keeps `collect_probe_audio_grace` explicit because it sits on this module contract, where hidden behavior would make regressions difficult to diagnose.
|
||||
/// Inputs are the typed parameters; output is the return value or side effect.
|
||||
async fn collect_probe_audio_grace(
|
||||
audio_queue: &crate::uplink_fresh_queue::FreshPacketQueue<AudioPacket>,
|
||||
pending_audio: &mut Vec<AudioPacket>,
|
||||
audio_done: &mut bool,
|
||||
audio_seq: &mut u64,
|
||||
audio_dump: Option<&mut File>,
|
||||
) {
|
||||
if *audio_done || !pending_audio.is_empty() {
|
||||
return;
|
||||
}
|
||||
let Ok(next) = tokio::time::timeout(PROBE_BUNDLE_AUDIO_GRACE, audio_queue.pop_fresh()).await
|
||||
else {
|
||||
return;
|
||||
};
|
||||
if let Some(mut packet) = next.packet {
|
||||
stamp_probe_audio_packet(&mut packet, audio_seq, next.queue_depth);
|
||||
write_probe_audio_dump(audio_dump, &packet);
|
||||
pending_audio.push(packet);
|
||||
retain_newest_probe_audio(pending_audio);
|
||||
} else if next.closed {
|
||||
*audio_done = true;
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(not(coverage))]
|
||||
fn stamp_probe_audio_packet(packet: &mut AudioPacket, seq: &mut u64, queue_depth: usize) {
|
||||
*seq = seq.saturating_add(1);
|
||||
let capture_pts_us = packet.pts;
|
||||
let send_pts_us = crate::live_capture_clock::capture_pts_us().max(capture_pts_us);
|
||||
packet.seq = *seq;
|
||||
packet.client_capture_pts_us = capture_pts_us;
|
||||
packet.client_send_pts_us = send_pts_us;
|
||||
packet.client_queue_depth = queue_depth.try_into().unwrap_or(u32::MAX);
|
||||
packet.client_queue_age_ms = packet_age_ms(capture_pts_us, send_pts_us);
|
||||
}
|
||||
|
||||
#[cfg(not(coverage))]
|
||||
fn stamp_probe_video_packet(packet: &mut VideoPacket, seq: &mut u64, queue_depth: usize, fps: u32) {
|
||||
*seq = seq.saturating_add(1);
|
||||
let capture_pts_us = packet.pts;
|
||||
let send_pts_us = crate::live_capture_clock::capture_pts_us().max(capture_pts_us);
|
||||
packet.seq = *seq;
|
||||
packet.effective_fps = fps;
|
||||
packet.client_capture_pts_us = capture_pts_us;
|
||||
packet.client_send_pts_us = send_pts_us;
|
||||
packet.client_queue_depth = queue_depth.try_into().unwrap_or(u32::MAX);
|
||||
packet.client_queue_age_ms = packet_age_ms(capture_pts_us, send_pts_us);
|
||||
}
|
||||
|
||||
#[cfg(not(coverage))]
|
||||
fn packet_age_ms(capture_pts_us: u64, send_pts_us: u64) -> u32 {
|
||||
(send_pts_us.saturating_sub(capture_pts_us) / 1_000)
|
||||
.try_into()
|
||||
.unwrap_or(u32::MAX)
|
||||
}
|
||||
|
||||
#[cfg(not(coverage))]
|
||||
/// Keeps `write_probe_audio_dump` explicit because it sits on this module contract, where hidden behavior would make regressions difficult to diagnose.
|
||||
/// Inputs are the typed parameters; output is the return value or side effect.
|
||||
fn write_probe_audio_dump(file: Option<&mut File>, packet: &AudioPacket) {
|
||||
if let Some(file) = file {
|
||||
let _ = file.write_all(&packet.data);
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(not(coverage))]
|
||||
/// Keeps `retain_newest_probe_audio` explicit because it sits on this module contract, where hidden behavior would make regressions difficult to diagnose.
|
||||
/// Inputs are the typed parameters; output is the return value or side effect.
|
||||
fn retain_newest_probe_audio(pending_audio: &mut Vec<AudioPacket>) {
|
||||
if pending_audio.len() > PROBE_BUNDLE_MAX_AUDIO_PACKETS {
|
||||
let dropped = pending_audio.len() - PROBE_BUNDLE_MAX_AUDIO_PACKETS;
|
||||
pending_audio.drain(..dropped);
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(not(coverage))]
|
||||
fn retain_probe_audio_for_video(pending_audio: &mut Vec<AudioPacket>, video_pts_us: u64) {
|
||||
let min_pts = video_pts_us.saturating_sub(PROBE_BUNDLE_AUDIO_WINDOW_BEFORE_US);
|
||||
let max_pts = video_pts_us.saturating_add(PROBE_BUNDLE_AUDIO_WINDOW_AFTER_US);
|
||||
pending_audio.retain(|packet| {
|
||||
let pts = packet_audio_capture_pts_us(packet);
|
||||
pts >= min_pts && pts <= max_pts
|
||||
});
|
||||
retain_newest_probe_audio(pending_audio);
|
||||
}
|
||||
|
||||
#[cfg(not(coverage))]
|
||||
/// Keeps `probe_bundle_capture_bounds` explicit because it sits on this module contract, where hidden behavior would make regressions difficult to diagnose.
|
||||
/// Inputs are the typed parameters; output is the return value or side effect.
|
||||
fn probe_bundle_capture_bounds(video: Option<&VideoPacket>, audio: &[AudioPacket]) -> (u64, u64) {
|
||||
let mut start = u64::MAX;
|
||||
let mut end = 0_u64;
|
||||
if let Some(video) = video {
|
||||
let pts = packet_video_capture_pts_us(video);
|
||||
start = start.min(pts);
|
||||
end = end.max(pts);
|
||||
}
|
||||
for packet in audio {
|
||||
let pts = packet_audio_capture_pts_us(packet);
|
||||
start = start.min(pts);
|
||||
end = end.max(pts);
|
||||
}
|
||||
if start == u64::MAX {
|
||||
let now = crate::live_capture_clock::capture_pts_us();
|
||||
return (now, now);
|
||||
}
|
||||
(start, end.max(start))
|
||||
}
|
||||
|
||||
#[cfg(not(coverage))]
|
||||
/// Keeps `packet_audio_capture_pts_us` explicit because it sits on this module contract, where hidden behavior would make regressions difficult to diagnose.
|
||||
/// Inputs are the typed parameters; output is the return value or side effect.
|
||||
fn packet_audio_capture_pts_us(packet: &AudioPacket) -> u64 {
|
||||
if packet.client_capture_pts_us == 0 {
|
||||
packet.pts
|
||||
} else {
|
||||
packet.client_capture_pts_us
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(not(coverage))]
|
||||
/// Keeps `packet_video_capture_pts_us` explicit because it sits on this module contract, where hidden behavior would make regressions difficult to diagnose.
|
||||
/// Inputs are the typed parameters; output is the return value or side effect.
|
||||
fn packet_video_capture_pts_us(packet: &VideoPacket) -> u64 {
|
||||
if packet.client_capture_pts_us == 0 {
|
||||
packet.pts
|
||||
} else {
|
||||
packet.client_capture_pts_us
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(not(coverage))]
|
||||
async fn connect(server_addr: &str) -> Result<Channel> {
|
||||
crate::relay_transport::endpoint(server_addr)?
|
||||
@ -332,17 +107,6 @@ async fn connect(server_addr: &str) -> Result<Channel> {
|
||||
.with_context(|| format!("connecting to relay at {server_addr}"))
|
||||
}
|
||||
|
||||
#[cfg(not(coverage))]
|
||||
fn open_debug_dump(env_var: &str) -> Result<Option<File>> {
|
||||
let Some(path) = std::env::var_os(env_var) else {
|
||||
return Ok(None);
|
||||
};
|
||||
let path = PathBuf::from(path);
|
||||
let file = File::create(&path)
|
||||
.with_context(|| format!("creating debug dump at {}", path.display()))?;
|
||||
Ok(Some(file))
|
||||
}
|
||||
|
||||
#[cfg(coverage)]
|
||||
async fn run_sync_probe(_config: ProbeConfig) -> Result<()> {
|
||||
Ok(())
|
||||
@ -392,6 +156,8 @@ mod tests {
|
||||
"80",
|
||||
"--marker-tick-period",
|
||||
"4",
|
||||
"--timeline-json",
|
||||
"/tmp/client-sync-timeline.json",
|
||||
])
|
||||
.await
|
||||
.expect("configured coverage run path");
|
||||
|
||||
468
client/src/sync_probe/runner/bundled_transport.rs
Normal file
468
client/src/sync_probe/runner/bundled_transport.rs
Normal file
@ -0,0 +1,468 @@
|
||||
//! Bundled upstream sender for the synthetic sync probe.
|
||||
//!
|
||||
//! The runner builds the synthetic capture timeline, while this module owns the
|
||||
//! transport-specific work of pairing fresh audio with each video pulse and
|
||||
//! streaming `UpstreamMediaBundle` messages to the server.
|
||||
|
||||
use anyhow::{Context, Result};
|
||||
use lesavka_common::lesavka::{
|
||||
AudioPacket, UpstreamMediaBundle, VideoPacket, relay_client::RelayClient,
|
||||
};
|
||||
use std::fs::File;
|
||||
use std::io::Write;
|
||||
use std::path::PathBuf;
|
||||
use std::time::Instant;
|
||||
use tonic::{Request, transport::Channel};
|
||||
|
||||
use crate::input::camera::CameraConfig;
|
||||
use crate::uplink_fresh_queue::FreshPacketQueue;
|
||||
|
||||
const PROBE_BUNDLE_AUDIO_GRACE: std::time::Duration = std::time::Duration::from_millis(30);
|
||||
const PROBE_BUNDLE_AUDIO_WINDOW_BEFORE_US: u64 = 120_000;
|
||||
const PROBE_BUNDLE_AUDIO_WINDOW_AFTER_US: u64 = 40_000;
|
||||
const PROBE_BUNDLE_MAX_AUDIO_PACKETS: usize = 16;
|
||||
const PROBE_BUNDLE_SESSION_ID: u64 = 1;
|
||||
|
||||
/// Stream synthetic paired media through the same bundled RPC as real webcam calls.
|
||||
///
|
||||
/// Inputs: gRPC channel, negotiated camera mode, fresh video/audio queues, and
|
||||
/// the probe monotonic start instant.
|
||||
/// Outputs: completed RPC stream or an error from connection, send, or task join.
|
||||
/// Why: the client-to-RCT probe must exercise post-capture transport without
|
||||
/// measuring split camera/microphone fallback behavior.
|
||||
pub async fn run_bundled_probe_stream(
|
||||
channel: Channel,
|
||||
camera: CameraConfig,
|
||||
video_queue: FreshPacketQueue<VideoPacket>,
|
||||
audio_queue: FreshPacketQueue<AudioPacket>,
|
||||
probe_start: Instant,
|
||||
) -> Result<()> {
|
||||
let bundled_task = tokio::spawn(async move {
|
||||
let mut client = RelayClient::new(channel);
|
||||
let mut audio_dump = open_debug_dump("LESAVKA_SYNC_PROBE_AUDIO_DUMP")
|
||||
.context("opening sync probe audio dump")?;
|
||||
let mut send_log = open_debug_dump("LESAVKA_SYNC_PROBE_SEND_LOG")
|
||||
.context("opening sync probe send log")?;
|
||||
let outbound = async_stream::stream! {
|
||||
let mut pending_audio = Vec::<AudioPacket>::new();
|
||||
let mut audio_done = false;
|
||||
let mut video_done = false;
|
||||
let mut bundle_seq = 0_u64;
|
||||
let mut audio_seq = 0_u64;
|
||||
let mut video_seq = 0_u64;
|
||||
|
||||
loop {
|
||||
if video_done && audio_done {
|
||||
break;
|
||||
}
|
||||
|
||||
tokio::select! {
|
||||
next = video_queue.pop_fresh(), if !video_done => {
|
||||
if next.dropped_stale > 0 {
|
||||
tracing::warn!(
|
||||
dropped_stale = next.dropped_stale,
|
||||
queue_depth = next.queue_depth,
|
||||
"🧪 sync probe video queue dropped stale packets"
|
||||
);
|
||||
}
|
||||
if let Some(mut video) = next.packet {
|
||||
stamp_probe_video_packet(
|
||||
&mut video,
|
||||
&mut video_seq,
|
||||
next.queue_depth,
|
||||
camera.fps,
|
||||
probe_start,
|
||||
);
|
||||
retain_probe_audio_for_video(&mut pending_audio, packet_video_capture_pts_us(&video));
|
||||
collect_probe_audio_grace(
|
||||
&audio_queue,
|
||||
&mut pending_audio,
|
||||
&mut audio_done,
|
||||
&mut audio_seq,
|
||||
audio_dump.as_mut(),
|
||||
probe_start,
|
||||
).await;
|
||||
retain_probe_audio_for_video(&mut pending_audio, packet_video_capture_pts_us(&video));
|
||||
if pending_audio.is_empty() {
|
||||
tracing::warn!(
|
||||
video_pts = packet_video_capture_pts_us(&video),
|
||||
"🧪 sync probe skipped video-only bundle while measuring bundled output delay"
|
||||
);
|
||||
continue;
|
||||
}
|
||||
bundle_seq = bundle_seq.saturating_add(1);
|
||||
let audio = std::mem::take(&mut pending_audio);
|
||||
let (capture_start_us, capture_end_us) =
|
||||
probe_bundle_capture_bounds(Some(&video), &audio);
|
||||
write_probe_send_log(
|
||||
send_log.as_mut(),
|
||||
bundle_seq,
|
||||
probe_start,
|
||||
Some(&video),
|
||||
&audio,
|
||||
);
|
||||
yield build_probe_bundle(
|
||||
PROBE_BUNDLE_SESSION_ID,
|
||||
bundle_seq,
|
||||
&camera,
|
||||
Some(video),
|
||||
audio,
|
||||
capture_start_us,
|
||||
capture_end_us,
|
||||
);
|
||||
} else if next.closed {
|
||||
video_done = true;
|
||||
}
|
||||
}
|
||||
next = audio_queue.pop_fresh(), if !audio_done => {
|
||||
if next.dropped_stale > 0 {
|
||||
tracing::warn!(
|
||||
dropped_stale = next.dropped_stale,
|
||||
queue_depth = next.queue_depth,
|
||||
"🧪 sync probe audio queue dropped stale packets"
|
||||
);
|
||||
}
|
||||
if let Some(mut packet) = next.packet {
|
||||
stamp_probe_audio_packet(
|
||||
&mut packet,
|
||||
&mut audio_seq,
|
||||
next.queue_depth,
|
||||
probe_start,
|
||||
);
|
||||
write_probe_audio_dump(audio_dump.as_mut(), &packet);
|
||||
pending_audio.push(packet);
|
||||
retain_newest_probe_audio(&mut pending_audio);
|
||||
} else if next.closed {
|
||||
audio_done = true;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
if let Some(file) = audio_dump.as_mut() {
|
||||
let _ = file.flush();
|
||||
}
|
||||
if let Some(file) = send_log.as_mut() {
|
||||
let _ = file.flush();
|
||||
}
|
||||
};
|
||||
let mut response = client
|
||||
.stream_webcam_media(Request::new(outbound))
|
||||
.await
|
||||
.context("starting bundled sync probe webcam stream")?;
|
||||
while response.get_mut().message().await.transpose().is_some() {}
|
||||
Ok::<(), anyhow::Error>(())
|
||||
});
|
||||
|
||||
bundled_task
|
||||
.await
|
||||
.context("joining bundled sync probe stream")?
|
||||
.context("bundled sync probe task failed")
|
||||
}
|
||||
|
||||
/// Build one outgoing synthetic A/V bundle after local pairing is complete.
|
||||
///
|
||||
/// Inputs: bundle identity, negotiated camera profile, paired video/audio, and
|
||||
/// precomputed capture bounds.
|
||||
/// Outputs: the exact `UpstreamMediaBundle` yielded to the server RPC.
|
||||
/// Why: the server's HEVC path depends on video codec metadata, audio format,
|
||||
/// and capture bounds staying attached to the same physical bundle that leaves
|
||||
/// the client.
|
||||
fn build_probe_bundle(
|
||||
session_id: u64,
|
||||
seq: u64,
|
||||
camera: &CameraConfig,
|
||||
video: Option<VideoPacket>,
|
||||
audio: Vec<AudioPacket>,
|
||||
capture_start_us: u64,
|
||||
capture_end_us: u64,
|
||||
) -> UpstreamMediaBundle {
|
||||
UpstreamMediaBundle {
|
||||
session_id,
|
||||
seq,
|
||||
capture_start_us,
|
||||
capture_end_us,
|
||||
video,
|
||||
audio,
|
||||
audio_sample_rate: 48_000,
|
||||
audio_channels: 2,
|
||||
video_width: camera.width,
|
||||
video_height: camera.height,
|
||||
video_fps: camera.fps,
|
||||
}
|
||||
}
|
||||
|
||||
/// Drain one short audio grace window when a video packet arrives first.
|
||||
///
|
||||
/// Inputs: audio queue, pending audio buffer, sequence state, debug dump, and
|
||||
/// the probe clock.
|
||||
/// Outputs: at most one newly stamped audio packet in `pending_audio`.
|
||||
/// Why: real bundled transport pairs nearby mic and camera captures before
|
||||
/// sending; the synthetic probe needs to mimic that path closely.
|
||||
async fn collect_probe_audio_grace(
|
||||
audio_queue: &FreshPacketQueue<AudioPacket>,
|
||||
pending_audio: &mut Vec<AudioPacket>,
|
||||
audio_done: &mut bool,
|
||||
audio_seq: &mut u64,
|
||||
audio_dump: Option<&mut File>,
|
||||
probe_start: Instant,
|
||||
) {
|
||||
if *audio_done || !pending_audio.is_empty() {
|
||||
return;
|
||||
}
|
||||
let Ok(next) = tokio::time::timeout(PROBE_BUNDLE_AUDIO_GRACE, audio_queue.pop_fresh()).await
|
||||
else {
|
||||
return;
|
||||
};
|
||||
if let Some(mut packet) = next.packet {
|
||||
stamp_probe_audio_packet(&mut packet, audio_seq, next.queue_depth, probe_start);
|
||||
write_probe_audio_dump(audio_dump, &packet);
|
||||
pending_audio.push(packet);
|
||||
retain_newest_probe_audio(pending_audio);
|
||||
} else if next.closed {
|
||||
*audio_done = true;
|
||||
}
|
||||
}
|
||||
|
||||
/// Stamp one synthetic audio packet with client-side transport telemetry.
|
||||
///
|
||||
/// Inputs: mutable packet, sequence counter, queue depth, and probe clock.
|
||||
/// Outputs: packet sidecar fields used by server freshness telemetry.
|
||||
/// Why: server diagnostics compare capture, send, receive, and sink timing, so
|
||||
/// synthetic packets need the same metadata as physical capture packets.
|
||||
fn stamp_probe_audio_packet(
|
||||
packet: &mut AudioPacket,
|
||||
seq: &mut u64,
|
||||
queue_depth: usize,
|
||||
probe_start: Instant,
|
||||
) {
|
||||
*seq = seq.saturating_add(1);
|
||||
let capture_pts_us = packet.pts;
|
||||
let send_pts_us = probe_elapsed_us(probe_start).max(capture_pts_us);
|
||||
packet.seq = *seq;
|
||||
packet.client_capture_pts_us = capture_pts_us;
|
||||
packet.client_send_pts_us = send_pts_us;
|
||||
packet.client_queue_depth = queue_depth.try_into().unwrap_or(u32::MAX);
|
||||
packet.client_queue_age_ms = packet_age_ms(capture_pts_us, send_pts_us);
|
||||
}
|
||||
|
||||
/// Stamp one synthetic video packet with client-side transport telemetry.
|
||||
///
|
||||
/// Inputs: mutable packet, sequence counter, queue depth, negotiated FPS, and
|
||||
/// probe clock.
|
||||
/// Outputs: packet sidecar fields used by server freshness telemetry.
|
||||
/// Why: video freshness must be measured against generated capture PTS, not a
|
||||
/// later wall-clock moment after MJPEG encoding or gRPC buffering.
|
||||
fn stamp_probe_video_packet(
|
||||
packet: &mut VideoPacket,
|
||||
seq: &mut u64,
|
||||
queue_depth: usize,
|
||||
fps: u32,
|
||||
probe_start: Instant,
|
||||
) {
|
||||
*seq = seq.saturating_add(1);
|
||||
let capture_pts_us = packet.pts;
|
||||
let send_pts_us = probe_elapsed_us(probe_start).max(capture_pts_us);
|
||||
packet.seq = *seq;
|
||||
packet.effective_fps = fps;
|
||||
packet.client_capture_pts_us = capture_pts_us;
|
||||
packet.client_send_pts_us = send_pts_us;
|
||||
packet.client_queue_depth = queue_depth.try_into().unwrap_or(u32::MAX);
|
||||
packet.client_queue_age_ms = packet_age_ms(capture_pts_us, send_pts_us);
|
||||
}
|
||||
|
||||
/// Return elapsed probe time in microseconds on the synthetic capture clock.
|
||||
///
|
||||
/// Inputs: probe monotonic start instant.
|
||||
/// Outputs: saturating microsecond timestamp.
|
||||
/// Why: send-age telemetry must share the same origin as the generated media
|
||||
/// PTS so local queue age is not inflated by setup work.
|
||||
fn probe_elapsed_us(probe_start: Instant) -> u64 {
|
||||
probe_start.elapsed().as_micros().min(u64::MAX as u128) as u64
|
||||
}
|
||||
|
||||
/// Calculate client-local queue age for one packet.
|
||||
///
|
||||
/// Inputs: packet capture PTS and send PTS in microseconds.
|
||||
/// Outputs: saturating age in milliseconds.
|
||||
/// Why: the server freshness budget drops already-stale bundles before they can
|
||||
/// distort RCT sync evidence.
|
||||
fn packet_age_ms(capture_pts_us: u64, send_pts_us: u64) -> u32 {
|
||||
(send_pts_us.saturating_sub(capture_pts_us) / 1_000)
|
||||
.try_into()
|
||||
.unwrap_or(u32::MAX)
|
||||
}
|
||||
|
||||
/// Append raw audio bytes when a debug dump is requested.
|
||||
///
|
||||
/// Inputs: optional dump file and one stamped audio packet.
|
||||
/// Outputs: best-effort write to the dump file.
|
||||
/// Why: audio decoding failures in the RCT analyzer are easier to debug when
|
||||
/// the exact sent PCM can be inspected.
|
||||
fn write_probe_audio_dump(file: Option<&mut File>, packet: &AudioPacket) {
|
||||
if let Some(file) = file {
|
||||
let _ = file.write_all(&packet.data);
|
||||
}
|
||||
}
|
||||
|
||||
/// Write one client-side bundled send telemetry row.
|
||||
///
|
||||
/// Inputs: optional log file, bundle sequence, probe clock, and media packets.
|
||||
/// Outputs: JSONL sidecar used by manual transport probes.
|
||||
/// Why: when RCT freshness fails, this shows whether the client yielded stale
|
||||
/// synthetic media or whether delay accumulated after gRPC accepted the bundle.
|
||||
fn write_probe_send_log(
|
||||
file: Option<&mut File>,
|
||||
bundle_seq: u64,
|
||||
probe_start: Instant,
|
||||
video: Option<&VideoPacket>,
|
||||
audio: &[AudioPacket],
|
||||
) {
|
||||
let Some(file) = file else {
|
||||
return;
|
||||
};
|
||||
let send_elapsed_us = probe_elapsed_us(probe_start);
|
||||
let unix_ns = std::time::SystemTime::now()
|
||||
.duration_since(std::time::UNIX_EPOCH)
|
||||
.map(|duration| duration.as_nanos().min(u64::MAX as u128) as u64)
|
||||
.unwrap_or_default();
|
||||
let video_pts_us = video.map(packet_video_capture_pts_us);
|
||||
let video_bytes = video.map(|packet| packet.data.len()).unwrap_or_default();
|
||||
let audio_first_pts_us = audio.first().map(packet_audio_capture_pts_us);
|
||||
let audio_last_pts_us = audio.last().map(packet_audio_capture_pts_us);
|
||||
let audio_bytes: usize = audio.iter().map(|packet| packet.data.len()).sum();
|
||||
let max_capture_pts_us = video_pts_us
|
||||
.into_iter()
|
||||
.chain(audio_last_pts_us)
|
||||
.max()
|
||||
.unwrap_or_default();
|
||||
let local_age_ms = send_elapsed_us.saturating_sub(max_capture_pts_us) as f64 / 1000.0;
|
||||
let _ = writeln!(
|
||||
file,
|
||||
"{{\"schema\":\"lesavka.sync-probe-send.v1\",\"bundle_seq\":{},\"send_unix_ns\":{},\"send_elapsed_us\":{},\"video_capture_pts_us\":{},\"video_bytes\":{},\"audio_packets\":{},\"audio_first_capture_pts_us\":{},\"audio_last_capture_pts_us\":{},\"audio_bytes\":{},\"local_age_ms\":{:.3}}}",
|
||||
bundle_seq,
|
||||
unix_ns,
|
||||
send_elapsed_us,
|
||||
optional_u64_json(video_pts_us),
|
||||
video_bytes,
|
||||
audio.len(),
|
||||
optional_u64_json(audio_first_pts_us),
|
||||
optional_u64_json(audio_last_pts_us),
|
||||
audio_bytes,
|
||||
local_age_ms,
|
||||
);
|
||||
}
|
||||
|
||||
/// Format an optional integer for the debug JSONL writer.
|
||||
///
|
||||
/// Inputs: optional unsigned integer.
|
||||
/// Outputs: JSON number text or `null`.
|
||||
/// Why: the send log is intentionally dependency-free so the manual probe can
|
||||
/// keep running even if serde support changes.
|
||||
fn optional_u64_json(value: Option<u64>) -> String {
|
||||
value
|
||||
.map(|value| value.to_string())
|
||||
.unwrap_or_else(|| "null".to_string())
|
||||
}
|
||||
|
||||
/// Keep only the newest bounded audio packets for the next video bundle.
|
||||
///
|
||||
/// Inputs: mutable pending audio packet list.
|
||||
/// Outputs: list trimmed in place.
|
||||
/// Why: stale audio continuity is less useful than preserving a live capture
|
||||
/// relationship with the next flash frame.
|
||||
fn retain_newest_probe_audio(pending_audio: &mut Vec<AudioPacket>) {
|
||||
if pending_audio.len() > PROBE_BUNDLE_MAX_AUDIO_PACKETS {
|
||||
let dropped = pending_audio.len() - PROBE_BUNDLE_MAX_AUDIO_PACKETS;
|
||||
pending_audio.drain(..dropped);
|
||||
}
|
||||
}
|
||||
|
||||
/// Retain only audio packets close enough to travel with one video frame.
|
||||
///
|
||||
/// Inputs: pending audio packet list and video capture PTS.
|
||||
/// Outputs: list filtered and capped in place.
|
||||
/// Why: client->server sync testing should send physically plausible bundles,
|
||||
/// not arbitrary pending audio that only happened to be in memory.
|
||||
fn retain_probe_audio_for_video(pending_audio: &mut Vec<AudioPacket>, video_pts_us: u64) {
|
||||
let min_pts = video_pts_us.saturating_sub(PROBE_BUNDLE_AUDIO_WINDOW_BEFORE_US);
|
||||
let max_pts = video_pts_us.saturating_add(PROBE_BUNDLE_AUDIO_WINDOW_AFTER_US);
|
||||
pending_audio.retain(|packet| {
|
||||
let pts = packet_audio_capture_pts_us(packet);
|
||||
pts >= min_pts && pts <= max_pts
|
||||
});
|
||||
retain_newest_probe_audio(pending_audio);
|
||||
}
|
||||
|
||||
/// Return capture PTS bounds for one outgoing bundle.
|
||||
///
|
||||
/// Inputs: optional video packet and audio packets.
|
||||
/// Outputs: `(start_us, end_us)` capture span for the bundle message.
|
||||
/// Why: the server uses this span as a plausibility check before trusting the
|
||||
/// client capture clock for lip-sync scheduling.
|
||||
fn probe_bundle_capture_bounds(video: Option<&VideoPacket>, audio: &[AudioPacket]) -> (u64, u64) {
|
||||
let mut start = u64::MAX;
|
||||
let mut end = 0_u64;
|
||||
if let Some(video) = video {
|
||||
let pts = packet_video_capture_pts_us(video);
|
||||
start = start.min(pts);
|
||||
end = end.max(pts);
|
||||
}
|
||||
for packet in audio {
|
||||
let pts = packet_audio_capture_pts_us(packet);
|
||||
start = start.min(pts);
|
||||
end = end.max(pts);
|
||||
}
|
||||
if start == u64::MAX {
|
||||
let now = crate::live_capture_clock::capture_pts_us();
|
||||
return (now, now);
|
||||
}
|
||||
(start, end.max(start))
|
||||
}
|
||||
|
||||
/// Read the capture PTS from an audio packet sidecar or legacy PTS field.
|
||||
///
|
||||
/// Inputs: one audio packet.
|
||||
/// Outputs: capture timestamp in microseconds.
|
||||
/// Why: old callers may only populate `pts`, while the bundled path prefers the
|
||||
/// explicit client capture sidecar.
|
||||
fn packet_audio_capture_pts_us(packet: &AudioPacket) -> u64 {
|
||||
if packet.client_capture_pts_us == 0 {
|
||||
packet.pts
|
||||
} else {
|
||||
packet.client_capture_pts_us
|
||||
}
|
||||
}
|
||||
|
||||
/// Read the capture PTS from a video packet sidecar or legacy PTS field.
|
||||
///
|
||||
/// Inputs: one video packet.
|
||||
/// Outputs: capture timestamp in microseconds.
|
||||
/// Why: the probe sender and server both need one stable clock field while
|
||||
/// older test packets may still use `pts` directly.
|
||||
fn packet_video_capture_pts_us(packet: &VideoPacket) -> u64 {
|
||||
if packet.client_capture_pts_us == 0 {
|
||||
packet.pts
|
||||
} else {
|
||||
packet.client_capture_pts_us
|
||||
}
|
||||
}
|
||||
|
||||
/// Open an optional debug sidecar path named by an environment variable.
|
||||
///
|
||||
/// Inputs: environment variable name.
|
||||
/// Outputs: `None` when unset, otherwise a writable file handle.
|
||||
/// Why: manual transport probes need rich artifacts, but normal probe runs
|
||||
/// should not pay for debug file creation unless explicitly requested.
|
||||
fn open_debug_dump(env_var: &str) -> Result<Option<File>> {
|
||||
let Some(path) = std::env::var_os(env_var) else {
|
||||
return Ok(None);
|
||||
};
|
||||
let path = PathBuf::from(path);
|
||||
let file = File::create(&path)
|
||||
.with_context(|| format!("creating debug dump at {}", path.display()))?;
|
||||
Ok(Some(file))
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
#[path = "bundled_transport/tests.rs"]
|
||||
mod tests;
|
||||
446
client/src/sync_probe/runner/bundled_transport/tests.rs
Normal file
446
client/src/sync_probe/runner/bundled_transport/tests.rs
Normal file
@ -0,0 +1,446 @@
|
||||
use super::*;
|
||||
use crate::input::camera::{CameraCodec, CameraConfig};
|
||||
|
||||
/// `(width, height, fps)` camera modes the local HEVC bundle audit must cover —
/// the four resolution/fps combinations advertised for the RCT path (see the
/// mode-coverage test below).
const SUPPORTED_HEVC_AUDIT_MODES: &[(u32, u32, u32)] = &[
    (1280, 720, 20),
    (1280, 720, 30),
    (1920, 1080, 20),
    (1920, 1080, 30),
];
|
||||
|
||||
#[test]
/// Verifies the local HEVC probe bundle contains exactly the metadata the server needs.
///
/// Inputs: one synthetic HEVC access unit and nearby PCM audio packets on the
/// same capture clock. Outputs: assertions only. Why: this catches regressions
/// where the client silently sends plausible media bytes but drops the bundled
/// timing/profile fields that let the server map HEVC video and audio together.
fn hevc_probe_bundle_preserves_paired_media_and_server_metadata() {
    let camera = CameraConfig {
        codec: CameraCodec::Hevc,
        width: 1280,
        height: 720,
        fps: 30,
    };
    assert!(matches!(camera.codec, CameraCodec::Hevc));

    let probe_start = Instant::now();
    // Annex-B start code plus identity bytes; 0x26 resembles an HEVC IDR NAL
    // header but the payload is not decodable media — identity only.
    let mut video = VideoPacket {
        pts: 1_000_000,
        data: vec![0, 0, 0, 1, 0x26, 0xaa, 0xbb],
        ..Default::default()
    };
    let mut video_seq = 0;
    stamp_probe_video_packet(&mut video, &mut video_seq, 2, camera.fps, probe_start);

    // Two PCM packets bracketing the video PTS (-50 ms / +10 ms) so pairing
    // keeps both.
    let mut audio = vec![
        AudioPacket {
            pts: 950_000,
            data: vec![0; 1_920],
            ..Default::default()
        },
        AudioPacket {
            pts: 1_010_000,
            data: vec![1; 1_920],
            ..Default::default()
        },
    ];
    let mut audio_seq = 0;
    for packet in &mut audio {
        stamp_probe_audio_packet(packet, &mut audio_seq, 3, probe_start);
    }
    retain_probe_audio_for_video(&mut audio, packet_video_capture_pts_us(&video));
    assert_eq!(
        audio.len(),
        2,
        "nearby audio should travel with the HEVC frame"
    );

    let (capture_start_us, capture_end_us) = probe_bundle_capture_bounds(Some(&video), &audio);
    let bundle = build_probe_bundle(
        PROBE_BUNDLE_SESSION_ID,
        7,
        &camera,
        Some(video),
        audio,
        capture_start_us,
        capture_end_us,
    );

    // From here on `video` shadows the packet as it appears inside the bundle.
    let video = bundle.video.as_ref().expect("bundled HEVC video");
    assert_eq!(bundle.session_id, PROBE_BUNDLE_SESSION_ID);
    assert_eq!(bundle.seq, 7);
    assert_eq!(bundle.video_width, 1280);
    assert_eq!(bundle.video_height, 720);
    assert_eq!(bundle.video_fps, 30);
    assert_eq!(bundle.audio_sample_rate, 48_000);
    assert_eq!(bundle.audio_channels, 2);
    assert!(!bundle.audio.is_empty());
    assert!(video.data.windows(4).any(|window| window == [0, 0, 0, 1]));
    assert!(bundle.capture_start_us <= video.client_capture_pts_us);
    assert!(bundle.capture_end_us >= video.client_capture_pts_us);
    assert!(
        bundle.audio.iter().all(
            |packet| packet.client_capture_pts_us <= bundle.capture_end_us
                && packet.client_capture_pts_us >= bundle.capture_start_us
        ),
        "bundle capture bounds should include every paired audio packet"
    );
}
|
||||
|
||||
#[test]
/// Verifies a synthetic HEVC event train leaves as paired A/V bundles.
///
/// Inputs: sixteen fake HEVC access units and nearby PCM packets on the same
/// client capture clock. Outputs: assertions over the exact
/// `UpstreamMediaBundle` messages that would enter the gRPC stream. Why: the
/// client-to-server probe is only useful if every coded flash keeps its nearby
/// tone packets and profile metadata before the server sees it.
fn hevc_probe_bundle_train_keeps_every_coded_flash_with_nearby_audio() {
    let camera = default_hevc_probe_camera();
    assert_hevc_probe_bundle_train(&camera, &build_hevc_probe_bundle_train_for_camera(camera));
}
|
||||
|
||||
#[test]
/// Proves every supported HEVC transport mode can build a complete local A/V train.
///
/// Inputs: the four resolution/fps combinations Lesavka advertises for the RCT
/// path. Outputs: assertions over the bundled media units only. Why: the next
/// hardware loop should fail on transport or server decode if it fails at all,
/// not because one mode's synthetic flash/tone source cannot produce all 16
/// physically paired HEVC+audio bundles.
fn hevc_probe_bundle_train_covers_every_supported_mode() {
    for mode in SUPPORTED_HEVC_AUDIT_MODES {
        let (width, height, fps) = *mode;
        let camera = hevc_probe_camera(width, height, fps);
        assert_hevc_probe_bundle_train(&camera, &build_hevc_probe_bundle_train_for_camera(camera));
    }
}
|
||||
|
||||
/// Checks one synthetic bundle train against the transport invariants.
///
/// Inputs: the camera profile used to build the train and the resulting
/// `UpstreamMediaBundle` messages. Outputs: assertions only. Why: mode-specific
/// tests should share one invariant definition so 20fps/30fps coverage cannot
/// quietly drift from the canonical HEVC probe behavior.
fn assert_hevc_probe_bundle_train(camera: &CameraConfig, bundles: &[UpstreamMediaBundle]) {
    assert_eq!(bundles.len(), 16);
    for (idx, bundle) in bundles.iter().enumerate() {
        // Event codes are one-based; the builder uses them as bundle seq too.
        let event_code = idx as u64 + 1;
        let video = bundle.video.as_ref().expect("HEVC video packet");
        assert_eq!(bundle.session_id, PROBE_BUNDLE_SESSION_ID);
        assert_eq!(bundle.seq, event_code);
        assert_eq!(bundle.video_width, camera.width);
        assert_eq!(bundle.video_height, camera.height);
        assert_eq!(bundle.video_fps, camera.fps);
        assert_eq!(bundle.audio_sample_rate, 48_000);
        assert_eq!(bundle.audio_channels, 2);
        assert_eq!(bundle.audio.len(), 2);
        // Annex-B start code must survive bundling.
        assert!(video.data.windows(4).any(|window| window == [0, 0, 0, 1]));
        assert_eq!(video.effective_fps, camera.fps);
        assert!(video.client_capture_pts_us > 0);
        assert!(video.client_send_pts_us >= video.client_capture_pts_us);
        // The bundle's advertised capture span must bracket the video PTS...
        assert!(
            bundle.capture_start_us <= video.client_capture_pts_us
                && bundle.capture_end_us >= video.client_capture_pts_us
        );
        // ...and every audio packet, with sane per-packet clocks and full
        // 1,920-byte PCM payloads.
        assert!(bundle.audio.iter().all(|packet| {
            packet.client_capture_pts_us >= bundle.capture_start_us
                && packet.client_capture_pts_us <= bundle.capture_end_us
                && packet.client_send_pts_us >= packet.client_capture_pts_us
                && packet.data.len() == 1_920
        }));
        // At least one tone packet on each side of the flash PTS proves the
        // pairing kept physical context, not just a count.
        assert!(
            bundle
                .audio
                .iter()
                .any(|packet| { packet.client_capture_pts_us < video.client_capture_pts_us })
                && bundle
                    .audio
                    .iter()
                    .any(|packet| { packet.client_capture_pts_us > video.client_capture_pts_us }),
            "event {event_code} should carry tone packets around the flash PTS"
        );
    }
}
|
||||
|
||||
#[test]
/// Stress-tests the local HEVC bundle train under freshness-biased delivery.
///
/// Inputs: the same sixteen synthetic bundles used by the client-to-server-RCT
/// probe plus a deterministic WAN jitter profile. Outputs: assertions that
/// stale bundles are dropped as whole A/V units while the remaining evidence
/// still meets the analyzer's 13-pair floor. Why: the next hardware run should
/// test real transport, not discover that our synthetic client media can split
/// tones from flashes when freshness pressure appears.
fn hevc_probe_bundle_train_drops_stale_events_as_complete_av_units_under_jitter() {
    let bundles = build_hevc_probe_bundle_train();
    // Indices 3, 8, and 13 (1.45 s, 1.82 s, 1.12 s) exceed the 1 s freshness
    // budget below and must force whole-bundle drops (seqs 4, 9, 14).
    let simulated_network_delay_ms = [
        44_u64, 72, 96, 1_450, 118, 64, 88, 77, 1_820, 91, 70, 109, 83, 1_120, 93, 66,
    ];
    let max_age_ms = 1_000_u64;
    let mut delivered = Vec::new();
    let mut dropped = Vec::new();

    for (bundle, delay_ms) in bundles.iter().zip(simulated_network_delay_ms) {
        let video = bundle.video.as_ref().expect("HEVC video packet");
        // Observed age = simulated network delay + local capture->send latency.
        let observed_age_ms = delay_ms
            + video
                .client_send_pts_us
                .saturating_sub(video.client_capture_pts_us)
                / 1_000;
        if observed_age_ms > max_age_ms {
            dropped.push(bundle.seq);
            continue;
        }
        // Surviving bundles must still hold only tone packets within 120 ms of
        // the flash — freshness pressure may not split A from V.
        assert!(
            bundle.audio.iter().all(|packet| {
                let skew_us = packet
                    .client_capture_pts_us
                    .abs_diff(video.client_capture_pts_us);
                skew_us <= 120_000
            }),
            "fresh bundles must keep only nearby tone packets"
        );
        delivered.push(bundle.seq);
    }

    assert_eq!(dropped, vec![4, 9, 14]);
    assert_eq!(delivered.len(), 13);
    assert!(
        delivered.windows(2).all(|window| window[1] > window[0]),
        "freshness drops must not reorder the surviving coded event train"
    );
}
|
||||
|
||||
#[test]
/// Writes and validates a local manifest for the outgoing synthetic HEVC bundle train.
///
/// Inputs: sixteen synthetic HEVC flash packets plus nearby PCM tone packets.
/// Outputs: a JSON audit manifest, optionally copied to
/// `LESAVKA_LOCAL_HEVC_BUNDLE_AUDIT_JSON`. Why: this is a passwordless
/// preflight for the client side of the HEVC plan, proving the server will
/// receive one complete bundled train before any WAN/RCT hardware can add
/// noise.
fn hevc_probe_bundle_audit_writes_manifest_for_local_preflight() {
    let bundles = build_hevc_probe_bundle_train();
    let manifest = hevc_bundle_audit_manifest(&bundles);

    assert_eq!(manifest["schema"], "lesavka.local-hevc-bundle-audit.v1");
    assert_eq!(manifest["summary"]["bundles"], 16);
    assert_eq!(manifest["summary"]["coded_video_events"], 16);
    assert_eq!(manifest["summary"]["bundles_with_audio_before_video"], 16);
    assert_eq!(manifest["summary"]["bundles_with_audio_after_video"], 16);
    assert_eq!(manifest["summary"]["annex_b_video_events"], 16);
    assert_eq!(manifest["summary"]["monotonic_bundle_sequences"], true);
    assert_eq!(manifest["summary"]["metadata_mode"], "1920x1080@30");
    assert_eq!(manifest["summary"]["video_codec"], "hevc");

    // Round-trip through disk to prove the persisted artifact is valid JSON.
    let audit_file = tempfile::NamedTempFile::new().expect("audit tempfile");
    write_hevc_bundle_audit(audit_file.path(), &manifest);
    let persisted: serde_json::Value =
        serde_json::from_slice(&std::fs::read(audit_file.path()).expect("audit bytes"))
            .expect("audit json");
    assert_eq!(persisted["summary"]["bundles"], 16);

    // Optional operator-visible copy for manual preflight runs.
    if let Some(path) = std::env::var_os("LESAVKA_LOCAL_HEVC_BUNDLE_AUDIT_JSON") {
        write_hevc_bundle_audit(std::path::Path::new(&path), &manifest);
    }
}
|
||||
|
||||
/// Builds the local HEVC probe train exactly like the client transport harness should emit it.
///
/// Inputs are fixed so the audit remains deterministic: 16 coded video flashes at
/// `1920x1080@30` plus nearby audio tone packets. The output is a sequence of
/// bundled media units with one HEVC-like video packet and the audio packets that
/// should stay physically paired before transport.
fn build_hevc_probe_bundle_train() -> Vec<UpstreamMediaBundle> {
    // Delegate to the mode-parameterized builder with the canonical audit camera.
    build_hevc_probe_bundle_train_for_camera(default_hevc_probe_camera())
}
|
||||
|
||||
/// Returns the canonical local audit mode used by the persisted manifest.
///
/// Inputs: none. Output: the 1080p30 HEVC camera profile. Why: the manifest is
/// intentionally stable for easy comparison across runs, while a separate test
/// covers every advertised transport mode.
fn default_hevc_probe_camera() -> CameraConfig {
    // Must stay in sync with the manifest's `metadata_mode` of "1920x1080@30".
    hevc_probe_camera(1920, 1080, 30)
}
|
||||
|
||||
/// Builds one HEVC camera profile for local bundle-audit tests.
///
/// Inputs: width, height, and fps. Output: `CameraConfig` with HEVC selected.
/// Why: keeping mode construction in one place avoids accidentally testing MJPEG
/// metadata while hardening the HEVC client transport path.
fn hevc_probe_camera(width: u32, height: u32, fps: u32) -> CameraConfig {
    // Codec is pinned to HEVC; only the geometry and cadence vary per test.
    CameraConfig {
        codec: CameraCodec::Hevc,
        width,
        height,
        fps,
    }
}
|
||||
|
||||
/// Builds the local HEVC probe train for one camera mode.
///
/// Inputs are fixed except for the camera profile: 16 coded video flashes plus
/// nearby audio tone packets. Output: bundled media units with one HEVC-like
/// video packet and the audio packets that should stay physically paired before
/// transport.
fn build_hevc_probe_bundle_train_for_camera(camera: CameraConfig) -> Vec<UpstreamMediaBundle> {
    let probe_start = Instant::now();
    // Sequence counters are shared across the whole train so packet seqs keep
    // increasing from bundle to bundle.
    let mut video_seq = 0;
    let mut audio_seq = 0;
    let mut bundles = Vec::new();

    for event_index in 0..16_u64 {
        // Events start at t=4 s and tick once per second; codes are one-based.
        let event_pts_us = 4_000_000 + event_index * 1_000_000;
        let event_code = event_index + 1;
        // Annex-B start code, HEVC-looking header byte, then the event code at
        // byte index 5 — the manifest reads it back from there.
        let mut video = VideoPacket {
            pts: event_pts_us,
            data: vec![0, 0, 0, 1, 0x26, event_code as u8, 0xaa],
            ..Default::default()
        };
        stamp_probe_video_packet(&mut video, &mut video_seq, 1, camera.fps, probe_start);

        // Three candidate tone packets: -200 ms, -20 ms, +10 ms relative to the
        // flash. The -200 ms one presumably falls outside the pairing window —
        // the assert below pins the surviving count at exactly two.
        let mut audio = vec![
            AudioPacket {
                pts: event_pts_us.saturating_sub(200_000),
                data: vec![0xee; 1_920],
                ..Default::default()
            },
            AudioPacket {
                pts: event_pts_us.saturating_sub(20_000),
                data: vec![event_code as u8; 1_920],
                ..Default::default()
            },
            AudioPacket {
                pts: event_pts_us.saturating_add(10_000),
                data: vec![event_code as u8; 1_920],
                ..Default::default()
            },
        ];
        for packet in &mut audio {
            stamp_probe_audio_packet(packet, &mut audio_seq, 2, probe_start);
        }

        retain_probe_audio_for_video(&mut audio, packet_video_capture_pts_us(&video));
        assert_eq!(
            audio.len(),
            2,
            "event {event_code} should keep only nearby tone packets"
        );
        let (capture_start_us, capture_end_us) = probe_bundle_capture_bounds(Some(&video), &audio);
        bundles.push(build_probe_bundle(
            PROBE_BUNDLE_SESSION_ID,
            event_code,
            &camera,
            Some(video),
            audio,
            capture_start_us,
            capture_end_us,
        ));
    }

    bundles
}
|
||||
|
||||
/// Converts a bundle train into the JSON evidence consumed by the local audit script.
///
/// The input transport bundle train is recorded before involving the server: coded video visibility,
/// Annex-B framing, monotonic bundle sequence numbers, and audio packets on both
/// sides of each video timestamp.
fn hevc_bundle_audit_manifest(bundles: &[UpstreamMediaBundle]) -> serde_json::Value {
    // Summary counters are accumulated by the per-bundle closure below, so the
    // `events` map must run (and does, via collect) before the summary is built.
    let mut previous_seq = 0_u64;
    let mut monotonic = true;
    let mut coded_video_events = 0usize;
    let mut annex_b_video_events = 0usize;
    let mut audio_before_video = 0usize;
    let mut audio_after_video = 0usize;
    let mut total_audio_packets = 0usize;
    let events = bundles
        .iter()
        .map(|bundle| {
            // Monotonic only if every seq strictly increases (seqs start above 0).
            monotonic &= bundle.seq > previous_seq;
            previous_seq = bundle.seq;
            let video = bundle.video.as_ref().expect("audit video packet");
            let has_annex_b = video.data.windows(4).any(|window| window == [0, 0, 0, 1]);
            annex_b_video_events += usize::from(has_annex_b);
            coded_video_events += usize::from(!video.data.is_empty());
            let before = bundle
                .audio
                .iter()
                .filter(|packet| packet.client_capture_pts_us < video.client_capture_pts_us)
                .count();
            let after = bundle
                .audio
                .iter()
                .filter(|packet| packet.client_capture_pts_us > video.client_capture_pts_us)
                .count();
            let audio_capture_pts_us = bundle
                .audio
                .iter()
                .map(|packet| packet.client_capture_pts_us)
                .collect::<Vec<_>>();
            let max_audio_video_skew_us = bundle
                .audio
                .iter()
                .map(|packet| {
                    packet
                        .client_capture_pts_us
                        .abs_diff(video.client_capture_pts_us)
                })
                .max()
                .unwrap_or(0);
            audio_before_video += usize::from(before > 0);
            audio_after_video += usize::from(after > 0);
            total_audio_packets += bundle.audio.len();
            serde_json::json!({
                "bundle_seq": bundle.seq,
                // The train builder stores the event code at video byte index 5.
                "event_code": video.data.get(5).copied().unwrap_or_default(),
                "video_capture_pts_us": video.client_capture_pts_us,
                "video_send_pts_us": video.client_send_pts_us,
                "video_bytes": video.data.len(),
                "has_annex_b_start_code": has_annex_b,
                "audio_packets": bundle.audio.len(),
                "audio_capture_pts_us": audio_capture_pts_us,
                "max_audio_video_skew_us": max_audio_video_skew_us,
                "audio_before_video": before,
                "audio_after_video": after,
                "capture_start_us": bundle.capture_start_us,
                "capture_end_us": bundle.capture_end_us,
            })
        })
        .collect::<Vec<_>>();

    serde_json::json!({
        "schema": "lesavka.local-hevc-bundle-audit.v1",
        "summary": {
            "video_codec": "hevc",
            "metadata_mode": "1920x1080@30",
            "bundles": bundles.len(),
            "coded_video_events": coded_video_events,
            "annex_b_video_events": annex_b_video_events,
            "audio_packets": total_audio_packets,
            "bundles_with_audio_before_video": audio_before_video,
            "bundles_with_audio_after_video": audio_after_video,
            "monotonic_bundle_sequences": monotonic,
        },
        "events": events,
    })
}
|
||||
|
||||
/// Persists local audit evidence so later remote failures have a known-good client-side artifact.
|
||||
///
|
||||
/// `path` is the destination JSON file; parent directories are created on demand.
|
||||
/// `manifest` is written as pretty JSON and the function panics in tests if the
|
||||
/// local filesystem cannot store the evidence.
|
||||
fn write_hevc_bundle_audit(path: &std::path::Path, manifest: &serde_json::Value) {
|
||||
if let Some(parent) = path.parent() {
|
||||
std::fs::create_dir_all(parent).expect("audit parent");
|
||||
}
|
||||
let file = std::fs::File::create(path).expect("audit file");
|
||||
serde_json::to_writer_pretty(file, manifest).expect("audit json write");
|
||||
}
|
||||
@ -9,6 +9,7 @@ pub struct PulseSchedule {
|
||||
pulse_period: Duration,
|
||||
pulse_width: Duration,
|
||||
marker_tick_period: u32,
|
||||
event_width_codes: Vec<u32>,
|
||||
}
|
||||
|
||||
impl PulseSchedule {
|
||||
@ -34,6 +35,46 @@ impl PulseSchedule {
|
||||
pulse_period,
|
||||
pulse_width,
|
||||
marker_tick_period,
|
||||
event_width_codes: Vec::new(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Build a schedule whose pulses carry explicit event identities.
|
||||
///
|
||||
/// Inputs: timing parameters plus a finite code sequence.
|
||||
/// Outputs: a schedule that emits one normal-width pulse per code.
|
||||
/// Why: client-to-RCT tests need identity-rich pulses so final capture
|
||||
/// analysis can join observations back to the client timeline after startup
|
||||
/// drops.
|
||||
pub fn with_event_width_codes(
|
||||
warmup: Duration,
|
||||
pulse_period: Duration,
|
||||
pulse_width: Duration,
|
||||
marker_tick_period: u32,
|
||||
event_width_codes: Vec<u32>,
|
||||
) -> Self {
|
||||
/// Validate the event code sequence before it reaches renderers.
|
||||
///
|
||||
/// Inputs: event code slice.
|
||||
/// Outputs: panic on invalid operator/test configuration.
|
||||
/// Why: a zero code has no stable audio/video signature, and failing
|
||||
/// here is clearer than letting the final RCT analyzer miss events.
|
||||
fn validate_event_width_codes(event_width_codes: &[u32]) {
|
||||
assert!(
|
||||
!event_width_codes.is_empty(),
|
||||
"event width code list must not be empty"
|
||||
);
|
||||
assert!(
|
||||
event_width_codes.iter().all(|code| *code > 0),
|
||||
"event width codes must be positive"
|
||||
);
|
||||
}
|
||||
|
||||
validate_event_width_codes(&event_width_codes);
|
||||
let schedule = Self::new(warmup, pulse_period, pulse_width, marker_tick_period);
|
||||
Self {
|
||||
event_width_codes,
|
||||
..schedule
|
||||
}
|
||||
}
|
||||
|
||||
@ -53,6 +94,16 @@ impl PulseSchedule {
|
||||
self.pulse_width
|
||||
}
|
||||
|
||||
/// Return the explicit event identity sequence for coded probes.
|
||||
///
|
||||
/// Inputs: schedule state.
|
||||
/// Outputs: event code slice, empty for legacy marker-mode probes.
|
||||
/// Why: the generator and timeline writer must use the same code sequence
|
||||
/// that the RCT analyzer will use for final event pairing.
|
||||
pub fn event_width_codes(&self) -> &[u32] {
|
||||
&self.event_width_codes
|
||||
}
|
||||
|
||||
pub fn pulse_index(&self, pts: Duration) -> u64 {
|
||||
if pts < self.warmup_boundary() {
|
||||
return 0;
|
||||
@ -72,7 +123,16 @@ impl PulseSchedule {
|
||||
Duration::from_nanos(offset_ns)
|
||||
}
|
||||
|
||||
/// Decide whether a timestamp belongs to a legacy marker pulse.
|
||||
///
|
||||
/// Inputs: media PTS in the synthetic source timeline.
|
||||
/// Outputs: `true` only for widened marker pulses in non-coded mode.
|
||||
/// Why: coded probes use explicit color/tone identity, so marker widening
|
||||
/// must be disabled to avoid mixing two identity schemes.
|
||||
pub fn pulse_is_marker(&self, pts: Duration) -> bool {
|
||||
if !self.event_width_codes.is_empty() {
|
||||
return false;
|
||||
}
|
||||
pts >= self.warmup_boundary()
|
||||
&& self
|
||||
.pulse_index(pts)
|
||||
@ -88,12 +148,46 @@ impl PulseSchedule {
|
||||
if pts < self.warmup_boundary() {
|
||||
return false;
|
||||
}
|
||||
let width = if self.pulse_is_marker(pts) {
|
||||
let width = self.active_pulse_width(pts);
|
||||
if width.is_zero() {
|
||||
return false;
|
||||
}
|
||||
self.pulse_offset(pts) < width
|
||||
}
|
||||
|
||||
/// Return the event code active at a timestamp.
|
||||
///
|
||||
/// Inputs: media PTS in the synthetic source timeline.
|
||||
/// Outputs: one-based event code for coded probes, or `None`.
|
||||
/// Why: audio and video renderers need a shared identity decision per PTS
|
||||
/// so the same client-origin event reaches the server as one bundled unit.
|
||||
pub fn event_code(&self, pts: Duration) -> Option<u32> {
|
||||
if pts < self.warmup_boundary() {
|
||||
return None;
|
||||
}
|
||||
self.event_width_codes
|
||||
.get(self.pulse_index(pts) as usize)
|
||||
.copied()
|
||||
}
|
||||
|
||||
/// Return the physical pulse width active at a timestamp.
|
||||
///
|
||||
/// Inputs: media PTS in the synthetic source timeline.
|
||||
/// Outputs: actual flash/tone gate width.
|
||||
/// Why: coded probes carry identity through color and tone frequency, not by
|
||||
/// making late events multi-second-long and destroying cadence.
|
||||
pub fn active_pulse_width(&self, pts: Duration) -> Duration {
|
||||
if !self.event_width_codes.is_empty() {
|
||||
return self
|
||||
.event_code(pts)
|
||||
.map(|_| self.pulse_width)
|
||||
.unwrap_or(Duration::ZERO);
|
||||
}
|
||||
if self.pulse_is_marker(pts) {
|
||||
self.marker_pulse_width()
|
||||
} else {
|
||||
self.pulse_width
|
||||
};
|
||||
self.pulse_offset(pts) < width
|
||||
}
|
||||
}
|
||||
|
||||
pub fn warmup_boundary(&self) -> Duration {
|
||||
@ -215,6 +309,37 @@ mod tests {
|
||||
assert!(!schedule.flash_active(Duration::from_millis(2_200)));
|
||||
}
|
||||
|
||||
/// Verifies coded schedules keep identity without widening physical pulses.
|
||||
///
|
||||
/// Inputs: synthetic timing schedule.
|
||||
/// Outputs: assertions on code lookup, pulse width, and end-of-code gating.
|
||||
/// Why: final RCT analysis depends on color/tone identity while preserving
|
||||
/// a regular one-second cadence.
|
||||
#[test]
|
||||
fn coded_pulses_preserve_identity_and_duration() {
|
||||
let schedule = PulseSchedule::with_event_width_codes(
|
||||
Duration::from_secs(1),
|
||||
Duration::from_millis(1_000),
|
||||
Duration::from_millis(100),
|
||||
5,
|
||||
vec![1, 3],
|
||||
);
|
||||
|
||||
assert_eq!(schedule.event_width_codes(), &[1, 3]);
|
||||
assert_eq!(schedule.event_code(Duration::from_millis(1_000)), Some(1));
|
||||
assert_eq!(
|
||||
schedule.active_pulse_width(Duration::from_millis(1_000)),
|
||||
Duration::from_millis(100)
|
||||
);
|
||||
assert_eq!(schedule.event_code(Duration::from_millis(2_000)), Some(3));
|
||||
assert_eq!(
|
||||
schedule.active_pulse_width(Duration::from_millis(2_000)),
|
||||
Duration::from_millis(100)
|
||||
);
|
||||
assert!(!schedule.pulse_is_marker(Duration::from_millis(1_000)));
|
||||
assert!(!schedule.flash_active(Duration::from_millis(3_000)));
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[should_panic(expected = "pulse period must stay positive")]
|
||||
fn constructor_rejects_zero_period() {
|
||||
|
||||
188
client/src/sync_probe/signature.rs
Normal file
188
client/src/sync_probe/signature.rs
Normal file
@ -0,0 +1,188 @@
|
||||
//! Shared A/V signature palette for synthetic sync probes.
|
||||
//!
|
||||
//! The analyzer and generator both need the same small codebook. Keeping the
|
||||
//! colors and tones here prevents a subtle class of transport-test regressions:
|
||||
//! a client could emit a valid-looking pulse that the downstream analyzer no
|
||||
//! longer recognizes as the same event identity.
|
||||
|
||||
/// One saturated RGB identity color from the probe codebook.
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub(crate) struct ProbeColor {
    pub r: u8,
    pub g: u8,
    pub b: u8,
}

/// Highest event code the tone table and color palette can represent.
pub(crate) const MAX_EVENT_CODE: u32 = 16;

/// Tone frequency in hertz for event code `i + 1`.
const PROBE_AUDIO_TONE_FREQUENCIES_HZ: [f64; MAX_EVENT_CODE as usize] = [
    620.0, 780.0, 940.0, 1120.0, 1320.0, 1540.0, 1780.0, 2040.0, 2320.0, 2620.0, 2960.0, 3340.0,
    3760.0, 4220.0, 4740.0, 5320.0,
];

/// Flash color for event code `i + 1`; ordering mirrors the tone table so both
/// lookups share the same `code - 1` indexing.
const PROBE_COLOR_PALETTE: [ProbeColor; MAX_EVENT_CODE as usize] = [
    ProbeColor { r: 255, g: 45, b: 45 },
    ProbeColor { r: 0, g: 230, b: 118 },
    ProbeColor { r: 41, g: 121, b: 255 },
    ProbeColor { r: 255, g: 179, b: 0 },
    ProbeColor { r: 216, g: 27, b: 96 },
    ProbeColor { r: 0, g: 188, b: 212 },
    ProbeColor { r: 205, g: 220, b: 57 },
    ProbeColor { r: 126, g: 87, b: 194 },
    ProbeColor { r: 255, g: 112, b: 67 },
    ProbeColor { r: 38, g: 166, b: 154 },
    ProbeColor { r: 255, g: 64, b: 129 },
    ProbeColor { r: 92, g: 107, b: 192 },
    ProbeColor { r: 255, g: 235, b: 59 },
    ProbeColor { r: 105, g: 240, b: 174 },
    ProbeColor { r: 171, g: 71, b: 188 },
    ProbeColor { r: 3, g: 169, b: 244 },
];

/// Return the audio frequency assigned to a probe event code.
///
/// Inputs: one-based event code.
/// Outputs: tone frequency in hertz, or `None` for unsupported codes
/// (zero or above `MAX_EVENT_CODE`).
/// Why: coded client-origin probes must preserve event identity after network
/// transport, not just pulse cadence.
pub(crate) fn probe_audio_frequency_for_event_code(code: u32) -> Option<f64> {
    let index = code.checked_sub(1)? as usize;
    PROBE_AUDIO_TONE_FREQUENCIES_HZ.get(index).copied()
}

/// Return the video color assigned to a probe event code.
///
/// Inputs: one-based event code.
/// Outputs: saturated RGB color, or `None` for unsupported codes.
/// Why: final RCT captures can drop early frames; color identity lets the
/// analyzer rejoin observed flashes with the exact client-generated event.
pub(crate) fn probe_color_for_event_code(code: u32) -> Option<ProbeColor> {
    let index = code.checked_sub(1)? as usize;
    PROBE_COLOR_PALETTE.get(index).copied()
}

/// Validate that every event code can be rendered and analyzed.
///
/// Inputs: user-supplied code sequence.
/// Outputs: the first unsupported code if one is present, else `None`.
/// Why: failing at CLI/config time produces clearer diagnostics than a later
/// analyzer report with missing pulses.
pub(crate) fn first_unsupported_event_code(codes: &[u32]) -> Option<u32> {
    for &code in codes {
        let renderable = probe_audio_frequency_for_event_code(code).is_some()
            && probe_color_for_event_code(code).is_some();
        if !renderable {
            return Some(code);
        }
    }
    None
}
|
||||
274
client/src/sync_probe/timeline.rs
Normal file
274
client/src/sync_probe/timeline.rs
Normal file
@ -0,0 +1,274 @@
|
||||
//! Client-origin timeline artifacts for synthetic upstream transport probes.
|
||||
//!
|
||||
//! The external RCT capture is the truth source for final sync, but freshness
|
||||
//! needs a client-side origin clock. This module writes that compact schedule
|
||||
//! before transport starts so manual harnesses can correlate observed flashes
|
||||
//! and tones against when the client generated them.
|
||||
|
||||
use anyhow::{Context, Result};
|
||||
use serde::Serialize;
|
||||
use std::path::Path;
|
||||
use std::time::Duration;
|
||||
|
||||
use crate::input::camera::{CameraCodec, CameraConfig};
|
||||
use crate::sync_probe::schedule::PulseSchedule;
|
||||
|
||||
/// Client-origin schedule artifact for one synthetic upstream transport probe.
///
/// Serialized to pretty JSON so manual RCT harnesses can correlate observed
/// flashes and tones against when the client generated them.
#[derive(Debug, Serialize)]
pub struct ProbeTimeline {
    // Versioned schema identifier consumed by analyzer tooling.
    schema: &'static str,
    // Marks the timeline as client-generated (as opposed to server-observed).
    origin: &'static str,
    // Human-readable description of the full media route the probe exercises.
    media_path: &'static str,
    // Where in the pipeline the synthetic media is injected.
    injection_scope: &'static str,
    // True when the client-to-server uplink leg is part of the measurement.
    client_uplink_included: bool,
    // Unix timestamp (ns) associated with local PTS zero.
    client_start_unix_ns: u64,
    // Negotiated camera profile the synthetic video mimics.
    camera_width: u32,
    camera_height: u32,
    camera_fps: u32,
    camera_codec: &'static str,
    // Audio profile of the synthetic PCM track (fixed by the constructor).
    audio_sample_rate: u32,
    audio_channels: u32,
    audio_chunk_ms: u32,
    // Warmup boundary and total probe duration, in microseconds of local PTS.
    warmup_us: u64,
    duration_us: u64,
    // Pulse cadence parameters copied from the deterministic schedule.
    pulse_period_ms: u64,
    pulse_width_ms: u64,
    marker_tick_period: u32,
    // Explicit per-event codes, if the schedule was built with any.
    event_width_codes: Vec<u32>,
    // One row per planned flash/tone event.
    events: Vec<ProbeTimelineEvent>,
}
|
||||
|
||||
/// One planned flash/tone event in a client-origin probe timeline.
#[derive(Debug, Serialize)]
pub struct ProbeTimelineEvent {
    // Zero-based position of the event in the timeline.
    event_id: usize,
    // Event identity code (one-based) carried by the flash color / tone pitch.
    code: u32,
    // Planned start/end of the pulse, in microseconds of local PTS.
    planned_start_us: u64,
    planned_end_us: u64,
    // Client wall-clock time (Unix ns) at which the event is generated.
    client_capture_unix_ns: u64,
}
|
||||
|
||||
impl ProbeTimeline {
    /// Build a client-origin event schedule for a synthetic bundled probe.
    ///
    /// Inputs: negotiated media profile, deterministic pulse schedule, total
    /// probe duration, and the Unix timestamp associated with local PTS zero.
    /// Outputs: a serializable timeline artifact.
    /// Why: the RCT analyzer can prove final A/V sync by itself, but end-to-end
    /// freshness needs to know when each flash/tone was generated on the client.
    #[must_use]
    pub fn new(
        camera: CameraConfig,
        schedule: &PulseSchedule,
        duration: Duration,
        client_start_unix_ns: u64,
    ) -> Self {
        Self {
            schema: "lesavka.client-transport-probe-timeline.v1",
            origin: "client-generated",
            media_path: "client synthetic capture -> bundled upstream media -> server UVC/UAC sinks -> RCT capture",
            injection_scope: "client-post-capture-uplink-bundle",
            client_uplink_included: true,
            client_start_unix_ns,
            camera_width: camera.width,
            camera_height: camera.height,
            camera_fps: camera.fps,
            camera_codec: codec_label(camera.codec),
            // Fixed synthetic audio profile: 48 kHz stereo in 10 ms chunks.
            // NOTE(review): presumably mirrors the live microphone uplink
            // profile — confirm against the capture worker configuration.
            audio_sample_rate: 48_000,
            audio_channels: 2,
            audio_chunk_ms: 10,
            warmup_us: micros(schedule.warmup_boundary()),
            duration_us: micros(duration),
            pulse_period_ms: millis(schedule.pulse_period()),
            pulse_width_ms: millis(schedule.pulse_width()),
            marker_tick_period: schedule.marker_tick_period(),
            event_width_codes: schedule.event_width_codes().to_vec(),
            events: timeline_events(schedule, duration, client_start_unix_ns),
        }
    }

    /// Write the timeline as pretty JSON.
    ///
    /// Inputs: destination path.
    /// Outputs: a filesystem artifact consumed by manual RCT probe scripts.
    /// Errors: directory creation, serialization, or file-write failures,
    /// each annotated with the offending path.
    /// Why: keeping the artifact structured avoids scraping probe logs for
    /// timing truth during transport tuning.
    pub fn write_to(&self, path: &Path) -> Result<()> {
        // Create intermediate directories so callers can target nested paths.
        if let Some(parent) = path.parent() {
            std::fs::create_dir_all(parent)
                .with_context(|| format!("creating timeline directory {}", parent.display()))?;
        }
        let json = serde_json::to_string_pretty(self).context("serializing probe timeline")?;
        // Trailing newline keeps the artifact friendly to line-based tooling.
        std::fs::write(path, format!("{json}\n"))
            .with_context(|| format!("writing {}", path.display()))
    }
}
|
||||
|
||||
/// Derive client-origin event rows from the pulse schedule.
|
||||
///
|
||||
/// Inputs: schedule, total probe duration, and Unix timestamp for PTS zero.
|
||||
/// Outputs: timeline events with planned PTS and client clock timestamps.
|
||||
/// Why: the RCT analyzer reports observed event identity, but freshness needs
|
||||
/// the client-side generation time for the same event.
|
||||
fn timeline_events(
|
||||
schedule: &PulseSchedule,
|
||||
duration: Duration,
|
||||
client_start_unix_ns: u64,
|
||||
) -> Vec<ProbeTimelineEvent> {
|
||||
let mut events = Vec::new();
|
||||
let mut start = schedule.warmup_boundary();
|
||||
let period = schedule.pulse_period();
|
||||
while start < duration {
|
||||
let width = schedule.active_pulse_width(start);
|
||||
if width.is_zero() {
|
||||
break;
|
||||
}
|
||||
let code = if let Some(code) = schedule.event_code(start) {
|
||||
code
|
||||
} else if schedule.pulse_is_marker(start) {
|
||||
2
|
||||
} else {
|
||||
1
|
||||
};
|
||||
let planned_start_us = micros(start);
|
||||
events.push(ProbeTimelineEvent {
|
||||
event_id: events.len(),
|
||||
code,
|
||||
planned_start_us,
|
||||
planned_end_us: micros((start + width).min(duration)),
|
||||
client_capture_unix_ns: client_start_unix_ns
|
||||
.saturating_add(planned_start_us.saturating_mul(1_000)),
|
||||
});
|
||||
start += period;
|
||||
}
|
||||
events
|
||||
}
|
||||
|
||||
fn codec_label(codec: CameraCodec) -> &'static str {
|
||||
match codec {
|
||||
CameraCodec::H264 => "h264",
|
||||
CameraCodec::Hevc => "hevc",
|
||||
CameraCodec::Mjpeg => "mjpeg",
|
||||
}
|
||||
}
|
||||
|
||||
/// Convert a duration to whole microseconds, saturating at `u64::MAX`.
fn micros(duration: Duration) -> u64 {
    u64::try_from(duration.as_micros()).unwrap_or(u64::MAX)
}
|
||||
|
||||
/// Convert a duration to whole milliseconds, saturating at `u64::MAX`.
fn millis(duration: Duration) -> u64 {
    u64::try_from(duration.as_millis()).unwrap_or(u64::MAX)
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::ProbeTimeline;
    use crate::input::camera::{CameraCodec, CameraConfig};
    use crate::sync_probe::schedule::PulseSchedule;
    use std::time::Duration;

    /// Events should begin at the schedule's (rounded) warmup boundary and
    /// carry implicit codes: 2 for marker pulses, 1 for plain pulses.
    #[test]
    fn timeline_lists_client_origin_events_after_rounded_warmup() {
        let camera = CameraConfig {
            codec: CameraCodec::Mjpeg,
            width: 1280,
            height: 720,
            fps: 30,
        };
        // Warmup of 3.5s with a 1s pulse period; the schedule rounds the
        // boundary to 4s (asserted below as warmup_us).
        let schedule = PulseSchedule::new(
            Duration::from_millis(3_500),
            Duration::from_millis(1_000),
            Duration::from_millis(120),
            5,
        );

        let timeline = ProbeTimeline::new(camera, &schedule, Duration::from_secs(8), 1_000);

        assert_eq!(
            timeline.schema,
            "lesavka.client-transport-probe-timeline.v1"
        );
        assert_eq!(timeline.warmup_us, 4_000_000);
        assert_eq!(timeline.camera_codec, "mjpeg");
        assert!(timeline.event_width_codes.is_empty());
        // 8s duration minus 4s warmup at 1s period => 4 events.
        assert_eq!(timeline.events.len(), 4);
        assert_eq!(timeline.events[0].event_id, 0);
        assert_eq!(timeline.events[0].code, 2);
        assert_eq!(timeline.events[0].planned_start_us, 4_000_000);
        // client_start_unix_ns (1_000) + planned_start_us * 1_000.
        assert_eq!(timeline.events[0].client_capture_unix_ns, 4_000_001_000);
        assert_eq!(timeline.events[1].code, 1);
    }

    /// Explicit event-width codes must flow through to the emitted events
    /// unchanged, including their clamped end timestamps.
    #[test]
    fn timeline_preserves_explicit_event_codes() {
        let camera = CameraConfig {
            codec: CameraCodec::Mjpeg,
            width: 1280,
            height: 720,
            fps: 30,
        };
        let schedule = PulseSchedule::with_event_width_codes(
            Duration::from_secs(4),
            Duration::from_millis(1_000),
            Duration::from_millis(120),
            5,
            vec![1, 3],
        );

        let timeline = ProbeTimeline::new(camera, &schedule, Duration::from_secs(8), 1_000);

        assert_eq!(timeline.event_width_codes, vec![1, 3]);
        // Only as many events as explicit codes are emitted.
        assert_eq!(timeline.events.len(), 2);
        assert_eq!(timeline.events[0].code, 1);
        assert_eq!(timeline.events[0].planned_end_us, 4_120_000);
        assert_eq!(timeline.events[1].code, 3);
        assert_eq!(timeline.events[1].planned_end_us, 5_120_000);
    }

    #[test]
    /// Timeline files should be durable artifacts for end-to-end freshness analysis.
    fn timeline_writes_json_and_labels_hevc_origin_profile() {
        let camera = CameraConfig {
            codec: CameraCodec::Hevc,
            width: 1920,
            height: 1080,
            fps: 30,
        };
        let schedule = PulseSchedule::new(
            Duration::from_secs(1),
            Duration::from_secs(1),
            Duration::from_millis(120),
            4,
        );
        let temp_dir = tempfile::tempdir().expect("tempdir");
        // Nested path exercises write_to's create_dir_all branch.
        let path = temp_dir.path().join("nested/timeline.json");

        ProbeTimeline::new(camera, &schedule, Duration::from_secs(3), 123)
            .write_to(&path)
            .expect("write timeline");

        let json: serde_json::Value =
            serde_json::from_str(&std::fs::read_to_string(path).expect("timeline")).expect("json");
        assert_eq!(json["camera_codec"], "hevc");
        assert_eq!(json["events"][0]["code"], 2);
    }

    #[test]
    /// Codec labeling stays independent from GStreamer so transport reports stay cheap.
    fn timeline_labels_h264_without_needing_transport_runtime() {
        let camera = CameraConfig {
            codec: CameraCodec::H264,
            width: 640,
            height: 360,
            fps: 20,
        };
        let schedule = PulseSchedule::new(
            Duration::from_secs(1),
            Duration::from_secs(1),
            Duration::from_millis(120),
            4,
        );

        let timeline = ProbeTimeline::new(camera, &schedule, Duration::from_secs(2), 0);

        assert_eq!(timeline.camera_codec, "h264");
    }
}
|
||||
@ -346,4 +346,23 @@ mod tests {
|
||||
assert_eq!(snapshot.microphone.latest_enqueue_age_ms, 0.0);
|
||||
assert!(snapshot.microphone.last_error.is_empty());
|
||||
}
|
||||
|
||||
    #[test]
    /// The launcher consumes this file path to explain which uplink transport is active.
    ///
    /// Uses the telemetry env var to point the publisher at a temp file, then
    /// verifies the recorded mode is trimmed and both capture legs are marked
    /// enabled in the persisted snapshot.
    fn publisher_from_env_records_bundled_mode_to_configured_path() {
        let temp_dir = tempfile::tempdir().expect("tempdir");
        let path = temp_dir.path().join("env-uplink.json");
        let path_string = path.to_string_lossy().into_owned();

        // temp_env scopes the env override so parallel tests are unaffected.
        temp_env::with_var(UPLINK_TELEMETRY_ENV, Some(path_string.as_str()), || {
            let publisher = UplinkTelemetryPublisher::from_env(true, true);

            // Padded input proves the publisher normalizes whitespace.
            publisher.record_upstream_mode(" bundled ");

            let snapshot = load_uplink_telemetry(&path).expect("load snapshot");
            assert_eq!(snapshot.upstream_mode, "bundled");
            assert!(snapshot.camera.enabled);
            assert!(snapshot.microphone.enabled);
        });
    }
|
||||
}
|
||||
|
||||
@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "lesavka_common"
|
||||
version = "0.20.0"
|
||||
version = "0.21.9"
|
||||
edition = "2024"
|
||||
build = "build.rs"
|
||||
|
||||
|
||||
278
docs/hevc-upstream-plan.md
Normal file
278
docs/hevc-upstream-plan.md
Normal file
@ -0,0 +1,278 @@
|
||||
# HEVC upstream implementation checklist
|
||||
|
||||
This is the working checklist for moving Lesavka upstream media from MJPEG-only transport toward HEVC/H.265 video transport while preserving the already-calibrated MJPEG server-to-RCT path.
|
||||
|
||||
## Goals
|
||||
|
||||
- Keep the existing MJPEG ingress and MJPEG UVC output calibration valid.
|
||||
- Add first-class HEVC ingress on the server, decoded to the existing MJPEG UVC output path.
|
||||
- Calibrate server-to-RCT delays per ingress profile and UVC mode.
|
||||
- Send client-origin synthetic A/V as bundled audio plus HEVC video through the same handoff used by real capture.
|
||||
- Measure final RCT sync, freshness, and smoothness before adding deeper introspection.
|
||||
|
||||
## Safety constraints
|
||||
|
||||
- Do not reboot Theia unless SSH/service recovery cannot be achieved any other way.
|
||||
- If Theia must be rebooted, wait for it to come back online and resume from the last artifact-backed checkpoint.
|
||||
- Keep server-to-RCT measurement tooling intact except for additive HEVC/profile support.
|
||||
- Do not overwrite the MJPEG static delay profile with HEVC values.
|
||||
- Bump package versions before any push that contains code changes.
|
||||
|
||||
## Access and automation
|
||||
|
||||
- [x] Theia helper supports `deploy`, `restart`, `status`, `hevc-prereqs`, and `reconfigure MODE [hevc|mjpeg]`.
|
||||
- [x] Passwordless sudo works for `/usr/local/sbin/lesavka-dev-install`.
|
||||
- [x] `reconfigure 1920x1080@30 hevc` was verified with all services active.
|
||||
- [x] `reconfigure 1280x720@20 hevc` rebuilt descriptors; Tethys saw `1280x720@20` MJPEG.
|
||||
- [x] Patch local `lesavka-core.sh` so slow `udevadm control --reload` becomes a warning instead of aborting reconfigure.
|
||||
- [ ] Deploy the hardened `lesavka-core.sh` to Theia once SSH completes banner exchange again.
|
||||
|
||||
## Server-to-RCT HEVC calibration
|
||||
|
||||
- [x] `1920x1080@30` HEVC ingress was measured ready in `/tmp/lesavka-server-rc-mode-matrix-20260507-033941`.
|
||||
- [x] `1920x1080@30` candidate: video `127952us`, audio `0us`, p95 max `5.3ms`, freshness budget max `252.3ms`.
|
||||
- [x] Initial failures for 720p modes were traced to capture budget, not media failure: coded events started around 45s into a 52s capture.
|
||||
- [x] Longer HEVC calibration budget proof: `CAPTURE_SECONDS=90`, `PROBE_TIMEOUT_SECONDS=90`.
|
||||
- [x] `1280x720@20` proof run ready in `/tmp/lesavka-server-rc-mode-matrix-20260507-050819`: video `143741us`, audio `0us`, p95 `9.2ms`, freshness `257.8ms`, smoothness clean.
|
||||
- [x] `1280x720@30` repeated evidence in `/tmp/lesavka-server-rc-mode-matrix-20260507-051510`: candidates around `129603us`, `135090us`, and `142615us`, all preferred with 15/16 coded pairs.
|
||||
- [x] `1280x720@20` and `1280x720@30` completed a post-recovery 3-run HEVC static matrix in `/tmp/lesavka-hevc-continuation-20260509-004310/server-rct-hevc-720p-static/lesavka-server-rc-mode-matrix-20260509-005114`.
|
||||
- [x] `1280x720@20` selected video `173852us`, audio `0us`; p95 max `14.1ms`, median abs max `11.2ms`, freshness budget max `309.0ms`.
|
||||
- [x] `1280x720@30` selected video `145695us`, audio `0us`; p95 max `27.8ms`, median abs max `18.5ms`, freshness budget max `309.4ms`.
|
||||
- [ ] Re-run `1920x1080@20` only after a fresh explicit go/no-go decision; this mode is quarantined because the last attempt preceded a Theia outage.
|
||||
- [ ] Run final all-safe-mode HEVC sanity matrix with `LESAVKA_SERVER_RC_TUNE_DELAYS=0`; avoid `1920x1080@20` until the quarantine is lifted.
|
||||
|
||||
### Local artifact consolidation while Theia is offline
|
||||
|
||||
These notes are from local artifact review on 2026-05-07. They are useful for choosing the next post-reboot run, but only completed matrix summaries should be treated as final static calibration.
|
||||
|
||||
| Artifact | Mode | Result |
|
||||
| --- | --- | --- |
|
||||
| `/tmp/lesavka-server-rc-mode-matrix-20260507-033941` | `1920x1080@30` | Completed 3/3 static run, ready at video `127952us`, audio `0us`; p95 max `5.3ms`, median abs max `4.1ms`, freshness budget max `252.3ms`. |
|
||||
| `/tmp/lesavka-server-rc-mode-matrix-20260507-050819` | `1280x720@20` | Single-run proof, ready only because `min_runs=1`; video `143741us`, audio `0us`; p95 `9.2ms`, median abs `6.4ms`, freshness `257.8ms`. |
|
||||
| `/tmp/lesavka-server-rc-mode-matrix-20260507-051510` | `1280x720@20` | Interrupted before matrix summary. Per-probe reports show preferred confirmations at `190716us`, `160769us`, and `153680us`; the first confirmation still had median `-31.0ms`, so rerun this mode before locking it. |
|
||||
| `/tmp/lesavka-server-rc-mode-matrix-20260507-051510` | `1280x720@30` | Interrupted before matrix summary. Per-probe reports show preferred confirmations at `129603us`, `135090us`, and `142615us`, all with 15/16 paired signatures and p95 between `3.0ms` and `11.4ms`; `135090us` remains a sensible candidate pending a completed static summary. |
|
||||
|
||||
Post-reboot priority order:
|
||||
|
||||
1. Deploy the local `lesavka-core.sh` udev timeout hardening.
|
||||
2. Treat the completed 2026-05-09 720p static matrix as the source of truth for 720p HEVC values: `1280x720@20=173852`, `1280x720@30=145695`.
|
||||
3. Keep `1920x1080@30=127952` from the completed 3-run static matrix.
|
||||
4. Only after an explicit go/no-go, re-run `1920x1080@20` HEVC with explicit mode selection and tighter watchdogs.
|
||||
5. Run final safe-mode HEVC sanity with tuning disabled for validated modes; avoid `1920x1080@20` until the quarantine is lifted.
|
||||
|
||||
## Code work
|
||||
|
||||
- [x] Add server HEVC decode path to MJPEG UVC output.
|
||||
- [x] Add client HEVC capture/encode selection.
|
||||
- [x] Add synthetic HEVC probe frame encoding.
|
||||
- [x] Add `mjpeg-cfr` RCT capture mode to avoid MJPEG timestamp compression artifacts.
|
||||
- [x] Make `LESAVKA_CORE_ONESHOT=1` request a descriptor rebuild in `lesavka-core.sh`.
|
||||
- [x] Harden `lesavka-core.sh` so slow udev reload/trigger commands do not abort gadget rebuild.
|
||||
- [x] Add HEVC profile defaults to `run_server_to_rc_mode_matrix.sh`: longer capture/probe timeout and separate delay map.
|
||||
- [x] Stamp `LESAVKA_CALIBRATION_PROFILE`, `LESAVKA_UPLINK_CAMERA_CODEC`, and profile-specific delay maps during matrix runtime reconfigure.
|
||||
- [x] Add profile-specific factory calibration maps in server calibration without changing MJPEG defaults.
|
||||
- [x] Add installer env support for separate MJPEG and HEVC delay maps.
|
||||
- [x] Add tests for profile selection, install defaults, and matrix HEVC defaults.
|
||||
- [x] Bump local package versions to `0.21.1` for the initial HEVC profile/defaults work.
|
||||
- [x] Bump local package versions to `0.21.3` for the 720p HEVC static calibration defaults.
|
||||
- [x] Bump local package versions to `0.21.4` for HEVC decoded-MJPEG spool pacing.
|
||||
- [x] Bump local package versions to `0.21.5` for synthetic HEVC probe GOP parity with live camera uplink.
|
||||
- [x] Bump local package versions to `0.21.6` for encoded probe packets using encoder output PTS.
|
||||
- [x] Bump local package versions to `0.21.7` for optional UVC spool metadata fetches and stricter local HEVC bundle identity proof.
|
||||
- [x] Verify local HEVC/profile contracts:
|
||||
`cargo test -q -p lesavka_server calibration::tests`,
|
||||
`cargo test -q --test client_server_rc_matrix_script_contract --test server_install_script_contract`,
|
||||
`cargo test -q -p lesavka_server hevc_probe_frame_encoder_builds_when_x265_is_available`,
|
||||
and `cargo test -q --test server_upstream_media_v2_handoff_contract --test client_rct_transport_probe_contract`.
|
||||
- [x] Verify broad local baseline after the HEVC/profile work:
|
||||
`cargo test --workspace --all-targets`.
|
||||
|
||||
## Client-to-server-to-RCT transport
|
||||
|
||||
- [x] Confirm at code/contract level that client synthetic media uses the same bundled RPC shape as real capture handoff.
|
||||
- [x] Confirm at code/contract level that the client sends audio and negotiated HEVC video as bundled media units.
|
||||
- [x] Confirm at code/contract level that the server receives each bundle and queues audio/video into the post-transport UAC/UVC handoff path.
|
||||
- [x] Add unattended start-delay support to the client-to-RCT transport probe.
|
||||
- [x] Add local HEVC bundle audit plus freshness-biased jitter stress so outgoing synthetic media proves 16/16 coded events and event codes 1..16 before hardware is involved.
|
||||
- [x] Add client-to-RCT summary layer attribution for client-local bundle age versus post-send-to-RCT freshness.
|
||||
- [x] Add optional UVC spool-boundary metadata fetch/summarization so failed blind runs can distinguish server decode/spool loss from final RCT capture loss.
|
||||
- [ ] Run blind client-to-RCT HEVC transport probe.
|
||||
- [ ] Evaluate coded-pair completeness, sync p95/median, freshness budget, smoothness, and transport lag.
|
||||
- [ ] Add deeper ingress/queue/decode instrumentation only if the blind final RCT result fails.
|
||||
|
||||
## Deferred downstream/input latency follow-up
|
||||
|
||||
Bring this section back up after upstream media is fully optimized and the blind
|
||||
HEVC client-to-server-to-RCT route is consistently healthy. The goal is to
|
||||
minimize the loop from local input to visible downstream evidence:
|
||||
|
||||
`local input -> server HID write -> RCT response -> capture-card H.264 -> client display`
|
||||
|
||||
Current downstream facts:
|
||||
|
||||
- Real downstream eye video is H.264 byte-stream pass-through on the server:
|
||||
capture-card `v4l2src` emits `video/x-h264`, the server parses it, and the
|
||||
client decodes it.
|
||||
- The normal downstream path does not decode/re-encode on the server.
|
||||
- Testsrc-only downstream still uses `x264enc` so the hardware-free contract can
|
||||
prove H.264 packet shape, Annex-B framing, IDR recovery, and timing.
|
||||
- HID/input has deterministic freshness/routing/recovery contracts, but no
|
||||
measured end-to-end HID latency probe yet.
|
||||
|
||||
Follow-up optimization plan:
|
||||
|
||||
1. Add a T0-T5 downstream/input latency probe:
|
||||
`T0` local synthetic input generated, `T1` server RPC receive, `T2` server HID
|
||||
write, `T3` visible RCT response, `T4` downstream capture-card frame observed
|
||||
by the server, `T5` client display handoff.
|
||||
2. Reuse existing probe patterns instead of inventing new infrastructure:
|
||||
client timelines from `lesavka-sync-probe`, server timing sidecars from UVC
|
||||
metadata work, final capture analysis from sync/freshness tooling, and local
|
||||
performance/input gates for deterministic checks.
|
||||
3. Tune downstream buffering after measurement: eye queue depth, appsink depth,
|
||||
client appsrc queue depth, leaky/drop policy, and first-frame/stall watchdogs.
|
||||
4. Check capture-card H.264 controls for low-latency settings: GOP/keyframe
|
||||
cadence, bitrate, buffering, and whether the card exposes any hardware
|
||||
latency knobs through V4L2 controls.
|
||||
5. Prefer the fastest reliable client H.264 decoder available on the local host;
|
||||
keep `sync=false` display sinks and verify queue depths are freshness-biased.
|
||||
6. Add a focused HID latency measurement, not just reliability tests, so mouse
|
||||
and keyboard can be optimized by observed numbers instead of feel.
|
||||
7. Only consider downstream HEVC or alternate transport after the H.264
|
||||
pass-through path is measured; H.264 pass-through is already cheap on Theia,
|
||||
so the likely wins are buffering, decoder choice, and measurement-guided
|
||||
recovery.
|
||||
|
||||
### Optional UVC spool-boundary metadata
|
||||
|
||||
The client-to-RCT probe remains non-mutating by default. If a server has already
|
||||
been configured to append UVC spool metadata, the probe can fetch that JSONL and
|
||||
write a local summary beside the final RCT capture artifacts:
|
||||
|
||||
```bash
|
||||
env LESAVKA_CLIENT_RCT_UVC_FRAME_META_LOG_REMOTE=/tmp/lesavka-uvc-frame-meta.jsonl \
|
||||
./scripts/manual/run_client_to_rct_transport_probe.sh
|
||||
```
|
||||
|
||||
This does not enable server metadata by itself. It only copies and summarizes a
|
||||
pre-existing server artifact. The useful comparison is:
|
||||
|
||||
- `client-transport-timeline.json`: what the client generated and bundled.
|
||||
- `uvc-frame-meta-summary.txt`: what reached the server's MJPEG UVC spool.
|
||||
- `report.txt` and `client-rct-transport-summary.txt`: what the RCT finally
|
||||
observed.
|
||||
|
||||
If the spool summary has 16/16 synthetic events and the RCT report does not, the
|
||||
loss is after server decode/spool. If both are incomplete, the next debugging
|
||||
target is client encoding, bundled transport, or server ingest/decode.
|
||||
|
||||
## Remote safety posture
|
||||
|
||||
- The 2026-05-08 moonshot run is recorded in
|
||||
`/tmp/lesavka-hevc-moonshot-latest/report.log`.
|
||||
- Local validation is green after the all-mode bundle audit hardening:
|
||||
`hygiene_gate.sh`, `quality_gate.sh`, and a release build all passed.
|
||||
- No reboot or destructive recovery should be attempted by automation.
|
||||
- The last hardware run survived the 720p HEVC calibration repeats, then failed
|
||||
during `1920x1080@20` signal conditioning with zero paired events before the
|
||||
host became unreachable.
|
||||
- Treat `1920x1080@20` HEVC as quarantined until Theia is back and a low-risk
|
||||
service-only status pass completes. Do not resume that mode as the first
|
||||
remote action.
|
||||
- Next remote step once Theia recovers: run status-only checks, deploy the
|
||||
latest binaries if needed, prove a known-safe mode such as `1280x720@30`, and
|
||||
only then decide whether to reattempt `1920x1080@20` with tighter watchdogs.
|
||||
|
||||
### 2026-05-08 partial HEVC calibration evidence
|
||||
|
||||
These are not final static values, but they are useful breadcrumbs from the
|
||||
interrupted `/tmp/lesavka-hevc-moonshot-*` matrix:
|
||||
|
||||
| Mode | Evidence | Current interpretation |
|
||||
| --- | --- | --- |
|
||||
| `1280x720@30` | Three usable tuned confirmations: `150731us`, `140349us`, and `135090us`; p95 stayed in the preferred band with 15 paired coded events per run. | Existing `135090us` remains acceptable; completed evidence centers closer to `136171-140349us`, so do not retune blindly without a final all-mode sanity run. |
|
||||
| `1280x720@20` | Two strict-eligible tuned confirmations around `184870us` and `180995us`; the third run (`159923us`) had median/p95 too high for static selection. | Needs one more stable completed matrix before baking; likely higher than the older `153680us` seed, but not locked. |
|
||||
| `1920x1080@20` | Signal conditioning timed out and produced `0` paired events; host became unreachable afterward. | Blocked/risky. Re-enter with status-only checks and a low-risk mode before attempting this again. |
|
||||
|
||||
### 2026-05-09 post-recovery 720p HEVC static decision
|
||||
|
||||
| Mode | Selected video delay | Evidence | Interpretation |
|
||||
| --- | ---: | --- | --- |
|
||||
| `1280x720@20` | `173852us` | 3/3 static-ready runs, p95 max `14.1ms`, median abs max `11.2ms`, freshness budget max `309.0ms`; follow-up no-tune sanity still passed sync/freshness/smoothness but had one median sample at `21.8ms`. | Bake this as the 720p20 HEVC default, but keep an eye on median spread in later end-to-end runs. |
|
||||
| `1280x720@30` | `145695us` | 3/3 static-ready runs, p95 max `27.8ms`, median abs max `18.5ms`, freshness budget max `309.4ms`; follow-up no-tune sanity passed with p95 `13.7ms`. | Bake this as the 720p30 HEVC default. |
|
||||
|
||||
## Resume commands
|
||||
|
||||
Run these from `/home/brad/Development/lesavka` after `ssh theia 'date -Is'` succeeds:
|
||||
|
||||
```bash
|
||||
./scripts/manual/run_local_hevc_bundle_audit.sh
|
||||
./scripts/manual/run_local_hevc_encoder_preflight.sh
|
||||
```
|
||||
|
||||
The local audit writes a passwordless preflight manifest proving that the client
|
||||
synthetic source is producing one HEVC+PCM bundle train with 16/16 coded video
|
||||
events, event codes 1..16, and nearby audio before any server/RCT hardware is
|
||||
involved. It also runs a deterministic jitter stress case that drops stale
|
||||
events as complete A/V bundles while preserving the analyzer's 13-pair evidence
|
||||
floor.
|
||||
|
||||
The encoder preflight is also local-only. It runs the supported
|
||||
`1280x720@20`, `1280x720@30`, `1920x1080@20`, and `1920x1080@30` modes through
|
||||
the available GStreamer HEVC encoder and records whether each mode can produce
|
||||
Annex-B HEVC faster than realtime before remote transport is involved.
|
||||
|
||||
Use the re-entry helper for status-only checks first:
|
||||
|
||||
```bash
|
||||
./scripts/manual/run_hevc_remote_reentry_check.sh
|
||||
```
|
||||
|
||||
Or run the post-reboot sequence helper when we are ready for the full unattended
|
||||
runway. It performs the local HEVC preflights, waits for Theia, syncs/builds/
|
||||
deploys/reconfigures through passwordless `lesavka-dev-install`, and starts the
|
||||
pending HEVC static calibration matrix:
|
||||
|
||||
```bash
|
||||
./scripts/manual/run_hevc_post_reboot_sequence.sh
|
||||
```
|
||||
|
||||
Then use the same helper for the full no-password loop after the status check is
|
||||
green:
|
||||
|
||||
```bash
|
||||
env LESAVKA_HEVC_REENTRY_SYNC=1 \
|
||||
LESAVKA_HEVC_REENTRY_BUILD=1 \
|
||||
LESAVKA_HEVC_REENTRY_DEPLOY=1 \
|
||||
LESAVKA_HEVC_REENTRY_RECONFIGURE=1 \
|
||||
LESAVKA_HEVC_REENTRY_WAIT_SECONDS=900 \
|
||||
LESAVKA_HEVC_REENTRY_MODE='1280x720@30' \
|
||||
./scripts/manual/run_hevc_remote_reentry_check.sh
|
||||
```
|
||||
|
||||
Equivalent manual commands are:
|
||||
|
||||
```bash
|
||||
rsync -az --exclude target --exclude .git ./ theia:/home/theia/Development/lesavka-codex/
|
||||
ssh theia 'cd /home/theia/Development/lesavka-codex && cargo build --release --bin lesavka-server --bin lesavka-uvc && sudo -n /usr/local/sbin/lesavka-dev-install deploy'
|
||||
ssh theia 'sudo -n /usr/local/sbin/lesavka-dev-install reconfigure 1280x720@30 hevc'
|
||||
```
|
||||
|
||||
Then resume the hardware matrix:
|
||||
|
||||
```bash
|
||||
env REMOTE_PULSE_CAPTURE_TOOL=gst \
|
||||
REMOTE_PULSE_VIDEO_MODE=mjpeg-cfr \
|
||||
LESAVKA_SERVER_RC_PROFILE=hevc \
|
||||
LESAVKA_SERVER_RC_MODES='1920x1080@20' \
|
||||
LESAVKA_SERVER_RC_REPEAT_COUNT=3 \
|
||||
LESAVKA_SERVER_RC_STATIC_MIN_RUNS=3 \
|
||||
LESAVKA_SERVER_RC_VERBOSE_PROBES=0 \
|
||||
LESAVKA_SERVER_RC_RECONFIGURE=1 \
|
||||
LESAVKA_SERVER_RC_RECONFIGURE_COMMAND='ssh theia sudo -n /usr/local/sbin/lesavka-dev-install reconfigure "$LESAVKA_MODE" hevc' \
|
||||
CAPTURE_SECONDS=90 \
|
||||
PROBE_TIMEOUT_SECONDS=160 \
|
||||
PROBE_DURATION_SECONDS=20 \
|
||||
PROBE_WARMUP_SECONDS=4 \
|
||||
./scripts/manual/run_server_to_rc_mode_matrix.sh
|
||||
```
|
||||
@ -73,6 +73,14 @@ from `LESAVKA_CLIENT_PKI_SSH_SOURCE` over SSH. Runtime clients require the insta
|
||||
| `LESAVKA_CLIENT_PKI_AUTO_FETCH` | client installer toggle for SSH enrollment auto-fetch; defaults to enabled |
|
||||
| `LESAVKA_CLIENT_PKI_DIR` | client installer/runtime TLS identity directory override |
|
||||
| `LESAVKA_CLIENT_PKI_SSH_SOURCE` | client installer SSH source for auto-fetching the server enrollment bundle; defaults to `theia:/etc/lesavka/lesavka-client-pki.tar.gz` |
|
||||
| `LESAVKA_CLIENT_RCT_MAX_AGE_MS` | manual client-to-RCT transport probe freshness limit; maximum client-origin-to-RCT-observed p95 age including clock uncertainty, defaults to `1000` |
|
||||
| `LESAVKA_CLIENT_RCT_MIN_PAIRS` | manual client-to-RCT transport probe evidence floor; minimum paired flash/tone events before freshness can pass, defaults to `13` |
|
||||
| `LESAVKA_CLIENT_RCT_MODE` | manual client-to-RCT transport probe expected RCT UVC mode in `WIDTHxHEIGHT@FPS` form, or `auto` to read the current gadget profile; defaults to `auto` and does not reconfigure the gadget |
|
||||
| `LESAVKA_CLIENT_RCT_REQUIRE_SMOOTHNESS` | manual client-to-RCT transport probe gate toggle; when `1`, cadence hiccups fail the transport summary instead of reporting warnings |
|
||||
| `LESAVKA_CLIENT_RCT_SYNC_SAMPLE_INTERVAL_SECONDS` | manual client-to-RCT transport probe introspection interval; controls how often the harness samples server `upstream-sync` state while the client-origin probe is live |
|
||||
| `LESAVKA_CLIENT_RCT_UVC_FRAME_META_LOG_REMOTE` | manual client-to-RCT transport probe optional artifact path; fetches a pre-enabled server `LESAVKA_UVC_FRAME_META_LOG_PATH` JSONL and summarizes UVC spool-boundary timing |
|
||||
| `LESAVKA_CLIENT_RCT_UVC_FRAME_META_LOG_REQUIRED` | manual client-to-RCT transport probe optional artifact gate; set to `1` to fail if the configured UVC frame metadata log cannot be fetched |
|
||||
| `PROBE_EVENT_WIDTH_CODES` | manual client-to-RCT transport probe identity sequence; defaults to unique codes `1..16` so final RCT observations can be joined to client-origin timeline events after startup drops |
|
||||
| `LESAVKA_CLIENT_RELAYCTL_BIN_SRC` | test/build contract variable; not runtime operator config |
|
||||
| `LESAVKA_CLIENT_VIDEO_SUPPORT_SRC` | test/build contract variable; not runtime operator config |
|
||||
| `LESAVKA_CLIPBOARD_CHORD` | input routing/clipboard override |
|
||||
@ -243,6 +251,7 @@ from `LESAVKA_CLIENT_PKI_SSH_SOURCE` over SSH. Runtime clients require the insta
|
||||
| `LESAVKA_TEST_CAP_MIC` | test/build contract variable; not runtime operator config |
|
||||
| `LESAVKA_TEST_ASOUND_CARDS` | test/build contract variable; not runtime operator config |
|
||||
| `LESAVKA_TEST_ASOUND_PCM` | test/build contract variable; not runtime operator config |
|
||||
| `LESAVKA_TEST_BOOL_ENV_NEVER_SET` | test/build contract variable; not runtime operator config |
|
||||
| `LESAVKA_TEST_DISABLE_H264_DECODERS` | test/build contract variable; not runtime operator config |
|
||||
| `LESAVKA_TEST_FORCE_PIPELINE_START_ERROR` | test/build contract variable; not runtime operator config |
|
||||
| `LESAVKA_TEST_GATE_PUSHGATEWAY_JOB` | test/build contract variable; not runtime operator config |
|
||||
@ -275,7 +284,7 @@ from `LESAVKA_CLIENT_PKI_SSH_SOURCE` over SSH. Runtime clients require the insta
|
||||
| `LESAVKA_UPSTREAM_TIMING_TRACE` | upstream capture/rebase trace override for sync debugging |
|
||||
| `LESAVKA_UPSTREAM_V2_MAX_LIVE_AGE_MS` | v2 bundled webcam freshness ceiling; bundles already older than this are dropped as one unit, defaults to `1000` |
|
||||
| `LESAVKA_UPSTREAM_V2_PLAYOUT_DELAY_MS` | v2 optional common playout slack after sync offsets; defaults to `20` and is reduced when needed to protect the live-age budget |
|
||||
| `LESAVKA_UPSTREAM_VIDEO_PLAYOUT_MODE_OFFSETS_US` | server upstream per-UVC-mode output-path map; shipped MJPEG defaults are `1280x720@20=162659,1280x720@30=135090,1920x1080@20=160045,1920x1080@30=127952` |
|
||||
| `LESAVKA_UPSTREAM_VIDEO_PLAYOUT_MODE_OFFSETS_US` | server upstream per-UVC-mode output-path map; shipped MJPEG defaults are `1280x720@20=162659,1280x720@30=135090,1920x1080@20=160045,1920x1080@30=127952`; shipped HEVC decode-to-MJPEG defaults are profile-specific under `LESAVKA_UPSTREAM_HEVC_VIDEO_PLAYOUT_MODE_OFFSETS_US` |
|
||||
| `LESAVKA_UPSTREAM_VIDEO_PLAYOUT_OFFSET_US` | server upstream output-path override; v2 uses it as the explicit UVC handoff delay relative to the shared client capture clock, defaults to the calibrated MJPEG/UVC offset |
|
||||
| `LESAVKA_UPLINK_CAMERA_PREVIEW` | client media capture/playback override |
|
||||
| `LESAVKA_UPLINK_MIC_LEVEL` | client media capture/playback override |
|
||||
@ -299,9 +308,13 @@ from `LESAVKA_CLIENT_PKI_SSH_SOURCE` over SSH. Runtime clients require the insta
|
||||
| `LESAVKA_UVC_EXTERNAL` | server hardware/device override |
|
||||
| `LESAVKA_UVC_FALLBACK` | server hardware/device override |
|
||||
| `LESAVKA_UVC_FPS` | server hardware/device override |
|
||||
| `LESAVKA_UVC_FRAME_META` | UVC helper diagnostic override; when true, the server writes an atomic JSON sidecar for each spooled MJPEG frame so HEVC decode/spool timing can be compared with final RCT capture |
|
||||
| `LESAVKA_UVC_FRAME_META_LOG_PATH` | UVC helper diagnostic override; when set with `LESAVKA_UVC_FRAME_META=1`, append every MJPEG spool timing record as JSONL for full-probe HEVC/RCT correlation; summarize with `scripts/manual/summarize_uvc_frame_meta_log.py` |
|
||||
| `LESAVKA_UVC_FRAME_META_PATH` | UVC helper diagnostic override; explicit path for the optional MJPEG spool metadata sidecar |
|
||||
| `LESAVKA_UVC_FRAME_MAX_AGE_MS` | UVC helper freshness override; stale spooled MJPEG frames older than this are not replayed, defaults to `1000`; `0` disables TTL |
|
||||
| `LESAVKA_UVC_FRAME_SIZE` | server hardware/device override |
|
||||
| `LESAVKA_UVC_HEIGHT` | server hardware/device override |
|
||||
| `LESAVKA_UVC_HEVC_SPOOL_PULL_TIMEOUT_MS` | server HEVC decode-to-MJPEG freshness override; appsink pull wait for decoded MJPEG handoff before publishing newest frame to the UVC helper, defaults to `5` and is capped at `50` |
|
||||
| `LESAVKA_UVC_IDLE_PUMP_MS` | UVC helper freshness override; idle poll sleep while pumping host-returned buffers, defaults to `2` |
|
||||
| `LESAVKA_UVC_INTERVAL` | server hardware/device override |
|
||||
| `LESAVKA_UVC_LIMIT_PCT` | server hardware/device override |
|
||||
@ -328,6 +341,7 @@ from `LESAVKA_CLIENT_PKI_SSH_SOURCE` over SSH. Runtime clients require the insta
|
||||
| `LESAVKA_SERVER_ENV` | server/install environment file override |
|
||||
| `LESAVKA_SERVER_LOG_PATH` | server logging path override |
|
||||
| `LESAVKA_SYNC_PROBE_AUDIO_DUMP` | manual probe override |
|
||||
| `LESAVKA_SYNC_PROBE_SEND_LOG` | manual client-origin probe debug sidecar; writes JSONL bundle-send timing so RCT freshness failures can be separated into client queue age versus post-gRPC/server delay |
|
||||
| `LESAVKA_UAC_SANITY_DEV` | manual UAC sanity probe override |
|
||||
| `LESAVKA_UAC_SANITY_FREQ` | manual UAC sanity probe override |
|
||||
| `LESAVKA_UAC_SANITY_SECONDS` | manual UAC sanity probe override |
|
||||
@ -349,11 +363,56 @@ from `LESAVKA_CLIENT_PKI_SSH_SOURCE` over SSH. Runtime clients require the insta
|
||||
These entries are intentionally concise because most are manual lab or CI harness controls. The detailed behavior lives in the scripts and source that consume them; this table keeps every `LESAVKA_*` knob discoverable for operators and the hygiene gate.
|
||||
|
||||
| `LESAVKA_CAM_EMIT_UI_PROFILE` | client camera/profile negotiation override; used by launcher or lab probes to control emitted capture profile metadata |
|
||||
| `LESAVKA_CALIBRATION_PROFILE` | server calibration profile override (`mjpeg` or `hevc`); selects profile-specific server-to-RCT offset maps |
|
||||
| `LESAVKA_CAM_LOCK_TO_SERVER_PROFILE` | client camera/profile negotiation override; used by launcher or lab probes to control emitted capture profile metadata |
|
||||
| `LESAVKA_CAM_HEVC_KBIT` | client HEVC camera encoder bitrate in kbit/s; defaults to `3000` for latency-first upstream transport |
|
||||
| `LESAVKA_CLIENT_RCT_START_DELAY_SECONDS` | manual client-to-RCT transport probe start delay; lets installed server changes settle before capture starts |
|
||||
| `LESAVKA_CORE_ONESHOT` | server gadget helper mode; when `1`, performs one descriptor rebuild/reconfigure pass and exits |
|
||||
| `LESAVKA_EYE_FIRST_FRAME_TIMEOUT_MS` | runtime/install/session override; document near use before promoting to broader operator config |
|
||||
| `LESAVKA_EYE_STALL_WARN_MS` | downstream eye-video diagnostic threshold; logs when an already-started eye stream stops producing samples, defaults to `5000`; `0` disables the midstream warning |
|
||||
| `LESAVKA_HEVC_ALLOW_HARDWARE` | server HEVC decoder policy; when truthy, permits hardware decoder factories before the safe software fallback |
|
||||
| `LESAVKA_HEVC_DECODER` | server HEVC decoder override; selects an explicit GStreamer decoder element for HEVC ingress experiments |
|
||||
| `LESAVKA_HEVC_POST_REBOOT_FINAL_MODES` | manual HEVC post-reboot sequence final sanity mode list; defaults to all four supported upstream profiles |
|
||||
| `LESAVKA_HEVC_POST_REBOOT_OUTPUT_DIR` | manual HEVC post-reboot sequence artifact directory for local preflights, remote re-entry, and matrix logs |
|
||||
| `LESAVKA_HEVC_POST_REBOOT_PENDING_MODES` | manual HEVC post-reboot sequence static-calibration mode list; defaults to the lower-risk 720p HEVC modes; set explicitly before retrying quarantined `1920x1080@20` |
|
||||
| `LESAVKA_HEVC_POST_REBOOT_RECONFIGURE_COMMAND` | manual HEVC post-reboot sequence override for the passwordless server-to-RCT reconfigure command |
|
||||
| `LESAVKA_HEVC_POST_REBOOT_REENTRY_MODE` | manual HEVC post-reboot sequence mode used for the initial deploy/reconfigure smoke, defaults to low-risk `1280x720@30` |
|
||||
| `LESAVKA_HEVC_POST_REBOOT_REMOTE_HOST` | manual HEVC post-reboot sequence SSH host, defaults to `theia` through the re-entry helper |
|
||||
| `LESAVKA_HEVC_POST_REBOOT_REMOTE_REPO` | manual HEVC post-reboot sequence remote workspace path, defaults to `/home/theia/Development/lesavka-codex` |
|
||||
| `LESAVKA_HEVC_POST_REBOOT_REPEAT_COUNT` | manual HEVC post-reboot sequence static matrix repeat count, defaults to `3` |
|
||||
| `LESAVKA_HEVC_POST_REBOOT_RUN_FINAL_SANITY` | manual HEVC post-reboot sequence toggle; when `1`, runs the final all-mode tuning-disabled HEVC sanity matrix |
|
||||
| `LESAVKA_HEVC_POST_REBOOT_RUN_LOCAL_PREFLIGHTS` | manual HEVC post-reboot sequence toggle; when `1`, runs local bundle and encoder preflights before remote work |
|
||||
| `LESAVKA_HEVC_POST_REBOOT_RUN_REENTRY` | manual HEVC post-reboot sequence toggle; when `1`, syncs/builds/deploys/reconfigures Theia through the re-entry helper |
|
||||
| `LESAVKA_HEVC_POST_REBOOT_RUN_STATIC_MATRIX` | manual HEVC post-reboot sequence toggle; when `1`, runs the pending HEVC static calibration matrix |
|
||||
| `LESAVKA_HEVC_POST_REBOOT_STATIC_MIN_RUNS` | manual HEVC post-reboot sequence static calibration minimum eligible run count, defaults to `3` |
|
||||
| `LESAVKA_HEVC_POST_REBOOT_WAIT_INTERVAL_SECONDS` | manual HEVC post-reboot sequence retry interval while waiting for SSH after a lab host outage, defaults to `15` |
|
||||
| `LESAVKA_HEVC_POST_REBOOT_WAIT_SECONDS` | manual HEVC post-reboot sequence reachability wait budget before remote re-entry, defaults to `900` |
|
||||
| `LESAVKA_HEVC_REENTRY_BUILD` | manual HEVC re-entry helper toggle; when `1`, builds release server/UVC binaries on Theia after SSH recovers |
|
||||
| `LESAVKA_HEVC_REENTRY_CODEC` | manual HEVC re-entry helper codec argument for `lesavka-dev-install reconfigure`, defaults to `hevc` |
|
||||
| `LESAVKA_HEVC_REENTRY_DEPLOY` | manual HEVC re-entry helper toggle; when `1`, deploys already-built binaries via passwordless `lesavka-dev-install` |
|
||||
| `LESAVKA_HEVC_REENTRY_HOST` | manual HEVC re-entry helper SSH host, defaults to `theia` |
|
||||
| `LESAVKA_HEVC_REENTRY_MODE` | manual HEVC re-entry helper UVC mode argument, defaults to low-risk `1280x720@30` |
|
||||
| `LESAVKA_HEVC_REENTRY_OUTPUT_DIR` | manual HEVC re-entry helper artifact directory for status/build/deploy logs |
|
||||
| `LESAVKA_HEVC_REENTRY_RECONFIGURE` | manual HEVC re-entry helper toggle; when `1`, runs passwordless HEVC mode reconfiguration |
|
||||
| `LESAVKA_HEVC_REENTRY_REMOTE_REPO` | manual HEVC re-entry helper remote workspace path, defaults to `/home/theia/Development/lesavka-codex` |
|
||||
| `LESAVKA_HEVC_REENTRY_SYNC` | manual HEVC re-entry helper toggle; when `1`, rsyncs the local workspace to Theia before optional build/deploy |
|
||||
| `LESAVKA_HEVC_REENTRY_WAIT_INTERVAL_SECONDS` | manual HEVC re-entry helper retry interval while waiting for SSH after a lab host outage, defaults to `15` |
|
||||
| `LESAVKA_HEVC_REENTRY_WAIT_SECONDS` | manual HEVC re-entry helper reachability wait budget; when greater than `0`, polls SSH before status/build/deploy/reconfigure instead of failing immediately |
|
||||
| `LESAVKA_INSTALL_CAM_CODEC` | server installer camera ingress codec default; persists `LESAVKA_CAM_CODEC` for installed services, defaults to `hevc` |
|
||||
| `LESAVKA_INSTALL_UVC_FRAME_META` | server installer diagnostic toggle; persists `LESAVKA_UVC_FRAME_META`, defaults to `0` so spool metadata is opt-in |
|
||||
| `LESAVKA_INSTALL_UVC_FRAME_META_LOG_PATH` | server installer diagnostic path; persists `LESAVKA_UVC_FRAME_META_LOG_PATH`, defaults to `/tmp/lesavka-uvc-frame-meta.jsonl` for optional client-to-RCT spool-boundary fetches |
|
||||
| `LESAVKA_INSTALL_UPSTREAM_AUDIO_PLAYOUT_OFFSET_US` | installer default override; seeds server calibration env files with known lab-measured output-path offsets |
|
||||
| `LESAVKA_INSTALL_UPSTREAM_VIDEO_PLAYOUT_OFFSET_US` | installer default override; seeds server calibration env files with known lab-measured output-path offsets |
|
||||
| `LESAVKA_LEGACY_SPLIT_UPLINK` | runtime/install/session override; document near use before promoting to broader operator config |
|
||||
| `LESAVKA_LOCAL_HEVC_BUNDLE_AUDIT_JSON` | local HEVC bundle audit output path; receives the generated JSON manifest for outgoing synthetic HEVC+audio bundles |
|
||||
| `LESAVKA_LOCAL_HEVC_BUNDLE_AUDIT_OUTPUT_DIR` | local HEVC bundle audit artifact directory, defaults to a timestamped `/tmp/lesavka-local-hevc-bundle-audit-*` path |
|
||||
| `LESAVKA_LOCAL_HEVC_ENCODER` | local HEVC encoder preflight override; defaults to `auto` and otherwise names a GStreamer encoder element such as `x265enc` |
|
||||
| `LESAVKA_LOCAL_HEVC_ENCODER_PREFLIGHT_JSON` | local HEVC encoder preflight summary path; receives throughput and Annex-B validation for each tested mode |
|
||||
| `LESAVKA_LOCAL_HEVC_ENCODER_PREFLIGHT_KBIT` | local HEVC encoder preflight bitrate in kbit/s, defaults to `3000` |
|
||||
| `LESAVKA_LOCAL_HEVC_ENCODER_PREFLIGHT_MIN_REALTIME_FACTOR` | local HEVC encoder preflight pass threshold; encoded media seconds divided by wall time must meet this value, defaults to `1.05` |
|
||||
| `LESAVKA_LOCAL_HEVC_ENCODER_PREFLIGHT_MODES` | local HEVC encoder preflight mode list, defaults to the four supported upstream profiles |
|
||||
| `LESAVKA_LOCAL_HEVC_ENCODER_PREFLIGHT_OUTPUT_DIR` | local HEVC encoder preflight artifact directory, defaults to a timestamped `/tmp/lesavka-local-hevc-encoder-preflight-*` path |
|
||||
| `LESAVKA_LOCAL_HEVC_ENCODER_PREFLIGHT_SECONDS` | local HEVC encoder preflight media duration per mode, defaults to `5` |
|
||||
| `LESAVKA_MIC_PACKET_TARGET_US` | client microphone capture override; tunes Pulse/PipeWire packet sizing, buffering, or selected source behavior |
|
||||
| `LESAVKA_MIC_PULSE_BUFFER_TIME_US` | client microphone capture override; tunes Pulse/PipeWire packet sizing, buffering, or selected source behavior |
|
||||
| `LESAVKA_MIC_PULSE_LATENCY_TIME_US` | client microphone capture override; tunes Pulse/PipeWire packet sizing, buffering, or selected source behavior |
|
||||
@ -377,9 +436,13 @@ These entries are intentionally concise because most are manual lab or CI harnes
|
||||
| `LESAVKA_SERVER_RC_FRESHNESS_MAX_CLOCK_UNCERTAINTY_MS` | manual server-to-RCT mode-matrix probe override; used to tune, confirm, or summarize server-generated UVC/UAC output against the RC capture target |
|
||||
| `LESAVKA_SERVER_RC_FRESHNESS_MAX_DRIFT_MS` | manual server-to-RCT mode-matrix probe override; used to tune, confirm, or summarize server-generated UVC/UAC output against the RC capture target |
|
||||
| `LESAVKA_SERVER_RC_FRESHNESS_MIN_PAIRS` | manual server-to-RCT mode-matrix probe override; used to tune, confirm, or summarize server-generated UVC/UAC output against the RC capture target |
|
||||
| `LESAVKA_SERVER_RC_HEVC_MODE_DELAYS_US` | manual server-to-RCT mode-matrix HEVC seed delay map; defaults to `1280x720@20=173852,1280x720@30=145695,1920x1080@20=160045,1920x1080@30=127952` |
|
||||
| `LESAVKA_SERVER_RC_MAX_AUDIO_HICCUPS` | manual server-to-RCT mode-matrix probe override; used to tune, confirm, or summarize server-generated UVC/UAC output against the RC capture target |
|
||||
| `LESAVKA_SERVER_RC_MAX_AUDIO_LOW_RMS_WINDOWS` | manual server-to-RCT mode-matrix probe override; used to tune, confirm, or summarize server-generated UVC/UAC output against the RC capture target |
|
||||
| `LESAVKA_SERVER_RC_MAX_AUDIO_P95_JITTER_MS` | manual server-to-RCT mode-matrix probe override; used to tune, confirm, or summarize server-generated UVC/UAC output against the RC capture target |
|
||||
| `LESAVKA_SERVER_RC_MJPEG_MODE_DELAYS_US` | manual server-to-RCT mode-matrix MJPEG seed delay map; defaults to the baked MJPEG profile values |
|
||||
| `LESAVKA_SERVER_RC_NORMALIZED_PROFILE` | internal manual mode-matrix normalized profile value; derived from `LESAVKA_SERVER_RC_PROFILE` |
|
||||
| `LESAVKA_SERVER_RC_PROFILE` | manual server-to-RCT mode-matrix ingress profile (`mjpeg` or `hevc`); selects profile-specific delays and capture budgets |
|
||||
| `LESAVKA_SERVER_RC_MAX_VIDEO_DUPLICATE_FRAMES` | manual server-to-RCT mode-matrix probe override; used to tune, confirm, or summarize server-generated UVC/UAC output against the RC capture target |
|
||||
| `LESAVKA_SERVER_RC_MAX_VIDEO_HICCUPS` | manual server-to-RCT mode-matrix probe override; used to tune, confirm, or summarize server-generated UVC/UAC output against the RC capture target |
|
||||
| `LESAVKA_SERVER_RC_MAX_VIDEO_MISSING_FRAMES` | manual server-to-RCT mode-matrix probe override; used to tune, confirm, or summarize server-generated UVC/UAC output against the RC capture target |
|
||||
@ -472,6 +535,15 @@ These entries are intentionally concise because most are manual lab or CI harnes
|
||||
| `LESAVKA_UAC_APP_MAX_BUFFERS` | server UAC appsrc buffering override for lab tuning of microphone gadget output latency and stability |
|
||||
| `LESAVKA_UAC_APP_MAX_BYTES` | server UAC appsrc buffering override for lab tuning of microphone gadget output latency and stability |
|
||||
| `LESAVKA_UAC_APP_MAX_TIME_NS` | server UAC appsrc buffering override for lab tuning of microphone gadget output latency and stability |
|
||||
| `LESAVKA_UPLINK_CAMERA_CODEC` | server camera ingress codec hint; records whether upstream camera media arrives as `mjpeg`, `h264`, or `hevc` before UVC output |
|
||||
| `LESAVKA_UPSTREAM_HEVC_AUDIO_PLAYOUT_MODE_OFFSETS_US` | server HEVC-ingress audio playout delay map by `WIDTHxHEIGHT@FPS`; overrides generic upstream audio offsets for HEVC |
|
||||
| `LESAVKA_UPSTREAM_HEVC_AUDIO_PLAYOUT_OFFSET_US` | server HEVC-ingress scalar audio playout delay in microseconds; used when no mode-specific value is present |
|
||||
| `LESAVKA_UPSTREAM_HEVC_VIDEO_PLAYOUT_MODE_OFFSETS_US` | server HEVC-ingress video playout delay map by `WIDTHxHEIGHT@FPS`; includes decode/re-emit path calibration |
|
||||
| `LESAVKA_UPSTREAM_HEVC_VIDEO_PLAYOUT_OFFSET_US` | server HEVC-ingress scalar video playout delay in microseconds; used when no mode-specific value is present |
|
||||
| `LESAVKA_UPSTREAM_MJPEG_AUDIO_PLAYOUT_MODE_OFFSETS_US` | server MJPEG-ingress audio playout delay map by `WIDTHxHEIGHT@FPS`; preserves the calibrated MJPEG transport profile |
|
||||
| `LESAVKA_UPSTREAM_MJPEG_AUDIO_PLAYOUT_OFFSET_US` | server MJPEG-ingress scalar audio playout delay in microseconds; used when no mode-specific value is present |
|
||||
| `LESAVKA_UPSTREAM_MJPEG_VIDEO_PLAYOUT_MODE_OFFSETS_US` | server MJPEG-ingress video playout delay map by `WIDTHxHEIGHT@FPS`; preserves the calibrated MJPEG transport profile |
|
||||
| `LESAVKA_UPSTREAM_MJPEG_VIDEO_PLAYOUT_OFFSET_US` | server MJPEG-ingress scalar video playout delay in microseconds; used when no mode-specific value is present |
|
||||
| `LESAVKA_UPSTREAM_BLIND_HEAL` | server upstream media blind-healer tuning knob; adjusts cautious runtime offset correction when telemetry indicates persistent skew |
|
||||
| `LESAVKA_UPSTREAM_BLIND_HEAL_COOLDOWN_MS` | server upstream media blind-healer tuning knob; adjusts cautious runtime offset correction when telemetry indicates persistent skew |
|
||||
| `LESAVKA_UPSTREAM_BLIND_HEAL_DEADBAND_MS` | server upstream media blind-healer tuning knob; adjusts cautious runtime offset correction when telemetry indicates persistent skew |
|
||||
@ -487,4 +559,5 @@ These entries are intentionally concise because most are manual lab or CI harnes
|
||||
| `LESAVKA_UPSTREAM_BLIND_HEAL_TARGET` | server upstream media blind-healer tuning knob; adjusts cautious runtime offset correction when telemetry indicates persistent skew |
|
||||
| `LESAVKA_UPSTREAM_SOURCE_LEAD_CAP_MS` | server upstream media timing override; bounds live source lead or playout behavior while tuning client-to-server transport |
|
||||
| `LESAVKA_UVC_CONFIGFS_BASE` | server UVC gadget mode/configfs override used by runtime reconfiguration and hardware-in-the-loop probes |
|
||||
| `LESAVKA_UVC_HEVC_JPEG_QUALITY` | server HEVC-to-MJPEG UVC bridge JPEG quality; defaults to `90` to keep RCT output compatible while limiting encode cost |
|
||||
| `LESAVKA_UVC_MODE` | server UVC gadget mode/configfs override used by runtime reconfiguration and hardware-in-the-loop probes |
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
@ -35,6 +35,7 @@ MEDIA_TESTS=(
|
||||
--test client_microphone_source_contract
|
||||
--test client_uplink_freshness_contract
|
||||
--test client_uplink_performance_contract
|
||||
--test client_rct_transport_probe_contract
|
||||
--test client_output_video_include_contract
|
||||
--test handshake_camera_contract
|
||||
--test server_camera_contract
|
||||
|
||||
@ -12,6 +12,14 @@ PUSHGATEWAY_URL=${QUALITY_GATE_PUSHGATEWAY_URL:-}
|
||||
|
||||
mkdir -p "${REPORT_DIR}"
|
||||
|
||||
clean_stray_profraw() {
|
||||
find "${ROOT_DIR}" -path "${ROOT_DIR}/target" -prune -o -name '*.profraw' -type f -print0 \
|
||||
| xargs -0r rm -f
|
||||
}
|
||||
|
||||
clean_stray_profraw
|
||||
trap clean_stray_profraw EXIT
|
||||
|
||||
branch=${BRANCH_NAME:-${GIT_BRANCH:-}}
|
||||
if [[ -z "${branch}" ]]; then
|
||||
branch=$(git -C "${ROOT_DIR}" rev-parse --abbrev-ref HEAD 2>/dev/null || echo unknown)
|
||||
|
||||
@ -6,55 +6,67 @@
|
||||
},
|
||||
"client/src/app/session_lifecycle.rs": {
|
||||
"line_percent": 97.56,
|
||||
"loc": 348
|
||||
"loc": 346
|
||||
},
|
||||
"client/src/app/uplink_media/uplink_queue_metadata.rs": {
|
||||
"line_percent": 95.0,
|
||||
"loc": 212
|
||||
},
|
||||
"client/src/app_support.rs": {
|
||||
"line_percent": 100.0,
|
||||
"loc": 132
|
||||
"loc": 138
|
||||
},
|
||||
"client/src/bin/lesavka-relayctl.rs": {
|
||||
"line_percent": 100.0,
|
||||
"loc": 304
|
||||
"loc": 423
|
||||
},
|
||||
"client/src/bin/lesavka-sync-analyze.rs": {
|
||||
"line_percent": 95.0,
|
||||
"loc": 125
|
||||
"line_percent": 98.42,
|
||||
"loc": 425
|
||||
},
|
||||
"client/src/bin/lesavka-sync-probe.rs": {
|
||||
"line_percent": 100.0,
|
||||
"loc": 19
|
||||
},
|
||||
"client/src/bin/lesavka_relayctl/upstream_sync_formatting.rs": {
|
||||
"line_percent": 100.0,
|
||||
"loc": 189
|
||||
},
|
||||
"client/src/bin/lesavka_sync_analyze/human_report.rs": {
|
||||
"line_percent": 96.77,
|
||||
"loc": 93
|
||||
},
|
||||
"client/src/handshake.rs": {
|
||||
"line_percent": 100.0,
|
||||
"loc": 381
|
||||
"loc": 386
|
||||
},
|
||||
"client/src/input/camera.rs": {
|
||||
"line_percent": 100.0,
|
||||
"loc": 63
|
||||
"loc": 206
|
||||
},
|
||||
"client/src/input/camera/bus_and_encoder.rs": {
|
||||
"line_percent": 100.0,
|
||||
"loc": 69
|
||||
},
|
||||
"client/src/input/camera/capture_pipeline.rs": {
|
||||
"line_percent": 97.66,
|
||||
"loc": 295
|
||||
"line_percent": 95.81,
|
||||
"loc": 404
|
||||
},
|
||||
"client/src/input/camera/device_selection.rs": {
|
||||
"line_percent": 97.73,
|
||||
"loc": 102
|
||||
"line_percent": 98.04,
|
||||
"loc": 110
|
||||
},
|
||||
"client/src/input/camera/encoder_selection.rs": {
|
||||
"line_percent": 100.0,
|
||||
"loc": 85
|
||||
"loc": 125
|
||||
},
|
||||
"client/src/input/camera/preview_tap.rs": {
|
||||
"line_percent": 97.01,
|
||||
"loc": 100
|
||||
"loc": 119
|
||||
},
|
||||
"client/src/input/camera/source_description.rs": {
|
||||
"line_percent": 100.0,
|
||||
"loc": 76
|
||||
"loc": 84
|
||||
},
|
||||
"client/src/input/inputs/construction_and_scan.rs": {
|
||||
"line_percent": 98.85,
|
||||
@ -93,8 +105,12 @@
|
||||
"loc": 196
|
||||
},
|
||||
"client/src/input/microphone.rs": {
|
||||
"line_percent": 99.63,
|
||||
"loc": 479
|
||||
"line_percent": 96.81,
|
||||
"loc": 338
|
||||
},
|
||||
"client/src/input/microphone/capture_runtime.rs": {
|
||||
"line_percent": 98.79,
|
||||
"loc": 297
|
||||
},
|
||||
"client/src/input/mouse.rs": {
|
||||
"line_percent": 98.85,
|
||||
@ -105,32 +121,32 @@
|
||||
"loc": 173
|
||||
},
|
||||
"client/src/launcher/devices.rs": {
|
||||
"line_percent": 96.74,
|
||||
"loc": 385
|
||||
"line_percent": 96.76,
|
||||
"loc": 387
|
||||
},
|
||||
"client/src/launcher/diagnostics/diagnostics_models.rs": {
|
||||
"line_percent": 100.0,
|
||||
"loc": 185
|
||||
"loc": 199
|
||||
},
|
||||
"client/src/launcher/diagnostics/recommendations.rs": {
|
||||
"line_percent": 97.62,
|
||||
"loc": 277
|
||||
},
|
||||
"client/src/launcher/diagnostics/snapshot_report.rs": {
|
||||
"line_percent": 98.31,
|
||||
"loc": 286
|
||||
"line_percent": 98.42,
|
||||
"loc": 303
|
||||
},
|
||||
"client/src/launcher/diagnostics/snapshot_report_text.rs": {
|
||||
"line_percent": 96.69,
|
||||
"loc": 292
|
||||
"line_percent": 96.53,
|
||||
"loc": 329
|
||||
},
|
||||
"client/src/launcher/mod.rs": {
|
||||
"line_percent": 100.0,
|
||||
"loc": 246
|
||||
"loc": 240
|
||||
},
|
||||
"client/src/launcher/state/launcher_state_impl.rs": {
|
||||
"line_percent": 100.0,
|
||||
"loc": 456
|
||||
"line_percent": 99.19,
|
||||
"loc": 459
|
||||
},
|
||||
"client/src/launcher/state/launcher_status_line.rs": {
|
||||
"line_percent": 96.3,
|
||||
@ -141,12 +157,16 @@
|
||||
"loc": 244
|
||||
},
|
||||
"client/src/launcher/state/selection_models.rs": {
|
||||
"line_percent": 99.53,
|
||||
"loc": 456
|
||||
"line_percent": 99.26,
|
||||
"loc": 325
|
||||
},
|
||||
"client/src/launcher/state/selection_models/sync_and_state_status.rs": {
|
||||
"line_percent": 100.0,
|
||||
"loc": 329
|
||||
},
|
||||
"client/src/launcher/ui.rs": {
|
||||
"line_percent": 100.0,
|
||||
"loc": 194
|
||||
"loc": 201
|
||||
},
|
||||
"client/src/launcher/ui/session_preview_coverage.rs": {
|
||||
"line_percent": 100.0,
|
||||
@ -157,20 +177,20 @@
|
||||
"loc": 78
|
||||
},
|
||||
"client/src/live_capture_clock.rs": {
|
||||
"line_percent": 99.08,
|
||||
"loc": 429
|
||||
"line_percent": 97.01,
|
||||
"loc": 286
|
||||
},
|
||||
"client/src/live_media_control.rs": {
|
||||
"line_percent": 100.0,
|
||||
"loc": 203
|
||||
"line_percent": 99.07,
|
||||
"loc": 344
|
||||
},
|
||||
"client/src/main.rs": {
|
||||
"line_percent": 100.0,
|
||||
"loc": 101
|
||||
},
|
||||
"client/src/output/audio.rs": {
|
||||
"line_percent": 98.07,
|
||||
"loc": 392
|
||||
"line_percent": 95.05,
|
||||
"loc": 415
|
||||
},
|
||||
"client/src/output/display.rs": {
|
||||
"line_percent": 97.44,
|
||||
@ -197,64 +217,96 @@
|
||||
"loc": 296
|
||||
},
|
||||
"client/src/sync_probe/analyze.rs": {
|
||||
"line_percent": 97.92,
|
||||
"loc": 87
|
||||
"line_percent": 98.22,
|
||||
"loc": 390
|
||||
},
|
||||
"client/src/sync_probe/analyze/media_extract.rs": {
|
||||
"line_percent": 97.81,
|
||||
"loc": 300
|
||||
"line_percent": 97.18,
|
||||
"loc": 206
|
||||
},
|
||||
"client/src/sync_probe/analyze/media_extract/roi.rs": {
|
||||
"line_percent": 98.74,
|
||||
"loc": 308
|
||||
},
|
||||
"client/src/sync_probe/analyze/onset_detection.rs": {
|
||||
"line_percent": 100.0,
|
||||
"loc": 248
|
||||
"loc": 75
|
||||
},
|
||||
"client/src/sync_probe/analyze/onset_detection/correlation.rs": {
|
||||
"line_percent": 98.04,
|
||||
"loc": 426
|
||||
"client/src/sync_probe/analyze/onset_detection/audio_tone_detection.rs": {
|
||||
"line_percent": 95.6,
|
||||
"loc": 336
|
||||
},
|
||||
"client/src/sync_probe/analyze/onset_detection/correlation/activity_pairing.rs": {
|
||||
"line_percent": 95.06,
|
||||
"loc": 387
|
||||
},
|
||||
"client/src/sync_probe/analyze/onset_detection/correlation/coded_pair_candidates.rs": {
|
||||
"line_percent": 98.64,
|
||||
"loc": 183
|
||||
},
|
||||
"client/src/sync_probe/analyze/onset_detection/correlation/coded_pair_matching.rs": {
|
||||
"line_percent": 96.59,
|
||||
"loc": 441
|
||||
},
|
||||
"client/src/sync_probe/analyze/onset_detection/correlation/report_building.rs": {
|
||||
"line_percent": 100.0,
|
||||
"loc": 70
|
||||
},
|
||||
"client/src/sync_probe/analyze/onset_detection/correlation_collapse.rs": {
|
||||
"line_percent": 98.73,
|
||||
"loc": 311
|
||||
},
|
||||
"client/src/sync_probe/analyze/onset_detection/video_segment_detection.rs": {
|
||||
"line_percent": 99.59,
|
||||
"loc": 283
|
||||
},
|
||||
"client/src/sync_probe/analyze/report.rs": {
|
||||
"line_percent": 100.0,
|
||||
"loc": 217
|
||||
"line_percent": 99.47,
|
||||
"loc": 346
|
||||
},
|
||||
"client/src/sync_probe/analyze/test_support.rs": {
|
||||
"line_percent": 98.67,
|
||||
"loc": 100
|
||||
"line_percent": 98.88,
|
||||
"loc": 126
|
||||
},
|
||||
"client/src/sync_probe/capture.rs": {
|
||||
"line_percent": 100.0,
|
||||
"loc": 153
|
||||
"loc": 283
|
||||
},
|
||||
"client/src/sync_probe/capture/coverage_stub.rs": {
|
||||
"line_percent": 100.0,
|
||||
"loc": 34
|
||||
"loc": 40
|
||||
},
|
||||
"client/src/sync_probe/config.rs": {
|
||||
"line_percent": 98.03,
|
||||
"loc": 214
|
||||
"line_percent": 97.62,
|
||||
"loc": 288
|
||||
},
|
||||
"client/src/sync_probe/runner.rs": {
|
||||
"line_percent": 95.65,
|
||||
"loc": 221
|
||||
"line_percent": 95.83,
|
||||
"loc": 166
|
||||
},
|
||||
"client/src/sync_probe/schedule.rs": {
|
||||
"line_percent": 98.74,
|
||||
"loc": 234
|
||||
"line_percent": 98.66,
|
||||
"loc": 359
|
||||
},
|
||||
"client/src/sync_probe/signature.rs": {
|
||||
"line_percent": 100.0,
|
||||
"loc": 188
|
||||
},
|
||||
"client/src/sync_probe/timeline.rs": {
|
||||
"line_percent": 99.37,
|
||||
"loc": 274
|
||||
},
|
||||
"client/src/uplink_fresh_queue.rs": {
|
||||
"line_percent": 100.0,
|
||||
"loc": 288
|
||||
"loc": 346
|
||||
},
|
||||
"client/src/uplink_latency_harness.rs": {
|
||||
"line_percent": 98.73,
|
||||
"loc": 284
|
||||
},
|
||||
"client/src/uplink_telemetry.rs": {
|
||||
"line_percent": 96.89,
|
||||
"loc": 336
|
||||
"line_percent": 100.0,
|
||||
"loc": 368
|
||||
},
|
||||
"client/src/video_support.rs": {
|
||||
"line_percent": 97.3,
|
||||
@ -294,11 +346,11 @@
|
||||
},
|
||||
"server/src/audio/ear_capture.rs": {
|
||||
"line_percent": 100.0,
|
||||
"loc": 460
|
||||
"loc": 453
|
||||
},
|
||||
"server/src/audio/voice_input.rs": {
|
||||
"line_percent": 100.0,
|
||||
"loc": 461
|
||||
"line_percent": 98.75,
|
||||
"loc": 413
|
||||
},
|
||||
"server/src/bin/lesavka_uvc/control_payloads.rs": {
|
||||
"line_percent": 100.0,
|
||||
@ -309,28 +361,44 @@
|
||||
"loc": 162
|
||||
},
|
||||
"server/src/bin/lesavka_uvc/coverage_startup.rs": {
|
||||
"line_percent": 98.99,
|
||||
"loc": 129
|
||||
"line_percent": 98.48,
|
||||
"loc": 203
|
||||
},
|
||||
"server/src/bin/lesavka_uvc/payload_limits.rs": {
|
||||
"line_percent": 100.0,
|
||||
"loc": 74
|
||||
},
|
||||
"server/src/blind_healer.rs": {
|
||||
"line_percent": 100.0,
|
||||
"loc": 473
|
||||
},
|
||||
"server/src/calibration.rs": {
|
||||
"line_percent": 99.72,
|
||||
"loc": 467
|
||||
"line_percent": 96.74,
|
||||
"loc": 460
|
||||
},
|
||||
"server/src/calibration/mode_env.rs": {
|
||||
"line_percent": 100.0,
|
||||
"loc": 96
|
||||
},
|
||||
"server/src/calibration/profile_offsets.rs": {
|
||||
"line_percent": 97.01,
|
||||
"loc": 151
|
||||
},
|
||||
"server/src/camera.rs": {
|
||||
"line_percent": 100.0,
|
||||
"loc": 132
|
||||
"loc": 134
|
||||
},
|
||||
"server/src/camera/selection.rs": {
|
||||
"line_percent": 97.83,
|
||||
"loc": 383
|
||||
"line_percent": 97.79,
|
||||
"loc": 472
|
||||
},
|
||||
"server/src/camera/selection/config_env.rs": {
|
||||
"line_percent": 100.0,
|
||||
"loc": 32
|
||||
},
|
||||
"server/src/camera_runtime.rs": {
|
||||
"line_percent": 95.52,
|
||||
"loc": 211
|
||||
"line_percent": 100.0,
|
||||
"loc": 230
|
||||
},
|
||||
"server/src/capture_power.rs": {
|
||||
"line_percent": 100.0,
|
||||
@ -358,11 +426,11 @@
|
||||
},
|
||||
"server/src/handshake.rs": {
|
||||
"line_percent": 100.0,
|
||||
"loc": 45
|
||||
"loc": 47
|
||||
},
|
||||
"server/src/main.rs": {
|
||||
"line_percent": 100.0,
|
||||
"loc": 100
|
||||
"loc": 109
|
||||
},
|
||||
"server/src/main/entrypoint.rs": {
|
||||
"line_percent": 100.0,
|
||||
@ -378,23 +446,23 @@
|
||||
},
|
||||
"server/src/main/handler_startup.rs": {
|
||||
"line_percent": 100.0,
|
||||
"loc": 140
|
||||
"loc": 145
|
||||
},
|
||||
"server/src/main/relay_service.rs": {
|
||||
"server/src/main/relay_service_coverage/freshness_helpers.rs": {
|
||||
"line_percent": 100.0,
|
||||
"loc": 485
|
||||
"loc": 190
|
||||
},
|
||||
"server/src/main/relay_service_coverage.rs": {
|
||||
"line_percent": 96.53,
|
||||
"loc": 301
|
||||
"server/src/main/relay_service_coverage/relay_trait_impl.rs": {
|
||||
"line_percent": 96.15,
|
||||
"loc": 372
|
||||
},
|
||||
"server/src/main/relay_stream_lifecycle.rs": {
|
||||
"line_percent": 100.0,
|
||||
"loc": 130
|
||||
"line_percent": 96.97,
|
||||
"loc": 214
|
||||
},
|
||||
"server/src/main/rpc_helpers.rs": {
|
||||
"line_percent": 100.0,
|
||||
"loc": 118
|
||||
"line_percent": 97.14,
|
||||
"loc": 242
|
||||
},
|
||||
"server/src/main/usb_recovery_helpers.rs": {
|
||||
"line_percent": 100.0,
|
||||
@ -404,6 +472,18 @@
|
||||
"line_percent": 100.0,
|
||||
"loc": 72
|
||||
},
|
||||
"server/src/output_delay_probe/media_encoding.rs": {
|
||||
"line_percent": 95.51,
|
||||
"loc": 452
|
||||
},
|
||||
"server/src/output_delay_probe/probe_runtime.rs": {
|
||||
"line_percent": 100.0,
|
||||
"loc": 148
|
||||
},
|
||||
"server/src/output_delay_probe/timeline_config.rs": {
|
||||
"line_percent": 98.85,
|
||||
"loc": 203
|
||||
},
|
||||
"server/src/paste.rs": {
|
||||
"line_percent": 98.29,
|
||||
"loc": 260
|
||||
@ -425,16 +505,20 @@
|
||||
"loc": 211
|
||||
},
|
||||
"server/src/upstream_media_runtime.rs": {
|
||||
"line_percent": 97.36,
|
||||
"loc": 392
|
||||
"line_percent": 96.04,
|
||||
"loc": 443
|
||||
},
|
||||
"server/src/upstream_media_runtime/config.rs": {
|
||||
"line_percent": 100.0,
|
||||
"loc": 88
|
||||
"server/src/upstream_media_runtime/planner_snapshot_methods.rs": {
|
||||
"line_percent": 97.75,
|
||||
"loc": 114
|
||||
},
|
||||
"server/src/upstream_media_runtime/lease_lifecycle.rs": {
|
||||
"server/src/upstream_media_runtime/playout_planning_methods.rs": {
|
||||
"line_percent": 100.0,
|
||||
"loc": 142
|
||||
"loc": 150
|
||||
},
|
||||
"server/src/upstream_media_runtime/stream_lifecycle_methods.rs": {
|
||||
"line_percent": 100.0,
|
||||
"loc": 140
|
||||
},
|
||||
"server/src/uvc_runtime.rs": {
|
||||
"line_percent": 97.53,
|
||||
@ -442,11 +526,11 @@
|
||||
},
|
||||
"server/src/video/eye_capture.rs": {
|
||||
"line_percent": 100.0,
|
||||
"loc": 415
|
||||
"loc": 476
|
||||
},
|
||||
"server/src/video/stream_core.rs": {
|
||||
"line_percent": 98.73,
|
||||
"loc": 248
|
||||
"line_percent": 98.91,
|
||||
"loc": 286
|
||||
},
|
||||
"server/src/video_sinks/camera_relay.rs": {
|
||||
"line_percent": 100.0,
|
||||
@ -454,15 +538,19 @@
|
||||
},
|
||||
"server/src/video_sinks/hdmi_sink.rs": {
|
||||
"line_percent": 100.0,
|
||||
"loc": 428
|
||||
"loc": 466
|
||||
},
|
||||
"server/src/video_sinks/mjpeg_spool.rs": {
|
||||
"line_percent": 98.33,
|
||||
"loc": 438
|
||||
},
|
||||
"server/src/video_sinks/webcam_sink.rs": {
|
||||
"line_percent": 97.3,
|
||||
"loc": 374
|
||||
"line_percent": 100.0,
|
||||
"loc": 479
|
||||
},
|
||||
"server/src/video_support.rs": {
|
||||
"line_percent": 97.74,
|
||||
"loc": 263
|
||||
"line_percent": 97.47,
|
||||
"loc": 301
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@ -26,6 +26,15 @@ load_uvc_env_defaults() {
|
||||
|
||||
load_uvc_env_defaults
|
||||
|
||||
if [[ ${LESAVKA_CORE_ONESHOT:-0} == 1 ]]; then
|
||||
# Dev calibration needs a complete descriptor refresh without rebooting the Pi.
|
||||
export LESAVKA_ALLOW_GADGET_RESET=${LESAVKA_ALLOW_GADGET_RESET:-1}
|
||||
export LESAVKA_FORCE_GADGET_REBUILD=${LESAVKA_FORCE_GADGET_REBUILD:-1}
|
||||
export LESAVKA_ATTACH_WRITE_UDC=${LESAVKA_ATTACH_WRITE_UDC:-1}
|
||||
export LESAVKA_DETACH_CLEAR_UDC=${LESAVKA_DETACH_CLEAR_UDC:-1}
|
||||
export LESAVKA_UVC_FALLBACK=${LESAVKA_UVC_FALLBACK:-0}
|
||||
fi
|
||||
|
||||
find_udc() {
|
||||
ls /sys/class/udc 2>/dev/null | head -n1 || true
|
||||
}
|
||||
@ -320,8 +329,8 @@ if [[ -n ${LESAVKA_RELOAD_UVCVIDEO:-} ]]; then
|
||||
fi
|
||||
modprobe uvcvideo || { echo "uvcvideo not in kernel; abort" >&2; exit 1; }
|
||||
|
||||
udevadm control --reload
|
||||
udevadm trigger --subsystem-match=video4linux
|
||||
udevadm control --reload || log "⚠️ udevadm control --reload failed or timed out"
|
||||
udevadm trigger --subsystem-match=video4linux || log "⚠️ udevadm video4linux trigger failed"
|
||||
udevadm settle --timeout=5 || log "⚠️ udevadm settle timed out"
|
||||
|
||||
#──────────────────────────────────────────────────
|
||||
|
||||
@ -9,6 +9,7 @@ SCRIPT_REPO_ROOT=$(cd -- "$SCRIPT_DIR/../.." && pwd)
|
||||
DEFAULT_REPO_URL=ssh://git@scm.bstein.dev:2242/bstein/lesavka.git
|
||||
REPO_URL=${LESAVKA_REPO_URL:-}
|
||||
SRC=/var/src/lesavka
|
||||
INSTALL_SOURCE=${LESAVKA_INSTALL_SOURCE:-auto}
|
||||
export TMPDIR=${TMPDIR:-/var/tmp}
|
||||
USER_HOME=$(getent passwd "$ORIG_USER" | cut -d: -f6)
|
||||
CLIENT_PKI_DIR=${LESAVKA_CLIENT_PKI_DIR:-$USER_HOME/.config/lesavka/pki}
|
||||
@ -30,6 +31,60 @@ manifest_package_version() {
|
||||
' "$manifest"
|
||||
}
|
||||
|
||||
source_revision() {
|
||||
local repo=$1
|
||||
local sha=""
|
||||
sha=$(run_as_user git -C "$repo" rev-parse --short HEAD 2>/dev/null || true)
|
||||
if [[ -n $sha ]] && ! run_as_user git -C "$repo" diff --quiet --ignore-submodules -- 2>/dev/null; then
|
||||
sha="${sha}+dirty"
|
||||
fi
|
||||
printf '%s\n' "$sha"
|
||||
}
|
||||
|
||||
resolve_source_checkout() {
|
||||
case "$INSTALL_SOURCE" in
|
||||
auto)
|
||||
if [[ -d $SCRIPT_REPO_ROOT/.git ]]; then
|
||||
SRC=$SCRIPT_REPO_ROOT
|
||||
log "3. Using local source checkout at $SRC"
|
||||
echo " ↪ set LESAVKA_INSTALL_SOURCE=ref to install from ${REF} via Git"
|
||||
return 0
|
||||
fi
|
||||
;;
|
||||
local)
|
||||
if [[ ! -d $SCRIPT_REPO_ROOT/.git ]]; then
|
||||
echo "❌ LESAVKA_INSTALL_SOURCE=local requested, but $SCRIPT_REPO_ROOT is not a Git checkout." >&2
|
||||
exit 1
|
||||
fi
|
||||
SRC=$SCRIPT_REPO_ROOT
|
||||
log "3. Using local source checkout at $SRC"
|
||||
return 0
|
||||
;;
|
||||
ref|git)
|
||||
;;
|
||||
*)
|
||||
echo "❌ unsupported LESAVKA_INSTALL_SOURCE=$INSTALL_SOURCE (expected auto, local, or ref)" >&2
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
|
||||
log "3. Syncing source checkout for ref ${REF}"
|
||||
if [[ ! -d /var/src ]]; then
|
||||
sudo mkdir -p /var/src
|
||||
fi
|
||||
sudo chown "$ORIG_USER":"$ORIG_USER" /var/src
|
||||
if [[ -d $SRC/.git ]]; then
|
||||
run_as_user git -C "$SRC" fetch --all --tags --prune
|
||||
else
|
||||
run_as_user git clone "$REPO_URL" "$SRC"
|
||||
fi
|
||||
if run_as_user git -C "$SRC" rev-parse --verify --quiet "origin/$REF" >/dev/null; then
|
||||
run_as_user git -C "$SRC" checkout -B "$REF" "origin/$REF"
|
||||
else
|
||||
run_as_user git -C "$SRC" checkout --force "$REF"
|
||||
fi
|
||||
}
|
||||
|
||||
installed_kernel_module_trees() {
|
||||
local roots=(/usr/lib/modules /lib/modules)
|
||||
local seen=()
|
||||
@ -240,22 +295,9 @@ log "2. Ensuring Rust toolchain"
|
||||
sudo rustup default stable
|
||||
run_as_user rustup default stable
|
||||
|
||||
# 3. clone / update into a canonical workspace checkout
|
||||
log "3. Syncing source checkout for ref ${REF}"
|
||||
if [[ ! -d /var/src ]]; then
|
||||
sudo mkdir -p /var/src
|
||||
fi
|
||||
sudo chown "$ORIG_USER":"$ORIG_USER" /var/src
|
||||
if [[ -d $SRC/.git ]]; then
|
||||
run_as_user git -C "$SRC" fetch --all --tags --prune
|
||||
else
|
||||
run_as_user git clone "$REPO_URL" "$SRC"
|
||||
fi
|
||||
if run_as_user git -C "$SRC" rev-parse --verify --quiet "origin/$REF" >/dev/null; then
|
||||
run_as_user git -C "$SRC" checkout -B "$REF" "origin/$REF"
|
||||
else
|
||||
run_as_user git -C "$SRC" checkout --force "$REF"
|
||||
fi
|
||||
# 3. resolve the build source. Local checkouts are preferred so development
|
||||
# installs do not silently rebuild an older /var/src clone.
|
||||
resolve_source_checkout
|
||||
|
||||
# 4. build
|
||||
log "4. Building client release binary"
|
||||
@ -291,7 +333,7 @@ sudo systemctl daemon-reload
|
||||
echo
|
||||
echo "✅ lesavka-client install complete"
|
||||
INSTALLED_VERSION=$(manifest_package_version "$SRC/client/Cargo.toml" 2>/dev/null || true)
|
||||
INSTALLED_SHA=$(run_as_user git -C "$SRC" rev-parse --short HEAD 2>/dev/null || true)
|
||||
INSTALLED_SHA=$(source_revision "$SRC")
|
||||
if [[ -n ${INSTALLED_VERSION:-} ]]; then
|
||||
echo "➡️ Installed: lesavka-client ${INSTALLED_VERSION:-unknown}${INSTALLED_SHA:+ ($INSTALLED_SHA)}"
|
||||
fi
|
||||
|
||||
@ -9,8 +9,12 @@ export TMPDIR=${TMPDIR:-/var/tmp}
|
||||
|
||||
REF=${LESAVKA_REF:-master} # fallback
|
||||
REPO_URL=${LESAVKA_REPO_URL:-}
|
||||
INSTALL_SOURCE=${LESAVKA_INSTALL_SOURCE:-auto}
|
||||
USER_HOME=$(getent passwd "$ORIG_USER" | cut -d: -f6)
|
||||
INSTALL_UVC_CODEC=${LESAVKA_INSTALL_UVC_CODEC:-mjpeg}
|
||||
INSTALL_CAM_CODEC=${LESAVKA_INSTALL_CAM_CODEC:-${LESAVKA_CAM_CODEC:-hevc}}
|
||||
INSTALL_UVC_FRAME_META=${LESAVKA_INSTALL_UVC_FRAME_META:-${LESAVKA_UVC_FRAME_META:-0}}
|
||||
INSTALL_UVC_FRAME_META_LOG_PATH=${LESAVKA_INSTALL_UVC_FRAME_META_LOG_PATH:-${LESAVKA_UVC_FRAME_META_LOG_PATH:-/tmp/lesavka-uvc-frame-meta.jsonl}}
|
||||
INSTALL_SERVER_BIND_ADDR=${LESAVKA_INSTALL_SERVER_BIND_ADDR:-0.0.0.0:50051}
|
||||
LESAVKA_TLS_DIR=${LESAVKA_TLS_DIR:-/etc/lesavka/pki}
|
||||
LESAVKA_CLIENT_BUNDLE=${LESAVKA_CLIENT_BUNDLE:-/etc/lesavka/lesavka-client-pki.tar.gz}
|
||||
@ -18,6 +22,10 @@ DEFAULT_MJPEG_UPSTREAM_AUDIO_PLAYOUT_OFFSET_US=0
|
||||
DEFAULT_MJPEG_UPSTREAM_VIDEO_PLAYOUT_OFFSET_US=135090
|
||||
DEFAULT_MJPEG_UPSTREAM_AUDIO_PLAYOUT_MODE_OFFSETS_US=1280x720@20=0,1280x720@30=0,1920x1080@20=0,1920x1080@30=0
|
||||
DEFAULT_MJPEG_UPSTREAM_VIDEO_PLAYOUT_MODE_OFFSETS_US=1280x720@20=162659,1280x720@30=135090,1920x1080@20=160045,1920x1080@30=127952
|
||||
DEFAULT_HEVC_UPSTREAM_AUDIO_PLAYOUT_OFFSET_US=0
|
||||
DEFAULT_HEVC_UPSTREAM_VIDEO_PLAYOUT_OFFSET_US=110000
|
||||
DEFAULT_HEVC_UPSTREAM_AUDIO_PLAYOUT_MODE_OFFSETS_US=1280x720@20=0,1280x720@30=0,1920x1080@20=0,1920x1080@30=0
|
||||
DEFAULT_HEVC_UPSTREAM_VIDEO_PLAYOUT_MODE_OFFSETS_US=1280x720@20=173852,1280x720@30=110000,1920x1080@20=160045,1920x1080@30=127952
|
||||
LEGACY_MJPEG_UPSTREAM_AUDIO_PLAYOUT_OFFSET_US=-45000
|
||||
PREVIOUS_MJPEG_UPSTREAM_AUDIO_PLAYOUT_OFFSET_US=720000
|
||||
PREVIOUS_TUNED_MJPEG_UPSTREAM_AUDIO_PLAYOUT_OFFSET_US=1260000
|
||||
@ -112,6 +120,37 @@ resolve_upstream_video_playout_offset_us() {
|
||||
printf '%s\n' "$default_offset_us"
|
||||
}
|
||||
|
||||
ensure_hevc_decode_support() {
|
||||
echo "==> 1e. HEVC/H.265 decode support"
|
||||
|
||||
if sudo modprobe rpi_hevc_dec >/dev/null 2>&1; then
|
||||
echo " ↪ rpi_hevc_dec kernel module loaded"
|
||||
echo 'rpi_hevc_dec' | sudo tee /etc/modules-load.d/lesavka-hevc.conf >/dev/null
|
||||
else
|
||||
echo " ↪ rpi_hevc_dec kernel module unavailable; HEVC decode will use CPU fallback when needed"
|
||||
fi
|
||||
|
||||
if getent group video >/dev/null 2>&1 && [ -n "${ORIG_USER:-}" ] && [ "${ORIG_USER}" != "root" ]; then
|
||||
sudo usermod -aG video "${ORIG_USER}" || true
|
||||
echo " ↪ ensured ${ORIG_USER} is in the video group for media-device probing"
|
||||
fi
|
||||
|
||||
rm -f "${USER_HOME}/.cache/gstreamer-1.0"/registry.* 2>/dev/null || true
|
||||
sudo rm -f /root/.cache/gstreamer-1.0/registry.* 2>/dev/null || true
|
||||
|
||||
if gst-inspect-1.0 v4l2slh265dec >/dev/null 2>&1; then
|
||||
echo "✅ hardware HEVC decoder exposed: v4l2slh265dec"
|
||||
echo " Lesavka will still smoke-test the decoder before using it; CPU fallback remains available."
|
||||
elif gst-inspect-1.0 v4l2h265dec >/dev/null 2>&1; then
|
||||
echo "✅ hardware HEVC decoder exposed: v4l2h265dec"
|
||||
echo " Lesavka will still smoke-test the decoder before using it; CPU fallback remains available."
|
||||
elif gst-inspect-1.0 avdec_h265 >/dev/null 2>&1; then
|
||||
echo "⚠️ hardware HEVC decoder not exposed; Lesavka can fall back to avdec_h265"
|
||||
else
|
||||
echo "⚠️ no HEVC decoder exposed to GStreamer; install gst-libav or a v4l2 HEVC decoder before enabling HEVC transport"
|
||||
fi
|
||||
}
|
||||
|
||||
manifest_package_version() {
|
||||
local manifest=$1
|
||||
[[ -f $manifest ]] || return 1
|
||||
@ -122,6 +161,64 @@ manifest_package_version() {
|
||||
' "$manifest"
|
||||
}
|
||||
|
||||
source_revision() {
|
||||
local repo=$1
|
||||
local sha=""
|
||||
sha=$(run_as_user git -C "$repo" rev-parse --short HEAD 2>/dev/null || true)
|
||||
if [[ -n $sha ]] && ! run_as_user git -C "$repo" diff --quiet --ignore-submodules -- 2>/dev/null; then
|
||||
sha="${sha}+dirty"
|
||||
fi
|
||||
printf '%s\n' "$sha"
|
||||
}
|
||||
|
||||
resolve_source_checkout() {
|
||||
case "$INSTALL_SOURCE" in
|
||||
auto)
|
||||
if [[ -d $SCRIPT_REPO_ROOT/.git ]]; then
|
||||
SRC_DIR=$SCRIPT_REPO_ROOT
|
||||
echo "==> 4a. Using local source checkout at $SRC_DIR"
|
||||
echo " ↪ set LESAVKA_INSTALL_SOURCE=ref to install from ${REF} via Git"
|
||||
return 0
|
||||
fi
|
||||
;;
|
||||
local)
|
||||
if [[ ! -d $SCRIPT_REPO_ROOT/.git ]]; then
|
||||
echo "❌ LESAVKA_INSTALL_SOURCE=local requested, but $SCRIPT_REPO_ROOT is not a Git checkout." >&2
|
||||
exit 1
|
||||
fi
|
||||
SRC_DIR=$SCRIPT_REPO_ROOT
|
||||
echo "==> 4a. Using local source checkout at $SRC_DIR"
|
||||
return 0
|
||||
;;
|
||||
ref|git)
|
||||
;;
|
||||
*)
|
||||
echo "❌ unsupported LESAVKA_INSTALL_SOURCE=$INSTALL_SOURCE (expected auto, local, or ref)" >&2
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
|
||||
echo "==> 4a. Source checkout"
|
||||
SRC_DIR=/var/src/lesavka
|
||||
if [[ ! -d $SRC_DIR ]]; then
|
||||
sudo mkdir -p /var/src
|
||||
sudo chown "$ORIG_USER":"$ORIG_USER" /var/src
|
||||
fi
|
||||
if [[ -d $SRC_DIR/.git ]]; then
|
||||
run_as_user git -C "$SRC_DIR" fetch --all --tags --prune
|
||||
else
|
||||
run_as_user git clone "$REPO_URL" "$SRC_DIR"
|
||||
fi
|
||||
|
||||
if run_as_user git -C "$SRC_DIR" rev-parse --verify --quiet "origin/$REF" >/dev/null; then
|
||||
run_as_user git -C "$SRC_DIR" checkout -B "$REF" "origin/$REF"
|
||||
else
|
||||
run_as_user git -C "$SRC_DIR" checkout --force "$REF"
|
||||
fi
|
||||
}
|
||||
|
||||
SRC_DIR=/var/src/lesavka
|
||||
|
||||
render_uvc_env_file() {
|
||||
cat <<EOF
|
||||
# generated by lesavka/scripts/install/server.sh
|
||||
@ -906,6 +1003,7 @@ echo "==> 1d. Audio permissions for diagnostics"
|
||||
if getent group audio >/dev/null 2>&1 && [ -n "${SUDO_USER:-}" ] && [ "${SUDO_USER}" != "root" ]; then
|
||||
sudo usermod -aG audio "${SUDO_USER}" || true
|
||||
fi
|
||||
ensure_hevc_decode_support
|
||||
|
||||
echo "==> 2a. Kernel-driver tweaks"
|
||||
cat <<'EOF' | sudo tee /etc/modprobe.d/gc311-stream.conf >/dev/null
|
||||
@ -982,23 +1080,7 @@ echo "==> 3. Rust toolchain"
|
||||
sudo rustup default stable
|
||||
run_as_user rustup default stable
|
||||
|
||||
echo "==> 4a. Source checkout"
|
||||
SRC_DIR=/var/src/lesavka
|
||||
if [[ ! -d $SRC_DIR ]]; then
|
||||
sudo mkdir -p /var/src
|
||||
sudo chown "$ORIG_USER":"$ORIG_USER" /var/src
|
||||
fi
|
||||
if [[ -d $SRC_DIR/.git ]]; then
|
||||
run_as_user git -C "$SRC_DIR" fetch --all --tags --prune
|
||||
else
|
||||
run_as_user git clone "$REPO_URL" "$SRC_DIR"
|
||||
fi
|
||||
|
||||
if run_as_user git -C "$SRC_DIR" rev-parse --verify --quiet "origin/$REF" >/dev/null; then
|
||||
run_as_user git -C "$SRC_DIR" checkout -B "$REF" "origin/$REF"
|
||||
else
|
||||
run_as_user git -C "$SRC_DIR" checkout --force "$REF"
|
||||
fi
|
||||
resolve_source_checkout
|
||||
|
||||
echo "==> 4b. Kernel upgrade (optional)"
|
||||
if [[ "${LESAVKA_KERNEL_UPDATE:-0}" != "0" ]]; then
|
||||
@ -1034,6 +1116,7 @@ fi
|
||||
printf 'LESAVKA_HDMI_CONNECTOR=%s\n' "$HDMI_CONNECTOR"
|
||||
fi
|
||||
printf 'LESAVKA_CAM_OUTPUT=%s\n' "${LESAVKA_INSTALL_CAM_OUTPUT:-uvc}"
|
||||
printf 'LESAVKA_CAM_CODEC=%s\n' "${INSTALL_CAM_CODEC}"
|
||||
printf 'LESAVKA_CAM_WIDTH=%s\n' "${LESAVKA_CAM_WIDTH:-1920}"
|
||||
printf 'LESAVKA_CAM_HEIGHT=%s\n' "${LESAVKA_CAM_HEIGHT:-1080}"
|
||||
printf 'LESAVKA_CAM_FPS=%s\n' "${LESAVKA_CAM_FPS:-30}"
|
||||
@ -1050,6 +1133,14 @@ fi
|
||||
printf 'LESAVKA_UPSTREAM_PLAYOUT_DELAY_MS=%s\n' "${LESAVKA_UPSTREAM_PLAYOUT_DELAY_MS:-350}"
|
||||
printf 'LESAVKA_UPSTREAM_MAX_LIVE_LAG_MS=%s\n' "${LESAVKA_UPSTREAM_MAX_LIVE_LAG_MS:-1000}"
|
||||
printf 'LESAVKA_UPSTREAM_STARTUP_TIMEOUT_MS=%s\n' "${LESAVKA_UPSTREAM_STARTUP_TIMEOUT_MS:-60000}"
|
||||
printf 'LESAVKA_UPSTREAM_MJPEG_AUDIO_PLAYOUT_MODE_OFFSETS_US=%s\n' "${LESAVKA_UPSTREAM_MJPEG_AUDIO_PLAYOUT_MODE_OFFSETS_US:-$DEFAULT_MJPEG_UPSTREAM_AUDIO_PLAYOUT_MODE_OFFSETS_US}"
|
||||
printf 'LESAVKA_UPSTREAM_MJPEG_VIDEO_PLAYOUT_MODE_OFFSETS_US=%s\n' "${LESAVKA_UPSTREAM_MJPEG_VIDEO_PLAYOUT_MODE_OFFSETS_US:-$DEFAULT_MJPEG_UPSTREAM_VIDEO_PLAYOUT_MODE_OFFSETS_US}"
|
||||
printf 'LESAVKA_UPSTREAM_MJPEG_AUDIO_PLAYOUT_OFFSET_US=%s\n' "${LESAVKA_UPSTREAM_MJPEG_AUDIO_PLAYOUT_OFFSET_US:-$DEFAULT_MJPEG_UPSTREAM_AUDIO_PLAYOUT_OFFSET_US}"
|
||||
printf 'LESAVKA_UPSTREAM_MJPEG_VIDEO_PLAYOUT_OFFSET_US=%s\n' "${LESAVKA_UPSTREAM_MJPEG_VIDEO_PLAYOUT_OFFSET_US:-$DEFAULT_MJPEG_UPSTREAM_VIDEO_PLAYOUT_OFFSET_US}"
|
||||
printf 'LESAVKA_UPSTREAM_HEVC_AUDIO_PLAYOUT_MODE_OFFSETS_US=%s\n' "${LESAVKA_UPSTREAM_HEVC_AUDIO_PLAYOUT_MODE_OFFSETS_US:-$DEFAULT_HEVC_UPSTREAM_AUDIO_PLAYOUT_MODE_OFFSETS_US}"
|
||||
printf 'LESAVKA_UPSTREAM_HEVC_VIDEO_PLAYOUT_MODE_OFFSETS_US=%s\n' "${LESAVKA_UPSTREAM_HEVC_VIDEO_PLAYOUT_MODE_OFFSETS_US:-$DEFAULT_HEVC_UPSTREAM_VIDEO_PLAYOUT_MODE_OFFSETS_US}"
|
||||
printf 'LESAVKA_UPSTREAM_HEVC_AUDIO_PLAYOUT_OFFSET_US=%s\n' "${LESAVKA_UPSTREAM_HEVC_AUDIO_PLAYOUT_OFFSET_US:-$DEFAULT_HEVC_UPSTREAM_AUDIO_PLAYOUT_OFFSET_US}"
|
||||
printf 'LESAVKA_UPSTREAM_HEVC_VIDEO_PLAYOUT_OFFSET_US=%s\n' "${LESAVKA_UPSTREAM_HEVC_VIDEO_PLAYOUT_OFFSET_US:-$DEFAULT_HEVC_UPSTREAM_VIDEO_PLAYOUT_OFFSET_US}"
|
||||
printf 'LESAVKA_UPSTREAM_AUDIO_PLAYOUT_MODE_OFFSETS_US=%s\n' "${LESAVKA_UPSTREAM_AUDIO_PLAYOUT_MODE_OFFSETS_US:-$DEFAULT_MJPEG_UPSTREAM_AUDIO_PLAYOUT_MODE_OFFSETS_US}"
|
||||
printf 'LESAVKA_UPSTREAM_VIDEO_PLAYOUT_MODE_OFFSETS_US=%s\n' "${LESAVKA_UPSTREAM_VIDEO_PLAYOUT_MODE_OFFSETS_US:-$DEFAULT_MJPEG_UPSTREAM_VIDEO_PLAYOUT_MODE_OFFSETS_US}"
|
||||
printf 'LESAVKA_UPSTREAM_AUDIO_PLAYOUT_OFFSET_US=%s\n' "$(resolve_upstream_audio_playout_offset_us)"
|
||||
@ -1063,6 +1154,8 @@ fi
|
||||
printf 'LESAVKA_UVC_HEIGHT=%s\n' "${LESAVKA_UVC_HEIGHT:-720}"
|
||||
printf 'LESAVKA_UVC_FPS=%s\n' "${LESAVKA_UVC_FPS:-30}"
|
||||
printf 'LESAVKA_UVC_INTERVAL=%s\n' "${LESAVKA_UVC_INTERVAL:-333333}"
|
||||
printf 'LESAVKA_UVC_FRAME_META=%s\n' "${INSTALL_UVC_FRAME_META}"
|
||||
printf 'LESAVKA_UVC_FRAME_META_LOG_PATH=%s\n' "${INSTALL_UVC_FRAME_META_LOG_PATH}"
|
||||
printf 'LESAVKA_REQUIRE_TLS=%s\n' "${LESAVKA_REQUIRE_TLS:-1}"
|
||||
printf 'LESAVKA_TLS_CERT=%s\n' "${LESAVKA_TLS_CERT:-$LESAVKA_TLS_DIR/server.crt}"
|
||||
printf 'LESAVKA_TLS_KEY=%s\n' "${LESAVKA_TLS_KEY:-$LESAVKA_TLS_DIR/server.key}"
|
||||
|
||||
92
scripts/manual/client_rct_clock_alignment.py
Executable file
92
scripts/manual/client_rct_clock_alignment.py
Executable file
@ -0,0 +1,92 @@
|
||||
#!/usr/bin/env python3
|
||||
"""Sample client-to-RCT clock alignment for client-origin transport probes."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
import pathlib
|
||||
import shlex
|
||||
import subprocess
|
||||
import sys
|
||||
import time
|
||||
|
||||
|
||||
def sample_clock_alignment(host: str, ssh_opts_text: str) -> dict:
|
||||
"""Return a midpoint clock-offset estimate between this client and RCT.
|
||||
|
||||
Inputs: SSH host plus the same SSH option string used by the manual probe.
|
||||
Outputs: a JSON-serializable clock alignment record.
|
||||
Why: client-origin freshness needs the final capture timestamps translated
|
||||
into the client's clock without requiring NTP-level access or sudo.
|
||||
"""
|
||||
|
||||
ssh_opts = shlex.split(ssh_opts_text)
|
||||
remote_code = "import sys,time\nfor _ in sys.stdin:\n print(time.time_ns(), flush=True)\n"
|
||||
proc = subprocess.Popen(
|
||||
["ssh", *ssh_opts, host, "python3 -u -c " + shlex.quote(remote_code)],
|
||||
stdin=subprocess.PIPE,
|
||||
stdout=subprocess.PIPE,
|
||||
stderr=subprocess.DEVNULL,
|
||||
text=True,
|
||||
)
|
||||
rows: list[tuple[int, int]] = []
|
||||
try:
|
||||
assert proc.stdin is not None
|
||||
assert proc.stdout is not None
|
||||
for _ in range(9):
|
||||
start = time.time_ns()
|
||||
proc.stdin.write("sample\n")
|
||||
proc.stdin.flush()
|
||||
remote = proc.stdout.readline().strip()
|
||||
end = time.time_ns()
|
||||
if not remote:
|
||||
raise RuntimeError("remote clock sampler stopped before returning data")
|
||||
midpoint = (start + end) // 2
|
||||
rows.append((end - start, int(remote) - midpoint))
|
||||
time.sleep(0.05)
|
||||
finally:
|
||||
if proc.stdin is not None:
|
||||
proc.stdin.close()
|
||||
proc.terminate()
|
||||
try:
|
||||
proc.wait(timeout=2)
|
||||
except subprocess.TimeoutExpired:
|
||||
proc.kill()
|
||||
|
||||
rows.sort(key=lambda row: row[0])
|
||||
best = rows[:5]
|
||||
offset = round(sum(row[1] for row in best) / len(best))
|
||||
uncertainty = max(row[0] for row in best) // 2
|
||||
return {
|
||||
"schema": "lesavka.client-rct-clock-alignment.v1",
|
||||
"available": True,
|
||||
"method": "persistent client-to-capture ssh midpoint",
|
||||
"capture_host": host,
|
||||
"capture_clock_offset_from_client_ns": offset,
|
||||
"clock_uncertainty_ns": uncertainty,
|
||||
"clock_uncertainty_ms": uncertainty / 1_000_000.0,
|
||||
"samples": len(rows),
|
||||
}
|
||||
|
||||
|
||||
def main() -> int:
|
||||
"""CLI entrypoint for the clock alignment helper."""
|
||||
|
||||
if len(sys.argv) != 4:
|
||||
print(
|
||||
"usage: client_rct_clock_alignment.py TETHYS_HOST SSH_OPTS OUTPUT_JSON",
|
||||
file=sys.stderr,
|
||||
)
|
||||
return 2
|
||||
host, ssh_opts_text, output_path = sys.argv[1:]
|
||||
data = sample_clock_alignment(host, ssh_opts_text)
|
||||
pathlib.Path(output_path).write_text(json.dumps(data, indent=2, sort_keys=True) + "\n")
|
||||
offset = data["capture_clock_offset_from_client_ns"] / 1_000_000.0
|
||||
uncertainty = data["clock_uncertainty_ms"]
|
||||
print(f" ↪ tethys_from_client_offset_ms={offset:+.3f}")
|
||||
print(f" ↪ clock_alignment_uncertainty_ms={uncertainty:.3f}")
|
||||
return 0
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
raise SystemExit(main())
|
||||
342
scripts/manual/client_rct_timing_trace_summary.py
Executable file
342
scripts/manual/client_rct_timing_trace_summary.py
Executable file
@ -0,0 +1,342 @@
|
||||
#!/usr/bin/env python3
|
||||
"""Build a T0-T5 timing trace from a client-to-RCT probe artifact."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import argparse
|
||||
import json
|
||||
import math
|
||||
import pathlib
|
||||
import statistics
|
||||
from typing import Any
|
||||
|
||||
|
||||
def load_json(path: pathlib.Path) -> dict[str, Any]:
|
||||
"""Read a JSON object from `path`.
|
||||
|
||||
Inputs: an artifact path produced by the client-to-RCT harness.
|
||||
Output: the parsed object. Why: the trace intentionally reuses the blind
|
||||
probe artifacts instead of adding another media generator.
|
||||
"""
|
||||
|
||||
return json.loads(path.read_text())
|
||||
|
||||
|
||||
def load_jsonl(path: pathlib.Path) -> list[dict[str, Any]]:
|
||||
"""Read valid JSON records from an optional JSONL artifact.
|
||||
|
||||
Inputs: a JSONL path. Output: parsed objects with malformed lines ignored.
|
||||
Why: operational probe logs can be truncated when a run fails, but partial
|
||||
timing evidence is still useful for locating the layer that drifted.
|
||||
"""
|
||||
|
||||
if not path.exists():
|
||||
return []
|
||||
records: list[dict[str, Any]] = []
|
||||
for line in path.read_text(errors="replace").splitlines():
|
||||
if not line.strip():
|
||||
continue
|
||||
try:
|
||||
records.append(json.loads(line))
|
||||
except json.JSONDecodeError:
|
||||
continue
|
||||
return records
|
||||
|
||||
|
||||
def percentile(values: list[float], q: float) -> float | None:
|
||||
"""Return a nearest-rank percentile for finite values.
|
||||
|
||||
Inputs: numeric samples and a quantile. Output: the selected percentile or
|
||||
`None`. Why: the trace needs the same conservative p95 language as the sync
|
||||
and freshness gates.
|
||||
"""
|
||||
|
||||
finite = sorted(value for value in values if math.isfinite(value))
|
||||
if not finite:
|
||||
return None
|
||||
index = min(len(finite) - 1, max(0, math.ceil(len(finite) * q) - 1))
|
||||
return finite[index]
|
||||
|
||||
|
||||
def fmt_ms(value: float | None) -> str:
|
||||
"""Format optional millisecond evidence for human reports."""
|
||||
|
||||
return f"{value:.1f}ms" if value is not None else "unavailable"
|
||||
|
||||
|
||||
def capture_start_ns(capture_log: pathlib.Path) -> int | None:
|
||||
"""Return the RCT recorder Unix start timestamp from the capture log.
|
||||
|
||||
Inputs: the recorder log. Output: Unix nanoseconds or `None`.
|
||||
Why: converting capture-relative detections into client time lets us split
|
||||
end-to-end freshness into pre-send and post-send portions.
|
||||
"""
|
||||
|
||||
if not capture_log.exists():
|
||||
return None
|
||||
for line in capture_log.read_text(errors="replace").splitlines():
|
||||
if line.startswith("capture_start_unix_ns="):
|
||||
return int(line.split("=", 1)[1].strip())
|
||||
return None
|
||||
|
||||
|
||||
def as_float(value: Any) -> float | None:
|
||||
"""Parse a finite float from relayctl fields."""
|
||||
|
||||
if value in (None, "pending"):
|
||||
return None
|
||||
try:
|
||||
parsed = float(str(value).replace("+", ""))
|
||||
except (TypeError, ValueError):
|
||||
return None
|
||||
return parsed if math.isfinite(parsed) else None
|
||||
|
||||
|
||||
def summarize_upstream_samples(records: list[dict[str, Any]]) -> dict[str, Any] | None:
|
||||
"""Summarize sampled server receive and sink timing.
|
||||
|
||||
Inputs: `upstream-sync-samples.jsonl` records. Output: p95 layer metrics.
|
||||
Why: the current server telemetry is sampled instead of per-event; this
|
||||
still tells us whether the failure is already visible at receive/sink time.
|
||||
"""
|
||||
|
||||
buckets: dict[str, list[float]] = {
|
||||
"camera_client_queue_age_ms": [],
|
||||
"microphone_client_queue_age_ms": [],
|
||||
"camera_server_receive_age_ms": [],
|
||||
"microphone_server_receive_age_ms": [],
|
||||
"camera_sink_late_ms": [],
|
||||
"microphone_sink_late_ms": [],
|
||||
"server_receive_abs_skew_ms": [],
|
||||
"sink_handoff_abs_skew_ms": [],
|
||||
}
|
||||
live_samples = 0
|
||||
for record in records:
|
||||
fields = record.get("fields") or {}
|
||||
if fields.get("planner_phase") == "live":
|
||||
live_samples += 1
|
||||
mapping = {
|
||||
"planner_camera_client_queue_age_ms": "camera_client_queue_age_ms",
|
||||
"planner_microphone_client_queue_age_ms": "microphone_client_queue_age_ms",
|
||||
"planner_camera_server_receive_age_ms": "camera_server_receive_age_ms",
|
||||
"planner_microphone_server_receive_age_ms": "microphone_server_receive_age_ms",
|
||||
"planner_camera_sink_late_ms": "camera_sink_late_ms",
|
||||
"planner_microphone_sink_late_ms": "microphone_sink_late_ms",
|
||||
"planner_server_receive_abs_skew_p95_ms": "server_receive_abs_skew_ms",
|
||||
"planner_sink_handoff_abs_skew_p95_ms": "sink_handoff_abs_skew_ms",
|
||||
}
|
||||
for source, target in mapping.items():
|
||||
parsed = as_float(fields.get(source))
|
||||
if parsed is not None:
|
||||
buckets[target].append(parsed)
|
||||
|
||||
if live_samples == 0 and not any(buckets.values()):
|
||||
return None
|
||||
return {
|
||||
"sample_count": live_samples,
|
||||
**{f"{name}_p95": percentile(values, 0.95) for name, values in buckets.items()},
|
||||
}
|
||||
|
||||
|
||||
def event_send_records(
|
||||
event: dict[str, Any],
|
||||
send_records: list[dict[str, Any]],
|
||||
) -> list[dict[str, Any]]:
|
||||
"""Return client send records whose video PTS falls inside an event window.
|
||||
|
||||
Inputs: a synthetic event and client bundle send records. Output: matching
|
||||
bundles. Why: T1 should be the bundle send containing the coded video flash,
|
||||
not merely the nearest arbitrary frame.
|
||||
"""
|
||||
|
||||
start = int(event.get("planned_start_us") or -1)
|
||||
end = int(event.get("planned_end_us") or -1)
|
||||
if start < 0 or end <= start:
|
||||
return []
|
||||
return [
|
||||
record
|
||||
for record in send_records
|
||||
if start <= int(record.get("video_capture_pts_us") or -1) < end
|
||||
]
|
||||
|
||||
|
||||
def build_trace(report_dir: pathlib.Path) -> dict[str, Any]:
    """Build the T0-T5 trace summary for one completed probe directory.

    Inputs: a client-to-RCT artifact directory. Output: structured trace data.
    Why: failed blind runs need enough layer evidence to decide whether to tune
    client generation, transport/server ingress, HEVC decode/UVC handoff, or
    final RCT capture before we touch production offsets.
    """

    # All artifacts live beside each other inside the probe directory.
    # NOTE(review): assumes load_json/load_jsonl tolerate or require the files
    # to exist — behavior for missing artifacts is defined elsewhere; confirm.
    timeline = load_json(report_dir / "client-transport-timeline.json")
    report = load_json(report_dir / "report.json")
    transport = load_json(report_dir / "client-rct-transport-summary.json")
    clock = load_json(report_dir / "clock-alignment.json")
    send_records = load_jsonl(report_dir / "client-send-bundles.jsonl")
    upstream_records = load_jsonl(report_dir / "upstream-sync-samples.jsonl")
    capture_start = capture_start_ns(report_dir / "capture.log")
    # Offset translating client Unix time into the RCT capture clock.
    offset_ns = int(clock.get("capture_clock_offset_from_client_ns") or 0)

    # Index paired analyzer events by id; newer reports use server_event_id,
    # older ones fall back to event_id.
    pairs = {
        int(pair.get("server_event_id", pair.get("event_id", -1))): pair
        for pair in report.get("paired_events", [])
    }
    summary_events = {
        int(event.get("event_id", -1)): event
        for event in transport.get("events", [])
    }

    rows: list[dict[str, Any]] = []
    t0_t1_values: list[float] = []
    t1_t5_video_values: list[float] = []
    t1_t5_audio_values: list[float] = []
    for event in timeline.get("events", []):
        event_id = int(event.get("event_id", -1))
        # T1 is the earliest bundle send whose video PTS lies in the event window.
        matches = event_send_records(event, send_records)
        first_send = min(matches, key=lambda record: int(record["send_unix_ns"])) if matches else None
        pair = pairs.get(event_id)
        summary_event = summary_events.get(event_id)
        t0_ns = int(event.get("client_capture_unix_ns") or 0)
        t1_ns = int(first_send["send_unix_ns"]) if first_send else None
        # Truthiness check treats a zero timestamp as missing evidence.
        t0_t1_ms = ((t1_ns - t0_ns) / 1_000_000.0) if t1_ns and t0_ns else None
        if t0_t1_ms is not None:
            t0_t1_values.append(t0_t1_ms)

        t1_t5_video_ms = None
        t1_t5_audio_ms = None
        # T1->T5 needs the send time re-expressed on the capture file timebase.
        if pair and t1_ns and capture_start is not None:
            send_capture_s = (t1_ns + offset_ns - capture_start) / 1_000_000_000.0
            t1_t5_video_ms = (float(pair["video_time_s"]) - send_capture_s) * 1000.0
            t1_t5_audio_ms = (float(pair["audio_time_s"]) - send_capture_s) * 1000.0
            t1_t5_video_values.append(t1_t5_video_ms)
            t1_t5_audio_values.append(t1_t5_audio_ms)

        rows.append(
            {
                "event_id": event_id,
                "code": event.get("code"),
                # `or None` collapses the 0 sentinel back to "unknown".
                "t0_client_capture_unix_ns": t0_ns or None,
                "t1_client_send_unix_ns": t1_ns,
                "t0_to_t1_local_send_ms": t0_t1_ms,
                "event_video_bundle_count": len(matches),
                "t5_rct_video_s": pair.get("video_time_s") if pair else None,
                "t5_rct_audio_s": pair.get("audio_time_s") if pair else None,
                "t0_to_t5_video_ms": summary_event.get("video_age_ms") if summary_event else None,
                "t0_to_t5_audio_ms": summary_event.get("audio_age_ms") if summary_event else None,
                "t1_to_t5_video_ms": t1_t5_video_ms,
                "t1_to_t5_audio_ms": t1_t5_audio_ms,
                "sync_skew_ms": pair.get("skew_ms") if pair else None,
                "paired": pair is not None,
            }
        )

    # T4 spool evidence is optional; present only when the server logger ran.
    uvc_summary_path = report_dir / "uvc-frame-meta-summary.json"
    uvc_summary = load_json(uvc_summary_path) if uvc_summary_path.exists() else None
    return {
        "schema": "lesavka.client-rct-timing-trace.v1",
        "report_dir": str(report_dir),
        "verdict": "pass" if transport.get("passed") else "fail",
        "sync_status": transport.get("sync_status"),
        "sync_p95_abs_skew_ms": (report.get("verdict") or {}).get("p95_abs_skew_ms"),
        "sync_median_skew_ms": report.get("median_skew_ms"),
        "sync_drift_ms": report.get("drift_ms"),
        "paired_event_count": transport.get("paired_event_count"),
        "expected_event_count": transport.get("expected_event_count"),
        "freshness_budget_ms": transport.get("freshness_budget_ms"),
        "freshness_limit_ms": transport.get("freshness_limit_ms"),
        "t0_to_t1_local_send_p95_ms": percentile(t0_t1_values, 0.95),
        "t1_to_t5_video_p95_ms": percentile(t1_t5_video_values, 0.95),
        "t1_to_t5_audio_p95_ms": percentile(t1_t5_audio_values, 0.95),
        "t0_to_t5_video_p95_ms": transport.get("video_age_p95_ms"),
        "t0_to_t5_audio_p95_ms": transport.get("audio_age_p95_ms"),
        "upstream_sampled_layers": summarize_upstream_samples(upstream_records),
        "uvc_spool": uvc_summary,
        "smoothness": transport.get("smoothness"),
        "events": rows,
        "notes": [
            "T0 is synthetic capture time on the client.",
            "T1 is the first outgoing bundle whose video PTS lands inside the coded event window.",
            "T2/T3 are sampled server receive/sink telemetry, not per-event timestamps yet.",
            "T4 UVC spool evidence is present only when the server metadata log is enabled.",
            "T5 is final RCT capture detection from the flash/tone analyzer.",
        ],
    }
|
||||
|
||||
|
||||
def text_report(trace: dict[str, Any]) -> str:
    """Render the timing trace summary for humans.

    Inputs: the structured trace built by build_trace. Output: a multi-line
    text report ending in a newline. Missing optional layers are rendered as
    "unavailable" by fmt_ms rather than omitted.
    """

    # Optional sub-sections may be absent; default to empty dicts so .get works.
    upstream = trace.get("upstream_sampled_layers") or {}
    uvc = trace.get("uvc_spool") or {}
    smoothness = trace.get("smoothness") or {}
    lines = [
        f"Client-to-RCT T0-T5 timing trace for {trace['report_dir']}",
        f"- verdict: {trace['verdict']}",
        f"- sync: {trace.get('sync_status')} p95={fmt_ms(trace.get('sync_p95_abs_skew_ms'))} "
        f"median={fmt_ms(trace.get('sync_median_skew_ms'))} drift={fmt_ms(trace.get('sync_drift_ms'))}",
        f"- evidence: paired={trace.get('paired_event_count')}/{trace.get('expected_event_count')}",
        f"- freshness: budget={fmt_ms(trace.get('freshness_budget_ms'))} "
        f"limit={fmt_ms(trace.get('freshness_limit_ms'))}",
        "- layer p95:",
        f" T0->T1 local bundle send: {fmt_ms(trace.get('t0_to_t1_local_send_p95_ms'))}",
        f" T1->T5 RCT video detect: {fmt_ms(trace.get('t1_to_t5_video_p95_ms'))}",
        f" T1->T5 RCT audio detect: {fmt_ms(trace.get('t1_to_t5_audio_p95_ms'))}",
        f" T0->T5 RCT video detect: {fmt_ms(trace.get('t0_to_t5_video_p95_ms'))}",
        f" T0->T5 RCT audio detect: {fmt_ms(trace.get('t0_to_t5_audio_p95_ms'))}",
        "- sampled server layers:",
        f" T2 server receive age p95: video={fmt_ms(upstream.get('camera_server_receive_age_ms_p95'))} "
        f"audio={fmt_ms(upstream.get('microphone_server_receive_age_ms_p95'))}",
        f" T2 receive A/V skew p95: {fmt_ms(upstream.get('server_receive_abs_skew_ms_p95'))}",
        f" T3 sink late p95: video={fmt_ms(upstream.get('camera_sink_late_ms_p95'))} "
        f"audio={fmt_ms(upstream.get('microphone_sink_late_ms_p95'))}",
        f" T3 sink handoff skew p95: {fmt_ms(upstream.get('sink_handoff_abs_skew_ms_p95'))}",
        "- optional UVC spool layer:",
    ]
    # T4 evidence exists only when the server-side metadata log was enabled.
    if uvc:
        coverage = uvc.get("event_coverage") or {}
        lines.append(
            f" T4 records={uvc.get('record_count')} profiles={uvc.get('profiles')} "
            f"covered_events={coverage.get('covered_events')}/{coverage.get('expected_events')}"
        )
    else:
        lines.append(" T4 unavailable; enable LESAVKA_UVC_FRAME_META_LOG_PATH for spool-boundary evidence")
    lines.extend(
        [
            "- smoothness:",
            f" video_hiccups={smoothness.get('video_hiccups')} "
            f"audio_hiccups={smoothness.get('audio_hiccups')} "
            f"video_p95_jitter={fmt_ms(smoothness.get('video_p95_jitter_ms'))}",
        ]
    )
    # Surface which coded events never got a flash/tone pairing at the RCT.
    missing = [event for event in trace["events"] if not event["paired"]]
    if missing:
        lines.append(
            "- missing paired event codes: "
            + ",".join(str(event.get("code")) for event in missing if event.get("code") is not None)
        )
    return "\n".join(lines) + "\n"
|
||||
|
||||
|
||||
def parse_args() -> argparse.Namespace:
    """Parse CLI arguments for the trace summarizer.

    Three positional paths: the probe report directory, the JSON output file,
    and the text output file.
    """

    parser = argparse.ArgumentParser()
    for positional in ("report_dir", "json_out", "txt_out"):
        parser.add_argument(positional, type=pathlib.Path)
    return parser.parse_args()
|
||||
|
||||
|
||||
def main() -> None:
    """Write the JSON and text T0-T5 trace artifacts.

    Builds the trace for the requested report directory, persists both the
    machine-readable and human-readable forms, then echoes the text report.
    """

    args = parse_args()
    trace = build_trace(args.report_dir)
    serialized = json.dumps(trace, indent=2, sort_keys=True) + "\n"
    args.json_out.write_text(serialized)
    args.txt_out.write_text(text_report(trace))
    # Echo the on-disk text artifact so stdout matches what was written.
    print(args.txt_out.read_text(), end="")
|
||||
|
||||
|
||||
# Entry-point guard: run the CLI only when executed directly, so the helpers
# above stay importable without side effects.
if __name__ == "__main__":
    main()
|
||||
128
scripts/manual/client_rct_transport_layers.py
Executable file
128
scripts/manual/client_rct_transport_layers.py
Executable file
@ -0,0 +1,128 @@
|
||||
"""Layer attribution helpers for client-to-RCT transport summaries."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
import math
|
||||
import pathlib
|
||||
|
||||
|
||||
def percentile(values: list[float], q: float) -> float | None:
|
||||
"""Return a nearest-rank percentile for finite values.
|
||||
|
||||
Inputs: numeric samples and a quantile in `[0, 1]`. Outputs: the selected
|
||||
percentile or `None`. Why: stage attribution should use the same p95 style
|
||||
as the main transport summary without coupling this helper back to the CLI.
|
||||
"""
|
||||
|
||||
finite = sorted(value for value in values if math.isfinite(value))
|
||||
if not finite:
|
||||
return None
|
||||
index = min(len(finite) - 1, max(0, math.ceil(len(finite) * q) - 1))
|
||||
return finite[index]
|
||||
|
||||
|
||||
def client_send_summary(report_path: pathlib.Path, joined: list[dict]) -> dict | None:
    """Summarize client-side bundle send timing from optional JSONL artifacts.

    Inputs: the final report path, used to find sibling
    `client-send-bundles.jsonl`, and joined RCT events. Outputs: local queue
    age plus post-send-to-RCT age percentiles. Why: if final freshness fails,
    the next question is whether delay existed before the client wrote the gRPC
    bundle or appeared after the bundle left the client.
    """

    send_path = report_path.parent / "client-send-bundles.jsonl"
    if not send_path.exists():
        return None

    # Index well-formed send records by video PTS; skip anything that is not
    # a parseable sync-probe-send row rather than failing the whole summary.
    rows_by_video_pts: dict[int, dict] = {}
    local_ages: list[float] = []
    for line in send_path.read_text(errors="replace").splitlines():
        try:
            row = json.loads(line)
        except json.JSONDecodeError:
            continue
        if row.get("schema") != "lesavka.sync-probe-send.v1":
            continue
        try:
            video_pts = int(row["video_capture_pts_us"])
            local_age = float(row["local_age_ms"])
        except (KeyError, TypeError, ValueError):
            continue
        rows_by_video_pts[video_pts] = row
        local_ages.append(local_age)

    if not rows_by_video_pts:
        return None

    # Join RCT events back to their originating send rows via the planned
    # start PTS, then split each event's end-to-end age into local vs post-send.
    post_send_video_ages: list[float] = []
    post_send_audio_ages: list[float] = []
    joined_with_send_rows = 0
    for event in joined:
        planned_start_us = event.get("client_planned_start_us")
        if planned_start_us is None:
            continue
        row = rows_by_video_pts.get(int(planned_start_us))
        if not row:
            continue
        joined_with_send_rows += 1
        local_age = float(row.get("local_age_ms") or 0.0)
        if event.get("video_age_ms") is not None:
            post_send_video_ages.append(float(event["video_age_ms"]) - local_age)
        if event.get("audio_age_ms") is not None:
            post_send_audio_ages.append(float(event["audio_age_ms"]) - local_age)

    # Worst of the video/audio p95s; None when no post-send ages were joined.
    post_send_worst = max(
        value
        for value in [
            percentile(post_send_video_ages, 0.95),
            percentile(post_send_audio_ages, 0.95),
        ]
        if value is not None
    ) if post_send_video_ages or post_send_audio_ages else None

    return {
        "bundle_count": len(rows_by_video_pts),
        "joined_event_count": joined_with_send_rows,
        "local_bundle_age_p95_ms": percentile(local_ages, 0.95),
        "local_bundle_age_max_ms": max(local_ages) if local_ages else None,
        "post_client_send_video_age_p95_ms": percentile(post_send_video_ages, 0.95),
        "post_client_send_audio_age_p95_ms": percentile(post_send_audio_ages, 0.95),
        "post_client_send_worst_p95_ms": post_send_worst,
    }
|
||||
|
||||
|
||||
def freshness_bottleneck(summary: dict) -> str:
    """Classify the most likely freshness bottleneck from available artifacts.

    Inputs: the structured summary assembled from RCT capture, client send log,
    and optional upstream-sync samples. Outputs: a short machine-readable label.
    Why: first-pass client->server work should remain black-box, but failed runs
    should still point us toward client queueing, transport/server receive, or
    post-send output/RCT delay before we add invasive introspection.
    """

    # Trivial verdicts first: passing runs and runs with too little evidence.
    if summary.get("freshness_passed"):
        return "within_limit"
    if summary.get("paired_event_count", 0) < summary.get("min_paired_events", 0):
        return "evidence_incomplete"

    send_stats = summary.get("client_send") or {}
    local_age = send_stats.get("local_bundle_age_p95_ms")
    post_send = send_stats.get("post_client_send_worst_p95_ms")
    # Pre-send queueing dominates: blame the client before anything downstream.
    if local_age is not None and local_age > 500.0:
        return "client_queue_or_bundle_generation"

    upstream_stats = summary.get("upstream_sync") or {}
    # Remaining suspects, checked in priority order with their thresholds.
    ordered_checks = (
        (upstream_stats.get("server_receive_age_p95_ms"), 500.0, "server_receive_or_ingress_queue"),
        (upstream_stats.get("media_transport_lag_p95_ms"), 1_000.0, "client_to_server_transport"),
        (post_send, 500.0, "post_client_send_to_rct_path"),
    )
    for measured, threshold, label in ordered_checks:
        if measured is not None and measured > threshold:
            return label
    return "needs_deeper_introspection"
|
||||
|
||||
|
||||
487
scripts/manual/client_rct_transport_summary.py
Executable file
487
scripts/manual/client_rct_transport_summary.py
Executable file
@ -0,0 +1,487 @@
|
||||
#!/usr/bin/env python3
|
||||
"""Summarize client-origin transport timing from RCT capture artifacts."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
import math
|
||||
import pathlib
|
||||
import statistics
|
||||
import subprocess
|
||||
import sys
|
||||
|
||||
from client_rct_transport_layers import client_send_summary, freshness_bottleneck
|
||||
|
||||
|
||||
def capture_start_ns(path: pathlib.Path) -> int | None:
|
||||
"""Return the RCT recorder Unix start timestamp when the capture log has it.
|
||||
|
||||
Inputs: a capture log path written by the remote recorder.
|
||||
Outputs: the nanosecond Unix timestamp or `None`.
|
||||
Why: client-origin event timestamps need to be translated into the capture
|
||||
file's timebase before end-to-end media age can be measured.
|
||||
"""
|
||||
|
||||
for line in path.read_text(errors="replace").splitlines():
|
||||
if line.startswith("capture_start_unix_ns="):
|
||||
return int(line.split("=", 1)[1].strip())
|
||||
return None
|
||||
|
||||
|
||||
def percentile(values: list[float], q: float) -> float | None:
|
||||
"""Return a simple nearest-rank percentile for finite values.
|
||||
|
||||
Inputs: numeric samples and a quantile in `[0, 1]`.
|
||||
Outputs: the percentile or `None` for an empty set.
|
||||
Why: manual transport reports should match the conservative p95 style used
|
||||
by the server-to-RCT gate without pulling in extra dependencies.
|
||||
"""
|
||||
|
||||
finite = sorted(value for value in values if math.isfinite(value))
|
||||
if not finite:
|
||||
return None
|
||||
index = min(len(finite) - 1, max(0, math.ceil(len(finite) * q) - 1))
|
||||
return finite[index]
|
||||
|
||||
|
||||
def fmt_ms(value: float | None) -> str:
|
||||
"""Format optional millisecond evidence for compact text reports.
|
||||
|
||||
Inputs: a numeric millisecond value or `None`. Output: display text. Why:
|
||||
missing layer evidence should remain explicit when optional samplers are
|
||||
disabled, rather than becoming a confusing `null` or Python exception.
|
||||
"""
|
||||
|
||||
return f"{value:.1f}ms" if value is not None else "unavailable"
|
||||
|
||||
|
||||
def ffprobe_times(capture_path: pathlib.Path, kind: str) -> list[float]:
    """Read video frame or audio packet timestamps from a capture.

    Inputs: the Matroska capture path and `video` or `audio`.
    Outputs: timestamp seconds from ffprobe.
    Why: smoothness warnings need cadence evidence even when the sync analyzer
    correctly focuses only on flash/tone onsets.
    """

    is_video = kind == "video"
    command = [
        "ffprobe",
        "-v",
        "error",
        "-select_streams",
        "v:0" if is_video else "a:0",
        "-show_frames" if is_video else "-show_packets",
        "-show_entries",
        "frame=pts_time" if is_video else "packet=pts_time",
        "-of",
        "json",
        str(capture_path),
    ]
    try:
        raw = subprocess.check_output(command, text=True, stderr=subprocess.DEVNULL)
        payload = json.loads(raw)
    except Exception:
        # Best effort: a missing ffprobe binary or unreadable capture simply
        # yields no cadence samples instead of failing the report.
        return []
    entries = payload.get("frames" if is_video else "packets", [])
    times: list[float] = []
    for entry in entries:
        try:
            times.append(float(entry["pts_time"]))
        except (KeyError, TypeError, ValueError):
            # Rows without a usable pts_time are dropped silently.
            pass
    return times
|
||||
|
||||
|
||||
def smoothness_summary(
    capture_path: pathlib.Path,
    timeline: dict,
    require_smoothness: bool,
) -> dict:
    """Compute coarse cadence warnings for the final RCT capture.

    Inputs: the final capture, client timeline media profile, and whether
    smoothness should be hard-gated.
    Outputs: a JSON-serializable smoothness summary.
    Why: we are not tuning smoothness yet, but the circuit test should preserve
    enough evidence to notice if transport improvements regress cadence.
    """

    fps = float(timeline.get("camera_fps") or 0.0)
    video_times = ffprobe_times(capture_path, "video")
    audio_times = ffprobe_times(capture_path, "audio")
    # Nominal frame period; None when the timeline carries no usable fps.
    expected_video_ms = 1000.0 / fps if fps > 0 else None
    # Successive-timestamp deltas in milliseconds.
    video_intervals = [(b - a) * 1000.0 for a, b in zip(video_times, video_times[1:])]
    audio_intervals = [(b - a) * 1000.0 for a, b in zip(audio_times, audio_times[1:])]
    video_jitter = (
        [abs(value - expected_video_ms) for value in video_intervals]
        if expected_video_ms
        else []
    )
    # Audio cadence is judged against its own observed median interval.
    # NOTE(review): truthiness checks skip jitter/hiccup math when the median
    # is exactly 0.0, not only when it is None — presumably intentional for
    # degenerate captures; confirm.
    audio_median = statistics.median(audio_intervals) if audio_intervals else None
    audio_jitter = (
        [abs(value - audio_median) for value in audio_intervals] if audio_median else []
    )
    # Hiccup = interval much longer than nominal (1.75x video, 2.5x audio).
    video_hiccups = sum(
        1
        for value in video_intervals
        if expected_video_ms and value > expected_video_ms * 1.75
    )
    audio_hiccups = sum(
        1 for value in audio_intervals if audio_median and value > audio_median * 2.5
    )
    return {
        "passed": video_hiccups == 0 and audio_hiccups == 0,
        "required": require_smoothness,
        "video_frames": len(video_times),
        "video_expected_interval_ms": expected_video_ms,
        "video_p95_jitter_ms": percentile(video_jitter, 0.95),
        "video_max_interval_ms": max(video_intervals) if video_intervals else None,
        "video_hiccups": video_hiccups,
        "audio_packets": len(audio_times),
        "audio_median_interval_ms": audio_median,
        "audio_p95_jitter_ms": percentile(audio_jitter, 0.95),
        "audio_max_interval_ms": max(audio_intervals) if audio_intervals else None,
        "audio_hiccups": audio_hiccups,
    }
|
||||
|
||||
|
||||
def parse_float_field(fields: dict, name: str) -> float | None:
|
||||
"""Read a numeric upstream-sync field when relayctl reported one.
|
||||
|
||||
Inputs: parsed relayctl fields and a key name.
|
||||
Outputs: a finite float or `None` for `pending`/missing values.
|
||||
Why: failed black-box runs need lightweight ingress diagnosis without
|
||||
requiring a second log-scraping tool.
|
||||
"""
|
||||
|
||||
raw = fields.get(name)
|
||||
if raw is None or raw == "pending":
|
||||
return None
|
||||
try:
|
||||
value = float(raw)
|
||||
except (TypeError, ValueError):
|
||||
return None
|
||||
return value if math.isfinite(value) else None
|
||||
|
||||
|
||||
def upstream_sync_summary(report_path: pathlib.Path, timeline: dict) -> dict | None:
    """Summarize client-to-server timing from optional sampler artifacts.

    Inputs: the report path, used to find sibling `upstream-sync-samples.jsonl`,
    and the client-origin timeline.
    Outputs: transport-lag and queue-age percentiles, or `None`.
    Why: when final RCT freshness fails, the sampler shows whether media was
    already late at server ingress or only after server handoff.
    """

    samples_path = report_path.parent / "upstream-sync-samples.jsonl"
    if not samples_path.exists():
        return None
    # All lags are measured relative to the client run start; without it the
    # sampler data cannot be placed on a timeline.
    client_start_unix_ns = int(timeline.get("client_start_unix_ns") or 0)
    if client_start_unix_ns <= 0:
        return None

    media_lags: list[float] = []
    camera_lags: list[float] = []
    microphone_lags: list[float] = []
    camera_queue_ages: list[float] = []
    microphone_queue_ages: list[float] = []
    server_receive_ages: list[float] = []
    sink_late_values: list[float] = []
    live_samples = 0

    for line in samples_path.read_text(errors="replace").splitlines():
        try:
            record = json.loads(line)
        except json.JSONDecodeError:
            continue
        fields = record.get("fields", {})
        sample_unix_ns = int(record.get("sample_unix_ns") or 0)
        # Ignore samples taken before the client run started.
        if sample_unix_ns <= client_start_unix_ns:
            continue
        sample_rel_ms = (sample_unix_ns - client_start_unix_ns) / 1_000_000.0
        # NOTE: despite the `_ms` names, these two hold microseconds until the
        # `/= 1000.0` conversion below.
        camera_pts_ms = parse_float_field(fields, "planner_latest_camera_remote_pts_us")
        microphone_pts_ms = parse_float_field(
            fields, "planner_latest_microphone_remote_pts_us"
        )
        # A sample with neither stream's PTS carries no live media evidence.
        if camera_pts_ms is None and microphone_pts_ms is None:
            continue
        live_samples += 1
        if camera_pts_ms is not None:
            camera_pts_ms /= 1000.0
            # Lag = wall-clock sample time minus newest media PTS seen upstream.
            camera_lags.append(sample_rel_ms - camera_pts_ms)
            media_lags.append(sample_rel_ms - camera_pts_ms)
        if microphone_pts_ms is not None:
            microphone_pts_ms /= 1000.0
            microphone_lags.append(sample_rel_ms - microphone_pts_ms)
            media_lags.append(sample_rel_ms - microphone_pts_ms)
        # Fan the per-stage millisecond gauges into their percentile buckets;
        # camera and microphone share the receive-age and sink-late buckets.
        for key, bucket in [
            ("planner_camera_client_queue_age_ms", camera_queue_ages),
            ("planner_microphone_client_queue_age_ms", microphone_queue_ages),
            ("planner_camera_server_receive_age_ms", server_receive_ages),
            ("planner_microphone_server_receive_age_ms", server_receive_ages),
            ("planner_camera_sink_late_ms", sink_late_values),
            ("planner_microphone_sink_late_ms", sink_late_values),
        ]:
            value = parse_float_field(fields, key)
            if value is not None:
                bucket.append(value)

    if live_samples == 0:
        return None
    return {
        "sample_count": live_samples,
        "media_transport_lag_p50_ms": percentile(media_lags, 0.50),
        "media_transport_lag_p95_ms": percentile(media_lags, 0.95),
        "camera_transport_lag_p95_ms": percentile(camera_lags, 0.95),
        "microphone_transport_lag_p95_ms": percentile(microphone_lags, 0.95),
        "camera_client_queue_age_p95_ms": percentile(camera_queue_ages, 0.95),
        "microphone_client_queue_age_p95_ms": percentile(microphone_queue_ages, 0.95),
        "server_receive_age_p95_ms": percentile(server_receive_ages, 0.95),
        "sink_late_p95_ms": percentile(sink_late_values, 0.95),
    }
|
||||
|
||||
|
||||
def uvc_spool_summary(report_path: pathlib.Path) -> dict | None:
|
||||
"""Load optional server UVC spool-boundary timing next to the RCT report.
|
||||
|
||||
Inputs: the analyzer report path. Output: parsed spool summary or `None`.
|
||||
Why: blind HEVC runs need one compact report that shows whether synthetic
|
||||
coded frames reached the server's decoded-MJPEG spool before final RCT
|
||||
capture, without making the normal non-mutating probe require this artifact.
|
||||
"""
|
||||
|
||||
summary_path = report_path.parent / "uvc-frame-meta-summary.json"
|
||||
if not summary_path.exists():
|
||||
return None
|
||||
try:
|
||||
summary = json.loads(summary_path.read_text())
|
||||
except (OSError, json.JSONDecodeError):
|
||||
return None
|
||||
if summary.get("schema") != "lesavka.uvc-mjpeg-spool-summary.v1":
|
||||
return None
|
||||
return summary
|
||||
|
||||
|
||||
def build_summary(args: list[str]) -> tuple[dict, str]:
    """Build the transport summary JSON and human text.

    Inputs: command-line paths and thresholds from the Bash harness.
    Outputs: structured summary plus text lines.
    Why: keeping this in Python makes the shell runner small and leaves the
    timing math easy to test or extend if black-box results fail.
    """

    # Positional argument contract shared with the Bash harness; the two
    # output paths are consumed by the caller, not here.
    (
        report_path,
        timeline_path,
        capture_log_path,
        clock_path,
        capture_path,
        _json_out,
        _txt_out,
        max_age_raw,
        min_pairs_raw,
        require_smoothness_raw,
    ) = args
    report_file = pathlib.Path(report_path)
    report = json.loads(report_file.read_text())
    timeline = json.loads(pathlib.Path(timeline_path).read_text())
    clock = json.loads(pathlib.Path(clock_path).read_text())
    max_age_ms = float(max_age_raw)
    min_pairs = int(min_pairs_raw)
    # Any value other than a recognized "off" spelling enables the gate.
    require_smoothness = require_smoothness_raw not in {"0", "false", "False", "no", "off"}

    capture_start = capture_start_ns(pathlib.Path(capture_log_path))
    offset_ns = int(clock.get("capture_clock_offset_from_client_ns") or 0)
    uncertainty_ms = float(clock.get("clock_uncertainty_ms") or 0.0)
    timeline_events = {int(event["event_id"]): event for event in timeline.get("events", [])}
    joined: list[dict] = []
    video_ages: list[float] = []
    audio_ages: list[float] = []
    # Join analyzer pairings back to the client timeline and convert each
    # detection into an end-to-end media age on the capture timebase.
    for pair in report.get("paired_events", []):
        # Prefer the newer server_event_id key, fall back to event_id.
        paired_server_event_id = pair.get("server_event_id")
        event_id = int(
            paired_server_event_id
            if paired_server_event_id is not None
            else pair.get("event_id", -1)
        )
        event = timeline_events.get(event_id)
        if not event or capture_start is None:
            continue
        # Where the client flash *should* appear in capture-file seconds.
        expected_capture_s = (
            int(event["client_capture_unix_ns"]) + offset_ns - capture_start
        ) / 1_000_000_000.0
        video_age_ms = (float(pair["video_time_s"]) - expected_capture_s) * 1000.0
        audio_age_ms = (float(pair["audio_time_s"]) - expected_capture_s) * 1000.0
        video_ages.append(video_age_ms)
        audio_ages.append(audio_age_ms)
        joined.append(
            {
                "event_id": event_id,
                "event_code": event.get("code"),
                "client_planned_start_us": event.get("planned_start_us"),
                "client_expected_capture_s": expected_capture_s,
                "tethys_video_time_s": pair.get("video_time_s"),
                "tethys_audio_time_s": pair.get("audio_time_s"),
                "video_age_ms": video_age_ms,
                "audio_age_ms": audio_age_ms,
                "skew_ms": pair.get("skew_ms"),
                "confidence": pair.get("confidence"),
            }
        )

    # Freshness is gated on the worse of the two stream p95s, padded by the
    # measured clock uncertainty.
    worst_p95 = max(
        value
        for value in [percentile(video_ages, 0.95), percentile(audio_ages, 0.95)]
        if value is not None
    ) if video_ages or audio_ages else None
    freshness_budget_ms = worst_p95 + uncertainty_ms if worst_p95 is not None else None
    sync = report.get("verdict", {})
    smoothness = smoothness_summary(pathlib.Path(capture_path), timeline, require_smoothness)
    upstream_sync = upstream_sync_summary(report_file, timeline)
    client_send = client_send_summary(report_file, joined)
    uvc_spool = uvc_spool_summary(report_file)
    freshness_passed = (
        freshness_budget_ms is not None
        and freshness_budget_ms <= max_age_ms
        and len(joined) >= min_pairs
    )
    # Overall verdict: sync AND freshness, with smoothness only when gated.
    passed = (
        bool(sync.get("passed"))
        and freshness_passed
        and (smoothness["passed"] or not require_smoothness)
    )
    summary = {
        "schema": "lesavka.client-rct-transport-summary.v1",
        "passed": passed,
        "sync_passed": bool(sync.get("passed")),
        "sync_status": sync.get("status"),
        "paired_event_count": len(joined),
        "min_paired_events": min_pairs,
        "freshness_passed": freshness_passed,
        "freshness_worst_p95_ms": worst_p95,
        "freshness_budget_ms": freshness_budget_ms,
        "freshness_limit_ms": max_age_ms,
        "clock_uncertainty_ms": uncertainty_ms,
        "video_age_p95_ms": percentile(video_ages, 0.95),
        "audio_age_p95_ms": percentile(audio_ages, 0.95),
        "smoothness": smoothness,
        "upstream_sync": upstream_sync,
        "client_send": client_send,
        "uvc_spool": uvc_spool,
        "expected_event_count": len(timeline_events),
        "freshness_bottleneck": None,
        "events": joined,
    }
    # Classification reads the fully-assembled summary, so fill it in last.
    summary["freshness_bottleneck"] = freshness_bottleneck(summary)
    text = "\n".join(human_lines(report_path, summary, sync, smoothness)) + "\n"
    return summary, text
|
||||
|
||||
|
||||
def human_lines(report_path: str, summary: dict, sync: dict, smoothness: dict) -> list[str]:
    """Render a compact operator summary.

    Inputs: structured timing summaries.
    Outputs: readable report lines.
    Why: the user should be able to paste a short tail and still preserve the
    three dimensions we care about: sync, freshness, and smoothness.
    """

    # Header and always-present verdict lines; the rest of the sections are
    # appended conditionally based on which optional summaries were produced.
    lines = [
        f"Client-to-RCT transport summary for {report_path}",
        f"- verdict: {'pass' if summary['passed'] else 'fail'}",
        f"- sync: {sync.get('status', 'unknown')} ({'pass' if sync.get('passed') else 'fail'}), p95={float(sync.get('p95_abs_skew_ms', 0.0)):.1f}ms",
        f"- paired events: {summary['paired_event_count']}/{summary['min_paired_events']}",
        f"- synthetic evidence: paired={summary['paired_event_count']}/{summary['expected_event_count']} expected coded events",
    ]
    # Freshness budget is None when no video/audio ages were measured.
    if summary["freshness_budget_ms"] is None:
        lines.append("- freshness: unavailable")
    else:
        lines.append(
            f"- freshness: {'pass' if summary['freshness_passed'] else 'fail'} "
            f"budget={summary['freshness_budget_ms']:.1f}ms "
            f"limit={summary['freshness_limit_ms']:.1f}ms"
        )
    # Per-stream p95 ages may individually be None even when the budget exists.
    for label, key in [("video", "video_age_p95_ms"), ("audio", "audio_age_p95_ms")]:
        value = summary[key]
        lines.append(
            f"- {label} age p95: {value:.1f}ms" if value is not None else f"- {label} age p95: unavailable"
        )
    # Smoothness is reported as warn (not fail) so the operator still sees it
    # when smoothness is not a gating requirement.
    lines.append(
        f"- smoothness: {'pass' if smoothness['passed'] else 'warn'} "
        f"video_hiccups={smoothness['video_hiccups']} "
        f"audio_hiccups={smoothness['audio_hiccups']} "
        f"video_p95_jitter={smoothness['video_p95_jitter_ms']}"
    )
    # Optional client-side send telemetry (present only when the probe logged
    # its send bundles). fmt_ms is a file-local helper for ms formatting.
    client_send = summary.get("client_send")
    if client_send:
        lines.append(
            "- client send: "
            f"bundles={client_send['bundle_count']} "
            f"joined={client_send['joined_event_count']} "
            f"local_age_p95={fmt_ms(client_send.get('local_bundle_age_p95_ms'))} "
            f"post_send_to_rct_worst_p95={fmt_ms(client_send.get('post_client_send_worst_p95_ms'))}"
        )
    lines.append(f"- freshness bottleneck: {summary['freshness_bottleneck']}")
    # Optional server-side upstream sampler section.
    upstream = summary.get("upstream_sync")
    if upstream:
        lag = fmt_ms(upstream.get("media_transport_lag_p95_ms"))
        camera_queue = fmt_ms(upstream.get("camera_client_queue_age_p95_ms"))
        microphone_queue = fmt_ms(upstream.get("microphone_client_queue_age_p95_ms"))
        server_age = fmt_ms(upstream.get("server_receive_age_p95_ms"))
        sink_late = fmt_ms(upstream.get("sink_late_p95_ms"))
        lines.append(
            "- upstream sampler: "
            f"samples={upstream['sample_count']} "
            f"transport_lag_p95={lag} "
            f"client_queue_p95=video {camera_queue}/audio {microphone_queue} "
            f"server_receive_age_p95={server_age} "
            f"sink_late_p95={sink_late}"
        )
    # Optional UVC spool-boundary section from the server-side frame metadata log.
    spool = summary.get("uvc_spool")
    if spool:
        # event_coverage may be absent or None; default to empty mapping.
        coverage = spool.get("event_coverage") or {}
        expected = coverage.get("expected_events", 0)
        covered = coverage.get("covered_events", 0)
        missing = coverage.get("missing_codes", [])
        source_hiccups = spool.get("source_cadence_hiccup_count")
        spool_p95 = spool.get("spool_interval_p95_ms")
        decoded_p95 = spool.get("decoded_pts_delta_p95_ms")
        lines.append(
            "- UVC spool boundary: "
            f"records={spool.get('record_count')} "
            f"events={covered}/{expected} "
            f"missing_codes={missing} "
            f"sequence_gaps={spool.get('sequence_gap_count')} "
            f"source_hiccups={source_hiccups} "
            f"spool_interval_p95={fmt_ms(spool_p95)} "
            f"decoded_delta_p95={fmt_ms(decoded_p95)}"
        )
    return lines
|
||||
|
||||
|
||||
def main() -> int:
    """CLI entrypoint for the manual transport summary helper.

    Reads ten positional arguments from sys.argv, builds the transport
    summary, writes the JSON and text artifacts, echoes the text, and
    returns 0 on pass, 1 on fail, 2 on usage error.
    """

    argv = sys.argv
    if len(argv) != 11:
        usage = (
            "usage: client_rct_transport_summary.py REPORT TIMELINE CAPTURE_LOG CLOCK CAPTURE JSON_OUT TXT_OUT MAX_AGE_MS MIN_PAIRS REQUIRE_SMOOTHNESS"
        )
        print(usage, file=sys.stderr)
        return 2

    summary, rendered = build_summary(argv[1:])
    # Persist machine-readable and human-readable artifacts side by side.
    json_payload = json.dumps(summary, indent=2, sort_keys=True) + "\n"
    pathlib.Path(argv[6]).write_text(json_payload)
    pathlib.Path(argv[7]).write_text(rendered)
    # The rendered text already ends with a newline.
    print(rendered, end="")
    return 0 if summary["passed"] else 1


if __name__ == "__main__":
    raise SystemExit(main())
|
||||
123
scripts/manual/client_rct_upstream_sync_sampler.py
Executable file
123
scripts/manual/client_rct_upstream_sync_sampler.py
Executable file
@ -0,0 +1,123 @@
|
||||
#!/usr/bin/env python3
|
||||
"""Poll server upstream-sync state while a client-to-RCT probe is active."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
import os
|
||||
import pathlib
|
||||
import subprocess
|
||||
import sys
|
||||
import time
|
||||
|
||||
|
||||
def parse_relayctl_fields(text: str) -> dict:
    """Parse relayctl key/value output into a dictionary.

    Inputs: text from `lesavka-relayctl upstream-sync`.
    Outputs: field names mapped to string values.
    Why: the sampler should preserve raw operator output while also making
    timing fields easy for follow-up scripts to query with `jq`.
    """

    parsed: dict[str, str] = {}
    for raw_line in text.splitlines():
        # Split on the first '=' only; lines without one carry no field.
        key, sep, value = raw_line.partition("=")
        if not sep:
            continue
        parsed[key.strip()] = value.strip()
    return parsed
|
||||
|
||||
|
||||
def process_alive(pid: int) -> bool:
    """Return whether a local probe process still exists.

    Inputs: process id to monitor.
    Outputs: true while the process can be signaled with zero.
    Why: the harness needs a passwordless background sampler that exits
    naturally when the active probe finishes.
    """

    try:
        # Signal 0 performs existence/permission checks without delivering
        # anything to the target process.
        os.kill(pid, 0)
        return True
    except OSError:
        return False
|
||||
|
||||
|
||||
def sample_until_probe_exits(
    relayctl: str,
    server: str,
    tls_domain: str,
    probe_pid: int,
    interval_s: float,
    jsonl_path: pathlib.Path,
    text_path: pathlib.Path,
) -> int:
    """Write upstream-sync samples while the probe process is alive.

    Inputs: relayctl path, server address, TLS domain, probe pid, interval, and
    output paths.
    Outputs: process exit code only; sample artifacts are written to disk.
    Why: if the RCT black-box result fails, these samples show whether the
    server planner saw stale client queues, late presentation, or healthy ingress.
    """

    jsonl_path.parent.mkdir(parents=True, exist_ok=True)
    text_path.parent.mkdir(parents=True, exist_ok=True)
    # Truncate-and-write both artifacts for this run; one JSONL row and one
    # raw-text section per sample.
    with jsonl_path.open("w", encoding="utf-8") as jsonl, text_path.open(
        "w", encoding="utf-8"
    ) as text:
        while process_alive(probe_pid):
            sample_ns = time.time_ns()
            env = os.environ.copy()
            # relayctl reads the TLS domain from the environment.
            env["LESAVKA_TLS_DOMAIN"] = tls_domain
            # check=False: a failed sample is recorded (ok=False) instead of
            # aborting the sampler while the probe is still running.
            result = subprocess.run(
                [relayctl, "--server", server, "upstream-sync"],
                text=True,
                capture_output=True,
                env=env,
                check=False,
            )
            raw = result.stdout.strip()
            # Preserve stderr alongside stdout so failure diagnostics survive.
            if result.stderr.strip():
                raw = raw + ("\n" if raw else "") + result.stderr.strip()
            row = {
                "schema": "lesavka.client-rct-upstream-sync-sample.v1",
                "sample_unix_ns": sample_ns,
                "ok": result.returncode == 0,
                "returncode": result.returncode,
                "fields": parse_relayctl_fields(raw),
                "raw": raw,
            }
            jsonl.write(json.dumps(row, sort_keys=True) + "\n")
            # Flush after each sample so a crashed run still leaves usable data.
            jsonl.flush()
            text.write(f"--- sample_unix_ns={sample_ns} ok={row['ok']} ---\n{raw}\n")
            text.flush()
            time.sleep(interval_s)
    return 0
|
||||
|
||||
|
||||
def main() -> int:
    """CLI entrypoint for the upstream-sync sampler.

    Expects exactly seven positional arguments; returns 2 on usage error,
    otherwise delegates to sample_until_probe_exits.
    """

    args = sys.argv[1:]
    if len(args) != 7:
        print(
            "usage: client_rct_upstream_sync_sampler.py RELAYCTL SERVER TLS_DOMAIN PROBE_PID INTERVAL_SECONDS JSONL_OUT TXT_OUT",
            file=sys.stderr,
        )
        return 2

    relayctl, server, tls_domain, pid_raw, interval_raw, jsonl_out, txt_out = args
    return sample_until_probe_exits(
        relayctl,
        server,
        tls_domain,
        int(pid_raw),
        float(interval_raw),
        pathlib.Path(jsonl_out),
        pathlib.Path(txt_out),
    )


if __name__ == "__main__":
    raise SystemExit(main())
|
||||
56
scripts/manual/client_rct_uvc_frame_meta_fetch.sh
Executable file
56
scripts/manual/client_rct_uvc_frame_meta_fetch.sh
Executable file
@ -0,0 +1,56 @@
|
||||
#!/usr/bin/env bash
# scripts/manual/client_rct_uvc_frame_meta_fetch.sh
# Manual: optional UVC spool metadata fetch for client-to-RCT lab probes.
# Not part of CI; requires SSH access to a live server artifact.
# Fetches optional server-side UVC spool timing for client-to-RCT probes.

set -euo pipefail

if [[ "$#" -ne 9 ]]; then
    echo "usage: $0 SERVER_HOST REMOTE_LOG REQUIRED LOCAL_JSONL SUMMARY_JSON SUMMARY_TXT TIMELINE_JSON MODE_FPS REPO_ROOT" >&2
    exit 2
fi

# Positional arguments (see usage line above).
server_host=$1
remote_log=$2
required=$3          # "1" makes a missing/unsummarizable log a hard failure
local_jsonl=$4
summary_json=$5
summary_txt=$6
timeline_json=$7
mode_fps=$8          # "0" means unknown fps; omit --fps in that case
repo_root=$9
# Intentionally unquoted at use sites so multiple ssh/scp options word-split.
ssh_opts=${SSH_OPTS:-"-o BatchMode=yes -o ConnectTimeout=5"}

# Empty remote path means the operator did not opt in to this fetch.
if [[ -z "${remote_log}" ]]; then
    echo "==> UVC frame metadata log fetch disabled"
    echo " ↪ set LESAVKA_CLIENT_RCT_UVC_FRAME_META_LOG_REMOTE to fetch and summarize server-side spool timing"
    exit 0
fi

echo "==> fetching optional UVC frame metadata log from ${server_host}:${remote_log}"
if ! scp ${ssh_opts} "${server_host}:${remote_log}" "${local_jsonl}"; then
    if [[ "${required}" == "1" ]]; then
        echo "required UVC frame metadata log was unavailable" >&2
        exit 91
    fi
    # Optional mode: degrade gracefully without the spool-boundary summary.
    echo " ↪ optional UVC frame metadata log unavailable; continuing without spool-boundary summary"
    exit 0
fi

# Build the summarizer invocation; --fps only when a concrete mode fps is known.
summarize_args=(
    "${local_jsonl}"
    "${summary_json}"
    "${summary_txt}"
    --timeline "${timeline_json}"
)
if [[ "${mode_fps}" != "0" ]]; then
    summarize_args+=(--fps "${mode_fps}")
fi
if ! python3 "${repo_root}/scripts/manual/summarize_uvc_frame_meta_log.py" "${summarize_args[@]}"; then
    if [[ "${required}" == "1" ]]; then
        echo "required UVC frame metadata log could not be summarized" >&2
        exit 92
    fi
    echo " ↪ optional UVC frame metadata log could not be summarized; continuing without spool-boundary summary"
fi
|
||||
52
scripts/manual/run_client_to_rct_timing_trace_probe.sh
Executable file
52
scripts/manual/run_client_to_rct_timing_trace_probe.sh
Executable file
@ -0,0 +1,52 @@
|
||||
#!/usr/bin/env bash
# scripts/manual/run_client_to_rct_timing_trace_probe.sh
# Manual: client-origin HEVC timing trace layered over the blind RCT probe.
# Not part of CI; requires the live Theia/Tethys lab.

set -euo pipefail

# Resolve script/repo locations and a per-run artifact directory.
SCRIPT_DIR="$(cd -- "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
REPO_ROOT="$(cd -- "${SCRIPT_DIR}/../.." >/dev/null 2>&1 && pwd)"
STAMP="$(date +%Y%m%d-%H%M%S)"
LOCAL_OUTPUT_DIR="${LOCAL_OUTPUT_DIR:-/tmp}"
TRACE_DIR="${LOCAL_OUTPUT_DIR%/}/lesavka-client-rct-timing-trace-${STAMP}"
BLIND_DIR="${TRACE_DIR}/blind-probe"
TRACE_JSON="${TRACE_DIR}/client-rct-timing-trace.json"
TRACE_TXT="${TRACE_DIR}/client-rct-timing-trace.txt"
RUN_LOG="${TRACE_DIR}/client-rct-timing-trace-run.log"
mkdir -p "${BLIND_DIR}"
# Mirror all further output into the run log.
exec > >(tee -a "${RUN_LOG}") 2>&1

echo "==> client-to-RCT T0-T5 timing trace"
echo " ↪ artifact_dir=${TRACE_DIR}"
echo " ↪ blind_probe_dir=${BLIND_DIR}"
echo " ↪ run_log=${RUN_LOG}"
echo " ↪ this wraps the blind probe and does not reconfigure Theia"

# Run the blind probe but capture its status instead of aborting, so the
# timing trace is still built (and the status reported) on probe failure.
set +e
LOCAL_OUTPUT_DIR="${BLIND_DIR}" "${SCRIPT_DIR}/run_client_to_rct_transport_probe.sh"
blind_status=$?
set -e

# The wrapped probe creates one timestamped directory; pick the newest.
probe_dir="$(find "${BLIND_DIR}" -maxdepth 1 -type d -name 'lesavka-client-rct-transport-probe-*' | sort | tail -n 1)"
if [[ -z "${probe_dir}" ]]; then
    echo "no blind probe artifact directory found under ${BLIND_DIR}" >&2
    exit 90
fi

echo "==> building T0-T5 timing trace from ${probe_dir}"
python3 "${SCRIPT_DIR}/client_rct_timing_trace_summary.py" \
    "${probe_dir}" \
    "${TRACE_JSON}" \
    "${TRACE_TXT}"

echo "==> done"
printf '%s\n' \
    "artifact_dir: ${TRACE_DIR}" \
    "blind_probe_artifact_dir: ${probe_dir}" \
    "timing_trace_json: ${TRACE_JSON}" \
    "timing_trace_txt: ${TRACE_TXT}" \
    "run_log: ${RUN_LOG}" \
    "blind_probe_exit_status: ${blind_status}"

# Propagate the blind probe's verdict as this wrapper's exit status.
exit "${blind_status}"
|
||||
498
scripts/manual/run_client_to_rct_transport_probe.sh
Executable file
498
scripts/manual/run_client_to_rct_transport_probe.sh
Executable file
@ -0,0 +1,498 @@
|
||||
#!/usr/bin/env bash
# scripts/manual/run_client_to_rct_transport_probe.sh
# Manual: client-origin bundled transport probe to the RCT UVC/UAC endpoints.
# Not part of CI; hardware/lab manual only.
#
# This runner keeps server->RCT calibration tooling untouched. It starts an
# RCT capture, injects deterministic flash/tone media through
# `lesavka-sync-probe`, then measures final sync, freshness, and smoothness from
# the captured UVC/UAC output.

set -euo pipefail
SCRIPT_DIR="$(cd -- "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
REPO_ROOT="$(cd -- "${SCRIPT_DIR}/../.." >/dev/null 2>&1 && pwd)"
# Lab host/endpoint configuration (all overridable via environment).
TETHYS_HOST=${TETHYS_HOST:-tethys}
LESAVKA_SERVER_HOST=${LESAVKA_SERVER_HOST:-theia}
LESAVKA_SERVER_ADDR=${LESAVKA_SERVER_ADDR:-auto}
LESAVKA_SERVER_SCHEME=${LESAVKA_SERVER_SCHEME:-https}
LESAVKA_TLS_DOMAIN=${LESAVKA_TLS_DOMAIN:-lesavka-server}
SERVER_TUNNEL_REMOTE_PORT=${SERVER_TUNNEL_REMOTE_PORT:-50051}
# Intentionally unquoted at use sites so multiple ssh options word-split.
SSH_OPTS=${SSH_OPTS:-"-o BatchMode=yes -o ConnectTimeout=5"}

# Remote (Tethys) capture configuration.
LESAVKA_CLIENT_RCT_MODE=${LESAVKA_CLIENT_RCT_MODE:-auto}
REMOTE_CAPTURE_STACK=${REMOTE_CAPTURE_STACK:-pulse}
REMOTE_PULSE_CAPTURE_TOOL=${REMOTE_PULSE_CAPTURE_TOOL:-gst}
REMOTE_PULSE_VIDEO_MODE=${REMOTE_PULSE_VIDEO_MODE:-cfr}
REMOTE_PULSE_AUDIO_ANCHOR_SILENCE=${REMOTE_PULSE_AUDIO_ANCHOR_SILENCE:-1}
REMOTE_CAPTURE_READY_TIMEOUT_SECONDS=${REMOTE_CAPTURE_READY_TIMEOUT_SECONDS:-30}
REMOTE_CAPTURE_READY_SETTLE_SECONDS=${REMOTE_CAPTURE_READY_SETTLE_SECONDS:-1}
REMOTE_CAPTURE_PREROLL_DISCARD_SECONDS=${REMOTE_CAPTURE_PREROLL_DISCARD_SECONDS:-3}
LESAVKA_CLIENT_RCT_START_DELAY_SECONDS=${LESAVKA_CLIENT_RCT_START_DELAY_SECONDS:-0}

# Probe timing: capture window = probe duration + start grace + tail.
PROBE_DURATION_SECONDS=${PROBE_DURATION_SECONDS:-20}
PROBE_WARMUP_SECONDS=${PROBE_WARMUP_SECONDS:-4}
PROBE_PULSE_PERIOD_MS=${PROBE_PULSE_PERIOD_MS:-1000}
PROBE_PULSE_WIDTH_MS=${PROBE_PULSE_WIDTH_MS:-120}
PROBE_MARKER_TICK_PERIOD=${PROBE_MARKER_TICK_PERIOD:-5}
PROBE_EVENT_WIDTH_CODES=${PROBE_EVENT_WIDTH_CODES:-1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16}
PROBE_START_GRACE_SECONDS=${PROBE_START_GRACE_SECONDS:-12}
TAIL_SECONDS=${TAIL_SECONDS:-4}
PROBE_TIMEOUT_SECONDS=${PROBE_TIMEOUT_SECONDS:-$((PROBE_DURATION_SECONDS + PROBE_START_GRACE_SECONDS + TAIL_SECONDS + 20))}
CAPTURE_SECONDS=${CAPTURE_SECONDS:-$((PROBE_DURATION_SECONDS + PROBE_START_GRACE_SECONDS + TAIL_SECONDS))}

# Pass/fail thresholds for the transport summary.
LESAVKA_CLIENT_RCT_MAX_AGE_MS=${LESAVKA_CLIENT_RCT_MAX_AGE_MS:-1000}
LESAVKA_CLIENT_RCT_MIN_PAIRS=${LESAVKA_CLIENT_RCT_MIN_PAIRS:-13}
LESAVKA_CLIENT_RCT_REQUIRE_SMOOTHNESS=${LESAVKA_CLIENT_RCT_REQUIRE_SMOOTHNESS:-0}
LESAVKA_CLIENT_RCT_SYNC_SAMPLE_INTERVAL_SECONDS=${LESAVKA_CLIENT_RCT_SYNC_SAMPLE_INTERVAL_SECONDS:-0.5}

# Per-run local artifact paths, all under one timestamped directory.
LOCAL_OUTPUT_DIR=${LOCAL_OUTPUT_DIR:-/tmp}
STAMP="$(date +%Y%m%d-%H%M%S)"
LOCAL_REPORT_DIR="${LOCAL_OUTPUT_DIR%/}/lesavka-client-rct-transport-probe-${STAMP}"
LOCAL_CAPTURE="${LOCAL_REPORT_DIR}/capture.mkv"
LOCAL_CAPTURE_LOG="${LOCAL_REPORT_DIR}/capture.log"
LOCAL_REPORT_JSON="${LOCAL_REPORT_DIR}/report.json"
LOCAL_REPORT_TXT="${LOCAL_REPORT_DIR}/report.txt"
LOCAL_EVENTS_CSV="${LOCAL_REPORT_DIR}/events.csv"
LOCAL_CLIENT_TIMELINE_JSON="${LOCAL_REPORT_DIR}/client-transport-timeline.json"
LOCAL_CLOCK_ALIGNMENT_JSON="${LOCAL_REPORT_DIR}/clock-alignment.json"
LOCAL_TRANSPORT_SUMMARY_JSON="${LOCAL_REPORT_DIR}/client-rct-transport-summary.json"
LOCAL_TRANSPORT_SUMMARY_TXT="${LOCAL_REPORT_DIR}/client-rct-transport-summary.txt"
LOCAL_UPSTREAM_SYNC_JSONL="${LOCAL_REPORT_DIR}/upstream-sync-samples.jsonl"
LOCAL_UPSTREAM_SYNC_TXT="${LOCAL_REPORT_DIR}/upstream-sync-samples.txt"
LOCAL_CLIENT_SEND_JSONL="${LOCAL_REPORT_DIR}/client-send-bundles.jsonl"
LOCAL_UVC_FRAME_META_JSONL="${LOCAL_REPORT_DIR}/uvc-frame-meta.jsonl"
LOCAL_UVC_FRAME_META_SUMMARY_JSON="${LOCAL_REPORT_DIR}/uvc-frame-meta-summary.json"
LOCAL_UVC_FRAME_META_SUMMARY_TXT="${LOCAL_REPORT_DIR}/uvc-frame-meta-summary.txt"
LOCAL_RUN_LOG="${LOCAL_REPORT_DIR}/client-rct-run.log"
REMOTE_CAPTURE=${REMOTE_CAPTURE:-"/tmp/lesavka-client-rct-transport-probe-${STAMP}.mkv"}
LESAVKA_CLIENT_RCT_UVC_FRAME_META_LOG_REMOTE=${LESAVKA_CLIENT_RCT_UVC_FRAME_META_LOG_REMOTE:-}
LESAVKA_CLIENT_RCT_UVC_FRAME_META_LOG_REQUIRED=${LESAVKA_CLIENT_RCT_UVC_FRAME_META_LOG_REQUIRED:-0}
# Sentinel the remote capture script prints once recording is live.
CAPTURE_READY_MARKER="__LESAVKA_CAPTURE_READY__"
mkdir -p "${LOCAL_REPORT_DIR}"
# Mirror all further output into the run log.
exec > >(tee -a "${LOCAL_RUN_LOG}") 2>&1
# Mutable run state, set by the helper functions below.
SERVER_TUNNEL_PID=""
CAPTURE_PID=""
RESOLVED_LESAVKA_SERVER_ADDR=""
|
||||
|
||||
cleanup() {
    # Tear down the background SSH tunnel if one was started and still runs.
    local tunnel_pid="${SERVER_TUNNEL_PID}"
    if [[ -z "${tunnel_pid}" ]]; then
        return 0
    fi
    if ! kill -0 "${tunnel_pid}" >/dev/null 2>&1; then
        return 0
    fi
    kill "${tunnel_pid}" >/dev/null 2>&1 || true
    wait "${tunnel_pid}" >/dev/null 2>&1 || true
}
trap cleanup EXIT
|
||||
|
||||
parse_mode() {
    # Emit "WIDTH HEIGHT FPS" for a WIDTHxHEIGHT@FPS spec; "auto" maps to
    # "0 0 0" so the remote side auto-detects the current UVC mode.
    local requested=$1
    case "${requested}" in
        auto)
            printf '0 0 0\n'
            ;;
        *)
            if [[ ! "${requested}" =~ ^([0-9]+)x([0-9]+)@([0-9]+)$ ]]; then
                echo "invalid LESAVKA_CLIENT_RCT_MODE=${requested}; expected WIDTHxHEIGHT@FPS" >&2
                return 1
            fi
            printf '%s %s %s\n' "${BASH_REMATCH[1]}" "${BASH_REMATCH[2]}" "${BASH_REMATCH[3]}"
            ;;
    esac
}
|
||||
|
||||
pick_tunnel_port() {
    # Ask the OS for a free ephemeral TCP port on loopback and print it.
    python3 - <<'PY'
import socket
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
    sock.bind(("127.0.0.1", 0))
    print(sock.getsockname()[1])
PY
}

wait_for_local_port() {
    # Poll until something accepts connections on 127.0.0.1:$1, or fail
    # after a 15s deadline.
    local port=$1
    local deadline=$(( $(date +%s) + 15 ))
    until python3 - <<'PY' "${port}"
import socket
import sys
port = int(sys.argv[1])
try:
    with socket.create_connection(("127.0.0.1", port), timeout=1):
        pass
except OSError:
    sys.exit(1)
PY
    do
        if (( $(date +%s) >= deadline )); then
            echo "timed out waiting for localhost:${port}" >&2
            return 1
        fi
        sleep 0.25
    done
}
|
||||
|
||||
sleep_start_delay() {
    # Optional operator-configured delay before the probe starts; the value
    # must be a non-negative integer number of seconds.
    [[ "${LESAVKA_CLIENT_RCT_START_DELAY_SECONDS}" =~ ^[0-9]+$ ]] || {
        echo "LESAVKA_CLIENT_RCT_START_DELAY_SECONDS must be a non-negative number" >&2
        exit 2
    }
    if (( LESAVKA_CLIENT_RCT_START_DELAY_SECONDS > 0 )); then
        echo "==> delaying client-to-RCT transport probe start for ${LESAVKA_CLIENT_RCT_START_DELAY_SECONDS}s"
        sleep "${LESAVKA_CLIENT_RCT_START_DELAY_SECONDS}"
    fi
}

start_server_tunnel() {
    # Resolve RESOLVED_LESAVKA_SERVER_ADDR: either the operator-provided
    # address, or (in "auto" mode) a fresh SSH tunnel to the server's
    # loopback gRPC port.
    if [[ "${LESAVKA_SERVER_ADDR}" != "auto" ]]; then
        RESOLVED_LESAVKA_SERVER_ADDR="${LESAVKA_SERVER_ADDR}"
        return 0
    fi
    local local_port
    local_port="$(pick_tunnel_port)"
    echo "==> opening SSH tunnel to ${LESAVKA_SERVER_HOST}:127.0.0.1:${SERVER_TUNNEL_REMOTE_PORT} on localhost:${local_port}"
    # Background -N tunnel; cleanup() kills it on exit via the EXIT trap.
    ssh ${SSH_OPTS} -o ExitOnForwardFailure=yes \
        -N -L "127.0.0.1:${local_port}:127.0.0.1:${SERVER_TUNNEL_REMOTE_PORT}" \
        "${LESAVKA_SERVER_HOST}" &
    SERVER_TUNNEL_PID=$!
    wait_for_local_port "${local_port}"
    RESOLVED_LESAVKA_SERVER_ADDR="${LESAVKA_SERVER_SCHEME}://127.0.0.1:${local_port}"
    echo " ↪ tunneled to ${LESAVKA_SERVER_HOST}:127.0.0.1:${SERVER_TUNNEL_REMOTE_PORT}"
}
|
||||
|
||||
sample_capture_clock_alignment() {
    # Record client/Tethys clock offset so freshness math can correct for
    # cross-host clock skew.
    echo "==> sampling client/Tethys clock alignment for transport freshness"
    python3 "${REPO_ROOT}/scripts/manual/client_rct_clock_alignment.py" \
        "${TETHYS_HOST}" \
        "${SSH_OPTS}" \
        "${LOCAL_CLOCK_ALIGNMENT_JSON}"
}

build_probe_tools() {
    # Build the client-side binaries up front so probe startup latency does
    # not include compilation time.
    echo "==> prebuilding client transport probe/analyzer"
    (
        cd "${REPO_ROOT}"
        cargo build -p lesavka_client --bin lesavka-sync-probe --bin lesavka-sync-analyze --bin lesavka-relayctl
    )
}

print_versions() {
    # Best-effort version banner; `|| true` keeps a version-query failure
    # from aborting the probe.
    echo "==> Lesavka versions under test"
    (
        cd "${REPO_ROOT}"
        LESAVKA_TLS_DOMAIN="${LESAVKA_TLS_DOMAIN}" \
            "${REPO_ROOT}/target/debug/lesavka-relayctl" \
            --server "${RESOLVED_LESAVKA_SERVER_ADDR}" version
    ) | sed 's/^/ ↪ /' || true
}
|
||||
|
||||
start_tethys_capture() {
    # Launch the RCT-side UVC/UAC capture over SSH in the background. The
    # whole remote script travels as a quoted heredoc (no local expansion);
    # its stdout/stderr stream into LOCAL_CAPTURE_LOG, which
    # wait_for_capture_ready greps for the readiness marker.
    local width=$1 height=$2 fps=$3
    echo "==> starting RCT UVC/UAC capture on ${TETHYS_HOST}"
    ssh ${SSH_OPTS} "${TETHYS_HOST}" bash -s -- \
        "${REMOTE_CAPTURE}" \
        "${CAPTURE_SECONDS}" \
        "${width}" \
        "${height}" \
        "${fps}" \
        "${REMOTE_CAPTURE_STACK}" \
        "${REMOTE_PULSE_CAPTURE_TOOL}" \
        "${REMOTE_PULSE_VIDEO_MODE}" \
        "${REMOTE_PULSE_AUDIO_ANCHOR_SILENCE}" \
        "${REMOTE_CAPTURE_PREROLL_DISCARD_SECONDS}" \
        "${REMOTE_CAPTURE_READY_SETTLE_SECONDS}" \
        "${CAPTURE_READY_MARKER}" \
        >"${LOCAL_CAPTURE_LOG}" 2>&1 <<'REMOTE_CAPTURE_SCRIPT' &
set -euo pipefail
# Positional arguments forwarded from the local host (see ssh call above).
remote_capture=$1
capture_seconds=$2
width=$3
height=$4
fps=$5
capture_stack=$6
pulse_tool=$7
video_mode=$8
anchor_silence=$9
preroll_discard=${10}
ready_settle=${11}
ready_marker=${12}

resolve_video_device() {
    # Only the Lesavka composite gadget's UVC node is acceptable.
    find /dev/v4l/by-id -maxdepth 1 -type l \
        -name 'usb-Lesavka_Lesavka_Composite*video-index0' | sort | head -n 1
}

resolve_pulse_source() {
    # Prefer the ALSA input for the Lesavka composite; fall back to any
    # non-monitor Lesavka source.
    pactl list short sources 2>/dev/null \
        | awk '
            /alsa_input\..*Lesavka_Lesavka_Composite/ { print $2; found=1; exit }
            /Lesavka_Lesavka_Composite/ && $2 !~ /\.monitor$/ && !fallback { fallback=$2 }
            END {
                if (found) exit 0
                if (fallback != "") { print fallback; exit 0 }
                exit 1
            }
        '
}

current_video_profile() {
    # Print "WIDTH HEIGHT FPS" from the device's currently negotiated mode.
    v4l2-ctl -d "${video_device}" --all 2>/dev/null \
        | awk '
            /Width\/Height[[:space:]]*:/ {
                split($0, a, ":")
                gsub(/^[ \t]+/, "", a[2])
                split(a[2], wh, "/")
                width=wh[1]
                height=wh[2]
                next
            }
            /Frames per second[[:space:]]*:/ {
                split($0, a, ":")
                gsub(/^[ \t]+/, "", a[2])
                split(a[2], fps_parts, "\\.")
                fps=fps_parts[1]
            }
            END {
                if (width && height && fps) {
                    printf "%s %s %s\n", width, height, fps
                    exit 0
                }
                exit 1
            }
        '
}

gst_audio_mixer_element() {
    # Use ignore-inactive-pads when the installed audiomixer supports it.
    if gst-inspect-1.0 audiomixer 2>/dev/null | grep -q 'ignore-inactive-pads'; then
        printf 'audiomixer name=amix ignore-inactive-pads=true'
    else
        printf 'audiomixer name=amix'
    fi
}

run_preroll() {
    # Discard the first N seconds after enumeration; uses the outer-scope
    # ${width}/${height}/${fps}. Failures are tolerated (|| true).
    local video_device=$1
    local seconds=$2
    [[ "${seconds}" =~ ^[0-9]+$ && "${seconds}" -gt 0 ]] || return 0
    printf 'discarding %ss of post-enumeration capture before probe\n' "${seconds}" >&2
    timeout --kill-after=2 --signal=INT "${seconds}" \
        gst-launch-1.0 -q -e v4l2src device="${video_device}" do-timestamp=true num-buffers="$((fps * seconds))" \
        ! "image/jpeg,width=${width},height=${height},framerate=${fps}/1" ! fakesink \
        >/dev/null 2>&1 || true
}

run_gst_pulse_capture() {
    # Record muxed MJPEG(+x264 in cfr mode) video and Pulse audio into the
    # remote MKV, print the readiness marker after a settle delay, then wait
    # for the timed pipeline to finish.
    local video_device=$1
    local pulse_source=$2
    local video_caps="image/jpeg,width=${width},height=${height},framerate=${fps}/1"
    local decode_chain="jpegdec !"
    local audio_mixer
    audio_mixer="$(gst_audio_mixer_element)"
    local audio_anchor=()
    if [[ "${anchor_silence}" != "0" ]]; then
        printf 'anchoring Pulse capture audio timeline with generated silence\n' >&2
        audio_anchor=(audiotestsrc wave=silence is-live=true do-timestamp=true ! "audio/x-raw,rate=48000,channels=2" ! queue ! amix.)
    fi
    printf 'capture_start_unix_ns=%s\n' "$(date +%s%N)" >&2
    if [[ "${video_mode}" == "cfr" ]]; then
        timeout --kill-after=5 --signal=INT "$((capture_seconds + 3))" \
            gst-launch-1.0 -q -e \
            matroskamux name=mux ! filesink location="${remote_capture}" \
            v4l2src device="${video_device}" do-timestamp=true ! ${video_caps} ! \
            ${decode_chain} videoconvert ! videorate ! video/x-raw,framerate="${fps}"/1 ! \
            x264enc tune=zerolatency speed-preset=ultrafast key-int-max=1 bitrate=5000 ! \
            h264parse ! queue ! mux. \
            ${audio_mixer} ! audio/x-raw,rate=48000,channels=2 ! queue ! mux. \
            "${audio_anchor[@]}" \
            pulsesrc device="${pulse_source}" do-timestamp=true ! audio/x-raw,rate=48000,channels=2 ! \
            audioconvert ! audioresample ! audio/x-raw,rate=48000,channels=2 ! queue ! amix. &
    else
        timeout --kill-after=5 --signal=INT "$((capture_seconds + 3))" \
            gst-launch-1.0 -q -e \
            matroskamux name=mux ! filesink location="${remote_capture}" \
            v4l2src device="${video_device}" do-timestamp=true ! ${video_caps} ! queue ! mux. \
            ${audio_mixer} ! audio/x-raw,rate=48000,channels=2 ! queue ! mux. \
            "${audio_anchor[@]}" \
            pulsesrc device="${pulse_source}" do-timestamp=true ! audio/x-raw,rate=48000,channels=2 ! \
            audioconvert ! audioresample ! audio/x-raw,rate=48000,channels=2 ! queue ! amix. &
    fi
    local capture_pid=$!
    sleep "${ready_settle}"
    printf '%s\n' "${ready_marker}" >&2
    wait "${capture_pid}"
}

# --- remote main flow ---
rm -f "${remote_capture}"
video_device="$(resolve_video_device)"
if [[ -z "${video_device}" ]]; then
    printf 'Lesavka UVC video device not found on RCT host; refusing unrelated capture devices.\n' >&2
    exit 2
fi
# Mode "0 0 0" means auto-detect the currently negotiated UVC profile.
if [[ "${width}" == "0" || "${height}" == "0" || "${fps}" == "0" ]]; then
    if read -r width height fps < <(current_video_profile); then
        :
    else
        printf 'unable to auto-detect current UVC mode; set LESAVKA_CLIENT_RCT_MODE=WIDTHxHEIGHT@FPS\n' >&2
        exit 2
    fi
fi
printf 'using video device: %s\n' "${video_device}" >&2
printf 'using video mode: %sx%s @ %s fps (mjpeg)\n' "${width}" "${height}" "${fps}" >&2

case "${capture_stack}" in
    pulse)
        if [[ "${pulse_tool}" != "gst" ]]; then
            printf 'unsupported REMOTE_PULSE_CAPTURE_TOOL=%s for client-to-RCT probe; use gst\n' "${pulse_tool}" >&2
            exit 2
        fi
        pulse_source="$(resolve_pulse_source)"
        if [[ -z "${pulse_source}" ]]; then
            printf 'Lesavka Pulse audio source not found; refusing timing-sensitive fallback.\n' >&2
            exit 2
        fi
        printf 'using Pulse source: %s\n' "${pulse_source}" >&2
        run_preroll "${video_device}" "${preroll_discard}"
        run_gst_pulse_capture "${video_device}" "${pulse_source}"
        ;;
    *)
        printf 'unsupported REMOTE_CAPTURE_STACK=%s for client-to-RCT probe\n' "${capture_stack}" >&2
        exit 2
        ;;
esac
REMOTE_CAPTURE_SCRIPT
    CAPTURE_PID=$!
}
|
||||
|
||||
wait_for_capture_ready() {
    # Block until the remote capture prints its readiness marker into the
    # capture log, the capture process dies, or the timeout elapses.
    local deadline=$(( $(date +%s) + REMOTE_CAPTURE_READY_TIMEOUT_SECONDS ))
    until grep -q "${CAPTURE_READY_MARKER}" "${LOCAL_CAPTURE_LOG}" 2>/dev/null; do
        # A dead capture process before readiness is a hard failure.
        if ! kill -0 "${CAPTURE_PID}" >/dev/null 2>&1; then
            wait "${CAPTURE_PID}" || true
            echo "RCT capture failed before the client probe could start; see ${LOCAL_CAPTURE_LOG}" >&2
            exit 90
        fi
        if (( $(date +%s) >= deadline )); then
            echo "timed out waiting for RCT capture readiness; see ${LOCAL_CAPTURE_LOG}" >&2
            exit 90
        fi
        sleep 0.25
    done
}
|
||||
|
||||
run_client_sync_probe() {
    # Drive the deterministic flash/tone probe from this client against the
    # tunneled server address; an outer `timeout` bounds total runtime.
    echo "==> running client-origin bundled transport probe against ${RESOLVED_LESAVKA_SERVER_ADDR}"
    (
        cd "${REPO_ROOT}"
        LESAVKA_TLS_DOMAIN="${LESAVKA_TLS_DOMAIN}" \
        LESAVKA_SYNC_PROBE_SEND_LOG="${LOCAL_CLIENT_SEND_JSONL}" \
        timeout --signal=INT "${PROBE_TIMEOUT_SECONDS}" \
            "${REPO_ROOT}/target/debug/lesavka-sync-probe" \
            --server "${RESOLVED_LESAVKA_SERVER_ADDR}" \
            --duration-seconds "${PROBE_DURATION_SECONDS}" \
            --warmup-seconds "${PROBE_WARMUP_SECONDS}" \
            --pulse-period-ms "${PROBE_PULSE_PERIOD_MS}" \
            --pulse-width-ms "${PROBE_PULSE_WIDTH_MS}" \
            --marker-tick-period "${PROBE_MARKER_TICK_PERIOD}" \
            --event-width-codes "${PROBE_EVENT_WIDTH_CODES}" \
            --timeline-json "${LOCAL_CLIENT_TIMELINE_JSON}"
    )
}

run_client_sync_probe_with_sampler() {
    # Run the probe and the upstream-sync sampler concurrently; the sampler
    # polls until the probe pid exits. Only the probe's status matters.
    run_client_sync_probe &
    local probe_pid=$!
    python3 "${REPO_ROOT}/scripts/manual/client_rct_upstream_sync_sampler.py" \
        "${REPO_ROOT}/target/debug/lesavka-relayctl" \
        "${RESOLVED_LESAVKA_SERVER_ADDR}" \
        "${LESAVKA_TLS_DOMAIN}" \
        "${probe_pid}" \
        "${LESAVKA_CLIENT_RCT_SYNC_SAMPLE_INTERVAL_SECONDS}" \
        "${LOCAL_UPSTREAM_SYNC_JSONL}" \
        "${LOCAL_UPSTREAM_SYNC_TXT}" &
    local sampler_pid=$!
    local probe_status=0
    wait "${probe_pid}" || probe_status=$?
    # Sampler failures are non-fatal; samples are diagnostic only.
    wait "${sampler_pid}" || true
    return "${probe_status}"
}
|
||||
|
||||
fetch_and_analyze_capture() {
    # Copy the finished MKV from Tethys and run the sync analyzer over it.
    echo "==> fetching RCT capture back to ${LOCAL_CAPTURE}"
    scp ${SSH_OPTS} "${TETHYS_HOST}:${REMOTE_CAPTURE}" "${LOCAL_CAPTURE}"
    echo "==> analyzing RCT capture"
    local analyze_args=("${LOCAL_CAPTURE}" --report-dir "${LOCAL_REPORT_DIR}")
    if [[ -n "${PROBE_EVENT_WIDTH_CODES}" ]]; then
        analyze_args+=(--event-width-codes "${PROBE_EVENT_WIDTH_CODES}")
    fi
    (
        cd "${REPO_ROOT}"
        "${REPO_ROOT}/target/debug/lesavka-sync-analyze" "${analyze_args[@]}"
    )
}

write_transport_summary() {
    # Join analyzer report, client timeline, capture log, and clock alignment
    # into the final pass/fail transport summary artifacts.
    python3 "${REPO_ROOT}/scripts/manual/client_rct_transport_summary.py" \
        "${LOCAL_REPORT_JSON}" \
        "${LOCAL_CLIENT_TIMELINE_JSON}" \
        "${LOCAL_CAPTURE_LOG}" \
        "${LOCAL_CLOCK_ALIGNMENT_JSON}" \
        "${LOCAL_CAPTURE}" \
        "${LOCAL_TRANSPORT_SUMMARY_JSON}" \
        "${LOCAL_TRANSPORT_SUMMARY_TXT}" \
        "${LESAVKA_CLIENT_RCT_MAX_AGE_MS}" \
        "${LESAVKA_CLIENT_RCT_MIN_PAIRS}" \
        "${LESAVKA_CLIENT_RCT_REQUIRE_SMOOTHNESS}"
}

fetch_and_summarize_uvc_frame_meta_log() {
    # Optional server-side spool timing; the helper no-ops when the remote
    # log path is unset.
    SSH_OPTS="${SSH_OPTS}" "${REPO_ROOT}/scripts/manual/client_rct_uvc_frame_meta_fetch.sh" \
        "${LESAVKA_SERVER_HOST}" \
        "${LESAVKA_CLIENT_RCT_UVC_FRAME_META_LOG_REMOTE}" \
        "${LESAVKA_CLIENT_RCT_UVC_FRAME_META_LOG_REQUIRED}" \
        "${LOCAL_UVC_FRAME_META_JSONL}" \
        "${LOCAL_UVC_FRAME_META_SUMMARY_JSON}" \
        "${LOCAL_UVC_FRAME_META_SUMMARY_TXT}" \
        "${LOCAL_CLIENT_TIMELINE_JSON}" \
        "${MODE_FPS}" \
        "${REPO_ROOT}"
}
|
||||
|
||||
# --- driver: parse mode, run the probe end to end, report artifacts ---
read -r MODE_WIDTH MODE_HEIGHT MODE_FPS < <(parse_mode "${LESAVKA_CLIENT_RCT_MODE}")

echo "==> client-to-RCT bundled transport probe"
echo " ↪ mode=${LESAVKA_CLIENT_RCT_MODE}"
echo " ↪ capture_stack=${REMOTE_CAPTURE_STACK} pulse_tool=${REMOTE_PULSE_CAPTURE_TOOL} video_mode=${REMOTE_PULSE_VIDEO_MODE}"
echo " ↪ server_addr=${LESAVKA_SERVER_ADDR}"
echo " ↪ max_client_to_rct_age_ms=${LESAVKA_CLIENT_RCT_MAX_AGE_MS}"
echo " ↪ start_delay=${LESAVKA_CLIENT_RCT_START_DELAY_SECONDS}s"
echo " ↪ uvc_frame_meta_log_remote=${LESAVKA_CLIENT_RCT_UVC_FRAME_META_LOG_REMOTE:-disabled}"
echo " ↪ artifact_dir=${LOCAL_REPORT_DIR}"
echo " ↪ run_log=${LOCAL_RUN_LOG}"
echo " ↪ no remote sudo/reconfigure will be attempted by this script"

sleep_start_delay
start_server_tunnel
build_probe_tools
print_versions
sample_capture_clock_alignment
start_tethys_capture "${MODE_WIDTH}" "${MODE_HEIGHT}" "${MODE_FPS}"
wait_for_capture_ready
sleep 1
run_client_sync_probe_with_sampler

# The remote capture is bounded by `timeout`; status 124 (timeout expiry)
# is the expected way for it to end, so only other nonzero codes are fatal.
capture_status=0
wait "${CAPTURE_PID}" || capture_status=$?
if [[ "${capture_status}" -ne 0 && "${capture_status}" -ne 124 ]]; then
    echo "RCT capture exited with status ${capture_status}; see ${LOCAL_CAPTURE_LOG}" >&2
    exit "${capture_status}"
fi

fetch_and_analyze_capture
write_transport_summary
fetch_and_summarize_uvc_frame_meta_log

echo "==> done"
printf '%s\n' \
    "artifact_dir: ${LOCAL_REPORT_DIR}" "capture: ${LOCAL_CAPTURE}" \
    "report_json: ${LOCAL_REPORT_JSON}" "report_txt: ${LOCAL_REPORT_TXT}" \
    "events_csv: ${LOCAL_EVENTS_CSV}" "client_timeline_json: ${LOCAL_CLIENT_TIMELINE_JSON}" \
    "clock_alignment_json: ${LOCAL_CLOCK_ALIGNMENT_JSON}" "transport_summary_json: ${LOCAL_TRANSPORT_SUMMARY_JSON}" \
    "transport_summary_txt: ${LOCAL_TRANSPORT_SUMMARY_TXT}" "upstream_sync_jsonl: ${LOCAL_UPSTREAM_SYNC_JSONL}" \
    "upstream_sync_txt: ${LOCAL_UPSTREAM_SYNC_TXT}" "client_send_jsonl: ${LOCAL_CLIENT_SEND_JSONL}" \
    "uvc_frame_meta_jsonl: ${LOCAL_UVC_FRAME_META_JSONL}" "uvc_frame_meta_summary_json: ${LOCAL_UVC_FRAME_META_SUMMARY_JSON}" \
    "uvc_frame_meta_summary_txt: ${LOCAL_UVC_FRAME_META_SUMMARY_TXT}" "run_log: ${LOCAL_RUN_LOG}"
|
||||
129
scripts/manual/run_hevc_post_reboot_sequence.sh
Executable file
129
scripts/manual/run_hevc_post_reboot_sequence.sh
Executable file
@ -0,0 +1,129 @@
|
||||
#!/usr/bin/env bash
|
||||
# Manual: one-command HEVC recovery runway after Theia comes back online.
|
||||
# Not part of CI: this coordinates local preflights, passwordless Theia deploy,
|
||||
# and hardware-in-the-loop RCT calibration.
|
||||
set -euo pipefail
|
||||
|
||||
SCRIPT_DIR="$(cd -- "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
|
||||
REPO_ROOT="$(cd -- "${SCRIPT_DIR}/../.." >/dev/null 2>&1 && pwd)"
|
||||
STAMP="$(date +%Y%m%d-%H%M%S)"
|
||||
|
||||
OUTPUT_DIR="${LESAVKA_HEVC_POST_REBOOT_OUTPUT_DIR:-/tmp/lesavka-hevc-post-reboot-${STAMP}}"
|
||||
RUN_LOG="${OUTPUT_DIR}/hevc-post-reboot-sequence.log"
|
||||
REMOTE_HOST="${LESAVKA_HEVC_POST_REBOOT_REMOTE_HOST:-${LESAVKA_HEVC_REENTRY_HOST:-theia}}"
|
||||
REMOTE_REPO="${LESAVKA_HEVC_POST_REBOOT_REMOTE_REPO:-${LESAVKA_HEVC_REENTRY_REMOTE_REPO:-/home/theia/Development/lesavka-codex}}"
|
||||
WAIT_SECONDS="${LESAVKA_HEVC_POST_REBOOT_WAIT_SECONDS:-900}"
|
||||
WAIT_INTERVAL_SECONDS="${LESAVKA_HEVC_POST_REBOOT_WAIT_INTERVAL_SECONDS:-15}"
|
||||
|
||||
RUN_LOCAL_PREFLIGHTS="${LESAVKA_HEVC_POST_REBOOT_RUN_LOCAL_PREFLIGHTS:-1}"
|
||||
RUN_REENTRY="${LESAVKA_HEVC_POST_REBOOT_RUN_REENTRY:-1}"
|
||||
RUN_STATIC_MATRIX="${LESAVKA_HEVC_POST_REBOOT_RUN_STATIC_MATRIX:-1}"
|
||||
RUN_FINAL_SANITY="${LESAVKA_HEVC_POST_REBOOT_RUN_FINAL_SANITY:-0}"
|
||||
|
||||
REENTRY_MODE="${LESAVKA_HEVC_POST_REBOOT_REENTRY_MODE:-1280x720@30}"
|
||||
PENDING_MODES="${LESAVKA_HEVC_POST_REBOOT_PENDING_MODES:-1280x720@30,1280x720@20}"
|
||||
FINAL_SANITY_MODES="${LESAVKA_HEVC_POST_REBOOT_FINAL_MODES:-1280x720@20,1280x720@30,1920x1080@20,1920x1080@30}"
|
||||
RECONFIGURE_COMMAND="${LESAVKA_HEVC_POST_REBOOT_RECONFIGURE_COMMAND:-ssh ${REMOTE_HOST} sudo -n /usr/local/sbin/lesavka-dev-install reconfigure \"\$LESAVKA_MODE\" hevc}"
|
||||
|
||||
mkdir -p "${OUTPUT_DIR}"
|
||||
|
||||
echo "==> HEVC post-reboot sequence"
|
||||
echo " ↪ output_dir=${OUTPUT_DIR}"
|
||||
echo " ↪ run_log=${RUN_LOG}"
|
||||
echo " ↪ remote_host=${REMOTE_HOST}"
|
||||
echo " ↪ remote_repo=${REMOTE_REPO}"
|
||||
echo " ↪ wait_seconds=${WAIT_SECONDS} wait_interval_seconds=${WAIT_INTERVAL_SECONDS}"
|
||||
echo " ↪ local_preflights=${RUN_LOCAL_PREFLIGHTS} reentry=${RUN_REENTRY} static_matrix=${RUN_STATIC_MATRIX} final_sanity=${RUN_FINAL_SANITY}"
|
||||
echo " ↪ pending_modes=${PENDING_MODES}"
|
||||
echo " ↪ final_sanity_modes=${FINAL_SANITY_MODES}"
|
||||
echo " ↪ sudo is non-interactive only; no password prompt path is used"
|
||||
|
||||
run_local_preflights() {
|
||||
echo "== local HEVC bundle audit =="
|
||||
LESAVKA_LOCAL_HEVC_BUNDLE_AUDIT_OUTPUT_DIR="${OUTPUT_DIR}/local-bundle-audit" \
|
||||
"${SCRIPT_DIR}/run_local_hevc_bundle_audit.sh"
|
||||
|
||||
echo "== local HEVC encoder preflight =="
|
||||
LESAVKA_LOCAL_HEVC_ENCODER_PREFLIGHT_OUTPUT_DIR="${OUTPUT_DIR}/local-encoder-preflight" \
|
||||
"${SCRIPT_DIR}/run_local_hevc_encoder_preflight.sh"
|
||||
}
|
||||
|
||||
run_remote_reentry() {
|
||||
echo "== remote HEVC re-entry =="
|
||||
LESAVKA_HEVC_REENTRY_HOST="${REMOTE_HOST}" \
|
||||
LESAVKA_HEVC_REENTRY_REMOTE_REPO="${REMOTE_REPO}" \
|
||||
LESAVKA_HEVC_REENTRY_MODE="${REENTRY_MODE}" \
|
||||
LESAVKA_HEVC_REENTRY_CODEC=hevc \
|
||||
LESAVKA_HEVC_REENTRY_SYNC=1 \
|
||||
LESAVKA_HEVC_REENTRY_BUILD=1 \
|
||||
LESAVKA_HEVC_REENTRY_DEPLOY=1 \
|
||||
LESAVKA_HEVC_REENTRY_RECONFIGURE=1 \
|
||||
LESAVKA_HEVC_REENTRY_WAIT_SECONDS="${WAIT_SECONDS}" \
|
||||
LESAVKA_HEVC_REENTRY_WAIT_INTERVAL_SECONDS="${WAIT_INTERVAL_SECONDS}" \
|
||||
LESAVKA_HEVC_REENTRY_OUTPUT_DIR="${OUTPUT_DIR}/remote-reentry" \
|
||||
"${SCRIPT_DIR}/run_hevc_remote_reentry_check.sh"
|
||||
}
|
||||
|
||||
run_hevc_static_matrix() {
|
||||
echo "== pending HEVC static calibration matrix =="
|
||||
LOCAL_OUTPUT_DIR="${OUTPUT_DIR}" \
|
||||
REMOTE_PULSE_CAPTURE_TOOL="${REMOTE_PULSE_CAPTURE_TOOL:-gst}" \
|
||||
REMOTE_PULSE_VIDEO_MODE="${REMOTE_PULSE_VIDEO_MODE:-mjpeg-cfr}" \
|
||||
LESAVKA_SERVER_RC_PROFILE=hevc \
|
||||
LESAVKA_SERVER_RC_MODES="${PENDING_MODES}" \
|
||||
LESAVKA_SERVER_RC_REPEAT_COUNT="${LESAVKA_HEVC_POST_REBOOT_REPEAT_COUNT:-3}" \
|
||||
LESAVKA_SERVER_RC_STATIC_MIN_RUNS="${LESAVKA_HEVC_POST_REBOOT_STATIC_MIN_RUNS:-3}" \
|
||||
LESAVKA_SERVER_RC_VERBOSE_PROBES="${LESAVKA_SERVER_RC_VERBOSE_PROBES:-0}" \
|
||||
LESAVKA_SERVER_RC_RECONFIGURE=1 \
|
||||
LESAVKA_SERVER_RC_PROMPT_SUDO_EARLY=0 \
|
||||
LESAVKA_SERVER_RC_RECONFIGURE_COMMAND="${RECONFIGURE_COMMAND}" \
|
||||
CAPTURE_SECONDS="${CAPTURE_SECONDS:-90}" \
|
||||
PROBE_TIMEOUT_SECONDS="${PROBE_TIMEOUT_SECONDS:-160}" \
|
||||
PROBE_DURATION_SECONDS="${PROBE_DURATION_SECONDS:-20}" \
|
||||
PROBE_WARMUP_SECONDS="${PROBE_WARMUP_SECONDS:-4}" \
|
||||
"${SCRIPT_DIR}/run_server_to_rc_mode_matrix.sh"
|
||||
}
|
||||
|
||||
run_hevc_final_sanity() {
|
||||
echo "== final all-mode HEVC sanity matrix =="
|
||||
LOCAL_OUTPUT_DIR="${OUTPUT_DIR}" \
|
||||
REMOTE_PULSE_CAPTURE_TOOL="${REMOTE_PULSE_CAPTURE_TOOL:-gst}" \
|
||||
REMOTE_PULSE_VIDEO_MODE="${REMOTE_PULSE_VIDEO_MODE:-mjpeg-cfr}" \
|
||||
LESAVKA_SERVER_RC_PROFILE=hevc \
|
||||
LESAVKA_SERVER_RC_MODES="${FINAL_SANITY_MODES}" \
|
||||
LESAVKA_SERVER_RC_REPEAT_COUNT=1 \
|
||||
LESAVKA_SERVER_RC_VERBOSE_PROBES="${LESAVKA_SERVER_RC_VERBOSE_PROBES:-0}" \
|
||||
LESAVKA_SERVER_RC_RECONFIGURE=1 \
|
||||
LESAVKA_SERVER_RC_TUNE_DELAYS=0 \
|
||||
LESAVKA_SERVER_RC_PROMPT_SUDO_EARLY=0 \
|
||||
LESAVKA_SERVER_RC_RECONFIGURE_COMMAND="${RECONFIGURE_COMMAND}" \
|
||||
CAPTURE_SECONDS="${CAPTURE_SECONDS:-90}" \
|
||||
PROBE_TIMEOUT_SECONDS="${PROBE_TIMEOUT_SECONDS:-160}" \
|
||||
PROBE_DURATION_SECONDS="${PROBE_DURATION_SECONDS:-20}" \
|
||||
PROBE_WARMUP_SECONDS="${PROBE_WARMUP_SECONDS:-4}" \
|
||||
"${SCRIPT_DIR}/run_server_to_rc_mode_matrix.sh"
|
||||
}
|
||||
|
||||
(
|
||||
cd "${REPO_ROOT}"
|
||||
|
||||
if [[ "${RUN_LOCAL_PREFLIGHTS}" == "1" ]]; then
|
||||
run_local_preflights
|
||||
fi
|
||||
|
||||
if [[ "${RUN_REENTRY}" == "1" ]]; then
|
||||
run_remote_reentry
|
||||
fi
|
||||
|
||||
if [[ "${RUN_STATIC_MATRIX}" == "1" ]]; then
|
||||
run_hevc_static_matrix
|
||||
fi
|
||||
|
||||
if [[ "${RUN_FINAL_SANITY}" == "1" ]]; then
|
||||
run_hevc_final_sanity
|
||||
fi
|
||||
) 2>&1 | tee "${RUN_LOG}"
|
||||
|
||||
echo "==> done"
|
||||
echo "artifact_dir: ${OUTPUT_DIR}"
|
||||
echo "run_log: ${RUN_LOG}"
|
||||
129
scripts/manual/run_hevc_remote_reentry_check.sh
Executable file
129
scripts/manual/run_hevc_remote_reentry_check.sh
Executable file
@ -0,0 +1,129 @@
|
||||
#!/usr/bin/env bash
|
||||
# Manual: HEVC remote re-entry check for operator-driven lab recovery; not part of CI.
|
||||
set -euo pipefail
|
||||
|
||||
REPO_ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd)"
|
||||
STAMP="$(date +%Y%m%d-%H%M%S)"
|
||||
REMOTE_HOST="${LESAVKA_HEVC_REENTRY_HOST:-theia}"
|
||||
REMOTE_REPO="${LESAVKA_HEVC_REENTRY_REMOTE_REPO:-/home/theia/Development/lesavka-codex}"
|
||||
MODE="${LESAVKA_HEVC_REENTRY_MODE:-1280x720@30}"
|
||||
CODEC="${LESAVKA_HEVC_REENTRY_CODEC:-hevc}"
|
||||
SYNC_REPO="${LESAVKA_HEVC_REENTRY_SYNC:-0}"
|
||||
BUILD_RELEASE="${LESAVKA_HEVC_REENTRY_BUILD:-0}"
|
||||
DEPLOY="${LESAVKA_HEVC_REENTRY_DEPLOY:-0}"
|
||||
RECONFIGURE="${LESAVKA_HEVC_REENTRY_RECONFIGURE:-0}"
|
||||
OUTPUT_DIR="${LESAVKA_HEVC_REENTRY_OUTPUT_DIR:-/tmp/lesavka-hevc-reentry-${STAMP}}"
|
||||
RUN_LOG="${OUTPUT_DIR}/hevc-reentry.log"
|
||||
WAIT_SECONDS="${LESAVKA_HEVC_REENTRY_WAIT_SECONDS:-0}"
|
||||
WAIT_INTERVAL_SECONDS="${LESAVKA_HEVC_REENTRY_WAIT_INTERVAL_SECONDS:-15}"
|
||||
SSH_OPTS=${SSH_OPTS:-"-o BatchMode=yes -o ConnectTimeout=8"}
|
||||
|
||||
mkdir -p "${OUTPUT_DIR}"
|
||||
|
||||
echo "==> HEVC remote re-entry check"
|
||||
echo " ↪ host=${REMOTE_HOST}"
|
||||
echo " ↪ remote_repo=${REMOTE_REPO}"
|
||||
echo " ↪ mode=${MODE} codec=${CODEC}"
|
||||
echo " ↪ sync=${SYNC_REPO} build=${BUILD_RELEASE} deploy=${DEPLOY} reconfigure=${RECONFIGURE}"
|
||||
echo " ↪ wait_seconds=${WAIT_SECONDS} wait_interval_seconds=${WAIT_INTERVAL_SECONDS}"
|
||||
echo " ↪ run_log=${RUN_LOG}"
|
||||
echo " ↪ sudo is non-interactive only; this script will not prompt for passwords"
|
||||
|
||||
run_ssh() {
|
||||
# shellcheck disable=SC2086
|
||||
ssh ${SSH_OPTS} "${REMOTE_HOST}" "$@"
|
||||
}
|
||||
|
||||
sync_repo_to_remote() {
|
||||
if command -v rsync >/dev/null 2>&1 && run_ssh 'command -v rsync >/dev/null 2>&1'; then
|
||||
rsync -az --delete \
|
||||
--exclude .git \
|
||||
--exclude target \
|
||||
--exclude '*.profraw' \
|
||||
"${REPO_ROOT}/" "${REMOTE_HOST}:${REMOTE_REPO}/"
|
||||
return
|
||||
fi
|
||||
|
||||
echo " ↪ rsync unavailable on one side; falling back to git-file tar-over-SSH sync without remote delete"
|
||||
if command -v git >/dev/null 2>&1; then
|
||||
(
|
||||
cd "${REPO_ROOT}"
|
||||
git ls-files -z --cached --others --exclude-standard \
|
||||
| tar --null -T - -czf -
|
||||
) | run_ssh "mkdir -p '${REMOTE_REPO}' && tar -xzf - -C '${REMOTE_REPO}'"
|
||||
return
|
||||
fi
|
||||
|
||||
echo " ↪ git unavailable locally; using strict source tar excludes"
|
||||
(
|
||||
cd "${REPO_ROOT}"
|
||||
tar \
|
||||
--exclude './.git' \
|
||||
--exclude './target' \
|
||||
--exclude './target/*' \
|
||||
--exclude '*/target' \
|
||||
--exclude '*/target/*' \
|
||||
--exclude '*.profraw' \
|
||||
-czf - .
|
||||
) | run_ssh "mkdir -p '${REMOTE_REPO}' && tar -xzf - -C '${REMOTE_REPO}'"
|
||||
}
|
||||
|
||||
wait_for_remote() {
|
||||
local deadline now attempt
|
||||
|
||||
if [[ "${WAIT_SECONDS}" == "0" ]]; then
|
||||
run_ssh 'date -Is'
|
||||
return
|
||||
fi
|
||||
|
||||
deadline=$((SECONDS + WAIT_SECONDS))
|
||||
attempt=1
|
||||
while true; do
|
||||
echo "== remote reachability attempt ${attempt} =="
|
||||
if run_ssh 'date -Is'; then
|
||||
return
|
||||
fi
|
||||
now="${SECONDS}"
|
||||
if (( now >= deadline )); then
|
||||
echo "remote host did not become reachable within ${WAIT_SECONDS}s" >&2
|
||||
return 1
|
||||
fi
|
||||
sleep "${WAIT_INTERVAL_SECONDS}"
|
||||
attempt=$((attempt + 1))
|
||||
done
|
||||
}
|
||||
|
||||
(
|
||||
echo "== remote reachability =="
|
||||
wait_for_remote
|
||||
|
||||
echo "== lesavka helper status =="
|
||||
run_ssh 'sudo -n /usr/local/sbin/lesavka-dev-install status'
|
||||
|
||||
if [[ "${SYNC_REPO}" == "1" ]]; then
|
||||
echo "== syncing local repo to remote workspace =="
|
||||
sync_repo_to_remote
|
||||
fi
|
||||
|
||||
if [[ "${BUILD_RELEASE}" == "1" ]]; then
|
||||
echo "== remote release build =="
|
||||
run_ssh "cd '${REMOTE_REPO}' && cargo build --release --bin lesavka-server --bin lesavka-uvc"
|
||||
fi
|
||||
|
||||
if [[ "${DEPLOY}" == "1" ]]; then
|
||||
echo "== remote deploy =="
|
||||
run_ssh 'sudo -n /usr/local/sbin/lesavka-dev-install deploy'
|
||||
fi
|
||||
|
||||
if [[ "${RECONFIGURE}" == "1" ]]; then
|
||||
echo "== remote HEVC reconfigure =="
|
||||
run_ssh "sudo -n /usr/local/sbin/lesavka-dev-install reconfigure '${MODE}' '${CODEC}'"
|
||||
fi
|
||||
|
||||
echo "== final status =="
|
||||
run_ssh 'sudo -n /usr/local/sbin/lesavka-dev-install status'
|
||||
) 2>&1 | tee "${RUN_LOG}"
|
||||
|
||||
echo "==> done"
|
||||
echo "artifact_dir: ${OUTPUT_DIR}"
|
||||
echo "run_log: ${RUN_LOG}"
|
||||
34
scripts/manual/run_local_hevc_bundle_audit.sh
Executable file
34
scripts/manual/run_local_hevc_bundle_audit.sh
Executable file
@ -0,0 +1,34 @@
|
||||
#!/usr/bin/env bash
|
||||
# Manual: local HEVC bundle preflight for lab transport work; not part of CI.
|
||||
set -euo pipefail
|
||||
|
||||
REPO_ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd)"
|
||||
STAMP="$(date +%Y%m%d-%H%M%S)"
|
||||
LOCAL_OUTPUT_DIR="${LESAVKA_LOCAL_HEVC_BUNDLE_AUDIT_OUTPUT_DIR:-/tmp/lesavka-local-hevc-bundle-audit-${STAMP}}"
|
||||
LOCAL_AUDIT_JSON="${LESAVKA_LOCAL_HEVC_BUNDLE_AUDIT_JSON:-${LOCAL_OUTPUT_DIR}/hevc-bundle-audit.json}"
|
||||
LOCAL_RUN_LOG="${LOCAL_OUTPUT_DIR}/hevc-bundle-audit.log"
|
||||
|
||||
mkdir -p "${LOCAL_OUTPUT_DIR}"
|
||||
|
||||
echo "==> local HEVC+audio bundle audit"
|
||||
echo " ↪ artifact_dir=${LOCAL_OUTPUT_DIR}"
|
||||
echo " ↪ audit_json=${LOCAL_AUDIT_JSON}"
|
||||
echo " ↪ run_log=${LOCAL_RUN_LOG}"
|
||||
echo " ↪ no remote host, sudo, tunnel, or RCT capture is used"
|
||||
|
||||
(
|
||||
cd "${REPO_ROOT}"
|
||||
export LESAVKA_LOCAL_HEVC_BUNDLE_AUDIT_JSON="${LOCAL_AUDIT_JSON}"
|
||||
cargo test -p lesavka_client hevc_probe_bundle_audit_writes_manifest -- --nocapture
|
||||
cargo test -p lesavka_client hevc_probe_bundle_train_covers_every_supported_mode -- --nocapture
|
||||
cargo test -p lesavka_client hevc_probe_bundle_train_drops_stale_events_as_complete_av_units_under_jitter -- --nocapture
|
||||
cargo test -p lesavka_client runtime_probe_hevc_video_and_audio_can_form_one_local_bundle -- --nocapture
|
||||
) 2>&1 | tee "${LOCAL_RUN_LOG}"
|
||||
|
||||
echo "==> local HEVC+audio bundle audit summary"
|
||||
"${REPO_ROOT}/scripts/manual/validate_local_hevc_bundle_audit.py" "${LOCAL_AUDIT_JSON}"
|
||||
|
||||
echo "==> done"
|
||||
echo "artifact_dir: ${LOCAL_OUTPUT_DIR}"
|
||||
echo "audit_json: ${LOCAL_AUDIT_JSON}"
|
||||
echo "run_log: ${LOCAL_RUN_LOG}"
|
||||
167
scripts/manual/run_local_hevc_encoder_preflight.sh
Executable file
167
scripts/manual/run_local_hevc_encoder_preflight.sh
Executable file
@ -0,0 +1,167 @@
|
||||
#!/usr/bin/env bash
|
||||
# Manual: local HEVC encoder throughput preflight for upstream transport work; not part of CI.
|
||||
set -euo pipefail
|
||||
|
||||
REPO_ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd)"
|
||||
STAMP="$(date +%Y%m%d-%H%M%S)"
|
||||
OUTPUT_DIR="${LESAVKA_LOCAL_HEVC_ENCODER_PREFLIGHT_OUTPUT_DIR:-/tmp/lesavka-local-hevc-encoder-preflight-${STAMP}}"
|
||||
SUMMARY_JSON="${LESAVKA_LOCAL_HEVC_ENCODER_PREFLIGHT_JSON:-${OUTPUT_DIR}/hevc-encoder-preflight.json}"
|
||||
RUN_LOG="${OUTPUT_DIR}/hevc-encoder-preflight.log"
|
||||
MODES="${LESAVKA_LOCAL_HEVC_ENCODER_PREFLIGHT_MODES:-1280x720@20,1280x720@30,1920x1080@20,1920x1080@30}"
|
||||
DURATION_SECONDS="${LESAVKA_LOCAL_HEVC_ENCODER_PREFLIGHT_SECONDS:-5}"
|
||||
BITRATE_KBIT="${LESAVKA_LOCAL_HEVC_ENCODER_PREFLIGHT_KBIT:-3000}"
|
||||
MIN_REALTIME_FACTOR="${LESAVKA_LOCAL_HEVC_ENCODER_PREFLIGHT_MIN_REALTIME_FACTOR:-1.05}"
|
||||
ENCODER="${LESAVKA_LOCAL_HEVC_ENCODER:-auto}"
|
||||
|
||||
mkdir -p "${OUTPUT_DIR}"
|
||||
|
||||
echo "==> local HEVC encoder preflight"
|
||||
echo " ↪ artifact_dir=${OUTPUT_DIR}"
|
||||
echo " ↪ summary_json=${SUMMARY_JSON}"
|
||||
echo " ↪ run_log=${RUN_LOG}"
|
||||
echo " ↪ modes=${MODES}"
|
||||
echo " ↪ duration=${DURATION_SECONDS}s bitrate=${BITRATE_KBIT}kbit min_realtime_factor=${MIN_REALTIME_FACTOR}"
|
||||
echo " ↪ no remote host, sudo, tunnel, or RCT capture is used"
|
||||
|
||||
(
|
||||
cd "${REPO_ROOT}"
|
||||
python3 - "$OUTPUT_DIR" "$SUMMARY_JSON" "$MODES" "$DURATION_SECONDS" "$BITRATE_KBIT" "$MIN_REALTIME_FACTOR" "$ENCODER" <<'PY'
|
||||
import json
|
||||
import os
|
||||
import re
|
||||
import subprocess
|
||||
import sys
|
||||
import time
|
||||
from pathlib import Path
|
||||
|
||||
output_dir = Path(sys.argv[1])
|
||||
summary_json = Path(sys.argv[2])
|
||||
modes = [mode.strip() for mode in sys.argv[3].split(",") if mode.strip()]
|
||||
duration_seconds = float(sys.argv[4])
|
||||
bitrate_kbit = int(sys.argv[5])
|
||||
min_realtime_factor = float(sys.argv[6])
|
||||
encoder_override = sys.argv[7].strip()
|
||||
|
||||
ENCODER_ORDER = ["nvh265enc", "vah265enc", "vaapih265enc", "v4l2h265enc", "x265enc"]
|
||||
MODE_RE = re.compile(r"^([0-9]+)x([0-9]+)@([0-9]+)$")
|
||||
|
||||
|
||||
def gst_has(element: str) -> bool:
|
||||
return subprocess.run(
|
||||
["gst-inspect-1.0", element],
|
||||
stdout=subprocess.DEVNULL,
|
||||
stderr=subprocess.DEVNULL,
|
||||
check=False,
|
||||
).returncode == 0
|
||||
|
||||
|
||||
def pick_encoder() -> str:
|
||||
if encoder_override and encoder_override != "auto":
|
||||
if not gst_has(encoder_override):
|
||||
raise SystemExit(f"requested HEVC encoder is unavailable: {encoder_override}")
|
||||
return encoder_override
|
||||
for candidate in ENCODER_ORDER:
|
||||
if gst_has(candidate):
|
||||
return candidate
|
||||
raise SystemExit("no supported HEVC encoder found")
|
||||
|
||||
|
||||
def encoder_chain(encoder: str, fps: int) -> str:
|
||||
if encoder == "x265enc":
|
||||
return (
|
||||
f"x265enc tune=zerolatency speed-preset=ultrafast "
|
||||
f"bitrate={bitrate_kbit} key-int-max={max(fps, 1)} log-level=none"
|
||||
)
|
||||
# Hardware encoder property names vary by driver. Use the bare element for
|
||||
# this availability/throughput preflight rather than failing on a missing
|
||||
# low-latency property that the runtime selector already probes separately.
|
||||
return encoder
|
||||
|
||||
|
||||
def has_annex_b(path: Path) -> bool:
|
||||
data = path.read_bytes()
|
||||
return b"\x00\x00\x00\x01" in data or b"\x00\x00\x01" in data
|
||||
|
||||
|
||||
def run_mode(encoder: str, mode: str) -> dict:
|
||||
match = MODE_RE.match(mode)
|
||||
if not match:
|
||||
raise SystemExit(f"invalid mode: {mode}")
|
||||
width, height, fps = map(int, match.groups())
|
||||
frame_count = max(1, int(round(duration_seconds * fps)))
|
||||
media_seconds = frame_count / fps
|
||||
out_path = output_dir / f"{mode.replace('@', '_')}-{encoder}.h265"
|
||||
pipeline = (
|
||||
f"videotestsrc num-buffers={frame_count} is-live=false pattern=smpte ! "
|
||||
f"video/x-raw,format=I420,width={width},height={height},framerate={fps}/1 ! "
|
||||
f"{encoder_chain(encoder, fps)} ! "
|
||||
"h265parse config-interval=-1 ! "
|
||||
"video/x-h265,stream-format=byte-stream,alignment=au ! "
|
||||
f"filesink location={out_path}"
|
||||
)
|
||||
timeout_seconds = max(10.0, media_seconds * 4.0)
|
||||
start = time.monotonic()
|
||||
proc = subprocess.run(
|
||||
["timeout", f"{timeout_seconds:.1f}", "gst-launch-1.0", "-q", *pipeline.split()],
|
||||
stdout=subprocess.PIPE,
|
||||
stderr=subprocess.PIPE,
|
||||
text=True,
|
||||
check=False,
|
||||
)
|
||||
elapsed = max(time.monotonic() - start, 0.001)
|
||||
encoded_bytes = out_path.stat().st_size if out_path.exists() else 0
|
||||
realtime_factor = media_seconds / elapsed
|
||||
annex_b = encoded_bytes > 0 and has_annex_b(out_path)
|
||||
status = (
|
||||
"pass"
|
||||
if proc.returncode == 0 and encoded_bytes > 0 and annex_b and realtime_factor >= min_realtime_factor
|
||||
else "fail"
|
||||
)
|
||||
return {
|
||||
"mode": mode,
|
||||
"width": width,
|
||||
"height": height,
|
||||
"fps": fps,
|
||||
"frames": frame_count,
|
||||
"media_seconds": round(media_seconds, 3),
|
||||
"elapsed_seconds": round(elapsed, 3),
|
||||
"realtime_factor": round(realtime_factor, 3),
|
||||
"bytes": encoded_bytes,
|
||||
"annex_b": annex_b,
|
||||
"status": status,
|
||||
"artifact": str(out_path),
|
||||
"stderr_tail": "\n".join(proc.stderr.splitlines()[-12:]),
|
||||
}
|
||||
|
||||
|
||||
encoder = pick_encoder()
|
||||
results = [run_mode(encoder, mode) for mode in modes]
|
||||
summary = {
|
||||
"schema": "lesavka.local-hevc-encoder-preflight.v1",
|
||||
"encoder": encoder,
|
||||
"duration_seconds": duration_seconds,
|
||||
"bitrate_kbit": bitrate_kbit,
|
||||
"min_realtime_factor": min_realtime_factor,
|
||||
"verdict": "pass" if all(row["status"] == "pass" for row in results) else "fail",
|
||||
"results": results,
|
||||
}
|
||||
summary_json.parent.mkdir(parents=True, exist_ok=True)
|
||||
summary_json.write_text(json.dumps(summary, indent=2) + "\n")
|
||||
|
||||
print(f"encoder={encoder}")
|
||||
for row in results:
|
||||
print(
|
||||
f"{row['status'].upper()} {row['mode']}: "
|
||||
f"frames={row['frames']} elapsed={row['elapsed_seconds']:.3f}s "
|
||||
f"rtx={row['realtime_factor']:.3f} bytes={row['bytes']} annex_b={row['annex_b']}"
|
||||
)
|
||||
|
||||
if summary["verdict"] != "pass":
|
||||
raise SystemExit("local HEVC encoder preflight failed")
|
||||
PY
|
||||
) 2>&1 | tee "${RUN_LOG}"
|
||||
|
||||
echo "==> done"
|
||||
echo "artifact_dir: ${OUTPUT_DIR}"
|
||||
echo "summary_json: ${SUMMARY_JSON}"
|
||||
echo "run_log: ${RUN_LOG}"
|
||||
@ -32,10 +32,26 @@ SSH_OPTS=${SSH_OPTS:-"-o BatchMode=yes -o ConnectTimeout=30"}
|
||||
|
||||
LESAVKA_SERVER_RC_CORE_WEBCAM_MODES=${LESAVKA_SERVER_RC_CORE_WEBCAM_MODES:-1280x720@20,1280x720@30,1920x1080@20,1920x1080@30}
|
||||
LESAVKA_SERVER_RC_MODES=${LESAVKA_SERVER_RC_MODES:-${LESAVKA_SERVER_RC_CORE_WEBCAM_MODES}}
|
||||
LESAVKA_SERVER_RC_PROFILE=${LESAVKA_SERVER_RC_PROFILE:-mjpeg}
|
||||
LESAVKA_SERVER_RC_NORMALIZED_PROFILE=mjpeg
|
||||
LESAVKA_SERVER_RC_DEFAULT_AUDIO_DELAY_US=${LESAVKA_SERVER_RC_DEFAULT_AUDIO_DELAY_US:-${LESAVKA_OUTPUT_DELAY_PROBE_AUDIO_DELAY_US:-0}}
|
||||
LESAVKA_SERVER_RC_DEFAULT_VIDEO_DELAY_US=${LESAVKA_SERVER_RC_DEFAULT_VIDEO_DELAY_US:-135090}
|
||||
LESAVKA_SERVER_RC_MODE_AUDIO_DELAYS_US=${LESAVKA_SERVER_RC_MODE_AUDIO_DELAYS_US:-1280x720@20=${LESAVKA_SERVER_RC_DEFAULT_AUDIO_DELAY_US},1280x720@30=${LESAVKA_SERVER_RC_DEFAULT_AUDIO_DELAY_US},1920x1080@20=${LESAVKA_SERVER_RC_DEFAULT_AUDIO_DELAY_US},1920x1080@30=${LESAVKA_SERVER_RC_DEFAULT_AUDIO_DELAY_US}}
|
||||
LESAVKA_SERVER_RC_MODE_DELAYS_US=${LESAVKA_SERVER_RC_MODE_DELAYS_US:-1280x720@20=162659,1280x720@30=135090,1920x1080@20=160045,1920x1080@30=127952}
|
||||
LESAVKA_SERVER_RC_MJPEG_MODE_DELAYS_US=${LESAVKA_SERVER_RC_MJPEG_MODE_DELAYS_US:-1280x720@20=162659,1280x720@30=135090,1920x1080@20=160045,1920x1080@30=127952}
|
||||
LESAVKA_SERVER_RC_HEVC_MODE_DELAYS_US=${LESAVKA_SERVER_RC_HEVC_MODE_DELAYS_US:-1280x720@20=173852,1280x720@30=110000,1920x1080@20=160045,1920x1080@30=127952}
|
||||
case "${LESAVKA_SERVER_RC_PROFILE,,}" in
|
||||
hevc|h265|h.265)
|
||||
LESAVKA_SERVER_RC_NORMALIZED_PROFILE=hevc
|
||||
LESAVKA_SERVER_RC_MODE_DELAYS_US=${LESAVKA_SERVER_RC_MODE_DELAYS_US:-${LESAVKA_SERVER_RC_HEVC_MODE_DELAYS_US}}
|
||||
CAPTURE_SECONDS=${CAPTURE_SECONDS:-90}
|
||||
PROBE_TIMEOUT_SECONDS=${PROBE_TIMEOUT_SECONDS:-90}
|
||||
export CAPTURE_SECONDS PROBE_TIMEOUT_SECONDS
|
||||
;;
|
||||
*)
|
||||
LESAVKA_SERVER_RC_NORMALIZED_PROFILE=mjpeg
|
||||
LESAVKA_SERVER_RC_MODE_DELAYS_US=${LESAVKA_SERVER_RC_MODE_DELAYS_US:-${LESAVKA_SERVER_RC_MJPEG_MODE_DELAYS_US}}
|
||||
;;
|
||||
esac
|
||||
LESAVKA_SERVER_RC_MODE_DISCOVERY_SIZES=${LESAVKA_SERVER_RC_MODE_DISCOVERY_SIZES:-1280x720,1920x1080}
|
||||
LESAVKA_SERVER_RC_MODE_DISCOVERY_FPS=${LESAVKA_SERVER_RC_MODE_DISCOVERY_FPS:-20,30}
|
||||
LESAVKA_SERVER_RC_MODE_DISCOVERY_INCLUDE_REGEX=${LESAVKA_SERVER_RC_MODE_DISCOVERY_INCLUDE_REGEX:-Logitech|BRIO|C9[0-9]+|HD UVC WebCam|USB2[.]0 HD|Integrated Camera|Webcam|Camera}
|
||||
@ -990,8 +1006,14 @@ reconfigure_server_mode() {
|
||||
LESAVKA_UVC_WIDTH="${width}" \
|
||||
LESAVKA_UVC_HEIGHT="${height}" \
|
||||
LESAVKA_UVC_FPS="${fps}" \
|
||||
LESAVKA_SERVER_RC_PROFILE="${LESAVKA_SERVER_RC_NORMALIZED_PROFILE}" \
|
||||
LESAVKA_CALIBRATION_PROFILE="${LESAVKA_SERVER_RC_NORMALIZED_PROFILE}" \
|
||||
LESAVKA_UPLINK_CAMERA_CODEC="${LESAVKA_SERVER_RC_NORMALIZED_PROFILE}" \
|
||||
LESAVKA_CAM_CODEC="${LESAVKA_SERVER_RC_NORMALIZED_PROFILE}" \
|
||||
LESAVKA_UPSTREAM_AUDIO_PLAYOUT_OFFSET_US="${audio_delay_us}" \
|
||||
LESAVKA_UPSTREAM_VIDEO_PLAYOUT_OFFSET_US="${video_delay_us}" \
|
||||
LESAVKA_UPSTREAM_AUDIO_PLAYOUT_MODE_OFFSETS_US="${LESAVKA_SERVER_RC_MODE_AUDIO_DELAYS_US}" \
|
||||
LESAVKA_UPSTREAM_VIDEO_PLAYOUT_MODE_OFFSETS_US="${LESAVKA_SERVER_RC_MODE_DELAYS_US}" \
|
||||
bash -c "${LESAVKA_SERVER_RC_RECONFIGURE_COMMAND}"
|
||||
return 0
|
||||
fi
|
||||
@ -1011,6 +1033,7 @@ reconfigure_server_mode() {
|
||||
"${fps}" \
|
||||
"${interval}" \
|
||||
"${LESAVKA_SERVER_RC_RECONFIGURE_CODEC}" \
|
||||
"${LESAVKA_SERVER_RC_NORMALIZED_PROFILE}" \
|
||||
"${LESAVKA_SERVER_RC_ALLOW_GADGET_RESET}" \
|
||||
"${LESAVKA_SERVER_RC_FORCE_GADGET_REBUILD}" \
|
||||
"${LESAVKA_SERVER_RC_RECONFIGURE_SETTLE_SECONDS}" \
|
||||
@ -1026,14 +1049,15 @@ height=$3
|
||||
fps=$4
|
||||
interval=$5
|
||||
codec=$6
|
||||
allow_gadget_reset=$7
|
||||
force_gadget_rebuild=$8
|
||||
settle_seconds=$9
|
||||
verbose=${10}
|
||||
audio_delay_us=${11}
|
||||
video_delay_us=${12}
|
||||
audio_delay_map=${13}
|
||||
video_delay_map=${14}
|
||||
ingress_profile=$7
|
||||
allow_gadget_reset=$8
|
||||
force_gadget_rebuild=$9
|
||||
settle_seconds=${10}
|
||||
verbose=${11}
|
||||
audio_delay_us=${12}
|
||||
video_delay_us=${13}
|
||||
audio_delay_map=${14}
|
||||
video_delay_map=${15}
|
||||
|
||||
set_env_value() {
|
||||
local file=$1
|
||||
@ -1076,6 +1100,9 @@ install -d -m 0755 /etc/lesavka
|
||||
touch /etc/lesavka/server.env /etc/lesavka/uvc.env
|
||||
|
||||
set_env_value /etc/lesavka/server.env LESAVKA_CAM_OUTPUT uvc
|
||||
set_env_value /etc/lesavka/server.env LESAVKA_CAM_CODEC "${ingress_profile}"
|
||||
set_env_value /etc/lesavka/server.env LESAVKA_UPLINK_CAMERA_CODEC "${ingress_profile}"
|
||||
set_env_value /etc/lesavka/server.env LESAVKA_CALIBRATION_PROFILE "${ingress_profile}"
|
||||
set_env_value /etc/lesavka/server.env LESAVKA_UVC_CODEC "${codec}"
|
||||
set_env_value /etc/lesavka/server.env LESAVKA_UVC_WIDTH "${width}"
|
||||
set_env_value /etc/lesavka/server.env LESAVKA_UVC_HEIGHT "${height}"
|
||||
@ -1085,6 +1112,20 @@ set_env_value /etc/lesavka/server.env LESAVKA_UPSTREAM_AUDIO_PLAYOUT_MODE_OFFSET
|
||||
set_env_value /etc/lesavka/server.env LESAVKA_UPSTREAM_VIDEO_PLAYOUT_MODE_OFFSETS_US "${video_delay_map}"
|
||||
set_env_value /etc/lesavka/server.env LESAVKA_UPSTREAM_AUDIO_PLAYOUT_OFFSET_US "${audio_delay_us}"
|
||||
set_env_value /etc/lesavka/server.env LESAVKA_UPSTREAM_VIDEO_PLAYOUT_OFFSET_US "${video_delay_us}"
|
||||
case "${ingress_profile}" in
|
||||
hevc)
|
||||
set_env_value /etc/lesavka/server.env LESAVKA_UPSTREAM_HEVC_AUDIO_PLAYOUT_MODE_OFFSETS_US "${audio_delay_map}"
|
||||
set_env_value /etc/lesavka/server.env LESAVKA_UPSTREAM_HEVC_VIDEO_PLAYOUT_MODE_OFFSETS_US "${video_delay_map}"
|
||||
set_env_value /etc/lesavka/server.env LESAVKA_UPSTREAM_HEVC_AUDIO_PLAYOUT_OFFSET_US "${audio_delay_us}"
|
||||
set_env_value /etc/lesavka/server.env LESAVKA_UPSTREAM_HEVC_VIDEO_PLAYOUT_OFFSET_US "${video_delay_us}"
|
||||
;;
|
||||
*)
|
||||
set_env_value /etc/lesavka/server.env LESAVKA_UPSTREAM_MJPEG_AUDIO_PLAYOUT_MODE_OFFSETS_US "${audio_delay_map}"
|
||||
set_env_value /etc/lesavka/server.env LESAVKA_UPSTREAM_MJPEG_VIDEO_PLAYOUT_MODE_OFFSETS_US "${video_delay_map}"
|
||||
set_env_value /etc/lesavka/server.env LESAVKA_UPSTREAM_MJPEG_AUDIO_PLAYOUT_OFFSET_US "${audio_delay_us}"
|
||||
set_env_value /etc/lesavka/server.env LESAVKA_UPSTREAM_MJPEG_VIDEO_PLAYOUT_OFFSET_US "${video_delay_us}"
|
||||
;;
|
||||
esac
|
||||
|
||||
set_env_value /etc/lesavka/uvc.env LESAVKA_UVC_CODEC "${codec}"
|
||||
set_env_value /etc/lesavka/uvc.env LESAVKA_UVC_WIDTH "${width}"
|
||||
@ -2244,6 +2285,7 @@ fi
|
||||
echo "==> server-to-RC mode matrix"
|
||||
echo " ↪ modes=${LESAVKA_SERVER_RC_MODES}"
|
||||
echo " ↪ mode_source=${LESAVKA_SERVER_RC_MODE_SOURCE}"
|
||||
echo " ↪ profile=${LESAVKA_SERVER_RC_PROFILE} capture_seconds=${CAPTURE_SECONDS:-auto} probe_timeout_seconds=${PROBE_TIMEOUT_SECONDS:-auto}"
|
||||
echo " ↪ repeat_count=${LESAVKA_SERVER_RC_REPEAT_COUNT} verbose_probes=${LESAVKA_SERVER_RC_VERBOSE_PROBES}"
|
||||
echo " ↪ video_delays=${LESAVKA_SERVER_RC_MODE_DELAYS_US}"
|
||||
echo " ↪ audio_delays=${LESAVKA_SERVER_RC_MODE_AUDIO_DELAYS_US}"
|
||||
|
||||
@ -3212,6 +3212,43 @@ elif [[ "${capture_mode}" == "pulse" ]]; then
|
||||
queue ! mux.
|
||||
fi
|
||||
;;
|
||||
mjpeg-cfr|mjpeg_cfr)
|
||||
if [[ "${remote_pulse_audio_anchor_silence}" == "1" ]]; then
|
||||
run_tolerant_capture timeout --kill-after=5 --signal=INT "$((capture_seconds + 3))" \
|
||||
gst-launch-1.0 -q -e \
|
||||
matroskamux name=mux ! filesink location="${remote_capture}" \
|
||||
v4l2src device="${resolved_video_device}" do-timestamp=true ! \
|
||||
${gst_source_caps} ! \
|
||||
${gst_decode_chain} \
|
||||
videoconvert ! videorate ! video/x-raw,framerate="${resolved_video_fps}"/1 ! \
|
||||
jpegenc quality=80 ! image/jpeg,framerate="${resolved_video_fps}"/1 ! \
|
||||
queue ! mux. \
|
||||
${gst_audio_mixer} ! \
|
||||
audio/x-raw,rate=48000,channels=2 ! \
|
||||
queue ! mux. \
|
||||
audiotestsrc wave=silence is-live=true do-timestamp=true ! \
|
||||
audio/x-raw,rate=48000,channels=2 ! \
|
||||
queue ! amix. \
|
||||
pulsesrc device="${pulse_source}" do-timestamp=true ! \
|
||||
audio/x-raw,rate=48000,channels=2 ! \
|
||||
audioconvert ! audioresample ! audio/x-raw,rate=48000,channels=2 ! \
|
||||
queue ! amix.
|
||||
else
|
||||
run_tolerant_capture timeout --kill-after=5 --signal=INT "$((capture_seconds + 3))" \
|
||||
gst-launch-1.0 -q -e \
|
||||
matroskamux name=mux ! filesink location="${remote_capture}" \
|
||||
v4l2src device="${resolved_video_device}" do-timestamp=true ! \
|
||||
${gst_source_caps} ! \
|
||||
${gst_decode_chain} \
|
||||
videoconvert ! videorate ! video/x-raw,framerate="${resolved_video_fps}"/1 ! \
|
||||
jpegenc quality=80 ! image/jpeg,framerate="${resolved_video_fps}"/1 ! \
|
||||
queue ! mux. \
|
||||
pulsesrc device="${pulse_source}" do-timestamp=true ! \
|
||||
audio/x-raw,rate=48000,channels=2 ! \
|
||||
audioconvert ! audioresample ! audio/x-raw,rate=48000,channels=2 ! \
|
||||
queue ! mux.
|
||||
fi
|
||||
;;
|
||||
*)
|
||||
printf 'unsupported REMOTE_PULSE_VIDEO_MODE=%s\n' "${remote_pulse_video_mode}" >&2
|
||||
exit 64
|
||||
|
||||
310
scripts/manual/summarize_uvc_frame_meta_log.py
Executable file
310
scripts/manual/summarize_uvc_frame_meta_log.py
Executable file
@ -0,0 +1,310 @@
|
||||
#!/usr/bin/env python3
|
||||
"""Summarize optional UVC MJPEG frame metadata JSONL logs.
|
||||
|
||||
The server can append one compact JSON record for every MJPEG frame it spools
|
||||
into the UVC helper. This script turns that raw per-frame stream into cadence,
|
||||
profile, and synthetic-event coverage metrics. Why: when an HEVC client-to-RCT
|
||||
run fails at the final capture, we need to know whether the decoded MJPEG handoff
|
||||
was already incomplete before adding heavier server-side introspection.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import argparse
|
||||
import json
|
||||
import math
|
||||
import pathlib
|
||||
import sys
|
||||
from collections import Counter
|
||||
from typing import Any
|
||||
|
||||
SCHEMA = "lesavka.uvc-mjpeg-spool-meta.v1"
|
||||
|
||||
|
||||
def percentile(values: list[float], q: float) -> float | None:
|
||||
"""Return a nearest-rank percentile for finite numeric samples.
|
||||
|
||||
Inputs: sample values and a quantile from `0.0` to `1.0`. Output: the
|
||||
selected percentile or `None` when no finite samples exist. Why: all Lesavka
|
||||
probe summaries use p95-style nearest-rank percentiles, so this keeps the
|
||||
spool boundary diagnostics comparable with sync/freshness reports.
|
||||
"""
|
||||
|
||||
finite = sorted(value for value in values if math.isfinite(value))
|
||||
if not finite:
|
||||
return None
|
||||
index = min(len(finite) - 1, max(0, math.ceil(len(finite) * q) - 1))
|
||||
return finite[index]
|
||||
|
||||
|
||||
def optional_int(value: Any) -> int | None:
|
||||
"""Parse optional integer JSON fields without treating null as an error.
|
||||
|
||||
Inputs: a raw JSON field. Output: an integer or `None`. Why: MJPEG ingress
|
||||
has no decoded PTS, while HEVC-decoded MJPEG should provide one when the
|
||||
decoder reports it, and both profiles share the same log schema.
|
||||
"""
|
||||
|
||||
if value is None:
|
||||
return None
|
||||
try:
|
||||
return int(value)
|
||||
except (TypeError, ValueError):
|
||||
return None
|
||||
|
||||
|
||||
def load_records(path: pathlib.Path) -> tuple[list[dict[str, Any]], int]:
    """Load valid metadata records from a JSONL file.

    Inputs: a JSONL path. Output: the parsed records plus a count of
    ignored lines. Why: probe logs are operational artifacts, so blank,
    truncated, or unrelated lines are tolerated — but an empty usable
    stream is still the caller's error to surface.
    """

    parsed: list[dict[str, Any]] = []
    skipped = 0
    for line in path.read_text(errors="replace").splitlines():
        if not line.strip():
            continue  # Blank lines are neither records nor errors.
        try:
            raw = json.loads(line)
        except json.JSONDecodeError:
            skipped += 1
            continue
        if raw.get("schema") != SCHEMA:
            skipped += 1
            continue
        sequence = optional_int(raw.get("sequence"))
        byte_count = optional_int(raw.get("bytes"))
        spool_unix_ns = optional_int(raw.get("spool_unix_ns"))
        # The three mandatory fields must all be present and integral.
        if sequence is None or byte_count is None or spool_unix_ns is None:
            skipped += 1
            continue
        record = {
            "sequence": sequence,
            "profile": str(raw.get("profile") or "unknown"),
            "bytes": byte_count,
            "source_pts_us": optional_int(raw.get("source_pts_us")),
            "decoded_pts_us": optional_int(raw.get("decoded_pts_us")),
            "spool_unix_ns": spool_unix_ns,
        }
        parsed.append(record)
    return parsed, skipped
|
||||
|
||||
|
||||
def diffs(values: list[int]) -> list[float]:
    """Return adjacent differences in milliseconds for sorted integer samples.

    Inputs: timestamps in microseconds (the caller has already chosen the
    unit). Output: millisecond deltas between neighbors. Why: cadence
    problems show up in the gaps between adjacent frame records, not in
    absolute timestamps.
    """

    deltas: list[float] = []
    for earlier, later in zip(values, values[1:]):
        deltas.append((later - earlier) / 1000.0)
    return deltas
|
||||
|
||||
|
||||
def sequence_gap_count(records: list[dict[str, Any]]) -> int:
    """Count missing sequence numbers in the append-only frame log.

    Inputs: parsed frame metadata. Output: total count of absent sequence
    IDs. Why: a source-PTS gap can be legitimate after freshness drops, but
    a sequence gap points at incomplete logging or skipped spool writes.
    """

    ordered = sorted(record["sequence"] for record in records)
    missing = 0
    for earlier, later in zip(ordered, ordered[1:]):
        hole = later - earlier - 1
        if hole > 0:  # Duplicates or equal neighbors contribute nothing.
            missing += hole
    return missing
|
||||
|
||||
|
||||
def event_coverage(records: list[dict[str, Any]], timeline_path: pathlib.Path | None) -> dict | None:
    """Compare spooled frame PTS values with synthetic event windows.

    Inputs: frame records plus an optional client/server probe timeline
    JSON. Output: per-event coverage counts, or `None` when no usable
    timeline is available. Why: the top-level RCT analyzer can miss flashes
    after transport turbulence; this boundary check shows whether the
    event-coded frames reached the UVC spool before final capture is
    blamed.
    """

    if timeline_path is None:
        return None
    try:
        timeline = json.loads(timeline_path.read_text())
    except (OSError, json.JSONDecodeError):
        return None  # A missing/garbled timeline disables coverage, not the run.
    events = timeline.get("events")
    if not isinstance(events, list):
        return None

    pts_samples = [
        record["source_pts_us"]
        for record in records
        if isinstance(record.get("source_pts_us"), int)
    ]
    covered = 0
    missing_codes: list[int] = []
    per_event: list[dict[str, Any]] = []
    for event in events:
        try:
            window_start = int(event["planned_start_us"])
            window_end = int(event["planned_end_us"])
        except (KeyError, TypeError, ValueError):
            continue  # Events without a usable window cannot be scored.
        code = optional_int(event.get("code"))
        frame_count = sum(1 for pts in pts_samples if window_start <= pts < window_end)
        if frame_count:
            covered += 1
        elif code is not None:
            missing_codes.append(code)
        per_event.append(
            {
                "event_id": optional_int(event.get("event_id")),
                "code": code,
                "frame_count": frame_count,
            }
        )

    return {
        "expected_events": len(per_event),
        "covered_events": covered,
        "missing_codes": missing_codes,
        "per_event": per_event,
    }
|
||||
|
||||
|
||||
def summarize(records: list[dict[str, Any]], ignored: int, fps: float | None, timeline: pathlib.Path | None) -> dict:
    """Build the structured UVC spool metadata summary.

    Inputs: parsed records (non-empty, enforced by the caller), the count of
    ignored lines, an optional expected FPS, and an optional synthetic
    timeline path. Output: a JSON-serializable metrics dict. Why: humans and
    follow-up automation share this one artifact to decide whether a failing
    end-to-end HEVC run needs transport, decode, UVC, or RCT attention.
    """

    profile_counts = Counter(record["profile"] for record in records)
    sizes = [float(record["bytes"]) for record in records]
    source_pts = sorted(
        record["source_pts_us"]
        for record in records
        if isinstance(record.get("source_pts_us"), int)
    )
    spool_ns = sorted(record["spool_unix_ns"] for record in records)
    source_intervals = diffs(source_pts)
    # Spool timestamps are nanoseconds, so scale straight to milliseconds.
    spool_intervals = [
        (later - earlier) / 1_000_000.0 for earlier, later in zip(spool_ns, spool_ns[1:])
    ]
    decoded_deltas: list[float] = []
    for record in records:
        decoded = record.get("decoded_pts_us")
        source = record.get("source_pts_us")
        if isinstance(decoded, int) and isinstance(source, int):
            decoded_deltas.append((decoded - source) / 1000.0)

    expected_interval_ms = 1000.0 / fps if fps and fps > 0 else None
    if expected_interval_ms is None:
        cadence_hiccups = None
    else:
        # A hiccup is any gap more than 1.5x the nominal frame interval.
        cadence_hiccups = sum(
            1 for gap in source_intervals if gap > expected_interval_ms * 1.5
        )

    if len(source_pts) >= 2:
        source_span_ms = (source_pts[-1] - source_pts[0]) / 1000.0
    else:
        source_span_ms = None

    return {
        "schema": "lesavka.uvc-mjpeg-spool-summary.v1",
        "record_count": len(records),
        "ignored_line_count": ignored,
        "profiles": dict(sorted(profile_counts.items())),
        "sequence_first": min(record["sequence"] for record in records),
        "sequence_last": max(record["sequence"] for record in records),
        "sequence_gap_count": sequence_gap_count(records),
        "bytes_median": percentile(sizes, 0.50),
        "bytes_p95": percentile(sizes, 0.95),
        "bytes_max": max(sizes) if sizes else None,
        "source_pts_span_ms": source_span_ms,
        "source_interval_p95_ms": percentile(source_intervals, 0.95),
        "source_interval_max_ms": max(source_intervals) if source_intervals else None,
        "spool_interval_p95_ms": percentile(spool_intervals, 0.95),
        "spool_interval_max_ms": max(spool_intervals) if spool_intervals else None,
        "expected_interval_ms": expected_interval_ms,
        "source_cadence_hiccup_count": cadence_hiccups,
        "decoded_pts_delta_median_ms": percentile(decoded_deltas, 0.50),
        "decoded_pts_delta_p95_ms": percentile(decoded_deltas, 0.95),
        "event_coverage": event_coverage(records, timeline),
    }
|
||||
|
||||
|
||||
def format_ms(value: float | None) -> str:
|
||||
"""Format optional millisecond values for concise text output.
|
||||
|
||||
Inputs: a numeric value or `None`. Output: display string. Why: report text
|
||||
should make absent evidence explicit instead of quietly rendering `null`.
|
||||
"""
|
||||
|
||||
return "n/a" if value is None else f"{value:.1f} ms"
|
||||
|
||||
|
||||
def write_text_report(path: pathlib.Path, log_path: pathlib.Path, summary: dict) -> None:
    """Write the human-readable spool metadata report.

    Inputs: output path, the source log path, and the structured summary.
    Output: a report file on disk. Why: the run matrix is easiest to scan
    when key timing evidence sits as text next to the JSON artifact.
    """

    coverage = summary.get("event_coverage") or {}
    if coverage:
        coverage_line = (
            f"{coverage.get('covered_events', 0)}/{coverage.get('expected_events', 0)}"
            f" missing_codes={coverage.get('missing_codes', [])}"
        )
    else:
        coverage_line = "n/a"

    report_lines = [
        f"UVC frame metadata summary for {log_path}",
        f"- records: {summary['record_count']} ignored_lines={summary['ignored_line_count']}",
        f"- profiles: {summary['profiles']}",
        f"- sequence: {summary['sequence_first']}..{summary['sequence_last']} gaps={summary['sequence_gap_count']}",
        f"- source cadence: p95={format_ms(summary['source_interval_p95_ms'])} max={format_ms(summary['source_interval_max_ms'])} hiccups={summary['source_cadence_hiccup_count']}",
        f"- spool cadence: p95={format_ms(summary['spool_interval_p95_ms'])} max={format_ms(summary['spool_interval_max_ms'])}",
        f"- decoded PTS delta: median={format_ms(summary['decoded_pts_delta_median_ms'])} p95={format_ms(summary['decoded_pts_delta_p95_ms'])}",
        f"- event coverage: {coverage_line}",
    ]
    path.write_text("\n".join(report_lines) + "\n")
|
||||
|
||||
|
||||
def parse_args(argv: list[str]) -> argparse.Namespace:
    """Parse command-line options for artifact summarization.

    Inputs: CLI argv (without the program name). Output: an argparse
    namespace. Why: the script serves manual postmortems and automated
    probe wrappers alike, so every output is an explicit file path rather
    than implicit terminal scraping.
    """

    parser = argparse.ArgumentParser(description=__doc__)
    # Three required positional paths: the input log and both outputs.
    for positional in ("log_jsonl", "json_out", "txt_out"):
        parser.add_argument(positional, type=pathlib.Path)
    parser.add_argument("--fps", type=float, default=None)
    parser.add_argument("--timeline", type=pathlib.Path, default=None)
    return parser.parse_args(argv)
|
||||
|
||||
|
||||
def main(argv: list[str]) -> int:
    """Run the UVC frame metadata summarizer.

    Inputs: command-line arguments (without the program name). Output: a
    process exit code. Why: explicit non-zero statuses let probe wrappers
    fail fast when metadata logging was enabled but no valid frame records
    were captured.
    """

    args = parse_args(argv)
    records, ignored = load_records(args.log_jsonl)
    if not records:
        # No usable stream means nothing to summarize; signal the wrapper.
        print(f"no valid {SCHEMA} records found in {args.log_jsonl}", file=sys.stderr)
        return 1

    report = summarize(records, ignored, args.fps, args.timeline)
    args.json_out.write_text(json.dumps(report, indent=2, sort_keys=True) + "\n")
    write_text_report(args.txt_out, args.log_jsonl, report)
    print(f"summary_json: {args.json_out}")
    print(f"summary_txt: {args.txt_out}")
    return 0
|
||||
|
||||
|
||||
# Script entry point: propagate main()'s status as the process exit code.
if __name__ == "__main__":
    sys.exit(main(sys.argv[1:]))
|
||||
147
scripts/manual/validate_local_hevc_bundle_audit.py
Executable file
147
scripts/manual/validate_local_hevc_bundle_audit.py
Executable file
@ -0,0 +1,147 @@
|
||||
#!/usr/bin/env python3
|
||||
"""Validate the local synthetic HEVC+audio bundle audit artifact.
|
||||
|
||||
The local audit is our passwordless proof that the client can generate the
|
||||
same coded flash/tone train we will later send through the WAN and RCT path.
|
||||
This validator keeps the acceptance rules in one reusable place so later
|
||||
hardware failures can be compared against a known-good client-origin manifest.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
import sys
|
||||
from pathlib import Path
|
||||
|
||||
# Acceptance floor for the synthetic train: sixteen coded events, each with
# one audio packet before and one after its video frame (enforced per event
# in validate_manifest).
EXPECTED_EVENTS = 16
EXPECTED_AUDIO_PACKETS_PER_EVENT = 2
# Adjacent coded video frames must be exactly this far apart (microseconds).
EXPECTED_VIDEO_PERIOD_US = 1_000_000
# Upper bound for each event's reported `max_audio_video_skew_us` field.
MAX_AUDIO_VIDEO_SKEW_US = 120_000
|
||||
|
||||
|
||||
def fail(message: str) -> None:
    """Exit with a compact validation error for shell scripts and operators."""

    raise SystemExit("local HEVC bundle audit failed: {}".format(message))
|
||||
|
||||
|
||||
def require(condition: bool, message: str) -> None:
    """Keep validation checks readable while preserving precise error context."""

    if condition:
        return
    fail(message)
|
||||
|
||||
|
||||
def load_manifest(path: Path) -> dict:
    """Read the manifest JSON generated by the local Rust bundle preflight."""

    try:
        raw_text = path.read_text()
    except FileNotFoundError:
        fail(f"missing audit manifest: {path}")
    try:
        return json.loads(raw_text)
    except json.JSONDecodeError as exc:
        fail(f"invalid audit JSON: {exc}")
|
||||
|
||||
|
||||
def validate_manifest(data: dict) -> dict:
    """Validate manifest-level and per-event timing invariants.

    Input: the `lesavka.local-hevc-bundle-audit.v1` JSON object. Output: the
    summary object for caller reporting; any violation exits via `fail`
    (SystemExit). The checks intentionally match the analyzer evidence floor
    and sync-probe event train: sixteen ordered coded HEVC frames, two
    nearby audio packets per frame, and no per-bundle audio skew outside the
    coded pulse width.
    """

    require(
        data.get("schema") == "lesavka.local-hevc-bundle-audit.v1",
        f"unexpected schema {data.get('schema')!r}",
    )
    summary = data.get("summary") or {}
    events = data.get("events") or []

    # Every summary counter must cover the full event train; a partial train
    # fails even when each present bundle looks healthy on its own.
    expected_summary = {
        "video_codec": "hevc",
        "metadata_mode": "1920x1080@30",
        "bundles": EXPECTED_EVENTS,
        "coded_video_events": EXPECTED_EVENTS,
        "annex_b_video_events": EXPECTED_EVENTS,
        "audio_packets": EXPECTED_EVENTS * EXPECTED_AUDIO_PACKETS_PER_EVENT,
        "bundles_with_audio_before_video": EXPECTED_EVENTS,
        "bundles_with_audio_after_video": EXPECTED_EVENTS,
        "monotonic_bundle_sequences": True,
    }
    for key, expected in expected_summary.items():
        require(
            summary.get(key) == expected,
            f"summary {key} expected {expected!r}, got {summary.get(key)!r}",
        )

    require(len(events) == EXPECTED_EVENTS, f"expected {EXPECTED_EVENTS} events, got {len(events)}")
    previous_seq = 0
    previous_video_pts = None
    for index, event in enumerate(events, start=1):
        def as_int(name: str, value) -> int:
            # Fix: a JSON null (or otherwise non-numeric) field previously
            # escaped as a raw `int(None)` TypeError traceback; route it
            # through fail() so every validation error keeps the module's
            # one-line SystemExit contract.
            try:
                return int(value)
            except (TypeError, ValueError):
                fail(f"event {index} has non-integer {name}")

        seq = as_int("bundle_seq", event.get("bundle_seq", -1))
        code = as_int("event_code", event.get("event_code", -1))
        video_pts = as_int("video_capture_pts_us", event.get("video_capture_pts_us", -1))
        send_pts = as_int("video_send_pts_us", event.get("video_send_pts_us", -1))
        capture_start = as_int("capture_start_us", event.get("capture_start_us", -1))
        capture_end = as_int("capture_end_us", event.get("capture_end_us", -1))
        audio_pts = [
            as_int("audio_capture_pts_us", value)
            for value in event.get("audio_capture_pts_us") or []
        ]
        max_skew = as_int("max_audio_video_skew_us", event.get("max_audio_video_skew_us", -1))

        require(seq == index, f"event {index} has bundle_seq={seq}")
        require(code == index, f"event {index} has event_code={code}")
        require(seq > previous_seq, f"event {index} sequence is not monotonic")
        previous_seq = seq
        require(event.get("has_annex_b_start_code") is True, f"event {index} lacks Annex-B")
        require(
            as_int("audio_packets", event.get("audio_packets", -1))
            == EXPECTED_AUDIO_PACKETS_PER_EVENT,
            f"event {index} has wrong audio packet count",
        )
        require(len(audio_pts) == EXPECTED_AUDIO_PACKETS_PER_EVENT, f"event {index} missing audio PTS")
        require(capture_start <= video_pts <= capture_end, f"event {index} video outside bounds")
        require(all(capture_start <= pts <= capture_end for pts in audio_pts), f"event {index} audio outside bounds")
        require(any(pts < video_pts for pts in audio_pts), f"event {index} lacks pre-video audio")
        require(any(pts > video_pts for pts in audio_pts), f"event {index} lacks post-video audio")
        require(send_pts >= video_pts, f"event {index} send PTS precedes capture PTS")
        require(max_skew <= MAX_AUDIO_VIDEO_SKEW_US, f"event {index} audio/video skew {max_skew}us")

        # Frames must land at the exact coded period; drift makes the whole
        # synthetic train unreliable as downstream evidence.
        if previous_video_pts is not None:
            period = video_pts - previous_video_pts
            require(
                period == EXPECTED_VIDEO_PERIOD_US,
                f"event {index} period {period}us != {EXPECTED_VIDEO_PERIOD_US}us",
            )
        previous_video_pts = video_pts

    return summary
|
||||
|
||||
|
||||
def main(argv: list[str]) -> int:
    """Validate one manifest path and print a concise operator summary.

    Inputs: a raw `sys.argv`-style argument list. Output: process exit code
    (0 on pass, 2 on usage error; validation failures exit via `fail`). Why:
    shell probe wrappers branch on the exit status, and operators read the
    printed summary.
    """

    if len(argv) != 2:
        print(f"usage: {argv[0]} AUDIT_JSON", file=sys.stderr)
        return 2

    path = Path(argv[1])
    summary = validate_manifest(load_manifest(path))
    print("local HEVC bundle audit validation: pass")
    # Fix: this line previously carried an `f` prefix with no placeholders.
    print("- schema: lesavka.local-hevc-bundle-audit.v1")
    print(f"- mode: {summary['metadata_mode']} codec={summary['video_codec']}")
    print(f"- bundles: {summary['bundles']}")
    print(f"- coded video events: {summary['coded_video_events']}/{EXPECTED_EVENTS}")
    print(f"- event codes: 1..{EXPECTED_EVENTS}")
    print(f"- Annex-B video events: {summary['annex_b_video_events']}/{EXPECTED_EVENTS}")
    print(f"- audio packets: {summary['audio_packets']}")
    print(
        "- bundles with audio before/after video: "
        f"{summary['bundles_with_audio_before_video']}/{summary['bundles_with_audio_after_video']}"
    )
    print(f"- monotonic bundle sequences: {summary['monotonic_bundle_sequences']}")
    return 0
|
||||
|
||||
|
||||
# Script entry point: propagate main()'s status as the process exit code.
if __name__ == "__main__":
    sys.exit(main(sys.argv))
|
||||
@ -10,7 +10,7 @@ bench = false
|
||||
|
||||
[package]
|
||||
name = "lesavka_server"
|
||||
version = "0.20.0"
|
||||
version = "0.21.9"
|
||||
edition = "2024"
|
||||
autobins = false
|
||||
|
||||
|
||||
@ -424,15 +424,17 @@ impl UvcVideoStream {
|
||||
}
|
||||
|
||||
fn refresh_latest_frame(&mut self) {
|
||||
if frame_spool_is_stale(&self.frame_path, frame_spool_max_age()) {
|
||||
self.latest_frame = EMPTY_MJPEG_FRAME.to_vec();
|
||||
let stale = frame_spool_is_stale(&self.frame_path, frame_spool_max_age());
|
||||
if stale && looks_like_mjpeg_frame(&self.latest_frame) {
|
||||
return;
|
||||
}
|
||||
if let Ok(frame) = std::fs::read(&self.frame_path)
|
||||
&& !frame.is_empty()
|
||||
&& frame.len() <= MAX_MJPEG_FRAME_BYTES
|
||||
&& looks_like_mjpeg_frame(&frame)
|
||||
{
|
||||
self.latest_frame = frame;
|
||||
} else if !looks_like_mjpeg_frame(&self.latest_frame) {
|
||||
self.latest_frame = EMPTY_MJPEG_FRAME.to_vec();
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -478,6 +480,12 @@ fn frame_spool_path() -> std::path::PathBuf {
|
||||
.unwrap_or_else(|_| std::path::PathBuf::from("/run/lesavka-uvc-frame.mjpg"))
|
||||
}
|
||||
|
||||
fn looks_like_mjpeg_frame(frame: &[u8]) -> bool {
|
||||
frame.len() > EMPTY_MJPEG_FRAME.len()
|
||||
&& frame.starts_with(&[0xff, 0xd8])
|
||||
&& frame.ends_with(&[0xff, 0xd9])
|
||||
}
|
||||
|
||||
fn uvc_buffer_count() -> u32 {
|
||||
env_u32("LESAVKA_UVC_BUFFER_COUNT", DEFAULT_UVC_BUFFER_COUNT).clamp(1, 8)
|
||||
}
|
||||
|
||||
@ -47,6 +47,13 @@ const UVC_VS_COMMIT_CONTROL: u8 = 0x02;
|
||||
#[cfg(coverage)]
|
||||
const UVC_VC_REQUEST_ERROR_CODE_CONTROL: u8 = 0x02;
|
||||
|
||||
#[cfg(coverage)]
|
||||
const DEFAULT_UVC_BUFFER_COUNT: u32 = 2;
|
||||
#[cfg(coverage)]
|
||||
const DEFAULT_UVC_IDLE_PUMP_MS: u64 = 2;
|
||||
#[cfg(coverage)]
|
||||
const DEFAULT_UVC_FRAME_MAX_AGE_MS: u64 = 1_000;
|
||||
|
||||
#[cfg(coverage)]
|
||||
#[repr(C)]
|
||||
#[derive(Clone, Copy)]
|
||||
|
||||
@ -127,3 +127,77 @@ fn uvc_control_read_only() -> bool {
|
||||
})
|
||||
.unwrap_or(true)
|
||||
}
|
||||
|
||||
#[cfg(coverage)]
|
||||
/// Returns the bounded UVC request-buffer count used by the helper.
|
||||
///
|
||||
/// Inputs: `LESAVKA_UVC_BUFFER_COUNT`. Outputs: a value clamped to the safe
|
||||
/// range accepted by the helper. Why: coverage contracts need the same backlog
|
||||
/// bound as the real UVC binary without opening a gadget device.
|
||||
fn uvc_buffer_count() -> u32 {
|
||||
env_u32("LESAVKA_UVC_BUFFER_COUNT", DEFAULT_UVC_BUFFER_COUNT).clamp(1, 8)
|
||||
}
|
||||
|
||||
#[cfg(coverage)]
|
||||
/// Returns the idle frame-pump sleep interval used when no fresh frame is ready.
|
||||
///
|
||||
/// Inputs: `LESAVKA_UVC_IDLE_PUMP_MS`. Outputs: a duration in milliseconds.
|
||||
/// Why: this value controls how quickly the UVC helper retries the freshness
|
||||
/// spool when browsers are already streaming.
|
||||
fn uvc_idle_pump_sleep() -> std::time::Duration {
|
||||
std::time::Duration::from_millis(env_u64(
|
||||
"LESAVKA_UVC_IDLE_PUMP_MS",
|
||||
DEFAULT_UVC_IDLE_PUMP_MS,
|
||||
))
|
||||
}
|
||||
|
||||
#[cfg(coverage)]
|
||||
/// Returns the optional maximum age for a spooled MJPEG frame.
|
||||
///
|
||||
/// Inputs: `LESAVKA_UVC_FRAME_MAX_AGE_MS`. Outputs: `None` when the TTL is
|
||||
/// disabled. Why: stale-frame replay must be explicit because it trades
|
||||
/// smoothness against freshness during browser stream recovery.
|
||||
fn frame_spool_max_age() -> Option<std::time::Duration> {
|
||||
match env_u64(
|
||||
"LESAVKA_UVC_FRAME_MAX_AGE_MS",
|
||||
DEFAULT_UVC_FRAME_MAX_AGE_MS,
|
||||
) {
|
||||
0 => None,
|
||||
value => Some(std::time::Duration::from_millis(value)),
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(coverage)]
|
||||
/// Determines whether a spooled MJPEG frame is too old to replay.
|
||||
///
|
||||
/// Inputs: frame path and optional age limit. Outputs: true when the frame is
|
||||
/// missing or older than the limit. Why: the coverage harness should guard the
|
||||
/// same freshness cut-off that prevents seconds-old video from re-entering UVC.
|
||||
fn frame_spool_is_stale(path: &std::path::Path, max_age: Option<std::time::Duration>) -> bool {
|
||||
let Some(max_age) = max_age else {
|
||||
return false;
|
||||
};
|
||||
let Ok(metadata) = std::fs::metadata(path) else {
|
||||
return true;
|
||||
};
|
||||
let Ok(modified) = metadata.modified() else {
|
||||
return false;
|
||||
};
|
||||
std::time::SystemTime::now()
|
||||
.duration_since(modified)
|
||||
.map(|age| age > max_age)
|
||||
.unwrap_or(false)
|
||||
}
|
||||
|
||||
#[cfg(coverage)]
|
||||
/// Parses an unsigned 64-bit environment value with a fallback.
|
||||
///
|
||||
/// Inputs: variable name and default value. Outputs: parsed value or default.
|
||||
/// Why: UVC freshness settings use millisecond durations that should not be
|
||||
/// truncated to 32-bit just because the coverage harness is lightweight.
|
||||
fn env_u64(name: &str, default: u64) -> u64 {
|
||||
env::var(name)
|
||||
.ok()
|
||||
.and_then(|value| value.parse::<u64>().ok())
|
||||
.unwrap_or(default)
|
||||
}
|
||||
|
||||
@ -371,6 +371,27 @@ mod tests {
|
||||
|
||||
#[test]
|
||||
fn blind_healer_refuses_when_under_sampled_or_unstable() {
|
||||
let mut disabled = config();
|
||||
disabled.enabled = false;
|
||||
assert_eq!(
|
||||
evaluate_blind_heal_snapshot(&snapshot(), disabled),
|
||||
BlindHealDecision::Wait("disabled")
|
||||
);
|
||||
|
||||
let mut not_live = snapshot();
|
||||
not_live.phase = "starting";
|
||||
assert_eq!(
|
||||
evaluate_blind_heal_snapshot(¬_live, config()),
|
||||
BlindHealDecision::Wait("not-live")
|
||||
);
|
||||
|
||||
let mut low_client_samples = snapshot();
|
||||
low_client_samples.client_timing_window_samples = 1;
|
||||
assert_eq!(
|
||||
evaluate_blind_heal_snapshot(&low_client_samples, config()),
|
||||
BlindHealDecision::Wait("not-enough-client-samples")
|
||||
);
|
||||
|
||||
let mut low_samples = snapshot();
|
||||
low_samples.sink_handoff_window_samples = 1;
|
||||
assert_eq!(
|
||||
@ -378,6 +399,20 @@ mod tests {
|
||||
BlindHealDecision::Wait("not-enough-sink-samples")
|
||||
);
|
||||
|
||||
let mut missing_skew = snapshot();
|
||||
missing_skew.sink_handoff_skew_ms = None;
|
||||
assert_eq!(
|
||||
evaluate_blind_heal_snapshot(&missing_skew, config()),
|
||||
BlindHealDecision::Wait("missing-sink-skew")
|
||||
);
|
||||
|
||||
let mut inside_deadband = snapshot();
|
||||
inside_deadband.sink_handoff_skew_ms = Some(4.0);
|
||||
assert_eq!(
|
||||
evaluate_blind_heal_snapshot(&inside_deadband, config()),
|
||||
BlindHealDecision::Wait("inside-deadband")
|
||||
);
|
||||
|
||||
let mut noisy_network = snapshot();
|
||||
noisy_network.server_receive_abs_skew_p95_ms = Some(400.0);
|
||||
assert_eq!(
|
||||
@ -385,6 +420,34 @@ mod tests {
|
||||
BlindHealDecision::Wait("server-receive-p95-unstable")
|
||||
);
|
||||
|
||||
let mut noisy_client_send = snapshot();
|
||||
noisy_client_send.client_send_abs_skew_p95_ms = Some(400.0);
|
||||
assert_eq!(
|
||||
evaluate_blind_heal_snapshot(&noisy_client_send, config()),
|
||||
BlindHealDecision::Wait("client-send-p95-unstable")
|
||||
);
|
||||
|
||||
let mut queued_client = snapshot();
|
||||
queued_client.camera_client_queue_age_p95_ms = Some(400.0);
|
||||
assert_eq!(
|
||||
evaluate_blind_heal_snapshot(&queued_client, config()),
|
||||
BlindHealDecision::Wait("client-queue-p95-unstable")
|
||||
);
|
||||
|
||||
let mut late_sink = snapshot();
|
||||
late_sink.microphone_sink_late_p95_ms = Some(400.0);
|
||||
assert_eq!(
|
||||
evaluate_blind_heal_snapshot(&late_sink, config()),
|
||||
BlindHealDecision::Wait("sink-late-p95-unstable")
|
||||
);
|
||||
|
||||
let mut rounded_zero = config();
|
||||
rounded_zero.gain = 0.000_001;
|
||||
assert_eq!(
|
||||
evaluate_blind_heal_snapshot(&snapshot(), rounded_zero),
|
||||
BlindHealDecision::Wait("rounded-zero")
|
||||
);
|
||||
|
||||
let mut noisy_handoff = snapshot();
|
||||
noisy_handoff.sink_handoff_abs_skew_p95_ms = Some(241.0);
|
||||
assert_eq!(
|
||||
|
||||
@ -9,19 +9,25 @@ use lesavka_common::lesavka::{
|
||||
|
||||
use crate::upstream_media_runtime::UpstreamMediaRuntime;
|
||||
|
||||
pub const FACTORY_MJPEG_AUDIO_OFFSET_US: i64 = 0;
|
||||
pub const FACTORY_MJPEG_VIDEO_OFFSET_1280X720_20_US: i64 = 162_659;
|
||||
pub const FACTORY_MJPEG_VIDEO_OFFSET_1280X720_30_US: i64 = 135_090;
|
||||
pub const FACTORY_MJPEG_VIDEO_OFFSET_1920X1080_20_US: i64 = 160_045;
|
||||
pub const FACTORY_MJPEG_VIDEO_OFFSET_1920X1080_30_US: i64 = 127_952;
|
||||
pub const FACTORY_MJPEG_AUDIO_MODE_OFFSETS_US: &str =
|
||||
"1280x720@20=0,1280x720@30=0,1920x1080@20=0,1920x1080@30=0";
|
||||
pub const FACTORY_MJPEG_VIDEO_MODE_OFFSETS_US: &str =
|
||||
"1280x720@20=162659,1280x720@30=135090,1920x1080@20=160045,1920x1080@30=127952";
|
||||
// Direct UVC/UAC output-delay probes against the lab RC target showed a
|
||||
// per-mode sync center for MJPEG/UVC video. This is output-path compensation,
|
||||
// not a freshness buffer. The scalar fallback follows the default UVC mode.
|
||||
pub const FACTORY_MJPEG_VIDEO_OFFSET_US: i64 = FACTORY_MJPEG_VIDEO_OFFSET_1280X720_30_US;
|
||||
mod mode_env;
|
||||
mod profile_offsets;
|
||||
|
||||
use mode_env::{current_uvc_mode, lookup_mode_offset_us};
|
||||
pub use profile_offsets::{
|
||||
FACTORY_HEVC_AUDIO_MODE_OFFSETS_US, FACTORY_HEVC_AUDIO_OFFSET_US,
|
||||
FACTORY_HEVC_VIDEO_MODE_OFFSETS_US, FACTORY_HEVC_VIDEO_OFFSET_1280X720_20_US,
|
||||
FACTORY_HEVC_VIDEO_OFFSET_1280X720_30_US, FACTORY_HEVC_VIDEO_OFFSET_1920X1080_20_US,
|
||||
FACTORY_HEVC_VIDEO_OFFSET_1920X1080_30_US, FACTORY_HEVC_VIDEO_OFFSET_US,
|
||||
FACTORY_MJPEG_AUDIO_MODE_OFFSETS_US, FACTORY_MJPEG_AUDIO_OFFSET_US,
|
||||
FACTORY_MJPEG_VIDEO_MODE_OFFSETS_US, FACTORY_MJPEG_VIDEO_OFFSET_1280X720_20_US,
|
||||
FACTORY_MJPEG_VIDEO_OFFSET_1280X720_30_US, FACTORY_MJPEG_VIDEO_OFFSET_1920X1080_20_US,
|
||||
FACTORY_MJPEG_VIDEO_OFFSET_1920X1080_30_US, FACTORY_MJPEG_VIDEO_OFFSET_US,
|
||||
};
|
||||
use profile_offsets::{
|
||||
configured_profile_offset_us, current_profile, factory_audio_mode_offsets_us,
|
||||
factory_audio_scalar_offset_us, factory_video_mode_offsets_us, factory_video_scalar_offset_us,
|
||||
};
|
||||
|
||||
const LEGACY_FACTORY_MJPEG_AUDIO_OFFSET_US: i64 = -45_000;
|
||||
const PREVIOUS_FACTORY_MJPEG_AUDIO_OFFSET_US: i64 = 720_000;
|
||||
const PREVIOUS_TUNED_MJPEG_AUDIO_OFFSET_US: i64 = 1_260_000;
|
||||
@ -30,7 +36,6 @@ const PREVIOUS_DELAYED_FACTORY_MJPEG_VIDEO_OFFSET_US: i64 = 350_000;
|
||||
const PREVIOUS_BROWSER_FACTORY_MJPEG_VIDEO_OFFSET_US: i64 = 130_000;
|
||||
const PREVIOUS_SCALAR_FACTORY_MJPEG_VIDEO_OFFSET_US: i64 = 170_000;
|
||||
const PREVIOUS_OVERSHOT_FACTORY_MJPEG_VIDEO_OFFSET_US: i64 = 1_090_000;
|
||||
const PROFILE: &str = "mjpeg";
|
||||
const FACTORY_CONFIDENCE: &str = "factory";
|
||||
const PREVIOUS_OFFSET_LIMIT_US: i64 = 500_000;
|
||||
const OFFSET_LIMIT_US: i64 = 1_500_000;
|
||||
@ -98,12 +103,10 @@ impl CalibrationStore {
|
||||
touch(&mut state);
|
||||
}
|
||||
CalibrationAction::RestoreFactory => {
|
||||
state.active_audio_offset_us = state.factory_audio_offset_us;
|
||||
state.active_video_offset_us = state.factory_video_offset_us;
|
||||
state.source = "factory".to_string();
|
||||
state.confidence = FACTORY_CONFIDENCE.to_string();
|
||||
state.detail = "restored release-shipped MJPEG upstream calibration".to_string();
|
||||
touch(&mut state);
|
||||
*state = factory_snapshot_from_env(format!(
|
||||
"restored release-shipped {} upstream calibration",
|
||||
current_profile()
|
||||
));
|
||||
}
|
||||
CalibrationAction::AdjustActive => {
|
||||
state.active_audio_offset_us = clamp_offset(
|
||||
@ -228,26 +231,23 @@ pub fn calibration_path() -> PathBuf {
|
||||
/// Inputs are the typed parameters; output is the return value or side effect.
|
||||
fn snapshot_from_env() -> CalibrationSnapshot {
|
||||
let mode = current_uvc_mode();
|
||||
let profile = current_profile();
|
||||
let factory_audio_mode_offsets_us = factory_audio_mode_offsets_us(&profile);
|
||||
let factory_video_mode_offsets_us = factory_video_mode_offsets_us(&profile);
|
||||
let factory_audio_scalar_offset_us = factory_audio_scalar_offset_us(&profile);
|
||||
let factory_video_scalar_offset_us = factory_video_scalar_offset_us(&profile);
|
||||
let factory_audio_offset_us = mode
|
||||
.as_deref()
|
||||
.and_then(|mode| lookup_mode_offset_us(FACTORY_MJPEG_AUDIO_MODE_OFFSETS_US, mode))
|
||||
.unwrap_or(FACTORY_MJPEG_AUDIO_OFFSET_US);
|
||||
.and_then(|mode| lookup_mode_offset_us(factory_audio_mode_offsets_us, mode))
|
||||
.unwrap_or(factory_audio_scalar_offset_us);
|
||||
let factory_video_offset_us = mode
|
||||
.as_deref()
|
||||
.and_then(|mode| lookup_mode_offset_us(FACTORY_MJPEG_VIDEO_MODE_OFFSETS_US, mode))
|
||||
.unwrap_or(FACTORY_MJPEG_VIDEO_OFFSET_US);
|
||||
let env_audio = configured_offset_us(
|
||||
"LESAVKA_UPSTREAM_AUDIO_PLAYOUT_MODE_OFFSETS_US",
|
||||
"LESAVKA_UPSTREAM_AUDIO_PLAYOUT_OFFSET_US",
|
||||
mode.as_deref(),
|
||||
is_stale_audio_offset_us,
|
||||
);
|
||||
let env_video = configured_offset_us(
|
||||
"LESAVKA_UPSTREAM_VIDEO_PLAYOUT_MODE_OFFSETS_US",
|
||||
"LESAVKA_UPSTREAM_VIDEO_PLAYOUT_OFFSET_US",
|
||||
mode.as_deref(),
|
||||
is_stale_video_offset_us,
|
||||
);
|
||||
.and_then(|mode| lookup_mode_offset_us(factory_video_mode_offsets_us, mode))
|
||||
.unwrap_or(factory_video_scalar_offset_us);
|
||||
let env_audio =
|
||||
configured_profile_offset_us(&profile, "AUDIO", mode.as_deref(), is_stale_audio_offset_us);
|
||||
let env_video =
|
||||
configured_profile_offset_us(&profile, "VIDEO", mode.as_deref(), is_stale_video_offset_us);
|
||||
let default_audio_offset_us = env_audio.unwrap_or(factory_audio_offset_us);
|
||||
let default_video_offset_us = env_video.unwrap_or(factory_video_offset_us);
|
||||
let source = if env_audio.is_some() || env_video.is_some() {
|
||||
@ -261,7 +261,7 @@ fn snapshot_from_env() -> CalibrationSnapshot {
|
||||
"configured".to_string()
|
||||
};
|
||||
CalibrationSnapshot {
|
||||
profile: PROFILE.to_string(),
|
||||
profile,
|
||||
factory_audio_offset_us,
|
||||
factory_video_offset_us,
|
||||
default_audio_offset_us,
|
||||
@ -316,8 +316,28 @@ fn parse_snapshot(raw: &str) -> CalibrationSnapshot {
|
||||
/// Keeps `migrate_legacy_snapshot` explicit because it sits on calibration state, where persisted and factory offsets must stay auditable.
|
||||
/// Inputs are the typed parameters; output is the return value or side effect.
|
||||
fn migrate_legacy_snapshot(mut state: CalibrationSnapshot) -> CalibrationSnapshot {
|
||||
let current_profile = current_profile();
|
||||
let source_allows_migration = matches!(state.source.as_str(), "factory" | "env");
|
||||
let confidence_allows_migration = matches!(state.confidence.as_str(), "factory" | "configured");
|
||||
let detail_allows_profile_migration = state
|
||||
.detail
|
||||
.contains("loaded upstream A/V calibration defaults")
|
||||
|| state.detail.contains("restored release-shipped");
|
||||
if state.profile != current_profile
|
||||
&& source_allows_migration
|
||||
&& confidence_allows_migration
|
||||
&& detail_allows_profile_migration
|
||||
{
|
||||
let mut replacement = factory_snapshot_from_env(format!(
|
||||
"migrated factory upstream A/V calibration profile from {} to {}",
|
||||
state.profile, current_profile
|
||||
));
|
||||
replacement.detail = format!(
|
||||
"migrated factory upstream A/V calibration profile from {} to {}",
|
||||
state.profile, replacement.profile
|
||||
);
|
||||
return replacement;
|
||||
}
|
||||
let clamped_previous_baseline = matches!(
|
||||
state.default_audio_offset_us,
|
||||
PREVIOUS_OFFSET_LIMIT_US | OFFSET_LIMIT_US
|
||||
@ -330,7 +350,7 @@ fn migrate_legacy_snapshot(mut state: CalibrationSnapshot) -> CalibrationSnapsho
|
||||
&& state.active_audio_offset_us == state.default_audio_offset_us;
|
||||
let untouched_legacy_video = is_stale_video_offset_us(state.default_video_offset_us)
|
||||
&& state.active_video_offset_us == state.default_video_offset_us;
|
||||
if state.profile == PROFILE
|
||||
if state.profile == current_profile
|
||||
&& source_allows_migration
|
||||
&& confidence_allows_migration
|
||||
&& untouched_legacy_audio
|
||||
@ -356,6 +376,25 @@ fn migrate_legacy_snapshot(mut state: CalibrationSnapshot) -> CalibrationSnapsho
|
||||
state
|
||||
}
|
||||
|
||||
/// Builds a persisted calibration snapshot from the current profile factory.
|
||||
///
|
||||
/// Inputs: human-readable audit detail. Output: calibration state whose active
|
||||
/// and default offsets are reset to the profile-specific factory values.
|
||||
/// Why: restore/migration paths must not accidentally revive stale MJPEG
|
||||
/// offsets after the server is running an HEVC decode-to-MJPEG profile.
|
||||
fn factory_snapshot_from_env(detail: impl Into<String>) -> CalibrationSnapshot {
|
||||
let mut state = snapshot_from_env();
|
||||
state.default_audio_offset_us = state.factory_audio_offset_us;
|
||||
state.default_video_offset_us = state.factory_video_offset_us;
|
||||
state.active_audio_offset_us = state.factory_audio_offset_us;
|
||||
state.active_video_offset_us = state.factory_video_offset_us;
|
||||
state.source = "factory".to_string();
|
||||
state.confidence = FACTORY_CONFIDENCE.to_string();
|
||||
state.detail = detail.into();
|
||||
touch(&mut state);
|
||||
state
|
||||
}
|
||||
|
||||
/// Keeps `persist_snapshot` explicit because it sits on calibration state, where persisted and factory offsets must stay auditable.
|
||||
/// Inputs are the typed parameters; output is the return value or side effect.
|
||||
fn persist_snapshot(path: &PathBuf, state: &CalibrationSnapshot) -> Result<()> {
|
||||
@ -388,84 +427,6 @@ fn touch(state: &mut CalibrationSnapshot) {
|
||||
state.updated_at = Utc::now().to_rfc3339();
|
||||
}
|
||||
|
||||
fn configured_offset_us(
|
||||
mode_map_name: &str,
|
||||
scalar_name: &str,
|
||||
mode: Option<&str>,
|
||||
is_stale_scalar: impl Fn(i64) -> bool,
|
||||
) -> Option<i64> {
|
||||
mode.and_then(|mode| env_mode_offset_us(mode_map_name, mode))
|
||||
.or_else(|| env_i64(scalar_name).filter(|offset| !is_stale_scalar(*offset)))
|
||||
}
|
||||
|
||||
/// Keeps `current_uvc_mode` explicit because it sits on calibration state, where persisted and factory offsets must stay auditable.
|
||||
/// Inputs are the typed parameters; output is the return value or side effect.
|
||||
fn current_uvc_mode() -> Option<String> {
|
||||
env_mode("UVC_MODE")
|
||||
.or_else(|| env_mode("LESAVKA_UVC_MODE"))
|
||||
.or_else(|| {
|
||||
let width = env_u32("LESAVKA_UVC_WIDTH")?;
|
||||
let height = env_u32("LESAVKA_UVC_HEIGHT")?;
|
||||
let fps = env_u32("LESAVKA_UVC_FPS")
|
||||
.or_else(|| {
|
||||
env_u32("LESAVKA_UVC_INTERVAL")
|
||||
.and_then(|interval| (interval > 0).then_some(10_000_000 / interval))
|
||||
})?
|
||||
.max(1);
|
||||
Some(format!("{width}x{height}@{fps}"))
|
||||
})
|
||||
.or_else(|| {
|
||||
let width = env_u32("LESAVKA_CAM_WIDTH")?;
|
||||
let height = env_u32("LESAVKA_CAM_HEIGHT")?;
|
||||
let fps = env_u32("LESAVKA_CAM_FPS")?.max(1);
|
||||
Some(format!("{width}x{height}@{fps}"))
|
||||
})
|
||||
}
|
||||
|
||||
/// Keeps `env_mode` explicit because it sits on calibration state, where persisted and factory offsets must stay auditable.
|
||||
/// Inputs are the typed parameters; output is the return value or side effect.
|
||||
fn env_mode(name: &str) -> Option<String> {
|
||||
std::env::var(name).ok().and_then(|value| {
|
||||
let trimmed = value.trim();
|
||||
let valid = trimmed.split_once('@').and_then(|(size, fps)| {
|
||||
let (width, height) = size.split_once('x')?;
|
||||
width.parse::<u32>().ok()?;
|
||||
height.parse::<u32>().ok()?;
|
||||
fps.parse::<u32>().ok()?;
|
||||
Some(())
|
||||
});
|
||||
valid.map(|()| trimmed.to_string())
|
||||
})
|
||||
}
|
||||
|
||||
fn env_mode_offset_us(name: &str, mode: &str) -> Option<i64> {
|
||||
std::env::var(name)
|
||||
.ok()
|
||||
.and_then(|map| lookup_mode_offset_us(&map, mode))
|
||||
}
|
||||
|
||||
fn lookup_mode_offset_us(map: &str, mode: &str) -> Option<i64> {
|
||||
map.split(',').find_map(|entry| {
|
||||
let (key, value) = entry.trim().split_once('=')?;
|
||||
(key.trim() == mode)
|
||||
.then(|| value.trim().parse::<i64>().ok().map(clamp_offset))
|
||||
.flatten()
|
||||
})
|
||||
}
|
||||
|
||||
fn env_i64(name: &str) -> Option<i64> {
|
||||
std::env::var(name)
|
||||
.ok()
|
||||
.and_then(|value| value.trim().parse::<i64>().ok())
|
||||
.map(clamp_offset)
|
||||
}
|
||||
|
||||
fn env_u32(name: &str) -> Option<u32> {
|
||||
std::env::var(name)
|
||||
.ok()
|
||||
.and_then(|value| value.trim().parse::<u32>().ok())
|
||||
}
|
||||
|
||||
fn is_stale_audio_offset_us(offset: i64) -> bool {
|
||||
matches!(
|
||||
offset,
|
||||
|
||||
96
server/src/calibration/mode_env.rs
Normal file
96
server/src/calibration/mode_env.rs
Normal file
@ -0,0 +1,96 @@
|
||||
use super::clamp_offset;
|
||||
|
||||
/// Resolve the active UVC mode from explicit mode or width/height/fps env.
|
||||
///
|
||||
/// Inputs: server UVC and camera environment variables. Output:
|
||||
/// `WIDTHxHEIGHT@FPS` when enough information is available. Why: calibration
|
||||
/// offsets are mode-specific, and early startup may see either configfs-style
|
||||
/// mode strings or separate descriptor fields.
|
||||
pub(super) fn current_uvc_mode() -> Option<String> {
|
||||
env_mode("UVC_MODE")
|
||||
.or_else(|| env_mode("LESAVKA_UVC_MODE"))
|
||||
.or_else(|| {
|
||||
let width = env_u32("LESAVKA_UVC_WIDTH")?;
|
||||
let height = env_u32("LESAVKA_UVC_HEIGHT")?;
|
||||
let fps = env_u32("LESAVKA_UVC_FPS")
|
||||
.or_else(|| {
|
||||
env_u32("LESAVKA_UVC_INTERVAL")
|
||||
.and_then(|interval| (interval > 0).then_some(10_000_000 / interval))
|
||||
})?
|
||||
.max(1);
|
||||
Some(format!("{width}x{height}@{fps}"))
|
||||
})
|
||||
.or_else(|| {
|
||||
let width = env_u32("LESAVKA_CAM_WIDTH")?;
|
||||
let height = env_u32("LESAVKA_CAM_HEIGHT")?;
|
||||
let fps = env_u32("LESAVKA_CAM_FPS")?.max(1);
|
||||
Some(format!("{width}x{height}@{fps}"))
|
||||
})
|
||||
}
|
||||
|
||||
/// Parse one env var as a validated UVC mode string.
|
||||
///
|
||||
/// Inputs: environment variable name. Output: normalized mode string if it has
|
||||
/// numeric width, height, and fps. Why: invalid operator input should not poison
|
||||
/// persisted calibration state.
|
||||
fn env_mode(name: &str) -> Option<String> {
|
||||
std::env::var(name).ok().and_then(|value| {
|
||||
let trimmed = value.trim();
|
||||
let valid = trimmed.split_once('@').and_then(|(size, fps)| {
|
||||
let (width, height) = size.split_once('x')?;
|
||||
width.parse::<u32>().ok()?;
|
||||
height.parse::<u32>().ok()?;
|
||||
fps.parse::<u32>().ok()?;
|
||||
Some(())
|
||||
});
|
||||
valid.map(|()| trimmed.to_string())
|
||||
})
|
||||
}
|
||||
|
||||
/// Read one comma-separated mode offset map from the environment.
|
||||
///
|
||||
/// Inputs: variable name and target mode. Output: clamped offset in
|
||||
/// microseconds. Why: mode maps let one service binary carry calibrated offsets
|
||||
/// for several advertised UVC profiles.
|
||||
pub(super) fn env_mode_offset_us(name: &str, mode: &str) -> Option<i64> {
|
||||
std::env::var(name)
|
||||
.ok()
|
||||
.and_then(|map| lookup_mode_offset_us(&map, mode))
|
||||
}
|
||||
|
||||
/// Find one mode's offset in a comma-separated `MODE=US` map.
|
||||
///
|
||||
/// Inputs: raw map text and target mode. Output: clamped offset in
|
||||
/// microseconds. Why: install scripts and hardware probes pass calibration maps
|
||||
/// as env strings, so parsing stays shared and auditable.
|
||||
pub(super) fn lookup_mode_offset_us(map: &str, mode: &str) -> Option<i64> {
|
||||
map.split(',').find_map(|entry| {
|
||||
let (key, value) = entry.trim().split_once('=')?;
|
||||
(key.trim() == mode)
|
||||
.then(|| value.trim().parse::<i64>().ok().map(clamp_offset))
|
||||
.flatten()
|
||||
})
|
||||
}
|
||||
|
||||
/// Read one scalar microsecond offset from the environment.
|
||||
///
|
||||
/// Inputs: variable name. Output: clamped offset when parseable. Why: scalar
|
||||
/// fallbacks keep old deployments configurable even after mode maps became the
|
||||
/// preferred calibration representation.
|
||||
pub(super) fn env_i64(name: &str) -> Option<i64> {
|
||||
std::env::var(name)
|
||||
.ok()
|
||||
.and_then(|value| value.trim().parse::<i64>().ok())
|
||||
.map(clamp_offset)
|
||||
}
|
||||
|
||||
/// Read one unsigned integer from the environment.
|
||||
///
|
||||
/// Inputs: variable name. Output: parsed value if present and valid. Why:
|
||||
/// UVC descriptors expose dimensions and intervals as numeric env fields during
|
||||
/// runtime reconfiguration.
|
||||
pub(super) fn env_u32(name: &str) -> Option<u32> {
|
||||
std::env::var(name)
|
||||
.ok()
|
||||
.and_then(|value| value.trim().parse::<u32>().ok())
|
||||
}
|
||||
151
server/src/calibration/profile_offsets.rs
Normal file
151
server/src/calibration/profile_offsets.rs
Normal file
@ -0,0 +1,151 @@
|
||||
pub const FACTORY_MJPEG_AUDIO_OFFSET_US: i64 = 0;
|
||||
pub const FACTORY_MJPEG_VIDEO_OFFSET_1280X720_20_US: i64 = 162_659;
|
||||
pub const FACTORY_MJPEG_VIDEO_OFFSET_1280X720_30_US: i64 = 135_090;
|
||||
pub const FACTORY_MJPEG_VIDEO_OFFSET_1920X1080_20_US: i64 = 160_045;
|
||||
pub const FACTORY_MJPEG_VIDEO_OFFSET_1920X1080_30_US: i64 = 127_952;
|
||||
pub const FACTORY_MJPEG_AUDIO_MODE_OFFSETS_US: &str =
|
||||
"1280x720@20=0,1280x720@30=0,1920x1080@20=0,1920x1080@30=0";
|
||||
pub const FACTORY_MJPEG_VIDEO_MODE_OFFSETS_US: &str =
|
||||
"1280x720@20=162659,1280x720@30=135090,1920x1080@20=160045,1920x1080@30=127952";
|
||||
pub const FACTORY_HEVC_AUDIO_OFFSET_US: i64 = 0;
|
||||
pub const FACTORY_HEVC_VIDEO_OFFSET_1280X720_20_US: i64 = 173_852;
|
||||
pub const FACTORY_HEVC_VIDEO_OFFSET_1280X720_30_US: i64 = 110_000;
|
||||
pub const FACTORY_HEVC_VIDEO_OFFSET_1920X1080_20_US: i64 = 160_045;
|
||||
pub const FACTORY_HEVC_VIDEO_OFFSET_1920X1080_30_US: i64 = 127_952;
|
||||
pub const FACTORY_HEVC_AUDIO_MODE_OFFSETS_US: &str =
|
||||
"1280x720@20=0,1280x720@30=0,1920x1080@20=0,1920x1080@30=0";
|
||||
pub const FACTORY_HEVC_VIDEO_MODE_OFFSETS_US: &str =
|
||||
"1280x720@20=173852,1280x720@30=110000,1920x1080@20=160045,1920x1080@30=127952";
|
||||
// Direct UVC/UAC output-delay probes against the lab RC target showed a
|
||||
// per-mode sync center for MJPEG/UVC video. This is output-path compensation,
|
||||
// not a freshness buffer. The scalar fallback follows the default UVC mode.
|
||||
pub const FACTORY_MJPEG_VIDEO_OFFSET_US: i64 = FACTORY_MJPEG_VIDEO_OFFSET_1280X720_30_US;
|
||||
pub const FACTORY_HEVC_VIDEO_OFFSET_US: i64 = FACTORY_HEVC_VIDEO_OFFSET_1280X720_30_US;
|
||||
|
||||
const MJPEG_PROFILE: &str = "mjpeg";
|
||||
const HEVC_PROFILE: &str = "hevc";
|
||||
|
||||
use super::mode_env::{env_i64, env_mode_offset_us};
|
||||
|
||||
/// Read a profile-specific playout offset with generic env fallback.
|
||||
///
|
||||
/// Inputs: normalized profile (`mjpeg` or `hevc`), media kind (`AUDIO` or
|
||||
/// `VIDEO`), optional UVC mode, and stale-scalar predicate. Output: the selected
|
||||
/// offset in microseconds. Why: HEVC decode adds a different server-to-RCT path,
|
||||
/// so profile-specific maps must override legacy generic MJPEG-era knobs while
|
||||
/// preserving compatibility for existing deployments.
|
||||
pub(super) fn configured_profile_offset_us(
|
||||
profile: &str,
|
||||
media: &str,
|
||||
mode: Option<&str>,
|
||||
is_stale_scalar: impl Fn(i64) -> bool,
|
||||
) -> Option<i64> {
|
||||
let profile_prefix = profile.to_ascii_uppercase();
|
||||
let profile_mode_map_name =
|
||||
format!("LESAVKA_UPSTREAM_{profile_prefix}_{media}_PLAYOUT_MODE_OFFSETS_US");
|
||||
let profile_scalar_name =
|
||||
format!("LESAVKA_UPSTREAM_{profile_prefix}_{media}_PLAYOUT_OFFSET_US");
|
||||
let generic_mode_map_name = format!("LESAVKA_UPSTREAM_{media}_PLAYOUT_MODE_OFFSETS_US");
|
||||
let generic_scalar_name = format!("LESAVKA_UPSTREAM_{media}_PLAYOUT_OFFSET_US");
|
||||
let profile_offset = mode
|
||||
.and_then(|mode| env_mode_offset_us(&profile_mode_map_name, mode))
|
||||
.or_else(|| env_i64(&profile_scalar_name).filter(|offset| !is_stale_scalar(*offset)));
|
||||
if profile_offset.is_some() {
|
||||
return profile_offset;
|
||||
}
|
||||
|
||||
// Generic playout variables predate ingress profiles and were written by
|
||||
// MJPEG installs. Do not let those stale maps silently override HEVC decode
|
||||
// factory offsets; HEVC deployments can still opt in with the profile knobs.
|
||||
if profile != MJPEG_PROFILE {
|
||||
return None;
|
||||
}
|
||||
|
||||
mode.and_then(|mode| env_mode_offset_us(&generic_mode_map_name, mode))
|
||||
.or_else(|| env_i64(&generic_scalar_name).filter(|offset| !is_stale_scalar(*offset)))
|
||||
}
|
||||
|
||||
/// Resolve the active calibration profile from explicit and codec env hints.
|
||||
///
|
||||
/// Inputs: process environment. Output: normalized profile string. Why:
|
||||
/// persisted calibration must follow the ingress codec, not just the UVC output
|
||||
/// codec, because HEVC media pays decode/re-encode cost before reaching RCT.
|
||||
pub(super) fn current_profile() -> String {
|
||||
std::env::var("LESAVKA_CALIBRATION_PROFILE")
|
||||
.ok()
|
||||
.and_then(|value| normalize_profile(&value))
|
||||
.or_else(|| {
|
||||
std::env::var("LESAVKA_UPLINK_CAMERA_CODEC")
|
||||
.ok()
|
||||
.and_then(|value| normalize_profile(&value))
|
||||
})
|
||||
.or_else(|| {
|
||||
std::env::var("LESAVKA_CAM_CODEC")
|
||||
.ok()
|
||||
.and_then(|value| normalize_profile(&value))
|
||||
})
|
||||
.unwrap_or_else(|| MJPEG_PROFILE.to_string())
|
||||
}
|
||||
|
||||
/// Normalize user-facing codec spellings into calibration profile names.
|
||||
///
|
||||
/// Inputs: a codec/profile string. Output: `Some(profile)` for known camera
|
||||
/// ingress profiles. Why: operators and install scripts use `h265`, `h.265`,
|
||||
/// `mjpg`, and `jpeg` spellings interchangeably, but the stored calibration
|
||||
/// profile must remain stable.
|
||||
fn normalize_profile(value: &str) -> Option<String> {
|
||||
match value.trim().to_ascii_lowercase().as_str() {
|
||||
"hevc" | "h265" | "h.265" => Some(HEVC_PROFILE.to_string()),
|
||||
"mjpeg" | "mjpg" | "jpeg" => Some(MJPEG_PROFILE.to_string()),
|
||||
_ => None,
|
||||
}
|
||||
}
|
||||
|
||||
/// Return the factory per-mode audio offset map for a calibration profile.
|
||||
///
|
||||
/// Inputs: normalized profile. Output: comma-separated mode map. Why: keeping
|
||||
/// audio maps profile-aware lets future decode paths diverge without changing
|
||||
/// the persisted calibration schema again.
|
||||
pub(super) fn factory_audio_mode_offsets_us(profile: &str) -> &'static str {
|
||||
match profile {
|
||||
HEVC_PROFILE => FACTORY_HEVC_AUDIO_MODE_OFFSETS_US,
|
||||
_ => FACTORY_MJPEG_AUDIO_MODE_OFFSETS_US,
|
||||
}
|
||||
}
|
||||
|
||||
/// Return the factory per-mode video offset map for a calibration profile.
|
||||
///
|
||||
/// Inputs: normalized profile. Output: comma-separated mode map. Why: video is
|
||||
/// where HEVC decode and MJPEG re-emission can shift sync, so each ingress
|
||||
/// profile needs its own baked server-to-RCT center points.
|
||||
pub(super) fn factory_video_mode_offsets_us(profile: &str) -> &'static str {
|
||||
match profile {
|
||||
HEVC_PROFILE => FACTORY_HEVC_VIDEO_MODE_OFFSETS_US,
|
||||
_ => FACTORY_MJPEG_VIDEO_MODE_OFFSETS_US,
|
||||
}
|
||||
}
|
||||
|
||||
/// Return the scalar factory audio fallback for a profile.
|
||||
///
|
||||
/// Inputs: normalized profile. Output: audio offset in microseconds. Why:
|
||||
/// callers still need a safe default when no UVC mode is visible during early
|
||||
/// process startup.
|
||||
pub(super) fn factory_audio_scalar_offset_us(profile: &str) -> i64 {
|
||||
match profile {
|
||||
HEVC_PROFILE => FACTORY_HEVC_AUDIO_OFFSET_US,
|
||||
_ => FACTORY_MJPEG_AUDIO_OFFSET_US,
|
||||
}
|
||||
}
|
||||
|
||||
/// Return the scalar factory video fallback for a profile.
|
||||
///
|
||||
/// Inputs: normalized profile. Output: video offset in microseconds. Why:
|
||||
/// profile-specific scalar defaults keep unknown-mode startup aligned with the
|
||||
/// most common calibrated profile instead of silently borrowing stale MJPEG
|
||||
/// values for HEVC.
|
||||
pub(super) fn factory_video_scalar_offset_us(profile: &str) -> i64 {
|
||||
match profile {
|
||||
HEVC_PROFILE => FACTORY_HEVC_VIDEO_OFFSET_US,
|
||||
_ => FACTORY_MJPEG_VIDEO_OFFSET_US,
|
||||
}
|
||||
}
|
||||
@ -32,6 +32,25 @@ fn with_clean_offset_env(test: impl FnOnce()) {
|
||||
("LESAVKA_CAM_WIDTH", None::<&str>),
|
||||
("LESAVKA_CAM_HEIGHT", None::<&str>),
|
||||
("LESAVKA_CAM_FPS", None::<&str>),
|
||||
("LESAVKA_CAM_CODEC", None::<&str>),
|
||||
("LESAVKA_UPLINK_CAMERA_CODEC", None::<&str>),
|
||||
("LESAVKA_CALIBRATION_PROFILE", None::<&str>),
|
||||
(
|
||||
"LESAVKA_UPSTREAM_HEVC_AUDIO_PLAYOUT_MODE_OFFSETS_US",
|
||||
None::<&str>,
|
||||
),
|
||||
(
|
||||
"LESAVKA_UPSTREAM_HEVC_VIDEO_PLAYOUT_MODE_OFFSETS_US",
|
||||
None::<&str>,
|
||||
),
|
||||
(
|
||||
"LESAVKA_UPSTREAM_HEVC_AUDIO_PLAYOUT_OFFSET_US",
|
||||
None::<&str>,
|
||||
),
|
||||
(
|
||||
"LESAVKA_UPSTREAM_HEVC_VIDEO_PLAYOUT_OFFSET_US",
|
||||
None::<&str>,
|
||||
),
|
||||
],
|
||||
test,
|
||||
);
|
||||
@ -45,10 +64,18 @@ fn blessed_server_to_rct_offsets_are_release_defaults() {
|
||||
FACTORY_MJPEG_VIDEO_OFFSET_US, FACTORY_MJPEG_VIDEO_OFFSET_1280X720_30_US,
|
||||
"720p30 is the blessed default profile until a new lab matrix replaces it"
|
||||
);
|
||||
assert_eq!(
|
||||
FACTORY_HEVC_VIDEO_OFFSET_US, FACTORY_HEVC_VIDEO_OFFSET_1280X720_30_US,
|
||||
"HEVC defaults follow the validated 720p30 decode-to-MJPEG profile"
|
||||
);
|
||||
assert_eq!(FACTORY_MJPEG_VIDEO_OFFSET_1280X720_20_US, 162_659);
|
||||
assert_eq!(FACTORY_MJPEG_VIDEO_OFFSET_1280X720_30_US, 135_090);
|
||||
assert_eq!(FACTORY_MJPEG_VIDEO_OFFSET_1920X1080_20_US, 160_045);
|
||||
assert_eq!(FACTORY_MJPEG_VIDEO_OFFSET_1920X1080_30_US, 127_952);
|
||||
assert_eq!(FACTORY_HEVC_VIDEO_OFFSET_1280X720_20_US, 173_852);
|
||||
assert_eq!(FACTORY_HEVC_VIDEO_OFFSET_1280X720_30_US, 110_000);
|
||||
assert_eq!(FACTORY_HEVC_VIDEO_OFFSET_1920X1080_20_US, 160_045);
|
||||
assert_eq!(FACTORY_HEVC_VIDEO_OFFSET_1920X1080_30_US, 127_952);
|
||||
assert_eq!(
|
||||
FACTORY_MJPEG_AUDIO_MODE_OFFSETS_US,
|
||||
"1280x720@20=0,1280x720@30=0,1920x1080@20=0,1920x1080@30=0"
|
||||
@ -57,6 +84,10 @@ fn blessed_server_to_rct_offsets_are_release_defaults() {
|
||||
FACTORY_MJPEG_VIDEO_MODE_OFFSETS_US,
|
||||
"1280x720@20=162659,1280x720@30=135090,1920x1080@20=160045,1920x1080@30=127952"
|
||||
);
|
||||
assert_eq!(
|
||||
FACTORY_HEVC_VIDEO_MODE_OFFSETS_US,
|
||||
"1280x720@20=173852,1280x720@30=110000,1920x1080@20=160045,1920x1080@30=127952"
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
@ -83,16 +114,150 @@ fn every_supported_uvc_mode_loads_tailored_factory_offset() {
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn uvc_mode_detection_accepts_split_descriptor_fields_and_interval_fps() {
|
||||
with_clean_offset_env(|| {
|
||||
temp_env::with_vars(
|
||||
[
|
||||
("LESAVKA_UVC_WIDTH", Some("1280")),
|
||||
("LESAVKA_UVC_HEIGHT", Some("720")),
|
||||
("LESAVKA_UVC_FPS", Some("30")),
|
||||
("LESAVKA_UVC_INTERVAL", Some("500000")),
|
||||
],
|
||||
|| {
|
||||
assert_eq!(mode_env::current_uvc_mode().as_deref(), Some("1280x720@30"));
|
||||
},
|
||||
);
|
||||
});
|
||||
|
||||
with_clean_offset_env(|| {
|
||||
temp_env::with_vars(
|
||||
[
|
||||
("LESAVKA_UVC_WIDTH", Some("1920")),
|
||||
("LESAVKA_UVC_HEIGHT", Some("1080")),
|
||||
("LESAVKA_UVC_INTERVAL", Some("500000")),
|
||||
],
|
||||
|| {
|
||||
assert_eq!(
|
||||
mode_env::current_uvc_mode().as_deref(),
|
||||
Some("1920x1080@20")
|
||||
);
|
||||
},
|
||||
);
|
||||
});
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn uvc_mode_detection_falls_back_to_camera_fields_after_invalid_modes() {
|
||||
with_clean_offset_env(|| {
|
||||
temp_env::with_vars(
|
||||
[
|
||||
("UVC_MODE", Some("not-a-mode")),
|
||||
("LESAVKA_UVC_MODE", Some("1280x720")),
|
||||
("LESAVKA_CAM_WIDTH", Some("1280")),
|
||||
("LESAVKA_CAM_HEIGHT", Some("720")),
|
||||
("LESAVKA_CAM_FPS", Some("20")),
|
||||
],
|
||||
|| {
|
||||
assert_eq!(mode_env::current_uvc_mode().as_deref(), Some("1280x720@20"));
|
||||
},
|
||||
);
|
||||
});
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn default_snapshot_uses_factory_mjpeg_calibration() {
|
||||
with_clean_offset_env(|| {
|
||||
let state = snapshot_from_env();
|
||||
assert_eq!(state.profile, "mjpeg");
|
||||
assert_eq!(state.default_audio_offset_us, 0);
|
||||
assert_eq!(state.active_video_offset_us, FACTORY_MJPEG_VIDEO_OFFSET_US);
|
||||
assert_eq!(state.source, "factory");
|
||||
});
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn hevc_profile_uses_hevc_factory_calibration_without_changing_mjpeg_defaults() {
|
||||
with_clean_offset_env(|| {
|
||||
temp_env::with_vars(
|
||||
[
|
||||
("LESAVKA_CAM_CODEC", Some("hevc")),
|
||||
("LESAVKA_UVC_WIDTH", Some("1280")),
|
||||
("LESAVKA_UVC_HEIGHT", Some("720")),
|
||||
("LESAVKA_UVC_FPS", Some("20")),
|
||||
],
|
||||
|| {
|
||||
let state = snapshot_from_env();
|
||||
assert_eq!(state.profile, "hevc");
|
||||
assert_eq!(
|
||||
state.factory_video_offset_us,
|
||||
FACTORY_HEVC_VIDEO_OFFSET_1280X720_20_US
|
||||
);
|
||||
assert_eq!(
|
||||
state.default_video_offset_us,
|
||||
FACTORY_HEVC_VIDEO_OFFSET_1280X720_20_US
|
||||
);
|
||||
assert_eq!(FACTORY_MJPEG_VIDEO_OFFSET_1280X720_20_US, 162_659);
|
||||
},
|
||||
);
|
||||
});
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn hevc_profile_specific_env_map_overrides_generic_map() {
|
||||
with_clean_offset_env(|| {
|
||||
temp_env::with_vars(
|
||||
[
|
||||
("LESAVKA_UPLINK_CAMERA_CODEC", Some("h265")),
|
||||
("LESAVKA_UVC_WIDTH", Some("1280")),
|
||||
("LESAVKA_UVC_HEIGHT", Some("720")),
|
||||
("LESAVKA_UVC_FPS", Some("30")),
|
||||
(
|
||||
"LESAVKA_UPSTREAM_VIDEO_PLAYOUT_MODE_OFFSETS_US",
|
||||
Some("1280x720@30=111111"),
|
||||
),
|
||||
(
|
||||
"LESAVKA_UPSTREAM_HEVC_VIDEO_PLAYOUT_MODE_OFFSETS_US",
|
||||
Some("1280x720@30=222222"),
|
||||
),
|
||||
],
|
||||
|| {
|
||||
let state = snapshot_from_env();
|
||||
assert_eq!(state.profile, "hevc");
|
||||
assert_eq!(state.default_video_offset_us, 222_222);
|
||||
assert_eq!(state.source, "env");
|
||||
},
|
||||
);
|
||||
});
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn hevc_profile_ignores_legacy_generic_mjpeg_map() {
|
||||
with_clean_offset_env(|| {
|
||||
temp_env::with_vars(
|
||||
[
|
||||
("LESAVKA_UPLINK_CAMERA_CODEC", Some("hevc")),
|
||||
("LESAVKA_UVC_WIDTH", Some("1280")),
|
||||
("LESAVKA_UVC_HEIGHT", Some("720")),
|
||||
("LESAVKA_UVC_FPS", Some("30")),
|
||||
(
|
||||
"LESAVKA_UPSTREAM_VIDEO_PLAYOUT_MODE_OFFSETS_US",
|
||||
Some("1280x720@30=111111"),
|
||||
),
|
||||
],
|
||||
|| {
|
||||
let state = snapshot_from_env();
|
||||
assert_eq!(state.profile, "hevc");
|
||||
assert_eq!(
|
||||
state.default_video_offset_us,
|
||||
FACTORY_HEVC_VIDEO_OFFSET_1280X720_30_US
|
||||
);
|
||||
assert_eq!(state.source, "factory");
|
||||
},
|
||||
);
|
||||
});
|
||||
}
|
||||
|
||||
#[test]
|
||||
/// Keeps `default_snapshot_uses_uvc_mode_factory_calibration` explicit because it sits on calibration state, where persisted and factory offsets must stay auditable.
|
||||
/// Inputs are the typed parameters; output is the return value or side effect.
|
||||
@ -628,6 +793,63 @@ fn store_applies_all_calibration_actions_and_persists_defaults() {
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn restore_factory_rebuilds_current_profile_and_mode() {
|
||||
let file = NamedTempFile::new().expect("temp calibration file");
|
||||
std::fs::write(
|
||||
file.path(),
|
||||
r#"
|
||||
profile="mjpeg"
|
||||
default_audio_offset_us=0
|
||||
default_video_offset_us=1090000
|
||||
active_audio_offset_us=0
|
||||
active_video_offset_us=157789
|
||||
source="manual"
|
||||
confidence="manual"
|
||||
detail="old manual calibration"
|
||||
"#,
|
||||
)
|
||||
.expect("manual calibration seed");
|
||||
let path = file.path().to_string_lossy().to_string();
|
||||
with_clean_offset_env(|| {
|
||||
temp_env::with_vars(
|
||||
[
|
||||
("LESAVKA_CALIBRATION_PATH", Some(path.as_str())),
|
||||
("LESAVKA_UPLINK_CAMERA_CODEC", Some("hevc")),
|
||||
("LESAVKA_UVC_WIDTH", Some("1280")),
|
||||
("LESAVKA_UVC_HEIGHT", Some("720")),
|
||||
("LESAVKA_UVC_FPS", Some("20")),
|
||||
],
|
||||
|| {
|
||||
let runtime = Arc::new(UpstreamMediaRuntime::new());
|
||||
let store = CalibrationStore::load(runtime.clone());
|
||||
assert_eq!(store.current().active_video_offset_us, 157_789);
|
||||
|
||||
let factory = store
|
||||
.apply(CalibrationRequest {
|
||||
action: CalibrationAction::RestoreFactory as i32,
|
||||
..CalibrationRequest::default()
|
||||
})
|
||||
.expect("factory restore");
|
||||
assert_eq!(factory.profile, "hevc");
|
||||
assert_eq!(
|
||||
factory.active_video_offset_us,
|
||||
FACTORY_HEVC_VIDEO_OFFSET_1280X720_20_US
|
||||
);
|
||||
assert_eq!(
|
||||
factory.default_video_offset_us,
|
||||
FACTORY_HEVC_VIDEO_OFFSET_1280X720_20_US
|
||||
);
|
||||
assert_eq!(factory.source, "factory");
|
||||
assert_eq!(
|
||||
runtime.playout_offsets(),
|
||||
(FACTORY_HEVC_VIDEO_OFFSET_1280X720_20_US, 0)
|
||||
);
|
||||
},
|
||||
);
|
||||
});
|
||||
}
|
||||
|
||||
#[test]
|
||||
/// Keeps `transient_blind_estimate_updates_runtime_without_persisting_active_file_state` explicit because it sits on calibration state, where persisted and factory offsets must stay auditable.
|
||||
/// Inputs are the typed parameters; output is the return value or side effect.
|
||||
@ -661,3 +883,24 @@ fn transient_blind_estimate_updates_runtime_without_persisting_active_file_state
|
||||
},
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn transient_blind_estimate_without_note_records_telemetry_reason() {
|
||||
let dir = tempfile::tempdir().expect("calibration dir");
|
||||
let path = dir.path().join("calibration.toml");
|
||||
let path_string = path.to_string_lossy().to_string();
|
||||
temp_env::with_var(
|
||||
"LESAVKA_CALIBRATION_PATH",
|
||||
Some(path_string.as_str()),
|
||||
|| {
|
||||
let runtime = Arc::new(UpstreamMediaRuntime::new());
|
||||
let store = CalibrationStore::load(runtime);
|
||||
|
||||
let state = store.apply_transient_blind_estimate(1_000, 2_000, 12.5, -3.0, "");
|
||||
|
||||
assert_eq!(state.source, "blind");
|
||||
assert!(state.detail.contains("delivery skew 12.5ms"));
|
||||
assert!(state.detail.contains("enqueue skew -3.0ms"));
|
||||
},
|
||||
);
|
||||
}
|
||||
|
||||
@ -27,6 +27,7 @@ impl CameraOutput {
|
||||
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
|
||||
pub enum CameraCodec {
|
||||
H264,
|
||||
Hevc,
|
||||
Mjpeg,
|
||||
}
|
||||
|
||||
@ -35,6 +36,7 @@ impl CameraCodec {
|
||||
pub fn as_str(self) -> &'static str {
|
||||
match self {
|
||||
CameraCodec::H264 => "h264",
|
||||
CameraCodec::Hevc => "hevc",
|
||||
CameraCodec::Mjpeg => "mjpeg",
|
||||
}
|
||||
}
|
||||
|
||||
@ -95,6 +95,7 @@ fn parse_camera_output(raw: &str) -> Option<CameraOutput> {
|
||||
fn parse_camera_codec(raw: &str) -> Option<CameraCodec> {
|
||||
match raw.trim().to_ascii_lowercase().as_str() {
|
||||
"h264" => Some(CameraCodec::H264),
|
||||
"hevc" | "h265" | "h.265" => Some(CameraCodec::Hevc),
|
||||
"mjpeg" | "mjpg" | "jpeg" => Some(CameraCodec::Mjpeg),
|
||||
_ => None,
|
||||
}
|
||||
@ -113,14 +114,14 @@ fn select_hdmi_codec(hw_decode: bool) -> CameraCodec {
|
||||
}
|
||||
|
||||
fn select_uvc_codec(uvc_env: Option<&HashMap<String, String>>) -> CameraCodec {
|
||||
std::env::var("LESAVKA_UVC_CODEC")
|
||||
std::env::var("LESAVKA_UPLINK_CAMERA_CODEC")
|
||||
.ok()
|
||||
.or_else(|| std::env::var("LESAVKA_CAM_CODEC").ok())
|
||||
.or_else(|| uvc_env.and_then(|env| env.get("LESAVKA_UVC_CODEC").cloned()))
|
||||
.or_else(|| uvc_env.and_then(|env| env.get("LESAVKA_UPLINK_CAMERA_CODEC").cloned()))
|
||||
.or_else(|| uvc_env.and_then(|env| env.get("LESAVKA_CAM_CODEC").cloned()))
|
||||
.as_deref()
|
||||
.and_then(parse_camera_codec)
|
||||
.unwrap_or(CameraCodec::Mjpeg)
|
||||
.unwrap_or(CameraCodec::Hevc)
|
||||
}
|
||||
|
||||
/// Keeps `select_hdmi_config` explicit because it sits on camera selection, where negotiated profiles must match the server output contract.
|
||||
|
||||
@ -1,5 +1,7 @@
|
||||
// server/src/lib.rs
|
||||
|
||||
#![cfg_attr(coverage, allow(dead_code, unused_imports, unused_variables))]
|
||||
|
||||
pub const VERSION: &str = env!("CARGO_PKG_VERSION");
|
||||
pub const REVISION: &str = env!("LESAVKA_GIT_SHA");
|
||||
pub const BUILD_ID: &str = REVISION;
|
||||
|
||||
@ -35,7 +35,10 @@ use lesavka_server::{
|
||||
paste, runtime_support,
|
||||
runtime_support::init_tracing,
|
||||
security,
|
||||
upstream_media_runtime::{UpstreamClientTiming, UpstreamMediaKind, UpstreamMediaRuntime},
|
||||
upstream_media_runtime::{
|
||||
PlannedUpstreamPacket, UpstreamClientTiming, UpstreamMediaKind, UpstreamMediaRuntime,
|
||||
UpstreamPlanDecision,
|
||||
},
|
||||
uvc_runtime, video,
|
||||
};
|
||||
|
||||
|
||||
@ -5,25 +5,13 @@ const MEDIA_V2_DEFAULT_MAX_LIVE_AGE_MS: u64 = 1_000;
|
||||
#[cfg(not(coverage))]
|
||||
const MEDIA_V2_MAX_MIXED_CAPTURE_SPAN_US: u64 = 250_000;
|
||||
|
||||
#[cfg(not(coverage))]
|
||||
#[derive(Clone, Copy, Debug, Default)]
|
||||
struct MediaV2Clock {
|
||||
base_capture_pts_us: Option<u64>,
|
||||
}
|
||||
|
||||
#[cfg(not(coverage))]
|
||||
impl MediaV2Clock {
|
||||
fn local_pts_us(&mut self, capture_pts_us: u64) -> u64 {
|
||||
let base = *self.base_capture_pts_us.get_or_insert(capture_pts_us);
|
||||
capture_pts_us.saturating_sub(base)
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(not(coverage))]
|
||||
#[derive(Clone, Copy, Debug, Default, Eq, PartialEq)]
|
||||
struct MediaV2BundleFacts {
|
||||
has_audio: bool,
|
||||
has_video: bool,
|
||||
capture_start_us: u64,
|
||||
capture_end_us: u64,
|
||||
capture_span_us: u64,
|
||||
max_queue_age_ms: u32,
|
||||
}
|
||||
@ -38,6 +26,18 @@ struct MediaV2HandoffSchedule {
|
||||
relative_video_delay: Duration,
|
||||
}
|
||||
|
||||
#[cfg(not(coverage))]
|
||||
struct MediaV2ScheduledAudio {
|
||||
packets: Vec<AudioPacket>,
|
||||
due_at: tokio::time::Instant,
|
||||
}
|
||||
|
||||
#[cfg(not(coverage))]
|
||||
struct MediaV2ScheduledVideo {
|
||||
packet: VideoPacket,
|
||||
due_at: tokio::time::Instant,
|
||||
}
|
||||
|
||||
#[cfg(not(coverage))]
|
||||
/// Keeps `summarize_media_v2_bundle` explicit because it sits on relay RPC orchestration, where hardware failures must surface without stopping the server.
|
||||
/// Inputs are the typed parameters; output is the return value or side effect.
|
||||
@ -68,6 +68,8 @@ fn summarize_media_v2_bundle(bundle: &UpstreamMediaBundle) -> Option<MediaV2Bund
|
||||
Some(MediaV2BundleFacts {
|
||||
has_audio,
|
||||
has_video,
|
||||
capture_start_us,
|
||||
capture_end_us,
|
||||
capture_span_us: capture_end_us.saturating_sub(capture_start_us),
|
||||
max_queue_age_ms,
|
||||
})
|
||||
@ -157,6 +159,19 @@ fn media_v2_handoff_schedule(
|
||||
})
|
||||
}
|
||||
|
||||
#[cfg(not(coverage))]
|
||||
fn media_v2_frame_step_us(fps: u32) -> u64 {
|
||||
if fps == 0 {
|
||||
return 1;
|
||||
}
|
||||
(1_000_000_u64 / u64::from(fps)).max(1)
|
||||
}
|
||||
|
||||
#[cfg(not(coverage))]
|
||||
fn media_v2_drop_late_plan(plan: &PlannedUpstreamPacket) -> bool {
|
||||
plan.late_by >= media_v2_max_live_age()
|
||||
}
|
||||
|
||||
#[cfg(not(coverage))]
|
||||
/// Keeps `sleep_until_media_v2` explicit because it sits on relay RPC orchestration, where hardware failures must surface without stopping the server.
|
||||
/// Inputs are the typed parameters; output is the return value or side effect.
|
||||
@ -167,58 +182,135 @@ async fn sleep_until_media_v2(due_at: tokio::time::Instant) {
|
||||
}
|
||||
|
||||
#[cfg(not(coverage))]
|
||||
/// Keeps `push_media_v2_audio` explicit because it sits on relay RPC orchestration, where hardware failures must surface without stopping the server.
|
||||
/// Inputs are the typed parameters; output is the return value or side effect.
|
||||
async fn push_media_v2_audio(
|
||||
/// Rebase audio packet PTS before scheduling UAC handoff.
|
||||
///
|
||||
/// Inputs: bundle audio packets and the shared v2 media clock.
|
||||
/// Outputs: audio packets with server-local PTS.
|
||||
/// Why: ingress must keep draining gRPC while a separate handoff worker sleeps,
|
||||
/// so clock rebasing has to happen before packets leave the receive loop.
|
||||
fn prepare_media_v2_audio(
|
||||
audio_packets: &mut Vec<AudioPacket>,
|
||||
clock: &mut MediaV2Clock,
|
||||
sink: &mut lesavka_server::audio::Voice,
|
||||
upstream_media_rt: &UpstreamMediaRuntime,
|
||||
due_at: tokio::time::Instant,
|
||||
) {
|
||||
sleep_until_media_v2(due_at).await;
|
||||
for mut audio in audio_packets.drain(..) {
|
||||
let capture_pts_us = packet_audio_capture_pts_us(&audio);
|
||||
audio.pts = clock.local_pts_us(capture_pts_us);
|
||||
sink.push(&audio);
|
||||
upstream_media_rt.mark_audio_presented(audio.pts, due_at);
|
||||
bundle_base_remote_pts_us: u64,
|
||||
bundle_epoch: tokio::time::Instant,
|
||||
) -> Option<MediaV2ScheduledAudio> {
|
||||
let mut due_at: Option<tokio::time::Instant> = None;
|
||||
let packets: Vec<AudioPacket> = audio_packets
|
||||
.drain(..)
|
||||
.filter_map(|mut audio| {
|
||||
let capture_pts_us = packet_audio_capture_pts_us(&audio);
|
||||
match upstream_media_rt.plan_bundled_pts(
|
||||
UpstreamMediaKind::Microphone,
|
||||
capture_pts_us,
|
||||
1,
|
||||
bundle_base_remote_pts_us,
|
||||
bundle_epoch,
|
||||
) {
|
||||
UpstreamPlanDecision::Play(plan) if !media_v2_drop_late_plan(&plan) => {
|
||||
due_at = Some(due_at.map_or(plan.due_at, |current| current.min(plan.due_at)));
|
||||
audio.pts = plan.local_pts_us;
|
||||
Some(audio)
|
||||
}
|
||||
_ => None,
|
||||
}
|
||||
})
|
||||
.collect();
|
||||
Some(MediaV2ScheduledAudio {
|
||||
packets,
|
||||
due_at: due_at?,
|
||||
})
|
||||
.filter(|scheduled| !scheduled.packets.is_empty())
|
||||
}
|
||||
|
||||
#[cfg(not(coverage))]
|
||||
/// Rebase one video packet before scheduling UVC handoff.
|
||||
///
|
||||
/// Inputs: optional bundle video packet and the shared v2 media clock.
|
||||
/// Outputs: video packet with server-local PTS.
|
||||
/// Why: the receive loop should never wait for presentation timing; it only
|
||||
/// prepares media for the handoff worker.
|
||||
fn prepare_media_v2_video(
|
||||
video: Option<VideoPacket>,
|
||||
upstream_media_rt: &UpstreamMediaRuntime,
|
||||
bundle_base_remote_pts_us: u64,
|
||||
bundle_epoch: tokio::time::Instant,
|
||||
frame_step_us: u64,
|
||||
) -> Option<MediaV2ScheduledVideo> {
|
||||
let mut video = video?;
|
||||
let capture_pts_us = packet_video_capture_pts_us(&video);
|
||||
match upstream_media_rt.plan_bundled_pts(
|
||||
UpstreamMediaKind::Camera,
|
||||
capture_pts_us,
|
||||
frame_step_us,
|
||||
bundle_base_remote_pts_us,
|
||||
bundle_epoch,
|
||||
) {
|
||||
UpstreamPlanDecision::Play(plan) if !media_v2_drop_late_plan(&plan) => {
|
||||
video.pts = plan.local_pts_us;
|
||||
Some(MediaV2ScheduledVideo {
|
||||
packet: video,
|
||||
due_at: plan.due_at,
|
||||
})
|
||||
}
|
||||
_ => None,
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(not(coverage))]
|
||||
#[allow(clippy::too_many_arguments)]
|
||||
/// Keeps `feed_media_v2_video` explicit because it sits on relay RPC orchestration, where hardware failures must surface without stopping the server.
|
||||
/// Inputs are the typed parameters; output is the return value or side effect.
|
||||
async fn feed_media_v2_video(
|
||||
video: Option<VideoPacket>,
|
||||
clock: &mut MediaV2Clock,
|
||||
relay: &Arc<lesavka_server::video::CameraRelay>,
|
||||
upstream_media_rt: &UpstreamMediaRuntime,
|
||||
due_at: tokio::time::Instant,
|
||||
video_presented_once: &mut bool,
|
||||
/// Push scheduled audio packets to UAC without blocking gRPC ingress.
|
||||
///
|
||||
/// Inputs: scheduled audio queue, UAC sink, and upstream timing runtime.
|
||||
/// Outputs: sink writes and presentation telemetry.
|
||||
/// Why: sleeping in the receive loop created HTTP/2 backlog, so handoff timing
|
||||
/// belongs in a worker that can lag or catch up independently.
|
||||
async fn run_media_v2_audio_handoff(
|
||||
mut rx: tokio::sync::mpsc::Receiver<MediaV2ScheduledAudio>,
|
||||
mut sink: lesavka_server::audio::Voice,
|
||||
upstream_media_rt: Arc<UpstreamMediaRuntime>,
|
||||
) {
|
||||
while let Some(item) = rx.recv().await {
|
||||
sleep_until_media_v2(item.due_at).await;
|
||||
for audio in item.packets {
|
||||
let pts = audio.pts;
|
||||
sink.push(&audio);
|
||||
upstream_media_rt.mark_audio_presented(pts, item.due_at);
|
||||
}
|
||||
}
|
||||
sink.finish();
|
||||
}
|
||||
|
||||
#[cfg(not(coverage))]
|
||||
/// Feed scheduled video packets to UVC without blocking gRPC ingress.
|
||||
///
|
||||
/// Inputs: scheduled video queue, camera relay, timing runtime, and log IDs.
|
||||
/// Outputs: relay feeds and presentation telemetry.
|
||||
/// Why: UVC sync offsets still require sleeping, but that sleep must not slow
|
||||
/// the network receive loop.
|
||||
async fn run_media_v2_video_handoff(
|
||||
mut rx: tokio::sync::mpsc::Receiver<MediaV2ScheduledVideo>,
|
||||
relay: Arc<lesavka_server::video::CameraRelay>,
|
||||
upstream_media_rt: Arc<UpstreamMediaRuntime>,
|
||||
rpc_id: u64,
|
||||
session_id: u64,
|
||||
camera_session_id: u64,
|
||||
) {
|
||||
let Some(mut video) = video else {
|
||||
return;
|
||||
};
|
||||
sleep_until_media_v2(due_at).await;
|
||||
let capture_pts_us = packet_video_capture_pts_us(&video);
|
||||
video.pts = clock.local_pts_us(capture_pts_us);
|
||||
let presented_pts = video.pts;
|
||||
relay.feed(video);
|
||||
if !*video_presented_once {
|
||||
info!(
|
||||
rpc_id,
|
||||
session_id,
|
||||
camera_session_id,
|
||||
pts = presented_pts,
|
||||
"📦 first v2 bundled video frame fed to camera sink"
|
||||
);
|
||||
*video_presented_once = true;
|
||||
let mut video_presented_once = false;
|
||||
while let Some(item) = rx.recv().await {
|
||||
sleep_until_media_v2(item.due_at).await;
|
||||
let presented_pts = item.packet.pts;
|
||||
relay.feed(item.packet);
|
||||
if !video_presented_once {
|
||||
info!(
|
||||
rpc_id,
|
||||
session_id,
|
||||
camera_session_id,
|
||||
pts = presented_pts,
|
||||
"📦 first v2 bundled video frame fed to camera sink"
|
||||
);
|
||||
video_presented_once = true;
|
||||
}
|
||||
upstream_media_rt.mark_video_presented(presented_pts, item.due_at);
|
||||
}
|
||||
upstream_media_rt.mark_video_presented(presented_pts, due_at);
|
||||
}
|
||||
|
||||
#[cfg(not(coverage))]
|
||||
|
||||
@ -41,7 +41,7 @@ impl Handler {
|
||||
));
|
||||
};
|
||||
let uac_dev = std::env::var("LESAVKA_UAC_DEV").unwrap_or_else(|_| "hw:UAC2Gadget,0".into());
|
||||
let mut sink = runtime_support::open_voice_with_retry(&uac_dev)
|
||||
let sink = runtime_support::open_voice_with_retry(&uac_dev)
|
||||
.await
|
||||
.map_err(|e| {
|
||||
self.upstream_media_rt.close_camera(camera_lease.generation);
|
||||
@ -55,11 +55,26 @@ impl Handler {
|
||||
tokio::spawn(async move {
|
||||
let _microphone_sink_permit = microphone_sink_permit;
|
||||
let mut inbound = req.into_inner();
|
||||
let mut clock = MediaV2Clock::default();
|
||||
let mut last_bundle_session_id = None;
|
||||
let mut last_bundle_seq = None;
|
||||
let mut video_presented_once = false;
|
||||
let mut outcome = "aborted";
|
||||
let (audio_handoff_tx, audio_handoff_rx) =
|
||||
tokio::sync::mpsc::channel::<MediaV2ScheduledAudio>(32);
|
||||
let (video_handoff_tx, video_handoff_rx) =
|
||||
tokio::sync::mpsc::channel::<MediaV2ScheduledVideo>(32);
|
||||
let audio_worker = tokio::spawn(run_media_v2_audio_handoff(
|
||||
audio_handoff_rx,
|
||||
sink,
|
||||
upstream_media_rt.clone(),
|
||||
));
|
||||
let video_worker = tokio::spawn(run_media_v2_video_handoff(
|
||||
video_handoff_rx,
|
||||
relay.clone(),
|
||||
upstream_media_rt.clone(),
|
||||
rpc_id,
|
||||
camera_lease.session_id,
|
||||
camera_session_id,
|
||||
));
|
||||
|
||||
while let Some(bundle_result) = inbound.next().await {
|
||||
let mut bundle = match bundle_result {
|
||||
@ -73,6 +88,7 @@ impl Handler {
|
||||
break;
|
||||
}
|
||||
};
|
||||
let bundle_arrived_at = tokio::time::Instant::now();
|
||||
if !camera_rt.is_active(camera_session_id)
|
||||
|| !upstream_media_rt.is_camera_active(camera_lease.generation)
|
||||
|| !upstream_media_rt.is_microphone_active(microphone_lease.generation)
|
||||
@ -87,7 +103,6 @@ impl Handler {
|
||||
next_session_id = bundle.session_id,
|
||||
"📦 v2 bundled client session changed; resetting local media clock"
|
||||
);
|
||||
clock = MediaV2Clock::default();
|
||||
last_bundle_seq = None;
|
||||
}
|
||||
last_bundle_session_id = Some(bundle.session_id);
|
||||
@ -160,82 +175,68 @@ impl Handler {
|
||||
video_offset_us,
|
||||
"📦 v2 scheduled bundled UAC/UVC handoff from one capture clock"
|
||||
);
|
||||
let bundle_epoch = bundle_arrived_at + schedule.common_delay;
|
||||
let bundle_base_remote_pts_us = facts.capture_start_us;
|
||||
let frame_step_us = media_v2_frame_step_us(camera_cfg.fps);
|
||||
|
||||
match (schedule.audio_due_at, schedule.video_due_at) {
|
||||
(Some(audio_due_at), Some(video_due_at)) if audio_due_at <= video_due_at => {
|
||||
push_media_v2_audio(
|
||||
&mut bundle.audio,
|
||||
&mut clock,
|
||||
&mut sink,
|
||||
&upstream_media_rt,
|
||||
audio_due_at,
|
||||
)
|
||||
.await;
|
||||
feed_media_v2_video(
|
||||
bundle.video.take(),
|
||||
&mut clock,
|
||||
&relay,
|
||||
&upstream_media_rt,
|
||||
video_due_at,
|
||||
&mut video_presented_once,
|
||||
rpc_id,
|
||||
camera_lease.session_id,
|
||||
camera_session_id,
|
||||
)
|
||||
.await;
|
||||
}
|
||||
(Some(audio_due_at), Some(video_due_at)) => {
|
||||
feed_media_v2_video(
|
||||
bundle.video.take(),
|
||||
&mut clock,
|
||||
&relay,
|
||||
&upstream_media_rt,
|
||||
video_due_at,
|
||||
&mut video_presented_once,
|
||||
rpc_id,
|
||||
camera_lease.session_id,
|
||||
camera_session_id,
|
||||
)
|
||||
.await;
|
||||
push_media_v2_audio(
|
||||
&mut bundle.audio,
|
||||
&mut clock,
|
||||
&mut sink,
|
||||
&upstream_media_rt,
|
||||
audio_due_at,
|
||||
)
|
||||
.await;
|
||||
}
|
||||
(Some(audio_due_at), None) => {
|
||||
push_media_v2_audio(
|
||||
&mut bundle.audio,
|
||||
&mut clock,
|
||||
&mut sink,
|
||||
&upstream_media_rt,
|
||||
audio_due_at,
|
||||
)
|
||||
.await;
|
||||
}
|
||||
(None, Some(video_due_at)) => {
|
||||
feed_media_v2_video(
|
||||
bundle.video.take(),
|
||||
&mut clock,
|
||||
&relay,
|
||||
&upstream_media_rt,
|
||||
video_due_at,
|
||||
&mut video_presented_once,
|
||||
rpc_id,
|
||||
camera_lease.session_id,
|
||||
camera_session_id,
|
||||
)
|
||||
.await;
|
||||
}
|
||||
(None, None) => {}
|
||||
if schedule.audio_due_at.is_some()
|
||||
&& let Some(scheduled_audio) = prepare_media_v2_audio(
|
||||
&mut bundle.audio,
|
||||
&upstream_media_rt,
|
||||
bundle_base_remote_pts_us,
|
||||
bundle_epoch,
|
||||
)
|
||||
&& audio_handoff_tx
|
||||
.send(scheduled_audio)
|
||||
.await
|
||||
.is_err()
|
||||
{
|
||||
warn!(
|
||||
rpc_id,
|
||||
session_id = camera_lease.session_id,
|
||||
"📦 v2 audio handoff worker stopped while receiving bundled media"
|
||||
);
|
||||
break;
|
||||
}
|
||||
if schedule.video_due_at.is_some()
|
||||
&& let Some(scheduled_video) = prepare_media_v2_video(
|
||||
bundle.video.take(),
|
||||
&upstream_media_rt,
|
||||
bundle_base_remote_pts_us,
|
||||
bundle_epoch,
|
||||
frame_step_us,
|
||||
)
|
||||
&& video_handoff_tx
|
||||
.send(scheduled_video)
|
||||
.await
|
||||
.is_err()
|
||||
{
|
||||
warn!(
|
||||
rpc_id,
|
||||
session_id = camera_lease.session_id,
|
||||
"📦 v2 video handoff worker stopped while receiving bundled media"
|
||||
);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
outcome = if outcome == "aborted" { "closed" } else { outcome };
|
||||
sink.finish();
|
||||
drop(audio_handoff_tx);
|
||||
drop(video_handoff_tx);
|
||||
if let Err(err) = audio_worker.await {
|
||||
warn!(
|
||||
rpc_id,
|
||||
session_id = camera_lease.session_id,
|
||||
"📦 v2 audio handoff worker join failed: {err}"
|
||||
);
|
||||
}
|
||||
if let Err(err) = video_worker.await {
|
||||
warn!(
|
||||
rpc_id,
|
||||
session_id = camera_lease.session_id,
|
||||
"📦 v2 video handoff worker join failed: {err}"
|
||||
);
|
||||
}
|
||||
upstream_media_rt.close_camera(camera_lease.generation);
|
||||
upstream_media_rt.close_microphone(microphone_lease.generation);
|
||||
info!(
|
||||
|
||||
@ -1,440 +1,5 @@
|
||||
#[cfg(coverage)]
|
||||
fn upstream_stale_drop_budget() -> Duration {
|
||||
let drop_ms = std::env::var("LESAVKA_UPSTREAM_STALE_DROP_MS")
|
||||
.ok()
|
||||
.and_then(|value| value.trim().parse::<u64>().ok())
|
||||
.unwrap_or(80);
|
||||
Duration::from_millis(drop_ms)
|
||||
}
|
||||
include!("relay_service_coverage/freshness_helpers.rs");
|
||||
|
||||
#[cfg(coverage)]
|
||||
/// Keeps `retain_freshest_video_packet` explicit because it sits on relay RPC orchestration, where hardware failures must surface without stopping the server.
|
||||
/// Inputs are the typed parameters; output is the return value or side effect.
|
||||
fn retain_freshest_video_packet(
|
||||
pending: &mut std::collections::VecDeque<VideoPacket>,
|
||||
) -> usize {
|
||||
if pending.len() <= 1 {
|
||||
return 0;
|
||||
}
|
||||
let newest = pending.pop_back().expect("non-empty pending video queue");
|
||||
let dropped = pending.len();
|
||||
pending.clear();
|
||||
pending.push_back(newest);
|
||||
dropped
|
||||
}
|
||||
|
||||
#[cfg(coverage)]
|
||||
const AUDIO_PENDING_LIVE_WINDOW_PACKETS: usize = 8;
|
||||
|
||||
#[cfg(coverage)]
|
||||
/// Keeps `retain_freshest_audio_packet` explicit because it sits on relay RPC orchestration, where hardware failures must surface without stopping the server.
|
||||
/// Inputs are the typed parameters; output is the return value or side effect.
|
||||
fn retain_freshest_audio_packet(
|
||||
pending: &mut std::collections::VecDeque<AudioPacket>,
|
||||
) -> usize {
|
||||
if pending.len() <= AUDIO_PENDING_LIVE_WINDOW_PACKETS {
|
||||
return 0;
|
||||
}
|
||||
let dropped = pending.len() - AUDIO_PENDING_LIVE_WINDOW_PACKETS;
|
||||
pending.drain(..dropped);
|
||||
dropped
|
||||
}
|
||||
|
||||
#[cfg(coverage)]
|
||||
#[tonic::async_trait]
|
||||
impl Relay for Handler {
|
||||
type StreamKeyboardStream = ReceiverStream<Result<KeyboardReport, Status>>;
|
||||
type StreamMouseStream = ReceiverStream<Result<MouseReport, Status>>;
|
||||
type CaptureVideoStream = VideoStream;
|
||||
type CaptureAudioStream = AudioStream;
|
||||
type StreamMicrophoneStream = ReceiverStream<Result<Empty, Status>>;
|
||||
type StreamCameraStream = ReceiverStream<Result<Empty, Status>>;
|
||||
type StreamWebcamMediaStream = ReceiverStream<Result<Empty, Status>>;
|
||||
type RunOutputDelayProbeStream = ReceiverStream<Result<OutputDelayProbeReply, Status>>;
|
||||
|
||||
/// Keeps `stream_keyboard` explicit because it sits on relay RPC orchestration, where hardware failures must surface without stopping the server.
|
||||
/// Inputs are the typed parameters; output is the return value or side effect.
|
||||
async fn stream_keyboard(
|
||||
&self,
|
||||
req: Request<tonic::Streaming<KeyboardReport>>,
|
||||
) -> Result<Response<Self::StreamKeyboardStream>, Status> {
|
||||
let (tx, rx) = tokio::sync::mpsc::channel(32);
|
||||
let kb = self.kb.clone();
|
||||
let report_delay = live_keyboard_report_delay();
|
||||
|
||||
tokio::spawn(async move {
|
||||
let mut s = req.into_inner();
|
||||
while let Some(pkt) = s.next().await.transpose()? {
|
||||
let _ = runtime_support::write_hid_report(&kb, &hid_endpoint(0), &pkt.data).await;
|
||||
tx.send(Ok(pkt)).await.ok();
|
||||
if !report_delay.is_zero() {
|
||||
#[cfg(not(coverage))]
|
||||
tokio::time::sleep(report_delay).await;
|
||||
}
|
||||
}
|
||||
Ok::<(), Status>(())
|
||||
});
|
||||
|
||||
Ok(Response::new(ReceiverStream::new(rx)))
|
||||
}
|
||||
|
||||
/// Keeps `stream_mouse` explicit because it sits on relay RPC orchestration, where hardware failures must surface without stopping the server.
|
||||
/// Inputs are the typed parameters; output is the return value or side effect.
|
||||
async fn stream_mouse(
|
||||
&self,
|
||||
req: Request<tonic::Streaming<MouseReport>>,
|
||||
) -> Result<Response<Self::StreamMouseStream>, Status> {
|
||||
let (tx, rx) = tokio::sync::mpsc::channel(32);
|
||||
let ms = self.ms.clone();
|
||||
|
||||
tokio::spawn(async move {
|
||||
let mut s = req.into_inner();
|
||||
while let Some(pkt) = s.next().await.transpose()? {
|
||||
let _ = runtime_support::write_hid_report(&ms, &hid_endpoint(1), &pkt.data).await;
|
||||
tx.send(Ok(pkt)).await.ok();
|
||||
}
|
||||
Ok::<(), Status>(())
|
||||
});
|
||||
|
||||
Ok(Response::new(ReceiverStream::new(rx)))
|
||||
}
|
||||
|
||||
/// Keeps `stream_microphone` explicit because it sits on relay RPC orchestration, where hardware failures must surface without stopping the server.
|
||||
/// Inputs are the typed parameters; output is the return value or side effect.
|
||||
async fn stream_microphone(
|
||||
&self,
|
||||
req: Request<tonic::Streaming<AudioPacket>>,
|
||||
) -> Result<Response<Self::StreamMicrophoneStream>, Status> {
|
||||
let lease = self.upstream_media_rt.activate_microphone();
|
||||
let Some(microphone_sink_permit) = self
|
||||
.upstream_media_rt
|
||||
.reserve_microphone_sink(lease.generation)
|
||||
.await
|
||||
else {
|
||||
return Err(Status::aborted(
|
||||
"microphone stream superseded before sink became available",
|
||||
));
|
||||
};
|
||||
let uac_dev = std::env::var("LESAVKA_UAC_DEV").unwrap_or_else(|_| "hw:UAC2Gadget,0".into());
|
||||
let mut sink = runtime_support::open_voice_with_retry(&uac_dev)
|
||||
.await
|
||||
.map_err(|e| {
|
||||
self.upstream_media_rt.close_microphone(lease.generation);
|
||||
Status::internal(format!("{e:#}"))
|
||||
})?;
|
||||
let (tx, rx) = tokio::sync::mpsc::channel(1);
|
||||
let upstream_media_rt = self.upstream_media_rt.clone();
|
||||
|
||||
tokio::spawn(async move {
|
||||
let _microphone_sink_permit = microphone_sink_permit;
|
||||
let mut inbound = req.into_inner();
|
||||
let mut pending = std::collections::VecDeque::new();
|
||||
let mut inbound_closed = false;
|
||||
let stale_drop_budget = upstream_stale_drop_budget();
|
||||
loop {
|
||||
if !upstream_media_rt.is_microphone_active(lease.generation) {
|
||||
break;
|
||||
}
|
||||
if !inbound_closed {
|
||||
let next_packet = tokio::select! {
|
||||
packet = inbound.next() => Some(packet),
|
||||
_ = tokio::time::sleep(Duration::from_millis(25)) => None,
|
||||
};
|
||||
if let Some(next_packet) = next_packet {
|
||||
match next_packet.transpose()? {
|
||||
Some(pkt) => {
|
||||
pending.push_back(pkt);
|
||||
let _ = retain_freshest_audio_packet(&mut pending);
|
||||
}
|
||||
None => inbound_closed = true,
|
||||
}
|
||||
}
|
||||
}
|
||||
let Some(mut pkt) = pending.pop_front() else {
|
||||
if inbound_closed {
|
||||
break;
|
||||
}
|
||||
continue;
|
||||
};
|
||||
let plan = match upstream_media_rt.plan_audio_pts(pkt.pts) {
|
||||
lesavka_server::upstream_media_runtime::UpstreamPlanDecision::AwaitingPair => {
|
||||
if inbound_closed {
|
||||
continue;
|
||||
}
|
||||
pending.push_front(pkt);
|
||||
continue;
|
||||
}
|
||||
lesavka_server::upstream_media_runtime::UpstreamPlanDecision::DropBeforeOverlap => {
|
||||
continue;
|
||||
}
|
||||
lesavka_server::upstream_media_runtime::UpstreamPlanDecision::DropStale(_) => {
|
||||
continue;
|
||||
}
|
||||
lesavka_server::upstream_media_runtime::UpstreamPlanDecision::StartupFailed(_) => {
|
||||
break;
|
||||
}
|
||||
lesavka_server::upstream_media_runtime::UpstreamPlanDecision::Play(plan) => plan,
|
||||
};
|
||||
if plan.late_by > stale_drop_budget {
|
||||
continue;
|
||||
}
|
||||
tokio::time::sleep_until(plan.due_at).await;
|
||||
let actual_late_by = tokio::time::Instant::now()
|
||||
.checked_duration_since(plan.due_at)
|
||||
.unwrap_or_default();
|
||||
if actual_late_by > stale_drop_budget {
|
||||
continue;
|
||||
}
|
||||
pkt.pts = plan.local_pts_us;
|
||||
sink.push(&pkt);
|
||||
upstream_media_rt.mark_audio_presented(pkt.pts, plan.due_at);
|
||||
}
|
||||
sink.finish();
|
||||
upstream_media_rt.close_microphone(lease.generation);
|
||||
let _ = tx.send(Ok(Empty {})).await;
|
||||
Ok::<(), Status>(())
|
||||
});
|
||||
|
||||
Ok(Response::new(ReceiverStream::new(rx)))
|
||||
}
|
||||
|
||||
/// Keeps `stream_webcam_media` explicit because it sits on relay RPC orchestration, where hardware failures must surface without stopping the server.
|
||||
/// Inputs are the typed parameters; output is the return value or side effect.
|
||||
async fn stream_webcam_media(
|
||||
&self,
|
||||
req: Request<tonic::Streaming<UpstreamMediaBundle>>,
|
||||
) -> Result<Response<Self::StreamWebcamMediaStream>, Status> {
|
||||
let microphone_lease = self.upstream_media_rt.activate_microphone();
|
||||
let camera_lease = self.upstream_media_rt.activate_camera();
|
||||
let (tx, rx) = tokio::sync::mpsc::channel(1);
|
||||
let upstream_media_rt = self.upstream_media_rt.clone();
|
||||
|
||||
tokio::spawn(async move {
|
||||
let mut inbound = req.into_inner();
|
||||
while let Some(bundle) = inbound.next().await.transpose()? {
|
||||
if let Some(video) = bundle.video {
|
||||
upstream_media_rt.record_client_timing(
|
||||
UpstreamMediaKind::Camera,
|
||||
video_client_timing(&video),
|
||||
);
|
||||
}
|
||||
for audio in bundle.audio {
|
||||
upstream_media_rt.record_client_timing(
|
||||
UpstreamMediaKind::Microphone,
|
||||
audio_client_timing(&audio),
|
||||
);
|
||||
}
|
||||
}
|
||||
upstream_media_rt.close_camera(camera_lease.generation);
|
||||
upstream_media_rt.close_microphone(microphone_lease.generation);
|
||||
let _ = tx.send(Ok(Empty {})).await;
|
||||
Ok::<(), Status>(())
|
||||
});
|
||||
|
||||
Ok(Response::new(ReceiverStream::new(rx)))
|
||||
}
|
||||
|
||||
/// Keeps `stream_camera` explicit because it sits on relay RPC orchestration, where hardware failures must surface without stopping the server.
|
||||
/// Inputs are the typed parameters; output is the return value or side effect.
|
||||
async fn stream_camera(
|
||||
&self,
|
||||
req: Request<tonic::Streaming<VideoPacket>>,
|
||||
) -> Result<Response<Self::StreamCameraStream>, Status> {
|
||||
let cfg = camera::current_camera_config();
|
||||
let upstream_lease = self.upstream_media_rt.activate_camera();
|
||||
let (session_id, relay, _relay_reused) = self.camera_rt.activate(&cfg).await?;
|
||||
let camera_rt = self.camera_rt.clone();
|
||||
let upstream_media_rt = self.upstream_media_rt.clone();
|
||||
let (tx, rx) = tokio::sync::mpsc::channel(1);
|
||||
let frame_step_us = (1_000_000u64 / u64::from(cfg.fps.max(1))).max(1);
|
||||
|
||||
tokio::spawn(async move {
|
||||
let mut s = req.into_inner();
|
||||
let mut pending = std::collections::VecDeque::new();
|
||||
let mut inbound_closed = false;
|
||||
let stale_drop_budget = upstream_stale_drop_budget();
|
||||
loop {
|
||||
if !camera_rt.is_active(session_id)
|
||||
|| !upstream_media_rt.is_camera_active(upstream_lease.generation)
|
||||
{
|
||||
break;
|
||||
}
|
||||
if !inbound_closed {
|
||||
let next_packet = tokio::select! {
|
||||
packet = s.next() => Some(packet),
|
||||
_ = tokio::time::sleep(Duration::from_millis(25)) => None,
|
||||
};
|
||||
if let Some(next_packet) = next_packet {
|
||||
match next_packet.transpose()? {
|
||||
Some(pkt) => {
|
||||
pending.push_back(pkt);
|
||||
let _ = retain_freshest_video_packet(&mut pending);
|
||||
}
|
||||
None => inbound_closed = true,
|
||||
}
|
||||
}
|
||||
}
|
||||
let Some(mut pkt) = pending.pop_front() else {
|
||||
if inbound_closed {
|
||||
break;
|
||||
}
|
||||
continue;
|
||||
};
|
||||
let plan = match upstream_media_rt.plan_video_pts(pkt.pts, frame_step_us) {
|
||||
lesavka_server::upstream_media_runtime::UpstreamPlanDecision::AwaitingPair => {
|
||||
if inbound_closed {
|
||||
continue;
|
||||
}
|
||||
pending.push_front(pkt);
|
||||
continue;
|
||||
}
|
||||
lesavka_server::upstream_media_runtime::UpstreamPlanDecision::DropBeforeOverlap => {
|
||||
continue;
|
||||
}
|
||||
lesavka_server::upstream_media_runtime::UpstreamPlanDecision::DropStale(_) => {
|
||||
continue;
|
||||
}
|
||||
lesavka_server::upstream_media_runtime::UpstreamPlanDecision::StartupFailed(_) => {
|
||||
break;
|
||||
}
|
||||
lesavka_server::upstream_media_runtime::UpstreamPlanDecision::Play(plan) => plan,
|
||||
};
|
||||
if !upstream_media_rt
|
||||
.wait_for_audio_master(plan.local_pts_us, plan.due_at)
|
||||
.await
|
||||
{
|
||||
upstream_media_rt.record_video_freeze("coverage video froze awaiting audio master");
|
||||
continue;
|
||||
}
|
||||
if plan.late_by > stale_drop_budget {
|
||||
let _ = retain_freshest_video_packet(&mut pending);
|
||||
continue;
|
||||
}
|
||||
tokio::time::sleep_until(plan.due_at).await;
|
||||
pkt.pts = plan.local_pts_us;
|
||||
let presented_pts = pkt.pts;
|
||||
relay.feed(pkt);
|
||||
upstream_media_rt.mark_video_presented(presented_pts, plan.due_at);
|
||||
}
|
||||
upstream_media_rt.close_camera(upstream_lease.generation);
|
||||
tx.send(Ok(Empty {})).await.ok();
|
||||
Ok::<(), Status>(())
|
||||
});
|
||||
|
||||
Ok(Response::new(ReceiverStream::new(rx)))
|
||||
}
|
||||
|
||||
/// Keeps `run_output_delay_probe` explicit because it sits on relay RPC orchestration, where hardware failures must surface without stopping the server.
|
||||
/// Inputs are the typed parameters; output is the return value or side effect.
|
||||
async fn run_output_delay_probe(
|
||||
&self,
|
||||
req: Request<OutputDelayProbeRequest>,
|
||||
) -> Result<Response<Self::RunOutputDelayProbeStream>, Status> {
|
||||
let cfg = camera::current_camera_config();
|
||||
let (_session_id, relay, _relay_reused) = self.camera_rt.activate(&cfg).await?;
|
||||
let mut sink = lesavka_server::audio::Voice::new("coverage-uac")
|
||||
.await
|
||||
.map_err(|e| Status::internal(format!("{e:#}")))?;
|
||||
let summary = lesavka_server::output_delay_probe::run_server_output_delay_probe(
|
||||
relay,
|
||||
&mut sink,
|
||||
&cfg,
|
||||
&req.into_inner(),
|
||||
)
|
||||
.await
|
||||
.map_err(|e| Status::internal(format!("{e:#}")))?;
|
||||
let (tx, rx) = tokio::sync::mpsc::channel(1);
|
||||
let detail = format!(
|
||||
"server-generated UVC/UAC output-delay probe complete: video_frames={} audio_packets={} events={}",
|
||||
summary.video_frames, summary.audio_packets, summary.event_count
|
||||
);
|
||||
tx.send(Ok(OutputDelayProbeReply {
|
||||
ok: true,
|
||||
detail,
|
||||
server_timeline_json: summary.timeline_json,
|
||||
}))
|
||||
.await
|
||||
.ok();
|
||||
Ok(Response::new(ReceiverStream::new(rx)))
|
||||
}
|
||||
|
||||
async fn capture_video(
|
||||
&self,
|
||||
req: Request<MonitorRequest>,
|
||||
) -> Result<Response<Self::CaptureVideoStream>, Status> {
|
||||
self.capture_video_reply(req.into_inner()).await
|
||||
}
|
||||
|
||||
async fn capture_audio(
|
||||
&self,
|
||||
_req: Request<MonitorRequest>,
|
||||
) -> Result<Response<Self::CaptureAudioStream>, Status> {
|
||||
Err(Status::internal(
|
||||
"audio capture unavailable in coverage harness",
|
||||
))
|
||||
}
|
||||
|
||||
async fn paste_text(&self, req: Request<PasteRequest>) -> Result<Response<PasteReply>, Status> {
|
||||
self.paste_text_reply(req).await
|
||||
}
|
||||
|
||||
async fn reset_usb(&self, _req: Request<Empty>) -> Result<Response<ResetUsbReply>, Status> {
|
||||
self.reset_usb_reply().await
|
||||
}
|
||||
|
||||
async fn recover_usb(
|
||||
&self,
|
||||
_req: Request<Empty>,
|
||||
) -> Result<Response<ResetUsbReply>, Status> {
|
||||
self.recover_usb_reply().await
|
||||
}
|
||||
|
||||
async fn recover_uac(
|
||||
&self,
|
||||
_req: Request<Empty>,
|
||||
) -> Result<Response<ResetUsbReply>, Status> {
|
||||
self.recover_uac_reply().await
|
||||
}
|
||||
|
||||
async fn recover_uvc(
|
||||
&self,
|
||||
_req: Request<Empty>,
|
||||
) -> Result<Response<ResetUsbReply>, Status> {
|
||||
self.recover_uvc_reply().await
|
||||
}
|
||||
|
||||
async fn get_capture_power(
|
||||
&self,
|
||||
_req: Request<Empty>,
|
||||
) -> Result<Response<CapturePowerState>, Status> {
|
||||
self.get_capture_power_reply().await
|
||||
}
|
||||
|
||||
async fn set_capture_power(
|
||||
&self,
|
||||
req: Request<SetCapturePowerRequest>,
|
||||
) -> Result<Response<CapturePowerState>, Status> {
|
||||
self.set_capture_power_reply(req).await
|
||||
}
|
||||
|
||||
async fn get_calibration(
|
||||
&self,
|
||||
_req: Request<Empty>,
|
||||
) -> Result<Response<CalibrationState>, Status> {
|
||||
self.get_calibration_reply().await
|
||||
}
|
||||
|
||||
async fn calibrate(
|
||||
&self,
|
||||
req: Request<CalibrationRequest>,
|
||||
) -> Result<Response<CalibrationState>, Status> {
|
||||
self.calibrate_reply(req).await
|
||||
}
|
||||
|
||||
async fn get_upstream_sync(
|
||||
&self,
|
||||
_req: Request<Empty>,
|
||||
) -> Result<Response<UpstreamSyncState>, Status> {
|
||||
self.get_upstream_sync_reply().await
|
||||
}
|
||||
}
|
||||
include!("relay_service_coverage/relay_trait_impl.rs");
|
||||
|
||||
190
server/src/main/relay_service_coverage/freshness_helpers.rs
Normal file
190
server/src/main/relay_service_coverage/freshness_helpers.rs
Normal file
@ -0,0 +1,190 @@
|
||||
#[cfg(coverage)]
|
||||
/// Read the upstream stale-drop budget used by coverage-only relay loops.
|
||||
///
|
||||
/// Inputs: optional `LESAVKA_UPSTREAM_STALE_DROP_MS` environment override.
|
||||
/// Output: a bounded duration used to decide whether synthetic packets remain
|
||||
/// fresh enough to play. The helper exists so tests exercise the same
|
||||
/// freshness-first policy that protects real calls from backlog growth.
|
||||
fn upstream_stale_drop_budget() -> Duration {
|
||||
let drop_ms = std::env::var("LESAVKA_UPSTREAM_STALE_DROP_MS")
|
||||
.ok()
|
||||
.and_then(|value| value.trim().parse::<u64>().ok())
|
||||
.unwrap_or(80);
|
||||
Duration::from_millis(drop_ms)
|
||||
}
|
||||
|
||||
#[cfg(coverage)]
|
||||
/// Drop every queued video packet except the newest one.
///
/// Inputs: the pending video queue, generic over the packet type so the
/// freshness policy is testable without constructing protocol types.
/// Output: how many stale packets were discarded (0 when the queue holds at
/// most one packet). Why: relay RPC loops are freshness-first — when video
/// backs up, only the most recent frame is worth presenting.
fn retain_freshest_video_packet<T>(
    pending: &mut std::collections::VecDeque<T>,
) -> usize {
    // `saturating_sub` keeps the empty-queue case at zero drops and removes
    // the panic path the previous pop_back().expect(..) form carried.
    let dropped = pending.len().saturating_sub(1);
    if dropped > 0 {
        // Remove everything but the back (newest) element in one pass.
        pending.drain(..dropped);
    }
    dropped
}
|
||||
|
||||
#[cfg(coverage)]
/// Maximum number of queued audio packets considered "live"; anything older
/// is trimmed by `retain_freshest_audio_packet` to bound playout delay.
const AUDIO_PENDING_LIVE_WINDOW_PACKETS: usize = 8;
|
||||
|
||||
#[cfg(coverage)]
|
||||
/// Keeps `retain_freshest_audio_packet` explicit because it sits on relay RPC orchestration, where hardware failures must surface without stopping the server.
|
||||
/// Inputs are the typed parameters; output is the return value or side effect.
|
||||
fn retain_freshest_audio_packet(
|
||||
pending: &mut std::collections::VecDeque<AudioPacket>,
|
||||
) -> usize {
|
||||
if pending.len() <= AUDIO_PENDING_LIVE_WINDOW_PACKETS {
|
||||
return 0;
|
||||
}
|
||||
let dropped = pending.len() - AUDIO_PENDING_LIVE_WINDOW_PACKETS;
|
||||
pending.drain(..dropped);
|
||||
dropped
|
||||
}
|
||||
|
||||
#[cfg(coverage)]
/// Return a playable upstream plan while collapsing all wait/drop decisions.
///
/// Inputs: planner decision from `UpstreamMediaRuntime`. Output: `Some(plan)`
/// only when playback should proceed. This keeps coverage tests focused on
/// the relay branch contract rather than duplicating planner internals.
fn coverage_playable_plan(
    decision: lesavka_server::upstream_media_runtime::UpstreamPlanDecision,
) -> Option<lesavka_server::upstream_media_runtime::PlannedUpstreamPacket> {
    // Local alias keeps the exhaustive match readable.
    use lesavka_server::upstream_media_runtime::UpstreamPlanDecision as Decision;
    match decision {
        Decision::Play(plan) => Some(plan),
        // Every non-play decision collapses to "do not present this packet".
        Decision::AwaitingPair
        | Decision::DropBeforeOverlap
        | Decision::DropStale(_)
        | Decision::StartupFailed(_) => None,
    }
}
|
||||
|
||||
#[cfg(coverage)]
|
||||
/// Requeue audio when the planner needs more paired media before playback.
///
/// Inputs: pending audio queue, the packet (generic over the packet type for
/// testability), and whether the inbound stream has closed. Output: whether
/// the packet was requeued. This mirrors the live relay behavior where audio
/// may need to wait for video overlap but must not resurrect closed input.
fn coverage_requeue_audio_packet<T>(
    pending: &mut std::collections::VecDeque<T>,
    pkt: T,
    inbound_closed: bool,
) -> bool {
    if inbound_closed {
        // A closed inbound stream must drain, never grow.
        return false;
    }
    // Push to the front so this packet is retried before newer arrivals.
    pending.push_front(pkt);
    true
}
|
||||
|
||||
#[cfg(coverage)]
|
||||
/// Requeue video when the planner needs more paired media before playback.
///
/// Inputs: pending video queue, the packet (generic over the packet type for
/// testability), and whether the inbound stream has closed. Output: whether
/// the packet was requeued. This keeps the video path freshness-first while
/// still allowing a short wait for the audio timing master.
fn coverage_requeue_video_packet<T>(
    pending: &mut std::collections::VecDeque<T>,
    pkt: T,
    inbound_closed: bool,
) -> bool {
    if inbound_closed {
        // A closed inbound stream must drain, never grow.
        return false;
    }
    // Push to the front so this frame is retried before newer arrivals.
    pending.push_front(pkt);
    true
}
|
||||
|
||||
#[cfg(coverage)]
/// Record a coverage-only video freeze when audio never becomes the timing master.
///
/// Inputs: upstream media runtime being observed. Output: none — a runtime
/// telemetry side effect only. This documents the otherwise silent branch
/// where video is intentionally dropped instead of playing out of sync.
fn coverage_record_video_wait_failure(
    upstream_media_rt: &lesavka_server::upstream_media_runtime::UpstreamMediaRuntime,
) {
    // Telemetry only; the caller decides whether to drop or retry the frame.
    upstream_media_rt.record_video_freeze("coverage video froze awaiting audio master");
}
|
||||
|
||||
#[cfg(coverage)]
/// Drop stale queued video down to the newest packet.
///
/// Inputs: pending video queue. Output: number of dropped packets. The relay
/// uses this freshness-first escape hatch so a stalled client cannot create a
/// growing RCT video delay.
fn coverage_drop_late_video_packet(
    pending: &mut std::collections::VecDeque<VideoPacket>,
) -> usize {
    // Thin alias over the shared retention policy so call sites read as intent.
    retain_freshest_video_packet(pending)
}
|
||||
|
||||
#[cfg(coverage)]
/// Convert an audio planner decision into a playable plan or queue mutation.
///
/// Inputs: planner decision, pending queue, current packet, inbound state, and
/// stale budget. Output: playable plan when fresh enough. This keeps the audio
/// test harness faithful to the live decision order without duplicating a full
/// streaming RPC in every branch test.
fn coverage_audio_plan_from_decision(
    decision: lesavka_server::upstream_media_runtime::UpstreamPlanDecision,
    pending: &mut std::collections::VecDeque<AudioPacket>,
    pkt: AudioPacket,
    inbound_closed: bool,
    stale_drop_budget: Duration,
) -> Option<lesavka_server::upstream_media_runtime::PlannedUpstreamPacket> {
    match coverage_playable_plan(decision) {
        // Fresh enough: hand the plan straight to the playout loop.
        Some(plan) if plan.late_by <= stale_drop_budget => Some(plan),
        // Playable but stale: skip it rather than accumulate audible delay.
        Some(_) => None,
        // Not playable yet: give the packet another chance unless input closed.
        None => {
            coverage_requeue_audio_packet(pending, pkt, inbound_closed);
            None
        }
    }
}
|
||||
|
||||
#[cfg(coverage)]
/// Convert a video planner decision into a playable plan or freshness drop.
///
/// Inputs: planner decision, pending queue, current packet, inbound state, and
/// stale budget. Output: playable plan when fresh enough. This protects the
/// branch where video must sacrifice smoothness rather than accumulate delay.
fn coverage_video_plan_from_decision(
    decision: lesavka_server::upstream_media_runtime::UpstreamPlanDecision,
    pending: &mut std::collections::VecDeque<VideoPacket>,
    pkt: VideoPacket,
    inbound_closed: bool,
    stale_drop_budget: Duration,
) -> Option<lesavka_server::upstream_media_runtime::PlannedUpstreamPacket> {
    match coverage_playable_plan(decision) {
        // Fresh enough: present the frame.
        Some(plan) if plan.late_by <= stale_drop_budget => Some(plan),
        // Playable but stale: purge the backlog down to the newest frame.
        Some(_) => {
            let _ = coverage_drop_late_video_packet(pending);
            None
        }
        // Not playable yet: requeue unless the inbound stream already closed.
        None => {
            coverage_requeue_video_packet(pending, pkt, inbound_closed);
            None
        }
    }
}
|
||||
|
||||
#[cfg(coverage)]
/// Check whether the audio master is ready before video can be presented.
///
/// Inputs: upstream runtime and readiness result from the waiter. Output: true
/// when video may play. A false result records telemetry because this branch is
/// a real call-quality signal, not just a skipped packet.
fn coverage_audio_master_ready(
    upstream_media_rt: &lesavka_server::upstream_media_runtime::UpstreamMediaRuntime,
    ready: bool,
) -> bool {
    if !ready {
        // The freeze is worth recording: video is being withheld on purpose.
        coverage_record_video_wait_failure(upstream_media_rt);
    }
    ready
}
|
||||
372
server/src/main/relay_service_coverage/relay_trait_impl.rs
Normal file
372
server/src/main/relay_service_coverage/relay_trait_impl.rs
Normal file
@ -0,0 +1,372 @@
|
||||
#[cfg(coverage)]
|
||||
#[tonic::async_trait]
|
||||
impl Relay for Handler {
|
||||
type StreamKeyboardStream = ReceiverStream<Result<KeyboardReport, Status>>;
|
||||
type StreamMouseStream = ReceiverStream<Result<MouseReport, Status>>;
|
||||
type CaptureVideoStream = VideoStream;
|
||||
type CaptureAudioStream = AudioStream;
|
||||
type StreamMicrophoneStream = ReceiverStream<Result<Empty, Status>>;
|
||||
type StreamCameraStream = ReceiverStream<Result<Empty, Status>>;
|
||||
type StreamWebcamMediaStream = ReceiverStream<Result<Empty, Status>>;
|
||||
type RunOutputDelayProbeStream = ReceiverStream<Result<OutputDelayProbeReply, Status>>;
|
||||
|
||||
    /// Keeps `stream_keyboard` explicit because it sits on relay RPC orchestration, where hardware failures must surface without stopping the server.
    /// Inputs: a client stream of `KeyboardReport`s. Output: a stream that
    /// echoes each report back after it is written to the HID keyboard endpoint.
    async fn stream_keyboard(
        &self,
        req: Request<tonic::Streaming<KeyboardReport>>,
    ) -> Result<Response<Self::StreamKeyboardStream>, Status> {
        let (tx, rx) = tokio::sync::mpsc::channel(32);
        let kb = self.kb.clone();
        let report_delay = live_keyboard_report_delay();

        tokio::spawn(async move {
            let mut s = req.into_inner();
            while let Some(pkt) = s.next().await.transpose()? {
                // Write failure is ignored: a flaky HID gadget must not kill the stream.
                let _ = runtime_support::write_hid_report(&kb, &hid_endpoint(0), &pkt.data).await;
                // Echo the report so the client can observe per-report round trips.
                tx.send(Ok(pkt)).await.ok();
                if !report_delay.is_zero() {
                    // NOTE(review): the enclosing impl is `#[cfg(coverage)]`, so this
                    // `#[cfg(not(coverage))]` sleep compiles out and the branch is a
                    // no-op here — confirm skipping the pacing delay is intentional.
                    #[cfg(not(coverage))]
                    tokio::time::sleep(report_delay).await;
                }
            }
            Ok::<(), Status>(())
        });

        Ok(Response::new(ReceiverStream::new(rx)))
    }
|
||||
|
||||
    /// Keeps `stream_mouse` explicit because it sits on relay RPC orchestration, where hardware failures must surface without stopping the server.
    /// Inputs: a client stream of `MouseReport`s. Output: a stream that echoes
    /// each report back after it is written to the HID mouse endpoint.
    async fn stream_mouse(
        &self,
        req: Request<tonic::Streaming<MouseReport>>,
    ) -> Result<Response<Self::StreamMouseStream>, Status> {
        let (tx, rx) = tokio::sync::mpsc::channel(32);
        let ms = self.ms.clone();

        tokio::spawn(async move {
            let mut s = req.into_inner();
            while let Some(pkt) = s.next().await.transpose()? {
                // Write failure is ignored: a flaky HID gadget must not kill the stream.
                let _ = runtime_support::write_hid_report(&ms, &hid_endpoint(1), &pkt.data).await;
                tx.send(Ok(pkt)).await.ok();
            }
            Ok::<(), Status>(())
        });

        Ok(Response::new(ReceiverStream::new(rx)))
    }
|
||||
|
||||
    /// Keeps `stream_microphone` explicit because it sits on relay RPC orchestration, where hardware failures must surface without stopping the server.
    /// Inputs: a client stream of `AudioPacket`s. Output: a single `Empty`
    /// once the stream ends; in between, packets are paced by the upstream
    /// planner and pushed to the UAC audio sink.
    async fn stream_microphone(
        &self,
        req: Request<tonic::Streaming<AudioPacket>>,
    ) -> Result<Response<Self::StreamMicrophoneStream>, Status> {
        // Activation returns a generation-stamped lease; a newer stream supersedes us.
        let lease = self.upstream_media_rt.activate_microphone();
        let Some(microphone_sink_permit) = self
            .upstream_media_rt
            .reserve_microphone_sink(lease.generation)
            .await
        else {
            return Err(Status::aborted(
                "microphone stream superseded before sink became available",
            ));
        };
        // assumes the UAC gadget exposes ALSA device "hw:UAC2Gadget,0" by default — TODO confirm
        let uac_dev = std::env::var("LESAVKA_UAC_DEV").unwrap_or_else(|_| "hw:UAC2Gadget,0".into());
        let mut sink = runtime_support::open_voice_with_retry(&uac_dev)
            .await
            .map_err(|e| {
                // Release the lease on failure so a retry can re-activate cleanly.
                self.upstream_media_rt.close_microphone(lease.generation);
                Status::internal(format!("{e:#}"))
            })?;
        let (tx, rx) = tokio::sync::mpsc::channel(1);
        let upstream_media_rt = self.upstream_media_rt.clone();

        tokio::spawn(async move {
            // Holding the permit keeps the sink reserved for this stream's lifetime.
            let _microphone_sink_permit = microphone_sink_permit;
            let mut inbound = req.into_inner();
            let mut pending = std::collections::VecDeque::new();
            let mut inbound_closed = false;
            let stale_drop_budget = upstream_stale_drop_budget();
            loop {
                // Stop as soon as a newer microphone stream takes over the lease.
                if !upstream_media_rt.is_microphone_active(lease.generation) {
                    break;
                }
                if !inbound_closed {
                    // Poll inbound with a 25 ms timeout so lease checks stay responsive.
                    let next_packet = tokio::select! {
                        packet = inbound.next() => Some(packet),
                        _ = tokio::time::sleep(Duration::from_millis(25)) => None,
                    };
                    if let Some(next_packet) = next_packet {
                        match next_packet.transpose()? {
                            Some(pkt) => {
                                pending.push_back(pkt);
                                // Trim to the live window; a backlog grows audible delay.
                                let _ = retain_freshest_audio_packet(&mut pending);
                            }
                            None => inbound_closed = true,
                        }
                    }
                }
                let Some(mut pkt) = pending.pop_front() else {
                    if inbound_closed {
                        break;
                    }
                    continue;
                };
                // Planner decides play/requeue/drop; `None` means skip this packet.
                let Some(plan) = coverage_audio_plan_from_decision(
                    upstream_media_rt.plan_audio_pts(pkt.pts),
                    &mut pending,
                    pkt.clone(),
                    inbound_closed,
                    stale_drop_budget,
                ) else {
                    continue;
                };
                tokio::time::sleep_until(plan.due_at).await;
                // Re-check lateness after sleeping: the executor may have run late.
                let actual_late_by = tokio::time::Instant::now()
                    .checked_duration_since(plan.due_at)
                    .unwrap_or_default();
                if actual_late_by > stale_drop_budget {
                    continue;
                }
                // Rebase the packet onto the server-local presentation timeline.
                pkt.pts = plan.local_pts_us;
                sink.push(&pkt);
                upstream_media_rt.mark_audio_presented(pkt.pts, plan.due_at);
            }
            sink.finish();
            upstream_media_rt.close_microphone(lease.generation);
            let _ = tx.send(Ok(Empty {})).await;
            Ok::<(), Status>(())
        });

        Ok(Response::new(ReceiverStream::new(rx)))
    }
|
||||
|
||||
    /// Keeps `stream_webcam_media` explicit because it sits on relay RPC orchestration, where hardware failures must surface without stopping the server.
    /// Inputs: a client stream of bundled webcam media. Output: one `Empty`
    /// after the stream closes; client timing facts are recorded as a side
    /// effect for each video frame and audio packet in every bundle.
    async fn stream_webcam_media(
        &self,
        req: Request<tonic::Streaming<UpstreamMediaBundle>>,
    ) -> Result<Response<Self::StreamWebcamMediaStream>, Status> {
        // Both leases are taken up front so bundled media supersedes split streams.
        let microphone_lease = self.upstream_media_rt.activate_microphone();
        let camera_lease = self.upstream_media_rt.activate_camera();
        let (tx, rx) = tokio::sync::mpsc::channel(1);
        let upstream_media_rt = self.upstream_media_rt.clone();

        tokio::spawn(async move {
            let mut inbound = req.into_inner();
            while let Some(bundle) = inbound.next().await.transpose()? {
                // NOTE(review): packets are only measured here, never played —
                // confirm this coverage variant is intentionally record-only.
                if let Some(video) = bundle.video {
                    upstream_media_rt.record_client_timing(
                        UpstreamMediaKind::Camera,
                        video_client_timing(&video),
                    );
                }
                for audio in bundle.audio {
                    upstream_media_rt.record_client_timing(
                        UpstreamMediaKind::Microphone,
                        audio_client_timing(&audio),
                    );
                }
            }
            upstream_media_rt.close_camera(camera_lease.generation);
            upstream_media_rt.close_microphone(microphone_lease.generation);
            let _ = tx.send(Ok(Empty {})).await;
            Ok::<(), Status>(())
        });

        Ok(Response::new(ReceiverStream::new(rx)))
    }
|
||||
|
||||
    /// Keeps `stream_camera` explicit because it sits on relay RPC orchestration, where hardware failures must surface without stopping the server.
    /// Inputs: a client stream of `VideoPacket`s. Output: one `Empty` after the
    /// session ends; in between, frames are paced against the audio timing
    /// master and fed to the UVC camera relay.
    async fn stream_camera(
        &self,
        req: Request<tonic::Streaming<VideoPacket>>,
    ) -> Result<Response<Self::StreamCameraStream>, Status> {
        let cfg = camera::current_camera_config();
        let upstream_lease = self.upstream_media_rt.activate_camera();
        let (session_id, relay, _relay_reused) = self.camera_rt.activate(&cfg).await?;
        let camera_rt = self.camera_rt.clone();
        let upstream_media_rt = self.upstream_media_rt.clone();
        let (tx, rx) = tokio::sync::mpsc::channel(1);
        // Nominal microseconds between frames, clamped to at least 1.
        let frame_step_us = (1_000_000u64 / u64::from(cfg.fps.max(1))).max(1);

        tokio::spawn(async move {
            let mut s = req.into_inner();
            let mut pending = std::collections::VecDeque::new();
            let mut inbound_closed = false;
            let stale_drop_budget = upstream_stale_drop_budget();
            loop {
                // Stop when either the camera session or the upstream lease ends.
                if !camera_rt.is_active(session_id)
                    || !upstream_media_rt.is_camera_active(upstream_lease.generation)
                {
                    break;
                }
                if !inbound_closed {
                    // Poll inbound with a 25 ms timeout so liveness checks stay responsive.
                    let next_packet = tokio::select! {
                        packet = s.next() => Some(packet),
                        _ = tokio::time::sleep(Duration::from_millis(25)) => None,
                    };
                    if let Some(next_packet) = next_packet {
                        match next_packet.transpose()? {
                            Some(pkt) => {
                                pending.push_back(pkt);
                                // Freshness-first: keep only the newest queued frame.
                                let _ = retain_freshest_video_packet(&mut pending);
                            }
                            None => inbound_closed = true,
                        }
                    }
                }
                let Some(mut pkt) = pending.pop_front() else {
                    if inbound_closed {
                        break;
                    }
                    continue;
                };
                // Planner decides play/requeue/drop; `None` means skip this frame.
                let Some(plan) = coverage_video_plan_from_decision(
                    upstream_media_rt.plan_video_pts(pkt.pts, frame_step_us),
                    &mut pending,
                    pkt.clone(),
                    inbound_closed,
                    stale_drop_budget,
                ) else {
                    continue;
                };
                // Video never plays ahead of the audio master; a failed wait is telemetry.
                if !coverage_audio_master_ready(
                    &upstream_media_rt,
                    upstream_media_rt
                        .wait_for_audio_master(plan.local_pts_us, plan.due_at)
                        .await,
                ) {
                    continue;
                }
                tokio::time::sleep_until(plan.due_at).await;
                // Rebase onto the server-local presentation timeline before feeding.
                pkt.pts = plan.local_pts_us;
                let presented_pts = pkt.pts;
                relay.feed(pkt);
                upstream_media_rt.mark_video_presented(presented_pts, plan.due_at);
            }
            upstream_media_rt.close_camera(upstream_lease.generation);
            tx.send(Ok(Empty {})).await.ok();
            Ok::<(), Status>(())
        });

        Ok(Response::new(ReceiverStream::new(rx)))
    }
|
||||
|
||||
    /// Keeps `run_output_delay_probe` explicit because it sits on relay RPC orchestration, where hardware failures must surface without stopping the server.
    /// Inputs: probe parameters. Output: a one-item stream with the probe
    /// summary after the server-generated UVC/UAC calibration run completes.
    async fn run_output_delay_probe(
        &self,
        req: Request<OutputDelayProbeRequest>,
    ) -> Result<Response<Self::RunOutputDelayProbeStream>, Status> {
        let cfg = camera::current_camera_config();
        // Session id and reuse flag are unused: the probe only needs the relay handle.
        let (_session_id, relay, _relay_reused) = self.camera_rt.activate(&cfg).await?;
        let mut sink = lesavka_server::audio::Voice::new("coverage-uac")
            .await
            .map_err(|e| Status::internal(format!("{e:#}")))?;
        let summary = lesavka_server::output_delay_probe::run_server_output_delay_probe(
            relay,
            &mut sink,
            &cfg,
            &req.into_inner(),
        )
        .await
        .map_err(|e| Status::internal(format!("{e:#}")))?;
        let (tx, rx) = tokio::sync::mpsc::channel(1);
        let detail = format!(
            "server-generated UVC/UAC output-delay probe complete: video_frames={} audio_packets={} events={}",
            summary.video_frames, summary.audio_packets, summary.event_count
        );
        // Channel capacity 1 is enough: exactly one reply is ever sent.
        tx.send(Ok(OutputDelayProbeReply {
            ok: true,
            detail,
            server_timeline_json: summary.timeline_json,
        }))
        .await
        .ok();
        Ok(Response::new(ReceiverStream::new(rx)))
    }
|
||||
|
||||
    /// Start a video capture stream for the requested monitor.
    ///
    /// Delegates to the shared `capture_video_reply` helper.
    async fn capture_video(
        &self,
        req: Request<MonitorRequest>,
    ) -> Result<Response<Self::CaptureVideoStream>, Status> {
        self.capture_video_reply(req.into_inner()).await
    }
|
||||
|
||||
    /// Audio capture endpoint for the coverage harness.
    ///
    /// Always fails: coverage builds have no real audio capture pipeline, and
    /// the failure must surface loudly rather than produce a silent stream.
    async fn capture_audio(
        &self,
        _req: Request<MonitorRequest>,
    ) -> Result<Response<Self::CaptureAudioStream>, Status> {
        Err(Status::internal(
            "audio capture unavailable in coverage harness",
        ))
    }
|
||||
|
||||
    /// Paste text on the target machine; delegates to `paste_text_reply`.
    async fn paste_text(&self, req: Request<PasteRequest>) -> Result<Response<PasteReply>, Status> {
        self.paste_text_reply(req).await
    }
|
||||
|
||||
    /// Reset the USB gadget stack; delegates to `reset_usb_reply`.
    async fn reset_usb(&self, _req: Request<Empty>) -> Result<Response<ResetUsbReply>, Status> {
        self.reset_usb_reply().await
    }
|
||||
|
||||
    /// Recover the USB gadget stack; delegates to `recover_usb_reply`.
    async fn recover_usb(
        &self,
        _req: Request<Empty>,
    ) -> Result<Response<ResetUsbReply>, Status> {
        self.recover_usb_reply().await
    }
|
||||
|
||||
    /// Recover the UAC (audio gadget) function; delegates to `recover_uac_reply`.
    async fn recover_uac(
        &self,
        _req: Request<Empty>,
    ) -> Result<Response<ResetUsbReply>, Status> {
        self.recover_uac_reply().await
    }
|
||||
|
||||
    /// Recover the UVC (camera gadget) function; delegates to `recover_uvc_reply`.
    async fn recover_uvc(
        &self,
        _req: Request<Empty>,
    ) -> Result<Response<ResetUsbReply>, Status> {
        self.recover_uvc_reply().await
    }
|
||||
|
||||
    /// Report the current capture-power state.
    ///
    /// The request payload is unused; delegates to `get_capture_power_reply`.
    async fn get_capture_power(
        &self,
        _req: Request<Empty>,
    ) -> Result<Response<CapturePowerState>, Status> {
        self.get_capture_power_reply().await
    }
|
||||
|
||||
    /// Apply a requested capture-power change and report the resulting state.
    ///
    /// Delegates to the shared `set_capture_power_reply` helper.
    async fn set_capture_power(
        &self,
        req: Request<SetCapturePowerRequest>,
    ) -> Result<Response<CapturePowerState>, Status> {
        self.set_capture_power_reply(req).await
    }
|
||||
|
||||
    /// Report the current calibration state.
    ///
    /// The request payload is unused; delegates to `get_calibration_reply`.
    async fn get_calibration(
        &self,
        _req: Request<Empty>,
    ) -> Result<Response<CalibrationState>, Status> {
        self.get_calibration_reply().await
    }
|
||||
|
||||
    /// Run a calibration pass and report the resulting state.
    ///
    /// Delegates to the shared `calibrate_reply` helper.
    async fn calibrate(
        &self,
        req: Request<CalibrationRequest>,
    ) -> Result<Response<CalibrationState>, Status> {
        self.calibrate_reply(req).await
    }
|
||||
|
||||
    /// Report the current upstream audio/video sync state.
    ///
    /// The request payload is unused; delegates to `get_upstream_sync_reply`.
    async fn get_upstream_sync(
        &self,
        _req: Request<Empty>,
    ) -> Result<Response<UpstreamSyncState>, Status> {
        self.get_upstream_sync_reply().await
    }
|
||||
}
|
||||
@ -3,6 +3,7 @@
|
||||
mod tests {
|
||||
use super::{
|
||||
MediaV2BundleFacts, UpstreamStreamCleanup, media_v2_handoff_schedule,
|
||||
media_v2_frame_step_us, prepare_media_v2_audio, prepare_media_v2_video,
|
||||
retain_freshest_audio_packet, retain_freshest_video_packet, summarize_media_v2_bundle,
|
||||
};
|
||||
use lesavka_common::lesavka::{AudioPacket, UpstreamMediaBundle, VideoPacket};
|
||||
@ -131,6 +132,8 @@ mod tests {
|
||||
|
||||
assert!(summary.has_video);
|
||||
assert!(summary.has_audio);
|
||||
assert_eq!(summary.capture_start_us, 1_000_000);
|
||||
assert_eq!(summary.capture_end_us, 1_020_000);
|
||||
assert_eq!(summary.capture_span_us, 20_000);
|
||||
assert_eq!(summary.max_queue_age_ms, 34);
|
||||
}
|
||||
@ -142,6 +145,8 @@ mod tests {
|
||||
let facts = MediaV2BundleFacts {
|
||||
has_audio: true,
|
||||
has_video: true,
|
||||
capture_start_us: 1_000_000,
|
||||
capture_end_us: 1_020_000,
|
||||
capture_span_us: 20_000,
|
||||
max_queue_age_ms: 0,
|
||||
};
|
||||
@ -162,6 +167,8 @@ mod tests {
|
||||
let facts = MediaV2BundleFacts {
|
||||
has_audio: true,
|
||||
has_video: true,
|
||||
capture_start_us: 1_000_000,
|
||||
capture_end_us: 1_020_000,
|
||||
capture_span_us: 20_000,
|
||||
max_queue_age_ms: 1_000,
|
||||
};
|
||||
@ -169,6 +176,51 @@ mod tests {
|
||||
assert!(media_v2_handoff_schedule(facts, 0, 0).is_none());
|
||||
}
|
||||
|
||||
    #[test]
    /// Keeps `media_v2_preparation_anchors_audio_and_video_to_one_capture_epoch` explicit because the bundled path must not let network receive cadence become video playout cadence.
    /// Inputs are one bundle's client capture PTS values; output proves audio
    /// and video are planned from the same epoch before handoff workers sleep.
    fn media_v2_preparation_anchors_audio_and_video_to_one_capture_epoch() {
        let runtime = UpstreamMediaRuntime::new();
        // Zero offsets so planned timings map 1:1 onto capture-relative PTS.
        runtime.set_playout_offsets(0, 0);
        let epoch = tokio::time::Instant::now() + std::time::Duration::from_secs(1);
        let base = 1_000_000;
        let mut audio = vec![
            AudioPacket {
                client_capture_pts_us: base,
                ..Default::default()
            },
            AudioPacket {
                client_capture_pts_us: base + 10_000,
                ..Default::default()
            },
        ];
        // Video captured 33.333 ms (one 30 fps frame) after the audio base.
        let video = VideoPacket {
            client_capture_pts_us: base + 33_333,
            ..Default::default()
        };

        let scheduled_audio =
            prepare_media_v2_audio(&mut audio, &runtime, base, epoch).expect("audio plan");
        let scheduled_video = prepare_media_v2_video(
            Some(video),
            &runtime,
            base,
            epoch,
            media_v2_frame_step_us(30),
        )
        .expect("video plan");

        // PTS values are rebased to the shared capture epoch (base becomes 0).
        assert_eq!(scheduled_audio.packets[0].pts, 0);
        assert_eq!(scheduled_audio.packets[1].pts, 10_000);
        assert_eq!(scheduled_video.packet.pts, 33_333);
        // Audio plays at the epoch; video is offset by its capture delta only.
        assert_eq!(scheduled_audio.due_at, epoch);
        assert_eq!(
            scheduled_video.due_at.duration_since(epoch).as_micros(),
            33_333
        );
    }
|
||||
|
||||
#[test]
|
||||
/// Keeps `legacy_bundled_event_timing_example_documents_quarantined_v1_behavior` explicit because it sits on relay RPC orchestration, where hardware failures must surface without stopping the server.
|
||||
/// Inputs are the typed parameters; output is the return value or side effect.
|
||||
|
||||
@ -66,7 +66,6 @@ fn retain_freshest_audio_packet(
|
||||
dropped
|
||||
}
|
||||
|
||||
#[cfg(not(coverage))]
|
||||
/// Extract client-side timing facts from an upstream microphone packet.
|
||||
fn audio_client_timing(pkt: &AudioPacket) -> UpstreamClientTiming {
|
||||
let capture_pts_us = if pkt.client_capture_pts_us == 0 {
|
||||
@ -87,7 +86,6 @@ fn audio_client_timing(pkt: &AudioPacket) -> UpstreamClientTiming {
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(not(coverage))]
|
||||
/// Extract client-side timing facts from an upstream camera packet.
|
||||
fn video_client_timing(pkt: &VideoPacket) -> UpstreamClientTiming {
|
||||
let capture_pts_us = if pkt.client_capture_pts_us == 0 {
|
||||
|
||||
@ -6,14 +6,7 @@ struct EncodedProbeFrames {
|
||||
#[cfg(not(coverage))]
|
||||
impl EncodedProbeFrames {
|
||||
fn new(camera: &CameraConfig, config: &ProbeConfig, frame_step: Duration) -> Result<Self> {
|
||||
if !matches!(camera.codec, CameraCodec::Mjpeg) {
|
||||
bail!(
|
||||
"server-generated output-delay probe currently requires MJPEG UVC output, got {}",
|
||||
camera.codec.as_str()
|
||||
);
|
||||
}
|
||||
|
||||
let mut encoder = MjpegFrameEncoder::new(camera)?;
|
||||
let mut encoder = ProbeFrameEncoder::new(camera)?;
|
||||
let mut frames = Vec::new();
|
||||
let mut frame_index = 0u64;
|
||||
loop {
|
||||
@ -36,6 +29,50 @@ impl EncodedProbeFrames {
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(not(coverage))]
/// Probe-frame encoder selected by the UVC ingress codec under calibration.
enum ProbeFrameEncoder {
    // MJPEG path: one JPEG image per probe frame.
    Mjpeg(MjpegFrameEncoder),
    // HEVC path: one byte-stream access unit per probe frame.
    Hevc(HevcFrameEncoder),
}
|
||||
|
||||
#[cfg(not(coverage))]
impl ProbeFrameEncoder {
    /// Build the encoder that matches the ingress profile under calibration.
    ///
    /// Inputs: camera codec, dimensions, and frame rate. Output: an encoder
    /// capable of producing packets for the server-generated probe. Why:
    /// profile-specific server-to-RCT calibration must include the same decode
    /// work that real client-origin media will impose on the server.
    ///
    /// # Errors
    /// Fails for the H.264 profile, which this probe does not support.
    fn new(camera: &CameraConfig) -> Result<Self> {
        match camera.codec {
            CameraCodec::Mjpeg => Ok(Self::Mjpeg(MjpegFrameEncoder::new(camera)?)),
            CameraCodec::Hevc => Ok(Self::Hevc(HevcFrameEncoder::new(camera)?)),
            CameraCodec::H264 => bail!(
                "server-generated output-delay probe currently supports MJPEG or HEVC UVC ingress profiles, got {}",
                camera.codec.as_str()
            ),
        }
    }

    /// Encode one visual probe frame for the active profile.
    ///
    /// Inputs: the event color and monotonically increasing frame sequence.
    /// Output: one compressed video access unit. Why: the analyzer can only
    /// compare MJPEG and HEVC paths fairly when both originate from the same
    /// coded flash schedule.
    fn encode_probe_frame(&mut self, color: Rgb, sequence: u64) -> Result<Vec<u8>> {
        // Static dispatch over the two supported codec back-ends.
        match self {
            Self::Mjpeg(encoder) => encoder.encode_probe_frame(color, sequence),
            Self::Hevc(encoder) => encoder.encode_probe_frame(color, sequence),
        }
    }
}
|
||||
|
||||
/// Map an optional event identity to the RGB color emitted in the probe frame.
|
||||
///
|
||||
/// Inputs: an event code from the probe schedule. Output: the matching color or
|
||||
/// the dark idle frame. Why: keeping the mapping centralized preserves the
|
||||
/// analyzer's coded-pulse identity contract across MJPEG and HEVC encoders.
|
||||
fn probe_color_for_code(code: Option<u32>) -> Rgb {
|
||||
code.and_then(|code| EVENT_COLORS.get(code.checked_sub(1)? as usize).copied())
|
||||
.unwrap_or(DARK_FRAME_RGB)
|
||||
@ -156,6 +193,151 @@ impl Drop for MjpegFrameEncoder {
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(not(coverage))]
/// GStreamer-backed HEVC encoder for server-generated probe frames.
struct HevcFrameEncoder {
    // Raw RGB probe frames are pushed into this appsrc.
    src: gst_app::AppSrc,
    // Encoded HEVC access units are pulled from this appsink.
    sink: gst_app::AppSink,
    // Owning pipeline; shut down in the `Drop` impl.
    pipeline: gst::Pipeline,
    // Frame dimensions in pixels, copied from the camera config.
    width: usize,
    height: usize,
    // Microseconds between frames, derived from the configured fps.
    frame_step_us: u64,
}
|
||||
|
||||
#[cfg(not(coverage))]
impl HevcFrameEncoder {
    /// Create the HEVC probe encoder used by profile calibration.
    ///
    /// Inputs: camera dimensions and frame rate. Output: a GStreamer encoder
    /// pipeline that emits byte-stream HEVC access units. Why: server-side HEVC
    /// calibration needs synthetic media to enter the same compressed ingress
    /// profile that client transport will use.
    ///
    /// # Errors
    /// Fails if GStreamer initialization, element construction, linking, or
    /// pipeline startup fails.
    fn new(camera: &CameraConfig) -> Result<Self> {
        gst::init().context("gst init")?;
        let width = camera.width as i32;
        let height = camera.height as i32;
        // fps is clamped to >= 1 so the framerate fraction is always valid.
        let fps = camera.fps.max(1) as i32;
        // Caps for the raw RGB frames this encoder is fed.
        let raw_caps = gst::Caps::builder("video/x-raw")
            .field("format", "RGB")
            .field("width", width)
            .field("height", height)
            .field("framerate", gst::Fraction::new(fps, 1))
            .build();
        // Output caps: byte-stream HEVC, one full access unit per buffer.
        let hevc_caps = gst::Caps::builder("video/x-h265")
            .field("stream-format", "byte-stream")
            .field("alignment", "au")
            .build();
        let pipeline = gst::Pipeline::new();
        let src = gst::ElementFactory::make("appsrc")
            .name("output_delay_probe_hevc_src")
            .build()?
            .downcast::<gst_app::AppSrc>()
            .expect("appsrc");
        // Not live and not auto-timestamped: PTS is stamped explicitly per frame.
        src.set_is_live(false);
        src.set_format(gst::Format::Time);
        src.set_property("do-timestamp", false);
        src.set_caps(Some(&raw_caps));
        let convert = gst::ElementFactory::make("videoconvert").build()?;
        // Intermediate I420 caps between the converter and the encoder.
        let raw_i420 = gst::Caps::builder("video/x-raw")
            .field("format", "I420")
            .field("width", width)
            .field("height", height)
            .field("framerate", gst::Fraction::new(fps, 1))
            .build();
        let raw_capsfilter = gst::ElementFactory::make("capsfilter")
            .property("caps", &raw_i420)
            .build()?;
        // key-int-max=1 requests a keyframe for every frame (all-intra probe
        // stream); zerolatency/ultrafast minimize encoder-side buffering.
        let encoder = gst::ElementFactory::make("x265enc")
            .property_from_str("tune", "zerolatency")
            .property_from_str("speed-preset", "ultrafast")
            .property("bitrate", 2500u32)
            .property("key-int-max", 1i32)
            .build()
            .context("building HEVC probe encoder x265enc")?;
        // config-interval=-1 per h265parse docs: insert parameter sets with
        // every IDR so each access unit is self-describing.
        let parser = gst::ElementFactory::make("h265parse")
            .property("config-interval", -1i32)
            .build()?;
        let hevc_capsfilter = gst::ElementFactory::make("capsfilter")
            .property("caps", &hevc_caps)
            .build()?;
        let sink = gst::ElementFactory::make("appsink")
            .name("output_delay_probe_hevc_sink")
            .property("sync", false)
            .property("emit-signals", false)
            .property("max-buffers", 8u32)
            .build()?
            .downcast::<gst_app::AppSink>()
            .expect("appsink");
        pipeline.add_many([
            src.upcast_ref(),
            &convert,
            &raw_capsfilter,
            &encoder,
            &parser,
            &hevc_capsfilter,
            sink.upcast_ref(),
        ])?;
        gst::Element::link_many([
            src.upcast_ref(),
            &convert,
            &raw_capsfilter,
            &encoder,
            &parser,
            &hevc_capsfilter,
            sink.upcast_ref(),
        ])?;
        pipeline
            .set_state(gst::State::Playing)
            .context("starting output-delay probe HEVC encoder")?;

        Ok(Self {
            src,
            sink,
            pipeline,
            width: camera.width as usize,
            height: camera.height as usize,
            // Microseconds per frame, clamped to at least 1.
            frame_step_us: (1_000_000u64 / u64::from(camera.fps.max(1))).max(1),
        })
    }

    /// Encode one RGB probe frame as an HEVC access unit.
    ///
    /// Inputs: the rendered color and sequence number. Output: the compressed
    /// HEVC packet to feed into the server camera path. Why: sequence-stamped
    /// HEVC frames let the final RCT capture prove sync after decode and MJPEG
    /// re-emission, not just before transport.
    ///
    /// # Errors
    /// Fails if the pipeline rejects the input buffer or yields no sample.
    fn encode_probe_frame(&mut self, color: Rgb, sequence: u64) -> Result<Vec<u8>> {
        // Deterministic timeline: PTS is sequence * frame step.
        let pts_us = sequence.saturating_mul(self.frame_step_us);
        let frame = probe_rgb_frame(self.width, self.height, color, sequence);
        let mut buffer = gst::Buffer::from_slice(frame);
        if let Some(meta) = buffer.get_mut() {
            let pts = gst::ClockTime::from_useconds(pts_us);
            // PTS == DTS: the all-intra stream has no frame reordering to encode.
            meta.set_pts(Some(pts));
            meta.set_dts(Some(pts));
            meta.set_duration(Some(gst::ClockTime::from_useconds(self.frame_step_us)));
        }
        self.src
            .push_buffer(buffer)
            .context("encoding output-delay probe HEVC frame")?;
        // NOTE(review): pull_sample blocks until the encoder emits a sample —
        // relies on x265enc producing one output per all-intra input; confirm.
        let sample = self
            .sink
            .pull_sample()
            .context("pulling encoded output-delay probe HEVC frame")?;
        let buffer = sample.buffer().context("encoded HEVC frame had no buffer")?;
        let map = buffer
            .map_readable()
            .context("mapping encoded output-delay probe HEVC frame")?;
        Ok(map.as_slice().to_vec())
    }
}
|
||||
|
||||
#[cfg(not(coverage))]
|
||||
impl Drop for HevcFrameEncoder {
|
||||
fn drop(&mut self) {
|
||||
let _ = self.src.end_of_stream();
|
||||
let _ = self.pipeline.set_state(gst::State::Null);
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(not(coverage))]
|
||||
fn probe_rgb_frame(width: usize, height: usize, color: Rgb, sequence: u64) -> Vec<u8> {
|
||||
let mut frame = vec![0u8; width.saturating_mul(height).saturating_mul(3)];
|
||||
|
||||
@ -6,6 +6,7 @@
|
||||
/// Why: this probe intentionally bypasses client capture/uplink but uses the
|
||||
/// same final server output handoff calls as received client media, so the
|
||||
/// measured skew/freshness is the server final-handoff-to-RCT path.
|
||||
#[cfg(not(coverage))]
|
||||
pub async fn run_server_output_delay_probe(
|
||||
relay: Arc<CameraRelay>,
|
||||
sink: &mut Voice,
|
||||
@ -123,6 +124,14 @@ pub async fn run_server_output_delay_probe(
|
||||
_sink: &mut Voice,
|
||||
camera: &CameraConfig,
|
||||
request: &OutputDelayProbeRequest,
|
||||
) -> Result<OutputDelayProbeSummary> {
|
||||
coverage_output_delay_summary(camera, request)
|
||||
}
|
||||
|
||||
#[cfg(coverage)]
|
||||
fn coverage_output_delay_summary(
|
||||
camera: &CameraConfig,
|
||||
request: &OutputDelayProbeRequest,
|
||||
) -> Result<OutputDelayProbeSummary> {
|
||||
let config = ProbeConfig::from_request(request)?;
|
||||
Ok(OutputDelayProbeSummary {
|
||||
|
||||
@ -1,10 +1,22 @@
|
||||
#[cfg(not(coverage))]
|
||||
use super::EncodedProbeFrames;
|
||||
#[cfg(coverage)]
|
||||
use super::coverage_output_delay_summary;
|
||||
use super::{
|
||||
DARK_FRAME_RGB, EVENT_COLORS, EVENT_FREQUENCIES_HZ, OutputDelayProbeTimeline, ProbeConfig,
|
||||
duration_us, probe_color_for_code, render_audio_chunk, unix_ns_from_start,
|
||||
draw_frame_continuity_watermark, duration_mul, duration_us, event_frequency_hz,
|
||||
parse_event_width_codes, positive_delay, probe_color_for_code, render_audio_chunk,
|
||||
unix_ns_from_start,
|
||||
};
|
||||
#[cfg(coverage)]
|
||||
use crate::audio::Voice;
|
||||
use crate::camera::{CameraCodec, CameraConfig, CameraOutput};
|
||||
#[cfg(coverage)]
|
||||
use crate::video_sinks::CameraRelay;
|
||||
use lesavka_common::lesavka::OutputDelayProbeRequest;
|
||||
use std::collections::BTreeSet;
|
||||
#[cfg(coverage)]
|
||||
use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
|
||||
#[test]
|
||||
@ -29,6 +41,93 @@ fn event_codes_reject_unsupported_signatures() {
|
||||
assert!(ProbeConfig::from_request(&request).is_err());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn request_validation_rejects_non_live_probe_shapes() {
|
||||
let period_too_large = OutputDelayProbeRequest {
|
||||
duration_seconds: u32::MAX,
|
||||
..Default::default()
|
||||
};
|
||||
assert!(ProbeConfig::from_request(&period_too_large).is_err());
|
||||
|
||||
let width_not_smaller_than_period = OutputDelayProbeRequest {
|
||||
pulse_period_ms: 120,
|
||||
pulse_width_ms: 120,
|
||||
..Default::default()
|
||||
};
|
||||
assert!(ProbeConfig::from_request(&width_not_smaller_than_period).is_err());
|
||||
assert!(positive_delay(-1, "video_delay_us").is_err());
|
||||
assert!(parse_event_width_codes(" , , ").is_err());
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[cfg(coverage)]
|
||||
/// Keeps `coverage_probe_summary_uses_validated_request_and_timeline_shape` explicit because the coverage harness must preserve the same request validation and timeline fields as live UVC/UAC probing.
|
||||
/// Inputs are a synthetic HEVC camera profile and probe request; output is a compact coverage summary with timeline JSON.
|
||||
fn coverage_probe_summary_uses_validated_request_and_timeline_shape() {
|
||||
let camera = CameraConfig {
|
||||
output: CameraOutput::Uvc,
|
||||
codec: CameraCodec::Hevc,
|
||||
width: 1280,
|
||||
height: 720,
|
||||
fps: 30,
|
||||
hdmi: None,
|
||||
};
|
||||
let request = OutputDelayProbeRequest {
|
||||
duration_seconds: 6,
|
||||
warmup_seconds: 1,
|
||||
pulse_period_ms: 1_000,
|
||||
pulse_width_ms: 120,
|
||||
event_width_codes: "1,2,3".to_string(),
|
||||
audio_delay_us: 0,
|
||||
video_delay_us: 150_000,
|
||||
};
|
||||
|
||||
let summary = coverage_output_delay_summary(&camera, &request).expect("coverage summary");
|
||||
let timeline =
|
||||
serde_json::from_str::<serde_json::Value>(&summary.timeline_json).expect("timeline json");
|
||||
|
||||
assert_eq!(summary.video_frames, 1);
|
||||
assert_eq!(summary.audio_packets, 1);
|
||||
assert_eq!(summary.event_count, 5);
|
||||
assert_eq!(timeline["camera_width"].as_u64(), Some(1280));
|
||||
assert_eq!(timeline["video_delay_us"].as_u64(), Some(150_000));
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
#[cfg(coverage)]
|
||||
/// Keeps `coverage_probe_runtime_entrypoint_accepts_noop_sinks` explicit because the coverage harness should validate the public probe entrypoint without touching physical UVC/UAC devices.
|
||||
/// Inputs are noop video/audio sinks plus a short probe request; output is a serialized timeline summary from the same entrypoint the RPC uses.
|
||||
async fn coverage_probe_runtime_entrypoint_accepts_noop_sinks() {
|
||||
let camera = CameraConfig {
|
||||
output: CameraOutput::Uvc,
|
||||
codec: CameraCodec::Mjpeg,
|
||||
width: 640,
|
||||
height: 480,
|
||||
fps: 20,
|
||||
hdmi: None,
|
||||
};
|
||||
let request = OutputDelayProbeRequest {
|
||||
duration_seconds: 3,
|
||||
warmup_seconds: 1,
|
||||
pulse_period_ms: 1_000,
|
||||
pulse_width_ms: 120,
|
||||
event_width_codes: "1,2".to_string(),
|
||||
audio_delay_us: 0,
|
||||
video_delay_us: 0,
|
||||
};
|
||||
let relay = Arc::new(CameraRelay::new_noop(0));
|
||||
let mut voice = Voice::new("coverage-audio").await.expect("coverage voice");
|
||||
|
||||
let summary = super::run_server_output_delay_probe(relay, &mut voice, &camera, &request)
|
||||
.await
|
||||
.expect("coverage output-delay probe");
|
||||
|
||||
assert_eq!(summary.video_frames, 1);
|
||||
assert_eq!(summary.audio_packets, 1);
|
||||
assert_eq!(summary.event_count, 2);
|
||||
assert!(summary.timeline_json.contains("640"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn default_probe_signatures_are_unique_for_all_coded_pairs() {
|
||||
assert_eq!(EVENT_COLORS.len(), 16);
|
||||
@ -47,6 +146,28 @@ fn default_probe_signatures_are_unique_for_all_coded_pairs() {
|
||||
assert_eq!(frequencies.len(), EVENT_FREQUENCIES_HZ.len());
|
||||
}
|
||||
|
||||
#[test]
|
||||
/// Keeps `continuity_watermark_encodes_sequence_bits_and_timing_helpers_saturate` explicit because the analyzer relies on the watermark to detect smoothness without mistaking overflow for a huge timestamp.
|
||||
/// Inputs are a raw RGB frame, sequence number, and edge timing values; output is an in-place watermark plus bounded helper results.
|
||||
fn continuity_watermark_encodes_sequence_bits_and_timing_helpers_saturate() {
|
||||
let width = 40usize;
|
||||
let height = 36usize;
|
||||
let mut frame = vec![127u8; width * height * 3];
|
||||
|
||||
draw_frame_continuity_watermark(&mut frame, width, height, 0b1010_0101_0000_1111);
|
||||
|
||||
assert_eq!(frame[(height - 1) * width * 3], 255);
|
||||
assert!(frame.contains(&0));
|
||||
assert!(frame.contains(&255));
|
||||
assert_eq!(event_frequency_hz(0), None);
|
||||
assert_eq!(event_frequency_hz(17), None);
|
||||
assert_eq!(event_frequency_hz(1), Some(EVENT_FREQUENCIES_HZ[0]));
|
||||
assert_eq!(
|
||||
duration_mul(Duration::from_nanos(u64::MAX), 2),
|
||||
Duration::from_nanos(u64::MAX)
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn audio_chunk_contains_tone_only_during_coded_pulse() {
|
||||
let config = ProbeConfig::from_request(&OutputDelayProbeRequest {
|
||||
@ -99,6 +220,49 @@ fn generated_video_and_audio_share_the_same_event_schedule() {
|
||||
assert!(rms_i16_le(&active_audio) > rms_i16_le(&idle_audio) * 10.0);
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[cfg(not(coverage))]
|
||||
fn hevc_probe_frame_encoder_builds_when_x265_is_available() {
|
||||
gstreamer::init().expect("initialize gstreamer");
|
||||
if gstreamer::ElementFactory::find("x265enc").is_none() {
|
||||
return;
|
||||
}
|
||||
let config = ProbeConfig::from_request(&OutputDelayProbeRequest {
|
||||
duration_seconds: 3,
|
||||
warmup_seconds: 1,
|
||||
pulse_period_ms: 1_000,
|
||||
pulse_width_ms: 120,
|
||||
event_width_codes: "1".to_string(),
|
||||
audio_delay_us: 0,
|
||||
video_delay_us: 0,
|
||||
})
|
||||
.expect("config");
|
||||
let camera = CameraConfig {
|
||||
output: CameraOutput::Uvc,
|
||||
codec: CameraCodec::Hevc,
|
||||
width: 320,
|
||||
height: 180,
|
||||
fps: 10,
|
||||
hdmi: None,
|
||||
};
|
||||
|
||||
let frames =
|
||||
EncodedProbeFrames::new(&camera, &config, Duration::from_millis(100)).expect("hevc frames");
|
||||
|
||||
assert!(
|
||||
!frames
|
||||
.packet_for_frame(0)
|
||||
.expect("first HEVC frame")
|
||||
.is_empty()
|
||||
);
|
||||
assert!(
|
||||
!frames
|
||||
.packet_for_frame(10)
|
||||
.expect("event HEVC frame")
|
||||
.is_empty()
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn timeline_exports_wall_clock_fields_for_freshness() {
|
||||
let config = ProbeConfig::from_request(&OutputDelayProbeRequest {
|
||||
@ -166,6 +330,56 @@ fn timeline_exports_wall_clock_fields_for_freshness() {
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn timeline_ignores_out_of_range_and_duplicate_marks() {
|
||||
let config = ProbeConfig::from_request(&OutputDelayProbeRequest {
|
||||
duration_seconds: 2,
|
||||
warmup_seconds: 3,
|
||||
pulse_period_ms: 1_000,
|
||||
pulse_width_ms: 120,
|
||||
event_width_codes: "1".to_string(),
|
||||
audio_delay_us: 0,
|
||||
video_delay_us: 0,
|
||||
})
|
||||
.expect("config");
|
||||
assert_eq!(config.event_count(), 0);
|
||||
|
||||
let camera = CameraConfig {
|
||||
output: CameraOutput::Uvc,
|
||||
codec: CameraCodec::Mjpeg,
|
||||
width: 640,
|
||||
height: 480,
|
||||
fps: 20,
|
||||
hdmi: None,
|
||||
};
|
||||
let mut timeline = OutputDelayProbeTimeline::new(&config, &camera, 0);
|
||||
timeline.mark_audio(config.event_slot_by_id(0), 1, 1_000, 1_000_000);
|
||||
timeline.mark_video(config.event_slot_by_id(0), 1, 1_000, 1_000_000);
|
||||
assert!(timeline.events.is_empty());
|
||||
|
||||
let config = ProbeConfig::from_request(&OutputDelayProbeRequest {
|
||||
duration_seconds: 6,
|
||||
warmup_seconds: 1,
|
||||
pulse_period_ms: 1_000,
|
||||
pulse_width_ms: 120,
|
||||
event_width_codes: "1".to_string(),
|
||||
audio_delay_us: 0,
|
||||
video_delay_us: 0,
|
||||
})
|
||||
.expect("config");
|
||||
let mut timeline = OutputDelayProbeTimeline::new(&config, &camera, 0);
|
||||
let slot = config.event_slot_by_id(0);
|
||||
|
||||
timeline.mark_video(slot, 1, 1_000, 1_000_000);
|
||||
timeline.mark_video(slot, 2, 2_000, 2_000_000);
|
||||
timeline.mark_audio(slot, 1, 1_500, 1_500_000);
|
||||
timeline.mark_audio(slot, 2, 2_500, 2_500_000);
|
||||
|
||||
assert_eq!(timeline.events[0].video_seq, Some(1));
|
||||
assert_eq!(timeline.events[0].audio_seq, Some(1));
|
||||
assert_eq!(timeline.events[0].server_feed_delta_ms, Some(0.5));
|
||||
}
|
||||
|
||||
fn rms_i16_le(bytes: &[u8]) -> f64 {
|
||||
let samples = bytes
|
||||
.chunks_exact(2)
|
||||
|
||||
@ -16,14 +16,14 @@ fn camera_config_env_override_prefers_uvc_values() {
|
||||
with_var("LESAVKA_UVC_FPS", Some("24"), || {
|
||||
let cfg = update_camera_config();
|
||||
assert_eq!(cfg.output, CameraOutput::Uvc);
|
||||
assert_eq!(cfg.codec, CameraCodec::Mjpeg);
|
||||
assert_eq!(cfg.codec, CameraCodec::Hevc);
|
||||
assert_eq!(cfg.width, 800);
|
||||
assert_eq!(cfg.height, 600);
|
||||
assert_eq!(cfg.fps, 24);
|
||||
|
||||
let cached = current_camera_config();
|
||||
assert_eq!(cached.output, CameraOutput::Uvc);
|
||||
assert_eq!(cached.codec, CameraCodec::Mjpeg);
|
||||
assert_eq!(cached.codec, CameraCodec::Hevc);
|
||||
assert_eq!(cached.width, 800);
|
||||
assert_eq!(cached.height, 600);
|
||||
assert_eq!(cached.fps, 24);
|
||||
@ -57,7 +57,7 @@ fn uvc_camera_profile_honors_live_attached_descriptor() {
|
||||
|| {
|
||||
let cfg = update_camera_config();
|
||||
assert_eq!(cfg.output, CameraOutput::Uvc);
|
||||
assert_eq!(cfg.codec, CameraCodec::Mjpeg);
|
||||
assert_eq!(cfg.codec, CameraCodec::Hevc);
|
||||
assert_eq!((cfg.width, cfg.height, cfg.fps), (640, 480, 20));
|
||||
},
|
||||
);
|
||||
@ -65,14 +65,37 @@ fn uvc_camera_profile_honors_live_attached_descriptor() {
|
||||
|
||||
#[test]
|
||||
#[serial]
|
||||
fn camera_config_env_override_honors_uvc_codec() {
|
||||
with_var("LESAVKA_CAM_OUTPUT", Some("uvc"), || {
|
||||
with_var("LESAVKA_UVC_CODEC", Some("h264"), || {
|
||||
fn camera_config_env_override_ignores_uvc_descriptor_codec_for_uplink() {
|
||||
temp_env::with_vars(
|
||||
[
|
||||
("LESAVKA_CAM_OUTPUT", Some("uvc")),
|
||||
("LESAVKA_UVC_CODEC", Some("h264")),
|
||||
("LESAVKA_CAM_CODEC", None),
|
||||
("LESAVKA_UPLINK_CAMERA_CODEC", None),
|
||||
],
|
||||
|| {
|
||||
let cfg = update_camera_config();
|
||||
assert_eq!(cfg.output, CameraOutput::Uvc);
|
||||
assert_eq!(cfg.codec, CameraCodec::H264);
|
||||
});
|
||||
});
|
||||
assert_eq!(cfg.codec, CameraCodec::Hevc);
|
||||
},
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[serial]
|
||||
fn camera_config_env_override_honors_explicit_uplink_codec() {
|
||||
temp_env::with_vars(
|
||||
[
|
||||
("LESAVKA_CAM_OUTPUT", Some("uvc")),
|
||||
("LESAVKA_CAM_CODEC", Some("mjpeg")),
|
||||
("LESAVKA_UPLINK_CAMERA_CODEC", Some("h265")),
|
||||
],
|
||||
|| {
|
||||
let cfg = update_camera_config();
|
||||
assert_eq!(cfg.output, CameraOutput::Uvc);
|
||||
assert_eq!(cfg.codec, CameraCodec::Hevc);
|
||||
},
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
|
||||
@ -23,6 +23,32 @@ fn source_mode_selection_prefers_native_modes_without_reencode() {
|
||||
assert_eq!(smaller_mode_request.fps, 60);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn eye_stall_watchdog_is_configurable_without_disabling_first_frame_errors() {
|
||||
temp_env::with_var("LESAVKA_EYE_STALL_WARN_MS", None::<&str>, || {
|
||||
assert_eq!(
|
||||
eye_stall_warn_timeout(),
|
||||
Some(std::time::Duration::from_millis(5_000))
|
||||
);
|
||||
});
|
||||
temp_env::with_var("LESAVKA_EYE_STALL_WARN_MS", Some("0"), || {
|
||||
assert_eq!(eye_stall_warn_timeout(), None);
|
||||
});
|
||||
temp_env::with_var("LESAVKA_EYE_STALL_WARN_MS", Some("1"), || {
|
||||
assert_eq!(
|
||||
eye_stall_warn_timeout(),
|
||||
Some(std::time::Duration::from_millis(500))
|
||||
);
|
||||
});
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn downstream_wall_clock_ms_is_monotonic_enough_for_stall_diagnostics() {
|
||||
let first = wall_clock_ms();
|
||||
std::thread::sleep(std::time::Duration::from_millis(1));
|
||||
assert!(wall_clock_ms() >= first);
|
||||
}
|
||||
|
||||
fn marker_frame(width: i32, height: i32) -> Vec<u8> {
|
||||
let mut rgba = vec![0_u8; (width * height * 4) as usize];
|
||||
let marker = 96;
|
||||
|
||||
@ -77,3 +77,103 @@ fn runtime_prefers_mode_offset_map_over_scalar_fallback() {
|
||||
);
|
||||
});
|
||||
}
|
||||
|
||||
#[test]
|
||||
/// Keeps `runtime_records_client_and_sink_timing_for_upstream_snapshots` explicit because the blind client-to-RCT probe depends on this telemetry to explain freshness losses.
|
||||
/// Inputs are paired camera/microphone timing samples plus sink handoff marks; output is a live snapshot with skew, queue, late, and freeze fields populated.
|
||||
fn runtime_records_client_and_sink_timing_for_upstream_snapshots() {
|
||||
with_clean_offset_env(|| {
|
||||
let runtime = UpstreamMediaRuntime::new();
|
||||
let camera = runtime.activate_camera();
|
||||
let microphone = runtime.activate_microphone();
|
||||
assert!(runtime.is_camera_active(camera.generation));
|
||||
assert!(runtime.is_microphone_active(microphone.generation));
|
||||
|
||||
runtime.set_playout_offsets(12_000, -3_000);
|
||||
assert_eq!(runtime.playout_offsets(), (12_000, -3_000));
|
||||
|
||||
runtime.record_client_timing(
|
||||
UpstreamMediaKind::Camera,
|
||||
UpstreamClientTiming {
|
||||
capture_pts_us: 100_000,
|
||||
send_pts_us: 120_000,
|
||||
queue_depth: 2,
|
||||
queue_age_ms: 20,
|
||||
},
|
||||
);
|
||||
runtime.record_client_timing(
|
||||
UpstreamMediaKind::Microphone,
|
||||
UpstreamClientTiming {
|
||||
capture_pts_us: 106_000,
|
||||
send_pts_us: 130_000,
|
||||
queue_depth: 5,
|
||||
queue_age_ms: 35,
|
||||
},
|
||||
);
|
||||
|
||||
let due = tokio::time::Instant::now() - std::time::Duration::from_millis(2);
|
||||
runtime.mark_video_presented(10_000, due);
|
||||
runtime.mark_audio_presented(11_500, due);
|
||||
runtime.record_video_freeze("test freeze");
|
||||
|
||||
let snapshot = runtime.snapshot();
|
||||
assert_eq!(snapshot.phase, "healing");
|
||||
assert_eq!(snapshot.client_capture_skew_ms, Some(6.0));
|
||||
assert_eq!(snapshot.client_send_skew_ms, Some(10.0));
|
||||
assert_eq!(snapshot.camera_client_queue_age_ms, Some(20.0));
|
||||
assert_eq!(snapshot.microphone_client_queue_age_ms, Some(35.0));
|
||||
assert_eq!(snapshot.last_video_presented_pts_us, Some(10_000));
|
||||
assert_eq!(snapshot.last_audio_presented_pts_us, Some(11_500));
|
||||
assert_eq!(snapshot.planner_skew_ms, Some(1.5));
|
||||
assert_eq!(snapshot.video_freezes, 1);
|
||||
assert_eq!(snapshot.last_reason, "test freeze");
|
||||
assert_eq!(snapshot.client_timing_window_samples, 1);
|
||||
assert!(snapshot.sink_handoff_window_samples >= 1);
|
||||
|
||||
runtime.close_camera(camera.generation);
|
||||
runtime.close_microphone(microphone.generation);
|
||||
assert!(!runtime.is_camera_active(camera.generation));
|
||||
assert!(!runtime.is_microphone_active(microphone.generation));
|
||||
});
|
||||
}
|
||||
|
||||
#[test]
|
||||
/// Keeps `runtime_public_mapping_helpers_cover_legacy_and_bundled_paths` explicit because these helpers are the thin boundary between transport packets and the shared playout clock.
|
||||
/// Inputs are legacy and bundled remote timestamps; output proves both paths map onto fresh monotonic local PTS values.
|
||||
fn runtime_public_mapping_helpers_cover_legacy_and_bundled_paths() {
|
||||
with_clean_offset_env(|| {
|
||||
let runtime = UpstreamMediaRuntime::new();
|
||||
|
||||
assert_eq!(runtime.map_video_pts(1_000, 0), Some(0));
|
||||
assert_eq!(runtime.map_video_pts(1_000, 33_333), Some(33_333));
|
||||
assert_eq!(runtime.map_audio_pts(1_500), Some(500));
|
||||
|
||||
let bundle_epoch = tokio::time::Instant::now() + std::time::Duration::from_millis(10);
|
||||
let decision =
|
||||
runtime.plan_bundled_pts(UpstreamMediaKind::Camera, 2_000, 0, 1_000, bundle_epoch);
|
||||
|
||||
match decision {
|
||||
UpstreamPlanDecision::Play(plan) => {
|
||||
assert!(plan.local_pts_us >= 1_000);
|
||||
assert_eq!(plan.source_lag, std::time::Duration::ZERO);
|
||||
}
|
||||
other => panic!("expected bundled play decision, got {other:?}"),
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
#[test]
|
||||
/// Keeps `runtime_soft_microphone_recovery_cycles_only_the_microphone_generation` explicit because a failed UAC handoff should not disturb active camera playout.
|
||||
/// Inputs are an active camera lease and a soft microphone recovery request; output keeps camera active while cycling microphone state.
|
||||
fn runtime_soft_microphone_recovery_cycles_only_the_microphone_generation() {
|
||||
with_clean_offset_env(|| {
|
||||
let runtime = UpstreamMediaRuntime::new();
|
||||
let camera = runtime.activate_camera();
|
||||
|
||||
runtime.soft_recover_microphone();
|
||||
|
||||
assert!(runtime.is_camera_active(camera.generation));
|
||||
assert!(!runtime.is_microphone_active(1));
|
||||
runtime.close_camera(camera.generation);
|
||||
});
|
||||
}
|
||||
|
||||
@ -107,6 +107,8 @@ pub async fn eye_ball_with_request(
|
||||
_pipeline: gst::Pipeline::new(),
|
||||
#[cfg(not(coverage))]
|
||||
_bus_watch: None,
|
||||
#[cfg(not(coverage))]
|
||||
_stall_watchdog_alive: None,
|
||||
inner: ReceiverStream::new(rx),
|
||||
})
|
||||
}
|
||||
@ -173,6 +175,7 @@ pub async fn eye_ball_with_request(
|
||||
let last_telemetry_sec = Arc::new(AtomicU64::new(0));
|
||||
let packet_seq = Arc::new(AtomicU64::new(0));
|
||||
let first_sample_seen = Arc::new(AtomicBool::new(false));
|
||||
let last_sample_wall_ms = Arc::new(AtomicU64::new(0));
|
||||
|
||||
let queue_buffers = env_u32("LESAVKA_EYE_QUEUE_BUFFERS", 4).max(1);
|
||||
let appsink_buffers = env_u32("LESAVKA_EYE_APPSINK_BUFFERS", 4).max(1);
|
||||
@ -256,6 +259,7 @@ pub async fn eye_ball_with_request(
|
||||
let server_encoder_label_for_cb = server_encoder_label.clone();
|
||||
let server_process_cpu_tenths_for_cb = Arc::clone(&server_process_cpu_tenths);
|
||||
let first_sample_seen_for_cb = Arc::clone(&first_sample_seen);
|
||||
let last_sample_wall_ms_for_cb = Arc::clone(&last_sample_wall_ms);
|
||||
sink.set_callbacks(
|
||||
gst_app::AppSinkCallbacks::builder()
|
||||
.new_sample(move |sink| {
|
||||
@ -264,6 +268,7 @@ pub async fn eye_ball_with_request(
|
||||
let map = buffer.map_readable().map_err(|_| gst::FlowError::Error)?;
|
||||
let is_idr = contains_idr(map.as_slice());
|
||||
first_sample_seen_for_cb.store(true, Ordering::Relaxed);
|
||||
last_sample_wall_ms_for_cb.store(wall_clock_ms(), Ordering::Relaxed);
|
||||
|
||||
static FRAME: AtomicU64 = AtomicU64::new(0);
|
||||
let frame = FRAME.fetch_add(1, Ordering::Relaxed);
|
||||
@ -425,11 +430,47 @@ pub async fn eye_ball_with_request(
|
||||
.await;
|
||||
}
|
||||
});
|
||||
let stall_watchdog_alive = Arc::new(AtomicBool::new(true));
|
||||
if let Some(stall_timeout) = eye_stall_warn_timeout() {
|
||||
let stall_alive = Arc::clone(&stall_watchdog_alive);
|
||||
let stall_first_seen = Arc::clone(&first_sample_seen);
|
||||
let stall_last_sample = Arc::clone(&last_sample_wall_ms);
|
||||
let stall_eye = eye.to_string();
|
||||
tokio::spawn(async move {
|
||||
let stall_ms = stall_timeout.as_millis().min(u64::MAX as u128) as u64;
|
||||
let mut warned_for_sample_ms = 0_u64;
|
||||
loop {
|
||||
sleep(stall_timeout).await;
|
||||
if !stall_alive.load(Ordering::Relaxed) {
|
||||
break;
|
||||
}
|
||||
if !stall_first_seen.load(Ordering::Relaxed) {
|
||||
continue;
|
||||
}
|
||||
let last_sample_ms = stall_last_sample.load(Ordering::Relaxed);
|
||||
if last_sample_ms == 0 {
|
||||
continue;
|
||||
}
|
||||
let idle_ms = wall_clock_ms().saturating_sub(last_sample_ms);
|
||||
if idle_ms >= stall_ms && warned_for_sample_ms != last_sample_ms {
|
||||
warned_for_sample_ms = last_sample_ms;
|
||||
warn!(
|
||||
target:"lesavka_server::video",
|
||||
eye = %stall_eye,
|
||||
idle_ms,
|
||||
stall_warn_ms = stall_ms,
|
||||
"downstream eye stream has produced no samples since the last frame"
|
||||
);
|
||||
}
|
||||
}
|
||||
});
|
||||
}
|
||||
let bus_watch = BusWatchHandle::spawn(bus, eye.to_owned(), tx_for_bus);
|
||||
|
||||
Ok(VideoStream {
|
||||
_pipeline: pipeline,
|
||||
_bus_watch: Some(bus_watch),
|
||||
_stall_watchdog_alive: Some(stall_watchdog_alive),
|
||||
inner: ReceiverStream::new(rx),
|
||||
})
|
||||
}
|
||||
|
||||
@ -13,6 +13,7 @@ use std::os::unix::fs::FileTypeExt;
|
||||
use std::sync::Arc;
|
||||
use std::sync::OnceLock;
|
||||
use std::sync::atomic::{AtomicBool, AtomicU32, AtomicU64, Ordering};
|
||||
use std::time::{SystemTime, UNIX_EPOCH};
|
||||
use tokio::time::{Duration, Instant, sleep};
|
||||
use tokio_stream::wrappers::ReceiverStream;
|
||||
use tonic::Status;
|
||||
@ -48,6 +49,8 @@ pub struct VideoStream {
|
||||
_pipeline: gst::Pipeline,
|
||||
#[cfg(not(coverage))]
|
||||
_bus_watch: Option<BusWatchHandle>,
|
||||
#[cfg(not(coverage))]
|
||||
_stall_watchdog_alive: Option<Arc<AtomicBool>>,
|
||||
inner: ReceiverStream<Result<VideoPacket, Status>>,
|
||||
}
|
||||
|
||||
@ -67,6 +70,9 @@ impl Drop for VideoStream {
|
||||
let _ = self._pipeline.set_state(gst::State::Null);
|
||||
#[cfg(not(coverage))]
|
||||
{
|
||||
if let Some(alive) = self._stall_watchdog_alive.take() {
|
||||
alive.store(false, Ordering::Relaxed);
|
||||
}
|
||||
let _ = self._bus_watch.take();
|
||||
}
|
||||
}
|
||||
@ -151,6 +157,7 @@ impl BusWatchHandle {
|
||||
|
||||
#[cfg(not(coverage))]
|
||||
impl Drop for BusWatchHandle {
|
||||
/// Stops the background bus watcher before joining it so stream teardown does not leak helper threads.
|
||||
fn drop(&mut self) {
|
||||
self.alive.store(false, Ordering::Relaxed);
|
||||
if let Some(join) = self.join.take() {
|
||||
@ -203,6 +210,27 @@ fn eye_device_wait_poll() -> Duration {
|
||||
)
|
||||
}
|
||||
|
||||
/// Returns the warning threshold for midstream eye-capture stalls.
|
||||
///
|
||||
/// The input is `LESAVKA_EYE_STALL_WARN_MS`: `0` disables the watchdog, invalid
|
||||
/// values fall back to five seconds, and very small positive values are clamped
|
||||
/// to 500ms so the diagnostic cannot accidentally flood logs. The output is
|
||||
/// `None` when disabled or a bounded duration used by the downstream watchdog.
|
||||
fn eye_stall_warn_timeout() -> Option<Duration> {
|
||||
let millis = std::env::var("LESAVKA_EYE_STALL_WARN_MS")
|
||||
.ok()
|
||||
.and_then(|value| value.trim().parse::<u64>().ok())
|
||||
.unwrap_or(5_000);
|
||||
(millis > 0).then_some(Duration::from_millis(millis.max(500)))
|
||||
}
|
||||
|
||||
fn wall_clock_ms() -> u64 {
|
||||
SystemTime::now()
|
||||
.duration_since(UNIX_EPOCH)
|
||||
.map(|duration| duration.as_millis().min(u64::MAX as u128) as u64)
|
||||
.unwrap_or_default()
|
||||
}
|
||||
|
||||
pub fn eye_source_profile() -> (u32, u32, u32) {
|
||||
let mode = default_eye_source_mode();
|
||||
(mode.width, mode.height, mode.fps)
|
||||
|
||||
@ -161,6 +161,44 @@ impl HdmiSink {
|
||||
&sink,
|
||||
])?;
|
||||
}
|
||||
CameraCodec::Hevc => {
|
||||
let caps_hevc = gst::Caps::builder("video/x-h265")
|
||||
.field("stream-format", "byte-stream")
|
||||
.field("alignment", "au")
|
||||
.build();
|
||||
src.set_caps(Some(&caps_hevc));
|
||||
let h265parse = gst::ElementFactory::make("h265parse")
|
||||
.property("disable-passthrough", true)
|
||||
.property("config-interval", -1i32)
|
||||
.build()?;
|
||||
let decoder_name = pick_hevc_decoder();
|
||||
let decoder = gst::ElementFactory::make(decoder_name)
|
||||
.build()
|
||||
.with_context(|| format!("building HEVC decoder element {decoder_name}"))?;
|
||||
|
||||
pipeline.add_many([
|
||||
src.upcast_ref(),
|
||||
&queue,
|
||||
&h265parse,
|
||||
&decoder,
|
||||
&rate,
|
||||
&convert,
|
||||
&scale,
|
||||
&capsfilter,
|
||||
&sink,
|
||||
])?;
|
||||
gst::Element::link_many([
|
||||
src.upcast_ref(),
|
||||
&queue,
|
||||
&h265parse,
|
||||
&decoder,
|
||||
&rate,
|
||||
&convert,
|
||||
&scale,
|
||||
&capsfilter,
|
||||
&sink,
|
||||
])?;
|
||||
}
|
||||
CameraCodec::Mjpeg => {
|
||||
let caps_mjpeg = gst::Caps::builder("image/jpeg")
|
||||
.field("parsed", true)
|
||||
|
||||
438
server/src/video_sinks/mjpeg_spool.rs
Normal file
438
server/src/video_sinks/mjpeg_spool.rs
Normal file
@ -0,0 +1,438 @@
|
||||
use std::fs::{self, OpenOptions};
|
||||
use std::io::Write;
|
||||
use std::path::{Path, PathBuf};
|
||||
use std::sync::atomic::{AtomicU64, Ordering};
|
||||
use std::time::{SystemTime, UNIX_EPOCH};
|
||||
|
||||
use gstreamer as gst;
|
||||
use gstreamer_app as gst_app;
|
||||
|
||||
static SPOOL_SEQUENCE: AtomicU64 = AtomicU64::new(1);
|
||||
|
||||
#[derive(Clone, Copy)]
|
||||
pub(super) struct MjpegSpoolTiming {
|
||||
pub profile: &'static str,
|
||||
pub source_pts_us: Option<u64>,
|
||||
pub decoded_pts_us: Option<u64>,
|
||||
}
|
||||
|
||||
impl MjpegSpoolTiming {
|
||||
/// Build metadata for direct MJPEG ingress.
|
||||
///
|
||||
/// Inputs: the upstream packet PTS in microseconds. Output: timing metadata
|
||||
/// labeled as passthrough MJPEG. Why: direct MJPEG and decoded HEVC share
|
||||
/// the same spool file, so future diagnostics need to distinguish them.
|
||||
pub(super) fn mjpeg_passthrough(source_pts_us: u64) -> Self {
|
||||
Self {
|
||||
profile: "mjpeg-passthrough",
|
||||
source_pts_us: Some(source_pts_us),
|
||||
decoded_pts_us: None,
|
||||
}
|
||||
}
|
||||
|
||||
/// Build metadata for decoded HEVC entering the MJPEG UVC helper.
|
||||
///
|
||||
/// Inputs: upstream packet PTS plus the decoded appsink buffer PTS.
|
||||
/// Output: timing metadata labeled as HEVC-decoded MJPEG. Why: the
|
||||
/// remaining HEVC sync jitter appears after transport, so we need a
|
||||
/// low-overhead marker at the decode-to-UVC handoff boundary.
|
||||
pub(super) fn hevc_decoded_mjpeg(source_pts_us: u64, decoded_pts_us: Option<u64>) -> Self {
|
||||
Self {
|
||||
profile: "hevc-decoded-mjpeg",
|
||||
source_pts_us: Some(source_pts_us),
|
||||
decoded_pts_us,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Decide whether the UVC helper file-spool path should own MJPEG emission.
|
||||
///
|
||||
/// Inputs: `LESAVKA_UVC_MJPEG_SPOOL`. Output: true unless explicitly disabled.
|
||||
/// Why: the helper path prevents two processes from fighting over the UVC
|
||||
/// gadget node, while preserving a direct `v4l2sink` fallback for diagnostics.
|
||||
pub(super) fn mjpeg_spool_enabled() -> bool {
|
||||
std::env::var("LESAVKA_UVC_MJPEG_SPOOL")
|
||||
.ok()
|
||||
.map(|value| {
|
||||
let trimmed = value.trim();
|
||||
!(trimmed.eq_ignore_ascii_case("0")
|
||||
|| trimmed.eq_ignore_ascii_case("false")
|
||||
|| trimmed.eq_ignore_ascii_case("no")
|
||||
|| trimmed.eq_ignore_ascii_case("off"))
|
||||
})
|
||||
.unwrap_or(true)
|
||||
}
|
||||
|
||||
/// Resolve the frame path consumed by the UVC helper.
|
||||
///
|
||||
/// Inputs: `LESAVKA_UVC_FRAME_PATH`. Output: filesystem path for the newest
|
||||
/// MJPEG frame. Why: the helper polls a single atomic frame file, so both direct
|
||||
/// MJPEG and decoded HEVC output need to agree on the handoff location.
|
||||
pub(super) fn mjpeg_spool_path() -> PathBuf {
|
||||
std::env::var("LESAVKA_UVC_FRAME_PATH")
|
||||
.map(PathBuf::from)
|
||||
.unwrap_or_else(|_| PathBuf::from("/run/lesavka-uvc-frame.mjpg"))
|
||||
}
|
||||
|
||||
/// Decide whether frame spool metadata should be published.
|
||||
///
|
||||
/// Inputs: `LESAVKA_UVC_FRAME_META`. Output: false unless explicitly enabled.
|
||||
/// Why: the metadata is useful for HEVC boundary diagnostics, but it adds one
|
||||
/// extra atomic sidecar write per frame and should stay opt-in during calls.
|
||||
pub(super) fn mjpeg_spool_metadata_enabled() -> bool {
|
||||
std::env::var("LESAVKA_UVC_FRAME_META")
|
||||
.ok()
|
||||
.map(|value| {
|
||||
let trimmed = value.trim();
|
||||
trimmed.eq_ignore_ascii_case("1")
|
||||
|| trimmed.eq_ignore_ascii_case("true")
|
||||
|| trimmed.eq_ignore_ascii_case("yes")
|
||||
|| trimmed.eq_ignore_ascii_case("on")
|
||||
})
|
||||
.unwrap_or(false)
|
||||
}
|
||||
|
||||
/// Resolve the metadata sidecar path for the UVC helper spool.
|
||||
///
|
||||
/// Inputs: frame path plus `LESAVKA_UVC_FRAME_META_PATH`. Output: sidecar path.
|
||||
/// Why: keeping this path explicit lets capture scripts fetch timing evidence
|
||||
/// without guessing where the virtual webcam helper found the frame.
|
||||
pub(super) fn mjpeg_spool_metadata_path(frame_path: &Path) -> PathBuf {
|
||||
std::env::var("LESAVKA_UVC_FRAME_META_PATH")
|
||||
.map(PathBuf::from)
|
||||
.unwrap_or_else(|_| frame_path.with_extension("mjpg.meta.json"))
|
||||
}
|
||||
|
||||
/// Resolve the optional JSONL metadata log for full-probe diagnostics.
|
||||
///
|
||||
/// Inputs: `LESAVKA_UVC_FRAME_META_LOG_PATH`. Output: an append-only log path
|
||||
/// when configured. Why: a latest-frame sidecar is enough for spot checks, but
|
||||
/// client-to-RCT HEVC probes need the whole decode/spool timing sequence.
|
||||
pub(super) fn mjpeg_spool_metadata_log_path() -> Option<PathBuf> {
|
||||
std::env::var("LESAVKA_UVC_FRAME_META_LOG_PATH")
|
||||
.ok()
|
||||
.map(|value| value.trim().to_string())
|
||||
.filter(|value| !value.is_empty())
|
||||
.map(PathBuf::from)
|
||||
}
|
||||
|
||||
/// Bound how long one HEVC handoff may wait for decoded MJPEG output.
|
||||
///
|
||||
/// Inputs: `LESAVKA_UVC_HEVC_SPOOL_PULL_TIMEOUT_MS`, clamped to 0..=50ms.
|
||||
/// Output: the timeout used by appsink polling.
|
||||
/// Why: decoded frames should be published when they are due, but the video
|
||||
/// handoff worker must not build a WAN-sized backlog while waiting on decode.
|
||||
pub(super) fn decoded_mjpeg_pull_timeout() -> gst::ClockTime {
|
||||
let timeout_ms = std::env::var("LESAVKA_UVC_HEVC_SPOOL_PULL_TIMEOUT_MS")
|
||||
.ok()
|
||||
.and_then(|value| value.trim().parse::<u64>().ok())
|
||||
.unwrap_or(5)
|
||||
.min(50);
|
||||
gst::ClockTime::from_mseconds(timeout_ms)
|
||||
}
|
||||
|
||||
/// Drain the decoded-MJPEG appsink down to its freshest sample.
|
||||
///
|
||||
/// Inputs: the appsink owned by the HEVC-to-MJPEG branch. Output: the newest
|
||||
/// available sample, if any. Why: the UVC helper should see the latest decoded
|
||||
/// frame rather than letting stale decode output accumulate during CPU spikes.
|
||||
#[cfg(not(coverage))]
|
||||
pub(super) fn freshest_mjpeg_sample(sink: &gst_app::AppSink) -> Option<gst::Sample> {
|
||||
let mut newest = sink.try_pull_sample(decoded_mjpeg_pull_timeout());
|
||||
while let Some(sample) = sink.try_pull_sample(gst::ClockTime::ZERO) {
|
||||
newest = Some(sample);
|
||||
}
|
||||
newest
|
||||
}
|
||||
|
||||
/// Current wall-clock time as nanoseconds since the Unix epoch.
///
/// Returns 0 if the system clock reads before the epoch, so metadata
/// rendering never fails on a misconfigured clock.
fn unix_now_ns() -> u128 {
    match SystemTime::now().duration_since(UNIX_EPOCH) {
        Ok(since_epoch) => since_epoch.as_nanos(),
        Err(_) => 0,
    }
}
/// Render an optional number as a JSON fragment: digits, or the literal `null`.
fn json_number_or_null(value: Option<u64>) -> String {
    match value {
        Some(number) => number.to_string(),
        None => "null".to_string(),
    }
}
/// Atomically write a text sidecar beside the current frame.
|
||||
///
|
||||
/// Inputs: a destination path and complete text payload. Output: success or
|
||||
/// filesystem error. Why: the latest-frame metadata sidecar should never be
|
||||
/// observed half-written while RCT probe scripts are collecting artifacts.
|
||||
fn write_atomic_text(path: &Path, data: &str) -> anyhow::Result<()> {
|
||||
if let Some(parent) = path.parent() {
|
||||
fs::create_dir_all(parent)?;
|
||||
}
|
||||
let tmp = path.with_extension(format!("json.{}.tmp", std::process::id()));
|
||||
fs::write(&tmp, data)?;
|
||||
fs::rename(&tmp, path)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Append one timing record to the optional full-probe metadata log.
|
||||
///
|
||||
/// Inputs: a JSONL path and already formatted metadata record. Output: success
|
||||
/// or filesystem error. Why: HEVC/RCT debugging needs every decoded-MJPEG
|
||||
/// handoff timestamp, while the latest sidecar only preserves the newest frame.
|
||||
fn append_metadata_log(path: &Path, record: &str) -> anyhow::Result<()> {
|
||||
if let Some(parent) = path.parent() {
|
||||
fs::create_dir_all(parent)?;
|
||||
}
|
||||
OpenOptions::new()
|
||||
.create(true)
|
||||
.append(true)
|
||||
.open(path)?
|
||||
.write_all(record.as_bytes())?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Render one metadata record for a spooled MJPEG frame.
|
||||
///
|
||||
/// Inputs: a sequence number, frame size, and timing labels. Output: compact
|
||||
/// JSON suitable for sidecar artifacts. Why: keeping the format deterministic
|
||||
/// makes later client-to-RCT scripts able to compare server decode/spool timing
|
||||
/// against final RCT observations without parsing log prose.
|
||||
pub(super) fn format_mjpeg_spool_metadata(
|
||||
sequence: u64,
|
||||
bytes: usize,
|
||||
timing: MjpegSpoolTiming,
|
||||
) -> String {
|
||||
format!(
|
||||
"{{\"schema\":\"lesavka.uvc-mjpeg-spool-meta.v1\",\"sequence\":{},\"profile\":\"{}\",\"bytes\":{},\"source_pts_us\":{},\"decoded_pts_us\":{},\"spool_unix_ns\":{}}}\n",
|
||||
sequence,
|
||||
timing.profile,
|
||||
bytes,
|
||||
json_number_or_null(timing.source_pts_us),
|
||||
json_number_or_null(timing.decoded_pts_us),
|
||||
unix_now_ns()
|
||||
)
|
||||
}
|
||||
|
||||
/// Atomically publish one MJPEG frame plus optional timing metadata.
|
||||
///
|
||||
/// Inputs: destination path, JPEG bytes, and optional timing metadata. Output:
|
||||
/// success or filesystem error. Why: HEVC transport debugging needs to know
|
||||
/// whether residual jitter happens before or after the decoded-MJPEG handoff,
|
||||
/// while the default runtime path should remain identical when metadata is off.
|
||||
pub(super) fn spool_mjpeg_frame_with_timing(
|
||||
path: &Path,
|
||||
data: &[u8],
|
||||
timing: Option<MjpegSpoolTiming>,
|
||||
) -> anyhow::Result<()> {
|
||||
if let Some(parent) = path.parent() {
|
||||
fs::create_dir_all(parent)?;
|
||||
}
|
||||
let tmp = path.with_extension(format!("mjpg.{}.tmp", std::process::id()));
|
||||
fs::write(&tmp, data)?;
|
||||
fs::rename(&tmp, path)?;
|
||||
|
||||
if mjpeg_spool_metadata_enabled()
|
||||
&& let Some(timing) = timing
|
||||
{
|
||||
let sequence = SPOOL_SEQUENCE.fetch_add(1, Ordering::Relaxed);
|
||||
let record = format_mjpeg_spool_metadata(sequence, data.len(), timing);
|
||||
write_atomic_text(&mjpeg_spool_metadata_path(path), &record)?;
|
||||
if let Some(log_path) = mjpeg_spool_metadata_log_path() {
|
||||
append_metadata_log(&log_path, &record)?;
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    // Unit coverage for the MJPEG spool helpers. Env-var-driven behavior is
    // isolated per test via `temp_env`, and filesystem effects use `tempfile`
    // so nothing leaks between tests or onto the host.

    /// Verifies HEVC decoded-frame polling defaults to a freshness-first wait.
    ///
    /// Input: unset timeout env var. Output: 5ms appsink poll timeout. Why:
    /// server-side decode should keep enough patience for normal scheduling
    /// jitter without letting an HEVC backlog accumulate behind UVC playback.
    #[test]
    fn decoded_mjpeg_pull_timeout_defaults_to_short_bounded_wait() {
        temp_env::with_var_unset("LESAVKA_UVC_HEVC_SPOOL_PULL_TIMEOUT_MS", || {
            assert_eq!(
                super::decoded_mjpeg_pull_timeout(),
                gstreamer::ClockTime::from_mseconds(5)
            );
        });
    }

    /// Verifies explicit HEVC spool polling overrides stay bounded.
    ///
    /// Input: zero and oversized timeout values. Output: direct zero polling
    /// and a 50ms safety cap. Why: lab tuning may need aggressive polling, but
    /// no override should recreate the multi-second decoded-frame backlog.
    #[test]
    fn decoded_mjpeg_pull_timeout_allows_fast_poll_and_clamps_slow_waits() {
        // Zero is honored verbatim: a pure non-blocking poll.
        temp_env::with_var("LESAVKA_UVC_HEVC_SPOOL_PULL_TIMEOUT_MS", Some("0"), || {
            assert_eq!(
                super::decoded_mjpeg_pull_timeout(),
                gstreamer::ClockTime::from_mseconds(0)
            );
        });

        // Oversized values clamp to the 50ms safety cap.
        temp_env::with_var("LESAVKA_UVC_HEVC_SPOOL_PULL_TIMEOUT_MS", Some("250"), || {
            assert_eq!(
                super::decoded_mjpeg_pull_timeout(),
                gstreamer::ClockTime::from_mseconds(50)
            );
        });
    }

    /// Verifies spool metadata remains opt-in and path-configurable.
    ///
    /// Input: default and explicit metadata env vars. Output: disabled by
    /// default plus deterministic sidecar path selection. Why: diagnostics must
    /// not add per-frame writes unless the operator asks for timing evidence.
    #[test]
    fn mjpeg_spool_metadata_is_opt_in_and_path_configurable() {
        temp_env::with_var_unset("LESAVKA_UVC_FRAME_META", || {
            assert!(!super::mjpeg_spool_metadata_enabled());
        });
        temp_env::with_var("LESAVKA_UVC_FRAME_META", Some("yes"), || {
            assert!(super::mjpeg_spool_metadata_enabled());
        });

        // Sidecar path: derived from the frame path by default, overridable.
        let frame = std::path::Path::new("/tmp/lesavka-frame.mjpg");
        temp_env::with_var_unset("LESAVKA_UVC_FRAME_META_PATH", || {
            assert_eq!(
                super::mjpeg_spool_metadata_path(frame),
                std::path::PathBuf::from("/tmp/lesavka-frame.mjpg.meta.json")
            );
        });
        temp_env::with_var(
            "LESAVKA_UVC_FRAME_META_PATH",
            Some("/tmp/custom-meta.json"),
            || {
                assert_eq!(
                    super::mjpeg_spool_metadata_path(frame),
                    std::path::PathBuf::from("/tmp/custom-meta.json")
                );
            },
        );

        // Log path: unset and whitespace-only both mean "no log".
        temp_env::with_var_unset("LESAVKA_UVC_FRAME_META_LOG_PATH", || {
            assert_eq!(super::mjpeg_spool_metadata_log_path(), None);
        });
        temp_env::with_var("LESAVKA_UVC_FRAME_META_LOG_PATH", Some(" "), || {
            assert_eq!(super::mjpeg_spool_metadata_log_path(), None);
        });
    }

    /// Verifies metadata records carry enough timing evidence for RCT analysis.
    ///
    /// Input: HEVC-decoded spool timing. Output: JSON fields for source and
    /// decoded PTS. Why: future blind end-to-end probes need to tell whether a
    /// bad RCT result came from transport/decode or from the UVC helper/browser.
    #[test]
    fn mjpeg_spool_metadata_formats_timing_fields() {
        let record = super::format_mjpeg_spool_metadata(
            7,
            1234,
            super::MjpegSpoolTiming::hevc_decoded_mjpeg(42_000, Some(43_000)),
        );

        assert!(record.contains("\"schema\":\"lesavka.uvc-mjpeg-spool-meta.v1\""));
        assert!(record.contains("\"sequence\":7"));
        assert!(record.contains("\"profile\":\"hevc-decoded-mjpeg\""));
        assert!(record.contains("\"bytes\":1234"));
        assert!(record.contains("\"source_pts_us\":42000"));
        assert!(record.contains("\"decoded_pts_us\":43000"));
    }

    /// Verifies direct MJPEG metadata explicitly marks passthrough timing.
    ///
    /// Input: an upstream MJPEG packet PTS. Output: metadata with no decoded
    /// PTS. Why: direct MJPEG ingress must remain distinguishable from HEVC
    /// decode when later RCT timing evidence is compared across profiles.
    #[test]
    fn mjpeg_passthrough_metadata_uses_source_pts_and_null_decode_pts() {
        let record = super::format_mjpeg_spool_metadata(
            8,
            99,
            super::MjpegSpoolTiming::mjpeg_passthrough(55_000),
        );

        assert!(record.contains("\"profile\":\"mjpeg-passthrough\""));
        assert!(record.contains("\"source_pts_us\":55000"));
        assert!(record.contains("\"decoded_pts_us\":null"));
    }

    /// Verifies frame spooling preserves default behavior unless metadata is enabled.
    ///
    /// Input: a temporary frame path plus disabled metadata env vars. Output:
    /// the frame file is atomically written and no sidecar appears. Why:
    /// diagnostics must not alter the normal UVC helper handoff during calls.
    #[test]
    fn spool_mjpeg_frame_writes_frame_without_default_sidecar() {
        let dir = tempfile::tempdir().expect("tempdir");
        // Nested path also exercises the create_dir_all branch.
        let frame = dir.path().join("nested").join("frame.mjpg");
        let meta = frame.with_extension("mjpg.meta.json");

        temp_env::with_var_unset("LESAVKA_UVC_FRAME_META", || {
            super::spool_mjpeg_frame_with_timing(
                &frame,
                b"jpeg-bytes",
                Some(super::MjpegSpoolTiming::mjpeg_passthrough(10)),
            )
            .expect("spool frame");
        });

        assert_eq!(std::fs::read(&frame).expect("read frame"), b"jpeg-bytes");
        // Timing was provided but metadata is off, so no sidecar may appear.
        assert!(!meta.exists());
    }

    /// Verifies enabled frame metadata is atomically written beside the frame.
    ///
    /// Input: explicit metadata enablement, custom sidecar path, and HEVC
    /// timing. Output: both frame and sidecar are published. Why: this gives
    /// client-to-RCT probes a precise server decode/spool boundary without
    /// requiring invasive server logging.
    #[test]
    fn spool_mjpeg_frame_writes_enabled_sidecar_with_timing() {
        let dir = tempfile::tempdir().expect("tempdir");
        let frame = dir.path().join("frame.mjpg");
        let meta = dir.path().join("frame-meta.json");
        let log = dir.path().join("frames.jsonl");

        temp_env::with_vars(
            [
                ("LESAVKA_UVC_FRAME_META", Some("on")),
                (
                    "LESAVKA_UVC_FRAME_META_PATH",
                    Some(meta.to_str().expect("utf8 path")),
                ),
                (
                    "LESAVKA_UVC_FRAME_META_LOG_PATH",
                    Some(log.to_str().expect("utf8 path")),
                ),
            ],
            || {
                super::spool_mjpeg_frame_with_timing(
                    &frame,
                    b"decoded-jpeg",
                    Some(super::MjpegSpoolTiming::hevc_decoded_mjpeg(
                        100_000,
                        Some(101_000),
                    )),
                )
                .expect("spool frame with metadata");
            },
        );

        assert_eq!(std::fs::read(&frame).expect("read frame"), b"decoded-jpeg");
        let record = std::fs::read_to_string(&meta).expect("read metadata");
        assert!(record.contains("\"profile\":\"hevc-decoded-mjpeg\""));
        assert!(record.contains("\"bytes\":12"));
        assert!(record.contains("\"source_pts_us\":100000"));
        assert!(record.contains("\"decoded_pts_us\":101000"));

        // The JSONL log must hold exactly the one record from this spool call.
        let log_record = std::fs::read_to_string(&log).expect("read metadata log");
        assert_eq!(log_record.lines().count(), 1);
        assert!(log_record.contains("\"profile\":\"hevc-decoded-mjpeg\""));
    }
}
|
||||
Some files were not shown because too many files have changed in this diff Show More
Loading…
x
Reference in New Issue
Block a user