// Extraction artifact (original file metadata): 800 lines, 30 KiB, Rust.

use super::{UpstreamMediaRuntime, play, runtime_without_offsets};
use serial_test::serial;
use std::time::Duration;
/// Runs `f` with an INFO-level `tracing` subscriber installed for its
/// duration, routed through the test writer so log output is captured
/// per-test instead of leaking to stdout.
fn with_info_tracing<T>(f: impl FnOnce() -> T) -> T {
    let info_subscriber = tracing_subscriber::fmt()
        .with_test_writer()
        .with_max_level(tracing::Level::INFO)
        .finish();
    tracing::subscriber::with_default(info_subscriber, f)
}
#[test]
#[serial(upstream_media_runtime)]
fn shared_playout_epoch_is_reused_across_audio_and_video() {
    // Audio and video must share a single playout epoch: once pairing
    // completes, both streams map the same remote pts to local zero and to
    // the same due_at instant.
    let runtime = runtime_without_offsets();
    let _camera = runtime.activate_camera();
    let _microphone = runtime.activate_microphone();
    // Video alone is held back until its audio counterpart arrives.
    assert!(matches!(
        runtime.plan_video_pts(1_000_000, 16_666),
        super::UpstreamPlanDecision::AwaitingPair
    ));
    let audio_first = play(runtime.plan_audio_pts(1_000_000));
    let video_first = play(runtime.plan_video_pts(1_000_000, 16_666));
    let audio_next = play(runtime.plan_audio_pts(1_010_000));
    // Both streams anchor local time at zero on the shared epoch.
    assert_eq!(video_first.local_pts_us, 0);
    assert_eq!(audio_first.local_pts_us, 0);
    assert_eq!(video_first.due_at, audio_first.due_at);
    // A 10 ms advance in remote pts becomes exactly a 10 ms due_at gap.
    assert_eq!(
        audio_next
            .due_at
            .saturating_duration_since(audio_first.due_at),
        Duration::from_micros(10_000)
    );
}
#[test]
#[serial(upstream_media_runtime)]
fn bundled_media_uses_client_epoch_without_pairing_wait() {
    // Bundled packets carry a client-provided epoch, so the planner schedules
    // them directly on that timeline instead of waiting for A/V pairing.
    temp_env::with_var("LESAVKA_UPSTREAM_PLAYOUT_DELAY_MS", Some("0"), || {
        let runtime = runtime_without_offsets();
        let _camera = runtime.activate_camera();
        let _microphone = runtime.activate_microphone();
        let epoch = tokio::time::Instant::now();
        // Audio at the bundled base pts (1_000_000) anchors local zero.
        let audio = play(runtime.plan_bundled_pts(
            super::UpstreamMediaKind::Microphone,
            1_000_000,
            1,
            1_000_000,
            epoch,
        ));
        // Video 33_333 us later must keep exactly that relative offset.
        let video = play(runtime.plan_bundled_pts(
            super::UpstreamMediaKind::Camera,
            1_033_333,
            16_666,
            1_000_000,
            epoch,
        ));
        assert_eq!(audio.local_pts_us, 0);
        assert_eq!(video.local_pts_us, 33_333);
        assert_eq!(
            video.due_at.saturating_duration_since(audio.due_at),
            Duration::from_micros(33_333)
        );
        // The snapshot records the latest remote pts per stream plus a
        // human-readable reason for how the epoch was established.
        let snapshot = runtime.snapshot();
        assert_eq!(snapshot.latest_microphone_remote_pts_us, Some(1_000_000));
        assert_eq!(snapshot.latest_camera_remote_pts_us, Some(1_033_333));
        assert_eq!(
            snapshot.last_reason,
            "client-bundled upstream media epoch established"
        );
    });
}
#[test]
#[serial(upstream_media_runtime)]
fn bundled_media_ignores_legacy_static_calibration_offsets_by_default() {
    // A large static video offset installed via set_playout_offsets must NOT
    // affect bundled planning unless the dedicated bundled-offset env vars are
    // set: identical remote pts should produce identical local pts and due_at.
    temp_env::with_var("LESAVKA_UPSTREAM_PLAYOUT_DELAY_MS", Some("0"), || {
        temp_env::with_var_unset("LESAVKA_UPSTREAM_BUNDLED_AUDIO_PLAYOUT_OFFSET_US", || {
            temp_env::with_var_unset("LESAVKA_UPSTREAM_BUNDLED_VIDEO_PLAYOUT_OFFSET_US", || {
                let runtime = UpstreamMediaRuntime::new();
                // Legacy static calibration: 1.09 s of extra video delay.
                runtime.set_playout_offsets(1_090_000, 0);
                let _camera = runtime.activate_camera();
                let _microphone = runtime.activate_microphone();
                let epoch = tokio::time::Instant::now();
                let audio = play(runtime.plan_bundled_pts(
                    super::UpstreamMediaKind::Microphone,
                    1_000_000,
                    1,
                    1_000_000,
                    epoch,
                ));
                let video = play(runtime.plan_bundled_pts(
                    super::UpstreamMediaKind::Camera,
                    1_000_000,
                    16_666,
                    1_000_000,
                    epoch,
                ));
                // Same remote pts => same schedule; the legacy offset is ignored.
                assert_eq!(audio.due_at, video.due_at);
                assert_eq!(audio.local_pts_us, video.local_pts_us);
            });
        });
    });
}
#[test]
#[serial(upstream_media_runtime)]
fn bundled_media_reanchors_future_wait_inside_one_second_freshness_budget() {
    // A client epoch 20 s in the future would schedule playout far outside the
    // live window; the planner must reanchor it to within the configured
    // freshness budget (1 s here) and count the reanchor.
    temp_env::with_var("LESAVKA_UPSTREAM_PLAYOUT_DELAY_MS", Some("350"), || {
        temp_env::with_var("LESAVKA_UPSTREAM_MAX_LIVE_LAG_MS", Some("1000"), || {
            let runtime = runtime_without_offsets();
            let _camera = runtime.activate_camera();
            let _microphone = runtime.activate_microphone();
            let now = tokio::time::Instant::now();
            // "Stale" in the sense of unusably far in the future.
            let stale_epoch = now + Duration::from_secs(20);
            let video = play(runtime.plan_bundled_pts(
                super::UpstreamMediaKind::Camera,
                1_000_000,
                16_666,
                1_000_000,
                stale_epoch,
            ));
            assert!(
                video
                    .due_at
                    .saturating_duration_since(tokio::time::Instant::now())
                    <= Duration::from_secs(1),
                "bundled playout must not preserve a many-second future wait"
            );
            assert!(runtime.snapshot().freshness_reanchors >= 1);
        });
    });
}
#[test]
#[serial(upstream_media_runtime)]
fn pairing_window_holds_one_sided_playout_by_default() {
    // Even with a zero playout delay, video arriving with no audio counterpart
    // must be held back: the default policy forbids one-sided playout while
    // pairing is still pending.
    temp_env::with_var("LESAVKA_UPSTREAM_PLAYOUT_DELAY_MS", Some("0"), || {
        let runtime = UpstreamMediaRuntime::new();
        let _camera = runtime.activate_camera();
        // Two consecutive video frames, still no audio: both keep waiting.
        for video_pts in [1_000_000, 1_016_666] {
            let decision = runtime.plan_video_pts(video_pts, 16_666);
            assert!(matches!(
                decision,
                super::UpstreamPlanDecision::AwaitingPair
            ));
        }
    });
}
#[test]
#[serial(upstream_media_runtime)]
fn explicit_override_allows_one_sided_playout_for_compatibility() {
    // Legacy deployments may disable the paired-startup requirement; with the
    // override active, video alone establishes the epoch and plays right away.
    temp_env::with_var("LESAVKA_UPSTREAM_REQUIRE_PAIRED_STARTUP", Some("0"), || {
        temp_env::with_var("LESAVKA_UPSTREAM_PLAYOUT_DELAY_MS", Some("0"), || {
            let runtime = UpstreamMediaRuntime::new();
            let _camera = runtime.activate_camera();
            let initial = play(runtime.plan_video_pts(1_000_000, 16_666));
            let follow_up = play(runtime.plan_video_pts(1_016_666, 16_666));
            // The first frame anchors local time at zero; the next frame keeps
            // its 16_666 us delta from the anchor.
            assert_eq!(initial.local_pts_us, 0);
            assert_eq!(follow_up.local_pts_us, 16_666);
        });
    });
}
#[test]
#[serial(upstream_media_runtime)]
fn overdue_pairing_refreshes_waiting_anchor_before_late_counterpart_arrives() {
    // While pairing is pending, a newer video frame must refresh the waiting
    // anchor so that when audio finally arrives the overlap point is computed
    // from the freshest video; frames behind the overlap are then dropped.
    temp_env::with_var(
        "LESAVKA_UPSTREAM_CAMERA_STARTUP_GRACE_MS",
        Some("0"),
        || {
            temp_env::with_var("LESAVKA_UPSTREAM_PLAYOUT_DELAY_MS", Some("0"), || {
                let runtime = runtime_without_offsets();
                let _camera = runtime.activate_camera();
                let _microphone = runtime.activate_microphone();
                // An early frame and a much later frame both wait for audio;
                // the later one (pts 9_000_000) becomes the new anchor.
                assert!(matches!(
                    runtime.plan_video_pts(1_000_000, 16_666),
                    super::UpstreamPlanDecision::AwaitingPair
                ));
                assert!(matches!(
                    runtime.plan_video_pts(9_000_000, 16_666),
                    super::UpstreamPlanDecision::AwaitingPair
                ));
                // Audio arrives slightly past the video anchor: overlap starts
                // at 9_010_000, so a 9_000_000 video frame is pre-overlap.
                let audio = play(runtime.plan_audio_pts(9_010_000));
                assert!(matches!(
                    runtime.plan_video_pts(9_000_000, 16_666),
                    super::UpstreamPlanDecision::DropBeforeOverlap
                ));
                let video = play(runtime.plan_video_pts(9_016_666, 16_666));
                // Audio anchors local zero; video keeps its delta past overlap.
                assert_eq!(audio.local_pts_us, 0);
                assert_eq!(video.local_pts_us, 6_666);
            });
        },
    );
}
#[test]
#[serial(upstream_media_runtime)]
fn map_wrappers_hide_unpaired_and_pre_overlap_packets() {
    // The map_* convenience wrappers collapse every non-Play planner decision
    // into None, exposing only a playable local pts.
    let runtime = UpstreamMediaRuntime::new();
    let _camera = runtime.activate_camera();
    let _microphone = runtime.activate_microphone();
    // Video before pairing completes is hidden.
    assert!(runtime.map_video_pts(1_000_000, 16_666).is_none());
    // Audio at the pairing pts plays at local zero...
    assert_eq!(runtime.map_audio_pts(1_000_000), Some(0));
    // ...while audio behind the overlap point stays hidden.
    assert!(runtime.map_audio_pts(999_999).is_none());
}
#[test]
#[serial(upstream_media_runtime)]
fn shared_playout_trace_path_keeps_planned_pts_stable() {
    // Enabling the timing-trace env var exercises the trace code path; it must
    // not perturb the planned local pts values.
    temp_env::with_var("LESAVKA_UPSTREAM_TIMING_TRACE", Some("1"), || {
        let runtime = UpstreamMediaRuntime::new();
        let _camera = runtime.activate_camera();
        let _microphone = runtime.activate_microphone();
        assert!(matches!(
            runtime.plan_video_pts(1_000_000, 16_666),
            super::UpstreamPlanDecision::AwaitingPair
        ));
        let audio = play(runtime.plan_audio_pts(1_000_000));
        let video = play(runtime.plan_video_pts(1_000_000, 16_666));
        // Same anchoring result as the non-traced path.
        assert_eq!(video.local_pts_us, 0);
        assert_eq!(audio.local_pts_us, 0);
    });
}
#[test]
#[serial(upstream_media_runtime)]
fn catastrophic_lateness_reanchors_the_shared_playout_epoch() {
    // With a 20 ms playout budget and a 5 ms reanchor threshold, sleeping 30 ms
    // makes the next packet catastrophically late; the planner must move the
    // shared epoch forward so both streams are scheduled into the future again.
    temp_env::with_var("LESAVKA_UPSTREAM_PLAYOUT_DELAY_MS", Some("20"), || {
        temp_env::with_var("LESAVKA_UPSTREAM_REANCHOR_LATE_MS", Some("5"), || {
            let runtime = UpstreamMediaRuntime::new();
            runtime.set_playout_offsets(0, 0);
            let _camera = runtime.activate_camera();
            let _microphone = runtime.activate_microphone();
            assert!(matches!(
                runtime.plan_video_pts(1_000_000, 16_666),
                super::UpstreamPlanDecision::AwaitingPair
            ));
            let _audio_first = play(runtime.plan_audio_pts(1_000_000));
            let _video_first = play(runtime.plan_video_pts(1_000_000, 16_666));
            // Sleep past both the playout budget and the reanchor threshold.
            std::thread::sleep(Duration::from_millis(30));
            let recovered_audio = play(runtime.plan_audio_pts(1_000_000));
            assert!(
                recovered_audio.due_at > tokio::time::Instant::now(),
                "recovered packet should be scheduled back into the future"
            );
            assert!(
                recovered_audio.late_by <= Duration::from_millis(1),
                "recovered packet should no longer be catastrophically late"
            );
            // The reanchor applies to the SHARED epoch, so video recovers too.
            let recovered_video = play(runtime.plan_video_pts(1_016_666, 16_666));
            assert!(
                recovered_video.due_at > tokio::time::Instant::now(),
                "shared epoch recovery should also move video back into the future"
            );
        });
    });
}
#[test]
#[serial(upstream_media_runtime)]
fn overlap_anchor_gets_a_fresh_playout_budget_when_pairing_finishes_late() {
    // If pairing takes 15 ms of a 20 ms budget to converge, the budget must be
    // measured from the moment pairing FINISHES, not from the first arrival —
    // otherwise the first playable packets would start already near-due.
    temp_env::with_var("LESAVKA_UPSTREAM_PLAYOUT_DELAY_MS", Some("20"), || {
        let runtime = UpstreamMediaRuntime::new();
        runtime.set_playout_offsets(0, 0);
        let _camera = runtime.activate_camera();
        let _microphone = runtime.activate_microphone();
        assert!(matches!(
            runtime.plan_video_pts(1_000_000, 16_666),
            super::UpstreamPlanDecision::AwaitingPair
        ));
        // Simulate a slow counterpart: most of the budget elapses first.
        std::thread::sleep(Duration::from_millis(15));
        let before_pair = tokio::time::Instant::now();
        let audio_first = play(runtime.plan_audio_pts(1_000_000));
        let video_first = play(runtime.plan_video_pts(1_000_000, 16_666));
        assert!(
            audio_first.due_at.saturating_duration_since(before_pair) >= Duration::from_millis(15),
            "audio should keep most of the configured playout budget after late pairing"
        );
        assert!(
            video_first.due_at.saturating_duration_since(before_pair) >= Duration::from_millis(15),
            "video should keep most of the configured playout budget after late pairing"
        );
    });
}
#[test]
#[serial(upstream_media_runtime)]
fn catastrophic_lateness_reanchors_repeatedly_to_preserve_freshness() {
    // A single reanchor is not enough: if lateness recurs, the planner must
    // heal again each time and count every freshness reanchor for diagnostics.
    temp_env::with_var("LESAVKA_UPSTREAM_PLAYOUT_DELAY_MS", Some("20"), || {
        temp_env::with_var("LESAVKA_UPSTREAM_REANCHOR_LATE_MS", Some("5"), || {
            let runtime = UpstreamMediaRuntime::new();
            runtime.set_playout_offsets(0, 0);
            let _camera = runtime.activate_camera();
            let _microphone = runtime.activate_microphone();
            assert!(matches!(
                runtime.plan_video_pts(1_000_000, 16_666),
                super::UpstreamPlanDecision::AwaitingPair
            ));
            let _audio_first = play(runtime.plan_audio_pts(1_000_000));
            let _video_first = play(runtime.plan_video_pts(1_000_000, 16_666));
            // First lateness episode: sleep past the 5 ms reanchor threshold.
            std::thread::sleep(Duration::from_millis(30));
            let first_recovered = play(runtime.plan_audio_pts(1_000_000));
            assert!(first_recovered.due_at > tokio::time::Instant::now());
            assert!(first_recovered.late_by <= Duration::from_millis(1));
            // Second episode: the planner must heal again, not just once.
            std::thread::sleep(Duration::from_millis(30));
            let second_recovered = play(runtime.plan_audio_pts(1_000_001));
            assert!(second_recovered.due_at > tokio::time::Instant::now());
            assert!(
                second_recovered.late_by <= Duration::from_millis(1),
                "0.17 planner must keep healing instead of preserving stale timing"
            );
            assert!(
                runtime.snapshot().freshness_reanchors >= 2,
                "repeated freshness reanchors should be counted for diagnostics"
            );
        });
    });
}
#[test]
#[serial(upstream_media_runtime)]
fn catastrophic_lateness_reanchors_even_after_startup_window() {
    // Freshness healing must also apply mid-session (after a 130 ms gap, well
    // past startup), not only during the initial pairing window.
    temp_env::with_var("LESAVKA_UPSTREAM_PLAYOUT_DELAY_MS", Some("20"), || {
        temp_env::with_var("LESAVKA_UPSTREAM_REANCHOR_LATE_MS", Some("5"), || {
            let runtime = UpstreamMediaRuntime::new();
            runtime.set_playout_offsets(0, 0);
            let _camera = runtime.activate_camera();
            let _microphone = runtime.activate_microphone();
            assert!(matches!(
                runtime.plan_video_pts(1_000_000, 16_666),
                super::UpstreamPlanDecision::AwaitingPair
            ));
            let _audio_first = play(runtime.plan_audio_pts(1_000_000));
            let _video_first = play(runtime.plan_video_pts(1_000_000, 16_666));
            // 130 ms of real time passes while remote pts advances only 100 ms.
            std::thread::sleep(Duration::from_millis(130));
            let late_audio = play(runtime.plan_audio_pts(1_100_000));
            // Local pts mapping is unchanged by the reanchor.
            assert_eq!(late_audio.local_pts_us, 100_000);
            assert!(
                late_audio.late_by <= Duration::from_millis(1),
                "0.17 planner should heal mid-session lateness instead of preserving drift"
            );
            assert!(
                late_audio.due_at > tokio::time::Instant::now(),
                "mid-session freshness healing should push due_at back into the live budget"
            );
            assert!(runtime.snapshot().freshness_reanchors >= 1);
        });
    });
}
#[test]
#[serial(upstream_media_runtime)]
fn stale_audio_behind_the_freshest_audio_frontier_is_dropped() {
    // Once audio has advanced to pts 2_000_000, an audio packet 100 ms behind
    // that frontier (beyond the 50 ms max live lag) must be dropped as stale.
    temp_env::with_var("LESAVKA_UPSTREAM_PLAYOUT_DELAY_MS", Some("0"), || {
        temp_env::with_var("LESAVKA_UPSTREAM_MAX_LIVE_LAG_MS", Some("50"), || {
            let runtime = runtime_without_offsets();
            let _camera = runtime.activate_camera();
            let _microphone = runtime.activate_microphone();
            assert!(matches!(
                runtime.plan_video_pts(1_000_000, 16_666),
                super::UpstreamPlanDecision::AwaitingPair
            ));
            let _audio = play(runtime.plan_audio_pts(1_000_000));
            let _video = play(runtime.plan_video_pts(1_000_000, 16_666));
            // Advance the audio frontier a full second.
            let _fresh_audio = play(runtime.plan_audio_pts(2_000_000));
            assert!(matches!(
                runtime.plan_audio_pts(1_900_000),
                super::UpstreamPlanDecision::DropStale("packet exceeded max live lag")
            ));
            // The drop is counted for diagnostics.
            assert_eq!(runtime.snapshot().stale_audio_drops, 1);
        });
    });
}
#[test]
#[serial(upstream_media_runtime)]
fn stale_video_behind_the_freshest_video_frontier_is_dropped() {
    // Mirror of the stale-audio case: a video frame 100 ms behind the video
    // frontier (beyond the 50 ms max live lag) is dropped, and a dropped video
    // frame is additionally counted as a freeze.
    temp_env::with_var("LESAVKA_UPSTREAM_PLAYOUT_DELAY_MS", Some("0"), || {
        temp_env::with_var("LESAVKA_UPSTREAM_MAX_LIVE_LAG_MS", Some("50"), || {
            let runtime = runtime_without_offsets();
            let _camera = runtime.activate_camera();
            let _microphone = runtime.activate_microphone();
            assert!(matches!(
                runtime.plan_video_pts(1_000_000, 16_666),
                super::UpstreamPlanDecision::AwaitingPair
            ));
            let _audio = play(runtime.plan_audio_pts(1_000_000));
            let _video = play(runtime.plan_video_pts(1_000_000, 16_666));
            // Advance the video frontier a full second.
            let _fresh_video = play(runtime.plan_video_pts(2_000_000, 16_666));
            assert!(matches!(
                runtime.plan_video_pts(1_900_000, 16_666),
                super::UpstreamPlanDecision::DropStale("packet exceeded max live lag")
            ));
            let snapshot = runtime.snapshot();
            assert_eq!(snapshot.stale_video_drops, 1);
            assert_eq!(snapshot.video_freezes, 1);
        });
    });
}
#[test]
#[serial(upstream_media_runtime)]
fn video_too_far_behind_audio_master_is_dropped_and_counted_as_freeze() {
    // Video is slaved to the audio master, but only to audio that has actually
    // been PRESENTED (via mark_audio_presented) — merely planned-ahead audio
    // must not freeze video. Once the master is presented, video more than the
    // pair-slack (50 ms) behind it is dropped and counted as a freeze.
    temp_env::with_var("LESAVKA_UPSTREAM_PLAYOUT_DELAY_MS", Some("0"), || {
        temp_env::with_var("LESAVKA_UPSTREAM_PAIR_SLACK_US", Some("50000"), || {
            let runtime = runtime_without_offsets();
            let _camera = runtime.activate_camera();
            let _microphone = runtime.activate_microphone();
            assert!(matches!(
                runtime.plan_video_pts(1_000_000, 16_666),
                super::UpstreamPlanDecision::AwaitingPair
            ));
            let audio = play(runtime.plan_audio_pts(1_000_000));
            let _video = play(runtime.plan_video_pts(1_000_000, 16_666));
            runtime.mark_audio_presented(audio.local_pts_us, audio.due_at);
            // Audio 200 ms ahead is planned but not yet presented.
            let audio_master = play(runtime.plan_audio_pts(1_200_000));
            assert!(
                matches!(
                    runtime.plan_video_pts(1_100_000, 16_666),
                    super::UpstreamPlanDecision::Play(_)
                ),
                "future planned audio alone must not freeze video before UAC presentation"
            );
            // After presentation, video >50 ms behind the master is dropped.
            runtime.mark_audio_presented(audio_master.local_pts_us, audio_master.due_at);
            assert!(matches!(
                runtime.plan_video_pts(1_116_666, 16_666),
                super::UpstreamPlanDecision::DropStale(
                    "video frame was too far behind audio master"
                )
            ));
            let snapshot = runtime.snapshot();
            assert_eq!(snapshot.skew_video_drops, 1);
            assert_eq!(snapshot.video_freezes, 1);
        });
    });
}
#[test]
#[serial(upstream_media_runtime)]
fn configured_video_delay_does_not_make_the_planner_freeze_video() {
    // A deliberate 350 ms static video offset must be excluded from the
    // audio-master skew check: video lagging by exactly the configured offset
    // is on schedule, not frozen.
    temp_env::with_var("LESAVKA_UPSTREAM_PLAYOUT_DELAY_MS", Some("0"), || {
        temp_env::with_var("LESAVKA_UPSTREAM_PAIR_SLACK_US", Some("50000"), || {
            let runtime = UpstreamMediaRuntime::new();
            // Intentional calibration: video delayed 350 ms behind audio.
            runtime.set_playout_offsets(350_000, 0);
            let _camera = runtime.activate_camera();
            let _microphone = runtime.activate_microphone();
            assert!(matches!(
                runtime.plan_video_pts(1_000_000, 16_666),
                super::UpstreamPlanDecision::AwaitingPair
            ));
            let _audio = play(runtime.plan_audio_pts(1_000_000));
            let _video = play(runtime.plan_video_pts(1_000_000, 16_666));
            let _audio_master = play(runtime.plan_audio_pts(1_300_000));
            // 200 ms behind the master but within offset + slack: still plays.
            let video = play(runtime.plan_video_pts(1_100_000, 16_666));
            assert_eq!(video.local_pts_us, 100_000);
            let snapshot = runtime.snapshot();
            assert_eq!(snapshot.skew_video_drops, 0);
            assert_eq!(snapshot.video_freezes, 0);
        });
    });
}
#[test]
#[serial(upstream_media_runtime)]
fn browser_visible_video_delay_is_not_reanchored_away() {
    // A deliberate 1.09 s video offset exceeds the 1 s max live lag, but it is
    // CONFIGURED delay, not drift — the freshness reanchor must leave it alone
    // and no stale-video drops should occur.
    temp_env::with_var("LESAVKA_UPSTREAM_PLAYOUT_DELAY_MS", Some("350"), || {
        temp_env::with_var("LESAVKA_UPSTREAM_MAX_LIVE_LAG_MS", Some("1000"), || {
            let runtime = runtime_without_offsets();
            runtime.set_playout_offsets(1_090_000, 0);
            let _camera = runtime.activate_camera();
            let _microphone = runtime.activate_microphone();
            assert!(matches!(
                runtime.plan_video_pts(1_000_000, 16_666),
                super::UpstreamPlanDecision::AwaitingPair
            ));
            let audio = play(runtime.plan_audio_pts(1_000_000));
            let video = play(runtime.plan_video_pts(1_000_000, 16_666));
            assert!(
                video.due_at.saturating_duration_since(audio.due_at) >= Duration::from_millis(1080),
                "intentional browser-visible video delay must survive the freshness reanchor"
            );
            let snapshot = runtime.snapshot();
            assert_eq!(snapshot.freshness_reanchors, 0);
            assert_eq!(snapshot.stale_video_drops, 0);
        });
    });
}
#[test]
#[serial(upstream_media_runtime)]
fn paired_startup_times_out_instead_of_waiting_forever() {
    // With a 1 ms startup timeout and no audio counterpart ever arriving,
    // continued video planning must fail the startup instead of holding
    // AwaitingPair forever; the failure is reflected in the snapshot.
    temp_env::with_var("LESAVKA_UPSTREAM_PLAYOUT_DELAY_MS", Some("0"), || {
        temp_env::with_var("LESAVKA_UPSTREAM_STARTUP_TIMEOUT_MS", Some("1"), || {
            let runtime = UpstreamMediaRuntime::new();
            runtime.set_playout_offsets(0, 0);
            let _camera = runtime.activate_camera();
            assert!(matches!(
                runtime.plan_video_pts(1_000_000, 16_666),
                super::UpstreamPlanDecision::AwaitingPair
            ));
            // Sleep past the 1 ms startup timeout.
            std::thread::sleep(Duration::from_millis(3));
            assert!(matches!(
                runtime.plan_video_pts(1_016_666, 16_666),
                super::UpstreamPlanDecision::StartupFailed(
                    "paired upstream startup did not converge before timeout"
                )
            ));
            let snapshot = runtime.snapshot();
            assert_eq!(snapshot.phase, "failed");
            assert_eq!(snapshot.startup_timeouts, 1);
        });
    });
}
#[test]
#[serial(upstream_media_runtime)]
fn planner_snapshot_tracks_presented_playheads_and_skew() {
    // After both streams are paired, played, and marked presented, the
    // snapshot must report the "live" phase, both presented playheads, and a
    // zero planner skew (identical local pts on both streams).
    let runtime = runtime_without_offsets();
    let _camera = runtime.activate_camera();
    let _microphone = runtime.activate_microphone();
    assert!(matches!(
        runtime.plan_video_pts(1_000_000, 16_666),
        super::UpstreamPlanDecision::AwaitingPair
    ));
    let audio = play(runtime.plan_audio_pts(1_000_000));
    let video = play(runtime.plan_video_pts(1_000_000, 16_666));
    runtime.mark_audio_presented(audio.local_pts_us, audio.due_at);
    runtime.mark_video_presented(video.local_pts_us, video.due_at);
    let snapshot = runtime.snapshot();
    assert_eq!(snapshot.phase, "live");
    assert_eq!(snapshot.last_audio_presented_pts_us, Some(0));
    assert_eq!(snapshot.last_video_presented_pts_us, Some(0));
    assert_eq!(snapshot.planner_skew_ms, Some(0.0));
}
#[test]
#[serial(upstream_media_runtime)]
fn planner_snapshot_tracks_sink_handoff_timing_windows() {
    // Presenting audio first and video ~1 ms later, both against a due_at
    // already 5 ms in the past, must surface positive handoff skew and
    // positive sink lateness in both the instantaneous and p95 metrics.
    let runtime = runtime_without_offsets();
    let _camera = runtime.activate_camera();
    let _microphone = runtime.activate_microphone();
    // A due_at 5 ms ago; checked_sub can fail near process start, in which
    // case we fall back to "now".
    let due_at = tokio::time::Instant::now()
        .checked_sub(Duration::from_millis(5))
        .unwrap_or_else(tokio::time::Instant::now);
    runtime.mark_audio_presented(123_000, due_at);
    // Ensure the video handoff is measurably after the audio handoff.
    std::thread::sleep(Duration::from_millis(1));
    runtime.mark_video_presented(123_000, due_at);
    let snapshot = runtime.snapshot();
    assert!(
        snapshot.sink_handoff_skew_ms.is_some_and(|skew| skew > 0.0),
        "video was handed to its sink after audio"
    );
    assert!(
        snapshot
            .sink_handoff_abs_skew_p95_ms
            .is_some_and(|skew| skew > 0.0),
        "the rolling handoff window should include the audio/video handoff gap"
    );
    assert!(
        snapshot.camera_sink_late_ms.is_some_and(|late| late > 0.0),
        "handoff after due_at should be reported as positive lateness"
    );
    assert!(
        snapshot
            .microphone_sink_late_p95_ms
            .is_some_and(|late| late > 0.0),
        "audio sink lateness should be retained in the rolling window"
    );
}
#[test]
#[serial(upstream_media_runtime)]
fn sink_handoff_window_pairs_by_due_time_not_offset_local_pts() {
    // Audio and video here carry very different local pts (offset-compensated
    // streams do), but share the same due_at — the handoff window must pair
    // samples by due time, so one paired sample and a skew value still appear.
    let runtime = runtime_without_offsets();
    let _camera = runtime.activate_camera();
    let _microphone = runtime.activate_microphone();
    let due_at = tokio::time::Instant::now()
        .checked_sub(Duration::from_millis(5))
        .unwrap_or_else(tokio::time::Instant::now);
    // 1.1 s apart in local pts, identical due_at.
    runtime.mark_audio_presented(1_200_000, due_at);
    std::thread::sleep(Duration::from_millis(1));
    runtime.mark_video_presented(100_000, due_at);
    let snapshot = runtime.snapshot();
    assert_eq!(snapshot.sink_handoff_window_samples, 1);
    assert!(
        snapshot.sink_handoff_skew_ms.is_some(),
        "offset-compensated streams should still produce handoff evidence when their due times match"
    );
}
#[test]
#[serial(upstream_media_runtime)]
fn planner_snapshot_tracks_client_timing_sidecar_metrics() {
    // Feeds one camera and one microphone client-timing sidecar sample (camera
    // received first, ~2 ms earlier) and checks every derived snapshot metric:
    // capture/send skews, queue ages, their p95s, and server receive skew/age.
    let runtime = runtime_without_offsets();
    // Camera sample: captured at 1_060_000, sent 20 ms later.
    runtime.record_client_timing(
        super::UpstreamMediaKind::Camera,
        super::UpstreamClientTiming {
            capture_pts_us: 1_060_000,
            send_pts_us: 1_080_000,
            queue_depth: 2,
            queue_age_ms: 20,
        },
    );
    // Ensure the mic sample is received measurably AFTER the camera sample.
    std::thread::sleep(Duration::from_millis(2));
    runtime.record_client_timing(
        super::UpstreamMediaKind::Microphone,
        super::UpstreamClientTiming {
            capture_pts_us: 1_000_000,
            send_pts_us: 1_030_000,
            queue_depth: 3,
            queue_age_ms: 30,
        },
    );
    let snapshot = runtime.snapshot();
    // Skews are camera minus microphone: 1_060_000-1_000_000 and
    // 1_080_000-1_030_000, reported in milliseconds.
    assert_eq!(snapshot.client_capture_skew_ms, Some(60.0));
    assert_eq!(snapshot.client_send_skew_ms, Some(50.0));
    assert_eq!(snapshot.client_capture_abs_skew_p95_ms, Some(60.0));
    assert_eq!(snapshot.client_send_abs_skew_p95_ms, Some(50.0));
    assert_eq!(snapshot.camera_client_queue_age_ms, Some(20.0));
    assert_eq!(snapshot.microphone_client_queue_age_ms, Some(30.0));
    assert_eq!(snapshot.camera_client_queue_age_p95_ms, Some(20.0));
    assert_eq!(snapshot.microphone_client_queue_age_p95_ms, Some(30.0));
    assert!(
        snapshot
            .server_receive_skew_ms
            .is_some_and(|skew| skew < 0.0),
        "camera was received before microphone, so camera-minus-mic receive skew should be negative"
    );
    assert!(
        snapshot
            .server_receive_abs_skew_p95_ms
            .is_some_and(|skew| skew > 0.0),
        "server receive jitter should be retained as an absolute p95"
    );
    assert!(
        snapshot
            .camera_server_receive_age_ms
            .zip(snapshot.microphone_server_receive_age_ms)
            .is_some_and(|(camera_age, microphone_age)| camera_age >= microphone_age),
        "the earlier camera receive sample should be at least as old as the later mic sample"
    );
}
#[test]
#[serial(upstream_media_runtime)]
fn planner_pairs_client_timing_by_nearby_send_time_not_latest_packet() {
    // Skew must be computed against the microphone sample whose send time is
    // nearest the camera sample (1_010_000), not simply the most recently
    // received microphone packet (2_500_000).
    let runtime = runtime_without_offsets();
    // Helper: record a sidecar sample with capture == send == `pts_us`.
    let report = |kind, pts_us| {
        runtime.record_client_timing(
            kind,
            super::UpstreamClientTiming {
                capture_pts_us: pts_us,
                send_pts_us: pts_us,
                queue_depth: 1,
                queue_age_ms: 5,
            },
        );
    };
    report(super::UpstreamMediaKind::Camera, 1_000_000);
    report(super::UpstreamMediaKind::Microphone, 1_010_000);
    // A much later mic packet must not displace the near-in-time pairing.
    report(super::UpstreamMediaKind::Microphone, 2_500_000);
    let snapshot = runtime.snapshot();
    // Camera minus mic over the nearby pair: -10 ms, |.| p95 of 10 ms.
    assert_eq!(snapshot.client_capture_skew_ms, Some(-10.0));
    assert_eq!(snapshot.client_send_skew_ms, Some(-10.0));
    assert_eq!(snapshot.client_capture_abs_skew_p95_ms, Some(10.0));
    assert_eq!(snapshot.client_send_abs_skew_p95_ms, Some(10.0));
}
#[test]
#[serial(upstream_media_runtime)]
fn default_runtime_covers_video_map_play_path() {
    // A Default-constructed runtime must behave like new(): video waits for
    // pairing, and once audio arrives, map_video_pts yields a playable pts.
    let runtime = UpstreamMediaRuntime::default();
    let _camera = runtime.activate_camera();
    let _microphone = runtime.activate_microphone();
    let unpaired_video = runtime.plan_video_pts(1_000_000, 16_666);
    assert!(matches!(
        unpaired_video,
        super::UpstreamPlanDecision::AwaitingPair
    ));
    let _audio = play(runtime.plan_audio_pts(1_000_000));
    // The map wrapper now exposes the playable local pts (anchored at zero).
    assert_eq!(runtime.map_video_pts(1_000_000, 16_666), Some(0));
}
#[tokio::test(flavor = "current_thread")]
#[serial(upstream_media_runtime)]
async fn wait_for_audio_master_returns_false_after_sync_grace_has_already_passed() {
    // A deadline 400 ms in the past means the sync grace window has already
    // elapsed; the wait must report failure immediately rather than block.
    let runtime = UpstreamMediaRuntime::new();
    let _camera = runtime.activate_camera();
    let _microphone = runtime.activate_microphone();
    // checked_sub can fail very close to process start; fall back to "now",
    // which is equally expired from the waiter's perspective.
    let expired_deadline = tokio::time::Instant::now()
        .checked_sub(Duration::from_millis(400))
        .unwrap_or_else(tokio::time::Instant::now);
    let master_reached = runtime
        .wait_for_audio_master(123_456, expired_deadline)
        .await;
    assert!(!master_reached);
}
#[test]
#[serial(upstream_media_runtime)]
fn timing_trace_paths_emit_overlap_and_dropbeforeoverlap_details() {
    // With tracing enabled at INFO, exercise the DropBeforeOverlap path both
    // before and after video establishes the overlap anchor, so the trace
    // emission code for those branches runs. Assertions pin the decisions;
    // the trace output itself is captured by the test subscriber.
    temp_env::with_var("LESAVKA_UPSTREAM_TIMING_TRACE", Some("1"), || {
        with_info_tracing(|| {
            let runtime = UpstreamMediaRuntime::new();
            let _camera = runtime.activate_camera();
            let _microphone = runtime.activate_microphone();
            // Video arrives first and waits; its pts sets the overlap anchor.
            assert!(matches!(
                runtime.plan_video_pts(1_300_000, 16_666),
                super::UpstreamPlanDecision::AwaitingPair
            ));
            // Audio 300 ms behind the anchor is pre-overlap, before pairing...
            assert!(matches!(
                runtime.plan_audio_pts(1_000_000),
                super::UpstreamPlanDecision::DropBeforeOverlap
            ));
            let _video = play(runtime.plan_video_pts(1_300_000, 16_666));
            // ...and still pre-overlap after video has started playing.
            assert!(matches!(
                runtime.plan_audio_pts(1_000_000),
                super::UpstreamPlanDecision::DropBeforeOverlap
            ));
        });
    });
}