use super::{UpstreamMediaRuntime, play, runtime_without_offsets};
use serial_test::serial;
use std::time::Duration;

/// Activating the first stream of either kind opens session 1, and the
/// counterpart stream joins that same shared session; both grants report
/// active under their generation values.
#[test]
#[serial(upstream_media_runtime)]
fn first_stream_starts_a_new_shared_session() {
    let rt = UpstreamMediaRuntime::new();
    let cam = rt.activate_camera();
    let mic = rt.activate_microphone();

    assert_eq!(cam.session_id, 1);
    assert_eq!(mic.session_id, 1);
    assert!(rt.is_camera_active(cam.generation));
    assert!(rt.is_microphone_active(mic.generation));
}

/// Re-activating the same kind stays inside the existing session but
/// deactivates the earlier grant's generation in favor of the new one.
#[test]
#[serial(upstream_media_runtime)]
fn replacing_one_kind_keeps_the_session_but_preempts_the_old_owner() {
    let rt = UpstreamMediaRuntime::new();
    let old_mic = rt.activate_microphone();
    let new_mic = rt.activate_microphone();

    assert_eq!(old_mic.session_id, new_mic.session_id);
    // The superseded generation must no longer count as active.
    assert!(!rt.is_microphone_active(old_mic.generation));
    assert!(rt.is_microphone_active(new_mic.generation));
}

/// Once every stream in session 1 is closed, the next activation is
/// expected to land in a fresh session (id 2).
#[test]
#[serial(upstream_media_runtime)]
fn closing_the_last_stream_resets_the_next_session_anchor() {
    let rt = UpstreamMediaRuntime::new();
    let cam = rt.activate_camera();
    let mic = rt.activate_microphone();

    rt.close_camera(cam.generation);
    rt.close_microphone(mic.generation);

    let reopened = rt.activate_camera();
    assert_eq!(reopened.session_id, 2);
}

/// With both kinds active, the first packet of one kind is held
/// (`AwaitingPair`) until the other kind also produces a packet; once
/// paired, both map to local pts 0 and share the same due time.
#[test]
#[serial(upstream_media_runtime)]
fn first_packets_wait_for_the_counterpart_before_pairing() {
    let rt = runtime_without_offsets();
    let _cam = rt.activate_camera();
    let _mic = rt.activate_microphone();

    // Video alone cannot establish the shared base.
    assert!(matches!(
        rt.plan_video_pts(1_000_000, 16_666),
        super::UpstreamPlanDecision::AwaitingPair
    ));

    let paired_audio = play(rt.plan_audio_pts(1_000_000));
    let paired_video = play(rt.plan_video_pts(1_000_000, 16_666));

    assert_eq!(paired_audio.local_pts_us, 0);
    assert_eq!(paired_video.local_pts_us, 0);
    assert_eq!(paired_audio.due_at, paired_video.due_at);
}

/// With the camera startup grace configured to 250 ms, early packets of
/// both kinds keep waiting; only once video reaches the grace window does
/// the shared base get established, and audio lines up 10 ms after it.
#[test]
#[serial(upstream_media_runtime)]
fn overlap_waits_for_camera_startup_grace_before_establishing_the_shared_base() {
    temp_env::with_var(
        "LESAVKA_UPSTREAM_CAMERA_STARTUP_GRACE_MS",
        Some("250"),
        || {
            let rt = UpstreamMediaRuntime::new();
            let _cam = rt.activate_camera();
            let _mic = rt.activate_microphone();

            // Everything inside the grace window keeps waiting.
            assert!(matches!(
                rt.plan_video_pts(1_000_000, 16_666),
                super::UpstreamPlanDecision::AwaitingPair
            ));
            assert!(matches!(
                rt.plan_audio_pts(1_000_000),
                super::UpstreamPlanDecision::AwaitingPair
            ));
            assert!(matches!(
                rt.plan_video_pts(1_200_000, 16_666),
                super::UpstreamPlanDecision::AwaitingPair
            ));

            let ready_video = play(rt.plan_video_pts(1_250_000, 16_666));
            let ready_audio = play(rt.plan_audio_pts(1_260_000));

            assert_eq!(ready_video.local_pts_us, 0);
            // Audio pts is 10 ms after the video base (1_260_000 - 1_250_000).
            assert_eq!(ready_audio.local_pts_us, 10_000);
        },
    );
}

/// Even after more wall-clock time than the configured 20 ms playout delay
/// has elapsed, a lone audio stream must not start one-sided playout while
/// the camera is still within its 250 ms startup grace.
#[test]
#[serial(upstream_media_runtime)]
fn pairing_window_does_not_expire_into_one_sided_playout_while_camera_warms_up() {
    temp_env::with_var(
        "LESAVKA_UPSTREAM_CAMERA_STARTUP_GRACE_MS",
        Some("250"),
        || {
            temp_env::with_var("LESAVKA_UPSTREAM_PLAYOUT_DELAY_MS", Some("20"), || {
                let rt = UpstreamMediaRuntime::new();
                let _cam = rt.activate_camera();
                let _mic = rt.activate_microphone();

                assert!(matches!(
                    rt.plan_video_pts(1_000_000, 16_666),
                    super::UpstreamPlanDecision::AwaitingPair
                ));
                assert!(matches!(
                    rt.plan_audio_pts(1_000_000),
                    super::UpstreamPlanDecision::AwaitingPair
                ));

                // Sleep past the 20 ms playout delay; audio must still wait.
                std::thread::sleep(Duration::from_millis(30));
                assert!(matches!(
                    rt.plan_audio_pts(1_010_000),
                    super::UpstreamPlanDecision::AwaitingPair
                ));

                let ready_video = play(rt.plan_video_pts(1_250_000, 16_666));
                let ready_audio = play(rt.plan_audio_pts(1_260_000));
                assert_eq!(ready_video.local_pts_us, 0);
                assert_eq!(ready_audio.local_pts_us, 10_000);
            });
        },
    );
}

/// Audio packets whose remote pts predates the shared base established by
/// video are rejected with `DropBeforeOverlap`; packets at or after the
/// base map onto the shared local timeline.
#[test]
#[serial(upstream_media_runtime)]
fn overlap_pairing_drops_leading_packets_before_the_shared_base() {
    let rt = UpstreamMediaRuntime::new();
    let _cam = rt.activate_camera();
    let _mic = rt.activate_microphone();

    // Audio arrives first and is parked.
    assert!(matches!(
        rt.plan_audio_pts(1_000_000),
        super::UpstreamPlanDecision::AwaitingPair
    ));

    // Video anchors the shared base at remote pts 1_300_000.
    let base_video = play(rt.plan_video_pts(1_300_000, 16_666));
    assert_eq!(base_video.local_pts_us, 0);

    // The stale audio packet from before the base is dropped.
    assert!(matches!(
        rt.plan_audio_pts(1_000_000),
        super::UpstreamPlanDecision::DropBeforeOverlap
    ));

    let later_audio = play(rt.plan_audio_pts(1_310_000));
    let later_video = play(rt.plan_video_pts(1_333_333, 16_666));
    assert_eq!(later_audio.local_pts_us, 10_000);
    assert_eq!(later_video.local_pts_us, 33_333);
}

/// A repeated remote pts must not stall the local clock: the second video
/// packet with the same remote pts is advanced by its duration (16_666 us)
/// so per-kind local pts stays strictly monotonic.
#[test]
#[serial(upstream_media_runtime)]
fn shared_clock_keeps_each_kind_monotonic_when_remote_pts_repeat() {
    let rt = UpstreamMediaRuntime::new();
    let _cam = rt.activate_camera();
    let _mic = rt.activate_microphone();

    assert!(matches!(
        rt.plan_video_pts(50_000, 16_666),
        super::UpstreamPlanDecision::AwaitingPair
    ));
    let _paired_audio = play(rt.plan_audio_pts(50_000));

    let initial = play(rt.plan_video_pts(50_000, 16_666));
    let duplicate = play(rt.plan_video_pts(50_000, 16_666));
    assert_eq!(initial.local_pts_us, 0);
    assert_eq!(duplicate.local_pts_us, 16_666);
}

/// Closing with a superseded generation is a no-op: the current grant
/// stays active. Closing the live generation (via the kind-generic
/// `close`) tears the session down, so the next activation opens session 2.
#[test]
#[serial(upstream_media_runtime)]
fn close_ignores_superseded_generation_values() {
    let rt = UpstreamMediaRuntime::new();
    let stale = rt.activate_camera();
    let live = rt.activate_camera();

    // Stale generation: ignored, the live grant survives.
    rt.close_camera(stale.generation);
    assert!(rt.is_camera_active(live.generation));

    // Live generation through the generic entry point: actually closes.
    rt.close(super::UpstreamMediaKind::Camera, live.generation);
    let reopened = rt.activate_camera();
    assert_eq!(reopened.session_id, 2);
}