fix: harden mirrored av probe detection

This commit is contained in:
Brad Stein 2026-05-02 21:40:45 -03:00
parent 060e09336e
commit d7aa38b1c1
14 changed files with 1006 additions and 77 deletions

View File

@ -612,3 +612,21 @@ sink handoff p95 near 240ms.
- [x] Add regression coverage for default-disabled blind healing and noisy sink-handoff refusal.
- [ ] Re-run the normal probe-calibrate-confirm flow; `calibration_source` should remain non-blind unless the server was explicitly started with blind healing.
- [ ] If the probe still produces only one or two visual events while blind metrics stay stable, move the next fix to stimulus/browser/probe detection instead of transport timing.
## 0.17.33 Probe Detection Robustness Checklist
Context: the 0.17.32 mirrored run proved hidden blind healing stayed off and calibration remained
stable, but the external browser probe still produced too few pairs. The capture path is now limited
by analyzer robustness: the webcam sees the screen plus room background, and the microphone hears the
stimulus plus environmental noise.
- [x] Treat probe pairing as the top priority before applying more calibration logic.
- [x] Replace whole-frame color/brightness averaging with adaptive video ROI detection that follows the changing stimulus region.
- [x] Add regression coverage for a small flashing screen region inside a larger static frame.
- [x] Add tone-aware audio detection using the stimulus frequency palette so steady hum/noise is less likely to become a pulse.
- [x] Add regression coverage for test-tone pulses under strong low-frequency background hum.
- [x] Add coded-video fallbacks for overexposed screen captures: pulse-shaped color filtering, brightness fallback, and duplicate-frame normalization.
- [x] Make the local stimulus more probe-friendly by defaulting to kiosk mode and darker saturated colors.
- [x] Generate a `manual-review/index.html` with embedded segment captures so runs are easy to inspect by eye.
- [ ] Re-run the mirrored probe and confirm pair counts rise enough for calibration-ready evidence.
- [ ] If pair counts improve but p95 remains high, move next to server sink handoff jitter and late-run queue pressure.

6
Cargo.lock generated
View File

@ -1652,7 +1652,7 @@ checksum = "09edd9e8b54e49e587e4f6295a7d29c3ea94d469cb40ab8ca70b288248a81db2"
[[package]]
name = "lesavka_client"
version = "0.17.32"
version = "0.17.33"
dependencies = [
"anyhow",
"async-stream",
@ -1686,7 +1686,7 @@ dependencies = [
[[package]]
name = "lesavka_common"
version = "0.17.32"
version = "0.17.33"
dependencies = [
"anyhow",
"base64",
@ -1698,7 +1698,7 @@ dependencies = [
[[package]]
name = "lesavka_server"
version = "0.17.32"
version = "0.17.33"
dependencies = [
"anyhow",
"base64",

View File

@ -4,7 +4,7 @@ path = "src/main.rs"
[package]
name = "lesavka_client"
version = "0.17.32"
version = "0.17.33"
edition = "2024"
[dependencies]

View File

@ -6,7 +6,7 @@ mod report;
#[cfg(test)]
pub(super) mod test_support;
use anyhow::{Result, bail};
use anyhow::{Context, Result, bail};
use std::path::Path;
use media_extract::{
@ -14,7 +14,8 @@ use media_extract::{
};
use onset_detection::{
DEFAULT_AUDIO_SAMPLE_RATE_HZ, correlate_coded_segments, correlate_segments,
detect_audio_segments, detect_color_coded_video_segments, detect_video_segments,
detect_audio_segments, detect_coded_audio_segments, detect_color_coded_video_segments,
detect_video_segments,
};
pub use onset_detection::{detect_audio_onsets, detect_video_onsets};
@ -30,29 +31,51 @@ pub fn analyze_capture(
options: &SyncAnalysisOptions,
) -> Result<SyncAnalysisReport> {
let raw_timestamps = extract_video_timestamps(capture_path)?;
let video_segments = if options.event_width_codes.is_empty() {
let (video_segments, coded_video_events) = if options.event_width_codes.is_empty() {
let brightness = extract_video_brightness(capture_path)?;
let timestamps = reconcile_video_timestamps(raw_timestamps, brightness.len())?;
detect_video_segments(&timestamps, &brightness)?
(detect_video_segments(&timestamps, &brightness)?, false)
} else {
let colors = extract_video_colors(capture_path)?;
let timestamps = reconcile_video_timestamps(raw_timestamps, colors.len())?;
detect_color_coded_video_segments(
let timestamps = reconcile_video_timestamps(raw_timestamps.clone(), colors.len())?;
match detect_color_coded_video_segments(
&timestamps,
&colors,
&options.event_width_codes,
options.pulse_width_s,
) {
Ok(segments) => (segments, true),
Err(color_error) => {
let brightness = extract_video_brightness(capture_path)?;
let timestamps = reconcile_video_timestamps(raw_timestamps, brightness.len())?;
(
detect_video_segments(&timestamps, &brightness).with_context(|| {
format!("color-coded video pulse detection failed: {color_error}")
})?,
false,
)
}
}
};
let audio_samples = extract_audio_samples(capture_path)?;
let audio_segments = if options.event_width_codes.is_empty() {
detect_audio_segments(
&audio_samples,
DEFAULT_AUDIO_SAMPLE_RATE_HZ,
options.audio_window_ms,
)?
} else {
detect_coded_audio_segments(
&audio_samples,
DEFAULT_AUDIO_SAMPLE_RATE_HZ,
options.audio_window_ms,
&options.event_width_codes,
options.pulse_width_s,
)?
};
let audio_samples = extract_audio_samples(capture_path)?;
let audio_segments = detect_audio_segments(
&audio_samples,
DEFAULT_AUDIO_SAMPLE_RATE_HZ,
options.audio_window_ms,
)?;
if options.event_width_codes.is_empty() {
if !coded_video_events {
correlate_segments(
&video_segments,
&audio_segments,

View File

@ -5,7 +5,13 @@ use std::process::Command;
use super::onset_detection::VideoColorFrame;
const VIDEO_ANALYSIS_SIDE_PX: usize = 32;
const VIDEO_ANALYSIS_SIDE_PX: usize = 64;
const VIDEO_ANALYSIS_FPS: usize = 60;
const MIN_ADAPTIVE_ROI_PIXELS: usize = 16;
const MAX_ADAPTIVE_ROI_FRACTION: f64 = 0.35;
const ADAPTIVE_ROI_SCORE_FRACTION: f64 = 0.30;
const MIN_RGB_ROI_SCORE: f64 = 24.0;
const MIN_GRAY_ROI_SCORE: f64 = 8.0;
#[derive(Debug, Deserialize)]
struct ProbeFrameResponse {
@ -60,7 +66,8 @@ pub(super) fn extract_video_brightness(capture_path: &Path) -> Result<Vec<u8>> {
.arg("0:v:0")
.arg("-vf")
.arg(format!(
"scale={side}:{side}:flags=area,format=gray",
"fps={fps},scale={side}:{side}:flags=area,format=gray",
fps = VIDEO_ANALYSIS_FPS,
side = VIDEO_ANALYSIS_SIDE_PX
))
.arg("-f")
@ -84,11 +91,10 @@ pub(super) fn extract_video_brightness(capture_path: &Path) -> Result<Vec<u8>> {
}
let extracted_frames = output.len() / frame_pixels;
Ok(output
.chunks_exact(frame_pixels)
.take(extracted_frames)
.map(summarize_frame_brightness)
.collect())
Ok(summarize_gray_frames_with_adaptive_roi(
output.chunks_exact(frame_pixels).take(extracted_frames),
frame_pixels,
))
}
pub(super) fn extract_video_colors(capture_path: &Path) -> Result<Vec<VideoColorFrame>> {
@ -103,7 +109,8 @@ pub(super) fn extract_video_colors(capture_path: &Path) -> Result<Vec<VideoColor
.arg("0:v:0")
.arg("-vf")
.arg(format!(
"scale={side}:{side}:flags=area,format=rgb24",
"fps={fps},scale={side}:{side}:flags=area,format=rgb24",
fps = VIDEO_ANALYSIS_FPS,
side = VIDEO_ANALYSIS_SIDE_PX
))
.arg("-f")
@ -127,11 +134,10 @@ pub(super) fn extract_video_colors(capture_path: &Path) -> Result<Vec<VideoColor
}
let extracted_frames = output.len() / frame_bytes;
Ok(output
.chunks_exact(frame_bytes)
.take(extracted_frames)
.map(summarize_frame_color)
.collect())
Ok(summarize_rgb_frames_with_adaptive_roi(
output.chunks_exact(frame_bytes).take(extracted_frames),
VIDEO_ANALYSIS_SIDE_PX * VIDEO_ANALYSIS_SIDE_PX,
))
}
pub(super) fn extract_audio_samples(capture_path: &Path) -> Result<Vec<i16>> {
@ -175,18 +181,57 @@ pub(super) fn run_command(command: &mut Command, description: &str) -> Result<Ve
Ok(output.stdout)
}
fn summarize_frame_brightness(frame: &[u8]) -> u8 {
let mean = frame.iter().map(|value| u64::from(*value)).sum::<u64>() / frame.len().max(1) as u64;
/// Summarizes each grayscale frame to a single brightness value, restricted to
/// an adaptive ROI computed once over the whole frame sequence.
fn summarize_gray_frames_with_adaptive_roi<'a>(
    frames: impl Iterator<Item = &'a [u8]>,
    pixel_count: usize,
) -> Vec<u8> {
    let collected: Vec<&[u8]> = frames.collect();
    // One mask for the whole run; `None` means "use the full frame".
    let roi_mask = adaptive_gray_roi_mask(&collected, pixel_count);
    let mut means = Vec::with_capacity(collected.len());
    for frame in &collected {
        means.push(summarize_frame_brightness(frame, roi_mask.as_deref()));
    }
    means
}
/// Summarizes each RGB frame to a single mean color, restricted to an adaptive
/// ROI computed once over the whole frame sequence.
fn summarize_rgb_frames_with_adaptive_roi<'a>(
    frames: impl Iterator<Item = &'a [u8]>,
    pixel_count: usize,
) -> Vec<VideoColorFrame> {
    let collected: Vec<&[u8]> = frames.collect();
    // One mask for the whole run; `None` means "use the full frame".
    let roi_mask = adaptive_rgb_roi_mask(&collected, pixel_count);
    let mut summaries = Vec::with_capacity(collected.len());
    for frame in &collected {
        summaries.push(summarize_frame_color(frame, roi_mask.as_deref()));
    }
    summaries
}
/// Mean brightness of the pixels selected by `mask` (all pixels when `mask` is
/// `None`). Mask entries beyond the frame length count as unselected. When the
/// mask selects nothing, falls back to the mean over the whole frame; an empty
/// frame yields 0.
fn summarize_frame_brightness(frame: &[u8], mask: Option<&[bool]>) -> u8 {
    let in_roi = |index: usize| match mask {
        Some(mask) => mask.get(index).copied().unwrap_or(false),
        None => true,
    };
    let (mut sum, mut count) = frame
        .iter()
        .enumerate()
        .filter(|(index, _)| in_roi(*index))
        .fold((0u64, 0u64), |(sum, count), (_, value)| {
            (sum + u64::from(*value), count + 1)
        });
    // Degenerate mask: average the whole frame instead of dividing by zero.
    if count == 0 {
        sum = frame.iter().map(|value| u64::from(*value)).sum();
        count = frame.len().max(1) as u64;
    }
    (sum / count).min(u64::from(u8::MAX)) as u8
}
fn summarize_frame_color(frame: &[u8]) -> VideoColorFrame {
fn summarize_frame_color(frame: &[u8], mask: Option<&[bool]>) -> VideoColorFrame {
let mut r_sum = 0u64;
let mut g_sum = 0u64;
let mut b_sum = 0u64;
let mut selected = 0u64;
for pixel in frame.chunks_exact(3) {
for (index, pixel) in frame.chunks_exact(3).enumerate() {
if !mask.is_none_or(|mask| mask.get(index).copied().unwrap_or(false)) {
continue;
}
let r = pixel[0];
let g = pixel[1];
let b = pixel[2];
@ -201,13 +246,26 @@ fn summarize_frame_color(frame: &[u8]) -> VideoColorFrame {
}
if selected == 0 {
selected = (frame.len() / 3).max(1) as u64;
for (index, pixel) in frame.chunks_exact(3).enumerate() {
if !mask.is_none_or(|mask| mask.get(index).copied().unwrap_or(false)) {
continue;
}
r_sum += u64::from(pixel[0]);
g_sum += u64::from(pixel[1]);
b_sum += u64::from(pixel[2]);
selected += 1;
}
}
if selected == 0 {
for pixel in frame.chunks_exact(3) {
r_sum += u64::from(pixel[0]);
g_sum += u64::from(pixel[1]);
b_sum += u64::from(pixel[2]);
selected += 1;
}
}
selected = selected.max(1);
VideoColorFrame {
r: (r_sum / selected).min(u64::from(u8::MAX)) as u8,
@ -216,6 +274,187 @@ fn summarize_frame_color(frame: &[u8]) -> VideoColorFrame {
}
}
/// Builds a per-pixel ROI mask for grayscale frames by scoring each pixel's
/// brightness span across all frames (max - min), damped for bright-baseline
/// pixels via `dark_roi_factor`. Returns `None` when there are fewer than two
/// frames, zero pixels, or no pixel score clears `MIN_GRAY_ROI_SCORE`.
fn adaptive_gray_roi_mask(frames: &[&[u8]], pixel_count: usize) -> Option<Vec<bool>> {
    // Temporal change needs at least two frames to be observable.
    if frames.len() < 2 || pixel_count == 0 {
        return None;
    }
    let mut scores = vec![0.0; pixel_count];
    for pixel_index in 0..pixel_count {
        let mut min = u8::MAX;
        let mut max = u8::MIN;
        for frame in frames {
            // NOTE(review): assumes every frame has at least `pixel_count`
            // bytes; callers feed `chunks_exact` output — confirm if reused.
            let value = frame[pixel_index];
            min = min.min(value);
            max = max.max(value);
        }
        // Flicker span weighted down where the baseline is bright, so
        // overexposed regions are less likely to enter the ROI.
        scores[pixel_index] = f64::from(max.saturating_sub(min)) * dark_roi_factor(min);
    }
    adaptive_roi_mask_from_scores(&scores, MIN_GRAY_ROI_SCORE)
}
/// Builds a per-pixel ROI mask for RGB frames.
///
/// Each pixel is scored from three signals observed across all frames:
/// - per-channel span (max - min for R, G and B),
/// - luma span (weighted double in the combined score),
/// - the best palette-match score seen at that pixel, which boosts pixels that
///   ever showed a stimulus color.
/// The combined score is damped for bright-baseline pixels via
/// `dark_roi_factor` so overexposed areas are deprioritized.
///
/// Returns `None` when fewer than two frames or zero pixels are available, or
/// when no pixel clears `MIN_RGB_ROI_SCORE`.
fn adaptive_rgb_roi_mask(frames: &[&[u8]], pixel_count: usize) -> Option<Vec<bool>> {
    // Temporal change needs at least two frames to be observable.
    if frames.len() < 2 || pixel_count == 0 {
        return None;
    }
    let mut scores = vec![0.0; pixel_count];
    for pixel_index in 0..pixel_count {
        let mut min_r = u8::MAX;
        let mut min_g = u8::MAX;
        let mut min_b = u8::MAX;
        let mut max_r = u8::MIN;
        let mut max_g = u8::MIN;
        let mut max_b = u8::MIN;
        let mut min_luma = u8::MAX;
        let mut max_luma = u8::MIN;
        let mut best_palette_score = 0.0_f64;
        // The byte offset of this pixel is identical in every frame; hoisted
        // out of the per-frame loop.
        let offset = pixel_index * 3;
        for frame in frames {
            // NOTE(review): assumes every frame holds `pixel_count * 3` bytes;
            // callers feed `chunks_exact` output — confirm if reused elsewhere.
            let r = frame[offset];
            let g = frame[offset + 1];
            let b = frame[offset + 2];
            min_r = min_r.min(r);
            min_g = min_g.min(g);
            min_b = min_b.min(b);
            max_r = max_r.max(r);
            max_g = max_g.max(g);
            max_b = max_b.max(b);
            let luma = luma_u8(r, g, b);
            min_luma = min_luma.min(luma);
            max_luma = max_luma.max(luma);
            best_palette_score = best_palette_score.max(palette_match_score(r, g, b));
        }
        let rgb_span = f64::from(max_r.saturating_sub(min_r))
            + f64::from(max_g.saturating_sub(min_g))
            + f64::from(max_b.saturating_sub(min_b));
        let luma_span = f64::from(max_luma.saturating_sub(min_luma));
        scores[pixel_index] =
            (rgb_span + (2.0 * luma_span)) * (1.0 + best_palette_score) * dark_roi_factor(min_luma);
    }
    adaptive_roi_mask_from_scores(&scores, MIN_RGB_ROI_SCORE)
}
/// Converts per-pixel change scores into a boolean ROI mask.
///
/// Selection rules, in order:
/// - bail out (`None`) when the best score is below `min_score`;
/// - rank finite positive scores descending, cap selection at
///   `MAX_ADAPTIVE_ROI_FRACTION` of the frame but never below
///   `MIN_ADAPTIVE_ROI_PIXELS`;
/// - stop adding pixels once scores drop under the relative floor, unless the
///   minimum pixel count has not yet been reached;
/// - keep only the largest connected region, and require it to still hold at
///   least `MIN_ADAPTIVE_ROI_PIXELS` pixels.
fn adaptive_roi_mask_from_scores(scores: &[f64], min_score: f64) -> Option<Vec<bool>> {
    let max_score = scores.iter().copied().fold(0.0_f64, f64::max);
    if max_score < min_score {
        return None;
    }
    // Rank candidate pixels by score, strongest first.
    let mut ranked = scores
        .iter()
        .copied()
        .enumerate()
        .filter(|(_, score)| score.is_finite() && *score > 0.0)
        .collect::<Vec<_>>();
    ranked.sort_by(|left, right| right.1.total_cmp(&left.1));
    let max_selected = ((scores.len() as f64 * MAX_ADAPTIVE_ROI_FRACTION).round() as usize)
        .max(MIN_ADAPTIVE_ROI_PIXELS)
        .min(scores.len());
    // Relative floor: a pixel must score a fraction of the best pixel, but
    // never less than the absolute minimum.
    let score_floor = (max_score * ADAPTIVE_ROI_SCORE_FRACTION).max(min_score);
    let mut mask = vec![false; scores.len()];
    let mut selected = 0usize;
    for (index, score) in ranked.into_iter().take(max_selected) {
        // Below-floor pixels are still admitted until the minimum ROI size
        // is reached; after that the floor cuts the selection off.
        if score < score_floor && selected >= MIN_ADAPTIVE_ROI_PIXELS {
            break;
        }
        mask[index] = true;
        selected += 1;
    }
    // Drop stray isolated pixels; keep only the dominant connected blob.
    let mask = retain_largest_connected_roi(mask);
    let selected = mask.iter().filter(|selected| **selected).count();
    (selected >= MIN_ADAPTIVE_ROI_PIXELS).then_some(mask)
}
/// Keeps only the largest 4-connected component of `mask`, interpreting the
/// mask as a square grid whose side is `sqrt(mask.len())`.
///
/// Returns the mask unchanged when the length is not a perfect square (grid
/// geometry unknown) or when the biggest component would fall below
/// `MIN_ADAPTIVE_ROI_PIXELS` — in that case a sparse-but-valid mask is better
/// than an emptied one.
fn retain_largest_connected_roi(mask: Vec<bool>) -> Vec<bool> {
    let side = (mask.len() as f64).sqrt().round() as usize;
    if side == 0 || side * side != mask.len() {
        return mask;
    }
    let mut visited = vec![false; mask.len()];
    let mut best_component = Vec::<usize>::new();
    // Iterative flood fill over every unvisited selected pixel.
    for start in 0..mask.len() {
        if !mask[start] || visited[start] {
            continue;
        }
        let mut stack = vec![start];
        let mut component = Vec::new();
        visited[start] = true;
        while let Some(index) = stack.pop() {
            component.push(index);
            let x = index % side;
            let y = index / side;
            let mut push_neighbor = |neighbor: usize| {
                if mask[neighbor] && !visited[neighbor] {
                    visited[neighbor] = true;
                    stack.push(neighbor);
                }
            };
            // 4-neighborhood, with edge guards so indices never wrap.
            if x > 0 {
                push_neighbor(index - 1);
            }
            if x + 1 < side {
                push_neighbor(index + 1);
            }
            if y > 0 {
                push_neighbor(index - side);
            }
            if y + 1 < side {
                push_neighbor(index + side);
            }
        }
        if component.len() > best_component.len() {
            best_component = component;
        }
    }
    if best_component.len() < MIN_ADAPTIVE_ROI_PIXELS {
        return mask;
    }
    let mut retained = vec![false; mask.len()];
    for index in best_component {
        retained[index] = true;
    }
    retained
}
/// Approximate luma from 8-bit RGB using integer weights 77/150/29 (out of
/// 256), i.e. roughly the Rec. 601 coefficients without floating point.
fn luma_u8(r: u8, g: u8, b: u8) -> u8 {
    let weighted = 77 * u32::from(r) + 150 * u32::from(g) + 29 * u32::from(b);
    // Divide by 256; the weighted sum of u8 inputs always fits in u32.
    (weighted >> 8) as u8
}
/// Weight that favors pixels whose darkest observed luma stays low: a pixel
/// that never goes dark is likely background/overexposure rather than the
/// flashing stimulus, so its change score is scaled down.
fn dark_roi_factor(min_luma: u8) -> f64 {
    if min_luma <= 80 {
        1.0
    } else if min_luma <= 120 {
        0.55
    } else if min_luma <= 160 {
        0.25
    } else {
        0.10
    }
}
/// Scores how closely an RGB value matches the stimulus color palette, in
/// [0.0, 1.0]. Dim pixels (max channel < 50) and desaturated pixels (channel
/// spread < 20) score 0.0 outright; otherwise the score falls off with squared
/// RGB distance to the nearest palette entry, normalized by 255².
fn palette_match_score(r: u8, g: u8, b: u8) -> f64 {
    const PALETTE: [(u8, u8, u8); 4] =
        [(255, 45, 45), (0, 230, 118), (41, 121, 255), (255, 179, 0)];
    let brightest = r.max(g).max(b);
    let darkest = r.min(g).min(b);
    if brightest < 50 || brightest.saturating_sub(darkest) < 20 {
        return 0.0;
    }
    let mut best_distance = f64::INFINITY;
    for (pr, pg, pb) in PALETTE {
        let dr = f64::from(r) - f64::from(pr);
        let dg = f64::from(g) - f64::from(pg);
        let db = f64::from(b) - f64::from(pb);
        best_distance = best_distance.min(dr * dr + dg * dg + db * db);
    }
    (1.0 - (best_distance / 65_025.0)).clamp(0.0, 1.0)
}
#[cfg(test)]
mod tests {
use super::{
@ -274,7 +513,7 @@ mod tests {
&[1, 0],
|capture_path| {
let parsed = extract_video_brightness(capture_path).expect("video brightness");
assert_eq!(parsed, vec![16, 40, 77]);
assert_eq!(parsed, brightness);
},
);
}
@ -305,7 +544,7 @@ mod tests {
&[1, 0],
|capture_path| {
let parsed = extract_video_brightness(capture_path).expect("video brightness");
assert_eq!(parsed, vec![20, 26, 20]);
assert_eq!(parsed, brightness);
},
);
}
@ -334,6 +573,43 @@ mod tests {
);
}
#[test]
// Regression: a small flashing region inside a mostly static frame must drive
// the summarized colors, proving the adaptive ROI locks onto the stimulus
// rather than averaging the whole frame down to background gray.
fn extract_video_colors_tracks_small_flashing_screen_region() {
    const SIDE: usize = 64;
    let mut bytes = Vec::new();
    // Four frames: background, red pulse, background, green pulse — with the
    // pulse confined to a 14x12 rectangle over a uniform gray (34) frame.
    for color in [(24, 28, 32), (255, 45, 45), (24, 28, 32), (0, 230, 118)] {
        let mut frame = vec![34u8; SIDE * SIDE * 3];
        for y in 6..18 {
            for x in 40..54 {
                let offset = (y * SIDE + x) * 3;
                frame[offset] = color.0;
                frame[offset + 1] = color.1;
                frame[offset + 2] = color.2;
            }
        }
        bytes.extend_from_slice(&frame);
    }
    with_fake_media_tools(
        &frame_json(&[0.0, 0.1, 0.2, 0.3]),
        &bytes,
        &[1, 0],
        |capture_path| {
            let parsed = extract_video_colors(capture_path).expect("video colors");
            // If the whole frame were averaged, the pulse colors would be
            // drowned out by the static background; the ROI must isolate them.
            assert!(
                parsed[1].r > 220 && parsed[1].g < 80,
                "red pulse should dominate selected ROI: {:?}",
                parsed[1]
            );
            assert!(
                parsed[3].g > 190 && parsed[3].r < 60,
                "green pulse should dominate selected ROI: {:?}",
                parsed[3]
            );
        },
    );
}
#[test]
fn extract_audio_samples_reads_fake_ffmpeg_output() {
let samples = vec![1i16, -2, 32_000];

View File

@ -16,10 +16,17 @@ const MAX_VIDEO_FLICKER_SEGMENT_FRAME_MULTIPLIER: f64 = 1.5;
const MIN_COLOR_PULSE_SATURATION: u8 = 36;
const MIN_COLOR_PULSE_VALUE: u8 = 70;
const MAX_COLOR_DISTANCE_SQUARED: u32 = 24_000;
const DOMINANT_COLOR_MARGIN: i16 = 28;
const MAX_COLOR_OBSERVED_DURATION_MULTIPLIER: f64 = 1.55;
const MAX_COLOR_OBSERVED_DURATION_SLACK_S: f64 = 0.08;
const MAX_AUDIO_PULSE_INTERNAL_GAP_S: f64 = 0.16;
const MIN_AUDIO_PROBE_PEAK: f64 = 25.0;
const AUDIO_ENVELOPE_THRESHOLD_FRACTION: f64 = 0.30;
const AUDIO_SAMPLE_THRESHOLD_FRACTION: f64 = 0.22;
const AUDIO_TONE_FREQUENCIES_HZ: [f64; 4] = [660.0, 880.0, 1100.0, 1320.0];
const MIN_TONE_ENVELOPE_PEAK: f64 = 18.0;
const MIN_TONE_CONTRAST_FRACTION_OF_AMPLITUDE: f64 = 0.12;
const MIN_TONE_CODE_DOMINANCE_RATIO: f64 = 1.35;
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub(super) struct VideoColorFrame {
@ -140,6 +147,7 @@ pub(crate) fn detect_color_coded_video_segments(
}
let frame_step_s = median_frame_step_seconds(&timestamps_s[..frame_count]).max(1.0 / 120.0);
let max_event_code = event_codes.iter().copied().max().unwrap_or(1);
let mut segments = Vec::new();
let mut previous_code = None::<u32>;
let mut segment_start = 0.0_f64;
@ -168,6 +176,7 @@ pub(crate) fn detect_color_coded_video_segments(
timestamp,
),
pulse_width_s,
max_event_code,
&segment_codes,
frame_step_s,
);
@ -183,6 +192,7 @@ pub(crate) fn detect_color_coded_video_segments(
segment_start,
last_timestamp + frame_step_s / 2.0,
pulse_width_s,
max_event_code,
&segment_codes,
frame_step_s,
);
@ -214,12 +224,20 @@ fn push_color_segment(
start_s: f64,
observed_end_s: f64,
pulse_width_s: f64,
max_event_code: u32,
codes: &[u32],
frame_step_s: f64,
) {
let Some(code) = dominant_event_code(codes) else {
return;
};
let observed_duration_s = observed_end_s - start_s;
let max_observed_duration_s =
(pulse_width_s * f64::from(max_event_code) * MAX_COLOR_OBSERVED_DURATION_MULTIPLIER)
+ MAX_COLOR_OBSERVED_DURATION_SLACK_S;
if observed_duration_s > max_observed_duration_s {
return;
}
let encoded_duration_s = pulse_width_s * f64::from(code);
segments.push(PulseSegment {
start_s,
@ -255,6 +273,30 @@ fn color_event_code(frame: VideoColorFrame) -> Option<u32> {
.map(|(code, color)| (code, color_distance_squared(frame, color)))
.min_by_key(|(_, distance)| *distance)
.and_then(|(code, distance)| (distance <= MAX_COLOR_DISTANCE_SQUARED).then_some(code))
.or_else(|| dominant_color_event_code(frame))
}
/// Fallback classifier for camera-washed colors that miss the exact palette
/// distance test: derives an event code from channel dominance alone.
///
/// The yellow test (code 4) runs first on purpose — a yellow pixel (high R and
/// G over B) would otherwise also satisfy the red test below. Then red (1),
/// green (2), blue (3). Returns `None` when no channel dominates by at least
/// `DOMINANT_COLOR_MARGIN`.
fn dominant_color_event_code(frame: VideoColorFrame) -> Option<u32> {
    let r = i16::from(frame.r);
    let g = i16::from(frame.g);
    let b = i16::from(frame.b);
    // Yellow: both R and G well above B, and R roughly balanced with G.
    if r - b >= DOMINANT_COLOR_MARGIN
        && g - b >= DOMINANT_COLOR_MARGIN
        && (r - g).abs() <= DOMINANT_COLOR_MARGIN * 3
    {
        return Some(4);
    }
    if r - g >= DOMINANT_COLOR_MARGIN && r - b >= DOMINANT_COLOR_MARGIN {
        return Some(1);
    }
    if g - r >= DOMINANT_COLOR_MARGIN && g - b >= DOMINANT_COLOR_MARGIN {
        return Some(2);
    }
    if b - r >= DOMINANT_COLOR_MARGIN && b - g >= DOMINANT_COLOR_MARGIN {
        return Some(3);
    }
    None
}
fn color_for_event_code(code: u32) -> Option<VideoColorFrame> {
@ -322,6 +364,45 @@ pub(crate) fn detect_audio_segments(
samples: &[i16],
sample_rate_hz: u32,
window_ms: u32,
) -> Result<Vec<PulseSegment>> {
detect_audio_segments_with_optional_codes(samples, sample_rate_hz, window_ms, &[], 0.0)
}
/// Detects audio pulse segments for a coded (tone-palette) probe run.
///
/// Validates that the pulse width is positive, the event-code list is
/// non-empty, and every code maps to a known tone frequency, then delegates to
/// the shared detection routine with the codes enabled.
///
/// # Errors
/// Fails on invalid arguments, or when the underlying detection fails (e.g.
/// empty capture or a probe too quiet to threshold).
pub(crate) fn detect_coded_audio_segments(
    samples: &[i16],
    sample_rate_hz: u32,
    window_ms: u32,
    event_codes: &[u32],
    pulse_width_s: f64,
) -> Result<Vec<PulseSegment>> {
    if pulse_width_s <= 0.0 {
        bail!("pulse width must stay positive");
    }
    if event_codes.is_empty() {
        bail!("event code list must not be empty");
    }
    // Reject any code without a tone signature up front, so detection never
    // silently ignores a requested code.
    if let Some(unsupported) = event_codes
        .iter()
        .find(|code| audio_frequency_for_event_code(**code).is_none())
    {
        bail!("event code {unsupported} has no audio tone signature");
    }
    detect_audio_segments_with_optional_codes(
        samples,
        sample_rate_hz,
        window_ms,
        event_codes,
        pulse_width_s,
    )
}
fn detect_audio_segments_with_optional_codes(
samples: &[i16],
sample_rate_hz: u32,
window_ms: u32,
event_codes: &[u32],
pulse_width_s: f64,
) -> Result<Vec<PulseSegment>> {
if samples.is_empty() {
bail!("capture did not contain any audio samples");
@ -334,7 +415,7 @@ pub(crate) fn detect_audio_segments(
}
let window_samples = ((sample_rate_hz as usize * window_ms as usize) / 1000).max(1);
let raw_envelope = samples
let amplitude_envelope = samples
.chunks(window_samples)
.map(|chunk| {
let total: u64 = chunk
@ -344,7 +425,15 @@ pub(crate) fn detect_audio_segments(
total as f64 / chunk.len() as f64
})
.collect::<Vec<_>>();
let envelope = smooth_envelope(&raw_envelope);
let tone_windows = samples
.chunks(window_samples)
.map(|chunk| strongest_probe_tone_window(chunk, sample_rate_hz, event_codes))
.collect::<Vec<_>>();
let tone_envelope = tone_windows
.iter()
.map(|window| window.level)
.collect::<Vec<_>>();
let envelope = choose_audio_detection_envelope(&amplitude_envelope, &tone_envelope);
let peak = envelope.iter().copied().fold(0.0_f64, f64::max);
if peak < MIN_AUDIO_PROBE_PEAK {
bail!("audio probe peaks are too quiet to detect sync pulses");
@ -362,35 +451,133 @@ pub(crate) fn detect_audio_segments(
let mut segments = Vec::new();
let mut previous_active = false;
let mut segment_start = 0usize;
let mut segment_codes = Vec::<u32>::new();
for (index, level) in envelope.iter().copied().enumerate() {
let active = level >= threshold;
if active && !previous_active {
segment_start = index;
segment_codes.clear();
}
if !event_codes.is_empty()
&& active
&& let Some(code) = tone_windows.get(index).and_then(|window| window.code)
{
segment_codes.push(code);
}
if previous_active && !active {
segments.push(window_segment(
push_audio_segment(
&mut segments,
samples,
sample_rate_hz,
window_samples,
segment_start,
index,
sample_threshold,
));
dominant_event_code(&segment_codes).map(|code| pulse_width_s * f64::from(code)),
);
segment_codes.clear();
}
previous_active = active;
}
if previous_active {
segments.push(window_segment(
push_audio_segment(
&mut segments,
samples,
sample_rate_hz,
window_samples,
segment_start,
envelope.len(),
sample_threshold,
));
dominant_event_code(&segment_codes).map(|code| pulse_width_s * f64::from(code)),
);
}
Ok(merge_nearby_audio_segments(segments))
if event_codes.is_empty() {
Ok(merge_nearby_audio_segments(segments))
} else {
Ok(merge_nearby_coded_audio_segments(segments))
}
}
/// Picks the envelope used for pulse thresholding: the tone (Goertzel)
/// envelope when it is both loud enough (`MIN_TONE_ENVELOPE_PEAK`) and shows
/// sufficient peak-over-median contrast relative to the amplitude envelope;
/// otherwise the plain amplitude envelope. Preferring tone contrast makes
/// steady broadband hum less likely to register as pulses.
fn choose_audio_detection_envelope(amplitude_envelope: &[f64], tone_envelope: &[f64]) -> Vec<f64> {
    let smoothed_amplitude = smooth_envelope(amplitude_envelope);
    let smoothed_tone = smooth_envelope(tone_envelope);
    let amplitude_peak = smoothed_amplitude.iter().copied().fold(0.0_f64, f64::max);
    // NOTE(review): `median` appears to take its input by value, hence the
    // clones here — confirm its signature before changing.
    let amplitude_baseline = median(smoothed_amplitude.clone());
    let tone_peak = smoothed_tone.iter().copied().fold(0.0_f64, f64::max);
    let tone_baseline = median(smoothed_tone.clone());
    // Contrast = how far the peak rises above the typical (median) level.
    let amplitude_contrast = (amplitude_peak - amplitude_baseline).max(0.0);
    let tone_contrast = (tone_peak - tone_baseline).max(0.0);
    if tone_peak >= MIN_TONE_ENVELOPE_PEAK
        && tone_contrast >= amplitude_contrast * MIN_TONE_CONTRAST_FRACTION_OF_AMPLITUDE
    {
        smoothed_tone
    } else {
        smoothed_amplitude
    }
}
/// Per-window result of probing for the strongest stimulus tone.
#[derive(Clone, Copy, Debug)]
struct ProbeToneWindow {
    // Event code of the dominant tone, set only when the tone is loud enough
    // and clearly beats the runner-up candidate.
    code: Option<u32>,
    // Goertzel level of the strongest candidate tone in this window.
    level: f64,
}
/// Evaluates one audio window against the probe tone palette and returns the
/// strongest tone's level, plus its event code when the match is unambiguous.
///
/// With an empty `event_codes` list every palette tone is considered;
/// otherwise only the requested codes. The code is reported only when the top
/// level clears `MIN_TONE_ENVELOPE_PEAK` and dominates the runner-up by
/// `MIN_TONE_CODE_DOMINANCE_RATIO`.
fn strongest_probe_tone_window(
    samples: &[i16],
    sample_rate_hz: u32,
    event_codes: &[u32],
) -> ProbeToneWindow {
    // Boxed iterator so both branches share one type.
    let code_iter: Box<dyn Iterator<Item = u32> + '_> = if event_codes.is_empty() {
        Box::new(1..=AUDIO_TONE_FREQUENCIES_HZ.len() as u32)
    } else {
        Box::new(event_codes.iter().copied())
    };
    // Measure each candidate tone's energy in this window.
    let mut candidates = code_iter
        .filter_map(|code| {
            audio_frequency_for_event_code(code)
                .map(|frequency_hz| (code, goertzel_level(samples, sample_rate_hz, frequency_hz)))
        })
        .collect::<Vec<_>>();
    // Sort descending by level; only the top two entries matter below.
    candidates.sort_by(|(_, left_level), (_, right_level)| right_level.total_cmp(left_level));
    let Some((code, level)) = candidates.first().copied() else {
        return ProbeToneWindow {
            code: None,
            level: 0.0,
        };
    };
    let runner_up = candidates.get(1).map(|(_, level)| *level).unwrap_or(0.0);
    ProbeToneWindow {
        // Only claim a code when the tone is loud and clearly dominant, so
        // broadband noise does not produce spurious codes.
        code: (level >= MIN_TONE_ENVELOPE_PEAK
            && level >= runner_up * MIN_TONE_CODE_DOMINANCE_RATIO)
            .then_some(code),
        level,
    }
}
/// Maps a 1-based event code to its probe tone frequency; `None` for code 0 or
/// codes beyond the tone palette.
fn audio_frequency_for_event_code(code: u32) -> Option<f64> {
    match code.checked_sub(1) {
        Some(index) => AUDIO_TONE_FREQUENCIES_HZ.get(index as usize).copied(),
        None => None,
    }
}
/// Single-bin DFT magnitude at `frequency_hz` via the Goertzel recurrence,
/// normalized by the sample count. Returns 0.0 for empty input or a zero
/// sample rate.
fn goertzel_level(samples: &[i16], sample_rate_hz: u32, frequency_hz: f64) -> f64 {
    if samples.is_empty() || sample_rate_hz == 0 {
        return 0.0;
    }
    let omega = 2.0 * std::f64::consts::PI * frequency_hz / f64::from(sample_rate_hz);
    let coefficient = 2.0 * omega.cos();
    // Goertzel recurrence: q0 = x[n] + coeff*q1 - q2, carried as (q1, q2).
    let (q1, q2) = samples
        .iter()
        .fold((0.0_f64, 0.0_f64), |(prev, prev2), sample| {
            (f64::from(*sample) + coefficient * prev - prev2, prev)
        });
    let power = q1 * q1 + q2 * q2 - coefficient * q1 * q2;
    // Clamp tiny negative values from rounding before the square root.
    power.max(0.0).sqrt() / samples.len() as f64
}
fn smooth_envelope(envelope: &[f64]) -> Vec<f64> {
@ -444,6 +631,44 @@ pub(super) fn window_segment(
}
}
/// Builds a pulse segment from a window span via `window_segment` and appends
/// it to `segments`. When the tone detector decoded an event code for the
/// span, `encoded_duration_s` overrides the measured duration so downstream
/// code-width matching sees the encoded pulse length instead of the noisy
/// observed one.
fn push_audio_segment(
    segments: &mut Vec<PulseSegment>,
    samples: &[i16],
    sample_rate_hz: u32,
    window_samples: usize,
    start_window_index: usize,
    end_window_index_exclusive: usize,
    sample_threshold: f64,
    encoded_duration_s: Option<f64>,
) {
    let mut segment = window_segment(
        samples,
        sample_rate_hz,
        window_samples,
        start_window_index,
        end_window_index_exclusive,
        sample_threshold,
    );
    if let Some(encoded_duration_s) = encoded_duration_s {
        segment.duration_s = encoded_duration_s;
    }
    segments.push(segment);
}
/// Merges consecutive coded audio segments whose inter-segment gap is at most
/// `MAX_AUDIO_PULSE_INTERNAL_GAP_S`: the prior segment's end is extended and
/// the larger of the two encoded durations is kept. Assumes the input is in
/// chronological order.
fn merge_nearby_coded_audio_segments(segments: Vec<PulseSegment>) -> Vec<PulseSegment> {
    segments
        .into_iter()
        .fold(Vec::<PulseSegment>::new(), |mut merged, segment| {
            let extends_prior = merged.last().is_some_and(|prior| {
                segment.start_s - prior.end_s <= MAX_AUDIO_PULSE_INTERNAL_GAP_S
            });
            if extends_prior {
                let prior = merged.last_mut().expect("non-empty when extends_prior");
                prior.end_s = segment.end_s;
                prior.duration_s = prior.duration_s.max(segment.duration_s);
            } else {
                merged.push(segment);
            }
            merged
        })
}
pub(super) fn median_frame_step_seconds(timestamps_s: &[f64]) -> f64 {
let diffs = timestamps_s
.windows(2)

View File

@ -230,6 +230,8 @@ pub(crate) fn correlate_coded_segments(
let activity_start_delta_ms =
(raw_first_audio_activity_s - raw_first_video_activity_s) * 1000.0;
let raw_video_segments = video_segments.to_vec();
let raw_audio_segments = audio_segments.to_vec();
let phase_tolerance_s = segment_phase_tolerance(pulse_period_s, pulse_width_s, max_pair_gap_s);
let video_segments =
collapse_segments_by_phase(video_segments, pulse_period_s, phase_tolerance_s);
@ -250,20 +252,6 @@ pub(crate) fn correlate_coded_segments(
.iter()
.map(|segment| segment.start_s)
.collect::<Vec<_>>();
let full_video_onsets_s = video_onsets_s.clone();
let full_audio_onsets_s = audio_onsets_s.clone();
let full_video_indexed = index_coded_segments_by_spacing(
&video_segments,
pulse_period_s,
pulse_width_s,
event_width_codes,
);
let full_audio_indexed = index_coded_segments_by_spacing(
&audio_segments,
pulse_period_s,
pulse_width_s,
event_width_codes,
);
let (_, _, common_window) =
trim_onsets_to_common_activity_window(&video_onsets_s, &audio_onsets_s, max_pair_gap_s);
let filtered_video_segments = filter_segments_to_window(&video_segments, common_window);
@ -303,17 +291,94 @@ pub(crate) fn correlate_coded_segments(
);
if pairs.len() < MIN_CODED_PAIRS {
let time_pairs = best_coded_pairs_by_time(
&filtered_video_segments,
&filtered_audio_segments,
pulse_width_s,
event_width_codes,
max_pair_gap_s,
);
if time_pairs.len() >= MIN_CODED_PAIRS {
let video_onsets_s = filtered_video_segments
.iter()
.map(|segment| segment.start_s)
.collect::<Vec<_>>();
let audio_onsets_s = filtered_audio_segments
.iter()
.map(|segment| segment.start_s)
.collect::<Vec<_>>();
return Ok(sync_report_from_pairs(
&video_onsets_s,
&audio_onsets_s,
true,
activity_start_delta_ms,
raw_first_video_activity_s,
raw_first_audio_activity_s,
time_pairs,
));
}
if let Some((raw_filtered_video_segments, raw_filtered_audio_segments, raw_pairs)) =
best_coded_pairs_for_raw_segments(
&raw_video_segments,
&raw_audio_segments,
pulse_period_s,
pulse_width_s,
event_width_codes,
max_pair_gap_s,
)
&& raw_pairs.len() >= MIN_CODED_PAIRS
{
let video_onsets_s = raw_filtered_video_segments
.iter()
.map(|segment| segment.start_s)
.collect::<Vec<_>>();
let audio_onsets_s = raw_filtered_audio_segments
.iter()
.map(|segment| segment.start_s)
.collect::<Vec<_>>();
return Ok(sync_report_from_pairs(
&video_onsets_s,
&audio_onsets_s,
true,
activity_start_delta_ms,
raw_first_video_activity_s,
raw_first_audio_activity_s,
raw_pairs,
));
}
let raw_full_video_indexed = index_coded_segments_by_spacing(
&raw_video_segments,
pulse_period_s,
pulse_width_s,
event_width_codes,
);
let raw_full_audio_indexed = index_coded_segments_by_spacing(
&raw_audio_segments,
pulse_period_s,
pulse_width_s,
event_width_codes,
);
let diagnostic_pairs = diagnostic_coded_pairs_for_index_offsets(
&full_video_indexed,
&full_audio_indexed,
&candidate_coded_index_offsets(&full_video_indexed, &full_audio_indexed),
&raw_full_video_indexed,
&raw_full_audio_indexed,
&candidate_coded_index_offsets(&raw_full_video_indexed, &raw_full_audio_indexed),
DIAGNOSTIC_CODED_MAX_PAIR_GAP_S,
activity_start_delta_ms,
);
if diagnostic_pairs.len() >= MIN_CODED_PAIRS {
let raw_full_video_onsets_s = raw_video_segments
.iter()
.map(|segment| segment.start_s)
.collect::<Vec<_>>();
let raw_full_audio_onsets_s = raw_audio_segments
.iter()
.map(|segment| segment.start_s)
.collect::<Vec<_>>();
return Ok(sync_report_from_pairs(
&full_video_onsets_s,
&full_audio_onsets_s,
&raw_full_video_onsets_s,
&raw_full_audio_onsets_s,
true,
activity_start_delta_ms,
raw_first_video_activity_s,
@ -357,6 +422,71 @@ pub(crate) fn correlate_coded_segments(
))
}
/// Last-resort pairing pass over the *raw* (un-collapsed) segments, used when
/// pairing the phase-collapsed segments yields too few pairs.
///
/// Trims both streams to their common activity window, indexes the survivors
/// by expected pulse spacing, and computes pairs with both the index-offset
/// strategy and the nearest-in-time strategy, returning whichever found more.
/// Returns `None` when either stream is empty before or after trimming; the
/// caller applies its own minimum-pair-count check to the result.
fn best_coded_pairs_for_raw_segments(
    video_segments: &[PulseSegment],
    audio_segments: &[PulseSegment],
    pulse_period_s: f64,
    pulse_width_s: f64,
    event_width_codes: &[u32],
    max_pair_gap_s: f64,
) -> Option<(Vec<PulseSegment>, Vec<PulseSegment>, Vec<MatchedOnsetPair>)> {
    if video_segments.is_empty() || audio_segments.is_empty() {
        return None;
    }
    let video_onsets_s = video_segments
        .iter()
        .map(|segment| segment.start_s)
        .collect::<Vec<_>>();
    let audio_onsets_s = audio_segments
        .iter()
        .map(|segment| segment.start_s)
        .collect::<Vec<_>>();
    // Restrict both streams to the overlapping activity window so leading or
    // trailing noise in one stream cannot skew pairing.
    let (_, _, common_window) =
        trim_onsets_to_common_activity_window(&video_onsets_s, &audio_onsets_s, max_pair_gap_s);
    let filtered_video_segments = filter_segments_to_window(video_segments, common_window);
    let filtered_audio_segments = filter_segments_to_window(audio_segments, common_window);
    if filtered_video_segments.is_empty() || filtered_audio_segments.is_empty() {
        return None;
    }
    // Seed skew estimate: offset between the first surviving segments.
    let expected_start_skew_ms =
        (filtered_audio_segments[0].start_s - filtered_video_segments[0].start_s) * 1000.0;
    let video_indexed = index_coded_segments_by_spacing(
        &filtered_video_segments,
        pulse_period_s,
        pulse_width_s,
        event_width_codes,
    );
    let audio_indexed = index_coded_segments_by_spacing(
        &filtered_audio_segments,
        pulse_period_s,
        pulse_width_s,
        event_width_codes,
    );
    let index_pairs = best_coded_pairs_for_index_offsets(
        &video_indexed,
        &audio_indexed,
        &candidate_coded_index_offsets(&video_indexed, &audio_indexed),
        max_pair_gap_s,
        expected_start_skew_ms,
    );
    let time_pairs = best_coded_pairs_by_time(
        &filtered_video_segments,
        &filtered_audio_segments,
        pulse_width_s,
        event_width_codes,
        max_pair_gap_s,
    );
    // Keep whichever strategy recovered more pairs; ties favor index pairing.
    let pairs = if time_pairs.len() > index_pairs.len() {
        time_pairs
    } else {
        index_pairs
    };
    Some((filtered_video_segments, filtered_audio_segments, pairs))
}
#[derive(Clone, Copy)]
struct CommonActivityWindow {
start_s: f64,
@ -772,6 +902,51 @@ fn best_coded_pairs_for_index_offsets(
best.map(|(_, _, _, _, pairs)| pairs).unwrap_or_default()
}
/// Greedy nearest-in-time pairing: for each video segment (in order), picks
/// the not-yet-used audio segment whose nearest event-width code matches the
/// video segment's and whose start-time skew is within `max_pair_gap_s`,
/// preferring the smallest absolute skew. Greedy per-video matching — an
/// earlier video segment may claim an audio segment a later one would fit
/// better; acceptable for this fallback path.
fn best_coded_pairs_by_time(
    video_segments: &[PulseSegment],
    audio_segments: &[PulseSegment],
    pulse_width_s: f64,
    event_width_codes: &[u32],
    max_pair_gap_s: f64,
) -> Vec<MatchedOnsetPair> {
    let max_pair_gap_ms = max_pair_gap_s * 1000.0;
    // Each audio segment may be claimed by at most one video segment.
    let mut used_audio = vec![false; audio_segments.len()];
    let mut pairs = Vec::new();
    for video in video_segments {
        let video_code =
            nearest_event_width_code(video.duration_s, pulse_width_s, event_width_codes);
        let best_audio = audio_segments
            .iter()
            .enumerate()
            .filter(|(index, audio)| {
                !used_audio[*index]
                    && nearest_event_width_code(audio.duration_s, pulse_width_s, event_width_codes)
                        == video_code
            })
            .map(|(index, audio)| {
                let skew_ms = (audio.start_s - video.start_s) * 1000.0;
                (index, audio, skew_ms)
            })
            .filter(|(_, _, skew_ms)| skew_ms.abs() <= max_pair_gap_ms)
            .min_by(|(_, _, left_skew), (_, _, right_skew)| {
                left_skew.abs().total_cmp(&right_skew.abs())
            });
        if let Some((audio_index, audio, skew_ms)) = best_audio {
            used_audio[audio_index] = true;
            pairs.push(MatchedOnsetPair::new(
                video.start_s,
                audio.start_s,
                skew_ms,
                max_pair_gap_s,
            ));
        }
    }
    pairs
}
fn diagnostic_coded_pairs_for_index_offsets(
video_indexed: &BTreeMap<i64, CodedPulseSegment>,
audio_indexed: &BTreeMap<i64, CodedPulseSegment>,

View File

@ -4,8 +4,8 @@ use super::correlation::{
};
use super::{
PulseSegment, VideoColorFrame, correlate_coded_segments, correlate_segments,
detect_audio_onsets, detect_audio_segments, detect_color_coded_video_segments,
detect_video_onsets, detect_video_segments, median,
detect_audio_onsets, detect_audio_segments, detect_coded_audio_segments,
detect_color_coded_video_segments, detect_video_onsets, detect_video_segments, median,
};
use crate::sync_probe::analyze::report::SyncAnalysisReport;
use std::collections::BTreeMap;
@ -118,6 +118,51 @@ fn detect_color_coded_video_segments_ignores_generic_bright_changes() {
assert!((segments[1].duration_s - 0.24).abs() < 0.001);
}
/// A webcam viewing the screen washes the saturated stimulus colors toward
/// gray; the coded-video detector should still recover all four event codes
/// and their durations from the desaturated palette.
#[test]
fn detect_color_coded_video_segments_accepts_camera_washed_palette() {
    // 90 frames at 30 fps: four washed-out colored flashes of growing length
    // (frames 10-12, 30-34, 50-55, 70-76) over a near-black background.
    let frame_color = |idx: usize| -> VideoColorFrame {
        if (10..=12).contains(&idx) {
            VideoColorFrame { r: 184, g: 72, b: 68 }
        } else if (30..=34).contains(&idx) {
            VideoColorFrame { r: 76, g: 168, b: 111 }
        } else if (50..=55).contains(&idx) {
            VideoColorFrame { r: 82, g: 125, b: 188 }
        } else if (70..=76).contains(&idx) {
            VideoColorFrame { r: 190, g: 173, b: 60 }
        } else {
            VideoColorFrame { r: 22, g: 22, b: 24 }
        }
    };
    let mut timestamps = Vec::with_capacity(90);
    let mut frames = Vec::with_capacity(90);
    for idx in 0..90 {
        timestamps.push(idx as f64 / 30.0);
        frames.push(frame_color(idx));
    }
    let segments = detect_color_coded_video_segments(&timestamps, &frames, &[1, 2, 3, 4], 0.12)
        .expect("segments");
    assert_eq!(segments.len(), 4);
    // Codes 1..=4 map to durations of 0.12 s * code.
    for (index, expected_duration_s) in [0.12, 0.24, 0.36, 0.48].into_iter().enumerate() {
        assert!((segments[index].duration_s - expected_duration_s).abs() < 0.001);
    }
}
#[test]
fn detect_audio_segments_keeps_regular_and_marker_durations_distinct() {
let mut samples = vec![0i16; 48_000];
@ -162,6 +207,36 @@ fn detect_audio_segments_accepts_faint_probe_tones() {
assert!((segments[1].start_s - 0.5).abs() < 0.01);
}
/// A loud, continuous 120 Hz room hum must not mask two short 880 Hz probe
/// tones: the detector should report exactly the two tone onsets.
#[test]
fn detect_audio_segments_locks_onto_probe_tone_over_background_hum() {
    let mut samples = vec![0i16; 96_000];
    // Two full seconds of strong low-frequency hum under the whole capture.
    add_sine(&mut samples, 48_000, 0.0, 2.0, 120.0, 7_000.0);
    // Two quieter 120 ms probe tones, one second apart.
    for tone_start_s in [0.25, 1.25] {
        add_sine(&mut samples, 48_000, tone_start_s, 0.12, 880.0, 1_800.0);
    }
    let segments = detect_audio_segments(&samples, 48_000, 10).expect("tone segments");
    assert_eq!(segments.len(), 2);
    assert!((segments[0].start_s - 0.25).abs() < 0.03);
    assert!((segments[1].start_s - 1.25).abs() < 0.03);
}
/// The coded detector should derive event codes from the probe-tone frequency
/// palette (here 660 Hz and 1320 Hz over a 120 Hz hum), reporting the coded
/// duration for each code even though both bursts last only 70 ms.
#[test]
fn detect_coded_audio_segments_uses_probe_tone_frequency_for_event_code() {
    let mut samples = vec![0i16; 96_000];
    add_sine(&mut samples, 48_000, 0.0, 2.0, 120.0, 7_000.0);
    add_sine(&mut samples, 48_000, 0.25, 0.07, 660.0, 2_000.0);
    add_sine(&mut samples, 48_000, 1.25, 0.07, 1320.0, 2_000.0);
    let segments =
        detect_coded_audio_segments(&samples, 48_000, 10, &[1, 2, 3, 4], 0.12).expect("segments");
    assert_eq!(segments.len(), 2);
    // Expected (onset, coded duration) per detected segment.
    let expectations = [(0.25, 0.12), (1.25, 0.48)];
    for (segment, (start_s, duration_s)) in segments.iter().zip(expectations) {
        assert!((segment.start_s - start_s).abs() < 0.03);
        assert!((segment.duration_s - duration_s).abs() < 0.001);
    }
}
#[test]
fn detect_audio_segments_merges_longer_probe_dropouts_inside_one_pulse() {
let mut samples = vec![0i16; 48_000];
@ -177,6 +252,26 @@ fn detect_audio_segments_merges_longer_probe_dropouts_inside_one_pulse() {
assert!(segments[0].duration_s > 0.24);
}
/// Mix a sine tone into `samples` in place.
///
/// The tone begins `start_s` seconds into the buffer, lasts `duration_s`
/// seconds, oscillates at `frequency_hz` (phase zero at the tone's own
/// start), and is scaled by `amplitude` before being added to the existing
/// sample value; the sum is rounded and clamped to the `i16` range. Portions
/// of the tone that fall past the end of `samples` are silently dropped.
fn add_sine(
    samples: &mut [i16],
    sample_rate_hz: u32,
    start_s: f64,
    duration_s: f64,
    frequency_hz: f64,
    amplitude: f64,
) {
    let rate = f64::from(sample_rate_hz);
    let first = (start_s * rate).round() as usize;
    let count = (duration_s * rate).round() as usize;
    for (idx, slot) in samples.iter_mut().skip(first).take(count).enumerate() {
        let phase = 2.0 * std::f64::consts::PI * frequency_hz * (idx as f64 / rate);
        let mixed = f64::from(*slot) + amplitude * phase.sin();
        *slot = mixed
            .round()
            .clamp(f64::from(i16::MIN), f64::from(i16::MAX)) as i16;
    }
}
#[test]
fn detect_video_segments_closes_a_pulse_that_stays_active_until_the_last_frame() {
let timestamps = [0.0, 0.1, 0.2, 0.3];
@ -472,6 +567,37 @@ fn correlate_coded_segments_matches_preserved_event_width_codes() {
assert!(report.max_abs_skew_ms < 50.0);
}
/// Spurious extra video detections interleaved with the real pulses must not
/// drag the matcher into a collapsed phase: all eight coded events should
/// still pair at the true ~45 ms audio-behind-video skew.
#[test]
fn correlate_coded_segments_recovers_when_extra_video_detections_win_phase_collapse() {
    fn segment(start_s: f64, code: u32) -> PulseSegment {
        let duration_s = 0.12 * f64::from(code);
        PulseSegment {
            start_s,
            end_s: start_s + duration_s,
            duration_s,
        }
    }
    let codes = [1, 2, 1, 3, 2, 4, 1, 1];
    let mut video = Vec::with_capacity(codes.len() * 2);
    let mut audio = Vec::with_capacity(codes.len());
    for (tick, &code) in codes.iter().enumerate() {
        let base_s = tick as f64;
        // Real stimulus pulse, plus a spurious wide detection 450 ms later.
        video.push(segment(base_s, code));
        video.push(segment(base_s + 0.45, 4));
        // The matching audio pulse arrives 45 ms after the video onset.
        audio.push(segment(base_s + 0.045, code));
    }
    let report =
        correlate_coded_segments(&video, &audio, 1.0, 0.12, &codes, 0.2).expect("coded report");
    assert_eq!(report.paired_event_count, codes.len());
    assert!((report.median_skew_ms - 45.0).abs() < 1.0);
    assert!(report.max_abs_skew_ms < 50.0);
}
#[test]
fn correlate_coded_segments_rejects_nearby_wrong_width_codes() {
fn segment(start_s: f64, code: u32) -> PulseSegment {

View File

@ -70,7 +70,7 @@ pub(super) fn click_track_samples(click_times_s: &[f64], total_samples: usize) -
}
pub(super) fn thumbnail_video_bytes(brightness_values: &[u8]) -> Vec<u8> {
const SIDE: usize = 32;
const SIDE: usize = 64;
let mut bytes = Vec::with_capacity(brightness_values.len() * SIDE * SIDE);
for brightness in brightness_values {
let mut frame = vec![20u8; SIDE * SIDE];
@ -85,7 +85,7 @@ pub(super) fn thumbnail_video_bytes(brightness_values: &[u8]) -> Vec<u8> {
}
pub(super) fn thumbnail_rgb_video_bytes(colors: &[(u8, u8, u8)]) -> Vec<u8> {
const SIDE: usize = 32;
const SIDE: usize = 64;
let mut bytes = Vec::with_capacity(colors.len() * SIDE * SIDE * 3);
for (r, g, b) in colors {
let mut frame = vec![0u8; SIDE * SIDE * 3];

View File

@ -1,6 +1,6 @@
[package]
name = "lesavka_common"
version = "0.17.32"
version = "0.17.33"
edition = "2024"
build = "build.rs"

View File

@ -129,10 +129,10 @@ let oscillator = null;
let gain = null;
let startedAt = 0;
const pulseColors = {
1: '#ff2d2d',
2: '#00e676',
3: '#2979ff',
4: '#ffb300'
1: '#b81d24',
2: '#007a3d',
3: '#1456b8',
4: '#b56b00'
};
const pulseFrequencies = {
1: 660,

View File

@ -50,6 +50,7 @@ STIMULUS_SETTLE_SECONDS=${STIMULUS_SETTLE_SECONDS:-10}
LOCAL_OUTPUT_DIR=${LOCAL_OUTPUT_DIR:-"${REPO_ROOT}/tmp"}
SSH_OPTS=${SSH_OPTS:-"-o BatchMode=yes -o ConnectTimeout=5"}
LOCAL_BROWSER=${LOCAL_BROWSER:-firefox}
LESAVKA_STIMULUS_BROWSER_KIOSK=${LESAVKA_STIMULUS_BROWSER_KIOSK:-1}
mkdir -p "${LOCAL_OUTPUT_DIR}"
STAMP="$(date +%Y%m%d-%H%M%S)"
@ -639,9 +640,12 @@ user_pref("browser.aboutwelcome.enabled", false);
PREFS
printf 'user_pref("browser.startup.homepage", "http://127.0.0.1:%s/");\n' "${STIMULUS_PORT}" >>"${STIMULUS_PROFILE}/user.js"
echo "==> opening local stimulus browser"
"${LOCAL_BROWSER}" --new-instance --no-remote --profile "${STIMULUS_PROFILE}" \
"http://127.0.0.1:${STIMULUS_PORT}/" \
>"${ARTIFACT_DIR}/stimulus-browser.log" 2>&1 &
local browser_args=(--new-instance --no-remote --profile "${STIMULUS_PROFILE}")
if [[ "${LESAVKA_STIMULUS_BROWSER_KIOSK}" == "1" ]]; then
browser_args+=(--kiosk)
fi
browser_args+=("http://127.0.0.1:${STIMULUS_PORT}/")
"${LOCAL_BROWSER}" "${browser_args[@]}" >"${ARTIFACT_DIR}/stimulus-browser.log" 2>&1 &
STIMULUS_BROWSER_PID=$!
wait_for_stimulus_page_ready 15
@ -746,9 +750,11 @@ summarize_adaptive_probe_metrics() {
echo "==> summarizing segmented probe metrics"
python3 - "${ARTIFACT_DIR}" "${LESAVKA_SYNC_TOTAL_SEGMENTS}" "${LESAVKA_SYNC_CALIBRATION_SEGMENTS}" <<'PY'
import csv
import html
import json
import math
import os
import shutil
import sys
from pathlib import Path
@ -783,6 +789,13 @@ def latest_analysis_failure(segment_dir):
return max(failures, key=lambda path: path.stat().st_mtime)
def latest_capture(segment_dir):
    """Return the most recently modified ``*.webm`` capture in ``segment_dir``.

    Returns ``None`` when the directory holds no ``.webm`` files. Ties on
    mtime keep the first candidate encountered, matching ``max``'s behavior.
    """
    newest = None
    for candidate in segment_dir.glob("*.webm"):
        if newest is None or candidate.stat().st_mtime > newest.stat().st_mtime:
            newest = candidate
    return newest
def as_float(value):
if value is None or value in {"", "pending"}:
return None
@ -1023,6 +1036,7 @@ diagnoses = []
for segment in range(1, segment_count + 1):
segment_dir = root / f"segment-{segment}"
report_path = latest_report(segment_dir)
capture_path = latest_capture(segment_dir)
report = {}
verdict = {}
calibration = {}
@ -1048,6 +1062,7 @@ for segment in range(1, segment_count + 1):
row = {
"segment": segment,
"segment_phase": phase,
"capture_path": str(capture_path) if capture_path else "",
"report_json": str(report_path) if report_path else "",
"analysis_failure_json": str(failure_path) if failure_path else "",
"analysis_failure_reason": failure.get("reason", ""),
@ -1172,6 +1187,73 @@ with events_jsonl_path.open("w", encoding="utf-8") as handle:
for row in event_rows:
handle.write(json.dumps(row, sort_keys=True) + "\n")
review_dir = root / "manual-review"
review_dir.mkdir(exist_ok=True)
review_items = []
for row in rows:
capture_path_raw = row.get("capture_path")
if not capture_path_raw:
continue
source = Path(capture_path_raw)
if not source.exists():
continue
review_name = f"segment-{row['segment']}.webm"
review_capture = review_dir / review_name
if review_capture.exists() or review_capture.is_symlink():
review_capture.unlink()
try:
review_capture.symlink_to(os.path.relpath(source, review_dir))
except OSError:
shutil.copy2(source, review_capture)
review_items.append({
"segment": row["segment"],
"segment_phase": row.get("segment_phase", ""),
"probe_status": row.get("probe_status", ""),
"probe_paired_pulses": row.get("probe_paired_pulses", ""),
"probe_p95_abs_skew_ms": row.get("probe_p95_abs_skew_ms", ""),
"capture_href": review_name,
"report_href": os.path.relpath(row["report_json"], review_dir) if row.get("report_json") else "",
"failure_href": os.path.relpath(row["analysis_failure_json"], review_dir) if row.get("analysis_failure_json") else "",
})
manual_review_path = review_dir / "index.html"
# Render one manual-review card per segment capture. The p95 formatting is
# guarded with float() because upstream rows default this field to "" and may
# also carry "pending"; the previous `is None` check let those non-numeric
# placeholders reach f"{p95:.1f}" and raise ValueError, aborting the summary.
review_cards = []
for item in review_items:
    detail_links = []
    if item["report_href"]:
        detail_links.append(f'<a href="{html.escape(item["report_href"])}">report.json</a>')
    if item["failure_href"]:
        detail_links.append(f'<a href="{html.escape(item["failure_href"])}">analysis-failure.json</a>')
    try:
        p95_text = f"{float(item['probe_p95_abs_skew_ms']):.1f} ms"
    except (TypeError, ValueError):
        # Covers None, "", "pending", and any other non-numeric placeholder.
        p95_text = "n/a"
    review_cards.append(f"""
<section>
<h2>Segment {item['segment']} ({html.escape(str(item['segment_phase']))})</h2>
<p>status={html.escape(str(item['probe_status']))}, pairs={html.escape(str(item['probe_paired_pulses']))}, p95={html.escape(p95_text)}</p>
<video controls preload="metadata" src="{html.escape(item['capture_href'])}"></video>
<p><a href="{html.escape(item['capture_href'])}">open capture</a>{' | ' + ' | '.join(detail_links) if detail_links else ''}</p>
</section>""")
manual_review_path.write_text(f"""<!doctype html>
<html>
<head>
<meta charset="utf-8">
<title>Lesavka A/V Probe Manual Review</title>
<style>
body {{ font: 16px/1.45 sans-serif; margin: 24px; background: #101418; color: #eef4fa; }}
a {{ color: #8bd3ff; }}
section {{ margin: 0 0 28px; padding: 16px; border: 1px solid #34414f; border-radius: 12px; background: #18212a; }}
video {{ display: block; width: min(960px, 100%); max-height: 70vh; background: #000; }}
</style>
</head>
<body>
<h1>Lesavka A/V Probe Manual Review</h1>
<p>Open these captures to visually inspect framing, flashes, and audible test tones after the run.</p>
{''.join(review_cards) if review_cards else '<p>No browser captures were found.</p>'}
</body>
</html>
""", encoding="utf-8")
good_rows = [row for row in rows if row.get("probe_passed")]
confirmation_rows = [row for row in rows if row.get("segment_phase") == "confirmation"]
passing_confirmation_rows = [row for row in confirmation_rows if row.get("probe_passed")]
@ -1318,6 +1400,7 @@ print(f" ↪ segment_metrics_csv={csv_path}")
print(f" ↪ segment_metrics_jsonl={jsonl_path}")
print(f" ↪ segment_events_csv={events_csv_path}")
print(f" ↪ segment_events_jsonl={events_jsonl_path}")
print(f" ↪ manual_review_html={manual_review_path}")
print(f" ↪ blind_targets_json={target_path}")
print(f" ↪ blind_targets_ready={str(bool(target.get('ready'))).lower()}")
print(f" ↪ confirmation_summary_json={confirmation_path}")

View File

@ -10,7 +10,7 @@ bench = false
[package]
name = "lesavka_server"
version = "0.17.32"
version = "0.17.33"
edition = "2024"
autobins = false

View File

@ -172,6 +172,9 @@ fn mirrored_sync_script_uses_real_client_capture_path() {
"segment-metrics.jsonl",
"segment-events.csv",
"segment-events.jsonl",
"manual-review",
"manual_review_html",
"capture_path",
"confirmation-summary.json",
"confirmation_passed",
"check_confirmation_result",