probe: color-code mirrored av sync events
parent bbb6d3100c
commit add8d66c98

AGENTS.md | 10
@@ -112,6 +112,12 @@ Context: the mirrored browser probe finally reproduced the real failure class on
### Phase 0: Keep The Probe Honest
- [x] Split raw activity-start fields from filtered/coded paired-pulse fields in probe reports.
- [x] Print explicit raw first-video and first-audio timestamps in `report.txt`.
- [x] Root-cause the 0.16.17 `raw_first_video_activity_s=0.000` artifact as the mirrored probe counting its own bright pre-start positioning card.
- [x] Make the mirrored stimulus pre-start screen dark/dim so only real flash pulses can be detected as video activity.
- [x] Add analyzer coverage proving dim pre-start positioning frames are ignored.
- [x] Replace generic light/dark mirrored flashes with color-coded event IDs.
- [x] Make mirrored audio pulses unique by the same event ID via pulse width plus tone frequency.
- [x] Teach the analyzer to decode mirrored video event IDs from color, not grayscale brightness.
- [ ] Keep the mirrored browser probe as the release/blocking upstream A/V gate.
- [ ] Keep the old raw-device probe as a lower-level diagnostic only.

@@ -147,5 +153,7 @@ Context: the mirrored browser probe finally reproduced the real failure class on
- [x] Run server/client media contract tests.
- [x] Run `cargo check` for touched packages.
- [x] Bump version for the fix release.
- [ ] Run the mirrored browser probe on installed client/server.
- [x] Run the mirrored browser probe on installed client/server.
  - 0.16.17 still failed: reported `activity_start_delta_ms=+6735.0`, but `raw_first_video_activity_s=0.000` exposed a probe false-positive from the pre-start screen. Paired pulses still showed real steady-state skew (`p95=411.8 ms`, `median=-99.0 ms`), so the product remains unfixed.
- [ ] Re-run the mirrored browser probe after the pre-start false-positive fix.
- [ ] Run Google Meet manual validation.
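The checklist above describes the scheme this commit installs: every sync event carries one ID, redundantly encoded on video as a palette color and on audio as a tone frequency plus a pulse width scaled by the ID. Below is a minimal sketch consolidating that mapping; the values are copied from `color_palette()` in the analyzer and the `pulseColors`/`pulseFrequencies` tables in the stimulus page further down, but the function itself is illustrative, not shared code.

```rust
// Sketch: the event-ID signature this commit installs, consolidated from
// color_palette() (analyzer side) and pulseColors/pulseFrequencies (stimulus
// page). Each event ID is redundantly encoded: video carries it as a palette
// color, audio as both a tone frequency and a pulse width (base width x ID).
fn event_signature(id: u32, base_pulse_width_s: f64) -> Option<(&'static str, u32, f64)> {
    let (css_color, tone_hz) = match id {
        1 => ("#ff2d2d", 660),  // red
        2 => ("#00e676", 880),  // green
        3 => ("#2979ff", 1100), // blue
        4 => ("#ffb300", 1320), // amber
        _ => return None,
    };
    Some((css_color, tone_hz, base_pulse_width_s * f64::from(id)))
}

fn main() {
    // Event 2 flashes green at 880 Hz for 2x the base pulse width.
    assert_eq!(event_signature(2, 0.12), Some(("#00e676", 880, 0.24)));
}
```

Because both streams carry the same ID, the analyzer can pair a specific video pulse with the specific audio pulse it was emitted alongside, rather than pairing nearest onsets.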
Cargo.lock (generated) | 6
@@ -1652,7 +1652,7 @@ checksum = "09edd9e8b54e49e587e4f6295a7d29c3ea94d469cb40ab8ca70b288248a81db2"

[[package]]
name = "lesavka_client"
version = "0.16.17"
version = "0.16.18"
dependencies = [
 "anyhow",
 "async-stream",
@@ -1686,7 +1686,7 @@ dependencies = [

[[package]]
name = "lesavka_common"
version = "0.16.17"
version = "0.16.18"
dependencies = [
 "anyhow",
 "base64",
@@ -1698,7 +1698,7 @@ dependencies = [

[[package]]
name = "lesavka_server"
version = "0.16.17"
version = "0.16.18"
dependencies = [
 "anyhow",
 "base64",

@@ -4,7 +4,7 @@ path = "src/main.rs"

[package]
name = "lesavka_client"
version = "0.16.17"
version = "0.16.18"
edition = "2024"

[dependencies]

@@ -9,10 +9,12 @@ pub(super) mod test_support;
use anyhow::{Result, bail};
use std::path::Path;

use media_extract::{extract_audio_samples, extract_video_brightness, extract_video_timestamps};
use media_extract::{
    extract_audio_samples, extract_video_brightness, extract_video_colors, extract_video_timestamps,
};
use onset_detection::{
    DEFAULT_AUDIO_SAMPLE_RATE_HZ, correlate_coded_segments, correlate_segments,
    detect_audio_segments, detect_video_segments,
    detect_audio_segments, detect_color_coded_video_segments, detect_video_segments,
};

pub use onset_detection::{detect_audio_onsets, detect_video_onsets};
@@ -28,9 +30,20 @@ pub fn analyze_capture(
    options: &SyncAnalysisOptions,
) -> Result<SyncAnalysisReport> {
    let raw_timestamps = extract_video_timestamps(capture_path)?;
    let brightness = extract_video_brightness(capture_path)?;
    let timestamps = reconcile_video_timestamps(raw_timestamps, brightness.len())?;
    let video_segments = detect_video_segments(&timestamps, &brightness)?;
    let video_segments = if options.event_width_codes.is_empty() {
        let brightness = extract_video_brightness(capture_path)?;
        let timestamps = reconcile_video_timestamps(raw_timestamps, brightness.len())?;
        detect_video_segments(&timestamps, &brightness)?
    } else {
        let colors = extract_video_colors(capture_path)?;
        let timestamps = reconcile_video_timestamps(raw_timestamps, colors.len())?;
        detect_color_coded_video_segments(
            &timestamps,
            &colors,
            &options.event_width_codes,
            options.pulse_width_s,
        )?
    };

    let audio_samples = extract_audio_samples(capture_path)?;
    let audio_segments = detect_audio_segments(
@@ -93,8 +106,8 @@ fn reconcile_video_timestamps(timestamps: Vec<f64>, frame_count: usize) -> Resul
#[cfg(test)]
mod tests {
    use super::test_support::{
        audio_samples_to_bytes, click_track_samples, frame_json, thumbnail_video_bytes,
        with_fake_media_tools,
        audio_samples_to_bytes, click_track_samples, frame_json, thumbnail_rgb_video_bytes,
        thumbnail_video_bytes, with_fake_media_tools,
    };
    use super::{SyncAnalysisOptions, analyze_capture};
    use crate::sync_probe::analyze::reconcile_video_timestamps;
@@ -163,6 +176,48 @@ mod tests {
        );
    }

    #[test]
    fn analyze_capture_uses_color_codes_for_mirrored_video_events() {
        let timestamps = (0..45).map(|index| index as f64 * 0.1).collect::<Vec<_>>();
        let colors = timestamps
            .iter()
            .enumerate()
            .map(|(index, _)| match index {
                10 | 11 => (255, 45, 45),
                20 | 21 | 22 | 23 => (0, 230, 118),
                30 | 31 | 32 => (41, 121, 255),
                _ => (0, 0, 0),
            })
            .collect::<Vec<_>>();
        let mut audio = vec![0i16; 220_000];
        for (start_s, code) in [(1.05, 1usize), (2.05, 2usize), (3.05, 3usize)] {
            let start = (start_s * 48_000.0) as usize;
            for sample in audio.iter_mut().skip(start).take(5_760 * code) {
                *sample = 18_000;
            }
        }

        with_fake_media_tools(
            &frame_json(&timestamps),
            &thumbnail_rgb_video_bytes(&colors),
            &audio_samples_to_bytes(&audio),
            |capture_path| {
                let report = analyze_capture(
                    capture_path,
                    &SyncAnalysisOptions {
                        pulse_period_s: 1.0,
                        event_width_codes: vec![1, 2, 3],
                        ..SyncAnalysisOptions::default()
                    },
                )
                .expect("analysis report");
                assert_eq!(report.video_event_count, 3);
                assert_eq!(report.paired_event_count, 3);
                assert!(report.max_abs_skew_ms < 120.0);
            },
        );
    }

    #[test]
    fn reconcile_video_timestamps_resamples_metadata_span_to_decoded_frame_count() {
        let reconciled = reconcile_video_timestamps(vec![0.0, 0.004, 0.008, 1.0], 3)

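A note on the audio fixture in the new test above: the `5_760 * code` sample count is not arbitrary. A quick arithmetic sketch, assuming the crate's `DEFAULT_AUDIO_SAMPLE_RATE_HZ` of 48,000 Hz:

```rust
// Why the fixture writes 5_760 * code samples per audio pulse:
// at 48_000 Hz, 5_760 samples = 0.12 s, so codes 1..=3 yield
// 120 ms, 240 ms, and 360 ms pulses - the width itself carries the event ID.
fn main() {
    let base_samples = 5_760usize;
    for code in 1..=3usize {
        let width_s = (base_samples * code) as f64 / 48_000.0;
        println!("code {code}: {} samples = {width_s} s", base_samples * code);
    }
}
```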
@@ -3,6 +3,8 @@ use serde::Deserialize;
use std::path::Path;
use std::process::Command;

use super::onset_detection::VideoColorFrame;

const VIDEO_ANALYSIS_SIDE_PX: usize = 32;

#[derive(Debug, Deserialize)]
@@ -89,6 +91,49 @@ pub(super) fn extract_video_brightness(capture_path: &Path) -> Result<Vec<u8>> {
        .collect())
}

pub(super) fn extract_video_colors(capture_path: &Path) -> Result<Vec<VideoColorFrame>> {
    let output = run_command(
        Command::new("ffmpeg")
            .arg("-hide_banner")
            .arg("-loglevel")
            .arg("error")
            .arg("-i")
            .arg(capture_path)
            .arg("-map")
            .arg("0:v:0")
            .arg("-vf")
            .arg(format!(
                "scale={side}:{side}:flags=area,format=rgb24",
                side = VIDEO_ANALYSIS_SIDE_PX
            ))
            .arg("-f")
            .arg("rawvideo")
            .arg("-pix_fmt")
            .arg("rgb24")
            .arg("-"),
        "ffmpeg video color extraction",
    )?;
    if output.is_empty() {
        bail!("ffmpeg did not emit any video color data");
    }

    let frame_bytes = VIDEO_ANALYSIS_SIDE_PX * VIDEO_ANALYSIS_SIDE_PX * 3;
    if output.len() % frame_bytes != 0 {
        bail!(
            "ffmpeg emitted {} bytes of video color data, which is not divisible by the {}-byte analysis frame size",
            output.len(),
            frame_bytes
        );
    }
    let extracted_frames = output.len() / frame_bytes;

    Ok(output
        .chunks_exact(frame_bytes)
        .take(extracted_frames)
        .map(summarize_frame_color)
        .collect())
}

pub(super) fn extract_audio_samples(capture_path: &Path) -> Result<Vec<i16>> {
    let output = run_command(
        Command::new("ffmpeg")
@@ -135,13 +180,51 @@ fn summarize_frame_brightness(frame: &[u8]) -> u8 {
    mean.min(u64::from(u8::MAX)) as u8
}

fn summarize_frame_color(frame: &[u8]) -> VideoColorFrame {
    let mut r_sum = 0u64;
    let mut g_sum = 0u64;
    let mut b_sum = 0u64;
    let mut selected = 0u64;

    for pixel in frame.chunks_exact(3) {
        let r = pixel[0];
        let g = pixel[1];
        let b = pixel[2];
        let max = r.max(g).max(b);
        let min = r.min(g).min(b);
        if max >= 60 && max.saturating_sub(min) >= 24 {
            r_sum += u64::from(r);
            g_sum += u64::from(g);
            b_sum += u64::from(b);
            selected += 1;
        }
    }

    if selected == 0 {
        selected = (frame.len() / 3).max(1) as u64;
        for pixel in frame.chunks_exact(3) {
            r_sum += u64::from(pixel[0]);
            g_sum += u64::from(pixel[1]);
            b_sum += u64::from(pixel[2]);
        }
    }

    VideoColorFrame {
        r: (r_sum / selected).min(u64::from(u8::MAX)) as u8,
        g: (g_sum / selected).min(u64::from(u8::MAX)) as u8,
        b: (b_sum / selected).min(u64::from(u8::MAX)) as u8,
    }
}

#[cfg(test)]
mod tests {
    use super::{
        extract_audio_samples, extract_video_brightness, extract_video_timestamps, run_command,
        extract_audio_samples, extract_video_brightness, extract_video_colors,
        extract_video_timestamps, run_command,
    };
    use crate::sync_probe::analyze::test_support::{
        audio_samples_to_bytes, frame_json, thumbnail_video_bytes, with_fake_media_tools,
        audio_samples_to_bytes, frame_json, thumbnail_rgb_video_bytes, thumbnail_video_bytes,
        with_fake_media_tools,
    };
    use std::process::Command;

@@ -235,6 +318,22 @@ mod tests {
        });
    }

    #[test]
    fn extract_video_colors_reads_fake_ffmpeg_output() {
        let colors = vec![(255, 45, 45), (0, 230, 118), (41, 121, 255)];
        with_fake_media_tools(
            &frame_json(&[0.0, 0.1, 0.2]),
            &thumbnail_rgb_video_bytes(&colors),
            &[1, 0],
            |capture_path| {
                let parsed = extract_video_colors(capture_path).expect("video colors");
                assert_eq!(parsed[0].r, 255);
                assert_eq!(parsed[1].g, 230);
                assert_eq!(parsed[2].b, 255);
            },
        );
    }

    #[test]
    fn extract_audio_samples_reads_fake_ffmpeg_output() {
        let samples = vec![1i16, -2, 32_000];

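The divisibility check in `extract_video_colors` above exists because ffmpeg's `rawvideo` output is a flat byte stream with no frame delimiters, so frame boundaries exist only by arithmetic. A minimal sketch of that invariant (the helper name is hypothetical):

```rust
// Sketch of the framing contract extract_video_colors relies on: at 32x32
// rgb24, each decoded frame occupies exactly 32 * 32 * 3 = 3_072 bytes, so
// the total output length must be a whole multiple of that.
fn split_frames(raw: &[u8], side_px: usize) -> Result<Vec<&[u8]>, String> {
    let frame_bytes = side_px * side_px * 3;
    if raw.len() % frame_bytes != 0 {
        return Err(format!(
            "{} bytes is not a whole number of {frame_bytes}-byte frames",
            raw.len()
        ));
    }
    Ok(raw.chunks_exact(frame_bytes).collect())
}
```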
@@ -13,6 +13,16 @@ pub(super) const DEFAULT_AUDIO_SAMPLE_RATE_HZ: u32 = 48_000;
const MIN_VIDEO_CONTRAST: u8 = 4;
const MAX_VIDEO_ACTIVE_FRAME_FRACTION: f64 = 0.35;
const MAX_VIDEO_FLICKER_SEGMENT_FRAME_MULTIPLIER: f64 = 1.5;
const MIN_COLOR_PULSE_SATURATION: u8 = 36;
const MIN_COLOR_PULSE_VALUE: u8 = 70;
const MAX_COLOR_DISTANCE_SQUARED: u32 = 42_000;

#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub(super) struct VideoColorFrame {
    pub r: u8,
    pub g: u8,
    pub b: u8,
}

#[derive(Clone, Copy, Debug, PartialEq)]
pub(crate) struct PulseSegment {
@@ -102,6 +112,183 @@ pub(crate) fn detect_video_segments(
    Ok(segments)
}

pub(crate) fn detect_color_coded_video_segments(
    timestamps_s: &[f64],
    frames: &[VideoColorFrame],
    event_codes: &[u32],
    pulse_width_s: f64,
) -> Result<Vec<PulseSegment>> {
    let frame_count = timestamps_s.len().min(frames.len());
    if frame_count == 0 {
        bail!("capture did not contain any video frames");
    }
    if pulse_width_s <= 0.0 {
        bail!("pulse width must stay positive");
    }
    if event_codes.is_empty() {
        bail!("event code list must not be empty");
    }
    if let Some(unsupported) = event_codes
        .iter()
        .find(|code| color_for_event_code(**code).is_none())
    {
        bail!("event code {unsupported} has no video color signature");
    }

    let frame_step_s = median_frame_step_seconds(&timestamps_s[..frame_count]).max(1.0 / 120.0);
    let mut segments = Vec::new();
    let mut previous_code = None::<u32>;
    let mut segment_start = 0.0_f64;
    let mut previous_timestamp = None;
    let mut last_active_timestamp = None;
    let mut segment_codes = Vec::<u32>::new();

    for (timestamp, frame) in timestamps_s.iter().copied().zip(frames.iter().copied()) {
        let code = color_event_code(frame).filter(|code| event_codes.contains(code));
        if code.is_some() && previous_code.is_none() {
            segment_start = previous_timestamp
                .map(|prior| edge_midpoint(prior, timestamp))
                .unwrap_or(timestamp);
            segment_codes.clear();
        }
        if let Some(code) = code {
            last_active_timestamp = Some(timestamp);
            segment_codes.push(code);
        }
        if previous_code.is_some() && code.is_none() {
            push_color_segment(
                &mut segments,
                segment_start,
                edge_midpoint(
                    last_active_timestamp.unwrap_or(timestamp - frame_step_s),
                    timestamp,
                ),
                pulse_width_s,
                &segment_codes,
                frame_step_s,
            );
            segment_codes.clear();
        }
        previous_code = code;
        previous_timestamp = Some(timestamp);
    }
    if previous_code.is_some() {
        let last_timestamp = timestamps_s[frame_count - 1];
        push_color_segment(
            &mut segments,
            segment_start,
            last_timestamp + frame_step_s / 2.0,
            pulse_width_s,
            &segment_codes,
            frame_step_s,
        );
    }

    if segments.is_empty() {
        bail!("video did not contain any recognizable color-coded sync pulses");
    }

    Ok(segments)
}

fn push_color_segment(
    segments: &mut Vec<PulseSegment>,
    start_s: f64,
    observed_end_s: f64,
    pulse_width_s: f64,
    codes: &[u32],
    frame_step_s: f64,
) {
    let Some(code) = dominant_event_code(codes) else {
        return;
    };
    let encoded_duration_s = pulse_width_s * f64::from(code);
    segments.push(PulseSegment {
        start_s,
        end_s: observed_end_s.max(start_s + frame_step_s / 2.0),
        duration_s: encoded_duration_s,
    });
}

fn dominant_event_code(codes: &[u32]) -> Option<u32> {
    let mut counts = std::collections::BTreeMap::<u32, usize>::new();
    for code in codes {
        *counts.entry(*code).or_default() += 1;
    }
    counts
        .into_iter()
        .max_by(|(left_code, left_count), (right_code, right_count)| {
            left_count
                .cmp(right_count)
                .then_with(|| right_code.cmp(left_code))
        })
        .map(|(code, _)| code)
}

fn color_event_code(frame: VideoColorFrame) -> Option<u32> {
    let max = frame.r.max(frame.g).max(frame.b);
    let min = frame.r.min(frame.g).min(frame.b);
    if max < MIN_COLOR_PULSE_VALUE || max.saturating_sub(min) < MIN_COLOR_PULSE_SATURATION {
        return None;
    }

    color_palette()
        .into_iter()
        .map(|(code, color)| (code, color_distance_squared(frame, color)))
        .min_by_key(|(_, distance)| *distance)
        .and_then(|(code, distance)| (distance <= MAX_COLOR_DISTANCE_SQUARED).then_some(code))
}

fn color_for_event_code(code: u32) -> Option<VideoColorFrame> {
    color_palette()
        .into_iter()
        .find_map(|(palette_code, color)| (palette_code == code).then_some(color))
}

fn color_palette() -> [(u32, VideoColorFrame); 4] {
    [
        (1, VideoColorFrame { r: 255, g: 45, b: 45 }),
        (2, VideoColorFrame { r: 0, g: 230, b: 118 }),
        (3, VideoColorFrame { r: 41, g: 121, b: 255 }),
        (4, VideoColorFrame { r: 255, g: 179, b: 0 }),
    ]
}

fn color_distance_squared(left: VideoColorFrame, right: VideoColorFrame) -> u32 {
    let dr = i32::from(left.r) - i32::from(right.r);
    let dg = i32::from(left.g) - i32::from(right.g);
    let db = i32::from(left.b) - i32::from(right.b);
    (dr * dr + dg * dg + db * db) as u32
}

pub fn detect_audio_onsets(
    samples: &[i16],
    sample_rate_hz: u32,

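A worked example of the decode path above, using the shipped constants (`MIN_COLOR_PULSE_VALUE = 70`, `MIN_COLOR_PULSE_SATURATION = 36`, `MAX_COLOR_DISTANCE_SQUARED = 42_000`): a frame must pass the value/saturation gate before any palette comparison happens, and then its squared RGB distance to the nearest palette entry must stay within the threshold.

```rust
// Worked example of color_event_code's gate-then-nearest-palette logic.
// A slightly off-red capture decodes as event 1; a bright gray is rejected
// by the saturation gate before any palette comparison happens.
fn main() {
    // Off-red frame, e.g. after webcam white balance: (230, 70, 60).
    // Gate: max = 230 >= 70, max - min = 170 >= 36 -> passes.
    // Distance^2 to palette red (255, 45, 45):
    //   25^2 + 25^2 + 15^2 = 625 + 625 + 225 = 1_475 <= 42_000 -> code 1.
    let d2 = |a: (i32, i32, i32), b: (i32, i32, i32)| {
        (a.0 - b.0).pow(2) + (a.1 - b.1).pow(2) + (a.2 - b.2).pow(2)
    };
    assert_eq!(d2((230, 70, 60), (255, 45, 45)), 1_475);

    // Bright gray (200, 200, 200): max - min = 0 < 36 -> gated out, which is
    // how generic light/dark flashes stop registering as events at all.
}
```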
@@ -3,8 +3,9 @@ use super::correlation::{
    index_onsets_by_spacing, marker_index_offsets, marker_onsets, shortest_wrapped_difference,
};
use super::{
    PulseSegment, correlate_coded_segments, correlate_segments, detect_audio_onsets,
    detect_audio_segments, detect_video_onsets, detect_video_segments, median,
    PulseSegment, VideoColorFrame, correlate_coded_segments, correlate_segments,
    detect_audio_onsets, detect_audio_segments, detect_color_coded_video_segments,
    detect_video_onsets, detect_video_segments, median,
};
use crate::sync_probe::analyze::report::SyncAnalysisReport;
use std::collections::BTreeMap;
@@ -55,6 +56,63 @@ fn detect_video_segments_keeps_regular_and_marker_durations_distinct() {
    assert!(segments[1].duration_s > segments[0].duration_s);
}

#[test]
fn detect_video_segments_ignores_dim_positioning_prelude() {
    let timestamps = (0..90).map(|idx| idx as f64 / 30.0).collect::<Vec<_>>();
    let brightness = timestamps
        .iter()
        .enumerate()
        .map(|(idx, _)| {
            if (45..49).contains(&idx) || (75..79).contains(&idx) {
                245
            } else {
                8
            }
        })
        .collect::<Vec<_>>();

    let segments = detect_video_segments(&timestamps, &brightness).expect("video segments");
    assert_eq!(segments.len(), 2);
    assert!(
        segments[0].start_s > 1.4,
        "dim pre-start positioning screen must not become a fake onset"
    );
}

#[test]
fn detect_color_coded_video_segments_ignores_generic_bright_changes() {
    let timestamps = (0..80).map(|idx| idx as f64 / 20.0).collect::<Vec<_>>();
    let frames = timestamps
        .iter()
        .enumerate()
        .map(|(idx, _)| match idx {
            0..=4 => VideoColorFrame { r: 245, g: 245, b: 245 },
            20..=22 => VideoColorFrame { r: 255, g: 45, b: 45 },
            40..=45 => VideoColorFrame { r: 0, g: 230, b: 118 },
            _ => VideoColorFrame { r: 0, g: 0, b: 0 },
        })
        .collect::<Vec<_>>();

    let segments =
        detect_color_coded_video_segments(&timestamps, &frames, &[1, 2], 0.12).expect("segments");
    assert_eq!(segments.len(), 2);
    assert!(segments[0].start_s > 0.9);
    assert!((segments[0].duration_s - 0.12).abs() < 0.001);
    assert!((segments[1].duration_s - 0.24).abs() < 0.001);
}

#[test]
fn detect_audio_segments_keeps_regular_and_marker_durations_distinct() {
    let mut samples = vec![0i16; 48_000];

@@ -84,6 +84,24 @@ pub(super) fn thumbnail_video_bytes(brightness_values: &[u8]) -> Vec<u8> {
    bytes
}

pub(super) fn thumbnail_rgb_video_bytes(colors: &[(u8, u8, u8)]) -> Vec<u8> {
    const SIDE: usize = 32;
    let mut bytes = Vec::with_capacity(colors.len() * SIDE * SIDE * 3);
    for (r, g, b) in colors {
        let mut frame = vec![0u8; SIDE * SIDE * 3];
        for y in SIDE / 4..SIDE - SIDE / 4 {
            for x in SIDE / 4..SIDE - SIDE / 4 {
                let offset = (y * SIDE + x) * 3;
                frame[offset] = *r;
                frame[offset + 1] = *g;
                frame[offset + 2] = *b;
            }
        }
        bytes.extend_from_slice(&frame);
    }
    bytes
}

pub(super) fn audio_samples_to_bytes(samples: &[i16]) -> Vec<u8> {
    samples
        .iter()

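A usage sketch for `thumbnail_rgb_video_bytes` (the test name here is hypothetical): only the center 16×16 block of each 32×32 frame carries color, so the black border is exactly what exercises `summarize_frame_color`'s saturated-pixel selection.

```rust
// Usage sketch, as it might appear inside the existing test module: three
// fake rgb24 frames, each 32 * 32 * 3 = 3_072 bytes, colored only in the
// central quarter of the frame area.
#[test]
fn thumbnail_rgb_video_bytes_emits_whole_frames() {
    let bytes = thumbnail_rgb_video_bytes(&[(255, 45, 45), (0, 230, 118), (41, 121, 255)]);
    assert_eq!(bytes.len(), 3 * 32 * 32 * 3); // 9_216 bytes total
}
```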
@@ -1,6 +1,6 @@
[package]
name = "lesavka_common"
version = "0.16.17"
version = "0.16.18"
edition = "2024"
build = "build.rs"

@@ -104,12 +104,14 @@ def page_html() -> str:
<title>Lesavka Local A/V Stimulus</title>
<style>
html, body { margin: 0; width: 100%; height: 100%; overflow: hidden; background: #02040a; color: #eaf2ff; font: 20px/1.4 system-ui, sans-serif; }
#stage { position: fixed; inset: 0; display: grid; place-items: center; background: #02040a; transition: none; }
#stage.active { background: #f8fbff; color: #02040a; }
#card { max-width: 900px; padding: 28px; border-radius: 24px; background: rgba(16, 24, 40, 0.78); border: 1px solid rgba(255,255,255,0.18); text-align: center; }
#stage.active #card { background: rgba(255,255,255,0.84); border-color: rgba(0,0,0,0.18); }
#big { font-size: clamp(48px, 9vw, 140px); font-weight: 900; letter-spacing: 0.06em; }
#status { white-space: pre-wrap; font: 15px/1.45 ui-monospace, monospace; opacity: 0.86; }
#stage { position: fixed; inset: 0; display: grid; place-items: center; background: #000; transition: none; }
#stage.active { background: var(--pulse-color, #ff2d2d); color: #02040a; }
#card { max-width: 900px; padding: 28px; border-radius: 24px; background: rgba(0, 0, 0, 0.82); border: 1px solid rgba(52, 65, 86, 0.34); text-align: center; }
#stage.active #card { background: transparent; border-color: transparent; }
#big { font-size: clamp(48px, 9vw, 140px); font-weight: 900; letter-spacing: 0.06em; color: #111827; }
#stage.active #big { color: rgba(0,0,0,0.16); }
#status { white-space: pre-wrap; font: 15px/1.45 ui-monospace, monospace; color: #1f2937; opacity: 0.72; }
#stage.active #status { color: rgba(0,0,0,0.22); opacity: 0.68; }
</style>
</head>
<body>
@@ -123,6 +125,18 @@ let audioCtx = null;
let oscillator = null;
let gain = null;
let startedAt = 0;
const pulseColors = {
  1: '#ff2d2d',
  2: '#00e676',
  3: '#2979ff',
  4: '#ffb300'
};
const pulseFrequencies = {
  1: 660,
  2: 880,
  3: 1100,
  4: 1320
};

function setStatus(message) { statusEl.textContent = message; }
async function postJson(path, payload) {
@@ -139,16 +153,16 @@ function ensureAudio() {
  oscillator.connect(gain).connect(audioCtx.destination);
  oscillator.start();
}
function activeAt(elapsedMs, command) {
function eventAt(elapsedMs, command) {
  const warmupMs = command.warmup_seconds * 1000;
  if (elapsedMs < warmupMs || elapsedMs > command.duration_seconds * 1000) return false;
  if (elapsedMs < warmupMs || elapsedMs > command.duration_seconds * 1000) return { active: false, pulseIndex: 0, widthCode: 1 };
  const sinceWarmup = elapsedMs - warmupMs;
  const pulseIndex = Math.floor(sinceWarmup / command.pulse_period_ms);
  const offset = sinceWarmup % command.pulse_period_ms;
  const codes = command.event_width_codes && command.event_width_codes.length ? command.event_width_codes : [1];
  const widthCode = codes[pulseIndex % codes.length];
  const width = Math.min(command.pulse_period_ms - 1, command.pulse_width_ms * widthCode);
  return offset < width;
  return { active: offset < width, pulseIndex, widthCode };
}
async function runStimulus(command) {
  if (running) return;
@@ -159,12 +173,13 @@ async function runStimulus(command) {
  await postJson('/status', { ready: true, started: true, completed: false, page_message: 'stimulus running' });
  const tick = async () => {
    const elapsed = performance.now() - startedAt;
    const active = activeAt(elapsed, command);
    const warmupMs = command.warmup_seconds * 1000;
    const pulseIndex = Math.max(0, Math.floor((elapsed - warmupMs) / command.pulse_period_ms));
    const codes = command.event_width_codes && command.event_width_codes.length ? command.event_width_codes : [1];
    const widthCode = codes[pulseIndex % codes.length];
    const event = eventAt(elapsed, command);
    const active = event.active;
    const pulseIndex = event.pulseIndex;
    const widthCode = event.widthCode;
    stage.style.setProperty('--pulse-color', pulseColors[widthCode] || pulseColors[1]);
    stage.classList.toggle('active', active);
    oscillator.frequency.setTargetAtTime(pulseFrequencies[widthCode] || pulseFrequencies[1], audioCtx.currentTime, 0.003);
    gain.gain.setTargetAtTime(active ? 0.28 : 0.0, audioCtx.currentTime, 0.005);
    setStatus(`running\nelapsed=${(elapsed / 1000).toFixed(2)}s\nactive=${active}\nevent=${pulseIndex}\nwidth_code=${widthCode}\nPoint the real webcam at this window and keep the real microphone hearing the tone.`);
    if (elapsed <= command.duration_seconds * 1000 + 500) {

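The `eventAt` helper above derives everything from elapsed wall-clock time. A Rust restatement of the same schedule, as a sketch only; the struct mirrors the JSON command payload's field names and is not shared code:

```rust
// Rust restatement of the page's eventAt(): given elapsed time, derive
// whether a pulse is active, which event index it is, and its width code.
struct StimulusCommand {
    warmup_seconds: f64,
    duration_seconds: f64,
    pulse_period_ms: f64,
    pulse_width_ms: f64,
    event_width_codes: Vec<u32>,
}

fn event_at(elapsed_ms: f64, cmd: &StimulusCommand) -> (bool, u64, u32) {
    let warmup_ms = cmd.warmup_seconds * 1000.0;
    if elapsed_ms < warmup_ms || elapsed_ms > cmd.duration_seconds * 1000.0 {
        return (false, 0, 1);
    }
    let since_warmup = elapsed_ms - warmup_ms;
    let pulse_index = (since_warmup / cmd.pulse_period_ms).floor() as u64;
    let offset = since_warmup % cmd.pulse_period_ms;
    let codes: &[u32] = if cmd.event_width_codes.is_empty() {
        &[1]
    } else {
        &cmd.event_width_codes
    };
    let width_code = codes[(pulse_index as usize) % codes.len()];
    // Width scales with the code but never spans the whole period, so every
    // pulse still leaves an off gap the analyzer can detect as an edge.
    let width = (cmd.pulse_width_ms * f64::from(width_code)).min(cmd.pulse_period_ms - 1.0);
    (offset < width, pulse_index, width_code)
}

fn main() {
    let cmd = StimulusCommand {
        warmup_seconds: 1.0,
        duration_seconds: 10.0,
        pulse_period_ms: 1_000.0,
        pulse_width_ms: 120.0,
        event_width_codes: vec![1, 2, 3],
    };
    // 2.15 s in: pulse index 1, code 2, 240 ms wide, still active.
    assert_eq!(event_at(2_150.0, &cmd), (true, 1, 2));
}
```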
@@ -10,7 +10,7 @@ bench = false

[package]
name = "lesavka_server"
version = "0.16.17"
version = "0.16.18"
edition = "2024"
autobins = false