khora_lanes/audio_lane/mixing/spatial_mixing_lane.rs

// Copyright 2025 eraflo
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//! The core audio processing lane, responsible for mixing and spatializing sound sources.

use khora_core::audio::device::StreamInfo;
use khora_data::ecs::{AudioListener, AudioSource, GlobalTransform, PlaybackState, World};

use crate::audio_lane::AudioMixingLane;

/// A lane that performs spatialized audio mixing.
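///
/// Sources are attenuated with an inverse-square falloff from the listener,
/// panned with an equal-power law, resampled by linear interpolation, and the
/// summed mix is hard-clipped to `[-1.0, 1.0]`.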
#[derive(Default)]
pub struct SpatialMixingLane;

impl SpatialMixingLane {
    /// Creates a new `SpatialMixingLane`.
    pub fn new() -> Self {
        Self
    }
}

impl AudioMixingLane for SpatialMixingLane {
    /// Mixes all active `AudioSource`s into a single output buffer, applying 3D spatialization.
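    ///
    /// The buffer is interleaved according to `stream_info.channels` and is fully
    /// overwritten: it is zeroed first, then every playing source is accumulated
    /// into it and the result is clamped to `[-1.0, 1.0]`.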
    fn mix(&self, world: &mut World, output_buffer: &mut [f32], stream_info: &StreamInfo) {
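        // Start from silence: clear any samples left over in the output buffer
        // before accumulating sources.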
        output_buffer.fill(0.0);

        // --- Step 1: Find the listener (if any) ---
        let listener_transform = world
            .query::<(&AudioListener, &GlobalTransform)>()
            .next()
            .map(|(_, t)| t.0);

        // --- Step 2 & 3: Process and mix all active sources ---
        let samples_to_write = output_buffer.len() / stream_info.channels as usize;

        for (source, source_transform) in world.query_mut::<(&mut AudioSource, &GlobalTransform)>()
        {
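            // Start playback for autoplay sources that have no active playback state.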
            if source.autoplay && source.state.is_none() {
                source.state = Some(PlaybackState { cursor: 0.0 });
            }

            let sound_data = &source.handle;
            let num_frames = sound_data.samples.len() / sound_data.channels as usize;

            // Stop immediately if the sound is empty.
            if num_frames == 0 {
                source.state = None;
                continue;
            }

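            // Ratio between the source's sample rate and the device's: the cursor
            // advances by this much per output frame, resampling the source on the fly.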
            let resample_ratio = sound_data.sample_rate as f32 / stream_info.sample_rate as f32;
            let (mut volume, mut pan) = (source.volume, 0.5);

            if let Some(listener_mat) = listener_transform {
                let source_pos = source_transform.0.translation();
                let listener_pos = listener_mat.translation();
                let listener_right = listener_mat.right();
                let to_source = source_pos - listener_pos;
                let distance = to_source.length();

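                // Inverse-square distance attenuation; the +1 keeps the gain finite
                // (and no louder than the base volume) when the source is at the listener.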
                volume *= 1.0 / (1.0 + distance * distance);
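                // Pan from the direction to the source projected onto the listener's
                // right axis, remapped from [-1, 1] to [0, 1] (0 = left, 1 = right).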
                if distance > 0.001 {
                    pan = (to_source.normalize().dot(listener_right) + 1.0) * 0.5;
                }
            }

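            // Equal-power panning: sqrt-based gains keep perceived loudness roughly
            // constant as a source moves between the left and right channels.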
            let vol_l = volume * (1.0 - pan).sqrt();
            let vol_r = volume * pan.sqrt();

            for i in 0..samples_to_write {
                // Get a mutable reference to the cursor for this iteration.
                // If the state becomes None mid-loop, we stop processing this source.
                let cursor = if let Some(state) = source.state.as_mut() {
                    &mut state.cursor
                } else {
                    break;
                };

                // --- Robust End-of-Sound and Loop Handling ---
                if *cursor >= num_frames as f32 {
                    if source.looping {
                        *cursor %= num_frames as f32;
                    } else {
                        source.state = None;
                        break; // Stop processing samples for this source
                    }
                }

                let cursor_floor = cursor.floor() as usize;
                let cursor_fract = cursor.fract();

                // For looping sounds, the next sample might wrap around to the beginning.
                let next_frame_idx = (cursor_floor + 1) % num_frames;

                let s1_idx = cursor_floor * sound_data.channels as usize;
                let s2_idx = next_frame_idx * sound_data.channels as usize;

                // This check prevents panics if sound data is malformed, though unlikely.
                if s1_idx >= sound_data.samples.len() || s2_idx >= sound_data.samples.len() {
                    source.state = None;
                    break;
                }

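                // Linearly interpolate between the two neighbouring source frames.
                // Only the first channel of a multi-channel source is sampled here.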
                let s1 = sound_data.samples[s1_idx];
                let s2 = sound_data.samples[s2_idx];
                let sample = s1 + (s2 - s1) * cursor_fract;

                // Mix into output buffer
                let out_idx = i * stream_info.channels as usize;
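                // Stereo output receives the panned gains; any other channel count
                // falls back to writing the mono mix into the frame's first channel.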
                if stream_info.channels == 2 {
                    output_buffer[out_idx] += sample * vol_l;
                    output_buffer[out_idx + 1] += sample * vol_r;
                } else {
                    output_buffer[out_idx] += sample * volume;
                }

                // Advance cursor
                *cursor += resample_ratio;
            }
        }

        // --- Step 4: Limiter ---
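        // A simple hard clip: keeps the summed mix within [-1.0, 1.0] so loud or
        // overlapping sources cannot push the output past full scale.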
        for sample in output_buffer.iter_mut() {
            *sample = sample.clamp(-1.0, 1.0);
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use khora_core::{
        asset::AssetHandle,
        math::{affine_transform::AffineTransform, vector::Vec3},
    };
    use khora_data::assets::SoundData;

    // Helper to create a simple SoundData for tests.
    fn create_test_sound(len: usize, sample_rate: u32) -> AssetHandle<SoundData> {
        let samples = (0..len).map(|i| (i as f32).sin()).collect();
        AssetHandle::new(SoundData {
            samples,
            channels: 1, // Mono for simplicity
            sample_rate,
        })
    }

    // Helper for floating-point comparison
    fn approx_eq(a: f32, b: f32) -> bool {
        (a - b).abs() < 1e-5
    }

    #[test]
    fn test_panning_right() {
        let mut world = World::new();
        let stream_info = StreamInfo {
            channels: 2,
            sample_rate: 44100,
        };
        let lane = SpatialMixingLane::new();
        let mut buffer = vec![0.0; 128];

        // Listener at the origin
        world.spawn((AudioListener, GlobalTransform(AffineTransform::IDENTITY)));

        // Spawn a sound source to the right of the listener
        let sound = create_test_sound(1024, 44100);
        world.spawn((
            AudioSource {
                handle: sound,
                autoplay: true,
                looping: false,
                volume: 1.0,
                state: None,
            },
            GlobalTransform(AffineTransform::from_translation(Vec3::new(10.0, 0.0, 0.0))),
        ));

        lane.mix(&mut world, &mut buffer, &stream_info);

        // The sum of squares is a good measure of the signal's energy
        let energy_left = buffer.iter().step_by(2).map(|&s| s * s).sum::<f32>();
        let energy_right = buffer
            .iter()
            .skip(1)
            .step_by(2)
            .map(|&s| s * s)
            .sum::<f32>();

        assert!(
            energy_right > energy_left * 100.0,
            "The energy should be much stronger in the right channel"
        );

        // With a sound perfectly to the right, the left channel MUST be silent.
        assert!(
            approx_eq(energy_left, 0.0),
            "The left channel should be silent for a sound perfectly to the right"
        );
    }

    #[test]
    fn test_distance_attenuation() {
        let stream_info = StreamInfo {
            channels: 2,
            sample_rate: 44100,
        };
        let lane = SpatialMixingLane::new();

        // --- Case 1: Near source ---
        let mut world_near = World::new();
        world_near.spawn((AudioListener, GlobalTransform(AffineTransform::IDENTITY)));
        let sound = create_test_sound(1024, 44100);
        world_near.spawn((
            AudioSource {
                handle: sound.clone(),
                autoplay: true,
                looping: true,
                volume: 1.0,
                state: None,
            },
            GlobalTransform(AffineTransform::from_translation(Vec3::new(1.0, 0.0, 0.0))),
        ));

        let mut buffer_near = vec![0.0; 128];
        lane.mix(&mut world_near, &mut buffer_near, &stream_info);
        let peak_near = buffer_near.iter().map(|s| s.abs()).fold(0.0, f32::max);

        // --- Case 2: Far source ---
        let mut world_far = World::new();
        world_far.spawn((AudioListener, GlobalTransform(AffineTransform::IDENTITY)));
        world_far.spawn((
            AudioSource {
                handle: sound,
                autoplay: true,
                looping: true,
                volume: 1.0,
                state: None,
            },
            GlobalTransform(AffineTransform::from_translation(Vec3::new(
                100.0, 0.0, 0.0,
            ))),
        ));

        let mut buffer_far = vec![0.0; 128];
        lane.mix(&mut world_far, &mut buffer_far, &stream_info);
        let peak_far = buffer_far.iter().map(|s| s.abs()).fold(0.0, f32::max);

        assert!(peak_near > 0.01, "The near sound should be audible");
        assert!(
            peak_far < peak_near * 0.1,
            "The far sound should be significantly quieter"
        );
    }

    #[test]
    fn test_sound_finishes_and_stops() {
        let mut world = World::new();
        let stream_info = StreamInfo {
            channels: 2,
            sample_rate: 10,
        };
        let lane = SpatialMixingLane::new();

        let sound = create_test_sound(5, 10); // Sound of 5 samples at 10 Hz (0.5 s)
        let entity = world.spawn((
            AudioSource {
                handle: sound,
                autoplay: true,
                looping: false, // Does not loop
                volume: 1.0,
                state: None,
            },
            GlobalTransform::default(),
        ));

        // First mix: the sound plays.
        let mut buffer = vec![0.0; 20]; // 20 interleaved samples = 10 stereo frames = 1 second
        lane.mix(&mut world, &mut buffer, &stream_info);

        // After the mix, the sound should have finished
        let source = world.get_mut::<AudioSource>(entity).unwrap();
        assert!(
            source.state.is_none(),
            "The playback state should be `None` after the sound finishes"
        );

        // Check that the sound stops in the buffer
        let first_part_energy = buffer[0..10].iter().map(|&s| s * s).sum::<f32>();
        let second_part_energy = buffer[10..20].iter().map(|&s| s * s).sum::<f32>();
        assert!(first_part_energy > 0.0);
        assert!(
            approx_eq(second_part_energy, 0.0),
            "The second half of the buffer should be silent"
        );
    }

    #[test]
    fn test_sound_loops() {
        let mut world = World::new();
        let stream_info = StreamInfo {
            channels: 1,
            sample_rate: 10,
        };
        let lane = SpatialMixingLane::new();

        let sound = create_test_sound(5, 10); // Sound of 5 samples
        let entity = world.spawn((
            AudioSource {
                handle: sound,
                autoplay: true,
                looping: true, // Loops
                volume: 1.0,
                state: None,
            },
            GlobalTransform::default(),
        ));

        let mut buffer = vec![0.0; 12]; // Buffer of 12 samples
        lane.mix(&mut world, &mut buffer, &stream_info);

        // After the mix, the sound should still be playing
        let source = world.get::<AudioSource>(entity).unwrap();
        let cursor = source.state.as_ref().unwrap().cursor;
        assert!(
            cursor > 0.0 && cursor < 5.0,
            "The cursor should have looped and returned to the beginning, cursor is {}",
            cursor
        );

        // The sound should be audible at the start, after the loop point, and at the end
        // of the buffer (index 0 is skipped because the test waveform starts at sin(0) = 0).
        assert!(!approx_eq(buffer[1], 0.0)); // Beginning
        assert!(!approx_eq(buffer[6], 0.0)); // Middle (after loop)
        assert!(!approx_eq(buffer[11], 0.0)); // End
    }
}