khora_data/allocators/
tracking_allocator.rs

1// Copyright 2025 eraflo
2//
3// Licensed under the Apache License, Version 2.0 (the "License");
4// you may not use this file except in compliance with the License.
5// You may obtain a copy of the License at
6//
7//     http://www.apache.org/licenses/LICENSE-2.0
8//
9// Unless required by applicable law or agreed to in writing, software
10// distributed under the License is distributed on an "AS IS" BASIS,
11// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12// See the License for the specific language governing permissions and
13// limitations under the License.
14
15//! An implementation of `GlobalAlloc` that tracks memory usage.
16
17use khora_core::memory::*;
18use std::alloc::{GlobalAlloc, Layout, System};
19use std::sync::atomic::Ordering;
20
21/// The size, in bytes, above which an allocation is considered "large".
22const LARGE_ALLOCATION_THRESHOLD: usize = 1024 * 1024; // 1MB
23/// The size, in bytes, below which an allocation is considered "small".
24const SMALL_ALLOCATION_THRESHOLD: usize = 1024; // 1KB
25
26/// A wrapper around a `GlobalAlloc` implementation (like `std::alloc::System`)
27/// that intercepts allocation calls to update the global memory counters defined
28/// in `khora_core::memory`.
29///
30/// This allocator is the key to enabling the SAA's memory monitoring. By registering
31/// it as the `#[global_allocator]`, all heap allocations made by the application
32/// will be tracked, providing essential telemetry to the Dynamic Context Core (DCC).
33///
34/// # Type Parameters
35///
36/// * `A`: The underlying allocator that will perform the actual memory allocation.
37///   Defaults to `System`, the standard Rust allocator.
38///
39/// # Usage
40///
41/// ```rust,ignore
42/// use khora_data::allocators::SaaTrackingAllocator;
43///
44/// #[global_allocator]
45/// static GLOBAL: SaaTrackingAllocator = SaaTrackingAllocator::new(std::alloc::System);
46/// ```
#[derive(Debug, Default, Clone, Copy)]
pub struct SaaTrackingAllocator<A = System> {
    /// The wrapped allocator that performs the actual memory operations;
    /// all `GlobalAlloc` calls are forwarded to it after/before the
    /// tracking counters are updated.
    inner: A,
}
51
52impl<A> SaaTrackingAllocator<A> {
53    /// Creates a new tracking allocator that wraps the given inner allocator.
54    pub const fn new(inner: A) -> Self {
55        Self { inner }
56    }
57}
58
59unsafe impl<A: GlobalAlloc> GlobalAlloc for SaaTrackingAllocator<A> {
60    /// Allocates memory and updates tracking counters.
61    ///
62    /// # Safety
63    ///
64    /// This function is unsafe because it is part of the `GlobalAlloc` trait.
65    /// The caller must ensure that `layout` has a non-zero size.
66    unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
67        let ptr = self.inner.alloc(layout);
68        if !ptr.is_null() {
69            let size = layout.size();
70            let result = CURRENTLY_ALLOCATED_BYTES.fetch_update(
71                Ordering::Relaxed,
72                Ordering::Relaxed,
73                |current| current.checked_add(size),
74            );
75
76            if let Ok(current_total) = result {
77                let new_total = current_total + size;
78                PEAK_ALLOCATED_BYTES.fetch_max(new_total as u64, Ordering::Relaxed);
79                TOTAL_ALLOCATIONS.fetch_add(1, Ordering::Relaxed);
80                BYTES_ALLOCATED_LIFETIME.fetch_add(size as u64, Ordering::Relaxed);
81
82                if size >= LARGE_ALLOCATION_THRESHOLD {
83                    LARGE_ALLOCATIONS.fetch_add(1, Ordering::Relaxed);
84                    LARGE_ALLOCATION_BYTES.fetch_add(size as u64, Ordering::Relaxed);
85                } else if size < SMALL_ALLOCATION_THRESHOLD {
86                    SMALL_ALLOCATIONS.fetch_add(1, Ordering::Relaxed);
87                    SMALL_ALLOCATION_BYTES.fetch_add(size as u64, Ordering::Relaxed);
88                }
89            } else {
90                log::error!("Memory tracking counter overflowed during alloc! Size: {size}");
91            }
92        }
93        ptr
94    }
95
96    /// Deallocates memory and updates tracking counters.
97    ///
98    /// # Safety
99    ///
100    /// This function is unsafe because it is part of the `GlobalAlloc` trait.
101    /// The caller must ensure that `ptr` was allocated by this allocator with the same `layout`.
102    unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
103        let size = layout.size();
104        let result = CURRENTLY_ALLOCATED_BYTES.fetch_update(
105            Ordering::Relaxed,
106            Ordering::Relaxed,
107            |current| current.checked_sub(size),
108        );
109
110        if result.is_err() {
111            log::error!("Memory tracking counter underflowed during dealloc! Size: {size}");
112        } else {
113            TOTAL_DEALLOCATIONS.fetch_add(1, Ordering::Relaxed);
114            BYTES_DEALLOCATED_LIFETIME.fetch_add(size as u64, Ordering::Relaxed);
115        }
116
117        self.inner.dealloc(ptr, layout);
118    }
119
120    /// Allocates zero-initialized memory and updates tracking counters.
121    ///
122    /// # Safety
123    ///
124    /// This function is unsafe for the same reasons as `alloc`.
125    unsafe fn alloc_zeroed(&self, layout: Layout) -> *mut u8 {
126        let ptr = self.inner.alloc_zeroed(layout);
127        if !ptr.is_null() {
128            let size = layout.size();
129            let result = CURRENTLY_ALLOCATED_BYTES.fetch_update(
130                Ordering::Relaxed,
131                Ordering::Relaxed,
132                |current| current.checked_add(size),
133            );
134
135            if let Ok(current_total) = result {
136                let new_total = current_total + size;
137                PEAK_ALLOCATED_BYTES.fetch_max(new_total as u64, Ordering::Relaxed);
138                TOTAL_ALLOCATIONS.fetch_add(1, Ordering::Relaxed);
139                BYTES_ALLOCATED_LIFETIME.fetch_add(size as u64, Ordering::Relaxed);
140
141                if size >= LARGE_ALLOCATION_THRESHOLD {
142                    LARGE_ALLOCATIONS.fetch_add(1, Ordering::Relaxed);
143                    LARGE_ALLOCATION_BYTES.fetch_add(size as u64, Ordering::Relaxed);
144                } else if size < SMALL_ALLOCATION_THRESHOLD {
145                    SMALL_ALLOCATIONS.fetch_add(1, Ordering::Relaxed);
146                    SMALL_ALLOCATION_BYTES.fetch_add(size as u64, Ordering::Relaxed);
147                }
148            } else {
149                log::error!("Memory tracking counter overflowed during alloc_zeroed! Size: {size}");
150            }
151        }
152        ptr
153    }
154
155    /// Reallocates memory and updates tracking counters.
156    ///
157    /// # Safety
158    ///
159    /// This function is unsafe for the same reasons as `realloc` in `GlobalAlloc`.
160    unsafe fn realloc(&self, ptr: *mut u8, layout: Layout, new_size: usize) -> *mut u8 {
161        let old_size = layout.size();
162        let new_ptr = self.inner.realloc(ptr, layout, new_size);
163        if !new_ptr.is_null() {
164            TOTAL_REALLOCATIONS.fetch_add(1, Ordering::Relaxed);
165            let size_diff = new_size as isize - old_size as isize;
166            let fetch_result = match size_diff.cmp(&0) {
167                std::cmp::Ordering::Greater => {
168                    let additional_bytes = size_diff as usize;
169                    BYTES_ALLOCATED_LIFETIME.fetch_add(additional_bytes as u64, Ordering::Relaxed);
170                    CURRENTLY_ALLOCATED_BYTES.fetch_update(
171                        Ordering::Relaxed,
172                        Ordering::Relaxed,
173                        |current| current.checked_add(additional_bytes),
174                    )
175                }
176                std::cmp::Ordering::Less => {
177                    let freed_bytes = (-size_diff) as usize;
178                    BYTES_DEALLOCATED_LIFETIME.fetch_add(freed_bytes as u64, Ordering::Relaxed);
179                    CURRENTLY_ALLOCATED_BYTES.fetch_update(
180                        Ordering::Relaxed,
181                        Ordering::Relaxed,
182                        |current| current.checked_sub(freed_bytes),
183                    )
184                }
185                std::cmp::Ordering::Equal => Ok(CURRENTLY_ALLOCATED_BYTES.load(Ordering::Relaxed)),
186            };
187
188            if size_diff > 0 {
189                if let Ok(new_total) = fetch_result {
190                    PEAK_ALLOCATED_BYTES.fetch_max(new_total as u64, Ordering::Relaxed);
191                }
192            }
193
194            if fetch_result.is_err() {
195                log::error!(
196                    "Memory tracking counter overflow/underflow during realloc! Diff: {size_diff}"
197                );
198            }
199        }
200        new_ptr
201    }
202}