// khora_data/allocators/tracking_allocator.rs
use khora_core::memory::*;
18use std::alloc::{GlobalAlloc, Layout, System};
19use std::sync::atomic::Ordering;
20
/// 1 MiB cutoff used to classify an allocation as "large".
const LARGE_ALLOCATION_THRESHOLD: usize = 1024 * 1024;

/// 1 KiB cutoff used to classify an allocation as "small".
const SMALL_ALLOCATION_THRESHOLD: usize = 1024;

/// A global-allocator wrapper that forwards every request to `inner`
/// while recording allocation statistics in process-wide counters.
///
/// Defaults to the system allocator when no inner allocator type is given.
#[derive(Clone, Copy, Debug, Default)]
pub struct SaaTrackingAllocator<A = System> {
    // The allocator that actually services requests.
    inner: A,
}
51
52impl<A> SaaTrackingAllocator<A> {
53 pub const fn new(inner: A) -> Self {
55 Self { inner }
56 }
57}
58
59unsafe impl<A: GlobalAlloc> GlobalAlloc for SaaTrackingAllocator<A> {
60 unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
67 let ptr = self.inner.alloc(layout);
68 if !ptr.is_null() {
69 let size = layout.size();
70 let result = CURRENTLY_ALLOCATED_BYTES.fetch_update(
71 Ordering::Relaxed,
72 Ordering::Relaxed,
73 |current| current.checked_add(size),
74 );
75
76 if let Ok(current_total) = result {
77 let new_total = current_total + size;
78 PEAK_ALLOCATED_BYTES.fetch_max(new_total as u64, Ordering::Relaxed);
79 TOTAL_ALLOCATIONS.fetch_add(1, Ordering::Relaxed);
80 BYTES_ALLOCATED_LIFETIME.fetch_add(size as u64, Ordering::Relaxed);
81
82 if size >= LARGE_ALLOCATION_THRESHOLD {
83 LARGE_ALLOCATIONS.fetch_add(1, Ordering::Relaxed);
84 LARGE_ALLOCATION_BYTES.fetch_add(size as u64, Ordering::Relaxed);
85 } else if size < SMALL_ALLOCATION_THRESHOLD {
86 SMALL_ALLOCATIONS.fetch_add(1, Ordering::Relaxed);
87 SMALL_ALLOCATION_BYTES.fetch_add(size as u64, Ordering::Relaxed);
88 }
89 } else {
90 log::error!("Memory tracking counter overflowed during alloc! Size: {size}");
91 }
92 }
93 ptr
94 }
95
96 unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
103 let size = layout.size();
104 let result = CURRENTLY_ALLOCATED_BYTES.fetch_update(
105 Ordering::Relaxed,
106 Ordering::Relaxed,
107 |current| current.checked_sub(size),
108 );
109
110 if result.is_err() {
111 log::error!("Memory tracking counter underflowed during dealloc! Size: {size}");
112 } else {
113 TOTAL_DEALLOCATIONS.fetch_add(1, Ordering::Relaxed);
114 BYTES_DEALLOCATED_LIFETIME.fetch_add(size as u64, Ordering::Relaxed);
115 }
116
117 self.inner.dealloc(ptr, layout);
118 }
119
120 unsafe fn alloc_zeroed(&self, layout: Layout) -> *mut u8 {
126 let ptr = self.inner.alloc_zeroed(layout);
127 if !ptr.is_null() {
128 let size = layout.size();
129 let result = CURRENTLY_ALLOCATED_BYTES.fetch_update(
130 Ordering::Relaxed,
131 Ordering::Relaxed,
132 |current| current.checked_add(size),
133 );
134
135 if let Ok(current_total) = result {
136 let new_total = current_total + size;
137 PEAK_ALLOCATED_BYTES.fetch_max(new_total as u64, Ordering::Relaxed);
138 TOTAL_ALLOCATIONS.fetch_add(1, Ordering::Relaxed);
139 BYTES_ALLOCATED_LIFETIME.fetch_add(size as u64, Ordering::Relaxed);
140
141 if size >= LARGE_ALLOCATION_THRESHOLD {
142 LARGE_ALLOCATIONS.fetch_add(1, Ordering::Relaxed);
143 LARGE_ALLOCATION_BYTES.fetch_add(size as u64, Ordering::Relaxed);
144 } else if size < SMALL_ALLOCATION_THRESHOLD {
145 SMALL_ALLOCATIONS.fetch_add(1, Ordering::Relaxed);
146 SMALL_ALLOCATION_BYTES.fetch_add(size as u64, Ordering::Relaxed);
147 }
148 } else {
149 log::error!("Memory tracking counter overflowed during alloc_zeroed! Size: {size}");
150 }
151 }
152 ptr
153 }
154
155 unsafe fn realloc(&self, ptr: *mut u8, layout: Layout, new_size: usize) -> *mut u8 {
161 let old_size = layout.size();
162 let new_ptr = self.inner.realloc(ptr, layout, new_size);
163 if !new_ptr.is_null() {
164 TOTAL_REALLOCATIONS.fetch_add(1, Ordering::Relaxed);
165 let size_diff = new_size as isize - old_size as isize;
166 let fetch_result = match size_diff.cmp(&0) {
167 std::cmp::Ordering::Greater => {
168 let additional_bytes = size_diff as usize;
169 BYTES_ALLOCATED_LIFETIME.fetch_add(additional_bytes as u64, Ordering::Relaxed);
170 CURRENTLY_ALLOCATED_BYTES.fetch_update(
171 Ordering::Relaxed,
172 Ordering::Relaxed,
173 |current| current.checked_add(additional_bytes),
174 )
175 }
176 std::cmp::Ordering::Less => {
177 let freed_bytes = (-size_diff) as usize;
178 BYTES_DEALLOCATED_LIFETIME.fetch_add(freed_bytes as u64, Ordering::Relaxed);
179 CURRENTLY_ALLOCATED_BYTES.fetch_update(
180 Ordering::Relaxed,
181 Ordering::Relaxed,
182 |current| current.checked_sub(freed_bytes),
183 )
184 }
185 std::cmp::Ordering::Equal => Ok(CURRENTLY_ALLOCATED_BYTES.load(Ordering::Relaxed)),
186 };
187
188 if size_diff > 0 {
189 if let Ok(new_total) = fetch_result {
190 PEAK_ALLOCATED_BYTES.fetch_max(new_total as u64, Ordering::Relaxed);
191 }
192 }
193
194 if fetch_result.is_err() {
195 log::error!(
196 "Memory tracking counter overflow/underflow during realloc! Diff: {size_diff}"
197 );
198 }
199 }
200 new_ptr
201 }
202}