Skip to content

Commit 7128267

Browse files
committed
feat(heaperion): add a growable heap
1 parent 65c8454 commit 7128267

10 files changed

Lines changed: 664 additions & 56 deletions

File tree

beskar-lib/src/lib.rs

Lines changed: 14 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -7,6 +7,7 @@ extern crate alloc;
77

88
pub use beskar_core::syscall::ExitCode;
99
use beskar_core::{syscall::SyscallExitCode, time::Duration};
10+
use core::sync::atomic::{AtomicBool, Ordering};
1011
use hyperdrive::call_once;
1112

1213
mod arch;
@@ -20,9 +21,20 @@ pub mod surface;
2021
mod sys;
2122
pub mod time;
2223

24+
static PANIC_NESTED: AtomicBool = AtomicBool::new(false);
25+
26+
/// Returns `true` if a panic is already in progress (i.e., we are in a nested panic). Note that the flag is process-global, not per-thread.
27+
pub fn panicking() -> bool {
28+
PANIC_NESTED.load(Ordering::SeqCst)
29+
}
30+
2331
#[panic_handler]
2432
fn panic(info: &::core::panic::PanicInfo) -> ! {
25-
println!("Panic occurred: {}", info);
33+
if !panicking() {
34+
PANIC_NESTED.store(true, Ordering::SeqCst);
35+
println!("Panic occurred: {}", info);
36+
}
37+
2638
sys::sc_exit(ExitCode::Failure);
2739
}
2840

@@ -70,13 +82,7 @@ macro_rules! entry_point {
7082
pub fn __init() {
7183
call_once!({
7284
// Heap
73-
{
74-
let heap_size = mem::HEAP_SIZE;
75-
let res = mem::mmap(heap_size, None, mem::MemoryProtection::ReadWrite)
76-
.expect("Memory mapping failed");
77-
unsafe { mem::init_heap(res.as_ptr(), heap_size.try_into().unwrap()) };
78-
}
79-
85+
mem::init_heap();
8086
// Time
8187
time::init();
8288
});

beskar-lib/src/mem.rs

Lines changed: 86 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -1,17 +1,18 @@
11
use crate::error::{MemoryError, MemoryErrorKind, MemoryResult};
22
use beskar_core::arch::paging::{M4KiB, MemSize as _};
33
use core::{num::NonZeroU64, ptr::NonNull};
4+
use heaperion::{DefaultGrowableHeap, HybridAllocator};
45
use hyperdrive::locks::mcs::MUMcsLock;
56

6-
static ALLOCATOR: MUMcsLock<heaperion::Heap> = MUMcsLock::uninit();
7-
8-
struct Heap;
7+
static ALLOCATOR: MUMcsLock<DefaultGrowableHeap<Mmap, 4>> = MUMcsLock::uninit();
98

109
#[global_allocator]
1110
static HEAP: Heap = Heap;
1211

13-
pub(crate) const HEAP_SIZE: u64 = 20 * 1024 * 1024; // 20 MiB
14-
beskar_core::static_assert!(HEAP_SIZE.is_multiple_of(M4KiB::SIZE));
12+
const HEAP_START_SIZE: u64 = 16 * 1024 * 1024; // 16 MiB
13+
beskar_core::static_assert!(HEAP_START_SIZE.is_multiple_of(M4KiB::SIZE));
14+
15+
struct Heap;
1516

1617
unsafe impl core::alloc::GlobalAlloc for Heap {
1718
unsafe fn alloc(&self, layout: core::alloc::Layout) -> *mut u8 {
@@ -30,8 +31,15 @@ unsafe impl core::alloc::GlobalAlloc for Heap {
3031

3132
#[inline]
3233
/// Initialize the heap allocator
33-
pub(crate) unsafe fn init_heap(start: *mut u8, size: usize) {
34-
ALLOCATOR.init(unsafe { heaperion::Heap::new(start, size) }.unwrap());
34+
pub(super) fn init_heap() {
35+
let size = HEAP_START_SIZE;
36+
let start = mmap(size, None, MemoryProtection::ReadWrite).unwrap();
37+
38+
// SAFETY: `start` and `size` come from a successful call to `mmap` and are not used after this point.
39+
let heap =
40+
unsafe { HybridAllocator::new(start.as_ptr(), usize::try_from(size).unwrap()) }.unwrap();
41+
let growable = DefaultGrowableHeap::new(heap, Mmap);
42+
ALLOCATOR.init(growable);
3543
}
3644

3745
/// Map memory into the address space
@@ -76,6 +84,77 @@ pub fn mprotect(ptr: *mut u8, size: u64, flags: MemoryProtection) -> bool {
7684
res.is_success()
7785
}
7886

87+
pub struct MmapReadWrite {
88+
ptr: NonNull<u8>,
89+
size: u64,
90+
}
91+
92+
impl MmapReadWrite {
93+
#[inline]
94+
/// Create a new read-write memory mapping of the given size.
95+
///
96+
/// # Errors
97+
///
98+
/// Returns an error if the memory cannot be mapped.
99+
pub fn new(size: u64) -> MemoryResult<Self> {
100+
let ptr = mmap(size, None, MemoryProtection::ReadWrite)?;
101+
Ok(Self { ptr, size })
102+
}
103+
104+
#[must_use]
105+
#[inline]
106+
pub const fn size(&self) -> u64 {
107+
self.size
108+
}
109+
110+
#[must_use]
111+
#[inline]
112+
pub const fn as_ptr(&self) -> *mut u8 {
113+
self.ptr.as_ptr()
114+
}
115+
116+
#[must_use]
117+
#[inline]
118+
#[expect(clippy::missing_panics_doc, reason = "Never panics")]
119+
pub fn as_slice(&self) -> &[u8] {
120+
let data = self.as_ptr();
121+
let len = usize::try_from(self.size).unwrap();
122+
unsafe { core::slice::from_raw_parts(data.cast::<u8>(), len) }
123+
}
124+
125+
#[must_use]
126+
#[inline]
127+
#[expect(clippy::missing_panics_doc, reason = "Never panics")]
128+
pub fn as_mut_slice(&mut self) -> &mut [u8] {
129+
let data = self.as_ptr();
130+
let len = usize::try_from(self.size).unwrap();
131+
unsafe { core::slice::from_raw_parts_mut(data.cast::<u8>(), len) }
132+
}
133+
}
134+
135+
impl Drop for MmapReadWrite {
136+
fn drop(&mut self) {
137+
// SAFETY: `ptr` and `size` come from a previous, successful call to `mmap`
138+
// and are not used after this point.
139+
unsafe {
140+
munmap(self.ptr.as_ptr(), self.size);
141+
}
142+
}
143+
}
144+
145+
struct Mmap;
146+
unsafe impl heaperion::MemorySource for Mmap {
147+
fn request(&mut self, min_size: usize) -> Option<(*mut u8, usize)> {
148+
let ptr = mmap(
149+
u64::try_from(min_size).unwrap(),
150+
None,
151+
MemoryProtection::ReadWrite,
152+
)
153+
.ok()?;
154+
Some((ptr.as_ptr(), min_size))
155+
}
156+
}
157+
79158
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
80159
#[repr(u64)]
81160
pub enum MemoryProtection {

heaperion/README.md

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -28,3 +28,11 @@ Heaperion is a robust and efficient memory allocator designed for embedded syste
2828
- O(log(c)) allocation and deallocation
2929
- Power-of-two sized blocks
3030
- Automatic coalescing to reduce fragmentation
31+
32+
### Hybrid Allocator
33+
34+
Dispatches each allocation to either the Slab or the Buddy allocator, depending on the requested size.
35+
36+
### GrowableHeap
37+
38+
A wrapper that allows a heap to grow at runtime by requesting additional memory from a `MemorySource`.

heaperion/src/buddy.rs

Lines changed: 18 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -72,15 +72,12 @@ impl BuddyAllocator {
7272
max_order,
7373
};
7474

75-
// Add the initial block to the appropriate free list
76-
let initial_order = size_to_order(adjusted_size.next_power_of_two() / 2);
77-
let initial_block_size = order_to_size(initial_order);
78-
79-
if initial_block_size <= adjusted_size {
80-
// SAFETY: We've validated the heap region and aligned it properly
81-
unsafe {
82-
buddy.add_block_to_free_list(aligned_ptr, initial_order);
83-
}
75+
// Add the initial block to the free list.
76+
// `max_order` is capped to `MAX_ORDER - 1`, so `order_to_size(max_order) <= adjusted_size`
77+
// is guaranteed and we cannot go out-of-bounds on `free_lists`.
78+
// SAFETY: We've validated the heap region and aligned it properly
79+
unsafe {
80+
buddy.add_block_to_free_list(aligned_ptr, max_order);
8481
}
8582

8683
Ok(buddy)
@@ -148,6 +145,17 @@ impl BuddyAllocator {
148145
Ok(())
149146
}
150147

148+
/// Returns `true` if this allocator owns the given pointer.
149+
///
150+
/// A pointer is owned when it falls within the heap region provided at construction.
151+
#[must_use]
152+
pub fn contains(&self, ptr: NonNull<u8>) -> bool {
153+
let ptr_raw = ptr.as_ptr();
154+
// SAFETY: heap_start + heap_size is within the original allocation
155+
let heap_end = unsafe { self.heap_start.add(self.heap_size) };
156+
ptr_raw >= self.heap_start && ptr_raw < heap_end
157+
}
158+
151159
/// Find and allocate a block of the given order
152160
fn find_block(&mut self, order: usize) -> Result<*mut u8> {
153161
// Try to find a block in the current order
@@ -219,11 +227,7 @@ impl BuddyAllocator {
219227
}
220228

221229
// Merge with buddy - the merged block starts at the lower address
222-
current_ptr = if current_ptr < buddy_ptr {
223-
current_ptr
224-
} else {
225-
buddy_ptr
226-
};
230+
current_ptr = current_ptr.min(buddy_ptr);
227231
current_order += 1;
228232
} else {
229233
break;

0 commit comments

Comments
 (0)