// shared_memory_heap/src/allocator.rs
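
//! A first-fit, free-list allocator for the shared-memory heap, layered on
//! top of the bump allocator. Every buffer handed out is preceded by a
//! `Metadata` header pointing back at its owning `Chunk`, which is how
//! `deallocate` recovers the chunk from a raw pointer.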

use std::{
    mem::size_of,
    ptr::NonNull,
    sync::{LazyLock, Mutex},
};

use crate::bump_allocator::{BUMP_ALLOCATOR, MEMFD_INITIAL_SIZE};

const ALIGNMENT: usize = 8;
const INITIAL_HEAP_SIZE: usize = MEMFD_INITIAL_SIZE;
const METADATA_SIZE: usize = size_of::<Metadata>();

pub(crate) static ALLOCATOR: LazyLock<Mutex<Allocator>> =
    LazyLock::new(|| Mutex::new(Allocator::new()));
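
/// Header written immediately before every buffer the allocator hands out;
/// `deallocate` reads it to map a raw pointer back to its `Chunk`.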
struct Metadata {
    chunk: NonNull<Chunk>,
}
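
/// A node in the doubly linked list of heap chunks. `buffer` points into the
/// shared-memory region, while the `Chunk` itself lives on the regular heap.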
struct Chunk {
    buffer: *mut u8,
    size: usize,
    in_use: bool,
    next_chunk: Option<NonNull<Chunk>>,
    prev_chunk: Option<NonNull<Chunk>>,
}
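
/// First-fit allocator over a doubly linked list of chunks carved out of the
/// shared-memory heap.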
pub(crate) struct Allocator {
    head: NonNull<Chunk>,
    tail: NonNull<Chunk>,
}
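
// SAFETY: the allocator is only reachable through the global `Mutex` in
// `ALLOCATOR`, so its raw pointers are never used from two threads at once.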
unsafe impl Send for Chunk {}
unsafe impl Send for Allocator {}

impl Allocator {
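    /// Carves the initial heap out of the bump allocator as a single free
    /// chunk prefixed by its `Metadata` header.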
    fn new() -> Self {
        let mut bump = BUMP_ALLOCATOR.lock().unwrap();
        let mem = unsafe { bump.alloc(INITIAL_HEAP_SIZE).unwrap() };
        // The first METADATA_SIZE bytes hold the header; everything after
        // them is one big free chunk.
        let head = Box::new(Chunk {
            buffer: unsafe { mem.byte_add(METADATA_SIZE) },
            size: INITIAL_HEAP_SIZE - METADATA_SIZE,
            in_use: false,
            next_chunk: None,
            prev_chunk: None,
        });
        let head = NonNull::from(Box::leak(head));
        let metadata = mem as *mut Metadata;
        unsafe {
            *metadata = Metadata { chunk: head };
        }
        Allocator { head, tail: head }
    }
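
    /// First-fit search over the chunk list. Returns a buffer of at least
    /// `size` bytes (rounded up to `ALIGNMENT`), or `None` if no free chunk
    /// is large enough.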
    pub(crate) fn allocate(&mut self, size: usize) -> Option<*mut u8> {
        // Round the request up to the next multiple of ALIGNMENT.
        let size = size.next_multiple_of(ALIGNMENT);
        let mut head = Some(self.head);
        while let Some(mut node) = head {
            // SAFETY: the heap sits behind a global `Mutex`, so only one
            // thread can hold references into the chunk list at a time.
            let current_head = unsafe { node.as_mut() };
            if !current_head.in_use && current_head.size >= size {
                if current_head.size < size + METADATA_SIZE + ALIGNMENT {
                    // Not enough room left over to split off a usable
                    // remainder: hand out the whole chunk.
                    current_head.in_use = true;
                    return Some(current_head.buffer);
                }
                let unused_space = Box::new(Chunk {
                    // The chunk holds at least size + METADATA_SIZE + ALIGNMENT
                    // bytes, so buffer + size + METADATA_SIZE is still inside it.
                    buffer: unsafe { current_head.buffer.byte_add(size + METADATA_SIZE) },
                    size: current_head.size - size - METADATA_SIZE,
                    in_use: false,
                    next_chunk: current_head.next_chunk,
                    prev_chunk: Some(node),
                });
                let ptr = NonNull::from(Box::leak(unused_space));
                // `buffer` points at current_head.buffer + size + METADATA_SIZE,
                // so `buffer - METADATA_SIZE` lies right after current_head's
                // shrunken buffer: exactly where the new chunk's `Metadata`
                // header is expected.
                unsafe {
                    *(ptr.as_ref().buffer.byte_sub(METADATA_SIZE) as *mut Metadata) =
                        Metadata { chunk: ptr };
                }
                // SAFETY: `ptr` was just leaked from a live `Box`.
                unsafe {
                    if ptr.as_ref().next_chunk.is_none() {
                        self.tail = ptr;
                    }
                }
                current_head.in_use = true;
                current_head.size = size;
                current_head.next_chunk = Some(ptr);
                return Some(current_head.buffer);
            }
            head = current_head.next_chunk;
        }
        // TODO: Grow the heap by requesting another block from the bump
        // allocator and appending it to the chunk list, instead of failing.
        None
    }
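
    /// Marks the chunk owning `ptr` as free. `ptr` must have been returned
    /// by a previous call to `allocate` on this allocator.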
    pub(crate) fn deallocate(&mut self, ptr: *mut u8) {
        // The `Metadata` header sits immediately before the buffer and
        // points back at the owning chunk.
        let metadata = unsafe { ptr.byte_sub(METADATA_SIZE) as *mut Metadata };
        let chunk = unsafe { (*metadata).chunk.as_mut() };
        debug_assert!(chunk.in_use);
        debug_assert_eq!(chunk.buffer, ptr);
        chunk.in_use = false;
        // TODO: Consolidate adjacent free chunks (see the sketch below).
    }
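
    /// Translates `ptr` into an offset from the start of the shared-memory
    /// region by delegating to the bump allocator; presumably this is what
    /// other processes mapping the same region use to locate the allocation.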
    pub(crate) unsafe fn get_offset(&self, ptr: *const u8) -> usize {
        let allocator = BUMP_ALLOCATOR.lock().unwrap();
        allocator.get_offset(ptr)
    }
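
    // A minimal sketch of the consolidation step mentioned in the TODO in
    // `deallocate` (hypothetical helper, not wired in anywhere yet): merge a
    // freed chunk with a free successor so the list does not fragment into
    // unusably small pieces. It assumes `next_chunk` is physically adjacent,
    // which holds for chunks created by splitting in `allocate`.
    #[allow(dead_code)]
    fn coalesce_with_next(&mut self, mut chunk: NonNull<Chunk>) {
        let chunk_ref = unsafe { chunk.as_mut() };
        let Some(next) = chunk_ref.next_chunk else { return };
        let next_ref = unsafe { next.as_ref() };
        if chunk_ref.in_use || next_ref.in_use {
            return;
        }
        // The successor's buffer and its `Metadata` header both become part
        // of this chunk's buffer.
        chunk_ref.size += METADATA_SIZE + next_ref.size;
        chunk_ref.next_chunk = next_ref.next_chunk;
        match chunk_ref.next_chunk {
            Some(mut after) => unsafe { after.as_mut().prev_chunk = Some(chunk) },
            None => self.tail = chunk,
        }
        // The merged node was leaked from a `Box` in `allocate`; reclaim it.
        drop(unsafe { Box::from_raw(next.as_ptr()) });
    }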
}

#[cfg(test)]
mod tests {
    extern crate test;

    use core::slice;
    use test::Bencher;

    use super::ALLOCATOR;

    #[test]
    fn functionality() {
        let mut allocator = ALLOCATOR.lock().unwrap();
        unsafe {
            let x = allocator.allocate(10).unwrap();
            let x = slice::from_raw_parts_mut(x, 10);
            x[0] = 1;
            assert_eq!(x[0], 1);
            allocator.deallocate(x.as_mut_ptr());
        }
    }

    #[bench]
    fn allocator_bench(b: &mut Bencher) {
        let mut allocator = ALLOCATOR.lock().unwrap();
        b.iter(|| unsafe {
            let x = allocator.allocate(1).unwrap();
            let x = slice::from_raw_parts_mut(x, 1);
            x[0] = 1;
            assert_eq!(x[0], 1);
            allocator.deallocate(x.as_mut_ptr());
        });
    }
}