Compare commits

...

3 Commits

SHA1        Message                                  Date
ef9bcc94fb  remove unnecessary features              2025-03-04 10:55:38 +01:00
7f46573218  feat: make size of SharedPtr dynamic     2024-10-10 16:37:04 +02:00
9b466a72bb  fix: inconsistencies with buffer size    2024-08-28 10:15:56 +02:00
3 changed files with 55 additions and 39 deletions

View File

@@ -11,7 +11,7 @@ const INITIAL_HEAP_SIZE: usize = MEMFD_INITIAL_SIZE;
 const METADATA_SIZE: usize = size_of::<Metadata>();
 
 pub(crate) static ALLOCATOR: LazyLock<Mutex<Allocator>> =
-    LazyLock::new(|| unsafe { Mutex::new(Allocator::new()) });
+    LazyLock::new(|| Mutex::new(Allocator::new()));
 
 struct Metadata {
     chunk: NonNull<Chunk>,
@@ -34,14 +34,14 @@ unsafe impl Send for Chunk {}
 unsafe impl Send for Allocator {}
 
 impl Allocator {
-    unsafe fn new() -> Self {
+    fn new() -> Self {
         let mut allocator = BUMP_ALLOCATOR.lock().unwrap();
-        let mem = allocator.alloc(INITIAL_HEAP_SIZE).unwrap();
+        let mem = unsafe { allocator.alloc(INITIAL_HEAP_SIZE).unwrap() };
         let head = Box::new(Chunk {
-            buffer: mem.byte_add(METADATA_SIZE),
-            size: INITIAL_HEAP_SIZE,
+            buffer: unsafe { mem.byte_add(METADATA_SIZE) },
+            size: INITIAL_HEAP_SIZE - METADATA_SIZE,
             in_use: false,
             next_chunk: None,
             prev_chunk: None,
@@ -49,27 +49,32 @@ impl Allocator {
         let head = NonNull::new(Box::leak(head)).unwrap();
         let mem = mem as *mut Metadata;
-        *mem = Metadata { chunk: head };
+        unsafe {
+            *mem = Metadata { chunk: head };
+        }
 
         Allocator { head, tail: head }
     }
 
-    pub(crate) unsafe fn allocate(&mut self, size: usize) -> Option<*mut u8> {
+    pub(crate) fn allocate(&mut self, size: usize) -> Option<*mut u8> {
         let size = (size + ALIGNMENT - 1) / ALIGNMENT * ALIGNMENT;
         let mut head = Some(self.head);
         while head.is_some() {
-            let current_head = head.unwrap().as_mut();
+            // The heap uses a global Mutex. Only one thread can operate on it at a time.
+            let current_head = unsafe { head.unwrap().as_mut() };
             if !current_head.in_use && current_head.size >= size {
-                if current_head.size == size {
+                if current_head.size < (size + METADATA_SIZE + ALIGNMENT) {
                     current_head.in_use = true;
                     return Some(current_head.buffer);
                 }
                 let unused_space = Box::new(Chunk {
-                    buffer: current_head.buffer.byte_add(size + METADATA_SIZE),
+                    // We know that size of buffer is larger than size + METADATA_SIZE + ALIGNMENT.
+                    // Therefore size + METADATA_SIZE is still inside of the buffer
+                    buffer: unsafe { current_head.buffer.byte_add(size + METADATA_SIZE) },
                     size: current_head.size - size - METADATA_SIZE,
                     in_use: false,
                     next_chunk: current_head.next_chunk,
@@ -77,11 +82,20 @@ impl Allocator {
                 });
                 let ptr = NonNull::new(Box::leak(unused_space)).unwrap();
-                *(ptr.as_ref().buffer.byte_sub(METADATA_SIZE) as *mut Metadata) =
-                    Metadata { chunk: ptr };
+                // buffer points to current_head + size + METADATA_SIZE.
+                // Therefore buffer - METADATA_SIZE points right after the buffer of current_head
+                // and right before the buffer of unused_space.
+                // This is where the pointer to the metadata chunk is expected
+                unsafe {
+                    *(ptr.as_ref().buffer.byte_sub(METADATA_SIZE) as *mut Metadata) =
+                        Metadata { chunk: ptr };
+                }
-                if ptr.as_ref().next_chunk.is_none() {
-                    self.tail = ptr;
+                // We know that accessing ptr is safe since we just allocated it.
+                unsafe {
+                    if ptr.as_ref().next_chunk.is_none() {
+                        self.tail = ptr;
+                    }
                 }
                 current_head.in_use = true;
@@ -99,9 +113,9 @@ impl Allocator {
         None
     }
 
-    pub(crate) unsafe fn deallocate(&mut self, ptr: *mut u8) {
-        let metadata = ptr.byte_sub(METADATA_SIZE) as *mut Metadata;
-        let metadata = (*metadata).chunk.as_mut();
+    pub(crate) fn deallocate(&mut self, ptr: *mut u8) {
+        let metadata = unsafe { ptr.byte_sub(METADATA_SIZE) as *mut Metadata };
+        let metadata = unsafe { (*metadata).chunk.as_mut() };
         debug_assert_eq!(metadata.in_use, true);
         debug_assert_eq!(metadata.buffer, ptr);
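
For reference, here is a minimal standalone sketch of the size arithmetic the reworked allocate() relies on: requests are rounded up to the alignment, and a free chunk is only split when the remainder can still hold another metadata header plus at least one aligned allocation. The ALIGNMENT and METADATA_SIZE values below are made-up stand-ins for illustration, not the crate's real constants.

    // Hypothetical stand-in values; the real ones come from the crate's
    // ALIGNMENT constant and size_of::<Metadata>().
    const ALIGNMENT: usize = 16;
    const METADATA_SIZE: usize = 32;

    // Round a request up to the next multiple of ALIGNMENT,
    // mirroring (size + ALIGNMENT - 1) / ALIGNMENT * ALIGNMENT.
    fn align_up(size: usize) -> usize {
        (size + ALIGNMENT - 1) / ALIGNMENT * ALIGNMENT
    }

    // A free chunk is split only if the leftover can hold a new Metadata
    // header plus at least one aligned allocation; otherwise the whole
    // chunk is handed out (the size + METADATA_SIZE + ALIGNMENT check).
    fn should_split(chunk_size: usize, requested: usize) -> bool {
        chunk_size >= requested + METADATA_SIZE + ALIGNMENT
    }

    fn main() {
        let requested = align_up(10);
        assert_eq!(requested, 16); // 10 rounds up to one 16-byte unit

        // 48 < 16 + 32 + 16, so a 48-byte chunk is returned whole.
        assert!(!should_split(48, requested));

        // A 256-byte chunk is split; the new free chunk keeps
        // 256 - 16 - 32 = 208 bytes of usable space.
        assert!(should_split(256, requested));
        assert_eq!(256 - requested - METADATA_SIZE, 208);
    }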

View File

@@ -1,6 +1,3 @@
-#![feature(test)]
-#![feature(lazy_cell)]
-
 use bump_allocator::BUMP_ALLOCATOR;
 
 mod allocator;

View File

@@ -4,56 +4,61 @@ use std::ops::{Deref, DerefMut};
 use crate::allocator::ALLOCATOR;
 
 #[derive(Debug)]
-pub struct SharedPtr<const N: usize>(*mut u8);
+pub struct SharedPtr {
+    ptr: *mut u8,
+    size: usize
+}
 
-unsafe impl<const N: usize> Send for SharedPtr<N> {}
-unsafe impl<const N: usize> Sync for SharedPtr<N> {}
+unsafe impl Send for SharedPtr {}
+unsafe impl Sync for SharedPtr {}
 
-impl<const N: usize> SharedPtr<N> {
-    pub fn new() -> Option<Self> {
+impl SharedPtr {
+    pub fn new(size: usize) -> Option<Self> {
         let mut allocator = ALLOCATOR.lock().unwrap();
-        let buf = unsafe { allocator.allocate(N)? };
-        Some(SharedPtr(buf))
+        let buf = allocator.allocate(size)?;
+        Some(SharedPtr{ptr: buf, size})
     }
 
     pub fn get_offset(&self) -> usize {
         let allocator = ALLOCATOR.lock().unwrap();
-        unsafe { allocator.get_offset(self.as_ptr()) }
+        unsafe { allocator.get_offset(self.ptr) }
     }
+
+    pub fn get_size(&self) -> usize {
+        self.size
+    }
 }
 
-impl<const N: usize> Deref for SharedPtr<N> {
-    type Target = [u8; N];
+impl Deref for SharedPtr {
+    type Target = [u8];
 
     fn deref(&self) -> &Self::Target {
         unsafe {
-            slice::from_raw_parts(self.0, N)
+            slice::from_raw_parts(self.ptr, self.size)
                 .try_into()
                 .expect("This should never fail")
         }
     }
 }
 
-impl<const N: usize> DerefMut for SharedPtr<N> {
+impl DerefMut for SharedPtr {
     fn deref_mut(&mut self) -> &mut Self::Target {
         unsafe {
-            slice::from_raw_parts_mut(self.0, N)
+            slice::from_raw_parts_mut(self.ptr, self.size)
                 .try_into()
                 .expect("This should never fail")
         }
     }
 }
 
-impl<const N: usize> Drop for SharedPtr<N> {
+impl Drop for SharedPtr {
     fn drop(&mut self) {
         let mut allocator = ALLOCATOR.lock().unwrap();
-        unsafe {
-            allocator.deallocate(self.0);
-        }
+        allocator.deallocate(self.ptr);
     }
 }
@@ -63,7 +68,7 @@ mod tests {
     #[test]
     fn test() {
-        let mut x = SharedPtr::<10>::new().unwrap();
+        let mut x = SharedPtr::new(10).unwrap();
         x[0] = 1;
         assert_eq!(x[0], 1);
         drop(x);
@@ -71,7 +76,7 @@ mod tests {
     #[test]
     fn slice() {
-        let mut x = SharedPtr::<10>::new().unwrap();
+        let mut x = SharedPtr::new(10).unwrap();
         x[0] = 1;
         x[1] = 2;
         assert_eq!(x[0..=1], [1, 2]);
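
Taken together, these commits replace the const-generic SharedPtr<N> with a runtime-sized handle. A rough usage sketch of the new API is below; the crate path is a placeholder, and it assumes SharedPtr is exported from the crate root as the tests above suggest.

    // `shared_heap` is a placeholder for the actual crate name.
    use shared_heap::SharedPtr;

    fn main() {
        // The size is now a runtime argument instead of a const generic,
        // so differently sized buffers share one type.
        let mut buf = SharedPtr::new(64).expect("allocation failed");
        assert_eq!(buf.get_size(), 64);

        // Deref/DerefMut expose the allocation as a plain byte slice.
        buf[0] = 42;
        buf[1..4].copy_from_slice(&[1, 2, 3]);
        assert_eq!(buf[0..4], [42, 1, 2, 3]);

        // Dropping the handle returns its chunk to the shared allocator.
        drop(buf);
    }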