diff --git a/src/allocator.rs b/src/allocator.rs
index bcd9350..adef3a0 100644
--- a/src/allocator.rs
+++ b/src/allocator.rs
@@ -11,7 +11,7 @@
 const INITIAL_HEAP_SIZE: usize = MEMFD_INITIAL_SIZE;
 const METADATA_SIZE: usize = size_of::<Metadata>();
 pub(crate) static ALLOCATOR: LazyLock<Mutex<Allocator>> =
-    LazyLock::new(|| unsafe { Mutex::new(Allocator::new()) });
+    LazyLock::new(|| Mutex::new(Allocator::new()));
 
 struct Metadata {
     chunk: NonNull<Chunk>,
@@ -34,14 +34,14 @@ unsafe impl Send for Chunk {}
 unsafe impl Send for Allocator {}
 
 impl Allocator {
-    unsafe fn new() -> Self {
+    fn new() -> Self {
         let mut allocator = BUMP_ALLOCATOR.lock().unwrap();
 
-        let mem = allocator.alloc(INITIAL_HEAP_SIZE).unwrap();
+        let mem = unsafe { allocator.alloc(INITIAL_HEAP_SIZE).unwrap() };
 
         let head = Box::new(Chunk {
-            buffer: mem.byte_add(METADATA_SIZE),
-            size: INITIAL_HEAP_SIZE,
+            buffer: unsafe { mem.byte_add(METADATA_SIZE) },
+            size: INITIAL_HEAP_SIZE - METADATA_SIZE,
             in_use: false,
             next_chunk: None,
             prev_chunk: None,
@@ -49,27 +49,32 @@ impl Allocator {
         let head = NonNull::new(Box::leak(head)).unwrap();
 
         let mem = mem as *mut Metadata;
-        *mem = Metadata { chunk: head };
+        unsafe {
+            *mem = Metadata { chunk: head };
+        }
 
         Allocator { head, tail: head }
     }
 
-    pub(crate) unsafe fn allocate(&mut self, size: usize) -> Option<*mut u8> {
+    pub(crate) fn allocate(&mut self, size: usize) -> Option<*mut u8> {
         let size = (size + ALIGNMENT - 1) / ALIGNMENT * ALIGNMENT;
 
         let mut head = Some(self.head);
 
         while head.is_some() {
-            let current_head = head.unwrap().as_mut();
+            // The heap uses a global Mutex. Only one thread can operate on it at a time.
+            let current_head = unsafe { head.unwrap().as_mut() };
 
             if !current_head.in_use && current_head.size >= size {
-                if current_head.size == size {
+                if current_head.size < (size + METADATA_SIZE + ALIGNMENT) {
                     current_head.in_use = true;
                     return Some(current_head.buffer);
                 }
 
                 let unused_space = Box::new(Chunk {
-                    buffer: current_head.buffer.byte_add(size + METADATA_SIZE),
+                    // We know the chunk's size is at least size + METADATA_SIZE + ALIGNMENT.
+                    // Therefore size + METADATA_SIZE is still inside the buffer.
+                    buffer: unsafe { current_head.buffer.byte_add(size + METADATA_SIZE) },
                     size: current_head.size - size - METADATA_SIZE,
                     in_use: false,
                     next_chunk: current_head.next_chunk,
@@ -77,11 +82,20 @@ impl Allocator {
                 });
 
                 let ptr = NonNull::new(Box::leak(unused_space)).unwrap();
-                *(ptr.as_ref().buffer.byte_sub(METADATA_SIZE) as *mut Metadata) =
-                    Metadata { chunk: ptr };
+                // buffer points to current_head.buffer + size + METADATA_SIZE.
+                // Therefore buffer - METADATA_SIZE points right after the bytes handed out
+                // from current_head and right before the buffer of unused_space.
+                // This is where the pointer to the metadata chunk is expected.
+                unsafe {
+                    *(ptr.as_ref().buffer.byte_sub(METADATA_SIZE) as *mut Metadata) =
+                        Metadata { chunk: ptr };
+                }
 
-                if ptr.as_ref().next_chunk.is_none() {
-                    self.tail = ptr;
+                // We know that accessing ptr is safe since we just allocated it.
+                unsafe {
+                    if ptr.as_ref().next_chunk.is_none() {
+                        self.tail = ptr;
+                    }
                 }
 
                 current_head.in_use = true;
@@ -99,9 +113,9 @@ impl Allocator {
         None
     }
 
-    pub(crate) unsafe fn deallocate(&mut self, ptr: *mut u8) {
-        let metadata = ptr.byte_sub(METADATA_SIZE) as *mut Metadata;
-        let metadata = (*metadata).chunk.as_mut();
+    pub(crate) fn deallocate(&mut self, ptr: *mut u8) {
+        let metadata = unsafe { ptr.byte_sub(METADATA_SIZE) as *mut Metadata };
+        let metadata = unsafe { (*metadata).chunk.as_mut() };
 
         debug_assert_eq!(metadata.in_use, true);
         debug_assert_eq!(metadata.buffer, ptr);
diff --git a/src/sharedptr.rs b/src/sharedptr.rs
index e363536..278192f 100644
--- a/src/sharedptr.rs
+++ b/src/sharedptr.rs
@@ -13,7 +13,7 @@ impl<const N: usize> SharedPtr<N> {
     pub fn new() -> Option<Self> {
         let mut allocator = ALLOCATOR.lock().unwrap();
 
-        let buf = unsafe { allocator.allocate(N)? };
+        let buf = allocator.allocate(N)?;
 
         Some(SharedPtr(buf))
     }
@@ -51,9 +51,7 @@ impl<const N: usize> Drop for SharedPtr<N> {
     fn drop(&mut self) {
        let mut allocator = ALLOCATOR.lock().unwrap();
 
-        unsafe {
-            allocator.deallocate(self.0);
-        }
+        allocator.deallocate(self.0);
     }
 }
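
For reference, the new early-return condition in allocate() only splits a free chunk when the leftover space can hold another metadata header plus at least one aligned allocation; otherwise the whole chunk is handed out. Below is a minimal standalone sketch of that arithmetic. The constant values are hypothetical stand-ins, not the crate's real ALIGNMENT or size_of::<Metadata>().

const ALIGNMENT: usize = 8; // hypothetical, stands in for the crate's ALIGNMENT
const METADATA_SIZE: usize = 16; // hypothetical, stands in for size_of::<Metadata>()

/// Returns true when a free chunk of `chunk_size` bytes is worth splitting
/// after serving a request of `requested` bytes, mirroring the check in allocate().
fn should_split(chunk_size: usize, requested: usize) -> bool {
    // allocate() first rounds the request up to the alignment.
    let size = (requested + ALIGNMENT - 1) / ALIGNMENT * ALIGNMENT;
    // Split only if the remainder can hold a metadata header plus at least one
    // aligned allocation; otherwise the remainder chunk would be unusable.
    !(chunk_size < size + METADATA_SIZE + ALIGNMENT)
}

fn main() {
    assert!(should_split(128, 30));  // 128 >= 32 + 16 + 8, so a remainder chunk is carved off
    assert!(!should_split(48, 30));  // 48 < 56, so the whole chunk is used as-is
}

The previous `current_head.size == size` check only avoided splitting on an exact fit, so a near-fit could make `current_head.size - size - METADATA_SIZE` underflow; the new threshold rules that case out.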