diff --git a/src/allocator.rs b/src/allocator.rs
index 4f08de5..bcd9350 100644
--- a/src/allocator.rs
+++ b/src/allocator.rs
@@ -110,6 +110,11 @@ impl Allocator {
 
         // TODO: Consolidate chunks
     }
+
+    pub(crate) unsafe fn get_offset(&self, ptr: *const u8) -> usize {
+        let allocator = BUMP_ALLOCATOR.lock().unwrap();
+        allocator.get_offset(ptr)
+    }
 }
 
 #[cfg(test)]
diff --git a/src/bump_allocator.rs b/src/bump_allocator.rs
index d5c266d..1ccf13c 100644
--- a/src/bump_allocator.rs
+++ b/src/bump_allocator.rs
@@ -55,7 +55,7 @@ impl BumpAllocator {
         }
     }
 
-    pub unsafe fn alloc(&mut self, size: usize) -> Option<*mut u8> {
+    pub(crate) unsafe fn alloc(&mut self, size: usize) -> Option<*mut u8> {
         let new_head = self.head.byte_add(size);
 
         if new_head > self.end_of_mem {
@@ -75,13 +75,22 @@ impl BumpAllocator {
         ret
     }
 
-    pub fn dealloc(&mut self) {
+    pub(crate) fn dealloc(&mut self) {
         self.number_of_allocated_chunks -= 1;
 
         if self.number_of_allocated_chunks == 0 {
             self.head = self.start_of_mem;
         }
     }
+
+    pub(crate) unsafe fn get_offset(&self, ptr: *const u8) -> usize {
+        let offset = ptr.byte_offset_from(self.start_of_mem);
+
+        debug_assert!(offset >= 0);
+        debug_assert!(offset < self.end_of_mem.byte_offset_from(self.start_of_mem));
+
+        offset as usize
+    }
 }
 
 #[cfg(test)]
diff --git a/src/sharedptr.rs b/src/sharedptr.rs
index 01dd76e..34652a8 100644
--- a/src/sharedptr.rs
+++ b/src/sharedptr.rs
@@ -19,6 +19,14 @@ impl SharedPtr<'_> {
 
         Some(SharedPtr(buf))
     }
+
+    pub fn get_offset(&self) -> usize {
+        let allocator = ALLOCATOR.lock().unwrap();
+
+        unsafe {
+            allocator.get_offset(self.as_ptr())
+        }
+    }
 }
 
 impl<'a> Deref for SharedPtr<'a> {
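
Note (not part of the patch): a minimal usage sketch of the new SharedPtr::get_offset. It assumes a SharedPtr::new(size)-style constructor (hypothetical name; the crate's actual allocation entry point may differ) and relies on the bump allocator handing out consecutive chunks from the start of its arena while both pointers are alive.

    // Illustrative only: `SharedPtr::new` is a hypothetical constructor name.
    #[test]
    fn offsets_grow_with_successive_allocations() {
        let first = SharedPtr::new(16).unwrap();
        let second = SharedPtr::new(16).unwrap();

        // With a bump allocator, `second` is placed after `first`, so its
        // offset from the start of the arena should be strictly larger.
        assert!(first.get_offset() < second.get_offset());
    }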