commit 536fb4cac3d55fb920666a44f734a4da9c4299c3
Author: Aaron Kaiser
Date:   Wed Mar 13 13:00:08 2024 +0100

    Initial commit

diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..ea8c4bf
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1 @@
+/target
diff --git a/Cargo.lock b/Cargo.lock
new file mode 100644
index 0000000..b68a21e
--- /dev/null
+++ b/Cargo.lock
@@ -0,0 +1,16 @@
+# This file is automatically @generated by Cargo.
+# It is not intended for manual editing.
+version = 3
+
+[[package]]
+name = "libc"
+version = "0.2.153"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9c198f91728a82281a64e1f4f9eeb25d82cb32a5de251c6bd1b5154d63a8e7bd"
+
+[[package]]
+name = "shared_memory_heap"
+version = "0.1.0"
+dependencies = [
+ "libc",
+]
diff --git a/Cargo.toml b/Cargo.toml
new file mode 100644
index 0000000..e8ab898
--- /dev/null
+++ b/Cargo.toml
@@ -0,0 +1,9 @@
+[package]
+name = "shared_memory_heap"
+version = "0.1.0"
+edition = "2021"
+
+# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
+
+[dependencies]
+libc = "0.2.153"
diff --git a/rust-toolchain.toml b/rust-toolchain.toml
new file mode 100644
index 0000000..5d56faf
--- /dev/null
+++ b/rust-toolchain.toml
@@ -0,0 +1,2 @@
+[toolchain]
+channel = "nightly"
diff --git a/src/allocator.rs b/src/allocator.rs
new file mode 100644
index 0000000..4f08de5
--- /dev/null
+++ b/src/allocator.rs
@@ -0,0 +1,156 @@
+use std::{
+    mem::size_of,
+    ptr::NonNull,
+    sync::{LazyLock, Mutex},
+};
+
+use crate::bump_allocator::{BUMP_ALLOCATOR, MEMFD_INITIAL_SIZE};
+
+const ALIGNMENT: usize = 8;
+const INITIAL_HEAP_SIZE: usize = MEMFD_INITIAL_SIZE;
+const METADATA_SIZE: usize = size_of::<Metadata>();
+
+pub(crate) static ALLOCATOR: LazyLock<Mutex<Allocator>> =
+    LazyLock::new(|| unsafe { Mutex::new(Allocator::new()) });
+
+// Stored immediately in front of every buffer so that deallocate() can
+// recover the owning Chunk from a raw pointer.
+struct Metadata {
+    chunk: NonNull<Chunk>,
+}
+
+struct Chunk {
+    buffer: *mut u8,
+    size: usize,
+    in_use: bool,
+    next_chunk: Option<NonNull<Chunk>>,
+    prev_chunk: Option<NonNull<Chunk>>,
+}
+
+pub(crate) struct Allocator {
+    head: NonNull<Chunk>,
+    tail: NonNull<Chunk>,
+}
+
+unsafe impl Send for Chunk {}
+unsafe impl Send for Allocator {}
+
+impl Allocator {
+    unsafe fn new() -> Self {
+        let mut allocator = BUMP_ALLOCATOR.lock().unwrap();
+
+        let mem = allocator.alloc(INITIAL_HEAP_SIZE).unwrap();
+
+        let head = Box::new(Chunk {
+            buffer: mem.byte_add(METADATA_SIZE),
+            size: INITIAL_HEAP_SIZE - METADATA_SIZE,
+            in_use: false,
+            next_chunk: None,
+            prev_chunk: None,
+        });
+        let head = NonNull::new(Box::leak(head)).unwrap();
+
+        let mem = mem as *mut Metadata;
+        *mem = Metadata { chunk: head };
+
+        Allocator { head, tail: head }
+    }
+
+    pub(crate) unsafe fn allocate(&mut self, size: usize) -> Option<*mut u8> {
+        // Round the request up to the next multiple of ALIGNMENT.
+        let size = (size + ALIGNMENT - 1) / ALIGNMENT * ALIGNMENT;
+
+        let mut head = Some(self.head);
+
+        while head.is_some() {
+            let current_head = head.unwrap().as_mut();
+
+            if !current_head.in_use && current_head.size >= size {
+                // Take the whole chunk if the remainder could not hold its own metadata.
+                if current_head.size <= size + METADATA_SIZE {
+                    current_head.in_use = true;
+                    return Some(current_head.buffer);
+                }
+
+                let unused_space = Box::new(Chunk {
+                    buffer: current_head.buffer.byte_add(size + METADATA_SIZE),
+                    size: current_head.size - size - METADATA_SIZE,
+                    in_use: false,
+                    next_chunk: current_head.next_chunk,
+                    prev_chunk: head,
+                });
+                let ptr = NonNull::new(Box::leak(unused_space)).unwrap();
+
+                *(ptr.as_ref().buffer.byte_sub(METADATA_SIZE) as *mut Metadata) =
+                    Metadata { chunk: ptr };
+
+                // Keep the list links of the following chunk consistent.
+                match ptr.as_ref().next_chunk {
+                    Some(mut next) => next.as_mut().prev_chunk = Some(ptr),
+                    None => self.tail = ptr,
+                }
+
+                current_head.in_use = true;
+                current_head.size = size;
+                current_head.next_chunk = Some(ptr);
+
+                return Some(current_head.buffer);
+            }
+
+            head = current_head.next_chunk;
+        }
+
+        // TODO: Try to allocate more space from bump allocator
+
+        None
+    }
+
+    pub(crate) unsafe fn deallocate(&mut self, ptr: *mut u8) {
+        let metadata = ptr.byte_sub(METADATA_SIZE) as *mut Metadata;
+        let chunk = (*metadata).chunk.as_mut();
+
+        debug_assert!(chunk.in_use);
+        debug_assert_eq!(chunk.buffer, ptr);
+
+        chunk.in_use = false;
+
+        // TODO: Consolidate chunks
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    extern crate test;
+
+    use core::slice;
+
+    use test::Bencher;
+
+    use super::ALLOCATOR;
+
+    #[test]
+    fn functionality() {
+        let mut allocator = ALLOCATOR.lock().unwrap();
+
+        unsafe {
+            let x = allocator.allocate(10).unwrap();
+            let x = slice::from_raw_parts_mut(x, 10);
+            x[0] = 1;
+            assert_eq!(x[0], 1);
+            allocator.deallocate(x.as_mut_ptr());
+        }
+    }
+
+    #[bench]
+    fn allocator_bench(b: &mut Bencher) {
+        let mut allocator = ALLOCATOR.lock().unwrap();
+
+        b.iter(|| unsafe {
+            let x = allocator.allocate(1).unwrap();
+            let x = slice::from_raw_parts_mut(x, 1);
+            x[0] = 1;
+            assert_eq!(x[0], 1);
+            allocator.deallocate(x.as_mut_ptr());
+        });
+    }
+}
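Note on src/allocator.rs: this is a first-fit free-list allocator. allocate()
walks the chunk list, splits the first free chunk that is large enough, and
writes a Metadata pointer directly in front of each buffer so deallocate()
can recover the owning Chunk from the raw pointer alone. The "consolidate
chunks" TODO in deallocate() could take roughly the following shape. This is
a sketch only, not part of this commit: the method name coalesce_with_next is
hypothetical, and it reuses the Chunk, Metadata and METADATA_SIZE definitions
above.

    impl Allocator {
        /// Merge a freed chunk with its successor when that one is free too,
        /// so repeated allocate/deallocate cycles do not fragment the list.
        unsafe fn coalesce_with_next(&mut self, mut chunk: NonNull<Chunk>) {
            let chunk = chunk.as_mut();
            let Some(mut next_ptr) = chunk.next_chunk else { return };
            if next_ptr.as_ref().in_use {
                return;
            }
            // Absorb the neighbour's buffer plus its metadata slot.
            chunk.size += METADATA_SIZE + next_ptr.as_ref().size;
            chunk.next_chunk = next_ptr.as_ref().next_chunk;
            match chunk.next_chunk {
                Some(mut after) => after.as_mut().prev_chunk = Some(NonNull::from(&mut *chunk)),
                None => self.tail = NonNull::from(&mut *chunk),
            }
            // The Chunk node was created with Box::leak in allocate(); reclaim it.
            drop(Box::from_raw(next_ptr.as_ptr()));
        }
    }

deallocate() would call this once on the freed chunk and once on its
predecessor (if that one is free) to merge in both directions.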
diff --git a/src/bump_allocator.rs b/src/bump_allocator.rs
new file mode 100644
index 0000000..d5c266d
--- /dev/null
+++ b/src/bump_allocator.rs
@@ -0,0 +1,125 @@
+use std::{
+    sync::{LazyLock, Mutex},
+    usize,
+};
+
+use libc::{c_char, c_void, ftruncate, memfd_create, mmap, MAP_SHARED, PROT_READ, PROT_WRITE};
+
+pub(crate) const MEMFD_INITIAL_SIZE: usize = 1024 * 1024 * 4;
+const MMAP_SIZE: usize = 1024 * 1024 * 1024;
+
+pub(crate) static BUMP_ALLOCATOR: LazyLock<Mutex<BumpAllocator>> =
+    LazyLock::new(|| unsafe { Mutex::new(BumpAllocator::new()) });
+
+pub struct BumpAllocator {
+    start_of_mem: *mut u8,
+    head: *mut u8,
+    end_of_mem: *mut u8,
+    number_of_allocated_chunks: usize,
+    backing_fd: i32,
+    fd_size: usize,
+}
+
+unsafe impl Send for BumpAllocator {}
+
+impl BumpAllocator {
+    unsafe fn new() -> Self {
+        assert!(MMAP_SIZE >= MEMFD_INITIAL_SIZE);
+
+        let data_fd = memfd_create("data\x00".as_ptr() as *const c_char, 0);
+
+        // memfd_create returns -1 on failure; 0 is a valid descriptor.
+        assert!(data_fd >= 0);
+
+        assert_eq!(ftruncate(data_fd, MEMFD_INITIAL_SIZE as i64), 0);
+
+        let start_of_mem = mmap(
+            0 as *mut c_void,
+            MMAP_SIZE,
+            PROT_READ | PROT_WRITE,
+            MAP_SHARED,
+            data_fd,
+            0,
+        ) as *mut u8;
+
+        assert_ne!(start_of_mem, 0 as *mut u8);
+
+        let end_of_mem = start_of_mem.byte_add(MEMFD_INITIAL_SIZE);
+
+        BumpAllocator {
+            start_of_mem,
+            head: start_of_mem,
+            end_of_mem,
+            number_of_allocated_chunks: 0,
+            backing_fd: data_fd,
+            fd_size: MEMFD_INITIAL_SIZE,
+        }
+    }
+
+    pub unsafe fn alloc(&mut self, size: usize) -> Option<*mut u8> {
+        let new_head = self.head.byte_add(size);
+
+        // Grow the backing file (doubling it) until the allocation fits.
+        while new_head > self.end_of_mem {
+            if self.end_of_mem.byte_add(self.fd_size) < self.start_of_mem.byte_add(MMAP_SIZE) {
+                assert_eq!(ftruncate(self.backing_fd, (self.fd_size * 2) as i64), 0);
+                self.fd_size *= 2;
+                self.end_of_mem = self.start_of_mem.byte_add(self.fd_size);
+            } else {
+                return None;
+            }
+        }
+
+        let ret = Some(self.head);
+
+        self.head = new_head;
+        self.number_of_allocated_chunks += 1;
+
+        ret
+    }
+
+    pub fn dealloc(&mut self) {
+        self.number_of_allocated_chunks -= 1;
+
+        // The bump allocator can only reuse memory once everything is freed.
+        if self.number_of_allocated_chunks == 0 {
+            self.head = self.start_of_mem;
+        }
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    extern crate test;
+
+    use core::slice;
+    use test::Bencher;
+
+    use super::BUMP_ALLOCATOR;
+
+    #[test]
+    fn test() {
+        let mut allocator = BUMP_ALLOCATOR.lock().unwrap();
+
+        unsafe {
+            let x = allocator.alloc(10).unwrap();
+            let x = slice::from_raw_parts_mut(x, 10);
+            x[0] = 1;
+            assert_eq!(x[0], 1);
+            allocator.dealloc();
+        }
+    }
+
+    #[bench]
+    fn bench(b: &mut Bencher) {
+        let mut allocator = BUMP_ALLOCATOR.lock().unwrap();
+
+        b.iter(|| unsafe {
+            let x = allocator.alloc(10).unwrap();
+            let x = slice::from_raw_parts_mut(x, 10);
+            x[0] = 1;
+            assert_eq!(x[0], 1);
+            allocator.dealloc();
+        });
+    }
+}
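Note on src/bump_allocator.rs: the trick here is to reserve the full 1 GiB of
address space with a single MAP_SHARED mmap() up front while the memfd behind
it starts at only 4 MiB; ftruncate() then grows the file in place, so the
heap can expand without the base address ever moving and without invalidating
any pointer into it. The same pattern in isolation, as a self-contained
sketch (the names reserve_growable and "demo" are made up for illustration):

    use libc::{c_char, ftruncate, memfd_create, mmap, MAP_FAILED, MAP_SHARED, PROT_READ, PROT_WRITE};

    /// Reserve `reserve` bytes of address space backed by a memfd of
    /// `initial` bytes; only the file-backed prefix may be touched.
    unsafe fn reserve_growable(reserve: usize, initial: usize) -> (*mut u8, i32) {
        let fd = memfd_create("demo\0".as_ptr() as *const c_char, 0);
        assert!(fd >= 0);
        assert_eq!(ftruncate(fd, initial as i64), 0);
        // Map the whole reservation once; growing the file later
        // makes more of it usable without remapping.
        let base = mmap(
            std::ptr::null_mut(),
            reserve,
            PROT_READ | PROT_WRITE,
            MAP_SHARED,
            fd,
            0,
        );
        assert_ne!(base, MAP_FAILED);
        (base as *mut u8, fd)
    }

Growing is then just another ftruncate() on the same fd; alloc() above does
exactly that, doubling fd_size until the request fits or the 1 GiB
reservation is exhausted.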
diff --git a/src/lib.rs b/src/lib.rs
new file mode 100644
index 0000000..35a9d3f
--- /dev/null
+++ b/src/lib.rs
@@ -0,0 +1,6 @@
+#![feature(test)]
+#![feature(lazy_cell)]
+
+mod allocator;
+mod bump_allocator;
+pub mod sharedptr;
diff --git a/src/sharedptr.rs b/src/sharedptr.rs
new file mode 100644
index 0000000..01dd76e
--- /dev/null
+++ b/src/sharedptr.rs
@@ -0,0 +1,59 @@
+use core::slice;
+use std::{
+    ops::{Deref, DerefMut},
+    usize,
+};
+
+use crate::allocator::ALLOCATOR;
+
+pub struct SharedPtr<'a>(&'a mut [u8]);
+
+impl SharedPtr<'_> {
+    pub fn new(size: usize) -> Option<Self> {
+        let mut allocator = ALLOCATOR.lock().unwrap();
+
+        let buf = unsafe {
+            let buf = allocator.allocate(size)?;
+            slice::from_raw_parts_mut(buf, size)
+        };
+
+        Some(SharedPtr(buf))
+    }
+}
+
+impl<'a> Deref for SharedPtr<'a> {
+    type Target = &'a mut [u8];
+
+    fn deref(&self) -> &Self::Target {
+        &self.0
+    }
+}
+
+impl<'a> DerefMut for SharedPtr<'a> {
+    fn deref_mut(&mut self) -> &mut Self::Target {
+        &mut self.0
+    }
+}
+
+impl Drop for SharedPtr<'_> {
+    fn drop(&mut self) {
+        let mut allocator = ALLOCATOR.lock().unwrap();
+
+        unsafe {
+            allocator.deallocate(self.0.as_mut_ptr());
+        }
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::SharedPtr;
+
+    #[test]
+    fn test() {
+        let mut x = SharedPtr::new(10).unwrap();
+        x[0] = 1;
+        assert_eq!(x[0], 1);
+        drop(x);
+    }
+}
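Note on src/sharedptr.rs: SharedPtr is the public face of the crate, an RAII
handle that allocates from the shared heap in new(), dereferences to the
underlying byte slice in between, and returns the chunk in Drop. Typical use
mirrors the test above; the snippet below is a sketch of a downstream crate,
not code from this commit:

    use shared_memory_heap::sharedptr::SharedPtr;

    fn main() {
        // Allocate 64 bytes on the memfd-backed heap.
        let mut buf = SharedPtr::new(64).unwrap();
        buf[0] = 42;
        assert_eq!(buf[0], 42);
        // Dropping `buf` marks the chunk free for reuse.
    }

Because the heap lives in a MAP_SHARED memfd mapping, anything written
through a SharedPtr is in principle visible to any other process that maps
the same memfd, which the crate name suggests is the eventual goal.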