Compare commits

...

13 Commits

4 changed files with 111 additions and 46 deletions

src/allocator.rs

@@ -11,7 +11,7 @@ const INITIAL_HEAP_SIZE: usize = MEMFD_INITIAL_SIZE;
 const METADATA_SIZE: usize = size_of::<Metadata>();
 
 pub(crate) static ALLOCATOR: LazyLock<Mutex<Allocator>> =
-    LazyLock::new(|| unsafe { Mutex::new(Allocator::new()) });
+    LazyLock::new(|| Mutex::new(Allocator::new()));
 
 struct Metadata {
     chunk: NonNull<Chunk>,
@@ -34,14 +34,14 @@ unsafe impl Send for Chunk {}
 unsafe impl Send for Allocator {}
 
 impl Allocator {
-    unsafe fn new() -> Self {
+    fn new() -> Self {
         let mut allocator = BUMP_ALLOCATOR.lock().unwrap();
-        let mem = allocator.alloc(INITIAL_HEAP_SIZE).unwrap();
+        let mem = unsafe { allocator.alloc(INITIAL_HEAP_SIZE).unwrap() };
         let head = Box::new(Chunk {
-            buffer: mem.byte_add(METADATA_SIZE),
-            size: INITIAL_HEAP_SIZE,
+            buffer: unsafe { mem.byte_add(METADATA_SIZE) },
+            size: INITIAL_HEAP_SIZE - METADATA_SIZE,
             in_use: false,
             next_chunk: None,
             prev_chunk: None,
@@ -49,27 +49,32 @@ impl Allocator {
         let head = NonNull::new(Box::leak(head)).unwrap();
         let mem = mem as *mut Metadata;
         unsafe {
             *mem = Metadata { chunk: head };
         }
         Allocator { head, tail: head }
     }
 
-    pub(crate) unsafe fn allocate(&mut self, size: usize) -> Option<*mut u8> {
+    pub(crate) fn allocate(&mut self, size: usize) -> Option<*mut u8> {
         let size = (size + ALIGNMENT - 1) / ALIGNMENT * ALIGNMENT;
         let mut head = Some(self.head);
         while head.is_some() {
-            let current_head = head.unwrap().as_mut();
+            // The heap uses a global Mutex. Only one thread can operate on it at a time.
+            let current_head = unsafe { head.unwrap().as_mut() };
             if !current_head.in_use && current_head.size >= size {
-                if current_head.size == size {
+                if current_head.size < (size + METADATA_SIZE + ALIGNMENT) {
                     current_head.in_use = true;
                     return Some(current_head.buffer);
                 }
                 let unused_space = Box::new(Chunk {
-                    buffer: current_head.buffer.byte_add(size + METADATA_SIZE),
+                    // We know that size of buffer is larger than size + METADATA_SIZE + ALIGNMENT.
+                    // Therefore size + METADATA_SIZE is still inside of the buffer
+                    buffer: unsafe { current_head.buffer.byte_add(size + METADATA_SIZE) },
                     size: current_head.size - size - METADATA_SIZE,
                     in_use: false,
                     next_chunk: current_head.next_chunk,
@@ -77,12 +82,21 @@ impl Allocator {
                 });
                 let ptr = NonNull::new(Box::leak(unused_space)).unwrap();
+                // buffer points to current_head + size + METADATA_SIZE.
+                // Therefore buffer - METADATA_SIZE points right after the buffer of current_head
+                // and right before the buffer of unused_space.
+                // This is where the pointer to the metadata chunk is expected
                 unsafe {
                     *(ptr.as_ref().buffer.byte_sub(METADATA_SIZE) as *mut Metadata) =
                         Metadata { chunk: ptr };
                 }
+                // We know that accessing ptr is safe since we just allocated it.
+                unsafe {
                     if ptr.as_ref().next_chunk.is_none() {
                         self.tail = ptr;
                     }
+                }
                 current_head.in_use = true;
                 current_head.size = size;
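
Aside: the size arithmetic in allocate() is easy to sanity-check in isolation. Below is a minimal standalone sketch of the round-up and the split threshold; ALIGNMENT and METADATA_SIZE are illustrative assumptions, since their real definitions sit outside these hunks.

// Standalone sketch of allocate()'s size arithmetic. The constants are
// assumed example values, not the crate's actual ones.
const ALIGNMENT: usize = 16;
const METADATA_SIZE: usize = 8;

fn main() {
    // Round a request up to the next multiple of ALIGNMENT, mirroring
    // `(size + ALIGNMENT - 1) / ALIGNMENT * ALIGNMENT` above.
    let request = 10;
    let size = (request + ALIGNMENT - 1) / ALIGNMENT * ALIGNMENT;
    assert_eq!(size, 16);

    // A free chunk is split only when the remainder can hold a metadata
    // header plus at least ALIGNMENT usable bytes; otherwise the whole
    // chunk is handed out, which avoids unusable slivers.
    let chunk_size = 32;
    assert!(chunk_size < size + METADATA_SIZE + ALIGNMENT); // 32 < 40: take whole chunk
    let bigger_chunk = 64;
    assert!(bigger_chunk >= size + METADATA_SIZE + ALIGNMENT); // 64 >= 40: split
}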
@@ -99,9 +113,9 @@ impl Allocator {
         None
     }
 
-    pub(crate) unsafe fn deallocate(&mut self, ptr: *mut u8) {
-        let metadata = ptr.byte_sub(METADATA_SIZE) as *mut Metadata;
-        let metadata = (*metadata).chunk.as_mut();
+    pub(crate) fn deallocate(&mut self, ptr: *mut u8) {
+        let metadata = unsafe { ptr.byte_sub(METADATA_SIZE) as *mut Metadata };
+        let metadata = unsafe { (*metadata).chunk.as_mut() };
         debug_assert_eq!(metadata.in_use, true);
         debug_assert_eq!(metadata.buffer, ptr);
@@ -110,6 +124,11 @@ impl Allocator {
         // TODO: Consolidate chunks
     }
+
+    pub(crate) unsafe fn get_offset(&self, ptr: *const u8) -> usize {
+        let allocator = BUMP_ALLOCATOR.lock().unwrap();
+        allocator.get_offset(ptr)
+    }
 }
 
 #[cfg(test)]
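
deallocate() relies on the invariant that every buffer handed out is immediately preceded by a Metadata record, so the owning chunk can be recovered by stepping back METADATA_SIZE bytes. A self-contained sketch of that header-before-payload pattern (Header is a simplified stand-in, not the crate's Metadata):

// Minimal illustration of the "header just before the payload" trick
// used by deallocate().
use std::alloc::{alloc, dealloc, Layout};

#[repr(C)]
struct Header {
    tag: usize, // stand-in for the NonNull<Chunk> back-pointer
}

fn main() {
    let header_size = std::mem::size_of::<Header>();
    let layout = Layout::from_size_align(header_size + 64, 16).unwrap();
    unsafe {
        let base = alloc(layout);
        // Write the header at the start, hand out the bytes after it.
        (base as *mut Header).write(Header { tag: 42 });
        let payload = base.add(header_size);

        // Later, given only `payload`, step back to find the header,
        // exactly like `ptr.byte_sub(METADATA_SIZE) as *mut Metadata`.
        let header = payload.sub(header_size) as *mut Header;
        assert_eq!((*header).tag, 42);

        dealloc(base, layout);
    }
}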

src/bump_allocator.rs

@@ -3,10 +3,10 @@ use std::{
     usize,
 };
 
-use libc::{c_char, c_void, ftruncate, memfd_create, mmap, MAP_SHARED, PROT_READ, PROT_WRITE};
+use libc::{c_char, c_void, ftruncate, mmap, perror, syscall, SYS_memfd_secret, MAP_FAILED, MAP_SHARED, PROT_READ, PROT_WRITE};
 
-pub(crate) const MEMFD_INITIAL_SIZE: usize = 1024 * 1024 * 4;
-const MMAP_SIZE: usize = 1024 * 1024 * 1024;
+pub(crate) const MEMFD_INITIAL_SIZE: usize = 1024 * 1024;
+const MMAP_SIZE: usize = 1024 * 1024;
 
 pub(crate) static BUMP_ALLOCATOR: LazyLock<Mutex<BumpAllocator>> =
     LazyLock::new(|| unsafe { Mutex::new(BumpAllocator::new()) });
@@ -26,7 +26,10 @@ impl BumpAllocator {
     unsafe fn new() -> Self {
         assert!(MMAP_SIZE >= MEMFD_INITIAL_SIZE);
 
-        let data_fd = memfd_create("data\x00".as_ptr() as *const c_char, 0);
+        let data_fd = syscall(SYS_memfd_secret, 0) as i32;
+        if data_fd <= 0 {
+            perror("memfd secret\x00".as_ptr() as *const c_char);
+        }
         assert!(data_fd > 0);
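
Worth noting: memfd_secret(2) exists only on Linux 5.14+ and is additionally gated behind the secretmem.enable=1 boot parameter, so the perror/assert above can fire on an otherwise healthy machine. A small standalone probe (same syscall as above, reporting instead of asserting):

// Probe memfd_secret support without crashing; mirrors the call in
// BumpAllocator::new() above.
use libc::{close, syscall, SYS_memfd_secret};

fn main() {
    let fd = unsafe { syscall(SYS_memfd_secret, 0) } as i32;
    if fd < 0 {
        // Typically ENOSYS: kernel < 5.14 or secretmem disabled at boot.
        eprintln!("memfd_secret failed: {}", std::io::Error::last_os_error());
    } else {
        println!("memfd_secret is available (fd {fd})");
        unsafe { close(fd) };
    }
}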
@@ -41,7 +44,7 @@
             0,
         ) as *mut u8;
-        assert_ne!(start_of_mem, 0 as *mut u8);
+        assert_ne!(start_of_mem, MAP_FAILED as *mut u8);
 
         let end_of_mem = start_of_mem.byte_add(MEMFD_INITIAL_SIZE);
@@ -55,7 +58,7 @@
         }
     }
 
-    pub unsafe fn alloc(&mut self, size: usize) -> Option<*mut u8> {
+    pub(crate) unsafe fn alloc(&mut self, size: usize) -> Option<*mut u8> {
         let new_head = self.head.byte_add(size);
 
         if new_head > self.end_of_mem {
@@ -75,13 +78,26 @@
         ret
     }
 
-    pub fn dealloc(&mut self) {
+    pub(crate) fn dealloc(&mut self) {
         self.number_of_allocated_chunks -= 1;
         if self.number_of_allocated_chunks == 0 {
             self.head = self.start_of_mem;
         }
     }
+
+    pub(crate) unsafe fn get_offset(&self, ptr: *const u8) -> usize {
+        let offset = ptr.byte_offset_from(self.start_of_mem);
+        debug_assert!(offset >= 0);
+        debug_assert!(offset < self.end_of_mem.byte_offset_from(self.start_of_mem));
+        offset as usize
+    }
+
+    pub(crate) fn get_fd(&self) -> i32 {
+        self.backing_fd
+    }
 }
 
 #[cfg(test)]
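
The new get_offset/get_fd pair is what makes cross-mapping sharing possible: a raw pointer is only meaningful inside one mapping, but an (fd, offset) pair survives. A sketch of the translation each side would perform; local_base and remote_base stand for two separate mmap()ings of the same fd:

// Sketch: pointers do not survive a process boundary, offsets do.
fn to_offset(ptr: *const u8, local_base: *const u8) -> usize {
    // Same computation as BumpAllocator::get_offset above.
    unsafe { ptr.byte_offset_from(local_base) as usize }
}

fn from_offset(offset: usize, remote_base: *mut u8) -> *mut u8 {
    remote_base.wrapping_add(offset)
}

fn main() {
    let buf = vec![0u8; 64]; // stand-in for a shared mapping
    let local_base = buf.as_ptr();
    let ptr = unsafe { local_base.add(40) };
    assert_eq!(to_offset(ptr, local_base), 40);
    // A peer would reconstruct the pointer as from_offset(40, its_own_base).
}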

src/lib.rs

@@ -1,6 +1,11 @@
 #![feature(test)]
 #![feature(lazy_cell)]
 
+use bump_allocator::BUMP_ALLOCATOR;
+
 mod allocator;
 mod bump_allocator;
 pub mod sharedptr;
+
+pub fn get_shared_mem_fd() -> i32 {
+    let allocator = BUMP_ALLOCATOR.lock().unwrap();
+    allocator.get_fd()
+}
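
To illustrate what the exported fd is for: it can be mmap()ed a second time, and both views then alias the same pages. A hedged sketch, assuming the crate is named shared_mem and that this SharedPtr is the first (and therefore lowest-offset) allocation:

// Sketch: map the shared fd a second time and read a SharedPtr's byte
// through the new view. Crate name `shared_mem` is an assumption.
use libc::{mmap, MAP_FAILED, MAP_SHARED, PROT_READ, PROT_WRITE};
use shared_mem::{get_shared_mem_fd, sharedptr::SharedPtr};

fn main() {
    let mut ptr = SharedPtr::new(8).unwrap();
    ptr[0] = 0xAB;

    let fd = get_shared_mem_fd();
    // 4096 bytes suffices here because the first allocation sits near
    // the start of the backing file.
    let view = unsafe {
        mmap(std::ptr::null_mut(), 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0)
    };
    assert_ne!(view, MAP_FAILED);

    // The SharedPtr's byte is visible through the second mapping at the
    // offset reported by get_offset().
    let byte = unsafe { *(view as *const u8).add(ptr.get_offset()) };
    assert_eq!(byte, 0xAB);
}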

src/sharedptr.rs

@@ -1,47 +1,64 @@
 use core::slice;
-use std::{
-    ops::{Deref, DerefMut},
-    usize,
-};
+use std::ops::{Deref, DerefMut};
 
 use crate::allocator::ALLOCATOR;
 
-pub struct SharedPtr<'a>(&'a mut [u8]);
+#[derive(Debug)]
+pub struct SharedPtr {
+    ptr: *mut u8,
+    size: usize
+}
 
-impl SharedPtr<'_> {
+unsafe impl Send for SharedPtr {}
+unsafe impl Sync for SharedPtr {}
+
+impl SharedPtr {
     pub fn new(size: usize) -> Option<Self> {
         let mut allocator = ALLOCATOR.lock().unwrap();
-        let buf = unsafe {
-            let buf = allocator.allocate(size)?;
-            slice::from_raw_parts_mut(buf, size)
-        };
-        Some(SharedPtr(buf))
+        let buf = allocator.allocate(size)?;
+        Some(SharedPtr { ptr: buf, size })
     }
+
+    pub fn get_offset(&self) -> usize {
+        let allocator = ALLOCATOR.lock().unwrap();
+        unsafe { allocator.get_offset(self.ptr) }
+    }
+
+    pub fn get_size(&self) -> usize {
+        self.size
+    }
 }
 
-impl<'a> Deref for SharedPtr<'a> {
-    type Target = &'a mut [u8];
+impl Deref for SharedPtr {
+    type Target = [u8];
 
     fn deref(&self) -> &Self::Target {
-        &self.0
+        unsafe {
+            slice::from_raw_parts(self.ptr, self.size)
+                .try_into()
+                .expect("This should never fail")
+        }
     }
 }
 
-impl<'a> DerefMut for SharedPtr<'a> {
+impl DerefMut for SharedPtr {
     fn deref_mut(&mut self) -> &mut Self::Target {
-        &mut self.0
+        unsafe {
+            slice::from_raw_parts_mut(self.ptr, self.size)
+                .try_into()
+                .expect("This should never fail")
+        }
     }
 }
 
-impl Drop for SharedPtr<'_> {
+impl Drop for SharedPtr {
     fn drop(&mut self) {
         let mut allocator = ALLOCATOR.lock().unwrap();
-        unsafe {
-            allocator.deallocate(self.0.as_mut_ptr());
-        }
+        allocator.deallocate(self.ptr);
     }
 }
@@ -56,4 +73,12 @@ mod tests {
         assert_eq!(x[0], 1);
         drop(x);
     }
+
+    #[test]
+    fn slice() {
+        let mut x = SharedPtr::new(10).unwrap();
+        x[0] = 1;
+        x[1] = 2;
+        assert_eq!(x[0..=1], [1, 2]);
+    }
 }
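
A note on the new unsafe impl Send/Sync: they are defensible only because every allocator operation behind SharedPtr goes through the global Mutex. A small sketch of what they newly permit, moving a buffer across threads (not part of this diff's test suite; crate name shared_mem is assumed):

// Sketch: SharedPtr can now cross thread boundaries; all allocator
// bookkeeping stays serialized behind the global Mutex.
use shared_mem::sharedptr::SharedPtr;

fn main() {
    let mut buf = SharedPtr::new(4).unwrap();
    let handle = std::thread::spawn(move || {
        buf.copy_from_slice(&[1, 2, 3, 4]); // DerefMut gives &mut [u8]
        buf // hand the buffer back to the main thread
    });
    let buf = handle.join().unwrap();
    assert_eq!(&buf[..], &[1, 2, 3, 4]);
}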