Initial commit

commit 536fb4cac3
2024-03-13 13:00:08 +01:00
8 changed files with 363 additions and 0 deletions

.gitignore (vendored, new file, +1)

@@ -0,0 +1 @@
/target

Cargo.lock (generated, new file, +16)

@@ -0,0 +1,16 @@
# This file is automatically @generated by Cargo.
# It is not intended for manual editing.
version = 3

[[package]]
name = "libc"
version = "0.2.153"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9c198f91728a82281a64e1f4f9eeb25d82cb32a5de251c6bd1b5154d63a8e7bd"

[[package]]
name = "shared_memory_heap"
version = "0.1.0"
dependencies = [
"libc",
]

Cargo.toml (new file, +9)

@@ -0,0 +1,9 @@
[package]
name = "shared_memory_heap"
version = "0.1.0"
edition = "2021"

# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

[dependencies]
libc = "0.2.153"

rust-toolchain.toml (new file, +2)

@@ -0,0 +1,2 @@
[toolchain]
channel = "nightly"
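
Pinning the nightly channel is needed because src/lib.rs below enables the unstable `test` and `lazy_cell` features (for `#[bench]`/`Bencher` and `std::sync::LazyLock` respectively).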

src/allocator.rs (new file, +150)

@@ -0,0 +1,150 @@
use std::{
    mem::size_of,
    ptr::NonNull,
    sync::{LazyLock, Mutex},
};

use crate::bump_allocator::{BUMP_ALLOCATOR, MEMFD_INITIAL_SIZE};

const ALIGNMENT: usize = 8;
const INITIAL_HEAP_SIZE: usize = MEMFD_INITIAL_SIZE;
const METADATA_SIZE: usize = size_of::<Metadata>();

pub(crate) static ALLOCATOR: LazyLock<Mutex<Allocator>> =
    LazyLock::new(|| unsafe { Mutex::new(Allocator::new()) });

/// Header stored directly in front of every chunk's buffer; lets `deallocate`
/// find the owning `Chunk` record from a raw user pointer.
struct Metadata {
    chunk: NonNull<Chunk>,
}

/// Bookkeeping record for one region of the heap, kept in a doubly linked list.
struct Chunk {
    buffer: *mut u8,
    size: usize,
    in_use: bool,
    next_chunk: Option<NonNull<Chunk>>,
    prev_chunk: Option<NonNull<Chunk>>,
}

/// First-fit free-list allocator operating on memory obtained from the bump allocator.
pub(crate) struct Allocator {
    head: NonNull<Chunk>,
    tail: NonNull<Chunk>,
}

unsafe impl Send for Chunk {}
unsafe impl Send for Allocator {}
impl Allocator {
    unsafe fn new() -> Self {
        // Reserve the initial heap region from the bump allocator and turn it
        // into a single free chunk. The first METADATA_SIZE bytes hold the
        // chunk's metadata header, so only the remainder is usable buffer space.
        let mut allocator = BUMP_ALLOCATOR.lock().unwrap();
        let mem = allocator.alloc(INITIAL_HEAP_SIZE).unwrap();
        let head = Box::new(Chunk {
            buffer: mem.byte_add(METADATA_SIZE),
            size: INITIAL_HEAP_SIZE - METADATA_SIZE,
            in_use: false,
            next_chunk: None,
            prev_chunk: None,
        });
        let head = NonNull::new(Box::leak(head)).unwrap();
        let mem = mem as *mut Metadata;
        *mem = Metadata { chunk: head };
        Allocator { head, tail: head }
    }

    /// First-fit allocation: walk the chunk list from the head and take the
    /// first free chunk that is large enough, splitting off any excess into a
    /// new free chunk.
    pub(crate) unsafe fn allocate(&mut self, size: usize) -> Option<*mut u8> {
        // Round the request up to the allocator's alignment.
        let size = (size + ALIGNMENT - 1) / ALIGNMENT * ALIGNMENT;
        let mut head = Some(self.head);
        while head.is_some() {
            let current_head = head.unwrap().as_mut();
            if !current_head.in_use && current_head.size >= size {
                if current_head.size == size {
                    // Exact fit: hand the chunk out as-is.
                    current_head.in_use = true;
                    return Some(current_head.buffer);
                }
                // Split the chunk: the remainder (minus the space for its new
                // metadata header) becomes a free chunk linked in after this one.
                let unused_space = Box::new(Chunk {
                    buffer: current_head.buffer.byte_add(size + METADATA_SIZE),
                    size: current_head.size - size - METADATA_SIZE,
                    in_use: false,
                    next_chunk: current_head.next_chunk,
                    prev_chunk: head,
                });
                let ptr = NonNull::new(Box::leak(unused_space)).unwrap();
                // Write the new chunk's header just in front of its buffer.
                *(ptr.as_ref().buffer.byte_sub(METADATA_SIZE) as *mut Metadata) =
                    Metadata { chunk: ptr };
                if ptr.as_ref().next_chunk.is_none() {
                    self.tail = ptr;
                }
                current_head.in_use = true;
                current_head.size = size;
                current_head.next_chunk = Some(ptr);
                return Some(current_head.buffer);
            }
            head = current_head.next_chunk;
        }
        // TODO: Try to allocate more space from bump allocator
        None
    }

    /// Mark the chunk owning `ptr` as free again. `ptr` must have been returned
    /// by `allocate` and not deallocated since.
    pub(crate) unsafe fn deallocate(&mut self, ptr: *mut u8) {
        // The metadata header sits immediately before the user buffer.
        let metadata = ptr.byte_sub(METADATA_SIZE) as *mut Metadata;
        let chunk = (*metadata).chunk.as_mut();
        debug_assert!(chunk.in_use);
        debug_assert_eq!(chunk.buffer, ptr);
        chunk.in_use = false;
        // TODO: Consolidate chunks
    }
}

#[cfg(test)]
mod tests {
    extern crate test;

    use core::slice;
    use test::Bencher;

    use super::ALLOCATOR;

    #[test]
    fn functionality() {
        let mut allocator = ALLOCATOR.lock().unwrap();
        unsafe {
            let x = allocator.allocate(10).unwrap();
            let x = slice::from_raw_parts_mut(x, 10);
            x[0] = 1;
            assert_eq!(x[0], 1);
            allocator.deallocate(x.as_mut_ptr());
        }
    }

    #[bench]
    fn allocator_bench(b: &mut Bencher) {
        let mut allocator = ALLOCATOR.lock().unwrap();
        b.iter(|| unsafe {
            let x = allocator.allocate(1).unwrap();
            let x = slice::from_raw_parts_mut(x, 1);
            x[0] = 1;
            assert_eq!(x[0], 1);
            allocator.deallocate(x.as_mut_ptr());
        });
    }
}
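
As an aside, the chunk bookkeeping above relies on an inline header: every user buffer is preceded by a Metadata record, so deallocate only needs the raw pointer to find the owning Chunk. A standalone sketch of that header arithmetic follows (plain std only; the chunk_id field is a hypothetical stand-in for the real NonNull<Chunk>, and this is not part of the crate):

// Standalone illustration of the inline-header pattern used in allocator.rs.
use std::mem::size_of;

struct Metadata {
    chunk_id: usize, // stand-in for NonNull<Chunk>
}

fn main() {
    const METADATA_SIZE: usize = size_of::<Metadata>();
    // Pretend this 8-byte-aligned region came from the bump allocator.
    let mut region = vec![0u64; 8];
    let base = region.as_mut_ptr() as *mut u8;
    unsafe {
        // allocate(): write the header at the front, hand out the pointer just past it.
        (base as *mut Metadata).write(Metadata { chunk_id: 7 });
        let user_ptr = base.add(METADATA_SIZE);
        // deallocate(): step back over the header to recover the bookkeeping record.
        let header = user_ptr.sub(METADATA_SIZE) as *const Metadata;
        assert_eq!((*header).chunk_id, 7);
    }
}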

src/bump_allocator.rs (new file, +120)

@@ -0,0 +1,120 @@
use std::sync::{LazyLock, Mutex};

use libc::{c_char, c_void, ftruncate, memfd_create, mmap, MAP_SHARED, PROT_READ, PROT_WRITE};

/// Initial size of the memfd backing the heap (4 MiB).
pub(crate) const MEMFD_INITIAL_SIZE: usize = 1024 * 1024 * 4;
/// Size of the virtual address range reserved up front (1 GiB); the backing
/// file grows into it on demand.
const MMAP_SIZE: usize = 1024 * 1024 * 1024;

pub(crate) static BUMP_ALLOCATOR: LazyLock<Mutex<BumpAllocator>> =
    LazyLock::new(|| unsafe { Mutex::new(BumpAllocator::new()) });

pub struct BumpAllocator {
    start_of_mem: *mut u8,
    head: *mut u8,
    end_of_mem: *mut u8,
    number_of_allocated_chunks: usize,
    backing_fd: i32,
    fd_size: usize,
}

unsafe impl Send for BumpAllocator {}
impl BumpAllocator {
    unsafe fn new() -> Self {
        assert!(MMAP_SIZE >= MEMFD_INITIAL_SIZE);
        // Create an anonymous, fd-backed memory file (the name must be
        // NUL-terminated) and size it to the initial heap size.
        let data_fd = memfd_create("data\x00".as_ptr() as *const c_char, 0);
        assert!(data_fd >= 0);
        assert_eq!(ftruncate(data_fd, MEMFD_INITIAL_SIZE as i64), 0);
        // Map the full MMAP_SIZE range now; only the part covered by the memfd
        // is usable until the file is grown.
        let start_of_mem = mmap(
            0 as *mut c_void,
            MMAP_SIZE,
            PROT_READ | PROT_WRITE,
            MAP_SHARED,
            data_fd,
            0,
        ) as *mut u8;
        // mmap signals failure with MAP_FAILED, not a null pointer.
        assert_ne!(start_of_mem as *mut c_void, libc::MAP_FAILED);
        let end_of_mem = start_of_mem.byte_add(MEMFD_INITIAL_SIZE);
        BumpAllocator {
            start_of_mem,
            head: start_of_mem,
            end_of_mem,
            number_of_allocated_chunks: 0,
            backing_fd: data_fd,
            fd_size: MEMFD_INITIAL_SIZE,
        }
    }

    /// Bump-allocate `size` bytes, growing the backing memfd (doubling it) when
    /// the current file size is exceeded. Returns None once the reserved
    /// mapping is exhausted.
    pub unsafe fn alloc(&mut self, size: usize) -> Option<*mut u8> {
        let new_head = self.head.byte_add(size);
        while new_head > self.end_of_mem {
            if self.fd_size * 2 <= MMAP_SIZE {
                assert_eq!(ftruncate(self.backing_fd, (self.fd_size * 2) as i64), 0);
                self.fd_size *= 2;
                // Keep end_of_mem in sync with the grown file so later
                // allocations see the new capacity.
                self.end_of_mem = self.start_of_mem.byte_add(self.fd_size);
            } else {
                return None;
            }
        }
        let ret = Some(self.head);
        self.head = new_head;
        self.number_of_allocated_chunks += 1;
        ret
    }

    /// A bump allocator cannot free individual allocations; the region is only
    /// reset once every outstanding chunk has been returned.
    pub fn dealloc(&mut self) {
        self.number_of_allocated_chunks -= 1;
        if self.number_of_allocated_chunks == 0 {
            self.head = self.start_of_mem;
        }
    }
}

#[cfg(test)]
mod tests {
    extern crate test;

    use core::slice;
    use test::Bencher;

    use super::BUMP_ALLOCATOR;

    #[test]
    fn test() {
        let mut allocator = BUMP_ALLOCATOR.lock().unwrap();
        unsafe {
            let x = allocator.alloc(10).unwrap();
            let x = slice::from_raw_parts_mut(x, 10);
            x[0] = 1;
            assert_eq!(x[0], 1);
        }
    }

    #[bench]
    fn bench(b: &mut Bencher) {
        let mut allocator = BUMP_ALLOCATOR.lock().unwrap();
        b.iter(|| unsafe {
            let x = allocator.alloc(10).unwrap();
            let x = slice::from_raw_parts_mut(x, 10);
            x[0] = 1;
            assert_eq!(x[0], 1);
            allocator.dealloc();
        });
    }
}
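
The idea in bump_allocator.rs: a 1 GiB virtual range is mapped up front over a memfd that starts at only 4 MiB, and the file is doubled with ftruncate whenever the bump pointer would run past it, so already-handed-out pointers never move as the heap grows.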

src/lib.rs (new file, +6)

@@ -0,0 +1,6 @@
#![feature(test)]
#![feature(lazy_cell)]

mod allocator;
mod bump_allocator;
pub mod sharedptr;

src/sharedptr.rs (new file, +59)

@@ -0,0 +1,59 @@
use core::slice;
use std::ops::{Deref, DerefMut};

use crate::allocator::ALLOCATOR;

/// Owning handle to a buffer on the shared-memory heap; the underlying chunk
/// is returned to the allocator when the `SharedPtr` is dropped.
pub struct SharedPtr<'a>(&'a mut [u8]);

impl SharedPtr<'_> {
    pub fn new(size: usize) -> Option<Self> {
        // Returns None when the allocator cannot satisfy the request.
        let mut allocator = ALLOCATOR.lock().unwrap();
        let buf = unsafe {
            let buf = allocator.allocate(size)?;
            slice::from_raw_parts_mut(buf, size)
        };
        Some(SharedPtr(buf))
    }
}

impl Deref for SharedPtr<'_> {
    type Target = [u8];

    fn deref(&self) -> &Self::Target {
        self.0
    }
}

impl DerefMut for SharedPtr<'_> {
    fn deref_mut(&mut self) -> &mut Self::Target {
        self.0
    }
}

impl Drop for SharedPtr<'_> {
    fn drop(&mut self) {
        // Hand the chunk back to the free-list allocator.
        let mut allocator = ALLOCATOR.lock().unwrap();
        unsafe {
            allocator.deallocate(self.0.as_mut_ptr());
        }
    }
}

#[cfg(test)]
mod tests {
    use super::SharedPtr;

    #[test]
    fn test() {
        let mut x = SharedPtr::new(10).unwrap();
        x[0] = 1;
        assert_eq!(x[0], 1);
        drop(x);
    }
}
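
For completeness, a minimal sketch of how a dependent binary might use the public API (the path-dependency setup and the main function are assumptions for illustration, not part of this commit):

// Hypothetical consumer crate; assumes something like
//   shared_memory_heap = { path = "../shared_memory_heap" }
// in its Cargo.toml.
use shared_memory_heap::sharedptr::SharedPtr;

fn main() {
    // Allocate 32 bytes on the memfd-backed heap; None would mean the heap is exhausted.
    let mut buf = SharedPtr::new(32).expect("shared heap allocation failed");
    buf[..5].copy_from_slice(b"hello");
    assert_eq!(&buf[..5], &b"hello"[..]);
    // Dropping the SharedPtr marks its chunk as free again.
}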