Compare commits

7 Commits: weird_rust...main

| SHA1 |
|---|
| ef9bcc94fb |
| 7f46573218 |
| 9b466a72bb |
| 9ec0b728c2 |
| 0200b3c913 |
| 18f1a1b9a6 |
| 7c336c8769 |
```diff
@@ -11,7 +11,7 @@ const INITIAL_HEAP_SIZE: usize = MEMFD_INITIAL_SIZE;
 const METADATA_SIZE: usize = size_of::<Metadata>();
 
 pub(crate) static ALLOCATOR: LazyLock<Mutex<Allocator>> =
-    LazyLock::new(|| unsafe { Mutex::new(Allocator::new()) });
+    LazyLock::new(|| Mutex::new(Allocator::new()));
 
 struct Metadata {
     chunk: NonNull<Chunk>,
```
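This hunk can drop the `unsafe` block because `Allocator::new` becomes a safe function below. For reference, a minimal sketch of the `LazyLock<Mutex<_>>` singleton pattern used here (the `COUNTER` example is illustrative, not from this repo); `LazyLock` is stable since Rust 1.80, which is also why `#![feature(lazy_cell)]` is removed further down in this diff:

```rust
use std::sync::{LazyLock, Mutex};

// Process-wide singleton: the closure runs once, on first access.
static COUNTER: LazyLock<Mutex<u64>> = LazyLock::new(|| Mutex::new(0));

fn bump() -> u64 {
    // Lock, mutate, return; the Mutex serializes all access.
    let mut c = COUNTER.lock().unwrap();
    *c += 1;
    *c
}
```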
```diff
@@ -34,14 +34,14 @@ unsafe impl Send for Chunk {}
 unsafe impl Send for Allocator {}
 
 impl Allocator {
-    unsafe fn new() -> Self {
+    fn new() -> Self {
         let mut allocator = BUMP_ALLOCATOR.lock().unwrap();
 
-        let mem = allocator.alloc(INITIAL_HEAP_SIZE).unwrap();
+        let mem = unsafe { allocator.alloc(INITIAL_HEAP_SIZE).unwrap() };
 
         let head = Box::new(Chunk {
-            buffer: mem.byte_add(METADATA_SIZE),
-            size: INITIAL_HEAP_SIZE,
+            buffer: unsafe { mem.byte_add(METADATA_SIZE) },
+            size: INITIAL_HEAP_SIZE - METADATA_SIZE,
             in_use: false,
             next_chunk: None,
             prev_chunk: None,
```
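The `size` fix here matters: the head buffer starts `METADATA_SIZE` bytes into the mapping, so advertising the full `INITIAL_HEAP_SIZE` would let the last allocation run past the end of the mapping. A sketch of the corrected layout arithmetic, with illustrative constants rather than the crate's real ones:

```rust
// Illustrative values, not the crate's real ones.
const INITIAL_HEAP_SIZE: usize = 1024 * 1024;
const METADATA_SIZE: usize = 16;

// Mapping layout:  [ Metadata | buffer ... ]
//                  ^mem       ^mem + METADATA_SIZE
// The usable buffer is METADATA_SIZE bytes shorter than the mapping:
const HEAD_BUFFER_SIZE: usize = INITIAL_HEAP_SIZE - METADATA_SIZE;

fn main() {
    assert_eq!(METADATA_SIZE + HEAD_BUFFER_SIZE, INITIAL_HEAP_SIZE);
}
```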
```diff
@@ -49,27 +49,32 @@ impl Allocator {
         let head = NonNull::new(Box::leak(head)).unwrap();
 
         let mem = mem as *mut Metadata;
-        *mem = Metadata { chunk: head };
+        unsafe {
+            *mem = Metadata { chunk: head };
+        }
 
         Allocator { head, tail: head }
     }
 
-    pub(crate) unsafe fn allocate(&mut self, size: usize) -> Option<*mut u8> {
+    pub(crate) fn allocate(&mut self, size: usize) -> Option<*mut u8> {
         let size = (size + ALIGNMENT - 1) / ALIGNMENT * ALIGNMENT;
 
         let mut head = Some(self.head);
 
         while head.is_some() {
-            let current_head = head.unwrap().as_mut();
+            // The heap uses a global Mutex. Only one thread can operate on it at a time.
+            let current_head = unsafe { head.unwrap().as_mut() };
 
             if !current_head.in_use && current_head.size >= size {
-                if current_head.size == size {
+                if current_head.size < (size + METADATA_SIZE + ALIGNMENT) {
                     current_head.in_use = true;
                     return Some(current_head.buffer);
                 }
 
                 let unused_space = Box::new(Chunk {
-                    buffer: current_head.buffer.byte_add(size + METADATA_SIZE),
+                    // We know that size of buffer is larger than size + METADATA_SIZE + ALIGNMENT.
+                    // Therefore size + METADATA_SIZE is still inside of the buffer
+                    buffer: unsafe { current_head.buffer.byte_add(size + METADATA_SIZE) },
                     size: current_head.size - size - METADATA_SIZE,
                     in_use: false,
                     next_chunk: current_head.next_chunk,
```
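Two things change in this hunk besides the `unsafe` bookkeeping: the raw-pointer dereference is justified by the global `Mutex`, and the exact-fit test `== size` becomes a threshold, so the chunk is handed out whole whenever the leftover could not hold a new `Metadata` header plus at least one aligned allocation. The rounding on entry is the classic round-up-to-multiple idiom; a self-contained sketch, assuming an illustrative 8-byte `ALIGNMENT`:

```rust
// Illustrative alignment; the crate defines its own ALIGNMENT constant.
const ALIGNMENT: usize = 8;

/// Round `size` up to the next multiple of ALIGNMENT.
fn align_up(size: usize) -> usize {
    (size + ALIGNMENT - 1) / ALIGNMENT * ALIGNMENT
}

fn main() {
    assert_eq!(align_up(1), 8);
    assert_eq!(align_up(8), 8);
    assert_eq!(align_up(9), 16);
}
```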
```diff
@@ -77,11 +82,20 @@ impl Allocator {
                 });
                 let ptr = NonNull::new(Box::leak(unused_space)).unwrap();
 
-                *(ptr.as_ref().buffer.byte_sub(METADATA_SIZE) as *mut Metadata) =
-                    Metadata { chunk: ptr };
+                // buffer points to current_head + size + METADATA_SIZE.
+                // Therefore buffer - METADATA_SIZE points right after the buffer of current_head
+                // and right before the buffer of unused_space.
+                // This is where the pointer to the metadata chunk is expected
+                unsafe {
+                    *(ptr.as_ref().buffer.byte_sub(METADATA_SIZE) as *mut Metadata) =
+                        Metadata { chunk: ptr };
+                }
 
-                if ptr.as_ref().next_chunk.is_none() {
-                    self.tail = ptr;
+                // We know that accessing ptr is safe since we just allocated it.
+                unsafe {
+                    if ptr.as_ref().next_chunk.is_none() {
+                        self.tail = ptr;
+                    }
                 }
 
                 current_head.in_use = true;
```
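The invariant behind this write: every buffer is immediately preceded in the mapping by a `Metadata` record pointing back at its `Chunk`, so a user pointer alone is enough to find its bookkeeping. A reduced sketch of the convention, with stand-in types rather than the crate's definitions:

```rust
use std::ptr::NonNull;

struct Chunk { in_use: bool }              // stand-in for the crate's Chunk
struct Metadata { chunk: NonNull<Chunk> }  // back-pointer stored before each buffer

const METADATA_SIZE: usize = std::mem::size_of::<Metadata>();

// Layout: [ .. | Metadata | buffer bytes .. ]
// Safety: caller guarantees METADATA_SIZE bytes are reserved before `buffer`.
unsafe fn write_header(buffer: *mut u8, chunk: NonNull<Chunk>) {
    let header = buffer.byte_sub(METADATA_SIZE) as *mut Metadata;
    header.write(Metadata { chunk });
}
```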
```diff
@@ -99,9 +113,9 @@ impl Allocator {
         None
     }
 
-    pub(crate) unsafe fn deallocate(&mut self, ptr: *mut u8) {
-        let metadata = ptr.byte_sub(METADATA_SIZE) as *mut Metadata;
-        let metadata = (*metadata).chunk.as_mut();
+    pub(crate) fn deallocate(&mut self, ptr: *mut u8) {
+        let metadata = unsafe { ptr.byte_sub(METADATA_SIZE) as *mut Metadata };
+        let metadata = unsafe { (*metadata).chunk.as_mut() };
 
         debug_assert_eq!(metadata.in_use, true);
         debug_assert_eq!(metadata.buffer, ptr);
```
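`deallocate` is the mirror image of the split-time write above: step back `METADATA_SIZE` bytes from the user pointer, read the back-pointer, and mark the chunk free. Reusing the stand-in types from the previous sketch:

```rust
// Safety: `ptr` must have been returned by `allocate`, so a valid
// Metadata record sits METADATA_SIZE bytes before it.
unsafe fn chunk_of(ptr: *mut u8) -> NonNull<Chunk> {
    let header = ptr.byte_sub(METADATA_SIZE) as *const Metadata;
    (*header).chunk
}
```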
```diff
@@ -3,7 +3,7 @@ use std::{
     usize,
 };
 
-use libc::{c_char, c_void, ftruncate, memfd_create, mmap, MAP_FAILED, MAP_SHARED, PROT_READ, PROT_WRITE};
+use libc::{c_char, c_void, ftruncate, mmap, perror, syscall, SYS_memfd_secret, MAP_FAILED, MAP_SHARED, PROT_READ, PROT_WRITE};
 
 pub(crate) const MEMFD_INITIAL_SIZE: usize = 1024 * 1024;
 const MMAP_SIZE: usize = 1024 * 1024;
```
```diff
@@ -26,7 +26,10 @@ impl BumpAllocator {
     unsafe fn new() -> Self {
         assert!(MMAP_SIZE >= MEMFD_INITIAL_SIZE);
 
-        let data_fd = memfd_create("data\x00".as_ptr() as *const c_char, 0);
+        let data_fd = syscall(SYS_memfd_secret, 0) as i32;
+        if data_fd <= 0 {
+            perror("memfd secret\x00".as_ptr() as *const c_char);
+        }
 
         assert!(data_fd > 0);
 
```
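`memfd_secret` has no libc wrapper, hence the raw `syscall`; it returns a file descriptor whose backing pages are removed from the kernel's direct map, and it needs Linux 5.14+ booted with `secretmem.enable=1`, which is presumably why the error path now calls `perror`. A minimal create-size-map sketch along the lines of what `BumpAllocator::new` appears to do (the `map_secret` helper is hypothetical):

```rust
use libc::{
    ftruncate, mmap, syscall, SYS_memfd_secret, MAP_FAILED, MAP_SHARED, PROT_READ, PROT_WRITE,
};
use std::ptr;

// Create a secret-memory fd, size it, and map it shared + read/write.
// Requires Linux >= 5.14 with `secretmem.enable=1` on the kernel command line.
unsafe fn map_secret(len: usize) -> *mut u8 {
    let fd = syscall(SYS_memfd_secret, 0) as i32;
    assert!(fd > 0, "memfd_secret failed (kernel support? secretmem.enable?)");
    assert_eq!(ftruncate(fd, len as libc::off_t), 0);
    let mem = mmap(ptr::null_mut(), len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
    assert_ne!(mem, MAP_FAILED);
    mem as *mut u8
}
```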
```diff
@@ -1,6 +1,3 @@
-#![feature(test)]
-#![feature(lazy_cell)]
-
 use bump_allocator::BUMP_ALLOCATOR;
 
 mod allocator;
```
```diff
@@ -1,54 +1,64 @@
 use core::slice;
-use std::{
-    ops::{Deref, DerefMut},
-    usize,
-};
+use std::ops::{Deref, DerefMut};
 
 use crate::allocator::ALLOCATOR;
 
 #[derive(Debug)]
-pub struct SharedPtr<const N: usize>([u8; N]);
+pub struct SharedPtr {
+    ptr: *mut u8,
+    size: usize
+}
 
-impl<const N: usize> SharedPtr<N> {
-    pub fn new() -> Option<Self> {
+unsafe impl Send for SharedPtr {}
+unsafe impl Sync for SharedPtr {}
+
+impl SharedPtr {
+    pub fn new(size: usize) -> Option<Self> {
         let mut allocator = ALLOCATOR.lock().unwrap();
 
-        let buf = unsafe {
-            let buf = allocator.allocate(N)?;
-            slice::from_raw_parts_mut(buf, N)
-        };
+        let buf = allocator.allocate(size)?;
 
-        Some(SharedPtr(buf.try_into().expect("Should never fail")))
+        Some(SharedPtr{ptr: buf, size})
     }
 
     pub fn get_offset(&self) -> usize {
         let allocator = ALLOCATOR.lock().unwrap();
 
-        unsafe { allocator.get_offset(self.as_ptr()) }
+        unsafe { allocator.get_offset(self.ptr) }
     }
+
+    pub fn get_size(&self) -> usize {
+        self.size
+    }
 }
 
-impl<const N: usize> Deref for SharedPtr<N> {
-    type Target = [u8; N];
+impl Deref for SharedPtr {
+    type Target = [u8];
 
     fn deref(&self) -> &Self::Target {
-        &self.0
+        unsafe {
+            slice::from_raw_parts(self.ptr, self.size)
+                .try_into()
+                .expect("This should never fail")
+        }
     }
 }
 
-impl<'a, const N: usize> DerefMut for SharedPtr<N> {
+impl DerefMut for SharedPtr {
     fn deref_mut(&mut self) -> &mut Self::Target {
-        &mut self.0
+        unsafe {
+            slice::from_raw_parts_mut(self.ptr, self.size)
+                .try_into()
+                .expect("This should never fail")
+        }
     }
 }
 
-impl<const N: usize> Drop for SharedPtr<N> {
+impl Drop for SharedPtr {
     fn drop(&mut self) {
         let mut allocator = ALLOCATOR.lock().unwrap();
 
-        unsafe {
-            allocator.deallocate(self.0.as_mut_ptr());
-        }
+        allocator.deallocate(self.ptr);
     }
 }
```
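With the const-generic parameter gone, `Deref`/`DerefMut` now target `[u8]`, so indexing and slicing keep working at any runtime size; note the `.try_into().expect(...)` in the new bodies is a no-op leftover from the array version (a `&[u8]` into a `&[u8]`) and could simply return the slice. A sketch of the new API surface, as exercised by the tests below (a fragment inside this crate, not a standalone program):

```rust
// Sketch of the new API surface.
let mut x = SharedPtr::new(16).expect("allocation failed");
x[0] = 0xAB;                // IndexMut via DerefMut into [u8]
let head = &x[..4];         // slicing works at runtime-chosen sizes
assert_eq!(x.get_size(), 16);
```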
```diff
@@ -58,7 +68,7 @@ mod tests {
 
     #[test]
     fn test() {
-        let mut x = SharedPtr::<10>::new().unwrap();
+        let mut x = SharedPtr::new(10).unwrap();
         x[0] = 1;
         assert_eq!(x[0], 1);
         drop(x);
```
```diff
@@ -66,7 +76,7 @@ mod tests {
 
     #[test]
     fn slice() {
-        let mut x = SharedPtr::<10>::new().unwrap();
+        let mut x = SharedPtr::new(10).unwrap();
         x[0] = 1;
         x[1] = 2;
         assert_eq!(x[0..=1], [1, 2]);
```