windows/src/memory/allocator.rs

use core::alloc::{GlobalAlloc, Layout};
use core::mem;
use core::ptr::{self, NonNull};
use x86_64::structures::paging::{FrameAllocator, Mapper, Page, PageTableFlags};
use x86_64::structures::paging::mapper::MapToError;
use x86_64::VirtAddr;

use crate::memory::PageSize;
use super::Locked;

/// Virtual address at which the kernel heap starts.
pub const HEAP_START: u64 = 0x_4444_4444_0000;
/// Size of the kernel heap.
pub const HEAP_SIZE: u64 = 100 * 1024; // 100 KiB

/// The global heap allocator; `Locked` provides the synchronized interior
/// mutability required to implement `GlobalAlloc` on a `static`.
#[global_allocator]
static ALLOCATOR: Locked<FixedSizeBlockAllocator> = Locked::new(FixedSizeBlockAllocator::new());

/// The block sizes to use.
///
/// Each size must be a power of two because it is also used as the block
/// alignment, and alignments must always be powers of two.
const BLOCK_SIZES: &[usize] = &[8, 16, 32, 64, 128, 256, 512, 1024, 2048];
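
// Note on the size classes above: `list_index` below picks the first entry
// that is at least `max(size, align)`, and `alloc` reuses the chosen block
// size as the block alignment. Both only work because every entry is a
// power of two and the list is sorted in ascending order. A request is
// rounded up to its class, so e.g. a 20-byte allocation is served from a
// 32-byte block; some internal fragmentation is accepted for speed.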

/// A fixed-size block allocator: one intrusive free list per size class in
/// `BLOCK_SIZES`, plus an optional `linked_list_allocator` fallback for
/// requests that fit no class and for carving out fresh blocks.
pub struct FixedSizeBlockAllocator {
    list_heads: [Option<&'static mut FixedSizeBlockNode>; BLOCK_SIZES.len()],
    #[cfg(feature = "f_ll_alloc")]
    fallback_allocator: linked_list_allocator::Heap,
}

/// A free-list node, stored inside the freed block itself (intrusive list).
pub struct FixedSizeBlockNode {
    next: Option<&'static mut FixedSizeBlockNode>,
}
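
// The `f_ll_alloc` feature gates the `linked_list_allocator` fallback above.
// It is presumably declared in the crate's Cargo.toml roughly as follows
// (a sketch only, not verified against the actual manifest; the version is a
// guess based on the `init(usize, usize)` signature used below):
//
//     [dependencies]
//     linked_list_allocator = { version = "0.9", optional = true }
//
//     [features]
//     f_ll_alloc = ["linked_list_allocator"]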

impl FixedSizeBlockAllocator {
    /// Creates an empty allocator with all free lists empty.
    pub const fn new() -> Self {
        const EMPTY: Option<&'static mut FixedSizeBlockNode> = None;
        Self {
            list_heads: [EMPTY; BLOCK_SIZES.len()],
            #[cfg(feature = "f_ll_alloc")]
            fallback_allocator: linked_list_allocator::Heap::empty(),
        }
    }

    /// Initializes the allocator with the given heap bounds.
    ///
    /// # Safety
    ///
    /// The heap range must be unused, already mapped, and passed only once.
    pub unsafe fn init(&mut self, heap_start: usize, heap_size: usize) {
        #[cfg(feature = "f_ll_alloc")]
        self.fallback_allocator.init(heap_start, heap_size);
        #[cfg(not(feature = "f_ll_alloc"))]
        let _ = (heap_start, heap_size);
    }

    /// Allocates from the fallback allocator.
    #[cfg(feature = "f_ll_alloc")]
    fn fallback_alloc(&mut self, layout: Layout) -> *mut u8 {
        match self.fallback_allocator.allocate_first_fit(layout) {
            Ok(ptr) => ptr.as_ptr(),
            Err(_) => ptr::null_mut(),
        }
    }

    /// Without the `f_ll_alloc` fallback there is nothing to carve new
    /// blocks from, so such requests simply fail.
    #[cfg(not(feature = "f_ll_alloc"))]
    fn fallback_alloc(&mut self, _layout: Layout) -> *mut u8 {
        ptr::null_mut()
    }
}

unsafe impl GlobalAlloc for Locked<FixedSizeBlockAllocator> {
    unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
        let mut allocator = self.lock();
        match list_index(&layout) {
            Some(index) => {
                match allocator.list_heads[index].take() {
                    Some(node) => {
                        // pop the first block off the matching free list
                        allocator.list_heads[index] = node.next.take();
                        node as *mut FixedSizeBlockNode as *mut u8
                    }
                    None => {
                        // no block exists in list => allocate new block
                        let block_size = BLOCK_SIZES[index];
                        // only works if all block sizes are a power of 2
                        let block_align = block_size;
                        let layout = Layout::from_size_align(block_size, block_align)
                            .unwrap();
                        allocator.fallback_alloc(layout)
                    }
                }
            }
            None => allocator.fallback_alloc(layout),
        }
    }

    unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
        let mut allocator = self.lock();
        match list_index(&layout) {
            Some(index) => {
                // push the freed block onto the front of the matching free list
                let new_node = FixedSizeBlockNode {
                    next: allocator.list_heads[index].take(),
                };
                // verify that block has size and alignment required for storing node
                assert!(mem::size_of::<FixedSizeBlockNode>() <= BLOCK_SIZES[index]);
                assert!(mem::align_of::<FixedSizeBlockNode>() <= BLOCK_SIZES[index]);
                let new_node_ptr = ptr as *mut FixedSizeBlockNode;
                new_node_ptr.write(new_node);
                allocator.list_heads[index] = Some(&mut *new_node_ptr);
            }
            None => {
                // blocks that fit no size class came from the fallback allocator
                #[cfg(feature = "f_ll_alloc")]
                {
                    let ptr = NonNull::new(ptr).unwrap();
                    allocator.fallback_allocator.deallocate(ptr, layout);
                }
            }
        }
    }
}

/// Chooses the smallest block size class that fits the given layout,
/// returning its index into `BLOCK_SIZES`, or `None` if the request is too
/// large for every class.
fn list_index(layout: &Layout) -> Option<usize> {
    let required_block_size = layout.size().max(layout.align());
    BLOCK_SIZES.iter().position(|&s| s >= required_block_size)
}

/// Maps the pages backing the kernel heap and hands the heap range to the
/// global allocator.
pub fn init_heap(
    mapper: &mut impl Mapper<PageSize>,
    frame_allocator: &mut impl FrameAllocator<PageSize>,
) -> Result<(), MapToError<PageSize>> {
    let page_range = {
        let heap_start = VirtAddr::new(HEAP_START);
        let heap_end = heap_start + HEAP_SIZE - 1u64;
        let heap_start_page = Page::containing_address(heap_start);
        let heap_end_page = Page::containing_address(heap_end);
        Page::range_inclusive(heap_start_page, heap_end_page)
    };

    for page in page_range {
        let frame = frame_allocator
            .allocate_frame()
            .ok_or(MapToError::FrameAllocationFailed)?;
        let flags = PageTableFlags::PRESENT | PageTableFlags::WRITABLE;
        unsafe {
            mapper.map_to(page, frame, flags, frame_allocator)?.flush()
        };
    }

    unsafe {
        ALLOCATOR.lock().init(HEAP_START as usize, HEAP_SIZE as usize);
    }

    Ok(())
}
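
// A typical call site, early in kernel initialization. The names used here
// (`memory::init`, `BootInfoFrameAllocator`, `boot_info`) are illustrative
// assumptions and are not taken from this crate:
//
//     let mut mapper = unsafe { memory::init(phys_mem_offset) };
//     let mut frame_allocator =
//         unsafe { BootInfoFrameAllocator::init(&boot_info.memory_map) };
//     init_heap(&mut mapper, &mut frame_allocator)
//         .expect("heap initialization failed");
//
// From then on, `alloc::boxed::Box`, `alloc::vec::Vec`, and friends allocate
// through the `ALLOCATOR` defined above.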