Basic kernel heap allocation, heavily influenced by this blog: https://os.phil-opp.com/#memory-management. The most exciting possibility this immediately enables is using the tracing crate in the kernel.
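With a global allocator registered, the alloc crate's types become available in no_std code, which is what tracing (and most other ecosystem crates) ultimately need. A minimal sketch of what this unlocks once the heap below is initialized (the function name is illustrative, not part of the modules shown here):

extern crate alloc;

use alloc::boxed::Box;
use alloc::vec::Vec;

// Hypothetical demo; only valid after allocator::init_heap has returned.
fn demo_heap_usage() {
    let boxed = Box::new(42u64);
    let mut numbers = Vec::new();
    numbers.push(*boxed);
    assert_eq!(numbers[0], 42);
}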
use bootloader_api::info::{MemoryRegionKind, MemoryRegions};
use x86_64::addr::{PhysAddr, VirtAddr};
use x86_64::registers::control::Cr3;
use x86_64::structures::paging::{
FrameAllocator, OffsetPageTable, PageSize, PageTable, PhysFrame, Size4KiB,
};
// Not sure how to get the configured page size, but pretty sure it's only 4 KiB by default
// TODO: check if this is true and enable larger (2 MiB / 1 GiB) pages
const PAGE_SIZE: u64 = Size4KiB::SIZE;
pub struct SimpleFrameAllocator<I>
where
I: Iterator<Item = PhysFrame>,
{
pub usable_frames: I,
}
// TODO: increase page size here as well
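// SAFETY: the FrameAllocator contract requires every yielded frame to be
// genuinely unused; init_frame_allocator below only yields frames from
// regions the bootloader marked as usable.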
unsafe impl<I> FrameAllocator<Size4KiB> for SimpleFrameAllocator<I>
where
I: Iterator<Item = PhysFrame>,
{
fn allocate_frame(&mut self) -> Option<PhysFrame<Size4KiB>> {
self.usable_frames.next()
}
}
/// # Safety
///
/// The caller must guarantee that the passed memory map is valid: all frames
/// marked `Usable` must really be unused.
pub unsafe fn init_frame_allocator(
memory_regions: &'static MemoryRegions,
) -> SimpleFrameAllocator<impl Iterator<Item = PhysFrame>> {
// Collect all physical frames marked as usable
let usable_frames = memory_regions
    .iter()
    // Keep only regions marked as usable
    .filter(|region| region.kind == MemoryRegionKind::Usable)
    // Map each region to its start..end address range
    .map(|region| region.start..region.end)
    // Step through each region one page-sized frame at a time
    .flat_map(|region| region.step_by(PAGE_SIZE as usize))
    // Convert each frame start address into a physical frame
    .map(|start_address| PhysFrame::containing_address(PhysAddr::new(start_address)));
SimpleFrameAllocator { usable_frames }
}
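// Illustration with made-up numbers: a usable region spanning 0x1000..0x5000
// steps through 0x1000, 0x2000, 0x3000, and 0x4000, yielding four 4 KiB
// frames; allocate_frame then hands these out one at a time.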
/// Get the currently active level 4 page table.
///
/// # Safety
///
/// The caller must guarantee that the complete physical memory is mapped to
/// virtual memory at the passed `physical_memory_offset`. Must only be called
/// once to avoid aliasing `&mut` references.
unsafe fn active_level_4_table(physical_memory_offset: VirtAddr) -> &'static mut PageTable {
let (physical_frame, _flags) = Cr3::read();
let frame_physical_address = physical_frame.start_address();
let frame_virtual_address = physical_memory_offset + frame_physical_address.as_u64();
let page_table_address = frame_virtual_address.as_mut_ptr();
&mut *page_table_address
}
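// Example with made-up numbers: if physical memory is mapped at virtual offset
// 0xFFFF_8000_0000_0000 and Cr3 holds physical frame 0x1000, the level 4 table
// becomes readable through virtual address 0xFFFF_8000_0000_1000.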
/// Set up an `OffsetPageTable` around the active level 4 table.
///
/// # Safety
///
/// The caller must guarantee that the complete physical memory is mapped at
/// `physical_memory_offset`.
pub unsafe fn init(physical_memory_offset: VirtAddr) -> OffsetPageTable<'static> {
let active_table = active_level_4_table(physical_memory_offset);
OffsetPageTable::new(active_table, physical_memory_offset)
}
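Wiring both pieces up at boot, in the kernel entry point (boot_info is the bootloader_api boot info):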
let mut offset_table = unsafe {
memory::init(VirtAddr::new(
boot_info
    .physical_memory_offset
    .into_option()
    // An offset of 0 would be silently wrong; fail loudly if the
    // bootloader didn't map physical memory
    .expect("bootloader did not map physical memory"),
))
};
let mut frame_allocator = unsafe { memory::init_frame_allocator(&boot_info.memory_regions) };
allocator::init_heap(&mut offset_table, &mut frame_allocator).unwrap();
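A quick smoke test for the fresh heap (illustrative snippet, not part of any module):

// If this neither panics nor page-faults, mapping and allocator init worked.
let heap_value = alloc::boxed::Box::new(1234);
assert_eq!(*heap_value, 1234);
let heap_vec = alloc::vec![1u64; 1000];
assert_eq!(heap_vec.len(), 1000);

The allocator module itself: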
use linked_list_allocator::LockedHeap;
use x86_64::structures::paging::mapper::MapToError;
use x86_64::structures::paging::{FrameAllocator, Mapper, Page, PageTableFlags, Size4KiB};
use x86_64::VirtAddr;
#[global_allocator]
static ALLOCATOR: LockedHeap = LockedHeap::empty();
// TODO: choose a better start address
pub const HEAP_START: usize = 0x_2222_0000_0000;
pub const HEAP_SIZE: usize = 512 * 1024; // 512 KiB
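// 512 KiB / 4 KiB per page = 128 pages get mapped in init_heap below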
/// Initialize the kernel heap
pub fn init_heap(
mapper: &mut impl Mapper<Size4KiB>,
frame_allocator: &mut impl FrameAllocator<Size4KiB>,
) -> Result<(), MapToError<Size4KiB>> {
let heap_start = VirtAddr::new(HEAP_START as u64);
let heap_end = heap_start + HEAP_SIZE - 1u64;
// Get the pages between start & end addresses (inclusive)
let page_range = {
// TODO: larger page sizes
let start_page: Page<Size4KiB> = Page::containing_address(heap_start);
let end_page = Page::containing_address(heap_end);
Page::range_inclusive(start_page, end_page)
};
// Map each heap page to a freshly allocated physical frame
for page in page_range {
let frame = frame_allocator
.allocate_frame()
.ok_or(MapToError::FrameAllocationFailed)?;
let page_flags = PageTableFlags::PRESENT | PageTableFlags::WRITABLE;
unsafe { mapper.map_to(page, frame, page_flags, frame_allocator) }?.flush();
}
// Initialize the allocator
unsafe {
ALLOCATOR.lock().init(heap_start.as_mut_ptr(), HEAP_SIZE);
}
Ok(())
}
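One caveat from the linked blog series: on toolchains before Rust 1.68 (which stabilized a default handler), a no_std binary using alloc also had to declare an allocation-failure handler, roughly:

#![feature(alloc_error_handler)] // nightly-only, pre-1.68 toolchains

#[alloc_error_handler]
fn alloc_error_handler(layout: alloc::alloc::Layout) -> ! {
    panic!("allocation error: {:?}", layout)
}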