From ce717ff277505f9316dac3e267a9f1020fc73413 Mon Sep 17 00:00:00 2001
From: Jeremy Soller
Date: Sat, 10 Sep 2016 18:48:27 -0600
Subject: [PATCH] Reorganize kernel memory mappings

---
 alloc/hole_list_allocator/Cargo.toml          |  4 --
 alloc/hole_list_allocator/src/lib.rs          | 23 ++++---
 arch/x86_64/src/lib.rs                        | 12 ++++
 arch/x86_64/src/linker.ld                     |  2 +-
 ...frame_alloc.rs => area_frame_allocator.rs} |  2 +-
 arch/x86_64/src/memory/bitmap_allocator.rs    | 39 +++++++++++
 arch/x86_64/src/memory/mod.rs                 |  4 +-
 arch/x86_64/src/paging/entry.rs               |  2 +-
 arch/x86_64/src/paging/mod.rs                 |  7 +-
 arch/x86_64/src/paging/table.rs               |  2 +-
 arch/x86_64/src/start.rs                      | 69 +++++++++----------
 bootloader/x86_64/startup-x86_64.asm          |  7 +-
 12 files changed, 107 insertions(+), 66 deletions(-)
 rename arch/x86_64/src/memory/{area_frame_alloc.rs => area_frame_allocator.rs} (97%)
 create mode 100644 arch/x86_64/src/memory/bitmap_allocator.rs

diff --git a/alloc/hole_list_allocator/Cargo.toml b/alloc/hole_list_allocator/Cargo.toml
index eba97fb..e682e21 100644
--- a/alloc/hole_list_allocator/Cargo.toml
+++ b/alloc/hole_list_allocator/Cargo.toml
@@ -6,7 +6,3 @@ version = "0.1.0"
 [dependencies]
 linked_list_allocator = "0.2.0"
 spin = "0.3.5"
-
-[dependencies.lazy_static]
-version = "0.2.1"
-features = ["spin_no_std"]
diff --git a/alloc/hole_list_allocator/src/lib.rs b/alloc/hole_list_allocator/src/lib.rs
index 9e6da8f..df90843 100644
--- a/alloc/hole_list_allocator/src/lib.rs
+++ b/alloc/hole_list_allocator/src/lib.rs
@@ -9,26 +9,29 @@ use linked_list_allocator::Heap;
 
 extern crate spin;
 extern crate linked_list_allocator;
-#[macro_use]
-extern crate lazy_static;
 
-pub const HEAP_START: usize = 0xffff_ff00_0000_0000; // Put at end of memory, below the recursive page mapping
-pub const HEAP_SIZE: usize = 64 * 1024 * 1024; // 128 MB
+static HEAP: Mutex<Option<Heap>> = Mutex::new(None);
 
-lazy_static! {
-    static ref HEAP: Mutex<Heap> = Mutex::new(unsafe {
-        Heap::new(HEAP_START, HEAP_SIZE)
-    });
+pub unsafe fn init(offset: usize, size: usize) {
+    *HEAP.lock() = Some(Heap::new(offset, size));
 }
 
 #[no_mangle]
 pub extern fn __rust_allocate(size: usize, align: usize) -> *mut u8 {
-    HEAP.lock().allocate_first_fit(size, align).expect("out of memory")
+    if let Some(ref mut heap) = *HEAP.lock() {
+        heap.allocate_first_fit(size, align).expect("out of memory")
+    } else {
+        panic!("__rust_allocate: heap not initialized");
+    }
 }
 
 #[no_mangle]
 pub extern fn __rust_deallocate(ptr: *mut u8, size: usize, align: usize) {
-    unsafe { HEAP.lock().deallocate(ptr, size, align) };
+    if let Some(ref mut heap) = *HEAP.lock() {
+        unsafe { heap.deallocate(ptr, size, align) };
+    } else {
+        panic!("__rust_deallocate: heap not initialized");
+    }
 }
 
 #[no_mangle]
diff --git a/arch/x86_64/src/lib.rs b/arch/x86_64/src/lib.rs
index 61fb547..c72ccc2 100644
--- a/arch/x86_64/src/lib.rs
+++ b/arch/x86_64/src/lib.rs
@@ -19,6 +19,18 @@ extern crate ransid;
 extern crate spin;
 pub extern crate x86;
 
+// Because the memory map must not be aliased, it is defined here, in one place
+/// Offset of recursive paging
+pub const RECURSIVE_PAGE_OFFSET: usize = 0xffff_ff80_0000_0000;
+/// Offset of kernel
+pub const KERNEL_OFFSET: usize = 0xffff_ff00_0000_0000;
+/// Offset to memory allocation bitmap
+pub const BITMAP_OFFSET: usize = 0xffff_fe80_0000_0000;
+/// Offset to kernel heap
+pub const HEAP_OFFSET: usize = 0xffff_fe00_0000_0000;
+/// Size of heap
+pub const HEAP_SIZE: usize = 64 * 1024 * 1024; // 64 MB
+
 /// Print to console
 #[macro_export]
 macro_rules! print {
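
Note: the allocator change above replaces the lazy_static heap with a Mutex<Option<Heap>> that must be initialized explicitly (kstart calls allocator::init with HEAP_OFFSET and HEAP_SIZE in the start.rs hunk later in this patch). A minimal sketch of the same initialize-once pattern, using std::sync::Mutex and a made-up BumpHeap type in place of spin::Mutex and linked_list_allocator::Heap; the names and addresses are illustrative only, and const Mutex::new in a static needs Rust 1.63+:

    use std::sync::Mutex;

    // Stand-in for linked_list_allocator::Heap: a trivial bump allocator.
    struct BumpHeap { next: usize, end: usize }

    impl BumpHeap {
        fn new(offset: usize, size: usize) -> BumpHeap {
            BumpHeap { next: offset, end: offset + size }
        }
        fn allocate_first_fit(&mut self, size: usize, _align: usize) -> Option<usize> {
            if self.next + size <= self.end {
                let addr = self.next;
                self.next += size;
                Some(addr)
            } else {
                None
            }
        }
    }

    // None until init() runs, mirroring the Mutex<Option<Heap>> in the patch.
    static HEAP: Mutex<Option<BumpHeap>> = Mutex::new(None);

    fn init(offset: usize, size: usize) {
        *HEAP.lock().unwrap() = Some(BumpHeap::new(offset, size));
    }

    fn allocate(size: usize, align: usize) -> usize {
        // Same shape as __rust_allocate: panic if the heap was never initialized.
        if let Some(ref mut heap) = *HEAP.lock().unwrap() {
            heap.allocate_first_fit(size, align).expect("out of memory")
        } else {
            panic!("allocate: heap not initialized");
        }
    }

    fn main() {
        init(0x1000, 4096);      // the kernel does this once, early in kstart
        let a = allocate(64, 8);
        let b = allocate(64, 8);
        assert_eq!(b, a + 64);
        println!("allocated at {:#x} and {:#x}", a, b);
    }
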
diff --git a/arch/x86_64/src/linker.ld b/arch/x86_64/src/linker.ld
index c1ac875..c8c932f 100644
--- a/arch/x86_64/src/linker.ld
+++ b/arch/x86_64/src/linker.ld
@@ -1,7 +1,7 @@
 ENTRY(kstart)
 OUTPUT_FORMAT(elf64-x86-64)
 
-/* KERNEL_OFFSET = 0xfffffe8000100000; */
+/* KERNEL_OFFSET = 0xffffff0000100000; */
 KERNEL_OFFSET = 0x100000;
 
 SECTIONS {
diff --git a/arch/x86_64/src/memory/area_frame_alloc.rs b/arch/x86_64/src/memory/area_frame_allocator.rs
similarity index 97%
rename from arch/x86_64/src/memory/area_frame_alloc.rs
rename to arch/x86_64/src/memory/area_frame_allocator.rs
index 7adf683..15f21a2 100644
--- a/arch/x86_64/src/memory/area_frame_alloc.rs
+++ b/arch/x86_64/src/memory/area_frame_allocator.rs
@@ -76,6 +76,6 @@ impl FrameAllocator for AreaFrameAllocator {
     }
 
     fn deallocate_frame(&mut self, frame: Frame) {
-        //println!("Leak frame: {:?}", frame);
+        //panic!("AreaFrameAllocator::deallocate_frame: not supported: {:?}", frame);
     }
 }
diff --git a/arch/x86_64/src/memory/bitmap_allocator.rs b/arch/x86_64/src/memory/bitmap_allocator.rs
new file mode 100644
index 0000000..1f8085a
--- /dev/null
+++ b/arch/x86_64/src/memory/bitmap_allocator.rs
@@ -0,0 +1,40 @@
+use super::{AreaFrameAllocator, Frame, FrameAllocator, PhysicalAddress};
+
+const BITMAP_RESERVED: u8 = 0;
+const BITMAP_FREE: u8 = 1;
+const BITMAP_USED: u8 = 2;
+
+pub struct BitmapAllocator {
+    bitmap: &'static mut [u8]
+}
+
+impl BitmapAllocator {
+    pub fn new(area_frame_allocator: AreaFrameAllocator) -> BitmapAllocator {
+        BitmapAllocator {
+            bitmap: &mut []
+        }
+    }
+}
+
+impl FrameAllocator for BitmapAllocator {
+    fn allocate_frame(&mut self) -> Option<Frame> {
+        let mut i = 0;
+        while i < self.bitmap.len() {
+            if self.bitmap[i] == BITMAP_FREE {
+                self.bitmap[i] = BITMAP_USED;
+                return Some(Frame::containing_address(PhysicalAddress::new(i * 4096)));
+            }
+            i += 1;
+        }
+        None
+    }
+
+    fn deallocate_frame(&mut self, frame: Frame) {
+        let i = frame.start_address().get() / 4096;
+        if i < self.bitmap.len() && self.bitmap[i] == BITMAP_USED {
+            self.bitmap[i] = BITMAP_FREE;
+        } else {
+            panic!("BitmapAllocator::deallocate_frame: unowned frame {:?}", frame);
+        }
+    }
+}
diff --git a/arch/x86_64/src/memory/mod.rs b/arch/x86_64/src/memory/mod.rs
index 5c24554..f3b2ce3 100644
--- a/arch/x86_64/src/memory/mod.rs
+++ b/arch/x86_64/src/memory/mod.rs
@@ -3,11 +3,11 @@
 
 pub use paging::{PAGE_SIZE, PhysicalAddress};
 
-use self::area_frame_alloc::AreaFrameAllocator;
+use self::area_frame_allocator::AreaFrameAllocator;
 
 use spin::Mutex;
 
-pub mod area_frame_alloc;
+pub mod area_frame_allocator;
 
 /// The current memory map. It's size is maxed out to 512 entries, due to it being
 /// from 0x500 to 0x5000 (800 is the absolute total)
diff --git a/arch/x86_64/src/paging/entry.rs b/arch/x86_64/src/paging/entry.rs
index 605650d..aecf647 100644
--- a/arch/x86_64/src/paging/entry.rs
+++ b/arch/x86_64/src/paging/entry.rs
@@ -23,7 +23,7 @@ bitflags! {
     }
 }
 
-pub const ADDRESS_MASK: usize = 0x000fffff_fffff000;
+pub const ADDRESS_MASK: usize = 0x000f_ffff_ffff_f000;
 
 impl Entry {
     /// Is the entry unused?
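
Note: bitmap_allocator.rs is added above but not yet referenced from memory/mod.rs in this patch; it only sketches the intended design, in which one byte tracks one 4096-byte frame and the frame index is physical address / 4096 (the bitmap itself would presumably live at BITMAP_OFFSET). A small self-contained sketch of that bookkeeping, using a Vec<u8> instead of a statically mapped slice; names here are illustrative, not the kernel's:

    // One byte per 4096-byte frame: FREE frames can be handed out, USED frames
    // must be returned before they can be allocated again.
    const PAGE_SIZE: usize = 4096;
    const FREE: u8 = 1;
    const USED: u8 = 2;

    struct Bitmap { frames: Vec<u8> }

    impl Bitmap {
        fn allocate(&mut self) -> Option<usize> {
            for i in 0..self.frames.len() {
                if self.frames[i] == FREE {
                    self.frames[i] = USED;
                    return Some(i * PAGE_SIZE); // physical address of the frame
                }
            }
            None
        }

        fn deallocate(&mut self, address: usize) {
            let i = address / PAGE_SIZE; // same index math as bitmap_allocator.rs
            assert!(i < self.frames.len() && self.frames[i] == USED, "unowned frame");
            self.frames[i] = FREE;
        }
    }

    fn main() {
        let mut bitmap = Bitmap { frames: vec![FREE; 16] };
        let a = bitmap.allocate().unwrap();
        let b = bitmap.allocate().unwrap();
        assert_eq!((a, b), (0, PAGE_SIZE));
        bitmap.deallocate(a);
        assert_eq!(bitmap.allocate().unwrap(), 0); // freed frame is reused
    }
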
diff --git a/arch/x86_64/src/paging/mod.rs b/arch/x86_64/src/paging/mod.rs
index 5baeb10..420ca49 100644
--- a/arch/x86_64/src/paging/mod.rs
+++ b/arch/x86_64/src/paging/mod.rs
@@ -21,9 +21,6 @@ pub const ENTRY_COUNT: usize = 512;
 /// Size of pages
 pub const PAGE_SIZE: usize = 4096;
 
-/// Offset of kernel from physical
-pub const KERNEL_OFFSET: usize = 0xfffffe8000000000;
-
 /// Initialize paging
 pub unsafe fn init(stack_start: usize, stack_end: usize) -> ActivePageTable {
     extern {
@@ -71,8 +68,8 @@ pub unsafe fn init(stack_start: usize, stack_end: usize) -> ActivePageTable {
 
                 for frame in Frame::range_inclusive(start_frame, end_frame) {
                     mapper.identity_map(frame.clone(), flags);
-                    let page = Page::containing_address(VirtualAddress::new(frame.start_address().get() + KERNEL_OFFSET));
-                    mapper.map_to(page, frame, flags);
+                    //let page = Page::containing_address(VirtualAddress::new(frame.start_address().get() + ::KERNEL_OFFSET));
+                    //mapper.map_to(page, frame, flags);
                 }
             }
         };
diff --git a/arch/x86_64/src/paging/table.rs b/arch/x86_64/src/paging/table.rs
index 956787b..f2fa466 100644
--- a/arch/x86_64/src/paging/table.rs
+++ b/arch/x86_64/src/paging/table.rs
@@ -9,7 +9,7 @@ use memory::allocate_frame;
 
 use super::entry::*;
 use super::ENTRY_COUNT;
 
-pub const P4: *mut Table<Level4> = 0xffffffff_fffff000 as *mut _;
+pub const P4: *mut Table<Level4> = 0xffff_ffff_ffff_f000 as *mut _;
 
 pub trait TableLevel {}
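
Note: the constants added to lib.rs, the recursive P4 pointer in table.rs above, and the PML4 slots wired up by the bootloader below all follow from the same arithmetic: each PML4 slot covers 2^39 bytes (512 GiB) and high-half addresses are the sign extension of bit 47. A short check of that derivation, assuming slot 511 holds the recursive entry and slot 510 the kernel PDP, as the rest of the patch sets up:

    // Sketch only: verifies the memory-map constants against their PML4 slots.
    fn pml4_slot_to_address(slot: u64) -> u64 {
        assert!(slot >= 256 && slot < 512, "higher-half slots only");
        // Bits 63..48 are the sign extension of bit 47 for canonical addresses.
        0xffff_0000_0000_0000 | (slot << 39)
    }

    fn main() {
        // Slot 511 is mapped back onto the PML4 itself ("Link last PML4 to PML4").
        assert_eq!(pml4_slot_to_address(511), 0xffff_ff80_0000_0000); // RECURSIVE_PAGE_OFFSET
        // Slot 510 is linked to the kernel PDP ("second to last PML4").
        assert_eq!(pml4_slot_to_address(510), 0xffff_ff00_0000_0000); // KERNEL_OFFSET
        assert_eq!(pml4_slot_to_address(509), 0xffff_fe80_0000_0000); // BITMAP_OFFSET
        assert_eq!(pml4_slot_to_address(508), 0xffff_fe00_0000_0000); // HEAP_OFFSET

        // Following the recursive entry at all four levels lands on the PML4 itself,
        // which is why table.rs can use one fixed pointer for P4.
        let p4 = 0xffff_0000_0000_0000u64 | (511 << 39) | (511 << 30) | (511 << 21) | (511 << 12);
        assert_eq!(p4, 0xffff_ffff_ffff_f000);
        println!("memory map constants check out");
    }
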
diff --git a/arch/x86_64/src/start.rs b/arch/x86_64/src/start.rs
index ab65d17..a154925 100644
--- a/arch/x86_64/src/start.rs
+++ b/arch/x86_64/src/start.rs
@@ -6,7 +6,7 @@
 use core::sync::atomic::{AtomicBool, ATOMIC_BOOL_INIT, AtomicUsize, ATOMIC_USIZE_INIT, Ordering};
 
 use acpi;
-use allocator::{HEAP_START, HEAP_SIZE};
+use allocator;
 use device;
 use externs::memset;
 use gdt;
@@ -29,7 +29,7 @@ static mut TDATA_TEST_NONZERO: usize = 0xFFFFFFFFFFFFFFFF;
 static AP_COUNT: AtomicUsize = ATOMIC_USIZE_INIT;
 pub static AP_READY: AtomicBool = ATOMIC_BOOL_INIT;
 static BSP_READY: AtomicBool = ATOMIC_BOOL_INIT;
-static HEAP_FRAME: AtomicUsize = ATOMIC_USIZE_INIT;
+static HEAP_TABLE: AtomicUsize = ATOMIC_USIZE_INIT;
 
 extern {
     /// Kernel main function
@@ -97,32 +97,32 @@ pub unsafe extern fn kstart() -> ! {
     AP_COUNT.store(0, Ordering::SeqCst);
     AP_READY.store(false, Ordering::SeqCst);
     BSP_READY.store(false, Ordering::SeqCst);
-    HEAP_FRAME.store(0, Ordering::SeqCst);
+    HEAP_TABLE.store(0, Ordering::SeqCst);
 
     // Map heap
     {
-        let heap_start_page = Page::containing_address(VirtualAddress::new(HEAP_START));
-        let heap_end_page = Page::containing_address(VirtualAddress::new(HEAP_START + HEAP_SIZE-1));
-
-        {
-            let index = heap_start_page.p4_index();
-            assert_eq!(index, heap_end_page.p4_index());
-
-            let frame = memory::allocate_frame().expect("no frames available");
-            HEAP_FRAME.store(frame.start_address().get(), Ordering::SeqCst);
-
-            let p4 = active_table.p4_mut();
-            {
-                let entry = &mut p4[index];
-                assert!(entry.is_unused());
-                entry.set(frame, entry::PRESENT | entry::WRITABLE);
-            }
-            p4.next_table_mut(index).unwrap().zero();
-        }
-
+        // Map heap pages
+        let heap_start_page = Page::containing_address(VirtualAddress::new(::HEAP_OFFSET));
+        let heap_end_page = Page::containing_address(VirtualAddress::new(::HEAP_OFFSET + ::HEAP_SIZE-1));
         for page in Page::range_inclusive(heap_start_page, heap_end_page) {
             active_table.map(page, entry::WRITABLE | entry::NO_EXECUTE);
         }
+
+        // Init the allocator
+        allocator::init(::HEAP_OFFSET, ::HEAP_SIZE);
+
+        // Send heap page table to APs
+        let index = heap_start_page.p4_index();
+
+        let p4 = active_table.p4();
+        {
+            let entry = &p4[index];
+            if let Some(frame) = entry.pointed_frame() {
+                HEAP_TABLE.store(frame.start_address().get(), Ordering::SeqCst);
+            } else {
+                panic!("heap does not have PML4 entry");
+            }
+        }
     }
 
     // Initialize devices
@@ -167,24 +167,19 @@ pub unsafe extern fn kstart_ap(stack_start: usize, stack_end: usize) -> ! {
         assert_eq!(TDATA_TEST_NONZERO, 0xFFFFFFFFFFFFFFFE);
     }
 
-    // Map heap
+    // Copy heap PML4
     {
-        let heap_start_page = Page::containing_address(VirtualAddress::new(HEAP_START));
-        let heap_end_page = Page::containing_address(VirtualAddress::new(HEAP_START + HEAP_SIZE-1));
+        let page = Page::containing_address(VirtualAddress::new(::HEAP_OFFSET));
 
-        {
-            assert_eq!(heap_start_page.p4_index(), heap_end_page.p4_index());
-
-            while HEAP_FRAME.load(Ordering::SeqCst) == 0 {
-                interrupt::pause();
-            }
-            let frame = Frame::containing_address(PhysicalAddress::new(HEAP_FRAME.load(Ordering::SeqCst)));
-
-            let p4 = active_table.p4_mut();
-            let entry = &mut p4[heap_start_page.p4_index()];
-            assert!(entry.is_unused());
-            entry.set(frame, entry::PRESENT | entry::WRITABLE);
+        while HEAP_TABLE.load(Ordering::SeqCst) == 0 {
+            interrupt::pause();
         }
+        let frame = Frame::containing_address(PhysicalAddress::new(HEAP_TABLE.load(Ordering::SeqCst)));
+
+        let p4 = active_table.p4_mut();
+        let entry = &mut p4[page.p4_index()];
+        assert!(entry.is_unused());
+        entry.set(frame, entry::PRESENT | entry::WRITABLE);
     }
 
     // Init devices for AP
diff --git a/bootloader/x86_64/startup-x86_64.asm b/bootloader/x86_64/startup-x86_64.asm
index f5f9bc6..3d9b865 100644
--- a/bootloader/x86_64/startup-x86_64.asm
+++ b/bootloader/x86_64/startup-x86_64.asm
@@ -58,9 +58,9 @@ startup_arch:
     rep stosd
     xor edi, edi
-    ;Link first PML4 and third to last PML4 to PDP
+    ;Link first PML4 and second to last PML4 to PDP
    mov DWORD [es:edi], 0x71000 | 1 << 1 | 1
-    mov DWORD [es:edi + 509*8], 0x71000 | 1 << 1 | 1
+    mov DWORD [es:edi + 510*8], 0x71000 | 1 << 1 | 1
     add edi, 0x1000
 
     ;Link last PML4 to PML4
     mov DWORD [es:edi - 8], 0x70000 | 1 << 1 | 1
@@ -120,8 +120,7 @@ long_mode:
     mov rsp, 0x0009F000
 
     ;rust init
-    xor rax, rax
-    mov eax, [kernel_base + 0x18]
+    mov rax, [kernel_base + 0x18]
     jmp rax
 
 long_mode_ap:
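
Note: the start.rs change hands the heap mapping from the bootstrap processor to the application processors through HEAP_TABLE: kstart publishes the physical address of the page table backing the heap's PML4 entry, and each kstart_ap spins (with interrupt::pause) until that value is nonzero, then installs the same entry so every CPU shares one heap. A minimal sketch of that handoff using std threads and an AtomicUsize; the address value below is hypothetical:

    use std::sync::atomic::{AtomicUsize, Ordering};
    use std::thread;

    // 0 means "not published yet", exactly like HEAP_TABLE in start.rs.
    static HEAP_TABLE: AtomicUsize = AtomicUsize::new(0);

    fn main() {
        let ap = thread::spawn(|| {
            // kstart_ap: wait for the BSP to publish the heap page table.
            while HEAP_TABLE.load(Ordering::SeqCst) == 0 {
                thread::yield_now(); // the kernel uses interrupt::pause() here
            }
            HEAP_TABLE.load(Ordering::SeqCst)
        });

        // kstart: map the heap, then publish the frame backing its PML4 entry.
        let heap_p4_entry_frame = 0x0007_3000; // hypothetical physical address
        HEAP_TABLE.store(heap_p4_entry_frame, Ordering::SeqCst);

        assert_eq!(ap.join().unwrap(), heap_p4_entry_frame);
        println!("AP installed the same heap page table as the BSP");
    }
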