From fda1ab2327bdc7231ca54a2118d0d49380a5ff65 Mon Sep 17 00:00:00 2001 From: Jeremy Soller <jackpot51@gmail.com> Date: Mon, 15 Aug 2016 16:29:54 -0600 Subject: [PATCH] Global allocator, page table - protected by spinlock --- arch/x86_64/Cargo.toml | 1 + arch/x86_64/src/lib.rs | 7 +++- arch/x86_64/src/memory/mod.rs | 2 +- arch/x86_64/src/start.rs | 68 +++++++++++++++++++---------------- 4 files changed, 46 insertions(+), 32 deletions(-) diff --git a/arch/x86_64/Cargo.toml b/arch/x86_64/Cargo.toml index 250c559..a845fa7 100644 --- a/arch/x86_64/Cargo.toml +++ b/arch/x86_64/Cargo.toml @@ -5,6 +5,7 @@ version = "0.1.0" [dependencies] bitflags = "*" hole_list_allocator = { path = "../../alloc/hole_list_allocator"} +spin = "*" [dependencies.x86] default-features = false diff --git a/arch/x86_64/src/lib.rs b/arch/x86_64/src/lib.rs index f4102db..eabc8a8 100644 --- a/arch/x86_64/src/lib.rs +++ b/arch/x86_64/src/lib.rs @@ -10,11 +10,13 @@ #![no_std] extern crate hole_list_allocator as allocator; - #[macro_use] extern crate bitflags; +extern crate spin; extern crate x86; +use spin::Mutex; + /// Print to console #[macro_export] macro_rules! print { @@ -106,3 +108,6 @@ pub mod start; /// Task state segment pub mod tss; + +pub static ALLOCATOR: Mutex<Option<memory::AreaFrameAllocator>> = Mutex::new(None); +pub static PAGE_TABLE: Mutex<Option<paging::ActivePageTable>> = Mutex::new(None); diff --git a/arch/x86_64/src/memory/mod.rs b/arch/x86_64/src/memory/mod.rs index ee79995..59f3730 100644 --- a/arch/x86_64/src/memory/mod.rs +++ b/arch/x86_64/src/memory/mod.rs @@ -3,7 +3,7 @@ pub use paging::{PAGE_SIZE, PhysicalAddress}; -use self::area_frame_alloc::AreaFrameAllocator; +pub use self::area_frame_alloc::AreaFrameAllocator; pub mod area_frame_alloc; diff --git a/arch/x86_64/src/start.rs b/arch/x86_64/src/start.rs index 99147b9..c70d2bd 100644 --- a/arch/x86_64/src/start.rs +++ b/arch/x86_64/src/start.rs @@ -22,45 +22,53 @@ extern { #[no_mangle] pub unsafe extern fn kstart() -> ! { - extern { - /// The starting byte of the _.bss_ (uninitialized data) segment. 
- static mut __bss_start: u8; - /// The ending byte of the _.bss_ (uninitialized data) segment. - static mut __bss_end: u8; - } - - // Zero BSS, this initializes statics that are set to 0 { - let start_ptr = &mut __bss_start as *mut u8; - let end_ptr = & __bss_end as *const u8 as usize; - - if start_ptr as usize <= end_ptr { - let size = end_ptr - start_ptr as usize; - memset(start_ptr, 0, size); + extern { + /// The starting byte of the _.bss_ (uninitialized data) segment. + static mut __bss_start: u8; + /// The ending byte of the _.bss_ (uninitialized data) segment. + static mut __bss_end: u8; } - debug_assert_eq!(BSS_TEST_ZERO, 0); - debug_assert_eq!(BSS_TEST_NONZERO, 0xFFFFFFFFFFFFFFFF); - } + // Zero BSS, this initializes statics that are set to 0 + { + let start_ptr = &mut __bss_start as *mut u8; + let end_ptr = & __bss_end as *const u8 as usize; - // Set up GDT - gdt::init(); + if start_ptr as usize <= end_ptr { + let size = end_ptr - start_ptr as usize; + memset(start_ptr, 0, size); + } - // Set up IDT - idt::init(blank); + debug_assert_eq!(BSS_TEST_ZERO, 0); + debug_assert_eq!(BSS_TEST_NONZERO, 0xFFFFFFFFFFFFFFFF); + } - // Initialize memory management - let mut allocator = memory::init(0, &__bss_end as *const u8 as usize); + // Set up GDT + gdt::init(); - // Initialize paging - let mut active_table = paging::init(&mut allocator); + // Set up IDT + idt::init(blank); - // Initialize heap - let heap_start_page = Page::containing_address(VirtualAddress::new(HEAP_START)); - let heap_end_page = Page::containing_address(VirtualAddress::new(HEAP_START + HEAP_SIZE-1)); + // Initialize memory management + let mut allocator = memory::init(0, &__bss_end as *const u8 as usize); - for page in Page::range_inclusive(heap_start_page, heap_end_page) { - active_table.map(page, paging::entry::WRITABLE, &mut allocator); + // Initialize paging + let mut active_table = paging::init(&mut allocator); + + // Initialize heap + let heap_start_page = 
Page::containing_address(VirtualAddress::new(HEAP_START)); + let heap_end_page = Page::containing_address(VirtualAddress::new(HEAP_START + HEAP_SIZE-1)); + + for page in Page::range_inclusive(heap_start_page, heap_end_page) { + active_table.map(page, paging::entry::WRITABLE, &mut allocator); + } + + // Set global allocator + *::ALLOCATOR.lock() = Some(allocator); + + // Set global page table + *::PAGE_TABLE.lock() = Some(active_table); } asm!("xchg bx, bx" : : : : "intel", "volatile");