Global allocator, page table - protected by spinlock

Jeremy Soller 2016-08-15 16:29:54 -06:00
parent 617516b949
commit fda1ab2327
4 changed files with 46 additions and 32 deletions

@@ -5,6 +5,7 @@ version = "0.1.0"
 [dependencies]
 bitflags = "*"
 hole_list_allocator = { path = "../../alloc/hole_list_allocator"}
+spin = "*"
 
 [dependencies.x86]
 default-features = false
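The new spin dependency is what makes lock-protected globals possible in a #![no_std] kernel: std::sync::Mutex needs an operating system to block on, while spin's Mutex busy-waits on an atomic flag and works with no runtime at all. A minimal sketch of its locking behavior (the TICKS counter is illustrative, not from this commit):

    extern crate spin;

    use spin::Mutex;

    // A const-initializable global counter; spin::Mutex::new is a const fn.
    static TICKS: Mutex<u64> = Mutex::new(0);

    fn tick() -> u64 {
        // lock() spins until the lock is free and returns a guard;
        // the lock is released when the guard is dropped.
        let mut ticks = TICKS.lock();
        *ticks += 1;
        *ticks
    }

    fn main() {
        assert_eq!(tick(), 1);
        assert_eq!(tick(), 2);
    }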

@@ -10,11 +10,13 @@
 #![no_std]
 
 extern crate hole_list_allocator as allocator;
 
 #[macro_use]
 extern crate bitflags;
+extern crate spin;
 extern crate x86;
 
+use spin::Mutex;
 /// Print to console
 #[macro_export]
 macro_rules! print {

@@ -106,3 +108,6 @@ pub mod start;
 /// Task state segment
 pub mod tss;
 
+pub static ALLOCATOR: Mutex<Option<memory::AreaFrameAllocator>> = Mutex::new(None);
+
+pub static PAGE_TABLE: Mutex<Option<paging::ActivePageTable>> = Mutex::new(None);
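A static cannot be initialized by code that only runs at boot, so both globals start out as Mutex::new(None) and are filled in later by kstart. A self-contained sketch of this Mutex<Option<T>> late-initialization pattern, with FrameAllocator as an illustrative stand-in for memory::AreaFrameAllocator:

    extern crate spin;

    use spin::Mutex;

    // Stand-in for the kernel's real allocator type.
    struct FrameAllocator {
        next_free: usize,
    }

    // Starts empty; the Option covers the window before boot code runs.
    static ALLOCATOR: Mutex<Option<FrameAllocator>> = Mutex::new(None);

    fn init() {
        // Boot code runs once and moves the finished value into the global.
        *ALLOCATOR.lock() = Some(FrameAllocator { next_free: 0 });
    }

    fn main() {
        init();
        // Every later user takes the spinlock, then unwraps the Option.
        let mut guard = ALLOCATOR.lock();
        let allocator = guard.as_mut().expect("allocator not initialized");
        allocator.next_free += 1;
        assert_eq!(allocator.next_free, 1);
    }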

@@ -3,7 +3,7 @@
 pub use paging::{PAGE_SIZE, PhysicalAddress};
 
-use self::area_frame_alloc::AreaFrameAllocator;
+pub use self::area_frame_alloc::AreaFrameAllocator;
 
 pub mod area_frame_alloc;
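The switch from use to pub use is what allows lib.rs above to name the type as memory::AreaFrameAllocator in the ALLOCATOR static: a plain use brings a name into scope privately, while pub use also re-exports it from the module. A minimal illustration:

    mod memory {
        mod area_frame_alloc {
            pub struct AreaFrameAllocator;
        }

        // A plain `use` here would be private to this module; `pub use`
        // re-exports the name so callers can write memory::AreaFrameAllocator.
        pub use self::area_frame_alloc::AreaFrameAllocator;
    }

    fn main() {
        let _allocator = memory::AreaFrameAllocator;
    }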

@@ -22,45 +22,53 @@ extern {
 #[no_mangle]
 pub unsafe extern fn kstart() -> ! {
+    {
         extern {
             /// The starting byte of the _.bss_ (uninitialized data) segment.
             static mut __bss_start: u8;
             /// The ending byte of the _.bss_ (uninitialized data) segment.
             static mut __bss_end: u8;
         }
 
         // Zero BSS, this initializes statics that are set to 0
         {
             let start_ptr = &mut __bss_start as *mut u8;
             let end_ptr = & __bss_end as *const u8 as usize;
 
             if start_ptr as usize <= end_ptr {
                 let size = end_ptr - start_ptr as usize;
                 memset(start_ptr, 0, size);
             }
 
             debug_assert_eq!(BSS_TEST_ZERO, 0);
             debug_assert_eq!(BSS_TEST_NONZERO, 0xFFFFFFFFFFFFFFFF);
         }
 
         // Set up GDT
         gdt::init();
 
         // Set up IDT
         idt::init(blank);
 
         // Initialize memory management
         let mut allocator = memory::init(0, &__bss_end as *const u8 as usize);
 
         // Initialize paging
         let mut active_table = paging::init(&mut allocator);
 
         // Initialize heap
         let heap_start_page = Page::containing_address(VirtualAddress::new(HEAP_START));
         let heap_end_page = Page::containing_address(VirtualAddress::new(HEAP_START + HEAP_SIZE-1));
         for page in Page::range_inclusive(heap_start_page, heap_end_page) {
             active_table.map(page, paging::entry::WRITABLE, &mut allocator);
         }
+
+        // Set global allocator
+        *::ALLOCATOR.lock() = Some(allocator);
+
+        // Set global page table
+        *::PAGE_TABLE.lock() = Some(active_table);
+    }
 
     asm!("xchg bx, bx" : : : : "intel", "volatile");