Global allocator, page table - protected by spinlock

Jeremy Soller 2016-08-15 16:29:54 -06:00
parent 617516b949
commit fda1ab2327
4 changed files with 46 additions and 32 deletions
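
The commit message names the technique: keep the kernel's single frame allocator and page table in globals, each wrapped in a spin::Mutex so they can never be reached without synchronization. The following is a minimal, self-contained sketch of that pattern (not part of the commit; DummyAllocator is an invented stand-in for memory::AreaFrameAllocator):

    extern crate spin;

    use spin::Mutex;

    /// Invented stand-in for the kernel's real allocator type.
    struct DummyAllocator {
        next_free: usize,
    }

    // spin's Mutex can be constructed in a static with no runtime setup,
    // which is what makes this pattern usable in a no_std kernel.
    static ALLOCATOR: Mutex<Option<DummyAllocator>> = Mutex::new(None);

    fn init_allocator() {
        // Built during early boot, then moved into the global under the lock.
        *ALLOCATOR.lock() = Some(DummyAllocator { next_free: 0 });
    }

    fn allocate() -> Option<usize> {
        // Later callers take the spinlock and use the allocator if present.
        if let Some(ref mut allocator) = *ALLOCATOR.lock() {
            let frame = allocator.next_free;
            allocator.next_free += 1;
            Some(frame)
        } else {
            None
        }
    }

    fn main() {
        init_allocator();
        assert_eq!(allocate(), Some(0));
        assert_eq!(allocate(), Some(1));
    }

The Option is what allows a const-initialized static: the global exists from the start as None, and the real value is installed once boot has built it.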

View file

@@ -5,6 +5,7 @@ version = "0.1.0"
 [dependencies]
 bitflags = "*"
 hole_list_allocator = { path = "../../alloc/hole_list_allocator"}
+spin = "*"
 
 [dependencies.x86]
 default-features = false

View file

@@ -10,11 +10,13 @@
 #![no_std]
 
 extern crate hole_list_allocator as allocator;
 #[macro_use]
 extern crate bitflags;
+extern crate spin;
 extern crate x86;
 
+use spin::Mutex;
 
 /// Print to console
 #[macro_export]
 macro_rules! print {
@@ -106,3 +108,6 @@ pub mod start;
 
 /// Task state segment
 pub mod tss;
+
+pub static ALLOCATOR: Mutex<Option<memory::AreaFrameAllocator>> = Mutex::new(None);
+pub static PAGE_TABLE: Mutex<Option<paging::ActivePageTable>> = Mutex::new(None);
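
With the two statics in place, any code in the crate can reach the allocator by locking it. A hypothetical helper, not part of this commit, sketched under the assumption that AreaFrameAllocator exposes an allocate_frame method returning an Option<memory::Frame>, as in the usual area-frame-allocator design:

    /// Hypothetical free function: lock the global, then delegate.
    /// (allocate_frame and memory::Frame are assumed, not shown in the diff.)
    fn allocate_frame() -> Option<memory::Frame> {
        if let Some(ref mut allocator) = *::ALLOCATOR.lock() {
            allocator.allocate_frame()
        } else {
            panic!("frame allocator not initialized");
        }
    }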

View file

@@ -3,7 +3,7 @@
 pub use paging::{PAGE_SIZE, PhysicalAddress};
 
-use self::area_frame_alloc::AreaFrameAllocator;
+pub use self::area_frame_alloc::AreaFrameAllocator;
 
 pub mod area_frame_alloc;
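
The one-word change from use to pub use matters because the new static in lib.rs spells the type as memory::AreaFrameAllocator, and that path only resolves if the memory module re-exports the name. A self-contained toy illustration:

    mod memory {
        pub mod area_frame_alloc {
            pub struct AreaFrameAllocator;
        }

        // Plain `use` would make the name visible inside this module only;
        // `pub use` re-exports it as memory::AreaFrameAllocator.
        pub use self::area_frame_alloc::AreaFrameAllocator;
    }

    // Resolves thanks to the re-export above.
    static _CHECK: Option<memory::AreaFrameAllocator> = None;

    fn main() {}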

View file

@@ -22,45 +22,53 @@ extern {
 #[no_mangle]
 pub unsafe extern fn kstart() -> ! {
-    extern {
-        /// The starting byte of the _.bss_ (uninitialized data) segment.
-        static mut __bss_start: u8;
-        /// The ending byte of the _.bss_ (uninitialized data) segment.
-        static mut __bss_end: u8;
-    }
+    {
+        extern {
+            /// The starting byte of the _.bss_ (uninitialized data) segment.
+            static mut __bss_start: u8;
+            /// The ending byte of the _.bss_ (uninitialized data) segment.
+            static mut __bss_end: u8;
+        }
 
-    // Zero BSS, this initializes statics that are set to 0
-    {
-        let start_ptr = &mut __bss_start as *mut u8;
-        let end_ptr = & __bss_end as *const u8 as usize;
+        // Zero BSS, this initializes statics that are set to 0
+        {
+            let start_ptr = &mut __bss_start as *mut u8;
+            let end_ptr = & __bss_end as *const u8 as usize;
 
-        if start_ptr as usize <= end_ptr {
-            let size = end_ptr - start_ptr as usize;
-            memset(start_ptr, 0, size);
-        }
+            if start_ptr as usize <= end_ptr {
+                let size = end_ptr - start_ptr as usize;
+                memset(start_ptr, 0, size);
+            }
 
-        debug_assert_eq!(BSS_TEST_ZERO, 0);
-        debug_assert_eq!(BSS_TEST_NONZERO, 0xFFFFFFFFFFFFFFFF);
-    }
+            debug_assert_eq!(BSS_TEST_ZERO, 0);
+            debug_assert_eq!(BSS_TEST_NONZERO, 0xFFFFFFFFFFFFFFFF);
+        }
 
-    // Set up GDT
-    gdt::init();
+        // Set up GDT
+        gdt::init();
 
-    // Set up IDT
-    idt::init(blank);
+        // Set up IDT
+        idt::init(blank);
 
-    // Initialize memory management
-    let mut allocator = memory::init(0, &__bss_end as *const u8 as usize);
+        // Initialize memory management
+        let mut allocator = memory::init(0, &__bss_end as *const u8 as usize);
 
-    // Initialize paging
-    let mut active_table = paging::init(&mut allocator);
+        // Initialize paging
+        let mut active_table = paging::init(&mut allocator);
 
-    // Initialize heap
-    let heap_start_page = Page::containing_address(VirtualAddress::new(HEAP_START));
-    let heap_end_page = Page::containing_address(VirtualAddress::new(HEAP_START + HEAP_SIZE-1));
+        // Initialize heap
+        let heap_start_page = Page::containing_address(VirtualAddress::new(HEAP_START));
+        let heap_end_page = Page::containing_address(VirtualAddress::new(HEAP_START + HEAP_SIZE-1));
 
-    for page in Page::range_inclusive(heap_start_page, heap_end_page) {
-        active_table.map(page, paging::entry::WRITABLE, &mut allocator);
-    }
+        for page in Page::range_inclusive(heap_start_page, heap_end_page) {
+            active_table.map(page, paging::entry::WRITABLE, &mut allocator);
+        }
+
+        // Set global allocator
+        *::ALLOCATOR.lock() = Some(allocator);
+
+        // Set global page table
+        *::PAGE_TABLE.lock() = Some(active_table);
+    }
 
     asm!("xchg bx, bx" : : : : "intel", "volatile");