Reorganize kernel memory mappings

Jeremy Soller 2016-09-10 18:48:27 -06:00
parent 7ace92c60c
commit ce717ff277
12 changed files with 107 additions and 66 deletions

View file

@@ -6,7 +6,3 @@ version = "0.1.0"
[dependencies]
linked_list_allocator = "0.2.0"
spin = "0.3.5"
[dependencies.lazy_static]
version = "0.2.1"
features = ["spin_no_std"]

View file

@@ -9,26 +9,29 @@ use linked_list_allocator::Heap;
extern crate spin;
extern crate linked_list_allocator;
#[macro_use]
extern crate lazy_static;
pub const HEAP_START: usize = 0xffff_ff00_0000_0000; // Put at end of memory, below the recursive page mapping
pub const HEAP_SIZE: usize = 64 * 1024 * 1024; // 64 MB
static HEAP: Mutex<Option<Heap>> = Mutex::new(None);
lazy_static! {
static ref HEAP: Mutex<Heap> = Mutex::new(unsafe {
Heap::new(HEAP_START, HEAP_SIZE)
});
pub unsafe fn init(offset: usize, size: usize) {
*HEAP.lock() = Some(Heap::new(offset, size));
}
#[no_mangle]
pub extern fn __rust_allocate(size: usize, align: usize) -> *mut u8 {
HEAP.lock().allocate_first_fit(size, align).expect("out of memory")
if let Some(ref mut heap) = *HEAP.lock() {
heap.allocate_first_fit(size, align).expect("out of memory")
} else {
panic!("__rust_allocate: heap not initialized");
}
}
#[no_mangle]
pub extern fn __rust_deallocate(ptr: *mut u8, size: usize, align: usize) {
unsafe { HEAP.lock().deallocate(ptr, size, align) };
if let Some(ref mut heap) = *HEAP.lock() {
unsafe { heap.deallocate(ptr, size, align) };
} else {
panic!("__rust_deallocate: heap not initialized");
}
}
#[no_mangle]

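The lazy_static heap gives way to a spin::Mutex<Option<Heap>> with an explicit init, so allocation attempts made before the heap pages exist now fail loudly instead of touching unmapped memory. A hosted sketch of that late-init pattern, with std::sync::Mutex and a Vec standing in for spin::Mutex and Heap (illustrative names, not part of this diff):

use std::sync::Mutex;

// None until init runs, mirroring the kernel's static HEAP above.
static HEAP: Mutex<Option<Vec<u8>>> = Mutex::new(None);

fn init(size: usize) {
    *HEAP.lock().unwrap() = Some(vec![0; size]);
}

fn allocate() -> usize {
    match *HEAP.lock().unwrap() {
        Some(ref heap) => heap.len(), // placeholder for allocate_first_fit
        None => panic!("heap not initialized"),
    }
}

fn main() {
    init(4096);
    assert_eq!(allocate(), 4096);
}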
View file

@@ -19,6 +19,18 @@ extern crate ransid;
extern crate spin;
pub extern crate x86;
// Because it is important that the memory map not be aliased, it is defined here, in one place
/// Offset of recursive paging
pub const RECURSIVE_PAGE_OFFSET: usize = 0xffff_ff80_0000_0000;
/// Offset of kernel
pub const KERNEL_OFFSET: usize = 0xffff_ff00_0000_0000;
/// Offset to memory allocation bitmap
pub const BITMAP_OFFSET: usize = 0xffff_fe80_0000_0000;
/// Offset to kernel heap
pub const HEAP_OFFSET: usize = 0xffff_fe00_0000_0000;
/// Size of heap
pub const HEAP_SIZE: usize = 64 * 1024 * 1024; // 64 MB
/// Print to console
#[macro_export]
macro_rules! print {

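Each of these offsets lands in its own PML4 slot near the top of the canonical address space: 511 for the recursive mapping, 510 for the kernel, 509 for the bitmap and 508 for the heap. A standalone sanity check of that index arithmetic, assuming the usual x86_64 4-level paging with 512-entry tables (not part of this commit):

// PML4 slot of a virtual address: bits 39..=47.
fn pml4_index(addr: usize) -> usize {
    (addr >> 39) & 0x1ff
}

fn main() {
    assert_eq!(pml4_index(0xffff_ff80_0000_0000), 511); // RECURSIVE_PAGE_OFFSET
    assert_eq!(pml4_index(0xffff_ff00_0000_0000), 510); // KERNEL_OFFSET
    assert_eq!(pml4_index(0xffff_fe80_0000_0000), 509); // BITMAP_OFFSET
    assert_eq!(pml4_index(0xffff_fe00_0000_0000), 508); // HEAP_OFFSET
}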
View file

@@ -1,7 +1,7 @@
ENTRY(kstart)
OUTPUT_FORMAT(elf64-x86-64)
/* KERNEL_OFFSET = 0xfffffe8000100000; */
/* KERNEL_OFFSET = 0xffffff0000100000; */
KERNEL_OFFSET = 0x100000;
SECTIONS {

View file

@@ -76,6 +76,6 @@ impl FrameAllocator for AreaFrameAllocator {
}
fn deallocate_frame(&mut self, frame: Frame) {
//println!("Leak frame: {:?}", frame);
//panic!("AreaFrameAllocator::deallocate_frame: not supported: {:?}", frame);
}
}

View file

@@ -0,0 +1,39 @@
use super::{AreaFrameAllocator, Frame, FrameAllocator, PhysicalAddress};
const BITMAP_RESERVED: u8 = 0;
const BITMAP_FREE: u8 = 1;
const BITMAP_USED: u8 = 2;
pub struct BitmapAllocator {
bitmap: &'static mut [u8]
}
impl BitmapAllocator {
pub fn new(_area_frame_allocator: AreaFrameAllocator) -> BitmapAllocator {
// TODO: carve the bitmap out of memory described by the area frame allocator;
// until then it stays empty and no frames can be handed out.
BitmapAllocator {
bitmap: &mut []
}
}
}
impl FrameAllocator for BitmapAllocator {
fn allocate_frame(&mut self) -> Option<Frame> {
let mut i = 0;
while i < self.bitmap.len() {
if self.bitmap[i] == BITMAP_FREE {
self.bitmap[i] = BITMAP_USED;
return Some(Frame::containing_address(PhysicalAddress::new(i * 4096)));
}
i += 1;
}
None
}
fn deallocate_frame(&mut self, frame: Frame) {
let i = frame.start_address().get() / 4096;
if i < self.bitmap.len() && self.bitmap[i] == BITMAP_USED {
self.bitmap[i] = BITMAP_FREE;
} else {
panic!("BitmapAllocator::deallocate_frame: unowned frame {:?}", frame);
}
}
}

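As committed, BitmapAllocator::new ignores its argument and returns an empty bitmap, so allocate_frame cannot succeed yet. Purely as an illustration of the intended free/used bookkeeping, here is a hosted sketch over a tiny fixed bitmap (hypothetical values, not part of this commit):

// One byte per 4 KiB frame; frame i covers physical bytes i*4096 .. (i+1)*4096.
const BITMAP_FREE: u8 = 1;
const BITMAP_USED: u8 = 2;

fn main() {
    let mut bitmap = [BITMAP_FREE; 4];
    // allocate_frame: claim the first free slot and derive the frame address from its index.
    let i = bitmap.iter().position(|&b| b == BITMAP_FREE).unwrap();
    bitmap[i] = BITMAP_USED;
    assert_eq!(i * 4096, 0);
    // deallocate_frame: the owning slot simply goes back to free.
    bitmap[i] = BITMAP_FREE;
    assert!(bitmap.iter().all(|&b| b == BITMAP_FREE));
}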
View file

@@ -3,11 +3,11 @@
pub use paging::{PAGE_SIZE, PhysicalAddress};
use self::area_frame_alloc::AreaFrameAllocator;
use self::area_frame_allocator::AreaFrameAllocator;
use spin::Mutex;
pub mod area_frame_alloc;
pub mod area_frame_allocator;
/// The current memory map. Its size is capped at 512 entries, because the map lives
/// in the region from 0x500 to 0x5000 (800 entries is the absolute maximum)

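The 512-entry cap and the 800-entry absolute maximum follow from dividing the 0x500-0x5000 region by the size of one memory map entry; a quick check, assuming 24-byte entries as filled in by the bootloader (an assumption, not stated in this diff):

fn main() {
    let region: usize = 0x5000 - 0x500;   // bytes reserved for the memory map
    let entry_size = 24;                  // assumed size of one memory area entry
    assert_eq!(region / entry_size, 800); // absolute maximum number of entries
    assert!(512 <= region / entry_size);  // the kernel only keeps the first 512
}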
View file

@@ -23,7 +23,7 @@ bitflags! {
}
}
pub const ADDRESS_MASK: usize = 0x000fffff_fffff000;
pub const ADDRESS_MASK: usize = 0x000f_ffff_ffff_f000;
impl Entry {
/// Is the entry unused?

View file

@@ -21,9 +21,6 @@ pub const ENTRY_COUNT: usize = 512;
/// Size of pages
pub const PAGE_SIZE: usize = 4096;
/// Offset of kernel from physical
pub const KERNEL_OFFSET: usize = 0xfffffe8000000000;
/// Initialize paging
pub unsafe fn init(stack_start: usize, stack_end: usize) -> ActivePageTable {
extern {
@@ -71,8 +68,8 @@ pub unsafe fn init(stack_start: usize, stack_end: usize) -> ActivePageTable {
for frame in Frame::range_inclusive(start_frame, end_frame) {
mapper.identity_map(frame.clone(), flags);
let page = Page::containing_address(VirtualAddress::new(frame.start_address().get() + KERNEL_OFFSET));
mapper.map_to(page, frame, flags);
//let page = Page::containing_address(VirtualAddress::new(frame.start_address().get() + ::KERNEL_OFFSET));
//mapper.map_to(page, frame, flags);
}
}
};

View file

@@ -9,7 +9,7 @@ use memory::allocate_frame;
use super::entry::*;
use super::ENTRY_COUNT;
pub const P4: *mut Table<Level4> = 0xffffffff_fffff000 as *mut _;
pub const P4: *mut Table<Level4> = 0xffff_ffff_ffff_f000 as *mut _;
pub trait TableLevel {}

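P4 is unchanged here apart from digit grouping; it is the address whose index is 511 at every table level, which is exactly what the recursive PML4 entry (last slot pointing back at the PML4, set up in the boot assembly below) resolves to. A standalone sketch of that arithmetic, not kernel code:

fn main() {
    // Index 511 at the P4, P3, P2 and P1 levels, then sign-extend to a canonical address.
    let addr = (511usize << 39) | (511 << 30) | (511 << 21) | (511 << 12);
    assert_eq!(addr | 0xffff_0000_0000_0000, 0xffff_ffff_ffff_f000); // == P4
}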
View file

@@ -6,7 +6,7 @@
use core::sync::atomic::{AtomicBool, ATOMIC_BOOL_INIT, AtomicUsize, ATOMIC_USIZE_INIT, Ordering};
use acpi;
use allocator::{HEAP_START, HEAP_SIZE};
use allocator;
use device;
use externs::memset;
use gdt;
@@ -29,7 +29,7 @@ static mut TDATA_TEST_NONZERO: usize = 0xFFFFFFFFFFFFFFFF;
static AP_COUNT: AtomicUsize = ATOMIC_USIZE_INIT;
pub static AP_READY: AtomicBool = ATOMIC_BOOL_INIT;
static BSP_READY: AtomicBool = ATOMIC_BOOL_INIT;
static HEAP_FRAME: AtomicUsize = ATOMIC_USIZE_INIT;
static HEAP_TABLE: AtomicUsize = ATOMIC_USIZE_INIT;
extern {
/// Kernel main function
@@ -97,32 +97,32 @@ pub unsafe extern fn kstart() -> ! {
AP_COUNT.store(0, Ordering::SeqCst);
AP_READY.store(false, Ordering::SeqCst);
BSP_READY.store(false, Ordering::SeqCst);
HEAP_FRAME.store(0, Ordering::SeqCst);
HEAP_TABLE.store(0, Ordering::SeqCst);
// Map heap
{
let heap_start_page = Page::containing_address(VirtualAddress::new(HEAP_START));
let heap_end_page = Page::containing_address(VirtualAddress::new(HEAP_START + HEAP_SIZE-1));
{
let index = heap_start_page.p4_index();
assert_eq!(index, heap_end_page.p4_index());
let frame = memory::allocate_frame().expect("no frames available");
HEAP_FRAME.store(frame.start_address().get(), Ordering::SeqCst);
let p4 = active_table.p4_mut();
{
let entry = &mut p4[index];
assert!(entry.is_unused());
entry.set(frame, entry::PRESENT | entry::WRITABLE);
}
p4.next_table_mut(index).unwrap().zero();
}
// Map heap pages
let heap_start_page = Page::containing_address(VirtualAddress::new(::HEAP_OFFSET));
let heap_end_page = Page::containing_address(VirtualAddress::new(::HEAP_OFFSET + ::HEAP_SIZE-1));
for page in Page::range_inclusive(heap_start_page, heap_end_page) {
active_table.map(page, entry::WRITABLE | entry::NO_EXECUTE);
}
// Init the allocator
allocator::init(::HEAP_OFFSET, ::HEAP_SIZE);
// Send heap page table to APs
let index = heap_start_page.p4_index();
let p4 = active_table.p4();
{
let entry = &p4[index];
if let Some(frame) = entry.pointed_frame() {
HEAP_TABLE.store(frame.start_address().get(), Ordering::SeqCst);
} else {
panic!("heap does not have PML4 entry");
}
}
}
// Initialize devices
@@ -167,24 +167,19 @@ pub unsafe extern fn kstart_ap(stack_start: usize, stack_end: usize) -> ! {
assert_eq!(TDATA_TEST_NONZERO, 0xFFFFFFFFFFFFFFFE);
}
// Map heap
// Copy heap PML4
{
let heap_start_page = Page::containing_address(VirtualAddress::new(HEAP_START));
let heap_end_page = Page::containing_address(VirtualAddress::new(HEAP_START + HEAP_SIZE-1));
let page = Page::containing_address(VirtualAddress::new(::HEAP_OFFSET));
{
assert_eq!(heap_start_page.p4_index(), heap_end_page.p4_index());
while HEAP_FRAME.load(Ordering::SeqCst) == 0 {
interrupt::pause();
}
let frame = Frame::containing_address(PhysicalAddress::new(HEAP_FRAME.load(Ordering::SeqCst)));
let p4 = active_table.p4_mut();
let entry = &mut p4[heap_start_page.p4_index()];
assert!(entry.is_unused());
entry.set(frame, entry::PRESENT | entry::WRITABLE);
while HEAP_TABLE.load(Ordering::SeqCst) == 0 {
interrupt::pause();
}
let frame = Frame::containing_address(PhysicalAddress::new(HEAP_TABLE.load(Ordering::SeqCst)));
let p4 = active_table.p4_mut();
let entry = &mut p4[page.p4_index()];
assert!(entry.is_unused());
entry.set(frame, entry::PRESENT | entry::WRITABLE);
}
// Init devices for AP

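The BSP/AP handoff above is a publish/subscribe on an atomic: the BSP stores the physical frame of the page table behind the heap's PML4 entry into HEAP_TABLE, and each AP spins until the value is non-zero before installing the same entry. A reduced, hosted sketch of that pattern, with threads standing in for the BSP and an AP and a made-up frame address (not kernel code):

use std::sync::atomic::{AtomicUsize, Ordering};
use std::thread;

static HEAP_TABLE: AtomicUsize = AtomicUsize::new(0);

fn main() {
    let ap = thread::spawn(|| {
        // AP side: wait until the BSP has published the table's frame address.
        while HEAP_TABLE.load(Ordering::SeqCst) == 0 {
            std::hint::spin_loop(); // stands in for interrupt::pause()
        }
        HEAP_TABLE.load(Ordering::SeqCst)
    });
    // BSP side: publish the (hypothetical) physical frame address once the table exists.
    HEAP_TABLE.store(0x12345000, Ordering::SeqCst);
    assert_eq!(ap.join().unwrap(), 0x12345000);
}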
View file

@@ -58,9 +58,9 @@ startup_arch:
rep stosd
xor edi, edi
;Link first PML4 and third to last PML4 to PDP
;Link first PML4 and second to last PML4 to PDP
mov DWORD [es:edi], 0x71000 | 1 << 1 | 1
mov DWORD [es:edi + 509*8], 0x71000 | 1 << 1 | 1
mov DWORD [es:edi + 510*8], 0x71000 | 1 << 1 | 1
add edi, 0x1000
;Link last PML4 to PML4
mov DWORD [es:edi - 8], 0x70000 | 1 << 1 | 1
@@ -120,8 +120,7 @@ long_mode:
mov rsp, 0x0009F000
;rust init
xor rax, rax
mov eax, [kernel_base + 0x18]
mov rax, [kernel_base + 0x18]
jmp rax
long_mode_ap:
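For reference, 0x71000 | 1 << 1 | 1 is the PDP's physical frame with the WRITABLE (bit 1) and PRESENT (bit 0) flags set, and the three stores wire up PML4[0] (identity map), PML4[510] (the KERNEL_OFFSET slot after this commit) and PML4[511] (the recursive slot). A small sketch of the flag composition (not part of the commit):

fn main() {
    const PRESENT: u64 = 1 << 0;
    const WRITABLE: u64 = 1 << 1;
    let entry: u64 = 0x71000 | WRITABLE | PRESENT;
    assert_eq!(entry, 0x71003); // the value the startup code writes into those PML4 slots
}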