Reorganize kernel memory mappings
parent 7ace92c60c
commit ce717ff277
@@ -6,7 +6,3 @@ version = "0.1.0"
 [dependencies]
 linked_list_allocator = "0.2.0"
 spin = "0.3.5"
-
-[dependencies.lazy_static]
-version = "0.2.1"
-features = ["spin_no_std"]
@@ -9,26 +9,29 @@ use linked_list_allocator::Heap;
 extern crate spin;
 extern crate linked_list_allocator;
-#[macro_use]
-extern crate lazy_static;
 
-pub const HEAP_START: usize = 0xffff_ff00_0000_0000; // Put at end of memory, below the recursive page mapping
-pub const HEAP_SIZE: usize = 64 * 1024 * 1024; // 128 MB
+static HEAP: Mutex<Option<Heap>> = Mutex::new(None);
 
-lazy_static! {
-    static ref HEAP: Mutex<Heap> = Mutex::new(unsafe {
-        Heap::new(HEAP_START, HEAP_SIZE)
-    });
+pub unsafe fn init(offset: usize, size: usize) {
+    *HEAP.lock() = Some(Heap::new(offset, size));
 }
 
 #[no_mangle]
 pub extern fn __rust_allocate(size: usize, align: usize) -> *mut u8 {
-    HEAP.lock().allocate_first_fit(size, align).expect("out of memory")
+    if let Some(ref mut heap) = *HEAP.lock() {
+        heap.allocate_first_fit(size, align).expect("out of memory")
+    } else {
+        panic!("__rust_allocate: heap not initialized");
+    }
 }
 
 #[no_mangle]
 pub extern fn __rust_deallocate(ptr: *mut u8, size: usize, align: usize) {
-    unsafe { HEAP.lock().deallocate(ptr, size, align) };
+    if let Some(ref mut heap) = *HEAP.lock() {
+        unsafe { heap.deallocate(ptr, size, align) };
+    } else {
+        panic!("__rust_deallocate: heap not initialized");
+    }
 }
 
 #[no_mangle]
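The allocator now has to be told where the heap lives instead of assuming a fixed HEAP_START. A minimal standalone sketch of this late-initialization pattern (not part of the commit; Heap is reduced to a placeholder struct and std's Mutex stands in for spin::Mutex):

    use std::sync::Mutex;

    // Placeholder for linked_list_allocator::Heap.
    struct Heap { offset: usize, size: usize }

    static HEAP: Mutex<Option<Heap>> = Mutex::new(None);

    // Mirrors allocator::init in the diff: called once the heap pages are mapped.
    fn init(offset: usize, size: usize) {
        *HEAP.lock().unwrap() = Some(Heap { offset, size });
    }

    // Mirrors the shape of __rust_allocate: fail loudly if init was never called.
    fn allocate(size: usize) -> usize {
        if let Some(ref heap) = *HEAP.lock().unwrap() {
            assert!(size <= heap.size);
            heap.offset // placeholder "allocation": just return the heap base
        } else {
            panic!("allocate: heap not initialized");
        }
    }

    fn main() {
        init(0xffff_fe00_0000_0000, 64 * 1024 * 1024);
        println!("{:#x}", allocate(128));
    }

The payoff is ordering: an allocation attempted before init now panics instead of handing out addresses in a region that was never mapped.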
@@ -19,6 +19,18 @@ extern crate ransid;
 extern crate spin;
 pub extern crate x86;
 
+// Because the memory map is so important to not be aliased, it is defined here, in one place
+/// Offset of recursive paging
+pub const RECURSIVE_PAGE_OFFSET: usize = 0xffff_ff80_0000_0000;
+/// Offset of kernel
+pub const KERNEL_OFFSET: usize = 0xffff_ff00_0000_0000;
+/// Offset to memory allocation bitmap
+pub const BITMAP_OFFSET: usize = 0xffff_fe80_0000_0000;
+/// Offset to kernel heap
+pub const HEAP_OFFSET: usize = 0xffff_fe00_0000_0000;
+/// Size of heap
+pub const HEAP_SIZE: usize = 64 * 1024 * 1024; // 128 MB
+
 /// Print to console
 #[macro_export]
 macro_rules! print {
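Each of these regions is 512 GiB aligned, so every one of them lands in its own top-level (PML4) slot. A quick standalone check, assuming standard 4-level x86_64 paging where the PML4 index is bits 39..=47 of the virtual address (not part of the commit):

    fn pml4_index(addr: usize) -> usize {
        (addr >> 39) & 0x1ff
    }

    fn main() {
        assert_eq!(pml4_index(0xffff_ff80_0000_0000), 511); // RECURSIVE_PAGE_OFFSET
        assert_eq!(pml4_index(0xffff_ff00_0000_0000), 510); // KERNEL_OFFSET
        assert_eq!(pml4_index(0xffff_fe80_0000_0000), 509); // BITMAP_OFFSET
        assert_eq!(pml4_index(0xffff_fe00_0000_0000), 508); // HEAP_OFFSET
    }

This is the indexing that the bootstrap assembly change further down (slot 509 -> 510) keeps in sync with the new KERNEL_OFFSET.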
@@ -1,7 +1,7 @@
 ENTRY(kstart)
 OUTPUT_FORMAT(elf64-x86-64)
 
-/* KERNEL_OFFSET = 0xfffffe8000100000; */
+/* KERNEL_OFFSET = 0xffffff0000100000; */
 KERNEL_OFFSET = 0x100000;
 
 SECTIONS {
@@ -76,6 +76,6 @@ impl FrameAllocator for AreaFrameAllocator {
     }
 
     fn deallocate_frame(&mut self, frame: Frame) {
-        //println!("Leak frame: {:?}", frame);
+        //panic!("AreaFrameAllocator::deallocate_frame: not supported: {:?}", frame);
     }
 }
arch/x86_64/src/memory/bitmap_allocator.rs (new file, 39 lines)

@@ -0,0 +1,39 @@
+use super::AreaFrameAllocator;
+
+const BITMAP_RESERVED: usize = 0;
+const BITMAP_FREE: usize = 1;
+const BITMAP_USED: usize = 2;
+
+pub struct BitmapAllocator {
+    bitmap: &'static mut [u8]
+}
+
+impl BitmapAllocator {
+    pub fn new(area_frame_allocator: AreaFrameAllocator) -> BitmapAllocator {
+        BitmapAllocator {
+            bitmap: &mut []
+        }
+    }
+}
+
+impl FrameAllocator for BitmapAllocator {
+    fn allocate_frame(&mut self) -> Option<Frame> {
+        let mut i = 0;
+        while i < self.bitmap.len() {
+            if self.bitmap[i] == BITMAP_FREE {
+                self.bitmap[i] = BITMAP_USED;
+                return Some(Frame::containing_address(PhysicalAddress::new(i * 4096)));
+            }
+        }
+        None
+    }
+
+    fn deallocate_frame(&mut self, frame: Frame) {
+        let i = frame.starting_address().get()/4096;
+        if i < self.bitmap.len() && self.bitmap[i] == BITMAP_USED {
+            self.bitmap[i] = BITMAP_FREE;
+        } else {
+            panic!("BitmapAllocator::deallocate_frame: unowned frame {:?}", frame);
+        }
+    }
+}
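As committed, this new file reads like scaffolding rather than working code: the scan in allocate_frame never increments i, the usize constants are compared against u8 bitmap entries, FrameAllocator / Frame / PhysicalAddress are not imported, and deallocate_frame calls starting_address() where the rest of the diff uses start_address(). A standalone sketch of the intended first-fit scan with the missing increment added (my reading, not part of the commit; the frame types are reduced to a plain physical address):

    const BITMAP_FREE: u8 = 1;
    const BITMAP_USED: u8 = 2;

    // First-fit scan over a frame bitmap; returns the physical address of the
    // frame that was claimed, or None if every frame is reserved or used.
    fn allocate_frame(bitmap: &mut [u8]) -> Option<usize> {
        let mut i = 0;
        while i < bitmap.len() {
            if bitmap[i] == BITMAP_FREE {
                bitmap[i] = BITMAP_USED;
                return Some(i * 4096);
            }
            i += 1; // the committed loop is missing this step
        }
        None
    }

    fn main() {
        let mut bitmap = [BITMAP_USED, BITMAP_FREE, BITMAP_FREE];
        assert_eq!(allocate_frame(&mut bitmap), Some(4096));
        assert_eq!(allocate_frame(&mut bitmap), Some(8192));
        assert_eq!(allocate_frame(&mut bitmap), None);
    }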
@@ -3,11 +3,11 @@
 
 pub use paging::{PAGE_SIZE, PhysicalAddress};
 
-use self::area_frame_alloc::AreaFrameAllocator;
+use self::area_frame_allocator::AreaFrameAllocator;
 
 use spin::Mutex;
 
-pub mod area_frame_alloc;
+pub mod area_frame_allocator;
 
 /// The current memory map. It's size is maxed out to 512 entries, due to it being
 /// from 0x500 to 0x5000 (800 is the absolute total)
@@ -23,7 +23,7 @@ bitflags! {
     }
 }
 
-pub const ADDRESS_MASK: usize = 0x000fffff_fffff000;
+pub const ADDRESS_MASK: usize = 0x000f_ffff_ffff_f000;
 
 impl Entry {
     /// Is the entry unused?
@@ -21,9 +21,6 @@ pub const ENTRY_COUNT: usize = 512;
 /// Size of pages
 pub const PAGE_SIZE: usize = 4096;
 
-/// Offset of kernel from physical
-pub const KERNEL_OFFSET: usize = 0xfffffe8000000000;
-
 /// Initialize paging
 pub unsafe fn init(stack_start: usize, stack_end: usize) -> ActivePageTable {
     extern {

@@ -71,8 +68,8 @@ pub unsafe fn init(stack_start: usize, stack_end: usize) -> ActivePageTable {
             for frame in Frame::range_inclusive(start_frame, end_frame) {
                 mapper.identity_map(frame.clone(), flags);
 
-                let page = Page::containing_address(VirtualAddress::new(frame.start_address().get() + KERNEL_OFFSET));
-                mapper.map_to(page, frame, flags);
+                //let page = Page::containing_address(VirtualAddress::new(frame.start_address().get() + ::KERNEL_OFFSET));
+                //mapper.map_to(page, frame, flags);
             }
         }
     };
@@ -9,7 +9,7 @@ use memory::allocate_frame;
 use super::entry::*;
 use super::ENTRY_COUNT;
 
-pub const P4: *mut Table<Level4> = 0xffffffff_fffff000 as *mut _;
+pub const P4: *mut Table<Level4> = 0xffff_ffff_ffff_f000 as *mut _;
 
 pub trait TableLevel {}
 
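Only the digit grouping of P4 changes here; the value itself is the address reached by taking the recursive PML4 slot (511) at every level of the walk. A standalone check of that derivation, assuming the usual recursive-paging construction and 48-bit canonical sign extension (not part of the commit):

    // Walk "slot 511" at all four levels, then sign-extend bit 47 to get a
    // canonical address; the result is the virtual address of the PML4 itself.
    fn recursive_p4_addr(recursive_slot: usize) -> usize {
        let addr = (recursive_slot << 39)
            | (recursive_slot << 30)
            | (recursive_slot << 21)
            | (recursive_slot << 12);
        // sign-extend bit 47 into bits 48..=63
        if addr & (1 << 47) != 0 { addr | 0xffff_0000_0000_0000 } else { addr }
    }

    fn main() {
        assert_eq!(recursive_p4_addr(511), 0xffff_ffff_ffff_f000);
    }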
@@ -6,7 +6,7 @@
 use core::sync::atomic::{AtomicBool, ATOMIC_BOOL_INIT, AtomicUsize, ATOMIC_USIZE_INIT, Ordering};
 
 use acpi;
-use allocator::{HEAP_START, HEAP_SIZE};
+use allocator;
 use device;
 use externs::memset;
 use gdt;
@@ -29,7 +29,7 @@ static mut TDATA_TEST_NONZERO: usize = 0xFFFFFFFFFFFFFFFF;
 static AP_COUNT: AtomicUsize = ATOMIC_USIZE_INIT;
 pub static AP_READY: AtomicBool = ATOMIC_BOOL_INIT;
 static BSP_READY: AtomicBool = ATOMIC_BOOL_INIT;
-static HEAP_FRAME: AtomicUsize = ATOMIC_USIZE_INIT;
+static HEAP_TABLE: AtomicUsize = ATOMIC_USIZE_INIT;
 
 extern {
     /// Kernel main function
@@ -97,32 +97,32 @@ pub unsafe extern fn kstart() -> ! {
         AP_COUNT.store(0, Ordering::SeqCst);
         AP_READY.store(false, Ordering::SeqCst);
         BSP_READY.store(false, Ordering::SeqCst);
-        HEAP_FRAME.store(0, Ordering::SeqCst);
+        HEAP_TABLE.store(0, Ordering::SeqCst);
 
         // Map heap
         {
-            let heap_start_page = Page::containing_address(VirtualAddress::new(HEAP_START));
-            let heap_end_page = Page::containing_address(VirtualAddress::new(HEAP_START + HEAP_SIZE-1));
-
-            {
-                let index = heap_start_page.p4_index();
-                assert_eq!(index, heap_end_page.p4_index());
-
-                let frame = memory::allocate_frame().expect("no frames available");
-                HEAP_FRAME.store(frame.start_address().get(), Ordering::SeqCst);
-
-                let p4 = active_table.p4_mut();
-                {
-                    let entry = &mut p4[index];
-                    assert!(entry.is_unused());
-                    entry.set(frame, entry::PRESENT | entry::WRITABLE);
-                }
-                p4.next_table_mut(index).unwrap().zero();
-            }
+            // Map heap pages
+            let heap_start_page = Page::containing_address(VirtualAddress::new(::HEAP_OFFSET));
+            let heap_end_page = Page::containing_address(VirtualAddress::new(::HEAP_OFFSET + ::HEAP_SIZE-1));
 
             for page in Page::range_inclusive(heap_start_page, heap_end_page) {
                 active_table.map(page, entry::WRITABLE | entry::NO_EXECUTE);
             }
 
+            // Init the allocator
+            allocator::init(::HEAP_OFFSET, ::HEAP_SIZE);
+
+            // Send heap page table to APs
+            let index = heap_start_page.p4_index();
+
+            let p4 = active_table.p4();
+            {
+                let entry = &p4[index];
+                if let Some(frame) = entry.pointed_frame() {
+                    HEAP_TABLE.store(frame.start_address().get(), Ordering::SeqCst);
+                } else {
+                    panic!("heap does not have PML4 entry");
+                }
+            }
         }
 
         // Initialize devices
@@ -167,25 +167,20 @@ pub unsafe extern fn kstart_ap(stack_start: usize, stack_end: usize) -> ! {
            assert_eq!(TDATA_TEST_NONZERO, 0xFFFFFFFFFFFFFFFE);
        }
 
-        // Map heap
+        // Copy heap PML4
         {
-            let heap_start_page = Page::containing_address(VirtualAddress::new(HEAP_START));
-            let heap_end_page = Page::containing_address(VirtualAddress::new(HEAP_START + HEAP_SIZE-1));
+            let page = Page::containing_address(VirtualAddress::new(::HEAP_OFFSET));
 
-            {
-                assert_eq!(heap_start_page.p4_index(), heap_end_page.p4_index());
-
-                while HEAP_FRAME.load(Ordering::SeqCst) == 0 {
-                    interrupt::pause();
-                }
-                let frame = Frame::containing_address(PhysicalAddress::new(HEAP_FRAME.load(Ordering::SeqCst)));
-
-                let p4 = active_table.p4_mut();
-                let entry = &mut p4[heap_start_page.p4_index()];
-                assert!(entry.is_unused());
-                entry.set(frame, entry::PRESENT | entry::WRITABLE);
-            }
+            while HEAP_TABLE.load(Ordering::SeqCst) == 0 {
+                interrupt::pause();
+            }
+            let frame = Frame::containing_address(PhysicalAddress::new(HEAP_TABLE.load(Ordering::SeqCst)));
+
+            let p4 = active_table.p4_mut();
+            let entry = &mut p4[page.p4_index()];
+            assert!(entry.is_unused());
+            entry.set(frame, entry::PRESENT | entry::WRITABLE);
         }
 
         // Init devices for AP
         device::init_ap(&mut active_table);
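The two hunks above replace "each AP maps the heap itself" with a publish/wait handoff: the BSP maps the heap once and publishes the physical address of the heap's PML4-level table through HEAP_TABLE; every AP spins until it sees a non-zero value and installs that same entry, so all CPUs share one heap mapping. A standalone sketch of the synchronization shape, with threads standing in for CPUs and a dummy physical address (not from the commit):

    use std::sync::atomic::{AtomicUsize, Ordering};
    use std::{hint, thread};

    static HEAP_TABLE: AtomicUsize = AtomicUsize::new(0); // 0 means "not published yet"

    fn main() {
        let ap = thread::spawn(|| {
            // AP side: wait for the BSP to publish the table frame.
            while HEAP_TABLE.load(Ordering::SeqCst) == 0 {
                hint::spin_loop(); // the kernel uses interrupt::pause()
            }
            let frame = HEAP_TABLE.load(Ordering::SeqCst);
            println!("AP: installing shared heap table at {:#x}", frame);
        });

        // BSP side: map the heap, then publish the frame of its PML4 entry.
        let heap_table_frame = 0x7_1000; // placeholder physical address
        HEAP_TABLE.store(heap_table_frame, Ordering::SeqCst);

        ap.join().unwrap();
    }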
@@ -58,9 +58,9 @@ startup_arch:
    rep stosd
 
    xor edi, edi
-    ;Link first PML4 and third to last PML4 to PDP
+    ;Link first PML4 and second to last PML4 to PDP
    mov DWORD [es:edi], 0x71000 | 1 << 1 | 1
-    mov DWORD [es:edi + 509*8], 0x71000 | 1 << 1 | 1
+    mov DWORD [es:edi + 510*8], 0x71000 | 1 << 1 | 1
    add edi, 0x1000
    ;Link last PML4 to PML4
    mov DWORD [es:edi - 8], 0x70000 | 1 << 1 | 1
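For reference, the value written into these PML4 slots is just the PDP's physical address with the present and writable bits set, and the slot that moved (509 -> 510) is exactly the PML4 index of the new KERNEL_OFFSET. A small standalone check (not from the commit):

    // The PML4 entry written by the boot code is the PDP physical address
    // plus the PRESENT (bit 0) and WRITABLE (bit 1) flags.
    const PRESENT: u64 = 1;
    const WRITABLE: u64 = 1 << 1;

    fn main() {
        let pdp_phys: u64 = 0x71000;
        assert_eq!(pdp_phys | WRITABLE | PRESENT, 0x71003);
        // Slot 510 is the PML4 index of KERNEL_OFFSET (0xffff_ff00_0000_0000),
        // matching the constants introduced earlier in this commit.
        assert_eq!((0xffff_ff00_0000_0000u64 >> 39) & 0x1ff, 510);
    }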
@@ -120,8 +120,7 @@ long_mode:
    mov rsp, 0x0009F000
 
    ;rust init
-    xor rax, rax
-    mov eax, [kernel_base + 0x18]
+    mov rax, [kernel_base + 0x18]
    jmp rax
 
long_mode_ap:
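The rust-init change replaces a zero-extended 32-bit load (xor rax, rax / mov eax, ...) with a full 64-bit load of the entry address stored at kernel_base + 0x18. A 32-bit load can only express entry points below 4 GiB, so this is presumably preparation for jumping to a kernel linked at the higher-half KERNEL_OFFSET introduced earlier in the commit.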