Setup independent page tables, map heap to a single location
parent 7aafc165ec
commit 0501b2580d
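This commit makes every CPU build its own page table: the APs now call paging::init just like the BSP, the old paging::init_ap / gdt::init_ap / idt::init_ap path that reused the BSP's tables is removed, and paging::init gives each CPU a freshly mapped copy of the thread-local .tdata/.tbss segments added to the linker script. The kernel heap moves to 0xffff_ff00_0000_0000 and is shared across those otherwise independent tables through a single P4 entry: the BSP allocates one frame, publishes its physical address in the HEAP_FRAME atomic, and each AP spins until it is non-zero and then installs the same frame into its own P4 slot for the heap. That only works because the whole heap range sits under one P4 entry, which both kstart and kstart_ap assert. Below is a minimal standalone sketch of that invariant, using HEAP_START and HEAP_SIZE from this diff and the same index arithmetic as Page::p4_index; the PAGE_SIZE constant and the p4_index helper are illustrative stand-ins, not the kernel's own items.

// Standalone check of the invariant the shared heap mapping relies on:
// the entire heap range must fall under a single P4 (PML4) entry, so one
// frame published through HEAP_FRAME can back the heap's P4 slot on every CPU.
const PAGE_SIZE: usize = 4096;                   // assumed 4 KiB pages
const HEAP_START: usize = 0xffff_ff00_0000_0000; // value set in this commit
const HEAP_SIZE: usize = 16 * 1024 * 1024;       // 16 MB, as in the allocator

// Same arithmetic as Page::p4_index: the page number is address / PAGE_SIZE,
// and the P4 slot is bits 27..36 of that number (bits 39..48 of the address).
fn p4_index(address: usize) -> usize {
    ((address / PAGE_SIZE) >> 27) & 0o777
}

fn main() {
    let start = p4_index(HEAP_START);
    let end = p4_index(HEAP_START + HEAP_SIZE - 1);
    // Both ends resolve to the same P4 slot (510), consistent with the
    // "below the recursive page mapping" comment on HEAP_START, so one
    // shared frame for that slot covers the whole heap.
    assert_eq!(start, end);
    println!("heap P4 index: {} (start) == {} (end)", start, end);
}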
@@ -12,7 +12,7 @@ extern crate linked_list_allocator;
 #[macro_use]
 extern crate lazy_static;
 
-pub const HEAP_START: usize = 0x1_0000_0000; // Put at end of 4GB
+pub const HEAP_START: usize = 0xffff_ff00_0000_0000; // Put at end of memory, below the recursive page mapping
 pub const HEAP_SIZE: usize = 16 * 1024 * 1024; // 16 MB
 
 lazy_static! {

@@ -72,10 +72,6 @@ pub unsafe fn init() {
 
     GDT[GDT_TSS] = GdtEntry::new(&TSS as *const _ as u32, mem::size_of::<TaskStateSegment>() as u32, GDT_A_PRESENT | GDT_A_RING_3 | GDT_A_TSS_AVAIL, 0);
 
-    init_ap();
-}
-
-pub unsafe fn init_ap() {
     dtables::lgdt(&GDTR);
 
     segmentation::load_cs(SegmentSelector::new(GDT_KERNEL_CODE as u16));

@@ -1,15 +1,19 @@
+use core::mem;
+use x86::dtables::{self, DescriptorTablePointer};
 
 use interrupt::halt;
 
-pub static mut IDTR: IdtDescriptor = IdtDescriptor {
-    size: 0,
-    offset: 0
+pub static mut IDTR: DescriptorTablePointer = DescriptorTablePointer {
+    limit: 0,
+    base: 0
 };
 
 pub static mut IDT: [IdtEntry; 256] = [IdtEntry::new(); 256];
 
 pub unsafe fn init() {
+    IDTR.limit = (IDT.len() * mem::size_of::<IdtEntry>() - 1) as u16;
+    IDTR.base = IDT.as_ptr() as u64;
 
     for entry in IDT[0..32].iter_mut() {
         entry.set_flags(IDT_PRESENT | IDT_RING_0 | IDT_INTERRUPT);
         entry.set_offset(8, exception as usize);

@@ -21,13 +25,8 @@ pub unsafe fn init() {
         entry.set_offset(8, blank as usize);
     }
     IDT[0x80].set_offset(8, syscall as usize);
-    IDTR.set_slice(&IDT);
 
-    init_ap();
-}
-
-pub unsafe fn init_ap() {
-    IDTR.load();
+    dtables::lidt(&IDTR);
 }
 
 interrupt!(blank, {

@@ -6,6 +6,7 @@
 #![feature(core_intrinsics)]
 #![feature(lang_items)]
 #![feature(naked_functions)]
+#![feature(thread_local)]
 #![feature(unique)]
 #![no_std]
 

@@ -32,7 +32,10 @@ impl Mapper {
         let mut p2 = p3.next_table_create(page.p3_index());
         let mut p1 = p2.next_table_create(page.p2_index());
 
-        assert!(p1[page.p1_index()].is_unused());
+        assert!(p1[page.p1_index()].is_unused(),
+                "Set to {:X}: {:?}, requesting {:X}: {:?}",
+                p1[page.p1_index()].address().get(), p1[page.p1_index()].flags(),
+                frame.start_address().get(), flags);
         p1[page.p1_index()].set(frame, flags | entry::PRESENT);
     }
 

@@ -35,6 +35,14 @@ pub unsafe fn init(stack_start: usize, stack_end: usize) -> ActivePageTable {
         static mut __data_start: u8;
         /// The ending byte of the _.data_ segment.
         static mut __data_end: u8;
+        /// The starting byte of the thread data segment
+        static mut __tdata_start: u8;
+        /// The ending byte of the thread data segment
+        static mut __tdata_end: u8;
+        /// The starting byte of the thread BSS segment
+        static mut __tbss_start: u8;
+        /// The ending byte of the thread BSS segment
+        static mut __tbss_end: u8;
         /// The starting byte of the _.bss_ (uninitialized data) segment.
         static mut __bss_start: u8;
         /// The ending byte of the _.bss_ (uninitialized data) segment.

@@ -51,6 +59,7 @@ pub unsafe fn init(stack_start: usize, stack_end: usize) -> ActivePageTable {
     };
 
     active_table.with(&mut new_table, &mut temporary_page, |mapper| {
+        {
             let mut remap = |start: usize, end: usize, flags: EntryFlags| {
                 let start_frame = Frame::containing_address(PhysicalAddress::new(start));
                 let end_frame = Frame::containing_address(PhysicalAddress::new(end - 1));

@@ -60,7 +69,7 @@ pub unsafe fn init(stack_start: usize, stack_end: usize) -> ActivePageTable {
             };
 
             // Remap stack writable, no execute
-            remap(stack_start, stack_end, PRESENT | WRITABLE | NO_EXECUTE);
+            remap(stack_start, stack_end, PRESENT | NO_EXECUTE | WRITABLE);
 
             // Remap a section with `flags`
             let mut remap_section = |start: &u8, end: &u8, flags: EntryFlags| {

@@ -74,40 +83,48 @@ pub unsafe fn init(stack_start: usize, stack_end: usize) -> ActivePageTable {
             remap_section(& __data_start, & __data_end, PRESENT | NO_EXECUTE | WRITABLE);
             // Remap bss writable, no execute
             remap_section(& __bss_start, & __bss_end, PRESENT | NO_EXECUTE | WRITABLE);
+        }
     });
 
     active_table.switch(new_table);
 
-    active_table
-}
-
-/// Initialize paging for AP
-pub unsafe fn init_ap(stack_start: usize, stack_end: usize, bsp_page_table: usize) -> ActivePageTable {
-    let mut active_table = ActivePageTable::new();
-
-    let mut temporary_page = TemporaryPage::new(Page::containing_address(VirtualAddress::new(0x8_0000_0000)));
-
-    let mut new_table = {
-        let frame = Frame::containing_address(PhysicalAddress::new(bsp_page_table));
-        InactivePageTable {
-            p4_frame: frame
-        }
-    };
-
-    active_table.with(&mut new_table, &mut temporary_page, |mapper| {
-        let mut remap = |start: usize, end: usize, flags: EntryFlags| {
-            let start_frame = Frame::containing_address(PhysicalAddress::new(start));
-            let end_frame = Frame::containing_address(PhysicalAddress::new(end - 1));
-            for frame in Frame::range_inclusive(start_frame, end_frame) {
-                mapper.identity_map(frame, flags);
-            }
-        };
-
-        // Remap stack writable, no execute
-        remap(stack_start, stack_end, PRESENT | WRITABLE | NO_EXECUTE);
-    });
-
-    active_table.switch(new_table);
-
+    // Map and copy TDATA
+    {
+        temporary_page.map(allocate_frame().expect("no more frames"), PRESENT | NO_EXECUTE | WRITABLE, &mut active_table);
+
+        let start = & __tbss_start as *const _ as usize;
+        let end = & __tbss_end as *const _ as usize;
+        let start_page = Page::containing_address(VirtualAddress::new(start));
+        let end_page = Page::containing_address(VirtualAddress::new(end - 1));
+        for page in Page::range_inclusive(start_page, end_page) {
+            // Copy master to temporary page
+            {
+                let frame = Frame::containing_address(PhysicalAddress::new(page.start_address().get()));
+                active_table.identity_map(frame, PRESENT | NO_EXECUTE);
+                ::externs::memcpy(temporary_page.start_address().get() as *mut u8, page.start_address().get() as *const u8, 4096);
+                active_table.unmap(page);
+            }
+            // Copy temporary page to CPU copy
+            {
+                active_table.map(page, PRESENT | NO_EXECUTE | WRITABLE);
+                ::externs::memcpy(page.start_address().get() as *mut u8, temporary_page.start_address().get() as *const u8, 4096);
+            }
+        }
+
+        temporary_page.unmap(&mut active_table);
+    }
+
+    // Map and clear TBSS
+    {
+        let start = & __tbss_start as *const _ as usize;
+        let end = & __tbss_end as *const _ as usize;
+        let start_page = Page::containing_address(VirtualAddress::new(start));
+        let end_page = Page::containing_address(VirtualAddress::new(end - 1));
+        for page in Page::range_inclusive(start_page, end_page) {
+            active_table.map(page, PRESENT | NO_EXECUTE | WRITABLE);
+            ::externs::memset(page.start_address().get() as *mut u8, 0, 4096);
+        }
+    }
+
     active_table
 }
 
@@ -237,19 +254,19 @@ impl Page {
         VirtualAddress::new(self.number * PAGE_SIZE)
     }
 
-    fn p4_index(&self) -> usize {
+    pub fn p4_index(&self) -> usize {
         (self.number >> 27) & 0o777
     }
 
-    fn p3_index(&self) -> usize {
+    pub fn p3_index(&self) -> usize {
         (self.number >> 18) & 0o777
     }
 
-    fn p2_index(&self) -> usize {
+    pub fn p2_index(&self) -> usize {
         (self.number >> 9) & 0o777
     }
 
-    fn p1_index(&self) -> usize {
+    pub fn p1_index(&self) -> usize {
         (self.number >> 0) & 0o777
     }
 

@@ -63,10 +63,10 @@ impl<L> Table<L> where L: HierarchicalLevel {
 
     pub fn next_table_create(&mut self, index: usize) -> &mut Table<L::NextLevel> {
         if self.next_table(index).is_none() {
-            assert!(!self.entries[index].flags().contains(HUGE_PAGE),
+            assert!(!self[index].flags().contains(HUGE_PAGE),
                     "mapping code does not support huge pages");
             let frame = allocate_frame().expect("no frames available");
-            self.entries[index].set(frame, PRESENT | WRITABLE);
+            self[index].set(frame, PRESENT | WRITABLE);
             self.next_table_mut(index).unwrap().zero();
         }
         self.next_table_mut(index).unwrap()

@@ -18,6 +18,10 @@ impl TemporaryPage {
         }
     }
 
+    pub fn start_address (&self) -> VirtualAddress {
+        self.page.start_address()
+    }
+
     /// Maps the temporary page to the given frame in the active table.
     /// Returns the start address of the temporary page.
     pub fn map(&mut self, frame: Frame, flags: EntryFlags, active_table: &mut ActivePageTable) -> VirtualAddress {

@@ -4,15 +4,14 @@
 /// defined in other files inside of the `arch` module
 
 use core::sync::atomic::{AtomicBool, ATOMIC_BOOL_INIT, AtomicUsize, ATOMIC_USIZE_INIT, Ordering};
-use x86::controlregs;
 
 use acpi;
 use allocator::{HEAP_START, HEAP_SIZE};
 use externs::memset;
 use gdt;
 use idt;
-use memory;
-use paging::{self, entry, Page, VirtualAddress};
+use memory::{self, Frame};
+use paging::{self, entry, Page, PhysicalAddress, VirtualAddress};
 
 /// Test of zero values in BSS.
 static BSS_TEST_ZERO: usize = 0;

@@ -21,7 +20,7 @@ static BSS_TEST_NONZERO: usize = 0xFFFFFFFFFFFFFFFF;
 
 static AP_COUNT: AtomicUsize = ATOMIC_USIZE_INIT;
 static BSP_READY: AtomicBool = ATOMIC_BOOL_INIT;
-static BSP_PAGE_TABLE: AtomicUsize = ATOMIC_USIZE_INIT;
+static HEAP_FRAME: AtomicUsize = ATOMIC_USIZE_INIT;
 
 extern {
     /// Kernel main function

@@ -55,12 +54,6 @@ pub unsafe extern fn kstart() -> ! {
             debug_assert_eq!(BSS_TEST_NONZERO, 0xFFFFFFFFFFFFFFFF);
         }
 
-        // Set up GDT
-        gdt::init();
-
-        // Set up IDT
-        idt::init();
-
         // Initialize memory management
         memory::init(0, &__bss_end as *const u8 as usize);
 

@@ -71,21 +64,46 @@ pub unsafe extern fn kstart() -> ! {
         // Initialize paging
         let mut active_table = paging::init(stack_start, stack_end);
 
+        // Set up GDT
+        gdt::init();
+
+        // Set up IDT
+        idt::init();
+
         // Reset AP variables
         AP_COUNT.store(0, Ordering::SeqCst);
         BSP_READY.store(false, Ordering::SeqCst);
-        BSP_PAGE_TABLE.store(controlregs::cr3() as usize, Ordering::SeqCst);
+        HEAP_FRAME.store(0, Ordering::SeqCst);
 
         // Read ACPI tables, starts APs
        acpi::init(&mut active_table);
 
         // Map heap
         {
             let heap_start_page = Page::containing_address(VirtualAddress::new(HEAP_START));
             let heap_end_page = Page::containing_address(VirtualAddress::new(HEAP_START + HEAP_SIZE-1));
 
+            {
+                let index = heap_start_page.p4_index();
+                println!("HEAP: {} {} {} {}", index, heap_start_page.p3_index(), heap_start_page.p2_index(), heap_start_page.p1_index());
+                assert_eq!(index, heap_end_page.p4_index());
+
+                let frame = memory::allocate_frame().expect("no frames available");
+                HEAP_FRAME.store(frame.start_address().get(), Ordering::SeqCst);
+
+                let p4 = active_table.p4_mut();
+                {
+                    let entry = &mut p4[index];
+                    assert!(entry.is_unused());
+                    entry.set(frame, entry::PRESENT | entry::WRITABLE);
+                }
+                p4.next_table_mut(index).unwrap().zero();
+            }
+
             for page in Page::range_inclusive(heap_start_page, heap_end_page) {
                 active_table.map(page, entry::WRITABLE | entry::NO_EXECUTE);
             }
         }
 
         BSP_READY.store(true, Ordering::SeqCst);
     }
 
@@ -96,15 +114,34 @@ pub unsafe extern fn kstart() -> ! {
 /// Entry to rust for an AP
 pub unsafe extern fn kstart_ap(stack_start: usize, stack_end: usize) -> ! {
     {
-        // Set up GDT for AP
-        gdt::init_ap();
-
-        // Set up IDT for aP
-        idt::init_ap();
-
         // Initialize paging
-        //let mut active_table =
-        paging::init_ap(stack_start, stack_end, BSP_PAGE_TABLE.load(Ordering::SeqCst));
+        let mut active_table = paging::init(stack_start, stack_end);
+
+        // Set up GDT for AP
+        gdt::init();
+
+        // Set up IDT for AP
+        idt::init();
+
+        // Map heap
+        {
+            let heap_start_page = Page::containing_address(VirtualAddress::new(HEAP_START));
+            let heap_end_page = Page::containing_address(VirtualAddress::new(HEAP_START + HEAP_SIZE-1));
+
+            {
+                assert_eq!(heap_start_page.p4_index(), heap_end_page.p4_index());
+
+                while HEAP_FRAME.load(Ordering::SeqCst) == 0 {
+                    asm!("pause" : : : : "intel", "volatile");
+                }
+                let frame = Frame::containing_address(PhysicalAddress::new(HEAP_FRAME.load(Ordering::SeqCst)));
+
+                let p4 = active_table.p4_mut();
+                let entry = &mut p4[heap_start_page.p4_index()];
+                assert!(entry.is_unused());
+                entry.set(frame, entry::PRESENT | entry::WRITABLE);
+            }
+        }
     }
 
     let ap_number = AP_COUNT.fetch_add(1, Ordering::SeqCst);

@@ -28,6 +28,10 @@ SECTIONS {
         *(.data*)
         . = ALIGN(4096);
         __data_end = .;
+        __tdata_start = .;
+        *(.tdata*)
+        . = ALIGN(4096);
+        __tdata_end = .;
     }
 
     .bss : AT(ADDR(.bss) - KERNEL_OFFSET) {

@@ -35,6 +39,10 @@ SECTIONS {
         *(.bss*)
         . = ALIGN(4096);
         __bss_end = .;
+        __tbss_start = .;
+        *(.tbss*)
+        . = ALIGN(4096);
+        __tbss_end = .;
     }
 
     /DISCARD/ : {

@@ -120,7 +120,7 @@ pub extern fn kmain() {
 
 #[no_mangle]
 pub extern fn kmain_ap(id: usize) {
-    print!("{}", format!("ASP {}\n", id));
+    print!("{}", format!("AP {}\n", id));
 
     loop {
         unsafe { interrupt::enable_and_halt() }