Move kernel heap into kernel PML4; now only a single PML4 entry needs to be copied to initialize a new table

Jeremy Soller 2016-09-14 21:28:09 -06:00
parent 83bc8a0da5
commit c000820d72
4 changed files with 138 additions and 62 deletions
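The point of the change: the kernel image and the kernel heap now share PML4 entry 510, so a fresh address space can reuse every kernel mapping by aliasing that one entry (the `init_ap` hunk below does this with `mapper.p4_mut()[510].set(...)`). A minimal sketch of the idea, with hypothetical `new_p4`/`current_p4` names:

```rust
// Sketch: share the kernel's 512 GB region with a new address space by
// aliasing a single PML4 entry instead of copying kernel and heap separately.
const KERNEL_PML4: usize = 510; // kernel image and heap both index here now

fn share_kernel(new_p4: &mut [u64; 512], current_p4: &[u64; 512]) {
    // Both tables now point at the same kernel PDPT, so kernel mappings
    // made later through either table are visible through both.
    new_p4[KERNEL_PML4] = current_p4[KERNEL_PML4];
}
```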

View file

@@ -142,7 +142,7 @@ pub unsafe fn init(active_table: &mut ActivePageTable) -> Option<Acpi> {
let start_addr = 0xE0000;
let end_addr = 0xFFFFF;
-// Map all of the ACPI RSDT space
+// Map all of the ACPI RSDP space
{
let start_frame = Frame::containing_address(PhysicalAddress::new(start_addr));
let end_frame = Frame::containing_address(PhysicalAddress::new(end_addr));
@@ -205,7 +205,7 @@ pub unsafe fn init(active_table: &mut ActivePageTable) -> Option<Acpi> {
println!("NO RSDP FOUND");
}
-// Unmap all of the ACPI RSDT space
+// Unmap all of the ACPI RSDP space
{
let start_frame = Frame::containing_address(PhysicalAddress::new(start_addr));
let end_frame = Frame::containing_address(PhysicalAddress::new(end_addr));
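The comment fix is accurate: what lives in 0xE0000-0xFFFFF is the RSDP (Root System Description Pointer), which merely points to the RSDT. Per the ACPI spec, the pointer is found by scanning that region on 16-byte boundaries for the literal signature "RSD PTR "; a hedged sketch of such a scan (not the kernel's actual search code):

```rust
/// Sketch: locate the RSDP by signature, assuming the BIOS area
/// 0xE0000..0xFFFFF is already mapped as in the hunk above.
unsafe fn find_rsdp() -> Option<usize> {
    let (start, end) = (0xE0000usize, 0xFFFFFusize);
    let mut addr = start;
    while addr + 8 <= end + 1 {
        let sig = core::slice::from_raw_parts(addr as *const u8, 8);
        if sig == b"RSD PTR " {
            return Some(addr);
        }
        addr += 16; // the RSDP is 16-byte aligned
    }
    None
}
```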

View file

@@ -22,7 +22,8 @@ pub extern crate x86;
// Because the memory map is so important to not be aliased, it is defined here, in one place
// The lower 256 PML4 entries are reserved for userspace
// Each PML4 entry references up to 512 GB of memory
-// The upper 256 are reserved for the kernel
+// The top (511) PML4 is reserved for recursive mapping
+// The second from the top (510) PML4 is reserved for the kernel
/// The size of a single PML4
pub const PML4_SIZE: usize = 0x0000_0080_0000_0000;
@@ -33,7 +34,7 @@ pub extern crate x86;
pub const KERNEL_OFFSET: usize = RECURSIVE_PAGE_OFFSET - PML4_SIZE;
/// Offset to kernel heap
-pub const KERNEL_HEAP_OFFSET: usize = KERNEL_OFFSET - PML4_SIZE;
+pub const KERNEL_HEAP_OFFSET: usize = KERNEL_OFFSET + PML4_SIZE/2;
/// Size of kernel heap
pub const KERNEL_HEAP_SIZE: usize = 64 * 1024 * 1024; // 64 MB
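The sign flip above is the core of the commit: the old `KERNEL_OFFSET - PML4_SIZE` put the heap one PML4 entry below the kernel (entry 509), so initializing a table meant copying two entries; `KERNEL_OFFSET + PML4_SIZE/2` moves it halfway into the kernel's own 512 GB region, under the same entry 510. A quick standalone check of the arithmetic, assuming `RECURSIVE_PAGE_OFFSET` is `-PML4_SIZE` (its definition sits outside this hunk):

```rust
const PML4_SIZE: usize = 0x0000_0080_0000_0000; // 512 GB per PML4 entry
const RECURSIVE_PAGE_OFFSET: usize = (-(PML4_SIZE as isize)) as usize; // assumed
const KERNEL_OFFSET: usize = RECURSIVE_PAGE_OFFSET - PML4_SIZE;
const KERNEL_HEAP_OFFSET: usize = KERNEL_OFFSET + PML4_SIZE / 2;

// PML4 index = bits 39..47 of the virtual address
fn p4_index(addr: usize) -> usize { (addr >> 39) & 0o777 }

fn main() {
    assert_eq!(p4_index(RECURSIVE_PAGE_OFFSET), 511);
    assert_eq!(p4_index(KERNEL_OFFSET), 510);
    assert_eq!(p4_index(KERNEL_HEAP_OFFSET), 510); // same entry as the kernel
    assert_eq!(p4_index(KERNEL_OFFSET - PML4_SIZE), 509); // the old heap slot
}
```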

View file

@@ -22,6 +22,59 @@ pub const ENTRY_COUNT: usize = 512;
/// Size of pages
pub const PAGE_SIZE: usize = 4096;
+/// Setup page attribute table
+unsafe fn init_pat() {
+let uncacheable = 0;
+let write_combining = 1;
+let write_through = 4;
+//let write_protected = 5;
+let write_back = 6;
+let uncached = 7;
+let pat0 = write_back;
+let pat1 = write_through;
+let pat2 = uncached;
+let pat3 = uncacheable;
+let pat4 = write_combining;
+let pat5 = pat1;
+let pat6 = pat2;
+let pat7 = pat3;
+msr::wrmsr(msr::IA32_PAT, pat7 << 56 | pat6 << 48 | pat5 << 40 | pat4 << 32
+| pat3 << 24 | pat2 << 16 | pat1 << 8 | pat0);
+}
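For reference, each PATi above is one byte of the IA32_PAT MSR (0x277), PAT0 in the lowest byte, so with these encodings (WB=6, WT=4, UC-=7, UC=0, WC=1) the `wrmsr` writes 0x0007_0401_0007_0406. A standalone check of the packing arithmetic:

```rust
fn main() {
    let (uncacheable, write_combining, write_through, write_back, uncached) =
        (0u64, 1u64, 4u64, 6u64, 7u64);
    let (pat0, pat1, pat2, pat3) = (write_back, write_through, uncached, uncacheable);
    let (pat4, pat5, pat6, pat7) = (write_combining, pat1, pat2, pat3);
    let value = pat7 << 56 | pat6 << 48 | pat5 << 40 | pat4 << 32
        | pat3 << 24 | pat2 << 16 | pat1 << 8 | pat0;
    // PAT4 = write-combining makes WC selectable via page-table flag bits.
    assert_eq!(value, 0x0007_0401_0007_0406);
}
```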
+/// Copy tdata, clear tbss, set TCB self pointer
+unsafe fn init_tcb(cpu_id: usize) -> usize {
+extern {
+/// The starting byte of the thread data segment
+static mut __tdata_start: u8;
+/// The ending byte of the thread data segment
+static mut __tdata_end: u8;
+/// The starting byte of the thread BSS segment
+static mut __tbss_start: u8;
+/// The ending byte of the thread BSS segment
+static mut __tbss_end: u8;
+}
+let tcb_offset;
+{
+let size = & __tbss_end as *const _ as usize - & __tdata_start as *const _ as usize;
+let tbss_offset = & __tbss_start as *const _ as usize - & __tdata_start as *const _ as usize;
+let start = ::KERNEL_PERCPU_OFFSET + ::KERNEL_PERCPU_SIZE * cpu_id;
+let end = start + size;
+tcb_offset = end - mem::size_of::<usize>();
+::externs::memcpy(start as *mut u8, & __tdata_start as *const u8, tbss_offset);
+::externs::memset((start + tbss_offset) as *mut u8, 0, size - tbss_offset);
+*(tcb_offset as *mut usize) = end;
+}
+tcb_offset
+}
/// Initialize paging
///
/// Returns page table and thread control block offset
@@ -53,6 +106,8 @@ pub unsafe fn init(cpu_id: usize, stack_start: usize, stack_end: usize) -> (Acti
static mut __bss_end: u8;
}
+init_pat();
let mut active_table = ActivePageTable::new();
let mut temporary_page = TemporaryPage::new(Page::containing_address(VirtualAddress::new(0x8_0000_0000)));
@@ -109,45 +164,71 @@ pub unsafe fn init(cpu_id: usize, stack_start: usize, stack_end: usize) -> (Acti
}
});
-let uncacheable = 0;
-let write_combining = 1;
-let write_through = 4;
-//let write_protected = 5;
-let write_back = 6;
-let uncached = 7;
+active_table.switch(new_table);
-let pat0 = write_back;
-let pat1 = write_through;
-let pat2 = uncached;
-let pat3 = uncacheable;
+(active_table, init_tcb(cpu_id))
+}
-let pat4 = write_combining;
-let pat5 = pat1;
-let pat6 = pat2;
-let pat7 = pat3;
+pub unsafe fn init_ap(cpu_id: usize, stack_start: usize, stack_end: usize, kernel_table: usize) -> (ActivePageTable, usize) {
+extern {
+/// The starting byte of the thread data segment
+static mut __tdata_start: u8;
+/// The ending byte of the thread data segment
+static mut __tdata_end: u8;
+/// The starting byte of the thread BSS segment
+static mut __tbss_start: u8;
+/// The ending byte of the thread BSS segment
+static mut __tbss_end: u8;
+}
-msr::wrmsr(msr::IA32_PAT, pat7 << 56 | pat6 << 48 | pat5 << 40 | pat4 << 32
-| pat3 << 24 | pat2 << 16 | pat1 << 8 | pat0);
+init_pat();
+let mut active_table = ActivePageTable::new();
+let mut temporary_page = TemporaryPage::new(Page::containing_address(VirtualAddress::new(0x8_0000_0000)));
+let mut new_table = {
+let frame = allocate_frame().expect("no more frames in paging::init new_table");
+InactivePageTable::new(frame, &mut active_table, &mut temporary_page)
+};
+active_table.with(&mut new_table, &mut temporary_page, |mapper| {
+// Copy kernel mapping
+let kernel_frame = Frame::containing_address(PhysicalAddress::new(kernel_table));
+mapper.p4_mut()[510].set(kernel_frame, entry::PRESENT | entry::WRITABLE);
+// Map tdata and tbss
+{
+let size = & __tbss_end as *const _ as usize - & __tdata_start as *const _ as usize;
+let start = ::KERNEL_PERCPU_OFFSET + ::KERNEL_PERCPU_SIZE * cpu_id;
+let end = start + size;
+let start_page = Page::containing_address(VirtualAddress::new(start));
+let end_page = Page::containing_address(VirtualAddress::new(end - 1));
+for page in Page::range_inclusive(start_page, end_page) {
+mapper.map(page, PRESENT | NO_EXECUTE | WRITABLE);
+}
+}
+let mut remap = |start: usize, end: usize, flags: EntryFlags, offset: usize| {
+if end > start {
+let start_frame = Frame::containing_address(PhysicalAddress::new(start));
+let end_frame = Frame::containing_address(PhysicalAddress::new(end - 1));
+for frame in Frame::range_inclusive(start_frame, end_frame) {
+let page = Page::containing_address(VirtualAddress::new(frame.start_address().get() + offset));
+mapper.map_to(page, frame, flags);
+}
+}
+};
+// Remap stack writable, no execute
+remap(stack_start, stack_end, PRESENT | NO_EXECUTE | WRITABLE, 0);
+});
active_table.switch(new_table);
-// Copy tdata, clear tbss, set TCB self pointer
-let tcb_offset;
-{
-let size = & __tbss_end as *const _ as usize - & __tdata_start as *const _ as usize;
-let tbss_offset = & __tbss_start as *const _ as usize - & __tdata_start as *const _ as usize;
-let start = ::KERNEL_PERCPU_OFFSET + ::KERNEL_PERCPU_SIZE * cpu_id;
-let end = start + size;
-tcb_offset = end - mem::size_of::<usize>();
-::externs::memcpy(start as *mut u8, & __tdata_start as *const u8, tbss_offset);
-::externs::memset((start + tbss_offset) as *mut u8, 0, size - tbss_offset);
-*(tcb_offset as *mut usize) = end;
-}
-(active_table, tcb_offset)
+(active_table, init_tcb(cpu_id))
}
pub struct ActivePageTable {

View file

@@ -28,7 +28,7 @@ static mut TDATA_TEST_NONZERO: usize = 0xFFFFFFFFFFFFFFFF;
pub static AP_READY: AtomicBool = ATOMIC_BOOL_INIT;
static BSP_READY: AtomicBool = ATOMIC_BOOL_INIT;
-static HEAP_TABLE: AtomicUsize = ATOMIC_USIZE_INIT;
+static KERNEL_TABLE: AtomicUsize = ATOMIC_USIZE_INIT;
extern {
/// Kernel main function
@@ -93,9 +93,9 @@ pub unsafe extern fn kstart() -> ! {
// Reset AP variables
AP_READY.store(false, Ordering::SeqCst);
BSP_READY.store(false, Ordering::SeqCst);
-HEAP_TABLE.store(0, Ordering::SeqCst);
+KERNEL_TABLE.store(0, Ordering::SeqCst);
-// Map heap
+// Setup kernel heap
{
// Map heap pages
let heap_start_page = Page::containing_address(VirtualAddress::new(::KERNEL_HEAP_OFFSET));
@@ -106,24 +106,26 @@ pub unsafe extern fn kstart() -> ! {
// Init the allocator
allocator::init(::KERNEL_HEAP_OFFSET, ::KERNEL_HEAP_SIZE);
}
-// Send heap page table to APs
-let index = heap_start_page.p4_index();
+// Initialize devices
+device::init(&mut active_table);
+// Send kernel page table to APs
+{
+let index = Page::containing_address(VirtualAddress::new(::KERNEL_OFFSET)).p4_index();
let p4 = active_table.p4();
{
let entry = &p4[index];
if let Some(frame) = entry.pointed_frame() {
-HEAP_TABLE.store(frame.start_address().get(), Ordering::SeqCst);
+KERNEL_TABLE.store(frame.start_address().get(), Ordering::SeqCst);
} else {
panic!("heap does not have PML4 entry");
panic!("kernel does not have PML4 entry");
}
}
+}
-// Initialize devices
-device::init(&mut active_table);
// Read ACPI tables, starts APs
acpi::init(&mut active_table);
@@ -139,8 +141,14 @@ pub unsafe extern fn kstart_ap(cpu_id: usize, page_table: usize, stack_start: us
assert_eq!(BSS_TEST_ZERO, 0);
assert_eq!(DATA_TEST_NONZERO, 0xFFFFFFFFFFFFFFFF);
+// Retrieve kernel table entry
+while KERNEL_TABLE.load(Ordering::SeqCst) == 0 {
+interrupt::pause();
+}
+let kernel_table = KERNEL_TABLE.load(Ordering::SeqCst);
// Initialize paging
-let (mut active_table, tcb_offset) = paging::init(cpu_id, stack_start, stack_end);
+let (mut active_table, tcb_offset) = paging::init_ap(cpu_id, stack_start, stack_end, kernel_table);
// Set up GDT for AP
gdt::init(tcb_offset, stack_end);
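The new handshake: the BSP publishes the physical address of the kernel PML4 frame through `KERNEL_TABLE` (zero doubles as the not-ready sentinel, since a real frame address is never zero), and each AP spins until a value appears before building its own table. The pattern, distilled into a hedged user-space sketch (`std` stands in for the kernel's `interrupt::pause()` loop):

```rust
use std::sync::atomic::{AtomicUsize, Ordering};

static KERNEL_TABLE: AtomicUsize = AtomicUsize::new(0);

// BSP side: publish once the kernel PML4 frame is known.
fn publish(kernel_frame_addr: usize) {
    KERNEL_TABLE.store(kernel_frame_addr, Ordering::SeqCst);
}

// AP side: spin until the address appears, then use it.
fn wait_for_kernel_table() -> usize {
    loop {
        match KERNEL_TABLE.load(Ordering::SeqCst) {
            0 => std::hint::spin_loop(), // kernel uses interrupt::pause()
            addr => return addr,
        }
    }
}
```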
@@ -158,20 +166,6 @@ pub unsafe extern fn kstart_ap(cpu_id: usize, page_table: usize, stack_start: us
assert_eq!(TDATA_TEST_NONZERO, 0xFFFFFFFFFFFFFFFE);
}
-// Copy heap PML4
-{
-while HEAP_TABLE.load(Ordering::SeqCst) == 0 {
-interrupt::pause();
-}
-let frame = Frame::containing_address(PhysicalAddress::new(HEAP_TABLE.load(Ordering::SeqCst)));
-let page = Page::containing_address(VirtualAddress::new(::KERNEL_HEAP_OFFSET));
-let p4 = active_table.p4_mut();
-let entry = &mut p4[page.p4_index()];
-assert!(entry.is_unused());
-entry.set(frame, entry::PRESENT | entry::WRITABLE);
-}
// Init devices for AP
device::init_ap(&mut active_table);