Clean up PML4 entries, pass correct CPU ID to trampoline
parent 1298e38ed8
commit 11eddfecd1
@@ -66,12 +66,14 @@ pub fn init_sdt(sdt: &'static Sdt, active_table: &mut ActivePageTable) {
         let stack_end = allocate_frame().expect("no more frames in acpi stack_end").start_address().get() + 4096;

         let ap_ready = TRAMPOLINE as *mut u64;
-        let ap_stack_start = unsafe { ap_ready.offset(1) };
-        let ap_stack_end = unsafe { ap_ready.offset(2) };
-        let ap_code = unsafe { ap_ready.offset(3) };
+        let ap_cpu_id = unsafe { ap_ready.offset(1) };
+        let ap_stack_start = unsafe { ap_ready.offset(2) };
+        let ap_stack_end = unsafe { ap_ready.offset(3) };
+        let ap_code = unsafe { ap_ready.offset(4) };

         // Set the ap_ready to 0, volatile
         unsafe { atomic_store(ap_ready, 0) };
+        unsafe { atomic_store(ap_cpu_id, ap_local_apic.id as u64) };
         unsafe { atomic_store(ap_stack_start, stack_start as u64) };
         unsafe { atomic_store(ap_stack_end, stack_end as u64) };
         unsafe { atomic_store(ap_code, kstart_ap as u64) };
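The trampoline now carries five u64 slots that act as a mailbox between the BSP and the waking AP; the offset(n) calls above must stay in lockstep with the dq labels in trampoline.asm (see the last two hunks below). A minimal sketch of the layout, with illustrative slot names that are not from the source:

    // Mailbox at TRAMPOLINE, offsets in u64 units; must match trampoline.asm.
    const SLOT_READY: isize = 0;       // AP writes 1 here once it is running
    const SLOT_CPU_ID: isize = 1;      // new in this commit: the AP's CPU ID
    const SLOT_STACK_START: isize = 2; // bottom of the AP's kernel stack
    const SLOT_STACK_END: isize = 3;   // top of the AP's kernel stack
    const SLOT_CODE: isize = 4;        // entry point, set to kstart_ap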
@@ -20,20 +20,29 @@ extern crate spin;
 pub extern crate x86;

 // Because the memory map is so important to not be aliased, it is defined here, in one place
-/// Offset of recursive paging
-pub const RECURSIVE_PAGE_OFFSET: usize = 0xffff_ff80_0000_0000;
-/// Offset of kernel
-pub const KERNEL_OFFSET: usize = 0xffff_ff00_0000_0000;
-/// Offset to memory allocation bitmap
-pub const BITMAP_OFFSET: usize = 0xffff_fe80_0000_0000;
-/// Offset to kernel heap
-pub const HEAP_OFFSET: usize = 0xffff_fe00_0000_0000;
-/// Size of heap
-pub const HEAP_SIZE: usize = 64 * 1024 * 1024; // 64 MB
-/// Offset to user heap
-pub const USER_HEAP_OFFSET: usize = 0x0000_0080_0000_0000;
-/// Size of user heap
-pub const USER_HEAP_SIZE: usize = 64 * 1024 * 1024; // 64 MB
+// The lower 256 PML4 entries are reserved for userspace
+// Each PML4 entry references up to 512 GB of memory
+// The upper 256 are reserved for the kernel
+/// The size of a single PML4
+pub const PML4_SIZE: usize = 0x0000_0080_0000_0000;
+
+/// Offset of recursive paging
+pub const RECURSIVE_PAGE_OFFSET: usize = (-(PML4_SIZE as isize)) as usize;
+
+/// Offset of kernel
+pub const KERNEL_OFFSET: usize = RECURSIVE_PAGE_OFFSET - PML4_SIZE;
+
+/// Offset to kernel heap
+pub const KERNEL_HEAP_OFFSET: usize = KERNEL_OFFSET - PML4_SIZE;
+/// Size of kernel heap
+pub const KERNEL_HEAP_SIZE: usize = 64 * 1024 * 1024; // 64 MB
+
+/// Offset to kernel percpu variables
+pub const KERNEL_PERCPU_OFFSET: usize = KERNEL_HEAP_OFFSET - PML4_SIZE;
+
+/// Offset to user heap
+pub const USER_HEAP_OFFSET: usize = PML4_SIZE;

 /// Print to console
 #[macro_export]
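Each PML4 entry spans 512 GiB (0x0000_0080_0000_0000 bytes), so deriving every offset from PML4_SIZE pins each region to its own entry: negating PML4_SIZE yields the topmost slot, and each subtraction steps down one slot. The recursive-paging and kernel offsets come out identical to the old literals, while the kernel heap moves up into the slot the dropped bitmap used to occupy. A standalone check, as a sketch (the asserted values restate constants from this diff):

    fn main() {
        const PML4_SIZE: usize = 0x0000_0080_0000_0000;
        const RECURSIVE_PAGE_OFFSET: usize = (-(PML4_SIZE as isize)) as usize;
        const KERNEL_OFFSET: usize = RECURSIVE_PAGE_OFFSET - PML4_SIZE;
        const KERNEL_HEAP_OFFSET: usize = KERNEL_OFFSET - PML4_SIZE;
        const KERNEL_PERCPU_OFFSET: usize = KERNEL_HEAP_OFFSET - PML4_SIZE;

        assert_eq!(RECURSIVE_PAGE_OFFSET, 0xffff_ff80_0000_0000); // PML4 entry 511
        assert_eq!(KERNEL_OFFSET, 0xffff_ff00_0000_0000);         // PML4 entry 510
        assert_eq!(KERNEL_HEAP_OFFSET, 0xffff_fe80_0000_0000);    // PML4 entry 509
        assert_eq!(KERNEL_PERCPU_OFFSET, 0xffff_fe00_0000_0000);  // PML4 entry 508
        println!("derived offsets line up, one PML4 slot apart");
    }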
@@ -1,39 +0,0 @@
-use super::AreaFrameAllocator;
-
-const BITMAP_RESERVED: usize = 0;
-const BITMAP_FREE: usize = 1;
-const BITMAP_USED: usize = 2;
-
-pub struct BitmapAllocator {
-    bitmap: &'static mut [u8]
-}
-
-impl BitmapAllocator {
-    pub fn new(area_frame_allocator: AreaFrameAllocator) -> BitmapAllocator {
-        BitmapAllocator {
-            bitmap: &mut []
-        }
-    }
-}
-
-impl FrameAllocator for BitmapAllocator {
-    fn allocate_frame(&mut self) -> Option<Frame> {
-        let mut i = 0;
-        while i < self.bitmap.len() {
-            if self.bitmap[i] == BITMAP_FREE {
-                self.bitmap[i] = BITMAP_USED;
-                return Some(Frame::containing_address(PhysicalAddress::new(i * 4096)));
-            }
-        }
-        None
-    }
-
-    fn deallocate_frame(&mut self, frame: Frame) {
-        let i = frame.starting_address().get()/4096;
-        if i < self.bitmap.len() && self.bitmap[i] == BITMAP_USED {
-            self.bitmap[i] = BITMAP_FREE;
-        } else {
-            panic!("BitmapAllocator::deallocate_frame: unowned frame {:?}", frame);
-        }
-    }
-}
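The deleted allocator was an unfinished stub: new discarded its AreaFrameAllocator and kept an empty bitmap, and the scan loop never incremented i, so it would spin forever as soon as bitmap[0] was not free. For reference, a corrected scan would look roughly like this (a sketch against the same types, not code from the tree):

    fn allocate_frame(&mut self) -> Option<Frame> {
        // Advance through the bitmap instead of re-testing the same entry.
        for i in 0..self.bitmap.len() {
            if self.bitmap[i] == BITMAP_FREE {
                self.bitmap[i] = BITMAP_USED;
                return Some(Frame::containing_address(PhysicalAddress::new(i * 4096)));
            }
        }
        None
    }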
@@ -25,7 +25,7 @@ pub const PAGE_SIZE: usize = 4096;
 /// Initialize paging
 ///
 /// Returns page table and thread control block offset
-pub unsafe fn init(stack_start: usize, stack_end: usize) -> (ActivePageTable, usize) {
+pub unsafe fn init(cpu_id: usize, stack_start: usize, stack_end: usize) -> (ActivePageTable, usize) {
     extern {
         /// The starting byte of the text (code) data segment.
         static mut __text_start: u8;
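paging::init now receives the CPU ID, presumably so each core can map its own slice of the new KERNEL_PERCPU region; the hunks shown here do not include that use. A hypothetical sketch of such a computation (the per-CPU size constant is an assumption, not from the source):

    // Hypothetical: one fixed-size window per core inside the
    // KERNEL_PERCPU_OFFSET PML4 slot. KERNEL_PERCPU_SIZE is assumed.
    const KERNEL_PERCPU_SIZE: usize = 64 * 1024;

    fn percpu_base(cpu_id: usize) -> usize {
        KERNEL_PERCPU_OFFSET + cpu_id * KERNEL_PERCPU_SIZE
    }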
@@ -26,7 +26,6 @@ static mut TBSS_TEST_ZERO: usize = 0;
 #[thread_local]
 static mut TDATA_TEST_NONZERO: usize = 0xFFFFFFFFFFFFFFFF;

-static AP_COUNT: AtomicUsize = ATOMIC_USIZE_INIT;
 pub static AP_READY: AtomicBool = ATOMIC_BOOL_INIT;
 static BSP_READY: AtomicBool = ATOMIC_BOOL_INIT;
 static HEAP_TABLE: AtomicUsize = ATOMIC_USIZE_INIT;
@@ -73,7 +72,7 @@ pub unsafe extern fn kstart() -> ! {
     let stack_end = 0x0009F000;

     // Initialize paging
-    let (mut active_table, tcb_offset) = paging::init(stack_start, stack_end);
+    let (mut active_table, tcb_offset) = paging::init(0, stack_start, stack_end);

     // Set up GDT
     gdt::init(tcb_offset, stack_end);
@@ -92,7 +91,6 @@ pub unsafe extern fn kstart() -> ! {
     }

     // Reset AP variables
-    AP_COUNT.store(0, Ordering::SeqCst);
     AP_READY.store(false, Ordering::SeqCst);
     BSP_READY.store(false, Ordering::SeqCst);
     HEAP_TABLE.store(0, Ordering::SeqCst);
@@ -100,14 +98,14 @@ pub unsafe extern fn kstart() -> ! {
     // Map heap
     {
         // Map heap pages
-        let heap_start_page = Page::containing_address(VirtualAddress::new(::HEAP_OFFSET));
-        let heap_end_page = Page::containing_address(VirtualAddress::new(::HEAP_OFFSET + ::HEAP_SIZE-1));
+        let heap_start_page = Page::containing_address(VirtualAddress::new(::KERNEL_HEAP_OFFSET));
+        let heap_end_page = Page::containing_address(VirtualAddress::new(::KERNEL_HEAP_OFFSET + ::KERNEL_HEAP_SIZE-1));
         for page in Page::range_inclusive(heap_start_page, heap_end_page) {
             active_table.map(page, entry::WRITABLE | entry::NO_EXECUTE);
         }

         // Init the allocator
-        allocator::init(::HEAP_OFFSET, ::HEAP_SIZE);
+        allocator::init(::KERNEL_HEAP_OFFSET, ::KERNEL_HEAP_SIZE);

         // Send heap page table to APs
         let index = heap_start_page.p4_index();
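Because the 64 MB heap now sits inside a single 512 GiB PML4 slot, heap_start_page and heap_end_page share one p4 index, and that lone entry is all the BSP needs to hand to the APs. A quick check, as a sketch (p4_index here mimics the x86-64 bit layout):

    fn p4_index(addr: usize) -> usize {
        (addr >> 39) & 0o777 // bits 39..=47 select the PML4 entry
    }

    fn main() {
        const KERNEL_HEAP_OFFSET: usize = 0xffff_fe80_0000_0000;
        const KERNEL_HEAP_SIZE: usize = 64 * 1024 * 1024;
        assert_eq!(p4_index(KERNEL_HEAP_OFFSET), 509);
        assert_eq!(p4_index(KERNEL_HEAP_OFFSET + KERNEL_HEAP_SIZE - 1), 509);
    }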
@@ -136,13 +134,13 @@
 }

 /// Entry to rust for an AP
-pub unsafe extern fn kstart_ap(stack_start: usize, stack_end: usize) -> ! {
+pub unsafe extern fn kstart_ap(cpu_id: usize, stack_start: usize, stack_end: usize) -> ! {
     {
         assert_eq!(BSS_TEST_ZERO, 0);
         assert_eq!(DATA_TEST_NONZERO, 0xFFFFFFFFFFFFFFFF);

         // Initialize paging
-        let (mut active_table, tcb_offset) = paging::init(stack_start, stack_end);
+        let (mut active_table, tcb_offset) = paging::init(cpu_id, stack_start, stack_end);

         // Set up GDT for AP
         gdt::init_ap(tcb_offset, stack_end);
@@ -162,13 +160,12 @@ pub unsafe extern fn kstart_ap(stack_start: usize, stack_end: usize) -> ! {

     // Copy heap PML4
     {
-        let page = Page::containing_address(VirtualAddress::new(::HEAP_OFFSET));
-
         while HEAP_TABLE.load(Ordering::SeqCst) == 0 {
             interrupt::pause();
         }
         let frame = Frame::containing_address(PhysicalAddress::new(HEAP_TABLE.load(Ordering::SeqCst)));

+        let page = Page::containing_address(VirtualAddress::new(::KERNEL_HEAP_OFFSET));
         let p4 = active_table.p4_mut();
         let entry = &mut p4[page.p4_index()];
         assert!(entry.is_unused());
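This block is the consumer half of the heap handshake: the BSP publishes the physical address of the heap's P4 table through HEAP_TABLE, and each AP spins until it is non-zero, then installs the same frame at its own p4 index so every core shares one heap mapping. A toy userspace model of the protocol (std types standing in for the kernel's atomics and interrupt::pause):

    use std::sync::atomic::{AtomicUsize, Ordering};
    use std::{hint, thread};

    static HEAP_TABLE: AtomicUsize = AtomicUsize::new(0);

    fn main() {
        let ap = thread::spawn(|| {
            // AP side: wait until the BSP has published a frame address.
            while HEAP_TABLE.load(Ordering::SeqCst) == 0 {
                hint::spin_loop(); // stands in for interrupt::pause()
            }
            println!("AP: share P4 entry at {:#x}", HEAP_TABLE.load(Ordering::SeqCst));
        });

        // BSP side: map the heap, then publish the backing frame
        // (the address here is made up for the demo).
        HEAP_TABLE.store(0x1234_5000, Ordering::SeqCst);
        ap.join().unwrap();
    }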
@@ -181,13 +178,11 @@ pub unsafe extern fn kstart_ap(stack_start: usize, stack_end: usize) -> ! {
         AP_READY.store(true, Ordering::SeqCst);
     }

-    let ap_number = AP_COUNT.fetch_add(1, Ordering::SeqCst);
-
     while ! BSP_READY.load(Ordering::SeqCst) {
         interrupt::pause();
     }

-    kmain_ap(ap_number);
+    kmain_ap(cpu_id);
 }

 pub unsafe fn usermode(ip: usize, sp: usize) -> ! {
@@ -1,5 +1,6 @@
 trampoline:
     .ready: dq 0
+    .cpu_id: dq 0
     .stack_start: dq 0
     .stack_end: dq 0
     .code: dq 0
@@ -131,11 +132,11 @@ long_mode_ap:
     mov gs, rax
     mov ss, rax

-    mov rdi, [trampoline.stack_start]
-    mov rsi, [trampoline.stack_end]
+    mov rdi, [trampoline.cpu_id]
+    mov rsi, [trampoline.stack_start]
+    mov rdx, [trampoline.stack_end]

-    lea rsp, [rsi - 16]
+    lea rsp, [rdx - 256]

     mov qword [trampoline.ready], 1
     mov rax, [trampoline.code]
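The register choice follows the System V AMD64 calling convention, in which the first three integer arguments travel in rdi, rsi, and rdx; loading cpu_id, stack_start, and stack_end into those registers before jumping through trampoline.code hands them straight to kstart_ap as its three parameters. The stack pointer is also parked 256 bytes below stack_end now instead of 16, presumably to leave more headroom before the first push. The Rust side of the contract, as a sketch (signature from this diff, body elided):

    // rdi -> cpu_id, rsi -> stack_start, rdx -> stack_end: a bare `extern`
    // fn uses the C ABI, which is System V AMD64 on this target.
    pub unsafe extern fn kstart_ap(cpu_id: usize, stack_start: usize, stack_end: usize) -> ! {
        loop {} // real body shown in the hunks above
    }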