Make all perCPU mappings available in all contexts - this will allow APs to pick up threads

Jeremy Soller 2016-10-23 09:13:12 -06:00
parent 790c32b0bc
commit a715e157d4
5 changed files with 29 additions and 45 deletions


@@ -7,7 +7,7 @@ use core::sync::atomic::Ordering;
use interrupt;
use memory::{allocate_frames, Frame};
use paging::{entry, ActivePageTable, Page, PhysicalAddress, VirtualAddress};
-use start::{kstart_ap, AP_READY};
+use start::{kstart_ap, CPU_COUNT, AP_READY};
use self::local_apic::LocalApic;
use self::madt::{Madt, MadtEntry};
@@ -57,8 +57,10 @@ pub fn init_sdt(sdt: &'static Sdt, active_table: &mut ActivePageTable) {
println!(" This is my local APIC");
} else {
if ap_local_apic.flags & 1 == 1 {
+// Increase CPU ID
+let cpu_id = CPU_COUNT.fetch_add(1, Ordering::SeqCst);
// Allocate a stack
// TODO: Allocate contiguous
let stack_start = allocate_frames(64).expect("no more frames in acpi stack_start").start_address().get() + ::KERNEL_OFFSET;
let stack_end = stack_start + 64 * 4096;
@@ -71,7 +73,7 @@ pub fn init_sdt(sdt: &'static Sdt, active_table: &mut ActivePageTable) {
// Set the ap_ready to 0, volatile
unsafe { atomic_store(ap_ready, 0) };
-unsafe { atomic_store(ap_cpu_id, ap_local_apic.id as u64) };
+unsafe { atomic_store(ap_cpu_id, cpu_id as u64) };
unsafe { atomic_store(ap_page_table, active_table.address() as u64) };
unsafe { atomic_store(ap_stack_start, stack_start as u64) };
unsafe { atomic_store(ap_stack_end, stack_end as u64) };
@@ -118,6 +120,8 @@ pub fn init_sdt(sdt: &'static Sdt, active_table: &mut ActivePageTable) {
interrupt::pause();
}
println!(" Ready");
+active_table.flush_all();
} else {
println!(" CPU Disabled");
}
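The new CPU_COUNT counter is what hands out CPU IDs here: the BSP initializes it to 1 for itself (see kstart below), and every enabled AP found in the MADT takes the next value with fetch_add, which is then stored into the trampoline's ap_cpu_id slot instead of the local APIC ID. That keeps CPU IDs dense (0..CPU_COUNT), so per-CPU regions can be indexed by CPU ID later on. A minimal standalone sketch of the counting pattern, using std atomics in place of the kernel's static (illustration only, not the kernel's startup path):

```rust
use std::sync::atomic::{AtomicUsize, Ordering};

// The BSP counts as CPU 0, so the counter starts at 1.
static CPU_COUNT: AtomicUsize = AtomicUsize::new(1);

// Each AP brought up by the MADT walk takes the next free ID.
fn allocate_cpu_id() -> usize {
    // fetch_add returns the previous value: successive APs get 1, 2, 3, ...
    CPU_COUNT.fetch_add(1, Ordering::SeqCst)
}

fn main() {
    let first_ap = allocate_cpu_id();
    let second_ap = allocate_cpu_id();
    assert_eq!((first_ap, second_ap), (1, 2));
    println!("CPUs known so far: {}", CPU_COUNT.load(Ordering::SeqCst));
}
```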


@@ -169,7 +169,7 @@ pub unsafe fn init(cpu_id: usize, stack_start: usize, stack_end: usize) -> (Acti
(active_table, init_tcb(cpu_id))
}
-pub unsafe fn init_ap(cpu_id: usize, stack_start: usize, stack_end: usize, kernel_table: usize) -> (ActivePageTable, usize) {
+pub unsafe fn init_ap(cpu_id: usize, bsp_table: usize, stack_start: usize, stack_end: usize) -> usize {
extern {
/// The starting byte of the thread data segment
static mut __tdata_start: u8;
@@ -185,18 +185,11 @@ pub unsafe fn init_ap(cpu_id: usize, stack_start: usize, stack_end: usize, kerne
let mut active_table = ActivePageTable::new();
+let mut new_table = InactivePageTable::from_address(bsp_table);
let mut temporary_page = TemporaryPage::new(Page::containing_address(VirtualAddress::new(0x8_0000_0000)));
-let mut new_table = {
-let frame = allocate_frame().expect("no more frames in paging::init new_table");
-InactivePageTable::new(frame, &mut active_table, &mut temporary_page)
-};
active_table.with(&mut new_table, &mut temporary_page, |mapper| {
-// Copy kernel mapping
-let kernel_frame = Frame::containing_address(PhysicalAddress::new(kernel_table));
-mapper.p4_mut()[510].set(kernel_frame, entry::PRESENT | entry::WRITABLE);
// Map tdata and tbss
{
let size = & __tbss_end as *const _ as usize - & __tdata_start as *const _ as usize;
@@ -228,7 +221,7 @@ pub unsafe fn init_ap(cpu_id: usize, stack_start: usize, stack_end: usize, kerne
active_table.switch(new_table);
-(active_table, init_tcb(cpu_id))
+init_tcb(cpu_id)
}
pub struct ActivePageTable {
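Dropping the per-AP table allocation is the core of this file's change: init_ap no longer builds a fresh top-level table and copies the kernel's P4 entry into it, it wraps the table address the BSP passes down, maps this CPU's tdata/tbss image into it, and switches. Since every CPU then runs on the same kernel table, any per-CPU mapping added for one CPU is visible to all of them. A toy sketch of the difference, with a placeholder type rather than the kernel's paging structs:

```rust
// Mock-up for illustration; the real InactivePageTable manages frames and
// recursive mappings, not just an address.
#[derive(Debug, PartialEq)]
struct InactivePageTable {
    p4_frame: usize,
}

impl InactivePageTable {
    // The constructor this commit relies on: reuse an existing table by address.
    fn from_address(address: usize) -> InactivePageTable {
        InactivePageTable { p4_frame: address }
    }
}

fn main() {
    let bsp_table = 0x0010_0000usize; // pretend physical address of the BSP's P4
    let ap_table = InactivePageTable::from_address(bsp_table);
    // The AP's "new" table is the BSP's table, not a private copy.
    assert_eq!(ap_table, InactivePageTable { p4_frame: bsp_table });
}
```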


@@ -26,13 +26,13 @@ static mut TBSS_TEST_ZERO: usize = 0;
#[thread_local]
static mut TDATA_TEST_NONZERO: usize = 0xFFFFFFFFFFFFFFFF;
+pub static CPU_COUNT: AtomicUsize = ATOMIC_USIZE_INIT;
pub static AP_READY: AtomicBool = ATOMIC_BOOL_INIT;
static BSP_READY: AtomicBool = ATOMIC_BOOL_INIT;
-static KERNEL_TABLE: AtomicUsize = ATOMIC_USIZE_INIT;
extern {
/// Kernel main function
-fn kmain() -> !;
+fn kmain(cpus: usize) -> !;
/// Kernel main for APs
fn kmain_ap(id: usize) -> !;
}
@@ -91,9 +91,9 @@ pub unsafe extern fn kstart() -> ! {
}
// Reset AP variables
+CPU_COUNT.store(1, Ordering::SeqCst);
AP_READY.store(false, Ordering::SeqCst);
BSP_READY.store(false, Ordering::SeqCst);
-KERNEL_TABLE.store(0, Ordering::SeqCst);
// Setup kernel heap
{
@@ -111,44 +111,23 @@ pub unsafe extern fn kstart() -> ! {
// Initialize devices
device::init();
-// Send kernel page table to APs
-{
-let index = Page::containing_address(VirtualAddress::new(::KERNEL_OFFSET)).p4_index();
-let p4 = active_table.p4();
-{
-let entry = &p4[index];
-if let Some(frame) = entry.pointed_frame() {
-KERNEL_TABLE.store(frame.start_address().get(), Ordering::SeqCst);
-} else {
-panic!("kernel does not have PML4 entry");
-}
-}
-}
// Read ACPI tables, starts APs
acpi::init(&mut active_table);
BSP_READY.store(true, Ordering::SeqCst);
}
-kmain();
+kmain(CPU_COUNT.load(Ordering::SeqCst));
}
/// Entry to rust for an AP
-pub unsafe extern fn kstart_ap(cpu_id: usize, _page_table: usize, stack_start: usize, stack_end: usize) -> ! {
+pub unsafe extern fn kstart_ap(cpu_id: usize, bsp_table: usize, stack_start: usize, stack_end: usize) -> ! {
{
assert_eq!(BSS_TEST_ZERO, 0);
assert_eq!(DATA_TEST_NONZERO, 0xFFFFFFFFFFFFFFFF);
-// Retrieve kernel table entry
-while KERNEL_TABLE.load(Ordering::SeqCst) == 0 {
-interrupt::pause();
-}
-let kernel_table = KERNEL_TABLE.load(Ordering::SeqCst);
// Initialize paging
-let (_active_table, tcb_offset) = paging::init_ap(cpu_id, stack_start, stack_end, kernel_table);
+let tcb_offset = paging::init_ap(cpu_id, bsp_table, stack_start, stack_end);
// Set up GDT for AP
gdt::init(tcb_offset, stack_end);
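The deleted KERNEL_TABLE rendezvous is the other half of the simplification: the BSP used to publish a single PML4 frame through a static atomic and each AP spun on it before setting up paging, whereas now the trampoline already carries bsp_table as an argument. A self-contained mock of the old handshake, with std types standing in for the kernel statics (illustration of the removed pattern, not kernel code):

```rust
use std::sync::atomic::{AtomicUsize, Ordering};
use std::{hint, thread};

// Stand-in for the removed `static KERNEL_TABLE: AtomicUsize`.
static KERNEL_TABLE: AtomicUsize = AtomicUsize::new(0);

fn ap_wait_for_table() -> usize {
    // Old-style handshake: spin until the BSP stores a nonzero address.
    while KERNEL_TABLE.load(Ordering::SeqCst) == 0 {
        hint::spin_loop();
    }
    KERNEL_TABLE.load(Ordering::SeqCst)
}

fn main() {
    let ap = thread::spawn(ap_wait_for_table);
    KERNEL_TABLE.store(0x0010_0000, Ordering::SeqCst); // BSP publishes the table
    assert_eq!(ap.join().unwrap(), 0x0010_0000);
}
```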


@@ -133,6 +133,13 @@ pub fn cpu_id() -> usize {
CPU_ID.load(Ordering::Relaxed)
}
+static CPU_COUNT : AtomicUsize = ATOMIC_USIZE_INIT;
+#[inline(always)]
+pub fn cpu_count() -> usize {
+CPU_COUNT.load(Ordering::Relaxed)
+}
pub extern fn userspace_init() {
assert_eq!(syscall::chdir(b"initfs:bin"), Ok(0));
@@ -146,13 +153,14 @@ pub extern fn userspace_init() {
}
#[no_mangle]
-pub extern fn kmain() {
+pub extern fn kmain(cpus: usize) {
CPU_ID.store(0, Ordering::SeqCst);
+CPU_COUNT.store(cpus, Ordering::SeqCst);
context::init();
let pid = syscall::getpid();
println!("BSP: {:?}", pid);
println!("BSP: {:?} {}", pid, cpus);
match context::contexts_mut().spawn(userspace_init) {
Ok(context_lock) => {


@@ -346,7 +346,7 @@ pub fn clone(flags: usize, stack_base: usize) -> Result<usize> {
context.grants = grants;
} else {
// Copy percpu mapping
-{
+for cpu_id in 0..::cpu_count() {
extern {
/// The starting byte of the thread data segment
static mut __tdata_start: u8;
@@ -356,7 +356,7 @@ pub fn clone(flags: usize, stack_base: usize) -> Result<usize> {
let size = unsafe { & __tbss_end as *const _ as usize - & __tdata_start as *const _ as usize };
-let start = arch::KERNEL_PERCPU_OFFSET + arch::KERNEL_PERCPU_SIZE * ::cpu_id();
+let start = arch::KERNEL_PERCPU_OFFSET + arch::KERNEL_PERCPU_SIZE * cpu_id;
let end = start + size;
let start_page = Page::containing_address(VirtualAddress::new(start));
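This loop is where the commit message lands: clone previously copied only the calling CPU's per-CPU image into the new context, so a context was effectively tied to the CPU that created it; it now walks 0..cpu_count() and copies every CPU's slot, letting any AP pick the context up later. A sketch of the address arithmetic involved, with made-up values standing in for the kernel's KERNEL_PERCPU_OFFSET and KERNEL_PERCPU_SIZE constants:

```rust
// Placeholder constants; the real values come from the arch crate.
const KERNEL_PERCPU_OFFSET: usize = 0xC000_0000;
const KERNEL_PERCPU_SIZE: usize = 64 * 1024;

// Each CPU's thread-local image occupies its own fixed-size slot.
fn percpu_range(cpu_id: usize, tls_size: usize) -> (usize, usize) {
    let start = KERNEL_PERCPU_OFFSET + KERNEL_PERCPU_SIZE * cpu_id;
    (start, start + tls_size)
}

fn main() {
    let tls_size = 4096; // pretend size of __tdata_start..__tbss_end
    for cpu_id in 0..4 {
        let (start, end) = percpu_range(cpu_id, tls_size);
        println!("cpu {}: {:#x}..{:#x}", cpu_id, start, end);
    }
}
```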