* Fire up multiple processors

* Use IPIs to wake up secondary processors

* Much better exception information

* Modifications to show more information on fault

* WIP: Use real libstd

* Add TLS (not complete)

* Add random function, export getpid, cleanup

* Do not spin APs until new context

* Update rust

* Update rust

* Use rd/wrfsbase (a sketch follows this list)

* Implement TLS

* Implement compiler builtins and update rust

* Update rust

* Back to Redox libstd

* Update rust
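For the "Use rd/wrfsbase" and "Implement TLS" items, here is a minimal sketch of reading and writing the TLS base with these instructions. It is a sketch, not this tree's implementation: it assumes CR4.FSGSBASE (bit 16) has already been set, without which both instructions raise #UD, and it uses current asm! syntax rather than the 2016-era form.

    use core::arch::asm;

    /// Read the current %fs base (sketch; requires CR4.FSGSBASE)
    unsafe fn rdfsbase() -> u64 {
        let base: u64;
        asm!("rdfsbase {}", out(reg) base, options(nomem, nostack, preserves_flags));
        base
    }

    /// Point %fs at a TLS block; thread-locals are then %fs-relative
    unsafe fn wrfsbase(base: u64) {
        asm!("wrfsbase {}", in(reg) base, options(nomem, nostack, preserves_flags));
    }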
Jeremy Soller 2016-10-31 10:49:00 -06:00 committed by GitHub
parent 25dc44b348
commit 149b0297a4
54 changed files with 1121 additions and 380 deletions

@@ -1,77 +0,0 @@
use core::intrinsics::{volatile_load, volatile_store};
use x86::cpuid::CpuId;
use x86::msr::*;

use memory::Frame;
use paging::{entry, ActivePageTable, PhysicalAddress};

/// Local APIC
pub struct LocalApic {
    pub address: u32,
    pub x2: bool
}

impl LocalApic {
    pub fn new(active_table: &mut ActivePageTable) -> Self {
        // The physical MMIO base lives in the upper bits of the APIC base MSR
        let mut apic = LocalApic {
            address: (unsafe { rdmsr(IA32_APIC_BASE) as u32 } & 0xFFFF0000),
            x2: false
        };

        if CpuId::new().get_feature_info().unwrap().has_x2apic() {
            // Enable x2APIC mode (bit 10 of IA32_APIC_BASE); registers are
            // then accessed through MSRs instead of MMIO
            unsafe { wrmsr(IA32_APIC_BASE, rdmsr(IA32_APIC_BASE) | 1 << 10) };
            apic.x2 = true;
        } else {
            // xAPIC mode: identity map the register page so MMIO works
            active_table.identity_map(Frame::containing_address(PhysicalAddress::new(apic.address as usize)), entry::PRESENT | entry::WRITABLE | entry::NO_EXECUTE);
        }

        apic
    }

    unsafe fn read(&self, reg: u32) -> u32 {
        volatile_load((self.address + reg) as *const u32)
    }

    unsafe fn write(&self, reg: u32, value: u32) {
        volatile_store((self.address + reg) as *mut u32, value);
    }

    pub fn id(&self) -> u32 {
        if self.x2 {
            unsafe { rdmsr(IA32_X2APIC_APICID) as u32 }
        } else {
            unsafe { self.read(0x20) }
        }
    }

    pub fn version(&self) -> u32 {
        if self.x2 {
            unsafe { rdmsr(IA32_X2APIC_VERSION) as u32 }
        } else {
            unsafe { self.read(0x30) }
        }
    }

    pub fn icr(&self) -> u64 {
        if self.x2 {
            unsafe { rdmsr(IA32_X2APIC_ICR) }
        } else {
            // xAPIC splits the 64-bit ICR across two 32-bit registers
            unsafe {
                (self.read(0x310) as u64) << 32 | self.read(0x300) as u64
            }
        }
    }

    pub fn set_icr(&mut self, value: u64) {
        if self.x2 {
            unsafe { wrmsr(IA32_X2APIC_ICR, value); }
        } else {
            unsafe {
                // Wait for any in-flight IPI to finish (bit 12 = delivery
                // status), write the high half, then the low half (which
                // triggers the send), then wait for delivery again
                while self.read(0x300) & 1 << 12 == 1 << 12 {}
                self.write(0x310, (value >> 32) as u32);
                self.write(0x300, value as u32);
                while self.read(0x300) & 1 << 12 == 1 << 12 {}
            }
        }
    }
}
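The "Use IPIs to wake up secondary processors" item funnels through set_icr above. Here is a minimal sketch of the INIT-SIPI-SIPI sequence that interface enables, following the Intel SDM encoding; TRAMPOLINE is a hypothetical 4 KiB-aligned real-mode address holding the AP entry code, and the SDM-recommended delays between the IPIs are omitted:

    const TRAMPOLINE: u64 = 0x8000; // hypothetical AP startup code location

    pub unsafe fn wake_ap(apic: &mut LocalApic, apic_id: u64) {
        // Destination field: ICR bits 32..63 in x2APIC mode, 56..63 in xAPIC mode
        let dest = if apic.x2 { apic_id << 32 } else { apic_id << 56 };

        // INIT IPI: delivery mode 0b101, level assert (bit 14)
        apic.set_icr(dest | 1 << 14 | 0b101 << 8);

        // Startup IPI, sent twice per the SDM: delivery mode 0b110,
        // vector = physical page number of the trampoline
        let sipi = dest | 1 << 14 | 0b110 << 8 | (TRAMPOLINE >> 12);
        apic.set_icr(sipi);
        apic.set_icr(sipi);
    }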

@@ -4,20 +4,19 @@
 use core::intrinsics::{atomic_load, atomic_store};
 use core::sync::atomic::Ordering;
+use device::local_apic::LOCAL_APIC;
 use interrupt;
 use memory::{allocate_frames, Frame};
 use paging::{entry, ActivePageTable, Page, PhysicalAddress, VirtualAddress};
 use start::{kstart_ap, CPU_COUNT, AP_READY};

 use self::dmar::{Dmar, DmarEntry};
-use self::local_apic::LocalApic;
 use self::madt::{Madt, MadtEntry};
 use self::rsdt::Rsdt;
 use self::sdt::Sdt;
 use self::xsdt::Xsdt;

 pub mod dmar;
-pub mod local_apic;
 pub mod madt;
 pub mod rsdt;
 pub mod sdt;
@@ -35,7 +34,7 @@ pub fn init_sdt(sdt: &'static Sdt, active_table: &mut ActivePageTable) {
     if let Some(madt) = Madt::new(sdt) {
         println!(": {:>08X}: {}", madt.local_address, madt.flags);

-        let mut local_apic = LocalApic::new(active_table);
+        let mut local_apic = unsafe { &mut LOCAL_APIC };

         let me = local_apic.id() as u8;
@@ -60,7 +59,7 @@ pub fn init_sdt(sdt: &'static Sdt, active_table: &mut ActivePageTable) {
             } else {
                 if ap_local_apic.flags & 1 == 1 {
                     // Increase CPU ID
-                    let cpu_id = CPU_COUNT.fetch_add(1, Ordering::SeqCst);
+                    CPU_COUNT.fetch_add(1, Ordering::SeqCst);

                     // Allocate a stack
                     let stack_start = allocate_frames(64).expect("no more frames in acpi stack_start").start_address().get() + ::KERNEL_OFFSET;
@@ -75,7 +74,7 @@ pub fn init_sdt(sdt: &'static Sdt, active_table: &mut ActivePageTable) {
                     // Set the ap_ready to 0, volatile
                     unsafe { atomic_store(ap_ready, 0) };
-                    unsafe { atomic_store(ap_cpu_id, cpu_id as u64) };
+                    unsafe { atomic_store(ap_cpu_id, ap_local_apic.id as u64) };
                     unsafe { atomic_store(ap_page_table, active_table.address() as u64) };
                     unsafe { atomic_store(ap_stack_start, stack_start as u64) };
                     unsafe { atomic_store(ap_stack_end, stack_end as u64) };
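The diff excerpt ends here, but the stores above, together with the "Do not spin APs until new context" item, imply a handshake on ap_ready. A plausible continuation, sketched rather than taken from the commit: after sending the startup IPI, the BSP spins until kstart_ap, running on the AP with its new page table and stack, flips the flag.

                    // Sketch: wait for the AP to mark itself ready
                    while unsafe { atomic_load(ap_ready) } == 0 {
                        interrupt::pause(); // hypothetical cpu-relax wrapper around `pause`
                    }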