* Fire up multiple processors

* Use IPIs to wake up secondary processors

* Much better exception information

* Modifications to show more information on fault

* WIP: Use real libstd

* Add TLS (not complete)

* Add random function, export getpid, cleanup

* Do not spin APs until new context

* Update rust

* Update rust

* Use rd/wrfsbase

* Implement TLS

* Implement compiler builtins and update rust

* Update rust

* Back to Redox libstd

* Update rust
Jeremy Soller 2016-10-31 10:49:00 -06:00 committed by GitHub
parent 25dc44b348
commit 149b0297a4
54 changed files with 1121 additions and 380 deletions


@@ -5,7 +5,7 @@ use spin::Mutex;
 use arch;
 use context::file::File;
-use context::memory::{Grant, Memory, SharedMemory};
+use context::memory::{Grant, Memory, SharedMemory, Tls};
 use syscall::data::Event;
 use sync::{WaitCondition, WaitQueue};
@@ -36,7 +36,7 @@ pub struct Context {
     /// Context running or not
     pub running: bool,
     /// CPU ID, if locked
-    pub cpuid: Option<usize>,
+    pub cpu_id: Option<usize>,
     /// Context is halting parent
     pub vfork: bool,
     /// Context is being waited on
@@ -55,6 +55,8 @@ pub struct Context {
     pub heap: Option<SharedMemory>,
     /// User stack
     pub stack: Option<Memory>,
+    /// User Tls
+    pub tls: Option<Tls>,
     /// User grants
     pub grants: Arc<Mutex<Vec<Grant>>>,
     /// The name of the context
@@ -81,7 +83,7 @@ impl Context {
             egid: 0,
             status: Status::Blocked,
             running: false,
-            cpuid: None,
+            cpu_id: None,
             vfork: false,
             waitpid: Arc::new(WaitCondition::new()),
             wake: None,
@@ -91,6 +93,7 @@ impl Context {
             image: Vec::new(),
             heap: None,
             stack: None,
+            tls: None,
             grants: Arc::new(Mutex::new(Vec::new())),
             name: Arc::new(Mutex::new(Vec::new())),
             cwd: Arc::new(Mutex::new(Vec::new())),
@@ -153,6 +156,13 @@ impl Context {
     pub fn unblock(&mut self) -> bool {
         if self.status == Status::Blocked {
             self.status = Status::Runnable;
+            if let Some(cpu_id) = self.cpu_id {
+                if cpu_id != ::cpu_id() {
+                    // Send IPI if not on current CPU
+                    // TODO: Make this more architecture independent
+                    unsafe { arch::device::local_apic::LOCAL_APIC.ipi(cpu_id) };
+                }
+            }
             true
         } else {
             false
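
Note from the editor: the LOCAL_APIC.ipi() call above only has to nudge a processor that is parked in its idle loop. Below is a minimal sketch of the receiving side, in the spirit of the "Do not spin APs until new context" item in the commit message; the names ap_idle, arch::interrupt::enable_and_halt, and arch::interrupt::disable are assumptions for illustration, not code from this commit:

    // Editorial sketch: what a secondary CPU might run when it has no work.
    // `enable_and_halt`/`disable` are assumed wrappers over `sti; hlt` / `cli`.
    pub unsafe fn ap_idle() -> ! {
        loop {
            // Try to pick up a runnable context for this CPU.
            if !context::switch() {
                // Nothing runnable: halt until an interrupt arrives. The IPI sent
                // by unblock() above is such an interrupt, so a newly runnable
                // context wakes this CPU instead of it busy-spinning.
                arch::interrupt::enable_and_halt();
                arch::interrupt::disable();
            }
        }
    }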


@@ -332,3 +332,10 @@ impl Drop for Memory {
         self.unmap(true);
     }
 }
+
+#[derive(Debug)]
+pub struct Tls {
+    pub master: VirtualAddress,
+    pub file_size: usize,
+    pub mem: Memory
+}
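
Note from the editor: the new Tls record ties the "Implement TLS" and "Use rd/wrfsbase" items together: master points at the ELF TLS initialization image, file_size is how much of it is initialized data (.tdata), and mem is the per-context copy. A minimal sketch of how such a record is typically consumed follows; the helper name load_tls, the start_address()/size()/get() accessors, and the current asm! syntax are assumptions, not code from this commit:

    use core::{arch::asm, ptr};

    // Editorial sketch: populate a context's TLS area from a Tls record and
    // point the FS base at it (x86_64, CR4.FSGSBASE assumed to be enabled).
    pub unsafe fn load_tls(tls: &Tls) {
        let dst = tls.mem.start_address().get() as *mut u8;
        let size = tls.mem.size();

        // Copy the initialized image (.tdata)...
        ptr::copy_nonoverlapping(tls.master.get() as *const u8, dst, tls.file_size);
        // ...and zero the remainder (.tbss).
        ptr::write_bytes(dst.add(tls.file_size), 0, size - tls.file_size);

        // Userspace TLS on x86_64 is reached through the FS segment base,
        // written here with wrfsbase (hence the rd/wrfsbase commit above).
        let fs_base = dst as usize + size; // variant II layout: FS points past the image
        asm!("wrfsbase {}", in(reg) fs_base);
    }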


@@ -50,7 +50,7 @@ pub fn init() {
     context.kfx = Some(fx);
     context.status = Status::Runnable;
     context.running = true;
-    context.cpuid = Some(::cpu_id());
+    context.cpu_id = Some(::cpu_id());
     CONTEXT_ID.store(context.id, Ordering::SeqCst);
 }


@@ -16,27 +16,34 @@ pub unsafe fn switch() -> bool {
         arch::interrupt::pause();
     }
 
+    let cpu_id = ::cpu_id();
+
     let from_ptr;
     let mut to_ptr = 0 as *mut Context;
     {
         let contexts = contexts();
         {
-            let context_lock = contexts.current().expect("context::switch: Not inside of context");
+            let context_lock = contexts.current().expect("context::switch: not inside of context");
             let mut context = context_lock.write();
             from_ptr = context.deref_mut() as *mut Context;
         }
 
         let check_context = |context: &mut Context| -> bool {
-            if context.cpuid == None || context.cpuid == Some(::cpu_id()) {
-                if context.status == Status::Blocked && context.wake.is_some() {
-                    let wake = context.wake.expect("context::switch: wake not set");
-                    let current = arch::time::monotonic();
-                    if current.0 > wake.0 || (current.0 == wake.0 && current.1 >= wake.1) {
-                        context.unblock();
-                    }
-                }
+            if context.cpu_id == None && cpu_id == 0 {
+                context.cpu_id = Some(cpu_id);
+                println!("{}: take {} {}", cpu_id, context.id, ::core::str::from_utf8_unchecked(&context.name.lock()));
+            }
+
+            if context.status == Status::Blocked && context.wake.is_some() {
+                let wake = context.wake.expect("context::switch: wake not set");
+                let current = arch::time::monotonic();
+                if current.0 > wake.0 || (current.0 == wake.0 && current.1 >= wake.1) {
+                    context.unblock();
+                }
+            }
 
+            if context.cpu_id == Some(cpu_id) {
                 if context.status == Status::Runnable && ! context.running {
                     return true;
                 }
@@ -74,8 +81,6 @@ pub unsafe fn switch() -> bool {
         return false;
     }
 
-    // println!("{}: Switch {} to {}", ::cpu_id(), (&*from_ptr).id, (&*to_ptr).id);
-
     (&mut *from_ptr).running = false;
     (&mut *to_ptr).running = true;
     if let Some(ref stack) = (*to_ptr).kstack {
@@ -83,6 +88,9 @@ pub unsafe fn switch() -> bool {
     }
 
     CONTEXT_ID.store((&mut *to_ptr).id, Ordering::SeqCst);
 
+    // Unset global lock before switch, as arch is only usable by the current CPU at this time
+    arch::context::CONTEXT_SWITCH_LOCK.store(false, Ordering::SeqCst);
+
     (&mut *from_ptr).arch.switch_to(&mut (&mut *to_ptr).arch);
     true
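
Note from the editor: the store(false, ...) added at the end releases the coarse context-switch lock just before switch_to, because from that point on another processor may enter the switch path. The lock itself is a plain atomic flag; here is a standalone illustration of the acquire/release pattern, written against today's atomics API (compare_exchange_weak rather than the older compare_and_swap), not the kernel's own code:

    use core::sync::atomic::{AtomicBool, Ordering};

    // Illustrative stand-in for arch::context::CONTEXT_SWITCH_LOCK.
    static SWITCH_LOCK: AtomicBool = AtomicBool::new(false);

    fn acquire_switch_lock() {
        // Spin until the flag flips from false to true for this CPU.
        while SWITCH_LOCK
            .compare_exchange_weak(false, true, Ordering::SeqCst, Ordering::SeqCst)
            .is_err()
        {
            core::hint::spin_loop(); // plays the role of arch::interrupt::pause()
        }
    }

    fn release_switch_lock() {
        // Released before the register switch so another CPU can enter
        // context::switch() while this one finishes switch_to().
        SWITCH_LOCK.store(false, Ordering::SeqCst);
    }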