Merge branch 'master' of github.com:redox-os/kernel

ticki 2016-08-31 17:02:47 +02:00
commit a9a8c2b340
6 changed files with 68 additions and 27 deletions

View file

@@ -3,6 +3,7 @@
use core::intrinsics::{atomic_load, atomic_store};
use interrupt;
use memory::{allocate_frame, Frame};
use paging::{entry, ActivePageTable, Page, PhysicalAddress, VirtualAddress};
use start::kstart_ap;
@@ -87,7 +88,7 @@ pub fn init_sdt(sdt: &'static Sdt, active_table: &mut ActivePageTable) {
// Wait for trampoline ready
println!(" Waiting for AP {}", asp_local_apic.id);
while unsafe { atomic_load(ap_ready) } == 0 {
unsafe { asm!("pause" : : : : "intel", "volatile") };
interrupt::pause();
}
println!(" AP {} is ready!", asp_local_apic.id);
} else {

View file

@@ -93,6 +93,7 @@ impl Context {
asm!("mov cr3, $0" : : "r"(self.cr3) : "memory" : "intel", "volatile");
*/
//CONTEXT_SWITCH_LOCK.store(false, Ordering::SeqCst);
// Unset global lock, set inside of kernel
CONTEXT_SWITCH_LOCK.store(false, Ordering::SeqCst);
}
}
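The arch-side change above only clears CONTEXT_SWITCH_LOCK; the matching acquire happens in the new kernel-level switch() further down. A minimal sketch of that flag as a plain AtomicBool, with hypothetical helper names and the current compare_exchange API standing in for the compare_and_swap call the kernel uses:

use core::sync::atomic::{AtomicBool, Ordering};

static CONTEXT_SWITCH_LOCK: AtomicBool = AtomicBool::new(false);

// Taken by the scheduler before it walks and mutates the context list.
fn lock_context_switch() {
    while CONTEXT_SWITCH_LOCK
        .compare_exchange(false, true, Ordering::SeqCst, Ordering::SeqCst)
        .is_err()
    {
        core::hint::spin_loop();
    }
}

// Cleared on the far side of the switch, once the new context is running,
// which is what the store(false, Ordering::SeqCst) above does.
fn unlock_context_switch() {
    CONTEXT_SWITCH_LOCK.store(false, Ordering::SeqCst);
}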

View file

@@ -26,6 +26,13 @@ pub unsafe fn halt() {
asm!("hlt" : : : : "intel", "volatile");
}
/// Pause instruction
/// Safe because it is similar to a NOP, and has no memory effects
#[inline(always)]
pub fn pause() {
unsafe { asm!("pause" : : : : "intel", "volatile"); }
}
/// Get a stack trace
//TODO: Check for stack being mapped before dereferencing
#[inline(never)]
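interrupt::pause() gives the spin-wait loops a single safe wrapper around the x86 pause hint instead of repeating inline assembly at each call site. A small sketch of the same pattern on present-day stable Rust, using core::hint::spin_loop() (an assumption here, not the kernel's own API) in place of the old asm! macro:

use core::sync::atomic::{AtomicUsize, Ordering};

// Spin until `flag` becomes non-zero, hinting the CPU on every iteration.
// core::hint::spin_loop() lowers to the pause instruction on x86, which is
// what interrupt::pause() emits via inline assembly in the kernel.
fn wait_until_nonzero(flag: &AtomicUsize) {
    while flag.load(Ordering::SeqCst) == 0 {
        core::hint::spin_loop();
    }
}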

View file

@@ -11,6 +11,7 @@ use display;
use externs::memset;
use gdt;
use idt;
use interrupt;
use memory::{self, Frame};
use paging::{self, entry, Page, PhysicalAddress, VirtualAddress};
@@ -92,7 +93,7 @@ pub unsafe extern fn kstart() -> ! {
}
// Initialize display
//display::init(&mut active_table);
display::init(&mut active_table);
// Reset AP variables
AP_COUNT.store(0, Ordering::SeqCst);
@@ -100,7 +101,7 @@ pub unsafe extern fn kstart() -> ! {
HEAP_FRAME.store(0, Ordering::SeqCst);
// Read ACPI tables, starts APs
//acpi::init(&mut active_table);
acpi::init(&mut active_table);
// Map heap
{
@@ -173,7 +174,7 @@ pub unsafe extern fn kstart_ap(stack_start: usize, stack_end: usize) -> ! {
assert_eq!(heap_start_page.p4_index(), heap_end_page.p4_index());
while HEAP_FRAME.load(Ordering::SeqCst) == 0 {
asm!("pause" : : : : "intel", "volatile");
interrupt::pause();
}
let frame = Frame::containing_address(PhysicalAddress::new(HEAP_FRAME.load(Ordering::SeqCst)));
@@ -188,7 +189,7 @@ pub unsafe extern fn kstart_ap(stack_start: usize, stack_end: usize) -> ! {
let ap_number = AP_COUNT.fetch_add(1, Ordering::SeqCst);
while ! BSP_READY.load(Ordering::SeqCst) {
asm!("pause" : : : : "intel", "volatile");
interrupt::pause();
}
kmain_ap(ap_number);
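The kstart_ap changes route both AP wait loops through interrupt::pause() while the APs spin on the BSP-published atomics. A rough sketch of that handshake shape, with illustrative stand-ins for HEAP_FRAME and BSP_READY rather than the kernel's own statics:

use core::sync::atomic::{AtomicBool, AtomicUsize, Ordering};

// Illustrative stand-ins, not the kernel's statics.
static HEAP_FRAME: AtomicUsize = AtomicUsize::new(0);
static BSP_READY: AtomicBool = AtomicBool::new(false);

// BSP side: publish the shared frame address, then release the APs.
fn bsp_publish(frame_addr: usize) {
    HEAP_FRAME.store(frame_addr, Ordering::SeqCst);
    BSP_READY.store(true, Ordering::SeqCst);
}

// AP side: spin with a pause hint until both values have been published.
fn ap_wait() -> usize {
    while HEAP_FRAME.load(Ordering::SeqCst) == 0 {
        core::hint::spin_loop();
    }
    while !BSP_READY.load(Ordering::SeqCst) {
        core::hint::spin_loop();
    }
    HEAP_FRAME.load(Ordering::SeqCst)
}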

View file

@@ -6,6 +6,7 @@ use core::mem;
use core::sync::atomic::{AtomicUsize, ATOMIC_USIZE_INIT, Ordering};
use spin::{Once, RwLock, RwLockReadGuard, RwLockWriteGuard};
use arch;
use arch::context::Context as ArchContext;
use syscall::{Error, Result};
@@ -94,7 +95,8 @@ static CONTEXT_ID: AtomicUsize = ATOMIC_USIZE_INIT;
pub fn init() {
let mut contexts = contexts_mut();
let context_lock = contexts.new_context().expect("could not initialize first context");
let context = context_lock.read();
let mut context = context_lock.write();
context.running = true;
CONTEXT_ID.store(context.id, Ordering::SeqCst);
}
@@ -118,8 +120,44 @@ pub fn contexts_mut() -> RwLockWriteGuard<'static, ContextList> {
/// # Safety
///
/// Do not call this while holding locks!
pub unsafe fn context_switch() {
// current.arch.switch_to(&mut next.arch);
pub unsafe fn switch() {
use core::ops::DerefMut;
// Set the global lock to keep the unsafe operations below from causing issues
while arch::context::CONTEXT_SWITCH_LOCK.compare_and_swap(false, true, Ordering::SeqCst) {
arch::interrupt::pause();
}
let from_ptr = if let Some(context_lock) = contexts().current() {
let mut context = context_lock.write();
context.deref_mut() as *mut Context
} else {
print!("NO FROM_PTR\n");
return;
};
let mut to_ptr = 0 as *mut Context;
for (_pid, context_lock) in contexts().map.iter() {
let mut context = context_lock.write();
if ! context.running {
to_ptr = context.deref_mut() as *mut Context;
break;
}
}
if to_ptr as usize == 0 {
print!("NO TO_PTR\n");
return;
}
unsafe {
(&mut *from_ptr).running = false;
(&mut *to_ptr).running = true;
CONTEXT_ID.store((&mut *to_ptr).id, Ordering::SeqCst);
(&mut *from_ptr).arch.switch_to(&mut (&mut *to_ptr).arch);
}
}
/// A context, which identifies either a process or a thread
@@ -127,6 +165,8 @@ pub unsafe fn context_switch() {
pub struct Context {
/// The ID of this context
pub id: usize,
/// Running or not
pub running: bool,
/// The architecture specific context
pub arch: ArchContext,
/// Kernel stack
@@ -140,6 +180,7 @@ impl Context {
pub fn new(id: usize) -> Context {
Context {
id: id,
running: false,
arch: ArchContext::new(),
kstack: None,
files: Vec::new()
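The new switch() takes the global lock, marks the current context as not running, and hands off to the first context whose running flag is clear. A hedged sketch of just that selection step, with a simplified stand-in for the kernel's context map (the real one lives behind spin::RwLock guards):

use std::collections::BTreeMap;

// Simplified stand-in for the kernel's Context; only the fields the scan needs.
struct Context {
    id: usize,
    running: bool,
}

// Mirror the linear scan in switch(): pick the first context whose running
// flag is clear, or None if every context is already busy.
fn pick_next(contexts: &BTreeMap<usize, Context>) -> Option<usize> {
    contexts.values().find(|context| !context.running).map(|context| context.id)
}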

View file

@@ -121,8 +121,10 @@ pub mod syscall;
pub mod tests;
pub extern fn context_test() {
print!("TEST\n");
print!("Test\n");
unsafe { context::switch(); }
print!("Test halt\n");
loop {
unsafe { interrupt::enable_and_halt(); }
}
@@ -134,26 +136,14 @@ pub extern fn kmain() {
print!("{}", format!("BSP: {:?}\n", syscall::getpid()));
let to_ptr = if let Ok(context_lock) = context::contexts_mut().spawn(context_test) {
if let Ok(context_lock) = context::contexts_mut().spawn(context_test) {
print!("Spawned context\n");
let mut context = context_lock.write();
&mut context.arch as *mut arch::context::Context
} else {
0 as *mut arch::context::Context
};
let from_ptr = if let Some(context_lock) = context::contexts().current() {
let mut context = context_lock.write();
&mut context.arch as *mut arch::context::Context
} else {
0 as *mut arch::context::Context
};
if to_ptr as usize != 0 && from_ptr as usize != 0 {
print!("Switching\n");
unsafe { (&mut *from_ptr).switch_to(&mut *to_ptr); }
}
print!("Main\n");
unsafe { context::switch(); }
print!("Main halt\n");
loop {
unsafe { interrupt::enable_and_halt(); }
}
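With the bookkeeping folded into context::switch(), kmain and context_test reduce to a print, a yield, and a halt loop. As a loose analogy only, the same ping-pong shape with OS threads and a turn flag standing in for kernel contexts and context::switch():

// Loose analogy only: OS threads and a turn flag in place of kernel contexts
// and context::switch(); the print order matches the kernel test's intent.
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;
use std::thread;

fn main() {
    let spawned_turn = Arc::new(AtomicBool::new(false));
    let flag = Arc::clone(&spawned_turn);

    let spawned = thread::spawn(move || {
        // Wait until the main side "switches" to us.
        while !flag.load(Ordering::SeqCst) {
            std::hint::spin_loop();
        }
        println!("Test");
    });

    println!("Main");
    // Hand control over, as kmain does with context::switch().
    spawned_turn.store(true, Ordering::SeqCst);
    spawned.join().expect("spawned side panicked");
    println!("Main halt");
}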