work on shared memory across threads

Jeremy Soller 2016-09-16 17:51:27 -06:00
parent 0b2fd79816
commit 8ee9f1d7da
4 changed files with 111 additions and 56 deletions

@@ -3,7 +3,7 @@ use collections::Vec;
 use arch;
 use super::file::File;
-use super::memory::Memory;
+use super::memory::{Memory, SharedMemory};
 
 #[derive(Copy, Clone, Debug, Eq, PartialEq)]
 pub enum Status {
@@ -26,9 +26,9 @@ pub struct Context {
     /// Kernel stack
     pub kstack: Option<Box<[u8]>>,
     /// Executable image
-    pub image: Vec<Memory>,
+    pub image: Vec<SharedMemory>,
     /// User heap
-    pub heap: Option<Memory>,
+    pub heap: Option<SharedMemory>,
     /// User stack
     pub stack: Option<Memory>,
     /// The open files in the scheme

@@ -1,3 +1,6 @@
+use alloc::arc::{Arc, Weak};
+use spin::Mutex;
+
 use arch::externs::memset;
 use arch::paging::{ActivePageTable, InactivePageTable, Page, PageIter, VirtualAddress};
 use arch::paging::entry::{self, EntryFlags};
@@ -10,6 +13,35 @@ pub struct Memory {
     flags: EntryFlags
 }
 
+#[derive(Debug)]
+pub enum SharedMemory {
+    Owned(Arc<Mutex<Memory>>),
+    Borrowed(Weak<Mutex<Memory>>)
+}
+
+impl SharedMemory {
+    pub fn with<F, T>(&self, f: F) -> T where F: FnOnce(&mut Memory) -> T {
+        match *self {
+            SharedMemory::Owned(ref memory_lock) => {
+                let mut memory = memory_lock.lock();
+                f(&mut *memory)
+            },
+            SharedMemory::Borrowed(ref memory_weak) => {
+                let memory_lock = memory_weak.upgrade().expect("SharedMemory::Borrowed no longer valid");
+                let mut memory = memory_lock.lock();
+                f(&mut *memory)
+            }
+        }
+    }
+
+    pub fn borrow(&self) -> SharedMemory {
+        match *self {
+            SharedMemory::Owned(ref memory_lock) => SharedMemory::Borrowed(Arc::downgrade(memory_lock)),
+            SharedMemory::Borrowed(ref memory_lock) => SharedMemory::Borrowed(memory_lock.clone())
+        }
+    }
+}
+
 impl Memory {
     pub fn new(start: VirtualAddress, size: usize, flags: EntryFlags, flush: bool, clear: bool) -> Self {
         let mut memory = Memory {
@@ -23,6 +55,10 @@ impl Memory {
         memory
     }
 
+    pub fn to_shared(self) -> SharedMemory {
+        SharedMemory::Owned(Arc::new(Mutex::new(self)))
+    }
+
     pub fn start_address(&self) -> VirtualAddress {
         self.start
     }
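The new SharedMemory type is the core of this change: an Owned handle holds the strong reference that keeps a region alive, while borrow() hands out Weak views that other threads can use without extending the region's lifetime, and with() funnels every access through the lock. Below is a minimal userspace sketch of the same pattern, substituting std::sync::{Arc, Mutex, Weak} for the kernel's alloc::arc and spin::Mutex and a trivial struct for the kernel's Memory type; the names and field values are illustrative only, not the kernel's.

use std::sync::{Arc, Mutex, Weak};

// Stand-in for the kernel's Memory type; fields are illustrative.
struct Memory { start: usize, size: usize }

enum SharedMemory {
    Owned(Arc<Mutex<Memory>>),
    Borrowed(Weak<Mutex<Memory>>),
}

impl SharedMemory {
    // Run a closure with the region's lock held; a Borrowed handle
    // panics if its owner has already been dropped.
    fn with<F, T>(&self, f: F) -> T where F: FnOnce(&mut Memory) -> T {
        let lock = match *self {
            SharedMemory::Owned(ref lock) => Arc::clone(lock),
            SharedMemory::Borrowed(ref weak) => weak.upgrade().expect("borrowed memory no longer valid"),
        };
        let mut memory = lock.lock().unwrap();
        f(&mut *memory)
    }

    // Hand out a non-owning view of the same region.
    fn borrow(&self) -> SharedMemory {
        match *self {
            SharedMemory::Owned(ref lock) => SharedMemory::Borrowed(Arc::downgrade(lock)),
            SharedMemory::Borrowed(ref weak) => SharedMemory::Borrowed(weak.clone()),
        }
    }
}

fn main() {
    let owned = SharedMemory::Owned(Arc::new(Mutex::new(Memory { start: 0x1000, size: 4096 })));
    let borrowed = owned.borrow();

    // Both handles reach the same Memory; only the owner keeps it alive.
    borrowed.with(|m| m.size += 4096);
    owned.with(|m| assert_eq!(m.size, 8192));
}

The design consequence is that dropping the Owned handle releases the region (in the kernel, Memory's Drop unmaps it), so a Borrowed handle can never silently keep a dead mapping alive; it can only fail loudly through the expect() in with().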

@@ -97,10 +97,18 @@ impl<'a> Elf<'a> {
                 memory.remap(flags, true);
 
-                context.image.push(memory);
+                context.image.push(memory.to_shared());
             }
         }
 
+        context.heap = Some(context::memory::Memory::new(
+            VirtualAddress::new(arch::USER_HEAP_OFFSET),
+            0,
+            entry::NO_EXECUTE | entry::WRITABLE | entry::USER_ACCESSIBLE,
+            true,
+            true
+        ).to_shared());
+
         // Map stack
         context.stack = Some(context::memory::Memory::new(
             VirtualAddress::new(arch::USER_STACK_OFFSET),
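Two things change in the ELF loader here: each mapped segment is converted with to_shared() before it is stored, and the heap is now created eagerly at exec time with size 0, so later code can assume it exists. A small sketch of that ownership flow, reusing std-based stand-ins; the Context and Memory shapes below are illustrative, not the kernel's full definitions.

use std::sync::{Arc, Mutex};

// Illustrative stand-ins; the kernel's Context and Memory carry much more.
struct Memory { start: usize, size: usize }

enum SharedMemory { Owned(Arc<Mutex<Memory>>) } // Borrowed arm elided here

impl Memory {
    fn to_shared(self) -> SharedMemory {
        SharedMemory::Owned(Arc::new(Mutex::new(self)))
    }
}

struct Context { image: Vec<SharedMemory>, heap: Option<SharedMemory> }

fn main() {
    let mut context = Context { image: Vec::new(), heap: None };

    // Each loaded segment is wrapped and stored; the context owns it from here on.
    context.image.push(Memory { start: 0x40_0000, size: 0x1000 }.to_shared());

    // The heap exists from exec onward, starting at size 0; brk() resizes it later.
    context.heap = Some(Memory { start: 0x8000_0000, size: 0 }.to_shared());

    assert_eq!(context.image.len(), 1);
    assert!(context.heap.is_some());
}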

@@ -15,30 +15,29 @@ use syscall::{self, Error, Result};
 pub fn brk(address: usize) -> Result<usize> {
     let contexts = context::contexts();
     let context_lock = contexts.current().ok_or(Error::NoProcess)?;
-    let mut context = context_lock.write();
+    let context = context_lock.read();
 
-    let mut current = arch::USER_HEAP_OFFSET;
-    if let Some(ref heap) = context.heap {
-        current = heap.start_address().get() + heap.size();
-    }
+    let current = if let Some(ref heap_shared) = context.heap {
+        heap_shared.with(|heap| {
+            heap.start_address().get() + heap.size()
+        })
+    } else {
+        panic!("user heap not initialized");
+    };
 
     if address == 0 {
         //println!("Brk query {:X}", current);
         Ok(current)
     } else if address >= arch::USER_HEAP_OFFSET {
         //TODO: out of memory errors
-        if let Some(ref mut heap) = context.heap {
-            heap.resize(address - arch::USER_HEAP_OFFSET, true, true);
-            return Ok(address);
-        }
-
-        context.heap = Some(context::memory::Memory::new(
-            VirtualAddress::new(arch::USER_HEAP_OFFSET),
-            address - arch::USER_HEAP_OFFSET,
-            entry::WRITABLE | entry::NO_EXECUTE | entry::USER_ACCESSIBLE,
-            true,
-            true
-        ));
+        if let Some(ref heap_shared) = context.heap {
+            heap_shared.with(|heap| {
+                heap.resize(address - arch::USER_HEAP_OFFSET, true, true);
+            });
+        } else {
+            panic!("user heap not initialized");
+        }
 
         Ok(address)
     } else {
         //TODO: Return correct error
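With the heap now guaranteed to exist after exec (the loader change above), brk() simplifies to query-or-resize on the shared heap: the context lock can be taken for reading because mutation goes through the Mutex inside SharedMemory, and the old lazy-allocation branch disappears. A compressed, runnable model of that logic, using a plain struct and an illustrative USER_HEAP_OFFSET value rather than the kernel's constant:

use std::sync::{Arc, Mutex};

// Illustrative heap-start address, not the kernel's actual constant value.
const USER_HEAP_OFFSET: usize = 0x8000_0000;

struct Heap { start: usize, size: usize }

// Minimal model of the brk() logic above: query when address == 0, else resize.
fn brk(heap: &Arc<Mutex<Heap>>, address: usize) -> Result<usize, &'static str> {
    let mut heap = heap.lock().unwrap();
    let current = heap.start + heap.size;
    if address == 0 {
        Ok(current)
    } else if address >= USER_HEAP_OFFSET {
        // No out-of-memory handling, matching the TODO in the kernel code.
        heap.size = address - USER_HEAP_OFFSET;
        Ok(address)
    } else {
        Err("address below the heap start")
    }
}

fn main() {
    // exec creates the heap up front with size 0, so brk() can assume it exists.
    let heap = Arc::new(Mutex::new(Heap { start: USER_HEAP_OFFSET, size: 0 }));
    assert_eq!(brk(&heap, 0).unwrap(), USER_HEAP_OFFSET);
    assert_eq!(brk(&heap, USER_HEAP_OFFSET + 4096).unwrap(), USER_HEAP_OFFSET + 4096);
    assert_eq!(brk(&heap, 0).unwrap(), USER_HEAP_OFFSET + 4096);
}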
@@ -85,38 +84,46 @@ pub fn clone(flags: usize, stack_base: usize) -> Result<usize> {
         if flags & CLONE_VM == CLONE_VM {
             panic!("unimplemented: CLONE_VM");
         } else {
-            for memory in context.image.iter() {
-                let mut new_memory = context::memory::Memory::new(
-                    VirtualAddress::new(memory.start_address().get() + arch::USER_TMP_OFFSET),
-                    memory.size(),
-                    entry::PRESENT | entry::NO_EXECUTE | entry::WRITABLE,
-                    true,
-                    false
-                );
-                unsafe {
-                    arch::externs::memcpy(new_memory.start_address().get() as *mut u8,
-                                          memory.start_address().get() as *const u8,
-                                          memory.size());
-                }
-                new_memory.remap(memory.flags(), true);
-                image.push(new_memory);
+            for memory_shared in context.image.iter() {
+                memory_shared.with(|memory| {
+                    let mut new_memory = context::memory::Memory::new(
+                        VirtualAddress::new(memory.start_address().get() + arch::USER_TMP_OFFSET),
+                        memory.size(),
+                        entry::PRESENT | entry::NO_EXECUTE | entry::WRITABLE,
+                        true,
+                        false
+                    );
+
+                    unsafe {
+                        arch::externs::memcpy(new_memory.start_address().get() as *mut u8,
+                                              memory.start_address().get() as *const u8,
+                                              memory.size());
+                    }
+
+                    new_memory.remap(memory.flags(), true);
+                    image.push(new_memory.to_shared());
+                });
             }
 
-            if let Some(ref heap) = context.heap {
-                let mut new_heap = context::memory::Memory::new(
-                    VirtualAddress::new(arch::USER_TMP_HEAP_OFFSET),
-                    heap.size(),
-                    entry::PRESENT | entry::NO_EXECUTE | entry::WRITABLE,
-                    true,
-                    false
-                );
-                unsafe {
-                    arch::externs::memcpy(new_heap.start_address().get() as *mut u8,
-                                          heap.start_address().get() as *const u8,
-                                          heap.size());
-                }
-                new_heap.remap(heap.flags(), true);
-                heap_option = Some(new_heap);
+            if let Some(ref heap_shared) = context.heap {
+                heap_shared.with(|heap| {
+                    let mut new_heap = context::memory::Memory::new(
+                        VirtualAddress::new(arch::USER_TMP_HEAP_OFFSET),
+                        heap.size(),
+                        entry::PRESENT | entry::NO_EXECUTE | entry::WRITABLE,
+                        true,
+                        false
+                    );
+
+                    unsafe {
+                        arch::externs::memcpy(new_heap.start_address().get() as *mut u8,
+                                              heap.start_address().get() as *const u8,
+                                              heap.size());
+                    }
+
+                    new_heap.remap(heap.flags(), true);
+                    heap_option = Some(new_heap.to_shared());
+                });
             }
         }
@@ -220,15 +227,19 @@ pub fn clone(flags: usize, stack_base: usize) -> Result<usize> {
             context.kstack = Some(stack);
         }
 
-        for memory in image.iter_mut() {
-            let start = VirtualAddress::new(memory.start_address().get() - arch::USER_TMP_OFFSET + arch::USER_OFFSET);
-            memory.move_to(start, &mut new_table, &mut temporary_page, true);
+        for memory_shared in image.iter_mut() {
+            memory_shared.with(|memory| {
+                let start = VirtualAddress::new(memory.start_address().get() - arch::USER_TMP_OFFSET + arch::USER_OFFSET);
+                memory.move_to(start, &mut new_table, &mut temporary_page, true);
+            });
        }
         context.image = image;
 
-        if let Some(mut heap) = heap_option.take() {
-            heap.move_to(VirtualAddress::new(arch::USER_HEAP_OFFSET), &mut new_table, &mut temporary_page, true);
-            context.heap = Some(heap);
+        if let Some(heap_shared) = heap_option.take() {
+            heap_shared.with(|heap| {
+                heap.move_to(VirtualAddress::new(arch::USER_HEAP_OFFSET), &mut new_table, &mut temporary_page, true);
+            });
+            context.heap = Some(heap_shared);
         }
 
         if let Some(mut stack) = stack_option.take() {
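Taken together, clone() copies each image region and the heap byte-for-byte into a temporary window, wraps the copies with to_shared(), and then moves them into the child's new page table, doing all of it inside with() so the region's lock is held while its bytes and mappings move. The sketch below, using std types as stand-ins, shows the Weak semantics that with() relies on: once the Owned side is dropped, upgrade() returns None, which is exactly the case the expect() in SharedMemory::with turns into a panic.

use std::sync::{Arc, Mutex, Weak};

struct Memory { size: usize }

fn main() {
    let owned = Arc::new(Mutex::new(Memory { size: 4096 }));
    let borrowed: Weak<Mutex<Memory>> = Arc::downgrade(&owned);

    // While the owner lives, upgrade() succeeds and the mutex serializes access.
    assert_eq!(borrowed.upgrade().unwrap().lock().unwrap().size, 4096);

    drop(owned);

    // Once the owner is gone, upgrade() yields None; this is the case that
    // the expect() in SharedMemory::with turns into a kernel panic.
    assert!(borrowed.upgrade().is_none());
}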