From 8ee9f1d7da49a69c5f3253a6dce7729dfefd15ba Mon Sep 17 00:00:00 2001
From: Jeremy Soller
Date: Fri, 16 Sep 2016 17:51:27 -0600
Subject: [PATCH] work on shared memory across threads

---
 kernel/context/context.rs |   6 +-
 kernel/context/memory.rs  |  36 ++++++++++++
 kernel/elf.rs             |  10 +++-
 kernel/syscall/process.rs | 115 +++++++++++++++++++++-----------
 4 files changed, 111 insertions(+), 56 deletions(-)

diff --git a/kernel/context/context.rs b/kernel/context/context.rs
index 8ed1bec..4ab62a4 100644
--- a/kernel/context/context.rs
+++ b/kernel/context/context.rs
@@ -3,7 +3,7 @@ use collections::Vec;
 
 use arch;
 use super::file::File;
-use super::memory::Memory;
+use super::memory::{Memory, SharedMemory};
 
 #[derive(Copy, Clone, Debug, Eq, PartialEq)]
 pub enum Status {
@@ -26,9 +26,9 @@ pub struct Context {
     /// Kernel stack
     pub kstack: Option<Box<[u8]>>,
     /// Executable image
-    pub image: Vec<Memory>,
+    pub image: Vec<SharedMemory>,
     /// User heap
-    pub heap: Option<Memory>,
+    pub heap: Option<SharedMemory>,
     /// User stack
     pub stack: Option<Memory>,
     /// The open files in the scheme
diff --git a/kernel/context/memory.rs b/kernel/context/memory.rs
index 71ed47c..34e69a3 100644
--- a/kernel/context/memory.rs
+++ b/kernel/context/memory.rs
@@ -1,3 +1,6 @@
+use alloc::arc::{Arc, Weak};
+use spin::Mutex;
+
 use arch::externs::memset;
 use arch::paging::{ActivePageTable, InactivePageTable, Page, PageIter, VirtualAddress};
 use arch::paging::entry::{self, EntryFlags};
@@ -10,6 +13,35 @@ pub struct Memory {
     flags: EntryFlags
 }
 
+#[derive(Debug)]
+pub enum SharedMemory {
+    Owned(Arc<Mutex<Memory>>),
+    Borrowed(Weak<Mutex<Memory>>)
+}
+
+impl SharedMemory {
+    pub fn with<F, T>(&self, f: F) -> T where F: FnOnce(&mut Memory) -> T {
+        match *self {
+            SharedMemory::Owned(ref memory_lock) => {
+                let mut memory = memory_lock.lock();
+                f(&mut *memory)
+            },
+            SharedMemory::Borrowed(ref memory_weak) => {
+                let memory_lock = memory_weak.upgrade().expect("SharedMemory::Borrowed no longer valid");
+                let mut memory = memory_lock.lock();
+                f(&mut *memory)
+            }
+        }
+    }
+
+    pub fn borrow(&self) -> SharedMemory {
+        match *self {
+            SharedMemory::Owned(ref memory_lock) => SharedMemory::Borrowed(Arc::downgrade(memory_lock)),
+            SharedMemory::Borrowed(ref memory_lock) => SharedMemory::Borrowed(memory_lock.clone())
+        }
+    }
+}
+
 impl Memory {
     pub fn new(start: VirtualAddress, size: usize, flags: EntryFlags, flush: bool, clear: bool) -> Self {
         let mut memory = Memory {
@@ -23,6 +55,10 @@
         memory
     }
 
+    pub fn to_shared(self) -> SharedMemory {
+        SharedMemory::Owned(Arc::new(Mutex::new(self)))
+    }
+
     pub fn start_address(&self) -> VirtualAddress {
         self.start
     }
diff --git a/kernel/elf.rs b/kernel/elf.rs
index 7fb7f9a..180ff23 100644
--- a/kernel/elf.rs
+++ b/kernel/elf.rs
@@ -97,10 +97,18 @@
 
                 memory.remap(flags, true);
 
-                context.image.push(memory);
+                context.image.push(memory.to_shared());
             }
         }
 
+        context.heap = Some(context::memory::Memory::new(
+            VirtualAddress::new(arch::USER_HEAP_OFFSET),
+            0,
+            entry::NO_EXECUTE | entry::WRITABLE | entry::USER_ACCESSIBLE,
+            true,
+            true
+        ).to_shared());
+
         // Map stack
         context.stack = Some(context::memory::Memory::new(
             VirtualAddress::new(arch::USER_STACK_OFFSET),
diff --git a/kernel/syscall/process.rs b/kernel/syscall/process.rs
index 3ac8fc7..64e5a07 100644
--- a/kernel/syscall/process.rs
+++ b/kernel/syscall/process.rs
@@ -15,30 +15,29 @@ use syscall::{self, Error, Result};
 pub fn brk(address: usize) -> Result<usize> {
     let contexts = context::contexts();
     let context_lock = contexts.current().ok_or(Error::NoProcess)?;
-    let mut context = context_lock.write();
+    let context = context_lock.read();
+
+    let current = if let Some(ref heap_shared) = context.heap {
+        heap_shared.with(|heap| {
+            heap.start_address().get() + heap.size()
+        })
+    } else {
+        panic!("user heap not initialized");
+    };
 
-    let mut current = arch::USER_HEAP_OFFSET;
-    if let Some(ref heap) = context.heap {
-        current = heap.start_address().get() + heap.size();
-    }
-
     if address == 0 {
         //println!("Brk query {:X}", current);
         Ok(current)
     } else if address >= arch::USER_HEAP_OFFSET {
         //TODO: out of memory errors
-        if let Some(ref mut heap) = context.heap {
-            heap.resize(address - arch::USER_HEAP_OFFSET, true, true);
-            return Ok(address);
+        if let Some(ref heap_shared) = context.heap {
+            heap_shared.with(|heap| {
+                heap.resize(address - arch::USER_HEAP_OFFSET, true, true);
+            });
+        } else {
+            panic!("user heap not initialized");
         }
-
-        context.heap = Some(context::memory::Memory::new(
-            VirtualAddress::new(arch::USER_HEAP_OFFSET),
-            address - arch::USER_HEAP_OFFSET,
-            entry::WRITABLE | entry::NO_EXECUTE | entry::USER_ACCESSIBLE,
-            true,
-            true
-        ));
+
         Ok(address)
     } else {
         //TODO: Return correct error
@@ -85,38 +84,46 @@ pub fn clone(flags: usize, stack_base: usize) -> Result<usize> {
         if flags & CLONE_VM == CLONE_VM {
             panic!("unimplemented: CLONE_VM");
         } else {
-            for memory in context.image.iter() {
-                let mut new_memory = context::memory::Memory::new(
-                    VirtualAddress::new(memory.start_address().get() + arch::USER_TMP_OFFSET),
-                    memory.size(),
-                    entry::PRESENT | entry::NO_EXECUTE | entry::WRITABLE,
-                    true,
-                    false
-                );
-                unsafe {
-                    arch::externs::memcpy(new_memory.start_address().get() as *mut u8,
-                                          memory.start_address().get() as *const u8,
-                                          memory.size());
-                }
-                new_memory.remap(memory.flags(), true);
-                image.push(new_memory);
+            for memory_shared in context.image.iter() {
+                memory_shared.with(|memory| {
+                    let mut new_memory = context::memory::Memory::new(
+                        VirtualAddress::new(memory.start_address().get() + arch::USER_TMP_OFFSET),
+                        memory.size(),
+                        entry::PRESENT | entry::NO_EXECUTE | entry::WRITABLE,
+                        true,
+                        false
+                    );
+
+                    unsafe {
+                        arch::externs::memcpy(new_memory.start_address().get() as *mut u8,
+                                              memory.start_address().get() as *const u8,
+                                              memory.size());
+                    }
+
+                    new_memory.remap(memory.flags(), true);
+                    image.push(new_memory.to_shared());
+                });
             }
 
-            if let Some(ref heap) = context.heap {
-                let mut new_heap = context::memory::Memory::new(
-                    VirtualAddress::new(arch::USER_TMP_HEAP_OFFSET),
-                    heap.size(),
-                    entry::PRESENT | entry::NO_EXECUTE | entry::WRITABLE,
-                    true,
-                    false
-                );
-                unsafe {
-                    arch::externs::memcpy(new_heap.start_address().get() as *mut u8,
-                                          heap.start_address().get() as *const u8,
-                                          heap.size());
-                }
-                new_heap.remap(heap.flags(), true);
-                heap_option = Some(new_heap);
+            if let Some(ref heap_shared) = context.heap {
+                heap_shared.with(|heap| {
+                    let mut new_heap = context::memory::Memory::new(
+                        VirtualAddress::new(arch::USER_TMP_HEAP_OFFSET),
+                        heap.size(),
+                        entry::PRESENT | entry::NO_EXECUTE | entry::WRITABLE,
+                        true,
+                        false
+                    );
+
+                    unsafe {
+                        arch::externs::memcpy(new_heap.start_address().get() as *mut u8,
+                                              heap.start_address().get() as *const u8,
+                                              heap.size());
+                    }
+
+                    new_heap.remap(heap.flags(), true);
+                    heap_option = Some(new_heap.to_shared());
+                });
             }
         }
 
@@ -220,15 +227,19 @@
            context.kstack = Some(stack);
        }
 
-       for memory in image.iter_mut() {
-           let start = VirtualAddress::new(memory.start_address().get() - arch::USER_TMP_OFFSET + arch::USER_OFFSET);
-           memory.move_to(start, &mut new_table, &mut temporary_page, true);
+       for memory_shared in image.iter_mut() {
+           memory_shared.with(|memory| {
+               let start = VirtualAddress::new(memory.start_address().get() - arch::USER_TMP_OFFSET + arch::USER_OFFSET);
+               memory.move_to(start, &mut new_table, &mut temporary_page, true);
+           });
        }
        context.image = image;
 
-       if let Some(mut heap) = heap_option.take() {
-           heap.move_to(VirtualAddress::new(arch::USER_HEAP_OFFSET), &mut new_table, &mut temporary_page, true);
-           context.heap = Some(heap);
+       if let Some(heap_shared) = heap_option.take() {
+           heap_shared.with(|heap| {
+               heap.move_to(VirtualAddress::new(arch::USER_HEAP_OFFSET), &mut new_table, &mut temporary_page, true);
+           });
+           context.heap = Some(heap_shared);
        }
 
        if let Some(mut stack) = stack_option.take() {
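
Note on the pattern, for readers outside the kernel tree: the new SharedMemory type keeps a strong Arc in the context that owns a region and hands Weak references to everyone else, so a mapping is torn down when its owning context exits rather than when the last borrower drops it, and a borrower that outlives the owner fails loudly inside with() instead of silently keeping pages alive. The sketch below is illustrative only, not part of the patch: Region is a hypothetical stand-in for the kernel's Memory, and std::sync replaces the kernel's alloc::arc and spin::Mutex (std's Mutex::lock returns a Result and is unwrapped here, which the spin lock does not require).

    use std::sync::{Arc, Mutex, Weak};

    /// Hypothetical stand-in for the kernel's `Memory` struct.
    struct Region {
        size: usize,
    }

    enum SharedRegion {
        /// The owning context holds a strong reference.
        Owned(Arc<Mutex<Region>>),
        /// Borrowers hold weak references, so the region is freed
        /// when the owner drops it, not when the last borrower does.
        Borrowed(Weak<Mutex<Region>>),
    }

    impl SharedRegion {
        /// Run `f` against the underlying region, whichever variant we hold.
        fn with<F, T>(&self, f: F) -> T where F: FnOnce(&mut Region) -> T {
            match *self {
                SharedRegion::Owned(ref lock) => {
                    let mut region = lock.lock().unwrap();
                    f(&mut *region)
                },
                SharedRegion::Borrowed(ref weak) => {
                    // Mirrors the patch: a borrow that outlives its owner panics.
                    let lock = weak.upgrade().expect("borrowed region no longer valid");
                    let mut region = lock.lock().unwrap();
                    f(&mut *region)
                }
            }
        }

        /// Create a weak view; borrowing a borrow clones the weak reference.
        fn borrow(&self) -> SharedRegion {
            match *self {
                SharedRegion::Owned(ref lock) => SharedRegion::Borrowed(Arc::downgrade(lock)),
                SharedRegion::Borrowed(ref weak) => SharedRegion::Borrowed(weak.clone())
            }
        }
    }

    fn main() {
        let owned = SharedRegion::Owned(Arc::new(Mutex::new(Region { size: 4096 })));
        let borrowed = owned.borrow();

        // Both views reach the same region through `with`.
        borrowed.with(|region| region.size += 4096);
        assert_eq!(owned.with(|region| region.size), 8192);
    }

Worth noting: nothing in this diff calls borrow() yet. It reads as groundwork for the CLONE_VM path, which still panics above; a new thread would presumably push Borrowed entries into its image instead of copying pages the way the non-CLONE_VM path does.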