work on shared memory across threads
commit 8ee9f1d7da (parent 0b2fd79816)
@@ -3,7 +3,7 @@ use collections::Vec;
 use arch;
 use super::file::File;
-use super::memory::Memory;
+use super::memory::{Memory, SharedMemory};

 #[derive(Copy, Clone, Debug, Eq, PartialEq)]
 pub enum Status {

@@ -26,9 +26,9 @@ pub struct Context {
     /// Kernel stack
     pub kstack: Option<Box<[u8]>>,
     /// Executable image
-    pub image: Vec<Memory>,
+    pub image: Vec<SharedMemory>,
     /// User heap
-    pub heap: Option<Memory>,
+    pub heap: Option<SharedMemory>,
     /// User stack
     pub stack: Option<Memory>,
     /// The open files in the scheme
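
This struct change is the heart of the commit: image and heap stop being uniquely owned Memory regions and become reference-counted SharedMemory handles, so several contexts (threads) can alias the same executable image and heap. A minimal sketch of the aliasing this enables, using std::sync in place of the kernel's alloc::arc and spin::Mutex, with a hypothetical Image type standing in for Memory:

    use std::sync::{Arc, Mutex};

    struct Image { size: usize }

    // Simplified stand-in for Context: only the shared image field.
    struct Ctx { image: Vec<Arc<Mutex<Image>>> }

    fn main() {
        let segment = Arc::new(Mutex::new(Image { size: 4096 }));
        let parent = Ctx { image: vec![segment.clone()] };
        let child = Ctx { image: vec![segment.clone()] }; // same region, no copy

        // A change made through one context is visible through the other.
        child.image[0].lock().unwrap().size += 4096;
        assert_eq!(parent.image[0].lock().unwrap().size, 8192);
    }
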
@@ -1,3 +1,6 @@
+use alloc::arc::{Arc, Weak};
+use spin::Mutex;
+
 use arch::externs::memset;
 use arch::paging::{ActivePageTable, InactivePageTable, Page, PageIter, VirtualAddress};
 use arch::paging::entry::{self, EntryFlags};

@@ -10,6 +13,35 @@ pub struct Memory {
     flags: EntryFlags
 }

+#[derive(Debug)]
+pub enum SharedMemory {
+    Owned(Arc<Mutex<Memory>>),
+    Borrowed(Weak<Mutex<Memory>>)
+}
+
+impl SharedMemory {
+    pub fn with<F, T>(&self, f: F) -> T where F: FnOnce(&mut Memory) -> T {
+        match *self {
+            SharedMemory::Owned(ref memory_lock) => {
+                let mut memory = memory_lock.lock();
+                f(&mut *memory)
+            },
+            SharedMemory::Borrowed(ref memory_weak) => {
+                let memory_lock = memory_weak.upgrade().expect("SharedMemory::Borrowed no longer valid");
+                let mut memory = memory_lock.lock();
+                f(&mut *memory)
+            }
+        }
+    }
+
+    pub fn borrow(&self) -> SharedMemory {
+        match *self {
+            SharedMemory::Owned(ref memory_lock) => SharedMemory::Borrowed(Arc::downgrade(memory_lock)),
+            SharedMemory::Borrowed(ref memory_lock) => SharedMemory::Borrowed(memory_lock.clone())
+        }
+    }
+}
+
 impl Memory {
     pub fn new(start: VirtualAddress, size: usize, flags: EntryFlags, flush: bool, clear: bool) -> Self {
         let mut memory = Memory {
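
SharedMemory encodes ownership in the type: the creating context holds Owned (a strong Arc), while other threads hold Borrowed (a Weak that fails to upgrade once the owner is gone). All access goes through with, which runs a closure while the Mutex is held, so no reference to the region can escape the lock; that is also why with returns a plain value T instead of handing out a guard. A compilable sketch of the same pattern, substituting std::sync for alloc::arc and spin::Mutex (the spin lock returns its guard directly, hence the extra unwrap here):

    use std::sync::{Arc, Mutex, Weak};

    // Hypothetical stand-in for the kernel's Memory region.
    struct Memory { size: usize }

    enum SharedMemory {
        Owned(Arc<Mutex<Memory>>),
        Borrowed(Weak<Mutex<Memory>>),
    }

    impl SharedMemory {
        // The closure runs under the lock; its result is passed back out.
        fn with<F, T>(&self, f: F) -> T where F: FnOnce(&mut Memory) -> T {
            match *self {
                SharedMemory::Owned(ref lock) => {
                    let mut memory = lock.lock().unwrap();
                    f(&mut *memory)
                }
                SharedMemory::Borrowed(ref weak) => {
                    let lock = weak.upgrade().expect("borrow outlived owner");
                    let mut memory = lock.lock().unwrap();
                    f(&mut *memory)
                }
            }
        }

        // Borrowing never creates a second owner: a Weak is handed out instead.
        fn borrow(&self) -> SharedMemory {
            match *self {
                SharedMemory::Owned(ref lock) => SharedMemory::Borrowed(Arc::downgrade(lock)),
                SharedMemory::Borrowed(ref weak) => SharedMemory::Borrowed(weak.clone()),
            }
        }
    }

    fn main() {
        let owned = SharedMemory::Owned(Arc::new(Mutex::new(Memory { size: 0 })));
        let borrowed = owned.borrow();
        borrowed.with(|m| m.size = 4096);         // mutate through the borrow
        assert_eq!(owned.with(|m| m.size), 4096); // the owner sees the change
    }
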
@@ -23,6 +55,10 @@ impl Memory {
         memory
     }

+    pub fn to_shared(self) -> SharedMemory {
+        SharedMemory::Owned(Arc::new(Mutex::new(self)))
+    }
+
     pub fn start_address(&self) -> VirtualAddress {
         self.start
     }

@@ -97,10 +97,18 @@ impl<'a> Elf<'a> {

                     memory.remap(flags, true);

-                    context.image.push(memory);
+                    context.image.push(memory.to_shared());
                 }
             }

+            context.heap = Some(context::memory::Memory::new(
+                VirtualAddress::new(arch::USER_HEAP_OFFSET),
+                0,
+                entry::NO_EXECUTE | entry::WRITABLE | entry::USER_ACCESSIBLE,
+                true,
+                true
+            ).to_shared());
+
             // Map stack
             context.stack = Some(context::memory::Memory::new(
                 VirtualAddress::new(arch::USER_STACK_OFFSET),
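
Two things happen in this hunk: each loaded segment is pushed as a shared handle (to_shared consumes the Memory and wraps it in the single Owned Arc), and the user heap is now created up front with size 0. That guarantee is what lets brk below assume the heap always exists and grow it in place rather than re-create it, which would invalidate other threads' borrows. A small sketch of that heap lifecycle, with a hypothetical Heap stand-in and HEAP_OFFSET playing the role of arch::USER_HEAP_OFFSET (real page mapping elided):

    const HEAP_OFFSET: usize = 0x8000_0000;

    struct Heap { start: usize, size: usize }

    impl Heap {
        fn resize(&mut self, new_size: usize) { self.size = new_size; }
        fn end(&self) -> usize { self.start + self.size }
    }

    fn main() {
        // Loader: the heap exists from the start, but is empty.
        let mut heap = Heap { start: HEAP_OFFSET, size: 0 };
        assert_eq!(heap.end(), HEAP_OFFSET); // what brk(0) would report

        // Later, brk(address) grows the same region in place.
        let address = HEAP_OFFSET + 16 * 4096;
        heap.resize(address - HEAP_OFFSET);
        assert_eq!(heap.end(), address);
    }
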
@@ -15,30 +15,29 @@ use syscall::{self, Error, Result};
 pub fn brk(address: usize) -> Result<usize> {
     let contexts = context::contexts();
     let context_lock = contexts.current().ok_or(Error::NoProcess)?;
-    let mut context = context_lock.write();
+    let context = context_lock.read();

-    let mut current = arch::USER_HEAP_OFFSET;
-    if let Some(ref heap) = context.heap {
-        current = heap.start_address().get() + heap.size();
-    }
+    let current = if let Some(ref heap_shared) = context.heap {
+        heap_shared.with(|heap| {
+            heap.start_address().get() + heap.size()
+        })
+    } else {
+        panic!("user heap not initialized");
+    };
+
     if address == 0 {
         //println!("Brk query {:X}", current);
         Ok(current)
     } else if address >= arch::USER_HEAP_OFFSET {
         //TODO: out of memory errors
-        if let Some(ref mut heap) = context.heap {
-            heap.resize(address - arch::USER_HEAP_OFFSET, true, true);
-            return Ok(address);
+        if let Some(ref heap_shared) = context.heap {
+            heap_shared.with(|heap| {
+                heap.resize(address - arch::USER_HEAP_OFFSET, true, true);
+            });
+        } else {
+            panic!("user heap not initialized");
         }

-        context.heap = Some(context::memory::Memory::new(
-            VirtualAddress::new(arch::USER_HEAP_OFFSET),
-            address - arch::USER_HEAP_OFFSET,
-            entry::WRITABLE | entry::NO_EXECUTE | entry::USER_ACCESSIBLE,
-            true,
-            true
-        ));
-
         Ok(address)
     } else {
         //TODO: Return correct error
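
Note the lock change at the top of brk: context_lock.write() becomes context_lock.read(). The context itself is no longer mutated; heap growth happens through the Mutex inside SharedMemory, so a read lock on the context suffices and its other fields stay readable concurrently. The heap-creation fallback is gone because the ELF loader now always installs a heap, hence the panics on the now-impossible path. A sketch of that locking shape with std stand-ins (RwLock for the context lock, Mutex for the region):

    use std::sync::{Arc, Mutex, RwLock};

    struct Heap { size: usize }
    struct Context { heap: Option<Arc<Mutex<Heap>>> }

    fn brk_grow(context_lock: &RwLock<Context>, new_size: usize) {
        let context = context_lock.read().unwrap(); // read, not write
        if let Some(ref heap_shared) = context.heap {
            // Interior mutability: the region's own lock guards the change.
            heap_shared.lock().unwrap().size = new_size;
        } else {
            panic!("user heap not initialized");
        }
    }

    fn main() {
        let ctx = RwLock::new(Context {
            heap: Some(Arc::new(Mutex::new(Heap { size: 0 }))),
        });
        brk_grow(&ctx, 4096);
        assert_eq!(ctx.read().unwrap().heap.as_ref().unwrap().lock().unwrap().size, 4096);
    }
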
@@ -85,38 +84,46 @@ pub fn clone(flags: usize, stack_base: usize) -> Result<usize> {
         if flags & CLONE_VM == CLONE_VM {
             panic!("unimplemented: CLONE_VM");
         } else {
-            for memory in context.image.iter() {
-                let mut new_memory = context::memory::Memory::new(
-                    VirtualAddress::new(memory.start_address().get() + arch::USER_TMP_OFFSET),
-                    memory.size(),
-                    entry::PRESENT | entry::NO_EXECUTE | entry::WRITABLE,
-                    true,
-                    false
-                );
-                unsafe {
-                    arch::externs::memcpy(new_memory.start_address().get() as *mut u8,
-                                          memory.start_address().get() as *const u8,
-                                          memory.size());
-                }
-                new_memory.remap(memory.flags(), true);
-                image.push(new_memory);
+            for memory_shared in context.image.iter() {
+                memory_shared.with(|memory| {
+                    let mut new_memory = context::memory::Memory::new(
+                        VirtualAddress::new(memory.start_address().get() + arch::USER_TMP_OFFSET),
+                        memory.size(),
+                        entry::PRESENT | entry::NO_EXECUTE | entry::WRITABLE,
+                        true,
+                        false
+                    );
+
+                    unsafe {
+                        arch::externs::memcpy(new_memory.start_address().get() as *mut u8,
+                                              memory.start_address().get() as *const u8,
+                                              memory.size());
+                    }
+
+                    new_memory.remap(memory.flags(), true);
+                    image.push(new_memory.to_shared());
+                });
             }

-            if let Some(ref heap) = context.heap {
-                let mut new_heap = context::memory::Memory::new(
-                    VirtualAddress::new(arch::USER_TMP_HEAP_OFFSET),
-                    heap.size(),
-                    entry::PRESENT | entry::NO_EXECUTE | entry::WRITABLE,
-                    true,
-                    false
-                );
-                unsafe {
-                    arch::externs::memcpy(new_heap.start_address().get() as *mut u8,
-                                          heap.start_address().get() as *const u8,
-                                          heap.size());
-                }
-                new_heap.remap(heap.flags(), true);
-                heap_option = Some(new_heap);
+            if let Some(ref heap_shared) = context.heap {
+                heap_shared.with(|heap| {
+                    let mut new_heap = context::memory::Memory::new(
+                        VirtualAddress::new(arch::USER_TMP_HEAP_OFFSET),
+                        heap.size(),
+                        entry::PRESENT | entry::NO_EXECUTE | entry::WRITABLE,
+                        true,
+                        false
+                    );
+
+                    unsafe {
+                        arch::externs::memcpy(new_heap.start_address().get() as *mut u8,
+                                              heap.start_address().get() as *const u8,
+                                              heap.size());
+                    }
+
+                    new_heap.remap(heap.flags(), true);
+                    heap_option = Some(new_heap.to_shared());
+                });
             }
         }
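
The non-CLONE_VM path still deep-copies: each region is duplicated into a temporary window, byte-copied, remapped with the source's flags, and only then wrapped as a fresh Owned handle for the child. Running the whole copy inside with matters, because the source's lock is held for the duration, so another thread sharing that region cannot resize or move it mid-memcpy. The shape of that copy-under-lock step, sketched with std types and a Vec standing in for raw pages:

    use std::sync::{Arc, Mutex};

    struct Region { data: Vec<u8> }

    fn main() {
        let shared = Arc::new(Mutex::new(Region { data: vec![7u8; 4096] }));

        // Deep copy while holding the source's lock, then wrap the copy
        // as an independent handle for the child context.
        let child = {
            let source = shared.lock().unwrap(); // held for the whole copy
            Arc::new(Mutex::new(Region { data: source.data.clone() }))
        };

        // The two regions are now independent.
        child.lock().unwrap().data[0] = 9;
        assert_eq!(shared.lock().unwrap().data[0], 7);
    }
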
@@ -220,15 +227,19 @@ pub fn clone(flags: usize, stack_base: usize) -> Result<usize> {
             context.kstack = Some(stack);
         }

-        for memory in image.iter_mut() {
-            let start = VirtualAddress::new(memory.start_address().get() - arch::USER_TMP_OFFSET + arch::USER_OFFSET);
-            memory.move_to(start, &mut new_table, &mut temporary_page, true);
+        for memory_shared in image.iter_mut() {
+            memory_shared.with(|memory| {
+                let start = VirtualAddress::new(memory.start_address().get() - arch::USER_TMP_OFFSET + arch::USER_OFFSET);
+                memory.move_to(start, &mut new_table, &mut temporary_page, true);
+            });
         }
         context.image = image;

-        if let Some(mut heap) = heap_option.take() {
-            heap.move_to(VirtualAddress::new(arch::USER_HEAP_OFFSET), &mut new_table, &mut temporary_page, true);
-            context.heap = Some(heap);
+        if let Some(heap_shared) = heap_option.take() {
+            heap_shared.with(|heap| {
+                heap.move_to(VirtualAddress::new(arch::USER_HEAP_OFFSET), &mut new_table, &mut temporary_page, true);
+            });
+            context.heap = Some(heap_shared);
         }

         if let Some(mut stack) = stack_option.take() {
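
Finally, the copied regions are relocated from the temporary window to their real addresses inside the child's new page table; the relocation happens under each region's lock via with, and only afterwards are the handles themselves stored in the child context. A sketch of that relocate-then-store order, with dummy types and offsets in place of the arch constants and real paging:

    use std::sync::{Arc, Mutex};

    const TMP_OFFSET: usize = 0x8000_0000; // stand-in for arch::USER_TMP_OFFSET
    const USER_OFFSET: usize = 0x10_0000;  // stand-in for arch::USER_OFFSET

    struct Region { start: usize }

    fn main() {
        let image: Vec<Arc<Mutex<Region>>> =
            vec![Arc::new(Mutex::new(Region { start: TMP_OFFSET + 0x1000 }))];

        // Relocate each region while holding its lock...
        for shared in image.iter() {
            let mut region = shared.lock().unwrap();
            region.start = region.start - TMP_OFFSET + USER_OFFSET;
        }

        // ...then hand the handles to the child (context.image = image).
        let child_image = image;
        assert_eq!(child_image[0].lock().unwrap().start, USER_OFFSET + 0x1000);
    }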