Grant to allow passing data to scheme handler

Jeremy Soller 2016-09-20 14:50:04 -06:00
parent 2b915953c9
commit 941fc0b494
10 changed files with 205 additions and 19 deletions
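For orientation, here is a minimal sketch (not part of this commit) of what the change enables on the scheme-handler side: once the kernel captures a caller's buffer into a grant, the pointer and length delivered in a Packet refer to memory mapped into the handler's own address space, so the handler can read them directly, as the pcid hunk below does. The Packet struct here is only a stand-in with the fields the pcid example touches (a = call number, b = pointer, c = length, d = extra argument); the value 5 mirrors pcid's check for an open call, and the path string is made up.

// Illustrative stand-in for the kernel's Packet; the real definition lives in
// the scheme code and has more fields.
#[derive(Debug)]
struct Packet {
    a: usize,
    b: usize,
    c: usize,
    d: usize,
}

// Interpret a granted (pointer, length) pair as a byte slice. This is only
// sound because the kernel has mapped that range into this process via a Grant.
unsafe fn granted_bytes(packet: &Packet) -> &[u8] {
    std::slice::from_raw_parts(packet.b as *const u8, packet.c)
}

fn main() {
    // Stand-in for a packet a handler might receive for an `open` call.
    let path = b"pci:00.1/2"; // made-up path
    let packet = Packet { a: 5, b: path.as_ptr() as usize, c: path.len(), d: 0 };
    println!("{:?}", packet);
    let bytes = unsafe { granted_bytes(&packet) };
    println!("open path: {}", String::from_utf8_lossy(bytes));
}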

View file

@@ -43,6 +43,10 @@ impl Context {
         }
     }
 
+    pub fn get_page_table(&self) -> usize {
+        self.cr3
+    }
+
     pub fn set_page_table(&mut self, address: usize) {
         self.cr3 = address;
     }

View file

@@ -59,6 +59,9 @@ pub extern crate x86
 
 /// Size of user stack
 pub const USER_STACK_SIZE: usize = 1024 * 1024; // 1 MB
 
+/// Offset to user grants
+pub const USER_GRANT_OFFSET: usize = USER_STACK_OFFSET + PML4_SIZE;
+
 /// Offset to user temporary image (used when cloning)
 pub const USER_TMP_OFFSET: usize = USER_STACK_OFFSET + PML4_SIZE;
@@ -68,6 +71,9 @@ pub extern crate x86
 
 /// Offset to user temporary stack (used when cloning)
 pub const USER_TMP_STACK_OFFSET: usize = USER_TMP_HEAP_OFFSET + PML4_SIZE;
 
+/// Offset to user temporary page for grants
+pub const USER_TMP_GRANT_OFFSET: usize = USER_TMP_STACK_OFFSET + PML4_SIZE;
+
 /// Print to console
 #[macro_export]

View file

@@ -328,6 +328,10 @@ impl InactivePageTable {
         InactivePageTable { p4_frame: frame }
     }
 
+    pub unsafe fn from_address(cr3: usize) -> InactivePageTable {
+        InactivePageTable { p4_frame: Frame::containing_address(PhysicalAddress::new(cr3)) }
+    }
+
     pub unsafe fn address(&self) -> usize {
         self.p4_frame.start_address().get()
     }

View file

@@ -12,8 +12,8 @@ use externs::memset;
 use gdt;
 use idt;
 use interrupt;
-use memory::{self, Frame};
-use paging::{self, entry, Page, PhysicalAddress, VirtualAddress};
+use memory;
+use paging::{self, entry, Page, VirtualAddress};
 
 /// Test of zero values in BSS.
 static BSS_TEST_ZERO: usize = 0;

View file

@@ -84,6 +84,9 @@ fn main() {
         scheme.read(&mut packet).expect("pcid: failed to read events from pci scheme");
         println!("{:?}", packet);
 
+        if packet.a == 5 {
+            println!("{}", unsafe { ::std::str::from_utf8_unchecked(::std::slice::from_raw_parts(packet.b as *const u8, packet.c)) });
+        }
 
         packet.a = 0;

View file

@@ -5,7 +5,7 @@ use spin::Mutex;
 
 use arch;
 use super::file::File;
-use super::memory::{Memory, SharedMemory};
+use super::memory::{Grant, Memory, SharedMemory};
 
 #[derive(Copy, Clone, Debug, Eq, PartialEq)]
 pub enum Status {
@@ -33,6 +33,8 @@ pub struct Context {
     pub heap: Option<SharedMemory>,
     /// User stack
     pub stack: Option<Memory>,
+    /// User grants
+    pub grants: Arc<Mutex<Vec<Grant>>>,
     /// The current working directory
     pub cwd: Arc<Mutex<Vec<u8>>>,
     /// The open files in the scheme
@@ -51,6 +53,7 @@ impl Context {
             image: Vec::new(),
             heap: None,
             stack: None,
+            grants: Arc::new(Mutex::new(Vec::new())),
             cwd: Arc::new(Mutex::new(Vec::new())),
             files: Arc::new(Mutex::new(Vec::new()))
         }

View file

@@ -1,3 +1,4 @@
+use alloc::arc::Arc;
 use collections::BTreeMap;
 use core::mem;
 use core::sync::atomic::Ordering;
@@ -9,7 +10,7 @@ use super::context::Context;
 
 /// Context list type
 pub struct ContextList {
-    map: BTreeMap<usize, RwLock<Context>>,
+    map: BTreeMap<usize, Arc<RwLock<Context>>>,
     next_id: usize
 }
@@ -23,21 +24,21 @@ impl ContextList {
     }
 
     /// Get the nth context.
-    pub fn get(&self, id: usize) -> Option<&RwLock<Context>> {
+    pub fn get(&self, id: usize) -> Option<&Arc<RwLock<Context>>> {
         self.map.get(&id)
     }
 
     /// Get the current context.
-    pub fn current(&self) -> Option<&RwLock<Context>> {
+    pub fn current(&self) -> Option<&Arc<RwLock<Context>>> {
         self.map.get(&super::CONTEXT_ID.load(Ordering::SeqCst))
     }
 
-    pub fn iter(&self) -> ::collections::btree_map::Iter<usize, RwLock<Context>> {
+    pub fn iter(&self) -> ::collections::btree_map::Iter<usize, Arc<RwLock<Context>>> {
         self.map.iter()
     }
 
     /// Create a new context.
-    pub fn new_context(&mut self) -> Result<&RwLock<Context>> {
+    pub fn new_context(&mut self) -> Result<&Arc<RwLock<Context>>> {
         if self.next_id >= super::CONTEXT_MAX_CONTEXTS {
             self.next_id = 1;
         }
@@ -53,13 +54,13 @@ impl ContextList {
         let id = self.next_id;
         self.next_id += 1;
 
-        assert!(self.map.insert(id, RwLock::new(Context::new(id))).is_none());
+        assert!(self.map.insert(id, Arc::new(RwLock::new(Context::new(id)))).is_none());
 
         Ok(self.map.get(&id).expect("Failed to insert new context. ID is out of bounds."))
     }
 
     /// Spawn a context from a function.
-    pub fn spawn(&mut self, func: extern fn()) -> Result<&RwLock<Context>> {
+    pub fn spawn(&mut self, func: extern fn()) -> Result<&Arc<RwLock<Context>>> {
         let context_lock = self.new_context()?;
         {
             let mut context = context_lock.write();
@@ -77,7 +78,7 @@ impl ContextList {
         Ok(context_lock)
     }
 
-    pub fn remove(&mut self, id: usize) -> Option<RwLock<Context>> {
+    pub fn remove(&mut self, id: usize) -> Option<Arc<RwLock<Context>>> {
         self.map.remove(&id)
     }
 }
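An aside on this list.rs change: wrapping each entry in Arc is what lets RootScheme::open hand UserInner a Weak reference to the registering context (see the root.rs and user.rs hunks below), which capture_inner later upgrades to reach that context's grants and page table. Here is a minimal sketch of that ownership pattern, using std's Arc and RwLock in place of the kernel's alloc::arc and spin types, with a stand-in Context type:

use std::collections::BTreeMap;
use std::sync::{Arc, RwLock, Weak};

// Stand-in for the kernel's Context; the real type holds registers,
// memory regions, grants, and so on.
struct Context { id: usize }

fn main() {
    // With Arc around each entry, a scheme can keep a Weak handle to the
    // context that registered it without preventing the context's removal.
    let mut map: BTreeMap<usize, Arc<RwLock<Context>>> = BTreeMap::new();
    map.insert(1, Arc::new(RwLock::new(Context { id: 1 })));

    let weak: Weak<RwLock<Context>> = Arc::downgrade(map.get(&1).unwrap());

    // Later (as in capture_inner), the scheme upgrades the Weak handle;
    // if the context has exited and been removed, upgrade() returns None.
    if let Some(ctx) = weak.upgrade() {
        println!("context {} is still alive", ctx.read().unwrap().id);
    }

    map.remove(&1);
    assert!(weak.upgrade().is_none(), "dropping the Arc invalidates the Weak");
}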

View file

@@ -1,4 +1,5 @@
 use alloc::arc::{Arc, Weak};
+use collections::VecDeque;
 use spin::Mutex;
 
 use arch::externs::memset;
@@ -7,12 +8,66 @@ use arch::paging::entry::{self, EntryFlags};
 use arch::paging::temporary_page::TemporaryPage;
 
 #[derive(Debug)]
-pub struct Memory {
+pub struct Grant {
     start: VirtualAddress,
     size: usize,
     flags: EntryFlags
 }
 
+impl Grant {
+    pub fn new(from: VirtualAddress, to: VirtualAddress, size: usize, flags: EntryFlags, new_table: &mut InactivePageTable, temporary_page: &mut TemporaryPage) -> Grant {
+        let mut active_table = unsafe { ActivePageTable::new() };
+
+        let mut frames = VecDeque::new();
+
+        let start_page = Page::containing_address(from);
+        let end_page = Page::containing_address(VirtualAddress::new(from.get() + size - 1));
+        for page in Page::range_inclusive(start_page, end_page) {
+            let frame = active_table.translate_page(page).expect("grant references unmapped memory");
+            frames.push_back(frame);
+        }
+
+        active_table.with(new_table, temporary_page, |mapper| {
+            let start_page = Page::containing_address(to);
+            let end_page = Page::containing_address(VirtualAddress::new(to.get() + size - 1));
+            for page in Page::range_inclusive(start_page, end_page) {
+                let frame = frames.pop_front().expect("grant did not find enough frames");
+                mapper.map_to(page, frame, flags);
+            }
+        });
+
+        Grant {
+            start: to,
+            size: size,
+            flags: flags
+        }
+    }
+
+    pub fn destroy(self, new_table: &mut InactivePageTable, temporary_page: &mut TemporaryPage) {
+        let mut active_table = unsafe { ActivePageTable::new() };
+
+        active_table.with(new_table, temporary_page, |mapper| {
+            let start_page = Page::containing_address(self.start);
+            let end_page = Page::containing_address(VirtualAddress::new(self.start.get() + self.size - 1));
+            for page in Page::range_inclusive(start_page, end_page) {
+                mapper.unmap_return(page);
+            }
+        });
+    }
+
+    pub fn start_address(&self) -> VirtualAddress {
+        self.start
+    }
+
+    pub fn size(&self) -> usize {
+        self.size
+    }
+
+    pub fn flags(&self) -> EntryFlags {
+        self.flags
+    }
+}
+
 #[derive(Clone, Debug)]
 pub enum SharedMemory {
     Owned(Arc<Mutex<Memory>>),
@@ -42,6 +97,13 @@ impl SharedMemory {
     }
 }
 
+#[derive(Debug)]
+pub struct Memory {
+    start: VirtualAddress,
+    size: usize,
+    flags: EntryFlags
+}
+
 impl Memory {
     pub fn new(start: VirtualAddress, size: usize, flags: EntryFlags, flush: bool, clear: bool) -> Self {
         let mut memory = Memory {

View file

@@ -4,6 +4,7 @@ use collections::BTreeMap;
 use core::sync::atomic::{AtomicUsize, Ordering};
 use spin::RwLock;
 
+use context;
 use syscall::{Error, Result};
 use scheme::{self, Scheme};
 use scheme::user::{UserInner, UserScheme};
@@ -24,12 +25,18 @@ impl RootScheme {
 
 impl Scheme for RootScheme {
     fn open(&self, path: &[u8], _flags: usize) -> Result<usize> {
+        let context = {
+            let contexts = context::contexts();
+            let context = contexts.current().ok_or(Error::NoProcess)?;
+            Arc::downgrade(&context)
+        };
+
         let inner = {
            let mut schemes = scheme::schemes_mut();
            if schemes.get_name(path).is_some() {
                return Err(Error::FileExists);
            }
-           let inner = Arc::new(UserInner::new());
+           let inner = Arc::new(UserInner::new(context));
            schemes.insert(path.to_vec().into_boxed_slice(), Arc::new(Box::new(UserScheme::new(Arc::downgrade(&inner))))).expect("failed to insert user scheme");
            inner
         };

View file

@@ -2,9 +2,13 @@ use alloc::arc::Weak;
 use collections::{BTreeMap, VecDeque};
 use core::sync::atomic::{AtomicUsize, Ordering};
 use core::{mem, usize};
-use spin::Mutex;
+use spin::{Mutex, RwLock};
 
-use context;
+use arch;
+use arch::paging::{InactivePageTable, Page, VirtualAddress, entry};
+use arch::paging::temporary_page::TemporaryPage;
+use context::{self, Context};
+use context::memory::Grant;
 use syscall::{convert_to_result, Call, Error, Result};
 
 use super::Scheme;
@@ -21,14 +25,16 @@ pub struct Packet {
 
 pub struct UserInner {
     next_id: AtomicUsize,
+    context: Weak<RwLock<Context>>,
     todo: Mutex<VecDeque<Packet>>,
     done: Mutex<BTreeMap<usize, usize>>
 }
 
 impl UserInner {
-    pub fn new() -> UserInner {
+    pub fn new(context: Weak<RwLock<Context>>) -> UserInner {
         UserInner {
             next_id: AtomicUsize::new(0),
+            context: context,
             todo: Mutex::new(VecDeque::new()),
             done: Mutex::new(BTreeMap::new())
         }
@@ -59,6 +65,87 @@ impl UserInner {
         }
     }
 
+    pub fn capture(&self, buf: &[u8]) -> Result<usize> {
+        self.capture_inner(buf.as_ptr() as usize, buf.len(), false)
+    }
+
+    pub fn capture_mut(&self, buf: &mut [u8]) -> Result<usize> {
+        self.capture_inner(buf.as_mut_ptr() as usize, buf.len(), true)
+    }
+
+    fn capture_inner(&self, address: usize, size: usize, writable: bool) -> Result<usize> {
+        let context_lock = self.context.upgrade().ok_or(Error::NoProcess)?;
+        let context = context_lock.read();
+
+        let mut grants = context.grants.lock();
+
+        let mut new_table = unsafe { InactivePageTable::from_address(context.arch.get_page_table()) };
+        let mut temporary_page = TemporaryPage::new(Page::containing_address(VirtualAddress::new(arch::USER_TMP_GRANT_OFFSET)));
+
+        let from_address = (address/4096) * 4096;
+        let offset = address - from_address;
+        let full_size = ((offset + size + 4095)/4096) * 4096;
+        let mut to_address = arch::USER_GRANT_OFFSET;
+
+        let mut flags = entry::PRESENT | entry::NO_EXECUTE;
+        if writable {
+            flags |= entry::WRITABLE;
+        }
+
+        for i in 0 .. grants.len() {
+            let start = grants[i].start_address().get();
+            if to_address + full_size < start {
+                grants.insert(i, Grant::new(
+                    VirtualAddress::new(from_address),
+                    VirtualAddress::new(to_address),
+                    full_size,
+                    flags,
+                    &mut new_table,
+                    &mut temporary_page
+                ));
+
+                return Ok(to_address + offset);
+            } else {
+                let pages = (grants[i].size() + 4095) / 4096;
+                let end = start + pages * 4096;
+                to_address = end;
+            }
+        }
+
+        grants.push(Grant::new(
+            VirtualAddress::new(from_address),
+            VirtualAddress::new(to_address),
+            full_size,
+            flags,
+            &mut new_table,
+            &mut temporary_page
+        ));
+
+        return Ok(to_address + offset);
+    }
+
+    pub fn release(&self, address: usize) -> Result<()> {
+        let context_lock = self.context.upgrade().ok_or(Error::NoProcess)?;
+        let context = context_lock.read();
+
+        let mut grants = context.grants.lock();
+
+        let mut new_table = unsafe { InactivePageTable::from_address(context.arch.get_page_table()) };
+        let mut temporary_page = TemporaryPage::new(Page::containing_address(VirtualAddress::new(arch::USER_TMP_GRANT_OFFSET)));
+
+        for i in 0 .. grants.len() {
+            let start = grants[i].start_address().get();
+            let end = start + grants[i].size();
+            if address >= start && address < end {
+                grants.remove(i).destroy(&mut new_table, &mut temporary_page);
+
+                return Ok(());
+            }
+        }
+
+        Err(Error::Fault)
+    }
+
     pub fn read(&self, buf: &mut [u8]) -> Result<usize> {
         let packet_size = mem::size_of::<Packet>();
         let len = buf.len()/packet_size;
@@ -115,7 +202,10 @@ impl UserScheme {
 impl Scheme for UserScheme {
     fn open(&self, path: &[u8], flags: usize) -> Result<usize> {
         let inner = self.inner.upgrade().ok_or(Error::NoDevice)?;
-        inner.call(Call::Open, path.as_ptr() as usize, path.len(), flags)
+        let address = inner.capture(path)?;
+        let result = inner.call(Call::Open, address, path.len(), flags);
+        let _ = inner.release(address);
+        result
     }
 
     fn dup(&self, file: usize) -> Result<usize> {
@@ -125,12 +215,18 @@ impl Scheme for UserScheme {
 
     fn read(&self, file: usize, buf: &mut [u8]) -> Result<usize> {
         let inner = self.inner.upgrade().ok_or(Error::NoDevice)?;
-        inner.call(Call::Read, file, buf.as_mut_ptr() as usize, buf.len())
+        let address = inner.capture_mut(buf)?;
+        let result = inner.call(Call::Read, file, address, buf.len());
+        let _ = inner.release(address);
+        result
     }
 
     fn write(&self, file: usize, buf: &[u8]) -> Result<usize> {
         let inner = self.inner.upgrade().ok_or(Error::NoDevice)?;
-        inner.call(Call::Write, file, buf.as_ptr() as usize, buf.len())
+        let address = inner.capture(buf)?;
+        let result = inner.call(Call::Write, file, buf.as_ptr() as usize, buf.len());
+        let _ = inner.release(address);
+        result
     }
 
     fn fsync(&self, file: usize) -> Result<()> {
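To make the rounding in capture_inner above concrete: the caller's buffer is rounded down to a page boundary, a whole number of pages is granted, and the scheme receives the grant base plus the original offset into the first page. Below is a small standalone sketch of the same arithmetic; the grant_base value is made up and only stands in for arch::USER_GRANT_OFFSET.

const PAGE_SIZE: usize = 4096;

// Mirrors the rounding in capture_inner: returns (page-aligned source start,
// offset into the first page, page-rounded mapping size).
fn grant_span(address: usize, size: usize) -> (usize, usize, usize) {
    let from_address = (address / PAGE_SIZE) * PAGE_SIZE;
    let offset = address - from_address;
    let full_size = ((offset + size + PAGE_SIZE - 1) / PAGE_SIZE) * PAGE_SIZE;
    (from_address, offset, full_size)
}

fn main() {
    // A caller buffer at 0x8000_1234 of 0x20 bytes maps as one whole page;
    // the address handed to the scheme is the grant base plus 0x234.
    let grant_base = 0x8000_0000_0000usize; // stand-in for arch::USER_GRANT_OFFSET
    let (from, offset, full) = grant_span(0x8000_1234, 0x20);
    assert_eq!((from, offset, full), (0x8000_1000, 0x234, 0x1000));
    println!("scheme sees address {:#x}", grant_base + offset);
}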