Orbital (#16)
* Port previous ethernet scheme
* Add ipd
* Fix initfs rebuilds, use QEMU user networking addresses in ipd
* Add tcp/udp, netutils, dns, and network config
* Add fsync to network driver
* Add dns, router, subnet by default
* Fix e1000 driver. Make ethernet and IP non-blocking to avoid deadlocks
* Add orbital server, WIP
* Add futex
* Add orbutils and orbital
* Update libstd, orbutils, and orbital; move ANSI key encoding to vesad
* Add orbital assets
* Update orbital
* Update to add login manager
* Add blocking primitives, block for most things except waitpid, update orbital
* Wait in waitpid and IRQ, improvements for other waits
* Fevent in root scheme
* WIP: Switch to using fevent
* Reorganize
* Event based e1000d driver
* Superuser-only access to some network schemes, display, and disk
* Superuser root and irq schemes
* Fix orbital
parent 372d44f88c
commit 224c43f761
92 changed files with 3415 additions and 473 deletions
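The kernel-side theme running through the hunks below is the removal of polling loops (`unsafe { context::switch(); } //TODO: Block` around a `Mutex<VecDeque<T>>`) in favor of the new blocking primitives `WaitQueue`, `WaitCondition`, and `WaitMap` from the `sync` module. The following is only a rough, `std`-based illustration of the `WaitQueue` API shape these hunks rely on; the kernel's `sync::WaitQueue` is built on spin locks and the scheduler, and its actual implementation is not part of this diff.

```rust
use std::collections::VecDeque;
use std::sync::{Condvar, Mutex};

/// Illustrative blocking queue: producers push items, consumers sleep until data arrives.
pub struct WaitQueue<T> {
    inner: Mutex<VecDeque<T>>,
    condition: Condvar,
}

impl<T> WaitQueue<T> {
    pub fn new() -> Self {
        WaitQueue {
            inner: Mutex::new(VecDeque::new()),
            condition: Condvar::new(),
        }
    }

    /// Push one item, wake a sleeping receiver, and return the new queue length.
    pub fn send(&self, value: T) -> usize {
        let mut queue = self.inner.lock().unwrap();
        queue.push_back(value);
        self.condition.notify_one();
        queue.len()
    }

    /// Block until at least one item is available, then move as many items as
    /// fit into `buf`, returning how many were written.
    pub fn receive_into(&self, buf: &mut [T]) -> usize {
        if buf.is_empty() {
            return 0;
        }
        let mut queue = self.inner.lock().unwrap();
        while queue.is_empty() {
            queue = self.condition.wait(queue).unwrap();
        }
        let mut i = 0;
        while i < buf.len() {
            match queue.pop_front() {
                Some(value) => {
                    buf[i] = value;
                    i += 1;
                }
                None => break,
            }
        }
        i
    }
}

fn main() {
    use std::{sync::Arc, thread};

    let queue = Arc::new(WaitQueue::new());
    let producer = queue.clone();
    thread::spawn(move || {
        for b in b"hello".iter() {
            producer.send(*b);
        }
    });

    let mut buf = [0u8; 8];
    let n = queue.receive_into(&mut buf); // blocks until at least one byte arrives
    println!("got {} byte(s): {:?}", n, &buf[..n]);
}
```

With this shape, `send` returns the new queue length (which the callers below feed straight into `context::event::trigger`), and the old hand-rolled copy loops collapse into a single `receive_into(buf)` call that blocks instead of busy-switching.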
@@ -1,31 +1,27 @@
-use collections::VecDeque;
 use core::str;
 use core::sync::atomic::{AtomicUsize, Ordering, ATOMIC_USIZE_INIT};
-use spin::{Mutex, Once};
+use spin::Once;
 
 use context;
+use sync::WaitQueue;
 use syscall::error::*;
 use syscall::flag::EVENT_READ;
 use syscall::scheme::Scheme;
 
 pub static DEBUG_SCHEME_ID: AtomicUsize = ATOMIC_USIZE_INIT;
 
-/// Input
-static INPUT: Once<Mutex<VecDeque<u8>>> = Once::new();
+/// Input queue
+static INPUT: Once<WaitQueue<u8>> = Once::new();
 
-/// Initialize contexts, called if needed
-fn init_input() -> Mutex<VecDeque<u8>> {
-    Mutex::new(VecDeque::new())
+/// Initialize input queue, called if needed
+fn init_input() -> WaitQueue<u8> {
+    WaitQueue::new()
 }
 
-/// Get the global schemes list, const
+/// Add to the input queue
 #[no_mangle]
 pub extern fn debug_input(b: u8) {
-    let len = {
-        let mut input = INPUT.call_once(init_input).lock();
-        input.push_back(b);
-        input.len()
-    };
+    let len = INPUT.call_once(init_input).send(b);
 
     context::event::trigger(DEBUG_SCHEME_ID.load(Ordering::SeqCst), 0, EVENT_READ, len);
 }
@@ -45,22 +41,7 @@ impl Scheme for DebugScheme
     ///
     /// Returns the number of bytes read
     fn read(&self, _file: usize, buf: &mut [u8]) -> Result<usize> {
-        loop {
-            let mut i = 0;
-            {
-                let mut input = INPUT.call_once(init_input).lock();
-                while i < buf.len() && ! input.is_empty() {
-                    buf[i] = input.pop_front().expect("debug_input lost byte");
-                    i += 1;
-                }
-            }
-
-            if i > 0 {
-                return Ok(i);
-            } else {
-                unsafe { context::switch(); } //TODO: Block
-            }
-        }
+        Ok(INPUT.call_once(init_input).receive_into(buf))
    }
 
    /// Write the `buffer` to the `file`
@@ -1,17 +1,18 @@
 use alloc::arc::{Arc, Weak};
-use collections::{BTreeMap, VecDeque};
-use core::mem;
+use collections::BTreeMap;
+use core::{mem, slice};
 use core::sync::atomic::{AtomicUsize, Ordering};
-use spin::{Mutex, RwLock};
+use spin::RwLock;
 
 use context;
+use sync::WaitQueue;
 use syscall::data::Event;
 use syscall::error::*;
 use syscall::scheme::Scheme;
 
 pub struct EventScheme {
     next_id: AtomicUsize,
-    handles: RwLock<BTreeMap<usize, Weak<Mutex<VecDeque<Event>>>>>
+    handles: RwLock<BTreeMap<usize, Weak<WaitQueue<Event>>>>
 }
 
 impl EventScheme {
@@ -57,29 +58,8 @@ impl Scheme for EventScheme
             handle_weak.upgrade().ok_or(Error::new(EBADF))?
         };
 
-        let event_size = mem::size_of::<Event>();
-        let len = buf.len()/event_size;
-        if len > 0 {
-            loop {
-                let mut i = 0;
-                {
-                    let mut events = handle.lock();
-                    while ! events.is_empty() && i < len {
-                        let event = events.pop_front().unwrap();
-                        unsafe { *(buf.as_mut_ptr() as *mut Event).offset(i as isize) = event; }
-                        i += 1;
-                    }
-                }
-
-                if i > 0 {
-                    return Ok(i * event_size);
-                } else {
-                    unsafe { context::switch(); } //TODO: Block
-                }
-            }
-        } else {
-            Ok(0)
-        }
+        let event_buf = unsafe { slice::from_raw_parts_mut(buf.as_mut_ptr() as *mut Event, buf.len()/mem::size_of::<Event>()) };
+        Ok(handle.receive_into(event_buf) * mem::size_of::<Event>())
     }
 
     fn fsync(&self, id: usize) -> Result<usize> {
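The new `EventScheme::read` (and the matching `UserInner::read` later in this commit) reinterprets the caller's byte buffer as a slice of fixed-size records before handing it to `receive_into`: the element count is `buf.len() / mem::size_of::<Event>()`, any trailing partial record is ignored, and the return value is converted back to a byte count. Below is a hedged, `std`-only sketch of that cast with a stand-in `Event` struct; the real type lives in `syscall::data`, and the kernel relies on the caller supplying a suitably aligned buffer.

```rust
use std::{mem, slice};

/// Stand-in for syscall::data::Event; the real layout lives in syscall::data.
#[derive(Clone, Copy, Debug, Default)]
#[repr(C)]
struct Event {
    id: usize,
    flags: usize,
    data: usize,
}

/// View a byte buffer as a mutable slice of Event, dropping any trailing partial record.
/// Caller must guarantee the buffer is aligned for Event, as the kernel code assumes.
unsafe fn as_event_buf(buf: &mut [u8]) -> &mut [Event] {
    let len = buf.len() / mem::size_of::<Event>();
    slice::from_raw_parts_mut(buf.as_mut_ptr() as *mut Event, len)
}

fn main() {
    // Back the byte view with u64s so the Event view below is properly aligned.
    let mut backing = [0u64; 13]; // 13 * 8 = 104 bytes
    let bytes = unsafe {
        slice::from_raw_parts_mut(backing.as_mut_ptr() as *mut u8, backing.len() * mem::size_of::<u64>())
    };

    let events = unsafe { as_event_buf(bytes) };
    events[0] = Event { id: 1, flags: 1, data: 7 };
    // On a 64-bit target Event is 24 bytes, so 4 complete events fit; the last 8 bytes are ignored.
    println!("{} events fit, first = {:?}", events.len(), events[0]);
}
```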
@ -1,21 +1,54 @@
|
|||
use core::{mem, str};
|
||||
use core::sync::atomic::{AtomicUsize, ATOMIC_USIZE_INIT, Ordering};
|
||||
use spin::{Mutex, Once};
|
||||
|
||||
use arch::interrupt::irq::{ACKS, COUNTS, acknowledge};
|
||||
use arch::interrupt::irq::acknowledge;
|
||||
use context;
|
||||
use sync::WaitCondition;
|
||||
use syscall::error::*;
|
||||
use syscall::flag::EVENT_READ;
|
||||
use syscall::scheme::Scheme;
|
||||
|
||||
pub static IRQ_SCHEME_ID: AtomicUsize = ATOMIC_USIZE_INIT;
|
||||
|
||||
/// IRQ queues
|
||||
static ACKS: Mutex<[usize; 16]> = Mutex::new([0; 16]);
|
||||
static COUNTS: Mutex<[usize; 16]> = Mutex::new([0; 16]);
|
||||
static WAITS: Once<[WaitCondition; 16]> = Once::new();
|
||||
|
||||
fn init_waits() -> [WaitCondition; 16] {
|
||||
[
|
||||
WaitCondition::new(), WaitCondition::new(), WaitCondition::new(), WaitCondition::new(),
|
||||
WaitCondition::new(), WaitCondition::new(), WaitCondition::new(), WaitCondition::new(),
|
||||
WaitCondition::new(), WaitCondition::new(), WaitCondition::new(), WaitCondition::new(),
|
||||
WaitCondition::new(), WaitCondition::new(), WaitCondition::new(), WaitCondition::new()
|
||||
]
|
||||
}
|
||||
|
||||
/// Add to the input queue
|
||||
#[no_mangle]
|
||||
pub extern fn irq_trigger(irq: u8) {
|
||||
COUNTS.lock()[irq as usize] += 1;
|
||||
WAITS.call_once(init_waits)[irq as usize].notify();
|
||||
context::event::trigger(IRQ_SCHEME_ID.load(Ordering::SeqCst), irq as usize, EVENT_READ, mem::size_of::<usize>());
|
||||
}
|
||||
|
||||
pub struct IrqScheme;
|
||||
|
||||
impl Scheme for IrqScheme {
|
||||
fn open(&self, path: &[u8], _flags: usize, _uid: u32, _gid: u32) -> Result<usize> {
|
||||
let path_str = str::from_utf8(path).or(Err(Error::new(ENOENT)))?;
|
||||
fn open(&self, path: &[u8], _flags: usize, uid: u32, _gid: u32) -> Result<usize> {
|
||||
if uid == 0 {
|
||||
let path_str = str::from_utf8(path).or(Err(Error::new(ENOENT)))?;
|
||||
|
||||
let id = path_str.parse::<usize>().or(Err(Error::new(ENOENT)))?;
|
||||
let id = path_str.parse::<usize>().or(Err(Error::new(ENOENT)))?;
|
||||
|
||||
if id < COUNTS.lock().len() {
|
||||
Ok(id)
|
||||
if id < COUNTS.lock().len() {
|
||||
Ok(id)
|
||||
} else {
|
||||
Err(Error::new(ENOENT))
|
||||
}
|
||||
} else {
|
||||
Err(Error::new(ENOENT))
|
||||
Err(Error::new(EACCES))
|
||||
}
|
||||
}
|
||||
|
||||
|
@@ -26,15 +59,17 @@ impl Scheme for IrqScheme
     fn read(&self, file: usize, buffer: &mut [u8]) -> Result<usize> {
         // Ensures that the length of the buffer is larger than the size of a usize
         if buffer.len() >= mem::size_of::<usize>() {
-            let ack = ACKS.lock()[file];
-            let current = COUNTS.lock()[file];
-            if ack != current {
-                // Safe if the length of the buffer is larger than the size of a usize
-                assert!(buffer.len() >= mem::size_of::<usize>());
-                unsafe { *(buffer.as_mut_ptr() as *mut usize) = current; }
-                Ok(mem::size_of::<usize>())
-            } else {
-                Ok(0)
+            loop {
+                let ack = ACKS.lock()[file];
+                let current = COUNTS.lock()[file];
+                if ack != current {
+                    // Safe if the length of the buffer is larger than the size of a usize
+                    assert!(buffer.len() >= mem::size_of::<usize>());
+                    unsafe { *(buffer.as_mut_ptr() as *mut usize) = current; }
+                    return Ok(mem::size_of::<usize>());
+                } else {
+                    WAITS.call_once(init_waits)[file].wait();
+                }
             }
         } else {
             Err(Error::new(EINVAL))
@@ -58,6 +93,10 @@ impl Scheme for IrqScheme
         }
     }
 
+    fn fevent(&self, _file: usize, _flags: usize) -> Result<usize> {
+        Ok(0)
+    }
+
     fn fsync(&self, _file: usize) -> Result<usize> {
         Ok(0)
     }
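IRQ reads now block instead of returning `Ok(0)` when nothing new has happened: `irq_trigger` bumps the per-IRQ entry in `COUNTS` and notifies the matching `WaitCondition`, while `read` keeps comparing the acknowledged count in `ACKS` against the current count and sleeps whenever they are equal, re-checking after every wakeup. The sketch below models that handshake with `std` primitives; the kernel keeps this state in separate `ACKS`/`COUNTS` arrays and blocks through `sync::WaitCondition`, whereas this sketch merges it into one guarded pair purely to stay small and race-free, so treat the names and layout here as assumptions.

```rust
use std::sync::{Condvar, Mutex};

/// Assumed stand-in for one IRQ line's ACKS/COUNTS/WAITS trio.
pub struct IrqLine {
    state: Mutex<(usize, usize)>, // (acknowledged, occurred)
    condvar: Condvar,
}

impl IrqLine {
    pub fn new() -> Self {
        IrqLine { state: Mutex::new((0, 0)), condvar: Condvar::new() }
    }

    /// Models irq_trigger(): bump the occurrence count and wake any blocked reader.
    pub fn trigger(&self) {
        self.state.lock().unwrap().1 += 1;
        self.condvar.notify_all();
    }

    /// Models the new IrqScheme::read loop: block until the occurred count runs
    /// ahead of the acknowledged count, then report it.
    pub fn read(&self) -> usize {
        let mut state = self.state.lock().unwrap();
        while state.0 == state.1 {
            state = self.condvar.wait(state).unwrap();
        }
        state.1
    }

    /// Models the write path: userspace acknowledges the interrupts it has seen.
    pub fn acknowledge(&self, count: usize) {
        self.state.lock().unwrap().0 = count;
    }
}

fn main() {
    use std::{sync::Arc, thread, time::Duration};

    let line = Arc::new(IrqLine::new());
    let l = line.clone();
    thread::spawn(move || {
        thread::sleep(Duration::from_millis(10));
        l.trigger(); // pretend the device raised an interrupt
    });

    let seen = line.read(); // blocks until trigger() runs
    line.acknowledge(seen);
    println!("interrupts observed: {}", seen);
}
```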
@@ -19,9 +19,9 @@ use self::debug::{DEBUG_SCHEME_ID, DebugScheme};
 use self::event::EventScheme;
 use self::env::EnvScheme;
 use self::initfs::InitFsScheme;
-use self::irq::IrqScheme;
+use self::irq::{IRQ_SCHEME_ID, IrqScheme};
 use self::pipe::{PIPE_SCHEME_ID, PipeScheme};
-use self::root::RootScheme;
+use self::root::{ROOT_SCHEME_ID, RootScheme};
 
 /// Debug scheme
 pub mod debug;
@@ -114,12 +114,12 @@ static SCHEMES: Once<RwLock<SchemeList>> = Once::new();
 /// Initialize schemes, called if needed
 fn init_schemes() -> RwLock<SchemeList> {
     let mut list: SchemeList = SchemeList::new();
-    list.insert(Box::new(*b""), Arc::new(Box::new(RootScheme::new()))).expect("failed to insert root scheme");
+    ROOT_SCHEME_ID.store(list.insert(Box::new(*b""), Arc::new(Box::new(RootScheme::new()))).expect("failed to insert root scheme"), Ordering::SeqCst);
     DEBUG_SCHEME_ID.store(list.insert(Box::new(*b"debug"), Arc::new(Box::new(DebugScheme))).expect("failed to insert debug scheme"), Ordering::SeqCst);
     list.insert(Box::new(*b"event"), Arc::new(Box::new(EventScheme::new()))).expect("failed to insert event scheme");
     list.insert(Box::new(*b"env"), Arc::new(Box::new(EnvScheme::new()))).expect("failed to insert env scheme");
     list.insert(Box::new(*b"initfs"), Arc::new(Box::new(InitFsScheme::new()))).expect("failed to insert initfs scheme");
-    list.insert(Box::new(*b"irq"), Arc::new(Box::new(IrqScheme))).expect("failed to insert irq scheme");
+    IRQ_SCHEME_ID.store(list.insert(Box::new(*b"irq"), Arc::new(Box::new(IrqScheme))).expect("failed to insert irq scheme"), Ordering::SeqCst);
     PIPE_SCHEME_ID.store(list.insert(Box::new(*b"pipe"), Arc::new(Box::new(PipeScheme))).expect("failed to insert pipe scheme"), Ordering::SeqCst);
     RwLock::new(list)
 }
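A small supporting change above: the IDs returned by `SchemeList::insert` for the root, debug, irq, and pipe schemes are now published in `AtomicUsize` statics (`ROOT_SCHEME_ID`, `DEBUG_SCHEME_ID`, `IRQ_SCHEME_ID`, `PIPE_SCHEME_ID`) so that later code such as `irq_trigger` and `debug_input` can call `context::event::trigger` with the right scheme ID without consulting the scheme list again. The fragment below merely restates that register-then-store pattern with placeholder functions; none of these stand-ins are kernel APIs.

```rust
use std::sync::atomic::{AtomicUsize, Ordering};

// Published once during scheme registration, read lock-free from interrupt/input paths.
static IRQ_SCHEME_ID: AtomicUsize = AtomicUsize::new(0);

/// Placeholder for SchemeList::insert, which hands back the ID it assigned.
fn insert_scheme(_name: &str) -> usize {
    7
}

/// Placeholder for context::event::trigger(scheme_id, handle_id, EVENT_READ, len).
fn trigger_event(scheme_id: usize, handle_id: usize) {
    println!("EVENT_READ on scheme {} handle {}", scheme_id, handle_id);
}

fn main() {
    // At boot: register the scheme and remember its ID.
    IRQ_SCHEME_ID.store(insert_scheme("irq"), Ordering::SeqCst);

    // Later, from an interrupt path: raise an event against that scheme.
    trigger_event(IRQ_SCHEME_ID.load(Ordering::SeqCst), 3);
}
```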
@ -1,8 +1,9 @@
|
|||
use alloc::arc::{Arc, Weak};
|
||||
use collections::{BTreeMap, VecDeque};
|
||||
use collections::BTreeMap;
|
||||
use core::sync::atomic::{AtomicUsize, ATOMIC_USIZE_INIT, Ordering};
|
||||
use spin::{Mutex, Once, RwLock, RwLockReadGuard, RwLockWriteGuard};
|
||||
use spin::{Once, RwLock, RwLockReadGuard, RwLockWriteGuard};
|
||||
|
||||
use sync::WaitQueue;
|
||||
use syscall::error::{Error, Result, EBADF, EPIPE};
|
||||
use syscall::scheme::Scheme;
|
||||
|
||||
|
@@ -103,43 +104,21 @@ impl Scheme for PipeScheme
 /// Read side of a pipe
 #[derive(Clone)]
 pub struct PipeRead {
-    vec: Arc<Mutex<VecDeque<u8>>>
+    vec: Arc<WaitQueue<u8>>
 }
 
 impl PipeRead {
     pub fn new() -> Self {
         PipeRead {
-            vec: Arc::new(Mutex::new(VecDeque::new()))
+            vec: Arc::new(WaitQueue::new())
         }
     }
 
     fn read(&self, buf: &mut [u8]) -> Result<usize> {
-        if buf.is_empty() || (Arc::weak_count(&self.vec) == 0 && self.vec.lock().is_empty()) {
+        if buf.is_empty() || (Arc::weak_count(&self.vec) == 0 && self.vec.is_empty()) {
             Ok(0)
         } else {
-            /*loop {
-                {
-                    if let Some(byte) = self.vec.lock().pop_front() {
-                        buf[0] = byte;
-                        break;
-                    }
-                }
-                unsafe { context::switch(); }
-            }*/
-
-            let mut i = 0;
-
-            while i < buf.len() {
-                match self.vec.lock().pop_front() {
-                    Some(b) => {
-                        buf[i] = b;
-                        i += 1;
-                    },
-                    None => break
-                }
-            }
-
-            Ok(i)
+            Ok(self.vec.receive_into(buf))
         }
     }
 }
@@ -147,7 +126,7 @@ impl PipeRead
 /// Read side of a pipe
 #[derive(Clone)]
 pub struct PipeWrite {
-    vec: Weak<Mutex<VecDeque<u8>>>,
+    vec: Weak<WaitQueue<u8>>,
 }
 
 impl PipeWrite {
@@ -160,9 +139,7 @@ impl PipeWrite
     fn write(&self, buf: &[u8]) -> Result<usize> {
         match self.vec.upgrade() {
             Some(vec) => {
-                for &b in buf.iter() {
-                    vec.lock().push_back(b);
-                }
+                vec.send_from(buf);
 
                 Ok(buf.len())
             },
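In the pipe scheme, the read side owns the queue through an `Arc<WaitQueue<u8>>` while each writer holds only a `Weak` reference, so `PipeRead::read` can treat "no writers left and nothing buffered" as end-of-file and return `Ok(0)`, and `PipeWrite::write` can report a broken pipe when `upgrade()` fails. Below is a self-contained sketch of that ownership arrangement, using a plain `Mutex<VecDeque<u8>>` where the kernel uses its blocking `WaitQueue`.

```rust
use std::collections::VecDeque;
use std::sync::{Arc, Mutex, Weak};

struct PipeRead {
    vec: Arc<Mutex<VecDeque<u8>>>,
}

struct PipeWrite {
    vec: Weak<Mutex<VecDeque<u8>>>,
}

fn pipe() -> (PipeRead, PipeWrite) {
    let read = PipeRead { vec: Arc::new(Mutex::new(VecDeque::new())) };
    let write = PipeWrite { vec: Arc::downgrade(&read.vec) };
    (read, write)
}

impl PipeRead {
    fn read(&self, buf: &mut [u8]) -> usize {
        let mut queue = self.vec.lock().unwrap();
        // End of file: every writer (Weak handle) is gone and nothing is buffered.
        if Arc::weak_count(&self.vec) == 0 && queue.is_empty() {
            return 0;
        }
        let mut i = 0;
        while i < buf.len() {
            match queue.pop_front() {
                Some(b) => {
                    buf[i] = b;
                    i += 1;
                }
                None => break,
            }
        }
        i
    }
}

impl PipeWrite {
    /// Returns None when the read side is gone, modelling EPIPE.
    fn write(&self, buf: &[u8]) -> Option<usize> {
        let vec = self.vec.upgrade()?;
        vec.lock().unwrap().extend(buf.iter().copied());
        Some(buf.len())
    }
}

fn main() {
    let (read, write) = pipe();
    write.write(b"hi").unwrap();
    drop(write); // last writer hangs up
    let mut buf = [0u8; 4];
    let n = read.read(&mut buf);
    println!("read {} bytes: {:?}", n, &buf[..n]);
    println!("next read gives EOF: {}", read.read(&mut buf) == 0);
}
```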
@ -1,7 +1,7 @@
|
|||
use alloc::arc::Arc;
|
||||
use alloc::boxed::Box;
|
||||
use collections::BTreeMap;
|
||||
use core::sync::atomic::{AtomicUsize, Ordering};
|
||||
use core::sync::atomic::{AtomicUsize, ATOMIC_USIZE_INIT, Ordering};
|
||||
use spin::RwLock;
|
||||
|
||||
use context;
|
||||
|
@@ -10,6 +10,8 @@ use syscall::scheme::Scheme;
 use scheme;
 use scheme::user::{UserInner, UserScheme};
 
+pub static ROOT_SCHEME_ID: AtomicUsize = ATOMIC_USIZE_INIT;
+
 pub struct RootScheme {
     next_id: AtomicUsize,
     handles: RwLock<BTreeMap<usize, Arc<UserInner>>>
@@ -25,28 +27,33 @@ impl RootScheme
 }
 
 impl Scheme for RootScheme {
-    fn open(&self, path: &[u8], _flags: usize, _uid: u32, _gid: u32) -> Result<usize> {
-        let context = {
-            let contexts = context::contexts();
-            let context = contexts.current().ok_or(Error::new(ESRCH))?;
-            Arc::downgrade(&context)
-        };
+    fn open(&self, path: &[u8], _flags: usize, uid: u32, _gid: u32) -> Result<usize> {
+        if uid == 0 {
+            let context = {
+                let contexts = context::contexts();
+                let context = contexts.current().ok_or(Error::new(ESRCH))?;
+                Arc::downgrade(&context)
+            };
 
-        let inner = {
-            let mut schemes = scheme::schemes_mut();
-            if schemes.get_name(path).is_some() {
-                return Err(Error::new(EEXIST));
-            }
-            let inner = Arc::new(UserInner::new(context));
-            let id = schemes.insert(path.to_vec().into_boxed_slice(), Arc::new(Box::new(UserScheme::new(Arc::downgrade(&inner))))).expect("failed to insert user scheme");
-            inner.scheme_id.store(id, Ordering::SeqCst);
-            inner
-        };
-        let id = self.next_id.fetch_add(1, Ordering::SeqCst);
 
+            let id = self.next_id.fetch_add(1, Ordering::SeqCst);
-        self.handles.write().insert(id, inner);
+            let inner = {
+                let mut schemes = scheme::schemes_mut();
+                if schemes.get_name(path).is_some() {
+                    return Err(Error::new(EEXIST));
+                }
+                let inner = Arc::new(UserInner::new(id, context));
+                let scheme_id = schemes.insert(path.to_vec().into_boxed_slice(), Arc::new(Box::new(UserScheme::new(Arc::downgrade(&inner))))).expect("failed to insert user scheme");
+                inner.scheme_id.store(scheme_id, Ordering::SeqCst);
+                inner
+            };
 
-        Ok(id)
+            self.handles.write().insert(id, inner);
+
+            Ok(id)
+        } else {
+            Err(Error::new(EACCES))
+        }
     }
 
     fn dup(&self, file: usize) -> Result<usize> {
@@ -82,6 +89,10 @@ impl Scheme for RootScheme
         inner.write(buf)
     }
 
+    fn fevent(&self, _file: usize, _flags: usize) -> Result<usize> {
+        Ok(0)
+    }
+
     fn fsync(&self, _file: usize) -> Result<usize> {
         Ok(0)
     }
@ -1,35 +1,39 @@
|
|||
use alloc::arc::Weak;
|
||||
use collections::{BTreeMap, VecDeque};
|
||||
use core::sync::atomic::{AtomicUsize, AtomicU64, Ordering};
|
||||
use core::{mem, usize};
|
||||
use spin::{Mutex, RwLock};
|
||||
use core::{mem, slice, usize};
|
||||
use spin::RwLock;
|
||||
|
||||
use arch;
|
||||
use arch::paging::{InactivePageTable, Page, VirtualAddress, entry};
|
||||
use arch::paging::temporary_page::TemporaryPage;
|
||||
use context::{self, Context};
|
||||
use context::memory::Grant;
|
||||
use scheme::root::ROOT_SCHEME_ID;
|
||||
use sync::{WaitQueue, WaitMap};
|
||||
use syscall::data::{Packet, Stat};
|
||||
use syscall::error::*;
|
||||
use syscall::flag::EVENT_READ;
|
||||
use syscall::number::*;
|
||||
use syscall::scheme::Scheme;
|
||||
|
||||
pub struct UserInner {
|
||||
handle_id: usize,
|
||||
pub scheme_id: AtomicUsize,
|
||||
next_id: AtomicU64,
|
||||
context: Weak<RwLock<Context>>,
|
||||
todo: Mutex<VecDeque<Packet>>,
|
||||
done: Mutex<BTreeMap<u64, usize>>
|
||||
todo: WaitQueue<Packet>,
|
||||
done: WaitMap<u64, usize>
|
||||
}
|
||||
|
||||
impl UserInner {
|
||||
pub fn new(context: Weak<RwLock<Context>>) -> UserInner {
|
||||
pub fn new(handle_id: usize, context: Weak<RwLock<Context>>) -> UserInner {
|
||||
UserInner {
|
||||
handle_id: handle_id,
|
||||
scheme_id: AtomicUsize::new(0),
|
||||
next_id: AtomicU64::new(1),
|
||||
context: context,
|
||||
todo: Mutex::new(VecDeque::new()),
|
||||
done: Mutex::new(BTreeMap::new())
|
||||
todo: WaitQueue::new(),
|
||||
done: WaitMap::new()
|
||||
}
|
||||
}
|
||||
|
||||
|
@@ -54,18 +58,10 @@ impl UserInner
             d: d
         };
 
-        self.todo.lock().push_back(packet);
+        let len = self.todo.send(packet);
+        context::event::trigger(ROOT_SCHEME_ID.load(Ordering::SeqCst), self.handle_id, EVENT_READ, len * mem::size_of::<Packet>());
 
-        loop {
-            {
-                let mut done = self.done.lock();
-                if let Some(a) = done.remove(&id) {
-                    return Error::demux(a);
-                }
-            }
-
-            unsafe { context::switch(); } //TODO: Block
-        }
+        Error::demux(self.done.receive(&id))
     }
 
     pub fn capture(&self, buf: &[u8]) -> Result<usize> {
@@ -158,29 +154,8 @@ impl UserInner
     }
 
     pub fn read(&self, buf: &mut [u8]) -> Result<usize> {
-        let packet_size = mem::size_of::<Packet>();
-        let len = buf.len()/packet_size;
-        if len > 0 {
-            loop {
-                let mut i = 0;
-                {
-                    let mut todo = self.todo.lock();
-                    while ! todo.is_empty() && i < len {
-                        let packet = todo.pop_front().unwrap();
-                        unsafe { *(buf.as_mut_ptr() as *mut Packet).offset(i as isize) = packet; }
-                        i += 1;
-                    }
-                }
-
-                if i > 0 {
-                    return Ok(i * packet_size);
-                } else {
-                    unsafe { context::switch(); } //TODO: Block
-                }
-            }
-        } else {
-            Ok(0)
-        }
+        let packet_buf = unsafe { slice::from_raw_parts_mut(buf.as_mut_ptr() as *mut Packet, buf.len()/mem::size_of::<Packet>()) };
+        Ok(self.todo.receive_into(packet_buf) * mem::size_of::<Packet>())
     }
 
     pub fn write(&self, buf: &[u8]) -> Result<usize> {
@@ -195,7 +170,7 @@ impl UserInner
                     _ => println!("Unknown scheme -> kernel message {}", packet.a)
                 }
             } else {
-                self.done.lock().insert(packet.id, packet.a);
+                self.done.send(packet.id, packet.a);
             }
             i += 1;
         }
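`UserInner` is the kernel half of a userspace scheme registered through the root scheme above: a system call is packed into a `Packet`, queued on `todo` (raising `EVENT_READ` on the root scheme handle so the daemon knows to read), and the calling context then blocks in `done.receive(&id)` until the daemon's reply with the same packet id comes back through the write path. `WaitMap` is the keyed counterpart of `WaitQueue`; the sketch below is an assumed `std`-based shape for it, not the kernel's `sync::WaitMap`.

```rust
use std::collections::BTreeMap;
use std::sync::{Arc, Condvar, Mutex};
use std::thread;

/// Keyed rendezvous: receive(&key) blocks until a value has been sent for that key.
pub struct WaitMap<K: Ord, V> {
    inner: Mutex<BTreeMap<K, V>>,
    condvar: Condvar,
}

impl<K: Ord, V> WaitMap<K, V> {
    pub fn new() -> Self {
        WaitMap { inner: Mutex::new(BTreeMap::new()), condvar: Condvar::new() }
    }

    /// Reply path: store the result for a packet id and wake any waiter.
    pub fn send(&self, key: K, value: V) {
        self.inner.lock().unwrap().insert(key, value);
        self.condvar.notify_all();
    }

    /// Request path: block until the reply for this packet id has arrived.
    pub fn receive(&self, key: &K) -> V {
        let mut map = self.inner.lock().unwrap();
        loop {
            if let Some(value) = map.remove(key) {
                return value;
            }
            map = self.condvar.wait(map).unwrap();
        }
    }
}

fn main() {
    let done = Arc::new(WaitMap::new());

    // Pretend to be the scheme daemon answering request id 1 with result 42.
    let daemon = done.clone();
    let handle = thread::spawn(move || daemon.send(1u64, 42usize));

    // The kernel side of the syscall blocks here until that reply lands.
    println!("reply for packet 1 = {}", done.receive(&1));
    handle.join().unwrap();
}
```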