Pass "clone" and "exec" to dup to identify the dup location; make exec dup file descriptors to implement cloexec

Jeremy Soller 2016-11-14 20:55:31 -07:00
parent a916e29b3c
commit 28cb4c1f18
2 changed files with 128 additions and 49 deletions
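In short, the kernel now passes b"clone" or b"exec" as the buffer argument of dup so a scheme can tell why a descriptor is being duplicated, and a scheme that sees b"exec" on an O_CLOEXEC descriptor refuses with EBADF, which makes exec drop that descriptor. A minimal standalone sketch of the flag logic (not the kernel's code; the O_CLOEXEC value and the DupError type below are placeholders):

    // Sketch only: placeholder flag value and error type, mirroring the dup logic in the diff below.
    const O_CLOEXEC: usize = 0x0100_0000; // assumed value, for illustration

    #[derive(Debug, PartialEq)]
    enum DupError { BadFd }

    // Decide what flags a duplicated descriptor gets, given why dup was called.
    fn dup_flags(flags: usize, buf: &[u8]) -> Result<usize, DupError> {
        if buf == b"exec" && flags & O_CLOEXEC == O_CLOEXEC {
            // exec-time dup of a close-on-exec descriptor: refuse, so exec drops it
            Err(DupError::BadFd)
        } else {
            let mut new_flags = flags;
            if buf.is_empty() {
                // an ordinary dup (empty buffer) clears O_CLOEXEC on the copy
                new_flags &= !O_CLOEXEC;
            }
            Ok(new_flags)
        }
    }

    fn main() {
        assert_eq!(dup_flags(O_CLOEXEC, b"clone"), Ok(O_CLOEXEC)); // clone keeps the flag
        assert_eq!(dup_flags(O_CLOEXEC, b"exec"), Err(DupError::BadFd)); // exec loses the fd
        assert_eq!(dup_flags(O_CLOEXEC, b""), Ok(0)); // plain dup clears the flag
    }

The real checks are the PipeRead::dup and PipeWrite::dup changes in the first file below.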

View file

@ -4,27 +4,27 @@ use core::sync::atomic::{AtomicUsize, ATOMIC_USIZE_INIT, Ordering};
 use spin::{Mutex, Once, RwLock, RwLockReadGuard, RwLockWriteGuard};
 use sync::WaitCondition;
-use syscall::error::{Error, Result, EBADF, EINVAL, EPIPE};
-use syscall::flag::{F_GETFL, F_SETFL, O_NONBLOCK};
+use syscall::error::{Error, Result, EAGAIN, EBADF, EINVAL, EPIPE};
+use syscall::flag::{F_GETFL, F_SETFL, O_CLOEXEC, O_NONBLOCK};
 use syscall::scheme::Scheme;
 /// Pipes list
 pub static PIPE_SCHEME_ID: AtomicUsize = ATOMIC_USIZE_INIT;
 static PIPE_NEXT_ID: AtomicUsize = ATOMIC_USIZE_INIT;
-static PIPES: Once<RwLock<(BTreeMap<usize, PipeRead>, BTreeMap<usize, PipeWrite>)>> = Once::new();
+static PIPES: Once<RwLock<(BTreeMap<usize, Arc<PipeRead>>, BTreeMap<usize, Arc<PipeWrite>>)>> = Once::new();
 /// Initialize pipes, called if needed
-fn init_pipes() -> RwLock<(BTreeMap<usize, PipeRead>, BTreeMap<usize, PipeWrite>)> {
+fn init_pipes() -> RwLock<(BTreeMap<usize, Arc<PipeRead>>, BTreeMap<usize, Arc<PipeWrite>>)> {
     RwLock::new((BTreeMap::new(), BTreeMap::new()))
 }
 /// Get the global pipes list, const
-fn pipes() -> RwLockReadGuard<'static, (BTreeMap<usize, PipeRead>, BTreeMap<usize, PipeWrite>)> {
+fn pipes() -> RwLockReadGuard<'static, (BTreeMap<usize, Arc<PipeRead>>, BTreeMap<usize, Arc<PipeWrite>>)> {
     PIPES.call_once(init_pipes).read()
 }
 /// Get the global schemes list, mutable
-fn pipes_mut() -> RwLockWriteGuard<'static, (BTreeMap<usize, PipeRead>, BTreeMap<usize, PipeWrite>)> {
+fn pipes_mut() -> RwLockWriteGuard<'static, (BTreeMap<usize, Arc<PipeRead>>, BTreeMap<usize, Arc<PipeWrite>>)> {
     PIPES.call_once(init_pipes).write()
 }
@ -34,8 +34,8 @@ pub fn pipe(flags: usize) -> (usize, usize) {
     let write_id = PIPE_NEXT_ID.fetch_add(1, Ordering::SeqCst);
     let read = PipeRead::new(flags);
     let write = PipeWrite::new(flags, &read);
-    pipes.0.insert(read_id, read);
-    pipes.1.insert(write_id, write);
+    pipes.0.insert(read_id, Arc::new(read));
+    pipes.1.insert(write_id, Arc::new(write));
     (read_id, write_id)
 }
@ -52,7 +52,7 @@ impl Scheme for PipeScheme {
         };
         if let Some(pipe) = read_option {
             let pipe_id = PIPE_NEXT_ID.fetch_add(1, Ordering::SeqCst);
-            pipes.0.insert(pipe_id, pipe);
+            pipes.0.insert(pipe_id, Arc::new(pipe));
             return Ok(pipe_id);
         }
@ -63,7 +63,7 @@ impl Scheme for PipeScheme {
         };
         if let Some(pipe) = write_option {
             let pipe_id = PIPE_NEXT_ID.fetch_add(1, Ordering::SeqCst);
-            pipes.1.insert(pipe_id, pipe);
+            pipes.1.insert(pipe_id, Arc::new(pipe));
             return Ok(pipe_id);
         }
@ -71,23 +71,23 @@ impl Scheme for PipeScheme {
     }
     fn read(&self, id: usize, buf: &mut [u8]) -> Result<usize> {
-        let pipes = pipes();
+        // Clone to prevent deadlocks
+        let pipe = {
+            let pipes = pipes();
+            pipes.0.get(&id).map(|pipe| pipe.clone()).ok_or(Error::new(EBADF))?
+        };
-        if let Some(pipe) = pipes.0.get(&id) {
-            pipe.read(buf)
-        } else {
-            Err(Error::new(EBADF))
-        }
+        pipe.read(buf)
     }
     fn write(&self, id: usize, buf: &[u8]) -> Result<usize> {
-        let pipes = pipes();
+        // Clone to prevent deadlocks
+        let pipe = {
+            let pipes = pipes();
+            pipes.1.get(&id).map(|pipe| pipe.clone()).ok_or(Error::new(EBADF))?
+        };
-        if let Some(pipe) = pipes.1.get(&id) {
-            pipe.write(buf)
-        } else {
-            Err(Error::new(EBADF))
-        }
+        pipe.write(buf)
     }
     fn fcntl(&self, id: usize, cmd: usize, arg: usize) -> Result<usize> {
@ -134,12 +134,20 @@ impl PipeRead {
         }
     }
-    fn dup(&self, _buf: &[u8]) -> Result<Self> {
-        Ok(PipeRead {
-            flags: AtomicUsize::new(self.flags.load(Ordering::SeqCst)),
-            condition: self.condition.clone(),
-            vec: self.vec.clone()
-        })
+    fn dup(&self, buf: &[u8]) -> Result<Self> {
+        if buf == b"exec" && self.flags.load(Ordering::SeqCst) & O_CLOEXEC == O_CLOEXEC {
+            Err(Error::new(EBADF))
+        } else {
+            let mut flags = self.flags.load(Ordering::SeqCst);
+            if buf.is_empty() {
+                flags &= ! O_CLOEXEC;
+            }
+            Ok(PipeRead {
+                flags: AtomicUsize::new(flags),
+                condition: self.condition.clone(),
+                vec: self.vec.clone()
+            })
+        }
     }
     fn fcntl(&self, cmd: usize, arg: usize) -> Result<usize> {
@ -173,8 +181,10 @@ impl PipeRead {
                 }
             }
-            if self.flags.load(Ordering::SeqCst) & O_NONBLOCK == O_NONBLOCK || Arc::weak_count(&self.vec) == 0 {
+            if Arc::weak_count(&self.vec) == 0 {
                 return Ok(0);
+            } else if self.flags.load(Ordering::SeqCst) & O_NONBLOCK == O_NONBLOCK {
+                return Err(Error::new(EAGAIN));
             } else {
                 self.condition.wait();
             }
@ -186,7 +196,7 @@ impl PipeRead {
 pub struct PipeWrite {
     flags: AtomicUsize,
     condition: Arc<WaitCondition>,
-    vec: Weak<Mutex<VecDeque<u8>>>
+    vec: Option<Weak<Mutex<VecDeque<u8>>>>
 }
 impl PipeWrite {
@ -194,16 +204,24 @@ impl PipeWrite {
         PipeWrite {
             flags: AtomicUsize::new(flags),
             condition: read.condition.clone(),
-            vec: Arc::downgrade(&read.vec),
+            vec: Some(Arc::downgrade(&read.vec)),
         }
     }
-    fn dup(&self, _buf: &[u8]) -> Result<Self> {
-        Ok(PipeWrite {
-            flags: AtomicUsize::new(self.flags.load(Ordering::SeqCst)),
-            condition: self.condition.clone(),
-            vec: self.vec.clone()
-        })
+    fn dup(&self, buf: &[u8]) -> Result<Self> {
+        if buf == b"exec" && self.flags.load(Ordering::SeqCst) & O_CLOEXEC == O_CLOEXEC {
+            Err(Error::new(EBADF))
+        } else {
+            let mut flags = self.flags.load(Ordering::SeqCst);
+            if buf.is_empty() {
+                flags &= ! O_CLOEXEC;
+            }
+            Ok(PipeWrite {
+                flags: AtomicUsize::new(flags),
+                condition: self.condition.clone(),
+                vec: self.vec.clone()
+            })
+        }
     }
     fn fcntl(&self, cmd: usize, arg: usize) -> Result<usize> {
@ -218,24 +236,29 @@ impl PipeWrite {
     }
     fn write(&self, buf: &[u8]) -> Result<usize> {
-        if let Some(vec_lock) = self.vec.upgrade() {
-            let mut vec = vec_lock.lock();
+        if let Some(ref vec_weak) = self.vec {
+            if let Some(vec_lock) = vec_weak.upgrade() {
+                let mut vec = vec_lock.lock();
-            for &b in buf.iter() {
-                vec.push_back(b);
+                for &b in buf.iter() {
+                    vec.push_back(b);
+                }
+                self.condition.notify();
+                Ok(buf.len())
+            } else {
+                Err(Error::new(EPIPE))
             }
-            self.condition.notify();
-            Ok(buf.len())
         } else {
-            Err(Error::new(EPIPE))
+            panic!("PipeWrite dropped before write");
         }
     }
 }
+impl Drop for PipeWrite {
+    fn drop(&mut self) {
+        drop(self.vec.take());
+        self.condition.notify();
+    }
+}
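The second file wires this into exec: every open descriptor is re-duplicated with b"exec", the original is closed, and any descriptor whose dup is refused (as the pipe scheme now does for O_CLOEXEC) is cleared. A rough sketch of that replacement loop, using a toy trait and a plain Vec<Option<usize>> in place of the kernel's scheme and file-table types:

    // Illustrative stand-in for the kernel's scheme interface; not the real trait.
    trait DupScheme {
        fn dup(&self, number: usize, buf: &[u8]) -> Result<usize, ()>;
        fn close(&self, number: usize);
    }

    // Re-dup every open slot with b"exec", close the original, and keep the new number
    // only if the scheme accepted the dup (mirrors the order used in the diff below).
    fn replace_on_exec<S: DupScheme>(scheme: &S, files: &mut Vec<Option<usize>>) {
        for slot in files.iter_mut() {
            if let Some(number) = slot.take() {
                let dup_result = scheme.dup(number, b"exec");
                scheme.close(number);
                *slot = dup_result.ok();
            }
        }
    }

    // Toy scheme that refuses exec-time dups when its descriptors are close-on-exec.
    struct CloexecAware { cloexec: bool }

    impl DupScheme for CloexecAware {
        fn dup(&self, number: usize, buf: &[u8]) -> Result<usize, ()> {
            if buf == b"exec" && self.cloexec { Err(()) } else { Ok(number) }
        }
        fn close(&self, _number: usize) {}
    }

    fn main() {
        let mut files = vec![Some(3), None, Some(5)];
        replace_on_exec(&CloexecAware { cloexec: true }, &mut files);
        assert_eq!(files, vec![None, None, None]); // cloexec descriptors vanish across exec
    }

The kernel loop in the diff below additionally unregisters any event attached to the old descriptor and keeps the scheme id on the replacement.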

View file

@ -255,7 +255,7 @@ pub fn clone(flags: usize, stack_base: usize) -> Result<usize> {
                     let scheme = schemes.get(file.scheme).ok_or(Error::new(EBADF))?;
                     scheme.clone()
                 };
-                let result = scheme.dup(file.number, &[]);
+                let result = scheme.dup(file.number, b"clone");
                 result
             };
             match result {
@ -496,7 +496,7 @@ pub fn exec(path: &[u8], arg_ptrs: &[[usize; 2]]) -> Result<usize> {
     drop(arg_ptrs); // Drop so that usage is not allowed after unmapping context
     let contexts = context::contexts();
-    let (vfork, ppid) = {
+    let (vfork, ppid, files) = {
         let context_lock = contexts.current().ok_or(Error::new(ESRCH))?;
         let mut context = context_lock.write();
@ -655,11 +655,67 @@ pub fn exec(path: &[u8], arg_ptrs: &[[usize; 2]]) -> Result<usize> {
             context.image.push(memory.to_shared());
         }
+        let files = Arc::new(Mutex::new(context.files.lock().clone()));
+        context.files = files.clone();
         let vfork = context.vfork;
         context.vfork = false;
-        (vfork, context.ppid)
+        (vfork, context.ppid, files)
     };
+    // Duplicate current files using b"exec", close previous
+    for (fd, mut file_option) in files.lock().iter_mut().enumerate() {
+        let new_file_option = if let Some(file) = *file_option {
+            // Duplicate
+            let result = {
+                let scheme_option = {
+                    let schemes = scheme::schemes();
+                    schemes.get(file.scheme).map(|scheme| scheme.clone())
+                };
+                if let Some(scheme) = scheme_option {
+                    let result = scheme.dup(file.number, b"exec");
+                    result
+                } else {
+                    Err(Error::new(EBADF))
+                }
+            };
+            // Close
+            {
+                if let Some(event_id) = file.event {
+                    context::event::unregister(fd, file.scheme, event_id);
+                }
+                let scheme_option = {
+                    let schemes = scheme::schemes();
+                    schemes.get(file.scheme).map(|scheme| scheme.clone())
+                };
+                if let Some(scheme) = scheme_option {
+                    let _ = scheme.close(file.number);
+                }
+            }
+            // Return new descriptor
+            match result {
+                Ok(new_number) => {
+                    Some(context::file::File {
+                        scheme: file.scheme,
+                        number: new_number,
+                        event: None,
+                    })
+                },
+                Err(err) => {
+                    println!("exec: failed to dup {}: {:?}", fd, err);
+                    None
+                }
+            }
+        } else {
+            None
+        };
+        *file_option = new_file_option;
+    }
     if vfork {
         if let Some(context_lock) = contexts.get(ppid) {
             let mut context = context_lock.write();