Add signal support - exit on signal
parent b551b30300
commit bf292bc0d1
31 changed files with 396 additions and 314 deletions
@@ -1,6 +1,6 @@
 use alloc::arc::Arc;
 use alloc::boxed::Box;
-use collections::{BTreeMap, Vec};
+use collections::{BTreeMap, Vec, VecDeque};
 use spin::Mutex;

 use arch;
@@ -48,6 +48,8 @@ pub struct Context {
 pub vfork: bool,
 /// Context is being waited on
 pub waitpid: Arc<WaitMap<ContextId, usize>>,
+/// Context should handle pending signals
+pub pending: VecDeque<u8>,
 /// Context should wake up at specified time
 pub wake: Option<(u64, u64)>,
 /// The architecture specific context
@@ -94,6 +96,7 @@ impl Context {
 cpu_id: None,
 vfork: false,
 waitpid: Arc::new(WaitMap::new()),
+pending: VecDeque::new(),
 wake: None,
 arch: arch::context::Context::new(),
 kfx: None,
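For reference, a minimal standalone model (plain std types and a made-up FakeContext, not the kernel's Context) of the queue semantics the new pending: VecDeque<u8> field introduces: kill() pushes signal numbers onto the back, and the scheduler later pops them in FIFO order, unblocking the target if needed.

```rust
use std::collections::VecDeque;

// Hypothetical stand-in for the kernel's Context; only the fields needed to
// model the new signal queue are included.
struct FakeContext {
    pending: VecDeque<u8>,
    blocked: bool,
}

impl FakeContext {
    fn new() -> Self {
        FakeContext { pending: VecDeque::new(), blocked: false }
    }

    // kill() pushes the signal number onto the back of the queue.
    fn send(&mut self, sig: u8) {
        self.pending.push_back(sig);
    }

    // On a context switch, a blocked context with pending signals is unblocked
    // and the oldest signal is taken for delivery.
    fn take_signal(&mut self) -> Option<u8> {
        if !self.pending.is_empty() {
            self.blocked = false;
        }
        self.pending.pop_front()
    }
}

fn main() {
    let mut ctx = FakeContext::new();
    ctx.blocked = true;
    ctx.send(15); // e.g. SIGTERM
    assert_eq!(ctx.take_signal(), Some(15));
    assert!(!ctx.blocked);
    assert_eq!(ctx.take_signal(), None);
}
```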
@@ -1,7 +1,8 @@
 use core::sync::atomic::Ordering;

 use arch;
-use super::{contexts, Context, Status, CONTEXT_ID};
+use context::{contexts, Context, Status, CONTEXT_ID};
+use syscall;

 /// Switch to the next context
 ///
@@ -20,6 +21,7 @@ pub unsafe fn switch() -> bool {

 let from_ptr;
 let mut to_ptr = 0 as *mut Context;
+let mut to_sig = None;
 {
 let contexts = contexts();
 {
@@ -34,6 +36,10 @@ pub unsafe fn switch() -> bool {
 // println!("{}: take {} {}", cpu_id, context.id, ::core::str::from_utf8_unchecked(&context.name.lock()));
 }

+if context.status == Status::Blocked && ! context.pending.is_empty() {
+context.unblock();
+}
+
 if context.status == Status::Blocked && context.wake.is_some() {
 let wake = context.wake.expect("context::switch: wake not set");

@@ -57,6 +63,7 @@ pub unsafe fn switch() -> bool {
 let mut context = context_lock.write();
 if check_context(&mut context) {
 to_ptr = context.deref_mut() as *mut Context;
+to_sig = context.pending.pop_front();
 break;
 }
 }
@@ -68,6 +75,7 @@ pub unsafe fn switch() -> bool {
 let mut context = context_lock.write();
 if check_context(&mut context) {
 to_ptr = context.deref_mut() as *mut Context;
+to_sig = context.pending.pop_front();
 break;
 }
 }
@@ -91,7 +99,17 @@ pub unsafe fn switch() -> bool {
 // Unset global lock before switch, as arch is only usable by the current CPU at this time
 arch::context::CONTEXT_SWITCH_LOCK.store(false, Ordering::SeqCst);

+if let Some(sig) = to_sig {
+println!("Handle {}", sig);
+(&mut *to_ptr).arch.signal_stack(signal_handler, sig);
+}
+
 (&mut *from_ptr).arch.switch_to(&mut (&mut *to_ptr).arch);

 true
 }
+
+extern fn signal_handler(signal: usize) {
+println!("Signal handler: {}", signal);
+syscall::exit(signal);
+}
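A standalone sketch of the delivery path added to switch() above, under simplified assumptions (no real context switching, a bare VecDeque standing in for the context's pending queue): one pending signal is popped per switch and routed to a handler, which in the kernel simply exits via syscall::exit.

```rust
use std::collections::VecDeque;

// Stand-in for the handler installed on the signal stack; the real one
// calls syscall::exit(signal), this model only prints.
fn signal_handler(signal: usize) {
    println!("Signal handler: {}", signal);
}

// Mirrors the switch() logic: pop one pending signal, if any, and hand it to
// the handler (the kernel routes it through arch signal_stack before switching).
fn deliver_one(pending: &mut VecDeque<u8>) -> bool {
    if let Some(sig) = pending.pop_front() {
        signal_handler(sig as usize);
        true
    } else {
        false
    }
}

fn main() {
    let mut pending: VecDeque<u8> = VecDeque::new();
    pending.push_back(9); // e.g. SIGKILL queued by kill()
    assert!(deliver_one(&mut pending));  // prints "Signal handler: 9"
    assert!(!deliver_one(&mut pending)); // nothing left to deliver
}
```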
@@ -113,6 +113,7 @@ pub extern fn ksignal(signal: usize) {
 println!("NAME {}", unsafe { ::core::str::from_utf8_unchecked(&context.name.lock()) });
 }
 }
+syscall::exit(signal & 0x7F);
 }

 /// This is the kernel entry point for the primary CPU. The arch crate is responsible for calling this
kernel/syscall/driver.rs (Normal file, 130 lines)
@@ -0,0 +1,130 @@
+use arch;
+use arch::memory::{allocate_frames, deallocate_frames, Frame};
+use arch::paging::{entry, ActivePageTable, PhysicalAddress, VirtualAddress};
+use context;
+use context::memory::Grant;
+use syscall::error::{Error, EFAULT, ENOMEM, EPERM, ESRCH, Result};
+use syscall::flag::{MAP_WRITE, MAP_WRITE_COMBINE};
+
+fn enforce_root() -> Result<()> {
+    let contexts = context::contexts();
+    let context_lock = contexts.current().ok_or(Error::new(ESRCH))?;
+    let context = context_lock.read();
+    if context.euid == 0 {
+        Ok(())
+    } else {
+        Err(Error::new(EPERM))
+    }
+}
+
+pub fn iopl(_level: usize, _stack_base: usize) -> Result<usize> {
+    enforce_root()?;
+
+    //TODO
+    Ok(0)
+}
+
+pub fn physalloc(size: usize) -> Result<usize> {
+    enforce_root()?;
+
+    allocate_frames((size + 4095)/4096).ok_or(Error::new(ENOMEM)).map(|frame| frame.start_address().get())
+}
+
+pub fn physfree(physical_address: usize, size: usize) -> Result<usize> {
+    enforce_root()?;
+
+    deallocate_frames(Frame::containing_address(PhysicalAddress::new(physical_address)), (size + 4095)/4096);
+    //TODO: Check that no double free occured
+    Ok(0)
+}
+
+//TODO: verify exlusive access to physical memory
+pub fn physmap(physical_address: usize, size: usize, flags: usize) -> Result<usize> {
+    enforce_root()?;
+
+    if size == 0 {
+        Ok(0)
+    } else {
+        let contexts = context::contexts();
+        let context_lock = contexts.current().ok_or(Error::new(ESRCH))?;
+        let context = context_lock.read();
+
+        let mut grants = context.grants.lock();
+
+        let from_address = (physical_address/4096) * 4096;
+        let offset = physical_address - from_address;
+        let full_size = ((offset + size + 4095)/4096) * 4096;
+        let mut to_address = arch::USER_GRANT_OFFSET;
+
+        let mut entry_flags = entry::PRESENT | entry::NO_EXECUTE | entry::USER_ACCESSIBLE;
+        if flags & MAP_WRITE == MAP_WRITE {
+            entry_flags |= entry::WRITABLE;
+        }
+        if flags & MAP_WRITE_COMBINE == MAP_WRITE_COMBINE {
+            entry_flags |= entry::HUGE_PAGE;
+        }
+
+        for i in 0 .. grants.len() {
+            let start = grants[i].start_address().get();
+            if to_address + full_size < start {
+                grants.insert(i, Grant::physmap(
+                    PhysicalAddress::new(from_address),
+                    VirtualAddress::new(to_address),
+                    full_size,
+                    entry_flags
+                ));
+
+                return Ok(to_address + offset);
+            } else {
+                let pages = (grants[i].size() + 4095) / 4096;
+                let end = start + pages * 4096;
+                to_address = end;
+            }
+        }
+
+        grants.push(Grant::physmap(
+            PhysicalAddress::new(from_address),
+            VirtualAddress::new(to_address),
+            full_size,
+            entry_flags
+        ));
+
+        Ok(to_address + offset)
+    }
+}
+
+pub fn physunmap(virtual_address: usize) -> Result<usize> {
+    enforce_root()?;
+
+    if virtual_address == 0 {
+        Ok(0)
+    } else {
+        let contexts = context::contexts();
+        let context_lock = contexts.current().ok_or(Error::new(ESRCH))?;
+        let context = context_lock.read();
+
+        let mut grants = context.grants.lock();
+
+        for i in 0 .. grants.len() {
+            let start = grants[i].start_address().get();
+            let end = start + grants[i].size();
+            if virtual_address >= start && virtual_address < end {
+                grants.remove(i).unmap();
+
+                return Ok(0);
+            }
+        }
+
+        Err(Error::new(EFAULT))
+    }
+}
+
+pub fn virttophys(virtual_address: usize) -> Result<usize> {
+    enforce_root()?;
+
+    let active_table = unsafe { ActivePageTable::new() };
+    match active_table.translate(VirtualAddress::new(virtual_address)) {
+        Some(physical_address) => Ok(physical_address.get()),
+        None => Err(Error::new(EFAULT))
+    }
+}
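A quick standalone check of the frame rounding arithmetic used by physmap() in the new driver.rs: the physical address is rounded down to a 4096-byte frame boundary, the span is rounded up to whole frames, and the caller receives the mapped virtual address plus the offset into the first frame. round_for_map is a hypothetical helper written only for this illustration.

```rust
// Standalone check of the page-rounding arithmetic: returns the frame-aligned
// base, the offset of the requested address within that frame, and the total
// mapped size rounded up to whole 4096-byte frames.
fn round_for_map(physical_address: usize, size: usize) -> (usize, usize, usize) {
    let from_address = (physical_address / 4096) * 4096;
    let offset = physical_address - from_address;
    let full_size = ((offset + size + 4095) / 4096) * 4096;
    (from_address, offset, full_size)
}

fn main() {
    // Mapping 10 bytes at 0x1FF8 must cover two frames: 0x1000..0x3000.
    let (from, offset, full) = round_for_map(0x1FF8, 10);
    assert_eq!(from, 0x1000);
    assert_eq!(offset, 0xFF8);
    assert_eq!(full, 0x2000);
    println!("from={:#x} offset={:#x} full_size={:#x}", from, offset, full);
}
```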
@@ -4,8 +4,10 @@ extern crate syscall;

 pub use self::syscall::{data, error, flag, number, scheme};

+pub use self::driver::*;
 pub use self::fs::*;
 pub use self::futex::futex;
+pub use self::privilege::*;
 pub use self::process::*;
 pub use self::time::*;
 pub use self::validate::*;
@@ -17,12 +19,18 @@ use self::number::*;
 use context::ContextId;
 use scheme::FileHandle;

+/// Driver syscalls
+pub mod driver;
+
 /// Filesystem syscalls
 pub mod fs;

 /// Fast userspace mutex
 pub mod futex;

+/// Privilege syscalls
+pub mod privilege;
+
 /// Process syscalls
 pub mod process;

@@ -65,13 +73,14 @@ pub extern fn syscall(a: usize, b: usize, c: usize, d: usize, e: usize, f: usize
 SYS_CLOCK_GETTIME => clock_gettime(b, validate_slice_mut(c as *mut TimeSpec, 1).map(|time| &mut time[0])?),
 SYS_FUTEX => futex(validate_slice_mut(b as *mut i32, 1).map(|uaddr| &mut uaddr[0])?, c, d as i32, e, f as *mut i32),
 SYS_BRK => brk(b),
-SYS_EXIT => exit(b),
-SYS_WAITPID => waitpid(ContextId::from(b), c, d).map(ContextId::into),
-SYS_EXECVE => exec(validate_slice(b as *const u8, c)?, validate_slice(d as *const [usize; 2], e)?),
-SYS_CHDIR => chdir(validate_slice(b as *const u8, c)?),
 SYS_GETPID => getpid().map(ContextId::into),
-SYS_IOPL => iopl(b),
 SYS_CLONE => clone(b, stack).map(ContextId::into),
+SYS_EXIT => exit((b & 0xFF) << 8),
+SYS_KILL => kill(ContextId::from(b), c),
+SYS_WAITPID => waitpid(ContextId::from(b), c, d).map(ContextId::into),
+SYS_CHDIR => chdir(validate_slice(b as *const u8, c)?),
+SYS_EXECVE => exec(validate_slice(b as *const u8, c)?, validate_slice(d as *const [usize; 2], e)?),
+SYS_IOPL => iopl(b, stack),
 SYS_GETCWD => getcwd(validate_slice_mut(b as *mut u8, c)?),
 SYS_GETUID => getuid(),
 SYS_GETGID => getgid(),
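A standalone illustration of the status packing visible in the dispatch table above: SYS_EXIT now shifts the exit code into bits 8..15 with (b & 0xFF) << 8, leaving the low bits for a terminating signal, and ksignal() exits with signal & 0x7F. The helper names below are illustrative, not kernel API.

```rust
// Hypothetical helpers mirroring the two encodings seen in this commit.
fn status_from_exit(code: usize) -> usize {
    (code & 0xFF) << 8 // exit code lives in bits 8..15
}

fn status_from_signal(signal: usize) -> usize {
    signal & 0x7F // terminating signal lives in the low 7 bits
}

fn main() {
    let s = status_from_exit(1);
    assert_eq!(s, 0x100);          // exit code 1, no signal bits set
    assert_eq!(s & 0x7F, 0);

    let t = status_from_signal(9); // killed by SIGKILL
    assert_eq!(t, 9);
    println!("exit status {:#x}, signal status {:#x}", s, t);
}
```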
kernel/syscall/privilege.rs (Normal file, 85 lines)
@@ -0,0 +1,85 @@
+use collections::Vec;
+
+use context;
+use scheme;
+use syscall::error::*;
+use syscall::validate::validate_slice;
+
+pub fn getegid() -> Result<usize> {
+    let contexts = context::contexts();
+    let context_lock = contexts.current().ok_or(Error::new(ESRCH))?;
+    let context = context_lock.read();
+    Ok(context.egid as usize)
+}
+
+pub fn geteuid() -> Result<usize> {
+    let contexts = context::contexts();
+    let context_lock = contexts.current().ok_or(Error::new(ESRCH))?;
+    let context = context_lock.read();
+    Ok(context.euid as usize)
+}
+
+pub fn getgid() -> Result<usize> {
+    let contexts = context::contexts();
+    let context_lock = contexts.current().ok_or(Error::new(ESRCH))?;
+    let context = context_lock.read();
+    Ok(context.rgid as usize)
+}
+
+pub fn getuid() -> Result<usize> {
+    let contexts = context::contexts();
+    let context_lock = contexts.current().ok_or(Error::new(ESRCH))?;
+    let context = context_lock.read();
+    Ok(context.ruid as usize)
+}
+
+pub fn setgid(gid: u32) -> Result<usize> {
+    let contexts = context::contexts();
+    let context_lock = contexts.current().ok_or(Error::new(ESRCH))?;
+    let mut context = context_lock.write();
+    if context.egid == 0 {
+        context.rgid = gid;
+        context.egid = gid;
+        Ok(0)
+    } else {
+        Err(Error::new(EPERM))
+    }
+}
+
+pub fn setuid(uid: u32) -> Result<usize> {
+    let contexts = context::contexts();
+    let context_lock = contexts.current().ok_or(Error::new(ESRCH))?;
+    let mut context = context_lock.write();
+    if context.euid == 0 {
+        context.ruid = uid;
+        context.euid = uid;
+        Ok(0)
+    } else {
+        Err(Error::new(EPERM))
+    }
+}
+
+pub fn setns(name_ptrs: &[[usize; 2]]) -> Result<usize> {
+    let mut names = Vec::new();
+    for name_ptr in name_ptrs {
+        names.push(validate_slice(name_ptr[0] as *const u8, name_ptr[1])?);
+    }
+
+    let from = {
+        let contexts = context::contexts();
+        let context_lock = contexts.current().ok_or(Error::new(ESRCH))?;
+        let context = context_lock.read();
+        context.scheme_ns
+    };
+
+    let to = scheme::schemes_mut().setns(from, &names)?;
+
+    {
+        let contexts = context::contexts();
+        let context_lock = contexts.current().ok_or(Error::new(ESRCH))?;
+        let mut context = context_lock.write();
+        context.scheme_ns = to;
+    }
+
+    Ok(0)
+}
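A small sketch of the lock discipline setns() follows above: read the current namespace under a read lock, drop the lock, perform the namespace switch, then take a write lock only long enough to store the result. The RwLock-based types below are stand-ins for the kernel's context list, not its actual API.

```rust
use std::sync::RwLock;

// Stand-in for the per-context state guarded by the kernel's context lock.
struct Ctx {
    scheme_ns: usize,
}

// Placeholder for scheme::schemes_mut().setns(from, &names), which must not
// run while the context lock is held.
fn switch_namespace(from: usize) -> Result<usize, ()> {
    Ok(from + 1)
}

fn setns_like(ctx: &RwLock<Ctx>) -> Result<usize, ()> {
    // Phase 1: read the current namespace, then release the read guard.
    let from = {
        let c = ctx.read().unwrap();
        c.scheme_ns
    };

    // Phase 2: do the potentially blocking work with no context lock held.
    let to = switch_namespace(from)?;

    // Phase 3: briefly take the write lock just to publish the new value.
    {
        let mut c = ctx.write().unwrap();
        c.scheme_ns = to;
    }

    Ok(0)
}

fn main() {
    let ctx = RwLock::new(Ctx { scheme_ns: 1 });
    assert_eq!(setns_like(&ctx), Ok(0));
    assert_eq!(ctx.read().unwrap().scheme_ns, 2);
}
```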
@@ -7,19 +7,18 @@ use core::ops::DerefMut;
 use spin::Mutex;

 use arch;
-use arch::memory::{allocate_frame, allocate_frames, deallocate_frames, Frame};
-use arch::paging::{ActivePageTable, InactivePageTable, Page, PhysicalAddress, VirtualAddress, entry};
+use arch::memory::allocate_frame;
+use arch::paging::{ActivePageTable, InactivePageTable, Page, VirtualAddress, entry};
 use arch::paging::temporary_page::TemporaryPage;
 use arch::start::usermode;
 use context;
 use context::ContextId;
 use context::memory::Grant;
 use elf::{self, program_header};
 use scheme::{self, FileHandle};
 use syscall;
 use syscall::data::Stat;
 use syscall::error::*;
-use syscall::flag::{CLONE_VFORK, CLONE_VM, CLONE_FS, CLONE_FILES, MAP_WRITE, MAP_WRITE_COMBINE, WNOHANG};
+use syscall::flag::{CLONE_VFORK, CLONE_VM, CLONE_FS, CLONE_FILES, WNOHANG};
 use syscall::validate::{validate_slice, validate_slice_mut};

 pub fn brk(address: usize) -> Result<usize> {
@@ -838,27 +837,6 @@ pub fn exit(status: usize) -> ! {
 unreachable!();
 }

-pub fn getegid() -> Result<usize> {
-let contexts = context::contexts();
-let context_lock = contexts.current().ok_or(Error::new(ESRCH))?;
-let context = context_lock.read();
-Ok(context.egid as usize)
-}
-
-pub fn geteuid() -> Result<usize> {
-let contexts = context::contexts();
-let context_lock = contexts.current().ok_or(Error::new(ESRCH))?;
-let context = context_lock.read();
-Ok(context.euid as usize)
-}
-
-pub fn getgid() -> Result<usize> {
-let contexts = context::contexts();
-let context_lock = contexts.current().ok_or(Error::new(ESRCH))?;
-let context = context_lock.read();
-Ok(context.rgid as usize)
-}
-
 pub fn getpid() -> Result<ContextId> {
 let contexts = context::contexts();
 let context_lock = contexts.current().ok_or(Error::new(ESRCH))?;
@@ -866,224 +844,29 @@ pub fn getpid() -> Result<ContextId> {
 Ok(context.id)
 }

-pub fn getuid() -> Result<usize> {
-let contexts = context::contexts();
-let context_lock = contexts.current().ok_or(Error::new(ESRCH))?;
-let context = context_lock.read();
-Ok(context.ruid as usize)
-}
-
-pub fn iopl(_level: usize) -> Result<usize> {
-//TODO
-Ok(0)
-}
-
 pub fn kill(pid: ContextId, sig: usize) -> Result<usize> {
-use syscall::flag::*;
+let (ruid, euid) = {
+let contexts = context::contexts();
+let context_lock = contexts.current().ok_or(Error::new(ESRCH))?;
+let context = context_lock.read();
+(context.ruid, context.euid)
+};

-let _context_lock = {
+if sig > 0 && sig <= 0x7F {
 let contexts = context::contexts();
 let context_lock = contexts.get(pid).ok_or(Error::new(ESRCH))?;
-context_lock.clone()
-};
-
-let term = || {
-println!("Terminate {:?}", pid);
-};
-
-let core = || {
-println!("Core {:?}", pid);
-};
-
-let stop = || {
-println!("Stop {:?}", pid);
-};
-
-let cont = || {
-println!("Continue {:?}", pid);
-};
-
-match sig {
-0 => (),
-SIGHUP => term(),
-SIGINT => term(),
-SIGQUIT => core(),
-SIGILL => core(),
-SIGTRAP => core(),
-SIGABRT => core(),
-SIGBUS => core(),
-SIGFPE => core(),
-SIGKILL => term(),
-SIGUSR1 => term(),
-SIGSEGV => core(),
-SIGPIPE => term(),
-SIGALRM => term(),
-SIGTERM => term(),
-SIGSTKFLT => term(),
-SIGCHLD => (),
-SIGCONT => cont(),
-SIGSTOP => stop(),
-SIGTSTP => stop(),
-SIGTTIN => stop(),
-SIGTTOU => stop(),
-SIGURG => (),
-SIGXCPU => core(),
-SIGXFSZ => core(),
-SIGVTALRM => term(),
-SIGPROF => term(),
-SIGWINCH => (),
-SIGIO => term(),
-SIGPWR => term(),
-SIGSYS => core(),
-_ => return Err(Error::new(EINVAL))
-}
-
-Ok(0)
-}
-
-pub fn physalloc(size: usize) -> Result<usize> {
-allocate_frames((size + 4095)/4096).ok_or(Error::new(ENOMEM)).map(|frame| frame.start_address().get())
-}
-
-pub fn physfree(physical_address: usize, size: usize) -> Result<usize> {
-deallocate_frames(Frame::containing_address(PhysicalAddress::new(physical_address)), (size + 4095)/4096);
-//TODO: Check that no double free occured
-Ok(0)
-}
-
-//TODO: verify exlusive access to physical memory
-pub fn physmap(physical_address: usize, size: usize, flags: usize) -> Result<usize> {
-if size == 0 {
-Ok(0)
-} else {
-let contexts = context::contexts();
-let context_lock = contexts.current().ok_or(Error::new(ESRCH))?;
-let context = context_lock.read();
-
-let mut grants = context.grants.lock();
-
-let from_address = (physical_address/4096) * 4096;
-let offset = physical_address - from_address;
-let full_size = ((offset + size + 4095)/4096) * 4096;
-let mut to_address = arch::USER_GRANT_OFFSET;
-
-let mut entry_flags = entry::PRESENT | entry::NO_EXECUTE | entry::USER_ACCESSIBLE;
-if flags & MAP_WRITE == MAP_WRITE {
-entry_flags |= entry::WRITABLE;
-}
-if flags & MAP_WRITE_COMBINE == MAP_WRITE_COMBINE {
-entry_flags |= entry::HUGE_PAGE;
-}
-
-for i in 0 .. grants.len() {
-let start = grants[i].start_address().get();
-if to_address + full_size < start {
-grants.insert(i, Grant::physmap(
-PhysicalAddress::new(from_address),
-VirtualAddress::new(to_address),
-full_size,
-entry_flags
-));
-
-return Ok(to_address + offset);
-} else {
-let pages = (grants[i].size() + 4095) / 4096;
-let end = start + pages * 4096;
-to_address = end;
-}
-}
-
-grants.push(Grant::physmap(
-PhysicalAddress::new(from_address),
-VirtualAddress::new(to_address),
-full_size,
-entry_flags
-));
-
-Ok(to_address + offset)
-}
-}
-
-pub fn physunmap(virtual_address: usize) -> Result<usize> {
-if virtual_address == 0 {
-Ok(0)
-} else {
-let contexts = context::contexts();
-let context_lock = contexts.current().ok_or(Error::new(ESRCH))?;
-let context = context_lock.read();
-
-let mut grants = context.grants.lock();
-
-for i in 0 .. grants.len() {
-let start = grants[i].start_address().get();
-let end = start + grants[i].size();
-if virtual_address >= start && virtual_address < end {
-grants.remove(i).unmap();
-
-return Ok(0);
-}
-}
-
-Err(Error::new(EFAULT))
-}
-}
-
-pub fn setgid(gid: u32) -> Result<usize> {
-let contexts = context::contexts();
-let context_lock = contexts.current().ok_or(Error::new(ESRCH))?;
-let mut context = context_lock.write();
-if context.egid == 0 {
-context.rgid = gid;
-context.egid = gid;
-Ok(0)
-} else {
-Err(Error::new(EPERM))
-}
-}
-
-pub fn setuid(uid: u32) -> Result<usize> {
-let contexts = context::contexts();
-let context_lock = contexts.current().ok_or(Error::new(ESRCH))?;
-let mut context = context_lock.write();
-if context.euid == 0 {
-context.ruid = uid;
-context.euid = uid;
-Ok(0)
-} else {
-Err(Error::new(EPERM))
-}
-}
-
-pub fn setns(name_ptrs: &[[usize; 2]]) -> Result<usize> {
-let mut names = Vec::new();
-for name_ptr in name_ptrs {
-names.push(validate_slice(name_ptr[0] as *const u8, name_ptr[1])?);
-}
-
-let from = {
-let contexts = context::contexts();
-let context_lock = contexts.current().ok_or(Error::new(ESRCH))?;
-let context = context_lock.read();
-context.scheme_ns
-};
-
-let to = scheme::schemes_mut().setns(from, &names)?;
-
-{
-let contexts = context::contexts();
-let context_lock = contexts.current().ok_or(Error::new(ESRCH))?;
-let mut context = context_lock.write();
-context.scheme_ns = to;
-}
-
-Ok(0)
-}
-
-pub fn virttophys(virtual_address: usize) -> Result<usize> {
-let active_table = unsafe { ActivePageTable::new() };
-match active_table.translate(VirtualAddress::new(virtual_address)) {
-Some(physical_address) => Ok(physical_address.get()),
-None => Err(Error::new(EFAULT))
+if euid == 0
+|| euid == context.ruid
+|| ruid == context.ruid
+{
+context.pending.push_back(sig as u8);
+Ok(0)
+} else {
+Err(Error::new(EPERM))
+}
+} else {
+Err(Error::new(EINVAL))
+}
+}
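A standalone model of the permission check in the rewritten kill(): a signal in 1..=0x7F is queued on the target's pending list if the sender is root or shares a real uid with the target, otherwise EPERM; out-of-range signals get EINVAL. Creds and the error constants are illustrative stand-ins, not kernel types.

```rust
use std::collections::VecDeque;

// Illustrative error codes; the kernel uses its own Error type.
const EINVAL: i32 = 22;
const EPERM: i32 = 1;

// Hypothetical credential pair standing in for a context's ruid/euid.
struct Creds {
    ruid: u32,
    euid: u32,
}

fn kill_like(sender: &Creds, target: &Creds,
             target_pending: &mut VecDeque<u8>, sig: usize) -> Result<usize, i32> {
    if sig > 0 && sig <= 0x7F {
        if sender.euid == 0
        || sender.euid == target.ruid
        || sender.ruid == target.ruid
        {
            // Queue the signal; delivery happens later at context switch.
            target_pending.push_back(sig as u8);
            Ok(0)
        } else {
            Err(EPERM)
        }
    } else {
        Err(EINVAL)
    }
}

fn main() {
    let root = Creds { ruid: 0, euid: 0 };
    let user = Creds { ruid: 1000, euid: 1000 };
    let other = Creds { ruid: 1001, euid: 1001 };
    let mut pending = VecDeque::new();

    assert_eq!(kill_like(&root, &user, &mut pending, 15), Ok(0));
    assert_eq!(kill_like(&other, &user, &mut pending, 15), Err(EPERM));
    assert_eq!(kill_like(&user, &user, &mut pending, 0x80), Err(EINVAL));
    assert_eq!(pending, VecDeque::from(vec![15u8]));
}
```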