Move kernel to submodule

Jeremy Soller 2017-01-04 15:42:07 -07:00
parent 2998dc5bda
commit db61470278
96 changed files with 4 additions and 10335 deletions

.gitmodules

@ -58,3 +58,6 @@
[submodule "programs/games"]
path = programs/games
url = https://github.com/redox-os/games.git
[submodule "kernel"]
path = kernel
url = https://github.com/redox-os/kernel.git

kernel (new submodule)

@ -0,0 +1 @@
Subproject commit 0c8ba636f4263665dae1b0fdd0a040e4e0c724f5


@ -1,37 +0,0 @@
[package]
name = "kernel"
version = "0.1.0"
[lib]
name = "kernel"
path = "src/lib.rs"
crate-type = ["staticlib"]
[dependencies]
bitflags = "*"
spin = "*"
redox_syscall = { path = "../syscall/" }
[dependencies.goblin]
git = "https://github.com/m4b/goblin.git"
default-features = false
features = ["elf32", "elf64"]
[dev-dependencies]
arch_test = { path = "arch/test" }
[target.'cfg(target_arch = "arm")'.dependencies]
arch_arm = { path = "arch/arm" }
[target.'cfg(target_arch = "x86_64")'.dependencies]
arch_x86_64 = { path = "arch/x86_64" }
[features]
default = []
live = []
[profile.dev]
panic = "unwind"
[profile.release]
panic = "abort"


@ -1,69 +0,0 @@
# kernel
A collaborative effort to rewrite the kernel with a focus on correctness and code
quality.
## Why?
The kernel code was getting increasingly messy to the point where only the
original writer would be able to find and fix bugs. Fortunately, the Redox kernel
is relatively small, so such a project is estimated to take only a few months.
## What?
The aims of the new kernel, in order of priority, are:
1. **Correctness**: Above anything else, the kernel should be correct. No hacks:
however tempting a tiny, clever shortcut may seem, it causes severe backlash later
on. Keep it correct and well-written.
2. **Readability and documentation**: The code quality should be high, and with it
comes detailed documentation, including both API docs (on every item!) and
careful comments for anything non-trivial.
3. **Performance**: If you can, go for it.
## Guidelines
### A rotten house is built on a rotten foundation.
Don't fool yourself. You are likely not getting back to the ugly code. Write it
the right way **the first time**, and make sure you only move on when it's
**done right**.
### Comments
Do not hesitate to put comments all over the place.
### Documentation
Every public item should contain API documentation.
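A sketch of the expected level of documentation, using a hypothetical helper
(this item is not from the kernel):

```rust
/// Returns the number of whole 4 KiB pages needed to hold `bytes` of data.
///
/// The result is rounded up, so `pages_for(1)` is `1`, while `pages_for(0)` is `0`.
pub fn pages_for(bytes: usize) -> usize {
    const PAGE_SIZE: usize = 4096;
    // Round up without computing `bytes + PAGE_SIZE - 1`, which could overflow.
    bytes / PAGE_SIZE + if bytes % PAGE_SIZE == 0 { 0 } else { 1 }
}
```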
### Debug assertions
Abusing debug assertions is a wonderful way to catch bugs, and it is very much
encouraged.
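A minimal sketch of the idea, using a made-up address-splitting helper:

```rust
/// Split a virtual address into a page index and an offset within the page.
/// Hypothetical example; not an actual kernel routine.
fn split_addr(addr: usize) -> (usize, usize) {
    const PAGE_SIZE: usize = 4096;
    let page = addr / PAGE_SIZE;
    let offset = addr % PAGE_SIZE;
    // Debug assertions state the invariants explicitly: they cost nothing in
    // release builds, but fail loudly during development if ever violated.
    debug_assert!(offset < PAGE_SIZE);
    debug_assert_eq!(page * PAGE_SIZE + offset, addr);
    (page, offset)
}
```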
### Static checking
Rust provides a lot of type-system features which can be used to create wonderful,
safe abstractions, and you should use them whenever you get the chance.
Unsafety should be avoided, and if an unsafe operation is only sound under some
condition, **insert an assertion**. Despite this being a kernel, we prefer kernel
panics over security vulnerabilities.
If a condition is (or should be) unreachable, but would lead to UB if it were not
upheld, put an assertion at the start of the function.
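A minimal sketch of the pattern, assuming a hypothetical `read_phys` helper (this
is not actual kernel code):

```rust
/// Read a `u64` from a physical address that the caller claims is identity mapped.
/// Hypothetical example of guarding an unsafe operation with an assertion.
unsafe fn read_phys(addr: usize) -> u64 {
    // The raw read is only sound for non-null, properly aligned addresses, so
    // assert that condition up front: a kernel panic here is far preferable to
    // silent undefined behavior later.
    assert!(addr != 0 && addr % core::mem::align_of::<u64>() == 0,
            "read_phys: null or unaligned address {:#x}", addr);
    core::ptr::read_volatile(addr as *const u64)
}
```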
### Be gentle
Don't just write as much code as you can as quickly as possible. Take your time
and be careful.
### Commits
Use descriptive commits. One way to force yourself to do that is to not pass the
`-m` flag, which will make your editor pop up so that you can conveniently write
long commit messages.


@ -1,8 +0,0 @@
[package]
name = "arch_arm"
version = "0.1.0"
[dependencies]
bitflags = "*"
hole_list_allocator = { path = "../../../crates/hole_list_allocator"}
spin = "*"


@ -1,9 +0,0 @@
#[derive(Debug)]
pub struct Context;
impl Context {
pub fn new() -> Self {
Context
}
}


@ -1,70 +0,0 @@
/// Memcpy
///
/// Copy N bytes of memory from one location to another.
#[no_mangle]
pub unsafe extern fn memcpy(dest: *mut u8, src: *const u8,
n: usize) -> *mut u8 {
let mut i = 0;
while i < n {
*dest.offset(i as isize) = *src.offset(i as isize);
i += 1;
}
dest
}
/// Memmove
///
/// Copy N bytes of memory from src to dest. The memory areas may overlap.
#[no_mangle]
pub unsafe extern fn memmove(dest: *mut u8, src: *const u8,
n: usize) -> *mut u8 {
if src < dest as *const u8 {
let mut i = n;
while i != 0 {
i -= 1;
*dest.offset(i as isize) = *src.offset(i as isize);
}
} else {
let mut i = 0;
while i < n {
*dest.offset(i as isize) = *src.offset(i as isize);
i += 1;
}
}
dest
}
/// Memset
///
/// Fill a block of memory with a specified value.
#[no_mangle]
pub unsafe extern fn memset(s: *mut u8, c: i32, n: usize) -> *mut u8 {
let mut i = 0;
while i < n {
*s.offset(i as isize) = c as u8;
i += 1;
}
s
}
/// Memcmp
///
/// Compare two blocks of memory.
#[no_mangle]
pub unsafe extern fn memcmp(s1: *const u8, s2: *const u8, n: usize) -> i32 {
let mut i = 0;
while i < n {
let a = *s1.offset(i as isize);
let b = *s2.offset(i as isize);
if a != b {
return a as i32 - b as i32
}
i += 1;
}
0
}


@ -1,30 +0,0 @@
//! Interrupt instructions
/// Clear interrupts
#[inline(always)]
pub unsafe fn disable() {
}
/// Set interrupts
#[inline(always)]
pub unsafe fn enable() {
}
/// Set interrupts and halt
#[inline(always)]
pub unsafe fn enable_and_halt() {
halt();
}
/// Halt instruction
#[inline(always)]
pub unsafe fn halt() {
//asm!("wfi" : : : : "volatile");
asm!("nop" : : : : "volatile");
}
/// Get a stack trace
//TODO: Check for stack being mapped before dereferencing
#[inline(never)]
pub unsafe fn stack_trace() {
}


@ -1,39 +0,0 @@
//! Architecture support for ARM
#![feature(asm)]
#![feature(lang_items)]
#![feature(naked_functions)]
#![no_std]
extern crate hole_list_allocator as allocator;
#[macro_use]
extern crate bitflags;
extern crate spin;
/// Print to console
#[macro_export]
macro_rules! print {
($($arg:tt)*) => ({});
}
/// Print with new line to console
#[macro_export]
macro_rules! println {
($fmt:expr) => (print!(concat!($fmt, "\n")));
($fmt:expr, $($arg:tt)*) => (print!(concat!($fmt, "\n"), $($arg)*));
}
/// Context switching
pub mod context;
/// Memset, memcpy, etc.
pub mod externs;
/// Interrupt handling
pub mod interrupt;
/// Panic support
pub mod panic;
/// Initialization function
pub mod start;


@ -1,60 +0,0 @@
ENTRY(kstart)
OUTPUT_ARCH(arm)
OUTPUT_FORMAT(elf32-littlearm)
KERNEL_OFFSET = 0;
SECTIONS {
. = KERNEL_OFFSET;
.text : AT(ADDR(.text) - KERNEL_OFFSET) {
__text_start = .;
*(.text*)
. = ALIGN(4096);
__text_end = .;
}
.rodata : AT(ADDR(.rodata) - KERNEL_OFFSET) {
__rodata_start = .;
*(.rodata*)
. = ALIGN(4096);
__rodata_end = .;
}
.data : AT(ADDR(.data) - KERNEL_OFFSET) {
__data_start = .;
*(.data*)
. = ALIGN(4096);
__data_end = .;
}
.tdata : AT(ADDR(.tdata) - KERNEL_OFFSET) {
__tdata_start = .;
*(.tdata*)
. = ALIGN(4096);
__tdata_end = .;
__tbss_start = .;
*(.tbss*)
. += 8;
. = ALIGN(4096);
__tbss_end = .;
}
.bss : AT(ADDR(.bss) - KERNEL_OFFSET) {
__bss_start = .;
*(.bss*)
. = ALIGN(4096);
__bss_end = .;
}
__end = .;
/DISCARD/ : {
*(.comment*)
*(.debug*)
*(.eh_frame*)
*(.gcc_except_table*)
*(.note*)
*(.rel.eh_frame*)
}
}


@ -1,38 +0,0 @@
//! Intrinsics for panic handling
use interrupt;
#[cfg(not(test))]
#[lang = "eh_personality"]
extern "C" fn eh_personality() {}
#[cfg(not(test))]
/// Required to handle panics
#[lang = "panic_fmt"]
extern "C" fn panic_fmt(fmt: ::core::fmt::Arguments, file: &str, line: u32) -> ! {
println!("PANIC: {}", fmt);
println!("FILE: {}", file);
println!("LINE: {}", line);
unsafe { interrupt::stack_trace(); }
println!("HALT");
loop {
unsafe { interrupt::halt(); }
}
}
#[allow(non_snake_case)]
#[no_mangle]
/// Required to handle panics
pub extern "C" fn _Unwind_Resume() -> ! {
loop {
unsafe { interrupt::halt(); }
}
}
/// Required for linker
#[no_mangle]
pub extern "C" fn __aeabi_unwind_cpp_pr0() {
loop {}
}


@ -1,27 +0,0 @@
const SERIAL_BASE: *mut u8 = 0x16000000 as *mut u8;
const SERIAL_FLAG_REGISTER: *const u8 = 0x16000018 as *const u8;
const SERIAL_BUFFER_FULL: u8 = (1 << 5);
unsafe fn putc (c: u8)
{
/* Wait until the serial transmit buffer has room */
while *SERIAL_FLAG_REGISTER & SERIAL_BUFFER_FULL == SERIAL_BUFFER_FULL {}
/* Put our character, c, into the serial buffer */
*SERIAL_BASE = c;
}
unsafe fn puts(string: &str)
{
for b in string.bytes() {
putc(b);
}
}
#[no_mangle]
#[naked]
pub unsafe extern fn kstart() -> ! {
asm!("mov sp, 0x18000" : : : : "volatile");
puts("TEST\r\n");
loop {}
}


@ -1,3 +0,0 @@
[package]
name = "arch_test"
version = "0.1.0"


@ -1,43 +0,0 @@
//! Interrupt instructions
static mut INTERRUPTS_ENABLED: bool = false;
/// Clear interrupts
#[inline(always)]
pub unsafe fn disable() {
println!("CLEAR INTERRUPTS");
INTERRUPTS_ENABLED = false;
}
/// Set interrupts
#[inline(always)]
pub unsafe fn enable() {
println!("SET INTERRUPTS");
INTERRUPTS_ENABLED = true;
}
/// Halt instruction
#[inline(always)]
pub unsafe fn halt() {
assert!(INTERRUPTS_ENABLED);
::std::thread::yield_now();
}
/// Pause instruction
#[inline(always)]
pub unsafe fn pause() {
}
/// Set interrupts and nop
#[inline(always)]
pub unsafe fn enable_and_nop() {
enable();
}
/// Set interrupts and halt
#[inline(always)]
pub unsafe fn enable_and_halt() {
enable();
halt();
}


@ -1,43 +0,0 @@
//! Architecture support for testing
pub use std::io;
/// Print to console
#[macro_export]
macro_rules! print {
($($arg:tt)*) => ({
use $crate::io::Write;
let _ = write!($crate::io::stdout(), $($arg)*);
});
}
/// Print with new line to console
#[macro_export]
macro_rules! println {
($fmt:expr) => (print!(concat!($fmt, "\n")));
($fmt:expr, $($arg:tt)*) => (print!(concat!($fmt, "\n"), $($arg)*));
}
/// Create an interrupt function that can safely run rust code
#[macro_export]
macro_rules! interrupt {
($name:ident, $func:block) => {
pub unsafe extern fn $name () {
unsafe fn inner() {
$func
}
// Call inner rust function
inner();
}
};
}
/// Interrupt instructions
pub mod interrupt;
/// Initialization and main function
pub mod main;
/// Time functions
pub mod time;


@ -1,11 +0,0 @@
/// This function is where the kernel sets up IRQ handlers
/// It is incredibly unsafe, and should be minimal in nature
extern {
fn kmain() -> !;
}
#[no_mangle]
pub unsafe extern fn kstart() -> ! {
kmain();
}


@ -1,7 +0,0 @@
pub fn monotonic() -> (u64, u64) {
(0, 0)
}
pub fn realtime() -> (u64, u64) {
(0, 0)
}


@ -1,15 +0,0 @@
[package]
name = "arch_x86_64"
version = "0.1.0"
[dependencies]
bitflags = "*"
hole_list_allocator = { path = "../../../crates/hole_list_allocator/" }
io = { path = "../../../crates/io/" }
raw-cpuid = { git = "https://github.com/gz/rust-cpuid" }
spin = "*"
redox_syscall = { path = "../../../syscall/" }
[dependencies.x86]
version = "0.7"
default-features = false


@ -1,77 +0,0 @@
#[repr(packed)]
pub struct DrhdFault {
pub sts: u32,
pub ctrl: u32,
pub data: u32,
pub addr: [u32; 2],
_rsv: [u64; 2],
pub log: u64,
}
#[repr(packed)]
pub struct DrhdProtectedMemory {
pub en: u32,
pub low_base: u32,
pub low_limit: u32,
pub high_base: u64,
pub high_limit: u64,
}
#[repr(packed)]
pub struct DrhdInvalidation {
pub queue_head: u64,
pub queue_tail: u64,
pub queue_addr: u64,
_rsv: u32,
pub cmpl_sts: u32,
pub cmpl_ctrl: u32,
pub cmpl_data: u32,
pub cmpl_addr: [u32; 2],
}
#[repr(packed)]
pub struct DrhdPageRequest {
pub queue_head: u64,
pub queue_tail: u64,
pub queue_addr: u64,
_rsv: u32,
pub sts: u32,
pub ctrl: u32,
pub data: u32,
pub addr: [u32; 2],
}
#[repr(packed)]
pub struct DrhdMtrrVariable {
pub base: u64,
pub mask: u64,
}
#[repr(packed)]
pub struct DrhdMtrr {
pub cap: u64,
pub def_type: u64,
pub fixed: [u64; 11],
pub variable: [DrhdMtrrVariable; 10],
}
#[repr(packed)]
pub struct Drhd {
pub version: u32,
_rsv: u32,
pub cap: u64,
pub ext_cap: u64,
pub gl_cmd: u32,
pub gl_sts: u32,
pub root_table: u64,
pub ctx_cmd: u64,
_rsv1: u32,
pub fault: DrhdFault,
_rsv2: u32,
pub pm: DrhdProtectedMemory,
pub invl: DrhdInvalidation,
_rsv3: u64,
pub intr_table: u64,
pub page_req: DrhdPageRequest,
pub mtrr: DrhdMtrr,
}


@ -1,183 +0,0 @@
use core::mem;
use super::sdt::Sdt;
use self::drhd::Drhd;
use memory::Frame;
use paging::{entry, ActivePageTable, PhysicalAddress};
pub mod drhd;
/// The DMA Remapping Table
#[derive(Debug)]
pub struct Dmar {
sdt: &'static Sdt,
pub addr_width: u8,
pub flags: u8,
_rsv: [u8; 10],
}
impl Dmar {
pub fn new(sdt: &'static Sdt) -> Option<Dmar> {
if &sdt.signature == b"DMAR" && sdt.data_len() >= 12 { //Not valid if no local address and flags
let addr_width = unsafe { *(sdt.data_address() as *const u8) };
let flags = unsafe { *(sdt.data_address() as *const u8).offset(1) };
let rsv: [u8; 10] = unsafe { *((sdt.data_address() as *const u8).offset(2) as *const [u8; 10]) };
Some(Dmar {
sdt: sdt,
addr_width: addr_width,
flags: flags,
_rsv: rsv,
})
} else {
None
}
}
pub fn iter(&self) -> DmarIter {
DmarIter {
sdt: self.sdt,
i: 12 // Skip address width and flags
}
}
}
///
/// DMAR DMA Remapping Hardware Unit Definition
// TODO: Implement iterator on DmarDrhd scope
#[derive(Debug)]
#[repr(packed)]
pub struct DmarDrhd {
kind: u16,
length: u16,
flags: u8,
_rsv: u8,
segment: u16,
base: u64,
}
impl DmarDrhd {
pub fn get(&self, active_table: &mut ActivePageTable) -> &'static mut Drhd {
let result = active_table.identity_map(Frame::containing_address(PhysicalAddress::new(self.base as usize)), entry::PRESENT | entry::WRITABLE | entry::NO_EXECUTE);
result.flush(active_table);
unsafe { &mut *(self.base as *mut Drhd) }
}
}
/// DMAR Reserved Memory Region Reporting
// TODO: Implement iterator on DmarRmrr scope
#[derive(Debug)]
#[repr(packed)]
pub struct DmarRmrr {
kind: u16,
length: u16,
_rsv: u16,
segment: u16,
base: u64,
limit: u64,
}
/// DMAR Root Port ATS Capability Reporting
// TODO: Implement iterator on DmarAtsr scope
#[derive(Debug)]
#[repr(packed)]
pub struct DmarAtsr {
kind: u16,
length: u16,
flags: u8,
_rsv: u8,
segment: u16,
}
/// DMAR Remapping Hardware Static Affinity
#[derive(Debug)]
#[repr(packed)]
pub struct DmarRhsa {
kind: u16,
length: u16,
_rsv: u32,
base: u64,
domain: u32,
}
/// DMAR ACPI Name-space Device Declaration
// TODO: Implement iterator on DmarAndd object name
#[derive(Debug)]
#[repr(packed)]
pub struct DmarAndd {
kind: u16,
length: u16,
_rsv: [u8; 3],
acpi_dev: u8,
}
/// DMAR Entries
#[derive(Debug)]
pub enum DmarEntry {
Drhd(&'static DmarDrhd),
InvalidDrhd(usize),
Rmrr(&'static DmarRmrr),
InvalidRmrr(usize),
Atsr(&'static DmarAtsr),
InvalidAtsr(usize),
Rhsa(&'static DmarRhsa),
InvalidRhsa(usize),
Andd(&'static DmarAndd),
InvalidAndd(usize),
Unknown(u16)
}
pub struct DmarIter {
sdt: &'static Sdt,
i: usize
}
impl Iterator for DmarIter {
type Item = DmarEntry;
fn next(&mut self) -> Option<Self::Item> {
if self.i + 4 <= self.sdt.data_len() {
let entry_type = unsafe { *((self.sdt.data_address() as *const u8).offset(self.i as isize) as *const u16) };
let entry_len = unsafe { *((self.sdt.data_address() as *const u8).offset(self.i as isize + 2) as *const u16) } as usize;
if self.i + entry_len <= self.sdt.data_len() {
let item = match entry_type {
0 => if entry_len >= mem::size_of::<DmarDrhd>() {
DmarEntry::Drhd(unsafe { &*((self.sdt.data_address() + self.i) as *const DmarDrhd) })
} else {
DmarEntry::InvalidDrhd(entry_len)
},
1 => if entry_len >= mem::size_of::<DmarRmrr>() {
DmarEntry::Rmrr(unsafe { &*((self.sdt.data_address() + self.i) as *const DmarRmrr) })
} else {
DmarEntry::InvalidRmrr(entry_len)
},
2 => if entry_len >= mem::size_of::<DmarAtsr>() {
DmarEntry::Atsr(unsafe { &*((self.sdt.data_address() + self.i) as *const DmarAtsr) })
} else {
DmarEntry::InvalidAtsr(entry_len)
},
3 => if entry_len == mem::size_of::<DmarRhsa>() {
DmarEntry::Rhsa(unsafe { &*((self.sdt.data_address() + self.i) as *const DmarRhsa) })
} else {
DmarEntry::InvalidRhsa(entry_len)
},
4 => if entry_len >= mem::size_of::<DmarAndd>() {
DmarEntry::Andd(unsafe { &*((self.sdt.data_address() + self.i) as *const DmarAndd) })
} else {
DmarEntry::InvalidAndd(entry_len)
},
_ => DmarEntry::Unknown(entry_type)
};
self.i += entry_len;
Some(item)
} else {
None
}
} else {
None
}
}
}


@ -1,96 +0,0 @@
use core::{mem, ptr};
use super::sdt::Sdt;
#[repr(packed)]
#[derive(Debug)]
pub struct Fadt {
pub header: Sdt,
pub firmware_ctrl: u32,
pub dsdt: u32,
// field used in ACPI 1.0; no longer in use, for compatibility only
reserved: u8,
pub preferred_power_managament: u8,
pub sci_interrupt: u16,
pub smi_command_port: u32,
pub acpi_enable: u8,
pub acpi_disable: u8,
pub s4_bios_req: u8,
pub pstate_control: u8,
pub pm1a_event_block: u32,
pub pm1b_event_block: u32,
pub pm1a_control_block: u32,
pub pm1b_control_block: u32,
pub pm2_control_block: u32,
pub pm_timer_block: u32,
pub gpe0_block: u32,
pub gpe1_block: u32,
pub pm1_event_length: u8,
pub pm1_control_length: u8,
pub pm2_control_length: u8,
pub pm_timer_length: u8,
pub gpe0_ength: u8,
pub gpe1_length: u8,
pub gpe1_base: u8,
pub c_state_control: u8,
pub worst_c2_latency: u16,
pub worst_c3_latency: u16,
pub flush_size: u16,
pub flush_stride: u16,
pub duty_offset: u8,
pub duty_width: u8,
pub day_alarm: u8,
pub month_alarm: u8,
pub century: u8,
// reserved in ACPI 1.0; used since ACPI 2.0+
pub boot_architecture_flags: u16,
reserved2: u8,
pub flags: u32,
}
/* ACPI 2 structure
#[repr(packed)]
#[derive(Clone, Copy, Debug, Default)]
pub struct GenericAddressStructure {
address_space: u8,
bit_width: u8,
bit_offset: u8,
access_size: u8,
address: u64,
}
{
// 12 byte structure; see below for details
pub reset_reg: GenericAddressStructure,
pub reset_value: u8,
reserved3: [u8; 3],
// 64bit pointers - Available on ACPI 2.0+
pub x_firmware_control: u64,
pub x_dsdt: u64,
pub x_pm1a_event_block: GenericAddressStructure,
pub x_pm1b_event_block: GenericAddressStructure,
pub x_pm1a_control_block: GenericAddressStructure,
pub x_pm1b_control_block: GenericAddressStructure,
pub x_pm2_control_block: GenericAddressStructure,
pub x_pm_timer_block: GenericAddressStructure,
pub x_gpe0_block: GenericAddressStructure,
pub x_gpe1_block: GenericAddressStructure,
}
*/
impl Fadt {
pub fn new(sdt: &'static Sdt) -> Option<Fadt> {
if &sdt.signature == b"FACP" && sdt.length as usize >= mem::size_of::<Fadt>() {
Some(unsafe { ptr::read((sdt as *const Sdt) as *const Fadt) })
} else {
None
}
}
}


@ -1,133 +0,0 @@
use core::mem;
use super::sdt::Sdt;
/// The Multiple APIC Descriptor Table
#[derive(Debug)]
pub struct Madt {
sdt: &'static Sdt,
pub local_address: u32,
pub flags: u32
}
impl Madt {
pub fn new(sdt: &'static Sdt) -> Option<Madt> {
if &sdt.signature == b"APIC" && sdt.data_len() >= 8 { //Not valid if no local address and flags
let local_address = unsafe { *(sdt.data_address() as *const u32) };
let flags = unsafe { *(sdt.data_address() as *const u32).offset(1) };
Some(Madt {
sdt: sdt,
local_address: local_address,
flags: flags
})
} else {
None
}
}
pub fn iter(&self) -> MadtIter {
MadtIter {
sdt: self.sdt,
i: 8 // Skip local controller address and flags
}
}
}
///
/// MADT Local APIC
#[derive(Debug)]
#[repr(packed)]
pub struct MadtLocalApic {
/// Processor ID
pub processor: u8,
/// Local APIC ID
pub id: u8,
/// Flags. 1 means that the processor is enabled
pub flags: u32
}
/// MADT I/O APIC
#[derive(Debug)]
#[repr(packed)]
pub struct MadtIoApic {
/// I/O APIC ID
pub id: u8,
/// reserved
reserved: u8,
/// I/O APIC address
pub address: u32,
/// Global system interrupt base
pub gsi_base: u32
}
/// MADT Interrupt Source Override
#[derive(Debug)]
#[repr(packed)]
pub struct MadtIntSrcOverride {
/// Bus Source
pub bus_source: u8,
/// IRQ Source
pub irq_source: u8,
/// Global system interrupt base
pub gsi_base: u32,
/// Flags
pub flags: u16
}
/// MADT Entries
#[derive(Debug)]
pub enum MadtEntry {
LocalApic(&'static MadtLocalApic),
InvalidLocalApic(usize),
IoApic(&'static MadtIoApic),
InvalidIoApic(usize),
IntSrcOverride(&'static MadtIntSrcOverride),
InvalidIntSrcOverride(usize),
Unknown(u8)
}
pub struct MadtIter {
sdt: &'static Sdt,
i: usize
}
impl Iterator for MadtIter {
type Item = MadtEntry;
fn next(&mut self) -> Option<Self::Item> {
if self.i + 1 < self.sdt.data_len() {
let entry_type = unsafe { *(self.sdt.data_address() as *const u8).offset(self.i as isize) };
let entry_len = unsafe { *(self.sdt.data_address() as *const u8).offset(self.i as isize + 1) } as usize;
if self.i + entry_len <= self.sdt.data_len() {
let item = match entry_type {
0 => if entry_len == mem::size_of::<MadtLocalApic>() + 2 {
MadtEntry::LocalApic(unsafe { &*((self.sdt.data_address() + self.i + 2) as *const MadtLocalApic) })
} else {
MadtEntry::InvalidLocalApic(entry_len)
},
1 => if entry_len == mem::size_of::<MadtIoApic>() + 2 {
MadtEntry::IoApic(unsafe { &*((self.sdt.data_address() + self.i + 2) as *const MadtIoApic) })
} else {
MadtEntry::InvalidIoApic(entry_len)
},
2 => if entry_len == mem::size_of::<MadtIntSrcOverride>() + 2 {
MadtEntry::IntSrcOverride(unsafe { &*((self.sdt.data_address() + self.i + 2) as *const MadtIntSrcOverride) })
} else {
MadtEntry::InvalidIntSrcOverride(entry_len)
},
_ => MadtEntry::Unknown(entry_type)
};
self.i += entry_len;
Some(item)
} else {
None
}
} else {
None
}
}
}


@ -1,284 +0,0 @@
//! # ACPI
//! Code to parse the ACPI tables
use core::intrinsics::{atomic_load, atomic_store};
use core::sync::atomic::Ordering;
use device::local_apic::LOCAL_APIC;
use interrupt;
use memory::{allocate_frames, Frame};
use paging::{entry, ActivePageTable, Page, PhysicalAddress, VirtualAddress};
use start::{kstart_ap, CPU_COUNT, AP_READY};
use self::dmar::{Dmar, DmarEntry};
use self::fadt::Fadt;
use self::madt::{Madt, MadtEntry};
use self::rsdt::Rsdt;
use self::sdt::Sdt;
use self::xsdt::Xsdt;
pub mod dmar;
pub mod fadt;
pub mod madt;
pub mod rsdt;
pub mod sdt;
pub mod xsdt;
const TRAMPOLINE: usize = 0x7E00;
const AP_STARTUP: usize = TRAMPOLINE + 512;
pub fn init_sdt(sdt: &'static Sdt, active_table: &mut ActivePageTable) {
print!(" ");
for &c in sdt.signature.iter() {
print!("{}", c as char);
}
if let Some(fadt) = Fadt::new(sdt) {
println!(": {:#?}", fadt);
} else if let Some(madt) = Madt::new(sdt) {
println!(": {:>08X}: {}", madt.local_address, madt.flags);
let mut local_apic = unsafe { &mut LOCAL_APIC };
let me = local_apic.id() as u8;
if local_apic.x2 {
println!(" X2APIC {}", me);
} else {
println!(" XAPIC {}: {:>08X}", me, local_apic.address);
}
let trampoline_frame = Frame::containing_address(PhysicalAddress::new(TRAMPOLINE));
let trampoline_page = Page::containing_address(VirtualAddress::new(TRAMPOLINE));
// Map trampoline
let result = active_table.map_to(trampoline_page, trampoline_frame, entry::PRESENT | entry::WRITABLE);
result.flush(active_table);
for madt_entry in madt.iter() {
println!(" {:?}", madt_entry);
match madt_entry {
MadtEntry::LocalApic(ap_local_apic) => if ap_local_apic.id == me {
println!(" This is my local APIC");
} else {
if ap_local_apic.flags & 1 == 1 {
// Increase CPU ID
CPU_COUNT.fetch_add(1, Ordering::SeqCst);
// Allocate a stack
let stack_start = allocate_frames(64).expect("no more frames in acpi stack_start").start_address().get() + ::KERNEL_OFFSET;
let stack_end = stack_start + 64 * 4096;
let ap_ready = TRAMPOLINE as *mut u64;
let ap_cpu_id = unsafe { ap_ready.offset(1) };
let ap_page_table = unsafe { ap_ready.offset(2) };
let ap_stack_start = unsafe { ap_ready.offset(3) };
let ap_stack_end = unsafe { ap_ready.offset(4) };
let ap_code = unsafe { ap_ready.offset(5) };
// Set the ap_ready to 0, volatile
unsafe { atomic_store(ap_ready, 0) };
unsafe { atomic_store(ap_cpu_id, ap_local_apic.id as u64) };
unsafe { atomic_store(ap_page_table, active_table.address() as u64) };
unsafe { atomic_store(ap_stack_start, stack_start as u64) };
unsafe { atomic_store(ap_stack_end, stack_end as u64) };
unsafe { atomic_store(ap_code, kstart_ap as u64) };
AP_READY.store(false, Ordering::SeqCst);
print!(" AP {}:", ap_local_apic.id);
// Send INIT IPI
{
let mut icr = 0x4500;
if local_apic.x2 {
icr |= (ap_local_apic.id as u64) << 32;
} else {
icr |= (ap_local_apic.id as u64) << 56;
}
print!(" IPI...");
local_apic.set_icr(icr);
}
// Send START IPI
{
//Start at 0x0800:0000 => 0x8000. Hopefully the bootloader code is still there
let ap_segment = (AP_STARTUP >> 12) & 0xFF;
let mut icr = 0x4600 | ap_segment as u64;
if local_apic.x2 {
icr |= (ap_local_apic.id as u64) << 32;
} else {
icr |= (ap_local_apic.id as u64) << 56;
}
print!(" SIPI...");
local_apic.set_icr(icr);
}
// Wait for trampoline ready
print!(" Wait...");
while unsafe { atomic_load(ap_ready) } == 0 {
interrupt::pause();
}
print!(" Trampoline...");
while ! AP_READY.load(Ordering::SeqCst) {
interrupt::pause();
}
println!(" Ready");
active_table.flush_all();
} else {
println!(" CPU Disabled");
}
},
_ => ()
}
}
// Unmap trampoline
let result = active_table.unmap(trampoline_page);
result.flush(active_table);
} else if let Some(dmar) = Dmar::new(sdt) {
println!(": {}: {}", dmar.addr_width, dmar.flags);
for dmar_entry in dmar.iter() {
println!(" {:?}", dmar_entry);
match dmar_entry {
DmarEntry::Drhd(dmar_drhd) => {
let drhd = dmar_drhd.get(active_table);
println!("VER: {:X}", drhd.version);
println!("CAP: {:X}", drhd.cap);
println!("EXT_CAP: {:X}", drhd.ext_cap);
println!("GCMD: {:X}", drhd.gl_cmd);
println!("GSTS: {:X}", drhd.gl_sts);
println!("RT: {:X}", drhd.root_table);
},
_ => ()
}
}
} else {
println!(": Unknown");
}
}
/// Parse the ACPI tables to gather CPU, interrupt, and timer information
pub unsafe fn init(active_table: &mut ActivePageTable) -> Option<Acpi> {
let start_addr = 0xE0000;
let end_addr = 0xFFFFF;
// Map all of the ACPI RSDP space
{
let start_frame = Frame::containing_address(PhysicalAddress::new(start_addr));
let end_frame = Frame::containing_address(PhysicalAddress::new(end_addr));
for frame in Frame::range_inclusive(start_frame, end_frame) {
let page = Page::containing_address(VirtualAddress::new(frame.start_address().get()));
let result = active_table.map_to(page, frame, entry::PRESENT | entry::NO_EXECUTE);
result.flush(active_table);
}
}
// Search for RSDP
if let Some(rsdp) = RSDP::search(start_addr, end_addr) {
let get_sdt = |sdt_address: usize, active_table: &mut ActivePageTable| -> (&'static Sdt, bool) {
let mapped = if active_table.translate_page(Page::containing_address(VirtualAddress::new(sdt_address))).is_none() {
let sdt_frame = Frame::containing_address(PhysicalAddress::new(sdt_address));
let sdt_page = Page::containing_address(VirtualAddress::new(sdt_address));
let result = active_table.map_to(sdt_page, sdt_frame, entry::PRESENT | entry::NO_EXECUTE);
result.flush(active_table);
true
} else {
false
};
(&*(sdt_address as *const Sdt), mapped)
};
let drop_sdt = |sdt: &'static Sdt, mapped: bool, active_table: &mut ActivePageTable| {
let sdt_address = sdt as *const Sdt as usize;
drop(sdt);
if mapped {
let sdt_page = Page::containing_address(VirtualAddress::new(sdt_address));
let result = active_table.unmap(sdt_page);
result.flush(active_table);
}
};
let (rxsdt, rxmapped) = get_sdt(rsdp.sdt_address(), active_table);
for &c in rxsdt.signature.iter() {
print!("{}", c as char);
}
println!(":");
if let Some(rsdt) = Rsdt::new(rxsdt) {
for sdt_address in rsdt.iter() {
let (sdt, mapped) = get_sdt(sdt_address, active_table);
init_sdt(sdt, active_table);
drop_sdt(sdt, mapped, active_table);
}
} else if let Some(xsdt) = Xsdt::new(rxsdt) {
for sdt_address in xsdt.iter() {
let (sdt, mapped) = get_sdt(sdt_address, active_table);
init_sdt(sdt, active_table);
drop_sdt(sdt, mapped, active_table);
}
} else {
println!("UNKNOWN RSDT OR XSDT SIGNATURE");
}
drop_sdt(rxsdt, rxmapped, active_table);
} else {
println!("NO RSDP FOUND");
}
// Unmap all of the ACPI RSDP space
{
let start_frame = Frame::containing_address(PhysicalAddress::new(start_addr));
let end_frame = Frame::containing_address(PhysicalAddress::new(end_addr));
for frame in Frame::range_inclusive(start_frame, end_frame) {
let page = Page::containing_address(VirtualAddress::new(frame.start_address().get()));
let result = active_table.unmap(page);
result.flush(active_table);
}
}
None
}
pub struct Acpi;
/// RSDP
#[derive(Copy, Clone, Debug)]
#[repr(packed)]
pub struct RSDP {
signature: [u8; 8],
checksum: u8,
oemid: [u8; 6],
revision: u8,
rsdt_address: u32,
length: u32,
xsdt_address: u64,
extended_checksum: u8,
reserved: [u8; 3]
}
impl RSDP {
/// Search for the RSDP
pub fn search(start_addr: usize, end_addr: usize) -> Option<RSDP> {
for i in 0 .. (end_addr + 1 - start_addr)/16 {
let rsdp = unsafe { &*((start_addr + i * 16) as *const RSDP) };
if &rsdp.signature == b"RSD PTR " {
return Some(*rsdp);
}
}
None
}
/// Get the RSDT or XSDT address
pub fn sdt_address(&self) -> usize {
if self.revision >= 2 {
self.xsdt_address as usize
} else {
self.rsdt_address as usize
}
}
}


@ -1,41 +0,0 @@
use core::mem;
use super::sdt::Sdt;
#[derive(Debug)]
pub struct Rsdt(&'static Sdt);
impl Rsdt {
pub fn new(sdt: &'static Sdt) -> Option<Rsdt> {
if &sdt.signature == b"RSDT" {
Some(Rsdt(sdt))
} else {
None
}
}
pub fn iter(&self) -> RsdtIter {
RsdtIter {
sdt: self.0,
i: 0
}
}
}
pub struct RsdtIter {
sdt: &'static Sdt,
i: usize
}
impl Iterator for RsdtIter {
type Item = usize;
fn next(&mut self) -> Option<Self::Item> {
if self.i < self.sdt.data_len()/mem::size_of::<u32>() {
let item = unsafe { *(self.sdt.data_address() as *const u32).offset(self.i as isize) };
self.i += 1;
Some(item as usize)
} else {
None
}
}
}


@ -1,33 +0,0 @@
use core::mem;
#[derive(Copy, Clone, Debug)]
#[repr(packed)]
pub struct Sdt {
pub signature: [u8; 4],
pub length: u32,
pub revision: u8,
pub checksum: u8,
pub oem_id: [u8; 6],
pub oem_table_id: [u8; 8],
pub oem_revision: u32,
pub creator_id: u32,
pub creator_revision: u32
}
impl Sdt {
/// Get the address of this table's data
pub fn data_address(&'static self) -> usize {
self as *const _ as usize + mem::size_of::<Sdt>()
}
/// Get the length of this table's data
pub fn data_len(&'static self) -> usize {
let total_size = self.length as usize;
let header_size = mem::size_of::<Sdt>();
if total_size >= header_size {
total_size - header_size
} else {
0
}
}
}


@ -1,41 +0,0 @@
use core::mem;
use super::sdt::Sdt;
#[derive(Debug)]
pub struct Xsdt(&'static Sdt);
impl Xsdt {
pub fn new(sdt: &'static Sdt) -> Option<Xsdt> {
if &sdt.signature == b"XSDT" {
Some(Xsdt(sdt))
} else {
None
}
}
pub fn iter(&self) -> XsdtIter {
XsdtIter {
sdt: self.0,
i: 0
}
}
}
pub struct XsdtIter {
sdt: &'static Sdt,
i: usize
}
impl Iterator for XsdtIter {
type Item = usize;
fn next(&mut self) -> Option<Self::Item> {
if self.i < self.sdt.data_len()/mem::size_of::<u64>() {
let item = unsafe { *(self.sdt.data_address() as *const u64).offset(self.i as isize) };
self.i += 1;
Some(item as usize)
} else {
None
}
}
}


@ -1,14 +0,0 @@
use core::fmt::{self, Write};
use spin::Mutex;
use device::serial::COM1;
pub static CONSOLE: Mutex<Console> = Mutex::new(Console);
pub struct Console;
impl Write for Console {
fn write_str(&mut self, s: &str) -> Result<(), fmt::Error> {
COM1.lock().write_str(s)
}
}


@ -1,184 +0,0 @@
use core::mem;
use core::sync::atomic::{AtomicBool, ATOMIC_BOOL_INIT};
/// This must be used by the kernel to ensure that context switches are done atomically
/// Compare and exchange this to true when beginning a context switch on any CPU
/// The Context::switch_to function will set it back to false, allowing other CPUs to switch
/// This must be done, as no locks can be held on the stack during switch
pub static CONTEXT_SWITCH_LOCK: AtomicBool = ATOMIC_BOOL_INIT;
#[derive(Clone, Debug)]
pub struct Context {
/// FX valid?
loadable: bool,
/// FX location
fx: usize,
/// Page table pointer
cr3: usize,
/// RFLAGS register
rflags: usize,
/// RBX register
rbx: usize,
/// R12 register
r12: usize,
/// R13 register
r13: usize,
/// R14 register
r14: usize,
/// R15 register
r15: usize,
/// Base pointer
rbp: usize,
/// Stack pointer
rsp: usize
}
impl Context {
pub fn new() -> Context {
Context {
loadable: false,
fx: 0,
cr3: 0,
rflags: 0,
rbx: 0,
r12: 0,
r13: 0,
r14: 0,
r15: 0,
rbp: 0,
rsp: 0
}
}
pub fn get_page_table(&self) -> usize {
self.cr3
}
pub fn set_fx(&mut self, address: usize) {
self.fx = address;
}
pub fn set_page_table(&mut self, address: usize) {
self.cr3 = address;
}
pub fn set_stack(&mut self, address: usize) {
self.rsp = address;
}
pub unsafe fn signal_stack(&mut self, handler: extern fn(usize), sig: u8) {
self.push_stack(sig as usize);
self.push_stack(handler as usize);
self.push_stack(signal_handler_wrapper as usize);
}
pub unsafe fn push_stack(&mut self, value: usize) {
self.rsp -= mem::size_of::<usize>();
*(self.rsp as *mut usize) = value;
}
pub unsafe fn pop_stack(&mut self) -> usize {
let value = *(self.rsp as *const usize);
self.rsp += mem::size_of::<usize>();
value
}
/// Switch to the next context by restoring its stack and registers
#[cold]
#[inline(never)]
#[naked]
pub unsafe fn switch_to(&mut self, next: &mut Context) {
asm!("fxsave [$0]" : : "r"(self.fx) : "memory" : "intel", "volatile");
self.loadable = true;
if next.loadable {
asm!("fxrstor [$0]" : : "r"(next.fx) : "memory" : "intel", "volatile");
}else{
asm!("fninit" : : : "memory" : "intel", "volatile");
}
asm!("mov $0, cr3" : "=r"(self.cr3) : : "memory" : "intel", "volatile");
if next.cr3 != self.cr3 {
asm!("mov cr3, $0" : : "r"(next.cr3) : "memory" : "intel", "volatile");
}
asm!("pushfq ; pop $0" : "=r"(self.rflags) : : "memory" : "intel", "volatile");
asm!("push $0 ; popfq" : : "r"(next.rflags) : "memory" : "intel", "volatile");
asm!("mov $0, rbx" : "=r"(self.rbx) : : "memory" : "intel", "volatile");
asm!("mov rbx, $0" : : "r"(next.rbx) : "memory" : "intel", "volatile");
asm!("mov $0, r12" : "=r"(self.r12) : : "memory" : "intel", "volatile");
asm!("mov r12, $0" : : "r"(next.r12) : "memory" : "intel", "volatile");
asm!("mov $0, r13" : "=r"(self.r13) : : "memory" : "intel", "volatile");
asm!("mov r13, $0" : : "r"(next.r13) : "memory" : "intel", "volatile");
asm!("mov $0, r14" : "=r"(self.r14) : : "memory" : "intel", "volatile");
asm!("mov r14, $0" : : "r"(next.r14) : "memory" : "intel", "volatile");
asm!("mov $0, r15" : "=r"(self.r15) : : "memory" : "intel", "volatile");
asm!("mov r15, $0" : : "r"(next.r15) : "memory" : "intel", "volatile");
asm!("mov $0, rsp" : "=r"(self.rsp) : : "memory" : "intel", "volatile");
asm!("mov rsp, $0" : : "r"(next.rsp) : "memory" : "intel", "volatile");
asm!("mov $0, rbp" : "=r"(self.rbp) : : "memory" : "intel", "volatile");
asm!("mov rbp, $0" : : "r"(next.rbp) : "memory" : "intel", "volatile");
}
}
#[repr(packed)]
pub struct SignalHandlerStack {
r11: usize,
r10: usize,
r9: usize,
r8: usize,
rsi: usize,
rdi: usize,
rdx: usize,
rcx: usize,
rax: usize,
handler: extern fn(usize),
sig: usize,
rip: usize,
}
#[naked]
unsafe extern fn signal_handler_wrapper() {
#[inline(never)]
unsafe fn inner(stack: &SignalHandlerStack) {
(stack.handler)(stack.sig);
}
// Push scratch registers
asm!("push rax
push rcx
push rdx
push rdi
push rsi
push r8
push r9
push r10
push r11"
: : : : "intel", "volatile");
// Get reference to stack variables
let rsp: usize;
asm!("" : "={rsp}"(rsp) : : : "intel", "volatile");
// Call inner rust function
inner(&*(rsp as *const SignalHandlerStack));
// Pop scratch registers, error code, and return
asm!("pop r11
pop r10
pop r9
pop r8
pop rsi
pop rdi
pop rdx
pop rcx
pop rax
add rsp, 16"
: : : : "intel", "volatile");
}


@ -1,126 +0,0 @@
extern crate raw_cpuid;
use core::fmt::{Result, Write};
use self::raw_cpuid::CpuId;
pub fn cpu_info<W: Write>(w: &mut W) -> Result {
let cpuid = CpuId::new();
if let Some(info) = cpuid.get_vendor_info() {
write!(w, "Vendor: {}\n", info.as_string())?;
}
if let Some(info) = cpuid.get_extended_function_info() {
if let Some(brand) = info.processor_brand_string() {
write!(w, "Model: {}\n", brand)?;
}
}
if let Some(info) = cpuid.get_processor_frequency_info() {
write!(w, "CPU Base MHz: {}\n", info.processor_base_frequency())?;
write!(w, "CPU Max MHz: {}\n", info.processor_max_frequency())?;
write!(w, "Bus MHz: {}\n", info.bus_frequency())?;
}
write!(w, "Features:")?;
if let Some(info) = cpuid.get_feature_info() {
if info.has_fpu() { write!(w, " fpu")? };
if info.has_vme() { write!(w, " vme")? };
if info.has_de() { write!(w, " de")? };
if info.has_pse() { write!(w, " pse")? };
if info.has_tsc() { write!(w, " tsc")? };
if info.has_msr() { write!(w, " msr")? };
if info.has_pae() { write!(w, " pae")? };
if info.has_mce() { write!(w, " mce")? };
if info.has_cmpxchg8b() { write!(w, " cx8")? };
if info.has_apic() { write!(w, " apic")? };
if info.has_sysenter_sysexit() { write!(w, " sep")? };
if info.has_mtrr() { write!(w, " mtrr")? };
if info.has_pge() { write!(w, " pge")? };
if info.has_mca() { write!(w, " mca")? };
if info.has_cmov() { write!(w, " cmov")? };
if info.has_pat() { write!(w, " pat")? };
if info.has_pse36() { write!(w, " pse36")? };
if info.has_psn() { write!(w, " psn")? };
if info.has_clflush() { write!(w, " clflush")? };
if info.has_ds() { write!(w, " ds")? };
if info.has_acpi() { write!(w, " acpi")? };
if info.has_mmx() { write!(w, " mmx")? };
if info.has_fxsave_fxstor() { write!(w, " fxsr")? };
if info.has_sse() { write!(w, " sse")? };
if info.has_sse2() { write!(w, " sse2")? };
if info.has_ss() { write!(w, " ss")? };
if info.has_htt() { write!(w, " ht")? };
if info.has_tm() { write!(w, " tm")? };
if info.has_pbe() { write!(w, " pbe")? };
if info.has_sse3() { write!(w, " sse3")? };
if info.has_pclmulqdq() { write!(w, " pclmulqdq")? };
if info.has_ds_area() { write!(w, " dtes64")? };
if info.has_monitor_mwait() { write!(w, " monitor")? };
if info.has_cpl() { write!(w, " ds_cpl")? };
if info.has_vmx() { write!(w, " vmx")? };
if info.has_smx() { write!(w, " smx")? };
if info.has_eist() { write!(w, " est")? };
if info.has_tm2() { write!(w, " tm2")? };
if info.has_ssse3() { write!(w, " ssse3")? };
if info.has_cnxtid() { write!(w, " cnxtid")? };
if info.has_fma() { write!(w, " fma")? };
if info.has_cmpxchg16b() { write!(w, " cx16")? };
if info.has_pdcm() { write!(w, " pdcm")? };
if info.has_pcid() { write!(w, " pcid")? };
if info.has_dca() { write!(w, " dca")? };
if info.has_sse41() { write!(w, " sse4_1")? };
if info.has_sse42() { write!(w, " sse4_2")? };
if info.has_x2apic() { write!(w, " x2apic")? };
if info.has_movbe() { write!(w, " movbe")? };
if info.has_popcnt() { write!(w, " popcnt")? };
if info.has_tsc_deadline() { write!(w, " tsc_deadline_timer")? };
if info.has_aesni() { write!(w, " aes")? };
if info.has_xsave() { write!(w, " xsave")? };
if info.has_oxsave() { write!(w, " xsaveopt")? };
if info.has_avx() { write!(w, " avx")? };
if info.has_f16c() { write!(w, " f16c")? };
if info.has_rdrand() { write!(w, " rdrand")? };
}
if let Some(info) = cpuid.get_extended_function_info() {
if info.has_64bit_mode() { write!(w, " lm")? };
if info.has_rdtscp() { write!(w, " rdtscp")? };
if info.has_1gib_pages() { write!(w, " pdpe1gb")? };
if info.has_execute_disable() { write!(w, " nx")? };
if info.has_syscall_sysret() { write!(w, " syscall")? };
if info.has_prefetchw() { write!(w, " prefetchw")? };
if info.has_lzcnt() { write!(w, " lzcnt")? };
if info.has_lahf_sahf() { write!(w, " lahf_lm")? };
if info.has_invariant_tsc() { write!(w, " constant_tsc")? };
}
if let Some(info) = cpuid.get_extended_feature_info() {
if info.has_fsgsbase() { write!(w, " fsgsbase")? };
if info.has_tsc_adjust_msr() { write!(w, " tsc_adjust")? };
if info.has_bmi1() { write!(w, " bmi1")? };
if info.has_hle() { write!(w, " hle")? };
if info.has_avx2() { write!(w, " avx2")? };
if info.has_smep() { write!(w, " smep")? };
if info.has_bmi2() { write!(w, " bmi2")? };
if info.has_rep_movsb_stosb() { write!(w, " erms")? };
if info.has_invpcid() { write!(w, " invpcid")? };
if info.has_rtm() { write!(w, " rtm")? };
if info.has_qm() { write!(w, " qm")? };
if info.has_fpu_cs_ds_deprecated() { write!(w, " fpu_seg")? };
if info.has_mpx() { write!(w, " mpx")? };
}
write!(w, "\n")?;
Ok(())
}


@ -1,115 +0,0 @@
use core::intrinsics::{volatile_load, volatile_store};
use x86::cpuid::CpuId;
use x86::msr::*;
use memory::Frame;
use paging::{entry, ActivePageTable, PhysicalAddress, Page, VirtualAddress};
pub static mut LOCAL_APIC: LocalApic = LocalApic {
address: 0,
x2: false
};
pub unsafe fn init(active_table: &mut ActivePageTable) {
LOCAL_APIC.init(active_table);
}
pub unsafe fn init_ap() {
LOCAL_APIC.init_ap();
}
/// Local APIC
pub struct LocalApic {
pub address: usize,
pub x2: bool
}
impl LocalApic {
unsafe fn init(&mut self, active_table: &mut ActivePageTable) {
self.address = (rdmsr(IA32_APIC_BASE) as usize & 0xFFFF0000) + ::KERNEL_OFFSET;
self.x2 = CpuId::new().get_feature_info().unwrap().has_x2apic();
if ! self.x2 {
let page = Page::containing_address(VirtualAddress::new(self.address));
let frame = Frame::containing_address(PhysicalAddress::new(self.address - ::KERNEL_OFFSET));
let result = active_table.map_to(page, frame, entry::PRESENT | entry::WRITABLE | entry::NO_EXECUTE);
result.flush(active_table);
}
self.init_ap();
}
unsafe fn init_ap(&mut self) {
if self.x2 {
wrmsr(IA32_APIC_BASE, rdmsr(IA32_APIC_BASE) | 1 << 10);
wrmsr(IA32_X2APIC_SIVR, 0x100);
} else {
self.write(0xF0, 0x100);
}
}
unsafe fn read(&self, reg: u32) -> u32 {
volatile_load((self.address + reg as usize) as *const u32)
}
unsafe fn write(&mut self, reg: u32, value: u32) {
volatile_store((self.address + reg as usize) as *mut u32, value);
}
pub fn id(&self) -> u32 {
if self.x2 {
unsafe { rdmsr(IA32_X2APIC_APICID) as u32 }
} else {
unsafe { self.read(0x20) }
}
}
pub fn version(&self) -> u32 {
if self.x2 {
unsafe { rdmsr(IA32_X2APIC_VERSION) as u32 }
} else {
unsafe { self.read(0x30) }
}
}
pub fn icr(&self) -> u64 {
if self.x2 {
unsafe { rdmsr(IA32_X2APIC_ICR) }
} else {
unsafe {
(self.read(0x310) as u64) << 32 | self.read(0x300) as u64
}
}
}
pub fn set_icr(&mut self, value: u64) {
if self.x2 {
unsafe { wrmsr(IA32_X2APIC_ICR, value); }
} else {
unsafe {
while self.read(0x300) & 1 << 12 == 1 << 12 {}
self.write(0x310, (value >> 32) as u32);
self.write(0x300, value as u32);
while self.read(0x300) & 1 << 12 == 1 << 12 {}
}
}
}
pub fn ipi(&mut self, apic_id: usize) {
let mut icr = 0x4040;
if self.x2 {
icr |= (apic_id as u64) << 32;
} else {
icr |= (apic_id as u64) << 56;
}
self.set_icr(icr);
}
pub unsafe fn eoi(&mut self) {
if self.x2 {
wrmsr(IA32_X2APIC_EOI, 0);
} else {
self.write(0xB0, 0);
}
}
}


@ -1,16 +0,0 @@
use paging::ActivePageTable;
pub mod cpu;
pub mod local_apic;
pub mod rtc;
pub mod serial;
pub unsafe fn init(active_table: &mut ActivePageTable){
local_apic::init(active_table);
rtc::init();
serial::init();
}
pub unsafe fn init_ap() {
local_apic::init_ap();
}


@ -1,109 +0,0 @@
use io::{Io, Pio};
use time;
pub fn init() {
let mut rtc = Rtc::new();
time::START.lock().0 = rtc.time();
}
fn cvt_bcd(value: usize) -> usize {
(value & 0xF) + ((value / 16) * 10)
}
/// RTC
pub struct Rtc {
addr: Pio<u8>,
data: Pio<u8>,
}
impl Rtc {
/// Create new empty RTC
pub fn new() -> Self {
return Rtc {
addr: Pio::<u8>::new(0x70),
data: Pio::<u8>::new(0x71),
};
}
/// Read
unsafe fn read(&mut self, reg: u8) -> u8 {
self.addr.write(reg);
return self.data.read();
}
/// Wait
unsafe fn wait(&mut self) {
while self.read(0xA) & 0x80 != 0x80 {}
while self.read(0xA) & 0x80 == 0x80 {}
}
/// Get time
pub fn time(&mut self) -> u64 {
let mut second;
let mut minute;
let mut hour;
let mut day;
let mut month;
let mut year;
let register_b;
unsafe {
self.wait();
second = self.read(0) as usize;
minute = self.read(2) as usize;
hour = self.read(4) as usize;
day = self.read(7) as usize;
month = self.read(8) as usize;
year = self.read(9) as usize;
register_b = self.read(0xB);
}
if register_b & 4 != 4 {
second = cvt_bcd(second);
minute = cvt_bcd(minute);
hour = cvt_bcd(hour & 0x7F) | (hour & 0x80);
day = cvt_bcd(day);
month = cvt_bcd(month);
year = cvt_bcd(year);
}
if register_b & 2 != 2 || hour & 0x80 == 0x80 {
hour = ((hour & 0x7F) + 12) % 24;
}
// TODO: Century Register
year += 2000;
// Unix time from clock
let mut secs: u64 = (year as u64 - 1970) * 31536000;
let mut leap_days = (year as u64 - 1972) / 4 + 1;
if year % 4 == 0 {
if month <= 2 {
leap_days -= 1;
}
}
secs += leap_days * 86400;
match month {
2 => secs += 2678400,
3 => secs += 5097600,
4 => secs += 7776000,
5 => secs += 10368000,
6 => secs += 13046400,
7 => secs += 15638400,
8 => secs += 18316800,
9 => secs += 20995200,
10 => secs += 23587200,
11 => secs += 26265600,
12 => secs += 28857600,
_ => (),
}
secs += (day as u64 - 1) * 86400;
secs += hour as u64 * 3600;
secs += minute as u64 * 60;
secs += second as u64;
secs
}
}


@ -1,115 +0,0 @@
use core::fmt::{self, Write};
use spin::Mutex;
use io::{Io, Pio, ReadOnly};
pub static COM1: Mutex<SerialPort> = Mutex::new(SerialPort::new(0x3F8));
pub static COM2: Mutex<SerialPort> = Mutex::new(SerialPort::new(0x2F8));
pub unsafe fn init() {
COM1.lock().init();
COM2.lock().init();
}
bitflags! {
/// Interrupt enable flags
flags IntEnFlags: u8 {
const RECEIVED = 1,
const SENT = 1 << 1,
const ERRORED = 1 << 2,
const STATUS_CHANGE = 1 << 3,
// 4 to 7 are unused
}
}
bitflags! {
/// Line status flags
flags LineStsFlags: u8 {
const INPUT_FULL = 1,
// 1 to 4 unknown
const OUTPUT_EMPTY = 1 << 5,
// 6 and 7 unknown
}
}
#[allow(dead_code)]
pub struct SerialPort {
/// Data register, read to receive, write to send
data: Pio<u8>,
/// Interrupt enable
int_en: Pio<u8>,
/// FIFO control
fifo_ctrl: Pio<u8>,
/// Line control
line_ctrl: Pio<u8>,
/// Modem control
modem_ctrl: Pio<u8>,
/// Line status
line_sts: ReadOnly<Pio<u8>>,
/// Modem status
modem_sts: ReadOnly<Pio<u8>>,
}
impl SerialPort {
const fn new(base: u16) -> SerialPort {
SerialPort {
data: Pio::new(base),
int_en: Pio::new(base + 1),
fifo_ctrl: Pio::new(base + 2),
line_ctrl: Pio::new(base + 3),
modem_ctrl: Pio::new(base + 4),
line_sts: ReadOnly::new(Pio::new(base + 5)),
modem_sts: ReadOnly::new(Pio::new(base + 6))
}
}
fn line_sts(&self) -> LineStsFlags {
LineStsFlags::from_bits_truncate(self.line_sts.read())
}
fn write(&mut self, data: u8) {
while ! self.line_sts().contains(OUTPUT_EMPTY) {}
self.data.write(data)
}
fn init(&mut self) {
//TODO: Cleanup
self.int_en.write(0x00);
self.line_ctrl.write(0x80);
self.data.write(0x03);
self.int_en.write(0x00);
self.line_ctrl.write(0x03);
self.fifo_ctrl.write(0xC7);
self.modem_ctrl.write(0x0B);
self.int_en.write(0x01);
}
pub fn on_receive(&mut self) {
let data = self.data.read();
extern {
fn debug_input(byte: u8);
}
unsafe { debug_input(data) };
}
}
impl Write for SerialPort {
fn write_str(&mut self, s: &str) -> Result<(), fmt::Error> {
for byte in s.bytes() {
match byte {
8 | 0x7F => {
self.write(8);
self.write(b' ');
self.write(8);
},
_ => {
self.write(byte);
}
}
}
Ok(())
}
}


@ -1,70 +0,0 @@
/// Memcpy
///
/// Copy N bytes of memory from one location to another.
#[no_mangle]
pub unsafe extern fn memcpy(dest: *mut u8, src: *const u8,
n: usize) -> *mut u8 {
let mut i = 0;
while i < n {
*((dest as usize + i) as *mut u8) = *((src as usize + i) as *const u8);
i += 1;
}
dest
}
/// Memmove
///
/// Copy N bytes of memory from src to dest. The memory areas may overlap.
#[no_mangle]
pub unsafe extern fn memmove(dest: *mut u8, src: *const u8,
n: usize) -> *mut u8 {
if src < dest as *const u8 {
let mut i = n;
while i != 0 {
i -= 1;
*((dest as usize + i) as *mut u8) = *((src as usize + i) as *const u8);
}
} else {
let mut i = 0;
while i < n {
*((dest as usize + i) as *mut u8) = *((src as usize + i) as *const u8);
i += 1;
}
}
dest
}
/// Memset
///
/// Fill a block of memory with a specified value.
#[no_mangle]
pub unsafe extern fn memset(dest: *mut u8, c: i32, n: usize) -> *mut u8 {
let mut i = 0;
while i < n {
*((dest as usize + i) as *mut u8) = c as u8;
i += 1;
}
dest
}
/// Memcmp
///
/// Compare two blocks of memory.
#[no_mangle]
pub unsafe extern fn memcmp(s1: *const u8, s2: *const u8, n: usize) -> i32 {
let mut i = 0;
while i < n {
let a = *((s1 as usize + i) as *const u8);
let b = *((s2 as usize + i) as *const u8);
if a != b {
return a as i32 - b as i32
}
i += 1;
}
0
}


@ -1,177 +0,0 @@
//! Global descriptor table
use core::mem;
use x86::dtables::{self, DescriptorTablePointer};
use x86::segmentation::{self, SegmentSelector};
use x86::task::{self, TaskStateSegment};
pub const GDT_NULL: usize = 0;
pub const GDT_KERNEL_CODE: usize = 1;
pub const GDT_KERNEL_DATA: usize = 2;
pub const GDT_KERNEL_TLS: usize = 3;
pub const GDT_USER_CODE: usize = 4;
pub const GDT_USER_DATA: usize = 5;
pub const GDT_USER_TLS: usize = 6;
pub const GDT_TSS: usize = 7;
pub const GDT_TSS_HIGH: usize = 8;
pub const GDT_A_PRESENT: u8 = 1 << 7;
pub const GDT_A_RING_0: u8 = 0 << 5;
pub const GDT_A_RING_1: u8 = 1 << 5;
pub const GDT_A_RING_2: u8 = 2 << 5;
pub const GDT_A_RING_3: u8 = 3 << 5;
pub const GDT_A_SYSTEM: u8 = 1 << 4;
pub const GDT_A_EXECUTABLE: u8 = 1 << 3;
pub const GDT_A_CONFORMING: u8 = 1 << 2;
pub const GDT_A_PRIVILEGE: u8 = 1 << 1;
pub const GDT_A_DIRTY: u8 = 1;
pub const GDT_A_TSS_AVAIL: u8 = 0x9;
pub const GDT_A_TSS_BUSY: u8 = 0xB;
pub const GDT_F_PAGE_SIZE: u8 = 1 << 7;
pub const GDT_F_PROTECTED_MODE: u8 = 1 << 6;
pub const GDT_F_LONG_MODE: u8 = 1 << 5;
static mut INIT_GDTR: DescriptorTablePointer = DescriptorTablePointer {
limit: 0,
base: 0
};
static mut INIT_GDT: [GdtEntry; 4] = [
// Null
GdtEntry::new(0, 0, 0, 0),
// Kernel code
GdtEntry::new(0, 0, GDT_A_PRESENT | GDT_A_RING_0 | GDT_A_SYSTEM | GDT_A_EXECUTABLE | GDT_A_PRIVILEGE, GDT_F_LONG_MODE),
// Kernel data
GdtEntry::new(0, 0, GDT_A_PRESENT | GDT_A_RING_0 | GDT_A_SYSTEM | GDT_A_PRIVILEGE, GDT_F_LONG_MODE),
// Kernel TLS
GdtEntry::new(0, 0, GDT_A_PRESENT | GDT_A_RING_3 | GDT_A_SYSTEM | GDT_A_PRIVILEGE, GDT_F_LONG_MODE)
];
#[thread_local]
pub static mut GDTR: DescriptorTablePointer = DescriptorTablePointer {
limit: 0,
base: 0
};
#[thread_local]
pub static mut GDT: [GdtEntry; 9] = [
// Null
GdtEntry::new(0, 0, 0, 0),
// Kernel code
GdtEntry::new(0, 0, GDT_A_PRESENT | GDT_A_RING_0 | GDT_A_SYSTEM | GDT_A_EXECUTABLE | GDT_A_PRIVILEGE, GDT_F_LONG_MODE),
// Kernel data
GdtEntry::new(0, 0, GDT_A_PRESENT | GDT_A_RING_0 | GDT_A_SYSTEM | GDT_A_PRIVILEGE, GDT_F_LONG_MODE),
// Kernel TLS
GdtEntry::new(0, 0, GDT_A_PRESENT | GDT_A_RING_0 | GDT_A_SYSTEM | GDT_A_PRIVILEGE, GDT_F_LONG_MODE),
// User code
GdtEntry::new(0, 0, GDT_A_PRESENT | GDT_A_RING_3 | GDT_A_SYSTEM | GDT_A_EXECUTABLE | GDT_A_PRIVILEGE, GDT_F_LONG_MODE),
// User data
GdtEntry::new(0, 0, GDT_A_PRESENT | GDT_A_RING_3 | GDT_A_SYSTEM | GDT_A_PRIVILEGE, GDT_F_LONG_MODE),
// User TLS
GdtEntry::new(0, 0, GDT_A_PRESENT | GDT_A_RING_3 | GDT_A_SYSTEM | GDT_A_PRIVILEGE, GDT_F_LONG_MODE),
// TSS
GdtEntry::new(0, 0, GDT_A_PRESENT | GDT_A_RING_3 | GDT_A_TSS_AVAIL, 0),
// TSS must be 16 bytes long, twice the normal size
GdtEntry::new(0, 0, 0, 0),
];
#[thread_local]
pub static mut TSS: TaskStateSegment = TaskStateSegment {
reserved: 0,
rsp: [0; 3],
reserved2: 0,
ist: [0; 7],
reserved3: 0,
reserved4: 0,
iomap_base: 0xFFFF
};
/// Initialize GDT
pub unsafe fn init(tcb_offset: usize, stack_offset: usize) {
// Setup the initial GDT with TLS, so we can setup the TLS GDT (a little confusing)
// This means that each CPU will have its own GDT, but we only need to define it once as a thread local
INIT_GDTR.limit = (INIT_GDT.len() * mem::size_of::<GdtEntry>() - 1) as u16;
INIT_GDTR.base = INIT_GDT.as_ptr() as u64;
// Set the TLS segment to the offset of the Thread Control Block
INIT_GDT[GDT_KERNEL_TLS].set_offset(tcb_offset as u32);
// Load the initial GDT, before we have access to thread locals
dtables::lgdt(&INIT_GDTR);
// Load the segment descriptors
segmentation::load_cs(SegmentSelector::new(GDT_KERNEL_CODE as u16));
segmentation::load_ds(SegmentSelector::new(GDT_KERNEL_DATA as u16));
segmentation::load_es(SegmentSelector::new(GDT_KERNEL_DATA as u16));
segmentation::load_fs(SegmentSelector::new(GDT_KERNEL_TLS as u16));
segmentation::load_gs(SegmentSelector::new(GDT_KERNEL_DATA as u16));
segmentation::load_ss(SegmentSelector::new(GDT_KERNEL_DATA as u16));
// Now that we have access to thread locals, setup the AP's individual GDT
GDTR.limit = (GDT.len() * mem::size_of::<GdtEntry>() - 1) as u16;
GDTR.base = GDT.as_ptr() as u64;
// Set the TLS segment to the offset of the Thread Control Block
GDT[GDT_KERNEL_TLS].set_offset(tcb_offset as u32);
// Set the User TLS segment to the offset of the user TCB
GDT[GDT_USER_TLS].set_offset(::USER_TCB_OFFSET as u32);
// We can now access our TSS, which is a thread local
GDT[GDT_TSS].set_offset(&TSS as *const _ as u32);
GDT[GDT_TSS].set_limit(mem::size_of::<TaskStateSegment>() as u32);
// Set the stack pointer when coming back from userspace
TSS.rsp[0] = stack_offset as u64;
// Load the new GDT, which is correctly located in thread local storage
dtables::lgdt(&GDTR);
// Reload the segment descriptors
segmentation::load_cs(SegmentSelector::new(GDT_KERNEL_CODE as u16));
segmentation::load_ds(SegmentSelector::new(GDT_KERNEL_DATA as u16));
segmentation::load_es(SegmentSelector::new(GDT_KERNEL_DATA as u16));
segmentation::load_fs(SegmentSelector::new(GDT_KERNEL_TLS as u16));
segmentation::load_gs(SegmentSelector::new(GDT_KERNEL_DATA as u16));
segmentation::load_ss(SegmentSelector::new(GDT_KERNEL_DATA as u16));
// Load the task register
task::load_ltr(SegmentSelector::new(GDT_TSS as u16));
}
#[derive(Copy, Clone, Debug)]
#[repr(packed)]
pub struct GdtEntry {
pub limitl: u16,
pub offsetl: u16,
pub offsetm: u8,
pub access: u8,
pub flags_limith: u8,
pub offseth: u8
}
impl GdtEntry {
pub const fn new(offset: u32, limit: u32, access: u8, flags: u8) -> Self {
GdtEntry {
limitl: limit as u16,
offsetl: offset as u16,
offsetm: (offset >> 16) as u8,
access: access,
flags_limith: flags & 0xF0 | ((limit >> 16) as u8) & 0x0F,
offseth: (offset >> 24) as u8
}
}
pub fn set_offset(&mut self, offset: u32) {
self.offsetl = offset as u16;
self.offsetm = (offset >> 16) as u8;
self.offseth = (offset >> 24) as u8;
}
pub fn set_limit(&mut self, limit: u32) {
self.limitl = limit as u16;
self.flags_limith = self.flags_limith & 0xF0 | ((limit >> 16) as u8) & 0x0F;
}
}


@ -1,142 +0,0 @@
use core::mem;
use x86::dtables::{self, DescriptorTablePointer};
use interrupt::*;
pub static mut IDTR: DescriptorTablePointer = DescriptorTablePointer {
limit: 0,
base: 0
};
pub static mut IDT: [IdtEntry; 256] = [IdtEntry::new(); 256];
pub unsafe fn init() {
IDTR.limit = (IDT.len() * mem::size_of::<IdtEntry>() - 1) as u16;
IDTR.base = IDT.as_ptr() as u64;
// Set up exceptions
IDT[0].set_func(exception::divide_by_zero);
IDT[1].set_func(exception::debug);
IDT[2].set_func(exception::non_maskable);
IDT[3].set_func(exception::breakpoint);
IDT[4].set_func(exception::overflow);
IDT[5].set_func(exception::bound_range);
IDT[6].set_func(exception::invalid_opcode);
IDT[7].set_func(exception::device_not_available);
IDT[8].set_func(exception::double_fault);
// 9 no longer available
IDT[10].set_func(exception::invalid_tss);
IDT[11].set_func(exception::segment_not_present);
IDT[12].set_func(exception::stack_segment);
IDT[13].set_func(exception::protection);
IDT[14].set_func(exception::page);
// 15 reserved
IDT[16].set_func(exception::fpu);
IDT[17].set_func(exception::alignment_check);
IDT[18].set_func(exception::machine_check);
IDT[19].set_func(exception::simd);
IDT[20].set_func(exception::virtualization);
// 21 through 29 reserved
IDT[30].set_func(exception::security);
// 31 reserved
// Set up IRQs
IDT[32].set_func(irq::pit);
IDT[33].set_func(irq::keyboard);
IDT[34].set_func(irq::cascade);
IDT[35].set_func(irq::com2);
IDT[36].set_func(irq::com1);
IDT[37].set_func(irq::lpt2);
IDT[38].set_func(irq::floppy);
IDT[39].set_func(irq::lpt1);
IDT[40].set_func(irq::rtc);
IDT[41].set_func(irq::pci1);
IDT[42].set_func(irq::pci2);
IDT[43].set_func(irq::pci3);
IDT[44].set_func(irq::mouse);
IDT[45].set_func(irq::fpu);
IDT[46].set_func(irq::ata1);
IDT[47].set_func(irq::ata2);
// Set IPI handler (null)
IDT[0x40].set_func(ipi::ipi);
// Set syscall function
IDT[0x80].set_func(syscall::syscall);
IDT[0x80].set_flags(IDT_PRESENT | IDT_RING_3 | IDT_INTERRUPT);
dtables::lidt(&IDTR);
}
bitflags! {
pub flags IdtFlags: u8 {
const IDT_PRESENT = 1 << 7,
const IDT_RING_0 = 0 << 5,
const IDT_RING_1 = 1 << 5,
const IDT_RING_2 = 2 << 5,
const IDT_RING_3 = 3 << 5,
const IDT_SS = 1 << 4,
const IDT_INTERRUPT = 0xE,
const IDT_TRAP = 0xF,
}
}
#[repr(packed)]
pub struct IdtDescriptor {
size: u16,
offset: u64
}
impl IdtDescriptor {
pub fn set_slice(&mut self, slice: &'static [IdtEntry]) {
self.size = (slice.len() * mem::size_of::<IdtEntry>() - 1) as u16;
self.offset = slice.as_ptr() as u64;
}
pub unsafe fn load(&self) {
asm!("lidt [rax]" : : "{rax}"(self as *const _ as usize) : : "intel", "volatile");
}
}
#[derive(Copy, Clone, Debug)]
#[repr(packed)]
pub struct IdtEntry {
offsetl: u16,
selector: u16,
zero: u8,
attribute: u8,
offsetm: u16,
offseth: u32,
zero2: u32
}
impl IdtEntry {
pub const fn new() -> IdtEntry {
IdtEntry {
offsetl: 0,
selector: 0,
zero: 0,
attribute: 0,
offsetm: 0,
offseth: 0,
zero2: 0
}
}
pub fn set_flags(&mut self, flags: IdtFlags) {
self.attribute = flags.bits;
}
pub fn set_offset(&mut self, selector: u16, base: usize) {
self.selector = selector;
self.offsetl = base as u16;
self.offsetm = (base >> 16) as u16;
self.offseth = (base >> 32) as u32;
}
// A function to set the offset more easily
pub fn set_func(&mut self, func: unsafe extern fn()) {
self.set_flags(IDT_PRESENT | IDT_RING_0 | IDT_INTERRUPT);
self.set_offset(8, func as usize);
}
}
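// Worked example: `set_offset` splits a 64-bit handler address across the
// three offset fields. For a hypothetical handler at 0xFFFF_FF00_0012_3456:
//   offsetl = 0x3456       (bits  0..16)
//   offsetm = 0x0012       (bits 16..32)
//   offseth = 0xFFFF_FF00  (bits 32..64)
// `set_func` combines this with selector 8 (presumably the kernel code
// segment) and the present | ring 0 | interrupt gate flags.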

View file

@ -1,123 +0,0 @@
use interrupt::stack_trace;
use syscall::flag::*;
extern {
fn ksignal(signal: usize);
}
interrupt_stack!(divide_by_zero, stack, {
println!("Divide by zero fault at {:>02X}:{:>016X}", stack.cs, stack.rip);
stack_trace();
ksignal(SIGFPE);
});
interrupt_stack!(debug, stack, {
println!("Debug trap at {:>02X}:{:>016X}", stack.cs, stack.rip);
ksignal(SIGTRAP);
});
interrupt_stack!(non_maskable, stack, {
println!("Non-maskable interrupt at {:>02X}:{:>016X}", stack.cs, stack.rip);
});
interrupt_stack!(breakpoint, stack, {
println!("Breakpoint trap at {:>02X}:{:>016X}", stack.cs, stack.rip);
ksignal(SIGTRAP);
});
interrupt_stack!(overflow, stack, {
println!("Overflow trap at {:>02X}:{:>016X}", stack.cs, stack.rip);
ksignal(SIGFPE);
});
interrupt_stack!(bound_range, stack, {
println!("Bound range exceeded fault at {:>02X}:{:>016X}", stack.cs, stack.rip);
stack_trace();
ksignal(SIGSEGV);
});
interrupt_stack!(invalid_opcode, stack, {
println!("Invalid opcode fault at {:>02X}:{:>016X}", stack.cs, stack.rip);
stack_trace();
ksignal(SIGILL);
});
interrupt_stack!(device_not_available, stack, {
println!("Device not available fault at {:>02X}:{:>016X}", stack.cs, stack.rip);
stack_trace();
ksignal(SIGILL);
});
interrupt_error!(double_fault, stack, {
println!("Double fault: {:X} at {:>02X}:{:>016X}", stack.code, stack.cs, stack.rip);
stack_trace();
ksignal(SIGSEGV);
});
interrupt_error!(invalid_tss, stack, {
println!("Invalid TSS fault: {:X} at {:>02X}:{:>016X}", stack.code, stack.cs, stack.rip);
stack_trace();
ksignal(SIGSEGV);
});
interrupt_error!(segment_not_present, stack, {
println!("Segment not present fault: {:X} at {:>02X}:{:>016X}", stack.code, stack.cs, stack.rip);
stack_trace();
ksignal(SIGSEGV);
});
interrupt_error!(stack_segment, stack, {
println!("Stack segment fault: {:X} at {:>02X}:{:>016X}", stack.code, stack.cs, stack.rip);
stack_trace();
ksignal(SIGSEGV);
});
interrupt_error!(protection, stack, {
println!("Protection fault: {:X} at {:>02X}:{:>016X}", stack.code, stack.cs, stack.rip);
stack_trace();
ksignal(SIGSEGV);
});
interrupt_error!(page, stack, {
let cr2: usize;
asm!("mov rax, cr2" : "={rax}"(cr2) : : : "intel", "volatile");
println!("Page fault: {:>02X}:{:>016X} at {:>02X}:{:>016X}", stack.code, cr2, stack.cs, stack.rip);
stack_trace();
ksignal(SIGSEGV);
});
interrupt_stack!(fpu, stack, {
println!("FPU floating point fault at {:>02X}:{:>016X}", stack.cs, stack.rip);
stack_trace();
ksignal(SIGFPE);
});
interrupt_error!(alignment_check, stack, {
println!("Alignment check fault: {:X} at {:>02X}:{:>016X}", stack.code, stack.cs, stack.rip);
stack_trace();
ksignal(SIGBUS);
});
interrupt_stack!(machine_check, stack, {
println!("Machine check fault at {:>02X}:{:>016X}", stack.cs, stack.rip);
stack_trace();
ksignal(SIGBUS);
});
interrupt_stack!(simd, stack, {
println!("SIMD floating point fault at {:>02X}:{:>016X}", stack.cs, stack.rip);
stack_trace();
ksignal(SIGFPE);
});
interrupt_stack!(virtualization, stack, {
println!("Virtualization fault at {:>02X}:{:>016X}", stack.cs, stack.rip);
stack_trace();
ksignal(SIGBUS);
});
interrupt_error!(security, stack, {
println!("Security exception: {:X} at {:>02X}:{:>016X}", stack.code, stack.cs, stack.rip);
stack_trace();
ksignal(SIGBUS);
});

View file

@ -1,5 +0,0 @@
use device::local_apic::LOCAL_APIC;
interrupt!(ipi, {
LOCAL_APIC.eoi();
});

View file

@ -1,115 +0,0 @@
use x86::io;
use device::serial::{COM1, COM2};
use time;
extern {
fn irq_trigger(irq: u8);
}
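// 0x20 and 0xA0 are the command ports of the master and slave 8259 PICs;
// writing 0x20 to them issues the end-of-interrupt (EOI) command.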
#[inline(always)]
unsafe fn master_ack() {
io::outb(0x20, 0x20);
}
#[inline(always)]
unsafe fn slave_ack() {
io::outb(0xA0, 0x20);
master_ack();
}
pub unsafe fn acknowledge(irq: usize) {
if irq >= 8 {
slave_ack();
} else {
master_ack();
}
}
interrupt!(pit, {
// Saves CPU time by not sending the IRQ event:
// irq_trigger(0);
{
const PIT_RATE: u64 = 2250286;
let mut offset = time::OFFSET.lock();
let sum = offset.1 + PIT_RATE;
offset.1 = sum % 1000000000;
offset.0 += sum / 1000000000;
}
master_ack();
});
interrupt!(keyboard, {
irq_trigger(1);
});
interrupt!(cascade, {
irq_trigger(2);
master_ack();
});
interrupt!(com2, {
irq_trigger(3);
COM2.lock().on_receive();
master_ack();
});
interrupt!(com1, {
irq_trigger(4);
COM1.lock().on_receive();
master_ack();
});
interrupt!(lpt2, {
irq_trigger(5);
master_ack();
});
interrupt!(floppy, {
irq_trigger(6);
master_ack();
});
interrupt!(lpt1, {
irq_trigger(7);
master_ack();
});
interrupt!(rtc, {
irq_trigger(8);
slave_ack();
});
interrupt!(pci1, {
irq_trigger(9);
slave_ack();
});
interrupt!(pci2, {
irq_trigger(10);
});
interrupt!(pci3, {
irq_trigger(11);
});
interrupt!(mouse, {
irq_trigger(12);
});
interrupt!(fpu, {
irq_trigger(13);
slave_ack();
});
interrupt!(ata1, {
irq_trigger(14);
slave_ack();
});
interrupt!(ata2, {
irq_trigger(15);
slave_ack();
});

View file

@ -1,85 +0,0 @@
//! Interrupt instructions
use core::mem;
use paging::{ActivePageTable, VirtualAddress};
pub mod exception;
pub mod ipi;
pub mod irq;
pub mod syscall;
/// Clear interrupts
#[inline(always)]
pub unsafe fn disable() {
asm!("cli" : : : : "intel", "volatile");
}
/// Set interrupts
#[inline(always)]
pub unsafe fn enable() {
asm!("sti" : : : : "intel", "volatile");
}
/// Set interrupts and halt
/// This will atomically wait for the next interrupt
/// Performing enable followed by halt is not guaranteed to be atomic; use this instead!
#[inline(always)]
pub unsafe fn enable_and_halt() {
asm!("sti
hlt"
: : : : "intel", "volatile");
}
/// Set interrupts and nop
/// This will enable interrupts and allow the IF flag to be processed
/// Simply enabling interrupts does not guarantee that they will trigger; use this instead!
#[inline(always)]
pub unsafe fn enable_and_nop() {
asm!("sti
nop"
: : : : "intel", "volatile");
}
/// Halt instruction
#[inline(always)]
pub unsafe fn halt() {
asm!("hlt" : : : : "intel", "volatile");
}
/// Pause instruction
/// Safe because it is similar to a NOP, and has no memory effects
#[inline(always)]
pub fn pause() {
unsafe { asm!("pause" : : : : "intel", "volatile"); }
}
/// Get a stack trace
//TODO: Check for stack being mapped before dereferencing
#[inline(never)]
pub unsafe fn stack_trace() {
let mut rbp: usize;
asm!("" : "={rbp}"(rbp) : : : "intel", "volatile");
println!("TRACE: {:>016X}", rbp);
//Maximum 64 frames
let active_table = ActivePageTable::new();
for _frame in 0..64 {
if let Some(rip_rbp) = rbp.checked_add(mem::size_of::<usize>()) {
if active_table.translate(VirtualAddress::new(rbp)).is_some() && active_table.translate(VirtualAddress::new(rip_rbp)).is_some() {
let rip = *(rip_rbp as *const usize);
if rip == 0 {
println!(" {:>016X}: EMPTY RETURN", rbp);
break;
}
println!(" {:>016X}: {:>016X}", rbp, rip);
rbp = *(rbp as *const usize);
} else {
println!(" {:>016X}: GUARD PAGE", rbp);
break;
}
} else {
println!(" {:>016X}: RBP OVERFLOW", rbp);
}
}
}

View file

@ -1,61 +0,0 @@
#[naked]
pub unsafe extern fn syscall() {
#[inline(never)]
unsafe fn inner() {
extern {
fn syscall(a: usize, b: usize, c: usize, d: usize, e: usize, f: usize, stack: usize) -> usize;
}
let mut a;
{
let b;
let c;
let d;
let e;
let f;
let stack;
asm!("" : "={rax}"(a), "={rbx}"(b), "={rcx}"(c), "={rdx}"(d), "={rsi}"(e), "={rdi}"(f), "={rbp}"(stack)
: : : "intel", "volatile");
a = syscall(a, b, c, d, e, f, stack);
}
asm!("" : : "{rax}"(a) : : "intel", "volatile");
}
// Push scratch registers, minus rax for the return value
asm!("push rcx
push rdx
push rdi
push rsi
push r8
push r9
push r10
push r11
push fs
mov r11, 0x18
mov fs, r11"
: : : : "intel", "volatile");
inner();
// Interrupt return
asm!("pop fs
pop r11
pop r10
pop r9
pop r8
pop rsi
pop rdi
pop rdx
pop rcx
iretq"
: : : : "intel", "volatile");
}
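// Illustrative sketch of the register convention fixed by `inner` above:
// rax carries the syscall number and the return value, rbx/rcx/rdx/rsi/rdi
// carry the arguments, and rbp holds the caller's stack pointer. A userspace
// invocation via `int 0x80`, using hypothetical SYS_WRITE/fd/buf values and
// the same era asm! syntax, would look roughly like:
//
//     let ret: usize;
//     asm!("int 0x80"
//         : "={rax}"(ret)
//         : "{rax}"(SYS_WRITE), "{rbx}"(fd), "{rcx}"(buf.as_ptr() as usize),
//           "{rdx}"(buf.len())
//         : "memory"
//         : "intel", "volatile");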
#[naked]
pub unsafe extern fn clone_ret() -> usize {
asm!("pop rbp"
: : : : "intel", "volatile");
0
}

View file

@ -1,327 +0,0 @@
//! Architecture support for x86_64
#![deny(unused_must_use)]
#![feature(asm)]
#![feature(concat_idents)]
#![feature(const_fn)]
#![feature(core_intrinsics)]
#![feature(drop_types_in_const)]
#![feature(lang_items)]
#![feature(naked_functions)]
#![feature(thread_local)]
#![feature(unique)]
#![no_std]
extern crate hole_list_allocator as allocator;
#[macro_use]
extern crate bitflags;
extern crate io;
extern crate spin;
extern crate syscall;
pub extern crate x86;
// Because it is so important that the memory map is not aliased, it is defined here, in one place
// The lower 256 PML4 entries are reserved for userspace
// Each PML4 entry references up to 512 GB of memory
// The top (511) PML4 is reserved for recursive mapping
// The second from the top (510) PML4 is reserved for the kernel
/// The size of a single PML4
pub const PML4_SIZE: usize = 0x0000_0080_0000_0000;
/// Offset of recursive paging
pub const RECURSIVE_PAGE_OFFSET: usize = (-(PML4_SIZE as isize)) as usize;
/// Offset of kernel
pub const KERNEL_OFFSET: usize = RECURSIVE_PAGE_OFFSET - PML4_SIZE;
/// Offset to kernel heap
pub const KERNEL_HEAP_OFFSET: usize = KERNEL_OFFSET + PML4_SIZE/2;
/// Size of kernel heap
pub const KERNEL_HEAP_SIZE: usize = 128 * 1024 * 1024; // 128 MB
/// Offset to kernel percpu variables
//TODO: Use 64-bit fs offset to enable this:
//pub const KERNEL_PERCPU_OFFSET: usize = KERNEL_HEAP_OFFSET - PML4_SIZE;
pub const KERNEL_PERCPU_OFFSET: usize = 0xC000_0000;
/// Size of kernel percpu variables
pub const KERNEL_PERCPU_SIZE: usize = 64 * 1024; // 64 KB
/// Offset to user image
pub const USER_OFFSET: usize = 0;
/// Offset to user TCB
pub const USER_TCB_OFFSET: usize = 0xB000_0000;
/// Offset to user arguments
pub const USER_ARG_OFFSET: usize = USER_OFFSET + PML4_SIZE/2;
/// Offset to user heap
pub const USER_HEAP_OFFSET: usize = USER_OFFSET + PML4_SIZE;
/// Offset to user grants
pub const USER_GRANT_OFFSET: usize = USER_HEAP_OFFSET + PML4_SIZE;
/// Offset to user stack
pub const USER_STACK_OFFSET: usize = USER_GRANT_OFFSET + PML4_SIZE;
/// Size of user stack
pub const USER_STACK_SIZE: usize = 1024 * 1024; // 1 MB
/// Offset to user TLS
pub const USER_TLS_OFFSET: usize = USER_STACK_OFFSET + PML4_SIZE;
/// Offset to user temporary image (used when cloning)
pub const USER_TMP_OFFSET: usize = USER_TLS_OFFSET + PML4_SIZE;
/// Offset to user temporary heap (used when cloning)
pub const USER_TMP_HEAP_OFFSET: usize = USER_TMP_OFFSET + PML4_SIZE;
/// Offset to user temporary page for grants
pub const USER_TMP_GRANT_OFFSET: usize = USER_TMP_HEAP_OFFSET + PML4_SIZE;
/// Offset to user temporary stack (used when cloning)
pub const USER_TMP_STACK_OFFSET: usize = USER_TMP_GRANT_OFFSET + PML4_SIZE;
/// Offset to user temporary tls (used when cloning)
pub const USER_TMP_TLS_OFFSET: usize = USER_TMP_STACK_OFFSET + PML4_SIZE;
/// Offset for usage in other temporary pages
pub const USER_TMP_MISC_OFFSET: usize = USER_TMP_TLS_OFFSET + PML4_SIZE;
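// Worked example: with PML4_SIZE = 0x80_0000_0000 (512 GiB), the kernel-space
// constants above resolve to the following canonical addresses:
//   RECURSIVE_PAGE_OFFSET = 0xFFFF_FF80_0000_0000  (PML4 entry 511)
//   KERNEL_OFFSET         = 0xFFFF_FF00_0000_0000  (PML4 entry 510)
//   KERNEL_HEAP_OFFSET    = 0xFFFF_FF40_0000_0000  (upper half of entry 510)
// matching the layout described in the comment block above.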
/// Print to console
#[macro_export]
macro_rules! print {
($($arg:tt)*) => ({
use core::fmt::Write;
let _ = write!($crate::console::CONSOLE.lock(), $($arg)*);
});
}
/// Print with new line to console
#[macro_export]
macro_rules! println {
($fmt:expr) => (print!(concat!($fmt, "\n")));
($fmt:expr, $($arg:tt)*) => (print!(concat!($fmt, "\n"), $($arg)*));
}
/// Create an interrupt function that can safely run Rust code
#[macro_export]
macro_rules! interrupt {
($name:ident, $func:block) => {
#[naked]
pub unsafe extern fn $name () {
#[inline(never)]
unsafe fn inner() {
$func
}
// Push scratch registers
asm!("push rax
push rcx
push rdx
push rdi
push rsi
push r8
push r9
push r10
push r11
push fs
mov rax, 0x18
mov fs, ax"
: : : : "intel", "volatile");
// Call inner rust function
inner();
// Pop scratch registers and return
asm!("pop fs
pop r11
pop r10
pop r9
pop r8
pop rsi
pop rdi
pop rdx
pop rcx
pop rax
iretq"
: : : : "intel", "volatile");
}
};
}
#[repr(packed)]
pub struct InterruptStack {
fs: usize,
r11: usize,
r10: usize,
r9: usize,
r8: usize,
rsi: usize,
rdi: usize,
rdx: usize,
rcx: usize,
rax: usize,
rip: usize,
cs: usize,
rflags: usize,
}
#[macro_export]
macro_rules! interrupt_stack {
($name:ident, $stack: ident, $func:block) => {
#[naked]
pub unsafe extern fn $name () {
#[inline(never)]
unsafe fn inner($stack: &$crate::InterruptStack) {
$func
}
// Push scratch registers
asm!("push rax
push rcx
push rdx
push rdi
push rsi
push r8
push r9
push r10
push r11
push fs
mov rax, 0x18
mov fs, ax"
: : : : "intel", "volatile");
// Get reference to stack variables
let rsp: usize;
asm!("" : "={rsp}"(rsp) : : : "intel", "volatile");
// Call inner rust function
inner(&*(rsp as *const $crate::InterruptStack));
// Pop scratch registers and return
asm!("pop fs
pop r11
pop r10
pop r9
pop r8
pop rsi
pop rdi
pop rdx
pop rcx
pop rax
iretq"
: : : : "intel", "volatile");
}
};
}
#[repr(packed)]
pub struct InterruptErrorStack {
fs: usize,
r11: usize,
r10: usize,
r9: usize,
r8: usize,
rsi: usize,
rdi: usize,
rdx: usize,
rcx: usize,
rax: usize,
code: usize,
rip: usize,
cs: usize,
rflags: usize,
}
#[macro_export]
macro_rules! interrupt_error {
($name:ident, $stack:ident, $func:block) => {
#[naked]
pub unsafe extern fn $name () {
#[inline(never)]
unsafe fn inner($stack: &$crate::InterruptErrorStack) {
$func
}
// Push scratch registers
asm!("push rax
push rcx
push rdx
push rdi
push rsi
push r8
push r9
push r10
push r11
push fs
mov rax, 0x18
mov fs, ax"
: : : : "intel", "volatile");
// Get reference to stack variables
let rsp: usize;
asm!("" : "={rsp}"(rsp) : : : "intel", "volatile");
// Call inner rust function
inner(&*(rsp as *const $crate::InterruptErrorStack));
// Pop scratch registers, error code, and return
asm!("pop fs
pop r11
pop r10
pop r9
pop r8
pop rsi
pop rdi
pop rdx
pop rcx
pop rax
add rsp, 8
iretq"
: : : : "intel", "volatile");
}
};
}
/// ACPI table parsing
pub mod acpi;
/// Console handling
pub mod console;
/// Context switching
pub mod context;
/// Devices
pub mod device;
/// Memcpy, memmove, etc.
pub mod externs;
/// Global descriptor table
pub mod gdt;
/// Interrupt descriptor table
pub mod idt;
/// Interrupt instructions
pub mod interrupt;
/// Memory management
pub mod memory;
/// Paging
pub mod paging;
/// Panic
pub mod panic;
/// Initialization and start function
pub mod start;
/// Shutdown function
pub mod stop;
/// Time
pub mod time;

View file

@ -1,63 +0,0 @@
ENTRY(kstart)
OUTPUT_FORMAT(elf64-x86-64)
KERNEL_OFFSET = 0xffffff0000100000;
/* KERNEL_OFFSET = 0x100000; */
SECTIONS {
. = KERNEL_OFFSET;
. += SIZEOF_HEADERS;
. = ALIGN(4096);
.text : AT(ADDR(.text) - KERNEL_OFFSET) {
__text_start = .;
*(.text*)
. = ALIGN(4096);
__text_end = .;
}
.rodata : AT(ADDR(.rodata) - KERNEL_OFFSET) {
__rodata_start = .;
*(.rodata*)
. = ALIGN(4096);
__rodata_end = .;
}
.data : AT(ADDR(.data) - KERNEL_OFFSET) {
__data_start = .;
*(.data*)
. = ALIGN(4096);
__data_end = .;
}
.tdata : AT(ADDR(.tdata) - KERNEL_OFFSET) {
__tdata_start = .;
*(.tdata*)
. = ALIGN(4096);
__tdata_end = .;
__tbss_start = .;
*(.tbss*)
. += 8;
. = ALIGN(4096);
__tbss_end = .;
}
.bss : AT(ADDR(.bss) - KERNEL_OFFSET) {
__bss_start = .;
*(.bss*)
. = ALIGN(4096);
__bss_end = .;
}
__end = .;
/DISCARD/ : {
*(.comment*)
*(.debug*)
*(.eh_frame*)
*(.gcc_except_table*)
*(.note*)
*(.rel.eh_frame*)
}
}

View file

@ -1,127 +0,0 @@
//! # Area frame allocator
//! Some code was borrowed from [Phil Opp's Blog](http://os.phil-opp.com/allocating-frames.html)
use paging::PhysicalAddress;
use super::{Frame, FrameAllocator, MemoryArea, MemoryAreaIter};
pub struct AreaFrameAllocator {
next_free_frame: Frame,
current_area: Option<&'static MemoryArea>,
areas: MemoryAreaIter,
kernel_start: Frame,
kernel_end: Frame
}
impl AreaFrameAllocator {
pub fn new(kernel_start: usize, kernel_end: usize, memory_areas: MemoryAreaIter) -> AreaFrameAllocator {
let mut allocator = AreaFrameAllocator {
next_free_frame: Frame::containing_address(PhysicalAddress::new(0)),
current_area: None,
areas: memory_areas,
kernel_start: Frame::containing_address(PhysicalAddress::new(kernel_start)),
kernel_end: Frame::containing_address(PhysicalAddress::new(kernel_end))
};
allocator.choose_next_area();
allocator
}
fn choose_next_area(&mut self) {
self.current_area = self.areas.clone().filter(|area| {
let address = area.base_addr + area.length - 1;
Frame::containing_address(PhysicalAddress::new(address as usize)) >= self.next_free_frame
}).min_by_key(|area| area.base_addr);
if let Some(area) = self.current_area {
let start_frame = Frame::containing_address(PhysicalAddress::new(area.base_addr as usize));
if self.next_free_frame < start_frame {
self.next_free_frame = start_frame;
}
}
}
}
impl FrameAllocator for AreaFrameAllocator {
fn free_frames(&self) -> usize {
let mut count = 0;
for area in self.areas.clone() {
let start_frame = Frame::containing_address(PhysicalAddress::new(area.base_addr as usize));
let end_frame = Frame::containing_address(PhysicalAddress::new((area.base_addr + area.length - 1) as usize));
for frame in Frame::range_inclusive(start_frame, end_frame) {
if frame >= self.kernel_start && frame <= self.kernel_end {
// Inside of kernel range
} else if frame >= self.next_free_frame {
// Frame is in free range
count += 1;
} else {
// Inside of used range
}
}
}
count
}
fn used_frames(&self) -> usize {
let mut count = 0;
for area in self.areas.clone() {
let start_frame = Frame::containing_address(PhysicalAddress::new(area.base_addr as usize));
let end_frame = Frame::containing_address(PhysicalAddress::new((area.base_addr + area.length - 1) as usize));
for frame in Frame::range_inclusive(start_frame, end_frame) {
if frame >= self.kernel_start && frame <= self.kernel_end {
// Inside of kernel range
count += 1
} else if frame >= self.next_free_frame {
// Frame is in free range
} else {
count += 1;
}
}
}
count
}
fn allocate_frames(&mut self, count: usize) -> Option<Frame> {
if count == 0 {
None
} else if let Some(area) = self.current_area {
// "Clone" the frame to return it if it's free. Frame doesn't
// implement Clone, but we can construct an identical frame.
let start_frame = Frame{ number: self.next_free_frame.number };
let end_frame = Frame { number: self.next_free_frame.number + (count - 1) };
// the last frame of the current area
let current_area_last_frame = {
let address = area.base_addr + area.length - 1;
Frame::containing_address(PhysicalAddress::new(address as usize))
};
if end_frame > current_area_last_frame {
// all frames of current area are used, switch to next area
self.choose_next_area();
} else if (start_frame >= self.kernel_start && start_frame <= self.kernel_end)
|| (end_frame >= self.kernel_start && end_frame <= self.kernel_end) {
// `frame` is used by the kernel
self.next_free_frame = Frame {
number: self.kernel_end.number + 1
};
} else {
// frame is unused, increment `next_free_frame` and return it
self.next_free_frame.number += count;
return Some(start_frame);
}
// `frame` was not valid, try it again with the updated `next_free_frame`
self.allocate_frames(count)
} else {
None // no free frames left
}
}
fn deallocate_frames(&mut self, frame: Frame, count: usize) {
//panic!("AreaFrameAllocator::deallocate_frame: not supported: {:?}", frame);
}
}

View file

@ -1,189 +0,0 @@
//! # Memory management
//! Some code was borrowed from [Phil Opp's Blog](http://os.phil-opp.com/allocating-frames.html)
pub use paging::{PAGE_SIZE, PhysicalAddress};
use self::area_frame_allocator::AreaFrameAllocator;
use spin::Mutex;
pub mod area_frame_allocator;
/// The current memory map. Its size is capped at 512 entries, due to it living
/// in the region from 0x500 to 0x5000 (800 is the absolute total)
static mut MEMORY_MAP: [MemoryArea; 512] = [MemoryArea { base_addr: 0, length: 0, _type: 0, acpi: 0 }; 512];
/// Memory does not exist
pub const MEMORY_AREA_NULL: u32 = 0;
/// Memory is free to use
pub const MEMORY_AREA_FREE: u32 = 1;
/// Memory is reserved
pub const MEMORY_AREA_RESERVED: u32 = 2;
/// Memory is used by ACPI, and can be reclaimed
pub const MEMORY_AREA_ACPI: u32 = 3;
/// A memory map area
#[derive(Copy, Clone, Debug, Default)]
#[repr(packed)]
pub struct MemoryArea {
pub base_addr: u64,
pub length: u64,
pub _type: u32,
pub acpi: u32
}
#[derive(Clone)]
pub struct MemoryAreaIter {
_type: u32,
i: usize
}
impl MemoryAreaIter {
fn new(_type: u32) -> Self {
MemoryAreaIter {
_type: _type,
i: 0
}
}
}
impl Iterator for MemoryAreaIter {
type Item = &'static MemoryArea;
fn next(&mut self) -> Option<Self::Item> {
while self.i < unsafe { MEMORY_MAP.len() } {
let entry = unsafe { &MEMORY_MAP[self.i] };
self.i += 1;
if entry._type == self._type {
return Some(entry);
}
}
None
}
}
static ALLOCATOR: Mutex<Option<AreaFrameAllocator>> = Mutex::new(None);
/// Init memory module
/// Must be called once, and only once.
pub unsafe fn init(kernel_start: usize, kernel_end: usize) {
// Copy memory map from bootloader location
for (i, mut entry) in MEMORY_MAP.iter_mut().enumerate() {
*entry = *(0x500 as *const MemoryArea).offset(i as isize);
if entry._type != MEMORY_AREA_NULL {
println!("{:?}", entry);
}
}
*ALLOCATOR.lock() = Some(AreaFrameAllocator::new(kernel_start, kernel_end, MemoryAreaIter::new(MEMORY_AREA_FREE)));
}
/// Allocate a frame
pub fn allocate_frame() -> Option<Frame> {
allocate_frames(1)
}
/// Deallocate a frame
pub fn deallocate_frame(frame: Frame) {
deallocate_frames(frame, 1)
}
/// Get the number of frames available
pub fn free_frames() -> usize {
if let Some(ref allocator) = *ALLOCATOR.lock() {
allocator.free_frames()
} else {
panic!("frame allocator not initialized");
}
}
/// Get the number of frames used
pub fn used_frames() -> usize {
if let Some(ref allocator) = *ALLOCATOR.lock() {
allocator.used_frames()
} else {
panic!("frame allocator not initialized");
}
}
/// Allocate a range of frames
pub fn allocate_frames(count: usize) -> Option<Frame> {
if let Some(ref mut allocator) = *ALLOCATOR.lock() {
allocator.allocate_frames(count)
} else {
panic!("frame allocator not initialized");
}
}
/// Deallocate a range of frames
pub fn deallocate_frames(frame: Frame, count: usize) {
if let Some(ref mut allocator) = *ALLOCATOR.lock() {
allocator.deallocate_frames(frame, count)
} else {
panic!("frame allocator not initialized");
}
}
/// A frame, allocated by the frame allocator.
/// Do not add more derives, or make anything `pub`!
#[derive(Debug, PartialEq, Eq, PartialOrd, Ord)]
pub struct Frame {
number: usize
}
impl Frame {
/// Get the address of this frame
pub fn start_address(&self) -> PhysicalAddress {
PhysicalAddress::new(self.number * PAGE_SIZE)
}
//TODO: Set private
pub fn clone(&self) -> Frame {
Frame {
number: self.number
}
}
/// Create a frame containing `address`
pub fn containing_address(address: PhysicalAddress) -> Frame {
Frame {
number: address.get() / PAGE_SIZE
}
}
//TODO: Set private
pub fn range_inclusive(start: Frame, end: Frame) -> FrameIter {
FrameIter {
start: start,
end: end,
}
}
}
pub struct FrameIter {
start: Frame,
end: Frame,
}
impl Iterator for FrameIter {
type Item = Frame;
fn next(&mut self) -> Option<Frame> {
if self.start <= self.end {
let frame = self.start.clone();
self.start.number += 1;
Some(frame)
} else {
None
}
}
}
pub trait FrameAllocator {
fn free_frames(&self) -> usize;
fn used_frames(&self) -> usize;
fn allocate_frames(&mut self, size: usize) -> Option<Frame>;
fn deallocate_frames(&mut self, frame: Frame, size: usize);
}
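// Illustrative sketch of typical use of this module once `init` has run
// during kstart; the error message and the mapping step are placeholders:
//
//     let frame = allocate_frame().expect("out of frames");
//     // ... hand the frame to the paging code, e.g. mapper.map_to(page, frame, flags) ...
//     deallocate_frame(frame); // currently a no-op in AreaFrameAllocator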

View file

@ -1,62 +0,0 @@
//! # Page table entry
//! Some code borrowed from [Phil Opp's Blog](http://os.phil-opp.com/modifying-page-tables.html)
use memory::Frame;
use super::PhysicalAddress;
/// A page table entry
pub struct Entry(u64);
bitflags! {
pub flags EntryFlags: u64 {
const PRESENT = 1 << 0,
const WRITABLE = 1 << 1,
const USER_ACCESSIBLE = 1 << 2,
const WRITE_THROUGH = 1 << 3,
const NO_CACHE = 1 << 4,
const ACCESSED = 1 << 5,
const DIRTY = 1 << 6,
const HUGE_PAGE = 1 << 7,
const GLOBAL = 1 << 8,
const NO_EXECUTE = 1 << 63,
}
}
pub const ADDRESS_MASK: usize = 0x000f_ffff_ffff_f000;
impl Entry {
/// Is the entry unused?
pub fn is_unused(&self) -> bool {
self.0 == 0
}
/// Make the entry unused
pub fn set_unused(&mut self) {
self.0 = 0;
}
/// Get the address this page references
pub fn address(&self) -> PhysicalAddress {
PhysicalAddress::new(self.0 as usize & ADDRESS_MASK)
}
/// Get the current entry flags
pub fn flags(&self) -> EntryFlags {
EntryFlags::from_bits_truncate(self.0)
}
/// Get the associated frame, if available
pub fn pointed_frame(&self) -> Option<Frame> {
if self.flags().contains(PRESENT) {
Some(Frame::containing_address(self.address()))
} else {
None
}
}
pub fn set(&mut self, frame: Frame, flags: EntryFlags) {
debug_assert!(frame.start_address().get() & !ADDRESS_MASK == 0);
self.0 = (frame.start_address().get() as u64) | flags.bits();
}
}

View file

@ -1,182 +0,0 @@
use core::mem;
use core::ptr::Unique;
use memory::{allocate_frame, deallocate_frame, Frame};
use super::{ActivePageTable, Page, PAGE_SIZE, PhysicalAddress, VirtualAddress};
use super::entry::{self, EntryFlags};
use super::table::{self, Table, Level4};
/// In order to enforce correct paging operations in the kernel, these types
/// are returned from every mapping operation so that the calling code must
/// specify how it intends to flush its changes to the page table
#[must_use = "The page table must be flushed, or the changes unsafely ignored"]
pub struct MapperFlush(Page);
impl MapperFlush {
/// Create a new page flush promise
pub fn new(page: Page) -> MapperFlush {
MapperFlush(page)
}
/// Flush this page in the active table
pub fn flush(self, table: &mut ActivePageTable) {
table.flush(self.0);
mem::forget(self);
}
/// Ignore the flush. This is unsafe, and a reason should be provided for use
pub unsafe fn ignore(self) {
mem::forget(self);
}
}
/// A flush cannot be dropped; it must be consumed
impl Drop for MapperFlush {
fn drop(&mut self) {
panic!("Mapper flush was not utilized");
}
}
/// To allow for combining multiple flushes into one, we have a way of flushing
/// the active table, which can consume MapperFlush structs
#[must_use = "The page table must be flushed, or the changes unsafely ignored"]
pub struct MapperFlushAll(bool);
impl MapperFlushAll {
/// Create a new promise to flush all mappings
pub fn new() -> MapperFlushAll {
MapperFlushAll(false)
}
/// Consume a single page flush
pub fn consume(&mut self, flush: MapperFlush) {
self.0 = true;
mem::forget(flush);
}
/// Flush the active page table
pub fn flush(self, table: &mut ActivePageTable) {
if self.0 {
table.flush_all();
}
mem::forget(self);
}
/// Ignore the flush. This is unsafe, and a reason should be provided for use
pub unsafe fn ignore(self) {
mem::forget(self);
}
}
/// A flush cannot be dropped; it must be consumed
impl Drop for MapperFlushAll {
fn drop(&mut self) {
panic!("Mapper flush all was not utilized");
}
}
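// Illustrative sketch of how the flush promises above are consumed, mirroring
// the kernel heap mapping loop in start.rs; `active_table`, `page`, `frame`
// and `flags` are assumed to come from the surrounding code:
//
//     let mut flush_all = MapperFlushAll::new();
//     let result = active_table.map_to(page, frame, flags);
//     flush_all.consume(result);           // defer the per-page flush
//     flush_all.flush(&mut active_table);  // one TLB flush for the whole batch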
pub struct Mapper {
p4: Unique<Table<Level4>>,
}
impl Mapper {
/// Create a new page table
pub unsafe fn new() -> Mapper {
Mapper {
p4: Unique::new(table::P4),
}
}
pub fn p4(&self) -> &Table<Level4> {
unsafe { self.p4.get() }
}
pub fn p4_mut(&mut self) -> &mut Table<Level4> {
unsafe { self.p4.get_mut() }
}
/// Map a page to a frame
pub fn map_to(&mut self, page: Page, frame: Frame, flags: EntryFlags) -> MapperFlush {
let mut p3 = self.p4_mut().next_table_create(page.p4_index());
let mut p2 = p3.next_table_create(page.p3_index());
let mut p1 = p2.next_table_create(page.p2_index());
assert!(p1[page.p1_index()].is_unused(),
"{:X}: Set to {:X}: {:?}, requesting {:X}: {:?}",
page.start_address().get(),
p1[page.p1_index()].address().get(), p1[page.p1_index()].flags(),
frame.start_address().get(), flags);
p1[page.p1_index()].set(frame, flags | entry::PRESENT);
MapperFlush::new(page)
}
/// Map a page to the next free frame
pub fn map(&mut self, page: Page, flags: EntryFlags) -> MapperFlush {
let frame = allocate_frame().expect("out of frames");
self.map_to(page, frame, flags)
}
/// Update flags for a page
pub fn remap(&mut self, page: Page, flags: EntryFlags) -> MapperFlush {
let mut p3 = self.p4_mut().next_table_mut(page.p4_index()).expect("failed to remap: no p3");
let mut p2 = p3.next_table_mut(page.p3_index()).expect("failed to remap: no p2");
let mut p1 = p2.next_table_mut(page.p2_index()).expect("failed to remap: no p1");
let frame = p1[page.p1_index()].pointed_frame().expect("failed to remap: not mapped");
p1[page.p1_index()].set(frame, flags | entry::PRESENT);
MapperFlush::new(page)
}
/// Identity map a frame
pub fn identity_map(&mut self, frame: Frame, flags: EntryFlags) -> MapperFlush {
let page = Page::containing_address(VirtualAddress::new(frame.start_address().get()));
self.map_to(page, frame, flags)
}
/// Unmap a page
pub fn unmap(&mut self, page: Page) -> MapperFlush {
let p1 = self.p4_mut()
.next_table_mut(page.p4_index())
.and_then(|p3| p3.next_table_mut(page.p3_index()))
.and_then(|p2| p2.next_table_mut(page.p2_index()))
.expect("unmap does not support huge pages");
let frame = p1[page.p1_index()].pointed_frame().unwrap();
p1[page.p1_index()].set_unused();
// TODO free p(1,2,3) table if empty
deallocate_frame(frame);
MapperFlush::new(page)
}
/// Unmap a page, returning the frame without freeing it
pub fn unmap_return(&mut self, page: Page) -> (MapperFlush, Frame) {
let p1 = self.p4_mut()
.next_table_mut(page.p4_index())
.and_then(|p3| p3.next_table_mut(page.p3_index()))
.and_then(|p2| p2.next_table_mut(page.p2_index()))
.expect("unmap_return does not support huge pages");
let frame = p1[page.p1_index()].pointed_frame().unwrap();
p1[page.p1_index()].set_unused();
(MapperFlush::new(page), frame)
}
pub fn translate_page(&self, page: Page) -> Option<Frame> {
self.p4().next_table(page.p4_index())
.and_then(|p3| p3.next_table(page.p3_index()))
.and_then(|p2| p2.next_table(page.p2_index()))
.and_then(|p1| p1[page.p1_index()].pointed_frame())
}
pub fn translate_page_flags(&self, page: Page) -> Option<EntryFlags> {
self.p4().next_table(page.p4_index())
.and_then(|p3| p3.next_table(page.p3_index()))
.and_then(|p2| p2.next_table(page.p2_index()))
.and_then(|p1| Some(p1[page.p1_index()].flags()))
}
/// Translate a virtual address to a physical one
pub fn translate(&self, virtual_address: VirtualAddress) -> Option<PhysicalAddress> {
let offset = virtual_address.get() % PAGE_SIZE;
self.translate_page(Page::containing_address(virtual_address))
.map(|frame| PhysicalAddress::new(frame.start_address().get() + offset))
}
}

View file

@ -1,429 +0,0 @@
//! # Paging
//! Some code was borrowed from [Phil Opp's Blog](http://os.phil-opp.com/modifying-page-tables.html)
use core::mem;
use core::ops::{Deref, DerefMut};
use x86::{msr, tlb};
use memory::{allocate_frame, Frame};
use self::entry::{EntryFlags, PRESENT, GLOBAL, WRITABLE, NO_EXECUTE};
use self::mapper::Mapper;
use self::temporary_page::TemporaryPage;
pub mod entry;
pub mod mapper;
pub mod table;
pub mod temporary_page;
/// Number of entries per page table
pub const ENTRY_COUNT: usize = 512;
/// Size of pages
pub const PAGE_SIZE: usize = 4096;
/// Set up the page attribute table
unsafe fn init_pat() {
let uncacheable = 0;
let write_combining = 1;
let write_through = 4;
//let write_protected = 5;
let write_back = 6;
let uncached = 7;
let pat0 = write_back;
let pat1 = write_through;
let pat2 = uncached;
let pat3 = uncacheable;
let pat4 = write_combining;
let pat5 = pat1;
let pat6 = pat2;
let pat7 = pat3;
msr::wrmsr(msr::IA32_PAT, pat7 << 56 | pat6 << 48 | pat5 << 40 | pat4 << 32
| pat3 << 24 | pat2 << 16 | pat1 << 8 | pat0);
}
/// Copy tdata, clear tbss, set TCB self pointer
unsafe fn init_tcb(cpu_id: usize) -> usize {
extern {
/// The starting byte of the thread data segment
static mut __tdata_start: u8;
/// The ending byte of the thread data segment
static mut __tdata_end: u8;
/// The starting byte of the thread BSS segment
static mut __tbss_start: u8;
/// The ending byte of the thread BSS segment
static mut __tbss_end: u8;
}
let tcb_offset;
{
let size = & __tbss_end as *const _ as usize - & __tdata_start as *const _ as usize;
let tbss_offset = & __tbss_start as *const _ as usize - & __tdata_start as *const _ as usize;
let start = ::KERNEL_PERCPU_OFFSET + ::KERNEL_PERCPU_SIZE * cpu_id;
let end = start + size;
tcb_offset = end - mem::size_of::<usize>();
::externs::memcpy(start as *mut u8, & __tdata_start as *const u8, tbss_offset);
::externs::memset((start + tbss_offset) as *mut u8, 0, size - tbss_offset);
*(tcb_offset as *mut usize) = end;
}
tcb_offset
}
/// Initialize paging
///
/// Returns page table and thread control block offset
pub unsafe fn init(cpu_id: usize, stack_start: usize, stack_end: usize) -> (ActivePageTable, usize) {
extern {
/// The starting byte of the text (code) data segment.
static mut __text_start: u8;
/// The ending byte of the text (code) data segment.
static mut __text_end: u8;
/// The starting byte of the _.rodata_ (read-only data) segment.
static mut __rodata_start: u8;
/// The ending byte of the _.rodata_ (read-only data) segment.
static mut __rodata_end: u8;
/// The starting byte of the _.data_ segment.
static mut __data_start: u8;
/// The ending byte of the _.data_ segment.
static mut __data_end: u8;
/// The starting byte of the thread data segment
static mut __tdata_start: u8;
/// The ending byte of the thread data segment
static mut __tdata_end: u8;
/// The starting byte of the thread BSS segment
static mut __tbss_start: u8;
/// The ending byte of the thread BSS segment
static mut __tbss_end: u8;
/// The starting byte of the _.bss_ (uninitialized data) segment.
static mut __bss_start: u8;
/// The ending byte of the _.bss_ (uninitialized data) segment.
static mut __bss_end: u8;
}
init_pat();
let mut active_table = ActivePageTable::new();
let mut temporary_page = TemporaryPage::new(Page::containing_address(VirtualAddress::new(::USER_TMP_MISC_OFFSET)));
let mut new_table = {
let frame = allocate_frame().expect("no more frames in paging::init new_table");
InactivePageTable::new(frame, &mut active_table, &mut temporary_page)
};
active_table.with(&mut new_table, &mut temporary_page, |mapper| {
// Map tdata and tbss
{
let size = & __tbss_end as *const _ as usize - & __tdata_start as *const _ as usize;
let start = ::KERNEL_PERCPU_OFFSET + ::KERNEL_PERCPU_SIZE * cpu_id;
let end = start + size;
let start_page = Page::containing_address(VirtualAddress::new(start));
let end_page = Page::containing_address(VirtualAddress::new(end - 1));
for page in Page::range_inclusive(start_page, end_page) {
let result = mapper.map(page, PRESENT | GLOBAL | NO_EXECUTE | WRITABLE);
// The flush can be ignored as this is not the active table. See later active_table.switch
unsafe { result.ignore(); }
}
}
let mut remap = |start: usize, end: usize, flags: EntryFlags| {
if end > start {
let start_frame = Frame::containing_address(PhysicalAddress::new(start));
let end_frame = Frame::containing_address(PhysicalAddress::new(end - 1));
for frame in Frame::range_inclusive(start_frame, end_frame) {
let page = Page::containing_address(VirtualAddress::new(frame.start_address().get() + ::KERNEL_OFFSET));
let result = mapper.map_to(page, frame, flags);
// The flush can be ignored as this is not the active table. See later active_table.switch
unsafe { result.ignore(); }
}
}
};
// Remap stack writable, no execute
remap(stack_start - ::KERNEL_OFFSET, stack_end - ::KERNEL_OFFSET, PRESENT | GLOBAL | NO_EXECUTE | WRITABLE);
// Remap a section with `flags`
let mut remap_section = |start: &u8, end: &u8, flags: EntryFlags| {
remap(start as *const _ as usize - ::KERNEL_OFFSET, end as *const _ as usize - ::KERNEL_OFFSET, flags);
};
// Remap text read-only
remap_section(& __text_start, & __text_end, PRESENT | GLOBAL);
// Remap rodata read-only, no execute
remap_section(& __rodata_start, & __rodata_end, PRESENT | GLOBAL | NO_EXECUTE);
// Remap data writable, no execute
remap_section(& __data_start, & __data_end, PRESENT | GLOBAL | NO_EXECUTE | WRITABLE);
// Remap tdata master writable, no execute
remap_section(& __tdata_start, & __tdata_end, PRESENT | GLOBAL | NO_EXECUTE);
// Remap bss writable, no execute
remap_section(& __bss_start, & __bss_end, PRESENT | GLOBAL | NO_EXECUTE | WRITABLE);
});
// This switches the active table, which is set up by the bootloader, to a correct table
// set up by the closure above. This will also flush the TLB
active_table.switch(new_table);
(active_table, init_tcb(cpu_id))
}
pub unsafe fn init_ap(cpu_id: usize, bsp_table: usize, stack_start: usize, stack_end: usize) -> usize {
extern {
/// The starting byte of the thread data segment
static mut __tdata_start: u8;
/// The ending byte of the thread data segment
static mut __tdata_end: u8;
/// The starting byte of the thread BSS segment
static mut __tbss_start: u8;
/// The ending byte of the thread BSS segment
static mut __tbss_end: u8;
}
init_pat();
let mut active_table = ActivePageTable::new();
let mut new_table = InactivePageTable::from_address(bsp_table);
let mut temporary_page = TemporaryPage::new(Page::containing_address(VirtualAddress::new(::USER_TMP_MISC_OFFSET)));
active_table.with(&mut new_table, &mut temporary_page, |mapper| {
// Map tdata and tbss
{
let size = & __tbss_end as *const _ as usize - & __tdata_start as *const _ as usize;
let start = ::KERNEL_PERCPU_OFFSET + ::KERNEL_PERCPU_SIZE * cpu_id;
let end = start + size;
let start_page = Page::containing_address(VirtualAddress::new(start));
let end_page = Page::containing_address(VirtualAddress::new(end - 1));
for page in Page::range_inclusive(start_page, end_page) {
let result = mapper.map(page, PRESENT | GLOBAL | NO_EXECUTE | WRITABLE);
// The flush can be ignored as this is not the active table. See later active_table.switch
unsafe { result.ignore(); }
}
}
let mut remap = |start: usize, end: usize, flags: EntryFlags| {
if end > start {
let start_frame = Frame::containing_address(PhysicalAddress::new(start));
let end_frame = Frame::containing_address(PhysicalAddress::new(end - 1));
for frame in Frame::range_inclusive(start_frame, end_frame) {
let page = Page::containing_address(VirtualAddress::new(frame.start_address().get() + ::KERNEL_OFFSET));
let result = mapper.map_to(page, frame, flags);
// The flush can be ignored as this is not the active table. See later active_table.switch
unsafe { result.ignore(); }
}
}
};
// Remap stack writable, no execute
remap(stack_start - ::KERNEL_OFFSET, stack_end - ::KERNEL_OFFSET, PRESENT | GLOBAL | NO_EXECUTE | WRITABLE);
});
// This switches the active table, which is set up by the bootloader, to a correct table
// set up by the closure above. This will also flush the TLB
active_table.switch(new_table);
init_tcb(cpu_id)
}
pub struct ActivePageTable {
mapper: Mapper,
}
impl Deref for ActivePageTable {
type Target = Mapper;
fn deref(&self) -> &Mapper {
&self.mapper
}
}
impl DerefMut for ActivePageTable {
fn deref_mut(&mut self) -> &mut Mapper {
&mut self.mapper
}
}
impl ActivePageTable {
pub unsafe fn new() -> ActivePageTable {
ActivePageTable {
mapper: Mapper::new(),
}
}
pub fn switch(&mut self, new_table: InactivePageTable) -> InactivePageTable {
use x86::controlregs;
let old_table = InactivePageTable {
p4_frame: Frame::containing_address(
PhysicalAddress::new(unsafe { controlregs::cr3() } as usize)
),
};
unsafe {
controlregs::cr3_write(new_table.p4_frame.start_address().get() as u64);
}
old_table
}
pub fn flush(&mut self, page: Page) {
unsafe { tlb::flush(page.start_address().get()); }
}
pub fn flush_all(&mut self) {
unsafe { tlb::flush_all(); }
}
pub fn with<F>(&mut self, table: &mut InactivePageTable, temporary_page: &mut temporary_page::TemporaryPage, f: F)
where F: FnOnce(&mut Mapper)
{
use x86::controlregs;
{
let backup = Frame::containing_address(PhysicalAddress::new(unsafe { controlregs::cr3() as usize }));
// map temporary_page to current p4 table
let p4_table = temporary_page.map_table_frame(backup.clone(), PRESENT | WRITABLE | NO_EXECUTE, self);
// overwrite recursive mapping
self.p4_mut()[511].set(table.p4_frame.clone(), PRESENT | WRITABLE | NO_EXECUTE);
self.flush_all();
// execute f in the new context
f(self);
// restore recursive mapping to original p4 table
p4_table[511].set(backup, PRESENT | WRITABLE | NO_EXECUTE);
self.flush_all();
}
temporary_page.unmap(self);
}
pub unsafe fn address(&self) -> usize {
use x86::controlregs;
controlregs::cr3() as usize
}
}
pub struct InactivePageTable {
p4_frame: Frame,
}
impl InactivePageTable {
pub fn new(frame: Frame, active_table: &mut ActivePageTable, temporary_page: &mut TemporaryPage) -> InactivePageTable {
{
let table = temporary_page.map_table_frame(frame.clone(), PRESENT | WRITABLE | NO_EXECUTE, active_table);
// now we are able to zero the table
table.zero();
// set up recursive mapping for the table
table[511].set(frame.clone(), PRESENT | WRITABLE | NO_EXECUTE);
}
temporary_page.unmap(active_table);
InactivePageTable { p4_frame: frame }
}
pub unsafe fn from_address(cr3: usize) -> InactivePageTable {
InactivePageTable { p4_frame: Frame::containing_address(PhysicalAddress::new(cr3)) }
}
pub unsafe fn address(&self) -> usize {
self.p4_frame.start_address().get()
}
}
/// A physical address.
#[derive(Copy, Clone, Debug, Eq, Ord, PartialEq, PartialOrd)]
pub struct PhysicalAddress(usize);
impl PhysicalAddress {
pub fn new(address: usize) -> Self {
PhysicalAddress(address)
}
pub fn get(&self) -> usize {
self.0
}
}
/// A virtual address.
#[derive(Copy, Clone, Debug, Eq, Ord, PartialEq, PartialOrd)]
pub struct VirtualAddress(usize);
impl VirtualAddress {
pub fn new(address: usize) -> Self {
VirtualAddress(address)
}
pub fn get(&self) -> usize {
self.0
}
}
/// Page
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
pub struct Page {
number: usize
}
impl Page {
pub fn start_address(&self) -> VirtualAddress {
VirtualAddress::new(self.number * PAGE_SIZE)
}
pub fn p4_index(&self) -> usize {
(self.number >> 27) & 0o777
}
pub fn p3_index(&self) -> usize {
(self.number >> 18) & 0o777
}
pub fn p2_index(&self) -> usize {
(self.number >> 9) & 0o777
}
pub fn p1_index(&self) -> usize {
(self.number >> 0) & 0o777
}
pub fn containing_address(address: VirtualAddress) -> Page {
//TODO assert!(address.get() < 0x0000_8000_0000_0000 || address.get() >= 0xffff_8000_0000_0000,
// "invalid address: 0x{:x}", address.get());
Page { number: address.get() / PAGE_SIZE }
}
pub fn range_inclusive(start: Page, end: Page) -> PageIter {
PageIter {
start: start,
end: end,
}
}
}
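// Worked example: for the kernel heap page at KERNEL_HEAP_OFFSET
// (0xFFFF_FF40_0000_0000), the index helpers above yield
//   p4_index = 510, p3_index = 256, p2_index = 0, p1_index = 0
// i.e. the second-from-top PML4 entry reserved for the kernel, half way into
// the corresponding P3 table.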
pub struct PageIter {
start: Page,
end: Page,
}
impl Iterator for PageIter {
type Item = Page;
fn next(&mut self) -> Option<Page> {
if self.start <= self.end {
let page = self.start;
self.start.number += 1;
Some(page)
} else {
None
}
}
}

View file

@ -1,98 +0,0 @@
//! # Page table
//! Code borrowed from [Phil Opp's Blog](http://os.phil-opp.com/modifying-page-tables.html)
use core::marker::PhantomData;
use core::ops::{Index, IndexMut};
use memory::allocate_frame;
use super::entry::*;
use super::ENTRY_COUNT;
pub const P4: *mut Table<Level4> = 0xffff_ffff_ffff_f000 as *mut _;
pub trait TableLevel {}
pub enum Level4 {}
pub enum Level3 {}
pub enum Level2 {}
pub enum Level1 {}
impl TableLevel for Level4 {}
impl TableLevel for Level3 {}
impl TableLevel for Level2 {}
impl TableLevel for Level1 {}
pub trait HierarchicalLevel: TableLevel {
type NextLevel: TableLevel;
}
impl HierarchicalLevel for Level4 {
type NextLevel = Level3;
}
impl HierarchicalLevel for Level3 {
type NextLevel = Level2;
}
impl HierarchicalLevel for Level2 {
type NextLevel = Level1;
}
pub struct Table<L: TableLevel> {
entries: [Entry; ENTRY_COUNT],
level: PhantomData<L>,
}
impl<L> Table<L> where L: TableLevel {
pub fn zero(&mut self) {
for entry in self.entries.iter_mut() {
entry.set_unused();
}
}
}
impl<L> Table<L> where L: HierarchicalLevel {
pub fn next_table(&self, index: usize) -> Option<&Table<L::NextLevel>> {
self.next_table_address(index).map(|address| unsafe { &*(address as *const _) })
}
pub fn next_table_mut(&mut self, index: usize) -> Option<&mut Table<L::NextLevel>> {
self.next_table_address(index).map(|address| unsafe { &mut *(address as *mut _) })
}
pub fn next_table_create(&mut self, index: usize) -> &mut Table<L::NextLevel> {
if self.next_table(index).is_none() {
assert!(!self[index].flags().contains(HUGE_PAGE),
"next_table_create does not support huge pages");
let frame = allocate_frame().expect("no frames available");
self[index].set(frame, PRESENT | WRITABLE | USER_ACCESSIBLE /* Allow users to go down the page table, implement permissions at the page level */);
self.next_table_mut(index).unwrap().zero();
}
self.next_table_mut(index).unwrap()
}
fn next_table_address(&self, index: usize) -> Option<usize> {
let entry_flags = self[index].flags();
if entry_flags.contains(PRESENT) && !entry_flags.contains(HUGE_PAGE) {
let table_address = self as *const _ as usize;
Some((table_address << 9) | (index << 12))
} else {
None
}
}
}
impl<L> Index<usize> for Table<L> where L: TableLevel {
type Output = Entry;
fn index(&self, index: usize) -> &Entry {
&self.entries[index]
}
}
impl<L> IndexMut<usize> for Table<L> where L: TableLevel {
fn index_mut(&mut self, index: usize) -> &mut Entry {
&mut self.entries[index]
}
}
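// Worked example: `next_table_address` relies on the recursive mapping in
// PML4 slot 511. Starting from P4 itself at 0xFFFF_FFFF_FFFF_F000 and asking
// for index 510, (table_address << 9) | (index << 12) truncates to
// 0xFFFF_FFFF_FFFF_E000, which is where the recursive mapping exposes the
// kernel's P3 table.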

View file

@ -1,45 +0,0 @@
//! Temporarily map a page
//! From [Phil Opp's Blog](http://os.phil-opp.com/remap-the-kernel.html)
use memory::Frame;
use super::{ActivePageTable, Page, VirtualAddress};
use super::entry::EntryFlags;
use super::table::{Table, Level1};
pub struct TemporaryPage {
page: Page,
}
impl TemporaryPage {
pub fn new(page: Page) -> TemporaryPage {
TemporaryPage {
page: page,
}
}
pub fn start_address(&self) -> VirtualAddress {
self.page.start_address()
}
/// Maps the temporary page to the given frame in the active table.
/// Returns the start address of the temporary page.
pub fn map(&mut self, frame: Frame, flags: EntryFlags, active_table: &mut ActivePageTable) -> VirtualAddress {
assert!(active_table.translate_page(self.page).is_none(), "temporary page is already mapped");
let result = active_table.map_to(self.page, frame, flags);
result.flush(active_table);
self.page.start_address()
}
/// Maps the temporary page to the given page table frame in the active
/// table. Returns a reference to the now mapped table.
pub fn map_table_frame(&mut self, frame: Frame, flags: EntryFlags, active_table: &mut ActivePageTable) -> &mut Table<Level1> {
unsafe { &mut *(self.map(frame, flags, active_table).get() as *mut Table<Level1>) }
}
/// Unmaps the temporary page in the active table.
pub fn unmap(&mut self, active_table: &mut ActivePageTable) {
let result = active_table.unmap(self.page);
result.flush(active_table);
}
}

View file

@ -1,32 +0,0 @@
//! Intrinsics for panic handling
use interrupt;
#[cfg(not(test))]
#[lang = "eh_personality"]
extern "C" fn eh_personality() {}
#[cfg(not(test))]
/// Required to handle panics
#[lang = "panic_fmt"]
extern "C" fn panic_fmt(fmt: ::core::fmt::Arguments, file: &str, line: u32) -> ! {
println!("PANIC: {}", fmt);
println!("FILE: {}", file);
println!("LINE: {}", line);
unsafe { interrupt::stack_trace(); }
println!("HALT");
loop {
unsafe { interrupt::halt(); }
}
}
#[allow(non_snake_case)]
#[no_mangle]
/// Required to handle panics
pub extern "C" fn _Unwind_Resume() -> ! {
loop {
unsafe { interrupt::halt(); }
}
}

View file

@ -1,189 +0,0 @@
/// This function is where the kernel sets up IRQ handlers
/// It is incredibly unsafe, and should be minimal in nature
/// It must create the IDT with the correct entries; those entries are
/// defined in other files inside of the `arch` module
use core::sync::atomic::{AtomicBool, ATOMIC_BOOL_INIT, AtomicUsize, ATOMIC_USIZE_INIT, Ordering};
use acpi;
use allocator;
use device;
use externs::memset;
use gdt;
use idt;
use interrupt;
use memory;
use paging::{self, entry, Page, VirtualAddress};
use paging::mapper::MapperFlushAll;
/// Test of zero values in BSS.
static BSS_TEST_ZERO: usize = 0;
/// Test of non-zero values in data.
static DATA_TEST_NONZERO: usize = 0xFFFFFFFFFFFFFFFF;
/// Test of zero values in thread BSS
#[thread_local]
static mut TBSS_TEST_ZERO: usize = 0;
/// Test of non-zero values in thread data.
#[thread_local]
static mut TDATA_TEST_NONZERO: usize = 0xFFFFFFFFFFFFFFFF;
pub static CPU_COUNT: AtomicUsize = ATOMIC_USIZE_INIT;
pub static AP_READY: AtomicBool = ATOMIC_BOOL_INIT;
static BSP_READY: AtomicBool = ATOMIC_BOOL_INIT;
extern {
/// Kernel main function
fn kmain(cpus: usize) -> !;
/// Kernel main for APs
fn kmain_ap(id: usize) -> !;
}
/// The entry to Rust; all things must be initialized here
#[no_mangle]
pub unsafe extern fn kstart() -> ! {
{
extern {
/// The starting byte of the _.bss_ (uninitialized data) segment.
static mut __bss_start: u8;
/// The ending byte of the _.bss_ (uninitialized data) segment.
static mut __bss_end: u8;
/// The end of the kernel
static mut __end: u8;
}
// Zero BSS, this initializes statics that are set to 0
{
let start_ptr = &mut __bss_start as *mut u8;
let end_ptr = & __bss_end as *const u8 as usize;
if start_ptr as usize <= end_ptr {
let size = end_ptr - start_ptr as usize;
memset(start_ptr, 0, size);
}
assert_eq!(BSS_TEST_ZERO, 0);
assert_eq!(DATA_TEST_NONZERO, 0xFFFFFFFFFFFFFFFF);
}
// Initialize memory management
memory::init(0, &__end as *const u8 as usize - ::KERNEL_OFFSET);
// TODO: allocate a stack
let stack_start = 0x00080000 + ::KERNEL_OFFSET;
let stack_end = 0x0009F000 + ::KERNEL_OFFSET;
// Initialize paging
let (mut active_table, tcb_offset) = paging::init(0, stack_start, stack_end);
// Set up GDT
gdt::init(tcb_offset, stack_end);
// Set up IDT
idt::init();
// Test tdata and tbss
{
assert_eq!(TBSS_TEST_ZERO, 0);
TBSS_TEST_ZERO += 1;
assert_eq!(TBSS_TEST_ZERO, 1);
assert_eq!(TDATA_TEST_NONZERO, 0xFFFFFFFFFFFFFFFF);
TDATA_TEST_NONZERO -= 1;
assert_eq!(TDATA_TEST_NONZERO, 0xFFFFFFFFFFFFFFFE);
}
// Reset AP variables
CPU_COUNT.store(1, Ordering::SeqCst);
AP_READY.store(false, Ordering::SeqCst);
BSP_READY.store(false, Ordering::SeqCst);
// Set up kernel heap
{
let mut flush_all = MapperFlushAll::new();
// Map heap pages
let heap_start_page = Page::containing_address(VirtualAddress::new(::KERNEL_HEAP_OFFSET));
let heap_end_page = Page::containing_address(VirtualAddress::new(::KERNEL_HEAP_OFFSET + ::KERNEL_HEAP_SIZE-1));
for page in Page::range_inclusive(heap_start_page, heap_end_page) {
let result = active_table.map(page, entry::PRESENT | entry::GLOBAL | entry::WRITABLE | entry::NO_EXECUTE);
flush_all.consume(result);
}
flush_all.flush(&mut active_table);
// Init the allocator
allocator::init(::KERNEL_HEAP_OFFSET, ::KERNEL_HEAP_SIZE);
}
// Initialize devices
device::init(&mut active_table);
// Read ACPI tables, starts APs
acpi::init(&mut active_table);
BSP_READY.store(true, Ordering::SeqCst);
}
kmain(CPU_COUNT.load(Ordering::SeqCst));
}
/// Entry to Rust for an AP
pub unsafe extern fn kstart_ap(cpu_id: usize, bsp_table: usize, stack_start: usize, stack_end: usize) -> ! {
{
assert_eq!(BSS_TEST_ZERO, 0);
assert_eq!(DATA_TEST_NONZERO, 0xFFFFFFFFFFFFFFFF);
// Initialize paging
let tcb_offset = paging::init_ap(cpu_id, bsp_table, stack_start, stack_end);
// Set up GDT for AP
gdt::init(tcb_offset, stack_end);
// Set up IDT for AP
idt::init();
// Test tdata and tbss
{
assert_eq!(TBSS_TEST_ZERO, 0);
TBSS_TEST_ZERO += 1;
assert_eq!(TBSS_TEST_ZERO, 1);
assert_eq!(TDATA_TEST_NONZERO, 0xFFFFFFFFFFFFFFFF);
TDATA_TEST_NONZERO -= 1;
assert_eq!(TDATA_TEST_NONZERO, 0xFFFFFFFFFFFFFFFE);
}
// Initialize devices (for AP)
device::init_ap();
AP_READY.store(true, Ordering::SeqCst);
}
while ! BSP_READY.load(Ordering::SeqCst) {
interrupt::pause();
}
kmain_ap(cpu_id);
}
pub unsafe fn usermode(ip: usize, sp: usize) -> ! {
// Go to usermode
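// The five pushes below build the frame that iretq pops: rdi is the user rip,
// rsi the user cs, rdx the rflags value, rcx the user stack pointer and rax
// the user data segment reused as ss (popped in reverse push order).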
asm!("mov ds, ax
mov es, ax
mov fs, bx
mov gs, ax
push rax
push rcx
push rdx
push rsi
push rdi
iretq"
: // No output because it never returns
: "{rax}"(gdt::GDT_USER_DATA << 3 | 3), // Data segment
"{rbx}"(gdt::GDT_USER_TLS << 3 | 3), // TLS segment
"{rcx}"(sp), // Stack pointer
"{rdx}"(3 << 12 | 1 << 9), // Flags - Set IOPL and interrupt enable flag
"{rsi}"(gdt::GDT_USER_CODE << 3 | 3), // Code segment
"{rdi}"(ip) // IP
: // No clobbers because it never returns
: "intel", "volatile");
unreachable!();
}

View file

@ -1,23 +0,0 @@
use io::{Io, Pio};
#[no_mangle]
pub unsafe extern fn kstop() -> ! {
// (phony) ACPI shutdown (http://forum.osdev.org/viewtopic.php?t=16990)
// Works for qemu and bochs.
for &port in [0x604, 0xB004].iter() {
println!("Shutdown with outw(0x{:X}, 0x{:X})", port, 0x2000);
Pio::<u16>::new(port).write(0x2000);
}
// Magic shutdown code for bochs and qemu (older versions).
for c in "Shutdown".bytes() {
println!("Shutdown with outb(0x{:X}, '{}')", 0x8900, c as char);
Pio::<u8>::new(0x8900).write(c);
}
// Magic code for VMWare. Also a hard lock.
println!("Shutdown with cli hlt");
asm!("cli; hlt" : : : : "intel", "volatile");
unreachable!();
}

View file

@ -1,15 +0,0 @@
use spin::Mutex;
pub static START: Mutex<(u64, u64)> = Mutex::new((0, 0));
pub static OFFSET: Mutex<(u64, u64)> = Mutex::new((0, 0));
pub fn monotonic() -> (u64, u64) {
*OFFSET.lock()
}
pub fn realtime() -> (u64, u64) {
let offset = monotonic();
let start = *START.lock();
let sum = start.1 + offset.1;
(start.0 + offset.0 + sum / 1000000000, sum % 1000000000)
}
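// Worked example with hypothetical values: if START is (100, 900_000_000) and
// OFFSET is (5, 300_000_000), the nanosecond fields sum to 1_200_000_000, so
// realtime() carries one second and returns (106, 200_000_000).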

View file

@ -1,108 +0,0 @@
//! Helpers used to define types that are backed by integers (typically `usize`),
//! without compromising safety.
//!
//! # Example
//!
//! ```
//! /// Define an opaque type `Pid` backed by a `usize`.
//! int_like!(Pid, usize);
//!
//! const ZERO: Pid = Pid::from(0);
//! ```
//!
//! # Example
//!
//! ```
//! /// Define opaque types `Pid` and `AtomicPid`, backed respectively by a `usize`
//! /// and a `AtomicUsize`.
//!
//! int_like!(Pid, AtomicPid, usize, AtomicUsize);
//!
//! const ZERO: Pid = Pid::from(0);
//! let ATOMIC_PID: AtomicPid = AtomicPid::default();
//! ```
#[macro_export]
macro_rules! int_like {
($new_type_name:ident, $backing_type: ident) => {
#[derive(PartialEq, Eq, PartialOrd, Ord, Debug, Clone, Copy)]
pub struct $new_type_name($backing_type);
impl $new_type_name {
pub const fn into(self) -> $backing_type {
self.0
}
pub const fn from(x: $backing_type) -> Self {
$new_type_name(x)
}
}
};
($new_type_name:ident, $new_atomic_type_name: ident, $backing_type:ident, $backing_atomic_type:ident) => {
int_like!($new_type_name, $backing_type);
/// A mutable holder for T that can safely be shared among threads.
/// Runtime equivalent to using `AtomicUsize`, just type-safer.
pub struct $new_atomic_type_name {
container: $backing_atomic_type,
}
impl $new_atomic_type_name {
pub const fn new(x: $new_type_name) -> Self {
$new_atomic_type_name {
container: $backing_atomic_type::new(x.into())
}
}
pub const fn default() -> Self {
Self::new($new_type_name::from(0))
}
pub fn load(&self, order: ::core::sync::atomic::Ordering) -> $new_type_name {
$new_type_name::from(self.container.load(order))
}
pub fn store(&self, val: $new_type_name, order: ::core::sync::atomic::Ordering) {
self.container.store(val.into(), order)
}
#[allow(dead_code)]
pub fn swap(&self, val: $new_type_name, order: ::core::sync::atomic::Ordering) -> $new_type_name {
$new_type_name::from(self.container.swap(val.into(), order))
}
#[allow(dead_code)]
pub fn compare_and_swap(&self, current: $new_type_name, new: $new_type_name, order: ::core::sync::atomic::Ordering) -> $new_type_name {
$new_type_name::from(self.container.compare_and_swap(current.into(), new.into(), order))
}
#[allow(dead_code)]
pub fn compare_exchange(&self, current: $new_type_name, new: $new_type_name, success: ::core::sync::atomic::Ordering, failure: ::core::sync::atomic::Ordering) -> ::core::result::Result<$new_type_name, $new_type_name> {
match self.container.compare_exchange(current.into(), new.into(), success, failure) {
Ok(result) => Ok($new_type_name::from(result)),
Err(result) => Err($new_type_name::from(result))
}
}
#[allow(dead_code)]
pub fn compare_exchange_weak(&self, current: $new_type_name, new: $new_type_name, success: ::core::sync::atomic::Ordering, failure: ::core::sync::atomic::Ordering) -> ::core::result::Result<$new_type_name, $new_type_name> {
match self.container.compare_exchange_weak(current.into(), new.into(), success, failure) {
Ok(result) => Ok($new_type_name::from(result)),
Err(result) => Err($new_type_name::from(result))
}
}
}
}
}
#[cfg(test)]
fn test() {
use core::mem::size_of;
use ::core::sync::atomic::AtomicUsize;
// Generate type `UsizeLike`.
int_like!(UsizeLike, usize);
const ZERO: UsizeLike = UsizeLike::from(0);
assert_eq!(size_of::<UsizeLike>(), size_of::<usize>());
// Generate types `UsizeLike2` and `AtomicUsizeLike`.
int_like!(UsizeLike2, AtomicUsizeLike, usize, AtomicUsize);
assert_eq!(size_of::<UsizeLike2>(), size_of::<usize>());
assert_eq!(size_of::<AtomicUsizeLike>(), size_of::<AtomicUsize>());
}

View file

@ -1,2 +0,0 @@
#[macro_use]
pub mod int_like;

View file

@ -1,252 +0,0 @@
use alloc::arc::Arc;
use alloc::boxed::Box;
use collections::{BTreeMap, Vec, VecDeque};
use spin::Mutex;
use arch;
use context::file::File;
use context::memory::{Grant, Memory, SharedMemory, Tls};
use scheme::{SchemeNamespace, FileHandle};
use syscall::data::Event;
use sync::{WaitMap, WaitQueue};
use ::core::sync::atomic::AtomicUsize;
/// Unique identifier for a context (i.e. `pid`).
int_like!(ContextId, AtomicContextId, usize, AtomicUsize);
/// The status of a context - used for scheduling
/// See syscall::process::waitpid and the sync module for examples of usage
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub enum Status {
Runnable,
Blocked,
Exited(usize)
}
/// A context, which identifies either a process or a thread
#[derive(Debug)]
pub struct Context {
/// The ID of this context
pub id: ContextId,
/// The ID of the parent context
pub ppid: ContextId,
/// The real user id
pub ruid: u32,
/// The real group id
pub rgid: u32,
/// The real namespace id
pub rns: SchemeNamespace,
/// The effective user id
pub euid: u32,
/// The effective group id
pub egid: u32,
/// The effective namespace id
pub ens: SchemeNamespace,
/// Status of context
pub status: Status,
/// Context running or not
pub running: bool,
/// CPU ID, if locked
pub cpu_id: Option<usize>,
/// Context is halting parent
pub vfork: bool,
/// Context is being waited on
pub waitpid: Arc<WaitMap<ContextId, usize>>,
/// Pending signals queued for this context to handle
pub pending: VecDeque<u8>,
/// Context should wake up at specified time
pub wake: Option<(u64, u64)>,
/// The architecture specific context
pub arch: arch::context::Context,
/// Kernel FX - used to store SIMD and FPU registers on context switch
pub kfx: Option<Box<[u8]>>,
/// Kernel stack
pub kstack: Option<Box<[u8]>>,
/// Executable image
pub image: Vec<SharedMemory>,
/// User heap
pub heap: Option<SharedMemory>,
/// User stack
pub stack: Option<Memory>,
/// User Thread local storage
pub tls: Option<Tls>,
/// User grants
pub grants: Arc<Mutex<Vec<Grant>>>,
/// The name of the context
pub name: Arc<Mutex<Vec<u8>>>,
/// The current working directory
pub cwd: Arc<Mutex<Vec<u8>>>,
/// Kernel events
pub events: Arc<WaitQueue<Event>>,
/// The process environment
pub env: Arc<Mutex<BTreeMap<Box<[u8]>, Arc<Mutex<Vec<u8>>>>>>,
/// The open files in the scheme
pub files: Arc<Mutex<Vec<Option<File>>>>
}
impl Context {
pub fn new(id: ContextId) -> Context {
Context {
id: id,
ppid: ContextId::from(0),
ruid: 0,
rgid: 0,
rns: SchemeNamespace::from(0),
euid: 0,
egid: 0,
ens: SchemeNamespace::from(0),
status: Status::Blocked,
running: false,
cpu_id: None,
vfork: false,
waitpid: Arc::new(WaitMap::new()),
pending: VecDeque::new(),
wake: None,
arch: arch::context::Context::new(),
kfx: None,
kstack: None,
image: Vec::new(),
heap: None,
stack: None,
tls: None,
grants: Arc::new(Mutex::new(Vec::new())),
name: Arc::new(Mutex::new(Vec::new())),
cwd: Arc::new(Mutex::new(Vec::new())),
events: Arc::new(WaitQueue::new()),
env: Arc::new(Mutex::new(BTreeMap::new())),
files: Arc::new(Mutex::new(Vec::new()))
}
}
/// Make a relative path absolute
/// Given a cwd of "scheme:/path"
/// This function will turn "foo" into "scheme:/path/foo"
/// "/foo" will turn into "scheme:/foo"
/// "bar:/foo" will be used directly, as it is already absolute
pub fn canonicalize(&self, path: &[u8]) -> Vec<u8> {
if path.iter().position(|&b| b == b':').is_none() {
let cwd = self.cwd.lock();
if path == b"." {
cwd.clone()
} else if path == b".." {
cwd[..cwd[..cwd.len() - 1]
.iter().rposition(|&b| b == b'/' || b == b':')
.map_or(cwd.len(), |i| i + 1)]
.to_vec()
} else if path.starts_with(b"./") {
let mut canon = cwd.clone();
if ! canon.ends_with(b"/") {
canon.push(b'/');
}
canon.extend_from_slice(&path[2..]);
canon
} else if path.starts_with(b"../") {
let mut canon = cwd[..cwd[..cwd.len() - 1]
.iter().rposition(|&b| b == b'/' || b == b':')
.map_or(cwd.len(), |i| i + 1)]
.to_vec();
canon.extend_from_slice(&path[3..]);
canon
} else if path.starts_with(b"/") {
let mut canon = cwd[..cwd.iter().position(|&b| b == b':').map_or(1, |i| i + 1)].to_vec();
canon.extend_from_slice(&path);
canon
} else {
let mut canon = cwd.clone();
if ! canon.ends_with(b"/") {
canon.push(b'/');
}
canon.extend_from_slice(&path);
canon
}
} else {
path.to_vec()
}
}
/// Block the context, and return true if it was runnable before being blocked
pub fn block(&mut self) -> bool {
if self.status == Status::Runnable {
self.status = Status::Blocked;
true
} else {
false
}
}
/// Unblock context, and return true if it was blocked before being marked runnable
pub fn unblock(&mut self) -> bool {
if self.status == Status::Blocked {
self.status = Status::Runnable;
if let Some(cpu_id) = self.cpu_id {
if cpu_id != ::cpu_id() {
// Send IPI if not on current CPU
// TODO: Make this more architecture independent
unsafe { arch::device::local_apic::LOCAL_APIC.ipi(cpu_id) };
}
}
true
} else {
false
}
}
/// Add a file to the lowest available slot.
/// Return the file descriptor number or None if no slot was found
pub fn add_file(&self, file: File) -> Option<FileHandle> {
let mut files = self.files.lock();
for (i, mut file_option) in files.iter_mut().enumerate() {
if file_option.is_none() {
*file_option = Some(file);
return Some(FileHandle::from(i));
}
}
let len = files.len();
if len < super::CONTEXT_MAX_FILES {
files.push(Some(file));
Some(FileHandle::from(len))
} else {
None
}
}
/// Get a file
pub fn get_file(&self, i: FileHandle) -> Option<File> {
let files = self.files.lock();
if i.into() < files.len() {
files[i.into()]
} else {
None
}
}
/// Insert a file with a specific handle number. This is used by dup2
/// Return the file descriptor number or None if the slot was not empty, or i was invalid
pub fn insert_file(&self, i: FileHandle, file: File) -> Option<FileHandle> {
let mut files = self.files.lock();
if i.into() < super::CONTEXT_MAX_FILES {
while i.into() >= files.len() {
files.push(None);
}
if files[i.into()].is_none() {
files[i.into()] = Some(file);
Some(i)
} else {
None
}
} else {
None
}
}
/// Remove a file
// TODO: adjust files vector to smaller size if possible
pub fn remove_file(&self, i: FileHandle) -> Option<File> {
let mut files = self.files.lock();
if i.into() < files.len() {
files[i.into()].take()
} else {
None
}
}
}
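// Illustrative sketch (not in the original source): the per-context file table in
// use. The scheme id and file number are made up for the example.
#[allow(dead_code)]
fn file_table_sketch() {
    use scheme::SchemeId;
    let context = Context::new(ContextId::from(1));
    let file = File { scheme: SchemeId::from(1), number: 0, event: None };
    // Files land in the lowest free slot; `insert_file` (dup2) targets a slot directly.
    let fd = context.add_file(file).expect("no free slot");
    assert_eq!(context.get_file(fd).map(|f| f.number), Some(0));
    assert!(context.remove_file(fd).is_some());
}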


@ -1,112 +0,0 @@
use alloc::arc::{Arc, Weak};
use collections::BTreeMap;
use spin::{Once, RwLock, RwLockReadGuard, RwLockWriteGuard};
use context;
use scheme::{FileHandle, SchemeId};
use sync::WaitQueue;
use syscall::data::Event;
type EventList = Weak<WaitQueue<Event>>;
#[derive(PartialEq, Eq, PartialOrd, Ord)]
pub struct RegKey {
scheme_id: SchemeId,
event_id: usize,
}
#[derive(PartialEq, Eq, PartialOrd, Ord)]
pub struct ProcessKey {
context_id: context::context::ContextId,
fd: FileHandle,
}
type Registry = BTreeMap<RegKey, BTreeMap<ProcessKey, EventList>>;
static REGISTRY: Once<RwLock<Registry>> = Once::new();
/// Initialize registry, called if needed
fn init_registry() -> RwLock<Registry> {
RwLock::new(Registry::new())
}
/// Get the global event registry, const
fn registry() -> RwLockReadGuard<'static, Registry> {
REGISTRY.call_once(init_registry).read()
}
/// Get the global event registry, mutable
pub fn registry_mut() -> RwLockWriteGuard<'static, Registry> {
REGISTRY.call_once(init_registry).write()
}
pub fn register(fd: FileHandle, scheme_id: SchemeId, event_id: usize) -> bool {
let (context_id, events) = {
let contexts = context::contexts();
let context_lock = contexts.current().expect("event::register: No context");
let context = context_lock.read();
(context.id, Arc::downgrade(&context.events))
};
let mut registry = registry_mut();
let entry = registry.entry(RegKey {
scheme_id: scheme_id,
event_id: event_id
}).or_insert_with(|| {
BTreeMap::new()
});
let process_key = ProcessKey {
context_id: context_id,
fd: fd
};
if entry.contains_key(&process_key) {
false
} else {
entry.insert(process_key, events);
true
}
}
pub fn unregister(fd: FileHandle, scheme_id: SchemeId, event_id: usize) {
let mut registry = registry_mut();
let mut remove = false;
let key = RegKey {
scheme_id: scheme_id,
event_id: event_id
};
if let Some(entry) = registry.get_mut(&key) {
let process_key = ProcessKey {
context_id: context::context_id(),
fd: fd,
};
entry.remove(&process_key);
if entry.is_empty() {
remove = true;
}
}
if remove {
registry.remove(&key);
}
}
pub fn trigger(scheme_id: SchemeId, event_id: usize, flags: usize, data: usize) {
let registry = registry();
let key = RegKey {
scheme_id: scheme_id,
event_id: event_id
};
if let Some(event_lists) = registry.get(&key) {
for entry in event_lists.iter() {
if let Some(event_list) = entry.1.upgrade() {
event_list.send(Event {
id: (entry.0).fd.into(),
flags: flags,
data: data
});
}
}
}
}
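// Illustrative sketch (not in the original source) of the register/trigger/unregister
// flow. The ids are made up, and `register` must run from within a context, since it
// looks up the caller's event queue.
#[allow(dead_code)]
fn event_flow_sketch() {
    use syscall::flag::EVENT_READ;
    let scheme_id = SchemeId::from(1);
    let fd = FileHandle::from(3);
    // A scheme's `fevent` handler registers interest, the scheme later triggers
    // the event, and teardown unregisters it again.
    assert!(register(fd, scheme_id, 0));
    trigger(scheme_id, 0, EVENT_READ, 1);
    unregister(fd, scheme_id, 0);
}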


@ -1,15 +0,0 @@
//! File struct
use scheme::SchemeId;
/// A file
//TODO: Close on exec
#[derive(Copy, Clone, Debug)]
pub struct File {
/// The scheme that this file refers to
pub scheme: SchemeId,
/// The number the scheme uses to refer to this file
pub number: usize,
/// If events are on, this is the event ID
pub event: Option<usize>,
}


@ -1,91 +0,0 @@
use alloc::arc::Arc;
use alloc::boxed::Box;
use collections::BTreeMap;
use core::mem;
use core::sync::atomic::Ordering;
use spin::RwLock;
use arch;
use syscall::error::{Result, Error, EAGAIN};
use super::context::{Context, ContextId};
/// Context list type
pub struct ContextList {
map: BTreeMap<ContextId, Arc<RwLock<Context>>>,
next_id: usize
}
impl ContextList {
/// Create a new context list.
pub fn new() -> Self {
ContextList {
map: BTreeMap::new(),
next_id: 1
}
}
/// Get the context with the given ID.
pub fn get(&self, id: ContextId) -> Option<&Arc<RwLock<Context>>> {
self.map.get(&id)
}
/// Get the current context.
pub fn current(&self) -> Option<&Arc<RwLock<Context>>> {
self.map.get(&super::CONTEXT_ID.load(Ordering::SeqCst))
}
pub fn iter(&self) -> ::collections::btree_map::Iter<ContextId, Arc<RwLock<Context>>> {
self.map.iter()
}
/// Create a new context.
pub fn new_context(&mut self) -> Result<&Arc<RwLock<Context>>> {
if self.next_id >= super::CONTEXT_MAX_CONTEXTS {
self.next_id = 1;
}
while self.map.contains_key(&ContextId::from(self.next_id)) {
self.next_id += 1;
}
if self.next_id >= super::CONTEXT_MAX_CONTEXTS {
return Err(Error::new(EAGAIN));
}
let id = ContextId::from(self.next_id);
self.next_id += 1;
assert!(self.map.insert(id, Arc::new(RwLock::new(Context::new(id)))).is_none());
Ok(self.map.get(&id).expect("Failed to insert new context. ID is out of bounds."))
}
/// Spawn a context from a function.
pub fn spawn(&mut self, func: extern fn()) -> Result<&Arc<RwLock<Context>>> {
let context_lock = self.new_context()?;
{
let mut context = context_lock.write();
let mut fx = unsafe { Box::from_raw(::alloc::heap::allocate(512, 16) as *mut [u8; 512]) };
for b in fx.iter_mut() {
*b = 0;
}
let mut stack = vec![0; 65536].into_boxed_slice();
let offset = stack.len() - mem::size_of::<usize>();
unsafe {
let offset = stack.len() - mem::size_of::<usize>();
let func_ptr = stack.as_mut_ptr().offset(offset as isize);
*(func_ptr as *mut usize) = func as usize;
}
context.arch.set_page_table(unsafe { arch::paging::ActivePageTable::new().address() });
context.arch.set_fx(fx.as_ptr() as usize);
context.arch.set_stack(stack.as_ptr() as usize + offset);
context.kfx = Some(fx);
context.kstack = Some(stack);
}
Ok(context_lock)
}
pub fn remove(&mut self, id: ContextId) -> Option<Arc<RwLock<Context>>> {
self.map.remove(&id)
}
}
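// Illustrative sketch (not in the original source), mirroring the way `kmain`
// drives this API: spawn a kernel context from a function and mark it runnable.
// `example_task` is a made-up placeholder for real kernel work.
#[allow(dead_code)]
extern fn example_task() {
    loop {}
}
#[allow(dead_code)]
fn spawn_sketch() {
    match ::context::contexts_mut().spawn(example_task) {
        Ok(context_lock) => {
            let mut context = context_lock.write();
            context.status = ::context::Status::Runnable;
        },
        Err(err) => println!("spawn failed: {:?}", err)
    }
}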


@ -1,327 +0,0 @@
use alloc::arc::{Arc, Weak};
use collections::VecDeque;
use core::intrinsics;
use spin::Mutex;
use arch::memory::Frame;
use arch::paging::{ActivePageTable, InactivePageTable, Page, PageIter, PhysicalAddress, VirtualAddress};
use arch::paging::entry::{self, EntryFlags};
use arch::paging::mapper::MapperFlushAll;
use arch::paging::temporary_page::TemporaryPage;
#[derive(Debug)]
pub struct Grant {
start: VirtualAddress,
size: usize,
flags: EntryFlags,
mapped: bool
}
impl Grant {
pub fn physmap(from: PhysicalAddress, to: VirtualAddress, size: usize, flags: EntryFlags) -> Grant {
let mut active_table = unsafe { ActivePageTable::new() };
let mut flush_all = MapperFlushAll::new();
let start_page = Page::containing_address(to);
let end_page = Page::containing_address(VirtualAddress::new(to.get() + size - 1));
for page in Page::range_inclusive(start_page, end_page) {
let frame = Frame::containing_address(PhysicalAddress::new(page.start_address().get() - to.get() + from.get()));
let result = active_table.map_to(page, frame, flags);
flush_all.consume(result);
}
flush_all.flush(&mut active_table);
Grant {
start: to,
size: size,
flags: flags,
mapped: true
}
}
pub fn map_inactive(from: VirtualAddress, to: VirtualAddress, size: usize, flags: EntryFlags, new_table: &mut InactivePageTable, temporary_page: &mut TemporaryPage) -> Grant {
let mut active_table = unsafe { ActivePageTable::new() };
let mut frames = VecDeque::new();
let start_page = Page::containing_address(from);
let end_page = Page::containing_address(VirtualAddress::new(from.get() + size - 1));
for page in Page::range_inclusive(start_page, end_page) {
let frame = active_table.translate_page(page).expect("grant references unmapped memory");
frames.push_back(frame);
}
active_table.with(new_table, temporary_page, |mapper| {
let start_page = Page::containing_address(to);
let end_page = Page::containing_address(VirtualAddress::new(to.get() + size - 1));
for page in Page::range_inclusive(start_page, end_page) {
let frame = frames.pop_front().expect("grant did not find enough frames");
let result = mapper.map_to(page, frame, flags);
// Ignore result due to mapping on inactive table
unsafe { result.ignore(); }
}
});
Grant {
start: to,
size: size,
flags: flags,
mapped: true
}
}
pub fn start_address(&self) -> VirtualAddress {
self.start
}
pub fn size(&self) -> usize {
self.size
}
pub fn flags(&self) -> EntryFlags {
self.flags
}
pub fn unmap(mut self) {
assert!(self.mapped);
let mut active_table = unsafe { ActivePageTable::new() };
let mut flush_all = MapperFlushAll::new();
let start_page = Page::containing_address(self.start);
let end_page = Page::containing_address(VirtualAddress::new(self.start.get() + self.size - 1));
for page in Page::range_inclusive(start_page, end_page) {
let (result, _frame) = active_table.unmap_return(page);
flush_all.consume(result);
}
flush_all.flush(&mut active_table);
self.mapped = false;
}
pub fn unmap_inactive(mut self, new_table: &mut InactivePageTable, temporary_page: &mut TemporaryPage) {
assert!(self.mapped);
let mut active_table = unsafe { ActivePageTable::new() };
active_table.with(new_table, temporary_page, |mapper| {
let start_page = Page::containing_address(self.start);
let end_page = Page::containing_address(VirtualAddress::new(self.start.get() + self.size - 1));
for page in Page::range_inclusive(start_page, end_page) {
let (result, _frame) = mapper.unmap_return(page);
// This is not the active table, so the flush can be ignored
unsafe { result.ignore(); }
}
});
self.mapped = false;
}
}
impl Drop for Grant {
fn drop(&mut self) {
assert!(!self.mapped);
}
}
#[derive(Clone, Debug)]
pub enum SharedMemory {
Owned(Arc<Mutex<Memory>>),
Borrowed(Weak<Mutex<Memory>>)
}
impl SharedMemory {
pub fn with<F, T>(&self, f: F) -> T where F: FnOnce(&mut Memory) -> T {
match *self {
SharedMemory::Owned(ref memory_lock) => {
let mut memory = memory_lock.lock();
f(&mut *memory)
},
SharedMemory::Borrowed(ref memory_weak) => {
let memory_lock = memory_weak.upgrade().expect("SharedMemory::Borrowed no longer valid");
let mut memory = memory_lock.lock();
f(&mut *memory)
}
}
}
pub fn borrow(&self) -> SharedMemory {
match *self {
SharedMemory::Owned(ref memory_lock) => SharedMemory::Borrowed(Arc::downgrade(memory_lock)),
SharedMemory::Borrowed(ref memory_lock) => SharedMemory::Borrowed(memory_lock.clone())
}
}
}
#[derive(Debug)]
pub struct Memory {
start: VirtualAddress,
size: usize,
flags: EntryFlags
}
impl Memory {
pub fn new(start: VirtualAddress, size: usize, flags: EntryFlags, clear: bool) -> Self {
let mut memory = Memory {
start: start,
size: size,
flags: flags
};
memory.map(clear);
memory
}
pub fn to_shared(self) -> SharedMemory {
SharedMemory::Owned(Arc::new(Mutex::new(self)))
}
pub fn start_address(&self) -> VirtualAddress {
self.start
}
pub fn size(&self) -> usize {
self.size
}
pub fn flags(&self) -> EntryFlags {
self.flags
}
pub fn pages(&self) -> PageIter {
let start_page = Page::containing_address(self.start);
let end_page = Page::containing_address(VirtualAddress::new(self.start.get() + self.size - 1));
Page::range_inclusive(start_page, end_page)
}
fn map(&mut self, clear: bool) {
let mut active_table = unsafe { ActivePageTable::new() };
let mut flush_all = MapperFlushAll::new();
for page in self.pages() {
let result = active_table.map(page, self.flags);
flush_all.consume(result);
}
flush_all.flush(&mut active_table);
if clear {
assert!(self.flags.contains(entry::WRITABLE));
unsafe {
intrinsics::write_bytes(self.start_address().get() as *mut u8, 0, self.size);
}
}
}
fn unmap(&mut self) {
let mut active_table = unsafe { ActivePageTable::new() };
let mut flush_all = MapperFlushAll::new();
for page in self.pages() {
let result = active_table.unmap(page);
flush_all.consume(result);
}
flush_all.flush(&mut active_table);
}
/// Move this memory region into a new page table.
/// The region can also be given a different starting virtual address at the same time.
pub fn move_to(&mut self, new_start: VirtualAddress, new_table: &mut InactivePageTable, temporary_page: &mut TemporaryPage) {
let mut active_table = unsafe { ActivePageTable::new() };
let mut flush_all = MapperFlushAll::new();
for page in self.pages() {
let (result, frame) = active_table.unmap_return(page);
flush_all.consume(result);
active_table.with(new_table, temporary_page, |mapper| {
let new_page = Page::containing_address(VirtualAddress::new(page.start_address().get() - self.start.get() + new_start.get()));
let result = mapper.map_to(new_page, frame, self.flags);
// This is not the active table, so the flush can be ignored
unsafe { result.ignore(); }
});
}
flush_all.flush(&mut active_table);
self.start = new_start;
}
pub fn remap(&mut self, new_flags: EntryFlags) {
let mut active_table = unsafe { ActivePageTable::new() };
let mut flush_all = MapperFlushAll::new();
for page in self.pages() {
let result = active_table.remap(page, new_flags);
flush_all.consume(result);
}
flush_all.flush(&mut active_table);
self.flags = new_flags;
}
pub fn resize(&mut self, new_size: usize, clear: bool) {
let mut active_table = unsafe { ActivePageTable::new() };
//TODO: Calculate page changes to minimize operations
if new_size > self.size {
let mut flush_all = MapperFlushAll::new();
let start_page = Page::containing_address(VirtualAddress::new(self.start.get() + self.size));
let end_page = Page::containing_address(VirtualAddress::new(self.start.get() + new_size - 1));
for page in Page::range_inclusive(start_page, end_page) {
if active_table.translate_page(page).is_none() {
let result = active_table.map(page, self.flags);
flush_all.consume(result);
}
}
flush_all.flush(&mut active_table);
if clear {
unsafe {
intrinsics::write_bytes((self.start.get() + self.size) as *mut u8, 0, new_size - self.size);
}
}
} else if new_size < self.size {
let mut flush_all = MapperFlushAll::new();
let start_page = Page::containing_address(VirtualAddress::new(self.start.get() + new_size));
let end_page = Page::containing_address(VirtualAddress::new(self.start.get() + self.size - 1));
for page in Page::range_inclusive(start_page, end_page) {
if active_table.translate_page(page).is_some() {
let result = active_table.unmap(page);
flush_all.consume(result);
}
}
flush_all.flush(&mut active_table);
}
self.size = new_size;
}
}
impl Drop for Memory {
fn drop(&mut self) {
self.unmap();
}
}
#[derive(Debug)]
pub struct Tls {
pub master: VirtualAddress,
pub file_size: usize,
pub mem: Memory
}
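// Illustrative sketch (not in the original source): the lifecycle of a `Memory`
// region as the kernel uses it for a user heap. The virtual address is an
// assumption, real values come from the arch crate and the loaded executable, and
// real mappings carry more entry flags than `WRITABLE`. `Memory::new` maps the
// pages immediately in the active page table.
#[allow(dead_code)]
fn heap_sketch() {
    // Map one writable, zeroed page...
    let mut heap = Memory::new(VirtualAddress::new(0x8000_0000), 4096, entry::WRITABLE, true);
    // ...grow it by another page (newly mapped pages are zeroed too)...
    heap.resize(2 * 4096, true);
    // ...and let `Drop` unmap the whole range again.
}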


@ -1,75 +0,0 @@
//! Context management
use alloc::boxed::Box;
use core::sync::atomic::Ordering;
use spin::{Once, RwLock, RwLockReadGuard, RwLockWriteGuard};
pub use self::context::{Context, Status};
pub use self::list::ContextList;
pub use self::switch::switch;
pub use context::context::ContextId;
/// Context struct
mod context;
/// Context list
mod list;
/// Context switch function
mod switch;
/// Event handling
pub mod event;
/// File struct - defines a scheme and a file number
pub mod file;
/// Memory struct - contains a set of pages for a context
pub mod memory;
/// Limit on number of contexts
pub const CONTEXT_MAX_CONTEXTS: usize = usize::max_value() - 1;
/// Maximum context files
pub const CONTEXT_MAX_FILES: usize = 65536;
/// Contexts list
static CONTEXTS: Once<RwLock<ContextList>> = Once::new();
#[thread_local]
static CONTEXT_ID: context::AtomicContextId = context::AtomicContextId::default();
pub fn init() {
let mut contexts = contexts_mut();
let context_lock = contexts.new_context().expect("could not initialize first context");
let mut context = context_lock.write();
let mut fx = unsafe { Box::from_raw(::alloc::heap::allocate(512, 16) as *mut [u8; 512]) };
for b in fx.iter_mut() {
*b = 0;
}
context.arch.set_fx(fx.as_ptr() as usize);
context.kfx = Some(fx);
context.status = Status::Runnable;
context.running = true;
context.cpu_id = Some(::cpu_id());
CONTEXT_ID.store(context.id, Ordering::SeqCst);
}
/// Initialize contexts, called if needed
fn init_contexts() -> RwLock<ContextList> {
RwLock::new(ContextList::new())
}
/// Get the global contexts list, const
pub fn contexts() -> RwLockReadGuard<'static, ContextList> {
CONTEXTS.call_once(init_contexts).read()
}
/// Get the global contexts list, mutable
pub fn contexts_mut() -> RwLockWriteGuard<'static, ContextList> {
CONTEXTS.call_once(init_contexts).write()
}
pub fn context_id() -> context::ContextId {
CONTEXT_ID.load(Ordering::SeqCst)
}
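// Illustrative sketch (not in the original source): read-only access to the current
// context through the global list, the same pattern the schemes below use.
#[allow(dead_code)]
fn current_context_sketch() {
    let contexts = contexts();
    if let Some(context_lock) = contexts.current() {
        let context = context_lock.read();
        println!("context {:?}, parent {:?}", context.id, context.ppid);
    }
}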


@ -1,115 +0,0 @@
use core::sync::atomic::Ordering;
use arch;
use context::{contexts, Context, Status, CONTEXT_ID};
use syscall;
/// Switch to the next context
///
/// # Safety
///
/// Do not call this while holding locks!
pub unsafe fn switch() -> bool {
use core::ops::DerefMut;
// Take the global context switch lock so the unsafe operations below cannot race with other CPUs
while arch::context::CONTEXT_SWITCH_LOCK.compare_and_swap(false, true, Ordering::SeqCst) {
arch::interrupt::pause();
}
let cpu_id = ::cpu_id();
let from_ptr;
let mut to_ptr = 0 as *mut Context;
let mut to_sig = None;
{
let contexts = contexts();
{
let context_lock = contexts.current().expect("context::switch: not inside of context");
let mut context = context_lock.write();
from_ptr = context.deref_mut() as *mut Context;
}
let check_context = |context: &mut Context| -> bool {
if context.cpu_id == None && cpu_id == 0 {
context.cpu_id = Some(cpu_id);
// println!("{}: take {} {}", cpu_id, context.id, ::core::str::from_utf8_unchecked(&context.name.lock()));
}
if context.status == Status::Blocked && ! context.pending.is_empty() {
context.unblock();
}
if context.status == Status::Blocked && context.wake.is_some() {
let wake = context.wake.expect("context::switch: wake not set");
let current = arch::time::monotonic();
if current.0 > wake.0 || (current.0 == wake.0 && current.1 >= wake.1) {
context.unblock();
}
}
if context.cpu_id == Some(cpu_id) {
if context.status == Status::Runnable && ! context.running {
return true;
}
}
false
};
for (pid, context_lock) in contexts.iter() {
if *pid > (*from_ptr).id {
let mut context = context_lock.write();
if check_context(&mut context) {
to_ptr = context.deref_mut() as *mut Context;
to_sig = context.pending.pop_front();
break;
}
}
}
if to_ptr as usize == 0 {
for (pid, context_lock) in contexts.iter() {
if *pid < (*from_ptr).id {
let mut context = context_lock.write();
if check_context(&mut context) {
to_ptr = context.deref_mut() as *mut Context;
to_sig = context.pending.pop_front();
break;
}
}
}
}
};
if to_ptr as usize == 0 {
// Unset global lock if no context found
arch::context::CONTEXT_SWITCH_LOCK.store(false, Ordering::SeqCst);
return false;
}
(&mut *from_ptr).running = false;
(&mut *to_ptr).running = true;
if let Some(ref stack) = (*to_ptr).kstack {
arch::gdt::TSS.rsp[0] = (stack.as_ptr() as usize + stack.len() - 256) as u64;
}
CONTEXT_ID.store((&mut *to_ptr).id, Ordering::SeqCst);
// Unset global lock before switch, as arch is only usable by the current CPU at this time
arch::context::CONTEXT_SWITCH_LOCK.store(false, Ordering::SeqCst);
if let Some(sig) = to_sig {
println!("Handle {}", sig);
(&mut *to_ptr).arch.signal_stack(signal_handler, sig);
}
(&mut *from_ptr).arch.switch_to(&mut (&mut *to_ptr).arch);
true
}
extern fn signal_handler(signal: usize) {
println!("Signal handler: {}", signal);
syscall::exit(signal);
}


@ -1,73 +0,0 @@
//! ELF executables
use collections::String;
use core::str;
#[cfg(target_arch = "x86")]
pub use goblin::elf32::{header, program_header};
#[cfg(target_arch = "x86_64")]
pub use goblin::elf64::{header, program_header};
/// An ELF executable
pub struct Elf<'a> {
pub data: &'a [u8],
header: &'a header::Header
}
impl<'a> Elf<'a> {
/// Create a ELF executable from data
pub fn from(data: &'a [u8]) -> Result<Elf<'a>, String> {
if data.len() < header::SIZEOF_EHDR {
Err(format!("Elf: Not enough data: {} < {}", data.len(), header::SIZEOF_EHDR))
} else if &data[..header::SELFMAG] != header::ELFMAG {
Err(format!("Elf: Invalid magic: {:?} != {:?}", &data[..4], header::ELFMAG))
} else if data.get(header::EI_CLASS) != Some(&header::ELFCLASS) {
Err(format!("Elf: Invalid architecture: {:?} != {:?}", data.get(header::EI_CLASS), header::ELFCLASS))
} else {
Ok(Elf {
data: data,
header: unsafe { &*(data.as_ptr() as usize as *const header::Header) }
})
}
}
pub fn segments(&'a self) -> ElfSegments<'a> {
ElfSegments {
data: self.data,
header: self.header,
i: 0
}
}
/// Get the entry field of the header
pub fn entry(&self) -> usize {
self.header.e_entry as usize
}
}
pub struct ElfSegments<'a> {
data: &'a [u8],
header: &'a header::Header,
i: usize
}
impl<'a> Iterator for ElfSegments<'a> {
type Item = &'a program_header::ProgramHeader;
fn next(&mut self) -> Option<Self::Item> {
if self.i < self.header.e_phnum as usize {
let item = unsafe {
&* ((
self.data.as_ptr() as usize
+ self.header.e_phoff as usize
+ self.i * self.header.e_phentsize as usize
) as *const program_header::ProgramHeader)
};
self.i += 1;
Some(item)
} else {
None
}
}
}
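// Illustrative sketch (not in the original source) of how exec-style code can walk
// an ELF image with this parser; `image` stands in for the bytes of an executable.
#[allow(dead_code)]
fn elf_sketch(image: &[u8]) {
    match Elf::from(image) {
        Ok(elf) => {
            println!("entry point {:#x}", elf.entry());
            for segment in elf.segments() {
                if segment.p_type == program_header::PT_LOAD {
                    println!("load segment of {:#x} bytes at {:#x}", segment.p_filesz, segment.p_vaddr);
                }
            }
        },
        Err(err) => println!("invalid ELF: {}", err)
    }
}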


@ -1,180 +0,0 @@
//! # The Redox OS Kernel, version 2
//!
//! The Redox OS Kernel is a hybrid kernel that supports x86_64 systems and
//! provides Unix-like syscalls, primarily for Rust applications.
#![deny(warnings)]
#![feature(alloc)]
#![feature(asm)]
#![feature(collections)]
#![feature(const_fn)]
#![feature(core_intrinsics)]
#![feature(drop_types_in_const)]
#![feature(heap_api)]
#![feature(integer_atomics)]
#![feature(never_type)]
#![feature(thread_local)]
#![no_std]
use arch::interrupt;
/// Architecture specific items (test)
#[cfg(test)]
#[macro_use]
extern crate arch_test as arch;
/// Architecture specific items (ARM)
#[cfg(all(not(test), target_arch = "arm"))]
#[macro_use]
extern crate arch_arm as arch;
/// Architecture specific items (x86_64)
#[cfg(all(not(test), target_arch = "x86_64"))]
#[macro_use]
extern crate arch_x86_64 as arch;
extern crate alloc;
#[macro_use]
extern crate collections;
#[macro_use]
extern crate bitflags;
extern crate goblin;
extern crate spin;
use core::sync::atomic::{AtomicUsize, ATOMIC_USIZE_INIT, Ordering};
use scheme::FileHandle;
#[macro_use]
/// Shared data structures
pub mod common;
/// Context management
pub mod context;
/// ELF file parsing
pub mod elf;
/// Schemes, filesystem handlers
pub mod scheme;
/// Synchronization primitives
pub mod sync;
/// Syscall handlers
pub mod syscall;
/// Tests
#[cfg(test)]
pub mod tests;
/// A unique number that identifies the current CPU - used for scheduling
#[thread_local]
static CPU_ID: AtomicUsize = ATOMIC_USIZE_INIT;
/// Get the current CPU's scheduling ID
#[inline(always)]
pub fn cpu_id() -> usize {
CPU_ID.load(Ordering::Relaxed)
}
/// The count of all CPUs that can have work scheduled
static CPU_COUNT : AtomicUsize = ATOMIC_USIZE_INIT;
/// Get the number of CPUs currently active
#[inline(always)]
pub fn cpu_count() -> usize {
CPU_COUNT.load(Ordering::Relaxed)
}
/// Initialize userspace by running the initfs:/bin/init process.
/// This function also sets the CWD to initfs: and opens debug: as stdio.
pub extern fn userspace_init() {
assert_eq!(syscall::chdir(b"initfs:"), Ok(0));
assert_eq!(syscall::open(b"debug:", syscall::flag::O_RDONLY).map(FileHandle::into), Ok(0));
assert_eq!(syscall::open(b"debug:", syscall::flag::O_WRONLY).map(FileHandle::into), Ok(1));
assert_eq!(syscall::open(b"debug:", syscall::flag::O_WRONLY).map(FileHandle::into), Ok(2));
syscall::exec(b"/bin/init", &[]).expect("failed to execute init");
panic!("init returned");
}
/// Allow exception handlers to send a signal to the arch-independent kernel
#[no_mangle]
pub extern fn ksignal(signal: usize) {
println!("SIGNAL {}, CPU {}, PID {:?}", signal, cpu_id(), context::context_id());
{
let contexts = context::contexts();
if let Some(context_lock) = contexts.current() {
let context = context_lock.read();
println!("NAME {}", unsafe { ::core::str::from_utf8_unchecked(&context.name.lock()) });
}
}
syscall::exit(signal & 0x7F);
}
/// This is the kernel entry point for the primary CPU. The arch crate is responsible for calling this
#[no_mangle]
pub extern fn kmain(cpus: usize) {
CPU_ID.store(0, Ordering::SeqCst);
CPU_COUNT.store(cpus, Ordering::SeqCst);
context::init();
let pid = syscall::getpid();
println!("BSP: {:?} {}", pid, cpus);
match context::contexts_mut().spawn(userspace_init) {
Ok(context_lock) => {
let mut context = context_lock.write();
context.status = context::Status::Runnable;
},
Err(err) => {
panic!("failed to spawn userspace_init: {:?}", err);
}
}
loop {
unsafe {
interrupt::disable();
if context::switch() {
interrupt::enable_and_nop();
} else {
// Enable interrupts, then halt CPU (to save power) until the next interrupt is actually fired.
interrupt::enable_and_halt();
}
}
}
}
/// This is the main kernel entry point for secondary CPUs
#[no_mangle]
pub extern fn kmain_ap(_id: usize) {
// Disable APs for now
loop {
unsafe { interrupt::enable_and_halt(); }
}
/*
CPU_ID.store(id, Ordering::SeqCst);
context::init();
let pid = syscall::getpid();
println!("AP {}: {:?}", id, pid);
loop {
unsafe {
interrupt::disable();
if context::switch() {
interrupt::enable_and_nop();
} else {
// Enable interrupts, then halt CPU (to save power) until the next interrupt is actually fired.
interrupt::enable_and_halt();
}
}
}
*/
}


@ -1,75 +0,0 @@
use core::str;
use core::sync::atomic::Ordering;
use spin::Once;
use context;
use scheme::*;
use sync::WaitQueue;
use syscall::flag::EVENT_READ;
use syscall::scheme::Scheme;
pub static DEBUG_SCHEME_ID: AtomicSchemeId = ATOMIC_SCHEMEID_INIT;
/// Input queue
static INPUT: Once<WaitQueue<u8>> = Once::new();
/// Initialize input queue, called if needed
fn init_input() -> WaitQueue<u8> {
WaitQueue::new()
}
/// Add to the input queue
#[no_mangle]
pub extern fn debug_input(b: u8) {
let len = INPUT.call_once(init_input).send(b);
context::event::trigger(DEBUG_SCHEME_ID.load(Ordering::SeqCst), 0, EVENT_READ, len);
}
pub struct DebugScheme;
impl DebugScheme {
pub fn new(scheme_id: SchemeId) -> DebugScheme {
DEBUG_SCHEME_ID.store(scheme_id, Ordering::SeqCst);
DebugScheme
}
}
impl Scheme for DebugScheme {
fn open(&self, _path: &[u8], _flags: usize, _uid: u32, _gid: u32) -> Result<usize> {
Ok(0)
}
fn dup(&self, _file: usize, _buf: &[u8]) -> Result<usize> {
Ok(0)
}
/// Read the file `number` into the `buffer`
///
/// Returns the number of bytes read
fn read(&self, _file: usize, buf: &mut [u8]) -> Result<usize> {
Ok(INPUT.call_once(init_input).receive_into(buf, true))
}
/// Write the `buffer` to the `file`
///
/// Returns the number of bytes written
fn write(&self, _file: usize, buffer: &[u8]) -> Result<usize> {
//TODO: Write bytes, do not convert to str
print!("{}", unsafe { str::from_utf8_unchecked(buffer) });
Ok(buffer.len())
}
fn fevent(&self, _file: usize, _flags: usize) -> Result<usize> {
Ok(0)
}
fn fsync(&self, _file: usize) -> Result<usize> {
Ok(0)
}
/// Close the file `number`
fn close(&self, _file: usize) -> Result<usize> {
Ok(0)
}
}
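// Illustrative sketch (not in the original source): driving the scheme through the
// `Scheme` trait, the same calls the syscall layer makes on behalf of a userspace
// `open`/`write` on `debug:`.
#[allow(dead_code)]
fn debug_write_sketch(scheme: &DebugScheme) {
    let file = scheme.open(b"", 0, 0, 0).expect("debug: open failed");
    scheme.write(file, b"hello from the kernel\n").expect("debug: write failed");
    scheme.close(file).expect("debug: close failed");
}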


@ -1,193 +0,0 @@
use alloc::arc::Arc;
use collections::{BTreeMap, Vec};
use core::{cmp, str};
use core::sync::atomic::{AtomicUsize, Ordering};
use spin::{Mutex, RwLock};
use context;
use syscall::data::Stat;
use syscall::error::*;
use syscall::flag::{MODE_FILE, SEEK_SET, SEEK_CUR, SEEK_END};
use syscall::scheme::Scheme;
#[derive(Clone)]
struct Handle {
data: Arc<Mutex<Vec<u8>>>,
mode: u16,
seek: usize
}
pub struct EnvScheme {
next_id: AtomicUsize,
handles: RwLock<BTreeMap<usize, Handle>>
}
impl EnvScheme {
pub fn new() -> EnvScheme {
EnvScheme {
next_id: AtomicUsize::new(0),
handles: RwLock::new(BTreeMap::new())
}
}
}
impl Scheme for EnvScheme {
fn open(&self, path: &[u8], _flags: usize, _uid: u32, _gid: u32) -> Result<usize> {
let path = str::from_utf8(path).map_err(|_err| Error::new(ENOENT))?.trim_matches('/');
let env_lock = {
let contexts = context::contexts();
let context_lock = contexts.current().ok_or(Error::new(ESRCH))?;
let context = context_lock.read();
context.env.clone()
};
if path.is_empty() {
let mut list = Vec::new();
{
let env = env_lock.lock();
for entry in env.iter() {
if ! list.is_empty() {
list.push(b'\n');
}
list.extend_from_slice(&entry.0);
list.push(b'=');
list.extend_from_slice(&entry.1.lock());
}
}
let id = self.next_id.fetch_add(1, Ordering::SeqCst);
self.handles.write().insert(id, Handle {
data: Arc::new(Mutex::new(list)),
mode: MODE_FILE,
seek: 0
});
Ok(id)
} else {
let data = {
let mut env = env_lock.lock();
if env.contains_key(path.as_bytes()) {
env[path.as_bytes()].clone()
} else /*if flags & O_CREAT == O_CREAT*/ {
let name = path.as_bytes().to_vec().into_boxed_slice();
let data = Arc::new(Mutex::new(Vec::new()));
env.insert(name, data.clone());
data
}
};
let id = self.next_id.fetch_add(1, Ordering::SeqCst);
self.handles.write().insert(id, Handle {
data: data,
mode: MODE_FILE,
seek: 0
});
Ok(id)
}
}
fn dup(&self, id: usize, _buf: &[u8]) -> Result<usize> {
let new_handle = {
let handles = self.handles.read();
let handle = handles.get(&id).ok_or(Error::new(EBADF))?;
handle.clone()
};
let id = self.next_id.fetch_add(1, Ordering::SeqCst);
self.handles.write().insert(id, new_handle);
Ok(id)
}
fn read(&self, id: usize, buffer: &mut [u8]) -> Result<usize> {
let mut handles = self.handles.write();
let mut handle = handles.get_mut(&id).ok_or(Error::new(EBADF))?;
let data = handle.data.lock();
let mut i = 0;
while i < buffer.len() && handle.seek < data.len() {
buffer[i] = data[handle.seek];
i += 1;
handle.seek += 1;
}
Ok(i)
}
fn write(&self, id: usize, buffer: &[u8]) -> Result<usize> {
let mut handles = self.handles.write();
let mut handle = handles.get_mut(&id).ok_or(Error::new(EBADF))?;
let mut data = handle.data.lock();
let mut i = 0;
while i < buffer.len() && handle.seek < data.len() {
data[handle.seek] = buffer[i];
i += 1;
handle.seek += 1;
}
while i < buffer.len() {
data.push(buffer[i]);
i += 1;
handle.seek += 1;
}
Ok(i)
}
fn seek(&self, id: usize, pos: usize, whence: usize) -> Result<usize> {
let mut handles = self.handles.write();
let mut handle = handles.get_mut(&id).ok_or(Error::new(EBADF))?;
let len = handle.data.lock().len();
handle.seek = match whence {
SEEK_SET => cmp::min(len, pos),
SEEK_CUR => cmp::max(0, cmp::min(len as isize, handle.seek as isize + pos as isize)) as usize,
SEEK_END => cmp::max(0, cmp::min(len as isize, len as isize + pos as isize)) as usize,
_ => return Err(Error::new(EINVAL))
};
Ok(handle.seek)
}
fn fstat(&self, id: usize, stat: &mut Stat) -> Result<usize> {
let handles = self.handles.read();
let handle = handles.get(&id).ok_or(Error::new(EBADF))?;
stat.st_mode = handle.mode;
stat.st_size = handle.data.lock().len() as u64;
Ok(0)
}
fn fsync(&self, id: usize) -> Result<usize> {
let handles = self.handles.read();
let _handle = handles.get(&id).ok_or(Error::new(EBADF))?;
Ok(0)
}
fn ftruncate(&self, id: usize, len: usize) -> Result<usize> {
let handles = self.handles.read();
let handle = handles.get(&id).ok_or(Error::new(EBADF))?;
let mut data = handle.data.lock();
if len < data.len() {
data.truncate(len)
} else {
while len > data.len() {
data.push(0);
}
}
Ok(0)
}
fn close(&self, id: usize) -> Result<usize> {
self.handles.write().remove(&id).ok_or(Error::new(EBADF)).and(Ok(0))
}
}


@ -1,74 +0,0 @@
use alloc::arc::{Arc, Weak};
use collections::BTreeMap;
use core::{mem, slice};
use core::sync::atomic::{AtomicUsize, Ordering};
use spin::RwLock;
use context;
use sync::WaitQueue;
use syscall::data::Event;
use syscall::error::*;
use syscall::scheme::Scheme;
pub struct EventScheme {
next_id: AtomicUsize,
handles: RwLock<BTreeMap<usize, Weak<WaitQueue<Event>>>>
}
impl EventScheme {
pub fn new() -> EventScheme {
EventScheme {
next_id: AtomicUsize::new(0),
handles: RwLock::new(BTreeMap::new())
}
}
}
impl Scheme for EventScheme {
fn open(&self, _path: &[u8], _flags: usize, _uid: u32, _gid: u32) -> Result<usize> {
let handle = {
let contexts = context::contexts();
let context_lock = contexts.current().ok_or(Error::new(ESRCH))?;
let context = context_lock.read();
context.events.clone()
};
let id = self.next_id.fetch_add(1, Ordering::SeqCst);
self.handles.write().insert(id, Arc::downgrade(&handle));
Ok(id)
}
fn dup(&self, id: usize, _buf: &[u8]) -> Result<usize> {
let handle = {
let handles = self.handles.read();
let handle_weak = handles.get(&id).ok_or(Error::new(EBADF))?;
handle_weak.upgrade().ok_or(Error::new(EBADF))?
};
let new_id = self.next_id.fetch_add(1, Ordering::SeqCst);
self.handles.write().insert(new_id, Arc::downgrade(&handle));
Ok(new_id)
}
fn read(&self, id: usize, buf: &mut [u8]) -> Result<usize> {
let handle = {
let handles = self.handles.read();
let handle_weak = handles.get(&id).ok_or(Error::new(EBADF))?;
handle_weak.upgrade().ok_or(Error::new(EBADF))?
};
let event_buf = unsafe { slice::from_raw_parts_mut(buf.as_mut_ptr() as *mut Event, buf.len()/mem::size_of::<Event>()) };
Ok(handle.receive_into(event_buf, true) * mem::size_of::<Event>())
}
fn fsync(&self, id: usize) -> Result<usize> {
let handles = self.handles.read();
let handle_weak = handles.get(&id).ok_or(Error::new(EBADF))?;
handle_weak.upgrade().ok_or(Error::new(EBADF)).and(Ok(0))
}
fn close(&self, id: usize) -> Result<usize> {
self.handles.write().remove(&id).ok_or(Error::new(EBADF)).and(Ok(0))
}
}


@ -1,154 +0,0 @@
use collections::BTreeMap;
use core::{cmp, str};
use core::sync::atomic::{AtomicUsize, Ordering};
use spin::RwLock;
use syscall::data::Stat;
use syscall::error::*;
use syscall::flag::{MODE_DIR, MODE_FILE, SEEK_SET, SEEK_CUR, SEEK_END};
use syscall::scheme::Scheme;
#[cfg(test)]
mod gen {
use collections::BTreeMap;
pub fn gen() -> BTreeMap<&'static [u8], (&'static [u8], bool)> { BTreeMap::new() }
}
#[cfg(not(test))]
#[path="../../../build/userspace/initfs.rs"]
mod gen;
struct Handle {
path: &'static [u8],
data: &'static [u8],
mode: u16,
seek: usize
}
pub struct InitFsScheme {
next_id: AtomicUsize,
files: BTreeMap<&'static [u8], (&'static [u8], bool)>,
handles: RwLock<BTreeMap<usize, Handle>>
}
impl InitFsScheme {
pub fn new() -> InitFsScheme {
InitFsScheme {
next_id: AtomicUsize::new(0),
files: gen::gen(),
handles: RwLock::new(BTreeMap::new())
}
}
}
impl Scheme for InitFsScheme {
fn open(&self, path: &[u8], _flags: usize, _uid: u32, _gid: u32) -> Result<usize> {
let path_utf8 = str::from_utf8(path).map_err(|_err| Error::new(ENOENT))?;
let path_trimmed = path_utf8.trim_matches('/');
//Have to iterate to get the path without allocation
for entry in self.files.iter() {
if entry.0 == &path_trimmed.as_bytes() {
let id = self.next_id.fetch_add(1, Ordering::SeqCst);
self.handles.write().insert(id, Handle {
path: entry.0,
data: (entry.1).0,
mode: if (entry.1).1 { MODE_DIR | 0o755 } else { MODE_FILE | 0o744 },
seek: 0
});
return Ok(id);
}
}
Err(Error::new(ENOENT))
}
fn dup(&self, id: usize, _buf: &[u8]) -> Result<usize> {
let (path, data, mode, seek) = {
let handles = self.handles.read();
let handle = handles.get(&id).ok_or(Error::new(EBADF))?;
(handle.path, handle.data, handle.mode, handle.seek)
};
let id = self.next_id.fetch_add(1, Ordering::SeqCst);
self.handles.write().insert(id, Handle {
path: path,
data: data,
mode: mode,
seek: seek
});
Ok(id)
}
fn read(&self, id: usize, buffer: &mut [u8]) -> Result<usize> {
let mut handles = self.handles.write();
let mut handle = handles.get_mut(&id).ok_or(Error::new(EBADF))?;
let mut i = 0;
while i < buffer.len() && handle.seek < handle.data.len() {
buffer[i] = handle.data[handle.seek];
i += 1;
handle.seek += 1;
}
Ok(i)
}
fn seek(&self, id: usize, pos: usize, whence: usize) -> Result<usize> {
let mut handles = self.handles.write();
let mut handle = handles.get_mut(&id).ok_or(Error::new(EBADF))?;
handle.seek = match whence {
SEEK_SET => cmp::min(handle.data.len(), pos),
SEEK_CUR => cmp::max(0, cmp::min(handle.data.len() as isize, handle.seek as isize + pos as isize)) as usize,
SEEK_END => cmp::max(0, cmp::min(handle.data.len() as isize, handle.data.len() as isize + pos as isize)) as usize,
_ => return Err(Error::new(EINVAL))
};
Ok(handle.seek)
}
fn fpath(&self, id: usize, buf: &mut [u8]) -> Result<usize> {
let handles = self.handles.read();
let handle = handles.get(&id).ok_or(Error::new(EBADF))?;
//TODO: Copy scheme part in kernel
let mut i = 0;
let scheme_path = b"initfs:";
while i < buf.len() && i < scheme_path.len() {
buf[i] = scheme_path[i];
i += 1;
}
let mut j = 0;
while i < buf.len() && j < handle.path.len() {
buf[i] = handle.path[j];
i += 1;
j += 1;
}
Ok(i)
}
fn fstat(&self, id: usize, stat: &mut Stat) -> Result<usize> {
let handles = self.handles.read();
let handle = handles.get(&id).ok_or(Error::new(EBADF))?;
stat.st_mode = handle.mode;
stat.st_uid = 0;
stat.st_gid = 0;
stat.st_size = handle.data.len() as u64;
Ok(0)
}
fn fsync(&self, _id: usize) -> Result<usize> {
Ok(0)
}
fn close(&self, id: usize) -> Result<usize> {
self.handles.write().remove(&id).ok_or(Error::new(EBADF)).and(Ok(0))
}
}


@ -1,101 +0,0 @@
use core::{mem, str};
use core::sync::atomic::Ordering;
use spin::Mutex;
use arch::interrupt::irq::acknowledge;
use context;
use scheme::{AtomicSchemeId, ATOMIC_SCHEMEID_INIT, SchemeId};
use syscall::error::*;
use syscall::flag::EVENT_READ;
use syscall::scheme::Scheme;
pub static IRQ_SCHEME_ID: AtomicSchemeId = ATOMIC_SCHEMEID_INIT;
/// Per-IRQ acknowledgement and trigger counts
static ACKS: Mutex<[usize; 16]> = Mutex::new([0; 16]);
static COUNTS: Mutex<[usize; 16]> = Mutex::new([0; 16]);
/// Called by the IRQ handler: count the interrupt and notify any listeners
#[no_mangle]
pub extern fn irq_trigger(irq: u8) {
COUNTS.lock()[irq as usize] += 1;
context::event::trigger(IRQ_SCHEME_ID.load(Ordering::SeqCst), irq as usize, EVENT_READ, mem::size_of::<usize>());
}
pub struct IrqScheme;
impl IrqScheme {
pub fn new(scheme_id: SchemeId) -> IrqScheme {
IRQ_SCHEME_ID.store(scheme_id, Ordering::SeqCst);
IrqScheme
}
}
impl Scheme for IrqScheme {
fn open(&self, path: &[u8], _flags: usize, uid: u32, _gid: u32) -> Result<usize> {
if uid == 0 {
let path_str = str::from_utf8(path).or(Err(Error::new(ENOENT)))?;
let id = path_str.parse::<usize>().or(Err(Error::new(ENOENT)))?;
if id < COUNTS.lock().len() {
Ok(id)
} else {
Err(Error::new(ENOENT))
}
} else {
Err(Error::new(EACCES))
}
}
fn dup(&self, file: usize, _buf: &[u8]) -> Result<usize> {
Ok(file)
}
fn read(&self, file: usize, buffer: &mut [u8]) -> Result<usize> {
// Ensure the buffer is at least as large as a usize
if buffer.len() >= mem::size_of::<usize>() {
let ack = ACKS.lock()[file];
let current = COUNTS.lock()[file];
if ack != current {
// Safe because the buffer is at least as large as a usize (checked above)
assert!(buffer.len() >= mem::size_of::<usize>());
unsafe { *(buffer.as_mut_ptr() as *mut usize) = current; }
Ok(mem::size_of::<usize>())
} else {
Ok(0)
}
} else {
Err(Error::new(EINVAL))
}
}
fn write(&self, file: usize, buffer: &[u8]) -> Result<usize> {
if buffer.len() >= mem::size_of::<usize>() {
assert!(buffer.len() >= mem::size_of::<usize>());
let ack = unsafe { *(buffer.as_ptr() as *const usize) };
let current = COUNTS.lock()[file];
if ack == current {
ACKS.lock()[file] = ack;
unsafe { acknowledge(file); }
Ok(mem::size_of::<usize>())
} else {
Ok(0)
}
} else {
Err(Error::new(EINVAL))
}
}
fn fevent(&self, file: usize, _flags: usize) -> Result<usize> {
Ok(file)
}
fn fsync(&self, _file: usize) -> Result<usize> {
Ok(0)
}
fn close(&self, _file: usize) -> Result<usize> {
Ok(0)
}
}
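// Illustrative sketch (not in the original source) of the acknowledge protocol a
// driver is expected to follow, expressed directly against the `Scheme` trait:
// read the current interrupt count, service the device, then write the same count
// back to acknowledge the IRQ. The 8-byte buffer assumes x86_64.
#[allow(dead_code)]
fn irq_ack_sketch(scheme: &IrqScheme, file: usize) {
    let mut count = [0u8; 8]; // size_of::<usize>() on x86_64
    if scheme.read(file, &mut count).unwrap_or(0) == count.len() {
        // ... service the device here ...
        let _ = scheme.write(file, &count);
    }
}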


@ -1,160 +0,0 @@
//! Disk scheme replacement when making a live disk
use alloc::arc::Arc;
use collections::{BTreeMap, Vec};
use core::cmp;
use core::sync::atomic::{AtomicUsize, Ordering};
use spin::RwLock;
use syscall::data::Stat;
use syscall::error::*;
use syscall::flag::{MODE_FILE, SEEK_SET, SEEK_CUR, SEEK_END};
use syscall::scheme::Scheme;
static FILESYSTEM: &'static [u8] = include_bytes!("../../../build/filesystem.bin");
struct Handle {
path: &'static [u8],
data: Arc<RwLock<Vec<u8>>>,
mode: u16,
seek: usize
}
pub struct DiskScheme {
next_id: AtomicUsize,
data: Arc<RwLock<Vec<u8>>>,
handles: RwLock<BTreeMap<usize, Handle>>
}
impl DiskScheme {
pub fn new() -> DiskScheme {
DiskScheme {
next_id: AtomicUsize::new(0),
data: Arc::new(RwLock::new(FILESYSTEM.to_vec())),
handles: RwLock::new(BTreeMap::new())
}
}
}
impl Scheme for DiskScheme {
fn open(&self, _path: &[u8], _flags: usize, _uid: u32, _gid: u32) -> Result<usize> {
let id = self.next_id.fetch_add(1, Ordering::SeqCst);
self.handles.write().insert(id, Handle {
path: b"0",
data: self.data.clone(),
mode: MODE_FILE | 0o744,
seek: 0
});
Ok(id)
}
fn dup(&self, id: usize, _buf: &[u8]) -> Result<usize> {
let (path, data, mode, seek) = {
let handles = self.handles.read();
let handle = handles.get(&id).ok_or(Error::new(EBADF))?;
(handle.path, handle.data.clone(), handle.mode, handle.seek)
};
let id = self.next_id.fetch_add(1, Ordering::SeqCst);
self.handles.write().insert(id, Handle {
path: path,
data: data,
mode: mode,
seek: seek
});
Ok(id)
}
fn read(&self, id: usize, buffer: &mut [u8]) -> Result<usize> {
let mut handles = self.handles.write();
let mut handle = handles.get_mut(&id).ok_or(Error::new(EBADF))?;
let data = handle.data.read();
let mut i = 0;
while i < buffer.len() && handle.seek < data.len() {
buffer[i] = data[handle.seek];
i += 1;
handle.seek += 1;
}
Ok(i)
}
fn write(&self, id: usize, buffer: &[u8]) -> Result<usize> {
let mut handles = self.handles.write();
let mut handle = handles.get_mut(&id).ok_or(Error::new(EBADF))?;
let mut data = handle.data.write();
let mut i = 0;
while i < buffer.len() && handle.seek < data.len() {
data[handle.seek] = buffer[i];
i += 1;
handle.seek += 1;
}
Ok(i)
}
fn seek(&self, id: usize, pos: usize, whence: usize) -> Result<usize> {
let mut handles = self.handles.write();
let mut handle = handles.get_mut(&id).ok_or(Error::new(EBADF))?;
let data = handle.data.read();
handle.seek = match whence {
SEEK_SET => cmp::min(data.len(), pos),
SEEK_CUR => cmp::max(0, cmp::min(data.len() as isize, handle.seek as isize + pos as isize)) as usize,
SEEK_END => cmp::max(0, cmp::min(data.len() as isize, data.len() as isize + pos as isize)) as usize,
_ => return Err(Error::new(EINVAL))
};
Ok(handle.seek)
}
fn fpath(&self, id: usize, buf: &mut [u8]) -> Result<usize> {
let handles = self.handles.read();
let handle = handles.get(&id).ok_or(Error::new(EBADF))?;
//TODO: Copy scheme part in kernel
let mut i = 0;
let scheme_path = b"disk:";
while i < buf.len() && i < scheme_path.len() {
buf[i] = scheme_path[i];
i += 1;
}
let mut j = 0;
while i < buf.len() && j < handle.path.len() {
buf[i] = handle.path[j];
i += 1;
j += 1;
}
Ok(i)
}
fn fstat(&self, id: usize, stat: &mut Stat) -> Result<usize> {
let handles = self.handles.read();
let handle = handles.get(&id).ok_or(Error::new(EBADF))?;
let data = handle.data.read();
stat.st_mode = handle.mode;
stat.st_uid = 0;
stat.st_gid = 0;
stat.st_size = data.len() as u64;
Ok(0)
}
fn fsync(&self, id: usize) -> Result<usize> {
let handles = self.handles.read();
let _handle = handles.get(&id).ok_or(Error::new(EBADF))?;
Ok(0)
}
fn close(&self, id: usize) -> Result<usize> {
self.handles.write().remove(&id).ok_or(Error::new(EBADF)).and(Ok(0))
}
}


@ -1,30 +0,0 @@
use arch::memory::{free_frames, used_frames};
use syscall::data::StatVfs;
use syscall::error::*;
use syscall::scheme::Scheme;
pub struct MemoryScheme;
impl Scheme for MemoryScheme {
fn open(&self, _path: &[u8], _flags: usize, _uid: u32, _gid: u32) -> Result<usize> {
Ok(0)
}
fn fstatvfs(&self, _file: usize, stat: &mut StatVfs) -> Result<usize> {
let used = used_frames() as u64;
let free = free_frames() as u64;
stat.f_bsize = 4096;
stat.f_blocks = used + free;
stat.f_bfree = free;
stat.f_bavail = stat.f_bfree;
Ok(0)
}
/// Close the file `number`
fn close(&self, _file: usize) -> Result<usize> {
Ok(0)
}
}


@ -1,246 +0,0 @@
//! # Schemes
//! A scheme is a primitive for handling filesystem syscalls in Redox.
//! Schemes accept paths from the kernel for `open`, and file descriptors that they generate
//! are then passed for operations like `close`, `read`, `write`, etc.
//!
//! The kernel validates paths and file descriptors before they are passed to schemes,
//! also stripping the scheme identifier from paths if necessary.
use alloc::arc::Arc;
use alloc::boxed::Box;
use collections::BTreeMap;
use core::sync::atomic::AtomicUsize;
use spin::{Once, RwLock, RwLockReadGuard, RwLockWriteGuard};
use syscall::error::*;
use syscall::scheme::Scheme;
use self::debug::DebugScheme;
use self::event::EventScheme;
use self::env::EnvScheme;
use self::initfs::InitFsScheme;
use self::irq::IrqScheme;
use self::memory::MemoryScheme;
use self::null::NullScheme;
use self::pipe::PipeScheme;
use self::root::RootScheme;
use self::sys::SysScheme;
use self::zero::ZeroScheme;
/// `debug:` - provides access to serial console
pub mod debug;
/// `event:` - allows reading of `Event`s which are registered using `fevent`
pub mod event;
/// `env:` - access and modify environment variables
pub mod env;
/// `initfs:` - a readonly filesystem used for initializing the system
pub mod initfs;
/// `irq:` - allows userspace handling of IRQs
pub mod irq;
/// When compiled with "live" feature - `disk:` - embedded filesystem for live disk
#[cfg(feature="live")]
pub mod live;
/// `memory:` - a scheme for accessing physical memory
pub mod memory;
/// `null:` - a scheme that will discard all writes, and read no bytes
pub mod null;
/// `pipe:` - used internally by the kernel to implement `pipe`
pub mod pipe;
/// `:` - allows the creation of userspace schemes, tightly dependent on `user`
pub mod root;
/// `sys:` - system information, such as the context list and scheme list
pub mod sys;
/// A wrapper around userspace schemes, tightly dependent on `root`
pub mod user;
/// `zero:` - a scheme that will discard all writes, and always fill read buffers with zero
pub mod zero;
/// Limit on number of schemes
pub const SCHEME_MAX_SCHEMES: usize = 65536;
/// Unique identifier for a scheme namespace.
int_like!(SchemeNamespace, AtomicSchemeNamespace, usize, AtomicUsize);
/// Unique identifier for a scheme.
int_like!(SchemeId, AtomicSchemeId, usize, AtomicUsize);
pub const ATOMIC_SCHEMEID_INIT: AtomicSchemeId = AtomicSchemeId::default();
/// Unique identifier for a file descriptor.
int_like!(FileHandle, AtomicFileHandle, usize, AtomicUsize);
/// Scheme list type
pub struct SchemeList {
map: BTreeMap<SchemeId, Arc<Box<Scheme + Send + Sync>>>,
names: BTreeMap<SchemeNamespace, BTreeMap<Box<[u8]>, SchemeId>>,
next_ns: usize,
next_id: usize
}
impl SchemeList {
/// Create a new scheme list.
pub fn new() -> Self {
let mut list = SchemeList {
map: BTreeMap::new(),
names: BTreeMap::new(),
next_ns: 0,
next_id: 1
};
list.new_root();
list
}
/// Initialize a new namespace
fn new_ns(&mut self) -> SchemeNamespace {
let ns = SchemeNamespace(self.next_ns);
self.next_ns += 1;
self.names.insert(ns, BTreeMap::new());
self.insert(ns, Box::new(*b""), |scheme_id| Arc::new(Box::new(RootScheme::new(ns, scheme_id)))).unwrap();
self.insert(ns, Box::new(*b"event"), |_| Arc::new(Box::new(EventScheme::new()))).unwrap();
self.insert(ns, Box::new(*b"env"), |_| Arc::new(Box::new(EnvScheme::new()))).unwrap();
self.insert(ns, Box::new(*b"null"), |_| Arc::new(Box::new(NullScheme))).unwrap();
self.insert(ns, Box::new(*b"sys"), |_| Arc::new(Box::new(SysScheme::new()))).unwrap();
self.insert(ns, Box::new(*b"zero"), |_| Arc::new(Box::new(ZeroScheme))).unwrap();
ns
}
/// Initialize the root namespace
#[cfg(not(feature="live"))]
fn new_root(&mut self) {
// Do common namespace initialization
let ns = self.new_ns();
// Debug, Initfs, IRQ and Memory are only available in the root namespace. Pipe is special
self.insert(ns, Box::new(*b"debug"), |scheme_id| Arc::new(Box::new(DebugScheme::new(scheme_id)))).unwrap();
self.insert(ns, Box::new(*b"initfs"), |_| Arc::new(Box::new(InitFsScheme::new()))).unwrap();
self.insert(ns, Box::new(*b"irq"), |scheme_id| Arc::new(Box::new(IrqScheme::new(scheme_id)))).unwrap();
self.insert(ns, Box::new(*b"memory"), |_| Arc::new(Box::new(MemoryScheme))).unwrap();
self.insert(ns, Box::new(*b"pipe"), |scheme_id| Arc::new(Box::new(PipeScheme::new(scheme_id)))).unwrap();
}
/// Initialize the root namespace - with live disk
#[cfg(feature="live")]
fn new_root(&mut self) {
// Do common namespace initialization
let ns = self.new_ns();
// Debug, Disk, Initfs, IRQ and Memory are only available in the root namespace. Pipe is special
self.insert(ns, Box::new(*b"debug"), |scheme_id| Arc::new(Box::new(DebugScheme::new(scheme_id)))).unwrap();
self.insert(ns, Box::new(*b"disk"), |_| Arc::new(Box::new(self::live::DiskScheme::new()))).unwrap();
self.insert(ns, Box::new(*b"initfs"), |_| Arc::new(Box::new(InitFsScheme::new()))).unwrap();
self.insert(ns, Box::new(*b"irq"), |scheme_id| Arc::new(Box::new(IrqScheme::new(scheme_id)))).unwrap();
self.insert(ns, Box::new(*b"memory"), |_| Arc::new(Box::new(MemoryScheme))).unwrap();
self.insert(ns, Box::new(*b"pipe"), |scheme_id| Arc::new(Box::new(PipeScheme::new(scheme_id)))).unwrap();
}
pub fn make_ns(&mut self, from: SchemeNamespace, names: &[&[u8]]) -> Result<SchemeNamespace> {
// Create an empty namespace
let to = self.new_ns();
// Copy requested scheme IDs
for name in names.iter() {
let id = if let Some((id, _scheme)) = self.get_name(from, name) {
id
} else {
return Err(Error::new(ENODEV));
};
if let Some(ref mut names) = self.names.get_mut(&to) {
assert!(names.insert(name.to_vec().into_boxed_slice(), id).is_none());
} else {
panic!("scheme namespace not found");
}
}
Ok(to)
}
pub fn iter(&self) -> ::collections::btree_map::Iter<SchemeId, Arc<Box<Scheme + Send + Sync>>> {
self.map.iter()
}
pub fn iter_name(&self, ns: SchemeNamespace) -> ::collections::btree_map::Iter<Box<[u8]>, SchemeId> {
self.names[&ns].iter()
}
/// Get the scheme with the given ID.
pub fn get(&self, id: SchemeId) -> Option<&Arc<Box<Scheme + Send + Sync>>> {
self.map.get(&id)
}
pub fn get_name(&self, ns: SchemeNamespace, name: &[u8]) -> Option<(SchemeId, &Arc<Box<Scheme + Send + Sync>>)> {
if let Some(&id) = self.names[&ns].get(name) {
self.get(id).map(|scheme| (id, scheme))
} else {
None
}
}
/// Create a new scheme.
pub fn insert<F>(&mut self, ns: SchemeNamespace, name: Box<[u8]>, scheme_fn: F) -> Result<SchemeId>
where F: Fn(SchemeId) -> Arc<Box<Scheme + Send + Sync>>
{
if self.names[&ns].contains_key(&name) {
return Err(Error::new(EEXIST));
}
if self.next_id >= SCHEME_MAX_SCHEMES {
self.next_id = 1;
}
while self.map.contains_key(&SchemeId(self.next_id)) {
self.next_id += 1;
}
/* Allow scheme list to grow if required
if self.next_id >= SCHEME_MAX_SCHEMES {
return Err(Error::new(EAGAIN));
}
*/
let id = SchemeId(self.next_id);
self.next_id += 1;
let scheme = scheme_fn(id);
assert!(self.map.insert(id, scheme).is_none());
if let Some(ref mut names) = self.names.get_mut(&ns) {
assert!(names.insert(name, id).is_none());
} else {
panic!("scheme namespace not found");
}
Ok(id)
}
}
/// Schemes list
static SCHEMES: Once<RwLock<SchemeList>> = Once::new();
/// Initialize schemes, called if needed
fn init_schemes() -> RwLock<SchemeList> {
RwLock::new(SchemeList::new())
}
/// Get the global schemes list, const
pub fn schemes() -> RwLockReadGuard<'static, SchemeList> {
SCHEMES.call_once(init_schemes).read()
}
/// Get the global schemes list, mutable
pub fn schemes_mut() -> RwLockWriteGuard<'static, SchemeList> {
SCHEMES.call_once(init_schemes).write()
}
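// Illustrative sketch (not in the original source): registering one more scheme in
// an existing namespace at boot, the same way `new_root` does above. The name
// `example-null` and the namespace id are made up; reusing a name within a
// namespace fails with EEXIST.
#[allow(dead_code)]
fn register_scheme_sketch() {
    let ns = SchemeNamespace::from(0);
    if let Ok(id) = schemes_mut().insert(ns, Box::new(*b"example-null"),
                                         |_scheme_id| Arc::new(Box::new(NullScheme))) {
        println!("registered example-null as {:?}", id);
    }
}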


@ -1,37 +0,0 @@
use syscall::error::*;
use syscall::scheme::Scheme;
pub struct NullScheme;
impl Scheme for NullScheme {
fn open(&self, _path: &[u8], _flags: usize, _uid: u32, _gid: u32) -> Result<usize> {
Ok(0)
}
fn dup(&self, _file: usize, _buf: &[u8]) -> Result<usize> {
Ok(0)
}
/// Read the file `number` into the `buffer`
///
/// Returns the number of bytes read
fn read(&self, _file: usize, _buf: &mut [u8]) -> Result<usize> {
Ok(0)
}
/// Write the `buffer` to the `file`
///
/// Returns the number of bytes written
fn write(&self, _file: usize, buffer: &[u8]) -> Result<usize> {
Ok(buffer.len())
}
fn fsync(&self, _file: usize) -> Result<usize> {
Ok(0)
}
/// Close the file `number`
fn close(&self, _file: usize) -> Result<usize> {
Ok(0)
}
}

View file

@ -1,272 +0,0 @@
use alloc::arc::{Arc, Weak};
use collections::{BTreeMap, VecDeque};
use core::sync::atomic::{AtomicUsize, ATOMIC_USIZE_INIT, Ordering};
use spin::{Mutex, Once, RwLock, RwLockReadGuard, RwLockWriteGuard};
use scheme::{AtomicSchemeId, ATOMIC_SCHEMEID_INIT, SchemeId};
use sync::WaitCondition;
use syscall::error::{Error, Result, EAGAIN, EBADF, EINVAL, EPIPE};
use syscall::flag::{F_GETFL, F_SETFL, O_ACCMODE, O_CLOEXEC, O_NONBLOCK};
use syscall::scheme::Scheme;
/// Pipes list
pub static PIPE_SCHEME_ID: AtomicSchemeId = ATOMIC_SCHEMEID_INIT;
static PIPE_NEXT_ID: AtomicUsize = ATOMIC_USIZE_INIT;
static PIPES: Once<RwLock<(BTreeMap<usize, Arc<PipeRead>>, BTreeMap<usize, Arc<PipeWrite>>)>> = Once::new();
/// Initialize pipes, called if needed
fn init_pipes() -> RwLock<(BTreeMap<usize, Arc<PipeRead>>, BTreeMap<usize, Arc<PipeWrite>>)> {
RwLock::new((BTreeMap::new(), BTreeMap::new()))
}
/// Get the global pipes list, const
fn pipes() -> RwLockReadGuard<'static, (BTreeMap<usize, Arc<PipeRead>>, BTreeMap<usize, Arc<PipeWrite>>)> {
PIPES.call_once(init_pipes).read()
}
/// Get the global pipes list, mutable
fn pipes_mut() -> RwLockWriteGuard<'static, (BTreeMap<usize, Arc<PipeRead>>, BTreeMap<usize, Arc<PipeWrite>>)> {
PIPES.call_once(init_pipes).write()
}
pub fn pipe(flags: usize) -> (usize, usize) {
let mut pipes = pipes_mut();
let read_id = PIPE_NEXT_ID.fetch_add(1, Ordering::SeqCst);
let write_id = PIPE_NEXT_ID.fetch_add(1, Ordering::SeqCst);
let read = PipeRead::new(flags);
let write = PipeWrite::new(flags, &read);
pipes.0.insert(read_id, Arc::new(read));
pipes.1.insert(write_id, Arc::new(write));
(read_id, write_id)
}
pub struct PipeScheme;
impl PipeScheme {
pub fn new(scheme_id: SchemeId) -> PipeScheme {
PIPE_SCHEME_ID.store(scheme_id, Ordering::SeqCst);
PipeScheme
}
}
impl Scheme for PipeScheme {
fn dup(&self, id: usize, buf: &[u8]) -> Result<usize> {
let mut pipes = pipes_mut();
let read_option = if let Some(pipe) = pipes.0.get(&id) {
Some(pipe.dup(buf)?)
} else {
None
};
if let Some(pipe) = read_option {
let pipe_id = PIPE_NEXT_ID.fetch_add(1, Ordering::SeqCst);
pipes.0.insert(pipe_id, Arc::new(pipe));
return Ok(pipe_id);
}
let write_option = if let Some(pipe) = pipes.1.get(&id) {
Some(pipe.dup(buf)?)
} else {
None
};
if let Some(pipe) = write_option {
let pipe_id = PIPE_NEXT_ID.fetch_add(1, Ordering::SeqCst);
pipes.1.insert(pipe_id, Arc::new(pipe));
return Ok(pipe_id);
}
Err(Error::new(EBADF))
}
fn read(&self, id: usize, buf: &mut [u8]) -> Result<usize> {
// Clone to prevent deadlocks
let pipe = {
let pipes = pipes();
pipes.0.get(&id).map(|pipe| pipe.clone()).ok_or(Error::new(EBADF))?
};
pipe.read(buf)
}
fn write(&self, id: usize, buf: &[u8]) -> Result<usize> {
// Clone to prevent deadlocks
let pipe = {
let pipes = pipes();
pipes.1.get(&id).map(|pipe| pipe.clone()).ok_or(Error::new(EBADF))?
};
pipe.write(buf)
}
fn fcntl(&self, id: usize, cmd: usize, arg: usize) -> Result<usize> {
let pipes = pipes();
if let Some(pipe) = pipes.0.get(&id) {
return pipe.fcntl(cmd, arg);
}
if let Some(pipe) = pipes.1.get(&id) {
return pipe.fcntl(cmd, arg);
}
Err(Error::new(EBADF))
}
fn fsync(&self, _id: usize) -> Result<usize> {
Ok(0)
}
fn close(&self, id: usize) -> Result<usize> {
let mut pipes = pipes_mut();
drop(pipes.0.remove(&id));
drop(pipes.1.remove(&id));
Ok(0)
}
}
/// Read side of a pipe
pub struct PipeRead {
flags: AtomicUsize,
condition: Arc<WaitCondition>,
vec: Arc<Mutex<VecDeque<u8>>>
}
impl PipeRead {
pub fn new(flags: usize) -> Self {
PipeRead {
flags: AtomicUsize::new(flags),
condition: Arc::new(WaitCondition::new()),
vec: Arc::new(Mutex::new(VecDeque::new())),
}
}
fn dup(&self, buf: &[u8]) -> Result<Self> {
if buf == b"exec" && self.flags.load(Ordering::SeqCst) & O_CLOEXEC == O_CLOEXEC {
Err(Error::new(EBADF))
} else {
let mut flags = self.flags.load(Ordering::SeqCst);
if buf.is_empty() {
flags &= ! O_CLOEXEC;
}
Ok(PipeRead {
flags: AtomicUsize::new(flags),
condition: self.condition.clone(),
vec: self.vec.clone()
})
}
}
fn fcntl(&self, cmd: usize, arg: usize) -> Result<usize> {
match cmd {
F_GETFL => Ok(self.flags.load(Ordering::SeqCst)),
F_SETFL => {
self.flags.store(arg & ! O_ACCMODE, Ordering::SeqCst);
Ok(0)
},
_ => Err(Error::new(EINVAL))
}
}
fn read(&self, buf: &mut [u8]) -> Result<usize> {
loop {
{
let mut vec = self.vec.lock();
let mut i = 0;
while i < buf.len() {
if let Some(b) = vec.pop_front() {
buf[i] = b;
i += 1;
} else {
break;
}
}
if i > 0 {
return Ok(i);
}
}
if Arc::weak_count(&self.vec) == 0 {
return Ok(0);
} else if self.flags.load(Ordering::SeqCst) & O_NONBLOCK == O_NONBLOCK {
return Err(Error::new(EAGAIN));
} else {
self.condition.wait();
}
}
}
}
/// Write side of a pipe
pub struct PipeWrite {
flags: AtomicUsize,
condition: Arc<WaitCondition>,
vec: Option<Weak<Mutex<VecDeque<u8>>>>
}
impl PipeWrite {
pub fn new(flags: usize, read: &PipeRead) -> Self {
PipeWrite {
flags: AtomicUsize::new(flags),
condition: read.condition.clone(),
vec: Some(Arc::downgrade(&read.vec)),
}
}
fn dup(&self, buf: &[u8]) -> Result<Self> {
if buf == b"exec" && self.flags.load(Ordering::SeqCst) & O_CLOEXEC == O_CLOEXEC {
Err(Error::new(EBADF))
} else {
let mut flags = self.flags.load(Ordering::SeqCst);
if buf.is_empty() {
flags &= ! O_CLOEXEC;
}
Ok(PipeWrite {
flags: AtomicUsize::new(flags),
condition: self.condition.clone(),
vec: self.vec.clone()
})
}
}
fn fcntl(&self, cmd: usize, arg: usize) -> Result<usize> {
match cmd {
F_GETFL => Ok(self.flags.load(Ordering::SeqCst)),
F_SETFL => {
self.flags.store(arg & ! O_ACCMODE, Ordering::SeqCst);
Ok(0)
},
_ => Err(Error::new(EINVAL))
}
}
fn write(&self, buf: &[u8]) -> Result<usize> {
if let Some(ref vec_weak) = self.vec {
if let Some(vec_lock) = vec_weak.upgrade() {
let mut vec = vec_lock.lock();
for &b in buf.iter() {
vec.push_back(b);
}
self.condition.notify();
Ok(buf.len())
} else {
Err(Error::new(EPIPE))
}
} else {
panic!("PipeWrite dropped before write");
}
}
}
impl Drop for PipeWrite {
fn drop(&mut self) {
drop(self.vec.take());
self.condition.notify();
}
}
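
For illustration only, a userspace analogue of the PipeRead::read semantics above, using std's Mutex and Condvar in place of the kernel's spin Mutex and WaitCondition: drain whatever bytes are buffered, return a partial read if any were available, and otherwise block until the writer notifies.

use std::collections::VecDeque;
use std::sync::{Arc, Condvar, Mutex};
use std::thread;

fn main() {
    let pipe = Arc::new((Mutex::new(VecDeque::<u8>::new()), Condvar::new()));
    let writer = Arc::clone(&pipe);

    // Writer side: push bytes under the lock, then notify (mirrors PipeWrite::write)
    thread::spawn(move || {
        let (lock, cvar) = &*writer;
        lock.lock().unwrap().extend(b"hello".iter().cloned());
        cvar.notify_one();
    });

    // Reader side: mirrors the loop in PipeRead::read
    let (lock, cvar) = &*pipe;
    let mut queue = lock.lock().unwrap();
    loop {
        let mut buf = [0u8; 16];
        let mut i = 0;
        while i < buf.len() {
            match queue.pop_front() {
                Some(b) => { buf[i] = b; i += 1; }
                None => break,
            }
        }
        if i > 0 {
            println!("read {} bytes: {:?}", i, &buf[..i]);
            break;
        }
        // Nothing buffered yet: block until the writer notifies
        queue = cvar.wait(queue).unwrap();
    }
}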

View file

@ -1,116 +0,0 @@
use alloc::arc::Arc;
use alloc::boxed::Box;
use collections::BTreeMap;
use core::sync::atomic::{AtomicUsize, Ordering};
use spin::RwLock;
use context;
use syscall::error::*;
use syscall::scheme::Scheme;
use scheme::{self, SchemeNamespace, SchemeId};
use scheme::user::{UserInner, UserScheme};
pub struct RootScheme {
scheme_ns: SchemeNamespace,
scheme_id: SchemeId,
next_id: AtomicUsize,
handles: RwLock<BTreeMap<usize, Arc<UserInner>>>
}
impl RootScheme {
pub fn new(scheme_ns: SchemeNamespace, scheme_id: SchemeId) -> RootScheme {
RootScheme {
scheme_ns: scheme_ns,
scheme_id: scheme_id,
next_id: AtomicUsize::new(0),
handles: RwLock::new(BTreeMap::new())
}
}
}
impl Scheme for RootScheme {
fn open(&self, path: &[u8], flags: usize, uid: u32, _gid: u32) -> Result<usize> {
if uid == 0 {
let context = {
let contexts = context::contexts();
let context = contexts.current().ok_or(Error::new(ESRCH))?;
Arc::downgrade(&context)
};
let id = self.next_id.fetch_add(1, Ordering::SeqCst);
let inner = {
let mut schemes = scheme::schemes_mut();
let inner = Arc::new(UserInner::new(self.scheme_id, id, flags, context));
schemes.insert(self.scheme_ns, path.to_vec().into_boxed_slice(), |scheme_id| {
inner.scheme_id.store(scheme_id, Ordering::SeqCst);
Arc::new(Box::new(UserScheme::new(Arc::downgrade(&inner))))
})?;
inner
};
self.handles.write().insert(id, inner);
Ok(id)
} else {
Err(Error::new(EACCES))
}
}
fn dup(&self, file: usize, _buf: &[u8]) -> Result<usize> {
let mut handles = self.handles.write();
let inner = {
let inner = handles.get(&file).ok_or(Error::new(EBADF))?;
inner.clone()
};
let id = self.next_id.fetch_add(1, Ordering::SeqCst);
handles.insert(id, inner);
Ok(id)
}
fn read(&self, file: usize, buf: &mut [u8]) -> Result<usize> {
let inner = {
let handles = self.handles.read();
let inner = handles.get(&file).ok_or(Error::new(EBADF))?;
inner.clone()
};
inner.read(buf)
}
fn write(&self, file: usize, buf: &[u8]) -> Result<usize> {
let inner = {
let handles = self.handles.read();
let inner = handles.get(&file).ok_or(Error::new(EBADF))?;
inner.clone()
};
inner.write(buf)
}
fn fevent(&self, file: usize, flags: usize) -> Result<usize> {
let inner = {
let handles = self.handles.read();
let inner = handles.get(&file).ok_or(Error::new(EBADF))?;
inner.clone()
};
inner.fevent(flags)
}
fn fsync(&self, file: usize) -> Result<usize> {
let inner = {
let handles = self.handles.read();
let inner = handles.get(&file).ok_or(Error::new(EBADF))?;
inner.clone()
};
inner.fsync()
}
fn close(&self, file: usize) -> Result<usize> {
self.handles.write().remove(&file).ok_or(Error::new(EBADF)).and(Ok(0))
}
}

View file

@ -1,106 +0,0 @@
use collections::{String, Vec};
use core::str;
use context;
use syscall::error::Result;
pub fn resource() -> Result<Vec<u8>> {
let mut string = format!("{:<6}{:<6}{:<6}{:<6}{:<6}{:<6}{:<6}{:<6}{:<6}{:<6}{:<8}{}\n",
"PID",
"PPID",
"RUID",
"RGID",
"RNS",
"EUID",
"EGID",
"ENS",
"STAT",
"CPU",
"MEM",
"NAME");
{
let contexts = context::contexts();
for (_id, context_lock) in contexts.iter() {
let context = context_lock.read();
let mut stat_string = String::new();
if context.stack.is_some() {
stat_string.push('U');
} else {
stat_string.push('K');
}
match context.status {
context::Status::Runnable => {
stat_string.push('R');
},
context::Status::Blocked => if context.wake.is_some() {
stat_string.push('S');
} else {
stat_string.push('B');
},
context::Status::Exited(_status) => {
stat_string.push('Z');
}
}
if context.running {
stat_string.push('+');
}
let cpu_string = if let Some(cpu_id) = context.cpu_id {
format!("{}", cpu_id)
} else {
format!("?")
};
let mut memory = 0;
if let Some(ref kfx) = context.kstack {
memory += kfx.len();
}
if let Some(ref kstack) = context.kstack {
memory += kstack.len();
}
for shared_mem in context.image.iter() {
shared_mem.with(|mem| {
memory += mem.size();
});
}
if let Some(ref heap) = context.heap {
heap.with(|heap| {
memory += heap.size();
});
}
if let Some(ref stack) = context.stack {
memory += stack.size();
}
let memory_string = if memory >= 1024 * 1024 * 1024 {
format!("{} GB", memory / 1024 / 1024 / 1024)
} else if memory >= 1024 * 1024 {
format!("{} MB", memory / 1024 / 1024)
} else if memory >= 1024 {
format!("{} KB", memory / 1024)
} else {
format!("{} B", memory)
};
let name_bytes = context.name.lock();
let name = str::from_utf8(&name_bytes).unwrap_or("");
string.push_str(&format!("{:<6}{:<6}{:<6}{:<6}{:<6}{:<6}{:<6}{:<6}{:<6}{:<6}{:<8}{}\n",
context.id.into(),
context.ppid.into(),
context.ruid,
context.rgid,
context.rns.into(),
context.euid,
context.egid,
context.ens.into(),
stat_string,
cpu_string,
memory_string,
name));
}
}
Ok(string.into_bytes())
}

View file

@ -1,13 +0,0 @@
use collections::Vec;
use arch::device::cpu::cpu_info;
use syscall::error::{Error, EIO, Result};
pub fn resource() -> Result<Vec<u8>> {
let mut string = format!("CPUs: {}\n", ::cpu_count());
match cpu_info(&mut string) {
Ok(()) => Ok(string.into_bytes()),
Err(_) => Err(Error::new(EIO))
}
}

View file

@ -1,16 +0,0 @@
use collections::Vec;
use context;
use syscall::error::{Error, ESRCH, Result};
pub fn resource() -> Result<Vec<u8>> {
let mut name = {
let contexts = context::contexts();
let context_lock = contexts.current().ok_or(Error::new(ESRCH))?;
let context = context_lock.read();
let name = context.name.lock();
name.clone()
};
name.push(b'\n');
Ok(name)
}

View file

@ -1,183 +0,0 @@
use alloc::boxed::Box;
use collections::{BTreeMap, Vec};
use core::{cmp, str};
use core::sync::atomic::{AtomicUsize, Ordering};
use spin::RwLock;
use syscall::data::Stat;
use syscall::error::{Error, EBADF, EINVAL, ENOENT, Result};
use syscall::flag::{MODE_DIR, MODE_FILE, SEEK_CUR, SEEK_END, SEEK_SET};
use syscall::scheme::Scheme;
mod context;
mod cpu;
mod exe;
mod scheme;
//mod interrupt;
//mod log;
//mod test;
struct Handle {
path: &'static [u8],
data: Vec<u8>,
mode: u16,
seek: usize
}
type SysFn = Fn() -> Result<Vec<u8>> + Send + Sync;
/// System information scheme
pub struct SysScheme {
next_id: AtomicUsize,
files: BTreeMap<&'static [u8], Box<SysFn>>,
handles: RwLock<BTreeMap<usize, Handle>>
}
impl SysScheme {
pub fn new() -> SysScheme {
let mut files: BTreeMap<&'static [u8], Box<SysFn>> = BTreeMap::new();
files.insert(b"context", Box::new(move || context::resource()));
files.insert(b"cpu", Box::new(move || cpu::resource()));
files.insert(b"exe", Box::new(move || exe::resource()));
files.insert(b"scheme", Box::new(move || scheme::resource()));
//files.insert(b"interrupt", Box::new(move || interrupt::resource()));
//files.insert(b"log", Box::new(move || log::resource()));
//files.insert(b"test", Box::new(move || test::resource()));
SysScheme {
next_id: AtomicUsize::new(0),
files: files,
handles: RwLock::new(BTreeMap::new())
}
}
}
impl Scheme for SysScheme {
fn open(&self, path: &[u8], _flags: usize, _uid: u32, _gid: u32) -> Result<usize> {
let path_utf8 = str::from_utf8(path).map_err(|_err| Error::new(ENOENT))?;
let path_trimmed = path_utf8.trim_matches('/');
if path_trimmed.is_empty() {
let mut data = Vec::new();
for entry in self.files.iter() {
if ! data.is_empty() {
data.push(b'\n');
}
data.extend_from_slice(entry.0);
}
let id = self.next_id.fetch_add(1, Ordering::SeqCst);
self.handles.write().insert(id, Handle {
path: b"",
data: data,
mode: MODE_DIR | 0o444,
seek: 0
});
return Ok(id)
} else {
            // Have to iterate to find the matching entry without allocating
for entry in self.files.iter() {
if entry.0 == &path_trimmed.as_bytes() {
let id = self.next_id.fetch_add(1, Ordering::SeqCst);
self.handles.write().insert(id, Handle {
path: entry.0,
data: entry.1()?,
mode: MODE_FILE | 0o444,
seek: 0
});
return Ok(id)
}
}
}
Err(Error::new(ENOENT))
}
fn dup(&self, id: usize, _buf: &[u8]) -> Result<usize> {
let (path, data, mode, seek) = {
let handles = self.handles.read();
let handle = handles.get(&id).ok_or(Error::new(EBADF))?;
(handle.path, handle.data.clone(), handle.mode, handle.seek)
};
let id = self.next_id.fetch_add(1, Ordering::SeqCst);
self.handles.write().insert(id, Handle {
path: path,
data: data,
mode: mode,
seek: seek
});
Ok(id)
}
fn read(&self, id: usize, buffer: &mut [u8]) -> Result<usize> {
let mut handles = self.handles.write();
let mut handle = handles.get_mut(&id).ok_or(Error::new(EBADF))?;
let mut i = 0;
while i < buffer.len() && handle.seek < handle.data.len() {
buffer[i] = handle.data[handle.seek];
i += 1;
handle.seek += 1;
}
Ok(i)
}
fn seek(&self, id: usize, pos: usize, whence: usize) -> Result<usize> {
let mut handles = self.handles.write();
let mut handle = handles.get_mut(&id).ok_or(Error::new(EBADF))?;
handle.seek = match whence {
SEEK_SET => cmp::min(handle.data.len(), pos),
SEEK_CUR => cmp::max(0, cmp::min(handle.data.len() as isize, handle.seek as isize + pos as isize)) as usize,
SEEK_END => cmp::max(0, cmp::min(handle.data.len() as isize, handle.data.len() as isize + pos as isize)) as usize,
_ => return Err(Error::new(EINVAL))
};
Ok(handle.seek)
}
fn fpath(&self, id: usize, buf: &mut [u8]) -> Result<usize> {
let handles = self.handles.read();
let handle = handles.get(&id).ok_or(Error::new(EBADF))?;
let mut i = 0;
let scheme_path = b"sys:";
while i < buf.len() && i < scheme_path.len() {
buf[i] = scheme_path[i];
i += 1;
}
let mut j = 0;
while i < buf.len() && j < handle.path.len() {
buf[i] = handle.path[j];
i += 1;
j += 1;
}
Ok(i)
}
fn fstat(&self, id: usize, stat: &mut Stat) -> Result<usize> {
let handles = self.handles.read();
let handle = handles.get(&id).ok_or(Error::new(EBADF))?;
stat.st_mode = handle.mode;
stat.st_uid = 0;
stat.st_gid = 0;
stat.st_size = handle.data.len() as u64;
Ok(0)
}
fn fsync(&self, _id: usize) -> Result<usize> {
Ok(0)
}
fn close(&self, id: usize) -> Result<usize> {
self.handles.write().remove(&id).ok_or(Error::new(EBADF)).and(Ok(0))
}
}
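
A small std-only sketch of the dispatch-table pattern SysScheme uses above: static file names map to boxed closures, opening the empty path lists the registered names, and opening a single entry runs its generator. The names and contents here are invented for illustration.

use std::collections::BTreeMap;

fn main() {
    let mut files: BTreeMap<&'static [u8], Box<dyn Fn() -> Vec<u8> + Send + Sync>> = BTreeMap::new();
    files.insert(b"cpu", Box::new(|| b"CPUs: 4\n".to_vec()));
    files.insert(b"uname", Box::new(|| b"Redox\n".to_vec()));

    // Opening the directory ("" after trimming '/') lists the entries, newline separated
    let mut listing = Vec::new();
    for name in files.keys() {
        if !listing.is_empty() {
            listing.push(b'\n');
        }
        listing.extend_from_slice(name);
    }
    println!("{}", String::from_utf8_lossy(&listing));

    // Opening a single entry runs its generator closure
    if let Some(generate) = files.get(&b"cpu"[..]) {
        print!("{}", String::from_utf8_lossy(&generate()));
    }
}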

View file

@ -1,24 +0,0 @@
use collections::Vec;
use context;
use scheme;
use syscall::error::{Error, ESRCH, Result};
pub fn resource() -> Result<Vec<u8>> {
let scheme_ns = {
let contexts = context::contexts();
let context_lock = contexts.current().ok_or(Error::new(ESRCH))?;
let context = context_lock.read();
context.ens
};
let mut data = Vec::new();
let schemes = scheme::schemes();
for (name, _scheme_lock) in schemes.iter_name(scheme_ns) {
data.extend_from_slice(name);
data.push(b'\n');
}
Ok(data)
}

View file

@ -1,354 +0,0 @@
use alloc::arc::{Arc, Weak};
use collections::BTreeMap;
use core::sync::atomic::{AtomicU64, Ordering};
use core::{mem, slice, usize};
use spin::{Mutex, RwLock};
use arch;
use arch::paging::{InactivePageTable, Page, VirtualAddress, entry};
use arch::paging::temporary_page::TemporaryPage;
use context::{self, Context};
use context::memory::Grant;
use scheme::{AtomicSchemeId, ATOMIC_SCHEMEID_INIT, SchemeId};
use sync::{WaitQueue, WaitMap};
use syscall::data::{Packet, Stat, StatVfs};
use syscall::error::*;
use syscall::flag::{EVENT_READ, O_NONBLOCK};
use syscall::number::*;
use syscall::scheme::Scheme;
pub struct UserInner {
root_id: SchemeId,
handle_id: usize,
flags: usize,
pub scheme_id: AtomicSchemeId,
next_id: AtomicU64,
context: Weak<RwLock<Context>>,
todo: WaitQueue<Packet>,
fmap: Mutex<BTreeMap<u64, (Weak<RwLock<Context>>, usize)>>,
done: WaitMap<u64, usize>
}
impl UserInner {
pub fn new(root_id: SchemeId, handle_id: usize, flags: usize, context: Weak<RwLock<Context>>) -> UserInner {
UserInner {
root_id: root_id,
handle_id: handle_id,
flags: flags,
scheme_id: ATOMIC_SCHEMEID_INIT,
next_id: AtomicU64::new(1),
context: context,
todo: WaitQueue::new(),
fmap: Mutex::new(BTreeMap::new()),
done: WaitMap::new()
}
}
pub fn call(&self, a: usize, b: usize, c: usize, d: usize) -> Result<usize> {
let (pid, uid, gid) = {
let contexts = context::contexts();
let context_lock = contexts.current().ok_or(Error::new(ESRCH))?;
let context = context_lock.read();
(context.id, context.euid, context.egid)
};
self.call_inner(Packet {
id: self.next_id.fetch_add(1, Ordering::SeqCst),
pid: pid.into(),
uid: uid,
gid: gid,
a: a,
b: b,
c: c,
d: d
})
}
fn call_inner(&self, packet: Packet) -> Result<usize> {
let id = packet.id;
let len = self.todo.send(packet);
context::event::trigger(self.root_id, self.handle_id, EVENT_READ, mem::size_of::<Packet>() * len);
Error::demux(self.done.receive(&id))
}
pub fn capture(&self, buf: &[u8]) -> Result<usize> {
UserInner::capture_inner(&self.context, buf.as_ptr() as usize, buf.len(), false)
}
pub fn capture_mut(&self, buf: &mut [u8]) -> Result<usize> {
UserInner::capture_inner(&self.context, buf.as_mut_ptr() as usize, buf.len(), true)
}
fn capture_inner(context_weak: &Weak<RwLock<Context>>, address: usize, size: usize, writable: bool) -> Result<usize> {
if size == 0 {
Ok(0)
} else {
let context_lock = context_weak.upgrade().ok_or(Error::new(ESRCH))?;
let context = context_lock.read();
let mut grants = context.grants.lock();
let mut new_table = unsafe { InactivePageTable::from_address(context.arch.get_page_table()) };
let mut temporary_page = TemporaryPage::new(Page::containing_address(VirtualAddress::new(arch::USER_TMP_GRANT_OFFSET)));
let from_address = (address/4096) * 4096;
let offset = address - from_address;
let full_size = ((offset + size + 4095)/4096) * 4096;
let mut to_address = arch::USER_GRANT_OFFSET;
let mut flags = entry::PRESENT | entry::NO_EXECUTE | entry::USER_ACCESSIBLE;
if writable {
flags |= entry::WRITABLE;
}
for i in 0 .. grants.len() {
let start = grants[i].start_address().get();
if to_address + full_size < start {
grants.insert(i, Grant::map_inactive(
VirtualAddress::new(from_address),
VirtualAddress::new(to_address),
full_size,
flags,
&mut new_table,
&mut temporary_page
));
return Ok(to_address + offset);
} else {
let pages = (grants[i].size() + 4095) / 4096;
let end = start + pages * 4096;
to_address = end;
}
}
grants.push(Grant::map_inactive(
VirtualAddress::new(from_address),
VirtualAddress::new(to_address),
full_size,
flags,
&mut new_table,
&mut temporary_page
));
Ok(to_address + offset)
}
}
pub fn release(&self, address: usize) -> Result<()> {
if address == 0 {
Ok(())
} else {
let context_lock = self.context.upgrade().ok_or(Error::new(ESRCH))?;
let context = context_lock.read();
let mut grants = context.grants.lock();
let mut new_table = unsafe { InactivePageTable::from_address(context.arch.get_page_table()) };
let mut temporary_page = TemporaryPage::new(Page::containing_address(VirtualAddress::new(arch::USER_TMP_GRANT_OFFSET)));
for i in 0 .. grants.len() {
let start = grants[i].start_address().get();
let end = start + grants[i].size();
if address >= start && address < end {
grants.remove(i).unmap_inactive(&mut new_table, &mut temporary_page);
return Ok(());
}
}
Err(Error::new(EFAULT))
}
}
pub fn read(&self, buf: &mut [u8]) -> Result<usize> {
let packet_buf = unsafe { slice::from_raw_parts_mut(buf.as_mut_ptr() as *mut Packet, buf.len()/mem::size_of::<Packet>()) };
Ok(self.todo.receive_into(packet_buf, self.flags & O_NONBLOCK != O_NONBLOCK) * mem::size_of::<Packet>())
}
pub fn write(&self, buf: &[u8]) -> Result<usize> {
let packet_size = mem::size_of::<Packet>();
let len = buf.len()/packet_size;
let mut i = 0;
while i < len {
let mut packet = unsafe { *(buf.as_ptr() as *const Packet).offset(i as isize) };
if packet.id == 0 {
match packet.a {
SYS_FEVENT => context::event::trigger(self.scheme_id.load(Ordering::SeqCst), packet.b, packet.c, packet.d),
_ => println!("Unknown scheme -> kernel message {}", packet.a)
}
} else {
if let Some((context_weak, size)) = self.fmap.lock().remove(&packet.id) {
if let Ok(address) = Error::demux(packet.a) {
packet.a = Error::mux(UserInner::capture_inner(&context_weak, address, size, true));
}
}
self.done.send(packet.id, packet.a);
}
i += 1;
}
Ok(i * packet_size)
}
pub fn fevent(&self, _flags: usize) -> Result<usize> {
Ok(self.handle_id)
}
pub fn fsync(&self) -> Result<usize> {
Ok(0)
}
}
/// `UserScheme` holds only a weak reference to its `UserInner`, so scheme calls
/// fail with `ENODEV` once the providing process closes its root handle
pub struct UserScheme {
inner: Weak<UserInner>
}
impl UserScheme {
pub fn new(inner: Weak<UserInner>) -> UserScheme {
UserScheme {
inner: inner
}
}
}
impl Scheme for UserScheme {
fn open(&self, path: &[u8], flags: usize, _uid: u32, _gid: u32) -> Result<usize> {
let inner = self.inner.upgrade().ok_or(Error::new(ENODEV))?;
let address = inner.capture(path)?;
let result = inner.call(SYS_OPEN, address, path.len(), flags);
let _ = inner.release(address);
result
}
fn chmod(&self, path: &[u8], mode: u16, _uid: u32, _gid: u32) -> Result<usize> {
let inner = self.inner.upgrade().ok_or(Error::new(ENODEV))?;
let address = inner.capture(path)?;
let result = inner.call(SYS_CHMOD, address, path.len(), mode as usize);
let _ = inner.release(address);
result
}
fn rmdir(&self, path: &[u8], _uid: u32, _gid: u32) -> Result<usize> {
let inner = self.inner.upgrade().ok_or(Error::new(ENODEV))?;
let address = inner.capture(path)?;
let result = inner.call(SYS_RMDIR, address, path.len(), 0);
let _ = inner.release(address);
result
}
fn unlink(&self, path: &[u8], _uid: u32, _gid: u32) -> Result<usize> {
let inner = self.inner.upgrade().ok_or(Error::new(ENODEV))?;
let address = inner.capture(path)?;
let result = inner.call(SYS_UNLINK, address, path.len(), 0);
let _ = inner.release(address);
result
}
fn dup(&self, file: usize, buf: &[u8]) -> Result<usize> {
let inner = self.inner.upgrade().ok_or(Error::new(ENODEV))?;
let address = inner.capture(buf)?;
let result = inner.call(SYS_DUP, file, address, buf.len());
let _ = inner.release(address);
result
}
fn read(&self, file: usize, buf: &mut [u8]) -> Result<usize> {
let inner = self.inner.upgrade().ok_or(Error::new(ENODEV))?;
let address = inner.capture_mut(buf)?;
let result = inner.call(SYS_READ, file, address, buf.len());
let _ = inner.release(address);
result
}
fn write(&self, file: usize, buf: &[u8]) -> Result<usize> {
let inner = self.inner.upgrade().ok_or(Error::new(ENODEV))?;
let address = inner.capture(buf)?;
let result = inner.call(SYS_WRITE, file, address, buf.len());
let _ = inner.release(address);
result
}
fn seek(&self, file: usize, position: usize, whence: usize) -> Result<usize> {
let inner = self.inner.upgrade().ok_or(Error::new(ENODEV))?;
inner.call(SYS_LSEEK, file, position, whence)
}
fn fcntl(&self, file: usize, cmd: usize, arg: usize) -> Result<usize> {
let inner = self.inner.upgrade().ok_or(Error::new(ENODEV))?;
inner.call(SYS_FCNTL, file, cmd, arg)
}
fn fevent(&self, file: usize, flags: usize) -> Result<usize> {
let inner = self.inner.upgrade().ok_or(Error::new(ENODEV))?;
inner.call(SYS_FEVENT, file, flags, 0)
}
fn fmap(&self, file: usize, offset: usize, size: usize) -> Result<usize> {
let inner = self.inner.upgrade().ok_or(Error::new(ENODEV))?;
let (pid, uid, gid, context_lock) = {
let contexts = context::contexts();
let context_lock = contexts.current().ok_or(Error::new(ESRCH))?;
let context = context_lock.read();
(context.id, context.euid, context.egid, Arc::downgrade(&context_lock))
};
let id = inner.next_id.fetch_add(1, Ordering::SeqCst);
inner.fmap.lock().insert(id, (context_lock, size));
inner.call_inner(Packet {
id: id,
pid: pid.into(),
uid: uid,
gid: gid,
a: SYS_FMAP,
b: file,
c: offset,
d: size
})
}
fn fpath(&self, file: usize, buf: &mut [u8]) -> Result<usize> {
let inner = self.inner.upgrade().ok_or(Error::new(ENODEV))?;
let address = inner.capture_mut(buf)?;
let result = inner.call(SYS_FPATH, file, address, buf.len());
let _ = inner.release(address);
result
}
fn fstat(&self, file: usize, stat: &mut Stat) -> Result<usize> {
let inner = self.inner.upgrade().ok_or(Error::new(ENODEV))?;
let address = inner.capture_mut(stat)?;
let result = inner.call(SYS_FSTAT, file, address, mem::size_of::<Stat>());
let _ = inner.release(address);
result
}
fn fstatvfs(&self, file: usize, stat: &mut StatVfs) -> Result<usize> {
let inner = self.inner.upgrade().ok_or(Error::new(ENODEV))?;
let address = inner.capture_mut(stat)?;
let result = inner.call(SYS_FSTATVFS, file, address, mem::size_of::<StatVfs>());
let _ = inner.release(address);
result
}
fn fsync(&self, file: usize) -> Result<usize> {
let inner = self.inner.upgrade().ok_or(Error::new(ENODEV))?;
inner.call(SYS_FSYNC, file, 0, 0)
}
fn ftruncate(&self, file: usize, len: usize) -> Result<usize> {
let inner = self.inner.upgrade().ok_or(Error::new(ENODEV))?;
inner.call(SYS_FTRUNCATE, file, len, 0)
}
fn close(&self, file: usize) -> Result<usize> {
let inner = self.inner.upgrade().ok_or(Error::new(ENODEV))?;
inner.call(SYS_CLOSE, file, 0, 0)
}
}

View file

@ -1,42 +0,0 @@
use syscall::error::*;
use syscall::scheme::Scheme;
pub struct ZeroScheme;
impl Scheme for ZeroScheme {
fn open(&self, _path: &[u8], _flags: usize, _uid: u32, _gid: u32) -> Result<usize> {
Ok(0)
}
fn dup(&self, _file: usize, _buf: &[u8]) -> Result<usize> {
Ok(0)
}
/// Read the file `number` into the `buffer`
///
/// Returns the number of bytes read
fn read(&self, _file: usize, buf: &mut [u8]) -> Result<usize> {
let mut i = 0;
while i < buf.len() {
buf[i] = 0;
i += 1;
}
Ok(i)
}
/// Write the `buffer` to the `file`
///
/// Returns the number of bytes written
fn write(&self, _file: usize, buffer: &[u8]) -> Result<usize> {
Ok(buffer.len())
}
fn fsync(&self, _file: usize) -> Result<usize> {
Ok(0)
}
/// Close the file `number`
fn close(&self, _file: usize) -> Result<usize> {
Ok(0)
}
}

View file

@ -1,7 +0,0 @@
pub use self::wait_condition::WaitCondition;
pub use self::wait_queue::WaitQueue;
pub use self::wait_map::WaitMap;
pub mod wait_condition;
pub mod wait_queue;
pub mod wait_map;

View file

@ -1,48 +0,0 @@
use alloc::arc::Arc;
use collections::Vec;
use spin::{Mutex, RwLock};
use context::{self, Context};
#[derive(Debug)]
pub struct WaitCondition {
contexts: Mutex<Vec<Arc<RwLock<Context>>>>
}
impl WaitCondition {
pub fn new() -> WaitCondition {
WaitCondition {
contexts: Mutex::new(Vec::with_capacity(16))
}
}
pub fn notify(&self) -> usize {
let mut contexts = self.contexts.lock();
let len = contexts.len();
while let Some(context_lock) = contexts.pop() {
context_lock.write().unblock();
}
len
}
pub fn wait(&self) {
{
let context_lock = {
let contexts = context::contexts();
let context_lock = contexts.current().expect("WaitCondition::wait: no context");
context_lock.clone()
};
context_lock.write().block();
self.contexts.lock().push(context_lock);
}
unsafe { context::switch(); }
}
}
impl Drop for WaitCondition {
fn drop(&mut self){
self.notify();
}
}

View file

@ -1,62 +0,0 @@
use collections::BTreeMap;
use core::mem;
use spin::Mutex;
use sync::WaitCondition;
#[derive(Debug)]
pub struct WaitMap<K, V> {
inner: Mutex<BTreeMap<K, V>>,
condition: WaitCondition
}
impl<K, V> WaitMap<K, V> where K: Clone + Ord {
pub fn new() -> WaitMap<K, V> {
WaitMap {
inner: Mutex::new(BTreeMap::new()),
condition: WaitCondition::new()
}
}
pub fn receive_nonblock(&self, key: &K) -> Option<V> {
self.inner.lock().remove(key)
}
pub fn receive(&self, key: &K) -> V {
loop {
if let Some(value) = self.receive_nonblock(key) {
return value;
}
self.condition.wait();
}
}
pub fn receive_any_nonblock(&self) -> Option<(K, V)> {
let mut inner = self.inner.lock();
if let Some(key) = inner.keys().next().map(|key| key.clone()) {
inner.remove(&key).map(|value| (key, value))
} else {
None
}
}
pub fn receive_any(&self) -> (K, V) {
loop {
if let Some(entry) = self.receive_any_nonblock() {
return entry;
}
self.condition.wait();
}
}
pub fn receive_all(&self) -> BTreeMap<K, V> {
let mut ret = BTreeMap::new();
mem::swap(&mut ret, &mut *self.inner.lock());
ret
}
pub fn send(&self, key: K, value: V) {
self.inner.lock().insert(key, value);
self.condition.notify();
}
}
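
A minimal std-based sketch (Mutex plus Condvar standing in for the kernel's WaitCondition) of the WaitMap idea above: one thread blocks in receive(key) until another thread sends a value for exactly that key. This is the rendezvous UserInner::call relies on when it blocks in done.receive(&id) until the scheme provider writes back a packet with that id.

use std::collections::BTreeMap;
use std::sync::{Arc, Condvar, Mutex};
use std::thread;

struct WaitMap<K, V> {
    inner: Mutex<BTreeMap<K, V>>,
    cond: Condvar,
}

impl<K: Ord, V> WaitMap<K, V> {
    fn new() -> Self {
        WaitMap { inner: Mutex::new(BTreeMap::new()), cond: Condvar::new() }
    }
    fn send(&self, key: K, value: V) {
        self.inner.lock().unwrap().insert(key, value);
        self.cond.notify_all();
    }
    fn receive(&self, key: &K) -> V {
        let mut map = self.inner.lock().unwrap();
        loop {
            if let Some(value) = map.remove(key) {
                return value;
            }
            // Nothing for this key yet: release the lock and wait for a send
            map = self.cond.wait(map).unwrap();
        }
    }
}

fn main() {
    let replies = Arc::new(WaitMap::new());
    let responder = Arc::clone(&replies);
    // The "provider" answers request 42 from another thread
    thread::spawn(move || responder.send(42u64, "done"));
    // The caller blocks until exactly that reply arrives
    println!("reply for 42: {}", replies.receive(&42));
}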

View file

@ -1,82 +0,0 @@
use collections::vec_deque::VecDeque;
use spin::Mutex;
use sync::WaitCondition;
#[derive(Debug)]
pub struct WaitQueue<T> {
pub inner: Mutex<VecDeque<T>>,
pub condition: WaitCondition,
}
impl<T> WaitQueue<T> {
pub fn new() -> WaitQueue<T> {
WaitQueue {
inner: Mutex::new(VecDeque::new()),
condition: WaitCondition::new()
}
}
pub fn clone(&self) -> WaitQueue<T> where T: Clone {
WaitQueue {
inner: Mutex::new(self.inner.lock().clone()),
condition: WaitCondition::new()
}
}
pub fn is_empty(&self) -> bool {
self.inner.lock().is_empty()
}
pub fn receive(&self) -> T {
loop {
if let Some(value) = self.inner.lock().pop_front() {
return value;
}
self.condition.wait();
}
}
pub fn receive_into(&self, buf: &mut [T], block: bool) -> usize {
let mut i = 0;
if i < buf.len() && block {
buf[i] = self.receive();
i += 1;
}
{
let mut inner = self.inner.lock();
while i < buf.len() {
if let Some(value) = inner.pop_front() {
buf[i] = value;
i += 1;
} else {
break;
}
}
}
i
}
pub fn send(&self, value: T) -> usize {
let len = {
let mut inner = self.inner.lock();
inner.push_back(value);
inner.len()
};
self.condition.notify();
len
}
pub fn send_from(&self, buf: &[T]) -> usize where T: Copy {
let len = {
let mut inner = self.inner.lock();
inner.extend(buf.iter());
inner.len()
};
self.condition.notify();
len
}
}

View file

@ -1,130 +0,0 @@
use arch;
use arch::memory::{allocate_frames, deallocate_frames, Frame};
use arch::paging::{entry, ActivePageTable, PhysicalAddress, VirtualAddress};
use context;
use context::memory::Grant;
use syscall::error::{Error, EFAULT, ENOMEM, EPERM, ESRCH, Result};
use syscall::flag::{MAP_WRITE, MAP_WRITE_COMBINE};
fn enforce_root() -> Result<()> {
let contexts = context::contexts();
let context_lock = contexts.current().ok_or(Error::new(ESRCH))?;
let context = context_lock.read();
if context.euid == 0 {
Ok(())
} else {
Err(Error::new(EPERM))
}
}
pub fn iopl(_level: usize, _stack_base: usize) -> Result<usize> {
enforce_root()?;
//TODO
Ok(0)
}
pub fn physalloc(size: usize) -> Result<usize> {
enforce_root()?;
allocate_frames((size + 4095)/4096).ok_or(Error::new(ENOMEM)).map(|frame| frame.start_address().get())
}
pub fn physfree(physical_address: usize, size: usize) -> Result<usize> {
enforce_root()?;
deallocate_frames(Frame::containing_address(PhysicalAddress::new(physical_address)), (size + 4095)/4096);
    //TODO: Check that no double free occurred
Ok(0)
}
//TODO: verify exclusive access to physical memory
pub fn physmap(physical_address: usize, size: usize, flags: usize) -> Result<usize> {
enforce_root()?;
if size == 0 {
Ok(0)
} else {
let contexts = context::contexts();
let context_lock = contexts.current().ok_or(Error::new(ESRCH))?;
let context = context_lock.read();
let mut grants = context.grants.lock();
let from_address = (physical_address/4096) * 4096;
let offset = physical_address - from_address;
let full_size = ((offset + size + 4095)/4096) * 4096;
let mut to_address = arch::USER_GRANT_OFFSET;
let mut entry_flags = entry::PRESENT | entry::NO_EXECUTE | entry::USER_ACCESSIBLE;
if flags & MAP_WRITE == MAP_WRITE {
entry_flags |= entry::WRITABLE;
}
if flags & MAP_WRITE_COMBINE == MAP_WRITE_COMBINE {
entry_flags |= entry::HUGE_PAGE;
}
for i in 0 .. grants.len() {
let start = grants[i].start_address().get();
if to_address + full_size < start {
grants.insert(i, Grant::physmap(
PhysicalAddress::new(from_address),
VirtualAddress::new(to_address),
full_size,
entry_flags
));
return Ok(to_address + offset);
} else {
let pages = (grants[i].size() + 4095) / 4096;
let end = start + pages * 4096;
to_address = end;
}
}
grants.push(Grant::physmap(
PhysicalAddress::new(from_address),
VirtualAddress::new(to_address),
full_size,
entry_flags
));
Ok(to_address + offset)
}
}
pub fn physunmap(virtual_address: usize) -> Result<usize> {
enforce_root()?;
if virtual_address == 0 {
Ok(0)
} else {
let contexts = context::contexts();
let context_lock = contexts.current().ok_or(Error::new(ESRCH))?;
let context = context_lock.read();
let mut grants = context.grants.lock();
for i in 0 .. grants.len() {
let start = grants[i].start_address().get();
let end = start + grants[i].size();
if virtual_address >= start && virtual_address < end {
grants.remove(i).unmap();
return Ok(0);
}
}
Err(Error::new(EFAULT))
}
}
pub fn virttophys(virtual_address: usize) -> Result<usize> {
enforce_root()?;
let active_table = unsafe { ActivePageTable::new() };
match active_table.translate(VirtualAddress::new(virtual_address)) {
Some(physical_address) => Ok(physical_address.get()),
None => Err(Error::new(EFAULT))
}
}
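
physmap above (like capture_inner earlier) rounds the requested range to whole 4 KiB pages before granting it; here is a standalone sketch of just that arithmetic with a couple of sanity checks.

const PAGE_SIZE: usize = 4096;

// Round the address down to a page boundary, remember the offset into that
// page, and round the total mapped size up to whole pages.
fn page_round(address: usize, size: usize) -> (usize, usize, usize) {
    let from_address = (address / PAGE_SIZE) * PAGE_SIZE;
    let offset = address - from_address;
    let full_size = ((offset + size + PAGE_SIZE - 1) / PAGE_SIZE) * PAGE_SIZE;
    (from_address, offset, full_size)
}

fn main() {
    // 10 bytes starting 8 bytes into a page fit in one page
    assert_eq!(page_round(0x1000 + 8, 10), (0x1000, 8, PAGE_SIZE));
    // 4090 bytes starting 8 bytes into a page spill into a second page
    assert_eq!(page_round(0x1000 + 8, 4090), (0x1000, 8, 2 * PAGE_SIZE));
    println!("page rounding checks passed");
}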

View file

@ -1,356 +0,0 @@
//! Filesystem syscalls
use core::sync::atomic::Ordering;
use context;
use scheme::{self, FileHandle};
use syscall;
use syscall::data::{Packet, Stat};
use syscall::error::*;
use syscall::flag::{MODE_DIR, MODE_FILE};
pub fn file_op(a: usize, fd: FileHandle, c: usize, d: usize) -> Result<usize> {
let (file, pid, uid, gid) = {
let contexts = context::contexts();
let context_lock = contexts.current().ok_or(Error::new(ESRCH))?;
let context = context_lock.read();
let file = context.get_file(fd).ok_or(Error::new(EBADF))?;
(file, context.id, context.euid, context.egid)
};
let scheme = {
let schemes = scheme::schemes();
let scheme = schemes.get(file.scheme).ok_or(Error::new(EBADF))?;
scheme.clone()
};
let mut packet = Packet {
id: 0,
pid: pid.into(),
uid: uid,
gid: gid,
a: a,
b: file.number,
c: c,
d: d
};
scheme.handle(&mut packet);
Error::demux(packet.a)
}
pub fn file_op_slice(a: usize, fd: FileHandle, slice: &[u8]) -> Result<usize> {
file_op(a, fd, slice.as_ptr() as usize, slice.len())
}
pub fn file_op_mut_slice(a: usize, fd: FileHandle, slice: &mut [u8]) -> Result<usize> {
file_op(a, fd, slice.as_mut_ptr() as usize, slice.len())
}
/// Change the current working directory
pub fn chdir(path: &[u8]) -> Result<usize> {
let fd = open(path, syscall::flag::O_RDONLY | syscall::flag::O_DIRECTORY)?;
let mut stat = Stat::default();
let stat_res = file_op_mut_slice(syscall::number::SYS_FSTAT, fd, &mut stat);
let _ = close(fd);
stat_res?;
if stat.st_mode & (MODE_FILE | MODE_DIR) == MODE_DIR {
let contexts = context::contexts();
let context_lock = contexts.current().ok_or(Error::new(ESRCH))?;
let context = context_lock.read();
let canonical = context.canonicalize(path);
*context.cwd.lock() = canonical;
Ok(0)
} else {
Err(Error::new(ENOTDIR))
}
}
/// Get the current working directory
pub fn getcwd(buf: &mut [u8]) -> Result<usize> {
let contexts = context::contexts();
let context_lock = contexts.current().ok_or(Error::new(ESRCH))?;
let context = context_lock.read();
let cwd = context.cwd.lock();
let mut i = 0;
while i < buf.len() && i < cwd.len() {
buf[i] = cwd[i];
i += 1;
}
Ok(i)
}
/// Open syscall
pub fn open(path: &[u8], flags: usize) -> Result<FileHandle> {
let (path_canon, uid, gid, scheme_ns) = {
let contexts = context::contexts();
let context_lock = contexts.current().ok_or(Error::new(ESRCH))?;
let context = context_lock.read();
(context.canonicalize(path), context.euid, context.egid, context.ens)
};
//println!("open {}", unsafe { ::core::str::from_utf8_unchecked(&path_canon) });
let mut parts = path_canon.splitn(2, |&b| b == b':');
let scheme_name_opt = parts.next();
let reference_opt = parts.next();
let (scheme_id, file_id) = {
let scheme_name = scheme_name_opt.ok_or(Error::new(ENODEV))?;
let (scheme_id, scheme) = {
let schemes = scheme::schemes();
let (scheme_id, scheme) = schemes.get_name(scheme_ns, scheme_name).ok_or(Error::new(ENODEV))?;
(scheme_id, scheme.clone())
};
let file_id = scheme.open(reference_opt.unwrap_or(b""), flags, uid, gid)?;
(scheme_id, file_id)
};
let contexts = context::contexts();
let context_lock = contexts.current().ok_or(Error::new(ESRCH))?;
let context = context_lock.read();
context.add_file(::context::file::File {
scheme: scheme_id,
number: file_id,
event: None,
}).ok_or(Error::new(EMFILE))
}
pub fn pipe2(fds: &mut [usize], flags: usize) -> Result<usize> {
if fds.len() >= 2 {
let scheme_id = ::scheme::pipe::PIPE_SCHEME_ID.load(Ordering::SeqCst);
let (read_id, write_id) = ::scheme::pipe::pipe(flags);
let contexts = context::contexts();
let context_lock = contexts.current().ok_or(Error::new(ESRCH))?;
let context = context_lock.read();
let read_fd = context.add_file(::context::file::File {
scheme: scheme_id,
number: read_id,
event: None,
}).ok_or(Error::new(EMFILE))?;
let write_fd = context.add_file(::context::file::File {
scheme: scheme_id,
number: write_id,
event: None,
}).ok_or(Error::new(EMFILE))?;
fds[0] = read_fd.into();
fds[1] = write_fd.into();
Ok(0)
} else {
Err(Error::new(EFAULT))
}
}
/// chmod syscall
pub fn chmod(path: &[u8], mode: u16) -> Result<usize> {
let (path_canon, uid, gid, scheme_ns) = {
let contexts = context::contexts();
let context_lock = contexts.current().ok_or(Error::new(ESRCH))?;
let context = context_lock.read();
(context.canonicalize(path), context.euid, context.egid, context.ens)
};
let mut parts = path_canon.splitn(2, |&b| b == b':');
let scheme_name_opt = parts.next();
let reference_opt = parts.next();
let scheme_name = scheme_name_opt.ok_or(Error::new(ENODEV))?;
let scheme = {
let schemes = scheme::schemes();
let (_scheme_id, scheme) = schemes.get_name(scheme_ns, scheme_name).ok_or(Error::new(ENODEV))?;
scheme.clone()
};
scheme.chmod(reference_opt.unwrap_or(b""), mode, uid, gid)
}
/// rmdir syscall
pub fn rmdir(path: &[u8]) -> Result<usize> {
let (path_canon, uid, gid, scheme_ns) = {
let contexts = context::contexts();
let context_lock = contexts.current().ok_or(Error::new(ESRCH))?;
let context = context_lock.read();
(context.canonicalize(path), context.euid, context.egid, context.ens)
};
let mut parts = path_canon.splitn(2, |&b| b == b':');
let scheme_name_opt = parts.next();
let reference_opt = parts.next();
let scheme_name = scheme_name_opt.ok_or(Error::new(ENODEV))?;
let scheme = {
let schemes = scheme::schemes();
let (_scheme_id, scheme) = schemes.get_name(scheme_ns, scheme_name).ok_or(Error::new(ENODEV))?;
scheme.clone()
};
scheme.rmdir(reference_opt.unwrap_or(b""), uid, gid)
}
/// Unlink syscall
pub fn unlink(path: &[u8]) -> Result<usize> {
let (path_canon, uid, gid, scheme_ns) = {
let contexts = context::contexts();
let context_lock = contexts.current().ok_or(Error::new(ESRCH))?;
let context = context_lock.read();
(context.canonicalize(path), context.euid, context.egid, context.ens)
};
let mut parts = path_canon.splitn(2, |&b| b == b':');
let scheme_name_opt = parts.next();
let reference_opt = parts.next();
let scheme_name = scheme_name_opt.ok_or(Error::new(ENODEV))?;
let scheme = {
let schemes = scheme::schemes();
let (_scheme_id, scheme) = schemes.get_name(scheme_ns, scheme_name).ok_or(Error::new(ENODEV))?;
scheme.clone()
};
scheme.unlink(reference_opt.unwrap_or(b""), uid, gid)
}
/// Close syscall
pub fn close(fd: FileHandle) -> Result<usize> {
let file = {
let contexts = context::contexts();
let context_lock = contexts.current().ok_or(Error::new(ESRCH))?;
let context = context_lock.read();
let file = context.remove_file(fd).ok_or(Error::new(EBADF))?;
file
};
if let Some(event_id) = file.event {
context::event::unregister(fd, file.scheme, event_id);
}
let scheme = {
let schemes = scheme::schemes();
let scheme = schemes.get(file.scheme).ok_or(Error::new(EBADF))?;
scheme.clone()
};
scheme.close(file.number)
}
/// Duplicate file descriptor
pub fn dup(fd: FileHandle, buf: &[u8]) -> Result<FileHandle> {
let file = {
let contexts = context::contexts();
let context_lock = contexts.current().ok_or(Error::new(ESRCH))?;
let context = context_lock.read();
let file = context.get_file(fd).ok_or(Error::new(EBADF))?;
file
};
let new_id = {
let scheme = {
let schemes = scheme::schemes();
let scheme = schemes.get(file.scheme).ok_or(Error::new(EBADF))?;
scheme.clone()
};
scheme.dup(file.number, buf)?
};
let contexts = context::contexts();
let context_lock = contexts.current().ok_or(Error::new(ESRCH))?;
let context = context_lock.read();
context.add_file(::context::file::File {
scheme: file.scheme,
number: new_id,
event: None,
}).ok_or(Error::new(EMFILE))
}
/// Duplicate file descriptor, replacing another
pub fn dup2(fd: FileHandle, new_fd: FileHandle, buf: &[u8]) -> Result<FileHandle> {
if fd == new_fd {
Ok(new_fd)
} else {
let _ = close(new_fd)?;
let file = {
let contexts = context::contexts();
let context_lock = contexts.current().ok_or(Error::new(ESRCH))?;
let context = context_lock.read();
let file = context.get_file(fd).ok_or(Error::new(EBADF))?;
file
};
let new_id = {
let scheme = {
let schemes = scheme::schemes();
let scheme = schemes.get(file.scheme).ok_or(Error::new(EBADF))?;
scheme.clone()
};
scheme.dup(file.number, buf)?
};
let contexts = context::contexts();
let context_lock = contexts.current().ok_or(Error::new(ESRCH))?;
let context = context_lock.read();
context.insert_file(new_fd, ::context::file::File {
scheme: file.scheme,
number: new_id,
event: None,
}).ok_or(Error::new(EBADF))
}
}
/// Register events for file
pub fn fevent(fd: FileHandle, flags: usize) -> Result<usize> {
let file = {
let contexts = context::contexts();
let context_lock = contexts.current().ok_or(Error::new(ESRCH))?;
let context = context_lock.read();
let mut files = context.files.lock();
let mut file = files.get_mut(fd.into()).ok_or(Error::new(EBADF))?.ok_or(Error::new(EBADF))?;
if let Some(event_id) = file.event.take() {
println!("{:?}: {:?}:{}: events already registered: {}", fd, file.scheme, file.number, event_id);
context::event::unregister(fd, file.scheme, event_id);
}
file.clone()
};
let scheme = {
let schemes = scheme::schemes();
let scheme = schemes.get(file.scheme).ok_or(Error::new(EBADF))?;
scheme.clone()
};
let event_id = scheme.fevent(file.number, flags)?;
{
let contexts = context::contexts();
let context_lock = contexts.current().ok_or(Error::new(ESRCH))?;
let context = context_lock.read();
let mut files = context.files.lock();
let mut file = files.get_mut(fd.into()).ok_or(Error::new(EBADF))?.ok_or(Error::new(EBADF))?;
file.event = Some(event_id);
}
context::event::register(fd, file.scheme, event_id);
Ok(0)
}
pub fn funmap(virtual_address: usize) -> Result<usize> {
if virtual_address == 0 {
Ok(0)
} else {
let contexts = context::contexts();
let context_lock = contexts.current().ok_or(Error::new(ESRCH))?;
let context = context_lock.read();
let mut grants = context.grants.lock();
for i in 0 .. grants.len() {
let start = grants[i].start_address().get();
let end = start + grants[i].size();
if virtual_address >= start && virtual_address < end {
grants.remove(i).unmap();
return Ok(0);
}
}
Err(Error::new(EFAULT))
}
}
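
open, chmod, rmdir and unlink above all resolve a scheme the same way; a standalone sketch of that split, where everything before the first ':' names the scheme and the rest is handed to the scheme's own handler (the example paths are made up):

fn split_scheme(path: &[u8]) -> (&[u8], &[u8]) {
    let mut parts = path.splitn(2, |&b| b == b':');
    let scheme = parts.next().unwrap_or(b"");
    let reference = parts.next().unwrap_or(b"");
    (scheme, reference)
}

fn main() {
    assert_eq!(split_scheme(b"file:/bin/sh"), (&b"file"[..], &b"/bin/sh"[..]));
    assert_eq!(split_scheme(b"sys:context"), (&b"sys"[..], &b"context"[..]));
    println!("path splitting checks passed");
}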

View file

@ -1,110 +0,0 @@
use alloc::arc::Arc;
use collections::VecDeque;
use core::intrinsics;
use spin::{Once, RwLock, RwLockReadGuard, RwLockWriteGuard};
use context::{self, Context};
use syscall::error::{Error, Result, ESRCH, EAGAIN, EINVAL};
use syscall::flag::{FUTEX_WAIT, FUTEX_WAKE, FUTEX_REQUEUE};
use syscall::validate::validate_slice_mut;
type FutexList = VecDeque<(usize, Arc<RwLock<Context>>)>;
/// Fast userspace mutex list
static FUTEXES: Once<RwLock<FutexList>> = Once::new();
/// Initialize futexes, called if needed
fn init_futexes() -> RwLock<FutexList> {
RwLock::new(VecDeque::new())
}
/// Get the global futexes list, const
pub fn futexes() -> RwLockReadGuard<'static, FutexList> {
FUTEXES.call_once(init_futexes).read()
}
/// Get the global futexes list, mutable
pub fn futexes_mut() -> RwLockWriteGuard<'static, FutexList> {
FUTEXES.call_once(init_futexes).write()
}
pub fn futex(addr: &mut i32, op: usize, val: i32, val2: usize, addr2: *mut i32) -> Result<usize> {
match op {
FUTEX_WAIT => {
{
let mut futexes = futexes_mut();
let context_lock = {
let contexts = context::contexts();
let context_lock = contexts.current().ok_or(Error::new(ESRCH))?;
context_lock.clone()
};
if unsafe { intrinsics::atomic_load(addr) != val } {
return Err(Error::new(EAGAIN));
}
context_lock.write().block();
futexes.push_back((addr as *mut i32 as usize, context_lock));
}
unsafe { context::switch(); }
Ok(0)
},
FUTEX_WAKE => {
let mut woken = 0;
{
let mut futexes = futexes_mut();
let mut i = 0;
while i < futexes.len() && (woken as i32) < val {
if futexes[i].0 == addr as *mut i32 as usize {
if let Some(futex) = futexes.swap_remove_back(i) {
futex.1.write().unblock();
woken += 1;
}
} else {
i += 1;
}
}
}
Ok(woken)
},
FUTEX_REQUEUE => {
let addr2_safe = validate_slice_mut(addr2, 1).map(|addr2_safe| &mut addr2_safe[0])?;
let mut woken = 0;
let mut requeued = 0;
{
let mut futexes = futexes_mut();
let mut i = 0;
while i < futexes.len() && (woken as i32) < val {
if futexes[i].0 == addr as *mut i32 as usize {
if let Some(futex) = futexes.swap_remove_back(i) {
futex.1.write().unblock();
woken += 1;
}
} else {
i += 1;
}
}
while i < futexes.len() && requeued < val2 {
if futexes[i].0 == addr as *mut i32 as usize {
futexes[i].0 = addr2_safe as *mut i32 as usize;
requeued += 1;
}
i += 1;
}
}
Ok(woken)
},
_ => Err(Error::new(EINVAL))
}
}
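
The FUTEX_WAKE arm above scans the waiter list with swap_remove_back so each removal stays O(1) and at most `val` waiters are woken; a standalone sketch of just that scan, with strings standing in for the blocked contexts:

use std::collections::VecDeque;

fn wake(futexes: &mut VecDeque<(usize, &'static str)>, addr: usize, val: i32) -> usize {
    let mut woken = 0;
    let mut i = 0;
    while i < futexes.len() && (woken as i32) < val {
        if futexes[i].0 == addr {
            // Remove in O(1); the back element takes this slot, so do not advance i
            if let Some((_, name)) = futexes.swap_remove_back(i) {
                println!("waking {}", name);
                woken += 1;
            }
        } else {
            i += 1;
        }
    }
    woken
}

fn main() {
    let mut futexes: VecDeque<(usize, &'static str)> =
        vec![(0x10, "a"), (0x20, "b"), (0x10, "c")].into();
    assert_eq!(wake(&mut futexes, 0x10, i32::max_value()), 2);
    assert_eq!(futexes.len(), 1);
}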

View file

@ -1,121 +0,0 @@
//! Syscall handlers
extern crate syscall;
pub use self::syscall::{data, error, flag, number, scheme};
pub use self::driver::*;
pub use self::fs::*;
pub use self::futex::futex;
pub use self::privilege::*;
pub use self::process::*;
pub use self::time::*;
pub use self::validate::*;
use self::data::TimeSpec;
use self::error::{Error, Result, ENOSYS};
use self::number::*;
use context::ContextId;
use scheme::{FileHandle, SchemeNamespace};
/// Driver syscalls
pub mod driver;
/// Filesystem syscalls
pub mod fs;
/// Fast userspace mutex
pub mod futex;
/// Privilege syscalls
pub mod privilege;
/// Process syscalls
pub mod process;
/// Time syscalls
pub mod time;
/// Validate input
pub mod validate;
#[no_mangle]
pub extern fn syscall(a: usize, b: usize, c: usize, d: usize, e: usize, f: usize, stack: usize) -> usize {
#[inline(always)]
fn inner(a: usize, b: usize, c: usize, d: usize, e: usize, f: usize, stack: usize) -> Result<usize> {
match a & SYS_CLASS {
SYS_CLASS_FILE => {
let fd = FileHandle::from(b);
match a & SYS_ARG {
SYS_ARG_SLICE => file_op_slice(a, fd, validate_slice(c as *const u8, d)?),
SYS_ARG_MSLICE => file_op_mut_slice(a, fd, validate_slice_mut(c as *mut u8, d)?),
_ => match a {
SYS_CLOSE => close(fd),
SYS_DUP => dup(fd, validate_slice(c as *const u8, d)?).map(FileHandle::into),
SYS_DUP2 => dup2(fd, FileHandle::from(c), validate_slice(d as *const u8, e)?).map(FileHandle::into),
SYS_FEVENT => fevent(fd, c),
SYS_FUNMAP => funmap(b),
_ => file_op(a, fd, c, d)
}
}
},
SYS_CLASS_PATH => match a {
SYS_OPEN => open(validate_slice(b as *const u8, c)?, d).map(FileHandle::into),
SYS_CHMOD => chmod(validate_slice(b as *const u8, c)?, d as u16),
SYS_RMDIR => rmdir(validate_slice(b as *const u8, c)?),
SYS_UNLINK => unlink(validate_slice(b as *const u8, c)?),
_ => unreachable!()
},
_ => match a {
SYS_YIELD => sched_yield(),
SYS_NANOSLEEP => nanosleep(validate_slice(b as *const TimeSpec, 1).map(|req| &req[0])?, validate_slice_mut(c as *mut TimeSpec, 1).ok().map(|rem| &mut rem[0])),
SYS_CLOCK_GETTIME => clock_gettime(b, validate_slice_mut(c as *mut TimeSpec, 1).map(|time| &mut time[0])?),
SYS_FUTEX => futex(validate_slice_mut(b as *mut i32, 1).map(|uaddr| &mut uaddr[0])?, c, d as i32, e, f as *mut i32),
SYS_BRK => brk(b),
SYS_GETPID => getpid().map(ContextId::into),
SYS_CLONE => clone(b, stack).map(ContextId::into),
SYS_EXIT => exit((b & 0xFF) << 8),
SYS_KILL => kill(ContextId::from(b), c),
SYS_WAITPID => waitpid(ContextId::from(b), c, d).map(ContextId::into),
SYS_CHDIR => chdir(validate_slice(b as *const u8, c)?),
SYS_EXECVE => exec(validate_slice(b as *const u8, c)?, validate_slice(d as *const [usize; 2], e)?),
SYS_IOPL => iopl(b, stack),
SYS_GETCWD => getcwd(validate_slice_mut(b as *mut u8, c)?),
SYS_GETEGID => getegid(),
SYS_GETENS => getens(),
SYS_GETEUID => geteuid(),
SYS_GETGID => getgid(),
SYS_GETNS => getns(),
SYS_GETUID => getuid(),
SYS_MKNS => mkns(validate_slice(b as *const [usize; 2], c)?),
SYS_SETREUID => setreuid(b as u32, c as u32),
SYS_SETRENS => setrens(SchemeNamespace::from(b), SchemeNamespace::from(c)),
SYS_SETREGID => setregid(b as u32, c as u32),
SYS_PIPE2 => pipe2(validate_slice_mut(b as *mut usize, 2)?, c),
SYS_PHYSALLOC => physalloc(b),
SYS_PHYSFREE => physfree(b, c),
SYS_PHYSMAP => physmap(b, c, d),
SYS_PHYSUNMAP => physunmap(b),
SYS_VIRTTOPHYS => virttophys(b),
_ => Err(Error::new(ENOSYS))
}
}
}
let result = inner(a, b, c, d, e, f, stack);
/*
if let Err(ref err) = result {
let contexts = ::context::contexts();
if let Some(context_lock) = contexts.current() {
let context = context_lock.read();
print!("{}: {}: ", unsafe { ::core::str::from_utf8_unchecked(&context.name.lock()) }, context.id.into());
}
println!("{:X}, {:X}, {:X}, {:X}: {}", a, b, c, d, err);
}
*/
Error::mux(result)
}
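
The dispatcher returns a bare usize, so errors have to be folded into the success range. A hedged, standalone sketch of one such encoding, roughly the shape of Error::mux and Error::demux in the syscall crate (the exact error-range bound used there may differ): negated errno values occupy the top of the usize range.

const EINVAL: usize = 22;
const MAX_ERRNO: usize = 4095; // assumed bound for the sketch

fn mux(result: Result<usize, usize>) -> usize {
    match result {
        Ok(value) => value,
        // Encode the errno as its two's-complement negation
        Err(errno) => errno.wrapping_neg(),
    }
}

fn demux(value: usize) -> Result<usize, usize> {
    let errno = value.wrapping_neg();
    if errno >= 1 && errno <= MAX_ERRNO {
        Err(errno)
    } else {
        Ok(value)
    }
}

fn main() {
    assert_eq!(demux(mux(Ok(42))), Ok(42));
    assert_eq!(demux(mux(Err(EINVAL))), Err(EINVAL));
    println!("mux/demux round-trip checks passed");
}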

View file

@ -1,147 +0,0 @@
use collections::Vec;
use context;
use scheme::{self, SchemeNamespace};
use syscall::error::*;
use syscall::validate::validate_slice;
pub fn getegid() -> Result<usize> {
let contexts = context::contexts();
let context_lock = contexts.current().ok_or(Error::new(ESRCH))?;
let context = context_lock.read();
Ok(context.egid as usize)
}
pub fn getens() -> Result<usize> {
let contexts = context::contexts();
let context_lock = contexts.current().ok_or(Error::new(ESRCH))?;
let context = context_lock.read();
Ok(context.ens.into())
}
pub fn geteuid() -> Result<usize> {
let contexts = context::contexts();
let context_lock = contexts.current().ok_or(Error::new(ESRCH))?;
let context = context_lock.read();
Ok(context.euid as usize)
}
pub fn getgid() -> Result<usize> {
let contexts = context::contexts();
let context_lock = contexts.current().ok_or(Error::new(ESRCH))?;
let context = context_lock.read();
Ok(context.rgid as usize)
}
pub fn getns() -> Result<usize> {
let contexts = context::contexts();
let context_lock = contexts.current().ok_or(Error::new(ESRCH))?;
let context = context_lock.read();
Ok(context.rns.into())
}
pub fn getuid() -> Result<usize> {
let contexts = context::contexts();
let context_lock = contexts.current().ok_or(Error::new(ESRCH))?;
let context = context_lock.read();
Ok(context.ruid as usize)
}
pub fn mkns(name_ptrs: &[[usize; 2]]) -> Result<usize> {
let mut names = Vec::new();
for name_ptr in name_ptrs {
names.push(validate_slice(name_ptr[0] as *const u8, name_ptr[1])?);
}
let (uid, from) = {
let contexts = context::contexts();
let context_lock = contexts.current().ok_or(Error::new(ESRCH))?;
let context = context_lock.read();
(context.euid, context.ens)
};
if uid == 0 {
let to = scheme::schemes_mut().make_ns(from, &names)?;
Ok(to.into())
} else {
Err(Error::new(EACCES))
}
}
pub fn setregid(rgid: u32, egid: u32) -> Result<usize> {
let contexts = context::contexts();
let context_lock = contexts.current().ok_or(Error::new(ESRCH))?;
let mut context = context_lock.write();
if (context.euid == 0
|| rgid as i32 == -1
|| rgid == context.egid
|| rgid == context.rgid)
&& (context.euid == 0
|| egid as i32 == -1
|| egid == context.egid
|| egid == context.rgid)
{
if rgid as i32 != -1 {
context.rgid = rgid;
}
if egid as i32 != -1 {
context.egid = egid;
}
Ok(0)
} else {
Err(Error::new(EPERM))
}
}
pub fn setrens(rns: SchemeNamespace, ens: SchemeNamespace) -> Result<usize> {
let contexts = context::contexts();
let context_lock = contexts.current().ok_or(Error::new(ESRCH))?;
let mut context = context_lock.write();
if (context.euid == 0
|| rns.into() as isize == -1
|| rns == context.ens
|| rns == context.rns)
&& (context.euid == 0
|| ens.into() as isize == -1
|| ens == context.ens
|| ens == context.rns)
{
if rns.into() as isize != -1 {
context.rns = rns;
}
if ens.into() as isize != -1 {
context.ens = ens;
}
Ok(0)
} else {
Err(Error::new(EPERM))
}
}
pub fn setreuid(ruid: u32, euid: u32) -> Result<usize> {
let contexts = context::contexts();
let context_lock = contexts.current().ok_or(Error::new(ESRCH))?;
let mut context = context_lock.write();
if (context.euid == 0
|| ruid as i32 == -1
|| ruid == context.euid
|| ruid == context.ruid)
&& (context.euid == 0
|| euid as i32 == -1
|| euid == context.euid
|| euid == context.ruid)
{
if ruid as i32 != -1 {
context.ruid = ruid;
}
if euid as i32 != -1 {
context.euid = euid;
}
Ok(0)
} else {
Err(Error::new(EPERM))
}
}

View file

@ -1,965 +0,0 @@
//! Process syscalls
use alloc::arc::Arc;
use alloc::boxed::Box;
use collections::{BTreeMap, Vec};
use core::{intrinsics, mem, str};
use core::ops::DerefMut;
use spin::Mutex;
use arch;
use arch::memory::allocate_frame;
use arch::paging::{ActivePageTable, InactivePageTable, Page, VirtualAddress, entry};
use arch::paging::temporary_page::TemporaryPage;
use arch::start::usermode;
use context;
use context::ContextId;
use elf::{self, program_header};
use scheme::{self, FileHandle};
use syscall;
use syscall::data::Stat;
use syscall::error::*;
use syscall::flag::{CLONE_VFORK, CLONE_VM, CLONE_FS, CLONE_FILES, WNOHANG};
use syscall::validate::{validate_slice, validate_slice_mut};
pub fn brk(address: usize) -> Result<usize> {
let contexts = context::contexts();
let context_lock = contexts.current().ok_or(Error::new(ESRCH))?;
let context = context_lock.read();
//println!("{}: {}: BRK {:X}", unsafe { ::core::str::from_utf8_unchecked(&context.name.lock()) },
// context.id.into(), address);
let current = if let Some(ref heap_shared) = context.heap {
heap_shared.with(|heap| {
heap.start_address().get() + heap.size()
})
} else {
panic!("user heap not initialized");
};
if address == 0 {
//println!("Brk query {:X}", current);
Ok(current)
} else if address >= arch::USER_HEAP_OFFSET {
//TODO: out of memory errors
if let Some(ref heap_shared) = context.heap {
heap_shared.with(|heap| {
heap.resize(address - arch::USER_HEAP_OFFSET, true);
});
} else {
panic!("user heap not initialized");
}
//println!("Brk resize {:X}", address);
Ok(address)
} else {
//println!("Brk no mem");
Err(Error::new(ENOMEM))
}
}
pub fn clone(flags: usize, stack_base: usize) -> Result<ContextId> {
let ppid;
let pid;
{
let ruid;
let rgid;
let rns;
let euid;
let egid;
let ens;
let mut cpu_id = None;
let arch;
let vfork;
let mut kfx_option = None;
let mut kstack_option = None;
let mut offset = 0;
let mut image = vec![];
let mut heap_option = None;
let mut stack_option = None;
let mut tls_option = None;
let grants;
let name;
let cwd;
let env;
let files;
// Copy from old process
{
let contexts = context::contexts();
let context_lock = contexts.current().ok_or(Error::new(ESRCH))?;
let context = context_lock.read();
ppid = context.id;
ruid = context.ruid;
rgid = context.rgid;
rns = context.rns;
euid = context.euid;
egid = context.egid;
ens = context.ens;
if flags & CLONE_VM == CLONE_VM {
cpu_id = context.cpu_id;
}
arch = context.arch.clone();
if let Some(ref fx) = context.kfx {
let mut new_fx = unsafe { Box::from_raw(::alloc::heap::allocate(512, 16) as *mut [u8; 512]) };
for (new_b, b) in new_fx.iter_mut().zip(fx.iter()) {
*new_b = *b;
}
kfx_option = Some(new_fx);
}
if let Some(ref stack) = context.kstack {
                offset = stack_base - stack.as_ptr() as usize - mem::size_of::<usize>(); // Leave room for the clone return address
let mut new_stack = stack.clone();
unsafe {
let func_ptr = new_stack.as_mut_ptr().offset(offset as isize);
*(func_ptr as *mut usize) = arch::interrupt::syscall::clone_ret as usize;
}
kstack_option = Some(new_stack);
}
if flags & CLONE_VM == CLONE_VM {
for memory_shared in context.image.iter() {
image.push(memory_shared.clone());
}
if let Some(ref heap_shared) = context.heap {
heap_option = Some(heap_shared.clone());
}
} else {
for memory_shared in context.image.iter() {
memory_shared.with(|memory| {
let mut new_memory = context::memory::Memory::new(
VirtualAddress::new(memory.start_address().get() + arch::USER_TMP_OFFSET),
memory.size(),
entry::PRESENT | entry::NO_EXECUTE | entry::WRITABLE,
false
);
unsafe {
intrinsics::copy(memory.start_address().get() as *const u8,
new_memory.start_address().get() as *mut u8,
memory.size());
}
new_memory.remap(memory.flags());
image.push(new_memory.to_shared());
});
}
if let Some(ref heap_shared) = context.heap {
heap_shared.with(|heap| {
let mut new_heap = context::memory::Memory::new(
VirtualAddress::new(arch::USER_TMP_HEAP_OFFSET),
heap.size(),
entry::PRESENT | entry::NO_EXECUTE | entry::WRITABLE,
false
);
unsafe {
intrinsics::copy(heap.start_address().get() as *const u8,
new_heap.start_address().get() as *mut u8,
heap.size());
}
new_heap.remap(heap.flags());
heap_option = Some(new_heap.to_shared());
});
}
}
if let Some(ref stack) = context.stack {
let mut new_stack = context::memory::Memory::new(
VirtualAddress::new(arch::USER_TMP_STACK_OFFSET),
stack.size(),
entry::PRESENT | entry::NO_EXECUTE | entry::WRITABLE,
false
);
unsafe {
intrinsics::copy(stack.start_address().get() as *const u8,
new_stack.start_address().get() as *mut u8,
stack.size());
}
new_stack.remap(stack.flags());
stack_option = Some(new_stack);
}
if let Some(ref tls) = context.tls {
let mut new_tls = context::memory::Tls {
master: tls.master,
file_size: tls.file_size,
mem: context::memory::Memory::new(
VirtualAddress::new(arch::USER_TMP_TLS_OFFSET),
tls.mem.size(),
entry::PRESENT | entry::NO_EXECUTE | entry::WRITABLE,
true
)
};
unsafe {
intrinsics::copy(tls.master.get() as *const u8,
new_tls.mem.start_address().get() as *mut u8,
tls.file_size);
}
new_tls.mem.remap(tls.mem.flags());
tls_option = Some(new_tls);
}
if flags & CLONE_VM == CLONE_VM {
grants = context.grants.clone();
} else {
grants = Arc::new(Mutex::new(Vec::new()));
}
if flags & CLONE_VM == CLONE_VM {
name = context.name.clone();
} else {
name = Arc::new(Mutex::new(context.name.lock().clone()));
}
if flags & CLONE_FS == CLONE_FS {
cwd = context.cwd.clone();
} else {
cwd = Arc::new(Mutex::new(context.cwd.lock().clone()));
}
if flags & CLONE_VM == CLONE_VM {
env = context.env.clone();
} else {
let mut new_env = BTreeMap::new();
for item in context.env.lock().iter() {
new_env.insert(item.0.clone(), Arc::new(Mutex::new(item.1.lock().clone())));
}
env = Arc::new(Mutex::new(new_env));
}
if flags & CLONE_FILES == CLONE_FILES {
files = context.files.clone();
} else {
files = Arc::new(Mutex::new(context.files.lock().clone()));
}
}
// If not cloning files, dup to get a new number from scheme
// This has to be done outside the context lock to prevent deadlocks
if flags & CLONE_FILES == 0 {
for (_fd, mut file_option) in files.lock().iter_mut().enumerate() {
let new_file_option = if let Some(file) = *file_option {
let result = {
let scheme = {
let schemes = scheme::schemes();
let scheme = schemes.get(file.scheme).ok_or(Error::new(EBADF))?;
scheme.clone()
};
let result = scheme.dup(file.number, b"clone");
result
};
match result {
Ok(new_number) => {
Some(context::file::File {
scheme: file.scheme,
number: new_number,
event: None,
})
},
Err(_err) => {
None
}
}
} else {
None
};
*file_option = new_file_option;
}
}
// If vfork, block the current process
// This has to be done after the operations that may require context switches
if flags & CLONE_VFORK == CLONE_VFORK {
let contexts = context::contexts();
let context_lock = contexts.current().ok_or(Error::new(ESRCH))?;
let mut context = context_lock.write();
context.block();
vfork = true;
} else {
vfork = false;
}
// Set up new process
{
let mut contexts = context::contexts_mut();
let context_lock = contexts.new_context()?;
let mut context = context_lock.write();
pid = context.id;
context.ppid = ppid;
context.ruid = ruid;
context.rgid = rgid;
context.rns = rns;
context.euid = euid;
context.egid = egid;
context.ens = ens;
context.cpu_id = cpu_id;
context.status = context::Status::Runnable;
context.vfork = vfork;
context.arch = arch;
let mut active_table = unsafe { ActivePageTable::new() };
let mut temporary_page = TemporaryPage::new(Page::containing_address(VirtualAddress::new(arch::USER_TMP_MISC_OFFSET)));
let mut new_table = {
let frame = allocate_frame().expect("no more frames in syscall::clone new_table");
InactivePageTable::new(frame, &mut active_table, &mut temporary_page)
};
context.arch.set_page_table(unsafe { new_table.address() });
// Copy kernel mapping
{
let frame = active_table.p4()[510].pointed_frame().expect("kernel table not mapped");
let flags = active_table.p4()[510].flags();
active_table.with(&mut new_table, &mut temporary_page, |mapper| {
mapper.p4_mut()[510].set(frame, flags);
});
}
if let Some(fx) = kfx_option.take() {
context.arch.set_fx(fx.as_ptr() as usize);
context.kfx = Some(fx);
}
// Set kernel stack
if let Some(stack) = kstack_option.take() {
context.arch.set_stack(stack.as_ptr() as usize + offset);
context.kstack = Some(stack);
}
// Setup heap
if flags & CLONE_VM == CLONE_VM {
// Copy user image mapping, if found
if ! image.is_empty() {
let frame = active_table.p4()[0].pointed_frame().expect("user image not mapped");
let flags = active_table.p4()[0].flags();
active_table.with(&mut new_table, &mut temporary_page, |mapper| {
mapper.p4_mut()[0].set(frame, flags);
});
}
context.image = image;
// Copy user heap mapping, if found
if let Some(heap_shared) = heap_option {
let frame = active_table.p4()[1].pointed_frame().expect("user heap not mapped");
let flags = active_table.p4()[1].flags();
active_table.with(&mut new_table, &mut temporary_page, |mapper| {
mapper.p4_mut()[1].set(frame, flags);
});
context.heap = Some(heap_shared);
}
// Copy grant mapping
if ! grants.lock().is_empty() {
let frame = active_table.p4()[2].pointed_frame().expect("user grants not mapped");
let flags = active_table.p4()[2].flags();
active_table.with(&mut new_table, &mut temporary_page, |mapper| {
mapper.p4_mut()[2].set(frame, flags);
});
}
context.grants = grants;
} else {
// Copy percpu mapping
for cpu_id in 0..::cpu_count() {
extern {
/// The starting byte of the thread data segment
static mut __tdata_start: u8;
/// The ending byte of the thread BSS segment
static mut __tbss_end: u8;
}
let size = unsafe { & __tbss_end as *const _ as usize - & __tdata_start as *const _ as usize };
let start = arch::KERNEL_PERCPU_OFFSET + arch::KERNEL_PERCPU_SIZE * cpu_id;
let end = start + size;
let start_page = Page::containing_address(VirtualAddress::new(start));
let end_page = Page::containing_address(VirtualAddress::new(end - 1));
for page in Page::range_inclusive(start_page, end_page) {
let frame = active_table.translate_page(page).expect("kernel percpu not mapped");
active_table.with(&mut new_table, &mut temporary_page, |mapper| {
let result = mapper.map_to(page, frame, entry::PRESENT | entry::NO_EXECUTE | entry::WRITABLE);
// Ignore result due to operating on inactive table
unsafe { result.ignore(); }
});
}
}
// Move copy of image
for memory_shared in image.iter_mut() {
memory_shared.with(|memory| {
let start = VirtualAddress::new(memory.start_address().get() - arch::USER_TMP_OFFSET + arch::USER_OFFSET);
memory.move_to(start, &mut new_table, &mut temporary_page);
});
}
context.image = image;
// Move copy of heap
if let Some(heap_shared) = heap_option {
heap_shared.with(|heap| {
heap.move_to(VirtualAddress::new(arch::USER_HEAP_OFFSET), &mut new_table, &mut temporary_page);
});
context.heap = Some(heap_shared);
}
}
// Setup user stack
if let Some(mut stack) = stack_option {
stack.move_to(VirtualAddress::new(arch::USER_STACK_OFFSET), &mut new_table, &mut temporary_page);
context.stack = Some(stack);
}
// Setup user TLS
if let Some(mut tls) = tls_option {
tls.mem.move_to(VirtualAddress::new(arch::USER_TLS_OFFSET), &mut new_table, &mut temporary_page);
context.tls = Some(tls);
}
context.name = name;
context.cwd = cwd;
context.env = env;
context.files = files;
}
}
unsafe { context::switch(); }
Ok(pid)
}
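
The branches above boil down to four sharing decisions. A small, self-contained sketch follows; the flag values are Linux-style assumptions made only for illustration, while the kernel imports the real ones from syscall::flag.

```rust
// Assumed flag values for illustration only; the kernel imports the real ones from syscall::flag.
const CLONE_VM: usize = 0x100;
const CLONE_FS: usize = 0x200;
const CLONE_FILES: usize = 0x400;
const CLONE_VFORK: usize = 0x4000;

/// What the child shares with the parent, mirroring the branches in clone() above.
fn sharing(flags: usize) -> (bool, bool, bool, bool) {
    (
        flags & CLONE_VM == CLONE_VM,       // address space: image, heap, grants, name, env
        flags & CLONE_FS == CLONE_FS,       // current working directory
        flags & CLONE_FILES == CLONE_FILES, // file table (otherwise every file is dup'd)
        flags & CLONE_VFORK == CLONE_VFORK, // parent blocks until the child execs or exits
    )
}

fn main() {
    // A vfork-style clone shares everything and suspends the parent.
    assert_eq!(
        sharing(CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_VFORK),
        (true, true, true, true)
    );
    // A plain fork-style clone (flags == 0) copies everything instead.
    assert_eq!(sharing(0), (false, false, false, false));
}
```
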
fn empty(context: &mut context::Context, reaping: bool) {
if reaping {
// Memory should already be unmapped
assert!(context.image.is_empty());
assert!(context.heap.is_none());
assert!(context.stack.is_none());
assert!(context.tls.is_none());
} else {
// Unmap previous image, heap, grants, stack, and tls
context.image.clear();
drop(context.heap.take());
drop(context.stack.take());
drop(context.tls.take());
}
// FIXME: Looks like a race condition.
// Is it possible for Arc::strong_count to return 1 to two contexts that exit at the
// same time, or return 2 to both, thus either double freeing or leaking the grants?
if Arc::strong_count(&context.grants) == 1 {
let mut grants = context.grants.lock();
for grant in grants.drain(..) {
if reaping {
println!("{}: {}: Grant should not exist: {:?}", context.id.into(), unsafe { ::core::str::from_utf8_unchecked(&context.name.lock()) }, grant);
let mut new_table = unsafe { InactivePageTable::from_address(context.arch.get_page_table()) };
let mut temporary_page = TemporaryPage::new(Page::containing_address(VirtualAddress::new(arch::USER_TMP_GRANT_OFFSET)));
grant.unmap_inactive(&mut new_table, &mut temporary_page);
} else {
grant.unmap();
}
}
}
}
pub fn exec(path: &[u8], arg_ptrs: &[[usize; 2]]) -> Result<usize> {
let entry;
let mut sp = arch::USER_STACK_OFFSET + arch::USER_STACK_SIZE - 256;
{
let mut args = Vec::new();
for arg_ptr in arg_ptrs {
let arg = validate_slice(arg_ptr[0] as *const u8, arg_ptr[1])?;
args.push(arg.to_vec()); // Must be moved into kernel space before exec unmaps all memory
}
let (uid, gid, canonical) = {
let contexts = context::contexts();
let context_lock = contexts.current().ok_or(Error::new(ESRCH))?;
let context = context_lock.read();
(context.euid, context.egid, context.canonicalize(path))
};
let file = syscall::open(&canonical, syscall::flag::O_RDONLY)?;
let mut stat = Stat::default();
syscall::file_op_mut_slice(syscall::number::SYS_FSTAT, file, &mut stat)?;
let mut perm = stat.st_mode & 0o7;
if stat.st_uid == uid {
perm |= (stat.st_mode >> 6) & 0o7;
}
if stat.st_gid == gid {
perm |= (stat.st_mode >> 3) & 0o7;
}
if uid == 0 {
perm |= 0o7;
}
if perm & 0o1 != 0o1 {
let _ = syscall::close(file);
return Err(Error::new(EACCES));
}
//TODO: Only read elf header, not entire file. Then read required segments
let mut data = vec![0; stat.st_size as usize];
syscall::file_op_mut_slice(syscall::number::SYS_READ, file, &mut data)?;
let _ = syscall::close(file);
match elf::Elf::from(&data) {
Ok(elf) => {
entry = elf.entry();
drop(path); // Drop so that usage is not allowed after unmapping context
drop(arg_ptrs); // Drop so that usage is not allowed after unmapping context
let contexts = context::contexts();
let (vfork, ppid, files) = {
let context_lock = contexts.current().ok_or(Error::new(ESRCH))?;
let mut context = context_lock.write();
// Set name
context.name = Arc::new(Mutex::new(canonical));
empty(&mut context, false);
if stat.st_mode & syscall::flag::MODE_SETUID == syscall::flag::MODE_SETUID {
context.euid = stat.st_uid;
}
if stat.st_mode & syscall::flag::MODE_SETGID == syscall::flag::MODE_SETGID {
context.egid = stat.st_gid;
}
// Map and copy new segments
let mut tls_option = None;
for segment in elf.segments() {
if segment.p_type == program_header::PT_LOAD {
let mut memory = context::memory::Memory::new(
VirtualAddress::new(segment.p_vaddr as usize),
segment.p_memsz as usize,
entry::NO_EXECUTE | entry::WRITABLE,
true
);
unsafe {
// Copy file data
intrinsics::copy((elf.data.as_ptr() as usize + segment.p_offset as usize) as *const u8,
segment.p_vaddr as *mut u8,
segment.p_filesz as usize);
}
let mut flags = entry::NO_EXECUTE | entry::USER_ACCESSIBLE;
if segment.p_flags & program_header::PF_R == program_header::PF_R {
flags.insert(entry::PRESENT);
}
// W ^ X. If it is executable, do not allow it to be writable, even if requested
if segment.p_flags & program_header::PF_X == program_header::PF_X {
flags.remove(entry::NO_EXECUTE);
} else if segment.p_flags & program_header::PF_W == program_header::PF_W {
flags.insert(entry::WRITABLE);
}
memory.remap(flags);
context.image.push(memory.to_shared());
} else if segment.p_type == program_header::PT_TLS {
let memory = context::memory::Memory::new(
VirtualAddress::new(arch::USER_TCB_OFFSET),
4096,
entry::NO_EXECUTE | entry::WRITABLE | entry::USER_ACCESSIBLE,
true
);
unsafe { *(arch::USER_TCB_OFFSET as *mut usize) = arch::USER_TLS_OFFSET + segment.p_memsz as usize; }
context.image.push(memory.to_shared());
tls_option = Some((
VirtualAddress::new(segment.p_vaddr as usize),
segment.p_filesz as usize,
segment.p_memsz as usize
));
}
}
// Map heap
context.heap = Some(context::memory::Memory::new(
VirtualAddress::new(arch::USER_HEAP_OFFSET),
0,
entry::NO_EXECUTE | entry::WRITABLE | entry::USER_ACCESSIBLE,
true
).to_shared());
// Map stack
context.stack = Some(context::memory::Memory::new(
VirtualAddress::new(arch::USER_STACK_OFFSET),
arch::USER_STACK_SIZE,
entry::NO_EXECUTE | entry::WRITABLE | entry::USER_ACCESSIBLE,
true
));
// Map TLS
if let Some((master, file_size, size)) = tls_option {
let tls = context::memory::Tls {
master: master,
file_size: file_size,
mem: context::memory::Memory::new(
VirtualAddress::new(arch::USER_TLS_OFFSET),
size,
entry::NO_EXECUTE | entry::WRITABLE | entry::USER_ACCESSIBLE,
true
)
};
unsafe {
// Copy file data
intrinsics::copy(master.get() as *const u8,
tls.mem.start_address().get() as *mut u8,
file_size);
}
context.tls = Some(tls);
}
// Push arguments
let mut arg_size = 0;
for arg in args.iter().rev() {
sp -= mem::size_of::<usize>();
unsafe { *(sp as *mut usize) = arch::USER_ARG_OFFSET + arg_size; }
sp -= mem::size_of::<usize>();
unsafe { *(sp as *mut usize) = arg.len(); }
arg_size += arg.len();
}
sp -= mem::size_of::<usize>();
unsafe { *(sp as *mut usize) = args.len(); }
if arg_size > 0 {
let mut memory = context::memory::Memory::new(
VirtualAddress::new(arch::USER_ARG_OFFSET),
arg_size,
entry::NO_EXECUTE | entry::WRITABLE,
true
);
let mut arg_offset = 0;
for arg in args.iter().rev() {
unsafe {
intrinsics::copy(arg.as_ptr(),
(arch::USER_ARG_OFFSET + arg_offset) as *mut u8,
arg.len());
}
arg_offset += arg.len();
}
memory.remap(entry::NO_EXECUTE | entry::USER_ACCESSIBLE);
context.image.push(memory.to_shared());
}
let files = Arc::new(Mutex::new(context.files.lock().clone()));
context.files = files.clone();
let vfork = context.vfork;
context.vfork = false;
(vfork, context.ppid, files)
};
// Duplicate current files using b"exec", close previous
for (fd, mut file_option) in files.lock().iter_mut().enumerate() {
let new_file_option = if let Some(file) = *file_option {
// Duplicate
let result = {
let scheme_option = {
let schemes = scheme::schemes();
schemes.get(file.scheme).map(|scheme| scheme.clone())
};
if let Some(scheme) = scheme_option {
let result = scheme.dup(file.number, b"exec");
result
} else {
Err(Error::new(EBADF))
}
};
// Close
{
if let Some(event_id) = file.event {
context::event::unregister(FileHandle::from(fd), file.scheme, event_id);
}
let scheme_option = {
let schemes = scheme::schemes();
schemes.get(file.scheme).map(|scheme| scheme.clone())
};
if let Some(scheme) = scheme_option {
let _ = scheme.close(file.number);
}
}
// Return new descriptor
match result {
Ok(new_number) => {
Some(context::file::File {
scheme: file.scheme,
number: new_number,
event: None,
})
},
Err(_err) => {
None
}
}
} else {
None
};
*file_option = new_file_option;
}
if vfork {
if let Some(context_lock) = contexts.get(ppid) {
let mut context = context_lock.write();
if ! context.unblock() {
println!("{:?} not blocked for exec vfork unblock", ppid);
}
} else {
println!("{:?} not found for exec vfork unblock", ppid);
}
}
},
Err(err) => {
println!("failed to execute {}: {}", unsafe { str::from_utf8_unchecked(path) }, err);
return Err(Error::new(ENOEXEC));
}
}
}
// Go to usermode
unsafe { usermode(entry, sp); }
}
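
The permission check near the top of exec() combines the classic owner/group/other bits before the segments are mapped (where W^X is enforced separately by stripping WRITABLE from executable segments). A standalone restatement of that mode check, as a hypothetical helper rather than kernel code:

```rust
/// Hypothetical restatement of the mode check in exec() above.
fn may_exec(st_mode: u16, st_uid: u32, st_gid: u32, uid: u32, gid: u32) -> bool {
    let mut perm = st_mode & 0o7;         // start with the "other" bits
    if st_uid == uid {
        perm |= (st_mode >> 6) & 0o7;     // add owner bits when the caller owns the file
    }
    if st_gid == gid {
        perm |= (st_mode >> 3) & 0o7;     // add group bits on a group match
    }
    if uid == 0 {
        perm |= 0o7;                      // root passes everything
    }
    perm & 0o1 == 0o1                     // execution requires the execute bit
}

fn main() {
    assert!(may_exec(0o750, 1000, 1000, 1000, 100));   // owner may execute
    assert!(may_exec(0o750, 1000, 1000, 2000, 1000));  // group member may execute
    assert!(!may_exec(0o750, 1000, 1000, 2000, 2000)); // others may not (EACCES)
    assert!(may_exec(0o700, 1000, 1000, 0, 0));        // root always may
}
```
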
pub fn exit(status: usize) -> ! {
{
let context_lock = {
let contexts = context::contexts();
let context_lock = contexts.current().ok_or(Error::new(ESRCH)).expect("exit failed to find context");
context_lock.clone()
};
let mut close_files = Vec::new();
let (pid, ppid) = {
let mut context = context_lock.write();
// FIXME: Looks like a race condition.
// Is it possible for Arc::strong_count to return 1 to two contexts that exit at the
// same time, or return 2 to both, thus either double closing or leaking the files?
if Arc::strong_count(&context.files) == 1 {
mem::swap(context.files.lock().deref_mut(), &mut close_files);
}
context.files = Arc::new(Mutex::new(Vec::new()));
(context.id, context.ppid)
};
// Files must be closed while context is valid so that messages can be passed
for (fd, file_option) in close_files.drain(..).enumerate() {
if let Some(file) = file_option {
if let Some(event_id) = file.event {
context::event::unregister(FileHandle::from(fd), file.scheme, event_id);
}
let scheme_option = {
let schemes = scheme::schemes();
schemes.get(file.scheme).map(|scheme| scheme.clone())
};
if let Some(scheme) = scheme_option {
let _ = scheme.close(file.number);
}
}
}
// Transfer child processes to parent
{
let contexts = context::contexts();
for (_id, context_lock) in contexts.iter() {
let mut context = context_lock.write();
if context.ppid == pid {
context.ppid = ppid;
context.vfork = false;
}
}
}
let (vfork, children) = {
let mut context = context_lock.write();
empty(&mut context, false);
let vfork = context.vfork;
context.vfork = false;
context.status = context::Status::Exited(status);
let children = context.waitpid.receive_all();
(vfork, children)
};
{
let contexts = context::contexts();
if let Some(parent_lock) = contexts.get(ppid) {
let waitpid = {
let mut parent = parent_lock.write();
if vfork {
if ! parent.unblock() {
println!("{:?} not blocked for exit vfork unblock", ppid);
}
}
parent.waitpid.clone()
};
for (c_pid, c_status) in children {
waitpid.send(c_pid, c_status);
}
waitpid.send(pid, status);
} else {
println!("{:?} not found for exit vfork unblock", ppid);
}
}
}
unsafe { context::switch(); }
unreachable!();
}
pub fn getpid() -> Result<ContextId> {
let contexts = context::contexts();
let context_lock = contexts.current().ok_or(Error::new(ESRCH))?;
let context = context_lock.read();
Ok(context.id)
}
pub fn kill(pid: ContextId, sig: usize) -> Result<usize> {
let (ruid, euid) = {
let contexts = context::contexts();
let context_lock = contexts.current().ok_or(Error::new(ESRCH))?;
let context = context_lock.read();
(context.ruid, context.euid)
};
if sig > 0 && sig <= 0x7F {
let contexts = context::contexts();
let context_lock = contexts.get(pid).ok_or(Error::new(ESRCH))?;
let mut context = context_lock.write();
if euid == 0
|| euid == context.ruid
|| ruid == context.ruid
{
context.pending.push_back(sig as u8);
Ok(0)
} else {
Err(Error::new(EPERM))
}
} else {
Err(Error::new(EINVAL))
}
}
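
The signal-permission rule above, restated as a standalone helper (hypothetical, for illustration only): root may signal anyone; otherwise the sender's effective or real UID must match the target's real UID.

```rust
/// Hypothetical restatement of the permission rule in kill() above.
fn may_signal(sender_ruid: u32, sender_euid: u32, target_ruid: u32) -> bool {
    sender_euid == 0 || sender_euid == target_ruid || sender_ruid == target_ruid
}

fn main() {
    assert!(may_signal(1000, 0, 42));      // root may signal anyone
    assert!(may_signal(1000, 1000, 1000)); // same user
    assert!(!may_signal(1000, 1000, 42));  // unrelated user is refused (EPERM)
}
```
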
fn reap(pid: ContextId) -> Result<ContextId> {
// Spin until not running
let mut running = true;
while running {
{
let contexts = context::contexts();
let context_lock = contexts.get(pid).ok_or(Error::new(ESRCH))?;
let context = context_lock.read();
running = context.running;
}
arch::interrupt::pause();
}
let mut contexts = context::contexts_mut();
let context_lock = contexts.remove(pid).ok_or(Error::new(ESRCH))?;
{
let mut context = context_lock.write();
empty(&mut context, true);
}
drop(context_lock);
Ok(pid)
}
pub fn waitpid(pid: ContextId, status_ptr: usize, flags: usize) -> Result<ContextId> {
let waitpid = {
let contexts = context::contexts();
let context_lock = contexts.current().ok_or(Error::new(ESRCH))?;
let context = context_lock.read();
context.waitpid.clone()
};
let mut tmp = [0];
let status_slice = if status_ptr != 0 {
validate_slice_mut(status_ptr as *mut usize, 1)?
} else {
&mut tmp
};
if pid.into() == 0 {
if flags & WNOHANG == WNOHANG {
if let Some((w_pid, status)) = waitpid.receive_any_nonblock() {
status_slice[0] = status;
reap(w_pid)
} else {
Ok(ContextId::from(0))
}
} else {
let (w_pid, status) = waitpid.receive_any();
status_slice[0] = status;
reap(w_pid)
}
} else {
if flags & WNOHANG == WNOHANG {
if let Some(status) = waitpid.receive_nonblock(&pid) {
status_slice[0] = status;
reap(pid)
} else {
Ok(ContextId::from(0))
}
} else {
let status = waitpid.receive(&pid);
status_slice[0] = status;
reap(pid)
}
}
}
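
The return convention above is worth spelling out: with WNOHANG set and no exited child available, waitpid() reports pid 0 instead of blocking; otherwise the child's status is written through status_ptr and the child is reaped. A tiny model follows; the WNOHANG value is an assumption here, since the kernel imports it from syscall::flag.

```rust
// Assumed flag value for illustration; the kernel imports WNOHANG from syscall::flag.
const WNOHANG: usize = 1;

/// Model of waitpid()'s return convention above: (reported pid, status).
fn wait_model(flags: usize, exited: Option<(usize, usize)>) -> Option<(usize, usize)> {
    match exited {
        Some((pid, status)) => Some((pid, status)),         // a child was reaped
        None if flags & WNOHANG == WNOHANG => Some((0, 0)), // non-blocking: report pid 0
        None => None,                                       // blocking path: would wait instead
    }
}

fn main() {
    assert_eq!(wait_model(WNOHANG, None), Some((0, 0)));
    assert_eq!(wait_model(0, Some((7, 3))), Some((7, 3)));
    assert_eq!(wait_model(0, None), None);
}
```
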


@ -1,53 +0,0 @@
use arch;
use context;
use syscall::data::TimeSpec;
use syscall::error::*;
use syscall::flag::{CLOCK_REALTIME, CLOCK_MONOTONIC};
pub fn clock_gettime(clock: usize, time: &mut TimeSpec) -> Result<usize> {
match clock {
CLOCK_REALTIME => {
let arch_time = arch::time::realtime();
time.tv_sec = arch_time.0 as i64;
time.tv_nsec = arch_time.1 as i32;
Ok(0)
},
CLOCK_MONOTONIC => {
let arch_time = arch::time::monotonic();
time.tv_sec = arch_time.0 as i64;
time.tv_nsec = arch_time.1 as i32;
Ok(0)
},
_ => Err(Error::new(EINVAL))
}
}
pub fn nanosleep(req: &TimeSpec, rem_opt: Option<&mut TimeSpec>) -> Result<usize> {
let start = arch::time::monotonic();
let sum = start.1 + req.tv_nsec as u64;
let end = (start.0 + req.tv_sec as u64 + sum / 1000000000, sum % 1000000000);
{
let contexts = context::contexts();
let context_lock = contexts.current().ok_or(Error::new(ESRCH))?;
let mut context = context_lock.write();
context.wake = Some(end);
context.block();
}
unsafe { context::switch(); }
if let Some(mut rem) = rem_opt {
//TODO let current = arch::time::monotonic();
rem.tv_sec = 0;
rem.tv_nsec = 0;
}
Ok(0)
}
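
The deadline computed at the top of nanosleep() carries seconds and nanoseconds separately so the nanosecond field never reaches one billion. A standalone restatement with a worked example:

```rust
/// Restatement of the wake-up deadline computed in nanosleep() above.
fn deadline(start: (u64, u64), req_sec: u64, req_nsec: u64) -> (u64, u64) {
    let sum = start.1 + req_nsec;
    (start.0 + req_sec + sum / 1_000_000_000, sum % 1_000_000_000)
}

fn main() {
    // 0.9 s into second 10, sleeping 0.2 s, carries into second 11.
    assert_eq!(deadline((10, 900_000_000), 0, 200_000_000), (11, 100_000_000));
    // No carry when the nanoseconds stay below one billion.
    assert_eq!(deadline((10, 100_000_000), 2, 300_000_000), (12, 400_000_000));
}
```
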
pub fn sched_yield() -> Result<usize> {
unsafe { context::switch(); }
Ok(0)
}


@ -1,44 +0,0 @@
use core::{mem, slice};
use arch::paging::{ActivePageTable, Page, VirtualAddress, entry};
use syscall::error::*;
fn validate(address: usize, size: usize, flags: entry::EntryFlags) -> Result<()> {
let active_table = unsafe { ActivePageTable::new() };
let start_page = Page::containing_address(VirtualAddress::new(address));
let end_page = Page::containing_address(VirtualAddress::new(address + size - 1));
for page in Page::range_inclusive(start_page, end_page) {
if let Some(page_flags) = active_table.translate_page_flags(page) {
if ! page_flags.contains(flags) {
//println!("{:X}: Not {:?}", page.start_address().get(), flags);
return Err(Error::new(EFAULT));
}
} else {
//println!("{:X}: Not found", page.start_address().get());
return Err(Error::new(EFAULT));
}
}
Ok(())
}
/// Convert a pointer and length to slice, if valid
pub fn validate_slice<T>(ptr: *const T, len: usize) -> Result<&'static [T]> {
if len == 0 {
Ok(&[])
} else {
validate(ptr as usize, len * mem::size_of::<T>(), entry::PRESENT /* TODO | entry::USER_ACCESSIBLE */)?;
Ok(unsafe { slice::from_raw_parts(ptr, len) })
}
}
/// Convert a pointer and length to slice, if valid
pub fn validate_slice_mut<T>(ptr: *mut T, len: usize) -> Result<&'static mut [T]> {
if len == 0 {
Ok(&mut [])
} else {
validate(ptr as usize, len * mem::size_of::<T>(), entry::PRESENT | entry::WRITABLE /* TODO | entry::USER_ACCESSIBLE */)?;
Ok(unsafe { slice::from_raw_parts_mut(ptr, len) })
}
}
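
validate() walks every page touched by the buffer; the end page is derived from the last byte (address + size - 1), so a buffer that straddles a page boundary is checked on both sides. A standalone sketch of that page-span computation, assuming the usual 4 KiB x86_64 page size:

```rust
// Assumed page size for illustration; the kernel's paging code defines the real value.
const PAGE_SIZE: usize = 4096;

/// First and last page index touched by a buffer, mirroring validate() above.
/// Assumes size > 0 (validate_slice short-circuits empty slices before calling validate).
fn page_span(address: usize, size: usize) -> (usize, usize) {
    (address / PAGE_SIZE, (address + size - 1) / PAGE_SIZE)
}

fn main() {
    assert_eq!(page_span(0x1000, 4096), (1, 1)); // exactly one page
    assert_eq!(page_span(0x1ff8, 16), (1, 2));   // straddles a page boundary
    assert_eq!(page_span(0x2000, 1), (2, 2));    // a single byte still touches its page
}
```
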


@ -1,29 +0,0 @@
use syscall::{self, Error, EBADF};
/// Test stdio
#[test]
fn stdio() {
// Test opening stdin
assert_eq!(syscall::open(b"debug:", 0), Ok(0));
// Test opening stdout
assert_eq!(syscall::open(b"debug:", 0), Ok(1));
// Test opening stderr
assert_eq!(syscall::open(b"debug:", 0), Ok(2));
// Test writing stdout
let stdout_str = b"STDOUT";
assert_eq!(syscall::write(1, stdout_str), Ok(stdout_str.len()));
// Test writing stderr
let stderr_str = b"STDERR";
assert_eq!(syscall::write(2, stderr_str), Ok(stderr_str.len()));
}
/// Test that invalid reads/writes cause errors
#[test]
fn invalid_path() {
assert_eq!(syscall::read(999, &mut []), Err(Error::new(EBADF)));
assert_eq!(syscall::write(999, &[]), Err(Error::new(EBADF)));
}