Cleanup Redox repo, update Rust, remove old target

Jeremy Soller 2017-01-03 15:55:00 -07:00
parent 98c76d36fd
commit 7cd2eff74c
97 changed files with 24 additions and 79 deletions

kernel/Cargo.toml
@@ -0,0 +1,37 @@
[package]
name = "kernel"
version = "0.1.0"
[lib]
name = "kernel"
path = "src/lib.rs"
crate-type = ["staticlib"]
[dependencies]
bitflags = "*"
spin = "*"
redox_syscall = { path = "../syscall/" }
[dependencies.goblin]
git = "https://github.com/m4b/goblin.git"
default-features = false
features = ["elf32", "elf64"]
[dev-dependencies]
arch_test = { path = "arch/test" }
[target.'cfg(target_arch = "arm")'.dependencies]
arch_arm = { path = "arch/arm" }
[target.'cfg(target_arch = "x86_64")'.dependencies]
arch_x86_64 = { path = "arch/x86_64" }
[features]
default = []
live = []
[profile.dev]
panic = "unwind"
[profile.release]
panic = "abort"

@@ -0,0 +1,8 @@
[package]
name = "arch_arm"
version = "0.1.0"
[dependencies]
bitflags = "*"
hole_list_allocator = { path = "../../../crates/hole_list_allocator"}
spin = "*"

@@ -0,0 +1,9 @@
#[derive(Debug)]
pub struct Context;
impl Context {
pub fn new() -> Self {
Context
}
}

@@ -0,0 +1,70 @@
/// Memcpy
///
/// Copy N bytes of memory from one location to another.
#[no_mangle]
pub unsafe extern fn memcpy(dest: *mut u8, src: *const u8,
n: usize) -> *mut u8 {
let mut i = 0;
while i < n {
*dest.offset(i as isize) = *src.offset(i as isize);
i += 1;
}
dest
}
/// Memmove
///
/// Copy N bytes of memory from src to dest. The memory areas may overlap.
#[no_mangle]
pub unsafe extern fn memmove(dest: *mut u8, src: *const u8,
n: usize) -> *mut u8 {
if src < dest as *const u8 {
let mut i = n;
while i != 0 {
i -= 1;
*dest.offset(i as isize) = *src.offset(i as isize);
}
} else {
let mut i = 0;
while i < n {
*dest.offset(i as isize) = *src.offset(i as isize);
i += 1;
}
}
dest
}
/// Memset
///
/// Fill a block of memory with a specified value.
#[no_mangle]
pub unsafe extern fn memset(s: *mut u8, c: i32, n: usize) -> *mut u8 {
let mut i = 0;
while i < n {
*s.offset(i as isize) = c as u8;
i += 1;
}
s
}
/// Memcmp
///
/// Compare two blocks of memory.
#[no_mangle]
pub unsafe extern fn memcmp(s1: *const u8, s2: *const u8, n: usize) -> i32 {
let mut i = 0;
while i < n {
let a = *s1.offset(i as isize);
let b = *s2.offset(i as isize);
if a != b {
return a as i32 - b as i32
}
i += 1;
}
0
}
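
A quick sanity check of the overlap handling in memmove above: when src < dest the copy runs backwards, so shifting a buffer right within itself never clobbers unread bytes. A hypothetical test, not part of this commit:

```rust
// Shift "abcde" right by one inside the same buffer.
let mut buf = *b"abcdef";
let dst = buf.as_mut_ptr();
unsafe { memmove(dst.offset(1), dst, 5); }
assert_eq!(&buf, b"aabcde");
```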

@@ -0,0 +1,30 @@
//! Interrupt instructions
/// Clear interrupts
#[inline(always)]
pub unsafe fn disable() {
}
/// Set interrupts
#[inline(always)]
pub unsafe fn enable() {
}
/// Set interrupts and halt
#[inline(always)]
pub unsafe fn enable_and_halt() {
halt();
}
/// Halt instruction
#[inline(always)]
pub unsafe fn halt() {
//asm!("wfi" : : : : "volatile");
asm!("nop" : : : : "volatile");
}
/// Get a stack trace
//TODO: Check for stack being mapped before dereferencing
#[inline(never)]
pub unsafe fn stack_trace() {
}

@@ -0,0 +1,39 @@
//! Architecture support for ARM
#![feature(asm)]
#![feature(lang_items)]
#![feature(naked_functions)]
#![no_std]
extern crate hole_list_allocator as allocator;
#[macro_use]
extern crate bitflags;
extern crate spin;
/// Print to console
#[macro_export]
macro_rules! print {
($($arg:tt)*) => ({});
}
/// Print with new line to console
#[macro_export]
macro_rules! println {
($fmt:expr) => (print!(concat!($fmt, "\n")));
($fmt:expr, $($arg:tt)*) => (print!(concat!($fmt, "\n"), $($arg)*));
}
/// Context switching
pub mod context;
/// Memset, memcpy, etc.
pub mod externs;
/// Interrupt handling
pub mod interrupt;
/// Panic support
pub mod panic;
/// Initialization function
pub mod start;

@@ -0,0 +1,60 @@
ENTRY(kstart)
OUTPUT_ARCH(arm)
OUTPUT_FORMAT(elf32-littlearm)
KERNEL_OFFSET = 0;
SECTIONS {
. = KERNEL_OFFSET;
.text : AT(ADDR(.text) - KERNEL_OFFSET) {
__text_start = .;
*(.text*)
. = ALIGN(4096);
__text_end = .;
}
.rodata : AT(ADDR(.rodata) - KERNEL_OFFSET) {
__rodata_start = .;
*(.rodata*)
. = ALIGN(4096);
__rodata_end = .;
}
.data : AT(ADDR(.data) - KERNEL_OFFSET) {
__data_start = .;
*(.data*)
. = ALIGN(4096);
__data_end = .;
}
.tdata : AT(ADDR(.tdata) - KERNEL_OFFSET) {
__tdata_start = .;
*(.tdata*)
. = ALIGN(4096);
__tdata_end = .;
__tbss_start = .;
*(.tbss*)
. += 8;
. = ALIGN(4096);
__tbss_end = .;
}
.bss : AT(ADDR(.bss) - KERNEL_OFFSET) {
__bss_start = .;
*(.bss*)
. = ALIGN(4096);
__bss_end = .;
}
__end = .;
/DISCARD/ : {
*(.comment*)
*(.debug*)
*(.eh_frame*)
*(.gcc_except_table*)
*(.note*)
*(.rel.eh_frame*)
}
}
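
The __text_start/__bss_end style symbols defined by this script are visible to Rust as C externs. A sketch of assumed usage, e.g. zeroing the BSS during early boot (not part of this diff):

```rust
// Assumed usage of the linker-provided section symbols:
extern "C" {
    static mut __bss_start: u8;
    static mut __bss_end: u8;
}

unsafe fn zero_bss() {
    let mut ptr = &mut __bss_start as *mut u8;
    let end = &mut __bss_end as *mut u8;
    while ptr < end {
        *ptr = 0;
        ptr = ptr.offset(1);
    }
}
```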

@@ -0,0 +1,38 @@
//! Intrinsics for panic handling
use interrupt;
#[cfg(not(test))]
#[lang = "eh_personality"]
extern "C" fn eh_personality() {}
#[cfg(not(test))]
/// Required to handle panics
#[lang = "panic_fmt"]
extern "C" fn panic_fmt(fmt: ::core::fmt::Arguments, file: &str, line: u32) -> ! {
println!("PANIC: {}", fmt);
println!("FILE: {}", file);
println!("LINE: {}", line);
unsafe { interrupt::stack_trace(); }
println!("HALT");
loop {
unsafe { interrupt::halt(); }
}
}
#[allow(non_snake_case)]
#[no_mangle]
/// Required to handle panics
pub extern "C" fn _Unwind_Resume() -> ! {
loop {
unsafe { interrupt::halt(); }
}
}
/// Required for linker
#[no_mangle]
pub extern "C" fn __aeabi_unwind_cpp_pr0() {
loop {}
}

@@ -0,0 +1,27 @@
const SERIAL_BASE: *mut u8 = 0x16000000 as *mut u8;
const SERIAL_FLAG_REGISTER: *const u8 = 0x16000018 as *const u8;
const SERIAL_BUFFER_FULL: u8 = (1 << 5);
unsafe fn putc(c: u8) {
/* Wait until the serial buffer is empty */
while *SERIAL_FLAG_REGISTER & SERIAL_BUFFER_FULL == SERIAL_BUFFER_FULL {}
/* Put our character, c, into the serial buffer */
*SERIAL_BASE = c;
}
unsafe fn puts(string: &str) {
for b in string.bytes() {
putc(b);
}
}
#[no_mangle]
#[naked]
pub unsafe extern fn kstart() -> ! {
asm!("mov sp, 0x18000" : : : : "volatile");
puts("TEST\r\n");
loop {}
}
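
putc polls the memory-mapped flag register through plain dereferences, which the compiler is in principle free to cache. A hedged alternative using explicit volatile accesses (a sketch, not what this commit does):

```rust
use core::ptr::{read_volatile, write_volatile};

// Same loop as putc, but with MMIO accesses the optimizer cannot elide.
unsafe fn putc_volatile(c: u8) {
    while read_volatile(SERIAL_FLAG_REGISTER) & SERIAL_BUFFER_FULL == SERIAL_BUFFER_FULL {}
    write_volatile(SERIAL_BASE, c);
}
```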

@@ -0,0 +1,3 @@
[package]
name = "arch_test"
version = "0.1.0"

@@ -0,0 +1,43 @@
//! Interrupt instructions
static mut INTERRUPTS_ENABLED: bool = false;
/// Clear interrupts
#[inline(always)]
pub unsafe fn disable() {
println!("CLEAR INTERRUPTS");
INTERRUPTS_ENABLED = false;
}
/// Set interrupts
#[inline(always)]
pub unsafe fn enable() {
println!("SET INTERRUPTS");
INTERRUPTS_ENABLED = true;
}
/// Halt instruction
#[inline(always)]
pub unsafe fn halt() {
assert!(INTERRUPTS_ENABLED);
::std::thread::yield_now();
}
/// Pause instruction
#[inline(always)]
pub unsafe fn pause() {
}
/// Set interrupts and nop
#[inline(always)]
pub unsafe fn enable_and_nop() {
enable();
}
/// Set interrupts and halt
#[inline(always)]
pub unsafe fn enable_and_halt() {
enable();
halt();
}

@@ -0,0 +1,43 @@
//! Architecture support for testing
pub use std::io;
/// Print to console
#[macro_export]
macro_rules! print {
($($arg:tt)*) => ({
use $crate::io::Write;
let _ = write!($crate::io::stdout(), $($arg)*);
});
}
/// Print with new line to console
#[macro_export]
macro_rules! println {
($fmt:expr) => (print!(concat!($fmt, "\n")));
($fmt:expr, $($arg:tt)*) => (print!(concat!($fmt, "\n"), $($arg)*));
}
/// Create an interrupt function that can safely run rust code
#[macro_export]
macro_rules! interrupt {
($name:ident, $func:block) => {
pub unsafe extern fn $name () {
unsafe fn inner() {
$func
}
// Call inner rust function
inner();
}
};
}
/// Interrupt instructions
pub mod interrupt;
/// Initialization and main function
pub mod main;
/// Time functions
pub mod time;
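
A hypothetical use of the interrupt! macro defined above; it expands to an unsafe extern fn whose body is wrapped in an inner Rust function:

```rust
// Hypothetical handler built with the interrupt! macro:
interrupt!(timer, {
    println!("timer tick");
});
```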

@@ -0,0 +1,11 @@
/// This function is where the kernel sets up IRQ handlers
/// It is incredibly unsafe, and should be minimal in nature
extern {
fn kmain() -> !;
}
#[no_mangle]
pub unsafe extern fn kstart() -> ! {
kmain();
}

@@ -0,0 +1,7 @@
pub fn monotonic() -> (u64, u64) {
(0, 0)
}
pub fn realtime() -> (u64, u64) {
(0, 0)
}

@@ -0,0 +1,15 @@
[package]
name = "arch_x86_64"
version = "0.1.0"
[dependencies]
bitflags = "*"
hole_list_allocator = { path = "../../../crates/hole_list_allocator/" }
io = { path = "../../../crates/io/" }
raw-cpuid = { git = "https://github.com/gz/rust-cpuid" }
spin = "*"
redox_syscall = { path = "../../../syscall/" }
[dependencies.x86]
version = "0.7"
default-features = false

@@ -0,0 +1,77 @@
#[repr(packed)]
pub struct DrhdFault {
pub sts: u32,
pub ctrl: u32,
pub data: u32,
pub addr: [u32; 2],
_rsv: [u64; 2],
pub log: u64,
}
#[repr(packed)]
pub struct DrhdProtectedMemory {
pub en: u32,
pub low_base: u32,
pub low_limit: u32,
pub high_base: u64,
pub high_limit: u64,
}
#[repr(packed)]
pub struct DrhdInvalidation {
pub queue_head: u64,
pub queue_tail: u64,
pub queue_addr: u64,
_rsv: u32,
pub cmpl_sts: u32,
pub cmpl_ctrl: u32,
pub cmpl_data: u32,
pub cmpl_addr: [u32; 2],
}
#[repr(packed)]
pub struct DrhdPageRequest {
pub queue_head: u64,
pub queue_tail: u64,
pub queue_addr: u64,
_rsv: u32,
pub sts: u32,
pub ctrl: u32,
pub data: u32,
pub addr: [u32; 2],
}
#[repr(packed)]
pub struct DrhdMtrrVariable {
pub base: u64,
pub mask: u64,
}
#[repr(packed)]
pub struct DrhdMtrr {
pub cap: u64,
pub def_type: u64,
pub fixed: [u64; 11],
pub variable: [DrhdMtrrVariable; 10],
}
#[repr(packed)]
pub struct Drhd {
pub version: u32,
_rsv: u32,
pub cap: u64,
pub ext_cap: u64,
pub gl_cmd: u32,
pub gl_sts: u32,
pub root_table: u64,
pub ctx_cmd: u64,
_rsv1: u32,
pub fault: DrhdFault,
_rsv2: u32,
pub pm: DrhdProtectedMemory,
pub invl: DrhdInvalidation,
_rsv3: u64,
pub intr_table: u64,
pub page_req: DrhdPageRequest,
pub mtrr: DrhdMtrr,
}

@@ -0,0 +1,183 @@
use core::mem;
use super::sdt::Sdt;
use self::drhd::Drhd;
use memory::Frame;
use paging::{entry, ActivePageTable, PhysicalAddress};
pub mod drhd;
/// The DMA Remapping Table
#[derive(Debug)]
pub struct Dmar {
sdt: &'static Sdt,
pub addr_width: u8,
pub flags: u8,
_rsv: [u8; 10],
}
impl Dmar {
pub fn new(sdt: &'static Sdt) -> Option<Dmar> {
if &sdt.signature == b"DMAR" && sdt.data_len() >= 12 { // Not valid without the address width and flags fields
let addr_width = unsafe { *(sdt.data_address() as *const u8) };
let flags = unsafe { *(sdt.data_address() as *const u8).offset(1) };
let rsv: [u8; 10] = unsafe { *((sdt.data_address() as *const u8).offset(2) as *const [u8; 10]) };
Some(Dmar {
sdt: sdt,
addr_width: addr_width,
flags: flags,
_rsv: rsv,
})
} else {
None
}
}
pub fn iter(&self) -> DmarIter {
DmarIter {
sdt: self.sdt,
i: 12 // Skip address width and flags
}
}
}
/// DMAR DMA Remapping Hardware Unit Definition
// TODO: Implement iterator on DmarDrhd scope
#[derive(Debug)]
#[repr(packed)]
pub struct DmarDrhd {
kind: u16,
length: u16,
flags: u8,
_rsv: u8,
segment: u16,
base: u64,
}
impl DmarDrhd {
pub fn get(&self, active_table: &mut ActivePageTable) -> &'static mut Drhd {
let result = active_table.identity_map(Frame::containing_address(PhysicalAddress::new(self.base as usize)), entry::PRESENT | entry::WRITABLE | entry::NO_EXECUTE);
result.flush(active_table);
unsafe { &mut *(self.base as *mut Drhd) }
}
}
/// DMAR Reserved Memory Region Reporting
// TODO: Implement iterator on DmarRmrr scope
#[derive(Debug)]
#[repr(packed)]
pub struct DmarRmrr {
kind: u16,
length: u16,
_rsv: u16,
segment: u16,
base: u64,
limit: u64,
}
/// DMAR Root Port ATS Capability Reporting
// TODO: Implement iterator on DmarAtsr scope
#[derive(Debug)]
#[repr(packed)]
pub struct DmarAtsr {
kind: u16,
length: u16,
flags: u8,
_rsv: u8,
segment: u16,
}
/// DMAR Remapping Hardware Static Affinity
#[derive(Debug)]
#[repr(packed)]
pub struct DmarRhsa {
kind: u16,
length: u16,
_rsv: u32,
base: u64,
domain: u32,
}
/// DMAR ACPI Name-space Device Declaration
// TODO: Implement iterator on DmarAndd object name
#[derive(Debug)]
#[repr(packed)]
pub struct DmarAndd {
kind: u16,
length: u16,
_rsv: [u8; 3],
acpi_dev: u8,
}
/// DMAR Entries
#[derive(Debug)]
pub enum DmarEntry {
Drhd(&'static DmarDrhd),
InvalidDrhd(usize),
Rmrr(&'static DmarRmrr),
InvalidRmrr(usize),
Atsr(&'static DmarAtsr),
InvalidAtsr(usize),
Rhsa(&'static DmarRhsa),
InvalidRhsa(usize),
Andd(&'static DmarAndd),
InvalidAndd(usize),
Unknown(u16)
}
pub struct DmarIter {
sdt: &'static Sdt,
i: usize
}
impl Iterator for DmarIter {
type Item = DmarEntry;
fn next(&mut self) -> Option<Self::Item> {
if self.i + 4 <= self.sdt.data_len() {
let entry_type = unsafe { *((self.sdt.data_address() as *const u8).offset(self.i as isize) as *const u16) };
let entry_len = unsafe { *((self.sdt.data_address() as *const u8).offset(self.i as isize + 2) as *const u16) } as usize;
if self.i + entry_len <= self.sdt.data_len() {
let item = match entry_type {
0 => if entry_len >= mem::size_of::<DmarDrhd>() {
DmarEntry::Drhd(unsafe { &*((self.sdt.data_address() + self.i) as *const DmarDrhd) })
} else {
DmarEntry::InvalidDrhd(entry_len)
},
1 => if entry_len >= mem::size_of::<DmarRmrr>() {
DmarEntry::Rmrr(unsafe { &*((self.sdt.data_address() + self.i) as *const DmarRmrr) })
} else {
DmarEntry::InvalidRmrr(entry_len)
},
2 => if entry_len >= mem::size_of::<DmarAtsr>() {
DmarEntry::Atsr(unsafe { &*((self.sdt.data_address() + self.i) as *const DmarAtsr) })
} else {
DmarEntry::InvalidAtsr(entry_len)
},
3 => if entry_len == mem::size_of::<DmarRhsa>() {
DmarEntry::Rhsa(unsafe { &*((self.sdt.data_address() + self.i) as *const DmarRhsa) })
} else {
DmarEntry::InvalidRhsa(entry_len)
},
4 => if entry_len >= mem::size_of::<DmarAndd>() {
DmarEntry::Andd(unsafe { &*((self.sdt.data_address() + self.i) as *const DmarAndd) })
} else {
DmarEntry::InvalidAndd(entry_len)
},
_ => DmarEntry::Unknown(entry_type)
};
self.i += entry_len;
Some(item)
} else {
None
}
} else {
None
}
}
}

@@ -0,0 +1,96 @@
use core::{mem, ptr};
use super::sdt::Sdt;
#[repr(packed)]
#[derive(Debug)]
pub struct Fadt {
pub header: Sdt,
pub firmware_ctrl: u32,
pub dsdt: u32,
// field used in ACPI 1.0; no longer in use, for compatibility only
reserved: u8,
pub preferred_power_management: u8,
pub sci_interrupt: u16,
pub smi_command_port: u32,
pub acpi_enable: u8,
pub acpi_disable: u8,
pub s4_bios_req: u8,
pub pstate_control: u8,
pub pm1a_event_block: u32,
pub pm1b_event_block: u32,
pub pm1a_control_block: u32,
pub pm1b_control_block: u32,
pub pm2_control_block: u32,
pub pm_timer_block: u32,
pub gpe0_block: u32,
pub gpe1_block: u32,
pub pm1_event_length: u8,
pub pm1_control_length: u8,
pub pm2_control_length: u8,
pub pm_timer_length: u8,
pub gpe0_length: u8,
pub gpe1_length: u8,
pub gpe1_base: u8,
pub c_state_control: u8,
pub worst_c2_latency: u16,
pub worst_c3_latency: u16,
pub flush_size: u16,
pub flush_stride: u16,
pub duty_offset: u8,
pub duty_width: u8,
pub day_alarm: u8,
pub month_alarm: u8,
pub century: u8,
// reserved in ACPI 1.0; used since ACPI 2.0+
pub boot_architecture_flags: u16,
reserved2: u8,
pub flags: u32,
}
/* ACPI 2 structure
#[repr(packed)]
#[derive(Clone, Copy, Debug, Default)]
pub struct GenericAddressStructure {
address_space: u8,
bit_width: u8,
bit_offset: u8,
access_size: u8,
address: u64,
}
{
// 12 byte structure; see below for details
pub reset_reg: GenericAddressStructure,
pub reset_value: u8,
reserved3: [u8; 3],
// 64bit pointers - Available on ACPI 2.0+
pub x_firmware_control: u64,
pub x_dsdt: u64,
pub x_pm1a_event_block: GenericAddressStructure,
pub x_pm1b_event_block: GenericAddressStructure,
pub x_pm1a_control_block: GenericAddressStructure,
pub x_pm1b_control_block: GenericAddressStructure,
pub x_pm2_control_block: GenericAddressStructure,
pub x_pm_timer_block: GenericAddressStructure,
pub x_gpe0_block: GenericAddressStructure,
pub x_gpe1_block: GenericAddressStructure,
}
*/
impl Fadt {
pub fn new(sdt: &'static Sdt) -> Option<Fadt> {
if &sdt.signature == b"FACP" && sdt.length as usize >= mem::size_of::<Fadt>() {
Some(unsafe { ptr::read((sdt as *const Sdt) as *const Fadt) })
} else {
None
}
}
}

@@ -0,0 +1,133 @@
use core::mem;
use super::sdt::Sdt;
/// The Multiple APIC Descriptor Table
#[derive(Debug)]
pub struct Madt {
sdt: &'static Sdt,
pub local_address: u32,
pub flags: u32
}
impl Madt {
pub fn new(sdt: &'static Sdt) -> Option<Madt> {
if &sdt.signature == b"APIC" && sdt.data_len() >= 8 { //Not valid if no local address and flags
let local_address = unsafe { *(sdt.data_address() as *const u32) };
let flags = unsafe { *(sdt.data_address() as *const u32).offset(1) };
Some(Madt {
sdt: sdt,
local_address: local_address,
flags: flags
})
} else {
None
}
}
pub fn iter(&self) -> MadtIter {
MadtIter {
sdt: self.sdt,
i: 8 // Skip local controller address and flags
}
}
}
/// MADT Local APIC
#[derive(Debug)]
#[repr(packed)]
pub struct MadtLocalApic {
/// Processor ID
pub processor: u8,
/// Local APIC ID
pub id: u8,
/// Flags. 1 means that the processor is enabled
pub flags: u32
}
/// MADT I/O APIC
#[derive(Debug)]
#[repr(packed)]
pub struct MadtIoApic {
/// I/O APIC ID
pub id: u8,
/// reserved
reserved: u8,
/// I/O APIC address
pub address: u32,
/// Global system interrupt base
pub gsi_base: u32
}
/// MADT Interrupt Source Override
#[derive(Debug)]
#[repr(packed)]
pub struct MadtIntSrcOverride {
/// Bus Source
pub bus_source: u8,
/// IRQ Source
pub irq_source: u8,
/// Global system interrupt base
pub gsi_base: u32,
/// Flags
pub flags: u16
}
/// MADT Entries
#[derive(Debug)]
pub enum MadtEntry {
LocalApic(&'static MadtLocalApic),
InvalidLocalApic(usize),
IoApic(&'static MadtIoApic),
InvalidIoApic(usize),
IntSrcOverride(&'static MadtIntSrcOverride),
InvalidIntSrcOverride(usize),
Unknown(u8)
}
pub struct MadtIter {
sdt: &'static Sdt,
i: usize
}
impl Iterator for MadtIter {
type Item = MadtEntry;
fn next(&mut self) -> Option<Self::Item> {
if self.i + 1 < self.sdt.data_len() {
let entry_type = unsafe { *(self.sdt.data_address() as *const u8).offset(self.i as isize) };
let entry_len = unsafe { *(self.sdt.data_address() as *const u8).offset(self.i as isize + 1) } as usize;
if self.i + entry_len <= self.sdt.data_len() {
let item = match entry_type {
0 => if entry_len == mem::size_of::<MadtLocalApic>() + 2 {
MadtEntry::LocalApic(unsafe { &*((self.sdt.data_address() + self.i + 2) as *const MadtLocalApic) })
} else {
MadtEntry::InvalidLocalApic(entry_len)
},
1 => if entry_len == mem::size_of::<MadtIoApic>() + 2 {
MadtEntry::IoApic(unsafe { &*((self.sdt.data_address() + self.i + 2) as *const MadtIoApic) })
} else {
MadtEntry::InvalidIoApic(entry_len)
},
2 => if entry_len == mem::size_of::<MadtIntSrcOverride>() + 2 {
MadtEntry::IntSrcOverride(unsafe { &*((self.sdt.data_address() + self.i + 2) as *const MadtIntSrcOverride) })
} else {
MadtEntry::InvalidIntSrcOverride(entry_len)
},
_ => MadtEntry::Unknown(entry_type)
};
self.i += entry_len;
Some(item)
} else {
None
}
} else {
None
}
}
}

@@ -0,0 +1,284 @@
//! # ACPI
//! Code to parse the ACPI tables
use core::intrinsics::{atomic_load, atomic_store};
use core::sync::atomic::Ordering;
use device::local_apic::LOCAL_APIC;
use interrupt;
use memory::{allocate_frames, Frame};
use paging::{entry, ActivePageTable, Page, PhysicalAddress, VirtualAddress};
use start::{kstart_ap, CPU_COUNT, AP_READY};
use self::dmar::{Dmar, DmarEntry};
use self::fadt::Fadt;
use self::madt::{Madt, MadtEntry};
use self::rsdt::Rsdt;
use self::sdt::Sdt;
use self::xsdt::Xsdt;
pub mod dmar;
pub mod fadt;
pub mod madt;
pub mod rsdt;
pub mod sdt;
pub mod xsdt;
const TRAMPOLINE: usize = 0x7E00;
const AP_STARTUP: usize = TRAMPOLINE + 512;
pub fn init_sdt(sdt: &'static Sdt, active_table: &mut ActivePageTable) {
print!(" ");
for &c in sdt.signature.iter() {
print!("{}", c as char);
}
if let Some(fadt) = Fadt::new(sdt) {
println!(": {:#?}", fadt);
} else if let Some(madt) = Madt::new(sdt) {
println!(": {:>08X}: {}", madt.local_address, madt.flags);
let mut local_apic = unsafe { &mut LOCAL_APIC };
let me = local_apic.id() as u8;
if local_apic.x2 {
println!(" X2APIC {}", me);
} else {
println!(" XAPIC {}: {:>08X}", me, local_apic.address);
}
let trampoline_frame = Frame::containing_address(PhysicalAddress::new(TRAMPOLINE));
let trampoline_page = Page::containing_address(VirtualAddress::new(TRAMPOLINE));
// Map trampoline
let result = active_table.map_to(trampoline_page, trampoline_frame, entry::PRESENT | entry::WRITABLE);
result.flush(active_table);
for madt_entry in madt.iter() {
println!(" {:?}", madt_entry);
match madt_entry {
MadtEntry::LocalApic(ap_local_apic) => if ap_local_apic.id == me {
println!(" This is my local APIC");
} else {
if ap_local_apic.flags & 1 == 1 {
// Increase CPU ID
CPU_COUNT.fetch_add(1, Ordering::SeqCst);
// Allocate a stack
let stack_start = allocate_frames(64).expect("no more frames in acpi stack_start").start_address().get() + ::KERNEL_OFFSET;
let stack_end = stack_start + 64 * 4096;
let ap_ready = TRAMPOLINE as *mut u64;
let ap_cpu_id = unsafe { ap_ready.offset(1) };
let ap_page_table = unsafe { ap_ready.offset(2) };
let ap_stack_start = unsafe { ap_ready.offset(3) };
let ap_stack_end = unsafe { ap_ready.offset(4) };
let ap_code = unsafe { ap_ready.offset(5) };
// Set the ap_ready to 0, volatile
unsafe { atomic_store(ap_ready, 0) };
unsafe { atomic_store(ap_cpu_id, ap_local_apic.id as u64) };
unsafe { atomic_store(ap_page_table, active_table.address() as u64) };
unsafe { atomic_store(ap_stack_start, stack_start as u64) };
unsafe { atomic_store(ap_stack_end, stack_end as u64) };
unsafe { atomic_store(ap_code, kstart_ap as u64) };
AP_READY.store(false, Ordering::SeqCst);
print!(" AP {}:", ap_local_apic.id);
// Send INIT IPI
{
let mut icr = 0x4500;
if local_apic.x2 {
icr |= (ap_local_apic.id as u64) << 32;
} else {
icr |= (ap_local_apic.id as u64) << 56;
}
print!(" IPI...");
local_apic.set_icr(icr);
}
// Send START IPI
{
//Start at 0x0800:0000 => 0x8000. Hopefully the bootloader code is still there
let ap_segment = (AP_STARTUP >> 12) & 0xFF;
let mut icr = 0x4600 | ap_segment as u64;
if local_apic.x2 {
icr |= (ap_local_apic.id as u64) << 32;
} else {
icr |= (ap_local_apic.id as u64) << 56;
}
print!(" SIPI...");
local_apic.set_icr(icr);
}
// Wait for trampoline ready
print!(" Wait...");
while unsafe { atomic_load(ap_ready) } == 0 {
interrupt::pause();
}
print!(" Trampoline...");
while ! AP_READY.load(Ordering::SeqCst) {
interrupt::pause();
}
println!(" Ready");
active_table.flush_all();
} else {
println!(" CPU Disabled");
}
},
_ => ()
}
}
// Unmap trampoline
let result = active_table.unmap(trampoline_page);
result.flush(active_table);
} else if let Some(dmar) = Dmar::new(sdt) {
println!(": {}: {}", dmar.addr_width, dmar.flags);
for dmar_entry in dmar.iter() {
println!(" {:?}", dmar_entry);
match dmar_entry {
DmarEntry::Drhd(dmar_drhd) => {
let drhd = dmar_drhd.get(active_table);
println!("VER: {:X}", drhd.version);
println!("CAP: {:X}", drhd.cap);
println!("EXT_CAP: {:X}", drhd.ext_cap);
println!("GCMD: {:X}", drhd.gl_cmd);
println!("GSTS: {:X}", drhd.gl_sts);
println!("RT: {:X}", drhd.root_table);
},
_ => ()
}
}
} else {
println!(": Unknown");
}
}
/// Parse the ACPI tables to gather CPU, interrupt, and timer information
pub unsafe fn init(active_table: &mut ActivePageTable) -> Option<Acpi> {
let start_addr = 0xE0000;
let end_addr = 0xFFFFF;
// Map all of the ACPI RSDP space
{
let start_frame = Frame::containing_address(PhysicalAddress::new(start_addr));
let end_frame = Frame::containing_address(PhysicalAddress::new(end_addr));
for frame in Frame::range_inclusive(start_frame, end_frame) {
let page = Page::containing_address(VirtualAddress::new(frame.start_address().get()));
let result = active_table.map_to(page, frame, entry::PRESENT | entry::NO_EXECUTE);
result.flush(active_table);
}
}
// Search for RSDP
if let Some(rsdp) = RSDP::search(start_addr, end_addr) {
let get_sdt = |sdt_address: usize, active_table: &mut ActivePageTable| -> (&'static Sdt, bool) {
let mapped = if active_table.translate_page(Page::containing_address(VirtualAddress::new(sdt_address))).is_none() {
let sdt_frame = Frame::containing_address(PhysicalAddress::new(sdt_address));
let sdt_page = Page::containing_address(VirtualAddress::new(sdt_address));
let result = active_table.map_to(sdt_page, sdt_frame, entry::PRESENT | entry::NO_EXECUTE);
result.flush(active_table);
true
} else {
false
};
(&*(sdt_address as *const Sdt), mapped)
};
let drop_sdt = |sdt: &'static Sdt, mapped: bool, active_table: &mut ActivePageTable| {
let sdt_address = sdt as *const Sdt as usize;
drop(sdt);
if mapped {
let sdt_page = Page::containing_address(VirtualAddress::new(sdt_address));
let result = active_table.unmap(sdt_page);
result.flush(active_table);
}
};
let (rxsdt, rxmapped) = get_sdt(rsdp.sdt_address(), active_table);
for &c in rxsdt.signature.iter() {
print!("{}", c as char);
}
println!(":");
if let Some(rsdt) = Rsdt::new(rxsdt) {
for sdt_address in rsdt.iter() {
let (sdt, mapped) = get_sdt(sdt_address, active_table);
init_sdt(sdt, active_table);
drop_sdt(sdt, mapped, active_table);
}
} else if let Some(xsdt) = Xsdt::new(rxsdt) {
for sdt_address in xsdt.iter() {
let (sdt, mapped) = get_sdt(sdt_address, active_table);
init_sdt(sdt, active_table);
drop_sdt(sdt, mapped, active_table);
}
} else {
println!("UNKNOWN RSDT OR XSDT SIGNATURE");
}
drop_sdt(rxsdt, rxmapped, active_table);
} else {
println!("NO RSDP FOUND");
}
// Unmap all of the ACPI RSDP space
{
let start_frame = Frame::containing_address(PhysicalAddress::new(start_addr));
let end_frame = Frame::containing_address(PhysicalAddress::new(end_addr));
for frame in Frame::range_inclusive(start_frame, end_frame) {
let page = Page::containing_address(VirtualAddress::new(frame.start_address().get()));
let result = active_table.unmap(page);
result.flush(active_table);
}
}
None
}
pub struct Acpi;
/// RSDP
#[derive(Copy, Clone, Debug)]
#[repr(packed)]
pub struct RSDP {
signature: [u8; 8],
checksum: u8,
oemid: [u8; 6],
revision: u8,
rsdt_address: u32,
length: u32,
xsdt_address: u64,
extended_checksum: u8,
reserved: [u8; 3]
}
impl RSDP {
/// Search for the RSDP
pub fn search(start_addr: usize, end_addr: usize) -> Option<RSDP> {
for i in 0 .. (end_addr + 1 - start_addr)/16 {
let rsdp = unsafe { &*((start_addr + i * 16) as *const RSDP) };
if &rsdp.signature == b"RSD PTR " {
return Some(*rsdp);
}
}
None
}
/// Get the RSDT or XSDT address
pub fn sdt_address(&self) -> usize {
if self.revision >= 2 {
self.xsdt_address as usize
} else {
self.rsdt_address as usize
}
}
}
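
RSDP::search matches only on the "RSD PTR " signature. The ACPI spec additionally requires the first 20 bytes of the structure to sum to zero; a hedged sketch of that extra validation, which this commit does not perform:

```rust
// Hypothetical checksum validation for a candidate RSDP:
fn rsdp_checksum_valid(rsdp: &RSDP) -> bool {
    let bytes = unsafe {
        core::slice::from_raw_parts(rsdp as *const RSDP as *const u8, 20)
    };
    bytes.iter().fold(0u8, |sum, &b| sum.wrapping_add(b)) == 0
}
```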

@@ -0,0 +1,41 @@
use core::mem;
use super::sdt::Sdt;
#[derive(Debug)]
pub struct Rsdt(&'static Sdt);
impl Rsdt {
pub fn new(sdt: &'static Sdt) -> Option<Rsdt> {
if &sdt.signature == b"RSDT" {
Some(Rsdt(sdt))
} else {
None
}
}
pub fn iter(&self) -> RsdtIter {
RsdtIter {
sdt: self.0,
i: 0
}
}
}
pub struct RsdtIter {
sdt: &'static Sdt,
i: usize
}
impl Iterator for RsdtIter {
type Item = usize;
fn next(&mut self) -> Option<Self::Item> {
if self.i < self.sdt.data_len()/mem::size_of::<u32>() {
let item = unsafe { *(self.sdt.data_address() as *const u32).offset(self.i as isize) };
self.i += 1;
Some(item as usize)
} else {
None
}
}
}

@@ -0,0 +1,33 @@
use core::mem;
#[derive(Copy, Clone, Debug)]
#[repr(packed)]
pub struct Sdt {
pub signature: [u8; 4],
pub length: u32,
pub revision: u8,
pub checksum: u8,
pub oem_id: [u8; 6],
pub oem_table_id: [u8; 8],
pub oem_revision: u32,
pub creator_id: u32,
pub creator_revision: u32
}
impl Sdt {
/// Get the address of this table's data
pub fn data_address(&'static self) -> usize {
self as *const _ as usize + mem::size_of::<Sdt>()
}
/// Get the length of this table's data
pub fn data_len(&'static self) -> usize {
let total_size = self.length as usize;
let header_size = mem::size_of::<Sdt>();
if total_size >= header_size {
total_size - header_size
} else {
0
}
}
}
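
A worked example of data_len: the packed Sdt header above is 36 bytes (4 + 4 + 1 + 1 + 6 + 8 + 4 + 4 + 4), so a table reporting length = 44 carries 8 bytes of payload:

```rust
// The packed header is exactly 36 bytes.
assert_eq!(core::mem::size_of::<Sdt>(), 36);
// A table with length = 44 therefore has 44 - 36 = 8 bytes of data.
```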

@@ -0,0 +1,41 @@
use core::mem;
use super::sdt::Sdt;
#[derive(Debug)]
pub struct Xsdt(&'static Sdt);
impl Xsdt {
pub fn new(sdt: &'static Sdt) -> Option<Xsdt> {
if &sdt.signature == b"XSDT" {
Some(Xsdt(sdt))
} else {
None
}
}
pub fn iter(&self) -> XsdtIter {
XsdtIter {
sdt: self.0,
i: 0
}
}
}
pub struct XsdtIter {
sdt: &'static Sdt,
i: usize
}
impl Iterator for XsdtIter {
type Item = usize;
fn next(&mut self) -> Option<Self::Item> {
if self.i < self.sdt.data_len()/mem::size_of::<u64>() {
let item = unsafe { *(self.sdt.data_address() as *const u64).offset(self.i as isize) };
self.i += 1;
Some(item as usize)
} else {
None
}
}
}

@@ -0,0 +1,14 @@
use core::fmt::{self, Write};
use spin::Mutex;
use device::serial::COM1;
pub static CONSOLE: Mutex<Console> = Mutex::new(Console);
pub struct Console;
impl Write for Console {
fn write_str(&mut self, s: &str) -> Result<(), fmt::Error> {
COM1.lock().write_str(s)
}
}
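
A sketch of how a kernel print! macro might route through this CONSOLE mutex; the wiring below is assumed, not shown in this commit:

```rust
// Assumed print! wiring through the CONSOLE mutex:
macro_rules! print {
    ($($arg:tt)*) => ({
        use core::fmt::Write;
        let _ = write!($crate::console::CONSOLE.lock(), $($arg)*);
    });
}
```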

@@ -0,0 +1,184 @@
use core::mem;
use core::sync::atomic::{AtomicBool, ATOMIC_BOOL_INIT};
/// This must be used by the kernel to ensure that context switches are done atomically
/// Compare and exchange this to true when beginning a context switch on any CPU
/// The Context::switch_to function will set it back to false, allowing other CPUs to switch
/// This must be done, as no locks can be held on the stack during a switch
pub static CONTEXT_SWITCH_LOCK: AtomicBool = ATOMIC_BOOL_INIT;
#[derive(Clone, Debug)]
pub struct Context {
/// FX valid?
loadable: bool,
/// FX location
fx: usize,
/// Page table pointer
cr3: usize,
/// RFLAGS register
rflags: usize,
/// RBX register
rbx: usize,
/// R12 register
r12: usize,
/// R13 register
r13: usize,
/// R14 register
r14: usize,
/// R15 register
r15: usize,
/// Base pointer
rbp: usize,
/// Stack pointer
rsp: usize
}
impl Context {
pub fn new() -> Context {
Context {
loadable: false,
fx: 0,
cr3: 0,
rflags: 0,
rbx: 0,
r12: 0,
r13: 0,
r14: 0,
r15: 0,
rbp: 0,
rsp: 0
}
}
pub fn get_page_table(&self) -> usize {
self.cr3
}
pub fn set_fx(&mut self, address: usize) {
self.fx = address;
}
pub fn set_page_table(&mut self, address: usize) {
self.cr3 = address;
}
pub fn set_stack(&mut self, address: usize) {
self.rsp = address;
}
pub unsafe fn signal_stack(&mut self, handler: extern fn(usize), sig: u8) {
self.push_stack(sig as usize);
self.push_stack(handler as usize);
self.push_stack(signal_handler_wrapper as usize);
}
pub unsafe fn push_stack(&mut self, value: usize) {
self.rsp -= mem::size_of::<usize>();
*(self.rsp as *mut usize) = value;
}
pub unsafe fn pop_stack(&mut self) -> usize {
let value = *(self.rsp as *const usize);
self.rsp += mem::size_of::<usize>();
value
}
/// Switch to the next context by restoring its stack and registers
#[cold]
#[inline(never)]
#[naked]
pub unsafe fn switch_to(&mut self, next: &mut Context) {
asm!("fxsave [$0]" : : "r"(self.fx) : "memory" : "intel", "volatile");
self.loadable = true;
if next.loadable {
asm!("fxrstor [$0]" : : "r"(next.fx) : "memory" : "intel", "volatile");
} else {
asm!("fninit" : : : "memory" : "intel", "volatile");
}
asm!("mov $0, cr3" : "=r"(self.cr3) : : "memory" : "intel", "volatile");
if next.cr3 != self.cr3 {
asm!("mov cr3, $0" : : "r"(next.cr3) : "memory" : "intel", "volatile");
}
asm!("pushfq ; pop $0" : "=r"(self.rflags) : : "memory" : "intel", "volatile");
asm!("push $0 ; popfq" : : "r"(next.rflags) : "memory" : "intel", "volatile");
asm!("mov $0, rbx" : "=r"(self.rbx) : : "memory" : "intel", "volatile");
asm!("mov rbx, $0" : : "r"(next.rbx) : "memory" : "intel", "volatile");
asm!("mov $0, r12" : "=r"(self.r12) : : "memory" : "intel", "volatile");
asm!("mov r12, $0" : : "r"(next.r12) : "memory" : "intel", "volatile");
asm!("mov $0, r13" : "=r"(self.r13) : : "memory" : "intel", "volatile");
asm!("mov r13, $0" : : "r"(next.r13) : "memory" : "intel", "volatile");
asm!("mov $0, r14" : "=r"(self.r14) : : "memory" : "intel", "volatile");
asm!("mov r14, $0" : : "r"(next.r14) : "memory" : "intel", "volatile");
asm!("mov $0, r15" : "=r"(self.r15) : : "memory" : "intel", "volatile");
asm!("mov r15, $0" : : "r"(next.r15) : "memory" : "intel", "volatile");
asm!("mov $0, rsp" : "=r"(self.rsp) : : "memory" : "intel", "volatile");
asm!("mov rsp, $0" : : "r"(next.rsp) : "memory" : "intel", "volatile");
asm!("mov $0, rbp" : "=r"(self.rbp) : : "memory" : "intel", "volatile");
asm!("mov rbp, $0" : : "r"(next.rbp) : "memory" : "intel", "volatile");
}
}
#[repr(packed)]
pub struct SignalHandlerStack {
r11: usize,
r10: usize,
r9: usize,
r8: usize,
rsi: usize,
rdi: usize,
rdx: usize,
rcx: usize,
rax: usize,
handler: extern fn(usize),
sig: usize,
rip: usize,
}
#[naked]
unsafe extern fn signal_handler_wrapper() {
#[inline(never)]
unsafe fn inner(stack: &SignalHandlerStack) {
(stack.handler)(stack.sig);
}
// Push scratch registers
asm!("push rax
push rcx
push rdx
push rdi
push rsi
push r8
push r9
push r10
push r11"
: : : : "intel", "volatile");
// Get reference to stack variables
let rsp: usize;
asm!("" : "={rsp}"(rsp) : : : "intel", "volatile");
// Call inner rust function
inner(&*(rsp as *const SignalHandlerStack));
// Pop scratch registers, error code, and return
asm!("pop r11
pop r10
pop r9
pop r8
pop rsi
pop rdi
pop rdx
pop rcx
pop rax
add rsp, 16"
: : : : "intel", "volatile");
}
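
The CONTEXT_SWITCH_LOCK doc comment above describes a compare-and-exchange protocol; a minimal sketch of the acquiring side, assuming the caller invokes switch_to afterwards (which, per the doc comment, releases the lock):

```rust
use core::sync::atomic::Ordering;

// Hypothetical acquire loop for CONTEXT_SWITCH_LOCK:
unsafe fn acquire_switch_lock() {
    // Spin until this CPU flips false -> true.
    while CONTEXT_SWITCH_LOCK.compare_and_swap(false, true, Ordering::SeqCst) {
        // assumes an interrupt::pause() spin hint is available
        interrupt::pause();
    }
}
```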

@@ -0,0 +1,126 @@
extern crate raw_cpuid;
use core::fmt::{Result, Write};
use self::raw_cpuid::CpuId;
pub fn cpu_info<W: Write>(w: &mut W) -> Result {
let cpuid = CpuId::new();
if let Some(info) = cpuid.get_vendor_info() {
write!(w, "Vendor: {}\n", info.as_string())?;
}
if let Some(info) = cpuid.get_extended_function_info() {
if let Some(brand) = info.processor_brand_string() {
write!(w, "Model: {}\n", brand)?;
}
}
if let Some(info) = cpuid.get_processor_frequency_info() {
write!(w, "CPU Base MHz: {}\n", info.processor_base_frequency())?;
write!(w, "CPU Max MHz: {}\n", info.processor_max_frequency())?;
write!(w, "Bus MHz: {}\n", info.bus_frequency())?;
}
write!(w, "Features:")?;
if let Some(info) = cpuid.get_feature_info() {
if info.has_fpu() { write!(w, " fpu")? };
if info.has_vme() { write!(w, " vme")? };
if info.has_de() { write!(w, " de")? };
if info.has_pse() { write!(w, " pse")? };
if info.has_tsc() { write!(w, " tsc")? };
if info.has_msr() { write!(w, " msr")? };
if info.has_pae() { write!(w, " pae")? };
if info.has_mce() { write!(w, " mce")? };
if info.has_cmpxchg8b() { write!(w, " cx8")? };
if info.has_apic() { write!(w, " apic")? };
if info.has_sysenter_sysexit() { write!(w, " sep")? };
if info.has_mtrr() { write!(w, " mtrr")? };
if info.has_pge() { write!(w, " pge")? };
if info.has_mca() { write!(w, " mca")? };
if info.has_cmov() { write!(w, " cmov")? };
if info.has_pat() { write!(w, " pat")? };
if info.has_pse36() { write!(w, " pse36")? };
if info.has_psn() { write!(w, " psn")? };
if info.has_clflush() { write!(w, " clflush")? };
if info.has_ds() { write!(w, " ds")? };
if info.has_acpi() { write!(w, " acpi")? };
if info.has_mmx() { write!(w, " mmx")? };
if info.has_fxsave_fxstor() { write!(w, " fxsr")? };
if info.has_sse() { write!(w, " sse")? };
if info.has_sse2() { write!(w, " sse2")? };
if info.has_ss() { write!(w, " ss")? };
if info.has_htt() { write!(w, " ht")? };
if info.has_tm() { write!(w, " tm")? };
if info.has_pbe() { write!(w, " pbe")? };
if info.has_sse3() { write!(w, " sse3")? };
if info.has_pclmulqdq() { write!(w, " pclmulqdq")? };
if info.has_ds_area() { write!(w, " dtes64")? };
if info.has_monitor_mwait() { write!(w, " monitor")? };
if info.has_cpl() { write!(w, " ds_cpl")? };
if info.has_vmx() { write!(w, " vmx")? };
if info.has_smx() { write!(w, " smx")? };
if info.has_eist() { write!(w, " est")? };
if info.has_tm2() { write!(w, " tm2")? };
if info.has_ssse3() { write!(w, " ssse3")? };
if info.has_cnxtid() { write!(w, " cnxtid")? };
if info.has_fma() { write!(w, " fma")? };
if info.has_cmpxchg16b() { write!(w, " cx16")? };
if info.has_pdcm() { write!(w, " pdcm")? };
if info.has_pcid() { write!(w, " pcid")? };
if info.has_dca() { write!(w, " dca")? };
if info.has_sse41() { write!(w, " sse4_1")? };
if info.has_sse42() { write!(w, " sse4_2")? };
if info.has_x2apic() { write!(w, " x2apic")? };
if info.has_movbe() { write!(w, " movbe")? };
if info.has_popcnt() { write!(w, " popcnt")? };
if info.has_tsc_deadline() { write!(w, " tsc_deadline_timer")? };
if info.has_aesni() { write!(w, " aes")? };
if info.has_xsave() { write!(w, " xsave")? };
if info.has_oxsave() { write!(w, " xsaveopt")? };
if info.has_avx() { write!(w, " avx")? };
if info.has_f16c() { write!(w, " f16c")? };
if info.has_rdrand() { write!(w, " rdrand")? };
}
if let Some(info) = cpuid.get_extended_function_info() {
if info.has_64bit_mode() { write!(w, " lm")? };
if info.has_rdtscp() { write!(w, " rdtscp")? };
if info.has_1gib_pages() { write!(w, " pdpe1gb")? };
if info.has_execute_disable() { write!(w, " nx")? };
if info.has_syscall_sysret() { write!(w, " syscall")? };
if info.has_prefetchw() { write!(w, " prefetchw")? };
if info.has_lzcnt() { write!(w, " lzcnt")? };
if info.has_lahf_sahf() { write!(w, " lahf_lm")? };
if info.has_invariant_tsc() { write!(w, " constant_tsc")? };
}
if let Some(info) = cpuid.get_extended_feature_info() {
if info.has_fsgsbase() { write!(w, " fsgsbase")? };
if info.has_tsc_adjust_msr() { write!(w, " tsc_adjust")? };
if info.has_bmi1() { write!(w, " bmi1")? };
if info.has_hle() { write!(w, " hle")? };
if info.has_avx2() { write!(w, " avx2")? };
if info.has_smep() { write!(w, " smep")? };
if info.has_bmi2() { write!(w, " bmi2")? };
if info.has_rep_movsb_stosb() { write!(w, " erms")? };
if info.has_invpcid() { write!(w, " invpcid")? };
if info.has_rtm() { write!(w, " rtm")? };
if info.has_qm() { write!(w, " qm")? };
if info.has_fpu_cs_ds_deprecated() { write!(w, " fpu_seg")? };
if info.has_mpx() { write!(w, " mpx")? };
}
write!(w, "\n")?;
Ok(())
}
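
cpu_info writes into any fmt::Write sink, so it can target the serial console directly. A hypothetical call site, assuming the COM1 port from device/serial is in scope:

```rust
// Dump CPU information to the first serial port (assumed wiring):
let _ = cpu_info(&mut *COM1.lock());
```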

@@ -0,0 +1,115 @@
use core::intrinsics::{volatile_load, volatile_store};
use x86::cpuid::CpuId;
use x86::msr::*;
use memory::Frame;
use paging::{entry, ActivePageTable, PhysicalAddress, Page, VirtualAddress};
pub static mut LOCAL_APIC: LocalApic = LocalApic {
address: 0,
x2: false
};
pub unsafe fn init(active_table: &mut ActivePageTable) {
LOCAL_APIC.init(active_table);
}
pub unsafe fn init_ap() {
LOCAL_APIC.init_ap();
}
/// Local APIC
pub struct LocalApic {
pub address: usize,
pub x2: bool
}
impl LocalApic {
unsafe fn init(&mut self, active_table: &mut ActivePageTable) {
self.address = (rdmsr(IA32_APIC_BASE) as usize & 0xFFFF0000) + ::KERNEL_OFFSET;
self.x2 = CpuId::new().get_feature_info().unwrap().has_x2apic();
if ! self.x2 {
let page = Page::containing_address(VirtualAddress::new(self.address));
let frame = Frame::containing_address(PhysicalAddress::new(self.address - ::KERNEL_OFFSET));
let result = active_table.map_to(page, frame, entry::PRESENT | entry::WRITABLE | entry::NO_EXECUTE);
result.flush(active_table);
}
self.init_ap();
}
unsafe fn init_ap(&mut self) {
if self.x2 {
wrmsr(IA32_APIC_BASE, rdmsr(IA32_APIC_BASE) | 1 << 10);
wrmsr(IA32_X2APIC_SIVR, 0x100);
} else {
self.write(0xF0, 0x100);
}
}
unsafe fn read(&self, reg: u32) -> u32 {
volatile_load((self.address + reg as usize) as *const u32)
}
unsafe fn write(&mut self, reg: u32, value: u32) {
volatile_store((self.address + reg as usize) as *mut u32, value);
}
pub fn id(&self) -> u32 {
if self.x2 {
unsafe { rdmsr(IA32_X2APIC_APICID) as u32 }
} else {
unsafe { self.read(0x20) }
}
}
pub fn version(&self) -> u32 {
if self.x2 {
unsafe { rdmsr(IA32_X2APIC_VERSION) as u32 }
} else {
unsafe { self.read(0x30) }
}
}
pub fn icr(&self) -> u64 {
if self.x2 {
unsafe { rdmsr(IA32_X2APIC_ICR) }
} else {
unsafe {
(self.read(0x310) as u64) << 32 | self.read(0x300) as u64
}
}
}
pub fn set_icr(&mut self, value: u64) {
if self.x2 {
unsafe { wrmsr(IA32_X2APIC_ICR, value); }
} else {
unsafe {
while self.read(0x300) & 1 << 12 == 1 << 12 {}
self.write(0x310, (value >> 32) as u32);
self.write(0x300, value as u32);
while self.read(0x300) & 1 << 12 == 1 << 12 {}
}
}
}
pub fn ipi(&mut self, apic_id: usize) {
let mut icr = 0x4040;
if self.x2 {
icr |= (apic_id as u64) << 32;
} else {
icr |= (apic_id as u64) << 56;
}
self.set_icr(icr);
}
pub unsafe fn eoi(&mut self) {
if self.x2 {
wrmsr(IA32_X2APIC_EOI, 0);
} else {
self.write(0xB0, 0);
}
}
}

@@ -0,0 +1,16 @@
use paging::ActivePageTable;
pub mod cpu;
pub mod local_apic;
pub mod rtc;
pub mod serial;
pub unsafe fn init(active_table: &mut ActivePageTable){
local_apic::init(active_table);
rtc::init();
serial::init();
}
pub unsafe fn init_ap() {
local_apic::init_ap();
}

@@ -0,0 +1,109 @@
use io::{Io, Pio};
use time;
pub fn init() {
let mut rtc = Rtc::new();
time::START.lock().0 = rtc.time();
}
fn cvt_bcd(value: usize) -> usize {
(value & 0xF) + ((value / 16) * 10)
}
/// RTC
pub struct Rtc {
addr: Pio<u8>,
data: Pio<u8>,
}
impl Rtc {
/// Create a new RTC
pub fn new() -> Self {
Rtc {
addr: Pio::<u8>::new(0x70),
data: Pio::<u8>::new(0x71),
}
}
/// Read a CMOS/RTC register
unsafe fn read(&mut self, reg: u8) -> u8 {
self.addr.write(reg);
self.data.read()
}
/// Wait for the update-in-progress flag to cycle
unsafe fn wait(&mut self) {
while self.read(0xA) & 0x80 != 0x80 {}
while self.read(0xA) & 0x80 == 0x80 {}
}
/// Get time
pub fn time(&mut self) -> u64 {
let mut second;
let mut minute;
let mut hour;
let mut day;
let mut month;
let mut year;
let register_b;
unsafe {
self.wait();
second = self.read(0) as usize;
minute = self.read(2) as usize;
hour = self.read(4) as usize;
day = self.read(7) as usize;
month = self.read(8) as usize;
year = self.read(9) as usize;
register_b = self.read(0xB);
}
if register_b & 4 != 4 {
second = cvt_bcd(second);
minute = cvt_bcd(minute);
hour = cvt_bcd(hour & 0x7F) | (hour & 0x80);
day = cvt_bcd(day);
month = cvt_bcd(month);
year = cvt_bcd(year);
}
if register_b & 2 != 2 || hour & 0x80 == 0x80 {
hour = ((hour & 0x7F) + 12) % 24;
}
// TODO: Century Register
year += 2000;
// Unix time from clock
let mut secs: u64 = (year as u64 - 1970) * 31536000;
let mut leap_days = (year as u64 - 1972) / 4 + 1;
if year % 4 == 0 {
if month <= 2 {
leap_days -= 1;
}
}
secs += leap_days * 86400;
match month {
2 => secs += 2678400,
3 => secs += 5097600,
4 => secs += 7776000,
5 => secs += 10368000,
6 => secs += 13046400,
7 => secs += 15638400,
8 => secs += 18316800,
9 => secs += 20995200,
10 => secs += 23587200,
11 => secs += 26265600,
12 => secs += 28857600,
_ => (),
}
secs += (day as u64 - 1) * 86400;
secs += hour as u64 * 3600;
secs += minute as u64 * 60;
secs += second as u64;
secs
}
}
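
A worked example of cvt_bcd: BCD stores one decimal digit per nibble, so 0x42 decodes to decimal 42:

```rust
// (0x42 & 0xF) + (0x42 / 16) * 10 = 2 + 4 * 10 = 42
assert_eq!(cvt_bcd(0x42), 42);
assert_eq!(cvt_bcd(0x59), 59);
```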

@@ -0,0 +1,115 @@
use core::fmt::{self, Write};
use spin::Mutex;
use io::{Io, Pio, ReadOnly};
pub static COM1: Mutex<SerialPort> = Mutex::new(SerialPort::new(0x3F8));
pub static COM2: Mutex<SerialPort> = Mutex::new(SerialPort::new(0x2F8));
pub unsafe fn init() {
COM1.lock().init();
COM2.lock().init();
}
bitflags! {
/// Interrupt enable flags
flags IntEnFlags: u8 {
const RECEIVED = 1,
const SENT = 1 << 1,
const ERRORED = 1 << 2,
const STATUS_CHANGE = 1 << 3,
// 4 to 7 are unused
}
}
bitflags! {
/// Line status flags
flags LineStsFlags: u8 {
const INPUT_FULL = 1,
// 1 to 4 unknown
const OUTPUT_EMPTY = 1 << 5,
// 6 and 7 unknown
}
}
#[allow(dead_code)]
pub struct SerialPort {
/// Data register, read to receive, write to send
data: Pio<u8>,
/// Interrupt enable
int_en: Pio<u8>,
/// FIFO control
fifo_ctrl: Pio<u8>,
/// Line control
line_ctrl: Pio<u8>,
/// Modem control
modem_ctrl: Pio<u8>,
/// Line status
line_sts: ReadOnly<Pio<u8>>,
/// Modem status
modem_sts: ReadOnly<Pio<u8>>,
}
impl SerialPort {
const fn new(base: u16) -> SerialPort {
SerialPort {
data: Pio::new(base),
int_en: Pio::new(base + 1),
fifo_ctrl: Pio::new(base + 2),
line_ctrl: Pio::new(base + 3),
modem_ctrl: Pio::new(base + 4),
line_sts: ReadOnly::new(Pio::new(base + 5)),
modem_sts: ReadOnly::new(Pio::new(base + 6))
}
}
fn line_sts(&self) -> LineStsFlags {
LineStsFlags::from_bits_truncate(self.line_sts.read())
}
fn write(&mut self, data: u8) {
while ! self.line_sts().contains(OUTPUT_EMPTY) {}
self.data.write(data)
}
fn init(&mut self) {
//TODO: Cleanup
self.int_en.write(0x00); // Disable interrupts
self.line_ctrl.write(0x80); // Enable DLAB to set the baud rate divisor
self.data.write(0x03); // Divisor low byte: 3 (38400 baud)
self.int_en.write(0x00); // Divisor high byte: 0
self.line_ctrl.write(0x03); // Clear DLAB; 8 data bits, no parity, one stop bit
self.fifo_ctrl.write(0xC7); // Enable and clear FIFOs, 14-byte interrupt threshold
self.modem_ctrl.write(0x0B); // Assert DTR/RTS and OUT2 (routes IRQs)
self.int_en.write(0x01); // Enable the received-data-available interrupt
}
pub fn on_receive(&mut self) {
let data = self.data.read();
extern {
fn debug_input(byte: u8);
}
unsafe { debug_input(data) };
}
}
impl Write for SerialPort {
fn write_str(&mut self, s: &str) -> Result<(), fmt::Error> {
for byte in s.bytes() {
match byte {
8 | 0x7F => {
self.write(8);
self.write(b' ');
self.write(8);
},
_ => {
self.write(byte);
}
}
}
Ok(())
}
}

@@ -0,0 +1,70 @@
/// Memcpy
///
/// Copy N bytes of memory from one location to another.
#[no_mangle]
pub unsafe extern fn memcpy(dest: *mut u8, src: *const u8,
n: usize) -> *mut u8 {
let mut i = 0;
while i < n {
*((dest as usize + i) as *mut u8) = *((src as usize + i) as *const u8);
i += 1;
}
dest
}
/// Memmove
///
/// Copy N bytes of memory from src to dest. The memory areas may overlap.
#[no_mangle]
pub unsafe extern fn memmove(dest: *mut u8, src: *const u8,
n: usize) -> *mut u8 {
if src < dest as *const u8 {
let mut i = n;
while i != 0 {
i -= 1;
*((dest as usize + i) as *mut u8) = *((src as usize + i) as *const u8);
}
} else {
let mut i = 0;
while i < n {
*((dest as usize + i) as *mut u8) = *((src as usize + i) as *const u8);
i += 1;
}
}
dest
}
/// Memset
///
/// Fill a block of memory with a specified value.
#[no_mangle]
pub unsafe extern fn memset(dest: *mut u8, c: i32, n: usize) -> *mut u8 {
let mut i = 0;
while i < n {
*((dest as usize + i) as *mut u8) = c as u8;
i += 1;
}
dest
}
/// Memcmp
///
/// Compare two blocks of memory.
#[no_mangle]
pub unsafe extern fn memcmp(s1: *const u8, s2: *const u8, n: usize) -> i32 {
let mut i = 0;
while i < n {
let a = *((s1 as usize + i) as *const u8);
let b = *((s2 as usize + i) as *const u8);
if a != b {
return a as i32 - b as i32
}
i += 1;
}
0
}

@@ -0,0 +1,177 @@
//! Global descriptor table
use core::mem;
use x86::dtables::{self, DescriptorTablePointer};
use x86::segmentation::{self, SegmentSelector};
use x86::task::{self, TaskStateSegment};
pub const GDT_NULL: usize = 0;
pub const GDT_KERNEL_CODE: usize = 1;
pub const GDT_KERNEL_DATA: usize = 2;
pub const GDT_KERNEL_TLS: usize = 3;
pub const GDT_USER_CODE: usize = 4;
pub const GDT_USER_DATA: usize = 5;
pub const GDT_USER_TLS: usize = 6;
pub const GDT_TSS: usize = 7;
pub const GDT_TSS_HIGH: usize = 8;
pub const GDT_A_PRESENT: u8 = 1 << 7;
pub const GDT_A_RING_0: u8 = 0 << 5;
pub const GDT_A_RING_1: u8 = 1 << 5;
pub const GDT_A_RING_2: u8 = 2 << 5;
pub const GDT_A_RING_3: u8 = 3 << 5;
pub const GDT_A_SYSTEM: u8 = 1 << 4;
pub const GDT_A_EXECUTABLE: u8 = 1 << 3;
pub const GDT_A_CONFORMING: u8 = 1 << 2;
pub const GDT_A_PRIVILEGE: u8 = 1 << 1;
pub const GDT_A_DIRTY: u8 = 1;
pub const GDT_A_TSS_AVAIL: u8 = 0x9;
pub const GDT_A_TSS_BUSY: u8 = 0xB;
pub const GDT_F_PAGE_SIZE: u8 = 1 << 7;
pub const GDT_F_PROTECTED_MODE: u8 = 1 << 6;
pub const GDT_F_LONG_MODE: u8 = 1 << 5;
static mut INIT_GDTR: DescriptorTablePointer = DescriptorTablePointer {
limit: 0,
base: 0
};
static mut INIT_GDT: [GdtEntry; 4] = [
// Null
GdtEntry::new(0, 0, 0, 0),
// Kernel code
GdtEntry::new(0, 0, GDT_A_PRESENT | GDT_A_RING_0 | GDT_A_SYSTEM | GDT_A_EXECUTABLE | GDT_A_PRIVILEGE, GDT_F_LONG_MODE),
// Kernel data
GdtEntry::new(0, 0, GDT_A_PRESENT | GDT_A_RING_0 | GDT_A_SYSTEM | GDT_A_PRIVILEGE, GDT_F_LONG_MODE),
// Kernel TLS
GdtEntry::new(0, 0, GDT_A_PRESENT | GDT_A_RING_3 | GDT_A_SYSTEM | GDT_A_PRIVILEGE, GDT_F_LONG_MODE)
];
#[thread_local]
pub static mut GDTR: DescriptorTablePointer = DescriptorTablePointer {
limit: 0,
base: 0
};
#[thread_local]
pub static mut GDT: [GdtEntry; 9] = [
// Null
GdtEntry::new(0, 0, 0, 0),
// Kernel code
GdtEntry::new(0, 0, GDT_A_PRESENT | GDT_A_RING_0 | GDT_A_SYSTEM | GDT_A_EXECUTABLE | GDT_A_PRIVILEGE, GDT_F_LONG_MODE),
// Kernel data
GdtEntry::new(0, 0, GDT_A_PRESENT | GDT_A_RING_0 | GDT_A_SYSTEM | GDT_A_PRIVILEGE, GDT_F_LONG_MODE),
// Kernel TLS
GdtEntry::new(0, 0, GDT_A_PRESENT | GDT_A_RING_0 | GDT_A_SYSTEM | GDT_A_PRIVILEGE, GDT_F_LONG_MODE),
// User code
GdtEntry::new(0, 0, GDT_A_PRESENT | GDT_A_RING_3 | GDT_A_SYSTEM | GDT_A_EXECUTABLE | GDT_A_PRIVILEGE, GDT_F_LONG_MODE),
// User data
GdtEntry::new(0, 0, GDT_A_PRESENT | GDT_A_RING_3 | GDT_A_SYSTEM | GDT_A_PRIVILEGE, GDT_F_LONG_MODE),
// User TLS
GdtEntry::new(0, 0, GDT_A_PRESENT | GDT_A_RING_3 | GDT_A_SYSTEM | GDT_A_PRIVILEGE, GDT_F_LONG_MODE),
// TSS
GdtEntry::new(0, 0, GDT_A_PRESENT | GDT_A_RING_3 | GDT_A_TSS_AVAIL, 0),
// The TSS descriptor is 16 bytes, twice a normal entry, so it occupies a second GDT slot
GdtEntry::new(0, 0, 0, 0),
];
#[thread_local]
pub static mut TSS: TaskStateSegment = TaskStateSegment {
reserved: 0,
rsp: [0; 3],
reserved2: 0,
ist: [0; 7],
reserved3: 0,
reserved4: 0,
iomap_base: 0xFFFF
};
/// Initialize GDT
pub unsafe fn init(tcb_offset: usize, stack_offset: usize) {
// Set up the initial GDT with TLS, so we can set up the TLS GDT (a little confusing)
// This means that each CPU will have its own GDT, but we only need to define it once as a thread local
INIT_GDTR.limit = (INIT_GDT.len() * mem::size_of::<GdtEntry>() - 1) as u16;
INIT_GDTR.base = INIT_GDT.as_ptr() as u64;
// Set the TLS segment to the offset of the Thread Control Block
INIT_GDT[GDT_KERNEL_TLS].set_offset(tcb_offset as u32);
// Load the initial GDT, before we have access to thread locals
dtables::lgdt(&INIT_GDTR);
// Load the segment descriptors
segmentation::load_cs(SegmentSelector::new(GDT_KERNEL_CODE as u16));
segmentation::load_ds(SegmentSelector::new(GDT_KERNEL_DATA as u16));
segmentation::load_es(SegmentSelector::new(GDT_KERNEL_DATA as u16));
segmentation::load_fs(SegmentSelector::new(GDT_KERNEL_TLS as u16));
segmentation::load_gs(SegmentSelector::new(GDT_KERNEL_DATA as u16));
segmentation::load_ss(SegmentSelector::new(GDT_KERNEL_DATA as u16));
// Now that we have access to thread locals, set up the AP's individual GDT
GDTR.limit = (GDT.len() * mem::size_of::<GdtEntry>() - 1) as u16;
GDTR.base = GDT.as_ptr() as u64;
// Set the TLS segment to the offset of the Thread Control Block
GDT[GDT_KERNEL_TLS].set_offset(tcb_offset as u32);
// Set the User TLS segment to the offset of the user TCB
GDT[GDT_USER_TLS].set_offset(::USER_TCB_OFFSET as u32);
// We can now access our TSS, which is a thread local
GDT[GDT_TSS].set_offset(&TSS as *const _ as u32);
GDT[GDT_TSS].set_limit(mem::size_of::<TaskStateSegment>() as u32);
// Set the stack pointer when coming back from userspace
TSS.rsp[0] = stack_offset as u64;
// Load the new GDT, which is correctly located in thread local storage
dtables::lgdt(&GDTR);
// Reload the segment descriptors
segmentation::load_cs(SegmentSelector::new(GDT_KERNEL_CODE as u16));
segmentation::load_ds(SegmentSelector::new(GDT_KERNEL_DATA as u16));
segmentation::load_es(SegmentSelector::new(GDT_KERNEL_DATA as u16));
segmentation::load_fs(SegmentSelector::new(GDT_KERNEL_TLS as u16));
segmentation::load_gs(SegmentSelector::new(GDT_KERNEL_DATA as u16));
segmentation::load_ss(SegmentSelector::new(GDT_KERNEL_DATA as u16));
// Load the task register
task::load_ltr(SegmentSelector::new(GDT_TSS as u16));
}
#[derive(Copy, Clone, Debug)]
#[repr(packed)]
pub struct GdtEntry {
pub limitl: u16,
pub offsetl: u16,
pub offsetm: u8,
pub access: u8,
pub flags_limith: u8,
pub offseth: u8
}
impl GdtEntry {
pub const fn new(offset: u32, limit: u32, access: u8, flags: u8) -> Self {
GdtEntry {
limitl: limit as u16,
offsetl: offset as u16,
offsetm: (offset >> 16) as u8,
access: access,
flags_limith: flags & 0xF0 | ((limit >> 16) as u8) & 0x0F,
offseth: (offset >> 24) as u8
}
}
pub fn set_offset(&mut self, offset: u32) {
self.offsetl = offset as u16;
self.offsetm = (offset >> 16) as u8;
self.offseth = (offset >> 24) as u8;
}
pub fn set_limit(&mut self, limit: u32) {
self.limitl = limit as u16;
self.flags_limith = self.flags_limith & 0xF0 | ((limit >> 16) as u8) & 0x0F;
}
}
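
A worked example of the GdtEntry packing: the low 16 bits of the limit live in limitl, and the top 4 bits share flags_limith with the flag bits:

```rust
// Hypothetical check of GdtEntry::new's bit packing:
let e = GdtEntry::new(0, 0xFFFFF, GDT_A_PRESENT, GDT_F_LONG_MODE);
assert_eq!(e.limitl, 0xFFFF);                       // low 16 limit bits
assert_eq!(e.flags_limith, GDT_F_LONG_MODE | 0x0F); // flags | limit[19:16]
```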

@@ -0,0 +1,142 @@
use core::mem;
use x86::dtables::{self, DescriptorTablePointer};
use interrupt::*;
pub static mut IDTR: DescriptorTablePointer = DescriptorTablePointer {
limit: 0,
base: 0
};
pub static mut IDT: [IdtEntry; 256] = [IdtEntry::new(); 256];
pub unsafe fn init() {
IDTR.limit = (IDT.len() * mem::size_of::<IdtEntry>() - 1) as u16;
IDTR.base = IDT.as_ptr() as u64;
// Set up exceptions
IDT[0].set_func(exception::divide_by_zero);
IDT[1].set_func(exception::debug);
IDT[2].set_func(exception::non_maskable);
IDT[3].set_func(exception::breakpoint);
IDT[4].set_func(exception::overflow);
IDT[5].set_func(exception::bound_range);
IDT[6].set_func(exception::invalid_opcode);
IDT[7].set_func(exception::device_not_available);
IDT[8].set_func(exception::double_fault);
// 9 no longer available
IDT[10].set_func(exception::invalid_tss);
IDT[11].set_func(exception::segment_not_present);
IDT[12].set_func(exception::stack_segment);
IDT[13].set_func(exception::protection);
IDT[14].set_func(exception::page);
// 15 reserved
IDT[16].set_func(exception::fpu);
IDT[17].set_func(exception::alignment_check);
IDT[18].set_func(exception::machine_check);
IDT[19].set_func(exception::simd);
IDT[20].set_func(exception::virtualization);
// 21 through 29 reserved
IDT[30].set_func(exception::security);
// 31 reserved
// Set up IRQs
IDT[32].set_func(irq::pit);
IDT[33].set_func(irq::keyboard);
IDT[34].set_func(irq::cascade);
IDT[35].set_func(irq::com2);
IDT[36].set_func(irq::com1);
IDT[37].set_func(irq::lpt2);
IDT[38].set_func(irq::floppy);
IDT[39].set_func(irq::lpt1);
IDT[40].set_func(irq::rtc);
IDT[41].set_func(irq::pci1);
IDT[42].set_func(irq::pci2);
IDT[43].set_func(irq::pci3);
IDT[44].set_func(irq::mouse);
IDT[45].set_func(irq::fpu);
IDT[46].set_func(irq::ata1);
IDT[47].set_func(irq::ata2);
// Set IPI handler (null)
IDT[0x40].set_func(ipi::ipi);
// Set syscall function
IDT[0x80].set_func(syscall::syscall);
IDT[0x80].set_flags(IDT_PRESENT | IDT_RING_3 | IDT_INTERRUPT);
dtables::lidt(&IDTR);
}
bitflags! {
pub flags IdtFlags: u8 {
const IDT_PRESENT = 1 << 7,
const IDT_RING_0 = 0 << 5,
const IDT_RING_1 = 1 << 5,
const IDT_RING_2 = 2 << 5,
const IDT_RING_3 = 3 << 5,
const IDT_SS = 1 << 4,
const IDT_INTERRUPT = 0xE,
const IDT_TRAP = 0xF,
}
}
#[repr(packed)]
pub struct IdtDescriptor {
size: u16,
offset: u64
}
impl IdtDescriptor {
pub fn set_slice(&mut self, slice: &'static [IdtEntry]) {
self.size = (slice.len() * mem::size_of::<IdtEntry>() - 1) as u16;
self.offset = slice.as_ptr() as u64;
}
pub unsafe fn load(&self) {
asm!("lidt [rax]" : : "{rax}"(self as *const _ as usize) : : "intel", "volatile");
}
}
#[derive(Copy, Clone, Debug)]
#[repr(packed)]
pub struct IdtEntry {
offsetl: u16,
selector: u16,
zero: u8,
attribute: u8,
offsetm: u16,
offseth: u32,
zero2: u32
}
impl IdtEntry {
pub const fn new() -> IdtEntry {
IdtEntry {
offsetl: 0,
selector: 0,
zero: 0,
attribute: 0,
offsetm: 0,
offseth: 0,
zero2: 0
}
}
pub fn set_flags(&mut self, flags: IdtFlags) {
self.attribute = flags.bits;
}
pub fn set_offset(&mut self, selector: u16, base: usize) {
self.selector = selector;
self.offsetl = base as u16;
self.offsetm = (base >> 16) as u16;
self.offseth = (base >> 32) as u32;
}
// A function to set the offset more easily
pub fn set_func(&mut self, func: unsafe extern fn()) {
self.set_flags(IDT_PRESENT | IDT_RING_0 | IDT_INTERRUPT);
self.set_offset(8, func as usize); // selector 8 = GDT_KERNEL_CODE (index 1 << 3)
}
}

@@ -0,0 +1,123 @@
use interrupt::stack_trace;
use syscall::flag::*;
extern {
fn ksignal(signal: usize);
}
interrupt_stack!(divide_by_zero, stack, {
println!("Divide by zero fault at {:>02X}:{:>016X}", stack.cs, stack.rip);
stack_trace();
ksignal(SIGFPE);
});
interrupt_stack!(debug, stack, {
println!("Debug trap at {:>02X}:{:>016X}", stack.cs, stack.rip);
ksignal(SIGTRAP);
});
interrupt_stack!(non_maskable, stack, {
println!("Non-maskable interrupt at {:>02X}:{:>016X}", stack.cs, stack.rip);
});
interrupt_stack!(breakpoint, stack, {
println!("Breakpoint trap at {:>02X}:{:>016X}", stack.cs, stack.rip);
ksignal(SIGTRAP);
});
interrupt_stack!(overflow, stack, {
println!("Overflow trap at {:>02X}:{:>016X}", stack.cs, stack.rip);
ksignal(SIGFPE);
});
interrupt_stack!(bound_range, stack, {
println!("Bound range exceeded fault at {:>02X}:{:>016X}", stack.cs, stack.rip);
stack_trace();
ksignal(SIGSEGV);
});
interrupt_stack!(invalid_opcode, stack, {
println!("Invalid opcode fault at {:>02X}:{:>016X}", stack.cs, stack.rip);
stack_trace();
ksignal(SIGILL);
});
interrupt_stack!(device_not_available, stack, {
println!("Device not available fault at {:>02X}:{:>016X}", stack.cs, stack.rip);
stack_trace();
ksignal(SIGILL);
});
interrupt_error!(double_fault, stack, {
println!("Double fault: {:X} at {:>02X}:{:>016X}", stack.code, stack.cs, stack.rip);
stack_trace();
ksignal(SIGSEGV);
});
interrupt_error!(invalid_tss, stack, {
println!("Invalid TSS fault: {:X} at {:>02X}:{:>016X}", stack.code, stack.cs, stack.rip);
stack_trace();
ksignal(SIGSEGV);
});
interrupt_error!(segment_not_present, stack, {
println!("Segment not present fault: {:X} at {:>02X}:{:>016X}", stack.code, stack.cs, stack.rip);
stack_trace();
ksignal(SIGSEGV);
});
interrupt_error!(stack_segment, stack, {
println!("Stack segment fault: {:X} at {:>02X}:{:>016X}", stack.code, stack.cs, stack.rip);
stack_trace();
ksignal(SIGSEGV);
});
interrupt_error!(protection, stack, {
println!("Protection fault: {:X} at {:>02X}:{:>016X}", stack.code, stack.cs, stack.rip);
stack_trace();
ksignal(SIGSEGV);
});
interrupt_error!(page, stack, {
let cr2: usize;
asm!("mov rax, cr2" : "={rax}"(cr2) : : : "intel", "volatile");
println!("Page fault: {:>02X}:{:>016X} at {:>02X}:{:>016X}", stack.code, cr2, stack.cs, stack.rip);
stack_trace();
ksignal(SIGSEGV);
});
interrupt_stack!(fpu, stack, {
println!("FPU floating point fault at {:>02X}:{:>016X}", stack.cs, stack.rip);
stack_trace();
ksignal(SIGFPE);
});
interrupt_error!(alignment_check, stack, {
println!("Alignment check fault: {:X} at {:>02X}:{:>016X}", stack.code, stack.cs, stack.rip);
stack_trace();
ksignal(SIGBUS);
});
interrupt_stack!(machine_check, stack, {
println!("Machine check fault at {:>02X}:{:>016X}", stack.cs, stack.rip);
stack_trace();
ksignal(SIGBUS);
});
interrupt_stack!(simd, stack, {
println!("SIMD floating point fault at {:>02X}:{:>016X}", stack.cs, stack.rip);
stack_trace();
ksignal(SIGFPE);
});
interrupt_stack!(virtualization, stack, {
println!("Virtualization fault at {:>02X}:{:>016X}", stack.cs, stack.rip);
stack_trace();
ksignal(SIGBUS);
});
interrupt_error!(security, stack, {
println!("Security exception: {:X} at {:>02X}:{:>016X}", stack.code, stack.cs, stack.rip);
stack_trace();
ksignal(SIGBUS);
});


@ -0,0 +1,5 @@
use device::local_apic::LOCAL_APIC;
interrupt!(ipi, {
LOCAL_APIC.eoi();
});


@ -0,0 +1,115 @@
use x86::io;
use device::serial::{COM1, COM2};
use time;
extern {
fn irq_trigger(irq: u8);
}
/// Send end-of-interrupt (0x20) to the master PIC command port
#[inline(always)]
unsafe fn master_ack() {
io::outb(0x20, 0x20);
}
/// Send end-of-interrupt to the slave PIC, then to the master it cascades through
#[inline(always)]
unsafe fn slave_ack() {
io::outb(0xA0, 0x20);
master_ack();
}
pub unsafe fn acknowledge(irq: usize) {
if irq >= 8 {
slave_ack();
} else {
master_ack();
}
}
interrupt!(pit, {
// Saves CPU time by not sending the IRQ event; the irq_trigger(0) call is intentionally omitted
{
// Nanoseconds per PIT tick, roughly a 444 Hz timer
const PIT_RATE: u64 = 2250286;
let mut offset = time::OFFSET.lock();
let sum = offset.1 + PIT_RATE;
offset.1 = sum % 1000000000;
offset.0 += sum / 1000000000;
}
master_ack();
});
interrupt!(keyboard, {
irq_trigger(1);
});
interrupt!(cascade, {
irq_trigger(2);
master_ack();
});
interrupt!(com2, {
irq_trigger(3);
COM2.lock().on_receive();
master_ack();
});
interrupt!(com1, {
irq_trigger(4);
COM1.lock().on_receive();
master_ack();
});
interrupt!(lpt2, {
irq_trigger(5);
master_ack();
});
interrupt!(floppy, {
irq_trigger(6);
master_ack();
});
interrupt!(lpt1, {
irq_trigger(7);
master_ack();
});
interrupt!(rtc, {
irq_trigger(8);
slave_ack();
});
interrupt!(pci1, {
irq_trigger(9);
slave_ack();
});
interrupt!(pci2, {
irq_trigger(10);
});
interrupt!(pci3, {
irq_trigger(11);
});
interrupt!(mouse, {
irq_trigger(12);
});
interrupt!(fpu, {
irq_trigger(13);
slave_ack();
});
interrupt!(ata1, {
irq_trigger(14);
slave_ack();
});
interrupt!(ata2, {
irq_trigger(15);
slave_ack();
});


@ -0,0 +1,85 @@
//! Interrupt instructions
use core::mem;
use paging::{ActivePageTable, VirtualAddress};
pub mod exception;
pub mod ipi;
pub mod irq;
pub mod syscall;
/// Clear interrupts
#[inline(always)]
pub unsafe fn disable() {
asm!("cli" : : : : "intel", "volatile");
}
/// Set interrupts
#[inline(always)]
pub unsafe fn enable() {
asm!("sti" : : : : "intel", "volatile");
}
/// Set interrupts and halt
/// This will atomically wait for the next interrupt
/// Performing enable followed by halt is not guaranteed to be atomic; use this instead!
#[inline(always)]
pub unsafe fn enable_and_halt() {
asm!("sti
hlt"
: : : : "intel", "volatile");
}
/// Set interrupts and nop
/// This will enable interrupts and allow the IF flag to be processed
/// Simply enabling interrupts does not guarantee that they will trigger; use this instead!
#[inline(always)]
pub unsafe fn enable_and_nop() {
asm!("sti
nop"
: : : : "intel", "volatile");
}
/// Halt instruction
#[inline(always)]
pub unsafe fn halt() {
asm!("hlt" : : : : "intel", "volatile");
}
/// Pause instruction
/// Safe because it is similar to a NOP, and has no memory effects
#[inline(always)]
pub fn pause() {
unsafe { asm!("pause" : : : : "intel", "volatile"); }
}
/// Get a stack trace
//TODO: Check for stack being mapped before dereferencing
#[inline(never)]
pub unsafe fn stack_trace() {
let mut rbp: usize;
asm!("" : "={rbp}"(rbp) : : : "intel", "volatile");
println!("TRACE: {:>016X}", rbp);
//Maximum 64 frames
let active_table = ActivePageTable::new();
for _frame in 0..64 {
if let Some(rip_rbp) = rbp.checked_add(mem::size_of::<usize>()) {
if active_table.translate(VirtualAddress::new(rbp)).is_some() && active_table.translate(VirtualAddress::new(rip_rbp)).is_some() {
let rip = *(rip_rbp as *const usize);
if rip == 0 {
println!(" {:>016X}: EMPTY RETURN", rbp);
break;
}
println!(" {:>016X}: {:>016X}", rbp, rip);
rbp = *(rbp as *const usize);
} else {
println!(" {:>016X}: GUARD PAGE", rbp);
break;
}
} else {
println!(" {:>016X}: RBP OVERFLOW", rbp);
break;
}
}
}
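// Frame layout assumed by the walk above: with frame pointers enabled, [rbp]
// holds the caller's saved rbp and [rbp + 8] holds the return address, so
// following the rbp chain visits one stack frame per iteration.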


@ -0,0 +1,61 @@
#[naked]
pub unsafe extern fn syscall() {
#[inline(never)]
unsafe fn inner() {
extern {
fn syscall(a: usize, b: usize, c: usize, d: usize, e: usize, f: usize, stack: usize) -> usize;
}
let mut a;
{
let b;
let c;
let d;
let e;
let f;
let stack;
asm!("" : "={rax}"(a), "={rbx}"(b), "={rcx}"(c), "={rdx}"(d), "={rsi}"(e), "={rdi}"(f), "={rbp}"(stack)
: : : "intel", "volatile");
a = syscall(a, b, c, d, e, f, stack);
}
asm!("" : : "{rax}"(a) : : "intel", "volatile");
}
// Push scratch registers, minus rax for the return value
asm!("push rcx
push rdx
push rdi
push rsi
push r8
push r9
push r10
push r11
push fs
mov r11, 0x18
mov fs, r11"
: : : : "intel", "volatile");
inner();
// Interrupt return
asm!("pop fs
pop r11
pop r10
pop r9
pop r8
pop rsi
pop rdi
pop rdx
pop rcx
iretq"
: : : : "intel", "volatile");
}
#[naked]
pub unsafe extern fn clone_ret() -> usize {
asm!("pop rbp"
: : : : "intel", "volatile");
0
}
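// Register convention assumed by the handler above, as read off the asm
// bindings (it is not documented elsewhere in this commit): rax carries the
// syscall number in and the return value out; rbx, rcx, rdx, rsi, and rdi
// carry the five arguments; rbp carries the userspace stack pointer.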


@ -0,0 +1,327 @@
//! Architecture support for x86_64
#![deny(unused_must_use)]
#![feature(asm)]
#![feature(concat_idents)]
#![feature(const_fn)]
#![feature(core_intrinsics)]
#![feature(drop_types_in_const)]
#![feature(lang_items)]
#![feature(naked_functions)]
#![feature(thread_local)]
#![feature(unique)]
#![no_std]
extern crate hole_list_allocator as allocator;
#[macro_use]
extern crate bitflags;
extern crate io;
extern crate spin;
extern crate syscall;
pub extern crate x86;
// Because it is so important that the memory map not be aliased, it is defined here, in one place
// The lower 256 PML4 entries are reserved for userspace
// Each PML4 entry references up to 512 GB of memory
// The top PML4 entry (511) is reserved for recursive mapping
// The second from the top (510) is reserved for the kernel
/// The size of a single PML4
pub const PML4_SIZE: usize = 0x0000_0080_0000_0000;
/// Offset of recursive paging
pub const RECURSIVE_PAGE_OFFSET: usize = (-(PML4_SIZE as isize)) as usize;
/// Offset of kernel
pub const KERNEL_OFFSET: usize = RECURSIVE_PAGE_OFFSET - PML4_SIZE;
/// Offset to kernel heap
pub const KERNEL_HEAP_OFFSET: usize = KERNEL_OFFSET + PML4_SIZE/2;
/// Size of kernel heap
pub const KERNEL_HEAP_SIZE: usize = 128 * 1024 * 1024; // 128 MB
/// Offset to kernel percpu variables
//TODO: Use 64-bit fs offset to enable this:
//pub const KERNEL_PERCPU_OFFSET: usize = KERNEL_HEAP_OFFSET - PML4_SIZE;
pub const KERNEL_PERCPU_OFFSET: usize = 0xC000_0000;
/// Size of kernel percpu variables
pub const KERNEL_PERCPU_SIZE: usize = 64 * 1024; // 64 KB
/// Offset to user image
pub const USER_OFFSET: usize = 0;
/// Offset to user TCB
pub const USER_TCB_OFFSET: usize = 0xB000_0000;
/// Offset to user arguments
pub const USER_ARG_OFFSET: usize = USER_OFFSET + PML4_SIZE/2;
/// Offset to user heap
pub const USER_HEAP_OFFSET: usize = USER_OFFSET + PML4_SIZE;
/// Offset to user grants
pub const USER_GRANT_OFFSET: usize = USER_HEAP_OFFSET + PML4_SIZE;
/// Offset to user stack
pub const USER_STACK_OFFSET: usize = USER_GRANT_OFFSET + PML4_SIZE;
/// Size of user stack
pub const USER_STACK_SIZE: usize = 1024 * 1024; // 1 MB
/// Offset to user TLS
pub const USER_TLS_OFFSET: usize = USER_STACK_OFFSET + PML4_SIZE;
/// Offset to user temporary image (used when cloning)
pub const USER_TMP_OFFSET: usize = USER_TLS_OFFSET + PML4_SIZE;
/// Offset to user temporary heap (used when cloning)
pub const USER_TMP_HEAP_OFFSET: usize = USER_TMP_OFFSET + PML4_SIZE;
/// Offset to user temporary page for grants
pub const USER_TMP_GRANT_OFFSET: usize = USER_TMP_HEAP_OFFSET + PML4_SIZE;
/// Offset to user temporary stack (used when cloning)
pub const USER_TMP_STACK_OFFSET: usize = USER_TMP_GRANT_OFFSET + PML4_SIZE;
/// Offset to user temporary tls (used when cloning)
pub const USER_TMP_TLS_OFFSET: usize = USER_TMP_STACK_OFFSET + PML4_SIZE;
/// Offset for usage in other temporary pages
pub const USER_TMP_MISC_OFFSET: usize = USER_TMP_TLS_OFFSET + PML4_SIZE;
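// Sanity check of the layout arithmetic above (illustrative, not part of the
// original source): with PML4_SIZE covering 512 GiB, the constants resolve to
// RECURSIVE_PAGE_OFFSET = 0xFFFF_FF80_0000_0000 (base of PML4 entry 511),
// KERNEL_OFFSET = 0xFFFF_FF00_0000_0000 (base of PML4 entry 510), and
// KERNEL_HEAP_OFFSET = 0xFFFF_FF40_0000_0000 (upper half of entry 510).
fn _layout_sanity_check() {
debug_assert_eq!(RECURSIVE_PAGE_OFFSET, 0xFFFF_FF80_0000_0000);
debug_assert_eq!(KERNEL_OFFSET, 0xFFFF_FF00_0000_0000);
debug_assert_eq!(KERNEL_HEAP_OFFSET, 0xFFFF_FF40_0000_0000);
}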
/// Print to console
#[macro_export]
macro_rules! print {
($($arg:tt)*) => ({
use core::fmt::Write;
let _ = write!($crate::console::CONSOLE.lock(), $($arg)*);
});
}
/// Print with new line to console
#[macro_export]
macro_rules! println {
($fmt:expr) => (print!(concat!($fmt, "\n")));
($fmt:expr, $($arg:tt)*) => (print!(concat!($fmt, "\n"), $($arg)*));
}
/// Create an interrupt function that can safely run rust code
#[macro_export]
macro_rules! interrupt {
($name:ident, $func:block) => {
#[naked]
pub unsafe extern fn $name () {
#[inline(never)]
unsafe fn inner() {
$func
}
// Push scratch registers
asm!("push rax
push rcx
push rdx
push rdi
push rsi
push r8
push r9
push r10
push r11
push fs
mov rax, 0x18
mov fs, ax"
: : : : "intel", "volatile");
// Call inner rust function
inner();
// Pop scratch registers and return
asm!("pop fs
pop r11
pop r10
pop r9
pop r8
pop rsi
pop rdi
pop rdx
pop rcx
pop rax
iretq"
: : : : "intel", "volatile");
}
};
}
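// Hypothetical usage of the macro above (names are illustrative, not from
// this commit): the block becomes the body of an inner function that runs
// between the scratch-register save and restore.
//
// interrupt!(timer_tick, {
//     println!("tick");
// });
// ...and later, while filling the IDT: idt_entry.set_func(timer_tick);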
#[repr(packed)]
pub struct InterruptStack {
fs: usize,
r11: usize,
r10: usize,
r9: usize,
r8: usize,
rsi: usize,
rdi: usize,
rdx: usize,
rcx: usize,
rax: usize,
rip: usize,
cs: usize,
rflags: usize,
}
#[macro_export]
macro_rules! interrupt_stack {
($name:ident, $stack: ident, $func:block) => {
#[naked]
pub unsafe extern fn $name () {
#[inline(never)]
unsafe fn inner($stack: &$crate::InterruptStack) {
$func
}
// Push scratch registers
asm!("push rax
push rcx
push rdx
push rdi
push rsi
push r8
push r9
push r10
push r11
push fs
mov rax, 0x18
mov fs, ax"
: : : : "intel", "volatile");
// Get reference to stack variables
let rsp: usize;
asm!("" : "={rsp}"(rsp) : : : "intel", "volatile");
// Call inner rust function
inner(&*(rsp as *const $crate::InterruptStack));
// Pop scratch registers and return
asm!("pop fs
pop r11
pop r10
pop r9
pop r8
pop rsi
pop rdi
pop rdx
pop rcx
pop rax
iretq"
: : : : "intel", "volatile");
}
};
}
#[repr(packed)]
pub struct InterruptErrorStack {
fs: usize,
r11: usize,
r10: usize,
r9: usize,
r8: usize,
rsi: usize,
rdi: usize,
rdx: usize,
rcx: usize,
rax: usize,
code: usize,
rip: usize,
cs: usize,
rflags: usize,
}
#[macro_export]
macro_rules! interrupt_error {
($name:ident, $stack:ident, $func:block) => {
#[naked]
pub unsafe extern fn $name () {
#[inline(never)]
unsafe fn inner($stack: &$crate::InterruptErrorStack) {
$func
}
// Push scratch registers
asm!("push rax
push rcx
push rdx
push rdi
push rsi
push r8
push r9
push r10
push r11
push fs
mov rax, 0x18
mov fs, ax"
: : : : "intel", "volatile");
// Get reference to stack variables
let rsp: usize;
asm!("" : "={rsp}"(rsp) : : : "intel", "volatile");
// Call inner rust function
inner(&*(rsp as *const $crate::InterruptErrorStack));
// Pop scratch registers, error code, and return
asm!("pop fs
pop r11
pop r10
pop r9
pop r8
pop rsi
pop rdi
pop rdx
pop rcx
pop rax
add rsp, 8
iretq"
: : : : "intel", "volatile");
}
};
}
/// ACPI table parsing
pub mod acpi;
/// Console handling
pub mod console;
/// Context switching
pub mod context;
/// Devices
pub mod device;
/// Memcpy, memmove, etc.
pub mod externs;
/// Global descriptor table
pub mod gdt;
/// Interrupt descriptor table
pub mod idt;
/// Interrupt instructions
pub mod interrupt;
/// Memory management
pub mod memory;
/// Paging
pub mod paging;
/// Panic
pub mod panic;
/// Initialization and start function
pub mod start;
/// Shutdown function
pub mod stop;
/// Time
pub mod time;


@ -0,0 +1,63 @@
ENTRY(kstart)
OUTPUT_FORMAT(elf64-x86-64)
KERNEL_OFFSET = 0xffffff0000100000;
/* KERNEL_OFFSET = 0x100000; */
SECTIONS {
. = KERNEL_OFFSET;
. += SIZEOF_HEADERS;
. = ALIGN(4096);
.text : AT(ADDR(.text) - KERNEL_OFFSET) {
__text_start = .;
*(.text*)
. = ALIGN(4096);
__text_end = .;
}
.rodata : AT(ADDR(.rodata) - KERNEL_OFFSET) {
__rodata_start = .;
*(.rodata*)
. = ALIGN(4096);
__rodata_end = .;
}
.data : AT(ADDR(.data) - KERNEL_OFFSET) {
__data_start = .;
*(.data*)
. = ALIGN(4096);
__data_end = .;
}
.tdata : AT(ADDR(.tdata) - KERNEL_OFFSET) {
__tdata_start = .;
*(.tdata*)
. = ALIGN(4096);
__tdata_end = .;
__tbss_start = .;
*(.tbss*)
. += 8;
. = ALIGN(4096);
__tbss_end = .;
}
.bss : AT(ADDR(.bss) - KERNEL_OFFSET) {
__bss_start = .;
*(.bss*)
. = ALIGN(4096);
__bss_end = .;
}
__end = .;
/DISCARD/ : {
*(.comment*)
*(.debug*)
*(.eh_frame*)
*(.gcc_except_table*)
*(.note*)
*(.rel.eh_frame*)
}
}
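/* Note (not in the original script): each AT(ADDR(section) - KERNEL_OFFSET)
   directive keeps the load addresses down at the physical 1 MiB region while
   the virtual addresses live in the higher half, matching the kernel offset
   constants defined in the Rust code. */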


@ -0,0 +1,127 @@
//! # Area frame allocator
//! Some code was borrowed from [Phil Opp's Blog](http://os.phil-opp.com/allocating-frames.html)
use paging::PhysicalAddress;
use super::{Frame, FrameAllocator, MemoryArea, MemoryAreaIter};
pub struct AreaFrameAllocator {
next_free_frame: Frame,
current_area: Option<&'static MemoryArea>,
areas: MemoryAreaIter,
kernel_start: Frame,
kernel_end: Frame
}
impl AreaFrameAllocator {
pub fn new(kernel_start: usize, kernel_end: usize, memory_areas: MemoryAreaIter) -> AreaFrameAllocator {
let mut allocator = AreaFrameAllocator {
next_free_frame: Frame::containing_address(PhysicalAddress::new(0)),
current_area: None,
areas: memory_areas,
kernel_start: Frame::containing_address(PhysicalAddress::new(kernel_start)),
kernel_end: Frame::containing_address(PhysicalAddress::new(kernel_end))
};
allocator.choose_next_area();
allocator
}
fn choose_next_area(&mut self) {
self.current_area = self.areas.clone().filter(|area| {
let address = area.base_addr + area.length - 1;
Frame::containing_address(PhysicalAddress::new(address as usize)) >= self.next_free_frame
}).min_by_key(|area| area.base_addr);
if let Some(area) = self.current_area {
let start_frame = Frame::containing_address(PhysicalAddress::new(area.base_addr as usize));
if self.next_free_frame < start_frame {
self.next_free_frame = start_frame;
}
}
}
}
impl FrameAllocator for AreaFrameAllocator {
fn free_frames(&self) -> usize {
let mut count = 0;
for area in self.areas.clone() {
let start_frame = Frame::containing_address(PhysicalAddress::new(area.base_addr as usize));
let end_frame = Frame::containing_address(PhysicalAddress::new((area.base_addr + area.length - 1) as usize));
for frame in Frame::range_inclusive(start_frame, end_frame) {
if frame >= self.kernel_start && frame <= self.kernel_end {
// Inside of kernel range
} else if frame >= self.next_free_frame {
// Frame is in free range
count += 1;
} else {
// Inside of used range
}
}
}
count
}
fn used_frames(&self) -> usize {
let mut count = 0;
for area in self.areas.clone() {
let start_frame = Frame::containing_address(PhysicalAddress::new(area.base_addr as usize));
let end_frame = Frame::containing_address(PhysicalAddress::new((area.base_addr + area.length - 1) as usize));
for frame in Frame::range_inclusive(start_frame, end_frame) {
if frame >= self.kernel_start && frame <= self.kernel_end {
// Inside of kernel range
count += 1
} else if frame >= self.next_free_frame {
// Frame is in free range
} else {
count += 1;
}
}
}
count
}
fn allocate_frames(&mut self, count: usize) -> Option<Frame> {
if count == 0 {
None
} else if let Some(area) = self.current_area {
// "Clone" the frame to return it if it's free. Frame doesn't
// implement Clone, but we can construct an identical frame.
let start_frame = Frame{ number: self.next_free_frame.number };
let end_frame = Frame { number: self.next_free_frame.number + (count - 1) };
// the last frame of the current area
let current_area_last_frame = {
let address = area.base_addr + area.length - 1;
Frame::containing_address(PhysicalAddress::new(address as usize))
};
if end_frame > current_area_last_frame {
// all frames of current area are used, switch to next area
self.choose_next_area();
} else if (start_frame >= self.kernel_start && start_frame <= self.kernel_end)
|| (end_frame >= self.kernel_start && end_frame <= self.kernel_end) {
// `frame` is used by the kernel
self.next_free_frame = Frame {
number: self.kernel_end.number + 1
};
} else {
// frame is unused, increment `next_free_frame` and return it
self.next_free_frame.number += count;
return Some(start_frame);
}
// `frame` was not valid, try it again with the updated `next_free_frame`
self.allocate_frames(count)
} else {
None // no free frames left
}
}
fn deallocate_frames(&mut self, frame: Frame, count: usize) {
// The area frame allocator cannot reclaim frames, so deallocation is a no-op
//panic!("AreaFrameAllocator::deallocate_frame: not supported: {:?}", frame);
}
}


@ -0,0 +1,189 @@
//! # Memory management
//! Some code was borrowed from [Phil Opp's Blog](http://os.phil-opp.com/allocating-frames.html)
pub use paging::{PAGE_SIZE, PhysicalAddress};
use self::area_frame_allocator::AreaFrameAllocator;
use spin::Mutex;
pub mod area_frame_allocator;
/// The current memory map. Its size is capped at 512 entries, even though the
/// region it is copied from (0x500 to 0x5000) has room for 800 entries in total
static mut MEMORY_MAP: [MemoryArea; 512] = [MemoryArea { base_addr: 0, length: 0, _type: 0, acpi: 0 }; 512];
/// Memory does not exist
pub const MEMORY_AREA_NULL: u32 = 0;
/// Memory is free to use
pub const MEMORY_AREA_FREE: u32 = 1;
/// Memory is reserved
pub const MEMORY_AREA_RESERVED: u32 = 2;
/// Memory is used by ACPI, and can be reclaimed
pub const MEMORY_AREA_ACPI: u32 = 3;
/// A memory map area
#[derive(Copy, Clone, Debug, Default)]
#[repr(packed)]
pub struct MemoryArea {
pub base_addr: u64,
pub length: u64,
pub _type: u32,
pub acpi: u32
}
#[derive(Clone)]
pub struct MemoryAreaIter {
_type: u32,
i: usize
}
impl MemoryAreaIter {
fn new(_type: u32) -> Self {
MemoryAreaIter {
_type: _type,
i: 0
}
}
}
impl Iterator for MemoryAreaIter {
type Item = &'static MemoryArea;
fn next(&mut self) -> Option<Self::Item> {
while self.i < unsafe { MEMORY_MAP.len() } {
let entry = unsafe { &MEMORY_MAP[self.i] };
self.i += 1;
if entry._type == self._type {
return Some(entry);
}
}
None
}
}
static ALLOCATOR: Mutex<Option<AreaFrameAllocator>> = Mutex::new(None);
/// Init memory module
/// Must be called once, and only once
pub unsafe fn init(kernel_start: usize, kernel_end: usize) {
// Copy memory map from bootloader location
for (i, entry) in MEMORY_MAP.iter_mut().enumerate() {
*entry = *(0x500 as *const MemoryArea).offset(i as isize);
if entry._type != MEMORY_AREA_NULL {
println!("{:?}", entry);
}
}
*ALLOCATOR.lock() = Some(AreaFrameAllocator::new(kernel_start, kernel_end, MemoryAreaIter::new(MEMORY_AREA_FREE)));
}
/// Allocate a frame
pub fn allocate_frame() -> Option<Frame> {
allocate_frames(1)
}
/// Deallocate a frame
pub fn deallocate_frame(frame: Frame) {
deallocate_frames(frame, 1)
}
/// Get the number of frames available
pub fn free_frames() -> usize {
if let Some(ref allocator) = *ALLOCATOR.lock() {
allocator.free_frames()
} else {
panic!("frame allocator not initialized");
}
}
/// Get the number of frames used
pub fn used_frames() -> usize {
if let Some(ref allocator) = *ALLOCATOR.lock() {
allocator.used_frames()
} else {
panic!("frame allocator not initialized");
}
}
/// Allocate a range of frames
pub fn allocate_frames(count: usize) -> Option<Frame> {
if let Some(ref mut allocator) = *ALLOCATOR.lock() {
allocator.allocate_frames(count)
} else {
panic!("frame allocator not initialized");
}
}
/// Deallocate a range of frames
pub fn deallocate_frames(frame: Frame, count: usize) {
if let Some(ref mut allocator) = *ALLOCATOR.lock() {
allocator.deallocate_frames(frame, count)
} else {
panic!("frame allocator not initialized");
}
}
/// A frame, allocated by the frame allocator.
/// Do not add more derives, or make anything `pub`!
#[derive(Debug, PartialEq, Eq, PartialOrd, Ord)]
pub struct Frame {
number: usize
}
impl Frame {
/// Get the address of this frame
pub fn start_address(&self) -> PhysicalAddress {
PhysicalAddress::new(self.number * PAGE_SIZE)
}
//TODO: Set private
pub fn clone(&self) -> Frame {
Frame {
number: self.number
}
}
/// Create a frame containing `address`
pub fn containing_address(address: PhysicalAddress) -> Frame {
Frame {
number: address.get() / PAGE_SIZE
}
}
//TODO: Set private
pub fn range_inclusive(start: Frame, end: Frame) -> FrameIter {
FrameIter {
start: start,
end: end,
}
}
}
pub struct FrameIter {
start: Frame,
end: Frame,
}
impl Iterator for FrameIter {
type Item = Frame;
fn next(&mut self) -> Option<Frame> {
if self.start <= self.end {
let frame = self.start.clone();
self.start.number += 1;
Some(frame)
} else {
None
}
}
}
pub trait FrameAllocator {
fn free_frames(&self) -> usize;
fn used_frames(&self) -> usize;
fn allocate_frames(&mut self, size: usize) -> Option<Frame>;
fn deallocate_frames(&mut self, frame: Frame, size: usize);
}


@ -0,0 +1,62 @@
//! # Page table entry
//! Some code borrowed from [Phil Opp's Blog](http://os.phil-opp.com/modifying-page-tables.html)
use memory::Frame;
use super::PhysicalAddress;
/// A page table entry
pub struct Entry(u64);
bitflags! {
pub flags EntryFlags: u64 {
const PRESENT = 1 << 0,
const WRITABLE = 1 << 1,
const USER_ACCESSIBLE = 1 << 2,
const WRITE_THROUGH = 1 << 3,
const NO_CACHE = 1 << 4,
const ACCESSED = 1 << 5,
const DIRTY = 1 << 6,
const HUGE_PAGE = 1 << 7,
const GLOBAL = 1 << 8,
const NO_EXECUTE = 1 << 63,
}
}
pub const ADDRESS_MASK: usize = 0x000f_ffff_ffff_f000;
impl Entry {
/// Is the entry unused?
pub fn is_unused(&self) -> bool {
self.0 == 0
}
/// Make the entry unused
pub fn set_unused(&mut self) {
self.0 = 0;
}
/// Get the address this page references
pub fn address(&self) -> PhysicalAddress {
PhysicalAddress::new(self.0 as usize & ADDRESS_MASK)
}
/// Get the current entry flags
pub fn flags(&self) -> EntryFlags {
EntryFlags::from_bits_truncate(self.0)
}
/// Get the associated frame, if available
pub fn pointed_frame(&self) -> Option<Frame> {
if self.flags().contains(PRESENT) {
Some(Frame::containing_address(self.address()))
} else {
None
}
}
pub fn set(&mut self, frame: Frame, flags: EntryFlags) {
debug_assert!(frame.start_address().get() & !ADDRESS_MASK == 0);
self.0 = (frame.start_address().get() as u64) | flags.bits();
}
}
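// Worked example (illustrative, not in the original source) of the bit layout
// handled by set, address, and flags above: mapping the frame at physical
// 0x2000 with PRESENT | WRITABLE stores the raw value 0x2003, the frame
// address in bits 12..52 plus the two low flag bits.
fn _entry_bits_example(entry: &mut Entry) {
entry.set(Frame::containing_address(PhysicalAddress::new(0x2000)), PRESENT | WRITABLE);
debug_assert_eq!(entry.address().get(), 0x2000);
}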


@ -0,0 +1,182 @@
use core::mem;
use core::ptr::Unique;
use memory::{allocate_frame, deallocate_frame, Frame};
use super::{ActivePageTable, Page, PAGE_SIZE, PhysicalAddress, VirtualAddress};
use super::entry::{self, EntryFlags};
use super::table::{self, Table, Level4};
/// In order to enforce correct paging operations in the kernel, these types
/// are returned on any mapping operation to get the code involved to specify
/// how it intends to flush changes to a page table
#[must_use = "The page table must be flushed, or the changes unsafely ignored"]
pub struct MapperFlush(Page);
impl MapperFlush {
/// Create a new page flush promise
pub fn new(page: Page) -> MapperFlush {
MapperFlush(page)
}
/// Flush this page in the active table
pub fn flush(self, table: &mut ActivePageTable) {
table.flush(self.0);
mem::forget(self);
}
/// Ignore the flush. This is unsafe, and a reason should be provided for use
pub unsafe fn ignore(self) {
mem::forget(self);
}
}
/// A flush cannot be dropped, it must be consumed
impl Drop for MapperFlush {
fn drop(&mut self) {
panic!("Mapper flush was not utilized");
}
}
/// To allow for combining multiple flushes into one, we have a way of flushing
/// the active table, which can consume MapperFlush structs
#[must_use = "The page table must be flushed, or the changes unsafely ignored"]
pub struct MapperFlushAll(bool);
impl MapperFlushAll {
/// Create a new promise to flush all mappings
pub fn new() -> MapperFlushAll {
MapperFlushAll(false)
}
/// Consume a single page flush
pub fn consume(&mut self, flush: MapperFlush) {
self.0 = true;
mem::forget(flush);
}
/// Flush the active page table
pub fn flush(self, table: &mut ActivePageTable) {
if self.0 {
table.flush_all();
}
mem::forget(self);
}
/// Ignore the flush. This is unsafe, and a reason should be provided for use
pub unsafe fn ignore(self) {
mem::forget(self);
}
}
/// A flush cannot be dropped, it must be consumed
impl Drop for MapperFlushAll {
fn drop(&mut self) {
panic!("Mapper flush all was not utilized");
}
}
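// Illustrative usage of the flush-promise pattern above (the real call sites
// appear later in this commit, e.g. when the kernel heap is mapped):
//
// let mut flush_all = MapperFlushAll::new();
// flush_all.consume(mapper.map(page, flags));
// flush_all.flush(&mut active_table);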
pub struct Mapper {
p4: Unique<Table<Level4>>,
}
impl Mapper {
/// Create a new page table
pub unsafe fn new() -> Mapper {
Mapper {
p4: Unique::new(table::P4),
}
}
pub fn p4(&self) -> &Table<Level4> {
unsafe { self.p4.get() }
}
pub fn p4_mut(&mut self) -> &mut Table<Level4> {
unsafe { self.p4.get_mut() }
}
/// Map a page to a frame
pub fn map_to(&mut self, page: Page, frame: Frame, flags: EntryFlags) -> MapperFlush {
let mut p3 = self.p4_mut().next_table_create(page.p4_index());
let mut p2 = p3.next_table_create(page.p3_index());
let mut p1 = p2.next_table_create(page.p2_index());
assert!(p1[page.p1_index()].is_unused(),
"{:X}: Set to {:X}: {:?}, requesting {:X}: {:?}",
page.start_address().get(),
p1[page.p1_index()].address().get(), p1[page.p1_index()].flags(),
frame.start_address().get(), flags);
p1[page.p1_index()].set(frame, flags | entry::PRESENT);
MapperFlush::new(page)
}
/// Map a page to the next free frame
pub fn map(&mut self, page: Page, flags: EntryFlags) -> MapperFlush {
let frame = allocate_frame().expect("out of frames");
self.map_to(page, frame, flags)
}
/// Update flags for a page
pub fn remap(&mut self, page: Page, flags: EntryFlags) -> MapperFlush {
let mut p3 = self.p4_mut().next_table_mut(page.p4_index()).expect("failed to remap: no p3");
let mut p2 = p3.next_table_mut(page.p3_index()).expect("failed to remap: no p2");
let mut p1 = p2.next_table_mut(page.p2_index()).expect("failed to remap: no p1");
let frame = p1[page.p1_index()].pointed_frame().expect("failed to remap: not mapped");
p1[page.p1_index()].set(frame, flags | entry::PRESENT);
MapperFlush::new(page)
}
/// Identity map a frame
pub fn identity_map(&mut self, frame: Frame, flags: EntryFlags) -> MapperFlush {
let page = Page::containing_address(VirtualAddress::new(frame.start_address().get()));
self.map_to(page, frame, flags)
}
/// Unmap a page
pub fn unmap(&mut self, page: Page) -> MapperFlush {
let p1 = self.p4_mut()
.next_table_mut(page.p4_index())
.and_then(|p3| p3.next_table_mut(page.p3_index()))
.and_then(|p2| p2.next_table_mut(page.p2_index()))
.expect("unmap does not support huge pages");
let frame = p1[page.p1_index()].pointed_frame().unwrap();
p1[page.p1_index()].set_unused();
// TODO free p(1,2,3) table if empty
deallocate_frame(frame);
MapperFlush::new(page)
}
/// Unmap a page, return frame without free
pub fn unmap_return(&mut self, page: Page) -> (MapperFlush, Frame) {
let p1 = self.p4_mut()
.next_table_mut(page.p4_index())
.and_then(|p3| p3.next_table_mut(page.p3_index()))
.and_then(|p2| p2.next_table_mut(page.p2_index()))
.expect("unmap_return does not support huge pages");
let frame = p1[page.p1_index()].pointed_frame().unwrap();
p1[page.p1_index()].set_unused();
(MapperFlush::new(page), frame)
}
pub fn translate_page(&self, page: Page) -> Option<Frame> {
self.p4().next_table(page.p4_index())
.and_then(|p3| p3.next_table(page.p3_index()))
.and_then(|p2| p2.next_table(page.p2_index()))
.and_then(|p1| p1[page.p1_index()].pointed_frame())
}
pub fn translate_page_flags(&self, page: Page) -> Option<EntryFlags> {
self.p4().next_table(page.p4_index())
.and_then(|p3| p3.next_table(page.p3_index()))
.and_then(|p2| p2.next_table(page.p2_index()))
.and_then(|p1| Some(p1[page.p1_index()].flags()))
}
/// Translate a virtual address to a physical one
pub fn translate(&self, virtual_address: VirtualAddress) -> Option<PhysicalAddress> {
let offset = virtual_address.get() % PAGE_SIZE;
self.translate_page(Page::containing_address(virtual_address))
.map(|frame| PhysicalAddress::new(frame.start_address().get() + offset))
}
}


@ -0,0 +1,429 @@
//! # Paging
//! Some code was borrowed from [Phil Opp's Blog](http://os.phil-opp.com/modifying-page-tables.html)
use core::mem;
use core::ops::{Deref, DerefMut};
use x86::{msr, tlb};
use memory::{allocate_frame, Frame};
use self::entry::{EntryFlags, PRESENT, GLOBAL, WRITABLE, NO_EXECUTE};
use self::mapper::Mapper;
use self::temporary_page::TemporaryPage;
pub mod entry;
pub mod mapper;
pub mod table;
pub mod temporary_page;
/// Number of entries per page table
pub const ENTRY_COUNT: usize = 512;
/// Size of pages
pub const PAGE_SIZE: usize = 4096;
/// Setup page attribute table
unsafe fn init_pat() {
let uncacheable = 0;
let write_combining = 1;
let write_through = 4;
//let write_protected = 5;
let write_back = 6;
let uncached = 7;
let pat0 = write_back;
let pat1 = write_through;
let pat2 = uncached;
let pat3 = uncacheable;
let pat4 = write_combining;
let pat5 = pat1;
let pat6 = pat2;
let pat7 = pat3;
msr::wrmsr(msr::IA32_PAT, pat7 << 56 | pat6 << 48 | pat5 << 40 | pat4 << 32
| pat3 << 24 | pat2 << 16 | pat1 << 8 | pat0);
}
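// Background for the table above (x86 detail, not stated in the source): a
// page's PWT (bit 3), PCD (bit 4), and PAT (bit 7) entry flags form a 3-bit
// index into IA32_PAT, so pat0 is the default write-back policy and setting
// only the PAT flag bit selects pat4 (write combining).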
/// Copy tdata, clear tbss, set TCB self pointer
unsafe fn init_tcb(cpu_id: usize) -> usize {
extern {
/// The starting byte of the thread data segment
static mut __tdata_start: u8;
/// The ending byte of the thread data segment
static mut __tdata_end: u8;
/// The starting byte of the thread BSS segment
static mut __tbss_start: u8;
/// The ending byte of the thread BSS segment
static mut __tbss_end: u8;
}
let tcb_offset;
{
let size = & __tbss_end as *const _ as usize - & __tdata_start as *const _ as usize;
let tbss_offset = & __tbss_start as *const _ as usize - & __tdata_start as *const _ as usize;
let start = ::KERNEL_PERCPU_OFFSET + ::KERNEL_PERCPU_SIZE * cpu_id;
let end = start + size;
tcb_offset = end - mem::size_of::<usize>();
::externs::memcpy(start as *mut u8, & __tdata_start as *const u8, tbss_offset);
::externs::memset((start + tbss_offset) as *mut u8, 0, size - tbss_offset);
*(tcb_offset as *mut usize) = end;
}
tcb_offset
}
/// Initialize paging
///
/// Returns page table and thread control block offset
pub unsafe fn init(cpu_id: usize, stack_start: usize, stack_end: usize) -> (ActivePageTable, usize) {
extern {
/// The starting byte of the text (code) data segment.
static mut __text_start: u8;
/// The ending byte of the text (code) data segment.
static mut __text_end: u8;
/// The starting byte of the _.rodata_ (read-only data) segment.
static mut __rodata_start: u8;
/// The ending byte of the _.rodata_ (read-only data) segment.
static mut __rodata_end: u8;
/// The starting byte of the _.data_ segment.
static mut __data_start: u8;
/// The ending byte of the _.data_ segment.
static mut __data_end: u8;
/// The starting byte of the thread data segment
static mut __tdata_start: u8;
/// The ending byte of the thread data segment
static mut __tdata_end: u8;
/// The starting byte of the thread BSS segment
static mut __tbss_start: u8;
/// The ending byte of the thread BSS segment
static mut __tbss_end: u8;
/// The starting byte of the _.bss_ (uninitialized data) segment.
static mut __bss_start: u8;
/// The ending byte of the _.bss_ (uninitialized data) segment.
static mut __bss_end: u8;
}
init_pat();
let mut active_table = ActivePageTable::new();
let mut temporary_page = TemporaryPage::new(Page::containing_address(VirtualAddress::new(::USER_TMP_MISC_OFFSET)));
let mut new_table = {
let frame = allocate_frame().expect("no more frames in paging::init new_table");
InactivePageTable::new(frame, &mut active_table, &mut temporary_page)
};
active_table.with(&mut new_table, &mut temporary_page, |mapper| {
// Map tdata and tbss
{
let size = & __tbss_end as *const _ as usize - & __tdata_start as *const _ as usize;
let start = ::KERNEL_PERCPU_OFFSET + ::KERNEL_PERCPU_SIZE * cpu_id;
let end = start + size;
let start_page = Page::containing_address(VirtualAddress::new(start));
let end_page = Page::containing_address(VirtualAddress::new(end - 1));
for page in Page::range_inclusive(start_page, end_page) {
let result = mapper.map(page, PRESENT | GLOBAL | NO_EXECUTE | WRITABLE);
// The flush can be ignored as this is not the active table. See later active_table.switch
unsafe { result.ignore(); }
}
}
let mut remap = |start: usize, end: usize, flags: EntryFlags| {
if end > start {
let start_frame = Frame::containing_address(PhysicalAddress::new(start));
let end_frame = Frame::containing_address(PhysicalAddress::new(end - 1));
for frame in Frame::range_inclusive(start_frame, end_frame) {
let page = Page::containing_address(VirtualAddress::new(frame.start_address().get() + ::KERNEL_OFFSET));
let result = mapper.map_to(page, frame, flags);
// The flush can be ignored as this is not the active table. See later active_table.switch
unsafe { result.ignore(); }
}
}
};
// Remap stack writable, no execute
remap(stack_start - ::KERNEL_OFFSET, stack_end - ::KERNEL_OFFSET, PRESENT | GLOBAL | NO_EXECUTE | WRITABLE);
// Remap a section with `flags`
let mut remap_section = |start: &u8, end: &u8, flags: EntryFlags| {
remap(start as *const _ as usize - ::KERNEL_OFFSET, end as *const _ as usize - ::KERNEL_OFFSET, flags);
};
// Remap text read-only
remap_section(& __text_start, & __text_end, PRESENT | GLOBAL);
// Remap rodata read-only, no execute
remap_section(& __rodata_start, & __rodata_end, PRESENT | GLOBAL | NO_EXECUTE);
// Remap data writable, no execute
remap_section(& __data_start, & __data_end, PRESENT | GLOBAL | NO_EXECUTE | WRITABLE);
// Remap tdata master writable, no execute
remap_section(& __tdata_start, & __tdata_end, PRESENT | GLOBAL | NO_EXECUTE);
// Remap bss writable, no execute
remap_section(& __bss_start, & __bss_end, PRESENT | GLOBAL | NO_EXECUTE | WRITABLE);
});
// This switches the active table, which is set up by the bootloader, to a correct table
// set up by the closure above. This will also flush the TLB
active_table.switch(new_table);
(active_table, init_tcb(cpu_id))
}
pub unsafe fn init_ap(cpu_id: usize, bsp_table: usize, stack_start: usize, stack_end: usize) -> usize {
extern {
/// The starting byte of the thread data segment
static mut __tdata_start: u8;
/// The ending byte of the thread data segment
static mut __tdata_end: u8;
/// The starting byte of the thread BSS segment
static mut __tbss_start: u8;
/// The ending byte of the thread BSS segment
static mut __tbss_end: u8;
}
init_pat();
let mut active_table = ActivePageTable::new();
let mut new_table = InactivePageTable::from_address(bsp_table);
let mut temporary_page = TemporaryPage::new(Page::containing_address(VirtualAddress::new(::USER_TMP_MISC_OFFSET)));
active_table.with(&mut new_table, &mut temporary_page, |mapper| {
// Map tdata and tbss
{
let size = & __tbss_end as *const _ as usize - & __tdata_start as *const _ as usize;
let start = ::KERNEL_PERCPU_OFFSET + ::KERNEL_PERCPU_SIZE * cpu_id;
let end = start + size;
let start_page = Page::containing_address(VirtualAddress::new(start));
let end_page = Page::containing_address(VirtualAddress::new(end - 1));
for page in Page::range_inclusive(start_page, end_page) {
let result = mapper.map(page, PRESENT | GLOBAL | NO_EXECUTE | WRITABLE);
// The flush can be ignored as this is not the active table. See later active_table.switch
unsafe { result.ignore(); }
}
}
let mut remap = |start: usize, end: usize, flags: EntryFlags| {
if end > start {
let start_frame = Frame::containing_address(PhysicalAddress::new(start));
let end_frame = Frame::containing_address(PhysicalAddress::new(end - 1));
for frame in Frame::range_inclusive(start_frame, end_frame) {
let page = Page::containing_address(VirtualAddress::new(frame.start_address().get() + ::KERNEL_OFFSET));
let result = mapper.map_to(page, frame, flags);
// The flush can be ignored as this is not the active table. See later active_table.switch
unsafe { result.ignore(); }
}
}
};
// Remap stack writable, no execute
remap(stack_start - ::KERNEL_OFFSET, stack_end - ::KERNEL_OFFSET, PRESENT | GLOBAL | NO_EXECUTE | WRITABLE);
});
// This switches the active table, which is set up by the bootloader, to a correct table
// set up by the closure above. This will also flush the TLB
active_table.switch(new_table);
init_tcb(cpu_id)
}
pub struct ActivePageTable {
mapper: Mapper,
}
impl Deref for ActivePageTable {
type Target = Mapper;
fn deref(&self) -> &Mapper {
&self.mapper
}
}
impl DerefMut for ActivePageTable {
fn deref_mut(&mut self) -> &mut Mapper {
&mut self.mapper
}
}
impl ActivePageTable {
pub unsafe fn new() -> ActivePageTable {
ActivePageTable {
mapper: Mapper::new(),
}
}
pub fn switch(&mut self, new_table: InactivePageTable) -> InactivePageTable {
use x86::controlregs;
let old_table = InactivePageTable {
p4_frame: Frame::containing_address(
PhysicalAddress::new(unsafe { controlregs::cr3() } as usize)
),
};
unsafe {
controlregs::cr3_write(new_table.p4_frame.start_address().get() as u64);
}
old_table
}
pub fn flush(&mut self, page: Page) {
unsafe { tlb::flush(page.start_address().get()); }
}
pub fn flush_all(&mut self) {
unsafe { tlb::flush_all(); }
}
pub fn with<F>(&mut self, table: &mut InactivePageTable, temporary_page: &mut temporary_page::TemporaryPage, f: F)
where F: FnOnce(&mut Mapper)
{
use x86::controlregs;
{
let backup = Frame::containing_address(PhysicalAddress::new(unsafe { controlregs::cr3() as usize }));
// map temporary_page to current p4 table
let p4_table = temporary_page.map_table_frame(backup.clone(), PRESENT | WRITABLE | NO_EXECUTE, self);
// overwrite recursive mapping
self.p4_mut()[511].set(table.p4_frame.clone(), PRESENT | WRITABLE | NO_EXECUTE);
self.flush_all();
// execute f in the new context
f(self);
// restore recursive mapping to original p4 table
p4_table[511].set(backup, PRESENT | WRITABLE | NO_EXECUTE);
self.flush_all();
}
temporary_page.unmap(self);
}
pub unsafe fn address(&self) -> usize {
use x86::controlregs;
controlregs::cr3() as usize
}
}
pub struct InactivePageTable {
p4_frame: Frame,
}
impl InactivePageTable {
pub fn new(frame: Frame, active_table: &mut ActivePageTable, temporary_page: &mut TemporaryPage) -> InactivePageTable {
{
let table = temporary_page.map_table_frame(frame.clone(), PRESENT | WRITABLE | NO_EXECUTE, active_table);
// now we are able to zero the table
table.zero();
// set up recursive mapping for the table
table[511].set(frame.clone(), PRESENT | WRITABLE | NO_EXECUTE);
}
temporary_page.unmap(active_table);
InactivePageTable { p4_frame: frame }
}
pub unsafe fn from_address(cr3: usize) -> InactivePageTable {
InactivePageTable { p4_frame: Frame::containing_address(PhysicalAddress::new(cr3)) }
}
pub unsafe fn address(&self) -> usize {
self.p4_frame.start_address().get()
}
}
/// A physical address.
#[derive(Copy, Clone, Debug, Eq, Ord, PartialEq, PartialOrd)]
pub struct PhysicalAddress(usize);
impl PhysicalAddress {
pub fn new(address: usize) -> Self {
PhysicalAddress(address)
}
pub fn get(&self) -> usize {
self.0
}
}
/// A virtual address.
#[derive(Copy, Clone, Debug, Eq, Ord, PartialEq, PartialOrd)]
pub struct VirtualAddress(usize);
impl VirtualAddress {
pub fn new(address: usize) -> Self {
VirtualAddress(address)
}
pub fn get(&self) -> usize {
self.0
}
}
/// Page
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
pub struct Page {
number: usize
}
impl Page {
pub fn start_address(&self) -> VirtualAddress {
VirtualAddress::new(self.number * PAGE_SIZE)
}
pub fn p4_index(&self) -> usize {
(self.number >> 27) & 0o777
}
pub fn p3_index(&self) -> usize {
(self.number >> 18) & 0o777
}
pub fn p2_index(&self) -> usize {
(self.number >> 9) & 0o777
}
pub fn p1_index(&self) -> usize {
(self.number >> 0) & 0o777
}
pub fn containing_address(address: VirtualAddress) -> Page {
//TODO assert!(address.get() < 0x0000_8000_0000_0000 || address.get() >= 0xffff_8000_0000_0000,
// "invalid address: 0x{:x}", address.get());
Page { number: address.get() / PAGE_SIZE }
}
pub fn range_inclusive(start: Page, end: Page) -> PageIter {
PageIter {
start: start,
end: end,
}
}
}
pub struct PageIter {
start: Page,
end: Page,
}
impl Iterator for PageIter {
type Item = Page;
fn next(&mut self) -> Option<Page> {
if self.start <= self.end {
let page = self.start;
self.start.number += 1;
Some(page)
} else {
None
}
}
}


@ -0,0 +1,98 @@
//! # Page table
//! Code borrowed from [Phil Opp's Blog](http://os.phil-opp.com/modifying-page-tables.html)
use core::marker::PhantomData;
use core::ops::{Index, IndexMut};
use memory::allocate_frame;
use super::entry::*;
use super::ENTRY_COUNT;
pub const P4: *mut Table<Level4> = 0xffff_ffff_ffff_f000 as *mut _;
pub trait TableLevel {}
pub enum Level4 {}
pub enum Level3 {}
pub enum Level2 {}
pub enum Level1 {}
impl TableLevel for Level4 {}
impl TableLevel for Level3 {}
impl TableLevel for Level2 {}
impl TableLevel for Level1 {}
pub trait HierarchicalLevel: TableLevel {
type NextLevel: TableLevel;
}
impl HierarchicalLevel for Level4 {
type NextLevel = Level3;
}
impl HierarchicalLevel for Level3 {
type NextLevel = Level2;
}
impl HierarchicalLevel for Level2 {
type NextLevel = Level1;
}
pub struct Table<L: TableLevel> {
entries: [Entry; ENTRY_COUNT],
level: PhantomData<L>,
}
impl<L> Table<L> where L: TableLevel {
pub fn zero(&mut self) {
for entry in self.entries.iter_mut() {
entry.set_unused();
}
}
}
impl<L> Table<L> where L: HierarchicalLevel {
pub fn next_table(&self, index: usize) -> Option<&Table<L::NextLevel>> {
self.next_table_address(index).map(|address| unsafe { &*(address as *const _) })
}
pub fn next_table_mut(&mut self, index: usize) -> Option<&mut Table<L::NextLevel>> {
self.next_table_address(index).map(|address| unsafe { &mut *(address as *mut _) })
}
pub fn next_table_create(&mut self, index: usize) -> &mut Table<L::NextLevel> {
if self.next_table(index).is_none() {
assert!(!self[index].flags().contains(HUGE_PAGE),
"next_table_create does not support huge pages");
let frame = allocate_frame().expect("no frames available");
self[index].set(frame, PRESENT | WRITABLE | USER_ACCESSIBLE /* Allow users to go down the page table, implement permissions at the page level */);
self.next_table_mut(index).unwrap().zero();
}
self.next_table_mut(index).unwrap()
}
fn next_table_address(&self, index: usize) -> Option<usize> {
let entry_flags = self[index].flags();
if entry_flags.contains(PRESENT) && !entry_flags.contains(HUGE_PAGE) {
let table_address = self as *const _ as usize;
Some((table_address << 9) | (index << 12))
} else {
None
}
}
}
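// Sanity check of the recursive-mapping arithmetic in next_table_address
// (illustrative, not in the original source): with P4 at
// 0xffff_ffff_ffff_f000, descending into entry i lands at
// (P4 << 9) | (i << 12) = 0xffff_ffff_ffe0_0000 + i * 0x1000.
fn _recursive_address_example() {
let p3_for_entry_0 = ((P4 as usize) << 9) | (0 << 12);
debug_assert_eq!(p3_for_entry_0, 0xffff_ffff_ffe0_0000);
}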
impl<L> Index<usize> for Table<L> where L: TableLevel {
type Output = Entry;
fn index(&self, index: usize) -> &Entry {
&self.entries[index]
}
}
impl<L> IndexMut<usize> for Table<L> where L: TableLevel {
fn index_mut(&mut self, index: usize) -> &mut Entry {
&mut self.entries[index]
}
}


@ -0,0 +1,45 @@
//! Temporarily map a page
//! From [Phil Opp's Blog](http://os.phil-opp.com/remap-the-kernel.html)
use memory::Frame;
use super::{ActivePageTable, Page, VirtualAddress};
use super::entry::EntryFlags;
use super::table::{Table, Level1};
pub struct TemporaryPage {
page: Page,
}
impl TemporaryPage {
pub fn new(page: Page) -> TemporaryPage {
TemporaryPage {
page: page,
}
}
pub fn start_address (&self) -> VirtualAddress {
self.page.start_address()
}
/// Maps the temporary page to the given frame in the active table.
/// Returns the start address of the temporary page.
pub fn map(&mut self, frame: Frame, flags: EntryFlags, active_table: &mut ActivePageTable) -> VirtualAddress {
assert!(active_table.translate_page(self.page).is_none(), "temporary page is already mapped");
let result = active_table.map_to(self.page, frame, flags);
result.flush(active_table);
self.page.start_address()
}
/// Maps the temporary page to the given page table frame in the active
/// table. Returns a reference to the now mapped table.
pub fn map_table_frame(&mut self, frame: Frame, flags: EntryFlags, active_table: &mut ActivePageTable) -> &mut Table<Level1> {
unsafe { &mut *(self.map(frame, flags, active_table).get() as *mut Table<Level1>) }
}
/// Unmaps the temporary page in the active table.
pub fn unmap(&mut self, active_table: &mut ActivePageTable) {
let result = active_table.unmap(self.page);
result.flush(active_table);
}
}


@ -0,0 +1,32 @@
//! Intrinsics for panic handling
use interrupt;
#[cfg(not(test))]
#[lang = "eh_personality"]
extern "C" fn eh_personality() {}
#[cfg(not(test))]
/// Required to handle panics
#[lang = "panic_fmt"]
extern "C" fn panic_fmt(fmt: ::core::fmt::Arguments, file: &str, line: u32) -> ! {
println!("PANIC: {}", fmt);
println!("FILE: {}", file);
println!("LINE: {}", line);
unsafe { interrupt::stack_trace(); }
println!("HALT");
loop {
unsafe { interrupt::halt(); }
}
}
#[allow(non_snake_case)]
#[no_mangle]
/// Required to handle panics
pub extern "C" fn _Unwind_Resume() -> ! {
loop {
unsafe { interrupt::halt(); }
}
}


@ -0,0 +1,189 @@
/// This function is where the kernel sets up IRQ handlers
/// It is incredibly unsafe, and should be minimal in nature
/// It must create the IDT with the correct entries; those entries are
/// defined in other files inside of the `arch` module
use core::sync::atomic::{AtomicBool, ATOMIC_BOOL_INIT, AtomicUsize, ATOMIC_USIZE_INIT, Ordering};
use acpi;
use allocator;
use device;
use externs::memset;
use gdt;
use idt;
use interrupt;
use memory;
use paging::{self, entry, Page, VirtualAddress};
use paging::mapper::MapperFlushAll;
/// Test of zero values in BSS.
static BSS_TEST_ZERO: usize = 0;
/// Test of non-zero values in data.
static DATA_TEST_NONZERO: usize = 0xFFFFFFFFFFFFFFFF;
/// Test of zero values in thread BSS
#[thread_local]
static mut TBSS_TEST_ZERO: usize = 0;
/// Test of non-zero values in thread data.
#[thread_local]
static mut TDATA_TEST_NONZERO: usize = 0xFFFFFFFFFFFFFFFF;
pub static CPU_COUNT: AtomicUsize = ATOMIC_USIZE_INIT;
pub static AP_READY: AtomicBool = ATOMIC_BOOL_INIT;
static BSP_READY: AtomicBool = ATOMIC_BOOL_INIT;
extern {
/// Kernel main function
fn kmain(cpus: usize) -> !;
/// Kernel main for APs
fn kmain_ap(id: usize) -> !;
}
/// The entry to Rust, all things must be initialized
#[no_mangle]
pub unsafe extern fn kstart() -> ! {
{
extern {
/// The starting byte of the _.bss_ (uninitialized data) segment.
static mut __bss_start: u8;
/// The ending byte of the _.bss_ (uninitialized data) segment.
static mut __bss_end: u8;
/// The end of the kernel
static mut __end: u8;
}
// Zero BSS; this initializes statics that are set to 0
{
let start_ptr = &mut __bss_start as *mut u8;
let end_ptr = & __bss_end as *const u8 as usize;
if start_ptr as usize <= end_ptr {
let size = end_ptr - start_ptr as usize;
memset(start_ptr, 0, size);
}
assert_eq!(BSS_TEST_ZERO, 0);
assert_eq!(DATA_TEST_NONZERO, 0xFFFFFFFFFFFFFFFF);
}
// Initialize memory management
memory::init(0, &__end as *const u8 as usize - ::KERNEL_OFFSET);
// TODO: allocate a stack
let stack_start = 0x00080000 + ::KERNEL_OFFSET;
let stack_end = 0x0009F000 + ::KERNEL_OFFSET;
// Initialize paging
let (mut active_table, tcb_offset) = paging::init(0, stack_start, stack_end);
// Set up GDT
gdt::init(tcb_offset, stack_end);
// Set up IDT
idt::init();
// Test tdata and tbss
{
assert_eq!(TBSS_TEST_ZERO, 0);
TBSS_TEST_ZERO += 1;
assert_eq!(TBSS_TEST_ZERO, 1);
assert_eq!(TDATA_TEST_NONZERO, 0xFFFFFFFFFFFFFFFF);
TDATA_TEST_NONZERO -= 1;
assert_eq!(TDATA_TEST_NONZERO, 0xFFFFFFFFFFFFFFFE);
}
// Reset AP variables
CPU_COUNT.store(1, Ordering::SeqCst);
AP_READY.store(false, Ordering::SeqCst);
BSP_READY.store(false, Ordering::SeqCst);
// Setup kernel heap
{
let mut flush_all = MapperFlushAll::new();
// Map heap pages
let heap_start_page = Page::containing_address(VirtualAddress::new(::KERNEL_HEAP_OFFSET));
let heap_end_page = Page::containing_address(VirtualAddress::new(::KERNEL_HEAP_OFFSET + ::KERNEL_HEAP_SIZE-1));
for page in Page::range_inclusive(heap_start_page, heap_end_page) {
let result = active_table.map(page, entry::PRESENT | entry::GLOBAL | entry::WRITABLE | entry::NO_EXECUTE);
flush_all.consume(result);
}
flush_all.flush(&mut active_table);
// Init the allocator
allocator::init(::KERNEL_HEAP_OFFSET, ::KERNEL_HEAP_SIZE);
}
// Initialize devices
device::init(&mut active_table);
// Read ACPI tables, starts APs
acpi::init(&mut active_table);
BSP_READY.store(true, Ordering::SeqCst);
}
kmain(CPU_COUNT.load(Ordering::SeqCst));
}
/// Entry to rust for an AP
pub unsafe extern fn kstart_ap(cpu_id: usize, bsp_table: usize, stack_start: usize, stack_end: usize) -> ! {
{
assert_eq!(BSS_TEST_ZERO, 0);
assert_eq!(DATA_TEST_NONZERO, 0xFFFFFFFFFFFFFFFF);
// Initialize paging
let tcb_offset = paging::init_ap(cpu_id, bsp_table, stack_start, stack_end);
// Set up GDT for AP
gdt::init(tcb_offset, stack_end);
// Set up IDT for AP
idt::init();
// Test tdata and tbss
{
assert_eq!(TBSS_TEST_ZERO, 0);
TBSS_TEST_ZERO += 1;
assert_eq!(TBSS_TEST_ZERO, 1);
assert_eq!(TDATA_TEST_NONZERO, 0xFFFFFFFFFFFFFFFF);
TDATA_TEST_NONZERO -= 1;
assert_eq!(TDATA_TEST_NONZERO, 0xFFFFFFFFFFFFFFFE);
}
// Initialize devices (for AP)
device::init_ap();
AP_READY.store(true, Ordering::SeqCst);
}
while ! BSP_READY.load(Ordering::SeqCst) {
interrupt::pause();
}
kmain_ap(cpu_id);
}
pub unsafe fn usermode(ip: usize, sp: usize) -> ! {
// Go to usermode: build an iretq frame (SS, RSP, RFLAGS, CS, RIP) on the
// stack from the registers below, then iretq into ring 3
asm!("mov ds, ax
mov es, ax
mov fs, bx
mov gs, ax
push rax
push rcx
push rdx
push rsi
push rdi
iretq"
: // No output because it never returns
: "{rax}"(gdt::GDT_USER_DATA << 3 | 3), // Data segment
"{rbx}"(gdt::GDT_USER_TLS << 3 | 3), // TLS segment
"{rcx}"(sp), // Stack pointer
"{rdx}"(3 << 12 | 1 << 9), // Flags - Set IOPL and interrupt enable flag
"{rsi}"(gdt::GDT_USER_CODE << 3 | 3), // Code segment
"{rdi}"(ip) // IP
: // No clobbers because it never returns
: "intel", "volatile");
unreachable!();
}


@ -0,0 +1,23 @@
use io::{Io, Pio};
#[no_mangle]
pub unsafe extern fn kstop() -> ! {
// (phony) ACPI shutdown (http://forum.osdev.org/viewtopic.php?t=16990)
// Works for qemu and bochs.
for &port in [0x604, 0xB004].iter() {
println!("Shutdown with outw(0x{:X}, 0x{:X})", port, 0x2000);
Pio::<u16>::new(port).write(0x2000);
}
// Magic shutdown code for bochs and qemu (older versions).
for c in "Shutdown".bytes() {
println!("Shutdown with outb(0x{:X}, '{}')", 0x8900, c as char);
Pio::<u8>::new(0x8900).write(c);
}
// Magic code for VMWare. Also a hard lock.
println!("Shutdown with cli hlt");
asm!("cli; hlt" : : : : "intel", "volatile");
unreachable!();
}


@ -0,0 +1,15 @@
use spin::Mutex;
/// Realtime clock reading at kernel start, as (seconds, nanoseconds)
pub static START: Mutex<(u64, u64)> = Mutex::new((0, 0));
/// Time since kernel start, as (seconds, nanoseconds), advanced by the PIT handler
pub static OFFSET: Mutex<(u64, u64)> = Mutex::new((0, 0));
pub fn monotonic() -> (u64, u64) {
*OFFSET.lock()
}
pub fn realtime() -> (u64, u64) {
let offset = monotonic();
let start = *START.lock();
let sum = start.1 + offset.1;
(start.0 + offset.0 + sum / 1000000000, sum % 1000000000)
}
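// Worked example (illustrative): after 1000 PIT ticks of 2_250_286 ns each,
// the IRQ handler has accumulated (2, 250_286_000) in OFFSET, i.e. 2.25 s
// split into whole seconds and nanoseconds, which monotonic() returns as-is.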


@ -15,7 +15,7 @@ mod gen {
}
#[cfg(not(test))]
#[path="../../build/userspace/initfs.rs"]
#[path="../../../build/userspace/initfs.rs"]
mod gen;
struct Handle {


@ -11,7 +11,7 @@ use syscall::error::*;
use syscall::flag::{MODE_FILE, SEEK_SET, SEEK_CUR, SEEK_END};
use syscall::scheme::Scheme;
-static FILESYSTEM: &'static [u8] = include_bytes!("../../build/filesystem.bin");
+static FILESYSTEM: &'static [u8] = include_bytes!("../../../build/filesystem.bin");
struct Handle {
path: &'static [u8],