GDT and IDT in kernel space

Paging constructs completed, remap kernel before jumping to kmain
Panic will do a stack trace
Remove SSE from none target
This commit is contained in:
Jeremy Soller 2016-08-15 14:34:20 -06:00
parent 465363f0a1
commit cc8fe85e6a
18 changed files with 524 additions and 236 deletions

View file

@ -18,7 +18,7 @@ arch_x86_64 = { path = "arch/x86_64" }
bump_allocator = { path = "alloc/bump_allocator"}
[profile.dev]
panic = "abort"
panic = "unwind"
[profile.release]
panic = "abort"

View file

@ -10,7 +10,7 @@ bochs: build/harddrive.bin
bochs -f bochs.$(ARCH)
qemu: build/harddrive.bin
qemu-system-$(ARCH) -serial mon:stdio -drive file=$<,format=raw,index=0,media=disk -nographic
qemu-system-$(ARCH) -serial mon:stdio -drive file=$<,format=raw,index=0,media=disk -nographic -d guest_errors
FORCE:

View file

@ -4,3 +4,7 @@ version = "0.1.0"
[dependencies]
bitflags = "*"
[dependencies.x86]
default-features = false
version = "0.7.1"

View file

@ -1,3 +1,7 @@
//! Global descriptor table
use core::mem;
pub const GDT_NULL: usize = 0;
pub const GDT_KERNEL_CODE: usize = 1;
pub const GDT_KERNEL_DATA: usize = 2;
@ -6,26 +10,104 @@ pub const GDT_USER_DATA: usize = 4;
pub const GDT_USER_TLS: usize = 5;
pub const GDT_TSS: usize = 6;
pub static mut GDTR: GdtDescriptor = GdtDescriptor {
size: 0,
offset: 0
};
pub static mut GDT: [GdtEntry; 5] = [GdtEntry::new(); 5];
/// Fill in the access bytes and flags of the kernel/user GDT entries and
/// activate the table with `lgdt` via `GDTR.load()`.
///
/// NOTE(review): `GDT` is declared with 5 entries, but `GDT_USER_TLS` (5) and
/// `GDT_TSS` (6) index past it — confirm those selectors stay unused until the
/// table is grown.
pub unsafe fn init() {
    // Kernel code: present, ring 0, executable, readable, 64-bit.
    GDT[GDT_KERNEL_CODE].set_access(GDT_PRESENT | GDT_RING_0 | GDT_SYSTEM | GDT_EXECUTABLE | GDT_PRIVILEGE);
    GDT[GDT_KERNEL_CODE].set_flags(GDT_LONG_MODE);

    // Kernel data: present, ring 0, writable.
    GDT[GDT_KERNEL_DATA].set_access(GDT_PRESENT | GDT_RING_0 | GDT_SYSTEM | GDT_PRIVILEGE);
    GDT[GDT_KERNEL_DATA].set_flags(GDT_LONG_MODE);

    // User code: same as kernel code but ring 3.
    GDT[GDT_USER_CODE].set_access(GDT_PRESENT | GDT_RING_3 | GDT_SYSTEM | GDT_EXECUTABLE | GDT_PRIVILEGE);
    GDT[GDT_USER_CODE].set_flags(GDT_LONG_MODE);

    // User data: same as kernel data but ring 3.
    GDT[GDT_USER_DATA].set_access(GDT_PRESENT | GDT_RING_3 | GDT_SYSTEM | GDT_PRIVILEGE);
    GDT[GDT_USER_DATA].set_flags(GDT_LONG_MODE);

    // Point the descriptor at the table and load it into the CPU's GDTR.
    GDTR.set_slice(&GDT);
    GDTR.load();
}
// Access-byte bit layout of an x86 segment descriptor (see Intel SDM Vol. 3A).
bitflags! {
    pub flags GdtAccess: u8 {
        // Segment is present in memory.
        const GDT_PRESENT = 1 << 7,
        // Descriptor privilege level occupies bits 5-6 (rings 0-3).
        const GDT_RING_0 = 0 << 5,
        const GDT_RING_1 = 1 << 5,
        const GDT_RING_2 = 2 << 5,
        const GDT_RING_3 = 3 << 5,
        // Descriptor type bit: set for code/data segments, clear for system segments.
        const GDT_SYSTEM = 1 << 4,
        // Code segment when set, data segment when clear.
        const GDT_EXECUTABLE = 1 << 3,
        // Conforming bit for code segments (direction bit for data segments).
        const GDT_CONFORMING = 1 << 2,
        // Readable bit for code segments, writable bit for data segments.
        const GDT_PRIVILEGE = 1 << 1,
        // Accessed bit, set by the CPU when the segment is used.
        const GDT_DIRTY = 1,
    }
}
// Flags nibble (high four bits of the flags/limit byte) of a segment descriptor.
bitflags! {
    pub flags GdtFlags: u8 {
        // Granularity bit: limit counted in 4 KiB pages instead of bytes.
        const GDT_PAGE_SIZE = 1 << 7,
        // D/B bit: 32-bit protected-mode segment.
        const GDT_PROTECTED_MODE = 1 << 6,
        // L bit: 64-bit long-mode code segment.
        const GDT_LONG_MODE = 1 << 5
    }
}
#[repr(packed)]
pub struct GdtDescriptor {
pub size: u16,
pub ptr: u64
pub offset: u64
}
impl GdtDescriptor {
    /// Point this descriptor at `slice`. The size field is the table length in
    /// bytes minus one, as the `lgdt` limit encoding requires.
    pub fn set_slice(&mut self, slice: &'static [GdtEntry]) {
        self.size = (slice.len() * mem::size_of::<GdtEntry>() - 1) as u16;
        self.offset = slice.as_ptr() as u64;
    }

    /// Load this descriptor into the CPU's GDTR with `lgdt`.
    ///
    /// SAFETY: the caller must ensure this descriptor and the table it points
    /// to remain valid for as long as the CPU may reference them.
    pub unsafe fn load(&self) {
        asm!("lgdt [rax]" : : "{rax}"(self as *const _ as usize) : : "intel", "volatile");
    }
}
#[derive(Copy, Clone, Debug)]
#[repr(packed)]
pub struct GdtEntry {
pub limitl: u16,
pub basel: u16,
pub basem: u8,
pub attribute: u8,
pub access: u8,
pub flags_limith: u8,
pub baseh: u8
}
impl GdtEntry {
pub fn set_base(&mut self, base: usize) {
self.basel = base as u16;
self.basem = (base >> 16) as u8;
self.baseh = (base >> 24) as u8;
    /// Construct an all-zero (null) GDT entry; `const fn` so it can
    /// initialize the static `GDT` array.
    pub const fn new() -> Self {
        GdtEntry {
            limitl: 0,
            basel: 0,
            basem: 0,
            access: 0,
            flags_limith: 0,
            baseh: 0
        }
    }
    /// Split `offset` (the segment base address) across the low, middle,
    /// and high base fields of the descriptor.
    pub fn set_offset(&mut self, offset: usize) {
        self.basel = offset as u16;
        self.basem = (offset >> 16) as u8;
        self.baseh = (offset >> 24) as u8;
    }

    /// Store the access byte from `access`.
    pub fn set_access(&mut self, access: GdtAccess) {
        self.access = access.bits;
    }

    /// Store the flag bits in the high nibble of `flags_limith`, preserving
    /// the limit bits held in the low nibble.
    pub fn set_flags(&mut self, flags: GdtFlags) {
        self.flags_limith = (self.flags_limith & 0xF) | flags.bits;
    }
}

View file

@ -17,3 +17,68 @@ pub unsafe fn enable_interrupts() {
pub unsafe fn halt() {
asm!("hlt" : : : : "intel", "volatile");
}
/// Descriptions of the x86 CPU exceptions (vectors 0-20).
///
/// NOTE(review): a previous comment called these "External Interrupts (1-16)",
/// but the 21 entries here describe the architectural exception vectors
/// starting at 0 (division error) through 20 (virtualization violation).
pub static EXCEPTIONS: [Descriptor; 21] = [
    Descriptor::new("Division error", Kind::Fault),
    Descriptor::new("Debug trap", Kind::Trap),
    Descriptor::new("Unmaskable interrupt", Kind::Unmaskable),
    Descriptor::new("Breakpoint", Kind::Trap),
    Descriptor::new("Overflow", Kind::Trap),
    Descriptor::new("Out of bound", Kind::Fault),
    Descriptor::new("Invalid opcode", Kind::Fault),
    Descriptor::new("Device unavailable", Kind::Fault),
    Descriptor::new("Double fault", Kind::Fault),
    Descriptor::new("Coprocessor segment overrun", Kind::Fault),
    Descriptor::new("Invalid TSS", Kind::Fault),
    Descriptor::new("Segment not present", Kind::Fault),
    Descriptor::new("Stack-segment fault", Kind::Fault),
    Descriptor::new("General protection", Kind::Fault),
    Descriptor::new("Page fault", Kind::Fault),
    Descriptor::new("Reserved", Kind::Reserved),
    Descriptor::new("x87 FPU", Kind::Fault),
    Descriptor::new("Unaligned memory access", Kind::Fault),
    Descriptor::new("Machine check", Kind::Abort),
    Descriptor::new("SIMD floating-point", Kind::Fault),
    Descriptor::new("Virtualization violation", Kind::Fault),
];
/// An interrupt description.
#[derive(Debug, Copy, Clone)]
pub struct Descriptor {
/// The description of this interrupt.
pub desc: &'static str,
/// The interrupt type.
pub kind: Kind,
}
impl Descriptor {
    /// Build a descriptor from its human-readable description and its kind.
    pub const fn new(desc: &'static str, kind: Kind) -> Descriptor {
        Descriptor { desc, kind }
    }
}
/// The interrupt kind.
#[derive(Debug, Copy, Clone)]
pub enum Kind {
    /// A fault.
    ///
    /// This can have multiple sources, but is often a result of a program error of some sort.
    Fault,
    /// A trap.
    ///
    /// These are often for debugging purposes.
    Trap,
    /// A deliberate abort.
    Abort,
    /// An unmaskable interrupt.
    ///
    /// This is a forced interrupt which needs to be handled immediately.
    Unmaskable,
    /// Reserved or deprecated.
    Reserved,
}

View file

@ -4,12 +4,14 @@
#![feature(concat_idents)]
#![feature(const_fn)]
#![feature(core_intrinsics)]
#![feature(lang_items)]
#![feature(naked_functions)]
#![feature(unique)]
#![no_std]
#[macro_use]
extern crate bitflags;
extern crate x86;
/// Print to console
#[macro_export]
@ -37,6 +39,8 @@ macro_rules! interrupt {
$func
}
asm!("xchg bx, bx" : : : : "intel", "volatile");
// Push scratch registers
asm!("push rax
push rcx
@ -92,6 +96,9 @@ pub mod memory;
/// Paging
pub mod paging;
/// Panic
pub mod panic;
/// Serial driver and print! support
pub mod serial;

View file

@ -4,9 +4,10 @@
/// defined in other files inside of the `arch` module
use externs::memset;
use gdt;
use idt;
use memory::{self, Frame};
use paging::{self, entry, PhysicalAddress};
use paging::{self, entry, Page, PhysicalAddress};
/// Test of zero values in BSS.
static BSS_TEST_ZERO: usize = 0;
@ -18,28 +19,14 @@ extern {
fn kmain() -> !;
}
extern {
/// The starting byte of the text (code) data segment.
static mut __text_start: u8;
/// The ending byte of the text (code) data segment.
static mut __text_end: u8;
/// The starting byte of the _.rodata_ (read-only data) segment.
static mut __rodata_start: u8;
/// The ending byte of the _.rodata_ (read-only data) segment.
static mut __rodata_end: u8;
/// The starting byte of the _.data_ segment.
static mut __data_start: u8;
/// The ending byte of the _.data_ segment.
static mut __data_end: u8;
#[no_mangle]
pub unsafe extern fn kstart() -> ! {
extern {
/// The starting byte of the _.bss_ (uninitialized data) segment.
static mut __bss_start: u8;
/// The ending byte of the _.bss_ (uninitialized data) segment.
static mut __bss_end: u8;
}
#[no_mangle]
pub unsafe extern fn kstart() -> ! {
asm!("xchg bx, bx" : : : : "intel", "volatile");
}
// Zero BSS, this initializes statics that are set to 0
{
@ -55,6 +42,9 @@ pub unsafe extern fn kstart() -> ! {
debug_assert_eq!(BSS_TEST_NONZERO, 0xFFFFFFFFFFFFFFFF);
}
// Set up GDT
gdt::init();
// Set up IDT
idt::init(blank);
@ -62,28 +52,13 @@ pub unsafe extern fn kstart() -> ! {
let mut allocator = memory::init(0, &__bss_end as *const u8 as usize);
// Initialize paging
let mut pager = paging::init();
let mut active_table = paging::init(&mut allocator);
// Remap a section with `flags`
let mut remap_section = |start_ref: &u8, end_ref: &u8, flags: entry::EntryFlags| {
let start = start_ref as *const _ as usize;
let end = end_ref as *const _ as usize;
for i in 0..(start - end + paging::PAGE_SIZE - 1)/paging::PAGE_SIZE {
let frame = Frame::containing_address(PhysicalAddress::new(start + i * paging::PAGE_SIZE));
pager.identity_map(frame, flags, &mut allocator);
}
};
// Remap text read-only
{
asm!("xchg bx, bx" : : : : "intel", "volatile");
//TODO remap_section(& __text_start, & __text_end, entry::PRESENT);
}
kmain();
}
interrupt!(blank, {
asm!("xchg bx, bx" : : : : "intel", "volatile");
println!("INTERRUPT");
});

View file

@ -96,7 +96,8 @@ impl Frame {
PhysicalAddress::new(self.number * PAGE_SIZE)
}
fn clone(&self) -> Frame {
//TODO: Set private
pub fn clone(&self) -> Frame {
Frame {
number: self.number
}

View file

@ -0,0 +1,87 @@
use core::ptr::Unique;
use memory::{Frame, FrameAllocator};
use super::{Page, PAGE_SIZE, PhysicalAddress, VirtualAddress};
use super::entry::{self, EntryFlags};
use super::table::{self, Table, Level4};
/// Read/write access to the currently active page tables, reached through the
/// recursive mapping of the top-level (P4) table.
pub struct Mapper {
    // Pointer to the P4 table; valid only while its address space is active.
    p4: Unique<Table<Level4>>,
}
impl Mapper {
    /// Create a new page table
    ///
    /// SAFETY: `table::P4` must point at the recursively mapped top-level page
    /// table of the current address space, and callers must not create aliasing
    /// `Mapper`s, since this hands out `&mut` access to that table.
    pub unsafe fn new() -> Mapper {
        Mapper {
            p4: Unique::new(table::P4),
        }
    }

    /// Shared reference to the P4 (top-level) table.
    pub fn p4(&self) -> &Table<Level4> {
        unsafe { self.p4.get() }
    }

    /// Exclusive reference to the P4 (top-level) table.
    pub fn p4_mut(&mut self) -> &mut Table<Level4> {
        unsafe { self.p4.get_mut() }
    }

    /// Map a page to a frame
    ///
    /// Walks P4 -> P3 -> P2, creating missing intermediate tables from
    /// `allocator`, then writes the P1 entry with `flags | PRESENT`.
    /// Panics if the page is already mapped.
    pub fn map_to<A>(&mut self, page: Page, frame: Frame, flags: EntryFlags, allocator: &mut A)
        where A: FrameAllocator
    {
        let mut p3 = self.p4_mut().next_table_create(page.p4_index(), allocator);
        let mut p2 = p3.next_table_create(page.p3_index(), allocator);
        let mut p1 = p2.next_table_create(page.p2_index(), allocator);

        assert!(p1[page.p1_index()].is_unused());
        p1[page.p1_index()].set(frame, flags | entry::PRESENT);
    }

    /// Map a page to the next free frame
    ///
    /// Panics with "out of memory" if `allocator` is exhausted.
    pub fn map<A>(&mut self, page: Page, flags: EntryFlags, allocator: &mut A)
        where A: FrameAllocator
    {
        let frame = allocator.allocate_frame().expect("out of memory");
        self.map_to(page, frame, flags, allocator)
    }

    /// Identity map a frame
    ///
    /// Maps the virtual page whose address equals the frame's physical address.
    pub fn identity_map<A>(&mut self, frame: Frame, flags: EntryFlags, allocator: &mut A)
        where A: FrameAllocator
    {
        let page = Page::containing_address(VirtualAddress::new(frame.start_address().get()));
        self.map_to(page, frame, flags, allocator)
    }

    /// Unmap a page
    ///
    /// Clears the P1 entry and returns its frame to `allocator`. Panics if the
    /// page is not mapped, or if it is backed by a huge page (unsupported).
    pub fn unmap<A>(&mut self, page: Page, allocator: &mut A)
        where A: FrameAllocator
    {
        assert!(self.translate(page.start_address()).is_some());

        let p1 = self.p4_mut()
            .next_table_mut(page.p4_index())
            .and_then(|p3| p3.next_table_mut(page.p3_index()))
            .and_then(|p2| p2.next_table_mut(page.p2_index()))
            .expect("mapping code does not support huge pages");
        let frame = p1[page.p1_index()].pointed_frame().unwrap();
        p1[page.p1_index()].set_unused();
        // TODO free p(1,2,3) table if empty
        allocator.deallocate_frame(frame);
    }

    /// Look up the frame backing `page`, if mapped (huge pages not supported).
    pub fn translate_page(&self, page: Page) -> Option<Frame> {
        self.p4().next_table(page.p4_index())
            .and_then(|p3| p3.next_table(page.p3_index()))
            .and_then(|p2| p2.next_table(page.p2_index()))
            .and_then(|p1| p1[page.p1_index()].pointed_frame())
    }

    /// Translate a virtual address to a physical one
    ///
    /// Preserves the offset within the page.
    pub fn translate(&self, virtual_address: VirtualAddress) -> Option<PhysicalAddress> {
        let offset = virtual_address.get() % PAGE_SIZE;
        self.translate_page(Page::containing_address(virtual_address))
            .map(|frame| PhysicalAddress::new(frame.start_address().get() + offset))
    }
}

View file

@ -1,15 +1,19 @@
//! # Paging
//! Some code was borrowed from [Phil Opp's Blog](http://os.phil-opp.com/modifying-page-tables.html)
use core::ptr::Unique;
use core::ops::{Deref, DerefMut};
use memory::{Frame, FrameAllocator};
use self::entry::EntryFlags;
use self::entry::{PRESENT, WRITABLE, EntryFlags};
use self::mapper::Mapper;
use self::table::{Table, Level4};
use self::temporary_page::TemporaryPage;
pub mod entry;
mod mapper;
pub mod table;
mod temporary_page;
/// Number of entries per page table
pub const ENTRY_COUNT: usize = 512;
@ -18,87 +22,149 @@ pub const ENTRY_COUNT: usize = 512;
pub const PAGE_SIZE: usize = 4096;
/// Initialize paging
pub unsafe fn init() -> ActivePageTable {
ActivePageTable::new()
/// Initialize paging: build a fresh top-level table, remap the kernel sections
/// and the boot stack with least-privilege permissions, then switch to it.
///
/// SAFETY: must run exactly once during early boot, before any code relies on
/// the old identity mappings that this replaces.
pub unsafe fn init<A>(allocator: &mut A) -> ActivePageTable where A: FrameAllocator {
    extern {
        /// The starting byte of the text (code) data segment.
        static mut __text_start: u8;
        /// The ending byte of the text (code) data segment.
        static mut __text_end: u8;
        /// The starting byte of the _.rodata_ (read-only data) segment.
        static mut __rodata_start: u8;
        /// The ending byte of the _.rodata_ (read-only data) segment.
        static mut __rodata_end: u8;
        /// The starting byte of the _.data_ segment.
        static mut __data_start: u8;
        /// The ending byte of the _.data_ segment.
        static mut __data_end: u8;
        /// The starting byte of the _.bss_ (uninitialized data) segment.
        static mut __bss_start: u8;
        /// The ending byte of the _.bss_ (uninitialized data) segment.
        static mut __bss_end: u8;
    }

    let mut active_table = ActivePageTable::new();

    // Scratch virtual page used while building the new table; the page number
    // is an arbitrary magic value chosen not to collide with real mappings.
    let mut temporary_page = TemporaryPage::new(Page { number: 0xcafebabe },
        allocator);

    let mut new_table = {
        let frame = allocator.allocate_frame().expect("no more frames");
        InactivePageTable::new(frame, &mut active_table, &mut temporary_page)
    };

    active_table.with(&mut new_table, &mut temporary_page, |mapper| {
        // Identity map [start, end) one 4 KiB frame at a time, rounding the
        // byte count up to whole pages.
        let mut remap = |start: usize, end: usize, flags: entry::EntryFlags| {
            for i in 0..(end - start + PAGE_SIZE - 1)/PAGE_SIZE {
                let frame = Frame::containing_address(PhysicalAddress::new(start + i * PAGE_SIZE));
                mapper.identity_map(frame, flags, allocator);
            }
        };

        // Remap stack writable, no execute (the startup asm points rsp at 0x0009F000).
        remap(0x00080000, 0x0009F000, entry::PRESENT | entry::NO_EXECUTE | entry::WRITABLE);

        // Remap a section with `flags`
        let mut remap_section = |start: &u8, end: &u8, flags: entry::EntryFlags| {
            remap(start as *const _ as usize, end as *const _ as usize, flags);
        };
        // Remap text read-only
        remap_section(& __text_start, & __text_end, entry::PRESENT);
        // Remap rodata read-only, no execute
        remap_section(& __rodata_start, & __rodata_end, entry::PRESENT | entry::NO_EXECUTE);
        // Remap data writable, no execute
        remap_section(& __data_start, & __data_end, entry::PRESENT | entry::NO_EXECUTE | entry::WRITABLE);
        // Remap bss writable, no execute
        remap_section(& __bss_start, & __bss_end, entry::PRESENT | entry::NO_EXECUTE | entry::WRITABLE);
    });

    // Switch CR3 to the new table; the returned old table is discarded.
    active_table.switch(new_table);

    active_table
}
pub struct ActivePageTable {
p4: Unique<Table<Level4>>,
mapper: Mapper,
}
// Let callers invoke `Mapper` methods directly on the active table.
impl Deref for ActivePageTable {
    type Target = Mapper;

    fn deref(&self) -> &Mapper {
        &self.mapper
    }
}
// Mutable counterpart of the `Deref` impl above.
impl DerefMut for ActivePageTable {
    fn deref_mut(&mut self) -> &mut Mapper {
        &mut self.mapper
    }
}
impl ActivePageTable {
/// Create a new page table
pub unsafe fn new() -> ActivePageTable {
unsafe fn new() -> ActivePageTable {
ActivePageTable {
p4: Unique::new(table::P4),
mapper: Mapper::new(),
}
}
fn p4(&self) -> &Table<Level4> {
unsafe { self.p4.get() }
pub fn switch(&mut self, new_table: InactivePageTable) -> InactivePageTable {
use x86::controlregs;
let old_table = InactivePageTable {
p4_frame: Frame::containing_address(
PhysicalAddress::new(unsafe { controlregs::cr3() } as usize)
),
};
unsafe {
controlregs::cr3_write(new_table.p4_frame.start_address().get() as u64);
}
old_table
}
fn p4_mut(&mut self) -> &mut Table<Level4> {
unsafe { self.p4.get_mut() }
}
/// Map a page to a frame
pub fn map_to<A>(&mut self, page: Page, frame: Frame, flags: EntryFlags, allocator: &mut A)
where A: FrameAllocator
pub fn with<F>(&mut self, table: &mut InactivePageTable, temporary_page: &mut temporary_page::TemporaryPage, f: F)
where F: FnOnce(&mut Mapper)
{
let mut p3 = self.p4_mut().next_table_create(page.p4_index(), allocator);
let mut p2 = p3.next_table_create(page.p3_index(), allocator);
let mut p1 = p2.next_table_create(page.p2_index(), allocator);
use x86::{controlregs, tlb};
let flush_tlb = || unsafe { tlb::flush_all() };
assert!(p1[page.p1_index()].is_unused());
p1[page.p1_index()].set(frame, flags | entry::PRESENT);
}
/// Map a page to the next free frame
pub fn map<A>(&mut self, page: Page, flags: EntryFlags, allocator: &mut A)
where A: FrameAllocator
{
let frame = allocator.allocate_frame().expect("out of memory");
self.map_to(page, frame, flags, allocator)
let backup = Frame::containing_address(PhysicalAddress::new(unsafe { controlregs::cr3() } as usize));
// map temporary_page to current p4 table
let p4_table = temporary_page.map_table_frame(backup.clone(), self);
// overwrite recursive mapping
self.p4_mut()[511].set(table.p4_frame.clone(), PRESENT | WRITABLE);
flush_tlb();
// execute f in the new context
f(self);
// restore recursive mapping to original p4 table
p4_table[511].set(backup, PRESENT | WRITABLE);
flush_tlb();
}
/// Identity map a frame
pub fn identity_map<A>(&mut self, frame: Frame, flags: EntryFlags, allocator: &mut A)
where A: FrameAllocator
temporary_page.unmap(self);
}
}
pub struct InactivePageTable {
p4_frame: Frame,
}
impl InactivePageTable {
pub fn new(frame: Frame, active_table: &mut ActivePageTable, temporary_page: &mut TemporaryPage) -> InactivePageTable {
{
let page = Page::containing_address(VirtualAddress::new(frame.start_address().get()));
self.map_to(page, frame, flags, allocator)
let table = temporary_page.map_table_frame(frame.clone(), active_table);
// now we are able to zero the table
table.zero();
// set up recursive mapping for the table
table[511].set(frame.clone(), PRESENT | WRITABLE);
}
temporary_page.unmap(active_table);
/// Unmap a page
fn unmap<A>(&mut self, page: Page, allocator: &mut A)
where A: FrameAllocator
{
assert!(self.translate(page.start_address()).is_some());
let p1 = self.p4_mut()
.next_table_mut(page.p4_index())
.and_then(|p3| p3.next_table_mut(page.p3_index()))
.and_then(|p2| p2.next_table_mut(page.p2_index()))
.expect("mapping code does not support huge pages");
let frame = p1[page.p1_index()].pointed_frame().unwrap();
p1[page.p1_index()].set_unused();
// TODO free p(1,2,3) table if empty
allocator.deallocate_frame(frame);
}
fn translate_page(&self, page: Page) -> Option<Frame> {
self.p4().next_table(page.p4_index())
.and_then(|p3| p3.next_table(page.p3_index()))
.and_then(|p2| p2.next_table(page.p2_index()))
.and_then(|p1| p1[page.p1_index()].pointed_frame())
}
/// Translate a virtual address to a physical one
pub fn translate(&self, virtual_address: VirtualAddress) -> Option<PhysicalAddress> {
let offset = virtual_address.get() % PAGE_SIZE;
self.translate_page(Page::containing_address(virtual_address))
.map(|frame| PhysicalAddress::new(frame.start_address().get() + offset))
InactivePageTable { p4_frame: frame }
}
}

View file

@ -0,0 +1,74 @@
//! Temporarily map a page
//! From [Phil Opp's Blog](http://os.phil-opp.com/remap-the-kernel.html)
use memory::{Frame, FrameAllocator};
use super::{ActivePageTable, Page, VirtualAddress};
use super::table::{Table, Level1};
/// A fixed virtual page that can be temporarily mapped to arbitrary frames.
pub struct TemporaryPage {
    // The virtual page reserved for temporary mappings.
    page: Page,
    // Holds up to three frames to back the page tables needed to map `page`.
    allocator: TinyAllocator,
}
impl TemporaryPage {
    /// Create a temporary page at `page`, pre-allocating three frames from
    /// `allocator` for any intermediate page tables the mapping may need.
    pub fn new<A>(page: Page, allocator: &mut A) -> TemporaryPage where A: FrameAllocator {
        TemporaryPage {
            page: page,
            allocator: TinyAllocator::new(allocator),
        }
    }

    /// Maps the temporary page to the given frame in the active table.
    /// Returns the start address of the temporary page.
    ///
    /// Panics if the temporary page is already mapped.
    pub fn map(&mut self, frame: Frame, active_table: &mut ActivePageTable) -> VirtualAddress {
        use super::entry::WRITABLE;

        assert!(active_table.translate_page(self.page).is_none(),
            "temporary page is already mapped");
        active_table.map_to(self.page, frame, WRITABLE, &mut self.allocator);
        self.page.start_address()
    }

    /// Maps the temporary page to the given page table frame in the active
    /// table. Returns a reference to the now mapped table.
    pub fn map_table_frame(&mut self, frame: Frame, active_table: &mut ActivePageTable) -> &mut Table<Level1> {
        unsafe { &mut *(self.map(frame, active_table).get() as *mut Table<Level1>) }
    }

    /// Unmaps the temporary page in the active table, returning its
    /// page-table frames to the internal tiny allocator.
    pub fn unmap(&mut self, active_table: &mut ActivePageTable) {
        active_table.unmap(self.page, &mut self.allocator)
    }
}
/// Frame allocator holding at most three frames — enough for the P3, P2 and
/// P1 tables required to map a single page.
struct TinyAllocator([Option<Frame>; 3]);
impl TinyAllocator {
    /// Pull three frames out of `allocator` up front so later page-table
    /// allocations never have to touch the real frame allocator.
    fn new<A>(allocator: &mut A) -> TinyAllocator where A: FrameAllocator {
        TinyAllocator([
            allocator.allocate_frame(),
            allocator.allocate_frame(),
            allocator.allocate_frame(),
        ])
    }
}
impl FrameAllocator for TinyAllocator {
    /// Hand out the first frame still stored, emptying that slot.
    fn allocate_frame(&mut self) -> Option<Frame> {
        self.0.iter_mut().find_map(|slot| slot.take())
    }

    /// Put the frame back into the first empty slot.
    ///
    /// Panics if all three slots are already full.
    fn deallocate_frame(&mut self, frame: Frame) {
        for slot in &mut self.0 {
            if slot.is_none() {
                *slot = Some(frame);
                return;
            }
        }
        panic!("Tiny allocator can hold only 3 frames.");
    }
}

45
arch/x86_64/src/panic.rs Normal file
View file

@ -0,0 +1,45 @@
//! Intrinsics for panic handling
use interrupt::halt;
#[cfg(not(test))]
/// Personality routine the compiler requires; unwinding is never used in the
/// kernel, so it does nothing.
#[lang = "eh_personality"]
extern "C" fn eh_personality() {}
#[cfg(not(test))]
/// Required to handle panics
///
/// Prints the panic message and source location, walks the frame-pointer chain
/// to print a crude stack trace, then halts the CPU forever.
#[lang = "panic_fmt"]
extern "C" fn panic_fmt(fmt: ::core::fmt::Arguments, file: &str, line: u32) -> ! {
    // Capture the current frame pointer via the asm output constraint; the
    // `xchg bx, bx` itself is the Bochs magic breakpoint.
    let mut rbp: usize;
    unsafe { asm!("xchg bx, bx" : "={rbp}"(rbp) : : : "intel", "volatile"); }

    println!("PANIC: {}", fmt);
    println!("FILE: {}", file);
    println!("LINE: {}", line);

    println!("TRACE: {:>016X}", rbp);
    // Walk up to 10 stack frames. Assumes frame pointers are kept (the target
    // specs set eliminate-frame-pointer to false): [rbp+8] holds the return
    // address and [rbp] holds the caller's rbp.
    for i in 0..10 {
        unsafe {
            let rip = *(rbp as *const usize).offset(1);
            println!(" {:>016X}: {:>016X}", rbp, rip);
            if rip == 0 {
                break;
            }
            rbp = *(rbp as *const usize);
        }
    }

    println!("HALT");
    // Never return: halt in a loop so spurious wakeups re-halt.
    loop {
        unsafe { halt() };
    }
}
#[allow(non_snake_case)]
#[no_mangle]
/// Required to handle panics
///
/// Unwinding landing pad the toolchain expects; the kernel never unwinds, so
/// simply halt forever.
pub extern "C" fn _Unwind_Resume() -> ! {
    loop {
        unsafe { halt() }
    }
}

View file

@ -48,12 +48,12 @@ startup_arch:
mov ecx, 0xC0000080 ; Read from the EFER MSR.
rdmsr
or eax, 0x00000100 ; Set the Long-Mode-Enable bit.
or eax, 1 << 11 | 1 << 8 ; Set the Long-Mode-Enable and NXE bit.
wrmsr
;enabling paging and protection simultaneously
mov ebx, cr0
or ebx, 0x80000001 ;Bit 31: Paging, Bit 0: Protected Mode
or ebx, 1 << 31 | 1 << 16 | 1 ;Bit 31: Paging, Bit 16: write protect kernel, Bit 0: Protected Mode
mov cr0, ebx
; far jump to enable Long Mode and load CS with 64 bit segment
@ -69,13 +69,12 @@ long_mode:
mov gs, rax
mov ss, rax
mov rsp, 0x800000 - 128
mov rsp, 0x0009F000
;rust init
xor rax, rax
mov eax, [kernel_base + 0x18]
mov rbx, gdtr
xchg bx, bx
jmp rax
gdtr:

View file

@ -1,88 +0,0 @@
//! Interrupts.
use core::fmt;
/// x86 External Interrupts (1-16).
pub static EXCEPTIONS: [InterruptDescription; 21] = [
Descriptor::new("Division error", Kind::Fault),
Descriptor::new("Debug trap", Kind::Trap),
Descriptor::new("Unmaskable interrupt", Kind::Unmaskable),
Descriptor::new("Breakpoint", Kind::Trap),
Descriptor::new("Overflow", Kind::Trap),
Descriptor::new("Out of bound", Kind::Fault),
Descriptor::new("Invalid opcode", Kind::Fault),
Descriptor::new("Device unavailable", Kind::Fault),
Descriptor::new("Double fault", Kind::Fault),
Descriptor::new("Coprocessor segment overrun", Kind::Fault),
Descriptor::new("Invalid TSS", Kind::Fault),
Descriptor::new("Segment not present", Kind::Fault),
Descriptor::new("Stack-segment fault", Kind::Fault),
Descriptor::new("General protection", Kind::Fault),
Descriptor::new("Page fault", Kind::Fault),
Descriptor::new("Reserved", Kind::Reserved),
Descriptor::new("x87 FPU", Kind::Fault),
Descriptor::new("Unaligned memory access", Kind::Fault),
Descriptor::new("Machine check", Kind::Abort),
Descriptor::new("SIMD floating-point", Kind::Fault),
Descriptor::new("SIMD floating-point", Kind::Fault),
Descriptor::new("Virtualization violation", Kind::Fault),
];
/// An interrupt description.
#[derive(Debug, Copy, Clone)]
pub struct Descriptor {
/// The description of this interrupt.
pub desc: &'static str,
/// The interrupt type.
pub kind: Kind,
}
impl Descriptor {
/// Create a new interrupt description.
pub fn new(desc: &'static str, kind: Kind) -> Descriptor {
Descriptor {
desc: desc,
kind: kind,
}
}
}
/// The interrupt kind.
pub enum Kind {
/// A fault.
///
/// This can have multiple sources, but is often a result of a program error of some sort.
Fault,
/// A trap.
///
/// These are often for debugging purposes.
Trap,
/// A deliberate abort.
Abort,
/// An unmaskable interrupt.
///
/// This is a forced interrupt which need to be handled immediately.
Unmaskable,
/// Reserved or deprecated.
Reserved,
}
/// Enable interrupts.
pub unsafe fn enable() {
asm!("sti");
}
/// Disable interrupts.
pub unsafe fn disable() {
asm!("cli");
}
/// Fire a software interrupt.
///
/// Due to inlining being strictly required, we use a macro.
#[macro_export]
macro_rules! interrupt {
($int:expr) => {{
asm!("int $0" :: "N" ($int));
}};
}

View file

@ -67,7 +67,6 @@
#![feature(alloc)]
#![feature(collections)]
#![feature(const_fn)]
#![feature(lang_items)]
#![feature(question_mark)]
#![no_std]
@ -97,9 +96,6 @@ extern crate collections;
/// Context management
pub mod context;
/// Intrinsics for panic handling
pub mod panic;
/// Schemes, filesystem handlers
pub mod scheme;

View file

@ -1,25 +0,0 @@
//! Intrinsics for panic handling
use arch::interrupt::halt;
#[cfg(not(test))]
#[lang = "eh_personality"]
extern "C" fn eh_personality() {}
#[cfg(not(test))]
/// Required to handle panics
#[lang = "panic_fmt"]
extern "C" fn panic_fmt(fmt: ::core::fmt::Arguments, file_line: &(&'static str, u32)) -> ! {
loop {
unsafe { halt() };
}
}
#[allow(non_snake_case)]
#[no_mangle]
/// Required to handle panics
pub extern "C" fn _Unwind_Resume() -> ! {
loop {
unsafe { halt() }
}
}

View file

@ -15,7 +15,7 @@
"relocation-model": "static",
"code-model": "kernel",
"disable-redzone": true,
"eliminate-frame-pointer": true,
"eliminate-frame-pointer": false,
"exe-suffix": "",
"has-rpath": false,
"no-compiler-rt": true,

View file

@ -15,7 +15,7 @@
"relocation-model": "static",
"code-model": "kernel",
"disable-redzone": true,
"eliminate-frame-pointer": true,
"eliminate-frame-pointer": false,
"exe-suffix": ".bin",
"has-rpath": false,
"no-compiler-rt": true,