diff --git a/.gitmodules b/.gitmodules
new file mode 100644
index 0000000..0964bae
--- /dev/null
+++ b/.gitmodules
@@ -0,0 +1,3 @@
+[submodule "rust"]
+ path = rust
+ url = https://github.com/rust-lang/rust.git
diff --git a/Makefile b/Makefile
index 77f8c74..9589e58 100644
--- a/Makefile
+++ b/Makefile
@@ -14,12 +14,25 @@ qemu: build/harddrive.bin
FORCE:
-build:
- mkdir build
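+# The freestanding rlibs are built from the rust submodule against the custom
+# target below and placed in build/, where rustc.sh (-L build) can find them.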
+build/libcore.rlib: rust/src/libcore/lib.rs
+ mkdir -p build
+ ./rustc.sh --target $(ARCH)-unknown-none.json -C soft-float -o $@ $<
-build/libkernel.a: build FORCE
- cargo rustc -- --crate-type staticlib -o $@
- #--target $(ARCH)-unknown-none.json
+build/liballoc.rlib: rust/src/liballoc/lib.rs build/libcore.rlib
+ mkdir -p build
+ ./rustc.sh --target $(ARCH)-unknown-none.json -C soft-float -o $@ $<
+
+build/librustc_unicode.rlib: rust/src/librustc_unicode/lib.rs build/libcore.rlib
+ mkdir -p build
+ ./rustc.sh --target $(ARCH)-unknown-none.json -C soft-float -o $@ $<
+
+build/libcollections.rlib: rust/src/libcollections/lib.rs build/libcore.rlib build/liballoc.rlib build/librustc_unicode.rlib
+ mkdir -p build
+ ./rustc.sh --target $(ARCH)-unknown-none.json -C soft-float -o $@ $<
+
+build/libkernel.a: build/libcore.rlib build/liballoc.rlib build/libcollections.rlib FORCE
+ mkdir -p build
+ RUSTC="./rustc.sh" cargo rustc --verbose --target $(ARCH)-unknown-none.json -- -C soft-float -o $@
build/kernel.bin: build/libkernel.a
ld -m elf_$(ARCH) --gc-sections -z max-page-size=0x1000 -T bootloader/x86/kernel.ld -o $@ $<
@@ -31,4 +44,4 @@ build/harddrive.bin: build/kernel.bin
nasm -f bin -o $@ -D ARCH_$(ARCH) -ibootloader/x86/ -ibuild/ bootloader/x86/harddrive.asm
clean:
- rm -rf build/*
+ rm -rf build/* target/*
diff --git a/arch/x86_64/src/mem.rs b/arch/x86_64/src/externs.rs
similarity index 100%
rename from arch/x86_64/src/mem.rs
rename to arch/x86_64/src/externs.rs
diff --git a/arch/x86_64/src/idt.rs b/arch/x86_64/src/idt.rs
index 7cc422a..0b08cd2 100644
--- a/arch/x86_64/src/idt.rs
+++ b/arch/x86_64/src/idt.rs
@@ -7,6 +7,15 @@ pub static mut IDTR: IdtDescriptor = IdtDescriptor {
pub static mut IDT: [IdtEntry; 256] = [IdtEntry::new(); 256];
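+/// Point all 256 IDT entries at `func` and load the IDTR.
+/// The selector value 8 should refer to the kernel code segment in the GDT.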
+pub unsafe fn init(func: unsafe extern fn()) {
+ for entry in IDT.iter_mut() {
+ entry.set_flags(IDT_PRESENT | IDT_RING_0 | IDT_INTERRUPT);
+ entry.set_offset(8, func as usize);
+ }
+ IDTR.set_slice(&IDT);
+ IDTR.load();
+}
+
bitflags! {
pub flags IdtFlags: u8 {
const IDT_PRESENT = 1 << 7,
diff --git a/arch/x86_64/src/lib.rs b/arch/x86_64/src/lib.rs
index 3b03958..50f1cc0 100644
--- a/arch/x86_64/src/lib.rs
+++ b/arch/x86_64/src/lib.rs
@@ -5,6 +5,7 @@
#![feature(const_fn)]
#![feature(core_intrinsics)]
#![feature(naked_functions)]
+#![feature(unique)]
#![no_std]
#[macro_use]
@@ -67,6 +68,9 @@ macro_rules! interrupt {
};
}
+/// Memcpy, memmove, etc.
+pub mod externs;
+
/// Global descriptor table
pub mod gdt;
@@ -82,13 +86,14 @@ pub mod interrupt;
/// Initialization and main function
pub mod main;
-/// Memcpy, memmove, etc.
-pub mod mem;
+/// Memory management
+pub mod memory;
+
+/// Paging
+pub mod paging;
/// Serial driver and print! support
pub mod serial;
/// Task state segment
pub mod tss;
-
-pub mod physical;
diff --git a/arch/x86_64/src/main.rs b/arch/x86_64/src/main.rs
index a17d19c..5b0e25a 100644
--- a/arch/x86_64/src/main.rs
+++ b/arch/x86_64/src/main.rs
@@ -3,8 +3,20 @@
/// It must create the IDT with the correct entries; those entries are
/// defined in other files inside of the `arch` module
-use super::idt::{IDTR, IDT, IDT_PRESENT, IDT_RING_0, IDT_INTERRUPT};
-use super::mem::memset;
+use externs::memset;
+use idt;
+use memory::{self, Frame};
+use paging::{self, entry, PhysicalAddress};
+
+/// Test of zero values in BSS.
+static BSS_TEST_ZERO: usize = 0;
+/// Test of non-zero values in BSS.
+static BSS_TEST_NONZERO: usize = 0xFFFFFFFFFFFFFFFF;
+
+extern {
+ /// Kernel main function
+ fn kmain() -> !;
+}
extern {
/// The starting byte of the text (code) data segment.
@@ -25,15 +37,6 @@ extern {
static mut __bss_end: u8;
}
-/// Test of zero values in BSS.
-static BSS_TEST_ZERO: usize = 0;
-/// Test of non-zero values in BSS.
-static BSS_TEST_NONZERO: usize = 0xFFFFFFFFFFFFFFFF;
-
-extern {
- fn kmain() -> !;
-}
-
#[no_mangle]
pub unsafe extern fn kstart() -> ! {
asm!("xchg bx, bx" : : : : "intel", "volatile");
@@ -52,21 +55,31 @@ pub unsafe extern fn kstart() -> ! {
debug_assert_eq!(BSS_TEST_NONZERO, 0xFFFFFFFFFFFFFFFF);
}
- asm!("xchg bx, bx" : : : : "intel", "volatile");
+ // Set up IDT
+ idt::init(blank);
- //Set up IDT
- for entry in IDT.iter_mut() {
- entry.set_flags(IDT_PRESENT | IDT_RING_0 | IDT_INTERRUPT);
- entry.set_offset(8, blank as usize);
+ // Initialize memory management
+ let mut allocator = memory::init(0, &__bss_end as *const u8 as usize);
+
+ // Initialize paging
+ let mut pager = paging::init();
+
+ // Remap a section with `flags`
+ let mut remap_section = |start_ref: &u8, end_ref: &u8, flags: entry::EntryFlags| {
+ let start = start_ref as *const _ as usize;
+ let end = end_ref as *const _ as usize;
+
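+ // Number of pages, rounded up so a partial trailing page is included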
+ for i in 0..(end - start + paging::PAGE_SIZE - 1)/paging::PAGE_SIZE {
+ let frame = Frame::containing_address(PhysicalAddress::new(start + i * paging::PAGE_SIZE));
+ pager.identity_map(frame, flags, &mut allocator);
+ }
+ };
+
+ // Remap text read-only
+ {
+ asm!("xchg bx, bx" : : : : "intel", "volatile");
+ //TODO remap_section(& __text_start, & __text_end, entry::PRESENT);
}
- IDTR.set_slice(&IDT);
- IDTR.load();
-
- asm!("xchg bx, bx" : : : : "intel", "volatile");
-
- asm!("int 0xFF" : : : : "intel", "volatile");
-
- asm!("xchg bx, bx" : : : : "intel", "volatile");
kmain();
}
diff --git a/arch/x86_64/src/memory/area_frame_alloc.rs b/arch/x86_64/src/memory/area_frame_alloc.rs
new file mode 100644
index 0000000..dd93b62
--- /dev/null
+++ b/arch/x86_64/src/memory/area_frame_alloc.rs
@@ -0,0 +1,81 @@
+//! # Area frame allocator
+//! Some code was borrowed from [Phil Opp's Blog](http://os.phil-opp.com/allocating-frames.html)
+
+use paging::PhysicalAddress;
+
+use super::{Frame, FrameAllocator, MemoryArea, MemoryAreaIter};
+
+
+pub struct AreaFrameAllocator {
+ next_free_frame: Frame,
+ current_area: Option<&'static MemoryArea>,
+ areas: MemoryAreaIter,
+ kernel_start: Frame,
+ kernel_end: Frame
+}
+
+impl AreaFrameAllocator {
+ pub fn new(kernel_start: usize, kernel_end: usize, memory_areas: MemoryAreaIter) -> AreaFrameAllocator {
+ let mut allocator = AreaFrameAllocator {
+ next_free_frame: Frame::containing_address(PhysicalAddress::new(0)),
+ current_area: None,
+ areas: memory_areas,
+ kernel_start: Frame::containing_address(PhysicalAddress::new(kernel_start)),
+ kernel_end: Frame::containing_address(PhysicalAddress::new(kernel_end))
+ };
+ allocator.choose_next_area();
+ allocator
+ }
+
+ fn choose_next_area(&mut self) {
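+ // Pick the free area with the lowest base address that still contains
+ // frames at or beyond `next_free_frame`; `None` means memory is exhausted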
+ self.current_area = self.areas.clone().filter(|area| {
+ let address = area.base_addr + area.length - 1;
+ Frame::containing_address(PhysicalAddress::new(address as usize)) >= self.next_free_frame
+ }).min_by_key(|area| area.base_addr);
+
+ if let Some(area) = self.current_area {
+ let start_frame = Frame::containing_address(PhysicalAddress::new(area.base_addr as usize));
+ if self.next_free_frame < start_frame {
+ self.next_free_frame = start_frame;
+ }
+ }
+ }
+}
+
+impl FrameAllocator for AreaFrameAllocator {
+ fn allocate_frame(&mut self) -> Option<Frame> {
+ if let Some(area) = self.current_area {
+ // "Clone" the frame to return it if it's free. Frame doesn't
+ // implement Clone, but we can construct an identical frame.
+ let frame = Frame{ number: self.next_free_frame.number };
+
+ // the last frame of the current area
+ let current_area_last_frame = {
+ let address = area.base_addr + area.length - 1;
+ Frame::containing_address(PhysicalAddress::new(address as usize))
+ };
+
+ if frame > current_area_last_frame {
+ // all frames of current area are used, switch to next area
+ self.choose_next_area();
+ } else if frame >= self.kernel_start && frame <= self.kernel_end {
+ // `frame` is used by the kernel
+ self.next_free_frame = Frame {
+ number: self.kernel_end.number + 1
+ };
+ } else {
+ // frame is unused, increment `next_free_frame` and return it
+ self.next_free_frame.number += 1;
+ return Some(frame);
+ }
+ // `frame` was not valid, try it again with the updated `next_free_frame`
+ self.allocate_frame()
+ } else {
+ None // no free frames left
+ }
+ }
+
+ fn deallocate_frame(&mut self, frame: Frame) {
+ unimplemented!()
+ }
+}
diff --git a/arch/x86_64/src/memory/mod.rs b/arch/x86_64/src/memory/mod.rs
new file mode 100644
index 0000000..7b3cdea
--- /dev/null
+++ b/arch/x86_64/src/memory/mod.rs
@@ -0,0 +1,109 @@
+//! # Memory management
+//! Some code was borrowed from [Phil Opp's Blog](http://os.phil-opp.com/allocating-frames.html)
+
+pub use paging::{PAGE_SIZE, PhysicalAddress};
+
+use self::area_frame_alloc::AreaFrameAllocator;
+
+pub mod area_frame_alloc;
+
+/// The current memory map. Its size is capped at 512 entries, although the
+/// region holding it (0x500 to 0x5000) could fit up to 800 in total.
+static mut MEMORY_MAP: [MemoryArea; 512] = [MemoryArea { base_addr: 0, length: 0, _type: 0, acpi: 0 }; 512];
+
+/// Memory does not exist
+const MEMORY_AREA_NULL: u32 = 0;
+
+/// Memory is free to use
+const MEMORY_AREA_FREE: u32 = 1;
+
+/// Memory is reserved
+const MEMORY_AREA_RESERVED: u32 = 2;
+
+/// Memory is used by ACPI, and can be reclaimed
+const MEMORY_AREA_ACPI: u32 = 3;
+
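+/// Iterator over entries of `MEMORY_MAP` that match a given type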
+#[derive(Clone)]
+pub struct MemoryAreaIter {
+ _type: u32,
+ i: usize
+}
+
+impl MemoryAreaIter {
+ fn new(_type: u32) -> Self {
+ MemoryAreaIter {
+ _type: _type,
+ i: 0
+ }
+ }
+}
+
+impl Iterator for MemoryAreaIter {
+ type Item = &'static MemoryArea;
+ fn next(&mut self) -> Option<Self::Item> {
+ while self.i < unsafe { MEMORY_MAP.len() } {
+ let entry = unsafe { &MEMORY_MAP[self.i] };
+ self.i += 1;
+ if entry._type == self._type {
+ return Some(entry);
+ }
+ }
+ None
+ }
+}
+
+/// Init memory module
+/// Must be called once, and only once.
+pub unsafe fn init(kernel_start: usize, kernel_end: usize) -> AreaFrameAllocator {
+ // Copy memory map from bootloader location
+ for (i, entry) in MEMORY_MAP.iter_mut().enumerate() {
+ *entry = *(0x500 as *const MemoryArea).offset(i as isize);
+ if entry.length > 0 {
+ println!("{:?}", entry);
+ }
+ }
+
+ AreaFrameAllocator::new(kernel_start, kernel_end, MemoryAreaIter::new(MEMORY_AREA_FREE))
+}
+
+/// A memory map area
+#[derive(Copy, Clone, Debug, Default)]
+#[repr(packed)]
+pub struct MemoryArea {
+ pub base_addr: u64,
+ pub length: u64,
+ pub _type: u32,
+ pub acpi: u32
+}
+
+/// A frame, allocated by the frame allocator.
+/// Do not add more derives, or make anything `pub`!
+#[derive(Debug, PartialEq, Eq, PartialOrd, Ord)]
+pub struct Frame {
+ number: usize
+}
+
+impl Frame {
+ /// Create a frame containing `address`
+ pub fn containing_address(address: PhysicalAddress) -> Frame {
+ Frame {
+ number: address.get() / PAGE_SIZE
+ }
+ }
+
+ /// Get the address of this frame
+ pub fn start_address(&self) -> PhysicalAddress {
+ PhysicalAddress::new(self.number * PAGE_SIZE)
+ }
+
+ fn clone(&self) -> Frame {
+ Frame {
+ number: self.number
+ }
+ }
+}
+
+pub trait FrameAllocator {
+ fn allocate_frame(&mut self) -> Option<Frame>;
+ fn deallocate_frame(&mut self, frame: Frame);
+}
diff --git a/arch/x86_64/src/paging/entry.rs b/arch/x86_64/src/paging/entry.rs
new file mode 100644
index 0000000..605650d
--- /dev/null
+++ b/arch/x86_64/src/paging/entry.rs
@@ -0,0 +1,62 @@
+//! # Page table entry
+//! Some code borrowed from [Phil Opp's Blog](http://os.phil-opp.com/modifying-page-tables.html)
+
+use memory::Frame;
+
+use super::PhysicalAddress;
+
+/// A page table entry
+pub struct Entry(u64);
+
+bitflags! {
+ pub flags EntryFlags: u64 {
+ const PRESENT = 1 << 0,
+ const WRITABLE = 1 << 1,
+ const USER_ACCESSIBLE = 1 << 2,
+ const WRITE_THROUGH = 1 << 3,
+ const NO_CACHE = 1 << 4,
+ const ACCESSED = 1 << 5,
+ const DIRTY = 1 << 6,
+ const HUGE_PAGE = 1 << 7,
+ const GLOBAL = 1 << 8,
+ const NO_EXECUTE = 1 << 63,
+ }
+}
+
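+/// Mask of the entry bits that hold the frame's physical address (bits 12-51)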
+pub const ADDRESS_MASK: usize = 0x000fffff_fffff000;
+
+impl Entry {
+ /// Is the entry unused?
+ pub fn is_unused(&self) -> bool {
+ self.0 == 0
+ }
+
+ /// Make the entry unused
+ pub fn set_unused(&mut self) {
+ self.0 = 0;
+ }
+
+ /// Get the address this page references
+ pub fn address(&self) -> PhysicalAddress {
+ PhysicalAddress::new(self.0 as usize & ADDRESS_MASK)
+ }
+
+ /// Get the current entry flags
+ pub fn flags(&self) -> EntryFlags {
+ EntryFlags::from_bits_truncate(self.0)
+ }
+
+ /// Get the associated frame, if available
+ pub fn pointed_frame(&self) -> Option<Frame> {
+ if self.flags().contains(PRESENT) {
+ Some(Frame::containing_address(self.address()))
+ } else {
+ None
+ }
+ }
+
+ pub fn set(&mut self, frame: Frame, flags: EntryFlags) {
+ debug_assert!(frame.start_address().get() & !ADDRESS_MASK == 0);
+ self.0 = (frame.start_address().get() as u64) | flags.bits();
+ }
+}
diff --git a/arch/x86_64/src/paging/mod.rs b/arch/x86_64/src/paging/mod.rs
new file mode 100644
index 0000000..6c11d69
--- /dev/null
+++ b/arch/x86_64/src/paging/mod.rs
@@ -0,0 +1,165 @@
+//! # Paging
+//! Some code was borrowed from [Phil Opp's Blog](http://os.phil-opp.com/modifying-page-tables.html)
+
+use core::ptr::Unique;
+
+use memory::{Frame, FrameAllocator};
+
+use self::entry::EntryFlags;
+use self::table::{Table, Level4};
+
+pub mod entry;
+pub mod table;
+
+/// Number of entries per page table
+pub const ENTRY_COUNT: usize = 512;
+
+/// Size of pages
+pub const PAGE_SIZE: usize = 4096;
+
+/// Initialize paging
+pub unsafe fn init() -> ActivePageTable {
+ ActivePageTable::new()
+}
+
+pub struct ActivePageTable {
+ p4: Unique<Table<Level4>>,
+}
+
+impl ActivePageTable {
+ /// Create a new page table
+ pub unsafe fn new() -> ActivePageTable {
+ ActivePageTable {
+ p4: Unique::new(table::P4),
+ }
+ }
+
+ fn p4(&self) -> &Table<Level4> {
+ unsafe { self.p4.get() }
+ }
+
+ fn p4_mut(&mut self) -> &mut Table<Level4> {
+ unsafe { self.p4.get_mut() }
+ }
+
+ /// Map a page to a frame
+ pub fn map_to<A>(&mut self, page: Page, frame: Frame, flags: EntryFlags, allocator: &mut A)
+ where A: FrameAllocator
+ {
+ let mut p3 = self.p4_mut().next_table_create(page.p4_index(), allocator);
+ let mut p2 = p3.next_table_create(page.p3_index(), allocator);
+ let mut p1 = p2.next_table_create(page.p2_index(), allocator);
+
+ assert!(p1[page.p1_index()].is_unused());
+ p1[page.p1_index()].set(frame, flags | entry::PRESENT);
+ }
+
+ /// Map a page to the next free frame
+ pub fn map<A>(&mut self, page: Page, flags: EntryFlags, allocator: &mut A)
+ where A: FrameAllocator
+ {
+ let frame = allocator.allocate_frame().expect("out of memory");
+ self.map_to(page, frame, flags, allocator)
+ }
+
+ /// Identity map a frame
+ pub fn identity_map<A>(&mut self, frame: Frame, flags: EntryFlags, allocator: &mut A)
+ where A: FrameAllocator
+ {
+ let page = Page::containing_address(VirtualAddress::new(frame.start_address().get()));
+ self.map_to(page, frame, flags, allocator)
+ }
+
+ /// Unmap a page
+ fn unmap<A>(&mut self, page: Page, allocator: &mut A)
+ where A: FrameAllocator
+ {
+ assert!(self.translate(page.start_address()).is_some());
+
+ let p1 = self.p4_mut()
+ .next_table_mut(page.p4_index())
+ .and_then(|p3| p3.next_table_mut(page.p3_index()))
+ .and_then(|p2| p2.next_table_mut(page.p2_index()))
+ .expect("mapping code does not support huge pages");
+ let frame = p1[page.p1_index()].pointed_frame().unwrap();
+ p1[page.p1_index()].set_unused();
+ // TODO free p(1,2,3) table if empty
+ allocator.deallocate_frame(frame);
+ }
+
+ fn translate_page(&self, page: Page) -> Option<Frame> {
+ self.p4().next_table(page.p4_index())
+ .and_then(|p3| p3.next_table(page.p3_index()))
+ .and_then(|p2| p2.next_table(page.p2_index()))
+ .and_then(|p1| p1[page.p1_index()].pointed_frame())
+ }
+
+ /// Translate a virtual address to a physical one
+ pub fn translate(&self, virtual_address: VirtualAddress) -> Option<PhysicalAddress> {
+ let offset = virtual_address.get() % PAGE_SIZE;
+ self.translate_page(Page::containing_address(virtual_address))
+ .map(|frame| PhysicalAddress::new(frame.start_address().get() + offset))
+ }
+}
+
+/// A physical address.
+#[derive(Copy, Clone, Debug, Eq, Ord, PartialEq, PartialOrd)]
+pub struct PhysicalAddress(usize);
+
+impl PhysicalAddress {
+ pub fn new(address: usize) -> Self {
+ PhysicalAddress(address)
+ }
+
+ pub fn get(&self) -> usize {
+ self.0
+ }
+}
+
+/// A virtual address.
+#[derive(Copy, Clone, Debug, Eq, Ord, PartialEq, PartialOrd)]
+pub struct VirtualAddress(usize);
+
+impl VirtualAddress {
+ pub fn new(address: usize) -> Self {
+ VirtualAddress(address)
+ }
+
+ pub fn get(&self) -> usize {
+ self.0
+ }
+}
+
+/// Page
+#[derive(Debug, Clone, Copy)]
+pub struct Page {
+ number: usize
+}
+
+impl Page {
+ fn start_address(&self) -> VirtualAddress {
+ VirtualAddress::new(self.number * PAGE_SIZE)
+ }
+
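+ // Each paging level consumes 9 bits of the page number, from bits 27-35
+ // for the P4 index down to bits 0-8 for the P1 index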
+ fn p4_index(&self) -> usize {
+ (self.number >> 27) & 0o777
+ }
+
+ fn p3_index(&self) -> usize {
+ (self.number >> 18) & 0o777
+ }
+
+ fn p2_index(&self) -> usize {
+ (self.number >> 9) & 0o777
+ }
+
+ fn p1_index(&self) -> usize {
+ (self.number >> 0) & 0o777
+ }
+
+ pub fn containing_address(address: VirtualAddress) -> Page {
+ assert!(address.get() < 0x0000_8000_0000_0000 || address.get() >= 0xffff_8000_0000_0000,
+ "invalid address: 0x{:x}", address.get());
+ Page { number: address.get() / PAGE_SIZE }
+ }
+}
diff --git a/arch/x86_64/src/paging/table.rs b/arch/x86_64/src/paging/table.rs
new file mode 100644
index 0000000..2df540d
--- /dev/null
+++ b/arch/x86_64/src/paging/table.rs
@@ -0,0 +1,100 @@
+//! # Page table
+//! Code borrowed from [Phil Opp's Blog](http://os.phil-opp.com/modifying-page-tables.html)
+
+use core::marker::PhantomData;
+use core::ops::{Index, IndexMut};
+
+use memory::FrameAllocator;
+
+use super::entry::*;
+use super::ENTRY_COUNT;
+
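+/// The P4 table is reachable at this address because the bootloader links the
+/// last PML4 entry back to the PML4 itself (all four indices are 511)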
+pub const P4: *mut Table<Level4> = 0xffffffff_fffff000 as *mut _;
+
+pub trait TableLevel {}
+
+pub enum Level4 {}
+pub enum Level3 {}
+pub enum Level2 {}
+pub enum Level1 {}
+
+impl TableLevel for Level4 {}
+impl TableLevel for Level3 {}
+impl TableLevel for Level2 {}
+impl TableLevel for Level1 {}
+
+pub trait HierarchicalLevel: TableLevel {
+ type NextLevel: TableLevel;
+}
+
+impl HierarchicalLevel for Level4 {
+ type NextLevel = Level3;
+}
+
+impl HierarchicalLevel for Level3 {
+ type NextLevel = Level2;
+}
+
+impl HierarchicalLevel for Level2 {
+ type NextLevel = Level1;
+}
+
+pub struct Table<L: TableLevel> {
+ entries: [Entry; ENTRY_COUNT],
+ level: PhantomData<L>,
+}
+
+impl<L> Table<L> where L: TableLevel {
+ pub fn zero(&mut self) {
+ for entry in self.entries.iter_mut() {
+ entry.set_unused();
+ }
+ }
+}
+
+impl<L> Table<L> where L: HierarchicalLevel {
+ pub fn next_table(&self, index: usize) -> Option<&Table<L::NextLevel>> {
+ self.next_table_address(index).map(|address| unsafe { &*(address as *const _) })
+ }
+
+ pub fn next_table_mut(&mut self, index: usize) -> Option<&mut Table<L::NextLevel>> {
+ self.next_table_address(index).map(|address| unsafe { &mut *(address as *mut _) })
+ }
+
+ pub fn next_table_create<A>(&mut self, index: usize, allocator: &mut A) -> &mut Table<L::NextLevel>
+ where A: FrameAllocator
+ {
+ if self.next_table(index).is_none() {
+ assert!(!self.entries[index].flags().contains(HUGE_PAGE),
+ "mapping code does not support huge pages");
+ let frame = allocator.allocate_frame().expect("no frames available");
+ self.entries[index].set(frame, PRESENT | WRITABLE);
+ self.next_table_mut(index).unwrap().zero();
+ }
+ self.next_table_mut(index).unwrap()
+ }
+
+ fn next_table_address(&self, index: usize) -> Option<usize> {
+ let entry_flags = self[index].flags();
+ if entry_flags.contains(PRESENT) && !entry_flags.contains(HUGE_PAGE) {
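+ // In the recursive mapping, shifting our own address left by 9 bits
+ // and inserting `index` gives the address of the child table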
+ let table_address = self as *const _ as usize;
+ Some((table_address << 9) | (index << 12))
+ } else {
+ None
+ }
+ }
+}
+
+impl<L> Index<usize> for Table<L> where L: TableLevel {
+ type Output = Entry;
+
+ fn index(&self, index: usize) -> &Entry {
+ &self.entries[index]
+ }
+}
+
+impl<L> IndexMut<usize> for Table<L> where L: TableLevel {
+ fn index_mut(&mut self, index: usize) -> &mut Entry {
+ &mut self.entries[index]
+ }
+}
diff --git a/arch/x86_64/src/physical.rs b/arch/x86_64/src/physical.rs
deleted file mode 100644
index 8885cd1..0000000
--- a/arch/x86_64/src/physical.rs
+++ /dev/null
@@ -1,11 +0,0 @@
-//! Typestrong address segregation.
-
-/// A physical address in memory.
-#[derive(Clone, Debug, PartialEq, Eq)]
-pub struct Physical {
- /// The position.
- ///
- /// Note that we do not use a pointer here to avoid simple mistakes where the programmer
- /// confuse virtual and physical.
- pub inner: u64,
-}
diff --git a/bootloader/x86/startup-x86_64.asm b/bootloader/x86/startup-x86_64.asm
index 2ced709..25a6102 100644
--- a/bootloader/x86/startup-x86_64.asm
+++ b/bootloader/x86/startup-x86_64.asm
@@ -17,6 +17,8 @@ startup_arch:
;Link first PML4 to PDP
mov DWORD [es:edi], 0x71000 | 1 << 1 | 1
add edi, 0x1000
+ ;Link last PML4 to PML4
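+ ;(recursive mapping, so the kernel can reach the tables at 0xFFFFFFFF_FFFFF000)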
+ mov DWORD [es:edi - 8], 0x70000 | 1 << 1 | 1
;Link first PDP to PD
mov DWORD [es:edi], 0x72000 | 1 << 1 | 1
add edi, 0x1000
diff --git a/kernel/paging/mod.rs b/kernel/paging/mod.rs
deleted file mode 100644
index f017962..0000000
--- a/kernel/paging/mod.rs
+++ /dev/null
@@ -1,6 +0,0 @@
-/// A newtype representing a virtual address.
-#[derive(Copy, Clone, Debug, Eq, Ord, PartialEq, PartialOrd)]
-pub struct Virtual {
- /// The inner value.
- pub inner: usize,
-}
diff --git a/kernel/panic.rs b/kernel/panic.rs
index fb90cfd..a137120 100644
--- a/kernel/panic.rs
+++ b/kernel/panic.rs
@@ -9,7 +9,7 @@ extern "C" fn eh_personality() {}
#[cfg(not(test))]
/// Required to handle panics
#[lang = "panic_fmt"]
-extern "C" fn panic_fmt() -> ! {
+extern "C" fn panic_fmt(fmt: ::core::fmt::Arguments, file_line: &(&'static str, u32)) -> ! {
loop {
unsafe { halt() };
}
diff --git a/rust b/rust
new file mode 160000
index 0000000..b72fa8c
--- /dev/null
+++ b/rust
@@ -0,0 +1 @@
+Subproject commit b72fa8ca95c02e4b44b216a425fd563ad2ef58bb
diff --git a/rustc.sh b/rustc.sh
new file mode 100755
index 0000000..4278e41
--- /dev/null
+++ b/rustc.sh
@@ -0,0 +1,2 @@
+#!/bin/bash
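+# Wrapper used by the Makefile and by cargo (via RUSTC) so every rustc
+# invocation searches build/ for the freestanding rlibs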
+RUST_BACKTRACE=1 rustc -L build "$@"
diff --git a/x86_64-unknown-none.json b/x86_64-unknown-none.json
index cc955d3..dd3badd 100644
--- a/x86_64-unknown-none.json
+++ b/x86_64-unknown-none.json
@@ -9,7 +9,7 @@
"vendor": "unknown",
"target-family": "redox",
"pre-link-args": ["-m64", "-nostdlib", "-static"],
- "features": "-sse3,-ssse3,-sse4.1,-sse4.2,-3dnow,-3dnowa,-avx,-avx2",
+ "features": "-mmx,-sse,-sse2,-sse3,-ssse3,-sse4.1,-sse4.2,-3dnow,-3dnowa,-avx,-avx2,+soft-float",
"dynamic-linking": false,
"executables": false,
"relocation-model": "static",