Add simple paging, use Rust libcore, and compile without SSE

Jeremy Soller 2016-08-15 11:29:53 -06:00
parent 9f0819dafb
commit 465363f0a1
18 changed files with 601 additions and 53 deletions

View file: paging/entry.rs

@@ -0,0 +1,62 @@
//! # Page table entry
//! Some code borrowed from [Phil Opp's Blog](http://os.phil-opp.com/modifying-page-tables.html)

use memory::Frame;

use super::PhysicalAddress;

/// A page table entry
pub struct Entry(u64);

bitflags! {
    pub flags EntryFlags: u64 {
        const PRESENT = 1 << 0,
        const WRITABLE = 1 << 1,
        const USER_ACCESSIBLE = 1 << 2,
        const WRITE_THROUGH = 1 << 3,
        const NO_CACHE = 1 << 4,
        const ACCESSED = 1 << 5,
        const DIRTY = 1 << 6,
        const HUGE_PAGE = 1 << 7,
        const GLOBAL = 1 << 8,
        const NO_EXECUTE = 1 << 63,
    }
}

/// Mask of the physical address bits in a page table entry
pub const ADDRESS_MASK: usize = 0x000fffff_fffff000;

impl Entry {
    /// Is the entry unused?
    pub fn is_unused(&self) -> bool {
        self.0 == 0
    }

    /// Make the entry unused
    pub fn set_unused(&mut self) {
        self.0 = 0;
    }

    /// Get the address this page references
    pub fn address(&self) -> PhysicalAddress {
        PhysicalAddress::new(self.0 as usize & ADDRESS_MASK)
    }

    /// Get the current entry flags
    pub fn flags(&self) -> EntryFlags {
        EntryFlags::from_bits_truncate(self.0)
    }

    /// Get the associated frame, if available
    pub fn pointed_frame(&self) -> Option<Frame> {
        if self.flags().contains(PRESENT) {
            Some(Frame::containing_address(self.address()))
        } else {
            None
        }
    }

    /// Point this entry at `frame` with the given flags
    pub fn set(&mut self, frame: Frame, flags: EntryFlags) {
        debug_assert!(frame.start_address().get() & !ADDRESS_MASK == 0);
        self.0 = (frame.start_address().get() as u64) | flags.bits();
    }
}
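A minimal usage sketch, not part of this commit: from inside the entry module (where the Entry field is visible), an entry can be set and read back. The Frame helpers are assumed to behave as the memory module imports above imply.

// Hypothetical example, assuming Frame::containing_address and
// Frame::start_address as used by the code above.
let mut entry = Entry(0);
assert!(entry.is_unused());

// 0xb8000 is page aligned, so its containing frame starts there.
let frame = Frame::containing_address(PhysicalAddress::new(0xb8000));
entry.set(frame, PRESENT | WRITABLE);

// Flags and address share one u64 and can be read back separately.
assert_eq!(entry.flags(), PRESENT | WRITABLE);
assert_eq!(entry.address().get(), 0xb8000);
assert_eq!(entry.pointed_frame().map(|f| f.start_address().get()), Some(0xb8000));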

View file: paging/mod.rs

@@ -0,0 +1,165 @@
//! # Paging
//! Some code was borrowed from [Phil Opp's Blog](http://os.phil-opp.com/modifying-page-tables.html)

use core::ptr::Unique;

use memory::{Frame, FrameAllocator};

use self::entry::EntryFlags;
use self::table::{Table, Level4};

pub mod entry;
pub mod table;

/// Number of entries per page table
pub const ENTRY_COUNT: usize = 512;

/// Size of pages
pub const PAGE_SIZE: usize = 4096;

/// Initialize paging
pub unsafe fn init() -> ActivePageTable {
    ActivePageTable::new()
}

/// The currently active page table, accessed through the recursive P4 mapping
pub struct ActivePageTable {
    p4: Unique<Table<Level4>>,
}

impl ActivePageTable {
    /// Create a new page table
    pub unsafe fn new() -> ActivePageTable {
        ActivePageTable {
            p4: Unique::new(table::P4),
        }
    }

    fn p4(&self) -> &Table<Level4> {
        unsafe { self.p4.get() }
    }

    fn p4_mut(&mut self) -> &mut Table<Level4> {
        unsafe { self.p4.get_mut() }
    }

    /// Map a page to a frame
    pub fn map_to<A>(&mut self, page: Page, frame: Frame, flags: EntryFlags, allocator: &mut A)
        where A: FrameAllocator
    {
        let p3 = self.p4_mut().next_table_create(page.p4_index(), allocator);
        let p2 = p3.next_table_create(page.p3_index(), allocator);
        let p1 = p2.next_table_create(page.p2_index(), allocator);

        assert!(p1[page.p1_index()].is_unused());
        p1[page.p1_index()].set(frame, flags | entry::PRESENT);
    }

    /// Map a page to the next free frame
    pub fn map<A>(&mut self, page: Page, flags: EntryFlags, allocator: &mut A)
        where A: FrameAllocator
    {
        let frame = allocator.allocate_frame().expect("out of memory");
        self.map_to(page, frame, flags, allocator)
    }

    /// Identity map a frame, so that its virtual and physical addresses are equal
    pub fn identity_map<A>(&mut self, frame: Frame, flags: EntryFlags, allocator: &mut A)
        where A: FrameAllocator
    {
        let page = Page::containing_address(VirtualAddress::new(frame.start_address().get()));
        self.map_to(page, frame, flags, allocator)
    }

    /// Unmap a page, deallocating its frame
    fn unmap<A>(&mut self, page: Page, allocator: &mut A)
        where A: FrameAllocator
    {
        assert!(self.translate(page.start_address()).is_some());

        let p1 = self.p4_mut()
            .next_table_mut(page.p4_index())
            .and_then(|p3| p3.next_table_mut(page.p3_index()))
            .and_then(|p2| p2.next_table_mut(page.p2_index()))
            .expect("mapping code does not support huge pages");
        let frame = p1[page.p1_index()].pointed_frame().unwrap();
        p1[page.p1_index()].set_unused();
        // TODO free p(1,2,3) table if empty
        allocator.deallocate_frame(frame);
    }

    /// Get the frame a page is mapped to, if any
    fn translate_page(&self, page: Page) -> Option<Frame> {
        self.p4().next_table(page.p4_index())
            .and_then(|p3| p3.next_table(page.p3_index()))
            .and_then(|p2| p2.next_table(page.p2_index()))
            .and_then(|p1| p1[page.p1_index()].pointed_frame())
    }

    /// Translate a virtual address to a physical one
    pub fn translate(&self, virtual_address: VirtualAddress) -> Option<PhysicalAddress> {
        let offset = virtual_address.get() % PAGE_SIZE;
        self.translate_page(Page::containing_address(virtual_address))
            .map(|frame| PhysicalAddress::new(frame.start_address().get() + offset))
    }
}

/// A physical address.
#[derive(Copy, Clone, Debug, Eq, Ord, PartialEq, PartialOrd)]
pub struct PhysicalAddress(usize);

impl PhysicalAddress {
    pub fn new(address: usize) -> Self {
        PhysicalAddress(address)
    }

    pub fn get(&self) -> usize {
        self.0
    }
}

/// A virtual address.
#[derive(Copy, Clone, Debug, Eq, Ord, PartialEq, PartialOrd)]
pub struct VirtualAddress(usize);

impl VirtualAddress {
    pub fn new(address: usize) -> Self {
        VirtualAddress(address)
    }

    pub fn get(&self) -> usize {
        self.0
    }
}

/// A page of virtual memory, identified by its page number
#[derive(Debug, Clone, Copy)]
pub struct Page {
    number: usize
}

impl Page {
    /// The virtual address at which this page starts
    fn start_address(&self) -> VirtualAddress {
        VirtualAddress::new(self.number * PAGE_SIZE)
    }

    /// Index into the P4 table (bits 27..36 of the page number)
    fn p4_index(&self) -> usize {
        (self.number >> 27) & 0o777
    }

    /// Index into the P3 table (bits 18..27 of the page number)
    fn p3_index(&self) -> usize {
        (self.number >> 18) & 0o777
    }

    /// Index into the P2 table (bits 9..18 of the page number)
    fn p2_index(&self) -> usize {
        (self.number >> 9) & 0o777
    }

    /// Index into the P1 table (bits 0..9 of the page number)
    fn p1_index(&self) -> usize {
        (self.number >> 0) & 0o777
    }

    /// The page containing the given virtual address, which must be canonical
    pub fn containing_address(address: VirtualAddress) -> Page {
        assert!(address.get() < 0x0000_8000_0000_0000 || address.get() >= 0xffff_8000_0000_0000,
                "invalid address: 0x{:x}", address.get());
        Page { number: address.get() / PAGE_SIZE }
    }
}
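A worked sketch of the index arithmetic and the mapping API, not part of this commit: the page number is the virtual address divided by PAGE_SIZE, and each level's index is a 9-bit slice of it. Here `allocator` stands in for any FrameAllocator, and the private index methods are called from inside the module where they are visible.

// Hypothetical example. 0x40_0000 (4 MiB) has page number 0x400.
let page = Page::containing_address(VirtualAddress::new(0x40_0000));
assert_eq!(page.p4_index(), 0); // address bits 47..39
assert_eq!(page.p3_index(), 0); // address bits 38..30
assert_eq!(page.p2_index(), 2); // address bits 29..21
assert_eq!(page.p1_index(), 0); // address bits 20..12

// Map the page to some free frame, then translate an address inside it.
let mut active_table = unsafe { init() };
active_table.map(page, entry::WRITABLE, &mut allocator);
let physical = active_table.translate(VirtualAddress::new(0x40_0123));
assert!(physical.is_some()); // the frame's start address plus offset 0x123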

View file: paging/table.rs

@@ -0,0 +1,100 @@
//! # Page table
//! Code borrowed from [Phil Opp's Blog](http://os.phil-opp.com/modifying-page-tables.html)

use core::marker::PhantomData;
use core::ops::{Index, IndexMut};

use memory::FrameAllocator;

use super::entry::*;
use super::ENTRY_COUNT;

/// Virtual address of the P4 table, reached through the recursive entry in its last slot
pub const P4: *mut Table<Level4> = 0xffffffff_fffff000 as *mut _;

pub trait TableLevel {}

pub enum Level4 {}
pub enum Level3 {}
pub enum Level2 {}
pub enum Level1 {}

impl TableLevel for Level4 {}
impl TableLevel for Level3 {}
impl TableLevel for Level2 {}
impl TableLevel for Level1 {}

pub trait HierarchicalLevel: TableLevel {
    type NextLevel: TableLevel;
}

impl HierarchicalLevel for Level4 {
    type NextLevel = Level3;
}

impl HierarchicalLevel for Level3 {
    type NextLevel = Level2;
}

impl HierarchicalLevel for Level2 {
    type NextLevel = Level1;
}

pub struct Table<L: TableLevel> {
    entries: [Entry; ENTRY_COUNT],
    level: PhantomData<L>,
}

impl<L> Table<L> where L: TableLevel {
    /// Clear all entries
    pub fn zero(&mut self) {
        for entry in self.entries.iter_mut() {
            entry.set_unused();
        }
    }
}

impl<L> Table<L> where L: HierarchicalLevel {
    /// Get the next-level table at `index`, if it is present and not a huge page
    pub fn next_table(&self, index: usize) -> Option<&Table<L::NextLevel>> {
        self.next_table_address(index).map(|address| unsafe { &*(address as *const _) })
    }

    pub fn next_table_mut(&mut self, index: usize) -> Option<&mut Table<L::NextLevel>> {
        self.next_table_address(index).map(|address| unsafe { &mut *(address as *mut _) })
    }

    /// Get the next-level table at `index`, allocating and zeroing a new one if necessary
    pub fn next_table_create<A>(&mut self, index: usize, allocator: &mut A) -> &mut Table<L::NextLevel>
        where A: FrameAllocator
    {
        if self.next_table(index).is_none() {
            assert!(!self.entries[index].flags().contains(HUGE_PAGE),
                    "mapping code does not support huge pages");
            let frame = allocator.allocate_frame().expect("no frames available");
            self.entries[index].set(frame, PRESENT | WRITABLE);
            self.next_table_mut(index).unwrap().zero();
        }
        self.next_table_mut(index).unwrap()
    }

    /// Compute the virtual address of the next-level table through the recursive mapping
    fn next_table_address(&self, index: usize) -> Option<usize> {
        let entry_flags = self[index].flags();
        if entry_flags.contains(PRESENT) && !entry_flags.contains(HUGE_PAGE) {
            let table_address = self as *const _ as usize;
            Some((table_address << 9) | (index << 12))
        } else {
            None
        }
    }
}

impl<L> Index<usize> for Table<L> where L: TableLevel {
    type Output = Entry;

    fn index(&self, index: usize) -> &Entry {
        &self.entries[index]
    }
}

impl<L> IndexMut<usize> for Table<L> where L: TableLevel {
    fn index_mut(&mut self, index: usize) -> &mut Entry {
        &mut self.entries[index]
    }
}
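The address computation in next_table_address depends on the recursive mapping assumed by the P4 constant: the last P4 entry points back at the P4 frame itself, so shifting a table's virtual address left by 9 bits discards one level of recursion, and OR-ing in `index << 12` selects the child table. A worked sketch of that arithmetic, not part of this commit, assuming the standard recursive layout:

// Hypothetical walk-through of the recursive address arithmetic.
let p4_address: usize = 0xffffffff_fffff000; // the P4 constant above

// The P3 table reached through P4 entry 2. The shift drops the top
// 9 bits (all ones here), so the result stays in the higher half.
let p3_address = (p4_address << 9) | (2 << 12);
assert_eq!(p3_address, 0xffffffff_ffe02000);

// One more level down: the P2 table reached through P3 entry 7.
let p2_address = (p3_address << 9) | (7 << 12);
assert_eq!(p2_address, 0xffffffff_c0407000);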