Move frame allocation to two global functions; locking happens inside them

This commit is contained in:
Jeremy Soller 2016-08-17 16:26:43 -06:00
parent ebf9766ef5
commit 78432b3875
11 changed files with 97 additions and 140 deletions

View file

@ -5,7 +5,7 @@ use core::intrinsics::{atomic_load, atomic_store};
use x86::controlregs;
use allocator::{HEAP_START, HEAP_SIZE};
use memory::{Frame, FrameAllocator};
use memory::Frame;
use paging::{entry, ActivePageTable, Page, PhysicalAddress, VirtualAddress};
use start::kstart_ap;
@ -24,9 +24,7 @@ pub mod xsdt;
const TRAMPOLINE: usize = 0x7E00;
const AP_STARTUP: usize = 0x8000;
pub fn init_sdt<A>(sdt: &'static Sdt, allocator: &mut A, active_table: &mut ActivePageTable)
where A: FrameAllocator
{
pub fn init_sdt(sdt: &'static Sdt, active_table: &mut ActivePageTable) {
print!(" ");
for &c in sdt.signature.iter() {
print!("{}", c as char);
@ -50,7 +48,7 @@ pub fn init_sdt<A>(sdt: &'static Sdt, allocator: &mut A, active_table: &mut Acti
// Map trampoline
{
if active_table.translate_page(Page::containing_address(VirtualAddress::new(TRAMPOLINE))).is_none() {
active_table.identity_map(Frame::containing_address(PhysicalAddress::new(TRAMPOLINE)), entry::PRESENT | entry::WRITABLE, allocator);
active_table.identity_map(Frame::containing_address(PhysicalAddress::new(TRAMPOLINE)), entry::PRESENT | entry::WRITABLE);
}
}
@ -63,7 +61,7 @@ pub fn init_sdt<A>(sdt: &'static Sdt, allocator: &mut A, active_table: &mut Acti
let end_page = Page::containing_address(VirtualAddress::new(stack_end - 1));
for page in Page::range_inclusive(start_page, end_page) {
active_table.map(page, entry::WRITABLE | entry::NO_EXECUTE, allocator);
active_table.map(page, entry::WRITABLE | entry::NO_EXECUTE);
}
}
*/
@ -113,9 +111,7 @@ pub fn init_sdt<A>(sdt: &'static Sdt, allocator: &mut A, active_table: &mut Acti
}
/// Parse the ACPI tables to gather CPU, interrupt, and timer information
pub unsafe fn init<A>(allocator: &mut A, active_table: &mut ActivePageTable) -> Option<Acpi>
where A: FrameAllocator
{
pub unsafe fn init(active_table: &mut ActivePageTable) -> Option<Acpi> {
let start_addr = 0xE0000;
let end_addr = 0xFFFFF;
@ -125,7 +121,7 @@ pub unsafe fn init<A>(allocator: &mut A, active_table: &mut ActivePageTable) ->
let end_frame = Frame::containing_address(PhysicalAddress::new(end_addr));
for frame in Frame::range_inclusive(start_frame, end_frame) {
if active_table.translate_page(Page::containing_address(VirtualAddress::new(frame.start_address().get()))).is_none() {
active_table.identity_map(frame, entry::PRESENT | entry::NO_EXECUTE, allocator);
active_table.identity_map(frame, entry::PRESENT | entry::NO_EXECUTE);
}
}
}
@ -134,15 +130,15 @@ pub unsafe fn init<A>(allocator: &mut A, active_table: &mut ActivePageTable) ->
if let Some(rsdp) = RSDP::search(start_addr, end_addr) {
println!("{:?}", rsdp);
let get_sdt = |sdt_address: usize, allocator: &mut A, active_table: &mut ActivePageTable| -> &'static Sdt {
let get_sdt = |sdt_address: usize, active_table: &mut ActivePageTable| -> &'static Sdt {
if active_table.translate_page(Page::containing_address(VirtualAddress::new(sdt_address))).is_none() {
let sdt_frame = Frame::containing_address(PhysicalAddress::new(sdt_address));
active_table.identity_map(sdt_frame, entry::PRESENT | entry::NO_EXECUTE, allocator);
active_table.identity_map(sdt_frame, entry::PRESENT | entry::NO_EXECUTE);
}
&*(sdt_address as *const Sdt)
};
let rxsdt = get_sdt(rsdp.sdt_address(), allocator, active_table);
let rxsdt = get_sdt(rsdp.sdt_address(), active_table);
for &c in rxsdt.signature.iter() {
print!("{}", c as char);
@ -150,13 +146,13 @@ pub unsafe fn init<A>(allocator: &mut A, active_table: &mut ActivePageTable) ->
println!(":");
if let Some(rsdt) = Rsdt::new(rxsdt) {
for sdt_address in rsdt.iter() {
let sdt = get_sdt(sdt_address, allocator, active_table);
init_sdt(sdt, allocator, active_table);
let sdt = get_sdt(sdt_address, active_table);
init_sdt(sdt, active_table);
}
} else if let Some(xsdt) = Xsdt::new(rxsdt) {
for sdt_address in xsdt.iter() {
let sdt = get_sdt(sdt_address, allocator, active_table);
init_sdt(sdt, allocator, active_table);
let sdt = get_sdt(sdt_address, active_table);
init_sdt(sdt, active_table);
}
} else {
println!("UNKNOWN RSDT OR XSDT SIGNATURE");

View file

@ -11,7 +11,7 @@ pub struct Pio<T> {
impl<T> Pio<T> {
/// Create a PIO from a given port
pub fn new(port: u16) -> Self {
pub const fn new(port: u16) -> Self {
Pio::<T> {
port: port,
value: PhantomData,

View file

@ -15,14 +15,17 @@ extern crate bitflags;
extern crate spin;
extern crate x86;
use spin::Mutex;
/// Print to console
#[macro_export]
macro_rules! print {
($($arg:tt)*) => ({
{
use core::fmt::Write;
let _ = write!($crate::serial::SerialConsole::new(), $($arg)*);
//TODO: let mut serial_console = $crate::serial::SERIAL_CONSOLE.lock();
let mut serial_console = $crate::serial::SerialConsole::new();
let _ = write!(serial_console, $($arg)*);
drop(serial_console);
}
});
}
@ -153,5 +156,3 @@ pub mod start;
/// Task state segment
pub mod tss;
pub static ALLOCATOR: Mutex<Option<memory::AreaFrameAllocator>> = Mutex::new(None);

View file

@ -76,6 +76,6 @@ impl FrameAllocator for AreaFrameAllocator {
}
fn deallocate_frame(&mut self, frame: Frame) {
println!("Leak frame: {:?}", frame);
//println!("Leak frame: {:?}", frame);
}
}

View file

@ -3,7 +3,9 @@
pub use paging::{PAGE_SIZE, PhysicalAddress};
pub use self::area_frame_alloc::AreaFrameAllocator;
use self::area_frame_alloc::AreaFrameAllocator;
use spin::Mutex;
pub mod area_frame_alloc;
@ -52,9 +54,11 @@ impl Iterator for MemoryAreaIter {
}
}
static ALLOCATOR: Mutex<Option<AreaFrameAllocator>> = Mutex::new(None);
/// Init memory module
/// Must be called once, and only once.
pub unsafe fn init(kernel_start: usize, kernel_end: usize) -> AreaFrameAllocator {
pub unsafe fn init(kernel_start: usize, kernel_end: usize) {
// Copy memory map from bootloader location
for (i, mut entry) in MEMORY_MAP.iter_mut().enumerate() {
*entry = *(0x500 as *const MemoryArea).offset(i as isize);
@ -63,7 +67,25 @@ pub unsafe fn init(kernel_start: usize, kernel_end: usize) -> AreaFrameAllocator
}
}
AreaFrameAllocator::new(kernel_start, kernel_end, MemoryAreaIter::new(MEMORY_AREA_FREE))
*ALLOCATOR.lock() = Some(AreaFrameAllocator::new(kernel_start, kernel_end, MemoryAreaIter::new(MEMORY_AREA_FREE)));
}
/// Allocate a physical frame from the global frame allocator.
///
/// Locks the global `ALLOCATOR`; panics if `init` has not yet been
/// called to install an allocator.
pub fn allocate_frame() -> Option<Frame> {
    match *ALLOCATOR.lock() {
        Some(ref mut allocator) => allocator.allocate_frame(),
        None => panic!("frame allocator not initialized"),
    }
}
/// Return a physical frame to the global frame allocator.
///
/// Locks the global `ALLOCATOR`; panics if `init` has not yet been
/// called to install an allocator.
pub fn deallocate_frame(frame: Frame) {
    match *ALLOCATOR.lock() {
        Some(ref mut allocator) => allocator.deallocate_frame(frame),
        None => panic!("frame allocator not initialized"),
    }
}
/// A memory map area

View file

@ -1,6 +1,6 @@
use core::ptr::Unique;
use memory::{Frame, FrameAllocator};
use memory::{allocate_frame, deallocate_frame, Frame};
use super::{Page, PAGE_SIZE, PhysicalAddress, VirtualAddress};
use super::entry::{self, EntryFlags};
@ -27,37 +27,29 @@ impl Mapper {
}
/// Map a page to a frame
pub fn map_to<A>(&mut self, page: Page, frame: Frame, flags: EntryFlags, allocator: &mut A)
where A: FrameAllocator
{
let mut p3 = self.p4_mut().next_table_create(page.p4_index(), allocator);
let mut p2 = p3.next_table_create(page.p3_index(), allocator);
let mut p1 = p2.next_table_create(page.p2_index(), allocator);
pub fn map_to(&mut self, page: Page, frame: Frame, flags: EntryFlags) {
let mut p3 = self.p4_mut().next_table_create(page.p4_index());
let mut p2 = p3.next_table_create(page.p3_index());
let mut p1 = p2.next_table_create(page.p2_index());
assert!(p1[page.p1_index()].is_unused());
p1[page.p1_index()].set(frame, flags | entry::PRESENT);
}
/// Map a page to the next free frame
pub fn map<A>(&mut self, page: Page, flags: EntryFlags, allocator: &mut A)
where A: FrameAllocator
{
let frame = allocator.allocate_frame().expect("out of memory");
self.map_to(page, frame, flags, allocator)
pub fn map(&mut self, page: Page, flags: EntryFlags) {
let frame = allocate_frame().expect("out of memory");
self.map_to(page, frame, flags)
}
/// Identity map a frame
pub fn identity_map<A>(&mut self, frame: Frame, flags: EntryFlags, allocator: &mut A)
where A: FrameAllocator
{
pub fn identity_map(&mut self, frame: Frame, flags: EntryFlags) {
let page = Page::containing_address(VirtualAddress::new(frame.start_address().get()));
self.map_to(page, frame, flags, allocator)
self.map_to(page, frame, flags)
}
/// Unmap a page
pub fn unmap<A>(&mut self, page: Page, allocator: &mut A)
where A: FrameAllocator
{
pub fn unmap(&mut self, page: Page) {
assert!(self.translate(page.start_address()).is_some());
let p1 = self.p4_mut()
@ -68,7 +60,7 @@ impl Mapper {
let frame = p1[page.p1_index()].pointed_frame().unwrap();
p1[page.p1_index()].set_unused();
// TODO free p(1,2,3) table if empty
allocator.deallocate_frame(frame);
deallocate_frame(frame);
}
pub fn translate_page(&self, page: Page) -> Option<Frame> {

View file

@ -3,7 +3,7 @@
use core::ops::{Deref, DerefMut};
use memory::{Frame, FrameAllocator};
use memory::{allocate_frame, Frame};
use self::entry::{EntryFlags, PRESENT, WRITABLE, NO_EXECUTE};
use self::mapper::Mapper;
@ -21,7 +21,7 @@ pub const ENTRY_COUNT: usize = 512;
pub const PAGE_SIZE: usize = 4096;
/// Initialize paging
pub unsafe fn init<A>(stack_start: usize, stack_end: usize, allocator: &mut A) -> ActivePageTable where A: FrameAllocator {
pub unsafe fn init(stack_start: usize, stack_end: usize) -> ActivePageTable {
extern {
/// The starting byte of the text (code) data segment.
static mut __text_start: u8;
@ -43,10 +43,10 @@ pub unsafe fn init<A>(stack_start: usize, stack_end: usize, allocator: &mut A) -
let mut active_table = ActivePageTable::new();
let mut temporary_page = TemporaryPage::new(Page::containing_address(VirtualAddress::new(0x80000000)), allocator);
let mut temporary_page = TemporaryPage::new(Page::containing_address(VirtualAddress::new(0x80000000)));
let mut new_table = {
let frame = allocator.allocate_frame().expect("no more frames");
let frame = allocate_frame().expect("no more frames");
InactivePageTable::new(frame, &mut active_table, &mut temporary_page)
};
@ -55,7 +55,7 @@ pub unsafe fn init<A>(stack_start: usize, stack_end: usize, allocator: &mut A) -
let start_frame = Frame::containing_address(PhysicalAddress::new(start));
let end_frame = Frame::containing_address(PhysicalAddress::new(end - 1));
for frame in Frame::range_inclusive(start_frame, end_frame) {
mapper.identity_map(frame, flags, allocator);
mapper.identity_map(frame, flags);
}
};

View file

@ -4,7 +4,7 @@
use core::marker::PhantomData;
use core::ops::{Index, IndexMut};
use memory::FrameAllocator;
use memory::{allocate_frame, deallocate_frame};
use super::entry::*;
use super::ENTRY_COUNT;
@ -61,13 +61,11 @@ impl<L> Table<L> where L: HierarchicalLevel {
self.next_table_address(index).map(|address| unsafe { &mut *(address as *mut _) })
}
pub fn next_table_create<A>(&mut self, index: usize, allocator: &mut A) -> &mut Table<L::NextLevel>
where A: FrameAllocator
{
pub fn next_table_create(&mut self, index: usize) -> &mut Table<L::NextLevel> {
if self.next_table(index).is_none() {
assert!(!self.entries[index].flags().contains(HUGE_PAGE),
"mapping code does not support huge pages");
let frame = allocator.allocate_frame().expect("no frames available");
let frame = allocate_frame().expect("no frames available");
self.entries[index].set(frame, PRESENT | WRITABLE);
self.next_table_mut(index).unwrap().zero();
}

View file

@ -9,24 +9,20 @@ use super::table::{Table, Level1};
pub struct TemporaryPage {
page: Page,
allocator: TinyAllocator,
}
impl TemporaryPage {
pub fn new<A>(page: Page, allocator: &mut A) -> TemporaryPage where A: FrameAllocator {
pub fn new(page: Page) -> TemporaryPage {
TemporaryPage {
page: page,
allocator: TinyAllocator::new(allocator),
}
}
/// Maps the temporary page to the given frame in the active table.
/// Returns the start address of the temporary page.
pub fn map(&mut self, frame: Frame, flags: EntryFlags, active_table: &mut ActivePageTable) -> VirtualAddress {
println!("map {:?}", frame);
assert!(active_table.translate_page(self.page).is_none(),
"temporary page is already mapped");
active_table.map_to(self.page, frame, flags, &mut self.allocator);
assert!(active_table.translate_page(self.page).is_none(), "temporary page is already mapped");
active_table.map_to(self.page, frame, flags);
self.page.start_address()
}
@ -38,44 +34,6 @@ impl TemporaryPage {
/// Unmaps the temporary page in the active table.
pub fn unmap(&mut self, active_table: &mut ActivePageTable) {
println!("unmap {:?}", self.page);
active_table.unmap(self.page, &mut self.allocator)
}
}
struct TinyAllocator([Option<Frame>; 3]);
impl TinyAllocator {
fn new<A>(allocator: &mut A) -> TinyAllocator where A: FrameAllocator {
let mut f = || allocator.allocate_frame();
let frames = [f(), f(), f()];
TinyAllocator(frames)
}
}
impl FrameAllocator for TinyAllocator {
fn allocate_frame(&mut self) -> Option<Frame> {
for frame_option in &mut self.0 {
if frame_option.is_some() {
println!("Allocate {:?}", frame_option);
return frame_option.take();
}
}
None
}
fn deallocate_frame(&mut self, frame: Frame) {
for frame_option in &mut self.0 {
if frame_option.is_none() {
println!("Deallocate {:?}", frame);
*frame_option = Some(frame);
return;
}
}
println!("Cannot dealloc {:?}", frame);
for frame_option in &self.0 {
println!("Already dealloc {:?}", frame_option)
}
//panic!("Tiny allocator can hold only 3 frames. {:?}", frame);
active_table.unmap(self.page)
}
}

View file

@ -1,13 +1,17 @@
use core::fmt;
use spin::Mutex;
use super::io::{Io, Pio};
pub static SERIAL_CONSOLE: Mutex<SerialConsole> = Mutex::new(SerialConsole::new());
pub struct SerialConsole {
status: Pio<u8>,
data: Pio<u8>
}
impl SerialConsole {
pub fn new() -> SerialConsole {
pub const fn new() -> SerialConsole {
SerialConsole {
status: Pio::new(0x3F8 + 5),
data: Pio::new(0x3F8)

View file

@ -53,32 +53,24 @@ pub unsafe extern fn kstart() -> ! {
idt::init();
// Initialize memory management
*::ALLOCATOR.lock() = Some(memory::init(0, &__bss_end as *const u8 as usize));
memory::init(0, &__bss_end as *const u8 as usize);
if let Some(ref mut allocator) = *::ALLOCATOR.lock() {
// TODO: allocate a stack
let stack_start = 0x00080000;
let stack_end = 0x0009F000;
// Initialize paging
let mut active_table = paging::init(stack_start, stack_end, allocator);
let mut active_table = paging::init(stack_start, stack_end);
// Read ACPI tables
acpi::init(allocator, &mut active_table);
acpi::init(&mut active_table);
// Map heap
let heap_start_page = Page::containing_address(VirtualAddress::new(HEAP_START));
let heap_end_page = Page::containing_address(VirtualAddress::new(HEAP_START + HEAP_SIZE-1));
for page in Page::range_inclusive(heap_start_page, heap_end_page) {
active_table.map(page, entry::WRITABLE | entry::NO_EXECUTE, allocator);
}
}
}
for i in 0..10 {
if let Some(ref mut allocator) = *::ALLOCATOR.lock() {
println!("BP: {:?}", allocator.allocate_frame());
active_table.map(page, entry::WRITABLE | entry::NO_EXECUTE);
}
}
@ -87,28 +79,22 @@ pub unsafe extern fn kstart() -> ! {
/// Entry to rust for an AP
pub unsafe extern fn kstart_ap(stack_start: usize, stack_end: usize) -> ! {
{
// Set up GDT for AP
gdt::init_ap();
// Set up IDT for AP
idt::init_ap();
if let Some(ref mut allocator) = *::ALLOCATOR.lock() {
// Initialize paging
let mut active_table = paging::init(stack_start, stack_end, allocator);
let mut active_table = paging::init(stack_start, stack_end);
// Map heap
let heap_start_page = Page::containing_address(VirtualAddress::new(HEAP_START));
let heap_end_page = Page::containing_address(VirtualAddress::new(HEAP_START + HEAP_SIZE-1));
for page in Page::range_inclusive(heap_start_page, heap_end_page) {
active_table.map(page, entry::WRITABLE | entry::NO_EXECUTE, allocator);
}
}
for i in 0..10 {
if let Some(ref mut allocator) = *::ALLOCATOR.lock() {
println!("AP: {:?}", allocator.allocate_frame());
active_table.map(page, entry::WRITABLE | entry::NO_EXECUTE);
}
}