diff --git a/arch/x86_64/src/acpi/dmar/mod.rs b/arch/x86_64/src/acpi/dmar/mod.rs
index d13184f..02aac83 100644
--- a/arch/x86_64/src/acpi/dmar/mod.rs
+++ b/arch/x86_64/src/acpi/dmar/mod.rs
@@ -59,7 +59,8 @@ pub struct DmarDrhd {
 
 impl DmarDrhd {
     pub fn get(&self, active_table: &mut ActivePageTable) -> &'static mut Drhd {
-        active_table.identity_map(Frame::containing_address(PhysicalAddress::new(self.base as usize)), entry::PRESENT | entry::WRITABLE | entry::NO_EXECUTE);
+        let result = active_table.identity_map(Frame::containing_address(PhysicalAddress::new(self.base as usize)), entry::PRESENT | entry::WRITABLE | entry::NO_EXECUTE);
+        result.flush(active_table);
         unsafe { &mut *(self.base as *mut Drhd) }
     }
 }
diff --git a/arch/x86_64/src/acpi/mod.rs b/arch/x86_64/src/acpi/mod.rs
index 87f4416..092f609 100644
--- a/arch/x86_64/src/acpi/mod.rs
+++ b/arch/x86_64/src/acpi/mod.rs
@@ -52,8 +52,8 @@ pub fn init_sdt(sdt: &'static Sdt, active_table: &mut ActivePageTable) {
         let trampoline_page = Page::containing_address(VirtualAddress::new(TRAMPOLINE));
 
         // Map trampoline
-        active_table.map_to(trampoline_page, trampoline_frame, entry::PRESENT | entry::WRITABLE);
-        active_table.flush(trampoline_page);
+        let result = active_table.map_to(trampoline_page, trampoline_frame, entry::PRESENT | entry::WRITABLE);
+        result.flush(active_table);
 
         for madt_entry in madt.iter() {
             println!(" {:?}", madt_entry);
@@ -136,8 +136,8 @@ pub fn init_sdt(sdt: &'static Sdt, active_table: &mut ActivePageTable) {
         }
 
         // Unmap trampoline
-        active_table.unmap(trampoline_page);
-        active_table.flush(trampoline_page);
+        let result = active_table.unmap(trampoline_page);
+        result.flush(active_table);
     } else if let Some(dmar) = Dmar::new(sdt) {
         println!(": {}: {}", dmar.addr_width, dmar.flags);
@@ -173,8 +173,8 @@ pub unsafe fn init(active_table: &mut ActivePageTable) -> Option<Acpi> {
             let end_frame = Frame::containing_address(PhysicalAddress::new(end_addr));
             for frame in Frame::range_inclusive(start_frame, end_frame) {
                 let page = Page::containing_address(VirtualAddress::new(frame.start_address().get()));
-                active_table.map_to(page, frame, entry::PRESENT | entry::NO_EXECUTE);
-                active_table.flush(page);
+                let result = active_table.map_to(page, frame, entry::PRESENT | entry::NO_EXECUTE);
+                result.flush(active_table);
             }
         }
@@ -184,8 +184,8 @@ pub unsafe fn init(active_table: &mut ActivePageTable) -> Option<Acpi> {
         let mapped = if active_table.translate_page(Page::containing_address(VirtualAddress::new(sdt_address))).is_none() {
             let sdt_frame = Frame::containing_address(PhysicalAddress::new(sdt_address));
             let sdt_page = Page::containing_address(VirtualAddress::new(sdt_address));
-            active_table.map_to(sdt_page, sdt_frame, entry::PRESENT | entry::NO_EXECUTE);
-            active_table.flush(sdt_page);
+            let result = active_table.map_to(sdt_page, sdt_frame, entry::PRESENT | entry::NO_EXECUTE);
+            result.flush(active_table);
             true
         } else {
             false
@@ -198,8 +198,8 @@ pub unsafe fn init(active_table: &mut ActivePageTable) -> Option<Acpi> {
         drop(sdt);
         if mapped {
             let sdt_page = Page::containing_address(VirtualAddress::new(sdt_address));
-            active_table.unmap(sdt_page);
-            active_table.flush(sdt_page);
+            let result = active_table.unmap(sdt_page);
+            result.flush(active_table);
         }
     };
@@ -236,8 +236,8 @@ pub unsafe fn init(active_table: &mut ActivePageTable) -> Option<Acpi> {
             let end_frame = Frame::containing_address(PhysicalAddress::new(end_addr));
             for frame in Frame::range_inclusive(start_frame, end_frame) {
                 let page = Page::containing_address(VirtualAddress::new(frame.start_address().get()));
-                active_table.unmap(page);
-                active_table.flush(page);
+                let result = active_table.unmap(page);
+                result.flush(active_table);
             }
         }
diff --git a/arch/x86_64/src/device/local_apic.rs b/arch/x86_64/src/device/local_apic.rs
index d352c2c..d55bc3b 100644
--- a/arch/x86_64/src/device/local_apic.rs
+++ b/arch/x86_64/src/device/local_apic.rs
@@ -32,7 +32,8 @@ impl LocalApic {
         if ! self.x2 {
             let page = Page::containing_address(VirtualAddress::new(self.address));
             let frame = Frame::containing_address(PhysicalAddress::new(self.address - ::KERNEL_OFFSET));
-            active_table.map_to(page, frame, entry::PRESENT | entry::WRITABLE | entry::NO_EXECUTE);
+            let result = active_table.map_to(page, frame, entry::PRESENT | entry::WRITABLE | entry::NO_EXECUTE);
+            result.flush(active_table);
         }
 
         self.init_ap();
diff --git a/arch/x86_64/src/lib.rs b/arch/x86_64/src/lib.rs
index 609596b..954535f 100644
--- a/arch/x86_64/src/lib.rs
+++ b/arch/x86_64/src/lib.rs
@@ -1,5 +1,6 @@
 //! Architecture support for x86_64
 
+#![deny(unused_must_use)]
 #![feature(asm)]
 #![feature(concat_idents)]
 #![feature(const_fn)]
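The crate-level `#![deny(unused_must_use)]` is what gives the new flush types teeth: `#[must_use]` on its own only warns when a returned value is discarded, while the deny attribute turns that warning into a build failure. A minimal, self-contained sketch of the interplay, with made-up `Token`/`checkpoint` names rather than kernel items:

```rust
// Promote the unused_must_use lint to a hard error for the whole crate,
// mirroring the attribute added to arch/x86_64/src/lib.rs.
#![deny(unused_must_use)]

#[must_use = "this token must be consumed"]
struct Token(u32);

fn checkpoint(id: u32) -> Token {
    Token(id)
}

fn main() {
    // Binding or otherwise consuming the value satisfies the lint.
    let t = checkpoint(1);
    println!("token {}", t.0);

    // A bare `checkpoint(2);` statement would be rejected at compile time:
    // unused_must_use is normally a warning, but the crate-level deny
    // promotes it to an error, which is how a forgotten flush is caught
    // before it can become a stale-mapping bug.
}
```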
diff --git a/arch/x86_64/src/paging/mapper.rs b/arch/x86_64/src/paging/mapper.rs
index faa5ec6..b18e7a5 100644
--- a/arch/x86_64/src/paging/mapper.rs
+++ b/arch/x86_64/src/paging/mapper.rs
@@ -1,11 +1,81 @@
+use core::mem;
 use core::ptr::Unique;
 
 use memory::{allocate_frame, deallocate_frame, Frame};
 
-use super::{Page, PAGE_SIZE, PhysicalAddress, VirtualAddress};
+use super::{ActivePageTable, Page, PAGE_SIZE, PhysicalAddress, VirtualAddress};
 use super::entry::{self, EntryFlags};
 use super::table::{self, Table, Level4};
 
+/// In order to enforce correct paging operations in the kernel, these types
+/// are returned on any mapping operation to get the code involved to specify
+/// how it intends to flush changes to a page table
+#[must_use = "The page table must be flushed, or the changes unsafely ignored"]
+pub struct MapperFlush(Page);
+
+impl MapperFlush {
+    /// Create a new page flush promise
+    pub fn new(page: Page) -> MapperFlush {
+        MapperFlush(page)
+    }
+
+    /// Flush this page in the active table
+    pub fn flush(self, table: &mut ActivePageTable) {
+        table.flush(self.0);
+        mem::forget(self);
+    }
+
+    /// Ignore the flush. This is unsafe, and a reason should be provided for use
+    pub unsafe fn ignore(self) {
+        mem::forget(self);
+    }
+}
+
+/// A flush cannot be dropped, it must be consumed
+impl Drop for MapperFlush {
+    fn drop(&mut self) {
+        panic!("Mapper flush was not utilized");
+    }
+}
+
+/// To allow for combining multiple flushes into one, we have a way of flushing
+/// the active table, which can consume MapperFlush structs
+#[must_use = "The page table must be flushed, or the changes unsafely ignored"]
+pub struct MapperFlushAll(bool);
+
+impl MapperFlushAll {
+    /// Create a new promise to flush all mappings
+    pub fn new() -> MapperFlushAll {
+        MapperFlushAll(false)
+    }
+
+    /// Consume a single page flush
+    pub fn consume(&mut self, flush: MapperFlush) {
+        self.0 = true;
+        mem::forget(flush);
+    }
+
+    /// Flush the active page table
+    pub fn flush(self, table: &mut ActivePageTable) {
+        if self.0 {
+            table.flush_all();
+        }
+        mem::forget(self);
+    }
+
+    /// Ignore the flush. This is unsafe, and a reason should be provided for use
+    pub unsafe fn ignore(self) {
+        mem::forget(self);
+    }
+}
+
+/// A flush cannot be dropped, it must be consumed
+impl Drop for MapperFlushAll {
+    fn drop(&mut self) {
+        panic!("Mapper flush all was not utilized");
+    }
+}
+
 pub struct Mapper {
     p4: Unique<Table<Level4>>,
 }
@@ -27,7 +97,7 @@ impl Mapper {
     }
 
     /// Map a page to a frame
-    pub fn map_to(&mut self, page: Page, frame: Frame, flags: EntryFlags) {
+    pub fn map_to(&mut self, page: Page, frame: Frame, flags: EntryFlags) -> MapperFlush {
         let mut p3 = self.p4_mut().next_table_create(page.p4_index());
         let mut p2 = p3.next_table_create(page.p3_index());
         let mut p1 = p2.next_table_create(page.p2_index());
@@ -38,31 +108,33 @@ impl Mapper {
             p1[page.p1_index()].address().get(), p1[page.p1_index()].flags(),
             frame.start_address().get(), flags);
         p1[page.p1_index()].set(frame, flags | entry::PRESENT);
+        MapperFlush::new(page)
     }
 
     /// Map a page to the next free frame
-    pub fn map(&mut self, page: Page, flags: EntryFlags) {
+    pub fn map(&mut self, page: Page, flags: EntryFlags) -> MapperFlush {
         let frame = allocate_frame().expect("out of frames");
         self.map_to(page, frame, flags)
     }
 
     /// Update flags for a page
-    pub fn remap(&mut self, page: Page, flags: EntryFlags) {
+    pub fn remap(&mut self, page: Page, flags: EntryFlags) -> MapperFlush {
         let mut p3 = self.p4_mut().next_table_mut(page.p4_index()).expect("failed to remap: no p3");
         let mut p2 = p3.next_table_mut(page.p3_index()).expect("failed to remap: no p2");
         let mut p1 = p2.next_table_mut(page.p2_index()).expect("failed to remap: no p1");
 
         let frame = p1[page.p1_index()].pointed_frame().expect("failed to remap: not mapped");
         p1[page.p1_index()].set(frame, flags | entry::PRESENT);
+        MapperFlush::new(page)
     }
 
     /// Identity map a frame
-    pub fn identity_map(&mut self, frame: Frame, flags: EntryFlags) {
+    pub fn identity_map(&mut self, frame: Frame, flags: EntryFlags) -> MapperFlush {
         let page = Page::containing_address(VirtualAddress::new(frame.start_address().get()));
         self.map_to(page, frame, flags)
     }
 
     /// Unmap a page
-    pub fn unmap(&mut self, page: Page) {
+    pub fn unmap(&mut self, page: Page) -> MapperFlush {
         let p1 = self.p4_mut()
             .next_table_mut(page.p4_index())
             .and_then(|p3| p3.next_table_mut(page.p3_index()))
@@ -72,10 +144,11 @@ impl Mapper {
         p1[page.p1_index()].set_unused();
         // TODO free p(1,2,3) table if empty
         deallocate_frame(frame);
+        MapperFlush::new(page)
     }
 
     /// Unmap a page, return frame without free
-    pub fn unmap_return(&mut self, page: Page) -> Frame {
+    pub fn unmap_return(&mut self, page: Page) -> (MapperFlush, Frame) {
         let p1 = self.p4_mut()
             .next_table_mut(page.p4_index())
             .and_then(|p3| p3.next_table_mut(page.p3_index()))
@@ -83,7 +156,7 @@ impl Mapper {
             .expect("unmap_return does not support huge pages");
         let frame = p1[page.p1_index()].pointed_frame().unwrap();
         p1[page.p1_index()].set_unused();
-        frame
+        (MapperFlush::new(page), frame)
     }
 
     pub fn translate_page(&self, page: Page) -> Option<Frame> {
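Every mapping call now hands the caller one of these tokens, and the token has to be discharged in exactly one of two ways: flush it against the active table, or explicitly and unsafely ignore it. A self-contained sketch of that contract, using stand-in `PageTable`/`Flush` types rather than the kernel's, shows the intended call shapes and why a silently lost token is a hard failure:

```rust
use std::mem;

struct PageTable { flushes: u32 }

#[must_use = "the page table must be flushed, or the change explicitly ignored"]
struct Flush(usize); // records which page still needs a TLB invalidation

impl Flush {
    /// Consume the token by actually flushing the page.
    fn flush(self, table: &mut PageTable) {
        table.flushes += 1;
        println!("flushed page {:#x}", self.0);
        mem::forget(self); // defuse the panic-on-drop guard
    }
    /// Deliberately skip the flush; the caller must justify why it is safe.
    unsafe fn ignore(self) {
        mem::forget(self);
    }
}

impl Drop for Flush {
    fn drop(&mut self) {
        // Reaching this drop means the caller silently lost the token.
        panic!("page {:#x} was remapped but never flushed", self.0);
    }
}

fn map(table: &mut PageTable, page: usize) -> Flush {
    // ... edit the page table here ...
    let _ = table;
    Flush(page)
}

fn main() {
    let mut table = PageTable { flushes: 0 };

    // 1. Normal path: flush the single page that changed.
    let token = map(&mut table, 0x1000);
    token.flush(&mut table);

    // 2. Explicit opt-out, e.g. when the table being edited is not active.
    let token = map(&mut table, 0x2000);
    unsafe { token.ignore(); }

    // 3. Simply dropping a token would panic at runtime, and the crate-wide
    //    deny(unused_must_use) already rejects ignoring the return value
    //    at compile time.
    println!("explicit flushes: {}", table.flushes);
}
```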
diff --git a/arch/x86_64/src/paging/mod.rs b/arch/x86_64/src/paging/mod.rs
index fd289ce..6c133e1 100644
--- a/arch/x86_64/src/paging/mod.rs
+++ b/arch/x86_64/src/paging/mod.rs
@@ -118,52 +118,56 @@ pub unsafe fn init(cpu_id: usize, stack_start: usize, stack_end: usize) -> (ActivePageTable, usize) {
     };
 
     active_table.with(&mut new_table, &mut temporary_page, |mapper| {
+        // Map tdata and tbss
         {
-            // Map tdata and tbss
-            {
-                let size = & __tbss_end as *const _ as usize - & __tdata_start as *const _ as usize;
+            let size = & __tbss_end as *const _ as usize - & __tdata_start as *const _ as usize;
 
-                let start = ::KERNEL_PERCPU_OFFSET + ::KERNEL_PERCPU_SIZE * cpu_id;
-                let end = start + size;
+            let start = ::KERNEL_PERCPU_OFFSET + ::KERNEL_PERCPU_SIZE * cpu_id;
+            let end = start + size;
 
-                let start_page = Page::containing_address(VirtualAddress::new(start));
-                let end_page = Page::containing_address(VirtualAddress::new(end - 1));
-                for page in Page::range_inclusive(start_page, end_page) {
-                    mapper.map(page, PRESENT | GLOBAL | NO_EXECUTE | WRITABLE);
+            let start_page = Page::containing_address(VirtualAddress::new(start));
+            let end_page = Page::containing_address(VirtualAddress::new(end - 1));
+            for page in Page::range_inclusive(start_page, end_page) {
+                let result = mapper.map(page, PRESENT | GLOBAL | NO_EXECUTE | WRITABLE);
+                // The flush can be ignored as this is not the active table. See later active_table.switch
+                unsafe { result.ignore(); }
+            }
+        }
+
+        let mut remap = |start: usize, end: usize, flags: EntryFlags| {
+            if end > start {
+                let start_frame = Frame::containing_address(PhysicalAddress::new(start));
+                let end_frame = Frame::containing_address(PhysicalAddress::new(end - 1));
+                for frame in Frame::range_inclusive(start_frame, end_frame) {
+                    let page = Page::containing_address(VirtualAddress::new(frame.start_address().get() + ::KERNEL_OFFSET));
+                    let result = mapper.map_to(page, frame, flags);
+                    // The flush can be ignored as this is not the active table. See later active_table.switch
+                    unsafe { result.ignore(); }
                 }
             }
+        };
 
-            let mut remap = |start: usize, end: usize, flags: EntryFlags| {
-                if end > start {
-                    let start_frame = Frame::containing_address(PhysicalAddress::new(start));
-                    let end_frame = Frame::containing_address(PhysicalAddress::new(end - 1));
-                    for frame in Frame::range_inclusive(start_frame, end_frame) {
-                        let page = Page::containing_address(VirtualAddress::new(frame.start_address().get() + ::KERNEL_OFFSET));
-                        mapper.map_to(page, frame, flags);
-                    }
-                }
-            };
+        // Remap stack writable, no execute
+        remap(stack_start - ::KERNEL_OFFSET, stack_end - ::KERNEL_OFFSET, PRESENT | GLOBAL | NO_EXECUTE | WRITABLE);
 
-            // Remap stack writable, no execute
-            remap(stack_start - ::KERNEL_OFFSET, stack_end - ::KERNEL_OFFSET, PRESENT | GLOBAL | NO_EXECUTE | WRITABLE);
-
-            // Remap a section with `flags`
-            let mut remap_section = |start: &u8, end: &u8, flags: EntryFlags| {
-                remap(start as *const _ as usize - ::KERNEL_OFFSET, end as *const _ as usize - ::KERNEL_OFFSET, flags);
-            };
-            // Remap text read-only
-            remap_section(& __text_start, & __text_end, PRESENT | GLOBAL);
-            // Remap rodata read-only, no execute
-            remap_section(& __rodata_start, & __rodata_end, PRESENT | GLOBAL | NO_EXECUTE);
-            // Remap data writable, no execute
-            remap_section(& __data_start, & __data_end, PRESENT | GLOBAL | NO_EXECUTE | WRITABLE);
-            // Remap tdata master writable, no execute
-            remap_section(& __tdata_start, & __tdata_end, PRESENT | GLOBAL | NO_EXECUTE);
-            // Remap bss writable, no execute
-            remap_section(& __bss_start, & __bss_end, PRESENT | GLOBAL | NO_EXECUTE | WRITABLE);
-        }
+        // Remap a section with `flags`
+        let mut remap_section = |start: &u8, end: &u8, flags: EntryFlags| {
+            remap(start as *const _ as usize - ::KERNEL_OFFSET, end as *const _ as usize - ::KERNEL_OFFSET, flags);
+        };
+        // Remap text read-only
+        remap_section(& __text_start, & __text_end, PRESENT | GLOBAL);
+        // Remap rodata read-only, no execute
+        remap_section(& __rodata_start, & __rodata_end, PRESENT | GLOBAL | NO_EXECUTE);
+        // Remap data writable, no execute
+        remap_section(& __data_start, & __data_end, PRESENT | GLOBAL | NO_EXECUTE | WRITABLE);
+        // Remap tdata master writable, no execute
+        remap_section(& __tdata_start, & __tdata_end, PRESENT | GLOBAL | NO_EXECUTE);
+        // Remap bss writable, no execute
+        remap_section(& __bss_start, & __bss_end, PRESENT | GLOBAL | NO_EXECUTE | WRITABLE);
     });
 
+    // This switches the active table, which is setup by the bootloader, to a correct table
+    // setup by the lambda above. This will also flush the TLB
     active_table.switch(new_table);
 
     (active_table, init_tcb(cpu_id))
@@ -200,7 +204,9 @@ pub unsafe fn init_ap(cpu_id: usize, bsp_table: usize, stack_start: usize, stack_end: usize) -> usize {
         let start_page = Page::containing_address(VirtualAddress::new(start));
         let end_page = Page::containing_address(VirtualAddress::new(end - 1));
         for page in Page::range_inclusive(start_page, end_page) {
-            mapper.map(page, PRESENT | GLOBAL | NO_EXECUTE | WRITABLE);
+            let result = mapper.map(page, PRESENT | GLOBAL | NO_EXECUTE | WRITABLE);
+            // The flush can be ignored as this is not the active table. See later active_table.switch
+            unsafe { result.ignore(); }
         }
     }
@@ -210,7 +216,9 @@ pub unsafe fn init_ap(cpu_id: usize, bsp_table: usize, stack_start: usize, stack_end: usize) -> usize {
            let end_frame = Frame::containing_address(PhysicalAddress::new(end - 1));
             for frame in Frame::range_inclusive(start_frame, end_frame) {
                 let page = Page::containing_address(VirtualAddress::new(frame.start_address().get() + ::KERNEL_OFFSET));
-                mapper.map_to(page, frame, flags);
+                let result = mapper.map_to(page, frame, flags);
+                // The flush can be ignored as this is not the active table. See later active_table.switch
+                unsafe { result.ignore(); }
             }
         }
     };
@@ -219,6 +227,8 @@ pub unsafe fn init_ap(cpu_id: usize, bsp_table: usize, stack_start: usize, stack_end: usize) -> usize {
         remap(stack_start - ::KERNEL_OFFSET, stack_end - ::KERNEL_OFFSET, PRESENT | GLOBAL | NO_EXECUTE | WRITABLE);
     });
 
+    // This switches the active table, which is setup by the bootloader, to a correct table
+    // setup by the lambda above. This will also flush the TLB
     active_table.switch(new_table);
 
     init_tcb(cpu_id)
diff --git a/arch/x86_64/src/paging/temporary_page.rs b/arch/x86_64/src/paging/temporary_page.rs
index 4afa6c5..aba6703 100644
--- a/arch/x86_64/src/paging/temporary_page.rs
+++ b/arch/x86_64/src/paging/temporary_page.rs
@@ -26,7 +26,8 @@ impl TemporaryPage {
     /// Returns the start address of the temporary page.
     pub fn map(&mut self, frame: Frame, flags: EntryFlags, active_table: &mut ActivePageTable) -> VirtualAddress {
         assert!(active_table.translate_page(self.page).is_none(), "temporary page is already mapped");
-        active_table.map_to(self.page, frame, flags);
+        let result = active_table.map_to(self.page, frame, flags);
+        result.flush(active_table);
         self.page.start_address()
     }
 
@@ -38,6 +39,7 @@ impl TemporaryPage {
 
     /// Unmaps the temporary page in the active table.
     pub fn unmap(&mut self, active_table: &mut ActivePageTable) {
-        active_table.unmap(self.page)
+        let result = active_table.unmap(self.page);
+        result.flush(active_table);
     }
 }
diff --git a/arch/x86_64/src/start.rs b/arch/x86_64/src/start.rs
index c39edf4..4488009 100644
--- a/arch/x86_64/src/start.rs
+++ b/arch/x86_64/src/start.rs
@@ -14,6 +14,7 @@ use idt;
 use interrupt;
 use memory;
 use paging::{self, entry, Page, VirtualAddress};
+use paging::mapper::MapperFlushAll;
 
 /// Test of zero values in BSS.
 static BSS_TEST_ZERO: usize = 0;
@@ -97,13 +98,18 @@ pub unsafe extern fn kstart() -> ! {
     // Setup kernel heap
     {
+        let mut flush_all = MapperFlushAll::new();
+
         // Map heap pages
         let heap_start_page = Page::containing_address(VirtualAddress::new(::KERNEL_HEAP_OFFSET));
         let heap_end_page = Page::containing_address(VirtualAddress::new(::KERNEL_HEAP_OFFSET + ::KERNEL_HEAP_SIZE-1));
         for page in Page::range_inclusive(heap_start_page, heap_end_page) {
-            active_table.map(page, entry::PRESENT | entry::GLOBAL | entry::WRITABLE | entry::NO_EXECUTE);
+            let result = active_table.map(page, entry::PRESENT | entry::GLOBAL | entry::WRITABLE | entry::NO_EXECUTE);
+            flush_all.consume(result);
         }
 
+        flush_all.flush(&mut active_table);
+
         // Init the allocator
         allocator::init(::KERNEL_HEAP_OFFSET, ::KERNEL_HEAP_SIZE);
     }
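The kernel-heap loop above is the batching case: each `map` yields its own `MapperFlush`, and `MapperFlushAll` collapses them into at most one whole-TLB flush once the loop is done. A self-contained sketch of that accumulator pattern, again with toy types rather than the kernel's:

```rust
use std::mem;

#[must_use = "the page table must be flushed"]
struct Flush(usize);

#[must_use = "the batched flush must be performed"]
struct FlushAll(bool);

impl FlushAll {
    fn new() -> FlushAll {
        FlushAll(false)
    }

    /// Swallow one per-page flush and remember that something changed.
    fn consume(&mut self, flush: Flush) {
        self.0 = true;
        mem::forget(flush); // the per-page token no longer needs to fire
    }

    /// Perform a single combined flush if any page was touched, then disarm.
    fn flush(self) {
        if self.0 {
            println!("flush the entire TLB once for the whole batch");
        }
        mem::forget(self);
    }
}

impl Drop for Flush {
    fn drop(&mut self) {
        panic!("page {:#x} was never flushed", self.0);
    }
}

impl Drop for FlushAll {
    fn drop(&mut self) {
        panic!("batched flush was never performed");
    }
}

/// Pretend to edit the page table entry for `page` and return its token.
fn map(page: usize) -> Flush {
    Flush(page)
}

fn main() {
    let mut flush_all = FlushAll::new();
    for i in 0..16 {
        let result = map(i * 4096);
        flush_all.consume(result);
    }
    // One TLB-wide invalidation instead of sixteen per-page ones.
    flush_all.flush();
}
```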
diff --git a/kernel/context/memory.rs b/kernel/context/memory.rs
index 73f808c..e12fbda 100644
--- a/kernel/context/memory.rs
+++ b/kernel/context/memory.rs
@@ -6,6 +6,7 @@ use spin::Mutex;
 use arch::memory::Frame;
 use arch::paging::{ActivePageTable, InactivePageTable, Page, PageIter, PhysicalAddress, VirtualAddress};
 use arch::paging::entry::{self, EntryFlags};
+use arch::paging::mapper::MapperFlushAll;
 use arch::paging::temporary_page::TemporaryPage;
 
 #[derive(Debug)]
@@ -20,19 +21,17 @@ impl Grant {
     pub fn physmap(from: PhysicalAddress, to: VirtualAddress, size: usize, flags: EntryFlags) -> Grant {
         let mut active_table = unsafe { ActivePageTable::new() };
 
-        let mut flush_all = false;
+        let mut flush_all = MapperFlushAll::new();
 
         let start_page = Page::containing_address(to);
         let end_page = Page::containing_address(VirtualAddress::new(to.get() + size - 1));
         for page in Page::range_inclusive(start_page, end_page) {
             let frame = Frame::containing_address(PhysicalAddress::new(page.start_address().get() - to.get() + from.get()));
-            active_table.map_to(page, frame, flags);
-            flush_all = true;
+            let result = active_table.map_to(page, frame, flags);
+            flush_all.consume(result);
         }
 
-        if flush_all {
-            active_table.flush_all();
-        }
+        flush_all.flush(&mut active_table);
 
         Grant {
             start: to,
@@ -59,7 +58,9 @@ impl Grant {
             let end_page = Page::containing_address(VirtualAddress::new(to.get() + size - 1));
             for page in Page::range_inclusive(start_page, end_page) {
                 let frame = frames.pop_front().expect("grant did not find enough frames");
-                mapper.map_to(page, frame, flags);
+                let result = mapper.map_to(page, frame, flags);
+                // Ignore result due to mapping on inactive table
+                unsafe { result.ignore(); }
             }
         });
 
@@ -88,18 +89,16 @@ impl Grant {
 
         let mut active_table = unsafe { ActivePageTable::new() };
 
-        let mut flush_all = false;
+        let mut flush_all = MapperFlushAll::new();
 
         let start_page = Page::containing_address(self.start);
         let end_page = Page::containing_address(VirtualAddress::new(self.start.get() + self.size - 1));
         for page in Page::range_inclusive(start_page, end_page) {
-            active_table.unmap_return(page);
-            flush_all = true;
+            let (result, _frame) = active_table.unmap_return(page);
+            flush_all.consume(result);
         }
 
-        if flush_all {
-            active_table.flush_all();
-        }
+        flush_all.flush(&mut active_table);
 
         self.mapped = false;
     }
@@ -113,7 +112,9 @@ impl Grant {
             let start_page = Page::containing_address(self.start);
             let end_page = Page::containing_address(VirtualAddress::new(self.start.get() + self.size - 1));
             for page in Page::range_inclusive(start_page, end_page) {
-                mapper.unmap_return(page);
+                let (result, _frame) = mapper.unmap_return(page);
+                // This is not the active table, so the flush can be ignored
+                unsafe { result.ignore(); }
             }
         });
 
@@ -164,14 +165,14 @@ pub struct Memory {
 }
 
 impl Memory {
-    pub fn new(start: VirtualAddress, size: usize, flags: EntryFlags, flush: bool, clear: bool) -> Self {
+    pub fn new(start: VirtualAddress, size: usize, flags: EntryFlags, clear: bool) -> Self {
         let mut memory = Memory {
             start: start,
             size: size,
             flags: flags
         };
 
-        memory.map(flush, clear);
+        memory.map(clear);
 
         memory
     }
@@ -198,150 +199,114 @@ impl Memory {
         Page::range_inclusive(start_page, end_page)
     }
 
-    fn map(&mut self, flush: bool, clear: bool) {
+    fn map(&mut self, clear: bool) {
         let mut active_table = unsafe { ActivePageTable::new() };
 
-        let mut flush_all = false;
+        let mut flush_all = MapperFlushAll::new();
 
-        //TODO: Clear pages?
         for page in self.pages() {
-            active_table.map(page, self.flags);
-
-            if flush {
-                //active_table.flush(page);
-                flush_all = true;
-            }
+            let result = active_table.map(page, self.flags);
+            flush_all.consume(result);
         }
 
-        if flush_all {
-            active_table.flush_all();
-        }
+        flush_all.flush(&mut active_table);
 
         if clear {
-            assert!(flush && self.flags.contains(entry::WRITABLE));
+            assert!(self.flags.contains(entry::WRITABLE));
             unsafe {
                 intrinsics::write_bytes(self.start_address().get() as *mut u8, 0, self.size);
             }
         }
     }
 
-    fn unmap(&mut self, flush: bool) {
+    fn unmap(&mut self) {
         let mut active_table = unsafe { ActivePageTable::new() };
 
-        let mut flush_all = false;
+        let mut flush_all = MapperFlushAll::new();
 
         for page in self.pages() {
-            active_table.unmap(page);
-
-            if flush {
-                //active_table.flush(page);
-                flush_all = true;
-            }
+            let result = active_table.unmap(page);
+            flush_all.consume(result);
        }
 
-        if flush_all {
-            active_table.flush_all();
-        }
+        flush_all.flush(&mut active_table);
     }
 
     /// A complicated operation to move a piece of memory to a new page table
     /// It also allows for changing the address at the same time
-    pub fn move_to(&mut self, new_start: VirtualAddress, new_table: &mut InactivePageTable, temporary_page: &mut TemporaryPage, flush: bool) {
+    pub fn move_to(&mut self, new_start: VirtualAddress, new_table: &mut InactivePageTable, temporary_page: &mut TemporaryPage) {
         let mut active_table = unsafe { ActivePageTable::new() };
 
-        let mut flush_all = false;
+        let mut flush_all = MapperFlushAll::new();
 
         for page in self.pages() {
-            let frame = active_table.unmap_return(page);
+            let (result, frame) = active_table.unmap_return(page);
+            flush_all.consume(result);
 
             active_table.with(new_table, temporary_page, |mapper| {
                 let new_page = Page::containing_address(VirtualAddress::new(page.start_address().get() - self.start.get() + new_start.get()));
-                mapper.map_to(new_page, frame, self.flags);
+                let result = mapper.map_to(new_page, frame, self.flags);
+                // This is not the active table, so the flush can be ignored
+                unsafe { result.ignore(); }
             });
-
-            if flush {
-                //active_table.flush(page);
-                flush_all = true;
-            }
         }
 
-        if flush_all {
-            active_table.flush_all();
-        }
+        flush_all.flush(&mut active_table);
 
         self.start = new_start;
     }
 
-    pub fn remap(&mut self, new_flags: EntryFlags, flush: bool) {
+    pub fn remap(&mut self, new_flags: EntryFlags) {
         let mut active_table = unsafe { ActivePageTable::new() };
 
-        let mut flush_all = false;
+        let mut flush_all = MapperFlushAll::new();
 
         for page in self.pages() {
-            active_table.remap(page, new_flags);
-
-            if flush {
-                //active_table.flush(page);
-                flush_all = true;
-            }
+            let result = active_table.remap(page, new_flags);
+            flush_all.consume(result);
        }
 
-        if flush_all {
-            active_table.flush_all();
-        }
+        flush_all.flush(&mut active_table);
 
         self.flags = new_flags;
     }
 
-    pub fn resize(&mut self, new_size: usize, flush: bool, clear: bool) {
+    pub fn resize(&mut self, new_size: usize, clear: bool) {
         let mut active_table = unsafe { ActivePageTable::new() };
 
         //TODO: Calculate page changes to minimize operations
         if new_size > self.size {
-            let mut flush_all = false;
+            let mut flush_all = MapperFlushAll::new();
 
             let start_page = Page::containing_address(VirtualAddress::new(self.start.get() + self.size));
             let end_page = Page::containing_address(VirtualAddress::new(self.start.get() + new_size - 1));
             for page in Page::range_inclusive(start_page, end_page) {
                 if active_table.translate_page(page).is_none() {
-                    active_table.map(page, self.flags);
-
-                    if flush {
-                        //active_table.flush(page);
-                        flush_all = true;
-                    }
+                    let result = active_table.map(page, self.flags);
+                    flush_all.consume(result);
                 }
             }
 
-            if flush_all {
-                active_table.flush_all();
-            }
+            flush_all.flush(&mut active_table);
 
             if clear {
-                assert!(flush);
                 unsafe {
                     intrinsics::write_bytes((self.start.get() + self.size) as *mut u8, 0, new_size - self.size);
                 }
             }
         } else if new_size < self.size {
-            let mut flush_all = false;
+            let mut flush_all = MapperFlushAll::new();
 
             let start_page = Page::containing_address(VirtualAddress::new(self.start.get() + new_size));
             let end_page = Page::containing_address(VirtualAddress::new(self.start.get() + self.size - 1));
             for page in Page::range_inclusive(start_page, end_page) {
                 if active_table.translate_page(page).is_some() {
-                    active_table.unmap(page);
-
-                    if flush {
-                        //active_table.flush(page);
-                        flush_all = true;
-                    }
+                    let result = active_table.unmap(page);
+                    flush_all.consume(result);
                 }
             }
 
-            if flush_all {
-                active_table.flush_all();
-            }
+            flush_all.flush(&mut active_table);
         }
 
         self.size = new_size;
@@ -350,7 +315,7 @@ impl Memory {
 
 impl Drop for Memory {
     fn drop(&mut self) {
-        self.unmap(true);
+        self.unmap();
     }
 }
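The grow/shrink arithmetic in `resize` above works on inclusive page ranges: growing maps the pages covering `[start + old_size, start + new_size - 1]`, shrinking unmaps `[start + new_size, start + old_size - 1]`. A small standalone sketch of that calculation with plain integers and an assumed 4 KiB page size (the kernel's `Page`/`VirtualAddress` types are left out):

```rust
const PAGE_SIZE: usize = 4096; // assumed 4 KiB pages, as on x86_64

/// Inclusive range of page indices backing `size` bytes starting at `addr`.
fn page_range(addr: usize, size: usize) -> Option<(usize, usize)> {
    if size == 0 {
        return None; // nothing to map or unmap
    }
    Some((addr / PAGE_SIZE, (addr + size - 1) / PAGE_SIZE))
}

fn main() {
    let start = 0x8000_0000;

    // Growing from 3 pages to 5: map only the pages covering the new tail
    // [start + old_size, start + new_size - 1], as in the
    // `new_size > self.size` branch of `resize`.
    println!("map {:?}", page_range(start + 3 * PAGE_SIZE, 2 * PAGE_SIZE));

    // Resizing to the current size touches no pages at all.
    println!("map {:?}", page_range(start + 3 * PAGE_SIZE, 0));
}
```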
diff --git a/kernel/syscall/process.rs b/kernel/syscall/process.rs
index aeb7b7d..f8bc6c3 100644
--- a/kernel/syscall/process.rs
+++ b/kernel/syscall/process.rs
@@ -44,7 +44,7 @@ pub fn brk(address: usize) -> Result<usize> {
     //TODO: out of memory errors
     if let Some(ref heap_shared) = context.heap {
         heap_shared.with(|heap| {
-            heap.resize(address - arch::USER_HEAP_OFFSET, true, true);
+            heap.resize(address - arch::USER_HEAP_OFFSET, true);
         });
     } else {
         panic!("user heap not initialized");
@@ -139,7 +139,6 @@ pub fn clone(flags: usize, stack_base: usize) -> Result<usize> {
                         VirtualAddress::new(memory.start_address().get() + arch::USER_TMP_OFFSET),
                         memory.size(),
                         entry::PRESENT | entry::NO_EXECUTE | entry::WRITABLE,
-                        true,
                         false
                     );
 
@@ -149,7 +148,7 @@ pub fn clone(flags: usize, stack_base: usize) -> Result<usize> {
                             memory.size());
                     }
 
-                    new_memory.remap(memory.flags(), true);
+                    new_memory.remap(memory.flags());
                     image.push(new_memory.to_shared());
                 });
             }
@@ -160,7 +159,6 @@ pub fn clone(flags: usize, stack_base: usize) -> Result<usize> {
                         VirtualAddress::new(arch::USER_TMP_HEAP_OFFSET),
                         heap.size(),
                         entry::PRESENT | entry::NO_EXECUTE | entry::WRITABLE,
-                        true,
                         false
                     );
 
@@ -170,7 +168,7 @@ pub fn clone(flags: usize, stack_base: usize) -> Result<usize> {
                             heap.size());
                     }
 
-                    new_heap.remap(heap.flags(), true);
+                    new_heap.remap(heap.flags());
                     heap_option = Some(new_heap.to_shared());
                 });
             }
@@ -181,7 +179,6 @@ pub fn clone(flags: usize, stack_base: usize) -> Result<usize> {
                     VirtualAddress::new(arch::USER_TMP_STACK_OFFSET),
                     stack.size(),
                     entry::PRESENT | entry::NO_EXECUTE | entry::WRITABLE,
-                    true,
                     false
                 );
 
@@ -191,7 +188,7 @@ pub fn clone(flags: usize, stack_base: usize) -> Result<usize> {
                         stack.size());
                 }
 
-                new_stack.remap(stack.flags(), true);
+                new_stack.remap(stack.flags());
                 stack_option = Some(new_stack);
             }
@@ -203,7 +200,6 @@ pub fn clone(flags: usize, stack_base: usize) -> Result<usize> {
                     VirtualAddress::new(arch::USER_TMP_TLS_OFFSET),
                     tls.mem.size(),
                     entry::PRESENT | entry::NO_EXECUTE | entry::WRITABLE,
-                    true,
                     true
                 )
             };
@@ -214,7 +210,7 @@ pub fn clone(flags: usize, stack_base: usize) -> Result<usize> {
                         tls.file_size);
                 }
 
-                new_tls.mem.remap(tls.mem.flags(), true);
+                new_tls.mem.remap(tls.mem.flags());
                 tls_option = Some(new_tls);
             }
@@ -405,7 +401,9 @@ pub fn clone(flags: usize, stack_base: usize) -> Result<usize> {
                 for page in Page::range_inclusive(start_page, end_page) {
                     let frame = active_table.translate_page(page).expect("kernel percpu not mapped");
                     active_table.with(&mut new_table, &mut temporary_page, |mapper| {
-                        mapper.map_to(page, frame, entry::PRESENT | entry::NO_EXECUTE | entry::WRITABLE);
+                        let result = mapper.map_to(page, frame, entry::PRESENT | entry::NO_EXECUTE | entry::WRITABLE);
+                        // Ignore result due to operating on inactive table
+                        unsafe { result.ignore(); }
                     });
                 }
             }
@@ -414,7 +412,7 @@ pub fn clone(flags: usize, stack_base: usize) -> Result<usize> {
             for memory_shared in image.iter_mut() {
                 memory_shared.with(|memory| {
                     let start = VirtualAddress::new(memory.start_address().get() - arch::USER_TMP_OFFSET + arch::USER_OFFSET);
-                    memory.move_to(start, &mut new_table, &mut temporary_page, true);
+                    memory.move_to(start, &mut new_table, &mut temporary_page);
                 });
             }
             context.image = image;
@@ -422,7 +420,7 @@ pub fn clone(flags: usize, stack_base: usize) -> Result<usize> {
             // Move copy of heap
             if let Some(heap_shared) = heap_option {
                 heap_shared.with(|heap| {
-                    heap.move_to(VirtualAddress::new(arch::USER_HEAP_OFFSET), &mut new_table, &mut temporary_page, true);
+                    heap.move_to(VirtualAddress::new(arch::USER_HEAP_OFFSET), &mut new_table, &mut temporary_page);
                 });
                 context.heap = Some(heap_shared);
             }
@@ -430,13 +428,13 @@ pub fn clone(flags: usize, stack_base: usize) -> Result<usize> {
 
             // Setup user stack
             if let Some(mut stack) = stack_option {
-                stack.move_to(VirtualAddress::new(arch::USER_STACK_OFFSET), &mut new_table, &mut temporary_page, true);
+                stack.move_to(VirtualAddress::new(arch::USER_STACK_OFFSET), &mut new_table, &mut temporary_page);
                 context.stack = Some(stack);
             }
 
             // Setup user TLS
             if let Some(mut tls) = tls_option {
-                tls.mem.move_to(VirtualAddress::new(arch::USER_TLS_OFFSET), &mut new_table, &mut temporary_page, true);
+                tls.mem.move_to(VirtualAddress::new(arch::USER_TLS_OFFSET), &mut new_table, &mut temporary_page);
                 context.tls = Some(tls);
             }
@@ -566,7 +564,6 @@ pub fn exec(path: &[u8], arg_ptrs: &[[usize; 2]]) -> Result<usize> {
                         VirtualAddress::new(segment.p_vaddr as usize),
                         segment.p_memsz as usize,
                         entry::NO_EXECUTE | entry::WRITABLE,
-                        true,
                         true
                     );
 
@@ -590,7 +587,7 @@ pub fn exec(path: &[u8], arg_ptrs: &[[usize; 2]]) -> Result<usize> {
                         flags.insert(entry::WRITABLE);
                     }
 
-                    memory.remap(flags, true);
+                    memory.remap(flags);
                     context.image.push(memory.to_shared());
                 } else if segment.p_type == program_header::PT_TLS {
@@ -598,7 +595,6 @@ pub fn exec(path: &[u8], arg_ptrs: &[[usize; 2]]) -> Result<usize> {
                         VirtualAddress::new(arch::USER_TCB_OFFSET),
                         4096,
                         entry::NO_EXECUTE | entry::WRITABLE | entry::USER_ACCESSIBLE,
-                        true,
                         true
                     );
 
@@ -619,7 +615,6 @@ pub fn exec(path: &[u8], arg_ptrs: &[[usize; 2]]) -> Result<usize> {
                 VirtualAddress::new(arch::USER_HEAP_OFFSET),
                 0,
                 entry::NO_EXECUTE | entry::WRITABLE | entry::USER_ACCESSIBLE,
-                true,
                 true
             ).to_shared());
 
@@ -628,7 +623,6 @@ pub fn exec(path: &[u8], arg_ptrs: &[[usize; 2]]) -> Result<usize> {
                 VirtualAddress::new(arch::USER_STACK_OFFSET),
                 arch::USER_STACK_SIZE,
                 entry::NO_EXECUTE | entry::WRITABLE | entry::USER_ACCESSIBLE,
-                true,
                 true
             ));
 
@@ -641,7 +635,6 @@ pub fn exec(path: &[u8], arg_ptrs: &[[usize; 2]]) -> Result<usize> {
                     VirtualAddress::new(arch::USER_TLS_OFFSET),
                     size,
                     entry::NO_EXECUTE | entry::WRITABLE | entry::USER_ACCESSIBLE,
-                    true,
                     true
                 )
             };
@@ -675,7 +668,6 @@ pub fn exec(path: &[u8], arg_ptrs: &[[usize; 2]]) -> Result<usize> {
                 VirtualAddress::new(arch::USER_ARG_OFFSET),
                 arg_size,
                 entry::NO_EXECUTE | entry::WRITABLE,
-                true,
                 true
             );
 
@@ -690,7 +682,7 @@ pub fn exec(path: &[u8], arg_ptrs: &[[usize; 2]]) -> Result<usize> {
                 arg_offset += arg.len();
             }
 
-            memory.remap(entry::NO_EXECUTE | entry::USER_ACCESSIBLE, true);
+            memory.remap(entry::NO_EXECUTE | entry::USER_ACCESSIBLE);
             context.image.push(memory.to_shared());
         }