diff --git a/arch/x86_64/src/paging/mapper.rs b/arch/x86_64/src/paging/mapper.rs
index dbbaa60..5b7665a 100644
--- a/arch/x86_64/src/paging/mapper.rs
+++ b/arch/x86_64/src/paging/mapper.rs
@@ -63,8 +63,6 @@ impl Mapper {
/// Unmap a page
pub fn unmap(&mut self, page: Page) {
- assert!(self.translate(page.start_address()).is_some());
-
let p1 = self.p4_mut()
.next_table_mut(page.p4_index())
.and_then(|p3| p3.next_table_mut(page.p3_index()))
@@ -76,6 +74,18 @@ impl Mapper {
deallocate_frame(frame);
}
+ /// Unmap a page, returning the backing frame without freeing it (ownership of the frame passes to the caller)
+ pub fn unmap_return(&mut self, page: Page) -> Frame {
+ let p1 = self.p4_mut()
+ .next_table_mut(page.p4_index())
+ .and_then(|p3| p3.next_table_mut(page.p3_index()))
+ .and_then(|p2| p2.next_table_mut(page.p2_index()))
+ .expect("mapping code does not support huge pages");
+ let frame = p1[page.p1_index()].pointed_frame().unwrap(); // NOTE(review): unwrap panics if the page was not mapped; an expect() with a message would aid debugging
+ p1[page.p1_index()].set_unused(); // clear the entry; the frame itself stays allocated
+ frame
+ }
+
pub fn translate_page(&self, page: Page) -> Option {
self.p4().next_table(page.p4_index())
.and_then(|p3| p3.next_table(page.p3_index()))
diff --git a/kernel/context/memory.rs b/kernel/context/memory.rs
index ab9033b..e0eaa5f 100644
--- a/kernel/context/memory.rs
+++ b/kernel/context/memory.rs
@@ -1,6 +1,7 @@
use arch::externs::memset;
-use arch::paging::{ActivePageTable, Page, PageIter, VirtualAddress};
+use arch::paging::{ActivePageTable, InactivePageTable, Page, PageIter, VirtualAddress};
use arch::paging::entry::EntryFlags;
+use arch::paging::temporary_page::TemporaryPage;
#[derive(Debug)]
pub struct Memory {
@@ -36,7 +37,7 @@ impl Memory {
Page::range_inclusive(start_page, end_page)
}
- pub fn map(&mut self, flush: bool, clear: bool) {
+ fn map(&mut self, flush: bool, clear: bool) {
let mut active_table = unsafe { ActivePageTable::new() };
let mut flush_all = false;
@@ -61,7 +62,7 @@ impl Memory {
}
}
- pub fn unmap(&mut self, flush: bool) {
+ fn unmap(&mut self, flush: bool) {
let mut active_table = unsafe { ActivePageTable::new() };
let mut flush_all = false;
@@ -80,6 +81,34 @@ impl Memory {
}
}
+ /// Move this region's frames into a new page table without copying the data;
+ /// the region's starting virtual address may be changed at the same time.
+ pub fn move_to(&mut self, new_start: VirtualAddress, new_table: &mut InactivePageTable, temporary_page: &mut TemporaryPage, flush: bool) {
+ let mut active_table = unsafe { ActivePageTable::new() }; // NOTE(review): assumes this region is currently mapped in the active table
+
+ let mut flush_all = false;
+
+ for page in self.pages() {
+ let frame = active_table.unmap_return(page); // take the frame out of the current table without freeing it
+
+ active_table.with(new_table, temporary_page, |mapper| {
+ let new_page = Page::containing_address(VirtualAddress::new(page.start_address().get() - self.start.get() + new_start.get())); // preserve the page's offset within the region
+ mapper.map_to(new_page, frame, self.flags);
+ });
+
+ if flush {
+ //active_table.flush(page);
+ flush_all = true; // per-page flush not implemented; fall back to one full flush at the end
+ }
+ }
+
+ if flush_all {
+ active_table.flush_all();
+ }
+
+ self.start = new_start; // the region now lives at the new address
+ }
+
pub fn remap(&mut self, new_flags: EntryFlags, flush: bool) {
let mut active_table = unsafe { ActivePageTable::new() };
@@ -101,36 +130,6 @@ impl Memory {
self.flags = new_flags;
}
- pub fn replace(&mut self, new_start: VirtualAddress, flush: bool) {
- let mut active_table = unsafe { ActivePageTable::new() };
-
- let mut flush_all = false;
-
- for page in self.pages() {
- active_table.unmap(page);
-
- if flush {
- //active_table.flush(page);
- flush_all = true;
- }
- }
-
- self.start = new_start;
-
- for page in self.pages() {
- active_table.map(page, self.flags);
-
- if flush {
- //active_table.flush(page);
- flush_all = true;
- }
- }
-
- if flush_all {
- active_table.flush_all();
- }
- }
-
pub fn resize(&mut self, new_size: usize, flush: bool, clear: bool) {
let mut active_table = unsafe { ActivePageTable::new() };
diff --git a/kernel/syscall/process.rs b/kernel/syscall/process.rs
index c7ff1ee..d760d5c 100644
--- a/kernel/syscall/process.rs
+++ b/kernel/syscall/process.rs
@@ -4,7 +4,9 @@ use core::mem;
use core::str;
use arch;
-use arch::paging::{VirtualAddress, entry};
+use arch::memory::allocate_frame;
+use arch::paging::{ActivePageTable, InactivePageTable, Page, VirtualAddress, entry};
+use arch::paging::temporary_page::TemporaryPage;
use context;
use elf;
use syscall::{self, Error, Result};
@@ -67,7 +69,7 @@ pub fn clone(flags: usize, stack_base: usize) -> Result {
kstack_option = Some(new_stack);
}
if let Some(ref stack) = context.stack {
- let mut new_stack = context::memory::Memory::new(
+ let new_stack = context::memory::Memory::new(
VirtualAddress::new(arch::USER_TMP_STACK_OFFSET),
stack.size(),
entry::NO_EXECUTE | entry::WRITABLE | entry::USER_ACCESSIBLE,
@@ -79,7 +81,6 @@ pub fn clone(flags: usize, stack_base: usize) -> Result {
stack.start_address().get() as *const u8,
stack.size());
}
- new_stack.unmap(true);
stack_option = Some(new_stack);
}
}
@@ -91,19 +92,39 @@ pub fn clone(flags: usize, stack_base: usize) -> Result {
let context_lock = contexts.new_context()?;
let mut context = context_lock.write();
context.arch = arch;
+
+ let mut active_table = unsafe { ActivePageTable::new() };
+
+ let mut temporary_page = TemporaryPage::new(Page::containing_address(VirtualAddress::new(0x8_0000_0000))); // NOTE(review): fixed scratch address; verify nothing else maps 0x8_0000_0000
+
+ let mut new_table = {
+ let frame = allocate_frame().expect("no more frames in syscall::clone new_table");
+ InactivePageTable::new(frame, &mut active_table, &mut temporary_page)
+ };
+
+ // Share the kernel's top-level mapping with the new table so kernel code stays mapped
+ let kernel_frame = active_table.p4()[510].pointed_frame().expect("kernel table not mapped"); // NOTE(review): assumes P4 slot 510 holds the kernel mapping -- confirm against the memory layout
+ active_table.with(&mut new_table, &mut temporary_page, |mapper| {
+ mapper.p4_mut()[510].set(kernel_frame, entry::PRESENT | entry::WRITABLE);
+ });
+
if let Some(stack) = kstack_option.take() {
context.arch.set_stack(stack.as_ptr() as usize + offset);
context.kstack = Some(stack);
}
+
if let Some(mut stack) = stack_option.take() {
- //stack.replace(VirtualAddress::new(arch::USER_STACK_OFFSET), true);
+ stack.move_to(VirtualAddress::new(arch::USER_STACK_OFFSET), &mut new_table, &mut temporary_page, true); // relocate the copied stack into the child's address space
context.stack = Some(stack);
}
+
+ context.arch.set_page_table(unsafe { new_table.address() });
+
context.blocked = false;
pid = context.id;
}
- //unsafe { context::switch(); }
+ unsafe { context::switch(); } // NOTE(review): switch re-enabled; the new context's write lock is released at the end of the block above
Ok(pid)
}