WIP: Create new page table for clone

This commit is contained in:
Jeremy Soller 2016-09-14 22:21:52 -06:00
parent c000820d72
commit b01a918556
3 changed files with 70 additions and 40 deletions

View file

@ -63,8 +63,6 @@ impl Mapper {
/// Unmap a page /// Unmap a page
pub fn unmap(&mut self, page: Page) { pub fn unmap(&mut self, page: Page) {
assert!(self.translate(page.start_address()).is_some());
let p1 = self.p4_mut() let p1 = self.p4_mut()
.next_table_mut(page.p4_index()) .next_table_mut(page.p4_index())
.and_then(|p3| p3.next_table_mut(page.p3_index())) .and_then(|p3| p3.next_table_mut(page.p3_index()))
@ -76,6 +74,18 @@ impl Mapper {
deallocate_frame(frame); deallocate_frame(frame);
} }
/// Unmap a page, return frame without free
///
/// Like `unmap`, but hands the backing `Frame` to the caller instead of
/// deallocating it — used when the frame is about to be mapped again
/// elsewhere (e.g. `Memory::move_to` remaps it into another page table).
pub fn unmap_return(&mut self, page: Page) -> Frame {
// Walk P4 -> P3 -> P2 to reach the P1 table holding this page's entry.
// The walk has no huge-page handling, hence the expect below.
let p1 = self.p4_mut()
.next_table_mut(page.p4_index())
.and_then(|p3| p3.next_table_mut(page.p3_index()))
.and_then(|p2| p2.next_table_mut(page.p2_index()))
.expect("mapping code does not support huge pages");
// Take the frame out of the entry and clear it. The unwrap assumes the
// page is currently mapped — NOTE(review): unlike `unmap`, there is no
// translate() assertion here; callers must only pass mapped pages.
let frame = p1[page.p1_index()].pointed_frame().unwrap();
p1[page.p1_index()].set_unused();
frame
}
pub fn translate_page(&self, page: Page) -> Option<Frame> { pub fn translate_page(&self, page: Page) -> Option<Frame> {
self.p4().next_table(page.p4_index()) self.p4().next_table(page.p4_index())
.and_then(|p3| p3.next_table(page.p3_index())) .and_then(|p3| p3.next_table(page.p3_index()))

View file

@ -1,6 +1,7 @@
use arch::externs::memset; use arch::externs::memset;
use arch::paging::{ActivePageTable, Page, PageIter, VirtualAddress}; use arch::paging::{ActivePageTable, InactivePageTable, Page, PageIter, VirtualAddress};
use arch::paging::entry::EntryFlags; use arch::paging::entry::EntryFlags;
use arch::paging::temporary_page::TemporaryPage;
#[derive(Debug)] #[derive(Debug)]
pub struct Memory { pub struct Memory {
@ -36,7 +37,7 @@ impl Memory {
Page::range_inclusive(start_page, end_page) Page::range_inclusive(start_page, end_page)
} }
pub fn map(&mut self, flush: bool, clear: bool) { fn map(&mut self, flush: bool, clear: bool) {
let mut active_table = unsafe { ActivePageTable::new() }; let mut active_table = unsafe { ActivePageTable::new() };
let mut flush_all = false; let mut flush_all = false;
@ -61,7 +62,7 @@ impl Memory {
} }
} }
pub fn unmap(&mut self, flush: bool) { fn unmap(&mut self, flush: bool) {
let mut active_table = unsafe { ActivePageTable::new() }; let mut active_table = unsafe { ActivePageTable::new() };
let mut flush_all = false; let mut flush_all = false;
@ -80,6 +81,34 @@ impl Memory {
} }
} }
/// A complicated operation to move a piece of memory to a new page table
/// It also allows for changing the address at the same time
///
/// For every page in this region: the mapping is removed from the active
/// table (keeping the frame alive via `unmap_return`), then the same frame
/// is mapped into `new_table` at the corresponding offset from `new_start`.
/// The region's contents are preserved because the frames themselves move,
/// not their data.
pub fn move_to(&mut self, new_start: VirtualAddress, new_table: &mut InactivePageTable, temporary_page: &mut TemporaryPage, flush: bool) {
let mut active_table = unsafe { ActivePageTable::new() };
let mut flush_all = false;
for page in self.pages() {
// Remove from the active table without freeing the frame.
let frame = active_table.unmap_return(page);
// Temporarily switch the mapper to `new_table` and re-insert the
// frame at the translated address (same offset from the new base).
active_table.with(new_table, temporary_page, |mapper| {
let new_page = Page::containing_address(VirtualAddress::new(page.start_address().get() - self.start.get() + new_start.get()));
mapper.map_to(new_page, frame, self.flags);
});
if flush {
// NOTE(review): per-page TLB invalidation is disabled; a full
// flush below is used instead when any page was touched.
//active_table.flush(page);
flush_all = true;
}
}
if flush_all {
active_table.flush_all();
}
// Record the new base so pages()/size() reflect the moved region.
self.start = new_start;
}
pub fn remap(&mut self, new_flags: EntryFlags, flush: bool) { pub fn remap(&mut self, new_flags: EntryFlags, flush: bool) {
let mut active_table = unsafe { ActivePageTable::new() }; let mut active_table = unsafe { ActivePageTable::new() };
@ -101,36 +130,6 @@ impl Memory {
self.flags = new_flags; self.flags = new_flags;
} }
/// Re-home the region at `new_start` within the active page table by
/// unmapping the old range and mapping a fresh range at the new address.
///
/// NOTE(review): `unmap` frees the old frames, and `map` presumably
/// allocates fresh ones — the memory *contents* are not carried over;
/// confirm that callers copy any data they need before calling this.
pub fn replace(&mut self, new_start: VirtualAddress, flush: bool) {
let mut active_table = unsafe { ActivePageTable::new() };
let mut flush_all = false;
// Tear down the mappings over the current range (frames are freed).
for page in self.pages() {
active_table.unmap(page);
if flush {
// NOTE(review): per-page flush disabled in favor of flush_all.
//active_table.flush(page);
flush_all = true;
}
}
// After this point pages() iterates the NEW range — ordering matters.
self.start = new_start;
// Map the new range with the region's existing flags.
for page in self.pages() {
active_table.map(page, self.flags);
if flush {
//active_table.flush(page);
flush_all = true;
}
}
// Single full TLB flush covers both the unmapped and newly mapped pages.
if flush_all {
active_table.flush_all();
}
}
pub fn resize(&mut self, new_size: usize, flush: bool, clear: bool) { pub fn resize(&mut self, new_size: usize, flush: bool, clear: bool) {
let mut active_table = unsafe { ActivePageTable::new() }; let mut active_table = unsafe { ActivePageTable::new() };

View file

@ -4,7 +4,9 @@ use core::mem;
use core::str; use core::str;
use arch; use arch;
use arch::paging::{VirtualAddress, entry}; use arch::memory::allocate_frame;
use arch::paging::{ActivePageTable, InactivePageTable, Page, VirtualAddress, entry};
use arch::paging::temporary_page::TemporaryPage;
use context; use context;
use elf; use elf;
use syscall::{self, Error, Result}; use syscall::{self, Error, Result};
@ -67,7 +69,7 @@ pub fn clone(flags: usize, stack_base: usize) -> Result<usize> {
kstack_option = Some(new_stack); kstack_option = Some(new_stack);
} }
if let Some(ref stack) = context.stack { if let Some(ref stack) = context.stack {
let mut new_stack = context::memory::Memory::new( let new_stack = context::memory::Memory::new(
VirtualAddress::new(arch::USER_TMP_STACK_OFFSET), VirtualAddress::new(arch::USER_TMP_STACK_OFFSET),
stack.size(), stack.size(),
entry::NO_EXECUTE | entry::WRITABLE | entry::USER_ACCESSIBLE, entry::NO_EXECUTE | entry::WRITABLE | entry::USER_ACCESSIBLE,
@ -79,7 +81,6 @@ pub fn clone(flags: usize, stack_base: usize) -> Result<usize> {
stack.start_address().get() as *const u8, stack.start_address().get() as *const u8,
stack.size()); stack.size());
} }
new_stack.unmap(true);
stack_option = Some(new_stack); stack_option = Some(new_stack);
} }
} }
@ -91,19 +92,39 @@ pub fn clone(flags: usize, stack_base: usize) -> Result<usize> {
let context_lock = contexts.new_context()?; let context_lock = contexts.new_context()?;
let mut context = context_lock.write(); let mut context = context_lock.write();
context.arch = arch; context.arch = arch;
let mut active_table = unsafe { ActivePageTable::new() };
let mut temporary_page = TemporaryPage::new(Page::containing_address(VirtualAddress::new(0x8_0000_0000)));
let mut new_table = {
let frame = allocate_frame().expect("no more frames in syscall::clone new_table");
InactivePageTable::new(frame, &mut active_table, &mut temporary_page)
};
// Copy kernel mapping
let kernel_frame = active_table.p4()[510].pointed_frame().expect("kernel table not mapped");
active_table.with(&mut new_table, &mut temporary_page, |mapper| {
mapper.p4_mut()[510].set(kernel_frame, entry::PRESENT | entry::WRITABLE);
});
if let Some(stack) = kstack_option.take() { if let Some(stack) = kstack_option.take() {
context.arch.set_stack(stack.as_ptr() as usize + offset); context.arch.set_stack(stack.as_ptr() as usize + offset);
context.kstack = Some(stack); context.kstack = Some(stack);
} }
if let Some(mut stack) = stack_option.take() { if let Some(mut stack) = stack_option.take() {
//stack.replace(VirtualAddress::new(arch::USER_STACK_OFFSET), true); stack.move_to(VirtualAddress::new(arch::USER_STACK_OFFSET), &mut new_table, &mut temporary_page, true);
context.stack = Some(stack); context.stack = Some(stack);
} }
context.arch.set_page_table(unsafe { new_table.address() });
context.blocked = false; context.blocked = false;
pid = context.id; pid = context.id;
} }
//unsafe { context::switch(); } unsafe { context::switch(); }
Ok(pid) Ok(pid)
} }