Force flush of tables
parent e3635f37f6
commit a627cb7fef
10 changed files with 223 additions and 172 deletions
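
Summary: this commit stops threading a caller-supplied `flush: bool` through `Grant` and `Memory` (`new`, `map`, `unmap`, `move_to`, `remap`, `resize`) and deletes the manual `if flush_all { active_table.flush_all(); }` blocks. Every `map`/`map_to`/`remap`/`unmap`/`unmap_return` now returns a flush token; the caller either feeds each token into a `MapperFlushAll` accumulator, which forces one full TLB flush on the active table, or, when operating on an inactive table, discards it explicitly with `unsafe { result.ignore(); }`.

Below is a minimal sketch of the flush-token pattern, inferred from how it is used in this diff. The names (`MapperFlush`, `MapperFlushAll::new`/`consume`/`flush`, `ignore`) appear in the diff itself, but the bodies are illustrative rather than the kernel's verbatim source, and they assume the kernel's existing `Page` and `ActivePageTable` types:

    /// Token returned by map/unmap/remap on a page table. Discarding the
    /// return value outright triggers a compiler warning via #[must_use].
    #[must_use = "The page must be flushed, or the flush must be ignored"]
    pub struct MapperFlush(Page);

    impl MapperFlush {
        /// Created by the mapper for the page it just modified.
        pub fn new(page: Page) -> MapperFlush {
            MapperFlush(page)
        }

        /// Flush this page's TLB entry in the active table.
        pub fn flush(self, table: &mut ActivePageTable) {
            table.flush(self.0);
        }

        /// Skip the flush. Only sound when the change was made in a table
        /// that is not currently active.
        pub unsafe fn ignore(self) {}
    }

    /// Accumulator: remembers whether any mapping changed, and if so forces
    /// one full TLB flush instead of many single-page flushes.
    #[must_use = "The tables must be flushed, or the flushes must be ignored"]
    pub struct MapperFlushAll(bool);

    impl MapperFlushAll {
        pub fn new() -> MapperFlushAll {
            MapperFlushAll(false)
        }

        /// Consume a single-page token; a full flush is now pending.
        pub fn consume(&mut self, flush: MapperFlush) {
            self.0 = true;
            unsafe { flush.ignore(); } // subsumed by the eventual full flush
        }

        /// Perform a full flush if any token was consumed.
        pub fn flush(self, table: &mut ActivePageTable) {
            if self.0 {
                table.flush_all();
            }
        }
    }

With this shape, writing `active_table.map(page, flags);` as a bare statement warns at compile time, so a missing TLB flush can no longer pass silently.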
@@ -6,6 +6,7 @@ use spin::Mutex;
 use arch::memory::Frame;
 use arch::paging::{ActivePageTable, InactivePageTable, Page, PageIter, PhysicalAddress, VirtualAddress};
 use arch::paging::entry::{self, EntryFlags};
+use arch::paging::mapper::MapperFlushAll;
 use arch::paging::temporary_page::TemporaryPage;
 
 #[derive(Debug)]
@@ -20,19 +21,17 @@ impl Grant {
     pub fn physmap(from: PhysicalAddress, to: VirtualAddress, size: usize, flags: EntryFlags) -> Grant {
         let mut active_table = unsafe { ActivePageTable::new() };
 
-        let mut flush_all = false;
+        let mut flush_all = MapperFlushAll::new();
 
         let start_page = Page::containing_address(to);
         let end_page = Page::containing_address(VirtualAddress::new(to.get() + size - 1));
         for page in Page::range_inclusive(start_page, end_page) {
             let frame = Frame::containing_address(PhysicalAddress::new(page.start_address().get() - to.get() + from.get()));
-            active_table.map_to(page, frame, flags);
-            flush_all = true;
+            let result = active_table.map_to(page, frame, flags);
+            flush_all.consume(result);
         }
 
-        if flush_all {
-            active_table.flush_all();
-        }
+        flush_all.flush(&mut active_table);
 
         Grant {
             start: to,
@@ -59,7 +58,9 @@ impl Grant {
             let end_page = Page::containing_address(VirtualAddress::new(to.get() + size - 1));
             for page in Page::range_inclusive(start_page, end_page) {
                 let frame = frames.pop_front().expect("grant did not find enough frames");
-                mapper.map_to(page, frame, flags);
+                let result = mapper.map_to(page, frame, flags);
+                // Ignore result due to mapping on inactive table
+                unsafe { result.ignore(); }
             }
         });
 
@@ -88,18 +89,16 @@ impl Grant {
 
         let mut active_table = unsafe { ActivePageTable::new() };
 
-        let mut flush_all = false;
+        let mut flush_all = MapperFlushAll::new();
 
         let start_page = Page::containing_address(self.start);
         let end_page = Page::containing_address(VirtualAddress::new(self.start.get() + self.size - 1));
         for page in Page::range_inclusive(start_page, end_page) {
-            active_table.unmap_return(page);
-            flush_all = true;
+            let (result, _frame) = active_table.unmap_return(page);
+            flush_all.consume(result);
         }
 
-        if flush_all {
-            active_table.flush_all();
-        }
+        flush_all.flush(&mut active_table);
 
         self.mapped = false;
     }
@@ -113,7 +112,9 @@ impl Grant {
             let start_page = Page::containing_address(self.start);
             let end_page = Page::containing_address(VirtualAddress::new(self.start.get() + self.size - 1));
             for page in Page::range_inclusive(start_page, end_page) {
-                mapper.unmap_return(page);
+                let (result, _frame) = mapper.unmap_return(page);
+                // This is not the active table, so the flush can be ignored
+                unsafe { result.ignore(); }
             }
         });
 
@@ -164,14 +165,14 @@ pub struct Memory {
 }
 
 impl Memory {
-    pub fn new(start: VirtualAddress, size: usize, flags: EntryFlags, flush: bool, clear: bool) -> Self {
+    pub fn new(start: VirtualAddress, size: usize, flags: EntryFlags, clear: bool) -> Self {
         let mut memory = Memory {
             start: start,
             size: size,
             flags: flags
         };
 
-        memory.map(flush, clear);
+        memory.map(clear);
 
         memory
     }
@@ -198,150 +199,114 @@ impl Memory {
         Page::range_inclusive(start_page, end_page)
     }
 
-    fn map(&mut self, flush: bool, clear: bool) {
+    fn map(&mut self, clear: bool) {
         let mut active_table = unsafe { ActivePageTable::new() };
 
-        let mut flush_all = false;
+        let mut flush_all = MapperFlushAll::new();
 
         //TODO: Clear pages?
         for page in self.pages() {
-            active_table.map(page, self.flags);
-
-            if flush {
-                //active_table.flush(page);
-                flush_all = true;
-            }
+            let result = active_table.map(page, self.flags);
+            flush_all.consume(result);
         }
 
-        if flush_all {
-            active_table.flush_all();
-        }
+        flush_all.flush(&mut active_table);
 
         if clear {
-            assert!(flush && self.flags.contains(entry::WRITABLE));
+            assert!(self.flags.contains(entry::WRITABLE));
             unsafe {
                 intrinsics::write_bytes(self.start_address().get() as *mut u8, 0, self.size);
             }
         }
     }
 
-    fn unmap(&mut self, flush: bool) {
+    fn unmap(&mut self) {
         let mut active_table = unsafe { ActivePageTable::new() };
 
-        let mut flush_all = false;
+        let mut flush_all = MapperFlushAll::new();
 
         for page in self.pages() {
-            active_table.unmap(page);
-
-            if flush {
-                //active_table.flush(page);
-                flush_all = true;
-            }
+            let result = active_table.unmap(page);
+            flush_all.consume(result);
         }
 
-        if flush_all {
-            active_table.flush_all();
-        }
+        flush_all.flush(&mut active_table);
     }
 
     /// A complicated operation to move a piece of memory to a new page table
     /// It also allows for changing the address at the same time
-    pub fn move_to(&mut self, new_start: VirtualAddress, new_table: &mut InactivePageTable, temporary_page: &mut TemporaryPage, flush: bool) {
+    pub fn move_to(&mut self, new_start: VirtualAddress, new_table: &mut InactivePageTable, temporary_page: &mut TemporaryPage) {
         let mut active_table = unsafe { ActivePageTable::new() };
 
-        let mut flush_all = false;
+        let mut flush_all = MapperFlushAll::new();
 
         for page in self.pages() {
-            let frame = active_table.unmap_return(page);
+            let (result, frame) = active_table.unmap_return(page);
+            flush_all.consume(result);
 
             active_table.with(new_table, temporary_page, |mapper| {
                 let new_page = Page::containing_address(VirtualAddress::new(page.start_address().get() - self.start.get() + new_start.get()));
-                mapper.map_to(new_page, frame, self.flags);
+                let result = mapper.map_to(new_page, frame, self.flags);
+                // This is not the active table, so the flush can be ignored
+                unsafe { result.ignore(); }
             });
-
-            if flush {
-                //active_table.flush(page);
-                flush_all = true;
-            }
         }
 
-        if flush_all {
-            active_table.flush_all();
-        }
+        flush_all.flush(&mut active_table);
 
         self.start = new_start;
     }
 
-    pub fn remap(&mut self, new_flags: EntryFlags, flush: bool) {
+    pub fn remap(&mut self, new_flags: EntryFlags) {
         let mut active_table = unsafe { ActivePageTable::new() };
 
-        let mut flush_all = false;
+        let mut flush_all = MapperFlushAll::new();
 
         for page in self.pages() {
-            active_table.remap(page, new_flags);
-
-            if flush {
-                //active_table.flush(page);
-                flush_all = true;
-            }
+            let result = active_table.remap(page, new_flags);
+            flush_all.consume(result);
        }
 
-        if flush_all {
-            active_table.flush_all();
-        }
+        flush_all.flush(&mut active_table);
 
         self.flags = new_flags;
     }
 
-    pub fn resize(&mut self, new_size: usize, flush: bool, clear: bool) {
+    pub fn resize(&mut self, new_size: usize, clear: bool) {
         let mut active_table = unsafe { ActivePageTable::new() };
 
         //TODO: Calculate page changes to minimize operations
         if new_size > self.size {
-            let mut flush_all = false;
+            let mut flush_all = MapperFlushAll::new();
 
             let start_page = Page::containing_address(VirtualAddress::new(self.start.get() + self.size));
             let end_page = Page::containing_address(VirtualAddress::new(self.start.get() + new_size - 1));
             for page in Page::range_inclusive(start_page, end_page) {
                 if active_table.translate_page(page).is_none() {
-                    active_table.map(page, self.flags);
-
-                    if flush {
-                        //active_table.flush(page);
-                        flush_all = true;
-                    }
+                    let result = active_table.map(page, self.flags);
+                    flush_all.consume(result);
                 }
             }
 
-            if flush_all {
-                active_table.flush_all();
-            }
+            flush_all.flush(&mut active_table);
 
             if clear {
-                assert!(flush);
                 unsafe {
                     intrinsics::write_bytes((self.start.get() + self.size) as *mut u8, 0, new_size - self.size);
                 }
             }
         } else if new_size < self.size {
-            let mut flush_all = false;
+            let mut flush_all = MapperFlushAll::new();
 
             let start_page = Page::containing_address(VirtualAddress::new(self.start.get() + new_size));
             let end_page = Page::containing_address(VirtualAddress::new(self.start.get() + self.size - 1));
             for page in Page::range_inclusive(start_page, end_page) {
                 if active_table.translate_page(page).is_some() {
-                    active_table.unmap(page);
-
-                    if flush {
-                        //active_table.flush(page);
-                        flush_all = true;
-                    }
+                    let result = active_table.unmap(page);
+                    flush_all.consume(result);
                 }
             }
 
-            if flush_all {
-                active_table.flush_all();
-            }
+            flush_all.flush(&mut active_table);
         }
 
         self.size = new_size;
@@ -350,7 +315,7 @@ impl Memory {
 
 impl Drop for Memory {
     fn drop(&mut self) {
-        self.unmap(true);
+        self.unmap();
     }
 }
 
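The hunks below are in the syscall layer (`brk`, `clone`, and `exec`). They are mechanical call-site updates: since `Memory` and `Grant` now always flush internally, every caller simply drops its trailing `flush: bool` argument, as in this call shape taken from the diff:

    // before: the caller decided whether to flush, and could get it wrong
    new_memory.remap(memory.flags(), true);

    // after: remap always flushes the active table itself
    new_memory.remap(memory.flags());
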
@@ -44,7 +44,7 @@ pub fn brk(address: usize) -> Result<usize> {
     //TODO: out of memory errors
     if let Some(ref heap_shared) = context.heap {
         heap_shared.with(|heap| {
-            heap.resize(address - arch::USER_HEAP_OFFSET, true, true);
+            heap.resize(address - arch::USER_HEAP_OFFSET, true);
         });
     } else {
         panic!("user heap not initialized");
@@ -139,7 +139,6 @@ pub fn clone(flags: usize, stack_base: usize) -> Result<ContextId> {
                     VirtualAddress::new(memory.start_address().get() + arch::USER_TMP_OFFSET),
                     memory.size(),
                     entry::PRESENT | entry::NO_EXECUTE | entry::WRITABLE,
-                    true,
                     false
                 );
 
@@ -149,7 +148,7 @@ pub fn clone(flags: usize, stack_base: usize) -> Result<ContextId> {
                         memory.size());
                 }
 
-                new_memory.remap(memory.flags(), true);
+                new_memory.remap(memory.flags());
                 image.push(new_memory.to_shared());
             });
         }
@@ -160,7 +159,6 @@ pub fn clone(flags: usize, stack_base: usize) -> Result<ContextId> {
                     VirtualAddress::new(arch::USER_TMP_HEAP_OFFSET),
                     heap.size(),
                     entry::PRESENT | entry::NO_EXECUTE | entry::WRITABLE,
-                    true,
                     false
                 );
 
@@ -170,7 +168,7 @@ pub fn clone(flags: usize, stack_base: usize) -> Result<ContextId> {
                         heap.size());
                 }
 
-                new_heap.remap(heap.flags(), true);
+                new_heap.remap(heap.flags());
                 heap_option = Some(new_heap.to_shared());
             });
         }
@@ -181,7 +179,6 @@ pub fn clone(flags: usize, stack_base: usize) -> Result<ContextId> {
                 VirtualAddress::new(arch::USER_TMP_STACK_OFFSET),
                 stack.size(),
                 entry::PRESENT | entry::NO_EXECUTE | entry::WRITABLE,
-                true,
                 false
             );
 
@@ -191,7 +188,7 @@ pub fn clone(flags: usize, stack_base: usize) -> Result<ContextId> {
                     stack.size());
             }
 
-            new_stack.remap(stack.flags(), true);
+            new_stack.remap(stack.flags());
             stack_option = Some(new_stack);
         }
 
@@ -203,7 +200,6 @@ pub fn clone(flags: usize, stack_base: usize) -> Result<ContextId> {
                     VirtualAddress::new(arch::USER_TMP_TLS_OFFSET),
                     tls.mem.size(),
                     entry::PRESENT | entry::NO_EXECUTE | entry::WRITABLE,
-                    true,
                     true
                 )
             };
@@ -214,7 +210,7 @@ pub fn clone(flags: usize, stack_base: usize) -> Result<ContextId> {
                     tls.file_size);
             }
 
-            new_tls.mem.remap(tls.mem.flags(), true);
+            new_tls.mem.remap(tls.mem.flags());
             tls_option = Some(new_tls);
         }
 
@@ -405,7 +401,9 @@ pub fn clone(flags: usize, stack_base: usize) -> Result<ContextId> {
             for page in Page::range_inclusive(start_page, end_page) {
                 let frame = active_table.translate_page(page).expect("kernel percpu not mapped");
                 active_table.with(&mut new_table, &mut temporary_page, |mapper| {
-                    mapper.map_to(page, frame, entry::PRESENT | entry::NO_EXECUTE | entry::WRITABLE);
+                    let result = mapper.map_to(page, frame, entry::PRESENT | entry::NO_EXECUTE | entry::WRITABLE);
+                    // Ignore result due to operating on inactive table
+                    unsafe { result.ignore(); }
                 });
             }
         }
@@ -414,7 +412,7 @@ pub fn clone(flags: usize, stack_base: usize) -> Result<ContextId> {
         for memory_shared in image.iter_mut() {
             memory_shared.with(|memory| {
                 let start = VirtualAddress::new(memory.start_address().get() - arch::USER_TMP_OFFSET + arch::USER_OFFSET);
-                memory.move_to(start, &mut new_table, &mut temporary_page, true);
+                memory.move_to(start, &mut new_table, &mut temporary_page);
             });
         }
         context.image = image;
@@ -422,7 +420,7 @@ pub fn clone(flags: usize, stack_base: usize) -> Result<ContextId> {
         // Move copy of heap
         if let Some(heap_shared) = heap_option {
             heap_shared.with(|heap| {
-                heap.move_to(VirtualAddress::new(arch::USER_HEAP_OFFSET), &mut new_table, &mut temporary_page, true);
+                heap.move_to(VirtualAddress::new(arch::USER_HEAP_OFFSET), &mut new_table, &mut temporary_page);
             });
             context.heap = Some(heap_shared);
         }
@@ -430,13 +428,13 @@ pub fn clone(flags: usize, stack_base: usize) -> Result<ContextId> {
 
         // Setup user stack
         if let Some(mut stack) = stack_option {
-            stack.move_to(VirtualAddress::new(arch::USER_STACK_OFFSET), &mut new_table, &mut temporary_page, true);
+            stack.move_to(VirtualAddress::new(arch::USER_STACK_OFFSET), &mut new_table, &mut temporary_page);
             context.stack = Some(stack);
         }
 
         // Setup user TLS
         if let Some(mut tls) = tls_option {
-            tls.mem.move_to(VirtualAddress::new(arch::USER_TLS_OFFSET), &mut new_table, &mut temporary_page, true);
+            tls.mem.move_to(VirtualAddress::new(arch::USER_TLS_OFFSET), &mut new_table, &mut temporary_page);
             context.tls = Some(tls);
         }
 
@@ -566,7 +564,6 @@ pub fn exec(path: &[u8], arg_ptrs: &[[usize; 2]]) -> Result<usize> {
                         VirtualAddress::new(segment.p_vaddr as usize),
                         segment.p_memsz as usize,
                         entry::NO_EXECUTE | entry::WRITABLE,
-                        true,
                         true
                     );
 
@@ -590,7 +587,7 @@ pub fn exec(path: &[u8], arg_ptrs: &[[usize; 2]]) -> Result<usize> {
                     flags.insert(entry::WRITABLE);
                 }
 
-                memory.remap(flags, true);
+                memory.remap(flags);
 
                 context.image.push(memory.to_shared());
             } else if segment.p_type == program_header::PT_TLS {
@@ -598,7 +595,6 @@ pub fn exec(path: &[u8], arg_ptrs: &[[usize; 2]]) -> Result<usize> {
                     VirtualAddress::new(arch::USER_TCB_OFFSET),
                     4096,
                     entry::NO_EXECUTE | entry::WRITABLE | entry::USER_ACCESSIBLE,
-                    true,
                     true
                 );
 
@@ -619,7 +615,6 @@ pub fn exec(path: &[u8], arg_ptrs: &[[usize; 2]]) -> Result<usize> {
             VirtualAddress::new(arch::USER_HEAP_OFFSET),
             0,
             entry::NO_EXECUTE | entry::WRITABLE | entry::USER_ACCESSIBLE,
-            true,
             true
         ).to_shared());
 
@@ -628,7 +623,6 @@ pub fn exec(path: &[u8], arg_ptrs: &[[usize; 2]]) -> Result<usize> {
             VirtualAddress::new(arch::USER_STACK_OFFSET),
             arch::USER_STACK_SIZE,
             entry::NO_EXECUTE | entry::WRITABLE | entry::USER_ACCESSIBLE,
-            true,
             true
         ));
 
@@ -641,7 +635,6 @@ pub fn exec(path: &[u8], arg_ptrs: &[[usize; 2]]) -> Result<usize> {
             VirtualAddress::new(arch::USER_TLS_OFFSET),
             size,
             entry::NO_EXECUTE | entry::WRITABLE | entry::USER_ACCESSIBLE,
-            true,
             true
         )
     };
@@ -675,7 +668,6 @@ pub fn exec(path: &[u8], arg_ptrs: &[[usize; 2]]) -> Result<usize> {
         VirtualAddress::new(arch::USER_ARG_OFFSET),
         arg_size,
         entry::NO_EXECUTE | entry::WRITABLE,
-        true,
         true
     );
 
@@ -690,7 +682,7 @@ pub fn exec(path: &[u8], arg_ptrs: &[[usize; 2]]) -> Result<usize> {
         arg_offset += arg.len();
     }
 
-    memory.remap(entry::NO_EXECUTE | entry::USER_ACCESSIBLE, true);
+    memory.remap(entry::NO_EXECUTE | entry::USER_ACCESSIBLE);
 
     context.image.push(memory.to_shared());
 }
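Design note: replacing a `flush: bool` parameter with must-use flush tokens turns a forgotten TLB flush from a silent correctness bug into a compile-time warning at the offending call site. The only sanctioned way to skip a flush is the explicit `unsafe { result.ignore(); }`, which this commit reserves for mappings made through an inactive table; those mappings only become visible once that table is activated, and on x86_64 the CR3 reload performed at that switch flushes non-global TLB entries anyway. A side benefit is that `Drop for Memory` no longer has to guess a flush policy: it simply calls `self.unmap()`.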