Cleanup heap management

This commit is contained in:
Jeremy Soller 2016-09-11 21:18:18 -06:00
parent bcd318d80b
commit 0d101ae5fa
4 changed files with 77 additions and 46 deletions

View file

@ -21,6 +21,14 @@ impl Memory {
memory
}
/// Base virtual address of this memory region (accessor for `self.start`).
pub fn start_address(&self) -> VirtualAddress {
self.start
}
/// Size of this memory region in bytes (accessor for `self.size`).
/// Not necessarily page-aligned; callers that need pages use `pages()`.
pub fn size(&self) -> usize {
self.size
}
pub fn pages(&self) -> PageIter {
let start_page = Page::containing_address(self.start);
let end_page = Page::containing_address(VirtualAddress::new(self.start.get() + self.size - 1));
@ -52,14 +60,52 @@ impl Memory {
/// Change the page-table flags for every page of this memory region.
///
/// `new_flags` becomes the region's stored flags; when `flush` is set,
/// each remapped page is flushed through the active table (presumably a
/// TLB invalidation — confirm against `ActivePageTable::flush`).
///
/// NOTE(review): the diff rendering interleaved this hunk's removed and
/// added lines (the flags assignment appeared both before and after the
/// loop, and `remap` was called with both `self.flags` and `new_flags`).
/// This is the deduplicated post-change version: remap each page with
/// `new_flags`, then record them in `self.flags` once at the end.
pub fn remap(&mut self, new_flags: EntryFlags, flush: bool) {
    let mut active_table = unsafe { ActivePageTable::new() };

    for page in self.pages() {
        active_table.remap(page, new_flags);
        if flush {
            active_table.flush(page);
        }
    }

    self.flags = new_flags;
}
/// Grow or shrink this region to `new_size` bytes, mapping or unmapping
/// whole pages as needed. Pages already in the desired state (present when
/// growing, absent when shrinking) are skipped. When `flush` is set, each
/// page actually touched is flushed via the active table. Finally the
/// stored size is updated to `new_size`.
pub fn resize(&mut self, new_size: usize, flush: bool) {
    let mut active_table = unsafe { ActivePageTable::new() };

    if new_size > self.size {
        // Growing: map every not-yet-present page covering [old end, new end).
        let first = Page::containing_address(VirtualAddress::new(self.start.get() + self.size));
        let last = Page::containing_address(VirtualAddress::new(self.start.get() + new_size - 1));
        for page in Page::range_inclusive(first, last) {
            if active_table.translate_page(page).is_none() {
                active_table.map(page, self.flags);
                if flush {
                    active_table.flush(page);
                }
            }
        }
    } else if new_size < self.size {
        // Shrinking: unmap every still-present page covering [new end, old end).
        let first = Page::containing_address(VirtualAddress::new(self.start.get() + new_size));
        let last = Page::containing_address(VirtualAddress::new(self.start.get() + self.size - 1));
        for page in Page::range_inclusive(first, last) {
            if active_table.translate_page(page).is_some() {
                active_table.unmap(page);
                if flush {
                    active_table.flush(page);
                }
            }
        }
    }

    self.size = new_size;
}
}

View file

@ -178,6 +178,8 @@ pub struct Context {
pub image: Vec<memory::Memory>,
/// User stack
pub stack: Option<memory::Memory>,
/// User heap
pub heap: Option<memory::Memory>,
/// The open files in the scheme
pub files: Vec<Option<file::File>>
}
@ -193,6 +195,7 @@ impl Context {
kstack: None,
image: Vec::new(),
stack: None,
heap: None,
files: Vec::new()
}
}

View file

@ -70,7 +70,8 @@ impl<'a> Elf<'a> {
let mut memory = context::memory::Memory::new(
VirtualAddress::new(segment.p_vaddr as usize),
segment.p_memsz as usize,
entry::NO_EXECUTE | entry::WRITABLE);
entry::NO_EXECUTE | entry::WRITABLE
);
unsafe {
// Copy file data
@ -106,7 +107,8 @@ impl<'a> Elf<'a> {
context.stack = Some(context::memory::Memory::new(
VirtualAddress::new(stack_addr),
stack_size,
entry::NO_EXECUTE | entry::WRITABLE | entry::USER_ACCESSIBLE));
entry::NO_EXECUTE | entry::WRITABLE | entry::USER_ACCESSIBLE
));
// Clear stack
unsafe { memset(stack_addr as *mut u8, 0, stack_size); }

View file

@ -4,60 +4,40 @@ use core::str;
use arch;
use arch::interrupt::halt;
use arch::paging::{ActivePageTable, Page, VirtualAddress, entry};
use arch::paging::{VirtualAddress, entry};
use context;
use elf;
use syscall::{self, Error, Result};
pub fn brk(address: usize) -> Result<usize> {
//TODO: Make this more efficient
let mut active_table = unsafe { ActivePageTable::new() };
let contexts = context::contexts();
let context_lock = contexts.current().ok_or(Error::NoProcess)?;
let mut context = context_lock.write();
let mut current = arch::USER_HEAP_OFFSET;
{
let min_page = Page::containing_address(VirtualAddress::new(arch::USER_HEAP_OFFSET));
let max_page = Page::containing_address(VirtualAddress::new(arch::USER_HEAP_OFFSET + arch::USER_HEAP_SIZE - 1));
for page in Page::range_inclusive(min_page, max_page) {
if active_table.translate_page(page).is_none() {
break;
}
current = page.start_address().get() + 4096;
}
if let Some(ref heap) = context.heap {
current = heap.start_address().get() + heap.size();
}
if address == 0 {
//println!("Brk query {:X}", current);
Ok(current)
} else if address > current {
let start_page = Page::containing_address(VirtualAddress::new(current));
let end_page = Page::containing_address(VirtualAddress::new(address - 1));
for page in Page::range_inclusive(start_page, end_page) {
//println!("Map {:X}", page.start_address().get());
if active_table.translate_page(page).is_none() {
//println!("Not found - mapping");
active_table.map(page, entry::PRESENT | entry::WRITABLE | entry::NO_EXECUTE | entry::USER_ACCESSIBLE);
active_table.flush(page);
} else {
//println!("Found - skipping");
}
} else if address >= arch::USER_HEAP_OFFSET {
//TODO: out of memory errors
if let Some(ref mut heap) = context.heap {
heap.resize(address - arch::USER_HEAP_OFFSET, true);
return Ok(address);
}
//let new = end_page.start_address().get() + 4096;
//println!("Brk increase {:X}: from {:X} to {:X}", address, current, new);
context.heap = Some(context::memory::Memory::new(
VirtualAddress::new(arch::USER_HEAP_OFFSET),
address - arch::USER_HEAP_OFFSET,
entry::WRITABLE | entry::NO_EXECUTE | entry::USER_ACCESSIBLE
));
Ok(address)
} else {
let start_page = Page::containing_address(VirtualAddress::new(address));
let end_page = Page::containing_address(VirtualAddress::new(current - 1));
for page in Page::range_inclusive(start_page, end_page) {
//println!("Unmap {:X}", page.start_address().get());
if active_table.translate_page(page).is_some() {
//println!("Found - unmapping");
active_table.unmap(page);
active_table.flush(page);
} else {
//println!("Not found - skipping");
}
}
//let new = start_page.start_address().get();
//println!("Brk decrease {:X}: from {:X} to {:X}", address, current, new);
Ok(address)
//TODO: Return correct error
Err(Error::NotPermitted)
}
}