Store context memory information

Jeremy Soller 2016-09-11 21:04:34 -06:00
parent bed09d0518
commit bcd318d80b
5 changed files with 131 additions and 59 deletions

kernel/context/memory.rs (new file, 70 lines added)

@@ -0,0 +1,70 @@
use arch::paging::{ActivePageTable, Page, PageIter, VirtualAddress};
use arch::paging::entry::EntryFlags;

#[derive(Debug)]
pub struct Memory {
    start: VirtualAddress,
    size: usize,
    flags: EntryFlags
}

impl Memory {
    pub fn new(start: VirtualAddress, size: usize, flags: EntryFlags) -> Self {
        let mut memory = Memory {
            start: start,
            size: size,
            flags: flags
        };

        memory.map(true);

        memory
    }

    pub fn pages(&self) -> PageIter {
        let start_page = Page::containing_address(self.start);
        let end_page = Page::containing_address(VirtualAddress::new(self.start.get() + self.size - 1));
        Page::range_inclusive(start_page, end_page)
    }

    pub fn map(&mut self, flush: bool) {
        let mut active_table = unsafe { ActivePageTable::new() };

        for page in self.pages() {
            active_table.map(page, self.flags);
            if flush {
                active_table.flush(page);
            }
        }
    }

    pub fn unmap(&mut self, flush: bool) {
        let mut active_table = unsafe { ActivePageTable::new() };

        for page in self.pages() {
            active_table.unmap(page);
            if flush {
                active_table.flush(page);
            }
        }
    }

    pub fn remap(&mut self, new_flags: EntryFlags, flush: bool) {
        let mut active_table = unsafe { ActivePageTable::new() };

        self.flags = new_flags;

        for page in self.pages() {
            active_table.remap(page, self.flags);
            if flush {
                active_table.flush(page);
            }
        }
    }
}

impl Drop for Memory {
    fn drop(&mut self) {
        self.unmap(true);
    }
}
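
A minimal usage sketch of the new type (illustrative only, not part of this commit; the address and call site are hypothetical): Memory::new maps the region as soon as it is constructed, remap applies the final permission flags, and dropping the value unmaps the pages.

// Illustrative sketch only — not part of this commit. Assumes a kernel context
// where ActivePageTable::new() is valid, as in the module above.
let mut region = Memory::new(
    VirtualAddress::new(0x2000_0000),      // hypothetical virtual address
    4 * 4096,                              // four pages
    entry::NO_EXECUTE | entry::WRITABLE);  // writable while data is loaded

// ... copy data into the region ...

// Tighten permissions once loading is done (W ^ X style).
region.remap(entry::PRESENT | entry::NO_EXECUTE | entry::USER_ACCESSIBLE, true);

// Dropping `region` (or the Context that owns it) unmaps the pages.
drop(region);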


@@ -13,6 +13,9 @@ use syscall::{Error, Result};
 /// File operations
 pub mod file;
 
+/// Memory operations
+pub mod memory;
+
 /// Limit on number of contexts
 pub const CONTEXT_MAX_CONTEXTS: usize = 65536;

@@ -171,6 +174,10 @@ pub struct Context {
     pub arch: ArchContext,
     /// Kernel stack
     pub kstack: Option<Box<[u8]>>,
+    /// Executable image
+    pub image: Vec<memory::Memory>,
+    /// User stack
+    pub stack: Option<memory::Memory>,
     /// The open files in the scheme
     pub files: Vec<Option<file::File>>
 }

@@ -184,6 +191,8 @@ impl Context {
             blocked: true,
             arch: ArchContext::new(),
             kstack: None,
+            image: Vec::new(),
+            stack: None,
             files: Vec::new()
         }
     }


@@ -11,8 +11,10 @@ use goblin::elf32::{header, program_header};
 use goblin::elf64::{header, program_header};
 
 use arch::externs::{memcpy, memset};
-use arch::paging::{entry, ActivePageTable, Page, VirtualAddress};
+use arch::paging::{entry, VirtualAddress};
 use arch::start::usermode;
+use context;
+use syscall::{Error, Result as SysResult};
 
 /// An ELF executable
 pub struct Elf<'a> {
@@ -51,74 +53,67 @@ impl<'a> Elf<'a> {
     }
 
     /// Test function to run. Remove and replace with proper syscall
-    pub fn run(self) {
-        let mut active_table = unsafe { ActivePageTable::new() };
-
-        for segment in self.segments() {
-            if segment.p_type == program_header::PT_LOAD {
-                let start_page = Page::containing_address(VirtualAddress::new(segment.p_vaddr as usize));
-                let end_page = Page::containing_address(VirtualAddress::new((segment.p_vaddr + segment.p_memsz) as usize));
-
-                for page in Page::range_inclusive(start_page, end_page) {
-                    if active_table.translate_page(page).is_some() {
-                        //TODO panic!("Elf::run: still mapped: {:?}", page);
-                        active_table.unmap(page);
-                    }
-                    active_table.map(page, entry::NO_EXECUTE | entry::WRITABLE);
-                }
-                active_table.flush_all();
-
-                unsafe {
-                    // Copy file data
-                    memcpy(segment.p_vaddr as *mut u8,
-                            (self.data.as_ptr() as usize + segment.p_offset as usize) as *const u8,
-                            segment.p_filesz as usize);
-                    // Set BSS
-                    memset((segment.p_vaddr + segment.p_filesz) as *mut u8,
-                            0,
-                            (segment.p_memsz - segment.p_filesz) as usize);
-                }
-
-                let mut flags = entry::NO_EXECUTE | entry::USER_ACCESSIBLE;
-
-                if segment.p_flags & program_header::PF_R == program_header::PF_R {
-                    flags.insert(entry::PRESENT);
-                }
-
-                // W ^ X. If it is executable, do not allow it to be writable, even if requested
-                if segment.p_flags & program_header::PF_X == program_header::PF_X {
-                    flags.remove(entry::NO_EXECUTE);
-                } else if segment.p_flags & program_header::PF_W == program_header::PF_W {
-                    flags.insert(entry::WRITABLE);
-                }
-
-                for page in Page::range_inclusive(start_page, end_page) {
-                    active_table.remap(page, flags);
-                }
-                active_table.flush_all();
-            }
-        }
-
-        // Map stack
-        let start_page = Page::containing_address(VirtualAddress::new(0x80000000));
-        let end_page = Page::containing_address(VirtualAddress::new(0x80000000 + 64*1024 - 1));
-
-        for page in Page::range_inclusive(start_page, end_page) {
-            if active_table.translate_page(page).is_some() {
-                //TODO panic!("Elf::run: still mapped: {:?}", page);
-                active_table.unmap(page);
-            }
-            active_table.map(page, entry::NO_EXECUTE | entry::WRITABLE | entry::USER_ACCESSIBLE);
-        }
-        active_table.flush_all();
-
-        unsafe {
-            // Clear stack
-            memset(0x80000000 as *mut u8, 0, 64 * 1024);
-
-            // Go to usermode
-            usermode(self.entry(), 0x80000000 + 64*1024 - 256);
-        }
+    pub fn run(self) -> SysResult<!> {
+        let stack_addr = 0x80000000;
+        let stack_size = 64 * 1024;
+
+        {
+            let contexts = context::contexts();
+            let context_lock = contexts.current().ok_or(Error::NoProcess)?;
+            let mut context = context_lock.write();
+
+            // Unmap previous image and stack
+            context.image.clear();
+            context.stack.take();
+
+            for segment in self.segments() {
+                if segment.p_type == program_header::PT_LOAD {
+                    let mut memory = context::memory::Memory::new(
+                        VirtualAddress::new(segment.p_vaddr as usize),
+                        segment.p_memsz as usize,
+                        entry::NO_EXECUTE | entry::WRITABLE);
+
+                    unsafe {
+                        // Copy file data
+                        memcpy(segment.p_vaddr as *mut u8,
+                                (self.data.as_ptr() as usize + segment.p_offset as usize) as *const u8,
+                                segment.p_filesz as usize);
+                        // Set BSS
+                        memset((segment.p_vaddr + segment.p_filesz) as *mut u8,
+                                0,
+                                (segment.p_memsz - segment.p_filesz) as usize);
+                    }
+
+                    let mut flags = entry::NO_EXECUTE | entry::USER_ACCESSIBLE;
+
+                    if segment.p_flags & program_header::PF_R == program_header::PF_R {
+                        flags.insert(entry::PRESENT);
+                    }
+
+                    // W ^ X. If it is executable, do not allow it to be writable, even if requested
+                    if segment.p_flags & program_header::PF_X == program_header::PF_X {
+                        flags.remove(entry::NO_EXECUTE);
+                    } else if segment.p_flags & program_header::PF_W == program_header::PF_W {
+                        flags.insert(entry::WRITABLE);
+                    }
+
+                    memory.remap(flags, true);
+
+                    context.image.push(memory);
+                }
+            }
+
+            // Map stack
+            context.stack = Some(context::memory::Memory::new(
+                VirtualAddress::new(stack_addr),
+                stack_size,
+                entry::NO_EXECUTE | entry::WRITABLE | entry::USER_ACCESSIBLE));
+
+            unsafe { memset(stack_addr as *mut u8, 0, stack_size); }
+        }
+
+        // Go to usermode
+        unsafe { usermode(self.entry(), stack_addr + stack_size - 256); }
     }
 }


@@ -70,6 +70,7 @@
 #![feature(const_fn)]
 #![feature(drop_types_in_const)]
 #![feature(question_mark)]
+#![feature(never_type)]
 #![feature(thread_local)]
 #![no_std]
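
The new feature gate exists for the SysResult<!> return type introduced in the ELF loader above: run can only fail or jump to usermode, so its success type is !, which is what lets the caller below write elf.run().and(Ok(0)). A standalone sketch of that coercion (nightly only, not part of this commit; the Error enum here is a hypothetical stand-in for the kernel's syscall::Error):

#![feature(never_type)] // nightly feature, as enabled above

#[derive(Debug)]
enum Error { NoProcess } // stand-in for the kernel's syscall::Error

// Mirrors the shape of Elf::run: it can only fail or diverge.
fn run() -> Result<!, Error> {
    // The kernel would jump to usermode here and never return normally;
    // this sketch just fails so that it terminates.
    Err(Error::NoProcess)
}

fn exec() -> Result<usize, Error> {
    // `!` coerces into `usize`, so chaining `.and(Ok(0))` type-checks even
    // though `run` can never produce an Ok value.
    run().and(Ok(0))
}

fn main() {
    println!("{:?}", exec()); // prints Err(NoProcess)
}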


@@ -93,10 +93,7 @@ pub fn exec(path: &[u8], _args: &[[usize; 2]]) -> Result<usize> {
     let _ = syscall::close(file);
 
     match elf::Elf::from(&data) {
-        Ok(elf) => {
-            elf.run();
-            Ok(0)
-        },
+        Ok(elf) => elf.run().and(Ok(0)),
         Err(err) => {
             println!("failed to execute {}: {}", unsafe { str::from_utf8_unchecked(path) }, err);
             Err(Error::NoExec)