redox/kernel/elf.rs

//! ELF executables
use collections::String;
use core::str;
#[cfg(target_arch = "x86")]
use goblin::elf32::{header, program_header};
#[cfg(target_arch = "x86_64")]
use goblin::elf64::{header, program_header};
use arch;
use arch::externs::memcpy;
use arch::paging::{entry, VirtualAddress};
use arch::start::usermode;
use context;
use syscall::{Error, Result as SysResult};

/// An ELF executable
pub struct Elf<'a> {
    pub data: &'a [u8],
    header: &'a header::Header
}

impl<'a> Elf<'a> {
    /// Create an ELF executable from data
    pub fn from(data: &'a [u8]) -> Result<Elf<'a>, String> {
        if data.len() < header::SIZEOF_EHDR {
            Err(format!("Elf: Not enough data: {} < {}", data.len(), header::SIZEOF_EHDR))
        } else if &data[..header::SELFMAG] != header::ELFMAG {
            Err(format!("Elf: Invalid magic: {:?} != {:?}", &data[..header::SELFMAG], header::ELFMAG))
        } else if data.get(header::EI_CLASS) != Some(&header::ELFCLASS) {
            Err(format!("Elf: Invalid architecture: {:?} != {:?}", data.get(header::EI_CLASS), header::ELFCLASS))
        } else {
            Ok(Elf {
                data: data,
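                // Size and magic were validated above; read the ELF header in place from the start of the data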
                header: unsafe { &*(data.as_ptr() as usize as *const header::Header) }
            })
        }
    }
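
    /// Iterate over the program headers of this executable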
    pub fn segments(&'a self) -> ElfSegments<'a> {
        ElfSegments {
            data: self.data,
            header: self.header,
            i: 0
        }
    }

    /// Get the entry field of the header
    pub fn entry(&self) -> usize {
        self.header.e_entry as usize
    }

    /// Test function to run. Remove and replace with proper syscall
    pub fn run(self) -> SysResult<!> {
        {
            let contexts = context::contexts();
            let context_lock = contexts.current().ok_or(Error::NoProcess)?;
            let mut context = context_lock.write();

            // Unmap previous image, heap, and stack
            context.image.clear();
            drop(context.heap.take());
            drop(context.stack.take());
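
            // Map and load every PT_LOAD segment from the image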
            for segment in self.segments() {
                if segment.p_type == program_header::PT_LOAD {
                    let mut memory = context::memory::Memory::new(
                        VirtualAddress::new(segment.p_vaddr as usize),
                        segment.p_memsz as usize,
                        entry::NO_EXECUTE | entry::WRITABLE,
                        true,
                        true
                    );

                    unsafe {
                        // Copy file data
                        memcpy(segment.p_vaddr as *mut u8,
                               (self.data.as_ptr() as usize + segment.p_offset as usize) as *const u8,
                               segment.p_filesz as usize);
                    }

                    let mut flags = entry::NO_EXECUTE | entry::USER_ACCESSIBLE;

                    if segment.p_flags & program_header::PF_R == program_header::PF_R {
                        flags.insert(entry::PRESENT);
                    }

                    // W ^ X. If it is executable, do not allow it to be writable, even if requested
                    if segment.p_flags & program_header::PF_X == program_header::PF_X {
                        flags.remove(entry::NO_EXECUTE);
                    } else if segment.p_flags & program_header::PF_W == program_header::PF_W {
                        flags.insert(entry::WRITABLE);
                    }
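
                    // Remap with the final permissions now that the file data has been copied in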
                    memory.remap(flags, true);

                    context.image.push(memory);
                }
            }

            // Map stack
            context.stack = Some(context::memory::Memory::new(
                VirtualAddress::new(arch::USER_STACK_OFFSET),
                arch::USER_STACK_SIZE,
                entry::NO_EXECUTE | entry::WRITABLE | entry::USER_ACCESSIBLE,
                true,
                true
            ));
        }

        // Go to usermode
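        // The initial stack pointer is placed 256 bytes below the top of the user stack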
        unsafe { usermode(self.entry(), arch::USER_STACK_OFFSET + arch::USER_STACK_SIZE - 256); }
    }
}
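
/// Iterator over the program headers of an ELF executable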
pub struct ElfSegments<'a> {
    data: &'a [u8],
    header: &'a header::Header,
    i: usize
}

impl<'a> Iterator for ElfSegments<'a> {
    type Item = &'a program_header::ProgramHeader;

    fn next(&mut self) -> Option<Self::Item> {
        if self.i < self.header.e_phnum as usize {
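            // Index into the program header table at e_phoff, stepping by e_phentsize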
            let item = unsafe {
                &*((
                    self.data.as_ptr() as usize
                    + self.header.e_phoff as usize
                    + self.i * self.header.e_phentsize as usize
                ) as *const program_header::ProgramHeader)
            };
            self.i += 1;
            Some(item)
        } else {
            None
        }
    }
}