Merge pull request #5 from redox-os/clone

Clone
Author: Jeremy Soller, 2016-09-15 08:56:41 -06:00, committed by GitHub
commit a4f8613971
23 changed files with 582 additions and 185 deletions

=============================== changed file ===============================

@@ -43,6 +43,13 @@ pub fn init_sdt(sdt: &'static Sdt, active_table: &mut ActivePageTable) {
             println!(" XAPIC {}: {:>08X}", me, local_apic.address);
         }

+        let trampoline_frame = Frame::containing_address(PhysicalAddress::new(TRAMPOLINE));
+        let trampoline_page = Page::containing_address(VirtualAddress::new(TRAMPOLINE));
+
+        // Map trampoline
+        active_table.map_to(trampoline_page, trampoline_frame, entry::PRESENT | entry::WRITABLE);
+        active_table.flush(trampoline_page);
+
         for madt_entry in madt.iter() {
             println!(" {:?}", madt_entry);
             match madt_entry {
@@ -50,13 +57,6 @@ pub fn init_sdt(sdt: &'static Sdt, active_table: &mut ActivePageTable) {
                         println!(" This is my local APIC");
                     } else {
                         if ap_local_apic.flags & 1 == 1 {
-                            // Map trampoline
-                            {
-                                if active_table.translate_page(Page::containing_address(VirtualAddress::new(TRAMPOLINE))).is_none() {
-                                    active_table.identity_map(Frame::containing_address(PhysicalAddress::new(TRAMPOLINE)), entry::PRESENT | entry::WRITABLE);
-                                }
-                            }
-
                             // Allocate a stack
                             // TODO: Allocate contiguous
                             let stack_start = allocate_frame().expect("no more frames in acpi stack_start").start_address().get();
@@ -128,6 +128,10 @@ pub fn init_sdt(sdt: &'static Sdt, active_table: &mut ActivePageTable) {
                 _ => ()
             }
         }
+
+        // Unmap trampoline
+        active_table.unmap(trampoline_page);
+        active_table.flush(trampoline_page);
     } else {
         println!(": Unknown");
     }
@@ -138,28 +142,43 @@ pub unsafe fn init(active_table: &mut ActivePageTable) -> Option<Acpi> {
     let start_addr = 0xE0000;
     let end_addr = 0xFFFFF;

-    // Map all of the ACPI table space
+    // Map all of the ACPI RSDP space
     {
         let start_frame = Frame::containing_address(PhysicalAddress::new(start_addr));
         let end_frame = Frame::containing_address(PhysicalAddress::new(end_addr));
         for frame in Frame::range_inclusive(start_frame, end_frame) {
-            if active_table.translate_page(Page::containing_address(VirtualAddress::new(frame.start_address().get()))).is_none() {
-                active_table.identity_map(frame, entry::PRESENT | entry::NO_EXECUTE);
-            }
+            let page = Page::containing_address(VirtualAddress::new(frame.start_address().get()));
+            active_table.map_to(page, frame, entry::PRESENT | entry::NO_EXECUTE);
+            active_table.flush(page);
         }
     }

    // Search for RSDP
    if let Some(rsdp) = RSDP::search(start_addr, end_addr) {
-        let get_sdt = |sdt_address: usize, active_table: &mut ActivePageTable| -> &'static Sdt {
-            if active_table.translate_page(Page::containing_address(VirtualAddress::new(sdt_address))).is_none() {
+        let get_sdt = |sdt_address: usize, active_table: &mut ActivePageTable| -> (&'static Sdt, bool) {
+            let mapped = if active_table.translate_page(Page::containing_address(VirtualAddress::new(sdt_address))).is_none() {
                 let sdt_frame = Frame::containing_address(PhysicalAddress::new(sdt_address));
-                active_table.identity_map(sdt_frame, entry::PRESENT | entry::NO_EXECUTE);
-            }
-            &*(sdt_address as *const Sdt)
+                let sdt_page = Page::containing_address(VirtualAddress::new(sdt_address));
+                active_table.map_to(sdt_page, sdt_frame, entry::PRESENT | entry::NO_EXECUTE);
+                active_table.flush(sdt_page);
+                true
+            } else {
+                false
+            };
+            (&*(sdt_address as *const Sdt), mapped)
        };

-        let rxsdt = get_sdt(rsdp.sdt_address(), active_table);
+        let drop_sdt = |sdt: &'static Sdt, mapped: bool, active_table: &mut ActivePageTable| {
+            let sdt_address = sdt as *const Sdt as usize;
+            drop(sdt);
+            if mapped {
+                let sdt_page = Page::containing_address(VirtualAddress::new(sdt_address));
+                active_table.unmap(sdt_page);
+                active_table.flush(sdt_page);
+            }
+        };
+
+        let (rxsdt, rxmapped) = get_sdt(rsdp.sdt_address(), active_table);

        for &c in rxsdt.signature.iter() {
            print!("{}", c as char);
@@ -167,21 +186,36 @@ pub unsafe fn init(active_table: &mut ActivePageTable) -> Option<Acpi> {
        println!(":");
        if let Some(rsdt) = Rsdt::new(rxsdt) {
            for sdt_address in rsdt.iter() {
-                let sdt = get_sdt(sdt_address, active_table);
+                let (sdt, mapped) = get_sdt(sdt_address, active_table);
                 init_sdt(sdt, active_table);
+                drop_sdt(sdt, mapped, active_table);
            }
        } else if let Some(xsdt) = Xsdt::new(rxsdt) {
            for sdt_address in xsdt.iter() {
-                let sdt = get_sdt(sdt_address, active_table);
+                let (sdt, mapped) = get_sdt(sdt_address, active_table);
                 init_sdt(sdt, active_table);
+                drop_sdt(sdt, mapped, active_table);
            }
        } else {
            println!("UNKNOWN RSDT OR XSDT SIGNATURE");
        }
+
+        drop_sdt(rxsdt, rxmapped, active_table);
    } else {
        println!("NO RSDP FOUND");
    }

+    // Unmap all of the ACPI RSDP space
+    {
+        let start_frame = Frame::containing_address(PhysicalAddress::new(start_addr));
+        let end_frame = Frame::containing_address(PhysicalAddress::new(end_addr));
+        for frame in Frame::range_inclusive(start_frame, end_frame) {
+            let page = Page::containing_address(VirtualAddress::new(frame.start_address().get()));
+            active_table.unmap(page);
+            active_table.flush(page);
+        }
+    }
+
    None
 }
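
A note on the pattern above: `get_sdt` now reports whether it created the mapping, and `drop_sdt` only unmaps in that case, so tables that already live inside a mapped region (such as the RSDP window) are left alone. A minimal userspace model of that ownership rule, with toy names standing in for the page-table API:

    // Toy model of the get_sdt/drop_sdt pairing: map a page only if absent,
    // remember whether this call created the mapping, and unmap only what we
    // mapped. ToyTable is illustrative, not a kernel API.
    use std::collections::HashSet;

    struct ToyTable { mapped: HashSet<usize> }

    impl ToyTable {
        // Returns true if this call created the mapping (caller must undo it).
        fn map_if_absent(&mut self, page: usize) -> bool {
            self.mapped.insert(page)
        }
        fn unmap(&mut self, page: usize) {
            assert!(self.mapped.remove(&page), "unmapping a page that was not mapped");
        }
    }

    fn main() {
        let mut table = ToyTable { mapped: HashSet::new() };
        let sdt_page = 0xE0000 / 4096;

        let owned = table.map_if_absent(sdt_page); // get_sdt: map + record ownership
        // ... read the SDT through the mapping here ...
        if owned {
            table.unmap(sdt_page); // drop_sdt: only the creator unmaps
        }
    }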

=============================== changed file ===============================

@@ -6,7 +6,7 @@ use core::sync::atomic::{AtomicBool, ATOMIC_BOOL_INIT, Ordering};
 /// This must be done, as no locks can be held on the stack during switch
 pub static CONTEXT_SWITCH_LOCK: AtomicBool = ATOMIC_BOOL_INIT;

-#[derive(Debug)]
+#[derive(Clone, Debug)]
 pub struct Context {
     /// Page table pointer
     cr3: usize,

=============================== changed file ===============================

@@ -3,7 +3,7 @@ use ransid::{Console, Event};
 use spin::Mutex;

 use memory::Frame;
-use paging::{ActivePageTable, PhysicalAddress, entry};
+use paging::{ActivePageTable, Page, PhysicalAddress, VirtualAddress, entry};

 #[cfg(target_arch = "x86_64")]
 #[allow(unused_assignments)]
@@ -95,14 +95,15 @@ pub unsafe fn init(active_table: &mut ActivePageTable) {
     if mode_info.physbaseptr > 0 {
         let width = mode_info.xresolution as usize;
         let height = mode_info.yresolution as usize;
-        let onscreen = mode_info.physbaseptr as usize;
+        let onscreen = mode_info.physbaseptr as usize + ::KERNEL_OFFSET;
         let size = width * height;

         {
-            let start_frame = Frame::containing_address(PhysicalAddress::new(onscreen));
-            let end_frame = Frame::containing_address(PhysicalAddress::new(onscreen + size * 4 - 1));
-            for frame in Frame::range_inclusive(start_frame, end_frame) {
-                active_table.identity_map(frame, /*actually sets PAT for write combining*/ entry::HUGE_PAGE | entry::PRESENT | entry::WRITABLE | entry::NO_EXECUTE);
+            let start_page = Page::containing_address(VirtualAddress::new(onscreen));
+            let end_page = Page::containing_address(VirtualAddress::new(onscreen + size * 4 - 1));
+            for page in Page::range_inclusive(start_page, end_page) {
+                let frame = Frame::containing_address(PhysicalAddress::new(page.start_address().get() - ::KERNEL_OFFSET));
+                active_table.map_to(page, frame, /*actually sets PAT for write combining*/ entry::HUGE_PAGE | entry::PRESENT | entry::WRITABLE | entry::NO_EXECUTE);
             }
         }
@@ -117,26 +118,8 @@ pub unsafe fn init(active_table: &mut ActivePageTable) {
         ));

         *CONSOLE.lock() = Some(Console::new(width/8, height/16));
     }
-}

-pub unsafe fn init_ap(active_table: &mut ActivePageTable) {
-    active_table.identity_map(Frame::containing_address(PhysicalAddress::new(0x5200)), entry::PRESENT | entry::NO_EXECUTE);
-
-    let mode_info = &*(0x5200 as *const VBEModeInfo);
-    if mode_info.physbaseptr > 0 {
-        let width = mode_info.xresolution as usize;
-        let height = mode_info.yresolution as usize;
-        let start = mode_info.physbaseptr as usize;
-        let size = width * height;
-
-        {
-            let start_frame = Frame::containing_address(PhysicalAddress::new(start));
-            let end_frame = Frame::containing_address(PhysicalAddress::new(start + size * 4 - 1));
-            for frame in Frame::range_inclusive(start_frame, end_frame) {
-                active_table.identity_map(frame, /*actually sets PAT for write combining*/ entry::HUGE_PAGE | entry::PRESENT | entry::WRITABLE | entry::NO_EXECUTE);
-            }
-        }
-    }
+
+    active_table.unmap(Page::containing_address(VirtualAddress::new(0x5200)));
 }

 /// A display
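
For context: the display driver no longer identity-maps the framebuffer; it maps it at physical address + KERNEL_OFFSET and recovers the backing frame per page by subtracting KERNEL_OFFSET again. A minimal sketch of that arithmetic on a 64-bit target, using the base from the linker script change below with the 1 MiB load offset dropped (an assumption made for illustration):

    // Sketch of the higher-half translation the new display code relies on.
    const KERNEL_OFFSET: usize = 0xffff_ff00_0000_0000; // assumed base, per the linker script

    fn phys_to_virt(phys: usize) -> usize { phys + KERNEL_OFFSET }
    fn virt_to_phys(virt: usize) -> usize { virt - KERNEL_OFFSET }

    fn main() {
        let physbaseptr = 0xFD00_0000; // hypothetical VBE framebuffer address
        let onscreen = phys_to_virt(physbaseptr);
        assert_eq!(virt_to_phys(onscreen), physbaseptr);
        println!("framebuffer window: {:#x} -> {:#x}", onscreen, physbaseptr);
    }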

=============================== changed file ===============================

@@ -9,7 +9,3 @@ pub unsafe fn init(active_table: &mut ActivePageTable){
     display::init(active_table);
     ps2::init();
 }
-
-pub unsafe fn init_ap(active_table: &mut ActivePageTable) {
-    display::init_ap(active_table);
-}

=============================== changed file ===============================

@@ -3,7 +3,7 @@ pub unsafe extern fn syscall() {
     #[inline(never)]
     unsafe fn inner() {
         extern {
-            fn syscall(a: usize, b: usize, c: usize, d: usize, e: usize, f: usize) -> usize;
+            fn syscall(a: usize, b: usize, c: usize, d: usize, e: usize, f: usize, stack: usize) -> usize;
         }

         let mut a;
@@ -13,10 +13,11 @@ pub unsafe extern fn syscall() {
             let d;
             let e;
             let f;
-            asm!("" : "={rax}"(a), "={rbx}"(b), "={rcx}"(c), "={rdx}"(d), "={rsi}"(e), "={rdi}"(f)
+            let stack;
+            asm!("" : "={rax}"(a), "={rbx}"(b), "={rcx}"(c), "={rdx}"(d), "={rsi}"(e), "={rdi}"(f), "={rbp}"(stack)
                 : : : "intel", "volatile");

-            a = syscall(a, b, c, d, e, f);
+            a = syscall(a, b, c, d, e, f, stack);
         }

         asm!("" : : "{rax}"(a) : : "intel", "volatile");
@@ -36,3 +37,10 @@ pub unsafe extern fn syscall() {
         iretq"
         : : : : "intel", "volatile");
 }
+
+#[naked]
+pub unsafe extern fn clone_ret() -> usize {
+    asm!("pop rbp"
+        : : : : "intel", "volatile");
+    0
+}
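
How these pieces fit together, as far as the diff shows: the syscall entry stub now captures `rbp` (the kernel-stack frame at syscall entry) and threads it through as a seventh argument, and `clone` (later in this diff) copies the kernel stack and patches one word of the copy with the address of `clone_ret`, so the child resumes there, pops the frame, and returns 0. A toy, plain-memory model of that patch; the slot index and values are hypothetical:

    // Copy the parent's "kernel stack", then overwrite one word with the
    // address of a clone_ret stand-in, so only the child resumes through it.
    fn clone_ret_stub() -> usize { 0 } // stands in for the naked clone_ret above

    fn main() {
        let parent_kstack: Vec<usize> = vec![0x1111, 0x2222, 0x3333]; // fake saved frames
        let offset = 1; // where the patched return target lives (hypothetical)

        let mut child_kstack = parent_kstack.clone();
        child_kstack[offset] = clone_ret_stub as usize; // patch return target

        // The parent's stack is untouched; only the child resumes via clone_ret.
        assert_eq!(parent_kstack[1], 0x2222);
        assert_eq!(child_kstack[offset], clone_ret_stub as usize);
        println!("child will 'return' through {:#x}", child_kstack[offset]);
    }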

=============================== changed file ===============================

@@ -22,7 +22,8 @@ pub extern crate x86;
 // Because the memory map is so important to not be aliased, it is defined here, in one place
 // The lower 256 PML4 entries are reserved for userspace
 // Each PML4 entry references up to 512 GB of memory
-// The upper 256 are reserved for the kernel
+// The top (511) PML4 is reserved for recursive mapping
+// The second from the top (510) PML4 is reserved for the kernel

 /// The size of a single PML4
 pub const PML4_SIZE: usize = 0x0000_0080_0000_0000;
@@ -33,7 +34,7 @@ pub extern crate x86;
 pub const KERNEL_OFFSET: usize = RECURSIVE_PAGE_OFFSET - PML4_SIZE;

 /// Offset to kernel heap
-pub const KERNEL_HEAP_OFFSET: usize = KERNEL_OFFSET - PML4_SIZE;
+pub const KERNEL_HEAP_OFFSET: usize = KERNEL_OFFSET + PML4_SIZE/2;
 /// Size of kernel heap
 pub const KERNEL_HEAP_SIZE: usize = 64 * 1024 * 1024; // 64 MB
@@ -43,14 +44,26 @@ pub extern crate x86;
 /// Size of kernel percpu variables
 pub const KERNEL_PERCPU_SIZE: usize = 64 * 1024; // 64 KB

+/// Offset to user image
+pub const USER_OFFSET: usize = 0;
+
 /// Offset to user heap
-pub const USER_HEAP_OFFSET: usize = PML4_SIZE;
+pub const USER_HEAP_OFFSET: usize = USER_OFFSET + PML4_SIZE;

 /// Offset to user stack
 pub const USER_STACK_OFFSET: usize = USER_HEAP_OFFSET + PML4_SIZE;
 /// Size of user stack
 pub const USER_STACK_SIZE: usize = 1024 * 1024; // 1 MB

+/// Offset to user temporary image (used when cloning)
+pub const USER_TMP_OFFSET: usize = USER_STACK_OFFSET + PML4_SIZE;
+
+/// Offset to user temporary heap (used when cloning)
+pub const USER_TMP_HEAP_OFFSET: usize = USER_TMP_OFFSET + PML4_SIZE;
+
+/// Offset to user temporary stack (used when cloning)
+pub const USER_TMP_STACK_OFFSET: usize = USER_TMP_HEAP_OFFSET + PML4_SIZE;
+
 /// Print to console
 #[macro_export]
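
The arithmetic behind the new constants is worth spelling out: each user region is PML4_SIZE (512 GiB) apart, so the image, heap, stack, and their clone-time temporaries each occupy their own PML4 slot and can never collide. A runnable check:

    // Verify the layout this hunk defines, assuming PML4_SIZE as declared above.
    const PML4_SIZE: usize = 0x0000_0080_0000_0000;

    const USER_OFFSET: usize = 0;
    const USER_HEAP_OFFSET: usize = USER_OFFSET + PML4_SIZE;
    const USER_STACK_OFFSET: usize = USER_HEAP_OFFSET + PML4_SIZE;
    const USER_TMP_OFFSET: usize = USER_STACK_OFFSET + PML4_SIZE;
    const USER_TMP_HEAP_OFFSET: usize = USER_TMP_OFFSET + PML4_SIZE;
    const USER_TMP_STACK_OFFSET: usize = USER_TMP_HEAP_OFFSET + PML4_SIZE;

    fn main() {
        for &(name, addr) in [
            ("image", USER_OFFSET),
            ("heap", USER_HEAP_OFFSET),
            ("stack", USER_STACK_OFFSET),
            ("tmp image", USER_TMP_OFFSET),
            ("tmp heap", USER_TMP_HEAP_OFFSET),
            ("tmp stack", USER_TMP_STACK_OFFSET),
        ].iter() {
            // PML4 index = bits 39..47 of the virtual address
            println!("{:<9} {:#018x} (PML4 entry {})", name, addr, addr >> 39);
        }
    }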
@@ -126,7 +139,14 @@ macro_rules! interrupt_error {
         }

         // Push scratch registers
-        asm!("push rax
+        asm!("pop rax
+            pop rbx
+            pop rcx
+            pop rdx
+            pop rsi
+            pop rdi
+            hlt
+            push rax
             push rcx
             push rdx
             push rdi

=============================== changed file ===============================

@@ -1,8 +1,8 @@
 ENTRY(kstart)
 OUTPUT_FORMAT(elf64-x86-64)

-/* KERNEL_OFFSET = 0xffffff0000100000; */
-KERNEL_OFFSET = 0x100000;
+KERNEL_OFFSET = 0xffffff0000100000;
+/* KERNEL_OFFSET = 0x100000; */

 SECTIONS {
     . = KERNEL_OFFSET;

=============================== changed file ===============================

@@ -63,8 +63,6 @@ impl Mapper {
     /// Unmap a page
     pub fn unmap(&mut self, page: Page) {
-        assert!(self.translate(page.start_address()).is_some());
-
         let p1 = self.p4_mut()
             .next_table_mut(page.p4_index())
             .and_then(|p3| p3.next_table_mut(page.p3_index()))
@@ -76,6 +74,18 @@ impl Mapper {
         deallocate_frame(frame);
     }

+    /// Unmap a page, return frame without free
+    pub fn unmap_return(&mut self, page: Page) -> Frame {
+        let p1 = self.p4_mut()
+            .next_table_mut(page.p4_index())
+            .and_then(|p3| p3.next_table_mut(page.p3_index()))
+            .and_then(|p2| p2.next_table_mut(page.p2_index()))
+            .expect("mapping code does not support huge pages");
+        let frame = p1[page.p1_index()].pointed_frame().unwrap();
+        p1[page.p1_index()].set_unused();
+        frame
+    }
+
     pub fn translate_page(&self, page: Page) -> Option<Frame> {
         self.p4().next_table(page.p4_index())
             .and_then(|p3| p3.next_table(page.p3_index()))
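
The distinction `unmap_return` introduces, modeled in miniature: `unmap` deallocates the frame, while `unmap_return` hands it back so the caller can re-map it elsewhere (`Memory::move_to`, later in this diff, is the consumer). Toy types, not the kernel API:

    // unmap() frees the frame; unmap_return() keeps it alive for re-mapping.
    #[derive(Debug, PartialEq)]
    struct Frame(usize);

    struct Slot(Option<Frame>);

    impl Slot {
        fn unmap(&mut self) {
            let frame = self.0.take().unwrap();
            drop(frame); // deallocate_frame(frame) in the kernel
        }
        fn unmap_return(&mut self) -> Frame {
            self.0.take().unwrap() // frame stays alive for re-mapping
        }
    }

    fn main() {
        let mut a = Slot(Some(Frame(0x1000)));
        let moved = a.unmap_return();
        assert_eq!(moved, Frame(0x1000)); // caller now owns the frame

        let mut b = Slot(Some(Frame(0x2000)));
        b.unmap(); // frame freed, nothing returned
    }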

=============================== changed file ===============================

@@ -22,6 +22,59 @@ pub const ENTRY_COUNT: usize = 512;
 /// Size of pages
 pub const PAGE_SIZE: usize = 4096;

+/// Setup page attribute table
+unsafe fn init_pat() {
+    let uncacheable = 0;
+    let write_combining = 1;
+    let write_through = 4;
+    //let write_protected = 5;
+    let write_back = 6;
+    let uncached = 7;
+
+    let pat0 = write_back;
+    let pat1 = write_through;
+    let pat2 = uncached;
+    let pat3 = uncacheable;
+
+    let pat4 = write_combining;
+    let pat5 = pat1;
+    let pat6 = pat2;
+    let pat7 = pat3;
+
+    msr::wrmsr(msr::IA32_PAT, pat7 << 56 | pat6 << 48 | pat5 << 40 | pat4 << 32
+                            | pat3 << 24 | pat2 << 16 | pat1 << 8 | pat0);
+}
+
+/// Copy tdata, clear tbss, set TCB self pointer
+unsafe fn init_tcb(cpu_id: usize) -> usize {
+    extern {
+        /// The starting byte of the thread data segment
+        static mut __tdata_start: u8;
+        /// The ending byte of the thread data segment
+        static mut __tdata_end: u8;
+        /// The starting byte of the thread BSS segment
+        static mut __tbss_start: u8;
+        /// The ending byte of the thread BSS segment
+        static mut __tbss_end: u8;
+    }
+
+    let tcb_offset;
+    {
+        let size = & __tbss_end as *const _ as usize - & __tdata_start as *const _ as usize;
+        let tbss_offset = & __tbss_start as *const _ as usize - & __tdata_start as *const _ as usize;
+
+        let start = ::KERNEL_PERCPU_OFFSET + ::KERNEL_PERCPU_SIZE * cpu_id;
+        let end = start + size;
+        tcb_offset = end - mem::size_of::<usize>();
+
+        ::externs::memcpy(start as *mut u8, & __tdata_start as *const u8, tbss_offset);
+        ::externs::memset((start + tbss_offset) as *mut u8, 0, size - tbss_offset);
+
+        *(tcb_offset as *mut usize) = end;
+    }
+    tcb_offset
+}
+
 /// Initialize paging
 ///
 /// Returns page table and thread control block offset
@@ -53,6 +106,8 @@ pub unsafe fn init(cpu_id: usize, stack_start: usize, stack_end: usize) -> (Acti
         static mut __bss_end: u8;
     }

+    init_pat();
+
     let mut active_table = ActivePageTable::new();
     let mut temporary_page = TemporaryPage::new(Page::containing_address(VirtualAddress::new(0x8_0000_0000)));
@@ -78,25 +133,23 @@ pub unsafe fn init(cpu_id: usize, stack_start: usize, stack_end: usize) -> (Acti
             }
         }

-        let mut remap = |start: usize, end: usize, flags: EntryFlags| {
+        let mut remap = |start: usize, end: usize, flags: EntryFlags, offset: usize| {
             if end > start {
                 let start_frame = Frame::containing_address(PhysicalAddress::new(start));
                 let end_frame = Frame::containing_address(PhysicalAddress::new(end - 1));
                 for frame in Frame::range_inclusive(start_frame, end_frame) {
-                    mapper.identity_map(frame.clone(), flags);
-
-                    //let page = Page::containing_address(VirtualAddress::new(frame.start_address().get() + ::KERNEL_OFFSET));
-                    //mapper.map_to(page, frame, flags);
+                    let page = Page::containing_address(VirtualAddress::new(frame.start_address().get() + offset));
+                    mapper.map_to(page, frame, flags);
                 }
             }
         };

         // Remap stack writable, no execute
-        remap(stack_start, stack_end, PRESENT | NO_EXECUTE | WRITABLE);
+        remap(stack_start, stack_end, PRESENT | NO_EXECUTE | WRITABLE, 0);

         // Remap a section with `flags`
         let mut remap_section = |start: &u8, end: &u8, flags: EntryFlags| {
-            remap(start as *const _ as usize, end as *const _ as usize, flags);
+            remap(start as *const _ as usize - ::KERNEL_OFFSET, end as *const _ as usize - ::KERNEL_OFFSET, flags, ::KERNEL_OFFSET);
         };
         // Remap text read-only
         remap_section(& __text_start, & __text_end, PRESENT);
@@ -111,45 +164,71 @@ pub unsafe fn init(cpu_id: usize, stack_start: usize, stack_end: usize) -> (Acti
         }
     });

-    let uncacheable = 0;
-    let write_combining = 1;
-    let write_through = 4;
-    //let write_protected = 5;
-    let write_back = 6;
-    let uncached = 7;
-
-    let pat0 = write_back;
-    let pat1 = write_through;
-    let pat2 = uncached;
-    let pat3 = uncacheable;
-
-    let pat4 = write_combining;
-    let pat5 = pat1;
-    let pat6 = pat2;
-    let pat7 = pat3;
-
-    msr::wrmsr(msr::IA32_PAT, pat7 << 56 | pat6 << 48 | pat5 << 40 | pat4 << 32
-                            | pat3 << 24 | pat2 << 16 | pat1 << 8 | pat0);
-
     active_table.switch(new_table);

-    // Copy tdata, clear tbss, set TCB self pointer
-    let tcb_offset;
-    {
-        let size = & __tbss_end as *const _ as usize - & __tdata_start as *const _ as usize;
-        let tbss_offset = & __tbss_start as *const _ as usize - & __tdata_start as *const _ as usize;
-
-        let start = ::KERNEL_PERCPU_OFFSET + ::KERNEL_PERCPU_SIZE * cpu_id;
-        let end = start + size;
-        tcb_offset = end - mem::size_of::<usize>();
-
-        ::externs::memcpy(start as *mut u8, & __tdata_start as *const u8, tbss_offset);
-        ::externs::memset((start + tbss_offset) as *mut u8, 0, size - tbss_offset);
-
-        *(tcb_offset as *mut usize) = end;
-    }
-
-    (active_table, tcb_offset)
+    (active_table, init_tcb(cpu_id))
+}
+
+pub unsafe fn init_ap(cpu_id: usize, stack_start: usize, stack_end: usize, kernel_table: usize) -> (ActivePageTable, usize) {
+    extern {
+        /// The starting byte of the thread data segment
+        static mut __tdata_start: u8;
+        /// The ending byte of the thread data segment
+        static mut __tdata_end: u8;
+        /// The starting byte of the thread BSS segment
+        static mut __tbss_start: u8;
+        /// The ending byte of the thread BSS segment
+        static mut __tbss_end: u8;
+    }
+
+    init_pat();
+
+    let mut active_table = ActivePageTable::new();
+    let mut temporary_page = TemporaryPage::new(Page::containing_address(VirtualAddress::new(0x8_0000_0000)));
+
+    let mut new_table = {
+        let frame = allocate_frame().expect("no more frames in paging::init new_table");
+        InactivePageTable::new(frame, &mut active_table, &mut temporary_page)
+    };
+
+    active_table.with(&mut new_table, &mut temporary_page, |mapper| {
+        // Copy kernel mapping
+        let kernel_frame = Frame::containing_address(PhysicalAddress::new(kernel_table));
+        mapper.p4_mut()[510].set(kernel_frame, entry::PRESENT | entry::WRITABLE);
+
+        // Map tdata and tbss
+        {
+            let size = & __tbss_end as *const _ as usize - & __tdata_start as *const _ as usize;
+
+            let start = ::KERNEL_PERCPU_OFFSET + ::KERNEL_PERCPU_SIZE * cpu_id;
+            let end = start + size;
+
+            let start_page = Page::containing_address(VirtualAddress::new(start));
+            let end_page = Page::containing_address(VirtualAddress::new(end - 1));
+            for page in Page::range_inclusive(start_page, end_page) {
+                mapper.map(page, PRESENT | NO_EXECUTE | WRITABLE);
+            }
+        }
+
+        let mut remap = |start: usize, end: usize, flags: EntryFlags, offset: usize| {
+            if end > start {
+                let start_frame = Frame::containing_address(PhysicalAddress::new(start));
+                let end_frame = Frame::containing_address(PhysicalAddress::new(end - 1));
+                for frame in Frame::range_inclusive(start_frame, end_frame) {
+                    let page = Page::containing_address(VirtualAddress::new(frame.start_address().get() + offset));
+                    mapper.map_to(page, frame, flags);
+                }
+            }
+        };
+
+        // Remap stack writable, no execute
+        remap(stack_start, stack_end, PRESENT | NO_EXECUTE | WRITABLE, 0);
+    });
+
+    active_table.switch(new_table);
+
+    (active_table, init_tcb(cpu_id))
 }

 pub struct ActivePageTable {
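
For readability, here is the value `init_pat` writes, computed in plain Rust. A page selects a PAT entry through its PWT, PCD, and PAT bits, which is why the framebuffer mapping passes entry::HUGE_PAGE "for write combining": bit 7 doubles as the PAT bit on 4 KiB entries, and PAT=1 with PCD=PWT=0 selects entry 4 below.

    // Sketch of the IA32_PAT encoding: one byte of memory type per PAT entry.
    fn main() {
        let (uncacheable, write_combining, write_through, write_back, uncached) =
            (0u64, 1u64, 4u64, 6u64, 7u64);

        let pats = [
            write_back,      // PAT0: PWT=0 PCD=0 PAT=0 (the default)
            write_through,   // PAT1: PWT=1
            uncached,        // PAT2: PCD=1
            uncacheable,     // PAT3: PCD=1 PWT=1
            write_combining, // PAT4: PAT=1 <- selected by entry::HUGE_PAGE on PTEs
            write_through,   // PAT5
            uncached,        // PAT6
            uncacheable,     // PAT7
        ];

        let msr = pats.iter().enumerate().fold(0u64, |acc, (i, &ty)| acc | ty << (8 * i));
        assert_eq!(msr, 0x0007_0401_0007_0406);
        println!("IA32_PAT = {:#018x}", msr);
    }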

=============================== changed file ===============================

@@ -28,7 +28,7 @@ static mut TDATA_TEST_NONZERO: usize = 0xFFFFFFFFFFFFFFFF;
 pub static AP_READY: AtomicBool = ATOMIC_BOOL_INIT;
 static BSP_READY: AtomicBool = ATOMIC_BOOL_INIT;
-static HEAP_TABLE: AtomicUsize = ATOMIC_USIZE_INIT;
+static KERNEL_TABLE: AtomicUsize = ATOMIC_USIZE_INIT;

 extern {
     /// Kernel main function
@@ -65,7 +65,7 @@ pub unsafe extern fn kstart() -> ! {
         }

         // Initialize memory management
-        memory::init(0, &__end as *const u8 as usize);
+        memory::init(0, &__end as *const u8 as usize - ::KERNEL_OFFSET);

         // TODO: allocate a stack
         let stack_start = 0x00080000;
@@ -93,9 +93,9 @@ pub unsafe extern fn kstart() -> ! {
         // Reset AP variables
         AP_READY.store(false, Ordering::SeqCst);
         BSP_READY.store(false, Ordering::SeqCst);
-        HEAP_TABLE.store(0, Ordering::SeqCst);
+        KERNEL_TABLE.store(0, Ordering::SeqCst);

-        // Map heap
+        // Setup kernel heap
         {
             // Map heap pages
             let heap_start_page = Page::containing_address(VirtualAddress::new(::KERNEL_HEAP_OFFSET));
@@ -106,24 +106,26 @@ pub unsafe extern fn kstart() -> ! {
             // Init the allocator
             allocator::init(::KERNEL_HEAP_OFFSET, ::KERNEL_HEAP_SIZE);
+        }

-            // Send heap page table to APs
-            let index = heap_start_page.p4_index();
+        // Initialize devices
+        device::init(&mut active_table);

+        // Send kernel page table to APs
+        {
+            let index = Page::containing_address(VirtualAddress::new(::KERNEL_OFFSET)).p4_index();
             let p4 = active_table.p4();
             {
                 let entry = &p4[index];
                 if let Some(frame) = entry.pointed_frame() {
-                    HEAP_TABLE.store(frame.start_address().get(), Ordering::SeqCst);
+                    KERNEL_TABLE.store(frame.start_address().get(), Ordering::SeqCst);
                 } else {
-                    panic!("heap does not have PML4 entry");
+                    panic!("kernel does not have PML4 entry");
                 }
             }
         }

-        // Initialize devices
-        device::init(&mut active_table);
-
         // Read ACPI tables, starts APs
         acpi::init(&mut active_table);
@@ -139,8 +141,14 @@ pub unsafe extern fn kstart_ap(cpu_id: usize, page_table: usize, stack_start: us
     assert_eq!(BSS_TEST_ZERO, 0);
     assert_eq!(DATA_TEST_NONZERO, 0xFFFFFFFFFFFFFFFF);

+    // Retrieve kernel table entry
+    while KERNEL_TABLE.load(Ordering::SeqCst) == 0 {
+        interrupt::pause();
+    }
+    let kernel_table = KERNEL_TABLE.load(Ordering::SeqCst);
+
     // Initialize paging
-    let (mut active_table, tcb_offset) = paging::init(cpu_id, stack_start, stack_end);
+    let (mut active_table, tcb_offset) = paging::init_ap(cpu_id, stack_start, stack_end, kernel_table);

     // Set up GDT for AP
     gdt::init(tcb_offset, stack_end);
@@ -158,23 +166,6 @@ pub unsafe extern fn kstart_ap(cpu_id: usize, page_table: usize, stack_start: us
         assert_eq!(TDATA_TEST_NONZERO, 0xFFFFFFFFFFFFFFFE);
     }

-    // Copy heap PML4
-    {
-        while HEAP_TABLE.load(Ordering::SeqCst) == 0 {
-            interrupt::pause();
-        }
-        let frame = Frame::containing_address(PhysicalAddress::new(HEAP_TABLE.load(Ordering::SeqCst)));
-
-        let page = Page::containing_address(VirtualAddress::new(::KERNEL_HEAP_OFFSET));
-        let p4 = active_table.p4_mut();
-        let entry = &mut p4[page.p4_index()];
-        assert!(entry.is_unused());
-        entry.set(frame, entry::PRESENT | entry::WRITABLE);
-    }
-
-    // Init devices for AP
-    device::init_ap(&mut active_table);
-
     AP_READY.store(true, Ordering::SeqCst);
 }
@@ -201,7 +192,7 @@ pub unsafe fn usermode(ip: usize, sp: usize) -> ! {
         : // No output because it never returns
         : "{rax}"(gdt::GDT_USER_DATA << 3 | 3), // Stack segment
           "{rbx}"(sp), // Stack pointer
-          "{rcx}"(3 << 12 | 1 << 9), // Flags - Set IOPL and interrupt enable flag
+          "{rcx}"(3 << 12/* | 1 << 9*/), // Flags - Set IOPL and interrupt enable flag
           "{rdx}"(gdt::GDT_USER_CODE << 3 | 3), // Code segment
           "{rsi}"(ip) // IP
         : // No clobers because it never returns
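
The BSP/AP handoff above is a publish/subscribe on a single AtomicUsize: the BSP publishes the (nonzero) frame of the kernel's PML4 entry, which, with KERNEL_HEAP_OFFSET now placed at KERNEL_OFFSET + PML4_SIZE/2, covers kernel image and heap at once, and each AP spins with pause() until it sees it. The same shape in userspace, as a runnable sketch:

    use std::sync::atomic::{AtomicUsize, Ordering};
    use std::thread;

    static KERNEL_TABLE: AtomicUsize = AtomicUsize::new(0);

    fn main() {
        let ap = thread::spawn(|| {
            // AP: wait until the BSP publishes a nonzero table address
            while KERNEL_TABLE.load(Ordering::SeqCst) == 0 {
                std::hint::spin_loop(); // stands in for interrupt::pause()
            }
            KERNEL_TABLE.load(Ordering::SeqCst)
        });

        KERNEL_TABLE.store(0x1000, Ordering::SeqCst); // BSP: publish
        assert_eq!(ap.join().unwrap(), 0x1000);
    }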

=============================== changed file ===============================

@@ -126,6 +126,7 @@ long_mode:
     ;rust init
     mov rax, [kernel_base + 0x18]
+    xchg bx, bx
     jmp rax

 long_mode_ap:

=============================== changed file ===============================

@@ -115,8 +115,8 @@ vesa:
 .minx dw 640
 .miny dw 480
 .required:
-.requiredx dw 0 ;1024 ;USE THESE WITH CAUTION
-.requiredy dw 0 ;768
+.requiredx dw 1024 ;USE THESE WITH CAUTION
+.requiredy dw 768
 .requiredmode dw 0

 .modeok db ": Is this OK?(y/n)",10,13,0

=============================== changed file ===============================

@@ -1,4 +1,4 @@
-use std::{env, thread};
+use std::env;
 use std::fs::File;
 use std::io::{BufRead, BufReader};
 use std::process::Command;
@@ -47,8 +47,4 @@ pub fn main() {
             }
         }
     }
-
-    loop {
-        thread::yield_now();
-    }
 }

=============================== changed file ===============================

@@ -1,6 +1,7 @@
 //! File struct

 /// A file
+//TODO: Close on exec
 #[derive(Copy, Clone, Debug)]
 pub struct File {
     /// The scheme that this file refers to

=============================== changed file ===============================

@@ -1,6 +1,7 @@
 use arch::externs::memset;
-use arch::paging::{ActivePageTable, Page, PageIter, VirtualAddress};
-use arch::paging::entry::EntryFlags;
+use arch::paging::{ActivePageTable, InactivePageTable, Page, PageIter, VirtualAddress};
+use arch::paging::entry::{self, EntryFlags};
+use arch::paging::temporary_page::TemporaryPage;

 #[derive(Debug)]
 pub struct Memory {
@@ -30,13 +31,17 @@ impl Memory {
         self.size
     }

+    pub fn flags(&self) -> EntryFlags {
+        self.flags
+    }
+
     pub fn pages(&self) -> PageIter {
         let start_page = Page::containing_address(self.start);
         let end_page = Page::containing_address(VirtualAddress::new(self.start.get() + self.size - 1));
         Page::range_inclusive(start_page, end_page)
     }

-    pub fn map(&mut self, flush: bool, clear: bool) {
+    fn map(&mut self, flush: bool, clear: bool) {
         let mut active_table = unsafe { ActivePageTable::new() };

         let mut flush_all = false;
@@ -56,12 +61,12 @@ impl Memory {
         }

         if clear {
-            assert!(flush);
+            assert!(flush && self.flags.contains(entry::WRITABLE));
             unsafe { memset(self.start_address().get() as *mut u8, 0, self.size); }
         }
     }

-    pub fn unmap(&mut self, flush: bool) {
+    fn unmap(&mut self, flush: bool) {
         let mut active_table = unsafe { ActivePageTable::new() };

         let mut flush_all = false;
@@ -80,6 +85,34 @@ impl Memory {
         }
     }

+    /// A complicated operation to move a piece of memory to a new page table
+    /// It also allows for changing the address at the same time
+    pub fn move_to(&mut self, new_start: VirtualAddress, new_table: &mut InactivePageTable, temporary_page: &mut TemporaryPage, flush: bool) {
+        let mut active_table = unsafe { ActivePageTable::new() };
+
+        let mut flush_all = false;
+
+        for page in self.pages() {
+            let frame = active_table.unmap_return(page);
+
+            active_table.with(new_table, temporary_page, |mapper| {
+                let new_page = Page::containing_address(VirtualAddress::new(page.start_address().get() - self.start.get() + new_start.get()));
+                mapper.map_to(new_page, frame, self.flags);
+            });
+
+            if flush {
+                //active_table.flush(page);
+                flush_all = true;
+            }
+        }
+
+        if flush_all {
+            active_table.flush_all();
+        }
+
+        self.start = new_start;
+    }
+
     pub fn remap(&mut self, new_flags: EntryFlags, flush: bool) {
         let mut active_table = unsafe { ActivePageTable::new() };

=============================== changed file ===============================

@@ -141,24 +141,40 @@ pub unsafe fn switch() {
     let mut to_ptr = 0 as *mut Context;

-    for (_pid, context_lock) in contexts().map.iter() {
-        let mut context = context_lock.write();
-        if ! context.running && ! context.blocked && ! context.exited {
-            to_ptr = context.deref_mut() as *mut Context;
-            break;
+    for (pid, context_lock) in contexts().map.iter() {
+        if *pid > (*from_ptr).id {
+            let mut context = context_lock.write();
+            if ! context.running && ! context.blocked && ! context.exited {
+                to_ptr = context.deref_mut() as *mut Context;
+                break;
+            }
+        }
+    }
+
+    if to_ptr as usize == 0 {
+        for (pid, context_lock) in contexts().map.iter() {
+            if *pid < (*from_ptr).id {
+                let mut context = context_lock.write();
+                if ! context.running && ! context.blocked && ! context.exited {
+                    to_ptr = context.deref_mut() as *mut Context;
+                    break;
+                }
+            }
         }
     }

     if to_ptr as usize == 0 {
         // TODO: Sleep, wait for interrupt
         // Unset global lock if no context found
         arch::context::CONTEXT_SWITCH_LOCK.store(false, Ordering::SeqCst);
+        println!("No to_ptr");
         return;
     }

     (&mut *from_ptr).running = false;
     (&mut *to_ptr).running = true;
+    if let Some(ref stack) = (*to_ptr).kstack {
+        arch::gdt::TSS.rsp[0] = (stack.as_ptr() as usize + stack.len() - 256) as u64;
+    }
     CONTEXT_ID.store((&mut *to_ptr).id, Ordering::SeqCst);

     (&mut *from_ptr).arch.switch_to(&mut (&mut *to_ptr).arch);
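
The two loops implement a round-robin pick in pid order: first a runnable context with a pid above the current one, then wrap around to lower pids. An equivalent, runnable model over a BTreeMap (the kernel's context list is likewise an ordered map):

    use std::collections::BTreeMap;

    fn pick_next(contexts: &BTreeMap<usize, bool>, current: usize) -> Option<usize> {
        // `true` means runnable (not running, blocked, or exited)
        contexts.range(current + 1..).chain(contexts.range(..current))
            .find(|&(_, &runnable)| runnable)
            .map(|(&pid, _)| pid)
    }

    fn main() {
        let mut contexts = BTreeMap::new();
        contexts.insert(1, false); // blocked
        contexts.insert(2, false); // current, running
        contexts.insert(3, true);
        contexts.insert(5, true);

        assert_eq!(pick_next(&contexts, 2), Some(3)); // prefers pids after current
        assert_eq!(pick_next(&contexts, 5), Some(3)); // wraps around
    }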

=============================== changed file ===============================

@@ -55,8 +55,6 @@ impl<'a> Elf<'a> {
     /// Test function to run. Remove and replace with proper syscall
     pub fn run(self) -> SysResult<!> {
-        let stack_addr = arch::USER_STACK_OFFSET;
-        let stack_size = arch::USER_STACK_SIZE;
         {
             let contexts = context::contexts();
             let context_lock = contexts.current().ok_or(Error::NoProcess)?;
@@ -105,8 +103,8 @@ impl<'a> Elf<'a> {

             // Map stack
             context.stack = Some(context::memory::Memory::new(
-                VirtualAddress::new(stack_addr),
-                stack_size,
+                VirtualAddress::new(arch::USER_STACK_OFFSET),
+                arch::USER_STACK_SIZE,
                 entry::NO_EXECUTE | entry::WRITABLE | entry::USER_ACCESSIBLE,
                 true,
                 true
@@ -114,7 +112,7 @@ impl<'a> Elf<'a> {
         }

         // Go to usermode
-        unsafe { usermode(self.entry(), stack_addr + stack_size - 256); }
+        unsafe { usermode(self.entry(), arch::USER_STACK_OFFSET + arch::USER_STACK_SIZE - 256); }
     }
 }

=============================== changed file ===============================

@@ -100,6 +100,8 @@ extern crate bitflags;
 extern crate goblin;
 extern crate spin;

+use core::sync::atomic::{AtomicUsize, ATOMIC_USIZE_INIT, Ordering};
+
 /// Context management
 pub mod context;
@@ -117,6 +119,14 @@ pub mod syscall;
 #[cfg(test)]
 pub mod tests;

+#[thread_local]
+static CPU_ID: AtomicUsize = ATOMIC_USIZE_INIT;
+
+#[inline(always)]
+pub fn cpu_id() -> usize {
+    CPU_ID.load(Ordering::Relaxed)
+}
+
 pub extern fn userspace_init() {
     assert_eq!(syscall::open(b"debug:", 0), Ok(0));
     assert_eq!(syscall::open(b"debug:", 0), Ok(1));
@@ -129,6 +139,8 @@ pub extern fn userspace_init() {

 #[no_mangle]
 pub extern fn kmain() {
+    CPU_ID.store(0, Ordering::SeqCst);
+
     context::init();

     let pid = syscall::getpid();
@@ -144,15 +156,19 @@ pub extern fn kmain() {
         }
     }

-    unsafe { context::switch(); }
-
     loop {
-        unsafe { interrupt::enable_and_halt(); }
+        unsafe {
+            interrupt::disable();
+            context::switch();
+            interrupt::enable_and_halt();
+        }
     }
 }

 #[no_mangle]
 pub extern fn kmain_ap(id: usize) {
+    CPU_ID.store(id, Ordering::SeqCst);
+
     context::init();

     let pid = syscall::getpid();

=============================== changed file ===============================

@@ -105,7 +105,10 @@ pub fn convert_slice_mut<T>(ptr: *mut T, len: usize) -> Result<&'static mut [T]>
     Ok(unsafe { slice::from_raw_parts_mut(ptr, len) })
 }

-pub fn handle(a: usize, b: usize, c: usize, d: usize, e: usize, _f: usize) -> Result<usize> {
+#[no_mangle]
+pub extern fn syscall(a: usize, b: usize, c: usize, d: usize, e: usize, f: usize, stack: usize) -> usize {
+    #[inline(always)]
+    fn inner(a: usize, b: usize, c: usize, d: usize, e: usize, _f: usize, stack: usize) -> Result<usize> {
     match Call::from(a) {
         Ok(call) => match call {
             Call::Exit => exit(b),
@@ -118,7 +121,7 @@
             Call::Dup => dup(b),
             Call::Brk => brk(b),
             Call::Iopl => iopl(b),
-            Call::Clone => clone(b),
+            Call::Clone => clone(b, stack),
             Call::SchedYield => sched_yield()
         },
         Err(err) => {
@@ -128,9 +131,7 @@
         }
     }
+    }

-#[no_mangle]
-pub extern fn syscall(a: usize, b: usize, c: usize, d: usize, e: usize, f: usize) -> usize {
-    match handle(a, b, c, d, e, f) {
+    match inner(a, b, c, d, e, f, stack) {
         Ok(value) => value,
         Err(value) => (-(value as isize)) as usize
     }

=============================== changed file ===============================

@@ -1,11 +1,15 @@
 ///! Process syscalls
+use core::mem;
 use core::str;

 use arch;
-use arch::paging::{VirtualAddress, entry};
+use arch::memory::allocate_frame;
+use arch::paging::{ActivePageTable, InactivePageTable, Page, VirtualAddress, entry};
+use arch::paging::temporary_page::TemporaryPage;
 use context;
 use elf;
+use scheme;
 use syscall::{self, Error, Result};

 pub fn brk(address: usize) -> Result<usize> {
@@ -42,9 +46,195 @@ pub fn brk(address: usize) -> Result<usize> {
     }
 }

-pub fn clone(flags: usize) -> Result<usize> {
-    println!("Clone {:X}", flags);
-    Ok(0)
+pub fn clone(flags: usize, stack_base: usize) -> Result<usize> {
+    //TODO: Implement flags
+    //TODO: Copy on write?
+    println!("Clone {:X}: {:X}", flags, stack_base);
+
+    let pid;
+    {
+        let arch;
+        let mut kstack_option = None;
+        let mut offset = 0;
+        let mut image = vec![];
+        let mut heap_option = None;
+        let mut stack_option = None;
+        let mut files = vec![];
+
+        // Copy from old process
+        {
+            let contexts = context::contexts();
+            let context_lock = contexts.current().ok_or(Error::NoProcess)?;
+            let context = context_lock.read();
+
+            arch = context.arch.clone();
+
+            if let Some(ref stack) = context.kstack {
+                offset = stack_base - stack.as_ptr() as usize - mem::size_of::<usize>(); // Add clone ret
+                let mut new_stack = stack.clone();
+                unsafe {
+                    let func_ptr = new_stack.as_mut_ptr().offset(offset as isize);
+                    *(func_ptr as *mut usize) = arch::interrupt::syscall::clone_ret as usize;
+                }
+                kstack_option = Some(new_stack);
+            }
+
+            for memory in context.image.iter() {
+                let mut new_memory = context::memory::Memory::new(
+                    VirtualAddress::new(memory.start_address().get() + arch::USER_TMP_OFFSET),
+                    memory.size(),
+                    entry::PRESENT | entry::NO_EXECUTE | entry::WRITABLE,
+                    true,
+                    false
+                );
+                unsafe {
+                    arch::externs::memcpy(new_memory.start_address().get() as *mut u8,
+                                          memory.start_address().get() as *const u8,
+                                          memory.size());
+                }
+                new_memory.remap(memory.flags(), true);
+                image.push(new_memory);
+            }
+
+            if let Some(ref heap) = context.heap {
+                let mut new_heap = context::memory::Memory::new(
+                    VirtualAddress::new(arch::USER_TMP_HEAP_OFFSET),
+                    heap.size(),
+                    entry::PRESENT | entry::NO_EXECUTE | entry::WRITABLE,
+                    true,
+                    false
+                );
+                unsafe {
+                    arch::externs::memcpy(new_heap.start_address().get() as *mut u8,
+                                          heap.start_address().get() as *const u8,
+                                          heap.size());
+                }
+                new_heap.remap(heap.flags(), true);
+                heap_option = Some(new_heap);
+            }
+
+            if let Some(ref stack) = context.stack {
+                let mut new_stack = context::memory::Memory::new(
+                    VirtualAddress::new(arch::USER_TMP_STACK_OFFSET),
+                    stack.size(),
+                    entry::PRESENT | entry::NO_EXECUTE | entry::WRITABLE,
+                    true,
+                    false
+                );
+                unsafe {
+                    arch::externs::memcpy(new_stack.start_address().get() as *mut u8,
+                                          stack.start_address().get() as *const u8,
+                                          stack.size());
+                }
+                new_stack.remap(stack.flags(), true);
+                stack_option = Some(new_stack);
+            }
+
+            for (fd, file_option) in context.files.iter().enumerate() {
+                if let Some(file) = *file_option {
+                    let result = {
+                        let schemes = scheme::schemes();
+                        let scheme_mutex = schemes.get(file.scheme).ok_or(Error::BadFile)?;
+                        let result = scheme_mutex.lock().dup(file.number);
+                        result
+                    };
+                    match result {
+                        Ok(new_number) => {
+                            files.push(Some(context::file::File { scheme: file.scheme, number: new_number }));
+                        },
+                        Err(err) => {
+                            println!("clone: failed to dup {}: {:?}", fd, err);
+                        }
+                    }
+                } else {
+                    files.push(None);
+                }
+            }
+        }

+        // Set up new process
+        {
+            let mut contexts = context::contexts_mut();
+            let context_lock = contexts.new_context()?;
+            let mut context = context_lock.write();
+
+            pid = context.id;
+
+            context.arch = arch;
+
+            let mut active_table = unsafe { ActivePageTable::new() };
+
+            let mut temporary_page = TemporaryPage::new(Page::containing_address(VirtualAddress::new(0x8_0000_0000)));
+
+            let mut new_table = {
+                let frame = allocate_frame().expect("no more frames in syscall::clone new_table");
+                InactivePageTable::new(frame, &mut active_table, &mut temporary_page)
+            };
+
+            // Copy kernel mapping
+            {
+                let kernel_frame = active_table.p4()[510].pointed_frame().expect("kernel table not mapped");
+                active_table.with(&mut new_table, &mut temporary_page, |mapper| {
+                    mapper.p4_mut()[510].set(kernel_frame, entry::PRESENT | entry::WRITABLE);
+                });
+            }
+
+            // Copy percpu mapping
+            {
+                extern {
+                    /// The starting byte of the thread data segment
+                    static mut __tdata_start: u8;
+                    /// The ending byte of the thread BSS segment
+                    static mut __tbss_end: u8;
+                }
+
+                let size = unsafe { & __tbss_end as *const _ as usize - & __tdata_start as *const _ as usize };
+
+                let start = arch::KERNEL_PERCPU_OFFSET + arch::KERNEL_PERCPU_SIZE * ::cpu_id();
+                let end = start + size;
+
+                let start_page = Page::containing_address(VirtualAddress::new(start));
+                let end_page = Page::containing_address(VirtualAddress::new(end - 1));
+                for page in Page::range_inclusive(start_page, end_page) {
+                    let frame = active_table.translate_page(page).expect("kernel percpu not mapped");
+                    active_table.with(&mut new_table, &mut temporary_page, |mapper| {
+                        mapper.map_to(page, frame, entry::PRESENT | entry::NO_EXECUTE | entry::WRITABLE);
+                    });
+                }
+            }
+
+            if let Some(stack) = kstack_option.take() {
+                context.arch.set_stack(stack.as_ptr() as usize + offset);
+                context.kstack = Some(stack);
+            }
+
+            for memory in image.iter_mut() {
+                let start = VirtualAddress::new(memory.start_address().get() - arch::USER_TMP_OFFSET + arch::USER_OFFSET);
+                memory.move_to(start, &mut new_table, &mut temporary_page, true);
+            }
+            context.image = image;
+
+            if let Some(mut heap) = heap_option.take() {
+                heap.move_to(VirtualAddress::new(arch::USER_HEAP_OFFSET), &mut new_table, &mut temporary_page, true);
+                context.heap = Some(heap);
+            }
+
+            if let Some(mut stack) = stack_option.take() {
+                stack.move_to(VirtualAddress::new(arch::USER_STACK_OFFSET), &mut new_table, &mut temporary_page, true);
+                context.stack = Some(stack);
+            }
+
+            context.files = files;
+
+            context.arch.set_page_table(unsafe { new_table.address() });
+            context.blocked = false;
+        }
+    }
+
+    unsafe { context::switch(); }
+
+    Ok(pid)
 }

 pub fn exit(status: usize) -> ! {
@@ -85,7 +275,9 @@ pub fn exec(path: &[u8], _args: &[[usize; 2]]) -> Result<usize> {
     let _ = syscall::close(file);

     match elf::Elf::from(&data) {
-        Ok(elf) => elf.run().and(Ok(0)),
+        Ok(elf) => {
+            elf.run().and(Ok(0))
+        },
         Err(err) => {
             println!("failed to execute {}: {}", unsafe { str::from_utf8_unchecked(path) }, err);
             Err(Error::NoExec)

=============================== changed file ===============================

@@ -110,7 +110,7 @@ pub fn chdir(path: &str) -> Result<usize> {
 }

 pub unsafe fn clone(flags: usize) -> Result<usize> {
-    syscall1(SYS_CLONE, flags)
+    syscall1_clobber(SYS_CLONE, flags)
 }

 pub fn close(fd: usize) -> Result<usize> {

=============================== changed file ===============================

@@ -20,6 +20,17 @@ pub unsafe fn syscall1(mut a: usize, b: usize) -> Result<usize> {
     Error::demux(a)
 }

+// Clobbers all registers - special for clone
+pub unsafe fn syscall1_clobber(mut a: usize, b: usize) -> Result<usize> {
+    asm!("int 0x80"
+        : "={eax}"(a)
+        : "{eax}"(a), "{ebx}"(b)
+        : "memory", "ebx", "ecx", "edx", "esi", "edi"
+        : "intel", "volatile");
+
+    Error::demux(a)
+}
+
 pub unsafe fn syscall2(mut a: usize, b: usize, c: usize) -> Result<usize> {
     asm!("int 0x80"
         : "={eax}"(a)

=============================== changed file ===============================

@@ -20,6 +20,17 @@ pub unsafe fn syscall1(mut a: usize, b: usize) -> Result<usize> {
     Error::demux(a)
 }

+// Clobbers all registers - special for clone
+pub unsafe fn syscall1_clobber(mut a: usize, b: usize) -> Result<usize> {
+    asm!("int 0x80"
+        : "={rax}"(a)
+        : "{rax}"(a), "{rbx}"(b)
+        : "memory", "rbx", "rcx", "rdx", "rsi", "rdi", "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"
+        : "intel", "volatile");
+
+    Error::demux(a)
+}
+
 pub unsafe fn syscall2(mut a: usize, b: usize, c: usize) -> Result<usize> {
     asm!("int 0x80"
         : "={rax}"(a)