diff --git a/arch/x86_64/src/acpi/mod.rs b/arch/x86_64/src/acpi/mod.rs
index ffe4ae9..eccd165 100644
--- a/arch/x86_64/src/acpi/mod.rs
+++ b/arch/x86_64/src/acpi/mod.rs
@@ -43,6 +43,13 @@ pub fn init_sdt(sdt: &'static Sdt, active_table: &mut ActivePageTable) {
         println!("    XAPIC {}: {:>08X}", me, local_apic.address);
     }
 
+    let trampoline_frame = Frame::containing_address(PhysicalAddress::new(TRAMPOLINE));
+    let trampoline_page = Page::containing_address(VirtualAddress::new(TRAMPOLINE));
+
+    // Map trampoline
+    active_table.map_to(trampoline_page, trampoline_frame, entry::PRESENT | entry::WRITABLE);
+    active_table.flush(trampoline_page);
+
     for madt_entry in madt.iter() {
         println!("      {:?}", madt_entry);
         match madt_entry {
@@ -50,13 +57,6 @@ pub fn init_sdt(sdt: &'static Sdt, active_table: &mut ActivePageTable) {
                     println!("      This is my local APIC");
                 } else {
                     if ap_local_apic.flags & 1 == 1 {
-                        // Map trampoline
-                        {
-                            if active_table.translate_page(Page::containing_address(VirtualAddress::new(TRAMPOLINE))).is_none() {
-                                active_table.identity_map(Frame::containing_address(PhysicalAddress::new(TRAMPOLINE)), entry::PRESENT | entry::WRITABLE);
-                            }
-                        }
-
                         // Allocate a stack
                         // TODO: Allocate contiguous
                         let stack_start = allocate_frame().expect("no more frames in acpi stack_start").start_address().get();
@@ -128,6 +128,10 @@ pub fn init_sdt(sdt: &'static Sdt, active_table: &mut ActivePageTable) {
                 _ => ()
             }
         }
+
+        // Unmap trampoline
+        active_table.unmap(trampoline_page);
+        active_table.flush(trampoline_page);
     } else {
         println!(": Unknown");
     }
@@ -138,28 +142,43 @@ pub unsafe fn init(active_table: &mut ActivePageTable) -> Option<Acpi> {
     let start_addr = 0xE0000;
     let end_addr = 0xFFFFF;
 
-    // Map all of the ACPI table space
+    // Map all of the ACPI RSDP space
     {
        let start_frame = Frame::containing_address(PhysicalAddress::new(start_addr));
        let end_frame = Frame::containing_address(PhysicalAddress::new(end_addr));
        for frame in Frame::range_inclusive(start_frame, end_frame) {
-           if active_table.translate_page(Page::containing_address(VirtualAddress::new(frame.start_address().get()))).is_none() {
-               active_table.identity_map(frame, entry::PRESENT | entry::NO_EXECUTE);
-           }
+           let page = Page::containing_address(VirtualAddress::new(frame.start_address().get()));
+           active_table.map_to(page, frame, entry::PRESENT | entry::NO_EXECUTE);
+           active_table.flush(page);
        }
     }
 
     // Search for RSDP
     if let Some(rsdp) = RSDP::search(start_addr, end_addr) {
-        let get_sdt = |sdt_address: usize, active_table: &mut ActivePageTable| -> &'static Sdt {
-            if active_table.translate_page(Page::containing_address(VirtualAddress::new(sdt_address))).is_none() {
+        let get_sdt = |sdt_address: usize, active_table: &mut ActivePageTable| -> (&'static Sdt, bool) {
+            let mapped = if active_table.translate_page(Page::containing_address(VirtualAddress::new(sdt_address))).is_none() {
                 let sdt_frame = Frame::containing_address(PhysicalAddress::new(sdt_address));
-                active_table.identity_map(sdt_frame, entry::PRESENT | entry::NO_EXECUTE);
-            }
-            &*(sdt_address as *const Sdt)
+                let sdt_page = Page::containing_address(VirtualAddress::new(sdt_address));
+                active_table.map_to(sdt_page, sdt_frame, entry::PRESENT | entry::NO_EXECUTE);
+                active_table.flush(sdt_page);
+                true
+            } else {
+                false
+            };
+            (&*(sdt_address as *const Sdt), mapped)
         };
 
-        let rxsdt = get_sdt(rsdp.sdt_address(), active_table);
+        let drop_sdt = |sdt: &'static Sdt, mapped: bool, active_table: &mut ActivePageTable| {
+            let sdt_address = sdt as *const Sdt as usize;
+            drop(sdt);
+            if mapped {
+                let sdt_page = Page::containing_address(VirtualAddress::new(sdt_address));
+                active_table.unmap(sdt_page);
+                active_table.flush(sdt_page);
+            }
+        };
+
+        let (rxsdt, rxmapped) = get_sdt(rsdp.sdt_address(), active_table);
 
         for &c in rxsdt.signature.iter() {
             print!("{}", c as char);
@@ -167,21 +186,36 @@ pub unsafe fn init(active_table: &mut ActivePageTable) -> Option<Acpi> {
         println!(":");
         if let Some(rsdt) = Rsdt::new(rxsdt) {
             for sdt_address in rsdt.iter() {
-                let sdt = get_sdt(sdt_address, active_table);
+                let (sdt, mapped) = get_sdt(sdt_address, active_table);
                 init_sdt(sdt, active_table);
+                drop_sdt(sdt, mapped, active_table);
             }
         } else if let Some(xsdt) = Xsdt::new(rxsdt) {
             for sdt_address in xsdt.iter() {
-                let sdt = get_sdt(sdt_address, active_table);
+                let (sdt, mapped) = get_sdt(sdt_address, active_table);
                 init_sdt(sdt, active_table);
+                drop_sdt(sdt, mapped, active_table);
             }
         } else {
             println!("UNKNOWN RSDT OR XSDT SIGNATURE");
         }
+
+        drop_sdt(rxsdt, rxmapped, active_table);
     } else {
         println!("NO RSDP FOUND");
     }
 
+    // Unmap all of the ACPI RSDP space
+    {
+        let start_frame = Frame::containing_address(PhysicalAddress::new(start_addr));
+        let end_frame = Frame::containing_address(PhysicalAddress::new(end_addr));
+        for frame in Frame::range_inclusive(start_frame, end_frame) {
+            let page = Page::containing_address(VirtualAddress::new(frame.start_address().get()));
+            active_table.unmap(page);
+            active_table.flush(page);
+        }
+    }
+
     None
 }
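Every trampoline, RSDP, and SDT mapping in this file is now paired with an explicit unmap and per-page TLB flush once parsing is done, instead of leaving opportunistic identity mappings behind. A minimal sketch of that discipline, reusing the `ActivePageTable` calls visible above (`with_mapped` itself is a hypothetical helper, not part of this patch):

```rust
// Hypothetical helper illustrating the map/use/unmap pattern adopted above.
// Page, Frame, EntryFlags and ActivePageTable are the kernel's own types.
fn with_mapped<T, F: FnOnce() -> T>(active_table: &mut ActivePageTable,
                                    page: Page, frame: Frame,
                                    flags: EntryFlags, f: F) -> T {
    active_table.map_to(page, frame, flags);
    active_table.flush(page);   // make the new translation visible
    let ret = f();              // read the mapped memory
    active_table.unmap(page);   // note: unmap() also frees the frame
    active_table.flush(page);   // evict the stale translation
    ret
}
```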
diff --git a/arch/x86_64/src/context.rs b/arch/x86_64/src/context.rs
index 4c9ba54..004914d 100644
--- a/arch/x86_64/src/context.rs
+++ b/arch/x86_64/src/context.rs
@@ -6,7 +6,7 @@ use core::sync::atomic::{AtomicBool, ATOMIC_BOOL_INIT, Ordering};
 /// This must be done, as no locks can be held on the stack during switch
 pub static CONTEXT_SWITCH_LOCK: AtomicBool = ATOMIC_BOOL_INIT;
 
-#[derive(Debug)]
+#[derive(Clone, Debug)]
 pub struct Context {
     /// Page table pointer
     cr3: usize,
diff --git a/arch/x86_64/src/device/display.rs b/arch/x86_64/src/device/display.rs
index d5e9523..6716250 100644
--- a/arch/x86_64/src/device/display.rs
+++ b/arch/x86_64/src/device/display.rs
@@ -3,7 +3,7 @@ use ransid::{Console, Event};
 use spin::Mutex;
 
 use memory::Frame;
-use paging::{ActivePageTable, PhysicalAddress, entry};
+use paging::{ActivePageTable, Page, PhysicalAddress, VirtualAddress, entry};
 
 #[cfg(target_arch = "x86_64")]
 #[allow(unused_assignments)]
@@ -95,14 +95,15 @@ pub unsafe fn init(active_table: &mut ActivePageTable) {
     if mode_info.physbaseptr > 0 {
         let width = mode_info.xresolution as usize;
         let height = mode_info.yresolution as usize;
-        let onscreen = mode_info.physbaseptr as usize;
+        let onscreen = mode_info.physbaseptr as usize + ::KERNEL_OFFSET;
         let size = width * height;
 
         {
-            let start_frame = Frame::containing_address(PhysicalAddress::new(onscreen));
-            let end_frame = Frame::containing_address(PhysicalAddress::new(onscreen + size * 4 - 1));
-            for frame in Frame::range_inclusive(start_frame, end_frame) {
-                active_table.identity_map(frame, /*actually sets PAT for write combining*/ entry::HUGE_PAGE | entry::PRESENT | entry::WRITABLE | entry::NO_EXECUTE);
+            let start_page = Page::containing_address(VirtualAddress::new(onscreen));
+            let end_page = Page::containing_address(VirtualAddress::new(onscreen + size * 4 - 1));
+            for page in Page::range_inclusive(start_page, end_page) {
+                let frame = Frame::containing_address(PhysicalAddress::new(page.start_address().get() - ::KERNEL_OFFSET));
+                active_table.map_to(page, frame, /*actually sets PAT for write combining*/ entry::HUGE_PAGE | entry::PRESENT | entry::WRITABLE | entry::NO_EXECUTE);
             }
         }
 
@@ -117,26 +118,8 @@ pub unsafe fn init(active_table: &mut ActivePageTable) {
         ));
         *CONSOLE.lock() = Some(Console::new(width/8, height/16));
     }
-}
 
-pub unsafe fn init_ap(active_table: &mut ActivePageTable) {
-    active_table.identity_map(Frame::containing_address(PhysicalAddress::new(0x5200)), entry::PRESENT | entry::NO_EXECUTE);
-
-    let mode_info = &*(0x5200 as *const VBEModeInfo);
-    if mode_info.physbaseptr > 0 {
-        let width = mode_info.xresolution as usize;
-        let height = mode_info.yresolution as usize;
-        let start = mode_info.physbaseptr as usize;
-        let size = width * height;
-
-        {
-            let start_frame = Frame::containing_address(PhysicalAddress::new(start));
-            let end_frame = Frame::containing_address(PhysicalAddress::new(start + size * 4 - 1));
-            for frame in Frame::range_inclusive(start_frame, end_frame) {
-                active_table.identity_map(frame, /*actually sets PAT for write combining*/ entry::HUGE_PAGE | entry::PRESENT | entry::WRITABLE | entry::NO_EXECUTE);
-            }
-        }
-    }
+    active_table.unmap(Page::containing_address(VirtualAddress::new(0x5200)));
 }
 
 /// A display
diff --git a/arch/x86_64/src/device/mod.rs b/arch/x86_64/src/device/mod.rs
index 8a58e0a..02fe70d 100644
--- a/arch/x86_64/src/device/mod.rs
+++ b/arch/x86_64/src/device/mod.rs
@@ -9,7 +9,3 @@ pub unsafe fn init(active_table: &mut ActivePageTable){
     display::init(active_table);
     ps2::init();
 }
-
-pub unsafe fn init_ap(active_table: &mut ActivePageTable) {
-    display::init_ap(active_table);
-}
diff --git a/arch/x86_64/src/interrupt/syscall.rs b/arch/x86_64/src/interrupt/syscall.rs
index 7133906..d97154e 100644
--- a/arch/x86_64/src/interrupt/syscall.rs
+++ b/arch/x86_64/src/interrupt/syscall.rs
@@ -3,7 +3,7 @@ pub unsafe extern fn syscall() {
     #[inline(never)]
     unsafe fn inner() {
         extern {
-            fn syscall(a: usize, b: usize, c: usize, d: usize, e: usize, f: usize) -> usize;
+            fn syscall(a: usize, b: usize, c: usize, d: usize, e: usize, f: usize, stack: usize) -> usize;
         }
 
         let mut a;
@@ -13,10 +13,11 @@ pub unsafe extern fn syscall() {
             let d;
             let e;
             let f;
-            asm!("" : "={rax}"(a), "={rbx}"(b), "={rcx}"(c), "={rdx}"(d), "={rsi}"(e), "={rdi}"(f)
+            let stack;
+            asm!("" : "={rax}"(a), "={rbx}"(b), "={rcx}"(c), "={rdx}"(d), "={rsi}"(e), "={rdi}"(f), "={rbp}"(stack)
                 : : : "intel", "volatile");
 
-            a = syscall(a, b, c, d, e, f);
+            a = syscall(a, b, c, d, e, f, stack);
         }
 
         asm!("" : : "{rax}"(a) : : "intel", "volatile");
@@ -36,3 +37,10 @@ pub unsafe extern fn syscall() {
         iretq"
         : : : : "intel", "volatile");
 }
+
+#[naked]
+pub unsafe extern fn clone_ret() -> usize {
+    asm!("pop rbp"
+        : : : : "intel", "volatile");
+    0
+}
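The extra `stack` argument captures `rbp` at syscall entry, that is, a frame pointer into the current kernel stack. `clone` (kernel/syscall/process.rs below) uses it to patch its copy of that stack so the child resumes at `clone_ret`, which pops the copied `rbp` and returns 0, while the parent's call returns the child pid. A userland model of the stack surgery, with made-up addresses:

```rust
use std::mem;

// Model of the kernel-stack patching in clone() below: duplicate a
// "kernel stack", then overwrite the word just below the captured rbp
// with a new return target (the address of clone_ret in the real code).
fn main() {
    let parent_stack = vec![0u8; 64];                      // stand-in kstack
    let stack_base = parent_stack.as_ptr() as usize + 48;  // rbp seen at entry
    let offset = stack_base - parent_stack.as_ptr() as usize - mem::size_of::<usize>();

    let mut child_stack = parent_stack.clone();
    let clone_ret = 0xDEAD_BEEFusize;                      // illustrative address
    unsafe {
        let slot = child_stack.as_mut_ptr().offset(offset as isize);
        *(slot as *mut usize) = clone_ret;                 // child will `ret` here
    }
    // The child's saved rsp is pointed at this slot, so the first `ret`
    // after switch_to() lands in clone_ret: one syscall, two returns.
    assert_eq!(child_stack[40..48], clone_ret.to_ne_bytes()[..]);
}
```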
diff --git a/arch/x86_64/src/lib.rs b/arch/x86_64/src/lib.rs
index 6d07468..bbc68ce 100644
--- a/arch/x86_64/src/lib.rs
+++ b/arch/x86_64/src/lib.rs
@@ -22,7 +22,8 @@ pub extern crate x86;
 // Because the memory map is so important to not be aliased, it is defined here, in one place
 // The lower 256 PML4 entries are reserved for userspace
 // Each PML4 entry references up to 512 GB of memory
-// The upper 256 are reserved for the kernel
+// The top (511) PML4 is reserved for recursive mapping
+// The second from the top (510) PML4 is reserved for the kernel
 
     /// The size of a single PML4
     pub const PML4_SIZE: usize = 0x0000_0080_0000_0000;
@@ -33,7 +34,7 @@ pub extern crate x86;
     pub const KERNEL_OFFSET: usize = RECURSIVE_PAGE_OFFSET - PML4_SIZE;
 
     /// Offset to kernel heap
-    pub const KERNEL_HEAP_OFFSET: usize = KERNEL_OFFSET - PML4_SIZE;
+    pub const KERNEL_HEAP_OFFSET: usize = KERNEL_OFFSET + PML4_SIZE/2;
 
     /// Size of kernel heap
     pub const KERNEL_HEAP_SIZE: usize = 64 * 1024 * 1024; // 64 MB
@@ -43,14 +44,26 @@ pub extern crate x86;
     /// Size of kernel percpu variables
     pub const KERNEL_PERCPU_SIZE: usize = 64 * 1024; // 64 KB
 
+    /// Offset to user image
+    pub const USER_OFFSET: usize = 0;
+
     /// Offset to user heap
-    pub const USER_HEAP_OFFSET: usize = PML4_SIZE;
+    pub const USER_HEAP_OFFSET: usize = USER_OFFSET + PML4_SIZE;
 
     /// Offset to user stack
     pub const USER_STACK_OFFSET: usize = USER_HEAP_OFFSET + PML4_SIZE;
 
     /// Size of user stack
     pub const USER_STACK_SIZE: usize = 1024 * 1024; // 1 MB
 
+    /// Offset to user temporary image (used when cloning)
+    pub const USER_TMP_OFFSET: usize = USER_STACK_OFFSET + PML4_SIZE;
+
+    /// Offset to user temporary heap (used when cloning)
+    pub const USER_TMP_HEAP_OFFSET: usize = USER_TMP_OFFSET + PML4_SIZE;
+
+    /// Offset to user temporary stack (used when cloning)
+    pub const USER_TMP_STACK_OFFSET: usize = USER_TMP_HEAP_OFFSET + PML4_SIZE;
+
 /// Print to console
 #[macro_export]
@@ -126,7 +139,14 @@ macro_rules! interrupt_error {
         }
 
         // Push scratch registers
-        asm!("push rax
+        asm!("pop rax
+            pop rbx
+            pop rcx
+            pop rdx
+            pop rsi
+            pop rdi
+            hlt
+            push rax
             push rcx
             push rdx
             push rdi
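Two consequences of the new layout are worth spelling out: the kernel heap moves into PML4 entry 510 next to the kernel image (which is what later allows the BSP to hand APs a single KERNEL_TABLE frame), and clone gets three staging regions spaced one PML4 entry apart. A runnable check of the implied addresses; the value of RECURSIVE_PAGE_OFFSET is assumed from the comment above (base of entry 511), since its definition is outside this hunk:

```rust
const PML4_SIZE: usize = 0x0000_0080_0000_0000; // 512 GiB per PML4 entry

fn main() {
    let recursive = 0xFFFF_FF80_0000_0000usize;  // entry 511 (assumed; x86_64)
    let kernel = recursive - PML4_SIZE;          // KERNEL_OFFSET, entry 510
    assert_eq!(kernel, 0xFFFF_FF00_0000_0000);   // matches linker.ld below
    let kernel_heap = kernel + PML4_SIZE / 2;    // still inside entry 510
    assert_eq!(kernel_heap, 0xFFFF_FF40_0000_0000);

    // Userspace, bottom up: image, heap, stack, then the USER_TMP_*
    // staging areas used while cloning, one PML4 entry apart.
    let user_offset = 0;
    let user_heap = user_offset + PML4_SIZE;
    let user_stack = user_heap + PML4_SIZE;
    let user_tmp = user_stack + PML4_SIZE;
    assert_eq!(user_tmp, 0x0180_0000_0000);
}
```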
diff --git a/arch/x86_64/src/linker.ld b/arch/x86_64/src/linker.ld
index c8c932f..546adaa 100644
--- a/arch/x86_64/src/linker.ld
+++ b/arch/x86_64/src/linker.ld
@@ -1,8 +1,8 @@
 ENTRY(kstart)
 OUTPUT_FORMAT(elf64-x86-64)
 
-/* KERNEL_OFFSET = 0xffffff0000100000; */
-KERNEL_OFFSET = 0x100000;
+KERNEL_OFFSET = 0xffffff0000100000;
+/* KERNEL_OFFSET = 0x100000; */
 
 SECTIONS {
     . = KERNEL_OFFSET;
diff --git a/arch/x86_64/src/paging/mapper.rs b/arch/x86_64/src/paging/mapper.rs
index dbbaa60..5b7665a 100644
--- a/arch/x86_64/src/paging/mapper.rs
+++ b/arch/x86_64/src/paging/mapper.rs
@@ -63,8 +63,6 @@ impl Mapper {
 
     /// Unmap a page
     pub fn unmap(&mut self, page: Page) {
-        assert!(self.translate(page.start_address()).is_some());
-
         let p1 = self.p4_mut()
                      .next_table_mut(page.p4_index())
                      .and_then(|p3| p3.next_table_mut(page.p3_index()))
@@ -76,6 +74,18 @@ impl Mapper {
         deallocate_frame(frame);
     }
 
+    /// Unmap a page, return frame without free
+    pub fn unmap_return(&mut self, page: Page) -> Frame {
+        let p1 = self.p4_mut()
+                     .next_table_mut(page.p4_index())
+                     .and_then(|p3| p3.next_table_mut(page.p3_index()))
+                     .and_then(|p2| p2.next_table_mut(page.p2_index()))
+                     .expect("mapping code does not support huge pages");
+        let frame = p1[page.p1_index()].pointed_frame().unwrap();
+        p1[page.p1_index()].set_unused();
+        frame
+    }
+
     pub fn translate_page(&self, page: Page) -> Option<Frame> {
         self.p4().next_table(page.p4_index())
             .and_then(|p3| p3.next_table(page.p3_index()))
diff --git a/arch/x86_64/src/paging/mod.rs b/arch/x86_64/src/paging/mod.rs
index c321534..4c3da06 100644
--- a/arch/x86_64/src/paging/mod.rs
+++ b/arch/x86_64/src/paging/mod.rs
@@ -22,6 +22,59 @@ pub const ENTRY_COUNT: usize = 512;
 /// Size of pages
 pub const PAGE_SIZE: usize = 4096;
 
+/// Setup page attribute table
+unsafe fn init_pat() {
+    let uncacheable = 0;
+    let write_combining = 1;
+    let write_through = 4;
+    //let write_protected = 5;
+    let write_back = 6;
+    let uncached = 7;
+
+    let pat0 = write_back;
+    let pat1 = write_through;
+    let pat2 = uncached;
+    let pat3 = uncacheable;
+
+    let pat4 = write_combining;
+    let pat5 = pat1;
+    let pat6 = pat2;
+    let pat7 = pat3;
+
+    msr::wrmsr(msr::IA32_PAT, pat7 << 56 | pat6 << 48 | pat5 << 40 | pat4 << 32
+                            | pat3 << 24 | pat2 << 16 | pat1 << 8 | pat0);
+}
+
+/// Copy tdata, clear tbss, set TCB self pointer
+unsafe fn init_tcb(cpu_id: usize) -> usize {
+    extern {
+        /// The starting byte of the thread data segment
+        static mut __tdata_start: u8;
+        /// The ending byte of the thread data segment
+        static mut __tdata_end: u8;
+        /// The starting byte of the thread BSS segment
+        static mut __tbss_start: u8;
+        /// The ending byte of the thread BSS segment
+        static mut __tbss_end: u8;
+    }
+
+    let tcb_offset;
+    {
+        let size = & __tbss_end as *const _ as usize - & __tdata_start as *const _ as usize;
+        let tbss_offset = & __tbss_start as *const _ as usize - & __tdata_start as *const _ as usize;
+
+        let start = ::KERNEL_PERCPU_OFFSET + ::KERNEL_PERCPU_SIZE * cpu_id;
+        let end = start + size;
+        tcb_offset = end - mem::size_of::<usize>();
+
+        ::externs::memcpy(start as *mut u8, & __tdata_start as *const u8, tbss_offset);
+        ::externs::memset((start + tbss_offset) as *mut u8, 0, size - tbss_offset);
+
+        *(tcb_offset as *mut usize) = end;
+    }
+    tcb_offset
+}
+
 /// Initialize paging
 ///
 /// Returns page table and thread control block offset
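`init_pat` keeps PAT0..PAT3 at their power-on meanings (WB, WT, UC-, UC; the code's `uncached = 7` is the UC- type) so existing mappings are unaffected, and repurposes PAT4 as write-combining. A 4 KiB PTE selects PAT4 by setting bit 7 with PWT/PCD clear; bit 7 is the same bit the entry flags call HUGE_PAGE, which is why the framebuffer mapping in display.rs above passes HUGE_PAGE "to set PAT for write combining". Recomputing the MSR value as a check (type encodings per the Intel SDM are assumed):

```rust
// The value init_pat() writes to IA32_PAT (MSR 0x277).
fn main() {
    let (uc, wc, wt, wb, uc_minus) = (0u64, 1u64, 4u64, 6u64, 7u64);
    let (pat0, pat1, pat2, pat3) = (wb, wt, uc_minus, uc); // power-on defaults
    let (pat4, pat5, pat6, pat7) = (wc, pat1, pat2, pat3); // PAT bit set: slot 4 = WC
    let msr = pat7 << 56 | pat6 << 48 | pat5 << 40 | pat4 << 32
            | pat3 << 24 | pat2 << 16 | pat1 << 8 | pat0;
    assert_eq!(msr, 0x0007_0401_0007_0406);
}
```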
@@ -53,6 +106,8 @@ pub unsafe fn init(cpu_id: usize, stack_start: usize, stack_end: usize) -> (ActivePageTable, usize) {
         static mut __bss_end: u8;
     }
 
+    init_pat();
+
     let mut active_table = ActivePageTable::new();
 
     let mut temporary_page = TemporaryPage::new(Page::containing_address(VirtualAddress::new(0x8_0000_0000)));
@@ -78,25 +133,23 @@ pub unsafe fn init(cpu_id: usize, stack_start: usize, stack_end: usize) -> (ActivePageTable, usize) {
             }
         }
 
-        let mut remap = |start: usize, end: usize, flags: EntryFlags| {
+        let mut remap = |start: usize, end: usize, flags: EntryFlags, offset: usize| {
             if end > start {
                 let start_frame = Frame::containing_address(PhysicalAddress::new(start));
                 let end_frame = Frame::containing_address(PhysicalAddress::new(end - 1));
                 for frame in Frame::range_inclusive(start_frame, end_frame) {
-                    mapper.identity_map(frame.clone(), flags);
-
-                    //let page = Page::containing_address(VirtualAddress::new(frame.start_address().get() + ::KERNEL_OFFSET));
-                    //mapper.map_to(page, frame, flags);
+                    let page = Page::containing_address(VirtualAddress::new(frame.start_address().get() + offset));
+                    mapper.map_to(page, frame, flags);
                 }
             }
         };
 
         // Remap stack writable, no execute
-        remap(stack_start, stack_end, PRESENT | NO_EXECUTE | WRITABLE);
+        remap(stack_start, stack_end, PRESENT | NO_EXECUTE | WRITABLE, 0);
 
         // Remap a section with `flags`
         let mut remap_section = |start: &u8, end: &u8, flags: EntryFlags| {
-            remap(start as *const _ as usize, end as *const _ as usize, flags);
+            remap(start as *const _ as usize - ::KERNEL_OFFSET, end as *const _ as usize - ::KERNEL_OFFSET, flags, ::KERNEL_OFFSET);
         };
         // Remap text read-only
         remap_section(& __text_start, & __text_end, PRESENT);
@@ -111,45 +164,71 @@ pub unsafe fn init(cpu_id: usize, stack_start: usize, stack_end: usize) -> (ActivePageTable, usize) {
         }
     });
 
-    let uncacheable = 0;
-    let write_combining = 1;
-    let write_through = 4;
-    //let write_protected = 5;
-    let write_back = 6;
-    let uncached = 7;
+    active_table.switch(new_table);
 
-    let pat0 = write_back;
-    let pat1 = write_through;
-    let pat2 = uncached;
-    let pat3 = uncacheable;
+    (active_table, init_tcb(cpu_id))
+}
 
-    let pat4 = write_combining;
-    let pat5 = pat1;
-    let pat6 = pat2;
-    let pat7 = pat3;
+pub unsafe fn init_ap(cpu_id: usize, stack_start: usize, stack_end: usize, kernel_table: usize) -> (ActivePageTable, usize) {
+    extern {
+        /// The starting byte of the thread data segment
+        static mut __tdata_start: u8;
+        /// The ending byte of the thread data segment
+        static mut __tdata_end: u8;
+        /// The starting byte of the thread BSS segment
+        static mut __tbss_start: u8;
+        /// The ending byte of the thread BSS segment
+        static mut __tbss_end: u8;
+    }
 
-    msr::wrmsr(msr::IA32_PAT, pat7 << 56 | pat6 << 48 | pat5 << 40 | pat4 << 32
-                            | pat3 << 24 | pat2 << 16 | pat1 << 8 | pat0);
+    init_pat();
+
+    let mut active_table = ActivePageTable::new();
+
+    let mut temporary_page = TemporaryPage::new(Page::containing_address(VirtualAddress::new(0x8_0000_0000)));
+
+    let mut new_table = {
+        let frame = allocate_frame().expect("no more frames in paging::init new_table");
+        InactivePageTable::new(frame, &mut active_table, &mut temporary_page)
+    };
+
+    active_table.with(&mut new_table, &mut temporary_page, |mapper| {
+        // Copy kernel mapping
+        let kernel_frame = Frame::containing_address(PhysicalAddress::new(kernel_table));
+        mapper.p4_mut()[510].set(kernel_frame, entry::PRESENT | entry::WRITABLE);
+
+        // Map tdata and tbss
+        {
+            let size = & __tbss_end as *const _ as usize - & __tdata_start as *const _ as usize;
+
+            let start = ::KERNEL_PERCPU_OFFSET + ::KERNEL_PERCPU_SIZE * cpu_id;
+            let end = start + size;
+
+            let start_page = Page::containing_address(VirtualAddress::new(start));
+            let end_page = Page::containing_address(VirtualAddress::new(end - 1));
+            for page in Page::range_inclusive(start_page, end_page) {
+                mapper.map(page, PRESENT | NO_EXECUTE | WRITABLE);
+            }
+        }
+
+        let mut remap = |start: usize, end: usize, flags: EntryFlags, offset: usize| {
+            if end > start {
+                let start_frame = Frame::containing_address(PhysicalAddress::new(start));
+                let end_frame = Frame::containing_address(PhysicalAddress::new(end - 1));
+                for frame in Frame::range_inclusive(start_frame, end_frame) {
+                    let page = Page::containing_address(VirtualAddress::new(frame.start_address().get() + offset));
+                    mapper.map_to(page, frame, flags);
+                }
+            }
+        };
+
+        // Remap stack writable, no execute
+        remap(stack_start, stack_end, PRESENT | NO_EXECUTE | WRITABLE, 0);
+    });
 
     active_table.switch(new_table);
 
-    // Copy tdata, clear tbss, set TCB self pointer
-    let tcb_offset;
-    {
-        let size = & __tbss_end as *const _ as usize - & __tdata_start as *const _ as usize;
-        let tbss_offset = & __tbss_start as *const _ as usize - & __tdata_start as *const _ as usize;
-
-        let start = ::KERNEL_PERCPU_OFFSET + ::KERNEL_PERCPU_SIZE * cpu_id;
-        let end = start + size;
-        tcb_offset = end - mem::size_of::<usize>();
-
-        ::externs::memcpy(start as *mut u8, & __tdata_start as *const u8, tbss_offset);
-        ::externs::memset((start + tbss_offset) as *mut u8, 0, size - tbss_offset);
-
-        *(tcb_offset as *mut usize) = end;
-    }
-
-    (active_table, tcb_offset)
+    (active_table, init_tcb(cpu_id))
 }
 
 pub struct ActivePageTable {
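`init_ap` shares the kernel's address space by pointing the AP's PML4 entry 510 at the frame the BSP publishes, so everything kernel-global (image and heap alike) must sit under that single entry. The index arithmetic, checked standalone (`p4_index` here is the usual bits 39..47 extraction):

```rust
fn p4_index(addr: usize) -> usize {
    (addr >> 39) & 0o777 // 9 bits per page-table level
}

fn main() {
    assert_eq!(p4_index(0xFFFF_FF00_0000_0000), 510); // KERNEL_OFFSET
    assert_eq!(p4_index(0xFFFF_FF40_0000_0000), 510); // new KERNEL_HEAP_OFFSET
    assert_eq!(p4_index(0xFFFF_FF80_0000_0000), 511); // recursive mapping
    // The old heap (KERNEL_OFFSET - PML4_SIZE) had its own entry, hence
    // the separate HEAP_TABLE handoff that start.rs deletes below:
    assert_eq!(p4_index(0xFFFF_FE80_0000_0000), 509);
}
```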
diff --git a/arch/x86_64/src/start.rs b/arch/x86_64/src/start.rs
index 11a1290..7748a00 100644
--- a/arch/x86_64/src/start.rs
+++ b/arch/x86_64/src/start.rs
@@ -28,7 +28,7 @@ static mut TDATA_TEST_NONZERO: usize = 0xFFFFFFFFFFFFFFFF;
 
 pub static AP_READY: AtomicBool = ATOMIC_BOOL_INIT;
 static BSP_READY: AtomicBool = ATOMIC_BOOL_INIT;
-static HEAP_TABLE: AtomicUsize = ATOMIC_USIZE_INIT;
+static KERNEL_TABLE: AtomicUsize = ATOMIC_USIZE_INIT;
 
 extern {
     /// Kernel main function
@@ -65,7 +65,7 @@ pub unsafe extern fn kstart() -> ! {
     }
 
     // Initialize memory management
-    memory::init(0, &__end as *const u8 as usize);
+    memory::init(0, &__end as *const u8 as usize - ::KERNEL_OFFSET);
 
     // TODO: allocate a stack
     let stack_start = 0x00080000;
@@ -93,9 +93,9 @@ pub unsafe extern fn kstart() -> ! {
         // Reset AP variables
         AP_READY.store(false, Ordering::SeqCst);
         BSP_READY.store(false, Ordering::SeqCst);
-        HEAP_TABLE.store(0, Ordering::SeqCst);
+        KERNEL_TABLE.store(0, Ordering::SeqCst);
 
-        // Map heap
+        // Setup kernel heap
         {
             // Map heap pages
             let heap_start_page = Page::containing_address(VirtualAddress::new(::KERNEL_HEAP_OFFSET));
@@ -106,24 +106,26 @@ pub unsafe extern fn kstart() -> ! {
 
             // Init the allocator
             allocator::init(::KERNEL_HEAP_OFFSET, ::KERNEL_HEAP_SIZE);
+        }
 
-            // Send heap page table to APs
-            let index = heap_start_page.p4_index();
+        // Initialize devices
+        device::init(&mut active_table);
+
+        // Send kernel page table to APs
+        {
+            let index = Page::containing_address(VirtualAddress::new(::KERNEL_OFFSET)).p4_index();
 
             let p4 = active_table.p4();
             {
                 let entry = &p4[index];
                 if let Some(frame) = entry.pointed_frame() {
-                    HEAP_TABLE.store(frame.start_address().get(), Ordering::SeqCst);
+                    KERNEL_TABLE.store(frame.start_address().get(), Ordering::SeqCst);
                 } else {
-                    panic!("heap does not have PML4 entry");
+                    panic!("kernel does not have PML4 entry");
                 }
             }
         }
 
-        // Initialize devices
-        device::init(&mut active_table);
-
         // Read ACPI tables, starts APs
         acpi::init(&mut active_table);
 
@@ -139,8 +141,14 @@ pub unsafe extern fn kstart_ap(cpu_id: usize, page_table: usize, stack_start: usize, stack_end: usize) -> ! {
     assert_eq!(BSS_TEST_ZERO, 0);
     assert_eq!(DATA_TEST_NONZERO, 0xFFFFFFFFFFFFFFFF);
 
+    // Retrieve kernel table entry
+    while KERNEL_TABLE.load(Ordering::SeqCst) == 0 {
+        interrupt::pause();
+    }
+    let kernel_table = KERNEL_TABLE.load(Ordering::SeqCst);
+
     // Initialize paging
-    let (mut active_table, tcb_offset) = paging::init(cpu_id, stack_start, stack_end);
+    let (mut active_table, tcb_offset) = paging::init_ap(cpu_id, stack_start, stack_end, kernel_table);
 
     // Set up GDT for AP
     gdt::init(tcb_offset, stack_end);
@@ -158,23 +166,6 @@ pub unsafe extern fn kstart_ap(cpu_id: usize, page_table: usize, stack_start: usize, stack_end: usize) -> ! {
         assert_eq!(TDATA_TEST_NONZERO, 0xFFFFFFFFFFFFFFFE);
     }
 
-    // Copy heap PML4
-    {
-        while HEAP_TABLE.load(Ordering::SeqCst) == 0 {
-            interrupt::pause();
-        }
-        let frame = Frame::containing_address(PhysicalAddress::new(HEAP_TABLE.load(Ordering::SeqCst)));
-
-        let page = Page::containing_address(VirtualAddress::new(::KERNEL_HEAP_OFFSET));
-        let p4 = active_table.p4_mut();
-        let entry = &mut p4[page.p4_index()];
-        assert!(entry.is_unused());
-        entry.set(frame, entry::PRESENT | entry::WRITABLE);
-    }
-
-    // Init devices for AP
-    device::init_ap(&mut active_table);
-
     AP_READY.store(true, Ordering::SeqCst);
 }
 
@@ -201,7 +192,7 @@ pub unsafe fn usermode(ip: usize, sp: usize) -> ! {
     : // No output because it never returns
     : "{rax}"(gdt::GDT_USER_DATA << 3 | 3), // Stack segment
       "{rbx}"(sp), // Stack pointer
-      "{rcx}"(3 << 12 | 1 << 9), // Flags - Set IOPL and interrupt enable flag
+      "{rcx}"(3 << 12/* | 1 << 9*/), // Flags - Set IOPL and interrupt enable flag
       "{rdx}"(gdt::GDT_USER_CODE << 3 | 3), // Code segment
       "{rsi}"(ip) // IP
     : // No clobers because it never returns
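The HEAP_TABLE to KERNEL_TABLE rename follows from the layout change: instead of splicing in a heap-only PML4 entry, each AP adopts the kernel's whole slot via `paging::init_ap`. The handoff itself is a publish/spin protocol on one atomic; a userland model (std atomics standing in for the kernel's statics, `spin_loop` for `interrupt::pause`):

```rust
use std::sync::atomic::{AtomicUsize, Ordering};
use std::thread;

// BSP publishes the physical address of the kernel's PML4-entry frame;
// APs spin until it is nonzero. Zero doubles as "not ready", which works
// because no page-table frame is handed out at physical address 0 here.
static KERNEL_TABLE: AtomicUsize = AtomicUsize::new(0);

fn main() {
    let ap = thread::spawn(|| {
        while KERNEL_TABLE.load(Ordering::SeqCst) == 0 {
            std::hint::spin_loop(); // interrupt::pause() in the kernel
        }
        KERNEL_TABLE.load(Ordering::SeqCst)
    });
    KERNEL_TABLE.store(0x7C000, Ordering::SeqCst); // illustrative frame address
    assert_eq!(ap.join().unwrap(), 0x7C000);
}
```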
diff --git a/bootloader/x86_64/startup-x86_64.asm b/bootloader/x86_64/startup-x86_64.asm
index 9cb683b..d5a0a79 100644
--- a/bootloader/x86_64/startup-x86_64.asm
+++ b/bootloader/x86_64/startup-x86_64.asm
@@ -126,6 +126,7 @@ long_mode:
 
     ;rust init
     mov rax, [kernel_base + 0x18]
+    xchg bx, bx
     jmp rax
 
 long_mode_ap:
diff --git a/bootloader/x86_64/vesa.asm b/bootloader/x86_64/vesa.asm
index cf54f61..943e4eb 100644
--- a/bootloader/x86_64/vesa.asm
+++ b/bootloader/x86_64/vesa.asm
@@ -115,8 +115,8 @@ vesa:
 .minx dw 640
 .miny dw 480
 .required:
-.requiredx dw 0 ;1024 ;USE THESE WITH CAUTION
-.requiredy dw 0 ;768
+.requiredx dw 1024 ;USE THESE WITH CAUTION
+.requiredy dw 768
 .requiredmode dw 0
 
 .modeok db ": Is this OK?(y/n)",10,13,0
diff --git a/init/src/main.rs b/init/src/main.rs
index 093f66a..104f9da 100644
--- a/init/src/main.rs
+++ b/init/src/main.rs
@@ -1,4 +1,4 @@
-use std::{env, thread};
+use std::env;
 use std::fs::File;
 use std::io::{BufRead, BufReader};
 use std::process::Command;
@@ -47,8 +47,4 @@ pub fn main() {
             }
         }
     }
-
-    loop {
-        thread::yield_now();
-    }
 }
diff --git a/kernel/context/file.rs b/kernel/context/file.rs
index 91643ef..67a28eb 100644
--- a/kernel/context/file.rs
+++ b/kernel/context/file.rs
@@ -1,6 +1,7 @@
 //! File struct
 
 /// A file
+//TODO: Close on exec
 #[derive(Copy, Clone, Debug)]
 pub struct File {
     /// The scheme that this file refers to
diff --git a/kernel/context/memory.rs b/kernel/context/memory.rs
index 9cd57f2..71ed47c 100644
--- a/kernel/context/memory.rs
+++ b/kernel/context/memory.rs
@@ -1,6 +1,7 @@
 use arch::externs::memset;
-use arch::paging::{ActivePageTable, Page, PageIter, VirtualAddress};
-use arch::paging::entry::EntryFlags;
+use arch::paging::{ActivePageTable, InactivePageTable, Page, PageIter, VirtualAddress};
+use arch::paging::entry::{self, EntryFlags};
+use arch::paging::temporary_page::TemporaryPage;
 
 #[derive(Debug)]
 pub struct Memory {
@@ -30,13 +31,17 @@ impl Memory {
         self.size
     }
 
+    pub fn flags(&self) -> EntryFlags {
+        self.flags
+    }
+
     pub fn pages(&self) -> PageIter {
         let start_page = Page::containing_address(self.start);
         let end_page = Page::containing_address(VirtualAddress::new(self.start.get() + self.size - 1));
         Page::range_inclusive(start_page, end_page)
     }
 
-    pub fn map(&mut self, flush: bool, clear: bool) {
+    fn map(&mut self, flush: bool, clear: bool) {
         let mut active_table = unsafe { ActivePageTable::new() };
 
         let mut flush_all = false;
@@ -56,12 +61,12 @@ impl Memory {
         }
 
         if clear {
-            assert!(flush);
+            assert!(flush && self.flags.contains(entry::WRITABLE));
             unsafe { memset(self.start_address().get() as *mut u8, 0, self.size); }
         }
     }
 
-    pub fn unmap(&mut self, flush: bool) {
+    fn unmap(&mut self, flush: bool) {
         let mut active_table = unsafe { ActivePageTable::new() };
 
         let mut flush_all = false;
@@ -80,6 +85,34 @@ impl Memory {
         }
     }
 
+    /// A complicated operation to move a piece of memory to a new page table
+    /// It also allows for changing the address at the same time
+    pub fn move_to(&mut self, new_start: VirtualAddress, new_table: &mut InactivePageTable, temporary_page: &mut TemporaryPage, flush: bool) {
+        let mut active_table = unsafe { ActivePageTable::new() };
+
+        let mut flush_all = false;
+
+        for page in self.pages() {
+            let frame = active_table.unmap_return(page);
+
+            active_table.with(new_table, temporary_page, |mapper| {
+                let new_page = Page::containing_address(VirtualAddress::new(page.start_address().get() - self.start.get() + new_start.get()));
+                mapper.map_to(new_page, frame, self.flags);
+            });
+
+            if flush {
+                //active_table.flush(page);
+                flush_all = true;
+            }
+        }
+
+        if flush_all {
+            active_table.flush_all();
+        }
+
+        self.start = new_start;
+    }
+
     pub fn remap(&mut self, new_flags: EntryFlags, flush: bool) {
         let mut active_table = unsafe { ActivePageTable::new() };
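`move_to` transplants a region into another address space without copying bytes: `unmap_return` (added in mapper.rs above) detaches each frame from the live table without freeing it, and `map_to` inside `with` reattaches it in the inactive table at a possibly different base. A sketch of the intended call shape, mirroring what clone() does further below (all types are the kernel's own):

```rust
// Sketch: relocate a staged Memory region into a child's page table.
// `new_table` and `temporary_page` are created as in the clone() hunk below.
fn transplant(memory: &mut Memory, new_table: &mut InactivePageTable,
              temporary_page: &mut TemporaryPage) {
    // Frames keep their physical contents; only page-table entries move,
    // so there is no second memcpy and no frame is ever deallocated.
    memory.move_to(VirtualAddress::new(arch::USER_OFFSET), new_table, temporary_page, true);
}
```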
diff --git a/kernel/context/mod.rs b/kernel/context/mod.rs
index a31d7b4..e243123 100644
--- a/kernel/context/mod.rs
+++ b/kernel/context/mod.rs
@@ -141,11 +141,25 @@ pub unsafe fn switch() {
 
     let mut to_ptr = 0 as *mut Context;
 
-    for (_pid, context_lock) in contexts().map.iter() {
-        let mut context = context_lock.write();
-        if ! context.running && ! context.blocked && ! context.exited {
-            to_ptr = context.deref_mut() as *mut Context;
-            break;
+    for (pid, context_lock) in contexts().map.iter() {
+        if *pid > (*from_ptr).id {
+            let mut context = context_lock.write();
+            if ! context.running && ! context.blocked && ! context.exited {
+                to_ptr = context.deref_mut() as *mut Context;
+                break;
+            }
+        }
+    }
+
+    if to_ptr as usize == 0 {
+        for (pid, context_lock) in contexts().map.iter() {
+            if *pid < (*from_ptr).id {
+                let mut context = context_lock.write();
+                if ! context.running && ! context.blocked && ! context.exited {
+                    to_ptr = context.deref_mut() as *mut Context;
+                    break;
+                }
+            }
         }
     }
 
@@ -153,12 +167,14 @@ pub unsafe fn switch() {
         // TODO: Sleep, wait for interrupt
         // Unset global lock if no context found
         arch::context::CONTEXT_SWITCH_LOCK.store(false, Ordering::SeqCst);
-        println!("No to_ptr");
         return;
     }
 
     (&mut *from_ptr).running = false;
     (&mut *to_ptr).running = true;
+    if let Some(ref stack) = (*to_ptr).kstack {
+        arch::gdt::TSS.rsp[0] = (stack.as_ptr() as usize + stack.len() - 256) as u64;
+    }
     CONTEXT_ID.store((&mut *to_ptr).id, Ordering::SeqCst);
 
     (&mut *from_ptr).arch.switch_to(&mut (&mut *to_ptr).arch);
diff --git a/kernel/elf.rs b/kernel/elf.rs
index 819b1ec..7fb7f9a 100644
--- a/kernel/elf.rs
+++ b/kernel/elf.rs
@@ -55,8 +55,6 @@ impl<'a> Elf<'a> {
 
     /// Test function to run. Remove and replace with proper syscall
     pub fn run(self) -> SysResult {
-        let stack_addr = arch::USER_STACK_OFFSET;
-        let stack_size = arch::USER_STACK_SIZE;
         {
             let contexts = context::contexts();
             let context_lock = contexts.current().ok_or(Error::NoProcess)?;
@@ -105,8 +103,8 @@ impl<'a> Elf<'a> {
 
             // Map stack
             context.stack = Some(context::memory::Memory::new(
-                VirtualAddress::new(stack_addr),
-                stack_size,
+                VirtualAddress::new(arch::USER_STACK_OFFSET),
+                arch::USER_STACK_SIZE,
                 entry::NO_EXECUTE | entry::WRITABLE | entry::USER_ACCESSIBLE,
                 true,
                 true
@@ -114,7 +112,7 @@ impl<'a> Elf<'a> {
         }
 
         // Go to usermode
-        unsafe { usermode(self.entry(), stack_addr + stack_size - 256); }
+        unsafe { usermode(self.entry(), arch::USER_STACK_OFFSET + arch::USER_STACK_SIZE - 256); }
     }
 }
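The scheduler change turns pick-first into a round-robin: first the lowest runnable pid above the current one, then, failing that, a wrap-around to pids below it. The same selection modeled on a plain BTreeMap:

```rust
use std::collections::BTreeMap;

// Two-pass pick as in context::switch(): forward from the current pid,
// then wrap around. `true` marks a runnable (not running/blocked/exited)
// context.
fn pick_next(map: &BTreeMap<usize, bool>, current: usize) -> Option<usize> {
    map.iter()
        .find(|&(&pid, &runnable)| pid > current && runnable)
        .or_else(|| map.iter().find(|&(&pid, &runnable)| pid < current && runnable))
        .map(|(&pid, _)| pid)
}

fn main() {
    let map: BTreeMap<usize, bool> =
        vec![(1, true), (2, false), (3, true), (5, true)].into_iter().collect();
    assert_eq!(pick_next(&map, 3), Some(5)); // forward first
    assert_eq!(pick_next(&map, 5), Some(1)); // then wrap around
    assert_eq!(pick_next(&map, 1), Some(3)); // 2 is skipped: not runnable
}
```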
diff --git a/kernel/lib.rs b/kernel/lib.rs
index bbd70f4..1e29fcd 100644
--- a/kernel/lib.rs
+++ b/kernel/lib.rs
@@ -100,6 +100,8 @@ extern crate bitflags;
 extern crate goblin;
 extern crate spin;
 
+use core::sync::atomic::{AtomicUsize, ATOMIC_USIZE_INIT, Ordering};
+
 /// Context management
 pub mod context;
 
@@ -117,6 +119,14 @@ pub mod syscall;
 #[cfg(test)]
 pub mod tests;
 
+#[thread_local]
+static CPU_ID: AtomicUsize = ATOMIC_USIZE_INIT;
+
+#[inline(always)]
+pub fn cpu_id() -> usize {
+    CPU_ID.load(Ordering::Relaxed)
+}
+
 pub extern fn userspace_init() {
     assert_eq!(syscall::open(b"debug:", 0), Ok(0));
     assert_eq!(syscall::open(b"debug:", 0), Ok(1));
@@ -129,6 +139,8 @@ pub extern fn userspace_init() {
 
 #[no_mangle]
 pub extern fn kmain() {
+    CPU_ID.store(0, Ordering::SeqCst);
+
     context::init();
 
     let pid = syscall::getpid();
@@ -144,15 +156,19 @@ pub extern fn kmain() {
         }
     }
 
-    unsafe { context::switch(); }
-
     loop {
-        unsafe { interrupt::enable_and_halt(); }
+        unsafe {
+            interrupt::disable();
+            context::switch();
+            interrupt::enable_and_halt();
+        }
     }
 }
 
 #[no_mangle]
 pub extern fn kmain_ap(id: usize) {
+    CPU_ID.store(id, Ordering::SeqCst);
+
     context::init();
 
     let pid = syscall::getpid();
diff --git a/kernel/syscall/mod.rs b/kernel/syscall/mod.rs
index 3fdb7a5..bd53ad4 100644
--- a/kernel/syscall/mod.rs
+++ b/kernel/syscall/mod.rs
@@ -105,32 +105,33 @@ pub fn convert_slice_mut<T>(ptr: *mut T, len: usize) -> Result<&'static mut [T]> {
     Ok(unsafe { slice::from_raw_parts_mut(ptr, len) })
 }
 
-pub fn handle(a: usize, b: usize, c: usize, d: usize, e: usize, _f: usize) -> Result<usize> {
-    match Call::from(a) {
-        Ok(call) => match call {
-            Call::Exit => exit(b),
-            Call::Read => read(b, convert_slice_mut(c as *mut u8, d)?),
-            Call::Write => write(b, convert_slice(c as *const u8, d)?),
-            Call::Open => open(convert_slice(b as *const u8, c)?, d),
-            Call::Close => close(b),
-            Call::Exec => exec(convert_slice(b as *const u8, c)?, convert_slice(d as *const [usize; 2], e)?),
-            Call::GetPid => getpid(),
-            Call::Dup => dup(b),
-            Call::Brk => brk(b),
-            Call::Iopl => iopl(b),
-            Call::Clone => clone(b),
-            Call::SchedYield => sched_yield()
-        },
-        Err(err) => {
-            println!("Unknown syscall {}", a);
-            Err(err)
+#[no_mangle]
+pub extern fn syscall(a: usize, b: usize, c: usize, d: usize, e: usize, f: usize, stack: usize) -> usize {
+    #[inline(always)]
+    fn inner(a: usize, b: usize, c: usize, d: usize, e: usize, _f: usize, stack: usize) -> Result<usize> {
+        match Call::from(a) {
+            Ok(call) => match call {
+                Call::Exit => exit(b),
+                Call::Read => read(b, convert_slice_mut(c as *mut u8, d)?),
+                Call::Write => write(b, convert_slice(c as *const u8, d)?),
+                Call::Open => open(convert_slice(b as *const u8, c)?, d),
+                Call::Close => close(b),
+                Call::Exec => exec(convert_slice(b as *const u8, c)?, convert_slice(d as *const [usize; 2], e)?),
+                Call::GetPid => getpid(),
+                Call::Dup => dup(b),
+                Call::Brk => brk(b),
+                Call::Iopl => iopl(b),
+                Call::Clone => clone(b, stack),
+                Call::SchedYield => sched_yield()
+            },
+            Err(err) => {
+                println!("Unknown syscall {}", a);
+                Err(err)
+            }
         }
     }
-}
 
-#[no_mangle]
-pub extern fn syscall(a: usize, b: usize, c: usize, d: usize, e: usize, f: usize) -> usize {
-    match handle(a, b, c, d, e, f) {
+    match inner(a, b, c, d, e, f, stack) {
         Ok(value) => value,
         Err(value) => (-(value as isize)) as usize
     }
 }
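On the Err path the dispatcher returns `(-(errno as isize)) as usize`, a Linux-style encoding that the userspace syscall crate undoes with `Error::demux` (see syscall/src/*.rs at the end of this patch). A round-trip sketch; the `demux` shown here is an assumption about its behavior, not the crate's actual code:

```rust
fn mux(result: Result<usize, usize>) -> usize {
    match result {
        Ok(value) => value,
        Err(errno) => (-(errno as isize)) as usize, // e.g. 2 -> 0xFFFF...FFFE
    }
}

// Assumed inverse: small negative values (read as isize) are error numbers.
fn demux(value: usize) -> Result<usize, usize> {
    let v = value as isize;
    if v < 0 && v > -4096 { Err((-v) as usize) } else { Ok(value) }
}

fn main() {
    assert_eq!(demux(mux(Ok(42))), Ok(42));
    assert_eq!(demux(mux(Err(2))), Err(2));
}
```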
diff --git a/kernel/syscall/process.rs b/kernel/syscall/process.rs
index ad1d107..5a4c83c 100644
--- a/kernel/syscall/process.rs
+++ b/kernel/syscall/process.rs
@@ -1,11 +1,15 @@
 ///! Process syscalls
+use core::mem;
 use core::str;
 
 use arch;
-use arch::paging::{VirtualAddress, entry};
+use arch::memory::allocate_frame;
+use arch::paging::{ActivePageTable, InactivePageTable, Page, VirtualAddress, entry};
+use arch::paging::temporary_page::TemporaryPage;
 use context;
 use elf;
+use scheme;
 use syscall::{self, Error, Result};
 
 pub fn brk(address: usize) -> Result<usize> {
@@ -42,9 +46,195 @@ pub fn brk(address: usize) -> Result<usize> {
     }
 }
 
-pub fn clone(flags: usize) -> Result<usize> {
-    println!("Clone {:X}", flags);
-    Ok(0)
+pub fn clone(flags: usize, stack_base: usize) -> Result<usize> {
+    //TODO: Implement flags
+    //TODO: Copy on write?
+    println!("Clone {:X}: {:X}", flags, stack_base);
+
+    let pid;
+    {
+        let arch;
+        let mut kstack_option = None;
+        let mut offset = 0;
+        let mut image = vec![];
+        let mut heap_option = None;
+        let mut stack_option = None;
+        let mut files = vec![];
+
+        // Copy from old process
+        {
+            let contexts = context::contexts();
+            let context_lock = contexts.current().ok_or(Error::NoProcess)?;
+            let context = context_lock.read();
+            arch = context.arch.clone();
+
+            if let Some(ref stack) = context.kstack {
+                offset = stack_base - stack.as_ptr() as usize - mem::size_of::<usize>(); // Add clone ret
+                let mut new_stack = stack.clone();
+                unsafe {
+                    let func_ptr = new_stack.as_mut_ptr().offset(offset as isize);
+                    *(func_ptr as *mut usize) = arch::interrupt::syscall::clone_ret as usize;
+                }
+                kstack_option = Some(new_stack);
+            }
+
+            for memory in context.image.iter() {
+                let mut new_memory = context::memory::Memory::new(
+                    VirtualAddress::new(memory.start_address().get() + arch::USER_TMP_OFFSET),
+                    memory.size(),
+                    entry::PRESENT | entry::NO_EXECUTE | entry::WRITABLE,
+                    true,
+                    false
+                );
+                unsafe {
+                    arch::externs::memcpy(new_memory.start_address().get() as *mut u8,
+                                          memory.start_address().get() as *const u8,
+                                          memory.size());
+                }
+                new_memory.remap(memory.flags(), true);
+                image.push(new_memory);
+            }
+
+            if let Some(ref heap) = context.heap {
+                let mut new_heap = context::memory::Memory::new(
+                    VirtualAddress::new(arch::USER_TMP_HEAP_OFFSET),
+                    heap.size(),
+                    entry::PRESENT | entry::NO_EXECUTE | entry::WRITABLE,
+                    true,
+                    false
+                );
+                unsafe {
+                    arch::externs::memcpy(new_heap.start_address().get() as *mut u8,
+                                          heap.start_address().get() as *const u8,
+                                          heap.size());
+                }
+                new_heap.remap(heap.flags(), true);
+                heap_option = Some(new_heap);
+            }
+
+            if let Some(ref stack) = context.stack {
+                let mut new_stack = context::memory::Memory::new(
+                    VirtualAddress::new(arch::USER_TMP_STACK_OFFSET),
+                    stack.size(),
+                    entry::PRESENT | entry::NO_EXECUTE | entry::WRITABLE,
+                    true,
+                    false
+                );
+                unsafe {
+                    arch::externs::memcpy(new_stack.start_address().get() as *mut u8,
+                                          stack.start_address().get() as *const u8,
+                                          stack.size());
+                }
+                new_stack.remap(stack.flags(), true);
+                stack_option = Some(new_stack);
+            }
+
+            for (fd, file_option) in context.files.iter().enumerate() {
+                if let Some(file) = *file_option {
+                    let result = {
+                        let schemes = scheme::schemes();
+                        let scheme_mutex = schemes.get(file.scheme).ok_or(Error::BadFile)?;
+                        let result = scheme_mutex.lock().dup(file.number);
+                        result
+                    };
+                    match result {
+                        Ok(new_number) => {
+                            files.push(Some(context::file::File { scheme: file.scheme, number: new_number }));
+                        },
+                        Err(err) => {
+                            println!("clone: failed to dup {}: {:?}", fd, err);
+                        }
+                    }
+                } else {
+                    files.push(None);
+                }
+            }
+        }
+
+        // Set up new process
+        {
+            let mut contexts = context::contexts_mut();
+            let context_lock = contexts.new_context()?;
+            let mut context = context_lock.write();
+
+            pid = context.id;
+
+            context.arch = arch;
+
+            let mut active_table = unsafe { ActivePageTable::new() };
+
+            let mut temporary_page = TemporaryPage::new(Page::containing_address(VirtualAddress::new(0x8_0000_0000)));
+
+            let mut new_table = {
+                let frame = allocate_frame().expect("no more frames in syscall::clone new_table");
+                InactivePageTable::new(frame, &mut active_table, &mut temporary_page)
+            };
+
+            // Copy kernel mapping
+            {
+                let kernel_frame = active_table.p4()[510].pointed_frame().expect("kernel table not mapped");
+                active_table.with(&mut new_table, &mut temporary_page, |mapper| {
+                    mapper.p4_mut()[510].set(kernel_frame, entry::PRESENT | entry::WRITABLE);
+                });
+            }
+
+            // Copy percpu mapping
+            {
+                extern {
+                    /// The starting byte of the thread data segment
+                    static mut __tdata_start: u8;
+                    /// The ending byte of the thread BSS segment
+                    static mut __tbss_end: u8;
+                }
+
+                let size = unsafe { & __tbss_end as *const _ as usize - & __tdata_start as *const _ as usize };
+
+                let start = arch::KERNEL_PERCPU_OFFSET + arch::KERNEL_PERCPU_SIZE * ::cpu_id();
+                let end = start + size;
+
+                let start_page = Page::containing_address(VirtualAddress::new(start));
+                let end_page = Page::containing_address(VirtualAddress::new(end - 1));
+                for page in Page::range_inclusive(start_page, end_page) {
+                    let frame = active_table.translate_page(page).expect("kernel percpu not mapped");
+                    active_table.with(&mut new_table, &mut temporary_page, |mapper| {
+                        mapper.map_to(page, frame, entry::PRESENT | entry::NO_EXECUTE | entry::WRITABLE);
+                    });
+                }
+            }
+
+
+            if let Some(stack) = kstack_option.take() {
+                context.arch.set_stack(stack.as_ptr() as usize + offset);
+                context.kstack = Some(stack);
+            }
+
+            for memory in image.iter_mut() {
+                let start = VirtualAddress::new(memory.start_address().get() - arch::USER_TMP_OFFSET + arch::USER_OFFSET);
+                memory.move_to(start, &mut new_table, &mut temporary_page, true);
+            }
+            context.image = image;
+
+            if let Some(mut heap) = heap_option.take() {
+                heap.move_to(VirtualAddress::new(arch::USER_HEAP_OFFSET), &mut new_table, &mut temporary_page, true);
+                context.heap = Some(heap);
+            }
+
+            if let Some(mut stack) = stack_option.take() {
+                stack.move_to(VirtualAddress::new(arch::USER_STACK_OFFSET), &mut new_table, &mut temporary_page, true);
+                context.stack = Some(stack);
+            }
+
+            context.files = files;
+
+            context.arch.set_page_table(unsafe { new_table.address() });
+
+            context.blocked = false;
+        }
+    }
+
+    unsafe { context::switch(); }
+
+    Ok(pid)
 }
 
 pub fn exit(status: usize) -> ! {
@@ -85,7 +275,9 @@ pub fn exec(path: &[u8], _args: &[[usize; 2]]) -> Result<usize> {
             let _ = syscall::close(file);
 
             match elf::Elf::from(&data) {
-                Ok(elf) => elf.run().and(Ok(0)),
+                Ok(elf) => {
+                    elf.run().and(Ok(0))
+                },
                 Err(err) => {
                     println!("failed to execute {}: {}", unsafe { str::from_utf8_unchecked(path) }, err);
                     Err(Error::NoExec)
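Taken together, clone now behaves like fork: the parent's syscall returns the new pid, and the child, resuming through the patched kernel stack and `clone_ret`, sees 0. A hypothetical userspace caller (the `syscall::clone` wrapper is switched to `syscall1_clobber` below, since the child comes back with most registers in an unspecified state):

```rust
// Hypothetical caller, assuming the fork-like semantics wired up above.
fn spawn_child() {
    match unsafe { syscall::clone(0) } {
        Ok(0) => { /* child: continue on the copied image/heap/stack */ }
        Ok(pid) => { /* parent: `pid` names the new context */ let _ = pid; }
        Err(err) => { /* clone failed */ let _ = err; }
    }
}
```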
diff --git a/syscall/src/lib.rs b/syscall/src/lib.rs
index 3f3e879..98fd080 100644
--- a/syscall/src/lib.rs
+++ b/syscall/src/lib.rs
@@ -110,7 +110,7 @@ pub fn chdir(path: &str) -> Result<usize> {
 }
 
 pub unsafe fn clone(flags: usize) -> Result<usize> {
-    syscall1(SYS_CLONE, flags)
+    syscall1_clobber(SYS_CLONE, flags)
 }
 
 pub fn close(fd: usize) -> Result<usize> {
diff --git a/syscall/src/x86.rs b/syscall/src/x86.rs
index d4b53e9..3196189 100644
--- a/syscall/src/x86.rs
+++ b/syscall/src/x86.rs
@@ -20,6 +20,17 @@ pub unsafe fn syscall1(mut a: usize, b: usize) -> Result<usize> {
     Error::demux(a)
 }
 
+// Clobbers all registers - special for clone
+pub unsafe fn syscall1_clobber(mut a: usize, b: usize) -> Result<usize> {
+    asm!("int 0x80"
+        : "={eax}"(a)
+        : "{eax}"(a), "{ebx}"(b)
+        : "memory", "ebx", "ecx", "edx", "esi", "edi"
+        : "intel", "volatile");
+
+    Error::demux(a)
+}
+
 pub unsafe fn syscall2(mut a: usize, b: usize, c: usize) -> Result<usize> {
     asm!("int 0x80"
         : "={eax}"(a)
diff --git a/syscall/src/x86_64.rs b/syscall/src/x86_64.rs
index 3de2d5f..84d5769 100644
--- a/syscall/src/x86_64.rs
+++ b/syscall/src/x86_64.rs
@@ -20,6 +20,17 @@ pub unsafe fn syscall1(mut a: usize, b: usize) -> Result<usize> {
     Error::demux(a)
 }
 
+// Clobbers all registers - special for clone
+pub unsafe fn syscall1_clobber(mut a: usize, b: usize) -> Result<usize> {
+    asm!("int 0x80"
+        : "={rax}"(a)
+        : "{rax}"(a), "{rbx}"(b)
+        : "memory", "rbx", "rcx", "rdx", "rsi", "rdi", "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"
+        : "intel", "volatile");
+
+    Error::demux(a)
+}
+
 pub unsafe fn syscall2(mut a: usize, b: usize, c: usize) -> Result<usize> {
     asm!("int 0x80"
         : "={rax}"(a)