// redox/arch/x86_64/src/start.rs

/// This function is where the kernel sets up IRQ handlers
/// It is incredibly unsafe, and should be minimal in nature
/// It must create the IDT with the correct entries, those entries are
/// defined in other files inside of the `arch` module
use core::sync::atomic::{AtomicBool, ATOMIC_BOOL_INIT, AtomicUsize, ATOMIC_USIZE_INIT, Ordering};
use acpi;
use allocator;
use device;
use externs::memset;
use gdt;
use idt;
use interrupt;
use memory::{self, Frame};
use paging::{self, entry, Page, PhysicalAddress, VirtualAddress};
/// Test of zero values in BSS.
static BSS_TEST_ZERO: usize = 0;
/// Test of non-zero values in data.
static DATA_TEST_NONZERO: usize = 0xFFFFFFFFFFFFFFFF;
/// Test of zero values in thread BSS.
#[thread_local]
static mut TBSS_TEST_ZERO: usize = 0;
/// Test of non-zero values in thread data.
#[thread_local]
static mut TDATA_TEST_NONZERO: usize = 0xFFFFFFFFFFFFFFFF;
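/// Set to true by an AP once it has finished its local initialization; cleared by the BSP before APs are started.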
pub static AP_READY: AtomicBool = ATOMIC_BOOL_INIT;
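/// Set to true by the BSP once global initialization is done; APs spin on this before calling kmain_ap.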
static BSP_READY: AtomicBool = ATOMIC_BOOL_INIT;
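/// Physical address of the page table referenced by the kernel heap's PML4 entry; published by the BSP so APs can map the same heap (0 until ready).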
static HEAP_TABLE: AtomicUsize = ATOMIC_USIZE_INIT;
extern {
    /// Kernel main function
    fn kmain() -> !;
    /// Kernel main for APs
    fn kmain_ap(id: usize) -> !;
}
/// The entry to Rust; all things must be initialized
#[no_mangle]
pub unsafe extern fn kstart() -> ! {
    {
        extern {
            /// The starting byte of the _.bss_ (uninitialized data) segment.
            static mut __bss_start: u8;
            /// The ending byte of the _.bss_ (uninitialized data) segment.
            static mut __bss_end: u8;
            /// The end of the kernel
            static mut __end: u8;
        }

        // Zero BSS, this initializes statics that are set to 0
        {
            let start_ptr = &mut __bss_start as *mut u8;
            let end_ptr = & __bss_end as *const u8 as usize;

            if start_ptr as usize <= end_ptr {
                let size = end_ptr - start_ptr as usize;
                memset(start_ptr, 0, size);
            }

            assert_eq!(BSS_TEST_ZERO, 0);
            assert_eq!(DATA_TEST_NONZERO, 0xFFFFFFFFFFFFFFFF);
        }

        // Initialize memory management
        memory::init(0, &__end as *const u8 as usize);

        // TODO: allocate a stack
        let stack_start = 0x00080000;
        let stack_end = 0x0009F000;
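        // NOTE: this hardcoded range lies in low conventional memory (below 640 KiB)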

        // Initialize paging
        let (mut active_table, tcb_offset) = paging::init(0, stack_start, stack_end);

        // Set up GDT
        gdt::init(tcb_offset, stack_end);

        // Set up IDT
        idt::init();

        // Test tdata and tbss
        {
            assert_eq!(TBSS_TEST_ZERO, 0);
            TBSS_TEST_ZERO += 1;
            assert_eq!(TBSS_TEST_ZERO, 1);
            assert_eq!(TDATA_TEST_NONZERO, 0xFFFFFFFFFFFFFFFF);
            TDATA_TEST_NONZERO -= 1;
            assert_eq!(TDATA_TEST_NONZERO, 0xFFFFFFFFFFFFFFFE);
        }

        // Reset AP variables
        AP_READY.store(false, Ordering::SeqCst);
        BSP_READY.store(false, Ordering::SeqCst);
        HEAP_TABLE.store(0, Ordering::SeqCst);

        // Map heap
        {
            // Map heap pages
            let heap_start_page = Page::containing_address(VirtualAddress::new(::KERNEL_HEAP_OFFSET));
            let heap_end_page = Page::containing_address(VirtualAddress::new(::KERNEL_HEAP_OFFSET + ::KERNEL_HEAP_SIZE - 1));
            for page in Page::range_inclusive(heap_start_page, heap_end_page) {
                active_table.map(page, entry::WRITABLE | entry::NO_EXECUTE);
            }

            // Init the allocator
            allocator::init(::KERNEL_HEAP_OFFSET, ::KERNEL_HEAP_SIZE);

            // Send heap page table to APs
            let index = heap_start_page.p4_index();

            let p4 = active_table.p4();
            {
                let entry = &p4[index];
                if let Some(frame) = entry.pointed_frame() {
                    HEAP_TABLE.store(frame.start_address().get(), Ordering::SeqCst);
                } else {
                    panic!("heap does not have PML4 entry");
                }
            }
        }

        // Initialize devices
        device::init(&mut active_table);

        // Read ACPI tables, starts APs
        acpi::init(&mut active_table);

        BSP_READY.store(true, Ordering::SeqCst);
    }

    kmain();
}
/// Entry to Rust for an AP
pub unsafe extern fn kstart_ap(cpu_id: usize, page_table: usize, stack_start: usize, stack_end: usize) -> ! {
    {
        assert_eq!(BSS_TEST_ZERO, 0);
        assert_eq!(DATA_TEST_NONZERO, 0xFFFFFFFFFFFFFFFF);

        // Initialize paging
        let (mut active_table, tcb_offset) = paging::init(cpu_id, stack_start, stack_end);

        // Set up GDT for AP
        gdt::init(tcb_offset, stack_end);

        // Set up IDT for AP
        idt::init();

        // Test tdata and tbss
        {
            assert_eq!(TBSS_TEST_ZERO, 0);
            TBSS_TEST_ZERO += 1;
            assert_eq!(TBSS_TEST_ZERO, 1);
            assert_eq!(TDATA_TEST_NONZERO, 0xFFFFFFFFFFFFFFFF);
            TDATA_TEST_NONZERO -= 1;
            assert_eq!(TDATA_TEST_NONZERO, 0xFFFFFFFFFFFFFFFE);
        }

        // Copy heap PML4
        {
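            // Wait until the BSP has published the heap page table address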
            while HEAP_TABLE.load(Ordering::SeqCst) == 0 {
                interrupt::pause();
            }

            let frame = Frame::containing_address(PhysicalAddress::new(HEAP_TABLE.load(Ordering::SeqCst)));
            let page = Page::containing_address(VirtualAddress::new(::KERNEL_HEAP_OFFSET));

            let p4 = active_table.p4_mut();
            let entry = &mut p4[page.p4_index()];
            assert!(entry.is_unused());
            entry.set(frame, entry::PRESENT | entry::WRITABLE);
        }

        // Init devices for AP
        device::init_ap(&mut active_table);

        AP_READY.store(true, Ordering::SeqCst);
    }
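
    // Wait for the BSP to finish global initialization before entering kmain_ap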
    while !BSP_READY.load(Ordering::SeqCst) {
        interrupt::pause();
    }

    kmain_ap(cpu_id);
}
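
/// Switch to usermode by building an `iretq` frame. The selector 0x2B is loaded
/// into the data segment registers and pushed as SS, and 0x23 is pushed as CS
/// (both with RPL 3); `sp` becomes the user stack pointer and `ip` the new
/// instruction pointer. RFLAGS is set with IOPL = 3 and interrupts enabled.
/// Never returns.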
pub unsafe fn usermode(ip: usize, sp: usize) -> ! {
    // Go to usermode
    asm!("mov rax, 0x2B # Set segment pointers
          mov ds, ax
          mov es, ax
          mov fs, ax
          mov gs, ax
          push rax # Push stack segment
          push rbx # Push stack pointer
          mov rax, 3 << 12 | 1 << 9 # Set IOPL and interrupt enable flag
          push rax # Push rflags
          mov rax, 0x23
          push rax # Push code segment
          push rcx # Push rip
          iretq"
         :
         : "{rbx}"(sp), "{rcx}"(ip)
         : "rax", "sp"
         : "intel", "volatile");

    unreachable!();
}