Map kernel TLS to general area

This commit is contained in:
Jeremy Soller 2016-09-12 15:02:03 -06:00
parent 11eddfecd1
commit 6d675fc43a
5 changed files with 40 additions and 56 deletions

View file

@@ -88,7 +88,7 @@ pub static mut TSS: TaskStateSegment = TaskStateSegment {
iomap_base: 0xFFFF
};
/// Initialize GDT on the BSP
/// Initialize GDT
pub unsafe fn init(tcb_offset: usize, stack_offset: usize) {
// Setup the initial GDT with TLS, so we can setup the TLS GDT (a little confusing)
// This means that each CPU will have its own GDT, but we only need to define it once as a thread local
@@ -98,12 +98,6 @@ pub unsafe fn init(tcb_offset: usize, stack_offset: usize) {
// Set the TLS segment to the offset of the Thread Control Block
INIT_GDT[GDT_KERNEL_TLS].set_offset(tcb_offset as u32);
// Run the AP GDT initialization, which does the rest
init_ap(tcb_offset, stack_offset);
}
/// Initialize GDT for an AP
pub unsafe fn init_ap(tcb_offset: usize, stack_offset: usize) {
// Load the initial GDT, before we have access to thread locals
dtables::lgdt(&INIT_GDTR);

View file

@@ -38,11 +38,19 @@ pub extern crate x86;
pub const KERNEL_HEAP_SIZE: usize = 64 * 1024 * 1024; // 64 MB
/// Offset to kernel percpu variables
pub const KERNEL_PERCPU_OFFSET: usize = KERNEL_HEAP_OFFSET - PML4_SIZE;
//TODO: Use 64-bit fs offset to enable this pub const KERNEL_PERCPU_OFFSET: usize = KERNEL_HEAP_OFFSET - PML4_SIZE;
pub const KERNEL_PERCPU_OFFSET: usize = 0xC0000000;
/// Size of kernel percpu variables
pub const KERNEL_PERCPU_SIZE: usize = 64 * 1024; // 64 KB
/// Offset to user heap
pub const USER_HEAP_OFFSET: usize = PML4_SIZE;
/// Offset to user stack
pub const USER_STACK_OFFSET: usize = USER_HEAP_OFFSET + PML4_SIZE;
/// Size of user stack
pub const USER_STACK_SIZE: usize = 1024 * 1024; // 1 MB
/// Print to console
#[macro_export]

View file

@@ -64,6 +64,20 @@ pub unsafe fn init(cpu_id: usize, stack_start: usize, stack_end: usize) -> (Acti
active_table.with(&mut new_table, &mut temporary_page, |mapper| {
{
// Map tdata and tbss
{
let size = & __tbss_end as *const _ as usize - & __tdata_start as *const _ as usize;
let start = ::KERNEL_PERCPU_OFFSET + ::KERNEL_PERCPU_SIZE * cpu_id;
let end = start + size;
let start_page = Page::containing_address(VirtualAddress::new(start));
let end_page = Page::containing_address(VirtualAddress::new(end - 1));
for page in Page::range_inclusive(start_page, end_page) {
mapper.map(page, PRESENT | NO_EXECUTE | WRITABLE);
}
}
let mut remap = |start: usize, end: usize, flags: EntryFlags| {
if end > start {
let start_frame = Frame::containing_address(PhysicalAddress::new(start));
@@ -90,6 +104,8 @@ pub unsafe fn init(cpu_id: usize, stack_start: usize, stack_end: usize) -> (Acti
remap_section(& __rodata_start, & __rodata_end, PRESENT | NO_EXECUTE);
// Remap data writable, no execute
remap_section(& __data_start, & __data_end, PRESENT | NO_EXECUTE | WRITABLE);
// Remap tdata master writable, no execute
remap_section(& __tdata_start, & __tdata_end, PRESENT | NO_EXECUTE);
// Remap bss writable, no execute
remap_section(& __bss_start, & __bss_end, PRESENT | NO_EXECUTE | WRITABLE);
}
@@ -117,57 +133,22 @@ pub unsafe fn init(cpu_id: usize, stack_start: usize, stack_end: usize) -> (Acti
active_table.switch(new_table);
// Map and copy TDATA
{
let start = & __tdata_start as *const _ as usize;
let end = & __tdata_end as *const _ as usize;
if end > start {
temporary_page.map(allocate_frame().expect("no more frames in paging::init TDATA"), PRESENT | NO_EXECUTE | WRITABLE, &mut active_table);
let start_page = Page::containing_address(VirtualAddress::new(start));
let end_page = Page::containing_address(VirtualAddress::new(end - 1));
for page in Page::range_inclusive(start_page, end_page) {
// Copy parent to temporary page
{
let frame = Frame::containing_address(PhysicalAddress::new(page.start_address().get()));
active_table.identity_map(frame, PRESENT | NO_EXECUTE);
active_table.flush(page);
::externs::memcpy(temporary_page.start_address().get() as *mut u8, page.start_address().get() as *const u8, 4096);
active_table.unmap(page);
}
// Copy temporary page to child
{
active_table.map(page, PRESENT | NO_EXECUTE | WRITABLE);
active_table.flush(page);
::externs::memcpy(page.start_address().get() as *mut u8, temporary_page.start_address().get() as *const u8, 4096);
}
}
temporary_page.unmap(&mut active_table);
}
}
// Map and clear TBSS
// Copy tdata, clear tbss, set TCB self pointer
let tcb_offset;
{
let start = & __tbss_start as *const _ as usize;
let end = & __tbss_end as *const _ as usize;
let size = & __tbss_end as *const _ as usize - & __tdata_start as *const _ as usize;
let tbss_offset = & __tbss_start as *const _ as usize - & __tdata_start as *const _ as usize;
let start = ::KERNEL_PERCPU_OFFSET + ::KERNEL_PERCPU_SIZE * cpu_id;
let end = start + size;
tcb_offset = end - mem::size_of::<usize>();
if end > start {
let start_page = Page::containing_address(VirtualAddress::new(start));
let end_page = Page::containing_address(VirtualAddress::new(end - 1));
for page in Page::range_inclusive(start_page, end_page) {
active_table.map(page, PRESENT | NO_EXECUTE | WRITABLE);
active_table.flush(page);
::externs::memset(page.start_address().get() as *mut u8, 0, 4096);
}
*(tcb_offset as *mut usize) = end;
}
::externs::memcpy(start as *mut u8, & __tdata_start as *const u8, tbss_offset);
::externs::memset((start + tbss_offset) as *mut u8, 0, size - tbss_offset);
*(tcb_offset as *mut usize) = end;
}
active_table.flush_all();
(active_table, tcb_offset)
}

View file

@@ -143,7 +143,7 @@ pub unsafe extern fn kstart_ap(cpu_id: usize, stack_start: usize, stack_end: usi
let (mut active_table, tcb_offset) = paging::init(cpu_id, stack_start, stack_end);
// Set up GDT for AP
gdt::init_ap(tcb_offset, stack_end);
gdt::init(tcb_offset, stack_end);
// Set up IDT for AP
idt::init();

View file

@@ -10,6 +10,7 @@ use goblin::elf32::{header, program_header};
#[cfg(target_arch = "x86_64")]
use goblin::elf64::{header, program_header};
use arch;
use arch::externs::memcpy;
use arch::paging::{entry, VirtualAddress};
use arch::start::usermode;
@@ -54,8 +55,8 @@ impl<'a> Elf<'a> {
/// Test function to run. Remove and replace with proper syscall
pub fn run(self) -> SysResult<!> {
let stack_addr = 0x80000000;
let stack_size = 64 * 1024;
let stack_addr = arch::USER_STACK_OFFSET;
let stack_size = arch::USER_STACK_SIZE;
{
let contexts = context::contexts();
let context_lock = contexts.current().ok_or(Error::NoProcess)?;