Mark kernel memory global

This commit is contained in:
Jeremy Soller 2016-10-16 16:18:01 -06:00
parent 2e575db3db
commit 9c2a45d0bf
6 changed files with 20 additions and 18 deletions

View file

@@ -70,6 +70,7 @@ impl Context {
#[inline(never)]
#[naked]
pub unsafe fn switch_to(&mut self, next: &mut Context) {
+/*
asm!("fxsave [$0]" : : "r"(self.fx) : "memory" : "intel", "volatile");
self.loadable = true;
if next.loadable {
@@ -77,6 +78,7 @@ impl Context {
}else{
asm!("fninit" : : : "memory" : "intel", "volatile");
}
+*/
asm!("mov $0, cr3" : "=r"(self.cr3) : : "memory" : "intel", "volatile");
if next.cr3 != self.cr3 {

View file

@@ -27,10 +27,10 @@ pub unsafe fn acknowledge(irq: usize) {
}
interrupt!(pit, {
-irq_trigger(0);
+// Saves CPU time by not sending IRQ event irq_trigger(0);
{
-const PIT_RATE: u64 = 46500044;
+const PIT_RATE: u64 = 2250286;
let mut offset = time::OFFSET.lock();
let sum = offset.1 + PIT_RATE;

View file

@@ -7,7 +7,7 @@ use x86::{msr, tlb};
use memory::{allocate_frame, Frame};
-use self::entry::{EntryFlags, PRESENT, WRITABLE, NO_EXECUTE};
+use self::entry::{EntryFlags, PRESENT, GLOBAL, WRITABLE, NO_EXECUTE};
use self::mapper::Mapper;
use self::temporary_page::TemporaryPage;
@@ -129,7 +129,7 @@ pub unsafe fn init(cpu_id: usize, stack_start: usize, stack_end: usize) -> (Acti
let start_page = Page::containing_address(VirtualAddress::new(start));
let end_page = Page::containing_address(VirtualAddress::new(end - 1));
for page in Page::range_inclusive(start_page, end_page) {
-mapper.map(page, PRESENT | NO_EXECUTE | WRITABLE);
+mapper.map(page, PRESENT | GLOBAL | NO_EXECUTE | WRITABLE);
}
}
@@ -145,22 +145,22 @@ pub unsafe fn init(cpu_id: usize, stack_start: usize, stack_end: usize) -> (Acti
};
// Remap stack writable, no execute
-remap(stack_start - ::KERNEL_OFFSET, stack_end - ::KERNEL_OFFSET, PRESENT | NO_EXECUTE | WRITABLE);
+remap(stack_start - ::KERNEL_OFFSET, stack_end - ::KERNEL_OFFSET, PRESENT | GLOBAL | NO_EXECUTE | WRITABLE);
// Remap a section with `flags`
let mut remap_section = |start: &u8, end: &u8, flags: EntryFlags| {
remap(start as *const _ as usize - ::KERNEL_OFFSET, end as *const _ as usize - ::KERNEL_OFFSET, flags);
};
// Remap text read-only
-remap_section(& __text_start, & __text_end, PRESENT);
+remap_section(& __text_start, & __text_end, PRESENT | GLOBAL);
// Remap rodata read-only, no execute
-remap_section(& __rodata_start, & __rodata_end, PRESENT | NO_EXECUTE);
+remap_section(& __rodata_start, & __rodata_end, PRESENT | GLOBAL | NO_EXECUTE);
// Remap data writable, no execute
-remap_section(& __data_start, & __data_end, PRESENT | NO_EXECUTE | WRITABLE);
+remap_section(& __data_start, & __data_end, PRESENT | GLOBAL | NO_EXECUTE | WRITABLE);
// Remap tdata master writable, no execute
-remap_section(& __tdata_start, & __tdata_end, PRESENT | NO_EXECUTE);
+remap_section(& __tdata_start, & __tdata_end, PRESENT | GLOBAL | NO_EXECUTE);
// Remap bss writable, no execute
-remap_section(& __bss_start, & __bss_end, PRESENT | NO_EXECUTE | WRITABLE);
+remap_section(& __bss_start, & __bss_end, PRESENT | GLOBAL | NO_EXECUTE | WRITABLE);
}
});
@@ -207,7 +207,7 @@ pub unsafe fn init_ap(cpu_id: usize, stack_start: usize, stack_end: usize, kerne
let start_page = Page::containing_address(VirtualAddress::new(start));
let end_page = Page::containing_address(VirtualAddress::new(end - 1));
for page in Page::range_inclusive(start_page, end_page) {
-mapper.map(page, PRESENT | NO_EXECUTE | WRITABLE);
+mapper.map(page, PRESENT | GLOBAL | NO_EXECUTE | WRITABLE);
}
}
@@ -223,7 +223,7 @@ pub unsafe fn init_ap(cpu_id: usize, stack_start: usize, stack_end: usize, kerne
};
// Remap stack writable, no execute
-remap(stack_start - ::KERNEL_OFFSET, stack_end - ::KERNEL_OFFSET, PRESENT | NO_EXECUTE | WRITABLE);
+remap(stack_start - ::KERNEL_OFFSET, stack_end - ::KERNEL_OFFSET, PRESENT | GLOBAL | NO_EXECUTE | WRITABLE);
});
active_table.switch(new_table);

View file

@@ -101,7 +101,7 @@ pub unsafe extern fn kstart() -> ! {
let heap_start_page = Page::containing_address(VirtualAddress::new(::KERNEL_HEAP_OFFSET));
let heap_end_page = Page::containing_address(VirtualAddress::new(::KERNEL_HEAP_OFFSET + ::KERNEL_HEAP_SIZE-1));
for page in Page::range_inclusive(heap_start_page, heap_end_page) {
-active_table.map(page, entry::WRITABLE | entry::NO_EXECUTE);
+active_table.map(page, entry::PRESENT | entry::GLOBAL | entry::WRITABLE | entry::NO_EXECUTE);
}
// Init the allocator

View file

@@ -38,7 +38,7 @@ initialize:
.pit:
;initialize the PIT
-mov ax, 55483 ;5370 ;this is the divider for the PIT
+mov ax, 2685 ;this is the divider for the PIT
out 0x40, al
rol ax, 8
out 0x40, al

View file

@@ -26,9 +26,9 @@ startup_ap:
mov edi, 0x70000
mov cr3, edi
-;enable Page Address Extension and Page Size Extension
+;enable Page Global, Page Address Extension, and Page Size Extension
mov eax, cr4
-or eax, 1 << 5 | 1 << 4
+or eax, 1 << 7 | 1 << 5 | 1 << 4
mov cr4, eax
; load protected mode GDT
@@ -91,9 +91,9 @@ startup_arch:
mov edi, 0x70000
mov cr3, edi
-;enable Page Address Extension and Page Size Extension
+;enable Page Global, Page Address Extension, and Page Size Extension
mov eax, cr4
-or eax, 1 << 5 | 1 << 4
+or eax, 1 << 7 | 1 << 5 | 1 << 4
mov cr4, eax
; load protected mode GDT