Add simple paging, use Rust libcore, and compile without SSE

Jeremy Soller 2016-08-15 11:29:53 -06:00
parent 9f0819dafb
commit 465363f0a1
18 changed files with 601 additions and 53 deletions

.gitmodules vendored Normal file (+3)

@@ -0,0 +1,3 @@
[submodule "rust"]
	path = rust
	url = https://github.com/rust-lang/rust.git


@@ -14,12 +14,25 @@ qemu: build/harddrive.bin
 FORCE:

-build:
-	mkdir build
+build/libcore.rlib: rust/src/libcore/lib.rs
+	mkdir -p build
+	./rustc.sh --target $(ARCH)-unknown-none.json -C soft-float -o $@ $<

-build/libkernel.a: build FORCE
-	cargo rustc -- --crate-type staticlib -o $@
-	#--target $(ARCH)-unknown-none.json
+build/liballoc.rlib: rust/src/liballoc/lib.rs build/libcore.rlib
+	mkdir -p build
+	./rustc.sh --target $(ARCH)-unknown-none.json -C soft-float -o $@ $<
+
+build/librustc_unicode.rlib: rust/src/librustc_unicode/lib.rs build/libcore.rlib
+	mkdir -p build
+	./rustc.sh --target $(ARCH)-unknown-none.json -C soft-float -o $@ $<
+
+build/libcollections.rlib: rust/src/libcollections/lib.rs build/libcore.rlib build/liballoc.rlib build/librustc_unicode.rlib
+	mkdir -p build
+	./rustc.sh --target $(ARCH)-unknown-none.json -C soft-float -o $@ $<
+
+build/libkernel.a: build/libcore.rlib build/liballoc.rlib build/libcollections.rlib FORCE
+	mkdir -p build
+	RUSTC="./rustc.sh" cargo rustc --verbose --target $(ARCH)-unknown-none.json -- -C soft-float -o $@

 build/kernel.bin: build/libkernel.a
 	ld -m elf_$(ARCH) --gc-sections -z max-page-size=0x1000 -T bootloader/x86/kernel.ld -o $@ $<
@@ -31,4 +44,4 @@ build/harddrive.bin: build/kernel.bin
 	nasm -f bin -o $@ -D ARCH_$(ARCH) -ibootloader/x86/ -ibuild/ bootloader/x86/harddrive.asm

 clean:
-	rm -rf build/*
+	rm -rf build/* target/*
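The kernel now links against freestanding rlibs of libcore, liballoc, librustc_unicode, and libcollections, built from the vendored rust submodule with the same target json and -C soft-float flags as the kernel itself. A minimal sketch of what this makes possible in kernel code, assuming an allocator is wired up elsewhere (this commit does not add one); `example` is a hypothetical function, and the pre-1.0 `collections` crate is feature-gated as it was in 2016:

    #![no_std]
    #![feature(collections)]

    // The freestanding collections crate built by the Makefile rules above.
    extern crate collections;

    use collections::vec::Vec;

    // Hypothetical: heap-backed collections work once a kernel allocator exists.
    pub fn example() -> usize {
        let mut v = Vec::new();
        v.push(1);
        v.push(2);
        v.len() // 2
    }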


@@ -7,6 +7,15 @@ pub static mut IDTR: IdtDescriptor = IdtDescriptor {

 pub static mut IDT: [IdtEntry; 256] = [IdtEntry::new(); 256];

+pub unsafe fn init(func: unsafe extern fn()) {
+    for entry in IDT.iter_mut() {
+        entry.set_flags(IDT_PRESENT | IDT_RING_0 | IDT_INTERRUPT);
+        entry.set_offset(8, func as usize);
+    }
+    IDTR.set_slice(&IDT);
+    IDTR.load();
+}
+
 bitflags! {
     pub flags IdtFlags: u8 {
         const IDT_PRESENT = 1 << 7,
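The new `idt::init` points every one of the 256 IDT vectors at a single handler: each entry gets the present/ring-0/interrupt-gate flags and the handler's address under selector 8 (the kernel code segment), after which the IDTR is pointed at the table and loaded. A minimal usage sketch mirroring the call in `kstart` below; `blank` is the kernel's placeholder handler, `setup` is a hypothetical wrapper:

    // Placeholder handler installed on every vector for now.
    unsafe extern fn blank() {}

    unsafe fn setup() {
        // Fill all 256 entries, then load the IDTR.
        idt::init(blank);
    }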


@@ -5,6 +5,7 @@
 #![feature(const_fn)]
 #![feature(core_intrinsics)]
 #![feature(naked_functions)]
+#![feature(unique)]
 #![no_std]

 #[macro_use]
@@ -67,6 +68,9 @@ macro_rules! interrupt {
     };
 }

+/// Memcpy, memmove, etc.
+pub mod externs;
+
 /// Global descriptor table
 pub mod gdt;

@@ -82,13 +86,14 @@ pub mod interrupt;
 /// Initialization and main function
 pub mod main;

-/// Memcpy, memmove, etc.
-pub mod mem;
+/// Memory management
+pub mod memory;
+
+/// Paging
+pub mod paging;

 /// Serial driver and print! support
 pub mod serial;

 /// Task state segment
 pub mod tss;
-
-pub mod physical;


@@ -3,8 +3,20 @@
 /// It must create the IDT with the correct entries, those entries are
 /// defined in other files inside of the `arch` module

-use super::idt::{IDTR, IDT, IDT_PRESENT, IDT_RING_0, IDT_INTERRUPT};
-use super::mem::memset;
+use externs::memset;
+use idt;
+use memory::{self, Frame};
+use paging::{self, entry, PhysicalAddress};
+
+/// Test of zero values in BSS.
+static BSS_TEST_ZERO: usize = 0;
+/// Test of non-zero values in BSS.
+static BSS_TEST_NONZERO: usize = 0xFFFFFFFFFFFFFFFF;
+
+extern {
+    /// Kernel main function
+    fn kmain() -> !;
+}

 extern {
     /// The starting byte of the text (code) data segment.
@@ -25,15 +37,6 @@ extern {
     static mut __bss_end: u8;
 }

-/// Test of zero values in BSS.
-static BSS_TEST_ZERO: usize = 0;
-/// Test of non-zero values in BSS.
-static BSS_TEST_NONZERO: usize = 0xFFFFFFFFFFFFFFFF;
-
-extern {
-    fn kmain() -> !;
-}
-
 #[no_mangle]
 pub unsafe extern fn kstart() -> ! {
     asm!("xchg bx, bx" : : : : "intel", "volatile");
@@ -52,21 +55,31 @@ pub unsafe extern fn kstart() -> ! {
         debug_assert_eq!(BSS_TEST_NONZERO, 0xFFFFFFFFFFFFFFFF);
     }

-    asm!("xchg bx, bx" : : : : "intel", "volatile");
-
-    //Set up IDT
-    for entry in IDT.iter_mut() {
-        entry.set_flags(IDT_PRESENT | IDT_RING_0 | IDT_INTERRUPT);
-        entry.set_offset(8, blank as usize);
-    }
-    IDTR.set_slice(&IDT);
-    IDTR.load();
-
-    asm!("xchg bx, bx" : : : : "intel", "volatile");
-    asm!("int 0xFF" : : : : "intel", "volatile");
-
-    asm!("xchg bx, bx" : : : : "intel", "volatile");
-    //TODO
+    // Set up IDT
+    idt::init(blank);
+
+    // Initialize memory management
+    let mut allocator = memory::init(0, &__bss_end as *const u8 as usize);
+
+    // Initialize paging
+    let mut pager = paging::init();
+
+    // Remap a section with `flags`
+    let mut remap_section = |start_ref: &u8, end_ref: &u8, flags: entry::EntryFlags| {
+        let start = start_ref as *const _ as usize;
+        let end = end_ref as *const _ as usize;
+        for i in 0..(end - start + paging::PAGE_SIZE - 1)/paging::PAGE_SIZE {
+            let frame = Frame::containing_address(PhysicalAddress::new(start + i * paging::PAGE_SIZE));
+            pager.identity_map(frame, flags, &mut allocator);
+        }
+    };
+
+    // Remap text read-only
+    {
+        asm!("xchg bx, bx" : : : : "intel", "volatile");
+        remap_section(& __text_start, & __text_end, entry::PRESENT);
+    }

     kmain();
 }
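`remap_section` computes how many whole pages cover a byte range with the usual round-up trick, (len + PAGE_SIZE - 1) / PAGE_SIZE. A standalone, runnable illustration (the addresses are made up):

    const PAGE_SIZE: usize = 4096;

    /// Pages needed to cover `start..end` (ceiling division).
    fn pages_spanned(start: usize, end: usize) -> usize {
        (end - start + PAGE_SIZE - 1) / PAGE_SIZE
    }

    fn main() {
        assert_eq!(pages_spanned(0x1000, 0x1001), 1); // one byte still needs a whole page
        assert_eq!(pages_spanned(0x1000, 0x3000), 2); // exactly two pages
        assert_eq!(pages_spanned(0x1000, 0x3001), 3); // one byte over: three
    }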


@@ -0,0 +1,81 @@
//! # Area frame allocator
//! Some code was borrowed from [Phil Opp's Blog](http://os.phil-opp.com/allocating-frames.html)

use paging::PhysicalAddress;

use super::{Frame, FrameAllocator, MemoryArea, MemoryAreaIter};

pub struct AreaFrameAllocator {
    next_free_frame: Frame,
    current_area: Option<&'static MemoryArea>,
    areas: MemoryAreaIter,
    kernel_start: Frame,
    kernel_end: Frame
}

impl AreaFrameAllocator {
    pub fn new(kernel_start: usize, kernel_end: usize, memory_areas: MemoryAreaIter) -> AreaFrameAllocator {
        let mut allocator = AreaFrameAllocator {
            next_free_frame: Frame::containing_address(PhysicalAddress::new(0)),
            current_area: None,
            areas: memory_areas,
            kernel_start: Frame::containing_address(PhysicalAddress::new(kernel_start)),
            kernel_end: Frame::containing_address(PhysicalAddress::new(kernel_end))
        };
        allocator.choose_next_area();
        allocator
    }

    fn choose_next_area(&mut self) {
        self.current_area = self.areas.clone().filter(|area| {
            let address = area.base_addr + area.length - 1;
            Frame::containing_address(PhysicalAddress::new(address as usize)) >= self.next_free_frame
        }).min_by_key(|area| area.base_addr);

        if let Some(area) = self.current_area {
            let start_frame = Frame::containing_address(PhysicalAddress::new(area.base_addr as usize));
            if self.next_free_frame < start_frame {
                self.next_free_frame = start_frame;
            }
        }
    }
}

impl FrameAllocator for AreaFrameAllocator {
    fn allocate_frame(&mut self) -> Option<Frame> {
        if let Some(area) = self.current_area {
            // "Clone" the frame to return it if it's free. Frame doesn't
            // implement Clone, but we can construct an identical frame.
            let frame = Frame { number: self.next_free_frame.number };

            // the last frame of the current area
            let current_area_last_frame = {
                let address = area.base_addr + area.length - 1;
                Frame::containing_address(PhysicalAddress::new(address as usize))
            };

            if frame > current_area_last_frame {
                // all frames of current area are used, switch to next area
                self.choose_next_area();
            } else if frame >= self.kernel_start && frame <= self.kernel_end {
                // `frame` is used by the kernel
                self.next_free_frame = Frame {
                    number: self.kernel_end.number + 1
                };
            } else {
                // frame is unused, increment `next_free_frame` and return it
                self.next_free_frame.number += 1;
                return Some(frame);
            }

            // `frame` was not valid, try it again with the updated `next_free_frame`
            self.allocate_frame()
        } else {
            None // no free frames left
        }
    }

    fn deallocate_frame(&mut self, frame: Frame) {
        unimplemented!()
    }
}
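The allocation policy above is a bump allocator over the memory map that hands frames out in order, jumps past the kernel image in one step, and retries after switching areas. A runnable userspace model of the same policy (frame numbers only; the area bounds are made up):

    fn main() {
        let (kernel_start, kernel_end) = (2usize, 4usize); // frames the kernel occupies
        let area_last = 9usize;                            // last frame of the only area
        let mut next_free = 0usize;
        let mut allocated = Vec::new();

        while next_free <= area_last {
            if next_free >= kernel_start && next_free <= kernel_end {
                // frame is used by the kernel: skip it in one jump, as above
                next_free = kernel_end + 1;
            } else {
                allocated.push(next_free);
                next_free += 1;
            }
        }

        assert_eq!(allocated, [0, 1, 5, 6, 7, 8, 9]);
    }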


@@ -0,0 +1,109 @@
//! # Memory management
//! Some code was borrowed from [Phil Opp's Blog](http://os.phil-opp.com/allocating-frames.html)

pub use paging::{PAGE_SIZE, PhysicalAddress};

use self::area_frame_alloc::AreaFrameAllocator;

pub mod area_frame_alloc;

/// The current memory map. Its size is capped at 512 entries because it is copied
/// from 0x500 to 0x5000 (800 entries would be the absolute maximum for that range)
static mut MEMORY_MAP: [MemoryArea; 512] = [MemoryArea { base_addr: 0, length: 0, _type: 0, acpi: 0 }; 512];

/// Memory does not exist
const MEMORY_AREA_NULL: u32 = 0;

/// Memory is free to use
const MEMORY_AREA_FREE: u32 = 1;

/// Memory is reserved
const MEMORY_AREA_RESERVED: u32 = 2;

/// Memory is used by ACPI, and can be reclaimed
const MEMORY_AREA_ACPI: u32 = 3;

#[derive(Clone)]
pub struct MemoryAreaIter {
    _type: u32,
    i: usize
}

impl MemoryAreaIter {
    fn new(_type: u32) -> Self {
        MemoryAreaIter {
            _type: _type,
            i: 0
        }
    }
}

impl Iterator for MemoryAreaIter {
    type Item = &'static MemoryArea;
    fn next(&mut self) -> Option<Self::Item> {
        while self.i < unsafe { MEMORY_MAP.len() } {
            let entry = unsafe { &MEMORY_MAP[self.i] };
            self.i += 1;
            if entry._type == self._type {
                return Some(entry);
            }
        }
        None
    }
}

/// Init memory module
/// Must be called once, and only once
pub unsafe fn init(kernel_start: usize, kernel_end: usize) -> AreaFrameAllocator {
    // Copy memory map from bootloader location
    for (i, mut entry) in MEMORY_MAP.iter_mut().enumerate() {
        *entry = *(0x500 as *const MemoryArea).offset(i as isize);
        if entry.length > 0 {
            println!("{:?}", entry);
        }
    }

    AreaFrameAllocator::new(kernel_start, kernel_end, MemoryAreaIter::new(MEMORY_AREA_FREE))
}

/// A memory map area
#[derive(Copy, Clone, Debug, Default)]
#[repr(packed)]
pub struct MemoryArea {
    pub base_addr: u64,
    pub length: u64,
    pub _type: u32,
    pub acpi: u32
}

/// A frame, allocated by the frame allocator.
/// Do not add more derives, or make anything `pub`!
#[derive(Debug, PartialEq, Eq, PartialOrd, Ord)]
pub struct Frame {
    number: usize
}

impl Frame {
    /// Create a frame containing `address`
    pub fn containing_address(address: PhysicalAddress) -> Frame {
        Frame {
            number: address.get() / PAGE_SIZE
        }
    }

    /// Get the address of this frame
    pub fn start_address(&self) -> PhysicalAddress {
        PhysicalAddress::new(self.number * PAGE_SIZE)
    }

    fn clone(&self) -> Frame {
        Frame {
            number: self.number
        }
    }
}

pub trait FrameAllocator {
    fn allocate_frame(&mut self) -> Option<Frame>;
    fn deallocate_frame(&mut self, frame: Frame);
}
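`Frame::containing_address` and `start_address` convert between physical addresses and 4 KiB frame numbers by integer division and multiplication. A runnable illustration (the address is made up):

    const PAGE_SIZE: usize = 4096;

    fn main() {
        // containing_address: address -> frame number
        let number = 0x72345 / PAGE_SIZE;
        assert_eq!(number, 0x72);

        // start_address: frame number -> first byte of that frame
        assert_eq!(number * PAGE_SIZE, 0x72000);

        // every address up to 0x72fff maps back to the same frame
        assert_eq!(0x72fff / PAGE_SIZE, 0x72);
    }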


@@ -0,0 +1,62 @@
//! # Page table entry
//! Some code borrowed from [Phil Opp's Blog](http://os.phil-opp.com/modifying-page-tables.html)

use memory::Frame;

use super::PhysicalAddress;

/// A page table entry
pub struct Entry(u64);

bitflags! {
    pub flags EntryFlags: u64 {
        const PRESENT = 1 << 0,
        const WRITABLE = 1 << 1,
        const USER_ACCESSIBLE = 1 << 2,
        const WRITE_THROUGH = 1 << 3,
        const NO_CACHE = 1 << 4,
        const ACCESSED = 1 << 5,
        const DIRTY = 1 << 6,
        const HUGE_PAGE = 1 << 7,
        const GLOBAL = 1 << 8,
        const NO_EXECUTE = 1 << 63,
    }
}

pub const ADDRESS_MASK: usize = 0x000fffff_fffff000;

impl Entry {
    /// Is the entry unused?
    pub fn is_unused(&self) -> bool {
        self.0 == 0
    }

    /// Make the entry unused
    pub fn set_unused(&mut self) {
        self.0 = 0;
    }

    /// Get the address this page references
    pub fn address(&self) -> PhysicalAddress {
        PhysicalAddress::new(self.0 as usize & ADDRESS_MASK)
    }

    /// Get the current entry flags
    pub fn flags(&self) -> EntryFlags {
        EntryFlags::from_bits_truncate(self.0)
    }

    /// Get the associated frame, if available
    pub fn pointed_frame(&self) -> Option<Frame> {
        if self.flags().contains(PRESENT) {
            Some(Frame::containing_address(self.address()))
        } else {
            None
        }
    }

    pub fn set(&mut self, frame: Frame, flags: EntryFlags) {
        debug_assert!(frame.start_address().get() & !ADDRESS_MASK == 0);
        self.0 = (frame.start_address().get() as u64) | flags.bits();
    }
}
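Because frames are 4 KiB aligned, the low 12 bits of a frame address are always zero, which is what lets `Entry` pack the address and the flag bits into a single u64 and recover the address with ADDRESS_MASK. A runnable sketch of the same packing, using the PRESENT and WRITABLE values from the bitflags above:

    const ADDRESS_MASK: u64 = 0x000fffff_fffff000;
    const PRESENT: u64 = 1 << 0;
    const WRITABLE: u64 = 1 << 1;

    fn main() {
        let frame_addr: u64 = 0x72000; // 4 KiB aligned: low 12 bits free for flags
        let entry = frame_addr | PRESENT | WRITABLE;

        // Entry::address() recovers the frame address...
        assert_eq!(entry & ADDRESS_MASK, frame_addr);
        // ...and the flags survive alongside it.
        assert_eq!(entry & (PRESENT | WRITABLE), PRESENT | WRITABLE);
    }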


@@ -0,0 +1,165 @@
//! # Paging
//! Some code was borrowed from [Phil Opp's Blog](http://os.phil-opp.com/modifying-page-tables.html)

use core::ptr::Unique;

use memory::{Frame, FrameAllocator};

use self::entry::EntryFlags;
use self::table::{Table, Level4};

pub mod entry;
pub mod table;

/// Number of entries per page table
pub const ENTRY_COUNT: usize = 512;

/// Size of pages
pub const PAGE_SIZE: usize = 4096;

/// Initialize paging
pub unsafe fn init() -> ActivePageTable {
    ActivePageTable::new()
}

pub struct ActivePageTable {
    p4: Unique<Table<Level4>>,
}

impl ActivePageTable {
    /// Create a new page table
    pub unsafe fn new() -> ActivePageTable {
        ActivePageTable {
            p4: Unique::new(table::P4),
        }
    }

    fn p4(&self) -> &Table<Level4> {
        unsafe { self.p4.get() }
    }

    fn p4_mut(&mut self) -> &mut Table<Level4> {
        unsafe { self.p4.get_mut() }
    }

    /// Map a page to a frame
    pub fn map_to<A>(&mut self, page: Page, frame: Frame, flags: EntryFlags, allocator: &mut A)
        where A: FrameAllocator
    {
        let mut p3 = self.p4_mut().next_table_create(page.p4_index(), allocator);
        let mut p2 = p3.next_table_create(page.p3_index(), allocator);
        let mut p1 = p2.next_table_create(page.p2_index(), allocator);

        assert!(p1[page.p1_index()].is_unused());
        p1[page.p1_index()].set(frame, flags | entry::PRESENT);
    }

    /// Map a page to the next free frame
    pub fn map<A>(&mut self, page: Page, flags: EntryFlags, allocator: &mut A)
        where A: FrameAllocator
    {
        let frame = allocator.allocate_frame().expect("out of memory");
        self.map_to(page, frame, flags, allocator)
    }

    /// Identity map a frame
    pub fn identity_map<A>(&mut self, frame: Frame, flags: EntryFlags, allocator: &mut A)
        where A: FrameAllocator
    {
        let page = Page::containing_address(VirtualAddress::new(frame.start_address().get()));
        self.map_to(page, frame, flags, allocator)
    }

    /// Unmap a page
    fn unmap<A>(&mut self, page: Page, allocator: &mut A)
        where A: FrameAllocator
    {
        assert!(self.translate(page.start_address()).is_some());

        let p1 = self.p4_mut()
                     .next_table_mut(page.p4_index())
                     .and_then(|p3| p3.next_table_mut(page.p3_index()))
                     .and_then(|p2| p2.next_table_mut(page.p2_index()))
                     .expect("mapping code does not support huge pages");
        let frame = p1[page.p1_index()].pointed_frame().unwrap();
        p1[page.p1_index()].set_unused();
        // TODO free p(1,2,3) table if empty
        allocator.deallocate_frame(frame);
    }

    fn translate_page(&self, page: Page) -> Option<Frame> {
        self.p4().next_table(page.p4_index())
            .and_then(|p3| p3.next_table(page.p3_index()))
            .and_then(|p2| p2.next_table(page.p2_index()))
            .and_then(|p1| p1[page.p1_index()].pointed_frame())
    }

    /// Translate a virtual address to a physical one
    pub fn translate(&self, virtual_address: VirtualAddress) -> Option<PhysicalAddress> {
        let offset = virtual_address.get() % PAGE_SIZE;
        self.translate_page(Page::containing_address(virtual_address))
            .map(|frame| PhysicalAddress::new(frame.start_address().get() + offset))
    }
}

/// A physical address.
#[derive(Copy, Clone, Debug, Eq, Ord, PartialEq, PartialOrd)]
pub struct PhysicalAddress(usize);

impl PhysicalAddress {
    pub fn new(address: usize) -> Self {
        PhysicalAddress(address)
    }

    pub fn get(&self) -> usize {
        self.0
    }
}

/// A virtual address.
#[derive(Copy, Clone, Debug, Eq, Ord, PartialEq, PartialOrd)]
pub struct VirtualAddress(usize);

impl VirtualAddress {
    pub fn new(address: usize) -> Self {
        VirtualAddress(address)
    }

    pub fn get(&self) -> usize {
        self.0
    }
}

/// Page
#[derive(Debug, Clone, Copy)]
pub struct Page {
    number: usize
}

impl Page {
    fn start_address(&self) -> VirtualAddress {
        VirtualAddress::new(self.number * PAGE_SIZE)
    }

    fn p4_index(&self) -> usize {
        (self.number >> 27) & 0o777
    }

    fn p3_index(&self) -> usize {
        (self.number >> 18) & 0o777
    }

    fn p2_index(&self) -> usize {
        (self.number >> 9) & 0o777
    }

    fn p1_index(&self) -> usize {
        (self.number >> 0) & 0o777
    }

    pub fn containing_address(address: VirtualAddress) -> Page {
        assert!(address.get() < 0x0000_8000_0000_0000 || address.get() >= 0xffff_8000_0000_0000,
            "invalid address: 0x{:x}", address.get());
        Page { number: address.get() / PAGE_SIZE }
    }
}
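A page number is at most 36 bits (48-bit virtual addresses minus the 12-bit page offset), and the four index methods slice it into four 9-bit table indices, each masked with 0o777 = 511. A runnable illustration with a made-up lower-half address:

    const PAGE_SIZE: usize = 4096;

    fn main() {
        let address: usize = 0x0000_0040_2030_1000; // hypothetical, lower half
        let number = address / PAGE_SIZE;

        let p4 = (number >> 27) & 0o777;
        let p3 = (number >> 18) & 0o777;
        let p2 = (number >> 9) & 0o777;
        let p1 = number & 0o777;

        // The four 9-bit indices fully decompose the page number.
        assert_eq!((((p4 << 9 | p3) << 9 | p2) << 9) | p1, number);
        println!("P4[{}] -> P3[{}] -> P2[{}] -> P1[{}]", p4, p3, p2, p1);
    }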


@@ -0,0 +1,100 @@
//! # Page table
//! Code borrowed from [Phil Opp's Blog](http://os.phil-opp.com/modifying-page-tables.html)

use core::marker::PhantomData;
use core::ops::{Index, IndexMut};

use memory::FrameAllocator;

use super::entry::*;
use super::ENTRY_COUNT;

pub const P4: *mut Table<Level4> = 0xffffffff_fffff000 as *mut _;

pub trait TableLevel {}

pub enum Level4 {}
pub enum Level3 {}
pub enum Level2 {}
pub enum Level1 {}

impl TableLevel for Level4 {}
impl TableLevel for Level3 {}
impl TableLevel for Level2 {}
impl TableLevel for Level1 {}

pub trait HierarchicalLevel: TableLevel {
    type NextLevel: TableLevel;
}

impl HierarchicalLevel for Level4 {
    type NextLevel = Level3;
}

impl HierarchicalLevel for Level3 {
    type NextLevel = Level2;
}

impl HierarchicalLevel for Level2 {
    type NextLevel = Level1;
}

pub struct Table<L: TableLevel> {
    entries: [Entry; ENTRY_COUNT],
    level: PhantomData<L>,
}

impl<L> Table<L> where L: TableLevel {
    pub fn zero(&mut self) {
        for entry in self.entries.iter_mut() {
            entry.set_unused();
        }
    }
}

impl<L> Table<L> where L: HierarchicalLevel {
    pub fn next_table(&self, index: usize) -> Option<&Table<L::NextLevel>> {
        self.next_table_address(index).map(|address| unsafe { &*(address as *const _) })
    }

    pub fn next_table_mut(&mut self, index: usize) -> Option<&mut Table<L::NextLevel>> {
        self.next_table_address(index).map(|address| unsafe { &mut *(address as *mut _) })
    }

    pub fn next_table_create<A>(&mut self, index: usize, allocator: &mut A) -> &mut Table<L::NextLevel>
        where A: FrameAllocator
    {
        if self.next_table(index).is_none() {
            assert!(!self.entries[index].flags().contains(HUGE_PAGE),
                "mapping code does not support huge pages");
            let frame = allocator.allocate_frame().expect("no frames available");
            self.entries[index].set(frame, PRESENT | WRITABLE);
            self.next_table_mut(index).unwrap().zero();
        }
        self.next_table_mut(index).unwrap()
    }

    fn next_table_address(&self, index: usize) -> Option<usize> {
        let entry_flags = self[index].flags();
        if entry_flags.contains(PRESENT) && !entry_flags.contains(HUGE_PAGE) {
            let table_address = self as *const _ as usize;
            Some((table_address << 9) | (index << 12))
        } else {
            None
        }
    }
}

impl<L> Index<usize> for Table<L> where L: TableLevel {
    type Output = Entry;

    fn index(&self, index: usize) -> &Entry {
        &self.entries[index]
    }
}

impl<L> IndexMut<usize> for Table<L> where L: TableLevel {
    fn index_mut(&mut self, index: usize) -> &mut Entry {
        &mut self.entries[index]
    }
}
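`next_table_address` depends on the recursive PML4 entry installed by the bootloader change below: with entry 511 of the PML4 pointing at the PML4 itself, a child table's virtual address is the parent's address shifted left by 9 bits with the entry index spliced into bits 12..21. A runnable check of that arithmetic against the fixed P4 address:

    fn main() {
        let p4: usize = 0xffffffff_fffff000; // table::P4

        // P3 table behind P4 entry 0
        let p3 = (p4 << 9) | (0 << 12);
        assert_eq!(p3, 0xffffffff_ffe00000);

        // Each successive index lands one 4 KiB table further along.
        let p3_next = (p4 << 9) | (1 << 12);
        assert_eq!(p3_next - p3, 0x1000);
    }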


@@ -1,11 +0,0 @@
//! Typestrong address segregation.

/// A physical address in memory.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct Physical {
    /// The position.
    ///
    /// Note that we do not use a pointer here to avoid simple mistakes where the programmer
    /// confuses virtual and physical.
    pub inner: u64,
}


@@ -17,6 +17,8 @@ startup_arch:
     ;Link first PML4 to PDP
     mov DWORD [es:edi], 0x71000 | 1 << 1 | 1
     add edi, 0x1000
+    ;Link last PML4 to PML4
+    mov DWORD [es:edi - 8], 0x70000 | 1 << 1 | 1
     ;Link first PDP to PD
     mov DWORD [es:edi], 0x72000 | 1 << 1 | 1
     add edi, 0x1000
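This is the recursive entry that the paging code's `next_table_address` relies on: after `add edi, 0x1000`, edi sits one past the 4 KiB PML4, so [es:edi - 8] is its last slot (entry 511, entries being 8 bytes each), and the value written is the PML4's own base ORed with the writable and present bits. A quick check of both facts, in Rust rather than assembly:

    fn main() {
        let pml4_base: usize = 0x70000;
        let edi = pml4_base + 0x1000;             // after `add edi, 0x1000`
        assert_eq!(edi - 8, pml4_base + 511 * 8); // [es:edi - 8] is entry 511

        let entry: u32 = 0x70000 | 1 << 1 | 1;    // base | writable | present
        assert_eq!(entry, 0x70003);
    }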


@@ -1,6 +0,0 @@
/// A newtype representing a virtual address.
#[derive(Copy, Clone, Debug, Eq, Ord, PartialEq, PartialOrd)]
pub struct Virtual {
    /// The inner value.
    pub inner: usize,
}


@@ -9,7 +9,7 @@ extern "C" fn eh_personality() {}
 #[cfg(not(test))]
 /// Required to handle panics
 #[lang = "panic_fmt"]
-extern "C" fn panic_fmt() -> ! {
+extern "C" fn panic_fmt(fmt: ::core::fmt::Arguments, file_line: &(&'static str, u32)) -> ! {
     loop {
         unsafe { halt() };
     }
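The widened signature matches what rustc of this era passes to the panic_fmt lang item: the formatted message plus a (file, line) pair. The kernel still just halts, but the arguments make a diagnostic possible later. A userspace sketch of what a reporting implementation could do with them (illustrative only, not the kernel's code; the path is made up):

    use std::fmt::Arguments;

    fn report(fmt: Arguments, file_line: &(&'static str, u32)) {
        let (file, line) = *file_line;
        println!("PANIC at {}:{}: {}", file, line, fmt);
    }

    fn main() {
        report(format_args!("out of memory"), &("src/main.rs", 42));
    }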

rust Submodule (+1)

@@ -0,0 +1 @@
Subproject commit b72fa8ca95c02e4b44b216a425fd563ad2ef58bb

rustc.sh Executable file (+2)

@@ -0,0 +1,2 @@
#!/bin/bash
RUST_BACKTRACE=1 rustc -L build $*


@@ -9,7 +9,7 @@
     "vendor": "unknown",
     "target-family": "redox",
     "pre-link-args": ["-m64", "-nostdlib", "-static"],
-    "features": "-sse3,-ssse3,-sse4.1,-sse4.2,-3dnow,-3dnowa,-avx,-avx2",
+    "features": "-mmx,-sse,-sse2,-sse3,-ssse3,-sse4.1,-sse4.2,-3dnow,-3dnowa,-avx,-avx2,+soft-float",
    "dynamic-linking": false,
    "executables": false,
    "relocation-model": "static",