author     2022-01-21 17:46:29 +0000
committer  2022-01-25 09:11:49 -0800
commit     5d96aedd1403ad2ec6cfd3d5d2d043476d45645f (patch)
tree       881771b058fe8e4a76a1a1935da7b7191dd30a7c
parent     7c788ce1dc6d04dee68baa0ceddaf62b032953bb (diff)
Finish converting to new asm!() syntax.
I got a number of compile failures with
`llvm_asm` on the latest nightly. Instead of
figuring out how to fix that, I just went ahead
and finished converting everything to use
`asm`, which has the added bonus of being
stable anyway!
I'm not entirely sure this is all correct;
`cargo test` seems to drag in an explicit
dependency on x86-0.44.0, which still fails.
Note also that the SGX modifications are a bit
more elaborate than what had been there:
apparently LLVM uses %rbx internally, so it
cannot be named as an asm!() operand, even
though the ENCLS/ENCLU instructions exposed in
that module require that register. I worked
around that by pushing %rbx, copying the leaf
operand from %rsi to %rbx, copying %rbx _back_
to %rsi for output after the instruction, and
then popping the original value of %rbx. The
input/output operands are then written in terms
of %rsi.
Signed-off-by: Dan Cross <cross@gajendra.net>
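
For context on the workaround described in the message above, here is a minimal, self-contained sketch of the %rbx save/restore pattern, modeled on the new `encls2` wrapper in `src/bits64/sgx.rs` in the diff below. The function name `encls2_sketch` is illustrative rather than part of the crate's API; the operand layout mirrors the diff. rustc rejects `rbx` as an `asm!` operand because LLVM reserves it, so the leaf-specific value travels in `%rsi`, lives in `%rbx` only around the `ENCLS` instruction, and is copied back out through `%rsi` before `%rbx` is restored.

```rust
use core::arch::asm;

/// Illustrative sketch of the two-operand ENCLS wrapper: %rbx is reserved
/// by LLVM, so the leaf operand is routed through %rsi and only occupies
/// %rbx for the duration of the instruction.
#[cfg(target_arch = "x86_64")]
unsafe fn encls2_sketch(rax: u64, rbx: u64) -> (u32, u64) {
    let eax: u32;
    let out_rbx: u64;
    asm!(
        // Save LLVM's %rbx, move the leaf operand in, run ENCLS,
        // copy the result back out via %rsi, then restore %rbx.
        "pushq %rbx; movq %rsi, %rbx; encls; movq %rbx, %rsi; popq %rbx",
        lateout("eax") eax,     // status/error code ENCLS leaves in %eax
        lateout("rsi") out_rbx, // value ENCLS leaves in %rbx
        in("rax") rax,          // leaf number
        in("rsi") rbx,          // leaf operand destined for %rbx
        options(att_syntax),
    );
    (eax, out_rbx)
}
```

Presumably `%rsi` was chosen as the staging register because these ENCLS leaves do not otherwise use it; the three- and four-operand wrappers in the diff follow the same shape, passing `%rcx` and `%rdx` straight through.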
 src/bits32/eflags.rs       |  9
 src/bits32/mod.rs          |  5
 src/bits32/segmentation.rs |  9
 src/bits64/registers.rs    |  8
 src/bits64/rflags.rs       |  9
 src/bits64/segmentation.rs | 21
 src/bits64/sgx.rs          | 42
 src/bits64/syscall.rs      | 60
 src/bits64/vmx.rs          | 19
 src/controlregs.rs         | 21
 src/dtables.rs             | 13
 src/fence.rs               |  8
 src/io.rs                  | 14
 src/lib.rs                 |  2
 src/msr.rs                 |  6
 src/segmentation.rs        | 23
 x86test/Cargo.toml         |  2
 x86test/README.md          |  4
 18 files changed, 173 insertions(+), 102 deletions(-)
diff --git a/src/bits32/eflags.rs b/src/bits32/eflags.rs index f6bd682..32d46f0 100644 --- a/src/bits32/eflags.rs +++ b/src/bits32/eflags.rs @@ -3,6 +3,7 @@ use bitflags::*; use crate::Ring; +use core::arch::asm; bitflags! { /// The EFLAGS register. @@ -70,14 +71,14 @@ impl EFlags { #[inline(always)] pub unsafe fn read() -> EFlags { let r: u32; - llvm_asm!("pushfl; popl $0" : "=r"(r) :: "memory"); + asm!("pushfl; popl {0}", out(reg) r, options(att_syntax)); EFlags::from_bits_truncate(r) } #[cfg(target_arch = "x86")] #[inline(always)] pub unsafe fn set(val: EFlags) { - llvm_asm!("pushl $0; popfl" :: "r"(val.bits()) : "memory" "flags"); + asm!("pushl {0}; popfl", in(reg) val.bits(), options(att_syntax)); } /// Clears the AC flag bit in EFLAGS register. @@ -92,7 +93,7 @@ pub unsafe fn set(val: EFlags) { /// that the CPU supports the instruction (check CPUID). #[inline(always)] pub unsafe fn clac() { - llvm_asm!("clac" ::: "memory" "flags" : "volatile"); + asm!("clac"); } /// Sets the AC flag bit in EFLAGS register. @@ -107,5 +108,5 @@ pub unsafe fn clac() { /// that the CPU supports the instruction (check CPUID). #[inline(always)] pub unsafe fn stac() { - llvm_asm!("stac" ::: "memory" "flags" : "volatile"); + asm!("stac"); } diff --git a/src/bits32/mod.rs b/src/bits32/mod.rs index f02ff4d..1267023 100644 --- a/src/bits32/mod.rs +++ b/src/bits32/mod.rs @@ -6,8 +6,11 @@ pub mod segmentation; pub mod task; #[cfg(target_arch = "x86")] +use core::arch::asm; + +#[cfg(target_arch = "x86")] #[inline(always)] pub unsafe fn stack_jmp(stack: *mut (), ip: *const ()) -> ! { - llvm_asm!("mov esp, $0; jmp $1" :: "rg"(stack), "r"(ip) :: "volatile", "intel"); + asm!("movl {0}, %esp; jmp {1}", in(reg) stack, in(reg) ip, options(att_syntax)); loop {} } diff --git a/src/bits32/segmentation.rs b/src/bits32/segmentation.rs index ff9cc9d..2bbd3bd 100644 --- a/src/bits32/segmentation.rs +++ b/src/bits32/segmentation.rs @@ -1,6 +1,9 @@ #[allow(unused_imports)] use crate::segmentation::SegmentSelector; +#[cfg(target_arch = "x86")] +use core::arch::asm; + /// Reload code segment register. /// Note this is special since we can not directly move /// to %cs. Instead we push the new segment selector @@ -8,8 +11,8 @@ use crate::segmentation::SegmentSelector; /// to reload cs and continue at 1:. #[cfg(target_arch = "x86")] pub unsafe fn load_cs(sel: SegmentSelector) { - llvm_asm!("pushl $0; \ - pushl $$1f; \ + asm!("pushl {0}; \ + pushl $1f; \ lretl; \ - 1:" :: "ri" (sel.bits() as u32) : "memory"); + 1:", in(reg) sel.bits() as u32, options(att_syntax)); } diff --git a/src/bits64/registers.rs b/src/bits64/registers.rs index 428f326..6226c14 100644 --- a/src/bits64/registers.rs +++ b/src/bits64/registers.rs @@ -1,9 +1,11 @@ +use core::arch::asm; + /// Read the RIP register (instruction pointer). 
#[inline(always)] pub fn rip() -> u64 { let rip: u64; unsafe { - llvm_asm!("leaq 0(%rip), $0" : "=r" (rip) ::); + asm!("leaq 0(%rip), {0}", out(reg) rip, options(att_syntax)); } rip } @@ -13,7 +15,7 @@ pub fn rip() -> u64 { pub fn rsp() -> u64 { let rsp: u64; unsafe { - llvm_asm!("mov %rsp, $0" : "=r" (rsp) ::); + asm!("mov %rsp, {0}", out(reg) rsp, options(att_syntax)); } rsp } @@ -23,7 +25,7 @@ pub fn rsp() -> u64 { pub fn rbp() -> u64 { let rbp: u64; unsafe { - llvm_asm!("mov %rbp, $0" : "=r" (rbp) ::); + asm!("mov %rbp, {0}", out(reg) rbp, options(att_syntax)); } rbp } diff --git a/src/bits64/rflags.rs b/src/bits64/rflags.rs index e0b752e..159d0a7 100644 --- a/src/bits64/rflags.rs +++ b/src/bits64/rflags.rs @@ -8,6 +8,9 @@ use bitflags::*; use crate::Ring; +#[cfg(target_arch = "x86_64")] +use core::arch::asm; + bitflags! { /// The RFLAGS register. /// This is duplicated code from bits32 eflags.rs. @@ -79,14 +82,16 @@ impl RFlags { #[inline(always)] pub fn read() -> RFlags { let r: u64; - unsafe { llvm_asm!("pushfq; popq $0" : "=r"(r) :: "memory") }; + unsafe { asm!("pushfq; popq {0}", out(reg) r, options(att_syntax)) }; RFlags::from_bits_truncate(r) } #[cfg(target_arch = "x86_64")] #[inline(always)] pub fn set(val: RFlags) { - unsafe { llvm_asm!("pushq $0; popfq" :: "r"(val.bits()) : "memory" "flags") }; + unsafe { + asm!("pushq {0}; popfq", in(reg) val.bits(), options(att_syntax)); + } } // clac and stac are also usable in 64-bit mode diff --git a/src/bits64/segmentation.rs b/src/bits64/segmentation.rs index c4420d8..36e3171 100644 --- a/src/bits64/segmentation.rs +++ b/src/bits64/segmentation.rs @@ -5,6 +5,9 @@ use crate::segmentation::{ LdtDescriptorBuilder, SystemDescriptorTypes64, }; +#[cfg(target_arch = "x86_64")] +use core::arch::asm; + /// Entry for IDT, GDT or LDT. /// /// See Intel 3a, Section 3.4.5 "Segment Descriptors", and Section 3.5.2 @@ -141,11 +144,11 @@ impl BuildDescriptor<Descriptor64> for DescriptorBuilder { /// Can cause a GP-fault with a bad `sel` value. #[cfg(target_arch = "x86_64")] pub unsafe fn load_cs(sel: SegmentSelector) { - llvm_asm!("pushq $0; \ + asm!("pushq {0}; \ leaq 1f(%rip), %rax; \ pushq %rax; \ lretq; \ - 1:" :: "ri" (sel.bits() as usize) : "rax" "memory"); + 1:", in(reg) sel.bits() as usize, out("rax") _, options(att_syntax)); } /// Write GS Segment Base @@ -154,7 +157,7 @@ pub unsafe fn load_cs(sel: SegmentSelector) { /// Needs FSGSBASE-Enable Bit (bit 16 of CR4) set. #[cfg(target_arch = "x86_64")] pub unsafe fn wrgsbase(base: u64) { - llvm_asm!("wrgsbase $0" :: "r" (base) : "memory"); + asm!("wrgsbase {0}", in(reg) base, options(att_syntax)); } /// Write FS Segment Base @@ -163,7 +166,7 @@ pub unsafe fn wrgsbase(base: u64) { /// Needs FSGSBASE-Enable Bit (bit 16 of CR4) set. 
#[cfg(target_arch = "x86_64")] pub unsafe fn wrfsbase(base: u64) { - llvm_asm!("wrfsbase $0" :: "r" (base) : "memory"); + asm!("wrfsbase {0}", in(reg) base, options(att_syntax)); } /// Read GS Segment Base @@ -173,7 +176,7 @@ pub unsafe fn wrfsbase(base: u64) { #[cfg(target_arch = "x86_64")] pub unsafe fn rdgsbase() -> u64 { let gs_base: u64; - llvm_asm!("rdgsbase $0" : "=r" (gs_base) ); + asm!("rdgsbase {0}", out(reg) gs_base, options(att_syntax)); gs_base } @@ -184,7 +187,7 @@ pub unsafe fn rdgsbase() -> u64 { #[cfg(target_arch = "x86_64")] pub unsafe fn rdfsbase() -> u64 { let fs_base: u64; - llvm_asm!("rdfsbase $0" : "=r" (fs_base) ); + asm!("rdfsbase {0}", out(reg) fs_base, options(att_syntax)); fs_base } @@ -195,7 +198,7 @@ pub unsafe fn rdfsbase() -> u64 { #[cfg(target_arch = "x86_64")] pub unsafe fn fs_deref() -> u64 { let fs: u64; - llvm_asm!("movq %fs:0x0, $0" : "=r" (fs) ); + asm!("movq %fs:0x0, {0}", out(reg) fs, options(att_syntax)); fs } @@ -206,7 +209,7 @@ pub unsafe fn fs_deref() -> u64 { #[cfg(target_arch = "x86_64")] pub unsafe fn gs_deref() -> u64 { let gs: u64; - llvm_asm!("movq %gs:0x0, $0" : "=r" (gs) ); + asm!("movq %gs:0x0, {0}", out(reg) gs, options(att_syntax)); gs } @@ -221,5 +224,5 @@ pub unsafe fn gs_deref() -> u64 { /// The SWAPGS instruction is a privileged instruction intended for use by system software. #[cfg(target_arch = "x86_64")] pub unsafe fn swapgs() { - llvm_asm!("swapgs" ::: "gs"); + asm!("swapgs"); } diff --git a/src/bits64/sgx.rs b/src/bits64/sgx.rs index 3d018b3..32c1616 100644 --- a/src/bits64/sgx.rs +++ b/src/bits64/sgx.rs @@ -1,5 +1,7 @@ //! Program x86 enclaves. +use core::arch::asm; + /// Execute an enclave system function of specified leaf number. /// /// # Safety @@ -22,8 +24,12 @@ macro_rules! encls { unsafe fn encls2(rax: u64, rbx: u64) -> (u32, u64) { let eax: u32; let out_rbx: u64; - llvm_asm!("encls" : "={eax}" (eax), "={rbx}" (out_rbx) - : "{rax}" (rax), "{rbx}" (rbx)); + asm!( + "pushq %rbx; movq %rsi, %rbx; encls; movq %rbx, %rsi; popq %rbx", + lateout("eax") eax, lateout("rsi") out_rbx, + in("rax") rax, in("rsi") rbx, + options(att_syntax), + ); (eax, out_rbx) } @@ -31,8 +37,12 @@ unsafe fn encls2(rax: u64, rbx: u64) -> (u32, u64) { unsafe fn encls3(rax: u64, rbx: u64, rcx: u64) -> (u32, u64) { let eax: u32; let out_rbx: u64; - llvm_asm!("encls" : "={eax}" (eax), "={rbx}" (out_rbx) - : "{rax}" (rax), "{rbx}" (rbx), "{rcx}" (rcx)); + asm!( + "pushq %rbx; movq %rsi, %rbx; encls; movq %rbx, %r11; popq %rbx", + lateout("eax") eax, lateout("rsi") out_rbx, + in("rax") rax, in("rsi") rbx, in("rcx") rcx, + options(att_syntax), + ); (eax, out_rbx) } @@ -40,8 +50,12 @@ unsafe fn encls3(rax: u64, rbx: u64, rcx: u64) -> (u32, u64) { unsafe fn encls4(rax: u64, rbx: u64, rcx: u64, rdx: u64) -> (u32, u64) { let eax: u32; let out_rbx: u64; - llvm_asm!("encls" : "={eax}" (eax), "={rbx}" (out_rbx) - : "{rax}" (rax), "{rbx}" (rbx), "{rcx}" (rcx), "{rdx}" (rdx)); + asm!( + "pushq %rbx; movq %rsi, %rbx; encls; movq %rbx, %rsi; popq %rbx", + lateout("eax") eax, lateout("rsi") out_rbx, + in("rax") rax, in("rsi") rbx, in("rcx") rcx, in("rdx") rdx, + options(att_syntax), + ); (eax, out_rbx) } @@ -289,8 +303,12 @@ macro_rules! 
enclu { unsafe fn enclu3(rax: u64, rbx: u64, rcx: u64) -> (u32, u64) { let eax: u32; let out_rcx: u64; - llvm_asm!("enclu" : "={eax}" (eax), "={rcx}" (out_rcx) - : "{rax}" (rax), "{rbx}" (rbx), "{rcx}" (rcx)); + asm!( + "pushq %rbx; movq %rsi, %rbx; enclu; popq %rbx", + lateout("eax") eax, lateout("rcx") out_rcx, + in("rax") rax, in("rsi") rbx, in("rcx") rcx, + options(att_syntax), + ); (eax, out_rcx) } @@ -298,8 +316,12 @@ unsafe fn enclu3(rax: u64, rbx: u64, rcx: u64) -> (u32, u64) { unsafe fn enclu4(rax: u64, rbx: u64, rcx: u64, rdx: u64) -> (u32, u64) { let eax: u32; let out_rcx: u64; - llvm_asm!("enclu" : "={eax}" (eax), "={rcx}" (out_rcx) - : "{rax}" (rax), "{rbx}" (rbx), "{rcx}" (rcx), "{rdx}" (rdx)); + asm!( + "pushq %rbx; movq %rsi, %rbx; enclu; popq %rbx", + lateout("eax") eax, lateout("rcx") out_rcx, + in("rax") rax, in("rsi") rbx, in("rcx") rcx, in("rdx") rdx, + options(att_syntax), + ); (eax, out_rcx) } diff --git a/src/bits64/syscall.rs b/src/bits64/syscall.rs index 63c7c30..ca09cb5 100644 --- a/src/bits64/syscall.rs +++ b/src/bits64/syscall.rs @@ -14,6 +14,10 @@ //! * Only values of class INTEGER or class MEMORY are passed to the kernel. //! //! This code is inspired by the syscall.rs (https://github.com/kmcallister/syscall.rs/) project. + +#[cfg(target_arch = "x86_64")] +use core::arch::asm; + #[macro_export] macro_rules! syscall { ($arg0:expr) => { @@ -97,7 +101,7 @@ macro_rules! syscall { #[allow(unused_mut)] pub unsafe fn syscall0(arg0: u64) -> u64 { let mut ret: u64; - llvm_asm!("syscall" : "={rax}" (ret) : "{rax}" (arg0) : "rcx", "r11", "memory" : "volatile"); + asm!("syscall", lateout("rax") ret, in("rax") arg0, options(att_syntax)); ret } @@ -110,8 +114,11 @@ pub unsafe fn syscall0(arg0: u64) -> u64 { #[allow(unused_mut)] pub unsafe fn syscall1(arg0: u64, arg1: u64) -> u64 { let mut ret: u64; - llvm_asm!("syscall" : "={rax}" (ret) : "{rax}" (arg0), "{rdi}" (arg1) - : "rcx", "r11", "memory" : "volatile"); + asm!( + "syscall", + lateout("rax") ret, in("rax") arg0, in("rdi") arg1, + out("rcx") _, out("r11") _, options(att_syntax), + ); ret } @@ -124,8 +131,12 @@ pub unsafe fn syscall1(arg0: u64, arg1: u64) -> u64 { #[allow(unused_mut)] pub unsafe fn syscall2(arg0: u64, arg1: u64, arg2: u64) -> u64 { let mut ret: u64; - llvm_asm!("syscall" : "={rax}" (ret) : "{rax}" (arg0), "{rdi}" (arg1), "{rsi}" (arg2) - : "rcx", "r11", "memory" : "volatile"); + asm!( + "syscall", + lateout("rax") ret, + in("rax") arg0, in("rdi") arg1, in("rsi") arg2, + out("rcx") _, out("r11") _, options(att_syntax), + ); ret } @@ -138,8 +149,12 @@ pub unsafe fn syscall2(arg0: u64, arg1: u64, arg2: u64) -> u64 { #[allow(unused_mut)] pub unsafe fn syscall3(arg0: u64, arg1: u64, arg2: u64, arg3: u64) -> u64 { let mut ret: u64; - llvm_asm!("syscall" : "={rax}" (ret) : "{rax}" (arg0), "{rdi}" (arg1), "{rsi}" (arg2), "{rdx}" (arg3) - : "rcx", "r11", "memory" : "volatile"); + asm!( + "syscall", + lateout("rax") ret, + in("rax") arg0, in("rdi") arg1, in("rsi") arg2, in("rdx") arg3, + out("rcx") _, out("r11") _, options(att_syntax), + ); ret } @@ -152,9 +167,12 @@ pub unsafe fn syscall3(arg0: u64, arg1: u64, arg2: u64, arg3: u64) -> u64 { #[allow(unused_mut)] pub unsafe fn syscall4(arg0: u64, arg1: u64, arg2: u64, arg3: u64, arg4: u64) -> u64 { let mut ret: u64; - llvm_asm!("syscall" : "={rax}" (ret) - : "{rax}" (arg0), "{rdi}" (arg1), "{rsi}" (arg2), "{rdx}" (arg3), "{r10}" (arg4) - : "rcx", "r11", "memory" : "volatile"); + asm!( + "syscall", + lateout("rax") ret, + in("rax") arg0, in("rdi") arg1, in("rsi") 
arg2, in("rdx") arg3, in("r10") arg4, + out("rcx") _, out("r11") _, options(att_syntax), + ); ret } @@ -167,10 +185,12 @@ pub unsafe fn syscall4(arg0: u64, arg1: u64, arg2: u64, arg3: u64, arg4: u64) -> #[allow(unused_mut)] pub unsafe fn syscall5(arg0: u64, arg1: u64, arg2: u64, arg3: u64, arg4: u64, arg5: u64) -> u64 { let mut ret: u64; - llvm_asm!("syscall" : "={rax}" (ret) - : "{rax}" (arg0), "{rdi}" (arg1), "{rsi}" (arg2), "{rdx}" (arg3), "{r10}" (arg4), "{r8}" (arg5) - : "rcx", "r11", "memory" - : "volatile"); + asm!( + "syscall", + lateout("rax") ret, + in("rax") arg0, in("rdi") arg1, in("rsi") arg2, in("rdx") arg3, in("r10") arg4, in("r8") arg5, + out("rcx") _, out("r11") _, options(att_syntax), + ); ret } @@ -191,10 +211,12 @@ pub unsafe fn syscall6( arg6: u64, ) -> u64 { let mut ret: u64; - llvm_asm!("syscall" : "={rax}" (ret) - : "{rax}" (arg0), "{rdi}" (arg1), "{rsi}" (arg2), "{rdx}" (arg3), - "{r10}" (arg4), "{r8}" (arg5), "{r9}" (arg6) - : "rcx", "r11", "memory" - : "volatile"); + asm!( + "syscall", + lateout("rax") ret, + in("rax") arg0, in("rdi") arg1, in("rsi") arg2, in("rdx") arg3, + in("r10") arg4, in("r8") arg5, in("r9") arg6, + out("rcx") _, out("r11") _, options(att_syntax), + ); ret } diff --git a/src/bits64/vmx.rs b/src/bits64/vmx.rs index 5535d36..f4d2f7e 100644 --- a/src/bits64/vmx.rs +++ b/src/bits64/vmx.rs @@ -2,6 +2,7 @@ use crate::bits64::rflags::{self, RFlags}; use crate::vmx::{Result, VmFail}; +use core::arch::asm; /// Helper used to extract VMX-specific Result in accordance with /// conventions described in Intel SDM, Volume 3C, Section 30.2. @@ -30,7 +31,7 @@ fn vmx_capture_status() -> Result<()> { /// # Safety /// Needs CPL 0. pub unsafe fn vmxon(addr: u64) -> Result<()> { - llvm_asm!("vmxon $0" : /* no outputs */ : "m"(addr)); + asm!("vmxon ({0})", in(reg) addr, options(att_syntax)); vmx_capture_status() } @@ -39,7 +40,7 @@ pub unsafe fn vmxon(addr: u64) -> Result<()> { /// # Safety /// Needs CPL 0. pub unsafe fn vmxoff() -> Result<()> { - llvm_asm!("vmxoff"); + asm!("vmxoff"); vmx_capture_status() } @@ -51,7 +52,7 @@ pub unsafe fn vmxoff() -> Result<()> { /// # Safety /// Needs CPL 0. pub unsafe fn vmclear(addr: u64) -> Result<()> { - llvm_asm!("vmclear $0" : /* no outputs */ : "m"(addr)); + asm!("vmclear ({0})", in(reg) addr, options(att_syntax)); vmx_capture_status() } @@ -62,7 +63,7 @@ pub unsafe fn vmclear(addr: u64) -> Result<()> { /// # Safety /// Needs CPL 0. pub unsafe fn vmptrld(addr: u64) -> Result<()> { - llvm_asm!("vmptrld $0" : /* no outputs */ : "m"(addr)); + asm!("vmptrld ({0})", in(reg) addr, options(att_syntax)); vmx_capture_status() } @@ -72,7 +73,7 @@ pub unsafe fn vmptrld(addr: u64) -> Result<()> { /// Needs CPL 0. pub unsafe fn vmptrst() -> Result<u64> { let value: u64 = 0; - llvm_asm!("vmptrst ($0)" : /* no outputs */ : "r"(&value) : "memory"); + asm!("vmptrst ({0})", in(reg) &value, options(att_syntax)); vmx_capture_status().and(Ok(value)) } @@ -83,7 +84,7 @@ pub unsafe fn vmptrst() -> Result<u64> { pub unsafe fn vmread(field: u32) -> Result<u64> { let field: u64 = field.into(); let value: u64; - llvm_asm!("vmread $1, $0" : "=r"(value) : "r"(field)); + asm!("vmread {1}, {0}", in(reg) field, out(reg) value, options(att_syntax)); vmx_capture_status().and(Ok(value)) } @@ -93,7 +94,7 @@ pub unsafe fn vmread(field: u32) -> Result<u64> { /// Needs CPL 0. 
pub unsafe fn vmwrite(field: u32, value: u64) -> Result<()> { let field: u64 = field.into(); - llvm_asm!("vmwrite $1, $0" : /* no outputs */ : "r"(field), "r"(value)); + asm!("vmwrite {1}, {0}", in(reg) field, in(reg) value, options(att_syntax)); vmx_capture_status() } @@ -103,7 +104,7 @@ pub unsafe fn vmwrite(field: u32, value: u64) -> Result<()> { /// Needs CPL 0. #[inline(always)] pub unsafe fn vmlaunch() -> Result<()> { - llvm_asm!("vmlaunch"); + asm!("vmlaunch"); vmx_capture_status() } @@ -113,6 +114,6 @@ pub unsafe fn vmlaunch() -> Result<()> { /// Needs CPL 0. #[inline(always)] pub unsafe fn vmresume() -> Result<()> { - llvm_asm!("vmresume"); + asm!("vmresume"); vmx_capture_status() } diff --git a/src/controlregs.rs b/src/controlregs.rs index 11a1d43..dbe3aac 100644 --- a/src/controlregs.rs +++ b/src/controlregs.rs @@ -4,6 +4,7 @@ use bitflags::*; use crate::arch::{_xgetbv, _xsetbv}; +use core::arch::asm; bitflags! { pub struct Cr0: usize { @@ -93,7 +94,7 @@ bitflags! { /// Needs CPL 0. pub unsafe fn cr0() -> Cr0 { let ret: usize; - llvm_asm!("mov %cr0, $0" : "=r" (ret)); + asm!("mov %cr0, {0}", out(reg) ret, options(att_syntax)); Cr0::from_bits_truncate(ret) } @@ -102,7 +103,7 @@ pub unsafe fn cr0() -> Cr0 { /// # Safety /// Needs CPL 0. pub unsafe fn cr0_write(val: Cr0) { - llvm_asm!("mov $0, %cr0" :: "r" (val.bits) : "memory"); + asm!("mov {0}, %cr0", in(reg) val.bits, options(att_syntax)); } /// Contains page-fault linear address. @@ -111,7 +112,7 @@ pub unsafe fn cr0_write(val: Cr0) { /// Needs CPL 0. pub unsafe fn cr2() -> usize { let ret: usize; - llvm_asm!("mov %cr2, $0" : "=r" (ret)); + asm!("mov %cr2, {0}", out(reg) ret, options(att_syntax)); ret } @@ -120,7 +121,7 @@ pub unsafe fn cr2() -> usize { /// # Safety /// Needs CPL 0. pub unsafe fn cr2_write(val: u64) { - llvm_asm!("mov $0, %cr2" :: "r" (val) : "memory"); + asm!("mov {0}, %cr2", in(reg) val as usize, options(att_syntax)); } /// Contains page-table root pointer. @@ -128,9 +129,9 @@ pub unsafe fn cr2_write(val: u64) { /// # Safety /// Needs CPL 0. pub unsafe fn cr3() -> u64 { - let ret: u64; - llvm_asm!("mov %cr3, $0" : "=r" (ret)); - ret + let ret: usize; + asm!("mov %cr3, {0}", out(reg) ret, options(att_syntax)); + ret as u64 } /// Switch page-table PML4 pointer. @@ -138,7 +139,7 @@ pub unsafe fn cr3() -> u64 { /// # Safety /// Needs CPL 0. pub unsafe fn cr3_write(val: u64) { - llvm_asm!("mov $0, %cr3" :: "r" (val) : "memory"); + asm!("mov {0}, %cr3", in(reg) val as usize, options(att_syntax)); } /// Contains various flags to control operations in protected mode. @@ -147,7 +148,7 @@ pub unsafe fn cr3_write(val: u64) { /// Needs CPL 0. pub unsafe fn cr4() -> Cr4 { let ret: usize; - llvm_asm!("mov %cr4, $0" : "=r" (ret)); + asm!("mov %cr4, {0}", out(reg) ret, options(att_syntax)); Cr4::from_bits_truncate(ret) } @@ -167,7 +168,7 @@ pub unsafe fn cr4() -> Cr4 { /// # Safety /// Needs CPL 0. pub unsafe fn cr4_write(val: Cr4) { - llvm_asm!("mov $0, %cr4" :: "r" (val.bits) : "memory"); + asm!("mov {0}, %cr4", in(reg) val.bits, options(att_syntax)); } /// Read Extended Control Register XCR0. diff --git a/src/dtables.rs b/src/dtables.rs index 7a39296..211be35 100644 --- a/src/dtables.rs +++ b/src/dtables.rs @@ -1,5 +1,6 @@ //! Functions and data-structures for working with descriptor tables. use crate::segmentation::SegmentSelector; +use core::arch::asm; use core::fmt; use core::mem::size_of; @@ -61,7 +62,7 @@ impl<T> fmt::Debug for DescriptorTablePointer<T> { /// # Safety /// Needs CPL 0. 
pub unsafe fn lgdt<T>(gdt: &DescriptorTablePointer<T>) { - llvm_asm!("lgdt ($0)" :: "r" (gdt) : "memory"); + asm!("lgdt ({0})", in(reg) gdt, options(att_syntax)); } /// Retrieve base and limit from the GDTR register. @@ -69,7 +70,7 @@ pub unsafe fn lgdt<T>(gdt: &DescriptorTablePointer<T>) { /// # Safety /// Needs CPL 0. pub unsafe fn sgdt<T>(idt: &mut DescriptorTablePointer<T>) { - llvm_asm!("sgdt ($0)" : "+r" (idt as *mut DescriptorTablePointer<T>) :: "memory"); + asm!("sgdt ({0})", in(reg) idt as *mut DescriptorTablePointer<T>, options(att_syntax)); } /// Loads the segment selector into the selector field of the local @@ -83,7 +84,7 @@ pub unsafe fn sgdt<T>(idt: &mut DescriptorTablePointer<T>) { /// # Safety /// Needs CPL 0. pub unsafe fn load_ldtr(selector: SegmentSelector) { - llvm_asm!("lldt $0" :: "r" (selector.bits()) : "memory"); + asm!("lldt {0:x}", in(reg) selector.bits(), options(att_syntax)); } /// Returns the segment selector from the local descriptor table register (LDTR). @@ -95,7 +96,7 @@ pub unsafe fn load_ldtr(selector: SegmentSelector) { /// Needs CPL 0. pub unsafe fn ldtr() -> SegmentSelector { let selector: u16; - llvm_asm!("sldt $0" : "=r"(selector)); + asm!("sldt {0:x}", out(reg) selector, options(att_syntax)); SegmentSelector::from_raw(selector) } @@ -104,7 +105,7 @@ pub unsafe fn ldtr() -> SegmentSelector { /// # Safety /// Needs CPL 0. pub unsafe fn lidt<T>(idt: &DescriptorTablePointer<T>) { - llvm_asm!("lidt ($0)" :: "r" (idt) : "memory"); + asm!("lidt ({0})", in(reg) idt, options(att_syntax)); } /// Retrieve base and limit from the IDTR register. @@ -112,7 +113,7 @@ pub unsafe fn lidt<T>(idt: &DescriptorTablePointer<T>) { /// # Safety /// Needs CPL 0. pub unsafe fn sidt<T>(idt: &mut DescriptorTablePointer<T>) { - llvm_asm!("sidt ($0)" : "+r" (idt as *mut DescriptorTablePointer<T>) :: "memory"); + asm!("sidt ({0})", in(reg) idt as *mut DescriptorTablePointer<T>, options(att_syntax)); } #[cfg(all(test, feature = "utest"))] diff --git a/src/fence.rs b/src/fence.rs index a61aedc..86015fc 100644 --- a/src/fence.rs +++ b/src/fence.rs @@ -1,11 +1,13 @@ //! Intel fence instructions +use core::arch::asm; + /// mfence -- Memory Fence /// /// Performs a serializing operation on all load-from-memory and store-to-memory /// instructions that were issued prior the MFENCE instruction. pub fn mfence() { - unsafe { llvm_asm!("mfence" ::: "memory") }; + unsafe { asm!("mfence") }; } /// sfence -- Store Fence @@ -14,7 +16,7 @@ pub fn mfence() { /// instruction. The processor ensures that every store prior to SFENCE is /// globally visible before any store after SFENCE becomes globally visible. pub fn sfence() { - unsafe { llvm_asm!("sfence" ::: "memory") }; + unsafe { asm!("sfence") }; } /// lfence -- Load Fence @@ -24,5 +26,5 @@ pub fn sfence() { /// execute until all prior instructions have completed locally, and no later /// instruction begins execution until LFENCE completes. pub fn lfence() { - unsafe { llvm_asm!("lfence" ::: "memory") }; + unsafe { asm!("lfence") }; } @@ -1,12 +1,14 @@ //! I/O port functionality. +use core::arch::asm; + /// Write 8 bits to port /// /// # Safety /// Needs IO privileges. 
#[inline] pub unsafe fn outb(port: u16, val: u8) { - llvm_asm!("outb %al, %dx" :: "{dx}"(port), "{al}"(val)); + asm!("outb %al, %dx", in("al") val, in("dx") port, options(att_syntax)); } /// Read 8 bits from port @@ -16,7 +18,7 @@ pub unsafe fn outb(port: u16, val: u8) { #[inline] pub unsafe fn inb(port: u16) -> u8 { let ret: u8; - llvm_asm!("inb %dx, %al" : "={ax}"(ret) : "{dx}"(port) :: "volatile"); + asm!("inb %dx, %al", in("dx") port, out("al") ret, options(att_syntax)); ret } @@ -26,7 +28,7 @@ pub unsafe fn inb(port: u16) -> u8 { /// Needs IO privileges. #[inline] pub unsafe fn outw(port: u16, val: u16) { - llvm_asm!("outw %ax, %dx" :: "{dx}"(port), "{al}"(val)); + asm!("outw %ax, %dx", in("ax") val, in("dx") port, options(att_syntax)); } /// Read 16 bits from port @@ -36,7 +38,7 @@ pub unsafe fn outw(port: u16, val: u16) { #[inline] pub unsafe fn inw(port: u16) -> u16 { let ret: u16; - llvm_asm!("inw %dx, %ax" : "={ax}"(ret) : "{dx}"(port) :: "volatile"); + asm!("inw %dx, %ax", in("dx") port, out("ax") ret, options(att_syntax)); ret } @@ -46,7 +48,7 @@ pub unsafe fn inw(port: u16) -> u16 { /// Needs IO privileges. #[inline] pub unsafe fn outl(port: u16, val: u32) { - llvm_asm!("outl %eax, %dx" :: "{dx}"(port), "{al}"(val)); + asm!("outl %eax, %dx", in("eax") val, in("dx") port, options(att_syntax)); } /// Read 32 bits from port @@ -56,7 +58,7 @@ pub unsafe fn outl(port: u16, val: u32) { #[inline] pub unsafe fn inl(port: u16) -> u32 { let ret: u32; - llvm_asm!("inl %dx, %eax" : "={ax}"(ret) : "{dx}"(port) :: "volatile"); + asm!("inl %dx, %eax", out("eax") ret, in("dx") port, options(att_syntax)); ret } @@ -1,6 +1,6 @@ #![cfg(any(target_arch = "x86", target_arch = "x86_64"))] #![allow(stable_features)] -#![feature(asm, llvm_asm, core_intrinsics)] +#![feature(asm, core_intrinsics)] #![no_std] #![cfg_attr(test, allow(unused_features))] #![cfg_attr(all(test, feature = "vmtest"), feature(custom_test_frameworks))] @@ -1,5 +1,7 @@ //! MSR value list and function to read and write them. +use core::arch::asm; + /// Write 64 bits to msr register. /// /// # Safety @@ -7,7 +9,7 @@ pub unsafe fn wrmsr(msr: u32, value: u64) { let low = value as u32; let high = (value >> 32) as u32; - llvm_asm!("wrmsr" :: "{ecx}" (msr), "{eax}" (low), "{edx}" (high) : "memory" : "volatile" ); + asm!("wrmsr", in("ecx") msr, in("eax") low, in("edx") high); } /// Read 64 bits msr register. @@ -17,7 +19,7 @@ pub unsafe fn wrmsr(msr: u32, value: u64) { #[allow(unused_mut)] pub unsafe fn rdmsr(msr: u32) -> u64 { let (high, low): (u32, u32); - llvm_asm!("rdmsr" : "={eax}" (low), "={edx}" (high) : "{ecx}" (msr) : "memory" : "volatile"); + asm!("rdmsr", out("eax") low, out("edx") high, in("ecx") msr); ((high as u64) << 32) | (low as u64) } diff --git a/src/segmentation.rs b/src/segmentation.rs index 3ff748a..d11a44d 100644 --- a/src/segmentation.rs +++ b/src/segmentation.rs @@ -2,6 +2,7 @@ //! descriptors and selectors. use bitflags::*; +use core::arch::asm; use core::fmt; use crate::Ring; @@ -561,7 +562,7 @@ impl Descriptor { /// # Safety /// Needs CPL 0. pub unsafe fn load_ss(sel: SegmentSelector) { - llvm_asm!("movw $0, %ss " :: "r" (sel.bits()) : "memory"); + asm!("movw {0:x}, %ss", in(reg) sel.bits(), options(att_syntax)); } /// Reload data segment register. @@ -569,7 +570,7 @@ pub unsafe fn load_ss(sel: SegmentSelector) { /// # Safety /// Needs CPL 0. 
pub unsafe fn load_ds(sel: SegmentSelector) { - llvm_asm!("movw $0, %ds " :: "r" (sel.bits()) : "memory"); + asm!("movw {0:x}, %ds", in(reg) sel.bits(), options(att_syntax)); } /// Reload es segment register. @@ -577,7 +578,7 @@ pub unsafe fn load_ds(sel: SegmentSelector) { /// # Safety /// Needs CPL 0. pub unsafe fn load_es(sel: SegmentSelector) { - llvm_asm!("movw $0, %es " :: "r" (sel.bits()) : "memory"); + asm!("movw {0:x}, %es", in(reg) sel.bits(), options(att_syntax)); } /// Reload fs segment register. @@ -585,7 +586,7 @@ pub unsafe fn load_es(sel: SegmentSelector) { /// # Safety /// Needs CPL 0. pub unsafe fn load_fs(sel: SegmentSelector) { - llvm_asm!("movw $0, %fs " :: "r" (sel.bits()) : "memory"); + asm!("movw {0:x}, %fs", in(reg) sel.bits(), options(att_syntax)); } /// Reload gs segment register. @@ -593,7 +594,7 @@ pub unsafe fn load_fs(sel: SegmentSelector) { /// # Safety /// Needs CPL 0. pub unsafe fn load_gs(sel: SegmentSelector) { - llvm_asm!("movw $0, %gs " :: "r" (sel.bits()) : "memory"); + asm!("movw {0:x}, %gs", in(reg) sel.bits(), options(att_syntax)); } pub use crate::current::segmentation::load_cs; @@ -601,42 +602,42 @@ pub use crate::current::segmentation::load_cs; /// Returns the current value of the code segment register. pub fn cs() -> SegmentSelector { let segment: u16; - unsafe { llvm_asm!("mov %cs, $0" : "=r" (segment) ) }; + unsafe { asm!("mov %cs, {0:x}", out(reg) segment, options(att_syntax)) }; SegmentSelector::from_raw(segment) } /// Returns the current value of the extra segment register. pub fn es() -> SegmentSelector { let segment: u16; - unsafe { llvm_asm!("mov %es, $0" : "=r" (segment) ) }; + unsafe { asm!("mov %es, {0:x}", out(reg) segment, options(att_syntax)) }; SegmentSelector::from_raw(segment) } /// Returns the current value of the stack segment register. pub fn ss() -> SegmentSelector { let segment: u16; - unsafe { llvm_asm!("mov %ss, $0" : "=r" (segment) ) }; + unsafe { asm!("mov %ss, {0:x}", out(reg) segment, options(att_syntax)) }; SegmentSelector::from_raw(segment) } /// Returns the current value of the data segment register. pub fn ds() -> SegmentSelector { let segment: u16; - unsafe { llvm_asm!("mov %ds, $0" : "=r" (segment) ) }; + unsafe { asm!("mov %ds, {0:x}", out(reg) segment, options(att_syntax)) }; SegmentSelector::from_raw(segment) } /// Returns the current value of the FS segment register. pub fn fs() -> SegmentSelector { let segment: u16; - unsafe { llvm_asm!("mov %fs, $0" : "=r" (segment) ) }; + unsafe { asm!("mov %fs, {0:x}", out(reg) segment, options(att_syntax)) }; SegmentSelector::from_raw(segment) } /// Returns the current value of the GS segment register. pub fn gs() -> SegmentSelector { let segment: u16; - unsafe { llvm_asm!("mov %gs, $0" : "=r" (segment) ) }; + unsafe { asm!("mov %gs, {0:x}", out(reg) segment, options(att_syntax)) }; SegmentSelector::from_raw(segment) } diff --git a/x86test/Cargo.toml b/x86test/Cargo.toml index 739c3fe..96cbc09 100644 --- a/x86test/Cargo.toml +++ b/x86test/Cargo.toml @@ -19,7 +19,7 @@ Custom test runner for bare-metal x86 tests. x86test-macro = { path = "x86test_macro" } x86test-types = { path = "x86test_types" } kvm-sys = "0.3.0" -x86 = "0.44.0" +x86 = { path = ".." 
} mmap = "0.1.1" log = "0.4" klogger = { version = "0.0.8", features = ["use_ioports"] } diff --git a/x86test/README.md b/x86test/README.md index e543cee..f18223b 100644 --- a/x86test/README.md +++ b/x86test/README.md @@ -30,7 +30,7 @@ For example say we have a function like this: #[inline] pub unsafe fn inw(port: u16) -> u16 { let ret: u16; - llvm_asm!("inw %dx, %ax" : "={ax}"(ret) : "{dx}"(port) :: "volatile"); + asm!("inw %dx, %ax", in("dx") port, out("ax") ret, options(att_syntax)); ret } ``` @@ -89,4 +89,4 @@ Should be done in the following order: * Release new version of `x86test-types` * Release new version of `x86test-macro` (adjust version dependency of x86test-types) * Release new version of `x86test` (adjust version dependency of x86test-types and x86test-macro) -* Tag with `git tag x86test-0.0.x`
\ No newline at end of file +* Tag with `git tag x86test-0.0.x`