Diffstat (limited to 'src/asm.rs')
-rw-r--r--   src/asm.rs | 139
1 file changed, 112 insertions(+), 27 deletions(-)
diff --git a/src/asm.rs b/src/asm.rs
index 4dc1ab0..0434b5f 100644
--- a/src/asm.rs
+++ b/src/asm.rs
@@ -1,18 +1,17 @@
//! Miscellaneous assembly instructions
-// When inline assembly is enabled, pull in the assembly routines here. `call_asm!` will invoke
-// these routines.
-#[cfg(feature = "inline-asm")]
-#[path = "../asm/inline.rs"]
-pub(crate) mod inline;
+#[cfg(cortex_m)]
+use core::arch::asm;
+use core::sync::atomic::{compiler_fence, Ordering};
/// Puts the processor in Debug state. Debuggers can pick this up as a "breakpoint".
///
/// **NOTE** calling `bkpt` when the processor is not connected to a debugger will cause an
/// exception.
+#[cfg(cortex_m)]
#[inline(always)]
pub fn bkpt() {
- call_asm!(__bkpt());
+ unsafe { asm!("bkpt", options(nomem, nostack, preserves_flags)) };
}
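Since `bkpt` raises a fault when no debugger is attached, callers often gate it on the build profile. A minimal sketch (the `debug_break` helper is illustrative, not part of the crate):

    /// Break into the debugger in debug builds only; `bkpt` causes an
    /// exception when the processor is not connected to a debugger.
    fn debug_break() {
        #[cfg(debug_assertions)]
        cortex_m::asm::bkpt();
    }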
/// Blocks the program for *at least* `cycles` CPU cycles.
@@ -24,41 +23,66 @@ pub fn bkpt() {
/// and the execution time may vary with other factors. This delay is mainly useful for simple
/// timer-less initialization of peripherals when accurate timing is not essential. In any
/// other case, please use a more accurate method to produce a delay.
+#[cfg(cortex_m)]
#[inline]
pub fn delay(cycles: u32) {
-    call_asm!(__delay(cycles: u32));
+    // The loop will normally take 3 to 4 CPU cycles per iteration, but superscalar cores
+    // (e.g. Cortex-M7) can potentially do it in 2, so we use that as the lower bound, since
+    // delaying for more cycles is okay.
+    // Add 1 to prevent an integer underflow which would cause a long freeze.
+    let real_cycles = 1 + cycles / 2;
+    unsafe {
+        asm!(
+            // Use local labels to avoid R_ARM_THM_JUMP8 relocations which fail on thumbv6m.
+            "1:",
+            "subs {}, #1",
+            "bne 1b",
+            inout(reg) real_cycles => _,
+            options(nomem, nostack),
+        )
+    };
}
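The arithmetic above preserves the "at least" contract: assuming a worst case of 2 cycles per iteration, `1 + cycles / 2` iterations always cover `cycles` CPU cycles. A usage sketch, where the 48 MHz clock and the `rough_wait_ms` helper are assumptions for illustration:

    const CORE_HZ: u32 = 48_000_000; // assumed CPU frequency, not known to the crate

    /// Busy-wait roughly `ms` milliseconds. `delay` only promises a lower
    /// bound, so the wait may be longer, never shorter.
    fn rough_wait_ms(ms: u32) {
        cortex_m::asm::delay(ms * (CORE_HZ / 1_000)); // beware overflow for large `ms`
    }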
/// A no-operation. Useful to prevent delay loops from being optimized away.
#[inline]
pub fn nop() {
-    call_asm!(__nop());
+    // NOTE: This is a `pure` asm block, but applying that option allows the compiler to eliminate
+    // the nop entirely (or to collapse multiple subsequent ones). Since the user probably wants N
+    // nops when they call `nop` N times, let's not add that option.
+    #[cfg(cortex_m)]
+    unsafe {
+        asm!("nop", options(nomem, nostack, preserves_flags))
+    };
}
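Because the block is deliberately not marked `pure`, each call emits its own NOP and repeated calls are not collapsed. A sketch (the helper name and count are arbitrary):

    /// Emit four distinct NOP instructions that the optimizer keeps.
    fn four_nops() {
        for _ in 0..4 {
            cortex_m::asm::nop();
        }
    }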
/// Generate an Undefined Instruction exception.
///
/// Can be used as a stable alternative to `core::intrinsics::abort`.
+#[cfg(cortex_m)]
#[inline]
pub fn udf() -> ! {
-    call_asm!(__udf() -> !)
+    unsafe { asm!("udf #0", options(noreturn, nomem, nostack, preserves_flags)) };
}
/// Wait For Event
+#[cfg(cortex_m)]
#[inline]
pub fn wfe() {
-    call_asm!(__wfe())
+    unsafe { asm!("wfe", options(nomem, nostack, preserves_flags)) };
}
/// Wait For Interrupt
+#[cfg(cortex_m)]
#[inline]
pub fn wfi() {
-    call_asm!(__wfi())
+    unsafe { asm!("wfi", options(nomem, nostack, preserves_flags)) };
}
/// Send Event
+#[cfg(cortex_m)]
#[inline]
pub fn sev() {
-    call_asm!(__sev())
+    unsafe { asm!("sev", options(nomem, nostack, preserves_flags)) };
}
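`wfi`, `wfe` and `sev` work together: `sev` sets the event register so that a later `wfe` returns immediately, while `wfi` sleeps until an interrupt. A typical idle loop, shown as a sketch:

    /// Park the core between interrupts; all work happens in ISRs.
    fn idle() -> ! {
        loop {
            cortex_m::asm::wfi(); // wakes on any enabled interrupt
        }
    }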
/// Instruction Synchronization Barrier
@@ -67,7 +91,12 @@ pub fn sev() {
/// from cache or memory, after the instruction has been completed.
#[inline]
pub fn isb() {
-    call_asm!(__isb())
+    compiler_fence(Ordering::SeqCst);
+    #[cfg(cortex_m)]
+    unsafe {
+        asm!("isb", options(nomem, nostack, preserves_flags))
+    };
+    compiler_fence(Ordering::SeqCst);
}
/// Data Synchronization Barrier
@@ -79,7 +108,12 @@ pub fn isb() {
/// * all cache and branch predictor maintenance operations before this instruction complete
#[inline]
pub fn dsb() {
-    call_asm!(__dsb())
+    compiler_fence(Ordering::SeqCst);
+    #[cfg(cortex_m)]
+    unsafe {
+        asm!("dsb", options(nomem, nostack, preserves_flags))
+    };
+    compiler_fence(Ordering::SeqCst);
}
/// Data Memory Barrier
@@ -89,7 +123,12 @@ pub fn dsb() {
/// after the `DMB` instruction.
#[inline]
pub fn dmb() {
-    call_asm!(__dmb())
+    compiler_fence(Ordering::SeqCst);
+    #[cfg(cortex_m)]
+    unsafe {
+        asm!("dmb", options(nomem, nostack, preserves_flags))
+    };
+    compiler_fence(Ordering::SeqCst);
}
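In all three barriers the surrounding `compiler_fence` calls matter: the asm blocks are declared `nomem`, so without the fences the compiler could reorder memory accesses across the hardware barrier. A sketch of the classic `dmb` pattern, publishing a buffer before triggering a consumer; `dma_start` stands in for a hypothetical peripheral trigger register:

    /// Fill a buffer, then start a transfer that will read it.
    /// Safety: `dma_start` must point at a valid, writable trigger register.
    unsafe fn kick_dma(buf: &mut [u8; 64], dma_start: *mut u32) {
        buf.fill(0xA5); // prepare the data...
        cortex_m::asm::dmb(); // ...make the writes observable first
        unsafe { core::ptr::write_volatile(dma_start, 1) }; // then trigger the transfer
    }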
/// Test Target
@@ -102,8 +141,15 @@ pub fn dmb() {
// The `tt` instruction does not dereference the pointer received.
#[allow(clippy::not_unsafe_ptr_arg_deref)]
pub fn tt(addr: *mut u32) -> u32 {
-    let addr = addr as u32;
-    call_asm!(__tt(addr: u32) -> u32)
+    let mut target = addr as u32;
+    unsafe {
+        asm!(
+            "tt {target}, {target}",
+            target = inout(reg) target,
+            options(nomem, nostack, preserves_flags),
+        )
+    };
+    target
}
/// Test Target Unprivileged
@@ -117,8 +163,15 @@ pub fn tt(addr: *mut u32) -> u32 {
// The `ttt` instruction does not dereference the pointer received.
#[allow(clippy::not_unsafe_ptr_arg_deref)]
pub fn ttt(addr: *mut u32) -> u32 {
-    let addr = addr as u32;
-    call_asm!(__ttt(addr: u32) -> u32)
+    let mut target = addr as u32;
+    unsafe {
+        asm!(
+            "ttt {target}, {target}",
+            target = inout(reg) target,
+            options(nomem, nostack, preserves_flags),
+        )
+    };
+    target
}
/// Test Target Alternate Domain
@@ -133,8 +186,15 @@ pub fn ttt(addr: *mut u32) -> u32 {
// The `tta` instruction does not dereference the pointer received.
#[allow(clippy::not_unsafe_ptr_arg_deref)]
pub fn tta(addr: *mut u32) -> u32 {
-    let addr = addr as u32;
-    call_asm!(__tta(addr: u32) -> u32)
+    let mut target = addr as u32;
+    unsafe {
+        asm!(
+            "tta {target}, {target}",
+            target = inout(reg) target,
+            options(nomem, nostack, preserves_flags),
+        )
+    };
+    target
}
/// Test Target Alternate Domain Unprivileged
@@ -149,8 +209,15 @@ pub fn tta(addr: *mut u32) -> u32 {
// The `ttat` instruction does not dereference the pointer received.
#[allow(clippy::not_unsafe_ptr_arg_deref)]
pub fn ttat(addr: *mut u32) -> u32 {
-    let addr = addr as u32;
-    call_asm!(__ttat(addr: u32) -> u32)
+    let mut target = addr as u32;
+    unsafe {
+        asm!(
+            "ttat {target}, {target}",
+            target = inout(reg) target,
+            options(nomem, nostack, preserves_flags),
+        )
+    };
+    target
}
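All four variants return the Test Target Response Payload in a register. A decoding sketch for two of its flags; the bit positions are quoted from memory of the Armv8-M Architecture Reference Manual and should be verified there before use:

    /// Report whether `addr` is readable / read-writable from the current
    /// security state, per the TT response payload (Armv8-M only).
    fn access_from_current_state(addr: *mut u32) -> (bool, bool) {
        let resp = cortex_m::asm::tt(addr);
        let r = (resp & (1 << 18)) != 0;  // R flag: readable
        let rw = (resp & (1 << 19)) != 0; // RW flag: readable and writable
        (r, rw)
    }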
/// Branch and Exchange Non-secure
@@ -160,15 +227,17 @@ pub fn ttat(addr: *mut u32) -> u32 {
#[inline]
#[cfg(armv8m)]
pub unsafe fn bx_ns(addr: u32) {
-    call_asm!(__bxns(addr: u32));
+    asm!("bxns {}", in(reg) addr, options(nomem, nostack, preserves_flags));
}
/// Semihosting syscall.
///
/// This method is used by cortex-m-semihosting to provide semihosting syscalls.
+#[cfg(cortex_m)]
#[inline]
-pub unsafe fn semihosting_syscall(nr: u32, arg: u32) -> u32 {
-    call_asm!(__sh_syscall(nr: u32, arg: u32) -> u32)
+pub unsafe fn semihosting_syscall(mut nr: u32, arg: u32) -> u32 {
+    asm!("bkpt #0xab", inout("r0") nr, in("r1") arg, options(nomem, nostack, preserves_flags));
+    nr
}
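On top of this, a semihosting request is just an operation number in `r0` plus a parameter (often a pointer) in `r1`. A sketch using SYS_WRITE0 (operation 0x04 in the Arm semihosting specification, which prints a NUL-terminated string on the host console); the `host_write0` helper is illustrative:

    fn host_write0(msg: &core::ffi::CStr) {
        // SAFETY: SYS_WRITE0 only reads the NUL-terminated string passed in r1,
        // and is only meaningful when a debugger services semihosting requests.
        unsafe {
            cortex_m::asm::semihosting_syscall(0x04, msg.as_ptr() as u32);
        }
    }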
/// Bootstrap.
@@ -181,12 +250,27 @@ pub unsafe fn semihosting_syscall(nr: u32, arg: u32) -> u32 {
///
/// `msp` and `rv` must point to valid stack memory and executable code,
/// respectively.
+#[cfg(cortex_m)]
#[inline]
pub unsafe fn bootstrap(msp: *const u32, rv: *const u32) -> ! {
    // Ensure thumb mode is set.
    let rv = (rv as u32) | 1;
    let msp = msp as u32;
-    call_asm!(__bootstrap(msp: u32, rv: u32) -> !);
+    asm!(
+        "mrs {tmp}, CONTROL",
+        "bics {tmp}, {spsel}",
+        "msr CONTROL, {tmp}",
+        "isb",
+        "msr MSP, {msp}",
+        "bx {rv}",
+        // `out(reg) _` is not permitted in a `noreturn` asm! call,
+        // so instead use `in(reg) 0` and don't restore it afterwards.
+        tmp = in(reg) 0,
+        spsel = in(reg) 2,
+        msp = in(reg) msp,
+        rv = in(reg) rv,
+        options(noreturn, nomem, nostack),
+    );
}
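This is typically used to chain-load another image whose vector table starts with the initial stack pointer followed by the reset vector, which is exactly what `bootload` below does. A sketch with a hypothetical application base address:

    /// Hand control to an image at a fixed flash address (the address is an
    /// assumption for illustration; substitute your memory map's real value).
    unsafe fn jump_to_app() -> ! {
        const APP_BASE: *const u32 = 0x0800_4000 as *const u32;
        // SAFETY: caller guarantees a valid image lives at APP_BASE.
        unsafe {
            let msp = core::ptr::read_volatile(APP_BASE) as *const u32; // word 0: initial MSP
            let rv = core::ptr::read_volatile(APP_BASE.wrapping_add(1)) as *const u32; // word 1: reset vector
            cortex_m::asm::bootstrap(msp, rv)
        }
    }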
/// Bootload.
@@ -201,6 +285,7 @@ pub unsafe fn bootstrap(msp: *const u32, rv: *const u32) -> ! {
/// The provided `vector_table` must point to a valid vector
/// table, with a valid stack pointer as the first word and
/// a valid reset vector as the second word.
+#[cfg(cortex_m)]
#[inline]
pub unsafe fn bootload(vector_table: *const u32) -> ! {
    let msp = core::ptr::read_volatile(vector_table);