Diffstat (limited to 'src')
-rw-r--r--  src/asm.rs                   |  209
-rw-r--r--  src/call_asm.rs              |   24
-rw-r--r--  src/cmse.rs                  |  238
-rw-r--r--  src/delay.rs                 |  136
-rw-r--r--  src/interrupt.rs             |   64
-rw-r--r--  src/itm.rs                   |  158
-rw-r--r--  src/lib.rs                   |  101
-rw-r--r--  src/macros.rs                |  114
-rw-r--r--  src/peripheral/ac.rs         |   93
-rw-r--r--  src/peripheral/cbp.rs        |  138
-rw-r--r--  src/peripheral/cpuid.rs      |  140
-rw-r--r--  src/peripheral/dcb.rs        |   81
-rw-r--r--  src/peripheral/dwt.rs        |  495
-rw-r--r--  src/peripheral/fpb.rs        |   21
-rw-r--r--  src/peripheral/fpu.rs        |   19
-rw-r--r--  src/peripheral/icb.rs        |   32
-rw-r--r--  src/peripheral/itm.rs        |  215
-rw-r--r--  src/peripheral/mod.rs        |  594
-rw-r--r--  src/peripheral/mpu.rs        |   65
-rw-r--r--  src/peripheral/nvic.rs       |  265
-rw-r--r--  src/peripheral/sau.rs        |  243
-rw-r--r--  src/peripheral/scb.rs        | 1110
-rw-r--r--  src/peripheral/syst.rs       |  185
-rw-r--r--  src/peripheral/test.rs       |  170
-rw-r--r--  src/peripheral/tpiu.rs       |  161
-rw-r--r--  src/prelude.rs               |    3
-rw-r--r--  src/register/apsr.rs         |   54
-rw-r--r--  src/register/basepri.rs      |   24
-rw-r--r--  src/register/basepri_max.rs  |   21
-rw-r--r--  src/register/control.rs      |  164
-rw-r--r--  src/register/faultmask.rs    |   35
-rw-r--r--  src/register/fpscr.rs        |  305
-rw-r--r--  src/register/lr.rs           |   17
-rw-r--r--  src/register/mod.rs          |   68
-rw-r--r--  src/register/msp.rs          |   32
-rw-r--r--  src/register/msplim.rs       |   13
-rw-r--r--  src/register/pc.rs           |   17
-rw-r--r--  src/register/primask.rs      |   35
-rw-r--r--  src/register/psp.rs          |   13
-rw-r--r--  src/register/psplim.rs       |   13
40 files changed, 5885 insertions, 0 deletions
diff --git a/src/asm.rs b/src/asm.rs
new file mode 100644
index 0000000..4dc1ab0
--- /dev/null
+++ b/src/asm.rs
@@ -0,0 +1,209 @@
+//! Miscellaneous assembly instructions
+
+// When inline assembly is enabled, pull in the assembly routines here. `call_asm!` will invoke
+// these routines.
+#[cfg(feature = "inline-asm")]
+#[path = "../asm/inline.rs"]
+pub(crate) mod inline;
+
+/// Puts the processor in Debug state. Debuggers can pick this up as a "breakpoint".
+///
+/// **NOTE** calling `bkpt` when the processor is not connected to a debugger will cause an
+/// exception.
+#[inline(always)]
+pub fn bkpt() {
+ call_asm!(__bkpt());
+}
+
+/// Blocks the program for *at least* `cycles` CPU cycles.
+///
+/// This is implemented in assembly, so its execution time is independent of the optimization
+/// level; it does, however, depend on the specific architecture and core configuration.
+///
+/// NOTE that the delay can take much longer if interrupts are serviced during its execution
+/// and the execution time may vary with other factors. This delay is mainly useful for simple
+/// timer-less initialization of peripherals when accurate timing is not essential. In any
+/// other case please use a more accurate method to produce a delay.
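+///
+/// # Example
+///
+/// A minimal sketch, assuming a (made-up) 8 MHz core clock:
+///
+/// ```no_run
+/// use cortex_m::asm;
+///
+/// // Block for at least 1 ms at 8 MHz.
+/// asm::delay(8_000);
+/// ```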
+#[inline]
+pub fn delay(cycles: u32) {
+ call_asm!(__delay(cycles: u32));
+}
+
+/// A no-operation. Useful to prevent delay loops from being optimized away.
+#[inline]
+pub fn nop() {
+ call_asm!(__nop());
+}
+
+/// Generate an Undefined Instruction exception.
+///
+/// Can be used as a stable alternative to `core::intrinsics::abort`.
+#[inline]
+pub fn udf() -> ! {
+ call_asm!(__udf() -> !)
+}
+
+/// Wait For Event
+#[inline]
+pub fn wfe() {
+ call_asm!(__wfe())
+}
+
+/// Wait For Interrupt
+#[inline]
+pub fn wfi() {
+ call_asm!(__wfi())
+}
+
+/// Send Event
+#[inline]
+pub fn sev() {
+ call_asm!(__sev())
+}
+
+/// Instruction Synchronization Barrier
+///
+/// Flushes the processor pipeline, so that all instructions following the `ISB` are fetched
+/// from cache or memory only after the `ISB` instruction has completed.
+#[inline]
+pub fn isb() {
+ call_asm!(__isb())
+}
+
+/// Data Synchronization Barrier
+///
+/// Acts as a special kind of memory barrier. No instruction in program order after this instruction
+/// can execute until this instruction completes. This instruction completes only when both:
+///
+/// * any explicit memory access made before this instruction is complete
+/// * all cache and branch predictor maintenance operations before this instruction complete
+#[inline]
+pub fn dsb() {
+ call_asm!(__dsb())
+}
+
+/// Data Memory Barrier
+///
+/// Ensures that all explicit memory accesses that appear in program order before the `DMB`
+/// instruction are observed before any explicit memory accesses that appear in program order
+/// after the `DMB` instruction.
+#[inline]
+pub fn dmb() {
+ call_asm!(__dmb())
+}
+
+/// Test Target
+///
+/// Queries the Security state and access permissions of a memory location.
+/// Returns a Test Target Response Payload (cf section D1.2.215 of
+/// Armv8-M Architecture Reference Manual).
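+///
+/// # Example
+///
+/// A minimal sketch; the queried address is made up, and the S bit position (bit 22)
+/// comes from the Test Target Response Payload layout:
+///
+/// ```no_run
+/// use cortex_m::asm;
+///
+/// let payload = asm::tt(0x2000_0000 as *mut u32);
+/// let _is_secure = payload & (1 << 22) != 0; // S bit of the response payload
+/// ```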
+#[inline]
+#[cfg(armv8m)]
+// The __tt function does not dereference the pointer received.
+#[allow(clippy::not_unsafe_ptr_arg_deref)]
+pub fn tt(addr: *mut u32) -> u32 {
+ let addr = addr as u32;
+ call_asm!(__tt(addr: u32) -> u32)
+}
+
+/// Test Target Unprivileged
+///
+/// Queries the Security state and access permissions of a memory location for an unprivileged
+/// access to that location.
+/// Returns a Test Target Response Payload (cf section D1.2.215 of
+/// Armv8-M Architecture Reference Manual).
+#[inline]
+#[cfg(armv8m)]
+// The __ttt function does not dereference the pointer received.
+#[allow(clippy::not_unsafe_ptr_arg_deref)]
+pub fn ttt(addr: *mut u32) -> u32 {
+ let addr = addr as u32;
+ call_asm!(__ttt(addr: u32) -> u32)
+}
+
+/// Test Target Alternate Domain
+///
+/// Queries the Security state and access permissions of a memory location for a Non-Secure access
+/// to that location. This instruction is only valid when executing in Secure state and is
+/// undefined if used from Non-Secure state.
+/// Returns a Test Target Response Payload (cf section D1.2.215 of
+/// Armv8-M Architecture Reference Manual).
+#[inline]
+#[cfg(armv8m)]
+// The __tta function does not dereference the pointer received.
+#[allow(clippy::not_unsafe_ptr_arg_deref)]
+pub fn tta(addr: *mut u32) -> u32 {
+ let addr = addr as u32;
+ call_asm!(__tta(addr: u32) -> u32)
+}
+
+/// Test Target Alternate Domain Unprivileged
+///
+/// Queries the Security state and access permissions of a memory location for a Non-Secure and
+/// unprivileged access to that location. This instruction is only valid when executing in Secure
+/// state and is undefined if used from Non-Secure state.
+/// Returns a Test Target Response Payload (cf section D1.2.215 of
+/// Armv8-M Architecture Reference Manual).
+#[inline]
+#[cfg(armv8m)]
+// The __ttat function does not dereference the pointer received.
+#[allow(clippy::not_unsafe_ptr_arg_deref)]
+pub fn ttat(addr: *mut u32) -> u32 {
+ let addr = addr as u32;
+ call_asm!(__ttat(addr: u32) -> u32)
+}
+
+/// Branch and Exchange Non-secure
+///
+/// See section C2.4.26 of Armv8-M Architecture Reference Manual for details.
+/// Undefined if executed in Non-Secure state.
+#[inline]
+#[cfg(armv8m)]
+pub unsafe fn bx_ns(addr: u32) {
+ call_asm!(__bxns(addr: u32));
+}
+
+/// Semihosting syscall.
+///
+/// This method is used by cortex-m-semihosting to provide semihosting syscalls.
+#[inline]
+pub unsafe fn semihosting_syscall(nr: u32, arg: u32) -> u32 {
+ call_asm!(__sh_syscall(nr: u32, arg: u32) -> u32)
+}
+
+/// Bootstrap.
+///
+/// Clears CONTROL.SPSEL (setting the main stack to be the active stack),
+/// updates the main stack pointer to the address in `msp`, then jumps
+/// to the address in `rv`.
+///
+/// # Safety
+///
+/// `msp` and `rv` must point to valid stack memory and executable code,
+/// respectively.
+#[inline]
+pub unsafe fn bootstrap(msp: *const u32, rv: *const u32) -> ! {
+ // Ensure thumb mode is set.
+ let rv = (rv as u32) | 1;
+ let msp = msp as u32;
+ call_asm!(__bootstrap(msp: u32, rv: u32) -> !);
+}
+
+/// Bootload.
+///
+/// Reads the initial stack pointer value and reset vector from
+/// the provided vector table address, sets the active stack to
+/// the main stack, sets the main stack pointer to the new initial
+/// stack pointer, then jumps to the reset vector.
+///
+/// # Safety
+///
+/// The provided `vector_table` must point to a valid vector
+/// table, with a valid stack pointer as the first word and
+/// a valid reset vector as the second word.
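+///
+/// # Example
+///
+/// A minimal sketch; the vector table address is made up:
+///
+/// ```no_run
+/// use cortex_m::asm;
+///
+/// unsafe {
+///     // Jump to a hypothetical application image whose vector table is at 0x0800_4000.
+///     asm::bootload(0x0800_4000 as *const u32);
+/// }
+/// ```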
+#[inline]
+pub unsafe fn bootload(vector_table: *const u32) -> ! {
+ let msp = core::ptr::read_volatile(vector_table);
+ let rv = core::ptr::read_volatile(vector_table.offset(1));
+ bootstrap(msp as *const u32, rv as *const u32);
+}
diff --git a/src/call_asm.rs b/src/call_asm.rs
new file mode 100644
index 0000000..295277f
--- /dev/null
+++ b/src/call_asm.rs
@@ -0,0 +1,24 @@
+/// An internal macro to invoke an assembly routine.
+///
+/// Depending on whether the unstable `inline-asm` feature is enabled, this will either call into
+/// the inline assembly implementation directly, or through the FFI shim (see `asm/lib.rs`).
+macro_rules! call_asm {
+ ( $func:ident ( $($args:ident: $tys:ty),* ) $(-> $ret:ty)? ) => {{
+ #[allow(unused_unsafe)]
+ unsafe {
+ match () {
+ #[cfg(feature = "inline-asm")]
+ () => crate::asm::inline::$func($($args),*),
+
+ #[cfg(not(feature = "inline-asm"))]
+ () => {
+ extern "C" {
+ fn $func($($args: $tys),*) $(-> $ret)?;
+ }
+
+ $func($($args),*)
+ },
+ }
+ }
+ }};
+}
diff --git a/src/cmse.rs b/src/cmse.rs
new file mode 100644
index 0000000..36d7447
--- /dev/null
+++ b/src/cmse.rs
@@ -0,0 +1,238 @@
+//! Cortex-M Security Extensions
+//!
+//! This module provides several helper functions to support Armv8-M and Armv8.1-M Security
+//! Extensions.
+//! Most of this implementation is directly inspired by the "Armv8-M Security Extensions:
+//! Requirements on Development Tools" document available here:
+//! https://developer.arm.com/docs/ecm0359818/latest
+//!
+//! Please note that the TT instruction support described in part 4 of the document linked above
+//! is not part of CMSE but is still present in this module. The TT instructions return the
+//! configuration of the Memory Protection Unit at an address.
+//!
+//! # Notes
+//!
+//! * Non-Secure Unprivileged code will always read zeroes from TestTarget and should not use it.
+//! * Non-Secure Privileged code can check current (AccessType::Current) and Non-Secure Unprivileged
+//! accesses (AccessType::Unprivileged).
+//! * Secure Unprivileged code can check Non-Secure Unprivileged accesses (AccessType::NonSecure).
+//! * Secure Privileged code can check all access types.
+//!
+//! # Example
+//!
+//! ```
+//! use cortex_m::cmse::{TestTarget, AccessType};
+//!
+//! // suspect_address was handed over by Non-Secure code, which asks a Secure function
+//! // to write to it. But is Non-Secure allowed to access it?
+//! let suspect_address_test = TestTarget::check(0xDEADBEEF as *mut u32,
+//!                                              AccessType::NonSecureUnprivileged);
+//! if suspect_address_test.ns_read_and_writable() {
+//!     // Non-Secure can read and write this address!
+//! }
+//! ```
+
+use crate::asm::{tt, tta, ttat, ttt};
+use bitfield::bitfield;
+
+/// Memory access behaviour: determines which privilege execution mode and which Memory
+/// Protection Unit (MPU) are used for the test.
+#[derive(PartialEq, Copy, Clone, Debug)]
+pub enum AccessType {
+ /// Access using current privilege level and reading from current security state MPU.
+ /// Uses the TT instruction.
+ Current,
+ /// Unprivileged access reading from current security state MPU. Uses the TTT instruction.
+ Unprivileged,
+ /// Access using current privilege level reading from Non-Secure MPU. Uses the TTA instruction.
+ /// Undefined if used from Non-Secure state.
+ NonSecure,
+    /// Unprivileged access reading from Non-Secure MPU. Uses the TTAT instruction.
+ /// Undefined if used from Non-Secure state.
+ NonSecureUnprivileged,
+}
+
+/// Abstraction of TT instructions and helper functions to determine the security and privilege
+/// attribute of a target address, accessed in different ways.
+#[derive(PartialEq, Copy, Clone, Debug)]
+pub struct TestTarget {
+ tt_resp: TtResp,
+ access_type: AccessType,
+}
+
+bitfield! {
+ /// Test Target Response Payload
+ ///
+ /// Provides the response payload from a TT, TTA, TTT or TTAT instruction.
+ #[derive(PartialEq, Copy, Clone)]
+ struct TtResp(u32);
+ impl Debug;
+ mregion, _: 7, 0;
+ sregion, _: 15, 8;
+ mrvalid, _: 16;
+ srvalid, _: 17;
+ r, _: 18;
+ rw, _: 19;
+ nsr, _: 20;
+ nsrw, _: 21;
+ s, _: 22;
+ irvalid, _: 23;
+ iregion, _: 31, 24;
+}
+
+impl TestTarget {
+    /// Creates a Test Target Response Payload by testing `addr` using `access_type`.
+ #[inline]
+ pub fn check(addr: *mut u32, access_type: AccessType) -> Self {
+ let tt_resp = match access_type {
+ AccessType::Current => TtResp(tt(addr)),
+ AccessType::Unprivileged => TtResp(ttt(addr)),
+ AccessType::NonSecure => TtResp(tta(addr)),
+ AccessType::NonSecureUnprivileged => TtResp(ttat(addr)),
+ };
+
+ TestTarget {
+ tt_resp,
+ access_type,
+ }
+ }
+
+    /// Creates a Test Target Response Payload by testing the address range from `addr` to
+    /// `addr + size - 1` using `access_type`.
+    /// Returns None if:
+    /// * the address range overlaps SAU, IDAU or MPU region boundaries
+    /// * size is 0
+    /// * addr + size - 1 overflows
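+    ///
+    /// # Example
+    ///
+    /// A minimal sketch checking a hypothetical 128-byte buffer (the address is made up):
+    ///
+    /// ```no_run
+    /// use cortex_m::cmse::{AccessType, TestTarget};
+    ///
+    /// if let Some(test) = TestTarget::check_range(0x2000_0000 as *mut u32, 128, AccessType::Current) {
+    ///     let _readable = test.readable();
+    /// }
+    /// ```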
+ #[inline]
+ pub fn check_range(addr: *mut u32, size: usize, access_type: AccessType) -> Option<Self> {
+ let begin: usize = addr as usize;
+ // Last address of the range (addr + size - 1). This also checks if size is 0.
+ let end: usize = begin.checked_add(size.checked_sub(1)?)?;
+
+ // Regions are aligned at 32-byte boundaries. If the address range fits in one 32-byte
+ // address line, a single TT instruction suffices. This is the case when the following
+ // constraint holds.
+ let single_check: bool = (begin % 32).checked_add(size)? <= 32usize;
+
+ let test_start = TestTarget::check(addr, access_type);
+
+ if single_check {
+ Some(test_start)
+ } else {
+ let test_end = TestTarget::check(end as *mut u32, access_type);
+ // Check that the range does not cross SAU, IDAU or MPU region boundaries.
+ if test_start != test_end {
+ None
+ } else {
+ Some(test_start)
+ }
+ }
+ }
+
+ /// Access type that was used for this test target.
+ #[inline]
+ pub fn access_type(self) -> AccessType {
+ self.access_type
+ }
+
+ /// Get the raw u32 value returned by the TT instruction used.
+ #[inline]
+ pub fn as_u32(self) -> u32 {
+ self.tt_resp.0
+ }
+
+ /// Read accessibility of the target address. Only returns the MPU settings without checking
+ /// the Security state of the target.
+ /// For Unprivileged and NonSecureUnprivileged access types, returns the permissions for
+ /// unprivileged access, regardless of whether the current mode is privileged or unprivileged.
+ /// Returns false if the TT instruction was executed from an unprivileged mode
+ /// and the NonSecure access type was not specified.
+ /// Returns false if the address matches multiple MPU regions.
+ #[inline]
+ pub fn readable(self) -> bool {
+ self.tt_resp.r()
+ }
+
+ /// Read and write accessibility of the target address. Only returns the MPU settings without
+ /// checking the Security state of the target.
+ /// For Unprivileged and NonSecureUnprivileged access types, returns the permissions for
+ /// unprivileged access, regardless of whether the current mode is privileged or unprivileged.
+ /// Returns false if the TT instruction was executed from an unprivileged mode
+ /// and the NonSecure access type was not specified.
+ /// Returns false if the address matches multiple MPU regions.
+ #[inline]
+ pub fn read_and_writable(self) -> bool {
+ self.tt_resp.rw()
+ }
+
+    /// Indicate the MPU region number containing the target address.
+    /// Returns None if the value is not valid:
+    /// * the MPU is not implemented or MPU_CTRL.ENABLE is set to zero
+    /// * the register argument specified by the MREGION field does not match any enabled MPU regions
+    /// * the address matched multiple MPU regions
+    /// * the TT instruction was executed from an unprivileged mode and the A flag was not specified.
+    #[inline]
+    pub fn mpu_region(self) -> Option<u8> {
+        if self.tt_resp.mrvalid() {
+            // Cast is safe as the MREGION field is defined on 8 bits.
+            Some(self.tt_resp.mregion() as u8)
+        } else {
+            None
+        }
+    }
+
+    /// Indicates the Security attribute of the target address. Independent of AccessType.
+    /// Always false when the test is performed from the Non-Secure state.
+ #[inline]
+ pub fn secure(self) -> bool {
+ self.tt_resp.s()
+ }
+
+ /// Non-Secure Read accessibility of the target address.
+ /// Same as readable() && !secure()
+ #[inline]
+ pub fn ns_readable(self) -> bool {
+ self.tt_resp.nsr()
+ }
+
+ /// Non-Secure Read and Write accessibility of the target address.
+ /// Same as read_and_writable() && !secure()
+ #[inline]
+ pub fn ns_read_and_writable(self) -> bool {
+ self.tt_resp.nsrw()
+ }
+
+ /// Indicate the IDAU region number containing the target address. Independent of AccessType.
+ /// Returns None if the value is not valid:
+ /// * the IDAU cannot provide a region number
+ /// * the address is exempt from security attribution
+ /// * the test target is done from Non-Secure state
+ #[inline]
+ pub fn idau_region(self) -> Option<u8> {
+ if self.tt_resp.irvalid() {
+ // Cast is safe as IREGION field is defined on 8 bits.
+ Some(self.tt_resp.iregion() as u8)
+ } else {
+ None
+ }
+ }
+
+ /// Indicate the SAU region number containing the target address. Independent of AccessType.
+ /// Returns None if the value is not valid:
+ /// * SAU_CTRL.ENABLE is set to zero
+ /// * the register argument specified in the SREGION field does not match any enabled SAU regions
+ /// * the address specified matches multiple enabled SAU regions
+ /// * the address specified by the SREGION field is exempt from the secure memory attribution
+ /// * the TT instruction was executed from the Non-secure state or the Security Extension is not
+ /// implemented
+ #[inline]
+ pub fn sau_region(self) -> Option<u8> {
+ if self.tt_resp.srvalid() {
+ // Cast is safe as SREGION field is defined on 8 bits.
+ Some(self.tt_resp.sregion() as u8)
+ } else {
+ None
+ }
+ }
+}
diff --git a/src/delay.rs b/src/delay.rs
new file mode 100644
index 0000000..66a63bf
--- /dev/null
+++ b/src/delay.rs
@@ -0,0 +1,136 @@
+//! A delay driver based on SysTick.
+
+use crate::peripheral::{syst::SystClkSource, SYST};
+use embedded_hal::blocking::delay::{DelayMs, DelayUs};
+
+/// System timer (SysTick) as a delay provider.
+pub struct Delay {
+ syst: SYST,
+ frequency: u32,
+}
+
+impl Delay {
+ /// Configures the system timer (SysTick) as a delay provider.
+ ///
+    /// `ahb_frequency` is the frequency of the AHB bus in Hz.
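+    ///
+    /// # Example
+    ///
+    /// A minimal sketch, assuming a (made-up) 8 MHz AHB clock:
+    ///
+    /// ```no_run
+    /// use cortex_m::delay::Delay;
+    ///
+    /// let peripherals = cortex_m::Peripherals::take().unwrap();
+    /// let mut delay = Delay::new(peripherals.SYST, 8_000_000);
+    /// delay.delay_ms(500u32);
+    /// ```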
+ #[inline]
+ pub fn new(syst: SYST, ahb_frequency: u32) -> Self {
+ Self::with_source(syst, ahb_frequency, SystClkSource::Core)
+ }
+
+ /// Configures the system timer (SysTick) as a delay provider
+ /// with a clock source.
+ ///
+ /// `frequency` is the frequency of your `clock_source` in Hz.
+ #[inline]
+ pub fn with_source(mut syst: SYST, frequency: u32, clock_source: SystClkSource) -> Self {
+ syst.set_clock_source(clock_source);
+
+ Delay { syst, frequency }
+ }
+
+ /// Releases the system timer (SysTick) resource.
+ #[inline]
+ pub fn free(self) -> SYST {
+ self.syst
+ }
+
+ /// Delay using the Cortex-M systick for a certain duration, in µs.
+ #[allow(clippy::missing_inline_in_public_items)]
+ pub fn delay_us(&mut self, us: u32) {
+ let ticks = (u64::from(us)) * (u64::from(self.frequency)) / 1_000_000;
+
+ let full_cycles = ticks >> 24;
+ if full_cycles > 0 {
+ self.syst.set_reload(0xffffff);
+ self.syst.clear_current();
+ self.syst.enable_counter();
+
+ for _ in 0..full_cycles {
+ while !self.syst.has_wrapped() {}
+ }
+ }
+
+ let ticks = (ticks & 0xffffff) as u32;
+ if ticks > 1 {
+ self.syst.set_reload(ticks - 1);
+ self.syst.clear_current();
+ self.syst.enable_counter();
+
+ while !self.syst.has_wrapped() {}
+ }
+
+ self.syst.disable_counter();
+ }
+
+ /// Delay using the Cortex-M systick for a certain duration, in ms.
+ #[inline]
+ pub fn delay_ms(&mut self, mut ms: u32) {
+ // 4294967 is the highest u32 value which you can multiply by 1000 without overflow
+ while ms > 4294967 {
+ self.delay_us(4294967000u32);
+ ms -= 4294967;
+ }
+ self.delay_us(ms * 1_000);
+ }
+}
+
+impl DelayMs<u32> for Delay {
+ #[inline]
+ fn delay_ms(&mut self, ms: u32) {
+ Delay::delay_ms(self, ms);
+ }
+}
+
+// This is a workaround to allow `delay_ms(42)` construction without specifying a type.
+impl DelayMs<i32> for Delay {
+ #[inline(always)]
+ fn delay_ms(&mut self, ms: i32) {
+ assert!(ms >= 0);
+ Delay::delay_ms(self, ms as u32);
+ }
+}
+
+impl DelayMs<u16> for Delay {
+ #[inline(always)]
+ fn delay_ms(&mut self, ms: u16) {
+ Delay::delay_ms(self, u32::from(ms));
+ }
+}
+
+impl DelayMs<u8> for Delay {
+ #[inline(always)]
+ fn delay_ms(&mut self, ms: u8) {
+ Delay::delay_ms(self, u32::from(ms));
+ }
+}
+
+impl DelayUs<u32> for Delay {
+ #[inline]
+ fn delay_us(&mut self, us: u32) {
+ Delay::delay_us(self, us);
+ }
+}
+
+// This is a workaround to allow `delay_us(42)` construction without specifying a type.
+impl DelayUs<i32> for Delay {
+ #[inline(always)]
+ fn delay_us(&mut self, us: i32) {
+ assert!(us >= 0);
+ Delay::delay_us(self, us as u32);
+ }
+}
+
+impl DelayUs<u16> for Delay {
+ #[inline(always)]
+ fn delay_us(&mut self, us: u16) {
+ Delay::delay_us(self, u32::from(us))
+ }
+}
+
+impl DelayUs<u8> for Delay {
+ #[inline(always)]
+ fn delay_us(&mut self, us: u8) {
+ Delay::delay_us(self, u32::from(us))
+ }
+}
diff --git a/src/interrupt.rs b/src/interrupt.rs
new file mode 100644
index 0000000..68719ec
--- /dev/null
+++ b/src/interrupt.rs
@@ -0,0 +1,64 @@
+//! Interrupts
+
+pub use bare_metal::{CriticalSection, Mutex};
+
+/// Trait for enums of external interrupt numbers.
+///
+/// This trait should be implemented by a peripheral access crate (PAC)
+/// on its enum of available external interrupts for a specific device.
+/// Each variant must convert to the `u16` of its interrupt number,
+/// which is its exception number minus 16.
+///
+/// # Safety
+///
+/// This trait must only be implemented on enums of device interrupts. Each
+/// enum variant must represent a distinct value (no duplicates are permitted),
+/// and must always return the same value (do not change at runtime).
+///
+/// These requirements ensure safe nesting of critical sections.
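+///
+/// # Example
+///
+/// A minimal sketch of a PAC-side implementation; the device and interrupt numbers are
+/// made up:
+///
+/// ```
+/// use cortex_m::interrupt::InterruptNumber;
+///
+/// #[derive(Copy, Clone)]
+/// #[repr(u16)]
+/// enum Interrupt {
+///     Uart0 = 0,
+///     Timer1 = 1,
+/// }
+///
+/// unsafe impl InterruptNumber for Interrupt {
+///     fn number(self) -> u16 {
+///         self as u16
+///     }
+/// }
+/// ```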
+pub unsafe trait InterruptNumber: Copy {
+ /// Return the interrupt number associated with this variant.
+ ///
+ /// See trait documentation for safety requirements.
+ fn number(self) -> u16;
+}
+
+/// Disables all interrupts
+#[inline]
+pub fn disable() {
+ call_asm!(__cpsid());
+}
+
+/// Enables all the interrupts
+///
+/// # Safety
+///
+/// - Do not call this function inside an `interrupt::free` critical section
+#[inline]
+pub unsafe fn enable() {
+ call_asm!(__cpsie());
+}
+
+/// Execute closure `f` in an interrupt-free context.
+///
+/// This is also known as a "critical section".
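+///
+/// # Example
+///
+/// A minimal sketch sharing a counter with an interrupt handler; `COUNTER` is a made-up
+/// static for illustration:
+///
+/// ```no_run
+/// use core::cell::Cell;
+/// use cortex_m::interrupt::{self, Mutex};
+///
+/// static COUNTER: Mutex<Cell<u32>> = Mutex::new(Cell::new(0));
+///
+/// interrupt::free(|cs| {
+///     let counter = COUNTER.borrow(cs);
+///     counter.set(counter.get() + 1);
+/// });
+/// ```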
+#[inline]
+pub fn free<F, R>(f: F) -> R
+where
+ F: FnOnce(&CriticalSection) -> R,
+{
+ let primask = crate::register::primask::read();
+
+ // disable interrupts
+ disable();
+
+ let r = f(unsafe { &CriticalSection::new() });
+
+ // If the interrupts were active before our `disable` call, then re-enable
+ // them. Otherwise, keep them disabled
+ if primask.is_active() {
+ unsafe { enable() }
+ }
+
+ r
+}
diff --git a/src/itm.rs b/src/itm.rs
new file mode 100644
index 0000000..72cb0d9
--- /dev/null
+++ b/src/itm.rs
@@ -0,0 +1,158 @@
+//! Instrumentation Trace Macrocell
+//!
+//! **NOTE** This module is only available on ARMv7-M and newer.
+
+use core::{fmt, ptr, slice};
+
+use crate::peripheral::itm::Stim;
+
+// NOTE assumes that `bytes` is 32-bit aligned
+unsafe fn write_words(stim: &mut Stim, bytes: &[u32]) {
+ let mut p = bytes.as_ptr();
+ for _ in 0..bytes.len() {
+ while !stim.is_fifo_ready() {}
+ stim.write_u32(ptr::read(p));
+ p = p.offset(1);
+ }
+}
+
+/// Writes an aligned byte slice to the ITM.
+///
+/// `buffer` must be 4-byte aligned.
+unsafe fn write_aligned_impl(port: &mut Stim, buffer: &[u8]) {
+ let len = buffer.len();
+
+ if len == 0 {
+ return;
+ }
+
+ let split = len & !0b11;
+ #[allow(clippy::cast_ptr_alignment)]
+ write_words(
+ port,
+ slice::from_raw_parts(buffer.as_ptr() as *const u32, split >> 2),
+ );
+
+ // 3 bytes or less left
+ let mut left = len & 0b11;
+ let mut ptr = buffer.as_ptr().add(split);
+
+ // at least 2 bytes left
+ if left > 1 {
+ while !port.is_fifo_ready() {}
+
+ #[allow(clippy::cast_ptr_alignment)]
+ port.write_u16(ptr::read(ptr as *const u16));
+
+ ptr = ptr.offset(2);
+ left -= 2;
+ }
+
+ // final byte
+ if left == 1 {
+ while !port.is_fifo_ready() {}
+ port.write_u8(*ptr);
+ }
+}
+
+struct Port<'p>(&'p mut Stim);
+
+impl<'p> fmt::Write for Port<'p> {
+ #[inline]
+ fn write_str(&mut self, s: &str) -> fmt::Result {
+ write_all(self.0, s.as_bytes());
+ Ok(())
+ }
+}
+
+/// A wrapper type that aligns its contents on a 4-Byte boundary.
+///
+/// ITM transfers are most efficient when the data is 4-Byte-aligned. This type provides an easy
+/// way to accomplish and enforce such an alignment.
+#[repr(align(4))]
+pub struct Aligned<T: ?Sized>(pub T);
+
+/// Writes `buffer` to an ITM port.
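+///
+/// # Example
+///
+/// A minimal sketch writing through stimulus port 0 (assumes the target implements the ITM):
+///
+/// ```no_run
+/// use cortex_m::itm;
+///
+/// let mut peripherals = cortex_m::Peripherals::take().unwrap();
+/// let stim = &mut peripherals.ITM.stim[0];
+/// itm::write_all(stim, b"Hello, world!\n");
+/// ```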
+#[allow(clippy::missing_inline_in_public_items)]
+pub fn write_all(port: &mut Stim, buffer: &[u8]) {
+ unsafe {
+ let mut len = buffer.len();
+ let mut ptr = buffer.as_ptr();
+
+ if len == 0 {
+ return;
+ }
+
+ // 0x01 OR 0x03
+ if ptr as usize % 2 == 1 {
+ while !port.is_fifo_ready() {}
+ port.write_u8(*ptr);
+
+ // 0x02 OR 0x04
+ ptr = ptr.offset(1);
+ len -= 1;
+ }
+
+ // 0x02
+ if ptr as usize % 4 == 2 {
+ if len > 1 {
+ // at least 2 bytes
+ while !port.is_fifo_ready() {}
+
+ // We checked the alignment above, so this is safe
+ #[allow(clippy::cast_ptr_alignment)]
+ port.write_u16(ptr::read(ptr as *const u16));
+
+ // 0x04
+ ptr = ptr.offset(2);
+ len -= 2;
+ } else {
+ if len == 1 {
+ // last byte
+ while !port.is_fifo_ready() {}
+ port.write_u8(*ptr);
+ }
+
+ return;
+ }
+ }
+
+ // The remaining data is 4-byte aligned, but might not be a multiple of 4 bytes
+ write_aligned_impl(port, slice::from_raw_parts(ptr, len));
+ }
+}
+
+/// Writes a 4-byte aligned `buffer` to an ITM port.
+///
+/// # Examples
+///
+/// ```no_run
+/// # use cortex_m::{itm::{self, Aligned}, peripheral::ITM};
+/// # let port = unsafe { &mut (*ITM::PTR).stim[0] };
+/// let mut buffer = Aligned([0; 14]);
+///
+/// buffer.0.copy_from_slice(b"Hello, world!\n");
+///
+/// itm::write_aligned(port, &buffer);
+///
+/// // Or equivalently
+/// itm::write_aligned(port, &Aligned(*b"Hello, world!\n"));
+/// ```
+#[allow(clippy::missing_inline_in_public_items)]
+pub fn write_aligned(port: &mut Stim, buffer: &Aligned<[u8]>) {
+ unsafe { write_aligned_impl(port, &buffer.0) }
+}
+
+/// Writes `fmt::Arguments` to the ITM `port`
+#[inline]
+pub fn write_fmt(port: &mut Stim, args: fmt::Arguments) {
+ use core::fmt::Write;
+
+ Port(port).write_fmt(args).ok();
+}
+
+/// Writes a string to the ITM `port`
+#[inline]
+pub fn write_str(port: &mut Stim, string: &str) {
+ write_all(port, string.as_bytes())
+}
diff --git a/src/lib.rs b/src/lib.rs
new file mode 100644
index 0000000..0914639
--- /dev/null
+++ b/src/lib.rs
@@ -0,0 +1,101 @@
+//! Low level access to Cortex-M processors
+//!
+//! This crate provides:
+//!
+//! - Access to core peripherals like NVIC, SCB and SysTick.
+//! - Access to core registers like CONTROL, MSP and PSR.
+//! - Interrupt manipulation mechanisms
+//! - Safe wrappers around Cortex-M specific instructions like `bkpt`
+//!
+//! # Optional features
+//!
+//! ## `inline-asm`
+//!
+//! When this feature is enabled, the functions inside the `asm` and `register` modules are
+//! implemented with inline assembly (`asm!`) instead of external assembly (FFI into separate
+//! assembly files pre-compiled using `arm-none-eabi-gcc`). The advantages of enabling
+//! `inline-asm` are:
+//! are:
+//!
+//! - Reduced overhead. FFI eliminates the possibility of inlining so all operations include a
+//! function call overhead when `inline-asm` is not enabled.
+//!
+//! - Some of the `register` API becomes available only when `inline-asm` is enabled. Check the
+//!   API docs for details.
+//!
+//! The disadvantage is that `inline-asm` requires a nightly toolchain.
+//!
+//! ## `cm7-r0p1`
+//!
+//! This feature enables workarounds for errata found on Cortex-M7 chips with revision r0p1. Some
+//! functions in this crate only work correctly on those chips if this Cargo feature is enabled
+//! (the functions are documented accordingly).
+//!
+//! ## `linker-plugin-lto`
+//!
+//! This feature links against prebuilt assembly blobs that are compatible with [Linker-Plugin LTO].
+//! This allows inlining assembly routines into the caller, even without the `inline-asm` feature,
+//! and works on stable Rust (but note the drawbacks below!).
+//!
+//! If you want to use this feature, you need to be aware of a few things:
+//!
+//! - You need to make sure that `-Clinker-plugin-lto` is passed to rustc. Please refer to the
+//! [Linker-Plugin LTO] documentation for details.
+//!
+//! - You have to use a Rust version whose LLVM version is compatible with the toolchain in
+//! `asm-toolchain`.
+//!
+//! - Due to a [Rust bug][rust-lang/rust#75940] in compiler versions **before 1.49**, this option
+//! does not work with optimization levels `s` and `z`.
+//!
+//! [Linker-Plugin LTO]: https://doc.rust-lang.org/stable/rustc/linker-plugin-lto.html
+//! [rust-lang/rust#75940]: https://github.com/rust-lang/rust/issues/75940
+//!
+//! # Minimum Supported Rust Version (MSRV)
+//!
+//! This crate is guaranteed to compile on stable Rust 1.40 and up. It *might*
+//! compile with older versions but that may change in any new patch release.
+
+#![cfg_attr(feature = "inline-asm", feature(asm))]
+#![deny(missing_docs)]
+#![no_std]
+#![allow(clippy::identity_op)]
+#![allow(clippy::missing_safety_doc)]
+// Prevent clippy from complaining about empty match expressions that are used for cfg gating.
+#![allow(clippy::match_single_binding)]
+// This makes clippy warn about public functions which are not #[inline].
+//
+// Almost all functions in this crate result in trivial or even no assembly.
+// These functions should be #[inline].
+//
+// If you do add a function that's not supposed to be #[inline], you can add
+// #[allow(clippy::missing_inline_in_public_items)] in front of it to add an
+// exception to clippy's rules.
+//
+// This should be done in case of:
+// - A function containing non-trivial logic (such as itm::write_all); or
+// - A generated #[derive(Debug)] function (in which case the attribute needs
+// to be applied to the struct).
+#![deny(clippy::missing_inline_in_public_items)]
+// Don't warn about feature(asm) being stable on Rust >= 1.59.0
+#![allow(stable_features)]
+
+extern crate bare_metal;
+extern crate volatile_register;
+
+#[macro_use]
+mod call_asm;
+#[macro_use]
+mod macros;
+
+pub mod asm;
+#[cfg(armv8m)]
+pub mod cmse;
+pub mod delay;
+pub mod interrupt;
+#[cfg(all(not(armv6m), not(armv8m_base)))]
+pub mod itm;
+pub mod peripheral;
+pub mod prelude;
+pub mod register;
+
+pub use crate::peripheral::Peripherals;
diff --git a/src/macros.rs b/src/macros.rs
new file mode 100644
index 0000000..512c932
--- /dev/null
+++ b/src/macros.rs
@@ -0,0 +1,114 @@
+/// Macro for sending a formatted string through an ITM channel
+#[macro_export]
+macro_rules! iprint {
+ ($channel:expr, $s:expr) => {
+ $crate::itm::write_str($channel, $s);
+ };
+ ($channel:expr, $($arg:tt)*) => {
+ $crate::itm::write_fmt($channel, format_args!($($arg)*));
+ };
+}
+
+/// Macro for sending a formatted string through an ITM channel, with a newline.
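+///
+/// # Example
+///
+/// A minimal sketch writing to stimulus port 0 (assumes the target implements the ITM):
+///
+/// ```no_run
+/// use cortex_m::iprintln;
+///
+/// let mut peripherals = cortex_m::Peripherals::take().unwrap();
+/// let stim = &mut peripherals.ITM.stim[0];
+/// iprintln!(stim, "answer: {}", 42);
+/// ```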
+#[macro_export]
+macro_rules! iprintln {
+ ($channel:expr) => {
+ $crate::itm::write_str($channel, "\n");
+ };
+ ($channel:expr, $fmt:expr) => {
+ $crate::itm::write_str($channel, concat!($fmt, "\n"));
+ };
+ ($channel:expr, $fmt:expr, $($arg:tt)*) => {
+ $crate::itm::write_fmt($channel, format_args!(concat!($fmt, "\n"), $($arg)*));
+ };
+}
+
+/// Macro to create a mutable reference to a statically allocated value
+///
+/// This macro returns a value with type `Option<&'static mut $ty>`. `Some($expr)` will be returned
+/// the first time the macro is executed; further calls will return `None`. To avoid `unwrap`ping a
+/// `None` variant the caller must ensure that the macro is called from a function that's executed
+/// at most once in the whole lifetime of the program.
+///
+/// # Notes
+/// This macro is unsound on multi-core systems.
+///
+/// For debuggability, you can set an explicit name for a singleton. This name only shows up in
+/// the debugger and is not referenceable from other code. See example below.
+///
+/// # Example
+///
+/// ``` no_run
+/// use cortex_m::singleton;
+///
+/// fn main() {
+/// // OK if `main` is executed only once
+/// let x: &'static mut bool = singleton!(: bool = false).unwrap();
+///
+/// let y = alias();
+/// // BAD this second call to `alias` will definitely `panic!`
+/// let y_alias = alias();
+/// }
+///
+/// fn alias() -> &'static mut bool {
+/// singleton!(: bool = false).unwrap()
+/// }
+///
+/// fn singleton_with_name() {
+/// // A name only for debugging purposes
+/// singleton!(FOO_BUFFER: [u8; 1024] = [0u8; 1024]);
+/// }
+/// ```
+#[macro_export]
+macro_rules! singleton {
+ ($name:ident: $ty:ty = $expr:expr) => {
+ $crate::interrupt::free(|_| {
+ // this is a tuple of a MaybeUninit and a bool because using an Option here is
+ // problematic: Due to niche-optimization, an Option could end up producing a non-zero
+ // initializer value which would move the entire static from `.bss` into `.data`...
+ static mut $name: (::core::mem::MaybeUninit<$ty>, bool) =
+ (::core::mem::MaybeUninit::uninit(), false);
+
+ #[allow(unsafe_code)]
+ let used = unsafe { $name.1 };
+ if used {
+ None
+ } else {
+ let expr = $expr;
+
+ #[allow(unsafe_code)]
+ unsafe {
+ $name.1 = true;
+ $name.0 = ::core::mem::MaybeUninit::new(expr);
+ Some(&mut *$name.0.as_mut_ptr())
+ }
+ }
+ })
+ };
+ (: $ty:ty = $expr:expr) => {
+ $crate::singleton!(VAR: $ty = $expr)
+ };
+}
+
+/// ``` compile_fail
+/// use cortex_m::singleton;
+///
+/// fn foo() {
+/// // check that the call to `uninitialized` requires unsafe
+/// singleton!(: u8 = std::mem::uninitialized());
+/// }
+/// ```
+#[allow(dead_code)]
+const CFAIL: () = ();
+
+/// ```
+/// #![deny(unsafe_code)]
+/// use cortex_m::singleton;
+///
+/// fn foo() {
+/// // check that calls to `singleton!` don't trip the `unsafe_code` lint
+/// singleton!(: u8 = 0);
+/// }
+/// ```
+#[allow(dead_code)]
+const CPASS: () = ();
diff --git a/src/peripheral/ac.rs b/src/peripheral/ac.rs
new file mode 100644
index 0000000..1ac5be1
--- /dev/null
+++ b/src/peripheral/ac.rs
@@ -0,0 +1,93 @@
+//! Cortex-M7 TCM and Cache access control.
+
+use volatile_register::RW;
+
+/// Register block
+#[repr(C)]
+pub struct RegisterBlock {
+ /// Instruction Tightly-Coupled Memory Control Register
+ pub itcmcr: RW<u32>,
+ /// Data Tightly-Coupled Memory Control Register
+ pub dtcmcr: RW<u32>,
+ /// AHBP Control Register
+ pub ahbpcr: RW<u32>,
+ /// L1 Cache Control Register
+ pub cacr: RW<u32>,
+ /// AHB Slave Control Register
+ pub ahbscr: RW<u32>,
+ reserved0: u32,
+    /// Auxiliary Bus Fault Status Register
+ pub abfsr: RW<u32>,
+}
+
+/// ITCMCR and DTCMCR TCM enable bit.
+pub const TCM_EN: u32 = 1;
+
+/// ITCMCR and DTCMCR TCM read-modify-write bit.
+pub const TCM_RMW: u32 = 2;
+
+/// ITCMCR and DTCMCR TCM retry phase enable bit.
+pub const TCM_RETEN: u32 = 4;
+
+/// ITCMCR and DTCMCR TCM size mask.
+pub const TCM_SZ_MASK: u32 = 0x78;
+
+/// ITCMCR and DTCMCR TCM size shift.
+pub const TCM_SZ_SHIFT: usize = 3;
+
+/// AHBPCR AHBP enable bit.
+pub const AHBPCR_EN: u32 = 1;
+
+/// AHBPCR AHBP size mask.
+pub const AHBPCR_SZ_MASK: u32 = 0x0e;
+
+/// AHBPCR AHBP size shift.
+pub const AHBPCR_SZ_SHIFT: usize = 1;
+
+/// CACR Shared cacheable-is-WT for data cache.
+pub const CACR_SIWT: u32 = 1;
+
+/// CACR ECC in the instruction and data cache (disable).
+pub const CACR_ECCDIS: u32 = 2;
+
+/// CACR Force Write-Through in the data cache.
+pub const CACR_FORCEWT: u32 = 4;
+
+/// AHBSCR AHBS prioritization control mask.
+pub const AHBSCR_CTL_MASK: u32 = 0x03;
+
+/// AHBSCR AHBS prioritization control shift.
+pub const AHBSCR_CTL_SHIFT: usize = 0;
+
+/// AHBSCR Threshold execution priority for AHBS traffic demotion, mask.
+pub const AHBSCR_TPRI_MASK: u32 = 0x7fc;
+
+/// AHBSCR Threshold execution priority for AHBS traffic demotion, shift.
+pub const AHBSCR_TPRI_SHIFT: usize = 2;
+
+/// AHBSCR Fairness counter initialization value, mask.
+pub const AHBSCR_INITCOUNT_MASK: u32 = 0xf800;
+
+/// AHBSCR Fairness counter initialization value, shift.
+pub const AHBSCR_INITCOUNT_SHIFT: usize = 11;
+
+/// ABFSR Async fault on ITCM interface.
+pub const ABFSR_ITCM: u32 = 1;
+
+/// ABFSR Async fault on DTCM interface.
+pub const ABFSR_DTCM: u32 = 2;
+
+/// ABFSR Async fault on AHBP interface.
+pub const ABFSR_AHBP: u32 = 4;
+
+/// ABFSR Async fault on AXIM interface.
+pub const ABFSR_AXIM: u32 = 8;
+
+/// ABFSR Async fault on EPPB interface.
+pub const ABFSR_EPPB: u32 = 16;
+
+/// ABFSR Indicates the type of fault on the AXIM interface, mask.
+pub const ABFSR_AXIMTYPE_MASK: u32 = 0x300;
+
+/// ABFSR Indicates the type of fault on the AXIM interface, shift.
+pub const ABFSR_AXIMTYPE_SHIFT: usize = 8;
diff --git a/src/peripheral/cbp.rs b/src/peripheral/cbp.rs
new file mode 100644
index 0000000..5aee544
--- /dev/null
+++ b/src/peripheral/cbp.rs
@@ -0,0 +1,138 @@
+//! Cache and branch predictor maintenance operations
+//!
+//! *NOTE* Not available on Armv6-M.
+
+use volatile_register::WO;
+
+use crate::peripheral::CBP;
+
+/// Register block
+#[repr(C)]
+pub struct RegisterBlock {
+ /// I-cache invalidate all to PoU
+ pub iciallu: WO<u32>,
+ reserved0: u32,
+ /// I-cache invalidate by MVA to PoU
+ pub icimvau: WO<u32>,
+ /// D-cache invalidate by MVA to PoC
+ pub dcimvac: WO<u32>,
+ /// D-cache invalidate by set-way
+ pub dcisw: WO<u32>,
+ /// D-cache clean by MVA to PoU
+ pub dccmvau: WO<u32>,
+ /// D-cache clean by MVA to PoC
+ pub dccmvac: WO<u32>,
+ /// D-cache clean by set-way
+ pub dccsw: WO<u32>,
+ /// D-cache clean and invalidate by MVA to PoC
+ pub dccimvac: WO<u32>,
+ /// D-cache clean and invalidate by set-way
+ pub dccisw: WO<u32>,
+ /// Branch predictor invalidate all
+ pub bpiall: WO<u32>,
+}
+
+const CBP_SW_WAY_POS: u32 = 30;
+const CBP_SW_WAY_MASK: u32 = 0x3 << CBP_SW_WAY_POS;
+const CBP_SW_SET_POS: u32 = 5;
+const CBP_SW_SET_MASK: u32 = 0x1FF << CBP_SW_SET_POS;
+
+impl CBP {
+ /// I-cache invalidate all to PoU
+ #[inline(always)]
+ pub fn iciallu(&mut self) {
+ unsafe { self.iciallu.write(0) };
+ }
+
+ /// I-cache invalidate by MVA to PoU
+ #[inline(always)]
+ pub fn icimvau(&mut self, mva: u32) {
+ unsafe { self.icimvau.write(mva) };
+ }
+
+ /// D-cache invalidate by MVA to PoC
+ #[inline(always)]
+ pub unsafe fn dcimvac(&mut self, mva: u32) {
+ self.dcimvac.write(mva);
+ }
+
+ /// D-cache invalidate by set-way
+ ///
+    /// `set` is masked to be between 0 and 511, and `way` between 0 and 3.
+ #[inline(always)]
+ pub unsafe fn dcisw(&mut self, set: u16, way: u16) {
+ // The ARMv7-M Architecture Reference Manual, as of Revision E.b, says these set/way
+ // operations have a register data format which depends on the implementation's
+ // associativity and number of sets. Specifically the 'way' and 'set' fields have
+ // offsets 32-log2(ASSOCIATIVITY) and log2(LINELEN) respectively.
+ //
+ // However, in Cortex-M7 devices, these offsets are fixed at 30 and 5, as per the Cortex-M7
+ // Generic User Guide section 4.8.3. Since no other ARMv7-M implementations except the
+ // Cortex-M7 have a DCACHE or ICACHE at all, it seems safe to do the same thing as the
+ // CMSIS-Core implementation and use fixed values.
+ self.dcisw.write(
+ ((u32::from(way) & (CBP_SW_WAY_MASK >> CBP_SW_WAY_POS)) << CBP_SW_WAY_POS)
+ | ((u32::from(set) & (CBP_SW_SET_MASK >> CBP_SW_SET_POS)) << CBP_SW_SET_POS),
+ );
+ }
+
+ /// D-cache clean by MVA to PoU
+ #[inline(always)]
+ pub fn dccmvau(&mut self, mva: u32) {
+ unsafe {
+ self.dccmvau.write(mva);
+ }
+ }
+
+ /// D-cache clean by MVA to PoC
+ #[inline(always)]
+ pub fn dccmvac(&mut self, mva: u32) {
+ unsafe {
+ self.dccmvac.write(mva);
+ }
+ }
+
+ /// D-cache clean by set-way
+ ///
+    /// `set` is masked to be between 0 and 511, and `way` between 0 and 3.
+ #[inline(always)]
+ pub fn dccsw(&mut self, set: u16, way: u16) {
+ // See comment for dcisw() about the format here
+ unsafe {
+ self.dccsw.write(
+ ((u32::from(way) & (CBP_SW_WAY_MASK >> CBP_SW_WAY_POS)) << CBP_SW_WAY_POS)
+ | ((u32::from(set) & (CBP_SW_SET_MASK >> CBP_SW_SET_POS)) << CBP_SW_SET_POS),
+ );
+ }
+ }
+
+ /// D-cache clean and invalidate by MVA to PoC
+ #[inline(always)]
+ pub fn dccimvac(&mut self, mva: u32) {
+ unsafe {
+ self.dccimvac.write(mva);
+ }
+ }
+
+ /// D-cache clean and invalidate by set-way
+ ///
+    /// `set` is masked to be between 0 and 511, and `way` between 0 and 3.
+ #[inline(always)]
+ pub fn dccisw(&mut self, set: u16, way: u16) {
+ // See comment for dcisw() about the format here
+ unsafe {
+ self.dccisw.write(
+ ((u32::from(way) & (CBP_SW_WAY_MASK >> CBP_SW_WAY_POS)) << CBP_SW_WAY_POS)
+ | ((u32::from(set) & (CBP_SW_SET_MASK >> CBP_SW_SET_POS)) << CBP_SW_SET_POS),
+ );
+ }
+ }
+
+ /// Branch predictor invalidate all
+ #[inline(always)]
+ pub fn bpiall(&mut self) {
+ unsafe {
+ self.bpiall.write(0);
+ }
+ }
+}
diff --git a/src/peripheral/cpuid.rs b/src/peripheral/cpuid.rs
new file mode 100644
index 0000000..db85566
--- /dev/null
+++ b/src/peripheral/cpuid.rs
@@ -0,0 +1,140 @@
+//! CPUID
+
+use volatile_register::RO;
+#[cfg(not(armv6m))]
+use volatile_register::RW;
+
+#[cfg(not(armv6m))]
+use crate::peripheral::CPUID;
+
+/// Register block
+#[repr(C)]
+pub struct RegisterBlock {
+ /// CPUID base
+ pub base: RO<u32>,
+
+ _reserved0: [u32; 15],
+
+ /// Processor Feature (not present on Cortex-M0 variants)
+ #[cfg(not(armv6m))]
+ pub pfr: [RO<u32>; 2],
+ #[cfg(armv6m)]
+ _reserved1: [u32; 2],
+
+ /// Debug Feature (not present on Cortex-M0 variants)
+ #[cfg(not(armv6m))]
+ pub dfr: RO<u32>,
+ #[cfg(armv6m)]
+ _reserved2: u32,
+
+ /// Auxiliary Feature (not present on Cortex-M0 variants)
+ #[cfg(not(armv6m))]
+ pub afr: RO<u32>,
+ #[cfg(armv6m)]
+ _reserved3: u32,
+
+ /// Memory Model Feature (not present on Cortex-M0 variants)
+ #[cfg(not(armv6m))]
+ pub mmfr: [RO<u32>; 4],
+ #[cfg(armv6m)]
+ _reserved4: [u32; 4],
+
+ /// Instruction Set Attribute (not present on Cortex-M0 variants)
+ #[cfg(not(armv6m))]
+ pub isar: [RO<u32>; 5],
+ #[cfg(armv6m)]
+ _reserved5: [u32; 5],
+
+ _reserved6: u32,
+
+ /// Cache Level ID (only present on Cortex-M7)
+ #[cfg(not(armv6m))]
+ pub clidr: RO<u32>,
+
+ /// Cache Type (only present on Cortex-M7)
+ #[cfg(not(armv6m))]
+ pub ctr: RO<u32>,
+
+ /// Cache Size ID (only present on Cortex-M7)
+ #[cfg(not(armv6m))]
+ pub ccsidr: RO<u32>,
+
+ /// Cache Size Selection (only present on Cortex-M7)
+ #[cfg(not(armv6m))]
+ pub csselr: RW<u32>,
+}
+
+/// Type of cache to select on CSSELR writes.
+#[cfg(not(armv6m))]
+#[derive(Copy, Clone, Debug, PartialEq, Eq)]
+pub enum CsselrCacheType {
+ /// Select DCache or unified cache
+ DataOrUnified = 0,
+ /// Select ICache
+ Instruction = 1,
+}
+
+#[cfg(not(armv6m))]
+impl CPUID {
+ /// Selects the current CCSIDR
+ ///
+ /// * `level`: the required cache level minus 1, e.g. 0 for L1, 1 for L2
+ /// * `ind`: select instruction cache or data/unified cache
+ ///
+ /// `level` is masked to be between 0 and 7.
+ #[inline]
+ pub fn select_cache(&mut self, level: u8, ind: CsselrCacheType) {
+ const CSSELR_IND_POS: u32 = 0;
+ const CSSELR_IND_MASK: u32 = 1 << CSSELR_IND_POS;
+ const CSSELR_LEVEL_POS: u32 = 1;
+ const CSSELR_LEVEL_MASK: u32 = 0x7 << CSSELR_LEVEL_POS;
+
+ unsafe {
+ self.csselr.write(
+ ((u32::from(level) << CSSELR_LEVEL_POS) & CSSELR_LEVEL_MASK)
+ | (((ind as u32) << CSSELR_IND_POS) & CSSELR_IND_MASK),
+ )
+ }
+ }
+
+ /// Returns the number of sets and ways in the selected cache
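+    ///
+    /// # Example
+    ///
+    /// A minimal sketch reading the geometry of the L1 data/unified cache:
+    ///
+    /// ```no_run
+    /// use cortex_m::peripheral::cpuid::CsselrCacheType;
+    ///
+    /// let mut peripherals = cortex_m::Peripherals::take().unwrap();
+    /// let (sets, ways) = peripherals
+    ///     .CPUID
+    ///     .cache_num_sets_ways(0, CsselrCacheType::DataOrUnified);
+    /// ```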
+ #[inline]
+ pub fn cache_num_sets_ways(&mut self, level: u8, ind: CsselrCacheType) -> (u16, u16) {
+ const CCSIDR_NUMSETS_POS: u32 = 13;
+ const CCSIDR_NUMSETS_MASK: u32 = 0x7FFF << CCSIDR_NUMSETS_POS;
+ const CCSIDR_ASSOCIATIVITY_POS: u32 = 3;
+ const CCSIDR_ASSOCIATIVITY_MASK: u32 = 0x3FF << CCSIDR_ASSOCIATIVITY_POS;
+
+ self.select_cache(level, ind);
+ crate::asm::dsb();
+ let ccsidr = self.ccsidr.read();
+ (
+ (1 + ((ccsidr & CCSIDR_NUMSETS_MASK) >> CCSIDR_NUMSETS_POS)) as u16,
+ (1 + ((ccsidr & CCSIDR_ASSOCIATIVITY_MASK) >> CCSIDR_ASSOCIATIVITY_POS)) as u16,
+ )
+ }
+
+ /// Returns log2 of the number of words in the smallest cache line of all the data cache and
+ /// unified caches that are controlled by the processor.
+ ///
+ /// This is the `DminLine` field of the CTR register.
+ #[inline(always)]
+ pub fn cache_dminline() -> u32 {
+ const CTR_DMINLINE_POS: u32 = 16;
+ const CTR_DMINLINE_MASK: u32 = 0xF << CTR_DMINLINE_POS;
+ let ctr = unsafe { (*Self::PTR).ctr.read() };
+ (ctr & CTR_DMINLINE_MASK) >> CTR_DMINLINE_POS
+ }
+
+ /// Returns log2 of the number of words in the smallest cache line of all the instruction
+ /// caches that are controlled by the processor.
+ ///
+ /// This is the `IminLine` field of the CTR register.
+ #[inline(always)]
+ pub fn cache_iminline() -> u32 {
+ const CTR_IMINLINE_POS: u32 = 0;
+ const CTR_IMINLINE_MASK: u32 = 0xF << CTR_IMINLINE_POS;
+ let ctr = unsafe { (*Self::PTR).ctr.read() };
+ (ctr & CTR_IMINLINE_MASK) >> CTR_IMINLINE_POS
+ }
+}
diff --git a/src/peripheral/dcb.rs b/src/peripheral/dcb.rs
new file mode 100644
index 0000000..a4db9fc
--- /dev/null
+++ b/src/peripheral/dcb.rs
@@ -0,0 +1,81 @@
+//! Debug Control Block
+
+use volatile_register::{RW, WO};
+
+use crate::peripheral::DCB;
+use core::ptr;
+
+const DCB_DEMCR_TRCENA: u32 = 1 << 24;
+const DCB_DEMCR_MON_EN: u32 = 1 << 16;
+
+/// Register block
+#[repr(C)]
+pub struct RegisterBlock {
+ /// Debug Halting Control and Status
+ pub dhcsr: RW<u32>,
+ /// Debug Core Register Selector
+ pub dcrsr: WO<u32>,
+ /// Debug Core Register Data
+ pub dcrdr: RW<u32>,
+ /// Debug Exception and Monitor Control
+ pub demcr: RW<u32>,
+}
+
+impl DCB {
+ /// Enables TRACE. This is for example required by the
+ /// `peripheral::DWT` cycle counter to work properly.
+    /// Per ST's documentation, this flag is not reset on
+    /// a soft reset, only on a power-on reset.
+ ///
+ /// Note: vendor-specific registers may have to be set to completely
+ /// enable tracing. For example, on the STM32F401RE, `TRACE_MODE`
+ /// and `TRACE_IOEN` must be configured in `DBGMCU_CR` register.
+ #[inline]
+ pub fn enable_trace(&mut self) {
+ // set bit 24 / TRCENA
+ unsafe {
+ self.demcr.modify(|w| w | DCB_DEMCR_TRCENA);
+ }
+ }
+
+ /// Disables TRACE. See `DCB::enable_trace()` for more details
+ #[inline]
+ pub fn disable_trace(&mut self) {
+ // unset bit 24 / TRCENA
+ unsafe {
+ self.demcr.modify(|w| w & !DCB_DEMCR_TRCENA);
+ }
+ }
+
+ /// Enables the [`DebugMonitor`](crate::peripheral::scb::Exception::DebugMonitor) exception
+ #[inline]
+ pub fn enable_debug_monitor(&mut self) {
+ unsafe {
+ self.demcr.modify(|w| w | DCB_DEMCR_MON_EN);
+ }
+ }
+
+ /// Disables the [`DebugMonitor`](crate::peripheral::scb::Exception::DebugMonitor) exception
+ #[inline]
+ pub fn disable_debug_monitor(&mut self) {
+ unsafe {
+ self.demcr.modify(|w| w & !DCB_DEMCR_MON_EN);
+ }
+ }
+
+ /// Is there a debugger attached? (see note)
+ ///
+ /// Note: This function is [reported not to
+ /// work](http://web.archive.org/web/20180821191012/https://community.nxp.com/thread/424925#comment-782843)
+ /// on Cortex-M0 devices. Per the ARM v6-M Architecture Reference Manual, "Access to the DHCSR
+ /// from software running on the processor is IMPLEMENTATION DEFINED". Indeed, from the
+ /// [Cortex-M0+ r0p1 Technical Reference Manual](http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.ddi0484c/BABJHEIG.html), "Note Software cannot access the debug registers."
+ #[inline]
+ pub fn is_debugger_attached() -> bool {
+ unsafe {
+ // do an 8-bit read of the 32-bit DHCSR register, and get the LSB
+ let value = ptr::read_volatile(Self::PTR as *const u8);
+ value & 0x1 == 1
+ }
+ }
+}
diff --git a/src/peripheral/dwt.rs b/src/peripheral/dwt.rs
new file mode 100644
index 0000000..c5f7bc9
--- /dev/null
+++ b/src/peripheral/dwt.rs
@@ -0,0 +1,495 @@
+//! Data Watchpoint and Trace unit
+
+#[cfg(not(armv6m))]
+use volatile_register::WO;
+use volatile_register::{RO, RW};
+
+use crate::peripheral::DWT;
+use bitfield::bitfield;
+
+/// Register block
+#[repr(C)]
+pub struct RegisterBlock {
+ /// Control
+ pub ctrl: RW<Ctrl>,
+ /// Cycle Count
+ #[cfg(not(armv6m))]
+ pub cyccnt: RW<u32>,
+ /// CPI Count
+ #[cfg(not(armv6m))]
+ pub cpicnt: RW<u32>,
+ /// Exception Overhead Count
+ #[cfg(not(armv6m))]
+ pub exccnt: RW<u32>,
+ /// Sleep Count
+ #[cfg(not(armv6m))]
+ pub sleepcnt: RW<u32>,
+ /// LSU Count
+ #[cfg(not(armv6m))]
+ pub lsucnt: RW<u32>,
+ /// Folded-instruction Count
+ #[cfg(not(armv6m))]
+ pub foldcnt: RW<u32>,
+ /// Cortex-M0(+) does not have these parts
+ #[cfg(armv6m)]
+ reserved: [u32; 6],
+ /// Program Counter Sample
+ pub pcsr: RO<u32>,
+ /// Comparators
+ #[cfg(armv6m)]
+ pub c: [Comparator; 2],
+ #[cfg(not(armv6m))]
+ /// Comparators
+ pub c: [Comparator; 16],
+ #[cfg(not(armv6m))]
+ reserved: [u32; 932],
+ /// Lock Access
+ #[cfg(not(armv6m))]
+ pub lar: WO<u32>,
+ /// Lock Status
+ #[cfg(not(armv6m))]
+ pub lsr: RO<u32>,
+}
+
+bitfield! {
+ /// Control register.
+ #[repr(C)]
+ #[derive(Copy, Clone)]
+ pub struct Ctrl(u32);
+ cyccntena, set_cyccntena: 0;
+ pcsamplena, set_pcsamplena: 12;
+ exctrcena, set_exctrcena: 16;
+ noprfcnt, _: 24;
+ nocyccnt, _: 25;
+ noexttrig, _: 26;
+ notrcpkt, _: 27;
+ u8, numcomp, _: 31, 28;
+}
+
+/// Comparator
+#[repr(C)]
+pub struct Comparator {
+ /// Comparator
+ pub comp: RW<u32>,
+ /// Comparator Mask
+ pub mask: RW<u32>,
+ /// Comparator Function
+ pub function: RW<Function>,
+ reserved: u32,
+}
+
+bitfield! {
+ #[repr(C)]
+ #[derive(Copy, Clone)]
+ /// Comparator FUNCTIONn register.
+ ///
+ /// See C1.8.17 "Comparator Function registers, DWT_FUNCTIONn"
+ pub struct Function(u32);
+ u8, function, set_function: 3, 0;
+ emitrange, set_emitrange: 5;
+ cycmatch, set_cycmatch: 7;
+ datavmatch, set_datavmatch: 8;
+ lnk1ena, set_lnk1ena: 9;
+ u8, datavsize, set_datavsize: 11, 10;
+ u8, datavaddr0, set_datavaddr0: 15, 12;
+ u8, datavaddr1, set_datavaddr1: 19, 16;
+ matched, _: 24;
+}
+
+impl DWT {
+ /// Number of comparators implemented
+ ///
+ /// A value of zero indicates no comparator support.
+ #[inline]
+ pub fn num_comp(&self) -> u8 {
+ self.ctrl.read().numcomp()
+ }
+
+    /// Returns `true` if the implementation supports sampling and exception tracing
+ #[cfg(not(armv6m))]
+ #[inline]
+ pub fn has_exception_trace(&self) -> bool {
+ !self.ctrl.read().notrcpkt()
+ }
+
+ /// Returns `true` if the implementation includes external match signals
+ #[cfg(not(armv6m))]
+ #[inline]
+ pub fn has_external_match(&self) -> bool {
+ !self.ctrl.read().noexttrig()
+ }
+
+ /// Returns `true` if the implementation supports a cycle counter
+ #[inline]
+ pub fn has_cycle_counter(&self) -> bool {
+ #[cfg(not(armv6m))]
+ return !self.ctrl.read().nocyccnt();
+
+ #[cfg(armv6m)]
+ return false;
+ }
+
+    /// Returns `true` if the implementation supports the profiling counters
+ #[cfg(not(armv6m))]
+ #[inline]
+ pub fn has_profiling_counter(&self) -> bool {
+ !self.ctrl.read().noprfcnt()
+ }
+
+ /// Enables the cycle counter
+ ///
+    /// The global trace enable ([`DCB::enable_trace`]) should be set before
+    /// enabling the cycle counter, as the processor may ignore writes to the
+    /// cycle counter enable while the global trace enable is cleared
+    /// (implementation defined behaviour).
+ ///
+ /// [`DCB::enable_trace`]: crate::peripheral::DCB::enable_trace
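+    ///
+    /// # Example
+    ///
+    /// A minimal sketch measuring the cycles spent in a section of code:
+    ///
+    /// ```no_run
+    /// use cortex_m::peripheral::DWT;
+    ///
+    /// let mut peripherals = cortex_m::Peripherals::take().unwrap();
+    /// peripherals.DCB.enable_trace();
+    /// peripherals.DWT.enable_cycle_counter();
+    ///
+    /// let before = DWT::cycle_count();
+    /// // ... code under measurement ...
+    /// let elapsed = DWT::cycle_count().wrapping_sub(before);
+    /// ```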
+ #[cfg(not(armv6m))]
+ #[inline]
+ pub fn enable_cycle_counter(&mut self) {
+ unsafe {
+ self.ctrl.modify(|mut r| {
+ r.set_cyccntena(true);
+ r
+ });
+ }
+ }
+
+ /// Returns `true` if the cycle counter is enabled
+ #[cfg(not(armv6m))]
+ #[inline]
+ pub fn cycle_counter_enabled(&self) -> bool {
+ self.ctrl.read().cyccntena()
+ }
+
+ /// Enables exception tracing
+ #[cfg(not(armv6m))]
+ #[inline]
+ pub fn enable_exception_tracing(&mut self) {
+ unsafe {
+ self.ctrl.modify(|mut r| {
+ r.set_exctrcena(true);
+ r
+ });
+ }
+ }
+
+ /// Disables exception tracing
+ #[cfg(not(armv6m))]
+ #[inline]
+ pub fn disable_exception_tracing(&mut self) {
+ unsafe {
+ self.ctrl.modify(|mut r| {
+ r.set_exctrcena(false);
+ r
+ });
+ }
+ }
+
+ /// Whether to periodically generate PC samples
+ #[cfg(not(armv6m))]
+ #[inline]
+ pub fn enable_pc_samples(&mut self, bit: bool) {
+ unsafe {
+ self.ctrl.modify(|mut r| {
+ r.set_pcsamplena(bit);
+ r
+ });
+ }
+ }
+
+ /// Returns the current clock cycle count
+ #[cfg(not(armv6m))]
+ #[inline]
+ #[deprecated(
+ since = "0.7.4",
+ note = "Use `cycle_count` which follows the C-GETTER convention"
+ )]
+ pub fn get_cycle_count() -> u32 {
+ Self::cycle_count()
+ }
+
+ /// Returns the current clock cycle count
+ #[cfg(not(armv6m))]
+ #[inline]
+ pub fn cycle_count() -> u32 {
+ // NOTE(unsafe) atomic read with no side effects
+ unsafe { (*Self::PTR).cyccnt.read() }
+ }
+
+ /// Set the cycle count
+ #[cfg(not(armv6m))]
+ #[inline]
+ pub fn set_cycle_count(&mut self, count: u32) {
+ unsafe { self.cyccnt.write(count) }
+ }
+
+ /// Removes the software lock on the DWT
+ ///
+ /// Some devices, like the STM32F7, software lock the DWT after a power cycle.
+ #[cfg(not(armv6m))]
+ #[inline]
+ pub fn unlock() {
+ // NOTE(unsafe) atomic write to a stateless, write-only register
+ unsafe { (*Self::PTR).lar.write(0xC5AC_CE55) }
+ }
+
+ /// Get the CPI count
+ ///
+ /// Counts additional cycles required to execute multi-cycle instructions,
+ /// except those recorded by [`lsu_count`], and counts any instruction fetch
+ /// stalls.
+ ///
+ /// [`lsu_count`]: DWT::lsu_count
+ #[cfg(not(armv6m))]
+ #[inline]
+ pub fn cpi_count() -> u8 {
+ // NOTE(unsafe) atomic read with no side effects
+ unsafe { (*Self::PTR).cpicnt.read() as u8 }
+ }
+
+ /// Set the CPI count
+ #[cfg(not(armv6m))]
+ #[inline]
+ pub fn set_cpi_count(&mut self, count: u8) {
+ unsafe { self.cpicnt.write(count as u32) }
+ }
+
+ /// Get the total cycles spent in exception processing
+ #[cfg(not(armv6m))]
+ #[inline]
+ pub fn exception_count() -> u8 {
+ // NOTE(unsafe) atomic read with no side effects
+ unsafe { (*Self::PTR).exccnt.read() as u8 }
+ }
+
+ /// Set the exception count
+ #[cfg(not(armv6m))]
+ #[inline]
+ pub fn set_exception_count(&mut self, count: u8) {
+ unsafe { self.exccnt.write(count as u32) }
+ }
+
+ /// Get the total number of cycles that the processor is sleeping
+ ///
+ /// ARM recommends that this counter counts all cycles when the processor is sleeping,
+ /// regardless of whether a WFI or WFE instruction, or the sleep-on-exit functionality,
+ /// caused the entry to sleep mode.
+ /// However, all sleep features are implementation defined and therefore when
+ /// this counter counts is implementation defined.
+ #[cfg(not(armv6m))]
+ #[inline]
+ pub fn sleep_count() -> u8 {
+ // NOTE(unsafe) atomic read with no side effects
+ unsafe { (*Self::PTR).sleepcnt.read() as u8 }
+ }
+
+ /// Set the sleep count
+ #[cfg(not(armv6m))]
+ #[inline]
+ pub fn set_sleep_count(&mut self, count: u8) {
+ unsafe { self.sleepcnt.write(count as u32) }
+ }
+
+ /// Get the additional cycles required to execute all load or store instructions
+ #[cfg(not(armv6m))]
+ #[inline]
+ pub fn lsu_count() -> u8 {
+ // NOTE(unsafe) atomic read with no side effects
+ unsafe { (*Self::PTR).lsucnt.read() as u8 }
+ }
+
+ /// Set the lsu count
+ #[cfg(not(armv6m))]
+ #[inline]
+ pub fn set_lsu_count(&mut self, count: u8) {
+ unsafe { self.lsucnt.write(count as u32) }
+ }
+
+ /// Get the folded instruction count
+ ///
+ /// Increments on each instruction that takes 0 cycles.
+ #[cfg(not(armv6m))]
+ #[inline]
+ pub fn fold_count() -> u8 {
+ // NOTE(unsafe) atomic read with no side effects
+ unsafe { (*Self::PTR).foldcnt.read() as u8 }
+ }
+
+ /// Set the folded instruction count
+ #[cfg(not(armv6m))]
+ #[inline]
+ pub fn set_fold_count(&mut self, count: u8) {
+ unsafe { self.foldcnt.write(count as u32) }
+ }
+}
+
+/// Whether the comparator should match on read, write or read/write operations.
+#[derive(Debug, Eq, PartialEq, Copy, Clone)]
+pub enum AccessType {
+ /// Generate packet only when matched address is read from.
+ ReadOnly,
+ /// Generate packet only when matched address is written to.
+ WriteOnly,
+ /// Generate packet when matched address is both read from and written to.
+ ReadWrite,
+}
+
+/// The sequence of packet(s) or events that should be emitted/generated on comparator match.
+#[derive(Debug, Eq, PartialEq, Copy, Clone)]
+pub enum EmitOption {
+ /// Emit only trace data value packet.
+ Data,
+ /// Emit only trace address packet.
+ Address,
+    /// Emit only trace PC value packet.
+ ///
+ /// *NOTE* only compatible with [AccessType::ReadWrite].
+ PC,
+ /// Emit trace address and data value packets.
+ AddressData,
+ /// Emit trace PC value and data value packets.
+ PCData,
+ /// Generate a watchpoint debug event. Either halts execution or fires a `DebugMonitor` exception.
+ ///
+ /// See more in section "Watchpoint debug event generation" page C1-729.
+ WatchpointDebugEvent,
+ /// Generate a `CMPMATCH[N]` event.
+ ///
+ /// See more in section "CMPMATCH[N] event generation" page C1-730.
+ CompareMatchEvent,
+}
+
+/// Settings for address matching
+#[derive(Debug, Eq, PartialEq, Copy, Clone)]
+pub struct ComparatorAddressSettings {
+ /// The address to match against.
+ pub address: u32,
+ /// The address mask to match against.
+ pub mask: u32,
+ /// What sequence of packet(s) to emit on comparator match.
+ pub emit: EmitOption,
+ /// Whether to match on read, write or read/write operations.
+ pub access_type: AccessType,
+}
+
+/// Settings for cycle count matching
+#[derive(Debug, Eq, PartialEq, Copy, Clone)]
+pub struct CycleCountSettings {
+ /// The function selection used.
+ /// See Table C1-15 for DWT cycle count comparison functions.
+ pub emit: EmitOption,
+ /// The cycle count value to compare against.
+ pub compare: u32,
+}
+
+/// The available functions of a DWT comparator.
+#[derive(Debug, Eq, PartialEq, Copy, Clone)]
+#[non_exhaustive]
+pub enum ComparatorFunction {
+ /// Compare accessed memory addresses.
+ Address(ComparatorAddressSettings),
+ /// Compare cycle count & target value.
+ ///
+ /// **NOTE**: only supported by comparator 0 and if the HW supports the cycle counter.
+ /// Check [`DWT::has_cycle_counter`] for support. See C1.8.1 for more details.
+ CycleCount(CycleCountSettings),
+}
+
+/// Possible error values returned by [Comparator::configure].
+#[derive(Debug, Eq, PartialEq, Copy, Clone)]
+#[non_exhaustive]
+pub enum DwtError {
+ /// Invalid combination of [AccessType] and [EmitOption].
+ InvalidFunction,
+}
+
+impl Comparator {
+ /// Configure the function of the comparator
+ #[allow(clippy::missing_inline_in_public_items)]
+ pub fn configure(&self, settings: ComparatorFunction) -> Result<(), DwtError> {
+ match settings {
+ ComparatorFunction::Address(settings) => {
+ // FUNCTION, EMITRANGE
+ // See Table C1-14
+ let (function, emit_range) = match (&settings.access_type, &settings.emit) {
+ (AccessType::ReadOnly, EmitOption::Data) => (0b1100, false),
+ (AccessType::ReadOnly, EmitOption::Address) => (0b1100, true),
+ (AccessType::ReadOnly, EmitOption::AddressData) => (0b1110, true),
+ (AccessType::ReadOnly, EmitOption::PCData) => (0b1110, false),
+ (AccessType::ReadOnly, EmitOption::WatchpointDebugEvent) => (0b0101, false),
+ (AccessType::ReadOnly, EmitOption::CompareMatchEvent) => (0b1001, false),
+
+ (AccessType::WriteOnly, EmitOption::Data) => (0b1101, false),
+ (AccessType::WriteOnly, EmitOption::Address) => (0b1101, true),
+ (AccessType::WriteOnly, EmitOption::AddressData) => (0b1111, true),
+ (AccessType::WriteOnly, EmitOption::PCData) => (0b1111, false),
+ (AccessType::WriteOnly, EmitOption::WatchpointDebugEvent) => (0b0110, false),
+ (AccessType::WriteOnly, EmitOption::CompareMatchEvent) => (0b1010, false),
+
+ (AccessType::ReadWrite, EmitOption::Data) => (0b0010, false),
+ (AccessType::ReadWrite, EmitOption::Address) => (0b0001, true),
+ (AccessType::ReadWrite, EmitOption::AddressData) => (0b0010, true),
+ (AccessType::ReadWrite, EmitOption::PCData) => (0b0011, false),
+ (AccessType::ReadWrite, EmitOption::WatchpointDebugEvent) => (0b0111, false),
+ (AccessType::ReadWrite, EmitOption::CompareMatchEvent) => (0b1011, false),
+
+ (AccessType::ReadWrite, EmitOption::PC) => (0b0001, false),
+ (_, EmitOption::PC) => return Err(DwtError::InvalidFunction),
+ };
+
+ unsafe {
+ self.function.modify(|mut r| {
+ r.set_function(function);
+ r.set_emitrange(emit_range);
+ // don't compare data value
+ r.set_datavmatch(false);
+ // don't compare cycle counter value
+ // NOTE: only needed for comparator 0, but is SBZP.
+ r.set_cycmatch(false);
+ // SBZ as needed, see Page 784/C1-724
+ r.set_datavsize(0);
+ r.set_datavaddr0(0);
+ r.set_datavaddr1(0);
+
+ r
+ });
+
+ self.comp.write(settings.address);
+ self.mask.write(settings.mask);
+ }
+ }
+ ComparatorFunction::CycleCount(settings) => {
+ let function = match &settings.emit {
+ EmitOption::PCData => 0b0001,
+ EmitOption::WatchpointDebugEvent => 0b0100,
+ EmitOption::CompareMatchEvent => 0b1000,
+ _ => return Err(DwtError::InvalidFunction),
+ };
+
+ unsafe {
+ self.function.modify(|mut r| {
+ r.set_function(function);
+ // emit_range is N/A for cycle count compare
+ r.set_emitrange(false);
+ // don't compare data
+ r.set_datavmatch(false);
+ // compare cyccnt
+ r.set_cycmatch(true);
+ // SBZ as needed, see Page 784/C1-724
+ r.set_datavsize(0);
+ r.set_datavaddr0(0);
+ r.set_datavaddr1(0);
+
+ r
+ });
+
+ self.comp.write(settings.compare);
+ self.mask.write(0); // SBZ, see Page 784/C1-724
+ }
+ }
+ }
+
+ Ok(())
+ }
+}
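+
+// Editor's sketch (not in the original source): configure a comparator to
+// emit a data trace packet whenever a watched variable is written. Assumes
+// `comparator` is one entry of the DWT comparator array and `WATCHED` is a
+// hypothetical static being observed.
+//
+//     comparator.configure(ComparatorFunction::Address(ComparatorAddressSettings {
+//         address: &WATCHED as *const _ as u32,
+//         mask: 0,
+//         emit: EmitOption::Data,
+//         access_type: AccessType::WriteOnly,
+//     }))?;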
diff --git a/src/peripheral/fpb.rs b/src/peripheral/fpb.rs
new file mode 100644
index 0000000..b86b8b2
--- /dev/null
+++ b/src/peripheral/fpb.rs
@@ -0,0 +1,21 @@
+//! Flash Patch and Breakpoint unit
+//!
+//! *NOTE* Not available on Armv6-M.
+
+use volatile_register::{RO, RW, WO};
+
+/// Register block
+#[repr(C)]
+pub struct RegisterBlock {
+ /// Control
+ pub ctrl: RW<u32>,
+ /// Remap
+ pub remap: RW<u32>,
+ /// Comparator
+ pub comp: [RW<u32>; 127],
+ reserved: [u32; 875],
+ /// Lock Access
+ pub lar: WO<u32>,
+ /// Lock Status
+ pub lsr: RO<u32>,
+}
diff --git a/src/peripheral/fpu.rs b/src/peripheral/fpu.rs
new file mode 100644
index 0000000..9a047d8
--- /dev/null
+++ b/src/peripheral/fpu.rs
@@ -0,0 +1,19 @@
+//! Floating Point Unit
+//!
+//! *NOTE* Available only on targets with a Floating Point Unit (FPU) extension.
+
+use volatile_register::{RO, RW};
+
+/// Register block
+#[repr(C)]
+pub struct RegisterBlock {
+ reserved: u32,
+ /// Floating Point Context Control
+ pub fpccr: RW<u32>,
+ /// Floating Point Context Address
+ pub fpcar: RW<u32>,
+ /// Floating Point Default Status Control
+ pub fpdscr: RW<u32>,
+ /// Media and FP Feature
+ pub mvfr: [RO<u32>; 3],
+}
diff --git a/src/peripheral/icb.rs b/src/peripheral/icb.rs
new file mode 100644
index 0000000..e1de33b
--- /dev/null
+++ b/src/peripheral/icb.rs
@@ -0,0 +1,32 @@
+//! Implementation Control Block
+
+#[cfg(any(armv7m, armv8m, native))]
+use volatile_register::RO;
+use volatile_register::RW;
+
+/// Register block
+#[repr(C)]
+pub struct RegisterBlock {
+ /// Interrupt Controller Type Register
+ ///
+    /// The bottom four bits of this register give the number of implemented
+    /// interrupt lines, in granules of 32: an implementation supports at most
+    /// `32 * (INTLINESNUM + 1)` interrupts, so a value of `0b0010` indicates
+    /// support for up to 96 interrupt lines.
+ #[cfg(any(armv7m, armv8m, native))]
+ pub ictr: RO<u32>,
+
+    /// The ICTR is not defined in the ARMv6-M Architecture Reference Manual, so
+ /// we replace it with this.
+ #[cfg(not(any(armv7m, armv8m, native)))]
+ _reserved: u32,
+
+ /// Auxiliary Control Register
+ ///
+ /// This register is entirely implementation defined -- the standard gives
+ /// it an address, but does not define its role or contents.
+ pub actlr: RW<u32>,
+
+ /// Coprocessor Power Control Register
+ #[cfg(armv8m)]
+ pub cppwr: RW<u32>,
+}
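+
+// Editor's sketch (not in the original source): deriving the maximum number
+// of supported interrupts from ICTR, per the ARM definition of INTLINESNUM.
+// Assumes `icb` is the `ICB` singleton.
+//
+//     let intlinesnum = icb.ictr.read() & 0b1111;
+//     let max_irqs = 32 * (intlinesnum + 1);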
diff --git a/src/peripheral/itm.rs b/src/peripheral/itm.rs
new file mode 100644
index 0000000..f8e9e25
--- /dev/null
+++ b/src/peripheral/itm.rs
@@ -0,0 +1,215 @@
+//! Instrumentation Trace Macrocell
+//!
+//! *NOTE* Not available on Armv6-M and Armv8-M Baseline.
+
+use core::cell::UnsafeCell;
+use core::ptr;
+
+use volatile_register::{RO, RW, WO};
+
+use crate::peripheral::ITM;
+use bitfield::bitfield;
+
+#[cfg(feature = "serde")]
+use serde::{Deserialize, Serialize};
+
+/// Register block
+#[repr(C)]
+pub struct RegisterBlock {
+ /// Stimulus Port
+ pub stim: [Stim; 256],
+ reserved0: [u32; 640],
+ /// Trace Enable
+ pub ter: [RW<u32>; 8],
+ reserved1: [u32; 8],
+ /// Trace Privilege
+ pub tpr: RW<u32>,
+ reserved2: [u32; 15],
+ /// Trace Control
+ pub tcr: RW<Tcr>,
+ reserved3: [u32; 75],
+ /// Lock Access
+ pub lar: WO<u32>,
+ /// Lock Status
+ pub lsr: RO<u32>,
+}
+
+bitfield! {
+ /// Trace Control Register.
+ #[repr(C)]
+ #[derive(Copy, Clone)]
+ pub struct Tcr(u32);
+ itmena, set_itmena: 0;
+ tsena, set_tsena: 1;
+    syncena, set_syncena: 2;
+ txena, set_txena: 3;
+ swoena, set_swoena: 4;
+ u8, tsprescale, set_tsprescale: 9, 8;
+ u8, gtsfreq, set_gtsfreq: 11, 10;
+ u8, tracebusid, set_tracebusid: 22, 16;
+ busy, _: 23;
+}
+
+/// Stimulus Port
+pub struct Stim {
+ register: UnsafeCell<u32>,
+}
+
+impl Stim {
+ /// Writes an `u8` payload into the stimulus port
+ #[inline]
+ pub fn write_u8(&mut self, value: u8) {
+ unsafe { ptr::write_volatile(self.register.get() as *mut u8, value) }
+ }
+
+ /// Writes an `u16` payload into the stimulus port
+ #[inline]
+ pub fn write_u16(&mut self, value: u16) {
+ unsafe { ptr::write_volatile(self.register.get() as *mut u16, value) }
+ }
+
+ /// Writes an `u32` payload into the stimulus port
+ #[inline]
+ pub fn write_u32(&mut self, value: u32) {
+ unsafe { ptr::write_volatile(self.register.get(), value) }
+ }
+
+ /// Returns `true` if the stimulus port is ready to accept more data
+ #[cfg(not(armv8m))]
+ #[inline]
+ pub fn is_fifo_ready(&self) -> bool {
+ unsafe { ptr::read_volatile(self.register.get()) & 0b1 == 1 }
+ }
+
+ /// Returns `true` if the stimulus port is ready to accept more data
+ #[cfg(armv8m)]
+ #[inline]
+ pub fn is_fifo_ready(&self) -> bool {
+ // ARMv8-M adds a disabled bit; we indicate that we are ready to
+ // proceed with a stimulus write if the port is either ready (bit 0) or
+ // disabled (bit 1).
+ unsafe { ptr::read_volatile(self.register.get()) & 0b11 != 0 }
+ }
+}
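+
+// Editor's sketch (not in the original source): a blocking write of one byte
+// through stimulus port 0. Assumes `itm` is the `ITM` singleton and the port
+// has been enabled through the TER register.
+//
+//     while !itm.stim[0].is_fifo_ready() {}
+//     itm.stim[0].write_u8(b'A');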
+
+/// The possible local timestamp options.
+#[derive(Debug, Eq, PartialEq, Copy, Clone)]
+#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
+pub enum LocalTimestampOptions {
+ /// Disable local timestamps.
+ Disabled,
+ /// Enable local timestamps and use no prescaling.
+ Enabled,
+ /// Enable local timestamps and set the prescaler to divide the
+ /// reference clock by 4.
+ EnabledDiv4,
+ /// Enable local timestamps and set the prescaler to divide the
+ /// reference clock by 16.
+ EnabledDiv16,
+ /// Enable local timestamps and set the prescaler to divide the
+ /// reference clock by 64.
+ EnabledDiv64,
+}
+
+#[cfg(feature = "std")]
+impl core::convert::TryFrom<u8> for LocalTimestampOptions {
+ type Error = ();
+
+ /// Converts an integer value to an enabled [LocalTimestampOptions]
+ /// variant. Accepted values are: 1, 4, 16, 64. Any other value
+ /// yields `Err(())`.
+ fn try_from(value: u8) -> Result<Self, Self::Error> {
+ match value {
+ 1 => Ok(Self::Enabled),
+ 4 => Ok(Self::EnabledDiv4),
+ 16 => Ok(Self::EnabledDiv16),
+ 64 => Ok(Self::EnabledDiv64),
+ _ => Err(()),
+ }
+ }
+}
+
+/// The possible global timestamp options.
+#[derive(Debug, Eq, PartialEq, Copy, Clone)]
+pub enum GlobalTimestampOptions {
+ /// Disable global timestamps.
+ Disabled,
+ /// Generate a global timestamp approximately every 128 cycles.
+ Every128Cycles,
+    /// Generate a global timestamp approximately every 8192 cycles.
+ Every8192Cycles,
+ /// Generate a global timestamp after every packet, if the output FIFO is empty.
+ EveryPacket,
+}
+
+/// The possible clock sources for timestamp counters.
+#[derive(Debug, Eq, PartialEq, Copy, Clone)]
+pub enum TimestampClkSrc {
+ /// Clock timestamp counters using the system processor clock.
+ SystemClock,
+ /// Clock timestamp counters using the asynchronous clock from the
+ /// TPIU interface.
+ ///
+ /// NOTE: The timestamp counter is held in reset while the output
+ /// line is idle.
+ AsyncTPIU,
+}
+
+/// Available settings for the ITM peripheral.
+#[derive(Debug, Eq, PartialEq, Copy, Clone)]
+pub struct ITMSettings {
+ /// Whether to enable ITM.
+ pub enable: bool,
+ /// Whether DWT packets should be forwarded to ITM.
+ pub forward_dwt: bool,
+ /// The local timestamp options that should be applied.
+ pub local_timestamps: LocalTimestampOptions,
+ /// The global timestamp options that should be applied.
+ pub global_timestamps: GlobalTimestampOptions,
+ /// The trace bus ID to use when multi-trace sources are in use.
+ /// `None` specifies that only a single trace source is in use and
+ /// has the same effect as `Some(0)`.
+ pub bus_id: Option<u8>,
+ /// The clock that should increase timestamp counters.
+ pub timestamp_clk_src: TimestampClkSrc,
+}
+
+impl ITM {
+ /// Removes the software lock on the ITM.
+ #[inline]
+ pub fn unlock(&mut self) {
+ // NOTE(unsafe) atomic write to a stateless, write-only register
+ unsafe { self.lar.write(0xC5AC_CE55) }
+ }
+
+ /// Configures the ITM with the passed [ITMSettings].
+ #[inline]
+ pub fn configure(&mut self, settings: ITMSettings) {
+ unsafe {
+ self.tcr.modify(|mut r| {
+ r.set_itmena(settings.enable);
+ r.set_tsena(settings.local_timestamps != LocalTimestampOptions::Disabled);
+ r.set_txena(settings.forward_dwt);
+ r.set_tsprescale(match settings.local_timestamps {
+ LocalTimestampOptions::Disabled | LocalTimestampOptions::Enabled => 0b00,
+                    LocalTimestampOptions::EnabledDiv4 => 0b01,
+ LocalTimestampOptions::EnabledDiv16 => 0b10,
+ LocalTimestampOptions::EnabledDiv64 => 0b11,
+ });
+ r.set_gtsfreq(match settings.global_timestamps {
+ GlobalTimestampOptions::Disabled => 0b00,
+ GlobalTimestampOptions::Every128Cycles => 0b01,
+ GlobalTimestampOptions::Every8192Cycles => 0b10,
+ GlobalTimestampOptions::EveryPacket => 0b11,
+ });
+ r.set_swoena(match settings.timestamp_clk_src {
+ TimestampClkSrc::SystemClock => false,
+ TimestampClkSrc::AsyncTPIU => true,
+ });
+ r.set_tracebusid(settings.bus_id.unwrap_or(0));
+
+ r
+ });
+ }
+ }
+}
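+
+// Editor's sketch (not in the original source): a typical bring-up that
+// unlocks the ITM and enables it with undivided local timestamps and DWT
+// packet forwarding. Assumes `itm` is the `ITM` singleton.
+//
+//     itm.unlock();
+//     itm.configure(ITMSettings {
+//         enable: true,
+//         forward_dwt: true,
+//         local_timestamps: LocalTimestampOptions::Enabled,
+//         global_timestamps: GlobalTimestampOptions::Disabled,
+//         bus_id: None,
+//         timestamp_clk_src: TimestampClkSrc::SystemClock,
+//     });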
diff --git a/src/peripheral/mod.rs b/src/peripheral/mod.rs
new file mode 100644
index 0000000..af922b1
--- /dev/null
+++ b/src/peripheral/mod.rs
@@ -0,0 +1,594 @@
+//! Core peripherals.
+//!
+//! # API
+//!
+//! To use (most of) the peripheral API first you must get an *instance* of the peripheral. All the
+//! core peripherals are modeled as singletons (there can only ever be, at most, one instance of any
+//! one of them at any given point in time) and the only way to get an instance of them is through
+//! the [`Peripherals::take`](struct.Peripherals.html#method.take) method.
+//!
+//! ``` no_run
+//! # use cortex_m::peripheral::Peripherals;
+//! let mut peripherals = Peripherals::take().unwrap();
+//! peripherals.DCB.enable_trace();
+//! ```
+//!
+//! This method can only be successfully called *once* -- this is why the method returns an
+//! `Option`. Subsequent calls to the method will result in a `None` value being returned.
+//!
+//! ``` no_run, should_panic
+//! # use cortex_m::peripheral::Peripherals;
+//! let ok = Peripherals::take().unwrap();
+//! let panics = Peripherals::take().unwrap();
+//! ```
+//!
+//! A part of the peripheral API doesn't require access to a peripheral instance. This part of the
+//! API is provided as static methods on the peripheral types. One example is the
+//! [`DWT::cycle_count`](struct.DWT.html#method.cycle_count) method.
+//!
+//! ``` no_run
+//! # use cortex_m::peripheral::{DWT, Peripherals};
+//! {
+//! let mut peripherals = Peripherals::take().unwrap();
+//! peripherals.DCB.enable_trace();
+//! peripherals.DWT.enable_cycle_counter();
+//! } // all the peripheral singletons are destroyed here
+//!
+//! // but this method can be called without a DWT instance
+//! let cyccnt = DWT::cycle_count();
+//! ```
+//!
+//! The singleton property can be *unsafely* bypassed using the `ptr` static method which is
+//! available on all the peripheral types. This method is a useful building block for implementing
+//! safe higher level abstractions.
+//!
+//! ``` no_run
+//! # use cortex_m::peripheral::{DWT, Peripherals};
+//! {
+//! let mut peripherals = Peripherals::take().unwrap();
+//! peripherals.DCB.enable_trace();
+//! peripherals.DWT.enable_cycle_counter();
+//! } // all the peripheral singletons are destroyed here
+//!
+//! // actually safe because this is an atomic read with no side effects
+//! let cyccnt = unsafe { (*DWT::PTR).cyccnt.read() };
+//! ```
+//!
+//! # References
+//!
+//! - ARMv7-M Architecture Reference Manual (Issue E.b) - Chapter B3
+
+use core::marker::PhantomData;
+use core::ops;
+
+use crate::interrupt;
+
+#[cfg(cm7)]
+pub mod ac;
+#[cfg(not(armv6m))]
+pub mod cbp;
+pub mod cpuid;
+pub mod dcb;
+pub mod dwt;
+#[cfg(not(armv6m))]
+pub mod fpb;
+// NOTE(native) is for documentation purposes
+#[cfg(any(has_fpu, native))]
+pub mod fpu;
+pub mod icb;
+#[cfg(all(not(armv6m), not(armv8m_base)))]
+pub mod itm;
+pub mod mpu;
+pub mod nvic;
+#[cfg(armv8m)]
+pub mod sau;
+pub mod scb;
+pub mod syst;
+#[cfg(not(armv6m))]
+pub mod tpiu;
+
+#[cfg(test)]
+mod test;
+
+// NOTE the `PhantomData` used in the peripherals proxy is to make them `Send` but *not* `Sync`
+
+/// Core peripherals
+#[allow(non_snake_case)]
+#[allow(clippy::manual_non_exhaustive)]
+pub struct Peripherals {
+ /// Cortex-M7 TCM and cache access control.
+ #[cfg(cm7)]
+ pub AC: AC,
+
+ /// Cache and branch predictor maintenance operations.
+ /// Not available on Armv6-M.
+ pub CBP: CBP,
+
+ /// CPUID
+ pub CPUID: CPUID,
+
+ /// Debug Control Block
+ pub DCB: DCB,
+
+ /// Data Watchpoint and Trace unit
+ pub DWT: DWT,
+
+ /// Flash Patch and Breakpoint unit.
+ /// Not available on Armv6-M.
+ pub FPB: FPB,
+
+ /// Floating Point Unit.
+ pub FPU: FPU,
+
+ /// Implementation Control Block.
+ ///
+ /// The name is from the v8-M spec, but the block existed in earlier
+ /// revisions, without a name.
+ pub ICB: ICB,
+
+ /// Instrumentation Trace Macrocell.
+ /// Not available on Armv6-M and Armv8-M Baseline.
+ pub ITM: ITM,
+
+ /// Memory Protection Unit
+ pub MPU: MPU,
+
+ /// Nested Vector Interrupt Controller
+ pub NVIC: NVIC,
+
+ /// Security Attribution Unit
+ pub SAU: SAU,
+
+ /// System Control Block
+ pub SCB: SCB,
+
+ /// SysTick: System Timer
+ pub SYST: SYST,
+
+ /// Trace Port Interface Unit.
+ /// Not available on Armv6-M.
+ pub TPIU: TPIU,
+
+ // Private field making `Peripherals` non-exhaustive. We don't use `#[non_exhaustive]` so we
+ // can support older Rust versions.
+ _priv: (),
+}
+
+// NOTE `no_mangle` is used here to prevent linking different minor versions of this crate as that
+// would let you `take` the core peripherals more than once (one per minor version)
+#[no_mangle]
+static CORE_PERIPHERALS: () = ();
+
+/// Set to `true` when `take` or `steal` was called to make `Peripherals` a singleton.
+static mut TAKEN: bool = false;
+
+impl Peripherals {
+ /// Returns all the core peripherals *once*
+ #[inline]
+ pub fn take() -> Option<Self> {
+ interrupt::free(|_| {
+ if unsafe { TAKEN } {
+ None
+ } else {
+ Some(unsafe { Peripherals::steal() })
+ }
+ })
+ }
+
+ /// Unchecked version of `Peripherals::take`
+ #[inline]
+ pub unsafe fn steal() -> Self {
+ TAKEN = true;
+
+ Peripherals {
+ #[cfg(cm7)]
+ AC: AC {
+ _marker: PhantomData,
+ },
+ CBP: CBP {
+ _marker: PhantomData,
+ },
+ CPUID: CPUID {
+ _marker: PhantomData,
+ },
+ DCB: DCB {
+ _marker: PhantomData,
+ },
+ DWT: DWT {
+ _marker: PhantomData,
+ },
+ FPB: FPB {
+ _marker: PhantomData,
+ },
+ FPU: FPU {
+ _marker: PhantomData,
+ },
+ ICB: ICB {
+ _marker: PhantomData,
+ },
+ ITM: ITM {
+ _marker: PhantomData,
+ },
+ MPU: MPU {
+ _marker: PhantomData,
+ },
+ NVIC: NVIC {
+ _marker: PhantomData,
+ },
+ SAU: SAU {
+ _marker: PhantomData,
+ },
+ SCB: SCB {
+ _marker: PhantomData,
+ },
+ SYST: SYST {
+ _marker: PhantomData,
+ },
+ TPIU: TPIU {
+ _marker: PhantomData,
+ },
+ _priv: (),
+ }
+ }
+}
+
+/// Access control
+#[cfg(cm7)]
+pub struct AC {
+ _marker: PhantomData<*const ()>,
+}
+
+#[cfg(cm7)]
+unsafe impl Send for AC {}
+
+#[cfg(cm7)]
+impl AC {
+ /// Pointer to the register block
+ pub const PTR: *const self::ac::RegisterBlock = 0xE000_EF90 as *const _;
+}
+
+/// Cache and branch predictor maintenance operations
+#[allow(clippy::upper_case_acronyms)]
+pub struct CBP {
+ _marker: PhantomData<*const ()>,
+}
+
+unsafe impl Send for CBP {}
+
+#[cfg(not(armv6m))]
+impl CBP {
+ #[inline(always)]
+ pub(crate) const unsafe fn new() -> Self {
+ CBP {
+ _marker: PhantomData,
+ }
+ }
+
+ /// Pointer to the register block
+ pub const PTR: *const self::cbp::RegisterBlock = 0xE000_EF50 as *const _;
+}
+
+#[cfg(not(armv6m))]
+impl ops::Deref for CBP {
+ type Target = self::cbp::RegisterBlock;
+
+ #[inline(always)]
+ fn deref(&self) -> &Self::Target {
+ unsafe { &*Self::PTR }
+ }
+}
+
+/// CPUID
+#[allow(clippy::upper_case_acronyms)]
+pub struct CPUID {
+ _marker: PhantomData<*const ()>,
+}
+
+unsafe impl Send for CPUID {}
+
+impl CPUID {
+ /// Pointer to the register block
+ pub const PTR: *const self::cpuid::RegisterBlock = 0xE000_ED00 as *const _;
+}
+
+impl ops::Deref for CPUID {
+ type Target = self::cpuid::RegisterBlock;
+
+ #[inline(always)]
+ fn deref(&self) -> &Self::Target {
+ unsafe { &*Self::PTR }
+ }
+}
+
+/// Debug Control Block
+#[allow(clippy::upper_case_acronyms)]
+pub struct DCB {
+ _marker: PhantomData<*const ()>,
+}
+
+unsafe impl Send for DCB {}
+
+impl DCB {
+ /// Pointer to the register block
+ pub const PTR: *const dcb::RegisterBlock = 0xE000_EDF0 as *const _;
+}
+
+impl ops::Deref for DCB {
+ type Target = self::dcb::RegisterBlock;
+
+ #[inline(always)]
+ fn deref(&self) -> &Self::Target {
+ unsafe { &*DCB::PTR }
+ }
+}
+
+/// Data Watchpoint and Trace unit
+#[allow(clippy::upper_case_acronyms)]
+pub struct DWT {
+ _marker: PhantomData<*const ()>,
+}
+
+unsafe impl Send for DWT {}
+
+impl DWT {
+ /// Pointer to the register block
+ pub const PTR: *const dwt::RegisterBlock = 0xE000_1000 as *const _;
+}
+
+impl ops::Deref for DWT {
+ type Target = self::dwt::RegisterBlock;
+
+ #[inline(always)]
+ fn deref(&self) -> &Self::Target {
+ unsafe { &*Self::PTR }
+ }
+}
+
+/// Flash Patch and Breakpoint unit
+#[allow(clippy::upper_case_acronyms)]
+pub struct FPB {
+ _marker: PhantomData<*const ()>,
+}
+
+unsafe impl Send for FPB {}
+
+#[cfg(not(armv6m))]
+impl FPB {
+ /// Pointer to the register block
+ pub const PTR: *const fpb::RegisterBlock = 0xE000_2000 as *const _;
+}
+
+#[cfg(not(armv6m))]
+impl ops::Deref for FPB {
+ type Target = self::fpb::RegisterBlock;
+
+ #[inline(always)]
+ fn deref(&self) -> &Self::Target {
+ unsafe { &*Self::PTR }
+ }
+}
+
+/// Floating Point Unit
+#[allow(clippy::upper_case_acronyms)]
+pub struct FPU {
+ _marker: PhantomData<*const ()>,
+}
+
+unsafe impl Send for FPU {}
+
+#[cfg(any(has_fpu, native))]
+impl FPU {
+ /// Pointer to the register block
+ pub const PTR: *const fpu::RegisterBlock = 0xE000_EF30 as *const _;
+}
+
+#[cfg(any(has_fpu, native))]
+impl ops::Deref for FPU {
+ type Target = self::fpu::RegisterBlock;
+
+ #[inline(always)]
+ fn deref(&self) -> &Self::Target {
+ unsafe { &*Self::PTR }
+ }
+}
+
+/// Implementation Control Block.
+///
+/// This block contains implementation-defined registers like `ictr` and
+/// `actlr`. It's called the "implementation control block" in the ARMv8-M
+/// standard, but earlier standards contained the registers, just without a
+/// name.
+#[allow(clippy::upper_case_acronyms)]
+pub struct ICB {
+ _marker: PhantomData<*const ()>,
+}
+
+unsafe impl Send for ICB {}
+
+impl ICB {
+ /// Pointer to the register block
+ pub const PTR: *mut icb::RegisterBlock = 0xE000_E004 as *mut _;
+}
+
+impl ops::Deref for ICB {
+ type Target = self::icb::RegisterBlock;
+
+ #[inline(always)]
+ fn deref(&self) -> &Self::Target {
+ unsafe { &*Self::PTR }
+ }
+}
+
+impl ops::DerefMut for ICB {
+ #[inline(always)]
+ fn deref_mut(&mut self) -> &mut Self::Target {
+ unsafe { &mut *Self::PTR }
+ }
+}
+
+/// Instrumentation Trace Macrocell
+#[allow(clippy::upper_case_acronyms)]
+pub struct ITM {
+ _marker: PhantomData<*const ()>,
+}
+
+unsafe impl Send for ITM {}
+
+#[cfg(all(not(armv6m), not(armv8m_base)))]
+impl ITM {
+ /// Pointer to the register block
+ pub const PTR: *mut itm::RegisterBlock = 0xE000_0000 as *mut _;
+}
+
+#[cfg(all(not(armv6m), not(armv8m_base)))]
+impl ops::Deref for ITM {
+ type Target = self::itm::RegisterBlock;
+
+ #[inline(always)]
+ fn deref(&self) -> &Self::Target {
+ unsafe { &*Self::PTR }
+ }
+}
+
+#[cfg(all(not(armv6m), not(armv8m_base)))]
+impl ops::DerefMut for ITM {
+ #[inline(always)]
+ fn deref_mut(&mut self) -> &mut Self::Target {
+ unsafe { &mut *Self::PTR }
+ }
+}
+
+/// Memory Protection Unit
+#[allow(clippy::upper_case_acronyms)]
+pub struct MPU {
+ _marker: PhantomData<*const ()>,
+}
+
+unsafe impl Send for MPU {}
+
+impl MPU {
+ /// Pointer to the register block
+ pub const PTR: *const mpu::RegisterBlock = 0xE000_ED90 as *const _;
+}
+
+impl ops::Deref for MPU {
+ type Target = self::mpu::RegisterBlock;
+
+ #[inline(always)]
+ fn deref(&self) -> &Self::Target {
+ unsafe { &*Self::PTR }
+ }
+}
+
+/// Nested Vector Interrupt Controller
+#[allow(clippy::upper_case_acronyms)]
+pub struct NVIC {
+ _marker: PhantomData<*const ()>,
+}
+
+unsafe impl Send for NVIC {}
+
+impl NVIC {
+ /// Pointer to the register block
+ pub const PTR: *const nvic::RegisterBlock = 0xE000_E100 as *const _;
+}
+
+impl ops::Deref for NVIC {
+ type Target = self::nvic::RegisterBlock;
+
+ #[inline(always)]
+ fn deref(&self) -> &Self::Target {
+ unsafe { &*Self::PTR }
+ }
+}
+
+/// Security Attribution Unit
+#[allow(clippy::upper_case_acronyms)]
+pub struct SAU {
+ _marker: PhantomData<*const ()>,
+}
+
+unsafe impl Send for SAU {}
+
+#[cfg(armv8m)]
+impl SAU {
+ /// Pointer to the register block
+ pub const PTR: *const sau::RegisterBlock = 0xE000_EDD0 as *const _;
+}
+
+#[cfg(armv8m)]
+impl ops::Deref for SAU {
+ type Target = self::sau::RegisterBlock;
+
+ #[inline(always)]
+ fn deref(&self) -> &Self::Target {
+ unsafe { &*Self::PTR }
+ }
+}
+
+/// System Control Block
+#[allow(clippy::upper_case_acronyms)]
+pub struct SCB {
+ _marker: PhantomData<*const ()>,
+}
+
+unsafe impl Send for SCB {}
+
+impl SCB {
+ /// Pointer to the register block
+ pub const PTR: *const scb::RegisterBlock = 0xE000_ED04 as *const _;
+}
+
+impl ops::Deref for SCB {
+ type Target = self::scb::RegisterBlock;
+
+ #[inline(always)]
+ fn deref(&self) -> &Self::Target {
+ unsafe { &*Self::PTR }
+ }
+}
+
+/// SysTick: System Timer
+#[allow(clippy::upper_case_acronyms)]
+pub struct SYST {
+ _marker: PhantomData<*const ()>,
+}
+
+unsafe impl Send for SYST {}
+
+impl SYST {
+ /// Pointer to the register block
+ pub const PTR: *const syst::RegisterBlock = 0xE000_E010 as *const _;
+}
+
+impl ops::Deref for SYST {
+ type Target = self::syst::RegisterBlock;
+
+ #[inline(always)]
+ fn deref(&self) -> &Self::Target {
+ unsafe { &*Self::PTR }
+ }
+}
+
+/// Trace Port Interface Unit
+#[allow(clippy::upper_case_acronyms)]
+pub struct TPIU {
+ _marker: PhantomData<*const ()>,
+}
+
+unsafe impl Send for TPIU {}
+
+#[cfg(not(armv6m))]
+impl TPIU {
+ /// Pointer to the register block
+ pub const PTR: *const tpiu::RegisterBlock = 0xE004_0000 as *const _;
+}
+
+#[cfg(not(armv6m))]
+impl ops::Deref for TPIU {
+ type Target = self::tpiu::RegisterBlock;
+
+ #[inline(always)]
+ fn deref(&self) -> &Self::Target {
+ unsafe { &*Self::PTR }
+ }
+}
diff --git a/src/peripheral/mpu.rs b/src/peripheral/mpu.rs
new file mode 100644
index 0000000..3a5f5b4
--- /dev/null
+++ b/src/peripheral/mpu.rs
@@ -0,0 +1,65 @@
+//! Memory Protection Unit
+
+use volatile_register::{RO, RW};
+
+/// Register block for ARMv7-M
+#[cfg(not(armv8m))]
+#[repr(C)]
+pub struct RegisterBlock {
+ /// Type
+ pub _type: RO<u32>,
+ /// Control
+ pub ctrl: RW<u32>,
+ /// Region Number
+ pub rnr: RW<u32>,
+ /// Region Base Address
+ pub rbar: RW<u32>,
+ /// Region Attribute and Size
+ pub rasr: RW<u32>,
+ /// Alias 1 of RBAR
+ pub rbar_a1: RW<u32>,
+ /// Alias 1 of RASR
+ pub rasr_a1: RW<u32>,
+ /// Alias 2 of RBAR
+ pub rbar_a2: RW<u32>,
+ /// Alias 2 of RASR
+ pub rasr_a2: RW<u32>,
+ /// Alias 3 of RBAR
+ pub rbar_a3: RW<u32>,
+ /// Alias 3 of RASR
+ pub rasr_a3: RW<u32>,
+}
+
+/// Register block for ARMv8-M
+#[cfg(armv8m)]
+#[repr(C)]
+pub struct RegisterBlock {
+ /// Type
+ pub _type: RO<u32>,
+ /// Control
+ pub ctrl: RW<u32>,
+ /// Region Number
+ pub rnr: RW<u32>,
+ /// Region Base Address
+ pub rbar: RW<u32>,
+ /// Region Limit Address
+ pub rlar: RW<u32>,
+ /// Alias 1 of RBAR
+ pub rbar_a1: RW<u32>,
+ /// Alias 1 of RLAR
+ pub rlar_a1: RW<u32>,
+ /// Alias 2 of RBAR
+ pub rbar_a2: RW<u32>,
+ /// Alias 2 of RLAR
+ pub rlar_a2: RW<u32>,
+ /// Alias 3 of RBAR
+ pub rbar_a3: RW<u32>,
+ /// Alias 3 of RLAR
+ pub rlar_a3: RW<u32>,
+
+ // Reserved word at offset 0xBC
+ _reserved: u32,
+
+ /// Memory Attribute Indirection register 0 and 1
+ pub mair: [RW<u32>; 2],
+}
diff --git a/src/peripheral/nvic.rs b/src/peripheral/nvic.rs
new file mode 100644
index 0000000..57fa94b
--- /dev/null
+++ b/src/peripheral/nvic.rs
@@ -0,0 +1,265 @@
+//! Nested Vector Interrupt Controller
+
+use volatile_register::RW;
+#[cfg(not(armv6m))]
+use volatile_register::{RO, WO};
+
+use crate::interrupt::InterruptNumber;
+use crate::peripheral::NVIC;
+
+/// Register block
+#[repr(C)]
+pub struct RegisterBlock {
+ /// Interrupt Set-Enable
+ pub iser: [RW<u32>; 16],
+
+ _reserved0: [u32; 16],
+
+ /// Interrupt Clear-Enable
+ pub icer: [RW<u32>; 16],
+
+ _reserved1: [u32; 16],
+
+ /// Interrupt Set-Pending
+ pub ispr: [RW<u32>; 16],
+
+ _reserved2: [u32; 16],
+
+ /// Interrupt Clear-Pending
+ pub icpr: [RW<u32>; 16],
+
+ _reserved3: [u32; 16],
+
+ /// Interrupt Active Bit (not present on Cortex-M0 variants)
+ #[cfg(not(armv6m))]
+ pub iabr: [RO<u32>; 16],
+ #[cfg(armv6m)]
+ _reserved4: [u32; 16],
+
+ _reserved5: [u32; 48],
+
+ /// Interrupt Priority
+ ///
+ /// On ARMv7-M, 124 word-sized registers are available. Each of those
+ /// contains of 4 interrupt priorities of 8 byte each.The architecture
+ /// specifically allows accessing those along byte boundaries, so they are
+ /// represented as 496 byte-sized registers, for convenience, and to allow
+ /// atomic priority updates.
+ ///
+ /// On ARMv6-M, the registers must only be accessed along word boundaries,
+ /// so convenient byte-sized representation wouldn't work on that
+ /// architecture.
+ #[cfg(not(armv6m))]
+ pub ipr: [RW<u8>; 496],
+
+ /// Interrupt Priority
+ ///
+    /// On ARMv7-M, 124 word-sized registers are available. Each of those
+    /// holds four 8-bit interrupt priorities. The architecture specifically
+    /// allows accessing those along byte boundaries, so they are represented
+    /// as 496 byte-sized registers, for convenience and to allow atomic
+    /// priority updates.
+ ///
+ /// On ARMv6-M, the registers must only be accessed along word boundaries,
+ /// so convenient byte-sized representation wouldn't work on that
+ /// architecture.
+ #[cfg(armv6m)]
+ pub ipr: [RW<u32>; 8],
+
+ #[cfg(not(armv6m))]
+ _reserved6: [u32; 580],
+
+ /// Software Trigger Interrupt
+ #[cfg(not(armv6m))]
+ pub stir: WO<u32>,
+}
+
+impl NVIC {
+ /// Request an IRQ in software
+ ///
+ /// Writing a value to the INTID field is the same as manually pending an interrupt by setting
+ /// the corresponding interrupt bit in an Interrupt Set Pending Register. This is similar to
+ /// [`NVIC::pend`].
+ ///
+ /// This method is not available on ARMv6-M chips.
+ ///
+ /// [`NVIC::pend`]: #method.pend
+ #[cfg(not(armv6m))]
+ #[inline]
+ pub fn request<I>(&mut self, interrupt: I)
+ where
+ I: InterruptNumber,
+ {
+ let nr = interrupt.number();
+
+ unsafe {
+ self.stir.write(u32::from(nr));
+ }
+ }
+
+ /// Disables `interrupt`
+ #[inline]
+ pub fn mask<I>(interrupt: I)
+ where
+ I: InterruptNumber,
+ {
+ let nr = interrupt.number();
+ // NOTE(unsafe) this is a write to a stateless register
+ unsafe { (*Self::PTR).icer[usize::from(nr / 32)].write(1 << (nr % 32)) }
+ }
+
+ /// Enables `interrupt`
+ ///
+ /// This function is `unsafe` because it can break mask-based critical sections
+ #[inline]
+ pub unsafe fn unmask<I>(interrupt: I)
+ where
+ I: InterruptNumber,
+ {
+ let nr = interrupt.number();
+ // NOTE(ptr) this is a write to a stateless register
+ (*Self::PTR).iser[usize::from(nr / 32)].write(1 << (nr % 32))
+ }
+
+ /// Returns the NVIC priority of `interrupt`
+ ///
+ /// *NOTE* NVIC encodes priority in the highest bits of a byte so values like `1` and `2` map
+ /// to the same priority. Also for NVIC priorities, a lower value (e.g. `16`) has higher
+ /// priority (urgency) than a larger value (e.g. `32`).
+ #[inline]
+ pub fn get_priority<I>(interrupt: I) -> u8
+ where
+ I: InterruptNumber,
+ {
+ #[cfg(not(armv6m))]
+ {
+ let nr = interrupt.number();
+ // NOTE(unsafe) atomic read with no side effects
+ unsafe { (*Self::PTR).ipr[usize::from(nr)].read() }
+ }
+
+ #[cfg(armv6m)]
+ {
+ // NOTE(unsafe) atomic read with no side effects
+ let ipr_n = unsafe { (*Self::PTR).ipr[Self::ipr_index(interrupt)].read() };
+ let prio = (ipr_n >> Self::ipr_shift(interrupt)) & 0x0000_00ff;
+ prio as u8
+ }
+ }
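+
+    // Editor's note (not in the original source): because priority lives in
+    // the most significant bits, on a device with 4 implemented priority bits
+    // the values `0x10` and `0x1F` configure the same effective priority:
+    // both collapse to `0x10` once the unimplemented low bits are discarded.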
+
+ /// Is `interrupt` active or pre-empted and stacked
+ #[cfg(not(armv6m))]
+ #[inline]
+ pub fn is_active<I>(interrupt: I) -> bool
+ where
+ I: InterruptNumber,
+ {
+ let nr = interrupt.number();
+ let mask = 1 << (nr % 32);
+
+ // NOTE(unsafe) atomic read with no side effects
+ unsafe { ((*Self::PTR).iabr[usize::from(nr / 32)].read() & mask) == mask }
+ }
+
+ /// Checks if `interrupt` is enabled
+ #[inline]
+ pub fn is_enabled<I>(interrupt: I) -> bool
+ where
+ I: InterruptNumber,
+ {
+ let nr = interrupt.number();
+ let mask = 1 << (nr % 32);
+
+ // NOTE(unsafe) atomic read with no side effects
+ unsafe { ((*Self::PTR).iser[usize::from(nr / 32)].read() & mask) == mask }
+ }
+
+ /// Checks if `interrupt` is pending
+ #[inline]
+ pub fn is_pending<I>(interrupt: I) -> bool
+ where
+ I: InterruptNumber,
+ {
+ let nr = interrupt.number();
+ let mask = 1 << (nr % 32);
+
+ // NOTE(unsafe) atomic read with no side effects
+ unsafe { ((*Self::PTR).ispr[usize::from(nr / 32)].read() & mask) == mask }
+ }
+
+ /// Forces `interrupt` into pending state
+ #[inline]
+ pub fn pend<I>(interrupt: I)
+ where
+ I: InterruptNumber,
+ {
+ let nr = interrupt.number();
+
+        // NOTE(unsafe) atomic stateless write; ISPR doesn't store any state
+ unsafe { (*Self::PTR).ispr[usize::from(nr / 32)].write(1 << (nr % 32)) }
+ }
+
+ /// Sets the "priority" of `interrupt` to `prio`
+ ///
+ /// *NOTE* See [`get_priority`](struct.NVIC.html#method.get_priority) method for an explanation
+ /// of how NVIC priorities work.
+ ///
+ /// On ARMv6-M, updating an interrupt priority requires a read-modify-write operation. On
+ /// ARMv7-M, the operation is performed in a single atomic write operation.
+ ///
+    /// # Safety
+ ///
+ /// Changing priority levels can break priority-based critical sections (see
+ /// [`register::basepri`](crate::register::basepri)) and compromise memory safety.
+ #[inline]
+ pub unsafe fn set_priority<I>(&mut self, interrupt: I, prio: u8)
+ where
+ I: InterruptNumber,
+ {
+ #[cfg(not(armv6m))]
+ {
+ let nr = interrupt.number();
+ self.ipr[usize::from(nr)].write(prio)
+ }
+
+ #[cfg(armv6m)]
+ {
+ self.ipr[Self::ipr_index(interrupt)].modify(|value| {
+ let mask = 0x0000_00ff << Self::ipr_shift(interrupt);
+ let prio = u32::from(prio) << Self::ipr_shift(interrupt);
+
+ (value & !mask) | prio
+ })
+ }
+ }
+
+ /// Clears `interrupt`'s pending state
+ #[inline]
+ pub fn unpend<I>(interrupt: I)
+ where
+ I: InterruptNumber,
+ {
+ let nr = interrupt.number();
+
+ // NOTE(unsafe) atomic stateless write; ICPR doesn't store any state
+ unsafe { (*Self::PTR).icpr[usize::from(nr / 32)].write(1 << (nr % 32)) }
+ }
+
+ #[cfg(armv6m)]
+ #[inline]
+ fn ipr_index<I>(interrupt: I) -> usize
+ where
+ I: InterruptNumber,
+ {
+ usize::from(interrupt.number()) / 4
+ }
+
+ #[cfg(armv6m)]
+ #[inline]
+ fn ipr_shift<I>(interrupt: I) -> usize
+ where
+ I: InterruptNumber,
+ {
+ (usize::from(interrupt.number()) % 4) * 8
+ }
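+
+    // Editor's note (not in the original source): worked example for IRQ 10 on
+    // ARMv6-M: `ipr_index` is 10 / 4 = 2 and `ipr_shift` is (10 % 4) * 8 = 16,
+    // i.e. the priority byte sits in bits [23:16] of IPR2.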
+}
diff --git a/src/peripheral/sau.rs b/src/peripheral/sau.rs
new file mode 100644
index 0000000..da91aca
--- /dev/null
+++ b/src/peripheral/sau.rs
@@ -0,0 +1,243 @@
+//! Security Attribution Unit
+//!
+//! *NOTE* Available only on Armv8-M and Armv8.1-M, for the following Rust target triples:
+//! * `thumbv8m.base-none-eabi`
+//! * `thumbv8m.main-none-eabi`
+//! * `thumbv8m.main-none-eabihf`
+//!
+//! For reference please check the section B8.3 of the Armv8-M Architecture Reference Manual.
+
+use crate::interrupt;
+use crate::peripheral::SAU;
+use bitfield::bitfield;
+use volatile_register::{RO, RW};
+
+/// Register block
+#[repr(C)]
+pub struct RegisterBlock {
+ /// Control Register
+ pub ctrl: RW<Ctrl>,
+ /// Type Register
+ pub _type: RO<Type>,
+ /// Region Number Register
+ pub rnr: RW<Rnr>,
+ /// Region Base Address Register
+ pub rbar: RW<Rbar>,
+ /// Region Limit Address Register
+ pub rlar: RW<Rlar>,
+ /// Secure Fault Status Register
+ pub sfsr: RO<Sfsr>,
+ /// Secure Fault Address Register
+ pub sfar: RO<Sfar>,
+}
+
+bitfield! {
+ /// Control Register description
+ #[repr(C)]
+ #[derive(Copy, Clone)]
+ pub struct Ctrl(u32);
+ get_enable, set_enable: 0;
+ get_allns, set_allns: 1;
+}
+
+bitfield! {
+ /// Type Register description
+ #[repr(C)]
+ #[derive(Copy, Clone)]
+ pub struct Type(u32);
+ u8;
+ sregion, _: 7, 0;
+}
+
+bitfield! {
+ /// Region Number Register description
+ #[repr(C)]
+ #[derive(Copy, Clone)]
+ pub struct Rnr(u32);
+ u8;
+ get_region, set_region: 7, 0;
+}
+
+bitfield! {
+ /// Region Base Address Register description
+ #[repr(C)]
+ #[derive(Copy, Clone)]
+ pub struct Rbar(u32);
+ u32;
+ get_baddr, set_baddr: 31, 5;
+}
+
+bitfield! {
+ /// Region Limit Address Register description
+ #[repr(C)]
+ #[derive(Copy, Clone)]
+ pub struct Rlar(u32);
+ u32;
+ get_laddr, set_laddr: 31, 5;
+ get_nsc, set_nsc: 1;
+ get_enable, set_enable: 0;
+}
+
+bitfield! {
+ /// Secure Fault Status Register description
+ #[repr(C)]
+ #[derive(Copy, Clone)]
+ pub struct Sfsr(u32);
+ invep, _: 0;
+ invis, _: 1;
+ inver, _: 2;
+ auviol, _: 3;
+ invtran, _: 4;
+ lsperr, _: 5;
+ sfarvalid, _: 6;
+ lserr, _: 7;
+}
+
+bitfield! {
+ /// Secure Fault Address Register description
+ #[repr(C)]
+ #[derive(Copy, Clone)]
+ pub struct Sfar(u32);
+ u32;
+ address, _: 31, 0;
+}
+
+/// Possible attribute of a SAU region.
+#[derive(Debug)]
+pub enum SauRegionAttribute {
+ /// SAU region is Secure
+ Secure,
+ /// SAU region is Non-Secure Callable
+ NonSecureCallable,
+ /// SAU region is Non-Secure
+ NonSecure,
+}
+
+/// Description of a SAU region.
+#[derive(Debug)]
+pub struct SauRegion {
+ /// First address of the region, its 5 least significant bits must be set to zero.
+ pub base_address: u32,
+ /// Last address of the region, its 5 least significant bits must be set to one.
+ pub limit_address: u32,
+ /// Attribute of the region.
+ pub attribute: SauRegionAttribute,
+}
+
+/// Possible error values returned by the SAU methods.
+#[derive(Debug)]
+pub enum SauError {
+ /// The region number parameter to set or get a region must be between 0 and
+    /// `region_numbers() - 1`.
+ RegionNumberTooBig,
+ /// Bits 0 to 4 of the base address of a SAU region must be set to zero.
+ WrongBaseAddress,
+ /// Bits 0 to 4 of the limit address of a SAU region must be set to one.
+ WrongLimitAddress,
+}
+
+impl SAU {
+ /// Get the number of implemented SAU regions.
+ #[inline]
+ pub fn region_numbers(&self) -> u8 {
+ self._type.read().sregion()
+ }
+
+ /// Enable the SAU.
+ #[inline]
+ pub fn enable(&mut self) {
+ unsafe {
+ self.ctrl.modify(|mut ctrl| {
+ ctrl.set_enable(true);
+ ctrl
+ });
+ }
+ }
+
+    /// Set a SAU region by region number.
+    ///
+    /// SAU regions must be 32-byte aligned and their sizes must be a multiple of 32 bytes. This
+    /// means that the 5 least significant bits of the base address of a SAU region must be set to
+    /// zero and the 5 least significant bits of the limit address must be set to one.
+    /// The region number must be valid.
+    /// This function runs inside a critical section to prevent inconsistent results.
+ #[inline]
+ pub fn set_region(&mut self, region_number: u8, region: SauRegion) -> Result<(), SauError> {
+ interrupt::free(|_| {
+ let base_address = region.base_address;
+ let limit_address = region.limit_address;
+ let attribute = region.attribute;
+
+ if region_number >= self.region_numbers() {
+ Err(SauError::RegionNumberTooBig)
+ } else if base_address & 0x1F != 0 {
+ Err(SauError::WrongBaseAddress)
+ } else if limit_address & 0x1F != 0x1F {
+ Err(SauError::WrongLimitAddress)
+ } else {
+ // All fields of these registers are going to be modified so we don't need to read them
+ // before.
+ let mut rnr = Rnr(0);
+ let mut rbar = Rbar(0);
+ let mut rlar = Rlar(0);
+
+ rnr.set_region(region_number);
+ rbar.set_baddr(base_address >> 5);
+ rlar.set_laddr(limit_address >> 5);
+
+ match attribute {
+ SauRegionAttribute::Secure => {
+ rlar.set_nsc(false);
+ rlar.set_enable(false);
+ }
+ SauRegionAttribute::NonSecureCallable => {
+ rlar.set_nsc(true);
+ rlar.set_enable(true);
+ }
+ SauRegionAttribute::NonSecure => {
+ rlar.set_nsc(false);
+ rlar.set_enable(true);
+ }
+ }
+
+ unsafe {
+ self.rnr.write(rnr);
+ self.rbar.write(rbar);
+ self.rlar.write(rlar);
+ }
+
+ Ok(())
+ }
+ })
+ }
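+
+    // Editor's sketch (not in the original source): mark a 32 KiB region as
+    // Non-Secure Callable. The addresses are illustrative only; note that the
+    // base clears its 5 low bits and the limit sets them, per the rules above.
+    //
+    //     sau.set_region(0, SauRegion {
+    //         base_address: 0x0000_8000,
+    //         limit_address: 0x0000_FFFF,
+    //         attribute: SauRegionAttribute::NonSecureCallable,
+    //     })?;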
+
+    /// Get a region from the SAU.
+    ///
+    /// The region number must be valid.
+    /// This function runs inside a critical section to prevent inconsistent results.
+ #[inline]
+ pub fn get_region(&mut self, region_number: u8) -> Result<SauRegion, SauError> {
+ interrupt::free(|_| {
+ if region_number >= self.region_numbers() {
+ Err(SauError::RegionNumberTooBig)
+ } else {
+ unsafe {
+ self.rnr.write(Rnr(region_number.into()));
+ }
+
+ let rbar = self.rbar.read();
+ let rlar = self.rlar.read();
+
+ let attribute = match (rlar.get_enable(), rlar.get_nsc()) {
+ (false, _) => SauRegionAttribute::Secure,
+ (true, false) => SauRegionAttribute::NonSecure,
+ (true, true) => SauRegionAttribute::NonSecureCallable,
+ };
+
+ Ok(SauRegion {
+ base_address: rbar.get_baddr() << 5,
+ limit_address: (rlar.get_laddr() << 5) | 0x1F,
+ attribute,
+ })
+ }
+ })
+ }
+}
diff --git a/src/peripheral/scb.rs b/src/peripheral/scb.rs
new file mode 100644
index 0000000..b9cf0e4
--- /dev/null
+++ b/src/peripheral/scb.rs
@@ -0,0 +1,1110 @@
+//! System Control Block
+
+use core::ptr;
+
+use volatile_register::RW;
+
+#[cfg(not(armv6m))]
+use super::cpuid::CsselrCacheType;
+#[cfg(not(armv6m))]
+use super::CBP;
+#[cfg(not(armv6m))]
+use super::CPUID;
+use super::SCB;
+#[cfg(feature = "serde")]
+use serde::{Deserialize, Serialize};
+
+/// Register block
+#[repr(C)]
+pub struct RegisterBlock {
+ /// Interrupt Control and State
+ pub icsr: RW<u32>,
+
+ /// Vector Table Offset (not present on Cortex-M0 variants)
+ pub vtor: RW<u32>,
+
+ /// Application Interrupt and Reset Control
+ pub aircr: RW<u32>,
+
+ /// System Control
+ pub scr: RW<u32>,
+
+ /// Configuration and Control
+ pub ccr: RW<u32>,
+
+ /// System Handler Priority (word accessible only on Cortex-M0 variants)
+ ///
+ /// On ARMv7-M, `shpr[0]` points to SHPR1
+ ///
+ /// On ARMv6-M, `shpr[0]` points to SHPR2
+ #[cfg(not(armv6m))]
+ pub shpr: [RW<u8>; 12],
+ #[cfg(armv6m)]
+ _reserved1: u32,
+ /// System Handler Priority (word accessible only on Cortex-M0 variants)
+ ///
+ /// On ARMv7-M, `shpr[0]` points to SHPR1
+ ///
+ /// On ARMv6-M, `shpr[0]` points to SHPR2
+ #[cfg(armv6m)]
+ pub shpr: [RW<u32>; 2],
+
+ /// System Handler Control and State
+ pub shcsr: RW<u32>,
+
+ /// Configurable Fault Status (not present on Cortex-M0 variants)
+ #[cfg(not(armv6m))]
+ pub cfsr: RW<u32>,
+ #[cfg(armv6m)]
+ _reserved2: u32,
+
+ /// HardFault Status (not present on Cortex-M0 variants)
+ #[cfg(not(armv6m))]
+ pub hfsr: RW<u32>,
+ #[cfg(armv6m)]
+ _reserved3: u32,
+
+ /// Debug Fault Status (not present on Cortex-M0 variants)
+ #[cfg(not(armv6m))]
+ pub dfsr: RW<u32>,
+ #[cfg(armv6m)]
+ _reserved4: u32,
+
+ /// MemManage Fault Address (not present on Cortex-M0 variants)
+ #[cfg(not(armv6m))]
+ pub mmfar: RW<u32>,
+ #[cfg(armv6m)]
+ _reserved5: u32,
+
+ /// BusFault Address (not present on Cortex-M0 variants)
+ #[cfg(not(armv6m))]
+ pub bfar: RW<u32>,
+ #[cfg(armv6m)]
+ _reserved6: u32,
+
+ /// Auxiliary Fault Status (not present on Cortex-M0 variants)
+ #[cfg(not(armv6m))]
+ pub afsr: RW<u32>,
+ #[cfg(armv6m)]
+ _reserved7: u32,
+
+ _reserved8: [u32; 18],
+
+ /// Coprocessor Access Control (not present on Cortex-M0 variants)
+ #[cfg(not(armv6m))]
+ pub cpacr: RW<u32>,
+ #[cfg(armv6m)]
+ _reserved9: u32,
+}
+
+/// FPU access mode
+#[cfg(has_fpu)]
+#[derive(Clone, Copy, Debug, PartialEq, Eq)]
+pub enum FpuAccessMode {
+ /// FPU is not accessible
+ Disabled,
+ /// FPU is accessible in Privileged and User mode
+ Enabled,
+ /// FPU is accessible in Privileged mode only
+ Privileged,
+}
+
+#[cfg(has_fpu)]
+mod fpu_consts {
+ pub const SCB_CPACR_FPU_MASK: u32 = 0b11_11 << 20;
+ pub const SCB_CPACR_FPU_ENABLE: u32 = 0b01_01 << 20;
+ pub const SCB_CPACR_FPU_USER: u32 = 0b10_10 << 20;
+}
+
+#[cfg(has_fpu)]
+use self::fpu_consts::*;
+
+#[cfg(has_fpu)]
+impl SCB {
+ /// Shorthand for `set_fpu_access_mode(FpuAccessMode::Disabled)`
+ #[inline]
+ pub fn disable_fpu(&mut self) {
+ self.set_fpu_access_mode(FpuAccessMode::Disabled)
+ }
+
+ /// Shorthand for `set_fpu_access_mode(FpuAccessMode::Enabled)`
+ #[inline]
+ pub fn enable_fpu(&mut self) {
+ self.set_fpu_access_mode(FpuAccessMode::Enabled)
+ }
+
+ /// Gets FPU access mode
+ #[inline]
+ pub fn fpu_access_mode() -> FpuAccessMode {
+ // NOTE(unsafe) atomic read operation with no side effects
+ let cpacr = unsafe { (*Self::PTR).cpacr.read() };
+
+ if cpacr & SCB_CPACR_FPU_MASK == SCB_CPACR_FPU_ENABLE | SCB_CPACR_FPU_USER {
+ FpuAccessMode::Enabled
+ } else if cpacr & SCB_CPACR_FPU_MASK == SCB_CPACR_FPU_ENABLE {
+ FpuAccessMode::Privileged
+ } else {
+ FpuAccessMode::Disabled
+ }
+ }
+
+ /// Sets FPU access mode
+ ///
+ /// *IMPORTANT* Any function that runs fully or partly with the FPU disabled must *not* take any
+ /// floating-point arguments or have any floating-point local variables. Because the compiler
+ /// might inline such a function into a caller that does have floating-point arguments or
+    /// variables, any such function must also be marked `#[inline(never)]`.
+ #[inline]
+ pub fn set_fpu_access_mode(&mut self, mode: FpuAccessMode) {
+ let mut cpacr = self.cpacr.read() & !SCB_CPACR_FPU_MASK;
+ match mode {
+ FpuAccessMode::Disabled => (),
+ FpuAccessMode::Privileged => cpacr |= SCB_CPACR_FPU_ENABLE,
+ FpuAccessMode::Enabled => cpacr |= SCB_CPACR_FPU_ENABLE | SCB_CPACR_FPU_USER,
+ }
+ unsafe { self.cpacr.write(cpacr) }
+ }
+}
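+
+// Editor's sketch (not in the original source): grant both privileged and
+// unprivileged code access to the FPU before executing any floating-point
+// instructions. Assumes `scb` is the `SCB` singleton.
+//
+//     scb.enable_fpu();
+//     cortex_m::asm::isb(); // ensure the CPACR write has taken effect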
+
+impl SCB {
+ /// Returns the active exception number
+ #[inline]
+ pub fn vect_active() -> VectActive {
+ let icsr =
+ unsafe { ptr::read_volatile(&(*SCB::PTR).icsr as *const _ as *const u32) } & 0x1FF;
+
+ match icsr as u16 {
+ 0 => VectActive::ThreadMode,
+ 2 => VectActive::Exception(Exception::NonMaskableInt),
+ 3 => VectActive::Exception(Exception::HardFault),
+ #[cfg(not(armv6m))]
+ 4 => VectActive::Exception(Exception::MemoryManagement),
+ #[cfg(not(armv6m))]
+ 5 => VectActive::Exception(Exception::BusFault),
+ #[cfg(not(armv6m))]
+ 6 => VectActive::Exception(Exception::UsageFault),
+ #[cfg(any(armv8m, native))]
+ 7 => VectActive::Exception(Exception::SecureFault),
+ 11 => VectActive::Exception(Exception::SVCall),
+ #[cfg(not(armv6m))]
+ 12 => VectActive::Exception(Exception::DebugMonitor),
+ 14 => VectActive::Exception(Exception::PendSV),
+ 15 => VectActive::Exception(Exception::SysTick),
+ irqn => VectActive::Interrupt { irqn: irqn - 16 },
+ }
+ }
+}
+
+/// Processor core exceptions (internal interrupts)
+#[derive(Clone, Copy, Debug, Eq, PartialEq)]
+#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
+#[cfg_attr(feature = "std", derive(PartialOrd, Hash))]
+pub enum Exception {
+ /// Non maskable interrupt
+ NonMaskableInt,
+
+ /// Hard fault interrupt
+ HardFault,
+
+ /// Memory management interrupt (not present on Cortex-M0 variants)
+ #[cfg(not(armv6m))]
+ MemoryManagement,
+
+ /// Bus fault interrupt (not present on Cortex-M0 variants)
+ #[cfg(not(armv6m))]
+ BusFault,
+
+ /// Usage fault interrupt (not present on Cortex-M0 variants)
+ #[cfg(not(armv6m))]
+ UsageFault,
+
+ /// Secure fault interrupt (only on ARMv8-M)
+ #[cfg(any(armv8m, native))]
+ SecureFault,
+
+ /// SV call interrupt
+ SVCall,
+
+ /// Debug monitor interrupt (not present on Cortex-M0 variants)
+ #[cfg(not(armv6m))]
+ DebugMonitor,
+
+ /// Pend SV interrupt
+ PendSV,
+
+ /// System Tick interrupt
+ SysTick,
+}
+
+impl Exception {
+ /// Returns the IRQ number of this `Exception`
+ ///
+    /// The return value is always within the closed range `[-14, -1]`
+ #[inline]
+ pub fn irqn(self) -> i8 {
+ match self {
+ Exception::NonMaskableInt => -14,
+ Exception::HardFault => -13,
+ #[cfg(not(armv6m))]
+ Exception::MemoryManagement => -12,
+ #[cfg(not(armv6m))]
+ Exception::BusFault => -11,
+ #[cfg(not(armv6m))]
+ Exception::UsageFault => -10,
+ #[cfg(any(armv8m, native))]
+ Exception::SecureFault => -9,
+ Exception::SVCall => -5,
+ #[cfg(not(armv6m))]
+ Exception::DebugMonitor => -4,
+ Exception::PendSV => -2,
+ Exception::SysTick => -1,
+ }
+ }
+}
+
+/// Active exception number
+#[derive(Clone, Copy, Debug, Eq, PartialEq)]
+#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
+#[cfg_attr(feature = "std", derive(PartialOrd, Hash))]
+pub enum VectActive {
+ /// Thread mode
+ ThreadMode,
+
+ /// Processor core exception (internal interrupts)
+ Exception(Exception),
+
+ /// Device specific exception (external interrupts)
+ Interrupt {
+        /// Interrupt number. This number is always within the half-open range `[0, 512)` (9 bits)
+ irqn: u16,
+ },
+}
+
+impl VectActive {
+ /// Converts a vector number into `VectActive`
+ #[inline]
+ pub fn from(vect_active: u16) -> Option<Self> {
+ Some(match vect_active {
+ 0 => VectActive::ThreadMode,
+ 2 => VectActive::Exception(Exception::NonMaskableInt),
+ 3 => VectActive::Exception(Exception::HardFault),
+ #[cfg(not(armv6m))]
+ 4 => VectActive::Exception(Exception::MemoryManagement),
+ #[cfg(not(armv6m))]
+ 5 => VectActive::Exception(Exception::BusFault),
+ #[cfg(not(armv6m))]
+ 6 => VectActive::Exception(Exception::UsageFault),
+ #[cfg(any(armv8m, native))]
+ 7 => VectActive::Exception(Exception::SecureFault),
+ 11 => VectActive::Exception(Exception::SVCall),
+ #[cfg(not(armv6m))]
+ 12 => VectActive::Exception(Exception::DebugMonitor),
+ 14 => VectActive::Exception(Exception::PendSV),
+ 15 => VectActive::Exception(Exception::SysTick),
+ irqn if (16..512).contains(&irqn) => VectActive::Interrupt { irqn: irqn - 16 },
+ _ => return None,
+ })
+ }
+}
+
+#[cfg(not(armv6m))]
+mod scb_consts {
+ pub const SCB_CCR_IC_MASK: u32 = 1 << 17;
+ pub const SCB_CCR_DC_MASK: u32 = 1 << 16;
+}
+
+#[cfg(not(armv6m))]
+use self::scb_consts::*;
+
+#[cfg(not(armv6m))]
+impl SCB {
+ /// Enables I-cache if currently disabled.
+ ///
+ /// This operation first invalidates the entire I-cache.
+ #[inline]
+ pub fn enable_icache(&mut self) {
+ // Don't do anything if I-cache is already enabled
+ if Self::icache_enabled() {
+ return;
+ }
+
+ // NOTE(unsafe): No races as all CBP registers are write-only and stateless
+ let mut cbp = unsafe { CBP::new() };
+
+ // Invalidate I-cache
+ cbp.iciallu();
+
+ // Enable I-cache
+ extern "C" {
+ // see asm-v7m.s
+ fn __enable_icache();
+ }
+
+ // NOTE(unsafe): The asm routine manages exclusive access to the SCB
+ // registers and applies the proper barriers; it is technically safe on
+ // its own, and is only `unsafe` here because it's `extern "C"`.
+ unsafe {
+ __enable_icache();
+ }
+ }
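+
+    // Editor's sketch (not in the original source): enabling both caches at
+    // startup on a core that has them, e.g. a Cortex-M7. Assumes `scb` and
+    // `cpuid` come from `Peripherals::take()`.
+    //
+    //     scb.enable_icache();
+    //     scb.enable_dcache(&mut cpuid);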
+
+ /// Disables I-cache if currently enabled.
+ ///
+ /// This operation invalidates the entire I-cache after disabling.
+ #[inline]
+ pub fn disable_icache(&mut self) {
+ // Don't do anything if I-cache is already disabled
+ if !Self::icache_enabled() {
+ return;
+ }
+
+ // NOTE(unsafe): No races as all CBP registers are write-only and stateless
+ let mut cbp = unsafe { CBP::new() };
+
+ // Disable I-cache
+ // NOTE(unsafe): We have synchronised access by &mut self
+ unsafe { self.ccr.modify(|r| r & !SCB_CCR_IC_MASK) };
+
+ // Invalidate I-cache
+ cbp.iciallu();
+
+ crate::asm::dsb();
+ crate::asm::isb();
+ }
+
+ /// Returns whether the I-cache is currently enabled.
+ #[inline(always)]
+ pub fn icache_enabled() -> bool {
+ crate::asm::dsb();
+ crate::asm::isb();
+
+ // NOTE(unsafe): atomic read with no side effects
+ unsafe { (*Self::PTR).ccr.read() & SCB_CCR_IC_MASK == SCB_CCR_IC_MASK }
+ }
+
+ /// Invalidates the entire I-cache.
+ #[inline]
+ pub fn invalidate_icache(&mut self) {
+ // NOTE(unsafe): No races as all CBP registers are write-only and stateless
+ let mut cbp = unsafe { CBP::new() };
+
+ // Invalidate I-cache
+ cbp.iciallu();
+
+ crate::asm::dsb();
+ crate::asm::isb();
+ }
+
+ /// Enables D-cache if currently disabled.
+ ///
+ /// This operation first invalidates the entire D-cache, ensuring it does
+ /// not contain stale values before being enabled.
+ #[inline]
+ pub fn enable_dcache(&mut self, cpuid: &mut CPUID) {
+ // Don't do anything if D-cache is already enabled
+ if Self::dcache_enabled() {
+ return;
+ }
+
+ // Invalidate anything currently in the D-cache
+ unsafe { self.invalidate_dcache(cpuid) };
+
+ // Now turn on the D-cache
+ extern "C" {
+ // see asm-v7m.s
+ fn __enable_dcache();
+ }
+
+ // NOTE(unsafe): The asm routine manages exclusive access to the SCB
+ // registers and applies the proper barriers; it is technically safe on
+ // its own, and is only `unsafe` here because it's `extern "C"`.
+ unsafe {
+ __enable_dcache();
+ }
+ }
+
+ /// Disables D-cache if currently enabled.
+ ///
+ /// This operation subsequently cleans and invalidates the entire D-cache,
+ /// ensuring all contents are safely written back to main memory after disabling.
+ #[inline]
+ pub fn disable_dcache(&mut self, cpuid: &mut CPUID) {
+ // Don't do anything if D-cache is already disabled
+ if !Self::dcache_enabled() {
+ return;
+ }
+
+ // Turn off the D-cache
+ // NOTE(unsafe): We have synchronised access by &mut self
+ unsafe { self.ccr.modify(|r| r & !SCB_CCR_DC_MASK) };
+
+ // Clean and invalidate whatever was left in it
+ self.clean_invalidate_dcache(cpuid);
+ }
+
+ /// Returns whether the D-cache is currently enabled.
+ #[inline]
+ pub fn dcache_enabled() -> bool {
+ crate::asm::dsb();
+ crate::asm::isb();
+
+ // NOTE(unsafe) atomic read with no side effects
+ unsafe { (*Self::PTR).ccr.read() & SCB_CCR_DC_MASK == SCB_CCR_DC_MASK }
+ }
+
+ /// Invalidates the entire D-cache.
+ ///
+    /// Note that calling this while the D-cache is enabled will probably wipe out the
+    /// stack, depending on optimisations, thereby breaking the return to the call site.
+    ///
+    /// It's used immediately before enabling the D-cache, which is why it is not exported
+    /// publicly.
+ #[inline]
+ unsafe fn invalidate_dcache(&mut self, cpuid: &mut CPUID) {
+ // NOTE(unsafe): No races as all CBP registers are write-only and stateless
+ let mut cbp = CBP::new();
+
+ // Read number of sets and ways
+ let (sets, ways) = cpuid.cache_num_sets_ways(0, CsselrCacheType::DataOrUnified);
+
+ // Invalidate entire D-cache
+ for set in 0..sets {
+ for way in 0..ways {
+ cbp.dcisw(set, way);
+ }
+ }
+
+ crate::asm::dsb();
+ crate::asm::isb();
+ }
+
+ /// Cleans the entire D-cache.
+ ///
+ /// This function causes everything in the D-cache to be written back to main memory,
+ /// overwriting whatever is already there.
+ #[inline]
+ pub fn clean_dcache(&mut self, cpuid: &mut CPUID) {
+ // NOTE(unsafe): No races as all CBP registers are write-only and stateless
+ let mut cbp = unsafe { CBP::new() };
+
+ // Read number of sets and ways
+ let (sets, ways) = cpuid.cache_num_sets_ways(0, CsselrCacheType::DataOrUnified);
+
+ for set in 0..sets {
+ for way in 0..ways {
+ cbp.dccsw(set, way);
+ }
+ }
+
+ crate::asm::dsb();
+ crate::asm::isb();
+ }
+
+ /// Cleans and invalidates the entire D-cache.
+ ///
+ /// This function causes everything in the D-cache to be written back to main memory,
+ /// and then marks the entire D-cache as invalid, causing future reads to first fetch
+ /// from main memory.
+ #[inline]
+ pub fn clean_invalidate_dcache(&mut self, cpuid: &mut CPUID) {
+ // NOTE(unsafe): No races as all CBP registers are write-only and stateless
+ let mut cbp = unsafe { CBP::new() };
+
+ // Read number of sets and ways
+ let (sets, ways) = cpuid.cache_num_sets_ways(0, CsselrCacheType::DataOrUnified);
+
+ for set in 0..sets {
+ for way in 0..ways {
+ cbp.dccisw(set, way);
+ }
+ }
+
+ crate::asm::dsb();
+ crate::asm::isb();
+ }
+
+ /// Invalidates D-cache by address.
+ ///
+ /// * `addr`: The address to invalidate, which must be cache-line aligned.
+ /// * `size`: Number of bytes to invalidate, which must be a multiple of the cache line size.
+ ///
+ /// Invalidates D-cache cache lines, starting from the first line containing `addr`,
+ /// finishing once at least `size` bytes have been invalidated.
+ ///
+ /// Invalidation causes the next read access to memory to be fetched from main memory instead
+ /// of the cache.
+ ///
+ /// # Cache Line Sizes
+ ///
+ /// Cache line sizes vary by core. For all Cortex-M7 cores, the cache line size is fixed
+ /// to 32 bytes, which means `addr` must be 32-byte aligned and `size` must be a multiple
+ /// of 32. At the time of writing, no other Cortex-M cores have data caches.
+ ///
+ /// If `addr` is not cache-line aligned, or `size` is not a multiple of the cache line size,
+ /// other data before or after the desired memory would also be invalidated, which can very
+ /// easily cause memory corruption and undefined behaviour.
+ ///
+ /// # Safety
+ ///
+ /// After invalidating, the next read of invalidated data will be from main memory. This may
+ /// cause recent writes to be lost, potentially including writes that initialized objects.
+ /// Therefore, this method may cause uninitialized memory or invalid values to be read,
+ /// resulting in undefined behaviour. You must ensure that main memory contains valid and
+ /// initialized values before invalidating.
+ ///
+ /// `addr` **must** be aligned to the size of the cache lines, and `size` **must** be a
+ /// multiple of the cache line size, otherwise this function will invalidate other memory,
+ /// easily leading to memory corruption and undefined behaviour. This precondition is checked
+ /// in debug builds using a `debug_assert!()`, but not checked in release builds to avoid
+ /// a runtime-dependent `panic!()` call.
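+ ///
+ /// # Example
+ ///
+ /// A minimal sketch of the usual DMA-receive pattern; `start_dma_read` and
+ /// `wait_for_dma_done` are hypothetical stand-ins for a real driver, and `scb`
+ /// is the `SCB` peripheral instance:
+ ///
+ /// ```ignore
+ /// #[repr(align(32))]
+ /// struct Aligned([u8; 64]);
+ ///
+ /// let mut rx = Aligned([0; 64]);
+ ///
+ /// // The DMA engine writes to `rx` behind the cache's back...
+ /// start_dma_read(rx.0.as_mut_ptr(), rx.0.len());
+ /// wait_for_dma_done();
+ ///
+ /// // ...so stale cached lines must be discarded before the CPU reads the data.
+ /// unsafe { scb.invalidate_dcache_by_address(rx.0.as_ptr() as usize, rx.0.len()) };
+ /// ```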
+ #[inline]
+ pub unsafe fn invalidate_dcache_by_address(&mut self, addr: usize, size: usize) {
+ // No-op zero sized operations
+ if size == 0 {
+ return;
+ }
+
+ // NOTE(unsafe): No races as all CBP registers are write-only and stateless
+ let mut cbp = CBP::new();
+
+ // dminline is log2(num words), so 2**dminline * 4 gives size in bytes
+ let dminline = CPUID::cache_dminline();
+ let line_size = (1 << dminline) * 4;
+
+ debug_assert!((addr & (line_size - 1)) == 0);
+ debug_assert!((size & (line_size - 1)) == 0);
+
+ crate::asm::dsb();
+
+ // Find number of cache lines to invalidate
+ let num_lines = ((size - 1) / line_size) + 1;
+
+ // Compute address of first cache line
+ let mask = 0xFFFF_FFFF - (line_size - 1);
+ let mut addr = addr & mask;
+
+ for _ in 0..num_lines {
+ cbp.dcimvac(addr as u32);
+ addr += line_size;
+ }
+
+ crate::asm::dsb();
+ crate::asm::isb();
+ }
+
+ /// Invalidates an object from the D-cache.
+ ///
+ /// * `obj`: The object to invalidate.
+ ///
+ /// Invalidates D-cache starting from the first cache line containing `obj`,
+ /// continuing to invalidate cache lines until all of `obj` has been invalidated.
+ ///
+ /// Invalidation causes the next read access to memory to be fetched from main memory instead
+ /// of the cache.
+ ///
+ /// # Cache Line Sizes
+ ///
+ /// Cache line sizes vary by core. For all Cortex-M7 cores, the cache line size is fixed
+ /// to 32 bytes, which means `obj` must be 32-byte aligned, and its size must be a multiple
+ /// of 32 bytes. At the time of writing, no other Cortex-M cores have data caches.
+ ///
+ /// If `obj` is not cache-line aligned, or its size is not a multiple of the cache line size,
+ /// other data before or after the desired memory would also be invalidated, which can very
+ /// easily cause memory corruption and undefined behaviour.
+ ///
+ /// # Safety
+ ///
+ /// After invalidating, `obj` will be read from main memory on next access. This may cause
+ /// recent writes to `obj` to be lost, potentially including the write that initialized it.
+ /// Therefore, this method may cause uninitialized memory or invalid values to be read,
+ /// resulting in undefined behaviour. You must ensure that main memory contains a valid and
+ /// initialized value for T before invalidating `obj`.
+ ///
+ /// `obj` **must** be aligned to the size of the cache lines, and its size **must** be a
+ /// multiple of the cache line size, otherwise this function will invalidate other memory,
+ /// easily leading to memory corruption and undefined behaviour. This precondition is checked
+ /// in debug builds using a `debug_assert!()`, but not checked in release builds to avoid
+ /// a runtime-dependent `panic!()` call.
+ #[inline]
+ pub unsafe fn invalidate_dcache_by_ref<T>(&mut self, obj: &mut T) {
+ self.invalidate_dcache_by_address(obj as *const T as usize, core::mem::size_of::<T>());
+ }
+
+ /// Invalidates a slice from the D-cache.
+ ///
+ /// * `slice`: The slice to invalidate.
+ ///
+ /// Invalidates D-cache starting from the first cache line containing members of `slice`,
+ /// continuing to invalidate cache lines until all of `slice` has been invalidated.
+ ///
+ /// Invalidation causes the next read access to memory to be fetched from main memory instead
+ /// of the cache.
+ ///
+ /// # Cache Line Sizes
+ ///
+ /// Cache line sizes vary by core. For all Cortex-M7 cores, the cache line size is fixed
+ /// to 32 bytes, which means `slice` must be 32-byte aligned, and its size must be a multiple
+ /// of 32 bytes. At the time of writing, no other Cortex-M cores have data caches.
+ ///
+ /// If `slice` is not cache-line aligned, or its size is not a multiple of the cache line size,
+ /// other data before or after the desired memory would also be invalidated, which can very
+ /// easily cause memory corruption and undefined behaviour.
+ ///
+ /// # Safety
+ ///
+ /// After invalidating, `slice` will be read from main memory on next access. This may cause
+ /// recent writes to `slice` to be lost, potentially including the write that initialized it.
+ /// Therefore, this method may cause uninitialized memory or invalid values to be read,
+ /// resulting in undefined behaviour. You must ensure that main memory contains valid and
+ /// initialized values for T before invalidating `slice`.
+ ///
+ /// `slice` **must** be aligned to the size of the cache lines, and its size **must** be a
+ /// multiple of the cache line size, otherwise this function will invalidate other memory,
+ /// easily leading to memory corruption and undefined behaviour. This precondition is checked
+ /// in debug builds using a `debug_assert!()`, but not checked in release builds to avoid
+ /// a runtime-dependent `panic!()` call.
+ #[inline]
+ pub unsafe fn invalidate_dcache_by_slice<T>(&mut self, slice: &mut [T]) {
+ self.invalidate_dcache_by_address(
+ slice.as_ptr() as usize,
+ slice.len() * core::mem::size_of::<T>(),
+ );
+ }
+
+ /// Cleans D-cache by address.
+ ///
+ /// * `addr`: The address to start cleaning at.
+ /// * `size`: The number of bytes to clean.
+ ///
+ /// Cleans D-cache cache lines, starting from the first line containing `addr`,
+ /// finishing once at least `size` bytes have been cleaned.
+ ///
+ /// Cleaning the cache causes whatever data is present in the cache to be immediately written
+ /// to main memory, overwriting whatever was in main memory.
+ ///
+ /// # Cache Line Sizes
+ ///
+ /// Cache line sizes vary by core. For all Cortex-M7 cores, the cache line size is fixed
+ /// to 32 bytes, which means `addr` should generally be 32-byte aligned and `size` should be a
+ /// multiple of 32. At the time of writing, no other Cortex-M cores have data caches.
+ ///
+ /// If `addr` is not cache-line aligned, or `size` is not a multiple of the cache line size,
+ /// other data before or after the desired memory will also be cleaned. From the point of view
+ /// of the core executing this function, memory remains consistent, so this is not unsound,
+ /// but is worth knowing about.
+ #[inline]
+ pub fn clean_dcache_by_address(&mut self, addr: usize, size: usize) {
+ // No-op zero sized operations
+ if size == 0 {
+ return;
+ }
+
+ // NOTE(unsafe): No races as all CBP registers are write-only and stateless
+ let mut cbp = unsafe { CBP::new() };
+
+ crate::asm::dsb();
+
+ let dminline = CPUID::cache_dminline();
+ let line_size = (1 << dminline) * 4;
+ let num_lines = ((size - 1) / line_size) + 1;
+
+ let mask = 0xFFFF_FFFF - (line_size - 1);
+ let mut addr = addr & mask;
+
+ for _ in 0..num_lines {
+ cbp.dccmvac(addr as u32);
+ addr += line_size;
+ }
+
+ crate::asm::dsb();
+ crate::asm::isb();
+ }
+
+ /// Cleans an object from the D-cache.
+ ///
+ /// * `obj`: The object to clean.
+ ///
+ /// Cleans D-cache starting from the first cache line containing `obj`,
+ /// continuing to clean cache lines until all of `obj` has been cleaned.
+ ///
+ /// It is recommended that `obj` is both aligned to the cache line size and a multiple of
+ /// the cache line size long, otherwise surrounding data will also be cleaned.
+ ///
+ /// Cleaning the cache causes whatever data is present in the cache to be immediately written
+ /// to main memory, overwriting whatever was in main memory.
+ #[inline]
+ pub fn clean_dcache_by_ref<T>(&mut self, obj: &T) {
+ self.clean_dcache_by_address(obj as *const T as usize, core::mem::size_of::<T>());
+ }
+
+ /// Cleans a slice from D-cache.
+ ///
+ /// * `slice`: The slice to clean.
+ ///
+ /// Cleans D-cache starting from the first cache line containing members of `slice`,
+ /// continuing to clean cache lines until all of `slice` has been cleaned.
+ ///
+ /// It is recommended that `slice` is both aligned to the cache line size and a multiple of
+ /// the cache line size long, otherwise surrounding data will also be cleaned.
+ ///
+ /// Cleaning the cache causes whatever data is present in the cache to be immediately written
+ /// to main memory, overwriting whatever was in main memory.
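+ ///
+ /// # Example
+ ///
+ /// A sketch of publishing a buffer to a DMA transmitter; `start_dma_write` is a
+ /// hypothetical stand-in for a real driver, and `scb` is the `SCB` instance:
+ ///
+ /// ```ignore
+ /// let tx: [u8; 64] = [0xAA; 64];
+ /// scb.clean_dcache_by_slice(&tx); // push any cached bytes to main memory
+ /// start_dma_write(tx.as_ptr(), tx.len()); // the DMA engine now sees the data
+ /// ```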
+ #[inline]
+ pub fn clean_dcache_by_slice<T>(&mut self, slice: &[T]) {
+ self.clean_dcache_by_address(
+ slice.as_ptr() as usize,
+ slice.len() * core::mem::size_of::<T>(),
+ );
+ }
+
+ /// Cleans and invalidates D-cache by address.
+ ///
+ /// * `addr`: The address to clean and invalidate.
+ /// * `size`: The number of bytes to clean and invalidate.
+ ///
+ /// Cleans and invalidates D-cache starting from the first cache line containing `addr`,
+ /// finishing once at least `size` bytes have been cleaned and invalidated.
+ ///
+ /// It is recommended that `addr` is aligned to the cache line size and `size` is a multiple of
+ /// the cache line size, otherwise surrounding data will also be cleaned and invalidated.
+ ///
+ /// Cleaning and invalidating causes data in the D-cache to be written back to main memory,
+ /// and then marks that data in the D-cache as invalid, causing future reads to first fetch
+ /// from main memory.
+ #[inline]
+ pub fn clean_invalidate_dcache_by_address(&mut self, addr: usize, size: usize) {
+ // No-op zero sized operations
+ if size == 0 {
+ return;
+ }
+
+ // NOTE(unsafe): No races as all CBP registers are write-only and stateless
+ let mut cbp = unsafe { CBP::new() };
+
+ crate::asm::dsb();
+
+ // Cache lines are fixed to 32 bytes on Cortex-M7; earlier Cortex-M cores have no D-cache
+ const LINESIZE: usize = 32;
+ let num_lines = ((size - 1) / LINESIZE) + 1;
+
+ let mut addr = addr & 0xFFFF_FFE0;
+
+ for _ in 0..num_lines {
+ cbp.dccimvac(addr as u32);
+ addr += LINESIZE;
+ }
+
+ crate::asm::dsb();
+ crate::asm::isb();
+ }
+}
+
+const SCB_SCR_SLEEPDEEP: u32 = 0x1 << 2;
+
+impl SCB {
+ /// Set the SLEEPDEEP bit in the SCR register
+ #[inline]
+ pub fn set_sleepdeep(&mut self) {
+ unsafe {
+ self.scr.modify(|scr| scr | SCB_SCR_SLEEPDEEP);
+ }
+ }
+
+ /// Clear the SLEEPDEEP bit in the SCR register
+ #[inline]
+ pub fn clear_sleepdeep(&mut self) {
+ unsafe {
+ self.scr.modify(|scr| scr & !SCB_SCR_SLEEPDEEP);
+ }
+ }
+}
+
+const SCB_SCR_SLEEPONEXIT: u32 = 0x1 << 1;
+
+impl SCB {
+ /// Set the SLEEPONEXIT bit in the SCR register
+ #[inline]
+ pub fn set_sleeponexit(&mut self) {
+ unsafe {
+ self.scr.modify(|scr| scr | SCB_SCR_SLEEPONEXIT);
+ }
+ }
+
+ /// Clear the SLEEPONEXIT bit in the SCR register
+ #[inline]
+ pub fn clear_sleeponexit(&mut self) {
+ unsafe {
+ self.scr.modify(|scr| scr & !SCB_SCR_SLEEPONEXIT);
+ }
+ }
+}
+
+const SCB_AIRCR_VECTKEY: u32 = 0x05FA << 16;
+const SCB_AIRCR_PRIGROUP_MASK: u32 = 0x7 << 8;
+const SCB_AIRCR_SYSRESETREQ: u32 = 1 << 2;
+
+impl SCB {
+ /// Initiate a system reset request to reset the MCU
+ #[inline]
+ pub fn sys_reset() -> ! {
+ crate::asm::dsb();
+ unsafe {
+ (*Self::PTR).aircr.modify(
+ |r| {
+ SCB_AIRCR_VECTKEY | // otherwise the write is ignored
+ r & SCB_AIRCR_PRIGROUP_MASK | // keep priority group unchanged
+ SCB_AIRCR_SYSRESETREQ
+ }, // set the bit
+ )
+ };
+ crate::asm::dsb();
+ loop {
+ // wait for the reset
+ crate::asm::nop(); // avoid rust-lang/rust#28728
+ }
+ }
+}
+
+const SCB_ICSR_PENDSVSET: u32 = 1 << 28;
+const SCB_ICSR_PENDSVCLR: u32 = 1 << 27;
+
+const SCB_ICSR_PENDSTSET: u32 = 1 << 26;
+const SCB_ICSR_PENDSTCLR: u32 = 1 << 25;
+
+impl SCB {
+ /// Set the PENDSVSET bit in the ICSR register which will pend the PendSV interrupt
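+ ///
+ /// # Example
+ ///
+ /// A sketch of the usual context-switch request: pend PendSV and let its handler
+ /// run once no higher-priority exception is active:
+ ///
+ /// ```ignore
+ /// SCB::set_pendsv(); // associated function, no `SCB` instance required
+ /// ```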
+ #[inline]
+ pub fn set_pendsv() {
+ unsafe {
+ (*Self::PTR).icsr.write(SCB_ICSR_PENDSVSET);
+ }
+ }
+
+ /// Check if PENDSVSET bit in the ICSR register is set meaning PendSV interrupt is pending
+ #[inline]
+ pub fn is_pendsv_pending() -> bool {
+ unsafe { (*Self::PTR).icsr.read() & SCB_ICSR_PENDSVSET == SCB_ICSR_PENDSVSET }
+ }
+
+ /// Set the PENDSVCLR bit in the ICSR register which will clear a pending PendSV interrupt
+ #[inline]
+ pub fn clear_pendsv() {
+ unsafe {
+ (*Self::PTR).icsr.write(SCB_ICSR_PENDSVCLR);
+ }
+ }
+
+ /// Set the PENDSTSET bit in the ICSR register which will pend a SysTick interrupt
+ #[inline]
+ pub fn set_pendst() {
+ unsafe {
+ (*Self::PTR).icsr.write(SCB_ICSR_PENDSTSET);
+ }
+ }
+
+ /// Check if PENDSTSET bit in the ICSR register is set meaning SysTick interrupt is pending
+ #[inline]
+ pub fn is_pendst_pending() -> bool {
+ unsafe { (*Self::PTR).icsr.read() & SCB_ICSR_PENDSTSET == SCB_ICSR_PENDSTSET }
+ }
+
+ /// Set the PENDSTCLR bit in the ICSR register which will clear a pending SysTick interrupt
+ #[inline]
+ pub fn clear_pendst() {
+ unsafe {
+ (*Self::PTR).icsr.write(SCB_ICSR_PENDSTCLR);
+ }
+ }
+}
+
+/// System handlers, exceptions with configurable priority
+#[derive(Clone, Copy, Debug, Eq, PartialEq)]
+#[repr(u8)]
+pub enum SystemHandler {
+ // NonMaskableInt, // priority is fixed
+ // HardFault, // priority is fixed
+ /// Memory management interrupt (not present on Cortex-M0 variants)
+ #[cfg(not(armv6m))]
+ MemoryManagement = 4,
+
+ /// Bus fault interrupt (not present on Cortex-M0 variants)
+ #[cfg(not(armv6m))]
+ BusFault = 5,
+
+ /// Usage fault interrupt (not present on Cortex-M0 variants)
+ #[cfg(not(armv6m))]
+ UsageFault = 6,
+
+ /// Secure fault interrupt (only on ARMv8-M)
+ #[cfg(any(armv8m, native))]
+ SecureFault = 7,
+
+ /// SV call interrupt
+ SVCall = 11,
+
+ /// Debug monitor interrupt (not present on Cortex-M0 variants)
+ #[cfg(not(armv6m))]
+ DebugMonitor = 12,
+
+ /// Pend SV interrupt
+ PendSV = 14,
+
+ /// System Tick interrupt
+ SysTick = 15,
+}
+
+impl SCB {
+ /// Returns the hardware priority of `system_handler`
+ ///
+ /// *NOTE*: Hardware priority does not exactly match logical priority levels. See
+ /// [`NVIC.get_priority`](struct.NVIC.html#method.get_priority) for more details.
+ #[inline]
+ pub fn get_priority(system_handler: SystemHandler) -> u8 {
+ let index = system_handler as u8;
+
+ #[cfg(not(armv6m))]
+ {
+ // NOTE(unsafe) atomic read with no side effects
+
+ // NOTE(unsafe): Index is bounded to [4,15] by SystemHandler design.
+ // TODO: Review it after rust-lang/rust/issues/13926 is fixed.
+ let priority_ref = unsafe { (*Self::PTR).shpr.get_unchecked(usize::from(index - 4)) };
+
+ priority_ref.read()
+ }
+
+ #[cfg(armv6m)]
+ {
+ // NOTE(unsafe) atomic read with no side effects
+
+ // NOTE(unsafe): Index is bounded to [11,15] by SystemHandler design.
+ // TODO: Review it after rust-lang/rust/issues/13926 is fixed.
+ let priority_ref = unsafe {
+ (*Self::PTR)
+ .shpr
+ .get_unchecked(usize::from((index - 8) / 4))
+ };
+
+ let shpr = priority_ref.read();
+ let prio = (shpr >> (8 * (index % 4))) & 0x0000_00ff;
+ prio as u8
+ }
+ }
+
+ /// Sets the hardware priority of `system_handler` to `prio`
+ ///
+ /// *NOTE*: Hardware priority does not exactly match logical priority levels. See
+ /// [`NVIC.get_priority`](struct.NVIC.html#method.get_priority) for more details.
+ ///
+ /// On ARMv6-M, updating a system handler priority requires a read-modify-write operation. On
+ /// ARMv7-M, the operation is performed in a single, atomic write operation.
+ ///
+ /// # Safety
+ ///
+ /// Changing priority levels can break priority-based critical sections (see
+ /// [`register::basepri`](crate::register::basepri)) and compromise memory safety.
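+ ///
+ /// # Example
+ ///
+ /// A sketch of the common RTOS pattern of demoting PendSV to the lowest
+ /// priority; the hardware keeps only the implemented upper priority bits:
+ ///
+ /// ```ignore
+ /// unsafe { scb.set_priority(SystemHandler::PendSV, 0xff) };
+ /// ```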
+ #[inline]
+ pub unsafe fn set_priority(&mut self, system_handler: SystemHandler, prio: u8) {
+ let index = system_handler as u8;
+
+ #[cfg(not(armv6m))]
+ {
+ // NOTE(unsafe): Index is bounded to [4,15] by SystemHandler design.
+ // TODO: Review it after rust-lang/rust/issues/13926 is fixed.
+ let priority_ref = (*Self::PTR).shpr.get_unchecked(usize::from(index - 4));
+
+ priority_ref.write(prio)
+ }
+
+ #[cfg(armv6m)]
+ {
+ // NOTE(unsafe): Index is bounded to [11,15] by SystemHandler design.
+ // TODO: Review it after rust-lang/rust/issues/13926 is fixed.
+ let priority_ref = (*Self::PTR)
+ .shpr
+ .get_unchecked(usize::from((index - 8) / 4));
+
+ priority_ref.modify(|value| {
+ let shift = 8 * (index % 4);
+ let mask = 0x0000_00ff << shift;
+ let prio = u32::from(prio) << shift;
+
+ (value & !mask) | prio
+ });
+ }
+ }
+
+ /// Return the bit position of the exception enable bit in the SHCSR register
+ #[inline]
+ #[cfg(not(any(armv6m, armv8m_base)))]
+ fn shcsr_enable_shift(exception: Exception) -> Option<u32> {
+ match exception {
+ Exception::MemoryManagement => Some(16),
+ Exception::BusFault => Some(17),
+ Exception::UsageFault => Some(18),
+ #[cfg(armv8m_main)]
+ Exception::SecureFault => Some(19),
+ _ => None,
+ }
+ }
+
+ /// Enable the exception
+ ///
+ /// If the exception is enabled, its dedicated handler is executed instead of the
+ /// HardFault handler when the exception is triggered.
+ ///
+ /// This function only has an effect on the following exceptions:
+ /// * `MemoryManagement`
+ /// * `BusFault`
+ /// * `UsageFault`
+ /// * `SecureFault` (can only be enabled from Secure state)
+ ///
+ /// Calling this function with any other exception will do nothing.
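+ ///
+ /// # Example
+ ///
+ /// A sketch that gives bus faults their own handler instead of letting them
+ /// escalate to HardFault:
+ ///
+ /// ```ignore
+ /// scb.enable(Exception::BusFault);
+ /// ```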
+ #[inline]
+ #[cfg(not(any(armv6m, armv8m_base)))]
+ pub fn enable(&mut self, exception: Exception) {
+ if let Some(shift) = SCB::shcsr_enable_shift(exception) {
+ // The mutable reference to SCB makes sure that only this code is currently modifying
+ // the register.
+ unsafe { self.shcsr.modify(|value| value | (1 << shift)) }
+ }
+ }
+
+ /// Disable the exception
+ ///
+ /// If the exception is disabled, the HardFault handler is executed instead of the
+ /// exception's dedicated handler when the exception is triggered.
+ ///
+ /// This function only has an effect on the following exceptions:
+ /// * `MemoryManagement`
+ /// * `BusFault`
+ /// * `UsageFault`
+ /// * `SecureFault` (cannot be changed from Non-secure state)
+ ///
+ /// Calling this function with any other exception will do nothing.
+ #[inline]
+ #[cfg(not(any(armv6m, armv8m_base)))]
+ pub fn disable(&mut self, exception: Exception) {
+ if let Some(shift) = SCB::shcsr_enable_shift(exception) {
+ // The mutable reference to SCB makes sure that only this code is currently modifying
+ // the register.
+ unsafe { self.shcsr.modify(|value| value & !(1 << shift)) }
+ }
+ }
+
+ /// Check if an exception is enabled
+ ///
+ /// This function is only meaningful for the following exceptions:
+ /// * `MemoryManagement`
+ /// * `BusFault`
+ /// * `UsageFault`
+ /// * `SecureFault` (cannot be read from Non-secure state)
+ ///
+ /// Calling this function with any other exception will return `false`.
+ #[inline]
+ #[cfg(not(any(armv6m, armv8m_base)))]
+ pub fn is_enabled(&self, exception: Exception) -> bool {
+ if let Some(shift) = SCB::shcsr_enable_shift(exception) {
+ (self.shcsr.read() & (1 << shift)) > 0
+ } else {
+ false
+ }
+ }
+}
diff --git a/src/peripheral/syst.rs b/src/peripheral/syst.rs
new file mode 100644
index 0000000..345acc2
--- /dev/null
+++ b/src/peripheral/syst.rs
@@ -0,0 +1,185 @@
+//! SysTick: System Timer
+
+use volatile_register::{RO, RW};
+
+use crate::peripheral::SYST;
+
+/// Register block
+#[repr(C)]
+pub struct RegisterBlock {
+ /// Control and Status
+ pub csr: RW<u32>,
+ /// Reload Value
+ pub rvr: RW<u32>,
+ /// Current Value
+ pub cvr: RW<u32>,
+ /// Calibration Value
+ pub calib: RO<u32>,
+}
+
+/// SysTick clock source
+#[derive(Clone, Copy, Debug, PartialEq, Eq)]
+pub enum SystClkSource {
+ /// Core-provided clock
+ Core,
+ /// External reference clock
+ External,
+}
+
+const SYST_COUNTER_MASK: u32 = 0x00ff_ffff;
+
+const SYST_CSR_ENABLE: u32 = 1 << 0;
+const SYST_CSR_TICKINT: u32 = 1 << 1;
+const SYST_CSR_CLKSOURCE: u32 = 1 << 2;
+const SYST_CSR_COUNTFLAG: u32 = 1 << 16;
+
+const SYST_CALIB_SKEW: u32 = 1 << 30;
+const SYST_CALIB_NOREF: u32 = 1 << 31;
+
+impl SYST {
+ /// Clears current value to 0
+ ///
+ /// After calling `clear_current()`, the next call to `has_wrapped()` will return `false`.
+ #[inline]
+ pub fn clear_current(&mut self) {
+ unsafe { self.cvr.write(0) }
+ }
+
+ /// Disables counter
+ #[inline]
+ pub fn disable_counter(&mut self) {
+ unsafe { self.csr.modify(|v| v & !SYST_CSR_ENABLE) }
+ }
+
+ /// Disables SysTick interrupt
+ #[inline]
+ pub fn disable_interrupt(&mut self) {
+ unsafe { self.csr.modify(|v| v & !SYST_CSR_TICKINT) }
+ }
+
+ /// Enables counter
+ ///
+ /// *NOTE* The reference manual indicates that:
+ ///
+ /// "The SysTick counter reload and current value are undefined at reset, the correct
+ /// initialization sequence for the SysTick counter is:
+ ///
+ /// - Program reload value
+ /// - Clear current value
+ /// - Program Control and Status register"
+ ///
+ /// The sequence translates to `self.set_reload(x); self.clear_current(); self.enable_counter()`
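+ ///
+ /// # Example
+ ///
+ /// A sketch of that sequence for a 1 ms tick, assuming SysTick is driven by a
+ /// 48 MHz core clock:
+ ///
+ /// ```ignore
+ /// syst.set_clock_source(SystClkSource::Core);
+ /// syst.set_reload(48_000 - 1); // wrap every 48_000 ticks = 1 ms at 48 MHz
+ /// syst.clear_current();
+ /// syst.enable_counter();
+ /// ```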
+ #[inline]
+ pub fn enable_counter(&mut self) {
+ unsafe { self.csr.modify(|v| v | SYST_CSR_ENABLE) }
+ }
+
+ /// Enables SysTick interrupt
+ #[inline]
+ pub fn enable_interrupt(&mut self) {
+ unsafe { self.csr.modify(|v| v | SYST_CSR_TICKINT) }
+ }
+
+ /// Gets clock source
+ ///
+ /// *NOTE* This takes `&mut self` because the read operation is side effectful and can clear the
+ /// bit that indicates that the timer has wrapped (cf. `SYST.has_wrapped`)
+ #[inline]
+ pub fn get_clock_source(&mut self) -> SystClkSource {
+ // NOTE(unsafe) atomic read with no side effects
+ if self.csr.read() & SYST_CSR_CLKSOURCE != 0 {
+ SystClkSource::Core
+ } else {
+ SystClkSource::External
+ }
+ }
+
+ /// Gets current value
+ #[inline]
+ pub fn get_current() -> u32 {
+ // NOTE(unsafe) atomic read with no side effects
+ unsafe { (*Self::PTR).cvr.read() }
+ }
+
+ /// Gets reload value
+ #[inline]
+ pub fn get_reload() -> u32 {
+ // NOTE(unsafe) atomic read with no side effects
+ unsafe { (*Self::PTR).rvr.read() }
+ }
+
+ /// Returns the reload value with which the counter would wrap once per 10 ms.
+ ///
+ /// Returns `0` if the value is not known (e.g. because the clock can
+ /// change dynamically).
+ #[inline]
+ pub fn get_ticks_per_10ms() -> u32 {
+ // NOTE(unsafe) atomic read with no side effects
+ unsafe { (*Self::PTR).calib.read() & SYST_COUNTER_MASK }
+ }
+
+ /// Checks if an external reference clock is available
+ #[inline]
+ pub fn has_reference_clock() -> bool {
+ // NOTE(unsafe) atomic read with no side effects
+ unsafe { (*Self::PTR).calib.read() & SYST_CALIB_NOREF == 0 }
+ }
+
+ /// Checks if the counter wrapped (underflowed) since the last check
+ ///
+ /// *NOTE* This takes `&mut self` because the read operation is side effectful and will clear
+ /// the wrap flag (`COUNTFLAG`) as part of the read.
+ #[inline]
+ pub fn has_wrapped(&mut self) -> bool {
+ self.csr.read() & SYST_CSR_COUNTFLAG != 0
+ }
+
+ /// Checks if counter is enabled
+ ///
+ /// *NOTE* This takes `&mut self` because the read operation is side effectful and can clear the
+ /// bit that indicates that the timer has wrapped (cf. `SYST.has_wrapped`)
+ #[inline]
+ pub fn is_counter_enabled(&mut self) -> bool {
+ self.csr.read() & SYST_CSR_ENABLE != 0
+ }
+
+ /// Checks if SysTick interrupt is enabled
+ ///
+ /// *NOTE* This takes `&mut self` because the read operation is side effectful and can clear the
+ /// bit that indicates that the timer has wrapped (cf. `SYST.has_wrapped`)
+ #[inline]
+ pub fn is_interrupt_enabled(&mut self) -> bool {
+ self.csr.read() & SYST_CSR_TICKINT != 0
+ }
+
+ /// Checks if the calibration value is precise
+ ///
+ /// Returns `false` if using the reload value returned by
+ /// `get_ticks_per_10ms()` may result in a period significantly deviating
+ /// from 10 ms.
+ #[inline]
+ pub fn is_precise() -> bool {
+ // NOTE(unsafe) atomic read with no side effects
+ unsafe { (*Self::PTR).calib.read() & SYST_CALIB_SKEW == 0 }
+ }
+
+ /// Sets clock source
+ #[inline]
+ pub fn set_clock_source(&mut self, clk_source: SystClkSource) {
+ match clk_source {
+ SystClkSource::External => unsafe { self.csr.modify(|v| v & !SYST_CSR_CLKSOURCE) },
+ SystClkSource::Core => unsafe { self.csr.modify(|v| v | SYST_CSR_CLKSOURCE) },
+ }
+ }
+
+ /// Sets reload value
+ ///
+ /// Valid values are between `1` and `0x00ffffff`.
+ ///
+ /// *NOTE* To make the timer wrap every `N` ticks set the reload value to `N - 1`
+ #[inline]
+ pub fn set_reload(&mut self, value: u32) {
+ unsafe { self.rvr.write(value) }
+ }
+}
diff --git a/src/peripheral/test.rs b/src/peripheral/test.rs
new file mode 100644
index 0000000..cab064a
--- /dev/null
+++ b/src/peripheral/test.rs
@@ -0,0 +1,170 @@
+#[test]
+fn cpuid() {
+ let cpuid = unsafe { &*crate::peripheral::CPUID::PTR };
+
+ assert_eq!(address(&cpuid.base), 0xE000_ED00);
+ assert_eq!(address(&cpuid.pfr), 0xE000_ED40);
+ assert_eq!(address(&cpuid.dfr), 0xE000_ED48);
+ assert_eq!(address(&cpuid.afr), 0xE000_ED4C);
+ assert_eq!(address(&cpuid.mmfr), 0xE000_ED50);
+ assert_eq!(address(&cpuid.isar), 0xE000_ED60);
+ assert_eq!(address(&cpuid.clidr), 0xE000_ED78);
+ assert_eq!(address(&cpuid.ctr), 0xE000_ED7C);
+ assert_eq!(address(&cpuid.ccsidr), 0xE000_ED80);
+ assert_eq!(address(&cpuid.csselr), 0xE000_ED84);
+}
+
+#[test]
+fn dcb() {
+ let dcb = unsafe { &*crate::peripheral::DCB::PTR };
+
+ assert_eq!(address(&dcb.dhcsr), 0xE000_EDF0);
+ assert_eq!(address(&dcb.dcrsr), 0xE000_EDF4);
+ assert_eq!(address(&dcb.dcrdr), 0xE000_EDF8);
+ assert_eq!(address(&dcb.demcr), 0xE000_EDFC);
+}
+
+#[test]
+fn dwt() {
+ let dwt = unsafe { &*crate::peripheral::DWT::PTR };
+
+ assert_eq!(address(&dwt.ctrl), 0xE000_1000);
+ #[cfg(not(armv6m))]
+ assert_eq!(address(&dwt.cyccnt), 0xE000_1004);
+ #[cfg(not(armv6m))]
+ assert_eq!(address(&dwt.cpicnt), 0xE000_1008);
+ #[cfg(not(armv6m))]
+ assert_eq!(address(&dwt.exccnt), 0xE000_100C);
+ #[cfg(not(armv6m))]
+ assert_eq!(address(&dwt.sleepcnt), 0xE000_1010);
+ #[cfg(not(armv6m))]
+ assert_eq!(address(&dwt.lsucnt), 0xE000_1014);
+ #[cfg(not(armv6m))]
+ assert_eq!(address(&dwt.foldcnt), 0xE000_1018);
+ assert_eq!(address(&dwt.pcsr), 0xE000_101C);
+ assert_eq!(address(&dwt.c[0].comp), 0xE000_1020);
+ assert_eq!(address(&dwt.c[0].mask), 0xE000_1024);
+ assert_eq!(address(&dwt.c[0].function), 0xE000_1028);
+ assert_eq!(address(&dwt.c[1].comp), 0xE000_1030);
+ assert_eq!(address(&dwt.c[1].mask), 0xE000_1034);
+ assert_eq!(address(&dwt.c[1].function), 0xE000_1038);
+ #[cfg(not(armv6m))]
+ assert_eq!(address(&dwt.lar), 0xE000_1FB0);
+ #[cfg(not(armv6m))]
+ assert_eq!(address(&dwt.lsr), 0xE000_1FB4);
+}
+
+#[test]
+fn fpb() {
+ let fpb = unsafe { &*crate::peripheral::FPB::PTR };
+
+ assert_eq!(address(&fpb.ctrl), 0xE000_2000);
+ assert_eq!(address(&fpb.remap), 0xE000_2004);
+ assert_eq!(address(&fpb.comp), 0xE000_2008);
+ assert_eq!(address(&fpb.comp[1]), 0xE000_200C);
+ assert_eq!(address(&fpb.lar), 0xE000_2FB0);
+ assert_eq!(address(&fpb.lsr), 0xE000_2FB4);
+}
+
+#[test]
+fn fpu() {
+ let fpu = unsafe { &*crate::peripheral::FPU::PTR };
+
+ assert_eq!(address(&fpu.fpccr), 0xE000_EF34);
+ assert_eq!(address(&fpu.fpcar), 0xE000_EF38);
+ assert_eq!(address(&fpu.fpdscr), 0xE000_EF3C);
+ assert_eq!(address(&fpu.mvfr), 0xE000_EF40);
+ assert_eq!(address(&fpu.mvfr[1]), 0xE000_EF44);
+ assert_eq!(address(&fpu.mvfr[2]), 0xE000_EF48);
+}
+
+#[test]
+fn itm() {
+ let itm = unsafe { &*crate::peripheral::ITM::PTR };
+
+ assert_eq!(address(&itm.stim), 0xE000_0000);
+ assert_eq!(address(&itm.ter), 0xE000_0E00);
+ assert_eq!(address(&itm.tpr), 0xE000_0E40);
+ assert_eq!(address(&itm.tcr), 0xE000_0E80);
+ assert_eq!(address(&itm.lar), 0xE000_0FB0);
+ assert_eq!(address(&itm.lsr), 0xE000_0FB4);
+}
+
+#[test]
+fn mpu() {
+ let mpu = unsafe { &*crate::peripheral::MPU::PTR };
+
+ assert_eq!(address(&mpu._type), 0xE000_ED90);
+ assert_eq!(address(&mpu.ctrl), 0xE000_ED94);
+ assert_eq!(address(&mpu.rnr), 0xE000_ED98);
+ assert_eq!(address(&mpu.rbar), 0xE000_ED9C);
+ assert_eq!(address(&mpu.rasr), 0xE000_EDA0);
+ assert_eq!(address(&mpu.rbar_a1), 0xE000_EDA4);
+ assert_eq!(address(&mpu.rasr_a1), 0xE000_EDA8);
+ assert_eq!(address(&mpu.rbar_a2), 0xE000_EDAC);
+ assert_eq!(address(&mpu.rasr_a2), 0xE000_EDB0);
+ assert_eq!(address(&mpu.rbar_a3), 0xE000_EDB4);
+ assert_eq!(address(&mpu.rasr_a3), 0xE000_EDB8);
+}
+
+#[test]
+fn nvic() {
+ let nvic = unsafe { &*crate::peripheral::NVIC::PTR };
+
+ assert_eq!(address(&nvic.iser), 0xE000_E100);
+ assert_eq!(address(&nvic.icer), 0xE000_E180);
+ assert_eq!(address(&nvic.ispr), 0xE000_E200);
+ assert_eq!(address(&nvic.icpr), 0xE000_E280);
+ assert_eq!(address(&nvic.iabr), 0xE000_E300);
+ assert_eq!(address(&nvic.ipr), 0xE000_E400);
+ #[cfg(not(armv6m))]
+ assert_eq!(address(&nvic.stir), 0xE000_EF00);
+}
+
+#[test]
+fn scb() {
+ let scb = unsafe { &*crate::peripheral::SCB::PTR };
+
+ assert_eq!(address(&scb.icsr), 0xE000_ED04);
+ assert_eq!(address(&scb.vtor), 0xE000_ED08);
+ assert_eq!(address(&scb.aircr), 0xE000_ED0C);
+ assert_eq!(address(&scb.scr), 0xE000_ED10);
+ assert_eq!(address(&scb.ccr), 0xE000_ED14);
+ assert_eq!(address(&scb.shpr), 0xE000_ED18);
+ assert_eq!(address(&scb.shcsr), 0xE000_ED24);
+ assert_eq!(address(&scb.cfsr), 0xE000_ED28);
+ assert_eq!(address(&scb.hfsr), 0xE000_ED2C);
+ assert_eq!(address(&scb.dfsr), 0xE000_ED30);
+ assert_eq!(address(&scb.mmfar), 0xE000_ED34);
+ assert_eq!(address(&scb.bfar), 0xE000_ED38);
+ assert_eq!(address(&scb.afsr), 0xE000_ED3C);
+ assert_eq!(address(&scb.cpacr), 0xE000_ED88);
+}
+
+#[test]
+fn syst() {
+ let syst = unsafe { &*crate::peripheral::SYST::PTR };
+
+ assert_eq!(address(&syst.csr), 0xE000_E010);
+ assert_eq!(address(&syst.rvr), 0xE000_E014);
+ assert_eq!(address(&syst.cvr), 0xE000_E018);
+ assert_eq!(address(&syst.calib), 0xE000_E01C);
+}
+
+#[test]
+fn tpiu() {
+ let tpiu = unsafe { &*crate::peripheral::TPIU::PTR };
+
+ assert_eq!(address(&tpiu.sspsr), 0xE004_0000);
+ assert_eq!(address(&tpiu.cspsr), 0xE004_0004);
+ assert_eq!(address(&tpiu.acpr), 0xE004_0010);
+ assert_eq!(address(&tpiu.sppr), 0xE004_00F0);
+ assert_eq!(address(&tpiu.ffcr), 0xE004_0304);
+ assert_eq!(address(&tpiu.lar), 0xE004_0FB0);
+ assert_eq!(address(&tpiu.lsr), 0xE004_0FB4);
+ assert_eq!(address(&tpiu._type), 0xE004_0FC8);
+}
+
+fn address<T>(r: *const T) -> usize {
+ r as usize
+}
diff --git a/src/peripheral/tpiu.rs b/src/peripheral/tpiu.rs
new file mode 100644
index 0000000..0762495
--- /dev/null
+++ b/src/peripheral/tpiu.rs
@@ -0,0 +1,161 @@
+//! Trace Port Interface Unit.
+//!
+//! *NOTE* Not available on ARMv6-M.
+
+use volatile_register::{RO, RW, WO};
+
+use crate::peripheral::TPIU;
+use bitfield::bitfield;
+
+/// Register block
+#[repr(C)]
+pub struct RegisterBlock {
+ /// Supported Parallel Port Sizes
+ pub sspsr: RO<u32>,
+ /// Current Parallel Port Size
+ pub cspsr: RW<u32>,
+ reserved0: [u32; 2],
+ /// Asynchronous Clock Prescaler
+ pub acpr: RW<u32>,
+ reserved1: [u32; 55],
+ /// Selected Pin Control
+ pub sppr: RW<Sppr>,
+ reserved2: [u32; 132],
+ /// Formatter and Flush Control
+ pub ffcr: RW<Ffcr>,
+ reserved3: [u32; 810],
+ /// Lock Access
+ pub lar: WO<u32>,
+ /// Lock Status
+ pub lsr: RO<u32>,
+ reserved4: [u32; 4],
+ /// TPIU Type
+ pub _type: RO<Type>,
+}
+
+bitfield! {
+ /// Formatter and flush control register.
+ #[repr(C)]
+ #[derive(Clone, Copy)]
+ pub struct Ffcr(u32);
+ enfcont, set_enfcont: 1;
+}
+
+bitfield! {
+ /// TPIU Type Register.
+ #[repr(C)]
+ #[derive(Clone, Copy)]
+ pub struct Type(u32);
+ u8, fifosz, _: 8, 6;
+ ptinvalid, _: 9;
+ mancvalid, _: 10;
+ nrzvalid, _: 11;
+}
+
+bitfield! {
+ /// Selected pin protocol register.
+ #[repr(C)]
+ #[derive(Clone, Copy)]
+ pub struct Sppr(u32);
+ u8, txmode, set_txmode: 1, 0;
+}
+
+/// The available protocols for the trace output.
+#[repr(u8)]
+#[derive(Debug, Eq, PartialEq, Copy, Clone)]
+pub enum TraceProtocol {
+ /// Parallel trace port mode
+ Parallel = 0b00,
+ /// Asynchronous SWO, using Manchester encoding
+ AsyncSWOManchester = 0b01,
+ /// Asynchronous SWO, using NRZ encoding
+ AsyncSWONRZ = 0b10,
+}
+impl core::convert::TryFrom<u8> for TraceProtocol {
+ type Error = ();
+
+ /// Tries to convert from a `TXMODE` field value. Fails if the set mode is
+ /// unknown (and thus unpredictable).
+ #[inline]
+ fn try_from(value: u8) -> Result<Self, Self::Error> {
+ match value {
+ x if x == Self::Parallel as u8 => Ok(Self::Parallel),
+ x if x == Self::AsyncSWOManchester as u8 => Ok(Self::AsyncSWOManchester),
+ x if x == Self::AsyncSWONRZ as u8 => Ok(Self::AsyncSWONRZ),
+ _ => Err(()), // unknown and unpredictable mode
+ }
+ }
+}
+
+/// The SWO options supported by the TPIU, and the minimum size of the
+/// FIFO output queue for trace data.
+#[derive(Debug, Eq, PartialEq, Copy, Clone)]
+pub struct SWOSupports {
+ /// Whether UART/NRZ encoding is supported for SWO.
+ pub nrz_encoding: bool,
+ /// Whether Manchester encoding is supported for SWO.
+ pub manchester_encoding: bool,
+ /// Whether parallel trace port operation is supported.
+ pub parallel_operation: bool,
+ /// The minimum implemented FIFO queue size of the TPIU for trace data.
+ pub min_queue_size: u8,
+}
+
+impl TPIU {
+ /// Sets the prescaler value for the desired baud rate of the Serial
+ /// Wire Output (SWO), relative to a given asynchronous reference
+ /// clock rate.
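+ ///
+ /// # Example
+ ///
+ /// A sketch assuming a 16 MHz reference clock and a desired 2 MHz SWO baud
+ /// rate, which programs an ACPR prescaler value of 7:
+ ///
+ /// ```ignore
+ /// tpiu.set_swo_baud_rate(16_000_000, 2_000_000); // (16 MHz / 2 MHz) - 1 = 7
+ /// ```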
+ #[inline]
+ pub fn set_swo_baud_rate(&mut self, ref_clk_rate: u32, baud_rate: u32) {
+ unsafe {
+ self.acpr.write((ref_clk_rate / baud_rate) - 1);
+ }
+ }
+
+ /// The currently used protocol for the trace output. Returns `None` if an
+ /// unknown (and thus unpredictable) mode is configured by means
+ /// other than
+ /// [`set_trace_output_protocol`](Self::set_trace_output_protocol).
+ #[inline]
+ pub fn trace_output_protocol(&self) -> Option<TraceProtocol> {
+ use core::convert::TryInto;
+ self.sppr.read().txmode().try_into().ok()
+ }
+
+ /// Sets the used protocol for the trace output.
+ #[inline]
+ pub fn set_trace_output_protocol(&mut self, proto: TraceProtocol) {
+ unsafe {
+ self.sppr.modify(|mut r| {
+ r.set_txmode(proto as u8);
+ r
+ });
+ }
+ }
+
+ /// Whether to enable the formatter. If disabled, only ITM and DWT
+ /// trace sources are passed through. Data from the ETM is
+ /// discarded.
+ #[inline]
+ pub fn enable_continuous_formatting(&mut self, bit: bool) {
+ unsafe {
+ self.ffcr.modify(|mut r| {
+ r.set_enfcont(bit);
+ r
+ });
+ }
+ }
+
+ /// Reads the supported trace output modes and the minimum size of
+ /// the TPIU FIFO queue for trace data.
+ #[inline]
+ pub fn swo_supports() -> SWOSupports {
+ let _type = unsafe { (*Self::PTR)._type.read() };
+ SWOSupports {
+ nrz_encoding: _type.nrzvalid(),
+ manchester_encoding: _type.mancvalid(),
+ parallel_operation: !_type.ptinvalid(),
+ min_queue_size: _type.fifosz(),
+ }
+ }
+}
diff --git a/src/prelude.rs b/src/prelude.rs
new file mode 100644
index 0000000..bc47cc0
--- /dev/null
+++ b/src/prelude.rs
@@ -0,0 +1,3 @@
+//! Prelude
+
+pub use embedded_hal::prelude::*;
diff --git a/src/register/apsr.rs b/src/register/apsr.rs
new file mode 100644
index 0000000..e83435c
--- /dev/null
+++ b/src/register/apsr.rs
@@ -0,0 +1,54 @@
+//! Application Program Status Register
+
+/// Application Program Status Register
+#[derive(Clone, Copy, Debug)]
+pub struct Apsr {
+ bits: u32,
+}
+
+impl Apsr {
+ /// Returns the contents of the register as raw bits
+ #[inline]
+ pub fn bits(self) -> u32 {
+ self.bits
+ }
+
+ /// DSP overflow and saturation flag
+ #[inline]
+ pub fn q(self) -> bool {
+ self.bits & (1 << 27) == (1 << 27)
+ }
+
+ /// Overflow flag
+ #[inline]
+ pub fn v(self) -> bool {
+ self.bits & (1 << 28) == (1 << 28)
+ }
+
+ /// Carry or borrow flag
+ #[inline]
+ pub fn c(self) -> bool {
+ self.bits & (1 << 29) == (1 << 29)
+ }
+
+ /// Zero flag
+ #[inline]
+ pub fn z(self) -> bool {
+ self.bits & (1 << 30) == (1 << 30)
+ }
+
+ /// Negative flag
+ #[inline]
+ pub fn n(self) -> bool {
+ self.bits & (1 << 31) == (1 << 31)
+ }
+}
+
+/// Reads the CPU register
+///
+/// **NOTE** This function is available if `cortex-m` is built with the `"inline-asm"` feature.
+#[inline]
+pub fn read() -> Apsr {
+ let bits: u32 = call_asm!(__apsr_r() -> u32);
+ Apsr { bits }
+}
diff --git a/src/register/basepri.rs b/src/register/basepri.rs
new file mode 100644
index 0000000..07084cd
--- /dev/null
+++ b/src/register/basepri.rs
@@ -0,0 +1,24 @@
+//! Base Priority Mask Register
+
+/// Reads the CPU register
+#[inline]
+pub fn read() -> u8 {
+ call_asm!(__basepri_r() -> u8)
+}
+
+/// Writes to the CPU register
+///
+/// **IMPORTANT** If you are using a Cortex-M7 device with revision r0p1 you MUST enable the
+/// `cm7-r0p1` Cargo feature or this function WILL misbehave.
+#[inline]
+pub unsafe fn write(basepri: u8) {
+ #[cfg(feature = "cm7-r0p1")]
+ {
+ call_asm!(__basepri_w_cm7_r0p1(basepri: u8));
+ }
+
+ #[cfg(not(feature = "cm7-r0p1"))]
+ {
+ call_asm!(__basepri_w(basepri: u8));
+ }
+}
diff --git a/src/register/basepri_max.rs b/src/register/basepri_max.rs
new file mode 100644
index 0000000..cea3838
--- /dev/null
+++ b/src/register/basepri_max.rs
@@ -0,0 +1,21 @@
+//! Base Priority Mask Register (conditional write)
+
+/// Writes to BASEPRI *if*
+///
+/// - `basepri != 0` AND `basepri::read() == 0`, OR
+/// - `basepri != 0` AND `basepri < basepri::read()`
+///
+/// **IMPORTANT** If you are using a Cortex-M7 device with revision r0p1 you MUST enable the
+/// `cm7-r0p1` Cargo feature or this function WILL misbehave.
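+///
+/// # Example
+///
+/// A sketch of raising the priority mask around a short critical section; the
+/// conditional write only ever raises the mask, never lowers it:
+///
+/// ```ignore
+/// basepri_max::write(64); // mask exceptions with hardware priority >= 64
+/// // ... critical section ...
+/// unsafe { basepri::write(0) }; // restore: unmask everything again
+/// ```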
+#[inline]
+pub fn write(basepri: u8) {
+ #[cfg(feature = "cm7-r0p1")]
+ {
+ call_asm!(__basepri_max_cm7_r0p1(basepri: u8));
+ }
+
+ #[cfg(not(feature = "cm7-r0p1"))]
+ {
+ call_asm!(__basepri_max(basepri: u8));
+ }
+}
diff --git a/src/register/control.rs b/src/register/control.rs
new file mode 100644
index 0000000..a991625
--- /dev/null
+++ b/src/register/control.rs
@@ -0,0 +1,164 @@
+//! Control register
+
+/// Control register
+#[derive(Clone, Copy, Debug)]
+pub struct Control {
+ bits: u32,
+}
+
+impl Control {
+ /// Creates a `Control` value from raw bits.
+ #[inline]
+ pub fn from_bits(bits: u32) -> Self {
+ Self { bits }
+ }
+
+ /// Returns the contents of the register as raw bits
+ #[inline]
+ pub fn bits(self) -> u32 {
+ self.bits
+ }
+
+ /// Thread mode privilege level
+ #[inline]
+ pub fn npriv(self) -> Npriv {
+ if self.bits & (1 << 0) == (1 << 0) {
+ Npriv::Unprivileged
+ } else {
+ Npriv::Privileged
+ }
+ }
+
+ /// Sets the thread mode privilege level value (nPRIV).
+ #[inline]
+ pub fn set_npriv(&mut self, npriv: Npriv) {
+ let mask = 1 << 0;
+ match npriv {
+ Npriv::Unprivileged => self.bits |= mask,
+ Npriv::Privileged => self.bits &= !mask,
+ }
+ }
+
+ /// Currently active stack pointer
+ #[inline]
+ pub fn spsel(self) -> Spsel {
+ if self.bits & (1 << 1) == (1 << 1) {
+ Spsel::Psp
+ } else {
+ Spsel::Msp
+ }
+ }
+
+ /// Sets the SPSEL value.
+ #[inline]
+ pub fn set_spsel(&mut self, spsel: Spsel) {
+ let mask = 1 << 1;
+ match spsel {
+ Spsel::Psp => self.bits |= mask,
+ Spsel::Msp => self.bits &= !mask,
+ }
+ }
+
+ /// Whether a floating-point context is currently active
+ #[inline]
+ pub fn fpca(self) -> Fpca {
+ if self.bits & (1 << 2) == (1 << 2) {
+ Fpca::Active
+ } else {
+ Fpca::NotActive
+ }
+ }
+
+ /// Sets the FPCA value.
+ #[inline]
+ pub fn set_fpca(&mut self, fpca: Fpca) {
+ let mask = 1 << 2;
+ match fpca {
+ Fpca::Active => self.bits |= mask,
+ Fpca::NotActive => self.bits &= !mask,
+ }
+ }
+}
+
+/// Thread mode privilege level
+#[derive(Clone, Copy, Debug, Eq, PartialEq)]
+pub enum Npriv {
+ /// Privileged
+ Privileged,
+ /// Unprivileged
+ Unprivileged,
+}
+
+impl Npriv {
+ /// Is in privileged thread mode?
+ #[inline]
+ pub fn is_privileged(self) -> bool {
+ self == Npriv::Privileged
+ }
+
+ /// Is in unprivileged thread mode?
+ #[inline]
+ pub fn is_unprivileged(self) -> bool {
+ self == Npriv::Unprivileged
+ }
+}
+
+/// Currently active stack pointer
+#[derive(Clone, Copy, Debug, Eq, PartialEq)]
+pub enum Spsel {
+ /// MSP is the current stack pointer
+ Msp,
+ /// PSP is the current stack pointer
+ Psp,
+}
+
+impl Spsel {
+ /// Is MSP the current stack pointer?
+ #[inline]
+ pub fn is_msp(self) -> bool {
+ self == Spsel::Msp
+ }
+
+ /// Is PSP the current stack pointer?
+ #[inline]
+ pub fn is_psp(self) -> bool {
+ self == Spsel::Psp
+ }
+}
+
+/// Whether a floating-point context is currently active
+#[derive(Clone, Copy, Debug, Eq, PartialEq)]
+pub enum Fpca {
+ /// Floating-point context active.
+ Active,
+ /// No floating-point context active
+ NotActive,
+}
+
+impl Fpca {
+ /// Is a floating-point context active?
+ #[inline]
+ pub fn is_active(self) -> bool {
+ self == Fpca::Active
+ }
+
+ /// Is a floating-point context not active?
+ #[inline]
+ pub fn is_not_active(self) -> bool {
+ self == Fpca::NotActive
+ }
+}
+
+/// Reads the CPU register
+#[inline]
+pub fn read() -> Control {
+ let bits: u32 = call_asm!(__control_r() -> u32);
+ Control { bits }
+}
+
+/// Writes to the CPU register.
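+///
+/// # Example
+///
+/// A sketch of the usual read-modify-write sequence, switching Thread mode to
+/// the process stack; the architecture requires an `isb` after writing CONTROL
+/// for the change to be guaranteed visible:
+///
+/// ```ignore
+/// let mut ctrl = control::read();
+/// ctrl.set_spsel(Spsel::Psp);
+/// unsafe { control::write(ctrl) };
+/// cortex_m::asm::isb();
+/// ```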
+#[inline]
+pub unsafe fn write(control: Control) {
+ let control = control.bits();
+ call_asm!(__control_w(control: u32));
+}
diff --git a/src/register/faultmask.rs b/src/register/faultmask.rs
new file mode 100644
index 0000000..e57fa28
--- /dev/null
+++ b/src/register/faultmask.rs
@@ -0,0 +1,35 @@
+//! Fault Mask Register
+
+/// All exceptions are ...
+#[derive(Clone, Copy, Debug, Eq, PartialEq)]
+pub enum Faultmask {
+ /// Active
+ Active,
+ /// Inactive, except for NMI
+ Inactive,
+}
+
+impl Faultmask {
+ /// All exceptions are active
+ #[inline]
+ pub fn is_active(self) -> bool {
+ self == Faultmask::Active
+ }
+
+ /// All exceptions, except for NMI, are inactive
+ #[inline]
+ pub fn is_inactive(self) -> bool {
+ self == Faultmask::Inactive
+ }
+}
+
+/// Reads the CPU register
+#[inline]
+pub fn read() -> Faultmask {
+ let r: u32 = call_asm!(__faultmask_r() -> u32);
+ if r & (1 << 0) == (1 << 0) {
+ Faultmask::Inactive
+ } else {
+ Faultmask::Active
+ }
+}
diff --git a/src/register/fpscr.rs b/src/register/fpscr.rs
new file mode 100644
index 0000000..68692c7
--- /dev/null
+++ b/src/register/fpscr.rs
@@ -0,0 +1,305 @@
+//! Floating-point Status Control Register
+
+/// Floating-point Status Control Register
+#[derive(Clone, Copy, Debug)]
+pub struct Fpscr {
+ bits: u32,
+}
+
+impl Fpscr {
+ /// Creates a `Fpscr` value from raw bits.
+ #[inline]
+ pub fn from_bits(bits: u32) -> Self {
+ Self { bits }
+ }
+
+ /// Returns the contents of the register as raw bits
+ #[inline]
+ pub fn bits(self) -> u32 {
+ self.bits
+ }
+
+ /// Read the Negative condition code flag
+ #[inline]
+ pub fn n(self) -> bool {
+ self.bits & (1 << 31) != 0
+ }
+
+ /// Sets the Negative condition code flag
+ #[inline]
+ pub fn set_n(&mut self, n: bool) {
+ let mask = 1 << 31;
+ match n {
+ true => self.bits |= mask,
+ false => self.bits &= !mask,
+ }
+ }
+
+ /// Read the Zero condition code flag
+ #[inline]
+ pub fn z(self) -> bool {
+ self.bits & (1 << 30) != 0
+ }
+
+ /// Sets the Zero condition code flag
+ #[inline]
+ pub fn set_z(&mut self, z: bool) {
+ let mask = 1 << 30;
+ match z {
+ true => self.bits |= mask,
+ false => self.bits &= !mask,
+ }
+ }
+
+ /// Read the Carry condition code flag
+ #[inline]
+ pub fn c(self) -> bool {
+ self.bits & (1 << 29) != 0
+ }
+
+ /// Sets the Carry condition code flag
+ #[inline]
+ pub fn set_c(&mut self, c: bool) {
+ let mask = 1 << 29;
+ match c {
+ true => self.bits |= mask,
+ false => self.bits &= !mask,
+ }
+ }
+
+ /// Read the Overflow condition code flag
+ #[inline]
+ pub fn v(self) -> bool {
+ self.bits & (1 << 28) != 0
+ }
+
+ /// Sets the Overflow condition code flag
+ #[inline]
+ pub fn set_v(&mut self, v: bool) {
+ let mask = 1 << 28;
+ match v {
+ true => self.bits |= mask,
+ false => self.bits &= !mask,
+ }
+ }
+
+ /// Read the Alternative Half Precision bit
+ #[inline]
+ pub fn ahp(self) -> bool {
+ self.bits & (1 << 26) != 0
+ }
+
+ /// Sets the Alternative Half Precision bit
+ #[inline]
+ pub fn set_ahp(&mut self, ahp: bool) {
+ let mask = 1 << 26;
+ match ahp {
+ true => self.bits |= mask,
+ false => self.bits &= !mask,
+ }
+ }
+
+ /// Read the Default NaN mode bit
+ #[inline]
+ pub fn dn(self) -> bool {
+ self.bits & (1 << 25) != 0
+ }
+
+ /// Sets the Default NaN mode bit
+ #[inline]
+ pub fn set_dn(&mut self, dn: bool) {
+ let mask = 1 << 25;
+ match dn {
+ true => self.bits |= mask,
+ false => self.bits &= !mask,
+ }
+ }
+
+ /// Read the Flush to Zero mode bit
+ #[inline]
+ pub fn fz(self) -> bool {
+ self.bits & (1 << 24) != 0
+ }
+
+ /// Sets the Flush to Zero mode bit
+ #[inline]
+ pub fn set_fz(&mut self, fz: bool) {
+ let mask = 1 << 24;
+ match fz {
+ true => self.bits |= mask,
+ false => self.bits &= !mask,
+ }
+ }
+
+ /// Read the Rounding Mode control field
+ #[inline]
+ pub fn rmode(self) -> RMode {
+ match (self.bits & (3 << 22)) >> 22 {
+ 0 => RMode::Nearest,
+ 1 => RMode::PlusInfinity,
+ 2 => RMode::MinusInfinity,
+ _ => RMode::Zero,
+ }
+ }
+
+ /// Sets the Rounding Mode control field
+ #[inline]
+ pub fn set_rmode(&mut self, rmode: RMode) {
+ let mask = 3 << 22;
+ match rmode {
+ RMode::Nearest => self.bits &= !mask,
+ RMode::PlusInfinity => self.bits = (self.bits & !mask) | (1 << 22),
+ RMode::MinusInfinity => self.bits = (self.bits & !mask) | (2 << 22),
+ RMode::Zero => self.bits |= mask,
+ }
+ }
+
+ /// Read the Input Denormal cumulative exception bit
+ #[inline]
+ pub fn idc(self) -> bool {
+ self.bits & (1 << 7) != 0
+ }
+
+ /// Sets the Input Denormal cumulative exception bit
+ #[inline]
+ pub fn set_idc(&mut self, idc: bool) {
+ let mask = 1 << 7;
+ match idc {
+ true => self.bits |= mask,
+ false => self.bits &= !mask,
+ }
+ }
+
+ /// Read the Inexact cumulative exception bit
+ #[inline]
+ pub fn ixc(self) -> bool {
+ self.bits & (1 << 4) != 0
+ }
+
+ /// Sets the Inexact cumulative exception bit
+ #[inline]
+ pub fn set_ixc(&mut self, ixc: bool) {
+ let mask = 1 << 4;
+ match ixc {
+ true => self.bits |= mask,
+ false => self.bits &= !mask,
+ }
+ }
+
+ /// Read the Underflow cumulative exception bit
+ #[inline]
+ pub fn ufc(self) -> bool {
+ self.bits & (1 << 3) != 0
+ }
+
+ /// Sets the Underflow cumulative exception bit
+ #[inline]
+ pub fn set_ufc(&mut self, ufc: bool) {
+ let mask = 1 << 3;
+ match ufc {
+ true => self.bits |= mask,
+ false => self.bits &= !mask,
+ }
+ }
+
+ /// Read the Overflow cumulative exception bit
+ #[inline]
+ pub fn ofc(self) -> bool {
+ self.bits & (1 << 2) != 0
+ }
+
+ /// Sets the Overflow cumulative exception bit
+ #[inline]
+ pub fn set_ofc(&mut self, ofc: bool) {
+ let mask = 1 << 2;
+ match ofc {
+ true => self.bits |= mask,
+ false => self.bits &= !mask,
+ }
+ }
+
+ /// Read the Division by Zero cumulative exception bit
+ #[inline]
+ pub fn dzc(self) -> bool {
+ self.bits & (1 << 1) != 0
+ }
+
+ /// Sets the Division by Zero cumulative exception bit
+ #[inline]
+ pub fn set_dzc(&mut self, dzc: bool) {
+ let mask = 1 << 1;
+ match dzc {
+ true => self.bits |= mask,
+ false => self.bits &= !mask,
+ }
+ }
+
+ /// Read the Invalid Operation cumulative exception bit
+ #[inline]
+ pub fn ioc(self) -> bool {
+ self.bits & (1 << 0) != 0
+ }
+
+ /// Sets the Invalid Operation cumulative exception bit
+ #[inline]
+ pub fn set_ioc(&mut self, ioc: bool) {
+ let mask = 1 << 0;
+ match ioc {
+ true => self.bits |= mask,
+ false => self.bits &= !mask,
+ }
+ }
+}
+
+/// Rounding mode
+#[derive(Clone, Copy, Debug, Eq, PartialEq)]
+pub enum RMode {
+ /// Round to Nearest (RN) mode. This is the reset value.
+ Nearest,
+ /// Round towards Plus Infinity (RP) mode.
+ PlusInfinity,
+ /// Round towards Minus Infinity (RM) mode.
+ MinusInfinity,
+ /// Round towards Zero (RZ) mode.
+ Zero,
+}
+
+impl RMode {
+ /// Is Nearest the current rounding mode?
+ #[inline]
+ pub fn is_nearest(self) -> bool {
+ self == RMode::Nearest
+ }
+
+ /// Is Plus Infinity the current rounding mode?
+ #[inline]
+ pub fn is_plus_infinity(self) -> bool {
+ self == RMode::PlusInfinity
+ }
+
+ /// Is Minus Infinity the current rounding mode?
+ #[inline]
+ pub fn is_minus_infinity(self) -> bool {
+ self == RMode::MinusInfinity
+ }
+
+ /// Is Zero the current rounding mode?
+ #[inline]
+ pub fn is_zero(self) -> bool {
+ self == RMode::Zero
+ }
+}
+
+/// Read the FPSCR register
+#[inline]
+pub fn read() -> Fpscr {
+ let r: u32 = call_asm!(__fpscr_r() -> u32);
+ Fpscr::from_bits(r)
+}
+
+/// Set the value of the FPSCR register
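+///
+/// # Example
+///
+/// A sketch that switches the rounding mode to round-towards-zero, assuming an
+/// FPU is present and enabled:
+///
+/// ```ignore
+/// let mut f = fpscr::read();
+/// f.set_rmode(RMode::Zero);
+/// unsafe { fpscr::write(f) };
+/// ```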
+#[inline]
+pub unsafe fn write(fpscr: Fpscr) {
+ let fpscr = fpscr.bits();
+ call_asm!(__fpscr_w(fpscr: u32));
+}
diff --git a/src/register/lr.rs b/src/register/lr.rs
new file mode 100644
index 0000000..1aa546c
--- /dev/null
+++ b/src/register/lr.rs
@@ -0,0 +1,17 @@
+//! Link register
+
+/// Reads the CPU register
+///
+/// **NOTE** This function is available if `cortex-m` is built with the `"inline-asm"` feature.
+#[inline]
+pub fn read() -> u32 {
+ call_asm!(__lr_r() -> u32)
+}
+
+/// Writes `bits` to the CPU register
+///
+/// **NOTE** This function is available if `cortex-m` is built with the `"inline-asm"` feature.
+#[inline]
+pub unsafe fn write(bits: u32) {
+ call_asm!(__lr_w(bits: u32));
+}
diff --git a/src/register/mod.rs b/src/register/mod.rs
new file mode 100644
index 0000000..48d157a
--- /dev/null
+++ b/src/register/mod.rs
@@ -0,0 +1,68 @@
+//! Processor core registers
+//!
+//! The following registers can only be accessed in PRIVILEGED mode:
+//!
+//! - BASEPRI
+//! - CONTROL
+//! - FAULTMASK
+//! - MSP
+//! - PRIMASK
+//!
+//! The rest of the registers (see list below) can be accessed in either
+//! PRIVILEGED or UNPRIVILEGED mode.
+//!
+//! - APSR
+//! - LR
+//! - PC
+//! - PSP
+//!
+//! The following registers are NOT available on ARMv6-M devices
+//! (`thumbv6m-none-eabi`):
+//!
+//! - BASEPRI
+//! - FAULTMASK
+//!
+//! The following registers are only available for devices with an FPU:
+//!
+//! - FPSCR
+//!
+//! # References
+//!
+//! - Cortex-M* Devices Generic User Guide - Section 2.1.3 Core registers
+
+#[cfg(all(not(armv6m), not(armv8m_base)))]
+pub mod basepri;
+
+#[cfg(all(not(armv6m), not(armv8m_base)))]
+pub mod basepri_max;
+
+pub mod control;
+
+#[cfg(all(not(armv6m), not(armv8m_base)))]
+pub mod faultmask;
+
+#[cfg(has_fpu)]
+pub mod fpscr;
+
+pub mod msp;
+
+pub mod primask;
+
+pub mod psp;
+
+#[cfg(armv8m_main)]
+pub mod msplim;
+
+#[cfg(armv8m_main)]
+pub mod psplim;
+
+// Accessing these registers requires inline assembly because their contents are tied to the current
+// stack frame
+#[cfg(feature = "inline-asm")]
+pub mod apsr;
+
+#[cfg(feature = "inline-asm")]
+pub mod lr;
+
+#[cfg(feature = "inline-asm")]
+pub mod pc;
diff --git a/src/register/msp.rs b/src/register/msp.rs
new file mode 100644
index 0000000..bccc2ae
--- /dev/null
+++ b/src/register/msp.rs
@@ -0,0 +1,32 @@
+//! Main Stack Pointer
+
+/// Reads the CPU register
+#[inline]
+pub fn read() -> u32 {
+ call_asm!(__msp_r() -> u32)
+}
+
+/// Writes `bits` to the CPU register
+#[inline]
+#[deprecated = "calling this function invokes Undefined Behavior, consider asm::bootstrap as an alternative"]
+pub unsafe fn write(bits: u32) {
+ call_asm!(__msp_w(bits: u32));
+}
+
+/// Reads the Non-Secure CPU register from Secure state.
+///
+/// Executing this function in Non-Secure state will return zeroes.
+#[cfg(armv8m)]
+#[inline]
+pub fn read_ns() -> u32 {
+ call_asm!(__msp_ns_r() -> u32)
+}
+
+/// Writes `bits` to the Non-Secure CPU register from Secure state.
+///
+/// Executing this function in Non-Secure state will be ignored.
+#[cfg(armv8m)]
+#[inline]
+pub unsafe fn write_ns(bits: u32) {
+ call_asm!(__msp_ns_w(bits: u32));
+}
diff --git a/src/register/msplim.rs b/src/register/msplim.rs
new file mode 100644
index 0000000..ac6f9ed
--- /dev/null
+++ b/src/register/msplim.rs
@@ -0,0 +1,13 @@
+//! Main Stack Pointer Limit Register
+
+/// Reads the CPU register
+#[inline]
+pub fn read() -> u32 {
+ call_asm!(__msplim_r() -> u32)
+}
+
+/// Writes `bits` to the CPU register
+#[inline]
+pub unsafe fn write(bits: u32) {
+ call_asm!(__msplim_w(bits: u32))
+}
diff --git a/src/register/pc.rs b/src/register/pc.rs
new file mode 100644
index 0000000..0b33629
--- /dev/null
+++ b/src/register/pc.rs
@@ -0,0 +1,17 @@
+//! Program counter
+
+/// Reads the CPU register
+///
+/// **NOTE** This function is available if `cortex-m` is built with the `"inline-asm"` feature.
+#[inline]
+pub fn read() -> u32 {
+ call_asm!(__pc_r() -> u32)
+}
+
+/// Writes `bits` to the CPU register
+///
+/// **NOTE** This function is available if `cortex-m` is built with the `"inline-asm"` feature.
+#[inline]
+pub unsafe fn write(bits: u32) {
+ call_asm!(__pc_w(bits: u32));
+}
diff --git a/src/register/primask.rs b/src/register/primask.rs
new file mode 100644
index 0000000..842ca49
--- /dev/null
+++ b/src/register/primask.rs
@@ -0,0 +1,35 @@
+//! Priority mask register
+
+/// All exceptions with configurable priority are ...
+#[derive(Clone, Copy, Debug, Eq, PartialEq)]
+pub enum Primask {
+ /// Active
+ Active,
+ /// Inactive
+ Inactive,
+}
+
+impl Primask {
+ /// All exceptions with configurable priority are active
+ #[inline]
+ pub fn is_active(self) -> bool {
+ self == Primask::Active
+ }
+
+ /// All exceptions with configurable priority are inactive
+ #[inline]
+ pub fn is_inactive(self) -> bool {
+ self == Primask::Inactive
+ }
+}
+
+/// Reads the CPU register
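+///
+/// # Example
+///
+/// A sketch of checking the mask state before doing work that relies on
+/// interrupts being serviced:
+///
+/// ```ignore
+/// if primask::read().is_active() {
+///     // exceptions with configurable priority are currently serviced
+/// }
+/// ```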
+#[inline]
+pub fn read() -> Primask {
+ let r: u32 = call_asm!(__primask_r() -> u32);
+ if r & (1 << 0) == (1 << 0) {
+ Primask::Inactive
+ } else {
+ Primask::Active
+ }
+}
diff --git a/src/register/psp.rs b/src/register/psp.rs
new file mode 100644
index 0000000..0bca22c
--- /dev/null
+++ b/src/register/psp.rs
@@ -0,0 +1,13 @@
+//! Process Stack Pointer
+
+/// Reads the CPU register
+#[inline]
+pub fn read() -> u32 {
+ call_asm!(__psp_r() -> u32)
+}
+
+/// Writes `bits` to the CPU register
+#[inline]
+pub unsafe fn write(bits: u32) {
+ call_asm!(__psp_w(bits: u32))
+}
diff --git a/src/register/psplim.rs b/src/register/psplim.rs
new file mode 100644
index 0000000..8ee1e94
--- /dev/null
+++ b/src/register/psplim.rs
@@ -0,0 +1,13 @@
+//! Process Stack Pointer Limit Register
+
+/// Reads the CPU register
+#[inline]
+pub fn read() -> u32 {
+ call_asm!(__psplim_r() -> u32)
+}
+
+/// Writes `bits` to the CPU register
+#[inline]
+pub unsafe fn write(bits: u32) {
+ call_asm!(__psplim_w(bits: u32))
+}