Diffstat (limited to 'cortex-m/src/peripheral')
-rw-r--r--  cortex-m/src/peripheral/ac.rs      93
-rw-r--r--  cortex-m/src/peripheral/cbp.rs    138
-rw-r--r--  cortex-m/src/peripheral/cpuid.rs  140
-rw-r--r--  cortex-m/src/peripheral/dcb.rs     81
-rw-r--r--  cortex-m/src/peripheral/dwt.rs    507
-rw-r--r--  cortex-m/src/peripheral/fpb.rs     21
-rw-r--r--  cortex-m/src/peripheral/fpu.rs     19
-rw-r--r--  cortex-m/src/peripheral/icb.rs     32
-rw-r--r--  cortex-m/src/peripheral/itm.rs    216
-rw-r--r--  cortex-m/src/peripheral/mod.rs    592
-rw-r--r--  cortex-m/src/peripheral/mpu.rs     65
-rw-r--r--  cortex-m/src/peripheral/nvic.rs   272
-rw-r--r--  cortex-m/src/peripheral/sau.rs    242
-rw-r--r--  cortex-m/src/peripheral/scb.rs   1110
-rw-r--r--  cortex-m/src/peripheral/syst.rs   185
-rw-r--r--  cortex-m/src/peripheral/test.rs   170
-rw-r--r--  cortex-m/src/peripheral/tpiu.rs   160
17 files changed, 4043 insertions, 0 deletions
diff --git a/cortex-m/src/peripheral/ac.rs b/cortex-m/src/peripheral/ac.rs
new file mode 100644
index 0000000..1ac5be1
--- /dev/null
+++ b/cortex-m/src/peripheral/ac.rs
@@ -0,0 +1,93 @@
+//! Cortex-M7 TCM and Cache access control.
+
+use volatile_register::RW;
+
+/// Register block
+#[repr(C)]
+pub struct RegisterBlock {
+ /// Instruction Tightly-Coupled Memory Control Register
+ pub itcmcr: RW<u32>,
+ /// Data Tightly-Coupled Memory Control Register
+ pub dtcmcr: RW<u32>,
+ /// AHBP Control Register
+ pub ahbpcr: RW<u32>,
+ /// L1 Cache Control Register
+ pub cacr: RW<u32>,
+ /// AHB Slave Control Register
+ pub ahbscr: RW<u32>,
+ reserved0: u32,
+ /// Auxiliary Bus Fault Status Register
+ pub abfsr: RW<u32>,
+}
+
+/// ITCMCR and DTCMCR TCM enable bit.
+pub const TCM_EN: u32 = 1;
+
+/// ITCMCR and DTCMCR TCM read-modify-write bit.
+pub const TCM_RMW: u32 = 2;
+
+/// ITCMCR and DTCMCR TCM retry phase enable bit.
+pub const TCM_RETEN: u32 = 4;
+
+/// ITCMCR and DTCMCR TCM size mask.
+pub const TCM_SZ_MASK: u32 = 0x78;
+
+/// ITCMCR and DTCMCR TCM size shift.
+pub const TCM_SZ_SHIFT: usize = 3;
+
+/// AHBPCR AHBP enable bit.
+pub const AHBPCR_EN: u32 = 1;
+
+/// AHBPCR AHBP size mask.
+pub const AHBPCR_SZ_MASK: u32 = 0x0e;
+
+/// AHBPCR AHBP size shift.
+pub const AHBPCR_SZ_SHIFT: usize = 1;
+
+/// CACR Shared cacheable-is-WT for data cache.
+pub const CACR_SIWT: u32 = 1;
+
+/// CACR ECC in the instruction and data cache (disable).
+pub const CACR_ECCDIS: u32 = 2;
+
+/// CACR Force Write-Through in the data cache.
+pub const CACR_FORCEWT: u32 = 4;
+
+/// AHBSCR AHBS prioritization control mask.
+pub const AHBSCR_CTL_MASK: u32 = 0x03;
+
+/// AHBSCR AHBS prioritization control shift.
+pub const AHBSCR_CTL_SHIFT: usize = 0;
+
+/// AHBSCR Threshold execution priority for AHBS traffic demotion, mask.
+pub const AHBSCR_TPRI_MASK: u32 = 0x7fc;
+
+/// AHBSCR Threshold execution priority for AHBS traffic demotion, shift.
+pub const AHBSCR_TPRI_SHIFT: usize = 2;
+
+/// AHBSCR Fairness counter initialization value, mask.
+pub const AHBSCR_INITCOUNT_MASK: u32 = 0xf800;
+
+/// AHBSCR Fairness counter initialization value, shift.
+pub const AHBSCR_INITCOUNT_SHIFT: usize = 11;
+
+/// ABFSR Async fault on ITCM interface.
+pub const ABFSR_ITCM: u32 = 1;
+
+/// ABFSR Async fault on DTCM interface.
+pub const ABFSR_DTCM: u32 = 2;
+
+/// ABFSR Async fault on AHBP interface.
+pub const ABFSR_AHBP: u32 = 4;
+
+/// ABFSR Async fault on AXIM interface.
+pub const ABFSR_AXIM: u32 = 8;
+
+/// ABFSR Async fault on EPPB interface.
+pub const ABFSR_EPPB: u32 = 16;
+
+/// ABFSR Indicates the type of fault on the AXIM interface, mask.
+pub const ABFSR_AXIMTYPE_MASK: u32 = 0x300;
+
+/// ABFSR Indicates the type of fault on the AXIM interface, shift.
+pub const ABFSR_AXIMTYPE_SHIFT: usize = 8;
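Usage sketch (not part of the diff): decoding the ITCM state from ITCMCR with the constants above, on a Cortex-M7 target. The size interpretation in the comment follows our reading of the Cortex-M7 TRM and should be checked against the device documentation.

```rust
use cortex_m::peripheral::ac::{TCM_EN, TCM_SZ_MASK, TCM_SZ_SHIFT};
use cortex_m::peripheral::AC;

// Read ITCMCR through the raw register-block pointer exposed as `AC::PTR`.
let itcmcr = unsafe { (*AC::PTR).itcmcr.read() };
let itcm_enabled = itcmcr & TCM_EN != 0;
// SZ is the 4-bit size field; per the Cortex-M7 TRM, a non-zero value N
// encodes an ITCM size of 2^N KiB, and 0 means no ITCM is implemented.
let itcm_sz = (itcmcr & TCM_SZ_MASK) >> TCM_SZ_SHIFT;
```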
diff --git a/cortex-m/src/peripheral/cbp.rs b/cortex-m/src/peripheral/cbp.rs
new file mode 100644
index 0000000..5aee544
--- /dev/null
+++ b/cortex-m/src/peripheral/cbp.rs
@@ -0,0 +1,138 @@
+//! Cache and branch predictor maintenance operations
+//!
+//! *NOTE* Not available on Armv6-M.
+
+use volatile_register::WO;
+
+use crate::peripheral::CBP;
+
+/// Register block
+#[repr(C)]
+pub struct RegisterBlock {
+ /// I-cache invalidate all to PoU
+ pub iciallu: WO<u32>,
+ reserved0: u32,
+ /// I-cache invalidate by MVA to PoU
+ pub icimvau: WO<u32>,
+ /// D-cache invalidate by MVA to PoC
+ pub dcimvac: WO<u32>,
+ /// D-cache invalidate by set-way
+ pub dcisw: WO<u32>,
+ /// D-cache clean by MVA to PoU
+ pub dccmvau: WO<u32>,
+ /// D-cache clean by MVA to PoC
+ pub dccmvac: WO<u32>,
+ /// D-cache clean by set-way
+ pub dccsw: WO<u32>,
+ /// D-cache clean and invalidate by MVA to PoC
+ pub dccimvac: WO<u32>,
+ /// D-cache clean and invalidate by set-way
+ pub dccisw: WO<u32>,
+ /// Branch predictor invalidate all
+ pub bpiall: WO<u32>,
+}
+
+const CBP_SW_WAY_POS: u32 = 30;
+const CBP_SW_WAY_MASK: u32 = 0x3 << CBP_SW_WAY_POS;
+const CBP_SW_SET_POS: u32 = 5;
+const CBP_SW_SET_MASK: u32 = 0x1FF << CBP_SW_SET_POS;
+
+impl CBP {
+ /// I-cache invalidate all to PoU
+ #[inline(always)]
+ pub fn iciallu(&mut self) {
+ unsafe { self.iciallu.write(0) };
+ }
+
+ /// I-cache invalidate by MVA to PoU
+ #[inline(always)]
+ pub fn icimvau(&mut self, mva: u32) {
+ unsafe { self.icimvau.write(mva) };
+ }
+
+ /// D-cache invalidate by MVA to PoC
+ #[inline(always)]
+ pub unsafe fn dcimvac(&mut self, mva: u32) {
+ self.dcimvac.write(mva);
+ }
+
+ /// D-cache invalidate by set-way
+ ///
+ /// `set` is masked to be between 0 and 511, and `way` between 0 and 3.
+ #[inline(always)]
+ pub unsafe fn dcisw(&mut self, set: u16, way: u16) {
+ // The ARMv7-M Architecture Reference Manual, as of Revision E.b, says these set/way
+ // operations have a register data format which depends on the implementation's
+ // associativity and number of sets. Specifically the 'way' and 'set' fields have
+ // offsets 32-log2(ASSOCIATIVITY) and log2(LINELEN) respectively.
+ //
+ // However, in Cortex-M7 devices, these offsets are fixed at 30 and 5, as per the Cortex-M7
+ // Generic User Guide section 4.8.3. Since the Cortex-M7 is the only ARMv7-M
+ // implementation with a DCACHE or ICACHE at all, it seems safe to do the same
+ // thing as the CMSIS-Core implementation and use fixed values.
+ self.dcisw.write(
+ ((u32::from(way) & (CBP_SW_WAY_MASK >> CBP_SW_WAY_POS)) << CBP_SW_WAY_POS)
+ | ((u32::from(set) & (CBP_SW_SET_MASK >> CBP_SW_SET_POS)) << CBP_SW_SET_POS),
+ );
+ }
+
+ /// D-cache clean by MVA to PoU
+ #[inline(always)]
+ pub fn dccmvau(&mut self, mva: u32) {
+ unsafe {
+ self.dccmvau.write(mva);
+ }
+ }
+
+ /// D-cache clean by MVA to PoC
+ #[inline(always)]
+ pub fn dccmvac(&mut self, mva: u32) {
+ unsafe {
+ self.dccmvac.write(mva);
+ }
+ }
+
+ /// D-cache clean by set-way
+ ///
+ /// `set` is masked to be between 0 and 511, and `way` between 0 and 3.
+ #[inline(always)]
+ pub fn dccsw(&mut self, set: u16, way: u16) {
+ // See comment for dcisw() about the format here
+ unsafe {
+ self.dccsw.write(
+ ((u32::from(way) & (CBP_SW_WAY_MASK >> CBP_SW_WAY_POS)) << CBP_SW_WAY_POS)
+ | ((u32::from(set) & (CBP_SW_SET_MASK >> CBP_SW_SET_POS)) << CBP_SW_SET_POS),
+ );
+ }
+ }
+
+ /// D-cache clean and invalidate by MVA to PoC
+ #[inline(always)]
+ pub fn dccimvac(&mut self, mva: u32) {
+ unsafe {
+ self.dccimvac.write(mva);
+ }
+ }
+
+ /// D-cache clean and invalidate by set-way
+ ///
+ /// `set` is masked to be between 0 and 511, and `way` between 0 and 3.
+ #[inline(always)]
+ pub fn dccisw(&mut self, set: u16, way: u16) {
+ // See comment for dcisw() about the format here
+ unsafe {
+ self.dccisw.write(
+ ((u32::from(way) & (CBP_SW_WAY_MASK >> CBP_SW_WAY_POS)) << CBP_SW_WAY_POS)
+ | ((u32::from(set) & (CBP_SW_SET_MASK >> CBP_SW_SET_POS)) << CBP_SW_SET_POS),
+ );
+ }
+ }
+
+ /// Branch predictor invalidate all
+ #[inline(always)]
+ pub fn bpiall(&mut self) {
+ unsafe {
+ self.bpiall.write(0);
+ }
+ }
+}
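A sketch (not in the diff) of how `dccsw` pairs with `CPUID::cache_num_sets_ways` to clean the entire L1 D-cache by set/way. It assumes mutable `cpuid` and `cbp` handles taken from `Peripherals::take()` on a Cortex-M7; this is an illustration of the set/way loop, not the crate's own cache API.

```rust
use cortex_m::asm;
use cortex_m::peripheral::cpuid::CsselrCacheType;

// Query the geometry of the L1 data/unified cache (level 0 = L1).
let (sets, ways) = cpuid.cache_num_sets_ways(0, CsselrCacheType::DataOrUnified);

// Clean every line; `dccsw` masks `set` and `way` to their field widths.
for set in 0..sets {
    for way in 0..ways {
        cbp.dccsw(set, way);
    }
}

// Ensure the maintenance operations complete before continuing.
asm::dsb();
asm::isb();
```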
diff --git a/cortex-m/src/peripheral/cpuid.rs b/cortex-m/src/peripheral/cpuid.rs
new file mode 100644
index 0000000..db85566
--- /dev/null
+++ b/cortex-m/src/peripheral/cpuid.rs
@@ -0,0 +1,140 @@
+//! CPUID
+
+use volatile_register::RO;
+#[cfg(not(armv6m))]
+use volatile_register::RW;
+
+#[cfg(not(armv6m))]
+use crate::peripheral::CPUID;
+
+/// Register block
+#[repr(C)]
+pub struct RegisterBlock {
+ /// CPUID base
+ pub base: RO<u32>,
+
+ _reserved0: [u32; 15],
+
+ /// Processor Feature (not present on Cortex-M0 variants)
+ #[cfg(not(armv6m))]
+ pub pfr: [RO<u32>; 2],
+ #[cfg(armv6m)]
+ _reserved1: [u32; 2],
+
+ /// Debug Feature (not present on Cortex-M0 variants)
+ #[cfg(not(armv6m))]
+ pub dfr: RO<u32>,
+ #[cfg(armv6m)]
+ _reserved2: u32,
+
+ /// Auxiliary Feature (not present on Cortex-M0 variants)
+ #[cfg(not(armv6m))]
+ pub afr: RO<u32>,
+ #[cfg(armv6m)]
+ _reserved3: u32,
+
+ /// Memory Model Feature (not present on Cortex-M0 variants)
+ #[cfg(not(armv6m))]
+ pub mmfr: [RO<u32>; 4],
+ #[cfg(armv6m)]
+ _reserved4: [u32; 4],
+
+ /// Instruction Set Attribute (not present on Cortex-M0 variants)
+ #[cfg(not(armv6m))]
+ pub isar: [RO<u32>; 5],
+ #[cfg(armv6m)]
+ _reserved5: [u32; 5],
+
+ _reserved6: u32,
+
+ /// Cache Level ID (only present on Cortex-M7)
+ #[cfg(not(armv6m))]
+ pub clidr: RO<u32>,
+
+ /// Cache Type (only present on Cortex-M7)
+ #[cfg(not(armv6m))]
+ pub ctr: RO<u32>,
+
+ /// Cache Size ID (only present on Cortex-M7)
+ #[cfg(not(armv6m))]
+ pub ccsidr: RO<u32>,
+
+ /// Cache Size Selection (only present on Cortex-M7)
+ #[cfg(not(armv6m))]
+ pub csselr: RW<u32>,
+}
+
+/// Type of cache to select on CSSELR writes.
+#[cfg(not(armv6m))]
+#[derive(Copy, Clone, Debug, PartialEq, Eq)]
+pub enum CsselrCacheType {
+ /// Select DCache or unified cache
+ DataOrUnified = 0,
+ /// Select ICache
+ Instruction = 1,
+}
+
+#[cfg(not(armv6m))]
+impl CPUID {
+ /// Selects the current CCSIDR
+ ///
+ /// * `level`: the required cache level minus 1, e.g. 0 for L1, 1 for L2
+ /// * `ind`: select instruction cache or data/unified cache
+ ///
+ /// `level` is masked to be between 0 and 7.
+ #[inline]
+ pub fn select_cache(&mut self, level: u8, ind: CsselrCacheType) {
+ const CSSELR_IND_POS: u32 = 0;
+ const CSSELR_IND_MASK: u32 = 1 << CSSELR_IND_POS;
+ const CSSELR_LEVEL_POS: u32 = 1;
+ const CSSELR_LEVEL_MASK: u32 = 0x7 << CSSELR_LEVEL_POS;
+
+ unsafe {
+ self.csselr.write(
+ ((u32::from(level) << CSSELR_LEVEL_POS) & CSSELR_LEVEL_MASK)
+ | (((ind as u32) << CSSELR_IND_POS) & CSSELR_IND_MASK),
+ )
+ }
+ }
+
+ /// Returns the number of sets and ways in the selected cache
+ #[inline]
+ pub fn cache_num_sets_ways(&mut self, level: u8, ind: CsselrCacheType) -> (u16, u16) {
+ const CCSIDR_NUMSETS_POS: u32 = 13;
+ const CCSIDR_NUMSETS_MASK: u32 = 0x7FFF << CCSIDR_NUMSETS_POS;
+ const CCSIDR_ASSOCIATIVITY_POS: u32 = 3;
+ const CCSIDR_ASSOCIATIVITY_MASK: u32 = 0x3FF << CCSIDR_ASSOCIATIVITY_POS;
+
+ self.select_cache(level, ind);
+ crate::asm::dsb();
+ let ccsidr = self.ccsidr.read();
+ (
+ (1 + ((ccsidr & CCSIDR_NUMSETS_MASK) >> CCSIDR_NUMSETS_POS)) as u16,
+ (1 + ((ccsidr & CCSIDR_ASSOCIATIVITY_MASK) >> CCSIDR_ASSOCIATIVITY_POS)) as u16,
+ )
+ }
+
+ /// Returns log2 of the number of words in the smallest cache line of all the data caches and
+ /// unified caches that are controlled by the processor.
+ ///
+ /// This is the `DminLine` field of the CTR register.
+ #[inline(always)]
+ pub fn cache_dminline() -> u32 {
+ const CTR_DMINLINE_POS: u32 = 16;
+ const CTR_DMINLINE_MASK: u32 = 0xF << CTR_DMINLINE_POS;
+ let ctr = unsafe { (*Self::PTR).ctr.read() };
+ (ctr & CTR_DMINLINE_MASK) >> CTR_DMINLINE_POS
+ }
+
+ /// Returns log2 of the number of words in the smallest cache line of all the instruction
+ /// caches that are controlled by the processor.
+ ///
+ /// This is the `IminLine` field of the CTR register.
+ #[inline(always)]
+ pub fn cache_iminline() -> u32 {
+ const CTR_IMINLINE_POS: u32 = 0;
+ const CTR_IMINLINE_MASK: u32 = 0xF << CTR_IMINLINE_POS;
+ let ctr = unsafe { (*Self::PTR).ctr.read() };
+ (ctr & CTR_IMINLINE_MASK) >> CTR_IMINLINE_POS
+ }
+}
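Since `DminLine`/`IminLine` are log2 of a word count, converting them to a line size in bytes is a one-liner; a short sketch using the getters above:

```rust
use cortex_m::peripheral::CPUID;

// DminLine = log2(words per line); a word is 4 bytes, so bytes = 4 << DminLine.
let dcache_line_bytes: u32 = 4 << CPUID::cache_dminline();
let icache_line_bytes: u32 = 4 << CPUID::cache_iminline();
```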
diff --git a/cortex-m/src/peripheral/dcb.rs b/cortex-m/src/peripheral/dcb.rs
new file mode 100644
index 0000000..a4db9fc
--- /dev/null
+++ b/cortex-m/src/peripheral/dcb.rs
@@ -0,0 +1,81 @@
+//! Debug Control Block
+
+use volatile_register::{RW, WO};
+
+use crate::peripheral::DCB;
+use core::ptr;
+
+const DCB_DEMCR_TRCENA: u32 = 1 << 24;
+const DCB_DEMCR_MON_EN: u32 = 1 << 16;
+
+/// Register block
+#[repr(C)]
+pub struct RegisterBlock {
+ /// Debug Halting Control and Status
+ pub dhcsr: RW<u32>,
+ /// Debug Core Register Selector
+ pub dcrsr: WO<u32>,
+ /// Debug Core Register Data
+ pub dcrdr: RW<u32>,
+ /// Debug Exception and Monitor Control
+ pub demcr: RW<u32>,
+}
+
+impl DCB {
+ /// Enables TRACE. This is required, for example, for the
+ /// `peripheral::DWT` cycle counter to work properly.
+ /// Per ST's documentation, this flag is not reset on a
+ /// soft reset, only on a power reset.
+ ///
+ /// Note: vendor-specific registers may have to be set to completely
+ /// enable tracing. For example, on the STM32F401RE, `TRACE_MODE`
+ /// and `TRACE_IOEN` must be configured in `DBGMCU_CR` register.
+ #[inline]
+ pub fn enable_trace(&mut self) {
+ // set bit 24 / TRCENA
+ unsafe {
+ self.demcr.modify(|w| w | DCB_DEMCR_TRCENA);
+ }
+ }
+
+ /// Disables TRACE. See `DCB::enable_trace()` for more details
+ #[inline]
+ pub fn disable_trace(&mut self) {
+ // unset bit 24 / TRCENA
+ unsafe {
+ self.demcr.modify(|w| w & !DCB_DEMCR_TRCENA);
+ }
+ }
+
+ /// Enables the [`DebugMonitor`](crate::peripheral::scb::Exception::DebugMonitor) exception
+ #[inline]
+ pub fn enable_debug_monitor(&mut self) {
+ unsafe {
+ self.demcr.modify(|w| w | DCB_DEMCR_MON_EN);
+ }
+ }
+
+ /// Disables the [`DebugMonitor`](crate::peripheral::scb::Exception::DebugMonitor) exception
+ #[inline]
+ pub fn disable_debug_monitor(&mut self) {
+ unsafe {
+ self.demcr.modify(|w| w & !DCB_DEMCR_MON_EN);
+ }
+ }
+
+ /// Is there a debugger attached? (see note)
+ ///
+ /// Note: This function is [reported not to
+ /// work](http://web.archive.org/web/20180821191012/https://community.nxp.com/thread/424925#comment-782843)
+ /// on Cortex-M0 devices. Per the ARM v6-M Architecture Reference Manual, "Access to the DHCSR
+ /// from software running on the processor is IMPLEMENTATION DEFINED". Indeed, from the
+ /// [Cortex-M0+ r0p1 Technical Reference Manual](http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.ddi0484c/BABJHEIG.html), "Note Software cannot access the debug registers."
+ #[inline]
+ pub fn is_debugger_attached() -> bool {
+ unsafe {
+ // do an 8-bit read of the 32-bit DHCSR register, and get the LSB
+ let value = ptr::read_volatile(Self::PTR as *const u8);
+ value & 0x1 == 1
+ }
+ }
+}
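A small sketch (not part of the diff) of gating debug-only behavior on `is_debugger_attached`, keeping the Cortex-M0 caveat above in mind:

```rust
use cortex_m::{asm, peripheral::DCB};

// Only trap into the debugger when one is actually attached; otherwise a
// BKPT instruction would escalate to a HardFault.
if DCB::is_debugger_attached() {
    asm::bkpt();
}
```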
diff --git a/cortex-m/src/peripheral/dwt.rs b/cortex-m/src/peripheral/dwt.rs
new file mode 100644
index 0000000..72575d3
--- /dev/null
+++ b/cortex-m/src/peripheral/dwt.rs
@@ -0,0 +1,507 @@
+//! Data Watchpoint and Trace unit
+
+#[cfg(not(armv6m))]
+use volatile_register::WO;
+use volatile_register::{RO, RW};
+
+use crate::peripheral::DWT;
+use bitfield::bitfield;
+
+/// Register block
+#[repr(C)]
+pub struct RegisterBlock {
+ /// Control
+ pub ctrl: RW<Ctrl>,
+ /// Cycle Count
+ #[cfg(not(armv6m))]
+ pub cyccnt: RW<u32>,
+ /// CPI Count
+ #[cfg(not(armv6m))]
+ pub cpicnt: RW<u32>,
+ /// Exception Overhead Count
+ #[cfg(not(armv6m))]
+ pub exccnt: RW<u32>,
+ /// Sleep Count
+ #[cfg(not(armv6m))]
+ pub sleepcnt: RW<u32>,
+ /// LSU Count
+ #[cfg(not(armv6m))]
+ pub lsucnt: RW<u32>,
+ /// Folded-instruction Count
+ #[cfg(not(armv6m))]
+ pub foldcnt: RW<u32>,
+ /// Cortex-M0(+) does not have these parts
+ #[cfg(armv6m)]
+ reserved: [u32; 6],
+ /// Program Counter Sample
+ pub pcsr: RO<u32>,
+ /// Comparators
+ #[cfg(armv6m)]
+ pub c: [Comparator; 2],
+ #[cfg(not(armv6m))]
+ /// Comparators
+ pub c: [Comparator; 16],
+ #[cfg(not(armv6m))]
+ reserved: [u32; 932],
+ /// Lock Access
+ #[cfg(not(armv6m))]
+ pub lar: WO<u32>,
+ /// Lock Status
+ #[cfg(not(armv6m))]
+ pub lsr: RO<u32>,
+}
+
+bitfield! {
+ /// Control register.
+ #[repr(C)]
+ #[derive(Copy, Clone)]
+ pub struct Ctrl(u32);
+ cyccntena, set_cyccntena: 0;
+ pcsamplena, set_pcsamplena: 12;
+ exctrcena, set_exctrcena: 16;
+ noprfcnt, _: 24;
+ nocyccnt, _: 25;
+ noexttrig, _: 26;
+ notrcpkt, _: 27;
+ u8, numcomp, _: 31, 28;
+}
+
+/// Comparator
+#[repr(C)]
+pub struct Comparator {
+ /// Comparator
+ pub comp: RW<u32>,
+ /// Comparator Mask
+ pub mask: RW<u32>,
+ /// Comparator Function
+ pub function: RW<Function>,
+ reserved: u32,
+}
+
+bitfield! {
+ #[repr(C)]
+ #[derive(Copy, Clone)]
+ /// Comparator FUNCTIONn register.
+ ///
+ /// See C1.8.17 "Comparator Function registers, DWT_FUNCTIONn"
+ pub struct Function(u32);
+ u8, function, set_function: 3, 0;
+ emitrange, set_emitrange: 5;
+ cycmatch, set_cycmatch: 7;
+ datavmatch, set_datavmatch: 8;
+ lnk1ena, set_lnk1ena: 9;
+ u8, datavsize, set_datavsize: 11, 10;
+ u8, datavaddr0, set_datavaddr0: 15, 12;
+ u8, datavaddr1, set_datavaddr1: 19, 16;
+ matched, _: 24;
+}
+
+impl DWT {
+ /// Number of comparators implemented
+ ///
+ /// A value of zero indicates no comparator support.
+ #[inline]
+ pub fn num_comp(&self) -> u8 {
+ self.ctrl.read().numcomp()
+ }
+
+ /// Returns `true` if the implementation supports sampling and exception tracing
+ #[cfg(not(armv6m))]
+ #[inline]
+ pub fn has_exception_trace(&self) -> bool {
+ !self.ctrl.read().notrcpkt()
+ }
+
+ /// Returns `true` if the implementation includes external match signals
+ #[cfg(not(armv6m))]
+ #[inline]
+ pub fn has_external_match(&self) -> bool {
+ !self.ctrl.read().noexttrig()
+ }
+
+ /// Returns `true` if the implementation supports a cycle counter
+ #[inline]
+ pub fn has_cycle_counter(&self) -> bool {
+ #[cfg(not(armv6m))]
+ return !self.ctrl.read().nocyccnt();
+
+ #[cfg(armv6m)]
+ return false;
+ }
+
+ /// Returns `true` if the implementation supports the profiling counters
+ #[cfg(not(armv6m))]
+ #[inline]
+ pub fn has_profiling_counter(&self) -> bool {
+ !self.ctrl.read().noprfcnt()
+ }
+
+ /// Enables the cycle counter
+ ///
+ /// The global trace enable ([`DCB::enable_trace`]) should be set before
+ /// enabling the cycle counter, the processor may ignore writes to the
+ /// cycle counter enable if the global trace is disabled
+ /// (implementation defined behaviour).
+ ///
+ /// [`DCB::enable_trace`]: crate::peripheral::DCB::enable_trace
+ #[cfg(not(armv6m))]
+ #[inline]
+ pub fn enable_cycle_counter(&mut self) {
+ unsafe {
+ self.ctrl.modify(|mut r| {
+ r.set_cyccntena(true);
+ r
+ });
+ }
+ }
+
+ /// Disables the cycle counter
+ #[cfg(not(armv6m))]
+ #[inline]
+ pub fn disable_cycle_counter(&mut self) {
+ unsafe {
+ self.ctrl.modify(|mut r| {
+ r.set_cyccntena(false);
+ r
+ });
+ }
+ }
+
+ /// Returns `true` if the cycle counter is enabled
+ #[cfg(not(armv6m))]
+ #[inline]
+ pub fn cycle_counter_enabled(&self) -> bool {
+ self.ctrl.read().cyccntena()
+ }
+
+ /// Enables exception tracing
+ #[cfg(not(armv6m))]
+ #[inline]
+ pub fn enable_exception_tracing(&mut self) {
+ unsafe {
+ self.ctrl.modify(|mut r| {
+ r.set_exctrcena(true);
+ r
+ });
+ }
+ }
+
+ /// Disables exception tracing
+ #[cfg(not(armv6m))]
+ #[inline]
+ pub fn disable_exception_tracing(&mut self) {
+ unsafe {
+ self.ctrl.modify(|mut r| {
+ r.set_exctrcena(false);
+ r
+ });
+ }
+ }
+
+ /// Whether to periodically generate PC samples
+ #[cfg(not(armv6m))]
+ #[inline]
+ pub fn enable_pc_samples(&mut self, bit: bool) {
+ unsafe {
+ self.ctrl.modify(|mut r| {
+ r.set_pcsamplena(bit);
+ r
+ });
+ }
+ }
+
+ /// Returns the current clock cycle count
+ #[cfg(not(armv6m))]
+ #[inline]
+ #[deprecated(
+ since = "0.7.4",
+ note = "Use `cycle_count` which follows the C-GETTER convention"
+ )]
+ pub fn get_cycle_count() -> u32 {
+ Self::cycle_count()
+ }
+
+ /// Returns the current clock cycle count
+ #[cfg(not(armv6m))]
+ #[inline]
+ pub fn cycle_count() -> u32 {
+ // NOTE(unsafe) atomic read with no side effects
+ unsafe { (*Self::PTR).cyccnt.read() }
+ }
+
+ /// Set the cycle count
+ #[cfg(not(armv6m))]
+ #[inline]
+ pub fn set_cycle_count(&mut self, count: u32) {
+ unsafe { self.cyccnt.write(count) }
+ }
+
+ /// Removes the software lock on the DWT
+ ///
+ /// Some devices, like the STM32F7, software lock the DWT after a power cycle.
+ #[cfg(not(armv6m))]
+ #[inline]
+ pub fn unlock() {
+ // NOTE(unsafe) atomic write to a stateless, write-only register
+ unsafe { (*Self::PTR).lar.write(0xC5AC_CE55) }
+ }
+
+ /// Get the CPI count
+ ///
+ /// Counts additional cycles required to execute multi-cycle instructions,
+ /// except those recorded by [`lsu_count`], and counts any instruction fetch
+ /// stalls.
+ ///
+ /// [`lsu_count`]: DWT::lsu_count
+ #[cfg(not(armv6m))]
+ #[inline]
+ pub fn cpi_count() -> u8 {
+ // NOTE(unsafe) atomic read with no side effects
+ unsafe { (*Self::PTR).cpicnt.read() as u8 }
+ }
+
+ /// Set the CPI count
+ #[cfg(not(armv6m))]
+ #[inline]
+ pub fn set_cpi_count(&mut self, count: u8) {
+ unsafe { self.cpicnt.write(count as u32) }
+ }
+
+ /// Get the total cycles spent in exception processing
+ #[cfg(not(armv6m))]
+ #[inline]
+ pub fn exception_count() -> u8 {
+ // NOTE(unsafe) atomic read with no side effects
+ unsafe { (*Self::PTR).exccnt.read() as u8 }
+ }
+
+ /// Set the exception count
+ #[cfg(not(armv6m))]
+ #[inline]
+ pub fn set_exception_count(&mut self, count: u8) {
+ unsafe { self.exccnt.write(count as u32) }
+ }
+
+ /// Get the total number of cycles that the processor is sleeping
+ ///
+ /// ARM recommends that this counter counts all cycles when the processor is sleeping,
+ /// regardless of whether a WFI or WFE instruction, or the sleep-on-exit functionality,
+ /// caused the entry to sleep mode.
+ /// However, all sleep features are implementation defined and therefore when
+ /// this counter counts is implementation defined.
+ #[cfg(not(armv6m))]
+ #[inline]
+ pub fn sleep_count() -> u8 {
+ // NOTE(unsafe) atomic read with no side effects
+ unsafe { (*Self::PTR).sleepcnt.read() as u8 }
+ }
+
+ /// Set the sleep count
+ #[cfg(not(armv6m))]
+ #[inline]
+ pub fn set_sleep_count(&mut self, count: u8) {
+ unsafe { self.sleepcnt.write(count as u32) }
+ }
+
+ /// Get the additional cycles required to execute all load or store instructions
+ #[cfg(not(armv6m))]
+ #[inline]
+ pub fn lsu_count() -> u8 {
+ // NOTE(unsafe) atomic read with no side effects
+ unsafe { (*Self::PTR).lsucnt.read() as u8 }
+ }
+
+ /// Set the lsu count
+ #[cfg(not(armv6m))]
+ #[inline]
+ pub fn set_lsu_count(&mut self, count: u8) {
+ unsafe { self.lsucnt.write(count as u32) }
+ }
+
+ /// Get the folded instruction count
+ ///
+ /// Increments on each instruction that takes 0 cycles.
+ #[cfg(not(armv6m))]
+ #[inline]
+ pub fn fold_count() -> u8 {
+ // NOTE(unsafe) atomic read with no side effects
+ unsafe { (*Self::PTR).foldcnt.read() as u8 }
+ }
+
+ /// Set the folded instruction count
+ #[cfg(not(armv6m))]
+ #[inline]
+ pub fn set_fold_count(&mut self, count: u8) {
+ unsafe { self.foldcnt.write(count as u32) }
+ }
+}
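The counter API above composes into the usual cycle-measurement pattern; a minimal sketch, assuming a mutable `peripherals` obtained from `Peripherals::take()`:

```rust
use cortex_m::peripheral::DWT;

// Trace must be enabled first, or the CYCCNT enable write may be ignored.
peripherals.DCB.enable_trace();
peripherals.DWT.enable_cycle_counter();

let start = DWT::cycle_count();
// ... code under measurement ...
// Wrapping subtraction stays correct across a single counter wrap.
let elapsed = DWT::cycle_count().wrapping_sub(start);
```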
+
+/// Whether the comparator should match on read, write or read/write operations.
+#[derive(Debug, Eq, PartialEq, Copy, Clone)]
+pub enum AccessType {
+ /// Generate packet only when matched address is read from.
+ ReadOnly,
+ /// Generate packet only when matched address is written to.
+ WriteOnly,
+ /// Generate packet when matched address is both read from and written to.
+ ReadWrite,
+}
+
+/// The sequence of packet(s) or events that should be emitted/generated on comparator match.
+#[derive(Debug, Eq, PartialEq, Copy, Clone)]
+pub enum EmitOption {
+ /// Emit only trace data value packet.
+ Data,
+ /// Emit only trace address packet.
+ Address,
+ /// Emit only trace PC value packet
+ ///
+ /// *NOTE* only compatible with [AccessType::ReadWrite].
+ PC,
+ /// Emit trace address and data value packets.
+ AddressData,
+ /// Emit trace PC value and data value packets.
+ PCData,
+ /// Generate a watchpoint debug event. Either halts execution or fires a `DebugMonitor` exception.
+ ///
+ /// See more in section "Watchpoint debug event generation" page C1-729.
+ WatchpointDebugEvent,
+ /// Generate a `CMPMATCH[N]` event.
+ ///
+ /// See more in section "CMPMATCH[N] event generation" page C1-730.
+ CompareMatchEvent,
+}
+
+/// Settings for address matching
+#[derive(Debug, Eq, PartialEq, Copy, Clone)]
+pub struct ComparatorAddressSettings {
+ /// The address to match against.
+ pub address: u32,
+ /// The address mask to match against.
+ pub mask: u32,
+ /// What sequence of packet(s) to emit on comparator match.
+ pub emit: EmitOption,
+ /// Whether to match on read, write or read/write operations.
+ pub access_type: AccessType,
+}
+
+/// Settings for cycle count matching
+#[derive(Debug, Eq, PartialEq, Copy, Clone)]
+pub struct CycleCountSettings {
+ /// The function selection used.
+ /// See Table C1-15 for DWT cycle count comparison functions.
+ pub emit: EmitOption,
+ /// The cycle count value to compare against.
+ pub compare: u32,
+}
+
+/// The available functions of a DWT comparator.
+#[derive(Debug, Eq, PartialEq, Copy, Clone)]
+#[non_exhaustive]
+pub enum ComparatorFunction {
+ /// Compare accessed memory addresses.
+ Address(ComparatorAddressSettings),
+ /// Compare cycle count & target value.
+ ///
+ /// **NOTE**: only supported by comparator 0 and if the HW supports the cycle counter.
+ /// Check [`DWT::has_cycle_counter`] for support. See C1.8.1 for more details.
+ CycleCount(CycleCountSettings),
+}
+
+/// Possible error values returned on [Comparator::configure].
+#[derive(Debug, Eq, PartialEq, Copy, Clone)]
+#[non_exhaustive]
+pub enum DwtError {
+ /// Invalid combination of [AccessType] and [EmitOption].
+ InvalidFunction,
+}
+
+impl Comparator {
+ /// Configure the function of the comparator
+ #[allow(clippy::missing_inline_in_public_items)]
+ pub fn configure(&self, settings: ComparatorFunction) -> Result<(), DwtError> {
+ match settings {
+ ComparatorFunction::Address(settings) => {
+ // FUNCTION, EMITRANGE
+ // See Table C1-14
+ let (function, emit_range) = match (&settings.access_type, &settings.emit) {
+ (AccessType::ReadOnly, EmitOption::Data) => (0b1100, false),
+ (AccessType::ReadOnly, EmitOption::Address) => (0b1100, true),
+ (AccessType::ReadOnly, EmitOption::AddressData) => (0b1110, true),
+ (AccessType::ReadOnly, EmitOption::PCData) => (0b1110, false),
+ (AccessType::ReadOnly, EmitOption::WatchpointDebugEvent) => (0b0101, false),
+ (AccessType::ReadOnly, EmitOption::CompareMatchEvent) => (0b1001, false),
+
+ (AccessType::WriteOnly, EmitOption::Data) => (0b1101, false),
+ (AccessType::WriteOnly, EmitOption::Address) => (0b1101, true),
+ (AccessType::WriteOnly, EmitOption::AddressData) => (0b1111, true),
+ (AccessType::WriteOnly, EmitOption::PCData) => (0b1111, false),
+ (AccessType::WriteOnly, EmitOption::WatchpointDebugEvent) => (0b0110, false),
+ (AccessType::WriteOnly, EmitOption::CompareMatchEvent) => (0b1010, false),
+
+ (AccessType::ReadWrite, EmitOption::Data) => (0b0010, false),
+ (AccessType::ReadWrite, EmitOption::Address) => (0b0001, true),
+ (AccessType::ReadWrite, EmitOption::AddressData) => (0b0010, true),
+ (AccessType::ReadWrite, EmitOption::PCData) => (0b0011, false),
+ (AccessType::ReadWrite, EmitOption::WatchpointDebugEvent) => (0b0111, false),
+ (AccessType::ReadWrite, EmitOption::CompareMatchEvent) => (0b1011, false),
+
+ (AccessType::ReadWrite, EmitOption::PC) => (0b0001, false),
+ (_, EmitOption::PC) => return Err(DwtError::InvalidFunction),
+ };
+
+ unsafe {
+ self.function.modify(|mut r| {
+ r.set_function(function);
+ r.set_emitrange(emit_range);
+ // don't compare data value
+ r.set_datavmatch(false);
+ // don't compare cycle counter value
+ // NOTE: only needed for comparator 0, but is SBZP.
+ r.set_cycmatch(false);
+ // SBZ as needed, see Page 784/C1-724
+ r.set_datavsize(0);
+ r.set_datavaddr0(0);
+ r.set_datavaddr1(0);
+
+ r
+ });
+
+ self.comp.write(settings.address);
+ self.mask.write(settings.mask);
+ }
+ }
+ ComparatorFunction::CycleCount(settings) => {
+ let function = match &settings.emit {
+ EmitOption::PCData => 0b0001,
+ EmitOption::WatchpointDebugEvent => 0b0100,
+ EmitOption::CompareMatchEvent => 0b1000,
+ _ => return Err(DwtError::InvalidFunction),
+ };
+
+ unsafe {
+ self.function.modify(|mut r| {
+ r.set_function(function);
+ // emit_range is N/A for cycle count compare
+ r.set_emitrange(false);
+ // don't compare data
+ r.set_datavmatch(false);
+ // compare cyccnt
+ r.set_cycmatch(true);
+ // SBZ as needed, see Page 784/C1-724
+ r.set_datavsize(0);
+ r.set_datavaddr0(0);
+ r.set_datavaddr1(0);
+
+ r
+ });
+
+ self.comp.write(settings.compare);
+ self.mask.write(0); // SBZ, see Page 784/C1-724
+ }
+ }
+ }
+
+ Ok(())
+ }
+}
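A sketch of driving `Comparator::configure` as a data watchpoint. The watched address is hypothetical, and `peripherals` is assumed from `Peripherals::take()` with trace enabled:

```rust
use cortex_m::peripheral::dwt::{
    AccessType, ComparatorAddressSettings, ComparatorFunction, EmitOption,
};

// Hypothetical address to watch for writes.
const WATCHED: u32 = 0x2000_0000;

peripherals.DWT.c[0]
    .configure(ComparatorFunction::Address(ComparatorAddressSettings {
        address: WATCHED,
        mask: 0, // exact match, no address range
        emit: EmitOption::WatchpointDebugEvent,
        access_type: AccessType::WriteOnly,
    }))
    .unwrap();
```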
diff --git a/cortex-m/src/peripheral/fpb.rs b/cortex-m/src/peripheral/fpb.rs
new file mode 100644
index 0000000..b86b8b2
--- /dev/null
+++ b/cortex-m/src/peripheral/fpb.rs
@@ -0,0 +1,21 @@
+//! Flash Patch and Breakpoint unit
+//!
+//! *NOTE* Not available on Armv6-M.
+
+use volatile_register::{RO, RW, WO};
+
+/// Register block
+#[repr(C)]
+pub struct RegisterBlock {
+ /// Control
+ pub ctrl: RW<u32>,
+ /// Remap
+ pub remap: RW<u32>,
+ /// Comparator
+ pub comp: [RW<u32>; 127],
+ reserved: [u32; 875],
+ /// Lock Access
+ pub lar: WO<u32>,
+ /// Lock Status
+ pub lsr: RO<u32>,
+}
diff --git a/cortex-m/src/peripheral/fpu.rs b/cortex-m/src/peripheral/fpu.rs
new file mode 100644
index 0000000..9a047d8
--- /dev/null
+++ b/cortex-m/src/peripheral/fpu.rs
@@ -0,0 +1,19 @@
+//! Floating Point Unit
+//!
+//! *NOTE* Available only on targets with a Floating Point Unit (FPU) extension.
+
+use volatile_register::{RO, RW};
+
+/// Register block
+#[repr(C)]
+pub struct RegisterBlock {
+ reserved: u32,
+ /// Floating Point Context Control
+ pub fpccr: RW<u32>,
+ /// Floating Point Context Address
+ pub fpcar: RW<u32>,
+ /// Floating Point Default Status Control
+ pub fpdscr: RW<u32>,
+ /// Media and FP Feature
+ pub mvfr: [RO<u32>; 3],
+}
diff --git a/cortex-m/src/peripheral/icb.rs b/cortex-m/src/peripheral/icb.rs
new file mode 100644
index 0000000..e1de33b
--- /dev/null
+++ b/cortex-m/src/peripheral/icb.rs
@@ -0,0 +1,32 @@
+//! Implementation Control Block
+
+#[cfg(any(armv7m, armv8m, native))]
+use volatile_register::RO;
+use volatile_register::RW;
+
+/// Register block
+#[repr(C)]
+pub struct RegisterBlock {
+ /// Interrupt Controller Type Register
+ ///
+ /// The bottom four bits of this register (`INTLINESNUM`) give the number of
+ /// implemented interrupt lines in groups of 32, i.e. up to
+ /// `32 * (INTLINESNUM + 1)` lines. So a value of `0b0010` indicates support
+ /// for up to 96 interrupts.
+ #[cfg(any(armv7m, armv8m, native))]
+ pub ictr: RO<u32>,
+
+ /// The ICTR is not defined in the ARMv6-M Architecture Reference manual, so
+ /// we replace it with this.
+ #[cfg(not(any(armv7m, armv8m, native)))]
+ _reserved: u32,
+
+ /// Auxiliary Control Register
+ ///
+ /// This register is entirely implementation defined -- the standard gives
+ /// it an address, but does not define its role or contents.
+ pub actlr: RW<u32>,
+
+ /// Coprocessor Power Control Register
+ #[cfg(armv8m)]
+ pub cppwr: RW<u32>,
+}
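A sketch of decoding `INTLINESNUM` from the ICTR described above (not available on Armv6-M), assuming an `icb` handle from `Peripherals::take()`:

```rust
// ICTR[3:0] = INTLINESNUM; the architecture supports up to
// 32 * (INTLINESNUM + 1) interrupt lines.
let ictr = icb.ictr.read();
let max_irq_lines = 32 * ((ictr & 0xF) as usize + 1);
```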
diff --git a/cortex-m/src/peripheral/itm.rs b/cortex-m/src/peripheral/itm.rs
new file mode 100644
index 0000000..7291ae0
--- /dev/null
+++ b/cortex-m/src/peripheral/itm.rs
@@ -0,0 +1,216 @@
+//! Instrumentation Trace Macrocell
+//!
+//! *NOTE* Not available on Armv6-M and Armv8-M Baseline.
+
+use core::cell::UnsafeCell;
+use core::ptr;
+
+use volatile_register::{RO, RW, WO};
+
+use crate::peripheral::ITM;
+use bitfield::bitfield;
+
+#[cfg(feature = "serde")]
+use serde::{Deserialize, Serialize};
+
+/// Register block
+#[repr(C)]
+pub struct RegisterBlock {
+ /// Stimulus Port
+ pub stim: [Stim; 256],
+ reserved0: [u32; 640],
+ /// Trace Enable
+ pub ter: [RW<u32>; 8],
+ reserved1: [u32; 8],
+ /// Trace Privilege
+ pub tpr: RW<u32>,
+ reserved2: [u32; 15],
+ /// Trace Control
+ pub tcr: RW<Tcr>,
+ reserved3: [u32; 75],
+ /// Lock Access
+ pub lar: WO<u32>,
+ /// Lock Status
+ pub lsr: RO<u32>,
+}
+
+bitfield! {
+ /// Trace Control Register.
+ #[repr(C)]
+ #[derive(Copy, Clone)]
+ pub struct Tcr(u32);
+ itmena, set_itmena: 0;
+ tsena, set_tsena: 1;
+ syncena, set_syncena: 2;
+ txena, set_txena: 3;
+ swoena, set_swoena: 4;
+ u8, tsprescale, set_tsprescale: 9, 8;
+ u8, gtsfreq, set_gtsfreq: 11, 10;
+ u8, tracebusid, set_tracebusid: 22, 16;
+ busy, _: 23;
+}
+
+/// Stimulus Port
+pub struct Stim {
+ register: UnsafeCell<u32>,
+}
+
+impl Stim {
+ /// Writes an `u8` payload into the stimulus port
+ #[inline]
+ pub fn write_u8(&mut self, value: u8) {
+ unsafe { ptr::write_volatile(self.register.get() as *mut u8, value) }
+ }
+
+ /// Writes an `u16` payload into the stimulus port
+ #[inline]
+ pub fn write_u16(&mut self, value: u16) {
+ unsafe { ptr::write_volatile(self.register.get() as *mut u16, value) }
+ }
+
+ /// Writes an `u32` payload into the stimulus port
+ #[inline]
+ pub fn write_u32(&mut self, value: u32) {
+ unsafe { ptr::write_volatile(self.register.get(), value) }
+ }
+
+ /// Returns `true` if the stimulus port is ready to accept more data
+ #[cfg(not(armv8m))]
+ #[inline]
+ pub fn is_fifo_ready(&self) -> bool {
+ unsafe { ptr::read_volatile(self.register.get()) & 0b1 == 1 }
+ }
+
+ /// Returns `true` if the stimulus port is ready to accept more data
+ #[cfg(armv8m)]
+ #[inline]
+ pub fn is_fifo_ready(&self) -> bool {
+ // ARMv8-M adds a disabled bit; we indicate that we are ready to
+ // proceed with a stimulus write if the port is either ready (bit 0) or
+ // disabled (bit 1).
+ unsafe { ptr::read_volatile(self.register.get()) & 0b11 != 0 }
+ }
+}
+
+/// The possible local timestamp options.
+#[derive(Debug, Eq, PartialEq, Copy, Clone)]
+#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
+pub enum LocalTimestampOptions {
+ /// Disable local timestamps.
+ Disabled,
+ /// Enable local timestamps and use no prescaling.
+ Enabled,
+ /// Enable local timestamps and set the prescaler to divide the
+ /// reference clock by 4.
+ EnabledDiv4,
+ /// Enable local timestamps and set the prescaler to divide the
+ /// reference clock by 16.
+ EnabledDiv16,
+ /// Enable local timestamps and set the prescaler to divide the
+ /// reference clock by 64.
+ EnabledDiv64,
+}
+
+#[cfg(feature = "std")]
+impl core::convert::TryFrom<u8> for LocalTimestampOptions {
+ type Error = ();
+
+ /// Converts an integer value to an enabled [LocalTimestampOptions]
+ /// variant. Accepted values are: 1, 4, 16, 64. Any other value
+ /// yields `Err(())`.
+ #[inline]
+ fn try_from(value: u8) -> Result<Self, Self::Error> {
+ match value {
+ 1 => Ok(Self::Enabled),
+ 4 => Ok(Self::EnabledDiv4),
+ 16 => Ok(Self::EnabledDiv16),
+ 64 => Ok(Self::EnabledDiv64),
+ _ => Err(()),
+ }
+ }
+}
+
+/// The possible global timestamp options.
+#[derive(Debug, Eq, PartialEq, Copy, Clone)]
+pub enum GlobalTimestampOptions {
+ /// Disable global timestamps.
+ Disabled,
+ /// Generate a global timestamp approximately every 128 cycles.
+ Every128Cycles,
+ /// Generate a global timestamp approximately every 8192 cycles.
+ Every8192Cycles,
+ /// Generate a global timestamp after every packet, if the output FIFO is empty.
+ EveryPacket,
+}
+
+/// The possible clock sources for timestamp counters.
+#[derive(Debug, Eq, PartialEq, Copy, Clone)]
+pub enum TimestampClkSrc {
+ /// Clock timestamp counters using the system processor clock.
+ SystemClock,
+ /// Clock timestamp counters using the asynchronous clock from the
+ /// TPIU interface.
+ ///
+ /// NOTE: The timestamp counter is held in reset while the output
+ /// line is idle.
+ AsyncTPIU,
+}
+
+/// Available settings for the ITM peripheral.
+#[derive(Debug, Eq, PartialEq, Copy, Clone)]
+pub struct ITMSettings {
+ /// Whether to enable ITM.
+ pub enable: bool,
+ /// Whether DWT packets should be forwarded to ITM.
+ pub forward_dwt: bool,
+ /// The local timestamp options that should be applied.
+ pub local_timestamps: LocalTimestampOptions,
+ /// The global timestamp options that should be applied.
+ pub global_timestamps: GlobalTimestampOptions,
+ /// The trace bus ID to use when multi-trace sources are in use.
+ /// `None` specifies that only a single trace source is in use and
+ /// has the same effect as `Some(0)`.
+ pub bus_id: Option<u8>,
+ /// The clock that should increase timestamp counters.
+ pub timestamp_clk_src: TimestampClkSrc,
+}
+
+impl ITM {
+ /// Removes the software lock on the ITM.
+ #[inline]
+ pub fn unlock(&mut self) {
+ // NOTE(unsafe) atomic write to a stateless, write-only register
+ unsafe { self.lar.write(0xC5AC_CE55) }
+ }
+
+ /// Configures the ITM with the passed [ITMSettings].
+ #[inline]
+ pub fn configure(&mut self, settings: ITMSettings) {
+ unsafe {
+ self.tcr.modify(|mut r| {
+ r.set_itmena(settings.enable);
+ r.set_tsena(settings.local_timestamps != LocalTimestampOptions::Disabled);
+ r.set_txena(settings.forward_dwt);
+ r.set_tsprescale(match settings.local_timestamps {
+ LocalTimestampOptions::Disabled | LocalTimestampOptions::Enabled => 0b00,
+ LocalTimestampOptions::EnabledDiv4 => 0b01,
+ LocalTimestampOptions::EnabledDiv16 => 0b10,
+ LocalTimestampOptions::EnabledDiv64 => 0b11,
+ });
+ r.set_gtsfreq(match settings.global_timestamps {
+ GlobalTimestampOptions::Disabled => 0b00,
+ GlobalTimestampOptions::Every128Cycles => 0b01,
+ GlobalTimestampOptions::Every8192Cycles => 0b10,
+ GlobalTimestampOptions::EveryPacket => 0b11,
+ });
+ r.set_swoena(match settings.timestamp_clk_src {
+ TimestampClkSrc::SystemClock => false,
+ TimestampClkSrc::AsyncTPIU => true,
+ });
+ r.set_tracebusid(settings.bus_id.unwrap_or(0));
+
+ r
+ });
+ }
+ }
+}
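A sketch of a typical bring-up sequence with `configure`, assuming a mutable `peripherals` from `Peripherals::take()` and trace already enabled via `DCB::enable_trace`:

```rust
use cortex_m::peripheral::itm::{
    GlobalTimestampOptions, ITMSettings, LocalTimestampOptions, TimestampClkSrc,
};

peripherals.ITM.unlock();
peripherals.ITM.configure(ITMSettings {
    enable: true,
    forward_dwt: true,
    local_timestamps: LocalTimestampOptions::Enabled,
    global_timestamps: GlobalTimestampOptions::Disabled,
    bus_id: None,
    timestamp_clk_src: TimestampClkSrc::SystemClock,
});

// Stimulus port 0 can then carry byte payloads.
if peripherals.ITM.stim[0].is_fifo_ready() {
    peripherals.ITM.stim[0].write_u8(b'!');
}
```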
diff --git a/cortex-m/src/peripheral/mod.rs b/cortex-m/src/peripheral/mod.rs
new file mode 100644
index 0000000..bf18151
--- /dev/null
+++ b/cortex-m/src/peripheral/mod.rs
@@ -0,0 +1,592 @@
+//! Core peripherals.
+//!
+//! # API
+//!
+//! To use (most of) the peripheral API first you must get an *instance* of the peripheral. All the
+//! core peripherals are modeled as singletons (there can only ever be, at most, one instance of any
+//! one of them at any given point in time) and the only way to get an instance of them is through
+//! the [`Peripherals::take`](struct.Peripherals.html#method.take) method.
+//!
+//! ``` no_run
+//! # use cortex_m::peripheral::Peripherals;
+//! let mut peripherals = Peripherals::take().unwrap();
+//! peripherals.DCB.enable_trace();
+//! ```
+//!
+//! This method can only be successfully called *once* -- this is why the method returns an
+//! `Option`. Subsequent calls to the method will result in a `None` value being returned.
+//!
+//! ``` no_run, should_panic
+//! # use cortex_m::peripheral::Peripherals;
+//! let ok = Peripherals::take().unwrap();
+//! let panics = Peripherals::take().unwrap();
+//! ```
+//! A part of the peripheral API doesn't require access to a peripheral instance. This part of the
+//! API is provided as static methods on the peripheral types. One example is the
+//! [`DWT::cycle_count`](struct.DWT.html#method.cycle_count) method.
+//!
+//! ``` no_run
+//! # use cortex_m::peripheral::{DWT, Peripherals};
+//! {
+//! let mut peripherals = Peripherals::take().unwrap();
+//! peripherals.DCB.enable_trace();
+//! peripherals.DWT.enable_cycle_counter();
+//! } // all the peripheral singletons are destroyed here
+//!
+//! // but this method can be called without a DWT instance
+//! let cyccnt = DWT::cycle_count();
+//! ```
+//!
+//! The singleton property can be *unsafely* bypassed using the `ptr` static method which is
+//! available on all the peripheral types. This method is a useful building block for implementing
+//! safe higher level abstractions.
+//!
+//! ``` no_run
+//! # use cortex_m::peripheral::{DWT, Peripherals};
+//! {
+//! let mut peripherals = Peripherals::take().unwrap();
+//! peripherals.DCB.enable_trace();
+//! peripherals.DWT.enable_cycle_counter();
+//! } // all the peripheral singletons are destroyed here
+//!
+//! // actually safe because this is an atomic read with no side effects
+//! let cyccnt = unsafe { (*DWT::PTR).cyccnt.read() };
+//! ```
+//!
+//! # References
+//!
+//! - ARMv7-M Architecture Reference Manual (Issue E.b) - Chapter B3
+
+use core::marker::PhantomData;
+use core::ops;
+
+#[cfg(cm7)]
+pub mod ac;
+#[cfg(not(armv6m))]
+pub mod cbp;
+pub mod cpuid;
+pub mod dcb;
+pub mod dwt;
+#[cfg(not(armv6m))]
+pub mod fpb;
+// NOTE(native) is for documentation purposes
+#[cfg(any(has_fpu, native))]
+pub mod fpu;
+pub mod icb;
+#[cfg(all(not(armv6m), not(armv8m_base)))]
+pub mod itm;
+pub mod mpu;
+pub mod nvic;
+#[cfg(armv8m)]
+pub mod sau;
+pub mod scb;
+pub mod syst;
+#[cfg(not(armv6m))]
+pub mod tpiu;
+
+#[cfg(test)]
+mod test;
+
+// NOTE the `PhantomData` used in the peripherals proxy is to make them `Send` but *not* `Sync`
+
+/// Core peripherals
+#[allow(non_snake_case)]
+#[allow(clippy::manual_non_exhaustive)]
+pub struct Peripherals {
+ /// Cortex-M7 TCM and cache access control.
+ #[cfg(cm7)]
+ pub AC: AC,
+
+ /// Cache and branch predictor maintenance operations.
+ /// Not available on Armv6-M.
+ pub CBP: CBP,
+
+ /// CPUID
+ pub CPUID: CPUID,
+
+ /// Debug Control Block
+ pub DCB: DCB,
+
+ /// Data Watchpoint and Trace unit
+ pub DWT: DWT,
+
+ /// Flash Patch and Breakpoint unit.
+ /// Not available on Armv6-M.
+ pub FPB: FPB,
+
+ /// Floating Point Unit.
+ pub FPU: FPU,
+
+ /// Implementation Control Block.
+ ///
+ /// The name is from the v8-M spec, but the block existed in earlier
+ /// revisions, without a name.
+ pub ICB: ICB,
+
+ /// Instrumentation Trace Macrocell.
+ /// Not available on Armv6-M and Armv8-M Baseline.
+ pub ITM: ITM,
+
+ /// Memory Protection Unit
+ pub MPU: MPU,
+
+ /// Nested Vector Interrupt Controller
+ pub NVIC: NVIC,
+
+ /// Security Attribution Unit
+ pub SAU: SAU,
+
+ /// System Control Block
+ pub SCB: SCB,
+
+ /// SysTick: System Timer
+ pub SYST: SYST,
+
+ /// Trace Port Interface Unit.
+ /// Not available on Armv6-M.
+ pub TPIU: TPIU,
+
+ // Private field making `Peripherals` non-exhaustive. We don't use `#[non_exhaustive]` so we
+ // can support older Rust versions.
+ _priv: (),
+}
+
+// NOTE `no_mangle` is used here to prevent linking different minor versions of this crate as that
+// would let you `take` the core peripherals more than once (one per minor version)
+#[no_mangle]
+static CORE_PERIPHERALS: () = ();
+
+/// Set to `true` when `take` or `steal` was called to make `Peripherals` a singleton.
+static mut TAKEN: bool = false;
+
+impl Peripherals {
+ /// Returns all the core peripherals *once*
+ #[inline]
+ pub fn take() -> Option<Self> {
+ critical_section::with(|_| {
+ if unsafe { TAKEN } {
+ None
+ } else {
+ Some(unsafe { Peripherals::steal() })
+ }
+ })
+ }
+
+ /// Unchecked version of `Peripherals::take`
+ #[inline]
+ pub unsafe fn steal() -> Self {
+ TAKEN = true;
+
+ Peripherals {
+ #[cfg(cm7)]
+ AC: AC {
+ _marker: PhantomData,
+ },
+ CBP: CBP {
+ _marker: PhantomData,
+ },
+ CPUID: CPUID {
+ _marker: PhantomData,
+ },
+ DCB: DCB {
+ _marker: PhantomData,
+ },
+ DWT: DWT {
+ _marker: PhantomData,
+ },
+ FPB: FPB {
+ _marker: PhantomData,
+ },
+ FPU: FPU {
+ _marker: PhantomData,
+ },
+ ICB: ICB {
+ _marker: PhantomData,
+ },
+ ITM: ITM {
+ _marker: PhantomData,
+ },
+ MPU: MPU {
+ _marker: PhantomData,
+ },
+ NVIC: NVIC {
+ _marker: PhantomData,
+ },
+ SAU: SAU {
+ _marker: PhantomData,
+ },
+ SCB: SCB {
+ _marker: PhantomData,
+ },
+ SYST: SYST {
+ _marker: PhantomData,
+ },
+ TPIU: TPIU {
+ _marker: PhantomData,
+ },
+ _priv: (),
+ }
+ }
+}
+
+/// Access control
+#[cfg(cm7)]
+pub struct AC {
+ _marker: PhantomData<*const ()>,
+}
+
+#[cfg(cm7)]
+unsafe impl Send for AC {}
+
+#[cfg(cm7)]
+impl AC {
+ /// Pointer to the register block
+ pub const PTR: *const self::ac::RegisterBlock = 0xE000_EF90 as *const _;
+}
+
+/// Cache and branch predictor maintenance operations
+#[allow(clippy::upper_case_acronyms)]
+pub struct CBP {
+ _marker: PhantomData<*const ()>,
+}
+
+unsafe impl Send for CBP {}
+
+#[cfg(not(armv6m))]
+impl CBP {
+ #[inline(always)]
+ pub(crate) const unsafe fn new() -> Self {
+ CBP {
+ _marker: PhantomData,
+ }
+ }
+
+ /// Pointer to the register block
+ pub const PTR: *const self::cbp::RegisterBlock = 0xE000_EF50 as *const _;
+}
+
+#[cfg(not(armv6m))]
+impl ops::Deref for CBP {
+ type Target = self::cbp::RegisterBlock;
+
+ #[inline(always)]
+ fn deref(&self) -> &Self::Target {
+ unsafe { &*Self::PTR }
+ }
+}
+
+/// CPUID
+#[allow(clippy::upper_case_acronyms)]
+pub struct CPUID {
+ _marker: PhantomData<*const ()>,
+}
+
+unsafe impl Send for CPUID {}
+
+impl CPUID {
+ /// Pointer to the register block
+ pub const PTR: *const self::cpuid::RegisterBlock = 0xE000_ED00 as *const _;
+}
+
+impl ops::Deref for CPUID {
+ type Target = self::cpuid::RegisterBlock;
+
+ #[inline(always)]
+ fn deref(&self) -> &Self::Target {
+ unsafe { &*Self::PTR }
+ }
+}
+
+/// Debug Control Block
+#[allow(clippy::upper_case_acronyms)]
+pub struct DCB {
+ _marker: PhantomData<*const ()>,
+}
+
+unsafe impl Send for DCB {}
+
+impl DCB {
+ /// Pointer to the register block
+ pub const PTR: *const dcb::RegisterBlock = 0xE000_EDF0 as *const _;
+}
+
+impl ops::Deref for DCB {
+ type Target = self::dcb::RegisterBlock;
+
+ #[inline(always)]
+ fn deref(&self) -> &Self::Target {
+ unsafe { &*DCB::PTR }
+ }
+}
+
+/// Data Watchpoint and Trace unit
+#[allow(clippy::upper_case_acronyms)]
+pub struct DWT {
+ _marker: PhantomData<*const ()>,
+}
+
+unsafe impl Send for DWT {}
+
+impl DWT {
+ /// Pointer to the register block
+ pub const PTR: *const dwt::RegisterBlock = 0xE000_1000 as *const _;
+}
+
+impl ops::Deref for DWT {
+ type Target = self::dwt::RegisterBlock;
+
+ #[inline(always)]
+ fn deref(&self) -> &Self::Target {
+ unsafe { &*Self::PTR }
+ }
+}
+
+/// Flash Patch and Breakpoint unit
+#[allow(clippy::upper_case_acronyms)]
+pub struct FPB {
+ _marker: PhantomData<*const ()>,
+}
+
+unsafe impl Send for FPB {}
+
+#[cfg(not(armv6m))]
+impl FPB {
+ /// Pointer to the register block
+ pub const PTR: *const fpb::RegisterBlock = 0xE000_2000 as *const _;
+}
+
+#[cfg(not(armv6m))]
+impl ops::Deref for FPB {
+ type Target = self::fpb::RegisterBlock;
+
+ #[inline(always)]
+ fn deref(&self) -> &Self::Target {
+ unsafe { &*Self::PTR }
+ }
+}
+
+/// Floating Point Unit
+#[allow(clippy::upper_case_acronyms)]
+pub struct FPU {
+ _marker: PhantomData<*const ()>,
+}
+
+unsafe impl Send for FPU {}
+
+#[cfg(any(has_fpu, native))]
+impl FPU {
+ /// Pointer to the register block
+ pub const PTR: *const fpu::RegisterBlock = 0xE000_EF30 as *const _;
+}
+
+#[cfg(any(has_fpu, native))]
+impl ops::Deref for FPU {
+ type Target = self::fpu::RegisterBlock;
+
+ #[inline(always)]
+ fn deref(&self) -> &Self::Target {
+ unsafe { &*Self::PTR }
+ }
+}
+
+/// Implementation Control Block.
+///
+/// This block contains implementation-defined registers like `ictr` and
+/// `actlr`. It's called the "implementation control block" in the ARMv8-M
+/// standard, but earlier standards contained the registers, just without a
+/// name.
+#[allow(clippy::upper_case_acronyms)]
+pub struct ICB {
+ _marker: PhantomData<*const ()>,
+}
+
+unsafe impl Send for ICB {}
+
+impl ICB {
+ /// Pointer to the register block
+ pub const PTR: *mut icb::RegisterBlock = 0xE000_E004 as *mut _;
+}
+
+impl ops::Deref for ICB {
+ type Target = self::icb::RegisterBlock;
+
+ #[inline(always)]
+ fn deref(&self) -> &Self::Target {
+ unsafe { &*Self::PTR }
+ }
+}
+
+impl ops::DerefMut for ICB {
+ #[inline(always)]
+ fn deref_mut(&mut self) -> &mut Self::Target {
+ unsafe { &mut *Self::PTR }
+ }
+}
+
+/// Instrumentation Trace Macrocell
+#[allow(clippy::upper_case_acronyms)]
+pub struct ITM {
+ _marker: PhantomData<*const ()>,
+}
+
+unsafe impl Send for ITM {}
+
+#[cfg(all(not(armv6m), not(armv8m_base)))]
+impl ITM {
+ /// Pointer to the register block
+ pub const PTR: *mut itm::RegisterBlock = 0xE000_0000 as *mut _;
+}
+
+#[cfg(all(not(armv6m), not(armv8m_base)))]
+impl ops::Deref for ITM {
+ type Target = self::itm::RegisterBlock;
+
+ #[inline(always)]
+ fn deref(&self) -> &Self::Target {
+ unsafe { &*Self::PTR }
+ }
+}
+
+#[cfg(all(not(armv6m), not(armv8m_base)))]
+impl ops::DerefMut for ITM {
+ #[inline(always)]
+ fn deref_mut(&mut self) -> &mut Self::Target {
+ unsafe { &mut *Self::PTR }
+ }
+}
+
+/// Memory Protection Unit
+#[allow(clippy::upper_case_acronyms)]
+pub struct MPU {
+ _marker: PhantomData<*const ()>,
+}
+
+unsafe impl Send for MPU {}
+
+impl MPU {
+ /// Pointer to the register block
+ pub const PTR: *const mpu::RegisterBlock = 0xE000_ED90 as *const _;
+}
+
+impl ops::Deref for MPU {
+ type Target = self::mpu::RegisterBlock;
+
+ #[inline(always)]
+ fn deref(&self) -> &Self::Target {
+ unsafe { &*Self::PTR }
+ }
+}
+
+/// Nested Vector Interrupt Controller
+#[allow(clippy::upper_case_acronyms)]
+pub struct NVIC {
+ _marker: PhantomData<*const ()>,
+}
+
+unsafe impl Send for NVIC {}
+
+impl NVIC {
+ /// Pointer to the register block
+ pub const PTR: *const nvic::RegisterBlock = 0xE000_E100 as *const _;
+}
+
+impl ops::Deref for NVIC {
+ type Target = self::nvic::RegisterBlock;
+
+ #[inline(always)]
+ fn deref(&self) -> &Self::Target {
+ unsafe { &*Self::PTR }
+ }
+}
+
+/// Security Attribution Unit
+#[allow(clippy::upper_case_acronyms)]
+pub struct SAU {
+ _marker: PhantomData<*const ()>,
+}
+
+unsafe impl Send for SAU {}
+
+#[cfg(armv8m)]
+impl SAU {
+ /// Pointer to the register block
+ pub const PTR: *const sau::RegisterBlock = 0xE000_EDD0 as *const _;
+}
+
+#[cfg(armv8m)]
+impl ops::Deref for SAU {
+ type Target = self::sau::RegisterBlock;
+
+ #[inline(always)]
+ fn deref(&self) -> &Self::Target {
+ unsafe { &*Self::PTR }
+ }
+}
+
+/// System Control Block
+#[allow(clippy::upper_case_acronyms)]
+pub struct SCB {
+ _marker: PhantomData<*const ()>,
+}
+
+unsafe impl Send for SCB {}
+
+impl SCB {
+ /// Pointer to the register block
+ pub const PTR: *const scb::RegisterBlock = 0xE000_ED04 as *const _;
+}
+
+impl ops::Deref for SCB {
+ type Target = self::scb::RegisterBlock;
+
+ #[inline(always)]
+ fn deref(&self) -> &Self::Target {
+ unsafe { &*Self::PTR }
+ }
+}
+
+/// SysTick: System Timer
+#[allow(clippy::upper_case_acronyms)]
+pub struct SYST {
+ _marker: PhantomData<*const ()>,
+}
+
+unsafe impl Send for SYST {}
+
+impl SYST {
+ /// Pointer to the register block
+ pub const PTR: *const syst::RegisterBlock = 0xE000_E010 as *const _;
+}
+
+impl ops::Deref for SYST {
+ type Target = self::syst::RegisterBlock;
+
+ #[inline(always)]
+ fn deref(&self) -> &Self::Target {
+ unsafe { &*Self::PTR }
+ }
+}
+
+/// Trace Port Interface Unit
+#[allow(clippy::upper_case_acronyms)]
+pub struct TPIU {
+ _marker: PhantomData<*const ()>,
+}
+
+unsafe impl Send for TPIU {}
+
+#[cfg(not(armv6m))]
+impl TPIU {
+ /// Pointer to the register block
+ pub const PTR: *const tpiu::RegisterBlock = 0xE004_0000 as *const _;
+}
+
+#[cfg(not(armv6m))]
+impl ops::Deref for TPIU {
+ type Target = self::tpiu::RegisterBlock;
+
+ #[inline(always)]
+ fn deref(&self) -> &Self::Target {
+ unsafe { &*Self::PTR }
+ }
+}
diff --git a/cortex-m/src/peripheral/mpu.rs b/cortex-m/src/peripheral/mpu.rs
new file mode 100644
index 0000000..3a5f5b4
--- /dev/null
+++ b/cortex-m/src/peripheral/mpu.rs
@@ -0,0 +1,65 @@
+//! Memory Protection Unit
+
+use volatile_register::{RO, RW};
+
+/// Register block for ARMv7-M
+#[cfg(not(armv8m))]
+#[repr(C)]
+pub struct RegisterBlock {
+ /// Type
+ pub _type: RO<u32>,
+ /// Control
+ pub ctrl: RW<u32>,
+ /// Region Number
+ pub rnr: RW<u32>,
+ /// Region Base Address
+ pub rbar: RW<u32>,
+ /// Region Attribute and Size
+ pub rasr: RW<u32>,
+ /// Alias 1 of RBAR
+ pub rbar_a1: RW<u32>,
+ /// Alias 1 of RASR
+ pub rasr_a1: RW<u32>,
+ /// Alias 2 of RBAR
+ pub rbar_a2: RW<u32>,
+ /// Alias 2 of RASR
+ pub rasr_a2: RW<u32>,
+ /// Alias 3 of RBAR
+ pub rbar_a3: RW<u32>,
+ /// Alias 3 of RASR
+ pub rasr_a3: RW<u32>,
+}
+
+/// Register block for ARMv8-M
+#[cfg(armv8m)]
+#[repr(C)]
+pub struct RegisterBlock {
+ /// Type
+ pub _type: RO<u32>,
+ /// Control
+ pub ctrl: RW<u32>,
+ /// Region Number
+ pub rnr: RW<u32>,
+ /// Region Base Address
+ pub rbar: RW<u32>,
+ /// Region Limit Address
+ pub rlar: RW<u32>,
+ /// Alias 1 of RBAR
+ pub rbar_a1: RW<u32>,
+ /// Alias 1 of RLAR
+ pub rlar_a1: RW<u32>,
+ /// Alias 2 of RBAR
+ pub rbar_a2: RW<u32>,
+ /// Alias 2 of RLAR
+ pub rlar_a2: RW<u32>,
+ /// Alias 3 of RBAR
+ pub rbar_a3: RW<u32>,
+ /// Alias 3 of RLAR
+ pub rlar_a3: RW<u32>,
+
+ // Reserved word at offset 0xBC
+ _reserved: u32,
+
+ /// Memory Attribute Indirection register 0 and 1
+ pub mair: [RW<u32>; 2],
+}
diff --git a/cortex-m/src/peripheral/nvic.rs b/cortex-m/src/peripheral/nvic.rs
new file mode 100644
index 0000000..fccd6a2
--- /dev/null
+++ b/cortex-m/src/peripheral/nvic.rs
@@ -0,0 +1,272 @@
+//! Nested Vector Interrupt Controller
+
+use volatile_register::RW;
+#[cfg(not(armv6m))]
+use volatile_register::{RO, WO};
+
+use crate::interrupt::InterruptNumber;
+use crate::peripheral::NVIC;
+
+/// Register block
+#[repr(C)]
+pub struct RegisterBlock {
+ /// Interrupt Set-Enable
+ pub iser: [RW<u32>; 16],
+
+ _reserved0: [u32; 16],
+
+ /// Interrupt Clear-Enable
+ pub icer: [RW<u32>; 16],
+
+ _reserved1: [u32; 16],
+
+ /// Interrupt Set-Pending
+ pub ispr: [RW<u32>; 16],
+
+ _reserved2: [u32; 16],
+
+ /// Interrupt Clear-Pending
+ pub icpr: [RW<u32>; 16],
+
+ _reserved3: [u32; 16],
+
+ /// Interrupt Active Bit (not present on Cortex-M0 variants)
+ #[cfg(not(armv6m))]
+ pub iabr: [RO<u32>; 16],
+ #[cfg(armv6m)]
+ _reserved4: [u32; 16],
+
+ _reserved5: [u32; 16],
+
+ #[cfg(armv8m)]
+ /// Interrupt Target Non-secure (only present on Arm v8-M)
+ pub itns: [RW<u32>; 16],
+ #[cfg(not(armv8m))]
+ _reserved6: [u32; 16],
+
+ _reserved7: [u32; 16],
+
+ /// Interrupt Priority
+ ///
+ /// On ARMv7-M, 124 word-sized registers are available. Each of those
+ /// contains four 8-bit interrupt priorities. The architecture
+ /// specifically allows accessing those along byte boundaries, so they are
+ /// represented as 496 byte-sized registers, for convenience, and to allow
+ /// atomic priority updates.
+ ///
+ /// On ARMv6-M, the registers must only be accessed along word boundaries,
+ /// so the convenient byte-sized representation wouldn't work on that
+ /// architecture.
+ #[cfg(not(armv6m))]
+ pub ipr: [RW<u8>; 496],
+
+ /// Interrupt Priority
+ ///
+ /// On ARMv7-M, 124 word-sized registers are available. Each of those
+ /// contains four 8-bit interrupt priorities. The architecture
+ /// specifically allows accessing those along byte boundaries, so they are
+ /// represented as 496 byte-sized registers, for convenience, and to allow
+ /// atomic priority updates.
+ ///
+ /// On ARMv6-M, the registers must only be accessed along word boundaries,
+ /// so the convenient byte-sized representation wouldn't work on that
+ /// architecture.
+ #[cfg(armv6m)]
+ pub ipr: [RW<u32>; 8],
+
+ #[cfg(not(armv6m))]
+ _reserved8: [u32; 580],
+
+ /// Software Trigger Interrupt
+ #[cfg(not(armv6m))]
+ pub stir: WO<u32>,
+}
+
+impl NVIC {
+ /// Request an IRQ in software
+ ///
+ /// Writing a value to the INTID field is the same as manually pending an interrupt by setting
+ /// the corresponding interrupt bit in an Interrupt Set Pending Register. This is similar to
+ /// [`NVIC::pend`].
+ ///
+ /// This method is not available on ARMv6-M chips.
+ ///
+ /// [`NVIC::pend`]: #method.pend
+ #[cfg(not(armv6m))]
+ #[inline]
+ pub fn request<I>(interrupt: I)
+ where
+ I: InterruptNumber,
+ {
+ let nr = interrupt.number();
+
+ // NOTE(ptr) this is a write to a stateless register
+ unsafe { (*Self::PTR).stir.write(u32::from(nr)) }
+ }
+
+ /// Disables `interrupt`
+ #[inline]
+ pub fn mask<I>(interrupt: I)
+ where
+ I: InterruptNumber,
+ {
+ let nr = interrupt.number();
+ // NOTE(unsafe) this is a write to a stateless register
+ unsafe { (*Self::PTR).icer[usize::from(nr / 32)].write(1 << (nr % 32)) }
+ }
+
+ /// Enables `interrupt`
+ ///
+ /// This function is `unsafe` because it can break mask-based critical sections
+ #[inline]
+ pub unsafe fn unmask<I>(interrupt: I)
+ where
+ I: InterruptNumber,
+ {
+ let nr = interrupt.number();
+ // NOTE(ptr) this is a write to a stateless register
+ (*Self::PTR).iser[usize::from(nr / 32)].write(1 << (nr % 32))
+ }
+
+ /// Returns the NVIC priority of `interrupt`
+ ///
+ /// *NOTE* NVIC encodes priority in the highest bits of a byte so values like `1` and `2` map
+ /// to the same priority. Also for NVIC priorities, a lower value (e.g. `16`) has higher
+ /// priority (urgency) than a larger value (e.g. `32`).
+ #[inline]
+ pub fn get_priority<I>(interrupt: I) -> u8
+ where
+ I: InterruptNumber,
+ {
+ #[cfg(not(armv6m))]
+ {
+ let nr = interrupt.number();
+ // NOTE(unsafe) atomic read with no side effects
+ unsafe { (*Self::PTR).ipr[usize::from(nr)].read() }
+ }
+
+ #[cfg(armv6m)]
+ {
+ // NOTE(unsafe) atomic read with no side effects
+ let ipr_n = unsafe { (*Self::PTR).ipr[Self::ipr_index(interrupt)].read() };
+ let prio = (ipr_n >> Self::ipr_shift(interrupt)) & 0x0000_00ff;
+ prio as u8
+ }
+ }
+
+ /// Is `interrupt` active or pre-empted and stacked
+ #[cfg(not(armv6m))]
+ #[inline]
+ pub fn is_active<I>(interrupt: I) -> bool
+ where
+ I: InterruptNumber,
+ {
+ let nr = interrupt.number();
+ let mask = 1 << (nr % 32);
+
+ // NOTE(unsafe) atomic read with no side effects
+ unsafe { ((*Self::PTR).iabr[usize::from(nr / 32)].read() & mask) == mask }
+ }
+
+ /// Checks if `interrupt` is enabled
+ #[inline]
+ pub fn is_enabled<I>(interrupt: I) -> bool
+ where
+ I: InterruptNumber,
+ {
+ let nr = interrupt.number();
+ let mask = 1 << (nr % 32);
+
+ // NOTE(unsafe) atomic read with no side effects
+ unsafe { ((*Self::PTR).iser[usize::from(nr / 32)].read() & mask) == mask }
+ }
+
+ /// Checks if `interrupt` is pending
+ #[inline]
+ pub fn is_pending<I>(interrupt: I) -> bool
+ where
+ I: InterruptNumber,
+ {
+ let nr = interrupt.number();
+ let mask = 1 << (nr % 32);
+
+ // NOTE(unsafe) atomic read with no side effects
+ unsafe { ((*Self::PTR).ispr[usize::from(nr / 32)].read() & mask) == mask }
+ }
+
+ /// Forces `interrupt` into pending state
+ #[inline]
+ pub fn pend<I>(interrupt: I)
+ where
+ I: InterruptNumber,
+ {
+ let nr = interrupt.number();
+
+ // NOTE(unsafe) atomic write; ISPR is write-1-to-pend, so writing 0 bits has no effect
+ unsafe { (*Self::PTR).ispr[usize::from(nr / 32)].write(1 << (nr % 32)) }
+ }
+
+ /// Sets the "priority" of `interrupt` to `prio`
+ ///
+ /// *NOTE* See [`get_priority`](struct.NVIC.html#method.get_priority) method for an explanation
+ /// of how NVIC priorities work.
+ ///
+ /// On ARMv6-M, updating an interrupt priority requires a read-modify-write operation. On
+ /// ARMv7-M, the operation is performed in a single atomic write operation.
+ ///
+ /// # Safety
+ ///
+ /// Changing priority levels can break priority-based critical sections (see
+ /// [`register::basepri`](crate::register::basepri)) and compromise memory safety.
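+ ///
+ /// # Example
+ ///
+ /// A sketch of encoding a logical level into the high bits of the byte;
+ /// `NVIC_PRIO_BITS` and `Interrupt::EXTI0` come from a device PAC and are
+ /// illustrative:
+ ///
+ /// ```ignore
+ /// let mut p = cortex_m::Peripherals::take().unwrap();
+ ///
+ /// // Logical level 1; more urgent than logical level 2.
+ /// unsafe { p.NVIC.set_priority(Interrupt::EXTI0, 1 << (8 - NVIC_PRIO_BITS)) };
+ /// ```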
+ #[inline]
+ pub unsafe fn set_priority<I>(&mut self, interrupt: I, prio: u8)
+ where
+ I: InterruptNumber,
+ {
+ #[cfg(not(armv6m))]
+ {
+ let nr = interrupt.number();
+ self.ipr[usize::from(nr)].write(prio)
+ }
+
+ #[cfg(armv6m)]
+ {
+ self.ipr[Self::ipr_index(interrupt)].modify(|value| {
+ let mask = 0x0000_00ff << Self::ipr_shift(interrupt);
+ let prio = u32::from(prio) << Self::ipr_shift(interrupt);
+
+ (value & !mask) | prio
+ })
+ }
+ }
+
+ /// Clears `interrupt`'s pending state
+ #[inline]
+ pub fn unpend<I>(interrupt: I)
+ where
+ I: InterruptNumber,
+ {
+ let nr = interrupt.number();
+
+ // NOTE(unsafe) atomic stateless write; ICPR doesn't store any state
+ unsafe { (*Self::PTR).icpr[usize::from(nr / 32)].write(1 << (nr % 32)) }
+ }
+
+ #[cfg(armv6m)]
+ #[inline]
+ fn ipr_index<I>(interrupt: I) -> usize
+ where
+ I: InterruptNumber,
+ {
+ usize::from(interrupt.number()) / 4
+ }
+
+ #[cfg(armv6m)]
+ #[inline]
+ fn ipr_shift<I>(interrupt: I) -> usize
+ where
+ I: InterruptNumber,
+ {
+ (usize::from(interrupt.number()) % 4) * 8
+ }
+}
diff --git a/cortex-m/src/peripheral/sau.rs b/cortex-m/src/peripheral/sau.rs
new file mode 100644
index 0000000..6b8477f
--- /dev/null
+++ b/cortex-m/src/peripheral/sau.rs
@@ -0,0 +1,242 @@
+//! Security Attribution Unit
+//!
+//! *NOTE* Available only on Armv8-M and Armv8.1-M, for the following Rust target triples:
+//! * `thumbv8m.base-none-eabi`
+//! * `thumbv8m.main-none-eabi`
+//! * `thumbv8m.main-none-eabihf`
+//!
+//! For reference, please check section B8.3 of the Armv8-M Architecture Reference Manual.
+
+use crate::peripheral::SAU;
+use bitfield::bitfield;
+use volatile_register::{RO, RW};
+
+/// Register block
+#[repr(C)]
+pub struct RegisterBlock {
+ /// Control Register
+ pub ctrl: RW<Ctrl>,
+ /// Type Register
+ pub _type: RO<Type>,
+ /// Region Number Register
+ pub rnr: RW<Rnr>,
+ /// Region Base Address Register
+ pub rbar: RW<Rbar>,
+ /// Region Limit Address Register
+ pub rlar: RW<Rlar>,
+ /// Secure Fault Status Register
+ pub sfsr: RO<Sfsr>,
+ /// Secure Fault Address Register
+ pub sfar: RO<Sfar>,
+}
+
+bitfield! {
+ /// Control Register description
+ #[repr(C)]
+ #[derive(Copy, Clone)]
+ pub struct Ctrl(u32);
+ get_enable, set_enable: 0;
+ get_allns, set_allns: 1;
+}
+
+bitfield! {
+ /// Type Register description
+ #[repr(C)]
+ #[derive(Copy, Clone)]
+ pub struct Type(u32);
+ u8;
+ sregion, _: 7, 0;
+}
+
+bitfield! {
+ /// Region Number Register description
+ #[repr(C)]
+ #[derive(Copy, Clone)]
+ pub struct Rnr(u32);
+ u8;
+ get_region, set_region: 7, 0;
+}
+
+bitfield! {
+ /// Region Base Address Register description
+ #[repr(C)]
+ #[derive(Copy, Clone)]
+ pub struct Rbar(u32);
+ u32;
+ get_baddr, set_baddr: 31, 5;
+}
+
+bitfield! {
+ /// Region Limit Address Register description
+ #[repr(C)]
+ #[derive(Copy, Clone)]
+ pub struct Rlar(u32);
+ u32;
+ get_laddr, set_laddr: 31, 5;
+ get_nsc, set_nsc: 1;
+ get_enable, set_enable: 0;
+}
+
+bitfield! {
+ /// Secure Fault Status Register description
+ #[repr(C)]
+ #[derive(Copy, Clone)]
+ pub struct Sfsr(u32);
+ invep, _: 0;
+ invis, _: 1;
+ inver, _: 2;
+ auviol, _: 3;
+ invtran, _: 4;
+ lsperr, _: 5;
+ sfarvalid, _: 6;
+ lserr, _: 7;
+}
+
+bitfield! {
+ /// Secure Fault Address Register description
+ #[repr(C)]
+ #[derive(Copy, Clone)]
+ pub struct Sfar(u32);
+ u32;
+ address, _: 31, 0;
+}
+
+/// Possible attribute of a SAU region.
+#[derive(Debug)]
+pub enum SauRegionAttribute {
+ /// SAU region is Secure
+ Secure,
+ /// SAU region is Non-Secure Callable
+ NonSecureCallable,
+ /// SAU region is Non-Secure
+ NonSecure,
+}
+
+/// Description of a SAU region.
+#[derive(Debug)]
+pub struct SauRegion {
+ /// First address of the region; its 5 least significant bits must be set to zero.
+ pub base_address: u32,
+ /// Last address of the region; its 5 least significant bits must be set to one.
+ pub limit_address: u32,
+ /// Attribute of the region.
+ pub attribute: SauRegionAttribute,
+}
+
+/// Possible error values returned by the SAU methods.
+#[derive(Debug)]
+pub enum SauError {
+ /// The region number parameter to set or get a region must be between 0 and
+ /// `region_numbers() - 1`.
+ RegionNumberTooBig,
+ /// Bits 0 to 4 of the base address of a SAU region must be set to zero.
+ WrongBaseAddress,
+ /// Bits 0 to 4 of the limit address of a SAU region must be set to one.
+ WrongLimitAddress,
+}
+
+impl SAU {
+ /// Get the number of implemented SAU regions.
+ #[inline]
+ pub fn region_numbers(&self) -> u8 {
+ self._type.read().sregion()
+ }
+
+ /// Enable the SAU.
+ #[inline]
+ pub fn enable(&mut self) {
+ unsafe {
+ self.ctrl.modify(|mut ctrl| {
+ ctrl.set_enable(true);
+ ctrl
+ });
+ }
+ }
+
+ /// Set a SAU region to a region number.
+ ///
+ /// SAU regions must be 32-byte aligned and their sizes must be a multiple of 32 bytes. This
+ /// means that the 5 least significant bits of the base address of a SAU region must be set to
+ /// zero and the 5 least significant bits of the limit address must be set to one.
+ /// The region number must be valid.
+ /// This function runs within a critical section to prevent inconsistent results.
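+ ///
+ /// # Example
+ ///
+ /// A sketch marking a 4 KiB SRAM window as Non-Secure; the addresses are
+ /// illustrative and `sau` is the `SAU` instance taken from `Peripherals`:
+ ///
+ /// ```ignore
+ /// sau.set_region(0, SauRegion {
+ ///     base_address: 0x2000_0000,  // 5 least significant bits zero
+ ///     limit_address: 0x2000_0FFF, // 5 least significant bits one
+ ///     attribute: SauRegionAttribute::NonSecure,
+ /// }).unwrap();
+ /// sau.enable();
+ /// ```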
+ #[inline]
+ pub fn set_region(&mut self, region_number: u8, region: SauRegion) -> Result<(), SauError> {
+ critical_section::with(|_| {
+ let base_address = region.base_address;
+ let limit_address = region.limit_address;
+ let attribute = region.attribute;
+
+ if region_number >= self.region_numbers() {
+ Err(SauError::RegionNumberTooBig)
+ } else if base_address & 0x1F != 0 {
+ Err(SauError::WrongBaseAddress)
+ } else if limit_address & 0x1F != 0x1F {
+ Err(SauError::WrongLimitAddress)
+ } else {
+ // All fields of these registers are going to be modified so we don't need to read them
+ // before.
+ let mut rnr = Rnr(0);
+ let mut rbar = Rbar(0);
+ let mut rlar = Rlar(0);
+
+ rnr.set_region(region_number);
+ rbar.set_baddr(base_address >> 5);
+ rlar.set_laddr(limit_address >> 5);
+
+ match attribute {
+ SauRegionAttribute::Secure => {
+ rlar.set_nsc(false);
+ rlar.set_enable(false);
+ }
+ SauRegionAttribute::NonSecureCallable => {
+ rlar.set_nsc(true);
+ rlar.set_enable(true);
+ }
+ SauRegionAttribute::NonSecure => {
+ rlar.set_nsc(false);
+ rlar.set_enable(true);
+ }
+ }
+
+ unsafe {
+ self.rnr.write(rnr);
+ self.rbar.write(rbar);
+ self.rlar.write(rlar);
+ }
+
+ Ok(())
+ }
+ })
+ }
+
+ /// Get a region from the SAU.
+ ///
+ /// The region number must be valid.
+ /// This function runs within a critical section to prevent inconsistent results.
+ #[inline]
+ pub fn get_region(&mut self, region_number: u8) -> Result<SauRegion, SauError> {
+ critical_section::with(|_| {
+ if region_number >= self.region_numbers() {
+ Err(SauError::RegionNumberTooBig)
+ } else {
+ unsafe {
+ self.rnr.write(Rnr(region_number.into()));
+ }
+
+ let rbar = self.rbar.read();
+ let rlar = self.rlar.read();
+
+ let attribute = match (rlar.get_enable(), rlar.get_nsc()) {
+ (false, _) => SauRegionAttribute::Secure,
+ (true, false) => SauRegionAttribute::NonSecure,
+ (true, true) => SauRegionAttribute::NonSecureCallable,
+ };
+
+ Ok(SauRegion {
+ base_address: rbar.get_baddr() << 5,
+ limit_address: (rlar.get_laddr() << 5) | 0x1F,
+ attribute,
+ })
+ }
+ })
+ }
+}
diff --git a/cortex-m/src/peripheral/scb.rs b/cortex-m/src/peripheral/scb.rs
new file mode 100644
index 0000000..b9cf0e4
--- /dev/null
+++ b/cortex-m/src/peripheral/scb.rs
@@ -0,0 +1,1110 @@
+//! System Control Block
+
+use core::ptr;
+
+use volatile_register::RW;
+
+#[cfg(not(armv6m))]
+use super::cpuid::CsselrCacheType;
+#[cfg(not(armv6m))]
+use super::CBP;
+#[cfg(not(armv6m))]
+use super::CPUID;
+use super::SCB;
+#[cfg(feature = "serde")]
+use serde::{Deserialize, Serialize};
+
+/// Register block
+#[repr(C)]
+pub struct RegisterBlock {
+ /// Interrupt Control and State
+ pub icsr: RW<u32>,
+
+ /// Vector Table Offset (not present on Cortex-M0 variants)
+ pub vtor: RW<u32>,
+
+ /// Application Interrupt and Reset Control
+ pub aircr: RW<u32>,
+
+ /// System Control
+ pub scr: RW<u32>,
+
+ /// Configuration and Control
+ pub ccr: RW<u32>,
+
+ /// System Handler Priority (word accessible only on Cortex-M0 variants)
+ ///
+ /// On ARMv7-M, `shpr[0]` points to SHPR1
+ ///
+ /// On ARMv6-M, `shpr[0]` points to SHPR2
+ #[cfg(not(armv6m))]
+ pub shpr: [RW<u8>; 12],
+ #[cfg(armv6m)]
+ _reserved1: u32,
+ /// System Handler Priority (word accessible only on Cortex-M0 variants)
+ ///
+ /// On ARMv7-M, `shpr[0]` points to SHPR1
+ ///
+ /// On ARMv6-M, `shpr[0]` points to SHPR2
+ #[cfg(armv6m)]
+ pub shpr: [RW<u32>; 2],
+
+ /// System Handler Control and State
+ pub shcsr: RW<u32>,
+
+ /// Configurable Fault Status (not present on Cortex-M0 variants)
+ #[cfg(not(armv6m))]
+ pub cfsr: RW<u32>,
+ #[cfg(armv6m)]
+ _reserved2: u32,
+
+ /// HardFault Status (not present on Cortex-M0 variants)
+ #[cfg(not(armv6m))]
+ pub hfsr: RW<u32>,
+ #[cfg(armv6m)]
+ _reserved3: u32,
+
+ /// Debug Fault Status (not present on Cortex-M0 variants)
+ #[cfg(not(armv6m))]
+ pub dfsr: RW<u32>,
+ #[cfg(armv6m)]
+ _reserved4: u32,
+
+ /// MemManage Fault Address (not present on Cortex-M0 variants)
+ #[cfg(not(armv6m))]
+ pub mmfar: RW<u32>,
+ #[cfg(armv6m)]
+ _reserved5: u32,
+
+ /// BusFault Address (not present on Cortex-M0 variants)
+ #[cfg(not(armv6m))]
+ pub bfar: RW<u32>,
+ #[cfg(armv6m)]
+ _reserved6: u32,
+
+ /// Auxiliary Fault Status (not present on Cortex-M0 variants)
+ #[cfg(not(armv6m))]
+ pub afsr: RW<u32>,
+ #[cfg(armv6m)]
+ _reserved7: u32,
+
+ _reserved8: [u32; 18],
+
+ /// Coprocessor Access Control (not present on Cortex-M0 variants)
+ #[cfg(not(armv6m))]
+ pub cpacr: RW<u32>,
+ #[cfg(armv6m)]
+ _reserved9: u32,
+}
+
+/// FPU access mode
+#[cfg(has_fpu)]
+#[derive(Clone, Copy, Debug, PartialEq, Eq)]
+pub enum FpuAccessMode {
+ /// FPU is not accessible
+ Disabled,
+ /// FPU is accessible in Privileged and User mode
+ Enabled,
+ /// FPU is accessible in Privileged mode only
+ Privileged,
+}
+
+#[cfg(has_fpu)]
+mod fpu_consts {
+ pub const SCB_CPACR_FPU_MASK: u32 = 0b11_11 << 20;
+ pub const SCB_CPACR_FPU_ENABLE: u32 = 0b01_01 << 20;
+ pub const SCB_CPACR_FPU_USER: u32 = 0b10_10 << 20;
+}
+
+#[cfg(has_fpu)]
+use self::fpu_consts::*;
+
+#[cfg(has_fpu)]
+impl SCB {
+ /// Shorthand for `set_fpu_access_mode(FpuAccessMode::Disabled)`
+ #[inline]
+ pub fn disable_fpu(&mut self) {
+ self.set_fpu_access_mode(FpuAccessMode::Disabled)
+ }
+
+ /// Shorthand for `set_fpu_access_mode(FpuAccessMode::Enabled)`
+ #[inline]
+ pub fn enable_fpu(&mut self) {
+ self.set_fpu_access_mode(FpuAccessMode::Enabled)
+ }
+
+ /// Gets FPU access mode
+ #[inline]
+ pub fn fpu_access_mode() -> FpuAccessMode {
+ // NOTE(unsafe) atomic read operation with no side effects
+ let cpacr = unsafe { (*Self::PTR).cpacr.read() };
+
+ if cpacr & SCB_CPACR_FPU_MASK == SCB_CPACR_FPU_ENABLE | SCB_CPACR_FPU_USER {
+ FpuAccessMode::Enabled
+ } else if cpacr & SCB_CPACR_FPU_MASK == SCB_CPACR_FPU_ENABLE {
+ FpuAccessMode::Privileged
+ } else {
+ FpuAccessMode::Disabled
+ }
+ }
+
+ /// Sets FPU access mode
+ ///
+ /// *IMPORTANT* Any function that runs fully or partly with the FPU disabled must *not* take any
+ /// floating-point arguments or have any floating-point local variables. Because the compiler
+ /// might inline such a function into a caller that does have floating-point arguments or
+ /// variables, any such function must also be marked `#[inline(never)]`.
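+ ///
+ /// # Example
+ ///
+ /// A sketch of granting full FPU access at startup, before any floating-point
+ /// code runs:
+ ///
+ /// ```ignore
+ /// let mut p = cortex_m::Peripherals::take().unwrap();
+ /// p.SCB.set_fpu_access_mode(FpuAccessMode::Enabled);
+ /// ```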
+ #[inline]
+ pub fn set_fpu_access_mode(&mut self, mode: FpuAccessMode) {
+ let mut cpacr = self.cpacr.read() & !SCB_CPACR_FPU_MASK;
+ match mode {
+ FpuAccessMode::Disabled => (),
+ FpuAccessMode::Privileged => cpacr |= SCB_CPACR_FPU_ENABLE,
+ FpuAccessMode::Enabled => cpacr |= SCB_CPACR_FPU_ENABLE | SCB_CPACR_FPU_USER,
+ }
+ unsafe { self.cpacr.write(cpacr) }
+ }
+}
+
+impl SCB {
+ /// Returns the active exception number
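+ ///
+ /// # Example
+ ///
+ /// A sketch of dispatching on the result:
+ ///
+ /// ```ignore
+ /// use cortex_m::peripheral::scb::{Exception, VectActive};
+ /// use cortex_m::peripheral::SCB;
+ ///
+ /// match SCB::vect_active() {
+ ///     VectActive::ThreadMode => { /* no exception is being serviced */ }
+ ///     VectActive::Exception(Exception::SysTick) => { /* in the SysTick handler */ }
+ ///     VectActive::Exception(_) => { /* some other core exception */ }
+ ///     VectActive::Interrupt { irqn } => { /* device interrupt `irqn` */ }
+ /// }
+ /// ```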
+ #[inline]
+ pub fn vect_active() -> VectActive {
+ let icsr =
+ unsafe { ptr::read_volatile(&(*SCB::PTR).icsr as *const _ as *const u32) } & 0x1FF;
+
+ match icsr as u16 {
+ 0 => VectActive::ThreadMode,
+ 2 => VectActive::Exception(Exception::NonMaskableInt),
+ 3 => VectActive::Exception(Exception::HardFault),
+ #[cfg(not(armv6m))]
+ 4 => VectActive::Exception(Exception::MemoryManagement),
+ #[cfg(not(armv6m))]
+ 5 => VectActive::Exception(Exception::BusFault),
+ #[cfg(not(armv6m))]
+ 6 => VectActive::Exception(Exception::UsageFault),
+ #[cfg(any(armv8m, native))]
+ 7 => VectActive::Exception(Exception::SecureFault),
+ 11 => VectActive::Exception(Exception::SVCall),
+ #[cfg(not(armv6m))]
+ 12 => VectActive::Exception(Exception::DebugMonitor),
+ 14 => VectActive::Exception(Exception::PendSV),
+ 15 => VectActive::Exception(Exception::SysTick),
+ irqn => VectActive::Interrupt { irqn: irqn - 16 },
+ }
+ }
+}
+
+/// Processor core exceptions (internal interrupts)
+#[derive(Clone, Copy, Debug, Eq, PartialEq)]
+#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
+#[cfg_attr(feature = "std", derive(PartialOrd, Hash))]
+pub enum Exception {
+ /// Non maskable interrupt
+ NonMaskableInt,
+
+ /// Hard fault interrupt
+ HardFault,
+
+ /// Memory management interrupt (not present on Cortex-M0 variants)
+ #[cfg(not(armv6m))]
+ MemoryManagement,
+
+ /// Bus fault interrupt (not present on Cortex-M0 variants)
+ #[cfg(not(armv6m))]
+ BusFault,
+
+ /// Usage fault interrupt (not present on Cortex-M0 variants)
+ #[cfg(not(armv6m))]
+ UsageFault,
+
+ /// Secure fault interrupt (only on ARMv8-M)
+ #[cfg(any(armv8m, native))]
+ SecureFault,
+
+ /// SV call interrupt
+ SVCall,
+
+ /// Debug monitor interrupt (not present on Cortex-M0 variants)
+ #[cfg(not(armv6m))]
+ DebugMonitor,
+
+ /// Pend SV interrupt
+ PendSV,
+
+ /// System Tick interrupt
+ SysTick,
+}
+
+impl Exception {
+ /// Returns the IRQ number of this `Exception`
+ ///
+ /// The return value is always within the closed range `[-14, -1]`
+ #[inline]
+ pub fn irqn(self) -> i8 {
+ match self {
+ Exception::NonMaskableInt => -14,
+ Exception::HardFault => -13,
+ #[cfg(not(armv6m))]
+ Exception::MemoryManagement => -12,
+ #[cfg(not(armv6m))]
+ Exception::BusFault => -11,
+ #[cfg(not(armv6m))]
+ Exception::UsageFault => -10,
+ #[cfg(any(armv8m, native))]
+ Exception::SecureFault => -9,
+ Exception::SVCall => -5,
+ #[cfg(not(armv6m))]
+ Exception::DebugMonitor => -4,
+ Exception::PendSV => -2,
+ Exception::SysTick => -1,
+ }
+ }
+}
+
+/// Active exception number
+#[derive(Clone, Copy, Debug, Eq, PartialEq)]
+#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
+#[cfg_attr(feature = "std", derive(PartialOrd, Hash))]
+pub enum VectActive {
+ /// Thread mode
+ ThreadMode,
+
+ /// Processor core exception (internal interrupts)
+ Exception(Exception),
+
+ /// Device specific exception (external interrupts)
+ Interrupt {
+ /// Interrupt number. This number is always within the half-open range `[0, 512)` (9 bits)
+ irqn: u16,
+ },
+}
+
+impl VectActive {
+ /// Converts a vector number into `VectActive`
+ #[inline]
+ pub fn from(vect_active: u16) -> Option<Self> {
+ Some(match vect_active {
+ 0 => VectActive::ThreadMode,
+ 2 => VectActive::Exception(Exception::NonMaskableInt),
+ 3 => VectActive::Exception(Exception::HardFault),
+ #[cfg(not(armv6m))]
+ 4 => VectActive::Exception(Exception::MemoryManagement),
+ #[cfg(not(armv6m))]
+ 5 => VectActive::Exception(Exception::BusFault),
+ #[cfg(not(armv6m))]
+ 6 => VectActive::Exception(Exception::UsageFault),
+ #[cfg(any(armv8m, native))]
+ 7 => VectActive::Exception(Exception::SecureFault),
+ 11 => VectActive::Exception(Exception::SVCall),
+ #[cfg(not(armv6m))]
+ 12 => VectActive::Exception(Exception::DebugMonitor),
+ 14 => VectActive::Exception(Exception::PendSV),
+ 15 => VectActive::Exception(Exception::SysTick),
+ irqn if (16..512).contains(&irqn) => VectActive::Interrupt { irqn: irqn - 16 },
+ _ => return None,
+ })
+ }
+}
+
+#[cfg(not(armv6m))]
+mod scb_consts {
+ pub const SCB_CCR_IC_MASK: u32 = 1 << 17;
+ pub const SCB_CCR_DC_MASK: u32 = 1 << 16;
+}
+
+#[cfg(not(armv6m))]
+use self::scb_consts::*;
+
+#[cfg(not(armv6m))]
+impl SCB {
+ /// Enables I-cache if currently disabled.
+ ///
+ /// This operation first invalidates the entire I-cache.
+ #[inline]
+ pub fn enable_icache(&mut self) {
+ // Don't do anything if I-cache is already enabled
+ if Self::icache_enabled() {
+ return;
+ }
+
+ // NOTE(unsafe): No races as all CBP registers are write-only and stateless
+ let mut cbp = unsafe { CBP::new() };
+
+ // Invalidate I-cache
+ cbp.iciallu();
+
+ // Enable I-cache
+ extern "C" {
+ // see asm-v7m.s
+ fn __enable_icache();
+ }
+
+ // NOTE(unsafe): The asm routine manages exclusive access to the SCB
+ // registers and applies the proper barriers; it is technically safe on
+ // its own, and is only `unsafe` here because it's `extern "C"`.
+ unsafe {
+ __enable_icache();
+ }
+ }
+
+ /// Disables I-cache if currently enabled.
+ ///
+ /// This operation invalidates the entire I-cache after disabling.
+ #[inline]
+ pub fn disable_icache(&mut self) {
+ // Don't do anything if I-cache is already disabled
+ if !Self::icache_enabled() {
+ return;
+ }
+
+ // NOTE(unsafe): No races as all CBP registers are write-only and stateless
+ let mut cbp = unsafe { CBP::new() };
+
+ // Disable I-cache
+ // NOTE(unsafe): We have synchronised access by &mut self
+ unsafe { self.ccr.modify(|r| r & !SCB_CCR_IC_MASK) };
+
+ // Invalidate I-cache
+ cbp.iciallu();
+
+ crate::asm::dsb();
+ crate::asm::isb();
+ }
+
+ /// Returns whether the I-cache is currently enabled.
+ #[inline(always)]
+ pub fn icache_enabled() -> bool {
+ crate::asm::dsb();
+ crate::asm::isb();
+
+ // NOTE(unsafe): atomic read with no side effects
+ unsafe { (*Self::PTR).ccr.read() & SCB_CCR_IC_MASK == SCB_CCR_IC_MASK }
+ }
+
+ /// Invalidates the entire I-cache.
+ #[inline]
+ pub fn invalidate_icache(&mut self) {
+ // NOTE(unsafe): No races as all CBP registers are write-only and stateless
+ let mut cbp = unsafe { CBP::new() };
+
+ // Invalidate I-cache
+ cbp.iciallu();
+
+ crate::asm::dsb();
+ crate::asm::isb();
+ }
+
+ /// Enables D-cache if currently disabled.
+ ///
+ /// This operation first invalidates the entire D-cache, ensuring it does
+ /// not contain stale values before being enabled.
+ #[inline]
+ pub fn enable_dcache(&mut self, cpuid: &mut CPUID) {
+ // Don't do anything if D-cache is already enabled
+ if Self::dcache_enabled() {
+ return;
+ }
+
+ // Invalidate anything currently in the D-cache
+ unsafe { self.invalidate_dcache(cpuid) };
+
+ // Now turn on the D-cache
+ extern "C" {
+ // see asm-v7m.s
+ fn __enable_dcache();
+ }
+
+ // NOTE(unsafe): The asm routine manages exclusive access to the SCB
+ // registers and applies the proper barriers; it is technically safe on
+ // its own, and is only `unsafe` here because it's `extern "C"`.
+ unsafe {
+ __enable_dcache();
+ }
+ }
+
+ /// Disables D-cache if currently enabled.
+ ///
+ /// This operation subsequently cleans and invalidates the entire D-cache,
+ /// ensuring all contents are safely written back to main memory after disabling.
+ #[inline]
+ pub fn disable_dcache(&mut self, cpuid: &mut CPUID) {
+ // Don't do anything if D-cache is already disabled
+ if !Self::dcache_enabled() {
+ return;
+ }
+
+ // Turn off the D-cache
+ // NOTE(unsafe): We have synchronised access by &mut self
+ unsafe { self.ccr.modify(|r| r & !SCB_CCR_DC_MASK) };
+
+ // Clean and invalidate whatever was left in it
+ self.clean_invalidate_dcache(cpuid);
+ }
+
+ /// Returns whether the D-cache is currently enabled.
+ #[inline]
+ pub fn dcache_enabled() -> bool {
+ crate::asm::dsb();
+ crate::asm::isb();
+
+ // NOTE(unsafe) atomic read with no side effects
+ unsafe { (*Self::PTR).ccr.read() & SCB_CCR_DC_MASK == SCB_CCR_DC_MASK }
+ }
+
+ /// Invalidates the entire D-cache.
+ ///
+ /// Note that calling this while the D-cache is enabled will probably wipe out the
+ /// stack, depending on optimisations, breaking the return to the call site.
+ ///
+ /// It's used immediately before enabling the D-cache, but not exported publicly.
+ #[inline]
+ unsafe fn invalidate_dcache(&mut self, cpuid: &mut CPUID) {
+ // NOTE(unsafe): No races as all CBP registers are write-only and stateless
+ let mut cbp = CBP::new();
+
+ // Read number of sets and ways
+ let (sets, ways) = cpuid.cache_num_sets_ways(0, CsselrCacheType::DataOrUnified);
+
+ // Invalidate entire D-cache
+ for set in 0..sets {
+ for way in 0..ways {
+ cbp.dcisw(set, way);
+ }
+ }
+
+ crate::asm::dsb();
+ crate::asm::isb();
+ }
+
+ /// Cleans the entire D-cache.
+ ///
+ /// This function causes everything in the D-cache to be written back to main memory,
+ /// overwriting whatever is already there.
+ #[inline]
+ pub fn clean_dcache(&mut self, cpuid: &mut CPUID) {
+ // NOTE(unsafe): No races as all CBP registers are write-only and stateless
+ let mut cbp = unsafe { CBP::new() };
+
+ // Read number of sets and ways
+ let (sets, ways) = cpuid.cache_num_sets_ways(0, CsselrCacheType::DataOrUnified);
+
+ for set in 0..sets {
+ for way in 0..ways {
+ cbp.dccsw(set, way);
+ }
+ }
+
+ crate::asm::dsb();
+ crate::asm::isb();
+ }
+
+ /// Cleans and invalidates the entire D-cache.
+ ///
+ /// This function causes everything in the D-cache to be written back to main memory,
+ /// and then marks the entire D-cache as invalid, causing future reads to first fetch
+ /// from main memory.
+ #[inline]
+ pub fn clean_invalidate_dcache(&mut self, cpuid: &mut CPUID) {
+ // NOTE(unsafe): No races as all CBP registers are write-only and stateless
+ let mut cbp = unsafe { CBP::new() };
+
+ // Read number of sets and ways
+ let (sets, ways) = cpuid.cache_num_sets_ways(0, CsselrCacheType::DataOrUnified);
+
+ for set in 0..sets {
+ for way in 0..ways {
+ cbp.dccisw(set, way);
+ }
+ }
+
+ crate::asm::dsb();
+ crate::asm::isb();
+ }
+
+ /// Invalidates D-cache by address.
+ ///
+ /// * `addr`: The address to invalidate, which must be cache-line aligned.
+ /// * `size`: Number of bytes to invalidate, which must be a multiple of the cache line size.
+ ///
+ /// Invalidates D-cache cache lines, starting from the first line containing `addr`,
+ /// finishing once at least `size` bytes have been invalidated.
+ ///
+ /// Invalidation causes the next read access to memory to be fetched from main memory instead
+ /// of the cache.
+ ///
+ /// # Cache Line Sizes
+ ///
+ /// Cache line sizes vary by core. For all Cortex-M7 cores, the cache line size is fixed
+ /// to 32 bytes, which means `addr` must be 32-byte aligned and `size` must be a multiple
+ /// of 32. At the time of writing, no other Cortex-M cores have data caches.
+ ///
+ /// If `addr` is not cache-line aligned, or `size` is not a multiple of the cache line size,
+ /// other data before or after the desired memory would also be invalidated, which can very
+ /// easily cause memory corruption and undefined behaviour.
+ ///
+ /// # Safety
+ ///
+ /// After invalidating, the next read of invalidated data will be from main memory. This may
+ /// cause recent writes to be lost, potentially including writes that initialized objects.
+ /// Therefore, this method may cause uninitialized memory or invalid values to be read,
+ /// resulting in undefined behaviour. You must ensure that main memory contains valid and
+ /// initialized values before invalidating.
+ ///
+ /// `addr` **must** be aligned to the size of the cache lines, and `size` **must** be a
+ /// multiple of the cache line size, otherwise this function will invalidate other memory,
+ /// easily leading to memory corruption and undefined behaviour. This precondition is checked
+ /// in debug builds using a `debug_assert!()`, but not checked in release builds to avoid
+ /// a runtime-dependent `panic!()` call.
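+ ///
+ /// # Example
+ ///
+ /// A sketch for a buffer a DMA engine has just written behind the cache's back;
+ /// the aligned wrapper is illustrative and `scb` is the `SCB` instance from
+ /// `Peripherals`:
+ ///
+ /// ```ignore
+ /// #[repr(align(32))]
+ /// struct Aligned([u8; 64]);
+ ///
+ /// let buf = Aligned([0; 64]);
+ /// // ... DMA writes into `buf`, bypassing the D-cache ...
+ /// // 64 is a multiple of the 32-byte line size, and the wrapper guarantees
+ /// // 32-byte alignment, so the preconditions hold.
+ /// unsafe { scb.invalidate_dcache_by_address(buf.0.as_ptr() as usize, buf.0.len()) };
+ /// ```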
+ #[inline]
+ pub unsafe fn invalidate_dcache_by_address(&mut self, addr: usize, size: usize) {
+ // No-op zero sized operations
+ if size == 0 {
+ return;
+ }
+
+ // NOTE(unsafe): No races as all CBP registers are write-only and stateless
+ let mut cbp = CBP::new();
+
+ // dminline is log2(num words), so 2**dminline * 4 gives size in bytes
+ let dminline = CPUID::cache_dminline();
+ let line_size = (1 << dminline) * 4;
+
+ debug_assert!((addr & (line_size - 1)) == 0);
+ debug_assert!((size & (line_size - 1)) == 0);
+
+ crate::asm::dsb();
+
+ // Find number of cache lines to invalidate
+ let num_lines = ((size - 1) / line_size) + 1;
+
+ // Compute address of first cache line
+ let mask = 0xFFFF_FFFF - (line_size - 1);
+ let mut addr = addr & mask;
+
+ for _ in 0..num_lines {
+ cbp.dcimvac(addr as u32);
+ addr += line_size;
+ }
+
+ crate::asm::dsb();
+ crate::asm::isb();
+ }
+
+ /// Invalidates an object from the D-cache.
+ ///
+ /// * `obj`: The object to invalidate.
+ ///
+ /// Invalidates D-cache starting from the first cache line containing `obj`,
+ /// continuing to invalidate cache lines until all of `obj` has been invalidated.
+ ///
+ /// Invalidation causes the next read access to memory to be fetched from main memory instead
+ /// of the cache.
+ ///
+ /// # Cache Line Sizes
+ ///
+ /// Cache line sizes vary by core. For all Cortex-M7 cores, the cache line size is fixed
+ /// to 32 bytes, which means `obj` must be 32-byte aligned, and its size must be a multiple
+ /// of 32 bytes. At the time of writing, no other Cortex-M cores have data caches.
+ ///
+ /// If `obj` is not cache-line aligned, or its size is not a multiple of the cache line size,
+ /// other data before or after the desired memory would also be invalidated, which can very
+ /// easily cause memory corruption and undefined behaviour.
+ ///
+ /// # Safety
+ ///
+ /// After invalidating, `obj` will be read from main memory on next access. This may cause
+ /// recent writes to `obj` to be lost, potentially including the write that initialized it.
+ /// Therefore, this method may cause uninitialized memory or invalid values to be read,
+ /// resulting in undefined behaviour. You must ensure that main memory contains a valid and
+ /// initialized value for T before invalidating `obj`.
+ ///
+ /// `obj` **must** be aligned to the size of the cache lines, and its size **must** be a
+ /// multiple of the cache line size, otherwise this function will invalidate other memory,
+ /// easily leading to memory corruption and undefined behaviour. This precondition is checked
+ /// in debug builds using a `debug_assert!()`, but not checked in release builds to avoid
+ /// a runtime-dependent `panic!()` call.
+ #[inline]
+ pub unsafe fn invalidate_dcache_by_ref<T>(&mut self, obj: &mut T) {
+ self.invalidate_dcache_by_address(obj as *const T as usize, core::mem::size_of::<T>());
+ }
+
+ /// Invalidates a slice from the D-cache.
+ ///
+ /// * `slice`: The slice to invalidate.
+ ///
+ /// Invalidates D-cache starting from the first cache line containing members of `slice`,
+ /// continuing to invalidate cache lines until all of `slice` has been invalidated.
+ ///
+ /// Invalidation causes the next read access to memory to be fetched from main memory instead
+ /// of the cache.
+ ///
+ /// # Cache Line Sizes
+ ///
+ /// Cache line sizes vary by core. For all Cortex-M7 cores, the cache line size is fixed
+ /// to 32 bytes, which means `slice` must be 32-byte aligned, and its size must be a multiple
+ /// of 32 bytes. At the time of writing, no other Cortex-M cores have data caches.
+ ///
+ /// If `slice` is not cache-line aligned, or its size is not a multiple of the cache line size,
+ /// other data before or after the desired memory would also be invalidated, which can very
+ /// easily cause memory corruption and undefined behaviour.
+ ///
+ /// # Safety
+ ///
+ /// After invalidating, `slice` will be read from main memory on next access. This may cause
+ /// recent writes to `slice` to be lost, potentially including the write that initialized it.
+ /// Therefore, this method may cause uninitialized memory or invalid values to be read,
+ /// resulting in undefined behaviour. You must ensure that main memory contains valid and
+ /// initialized values for T before invalidating `slice`.
+ ///
+ /// `slice` **must** be aligned to the size of the cache lines, and its size **must** be a
+ /// multiple of the cache line size, otherwise this function will invalidate other memory,
+ /// easily leading to memory corruption and undefined behaviour. This precondition is checked
+ /// in debug builds using a `debug_assert!()`, but not checked in release builds to avoid
+ /// a runtime-dependent `panic!()` call.
+ #[inline]
+ pub unsafe fn invalidate_dcache_by_slice<T>(&mut self, slice: &mut [T]) {
+ self.invalidate_dcache_by_address(
+ slice.as_ptr() as usize,
+ slice.len() * core::mem::size_of::<T>(),
+ );
+ }
+
+ /// Cleans D-cache by address.
+ ///
+ /// * `addr`: The address to start cleaning at.
+ /// * `size`: The number of bytes to clean.
+ ///
+ /// Cleans D-cache cache lines, starting from the first line containing `addr`,
+ /// finishing once at least `size` bytes have been cleaned.
+ ///
+ /// Cleaning the cache causes whatever data is present in the cache to be immediately written
+ /// to main memory, overwriting whatever was in main memory.
+ ///
+ /// # Cache Line Sizes
+ ///
+ /// Cache line sizes vary by core. For all Cortex-M7 cores, the cache line size is fixed
+ /// to 32 bytes, which means `addr` should generally be 32-byte aligned and `size` should be a
+ /// multiple of 32. At the time of writing, no other Cortex-M cores have data caches.
+ ///
+ /// If `addr` is not cache-line aligned, or `size` is not a multiple of the cache line size,
+ /// other data before or after the desired memory will also be cleaned. From the point of view
+ /// of the core executing this function, memory remains consistent, so this is not unsound,
+ /// but is worth knowing about.
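+ ///
+ /// # Example
+ ///
+ /// A sketch of publishing a buffer to a DMA engine that reads main memory
+ /// directly; `scb` is the `SCB` instance from `Peripherals` and the buffer is
+ /// illustrative:
+ ///
+ /// ```ignore
+ /// let buf = [0u8; 64];
+ /// // Write any cached contents back so the DMA engine sees them.
+ /// scb.clean_dcache_by_address(buf.as_ptr() as usize, buf.len());
+ /// // ... start the DMA transfer reading from `buf` ...
+ /// ```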
+ #[inline]
+ pub fn clean_dcache_by_address(&mut self, addr: usize, size: usize) {
+ // No-op zero sized operations
+ if size == 0 {
+ return;
+ }
+
+ // NOTE(unsafe): No races as all CBP registers are write-only and stateless
+ let mut cbp = unsafe { CBP::new() };
+
+ crate::asm::dsb();
+
+ let dminline = CPUID::cache_dminline();
+ let line_size = (1 << dminline) * 4;
+ let num_lines = ((size - 1) / line_size) + 1;
+
+ let mask = 0xFFFF_FFFF - (line_size - 1);
+ let mut addr = addr & mask;
+
+ for _ in 0..num_lines {
+ cbp.dccmvac(addr as u32);
+ addr += line_size;
+ }
+
+ crate::asm::dsb();
+ crate::asm::isb();
+ }
+
+ /// Cleans an object from the D-cache.
+ ///
+ /// * `obj`: The object to clean.
+ ///
+ /// Cleans D-cache starting from the first cache line containing `obj`,
+ /// continuing to clean cache lines until all of `obj` has been cleaned.
+ ///
+ /// It is recommended that `obj` is both aligned to the cache line size and a multiple of
+ /// the cache line size in length; otherwise, surrounding data will also be cleaned.
+ ///
+ /// Cleaning the cache causes whatever data is present in the cache to be immediately written
+ /// to main memory, overwriting whatever was in main memory.
+ #[inline]
+ pub fn clean_dcache_by_ref<T>(&mut self, obj: &T) {
+ self.clean_dcache_by_address(obj as *const T as usize, core::mem::size_of::<T>());
+ }
+
+ /// Cleans a slice from D-cache.
+ ///
+ /// * `slice`: The slice to clean.
+ ///
+ /// Cleans D-cache starting from the first cache line containing members of `slice`,
+ /// continuing to clean cache lines until all of `slice` has been cleaned.
+ ///
+ /// It is recommended that `slice` is both aligned to the cache line size and a multiple of
+ /// the cache line size in length; otherwise, surrounding data will also be cleaned.
+ ///
+ /// Cleaning the cache causes whatever data is present in the cache to be immediately written
+ /// to main memory, overwriting whatever was in main memory.
+ #[inline]
+ pub fn clean_dcache_by_slice<T>(&mut self, slice: &[T]) {
+ self.clean_dcache_by_address(
+ slice.as_ptr() as usize,
+ slice.len() * core::mem::size_of::<T>(),
+ );
+ }
+
+ /// Cleans and invalidates D-cache by address.
+ ///
+ /// * `addr`: The address to clean and invalidate.
+ /// * `size`: The number of bytes to clean and invalidate.
+ ///
+ /// Cleans and invalidates D-cache starting from the first cache line containing `addr`,
+ /// finishing once at least `size` bytes have been cleaned and invalidated.
+ ///
+ /// It is recommended that `addr` is aligned to the cache line size and `size` is a multiple of
+ /// the cache line size, otherwise surrounding data will also be cleaned.
+ ///
+ /// Cleaning and invalidating causes data in the D-cache to be written back to main memory,
+ /// and then marks that data in the D-cache as invalid, causing future reads to first fetch
+ /// from main memory.
+ #[inline]
+ pub fn clean_invalidate_dcache_by_address(&mut self, addr: usize, size: usize) {
+ // No-op zero sized operations
+ if size == 0 {
+ return;
+ }
+
+ // NOTE(unsafe): No races as all CBP registers are write-only and stateless
+ let mut cbp = unsafe { CBP::new() };
+
+ crate::asm::dsb();
+
+ // Cache lines are fixed to 32 bytes on Cortex-M7 and not present in earlier Cortex-M cores
+ const LINESIZE: usize = 32;
+ let num_lines = ((size - 1) / LINESIZE) + 1;
+
+ let mut addr = addr & 0xFFFF_FFE0;
+
+ for _ in 0..num_lines {
+ cbp.dccimvac(addr as u32);
+ addr += LINESIZE;
+ }
+
+ crate::asm::dsb();
+ crate::asm::isb();
+ }
+}
+
+const SCB_SCR_SLEEPDEEP: u32 = 0x1 << 2;
+
+impl SCB {
+ /// Set the SLEEPDEEP bit in the SCR register
+ #[inline]
+ pub fn set_sleepdeep(&mut self) {
+ unsafe {
+ self.scr.modify(|scr| scr | SCB_SCR_SLEEPDEEP);
+ }
+ }
+
+ /// Clear the SLEEPDEEP bit in the SCR register
+ #[inline]
+ pub fn clear_sleepdeep(&mut self) {
+ unsafe {
+ self.scr.modify(|scr| scr & !SCB_SCR_SLEEPDEEP);
+ }
+ }
+}
+
+const SCB_SCR_SLEEPONEXIT: u32 = 0x1 << 1;
+
+impl SCB {
+ /// Set the SLEEPONEXIT bit in the SCR register
+ #[inline]
+ pub fn set_sleeponexit(&mut self) {
+ unsafe {
+ self.scr.modify(|scr| scr | SCB_SCR_SLEEPONEXIT);
+ }
+ }
+
+ /// Clear the SLEEPONEXIT bit in the SCR register
+ #[inline]
+ pub fn clear_sleeponexit(&mut self) {
+ unsafe {
+ self.scr.modify(|scr| scr & !SCB_SCR_SLEEPONEXIT);
+ }
+ }
+}
+
+const SCB_AIRCR_VECTKEY: u32 = 0x05FA << 16;
+const SCB_AIRCR_PRIGROUP_MASK: u32 = 0x7 << 8;
+const SCB_AIRCR_SYSRESETREQ: u32 = 1 << 2;
+
+impl SCB {
+ /// Initiate a system reset request to reset the MCU
+ #[inline]
+ pub fn sys_reset() -> ! {
+ crate::asm::dsb();
+ unsafe {
+ (*Self::PTR).aircr.modify(
+ |r| {
+ SCB_AIRCR_VECTKEY | // otherwise the write is ignored
+ r & SCB_AIRCR_PRIGROUP_MASK | // keep priority group unchanged
+ SCB_AIRCR_SYSRESETREQ
+ }, // set the bit
+ )
+ };
+ crate::asm::dsb();
+ loop {
+ // wait for the reset
+ crate::asm::nop(); // avoid rust-lang/rust#28728
+ }
+ }
+}
+
+const SCB_ICSR_PENDSVSET: u32 = 1 << 28;
+const SCB_ICSR_PENDSVCLR: u32 = 1 << 27;
+
+const SCB_ICSR_PENDSTSET: u32 = 1 << 26;
+const SCB_ICSR_PENDSTCLR: u32 = 1 << 25;
+
+impl SCB {
+ /// Set the PENDSVSET bit in the ICSR register which will pend the PendSV interrupt
+ #[inline]
+ pub fn set_pendsv() {
+ unsafe {
+ (*Self::PTR).icsr.write(SCB_ICSR_PENDSVSET);
+ }
+ }
+
+ /// Check if PENDSVSET bit in the ICSR register is set meaning PendSV interrupt is pending
+ #[inline]
+ pub fn is_pendsv_pending() -> bool {
+ unsafe { (*Self::PTR).icsr.read() & SCB_ICSR_PENDSVSET == SCB_ICSR_PENDSVSET }
+ }
+
+ /// Set the PENDSVCLR bit in the ICSR register which will clear a pending PendSV interrupt
+ #[inline]
+ pub fn clear_pendsv() {
+ unsafe {
+ (*Self::PTR).icsr.write(SCB_ICSR_PENDSVCLR);
+ }
+ }
+
+ /// Set the PENDSTSET bit in the ICSR register which will pend a SysTick interrupt
+ #[inline]
+ pub fn set_pendst() {
+ unsafe {
+ (*Self::PTR).icsr.write(SCB_ICSR_PENDSTSET);
+ }
+ }
+
+ /// Check if PENDSTSET bit in the ICSR register is set meaning SysTick interrupt is pending
+ #[inline]
+ pub fn is_pendst_pending() -> bool {
+ unsafe { (*Self::PTR).icsr.read() & SCB_ICSR_PENDSTSET == SCB_ICSR_PENDSTSET }
+ }
+
+ /// Set the PENDSTCLR bit in the ICSR register which will clear a pending SysTick interrupt
+ #[inline]
+ pub fn clear_pendst() {
+ unsafe {
+ (*Self::PTR).icsr.write(SCB_ICSR_PENDSTCLR);
+ }
+ }
+}
+
+/// System handlers, exceptions with configurable priority
+#[derive(Clone, Copy, Debug, Eq, PartialEq)]
+#[repr(u8)]
+pub enum SystemHandler {
+ // NonMaskableInt, // priority is fixed
+ // HardFault, // priority is fixed
+ /// Memory management interrupt (not present on Cortex-M0 variants)
+ #[cfg(not(armv6m))]
+ MemoryManagement = 4,
+
+ /// Bus fault interrupt (not present on Cortex-M0 variants)
+ #[cfg(not(armv6m))]
+ BusFault = 5,
+
+ /// Usage fault interrupt (not present on Cortex-M0 variants)
+ #[cfg(not(armv6m))]
+ UsageFault = 6,
+
+ /// Secure fault interrupt (only on ARMv8-M)
+ #[cfg(any(armv8m, native))]
+ SecureFault = 7,
+
+ /// SV call interrupt
+ SVCall = 11,
+
+ /// Debug monitor interrupt (not present on Cortex-M0 variants)
+ #[cfg(not(armv6m))]
+ DebugMonitor = 12,
+
+ /// Pend SV interrupt
+ PendSV = 14,
+
+ /// System Tick interrupt
+ SysTick = 15,
+}
+
+impl SCB {
+ /// Returns the hardware priority of `system_handler`
+ ///
+ /// *NOTE*: Hardware priority does not exactly match logical priority levels. See
+ /// [`NVIC.get_priority`](struct.NVIC.html#method.get_priority) for more details.
+ #[inline]
+ pub fn get_priority(system_handler: SystemHandler) -> u8 {
+ let index = system_handler as u8;
+
+ #[cfg(not(armv6m))]
+ {
+ // NOTE(unsafe) atomic read with no side effects
+
+ // NOTE(unsafe): Index is bounded to [4,15] by SystemHandler design.
+ // TODO: Review it after rust-lang/rust/issues/13926 will be fixed.
+ let priority_ref = unsafe { (*Self::PTR).shpr.get_unchecked(usize::from(index - 4)) };
+
+ priority_ref.read()
+ }
+
+ #[cfg(armv6m)]
+ {
+ // NOTE(unsafe) atomic read with no side effects
+
+ // NOTE(unsafe): Index is bounded to [11,15] by SystemHandler design.
+ // TODO: Review it after rust-lang/rust/issues/13926 will be fixed.
+ let priority_ref = unsafe {
+ (*Self::PTR)
+ .shpr
+ .get_unchecked(usize::from((index - 8) / 4))
+ };
+
+ let shpr = priority_ref.read();
+ let prio = (shpr >> (8 * (index % 4))) & 0x0000_00ff;
+ prio as u8
+ }
+ }
+
+ /// Sets the hardware priority of `system_handler` to `prio`
+ ///
+ /// *NOTE*: Hardware priority does not exactly match logical priority levels. See
+ /// [`NVIC.get_priority`](struct.NVIC.html#method.get_priority) for more details.
+ ///
+ /// On ARMv6-M, updating a system handler priority requires a read-modify-write operation. On
+ /// ARMv7-M, the operation is performed in a single, atomic write operation.
+ ///
+ /// # Safety
+ ///
+ /// Changing priority levels can break priority-based critical sections (see
+ /// [`register::basepri`](crate::register::basepri)) and compromise memory safety.
+ #[inline]
+ pub unsafe fn set_priority(&mut self, system_handler: SystemHandler, prio: u8) {
+ let index = system_handler as u8;
+
+ #[cfg(not(armv6m))]
+ {
+ // NOTE(unsafe): Index is bounded to [4,15] by SystemHandler design.
+ // TODO: Review it after rust-lang/rust/issues/13926 will be fixed.
+ let priority_ref = (*Self::PTR).shpr.get_unchecked(usize::from(index - 4));
+
+ priority_ref.write(prio)
+ }
+
+ #[cfg(armv6m)]
+ {
+ // NOTE(unsafe): Index is bounded to [11,15] by SystemHandler design.
+ // TODO: Review it after rust-lang/rust/issues/13926 will be fixed.
+ let priority_ref = (*Self::PTR)
+ .shpr
+ .get_unchecked(usize::from((index - 8) / 4));
+
+ priority_ref.modify(|value| {
+ let shift = 8 * (index % 4);
+ let mask = 0x0000_00ff << shift;
+ let prio = u32::from(prio) << shift;
+
+ (value & !mask) | prio
+ });
+ }
+ }
+
+ /// Return the bit position of the exception enable bit in the SHCSR register
+ #[inline]
+ #[cfg(not(any(armv6m, armv8m_base)))]
+ fn shcsr_enable_shift(exception: Exception) -> Option<u32> {
+ match exception {
+ Exception::MemoryManagement => Some(16),
+ Exception::BusFault => Some(17),
+ Exception::UsageFault => Some(18),
+ #[cfg(armv8m_main)]
+ Exception::SecureFault => Some(19),
+ _ => None,
+ }
+ }
+
+ /// Enable the exception
+ ///
+ /// If the exception is enabled, its dedicated handler is executed instead of the HardFault
+ /// handler when the exception is triggered.
+ /// This function is only allowed on the following exceptions:
+ /// * `MemoryManagement`
+ /// * `BusFault`
+ /// * `UsageFault`
+ /// * `SecureFault` (can only be enabled from Secure state)
+ ///
+ /// Calling this function with any other exception will do nothing.
+ #[inline]
+ #[cfg(not(any(armv6m, armv8m_base)))]
+ pub fn enable(&mut self, exception: Exception) {
+ if let Some(shift) = SCB::shcsr_enable_shift(exception) {
+ // The mutable reference to SCB makes sure that only this code is currently modifying
+ // the register.
+ unsafe { self.shcsr.modify(|value| value | (1 << shift)) }
+ }
+ }
+
+ /// Disable the exception
+ ///
+ /// If the exception is disabled, the HardFault handler is executed instead of the dedicated
+ /// exception handler when the exception is triggered.
+ /// This function is only allowed on the following exceptions:
+ /// * `MemoryManagement`
+ /// * `BusFault`
+ /// * `UsageFault`
+ /// * `SecureFault` (cannot be changed from Non-secure state)
+ ///
+ /// Calling this function with any other exception will do nothing.
+ #[inline]
+ #[cfg(not(any(armv6m, armv8m_base)))]
+ pub fn disable(&mut self, exception: Exception) {
+ if let Some(shift) = SCB::shcsr_enable_shift(exception) {
+ // The mutable reference to SCB makes sure that only this code is currently modifying
+ // the register.
+ unsafe { self.shcsr.modify(|value| value & !(1 << shift)) }
+ }
+ }
+
+ /// Check if an exception is enabled
+ ///
+ /// This function is only allowed on the following exceptions:
+ /// * `MemoryManagement`
+ /// * `BusFault`
+ /// * `UsageFault`
+ /// * `SecureFault` (cannot be read from Non-secure state)
+ ///
+ /// Calling this function with any other exception will return `false`.
+ #[inline]
+ #[cfg(not(any(armv6m, armv8m_base)))]
+ pub fn is_enabled(&self, exception: Exception) -> bool {
+ if let Some(shift) = SCB::shcsr_enable_shift(exception) {
+ (self.shcsr.read() & (1 << shift)) > 0
+ } else {
+ false
+ }
+ }
+}
diff --git a/cortex-m/src/peripheral/syst.rs b/cortex-m/src/peripheral/syst.rs
new file mode 100644
index 0000000..345acc2
--- /dev/null
+++ b/cortex-m/src/peripheral/syst.rs
@@ -0,0 +1,185 @@
+//! SysTick: System Timer
+
+use volatile_register::{RO, RW};
+
+use crate::peripheral::SYST;
+
+/// Register block
+#[repr(C)]
+pub struct RegisterBlock {
+ /// Control and Status
+ pub csr: RW<u32>,
+ /// Reload Value
+ pub rvr: RW<u32>,
+ /// Current Value
+ pub cvr: RW<u32>,
+ /// Calibration Value
+ pub calib: RO<u32>,
+}
+
+/// SysTick clock source
+#[derive(Clone, Copy, Debug, PartialEq, Eq)]
+pub enum SystClkSource {
+ /// Core-provided clock
+ Core,
+ /// External reference clock
+ External,
+}
+
+const SYST_COUNTER_MASK: u32 = 0x00ff_ffff;
+
+const SYST_CSR_ENABLE: u32 = 1 << 0;
+const SYST_CSR_TICKINT: u32 = 1 << 1;
+const SYST_CSR_CLKSOURCE: u32 = 1 << 2;
+const SYST_CSR_COUNTFLAG: u32 = 1 << 16;
+
+const SYST_CALIB_SKEW: u32 = 1 << 30;
+const SYST_CALIB_NOREF: u32 = 1 << 31;
+
+impl SYST {
+ /// Clears current value to 0
+ ///
+ /// After calling `clear_current()`, the next call to `has_wrapped()` will return `false`.
+ #[inline]
+ pub fn clear_current(&mut self) {
+ unsafe { self.cvr.write(0) }
+ }
+
+ /// Disables counter
+ #[inline]
+ pub fn disable_counter(&mut self) {
+ unsafe { self.csr.modify(|v| v & !SYST_CSR_ENABLE) }
+ }
+
+ /// Disables SysTick interrupt
+ #[inline]
+ pub fn disable_interrupt(&mut self) {
+ unsafe { self.csr.modify(|v| v & !SYST_CSR_TICKINT) }
+ }
+
+ /// Enables counter
+ ///
+ /// *NOTE* The reference manual indicates that:
+ ///
+ /// "The SysTick counter reload and current value are undefined at reset, the correct
+ /// initialization sequence for the SysTick counter is:
+ ///
+ /// - Program reload value
+ /// - Clear current value
+ /// - Program Control and Status register"
+ ///
+ /// The sequence translates to `self.set_reload(x); self.clear_current(); self.enable_counter()`
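+ ///
+ /// # Example
+ ///
+ /// A sketch of a 10 ms periodic tick; the 12 MHz core clock and the `syst`
+ /// instance from `Peripherals` are illustrative:
+ ///
+ /// ```ignore
+ /// use cortex_m::peripheral::syst::SystClkSource;
+ ///
+ /// syst.set_clock_source(SystClkSource::Core);
+ /// syst.set_reload(12_000_000 / 100 - 1); // wrap every 120_000 ticks
+ /// syst.clear_current();
+ /// syst.enable_counter();
+ /// ```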
+ #[inline]
+ pub fn enable_counter(&mut self) {
+ unsafe { self.csr.modify(|v| v | SYST_CSR_ENABLE) }
+ }
+
+ /// Enables SysTick interrupt
+ #[inline]
+ pub fn enable_interrupt(&mut self) {
+ unsafe { self.csr.modify(|v| v | SYST_CSR_TICKINT) }
+ }
+
+ /// Gets clock source
+ ///
+ /// *NOTE* This takes `&mut self` because the read operation has side effects and can clear the
+ /// bit that indicates that the timer has wrapped (cf. `SYST.has_wrapped`)
+ #[inline]
+ pub fn get_clock_source(&mut self) -> SystClkSource {
+ // NOTE reading CSR can clear COUNTFLAG, hence this method takes `&mut self`
+ if self.csr.read() & SYST_CSR_CLKSOURCE != 0 {
+ SystClkSource::Core
+ } else {
+ SystClkSource::External
+ }
+ }
+
+ /// Gets current value
+ #[inline]
+ pub fn get_current() -> u32 {
+ // NOTE(unsafe) atomic read with no side effects
+ unsafe { (*Self::PTR).cvr.read() }
+ }
+
+ /// Gets reload value
+ #[inline]
+ pub fn get_reload() -> u32 {
+ // NOTE(unsafe) atomic read with no side effects
+ unsafe { (*Self::PTR).rvr.read() }
+ }
+
+ /// Returns the reload value with which the counter would wrap once per 10
+ /// ms
+ ///
+ /// Returns `0` if the value is not known (e.g. because the clock can
+ /// change dynamically).
+ #[inline]
+ pub fn get_ticks_per_10ms() -> u32 {
+ // NOTE(unsafe) atomic read with no side effects
+ unsafe { (*Self::PTR).calib.read() & SYST_COUNTER_MASK }
+ }
+
+ /// Checks if an external reference clock is available
+ #[inline]
+ pub fn has_reference_clock() -> bool {
+ // NOTE(unsafe) atomic read with no side effects
+ unsafe { (*Self::PTR).calib.read() & SYST_CALIB_NOREF == 0 }
+ }
+
+ /// Checks if the counter wrapped (underflowed) since the last check
+ ///
+ /// *NOTE* This takes `&mut self` because the read operation has a side effect: it clears the
+ /// COUNTFLAG bit of the CSR register.
+ #[inline]
+ pub fn has_wrapped(&mut self) -> bool {
+ self.csr.read() & SYST_CSR_COUNTFLAG != 0
+ }
+
+ /// Checks if counter is enabled
+ ///
+ /// *NOTE* This takes `&mut self` because the read operation has side effects and can clear the
+ /// bit that indicates that the timer has wrapped (cf. `SYST.has_wrapped`)
+ #[inline]
+ pub fn is_counter_enabled(&mut self) -> bool {
+ self.csr.read() & SYST_CSR_ENABLE != 0
+ }
+
+ /// Checks if SysTick interrupt is enabled
+ ///
+ /// *NOTE* This takes `&mut self` because the read operation has side effects and can clear the
+ /// bit that indicates that the timer has wrapped (cf. `SYST.has_wrapped`)
+ #[inline]
+ pub fn is_interrupt_enabled(&mut self) -> bool {
+ self.csr.read() & SYST_CSR_TICKINT != 0
+ }
+
+ /// Checks if the calibration value is precise
+ ///
+ /// Returns `false` if using the reload value returned by
+ /// `get_ticks_per_10ms()` may result in a period significantly deviating
+ /// from 10 ms.
+ #[inline]
+ pub fn is_precise() -> bool {
+ // NOTE(unsafe) atomic read with no side effects
+ unsafe { (*Self::PTR).calib.read() & SYST_CALIB_SKEW == 0 }
+ }
+
+ /// Sets clock source
+ #[inline]
+ pub fn set_clock_source(&mut self, clk_source: SystClkSource) {
+ match clk_source {
+ SystClkSource::External => unsafe { self.csr.modify(|v| v & !SYST_CSR_CLKSOURCE) },
+ SystClkSource::Core => unsafe { self.csr.modify(|v| v | SYST_CSR_CLKSOURCE) },
+ }
+ }
+
+ /// Sets reload value
+ ///
+ /// Valid values are between `1` and `0x00ffffff`.
+ ///
+ /// *NOTE* To make the timer wrap every `N` ticks set the reload value to `N - 1`
+ #[inline]
+ pub fn set_reload(&mut self, value: u32) {
+ unsafe { self.rvr.write(value) }
+ }
+}
diff --git a/cortex-m/src/peripheral/test.rs b/cortex-m/src/peripheral/test.rs
new file mode 100644
index 0000000..cab064a
--- /dev/null
+++ b/cortex-m/src/peripheral/test.rs
@@ -0,0 +1,170 @@
+#[test]
+fn cpuid() {
+ let cpuid = unsafe { &*crate::peripheral::CPUID::PTR };
+
+ assert_eq!(address(&cpuid.base), 0xE000_ED00);
+ assert_eq!(address(&cpuid.pfr), 0xE000_ED40);
+ assert_eq!(address(&cpuid.dfr), 0xE000_ED48);
+ assert_eq!(address(&cpuid.afr), 0xE000_ED4C);
+ assert_eq!(address(&cpuid.mmfr), 0xE000_ED50);
+ assert_eq!(address(&cpuid.isar), 0xE000_ED60);
+ assert_eq!(address(&cpuid.clidr), 0xE000_ED78);
+ assert_eq!(address(&cpuid.ctr), 0xE000_ED7C);
+ assert_eq!(address(&cpuid.ccsidr), 0xE000_ED80);
+ assert_eq!(address(&cpuid.csselr), 0xE000_ED84);
+}
+
+#[test]
+fn dcb() {
+ let dcb = unsafe { &*crate::peripheral::DCB::PTR };
+
+ assert_eq!(address(&dcb.dhcsr), 0xE000_EDF0);
+ assert_eq!(address(&dcb.dcrsr), 0xE000_EDF4);
+ assert_eq!(address(&dcb.dcrdr), 0xE000_EDF8);
+ assert_eq!(address(&dcb.demcr), 0xE000_EDFC);
+}
+
+#[test]
+fn dwt() {
+ let dwt = unsafe { &*crate::peripheral::DWT::PTR };
+
+ assert_eq!(address(&dwt.ctrl), 0xE000_1000);
+ #[cfg(not(armv6m))]
+ assert_eq!(address(&dwt.cyccnt), 0xE000_1004);
+ #[cfg(not(armv6m))]
+ assert_eq!(address(&dwt.cpicnt), 0xE000_1008);
+ #[cfg(not(armv6m))]
+ assert_eq!(address(&dwt.exccnt), 0xE000_100C);
+ #[cfg(not(armv6m))]
+ assert_eq!(address(&dwt.sleepcnt), 0xE000_1010);
+ #[cfg(not(armv6m))]
+ assert_eq!(address(&dwt.lsucnt), 0xE000_1014);
+ #[cfg(not(armv6m))]
+ assert_eq!(address(&dwt.foldcnt), 0xE000_1018);
+ assert_eq!(address(&dwt.pcsr), 0xE000_101C);
+ assert_eq!(address(&dwt.c[0].comp), 0xE000_1020);
+ assert_eq!(address(&dwt.c[0].mask), 0xE000_1024);
+ assert_eq!(address(&dwt.c[0].function), 0xE000_1028);
+ assert_eq!(address(&dwt.c[1].comp), 0xE000_1030);
+ assert_eq!(address(&dwt.c[1].mask), 0xE000_1034);
+ assert_eq!(address(&dwt.c[1].function), 0xE000_1038);
+ #[cfg(not(armv6m))]
+ assert_eq!(address(&dwt.lar), 0xE000_1FB0);
+ #[cfg(not(armv6m))]
+ assert_eq!(address(&dwt.lsr), 0xE000_1FB4);
+}
+
+#[test]
+fn fpb() {
+ let fpb = unsafe { &*crate::peripheral::FPB::PTR };
+
+ assert_eq!(address(&fpb.ctrl), 0xE000_2000);
+ assert_eq!(address(&fpb.remap), 0xE000_2004);
+ assert_eq!(address(&fpb.comp), 0xE000_2008);
+ assert_eq!(address(&fpb.comp[1]), 0xE000_200C);
+ assert_eq!(address(&fpb.lar), 0xE000_2FB0);
+ assert_eq!(address(&fpb.lsr), 0xE000_2FB4);
+}
+
+#[test]
+fn fpu() {
+ let fpu = unsafe { &*crate::peripheral::FPU::PTR };
+
+ assert_eq!(address(&fpu.fpccr), 0xE000_EF34);
+ assert_eq!(address(&fpu.fpcar), 0xE000_EF38);
+ assert_eq!(address(&fpu.fpdscr), 0xE000_EF3C);
+ assert_eq!(address(&fpu.mvfr), 0xE000_EF40);
+ assert_eq!(address(&fpu.mvfr[1]), 0xE000_EF44);
+ assert_eq!(address(&fpu.mvfr[2]), 0xE000_EF48);
+}
+
+#[test]
+fn itm() {
+ let itm = unsafe { &*crate::peripheral::ITM::PTR };
+
+ assert_eq!(address(&itm.stim), 0xE000_0000);
+ assert_eq!(address(&itm.ter), 0xE000_0E00);
+ assert_eq!(address(&itm.tpr), 0xE000_0E40);
+ assert_eq!(address(&itm.tcr), 0xE000_0E80);
+ assert_eq!(address(&itm.lar), 0xE000_0FB0);
+ assert_eq!(address(&itm.lsr), 0xE000_0FB4);
+}
+
+#[test]
+fn mpu() {
+ let mpu = unsafe { &*crate::peripheral::MPU::PTR };
+
+ assert_eq!(address(&mpu._type), 0xE000_ED90);
+ assert_eq!(address(&mpu.ctrl), 0xE000_ED94);
+ assert_eq!(address(&mpu.rnr), 0xE000_ED98);
+ assert_eq!(address(&mpu.rbar), 0xE000_ED9C);
+ assert_eq!(address(&mpu.rasr), 0xE000_EDA0);
+ assert_eq!(address(&mpu.rbar_a1), 0xE000_EDA4);
+ assert_eq!(address(&mpu.rasr_a1), 0xE000_EDA8);
+ assert_eq!(address(&mpu.rbar_a2), 0xE000_EDAC);
+ assert_eq!(address(&mpu.rasr_a2), 0xE000_EDB0);
+ assert_eq!(address(&mpu.rbar_a3), 0xE000_EDB4);
+ assert_eq!(address(&mpu.rasr_a3), 0xE000_EDB8);
+}
+
+#[test]
+fn nvic() {
+ let nvic = unsafe { &*crate::peripheral::NVIC::PTR };
+
+ assert_eq!(address(&nvic.iser), 0xE000_E100);
+ assert_eq!(address(&nvic.icer), 0xE000_E180);
+ assert_eq!(address(&nvic.ispr), 0xE000_E200);
+ assert_eq!(address(&nvic.icpr), 0xE000_E280);
+ assert_eq!(address(&nvic.iabr), 0xE000_E300);
+ assert_eq!(address(&nvic.ipr), 0xE000_E400);
+ #[cfg(not(armv6m))]
+ assert_eq!(address(&nvic.stir), 0xE000_EF00);
+}
+
+#[test]
+fn scb() {
+ let scb = unsafe { &*crate::peripheral::SCB::PTR };
+
+ assert_eq!(address(&scb.icsr), 0xE000_ED04);
+ assert_eq!(address(&scb.vtor), 0xE000_ED08);
+ assert_eq!(address(&scb.aircr), 0xE000_ED0C);
+ assert_eq!(address(&scb.scr), 0xE000_ED10);
+ assert_eq!(address(&scb.ccr), 0xE000_ED14);
+ assert_eq!(address(&scb.shpr), 0xE000_ED18);
+ assert_eq!(address(&scb.shcsr), 0xE000_ED24);
+ assert_eq!(address(&scb.cfsr), 0xE000_ED28);
+ assert_eq!(address(&scb.hfsr), 0xE000_ED2C);
+ assert_eq!(address(&scb.dfsr), 0xE000_ED30);
+ assert_eq!(address(&scb.mmfar), 0xE000_ED34);
+ assert_eq!(address(&scb.bfar), 0xE000_ED38);
+ assert_eq!(address(&scb.afsr), 0xE000_ED3C);
+ assert_eq!(address(&scb.cpacr), 0xE000_ED88);
+}
+
+#[test]
+fn syst() {
+ let syst = unsafe { &*crate::peripheral::SYST::PTR };
+
+ assert_eq!(address(&syst.csr), 0xE000_E010);
+ assert_eq!(address(&syst.rvr), 0xE000_E014);
+ assert_eq!(address(&syst.cvr), 0xE000_E018);
+ assert_eq!(address(&syst.calib), 0xE000_E01C);
+}
+
+#[test]
+fn tpiu() {
+ let tpiu = unsafe { &*crate::peripheral::TPIU::PTR };
+
+ assert_eq!(address(&tpiu.sspsr), 0xE004_0000);
+ assert_eq!(address(&tpiu.cspsr), 0xE004_0004);
+ assert_eq!(address(&tpiu.acpr), 0xE004_0010);
+ assert_eq!(address(&tpiu.sppr), 0xE004_00F0);
+ assert_eq!(address(&tpiu.ffcr), 0xE004_0304);
+ assert_eq!(address(&tpiu.lar), 0xE004_0FB0);
+ assert_eq!(address(&tpiu.lsr), 0xE004_0FB4);
+ assert_eq!(address(&tpiu._type), 0xE004_0FC8);
+}
+
+fn address<T>(r: *const T) -> usize {
+ r as usize
+}
diff --git a/cortex-m/src/peripheral/tpiu.rs b/cortex-m/src/peripheral/tpiu.rs
new file mode 100644
index 0000000..14dd35c
--- /dev/null
+++ b/cortex-m/src/peripheral/tpiu.rs
@@ -0,0 +1,160 @@
+//! Trace Port Interface Unit
+//!
+//! *NOTE* Not available on Armv6-M.
+
+use volatile_register::{RO, RW, WO};
+
+use crate::peripheral::TPIU;
+use bitfield::bitfield;
+
+/// Register block
+#[repr(C)]
+pub struct RegisterBlock {
+ /// Supported Parallel Port Sizes
+ pub sspsr: RO<u32>,
+ /// Current Parallel Port Size
+ pub cspsr: RW<u32>,
+ reserved0: [u32; 2],
+ /// Asynchronous Clock Prescaler
+ pub acpr: RW<u32>,
+ reserved1: [u32; 55],
+ /// Selected Pin Control
+ pub sppr: RW<Sppr>,
+ reserved2: [u32; 132],
+ /// Formatter and Flush Control
+ pub ffcr: RW<Ffcr>,
+ reserved3: [u32; 810],
+ /// Lock Access
+ pub lar: WO<u32>,
+ /// Lock Status
+ pub lsr: RO<u32>,
+ reserved4: [u32; 4],
+ /// TPIU Type
+ pub _type: RO<Type>,
+}
+
+bitfield! {
+ /// Formatter and flush control register.
+ #[repr(C)]
+ #[derive(Clone, Copy)]
+ pub struct Ffcr(u32);
+ enfcont, set_enfcont: 1;
+}
+
+bitfield! {
+ /// TPIU Type Register.
+ #[repr(C)]
+ #[derive(Clone, Copy)]
+ pub struct Type(u32);
+ u8, fifosz, _: 8, 6;
+ ptinvalid, _: 9;
+ mancvalid, _: 10;
+ nrzvalid, _: 11;
+}
+
+bitfield! {
+ /// Selected pin protocol register.
+ #[repr(C)]
+ #[derive(Clone, Copy)]
+ pub struct Sppr(u32);
+ u8, txmode, set_txmode: 1, 0;
+}
+
+/// The available protocols for the trace output.
+#[repr(u8)]
+#[derive(Debug, Eq, PartialEq, Copy, Clone)]
+pub enum TraceProtocol {
+ /// Parallel trace port mode
+ Parallel = 0b00,
+ /// Asynchronous SWO, using Manchester encoding
+ AsyncSWOManchester = 0b01,
+ /// Asynchronous SWO, using NRZ encoding
+ AsyncSWONRZ = 0b10,
+}
+
+impl core::convert::TryFrom<u8> for TraceProtocol {
+ type Error = ();
+
+ /// Tries to convert from a `TXMODE` field value. Fails if the set mode is
+ /// unknown (and thus unpredictable).
+ #[inline]
+ fn try_from(value: u8) -> Result<Self, Self::Error> {
+ match value {
+ x if x == Self::Parallel as u8 => Ok(Self::Parallel),
+ x if x == Self::AsyncSWOManchester as u8 => Ok(Self::AsyncSWOManchester),
+ x if x == Self::AsyncSWONRZ as u8 => Ok(Self::AsyncSWONRZ),
+ _ => Err(()), // unknown and unpredictable mode
+ }
+ }
+}
+
+/// The SWO options supported by the TPIU, and the minimum size of the
+/// FIFO output queue for trace data.
+#[derive(Debug, Eq, PartialEq, Copy, Clone)]
+pub struct SWOSupports {
+ /// Whether UART/NRZ encoding is supported for SWO.
+ pub nrz_encoding: bool,
+ /// Whether Manchester encoding is supported for SWO.
+ pub manchester_encoding: bool,
+ /// Whether parallel trace port operation is supported.
+ pub parallel_operation: bool,
+ /// The minimum implemented FIFO queue size of the TPIU for trace data.
+ pub min_queue_size: u8,
+}
+
+impl TPIU {
+ /// Sets the prescaler value for a wanted baud rate of the Serial
+ /// Wire Output (SWO) in relation to a given asynchronous reference
+ /// clock rate.
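+ ///
+ /// # Example
+ ///
+ /// A sketch for a 2 MHz SWO fed by a 16 MHz trace reference clock (both values
+ /// illustrative); this writes `16_000_000 / 2_000_000 - 1 = 7` to ACPR:
+ ///
+ /// ```ignore
+ /// tpiu.set_swo_baud_rate(16_000_000, 2_000_000);
+ /// ```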
+ #[inline]
+ pub fn set_swo_baud_rate(&mut self, ref_clk_rate: u32, baud_rate: u32) {
+ unsafe {
+ self.acpr.write((ref_clk_rate / baud_rate) - 1);
+ }
+ }
+
+ /// The protocol used for the trace output. Returns `None` if an
+ /// unknown (and thus unpredictable) mode is configured by means
+ /// other than
+ /// [`set_trace_output_protocol`](Self::set_trace_output_protocol).
+ #[inline]
+ pub fn trace_output_protocol(&self) -> Option<TraceProtocol> {
+ self.sppr.read().txmode().try_into().ok()
+ }
+
+ /// Sets the used protocol for the trace output.
+ #[inline]
+ pub fn set_trace_output_protocol(&mut self, proto: TraceProtocol) {
+ unsafe {
+ self.sppr.modify(|mut r| {
+ r.set_txmode(proto as u8);
+ r
+ });
+ }
+ }
+
+ /// Whether to enable the formatter. If disabled, only ITM and DWT
+ /// trace sources are passed through. Data from the ETM is
+ /// discarded.
+ #[inline]
+ pub fn enable_continuous_formatting(&mut self, bit: bool) {
+ unsafe {
+ self.ffcr.modify(|mut r| {
+ r.set_enfcont(bit);
+ r
+ });
+ }
+ }
+
+ /// Reads the supported trace output modes and the minimum size of
+ /// the TPIU FIFO queue for trace data.
+ #[inline]
+ pub fn swo_supports() -> SWOSupports {
+ let _type = unsafe { (*Self::PTR)._type.read() };
+ SWOSupports {
+ nrz_encoding: _type.nrzvalid(),
+ manchester_encoding: _type.mancvalid(),
+ parallel_operation: !_type.ptinvalid(),
+ min_queue_size: _type.fifosz(),
+ }
+ }
+}