Diffstat (limited to 'src')
 src/asm.rs              | 172
 src/cmse.rs             | 240
 src/itm.rs              | 112
 src/lib.rs              |   5
 src/peripheral/cbp.rs   |  46
 src/peripheral/cpuid.rs |  24
 src/peripheral/mod.rs   |  62
 src/peripheral/nvic.rs  |  49
 src/peripheral/scb.rs   | 420
 src/register/mod.rs     |  21
 10 files changed, 849 insertions(+), 302 deletions(-)
diff --git a/src/asm.rs b/src/asm.rs
index 5a35fa3..b7ff19e 100644
--- a/src/asm.rs
+++ b/src/asm.rs
@@ -42,7 +42,7 @@ pub fn delay(_n: u32) {
bne.n 1b"
: "+r"(_n / 4 + 1)
:
- :
+ : "cpsr"
: "volatile");
},
@@ -81,6 +81,35 @@ pub fn nop() {
}
}
+
+/// Generate an Undefined Instruction exception.
+///
+/// Can be used as a stable alternative to `core::intrinsics::abort`.
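+///
+/// # Example
+///
+/// A sketch of aborting from a panic handler; the `#[panic_handler]` wrapper is an
+/// assumption for illustration, not something `udf` requires:
+///
+/// ```ignore
+/// use core::panic::PanicInfo;
+///
+/// #[panic_handler]
+/// fn panic(_info: &PanicInfo) -> ! {
+///     // Raise an Undefined Instruction exception instead of spinning forever.
+///     cortex_m::asm::udf();
+/// }
+/// ```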
+#[inline]
+pub fn udf() -> ! {
+ match () {
+ #[cfg(all(cortex_m, feature = "inline-asm"))]
+ () => unsafe {
+ asm!("udf" :::: "volatile");
+ core::hint::unreachable_unchecked();
+ },
+
+ #[cfg(all(cortex_m, not(feature = "inline-asm")))]
+ () => unsafe {
+ extern "C" {
+ fn __udf();
+ }
+
+ __udf();
+
+ core::hint::unreachable_unchecked();
+ },
+
+ #[cfg(not(cortex_m))]
+ () => unimplemented!(),
+ }
+}
+
/// Wait For Event
#[inline]
pub fn wfe() {
@@ -222,3 +251,144 @@ pub fn dmb() {
() => unimplemented!(),
}
}
+
+/// Test Target
+///
+/// Queries the Security state and access permissions of a memory location.
+/// Returns a Test Target Response Payload (see section D1.2.215 of the
+/// Armv8-M Architecture Reference Manual).
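+///
+/// # Example
+///
+/// A minimal sketch (the address is illustrative); most callers will prefer the
+/// higher-level `cmse::TestTarget` wrapper over the raw payload:
+///
+/// ```ignore
+/// use cortex_m::asm::tt;
+///
+/// // Query the raw Test Target Response Payload for an address.
+/// let resp: u32 = tt(0x2000_0000 as *mut u32);
+/// ```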
+#[inline]
+#[cfg(armv8m)]
+// The __tt function does not dereference the pointer received.
+#[allow(clippy::not_unsafe_ptr_arg_deref)]
+pub fn tt(addr: *mut u32) -> u32 {
+ match () {
+ #[cfg(all(cortex_m, feature = "inline-asm"))]
+ () => {
+ let tt_resp: u32;
+ unsafe {
+ asm!("tt $0, $1" : "=r"(tt_resp) : "r"(addr) :: "volatile");
+ }
+ tt_resp
+ }
+
+ #[cfg(all(cortex_m, not(feature = "inline-asm")))]
+ () => unsafe {
+ extern "C" {
+ fn __tt(_: *mut u32) -> u32;
+ }
+
+ __tt(addr)
+ },
+
+ #[cfg(not(cortex_m))]
+ () => unimplemented!(),
+ }
+}
+
+/// Test Target Unprivileged
+///
+/// Queries the Security state and access permissions of a memory location for an unprivileged
+/// access to that location.
+/// Returns a Test Target Response Payload (see section D1.2.215 of the
+/// Armv8-M Architecture Reference Manual).
+#[inline]
+#[cfg(armv8m)]
+// The __ttt function does not dereference the pointer received.
+#[allow(clippy::not_unsafe_ptr_arg_deref)]
+pub fn ttt(addr: *mut u32) -> u32 {
+ match () {
+ #[cfg(all(cortex_m, feature = "inline-asm"))]
+ () => {
+ let tt_resp: u32;
+ unsafe {
+ asm!("ttt $0, $1" : "=r"(tt_resp) : "r"(addr) :: "volatile");
+ }
+ tt_resp
+ }
+
+ #[cfg(all(cortex_m, not(feature = "inline-asm")))]
+ () => unsafe {
+ extern "C" {
+ fn __ttt(_: *mut u32) -> u32;
+ }
+
+ __ttt(addr)
+ },
+
+ #[cfg(not(cortex_m))]
+ () => unimplemented!(),
+ }
+}
+
+/// Test Target Alternate Domain
+///
+/// Queries the Security state and access permissions of a memory location for a Non-Secure access
+/// to that location. This instruction is only valid when executing in Secure state and is
+/// undefined if used from Non-Secure state.
+/// Returns a Test Target Response Payload (see section D1.2.215 of the
+/// Armv8-M Architecture Reference Manual).
+#[inline]
+#[cfg(armv8m)]
+// The __tta function does not dereference the pointer received.
+#[allow(clippy::not_unsafe_ptr_arg_deref)]
+pub fn tta(addr: *mut u32) -> u32 {
+ match () {
+ #[cfg(all(cortex_m, feature = "inline-asm"))]
+ () => {
+ let tt_resp: u32;
+ unsafe {
+ asm!("tta $0, $1" : "=r"(tt_resp) : "r"(addr) :: "volatile");
+ }
+ tt_resp
+ }
+
+ #[cfg(all(cortex_m, not(feature = "inline-asm")))]
+ () => unsafe {
+ extern "C" {
+ fn __tta(_: *mut u32) -> u32;
+ }
+
+ __tta(addr)
+ },
+
+ #[cfg(not(cortex_m))]
+ () => unimplemented!(),
+ }
+}
+
+/// Test Target Alternate Domain Unprivileged
+///
+/// Queries the Security state and access permissions of a memory location for a Non-Secure and
+/// unprivileged access to that location. This instruction is only valid when executing in Secure
+/// state and is undefined if used from Non-Secure state.
+/// Returns a Test Target Response Payload (see section D1.2.215 of the
+/// Armv8-M Architecture Reference Manual).
+#[inline]
+#[cfg(armv8m)]
+// The __ttat function does not dereference the pointer received.
+#[allow(clippy::not_unsafe_ptr_arg_deref)]
+pub fn ttat(addr: *mut u32) -> u32 {
+ match () {
+ #[cfg(all(cortex_m, feature = "inline-asm"))]
+ () => {
+ let tt_resp: u32;
+ unsafe {
+ asm!("ttat $0, $1" : "=r"(tt_resp) : "r"(addr) :: "volatile");
+ }
+ tt_resp
+ }
+
+ #[cfg(all(cortex_m, not(feature = "inline-asm")))]
+ () => unsafe {
+ extern "C" {
+ fn __ttat(_: *mut u32) -> u32;
+ }
+
+ __ttat(addr)
+ },
+
+ #[cfg(not(cortex_m))]
+ () => unimplemented!(),
+ }
+}
diff --git a/src/cmse.rs b/src/cmse.rs
new file mode 100644
index 0000000..393e463
--- /dev/null
+++ b/src/cmse.rs
@@ -0,0 +1,240 @@
+//! Cortex-M Security Extensions
+//!
+//! This module provides several helper functions to support Armv8-M and Armv8.1-M Security
+//! Extensions.
+//! Most of this implementation is directly inspired by the "Armv8-M Security Extensions:
+//! Requirements on Development Tools" document available here:
+//! https://developer.arm.com/docs/ecm0359818/latest
+//!
+//! Please note that the TT instruction support described in part 4 of the document linked above
+//! is not part of CMSE itself, but is still provided by this module. The TT instructions return
+//! the configuration of the Memory Protection Unit at an address.
+//!
+//! # Notes
+//!
+//! * Non-Secure Unprivileged code will always read zeroes from `TestTarget` and should not use it.
+//! * Non-Secure Privileged code can check current (`AccessType::Current`) and Non-Secure
+//!   Unprivileged accesses (`AccessType::Unprivileged`).
+//! * Secure Unprivileged code can check Non-Secure Unprivileged accesses (`AccessType::NonSecure`).
+//! * Secure Privileged code can check all access types.
+//!
+//! # Example
+//!
+//! ```
+//! use cortex_m::cmse::{TestTarget, AccessType};
+//!
+//! // suspect_address was given by Non-Secure to a Secure function to write at it.
+//! // But is it allowed to?
+//! let suspect_address_test = TestTarget::check(0xDEADBEEF as *mut u32,
+//!                                              AccessType::NonSecureUnprivileged);
+//! if suspect_address_test.ns_read_and_writable() {
+//!     // Non-Secure is allowed to read and write this address!
+//! }
+//! ```
+
+use crate::asm::{tt, tta, ttat, ttt};
+use bitfield::bitfield;
+
+/// Memory access behaviour: determines which privilege execution mode and which Memory
+/// Protection Unit (MPU) are used.
+#[allow(clippy::missing_inline_in_public_items)]
+#[derive(PartialEq, Copy, Clone, Debug)]
+pub enum AccessType {
+ /// Access using current privilege level and reading from current security state MPU.
+ /// Uses the TT instruction.
+ Current,
+ /// Unprivileged access reading from current security state MPU. Uses the TTT instruction.
+ Unprivileged,
+ /// Access using current privilege level reading from Non-Secure MPU. Uses the TTA instruction.
+ /// Undefined if used from Non-Secure state.
+ NonSecure,
+    /// Unprivileged access reading from Non-Secure MPU. Uses the TTAT instruction.
+ /// Undefined if used from Non-Secure state.
+ NonSecureUnprivileged,
+}
+
+/// Abstraction of TT instructions and helper functions to determine the security and privilege
+/// attribute of a target address, accessed in different ways.
+#[allow(clippy::missing_inline_in_public_items)]
+#[derive(PartialEq, Copy, Clone, Debug)]
+pub struct TestTarget {
+ tt_resp: TtResp,
+ access_type: AccessType,
+}
+
+bitfield! {
+ /// Test Target Response Payload
+ ///
+ /// Provides the response payload from a TT, TTA, TTT or TTAT instruction.
+ #[derive(PartialEq, Copy, Clone)]
+ struct TtResp(u32);
+ impl Debug;
+ mregion, _: 7, 0;
+ sregion, _: 15, 8;
+ mrvalid, _: 16;
+ srvalid, _: 17;
+ r, _: 18;
+ rw, _: 19;
+ nsr, _: 20;
+ nsrw, _: 21;
+ s, _: 22;
+ irvalid, _: 23;
+ iregion, _: 31, 24;
+}
+
+impl TestTarget {
+    /// Creates a Test Target Response Payload by testing `addr` using `access_type`.
+ #[inline]
+ pub fn check(addr: *mut u32, access_type: AccessType) -> Self {
+ let tt_resp = match access_type {
+ AccessType::Current => TtResp(tt(addr)),
+ AccessType::Unprivileged => TtResp(ttt(addr)),
+ AccessType::NonSecure => TtResp(tta(addr)),
+ AccessType::NonSecureUnprivileged => TtResp(ttat(addr)),
+ };
+
+ TestTarget {
+ tt_resp,
+ access_type,
+ }
+ }
+
+    /// Creates a Test Target Response Payload by testing the address range from `addr` to
+    /// `addr + size - 1` using `access_type`.
+    /// Returns None if:
+    /// * the address range overlaps SAU, IDAU or MPU region boundaries
+    /// * `size` is 0
+    /// * `addr + size - 1` overflows
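+    ///
+    /// # Example
+    ///
+    /// A sketch with an illustrative address and size:
+    ///
+    /// ```ignore
+    /// use cortex_m::cmse::{AccessType, TestTarget};
+    ///
+    /// // Test a 128-byte buffer for Non-Secure unprivileged accesses.
+    /// match TestTarget::check_range(0x2000_0000 as *mut u32, 128,
+    ///                               AccessType::NonSecureUnprivileged) {
+    ///     // The whole range shares one attribution, so the result is meaningful.
+    ///     Some(test) => assert!(test.ns_read_and_writable()),
+    ///     // The range crosses a region boundary; reject the request.
+    ///     None => (),
+    /// }
+    /// ```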
+ #[inline]
+ pub fn check_range(addr: *mut u32, size: usize, access_type: AccessType) -> Option<Self> {
+ let begin: usize = addr as usize;
+ // Last address of the range (addr + size - 1). This also checks if size is 0.
+ let end: usize = begin.checked_add(size.checked_sub(1)?)?;
+
+ // Regions are aligned at 32-byte boundaries. If the address range fits in one 32-byte
+ // address line, a single TT instruction suffices. This is the case when the following
+ // constraint holds.
+ let single_check: bool = (begin % 32).checked_add(size)? <= 32usize;
+
+ let test_start = TestTarget::check(addr, access_type);
+
+ if single_check {
+ Some(test_start)
+ } else {
+ let test_end = TestTarget::check(end as *mut u32, access_type);
+ // Check that the range does not cross SAU, IDAU or MPU region boundaries.
+ if test_start != test_end {
+ None
+ } else {
+ Some(test_start)
+ }
+ }
+ }
+
+ /// Access type that was used for this test target.
+ #[inline]
+ pub fn access_type(self) -> AccessType {
+ self.access_type
+ }
+
+ /// Get the raw u32 value returned by the TT instruction used.
+ #[inline]
+ pub fn as_u32(self) -> u32 {
+ self.tt_resp.0
+ }
+
+ /// Read accessibility of the target address. Only returns the MPU settings without checking
+ /// the Security state of the target.
+ /// For Unprivileged and NonSecureUnprivileged access types, returns the permissions for
+ /// unprivileged access, regardless of whether the current mode is privileged or unprivileged.
+ /// Returns false if the TT instruction was executed from an unprivileged mode
+ /// and the NonSecure access type was not specified.
+ /// Returns false if the address matches multiple MPU regions.
+ #[inline]
+ pub fn readable(self) -> bool {
+ self.tt_resp.r()
+ }
+
+ /// Read and write accessibility of the target address. Only returns the MPU settings without
+ /// checking the Security state of the target.
+ /// For Unprivileged and NonSecureUnprivileged access types, returns the permissions for
+ /// unprivileged access, regardless of whether the current mode is privileged or unprivileged.
+ /// Returns false if the TT instruction was executed from an unprivileged mode
+ /// and the NonSecure access type was not specified.
+ /// Returns false if the address matches multiple MPU regions.
+ #[inline]
+ pub fn read_and_writable(self) -> bool {
+ self.tt_resp.rw()
+ }
+
+    /// Indicate the MPU region number containing the target address.
+    /// Returns None if the value is not valid:
+    /// * the MPU is not implemented or MPU_CTRL.ENABLE is set to zero
+    /// * the register argument specified by the MREGION field does not match any enabled MPU regions
+    /// * the address matched multiple MPU regions
+    /// * the TT instruction was executed from an unprivileged mode and the A flag was not specified.
+    #[inline]
+    pub fn mpu_region(self) -> Option<u8> {
+        if self.tt_resp.mrvalid() {
+            // Cast is safe as MREGION field is defined on 8 bits.
+            Some(self.tt_resp.mregion() as u8)
+        } else {
+            None
+        }
+    }
+
+ /// Indicates the Security attribute of the target address. Independent of AccessType.
+    /// Always false when the test is performed from the Non-Secure state.
+ #[inline]
+ pub fn secure(self) -> bool {
+ self.tt_resp.s()
+ }
+
+ /// Non-Secure Read accessibility of the target address.
+    /// Same as `readable() && !secure()`.
+ #[inline]
+ pub fn ns_readable(self) -> bool {
+ self.tt_resp.nsr()
+ }
+
+ /// Non-Secure Read and Write accessibility of the target address.
+    /// Same as `read_and_writable() && !secure()`.
+ #[inline]
+ pub fn ns_read_and_writable(self) -> bool {
+ self.tt_resp.nsrw()
+ }
+
+ /// Indicate the IDAU region number containing the target address. Independent of AccessType.
+ /// Returns None if the value is not valid:
+ /// * the IDAU cannot provide a region number
+ /// * the address is exempt from security attribution
+    /// * the TT instruction was executed from the Non-Secure state
+ #[inline]
+ pub fn idau_region(self) -> Option<u8> {
+ if self.tt_resp.irvalid() {
+ // Cast is safe as IREGION field is defined on 8 bits.
+ Some(self.tt_resp.iregion() as u8)
+ } else {
+ None
+ }
+ }
+
+ /// Indicate the SAU region number containing the target address. Independent of AccessType.
+ /// Returns None if the value is not valid:
+ /// * SAU_CTRL.ENABLE is set to zero
+ /// * the register argument specified in the SREGION field does not match any enabled SAU regions
+ /// * the address specified matches multiple enabled SAU regions
+ /// * the address specified by the SREGION field is exempt from the secure memory attribution
+ /// * the TT instruction was executed from the Non-secure state or the Security Extension is not
+ /// implemented
+ #[inline]
+ pub fn sau_region(self) -> Option<u8> {
+ if self.tt_resp.srvalid() {
+ // Cast is safe as SREGION field is defined on 8 bits.
+ Some(self.tt_resp.sregion() as u8)
+ } else {
+ None
+ }
+ }
+}
diff --git a/src/itm.rs b/src/itm.rs
index 6d75d00..32d3caf 100644
--- a/src/itm.rs
+++ b/src/itm.rs
@@ -1,15 +1,12 @@
//! Instrumentation Trace Macrocell
//!
-//! **NOTE** This module is only available on ARMv7-M and newer
+//! **NOTE** This module is only available on ARMv7-M and newer.
-use core::{fmt, mem, ptr, slice};
-
-use aligned::{Aligned, A4};
+use core::{fmt, ptr, slice};
use crate::peripheral::itm::Stim;
// NOTE assumes that `bytes` is 32-bit aligned
-#[allow(clippy::missing_inline_in_public_items)]
unsafe fn write_words(stim: &mut Stim, bytes: &[u32]) {
let mut p = bytes.as_ptr();
for _ in 0..bytes.len() {
@@ -19,6 +16,45 @@ unsafe fn write_words(stim: &mut Stim, bytes: &[u32]) {
}
}
+/// Writes an aligned byte slice to the ITM.
+///
+/// `buffer` must be 4-byte aligned.
+unsafe fn write_aligned_impl(port: &mut Stim, buffer: &[u8]) {
+ let len = buffer.len();
+
+ if len == 0 {
+ return;
+ }
+
+ let split = len & !0b11;
+ #[allow(clippy::cast_ptr_alignment)]
+ write_words(
+ port,
+ slice::from_raw_parts(buffer.as_ptr() as *const u32, split >> 2),
+ );
+
+ // 3 bytes or less left
+ let mut left = len & 0b11;
+ let mut ptr = buffer.as_ptr().add(split);
+
+ // at least 2 bytes left
+ if left > 1 {
+ while !port.is_fifo_ready() {}
+
+ #[allow(clippy::cast_ptr_alignment)]
+ port.write_u16(ptr::read(ptr as *const u16));
+
+ ptr = ptr.offset(2);
+ left -= 2;
+ }
+
+ // final byte
+ if left == 1 {
+ while !port.is_fifo_ready() {}
+ port.write_u8(*ptr);
+ }
+}
+
struct Port<'p>(&'p mut Stim);
impl<'p> fmt::Write for Port<'p> {
@@ -29,10 +65,15 @@ impl<'p> fmt::Write for Port<'p> {
}
}
-/// Writes a `buffer` to the ITM `port`
-#[allow(clippy::cast_ptr_alignment)]
+/// A wrapper type that aligns its contents on a 4-Byte boundary.
+///
+/// ITM transfers are most efficient when the data is 4-Byte-aligned. This type provides an easy
+/// way to accomplish and enforce such an alignment.
+#[repr(align(4))]
+pub struct Aligned<T: ?Sized>(pub T);
+
+/// Writes `buffer` to an ITM port.
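+///
+/// # Examples
+///
+/// A minimal sketch, mirroring the `write_aligned` example below:
+///
+/// ```no_run
+/// # use cortex_m::{itm, peripheral::ITM};
+/// # let port = unsafe { &mut (*ITM::ptr()).stim[0] };
+/// itm::write_all(port, b"Hello, world!\n");
+/// ```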
#[allow(clippy::missing_inline_in_public_items)]
-#[allow(clippy::transmute_ptr_to_ptr)]
pub fn write_all(port: &mut Stim, buffer: &[u8]) {
unsafe {
let mut len = buffer.len();
@@ -57,6 +98,9 @@ pub fn write_all(port: &mut Stim, buffer: &[u8]) {
if len > 1 {
// at least 2 bytes
while !port.is_fifo_ready() {}
+
+ // We checked the alignment above, so this is safe
+ #[allow(clippy::cast_ptr_alignment)]
port.write_u16(ptr::read(ptr as *const u16));
// 0x04
@@ -73,59 +117,31 @@ pub fn write_all(port: &mut Stim, buffer: &[u8]) {
}
}
- write_aligned(port, mem::transmute(slice::from_raw_parts(ptr, len)));
+ // The remaining data is 4-byte aligned, but might not be a multiple of 4 bytes
+ write_aligned_impl(port, slice::from_raw_parts(ptr, len));
}
}
-/// Writes a 4-byte aligned `buffer` to the ITM `port`
+/// Writes a 4-byte aligned `buffer` to an ITM port.
///
/// # Examples
///
-/// ``` ignore
-/// let mut buffer: Aligned<A4, _> = Aligned([0; 14]);
+/// ```no_run
+/// # use cortex_m::{itm::{self, Aligned}, peripheral::ITM};
+/// # let port = unsafe { &mut (*ITM::ptr()).stim[0] };
+/// let mut buffer = Aligned([0; 14]);
///
-/// buffer.copy_from_slice(b"Hello, world!\n");
+/// buffer.0.copy_from_slice(b"Hello, world!\n");
///
-/// itm::write_aligned(&itm.stim[0], &buffer);
+/// itm::write_aligned(port, &buffer);
///
/// // Or equivalently
-/// itm::write_aligned(&itm.stim[0], &Aligned(*b"Hello, world!\n"));
+/// itm::write_aligned(port, &Aligned(*b"Hello, world!\n"));
/// ```
-#[allow(clippy::cast_ptr_alignment)]
#[allow(clippy::missing_inline_in_public_items)]
-#[allow(clippy::transmute_ptr_to_ptr)]
-pub fn write_aligned(port: &mut Stim, buffer: &Aligned<A4, [u8]>) {
+pub fn write_aligned(port: &mut Stim, buffer: &Aligned<[u8]>) {
unsafe {
- let len = buffer.len();
-
- if len == 0 {
- return;
- }
-
- let split = len & !0b11;
- write_words(
- port,
- slice::from_raw_parts(buffer.as_ptr() as *const u32, split >> 2),
- );
-
- // 3 bytes or less left
- let mut left = len & 0b11;
- let mut ptr = buffer.as_ptr().add(split);
-
- // at least 2 bytes left
- if left > 1 {
- while !port.is_fifo_ready() {}
- port.write_u16(ptr::read(ptr as *const u16));
-
- ptr = ptr.offset(2);
- left -= 2;
- }
-
- // final byte
- if left == 1 {
- while !port.is_fifo_ready() {}
- port.write_u8(*ptr);
- }
+ write_aligned_impl(port, &buffer.0)
}
}
diff --git a/src/lib.rs b/src/lib.rs
index 481d84e..276551c 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -34,6 +34,8 @@
#![no_std]
#![allow(clippy::identity_op)]
#![allow(clippy::missing_safety_doc)]
+// Prevent clippy from complaining about empty match expressions that are used for cfg gating.
+#![allow(clippy::match_single_binding)]
// This makes clippy warn about public functions which are not #[inline].
//
@@ -50,7 +52,6 @@
// to be applied to the struct).
#![deny(clippy::missing_inline_in_public_items)]
-extern crate aligned;
extern crate bare_metal;
extern crate volatile_register;
@@ -58,6 +59,8 @@ extern crate volatile_register;
mod macros;
pub mod asm;
+#[cfg(armv8m)]
+pub mod cmse;
pub mod interrupt;
#[cfg(not(armv6m))]
pub mod itm;
diff --git a/src/peripheral/cbp.rs b/src/peripheral/cbp.rs
index 8d82e2a..6a1defd 100644
--- a/src/peripheral/cbp.rs
+++ b/src/peripheral/cbp.rs
@@ -39,34 +39,28 @@ const CBP_SW_SET_MASK: u32 = 0x1FF << CBP_SW_SET_POS;
impl CBP {
/// I-cache invalidate all to PoU
- #[inline]
+ #[inline(always)]
pub fn iciallu(&mut self) {
- unsafe {
- self.iciallu.write(0);
- }
+ unsafe { self.iciallu.write(0) };
}
/// I-cache invalidate by MVA to PoU
- #[inline]
+ #[inline(always)]
pub fn icimvau(&mut self, mva: u32) {
- unsafe {
- self.icimvau.write(mva);
- }
+ unsafe { self.icimvau.write(mva) };
}
/// D-cache invalidate by MVA to PoC
- #[inline]
- pub fn dcimvac(&mut self, mva: u32) {
- unsafe {
- self.dcimvac.write(mva);
- }
+ #[inline(always)]
+ pub unsafe fn dcimvac(&mut self, mva: u32) {
+ self.dcimvac.write(mva);
}
/// D-cache invalidate by set-way
///
/// `set` is masked to be between 0 and 3, and `way` between 0 and 511.
- #[inline]
- pub fn dcisw(&mut self, set: u16, way: u16) {
+ #[inline(always)]
+ pub unsafe fn dcisw(&mut self, set: u16, way: u16) {
// The ARMv7-M Architecture Reference Manual, as of Revision E.b, says these set/way
// operations have a register data format which depends on the implementation's
// associativity and number of sets. Specifically the 'way' and 'set' fields have
@@ -76,16 +70,14 @@ impl CBP {
// Generic User Guide section 4.8.3. Since no other ARMv7-M implementations except the
// Cortex-M7 have a DCACHE or ICACHE at all, it seems safe to do the same thing as the
// CMSIS-Core implementation and use fixed values.
- unsafe {
- self.dcisw.write(
- ((u32::from(way) & (CBP_SW_WAY_MASK >> CBP_SW_WAY_POS)) << CBP_SW_WAY_POS)
- | ((u32::from(set) & (CBP_SW_SET_MASK >> CBP_SW_SET_POS)) << CBP_SW_SET_POS),
- );
- }
+ self.dcisw.write(
+ ((u32::from(way) & (CBP_SW_WAY_MASK >> CBP_SW_WAY_POS)) << CBP_SW_WAY_POS)
+ | ((u32::from(set) & (CBP_SW_SET_MASK >> CBP_SW_SET_POS)) << CBP_SW_SET_POS),
+ );
}
/// D-cache clean by MVA to PoU
- #[inline]
+ #[inline(always)]
pub fn dccmvau(&mut self, mva: u32) {
unsafe {
self.dccmvau.write(mva);
@@ -93,7 +85,7 @@ impl CBP {
}
/// D-cache clean by MVA to PoC
- #[inline]
+ #[inline(always)]
pub fn dccmvac(&mut self, mva: u32) {
unsafe {
self.dccmvac.write(mva);
@@ -103,7 +95,7 @@ impl CBP {
/// D-cache clean by set-way
///
/// `set` is masked to be between 0 and 3, and `way` between 0 and 511.
- #[inline]
+ #[inline(always)]
pub fn dccsw(&mut self, set: u16, way: u16) {
// See comment for dcisw() about the format here
unsafe {
@@ -115,7 +107,7 @@ impl CBP {
}
/// D-cache clean and invalidate by MVA to PoC
- #[inline]
+ #[inline(always)]
pub fn dccimvac(&mut self, mva: u32) {
unsafe {
self.dccimvac.write(mva);
@@ -125,7 +117,7 @@ impl CBP {
/// D-cache clean and invalidate by set-way
///
/// `set` is masked to be between 0 and 3, and `way` between 0 and 511.
- #[inline]
+ #[inline(always)]
pub fn dccisw(&mut self, set: u16, way: u16) {
// See comment for dcisw() about the format here
unsafe {
@@ -137,7 +129,7 @@ impl CBP {
}
/// Branch predictor invalidate all
- #[inline]
+ #[inline(always)]
pub fn bpiall(&mut self) {
unsafe {
self.bpiall.write(0);
diff --git a/src/peripheral/cpuid.rs b/src/peripheral/cpuid.rs
index 787be5c..32d0baf 100644
--- a/src/peripheral/cpuid.rs
+++ b/src/peripheral/cpuid.rs
@@ -114,4 +114,28 @@ impl CPUID {
(1 + ((ccsidr & CCSIDR_ASSOCIATIVITY_MASK) >> CCSIDR_ASSOCIATIVITY_POS)) as u16,
)
}
+
+ /// Returns log2 of the number of words in the smallest cache line of all the data cache and
+ /// unified caches that are controlled by the processor.
+ ///
+ /// This is the `DminLine` field of the CTR register.
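+    ///
+    /// For example, a `DminLine` of 3 means the smallest line is 2^3 = 8 words, i.e.
+    /// 32 bytes, which is the fixed D-cache line size on the Cortex-M7.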
+ #[inline(always)]
+ pub fn cache_dminline() -> u32 {
+ const CTR_DMINLINE_POS: u32 = 16;
+ const CTR_DMINLINE_MASK: u32 = 0xF << CTR_DMINLINE_POS;
+ let ctr = unsafe { (*Self::ptr()).ctr.read() };
+ (ctr & CTR_DMINLINE_MASK) >> CTR_DMINLINE_POS
+ }
+
+ /// Returns log2 of the number of words in the smallest cache line of all the instruction
+ /// caches that are controlled by the processor.
+ ///
+ /// This is the `IminLine` field of the CTR register.
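+    ///
+    /// As with `DminLine`, the line size in bytes is 4 * 2^`IminLine`; the Cortex-M7's
+    /// fixed 32-byte I-cache lines are reported as an `IminLine` of 3.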
+ #[inline(always)]
+ pub fn cache_iminline() -> u32 {
+ const CTR_IMINLINE_POS: u32 = 0;
+ const CTR_IMINLINE_MASK: u32 = 0xF << CTR_IMINLINE_POS;
+ let ctr = unsafe { (*Self::ptr()).ctr.read() };
+ (ctr & CTR_IMINLINE_MASK) >> CTR_IMINLINE_POS
+ }
}
diff --git a/src/peripheral/mod.rs b/src/peripheral/mod.rs
index 22dd9cf..7af4b90 100644
--- a/src/peripheral/mod.rs
+++ b/src/peripheral/mod.rs
@@ -1,5 +1,4 @@
-#![allow(clippy::needless_doctest_main)]
-//! Core peripherals
+//! Core peripherals.
//!
//! # API
//!
@@ -9,41 +8,32 @@
//! the [`Peripherals::take`](struct.Peripherals.html#method.take) method.
//!
//! ``` no_run
-//! use cortex_m::peripheral::Peripherals;
-//!
-//! fn main() {
-//! let mut peripherals = Peripherals::take().unwrap();
-//! peripherals.DWT.enable_cycle_counter();
-//! }
+//! # use cortex_m::peripheral::Peripherals;
+//! let mut peripherals = Peripherals::take().unwrap();
+//! peripherals.DWT.enable_cycle_counter();
//! ```
//!
//! This method can only be successfully called *once* -- this is why the method returns an
//! `Option`. Subsequent calls to the method will result in a `None` value being returned.
//!
-//! ``` no_run
-//! use cortex_m::peripheral::Peripherals;
-//!
-//! fn main() {
-//! let ok = Peripherals::take().unwrap();
-//! let panics = Peripherals::take().unwrap();
-//! }
+//! ``` no_run, should_panic
+//! # use cortex_m::peripheral::Peripherals;
+//! let ok = Peripherals::take().unwrap();
+//! let panics = Peripherals::take().unwrap();
//! ```
//! A part of the peripheral API doesn't require access to a peripheral instance. This part of the
//! API is provided as static methods on the peripheral types. One example is the
//! [`DWT::get_cycle_count`](struct.DWT.html#method.get_cycle_count) method.
//!
//! ``` no_run
-//! use cortex_m::peripheral::{DWT, Peripherals};
-//!
-//! fn main() {
-//! {
-//! let mut peripherals = Peripherals::take().unwrap();
-//! peripherals.DWT.enable_cycle_counter();
-//! } // all the peripheral singletons are destroyed here
+//! # use cortex_m::peripheral::{DWT, Peripherals};
+//! {
+//! let mut peripherals = Peripherals::take().unwrap();
+//! peripherals.DWT.enable_cycle_counter();
+//! } // all the peripheral singletons are destroyed here
//!
-//! // but this method can be called without a DWT instance
-//! let cyccnt = DWT::get_cycle_count();
-//! }
+//! // but this method can be called without a DWT instance
+//! let cyccnt = DWT::get_cycle_count();
//! ```
//!
//! The singleton property can be *unsafely* bypassed using the `ptr` static method which is
@@ -51,17 +41,14 @@
//! safe higher level abstractions.
//!
//! ``` no_run
-//! use cortex_m::peripheral::{DWT, Peripherals};
-//!
-//! fn main() {
-//! {
-//! let mut peripherals = Peripherals::take().unwrap();
-//! peripherals.DWT.enable_cycle_counter();
-//! } // all the peripheral singletons are destroyed here
+//! # use cortex_m::peripheral::{DWT, Peripherals};
+//! {
+//! let mut peripherals = Peripherals::take().unwrap();
+//! peripherals.DWT.enable_cycle_counter();
+//! } // all the peripheral singletons are destroyed here
//!
-//! // actually safe because this is an atomic read with no side effects
-//! let cyccnt = unsafe { (*DWT::ptr()).cyccnt.read() };
-//! }
+//! // actually safe because this is an atomic read with no side effects
+//! let cyccnt = unsafe { (*DWT::ptr()).cyccnt.read() };
//! ```
//!
//! # References
@@ -142,6 +129,10 @@ pub struct Peripherals {
/// Trace Port Interface Unit (not present on Cortex-M0 variants)
pub TPIU: TPIU,
+
+ // Private field making `Peripherals` non-exhaustive. We don't use `#[non_exhaustive]` so we
+ // can support older Rust versions.
+ _priv: (),
}
// NOTE `no_mangle` is used here to prevent linking different minor versions of this crate as that
@@ -207,6 +198,7 @@ impl Peripherals {
TPIU: TPIU {
_marker: PhantomData,
},
+ _priv: (),
}
}
}
diff --git a/src/peripheral/nvic.rs b/src/peripheral/nvic.rs
index 1ecfc6e..6627e60 100644
--- a/src/peripheral/nvic.rs
+++ b/src/peripheral/nvic.rs
@@ -38,7 +38,6 @@ pub struct RegisterBlock {
_reserved5: [u32; 48],
- #[cfg(not(armv6m))]
/// Interrupt Priority
///
/// On ARMv7-M, 124 word-sized registers are available. Each of those
@@ -50,9 +49,9 @@ pub struct RegisterBlock {
/// On ARMv6-M, the registers must only be accessed along word boundaries,
/// so convenient byte-sized representation wouldn't work on that
/// architecture.
+ #[cfg(not(armv6m))]
pub ipr: [RW<u8>; 496],
- #[cfg(armv6m)]
/// Interrupt Priority
///
/// On ARMv7-M, 124 word-sized registers are available. Each of those
@@ -64,18 +63,18 @@ pub struct RegisterBlock {
/// On ARMv6-M, the registers must only be accessed along word boundaries,
/// so convenient byte-sized representation wouldn't work on that
/// architecture.
+ #[cfg(armv6m)]
pub ipr: [RW<u32>; 8],
#[cfg(not(armv6m))]
_reserved6: [u32; 580],
- #[cfg(not(armv6m))]
/// Software Trigger Interrupt
+ #[cfg(not(armv6m))]
pub stir: WO<u32>,
}
impl NVIC {
- #[cfg(not(armv6m))]
/// Request an IRQ in software
///
/// Writing a value to the INTID field is the same as manually pending an interrupt by setting
@@ -83,6 +82,7 @@ impl NVIC {
/// `set_pending`.
///
/// This method is not available on ARMv6-M chips.
+ #[cfg(not(armv6m))]
#[inline]
pub fn request<I>(&mut self, interrupt: I)
where
@@ -95,16 +95,6 @@ impl NVIC {
}
}
- /// Clears `interrupt`'s pending state
- #[deprecated(since = "0.5.8", note = "Use `NVIC::unpend`")]
- #[inline]
- pub fn clear_pending<I>(&mut self, interrupt: I)
- where
- I: Nr,
- {
- Self::unpend(interrupt)
- }
-
/// Disables `interrupt`
#[inline]
pub fn mask<I>(interrupt: I)
@@ -129,27 +119,6 @@ impl NVIC {
(*Self::ptr()).iser[usize::from(nr / 32)].write(1 << (nr % 32))
}
- /// Disables `interrupt`
- #[deprecated(since = "0.6.1", note = "Use `NVIC::mask`")]
- #[inline]
- pub fn disable<I>(&mut self, interrupt: I)
- where
- I: Nr,
- {
- Self::mask(interrupt)
- }
-
- /// **WARNING** This method is a soundness hole in the API; it should actually be an `unsafe`
- /// function. Use `NVIC::unmask` which has the right unsafety.
- #[deprecated(since = "0.6.1", note = "Use `NVIC::unmask`")]
- #[inline]
- pub fn enable<I>(&mut self, interrupt: I)
- where
- I: Nr,
- {
- unsafe { Self::unmask(interrupt) }
- }
-
/// Returns the NVIC priority of `interrupt`
///
/// *NOTE* NVIC encodes priority in the highest bits of a byte so values like `1` and `2` map
@@ -228,16 +197,6 @@ impl NVIC {
unsafe { (*Self::ptr()).ispr[usize::from(nr / 32)].write(1 << (nr % 32)) }
}
- /// Forces `interrupt` into pending state
- #[deprecated(since = "0.5.8", note = "Use `NVIC::pend`")]
- #[inline]
- pub fn set_pending<I>(&mut self, interrupt: I)
- where
- I: Nr,
- {
- Self::pend(interrupt)
- }
-
/// Sets the "priority" of `interrupt` to `prio`
///
/// *NOTE* See [`get_priority`](struct.NVIC.html#method.get_priority) method for an explanation
diff --git a/src/peripheral/scb.rs b/src/peripheral/scb.rs
index 9d58b03..940809e 100644
--- a/src/peripheral/scb.rs
+++ b/src/peripheral/scb.rs
@@ -305,8 +305,8 @@ impl VectActive {
#[cfg(not(armv6m))]
mod scb_consts {
- pub const SCB_CCR_IC_MASK: u32 = (1 << 17);
- pub const SCB_CCR_DC_MASK: u32 = (1 << 16);
+ pub const SCB_CCR_IC_MASK: u32 = 1 << 17;
+ pub const SCB_CCR_DC_MASK: u32 = 1 << 16;
}
#[cfg(not(armv6m))]
@@ -314,105 +314,119 @@ use self::scb_consts::*;
#[cfg(not(armv6m))]
impl SCB {
- /// Enables I-Cache if currently disabled
+ /// Enables I-cache if currently disabled.
+ ///
+ /// This operation first invalidates the entire I-cache.
#[inline]
pub fn enable_icache(&mut self) {
- // Don't do anything if ICache is already enabled
+ // Don't do anything if I-cache is already enabled
if Self::icache_enabled() {
return;
}
- // NOTE(unsafe) All CBP registers are write-only and stateless
+ // NOTE(unsafe): No races as all CBP registers are write-only and stateless
let mut cbp = unsafe { CBP::new() };
- // Invalidate I-Cache
+ // Invalidate I-cache
cbp.iciallu();
- // Enable I-Cache
+ // Enable I-cache
+ // NOTE(unsafe): We have synchronised access by &mut self
unsafe { self.ccr.modify(|r| r | SCB_CCR_IC_MASK) };
crate::asm::dsb();
crate::asm::isb();
}
- /// Disables I-Cache if currently enabled
+ /// Disables I-cache if currently enabled.
+ ///
+ /// This operation invalidates the entire I-cache after disabling.
#[inline]
pub fn disable_icache(&mut self) {
- // Don't do anything if ICache is already disabled
+ // Don't do anything if I-cache is already disabled
if !Self::icache_enabled() {
return;
}
- // NOTE(unsafe) All CBP registers are write-only and stateless
+ // NOTE(unsafe): No races as all CBP registers are write-only and stateless
let mut cbp = unsafe { CBP::new() };
- // Disable I-Cache
+ // Disable I-cache
+ // NOTE(unsafe): We have synchronised access by &mut self
unsafe { self.ccr.modify(|r| r & !SCB_CCR_IC_MASK) };
- // Invalidate I-Cache
+ // Invalidate I-cache
cbp.iciallu();
crate::asm::dsb();
crate::asm::isb();
}
- /// Returns whether the I-Cache is currently enabled
- #[inline]
+ /// Returns whether the I-cache is currently enabled.
+ #[inline(always)]
pub fn icache_enabled() -> bool {
crate::asm::dsb();
crate::asm::isb();
- // NOTE(unsafe) atomic read with no side effects
+ // NOTE(unsafe): atomic read with no side effects
unsafe { (*Self::ptr()).ccr.read() & SCB_CCR_IC_MASK == SCB_CCR_IC_MASK }
}
- /// Invalidates I-Cache
+ /// Invalidates the entire I-cache.
#[inline]
pub fn invalidate_icache(&mut self) {
- // NOTE(unsafe) All CBP registers are write-only and stateless
+ // NOTE(unsafe): No races as all CBP registers are write-only and stateless
let mut cbp = unsafe { CBP::new() };
- // Invalidate I-Cache
+ // Invalidate I-cache
cbp.iciallu();
crate::asm::dsb();
crate::asm::isb();
}
- /// Enables D-cache if currently disabled
+ /// Enables D-cache if currently disabled.
+ ///
+ /// This operation first invalidates the entire D-cache, ensuring it does
+ /// not contain stale values before being enabled.
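+    ///
+    /// # Example
+    ///
+    /// A sketch of enabling both caches at start-up, using the singletons from this crate:
+    ///
+    /// ```no_run
+    /// # use cortex_m::peripheral::Peripherals;
+    /// let mut p = Peripherals::take().unwrap();
+    /// p.SCB.enable_icache();
+    /// p.SCB.enable_dcache(&mut p.CPUID);
+    /// ```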
#[inline]
pub fn enable_dcache(&mut self, cpuid: &mut CPUID) {
- // Don't do anything if DCache is already enabled
+ // Don't do anything if D-cache is already enabled
if Self::dcache_enabled() {
return;
}
- // Invalidate anything currently in the DCache
- self.invalidate_dcache(cpuid);
+ // Invalidate anything currently in the D-cache
+ unsafe { self.invalidate_dcache(cpuid) };
- // Now turn on the DCache
+ // Now turn on the D-cache
+ // NOTE(unsafe): We have synchronised access by &mut self
unsafe { self.ccr.modify(|r| r | SCB_CCR_DC_MASK) };
crate::asm::dsb();
crate::asm::isb();
}
- /// Disables D-cache if currently enabled
+ /// Disables D-cache if currently enabled.
+ ///
+ /// This operation subsequently cleans and invalidates the entire D-cache,
+ /// ensuring all contents are safely written back to main memory after disabling.
#[inline]
pub fn disable_dcache(&mut self, cpuid: &mut CPUID) {
- // Don't do anything if DCache is already disabled
+ // Don't do anything if D-cache is already disabled
if !Self::dcache_enabled() {
return;
}
- // Turn off the DCache
+ // Turn off the D-cache
+ // NOTE(unsafe): We have synchronised access by &mut self
unsafe { self.ccr.modify(|r| r & !SCB_CCR_DC_MASK) };
// Clean and invalidate whatever was left in it
self.clean_invalidate_dcache(cpuid);
}
- /// Returns whether the D-Cache is currently enabled
+ /// Returns whether the D-cache is currently enabled.
#[inline]
pub fn dcache_enabled() -> bool {
crate::asm::dsb();
@@ -422,20 +436,21 @@ impl SCB {
unsafe { (*Self::ptr()).ccr.read() & SCB_CCR_DC_MASK == SCB_CCR_DC_MASK }
}
- /// Invalidates D-cache
+ /// Invalidates the entire D-cache.
+ ///
+    /// Note that calling this while the D-cache is enabled will probably wipe out the
+    /// stack, depending on optimisations, and thus break returning to the call point.
///
- /// Note that calling this while the dcache is enabled will probably wipe out your
- /// stack, depending on optimisations, breaking returning to the call point.
/// It's used immediately before enabling the dcache, but not exported publicly.
#[inline]
- fn invalidate_dcache(&mut self, cpuid: &mut CPUID) {
- // NOTE(unsafe) All CBP registers are write-only and stateless
- let mut cbp = unsafe { CBP::new() };
+ unsafe fn invalidate_dcache(&mut self, cpuid: &mut CPUID) {
+ // NOTE(unsafe): No races as all CBP registers are write-only and stateless
+ let mut cbp = CBP::new();
// Read number of sets and ways
let (sets, ways) = cpuid.cache_num_sets_ways(0, CsselrCacheType::DataOrUnified);
- // Invalidate entire D-Cache
+ // Invalidate entire D-cache
for set in 0..sets {
for way in 0..ways {
cbp.dcisw(set, way);
@@ -446,10 +461,13 @@ impl SCB {
crate::asm::isb();
}
- /// Cleans D-cache
+ /// Cleans the entire D-cache.
+ ///
+ /// This function causes everything in the D-cache to be written back to main memory,
+ /// overwriting whatever is already there.
#[inline]
pub fn clean_dcache(&mut self, cpuid: &mut CPUID) {
- // NOTE(unsafe) All CBP registers are write-only and stateless
+ // NOTE(unsafe): No races as all CBP registers are write-only and stateless
let mut cbp = unsafe { CBP::new() };
// Read number of sets and ways
@@ -465,10 +483,14 @@ impl SCB {
crate::asm::isb();
}
- /// Cleans and invalidates D-cache
+ /// Cleans and invalidates the entire D-cache.
+ ///
+ /// This function causes everything in the D-cache to be written back to main memory,
+ /// and then marks the entire D-cache as invalid, causing future reads to first fetch
+ /// from main memory.
#[inline]
pub fn clean_invalidate_dcache(&mut self, cpuid: &mut CPUID) {
- // NOTE(unsafe) All CBP registers are write-only and stateless
+ // NOTE(unsafe): No races as all CBP registers are write-only and stateless
let mut cbp = unsafe { CBP::new() };
// Read number of sets and ways
@@ -484,47 +506,175 @@ impl SCB {
crate::asm::isb();
}
- /// Invalidates D-cache by address
+ /// Invalidates D-cache by address.
+ ///
+ /// * `addr`: The address to invalidate, which must be cache-line aligned.
+ /// * `size`: Number of bytes to invalidate, which must be a multiple of the cache line size.
+ ///
+    /// Invalidates D-cache lines, starting from the first line containing `addr`,
+    /// finishing once at least `size` bytes have been invalidated.
+ ///
+ /// Invalidation causes the next read access to memory to be fetched from main memory instead
+ /// of the cache.
+ ///
+ /// # Cache Line Sizes
+ ///
+ /// Cache line sizes vary by core. For all Cortex-M7 cores, the cache line size is fixed
+ /// to 32 bytes, which means `addr` must be 32-byte aligned and `size` must be a multiple
+ /// of 32. At the time of writing, no other Cortex-M cores have data caches.
///
- /// `addr`: the address to invalidate
- /// `size`: size of the memory block, in number of bytes
+ /// If `addr` is not cache-line aligned, or `size` is not a multiple of the cache line size,
+ /// other data before or after the desired memory would also be invalidated, which can very
+ /// easily cause memory corruption and undefined behaviour.
///
- /// Invalidates cache starting from the lowest 32-byte aligned address represented by `addr`,
- /// in blocks of 32 bytes until at least `size` bytes have been invalidated.
+ /// # Safety
+ ///
+ /// After invalidating, the next read of invalidated data will be from main memory. This may
+ /// cause recent writes to be lost, potentially including writes that initialized objects.
+ /// Therefore, this method may cause uninitialized memory or invalid values to be read,
+ /// resulting in undefined behaviour. You must ensure that main memory contains valid and
+ /// initialized values before invalidating.
+ ///
+ /// `addr` **must** be aligned to the size of the cache lines, and `size` **must** be a
+ /// multiple of the cache line size, otherwise this function will invalidate other memory,
+ /// easily leading to memory corruption and undefined behaviour. This precondition is checked
+ /// in debug builds using a `debug_assert!()`, but not checked in release builds to avoid
+ /// a runtime-dependent `panic!()` call.
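+    ///
+    /// # Example
+    ///
+    /// A sketch of invalidating a DMA receive buffer before reading it; the 32-byte-aligned
+    /// buffer and the DMA transfer are assumptions, not part of this API:
+    ///
+    /// ```ignore
+    /// #[repr(align(32))]
+    /// struct Aligned32([u8; 64]);
+    ///
+    /// static mut RX: Aligned32 = Aligned32([0; 64]);
+    ///
+    /// // ... after a DMA transfer into RX has fully completed ...
+    /// unsafe {
+    ///     let addr = &RX as *const Aligned32 as usize;
+    ///     scb.invalidate_dcache_by_address(addr, core::mem::size_of::<Aligned32>());
+    /// }
+    /// ```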
#[inline]
- pub fn invalidate_dcache_by_address(&mut self, addr: usize, size: usize) {
+ pub unsafe fn invalidate_dcache_by_address(&mut self, addr: usize, size: usize) {
// No-op zero sized operations
if size == 0 {
return;
}
- // NOTE(unsafe) All CBP registers are write-only and stateless
- let mut cbp = unsafe { CBP::new() };
+ // NOTE(unsafe): No races as all CBP registers are write-only and stateless
+ let mut cbp = CBP::new();
+
+ // dminline is log2(num words), so 2**dminline * 4 gives size in bytes
+ let dminline = CPUID::cache_dminline();
+ let line_size = (1 << dminline) * 4;
+
+ debug_assert!((addr & (line_size - 1)) == 0);
+ debug_assert!((size & (line_size - 1)) == 0);
crate::asm::dsb();
- // Cache lines are fixed to 32 bit on Cortex-M7 and not present in earlier Cortex-M
- const LINESIZE: usize = 32;
- let num_lines = ((size - 1) / LINESIZE) + 1;
+ // Find number of cache lines to invalidate
+ let num_lines = ((size - 1) / line_size) + 1;
- let mut addr = addr & 0xFFFF_FFE0;
+ // Compute address of first cache line
+ let mask = 0xFFFF_FFFF - (line_size - 1);
+ let mut addr = addr & mask;
for _ in 0..num_lines {
cbp.dcimvac(addr as u32);
- addr += LINESIZE;
+ addr += line_size;
}
crate::asm::dsb();
crate::asm::isb();
}
- /// Cleans D-cache by address
+ /// Invalidates an object from the D-cache.
+ ///
+ /// * `obj`: The object to invalidate.
+ ///
+ /// Invalidates D-cache starting from the first cache line containing `obj`,
+ /// continuing to invalidate cache lines until all of `obj` has been invalidated.
+ ///
+ /// Invalidation causes the next read access to memory to be fetched from main memory instead
+ /// of the cache.
+ ///
+ /// # Cache Line Sizes
+ ///
+ /// Cache line sizes vary by core. For all Cortex-M7 cores, the cache line size is fixed
+ /// to 32 bytes, which means `obj` must be 32-byte aligned, and its size must be a multiple
+ /// of 32 bytes. At the time of writing, no other Cortex-M cores have data caches.
+ ///
+ /// If `obj` is not cache-line aligned, or its size is not a multiple of the cache line size,
+ /// other data before or after the desired memory would also be invalidated, which can very
+ /// easily cause memory corruption and undefined behaviour.
+ ///
+ /// # Safety
+ ///
+ /// After invalidating, `obj` will be read from main memory on next access. This may cause
+ /// recent writes to `obj` to be lost, potentially including the write that initialized it.
+ /// Therefore, this method may cause uninitialized memory or invalid values to be read,
+ /// resulting in undefined behaviour. You must ensure that main memory contains a valid and
+ /// initialized value for T before invalidating `obj`.
+ ///
+ /// `obj` **must** be aligned to the size of the cache lines, and its size **must** be a
+ /// multiple of the cache line size, otherwise this function will invalidate other memory,
+ /// easily leading to memory corruption and undefined behaviour. This precondition is checked
+ /// in debug builds using a `debug_assert!()`, but not checked in release builds to avoid
+ /// a runtime-dependent `panic!()` call.
+ #[inline]
+ pub unsafe fn invalidate_dcache_by_ref<T>(&mut self, obj: &mut T) {
+ self.invalidate_dcache_by_address(obj as *const T as usize, core::mem::size_of::<T>());
+ }
+
+ /// Invalidates a slice from the D-cache.
+ ///
+ /// * `slice`: The slice to invalidate.
+ ///
+ /// Invalidates D-cache starting from the first cache line containing members of `slice`,
+ /// continuing to invalidate cache lines until all of `slice` has been invalidated.
+ ///
+ /// Invalidation causes the next read access to memory to be fetched from main memory instead
+ /// of the cache.
+ ///
+ /// # Cache Line Sizes
+ ///
+ /// Cache line sizes vary by core. For all Cortex-M7 cores, the cache line size is fixed
+ /// to 32 bytes, which means `slice` must be 32-byte aligned, and its size must be a multiple
+ /// of 32 bytes. At the time of writing, no other Cortex-M cores have data caches.
+ ///
+ /// If `slice` is not cache-line aligned, or its size is not a multiple of the cache line size,
+ /// other data before or after the desired memory would also be invalidated, which can very
+ /// easily cause memory corruption and undefined behaviour.
+ ///
+ /// # Safety
///
- /// `addr`: the address to clean
- /// `size`: size of the memory block, in number of bytes
+ /// After invalidating, `slice` will be read from main memory on next access. This may cause
+ /// recent writes to `slice` to be lost, potentially including the write that initialized it.
+ /// Therefore, this method may cause uninitialized memory or invalid values to be read,
+ /// resulting in undefined behaviour. You must ensure that main memory contains valid and
+ /// initialized values for T before invalidating `slice`.
///
- /// Cleans cache starting from the lowest 32-byte aligned address represented by `addr`,
- /// in blocks of 32 bytes until at least `size` bytes have been cleaned.
+ /// `slice` **must** be aligned to the size of the cache lines, and its size **must** be a
+ /// multiple of the cache line size, otherwise this function will invalidate other memory,
+ /// easily leading to memory corruption and undefined behaviour. This precondition is checked
+ /// in debug builds using a `debug_assert!()`, but not checked in release builds to avoid
+ /// a runtime-dependent `panic!()` call.
+ #[inline]
+ pub unsafe fn invalidate_dcache_by_slice<T>(&mut self, slice: &mut [T]) {
+ self.invalidate_dcache_by_address(
+ slice.as_ptr() as usize,
+ slice.len() * core::mem::size_of::<T>(),
+ );
+ }
+
+ /// Cleans D-cache by address.
+ ///
+ /// * `addr`: The address to start cleaning at.
+ /// * `size`: The number of bytes to clean.
+ ///
+    /// Cleans D-cache lines, starting from the first line containing `addr`,
+    /// finishing once at least `size` bytes have been cleaned.
+ ///
+ /// Cleaning the cache causes whatever data is present in the cache to be immediately written
+ /// to main memory, overwriting whatever was in main memory.
+ ///
+ /// # Cache Line Sizes
+ ///
+ /// Cache line sizes vary by core. For all Cortex-M7 cores, the cache line size is fixed
+ /// to 32 bytes, which means `addr` should generally be 32-byte aligned and `size` should be a
+ /// multiple of 32. At the time of writing, no other Cortex-M cores have data caches.
+ ///
+ /// If `addr` is not cache-line aligned, or `size` is not a multiple of the cache line size,
+ /// other data before or after the desired memory will also be cleaned. From the point of view
+ /// of the core executing this function, memory remains consistent, so this is not unsound,
+ /// but is worth knowing about.
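+    ///
+    /// # Example
+    ///
+    /// A sketch of cleaning a buffer before a DMA engine reads it from main memory; the
+    /// buffer layout and the DMA start are assumptions, not part of this API:
+    ///
+    /// ```ignore
+    /// #[repr(align(32))]
+    /// struct Aligned32([u8; 64]);
+    ///
+    /// static mut TX: Aligned32 = Aligned32([0; 64]);
+    ///
+    /// // Ensure the data has reached main memory before starting the DMA read.
+    /// let addr = unsafe { &TX as *const Aligned32 as usize };
+    /// scb.clean_dcache_by_address(addr, core::mem::size_of::<Aligned32>());
+    /// // ... start the DMA transfer from TX ...
+    /// ```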
#[inline]
pub fn clean_dcache_by_address(&mut self, addr: usize, size: usize) {
// No-op zero sized operations
@@ -532,34 +682,78 @@ impl SCB {
return;
}
- // NOTE(unsafe) All CBP registers are write-only and stateless
+ // NOTE(unsafe): No races as all CBP registers are write-only and stateless
let mut cbp = unsafe { CBP::new() };
crate::asm::dsb();
- // Cache lines are fixed to 32 bit on Cortex-M7 and not present in earlier Cortex-M
- const LINESIZE: usize = 32;
- let num_lines = ((size - 1) / LINESIZE) + 1;
+ let dminline = CPUID::cache_dminline();
+ let line_size = (1 << dminline) * 4;
+ let num_lines = ((size - 1) / line_size) + 1;
- let mut addr = addr & 0xFFFF_FFE0;
+ let mask = 0xFFFF_FFFF - (line_size - 1);
+ let mut addr = addr & mask;
for _ in 0..num_lines {
cbp.dccmvac(addr as u32);
- addr += LINESIZE;
+ addr += line_size;
}
crate::asm::dsb();
crate::asm::isb();
}
- /// Cleans and invalidates D-cache by address
+ /// Cleans an object from the D-cache.
///
- /// `addr`: the address to clean and invalidate
- /// `size`: size of the memory block, in number of bytes
+ /// * `obj`: The object to clean.
///
- /// Cleans and invalidates cache starting from the lowest 32-byte aligned address represented
- /// by `addr`, in blocks of 32 bytes until at least `size` bytes have been cleaned and
- /// invalidated.
+ /// Cleans D-cache starting from the first cache line containing `obj`,
+ /// continuing to clean cache lines until all of `obj` has been cleaned.
+ ///
+    /// It is recommended that `obj` is aligned to the cache line size and that its size is a
+    /// multiple of the cache line size, otherwise surrounding data will also be cleaned.
+ ///
+ /// Cleaning the cache causes whatever data is present in the cache to be immediately written
+ /// to main memory, overwriting whatever was in main memory.
+ #[inline]
+ pub fn clean_dcache_by_ref<T>(&mut self, obj: &T) {
+ self.clean_dcache_by_address(obj as *const T as usize, core::mem::size_of::<T>());
+ }
+
+ /// Cleans a slice from D-cache.
+ ///
+ /// * `slice`: The slice to clean.
+ ///
+ /// Cleans D-cache starting from the first cache line containing members of `slice`,
+ /// continuing to clean cache lines until all of `slice` has been cleaned.
+ ///
+    /// It is recommended that `slice` is aligned to the cache line size and that its size is a
+    /// multiple of the cache line size, otherwise surrounding data will also be cleaned.
+ ///
+ /// Cleaning the cache causes whatever data is present in the cache to be immediately written
+ /// to main memory, overwriting whatever was in main memory.
+ #[inline]
+ pub fn clean_dcache_by_slice<T>(&mut self, slice: &[T]) {
+ self.clean_dcache_by_address(
+ slice.as_ptr() as usize,
+ slice.len() * core::mem::size_of::<T>(),
+ );
+ }
+
+ /// Cleans and invalidates D-cache by address.
+ ///
+ /// * `addr`: The address to clean and invalidate.
+ /// * `size`: The number of bytes to clean and invalidate.
+ ///
+ /// Cleans and invalidates D-cache starting from the first cache line containing `addr`,
+ /// finishing once at least `size` bytes have been cleaned and invalidated.
+ ///
+ /// It is recommended that `addr` is aligned to the cache line size and `size` is a multiple of
+ /// the cache line size, otherwise surrounding data will also be cleaned.
+ ///
+ /// Cleaning and invalidating causes data in the D-cache to be written back to main memory,
+ /// and then marks that data in the D-cache as invalid, causing future reads to first fetch
+ /// from main memory.
#[inline]
pub fn clean_invalidate_dcache_by_address(&mut self, addr: usize, size: usize) {
// No-op zero sized operations
@@ -567,7 +761,7 @@ impl SCB {
return;
}
- // NOTE(unsafe) All CBP registers are write-only and stateless
+ // NOTE(unsafe): No races as all CBP registers are write-only and stateless
let mut cbp = unsafe { CBP::new() };
crate::asm::dsb();
@@ -634,27 +828,6 @@ const SCB_AIRCR_SYSRESETREQ: u32 = 1 << 2;
impl SCB {
/// Initiate a system reset request to reset the MCU
- #[deprecated(since = "0.6.1", note = "Use `SCB::sys_reset`")]
- #[inline]
- pub fn system_reset(&mut self) -> ! {
- crate::asm::dsb();
- unsafe {
- self.aircr.modify(
- |r| {
- SCB_AIRCR_VECTKEY | // otherwise the write is ignored
- r & SCB_AIRCR_PRIGROUP_MASK | // keep priority group unchanged
- SCB_AIRCR_SYSRESETREQ
- }, // set the bit
- )
- };
- crate::asm::dsb();
- loop {
- // wait for the reset
- crate::asm::nop(); // avoid rust-lang/rust#28728
- }
- }
-
- /// Initiate a system reset request to reset the MCU
#[inline]
pub fn sys_reset() -> ! {
crate::asm::dsb();
@@ -730,57 +903,38 @@ impl SCB {
/// System handlers, exceptions with configurable priority
#[allow(clippy::missing_inline_in_public_items)]
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
+#[repr(u8)]
pub enum SystemHandler {
// NonMaskableInt, // priority is fixed
// HardFault, // priority is fixed
/// Memory management interrupt (not present on Cortex-M0 variants)
#[cfg(not(armv6m))]
- MemoryManagement,
+ MemoryManagement = 4,
/// Bus fault interrupt (not present on Cortex-M0 variants)
#[cfg(not(armv6m))]
- BusFault,
+ BusFault = 5,
/// Usage fault interrupt (not present on Cortex-M0 variants)
#[cfg(not(armv6m))]
- UsageFault,
+ UsageFault = 6,
/// Secure fault interrupt (only on ARMv8-M)
#[cfg(any(armv8m, target_arch = "x86_64"))]
- SecureFault,
+ SecureFault = 7,
/// SV call interrupt
- SVCall,
+ SVCall = 11,
/// Debug monitor interrupt (not present on Cortex-M0 variants)
#[cfg(not(armv6m))]
- DebugMonitor,
+ DebugMonitor = 12,
/// Pend SV interrupt
- PendSV,
+ PendSV = 14,
/// System Tick interrupt
- SysTick,
-}
-
-impl SystemHandler {
- fn index(self) -> u8 {
- match self {
- #[cfg(not(armv6m))]
- SystemHandler::MemoryManagement => 4,
- #[cfg(not(armv6m))]
- SystemHandler::BusFault => 5,
- #[cfg(not(armv6m))]
- SystemHandler::UsageFault => 6,
- #[cfg(any(armv8m, target_arch = "x86_64"))]
- SystemHandler::SecureFault => 7,
- SystemHandler::SVCall => 11,
- #[cfg(not(armv6m))]
- SystemHandler::DebugMonitor => 12,
- SystemHandler::PendSV => 14,
- SystemHandler::SysTick => 15,
- }
- }
+ SysTick = 15,
}
impl SCB {
@@ -790,18 +944,28 @@ impl SCB {
/// [`NVIC.get_priority`](struct.NVIC.html#method.get_priority) for more details.
#[inline]
pub fn get_priority(system_handler: SystemHandler) -> u8 {
- let index = system_handler.index();
+ let index = system_handler as u8;
#[cfg(not(armv6m))]
{
// NOTE(unsafe) atomic read with no side effects
- unsafe { (*Self::ptr()).shpr[usize::from(index - 4)].read() }
+
+ // NOTE(unsafe): Index is bounded to [4,15] by SystemHandler design.
+            // TODO: Review this after rust-lang/rust#13926 is fixed.
+            let priority_ref = unsafe { (*Self::ptr()).shpr.get_unchecked(usize::from(index - 4)) };
+
+ priority_ref.read()
}
#[cfg(armv6m)]
{
// NOTE(unsafe) atomic read with no side effects
- let shpr = unsafe { (*Self::ptr()).shpr[usize::from((index - 8) / 4)].read() };
+
+ // NOTE(unsafe): Index is bounded to [11,15] by SystemHandler design.
+            // TODO: Review this after rust-lang/rust#13926 is fixed.
+            let priority_ref =
+                unsafe { (*Self::ptr()).shpr.get_unchecked(usize::from((index - 8) / 4)) };
+
+ let shpr = priority_ref.read();
let prio = (shpr >> (8 * (index % 4))) & 0x0000_00ff;
prio as u8
}
@@ -821,16 +985,24 @@ impl SCB {
/// [`register::basepri`](../register/basepri/index.html)) and compromise memory safety.
#[inline]
pub unsafe fn set_priority(&mut self, system_handler: SystemHandler, prio: u8) {
- let index = system_handler.index();
+ let index = system_handler as u8;
#[cfg(not(armv6m))]
{
- self.shpr[usize::from(index - 4)].write(prio)
+ // NOTE(unsafe): Index is bounded to [4,15] by SystemHandler design.
+            // TODO: Review this after rust-lang/rust#13926 is fixed.
+ let priority_ref = (*Self::ptr()).shpr.get_unchecked(usize::from(index - 4));
+
+ priority_ref.write(prio)
}
#[cfg(armv6m)]
{
- self.shpr[usize::from((index - 8) / 4)].modify(|value| {
+ // NOTE(unsafe): Index is bounded to [11,15] by SystemHandler design.
+            // TODO: Review this after rust-lang/rust#13926 is fixed.
+ let priority_ref = (*Self::ptr()).shpr.get_unchecked(usize::from((index - 8) / 4));
+
+ priority_ref.modify(|value| {
let shift = 8 * (index % 4);
let mask = 0x0000_00ff << shift;
let prio = u32::from(prio) << shift;
diff --git a/src/register/mod.rs b/src/register/mod.rs
index e7879c5..d69c1a5 100644
--- a/src/register/mod.rs
+++ b/src/register/mod.rs
@@ -29,35 +29,14 @@
#[cfg(all(not(armv6m), not(armv8m_base)))]
pub mod basepri;
-#[cfg(armv8m_base)]
-#[deprecated(
- since = "0.6.2",
- note = "basepri is unavailable on thumbv8.base, and will be removed in the next release"
-)]
-pub mod basepri;
-
#[cfg(all(not(armv6m), not(armv8m_base)))]
pub mod basepri_max;
-#[cfg(armv8m_base)]
-#[deprecated(
- since = "0.6.2",
- note = "basepri is unavailable on thumbv8m.base, and will be removed in the next release"
-)]
-pub mod basepri_max;
-
pub mod control;
#[cfg(all(not(armv6m), not(armv8m_base)))]
pub mod faultmask;
-#[cfg(armv8m_base)]
-#[deprecated(
- since = "0.6.2",
- note = "faultmask is unavailable on thumbv8m.base, and will be removed in the next release"
-)]
-pub mod faultmask;
-
pub mod msp;
pub mod primask;