author    Emil Fresk <emil.fresk@gmail.com> 2020-04-21 21:58:44 +0200
committer GitHub <noreply@github.com> 2020-04-21 21:58:44 +0200
commit    03a788a4e234ddc6a38c5545417928b8cbe62a05 (patch)
tree      b17e8b72b8792a4c3cb5ba4a1b56cf7b5fbc35a4
parent    b4635839218108d4c68158ad38ca26e063137f79 (diff)
parent    e41b27331c70865b89b5584b13c0b469de30daff (diff)
download  cortex-m-03a788a4e234ddc6a38c5545417928b8cbe62a05.tar.gz
          cortex-m-03a788a4e234ddc6a38c5545417928b8cbe62a05.tar.zst
          cortex-m-03a788a4e234ddc6a38c5545417928b8cbe62a05.zip
Merge branch 'master' into mutex_add
-rw-r--r--  Cargo.toml                       1
-rw-r--r--  asm-v8.s                         27
-rw-r--r--  asm.s                            7
-rwxr-xr-x  assemble.sh                      9
-rw-r--r--  bin/thumbv6m-none-eabi.a         bin 2956 -> 3070 bytes
-rw-r--r--  bin/thumbv7em-none-eabi.a        bin 5184 -> 5298 bytes
-rw-r--r--  bin/thumbv7em-none-eabihf.a      bin 5184 -> 5298 bytes
-rw-r--r--  bin/thumbv7m-none-eabi.a         bin 4126 -> 4240 bytes
-rw-r--r--  bin/thumbv8m.base-none-eabi.a    bin 2960 -> 4170 bytes
-rw-r--r--  bin/thumbv8m.main-none-eabi.a    bin 5370 -> 6580 bytes
-rw-r--r--  bin/thumbv8m.main-none-eabihf.a  bin 5370 -> 6580 bytes
-rw-r--r--  src/asm.rs                       172
-rw-r--r--  src/cmse.rs                      240
-rw-r--r--  src/lib.rs                       6
-rw-r--r--  src/peripheral/cbp.rs            48
-rw-r--r--  src/peripheral/cpuid.rs          24
-rw-r--r--  src/peripheral/fpb.rs            2
-rw-r--r--  src/peripheral/fpu.rs            2
-rw-r--r--  src/peripheral/itm.rs            2
-rw-r--r--  src/peripheral/mod.rs            119
-rw-r--r--  src/peripheral/sau.rs            243
-rw-r--r--  src/peripheral/scb.rs            399
-rw-r--r--  src/peripheral/tpiu.rs           2
-rw-r--r--  triagebot.toml                   1
24 files changed, 1119 insertions, 185 deletions
diff --git a/Cargo.toml b/Cargo.toml
index b1c6114..d93caca 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -19,6 +19,7 @@ links = "cortex-m" # prevent multiple versions of this crate to be linked toget
bare-metal = { version = "0.2.0", features = ["const-fn"] }
volatile-register = "0.2.0"
mutex-trait = "0.2.0"
+bitfield = "0.13.2"
[features]
cm7-r0p1 = []
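
The new `bitfield` dependency backs the register and response types introduced further down in this diff (`TtResp` in src/cmse.rs and the SAU registers in src/peripheral/sau.rs). As a minimal illustration of the accessor generation the macro provides — not part of the commit, names are made up:

```rust
use bitfield::bitfield;

bitfield! {
    /// A flag bit and a 7-bit field packed in a u32, in the style used
    /// later in this diff for TtResp, Ctrl, Rnr, etc.
    #[derive(Copy, Clone)]
    pub struct Example(u32);
    get_flag, set_flag: 0;      // single-bit spec -> bool accessors
    get_field, set_field: 7, 1; // bit-range spec -> integer accessors
}

let mut e = Example(0);
e.set_flag(true);
e.set_field(0x2A);
assert_eq!(e.0, (0x2A << 1) | 1);
```
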
diff --git a/asm-v8.s b/asm-v8.s
new file mode 100644
index 0000000..b667bf0
--- /dev/null
+++ b/asm-v8.s
@@ -0,0 +1,27 @@
+ .section .text.__tt
+ .global __tt
+ .thumb_func
+__tt:
+ tt r0, r0
+ bx lr
+
+ .section .text.__ttt
+ .global __ttt
+ .thumb_func
+__ttt:
+ ttt r0, r0
+ bx lr
+
+ .section .text.__tta
+ .global __tta
+ .thumb_func
+__tta:
+ tta r0, r0
+ bx lr
+
+ .section .text.__ttat
+ .global __ttat
+ .thumb_func
+__ttat:
+ ttat r0, r0
+ bx lr
diff --git a/asm.s b/asm.s
index fd2c7fa..ed1ec3f 100644
--- a/asm.s
+++ b/asm.s
@@ -114,6 +114,13 @@ __sev:
sev
bx lr
+
+ .section .text.__udf
+ .global __udf
+ .thumb_func
+__udf:
+ udf
+
.section .text.__wfe
.global __wfe
.thumb_func
diff --git a/assemble.sh b/assemble.sh
index 497925f..f63e837 100755
--- a/assemble.sh
+++ b/assemble.sh
@@ -23,15 +23,18 @@ ar crs bin/thumbv7em-none-eabi.a bin/$crate.o bin/$crate-v7.o bin/$crate-cm7-r0p
ar crs bin/thumbv7em-none-eabihf.a bin/$crate.o bin/$crate-v7.o bin/$crate-cm7-r0p1.o
arm-none-eabi-as -march=armv8-m.base asm.s -o bin/$crate.o
-ar crs bin/thumbv8m.base-none-eabi.a bin/$crate.o
+arm-none-eabi-as -march=armv8-m.base asm-v8.s -o bin/$crate-v8.o
+ar crs bin/thumbv8m.base-none-eabi.a bin/$crate.o bin/$crate-v8.o
arm-none-eabi-as -march=armv8-m.main asm.s -o bin/$crate.o
arm-none-eabi-as -march=armv8-m.main asm-v7.s -o bin/$crate-v7.o
+arm-none-eabi-as -march=armv8-m.main asm-v8.s -o bin/$crate-v8.o
arm-none-eabi-as -march=armv8-m.main asm-v8-main.s -o bin/$crate-v8-main.o
-ar crs bin/thumbv8m.main-none-eabi.a bin/$crate.o bin/$crate-v7.o bin/$crate-v8-main.o
-ar crs bin/thumbv8m.main-none-eabihf.a bin/$crate.o bin/$crate-v7.o bin/$crate-v8-main.o
+ar crs bin/thumbv8m.main-none-eabi.a bin/$crate.o bin/$crate-v7.o bin/$crate-v8.o bin/$crate-v8-main.o
+ar crs bin/thumbv8m.main-none-eabihf.a bin/$crate.o bin/$crate-v7.o bin/$crate-v8.o bin/$crate-v8-main.o
rm bin/$crate.o
rm bin/$crate-v7.o
rm bin/$crate-cm7-r0p1.o
+rm bin/$crate-v8.o
rm bin/$crate-v8-main.o
diff --git a/bin/thumbv6m-none-eabi.a b/bin/thumbv6m-none-eabi.a
index 0684d4e..06a7cb7 100644
--- a/bin/thumbv6m-none-eabi.a
+++ b/bin/thumbv6m-none-eabi.a
Binary files differ
diff --git a/bin/thumbv7em-none-eabi.a b/bin/thumbv7em-none-eabi.a
index cbfe5ae..48da24d 100644
--- a/bin/thumbv7em-none-eabi.a
+++ b/bin/thumbv7em-none-eabi.a
Binary files differ
diff --git a/bin/thumbv7em-none-eabihf.a b/bin/thumbv7em-none-eabihf.a
index cbfe5ae..48da24d 100644
--- a/bin/thumbv7em-none-eabihf.a
+++ b/bin/thumbv7em-none-eabihf.a
Binary files differ
diff --git a/bin/thumbv7m-none-eabi.a b/bin/thumbv7m-none-eabi.a
index 6e77aeb..296f66f 100644
--- a/bin/thumbv7m-none-eabi.a
+++ b/bin/thumbv7m-none-eabi.a
Binary files differ
diff --git a/bin/thumbv8m.base-none-eabi.a b/bin/thumbv8m.base-none-eabi.a
index 78ae15c..026250b 100644
--- a/bin/thumbv8m.base-none-eabi.a
+++ b/bin/thumbv8m.base-none-eabi.a
Binary files differ
diff --git a/bin/thumbv8m.main-none-eabi.a b/bin/thumbv8m.main-none-eabi.a
index e751232..6848518 100644
--- a/bin/thumbv8m.main-none-eabi.a
+++ b/bin/thumbv8m.main-none-eabi.a
Binary files differ
diff --git a/bin/thumbv8m.main-none-eabihf.a b/bin/thumbv8m.main-none-eabihf.a
index e751232..6848518 100644
--- a/bin/thumbv8m.main-none-eabihf.a
+++ b/bin/thumbv8m.main-none-eabihf.a
Binary files differ
diff --git a/src/asm.rs b/src/asm.rs
index 5a35fa3..b7ff19e 100644
--- a/src/asm.rs
+++ b/src/asm.rs
@@ -42,7 +42,7 @@ pub fn delay(_n: u32) {
bne.n 1b"
: "+r"(_n / 4 + 1)
:
- :
+ : "cpsr"
: "volatile");
},
@@ -81,6 +81,35 @@ pub fn nop() {
}
}
+
+/// Generate an Undefined Instruction exception.
+///
+/// Can be used as a stable alternative to `core::intrinsics::abort`.
+#[inline]
+pub fn udf() -> ! {
+ match () {
+ #[cfg(all(cortex_m, feature = "inline-asm"))]
+ () => unsafe {
+ asm!("udf" :::: "volatile");
+ core::hint::unreachable_unchecked();
+ },
+
+ #[cfg(all(cortex_m, not(feature = "inline-asm")))]
+ () => unsafe {
+ extern "C" {
+ fn __udf();
+ }
+
+ __udf();
+
+ core::hint::unreachable_unchecked();
+ },
+
+ #[cfg(not(cortex_m))]
+ () => unimplemented!(),
+ }
+}
+
/// Wait For Event
#[inline]
pub fn wfe() {
@@ -222,3 +251,144 @@ pub fn dmb() {
() => unimplemented!(),
}
}
+
+/// Test Target
+///
+/// Queries the Security state and access permissions of a memory location.
+/// Returns a Test Target Response Payload (cf. section D1.2.215 of the
+/// Armv8-M Architecture Reference Manual).
+#[inline]
+#[cfg(armv8m)]
+// The __tt function does not dereference the pointer received.
+#[allow(clippy::not_unsafe_ptr_arg_deref)]
+pub fn tt(addr: *mut u32) -> u32 {
+ match () {
+ #[cfg(all(cortex_m, feature = "inline-asm"))]
+ () => {
+ let tt_resp: u32;
+ unsafe {
+ asm!("tt $0, $1" : "=r"(tt_resp) : "r"(addr) :: "volatile");
+ }
+ tt_resp
+ }
+
+ #[cfg(all(cortex_m, not(feature = "inline-asm")))]
+ () => unsafe {
+ extern "C" {
+ fn __tt(_: *mut u32) -> u32;
+ }
+
+ __tt(addr)
+ },
+
+ #[cfg(not(cortex_m))]
+ () => unimplemented!(),
+ }
+}
+
+/// Test Target Unprivileged
+///
+/// Queries the Security state and access permissions of a memory location for an unprivileged
+/// access to that location.
+/// Returns a Test Target Response Payload (cf. section D1.2.215 of the
+/// Armv8-M Architecture Reference Manual).
+#[inline]
+#[cfg(armv8m)]
+// The __ttt function does not dereference the pointer received.
+#[allow(clippy::not_unsafe_ptr_arg_deref)]
+pub fn ttt(addr: *mut u32) -> u32 {
+ match () {
+ #[cfg(all(cortex_m, feature = "inline-asm"))]
+ () => {
+ let tt_resp: u32;
+ unsafe {
+ asm!("ttt $0, $1" : "=r"(tt_resp) : "r"(addr) :: "volatile");
+ }
+ tt_resp
+ }
+
+ #[cfg(all(cortex_m, not(feature = "inline-asm")))]
+ () => unsafe {
+ extern "C" {
+ fn __ttt(_: *mut u32) -> u32;
+ }
+
+ __ttt(addr)
+ },
+
+ #[cfg(not(cortex_m))]
+ () => unimplemented!(),
+ }
+}
+
+/// Test Target Alternate Domain
+///
+/// Queries the Security state and access permissions of a memory location for a Non-Secure access
+/// to that location. This instruction is only valid when executing in Secure state and is
+/// undefined if used from Non-Secure state.
+/// Returns a Test Target Response Payload (cf. section D1.2.215 of the
+/// Armv8-M Architecture Reference Manual).
+#[inline]
+#[cfg(armv8m)]
+// The __tta function does not dereference the pointer received.
+#[allow(clippy::not_unsafe_ptr_arg_deref)]
+pub fn tta(addr: *mut u32) -> u32 {
+ match () {
+ #[cfg(all(cortex_m, feature = "inline-asm"))]
+ () => {
+ let tt_resp: u32;
+ unsafe {
+ asm!("tta $0, $1" : "=r"(tt_resp) : "r"(addr) :: "volatile");
+ }
+ tt_resp
+ }
+
+ #[cfg(all(cortex_m, not(feature = "inline-asm")))]
+ () => unsafe {
+ extern "C" {
+ fn __tta(_: *mut u32) -> u32;
+ }
+
+ __tta(addr)
+ },
+
+ #[cfg(not(cortex_m))]
+ () => unimplemented!(),
+ }
+}
+
+/// Test Target Alternate Domain Unprivileged
+///
+/// Queries the Security state and access permissions of a memory location for a Non-Secure and
+/// unprivileged access to that location. This instruction is only valid when executing in Secure
+/// state and is undefined if used from Non-Secure state.
+/// Returns a Test Target Response Payload (cf. section D1.2.215 of the
+/// Armv8-M Architecture Reference Manual).
+#[inline]
+#[cfg(armv8m)]
+// The __ttat function does not dereference the pointer received.
+#[allow(clippy::not_unsafe_ptr_arg_deref)]
+pub fn ttat(addr: *mut u32) -> u32 {
+ match () {
+ #[cfg(all(cortex_m, feature = "inline-asm"))]
+ () => {
+ let tt_resp: u32;
+ unsafe {
+ asm!("ttat $0, $1" : "=r"(tt_resp) : "r"(addr) :: "volatile");
+ }
+ tt_resp
+ }
+
+ #[cfg(all(cortex_m, not(feature = "inline-asm")))]
+ () => unsafe {
+ extern "C" {
+ fn __ttat(_: *mut u32) -> u32;
+ }
+
+ __ttat(addr)
+ },
+
+ #[cfg(not(cortex_m))]
+ () => unimplemented!(),
+ }
+}
diff --git a/src/cmse.rs b/src/cmse.rs
new file mode 100644
index 0000000..393e463
--- /dev/null
+++ b/src/cmse.rs
@@ -0,0 +1,240 @@
+//! Cortex-M Security Extensions
+//!
+//! This module provides several helper functions to support Armv8-M and Armv8.1-M Security
+//! Extensions.
+//! Most of this implementation is directly inspired by the "Armv8-M Security Extensions:
+//! Requirements on Development Tools" document available here:
+//! https://developer.arm.com/docs/ecm0359818/latest
+//!
+//! Please note that support for the TT instructions, as described in part 4 of the document
+//! linked above, is not part of CMSE but is still provided by this module. The TT instructions
+//! return the configuration of the Memory Protection Unit at an address.
+//!
+//! # Notes
+//!
+//! * Non-Secure Unprivileged code will always read zeroes from TestTarget and should not use it.
+//! * Non-Secure Privileged code can check current (AccessType::Current) and Non-Secure Unprivileged
+//! accesses (AccessType::Unprivileged).
+//! * Secure Unprivileged code can check Non-Secure Unprivileged accesses (AccessType::NonSecure).
+//! * Secure Privileged code can check all access types.
+//!
+//! # Example
+//!
+//! ```
+//! use cortex_m::cmse::{TestTarget, AccessType};
+//!
+//! // suspect_address was handed by Non-Secure code to a Secure function that
+//! // is asked to write to it. But is that write allowed?
+//! let suspect_address_test = TestTarget::check(0xDEADBEEF as *mut u32,
+//!                                              AccessType::NonSecureUnprivileged);
+//! if suspect_address_test.ns_read_and_writable() {
+//!     // Non-Secure can read and write this address, so the write is permitted.
+//! }
+//! ```
+
+use crate::asm::{tt, tta, ttat, ttt};
+use bitfield::bitfield;
+
+/// Memory access behaviour: determines which privilege execution mode and which Memory
+/// Protection Unit (MPU) are used for the test.
+#[allow(clippy::missing_inline_in_public_items)]
+#[derive(PartialEq, Copy, Clone, Debug)]
+pub enum AccessType {
+ /// Access using current privilege level and reading from current security state MPU.
+ /// Uses the TT instruction.
+ Current,
+ /// Unprivileged access reading from current security state MPU. Uses the TTT instruction.
+ Unprivileged,
+ /// Access using current privilege level reading from Non-Secure MPU. Uses the TTA instruction.
+ /// Undefined if used from Non-Secure state.
+ NonSecure,
+    /// Unprivileged access reading from Non-Secure MPU. Uses the TTAT instruction.
+ /// Undefined if used from Non-Secure state.
+ NonSecureUnprivileged,
+}
+
+/// Abstraction of TT instructions and helper functions to determine the security and privilege
+/// attribute of a target address, accessed in different ways.
+#[allow(clippy::missing_inline_in_public_items)]
+#[derive(PartialEq, Copy, Clone, Debug)]
+pub struct TestTarget {
+ tt_resp: TtResp,
+ access_type: AccessType,
+}
+
+bitfield! {
+ /// Test Target Response Payload
+ ///
+ /// Provides the response payload from a TT, TTA, TTT or TTAT instruction.
+ #[derive(PartialEq, Copy, Clone)]
+ struct TtResp(u32);
+ impl Debug;
+ mregion, _: 7, 0;
+ sregion, _: 15, 8;
+ mrvalid, _: 16;
+ srvalid, _: 17;
+ r, _: 18;
+ rw, _: 19;
+ nsr, _: 20;
+ nsrw, _: 21;
+ s, _: 22;
+ irvalid, _: 23;
+ iregion, _: 31, 24;
+}
+
+impl TestTarget {
+ /// Creates a Test Target Response Payload by testing addr using access_type.
+ #[inline]
+ pub fn check(addr: *mut u32, access_type: AccessType) -> Self {
+ let tt_resp = match access_type {
+ AccessType::Current => TtResp(tt(addr)),
+ AccessType::Unprivileged => TtResp(ttt(addr)),
+ AccessType::NonSecure => TtResp(tta(addr)),
+ AccessType::NonSecureUnprivileged => TtResp(ttat(addr)),
+ };
+
+ TestTarget {
+ tt_resp,
+ access_type,
+ }
+ }
+
+ /// Creates a Test Target Response Payload by testing the zone from addr to addr + size - 1
+ /// using access_type.
+ /// Returns None if:
+ /// * the address zone overlaps SAU, IDAU or MPU region boundaries
+ /// * size is 0
+ /// * addr + size - 1 overflows
+ #[inline]
+ pub fn check_range(addr: *mut u32, size: usize, access_type: AccessType) -> Option<Self> {
+ let begin: usize = addr as usize;
+ // Last address of the range (addr + size - 1). This also checks if size is 0.
+ let end: usize = begin.checked_add(size.checked_sub(1)?)?;
+
+ // Regions are aligned at 32-byte boundaries. If the address range fits in one 32-byte
+ // address line, a single TT instruction suffices. This is the case when the following
+ // constraint holds.
+ let single_check: bool = (begin % 32).checked_add(size)? <= 32usize;
+
+ let test_start = TestTarget::check(addr, access_type);
+
+ if single_check {
+ Some(test_start)
+ } else {
+ let test_end = TestTarget::check(end as *mut u32, access_type);
+ // Check that the range does not cross SAU, IDAU or MPU region boundaries.
+ if test_start != test_end {
+ None
+ } else {
+ Some(test_start)
+ }
+ }
+ }
+
+ /// Access type that was used for this test target.
+ #[inline]
+ pub fn access_type(self) -> AccessType {
+ self.access_type
+ }
+
+ /// Get the raw u32 value returned by the TT instruction used.
+ #[inline]
+ pub fn as_u32(self) -> u32 {
+ self.tt_resp.0
+ }
+
+ /// Read accessibility of the target address. Only returns the MPU settings without checking
+ /// the Security state of the target.
+ /// For Unprivileged and NonSecureUnprivileged access types, returns the permissions for
+ /// unprivileged access, regardless of whether the current mode is privileged or unprivileged.
+ /// Returns false if the TT instruction was executed from an unprivileged mode
+ /// and the NonSecure access type was not specified.
+ /// Returns false if the address matches multiple MPU regions.
+ #[inline]
+ pub fn readable(self) -> bool {
+ self.tt_resp.r()
+ }
+
+ /// Read and write accessibility of the target address. Only returns the MPU settings without
+ /// checking the Security state of the target.
+ /// For Unprivileged and NonSecureUnprivileged access types, returns the permissions for
+ /// unprivileged access, regardless of whether the current mode is privileged or unprivileged.
+ /// Returns false if the TT instruction was executed from an unprivileged mode
+ /// and the NonSecure access type was not specified.
+ /// Returns false if the address matches multiple MPU regions.
+ #[inline]
+ pub fn read_and_writable(self) -> bool {
+ self.tt_resp.rw()
+ }
+
+    /// Indicate the MPU region number containing the target address.
+    /// Returns None if the value is not valid:
+    /// * the MPU is not implemented or MPU_CTRL.ENABLE is set to zero
+    /// * the address does not match any enabled MPU region
+    /// * the address matches multiple MPU regions
+    /// * the TT instruction was executed from an unprivileged mode and the A flag was not specified.
+    #[inline]
+    pub fn mpu_region(self) -> Option<u8> {
+        if self.tt_resp.mrvalid() {
+            // Cast is safe as MREGION field is defined on 8 bits.
+            Some(self.tt_resp.mregion() as u8)
+        } else {
+            None
+        }
+    }
+
+ /// Indicates the Security attribute of the target address. Independent of AccessType.
+    /// Always false when the test is performed from the Non-Secure state.
+ #[inline]
+ pub fn secure(self) -> bool {
+ self.tt_resp.s()
+ }
+
+ /// Non-Secure Read accessibility of the target address.
+ /// Same as readable() && !secure()
+ #[inline]
+ pub fn ns_readable(self) -> bool {
+ self.tt_resp.nsr()
+ }
+
+ /// Non-Secure Read and Write accessibility of the target address.
+ /// Same as read_and_writable() && !secure()
+ #[inline]
+ pub fn ns_read_and_writable(self) -> bool {
+ self.tt_resp.nsrw()
+ }
+
+ /// Indicate the IDAU region number containing the target address. Independent of AccessType.
+ /// Returns None if the value is not valid:
+ /// * the IDAU cannot provide a region number
+ /// * the address is exempt from security attribution
+    /// * the test is performed from the Non-Secure state
+ #[inline]
+ pub fn idau_region(self) -> Option<u8> {
+ if self.tt_resp.irvalid() {
+ // Cast is safe as IREGION field is defined on 8 bits.
+ Some(self.tt_resp.iregion() as u8)
+ } else {
+ None
+ }
+ }
+
+ /// Indicate the SAU region number containing the target address. Independent of AccessType.
+ /// Returns None if the value is not valid:
+ /// * SAU_CTRL.ENABLE is set to zero
+ /// * the register argument specified in the SREGION field does not match any enabled SAU regions
+ /// * the address specified matches multiple enabled SAU regions
+ /// * the address specified by the SREGION field is exempt from the secure memory attribution
+    /// * the TT instruction was executed from the Non-Secure state or the Security Extension is not
+    ///   implemented
+ #[inline]
+ pub fn sau_region(self) -> Option<u8> {
+ if self.tt_resp.srvalid() {
+ // Cast is safe as SREGION field is defined on 8 bits.
+ Some(self.tt_resp.sregion() as u8)
+ } else {
+ None
+ }
+ }
+}
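
A short usage sketch for the new `check_range` helper (the buffer address and length are illustrative, not from this commit): validating a whole Non-Secure buffer before a Secure function writes to it.

```rust
use cortex_m::cmse::{AccessType, TestTarget};

// Hypothetical buffer handed over by Non-Secure code.
let addr = 0x2000_0000 as *mut u32;
let len_bytes = 256;

// None is returned if the range crosses a SAU/IDAU/MPU region boundary,
// if len_bytes is 0, or if addr + len_bytes - 1 overflows.
match TestTarget::check_range(addr, len_bytes, AccessType::NonSecureUnprivileged) {
    Some(tt) if tt.ns_read_and_writable() => {
        // The whole range is Non-Secure read/write: safe to use.
    }
    _ => {
        // Reject the buffer.
    }
}
```
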
diff --git a/src/lib.rs b/src/lib.rs
index 5287041..6b280f0 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -34,6 +34,8 @@
#![no_std]
#![allow(clippy::identity_op)]
#![allow(clippy::missing_safety_doc)]
+// Prevent clippy from complaining about empty match expressions that are used for cfg gating.
+#![allow(clippy::match_single_binding)]
// This makes clippy warn about public functions which are not #[inline].
//
@@ -57,8 +59,10 @@ extern crate volatile_register;
mod macros;
pub mod asm;
+#[cfg(armv8m)]
+pub mod cmse;
pub mod interrupt;
-#[cfg(not(armv6m))]
+#[cfg(all(not(armv6m), not(armv8m_base)))]
pub mod itm;
pub mod peripheral;
pub mod register;
diff --git a/src/peripheral/cbp.rs b/src/peripheral/cbp.rs
index 8d82e2a..5aee544 100644
--- a/src/peripheral/cbp.rs
+++ b/src/peripheral/cbp.rs
@@ -1,6 +1,6 @@
//! Cache and branch predictor maintenance operations
//!
-//! *NOTE* Available only on ARMv7-M (`thumbv7*m-none-eabi*`)
+//! *NOTE* Not available on Armv6-M.
use volatile_register::WO;
@@ -39,34 +39,28 @@ const CBP_SW_SET_MASK: u32 = 0x1FF << CBP_SW_SET_POS;
impl CBP {
/// I-cache invalidate all to PoU
- #[inline]
+ #[inline(always)]
pub fn iciallu(&mut self) {
- unsafe {
- self.iciallu.write(0);
- }
+ unsafe { self.iciallu.write(0) };
}
/// I-cache invalidate by MVA to PoU
- #[inline]
+ #[inline(always)]
pub fn icimvau(&mut self, mva: u32) {
- unsafe {
- self.icimvau.write(mva);
- }
+ unsafe { self.icimvau.write(mva) };
}
/// D-cache invalidate by MVA to PoC
- #[inline]
- pub fn dcimvac(&mut self, mva: u32) {
- unsafe {
- self.dcimvac.write(mva);
- }
+ #[inline(always)]
+ pub unsafe fn dcimvac(&mut self, mva: u32) {
+ self.dcimvac.write(mva);
}
/// D-cache invalidate by set-way
///
/// `set` is masked to be between 0 and 3, and `way` between 0 and 511.
- #[inline]
- pub fn dcisw(&mut self, set: u16, way: u16) {
+ #[inline(always)]
+ pub unsafe fn dcisw(&mut self, set: u16, way: u16) {
// The ARMv7-M Architecture Reference Manual, as of Revision E.b, says these set/way
// operations have a register data format which depends on the implementation's
// associativity and number of sets. Specifically the 'way' and 'set' fields have
@@ -76,16 +70,14 @@ impl CBP {
// Generic User Guide section 4.8.3. Since no other ARMv7-M implementations except the
// Cortex-M7 have a DCACHE or ICACHE at all, it seems safe to do the same thing as the
// CMSIS-Core implementation and use fixed values.
- unsafe {
- self.dcisw.write(
- ((u32::from(way) & (CBP_SW_WAY_MASK >> CBP_SW_WAY_POS)) << CBP_SW_WAY_POS)
- | ((u32::from(set) & (CBP_SW_SET_MASK >> CBP_SW_SET_POS)) << CBP_SW_SET_POS),
- );
- }
+ self.dcisw.write(
+ ((u32::from(way) & (CBP_SW_WAY_MASK >> CBP_SW_WAY_POS)) << CBP_SW_WAY_POS)
+ | ((u32::from(set) & (CBP_SW_SET_MASK >> CBP_SW_SET_POS)) << CBP_SW_SET_POS),
+ );
}
/// D-cache clean by MVA to PoU
- #[inline]
+ #[inline(always)]
pub fn dccmvau(&mut self, mva: u32) {
unsafe {
self.dccmvau.write(mva);
@@ -93,7 +85,7 @@ impl CBP {
}
/// D-cache clean by MVA to PoC
- #[inline]
+ #[inline(always)]
pub fn dccmvac(&mut self, mva: u32) {
unsafe {
self.dccmvac.write(mva);
@@ -103,7 +95,7 @@ impl CBP {
/// D-cache clean by set-way
///
/// `set` is masked to be between 0 and 3, and `way` between 0 and 511.
- #[inline]
+ #[inline(always)]
pub fn dccsw(&mut self, set: u16, way: u16) {
// See comment for dcisw() about the format here
unsafe {
@@ -115,7 +107,7 @@ impl CBP {
}
/// D-cache clean and invalidate by MVA to PoC
- #[inline]
+ #[inline(always)]
pub fn dccimvac(&mut self, mva: u32) {
unsafe {
self.dccimvac.write(mva);
@@ -125,7 +117,7 @@ impl CBP {
/// D-cache clean and invalidate by set-way
///
/// `set` is masked to be between 0 and 3, and `way` between 0 and 511.
- #[inline]
+ #[inline(always)]
pub fn dccisw(&mut self, set: u16, way: u16) {
// See comment for dcisw() about the format here
unsafe {
@@ -137,7 +129,7 @@ impl CBP {
}
/// Branch predictor invalidate all
- #[inline]
+ #[inline(always)]
pub fn bpiall(&mut self) {
unsafe {
self.bpiall.write(0);
diff --git a/src/peripheral/cpuid.rs b/src/peripheral/cpuid.rs
index 787be5c..32d0baf 100644
--- a/src/peripheral/cpuid.rs
+++ b/src/peripheral/cpuid.rs
@@ -114,4 +114,28 @@ impl CPUID {
(1 + ((ccsidr & CCSIDR_ASSOCIATIVITY_MASK) >> CCSIDR_ASSOCIATIVITY_POS)) as u16,
)
}
+
+ /// Returns log2 of the number of words in the smallest cache line of all the data cache and
+ /// unified caches that are controlled by the processor.
+ ///
+ /// This is the `DminLine` field of the CTR register.
+ #[inline(always)]
+ pub fn cache_dminline() -> u32 {
+ const CTR_DMINLINE_POS: u32 = 16;
+ const CTR_DMINLINE_MASK: u32 = 0xF << CTR_DMINLINE_POS;
+ let ctr = unsafe { (*Self::ptr()).ctr.read() };
+ (ctr & CTR_DMINLINE_MASK) >> CTR_DMINLINE_POS
+ }
+
+ /// Returns log2 of the number of words in the smallest cache line of all the instruction
+ /// caches that are controlled by the processor.
+ ///
+ /// This is the `IminLine` field of the CTR register.
+ #[inline(always)]
+ pub fn cache_iminline() -> u32 {
+ const CTR_IMINLINE_POS: u32 = 0;
+ const CTR_IMINLINE_MASK: u32 = 0xF << CTR_IMINLINE_POS;
+ let ctr = unsafe { (*Self::ptr()).ctr.read() };
+ (ctr & CTR_IMINLINE_MASK) >> CTR_IMINLINE_POS
+ }
}
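
As a worked example of the `DminLine` encoding (a sketch, not part of the diff): the field stores log2 of the line size in words, so the byte size follows from a shift and a multiply.

```rust
use cortex_m::peripheral::CPUID;

// DminLine = log2(words per line); convert to bytes (4 bytes per word).
let words_per_line = 1u32 << CPUID::cache_dminline();
let line_size_bytes = words_per_line * 4;
// On Cortex-M7 cores DminLine is 3, so this yields 8 * 4 = 32 bytes.
```
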
diff --git a/src/peripheral/fpb.rs b/src/peripheral/fpb.rs
index 215d4ff..b86b8b2 100644
--- a/src/peripheral/fpb.rs
+++ b/src/peripheral/fpb.rs
@@ -1,6 +1,6 @@
//! Flash Patch and Breakpoint unit
//!
-//! *NOTE* Available only on ARMv7-M (`thumbv7*m-none-eabi*`)
+//! *NOTE* Not available on Armv6-M.
use volatile_register::{RO, RW, WO};
diff --git a/src/peripheral/fpu.rs b/src/peripheral/fpu.rs
index c4e8a1d..9a047d8 100644
--- a/src/peripheral/fpu.rs
+++ b/src/peripheral/fpu.rs
@@ -1,6 +1,6 @@
//! Floating Point Unit
//!
-//! *NOTE* Available only on ARMv7E-M (`thumbv7em-none-eabihf`)
+//! *NOTE* Available only on targets with a Floating Point Unit (FPU) extension.
use volatile_register::{RO, RW};
diff --git a/src/peripheral/itm.rs b/src/peripheral/itm.rs
index 30c7e47..0b63524 100644
--- a/src/peripheral/itm.rs
+++ b/src/peripheral/itm.rs
@@ -1,6 +1,6 @@
//! Instrumentation Trace Macrocell
//!
-//! *NOTE* Available only on ARMv7-M (`thumbv7*m-none-eabi*`)
+//! *NOTE* Not available on Armv6-M and Armv8-M Baseline.
use core::cell::UnsafeCell;
use core::ptr;
diff --git a/src/peripheral/mod.rs b/src/peripheral/mod.rs
index 8854830..04fae31 100644
--- a/src/peripheral/mod.rs
+++ b/src/peripheral/mod.rs
@@ -1,5 +1,4 @@
-#![allow(clippy::needless_doctest_main)]
-//! Core peripherals
+//! Core peripherals.
//!
//! # API
//!
@@ -9,41 +8,32 @@
//! the [`Peripherals::take`](struct.Peripherals.html#method.take) method.
//!
//! ``` no_run
-//! use cortex_m::peripheral::Peripherals;
-//!
-//! fn main() {
-//! let mut peripherals = Peripherals::take().unwrap();
-//! peripherals.DWT.enable_cycle_counter();
-//! }
+//! # use cortex_m::peripheral::Peripherals;
+//! let mut peripherals = Peripherals::take().unwrap();
+//! peripherals.DWT.enable_cycle_counter();
//! ```
//!
//! This method can only be successfully called *once* -- this is why the method returns an
//! `Option`. Subsequent calls to the method will result in a `None` value being returned.
//!
-//! ``` no_run
-//! use cortex_m::peripheral::Peripherals;
-//!
-//! fn main() {
-//! let ok = Peripherals::take().unwrap();
-//! let panics = Peripherals::take().unwrap();
-//! }
+//! ``` no_run, should_panic
+//! # use cortex_m::peripheral::Peripherals;
+//! let ok = Peripherals::take().unwrap();
+//! let panics = Peripherals::take().unwrap();
//! ```
//! A part of the peripheral API doesn't require access to a peripheral instance. This part of the
//! API is provided as static methods on the peripheral types. One example is the
//! [`DWT::get_cycle_count`](struct.DWT.html#method.get_cycle_count) method.
//!
//! ``` no_run
-//! use cortex_m::peripheral::{DWT, Peripherals};
-//!
-//! fn main() {
-//! {
-//! let mut peripherals = Peripherals::take().unwrap();
-//! peripherals.DWT.enable_cycle_counter();
-//! } // all the peripheral singletons are destroyed here
+//! # use cortex_m::peripheral::{DWT, Peripherals};
+//! {
+//! let mut peripherals = Peripherals::take().unwrap();
+//! peripherals.DWT.enable_cycle_counter();
+//! } // all the peripheral singletons are destroyed here
//!
-//! // but this method can be called without a DWT instance
-//! let cyccnt = DWT::get_cycle_count();
-//! }
+//! // but this method can be called without a DWT instance
+//! let cyccnt = DWT::get_cycle_count();
//! ```
//!
//! The singleton property can be *unsafely* bypassed using the `ptr` static method which is
@@ -51,17 +41,14 @@
//! safe higher level abstractions.
//!
//! ``` no_run
-//! use cortex_m::peripheral::{DWT, Peripherals};
-//!
-//! fn main() {
-//! {
-//! let mut peripherals = Peripherals::take().unwrap();
-//! peripherals.DWT.enable_cycle_counter();
-//! } // all the peripheral singletons are destroyed here
+//! # use cortex_m::peripheral::{DWT, Peripherals};
+//! {
+//! let mut peripherals = Peripherals::take().unwrap();
+//! peripherals.DWT.enable_cycle_counter();
+//! } // all the peripheral singletons are destroyed here
//!
-//! // actually safe because this is an atomic read with no side effects
-//! let cyccnt = unsafe { (*DWT::ptr()).cyccnt.read() };
-//! }
+//! // actually safe because this is an atomic read with no side effects
+//! let cyccnt = unsafe { (*DWT::ptr()).cyccnt.read() };
//! ```
//!
//! # References
@@ -70,7 +57,6 @@
// TODO stand-alone registers: ICTR, ACTLR and STIR
-
use core::marker::PhantomData;
use core::ops;
@@ -86,10 +72,12 @@ pub mod fpb;
// NOTE(target_arch) is for documentation purposes
#[cfg(any(has_fpu, target_arch = "x86_64"))]
pub mod fpu;
-#[cfg(not(armv6m))]
+#[cfg(all(not(armv6m), not(armv8m_base)))]
pub mod itm;
pub mod mpu;
pub mod nvic;
+#[cfg(armv8m)]
+pub mod sau;
pub mod scb;
pub mod syst;
#[cfg(not(armv6m))]
@@ -103,7 +91,8 @@ mod test;
/// Core peripherals
#[allow(non_snake_case)]
pub struct Peripherals {
- /// Cache and branch predictor maintenance operations (not present on Cortex-M0 variants)
+ /// Cache and branch predictor maintenance operations.
+ /// Not available on Armv6-M.
pub CBP: CBP,
/// CPUID
@@ -115,13 +104,15 @@ pub struct Peripherals {
/// Data Watchpoint and Trace unit
pub DWT: DWT,
- /// Flash Patch and Breakpoint unit (not present on Cortex-M0 variants)
+ /// Flash Patch and Breakpoint unit.
+ /// Not available on Armv6-M.
pub FPB: FPB,
- /// Floating Point Unit (only present on `thumbv7em-none-eabihf`)
+ /// Floating Point Unit.
pub FPU: FPU,
- /// Instrumentation Trace Macrocell (not present on Cortex-M0 variants)
+ /// Instrumentation Trace Macrocell.
+ /// Not available on Armv6-M and Armv8-M Baseline.
pub ITM: ITM,
/// Memory Protection Unit
@@ -130,14 +121,22 @@ pub struct Peripherals {
/// Nested Vector Interrupt Controller
pub NVIC: NVIC,
+ /// Security Attribution Unit
+ pub SAU: SAU,
+
/// System Control Block
pub SCB: SCB,
/// SysTick: System Timer
pub SYST: SYST,
- /// Trace Port Interface Unit (not present on Cortex-M0 variants)
+ /// Trace Port Interface Unit.
+ /// Not available on Armv6-M.
pub TPIU: TPIU,
+
+ // Private field making `Peripherals` non-exhaustive. We don't use `#[non_exhaustive]` so we
+ // can support older Rust versions.
+ _priv: (),
}
// NOTE `no_mangle` is used here to prevent linking different minor versions of this crate as that
@@ -191,6 +190,9 @@ impl Peripherals {
NVIC: NVIC {
_marker: PhantomData,
},
+ SAU: SAU {
+ _marker: PhantomData,
+ },
SCB: SCB {
_marker: PhantomData,
},
@@ -200,6 +202,7 @@ impl Peripherals {
TPIU: TPIU {
_marker: PhantomData,
},
+ _priv: (),
}
}
}
@@ -368,7 +371,7 @@ pub struct ITM {
unsafe impl Send for ITM {}
-#[cfg(not(armv6m))]
+#[cfg(all(not(armv6m), not(armv8m_base)))]
impl ITM {
/// Returns a pointer to the register block
#[inline(always)]
@@ -377,7 +380,7 @@ impl ITM {
}
}
-#[cfg(not(armv6m))]
+#[cfg(all(not(armv6m), not(armv8m_base)))]
impl ops::Deref for ITM {
type Target = self::itm::RegisterBlock;
@@ -387,7 +390,7 @@ impl ops::Deref for ITM {
}
}
-#[cfg(not(armv6m))]
+#[cfg(all(not(armv6m), not(armv8m_base)))]
impl ops::DerefMut for ITM {
#[inline(always)]
fn deref_mut(&mut self) -> &mut Self::Target {
@@ -443,6 +446,32 @@ impl ops::Deref for NVIC {
}
}
+/// Security Attribution Unit
+pub struct SAU {
+ _marker: PhantomData<*const ()>,
+}
+
+unsafe impl Send for SAU {}
+
+#[cfg(armv8m)]
+impl SAU {
+ /// Returns a pointer to the register block
+ #[inline(always)]
+ pub fn ptr() -> *const sau::RegisterBlock {
+ 0xE000_EDD0 as *const _
+ }
+}
+
+#[cfg(armv8m)]
+impl ops::Deref for SAU {
+ type Target = self::sau::RegisterBlock;
+
+ #[inline(always)]
+ fn deref(&self) -> &Self::Target {
+ unsafe { &*Self::ptr() }
+ }
+}
+
/// System Control Block
pub struct SCB {
_marker: PhantomData<*const ()>,
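
A brief sketch (assuming an Armv8-M target, where the `armv8m` cfg enables the impls) of how the new `SAU` field is reached through the usual singleton flow:

```rust
use cortex_m::peripheral::Peripherals;

let p = Peripherals::take().unwrap();
// The SAU handle derefs to its register block like the other peripherals.
let implemented_regions = p.SAU.region_numbers();
```
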
diff --git a/src/peripheral/sau.rs b/src/peripheral/sau.rs
new file mode 100644
index 0000000..da91aca
--- /dev/null
+++ b/src/peripheral/sau.rs
@@ -0,0 +1,243 @@
+//! Security Attribution Unit
+//!
+//! *NOTE* Available only on Armv8-M and Armv8.1-M, for the following Rust target triples:
+//! * `thumbv8m.base-none-eabi`
+//! * `thumbv8m.main-none-eabi`
+//! * `thumbv8m.main-none-eabihf`
+//!
+//! For reference please check the section B8.3 of the Armv8-M Architecture Reference Manual.
+
+use crate::interrupt;
+use crate::peripheral::SAU;
+use bitfield::bitfield;
+use volatile_register::{RO, RW};
+
+/// Register block
+#[repr(C)]
+pub struct RegisterBlock {
+ /// Control Register
+ pub ctrl: RW<Ctrl>,
+ /// Type Register
+ pub _type: RO<Type>,
+ /// Region Number Register
+ pub rnr: RW<Rnr>,
+ /// Region Base Address Register
+ pub rbar: RW<Rbar>,
+ /// Region Limit Address Register
+ pub rlar: RW<Rlar>,
+ /// Secure Fault Status Register
+ pub sfsr: RO<Sfsr>,
+ /// Secure Fault Address Register
+ pub sfar: RO<Sfar>,
+}
+
+bitfield! {
+ /// Control Register description
+ #[repr(C)]
+ #[derive(Copy, Clone)]
+ pub struct Ctrl(u32);
+ get_enable, set_enable: 0;
+ get_allns, set_allns: 1;
+}
+
+bitfield! {
+ /// Type Register description
+ #[repr(C)]
+ #[derive(Copy, Clone)]
+ pub struct Type(u32);
+ u8;
+ sregion, _: 7, 0;
+}
+
+bitfield! {
+ /// Region Number Register description
+ #[repr(C)]
+ #[derive(Copy, Clone)]
+ pub struct Rnr(u32);
+ u8;
+ get_region, set_region: 7, 0;
+}
+
+bitfield! {
+ /// Region Base Address Register description
+ #[repr(C)]
+ #[derive(Copy, Clone)]
+ pub struct Rbar(u32);
+ u32;
+ get_baddr, set_baddr: 31, 5;
+}
+
+bitfield! {
+ /// Region Limit Address Register description
+ #[repr(C)]
+ #[derive(Copy, Clone)]
+ pub struct Rlar(u32);
+ u32;
+ get_laddr, set_laddr: 31, 5;
+ get_nsc, set_nsc: 1;
+ get_enable, set_enable: 0;
+}
+
+bitfield! {
+ /// Secure Fault Status Register description
+ #[repr(C)]
+ #[derive(Copy, Clone)]
+ pub struct Sfsr(u32);
+ invep, _: 0;
+ invis, _: 1;
+ inver, _: 2;
+ auviol, _: 3;
+ invtran, _: 4;
+ lsperr, _: 5;
+ sfarvalid, _: 6;
+ lserr, _: 7;
+}
+
+bitfield! {
+ /// Secure Fault Address Register description
+ #[repr(C)]
+ #[derive(Copy, Clone)]
+ pub struct Sfar(u32);
+ u32;
+ address, _: 31, 0;
+}
+
+/// Possible attribute of a SAU region.
+#[derive(Debug)]
+pub enum SauRegionAttribute {
+ /// SAU region is Secure
+ Secure,
+ /// SAU region is Non-Secure Callable
+ NonSecureCallable,
+ /// SAU region is Non-Secure
+ NonSecure,
+}
+
+/// Description of a SAU region.
+#[derive(Debug)]
+pub struct SauRegion {
+    /// First address of the region; its 5 least significant bits must be set to zero.
+ pub base_address: u32,
+    /// Last address of the region; its 5 least significant bits must be set to one.
+ pub limit_address: u32,
+ /// Attribute of the region.
+ pub attribute: SauRegionAttribute,
+}
+
+/// Possible error values returned by the SAU methods.
+#[derive(Debug)]
+pub enum SauError {
+ /// The region number parameter to set or get a region must be between 0 and
+ /// region_numbers() - 1.
+ RegionNumberTooBig,
+ /// Bits 0 to 4 of the base address of a SAU region must be set to zero.
+ WrongBaseAddress,
+ /// Bits 0 to 4 of the limit address of a SAU region must be set to one.
+ WrongLimitAddress,
+}
+
+impl SAU {
+ /// Get the number of implemented SAU regions.
+ #[inline]
+ pub fn region_numbers(&self) -> u8 {
+ self._type.read().sregion()
+ }
+
+ /// Enable the SAU.
+ #[inline]
+ pub fn enable(&mut self) {
+ unsafe {
+ self.ctrl.modify(|mut ctrl| {
+ ctrl.set_enable(true);
+ ctrl
+ });
+ }
+ }
+
+    /// Configure the SAU region with the given region number.
+    /// SAU regions must be 32-byte aligned and their sizes must be a multiple of 32 bytes. This
+    /// means that the 5 least significant bits of the base address of a SAU region must be set to
+    /// zero and the 5 least significant bits of the limit address must be set to one.
+    /// The region number must be valid.
+    /// This function runs inside a critical section to prevent inconsistent results.
+ #[inline]
+ pub fn set_region(&mut self, region_number: u8, region: SauRegion) -> Result<(), SauError> {
+ interrupt::free(|_| {
+ let base_address = region.base_address;
+ let limit_address = region.limit_address;
+ let attribute = region.attribute;
+
+ if region_number >= self.region_numbers() {
+ Err(SauError::RegionNumberTooBig)
+ } else if base_address & 0x1F != 0 {
+ Err(SauError::WrongBaseAddress)
+ } else if limit_address & 0x1F != 0x1F {
+ Err(SauError::WrongLimitAddress)
+ } else {
+ // All fields of these registers are going to be modified so we don't need to read them
+ // before.
+ let mut rnr = Rnr(0);
+ let mut rbar = Rbar(0);
+ let mut rlar = Rlar(0);
+
+ rnr.set_region(region_number);
+ rbar.set_baddr(base_address >> 5);
+ rlar.set_laddr(limit_address >> 5);
+
+ match attribute {
+ SauRegionAttribute::Secure => {
+ rlar.set_nsc(false);
+ rlar.set_enable(false);
+ }
+ SauRegionAttribute::NonSecureCallable => {
+ rlar.set_nsc(true);
+ rlar.set_enable(true);
+ }
+ SauRegionAttribute::NonSecure => {
+ rlar.set_nsc(false);
+ rlar.set_enable(true);
+ }
+ }
+
+ unsafe {
+ self.rnr.write(rnr);
+ self.rbar.write(rbar);
+ self.rlar.write(rlar);
+ }
+
+ Ok(())
+ }
+ })
+ }
+
+ /// Get a region from the SAU.
+ /// The region number must be valid.
+    /// This function runs inside a critical section to prevent inconsistent results.
+ #[inline]
+ pub fn get_region(&mut self, region_number: u8) -> Result<SauRegion, SauError> {
+ interrupt::free(|_| {
+ if region_number >= self.region_numbers() {
+ Err(SauError::RegionNumberTooBig)
+ } else {
+ unsafe {
+ self.rnr.write(Rnr(region_number.into()));
+ }
+
+ let rbar = self.rbar.read();
+ let rlar = self.rlar.read();
+
+ let attribute = match (rlar.get_enable(), rlar.get_nsc()) {
+ (false, _) => SauRegionAttribute::Secure,
+ (true, false) => SauRegionAttribute::NonSecure,
+ (true, true) => SauRegionAttribute::NonSecureCallable,
+ };
+
+ Ok(SauRegion {
+ base_address: rbar.get_baddr() << 5,
+ limit_address: (rlar.get_laddr() << 5) | 0x1F,
+ attribute,
+ })
+ }
+ })
+ }
+}
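
A configuration sketch under stated assumptions (the addresses are illustrative; the target must be Armv8-M with the Security Extension): marking one 4 KiB region Non-Secure Callable and then enabling the SAU.

```rust
use cortex_m::peripheral::sau::{SauRegion, SauRegionAttribute};
use cortex_m::peripheral::Peripherals;

let mut p = Peripherals::take().unwrap();

// 4 KiB region: the base has its 5 LSBs clear, the limit its 5 LSBs set.
p.SAU
    .set_region(
        0,
        SauRegion {
            base_address: 0x1000_0000,
            limit_address: 0x1000_0FFF,
            attribute: SauRegionAttribute::NonSecureCallable,
        },
    )
    .unwrap();

p.SAU.enable();
```
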
diff --git a/src/peripheral/scb.rs b/src/peripheral/scb.rs
index b2f45c5..940809e 100644
--- a/src/peripheral/scb.rs
+++ b/src/peripheral/scb.rs
@@ -305,8 +305,8 @@ impl VectActive {
#[cfg(not(armv6m))]
mod scb_consts {
- pub const SCB_CCR_IC_MASK: u32 = (1 << 17);
- pub const SCB_CCR_DC_MASK: u32 = (1 << 16);
+ pub const SCB_CCR_IC_MASK: u32 = 1 << 17;
+ pub const SCB_CCR_DC_MASK: u32 = 1 << 16;
}
#[cfg(not(armv6m))]
@@ -314,105 +314,119 @@ use self::scb_consts::*;
#[cfg(not(armv6m))]
impl SCB {
- /// Enables I-Cache if currently disabled
+ /// Enables I-cache if currently disabled.
+ ///
+ /// This operation first invalidates the entire I-cache.
#[inline]
pub fn enable_icache(&mut self) {
- // Don't do anything if ICache is already enabled
+ // Don't do anything if I-cache is already enabled
if Self::icache_enabled() {
return;
}
- // NOTE(unsafe) All CBP registers are write-only and stateless
+ // NOTE(unsafe): No races as all CBP registers are write-only and stateless
let mut cbp = unsafe { CBP::new() };
- // Invalidate I-Cache
+ // Invalidate I-cache
cbp.iciallu();
- // Enable I-Cache
+ // Enable I-cache
+ // NOTE(unsafe): We have synchronised access by &mut self
unsafe { self.ccr.modify(|r| r | SCB_CCR_IC_MASK) };
crate::asm::dsb();
crate::asm::isb();
}
- /// Disables I-Cache if currently enabled
+ /// Disables I-cache if currently enabled.
+ ///
+ /// This operation invalidates the entire I-cache after disabling.
#[inline]
pub fn disable_icache(&mut self) {
- // Don't do anything if ICache is already disabled
+ // Don't do anything if I-cache is already disabled
if !Self::icache_enabled() {
return;
}
- // NOTE(unsafe) All CBP registers are write-only and stateless
+ // NOTE(unsafe): No races as all CBP registers are write-only and stateless
let mut cbp = unsafe { CBP::new() };
- // Disable I-Cache
+ // Disable I-cache
+ // NOTE(unsafe): We have synchronised access by &mut self
unsafe { self.ccr.modify(|r| r & !SCB_CCR_IC_MASK) };
- // Invalidate I-Cache
+ // Invalidate I-cache
cbp.iciallu();
crate::asm::dsb();
crate::asm::isb();
}
- /// Returns whether the I-Cache is currently enabled
- #[inline]
+ /// Returns whether the I-cache is currently enabled.
+ #[inline(always)]
pub fn icache_enabled() -> bool {
crate::asm::dsb();
crate::asm::isb();
- // NOTE(unsafe) atomic read with no side effects
+ // NOTE(unsafe): atomic read with no side effects
unsafe { (*Self::ptr()).ccr.read() & SCB_CCR_IC_MASK == SCB_CCR_IC_MASK }
}
- /// Invalidates I-Cache
+ /// Invalidates the entire I-cache.
#[inline]
pub fn invalidate_icache(&mut self) {
- // NOTE(unsafe) All CBP registers are write-only and stateless
+ // NOTE(unsafe): No races as all CBP registers are write-only and stateless
let mut cbp = unsafe { CBP::new() };
- // Invalidate I-Cache
+ // Invalidate I-cache
cbp.iciallu();
crate::asm::dsb();
crate::asm::isb();
}
- /// Enables D-cache if currently disabled
+ /// Enables D-cache if currently disabled.
+ ///
+ /// This operation first invalidates the entire D-cache, ensuring it does
+ /// not contain stale values before being enabled.
#[inline]
pub fn enable_dcache(&mut self, cpuid: &mut CPUID) {
- // Don't do anything if DCache is already enabled
+ // Don't do anything if D-cache is already enabled
if Self::dcache_enabled() {
return;
}
- // Invalidate anything currently in the DCache
- self.invalidate_dcache(cpuid);
+ // Invalidate anything currently in the D-cache
+ unsafe { self.invalidate_dcache(cpuid) };
- // Now turn on the DCache
+ // Now turn on the D-cache
+ // NOTE(unsafe): We have synchronised access by &mut self
unsafe { self.ccr.modify(|r| r | SCB_CCR_DC_MASK) };
crate::asm::dsb();
crate::asm::isb();
}
- /// Disables D-cache if currently enabled
+ /// Disables D-cache if currently enabled.
+ ///
+ /// This operation subsequently cleans and invalidates the entire D-cache,
+ /// ensuring all contents are safely written back to main memory after disabling.
#[inline]
pub fn disable_dcache(&mut self, cpuid: &mut CPUID) {
- // Don't do anything if DCache is already disabled
+ // Don't do anything if D-cache is already disabled
if !Self::dcache_enabled() {
return;
}
- // Turn off the DCache
+ // Turn off the D-cache
+ // NOTE(unsafe): We have synchronised access by &mut self
unsafe { self.ccr.modify(|r| r & !SCB_CCR_DC_MASK) };
// Clean and invalidate whatever was left in it
self.clean_invalidate_dcache(cpuid);
}
- /// Returns whether the D-Cache is currently enabled
+ /// Returns whether the D-cache is currently enabled.
#[inline]
pub fn dcache_enabled() -> bool {
crate::asm::dsb();
@@ -422,20 +436,21 @@ impl SCB {
unsafe { (*Self::ptr()).ccr.read() & SCB_CCR_DC_MASK == SCB_CCR_DC_MASK }
}
- /// Invalidates D-cache
+ /// Invalidates the entire D-cache.
+ ///
+    /// Note that calling this while the dcache is enabled will probably wipe out the
+    /// stack, depending on optimisations, breaking the return to the call point.
///
- /// Note that calling this while the dcache is enabled will probably wipe out your
- /// stack, depending on optimisations, breaking returning to the call point.
/// It's used immediately before enabling the dcache, but not exported publicly.
#[inline]
- fn invalidate_dcache(&mut self, cpuid: &mut CPUID) {
- // NOTE(unsafe) All CBP registers are write-only and stateless
- let mut cbp = unsafe { CBP::new() };
+ unsafe fn invalidate_dcache(&mut self, cpuid: &mut CPUID) {
+ // NOTE(unsafe): No races as all CBP registers are write-only and stateless
+ let mut cbp = CBP::new();
// Read number of sets and ways
let (sets, ways) = cpuid.cache_num_sets_ways(0, CsselrCacheType::DataOrUnified);
- // Invalidate entire D-Cache
+ // Invalidate entire D-cache
for set in 0..sets {
for way in 0..ways {
cbp.dcisw(set, way);
@@ -446,10 +461,13 @@ impl SCB {
crate::asm::isb();
}
- /// Cleans D-cache
+ /// Cleans the entire D-cache.
+ ///
+ /// This function causes everything in the D-cache to be written back to main memory,
+ /// overwriting whatever is already there.
#[inline]
pub fn clean_dcache(&mut self, cpuid: &mut CPUID) {
- // NOTE(unsafe) All CBP registers are write-only and stateless
+ // NOTE(unsafe): No races as all CBP registers are write-only and stateless
let mut cbp = unsafe { CBP::new() };
// Read number of sets and ways
@@ -465,10 +483,14 @@ impl SCB {
crate::asm::isb();
}
- /// Cleans and invalidates D-cache
+ /// Cleans and invalidates the entire D-cache.
+ ///
+ /// This function causes everything in the D-cache to be written back to main memory,
+ /// and then marks the entire D-cache as invalid, causing future reads to first fetch
+ /// from main memory.
#[inline]
pub fn clean_invalidate_dcache(&mut self, cpuid: &mut CPUID) {
- // NOTE(unsafe) All CBP registers are write-only and stateless
+ // NOTE(unsafe): No races as all CBP registers are write-only and stateless
let mut cbp = unsafe { CBP::new() };
// Read number of sets and ways
@@ -484,47 +506,175 @@ impl SCB {
crate::asm::isb();
}
- /// Invalidates D-cache by address
+ /// Invalidates D-cache by address.
+ ///
+ /// * `addr`: The address to invalidate, which must be cache-line aligned.
+ /// * `size`: Number of bytes to invalidate, which must be a multiple of the cache line size.
+ ///
+    /// Invalidates D-cache lines, starting from the first line containing `addr`,
+ /// finishing once at least `size` bytes have been invalidated.
+ ///
+ /// Invalidation causes the next read access to memory to be fetched from main memory instead
+ /// of the cache.
///
- /// `addr`: the address to invalidate
- /// `size`: size of the memory block, in number of bytes
+ /// # Cache Line Sizes
///
- /// Invalidates cache starting from the lowest 32-byte aligned address represented by `addr`,
- /// in blocks of 32 bytes until at least `size` bytes have been invalidated.
+ /// Cache line sizes vary by core. For all Cortex-M7 cores, the cache line size is fixed
+ /// to 32 bytes, which means `addr` must be 32-byte aligned and `size` must be a multiple
+ /// of 32. At the time of writing, no other Cortex-M cores have data caches.
+ ///
+ /// If `addr` is not cache-line aligned, or `size` is not a multiple of the cache line size,
+ /// other data before or after the desired memory would also be invalidated, which can very
+ /// easily cause memory corruption and undefined behaviour.
+ ///
+ /// # Safety
+ ///
+ /// After invalidating, the next read of invalidated data will be from main memory. This may
+ /// cause recent writes to be lost, potentially including writes that initialized objects.
+ /// Therefore, this method may cause uninitialized memory or invalid values to be read,
+ /// resulting in undefined behaviour. You must ensure that main memory contains valid and
+ /// initialized values before invalidating.
+ ///
+ /// `addr` **must** be aligned to the size of the cache lines, and `size` **must** be a
+ /// multiple of the cache line size, otherwise this function will invalidate other memory,
+ /// easily leading to memory corruption and undefined behaviour. This precondition is checked
+ /// in debug builds using a `debug_assert!()`, but not checked in release builds to avoid
+ /// a runtime-dependent `panic!()` call.
#[inline]
- pub fn invalidate_dcache_by_address(&mut self, addr: usize, size: usize) {
+ pub unsafe fn invalidate_dcache_by_address(&mut self, addr: usize, size: usize) {
// No-op zero sized operations
if size == 0 {
return;
}
- // NOTE(unsafe) All CBP registers are write-only and stateless
- let mut cbp = unsafe { CBP::new() };
+ // NOTE(unsafe): No races as all CBP registers are write-only and stateless
+ let mut cbp = CBP::new();
+
+ // dminline is log2(num words), so 2**dminline * 4 gives size in bytes
+ let dminline = CPUID::cache_dminline();
+ let line_size = (1 << dminline) * 4;
+
+ debug_assert!((addr & (line_size - 1)) == 0);
+ debug_assert!((size & (line_size - 1)) == 0);
crate::asm::dsb();
- // Cache lines are fixed to 32 bit on Cortex-M7 and not present in earlier Cortex-M
- const LINESIZE: usize = 32;
- let num_lines = ((size - 1) / LINESIZE) + 1;
+ // Find number of cache lines to invalidate
+ let num_lines = ((size - 1) / line_size) + 1;
- let mut addr = addr & 0xFFFF_FFE0;
+ // Compute address of first cache line
+ let mask = 0xFFFF_FFFF - (line_size - 1);
+ let mut addr = addr & mask;
for _ in 0..num_lines {
cbp.dcimvac(addr as u32);
- addr += LINESIZE;
+ addr += line_size;
}
crate::asm::dsb();
crate::asm::isb();
}
- /// Cleans D-cache by address
+ /// Invalidates an object from the D-cache.
+ ///
+ /// * `obj`: The object to invalidate.
+ ///
+ /// Invalidates D-cache starting from the first cache line containing `obj`,
+ /// continuing to invalidate cache lines until all of `obj` has been invalidated.
+ ///
+ /// Invalidation causes the next read access to memory to be fetched from main memory instead
+ /// of the cache.
+ ///
+ /// # Cache Line Sizes
+ ///
+ /// Cache line sizes vary by core. For all Cortex-M7 cores, the cache line size is fixed
+ /// to 32 bytes, which means `obj` must be 32-byte aligned, and its size must be a multiple
+ /// of 32 bytes. At the time of writing, no other Cortex-M cores have data caches.
+ ///
+ /// If `obj` is not cache-line aligned, or its size is not a multiple of the cache line size,
+ /// other data before or after the desired memory would also be invalidated, which can very
+ /// easily cause memory corruption and undefined behaviour.
+ ///
+ /// # Safety
+ ///
+ /// After invalidating, `obj` will be read from main memory on next access. This may cause
+ /// recent writes to `obj` to be lost, potentially including the write that initialized it.
+ /// Therefore, this method may cause uninitialized memory or invalid values to be read,
+ /// resulting in undefined behaviour. You must ensure that main memory contains a valid and
+ /// initialized value for T before invalidating `obj`.
+ ///
+ /// `obj` **must** be aligned to the size of the cache lines, and its size **must** be a
+ /// multiple of the cache line size, otherwise this function will invalidate other memory,
+ /// easily leading to memory corruption and undefined behaviour. This precondition is checked
+ /// in debug builds using a `debug_assert!()`, but not checked in release builds to avoid
+ /// a runtime-dependent `panic!()` call.
+ #[inline]
+ pub unsafe fn invalidate_dcache_by_ref<T>(&mut self, obj: &mut T) {
+ self.invalidate_dcache_by_address(obj as *const T as usize, core::mem::size_of::<T>());
+ }
+
+ /// Invalidates a slice from the D-cache.
+ ///
+ /// * `slice`: The slice to invalidate.
+ ///
+ /// Invalidates D-cache starting from the first cache line containing members of `slice`,
+ /// continuing to invalidate cache lines until all of `slice` has been invalidated.
+ ///
+ /// Invalidation causes the next read access to memory to be fetched from main memory instead
+ /// of the cache.
+ ///
+ /// # Cache Line Sizes
+ ///
+ /// Cache line sizes vary by core. For all Cortex-M7 cores, the cache line size is fixed
+ /// to 32 bytes, which means `slice` must be 32-byte aligned, and its size must be a multiple
+ /// of 32 bytes. At the time of writing, no other Cortex-M cores have data caches.
+ ///
+ /// If `slice` is not cache-line aligned, or its size is not a multiple of the cache line size,
+ /// other data before or after the desired memory would also be invalidated, which can very
+ /// easily cause memory corruption and undefined behaviour.
+ ///
+ /// # Safety
+ ///
+ /// After invalidating, `slice` will be read from main memory on next access. This may cause
+ /// recent writes to `slice` to be lost, potentially including the write that initialized it.
+ /// Therefore, this method may cause uninitialized memory or invalid values to be read,
+ /// resulting in undefined behaviour. You must ensure that main memory contains valid and
+ /// initialized values for T before invalidating `slice`.
+ ///
+ /// `slice` **must** be aligned to the size of the cache lines, and its size **must** be a
+ /// multiple of the cache line size, otherwise this function will invalidate other memory,
+ /// easily leading to memory corruption and undefined behaviour. This precondition is checked
+ /// in debug builds using a `debug_assert!()`, but not checked in release builds to avoid
+ /// a runtime-dependent `panic!()` call.
+ #[inline]
+ pub unsafe fn invalidate_dcache_by_slice<T>(&mut self, slice: &mut [T]) {
+ self.invalidate_dcache_by_address(
+ slice.as_ptr() as usize,
+ slice.len() * core::mem::size_of::<T>(),
+ );
+ }
+
+ /// Cleans D-cache by address.
///
- /// `addr`: the address to clean
- /// `size`: size of the memory block, in number of bytes
+ /// * `addr`: The address to start cleaning at.
+ /// * `size`: The number of bytes to clean.
///
- /// Cleans cache starting from the lowest 32-byte aligned address represented by `addr`,
- /// in blocks of 32 bytes until at least `size` bytes have been cleaned.
+    /// Cleans D-cache lines, starting from the first line containing `addr`,
+    /// finishing once at least `size` bytes have been cleaned.
+ ///
+ /// Cleaning the cache causes whatever data is present in the cache to be immediately written
+ /// to main memory, overwriting whatever was in main memory.
+ ///
+ /// # Cache Line Sizes
+ ///
+ /// Cache line sizes vary by core. For all Cortex-M7 cores, the cache line size is fixed
+ /// to 32 bytes, which means `addr` should generally be 32-byte aligned and `size` should be a
+ /// multiple of 32. At the time of writing, no other Cortex-M cores have data caches.
+ ///
+ /// If `addr` is not cache-line aligned, or `size` is not a multiple of the cache line size,
+ /// other data before or after the desired memory will also be cleaned. From the point of view
+ /// of the core executing this function, memory remains consistent, so this is not unsound,
+ /// but is worth knowing about.
#[inline]
pub fn clean_dcache_by_address(&mut self, addr: usize, size: usize) {
// No-op zero sized operations
@@ -532,34 +682,78 @@ impl SCB {
return;
}
- // NOTE(unsafe) All CBP registers are write-only and stateless
+ // NOTE(unsafe): No races as all CBP registers are write-only and stateless
let mut cbp = unsafe { CBP::new() };
crate::asm::dsb();
- // Cache lines are fixed to 32 bit on Cortex-M7 and not present in earlier Cortex-M
- const LINESIZE: usize = 32;
- let num_lines = ((size - 1) / LINESIZE) + 1;
+ let dminline = CPUID::cache_dminline();
+ let line_size = (1 << dminline) * 4;
+ let num_lines = ((size - 1) / line_size) + 1;
- let mut addr = addr & 0xFFFF_FFE0;
+ let mask = 0xFFFF_FFFF - (line_size - 1);
+ let mut addr = addr & mask;
for _ in 0..num_lines {
cbp.dccmvac(addr as u32);
- addr += LINESIZE;
+ addr += line_size;
}
crate::asm::dsb();
crate::asm::isb();
}
- /// Cleans and invalidates D-cache by address
+ /// Cleans an object from the D-cache.
+ ///
+ /// * `obj`: The object to clean.
+ ///
+ /// Cleans D-cache starting from the first cache line containing `obj`,
+ /// continuing to clean cache lines until all of `obj` has been cleaned.
+ ///
+    /// It is recommended that `obj` is aligned to the cache line size and that its size is a
+    /// multiple of the cache line size, otherwise surrounding data will also be cleaned.
+ ///
+ /// Cleaning the cache causes whatever data is present in the cache to be immediately written
+ /// to main memory, overwriting whatever was in main memory.
+ #[inline]
+ pub fn clean_dcache_by_ref<T>(&mut self, obj: &T) {
+ self.clean_dcache_by_address(obj as *const T as usize, core::mem::size_of::<T>());
+ }
+
+ /// Cleans a slice from D-cache.
+ ///
+ /// * `slice`: The slice to clean.
+ ///
+ /// Cleans D-cache starting from the first cache line containing members of `slice`,
+ /// continuing to clean cache lines until all of `slice` has been cleaned.
+ ///
+    /// It is recommended that `slice` is aligned to the cache line size and that its size is a
+    /// multiple of the cache line size, otherwise surrounding data will also be cleaned.
+ ///
+ /// Cleaning the cache causes whatever data is present in the cache to be immediately written
+ /// to main memory, overwriting whatever was in main memory.
+ #[inline]
+ pub fn clean_dcache_by_slice<T>(&mut self, slice: &[T]) {
+ self.clean_dcache_by_address(
+ slice.as_ptr() as usize,
+ slice.len() * core::mem::size_of::<T>(),
+ );
+ }
+
+ /// Cleans and invalidates D-cache by address.
+ ///
+ /// * `addr`: The address to clean and invalidate.
+ /// * `size`: The number of bytes to clean and invalidate.
+ ///
+ /// Cleans and invalidates D-cache starting from the first cache line containing `addr`,
+ /// finishing once at least `size` bytes have been cleaned and invalidated.
///
- /// `addr`: the address to clean and invalidate
- /// `size`: size of the memory block, in number of bytes
+ /// It is recommended that `addr` is aligned to the cache line size and `size` is a multiple of
+ /// the cache line size, otherwise surrounding data will also be cleaned.
///
- /// Cleans and invalidates cache starting from the lowest 32-byte aligned address represented
- /// by `addr`, in blocks of 32 bytes until at least `size` bytes have been cleaned and
- /// invalidated.
+ /// Cleaning and invalidating causes data in the D-cache to be written back to main memory,
+ /// and then marks that data in the D-cache as invalid, causing future reads to first fetch
+ /// from main memory.
#[inline]
pub fn clean_invalidate_dcache_by_address(&mut self, addr: usize, size: usize) {
// No-op zero sized operations
@@ -567,7 +761,7 @@ impl SCB {
return;
}
- // NOTE(unsafe) All CBP registers are write-only and stateless
+ // NOTE(unsafe): No races as all CBP registers are write-only and stateless
let mut cbp = unsafe { CBP::new() };
crate::asm::dsb();
@@ -709,57 +903,38 @@ impl SCB {
/// System handlers, exceptions with configurable priority
#[allow(clippy::missing_inline_in_public_items)]
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
+#[repr(u8)]
pub enum SystemHandler {
// NonMaskableInt, // priority is fixed
// HardFault, // priority is fixed
/// Memory management interrupt (not present on Cortex-M0 variants)
#[cfg(not(armv6m))]
- MemoryManagement,
+ MemoryManagement = 4,
/// Bus fault interrupt (not present on Cortex-M0 variants)
#[cfg(not(armv6m))]
- BusFault,
+ BusFault = 5,
/// Usage fault interrupt (not present on Cortex-M0 variants)
#[cfg(not(armv6m))]
- UsageFault,
+ UsageFault = 6,
/// Secure fault interrupt (only on ARMv8-M)
#[cfg(any(armv8m, target_arch = "x86_64"))]
- SecureFault,
+ SecureFault = 7,
/// SV call interrupt
- SVCall,
+ SVCall = 11,
/// Debug monitor interrupt (not present on Cortex-M0 variants)
#[cfg(not(armv6m))]
- DebugMonitor,
+ DebugMonitor = 12,
/// Pend SV interrupt
- PendSV,
+ PendSV = 14,
/// System Tick interrupt
- SysTick,
-}
-
-impl SystemHandler {
- fn index(self) -> u8 {
- match self {
- #[cfg(not(armv6m))]
- SystemHandler::MemoryManagement => 4,
- #[cfg(not(armv6m))]
- SystemHandler::BusFault => 5,
- #[cfg(not(armv6m))]
- SystemHandler::UsageFault => 6,
- #[cfg(any(armv8m, target_arch = "x86_64"))]
- SystemHandler::SecureFault => 7,
- SystemHandler::SVCall => 11,
- #[cfg(not(armv6m))]
- SystemHandler::DebugMonitor => 12,
- SystemHandler::PendSV => 14,
- SystemHandler::SysTick => 15,
- }
- }
+ SysTick = 15,
}
impl SCB {
@@ -769,18 +944,28 @@ impl SCB {
/// [`NVIC.get_priority`](struct.NVIC.html#method.get_priority) for more details.
#[inline]
pub fn get_priority(system_handler: SystemHandler) -> u8 {
- let index = system_handler.index();
+ let index = system_handler as u8;
#[cfg(not(armv6m))]
{
// NOTE(unsafe) atomic read with no side effects
- unsafe { (*Self::ptr()).shpr[usize::from(index - 4)].read() }
+
+ // NOTE(unsafe): Index is bounded to [4,15] by SystemHandler design.
+            // TODO: Review this after rust-lang/rust#13926 is fixed.
+            let priority_ref =
+                unsafe { (*Self::ptr()).shpr.get_unchecked(usize::from(index - 4)) };
+
+ priority_ref.read()
}
#[cfg(armv6m)]
{
// NOTE(unsafe) atomic read with no side effects
- let shpr = unsafe { (*Self::ptr()).shpr[usize::from((index - 8) / 4)].read() };
+
+ // NOTE(unsafe): Index is bounded to [11,15] by SystemHandler design.
+            // TODO: Review this after rust-lang/rust#13926 is fixed.
+            let priority_ref =
+                unsafe { (*Self::ptr()).shpr.get_unchecked(usize::from((index - 8) / 4)) };
+
+ let shpr = priority_ref.read();
let prio = (shpr >> (8 * (index % 4))) & 0x0000_00ff;
prio as u8
}
@@ -800,16 +985,24 @@ impl SCB {
/// [`register::basepri`](../register/basepri/index.html)) and compromise memory safety.
#[inline]
pub unsafe fn set_priority(&mut self, system_handler: SystemHandler, prio: u8) {
- let index = system_handler.index();
+ let index = system_handler as u8;
#[cfg(not(armv6m))]
{
- self.shpr[usize::from(index - 4)].write(prio)
+ // NOTE(unsafe): Index is bounded to [4,15] by SystemHandler design.
+            // TODO: Review this after rust-lang/rust#13926 is fixed.
+ let priority_ref = (*Self::ptr()).shpr.get_unchecked(usize::from(index - 4));
+
+ priority_ref.write(prio)
}
#[cfg(armv6m)]
{
- self.shpr[usize::from((index - 8) / 4)].modify(|value| {
+ // NOTE(unsafe): Index is bounded to [11,15] by SystemHandler design.
+            // TODO: Review this after rust-lang/rust#13926 is fixed.
+ let priority_ref = (*Self::ptr()).shpr.get_unchecked(usize::from((index - 8) / 4));
+
+ priority_ref.modify(|value| {
let shift = 8 * (index % 4);
let mask = 0x0000_00ff << shift;
let prio = u32::from(prio) << shift;
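
To tie the new cache-maintenance API together, a DMA-style sketch (the buffer, its alignment, and the DMA engine are assumptions, not from this commit); note that invalidation is now `unsafe`, per the documentation above.

```rust
use cortex_m::peripheral::Peripherals;

// One cache line on Cortex-M7: 32 bytes, 32-byte aligned.
#[repr(align(32))]
struct Aligned([u8; 32]);

let mut p = Peripherals::take().unwrap();
let mut cpuid = p.CPUID;
p.SCB.enable_dcache(&mut cpuid);

let mut buf = Aligned([0u8; 32]);

// Write cached data back to RAM before a DMA engine reads the buffer...
p.SCB.clean_dcache_by_slice(&buf.0);
// ...and discard stale cache lines after the DMA has written new data.
// Sound here only because the buffer is line-aligned and exactly one line long.
unsafe { p.SCB.invalidate_dcache_by_slice(&mut buf.0) };
```
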
diff --git a/src/peripheral/tpiu.rs b/src/peripheral/tpiu.rs
index 4115bb3..11cb79e 100644
--- a/src/peripheral/tpiu.rs
+++ b/src/peripheral/tpiu.rs
@@ -1,6 +1,6 @@
//! Trace Port Interface Unit;
//!
-//! *NOTE* Available only on ARMv7-M (`thumbv7*m-none-eabi*`)
+//! *NOTE* Not available on Armv6-M.
use volatile_register::{RO, RW, WO};
diff --git a/triagebot.toml b/triagebot.toml
new file mode 100644
index 0000000..fa0824a
--- /dev/null
+++ b/triagebot.toml
@@ -0,0 +1 @@
+[assign]