| author | 2020-04-21 21:58:44 +0200 |
|---|---|
| committer | 2020-04-21 21:58:44 +0200 |
| commit | 03a788a4e234ddc6a38c5545417928b8cbe62a05 (patch) |
| tree | b17e8b72b8792a4c3cb5ba4a1b56cf7b5fbc35a4 /src/peripheral |
| parent | b4635839218108d4c68158ad38ca26e063137f79 (diff) |
| parent | e41b27331c70865b89b5584b13c0b469de30daff (diff) |
Merge branch 'master' into mutex_add
Diffstat (limited to 'src/peripheral')
-rw-r--r-- | src/peripheral/cbp.rs | 48
-rw-r--r-- | src/peripheral/cpuid.rs | 24
-rw-r--r-- | src/peripheral/fpb.rs | 2
-rw-r--r-- | src/peripheral/fpu.rs | 2
-rw-r--r-- | src/peripheral/itm.rs | 2
-rw-r--r-- | src/peripheral/mod.rs | 119
-rw-r--r-- | src/peripheral/sau.rs | 243
-rw-r--r-- | src/peripheral/scb.rs | 399
-rw-r--r-- | src/peripheral/tpiu.rs | 2
9 files changed, 661 insertions, 180 deletions
diff --git a/src/peripheral/cbp.rs b/src/peripheral/cbp.rs
index 8d82e2a..5aee544 100644
--- a/src/peripheral/cbp.rs
+++ b/src/peripheral/cbp.rs
@@ -1,6 +1,6 @@
 //! Cache and branch predictor maintenance operations
 //!
-//! *NOTE* Available only on ARMv7-M (`thumbv7*m-none-eabi*`)
+//! *NOTE* Not available on Armv6-M.
 
 use volatile_register::WO;
 
@@ -39,34 +39,28 @@ const CBP_SW_SET_MASK: u32 = 0x1FF << CBP_SW_SET_POS;
 
 impl CBP {
     /// I-cache invalidate all to PoU
-    #[inline]
+    #[inline(always)]
     pub fn iciallu(&mut self) {
-        unsafe {
-            self.iciallu.write(0);
-        }
+        unsafe { self.iciallu.write(0) };
     }
 
     /// I-cache invalidate by MVA to PoU
-    #[inline]
+    #[inline(always)]
    pub fn icimvau(&mut self, mva: u32) {
-        unsafe {
-            self.icimvau.write(mva);
-        }
+        unsafe { self.icimvau.write(mva) };
     }
 
     /// D-cache invalidate by MVA to PoC
-    #[inline]
-    pub fn dcimvac(&mut self, mva: u32) {
-        unsafe {
-            self.dcimvac.write(mva);
-        }
+    #[inline(always)]
+    pub unsafe fn dcimvac(&mut self, mva: u32) {
+        self.dcimvac.write(mva);
     }
 
     /// D-cache invalidate by set-way
     ///
     /// `set` is masked to be between 0 and 3, and `way` between 0 and 511.
-    #[inline]
-    pub fn dcisw(&mut self, set: u16, way: u16) {
+    #[inline(always)]
+    pub unsafe fn dcisw(&mut self, set: u16, way: u16) {
         // The ARMv7-M Architecture Reference Manual, as of Revision E.b, says these set/way
         // operations have a register data format which depends on the implementation's
         // associativity and number of sets. Specifically the 'way' and 'set' fields have
@@ -76,16 +70,14 @@ impl CBP {
         // Generic User Guide section 4.8.3. Since no other ARMv7-M implementations except the
         // Cortex-M7 have a DCACHE or ICACHE at all, it seems safe to do the same thing as the
         // CMSIS-Core implementation and use fixed values.
-        unsafe {
-            self.dcisw.write(
-                ((u32::from(way) & (CBP_SW_WAY_MASK >> CBP_SW_WAY_POS)) << CBP_SW_WAY_POS)
-                    | ((u32::from(set) & (CBP_SW_SET_MASK >> CBP_SW_SET_POS)) << CBP_SW_SET_POS),
-            );
-        }
+        self.dcisw.write(
+            ((u32::from(way) & (CBP_SW_WAY_MASK >> CBP_SW_WAY_POS)) << CBP_SW_WAY_POS)
+                | ((u32::from(set) & (CBP_SW_SET_MASK >> CBP_SW_SET_POS)) << CBP_SW_SET_POS),
+        );
     }
 
     /// D-cache clean by MVA to PoU
-    #[inline]
+    #[inline(always)]
     pub fn dccmvau(&mut self, mva: u32) {
         unsafe {
             self.dccmvau.write(mva);
@@ -93,7 +85,7 @@ impl CBP {
     }
 
     /// D-cache clean by MVA to PoC
-    #[inline]
+    #[inline(always)]
     pub fn dccmvac(&mut self, mva: u32) {
         unsafe {
             self.dccmvac.write(mva);
@@ -103,7 +95,7 @@ impl CBP {
     /// D-cache clean by set-way
     ///
     /// `set` is masked to be between 0 and 3, and `way` between 0 and 511.
-    #[inline]
+    #[inline(always)]
     pub fn dccsw(&mut self, set: u16, way: u16) {
         // See comment for dcisw() about the format here
         unsafe {
@@ -115,7 +107,7 @@ impl CBP {
     }
 
     /// D-cache clean and invalidate by MVA to PoC
-    #[inline]
+    #[inline(always)]
     pub fn dccimvac(&mut self, mva: u32) {
         unsafe {
             self.dccimvac.write(mva);
@@ -125,7 +117,7 @@ impl CBP {
     /// D-cache clean and invalidate by set-way
     ///
     /// `set` is masked to be between 0 and 3, and `way` between 0 and 511.
-    #[inline]
+    #[inline(always)]
     pub fn dccisw(&mut self, set: u16, way: u16) {
         // See comment for dcisw() about the format here
         unsafe {
@@ -137,7 +129,7 @@ impl CBP {
     }
 
     /// Branch predictor invalidate all
-    #[inline]
+    #[inline(always)]
     pub fn bpiall(&mut self) {
         unsafe {
             self.bpiall.write(0);
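The fixed-format set/way encoding that `dcisw()`/`dccsw()` use can be checked with plain integer arithmetic. A minimal sketch (no hardware access; the field positions are assumptions taken from the CMSIS-Core DCISW layout — set at bits [13:5], way at bits [31:30] — since only `CBP_SW_SET_MASK` is visible in this diff):

```rust
// Assumed field positions, mirroring the constants at the top of cbp.rs.
const CBP_SW_WAY_POS: u32 = 30;
const CBP_SW_WAY_MASK: u32 = 0x3 << CBP_SW_WAY_POS;
const CBP_SW_SET_POS: u32 = 5;
const CBP_SW_SET_MASK: u32 = 0x1FF << CBP_SW_SET_POS;

/// Pack `set` and `way` into one register word, exactly as dcisw() does.
fn encode(set: u16, way: u16) -> u32 {
    ((u32::from(way) & (CBP_SW_WAY_MASK >> CBP_SW_WAY_POS)) << CBP_SW_WAY_POS)
        | ((u32::from(set) & (CBP_SW_SET_MASK >> CBP_SW_SET_POS)) << CBP_SW_SET_POS)
}

fn main() {
    // Way 3, set 255 on a Cortex-M7 D-cache:
    assert_eq!(encode(255, 3), (3u32 << 30) | (255u32 << 5));
}
```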
diff --git a/src/peripheral/cpuid.rs b/src/peripheral/cpuid.rs
index 787be5c..32d0baf 100644
--- a/src/peripheral/cpuid.rs
+++ b/src/peripheral/cpuid.rs
@@ -114,4 +114,28 @@ impl CPUID {
             (1 + ((ccsidr & CCSIDR_ASSOCIATIVITY_MASK) >> CCSIDR_ASSOCIATIVITY_POS)) as u16,
         )
     }
+
+    /// Returns log2 of the number of words in the smallest cache line of all the data cache and
+    /// unified caches that are controlled by the processor.
+    ///
+    /// This is the `DminLine` field of the CTR register.
+    #[inline(always)]
+    pub fn cache_dminline() -> u32 {
+        const CTR_DMINLINE_POS: u32 = 16;
+        const CTR_DMINLINE_MASK: u32 = 0xF << CTR_DMINLINE_POS;
+        let ctr = unsafe { (*Self::ptr()).ctr.read() };
+        (ctr & CTR_DMINLINE_MASK) >> CTR_DMINLINE_POS
+    }
+
+    /// Returns log2 of the number of words in the smallest cache line of all the instruction
+    /// caches that are controlled by the processor.
+    ///
+    /// This is the `IminLine` field of the CTR register.
+    #[inline(always)]
+    pub fn cache_iminline() -> u32 {
+        const CTR_IMINLINE_POS: u32 = 0;
+        const CTR_IMINLINE_MASK: u32 = 0xF << CTR_IMINLINE_POS;
+        let ctr = unsafe { (*Self::ptr()).ctr.read() };
+        (ctr & CTR_IMINLINE_MASK) >> CTR_IMINLINE_POS
+    }
 }
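`DminLine`/`IminLine` are log2 of a word count, so the line size in bytes follows from a shift and a multiply by the 4-byte word size — the same arithmetic `scb.rs` uses further down. A standalone sketch:

```rust
/// Convert a CTR DminLine/IminLine value (log2 of the number of 4-byte
/// words per line) into a cache line size in bytes.
fn line_size_bytes(minline: u32) -> u32 {
    (1u32 << minline) * 4
}

fn main() {
    // A Cortex-M7 reports DminLine = 3: 2^3 words * 4 bytes = 32-byte lines.
    assert_eq!(line_size_bytes(3), 32);
}
```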
diff --git a/src/peripheral/fpb.rs b/src/peripheral/fpb.rs
index 215d4ff..b86b8b2 100644
--- a/src/peripheral/fpb.rs
+++ b/src/peripheral/fpb.rs
@@ -1,6 +1,6 @@
 //! Flash Patch and Breakpoint unit
 //!
-//! *NOTE* Available only on ARMv7-M (`thumbv7*m-none-eabi*`)
+//! *NOTE* Not available on Armv6-M.
 
 use volatile_register::{RO, RW, WO};
 
diff --git a/src/peripheral/fpu.rs b/src/peripheral/fpu.rs
index c4e8a1d..9a047d8 100644
--- a/src/peripheral/fpu.rs
+++ b/src/peripheral/fpu.rs
@@ -1,6 +1,6 @@
 //! Floating Point Unit
 //!
-//! *NOTE* Available only on ARMv7E-M (`thumbv7em-none-eabihf`)
+//! *NOTE* Available only on targets with a Floating Point Unit (FPU) extension.
 
 use volatile_register::{RO, RW};
 
diff --git a/src/peripheral/itm.rs b/src/peripheral/itm.rs
index 30c7e47..0b63524 100644
--- a/src/peripheral/itm.rs
+++ b/src/peripheral/itm.rs
@@ -1,6 +1,6 @@
 //! Instrumentation Trace Macrocell
 //!
-//! *NOTE* Available only on ARMv7-M (`thumbv7*m-none-eabi*`)
+//! *NOTE* Not available on Armv6-M and Armv8-M Baseline.
 
 use core::cell::UnsafeCell;
 use core::ptr;
 
diff --git a/src/peripheral/mod.rs b/src/peripheral/mod.rs
index 8854830..04fae31 100644
--- a/src/peripheral/mod.rs
+++ b/src/peripheral/mod.rs
@@ -1,5 +1,4 @@
-#![allow(clippy::needless_doctest_main)]
-//! Core peripherals
+//! Core peripherals.
 //!
 //! # API
 //!
@@ -9,41 +8,32 @@
 //! the [`Peripherals::take`](struct.Peripherals.html#method.take) method.
 //!
 //! ``` no_run
-//! use cortex_m::peripheral::Peripherals;
-//!
-//! fn main() {
-//!     let mut peripherals = Peripherals::take().unwrap();
-//!     peripherals.DWT.enable_cycle_counter();
-//! }
+//! # use cortex_m::peripheral::Peripherals;
+//! let mut peripherals = Peripherals::take().unwrap();
+//! peripherals.DWT.enable_cycle_counter();
 //! ```
 //!
 //! This method can only be successfully called *once* -- this is why the method returns an
 //! `Option`. Subsequent calls to the method will result in a `None` value being returned.
 //!
-//! ``` no_run
-//! use cortex_m::peripheral::Peripherals;
-//!
-//! fn main() {
-//!     let ok = Peripherals::take().unwrap();
-//!     let panics = Peripherals::take().unwrap();
-//! }
+//! ``` no_run, should_panic
+//! # use cortex_m::peripheral::Peripherals;
+//! let ok = Peripherals::take().unwrap();
+//! let panics = Peripherals::take().unwrap();
 //! ```
 //! A part of the peripheral API doesn't require access to a peripheral instance. This part of the
 //! API is provided as static methods on the peripheral types. One example is the
 //! [`DWT::get_cycle_count`](struct.DWT.html#method.get_cycle_count) method.
 //!
 //! ``` no_run
-//! use cortex_m::peripheral::{DWT, Peripherals};
-//!
-//! fn main() {
-//!     {
-//!         let mut peripherals = Peripherals::take().unwrap();
-//!         peripherals.DWT.enable_cycle_counter();
-//!     } // all the peripheral singletons are destroyed here
+//! # use cortex_m::peripheral::{DWT, Peripherals};
+//! {
+//!     let mut peripherals = Peripherals::take().unwrap();
+//!     peripherals.DWT.enable_cycle_counter();
+//! } // all the peripheral singletons are destroyed here
 //!
-//!     // but this method can be called without a DWT instance
-//!     let cyccnt = DWT::get_cycle_count();
-//! }
+//! // but this method can be called without a DWT instance
+//! let cyccnt = DWT::get_cycle_count();
 //! ```
 //!
 //! The singleton property can be *unsafely* bypassed using the `ptr` static method which is
@@ -51,17 +41,14 @@
 //! safe higher level abstractions.
 //!
 //! ``` no_run
-//! use cortex_m::peripheral::{DWT, Peripherals};
-//!
-//! fn main() {
-//!     {
-//!         let mut peripherals = Peripherals::take().unwrap();
-//!         peripherals.DWT.enable_cycle_counter();
-//!     } // all the peripheral singletons are destroyed here
+//! # use cortex_m::peripheral::{DWT, Peripherals};
+//! {
+//!     let mut peripherals = Peripherals::take().unwrap();
+//!     peripherals.DWT.enable_cycle_counter();
+//! } // all the peripheral singletons are destroyed here
 //!
-//!     // actually safe because this is an atomic read with no side effects
-//!     let cyccnt = unsafe { (*DWT::ptr()).cyccnt.read() };
-//! }
+//! // actually safe because this is an atomic read with no side effects
+//! let cyccnt = unsafe { (*DWT::ptr()).cyccnt.read() };
 //! ```
 //!
 //! # References
@@ -70,7 +57,6 @@
 
 // TODO stand-alone registers: ICTR, ACTLR and STIR
-
 use core::marker::PhantomData;
 use core::ops;
 
@@ -86,10 +72,12 @@ pub mod fpb;
 // NOTE(target_arch) is for documentation purposes
 #[cfg(any(has_fpu, target_arch = "x86_64"))]
 pub mod fpu;
-#[cfg(not(armv6m))]
+#[cfg(all(not(armv6m), not(armv8m_base)))]
 pub mod itm;
 pub mod mpu;
 pub mod nvic;
+#[cfg(armv8m)]
+pub mod sau;
 pub mod scb;
 pub mod syst;
 #[cfg(not(armv6m))]
@@ -103,7 +91,8 @@ mod test;
 
 /// Core peripherals
 #[allow(non_snake_case)]
 pub struct Peripherals {
-    /// Cache and branch predictor maintenance operations (not present on Cortex-M0 variants)
+    /// Cache and branch predictor maintenance operations.
+    /// Not available on Armv6-M.
     pub CBP: CBP,
 
     /// CPUID
@@ -115,13 +104,15 @@ pub struct Peripherals {
     /// Data Watchpoint and Trace unit
     pub DWT: DWT,
 
-    /// Flash Patch and Breakpoint unit (not present on Cortex-M0 variants)
+    /// Flash Patch and Breakpoint unit.
+    /// Not available on Armv6-M.
     pub FPB: FPB,
 
-    /// Floating Point Unit (only present on `thumbv7em-none-eabihf`)
+    /// Floating Point Unit.
     pub FPU: FPU,
 
-    /// Instrumentation Trace Macrocell (not present on Cortex-M0 variants)
+    /// Instrumentation Trace Macrocell.
+    /// Not available on Armv6-M and Armv8-M Baseline.
     pub ITM: ITM,
 
     /// Memory Protection Unit
@@ -130,14 +121,22 @@ pub struct Peripherals {
 
     /// Nested Vector Interrupt Controller
     pub NVIC: NVIC,
 
+    /// Security Attribution Unit
+    pub SAU: SAU,
+
     /// System Control Block
     pub SCB: SCB,
 
     /// SysTick: System Timer
     pub SYST: SYST,
 
-    /// Trace Port Interface Unit (not present on Cortex-M0 variants)
+    /// Trace Port Interface Unit.
+    /// Not available on Armv6-M.
     pub TPIU: TPIU,
+
+    // Private field making `Peripherals` non-exhaustive. We don't use `#[non_exhaustive]` so we
+    // can support older Rust versions.
+    _priv: (),
 }
 
 // NOTE `no_mangle` is used here to prevent linking different minor versions of this crate as that
@@ -191,6 +190,9 @@ impl Peripherals {
             NVIC: NVIC {
                 _marker: PhantomData,
             },
+            SAU: SAU {
+                _marker: PhantomData,
+            },
             SCB: SCB {
                 _marker: PhantomData,
             },
@@ -200,6 +202,7 @@ impl Peripherals {
             TPIU: TPIU {
                 _marker: PhantomData,
             },
+            _priv: (),
         }
     }
 }
@@ -368,7 +371,7 @@ pub struct ITM {
 
 unsafe impl Send for ITM {}
 
-#[cfg(not(armv6m))]
+#[cfg(all(not(armv6m), not(armv8m_base)))]
 impl ITM {
     /// Returns a pointer to the register block
     #[inline(always)]
@@ -377,7 +380,7 @@
     }
 }
 
-#[cfg(not(armv6m))]
+#[cfg(all(not(armv6m), not(armv8m_base)))]
 impl ops::Deref for ITM {
     type Target = self::itm::RegisterBlock;
 
@@ -387,7 +390,7 @@ impl ops::Deref for ITM {
     }
 }
 
-#[cfg(not(armv6m))]
+#[cfg(all(not(armv6m), not(armv8m_base)))]
 impl ops::DerefMut for ITM {
     #[inline(always)]
     fn deref_mut(&mut self) -> &mut Self::Target {
@@ -443,6 +446,32 @@ impl ops::Deref for NVIC {
     }
 }
 
+/// Security Attribution Unit
+pub struct SAU {
+    _marker: PhantomData<*const ()>,
+}
+
+unsafe impl Send for SAU {}
+
+#[cfg(armv8m)]
+impl SAU {
+    /// Returns a pointer to the register block
+    #[inline(always)]
+    pub fn ptr() -> *const sau::RegisterBlock {
+        0xE000_EDD0 as *const _
+    }
+}
+
+#[cfg(armv8m)]
+impl ops::Deref for SAU {
+    type Target = self::sau::RegisterBlock;
+
+    #[inline(always)]
+    fn deref(&self) -> &Self::Target {
+        unsafe { &*Self::ptr() }
+    }
+}
+
 /// System Control Block
 pub struct SCB {
     _marker: PhantomData<*const ()>,
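The `_priv: ()` field added above is a common pre-`#[non_exhaustive]` pattern: because the field is private, downstream crates can neither construct a `Peripherals` literal nor exhaustively destructure it, so new fields (like `SAU` here) can be added without a breaking change. A minimal sketch of the idea, with made-up names:

```rust
mod peripherals {
    #[allow(non_snake_case)]
    pub struct Peripherals {
        pub SYST: u32, // stand-in for the real singleton fields
        _priv: (),     // private: only this module can construct the struct
    }

    pub fn take() -> Peripherals {
        Peripherals { SYST: 0, _priv: () }
    }
}

fn main() {
    let p = peripherals::take();
    let _ = p.SYST;
    // This would not compile outside the `peripherals` module,
    // which is exactly the point:
    // let p2 = peripherals::Peripherals { SYST: 0, _priv: () };
}
```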
diff --git a/src/peripheral/sau.rs b/src/peripheral/sau.rs
new file mode 100644
index 0000000..da91aca
--- /dev/null
+++ b/src/peripheral/sau.rs
@@ -0,0 +1,243 @@
+//! Security Attribution Unit
+//!
+//! *NOTE* Available only on Armv8-M and Armv8.1-M, for the following Rust target triples:
+//! * `thumbv8m.base-none-eabi`
+//! * `thumbv8m.main-none-eabi`
+//! * `thumbv8m.main-none-eabihf`
+//!
+//! For reference please check the section B8.3 of the Armv8-M Architecture Reference Manual.
+
+use crate::interrupt;
+use crate::peripheral::SAU;
+use bitfield::bitfield;
+use volatile_register::{RO, RW};
+
+/// Register block
+#[repr(C)]
+pub struct RegisterBlock {
+    /// Control Register
+    pub ctrl: RW<Ctrl>,
+    /// Type Register
+    pub _type: RO<Type>,
+    /// Region Number Register
+    pub rnr: RW<Rnr>,
+    /// Region Base Address Register
+    pub rbar: RW<Rbar>,
+    /// Region Limit Address Register
+    pub rlar: RW<Rlar>,
+    /// Secure Fault Status Register
+    pub sfsr: RO<Sfsr>,
+    /// Secure Fault Address Register
+    pub sfar: RO<Sfar>,
+}
+
+bitfield! {
+    /// Control Register description
+    #[repr(C)]
+    #[derive(Copy, Clone)]
+    pub struct Ctrl(u32);
+    get_enable, set_enable: 0;
+    get_allns, set_allns: 1;
+}
+
+bitfield! {
+    /// Type Register description
+    #[repr(C)]
+    #[derive(Copy, Clone)]
+    pub struct Type(u32);
+    u8;
+    sregion, _: 7, 0;
+}
+
+bitfield! {
+    /// Region Number Register description
+    #[repr(C)]
+    #[derive(Copy, Clone)]
+    pub struct Rnr(u32);
+    u8;
+    get_region, set_region: 7, 0;
+}
+
+bitfield! {
+    /// Region Base Address Register description
+    #[repr(C)]
+    #[derive(Copy, Clone)]
+    pub struct Rbar(u32);
+    u32;
+    get_baddr, set_baddr: 31, 5;
+}
+
+bitfield! {
+    /// Region Limit Address Register description
+    #[repr(C)]
+    #[derive(Copy, Clone)]
+    pub struct Rlar(u32);
+    u32;
+    get_laddr, set_laddr: 31, 5;
+    get_nsc, set_nsc: 1;
+    get_enable, set_enable: 0;
+}
+
+bitfield! {
+    /// Secure Fault Status Register description
+    #[repr(C)]
+    #[derive(Copy, Clone)]
+    pub struct Sfsr(u32);
+    invep, _: 0;
+    invis, _: 1;
+    inver, _: 2;
+    auviol, _: 3;
+    invtran, _: 4;
+    lsperr, _: 5;
+    sfarvalid, _: 6;
+    lserr, _: 7;
+}
+
+bitfield! {
+    /// Secure Fault Address Register description
+    #[repr(C)]
+    #[derive(Copy, Clone)]
+    pub struct Sfar(u32);
+    u32;
+    address, _: 31, 0;
+}
+
+/// Possible attribute of a SAU region.
+#[derive(Debug)]
+pub enum SauRegionAttribute {
+    /// SAU region is Secure
+    Secure,
+    /// SAU region is Non-Secure Callable
+    NonSecureCallable,
+    /// SAU region is Non-Secure
+    NonSecure,
+}
+
+/// Description of a SAU region.
+#[derive(Debug)]
+pub struct SauRegion {
+    /// First address of the region, its 5 least significant bits must be set to zero.
+    pub base_address: u32,
+    /// Last address of the region, its 5 least significant bits must be set to one.
+    pub limit_address: u32,
+    /// Attribute of the region.
+    pub attribute: SauRegionAttribute,
+}
+
+/// Possible error values returned by the SAU methods.
+#[derive(Debug)]
+pub enum SauError {
+    /// The region number parameter to set or get a region must be between 0 and
+    /// region_numbers() - 1.
+    RegionNumberTooBig,
+    /// Bits 0 to 4 of the base address of a SAU region must be set to zero.
+    WrongBaseAddress,
+    /// Bits 0 to 4 of the limit address of a SAU region must be set to one.
+    WrongLimitAddress,
+}
+
+impl SAU {
+    /// Get the number of implemented SAU regions.
+    #[inline]
+    pub fn region_numbers(&self) -> u8 {
+        self._type.read().sregion()
+    }
+
+    /// Enable the SAU.
+    #[inline]
+    pub fn enable(&mut self) {
+        unsafe {
+            self.ctrl.modify(|mut ctrl| {
+                ctrl.set_enable(true);
+                ctrl
+            });
+        }
+    }
+
+    /// Set a SAU region to a region number.
+    /// SAU regions must be 32 bytes aligned and their sizes must be a multiple of 32 bytes. It
+    /// means that the 5 least significant bits of the base address of a SAU region must be set to
+    /// zero and the 5 least significant bits of the limit address must be set to one.
+    /// The region number must be valid.
+    /// This function is executed under a critical section to prevent having inconsistent results.
+    #[inline]
+    pub fn set_region(&mut self, region_number: u8, region: SauRegion) -> Result<(), SauError> {
+        interrupt::free(|_| {
+            let base_address = region.base_address;
+            let limit_address = region.limit_address;
+            let attribute = region.attribute;
+
+            if region_number >= self.region_numbers() {
+                Err(SauError::RegionNumberTooBig)
+            } else if base_address & 0x1F != 0 {
+                Err(SauError::WrongBaseAddress)
+            } else if limit_address & 0x1F != 0x1F {
+                Err(SauError::WrongLimitAddress)
+            } else {
+                // All fields of these registers are going to be modified so we don't need to read them
+                // before.
+                let mut rnr = Rnr(0);
+                let mut rbar = Rbar(0);
+                let mut rlar = Rlar(0);
+
+                rnr.set_region(region_number);
+                rbar.set_baddr(base_address >> 5);
+                rlar.set_laddr(limit_address >> 5);
+
+                match attribute {
+                    SauRegionAttribute::Secure => {
+                        rlar.set_nsc(false);
+                        rlar.set_enable(false);
+                    }
+                    SauRegionAttribute::NonSecureCallable => {
+                        rlar.set_nsc(true);
+                        rlar.set_enable(true);
+                    }
+                    SauRegionAttribute::NonSecure => {
+                        rlar.set_nsc(false);
+                        rlar.set_enable(true);
+                    }
+                }
+
+                unsafe {
+                    self.rnr.write(rnr);
+                    self.rbar.write(rbar);
+                    self.rlar.write(rlar);
+                }
+
+                Ok(())
+            }
+        })
+    }
+
+    /// Get a region from the SAU.
+    /// The region number must be valid.
+    /// This function is executed under a critical section to prevent having inconsistent results.
+    #[inline]
+    pub fn get_region(&mut self, region_number: u8) -> Result<SauRegion, SauError> {
+        interrupt::free(|_| {
+            if region_number >= self.region_numbers() {
+                Err(SauError::RegionNumberTooBig)
+            } else {
+                unsafe {
+                    self.rnr.write(Rnr(region_number.into()));
+                }
+
+                let rbar = self.rbar.read();
+                let rlar = self.rlar.read();
+
+                let attribute = match (rlar.get_enable(), rlar.get_nsc()) {
+                    (false, _) => SauRegionAttribute::Secure,
+                    (true, false) => SauRegionAttribute::NonSecure,
+                    (true, true) => SauRegionAttribute::NonSecureCallable,
+                };
+
+                Ok(SauRegion {
+                    base_address: rbar.get_baddr() << 5,
+                    limit_address: (rlar.get_laddr() << 5) | 0x1F,
+                    attribute,
+                })
+            }
+        })
+    }
+}
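Putting the new SAU API together, configuring a single Non-Secure region might look like the following sketch. The addresses are made-up placeholders, and this only compiles for an Armv8-M target where the SAU is present:

```rust
use cortex_m::peripheral::sau::{SauRegion, SauRegionAttribute};
use cortex_m::peripheral::Peripherals;

fn configure_sau() {
    let mut p = Peripherals::take().unwrap();

    // Hypothetical 64 KiB Non-Secure window: the base has its low 5 bits
    // clear and the limit has its low 5 bits set, as set_region() requires.
    p.SAU
        .set_region(
            0,
            SauRegion {
                base_address: 0x2000_0000,
                limit_address: 0x2000_FFFF,
                attribute: SauRegionAttribute::NonSecure,
            },
        )
        .unwrap();

    p.SAU.enable();
}
```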
diff --git a/src/peripheral/scb.rs b/src/peripheral/scb.rs
index b2f45c5..940809e 100644
--- a/src/peripheral/scb.rs
+++ b/src/peripheral/scb.rs
@@ -305,8 +305,8 @@ impl VectActive {
 
 #[cfg(not(armv6m))]
 mod scb_consts {
-    pub const SCB_CCR_IC_MASK: u32 = (1 << 17);
-    pub const SCB_CCR_DC_MASK: u32 = (1 << 16);
+    pub const SCB_CCR_IC_MASK: u32 = 1 << 17;
+    pub const SCB_CCR_DC_MASK: u32 = 1 << 16;
 }
 
 #[cfg(not(armv6m))]
@@ -314,105 +314,119 @@ use self::scb_consts::*;
 
 #[cfg(not(armv6m))]
 impl SCB {
-    /// Enables I-Cache if currently disabled
+    /// Enables I-cache if currently disabled.
+    ///
+    /// This operation first invalidates the entire I-cache.
     #[inline]
     pub fn enable_icache(&mut self) {
-        // Don't do anything if ICache is already enabled
+        // Don't do anything if I-cache is already enabled
         if Self::icache_enabled() {
             return;
         }
 
-        // NOTE(unsafe) All CBP registers are write-only and stateless
+        // NOTE(unsafe): No races as all CBP registers are write-only and stateless
         let mut cbp = unsafe { CBP::new() };
 
-        // Invalidate I-Cache
+        // Invalidate I-cache
         cbp.iciallu();
 
-        // Enable I-Cache
+        // Enable I-cache
+        // NOTE(unsafe): We have synchronised access by &mut self
         unsafe { self.ccr.modify(|r| r | SCB_CCR_IC_MASK) };
 
         crate::asm::dsb();
         crate::asm::isb();
     }
 
-    /// Disables I-Cache if currently enabled
+    /// Disables I-cache if currently enabled.
+    ///
+    /// This operation invalidates the entire I-cache after disabling.
     #[inline]
     pub fn disable_icache(&mut self) {
-        // Don't do anything if ICache is already disabled
+        // Don't do anything if I-cache is already disabled
        if !Self::icache_enabled() {
             return;
         }
 
-        // NOTE(unsafe) All CBP registers are write-only and stateless
+        // NOTE(unsafe): No races as all CBP registers are write-only and stateless
         let mut cbp = unsafe { CBP::new() };
 
-        // Disable I-Cache
+        // Disable I-cache
+        // NOTE(unsafe): We have synchronised access by &mut self
         unsafe { self.ccr.modify(|r| r & !SCB_CCR_IC_MASK) };
 
-        // Invalidate I-Cache
+        // Invalidate I-cache
         cbp.iciallu();
 
         crate::asm::dsb();
         crate::asm::isb();
     }
 
-    /// Returns whether the I-Cache is currently enabled
-    #[inline]
+    /// Returns whether the I-cache is currently enabled.
+    #[inline(always)]
     pub fn icache_enabled() -> bool {
         crate::asm::dsb();
         crate::asm::isb();
 
-        // NOTE(unsafe) atomic read with no side effects
+        // NOTE(unsafe): atomic read with no side effects
         unsafe { (*Self::ptr()).ccr.read() & SCB_CCR_IC_MASK == SCB_CCR_IC_MASK }
     }
 
-    /// Invalidates I-Cache
+    /// Invalidates the entire I-cache.
     #[inline]
     pub fn invalidate_icache(&mut self) {
-        // NOTE(unsafe) All CBP registers are write-only and stateless
+        // NOTE(unsafe): No races as all CBP registers are write-only and stateless
         let mut cbp = unsafe { CBP::new() };
 
-        // Invalidate I-Cache
+        // Invalidate I-cache
         cbp.iciallu();
 
         crate::asm::dsb();
         crate::asm::isb();
     }
 
-    /// Enables D-cache if currently disabled
+    /// Enables D-cache if currently disabled.
+    ///
+    /// This operation first invalidates the entire D-cache, ensuring it does
+    /// not contain stale values before being enabled.
     #[inline]
     pub fn enable_dcache(&mut self, cpuid: &mut CPUID) {
-        // Don't do anything if DCache is already enabled
+        // Don't do anything if D-cache is already enabled
         if Self::dcache_enabled() {
             return;
         }
 
-        // Invalidate anything currently in the DCache
-        self.invalidate_dcache(cpuid);
+        // Invalidate anything currently in the D-cache
+        unsafe { self.invalidate_dcache(cpuid) };
 
-        // Now turn on the DCache
+        // Now turn on the D-cache
+        // NOTE(unsafe): We have synchronised access by &mut self
         unsafe { self.ccr.modify(|r| r | SCB_CCR_DC_MASK) };
 
         crate::asm::dsb();
         crate::asm::isb();
     }
 
-    /// Disables D-cache if currently enabled
+    /// Disables D-cache if currently enabled.
+    ///
+    /// This operation subsequently cleans and invalidates the entire D-cache,
+    /// ensuring all contents are safely written back to main memory after disabling.
    #[inline]
     pub fn disable_dcache(&mut self, cpuid: &mut CPUID) {
-        // Don't do anything if DCache is already disabled
+        // Don't do anything if D-cache is already disabled
         if !Self::dcache_enabled() {
             return;
         }
 
-        // Turn off the DCache
+        // Turn off the D-cache
+        // NOTE(unsafe): We have synchronised access by &mut self
         unsafe { self.ccr.modify(|r| r & !SCB_CCR_DC_MASK) };
 
         // Clean and invalidate whatever was left in it
         self.clean_invalidate_dcache(cpuid);
     }
 
-    /// Returns whether the D-Cache is currently enabled
+    /// Returns whether the D-cache is currently enabled.
     #[inline]
     pub fn dcache_enabled() -> bool {
         crate::asm::dsb();
@@ -422,20 +436,21 @@ impl SCB {
         unsafe { (*Self::ptr()).ccr.read() & SCB_CCR_DC_MASK == SCB_CCR_DC_MASK }
     }
 
-    /// Invalidates D-cache
+    /// Invalidates the entire D-cache.
+    ///
+    /// Note that calling this while the dcache is enabled will probably wipe out the
+    /// stack, depending on optimisations, therefore breaking returning to the call point.
     ///
-    /// Note that calling this while the dcache is enabled will probably wipe out your
-    /// stack, depending on optimisations, breaking returning to the call point.
     /// It's used immediately before enabling the dcache, but not exported publicly.
     #[inline]
-    fn invalidate_dcache(&mut self, cpuid: &mut CPUID) {
-        // NOTE(unsafe) All CBP registers are write-only and stateless
-        let mut cbp = unsafe { CBP::new() };
+    unsafe fn invalidate_dcache(&mut self, cpuid: &mut CPUID) {
+        // NOTE(unsafe): No races as all CBP registers are write-only and stateless
+        let mut cbp = CBP::new();
 
         // Read number of sets and ways
         let (sets, ways) = cpuid.cache_num_sets_ways(0, CsselrCacheType::DataOrUnified);
 
-        // Invalidate entire D-Cache
+        // Invalidate entire D-cache
         for set in 0..sets {
             for way in 0..ways {
                 cbp.dcisw(set, way);
@@ -446,10 +461,13 @@ impl SCB {
         crate::asm::isb();
     }
 
-    /// Cleans D-cache
+    /// Cleans the entire D-cache.
+    ///
+    /// This function causes everything in the D-cache to be written back to main memory,
+    /// overwriting whatever is already there.
     #[inline]
     pub fn clean_dcache(&mut self, cpuid: &mut CPUID) {
-        // NOTE(unsafe) All CBP registers are write-only and stateless
+        // NOTE(unsafe): No races as all CBP registers are write-only and stateless
         let mut cbp = unsafe { CBP::new() };
 
         // Read number of sets and ways
@@ -465,10 +483,14 @@ impl SCB {
         crate::asm::isb();
     }
 
-    /// Cleans and invalidates D-cache
+    /// Cleans and invalidates the entire D-cache.
+    ///
+    /// This function causes everything in the D-cache to be written back to main memory,
+    /// and then marks the entire D-cache as invalid, causing future reads to first fetch
+    /// from main memory.
     #[inline]
     pub fn clean_invalidate_dcache(&mut self, cpuid: &mut CPUID) {
-        // NOTE(unsafe) All CBP registers are write-only and stateless
+        // NOTE(unsafe): No races as all CBP registers are write-only and stateless
         let mut cbp = unsafe { CBP::new() };
 
         // Read number of sets and ways
@@ -484,47 +506,175 @@ impl SCB {
         crate::asm::isb();
     }
 
-    /// Invalidates D-cache by address
+    /// Invalidates D-cache by address.
+    ///
+    /// * `addr`: The address to invalidate, which must be cache-line aligned.
+    /// * `size`: Number of bytes to invalidate, which must be a multiple of the cache line size.
+    ///
+    /// Invalidates D-cache cache lines, starting from the first line containing `addr`,
+    /// finishing once at least `size` bytes have been invalidated.
+    ///
+    /// Invalidation causes the next read access to memory to be fetched from main memory instead
+    /// of the cache.
     ///
-    /// `addr`: the address to invalidate
-    /// `size`: size of the memory block, in number of bytes
+    /// # Cache Line Sizes
     ///
-    /// Invalidates cache starting from the lowest 32-byte aligned address represented by `addr`,
-    /// in blocks of 32 bytes until at least `size` bytes have been invalidated.
+    /// Cache line sizes vary by core. For all Cortex-M7 cores, the cache line size is fixed
+    /// to 32 bytes, which means `addr` must be 32-byte aligned and `size` must be a multiple
+    /// of 32. At the time of writing, no other Cortex-M cores have data caches.
+    ///
+    /// If `addr` is not cache-line aligned, or `size` is not a multiple of the cache line size,
+    /// other data before or after the desired memory would also be invalidated, which can very
+    /// easily cause memory corruption and undefined behaviour.
+    ///
+    /// # Safety
+    ///
+    /// After invalidating, the next read of invalidated data will be from main memory. This may
+    /// cause recent writes to be lost, potentially including writes that initialized objects.
+    /// Therefore, this method may cause uninitialized memory or invalid values to be read,
+    /// resulting in undefined behaviour. You must ensure that main memory contains valid and
+    /// initialized values before invalidating.
+    ///
+    /// `addr` **must** be aligned to the size of the cache lines, and `size` **must** be a
+    /// multiple of the cache line size, otherwise this function will invalidate other memory,
+    /// easily leading to memory corruption and undefined behaviour. This precondition is checked
+    /// in debug builds using a `debug_assert!()`, but not checked in release builds to avoid
+    /// a runtime-dependent `panic!()` call.
     #[inline]
-    pub fn invalidate_dcache_by_address(&mut self, addr: usize, size: usize) {
+    pub unsafe fn invalidate_dcache_by_address(&mut self, addr: usize, size: usize) {
         // No-op zero sized operations
         if size == 0 {
             return;
         }
 
-        // NOTE(unsafe) All CBP registers are write-only and stateless
-        let mut cbp = unsafe { CBP::new() };
+        // NOTE(unsafe): No races as all CBP registers are write-only and stateless
+        let mut cbp = CBP::new();
+
+        // dminline is log2(num words), so 2**dminline * 4 gives size in bytes
+        let dminline = CPUID::cache_dminline();
+        let line_size = (1 << dminline) * 4;
+
+        debug_assert!((addr & (line_size - 1)) == 0);
+        debug_assert!((size & (line_size - 1)) == 0);
 
         crate::asm::dsb();
 
-        // Cache lines are fixed to 32 bit on Cortex-M7 and not present in earlier Cortex-M
-        const LINESIZE: usize = 32;
-        let num_lines = ((size - 1) / LINESIZE) + 1;
+        // Find number of cache lines to invalidate
+        let num_lines = ((size - 1) / line_size) + 1;
 
-        let mut addr = addr & 0xFFFF_FFE0;
+        // Compute address of first cache line
+        let mask = 0xFFFF_FFFF - (line_size - 1);
+        let mut addr = addr & mask;
 
         for _ in 0..num_lines {
             cbp.dcimvac(addr as u32);
-            addr += LINESIZE;
+            addr += line_size;
         }
 
         crate::asm::dsb();
         crate::asm::isb();
     }
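Since `invalidate_dcache_by_address` is now `unsafe` with alignment preconditions, callers typically force cache-line alignment at the type level. A hypothetical sketch for a 32-byte-line core such as the Cortex-M7 (the buffer and function names are illustrative only):

```rust
/// Wrapper that forces 32-byte alignment, matching the Cortex-M7 cache line.
#[repr(C, align(32))]
struct Aligned<T>(T);

// A DMA receive buffer: 64 bytes = 2 cache lines, so both preconditions hold.
static mut RX_BUF: Aligned<[u8; 64]> = Aligned([0; 64]);

fn after_dma_receive(scb: &mut cortex_m::peripheral::SCB) {
    unsafe {
        // Discard any stale cached copy before reading what the DMA wrote.
        let addr = RX_BUF.0.as_ptr() as usize;
        scb.invalidate_dcache_by_address(addr, 64);
    }
}
```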
 
-    /// Cleans D-cache by address
+    /// Invalidates an object from the D-cache.
+    ///
+    /// * `obj`: The object to invalidate.
+    ///
+    /// Invalidates D-cache starting from the first cache line containing `obj`,
+    /// continuing to invalidate cache lines until all of `obj` has been invalidated.
+    ///
+    /// Invalidation causes the next read access to memory to be fetched from main memory instead
+    /// of the cache.
+    ///
+    /// # Cache Line Sizes
+    ///
+    /// Cache line sizes vary by core. For all Cortex-M7 cores, the cache line size is fixed
+    /// to 32 bytes, which means `obj` must be 32-byte aligned, and its size must be a multiple
+    /// of 32 bytes. At the time of writing, no other Cortex-M cores have data caches.
+    ///
+    /// If `obj` is not cache-line aligned, or its size is not a multiple of the cache line size,
+    /// other data before or after the desired memory would also be invalidated, which can very
+    /// easily cause memory corruption and undefined behaviour.
+    ///
+    /// # Safety
+    ///
+    /// After invalidating, `obj` will be read from main memory on next access. This may cause
+    /// recent writes to `obj` to be lost, potentially including the write that initialized it.
+    /// Therefore, this method may cause uninitialized memory or invalid values to be read,
+    /// resulting in undefined behaviour. You must ensure that main memory contains a valid and
+    /// initialized value for T before invalidating `obj`.
+    ///
+    /// `obj` **must** be aligned to the size of the cache lines, and its size **must** be a
+    /// multiple of the cache line size, otherwise this function will invalidate other memory,
+    /// easily leading to memory corruption and undefined behaviour. This precondition is checked
+    /// in debug builds using a `debug_assert!()`, but not checked in release builds to avoid
+    /// a runtime-dependent `panic!()` call.
+    #[inline]
+    pub unsafe fn invalidate_dcache_by_ref<T>(&mut self, obj: &mut T) {
+        self.invalidate_dcache_by_address(obj as *const T as usize, core::mem::size_of::<T>());
+    }
+
+    /// Invalidates a slice from the D-cache.
+    ///
+    /// * `slice`: The slice to invalidate.
+    ///
+    /// Invalidates D-cache starting from the first cache line containing members of `slice`,
+    /// continuing to invalidate cache lines until all of `slice` has been invalidated.
+    ///
+    /// Invalidation causes the next read access to memory to be fetched from main memory instead
+    /// of the cache.
+    ///
+    /// # Cache Line Sizes
+    ///
+    /// Cache line sizes vary by core. For all Cortex-M7 cores, the cache line size is fixed
+    /// to 32 bytes, which means `slice` must be 32-byte aligned, and its size must be a multiple
+    /// of 32 bytes. At the time of writing, no other Cortex-M cores have data caches.
+    ///
+    /// If `slice` is not cache-line aligned, or its size is not a multiple of the cache line size,
+    /// other data before or after the desired memory would also be invalidated, which can very
+    /// easily cause memory corruption and undefined behaviour.
+    ///
+    /// # Safety
+    ///
+    /// After invalidating, `slice` will be read from main memory on next access. This may cause
+    /// recent writes to `slice` to be lost, potentially including the write that initialized it.
+    /// Therefore, this method may cause uninitialized memory or invalid values to be read,
+    /// resulting in undefined behaviour. You must ensure that main memory contains valid and
+    /// initialized values for T before invalidating `slice`.
+    ///
+    /// `slice` **must** be aligned to the size of the cache lines, and its size **must** be a
+    /// multiple of the cache line size, otherwise this function will invalidate other memory,
+    /// easily leading to memory corruption and undefined behaviour. This precondition is checked
+    /// in debug builds using a `debug_assert!()`, but not checked in release builds to avoid
+    /// a runtime-dependent `panic!()` call.
+    #[inline]
+    pub unsafe fn invalidate_dcache_by_slice<T>(&mut self, slice: &mut [T]) {
+        self.invalidate_dcache_by_address(
+            slice.as_ptr() as usize,
+            slice.len() * core::mem::size_of::<T>(),
+        );
+    }
+
+    /// Cleans D-cache by address.
+    ///
+    /// * `addr`: The address to start cleaning at.
+    /// * `size`: The number of bytes to clean.
     ///
-    /// `addr`: the address to clean
-    /// `size`: size of the memory block, in number of bytes
+    /// Cleans D-cache cache lines, starting from the first line containing `addr`,
+    /// finishing once at least `size` bytes have been invalidated.
     ///
-    /// Cleans cache starting from the lowest 32-byte aligned address represented by `addr`,
-    /// in blocks of 32 bytes until at least `size` bytes have been cleaned.
+    /// Cleaning the cache causes whatever data is present in the cache to be immediately written
+    /// to main memory, overwriting whatever was in main memory.
+    ///
+    /// # Cache Line Sizes
+    ///
+    /// Cache line sizes vary by core. For all Cortex-M7 cores, the cache line size is fixed
+    /// to 32 bytes, which means `addr` should generally be 32-byte aligned and `size` should be a
+    /// multiple of 32. At the time of writing, no other Cortex-M cores have data caches.
+    ///
+    /// If `addr` is not cache-line aligned, or `size` is not a multiple of the cache line size,
+    /// other data before or after the desired memory will also be cleaned. From the point of view
+    /// of the core executing this function, memory remains consistent, so this is not unsound,
+    /// but is worth knowing about.
     #[inline]
     pub fn clean_dcache_by_address(&mut self, addr: usize, size: usize) {
         // No-op zero sized operations
@@ -532,34 +682,78 @@ impl SCB {
             return;
         }
 
-        // NOTE(unsafe) All CBP registers are write-only and stateless
+        // NOTE(unsafe): No races as all CBP registers are write-only and stateless
         let mut cbp = unsafe { CBP::new() };
 
         crate::asm::dsb();
 
-        // Cache lines are fixed to 32 bit on Cortex-M7 and not present in earlier Cortex-M
-        const LINESIZE: usize = 32;
-        let num_lines = ((size - 1) / LINESIZE) + 1;
+        let dminline = CPUID::cache_dminline();
+        let line_size = (1 << dminline) * 4;
+        let num_lines = ((size - 1) / line_size) + 1;
 
-        let mut addr = addr & 0xFFFF_FFE0;
+        let mask = 0xFFFF_FFFF - (line_size - 1);
+        let mut addr = addr & mask;
 
         for _ in 0..num_lines {
             cbp.dccmvac(addr as u32);
-            addr += LINESIZE;
+            addr += line_size;
         }
 
         crate::asm::dsb();
         crate::asm::isb();
     }
 
-    /// Cleans and invalidates D-cache by address
+    /// Cleans an object from the D-cache.
+    ///
+    /// * `obj`: The object to clean.
+    ///
+    /// Cleans D-cache starting from the first cache line containing `obj`,
+    /// continuing to clean cache lines until all of `obj` has been cleaned.
+    ///
+    /// It is recommended that `obj` is both aligned to the cache line size and a multiple of
+    /// the cache line size long, otherwise surrounding data will also be cleaned.
+    ///
+    /// Cleaning the cache causes whatever data is present in the cache to be immediately written
+    /// to main memory, overwriting whatever was in main memory.
+    #[inline]
+    pub fn clean_dcache_by_ref<T>(&mut self, obj: &T) {
+        self.clean_dcache_by_address(obj as *const T as usize, core::mem::size_of::<T>());
+    }
+
+    /// Cleans a slice from D-cache.
+    ///
+    /// * `slice`: The slice to clean.
+    ///
+    /// Cleans D-cache starting from the first cache line containing members of `slice`,
+    /// continuing to clean cache lines until all of `slice` has been cleaned.
+    ///
+    /// It is recommended that `slice` is both aligned to the cache line size and a multiple of
+    /// the cache line size long, otherwise surrounding data will also be cleaned.
+    ///
+    /// Cleaning the cache causes whatever data is present in the cache to be immediately written
+    /// to main memory, overwriting whatever was in main memory.
+    #[inline]
+    pub fn clean_dcache_by_slice<T>(&mut self, slice: &[T]) {
+        self.clean_dcache_by_address(
+            slice.as_ptr() as usize,
+            slice.len() * core::mem::size_of::<T>(),
+        );
+    }
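`clean_dcache_by_slice` is the usual counterpart on the transmit side of DMA: clean before the device reads memory, invalidate after the device writes it. A hedged sketch of the transmit half (`start_dma_transfer` is a made-up placeholder for whatever a real DMA driver provides):

```rust
use cortex_m::peripheral::SCB;

fn dma_transmit(scb: &mut SCB, tx: &[u8]) {
    // Push any dirty cache lines holding `tx` out to main memory so the
    // DMA engine reads the data the CPU actually wrote.
    scb.clean_dcache_by_slice(tx);
    start_dma_transfer(tx);
}

// Placeholder for a real DMA driver call.
fn start_dma_transfer(_buf: &[u8]) {}
```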
+
+    /// Cleans and invalidates D-cache by address.
+    ///
+    /// * `addr`: The address to clean and invalidate.
+    /// * `size`: The number of bytes to clean and invalidate.
+    ///
+    /// Cleans and invalidates D-cache starting from the first cache line containing `addr`,
+    /// finishing once at least `size` bytes have been cleaned and invalidated.
     ///
-    /// `addr`: the address to clean and invalidate
-    /// `size`: size of the memory block, in number of bytes
+    /// It is recommended that `addr` is aligned to the cache line size and `size` is a multiple of
+    /// the cache line size, otherwise surrounding data will also be cleaned.
     ///
-    /// Cleans and invalidates cache starting from the lowest 32-byte aligned address represented
-    /// by `addr`, in blocks of 32 bytes until at least `size` bytes have been cleaned and
-    /// invalidated.
+    /// Cleaning and invalidating causes data in the D-cache to be written back to main memory,
+    /// and then marks that data in the D-cache as invalid, causing future reads to first fetch
+    /// from main memory.
     #[inline]
     pub fn clean_invalidate_dcache_by_address(&mut self, addr: usize, size: usize) {
         // No-op zero sized operations
@@ -567,7 +761,7 @@ impl SCB {
             return;
         }
 
-        // NOTE(unsafe) All CBP registers are write-only and stateless
+        // NOTE(unsafe): No races as all CBP registers are write-only and stateless
         let mut cbp = unsafe { CBP::new() };
 
         crate::asm::dsb();
@@ -709,57 +903,38 @@ impl SCB {
 
 /// System handlers, exceptions with configurable priority
 #[allow(clippy::missing_inline_in_public_items)]
 #[derive(Clone, Copy, Debug, Eq, PartialEq)]
+#[repr(u8)]
 pub enum SystemHandler {
     // NonMaskableInt, // priority is fixed
     // HardFault, // priority is fixed
     /// Memory management interrupt (not present on Cortex-M0 variants)
     #[cfg(not(armv6m))]
-    MemoryManagement,
+    MemoryManagement = 4,
 
     /// Bus fault interrupt (not present on Cortex-M0 variants)
     #[cfg(not(armv6m))]
-    BusFault,
+    BusFault = 5,
 
     /// Usage fault interrupt (not present on Cortex-M0 variants)
     #[cfg(not(armv6m))]
-    UsageFault,
+    UsageFault = 6,
 
     /// Secure fault interrupt (only on ARMv8-M)
     #[cfg(any(armv8m, target_arch = "x86_64"))]
-    SecureFault,
+    SecureFault = 7,
 
     /// SV call interrupt
-    SVCall,
+    SVCall = 11,
 
     /// Debug monitor interrupt (not present on Cortex-M0 variants)
     #[cfg(not(armv6m))]
-    DebugMonitor,
+    DebugMonitor = 12,
 
     /// Pend SV interrupt
-    PendSV,
+    PendSV = 14,
 
     /// System Tick interrupt
-    SysTick,
-}
-
-impl SystemHandler {
-    fn index(self) -> u8 {
-        match self {
-            #[cfg(not(armv6m))]
-            SystemHandler::MemoryManagement => 4,
-            #[cfg(not(armv6m))]
-            SystemHandler::BusFault => 5,
-            #[cfg(not(armv6m))]
-            SystemHandler::UsageFault => 6,
-            #[cfg(any(armv8m, target_arch = "x86_64"))]
-            SystemHandler::SecureFault => 7,
-            SystemHandler::SVCall => 11,
-            #[cfg(not(armv6m))]
-            SystemHandler::DebugMonitor => 12,
-            SystemHandler::PendSV => 14,
-            SystemHandler::SysTick => 15,
-        }
-    }
+    SysTick = 15,
 }
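With `#[repr(u8)]` and explicit discriminants, the old `index()` lookup table collapses into a plain cast, as the updated `get_priority`/`set_priority` below show. A quick standalone check of the idea, using a subset of the real enum for illustration:

```rust
#[repr(u8)]
#[derive(Clone, Copy)]
enum SystemHandler {
    SVCall = 11,
    PendSV = 14,
    SysTick = 15,
}

fn main() {
    // `as u8` yields the explicit discriminant, replacing the old index() match.
    assert_eq!(SystemHandler::SysTick as u8, 15);
    assert_eq!(SystemHandler::SVCall as u8, 11);
    assert_eq!(SystemHandler::PendSV as u8, 14);
}
```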
 
 impl SCB {
@@ -769,18 +944,28 @@ impl SCB {
     /// [`NVIC.get_priority`](struct.NVIC.html#method.get_priority) for more details.
     #[inline]
     pub fn get_priority(system_handler: SystemHandler) -> u8 {
-        let index = system_handler.index();
+        let index = system_handler as u8;
 
         #[cfg(not(armv6m))]
         {
             // NOTE(unsafe) atomic read with no side effects
-            unsafe { (*Self::ptr()).shpr[usize::from(index - 4)].read() }
+
+            // NOTE(unsafe): Index is bounded to [4,15] by SystemHandler design.
+            // TODO: Review it after rust-lang/rust/issues/13926 will be fixed.
+            let priority_ref = unsafe { (*Self::ptr()).shpr.get_unchecked(usize::from(index - 4)) };
+
+            priority_ref.read()
         }
 
         #[cfg(armv6m)]
         {
             // NOTE(unsafe) atomic read with no side effects
-            let shpr = unsafe { (*Self::ptr()).shpr[usize::from((index - 8) / 4)].read() };
+
+            // NOTE(unsafe): Index is bounded to [11,15] by SystemHandler design.
+            // TODO: Review it after rust-lang/rust/issues/13926 will be fixed.
+            let priority_ref = unsafe { (*Self::ptr()).shpr.get_unchecked(usize::from((index - 8) / 4)) };
+
+            let shpr = priority_ref.read();
             let prio = (shpr >> (8 * (index % 4))) & 0x0000_00ff;
             prio as u8
         }
@@ -800,16 +985,24 @@ impl SCB {
     /// [`register::basepri`](../register/basepri/index.html)) and compromise memory safety.
     #[inline]
     pub unsafe fn set_priority(&mut self, system_handler: SystemHandler, prio: u8) {
-        let index = system_handler.index();
+        let index = system_handler as u8;
 
         #[cfg(not(armv6m))]
         {
-            self.shpr[usize::from(index - 4)].write(prio)
+            // NOTE(unsafe): Index is bounded to [4,15] by SystemHandler design.
+            // TODO: Review it after rust-lang/rust/issues/13926 will be fixed.
+            let priority_ref = (*Self::ptr()).shpr.get_unchecked(usize::from(index - 4));
+
+            priority_ref.write(prio)
         }
 
         #[cfg(armv6m)]
         {
-            self.shpr[usize::from((index - 8) / 4)].modify(|value| {
+            // NOTE(unsafe): Index is bounded to [11,15] by SystemHandler design.
+            // TODO: Review it after rust-lang/rust/issues/13926 will be fixed.
+            let priority_ref = (*Self::ptr()).shpr.get_unchecked(usize::from((index - 8) / 4));
+
+            priority_ref.modify(|value| {
                 let shift = 8 * (index % 4);
                 let mask = 0x0000_00ff << shift;
                 let prio = u32::from(prio) << shift;
diff --git a/src/peripheral/tpiu.rs b/src/peripheral/tpiu.rs
index 4115bb3..11cb79e 100644
--- a/src/peripheral/tpiu.rs
+++ b/src/peripheral/tpiu.rs
@@ -1,6 +1,6 @@
 //! Trace Port Interface Unit;
 //!
-//! *NOTE* Available only on ARMv7-M (`thumbv7*m-none-eabi*`)
+//! *NOTE* Not available on Armv6-M.
 
 use volatile_register::{RO, RW, WO};
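The Armv6-M branch above packs four 8-bit priorities into each 32-bit SHPR word, which is why it indexes with `(index - 8) / 4` and shifts by `8 * (index % 4)`. A standalone sketch of that arithmetic:

```rust
/// Locate the SHPR word and byte lane for a system-handler index
/// (Armv6-M layout, where the crate's shpr array holds SHPR2 and SHPR3).
fn shpr_slot(index: u8) -> (usize, u32) {
    let register = usize::from((index - 8) / 4); // which 32-bit SHPR register
    let shift = 8 * u32::from(index % 4);        // which byte lane inside it
    (register, shift)
}

fn main() {
    // SysTick has index 15: second register (SHPR3), top byte.
    assert_eq!(shpr_slot(15), (1, 24));
    // SVCall has index 11: first register (SHPR2), top byte.
    assert_eq!(shpr_slot(11), (0, 24));
}
```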