author     2022-07-27 19:15:09 +0000
committer  2022-07-27 19:15:09 +0000
commit     b87fca3d21d92018ac4d50b200cf8e77cb577028 (patch)
tree       48f6da77b0c2ba1bb54e6ad8f30f5dad0e3765d7
parent     d4816e054b556e326c5e3d4c40f7120372aafc50 (diff)
parent     368ab1d4fb780386f3162c7a84502e301af7f3b0 (diff)
Merge #652
652: Remove use of basepri register on thumbv8m.base r=AfoHT a=neonquill
The basepri register appears to be available on thumbv8m.main but not thumbv8m.base. At the very least, attempting to compile against a Cortex-M23-based Microchip ATSAML10E16A generates an error:
```
error[E0432]: unresolved import `cortex_m::register::basepri`
--> /Users/dwatson/.cargo/registry/src/github.com-1ecc6299db9ec823/cortex-m-rtic-1.1.3/src/export.rs:25:5
|
25 | use cortex_m::register::basepri;
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^ no `basepri` in `register`
```
I wasn't sure if it made more sense to replace the `armv7m` config flag with something related to basepri availability or to get closer to matching the cortex-m use of several architecture-specific flags. In the end I chose to make the minimal change possible and just narrowed the existing `thumbv8m` check.
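For illustration, here is a rough, hypothetical sketch of what that minimal narrowing could look like in RTIC's build.rs; it is not the merged change, which (see the diff below) goes further, renaming the cfg to `have_basepri` and rejecting unknown thumb targets:
```
// Sketch only (hypothetical, not the merged change): narrow the existing
// thumbv8m check so that only thumbv8m.main, which has BASEPRI, is treated
// like armv7m.
fn main() {
    let target = std::env::var("TARGET").unwrap();

    if target.starts_with("thumbv7m")
        | target.starts_with("thumbv7em")
        | target.starts_with("thumbv8m.main") // was: starts_with("thumbv8m")
    {
        println!("cargo:rustc-cfg=armv7m");
    }

    println!("cargo:rerun-if-changed=build.rs");
}
```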
Context:
[cortex-m:src/register/mod.rs](https://github.com/rust-embedded/cortex-m/blob/4e908625204a1e95dd3fd5bdcd8d66d6bc11c3bc/src/register/mod.rs#L33):
```
#[cfg(all(not(armv6m), not(armv8m_base)))]
pub mod basepri;
```
[cortex-m:build.rs](https://github.com/rust-embedded/cortex-m/blob/4e908625204a1e95dd3fd5bdcd8d66d6bc11c3bc/build.rs#L21):
```
} else if target.starts_with("thumbv8m.base") {
println!("cargo:rustc-cfg=cortex_m");
println!("cargo:rustc-cfg=armv8m");
println!("cargo:rustc-cfg=armv8m_base");
```
Co-authored-by: David Watson <david@neonquill.com>
 CHANGELOG.md                           |   2
 build.rs                               |  18
 macros/src/codegen/assertions.rs       |  11
 macros/src/codegen/shared_resources.rs |  19
 macros/src/codegen/util.rs             |   5
 src/export.rs                          | 145
 6 files changed, 144 insertions, 56 deletions
diff --git a/CHANGELOG.md b/CHANGELOG.md
index b131b307..7d1a9f3d 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -13,6 +13,8 @@ For each category, *Added*, *Changed*, *Fixed* add new entries at the top!
 
 ### Fixed
 
+- Distinguish between thumbv8m.base and thumbv8m.main for basepri usage.
+
 ### Changed
 
 ## [v1.1.3] - 2022-06-23
diff --git a/build.rs b/build.rs
--- a/build.rs
+++ b/build.rs
@@ -7,15 +7,21 @@ fn main() {
         println!("cargo:rustc-cfg=rustc_is_nightly");
     }
 
-    if target.starts_with("thumbv6m") {
-        println!("cargo:rustc-cfg=armv6m");
-    }
-
+    // These targets all have know support for the BASEPRI register.
     if target.starts_with("thumbv7m")
         | target.starts_with("thumbv7em")
-        | target.starts_with("thumbv8m")
+        | target.starts_with("thumbv8m.main")
+    {
+        println!("cargo:rustc-cfg=have_basepri");
+
+        // These targets are all known to _not_ have the BASEPRI register.
+    } else if target.starts_with("thumb")
+        && !(target.starts_with("thumbv6m") | target.starts_with("thumbv8m.base"))
     {
-        println!("cargo:rustc-cfg=armv7m");
+        panic!(
+            "Unknown target '{}'. Need to update BASEPRI logic in build.rs.",
+            target
+        );
     }
 
     println!("cargo:rerun-if-changed=build.rs");
diff --git a/macros/src/codegen/assertions.rs b/macros/src/codegen/assertions.rs
index f6a098b5..66e54095 100644
--- a/macros/src/codegen/assertions.rs
+++ b/macros/src/codegen/assertions.rs
@@ -22,15 +22,16 @@ pub fn codegen(app: &App, analysis: &Analysis, extra: &Extra) -> Vec<TokenStream
     }
 
     let device = &extra.device;
-    let arm_v6_checks: Vec<_> = app
+    let chunks_name = util::priority_mask_chunks_ident();
+    let no_basepri_checks: Vec<_> = app
         .hardware_tasks
         .iter()
         .filter_map(|(_, task)| {
             if !util::is_exception(&task.args.binds) {
                 let interrupt_name = &task.args.binds;
                 Some(quote!(
-                    if (#device::Interrupt::#interrupt_name as u32) > 31 {
-                        ::core::panic!("An interrupt above value 31 is used while in armv6");
+                    if (#device::Interrupt::#interrupt_name as usize) >= (#chunks_name * 32) {
+                        ::core::panic!("An interrupt out of range is used while in armv6 or armv8m.base");
                     }
                 ))
             } else {
@@ -41,8 +42,8 @@ pub fn codegen(app: &App, analysis: &Analysis, extra: &Extra) -> Vec<TokenStream
 
     let const_check = quote! {
         const _CONST_CHECK: () = {
-            if rtic::export::is_armv6() {
-                #(#arm_v6_checks)*
+            if !rtic::export::have_basepri() {
+                #(#no_basepri_checks)*
             } else {
                 // TODO: Add armv7 checks here
             }
diff --git a/macros/src/codegen/shared_resources.rs b/macros/src/codegen/shared_resources.rs
index 08d77cc3..4a750070 100644
--- a/macros/src/codegen/shared_resources.rs
+++ b/macros/src/codegen/shared_resources.rs
@@ -118,6 +118,8 @@ pub fn codegen(
     let device = &extra.device;
     let mut uses_exceptions_with_resources = false;
 
+    let mut mask_ids = Vec::new();
+
     for (&priority, name) in interrupt_ids.chain(app.hardware_tasks.values().flat_map(|task| {
         if !util::is_exception(&task.args.binds) {
             Some((&task.args.priority, &task.args.binds))
@@ -147,12 +149,13 @@ pub fn codegen(
     })) {
         let v = prio_to_masks.entry(priority - 1).or_insert(Vec::new());
         v.push(quote!(#device::Interrupt::#name as u32));
+        mask_ids.push(quote!(#device::Interrupt::#name as u32));
     }
 
-    // Call rtic::export::create_mask([u32; N]), where the array is the list of shifts
+    // Call rtic::export::create_mask([Mask; N]), where the array is the list of shifts
    let mut mask_arr = Vec::new();
 
-    // NOTE: 0..3 assumes max 4 priority levels according to M0 spec
+    // NOTE: 0..3 assumes max 4 priority levels according to M0, M23 spec
     for i in 0..3 {
         let v = if let Some(v) = prio_to_masks.get(&i) {
             v.clone()
@@ -165,18 +168,26 @@ pub fn codegen(
         ));
     }
 
+    // Generate a constant for the number of chunks needed by Mask.
+    let chunks_name = util::priority_mask_chunks_ident();
+    mod_app.push(quote!(
+        #[doc(hidden)]
+        #[allow(non_upper_case_globals)]
+        const #chunks_name: usize = rtic::export::compute_mask_chunks([#(#mask_ids),*]);
+    ));
+
     let masks_name = util::priority_masks_ident();
     mod_app.push(quote!(
         #[doc(hidden)]
         #[allow(non_upper_case_globals)]
-        const #masks_name: [u32; 3] = [#(#mask_arr),*];
+        const #masks_name: [rtic::export::Mask<#chunks_name>; 3] = [#(#mask_arr),*];
     ));
 
     if uses_exceptions_with_resources {
         mod_app.push(quote!(
             #[doc(hidden)]
             #[allow(non_upper_case_globals)]
-            const __rtic_internal_V6_ERROR: () = rtic::export::v6_panic();
+            const __rtic_internal_V6_ERROR: () = rtic::export::no_basepri_panic();
         ));
     }
 
diff --git a/macros/src/codegen/util.rs b/macros/src/codegen/util.rs
index 0f3dca7c..0a3edc20 100644
--- a/macros/src/codegen/util.rs
+++ b/macros/src/codegen/util.rs
@@ -253,6 +253,11 @@ pub fn static_shared_resource_ident(name: &Ident) -> Ident {
     mark_internal_name(&format!("shared_resource_{}", name))
 }
 
+/// Generates an Ident for the number of 32 bit chunks used for Mask storage.
+pub fn priority_mask_chunks_ident() -> Ident {
+    mark_internal_name("MASK_CHUNKS")
+}
+
 pub fn priority_masks_ident() -> Ident {
     mark_internal_name("MASKS")
 }
diff --git a/src/export.rs b/src/export.rs
index a4d76b53..6f2a1b63 100644
--- a/src/export.rs
+++ b/src/export.rs
@@ -21,10 +21,43 @@ pub use rtic_monotonic as monotonic;
 pub type SCFQ<const N: usize> = Queue<u8, N>;
 pub type SCRQ<T, const N: usize> = Queue<(T, u8), N>;
 
-#[cfg(armv7m)]
+/// Mask is used to store interrupt masks on systems without a BASEPRI register (M0, M0+, M23).
+/// It needs to be large enough to cover all the relevant interrupts in use.
+/// For M0/M0+ there are only 32 interrupts so we only need one u32 value.
+/// For M23 there can be as many as 480 interrupts.
+/// Rather than providing space for all possible interrupts, we just detect the highest interrupt
+/// in use at compile time and allocate enough u32 chunks to cover them.
+#[derive(Copy, Clone)]
+pub struct Mask<const M: usize>([u32; M]);
+
+impl<const M: usize> core::ops::BitOrAssign for Mask<M> {
+    fn bitor_assign(&mut self, rhs: Self) {
+        for i in 0..M {
+            self.0[i] |= rhs.0[i];
+        }
+    }
+}
+
+#[cfg(not(have_basepri))]
+impl<const M: usize> Mask<M> {
+    /// Set a bit inside a Mask.
+    const fn set_bit(mut self, bit: u32) -> Self {
+        let block = bit / 32;
+
+        if block as usize >= M {
+            panic!("Generating masks for thumbv6/thumbv8m.base failed! Are you compiling for thumbv6 on an thumbv7 MCU or using an unsupported thumbv8m.base MCU?");
+        }
+
+        let offset = bit - (block * 32);
+        self.0[block as usize] |= 1 << offset;
+        self
+    }
+}
+
+#[cfg(have_basepri)]
 use cortex_m::register::basepri;
 
-#[cfg(armv7m)]
+#[cfg(have_basepri)]
 #[inline(always)]
 pub fn run<F>(priority: u8, f: F)
 where
@@ -41,7 +74,7 @@ where
     }
 }
 
-#[cfg(not(armv7m))]
+#[cfg(not(have_basepri))]
 #[inline(always)]
 pub fn run<F>(_priority: u8, f: F)
 where
@@ -105,15 +138,15 @@ impl Priority {
 }
 
 /// Const helper to check architecture
-pub const fn is_armv6() -> bool {
-    #[cfg(not(armv6m))]
+pub const fn have_basepri() -> bool {
+    #[cfg(have_basepri)]
     {
-        false
+        true
     }
 
-    #[cfg(armv6m)]
+    #[cfg(not(have_basepri))]
     {
-        true
+        false
     }
 }
 
@@ -172,14 +205,14 @@
 /// Total OH of per task is max 2 clock cycles, negligible in practice
 /// but can in theory be fixed.
 ///
-#[cfg(armv7m)]
+#[cfg(have_basepri)]
 #[inline(always)]
-pub unsafe fn lock<T, R>(
+pub unsafe fn lock<T, R, const M: usize>(
     ptr: *mut T,
     priority: &Priority,
     ceiling: u8,
     nvic_prio_bits: u8,
-    _mask: &[u32; 3],
+    _mask: &[Mask<M>; 3],
     f: impl FnOnce(&mut T) -> R,
 ) -> R {
     let current = priority.get();
@@ -247,14 +280,14 @@
 /// - Temporary lower exception priority
 ///
 /// These possible solutions are set goals for future work
-#[cfg(not(armv7m))]
+#[cfg(not(have_basepri))]
 #[inline(always)]
-pub unsafe fn lock<T, R>(
+pub unsafe fn lock<T, R, const M: usize>(
     ptr: *mut T,
     priority: &Priority,
     ceiling: u8,
     _nvic_prio_bits: u8,
-    masks: &[u32; 3],
+    masks: &[Mask<M>; 3],
     f: impl FnOnce(&mut T) -> R,
 ) -> R {
     let current = priority.get();
@@ -288,28 +321,38 @@
     }
 }
 
-#[cfg(not(armv7m))]
+#[cfg(not(have_basepri))]
 #[inline(always)]
-fn compute_mask(from_prio: u8, to_prio: u8, masks: &[u32; 3]) -> u32 {
-    let mut res = 0;
+fn compute_mask<const M: usize>(from_prio: u8, to_prio: u8, masks: &[Mask<M>; 3]) -> Mask<M> {
+    let mut res = Mask([0; M]);
     masks[from_prio as usize..to_prio as usize]
         .iter()
-        .for_each(|m| res |= m);
+        .for_each(|m| res |= *m);
     res
 }
 
 // enables interrupts
-#[cfg(not(armv7m))]
+#[cfg(not(have_basepri))]
 #[inline(always)]
-unsafe fn set_enable_mask(mask: u32) {
-    (*NVIC::PTR).iser[0].write(mask)
+unsafe fn set_enable_mask<const M: usize>(mask: Mask<M>) {
+    for i in 0..M {
+        // This check should involve compile time constants and be optimized out.
+        if mask.0[i] != 0 {
+            (*NVIC::PTR).iser[i].write(mask.0[i]);
+        }
+    }
 }
 
 // disables interrupts
-#[cfg(not(armv7m))]
+#[cfg(not(have_basepri))]
 #[inline(always)]
-unsafe fn clear_enable_mask(mask: u32) {
-    (*NVIC::PTR).icer[0].write(mask)
+unsafe fn clear_enable_mask<const M: usize>(mask: Mask<M>) {
+    for i in 0..M {
+        // This check should involve compile time constants and be optimized out.
+        if mask.0[i] != 0 {
+            (*NVIC::PTR).icer[i].write(mask.0[i]);
+        }
+    }
 }
 
 #[inline]
@@ -318,36 +361,56 @@ pub fn logical2hw(logical: u8, nvic_prio_bits: u8) -> u8 {
     ((1 << nvic_prio_bits) - logical) << (8 - nvic_prio_bits)
 }
 
-#[cfg(not(armv6m))]
-pub const fn create_mask<const N: usize>(_: [u32; N]) -> u32 {
-    0
+#[cfg(have_basepri)]
+pub const fn create_mask<const N: usize, const M: usize>(_: [u32; N]) -> Mask<M> {
+    Mask([0; M])
 }
 
-#[cfg(armv6m)]
-pub const fn create_mask<const N: usize>(list_of_shifts: [u32; N]) -> u32 {
-    let mut mask = 0;
+#[cfg(not(have_basepri))]
+pub const fn create_mask<const N: usize, const M: usize>(list_of_shifts: [u32; N]) -> Mask<M> {
+    let mut mask = Mask([0; M]);
 
     let mut i = 0;
     while i < N {
         let shift = list_of_shifts[i];
         i += 1;
-
-        if shift > 31 {
-            panic!("Generating masks for thumbv6 failed! Are you compiling for thumbv6 on an thumbv7 MCU?");
-        }
-
-        mask |= 1 << shift;
+        mask = mask.set_bit(shift);
     }
 
     mask
 }
 
-#[cfg(not(armv6m))]
-pub const fn v6_panic() {
+#[cfg(have_basepri)]
+pub const fn compute_mask_chunks<const L: usize>(_: [u32; L]) -> usize {
+    0
+}
+
+/// Compute the number of u32 chunks needed to store the Mask value.
+/// On M0, M0+ this should always end up being 1.
+/// On M23 we will pick a number that allows us to store the highest index used by the code.
+/// This means the amount of overhead will vary based on the actually interrupts used by the code.
+#[cfg(not(have_basepri))]
+pub const fn compute_mask_chunks<const L: usize>(ids: [u32; L]) -> usize {
+    let mut max: usize = 0;
+    let mut i = 0;
+
+    while i < L {
+        let id = ids[i] as usize;
+        i += 1;
+
+        if id > max {
+            max = id;
+        }
+    }
+    (max + 32) / 32
+}
+
+#[cfg(have_basepri)]
+pub const fn no_basepri_panic() {
     // For non-v6 all is fine
 }
 
-#[cfg(armv6m)]
-pub const fn v6_panic() {
-    panic!("Exceptions with shared resources are not allowed when compiling for thumbv6. Use local resources or `#[lock_free]` shared resources");
+#[cfg(not(have_basepri))]
+pub const fn no_basepri_panic() {
+    panic!("Exceptions with shared resources are not allowed when compiling for thumbv6 or thumbv8m.base. Use local resources or `#[lock_free]` shared resources");
 }
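For reference, here is a small, self-contained sketch of the chunk computation performed by the new `compute_mask_chunks` in src/export.rs (the function body mirrors the diff above; the interrupt IDs are invented for illustration). The resulting `MASK_CHUNKS` constant is what sizes the generated `[rtic::export::Mask<MASK_CHUNKS>; 3]` array in shared_resources.rs:
```
// Standalone sketch: same chunk computation as the new compute_mask_chunks
// in src/export.rs, runnable on the host. Interrupt IDs are made-up examples.
const fn compute_mask_chunks<const L: usize>(ids: [u32; L]) -> usize {
    let mut max: usize = 0;
    let mut i = 0;

    while i < L {
        let id = ids[i] as usize;
        i += 1;

        if id > max {
            max = id;
        }
    }
    (max + 32) / 32
}

fn main() {
    // M0/M0+: every interrupt ID is below 32, so one u32 chunk is enough.
    assert_eq!(compute_mask_chunks([3, 17, 30]), 1);
    // M23: interrupt 40 lands in bit 8 of a second u32 chunk.
    assert_eq!(compute_mask_chunks([3, 40]), 2);
    println!("chunk computation behaves as expected");
}
```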