Diffstat (limited to 'macros/src/codegen/dispatchers.rs')
-rw-r--r-- | macros/src/codegen/dispatchers.rs | 140 |
1 file changed, 24 insertions, 116 deletions
diff --git a/macros/src/codegen/dispatchers.rs b/macros/src/codegen/dispatchers.rs
index f5f36c49..7a9fa0a3 100644
--- a/macros/src/codegen/dispatchers.rs
+++ b/macros/src/codegen/dispatchers.rs
@@ -5,41 +5,28 @@ use rtic_syntax::ast::App;
 use crate::{analyze::Analysis, check::Extra, codegen::util};
 
 /// Generates task dispatchers
-pub fn codegen(app: &App, analysis: &Analysis, extra: &Extra) -> Vec<TokenStream2> {
+pub fn codegen(app: &App, analysis: &Analysis, _extra: &Extra) -> Vec<TokenStream2> {
     let mut items = vec![];
 
-    let interrupts = &analysis.interrupts;
-
-    // Generate executor definition and priority in global scope
-    for (name, task) in app.software_tasks.iter() {
-        if task.is_async {
-            let type_name = util::internal_task_ident(name, "F");
-            let exec_name = util::internal_task_ident(name, "EXEC");
-            let prio_name = util::internal_task_ident(name, "PRIORITY");
-
-            items.push(quote!(
-                #[allow(non_camel_case_types)]
-                type #type_name = impl core::future::Future + 'static;
-                #[allow(non_upper_case_globals)]
-                static #exec_name:
-                    rtic::RacyCell<rtic::export::executor::AsyncTaskExecutor<#type_name>> =
-                        rtic::RacyCell::new(rtic::export::executor::AsyncTaskExecutor::new());
-
-                // The executors priority, this can be any value - we will overwrite it when we
-                // start a task
-                #[allow(non_upper_case_globals)]
-                static #prio_name: rtic::RacyCell<rtic::export::Priority> =
-                    unsafe { rtic::RacyCell::new(rtic::export::Priority::new(0)) };
-            ));
-        }
-    }
+    let interrupts = &analysis.interrupts_normal;
 
     for (&level, channel) in &analysis.channels {
+        if channel
+            .tasks
+            .iter()
+            .map(|task_name| app.software_tasks[task_name].is_async)
+            .all(|is_async| is_async)
+        {
+            // check if all tasks are async, if so don't generate this.
+            continue;
+        }
+
         let mut stmts = vec![];
 
         let variants = channel
             .tasks
             .iter()
+            .filter(|name| !app.software_tasks[*name].is_async)
            .map(|name| {
                 let cfgs = &app.software_tasks[name].cfgs;
 
@@ -69,6 +56,7 @@ pub fn codegen(app: &App, analysis: &Analysis, extra: &Extra) -> Vec<TokenStream
         let n = util::capacity_literal(channel.capacity as usize + 1);
         let rq = util::rq_ident(level);
 
+        // let (_, _, _, input_ty) = util::regroup_inputs(inputs);
         let (rq_ty, rq_expr) = {
             (
                 quote!(rtic::export::SCRQ<#t, #n>),
@@ -88,9 +76,13 @@ pub fn codegen(app: &App, analysis: &Analysis, extra: &Extra) -> Vec<TokenStream
             static #rq: rtic::RacyCell<#rq_ty> = rtic::RacyCell::new(#rq_expr);
         ));
 
-        let device = &extra.device;
-        let enum_ = util::interrupt_ident();
-        let interrupt = util::suffixed(&interrupts[&level].0.to_string());
+        let interrupt = util::suffixed(
+            &interrupts
+                .get(&level)
+                .expect("RTIC-ICE: Unable to get interrrupt")
+                .0
+                .to_string(),
+        );
         let arms = channel
             .tasks
             .iter()
@@ -100,36 +92,8 @@ pub fn codegen(app: &App, analysis: &Analysis, extra: &Extra) -> Vec<TokenStream
                 let fq = util::fq_ident(name);
                 let inputs = util::inputs_ident(name);
                 let (_, tupled, pats, _) = util::regroup_inputs(&task.inputs);
-                let exec_name = util::internal_task_ident(name, "EXEC");
-                let prio_name = util::internal_task_ident(name, "PRIORITY");
 
-                if task.is_async {
-                    let executor_run_ident = util::executor_run_ident(name);
-
-                    quote!(
-                        #(#cfgs)*
-                        #t::#name => {
-                            if !(&mut *#exec_name.get_mut()).is_running() {
-                                let #tupled =
-                                    (&*#inputs
-                                    .get())
-                                    .get_unchecked(usize::from(index))
-                                    .as_ptr()
-                                    .read();
-                                (&mut *#fq.get_mut()).split().0.enqueue_unchecked(index);
-
-                                // The async executor needs a static priority
-                                #prio_name.get_mut().write(rtic::export::Priority::new(PRIORITY));
-                                let priority: &'static _ = &*#prio_name.get();
-
-                                (&mut *#exec_name.get_mut()).spawn(#name(#name::Context::new(priority) #(,#pats)*));
-                                #executor_run_ident.store(true, core::sync::atomic::Ordering::Relaxed);
-                            } else {
-                                retry_queue.push_unchecked((#t::#name, index));
-                            }
-                        }
-                    )
-                } else {
+                if !task.is_async {
                     quote!(
                         #(#cfgs)*
                         #t::#name => {
@@ -147,36 +111,11 @@ pub fn codegen(app: &App, analysis: &Analysis, extra: &Extra) -> Vec<TokenStream
                             )
                         }
                     )
-                }
-            })
-            .collect::<Vec<_>>();
-
-        let n_executors = channel
-            .tasks
-            .iter()
-            .map(|name| {
-                let task = &app.software_tasks[name];
-                if task.is_async {
-                    1
                 } else {
-                    0
+                    quote!()
                 }
             })
-            .sum::<usize>()
-            .max(1);
-
-        // TODO: This `retry_queue` comes from the current design of the dispatcher queue handling.
-        // To remove this we would need to redesign how the dispatcher handles queues, and this can
-        // be done as an optimization later.
-        //
-        // The core issue is that we should only dequeue the ready queue if the exexutor associated
-        // to the task is not running. As it is today this queue is blindly dequeued, see the
-        // `while let Some(...) = (&mut *#rq.get_mut())...` a few lines down. The current "hack" is
-        // to just requeue the executor run if it should not have been dequeued. This needs however
-        // to be done after the ready queue has been exhausted.
-        stmts.push(quote!(
-            let mut retry_queue: rtic::export::Vec<_, #n_executors> = rtic::export::Vec::new();
-        ));
+            .collect::<Vec<_>>();
 
         stmts.push(quote!(
             while let Some((task, index)) = (&mut *#rq.get_mut()).split().1.dequeue() {
@@ -186,37 +125,6 @@ pub fn codegen(app: &App, analysis: &Analysis, extra: &Extra) -> Vec<TokenStream
             }
         ));
 
-        for name in channel
-            .tasks
-            .iter()
-            .filter(|name| app.software_tasks[*name].is_async)
-        {
-            let exec_name = util::internal_task_ident(name, "EXEC");
-
-            let executor_run_ident = util::executor_run_ident(name);
-            stmts.push(quote!(
-                if #executor_run_ident.load(core::sync::atomic::Ordering::Relaxed) {
-                    #executor_run_ident.store(false, core::sync::atomic::Ordering::Relaxed);
-                    if (&mut *#exec_name.get_mut()).poll(|| {
-                        #executor_run_ident.store(true, core::sync::atomic::Ordering::Release);
-                        rtic::pend(#device::#enum_::#interrupt);
-                    }) && !retry_queue.is_empty() {
-                        // If the retry queue is not empty and the executor finished, restart this
-                        // dispatch to check if the executor should be restarted.
-                        rtic::pend(#device::#enum_::#interrupt);
-                    }
-                }
-            ));
-        }
-
-        stmts.push(quote!(
-            while let Some((task, index)) = retry_queue.pop() {
-                rtic::export::interrupt::free(|_| {
-                    (&mut *#rq.get_mut()).enqueue_unchecked((task, index));
-                });
-            }
-        ));
-
         let doc = format!("Interrupt handler to dispatch tasks at priority {}", level);
         let attribute = &interrupts[&level].1.attrs;
         items.push(quote!(
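To make the intent of the added hunks easier to see outside the quote! fragments, below is a small, self-contained sketch of the selection logic the dispatcher generator now applies. It is not code from this repository: the task names, priority levels, and data structures are invented stand-ins for the app's software_tasks and channels. The idea it illustrates is the one added above: a priority level whose software tasks are all async gets no dispatcher at all, and async tasks are filtered out of the ready-queue variants of the levels that remain.

// Standalone sketch, NOT part of the RTIC code base: the app layout below is
// invented purely to illustrate the selection logic added in this commit.
use std::collections::BTreeMap;

struct SoftwareTask {
    is_async: bool,
}

fn main() {
    // Hypothetical software tasks and whether they are async.
    let software_tasks: BTreeMap<&str, SoftwareTask> = BTreeMap::from([
        ("foo", SoftwareTask { is_async: false }),
        ("bar", SoftwareTask { is_async: true }),
        ("baz", SoftwareTask { is_async: true }),
    ]);

    // Hypothetical dispatch channels: priority level -> tasks at that level.
    let channels: BTreeMap<u8, Vec<&str>> =
        BTreeMap::from([(1, vec!["foo", "bar"]), (2, vec!["baz"])]);

    for (level, tasks) in &channels {
        // Mirrors the new `continue`: a level whose tasks are all async
        // no longer gets a dispatcher generated for it.
        if tasks.iter().all(|name| software_tasks[name].is_async) {
            println!("priority {level}: all tasks async, no dispatcher");
            continue;
        }

        // Mirrors the new `.filter(...)`: only non-async tasks become
        // ready-queue enum variants / match arms in the dispatcher.
        let variants: Vec<&str> = tasks
            .iter()
            .filter(|name| !software_tasks[**name].is_async)
            .copied()
            .collect();
        println!("priority {level}: dispatcher with variants {variants:?}");
    }
}

Running the sketch prints which levels still get a dispatcher and which task variants it would contain, mirroring the new `continue` and `.filter(...)` calls in the hunks above.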