use proc_macro2::TokenStream as TokenStream2;
use quote::quote;

use crate::{analyze::Analysis, check::Extra, codegen::util};

/// Generates code that runs after `#[init]` returns
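///
/// Returns the items to append to the generated application code (`const_app`) and the
/// statements to execute right after the user's `#[init]` function returns (`stmts`).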
pub fn codegen(
    core: u8,
    analysis: &Analysis,
    extra: &Extra,
) -> (Vec<TokenStream2>, Vec<TokenStream2>) {
    let mut const_app = vec![];
    let mut stmts = vec![];

    // initialize late resources
    if let Some(late_resources) = analysis.late_resources.get(&core) {
        for name in late_resources {
            // only initialize the resource if it's live, i.e. if the analysis assigned it a
            // location because it's actually used
            if analysis.locations.get(name).is_some() {
                // move the value returned by `#[init]` into the resource's static storage
                stmts.push(quote!(#name.as_mut_ptr().write(late.#name);));
            }
        }
    }
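
    // cross-core synchronization: a core must not leave the `init`-ialization phase (interrupts
    // are enabled at the end of this function) until the resources it uses have been initialized
    // and any core it sends messages to is ready to receive them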
    if analysis.timer_queues.is_empty() {
        // cross-initialization barriers -- notify *other* cores that their resources have been
        // initialized
        if analysis.initialization_barriers.contains_key(&core) {
            let ib = util::init_barrier(core);

            const_app.push(quote!(
                #[rtfm::export::shared]
                static #ib: rtfm::export::Barrier = rtfm::export::Barrier::new();
            ));

            stmts.push(quote!(
                #ib.release();
            ));
        }

        // then wait until the other cores have initialized *our* resources
        for (&initializer, users) in &analysis.initialization_barriers {
            if users.contains(&core) {
                let ib = util::init_barrier(initializer);

                stmts.push(quote!(
                    #ib.wait();
                ));
            }
        }

        // cross-spawn barriers: wait until other cores are ready to receive messages
        for (&receiver, senders) in &analysis.spawn_barriers {
            if senders.get(&core) == Some(&false) {
                let sb = util::spawn_barrier(receiver);

                stmts.push(quote!(
                    #sb.wait();
                ));
            }
        }
    } else {
        // if the `schedule` API is used then we synchronize all the cores so that they leave the
        // `init`-ialization phase at the same time. In that case the rendezvous barrier makes the
        // cross-initialization and spawn barriers unnecessary

        let m = extra.monotonic();

        if analysis.timer_queues.len() == 1 {
            // reset the monotonic timer / counter
            stmts.push(quote!(
                <#m as rtfm::Monotonic>::reset();
            ));
        } else {
            // in the multi-core case we need a rendezvous (RV) barrier between *all* the cores
            // that use the `schedule` API; otherwise one of the cores could observe the
            // before-reset value of the monotonic counter
            // (this may be easier to implement with `AtomicU8::fetch_sub` but that API is not
            // available on ARMv6-M)

            // this core will reset the monotonic counter
            const FIRST: u8 = 0;

            if core == FIRST {
                for &i in analysis.timer_queues.keys() {
                    let rv = util::rendezvous_ident(i);

                    const_app.push(quote!(
                        #[rtfm::export::shared]
                        static #rv: rtfm::export::Barrier = rtfm::export::Barrier::new();
                    ));

                    // wait until all the other cores have reached the RV point
                    if i != FIRST {
                        stmts.push(quote!(
                            #rv.wait();
                        ));
                    }
                }

                let rv = util::rendezvous_ident(core);
                stmts.push(quote!(
                    // the compiler fences prevent `reset` from being reordered with respect to
                    // the atomic operations -- we don't know whether `reset` contains load or
                    // store operations
                    core::sync::atomic::compiler_fence(core::sync::atomic::Ordering::SeqCst);
                    // reset the counter
                    <#m as rtfm::Monotonic>::reset();
                    core::sync::atomic::compiler_fence(core::sync::atomic::Ordering::SeqCst);

                    // now unblock all the other cores
                    #rv.release();
                ));
            } else {
                let rv = util::rendezvous_ident(core);

                // let the first core know that we have reached the RV point
                stmts.push(quote!(
                    #rv.release();
                ));

                let rv = util::rendezvous_ident(FIRST);

                // wait until the first core has reset the monotonic timer
                stmts.push(quote!(
                    #rv.wait();
                ));
            }
        }
    }

    // enable the interrupts -- this completes the `init`-ialization phase
    stmts.push(quote!(rtfm::export::interrupt::enable();));

    (const_app, stmts)
}