Diffstat (limited to 'litmus/rt_domain.c')
-rw-r--r--	litmus/rt_domain.c	349
1 file changed, 349 insertions, 0 deletions
diff --git a/litmus/rt_domain.c b/litmus/rt_domain.c
new file mode 100644
index 00000000000..1683d384756
--- /dev/null
+++ b/litmus/rt_domain.c
@@ -0,0 +1,349 @@
/*
 * litmus/rt_domain.c
 *
 * LITMUS real-time infrastructure. This file contains the
 * functions that manipulate RT domains. RT domains are an abstraction
 * of a ready queue and a release queue.
 */

#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/list.h>
#include <linux/slab.h>

#include <litmus/litmus.h>
#include <litmus/sched_plugin.h>
#include <litmus/sched_trace.h>

#include <litmus/rt_domain.h>

#include <litmus/trace.h>

#include <litmus/bheap.h>

/* Uncomment when debugging timer races... */
#if 0
#define VTRACE_TASK TRACE_TASK
#define VTRACE TRACE
#else
#define VTRACE_TASK(t, fmt, args...) /* shut up */
#define VTRACE(fmt, args...) /* be quiet already */
#endif

static int dummy_resched(rt_domain_t *rt)
{
	return 0;
}

static int dummy_order(struct bheap_node* a, struct bheap_node* b)
{
	return 0;
}

/* default implementation: use default lock */
static void default_release_jobs(rt_domain_t* rt, struct bheap* tasks)
{
	merge_ready(rt, tasks);
}

static unsigned int time2slot(lt_t time)
{
	return (unsigned int) time2quanta(time, FLOOR) % RELEASE_QUEUE_SLOTS;
}
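
/* The release queue is effectively a hash table: release times are
 * quantized and hashed into RELEASE_QUEUE_SLOTS buckets. Each slot holds
 * a list of release_heaps sorted by release time; heaps that collide in
 * a slot are told apart by an exact release_time comparison in
 * get_release_heap() below.
 */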

static enum hrtimer_restart on_release_timer(struct hrtimer *timer)
{
	unsigned long flags;
	struct release_heap* rh;
	rh = container_of(timer, struct release_heap, timer);

	TS_RELEASE_LATENCY(rh->release_time);

	VTRACE("on_release_timer(0x%p) starts.\n", timer);

	TS_RELEASE_START;

	raw_spin_lock_irqsave(&rh->dom->release_lock, flags);
	VTRACE("CB has the release_lock 0x%p\n", &rh->dom->release_lock);
	/* remove from release queue */
	list_del(&rh->list);
	raw_spin_unlock_irqrestore(&rh->dom->release_lock, flags);
	VTRACE("CB returned release_lock 0x%p\n", &rh->dom->release_lock);

	/* call release callback */
	rh->dom->release_jobs(rh->dom, &rh->heap);
	/* WARNING: rh can be referenced from other CPUs from now on. */

	TS_RELEASE_END;

	VTRACE("on_release_timer(0x%p) ends.\n", timer);

	return HRTIMER_NORESTART;
}
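
/* The timer is one-shot: the heap is unlinked from the release queue
 * before release_jobs() runs, and the callback returns HRTIMER_NORESTART.
 * A future release re-arms a timer via arm_release_timer() below.
 */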

/* allocated in litmus.c */
struct kmem_cache * release_heap_cache;

struct release_heap* release_heap_alloc(int gfp_flags)
{
	struct release_heap* rh;
	rh = kmem_cache_alloc(release_heap_cache, gfp_flags);
	if (rh) {
		/* initialize timer */
		hrtimer_init(&rh->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
		rh->timer.function = on_release_timer;
	}
	return rh;
}

void release_heap_free(struct release_heap* rh)
{
	/* make sure timer is no longer in use */
	hrtimer_cancel(&rh->timer);
	kmem_cache_free(release_heap_cache, rh);
}

/* Caller must hold release lock.
 * Will return heap for given time. If no such heap exists prior to
 * the invocation it will be created.
 */
static struct release_heap* get_release_heap(rt_domain_t *rt,
					     struct task_struct* t,
					     int use_task_heap)
{
	struct list_head* pos;
	struct release_heap* heap = NULL;
	struct release_heap* rh;
	lt_t release_time = get_release(t);
	unsigned int slot = time2slot(release_time);

	/* initialize pos for the case that the list is empty */
	pos = rt->release_queue.slot[slot].next;
	list_for_each(pos, &rt->release_queue.slot[slot]) {
		rh = list_entry(pos, struct release_heap, list);
		if (release_time == rh->release_time) {
			/* perfect match -- this happens on hyperperiod
			 * boundaries
			 */
			heap = rh;
			break;
		} else if (lt_before(release_time, rh->release_time)) {
			/* we need to insert a new node since rh is
			 * already in the future
			 */
			break;
		}
	}
	if (!heap && use_task_heap) {
		/* use pre-allocated release heap */
		rh = tsk_rt(t)->rel_heap;

		rh->dom = rt;
		rh->release_time = release_time;

		/* add to release queue */
		list_add(&rh->list, pos->prev);
		heap = rh;
	}
	return heap;
}
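
/* Callers that may need a new heap first invoke get_release_heap() with
 * use_task_heap = 0; if that returns NULL, they drop release_lock,
 * reinitialize the task's pre-allocated heap, re-acquire the lock, and
 * retry with use_task_heap = 1 (see arm_release_timer() below).
 */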

static void reinit_release_heap(struct task_struct* t)
{
	struct release_heap* rh;

	/* use pre-allocated release heap */
	rh = tsk_rt(t)->rel_heap;

	/* Make sure it is safe to use. The timer callback could still
	 * be executing on another CPU; hrtimer_cancel() will wait
	 * until the timer callback has completed. However, under no
	 * circumstances should the timer be active (= yet to be
	 * triggered).
	 *
	 * WARNING: If the CPU still holds the release_lock at this point,
	 * deadlock may occur!
	 */
	BUG_ON(hrtimer_cancel(&rh->timer));

	/* initialize */
	bheap_init(&rh->heap);
#ifdef CONFIG_RELEASE_MASTER
	atomic_set(&rh->info.state, HRTIMER_START_ON_INACTIVE);
#endif
}

/* arm_release_timer() - start local release timer or trigger
 * remote timer (pull timer)
 *
 * Called by add_release() with:
 * - tobe_lock taken
 * - IRQ disabled
 */
#ifdef CONFIG_RELEASE_MASTER
#define arm_release_timer(t) arm_release_timer_on((t), NO_CPU)
static void arm_release_timer_on(rt_domain_t *_rt, int target_cpu)
#else
static void arm_release_timer(rt_domain_t *_rt)
#endif
{
	rt_domain_t *rt = _rt;
	struct list_head list;
	struct list_head *pos, *safe;
	struct task_struct* t;
	struct release_heap* rh;

	VTRACE("arm_release_timer() at %llu\n", litmus_clock());
	list_replace_init(&rt->tobe_released, &list);

	list_for_each_safe(pos, safe, &list) {
		/* pick task off the work list */
		t = list_entry(pos, struct task_struct, rt_param.list);
		sched_trace_task_release(t);
		list_del(pos);

		/* put into release heap while holding release_lock */
		raw_spin_lock(&rt->release_lock);
		VTRACE_TASK(t, "I have the release_lock 0x%p\n", &rt->release_lock);

		rh = get_release_heap(rt, t, 0);
		if (!rh) {
			/* need to use our own, but drop lock first */
			raw_spin_unlock(&rt->release_lock);
			VTRACE_TASK(t, "Dropped release_lock 0x%p\n",
				    &rt->release_lock);

			reinit_release_heap(t);
			VTRACE_TASK(t, "release_heap ready\n");

			raw_spin_lock(&rt->release_lock);
			VTRACE_TASK(t, "Re-acquired release_lock 0x%p\n",
				    &rt->release_lock);

			rh = get_release_heap(rt, t, 1);
		}
		bheap_insert(rt->order, &rh->heap, tsk_rt(t)->heap_node);
		VTRACE_TASK(t, "arm_release_timer(): added to release heap\n");

		raw_spin_unlock(&rt->release_lock);
		VTRACE_TASK(t, "Returned the release_lock 0x%p\n", &rt->release_lock);

		/* To avoid arming the timer multiple times, we only let the
		 * owner do the arming (which is the "first" task to reference
		 * this release_heap anyway).
		 */
		if (rh == tsk_rt(t)->rel_heap) {
			VTRACE_TASK(t, "arming timer 0x%p\n", &rh->timer);
			/* we cannot arm the timer using hrtimer_start()
			 * as it may deadlock on rq->lock
			 *
			 * PINNED mode is ok on both local and remote CPU
			 */
#ifdef CONFIG_RELEASE_MASTER
			if (rt->release_master == NO_CPU &&
			    target_cpu == NO_CPU)
#endif
				__hrtimer_start_range_ns(&rh->timer,
					ns_to_ktime(rh->release_time),
					0, HRTIMER_MODE_ABS_PINNED, 0);
#ifdef CONFIG_RELEASE_MASTER
			else
				hrtimer_start_on(
					/* target_cpu overrides release master */
					(target_cpu != NO_CPU ?
					 target_cpu : rt->release_master),
					&rh->info, &rh->timer,
					ns_to_ktime(rh->release_time),
					HRTIMER_MODE_ABS_PINNED);
#endif
		} else
			VTRACE_TASK(t, "0x%p is not my timer\n", &rh->timer);
	}
}
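
/* Release path in a nutshell: __add_release() queues the task on
 * tobe_released under tobe_lock; arm_release_timer() moves it into the
 * matching release_heap and arms that heap's hrtimer; on_release_timer()
 * fires at the release time and hands the whole heap to the domain's
 * release_jobs() callback, which by default merges it into the ready
 * queue.
 */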

void rt_domain_init(rt_domain_t *rt,
		    bheap_prio_t order,
		    check_resched_needed_t check,
		    release_jobs_t release
		   )
{
	int i;

	BUG_ON(!rt);
	if (!check)
		check = dummy_resched;
	if (!release)
		release = default_release_jobs;
	if (!order)
		order = dummy_order;

#ifdef CONFIG_RELEASE_MASTER
	rt->release_master = NO_CPU;
#endif

	bheap_init(&rt->ready_queue);
	INIT_LIST_HEAD(&rt->tobe_released);
	for (i = 0; i < RELEASE_QUEUE_SLOTS; i++)
		INIT_LIST_HEAD(&rt->release_queue.slot[i]);

	raw_spin_lock_init(&rt->ready_lock);
	raw_spin_lock_init(&rt->release_lock);
	raw_spin_lock_init(&rt->tobe_lock);

	rt->check_resched = check;
	rt->release_jobs = release;
	rt->order = order;
}
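
/* Illustrative use only (my_domain and my_prio_order are hypothetical
 * names, not part of this file): a plugin typically initializes its
 * domain once at activation, e.g.
 *
 *	static rt_domain_t my_domain;
 *	rt_domain_init(&my_domain, my_prio_order, NULL, NULL);
 *
 * Passing NULL for release falls back to default_release_jobs(), which
 * merges released jobs straight into the ready queue; NULL check and
 * order fall back to dummy_resched() and dummy_order().
 */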

/* add_ready - add a real-time task to the rt ready queue. It must be runnable.
 * @new: the newly released task
 */
void __add_ready(rt_domain_t* rt, struct task_struct *new)
{
	TRACE("rt: adding %s/%d (%llu, %llu, %llu) rel=%llu "
	      "to ready queue at %llu\n",
	      new->comm, new->pid,
	      get_exec_cost(new), get_rt_period(new), get_rt_relative_deadline(new),
	      get_release(new), litmus_clock());

	BUG_ON(bheap_node_in_heap(tsk_rt(new)->heap_node));

	bheap_insert(rt->order, &rt->ready_queue, tsk_rt(new)->heap_node);
	rt->check_resched(rt);
}
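
/* Note: the __-prefixed functions assume the caller already holds the
 * relevant domain lock (ready_lock here); the non-__ inline wrappers in
 * litmus/rt_domain.h are expected to acquire it first.
 */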

/* merge_ready - Add a sorted set of tasks to the rt ready queue. They must be runnable.
 * @tasks - the newly released tasks
 */
void __merge_ready(rt_domain_t* rt, struct bheap* tasks)
{
	bheap_union(rt->order, &rt->ready_queue, tasks);
	rt->check_resched(rt);
}

#ifdef CONFIG_RELEASE_MASTER
void __add_release_on(rt_domain_t* rt, struct task_struct *task,
		      int target_cpu)
{
	TRACE_TASK(task, "add_release_on(), rel=%llu, target=%d\n",
		   get_release(task), target_cpu);
	list_add(&tsk_rt(task)->list, &rt->tobe_released);
	task->rt_param.domain = rt;

	arm_release_timer_on(rt, target_cpu);
}
#endif

/* add_release - add a real-time task to the rt release queue.
 * @task: the sleeping task
 */
void __add_release(rt_domain_t* rt, struct task_struct *task)
{
	TRACE_TASK(task, "add_release(), rel=%llu\n", get_release(task));
	list_add(&tsk_rt(task)->list, &rt->tobe_released);
	task->rt_param.domain = rt;

	arm_release_timer(rt);
}
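
/* Callers of __add_release()/__add_release_on() must hold tobe_lock with
 * IRQs disabled, as documented at arm_release_timer() above, and the
 * task's release time must already be set before it is queued here.
 */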