aboutsummaryrefslogtreecommitdiffstats
path: root/litmus/aux_tasks.c
diff options
context:
space:
mode:
Diffstat (limited to 'litmus/aux_tasks.c')
-rw-r--r--litmus/aux_tasks.c529
1 files changed, 529 insertions, 0 deletions
diff --git a/litmus/aux_tasks.c b/litmus/aux_tasks.c
new file mode 100644
index 000000000000..ef26bba3be77
--- /dev/null
+++ b/litmus/aux_tasks.c
@@ -0,0 +1,529 @@
1#include <litmus/sched_plugin.h>
2#include <litmus/trace.h>
3#include <litmus/litmus.h>
4
5#ifdef CONFIG_REALTIME_AUX_TASKS
6#include <litmus/rt_param.h>
7#include <litmus/aux_tasks.h>
8
9#include <linux/time.h>
10
11#define AUX_SLICE_NR_JIFFIES 1
12#define AUX_SLICE_NS ((NSEC_PER_SEC / HZ) * AUX_SLICE_NR_JIFFIES)
13
14static int admit_aux_task(struct task_struct *t)
15{
16 int retval = 0;
17 struct task_struct *leader = t->group_leader;
18
19 /* budget enforcement increments job numbers. job numbers are used in
20 * tie-breaking of aux_tasks. method helps ensure:
21 * 1) aux threads with no inherited priority can starve another (they share
22 * the CPUs equally.
23 * 2) aux threads that inherit the same priority cannot starve each other.
24 *
25 * Assuming aux threads are well-behavied (they do very little work and
26 * suspend), risk of starvation should not be an issue, but this is a
27 * fail-safe.
28 */
29 struct rt_task tp = {
30 .period = AUX_SLICE_NS,
31 .relative_deadline = AUX_SLICE_NS,
32 .exec_cost = AUX_SLICE_NS, /* allow full utilization with buget tracking */
33 .phase = 0,
34 .cpu = task_cpu(leader), /* take CPU of group leader */
35 .budget_policy = QUANTUM_ENFORCEMENT,
36 .budget_signal_policy = NO_SIGNALS,
37 .cls = RT_CLASS_BEST_EFFORT
38 };
39
40 struct sched_param param = { .sched_priority = 0};
41
42 tsk_rt(t)->task_params = tp;
43 retval = sched_setscheduler_nocheck(t, SCHED_LITMUS, &param);
44
45 return retval;
46}
47
48int exit_aux_task(struct task_struct *t)
49{
50 int retval = 0;
51
52 BUG_ON(!tsk_rt(t)->is_aux_task);
53
54 TRACE_CUR("Aux task %s/%d is exiting from %s/%d.\n", t->comm, t->pid, t->group_leader->comm, t->group_leader->pid);
55
56 tsk_rt(t)->is_aux_task = 0;
57
58#ifdef CONFIG_REALTIME_AUX_TASK_PRIORITY_INHERITANCE
59 list_del(&tsk_rt(t)->aux_task_node);
60 if (tsk_rt(t)->inh_task) {
61 litmus->__decrease_prio(t, NULL);
62 }
63#endif
64
65 return retval;
66}
67
/* Propagate the (effective) priority of @hp to every aux thread in
 * @leader's group. Threads that are not yet real-time or that already
 * inherit from @hp are skipped. Returns the last __increase_prio()
 * result (0 when nothing was raised).
 */
static int aux_tasks_increase_priority(struct task_struct *leader, struct task_struct *hp)
{
	int retval = 0;

#ifdef CONFIG_REALTIME_AUX_TASK_PRIORITY_INHERITANCE
	struct rt_param *step;

	TRACE_CUR("Increasing priority of aux tasks in group %s/%d.\n", leader->comm, leader->pid);

	list_for_each_entry(step, &tsk_aux(leader)->aux_tasks, aux_task_node) {
		struct task_struct *aux =
			container_of(step, struct task_struct, rt_param);

		if (!is_realtime(aux)) {
			TRACE_CUR("skipping non-real-time aux task %s/%d\n", aux->comm, aux->pid);
			continue;
		}
		if (tsk_rt(aux)->inh_task == hp) {
			TRACE_CUR("skipping real-time aux task %s/%d that already inherits from %s/%d\n", aux->comm, aux->pid, hp->comm, hp->pid);
			continue;
		}

		/* aux tasks don't touch rt locks, so no nested call needed. */
		TRACE_CUR("increasing %s/%d.\n", aux->comm, aux->pid);
		retval = litmus->__increase_prio(aux, hp);
	}
#endif

	return retval;
}
98
/* Lower every real-time aux thread in @leader's group to inherit from
 * @hp (which may be NULL to drop inheritance entirely). Returns the last
 * __decrease_prio() result (0 when nothing was lowered).
 */
static int aux_tasks_decrease_priority(struct task_struct *leader, struct task_struct *hp)
{
	int retval = 0;

#ifdef CONFIG_REALTIME_AUX_TASK_PRIORITY_INHERITANCE
	struct rt_param *step;

	TRACE_CUR("Decreasing priority of aux tasks in group %s/%d.\n", leader->comm, leader->pid);

	list_for_each_entry(step, &tsk_aux(leader)->aux_tasks, aux_task_node) {
		struct task_struct *aux =
			container_of(step, struct task_struct, rt_param);

		if (!is_realtime(aux)) {
			TRACE_CUR("skipping non-real-time aux task %s/%d\n", aux->comm, aux->pid);
			continue;
		}

		TRACE_CUR("decreasing %s/%d.\n", aux->comm, aux->pid);
		retval = litmus->__decrease_prio(aux, hp);
	}
#endif

	return retval;
}
125
/* Notify the aux layer that owner @t's priority has increased:
 * re-position @t in the group's owner heap and, if the heap top's
 * effective priority changed, propagate the new priority to the group's
 * aux threads. Returns 0 or the propagation error.
 */
int aux_task_owner_increase_priority(struct task_struct *t)
{
	int retval = 0;

#ifdef CONFIG_REALTIME_AUX_TASK_PRIORITY_INHERITANCE
	struct task_struct *leader;
	struct task_struct *hp = NULL;
	struct task_struct *hp_eff = NULL;

	BUG_ON(!is_realtime(t));
	BUG_ON(!tsk_rt(t)->has_aux_tasks);

	leader = t->group_leader;

	if (!binheap_is_in_heap(&tsk_rt(t)->aux_task_owner_node)) {
		WARN_ON(!is_running(t));
		TRACE_CUR("aux tasks may not inherit from %s/%d in group %s/%d\n",
				t->comm, t->pid, leader->comm, leader->pid);
		goto out;
	}

	TRACE_CUR("task %s/%d in group %s/%d increasing priority.\n", t->comm, t->pid, leader->comm, leader->pid);

	/* remember the old top's effective priority before re-heapifying */
	hp = container_of(binheap_top_entry(&tsk_aux(leader)->aux_task_owners, struct rt_param, aux_task_owner_node),
			struct task_struct, rt_param);
	hp_eff = effective_priority(hp);

	if (hp != t) { /* our position in the heap may have changed. hp is already at the root. */
		binheap_decrease(&tsk_rt(t)->aux_task_owner_node, &tsk_aux(leader)->aux_task_owners);
	}

	hp = container_of(binheap_top_entry(&tsk_aux(leader)->aux_task_owners, struct rt_param, aux_task_owner_node),
			struct task_struct, rt_param);

	if (effective_priority(hp) != hp_eff) { /* the eff. prio. of hp has changed */
		hp_eff = effective_priority(hp);
		TRACE_CUR("%s/%d is new hp in group %s/%d.\n", t->comm, t->pid, leader->comm, leader->pid);
		retval = aux_tasks_increase_priority(leader, hp_eff);
	}

	/* label lives inside the #ifdef: with priority inheritance compiled
	 * out there is no goto, and an orphaned label would trigger an
	 * unused-label warning. */
out:
#endif

	return retval;
}
170
/* Notify the aux layer that owner @t's priority has decreased: re-insert
 * @t into the group's owner heap (delete + add, since only a
 * decrease-key operation exists) and, if @t was the top and the top's
 * effective priority changed, lower the group's aux threads accordingly.
 * Returns 0 or the propagation error.
 */
int aux_task_owner_decrease_priority(struct task_struct *t)
{
	int retval = 0;

#ifdef CONFIG_REALTIME_AUX_TASK_PRIORITY_INHERITANCE
	struct task_struct *leader;
	struct task_struct *hp = NULL;
	struct task_struct *hp_eff = NULL;

	BUG_ON(!is_realtime(t));
	BUG_ON(!tsk_rt(t)->has_aux_tasks);

	leader = t->group_leader;

	if (!binheap_is_in_heap(&tsk_rt(t)->aux_task_owner_node)) {
		WARN_ON(!is_running(t));
		TRACE_CUR("aux tasks may not inherit from %s/%d in group %s/%d\n",
				t->comm, t->pid, leader->comm, leader->pid);
		goto out;
	}

	/* typo fix: was "decresing" */
	TRACE_CUR("task %s/%d in group %s/%d decreasing priority.\n", t->comm, t->pid, leader->comm, leader->pid);

	hp = container_of(binheap_top_entry(&tsk_aux(leader)->aux_task_owners, struct rt_param, aux_task_owner_node),
			struct task_struct, rt_param);
	hp_eff = effective_priority(hp);
	binheap_delete(&tsk_rt(t)->aux_task_owner_node, &tsk_aux(leader)->aux_task_owners);
	binheap_add(&tsk_rt(t)->aux_task_owner_node, &tsk_aux(leader)->aux_task_owners,
			struct rt_param, aux_task_owner_node);

	if (hp == t) { /* t was originally the hp */
		struct task_struct *new_hp =
			container_of(binheap_top_entry(&tsk_aux(leader)->aux_task_owners, struct rt_param, aux_task_owner_node),
					struct task_struct, rt_param);
		if (effective_priority(new_hp) != hp_eff) { /* eff prio. of hp has changed */
			hp_eff = effective_priority(new_hp);
			TRACE_CUR("%s/%d is no longer hp in group %s/%d.\n", t->comm, t->pid, leader->comm, leader->pid);
			retval = aux_tasks_decrease_priority(leader, hp_eff);
		}
	}

	/* label inside the #ifdef to avoid an unused-label warning when
	 * priority inheritance is compiled out. */
out:
#endif

	return retval;
}
216
217int make_aux_task_if_required(struct task_struct *t)
218{
219 struct task_struct *leader;
220 int retval = 0;
221
222 read_lock_irq(&tasklist_lock);
223
224 leader = t->group_leader;
225
226 if(!tsk_aux(leader)->initialized || !tsk_aux(leader)->aux_future) {
227 goto out;
228 }
229
230 TRACE_CUR("Making %s/%d in %s/%d an aux thread.\n", t->comm, t->pid, leader->comm, leader->pid);
231
232 INIT_LIST_HEAD(&tsk_rt(t)->aux_task_node);
233 INIT_BINHEAP_NODE(&tsk_rt(t)->aux_task_owner_node);
234
235 retval = admit_aux_task(t);
236 if (retval == 0) {
237 tsk_rt(t)->is_aux_task = 1;
238
239#ifdef CONFIG_REALTIME_AUX_TASK_PRIORITY_INHERITANCE
240 list_add_tail(&tsk_rt(t)->aux_task_node, &tsk_aux(leader)->aux_tasks);
241
242 if (!binheap_empty(&tsk_aux(leader)->aux_task_owners)) {
243 struct task_struct *hp =
244 container_of(binheap_top_entry(&tsk_aux(leader)->aux_task_owners, struct rt_param, aux_task_owner_node),
245 struct task_struct, rt_param);
246
247 TRACE_CUR("hp in group: %s/%d\n", hp->comm, hp->pid);
248
249 retval = litmus->__increase_prio(t, (tsk_rt(hp)->inh_task)? tsk_rt(hp)->inh_task : hp);
250
251 if (retval != 0) {
252 /* don't know how to recover from bugs with prio inheritance. better just crash. */
253 read_unlock_irq(&tasklist_lock);
254 BUG();
255 }
256 }
257#endif
258 }
259
260out:
261 read_unlock_irq(&tasklist_lock);
262
263 return retval;
264}
265
266
/* Activate @t as an aux-task owner: insert it into its group's owner
 * heap and, if it becomes the new highest-priority owner, raise the
 * group's aux threads to its (effective) priority. Returns 0 on
 * success, -1 if @t is not flagged as an aux owner.
 */
long enable_aux_task_owner(struct task_struct *t)
{
	long retval = 0;

#ifdef CONFIG_REALTIME_AUX_TASK_PRIORITY_INHERITANCE
	struct task_struct *leader = t->group_leader;
	struct task_struct *hp;

	if (!tsk_rt(t)->has_aux_tasks) {
		TRACE_CUR("task %s/%d is not an aux owner\n", t->comm, t->pid);
		return -1;
	}

	BUG_ON(!is_realtime(t));

	if (binheap_is_in_heap(&tsk_rt(t)->aux_task_owner_node)) {
		TRACE_CUR("task %s/%d is already active\n", t->comm, t->pid);
		goto out;
	}

	binheap_add(&tsk_rt(t)->aux_task_owner_node, &tsk_aux(leader)->aux_task_owners,
			struct rt_param, aux_task_owner_node);

	hp = container_of(binheap_top_entry(&tsk_aux(leader)->aux_task_owners, struct rt_param, aux_task_owner_node),
			struct task_struct, rt_param);
	if (hp == t) {
		/* we're the new hp */
		TRACE_CUR("%s/%d is new hp in group %s/%d.\n", t->comm, t->pid, leader->comm, leader->pid);

		retval = aux_tasks_increase_priority(leader,
				(tsk_rt(hp)->inh_task) ? tsk_rt(hp)->inh_task : hp);
	}

	/* label inside the #ifdef to avoid an unused-label warning when
	 * priority inheritance is compiled out. */
out:
#endif

	return retval;
}
304
/* Deactivate @t as an aux-task owner: remove it from its group's owner
 * heap and, if it was the highest-priority owner, lower the group's aux
 * threads to the next owner's (effective) priority — or to none if no
 * owner remains. Returns 0 on success, -1 if @t is not flagged as an
 * aux owner.
 */
long disable_aux_task_owner(struct task_struct *t)
{
	long retval = 0;

#ifdef CONFIG_REALTIME_AUX_TASK_PRIORITY_INHERITANCE
	struct task_struct *leader = t->group_leader;
	struct task_struct *hp;
	struct task_struct *new_hp = NULL;

	if (!tsk_rt(t)->has_aux_tasks) {
		TRACE_CUR("task %s/%d is not an aux owner\n", t->comm, t->pid);
		return -1;
	}

	BUG_ON(!is_realtime(t));

	if (!binheap_is_in_heap(&tsk_rt(t)->aux_task_owner_node)) {
		TRACE_CUR("task %s/%d is already not active\n", t->comm, t->pid);
		goto out;
	}

	TRACE_CUR("task %s/%d exiting from group %s/%d.\n", t->comm, t->pid, leader->comm, leader->pid);

	hp = container_of(binheap_top_entry(&tsk_aux(leader)->aux_task_owners, struct rt_param, aux_task_owner_node),
			struct task_struct, rt_param);
	binheap_delete(&tsk_rt(t)->aux_task_owner_node, &tsk_aux(leader)->aux_task_owners);

	if (!binheap_empty(&tsk_aux(leader)->aux_task_owners)) {
		new_hp = container_of(binheap_top_entry(&tsk_aux(leader)->aux_task_owners, struct rt_param, aux_task_owner_node),
				struct task_struct, rt_param);
	}

	if (hp == t && new_hp != t) {
		struct task_struct *to_inh = NULL;

		TRACE_CUR("%s/%d is no longer hp in group %s/%d.\n", t->comm, t->pid, leader->comm, leader->pid);

		if (new_hp)
			to_inh = (tsk_rt(new_hp)->inh_task) ? tsk_rt(new_hp)->inh_task : new_hp;

		retval = aux_tasks_decrease_priority(leader, to_inh);
	}

	/* label inside the #ifdef to avoid an unused-label warning when
	 * priority inheritance is compiled out. */
out:
#endif

	return retval;
}
353
354
355static int aux_task_owner_max_priority_order(struct binheap_node *a,
356 struct binheap_node *b)
357{
358 struct task_struct *d_a = container_of(binheap_entry(a, struct rt_param, aux_task_owner_node),
359 struct task_struct, rt_param);
360 struct task_struct *d_b = container_of(binheap_entry(b, struct rt_param, aux_task_owner_node),
361 struct task_struct, rt_param);
362
363 BUG_ON(!d_a);
364 BUG_ON(!d_b);
365
366 return litmus->compare(d_a, d_b);
367}
368
369
370static long __do_enable_aux_tasks(int flags)
371{
372 long retval = 0;
373 struct task_struct *leader;
374 struct task_struct *t;
375 int aux_tasks_added = 0;
376
377 leader = current->group_leader;
378
379 if (!tsk_aux(leader)->initialized) {
380 INIT_LIST_HEAD(&tsk_aux(leader)->aux_tasks);
381 INIT_BINHEAP_HANDLE(&tsk_aux(leader)->aux_task_owners, aux_task_owner_max_priority_order);
382 tsk_aux(leader)->initialized = 1;
383 }
384
385 if (flags & AUX_FUTURE) {
386 tsk_aux(leader)->aux_future = 1;
387 }
388
389 t = leader;
390 do {
391 if (!tsk_rt(t)->has_aux_tasks && !tsk_rt(t)->is_aux_task) {
392 /* This may harmlessly reinit unused nodes. TODO: Don't reinit already init nodes. */
393 /* doesn't hurt to initialize both nodes */
394 INIT_LIST_HEAD(&tsk_rt(t)->aux_task_node);
395 INIT_BINHEAP_NODE(&tsk_rt(t)->aux_task_owner_node);
396 }
397
398 TRACE_CUR("Checking task in %s/%d: %s/%d = (p = %llu):\n",
399 leader->comm, leader->pid, t->comm, t->pid,
400 tsk_rt(t)->task_params.period);
401
402 /* inspect period to see if it is an rt task */
403 if (tsk_rt(t)->task_params.period == 0) {
404 if (flags && AUX_CURRENT) {
405 if (!tsk_rt(t)->is_aux_task) {
406 int admit_ret;
407
408 TRACE_CUR("AUX task in %s/%d: %s/%d:\n", leader->comm, leader->pid, t->comm, t->pid);
409
410 admit_ret = admit_aux_task(t);
411
412 if (admit_ret == 0) {
413 /* hasn't been aux_tasks_increase_priorityted into rt. make it a aux. */
414 tsk_rt(t)->is_aux_task = 1;
415 aux_tasks_added = 1;
416
417#ifdef CONFIG_REALTIME_AUX_TASK_PRIORITY_INHERITANCE
418 list_add_tail(&tsk_rt(t)->aux_task_node, &tsk_aux(leader)->aux_tasks);
419#endif
420 }
421 }
422 else {
423 TRACE_CUR("AUX task in %s/%d is already set up: %s/%d\n", leader->comm, leader->pid, t->comm, t->pid);
424 }
425 }
426 else {
427 TRACE_CUR("Not changing thread in %s/%d to AUX task: %s/%d\n", leader->comm, leader->pid, t->comm, t->pid);
428 }
429 }
430 else if (!tsk_rt(t)->is_aux_task) { /* don't let aux tasks get aux tasks of their own */
431 if (!tsk_rt(t)->has_aux_tasks) {
432 TRACE_CUR("task in %s/%d: %s/%d:\n", leader->comm, leader->pid, t->comm, t->pid);
433 tsk_rt(t)->has_aux_tasks = 1;
434 }
435 else {
436 TRACE_CUR("task in %s/%d is already set up: %s/%d\n", leader->comm, leader->pid, t->comm, t->pid);
437 }
438 }
439
440 t = next_thread(t);
441 } while(t != leader);
442
443
444#ifdef CONFIG_REALTIME_AUX_TASK_PRIORITY_INHERITANCE
445 if (aux_tasks_added && !binheap_empty(&tsk_aux(leader)->aux_task_owners)) {
446 struct task_struct *hp = container_of(binheap_top_entry(&tsk_aux(leader)->aux_task_owners, struct rt_param, aux_task_owner_node),
447 struct task_struct, rt_param);
448 TRACE_CUR("hp in group: %s/%d\n", hp->comm, hp->pid);
449 retval = aux_tasks_increase_priority(leader, (tsk_rt(hp)->inh_task)? tsk_rt(hp)->inh_task : hp);
450 }
451#endif
452
453 return retval;
454}
455
456static long __do_disable_aux_tasks(int flags)
457{
458 long retval = 0;
459 struct task_struct *leader;
460 struct task_struct *t;
461
462 leader = current->group_leader;
463
464 if (flags & AUX_FUTURE) {
465 tsk_aux(leader)->aux_future = 0;
466 }
467
468 if (flags & AUX_CURRENT) {
469 t = leader;
470 do {
471 if (tsk_rt(t)->is_aux_task) {
472
473 TRACE_CUR("%s/%d is an aux task.\n", t->comm, t->pid);
474
475 if (is_realtime(t)) {
476 long temp_retval;
477 struct sched_param param = { .sched_priority = 0};
478
479 TRACE_CUR("%s/%d is real-time. Changing policy to SCHED_NORMAL.\n", t->comm, t->pid);
480
481 temp_retval = sched_setscheduler_nocheck(t, SCHED_NORMAL, &param);
482
483 if (temp_retval != 0) {
484 TRACE_CUR("error changing policy of %s/%d to SCHED_NORMAL\n", t->comm, t->pid);
485 if (retval == 0) {
486 retval = temp_retval;
487 }
488 else {
489 TRACE_CUR("prior error (%d) masks new error (%d)\n", retval, temp_retval);
490 }
491 }
492 }
493
494 tsk_rt(t)->is_aux_task = 0;
495 }
496 t = next_thread(t);
497 } while(t != leader);
498 }
499
500 return retval;
501}
502
503asmlinkage long sys_set_aux_tasks(int flags)
504{
505 long retval;
506
507 read_lock_irq(&tasklist_lock);
508
509 if (flags & AUX_ENABLE) {
510 retval = __do_enable_aux_tasks(flags);
511 }
512 else {
513 retval = __do_disable_aux_tasks(flags);
514 }
515
516 read_unlock_irq(&tasklist_lock);
517
518 return retval;
519}
520
521#else
522
523asmlinkage long sys_set_aux_tasks(int flags)
524{
525 printk("Unsupported. Recompile with CONFIG_REALTIME_AUX_TASKS.\n");
526 return -EINVAL;
527}
528
529#endif