path: root/litmus/aux_tasks.c
author	Glenn Elliott <gelliott@cs.unc.edu>	2012-09-14 08:34:36 -0400
committer	Glenn Elliott <gelliott@cs.unc.edu>	2012-09-14 08:34:36 -0400
commit	4ad6ba08f0dab67bbd89a26b27f1cc86e3c45c13 (patch)
tree	fd982c7a12f7a947278e05d0b126a015c24793f4 /litmus/aux_tasks.c
parent	c1d1979c99ca397241da4e3d7e0cb77f7ec28240 (diff)
checkpoint for aux_tasks. can still deadlock
Diffstat (limited to 'litmus/aux_tasks.c')
-rw-r--r--	litmus/aux_tasks.c	387
1 file changed, 387 insertions, 0 deletions
diff --git a/litmus/aux_tasks.c b/litmus/aux_tasks.c
new file mode 100644
index 000000000000..c197a95fc3a1
--- /dev/null
+++ b/litmus/aux_tasks.c
@@ -0,0 +1,387 @@
#ifdef CONFIG_LITMUS_LOCKING

#include <litmus/sched_plugin.h>
#include <litmus/trace.h>
#include <litmus/litmus.h>
#include <litmus/rt_param.h>
#include <litmus/aux_tasks.h>

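/* Admit a thread to LITMUS^RT as an aux task: zero execution cost, the magic
 * aux-task period, best-effort class, no budget enforcement, and the CPU of
 * its group leader, then switch it to SCHED_LITMUS. */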
static int admit_aux_task(struct task_struct *t)
{
	int retval = 0;
	struct task_struct *leader = t->group_leader;

	struct rt_task tp = {
		.exec_cost = 0,
		.period = MAGIC_AUX_TASK_PERIOD,
		.relative_deadline = MAGIC_AUX_TASK_PERIOD,
		.phase = 0,
		.cpu = task_cpu(leader), /* take CPU of group leader */
		.budget_policy = NO_ENFORCEMENT,
		.cls = RT_CLASS_BEST_EFFORT
	};

	struct sched_param param = { .sched_priority = 0 };

	tsk_rt(t)->task_params = tp;
	retval = sched_setscheduler_nocheck(t, SCHED_LITMUS, &param);

	return retval;
}

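/* Tear down an exiting aux task: unlink it from its group leader's aux_tasks
 * list and drop any priority it is currently inheriting. */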
int exit_aux_task(struct task_struct *t)
{
	int retval = 0;
	struct task_struct *leader = t->group_leader;

	BUG_ON(!tsk_rt(t)->is_aux_task);

	TRACE_CUR("Aux task %s/%d is exiting from %s/%d.\n", t->comm, t->pid, leader->comm, leader->pid);

	list_del(&tsk_rt(t)->aux_task_node);

	tsk_rt(t)->is_aux_task = 0;

	if (tsk_rt(t)->inh_task) {
		litmus->decrease_prio(t, NULL);
	}

	return retval;
}

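/* Propagate a priority increase to every aux task in the leader's thread
 * group: each real-time aux task inherits the priority of hp. */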
static int aux_tasks_increase_priority(struct task_struct *leader, struct task_struct *hp)
{
	int retval = 0;
	struct list_head *pos;

	TRACE_CUR("Increasing priority of aux tasks in group %s/%d.\n", leader->comm, leader->pid);

	list_for_each(pos, &tsk_aux(leader)->aux_tasks) {
		struct task_struct *aux =
			container_of(list_entry(pos, struct rt_param, aux_task_node),
					struct task_struct, rt_param);

		if (!is_realtime(aux)) {
#if 0
			/* currently can't do this here because of scheduler deadlock on itself */
			TRACE_CUR("admitting aux task: %s/%d\n", aux->comm, aux->pid);
			retval = admit_aux_task(aux);

			if (retval != 0) {
				TRACE_CUR("failed to admit aux task %s/%d\n", aux->comm, aux->pid);
				goto out;
			}
#endif
			TRACE_CUR("skipping non-real-time aux task %s/%d\n", aux->comm, aux->pid);
		}
		else {
			/* aux tasks don't touch rt locks, so no nested call needed. */
			TRACE_CUR("increasing %s/%d.\n", aux->comm, aux->pid);
			retval = litmus->__increase_prio(aux, hp);
		}
	}

	//out:
	return retval;
}

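/* Propagate a priority decrease to every aux task in the leader's thread
 * group: each real-time aux task is lowered to hp's priority (or back to its
 * base priority when hp is NULL). */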
static int aux_tasks_decrease_priority(struct task_struct *leader, struct task_struct *hp)
{
	int retval = 0;
	struct list_head *pos;

	TRACE_CUR("Decreasing priority of aux tasks in group %s/%d.\n", leader->comm, leader->pid);

	list_for_each(pos, &tsk_aux(leader)->aux_tasks) {
		struct task_struct *aux =
			container_of(list_entry(pos, struct rt_param, aux_task_node),
					struct task_struct, rt_param);

		if (!is_realtime(aux)) {
#if 0
			/* currently can't do this here because of scheduler deadlock on itself */
			TRACE_CUR("admitting aux task: %s/%d\n", aux->comm, aux->pid);
			retval = admit_aux_task(aux);

			if (retval != 0)
				goto out;

			if (hp) {
				/* aux tasks don't touch rt locks, so no nested call needed. */
				TRACE_CUR("decreasing (actually increasing) %s/%d.\n", aux->comm, aux->pid);
				retval = litmus->__increase_prio(aux, hp);
			}
#endif

			TRACE_CUR("skipping non-real-time aux task %s/%d\n", aux->comm, aux->pid);
		}
		else {
			TRACE_CUR("decreasing %s/%d.\n", aux->comm, aux->pid);
			retval = litmus->__decrease_prio(aux, hp);
		}
	}

	//out:
	return retval;
}

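/* Called when an aux-task owner's effective priority rises: restore its
 * position in the leader's aux_task_owners heap and, if it has become the
 * highest-priority owner, raise the group's aux tasks to its (possibly
 * inherited) priority. */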
int aux_task_owner_increase_priority(struct task_struct *t)
{
	int retval = 0;
	struct task_struct *leader;
	struct task_struct *hp = NULL;

	BUG_ON(!tsk_rt(t)->has_aux_tasks);
	BUG_ON(!is_realtime(t));
	BUG_ON(!binheap_is_in_heap(&tsk_rt(t)->aux_task_owner_node));

	leader = t->group_leader;

	TRACE_CUR("task %s/%d in group %s/%d increasing priority.\n", t->comm, t->pid, leader->comm, leader->pid);

	hp = container_of(binheap_top_entry(&tsk_aux(leader)->aux_task_owners, struct rt_param, aux_task_owner_node),
				struct task_struct, rt_param);

	if (hp == t) {
		goto out; /* already hp, nothing to do. */
	}

	binheap_decrease(&tsk_rt(t)->aux_task_owner_node, &tsk_aux(leader)->aux_task_owners);

	hp = container_of(binheap_top_entry(&tsk_aux(leader)->aux_task_owners, struct rt_param, aux_task_owner_node),
				struct task_struct, rt_param);

	if (hp == t) {
		TRACE_CUR("%s/%d is new hp in group %s/%d.\n", t->comm, t->pid, leader->comm, leader->pid);
		retval = aux_tasks_increase_priority(leader,
				(tsk_rt(hp)->inh_task) ? tsk_rt(hp)->inh_task : hp);
	}

out:
	return retval;
}

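/* Called when an aux-task owner's effective priority drops: re-insert it into
 * the leader's aux_task_owners heap and, if it is no longer the top owner,
 * lower the group's aux tasks to the new top owner's (possibly inherited)
 * priority. */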
int aux_task_owner_decrease_priority(struct task_struct *t)
{
	int retval = 0;
	struct task_struct *leader;
	struct task_struct *hp = NULL;
	struct task_struct *new_hp = NULL;

	BUG_ON(!tsk_rt(t)->has_aux_tasks);
	BUG_ON(!is_realtime(t));
	BUG_ON(!binheap_is_in_heap(&tsk_rt(t)->aux_task_owner_node));

	leader = t->group_leader;

	TRACE_CUR("task %s/%d in group %s/%d decreasing priority.\n", t->comm, t->pid, leader->comm, leader->pid);

	hp = container_of(binheap_top_entry(&tsk_aux(leader)->aux_task_owners, struct rt_param, aux_task_owner_node),
				struct task_struct, rt_param);
	binheap_delete(&tsk_rt(t)->aux_task_owner_node, &tsk_aux(leader)->aux_task_owners);
	binheap_add(&tsk_rt(t)->aux_task_owner_node, &tsk_aux(leader)->aux_task_owners,
			struct rt_param, aux_task_owner_node);
	new_hp = container_of(binheap_top_entry(&tsk_aux(leader)->aux_task_owners, struct rt_param, aux_task_owner_node),
				struct task_struct, rt_param);

	if (hp == t && new_hp != t) {
		TRACE_CUR("%s/%d is no longer hp in group %s/%d.\n", t->comm, t->pid, leader->comm, leader->pid);
		retval = aux_tasks_decrease_priority(leader,
				(tsk_rt(new_hp)->inh_task) ? tsk_rt(new_hp)->inh_task : new_hp);
	}

	return retval;
}

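/* Register a real-time thread that owns aux tasks: add it to the leader's
 * aux_task_owners heap and, if it becomes the highest-priority owner, raise
 * the group's aux tasks to its (possibly inherited) priority. */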
long enable_aux_task_owner(struct task_struct *t)
{
	long retval = 0;
	struct task_struct *leader = t->group_leader;
	struct task_struct *hp;

	if (!tsk_rt(t)->has_aux_tasks) {
		TRACE_CUR("task %s/%d is not an aux owner\n", t->comm, t->pid);
		return -1;
	}

	BUG_ON(!is_realtime(t));

	if (binheap_is_in_heap(&tsk_rt(t)->aux_task_owner_node)) {
		TRACE_CUR("task %s/%d is already active\n", t->comm, t->pid);
		goto out;
	}

	binheap_add(&tsk_rt(t)->aux_task_owner_node, &tsk_aux(leader)->aux_task_owners,
			struct rt_param, aux_task_owner_node);

	hp = container_of(binheap_top_entry(&tsk_aux(leader)->aux_task_owners, struct rt_param, aux_task_owner_node),
				struct task_struct, rt_param);
	if (hp == t) {
		/* we're the new hp */
		TRACE_CUR("%s/%d is new hp in group %s/%d.\n", t->comm, t->pid, leader->comm, leader->pid);

		retval = aux_tasks_increase_priority(leader,
				(tsk_rt(hp)->inh_task) ? tsk_rt(hp)->inh_task : hp);
	}

out:
	return retval;
}

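/* Unregister an aux-task owner (e.g. on exit): remove it from the leader's
 * aux_task_owners heap and, if it was the highest-priority owner, re-base the
 * group's aux tasks on the new top owner, or on no inheritance if none
 * remain. */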
long disable_aux_task_owner(struct task_struct *t)
{
	long retval = 0;
	struct task_struct *leader = t->group_leader;
	struct task_struct *hp;
	struct task_struct *new_hp = NULL;

	if (!tsk_rt(t)->has_aux_tasks) {
		TRACE_CUR("task %s/%d is not an aux owner\n", t->comm, t->pid);
		return -1;
	}

	BUG_ON(!is_realtime(t));

	if (!binheap_is_in_heap(&tsk_rt(t)->aux_task_owner_node)) {
		TRACE_CUR("task %s/%d is already not active\n", t->comm, t->pid);
		goto out;
	}

	TRACE_CUR("task %s/%d exiting from group %s/%d.\n", t->comm, t->pid, leader->comm, leader->pid);

	hp = container_of(binheap_top_entry(&tsk_aux(leader)->aux_task_owners, struct rt_param, aux_task_owner_node),
				struct task_struct, rt_param);
	binheap_delete(&tsk_rt(t)->aux_task_owner_node, &tsk_aux(leader)->aux_task_owners);

	if (!binheap_empty(&tsk_aux(leader)->aux_task_owners)) {
		new_hp = container_of(binheap_top_entry(&tsk_aux(leader)->aux_task_owners, struct rt_param, aux_task_owner_node),
					struct task_struct, rt_param);
	}

	if (hp == t && new_hp != t) {
		struct task_struct *to_inh = NULL;

		TRACE_CUR("%s/%d is no longer hp in group %s/%d.\n", t->comm, t->pid, leader->comm, leader->pid);

		if (new_hp) {
			to_inh = (tsk_rt(new_hp)->inh_task) ? tsk_rt(new_hp)->inh_task : new_hp;
		}

		retval = aux_tasks_decrease_priority(leader, to_inh);
	}

out:
	return retval;
}

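/* binheap comparator for aux_task_owners: orders owners by the active
 * plugin's priority comparison, keeping the highest-priority owner at the
 * top of the heap. */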
static int aux_task_owner_max_priority_order(struct binheap_node *a,
				struct binheap_node *b)
{
	struct task_struct *d_a = container_of(binheap_entry(a, struct rt_param, aux_task_owner_node),
					struct task_struct, rt_param);
	struct task_struct *d_b = container_of(binheap_entry(b, struct rt_param, aux_task_owner_node),
					struct task_struct, rt_param);

	BUG_ON(!d_a);
	BUG_ON(!d_b);

	return litmus->compare(d_a, d_b);
}

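/* System call: enslave the caller's non-real-time sibling threads as aux
 * tasks. Walks the thread group once under tasklist_lock; threads without
 * real-time parameters (period 0 or the magic aux period) are admitted as aux
 * tasks, while real-time threads are recorded as aux-task owners. Finally,
 * all aux tasks inherit from the highest-priority owner, if any. */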
asmlinkage long sys_slave_non_rt_threads(void)
{
	long retval = 0;
	struct task_struct *leader;
	struct task_struct *t;

	read_lock_irq(&tasklist_lock);

	leader = current->group_leader;

#if 0
	t = leader;
	do {
		if (tsk_rt(t)->has_aux_tasks || tsk_rt(t)->is_aux_task) {
			printk("slave_non_rt_threads may only be called once per process.\n");
			retval = -EINVAL;
			goto out_unlock;
		}
		t = next_thread(t);
	} while (t != leader);
#endif

	if (!tsk_aux(leader)->initialized) {
		INIT_LIST_HEAD(&tsk_aux(leader)->aux_tasks);
		INIT_BINHEAP_HANDLE(&tsk_aux(leader)->aux_task_owners, aux_task_owner_max_priority_order);
		tsk_aux(leader)->initialized = 1;
	}

	t = leader;
	do {
		/* doesn't hurt to initialize them both */
		INIT_LIST_HEAD(&tsk_rt(t)->aux_task_node);
		INIT_BINHEAP_NODE(&tsk_rt(t)->aux_task_owner_node);

		TRACE_CUR("Checking task in %s/%d: %s/%d = (p = %llu):\n",
				leader->comm, leader->pid, t->comm, t->pid,
				tsk_rt(t)->task_params.period);

		/* inspect task_params to see if it is an rt task */
		if (tsk_rt(t)->task_params.period == 0 ||
			tsk_rt(t)->task_params.period == MAGIC_AUX_TASK_PERIOD) {
			if (!tsk_rt(t)->is_aux_task) {
				TRACE_CUR("AUX task in %s/%d: %s/%d:\n", leader->comm, leader->pid, t->comm, t->pid);
				/* hasn't been admitted into rt. make it an aux task. */
				tsk_rt(t)->is_aux_task = 1;
				list_add_tail(&tsk_rt(t)->aux_task_node, &tsk_aux(leader)->aux_tasks);

				(void)admit_aux_task(t);
			}
			else {
				TRACE_CUR("AUX task in %s/%d is already set up: %s/%d\n", leader->comm, leader->pid, t->comm, t->pid);
			}
		}
		else {
			if (!tsk_rt(t)->has_aux_tasks) {
				TRACE_CUR("task in %s/%d: %s/%d:\n", leader->comm, leader->pid, t->comm, t->pid);
				tsk_rt(t)->has_aux_tasks = 1;
				if (is_realtime(t)) {
					binheap_add(&tsk_rt(t)->aux_task_owner_node, &tsk_aux(leader)->aux_task_owners,
							struct rt_param, aux_task_owner_node);
				}
			}
			else {
				TRACE_CUR("task in %s/%d is already set up: %s/%d\n", leader->comm, leader->pid, t->comm, t->pid);
			}
		}

		t = next_thread(t);
	} while (t != leader);

	if (!binheap_empty(&tsk_aux(leader)->aux_task_owners)) {
		struct task_struct *hp = container_of(binheap_top_entry(&tsk_aux(leader)->aux_task_owners, struct rt_param, aux_task_owner_node),
					struct task_struct, rt_param);
		TRACE_CUR("found hp in group: %s/%d\n", hp->comm, hp->pid);
		retval = aux_tasks_increase_priority(leader,
				(tsk_rt(hp)->inh_task) ? tsk_rt(hp)->inh_task : hp);
	}

	//out_unlock:
	read_unlock_irq(&tasklist_lock);

	return retval;
}

#else

asmlinkage long sys_slave_non_rt_threads(void)
{
	printk("Unsupported. Recompile with CONFIG_LITMUS_LOCKING.\n");
	return -EINVAL;
}

#endif