author     Glenn Elliott <gelliott@cs.unc.edu>    2012-05-26 17:29:58 -0400
committer  Glenn Elliott <gelliott@cs.unc.edu>    2012-05-26 17:29:58 -0400
commit     a463f9a9e04385f0729f7435a0a6dff7d89b25de (patch)
tree       00ff42c305926c800e18b13df8440a4de1a1a041 /litmus/litmus_softirq.c
parent     6a00f206debf8a5c8899055726ad127dbeeed098 (diff)
GPUSync patch for Litmus 2012.1.
Diffstat (limited to 'litmus/litmus_softirq.c')
 -rw-r--r--   litmus/litmus_softirq.c   1582
 1 file changed, 1582 insertions(+), 0 deletions(-)
diff --git a/litmus/litmus_softirq.c b/litmus/litmus_softirq.c
new file mode 100644
index 000000000000..9f7d9da5facb
--- /dev/null
+++ b/litmus/litmus_softirq.c
@@ -0,0 +1,1582 @@
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
#include <linux/kthread.h>
#include <linux/ftrace.h>
#include <linux/smp.h>
#include <linux/slab.h>
#include <linux/mutex.h>

#include <linux/sched.h>
#include <linux/cpuset.h>

#include <litmus/litmus.h>
#include <litmus/sched_trace.h>
#include <litmus/jobs.h>
#include <litmus/sched_plugin.h>
#include <litmus/litmus_softirq.h>

/* TODO: Remove unneeded mb() and other barriers. */


/* counts number of daemons ready to handle litmus irqs. */
static atomic_t num_ready_klitirqds = ATOMIC_INIT(0);

enum pending_flags
{
    LIT_TASKLET_LOW = 0x1,
    LIT_TASKLET_HI  = LIT_TASKLET_LOW<<1,
    LIT_WORK        = LIT_TASKLET_HI<<1
};
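
/* With the shifts above, the flags come out as LIT_TASKLET_LOW = 0x1,
 * LIT_TASKLET_HI = 0x2, and LIT_WORK = 0x4.  They are serviced in
 * priority order: HI tasklets first, then LOW tasklets, then work
 * objects (see do_litirq() and do_work() below).
 */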

/* only support tasklet processing for now. */
struct tasklet_head
{
    struct tasklet_struct *head;
    struct tasklet_struct **tail;
};

struct klitirqd_info
{
    struct task_struct* klitirqd;
    struct task_struct* current_owner;
    int terminating;


    raw_spinlock_t lock;

    u32 pending;
    atomic_t num_hi_pending;
    atomic_t num_low_pending;
    atomic_t num_work_pending;

    /* in order of priority */
    struct tasklet_head pending_tasklets_hi;
    struct tasklet_head pending_tasklets;
    struct list_head worklist;
};
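
/* Locking notes (from the code below): 'lock' guards 'pending' and all
 * three queues; the num_*_pending counters are atomics and may be read
 * without it.  Each tasklet_head keeps a pointer to the last 'next'
 * field so appends are O(1).
 */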

/* one klitirqd_info, with its queues, per daemon */
static struct klitirqd_info klitirqds[NR_LITMUS_SOFTIRQD];



int proc_read_klitirqd_stats(char *page, char **start,
                             off_t off, int count,
                             int *eof, void *data)
{
    int len = snprintf(page, PAGE_SIZE,
                       "num ready klitirqds: %d\n\n",
                       atomic_read(&num_ready_klitirqds));

    if(klitirqd_is_ready())
    {
        int i;
        for(i = 0; i < NR_LITMUS_SOFTIRQD; ++i)
        {
            len +=
                snprintf(page + len, PAGE_SIZE - len, /* append after prior output */
                    "klitirqd_th%d: %s/%d\n"
                    "\tcurrent_owner: %s/%d\n"
                    "\tpending: %x\n"
                    "\tnum hi: %d\n"
                    "\tnum low: %d\n"
                    "\tnum work: %d\n\n",
                    i,
                    klitirqds[i].klitirqd->comm, klitirqds[i].klitirqd->pid,
                    (klitirqds[i].current_owner != NULL) ?
                        klitirqds[i].current_owner->comm : "(null)",
                    (klitirqds[i].current_owner != NULL) ?
                        klitirqds[i].current_owner->pid : 0,
                    klitirqds[i].pending,
                    atomic_read(&klitirqds[i].num_hi_pending),
                    atomic_read(&klitirqds[i].num_low_pending),
                    atomic_read(&klitirqds[i].num_work_pending));
        }
    }

    return(len);
}



#if 0
static atomic_t dump_id = ATOMIC_INIT(0);

static void __dump_state(struct klitirqd_info* which, const char* caller)
{
    struct tasklet_struct* list;

    int id = atomic_inc_return(&dump_id);

    //if(in_interrupt())
    {
        if(which->current_owner)
        {
            TRACE("(id: %d caller: %s)\n"
                  "klitirqd: %s/%d\n"
                  "current owner: %s/%d\n"
                  "pending: %x\n",
                  id, caller,
                  which->klitirqd->comm, which->klitirqd->pid,
                  which->current_owner->comm, which->current_owner->pid,
                  which->pending);
        }
        else
        {
            TRACE("(id: %d caller: %s)\n"
                  "klitirqd: %s/%d\n"
                  "current owner: %p\n"
                  "pending: %x\n",
                  id, caller,
                  which->klitirqd->comm, which->klitirqd->pid,
                  NULL,
                  which->pending);
        }

        list = which->pending_tasklets.head;
        while(list)
        {
            struct tasklet_struct *t = list;
            list = list->next; /* advance */
            if(t->owner)
                TRACE("(id: %d caller: %s) Tasklet: %p, Owner = %s/%d\n", id, caller, t, t->owner->comm, t->owner->pid);
            else
                TRACE("(id: %d caller: %s) Tasklet: %p, Owner = %p\n", id, caller, t, NULL);
        }
    }
}

static void dump_state(struct klitirqd_info* which, const char* caller)
{
    unsigned long flags;

    raw_spin_lock_irqsave(&which->lock, flags);
    __dump_state(which, caller);
    raw_spin_unlock_irqrestore(&which->lock, flags);
}
#endif


/* forward declarations */
static void ___litmus_tasklet_schedule(struct tasklet_struct *t,
                                       struct klitirqd_info *which,
                                       int wakeup);
static void ___litmus_tasklet_hi_schedule(struct tasklet_struct *t,
                                          struct klitirqd_info *which,
                                          int wakeup);
static void ___litmus_schedule_work(struct work_struct *w,
                                    struct klitirqd_info *which,
                                    int wakeup);



inline unsigned int klitirqd_id(struct task_struct* tsk)
{
    int i;
    for(i = 0; i < NR_LITMUS_SOFTIRQD; ++i)
    {
        if(klitirqds[i].klitirqd == tsk)
        {
            return i;
        }
    }

    BUG();

    return 0;
}


static inline u32 litirq_pending_hi_irqoff(struct klitirqd_info* which)
{
    return (which->pending & LIT_TASKLET_HI);
}

static inline u32 litirq_pending_low_irqoff(struct klitirqd_info* which)
{
    return (which->pending & LIT_TASKLET_LOW);
}

static inline u32 litirq_pending_work_irqoff(struct klitirqd_info* which)
{
    return (which->pending & LIT_WORK);
}

static inline u32 litirq_pending_irqoff(struct klitirqd_info* which)
{
    return(which->pending);
}


static inline u32 litirq_pending(struct klitirqd_info* which)
{
    unsigned long flags;
    u32 pending;

    raw_spin_lock_irqsave(&which->lock, flags);
    pending = litirq_pending_irqoff(which);
    raw_spin_unlock_irqrestore(&which->lock, flags);

    return pending;
}

static inline u32 litirq_pending_with_owner(struct klitirqd_info* which, struct task_struct* owner)
{
    unsigned long flags;
    u32 pending;

    raw_spin_lock_irqsave(&which->lock, flags);
    pending = litirq_pending_irqoff(which);
    if(pending)
    {
        if(which->current_owner != owner)
        {
            pending = 0; // owner switch!
        }
    }
    raw_spin_unlock_irqrestore(&which->lock, flags);

    return pending;
}


static inline u32 litirq_pending_and_sem_and_owner(struct klitirqd_info* which,
                                                   struct mutex** sem,
                                                   struct task_struct** t)
{
    unsigned long flags;
    u32 pending;

    /* init values */
    *sem = NULL;
    *t = NULL;

    raw_spin_lock_irqsave(&which->lock, flags);

    pending = litirq_pending_irqoff(which);
    if(pending)
    {
        if(which->current_owner != NULL)
        {
            *t = which->current_owner;
            *sem = &tsk_rt(which->current_owner)->klitirqd_sem;
        }
        else
        {
            BUG();
        }
    }
    raw_spin_unlock_irqrestore(&which->lock, flags);

    if(likely(*sem))
    {
        return pending;
    }
    else
    {
        return 0;
    }
}

/* returns true if the next piece of work to do is from a different owner.
 */
static int tasklet_ownership_change(
                struct klitirqd_info* which,
                enum pending_flags taskletQ)
{
    /* this function doesn't have to look at work objects since they have
       priority below tasklets. */

    unsigned long flags;
    int ret = 0;

    raw_spin_lock_irqsave(&which->lock, flags);

    switch(taskletQ)
    {
    case LIT_TASKLET_HI:
        if(litirq_pending_hi_irqoff(which))
        {
            ret = (which->pending_tasklets_hi.head->owner !=
                   which->current_owner);
        }
        break;
    case LIT_TASKLET_LOW:
        if(litirq_pending_low_irqoff(which))
        {
            ret = (which->pending_tasklets.head->owner !=
                   which->current_owner);
        }
        break;
    default:
        break;
    }

    raw_spin_unlock_irqrestore(&which->lock, flags);

    TRACE_TASK(which->klitirqd, "ownership change needed: %d\n", ret);

    return ret;
}


static void __reeval_prio(struct klitirqd_info* which)
{
    struct task_struct* next_owner = NULL;
    struct task_struct* klitirqd = which->klitirqd;

    /* Check in prio-order */
    u32 pending = litirq_pending_irqoff(which);

    //__dump_state(which, "__reeval_prio: before");

    if(pending)
    {
        if(pending & LIT_TASKLET_HI)
        {
            next_owner = which->pending_tasklets_hi.head->owner;
        }
        else if(pending & LIT_TASKLET_LOW)
        {
            next_owner = which->pending_tasklets.head->owner;
        }
        else if(pending & LIT_WORK)
        {
            struct work_struct* work =
                list_first_entry(&which->worklist, struct work_struct, entry);
            next_owner = work->owner;
        }
    }

    if(next_owner != which->current_owner)
    {
        struct task_struct* old_owner = which->current_owner;

        /* bind the next owner. */
        which->current_owner = next_owner;
        mb();

        if(next_owner != NULL)
        {
            if(!in_interrupt())
            {
                TRACE_CUR("%s: Ownership change: %s/%d to %s/%d\n", __FUNCTION__,
                    ((tsk_rt(klitirqd)->inh_task) ? tsk_rt(klitirqd)->inh_task : klitirqd)->comm,
                    ((tsk_rt(klitirqd)->inh_task) ? tsk_rt(klitirqd)->inh_task : klitirqd)->pid,
                    next_owner->comm, next_owner->pid);
            }
            else
            {
                TRACE("%s: Ownership change: %s/%d to %s/%d\n", __FUNCTION__,
                    ((tsk_rt(klitirqd)->inh_task) ? tsk_rt(klitirqd)->inh_task : klitirqd)->comm,
                    ((tsk_rt(klitirqd)->inh_task) ? tsk_rt(klitirqd)->inh_task : klitirqd)->pid,
                    next_owner->comm, next_owner->pid);
            }

            litmus->increase_prio_inheritance_klitirqd(klitirqd, old_owner, next_owner);
        }
        else
        {
            if(likely(!in_interrupt()))
            {
                TRACE_CUR("%s: Ownership change: %s/%d to NULL (reverting)\n",
                          __FUNCTION__, klitirqd->comm, klitirqd->pid);
            }
            else
            {
                // is this a bug?
                TRACE("%s: Ownership change: %s/%d to NULL (reverting)\n",
                      __FUNCTION__, klitirqd->comm, klitirqd->pid);
            }

            BUG_ON(pending != 0);
            litmus->decrease_prio_inheritance_klitirqd(klitirqd, old_owner, NULL);
        }
    }

    //__dump_state(which, "__reeval_prio: after");
}
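
/* Summary: __reeval_prio() picks the owner of the head item of the
 * highest-priority non-empty queue (HI tasklets, then LOW tasklets,
 * then work objects) and, if that differs from current_owner, rebinds
 * the daemon's inherited priority through the active plugin.  Must be
 * called with which->lock held.
 */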

static void reeval_prio(struct klitirqd_info* which)
{
    unsigned long flags;

    raw_spin_lock_irqsave(&which->lock, flags);
    __reeval_prio(which);
    raw_spin_unlock_irqrestore(&which->lock, flags);
}


static void wakeup_litirqd_locked(struct klitirqd_info* which)
{
    /* Interrupts are disabled: no need to stop preemption */
    if (which && which->klitirqd)
    {
        __reeval_prio(which); /* configure the proper priority */

        if(which->klitirqd->state != TASK_RUNNING)
        {
            TRACE("%s: Waking up klitirqd: %s/%d\n", __FUNCTION__,
                  which->klitirqd->comm, which->klitirqd->pid);

            wake_up_process(which->klitirqd);
        }
    }
}


static void do_lit_tasklet(struct klitirqd_info* which,
                           struct tasklet_head* pending_tasklets)
{
    unsigned long flags;
    struct tasklet_struct *list;
    atomic_t* count;

    raw_spin_lock_irqsave(&which->lock, flags);

    //__dump_state(which, "do_lit_tasklet: before steal");

    /* copy out the tasklets for our private use. */
    list = pending_tasklets->head;
    pending_tasklets->head = NULL;
    pending_tasklets->tail = &pending_tasklets->head;

    /* remove pending flag */
    which->pending &= (pending_tasklets == &which->pending_tasklets) ?
        ~LIT_TASKLET_LOW :
        ~LIT_TASKLET_HI;

    count = (pending_tasklets == &which->pending_tasklets) ?
        &which->num_low_pending :
        &which->num_hi_pending;

    //__dump_state(which, "do_lit_tasklet: after steal");

    raw_spin_unlock_irqrestore(&which->lock, flags);


    while(list)
    {
        struct tasklet_struct *t = list;

        /* advance, lest we forget */
        list = list->next;

        /* execute tasklet if it has my priority and is free */
        if ((t->owner == which->current_owner) && tasklet_trylock(t)) {
            if (!atomic_read(&t->count)) {

                sched_trace_tasklet_begin(t->owner);

                if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
                {
                    BUG();
                }
                TRACE_CUR("%s: Invoking tasklet.\n", __FUNCTION__);
                t->func(t->data);
                tasklet_unlock(t);

                atomic_dec(count);

                sched_trace_tasklet_end(t->owner, 0ul);

                continue; /* process more tasklets */
            }
            tasklet_unlock(t);
        }

        TRACE_CUR("%s: Could not invoke tasklet. Requeuing.\n", __FUNCTION__);

        /* couldn't process tasklet. put it back at the end of the queue. */
        if(pending_tasklets == &which->pending_tasklets)
            ___litmus_tasklet_schedule(t, which, 0);
        else
            ___litmus_tasklet_hi_schedule(t, which, 0);
    }
}
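
/* The loop above mirrors Linux's tasklet_action(): tasklet_trylock()
 * skips tasklets running elsewhere, a zero 'count' means enabled, and
 * TASKLET_STATE_SCHED must be cleared before the handler runs.  The
 * LITMUS twist is that tasklets of a different owner are requeued
 * (with wakeup == 0) rather than executed at the wrong priority.
 */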


// returns 1 if priorities need to be changed to continue processing
// pending tasklets.
static int do_litirq(struct klitirqd_info* which)
{
    u32 pending;
    int resched = 0;

    if(in_interrupt())
    {
        TRACE("%s: exiting early: in interrupt context!\n", __FUNCTION__);
        return(0);
    }

    if(which->klitirqd != current)
    {
        TRACE_CUR("%s: exiting early: thread/info mismatch! Running %s/%d but given %s/%d.\n",
                  __FUNCTION__, current->comm, current->pid,
                  which->klitirqd->comm, which->klitirqd->pid);
        return(0);
    }

    if(!is_realtime(current))
    {
        TRACE_CUR("%s: exiting early: klitirqd is not real-time. Sched Policy = %d\n",
                  __FUNCTION__, current->policy);
        return(0);
    }


    /* We only handle tasklets & work objects, no need for RCU triggers? */

    pending = litirq_pending(which);
    if(pending)
    {
        /* extract the work to do and do it! */
        if(pending & LIT_TASKLET_HI)
        {
            TRACE_CUR("%s: Invoking HI tasklets.\n", __FUNCTION__);
            do_lit_tasklet(which, &which->pending_tasklets_hi);
            resched = tasklet_ownership_change(which, LIT_TASKLET_HI);

            if(resched)
            {
                TRACE_CUR("%s: HI tasklets of another owner remain. "
                          "Skipping any LOW tasklets.\n", __FUNCTION__);
            }
        }

        if(!resched && (pending & LIT_TASKLET_LOW))
        {
            TRACE_CUR("%s: Invoking LOW tasklets.\n", __FUNCTION__);
            do_lit_tasklet(which, &which->pending_tasklets);
            resched = tasklet_ownership_change(which, LIT_TASKLET_LOW);

            if(resched)
            {
                TRACE_CUR("%s: LOW tasklets of another owner remain. "
                          "Skipping any work objects.\n", __FUNCTION__);
            }
        }
    }

    return(resched);
}


static void do_work(struct klitirqd_info* which)
{
    unsigned long flags;
    work_func_t f;
    struct work_struct* work;

    // only execute one work-queue item to yield to tasklets.
    // ...is this a good idea, or should we just batch them?
    raw_spin_lock_irqsave(&which->lock, flags);

    if(!litirq_pending_work_irqoff(which))
    {
        raw_spin_unlock_irqrestore(&which->lock, flags);
        goto no_work;
    }

    work = list_first_entry(&which->worklist, struct work_struct, entry);
    list_del_init(&work->entry);

    if(list_empty(&which->worklist))
    {
        which->pending &= ~LIT_WORK;
    }

    raw_spin_unlock_irqrestore(&which->lock, flags);



    /* safe to read current_owner outside of lock since only this thread
       may write to the pointer. */
    if(work->owner == which->current_owner)
    {
        TRACE_CUR("%s: Invoking work object.\n", __FUNCTION__);
        // do the work!
        work_clear_pending(work);
        f = work->func;
        f(work); /* can't touch 'work' after this point,
                    the user may have freed it. */

        atomic_dec(&which->num_work_pending);
    }
    else
    {
        TRACE_CUR("%s: Could not invoke work object. Requeuing.\n",
                  __FUNCTION__);
        ___litmus_schedule_work(work, which, 0);
    }

no_work:
    return;
}


static int set_litmus_daemon_sched(void)
{
    /* set up a daemon job that will never complete.
       it should only ever run on behalf of another
       real-time task.

       TODO: Transition to a new job whenever a
       new tasklet is handled */

    int ret = 0;

    struct rt_task tp = {
        .exec_cost = 0,
        .period = 1000000000, /* dummy 1 second period */
        .phase = 0,
        .cpu = task_cpu(current),
        .budget_policy = NO_ENFORCEMENT,
        .cls = RT_CLASS_BEST_EFFORT
    };

    struct sched_param param = { .sched_priority = 0 };


    /* set task params, mark as proxy thread, and init other data */
    tsk_rt(current)->task_params = tp;
    tsk_rt(current)->is_proxy_thread = 1;
    tsk_rt(current)->cur_klitirqd = NULL;
    mutex_init(&tsk_rt(current)->klitirqd_sem);
    atomic_set(&tsk_rt(current)->klitirqd_sem_stat, NOT_HELD);

    /* inform the OS we're SCHED_LITMUS --
       sched_setscheduler_nocheck() calls litmus_admit_task(). */
    sched_setscheduler_nocheck(current, SCHED_LITMUS, &param);

    return ret;
}
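
/* The daemon is admitted as a zero-cost, best-effort LITMUS task with a
 * dummy 1s period and no budget enforcement: on its own it has the
 * lowest possible priority, so it effectively runs only at the priority
 * it inherits (via __reeval_prio()) from the owner of the work it is
 * currently processing.
 */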

static void enter_execution_phase(struct klitirqd_info* which,
                                  struct mutex* sem,
                                  struct task_struct* t)
{
    TRACE_CUR("%s: Trying to enter execution phase. "
              "Acquiring semaphore of %s/%d\n", __FUNCTION__,
              t->comm, t->pid);
    down_and_set_stat(current, HELD, sem);
    TRACE_CUR("%s: Execution phase entered! "
              "Acquired semaphore of %s/%d\n", __FUNCTION__,
              t->comm, t->pid);
}

static void exit_execution_phase(struct klitirqd_info* which,
                                 struct mutex* sem,
                                 struct task_struct* t)
{
    TRACE_CUR("%s: Exiting execution phase. "
              "Releasing semaphore of %s/%d\n", __FUNCTION__,
              t->comm, t->pid);
    if(atomic_read(&tsk_rt(current)->klitirqd_sem_stat) == HELD)
    {
        up_and_set_stat(current, NOT_HELD, sem);
        TRACE_CUR("%s: Execution phase exited! "
                  "Released semaphore of %s/%d\n", __FUNCTION__,
                  t->comm, t->pid);
    }
    else
    {
        TRACE_CUR("%s: COULDN'T RELEASE SEMAPHORE BECAUSE ONE IS NOT HELD!\n", __FUNCTION__);
    }
}
/* main loop for klitsoftirqd */
static int run_klitirqd(void* unused)
{
    struct klitirqd_info* which = &klitirqds[klitirqd_id(current)];
    struct mutex* sem;
    struct task_struct* owner;

    int rt_status = set_litmus_daemon_sched();

    if(rt_status != 0)
    {
        TRACE_CUR("%s: Failed to transition to rt-task.\n", __FUNCTION__);
        goto rt_failed;
    }

    atomic_inc(&num_ready_klitirqds);

    set_current_state(TASK_INTERRUPTIBLE);

    while (!kthread_should_stop())
    {
        preempt_disable();
        if (!litirq_pending(which))
        {
            /* sleep for work */
            TRACE_CUR("%s: No more tasklets or work objects. Going to sleep.\n",
                      __FUNCTION__);
            preempt_enable_no_resched();
            schedule();

            if(kthread_should_stop()) /* bail out */
            {
                TRACE_CUR("%s:%d: Signaled to terminate.\n", __FUNCTION__, __LINE__);
                continue;
            }

            preempt_disable();
        }

        __set_current_state(TASK_RUNNING);

        while (litirq_pending_and_sem_and_owner(which, &sem, &owner))
        {
            int needs_resched = 0;

            preempt_enable_no_resched();

            BUG_ON(sem == NULL);

            // wait to enter execution phase; wait for 'current_owner' to block.
            enter_execution_phase(which, sem, owner);

            if(kthread_should_stop())
            {
                TRACE_CUR("%s:%d: Signaled to terminate.\n", __FUNCTION__, __LINE__);
                break;
            }

            preempt_disable();

            /* Double check that there's still pending work and the owner hasn't
             * changed. Pending items may have been flushed while we were sleeping.
             */
            if(litirq_pending_with_owner(which, owner))
            {
                TRACE_CUR("%s: Executing tasklets and/or work objects.\n",
                          __FUNCTION__);

                needs_resched = do_litirq(which);

                preempt_enable_no_resched();

                // work objects are preemptible.
                if(!needs_resched)
                {
                    do_work(which);
                }

                // exit execution phase.
                exit_execution_phase(which, sem, owner);

                TRACE_CUR("%s: Setting up next priority.\n", __FUNCTION__);
                reeval_prio(which); /* check if we need to change priority here */
            }
            else
            {
                TRACE_CUR("%s: Pending work was flushed! Prev owner was %s/%d\n",
                          __FUNCTION__,
                          owner->comm, owner->pid);
                preempt_enable_no_resched();

                // exit execution phase.
                exit_execution_phase(which, sem, owner);
            }

            cond_resched();
            preempt_disable();
        }
        preempt_enable();
        set_current_state(TASK_INTERRUPTIBLE);
    }
    __set_current_state(TASK_RUNNING);

    atomic_dec(&num_ready_klitirqds);

rt_failed:
    litmus_exit_task(current);

    return rt_status;
}
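
/* Main-loop summary: sleep until something is pending; then, per owner,
 * (1) acquire the owner's klitirqd_sem to enter the execution phase,
 * (2) drain HI then LOW tasklets until an ownership change is detected,
 * (3) run at most one work object, (4) release the semaphore and
 * re-evaluate the inherited priority.  kthread_should_stop() is
 * rechecked after every potential sleep.
 */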


struct klitirqd_launch_data
{
    int* cpu_affinity;
    struct work_struct work;
};

/* executed by a kworker from workqueues */
static void launch_klitirqd(struct work_struct *work)
{
    int i;

    struct klitirqd_launch_data* launch_data =
        container_of(work, struct klitirqd_launch_data, work);

    TRACE("%s: Creating %d klitirqds\n", __FUNCTION__, NR_LITMUS_SOFTIRQD);

    /* create the daemon threads */
    for(i = 0; i < NR_LITMUS_SOFTIRQD; ++i)
    {
        if(launch_data->cpu_affinity)
        {
            klitirqds[i].klitirqd =
                kthread_create(
                    run_klitirqd,
                    /* treat the affinity as a pointer, we'll cast it back later */
                    (void*)(long long)launch_data->cpu_affinity[i],
                    "klitirqd_th%d/%d",
                    i,
                    launch_data->cpu_affinity[i]);

            /* litmus will put us in the right cluster. */
            kthread_bind(klitirqds[i].klitirqd, launch_data->cpu_affinity[i]);
        }
        else
        {
            klitirqds[i].klitirqd =
                kthread_create(
                    run_klitirqd,
                    /* treat the affinity as a pointer, we'll cast it back later */
                    (void*)(long long)(-1),
                    "klitirqd_th%d",
                    i);
        }
    }

    TRACE("%s: Launching %d klitirqds\n", __FUNCTION__, NR_LITMUS_SOFTIRQD);

    /* unleash the daemons */
    for(i = 0; i < NR_LITMUS_SOFTIRQD; ++i)
    {
        wake_up_process(klitirqds[i].klitirqd);
    }

    if(launch_data->cpu_affinity)
        kfree(launch_data->cpu_affinity);
    kfree(launch_data);
}


void spawn_klitirqd(int* affinity)
{
    int i;
    struct klitirqd_launch_data* delayed_launch;

    if(atomic_read(&num_ready_klitirqds) != 0)
    {
        TRACE("%s: At least one klitirqd is already running! Need to call kill_klitirqd()?\n",
              __FUNCTION__);
        return;
    }

    /* init the tasklet & work queues */
    for(i = 0; i < NR_LITMUS_SOFTIRQD; ++i)
    {
        klitirqds[i].terminating = 0;
        klitirqds[i].pending = 0;

        klitirqds[i].num_hi_pending.counter = 0;
        klitirqds[i].num_low_pending.counter = 0;
        klitirqds[i].num_work_pending.counter = 0;

        klitirqds[i].pending_tasklets_hi.head = NULL;
        klitirqds[i].pending_tasklets_hi.tail = &klitirqds[i].pending_tasklets_hi.head;

        klitirqds[i].pending_tasklets.head = NULL;
        klitirqds[i].pending_tasklets.tail = &klitirqds[i].pending_tasklets.head;

        INIT_LIST_HEAD(&klitirqds[i].worklist);

        raw_spin_lock_init(&klitirqds[i].lock);
    }

    /* wait to flush the initializations to memory since other threads
       will access it. */
    mb();

    /* tell a work queue to launch the threads. we can't make scheduling
       calls since we're in an atomic state. */
    TRACE("%s: Setting callback up to launch klitirqds\n", __FUNCTION__);
    delayed_launch = kmalloc(sizeof(struct klitirqd_launch_data), GFP_ATOMIC);
    if(affinity)
    {
        delayed_launch->cpu_affinity =
            kmalloc(sizeof(int)*NR_LITMUS_SOFTIRQD, GFP_ATOMIC);

        memcpy(delayed_launch->cpu_affinity, affinity,
               sizeof(int)*NR_LITMUS_SOFTIRQD);
    }
    else
    {
        delayed_launch->cpu_affinity = NULL;
    }
    INIT_WORK(&delayed_launch->work, launch_klitirqd);
    schedule_work(&delayed_launch->work);
}
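
/* Usage sketch (hypothetical caller; the affinity values are
 * illustrative, not part of this file).  Pass NULL for no affinity:
 *
 *   int cpus[NR_LITMUS_SOFTIRQD];
 *   int i;
 *   for (i = 0; i < NR_LITMUS_SOFTIRQD; ++i)
 *       cpus[i] = i % num_online_cpus();
 *   spawn_klitirqd(cpus);
 */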


void kill_klitirqd(void)
{
    if(!klitirqd_is_dead())
    {
        int i;

        TRACE("%s: Killing %d klitirqds\n", __FUNCTION__, NR_LITMUS_SOFTIRQD);

        for(i = 0; i < NR_LITMUS_SOFTIRQD; ++i)
        {
            if(klitirqds[i].terminating != 1)
            {
                klitirqds[i].terminating = 1;
                mb(); /* just to be sure? */
                flush_pending(klitirqds[i].klitirqd, NULL);

                /* signal termination */
                kthread_stop(klitirqds[i].klitirqd);
            }
        }
    }
}


int klitirqd_is_ready(void)
{
    return(atomic_read(&num_ready_klitirqds) == NR_LITMUS_SOFTIRQD);
}

int klitirqd_is_dead(void)
{
    return(atomic_read(&num_ready_klitirqds) == 0);
}


struct task_struct* get_klitirqd(unsigned int k_id)
{
    return(klitirqds[k_id].klitirqd);
}


void flush_pending(struct task_struct* klitirqd_thread,
                   struct task_struct* owner)
{
    unsigned int k_id = klitirqd_id(klitirqd_thread);
    struct klitirqd_info *which = &klitirqds[k_id];

    unsigned long flags;
    struct tasklet_struct *list;

    u32 work_flushed = 0;

    raw_spin_lock_irqsave(&which->lock, flags);

    //__dump_state(which, "flush_pending: before");

    // flush hi tasklets.
    if(litirq_pending_hi_irqoff(which))
    {
        which->pending &= ~LIT_TASKLET_HI;

        list = which->pending_tasklets_hi.head;
        which->pending_tasklets_hi.head = NULL;
        which->pending_tasklets_hi.tail = &which->pending_tasklets_hi.head;

        TRACE("%s: Handing HI tasklets back to Linux.\n", __FUNCTION__);

        while(list)
        {
            struct tasklet_struct *t = list;
            list = list->next;

            if(likely((t->owner == owner) || (owner == NULL)))
            {
                if(unlikely(!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state)))
                {
                    BUG();
                }

                work_flushed |= LIT_TASKLET_HI;

                t->owner = NULL;

                /* re-mark as scheduled and hand back to Linux; the bit
                   cannot already be set since we just cleared it. */
                if(!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
                {
                    atomic_dec(&which->num_hi_pending);
                    ___tasklet_hi_schedule(t);
                }
                else
                {
                    TRACE("%s: dropped hi tasklet??\n", __FUNCTION__);
                    BUG();
                }
            }
            else
            {
                TRACE("%s: Could not flush a HI tasklet.\n", __FUNCTION__);
                // put back on queue.
                ___litmus_tasklet_hi_schedule(t, which, 0);
            }
        }
    }

    // flush low tasklets.
    if(litirq_pending_low_irqoff(which))
    {
        which->pending &= ~LIT_TASKLET_LOW;

        list = which->pending_tasklets.head;
        which->pending_tasklets.head = NULL;
        which->pending_tasklets.tail = &which->pending_tasklets.head;

        TRACE("%s: Handing LOW tasklets back to Linux.\n", __FUNCTION__);

        while(list)
        {
            struct tasklet_struct *t = list;
            list = list->next;

            if(likely((t->owner == owner) || (owner == NULL)))
            {
                if(unlikely(!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state)))
                {
                    BUG();
                }

                work_flushed |= LIT_TASKLET_LOW;

                t->owner = NULL;
                sched_trace_tasklet_end(owner, 1ul);

                if(!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
                {
                    atomic_dec(&which->num_low_pending);
                    ___tasklet_schedule(t);
                }
                else
                {
                    TRACE("%s: dropped tasklet??\n", __FUNCTION__);
                    BUG();
                }
            }
            else
            {
                TRACE("%s: Could not flush a LOW tasklet.\n", __FUNCTION__);
                // put back on queue
                ___litmus_tasklet_schedule(t, which, 0);
            }
        }
    }

    // flush work objects
    if(litirq_pending_work_irqoff(which))
    {
        which->pending &= ~LIT_WORK;

        TRACE("%s: Handing work objects back to Linux.\n", __FUNCTION__);

        while(!list_empty(&which->worklist))
        {
            struct work_struct* work =
                list_first_entry(&which->worklist, struct work_struct, entry);
            list_del_init(&work->entry);

            if(likely((work->owner == owner) || (owner == NULL)))
            {
                work_flushed |= LIT_WORK;
                atomic_dec(&which->num_work_pending);

                work->owner = NULL;
                sched_trace_work_end(owner, current, 1ul);
                __schedule_work(work);
            }
            else
            {
                TRACE("%s: Could not flush a work object.\n", __FUNCTION__);
                // put back on queue
                ___litmus_schedule_work(work, which, 0);
            }
        }
    }

    //__dump_state(which, "flush_pending: after (before reeval prio)");


    mb(); /* commit changes to pending flags */

    /* reset the scheduling priority */
    if(work_flushed)
    {
        __reeval_prio(which);

        /* Try to offload flushed tasklets to Linux's ksoftirqd. */
        if(work_flushed & (LIT_TASKLET_LOW | LIT_TASKLET_HI))
        {
            wakeup_softirqd();
        }
    }
    else
    {
        TRACE_CUR("%s: no work flushed, so __reeval_prio() skipped\n", __FUNCTION__);
    }

    raw_spin_unlock_irqrestore(&which->lock, flags);
}
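
/* flush_pending() summary: with owner == NULL every queued item is
 * handed back to Linux (tasklets via ___tasklet_schedule() /
 * ___tasklet_hi_schedule() plus a ksoftirqd wakeup, work objects via
 * __schedule_work()); with a specific owner, only that owner's items
 * are flushed and the rest are requeued.  Flushed items are traced with
 * a final argument of 1ul to mark that they were flushed, not executed.
 */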




static void ___litmus_tasklet_schedule(struct tasklet_struct *t,
                                       struct klitirqd_info *which,
                                       int wakeup)
{
    unsigned long flags;
    u32 old_pending;

    t->next = NULL;

    raw_spin_lock_irqsave(&which->lock, flags);

    //__dump_state(which, "___litmus_tasklet_schedule: before queuing");

    *(which->pending_tasklets.tail) = t;
    which->pending_tasklets.tail = &t->next;

    old_pending = which->pending;
    which->pending |= LIT_TASKLET_LOW;

    atomic_inc(&which->num_low_pending);

    mb();

    if(!old_pending && wakeup)
    {
        wakeup_litirqd_locked(which); /* wake up the klitirqd */
    }

    //__dump_state(which, "___litmus_tasklet_schedule: after queuing");

    raw_spin_unlock_irqrestore(&which->lock, flags);
}

int __litmus_tasklet_schedule(struct tasklet_struct *t, unsigned int k_id)
{
    int ret = 0; /* assume failure */
    if(unlikely((t->owner == NULL) || !is_realtime(t->owner)))
    {
        TRACE("%s: No owner associated with this tasklet!\n", __FUNCTION__);
        BUG();
    }

    if(unlikely(k_id >= NR_LITMUS_SOFTIRQD))
    {
        TRACE("%s: No klitirqd_th%d!\n", __FUNCTION__, k_id);
        BUG();
    }

    if(likely(!klitirqds[k_id].terminating))
    {
        /* Can't accept tasklets while we're processing a workqueue
           because they're handled by the same thread. This case is
           very RARE.

           TODO: Use a separate thread for work objects!!!!!!
         */
        if(likely(atomic_read(&klitirqds[k_id].num_work_pending) == 0))
        {
            ret = 1;
            ___litmus_tasklet_schedule(t, &klitirqds[k_id], 1);
        }
        else
        {
            TRACE("%s: rejected tasklet because of pending work.\n",
                  __FUNCTION__);
        }
    }
    return(ret);
}

EXPORT_SYMBOL(__litmus_tasklet_schedule);
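
/* Usage sketch (hypothetical driver bottom half; 'my_tasklet',
 * 'my_handler', 'rt_owner', and daemon index 0 are illustrative, not
 * part of this file):
 *
 *   tasklet_init(&my_tasklet, my_handler, 0);
 *   my_tasklet.owner = rt_owner;           // owner must be real-time
 *   if (!__litmus_tasklet_schedule(&my_tasklet, 0))
 *       tasklet_schedule(&my_tasklet);     // fall back to Linux
 *
 * Return convention: 1 if the tasklet was accepted, 0 if the daemon is
 * terminating or busy with pending work objects.
 */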


static void ___litmus_tasklet_hi_schedule(struct tasklet_struct *t,
                                          struct klitirqd_info *which,
                                          int wakeup)
{
    unsigned long flags;
    u32 old_pending;

    t->next = NULL;

    raw_spin_lock_irqsave(&which->lock, flags);

    *(which->pending_tasklets_hi.tail) = t;
    which->pending_tasklets_hi.tail = &t->next;

    old_pending = which->pending;
    which->pending |= LIT_TASKLET_HI;

    atomic_inc(&which->num_hi_pending);

    mb();

    if(!old_pending && wakeup)
    {
        wakeup_litirqd_locked(which); /* wake up the klitirqd */
    }

    raw_spin_unlock_irqrestore(&which->lock, flags);
}

int __litmus_tasklet_hi_schedule(struct tasklet_struct *t, unsigned int k_id)
{
    int ret = 0; /* assume failure */
    if(unlikely((t->owner == NULL) || !is_realtime(t->owner)))
    {
        TRACE("%s: No owner associated with this tasklet!\n", __FUNCTION__);
        BUG();
    }

    if(unlikely(k_id >= NR_LITMUS_SOFTIRQD))
    {
        TRACE("%s: No klitirqd_th%d!\n", __FUNCTION__, k_id);
        BUG();
    }

    if(unlikely(!klitirqd_is_ready()))
    {
        TRACE("%s: klitirqd is not ready!\n", __FUNCTION__);
        BUG();
    }

    if(likely(!klitirqds[k_id].terminating))
    {
        if(likely(atomic_read(&klitirqds[k_id].num_work_pending) == 0))
        {
            ret = 1;
            ___litmus_tasklet_hi_schedule(t, &klitirqds[k_id], 1);
        }
        else
        {
            TRACE("%s: rejected tasklet because of pending work.\n",
                  __FUNCTION__);
        }
    }
    return(ret);
}

EXPORT_SYMBOL(__litmus_tasklet_hi_schedule);


int __litmus_tasklet_hi_schedule_first(struct tasklet_struct *t, unsigned int k_id)
{
    int ret = 0; /* assume failure */
    u32 old_pending;

    BUG_ON(!irqs_disabled());

    if(unlikely((t->owner == NULL) || !is_realtime(t->owner)))
    {
        TRACE("%s: No owner associated with this tasklet!\n", __FUNCTION__);
        BUG();
    }

    if(unlikely(k_id >= NR_LITMUS_SOFTIRQD))
    {
        TRACE("%s: No klitirqd_th%u!\n", __FUNCTION__, k_id);
        BUG();
    }

    if(unlikely(!klitirqd_is_ready()))
    {
        TRACE("%s: klitirqd is not ready!\n", __FUNCTION__);
        BUG();
    }

    if(likely(!klitirqds[k_id].terminating))
    {
        raw_spin_lock(&klitirqds[k_id].lock);

        if(likely(atomic_read(&klitirqds[k_id].num_work_pending) == 0))
        {
            ret = 1; // success!

            t->next = klitirqds[k_id].pending_tasklets_hi.head;
            klitirqds[k_id].pending_tasklets_hi.head = t;

            old_pending = klitirqds[k_id].pending;
            klitirqds[k_id].pending |= LIT_TASKLET_HI;

            atomic_inc(&klitirqds[k_id].num_hi_pending);

            mb();

            if(!old_pending)
                wakeup_litirqd_locked(&klitirqds[k_id]); /* wake up the klitirqd */
        }
        else
        {
            TRACE("%s: rejected tasklet because of pending work.\n",
                  __FUNCTION__);
        }

        raw_spin_unlock(&klitirqds[k_id].lock);
    }
    return(ret);
}

EXPORT_SYMBOL(__litmus_tasklet_hi_schedule_first);



static void ___litmus_schedule_work(struct work_struct *w,
                                    struct klitirqd_info *which,
                                    int wakeup)
{
    unsigned long flags;
    u32 old_pending;

    raw_spin_lock_irqsave(&which->lock, flags);

    /* mark the work item pending before it is queued
       (cleared again by work_clear_pending() in do_work()). */
    test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(w));
    list_add_tail(&w->entry, &which->worklist);

    old_pending = which->pending;
    which->pending |= LIT_WORK;

    atomic_inc(&which->num_work_pending);

    mb();

    if(!old_pending && wakeup)
    {
        wakeup_litirqd_locked(which); /* wake up the klitirqd */
    }

    raw_spin_unlock_irqrestore(&which->lock, flags);
}

int __litmus_schedule_work(struct work_struct *w, unsigned int k_id)
{
    int ret = 1; /* assume success */
    if(unlikely(w->owner == NULL) || !is_realtime(w->owner))
    {
        TRACE("%s: No owner associated with this work object!\n", __FUNCTION__);
        BUG();
    }

    if(unlikely(k_id >= NR_LITMUS_SOFTIRQD))
    {
        TRACE("%s: No klitirqd_th%u!\n", __FUNCTION__, k_id);
        BUG();
    }

    if(unlikely(!klitirqd_is_ready()))
    {
        TRACE("%s: klitirqd is not ready!\n", __FUNCTION__);
        BUG();
    }

    if(likely(!klitirqds[k_id].terminating))
        ___litmus_schedule_work(w, &klitirqds[k_id], 1);
    else
        ret = 0;
    return(ret);
}
EXPORT_SYMBOL(__litmus_schedule_work);


static int set_klitirqd_sem_status(unsigned long stat)
{
    TRACE_CUR("SETTING STATUS FROM %d TO %lu\n",
              atomic_read(&tsk_rt(current)->klitirqd_sem_stat),
              stat);
    atomic_set(&tsk_rt(current)->klitirqd_sem_stat, stat);
    //mb();

    return(0);
}

static int set_klitirqd_sem_status_if_not_held(unsigned long stat)
{
    if(atomic_read(&tsk_rt(current)->klitirqd_sem_stat) != HELD)
    {
        return(set_klitirqd_sem_status(stat));
    }
    return(-1);
}


void __down_and_reset_and_set_stat(struct task_struct* t,
                                   enum klitirqd_sem_status to_reset,
                                   enum klitirqd_sem_status to_set,
                                   struct mutex* sem)
{
#if 0
    struct rt_param* param = container_of(sem, struct rt_param, klitirqd_sem);
    struct task_struct* task = container_of(param, struct task_struct, rt_param);

    TRACE_CUR("%s: entered. Locking semaphore of %s/%d\n",
              __FUNCTION__, task->comm, task->pid);
#endif

    mutex_lock_sfx(sem,
                   set_klitirqd_sem_status_if_not_held, to_reset,
                   set_klitirqd_sem_status, to_set);
#if 0
    TRACE_CUR("%s: exiting. Have semaphore of %s/%d\n",
              __FUNCTION__, task->comm, task->pid);
#endif
}

void down_and_set_stat(struct task_struct* t,
                       enum klitirqd_sem_status to_set,
                       struct mutex* sem)
{
#if 0
    struct rt_param* param = container_of(sem, struct rt_param, klitirqd_sem);
    struct task_struct* task = container_of(param, struct task_struct, rt_param);

    TRACE_CUR("%s: entered. Locking semaphore of %s/%d\n",
              __FUNCTION__, task->comm, task->pid);
#endif

    mutex_lock_sfx(sem,
                   NULL, 0,
                   set_klitirqd_sem_status, to_set);

#if 0
    TRACE_CUR("%s: exiting. Have semaphore of %s/%d\n",
              __FUNCTION__, task->comm, task->pid);
#endif
}


void up_and_set_stat(struct task_struct* t,
                     enum klitirqd_sem_status to_set,
                     struct mutex* sem)
{
#if 0
    struct rt_param* param = container_of(sem, struct rt_param, klitirqd_sem);
    struct task_struct* task = container_of(param, struct task_struct, rt_param);

    TRACE_CUR("%s: entered. Unlocking semaphore of %s/%d\n",
              __FUNCTION__,
              task->comm, task->pid);
#endif

    mutex_unlock_sfx(sem, NULL, 0,
                     set_klitirqd_sem_status, to_set);

#if 0
    TRACE_CUR("%s: exiting. Unlocked semaphore of %s/%d\n",
              __FUNCTION__,
              task->comm, task->pid);
#endif
}
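
/* The klitirqd_sem_stat values used with these helpers encode a small
 * hand-off protocol: HELD while a thread is inside its execution phase,
 * NOT_HELD after a clean release, NEED_TO_REACQUIRE when the lock was
 * dropped on suspension, and REACQUIRING while blocking to get it back
 * (see release_klitirqd_lock() and reacquire_klitirqd_lock() below).
 */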



void release_klitirqd_lock(struct task_struct* t)
{
    if(is_realtime(t) && (atomic_read(&tsk_rt(t)->klitirqd_sem_stat) == HELD))
    {
        struct mutex* sem;
        struct task_struct* owner = t;

        if(t->state == TASK_RUNNING)
        {
            TRACE_TASK(t, "NOT giving up klitirqd_sem because we're not blocked!\n");
            return;
        }

        if(likely(!tsk_rt(t)->is_proxy_thread))
        {
            sem = &tsk_rt(t)->klitirqd_sem;
        }
        else
        {
            unsigned int k_id = klitirqd_id(t);
            owner = klitirqds[k_id].current_owner;

            BUG_ON(t != klitirqds[k_id].klitirqd);

            if(likely(owner))
            {
                sem = &tsk_rt(owner)->klitirqd_sem;
            }
            else
            {
                BUG();

                // We had the rug pulled out from under us. Abort attempt
                // to reacquire the lock since our client no longer needs us.
                TRACE_CUR("HUH?! How did this happen?\n");
                atomic_set(&tsk_rt(t)->klitirqd_sem_stat, NOT_HELD);
                return;
            }
        }

        //TRACE_CUR("Releasing semaphore of %s/%d...\n", owner->comm, owner->pid);
        up_and_set_stat(t, NEED_TO_REACQUIRE, sem);
        //TRACE_CUR("Semaphore of %s/%d released!\n", owner->comm, owner->pid);
    }
    /*
    else if(is_realtime(t))
    {
        TRACE_CUR("%s: Nothing to do. Stat = %d\n", __FUNCTION__, tsk_rt(t)->klitirqd_sem_stat);
    }
    */
}

int reacquire_klitirqd_lock(struct task_struct* t)
{
    int ret = 0;

    if(is_realtime(t) && (atomic_read(&tsk_rt(t)->klitirqd_sem_stat) == NEED_TO_REACQUIRE))
    {
        struct mutex* sem;
        struct task_struct* owner = t;

        if(likely(!tsk_rt(t)->is_proxy_thread))
        {
            sem = &tsk_rt(t)->klitirqd_sem;
        }
        else
        {
            unsigned int k_id = klitirqd_id(t);
            owner = klitirqds[k_id].current_owner;

            BUG_ON(t != klitirqds[k_id].klitirqd);

            if(likely(owner))
            {
                sem = &tsk_rt(owner)->klitirqd_sem;
            }
            else
            {
                // We had the rug pulled out from under us. Abort attempt
                // to reacquire the lock since our client no longer needs us.
                TRACE_CUR("No longer needs to reacquire klitirqd_sem!\n");
                atomic_set(&tsk_rt(t)->klitirqd_sem_stat, NOT_HELD);
                return(0);
            }
        }

        //TRACE_CUR("Trying to reacquire semaphore of %s/%d\n", owner->comm, owner->pid);
        __down_and_reset_and_set_stat(t, REACQUIRING, HELD, sem);
        //TRACE_CUR("Reacquired semaphore %s/%d\n", owner->comm, owner->pid);
    }
    /*
    else if(is_realtime(t))
    {
        TRACE_CUR("%s: Nothing to do. Stat = %d\n", __FUNCTION__, tsk_rt(t)->klitirqd_sem_stat);
    }
    */

    return(ret);
}
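
/* release_klitirqd_lock() and reacquire_klitirqd_lock() form a pair:
 * the first drops the owner's semaphore when its holder blocks (marking
 * the state NEED_TO_REACQUIRE), the second takes it back on resumption,
 * so a suspended client cannot lock out the daemon indefinitely.
 */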