Diffstat (limited to 'litmus/litmus_softirq.c')
-rw-r--r--  litmus/litmus_softirq.c  1584
1 file changed, 1584 insertions(+), 0 deletions(-)
diff --git a/litmus/litmus_softirq.c b/litmus/litmus_softirq.c
new file mode 100644
index 000000000000..c49676c6d3a7
--- /dev/null
+++ b/litmus/litmus_softirq.c
@@ -0,0 +1,1584 @@
1#include <linux/interrupt.h>
2#include <linux/percpu.h>
3#include <linux/cpu.h>
4#include <linux/kthread.h>
5#include <linux/ftrace.h>
6#include <linux/smp.h>
7#include <linux/slab.h>
8#include <linux/mutex.h>
9
10#include <linux/sched.h>
11#include <linux/cpuset.h>
12
13#include <litmus/litmus.h>
14#include <litmus/sched_trace.h>
15#include <litmus/jobs.h>
16#include <litmus/sched_plugin.h>
17#include <litmus/litmus_softirq.h>
18
19/* TODO: Remove unneeded mb() and other barriers. */
20
21
22/* counts number of daemons ready to handle litmus irqs. */
23static atomic_t num_ready_klitirqds = ATOMIC_INIT(0);
24
25enum pending_flags
26{
27 LIT_TASKLET_LOW = 0x1,
28 LIT_TASKLET_HI = LIT_TASKLET_LOW<<1,
29 LIT_WORK = LIT_TASKLET_HI<<1
30};
31
32/* only support tasklet processing for now. */
33struct tasklet_head
34{
35 struct tasklet_struct *head;
36 struct tasklet_struct **tail;
37};
38
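/* Per-daemon bookkeeping. 'pending' is a bitmask of pending_flags; it and
 * the queues below are guarded by 'lock'. The queues are drained in
 * priority order: HI tasklets, then LOW tasklets, then work objects. */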
39struct klitirqd_info
40{
41 struct task_struct* klitirqd;
42 struct task_struct* current_owner;
43 int terminating;
44
45
46 raw_spinlock_t lock;
47
48 u32 pending;
49 atomic_t num_hi_pending;
50 atomic_t num_low_pending;
51 atomic_t num_work_pending;
52
53 /* in order of priority */
54 struct tasklet_head pending_tasklets_hi;
55 struct tasklet_head pending_tasklets;
56 struct list_head worklist;
57};
58
59/* one list for each klitirqd */
60static struct klitirqd_info klitirqds[NR_LITMUS_SOFTIRQD];
61
62
63
64
65
66int proc_read_klitirqd_stats(char *page, char **start,
67 off_t off, int count,
68 int *eof, void *data)
69{
70 int len = snprintf(page, PAGE_SIZE,
71 "num ready klitirqds: %d\n\n",
72 atomic_read(&num_ready_klitirqds));
73
74 if(klitirqd_is_ready())
75 {
76 int i;
77 for(i = 0; i < NR_LITMUS_SOFTIRQD; ++i)
78 {
79 len +=
80 snprintf(page + len, PAGE_SIZE - len, /* snprintf's return already excludes the '\0' */
81 "klitirqd_th%d: %s/%d\n"
82 "\tcurrent_owner: %s/%d\n"
83 "\tpending: %x\n"
84 "\tnum hi: %d\n"
85 "\tnum low: %d\n"
86 "\tnum work: %d\n\n",
87 i,
88 klitirqds[i].klitirqd->comm, klitirqds[i].klitirqd->pid,
89 (klitirqds[i].current_owner != NULL) ?
90 klitirqds[i].current_owner->comm : "(null)",
91 (klitirqds[i].current_owner != NULL) ?
92 klitirqds[i].current_owner->pid : 0,
93 klitirqds[i].pending,
94 atomic_read(&klitirqds[i].num_hi_pending),
95 atomic_read(&klitirqds[i].num_low_pending),
96 atomic_read(&klitirqds[i].num_work_pending));
97 }
98 }
99
100 return(len);
101}
102
103
104
105
106
107#if 0
108static atomic_t dump_id = ATOMIC_INIT(0);
109
110static void __dump_state(struct klitirqd_info* which, const char* caller)
111{
112 struct tasklet_struct* list;
113
114 int id = atomic_inc_return(&dump_id);
115
116 //if(in_interrupt())
117 {
118 if(which->current_owner)
119 {
120 TRACE("(id: %d caller: %s)\n"
121 "klitirqd: %s/%d\n"
122 "current owner: %s/%d\n"
123 "pending: %x\n",
124 id, caller,
125 which->klitirqd->comm, which->klitirqd->pid,
126 which->current_owner->comm, which->current_owner->pid,
127 which->pending);
128 }
129 else
130 {
131 TRACE("(id: %d caller: %s)\n"
132 "klitirqd: %s/%d\n"
133 "current owner: %p\n"
134 "pending: %x\n",
135 id, caller,
136 which->klitirqd->comm, which->klitirqd->pid,
137 NULL,
138 which->pending);
139 }
140
141 list = which->pending_tasklets.head;
142 while(list)
143 {
144 struct tasklet_struct *t = list;
145 list = list->next; /* advance */
146 if(t->owner)
147 TRACE("(id: %d caller: %s) Tasklet: %x, Owner = %s/%d\n", id, caller, t, t->owner->comm, t->owner->pid);
148 else
149 TRACE("(id: %d caller: %s) Tasklet: %x, Owner = %p\n", id, caller, t, NULL);
150 }
151 }
152}
153
154static void dump_state(struct klitirqd_info* which, const char* caller)
155{
156 unsigned long flags;
157
158 raw_spin_lock_irqsave(&which->lock, flags);
159 __dump_state(which, caller);
160 raw_spin_unlock_irqrestore(&which->lock, flags);
161}
162#endif
163
164
165/* forward declarations */
166static void ___litmus_tasklet_schedule(struct tasklet_struct *t,
167 struct klitirqd_info *which,
168 int wakeup);
169static void ___litmus_tasklet_hi_schedule(struct tasklet_struct *t,
170 struct klitirqd_info *which,
171 int wakeup);
172static void ___litmus_schedule_work(struct work_struct *w,
173 struct klitirqd_info *which,
174 int wakeup);
175
176
177
178inline unsigned int klitirqd_id(struct task_struct* tsk)
179{
180 int i;
181 for(i = 0; i < NR_LITMUS_SOFTIRQD; ++i)
182 {
183 if(klitirqds[i].klitirqd == tsk)
184 {
185 return i;
186 }
187 }
188
189 BUG();
190
191 return 0;
192}
193
194
195inline static u32 litirq_pending_hi_irqoff(struct klitirqd_info* which)
196{
197 return (which->pending & LIT_TASKLET_HI);
198}
199
200inline static u32 litirq_pending_low_irqoff(struct klitirqd_info* which)
201{
202 return (which->pending & LIT_TASKLET_LOW);
203}
204
205inline static u32 litirq_pending_work_irqoff(struct klitirqd_info* which)
206{
207 return (which->pending & LIT_WORK);
208}
209
210inline static u32 litirq_pending_irqoff(struct klitirqd_info* which)
211{
212 return(which->pending);
213}
214
215
216inline static u32 litirq_pending(struct klitirqd_info* which)
217{
218 unsigned long flags;
219 u32 pending;
220
221 raw_spin_lock_irqsave(&which->lock, flags);
222 pending = litirq_pending_irqoff(which);
223 raw_spin_unlock_irqrestore(&which->lock, flags);
224
225 return pending;
226}
227
228inline static u32 litirq_pending_with_owner(struct klitirqd_info* which, struct task_struct* owner)
229{
230 unsigned long flags;
231 u32 pending;
232
233 raw_spin_lock_irqsave(&which->lock, flags);
234 pending = litirq_pending_irqoff(which);
235 if(pending)
236 {
237 if(which->current_owner != owner)
238 {
239 pending = 0; // owner switch!
240 }
241 }
242 raw_spin_unlock_irqrestore(&which->lock, flags);
243
244 return pending;
245}
246
247
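/* Atomically snapshot the pending flags together with the current owner and
 * a pointer to that owner's klitirqd_sem. Returns 0 (with NULL outputs) if
 * nothing is pending. */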
248inline static u32 litirq_pending_and_sem_and_owner(struct klitirqd_info* which,
249 struct mutex** sem,
250 struct task_struct** t)
251{
252 unsigned long flags;
253 u32 pending;
254
255 /* init values */
256 *sem = NULL;
257 *t = NULL;
258
259 raw_spin_lock_irqsave(&which->lock, flags);
260
261 pending = litirq_pending_irqoff(which);
262 if(pending)
263 {
264 if(which->current_owner != NULL)
265 {
266 *t = which->current_owner;
267 *sem = &tsk_rt(which->current_owner)->klitirqd_sem;
268 }
269 else
270 {
271 BUG();
272 }
273 }
274 raw_spin_unlock_irqrestore(&which->lock, flags);
275
276 if(likely(*sem))
277 {
278 return pending;
279 }
280 else
281 {
282 return 0;
283 }
284}
285
286/* returns true if the next piece of work to do is from a different owner.
287 */
288static int tasklet_ownership_change(
289 struct klitirqd_info* which,
290 enum pending_flags taskletQ)
291{
292 /* this function doesn't have to look at work objects since they have
293 priority below tasklets. */
294
295 unsigned long flags;
296 int ret = 0;
297
298 raw_spin_lock_irqsave(&which->lock, flags);
299
300 switch(taskletQ)
301 {
302 case LIT_TASKLET_HI:
303 if(litirq_pending_hi_irqoff(which))
304 {
305 ret = (which->pending_tasklets_hi.head->owner !=
306 which->current_owner);
307 }
308 break;
309 case LIT_TASKLET_LOW:
310 if(litirq_pending_low_irqoff(which))
311 {
312 ret = (which->pending_tasklets.head->owner !=
313 which->current_owner);
314 }
315 break;
316 default:
317 break;
318 }
319
320 raw_spin_unlock_irqrestore(&which->lock, flags);
321
322 TRACE_TASK(which->klitirqd, "ownership change needed: %d\n", ret);
323
324 return ret;
325}
326
327
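/* Recompute which task the daemon should inherit its priority from: the
 * owner of the highest-priority pending item (HI tasklet, then LOW tasklet,
 * then work object). Changes are propagated through the active plugin's
 * set_prio_inh_klitirqd()/clear_prio_inh_klitirqd() callbacks.
 * Caller must hold which->lock. */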
328static void __reeval_prio(struct klitirqd_info* which)
329{
330 struct task_struct* next_owner = NULL;
331 struct task_struct* klitirqd = which->klitirqd;
332
333 /* Check in prio-order */
334 u32 pending = litirq_pending_irqoff(which);
335
336 //__dump_state(which, "__reeval_prio: before");
337
338 if(pending)
339 {
340 if(pending & LIT_TASKLET_HI)
341 {
342 next_owner = which->pending_tasklets_hi.head->owner;
343 }
344 else if(pending & LIT_TASKLET_LOW)
345 {
346 next_owner = which->pending_tasklets.head->owner;
347 }
348 else if(pending & LIT_WORK)
349 {
350 struct work_struct* work =
351 list_first_entry(&which->worklist, struct work_struct, entry);
352 next_owner = work->owner;
353 }
354 }
355
356 if(next_owner != which->current_owner)
357 {
358 struct task_struct* old_owner = which->current_owner;
359
360 /* bind the next owner. */
361 which->current_owner = next_owner;
362 mb();
363
364 if(next_owner != NULL)
365 {
366 if(!in_interrupt())
367 {
368 TRACE_CUR("%s: Ownership change: %s/%d to %s/%d\n", __FUNCTION__,
369 ((tsk_rt(klitirqd)->inh_task) ? tsk_rt(klitirqd)->inh_task : klitirqd)->comm,
370 ((tsk_rt(klitirqd)->inh_task) ? tsk_rt(klitirqd)->inh_task : klitirqd)->pid,
371 next_owner->comm, next_owner->pid);
372 }
373 else
374 {
375 TRACE("%s: Ownership change: %s/%d to %s/%d\n", __FUNCTION__,
376 ((tsk_rt(klitirqd)->inh_task) ? tsk_rt(klitirqd)->inh_task : klitirqd)->comm,
377 ((tsk_rt(klitirqd)->inh_task) ? tsk_rt(klitirqd)->inh_task : klitirqd)->pid,
378 next_owner->comm, next_owner->pid);
379 }
380
381 litmus->set_prio_inh_klitirqd(klitirqd, old_owner, next_owner);
382 }
383 else
384 {
385 if(likely(!in_interrupt()))
386 {
387 TRACE_CUR("%s: Ownership change: %s/%d to NULL (reverting)\n",
388 __FUNCTION__, klitirqd->comm, klitirqd->pid);
389 }
390 else
391 {
392 // is this a bug?
393 TRACE("%s: Ownership change: %s/%d to NULL (reverting)\n",
394 __FUNCTION__, klitirqd->comm, klitirqd->pid);
395 }
396
397 BUG_ON(pending != 0);
398 litmus->clear_prio_inh_klitirqd(klitirqd, old_owner);
399 }
400 }
401
402 //__dump_state(which, "__reeval_prio: after");
403}
404
405static void reeval_prio(struct klitirqd_info* which)
406{
407 unsigned long flags;
408
409 raw_spin_lock_irqsave(&which->lock, flags);
410 __reeval_prio(which);
411 raw_spin_unlock_irqrestore(&which->lock, flags);
412}
413
414
415static void wakeup_litirqd_locked(struct klitirqd_info* which)
416{
417 /* Interrupts are disabled: no need to stop preemption */
418 if (which && which->klitirqd)
419 {
420 __reeval_prio(which); /* configure the proper priority */
421
422 if(which->klitirqd->state != TASK_RUNNING)
423 {
424 TRACE("%s: Waking up klitirqd: %s/%d\n", __FUNCTION__,
425 which->klitirqd->comm, which->klitirqd->pid);
426
427 wake_up_process(which->klitirqd);
428 }
429 }
430}
431
432
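/* Drain one tasklet queue: detach the entire list while holding the lock,
 * then run every tasklet that belongs to the current owner. Tasklets that
 * are owned by someone else, disabled, or already running are requeued at
 * the tail without re-waking the daemon. */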
433static void do_lit_tasklet(struct klitirqd_info* which,
434 struct tasklet_head* pending_tasklets)
435{
436 unsigned long flags;
437 struct tasklet_struct *list;
438 atomic_t* count;
439
440 raw_spin_lock_irqsave(&which->lock, flags);
441
442 //__dump_state(which, "do_lit_tasklet: before steal");
443
444 /* copy out the tasklets for our private use. */
445 list = pending_tasklets->head;
446 pending_tasklets->head = NULL;
447 pending_tasklets->tail = &pending_tasklets->head;
448
449 /* remove pending flag */
450 which->pending &= (pending_tasklets == &which->pending_tasklets) ?
451 ~LIT_TASKLET_LOW :
452 ~LIT_TASKLET_HI;
453
454 count = (pending_tasklets == &which->pending_tasklets) ?
455 &which->num_low_pending:
456 &which->num_hi_pending;
457
458 //__dump_state(which, "do_lit_tasklet: after steal");
459
460 raw_spin_unlock_irqrestore(&which->lock, flags);
461
462
463 while(list)
464 {
465 struct tasklet_struct *t = list;
466
467 /* advance, lest we forget */
468 list = list->next;
469
470 /* execute tasklet if it has my priority and is free */
471 if ((t->owner == which->current_owner) && tasklet_trylock(t)) {
472 if (!atomic_read(&t->count)) {
473
474 sched_trace_tasklet_begin(t->owner);
475
476 if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
477 {
478 BUG();
479 }
480 TRACE_CUR("%s: Invoking tasklet.\n", __FUNCTION__);
481 t->func(t->data);
482 tasklet_unlock(t);
483
484 atomic_dec(count);
485
486 sched_trace_tasklet_end(t->owner, 0ul);
487
488 continue; /* process more tasklets */
489 }
490 tasklet_unlock(t);
491 }
492
493 TRACE_CUR("%s: Could not invoke tasklet. Requeuing.\n", __FUNCTION__);
494
495 /* couldn't process tasklet. put it back at the end of the queue. */
496 if(pending_tasklets == &which->pending_tasklets)
497 ___litmus_tasklet_schedule(t, which, 0);
498 else
499 ___litmus_tasklet_hi_schedule(t, which, 0);
500 }
501}
502
503
504// returns 1 if priorities need to be changed to continue processing
505// pending tasklets.
506static int do_litirq(struct klitirqd_info* which)
507{
508 u32 pending;
509 int resched = 0;
510
511 if(in_interrupt())
512 {
513 TRACE("%s: exiting early: in interrupt context!\n", __FUNCTION__);
514 return(0);
515 }
516
517 if(which->klitirqd != current)
518 {
519 TRACE_CUR("%s: exiting early: thread/info mismatch! Running %s/%d but given %s/%d.\n",
520 __FUNCTION__, current->comm, current->pid,
521 which->klitirqd->comm, which->klitirqd->pid);
522 return(0);
523 }
524
525 if(!is_realtime(current))
526 {
527 TRACE_CUR("%s: exiting early: klitirqd is not real-time. Sched Policy = %d\n",
528 __FUNCTION__, current->policy);
529 return(0);
530 }
531
532
533 /* We only handle tasklets & work objects, no need for RCU triggers? */
534
535 pending = litirq_pending(which);
536 if(pending)
537 {
538 /* extract the work to do and do it! */
539 if(pending & LIT_TASKLET_HI)
540 {
541 TRACE_CUR("%s: Invoking HI tasklets.\n", __FUNCTION__);
542 do_lit_tasklet(which, &which->pending_tasklets_hi);
543 resched = tasklet_ownership_change(which, LIT_TASKLET_HI);
544
545 if(resched)
546 {
547 TRACE_CUR("%s: HI tasklets of another owner remain. "
548 "Skipping any LOW tasklets.\n", __FUNCTION__);
549 }
550 }
551
552 if(!resched && (pending & LIT_TASKLET_LOW))
553 {
554 TRACE_CUR("%s: Invoking LOW tasklets.\n", __FUNCTION__);
555 do_lit_tasklet(which, &which->pending_tasklets);
556 resched = tasklet_ownership_change(which, LIT_TASKLET_LOW);
557
558 if(resched)
559 {
560 TRACE_CUR("%s: LOW tasklets of another owner remain. "
561 "Skipping any work objects.\n", __FUNCTION__);
562 }
563 }
564 }
565
566 return(resched);
567}
568
569
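/* Execute at most one pending work object belonging to the current owner;
 * anything else is requeued. Handling a single item per call lets the
 * higher-priority tasklet queues be re-checked between work items. */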
570static void do_work(struct klitirqd_info* which)
571{
572 unsigned long flags;
573 work_func_t f;
574 struct work_struct* work;
575
576 // only execute one work-queue item to yield to tasklets.
577 // ...is this a good idea, or should we just batch them?
578 raw_spin_lock_irqsave(&which->lock, flags);
579
580 if(!litirq_pending_work_irqoff(which))
581 {
582 raw_spin_unlock_irqrestore(&which->lock, flags);
583 goto no_work;
584 }
585
586 work = list_first_entry(&which->worklist, struct work_struct, entry);
587 list_del_init(&work->entry);
588
589 if(list_empty(&which->worklist))
590 {
591 which->pending &= ~LIT_WORK;
592 }
593
594 raw_spin_unlock_irqrestore(&which->lock, flags);
595
596
597
598 /* safe to read current_owner outside of lock since only this thread
599 may write to the pointer. */
600 if(work->owner == which->current_owner)
601 {
602 TRACE_CUR("%s: Invoking work object.\n", __FUNCTION__);
603 // do the work!
604 work_clear_pending(work);
605 f = work->func;
606 f(work); /* can't touch 'work' after this point,
607 the user may have freed it. */
608
609 atomic_dec(&which->num_work_pending);
610 }
611 else
612 {
613 TRACE_CUR("%s: Could not invoke work object. Requeuing.\n",
614 __FUNCTION__);
615 ___litmus_schedule_work(work, which, 0);
616 }
617
618no_work:
619 return;
620}
621
622
623static int set_litmus_daemon_sched(void)
624{
625 /* set up a daemon job that will never complete.
626 it should only ever run on behalf of another
627 real-time task.
628
629 TODO: Transition to a new job whenever a
630 new tasklet is handled */
631
632 int ret = 0;
633
634 struct rt_task tp = {
635 .exec_cost = 0,
636 .period = 1000000000, /* dummy 1 second period */
637 .phase = 0,
638 .cpu = task_cpu(current),
639 .budget_policy = NO_ENFORCEMENT,
640 .cls = RT_CLASS_BEST_EFFORT
641 };
642
643 struct sched_param param = { .sched_priority = 0};
644
645
646 /* set task params, mark as proxy thread, and init other data */
647 tsk_rt(current)->task_params = tp;
648 tsk_rt(current)->is_proxy_thread = 1;
649 tsk_rt(current)->cur_klitirqd = NULL;
650 //init_MUTEX(&tsk_rt(current)->klitirqd_sem);
651 mutex_init(&tsk_rt(current)->klitirqd_sem);
652 //init_completion(&tsk_rt(current)->klitirqd_sem);
653 atomic_set(&tsk_rt(current)->klitirqd_sem_stat, NOT_HELD);
654
655 /* inform the OS we're SCHED_LITMUS --
656 sched_setscheduler_nocheck() calls litmus_admit_task(). */
657 ret = sched_setscheduler_nocheck(current, SCHED_LITMUS, &param);
658
659 return ret;
660}
661
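/* The daemon serializes with its current owner through the owner's
 * klitirqd_sem: it may only execute the owner's deferred work while holding
 * that semaphore (the "execution phase"). */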
662static void enter_execution_phase(struct klitirqd_info* which,
663 struct mutex* sem,
664 struct task_struct* t)
665{
666 TRACE_CUR("%s: Trying to enter execution phase. "
667 "Acquiring semaphore of %s/%d\n", __FUNCTION__,
668 t->comm, t->pid);
669 down_and_set_stat(current, HELD, sem);
670 TRACE_CUR("%s: Execution phase entered! "
671 "Acquired semaphore of %s/%d\n", __FUNCTION__,
672 t->comm, t->pid);
673}
674
675static void exit_execution_phase(struct klitirqd_info* which,
676 struct mutex* sem,
677 struct task_struct* t)
678{
679 TRACE_CUR("%s: Exiting execution phase. "
680 "Releasing semaphore of %s/%d\n", __FUNCTION__,
681 t->comm, t->pid);
682 if(atomic_read(&tsk_rt(current)->klitirqd_sem_stat) == HELD)
683 {
684 up_and_set_stat(current, NOT_HELD, sem);
685 TRACE_CUR("%s: Execution phase exited! "
686 "Released semaphore of %s/%d\n", __FUNCTION__,
687 t->comm, t->pid);
688 }
689 else
690 {
691 TRACE_CUR("%s: COULDN'T RELEASE SEMAPHORE BECAUSE ONE IS NOT HELD!\n", __FUNCTION__);
692 }
693}
694
695/* main loop for the klitirqd daemon threads */
696static int run_klitirqd(void* unused)
697{
698 struct klitirqd_info* which = &klitirqds[klitirqd_id(current)];
699 struct mutex* sem;
700 struct task_struct* owner;
701
702 int rt_status = set_litmus_daemon_sched();
703
704 if(rt_status != 0)
705 {
706 TRACE_CUR("%s: Failed to transition to rt-task.\n", __FUNCTION__);
707 goto rt_failed;
708 }
709
710 atomic_inc(&num_ready_klitirqds);
711
712 set_current_state(TASK_INTERRUPTIBLE);
713
714 while (!kthread_should_stop())
715 {
716 preempt_disable();
717 if (!litirq_pending(which))
718 {
719 /* sleep for work */
720 TRACE_CUR("%s: No more tasklets or work objects. Going to sleep.\n",
721 __FUNCTION__);
722 preempt_enable_no_resched();
723 schedule();
724
725 if(kthread_should_stop()) /* bail out */
726 {
727 TRACE_CUR("%s:%d: Signaled to terminate.\n", __FUNCTION__, __LINE__);
728 continue;
729 }
730
731 preempt_disable();
732 }
733
734 __set_current_state(TASK_RUNNING);
735
736 while (litirq_pending_and_sem_and_owner(which, &sem, &owner))
737 {
738 int needs_resched = 0;
739
740 preempt_enable_no_resched();
741
742 BUG_ON(sem == NULL);
743
744 // wait to enter execution phase; wait for 'current_owner' to block.
745 enter_execution_phase(which, sem, owner);
746
747 if(kthread_should_stop())
748 {
749 TRACE_CUR("%s:%d: Signaled to terminate.\n", __FUNCTION__, __LINE__);
750 break;
751 }
752
753 preempt_disable();
754
755 /* Double check that there's still pending work and the owner hasn't
756 * changed. Pending items may have been flushed while we were sleeping.
757 */
758 if(litirq_pending_with_owner(which, owner))
759 {
760 TRACE_CUR("%s: Executing tasklets and/or work objects.\n",
761 __FUNCTION__);
762
763 needs_resched = do_litirq(which);
764
765 preempt_enable_no_resched();
766
767 // work objects are preemptible.
768 if(!needs_resched)
769 {
770 do_work(which);
771 }
772
773 // exit execution phase.
774 exit_execution_phase(which, sem, owner);
775
776 TRACE_CUR("%s: Setting up next priority.\n", __FUNCTION__);
777 reeval_prio(which); /* check if we need to change priority here */
778 }
779 else
780 {
781 TRACE_CUR("%s: Pending work was flushed! Prev owner was %s/%d\n",
782 __FUNCTION__,
783 owner->comm, owner->pid);
784 preempt_enable_no_resched();
785
786 // exit execution phase.
787 exit_execution_phase(which, sem, owner);
788 }
789
790 cond_resched();
791 preempt_disable();
792 }
793 preempt_enable();
794 set_current_state(TASK_INTERRUPTIBLE);
795 }
796 __set_current_state(TASK_RUNNING);
797
798 atomic_dec(&num_ready_klitirqds);
799
800rt_failed:
801 litmus_exit_task(current);
802
803 return rt_status;
804}
805
806
807struct klitirqd_launch_data
808{
809 int* cpu_affinity;
810 struct work_struct work;
811};
812
813/* executed by a kworker from workqueues */
814static void launch_klitirqd(struct work_struct *work)
815{
816 int i;
817
818 struct klitirqd_launch_data* launch_data =
819 container_of(work, struct klitirqd_launch_data, work);
820
821 TRACE("%s: Creating %d klitirqds\n", __FUNCTION__, NR_LITMUS_SOFTIRQD);
822
823 /* create the daemon threads */
824 for(i = 0; i < NR_LITMUS_SOFTIRQD; ++i)
825 {
826 if(launch_data->cpu_affinity)
827 {
828 klitirqds[i].klitirqd =
829 kthread_create(
830 run_klitirqd,
831 /* treat the affinity as a pointer, we'll cast it back later */
832 (void*)(long long)launch_data->cpu_affinity[i],
833 "klitirqd_th%d/%d",
834 i,
835 launch_data->cpu_affinity[i]);
836
837 /* litmus will put us in the right cluster. */
838 kthread_bind(klitirqds[i].klitirqd, launch_data->cpu_affinity[i]);
839 }
840 else
841 {
842 klitirqds[i].klitirqd =
843 kthread_create(
844 run_klitirqd,
845 /* treat the affinity as a pointer, we'll cast it back later */
846 (void*)(long long)(-1),
847 "klitirqd_th%d",
848 i);
849 }
850 }
851
852 TRACE("%s: Launching %d klitirqds\n", __FUNCTION__, NR_LITMUS_SOFTIRQD);
853
854 /* unleash the daemons */
855 for(i = 0; i < NR_LITMUS_SOFTIRQD; ++i)
856 {
857 wake_up_process(klitirqds[i].klitirqd);
858 }
859
860 if(launch_data->cpu_affinity)
861 kfree(launch_data->cpu_affinity);
862 kfree(launch_data);
863}
864
865
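/* Create the NR_LITMUS_SOFTIRQD daemon threads. Safe to call from atomic
 * context: the queues are initialized here, while thread creation is
 * deferred to a regular workqueue via launch_klitirqd(). 'affinity', if
 * non-NULL, is an array of NR_LITMUS_SOFTIRQD CPU ids to bind each daemon
 * to. */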
866void spawn_klitirqd(int* affinity)
867{
868 int i;
869 struct klitirqd_launch_data* delayed_launch;
870
871 if(atomic_read(&num_ready_klitirqds) != 0)
872 {
873 TRACE("%s: At least one klitirqd is already running! Need to call kill_klitirqd()?\n", __FUNCTION__);
874 return;
875 }
876
877 /* init the tasklet & work queues */
878 for(i = 0; i < NR_LITMUS_SOFTIRQD; ++i)
879 {
880 klitirqds[i].terminating = 0;
881 klitirqds[i].pending = 0;
882
883 klitirqds[i].num_hi_pending.counter = 0;
884 klitirqds[i].num_low_pending.counter = 0;
885 klitirqds[i].num_work_pending.counter = 0;
886
887 klitirqds[i].pending_tasklets_hi.head = NULL;
888 klitirqds[i].pending_tasklets_hi.tail = &klitirqds[i].pending_tasklets_hi.head;
889
890 klitirqds[i].pending_tasklets.head = NULL;
891 klitirqds[i].pending_tasklets.tail = &klitirqds[i].pending_tasklets.head;
892
893 INIT_LIST_HEAD(&klitirqds[i].worklist);
894
895 raw_spin_lock_init(&klitirqds[i].lock);
896 }
897
898 /* wait to flush the initializations to memory since other threads
899 will access it. */
900 mb();
901
902 /* tell a work queue to launch the threads. we can't make scheduling
903 calls since we're in an atomic state. */
904 TRACE("%s: Setting callback up to launch klitirqds\n", __FUNCTION__);
905 delayed_launch = kmalloc(sizeof(struct klitirqd_launch_data), GFP_ATOMIC);
906 if(affinity)
907 {
908 delayed_launch->cpu_affinity =
909 kmalloc(sizeof(int)*NR_LITMUS_SOFTIRQD, GFP_ATOMIC);
910
911 memcpy(delayed_launch->cpu_affinity, affinity,
912 sizeof(int)*NR_LITMUS_SOFTIRQD);
913 }
914 else
915 {
916 delayed_launch->cpu_affinity = NULL;
917 }
918 INIT_WORK(&delayed_launch->work, launch_klitirqd);
919 schedule_work(&delayed_launch->work);
920}
921
922
923void kill_klitirqd(void)
924{
925 if(!klitirqd_is_dead())
926 {
927 int i;
928
929 TRACE("%s: Killing %d klitirqds\n", __FUNCTION__, NR_LITMUS_SOFTIRQD);
930
931 for(i = 0; i < NR_LITMUS_SOFTIRQD; ++i)
932 {
933 if(klitirqds[i].terminating != 1)
934 {
935 klitirqds[i].terminating = 1;
936 mb(); /* just to be sure? */
937 flush_pending(klitirqds[i].klitirqd, NULL);
938
939 /* signal termination */
940 kthread_stop(klitirqds[i].klitirqd);
941 }
942 }
943 }
944}
945
946
947int klitirqd_is_ready(void)
948{
949 return(atomic_read(&num_ready_klitirqds) == NR_LITMUS_SOFTIRQD);
950}
951
952int klitirqd_is_dead(void)
953{
954 return(atomic_read(&num_ready_klitirqds) == 0);
955}
956
957
958struct task_struct* get_klitirqd(unsigned int k_id)
959{
960 return(klitirqds[k_id].klitirqd);
961}
962
963
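/* Hand pending items back to Linux's softirq/workqueue machinery. If
 * 'owner' is non-NULL, only that task's tasklets and work objects are
 * flushed; items belonging to other owners remain queued. Afterwards the
 * daemon's priority is re-evaluated and ksoftirqd is woken if any tasklets
 * were handed back. */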
964void flush_pending(struct task_struct* klitirqd_thread,
965 struct task_struct* owner)
966{
967 unsigned int k_id = klitirqd_id(klitirqd_thread);
968 struct klitirqd_info *which = &klitirqds[k_id];
969
970 unsigned long flags;
971 struct tasklet_struct *list;
972
973 u32 work_flushed = 0;
974
975 raw_spin_lock_irqsave(&which->lock, flags);
976
977 //__dump_state(which, "flush_pending: before");
978
979 // flush hi tasklets.
980 if(litirq_pending_hi_irqoff(which))
981 {
982 which->pending &= ~LIT_TASKLET_HI;
983
984 list = which->pending_tasklets_hi.head;
985 which->pending_tasklets_hi.head = NULL;
986 which->pending_tasklets_hi.tail = &which->pending_tasklets_hi.head;
987
988 TRACE("%s: Handing HI tasklets back to Linux.\n", __FUNCTION__);
989
990 while(list)
991 {
992 struct tasklet_struct *t = list;
993 list = list->next;
994
995 if(likely((t->owner == owner) || (owner == NULL)))
996 {
997 if(unlikely(!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state)))
998 {
999 BUG();
1000 }
1001
1002 work_flushed |= LIT_TASKLET_HI;
1003
1004 t->owner = NULL;
1005
1006 // The SCHED bit was cleared just above, so re-setting it here must succeed.
1007 if(!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
1008 {
1009 atomic_dec(&which->num_hi_pending);
1010 ___tasklet_hi_schedule(t);
1011 }
1012 else
1013 {
1014 TRACE("%s: dropped hi tasklet??\n", __FUNCTION__);
1015 BUG();
1016 }
1017 }
1018 else
1019 {
1020 TRACE("%s: Could not flush a HI tasklet.\n", __FUNCTION__);
1021 // put back on queue.
1022 ___litmus_tasklet_hi_schedule(t, which, 0);
1023 }
1024 }
1025 }
1026
1027 // flush low tasklets.
1028 if(litirq_pending_low_irqoff(which))
1029 {
1030 which->pending &= ~LIT_TASKLET_LOW;
1031
1032 list = which->pending_tasklets.head;
1033 which->pending_tasklets.head = NULL;
1034 which->pending_tasklets.tail = &which->pending_tasklets.head;
1035
1036 TRACE("%s: Handing LOW tasklets back to Linux.\n", __FUNCTION__);
1037
1038 while(list)
1039 {
1040 struct tasklet_struct *t = list;
1041 list = list->next;
1042
1043 if(likely((t->owner == owner) || (owner == NULL)))
1044 {
1045 if(unlikely(!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state)))
1046 {
1047 BUG();
1048 }
1049
1050 work_flushed |= LIT_TASKLET_LOW;
1051
1052 t->owner = NULL;
1053 sched_trace_tasklet_end(owner, 1ul);
1054
1055 if(!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
1056 {
1057 atomic_dec(&which->num_low_pending);
1058 ___tasklet_schedule(t);
1059 }
1060 else
1061 {
1062 TRACE("%s: dropped tasklet??\n", __FUNCTION__);
1063 BUG();
1064 }
1065 }
1066 else
1067 {
1068 TRACE("%s: Could not flush a LOW tasklet.\n", __FUNCTION__);
1069 // put back on queue
1070 ___litmus_tasklet_schedule(t, which, 0);
1071 }
1072 }
1073 }
1074
1075 // flush work objects
1076 if(litirq_pending_work_irqoff(which))
1077 {
1078 which->pending &= ~LIT_WORK;
1079
1080 TRACE("%s: Handing work objects back to Linux.\n", __FUNCTION__);
1081
1082 while(!list_empty(&which->worklist))
1083 {
1084 struct work_struct* work =
1085 list_first_entry(&which->worklist, struct work_struct, entry);
1086 list_del_init(&work->entry);
1087
1088 if(likely((work->owner == owner) || (owner == NULL)))
1089 {
1090 work_flushed |= LIT_WORK;
1091 atomic_dec(&which->num_work_pending);
1092
1093 work->owner = NULL;
1094 sched_trace_work_end(owner, current, 1ul);
1095 __schedule_work(work);
1096 }
1097 else
1098 {
1099 TRACE("%s: Could not flush a work object.\n", __FUNCTION__);
1100 // put back on queue
1101 ___litmus_schedule_work(work, which, 0);
1102 }
1103 }
1104 }
1105
1106 //__dump_state(which, "flush_pending: after (before reeval prio)");
1107
1108
1109 mb(); /* commit changes to pending flags */
1110
1111 /* reset the scheduling priority */
1112 if(work_flushed)
1113 {
1114 __reeval_prio(which);
1115
1116 /* Try to offload flushed tasklets to Linux's ksoftirqd. */
1117 if(work_flushed & (LIT_TASKLET_LOW | LIT_TASKLET_HI))
1118 {
1119 wakeup_softirqd();
1120 }
1121 }
1122 else
1123 {
1124 TRACE_CUR("%s: no work flushed, so __reeval_prio() skipped\n", __FUNCTION__);
1125 }
1126
1127 raw_spin_unlock_irqrestore(&which->lock, flags);
1128}
1129
1130
1131
1132
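/* Append a tasklet to the LOW queue. The daemon is woken (if 'wakeup' is
 * set) only on the transition from "nothing pending" to "something
 * pending"; otherwise an earlier schedule already triggered the wakeup. */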
1133static void ___litmus_tasklet_schedule(struct tasklet_struct *t,
1134 struct klitirqd_info *which,
1135 int wakeup)
1136{
1137 unsigned long flags;
1138 u32 old_pending;
1139
1140 t->next = NULL;
1141
1142 raw_spin_lock_irqsave(&which->lock, flags);
1143
1144 //__dump_state(which, "___litmus_tasklet_schedule: before queuing");
1145
1146 *(which->pending_tasklets.tail) = t;
1147 which->pending_tasklets.tail = &t->next;
1148
1149 old_pending = which->pending;
1150 which->pending |= LIT_TASKLET_LOW;
1151
1152 atomic_inc(&which->num_low_pending);
1153
1154 mb();
1155
1156 if(!old_pending && wakeup)
1157 {
1158 wakeup_litirqd_locked(which); /* wake up the klitirqd */
1159 }
1160
1161 //__dump_state(which, "___litmus_tasklet_schedule: after queuing");
1162
1163 raw_spin_unlock_irqrestore(&which->lock, flags);
1164}
1165
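/* External entry point for deferring a tasklet to daemon k_id. The caller
 * must set t->owner to the real-time task on whose behalf the tasklet runs.
 * Returns 1 if the tasklet was accepted, 0 if it was rejected (daemon
 * terminating, or work objects currently pending -- see the TODO below).
 *
 * Minimal usage sketch (assumes the caller has already chosen a daemon
 * index 'k_id' and that 'rt_owner' is the owning real-time task; these
 * names are illustrative only):
 *
 *     tasklet_init(&my_tasklet, my_handler, (unsigned long)my_data);
 *     my_tasklet.owner = rt_owner;
 *     if (!__litmus_tasklet_schedule(&my_tasklet, k_id))
 *         tasklet_schedule(&my_tasklet); // fall back to a normal softirq
 */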
1166int __litmus_tasklet_schedule(struct tasklet_struct *t, unsigned int k_id)
1167{
1168 int ret = 0; /* assume failure */
1169 if(unlikely((t->owner == NULL) || !is_realtime(t->owner)))
1170 {
1171 TRACE("%s: No owner associated with this tasklet!\n", __FUNCTION__);
1172 BUG();
1173 }
1174
1175 if(unlikely(k_id >= NR_LITMUS_SOFTIRQD))
1176 {
1177 TRACE("%s: No klitirqd_th%d!\n", __FUNCTION__, k_id);
1178 BUG();
1179 }
1180
1181 if(likely(!klitirqds[k_id].terminating))
1182 {
1183 /* Can't accept tasklets while we're processing a workqueue
1184 because they're handled by the same thread. This case is
1185 very RARE.
1186
1187 TODO: Use a separate thread for work objects!!!!!!
1188 */
1189 if(likely(atomic_read(&klitirqds[k_id].num_work_pending) == 0))
1190 {
1191 ret = 1;
1192 ___litmus_tasklet_schedule(t, &klitirqds[k_id], 1);
1193 }
1194 else
1195 {
1196 TRACE("%s: rejected tasklet because of pending work.\n",
1197 __FUNCTION__);
1198 }
1199 }
1200 return(ret);
1201}
1202
1203EXPORT_SYMBOL(__litmus_tasklet_schedule);
1204
1205
1206static void ___litmus_tasklet_hi_schedule(struct tasklet_struct *t,
1207 struct klitirqd_info *which,
1208 int wakeup)
1209{
1210 unsigned long flags;
1211 u32 old_pending;
1212
1213 t->next = NULL;
1214
1215 raw_spin_lock_irqsave(&which->lock, flags);
1216
1217 *(which->pending_tasklets_hi.tail) = t;
1218 which->pending_tasklets_hi.tail = &t->next;
1219
1220 old_pending = which->pending;
1221 which->pending |= LIT_TASKLET_HI;
1222
1223 atomic_inc(&which->num_hi_pending);
1224
1225 mb();
1226
1227 if(!old_pending && wakeup)
1228 {
1229 wakeup_litirqd_locked(which); /* wake up the klitirqd */
1230 }
1231
1232 raw_spin_unlock_irqrestore(&which->lock, flags);
1233}
1234
1235int __litmus_tasklet_hi_schedule(struct tasklet_struct *t, unsigned int k_id)
1236{
1237 int ret = 0; /* assume failure */
1238 if(unlikely((t->owner == NULL) || !is_realtime(t->owner)))
1239 {
1240 TRACE("%s: No owner associated with this tasklet!\n", __FUNCTION__);
1241 BUG();
1242 }
1243
1244 if(unlikely(k_id >= NR_LITMUS_SOFTIRQD))
1245 {
1246 TRACE("%s: No klitirqd_th%d!\n", __FUNCTION__, k_id);
1247 BUG();
1248 }
1249
1250 if(unlikely(!klitirqd_is_ready()))
1251 {
1252 TRACE("%s: klitirqd is not ready!\n", __FUNCTION__);
1253 BUG();
1254 }
1255
1256 if(likely(!klitirqds[k_id].terminating))
1257 {
1258 if(likely(atomic_read(&klitirqds[k_id].num_work_pending) == 0))
1259 {
1260 ret = 1;
1261 ___litmus_tasklet_hi_schedule(t, &klitirqds[k_id], 1);
1262 }
1263 else
1264 {
1265 TRACE("%s: rejected tasklet because of pending work.\n",
1266 __FUNCTION__);
1267 }
1268 }
1269 return(ret);
1270}
1271
1272EXPORT_SYMBOL(__litmus_tasklet_hi_schedule);
1273
1274
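/* Like __litmus_tasklet_hi_schedule(), but inserts at the head of the HI
 * queue so the tasklet runs before anything already pending. Must be called
 * with interrupts disabled (the lock is taken without irqsave; see the
 * BUG_ON below). */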
1275int __litmus_tasklet_hi_schedule_first(struct tasklet_struct *t, unsigned int k_id)
1276{
1277 int ret = 0; /* assume failure */
1278 u32 old_pending;
1279
1280 BUG_ON(!irqs_disabled());
1281
1282 if(unlikely((t->owner == NULL) || !is_realtime(t->owner)))
1283 {
1284 TRACE("%s: No owner associated with this tasklet!\n", __FUNCTION__);
1285 BUG();
1286 }
1287
1288 if(unlikely(k_id >= NR_LITMUS_SOFTIRQD))
1289 {
1290 TRACE("%s: No klitirqd_th%u!\n", __FUNCTION__, k_id);
1291 BUG();
1292 }
1293
1294 if(unlikely(!klitirqd_is_ready()))
1295 {
1296 TRACE("%s: klitirqd is not ready!\n", __FUNCTION__);
1297 BUG();
1298 }
1299
1300 if(likely(!klitirqds[k_id].terminating))
1301 {
1302 raw_spin_lock(&klitirqds[k_id].lock);
1303
1304 if(likely(atomic_read(&klitirqds[k_id].num_work_pending) == 0))
1305 {
1306 ret = 1; // success!
1307
1308 t->next = klitirqds[k_id].pending_tasklets_hi.head;
1309 klitirqds[k_id].pending_tasklets_hi.head = t;
1310
1311 old_pending = klitirqds[k_id].pending;
1312 klitirqds[k_id].pending |= LIT_TASKLET_HI;
1313
1314 atomic_inc(&klitirqds[k_id].num_hi_pending);
1315
1316 mb();
1317
1318 if(!old_pending)
1319 wakeup_litirqd_locked(&klitirqds[k_id]); /* wake up the klitirqd */
1320 }
1321 else
1322 {
1323 TRACE("%s: rejected tasklet because of pending work.\n",
1324 __FUNCTION__);
1325 }
1326
1327 raw_spin_unlock(&klitirqds[k_id].lock);
1328 }
1329 return(ret);
1330}
1331
1332EXPORT_SYMBOL(__litmus_tasklet_hi_schedule_first);
1333
1334
1335
1336static void ___litmus_schedule_work(struct work_struct *w,
1337 struct klitirqd_info *which,
1338 int wakeup)
1339{
1340 unsigned long flags;
1341 u32 old_pending;
1342
1343 raw_spin_lock_irqsave(&which->lock, flags);
1344
1345 work_pending(w);
1346 list_add_tail(&w->entry, &which->worklist);
1347
1348 old_pending = which->pending;
1349 which->pending |= LIT_WORK;
1350
1351 atomic_inc(&which->num_work_pending);
1352
1353 mb();
1354
1355 if(!old_pending && wakeup)
1356 {
1357 wakeup_litirqd_locked(which); /* wakeup the klitirqd */
1358 }
1359
1360 raw_spin_unlock_irqrestore(&which->lock, flags);
1361}
1362
1363int __litmus_schedule_work(struct work_struct *w, unsigned int k_id)
1364{
1365 int ret = 1; /* assume success */
1366 if(unlikely(w->owner == NULL) || !is_realtime(w->owner))
1367 {
1368 TRACE("%s: No owner associated with this work object!\n", __FUNCTION__);
1369 BUG();
1370 }
1371
1372 if(unlikely(k_id >= NR_LITMUS_SOFTIRQD))
1373 {
1374 TRACE("%s: No klitirqd_th%u!\n", __FUNCTION__, k_id);
1375 BUG();
1376 }
1377
1378 if(unlikely(!klitirqd_is_ready()))
1379 {
1380 TRACE("%s: klitirqd is not ready!\n", __FUNCTION__);
1381 BUG();
1382 }
1383
1384 if(likely(!klitirqds[k_id].terminating))
1385 ___litmus_schedule_work(w, &klitirqds[k_id], 1);
1386 else
1387 ret = 0;
1388 return(ret);
1389}
1390EXPORT_SYMBOL(__litmus_schedule_work);
1391
1392
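/* Helpers that pair operations on an owner's klitirqd_sem with updates to
 * the per-task klitirqd_sem_stat word (HELD, NOT_HELD, NEED_TO_REACQUIRE,
 * REACQUIRING), using the mutex_lock_sfx()/mutex_unlock_sfx() side-effect
 * hooks so the status is updated as part of the lock/unlock path. */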
1393static int set_klitirqd_sem_status(unsigned long stat)
1394{
1395 TRACE_CUR("SETTING STATUS FROM %d TO %d\n",
1396 atomic_read(&tsk_rt(current)->klitirqd_sem_stat),
1397 stat);
1398 atomic_set(&tsk_rt(current)->klitirqd_sem_stat, stat);
1399 //mb();
1400
1401 return(0);
1402}
1403
1404static int set_klitirqd_sem_status_if_not_held(unsigned long stat)
1405{
1406 if(atomic_read(&tsk_rt(current)->klitirqd_sem_stat) != HELD)
1407 {
1408 return(set_klitirqd_sem_status(stat));
1409 }
1410 return(-1);
1411}
1412
1413
1414void __down_and_reset_and_set_stat(struct task_struct* t,
1415 enum klitirqd_sem_status to_reset,
1416 enum klitirqd_sem_status to_set,
1417 struct mutex* sem)
1418{
1419#if 0
1420 struct rt_param* param = container_of(sem, struct rt_param, klitirqd_sem);
1421 struct task_struct* task = container_of(param, struct task_struct, rt_param);
1422
1423 TRACE_CUR("%s: entered. Locking semaphore of %s/%d\n",
1424 __FUNCTION__, task->comm, task->pid);
1425#endif
1426
1427 mutex_lock_sfx(sem,
1428 set_klitirqd_sem_status_if_not_held, to_reset,
1429 set_klitirqd_sem_status, to_set);
1430#if 0
1431 TRACE_CUR("%s: exiting. Have semaphore of %s/%d\n",
1432 __FUNCTION__, task->comm, task->pid);
1433#endif
1434}
1435
1436void down_and_set_stat(struct task_struct* t,
1437 enum klitirqd_sem_status to_set,
1438 struct mutex* sem)
1439{
1440#if 0
1441 struct rt_param* param = container_of(sem, struct rt_param, klitirqd_sem);
1442 struct task_struct* task = container_of(param, struct task_struct, rt_param);
1443
1444 TRACE_CUR("%s: entered. Locking semaphore of %s/%d\n",
1445 __FUNCTION__, task->comm, task->pid);
1446#endif
1447
1448 mutex_lock_sfx(sem,
1449 NULL, 0,
1450 set_klitirqd_sem_status, to_set);
1451
1452#if 0
1453 TRACE_CUR("%s: exiting. Have semaphore of %s/%d\n",
1454 __FUNCTION__, task->comm, task->pid);
1455#endif
1456}
1457
1458
1459void up_and_set_stat(struct task_struct* t,
1460 enum klitirqd_sem_status to_set,
1461 struct mutex* sem)
1462{
1463#if 0
1464 struct rt_param* param = container_of(sem, struct rt_param, klitirqd_sem);
1465 struct task_struct* task = container_of(param, struct task_struct, rt_param);
1466
1467 TRACE_CUR("%s: entered. Unlocking semaphore of %s/%d\n",
1468 __FUNCTION__,
1469 task->comm, task->pid);
1470#endif
1471
1472 mutex_unlock_sfx(sem, NULL, 0,
1473 set_klitirqd_sem_status, to_set);
1474
1475#if 0
1476 TRACE_CUR("%s: exiting. Unlocked semaphore of %s/%d\n",
1477 __FUNCTION__,
1478 task->comm, task->pid);
1479#endif
1480}
1481
1482
1483
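/* Give up the owner's klitirqd_sem when 't' blocks and mark it
 * NEED_TO_REACQUIRE; the counterpart, reacquire_klitirqd_lock(), takes the
 * semaphore back once the task is scheduled again. For proxy threads the
 * semaphore of the daemon's current owner is used instead. */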
1484void release_klitirqd_lock(struct task_struct* t)
1485{
1486 if(is_realtime(t) && (atomic_read(&tsk_rt(t)->klitirqd_sem_stat) == HELD))
1487 {
1488 struct mutex* sem;
1489 struct task_struct* owner = t;
1490
1491 if(t->state == TASK_RUNNING)
1492 {
1493 TRACE_TASK(t, "NOT giving up klitirqd_sem because we're not blocked!\n");
1494 return;
1495 }
1496
1497 if(likely(!tsk_rt(t)->is_proxy_thread))
1498 {
1499 sem = &tsk_rt(t)->klitirqd_sem;
1500 }
1501 else
1502 {
1503 unsigned int k_id = klitirqd_id(t);
1504 owner = klitirqds[k_id].current_owner;
1505
1506 BUG_ON(t != klitirqds[k_id].klitirqd);
1507
1508 if(likely(owner))
1509 {
1510 sem = &tsk_rt(owner)->klitirqd_sem;
1511 }
1512 else
1513 {
1514 BUG();
1515
1516 // We had the rug pulled out from under us. Abort attempt
1517 // to reacquire the lock since our client no longer needs us.
1518 TRACE_CUR("HUH?! How did this happen?\n");
1519 atomic_set(&tsk_rt(t)->klitirqd_sem_stat, NOT_HELD);
1520 return;
1521 }
1522 }
1523
1524 //TRACE_CUR("Releasing semaphore of %s/%d...\n", owner->comm, owner->pid);
1525 up_and_set_stat(t, NEED_TO_REACQUIRE, sem);
1526 //TRACE_CUR("Semaphore of %s/%d released!\n", owner->comm, owner->pid);
1527 }
1528 /*
1529 else if(is_realtime(t))
1530 {
1531 TRACE_CUR("%s: Nothing to do. Stat = %d\n", __FUNCTION__, tsk_rt(t)->klitirqd_sem_stat);
1532 }
1533 */
1534}
1535
1536int reacquire_klitirqd_lock(struct task_struct* t)
1537{
1538 int ret = 0;
1539
1540 if(is_realtime(t) && (atomic_read(&tsk_rt(t)->klitirqd_sem_stat) == NEED_TO_REACQUIRE))
1541 {
1542 struct mutex* sem;
1543 struct task_struct* owner = t;
1544
1545 if(likely(!tsk_rt(t)->is_proxy_thread))
1546 {
1547 sem = &tsk_rt(t)->klitirqd_sem;
1548 }
1549 else
1550 {
1551 unsigned int k_id = klitirqd_id(t);
1552 //struct task_struct* owner = klitirqds[k_id].current_owner;
1553 owner = klitirqds[k_id].current_owner;
1554
1555 BUG_ON(t != klitirqds[k_id].klitirqd);
1556
1557 if(likely(owner))
1558 {
1559 sem = &tsk_rt(owner)->klitirqd_sem;
1560 }
1561 else
1562 {
1563 // We had the rug pulled out from under us. Abort attempt
1564 // to reacquire the lock since our client no longer needs us.
1565 TRACE_CUR("No longer needs to reacquire klitirqd_sem!\n");
1566 atomic_set(&tsk_rt(t)->klitirqd_sem_stat, NOT_HELD);
1567 return(0);
1568 }
1569 }
1570
1571 //TRACE_CUR("Trying to reacquire semaphore of %s/%d\n", owner->comm, owner->pid);
1572 __down_and_reset_and_set_stat(t, REACQUIRING, HELD, sem);
1573 //TRACE_CUR("Reacquired semaphore %s/%d\n", owner->comm, owner->pid);
1574 }
1575 /*
1576 else if(is_realtime(t))
1577 {
1578 TRACE_CUR("%s: Nothing to do. Stat = %d\n", __FUNCTION__, tsk_rt(t)->klitirqd_sem_stat);
1579 }
1580 */
1581
1582 return(ret);
1583}
1584