Diffstat (limited to 'litmus/sched_gsn_edf.c')
-rw-r--r--  litmus/sched_gsn_edf.c | 1195
1 file changed, 1061 insertions(+), 134 deletions(-)
diff --git a/litmus/sched_gsn_edf.c b/litmus/sched_gsn_edf.c
index b8548b885b35..01791a18e8f3 100644
--- a/litmus/sched_gsn_edf.c
+++ b/litmus/sched_gsn_edf.c
@@ -12,24 +12,54 @@
12#include <linux/percpu.h> 12#include <linux/percpu.h>
13#include <linux/sched.h> 13#include <linux/sched.h>
14#include <linux/slab.h> 14#include <linux/slab.h>
15#include <linux/uaccess.h>
16#include <linux/module.h>
15 17
16#include <litmus/litmus.h> 18#include <litmus/litmus.h>
17#include <litmus/jobs.h> 19#include <litmus/jobs.h>
18#include <litmus/sched_plugin.h> 20#include <litmus/sched_plugin.h>
19#include <litmus/edf_common.h> 21#include <litmus/edf_common.h>
20#include <litmus/sched_trace.h> 22#include <litmus/sched_trace.h>
21#include <litmus/trace.h>
22 23
23#include <litmus/preempt.h> 24#include <litmus/preempt.h>
24#include <litmus/budget.h> 25#include <litmus/budget.h>
25 26
26#include <litmus/bheap.h> 27#include <litmus/bheap.h>
28#include <litmus/binheap.h>
29#include <litmus/trace.h>
30
31#ifdef CONFIG_LITMUS_LOCKING
32#include <litmus/kfmlp_lock.h>
33#endif
34
35#ifdef CONFIG_LITMUS_NESTED_LOCKING
36#include <litmus/rsm_lock.h>
37#include <litmus/ikglp_lock.h>
38#endif
27 39
28#ifdef CONFIG_SCHED_CPU_AFFINITY 40#ifdef CONFIG_SCHED_CPU_AFFINITY
29#include <litmus/affinity.h> 41#include <litmus/affinity.h>
30#endif 42#endif
31 43
32#include <linux/module.h> 44#ifdef CONFIG_REALTIME_AUX_TASKS
45#include <litmus/aux_tasks.h>
46#endif
47
48#ifdef CONFIG_LITMUS_SOFTIRQD
49#include <litmus/litmus_softirq.h>
50#endif
51
52#ifdef CONFIG_LITMUS_PAI_SOFTIRQD
53#include <linux/interrupt.h>
54#endif
55
56#ifdef CONFIG_LITMUS_NVIDIA
57#include <litmus/nvidia_info.h>
58#endif
59
60#if defined(CONFIG_LITMUS_AFFINITY_LOCKING) && defined(CONFIG_LITMUS_NVIDIA)
61#include <litmus/gpu_affinity.h>
62#endif
33 63
34/* Overview of GSN-EDF operations. 64/* Overview of GSN-EDF operations.
35 * 65 *
@@ -104,52 +134,64 @@ typedef struct {
104 int cpu; 134 int cpu;
105 struct task_struct* linked; /* only RT tasks */ 135 struct task_struct* linked; /* only RT tasks */
106 struct task_struct* scheduled; /* only RT tasks */ 136 struct task_struct* scheduled; /* only RT tasks */
107 struct bheap_node* hn; 137 struct binheap_node hn;
108} cpu_entry_t; 138} cpu_entry_t;
109DEFINE_PER_CPU(cpu_entry_t, gsnedf_cpu_entries); 139DEFINE_PER_CPU(cpu_entry_t, gsnedf_cpu_entries);
110 140
111cpu_entry_t* gsnedf_cpus[NR_CPUS]; 141cpu_entry_t* gsnedf_cpus[NR_CPUS];
112 142
113/* the cpus queue themselves according to priority in here */ 143/* the cpus queue themselves according to priority in here */
114static struct bheap_node gsnedf_heap_node[NR_CPUS]; 144static struct binheap gsnedf_cpu_heap;
115static struct bheap gsnedf_cpu_heap;
116 145
117static rt_domain_t gsnedf; 146static rt_domain_t gsnedf;
118#define gsnedf_lock (gsnedf.ready_lock) 147#define gsnedf_lock (gsnedf.ready_lock)
119 148
149#ifdef CONFIG_LITMUS_DGL_SUPPORT
150static raw_spinlock_t dgl_lock;
151
152static raw_spinlock_t* gsnedf_get_dgl_spinlock(struct task_struct *t)
153{
154 return(&dgl_lock);
155}
156#endif
157
158#ifdef CONFIG_LITMUS_PAI_SOFTIRQD
159struct tasklet_head gsnedf_pending_tasklets;
160#endif
161
120 162
121/* Uncomment this if you want to see all scheduling decisions in the 163/* Uncomment this if you want to see all scheduling decisions in the
122 * TRACE() log. 164 * TRACE() log.
123#define WANT_ALL_SCHED_EVENTS 165#define WANT_ALL_SCHED_EVENTS
124 */ 166 */
125 167
126static int cpu_lower_prio(struct bheap_node *_a, struct bheap_node *_b) 168static int cpu_lower_prio(struct binheap_node *_a, struct binheap_node *_b)
127{ 169{
128 cpu_entry_t *a, *b; 170 cpu_entry_t *a = binheap_entry(_a, cpu_entry_t, hn);
129 a = _a->value; 171 cpu_entry_t *b = binheap_entry(_b, cpu_entry_t, hn);
130 b = _b->value; 172
131 /* Note that a and b are inverted: we want the lowest-priority CPU at 173 /* Note that a and b are inverted: we want the lowest-priority CPU at
132 * the top of the heap. 174 * the top of the heap.
133 */ 175 */
134 return edf_higher_prio(b->linked, a->linked); 176 return edf_higher_prio(b->linked, a->linked);
135} 177}
136 178
179
137/* update_cpu_position - Move the cpu entry to the correct place to maintain 180/* update_cpu_position - Move the cpu entry to the correct place to maintain
138 * order in the cpu queue. Caller must hold gsnedf lock. 181 * order in the cpu queue. Caller must hold gsnedf lock.
139 */ 182 */
140static void update_cpu_position(cpu_entry_t *entry) 183static void update_cpu_position(cpu_entry_t *entry)
141{ 184{
142 if (likely(bheap_node_in_heap(entry->hn))) 185 if (likely(binheap_is_in_heap(&entry->hn))) {
143 bheap_delete(cpu_lower_prio, &gsnedf_cpu_heap, entry->hn); 186 binheap_delete(&entry->hn, &gsnedf_cpu_heap);
144 bheap_insert(cpu_lower_prio, &gsnedf_cpu_heap, entry->hn); 187 }
188 binheap_add(&entry->hn, &gsnedf_cpu_heap, cpu_entry_t, hn);
145} 189}
146 190
147/* caller must hold gsnedf lock */ 191/* caller must hold gsnedf lock */
148static cpu_entry_t* lowest_prio_cpu(void) 192static cpu_entry_t* lowest_prio_cpu(void)
149{ 193{
150 struct bheap_node* hn; 194 return binheap_top_entry(&gsnedf_cpu_heap, cpu_entry_t, hn);
151 hn = bheap_peek(cpu_lower_prio, &gsnedf_cpu_heap);
152 return hn->value;
153} 195}
154 196
155 197
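
The bheap-to-binheap migration above keeps the original trick: cpu_lower_prio() compares its arguments in reverse, so the CPU whose linked job has the lowest EDF priority (or no linked job at all) floats to the top of gsnedf_cpu_heap, which is exactly the CPU the next ready job should preempt. A standalone sketch of that inverted ordering, with invented types and a linear scan standing in for the heap (illustration only, not part of the patch):

/* Standalone illustration (not part of the patch): why cpu_lower_prio()
 * inverts its arguments.  The heap must surface the CPU whose linked job
 * has the LOWEST priority (latest deadline, or nothing linked), because
 * that is the CPU to hand the next ready job.  All names are invented.
 */
#include <stdio.h>

struct fake_cpu {
	int id;
	unsigned long long linked_deadline;	/* 0 == idle (nothing linked) */
};

/* true if 'a' should sit closer to the top of the heap than 'b',
 * i.e. 'a' currently runs the lower-priority (later-deadline) job */
static int cpu_lower_prio_sketch(const struct fake_cpu *a,
				 const struct fake_cpu *b)
{
	if (a->linked_deadline == 0)
		return 1;			/* idle CPU: lowest priority */
	if (b->linked_deadline == 0)
		return 0;
	return a->linked_deadline > b->linked_deadline;
}

int main(void)
{
	struct fake_cpu cpus[] = { {0, 100}, {1, 0 /* idle */}, {2, 40} };
	int i, top = 0;

	/* linear scan standing in for binheap_top_entry() */
	for (i = 1; i < 3; i++)
		if (cpu_lower_prio_sketch(&cpus[i], &cpus[top]))
			top = i;

	printf("preempt CPU %d first\n", cpus[top].id);	/* CPU 1 (idle) */
	return 0;
}
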
@@ -164,8 +206,17 @@ static noinline void link_task_to_cpu(struct task_struct* linked,
164 struct task_struct* tmp; 206 struct task_struct* tmp;
165 int on_cpu; 207 int on_cpu;
166 208
209 //int print = (linked != NULL || entry->linked != NULL);
210
167 BUG_ON(linked && !is_realtime(linked)); 211 BUG_ON(linked && !is_realtime(linked));
168 212
213 /*
214 if (print) {
215 TRACE_CUR("linked = %s/%d\n", (linked) ? linked->comm : "(nil)", (linked)? linked->pid : 0);
216 TRACE_CUR("entry->linked = %s/%d\n", (entry->linked) ? entry->linked->comm : "(nil)", (entry->linked)? entry->linked->pid : 0);
217 }
218 */
219
169 /* Currently linked task is set to be unlinked. */ 220 /* Currently linked task is set to be unlinked. */
170 if (entry->linked) { 221 if (entry->linked) {
171 entry->linked->rt_param.linked_on = NO_CPU; 222 entry->linked->rt_param.linked_on = NO_CPU;
@@ -201,12 +252,18 @@ static noinline void link_task_to_cpu(struct task_struct* linked,
201 linked->rt_param.linked_on = entry->cpu; 252 linked->rt_param.linked_on = entry->cpu;
202 } 253 }
203 entry->linked = linked; 254 entry->linked = linked;
204#ifdef WANT_ALL_SCHED_EVENTS 255
205 if (linked) 256 /*
206 TRACE_TASK(linked, "linked to %d.\n", entry->cpu); 257 if (print) {
207 else 258 //#ifdef WANT_ALL_SCHED_EVENTS
208 TRACE("NULL linked to %d.\n", entry->cpu); 259 if (linked)
209#endif 260 TRACE_TASK(linked, "linked to %d.\n", entry->cpu);
261 else
262 TRACE("NULL linked to %d.\n", entry->cpu);
263 //#endif
264 }
265 */
266
210 update_cpu_position(entry); 267 update_cpu_position(entry);
211} 268}
212 269
@@ -251,8 +308,17 @@ static noinline void requeue(struct task_struct* task)
251 /* sanity check before insertion */ 308 /* sanity check before insertion */
252 BUG_ON(is_queued(task)); 309 BUG_ON(is_queued(task));
253 310
254 if (is_released(task, litmus_clock())) 311 if (is_released(task, litmus_clock())) {
255 __add_ready(&gsnedf, task); 312#ifdef CONFIG_REALTIME_AUX_TASKS
313 if (unlikely(tsk_rt(task)->is_aux_task && !is_running(task))) {
314 /* aux_task probably transitioned to real-time while it was blocked */
315 TRACE_CUR("aux task %s/%d is not ready!\n", task->comm, task->pid);
316 unlink(task); /* really needed? */
317 }
318 else
319#endif
320 __add_ready(&gsnedf, task);
321 }
256 else { 322 else {
257 /* it has got to wait */ 323 /* it has got to wait */
258 add_release(&gsnedf, task); 324 add_release(&gsnedf, task);
@@ -326,6 +392,7 @@ static void gsnedf_release_jobs(rt_domain_t* rt, struct bheap* tasks)
326 raw_spin_lock_irqsave(&gsnedf_lock, flags); 392 raw_spin_lock_irqsave(&gsnedf_lock, flags);
327 393
328 __merge_ready(rt, tasks); 394 __merge_ready(rt, tasks);
395
329 check_for_preemptions(); 396 check_for_preemptions();
330 397
331 raw_spin_unlock_irqrestore(&gsnedf_lock, flags); 398 raw_spin_unlock_irqrestore(&gsnedf_lock, flags);
@@ -338,12 +405,17 @@ static noinline void job_completion(struct task_struct *t, int forced)
338 405
339 sched_trace_task_completion(t, forced); 406 sched_trace_task_completion(t, forced);
340 407
408#ifdef CONFIG_LITMUS_NVIDIA
409 atomic_set(&tsk_rt(t)->nv_int_count, 0);
410#endif
411
341 TRACE_TASK(t, "job_completion().\n"); 412 TRACE_TASK(t, "job_completion().\n");
342 413
343 /* set flags */ 414 /* set flags */
344 tsk_rt(t)->completed = 1; 415 tsk_rt(t)->completed = 1;
345 /* prepare for next period */ 416 /* prepare for next period */
346 prepare_for_next_period(t); 417 prepare_for_next_period(t);
418
347 if (is_released(t, litmus_clock())) 419 if (is_released(t, litmus_clock()))
348 sched_trace_task_release(t); 420 sched_trace_task_release(t);
349 /* unlink */ 421 /* unlink */
@@ -362,24 +434,350 @@ static noinline void job_completion(struct task_struct *t, int forced)
362 */ 434 */
363static void gsnedf_tick(struct task_struct* t) 435static void gsnedf_tick(struct task_struct* t)
364{ 436{
365 if (is_realtime(t) && budget_enforced(t) && budget_exhausted(t)) { 437 if (is_realtime(t) && budget_exhausted(t))
366 if (!is_np(t)) { 438 {
367 /* np tasks will be preempted when they become 439 if (budget_signalled(t) && !sigbudget_sent(t)) {
368 * preemptable again 440 /* signal exhaustion */
369 */ 441 send_sigbudget(t);
370 litmus_reschedule_local(); 442 }
371 TRACE("gsnedf_scheduler_tick: " 443
372 "%d is preemptable " 444 if (budget_enforced(t)) {
373 " => FORCE_RESCHED\n", t->pid); 445 if (!is_np(t)) {
374 } else if (is_user_np(t)) { 446 /* np tasks will be preempted when they become
375 TRACE("gsnedf_scheduler_tick: " 447 * preemptable again
376 "%d is non-preemptable, " 448 */
377 "preemption delayed.\n", t->pid); 449 litmus_reschedule_local();
378 request_exit_np(t); 450 TRACE("gsnedf_scheduler_tick: "
451 "%d is preemptable "
452 " => FORCE_RESCHED\n", t->pid);
453 } else if (is_user_np(t)) {
454 TRACE("gsnedf_scheduler_tick: "
455 "%d is non-preemptable, "
456 "preemption delayed.\n", t->pid);
457 request_exit_np(t);
458 }
459 }
460 }
461
462 /*
463 if(is_realtime(t)) {
464 TRACE_TASK(t, "tick %llu\n", litmus_clock());
465 }
466 */
467}
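
The rewritten tick handler now treats budget exhaustion as two independent concerns: a one-shot notification (budget_signalled()/sigbudget_sent() guarding send_sigbudget()) and the pre-existing enforcement path (budget_enforced(), with the usual non-preemptive-section escape hatch). A simplified model of that decision flow, using stand-in predicates rather than the real LITMUS^RT helpers (sketch only):

/* Simplified decision model (illustration only, not the kernel code).
 * The booleans stand in for the LITMUS^RT predicates named in the patch.
 */
#include <stdbool.h>
#include <stdio.h>

struct job_state {
	bool budget_exhausted;	/* budget_exhausted() */
	bool wants_signal;	/* budget_signalled() */
	bool signal_sent;	/* sigbudget_sent() */
	bool enforced;		/* budget_enforced() */
	bool non_preemptive;	/* is_np(); the real code also checks is_user_np() */
};

static void on_tick(struct job_state *j)
{
	if (!j->budget_exhausted)
		return;

	/* 1) one-shot notification to user space */
	if (j->wants_signal && !j->signal_sent) {
		printf("send_sigbudget()\n");
		j->signal_sent = true;
	}

	/* 2) enforcement: preempt now, or ask the np section to exit first */
	if (j->enforced) {
		if (!j->non_preemptive)
			printf("litmus_reschedule_local()\n");
		else
			printf("request_exit_np()\n");
	}
}

int main(void)
{
	struct job_state j = { true, true, false, true, false };

	on_tick(&j);	/* prints the signal action, then the reschedule */
	on_tick(&j);	/* signal already sent; only enforcement repeats */
	return 0;
}
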
468
469
470
471
472#ifdef CONFIG_LITMUS_PAI_SOFTIRQD
473
474
475static void __do_lit_tasklet(struct tasklet_struct* tasklet, unsigned long flushed)
476{
477 if (!atomic_read(&tasklet->count)) {
478 if(tasklet->owner) {
479 sched_trace_tasklet_begin(tasklet->owner);
480 }
481
482 if (!test_and_clear_bit(TASKLET_STATE_SCHED, &tasklet->state))
483 {
484 BUG();
485 }
486 TRACE("%s: Invoking tasklet with owner pid = %d (flushed = %d).\n",
487 __FUNCTION__,
488 (tasklet->owner) ? tasklet->owner->pid : -1,
489 (tasklet->owner) ? 0 : 1);
490 tasklet->func(tasklet->data);
491 tasklet_unlock(tasklet);
492
493 if(tasklet->owner) {
494 sched_trace_tasklet_end(tasklet->owner, flushed);
495 }
496 }
497 else {
498 BUG();
499 }
500}
501
502static void do_lit_tasklets(struct task_struct* sched_task)
503{
504 int work_to_do = 1;
505 struct tasklet_struct *tasklet = NULL;
506 unsigned long flags;
507
508 while(work_to_do) {
509
510 TS_NV_SCHED_BOTISR_START;
511
512 // execute one tasklet that has higher priority
513 raw_spin_lock_irqsave(&gsnedf_lock, flags);
514
515 if(gsnedf_pending_tasklets.head != NULL) {
516 struct tasklet_struct *prev = NULL;
517 tasklet = gsnedf_pending_tasklets.head;
518
519 while(tasklet && edf_higher_prio(sched_task, tasklet->owner)) {
520 prev = tasklet;
521 tasklet = tasklet->next;
522 }
523
524 // remove the tasklet from the queue
525 if(prev) {
526 prev->next = tasklet->next;
527 if(prev->next == NULL) {
528 TRACE("%s: Tasklet for %d is the last element in tasklet queue.\n", __FUNCTION__, tasklet->owner->pid);
 529 gsnedf_pending_tasklets.tail = &(prev->next);
530 }
531 }
532 else {
533 gsnedf_pending_tasklets.head = tasklet->next;
534 if(tasklet->next == NULL) {
535 TRACE("%s: Tasklet for %d is the last element in tasklet queue.\n", __FUNCTION__, tasklet->owner->pid);
536 gsnedf_pending_tasklets.tail = &(gsnedf_pending_tasklets.head);
537 }
538 }
539 }
540 else {
541 TRACE("%s: Tasklet queue is empty.\n", __FUNCTION__);
542 }
543
544 raw_spin_unlock_irqrestore(&gsnedf_lock, flags);
545
546 if(tasklet) {
547 __do_lit_tasklet(tasklet, 0ul);
548 tasklet = NULL;
549 }
550 else {
551 work_to_do = 0;
552 }
553
554 TS_NV_SCHED_BOTISR_END;
555 }
556}
557
558//static void do_lit_tasklets(struct task_struct* sched_task)
559//{
560// int work_to_do = 1;
561// struct tasklet_struct *tasklet = NULL;
562// //struct tasklet_struct *step;
563// unsigned long flags;
564//
565// while(work_to_do) {
566//
567// TS_NV_SCHED_BOTISR_START;
568//
569// // remove tasklet at head of list if it has higher priority.
570// raw_spin_lock_irqsave(&gsnedf_lock, flags);
571//
572// if(gsnedf_pending_tasklets.head != NULL) {
573// // remove tasklet at head.
574// tasklet = gsnedf_pending_tasklets.head;
575//
576// if(edf_higher_prio(tasklet->owner, sched_task)) {
577//
578// if(NULL == tasklet->next) {
579// // tasklet is at the head, list only has one element
580// TRACE("%s: Tasklet for %d is the last element in tasklet queue.\n", __FUNCTION__, tasklet->owner->pid);
581// gsnedf_pending_tasklets.tail = &(gsnedf_pending_tasklets.head);
582// }
583//
584// // remove the tasklet from the queue
585// gsnedf_pending_tasklets.head = tasklet->next;
586//
587// TRACE("%s: Removed tasklet for %d from tasklet queue.\n", __FUNCTION__, tasklet->owner->pid);
588// }
589// else {
590// TRACE("%s: Pending tasklet (%d) does not have priority to run on this CPU (%d).\n", __FUNCTION__, tasklet->owner->pid, smp_processor_id());
591// tasklet = NULL;
592// }
593// }
594// else {
595// TRACE("%s: Tasklet queue is empty.\n", __FUNCTION__);
596// }
597//
598// raw_spin_unlock_irqrestore(&gsnedf_lock, flags);
599//
600// TS_NV_SCHED_BOTISR_END;
601//
602// if(tasklet) {
603// __do_lit_tasklet(tasklet, 0ul);
604// tasklet = NULL;
605// }
606// else {
607// work_to_do = 0;
608// }
609// }
610//
611// //TRACE("%s: exited.\n", __FUNCTION__);
612//}
613
614static void __add_pai_tasklet(struct tasklet_struct* tasklet)
615{
616 struct tasklet_struct* step;
617
618 tasklet->next = NULL; // make sure there are no old values floating around
619
620 step = gsnedf_pending_tasklets.head;
621 if(step == NULL) {
622 TRACE("%s: tasklet queue empty. inserting tasklet for %d at head.\n", __FUNCTION__, tasklet->owner->pid);
623 // insert at tail.
624 *(gsnedf_pending_tasklets.tail) = tasklet;
625 gsnedf_pending_tasklets.tail = &(tasklet->next);
626 }
627 else if((*(gsnedf_pending_tasklets.tail) != NULL) &&
628 edf_higher_prio((*(gsnedf_pending_tasklets.tail))->owner, tasklet->owner)) {
629 // insert at tail.
630 TRACE("%s: tasklet belongs at end. inserting tasklet for %d at tail.\n", __FUNCTION__, tasklet->owner->pid);
631
632 *(gsnedf_pending_tasklets.tail) = tasklet;
633 gsnedf_pending_tasklets.tail = &(tasklet->next);
634 }
635 else {
636 // insert the tasklet somewhere in the middle.
637
638 TRACE("%s: tasklet belongs somewhere in the middle.\n", __FUNCTION__);
639
640 while(step->next && edf_higher_prio(step->next->owner, tasklet->owner)) {
641 step = step->next;
642 }
643
644 // insert tasklet right before step->next.
645
646 TRACE("%s: inserting tasklet for %d between %d and %d.\n", __FUNCTION__, tasklet->owner->pid, step->owner->pid, (step->next) ? step->next->owner->pid : -1);
647
648 tasklet->next = step->next;
649 step->next = tasklet;
650
651 // patch up the head if needed.
652 if(gsnedf_pending_tasklets.head == step)
653 {
654 TRACE("%s: %d is the new tasklet queue head.\n", __FUNCTION__, tasklet->owner->pid);
655 gsnedf_pending_tasklets.head = tasklet;
656 }
657 }
658}
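
__add_pai_tasklet() keeps the pending tasklets in a singly linked list ordered by owner priority, with tail stored as a pointer to the last next field so that appends never walk the list (for the same reason, when do_lit_tasklets() unlinks the final element, tail must be pointed back at a real next field, never at a local variable). A distilled, self-contained version of that pattern (invented names, not the kernel code):

/* Standalone sketch of the queue discipline used by __add_pai_tasklet():
 * a singly linked list ordered by priority, with 'tail' kept as a pointer
 * to the last 'next' field so appending never needs a list walk.
 */
#include <stdio.h>

struct item {
	int prio;		/* larger == higher priority here */
	struct item *next;
};

struct queue {
	struct item *head;
	struct item **tail;	/* &head when empty, else &last->next */
};

static void queue_init(struct queue *q)
{
	q->head = NULL;
	q->tail = &q->head;
}

static void queue_add(struct queue *q, struct item *it)
{
	struct item **pos = &q->head;

	it->next = NULL;
	while (*pos && (*pos)->prio >= it->prio)
		pos = &(*pos)->next;

	it->next = *pos;
	*pos = it;
	if (!it->next)			/* inserted at the end: fix tail */
		q->tail = &it->next;
}

int main(void)
{
	struct queue q;
	struct item a = { 10 }, b = { 30 }, c = { 20 };
	struct item *p;

	queue_init(&q);
	queue_add(&q, &a);
	queue_add(&q, &b);
	queue_add(&q, &c);

	for (p = q.head; p; p = p->next)
		printf("%d ", p->prio);	/* prints: 30 20 10 */
	printf("\n");
	return 0;
}
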
659
660static void gsnedf_run_tasklets(struct task_struct* sched_task)
661{
662 preempt_disable();
663
664 if(gsnedf_pending_tasklets.head != NULL) {
665 TRACE("%s: There are tasklets to process.\n", __FUNCTION__);
666 do_lit_tasklets(sched_task);
667 }
668
669 preempt_enable_no_resched();
670}
671
672static int gsnedf_enqueue_pai_tasklet(struct tasklet_struct* tasklet)
673{
674 cpu_entry_t *targetCPU = NULL;
675 int thisCPU;
676 int runLocal = 0;
677 int runNow = 0;
678 unsigned long flags;
679
680 if(unlikely((tasklet->owner == NULL) || !is_realtime(tasklet->owner)))
681 {
682 TRACE("%s: No owner associated with this tasklet!\n", __FUNCTION__);
683 return 0;
684 }
685
686
687 raw_spin_lock_irqsave(&gsnedf_lock, flags);
688
689 thisCPU = smp_processor_id();
690
691#ifdef CONFIG_SCHED_CPU_AFFINITY
692 {
693 cpu_entry_t* affinity = NULL;
694
695 // use this CPU if it is in our cluster and isn't running any RT work.
696 if(
697#ifdef CONFIG_RELEASE_MASTER
698 (thisCPU != gsnedf.release_master) &&
699#endif
700 (__get_cpu_var(gsnedf_cpu_entries).linked == NULL)) {
701 affinity = &(__get_cpu_var(gsnedf_cpu_entries));
702 }
703 else {
704 // this CPU is busy or shouldn't run tasklet in this cluster.
 705 // look for available nearby CPUs.
706 // NOTE: Affinity towards owner and not this CPU. Is this right?
707 affinity =
708 gsnedf_get_nearest_available_cpu(
709 &per_cpu(gsnedf_cpu_entries, task_cpu(tasklet->owner)));
710 }
711
712 targetCPU = affinity;
713 }
714#endif
715
716 if (targetCPU == NULL) {
717 targetCPU = lowest_prio_cpu();
718 }
719
720 if (edf_higher_prio(tasklet->owner, targetCPU->linked)) {
721 if (thisCPU == targetCPU->cpu) {
722 TRACE("%s: Run tasklet locally (and now).\n", __FUNCTION__);
723 runLocal = 1;
724 runNow = 1;
725 }
726 else {
727 TRACE("%s: Run tasklet remotely (and now).\n", __FUNCTION__);
728 runLocal = 0;
729 runNow = 1;
730 }
731 }
732 else {
733 runLocal = 0;
734 runNow = 0;
735 }
736
737 if(!runLocal) {
738 // enqueue the tasklet
739 __add_pai_tasklet(tasklet);
740 }
741
742 raw_spin_unlock_irqrestore(&gsnedf_lock, flags);
743
744
745 if (runLocal /*&& runNow */) { // runNow == 1 is implied
746 TRACE("%s: Running tasklet on CPU where it was received.\n", __FUNCTION__);
747 __do_lit_tasklet(tasklet, 0ul);
748 }
749 else if (runNow /*&& !runLocal */) { // runLocal == 0 is implied
750 TRACE("%s: Triggering CPU %d to run tasklet.\n", __FUNCTION__, targetCPU->cpu);
751 preempt(targetCPU); // need to be protected by cedf_lock?
752 }
753 else {
754 TRACE("%s: Scheduling of tasklet was deferred.\n", __FUNCTION__);
755 }
756
757 return(1); // success
758}
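
gsnedf_enqueue_pai_tasklet() boils down to a three-way decision: run the tasklet immediately on this CPU, queue it and kick the remote target CPU, or just queue it for later. A minimal model of that classification, with invented names and integer deadlines standing in for edf_higher_prio() (sketch only):

/* Minimal model of the dispatch decision in gsnedf_enqueue_pai_tasklet().
 * Smaller deadline == higher priority, as in EDF.
 */
#include <stdio.h>

enum pai_action { PAI_RUN_LOCAL, PAI_PREEMPT_REMOTE, PAI_DEFER };

static enum pai_action classify(int owner_dl, int target_linked_dl,
				int this_cpu, int target_cpu)
{
	if (owner_dl < target_linked_dl)	/* owner outranks target's task */
		return (this_cpu == target_cpu) ? PAI_RUN_LOCAL
						: PAI_PREEMPT_REMOTE;
	return PAI_DEFER;			/* queued until it has priority */
}

int main(void)
{
	printf("%d %d %d\n",
	       classify(10, 50, 0, 0),		/* run locally, now */
	       classify(10, 50, 0, 3),		/* enqueue and preempt CPU 3 */
	       classify(90, 50, 0, 3));		/* defer */
	return 0;
}
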
759
760static void gsnedf_change_prio_pai_tasklet(struct task_struct *old_prio,
761 struct task_struct *new_prio)
762{
763 struct tasklet_struct* step;
764 unsigned long flags;
765
766 if(gsnedf_pending_tasklets.head != NULL) {
767 raw_spin_lock_irqsave(&gsnedf_lock, flags);
768 for(step = gsnedf_pending_tasklets.head; step != NULL; step = step->next) {
769 if(step->owner == old_prio) {
770 TRACE("%s: Found tasklet to change: %d\n", __FUNCTION__, step->owner->pid);
771 step->owner = new_prio;
772 }
379 } 773 }
774 raw_spin_unlock_irqrestore(&gsnedf_lock, flags);
380 } 775 }
381} 776}
382 777
778#endif // end PAI
779
780
383/* Getting schedule() right is a bit tricky. schedule() may not make any 781/* Getting schedule() right is a bit tricky. schedule() may not make any
384 * assumptions on the state of the current task since it may be called for a 782 * assumptions on the state of the current task since it may be called for a
385 * number of reasons. The reasons include a scheduler_tick() determined that it 783 * number of reasons. The reasons include a scheduler_tick() determined that it
@@ -404,9 +802,11 @@ static void gsnedf_tick(struct task_struct* t)
404static struct task_struct* gsnedf_schedule(struct task_struct * prev) 802static struct task_struct* gsnedf_schedule(struct task_struct * prev)
405{ 803{
406 cpu_entry_t* entry = &__get_cpu_var(gsnedf_cpu_entries); 804 cpu_entry_t* entry = &__get_cpu_var(gsnedf_cpu_entries);
407 int out_of_time, sleep, preempt, np, exists, blocks; 805 int out_of_time, signal_budget, sleep, preempt, np, exists, blocks;
408 struct task_struct* next = NULL; 806 struct task_struct* next = NULL;
409 807
808 //int completion = 0;
809
410#ifdef CONFIG_RELEASE_MASTER 810#ifdef CONFIG_RELEASE_MASTER
411 /* Bail out early if we are the release master. 811 /* Bail out early if we are the release master.
412 * The release master never schedules any real-time tasks. 812 * The release master never schedules any real-time tasks.
@@ -427,8 +827,13 @@ static struct task_struct* gsnedf_schedule(struct task_struct * prev)
427 /* (0) Determine state */ 827 /* (0) Determine state */
428 exists = entry->scheduled != NULL; 828 exists = entry->scheduled != NULL;
429 blocks = exists && !is_running(entry->scheduled); 829 blocks = exists && !is_running(entry->scheduled);
430 out_of_time = exists && budget_enforced(entry->scheduled) 830 out_of_time = exists &&
431 && budget_exhausted(entry->scheduled); 831 budget_enforced(entry->scheduled) &&
832 budget_exhausted(entry->scheduled);
833 signal_budget = exists &&
834 budget_signalled(entry->scheduled) &&
835 budget_exhausted(entry->scheduled) &&
836 !sigbudget_sent(entry->scheduled);
432 np = exists && is_np(entry->scheduled); 837 np = exists && is_np(entry->scheduled);
433 sleep = exists && is_completed(entry->scheduled); 838 sleep = exists && is_completed(entry->scheduled);
434 preempt = entry->scheduled != entry->linked; 839 preempt = entry->scheduled != entry->linked;
@@ -437,21 +842,36 @@ static struct task_struct* gsnedf_schedule(struct task_struct * prev)
437 TRACE_TASK(prev, "invoked gsnedf_schedule.\n"); 842 TRACE_TASK(prev, "invoked gsnedf_schedule.\n");
438#endif 843#endif
439 844
440 if (exists) 845 if (exists) {
441 TRACE_TASK(prev, 846 TRACE_TASK(prev,
442 "blocks:%d out_of_time:%d np:%d sleep:%d preempt:%d " 847 "blocks:%d out_of_time:%d signal_budget: %d np:%d sleep:%d preempt:%d "
443 "state:%d sig:%d\n", 848 "state:%d sig:%d\n",
444 blocks, out_of_time, np, sleep, preempt, 849 blocks, out_of_time, signal_budget, np, sleep, preempt,
445 prev->state, signal_pending(prev)); 850 prev->state, signal_pending(prev));
851 }
852
446 if (entry->linked && preempt) 853 if (entry->linked && preempt)
447 TRACE_TASK(prev, "will be preempted by %s/%d\n", 854 TRACE_TASK(prev, "will be preempted by %s/%d\n",
448 entry->linked->comm, entry->linked->pid); 855 entry->linked->comm, entry->linked->pid);
449 856
857 /* Send the signal that the budget has been exhausted */
858 if (signal_budget) {
859 send_sigbudget(entry->scheduled);
860 }
450 861
451 /* If a task blocks we have no choice but to reschedule. 862 /* If a task blocks we have no choice but to reschedule.
452 */ 863 */
453 if (blocks) 864 if (blocks) {
454 unlink(entry->scheduled); 865 unlink(entry->scheduled);
866 }
867
868#if defined(CONFIG_LITMUS_NVIDIA) && defined(CONFIG_LITMUS_AFFINITY_LOCKING)
869 if(exists && is_realtime(entry->scheduled) && tsk_rt(entry->scheduled)->held_gpus) {
870 if(!blocks || tsk_rt(entry->scheduled)->suspend_gpu_tracker_on_block) {
871 stop_gpu_tracker(entry->scheduled);
872 }
873 }
874#endif
455 875
456 /* Request a sys_exit_np() call if we would like to preempt but cannot. 876 /* Request a sys_exit_np() call if we would like to preempt but cannot.
457 * We need to make sure to update the link structure anyway in case 877 * We need to make sure to update the link structure anyway in case
@@ -468,8 +888,10 @@ static struct task_struct* gsnedf_schedule(struct task_struct * prev)
468 * this. Don't do a job completion if we block (can't have timers running 888 * this. Don't do a job completion if we block (can't have timers running
469 * for blocked jobs). 889 * for blocked jobs).
470 */ 890 */
471 if (!np && (out_of_time || sleep) && !blocks) 891 if (!np && (out_of_time || sleep) && !blocks) {
472 job_completion(entry->scheduled, !sleep); 892 job_completion(entry->scheduled, !sleep);
893 //completion = 1;
894 }
473 895
474 /* Link pending task if we became unlinked. 896 /* Link pending task if we became unlinked.
475 */ 897 */
@@ -492,12 +914,21 @@ static struct task_struct* gsnedf_schedule(struct task_struct * prev)
492 entry->scheduled->rt_param.scheduled_on = NO_CPU; 914 entry->scheduled->rt_param.scheduled_on = NO_CPU;
493 TRACE_TASK(entry->scheduled, "scheduled_on = NO_CPU\n"); 915 TRACE_TASK(entry->scheduled, "scheduled_on = NO_CPU\n");
494 } 916 }
495 } else 917 }
918 else
919 {
496 /* Only override Linux scheduler if we have a real-time task 920 /* Only override Linux scheduler if we have a real-time task
497 * scheduled that needs to continue. 921 * scheduled that needs to continue.
498 */ 922 */
499 if (exists) 923 if (exists)
500 next = prev; 924 next = prev;
925 }
926
927#if 0
928 if (completion) {
929 TRACE_CUR("switching away from a completion\n");
930 }
931#endif
501 932
502 sched_state_task_picked(); 933 sched_state_task_picked();
503 934
@@ -512,7 +943,6 @@ static struct task_struct* gsnedf_schedule(struct task_struct * prev)
512 TRACE("becomes idle at %llu.\n", litmus_clock()); 943 TRACE("becomes idle at %llu.\n", litmus_clock());
513#endif 944#endif
514 945
515
516 return next; 946 return next;
517} 947}
518 948
@@ -524,6 +954,7 @@ static void gsnedf_finish_switch(struct task_struct *prev)
524 cpu_entry_t* entry = &__get_cpu_var(gsnedf_cpu_entries); 954 cpu_entry_t* entry = &__get_cpu_var(gsnedf_cpu_entries);
525 955
526 entry->scheduled = is_realtime(current) ? current : NULL; 956 entry->scheduled = is_realtime(current) ? current : NULL;
957
527#ifdef WANT_ALL_SCHED_EVENTS 958#ifdef WANT_ALL_SCHED_EVENTS
528 TRACE_TASK(prev, "switched away from\n"); 959 TRACE_TASK(prev, "switched away from\n");
529#endif 960#endif
@@ -537,7 +968,7 @@ static void gsnedf_task_new(struct task_struct * t, int on_rq, int running)
537 unsigned long flags; 968 unsigned long flags;
538 cpu_entry_t* entry; 969 cpu_entry_t* entry;
539 970
540 TRACE("gsn edf: task new %d\n", t->pid); 971 TRACE("gsn edf: task new = %d on_rq = %d running = %d\n", t->pid, on_rq, running);
541 972
542 raw_spin_lock_irqsave(&gsnedf_lock, flags); 973 raw_spin_lock_irqsave(&gsnedf_lock, flags);
543 974
@@ -572,11 +1003,14 @@ static void gsnedf_task_new(struct task_struct * t, int on_rq, int running)
572static void gsnedf_task_wake_up(struct task_struct *task) 1003static void gsnedf_task_wake_up(struct task_struct *task)
573{ 1004{
574 unsigned long flags; 1005 unsigned long flags;
575 lt_t now; 1006 //lt_t now;
576 1007
577 TRACE_TASK(task, "wake_up at %llu\n", litmus_clock()); 1008 TRACE_TASK(task, "wake_up at %llu\n", litmus_clock());
578 1009
579 raw_spin_lock_irqsave(&gsnedf_lock, flags); 1010 raw_spin_lock_irqsave(&gsnedf_lock, flags);
1011
1012#if 0
1013 /* sporadic task model. will increment job numbers automatically */
580 now = litmus_clock(); 1014 now = litmus_clock();
581 if (is_tardy(task, now)) { 1015 if (is_tardy(task, now)) {
582 /* new sporadic release */ 1016 /* new sporadic release */
@@ -590,6 +1024,25 @@ static void gsnedf_task_wake_up(struct task_struct *task)
590 tsk_rt(task)->completed = 0; 1024 tsk_rt(task)->completed = 0;
591 } 1025 }
592 } 1026 }
1027#else
1028 /* don't force job to end. rely on user to say when jobs complete */
1029 tsk_rt(task)->completed = 0;
1030#endif
1031
1032#ifdef CONFIG_REALTIME_AUX_TASKS
1033 if (tsk_rt(task)->has_aux_tasks && !tsk_rt(task)->hide_from_aux_tasks) {
1034 TRACE_CUR("%s/%d is ready so aux tasks may not inherit.\n", task->comm, task->pid);
1035 disable_aux_task_owner(task);
1036 }
1037#endif
1038
1039#ifdef CONFIG_LITMUS_NVIDIA
1040 if (tsk_rt(task)->held_gpus && !tsk_rt(task)->hide_from_gpu) {
1041 TRACE_CUR("%s/%d is ready so gpu klmirqd tasks may not inherit.\n", task->comm, task->pid);
1042 disable_gpu_owner(task);
1043 }
1044#endif
1045
593 gsnedf_job_arrival(task); 1046 gsnedf_job_arrival(task);
594 raw_spin_unlock_irqrestore(&gsnedf_lock, flags); 1047 raw_spin_unlock_irqrestore(&gsnedf_lock, flags);
595} 1048}
@@ -602,7 +1055,25 @@ static void gsnedf_task_block(struct task_struct *t)
602 1055
603 /* unlink if necessary */ 1056 /* unlink if necessary */
604 raw_spin_lock_irqsave(&gsnedf_lock, flags); 1057 raw_spin_lock_irqsave(&gsnedf_lock, flags);
1058
605 unlink(t); 1059 unlink(t);
1060
1061#ifdef CONFIG_REALTIME_AUX_TASKS
1062 if (tsk_rt(t)->has_aux_tasks && !tsk_rt(t)->hide_from_aux_tasks) {
1063
1064 TRACE_CUR("%s/%d is blocked so aux tasks may inherit.\n", t->comm, t->pid);
1065 enable_aux_task_owner(t);
1066 }
1067#endif
1068
1069#ifdef CONFIG_LITMUS_NVIDIA
1070 if (tsk_rt(t)->held_gpus && !tsk_rt(t)->hide_from_gpu) {
1071
 1072 TRACE_CUR("%s/%d is blocked so gpu klmirqd tasks may inherit.\n", t->comm, t->pid);
1073 enable_gpu_owner(t);
1074 }
1075#endif
1076
606 raw_spin_unlock_irqrestore(&gsnedf_lock, flags); 1077 raw_spin_unlock_irqrestore(&gsnedf_lock, flags);
607 1078
608 BUG_ON(!is_realtime(t)); 1079 BUG_ON(!is_realtime(t));
@@ -613,8 +1084,30 @@ static void gsnedf_task_exit(struct task_struct * t)
613{ 1084{
614 unsigned long flags; 1085 unsigned long flags;
615 1086
1087#ifdef CONFIG_LITMUS_PAI_SOFTIRQD
1088 gsnedf_change_prio_pai_tasklet(t, NULL);
1089#endif
1090
616 /* unlink if necessary */ 1091 /* unlink if necessary */
617 raw_spin_lock_irqsave(&gsnedf_lock, flags); 1092 raw_spin_lock_irqsave(&gsnedf_lock, flags);
1093
1094#ifdef CONFIG_REALTIME_AUX_TASKS
1095 /* make sure we clean up on our way out */
1096 if (unlikely(tsk_rt(t)->is_aux_task)) {
1097 exit_aux_task(t);
1098 }
1099 else if(tsk_rt(t)->has_aux_tasks) {
1100 disable_aux_task_owner(t);
1101 }
1102#endif
1103
1104#ifdef CONFIG_LITMUS_NVIDIA
1105 /* make sure we clean up on our way out */
1106 if(tsk_rt(t)->held_gpus) {
1107 disable_gpu_owner(t);
1108 }
1109#endif
1110
618 unlink(t); 1111 unlink(t);
619 if (tsk_rt(t)->scheduled_on != NO_CPU) { 1112 if (tsk_rt(t)->scheduled_on != NO_CPU) {
620 gsnedf_cpus[tsk_rt(t)->scheduled_on]->scheduled = NULL; 1113 gsnedf_cpus[tsk_rt(t)->scheduled_on]->scheduled = NULL;
@@ -623,106 +1116,413 @@ static void gsnedf_task_exit(struct task_struct * t)
623 raw_spin_unlock_irqrestore(&gsnedf_lock, flags); 1116 raw_spin_unlock_irqrestore(&gsnedf_lock, flags);
624 1117
625 BUG_ON(!is_realtime(t)); 1118 BUG_ON(!is_realtime(t));
626 TRACE_TASK(t, "RIP\n"); 1119 TRACE_TASK(t, "RIP\n");
627} 1120}
628 1121
629 1122
630static long gsnedf_admit_task(struct task_struct* tsk) 1123static long gsnedf_admit_task(struct task_struct* tsk)
631{ 1124{
1125#ifdef CONFIG_LITMUS_NESTED_LOCKING
1126 INIT_BINHEAP_HANDLE(&tsk_rt(tsk)->hp_blocked_tasks,
1127 edf_max_heap_base_priority_order);
1128#endif
1129
632 return 0; 1130 return 0;
633} 1131}
634 1132
1133
1134
1135
1136
1137
635#ifdef CONFIG_LITMUS_LOCKING 1138#ifdef CONFIG_LITMUS_LOCKING
636 1139
637#include <litmus/fdso.h> 1140#include <litmus/fdso.h>
638 1141
639/* called with IRQs off */ 1142/* called with IRQs off */
640static void set_priority_inheritance(struct task_struct* t, struct task_struct* prio_inh) 1143static int __increase_priority_inheritance(struct task_struct* t,
1144 struct task_struct* prio_inh)
641{ 1145{
1146 int success = 1;
642 int linked_on; 1147 int linked_on;
643 int check_preempt = 0; 1148 int check_preempt = 0;
644 1149
645 raw_spin_lock(&gsnedf_lock); 1150 if (prio_inh && prio_inh == effective_priority(t)) {
1151 /* relationship already established. */
1152 TRACE_TASK(t, "already has effective priority of %s/%d\n",
1153 prio_inh->comm, prio_inh->pid);
1154 goto out;
1155 }
646 1156
647 TRACE_TASK(t, "inherits priority from %s/%d\n", prio_inh->comm, prio_inh->pid); 1157#ifdef CONFIG_LITMUS_NESTED_LOCKING
648 tsk_rt(t)->inh_task = prio_inh; 1158 /* this sanity check allows for weaker locking in protocols */
649 1159 if(__edf_higher_prio(prio_inh, BASE, t, EFFECTIVE)) {
650 linked_on = tsk_rt(t)->linked_on; 1160#endif
651 1161 TRACE_TASK(t, "inherits priority from %s/%d\n",
652 /* If it is scheduled, then we need to reorder the CPU heap. */ 1162 prio_inh->comm, prio_inh->pid);
653 if (linked_on != NO_CPU) { 1163 tsk_rt(t)->inh_task = prio_inh;
654 TRACE_TASK(t, "%s: linked on %d\n", 1164
655 __FUNCTION__, linked_on); 1165 linked_on = tsk_rt(t)->linked_on;
656 /* Holder is scheduled; need to re-order CPUs. 1166
657 * We can't use heap_decrease() here since 1167 /* If it is scheduled, then we need to reorder the CPU heap. */
658 * the cpu_heap is ordered in reverse direction, so 1168 if (linked_on != NO_CPU) {
659 * it is actually an increase. */ 1169 TRACE_TASK(t, "%s: linked on %d\n",
660 bheap_delete(cpu_lower_prio, &gsnedf_cpu_heap, 1170 __FUNCTION__, linked_on);
661 gsnedf_cpus[linked_on]->hn); 1171 /* Holder is scheduled; need to re-order CPUs.
662 bheap_insert(cpu_lower_prio, &gsnedf_cpu_heap, 1172 * We can't use heap_decrease() here since
663 gsnedf_cpus[linked_on]->hn); 1173 * the cpu_heap is ordered in reverse direction, so
664 } else { 1174 * it is actually an increase. */
665 /* holder may be queued: first stop queue changes */ 1175 binheap_delete(&gsnedf_cpus[linked_on]->hn, &gsnedf_cpu_heap);
666 raw_spin_lock(&gsnedf.release_lock); 1176 binheap_add(&gsnedf_cpus[linked_on]->hn,
667 if (is_queued(t)) { 1177 &gsnedf_cpu_heap, cpu_entry_t, hn);
668 TRACE_TASK(t, "%s: is queued\n",
669 __FUNCTION__);
670 /* We need to update the position of holder in some
671 * heap. Note that this could be a release heap if we
672 * budget enforcement is used and this job overran. */
673 check_preempt =
674 !bheap_decrease(edf_ready_order,
675 tsk_rt(t)->heap_node);
676 } else { 1178 } else {
677 /* Nothing to do: if it is not queued and not linked 1179 /* holder may be queued: first stop queue changes */
678 * then it is either sleeping or currently being moved 1180 raw_spin_lock(&gsnedf.release_lock);
679 * by other code (e.g., a timer interrupt handler) that 1181 if (is_queued(t)) {
680 * will use the correct priority when enqueuing the 1182 TRACE_TASK(t, "%s: is queued\n",
681 * task. */ 1183 __FUNCTION__);
682 TRACE_TASK(t, "%s: is NOT queued => Done.\n", 1184 /* We need to update the position of holder in some
683 __FUNCTION__); 1185 * heap. Note that this could be a release heap if we
684 } 1186 * budget enforcement is used and this job overran. */
685 raw_spin_unlock(&gsnedf.release_lock); 1187 check_preempt =
686 1188 !bheap_decrease(edf_ready_order,
687 /* If holder was enqueued in a release heap, then the following 1189 tsk_rt(t)->heap_node);
688 * preemption check is pointless, but we can't easily detect 1190 } else {
689 * that case. If you want to fix this, then consider that 1191 /* Nothing to do: if it is not queued and not linked
690 * simply adding a state flag requires O(n) time to update when 1192 * then it is either sleeping or currently being moved
691 * releasing n tasks, which conflicts with the goal to have 1193 * by other code (e.g., a timer interrupt handler) that
692 * O(log n) merges. */ 1194 * will use the correct priority when enqueuing the
693 if (check_preempt) { 1195 * task. */
694 /* heap_decrease() hit the top level of the heap: make 1196 TRACE_TASK(t, "%s: is NOT queued => Done.\n",
695 * sure preemption checks get the right task, not the 1197 __FUNCTION__);
696 * potentially stale cache. */ 1198 }
697 bheap_uncache_min(edf_ready_order, 1199 raw_spin_unlock(&gsnedf.release_lock);
698 &gsnedf.ready_queue); 1200
699 check_for_preemptions(); 1201 /* If holder was enqueued in a release heap, then the following
1202 * preemption check is pointless, but we can't easily detect
1203 * that case. If you want to fix this, then consider that
1204 * simply adding a state flag requires O(n) time to update when
1205 * releasing n tasks, which conflicts with the goal to have
1206 * O(log n) merges. */
1207 if (check_preempt) {
1208 /* heap_decrease() hit the top level of the heap: make
1209 * sure preemption checks get the right task, not the
1210 * potentially stale cache. */
1211 bheap_uncache_min(edf_ready_order,
1212 &gsnedf.ready_queue);
1213 check_for_preemptions();
1214 }
1215
1216#ifdef CONFIG_REALTIME_AUX_TASKS
1217 /* propagate to aux tasks */
1218 if (tsk_rt(t)->has_aux_tasks) {
1219 aux_task_owner_increase_priority(t);
1220 }
1221#endif
1222
1223#ifdef CONFIG_LITMUS_NVIDIA
1224 /* propagate to gpu klmirqd */
1225 if (tsk_rt(t)->held_gpus) {
1226 gpu_owner_increase_priority(t);
1227 }
1228#endif
1229
700 } 1230 }
1231#ifdef CONFIG_LITMUS_NESTED_LOCKING
1232 }
1233 else {
1234 TRACE_TASK(t, "Spurious invalid priority increase. "
1235 "Inheritance request: %s/%d [eff_prio = %s/%d] to inherit from %s/%d\n"
 1236 "Occurrence is likely okay: probably due to (hopefully safe) concurrent priority updates.\n",
1237 t->comm, t->pid,
1238 effective_priority(t)->comm, effective_priority(t)->pid,
1239 (prio_inh) ? prio_inh->comm : "nil",
1240 (prio_inh) ? prio_inh->pid : -1);
1241 WARN_ON(!prio_inh);
1242 success = 0;
701 } 1243 }
1244#endif
702 1245
703 raw_spin_unlock(&gsnedf_lock); 1246out:
1247 return success;
704} 1248}
705 1249
706/* called with IRQs off */ 1250/* called with IRQs off */
707static void clear_priority_inheritance(struct task_struct* t) 1251static void increase_priority_inheritance(struct task_struct* t, struct task_struct* prio_inh)
708{ 1252{
1253 int success;
1254
709 raw_spin_lock(&gsnedf_lock); 1255 raw_spin_lock(&gsnedf_lock);
710 1256
711 /* A job only stops inheriting a priority when it releases a 1257 success = __increase_priority_inheritance(t, prio_inh);
712 * resource. Thus we can make the following assumption.*/
713 BUG_ON(tsk_rt(t)->scheduled_on == NO_CPU);
714 1258
715 TRACE_TASK(t, "priority restored\n"); 1259 raw_spin_unlock(&gsnedf_lock);
716 tsk_rt(t)->inh_task = NULL;
717 1260
718 /* Check if rescheduling is necessary. We can't use heap_decrease() 1261#if defined(CONFIG_LITMUS_PAI_SOFTIRQD) && defined(CONFIG_LITMUS_NVIDIA)
719 * since the priority was effectively lowered. */ 1262 if(tsk_rt(t)->held_gpus) {
720 unlink(t); 1263 int i;
721 gsnedf_job_arrival(t); 1264 for(i = find_first_bit(&tsk_rt(t)->held_gpus, sizeof(tsk_rt(t)->held_gpus));
1265 i < NV_DEVICE_NUM;
1266 i = find_next_bit(&tsk_rt(t)->held_gpus, sizeof(tsk_rt(t)->held_gpus), i+1)) {
1267 pai_check_priority_increase(t, i);
1268 }
1269 }
1270#endif
1271}
1272
1273
1274/* called with IRQs off */
1275static int __decrease_priority_inheritance(struct task_struct* t,
1276 struct task_struct* prio_inh)
1277{
1278 int success = 1;
1279
1280 if (prio_inh == tsk_rt(t)->inh_task) {
1281 /* relationship already established. */
1282 TRACE_TASK(t, "already inherits priority from %s/%d\n",
1283 (prio_inh) ? prio_inh->comm : "(nil)",
1284 (prio_inh) ? prio_inh->pid : 0);
1285 goto out;
1286 }
1287
1288#ifdef CONFIG_LITMUS_NESTED_LOCKING
1289 if(__edf_higher_prio(t, EFFECTIVE, prio_inh, BASE)) {
1290#endif
1291 /* A job only stops inheriting a priority when it releases a
1292 * resource. Thus we can make the following assumption.*/
1293 if(prio_inh)
1294 TRACE_TASK(t, "EFFECTIVE priority decreased to %s/%d\n",
1295 prio_inh->comm, prio_inh->pid);
1296 else
1297 TRACE_TASK(t, "base priority restored.\n");
1298
1299 tsk_rt(t)->inh_task = prio_inh;
1300
1301 if(tsk_rt(t)->scheduled_on != NO_CPU) {
1302 TRACE_TASK(t, "is scheduled.\n");
1303
1304 /* Check if rescheduling is necessary. We can't use heap_decrease()
1305 * since the priority was effectively lowered. */
1306 unlink(t);
1307 gsnedf_job_arrival(t);
1308 }
1309 else {
1310 /* task is queued */
1311 raw_spin_lock(&gsnedf.release_lock);
1312 if (is_queued(t)) {
1313 TRACE_TASK(t, "is queued.\n");
1314
1315 /* decrease in priority, so we have to re-add to binomial heap */
1316 unlink(t);
1317 gsnedf_job_arrival(t);
1318 }
1319 else {
1320 TRACE_TASK(t, "is not in scheduler. Probably on wait queue somewhere.\n");
1321 }
1322 raw_spin_unlock(&gsnedf.release_lock);
1323 }
1324
1325#ifdef CONFIG_REALTIME_AUX_TASKS
1326 /* propagate to aux tasks */
1327 if (tsk_rt(t)->has_aux_tasks) {
1328 aux_task_owner_decrease_priority(t);
1329 }
1330#endif
1331
1332#ifdef CONFIG_LITMUS_NVIDIA
1333 /* propagate to gpu */
1334 if (tsk_rt(t)->held_gpus) {
1335 gpu_owner_decrease_priority(t);
1336 }
1337#endif
1338
1339
1340#ifdef CONFIG_LITMUS_NESTED_LOCKING
1341 }
1342 else {
1343 TRACE_TASK(t, "Spurious invalid priority decrease. "
1344 "Inheritance request: %s/%d [eff_prio = %s/%d] to inherit from %s/%d\n"
 1345 "Occurrence is likely okay: probably due to (hopefully safe) concurrent priority updates.\n",
1346 t->comm, t->pid,
1347 effective_priority(t)->comm, effective_priority(t)->pid,
1348 (prio_inh) ? prio_inh->comm : "nil",
1349 (prio_inh) ? prio_inh->pid : -1);
1350 success = 0;
1351 }
1352#endif
1353
1354out:
1355 return success;
1356}
1357
1358static void decrease_priority_inheritance(struct task_struct* t,
1359 struct task_struct* prio_inh)
1360{
1361 int success;
1362
1363 raw_spin_lock(&gsnedf_lock);
1364
1365 success = __decrease_priority_inheritance(t, prio_inh);
722 1366
723 raw_spin_unlock(&gsnedf_lock); 1367 raw_spin_unlock(&gsnedf_lock);
1368
1369#if defined(CONFIG_LITMUS_PAI_SOFTIRQD) && defined(CONFIG_LITMUS_NVIDIA)
1370 if(tsk_rt(t)->held_gpus) {
1371 int i;
1372 for(i = find_first_bit(&tsk_rt(t)->held_gpus, sizeof(tsk_rt(t)->held_gpus));
1373 i < NV_DEVICE_NUM;
1374 i = find_next_bit(&tsk_rt(t)->held_gpus, sizeof(tsk_rt(t)->held_gpus), i+1)) {
1375 pai_check_priority_decrease(t, i);
1376 }
1377 }
1378#endif
1379}
1380
1381
1382
1383#ifdef CONFIG_LITMUS_NESTED_LOCKING
1384
1385/* called with IRQs off */
1386/* preconditions:
1387 (1) The 'hp_blocked_tasks_lock' of task 't' is held.
1388 (2) The lock 'to_unlock' is held.
1389 */
1390static void nested_increase_priority_inheritance(struct task_struct* t,
1391 struct task_struct* prio_inh,
1392 raw_spinlock_t *to_unlock,
1393 unsigned long irqflags)
1394{
1395 struct litmus_lock *blocked_lock = tsk_rt(t)->blocked_lock;
1396
 1397 if(tsk_rt(t)->inh_task != prio_inh) { // shield redundant calls.
1398 increase_priority_inheritance(t, prio_inh); // increase our prio.
1399 }
1400
1401 raw_spin_unlock(&tsk_rt(t)->hp_blocked_tasks_lock); // unlock the t's heap.
1402
1403
1404 if(blocked_lock) {
1405 if(blocked_lock->ops->propagate_increase_inheritance) {
1406 TRACE_TASK(t, "Inheritor is blocked (...perhaps). Checking lock %d.\n",
1407 blocked_lock->ident);
1408
1409 // beware: recursion
1410 blocked_lock->ops->propagate_increase_inheritance(blocked_lock,
1411 t, to_unlock,
1412 irqflags);
1413 }
1414 else {
1415 TRACE_TASK(t, "Inheritor is blocked on lock (%d) that does not support nesting!\n",
1416 blocked_lock->ident);
1417 unlock_fine_irqrestore(to_unlock, irqflags);
1418 }
1419 }
1420 else {
1421 TRACE_TASK(t, "is not blocked. No propagation.\n");
1422 unlock_fine_irqrestore(to_unlock, irqflags);
1423 }
1424}
1425
1426/* called with IRQs off */
1427/* preconditions:
1428 (1) The 'hp_blocked_tasks_lock' of task 't' is held.
1429 (2) The lock 'to_unlock' is held.
1430 */
1431static void nested_decrease_priority_inheritance(struct task_struct* t,
1432 struct task_struct* prio_inh,
1433 raw_spinlock_t *to_unlock,
1434 unsigned long irqflags)
1435{
1436 struct litmus_lock *blocked_lock = tsk_rt(t)->blocked_lock;
1437 decrease_priority_inheritance(t, prio_inh);
1438
1439 raw_spin_unlock(&tsk_rt(t)->hp_blocked_tasks_lock); // unlock the t's heap.
1440
1441 if(blocked_lock) {
1442 if(blocked_lock->ops->propagate_decrease_inheritance) {
1443 TRACE_TASK(t, "Inheritor is blocked (...perhaps). Checking lock %d.\n",
1444 blocked_lock->ident);
1445
1446 // beware: recursion
1447 blocked_lock->ops->propagate_decrease_inheritance(blocked_lock, t,
1448 to_unlock,
1449 irqflags);
1450 }
1451 else {
1452 TRACE_TASK(t, "Inheritor is blocked on lock (%p) that does not support nesting!\n",
1453 blocked_lock);
1454 unlock_fine_irqrestore(to_unlock, irqflags);
1455 }
1456 }
1457 else {
1458 TRACE_TASK(t, "is not blocked. No propagation.\n");
1459 unlock_fine_irqrestore(to_unlock, irqflags);
1460 }
1461}
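
The two nested_* helpers implement transitive priority inheritance: after adjusting t's own effective priority they follow tsk_rt(t)->blocked_lock and hand the change to that lock's propagate_* callback, which may recurse further down the blocking chain. A toy model of the increase direction, ignoring all locking and with invented names (illustration only):

/* Conceptual sketch of the propagation performed by
 * nested_increase_priority_inheritance(): when a task that just gained a
 * higher effective priority is itself blocked on another lock, the boost
 * is pushed to that lock's holder, and so on down the chain.
 */
#include <stdio.h>

struct toy_task {
	const char *name;
	int eff_prio;				/* smaller == higher priority */
	struct toy_task *blocked_on_holder;	/* holder of the lock we wait for */
};

static void propagate_increase(struct toy_task *t, int new_prio)
{
	if (new_prio >= t->eff_prio)
		return;				/* nothing to inherit */

	t->eff_prio = new_prio;
	printf("%s now runs with priority %d\n", t->name, t->eff_prio);

	if (t->blocked_on_holder)		/* beware: recursion, as in the patch */
		propagate_increase(t->blocked_on_holder, new_prio);
}

int main(void)
{
	struct toy_task c = { "C (holds L2)", 50, NULL };
	struct toy_task b = { "B (holds L1, waits on L2)", 30, &c };

	/* A (priority 10) blocks on L1 held by B: boost B, then C. */
	propagate_increase(&b, 10);
	return 0;
}
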
1462
1463
1464/* ******************** RSM MUTEX ********************** */
1465
1466static struct litmus_lock_ops gsnedf_rsm_mutex_lock_ops = {
1467 .lock = rsm_mutex_lock,
1468 .unlock = rsm_mutex_unlock,
1469 .close = rsm_mutex_close,
1470 .deallocate = rsm_mutex_free,
1471
1472 .propagate_increase_inheritance = rsm_mutex_propagate_increase_inheritance,
1473 .propagate_decrease_inheritance = rsm_mutex_propagate_decrease_inheritance,
1474
1475#ifdef CONFIG_LITMUS_DGL_SUPPORT
1476 .dgl_lock = rsm_mutex_dgl_lock,
1477 .is_owner = rsm_mutex_is_owner,
1478 .enable_priority = rsm_mutex_enable_priority,
1479#endif
1480};
1481
1482static struct litmus_lock* gsnedf_new_rsm_mutex(void)
1483{
1484 return rsm_mutex_new(&gsnedf_rsm_mutex_lock_ops);
724} 1485}
725 1486
1487/* ******************** IKGLP ********************** */
1488
1489static struct litmus_lock_ops gsnedf_ikglp_lock_ops = {
1490 .lock = ikglp_lock,
1491 .unlock = ikglp_unlock,
1492 .close = ikglp_close,
1493 .deallocate = ikglp_free,
1494
1495 // ikglp can only be an outer-most lock.
1496 .propagate_increase_inheritance = NULL,
1497 .propagate_decrease_inheritance = NULL,
1498};
1499
1500static struct litmus_lock* gsnedf_new_ikglp(void* __user arg)
1501{
1502 return ikglp_new(num_online_cpus(), &gsnedf_ikglp_lock_ops, arg);
1503}
1504
1505#endif /* CONFIG_LITMUS_NESTED_LOCKING */
1506
1507
1508/* ******************** KFMLP support ********************** */
1509
1510static struct litmus_lock_ops gsnedf_kfmlp_lock_ops = {
1511 .lock = kfmlp_lock,
1512 .unlock = kfmlp_unlock,
1513 .close = kfmlp_close,
1514 .deallocate = kfmlp_free,
1515
1516 // kfmlp can only be an outer-most lock.
1517 .propagate_increase_inheritance = NULL,
1518 .propagate_decrease_inheritance = NULL,
1519};
1520
1521
1522static struct litmus_lock* gsnedf_new_kfmlp(void* __user arg)
1523{
1524 return kfmlp_new(&gsnedf_kfmlp_lock_ops, arg);
1525}
726 1526
727/* ******************** FMLP support ********************** */ 1527/* ******************** FMLP support ********************** */
728 1528
@@ -789,7 +1589,7 @@ int gsnedf_fmlp_lock(struct litmus_lock* l)
789 if (edf_higher_prio(t, sem->hp_waiter)) { 1589 if (edf_higher_prio(t, sem->hp_waiter)) {
790 sem->hp_waiter = t; 1590 sem->hp_waiter = t;
791 if (edf_higher_prio(t, sem->owner)) 1591 if (edf_higher_prio(t, sem->owner))
792 set_priority_inheritance(sem->owner, sem->hp_waiter); 1592 increase_priority_inheritance(sem->owner, sem->hp_waiter);
793 } 1593 }
794 1594
795 TS_LOCK_SUSPEND; 1595 TS_LOCK_SUSPEND;
@@ -802,7 +1602,7 @@ int gsnedf_fmlp_lock(struct litmus_lock* l)
802 * there is only one wake up per release. 1602 * there is only one wake up per release.
803 */ 1603 */
804 1604
805 schedule(); 1605 suspend_for_lock();
806 1606
807 TS_LOCK_RESUME; 1607 TS_LOCK_RESUME;
808 1608
@@ -857,7 +1657,7 @@ int gsnedf_fmlp_unlock(struct litmus_lock* l)
857 /* Well, if next is not the highest-priority waiter, 1657 /* Well, if next is not the highest-priority waiter,
858 * then it ought to inherit the highest-priority 1658 * then it ought to inherit the highest-priority
859 * waiter's priority. */ 1659 * waiter's priority. */
860 set_priority_inheritance(next, sem->hp_waiter); 1660 increase_priority_inheritance(next, sem->hp_waiter);
861 } 1661 }
862 1662
863 /* wake up next */ 1663 /* wake up next */
@@ -868,7 +1668,7 @@ int gsnedf_fmlp_unlock(struct litmus_lock* l)
868 1668
869 /* we lose the benefit of priority inheritance (if any) */ 1669 /* we lose the benefit of priority inheritance (if any) */
870 if (tsk_rt(t)->inh_task) 1670 if (tsk_rt(t)->inh_task)
871 clear_priority_inheritance(t); 1671 decrease_priority_inheritance(t, NULL);
872 1672
873out: 1673out:
874 spin_unlock_irqrestore(&sem->wait.lock, flags); 1674 spin_unlock_irqrestore(&sem->wait.lock, flags);
@@ -906,6 +1706,11 @@ static struct litmus_lock_ops gsnedf_fmlp_lock_ops = {
906 .lock = gsnedf_fmlp_lock, 1706 .lock = gsnedf_fmlp_lock,
907 .unlock = gsnedf_fmlp_unlock, 1707 .unlock = gsnedf_fmlp_unlock,
908 .deallocate = gsnedf_fmlp_free, 1708 .deallocate = gsnedf_fmlp_free,
1709
1710#ifdef CONFIG_LITMUS_NESTED_LOCKING
1711 .propagate_increase_inheritance = NULL,
1712 .propagate_decrease_inheritance = NULL
1713#endif
909}; 1714};
910 1715
911static struct litmus_lock* gsnedf_new_fmlp(void) 1716static struct litmus_lock* gsnedf_new_fmlp(void)
@@ -924,31 +1729,110 @@ static struct litmus_lock* gsnedf_new_fmlp(void)
924 return &sem->litmus_lock; 1729 return &sem->litmus_lock;
925} 1730}
926 1731
927/* **** lock constructor **** */
928
929 1732
930static long gsnedf_allocate_lock(struct litmus_lock **lock, int type, 1733static long gsnedf_allocate_lock(struct litmus_lock **lock, int type,
931 void* __user unused) 1734 void* __user args)
932{ 1735{
933 int err = -ENXIO; 1736 int err;
934 1737
935 /* GSN-EDF currently only supports the FMLP for global resources. */
936 switch (type) { 1738 switch (type) {
937 1739
938 case FMLP_SEM: 1740 case FMLP_SEM:
939 /* Flexible Multiprocessor Locking Protocol */ 1741 /* Flexible Multiprocessor Locking Protocol */
940 *lock = gsnedf_new_fmlp(); 1742 *lock = gsnedf_new_fmlp();
941 if (*lock) 1743 break;
942 err = 0; 1744#ifdef CONFIG_LITMUS_NESTED_LOCKING
943 else 1745 case RSM_MUTEX:
944 err = -ENOMEM; 1746 *lock = gsnedf_new_rsm_mutex();
945 break; 1747 break;
946 1748
1749 case IKGLP_SEM:
1750 *lock = gsnedf_new_ikglp(args);
1751 break;
1752#endif
1753 case KFMLP_SEM:
1754 *lock = gsnedf_new_kfmlp(args);
1755 break;
1756 default:
1757 err = -ENXIO;
1758 goto UNSUPPORTED_LOCK;
1759 };
1760
1761 if (*lock)
1762 err = 0;
1763 else
1764 err = -ENOMEM;
1765
1766UNSUPPORTED_LOCK:
1767 return err;
1768}
1769
1770#endif // CONFIG_LITMUS_LOCKING
1771
1772
1773
1774
1775
1776#ifdef CONFIG_LITMUS_AFFINITY_LOCKING
1777static struct affinity_observer_ops gsnedf_kfmlp_affinity_ops = {
1778 .close = kfmlp_aff_obs_close,
1779 .deallocate = kfmlp_aff_obs_free,
1780};
1781
1782#ifdef CONFIG_LITMUS_NESTED_LOCKING
1783static struct affinity_observer_ops gsnedf_ikglp_affinity_ops = {
1784 .close = ikglp_aff_obs_close,
1785 .deallocate = ikglp_aff_obs_free,
1786};
1787#endif
1788
1789static long gsnedf_allocate_affinity_observer(
1790 struct affinity_observer **aff_obs,
1791 int type,
1792 void* __user args)
1793{
1794 int err;
1795
1796 switch (type) {
1797
1798 case KFMLP_SIMPLE_GPU_AFF_OBS:
1799 *aff_obs = kfmlp_simple_gpu_aff_obs_new(&gsnedf_kfmlp_affinity_ops, args);
1800 break;
1801
1802 case KFMLP_GPU_AFF_OBS:
1803 *aff_obs = kfmlp_gpu_aff_obs_new(&gsnedf_kfmlp_affinity_ops, args);
1804 break;
1805
1806#ifdef CONFIG_LITMUS_NESTED_LOCKING
1807 case IKGLP_SIMPLE_GPU_AFF_OBS:
1808 *aff_obs = ikglp_simple_gpu_aff_obs_new(&gsnedf_ikglp_affinity_ops, args);
1809 break;
1810
1811 case IKGLP_GPU_AFF_OBS:
1812 *aff_obs = ikglp_gpu_aff_obs_new(&gsnedf_ikglp_affinity_ops, args);
1813 break;
1814#endif
1815 default:
1816 err = -ENXIO;
1817 goto UNSUPPORTED_AFF_OBS;
947 }; 1818 };
948 1819
1820 if (*aff_obs)
1821 err = 0;
1822 else
1823 err = -ENOMEM;
1824
1825UNSUPPORTED_AFF_OBS:
949 return err; 1826 return err;
950} 1827}
1828#endif
1829
951 1830
1831#if defined(CONFIG_LITMUS_NVIDIA) && defined(CONFIG_LITMUS_SOFTIRQD)
1832static int gsnedf_map_gpu_to_cpu(int gpu)
1833{
1834 return -1; // No CPU affinity needed.
1835}
952#endif 1836#endif
953 1837
954 1838
@@ -957,14 +1841,14 @@ static long gsnedf_activate_plugin(void)
957 int cpu; 1841 int cpu;
958 cpu_entry_t *entry; 1842 cpu_entry_t *entry;
959 1843
960 bheap_init(&gsnedf_cpu_heap); 1844 INIT_BINHEAP_HANDLE(&gsnedf_cpu_heap, cpu_lower_prio);
961#ifdef CONFIG_RELEASE_MASTER 1845#ifdef CONFIG_RELEASE_MASTER
962 gsnedf.release_master = atomic_read(&release_master_cpu); 1846 gsnedf.release_master = atomic_read(&release_master_cpu);
963#endif 1847#endif
964 1848
965 for_each_online_cpu(cpu) { 1849 for_each_online_cpu(cpu) {
966 entry = &per_cpu(gsnedf_cpu_entries, cpu); 1850 entry = &per_cpu(gsnedf_cpu_entries, cpu);
967 bheap_node_init(&entry->hn, entry); 1851 INIT_BINHEAP_NODE(&entry->hn);
968 entry->linked = NULL; 1852 entry->linked = NULL;
969 entry->scheduled = NULL; 1853 entry->scheduled = NULL;
970#ifdef CONFIG_RELEASE_MASTER 1854#ifdef CONFIG_RELEASE_MASTER
@@ -978,6 +1862,20 @@ static long gsnedf_activate_plugin(void)
978 } 1862 }
979#endif 1863#endif
980 } 1864 }
1865
1866#ifdef CONFIG_LITMUS_PAI_SOFTIRQD
1867 gsnedf_pending_tasklets.head = NULL;
1868 gsnedf_pending_tasklets.tail = &(gsnedf_pending_tasklets.head);
1869#endif
1870
1871#ifdef CONFIG_LITMUS_SOFTIRQD
1872 init_klmirqd();
1873#endif
1874
1875#ifdef CONFIG_LITMUS_NVIDIA
1876 init_nvidia_info();
1877#endif
1878
981 return 0; 1879 return 0;
982} 1880}
983 1881
@@ -994,8 +1892,32 @@ static struct sched_plugin gsn_edf_plugin __cacheline_aligned_in_smp = {
994 .task_block = gsnedf_task_block, 1892 .task_block = gsnedf_task_block,
995 .admit_task = gsnedf_admit_task, 1893 .admit_task = gsnedf_admit_task,
996 .activate_plugin = gsnedf_activate_plugin, 1894 .activate_plugin = gsnedf_activate_plugin,
1895 .compare = edf_higher_prio,
997#ifdef CONFIG_LITMUS_LOCKING 1896#ifdef CONFIG_LITMUS_LOCKING
998 .allocate_lock = gsnedf_allocate_lock, 1897 .allocate_lock = gsnedf_allocate_lock,
1898 .increase_prio = increase_priority_inheritance,
1899 .decrease_prio = decrease_priority_inheritance,
1900 .__increase_prio = __increase_priority_inheritance,
1901 .__decrease_prio = __decrease_priority_inheritance,
1902#endif
1903#ifdef CONFIG_LITMUS_NESTED_LOCKING
1904 .nested_increase_prio = nested_increase_priority_inheritance,
1905 .nested_decrease_prio = nested_decrease_priority_inheritance,
1906 .__compare = __edf_higher_prio,
1907#endif
1908#ifdef CONFIG_LITMUS_DGL_SUPPORT
1909 .get_dgl_spinlock = gsnedf_get_dgl_spinlock,
1910#endif
1911#ifdef CONFIG_LITMUS_AFFINITY_LOCKING
1912 .allocate_aff_obs = gsnedf_allocate_affinity_observer,
1913#endif
1914#ifdef CONFIG_LITMUS_PAI_SOFTIRQD
1915 .enqueue_pai_tasklet = gsnedf_enqueue_pai_tasklet,
1916 .change_prio_pai_tasklet = gsnedf_change_prio_pai_tasklet,
1917 .run_tasklets = gsnedf_run_tasklets,
1918#endif
1919#if defined(CONFIG_LITMUS_NVIDIA) && defined(CONFIG_LITMUS_SOFTIRQD)
1920 .map_gpu_to_cpu = gsnedf_map_gpu_to_cpu,
999#endif 1921#endif
1000}; 1922};
1001 1923
@@ -1005,15 +1927,20 @@ static int __init init_gsn_edf(void)
1005 int cpu; 1927 int cpu;
1006 cpu_entry_t *entry; 1928 cpu_entry_t *entry;
1007 1929
1008 bheap_init(&gsnedf_cpu_heap); 1930 INIT_BINHEAP_HANDLE(&gsnedf_cpu_heap, cpu_lower_prio);
1009 /* initialize CPU state */ 1931 /* initialize CPU state */
1010 for (cpu = 0; cpu < NR_CPUS; cpu++) { 1932 for (cpu = 0; cpu < NR_CPUS; ++cpu) {
1011 entry = &per_cpu(gsnedf_cpu_entries, cpu); 1933 entry = &per_cpu(gsnedf_cpu_entries, cpu);
1012 gsnedf_cpus[cpu] = entry; 1934 gsnedf_cpus[cpu] = entry;
1013 entry->cpu = cpu; 1935 entry->cpu = cpu;
1014 entry->hn = &gsnedf_heap_node[cpu]; 1936
1015 bheap_node_init(&entry->hn, entry); 1937 INIT_BINHEAP_NODE(&entry->hn);
1016 } 1938 }
1939
1940#ifdef CONFIG_LITMUS_DGL_SUPPORT
1941 raw_spin_lock_init(&dgl_lock);
1942#endif
1943
1017 edf_domain_init(&gsnedf, NULL, gsnedf_release_jobs); 1944 edf_domain_init(&gsnedf, NULL, gsnedf_release_jobs);
1018 return register_sched_plugin(&gsn_edf_plugin); 1945 return register_sched_plugin(&gsn_edf_plugin);
1019} 1946}