Diffstat (limited to 'litmus/sched_gsn_edf.c')
-rw-r--r-- | litmus/sched_gsn_edf.c | 1032 |
1 file changed, 932 insertions, 100 deletions
diff --git a/litmus/sched_gsn_edf.c b/litmus/sched_gsn_edf.c
index 6ed504f4750e..8c48757fa86c 100644
--- a/litmus/sched_gsn_edf.c
+++ b/litmus/sched_gsn_edf.c
@@ -12,23 +12,49 @@ | |||
12 | #include <linux/percpu.h> | 12 | #include <linux/percpu.h> |
13 | #include <linux/sched.h> | 13 | #include <linux/sched.h> |
14 | #include <linux/slab.h> | 14 | #include <linux/slab.h> |
15 | #include <linux/uaccess.h> | ||
16 | #include <linux/module.h> | ||
15 | 17 | ||
16 | #include <litmus/litmus.h> | 18 | #include <litmus/litmus.h> |
17 | #include <litmus/jobs.h> | 19 | #include <litmus/jobs.h> |
18 | #include <litmus/sched_plugin.h> | 20 | #include <litmus/sched_plugin.h> |
19 | #include <litmus/edf_common.h> | 21 | #include <litmus/edf_common.h> |
20 | #include <litmus/sched_trace.h> | 22 | #include <litmus/sched_trace.h> |
21 | #include <litmus/trace.h> | ||
22 | 23 | ||
23 | #include <litmus/preempt.h> | 24 | #include <litmus/preempt.h> |
24 | 25 | ||
25 | #include <litmus/bheap.h> | 26 | #include <litmus/bheap.h> |
27 | #include <litmus/binheap.h> | ||
28 | |||
29 | #ifdef CONFIG_LITMUS_LOCKING | ||
30 | #include <litmus/kfmlp_lock.h> | ||
31 | #endif | ||
32 | |||
33 | #ifdef CONFIG_LITMUS_NESTED_LOCKING | ||
34 | #include <litmus/rsm_lock.h> | ||
35 | #include <litmus/ikglp_lock.h> | ||
36 | #endif | ||
26 | 37 | ||
27 | #ifdef CONFIG_SCHED_CPU_AFFINITY | 38 | #ifdef CONFIG_SCHED_CPU_AFFINITY |
28 | #include <litmus/affinity.h> | 39 | #include <litmus/affinity.h> |
29 | #endif | 40 | #endif |
30 | 41 | ||
31 | #include <linux/module.h> | 42 | #ifdef CONFIG_LITMUS_SOFTIRQD |
43 | #include <litmus/litmus_softirq.h> | ||
44 | #endif | ||
45 | |||
46 | #ifdef CONFIG_LITMUS_PAI_SOFTIRQD | ||
47 | #include <linux/interrupt.h> | ||
48 | #include <litmus/trace.h> | ||
49 | #endif | ||
50 | |||
51 | #ifdef CONFIG_LITMUS_NVIDIA | ||
52 | #include <litmus/nvidia_info.h> | ||
53 | #endif | ||
54 | |||
55 | #if defined(CONFIG_LITMUS_AFFINITY_LOCKING) && defined(CONFIG_LITMUS_NVIDIA) | ||
56 | #include <litmus/gpu_affinity.h> | ||
57 | #endif | ||
32 | 58 | ||
33 | /* Overview of GSN-EDF operations. | 59 | /* Overview of GSN-EDF operations. |
34 | * | 60 | * |
@@ -103,52 +129,70 @@ typedef struct { | |||
103 | int cpu; | 129 | int cpu; |
104 | struct task_struct* linked; /* only RT tasks */ | 130 | struct task_struct* linked; /* only RT tasks */ |
105 | struct task_struct* scheduled; /* only RT tasks */ | 131 | struct task_struct* scheduled; /* only RT tasks */ |
106 | struct bheap_node* hn; | 132 | struct binheap_node hn; |
107 | } cpu_entry_t; | 133 | } cpu_entry_t; |
108 | DEFINE_PER_CPU(cpu_entry_t, gsnedf_cpu_entries); | 134 | DEFINE_PER_CPU(cpu_entry_t, gsnedf_cpu_entries); |
109 | 135 | ||
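The switch from a bheap_node pointer to an embedded binheap_node means the heap now links the node stored inside cpu_entry_t and recovers the entry via binheap_entry(_a, cpu_entry_t, hn), i.e. the usual container_of arithmetic. A minimal stand-alone sketch of that idiom (the demo_* names are illustrative only, not the LITMUS^RT binheap API):

#include <stddef.h>
#include <stdio.h>

struct demo_node  { struct demo_node *left, *right; };
struct demo_entry { int cpu; struct demo_node hn; };

/* recover the enclosing entry from a pointer to its embedded node */
#define demo_entry_of(node_ptr, type, member) \
	((type *)((char *)(node_ptr) - offsetof(type, member)))

int main(void)
{
	struct demo_entry e = { .cpu = 2, .hn = { NULL, NULL } };
	struct demo_node *n = &e.hn;              /* what the heap would store */
	struct demo_entry *back = demo_entry_of(n, struct demo_entry, hn);
	printf("cpu = %d\n", back->cpu);          /* prints: cpu = 2 */
	return 0;
}

The embedded-node scheme needs no separate node array or back-pointer; the node's address alone identifies its cpu_entry_t.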
110 | cpu_entry_t* gsnedf_cpus[NR_CPUS]; | 136 | cpu_entry_t* gsnedf_cpus[NR_CPUS]; |
111 | 137 | ||
112 | /* the cpus queue themselves according to priority in here */ | 138 | /* the cpus queue themselves according to priority in here */ |
113 | static struct bheap_node gsnedf_heap_node[NR_CPUS]; | 139 | static struct binheap_handle gsnedf_cpu_heap; |
114 | static struct bheap gsnedf_cpu_heap; | ||
115 | 140 | ||
116 | static rt_domain_t gsnedf; | 141 | static rt_domain_t gsnedf; |
117 | #define gsnedf_lock (gsnedf.ready_lock) | 142 | #define gsnedf_lock (gsnedf.ready_lock) |
118 | 143 | ||
144 | #ifdef CONFIG_LITMUS_DGL_SUPPORT | ||
145 | static raw_spinlock_t dgl_lock; | ||
146 | |||
147 | static raw_spinlock_t* gsnedf_get_dgl_spinlock(struct task_struct *t) | ||
148 | { | ||
149 | return(&dgl_lock); | ||
150 | } | ||
151 | #endif | ||
152 | |||
153 | #ifdef CONFIG_LITMUS_PAI_SOFTIRQD | ||
154 | struct tasklet_head | ||
155 | { | ||
156 | struct tasklet_struct *head; | ||
157 | struct tasklet_struct **tail; | ||
158 | }; | ||
159 | |||
160 | struct tasklet_head gsnedf_pending_tasklets; | ||
161 | #endif | ||
162 | |||
119 | 163 | ||
120 | /* Uncomment this if you want to see all scheduling decisions in the | 164 | /* Uncomment this if you want to see all scheduling decisions in the |
121 | * TRACE() log. | 165 | * TRACE() log. |
122 | #define WANT_ALL_SCHED_EVENTS | 166 | #define WANT_ALL_SCHED_EVENTS |
123 | */ | 167 | */ |
124 | 168 | ||
125 | static int cpu_lower_prio(struct bheap_node *_a, struct bheap_node *_b) | 169 | static int cpu_lower_prio(struct binheap_node *_a, struct binheap_node *_b) |
126 | { | 170 | { |
127 | cpu_entry_t *a, *b; | 171 | cpu_entry_t *a = binheap_entry(_a, cpu_entry_t, hn); |
128 | a = _a->value; | 172 | cpu_entry_t *b = binheap_entry(_b, cpu_entry_t, hn); |
129 | b = _b->value; | 173 | |
130 | /* Note that a and b are inverted: we want the lowest-priority CPU at | 174 | /* Note that a and b are inverted: we want the lowest-priority CPU at |
131 | * the top of the heap. | 175 | * the top of the heap. |
132 | */ | 176 | */ |
133 | return edf_higher_prio(b->linked, a->linked); | 177 | return edf_higher_prio(b->linked, a->linked); |
134 | } | 178 | } |
135 | 179 | ||
180 | |||
136 | /* update_cpu_position - Move the cpu entry to the correct place to maintain | 181 | /* update_cpu_position - Move the cpu entry to the correct place to maintain |
137 | * order in the cpu queue. Caller must hold gsnedf lock. | 182 | * order in the cpu queue. Caller must hold gsnedf lock. |
138 | */ | 183 | */ |
139 | static void update_cpu_position(cpu_entry_t *entry) | 184 | static void update_cpu_position(cpu_entry_t *entry) |
140 | { | 185 | { |
141 | if (likely(bheap_node_in_heap(entry->hn))) | 186 | if (likely(binheap_is_in_heap(&entry->hn))) { |
142 | bheap_delete(cpu_lower_prio, &gsnedf_cpu_heap, entry->hn); | 187 | binheap_delete(&entry->hn, &gsnedf_cpu_heap); |
143 | bheap_insert(cpu_lower_prio, &gsnedf_cpu_heap, entry->hn); | 188 | } |
189 | binheap_add(&entry->hn, &gsnedf_cpu_heap, cpu_entry_t, hn); | ||
144 | } | 190 | } |
145 | 191 | ||
146 | /* caller must hold gsnedf lock */ | 192 | /* caller must hold gsnedf lock */ |
147 | static cpu_entry_t* lowest_prio_cpu(void) | 193 | static cpu_entry_t* lowest_prio_cpu(void) |
148 | { | 194 | { |
149 | struct bheap_node* hn; | 195 | return binheap_top_entry(&gsnedf_cpu_heap, cpu_entry_t, hn); |
150 | hn = bheap_peek(cpu_lower_prio, &gsnedf_cpu_heap); | ||
151 | return hn->value; | ||
152 | } | 196 | } |
153 | 197 | ||
154 | 198 | ||
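cpu_lower_prio() deliberately swaps its arguments when calling edf_higher_prio() so that the CPU that is easiest to preempt (idle, or running the lowest-priority linked job) ends up at the top of gsnedf_cpu_heap, which is exactly what lowest_prio_cpu() returns. A minimal stand-alone sketch of that selection rule, assuming plain integer priorities (larger means higher, -1 means no linked real-time job) and a linear scan in place of the binary heap:

#include <stdio.h>

struct fake_cpu { int cpu; int linked_prio; };   /* -1 == no linked RT task */

/* nonzero if a should sit above b: a is easier to preempt than b */
static int cpu_lower_prio_demo(const struct fake_cpu *a, const struct fake_cpu *b)
{
	return a->linked_prio < b->linked_prio;
}

int main(void)
{
	struct fake_cpu cpus[] = { {0, 7}, {1, -1}, {2, 3} };
	struct fake_cpu *top = &cpus[0];
	int i;

	/* linear scan standing in for the heap: find the easiest-to-preempt CPU */
	for (i = 1; i < 3; i++)
		if (cpu_lower_prio_demo(&cpus[i], top))
			top = &cpus[i];

	printf("preempt CPU %d (linked prio %d)\n", top->cpu, top->linked_prio);
	return 0;
}

With the heap maintained this way, preemption checks can start from the single top entry instead of scanning every CPU.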
@@ -337,6 +381,10 @@ static noinline void job_completion(struct task_struct *t, int forced) | |||
337 | 381 | ||
338 | sched_trace_task_completion(t, forced); | 382 | sched_trace_task_completion(t, forced); |
339 | 383 | ||
384 | #ifdef CONFIG_LITMUS_NVIDIA | ||
385 | atomic_set(&tsk_rt(t)->nv_int_count, 0); | ||
386 | #endif | ||
387 | |||
340 | TRACE_TASK(t, "job_completion().\n"); | 388 | TRACE_TASK(t, "job_completion().\n"); |
341 | 389 | ||
342 | /* set flags */ | 390 | /* set flags */ |
@@ -379,6 +427,318 @@ static void gsnedf_tick(struct task_struct* t) | |||
379 | } | 427 | } |
380 | } | 428 | } |
381 | 429 | ||
430 | |||
431 | |||
432 | |||
433 | #ifdef CONFIG_LITMUS_PAI_SOFTIRQD | ||
434 | |||
435 | |||
436 | static void __do_lit_tasklet(struct tasklet_struct* tasklet, unsigned long flushed) | ||
437 | { | ||
438 | if (!atomic_read(&tasklet->count)) { | ||
439 | if(tasklet->owner) { | ||
440 | sched_trace_tasklet_begin(tasklet->owner); | ||
441 | } | ||
442 | |||
443 | if (!test_and_clear_bit(TASKLET_STATE_SCHED, &tasklet->state)) | ||
444 | { | ||
445 | BUG(); | ||
446 | } | ||
447 | TRACE("%s: Invoking tasklet with owner pid = %d (flushed = %d).\n", | ||
448 | __FUNCTION__, | ||
449 | (tasklet->owner) ? tasklet->owner->pid : -1, | ||
450 | (tasklet->owner) ? 0 : 1); | ||
451 | tasklet->func(tasklet->data); | ||
452 | tasklet_unlock(tasklet); | ||
453 | |||
454 | if(tasklet->owner) { | ||
455 | sched_trace_tasklet_end(tasklet->owner, flushed); | ||
456 | } | ||
457 | } | ||
458 | else { | ||
459 | BUG(); | ||
460 | } | ||
461 | } | ||
462 | |||
463 | static void do_lit_tasklets(struct task_struct* sched_task) | ||
464 | { | ||
465 | int work_to_do = 1; | ||
466 | struct tasklet_struct *tasklet = NULL; | ||
467 | unsigned long flags; | ||
468 | |||
469 | while(work_to_do) { | ||
470 | |||
471 | TS_NV_SCHED_BOTISR_START; | ||
472 | |||
473 | // execute one tasklet that has higher priority | ||
474 | raw_spin_lock_irqsave(&gsnedf_lock, flags); | ||
475 | |||
476 | if(gsnedf_pending_tasklets.head != NULL) { | ||
477 | struct tasklet_struct *prev = NULL; | ||
478 | tasklet = gsnedf_pending_tasklets.head; | ||
479 | |||
480 | while(tasklet && edf_higher_prio(sched_task, tasklet->owner)) { | ||
481 | prev = tasklet; | ||
482 | tasklet = tasklet->next; | ||
483 | } | ||
484 | |||
485 | // remove the tasklet from the queue | ||
486 | if(tasklet && prev) { | ||
487 | prev->next = tasklet->next; | ||
488 | if(prev->next == NULL) { | ||
489 | TRACE("%s: Tasklet for %d is the last element in tasklet queue.\n", __FUNCTION__, tasklet->owner->pid); | ||
490 | gsnedf_pending_tasklets.tail = &(prev->next); | ||
491 | } | ||
492 | } | ||
493 | else if(tasklet) { | ||
494 | gsnedf_pending_tasklets.head = tasklet->next; | ||
495 | if(tasklet->next == NULL) { | ||
496 | TRACE("%s: Tasklet for %d is the last element in tasklet queue.\n", __FUNCTION__, tasklet->owner->pid); | ||
497 | gsnedf_pending_tasklets.tail = &(gsnedf_pending_tasklets.head); | ||
498 | } | ||
499 | } | ||
500 | } | ||
501 | else { | ||
502 | TRACE("%s: Tasklet queue is empty.\n", __FUNCTION__); | ||
503 | } | ||
504 | |||
505 | raw_spin_unlock_irqrestore(&gsnedf_lock, flags); | ||
506 | |||
507 | if(tasklet) { | ||
508 | __do_lit_tasklet(tasklet, 0ul); | ||
509 | tasklet = NULL; | ||
510 | } | ||
511 | else { | ||
512 | work_to_do = 0; | ||
513 | } | ||
514 | |||
515 | TS_NV_SCHED_BOTISR_END; | ||
516 | } | ||
517 | } | ||
518 | |||
519 | //static void do_lit_tasklets(struct task_struct* sched_task) | ||
520 | //{ | ||
521 | // int work_to_do = 1; | ||
522 | // struct tasklet_struct *tasklet = NULL; | ||
523 | // //struct tasklet_struct *step; | ||
524 | // unsigned long flags; | ||
525 | // | ||
526 | // while(work_to_do) { | ||
527 | // | ||
528 | // TS_NV_SCHED_BOTISR_START; | ||
529 | // | ||
530 | // // remove tasklet at head of list if it has higher priority. | ||
531 | // raw_spin_lock_irqsave(&gsnedf_lock, flags); | ||
532 | // | ||
533 | // if(gsnedf_pending_tasklets.head != NULL) { | ||
534 | // // remove tasklet at head. | ||
535 | // tasklet = gsnedf_pending_tasklets.head; | ||
536 | // | ||
537 | // if(edf_higher_prio(tasklet->owner, sched_task)) { | ||
538 | // | ||
539 | // if(NULL == tasklet->next) { | ||
540 | // // tasklet is at the head, list only has one element | ||
541 | // TRACE("%s: Tasklet for %d is the last element in tasklet queue.\n", __FUNCTION__, tasklet->owner->pid); | ||
542 | // gsnedf_pending_tasklets.tail = &(gsnedf_pending_tasklets.head); | ||
543 | // } | ||
544 | // | ||
545 | // // remove the tasklet from the queue | ||
546 | // gsnedf_pending_tasklets.head = tasklet->next; | ||
547 | // | ||
548 | // TRACE("%s: Removed tasklet for %d from tasklet queue.\n", __FUNCTION__, tasklet->owner->pid); | ||
549 | // } | ||
550 | // else { | ||
551 | // TRACE("%s: Pending tasklet (%d) does not have priority to run on this CPU (%d).\n", __FUNCTION__, tasklet->owner->pid, smp_processor_id()); | ||
552 | // tasklet = NULL; | ||
553 | // } | ||
554 | // } | ||
555 | // else { | ||
556 | // TRACE("%s: Tasklet queue is empty.\n", __FUNCTION__); | ||
557 | // } | ||
558 | // | ||
559 | // raw_spin_unlock_irqrestore(&gsnedf_lock, flags); | ||
560 | // | ||
561 | // TS_NV_SCHED_BOTISR_END; | ||
562 | // | ||
563 | // if(tasklet) { | ||
564 | // __do_lit_tasklet(tasklet, 0ul); | ||
565 | // tasklet = NULL; | ||
566 | // } | ||
567 | // else { | ||
568 | // work_to_do = 0; | ||
569 | // } | ||
570 | // } | ||
571 | // | ||
572 | // //TRACE("%s: exited.\n", __FUNCTION__); | ||
573 | //} | ||
574 | |||
575 | static void __add_pai_tasklet(struct tasklet_struct* tasklet) | ||
576 | { | ||
577 | struct tasklet_struct* step; | ||
578 | |||
579 | tasklet->next = NULL; // make sure there are no old values floating around | ||
580 | |||
581 | step = gsnedf_pending_tasklets.head; | ||
582 | if(step == NULL) { | ||
583 | TRACE("%s: tasklet queue empty. inserting tasklet for %d at head.\n", __FUNCTION__, tasklet->owner->pid); | ||
584 | // insert at tail. | ||
585 | *(gsnedf_pending_tasklets.tail) = tasklet; | ||
586 | gsnedf_pending_tasklets.tail = &(tasklet->next); | ||
587 | } | ||
588 | else if((*(gsnedf_pending_tasklets.tail) != NULL) && | ||
589 | edf_higher_prio((*(gsnedf_pending_tasklets.tail))->owner, tasklet->owner)) { | ||
590 | // insert at tail. | ||
591 | TRACE("%s: tasklet belongs at end. inserting tasklet for %d at tail.\n", __FUNCTION__, tasklet->owner->pid); | ||
592 | |||
593 | *(gsnedf_pending_tasklets.tail) = tasklet; | ||
594 | gsnedf_pending_tasklets.tail = &(tasklet->next); | ||
595 | } | ||
596 | else { | ||
597 | // insert the tasklet somewhere in the middle. | ||
598 | |||
599 | TRACE("%s: tasklet belongs somewhere in the middle.\n", __FUNCTION__); | ||
600 | |||
601 | while(step->next && edf_higher_prio(step->next->owner, tasklet->owner)) { | ||
602 | step = step->next; | ||
603 | } | ||
604 | |||
605 | // insert tasklet right before step->next. | ||
606 | |||
607 | TRACE("%s: inserting tasklet for %d between %d and %d.\n", __FUNCTION__, tasklet->owner->pid, step->owner->pid, (step->next) ? step->next->owner->pid : -1); | ||
608 | |||
609 | tasklet->next = step->next; | ||
610 | step->next = tasklet; | ||
611 | |||
612 | // patch up the head if needed. | ||
613 | if(gsnedf_pending_tasklets.head == step) | ||
614 | { | ||
615 | TRACE("%s: %d is the new tasklet queue head.\n", __FUNCTION__, tasklet->owner->pid); | ||
616 | gsnedf_pending_tasklets.head = tasklet; | ||
617 | } | ||
618 | } | ||
619 | } | ||
620 | |||
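__add_pai_tasklet() keeps gsnedf_pending_tasklets sorted by owner priority (highest first) in a singly linked list whose tail field points at the next pointer of the last element, or at head when the list is empty, so appends and the empty case share one code path. A minimal stand-alone sketch of that head/tail-pointer-pointer idiom, with integer priorities standing in for edf_higher_prio() on the owners (the names here are illustrative, not kernel code):

#include <stdio.h>
#include <stdlib.h>

struct node { int prio; struct node *next; };
struct prio_list { struct node *head; struct node **tail; };

/* insert so the list stays sorted by descending priority; the tail
 * pointer-to-pointer lets the empty-list and append cases share code */
static void ordered_insert(struct prio_list *l, struct node *n)
{
	struct node **link = &l->head;

	while (*link && (*link)->prio >= n->prio)
		link = &(*link)->next;

	n->next = *link;
	*link = n;
	if (n->next == NULL)		/* inserted at the end: fix up tail */
		l->tail = &n->next;
}

int main(void)
{
	struct prio_list l = { NULL, &l.head };
	int prios[] = { 3, 9, 5 };
	int i;

	for (i = 0; i < 3; i++) {
		struct node *n = malloc(sizeof(*n));
		n->prio = prios[i];
		n->next = NULL;
		ordered_insert(&l, n);
	}

	for (struct node *p = l.head; p; p = p->next)
		printf("%d ", p->prio);		/* prints: 9 5 3 */
	printf("\n");

	while (l.head) {
		struct node *dead = l.head;
		l.head = dead->next;
		free(dead);
	}
	return 0;
}

This invariant on tail is also why the removal path in do_lit_tasklets() must restore it to &(prev->next) rather than to the address of a local variable.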
621 | static void gsnedf_run_tasklets(struct task_struct* sched_task) | ||
622 | { | ||
623 | preempt_disable(); | ||
624 | |||
625 | if(gsnedf_pending_tasklets.head != NULL) { | ||
626 | TRACE("%s: There are tasklets to process.\n", __FUNCTION__); | ||
627 | do_lit_tasklets(sched_task); | ||
628 | } | ||
629 | |||
630 | preempt_enable_no_resched(); | ||
631 | } | ||
632 | |||
633 | static int gsnedf_enqueue_pai_tasklet(struct tasklet_struct* tasklet) | ||
634 | { | ||
635 | cpu_entry_t *targetCPU = NULL; | ||
636 | int thisCPU; | ||
637 | int runLocal = 0; | ||
638 | int runNow = 0; | ||
639 | unsigned long flags; | ||
640 | |||
641 | if(unlikely((tasklet->owner == NULL) || !is_realtime(tasklet->owner))) | ||
642 | { | ||
643 | TRACE("%s: No owner associated with this tasklet!\n", __FUNCTION__); | ||
644 | return 0; | ||
645 | } | ||
646 | |||
647 | |||
648 | raw_spin_lock_irqsave(&gsnedf_lock, flags); | ||
649 | |||
650 | thisCPU = smp_processor_id(); | ||
651 | |||
652 | #ifdef CONFIG_SCHED_CPU_AFFINITY | ||
653 | { | ||
654 | cpu_entry_t* affinity = NULL; | ||
655 | |||
656 | // use this CPU if it isn't running any RT work. | ||
657 | if( | ||
658 | #ifdef CONFIG_RELEASE_MASTER | ||
659 | (thisCPU != gsnedf.release_master) && | ||
660 | #endif | ||
661 | (__get_cpu_var(gsnedf_cpu_entries).linked == NULL)) { | ||
662 | affinity = &(__get_cpu_var(gsnedf_cpu_entries)); | ||
663 | } | ||
664 | else { | ||
665 | // this CPU is busy or shouldn't run the tasklet. | ||
666 | // look for available nearby CPUs. | ||
667 | // NOTE: Affinity towards owner and not this CPU. Is this right? | ||
668 | affinity = | ||
669 | gsnedf_get_nearest_available_cpu( | ||
670 | &per_cpu(gsnedf_cpu_entries, task_cpu(tasklet->owner))); | ||
671 | } | ||
672 | |||
673 | targetCPU = affinity; | ||
674 | } | ||
675 | #endif | ||
676 | |||
677 | if (targetCPU == NULL) { | ||
678 | targetCPU = lowest_prio_cpu(); | ||
679 | } | ||
680 | |||
681 | if (edf_higher_prio(tasklet->owner, targetCPU->linked)) { | ||
682 | if (thisCPU == targetCPU->cpu) { | ||
683 | TRACE("%s: Run tasklet locally (and now).\n", __FUNCTION__); | ||
684 | runLocal = 1; | ||
685 | runNow = 1; | ||
686 | } | ||
687 | else { | ||
688 | TRACE("%s: Run tasklet remotely (and now).\n", __FUNCTION__); | ||
689 | runLocal = 0; | ||
690 | runNow = 1; | ||
691 | } | ||
692 | } | ||
693 | else { | ||
694 | runLocal = 0; | ||
695 | runNow = 0; | ||
696 | } | ||
697 | |||
698 | if(!runLocal) { | ||
699 | // enqueue the tasklet | ||
700 | __add_pai_tasklet(tasklet); | ||
701 | } | ||
702 | |||
703 | raw_spin_unlock_irqrestore(&gsnedf_lock, flags); | ||
704 | |||
705 | |||
706 | if (runLocal /*&& runNow */) { // runNow == 1 is implied | ||
707 | TRACE("%s: Running tasklet on CPU where it was received.\n", __FUNCTION__); | ||
708 | __do_lit_tasklet(tasklet, 0ul); | ||
709 | } | ||
710 | else if (runNow /*&& !runLocal */) { // runLocal == 0 is implied | ||
711 | TRACE("%s: Triggering CPU %d to run tasklet.\n", __FUNCTION__, targetCPU->cpu); | ||
712 | preempt(targetCPU); // need to be protected by gsnedf_lock? | ||
713 | } | ||
714 | else { | ||
715 | TRACE("%s: Scheduling of tasklet was deferred.\n", __FUNCTION__); | ||
716 | } | ||
717 | |||
718 | return(1); // success | ||
719 | } | ||
720 | |||
721 | static void gsnedf_change_prio_pai_tasklet(struct task_struct *old_prio, | ||
722 | struct task_struct *new_prio) | ||
723 | { | ||
724 | struct tasklet_struct* step; | ||
725 | unsigned long flags; | ||
726 | |||
727 | if(gsnedf_pending_tasklets.head != NULL) { | ||
728 | raw_spin_lock_irqsave(&gsnedf_lock, flags); | ||
729 | for(step = gsnedf_pending_tasklets.head; step != NULL; step = step->next) { | ||
730 | if(step->owner == old_prio) { | ||
731 | TRACE("%s: Found tasklet to change: %d\n", __FUNCTION__, step->owner->pid); | ||
732 | step->owner = new_prio; | ||
733 | } | ||
734 | } | ||
735 | raw_spin_unlock_irqrestore(&gsnedf_lock, flags); | ||
736 | } | ||
737 | } | ||
738 | |||
739 | #endif // end PAI | ||
740 | |||
741 | |||
382 | /* Getting schedule() right is a bit tricky. schedule() may not make any | 742 | /* Getting schedule() right is a bit tricky. schedule() may not make any |
383 | * assumptions on the state of the current task since it may be called for a | 743 | * assumptions on the state of the current task since it may be called for a |
384 | * number of reasons. The reasons include a scheduler_tick() determined that it | 744 | * number of reasons. The reasons include a scheduler_tick() determined that it |
@@ -437,21 +797,32 @@ static struct task_struct* gsnedf_schedule(struct task_struct * prev) | |||
437 | TRACE_TASK(prev, "invoked gsnedf_schedule.\n"); | 797 | TRACE_TASK(prev, "invoked gsnedf_schedule.\n"); |
438 | #endif | 798 | #endif |
439 | 799 | ||
800 | /* | ||
440 | if (exists) | 801 | if (exists) |
441 | TRACE_TASK(prev, | 802 | TRACE_TASK(prev, |
442 | "blocks:%d out_of_time:%d np:%d sleep:%d preempt:%d " | 803 | "blocks:%d out_of_time:%d np:%d sleep:%d preempt:%d " |
443 | "state:%d sig:%d\n", | 804 | "state:%d sig:%d\n", |
444 | blocks, out_of_time, np, sleep, preempt, | 805 | blocks, out_of_time, np, sleep, preempt, |
445 | prev->state, signal_pending(prev)); | 806 | prev->state, signal_pending(prev)); |
807 | */ | ||
808 | |||
446 | if (entry->linked && preempt) | 809 | if (entry->linked && preempt) |
447 | TRACE_TASK(prev, "will be preempted by %s/%d\n", | 810 | TRACE_TASK(prev, "will be preempted by %s/%d\n", |
448 | entry->linked->comm, entry->linked->pid); | 811 | entry->linked->comm, entry->linked->pid); |
449 | 812 | ||
450 | |||
451 | /* If a task blocks we have no choice but to reschedule. | 813 | /* If a task blocks we have no choice but to reschedule. |
452 | */ | 814 | */ |
453 | if (blocks) | 815 | if (blocks) { |
454 | unlink(entry->scheduled); | 816 | unlink(entry->scheduled); |
817 | } | ||
818 | |||
819 | #if defined(CONFIG_LITMUS_NVIDIA) && defined(CONFIG_LITMUS_AFFINITY_LOCKING) | ||
820 | if(exists && is_realtime(entry->scheduled) && tsk_rt(entry->scheduled)->held_gpus) { | ||
821 | if(!blocks || tsk_rt(entry->scheduled)->suspend_gpu_tracker_on_block) { | ||
822 | stop_gpu_tracker(entry->scheduled); | ||
823 | } | ||
824 | } | ||
825 | #endif | ||
455 | 826 | ||
456 | /* Request a sys_exit_np() call if we would like to preempt but cannot. | 827 | /* Request a sys_exit_np() call if we would like to preempt but cannot. |
457 | * We need to make sure to update the link structure anyway in case | 828 | * We need to make sure to update the link structure anyway in case |
@@ -492,12 +863,15 @@ static struct task_struct* gsnedf_schedule(struct task_struct * prev) | |||
492 | entry->scheduled->rt_param.scheduled_on = NO_CPU; | 863 | entry->scheduled->rt_param.scheduled_on = NO_CPU; |
493 | TRACE_TASK(entry->scheduled, "scheduled_on = NO_CPU\n"); | 864 | TRACE_TASK(entry->scheduled, "scheduled_on = NO_CPU\n"); |
494 | } | 865 | } |
495 | } else | 866 | } |
867 | else | ||
868 | { | ||
496 | /* Only override Linux scheduler if we have a real-time task | 869 | /* Only override Linux scheduler if we have a real-time task |
497 | * scheduled that needs to continue. | 870 | * scheduled that needs to continue. |
498 | */ | 871 | */ |
499 | if (exists) | 872 | if (exists) |
500 | next = prev; | 873 | next = prev; |
874 | } | ||
501 | 875 | ||
502 | sched_state_task_picked(); | 876 | sched_state_task_picked(); |
503 | 877 | ||
@@ -524,6 +898,7 @@ static void gsnedf_finish_switch(struct task_struct *prev) | |||
524 | cpu_entry_t* entry = &__get_cpu_var(gsnedf_cpu_entries); | 898 | cpu_entry_t* entry = &__get_cpu_var(gsnedf_cpu_entries); |
525 | 899 | ||
526 | entry->scheduled = is_realtime(current) ? current : NULL; | 900 | entry->scheduled = is_realtime(current) ? current : NULL; |
901 | |||
527 | #ifdef WANT_ALL_SCHED_EVENTS | 902 | #ifdef WANT_ALL_SCHED_EVENTS |
528 | TRACE_TASK(prev, "switched away from\n"); | 903 | TRACE_TASK(prev, "switched away from\n"); |
529 | #endif | 904 | #endif |
@@ -572,11 +947,14 @@ static void gsnedf_task_new(struct task_struct * t, int on_rq, int running) | |||
572 | static void gsnedf_task_wake_up(struct task_struct *task) | 947 | static void gsnedf_task_wake_up(struct task_struct *task) |
573 | { | 948 | { |
574 | unsigned long flags; | 949 | unsigned long flags; |
575 | lt_t now; | 950 | //lt_t now; |
576 | 951 | ||
577 | TRACE_TASK(task, "wake_up at %llu\n", litmus_clock()); | 952 | TRACE_TASK(task, "wake_up at %llu\n", litmus_clock()); |
578 | 953 | ||
579 | raw_spin_lock_irqsave(&gsnedf_lock, flags); | 954 | raw_spin_lock_irqsave(&gsnedf_lock, flags); |
955 | |||
956 | |||
957 | #if 0 // sporadic task model | ||
580 | /* We need to take suspensions because of semaphores into | 958 | /* We need to take suspensions because of semaphores into |
581 | * account! If a job resumes after being suspended due to acquiring | 959 | * account! If a job resumes after being suspended due to acquiring |
582 | * a semaphore, it should never be treated as a new job release. | 960 | * a semaphore, it should never be treated as a new job release. |
@@ -598,19 +976,26 @@ static void gsnedf_task_wake_up(struct task_struct *task) | |||
598 | } | 976 | } |
599 | } | 977 | } |
600 | } | 978 | } |
979 | #else // periodic task model | ||
980 | set_rt_flags(task, RT_F_RUNNING); | ||
981 | #endif | ||
982 | |||
601 | gsnedf_job_arrival(task); | 983 | gsnedf_job_arrival(task); |
602 | raw_spin_unlock_irqrestore(&gsnedf_lock, flags); | 984 | raw_spin_unlock_irqrestore(&gsnedf_lock, flags); |
603 | } | 985 | } |
604 | 986 | ||
605 | static void gsnedf_task_block(struct task_struct *t) | 987 | static void gsnedf_task_block(struct task_struct *t) |
606 | { | 988 | { |
989 | // TODO: is this called on preemption?? | ||
607 | unsigned long flags; | 990 | unsigned long flags; |
608 | 991 | ||
609 | TRACE_TASK(t, "block at %llu\n", litmus_clock()); | 992 | TRACE_TASK(t, "block at %llu\n", litmus_clock()); |
610 | 993 | ||
611 | /* unlink if necessary */ | 994 | /* unlink if necessary */ |
612 | raw_spin_lock_irqsave(&gsnedf_lock, flags); | 995 | raw_spin_lock_irqsave(&gsnedf_lock, flags); |
996 | |||
613 | unlink(t); | 997 | unlink(t); |
998 | |||
614 | raw_spin_unlock_irqrestore(&gsnedf_lock, flags); | 999 | raw_spin_unlock_irqrestore(&gsnedf_lock, flags); |
615 | 1000 | ||
616 | BUG_ON(!is_realtime(t)); | 1001 | BUG_ON(!is_realtime(t)); |
@@ -621,6 +1006,10 @@ static void gsnedf_task_exit(struct task_struct * t) | |||
621 | { | 1006 | { |
622 | unsigned long flags; | 1007 | unsigned long flags; |
623 | 1008 | ||
1009 | #ifdef CONFIG_LITMUS_PAI_SOFTIRQD | ||
1010 | gsnedf_change_prio_pai_tasklet(t, NULL); | ||
1011 | #endif | ||
1012 | |||
624 | /* unlink if necessary */ | 1013 | /* unlink if necessary */ |
625 | raw_spin_lock_irqsave(&gsnedf_lock, flags); | 1014 | raw_spin_lock_irqsave(&gsnedf_lock, flags); |
626 | unlink(t); | 1015 | unlink(t); |
@@ -637,101 +1026,423 @@ static void gsnedf_task_exit(struct task_struct * t) | |||
637 | 1026 | ||
638 | static long gsnedf_admit_task(struct task_struct* tsk) | 1027 | static long gsnedf_admit_task(struct task_struct* tsk) |
639 | { | 1028 | { |
1029 | #ifdef CONFIG_LITMUS_NESTED_LOCKING | ||
1030 | INIT_BINHEAP_HANDLE(&tsk_rt(tsk)->hp_blocked_tasks, | ||
1031 | edf_max_heap_base_priority_order); | ||
1032 | #endif | ||
1033 | |||
640 | return 0; | 1034 | return 0; |
641 | } | 1035 | } |
642 | 1036 | ||
1037 | |||
1038 | |||
1039 | |||
1040 | |||
1041 | |||
643 | #ifdef CONFIG_LITMUS_LOCKING | 1042 | #ifdef CONFIG_LITMUS_LOCKING |
644 | 1043 | ||
645 | #include <litmus/fdso.h> | 1044 | #include <litmus/fdso.h> |
646 | 1045 | ||
647 | /* called with IRQs off */ | 1046 | /* called with IRQs off */ |
648 | static void set_priority_inheritance(struct task_struct* t, struct task_struct* prio_inh) | 1047 | static void __increase_priority_inheritance(struct task_struct* t, |
1048 | struct task_struct* prio_inh) | ||
649 | { | 1049 | { |
650 | int linked_on; | 1050 | int linked_on; |
651 | int check_preempt = 0; | 1051 | int check_preempt = 0; |
652 | 1052 | ||
1053 | #ifdef CONFIG_LITMUS_NESTED_LOCKING | ||
1054 | /* this sanity check allows for weaker locking in protocols */ | ||
1055 | /* TODO (klitirqd): Skip this check if 't' is a proxy thread (???) */ | ||
1056 | if(__edf_higher_prio(prio_inh, BASE, t, EFFECTIVE)) { | ||
1057 | #endif | ||
1058 | TRACE_TASK(t, "inherits priority from %s/%d\n", | ||
1059 | prio_inh->comm, prio_inh->pid); | ||
1060 | tsk_rt(t)->inh_task = prio_inh; | ||
1061 | |||
1062 | linked_on = tsk_rt(t)->linked_on; | ||
1063 | |||
1064 | /* If it is scheduled, then we need to reorder the CPU heap. */ | ||
1065 | if (linked_on != NO_CPU) { | ||
1066 | TRACE_TASK(t, "%s: linked on %d\n", | ||
1067 | __FUNCTION__, linked_on); | ||
1068 | /* Holder is scheduled; need to re-order CPUs. | ||
1069 | * We can't use heap_decrease() here since | ||
1070 | * the cpu_heap is ordered in reverse direction, so | ||
1071 | * it is actually an increase. */ | ||
1072 | binheap_delete(&gsnedf_cpus[linked_on]->hn, &gsnedf_cpu_heap); | ||
1073 | binheap_add(&gsnedf_cpus[linked_on]->hn, | ||
1074 | &gsnedf_cpu_heap, cpu_entry_t, hn); | ||
1075 | } else { | ||
1076 | /* holder may be queued: first stop queue changes */ | ||
1077 | raw_spin_lock(&gsnedf.release_lock); | ||
1078 | if (is_queued(t)) { | ||
1079 | TRACE_TASK(t, "%s: is queued\n", | ||
1080 | __FUNCTION__); | ||
1081 | /* We need to update the position of holder in some | ||
1082 | * heap. Note that this could be a release heap if we | ||
1083 | * budget enforcement is used and this job overran. */ | ||
1084 | check_preempt = | ||
1085 | !bheap_decrease(edf_ready_order, | ||
1086 | tsk_rt(t)->heap_node); | ||
1087 | } else { | ||
1088 | /* Nothing to do: if it is not queued and not linked | ||
1089 | * then it is either sleeping or currently being moved | ||
1090 | * by other code (e.g., a timer interrupt handler) that | ||
1091 | * will use the correct priority when enqueuing the | ||
1092 | * task. */ | ||
1093 | TRACE_TASK(t, "%s: is NOT queued => Done.\n", | ||
1094 | __FUNCTION__); | ||
1095 | } | ||
1096 | raw_spin_unlock(&gsnedf.release_lock); | ||
1097 | |||
1098 | /* If holder was enqueued in a release heap, then the following | ||
1099 | * preemption check is pointless, but we can't easily detect | ||
1100 | * that case. If you want to fix this, then consider that | ||
1101 | * simply adding a state flag requires O(n) time to update when | ||
1102 | * releasing n tasks, which conflicts with the goal to have | ||
1103 | * O(log n) merges. */ | ||
1104 | if (check_preempt) { | ||
1105 | /* heap_decrease() hit the top level of the heap: make | ||
1106 | * sure preemption checks get the right task, not the | ||
1107 | * potentially stale cache. */ | ||
1108 | bheap_uncache_min(edf_ready_order, | ||
1109 | &gsnedf.ready_queue); | ||
1110 | check_for_preemptions(); | ||
1111 | } | ||
1112 | } | ||
1113 | #ifdef CONFIG_LITMUS_NESTED_LOCKING | ||
1114 | } | ||
1115 | else { | ||
1116 | TRACE_TASK(t, "Spurious invalid priority increase. " | ||
1117 | "Inheritance request: %s/%d [eff_prio = %s/%d] to inherit from %s/%d\n" | ||
1118 | "Occurance is likely okay: probably due to (hopefully safe) concurrent priority updates.\n", | ||
1119 | t->comm, t->pid, | ||
1120 | effective_priority(t)->comm, effective_priority(t)->pid, | ||
1121 | (prio_inh) ? prio_inh->comm : "nil", | ||
1122 | (prio_inh) ? prio_inh->pid : -1); | ||
1123 | WARN_ON(!prio_inh); | ||
1124 | } | ||
1125 | #endif | ||
1126 | } | ||
1127 | |||
1128 | /* called with IRQs off */ | ||
1129 | static void increase_priority_inheritance(struct task_struct* t, struct task_struct* prio_inh) | ||
1130 | { | ||
653 | raw_spin_lock(&gsnedf_lock); | 1131 | raw_spin_lock(&gsnedf_lock); |
654 | 1132 | ||
655 | TRACE_TASK(t, "inherits priority from %s/%d\n", prio_inh->comm, prio_inh->pid); | 1133 | __increase_priority_inheritance(t, prio_inh); |
656 | tsk_rt(t)->inh_task = prio_inh; | 1134 | |
657 | 1135 | #ifdef CONFIG_LITMUS_SOFTIRQD | |
658 | linked_on = tsk_rt(t)->linked_on; | 1136 | if(tsk_rt(t)->cur_klitirqd != NULL) |
659 | 1137 | { | |
660 | /* If it is scheduled, then we need to reorder the CPU heap. */ | 1138 | TRACE_TASK(t, "%s/%d inherits a new priority!\n", |
661 | if (linked_on != NO_CPU) { | 1139 | tsk_rt(t)->cur_klitirqd->comm, tsk_rt(t)->cur_klitirqd->pid); |
662 | TRACE_TASK(t, "%s: linked on %d\n", | 1140 | |
663 | __FUNCTION__, linked_on); | 1141 | __increase_priority_inheritance(tsk_rt(t)->cur_klitirqd, prio_inh); |
664 | /* Holder is scheduled; need to re-order CPUs. | 1142 | } |
665 | * We can't use heap_decrease() here since | 1143 | #endif |
666 | * the cpu_heap is ordered in reverse direction, so | 1144 | |
667 | * it is actually an increase. */ | 1145 | raw_spin_unlock(&gsnedf_lock); |
668 | bheap_delete(cpu_lower_prio, &gsnedf_cpu_heap, | 1146 | |
669 | gsnedf_cpus[linked_on]->hn); | 1147 | #if defined(CONFIG_LITMUS_PAI_SOFTIRQD) && defined(CONFIG_LITMUS_NVIDIA) |
670 | bheap_insert(cpu_lower_prio, &gsnedf_cpu_heap, | 1148 | if(tsk_rt(t)->held_gpus) { |
671 | gsnedf_cpus[linked_on]->hn); | 1149 | int i; |
672 | } else { | 1150 | for(i = find_first_bit(&tsk_rt(t)->held_gpus, sizeof(tsk_rt(t)->held_gpus)); |
673 | /* holder may be queued: first stop queue changes */ | 1151 | i < NV_DEVICE_NUM; |
674 | raw_spin_lock(&gsnedf.release_lock); | 1152 | i = find_next_bit(&tsk_rt(t)->held_gpus, sizeof(tsk_rt(t)->held_gpus), i+1)) { |
675 | if (is_queued(t)) { | 1153 | pai_check_priority_increase(t, i); |
676 | TRACE_TASK(t, "%s: is queued\n", | 1154 | } |
677 | __FUNCTION__); | 1155 | } |
678 | /* We need to update the position of holder in some | 1156 | #endif |
679 | * heap. Note that this could be a release heap if we | 1157 | } |
680 | * budget enforcement is used and this job overran. */ | 1158 | |
681 | check_preempt = | 1159 | |
682 | !bheap_decrease(edf_ready_order, | 1160 | /* called with IRQs off */ |
683 | tsk_rt(t)->heap_node); | 1161 | static void __decrease_priority_inheritance(struct task_struct* t, |
684 | } else { | 1162 | struct task_struct* prio_inh) |
685 | /* Nothing to do: if it is not queued and not linked | 1163 | { |
686 | * then it is either sleeping or currently being moved | 1164 | #ifdef CONFIG_LITMUS_NESTED_LOCKING |
687 | * by other code (e.g., a timer interrupt handler) that | 1165 | if(__edf_higher_prio(t, EFFECTIVE, prio_inh, BASE)) { |
688 | * will use the correct priority when enqueuing the | 1166 | #endif |
689 | * task. */ | 1167 | /* A job only stops inheriting a priority when it releases a |
690 | TRACE_TASK(t, "%s: is NOT queued => Done.\n", | 1168 | * resource. Thus we can make the following assumption.*/ |
691 | __FUNCTION__); | 1169 | if(prio_inh) |
1170 | TRACE_TASK(t, "EFFECTIVE priority decreased to %s/%d\n", | ||
1171 | prio_inh->comm, prio_inh->pid); | ||
1172 | else | ||
1173 | TRACE_TASK(t, "base priority restored.\n"); | ||
1174 | |||
1175 | tsk_rt(t)->inh_task = prio_inh; | ||
1176 | |||
1177 | if(tsk_rt(t)->scheduled_on != NO_CPU) { | ||
1178 | TRACE_TASK(t, "is scheduled.\n"); | ||
1179 | |||
1180 | /* Check if rescheduling is necessary. We can't use heap_decrease() | ||
1181 | * since the priority was effectively lowered. */ | ||
1182 | unlink(t); | ||
1183 | gsnedf_job_arrival(t); | ||
692 | } | 1184 | } |
693 | raw_spin_unlock(&gsnedf.release_lock); | 1185 | else { |
694 | 1186 | /* task is queued */ | |
695 | /* If holder was enqueued in a release heap, then the following | 1187 | raw_spin_lock(&gsnedf.release_lock); |
696 | * preemption check is pointless, but we can't easily detect | 1188 | if (is_queued(t)) { |
697 | * that case. If you want to fix this, then consider that | 1189 | TRACE_TASK(t, "is queued.\n"); |
698 | * simply adding a state flag requires O(n) time to update when | 1190 | |
699 | * releasing n tasks, which conflicts with the goal to have | 1191 | /* decrease in priority, so we have to re-add to binomial heap */ |
700 | * O(log n) merges. */ | 1192 | unlink(t); |
701 | if (check_preempt) { | 1193 | gsnedf_job_arrival(t); |
702 | /* heap_decrease() hit the top level of the heap: make | 1194 | } |
703 | * sure preemption checks get the right task, not the | 1195 | else { |
704 | * potentially stale cache. */ | 1196 | TRACE_TASK(t, "is not in scheduler. Probably on wait queue somewhere.\n"); |
705 | bheap_uncache_min(edf_ready_order, | 1197 | } |
706 | &gsnedf.ready_queue); | 1198 | raw_spin_unlock(&gsnedf.release_lock); |
707 | check_for_preemptions(); | ||
708 | } | 1199 | } |
1200 | #ifdef CONFIG_LITMUS_NESTED_LOCKING | ||
1201 | } | ||
1202 | else { | ||
1203 | TRACE_TASK(t, "Spurious invalid priority decrease. " | ||
1204 | "Inheritance request: %s/%d [eff_prio = %s/%d] to inherit from %s/%d\n" | ||
1205 | "Occurance is likely okay: probably due to (hopefully safe) concurrent priority updates.\n", | ||
1206 | t->comm, t->pid, | ||
1207 | effective_priority(t)->comm, effective_priority(t)->pid, | ||
1208 | (prio_inh) ? prio_inh->comm : "nil", | ||
1209 | (prio_inh) ? prio_inh->pid : -1); | ||
709 | } | 1210 | } |
1211 | #endif | ||
1212 | } | ||
1213 | |||
1214 | static void decrease_priority_inheritance(struct task_struct* t, | ||
1215 | struct task_struct* prio_inh) | ||
1216 | { | ||
1217 | raw_spin_lock(&gsnedf_lock); | ||
1218 | __decrease_priority_inheritance(t, prio_inh); | ||
1219 | |||
1220 | #ifdef CONFIG_LITMUS_SOFTIRQD | ||
1221 | if(tsk_rt(t)->cur_klitirqd != NULL) | ||
1222 | { | ||
1223 | TRACE_TASK(t, "%s/%d decreases in priority!\n", | ||
1224 | tsk_rt(t)->cur_klitirqd->comm, tsk_rt(t)->cur_klitirqd->pid); | ||
1225 | |||
1226 | __decrease_priority_inheritance(tsk_rt(t)->cur_klitirqd, prio_inh); | ||
1227 | } | ||
1228 | #endif | ||
710 | 1229 | ||
711 | raw_spin_unlock(&gsnedf_lock); | 1230 | raw_spin_unlock(&gsnedf_lock); |
1231 | |||
1232 | #if defined(CONFIG_LITMUS_PAI_SOFTIRQD) && defined(CONFIG_LITMUS_NVIDIA) | ||
1233 | if(tsk_rt(t)->held_gpus) { | ||
1234 | int i; | ||
1235 | for(i = find_first_bit(&tsk_rt(t)->held_gpus, sizeof(tsk_rt(t)->held_gpus)); | ||
1236 | i < NV_DEVICE_NUM; | ||
1237 | i = find_next_bit(&tsk_rt(t)->held_gpus, sizeof(tsk_rt(t)->held_gpus), i+1)) { | ||
1238 | pai_check_priority_decrease(t, i); | ||
1239 | } | ||
1240 | } | ||
1241 | #endif | ||
712 | } | 1242 | } |
713 | 1243 | ||
1244 | |||
1245 | #ifdef CONFIG_LITMUS_SOFTIRQD | ||
714 | /* called with IRQs off */ | 1246 | /* called with IRQs off */ |
715 | static void clear_priority_inheritance(struct task_struct* t) | 1247 | static void increase_priority_inheritance_klitirqd(struct task_struct* klitirqd, |
1248 | struct task_struct* old_owner, | ||
1249 | struct task_struct* new_owner) | ||
716 | { | 1250 | { |
1251 | BUG_ON(!(tsk_rt(klitirqd)->is_proxy_thread)); | ||
1252 | |||
717 | raw_spin_lock(&gsnedf_lock); | 1253 | raw_spin_lock(&gsnedf_lock); |
718 | 1254 | ||
719 | /* A job only stops inheriting a priority when it releases a | 1255 | if(old_owner != new_owner) |
720 | * resource. Thus we can make the following assumption.*/ | 1256 | { |
721 | BUG_ON(tsk_rt(t)->scheduled_on == NO_CPU); | 1257 | if(old_owner) |
1258 | { | ||
1259 | // unreachable? | ||
1260 | tsk_rt(old_owner)->cur_klitirqd = NULL; | ||
1261 | } | ||
722 | 1262 | ||
723 | TRACE_TASK(t, "priority restored\n"); | 1263 | TRACE_TASK(klitirqd, "giving ownership to %s/%d.\n", |
724 | tsk_rt(t)->inh_task = NULL; | 1264 | new_owner->comm, new_owner->pid); |
725 | 1265 | ||
726 | /* Check if rescheduling is necessary. We can't use heap_decrease() | 1266 | tsk_rt(new_owner)->cur_klitirqd = klitirqd; |
727 | * since the priority was effectively lowered. */ | 1267 | } |
728 | unlink(t); | 1268 | |
729 | gsnedf_job_arrival(t); | 1269 | __decrease_priority_inheritance(klitirqd, NULL); // kludge to clear out cur prio. |
1270 | |||
1271 | __increase_priority_inheritance(klitirqd, | ||
1272 | (tsk_rt(new_owner)->inh_task == NULL) ? | ||
1273 | new_owner : | ||
1274 | tsk_rt(new_owner)->inh_task); | ||
730 | 1275 | ||
731 | raw_spin_unlock(&gsnedf_lock); | 1276 | raw_spin_unlock(&gsnedf_lock); |
732 | } | 1277 | } |
733 | 1278 | ||
734 | 1279 | ||
1280 | /* called with IRQs off */ | ||
1281 | static void decrease_priority_inheritance_klitirqd(struct task_struct* klitirqd, | ||
1282 | struct task_struct* old_owner, | ||
1283 | struct task_struct* new_owner) | ||
1284 | { | ||
1285 | BUG_ON(!(tsk_rt(klitirqd)->is_proxy_thread)); | ||
1286 | |||
1287 | raw_spin_lock(&gsnedf_lock); | ||
1288 | |||
1289 | TRACE_TASK(klitirqd, "priority restored\n"); | ||
1290 | |||
1291 | __decrease_priority_inheritance(klitirqd, new_owner); | ||
1292 | |||
1293 | tsk_rt(old_owner)->cur_klitirqd = NULL; | ||
1294 | |||
1295 | raw_spin_unlock(&gsnedf_lock); | ||
1296 | } | ||
1297 | #endif | ||
1298 | |||
1299 | |||
1300 | |||
1301 | |||
1302 | #ifdef CONFIG_LITMUS_NESTED_LOCKING | ||
1303 | |||
1304 | /* called with IRQs off */ | ||
1305 | /* preconditions: | ||
1306 | (1) The 'hp_blocked_tasks_lock' of task 't' is held. | ||
1307 | (2) The lock 'to_unlock' is held. | ||
1308 | */ | ||
1309 | static void nested_increase_priority_inheritance(struct task_struct* t, | ||
1310 | struct task_struct* prio_inh, | ||
1311 | raw_spinlock_t *to_unlock, | ||
1312 | unsigned long irqflags) | ||
1313 | { | ||
1314 | struct litmus_lock *blocked_lock = tsk_rt(t)->blocked_lock; | ||
1315 | |||
1316 | if(tsk_rt(t)->inh_task != prio_inh) { // shield redundant calls. | ||
1317 | increase_priority_inheritance(t, prio_inh); // increase our prio. | ||
1318 | } | ||
1319 | |||
1320 | raw_spin_unlock(&tsk_rt(t)->hp_blocked_tasks_lock); // unlock t's heap. | ||
1321 | |||
1322 | |||
1323 | if(blocked_lock) { | ||
1324 | if(blocked_lock->ops->propagate_increase_inheritance) { | ||
1325 | TRACE_TASK(t, "Inheritor is blocked (...perhaps). Checking lock %d.\n", | ||
1326 | blocked_lock->ident); | ||
1327 | |||
1328 | // beware: recursion | ||
1329 | blocked_lock->ops->propagate_increase_inheritance(blocked_lock, | ||
1330 | t, to_unlock, | ||
1331 | irqflags); | ||
1332 | } | ||
1333 | else { | ||
1334 | TRACE_TASK(t, "Inheritor is blocked on lock (%d) that does not support nesting!\n", | ||
1335 | blocked_lock->ident); | ||
1336 | unlock_fine_irqrestore(to_unlock, irqflags); | ||
1337 | } | ||
1338 | } | ||
1339 | else { | ||
1340 | TRACE_TASK(t, "is not blocked. No propagation.\n"); | ||
1341 | unlock_fine_irqrestore(to_unlock, irqflags); | ||
1342 | } | ||
1343 | } | ||
1344 | |||
1345 | /* called with IRQs off */ | ||
1346 | /* preconditions: | ||
1347 | (1) The 'hp_blocked_tasks_lock' of task 't' is held. | ||
1348 | (2) The lock 'to_unlock' is held. | ||
1349 | */ | ||
1350 | static void nested_decrease_priority_inheritance(struct task_struct* t, | ||
1351 | struct task_struct* prio_inh, | ||
1352 | raw_spinlock_t *to_unlock, | ||
1353 | unsigned long irqflags) | ||
1354 | { | ||
1355 | struct litmus_lock *blocked_lock = tsk_rt(t)->blocked_lock; | ||
1356 | decrease_priority_inheritance(t, prio_inh); | ||
1357 | |||
1358 | raw_spin_unlock(&tsk_rt(t)->hp_blocked_tasks_lock); // unlock t's heap. | ||
1359 | |||
1360 | if(blocked_lock) { | ||
1361 | if(blocked_lock->ops->propagate_decrease_inheritance) { | ||
1362 | TRACE_TASK(t, "Inheritor is blocked (...perhaps). Checking lock %d.\n", | ||
1363 | blocked_lock->ident); | ||
1364 | |||
1365 | // beware: recursion | ||
1366 | blocked_lock->ops->propagate_decrease_inheritance(blocked_lock, t, | ||
1367 | to_unlock, | ||
1368 | irqflags); | ||
1369 | } | ||
1370 | else { | ||
1371 | TRACE_TASK(t, "Inheritor is blocked on lock (%p) that does not support nesting!\n", | ||
1372 | blocked_lock); | ||
1373 | unlock_fine_irqrestore(to_unlock, irqflags); | ||
1374 | } | ||
1375 | } | ||
1376 | else { | ||
1377 | TRACE_TASK(t, "is not blocked. No propagation.\n"); | ||
1378 | unlock_fine_irqrestore(to_unlock, irqflags); | ||
1379 | } | ||
1380 | } | ||
1381 | |||
1382 | |||
1383 | /* ******************** RSM MUTEX ********************** */ | ||
1384 | |||
1385 | static struct litmus_lock_ops gsnedf_rsm_mutex_lock_ops = { | ||
1386 | .lock = rsm_mutex_lock, | ||
1387 | .unlock = rsm_mutex_unlock, | ||
1388 | .close = rsm_mutex_close, | ||
1389 | .deallocate = rsm_mutex_free, | ||
1390 | |||
1391 | .propagate_increase_inheritance = rsm_mutex_propagate_increase_inheritance, | ||
1392 | .propagate_decrease_inheritance = rsm_mutex_propagate_decrease_inheritance, | ||
1393 | |||
1394 | #ifdef CONFIG_LITMUS_DGL_SUPPORT | ||
1395 | .dgl_lock = rsm_mutex_dgl_lock, | ||
1396 | .is_owner = rsm_mutex_is_owner, | ||
1397 | .enable_priority = rsm_mutex_enable_priority, | ||
1398 | #endif | ||
1399 | }; | ||
1400 | |||
1401 | static struct litmus_lock* gsnedf_new_rsm_mutex(void) | ||
1402 | { | ||
1403 | return rsm_mutex_new(&gsnedf_rsm_mutex_lock_ops); | ||
1404 | } | ||
1405 | |||
1406 | /* ******************** IKGLP ********************** */ | ||
1407 | |||
1408 | static struct litmus_lock_ops gsnedf_ikglp_lock_ops = { | ||
1409 | .lock = ikglp_lock, | ||
1410 | .unlock = ikglp_unlock, | ||
1411 | .close = ikglp_close, | ||
1412 | .deallocate = ikglp_free, | ||
1413 | |||
1414 | // ikglp can only be an outer-most lock. | ||
1415 | .propagate_increase_inheritance = NULL, | ||
1416 | .propagate_decrease_inheritance = NULL, | ||
1417 | }; | ||
1418 | |||
1419 | static struct litmus_lock* gsnedf_new_ikglp(void* __user arg) | ||
1420 | { | ||
1421 | return ikglp_new(num_online_cpus(), &gsnedf_ikglp_lock_ops, arg); | ||
1422 | } | ||
1423 | |||
1424 | #endif /* CONFIG_LITMUS_NESTED_LOCKING */ | ||
1425 | |||
1426 | |||
1427 | /* ******************** KFMLP support ********************** */ | ||
1428 | |||
1429 | static struct litmus_lock_ops gsnedf_kfmlp_lock_ops = { | ||
1430 | .lock = kfmlp_lock, | ||
1431 | .unlock = kfmlp_unlock, | ||
1432 | .close = kfmlp_close, | ||
1433 | .deallocate = kfmlp_free, | ||
1434 | |||
1435 | // kfmlp can only be an outer-most lock. | ||
1436 | .propagate_increase_inheritance = NULL, | ||
1437 | .propagate_decrease_inheritance = NULL, | ||
1438 | }; | ||
1439 | |||
1440 | |||
1441 | static struct litmus_lock* gsnedf_new_kfmlp(void* __user arg) | ||
1442 | { | ||
1443 | return kfmlp_new(&gsnedf_kfmlp_lock_ops, arg); | ||
1444 | } | ||
1445 | |||
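The RSM mutex, IKGLP, and KFMLP wrappers above all follow the same pattern: each protocol fills in a litmus_lock_ops table of function pointers and the plugin merely chooses which table to attach. A stripped-down, stand-alone sketch of that ops-table dispatch pattern (the demo_* names are hypothetical, not the LITMUS^RT interface):

#include <stdio.h>

struct demo_lock;

struct demo_lock_ops {
	int (*lock)(struct demo_lock *l);
	int (*unlock)(struct demo_lock *l);
};

struct demo_lock {
	const struct demo_lock_ops *ops;
	const char *name;
};

static int demo_lock_fn(struct demo_lock *l)   { printf("lock %s\n", l->name);   return 0; }
static int demo_unlock_fn(struct demo_lock *l) { printf("unlock %s\n", l->name); return 0; }

static const struct demo_lock_ops demo_ops = {
	.lock   = demo_lock_fn,
	.unlock = demo_unlock_fn,
};

int main(void)
{
	struct demo_lock l = { .ops = &demo_ops, .name = "fmlp-like" };

	l.ops->lock(&l);	/* dispatch through the ops table */
	l.ops->unlock(&l);
	return 0;
}

gsnedf_allocate_lock() below then only has to pick which constructor to call for each requested lock type.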
735 | /* ******************** FMLP support ********************** */ | 1446 | /* ******************** FMLP support ********************** */ |
736 | 1447 | ||
737 | /* struct for semaphore with priority inheritance */ | 1448 | /* struct for semaphore with priority inheritance */ |
@@ -797,7 +1508,7 @@ int gsnedf_fmlp_lock(struct litmus_lock* l) | |||
797 | if (edf_higher_prio(t, sem->hp_waiter)) { | 1508 | if (edf_higher_prio(t, sem->hp_waiter)) { |
798 | sem->hp_waiter = t; | 1509 | sem->hp_waiter = t; |
799 | if (edf_higher_prio(t, sem->owner)) | 1510 | if (edf_higher_prio(t, sem->owner)) |
800 | set_priority_inheritance(sem->owner, sem->hp_waiter); | 1511 | increase_priority_inheritance(sem->owner, sem->hp_waiter); |
801 | } | 1512 | } |
802 | 1513 | ||
803 | TS_LOCK_SUSPEND; | 1514 | TS_LOCK_SUSPEND; |
@@ -865,7 +1576,7 @@ int gsnedf_fmlp_unlock(struct litmus_lock* l) | |||
865 | /* Well, if next is not the highest-priority waiter, | 1576 | /* Well, if next is not the highest-priority waiter, |
866 | * then it ought to inherit the highest-priority | 1577 | * then it ought to inherit the highest-priority |
867 | * waiter's priority. */ | 1578 | * waiter's priority. */ |
868 | set_priority_inheritance(next, sem->hp_waiter); | 1579 | increase_priority_inheritance(next, sem->hp_waiter); |
869 | } | 1580 | } |
870 | 1581 | ||
871 | /* wake up next */ | 1582 | /* wake up next */ |
@@ -876,7 +1587,7 @@ int gsnedf_fmlp_unlock(struct litmus_lock* l) | |||
876 | 1587 | ||
877 | /* we lose the benefit of priority inheritance (if any) */ | 1588 | /* we lose the benefit of priority inheritance (if any) */ |
878 | if (tsk_rt(t)->inh_task) | 1589 | if (tsk_rt(t)->inh_task) |
879 | clear_priority_inheritance(t); | 1590 | decrease_priority_inheritance(t, NULL); |
880 | 1591 | ||
881 | out: | 1592 | out: |
882 | spin_unlock_irqrestore(&sem->wait.lock, flags); | 1593 | spin_unlock_irqrestore(&sem->wait.lock, flags); |
@@ -914,6 +1625,11 @@ static struct litmus_lock_ops gsnedf_fmlp_lock_ops = { | |||
914 | .lock = gsnedf_fmlp_lock, | 1625 | .lock = gsnedf_fmlp_lock, |
915 | .unlock = gsnedf_fmlp_unlock, | 1626 | .unlock = gsnedf_fmlp_unlock, |
916 | .deallocate = gsnedf_fmlp_free, | 1627 | .deallocate = gsnedf_fmlp_free, |
1628 | |||
1629 | #ifdef CONFIG_LITMUS_NESTED_LOCKING | ||
1630 | .propagate_increase_inheritance = NULL, | ||
1631 | .propagate_decrease_inheritance = NULL | ||
1632 | #endif | ||
917 | }; | 1633 | }; |
918 | 1634 | ||
919 | static struct litmus_lock* gsnedf_new_fmlp(void) | 1635 | static struct litmus_lock* gsnedf_new_fmlp(void) |
@@ -932,47 +1648,121 @@ static struct litmus_lock* gsnedf_new_fmlp(void) | |||
932 | return &sem->litmus_lock; | 1648 | return &sem->litmus_lock; |
933 | } | 1649 | } |
934 | 1650 | ||
935 | /* **** lock constructor **** */ | ||
936 | |||
937 | 1651 | ||
938 | static long gsnedf_allocate_lock(struct litmus_lock **lock, int type, | 1652 | static long gsnedf_allocate_lock(struct litmus_lock **lock, int type, |
939 | void* __user unused) | 1653 | void* __user args) |
940 | { | 1654 | { |
941 | int err = -ENXIO; | 1655 | int err; |
942 | 1656 | ||
943 | /* GSN-EDF currently only supports the FMLP for global resources. */ | ||
944 | switch (type) { | 1657 | switch (type) { |
945 | 1658 | ||
946 | case FMLP_SEM: | 1659 | case FMLP_SEM: |
947 | /* Flexible Multiprocessor Locking Protocol */ | 1660 | /* Flexible Multiprocessor Locking Protocol */ |
948 | *lock = gsnedf_new_fmlp(); | 1661 | *lock = gsnedf_new_fmlp(); |
949 | if (*lock) | 1662 | break; |
950 | err = 0; | 1663 | #ifdef CONFIG_LITMUS_NESTED_LOCKING |
951 | else | 1664 | case RSM_MUTEX: |
952 | err = -ENOMEM; | 1665 | *lock = gsnedf_new_rsm_mutex(); |
953 | break; | 1666 | break; |
954 | 1667 | ||
1668 | case IKGLP_SEM: | ||
1669 | *lock = gsnedf_new_ikglp(args); | ||
1670 | break; | ||
1671 | #endif | ||
1672 | case KFMLP_SEM: | ||
1673 | *lock = gsnedf_new_kfmlp(args); | ||
1674 | break; | ||
1675 | default: | ||
1676 | err = -ENXIO; | ||
1677 | goto UNSUPPORTED_LOCK; | ||
955 | }; | 1678 | }; |
956 | 1679 | ||
1680 | if (*lock) | ||
1681 | err = 0; | ||
1682 | else | ||
1683 | err = -ENOMEM; | ||
1684 | |||
1685 | UNSUPPORTED_LOCK: | ||
957 | return err; | 1686 | return err; |
958 | } | 1687 | } |
959 | 1688 | ||
1689 | #endif // CONFIG_LITMUS_LOCKING | ||
1690 | |||
1691 | |||
1692 | |||
1693 | |||
1694 | |||
1695 | #ifdef CONFIG_LITMUS_AFFINITY_LOCKING | ||
1696 | static struct affinity_observer_ops gsnedf_kfmlp_affinity_ops = { | ||
1697 | .close = kfmlp_aff_obs_close, | ||
1698 | .deallocate = kfmlp_aff_obs_free, | ||
1699 | }; | ||
1700 | |||
1701 | #ifdef CONFIG_LITMUS_NESTED_LOCKING | ||
1702 | static struct affinity_observer_ops gsnedf_ikglp_affinity_ops = { | ||
1703 | .close = ikglp_aff_obs_close, | ||
1704 | .deallocate = ikglp_aff_obs_free, | ||
1705 | }; | ||
960 | #endif | 1706 | #endif |
961 | 1707 | ||
1708 | static long gsnedf_allocate_affinity_observer( | ||
1709 | struct affinity_observer **aff_obs, | ||
1710 | int type, | ||
1711 | void* __user args) | ||
1712 | { | ||
1713 | int err; | ||
1714 | |||
1715 | switch (type) { | ||
1716 | |||
1717 | case KFMLP_SIMPLE_GPU_AFF_OBS: | ||
1718 | *aff_obs = kfmlp_simple_gpu_aff_obs_new(&gsnedf_kfmlp_affinity_ops, args); | ||
1719 | break; | ||
1720 | |||
1721 | case KFMLP_GPU_AFF_OBS: | ||
1722 | *aff_obs = kfmlp_gpu_aff_obs_new(&gsnedf_kfmlp_affinity_ops, args); | ||
1723 | break; | ||
1724 | |||
1725 | #ifdef CONFIG_LITMUS_NESTED_LOCKING | ||
1726 | case IKGLP_SIMPLE_GPU_AFF_OBS: | ||
1727 | *aff_obs = ikglp_simple_gpu_aff_obs_new(&gsnedf_ikglp_affinity_ops, args); | ||
1728 | break; | ||
1729 | |||
1730 | case IKGLP_GPU_AFF_OBS: | ||
1731 | *aff_obs = ikglp_gpu_aff_obs_new(&gsnedf_ikglp_affinity_ops, args); | ||
1732 | break; | ||
1733 | #endif | ||
1734 | default: | ||
1735 | err = -ENXIO; | ||
1736 | goto UNSUPPORTED_AFF_OBS; | ||
1737 | }; | ||
1738 | |||
1739 | if (*aff_obs) | ||
1740 | err = 0; | ||
1741 | else | ||
1742 | err = -ENOMEM; | ||
1743 | |||
1744 | UNSUPPORTED_AFF_OBS: | ||
1745 | return err; | ||
1746 | } | ||
1747 | #endif | ||
1748 | |||
1749 | |||
1750 | |||
1751 | |||
962 | 1752 | ||
963 | static long gsnedf_activate_plugin(void) | 1753 | static long gsnedf_activate_plugin(void) |
964 | { | 1754 | { |
965 | int cpu; | 1755 | int cpu; |
966 | cpu_entry_t *entry; | 1756 | cpu_entry_t *entry; |
967 | 1757 | ||
968 | bheap_init(&gsnedf_cpu_heap); | 1758 | INIT_BINHEAP_HANDLE(&gsnedf_cpu_heap, cpu_lower_prio); |
969 | #ifdef CONFIG_RELEASE_MASTER | 1759 | #ifdef CONFIG_RELEASE_MASTER |
970 | gsnedf.release_master = atomic_read(&release_master_cpu); | 1760 | gsnedf.release_master = atomic_read(&release_master_cpu); |
971 | #endif | 1761 | #endif |
972 | 1762 | ||
973 | for_each_online_cpu(cpu) { | 1763 | for_each_online_cpu(cpu) { |
974 | entry = &per_cpu(gsnedf_cpu_entries, cpu); | 1764 | entry = &per_cpu(gsnedf_cpu_entries, cpu); |
975 | bheap_node_init(&entry->hn, entry); | 1765 | INIT_BINHEAP_NODE(&entry->hn); |
976 | entry->linked = NULL; | 1766 | entry->linked = NULL; |
977 | entry->scheduled = NULL; | 1767 | entry->scheduled = NULL; |
978 | #ifdef CONFIG_RELEASE_MASTER | 1768 | #ifdef CONFIG_RELEASE_MASTER |
@@ -986,6 +1776,20 @@ static long gsnedf_activate_plugin(void) | |||
986 | } | 1776 | } |
987 | #endif | 1777 | #endif |
988 | } | 1778 | } |
1779 | |||
1780 | #ifdef CONFIG_LITMUS_PAI_SOFTIRQD | ||
1781 | gsnedf_pending_tasklets.head = NULL; | ||
1782 | gsnedf_pending_tasklets.tail = &(gsnedf_pending_tasklets.head); | ||
1783 | #endif | ||
1784 | |||
1785 | #ifdef CONFIG_LITMUS_SOFTIRQD | ||
1786 | spawn_klitirqd(NULL); | ||
1787 | #endif | ||
1788 | |||
1789 | #ifdef CONFIG_LITMUS_NVIDIA | ||
1790 | init_nvidia_info(); | ||
1791 | #endif | ||
1792 | |||
989 | return 0; | 1793 | return 0; |
990 | } | 1794 | } |
991 | 1795 | ||
@@ -1002,8 +1806,31 @@ static struct sched_plugin gsn_edf_plugin __cacheline_aligned_in_smp = { | |||
1002 | .task_block = gsnedf_task_block, | 1806 | .task_block = gsnedf_task_block, |
1003 | .admit_task = gsnedf_admit_task, | 1807 | .admit_task = gsnedf_admit_task, |
1004 | .activate_plugin = gsnedf_activate_plugin, | 1808 | .activate_plugin = gsnedf_activate_plugin, |
1809 | .compare = edf_higher_prio, | ||
1005 | #ifdef CONFIG_LITMUS_LOCKING | 1810 | #ifdef CONFIG_LITMUS_LOCKING |
1006 | .allocate_lock = gsnedf_allocate_lock, | 1811 | .allocate_lock = gsnedf_allocate_lock, |
1812 | .increase_prio = increase_priority_inheritance, | ||
1813 | .decrease_prio = decrease_priority_inheritance, | ||
1814 | #endif | ||
1815 | #ifdef CONFIG_LITMUS_NESTED_LOCKING | ||
1816 | .nested_increase_prio = nested_increase_priority_inheritance, | ||
1817 | .nested_decrease_prio = nested_decrease_priority_inheritance, | ||
1818 | .__compare = __edf_higher_prio, | ||
1819 | #endif | ||
1820 | #ifdef CONFIG_LITMUS_DGL_SUPPORT | ||
1821 | .get_dgl_spinlock = gsnedf_get_dgl_spinlock, | ||
1822 | #endif | ||
1823 | #ifdef CONFIG_LITMUS_AFFINITY_LOCKING | ||
1824 | .allocate_aff_obs = gsnedf_allocate_affinity_observer, | ||
1825 | #endif | ||
1826 | #ifdef CONFIG_LITMUS_SOFTIRQD | ||
1827 | .increase_prio_klitirqd = increase_priority_inheritance_klitirqd, | ||
1828 | .decrease_prio_klitirqd = decrease_priority_inheritance_klitirqd, | ||
1829 | #endif | ||
1830 | #ifdef CONFIG_LITMUS_PAI_SOFTIRQD | ||
1831 | .enqueue_pai_tasklet = gsnedf_enqueue_pai_tasklet, | ||
1832 | .change_prio_pai_tasklet = gsnedf_change_prio_pai_tasklet, | ||
1833 | .run_tasklets = gsnedf_run_tasklets, | ||
1007 | #endif | 1834 | #endif |
1008 | }; | 1835 | }; |
1009 | 1836 | ||
@@ -1013,15 +1840,20 @@ static int __init init_gsn_edf(void) | |||
1013 | int cpu; | 1840 | int cpu; |
1014 | cpu_entry_t *entry; | 1841 | cpu_entry_t *entry; |
1015 | 1842 | ||
1016 | bheap_init(&gsnedf_cpu_heap); | 1843 | INIT_BINHEAP_HANDLE(&gsnedf_cpu_heap, cpu_lower_prio); |
1017 | /* initialize CPU state */ | 1844 | /* initialize CPU state */ |
1018 | for (cpu = 0; cpu < NR_CPUS; cpu++) { | 1845 | for (cpu = 0; cpu < NR_CPUS; ++cpu) { |
1019 | entry = &per_cpu(gsnedf_cpu_entries, cpu); | 1846 | entry = &per_cpu(gsnedf_cpu_entries, cpu); |
1020 | gsnedf_cpus[cpu] = entry; | 1847 | gsnedf_cpus[cpu] = entry; |
1021 | entry->cpu = cpu; | 1848 | entry->cpu = cpu; |
1022 | entry->hn = &gsnedf_heap_node[cpu]; | 1849 | |
1023 | bheap_node_init(&entry->hn, entry); | 1850 | INIT_BINHEAP_NODE(&entry->hn); |
1024 | } | 1851 | } |
1852 | |||
1853 | #ifdef CONFIG_LITMUS_DGL_SUPPORT | ||
1854 | raw_spin_lock_init(&dgl_lock); | ||
1855 | #endif | ||
1856 | |||
1025 | edf_domain_init(&gsnedf, NULL, gsnedf_release_jobs); | 1857 | edf_domain_init(&gsnedf, NULL, gsnedf_release_jobs); |
1026 | return register_sched_plugin(&gsn_edf_plugin); | 1858 | return register_sched_plugin(&gsn_edf_plugin); |
1027 | } | 1859 | } |