path: root/litmus/sched_gsn_edf.c
Diffstat (limited to 'litmus/sched_gsn_edf.c')
 litmus/sched_gsn_edf.c | 756 +++++++++++++++++++++++++++++++++++++++++++++----
 1 file changed, 711 insertions(+), 45 deletions(-)
diff --git a/litmus/sched_gsn_edf.c b/litmus/sched_gsn_edf.c
index 3092797480f8..d04e0703c154 100644
--- a/litmus/sched_gsn_edf.c
+++ b/litmus/sched_gsn_edf.c
@@ -12,6 +12,8 @@
 #include <linux/percpu.h>
 #include <linux/sched.h>
 #include <linux/slab.h>
+#include <linux/uaccess.h>
+
 
 #include <litmus/litmus.h>
 #include <litmus/jobs.h>
@@ -25,6 +27,19 @@
 
 #include <linux/module.h>
 
+#ifdef CONFIG_SCHED_CPU_AFFINITY
+#include <litmus/affinity.h>
+#endif
+
+#ifdef CONFIG_LITMUS_SOFTIRQD
+#include <litmus/litmus_softirq.h>
+#endif
+
+#ifdef CONFIG_LITMUS_NVIDIA
+#include <litmus/nvidia_info.h>
+#endif
+
+
 /* Overview of GSN-EDF operations.
  *
  * For a detailed explanation of GSN-EDF have a look at the FMLP paper. This
@@ -253,21 +268,52 @@ static noinline void requeue(struct task_struct* task)
 	}
 }
 
+#ifdef CONFIG_SCHED_CPU_AFFINITY
+static cpu_entry_t* gsnedf_get_nearest_available_cpu(cpu_entry_t* start)
+{
+	cpu_entry_t* affinity;
+
+	get_nearest_available_cpu(affinity, start, gsnedf_cpu_entries,
+#ifdef CONFIG_RELEASE_MASTER
+			gsnedf.release_master
+#else
+			-1
+#endif
+			);
+
+	return(affinity);
+}
+#endif
+
 /* check for any necessary preemptions */
 static void check_for_preemptions(void)
 {
 	struct task_struct *task;
-	cpu_entry_t* last;
+	cpu_entry_t *last;
 
 	for(last = lowest_prio_cpu();
 	    edf_preemption_needed(&gsnedf, last->linked);
 	    last = lowest_prio_cpu()) {
 		/* preemption necessary */
 		task = __take_ready(&gsnedf);
-		TRACE("check_for_preemptions: attempting to link task %d to %d\n",
-		      task->pid, last->cpu);
+
+#ifdef CONFIG_SCHED_CPU_AFFINITY
+		{
+			cpu_entry_t* affinity = gsnedf_get_nearest_available_cpu(
+					&per_cpu(gsnedf_cpu_entries, task_cpu(task)));
+			if(affinity)
+				last = affinity;
+			else if(last->linked)
+				requeue(last->linked);
+		}
+#else
 		if (last->linked)
 			requeue(last->linked);
+#endif
+
+		TRACE("check_for_preemptions: attempting to link task %d to %d\n",
+		      task->pid, last->cpu);
+
 		link_task_to_cpu(task, last);
 		preempt(last);
 	}
@@ -277,7 +323,7 @@ static void check_for_preemptions(void)
 static noinline void gsnedf_job_arrival(struct task_struct* task)
 {
 	BUG_ON(!task);
 
 	requeue(task);
 	check_for_preemptions();
 }
@@ -298,9 +344,13 @@ static void gsnedf_release_jobs(rt_domain_t* rt, struct bheap* tasks)
 static noinline void job_completion(struct task_struct *t, int forced)
 {
 	BUG_ON(!t);
 
 	sched_trace_task_completion(t, forced);
 
+#ifdef CONFIG_LITMUS_NVIDIA
+	atomic_set(&tsk_rt(t)->nv_int_count, 0);
+#endif
+
 	TRACE_TASK(t, "job_completion().\n");
 
 	/* set flags */
@@ -401,17 +451,19 @@ static struct task_struct* gsnedf_schedule(struct task_struct * prev)
 	TRACE_TASK(prev, "invoked gsnedf_schedule.\n");
 #endif
 
+	/*
 	if (exists)
 		TRACE_TASK(prev,
 			   "blocks:%d out_of_time:%d np:%d sleep:%d preempt:%d "
 			   "state:%d sig:%d\n",
 			   blocks, out_of_time, np, sleep, preempt,
 			   prev->state, signal_pending(prev));
+	*/
+
 	if (entry->linked && preempt)
 		TRACE_TASK(prev, "will be preempted by %s/%d\n",
 			   entry->linked->comm, entry->linked->pid);
 
-
 	/* If a task blocks we have no choice but to reschedule.
 	 */
 	if (blocks)
@@ -456,12 +508,15 @@ static struct task_struct* gsnedf_schedule(struct task_struct * prev)
 			entry->scheduled->rt_param.scheduled_on = NO_CPU;
 			TRACE_TASK(entry->scheduled, "scheduled_on = NO_CPU\n");
 		}
-	} else
+	}
+	else
+	{
 		/* Only override Linux scheduler if we have a real-time task
 		 * scheduled that needs to continue.
 		 */
 		if (exists)
 			next = prev;
+	}
 
 	sched_state_task_picked();
 
@@ -486,8 +541,9 @@ static struct task_struct* gsnedf_schedule(struct task_struct * prev)
 static void gsnedf_finish_switch(struct task_struct *prev)
 {
 	cpu_entry_t* entry = &__get_cpu_var(gsnedf_cpu_entries);
 
 	entry->scheduled = is_realtime(current) ? current : NULL;
+
 #ifdef WANT_ALL_SCHED_EVENTS
 	TRACE_TASK(prev, "switched away from\n");
 #endif
@@ -536,11 +592,14 @@ static void gsnedf_task_new(struct task_struct * t, int on_rq, int running)
 static void gsnedf_task_wake_up(struct task_struct *task)
 {
 	unsigned long flags;
 	lt_t now;
 
 	TRACE_TASK(task, "wake_up at %llu\n", litmus_clock());
 
 	raw_spin_lock_irqsave(&gsnedf_lock, flags);
+
+
+#if 0 // sporadic task model
 	/* We need to take suspensions because of semaphores into
 	 * account! If a job resumes after being suspended due to acquiring
 	 * a semaphore, it should never be treated as a new job release.
@@ -562,19 +621,26 @@ static void gsnedf_task_wake_up(struct task_struct *task)
 			}
 		}
 	}
+#else // periodic task model
+	set_rt_flags(task, RT_F_RUNNING);
+#endif
+
 	gsnedf_job_arrival(task);
 	raw_spin_unlock_irqrestore(&gsnedf_lock, flags);
 }
 
 static void gsnedf_task_block(struct task_struct *t)
 {
+	// TODO: is this called on preemption??
 	unsigned long flags;
 
 	TRACE_TASK(t, "block at %llu\n", litmus_clock());
 
 	/* unlink if necessary */
 	raw_spin_lock_irqsave(&gsnedf_lock, flags);
+
 	unlink(t);
+
 	raw_spin_unlock_irqrestore(&gsnedf_lock, flags);
 
 	BUG_ON(!is_realtime(t));
@@ -608,51 +674,53 @@ static long gsnedf_admit_task(struct task_struct* tsk)
 
 #include <litmus/fdso.h>
 
-/* called with IRQs off */
-static void set_priority_inheritance(struct task_struct* t, struct task_struct* prio_inh)
+
+static void __set_priority_inheritance(struct task_struct* t, struct task_struct* prio_inh)
 {
 	int linked_on;
 	int check_preempt = 0;
 
-	raw_spin_lock(&gsnedf_lock);
-
-	TRACE_TASK(t, "inherits priority from %s/%d\n", prio_inh->comm, prio_inh->pid);
+	if(prio_inh != NULL)
+		TRACE_TASK(t, "inherits priority from %s/%d\n", prio_inh->comm, prio_inh->pid);
+	else
+		TRACE_TASK(t, "inherits priority from %p\n", prio_inh);
+
+	sched_trace_eff_prio_change(t, prio_inh);
+
 	tsk_rt(t)->inh_task = prio_inh;
 
 	linked_on = tsk_rt(t)->linked_on;
 
 	/* If it is scheduled, then we need to reorder the CPU heap. */
 	if (linked_on != NO_CPU) {
 		TRACE_TASK(t, "%s: linked on %d\n",
 			   __FUNCTION__, linked_on);
 		/* Holder is scheduled; need to re-order CPUs.
 		 * We can't use heap_decrease() here since
 		 * the cpu_heap is ordered in reverse direction, so
 		 * it is actually an increase. */
 		bheap_delete(cpu_lower_prio, &gsnedf_cpu_heap,
 			     gsnedf_cpus[linked_on]->hn);
 		bheap_insert(cpu_lower_prio, &gsnedf_cpu_heap,
 			     gsnedf_cpus[linked_on]->hn);
 	} else {
 		/* holder may be queued: first stop queue changes */
 		raw_spin_lock(&gsnedf.release_lock);
 		if (is_queued(t)) {
-			TRACE_TASK(t, "%s: is queued\n",
-				   __FUNCTION__);
+			TRACE_TASK(t, "%s: is queued\n", __FUNCTION__);
+
 			/* We need to update the position of holder in some
 			 * heap. Note that this could be a release heap if we
 			 * budget enforcement is used and this job overran. */
-			check_preempt =
-				!bheap_decrease(edf_ready_order,
-						tsk_rt(t)->heap_node);
+			check_preempt = !bheap_decrease(edf_ready_order, tsk_rt(t)->heap_node);
+
 		} else {
 			/* Nothing to do: if it is not queued and not linked
 			 * then it is either sleeping or currently being moved
 			 * by other code (e.g., a timer interrupt handler) that
 			 * will use the correct priority when enqueuing the
 			 * task. */
-			TRACE_TASK(t, "%s: is NOT queued => Done.\n",
-				   __FUNCTION__);
+			TRACE_TASK(t, "%s: is NOT queued => Done.\n", __FUNCTION__);
 		}
 		raw_spin_unlock(&gsnedf.release_lock);
 
@@ -666,34 +734,148 @@ static void set_priority_inheritance(struct task_struct* t, struct task_struct*
 			/* heap_decrease() hit the top level of the heap: make
 			 * sure preemption checks get the right task, not the
 			 * potentially stale cache. */
-			bheap_uncache_min(edf_ready_order,
-					  &gsnedf.ready_queue);
+			bheap_uncache_min(edf_ready_order, &gsnedf.ready_queue);
 			check_for_preemptions();
 		}
 	}
+}
 
+/* called with IRQs off */
+static void set_priority_inheritance(struct task_struct* t, struct task_struct* prio_inh)
+{
+	raw_spin_lock(&gsnedf_lock);
+
+	__set_priority_inheritance(t, prio_inh);
+
+#ifdef CONFIG_LITMUS_SOFTIRQD
+	if(tsk_rt(t)->cur_klitirqd != NULL)
+	{
+		TRACE_TASK(t, "%s/%d inherits a new priority!\n",
+			tsk_rt(t)->cur_klitirqd->comm, tsk_rt(t)->cur_klitirqd->pid);
+
+		__set_priority_inheritance(tsk_rt(t)->cur_klitirqd, prio_inh);
+	}
+#endif
+
 	raw_spin_unlock(&gsnedf_lock);
 }
 
+
+/* called with IRQs off */
+static void __clear_priority_inheritance(struct task_struct* t)
+{
+	TRACE_TASK(t, "priority restored\n");
+
+	if(tsk_rt(t)->scheduled_on != NO_CPU)
+	{
+		sched_trace_eff_prio_change(t, NULL);
+
+		tsk_rt(t)->inh_task = NULL;
+
+		/* Check if rescheduling is necessary. We can't use heap_decrease()
+		 * since the priority was effectively lowered. */
+		unlink(t);
+		gsnedf_job_arrival(t);
+	}
+	else
+	{
+		__set_priority_inheritance(t, NULL);
+	}
+
+#ifdef CONFIG_LITMUS_SOFTIRQD
+	if(tsk_rt(t)->cur_klitirqd != NULL)
+	{
+		TRACE_TASK(t, "%s/%d inheritance set back to owner.\n",
+			tsk_rt(t)->cur_klitirqd->comm, tsk_rt(t)->cur_klitirqd->pid);
+
+		if(tsk_rt(tsk_rt(t)->cur_klitirqd)->scheduled_on != NO_CPU)
+		{
+			sched_trace_eff_prio_change(tsk_rt(t)->cur_klitirqd, t);
+
+			tsk_rt(tsk_rt(t)->cur_klitirqd)->inh_task = t;
+
+			/* Check if rescheduling is necessary. We can't use heap_decrease()
+			 * since the priority was effectively lowered. */
+			unlink(tsk_rt(t)->cur_klitirqd);
+			gsnedf_job_arrival(tsk_rt(t)->cur_klitirqd);
+		}
+		else
+		{
+			__set_priority_inheritance(tsk_rt(t)->cur_klitirqd, t);
+		}
+	}
+#endif
+}
+
 /* called with IRQs off */
 static void clear_priority_inheritance(struct task_struct* t)
 {
 	raw_spin_lock(&gsnedf_lock);
+	__clear_priority_inheritance(t);
+	raw_spin_unlock(&gsnedf_lock);
+}
 
-	/* A job only stops inheriting a priority when it releases a
-	 * resource. Thus we can make the following assumption.*/
-	BUG_ON(tsk_rt(t)->scheduled_on == NO_CPU);
-
-	TRACE_TASK(t, "priority restored\n");
-	tsk_rt(t)->inh_task = NULL;
+#ifdef CONFIG_LITMUS_SOFTIRQD
+/* called with IRQs off */
+static void set_priority_inheritance_klitirqd(struct task_struct* klitirqd,
+					      struct task_struct* old_owner,
+					      struct task_struct* new_owner)
+{
+	BUG_ON(!(tsk_rt(klitirqd)->is_proxy_thread));
+
+	raw_spin_lock(&gsnedf_lock);
+
+	if(old_owner != new_owner)
+	{
+		if(old_owner)
+		{
+			// unreachable?
+			tsk_rt(old_owner)->cur_klitirqd = NULL;
+		}
+
+		TRACE_TASK(klitirqd, "giving ownership to %s/%d.\n",
+				new_owner->comm, new_owner->pid);
 
-	/* Check if rescheduling is necessary. We can't use heap_decrease()
-	 * since the priority was effectively lowered. */
-	unlink(t);
-	gsnedf_job_arrival(t);
+		tsk_rt(new_owner)->cur_klitirqd = klitirqd;
+	}
+
+	__set_priority_inheritance(klitirqd,
+				   (tsk_rt(new_owner)->inh_task == NULL) ?
+				   new_owner :
+				   tsk_rt(new_owner)->inh_task);
+
+	raw_spin_unlock(&gsnedf_lock);
+}
 
+/* called with IRQs off */
+static void clear_priority_inheritance_klitirqd(struct task_struct* klitirqd,
+						struct task_struct* old_owner)
+{
+	BUG_ON(!(tsk_rt(klitirqd)->is_proxy_thread));
+
+	raw_spin_lock(&gsnedf_lock);
+
+	TRACE_TASK(klitirqd, "priority restored\n");
+
+	if(tsk_rt(klitirqd)->scheduled_on != NO_CPU)
+	{
+		tsk_rt(klitirqd)->inh_task = NULL;
+
+		/* Check if rescheduling is necessary. We can't use heap_decrease()
+		 * since the priority was effectively lowered. */
+		unlink(klitirqd);
+		gsnedf_job_arrival(klitirqd);
+	}
+	else
+	{
+		__set_priority_inheritance(klitirqd, NULL);
+	}
+
+	tsk_rt(old_owner)->cur_klitirqd = NULL;
+
 	raw_spin_unlock(&gsnedf_lock);
 }
+#endif
 
 
 /* ******************** FMLP support ********************** */
@@ -892,11 +1074,477 @@ static struct litmus_lock* gsnedf_new_fmlp(void)
 	return &sem->litmus_lock;
 }
 
+
+
+
+
+
+
+/* ******************** KFMLP support ********************** */
+
+/* struct for semaphore with priority inheritance */
+struct kfmlp_queue
+{
+	wait_queue_head_t wait;
+	struct task_struct* owner;
+	struct task_struct* hp_waiter;
+	int count; /* number of waiters + holder */
+};
+
+struct kfmlp_semaphore
+{
+	struct litmus_lock litmus_lock;
+
+	spinlock_t lock;
+
+	int num_resources; /* aka k */
+
+	struct kfmlp_queue *queues; /* array */
+	struct kfmlp_queue *shortest_queue; /* pointer to shortest queue */
+};
+
+static inline struct kfmlp_semaphore* kfmlp_from_lock(struct litmus_lock* lock)
+{
+	return container_of(lock, struct kfmlp_semaphore, litmus_lock);
+}
+
+static inline int kfmlp_get_idx(struct kfmlp_semaphore* sem,
+				struct kfmlp_queue* queue)
+{
+	return (queue - &sem->queues[0]);
+}
+
+static inline struct kfmlp_queue* kfmlp_get_queue(struct kfmlp_semaphore* sem,
+						  struct task_struct* holder)
+{
+	int i;
+	for(i = 0; i < sem->num_resources; ++i)
+		if(sem->queues[i].owner == holder)
+			return(&sem->queues[i]);
+	return(NULL);
+}
+
+/* caller is responsible for locking */
+static struct task_struct* kfmlp_find_hp_waiter(struct kfmlp_queue *kqueue,
+						struct task_struct *skip)
+{
+	struct list_head *pos;
+	struct task_struct *queued, *found = NULL;
+
+	list_for_each(pos, &kqueue->wait.task_list) {
+		queued = (struct task_struct*) list_entry(pos, wait_queue_t,
+							  task_list)->private;
+
+		/* Compare task prios, find high prio task. */
+		if (queued != skip && edf_higher_prio(queued, found))
+			found = queued;
+	}
+	return found;
+}
+
+static inline struct kfmlp_queue* kfmlp_find_shortest(
+	struct kfmlp_semaphore* sem,
+	struct kfmlp_queue* search_start)
+{
+	// we start our search at search_start instead of at the beginning of the
+	// queue list to load-balance across all resources.
+	struct kfmlp_queue* step = search_start;
+	struct kfmlp_queue* shortest = sem->shortest_queue;
+
+	do
+	{
+		step = (step+1 != &sem->queues[sem->num_resources]) ?
+			step+1 : &sem->queues[0];
+		if(step->count < shortest->count)
+		{
+			shortest = step;
+			if(step->count == 0)
+				break; /* can't get any shorter */
+		}
+	}while(step != search_start);
+
+	return(shortest);
+}
+
+static struct task_struct* kfmlp_remove_hp_waiter(struct kfmlp_semaphore* sem)
+{
+	/* must hold sem->lock */
+
+	struct kfmlp_queue *my_queue = NULL;
+	struct task_struct *max_hp = NULL;
+
+	struct list_head *pos;
+	struct task_struct *queued;
+	int i;
+
+	for(i = 0; i < sem->num_resources; ++i)
+	{
+		if( (sem->queues[i].count > 1) &&
+		    ((my_queue == NULL) ||
+		     (edf_higher_prio(sem->queues[i].hp_waiter, my_queue->hp_waiter))) )
+		{
+			my_queue = &sem->queues[i];
+		}
+	}
+
+	if(my_queue)
+	{
+		max_hp = my_queue->hp_waiter;
+
+		BUG_ON(!max_hp);
+
+		TRACE_CUR("queue %d: stealing %s/%d from queue %d\n",
+			  kfmlp_get_idx(sem, my_queue),
+			  max_hp->comm, max_hp->pid,
+			  kfmlp_get_idx(sem, my_queue));
+
+		my_queue->hp_waiter = kfmlp_find_hp_waiter(my_queue, max_hp);
+
+		/*
+		if(my_queue->hp_waiter)
+			TRACE_CUR("queue %d: new hp_waiter is %s/%d\n",
+				  kfmlp_get_idx(sem, my_queue),
+				  my_queue->hp_waiter->comm,
+				  my_queue->hp_waiter->pid);
+		else
+			TRACE_CUR("queue %d: new hp_waiter is %p\n",
+				  kfmlp_get_idx(sem, my_queue), NULL);
+		*/
+
+		raw_spin_lock(&gsnedf_lock);
+
+		/*
+		if(my_queue->owner)
+			TRACE_CUR("queue %d: owner is %s/%d\n",
+				  kfmlp_get_idx(sem, my_queue),
+				  my_queue->owner->comm,
+				  my_queue->owner->pid);
+		else
+			TRACE_CUR("queue %d: owner is %p\n",
+				  kfmlp_get_idx(sem, my_queue),
+				  NULL);
+		*/
+
+		if(tsk_rt(my_queue->owner)->inh_task == max_hp)
+		{
+			__clear_priority_inheritance(my_queue->owner);
+			if(my_queue->hp_waiter != NULL)
+			{
+				__set_priority_inheritance(my_queue->owner, my_queue->hp_waiter);
+			}
+		}
+		raw_spin_unlock(&gsnedf_lock);
+
+		list_for_each(pos, &my_queue->wait.task_list)
+		{
+			queued = (struct task_struct*) list_entry(pos, wait_queue_t,
+								  task_list)->private;
+			/* Compare task prios, find high prio task. */
+			if (queued == max_hp)
+			{
+				/*
+				TRACE_CUR("queue %d: found entry in wait queue. REMOVING!\n",
+					  kfmlp_get_idx(sem, my_queue));
+				*/
+				__remove_wait_queue(&my_queue->wait,
+						    list_entry(pos, wait_queue_t, task_list));
+				break;
+			}
+		}
+		--(my_queue->count);
+	}
+
+	return(max_hp);
+}
+
+int gsnedf_kfmlp_lock(struct litmus_lock* l)
+{
+	struct task_struct* t = current;
+	struct kfmlp_semaphore *sem = kfmlp_from_lock(l);
+	struct kfmlp_queue* my_queue;
+	wait_queue_t wait;
+	unsigned long flags;
+
+	if (!is_realtime(t))
+		return -EPERM;
+
+	spin_lock_irqsave(&sem->lock, flags);
+
+	my_queue = sem->shortest_queue;
+
+	if (my_queue->owner) {
+		/* resource is not free => must suspend and wait */
+		TRACE_CUR("queue %d: Resource is not free => must suspend and wait.\n",
+			  kfmlp_get_idx(sem, my_queue));
+
+		init_waitqueue_entry(&wait, t);
+
+		/* FIXME: interruptible would be nice some day */
+		set_task_state(t, TASK_UNINTERRUPTIBLE);
+
+		__add_wait_queue_tail_exclusive(&my_queue->wait, &wait);
+
+		/* check if we need to activate priority inheritance */
+		if (edf_higher_prio(t, my_queue->hp_waiter))
+		{
+			my_queue->hp_waiter = t;
+			if (edf_higher_prio(t, my_queue->owner))
+			{
+				set_priority_inheritance(my_queue->owner, my_queue->hp_waiter);
+			}
+		}
+
+		++(my_queue->count);
+		sem->shortest_queue = kfmlp_find_shortest(sem, my_queue);
+
+		/* release lock before sleeping */
+		spin_unlock_irqrestore(&sem->lock, flags);
+
+		/* We depend on the FIFO order. Thus, we don't need to recheck
+		 * when we wake up; we are guaranteed to have the lock since
+		 * there is only one wake up per release (or steal).
+		 */
+		schedule();
+
+
+		if(my_queue->owner == t)
+		{
+			TRACE_CUR("queue %d: acquired through waiting\n",
+				  kfmlp_get_idx(sem, my_queue));
+		}
+		else
+		{
+			/* this case may happen if our wait entry was stolen
+			   between queues. record where we went. */
+			my_queue = kfmlp_get_queue(sem, t);
+
+			BUG_ON(!my_queue);
+			TRACE_CUR("queue %d: acquired through stealing\n",
+				  kfmlp_get_idx(sem, my_queue));
+		}
+	}
+	else
+	{
+		TRACE_CUR("queue %d: acquired immediately\n",
+			  kfmlp_get_idx(sem, my_queue));
+
+		my_queue->owner = t;
+
+		++(my_queue->count);
+		sem->shortest_queue = kfmlp_find_shortest(sem, my_queue);
+
+		spin_unlock_irqrestore(&sem->lock, flags);
+	}
+
+	return kfmlp_get_idx(sem, my_queue);
+}
+
+int gsnedf_kfmlp_unlock(struct litmus_lock* l)
+{
+	struct task_struct *t = current, *next;
+	struct kfmlp_semaphore *sem = kfmlp_from_lock(l);
+	struct kfmlp_queue *my_queue;
+	unsigned long flags;
+	int err = 0;
+
+	spin_lock_irqsave(&sem->lock, flags);
+
+	my_queue = kfmlp_get_queue(sem, t);
+
+	if (!my_queue) {
+		err = -EINVAL;
+		goto out;
+	}
+
+	/* check if there are jobs waiting for this resource */
+	next = __waitqueue_remove_first(&my_queue->wait);
+	if (next) {
+		/*
+		TRACE_CUR("queue %d: ASSIGNING %s/%d as owner - next\n",
+			  kfmlp_get_idx(sem, my_queue),
+			  next->comm, next->pid);
+		*/
+		/* next becomes the resouce holder */
+		my_queue->owner = next;
+
+		--(my_queue->count);
+		if(my_queue->count < sem->shortest_queue->count)
+		{
+			sem->shortest_queue = my_queue;
+		}
+
+		TRACE_CUR("queue %d: lock ownership passed to %s/%d\n",
+			  kfmlp_get_idx(sem, my_queue), next->comm, next->pid);
+
+		/* determine new hp_waiter if necessary */
+		if (next == my_queue->hp_waiter) {
+			TRACE_TASK(next, "was highest-prio waiter\n");
+			/* next has the highest priority --- it doesn't need to
+			 * inherit. However, we need to make sure that the
+			 * next-highest priority in the queue is reflected in
+			 * hp_waiter. */
+			my_queue->hp_waiter = kfmlp_find_hp_waiter(my_queue, next);
+			if (my_queue->hp_waiter)
+				TRACE_TASK(my_queue->hp_waiter, "queue %d: is new highest-prio waiter\n", kfmlp_get_idx(sem, my_queue));
+			else
+				TRACE("queue %d: no further waiters\n", kfmlp_get_idx(sem, my_queue));
+		} else {
+			/* Well, if next is not the highest-priority waiter,
+			 * then it ought to inherit the highest-priority
+			 * waiter's priority. */
+			set_priority_inheritance(next, my_queue->hp_waiter);
+		}
+
+		/* wake up next */
+		wake_up_process(next);
+	}
+	else
+	{
+		TRACE_CUR("queue %d: looking to steal someone...\n", kfmlp_get_idx(sem, my_queue));
+
+		next = kfmlp_remove_hp_waiter(sem); /* returns NULL if nothing to steal */
+
+		/*
+		if(next)
+			TRACE_CUR("queue %d: ASSIGNING %s/%d as owner - steal\n",
+				  kfmlp_get_idx(sem, my_queue),
+				  next->comm, next->pid);
+		*/
+
+		my_queue->owner = next;
+
+		if(next)
+		{
+			TRACE_CUR("queue %d: lock ownership passed to %s/%d (which was stolen)\n",
+				  kfmlp_get_idx(sem, my_queue),
+				  next->comm, next->pid);
+
+			/* wake up next */
+			wake_up_process(next);
+		}
+		else
+		{
+			TRACE_CUR("queue %d: no one to steal.\n", kfmlp_get_idx(sem, my_queue));
+
+			--(my_queue->count);
+			if(my_queue->count < sem->shortest_queue->count)
+			{
+				sem->shortest_queue = my_queue;
+			}
+		}
+	}
+
+	/* we lose the benefit of priority inheritance (if any) */
+	if (tsk_rt(t)->inh_task)
+		clear_priority_inheritance(t);
+
+out:
+	spin_unlock_irqrestore(&sem->lock, flags);
+
+	return err;
+}
+
+int gsnedf_kfmlp_close(struct litmus_lock* l)
+{
+	struct task_struct *t = current;
+	struct kfmlp_semaphore *sem = kfmlp_from_lock(l);
+	struct kfmlp_queue *my_queue;
+	unsigned long flags;
+
+	int owner;
+
+	spin_lock_irqsave(&sem->lock, flags);
+
+	my_queue = kfmlp_get_queue(sem, t);
+	owner = (my_queue) ? (my_queue->owner == t) : 0;
+
+	spin_unlock_irqrestore(&sem->lock, flags);
+
+	if (owner)
+		gsnedf_kfmlp_unlock(l);
+
+	return 0;
+}
+
+void gsnedf_kfmlp_free(struct litmus_lock* l)
+{
+	struct kfmlp_semaphore *sem = kfmlp_from_lock(l);
+	kfree(sem->queues);
+	kfree(sem);
+}
+
+static struct litmus_lock_ops gsnedf_kfmlp_lock_ops = {
+	.close = gsnedf_kfmlp_close,
+	.lock = gsnedf_kfmlp_lock,
+	.unlock = gsnedf_kfmlp_unlock,
+	.deallocate = gsnedf_kfmlp_free,
+};
+
+static struct litmus_lock* gsnedf_new_kfmlp(void* __user arg, int* ret_code)
+{
+	struct kfmlp_semaphore* sem;
+	int num_resources = 0;
+	int i;
+
+	if(!access_ok(VERIFY_READ, arg, sizeof(num_resources)))
+	{
+		*ret_code = -EINVAL;
+		return(NULL);
+	}
+	if(__copy_from_user(&num_resources, arg, sizeof(num_resources)))
+	{
+		*ret_code = -EINVAL;
+		return(NULL);
+	}
+	if(num_resources < 1)
+	{
+		*ret_code = -EINVAL;
+		return(NULL);
+	}
+
+	sem = kmalloc(sizeof(*sem), GFP_KERNEL);
+	if(!sem)
+	{
+		*ret_code = -ENOMEM;
+		return NULL;
+	}
+
+	sem->queues = kmalloc(sizeof(struct kfmlp_queue)*num_resources, GFP_KERNEL);
+	if(!sem->queues)
+	{
+		kfree(sem);
+		*ret_code = -ENOMEM;
+		return NULL;
+	}
+
+	sem->litmus_lock.ops = &gsnedf_kfmlp_lock_ops;
+	spin_lock_init(&sem->lock);
+	sem->num_resources = num_resources;
+
+	for(i = 0; i < num_resources; ++i)
+	{
+		sem->queues[i].owner = NULL;
+		sem->queues[i].hp_waiter = NULL;
+		init_waitqueue_head(&sem->queues[i].wait);
+		sem->queues[i].count = 0;
+	}
+
+	sem->shortest_queue = &sem->queues[0];
+
+	*ret_code = 0;
+	return &sem->litmus_lock;
+}
+
+
+
+
+
 /* **** lock constructor **** */
 
 
 static long gsnedf_allocate_lock(struct litmus_lock **lock, int type,
-				 void* __user unused)
+				 void* __user arg)
 {
 	int err = -ENXIO;
 
@@ -911,7 +1559,10 @@ static long gsnedf_allocate_lock(struct litmus_lock **lock, int type,
 		else
 			err = -ENOMEM;
 		break;
 
+	case KFMLP_SEM:
+		*lock = gsnedf_new_kfmlp(arg, &err);
+		break;
 	};
 
 	return err;
@@ -919,7 +1570,6 @@ static long gsnedf_allocate_lock(struct litmus_lock **lock, int type,
 
 #endif
 
-
 static long gsnedf_activate_plugin(void)
 {
 	int cpu;
@@ -946,6 +1596,15 @@ static long gsnedf_activate_plugin(void)
 		}
 #endif
 	}
+
+#ifdef CONFIG_LITMUS_SOFTIRQD
+	spawn_klitirqd(NULL);
+#endif
+
+#ifdef CONFIG_LITMUS_NVIDIA
+	init_nvidia_info();
+#endif
+
 	return 0;
 }
 
@@ -963,8 +1622,15 @@ static struct sched_plugin gsn_edf_plugin __cacheline_aligned_in_smp = {
 	.admit_task		= gsnedf_admit_task,
 	.activate_plugin	= gsnedf_activate_plugin,
 #ifdef CONFIG_LITMUS_LOCKING
 	.allocate_lock		= gsnedf_allocate_lock,
+	.set_prio_inh		= set_priority_inheritance,
+	.clear_prio_inh		= clear_priority_inheritance,
+#endif
+#ifdef CONFIG_LITMUS_SOFTIRQD
+	.set_prio_inh_klitirqd = set_priority_inheritance_klitirqd,
+	.clear_prio_inh_klitirqd = clear_priority_inheritance_klitirqd,
 #endif
+
 };
 
 
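
Illustrative sketch (not part of the patch): the load-balancing rule implemented by kfmlp_find_shortest() in the KFMLP hunk above can be exercised on its own. The toy program below uses a simplified stand-in struct (only the count field, no wait queue or owner) and invented names (toy_queue, find_shortest) to show how the circular scan that begins just past search_start settles on the least-loaded queue.

#include <stdio.h>

struct toy_queue { int count; };

/* circular scan starting just after 'start', mirroring the logic of
 * kfmlp_find_shortest(): walk all k queues once, remember the shortest,
 * and stop early if an empty queue is found. */
static struct toy_queue* find_shortest(struct toy_queue* queues, int k,
				       struct toy_queue* start,
				       struct toy_queue* cached_shortest)
{
	struct toy_queue* step = start;
	struct toy_queue* shortest = cached_shortest;

	do {
		step = (step + 1 != &queues[k]) ? step + 1 : &queues[0];
		if (step->count < shortest->count) {
			shortest = step;
			if (step->count == 0)
				break; /* can't get any shorter */
		}
	} while (step != start);

	return shortest;
}

int main(void)
{
	struct toy_queue q[4] = { {2}, {1}, {3}, {0} };
	/* pretend q[1] was the cached shortest queue and we just queued on q[0] */
	struct toy_queue* s = find_shortest(q, 4, &q[0], &q[1]);
	printf("shortest queue index: %ld (count %d)\n", (long)(s - q), s->count);
	return 0;
}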