Diffstat (limited to 'litmus/sched_gsn_edf.c')
 litmus/sched_gsn_edf.c | 102 ++++++++++++++++++++++++++++++++-----------------
 1 file changed, 68 insertions(+), 34 deletions(-)
diff --git a/litmus/sched_gsn_edf.c b/litmus/sched_gsn_edf.c
index d5d834cc411b..586b7c3f7de1 100644
--- a/litmus/sched_gsn_edf.c
+++ b/litmus/sched_gsn_edf.c
@@ -25,6 +25,10 @@
 
 #include <linux/module.h>
 
+#ifdef CONFIG_LITMUS_SOFTIRQD
+#include <litmus/litmus_softirq.h>
+#endif
+
 /* Overview of GSN-EDF operations.
  *
  * For a detailed explanation of GSN-EDF have a look at the FMLP paper. This
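
Note: <litmus/litmus_softirq.h> itself is not part of this diff. Judging only from the call sites added further down (spawn_klitirqd() in gsnedf_activate_plugin() and kill_klitirqd() in the new gsnedf_deactivate_plugin()), the header is expected to declare at least the following; these signatures are inferred from the calls, not taken from the header:

	/* assumed interface of <litmus/litmus_softirq.h>, inferred from the
	 * call sites in this patch rather than from the header itself */
	void spawn_klitirqd(void);	/* start the klitirqd daemon(s) on plugin activation */
	void kill_klitirqd(void);	/* stop them again on plugin deactivation */
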
@@ -277,7 +281,7 @@ static void check_for_preemptions(void)
 static noinline void gsnedf_job_arrival(struct task_struct* task)
 {
 	BUG_ON(!task);
-	
+
 	requeue(task);
 	check_for_preemptions();
 }
@@ -298,7 +302,7 @@ static void gsnedf_release_jobs(rt_domain_t* rt, struct bheap* tasks)
 static noinline void job_completion(struct task_struct *t, int forced)
 {
 	BUG_ON(!t);
-	
+
 	sched_trace_task_completion(t, forced);
 
 	TRACE_TASK(t, "job_completion().\n");
@@ -534,8 +538,8 @@ static void gsnedf_task_new(struct task_struct * t, int on_rq, int running)
 static void gsnedf_task_wake_up(struct task_struct *task)
 {
 	unsigned long flags;
 	lt_t now;
-	
+
 	TRACE_TASK(task, "wake_up at %llu\n", litmus_clock());
-	
+
 	raw_spin_lock_irqsave(&gsnedf_lock, flags);
@@ -606,43 +610,44 @@ static long gsnedf_admit_task(struct task_struct* tsk)
 
 #include <litmus/fdso.h>
 
-/* called with IRQs off */
-static void set_priority_inheritance(struct task_struct* t, struct task_struct* prio_inh)
+inline static void __set_priority_inheritance(struct task_struct* t, struct task_struct* prio_inh)
 {
 	int linked_on;
 	int check_preempt = 0;
 
-	raw_spin_lock(&gsnedf_lock);
-
-	TRACE_TASK(t, "inherits priority from %s/%d\n", prio_inh->comm, prio_inh->pid);
+	if(prio_inh != NULL)
+		TRACE_TASK(t, "inherits priority from %s/%d\n", prio_inh->comm, prio_inh->pid);
+	else
+		TRACE_TASK(t, "inherits priority from NULL\n");
+
 	tsk_rt(t)->inh_task = prio_inh;
 
 	linked_on = tsk_rt(t)->linked_on;
 
 	/* If it is scheduled, then we need to reorder the CPU heap. */
 	if (linked_on != NO_CPU) {
 		TRACE_TASK(t, "%s: linked on %d\n",
 			   __FUNCTION__, linked_on);
 		/* Holder is scheduled; need to re-order CPUs.
 		 * We can't use heap_decrease() here since
 		 * the cpu_heap is ordered in reverse direction, so
 		 * it is actually an increase. */
 		bheap_delete(cpu_lower_prio, &gsnedf_cpu_heap,
 			     gsnedf_cpus[linked_on]->hn);
 		bheap_insert(cpu_lower_prio, &gsnedf_cpu_heap,
 			     gsnedf_cpus[linked_on]->hn);
 	} else {
 		/* holder may be queued: first stop queue changes */
 		raw_spin_lock(&gsnedf.release_lock);
 		if (is_queued(t)) {
 			TRACE_TASK(t, "%s: is queued\n",
 				   __FUNCTION__);
 			/* We need to update the position of holder in some
 			 * heap. Note that this could be a release heap if
 			 * budget enforcement is used and this job overran. */
 			check_preempt =
 				!bheap_decrease(edf_ready_order,
 						tsk_rt(t)->heap_node);
 		} else {
 			/* Nothing to do: if it is not queued and not linked
 			 * then it is either sleeping or currently being moved
@@ -650,10 +655,10 @@ static void set_priority_inheritance(struct task_struct* t, struct task_struct*
 			 * will use the correct priority when enqueuing the
 			 * task. */
 			TRACE_TASK(t, "%s: is NOT queued => Done.\n",
 				   __FUNCTION__);
 		}
 		raw_spin_unlock(&gsnedf.release_lock);
-		
+
 		/* If holder was enqueued in a release heap, then the following
 		 * preemption check is pointless, but we can't easily detect
 		 * that case. If you want to fix this, then consider that
@@ -665,30 +670,42 @@ static void set_priority_inheritance(struct task_struct* t, struct task_struct*
 			 * sure preemption checks get the right task, not the
 			 * potentially stale cache. */
 			bheap_uncache_min(edf_ready_order,
 					  &gsnedf.ready_queue);
 			check_for_preemptions();
 		}
 	}
-
-	raw_spin_unlock(&gsnedf_lock);
 }
 
 /* called with IRQs off */
-static void clear_priority_inheritance(struct task_struct* t)
+static void set_priority_inheritance(struct task_struct* t, struct task_struct* prio_inh)
 {
 	raw_spin_lock(&gsnedf_lock);
 
-	/* A job only stops inheriting a priority when it releases a
-	 * resource. Thus we can make the following assumption. */
-	BUG_ON(tsk_rt(t)->scheduled_on == NO_CPU);
+	__set_priority_inheritance(t, prio_inh);
 
-	TRACE_TASK(t, "priority restored\n");
-	tsk_rt(t)->inh_task = NULL;
+	raw_spin_unlock(&gsnedf_lock);
+}
 
-	/* Check if rescheduling is necessary. We can't use heap_decrease()
-	 * since the priority was effectively lowered. */
-	unlink(t);
-	gsnedf_job_arrival(t);
+/* called with IRQs off */
+static void clear_priority_inheritance(struct task_struct* t)
+{
+	raw_spin_lock(&gsnedf_lock);
+
+	TRACE_TASK(t, "priority restored\n");
+
+	if(tsk_rt(t)->scheduled_on != NO_CPU)
+	{
+		tsk_rt(t)->inh_task = NULL;
+
+		/* Check if rescheduling is necessary. We can't use heap_decrease()
+		 * since the priority was effectively lowered. */
+		unlink(t);
+		gsnedf_job_arrival(t);
+	}
+	else
+	{
+		__set_priority_inheritance(t, NULL);
+	}
 
 	raw_spin_unlock(&gsnedf_lock);
 }
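
Note on the hunks above: the old set_priority_inheritance() acquired gsnedf_lock and did the work inline, and clear_priority_inheritance() asserted BUG_ON(tsk_rt(t)->scheduled_on == NO_CPU), i.e. it assumed the task was always on a CPU when its priority was restored. The patch splits the work into a lock-free helper, __set_priority_inheritance(), plus a thin locked wrapper, and the clear path now tolerates an off-CPU task by routing it through __set_priority_inheritance(t, NULL); the NULL case is handled explicitly by the new TRACE_TASK branch. A minimal caller sketch under the stated "called with IRQs off" contract; the example_* functions and the 'holder'/'hp_waiter' names are illustrative only, not from this patch:

	/* hypothetical lock-holder boosting path; illustrative names only */
	static void example_boost(struct task_struct* holder,
				  struct task_struct* hp_waiter)
	{
		/* takes gsnedf_lock internally; works whether or not
		 * holder is currently linked to a CPU */
		set_priority_inheritance(holder, hp_waiter);
	}

	static void example_unboost(struct task_struct* holder)
	{
		/* if holder is off-CPU, this now falls back to
		 * __set_priority_inheritance(holder, NULL) instead of
		 * hitting the old BUG_ON() */
		clear_priority_inheritance(holder);
	}
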
@@ -919,7 +936,6 @@ static long gsnedf_allocate_lock(struct litmus_lock **lock, int type,
 
 #endif
 
-
 static long gsnedf_activate_plugin(void)
 {
 	int cpu;
@@ -946,10 +962,22 @@ static long gsnedf_activate_plugin(void)
 		}
 #endif
 	}
+
+#ifdef CONFIG_LITMUS_SOFTIRQD
+	spawn_klitirqd();
+#endif
 
 	return 0;
 }
 
+static long gsnedf_deactivate_plugin(void)
+{
+#ifdef CONFIG_LITMUS_SOFTIRQD
+	kill_klitirqd();
+#endif
+	return 0;
+}
+
 /* Plugin object */
 static struct sched_plugin gsn_edf_plugin __cacheline_aligned_in_smp = {
 	.plugin_name		= "GSN-EDF",
@@ -966,6 +994,12 @@ static struct sched_plugin gsn_edf_plugin __cacheline_aligned_in_smp = {
 #ifdef CONFIG_LITMUS_LOCKING
 	.allocate_lock		= gsnedf_allocate_lock,
 #endif
+#ifdef CONFIG_LITMUS_SOFTIRQD
+	.set_prio_inh		= set_priority_inheritance,
+	.clear_prio_inh		= clear_priority_inheritance,
+
+	.deactivate_plugin	= gsnedf_deactivate_plugin,
+#endif
 };
 
 
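Note: .set_prio_inh and .clear_prio_inh are new callback slots (presumably added to struct sched_plugin elsewhere in this series), while .deactivate_plugin fills a hook this plugin previously left unset; keeping .deactivate_plugin inside the CONFIG_LITMUS_SOFTIRQD block is consistent, since gsnedf_deactivate_plugin() exists only to kill klitirqd. A hedged sketch of how plugin-agnostic code (e.g. klitirqd) might call through these hooks, assuming the usual LITMUS^RT active-plugin pointer 'litmus' from <litmus/sched_plugin.h>; the example_* functions are illustrative only:

	#include <litmus/sched_plugin.h>

	/* illustrative only; assumes the two callbacks exist on the active
	 * plugin exactly as this patch's initializers suggest */
	static void example_inherit(struct task_struct* daemon,
				    struct task_struct* owner)
	{
		if (litmus->set_prio_inh)
			litmus->set_prio_inh(daemon, owner);	/* run daemon at owner's priority */
	}

	static void example_restore(struct task_struct* daemon)
	{
		if (litmus->clear_prio_inh)
			litmus->clear_prio_inh(daemon);		/* drop back to base priority */
	}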