Diffstat (limited to 'litmus')
 litmus/sched_cedf.c      |  3 ++-
 litmus/sched_cfl_split.c | 54 +++++++++++++++++++++++++++++++++++++++++++++-
 2 files changed, 55 insertions(+), 2 deletions(-)
diff --git a/litmus/sched_cedf.c b/litmus/sched_cedf.c
index 8699f6d9d5b6..a25f2a474263 100644
--- a/litmus/sched_cedf.c
+++ b/litmus/sched_cedf.c
@@ -566,12 +566,13 @@ static struct task_struct* cedf_schedule(struct task_struct * prev)
 			entry->scheduled->rt_param.scheduled_on = NO_CPU;
 			TRACE_TASK(entry->scheduled, "scheduled_on = NO_CPU\n");
 		}
-	} else
+	} else {
 		/* Only override Linux scheduler if we have a real-time task
 		 * scheduled that needs to continue.
 		 */
 		if (exists)
 			next = prev;
+	}
 
 	sched_state_task_picked();
 	raw_spin_unlock(&cluster->cluster_lock);
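
Note on the braces added in this hunk (and in the matching hunk in sched_cfl_split.c below): a brace-less else takes exactly one statement, so the pre-patch code happened to be correct only because the comment-plus-if sequence is a single statement. An illustrative sketch of the hazard the braces prevent; this is not part of the patch, and do_something() is a made-up placeholder:

	} else
		/* a comment does not open a scope */
		do_something();		/* a later addition like this line... */
		if (exists)
			next = prev;	/* ...silently pushes this if out of the else,
					   making it run unconditionally */

With the explicit { } scope, any statement added to the else branch later stays inside it.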
diff --git a/litmus/sched_cfl_split.c b/litmus/sched_cfl_split.c
index 7d9302eb296b..af2b28230c72 100644
--- a/litmus/sched_cfl_split.c
+++ b/litmus/sched_cfl_split.c
@@ -49,6 +49,10 @@
 #include <litmus/affinity.h>
 #endif
 
+#ifdef CONFIG_SCHED_PGM
+#include <litmus/pgm.h>
+#endif
+
 /* to configure the cluster size */
 #include <litmus/litmus_proc.h>
 #include <linux/uaccess.h>
@@ -587,6 +591,49 @@ static struct task_struct* cflsplit_schedule(struct task_struct * prev)
 		TRACE_TASK(prev, "will be preempted by %s/%d\n",
 			entry->linked->comm, entry->linked->pid);
 
+#ifdef CONFIG_SCHED_PGM
+	if (exists) {
+		if (is_pgm_sending(entry->scheduled)) {
+			if (!is_pgm_satisfied(entry->scheduled)) {
+				if (!is_priority_boosted(entry->scheduled)) {
+					TRACE_TASK(entry->scheduled, "is sending PGM tokens and needs boosting.\n");
+					BUG_ON(is_pgm_satisfied(entry->scheduled));
+
+					/* We are either sending tokens or waiting for tokens.
+					   If waiting: boost priority so we'll be scheduled
+					   immediately when the needed tokens arrive.
+					   If sending: boost priority so no one (specifically, our
+					   consumers) will preempt us while signalling the token
+					   transmission.
+					 */
+					tsk_rt(entry->scheduled)->priority_boosted = 1;
+					tsk_rt(entry->scheduled)->boost_start_time = litmus_clock();
+
+					if (likely(!blocks)) {
+						unlink(entry->scheduled);
+						cflsplit_job_arrival(entry->scheduled);
+					}
+				}
+			}
+			else { /* sending is satisfied */
+				tsk_rt(entry->scheduled)->ctrl_page->pgm_sending = 0;
+				tsk_rt(entry->scheduled)->ctrl_page->pgm_satisfied = 0;
+
+				if (is_priority_boosted(entry->scheduled)) {
+					TRACE_TASK(entry->scheduled,
+						"is done sending PGM tokens and must relinquish boosting.\n");
+					/* clear boosting */
+					tsk_rt(entry->scheduled)->priority_boosted = 0;
+					if (likely(!blocks)) {
+						/* recheck priority */
+						unlink(entry->scheduled);
+						cflsplit_job_arrival(entry->scheduled);
+					}
+				}
+			}
+		}
+	}
+#endif
 
 	/* If a task blocks we have no choice but to reschedule.
 	 */
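
A note on the boost/unboost idiom in the hunk above: setting tsk_rt(t)->priority_boosted on its own does not reposition a task that is already linked or queued, which is presumably why the code unlinks the task and re-runs job arrival so the new effective priority takes hold. Condensed sketch of that pattern, using only names from the hunk; a reading aid, not a drop-in replacement:

	/* boost: the producer must not be preempted while signalling tokens */
	tsk_rt(t)->priority_boosted = 1;
	tsk_rt(t)->boost_start_time = litmus_clock();
	if (likely(!blocks)) {
		unlink(t);			/* drop from its current queue position */
		cflsplit_job_arrival(t);	/* re-queue under the boosted priority */
	}

Clearing the boost follows the mirror image: priority_boosted = 0, then the same unlink/re-arrival to recheck priority.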
@@ -639,12 +686,13 @@ static struct task_struct* cflsplit_schedule(struct task_struct * prev)
 			entry->scheduled->rt_param.scheduled_on = NO_CPU;
 			TRACE_TASK(entry->scheduled, "scheduled_on = NO_CPU\n");
 		}
-	} else
+	} else {
 		/* Only override Linux scheduler if we have a real-time task
 		 * scheduled that needs to continue.
 		 */
 		if (exists)
 			next = prev;
+	}
 
 	sched_state_task_picked();
 	raw_spin_unlock(&cluster->cluster_lock);
@@ -751,6 +799,10 @@ static void cflsplit_task_wake_up(struct task_struct *task)
 		cflsplit_release_at(task, now);
 		sched_trace_task_release(task);
 	}
+	if (is_pgm_waiting(task)) {
+		/* shift out release/deadline, if needed */
+		setup_pgm_release(task);
+	}
 	cflsplit_job_arrival(task);
 	raw_spin_unlock_irqrestore(&cluster->cluster_lock, flags);
 }
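
The wake-up hunk above covers the consumer side of PGM: a task that is still waiting on input tokens gets its release and deadline shifted by setup_pgm_release() before it is admitted, per the in-hunk comment. Condensed view of the resulting wake-up ordering, using only names from the hunk:

	if (is_pgm_waiting(task))
		setup_pgm_release(task);	/* shift release/deadline forward, if needed */
	cflsplit_job_arrival(task);		/* only then admit the job to the ready queue */

Doing the shift before cflsplit_job_arrival() matters: job arrival inserts the task according to its (possibly adjusted) release and deadline.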