author    Steven Rostedt <rostedt@goodmis.org>    2008-01-25 15:08:22 -0500
committer Ingo Molnar <mingo@elte.hu>             2008-01-25 15:08:22 -0500
commit    9a897c5a6701bcb6f099f7ca20194999102729fd (patch)
tree      9c5415d5e2dd115660200cbd246fe1343cd5cd5c /kernel
parent    4bf0b77158d581c952af237aec79d0604b78fe27 (diff)
sched: RT-balance, replace hooks with pre/post schedule and wakeup methods
To make the main sched.c code more agnostic to the scheduling classes, the specific hooks in the schedule code for RT class balancing are replaced with pre_schedule, post_schedule and task_wake_up methods. These methods may be used by any of the classes, but currently only the sched_rt class implements them.

Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
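The pattern at work here is a NULL-checked method table: the core scheduler calls a hook only if the class actually installed one, so classes that do not participate in balancing simply leave the pointer unset. A minimal, compilable sketch of the idea, using made-up names (demo_sched_class and friends are illustrative stand-ins, not the kernel's actual types):

#include <stdio.h>

/* Hypothetical, simplified stand-in for struct sched_class:
 * each method is optional; a class leaves unused hooks NULL. */
struct demo_sched_class {
        void (*pre_schedule)(int cpu);
        void (*post_schedule)(int cpu);
        void (*task_wake_up)(int cpu);
};

/* An "RT-like" class that implements all three hooks. */
static void rt_pre(int cpu)  { printf("pre_schedule on cpu %d\n", cpu); }
static void rt_post(int cpu) { printf("post_schedule on cpu %d\n", cpu); }
static void rt_wake(int cpu) { printf("task_wake_up on cpu %d\n", cpu); }

static const struct demo_sched_class rt_like_class = {
        .pre_schedule  = rt_pre,
        .post_schedule = rt_post,
        .task_wake_up  = rt_wake,
};

/* A "fair-like" class that opts out of all balancing hooks. */
static const struct demo_sched_class fair_like_class = { 0 };

/* Core code stays class-agnostic: it never names a specific
 * class, it just calls whatever hooks the class provides. */
static void schedule_step(const struct demo_sched_class *class, int cpu)
{
        if (class->pre_schedule)
                class->pre_schedule(cpu);
        /* ... pick next task, context switch ... */
        if (class->post_schedule)
                class->post_schedule(cpu);
}

int main(void)
{
        schedule_step(&rt_like_class, 0);   /* both hooks fire */
        schedule_step(&fair_like_class, 0); /* silently skipped */
        return 0;
}

Classes that don't implement a hook pay only the cost of a pointer test, and sched.c no longer has to reference sched_rt-specific functions by name.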
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/sched.c     20
-rw-r--r--  kernel/sched_rt.c  17
2 files changed, 23 insertions(+), 14 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 9d6fb731559b..2368a0d882e3 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1625,7 +1625,10 @@ out_activate:
 
 out_running:
        p->state = TASK_RUNNING;
-       wakeup_balance_rt(rq, p);
+#ifdef CONFIG_SMP
+       if (p->sched_class->task_wake_up)
+               p->sched_class->task_wake_up(rq, p);
+#endif
 out:
        task_rq_unlock(rq, &flags);
 
@@ -1748,7 +1751,10 @@ void fastcall wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
                inc_nr_running(p, rq);
        }
        check_preempt_curr(rq, p);
-       wakeup_balance_rt(rq, p);
+#ifdef CONFIG_SMP
+       if (p->sched_class->task_wake_up)
+               p->sched_class->task_wake_up(rq, p);
+#endif
        task_rq_unlock(rq, &flags);
 }
 
@@ -1869,7 +1875,10 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev)
        prev_state = prev->state;
        finish_arch_switch(prev);
        finish_lock_switch(rq, prev);
-       schedule_tail_balance_rt(rq);
+#ifdef CONFIG_SMP
+       if (current->sched_class->post_schedule)
+               current->sched_class->post_schedule(rq);
+#endif
 
        fire_sched_in_preempt_notifiers(current);
        if (mm)
@@ -3638,7 +3647,10 @@ need_resched_nonpreemptible:
                switch_count = &prev->nvcsw;
        }
 
-       schedule_balance_rt(rq, prev);
+#ifdef CONFIG_SMP
+       if (prev->sched_class->pre_schedule)
+               prev->sched_class->pre_schedule(rq, prev);
+#endif
 
        if (unlikely(!rq->nr_running))
                idle_balance(cpu, rq);
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 3ea0cae513d2..a5a45104603a 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -689,14 +689,14 @@ static int pull_rt_task(struct rq *this_rq)
        return ret;
 }
 
-static void schedule_balance_rt(struct rq *rq, struct task_struct *prev)
+static void pre_schedule_rt(struct rq *rq, struct task_struct *prev)
 {
        /* Try to pull RT tasks here if we lower this rq's prio */
        if (unlikely(rt_task(prev)) && rq->rt.highest_prio > prev->prio)
                pull_rt_task(rq);
 }
 
-static void schedule_tail_balance_rt(struct rq *rq)
+static void post_schedule_rt(struct rq *rq)
 {
        /*
         * If we have more than one rt_task queued, then
@@ -713,10 +713,9 @@ static void schedule_tail_balance_rt(struct rq *rq)
 }
 
 
-static void wakeup_balance_rt(struct rq *rq, struct task_struct *p)
+static void task_wake_up_rt(struct rq *rq, struct task_struct *p)
 {
-       if (unlikely(rt_task(p)) &&
-           !task_running(rq, p) &&
+       if (!task_running(rq, p) &&
            (p->prio >= rq->rt.highest_prio) &&
            rq->rt.overloaded)
                push_rt_tasks(rq);
@@ -780,11 +779,6 @@ static void leave_domain_rt(struct rq *rq)
        if (rq->rt.overloaded)
                rt_clear_overload(rq);
 }
-
-#else /* CONFIG_SMP */
-# define schedule_tail_balance_rt(rq)   do { } while (0)
-# define schedule_balance_rt(rq, prev)  do { } while (0)
-# define wakeup_balance_rt(rq, p)       do { } while (0)
 #endif /* CONFIG_SMP */
 
 static void task_tick_rt(struct rq *rq, struct task_struct *p)
@@ -840,6 +834,9 @@ const struct sched_class rt_sched_class = {
        .set_cpus_allowed       = set_cpus_allowed_rt,
        .join_domain            = join_domain_rt,
        .leave_domain           = leave_domain_rt,
+       .pre_schedule           = pre_schedule_rt,
+       .post_schedule          = post_schedule_rt,
+       .task_wake_up           = task_wake_up_rt,
 #endif
 
        .set_curr_task          = set_curr_task_rt,
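A design note on the sched_rt.c hunk that deletes the #else branch: before this patch, every balance hook needed a do-nothing macro for !CONFIG_SMP builds so that callers in sched.c would still compile; afterwards, the NULL-checked call sites are themselves wrapped in #ifdef CONFIG_SMP, so the per-hook stubs disappear. A compilable toy illustration of the retired stub-macro pattern (hypothetical names, not kernel code):

#include <stdio.h>

/* Comment out to mimic a uniprocessor (!CONFIG_SMP) build. */
#define CONFIG_SMP

#ifdef CONFIG_SMP
static void balance_hook(void) { puts("balancing across CPUs"); }
#else
/* Old pattern: a no-op stub keeps call sites compiling on UP.
 * The patch retires this in favor of NULL-checked method
 * pointers behind #ifdef CONFIG_SMP at the call site itself. */
# define balance_hook() do { } while (0)
#endif

int main(void)
{
        balance_hook(); /* real work on SMP, nothing on UP */
        return 0;
}

The trade-off: the stub macros spread per-hook boilerplate across the class file, while the new scheme concentrates the SMP guard at the few call sites and lets each class opt in by filling in its sched_class methods.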