author		Ingo Molnar <mingo@elte.hu>	2008-01-25 15:08:18 -0500
committer	Ingo Molnar <mingo@elte.hu>	2008-01-25 15:08:18 -0500
commit		bdd7c81b4973e72b670eff6b5725bab189b723d6 (patch)
tree		5660b7ee8b2e9bc295053901112f70d5e784c183 /kernel/sched_rt.c
parent		637f50851b57a32f7ec67c50fc16f1601ab1a87a (diff)
sched: fix sched_rt.c:join/leave_domain
fix the build bug in sched_rt.c's join/leave_domain handlers and include
them only in SMP builds: they call rt_set_overload()/rt_clear_overload(),
which exist only under CONFIG_SMP.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/sched_rt.c')
-rw-r--r--	kernel/sched_rt.c	33
1 file changed, 16 insertions(+), 17 deletions(-)
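Editorial note on the bug being fixed: join_domain_rt()/leave_domain_rt() call rt_set_overload()/rt_clear_overload(), which sched_rt.c defines only under CONFIG_SMP, so defining the callers outside that #ifdef block broke !SMP builds. Below is a minimal standalone sketch of the pattern (toy types, with names mirroring the patch; this is not kernel code) that compiles both with and without -DCONFIG_SMP:

#include <stdio.h>

/* Toy stand-ins for the kernel's types, for illustration only. */
struct rt_rq { int overloaded; };
struct rq { struct rt_rq rt; };

#ifdef CONFIG_SMP
/* These helpers exist only on SMP builds ... */
static void rt_set_overload(struct rq *rq)   { (void)rq; printf("overload set\n"); }
static void rt_clear_overload(struct rq *rq) { (void)rq; printf("overload cleared\n"); }

/* ... so their callers must live inside the same #ifdef (the fix). */
static void join_domain_rt(struct rq *rq)
{
	if (rq->rt.overloaded)
		rt_set_overload(rq);
}

static void leave_domain_rt(struct rq *rq)
{
	if (rq->rt.overloaded)
		rt_clear_overload(rq);
}
#endif /* CONFIG_SMP */

int main(void)
{
	struct rq rq = { .rt = { .overloaded = 1 } };
#ifdef CONFIG_SMP
	join_domain_rt(&rq);
	leave_domain_rt(&rq);
#else
	(void)rq; /* on !SMP builds the hooks simply do not exist */
#endif
	return 0;
}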
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index b049e5110eea..3ea0cae513d2 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -767,6 +767,20 @@ static void set_cpus_allowed_rt(struct task_struct *p, cpumask_t *new_mask)
 	p->nr_cpus_allowed = weight;
 }
 
+/* Assumes rq->lock is held */
+static void join_domain_rt(struct rq *rq)
+{
+	if (rq->rt.overloaded)
+		rt_set_overload(rq);
+}
+
+/* Assumes rq->lock is held */
+static void leave_domain_rt(struct rq *rq)
+{
+	if (rq->rt.overloaded)
+		rt_clear_overload(rq);
+}
+
 #else /* CONFIG_SMP */
 # define schedule_tail_balance_rt(rq)	do { } while (0)
 # define schedule_balance_rt(rq, prev)	do { } while (0)
@@ -799,20 +813,6 @@ static void task_tick_rt(struct rq *rq, struct task_struct *p)
 	}
 }
 
-/* Assumes rq->lock is held */
-static void join_domain_rt(struct rq *rq)
-{
-	if (rq->rt.overloaded)
-		rt_set_overload(rq);
-}
-
-/* Assumes rq->lock is held */
-static void leave_domain_rt(struct rq *rq)
-{
-	if (rq->rt.overloaded)
-		rt_clear_overload(rq);
-}
-
 static void set_curr_task_rt(struct rq *rq)
 {
 	struct task_struct *p = rq->curr;
@@ -838,11 +838,10 @@ const struct sched_class rt_sched_class = {
 	.load_balance		= load_balance_rt,
 	.move_one_task		= move_one_task_rt,
 	.set_cpus_allowed	= set_cpus_allowed_rt,
+	.join_domain		= join_domain_rt,
+	.leave_domain		= leave_domain_rt,
 #endif
 
 	.set_curr_task		= set_curr_task_rt,
 	.task_tick		= task_tick_rt,
-
-	.join_domain		= join_domain_rt,
-	.leave_domain		= leave_domain_rt,
 };
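A note on the third hunk: because rt_sched_class is built with designated initializers, moving .join_domain/.leave_domain inside the existing #ifdef (next to .set_cpus_allowed) means those members are simply omitted, and therefore zero-initialized to NULL, on !SMP builds, where the functions no longer exist. A minimal sketch of this conditional ops-table pattern (toy names, not kernel code):

#include <stdio.h>

struct ops {
	void (*join)(void);
	void (*leave)(void);
	void (*tick)(void);
};

static void tick_fn(void) { }

#ifdef CONFIG_SMP
static void join_fn(void)  { }
static void leave_fn(void) { }
#endif

static const struct ops my_ops = {
#ifdef CONFIG_SMP
	/* SMP-only hooks: omitted (and thus NULL) when !CONFIG_SMP */
	.join	= join_fn,
	.leave	= leave_fn,
#endif
	.tick	= tick_fn,
};

int main(void)
{
	if (my_ops.join)	/* NULL on !SMP builds */
		my_ops.join();
	my_ops.tick();
	printf("join hook present: %s\n", my_ops.join ? "yes" : "no");
	return 0;
}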