author    Rusty Russell <rusty@rustcorp.com.au>    2009-03-25 00:31:22 -0400
committer Ingo Molnar <mingo@elte.hu>              2009-04-01 07:24:51 -0400
commit    13b8bd0a5713bdf05659019badd7c0407984ece1 (patch)
tree      378950741dd647b672a2b3e61988725b52557f2d /kernel
parent    a18b83b7ef3c98cd8b4bb885e4a649a8f30fb7b0 (diff)
sched_rt: don't allocate cpumask in fastpath
Impact: cleanup

As pointed out by Steven Rostedt.  Since the arg in question is unused,
we simply change cpupri_find() to accept NULL.

Reported-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
LKML-Reference: <200903251501.22664.rusty@rustcorp.com.au>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
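The idea behind the patch can be illustrated outside the kernel: an output
parameter that callers may pass as NULL lets the fast path skip both the
GFP_ATOMIC allocation and the mask fill when only the yes/no answer matters.
Below is a minimal, self-contained C sketch of that pattern, assuming nothing
about the real cpupri implementation; demo_find(), demo_mask and the bit
layout are illustrative names only, not kernel APIs.

/*
 * Sketch of the "optional output mask" pattern: fill *out only when the
 * caller actually supplied a mask to fill.
 */
#include <stdio.h>
#include <stddef.h>

typedef unsigned long demo_mask;	/* stand-in for a cpumask */

/* Returns 1 if 'allowed' overlaps 'online'; fills *out only if out != NULL. */
static int demo_find(demo_mask allowed, demo_mask online, demo_mask *out)
{
	demo_mask common = allowed & online;

	if (!common)
		return 0;

	if (out)			/* caller may not care which CPUs matched */
		*out = common;
	return 1;
}

int main(void)
{
	demo_mask selected;

	/* Slow path: the caller wants the actual mask. */
	if (demo_find(0x0f, 0x0c, &selected))
		printf("selected mask: 0x%lx\n", selected);

	/* Fast path: only the boolean result matters, so pass NULL. */
	if (demo_find(0x0f, 0x0c, NULL))
		printf("a suitable CPU exists\n");

	return 0;
}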
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/sched_cpupri.c   5
-rw-r--r--  kernel/sched_rt.c      15
2 files changed, 7 insertions, 13 deletions
diff --git a/kernel/sched_cpupri.c b/kernel/sched_cpupri.c
index 1e00bfacf9b8..cdd3c89574cd 100644
--- a/kernel/sched_cpupri.c
+++ b/kernel/sched_cpupri.c
@@ -55,7 +55,7 @@ static int convert_prio(int prio)
  * cpupri_find - find the best (lowest-pri) CPU in the system
  * @cp: The cpupri context
  * @p: The task
- * @lowest_mask: A mask to fill in with selected CPUs
+ * @lowest_mask: A mask to fill in with selected CPUs (or NULL)
  *
  * Note: This function returns the recommended CPUs as calculated during the
  * current invokation. By the time the call returns, the CPUs may have in
@@ -81,7 +81,8 @@ int cpupri_find(struct cpupri *cp, struct task_struct *p,
 		if (cpumask_any_and(&p->cpus_allowed, vec->mask) >= nr_cpu_ids)
 			continue;
 
-		cpumask_and(lowest_mask, &p->cpus_allowed, vec->mask);
+		if (lowest_mask)
+			cpumask_and(lowest_mask, &p->cpus_allowed, vec->mask);
 		return 1;
 	}
 
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index bac1061cea2f..fbec5a58ff10 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -805,20 +805,15 @@ static int select_task_rq_rt(struct task_struct *p, int sync)
 
 static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
 {
-	cpumask_var_t mask;
-
 	if (rq->curr->rt.nr_cpus_allowed == 1)
 		return;
 
-	if (!alloc_cpumask_var(&mask, GFP_ATOMIC))
-		return;
-
 	if (p->rt.nr_cpus_allowed != 1
-	    && cpupri_find(&rq->rd->cpupri, p, mask))
-		goto free;
+	    && cpupri_find(&rq->rd->cpupri, p, NULL))
+		return;
 
-	if (!cpupri_find(&rq->rd->cpupri, rq->curr, mask))
-		goto free;
+	if (!cpupri_find(&rq->rd->cpupri, rq->curr, NULL))
+		return;
 
 	/*
 	 * There appears to be other cpus that can accept
@@ -827,8 +822,6 @@ static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
 	 */
 	requeue_task_rt(rq, p, 1);
 	resched_task(rq->curr);
-free:
-	free_cpumask_var(mask);
 }
 
 #endif /* CONFIG_SMP */