about summary refs log tree commit diff stats
path: root/kernel/sched.c
diff options
context:
space:
mode:
authorPaul Mackerras <paulus@samba.org>2006-06-25 08:47:14 -0400
committerLinus Torvalds <torvalds@g5.osdl.org>2006-06-25 13:01:01 -0400
commitbfe5d834195b3089b8846577311340376cc0f450 (patch)
tree52470de0fe87ff8372700e3472735cd5c14cee9d /kernel/sched.c
parent6ceab8a936c302c0cea2bfe55617c76e2f5746fa (diff)
[PATCH] Define __raw_get_cpu_var and use it
There are several instances of per_cpu(foo, raw_smp_processor_id()), which is semantically equivalent to __get_cpu_var(foo) but without the warning that smp_processor_id() can give if CONFIG_DEBUG_PREEMPT is enabled. For those architectures with optimized per-cpu implementations, namely ia64, powerpc, s390, sparc64 and x86_64, per_cpu() turns into more and slower code than __get_cpu_var(), so it would be preferable to use __get_cpu_var on those platforms. This defines a __raw_get_cpu_var(x) macro which turns into per_cpu(x, raw_smp_processor_id()) on architectures that use the generic per-cpu implementation, and turns into __get_cpu_var(x) on the architectures that have an optimized per-cpu implementation.

Signed-off-by: Paul Mackerras <paulus@samba.org>
Acked-by: David S. Miller <davem@davemloft.net>
Acked-by: Ingo Molnar <mingo@elte.hu>
Acked-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--kernel/sched.c4
1 files changed, 2 insertions, 2 deletions
diff --git a/kernel/sched.c b/kernel/sched.c
index 5dbc42694477..f8d540b324ca 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -4152,7 +4152,7 @@ EXPORT_SYMBOL(yield);
  */
 void __sched io_schedule(void)
 {
-	struct runqueue *rq = &per_cpu(runqueues, raw_smp_processor_id());
+	struct runqueue *rq = &__raw_get_cpu_var(runqueues);
 
 	atomic_inc(&rq->nr_iowait);
 	schedule();
@@ -4163,7 +4163,7 @@ EXPORT_SYMBOL(io_schedule);
 
 long __sched io_schedule_timeout(long timeout)
 {
-	struct runqueue *rq = &per_cpu(runqueues, raw_smp_processor_id());
+	struct runqueue *rq = &__raw_get_cpu_var(runqueues);
 	long ret;
 
 	atomic_inc(&rq->nr_iowait);