author		Paul Mackerras <paulus@samba.org>	2006-06-25 08:47:14 -0400
committer	Linus Torvalds <torvalds@g5.osdl.org>	2006-06-25 13:01:01 -0400
commit		bfe5d834195b3089b8846577311340376cc0f450 (patch)
tree		52470de0fe87ff8372700e3472735cd5c14cee9d /include/asm-generic
parent		6ceab8a936c302c0cea2bfe55617c76e2f5746fa (diff)
[PATCH] Define __raw_get_cpu_var and use it
There are several instances of per_cpu(foo, raw_smp_processor_id()), which is semantically equivalent to __get_cpu_var(foo) but without the warning that smp_processor_id() can give if CONFIG_DEBUG_PREEMPT is enabled.  For those architectures with optimized per-cpu implementations, namely ia64, powerpc, s390, sparc64 and x86_64, per_cpu() turns into more and slower code than __get_cpu_var(), so it would be preferable to use __get_cpu_var on those platforms.

This defines a __raw_get_cpu_var(x) macro which turns into per_cpu(x, raw_smp_processor_id()) on architectures that use the generic per-cpu implementation, and turns into __get_cpu_var(x) on the architectures that have an optimized per-cpu implementation.

Signed-off-by: Paul Mackerras <paulus@samba.org>
Acked-by: David S. Miller <davem@davemloft.net>
Acked-by: Ingo Molnar <mingo@elte.hu>
Acked-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
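As a hedged illustration (not part of the commit), the caller-side substitution this enables looks roughly like the sketch below; the per-CPU variable my_counter and the function bump_counter are invented for the example.

	#include <linux/percpu.h>

	static DEFINE_PER_CPU(int, my_counter);	/* hypothetical per-CPU variable */

	static void bump_counter(void)
	{
		/* Old pattern: avoids the CONFIG_DEBUG_PREEMPT warning, but
		 * expands to the slower per_cpu() form on the optimized
		 * architectures listed above:
		 *
		 *	per_cpu(my_counter, raw_smp_processor_id())++;
		 *
		 * New macro: same semantics, and picks the fast
		 * __get_cpu_var() expansion where one is available.
		 */
		__raw_get_cpu_var(my_counter)++;
	}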
Diffstat (limited to 'include/asm-generic')
-rw-r--r--	include/asm-generic/percpu.h	2
1 file changed, 2 insertions(+), 0 deletions(-)
diff --git a/include/asm-generic/percpu.h b/include/asm-generic/percpu.h
index c0caf433a7d7..c74521157461 100644
--- a/include/asm-generic/percpu.h
+++ b/include/asm-generic/percpu.h
@@ -14,6 +14,7 @@ extern unsigned long __per_cpu_offset[NR_CPUS];
 /* var is in discarded region: offset to particular copy we want */
 #define per_cpu(var, cpu) (*RELOC_HIDE(&per_cpu__##var, __per_cpu_offset[cpu]))
 #define __get_cpu_var(var) per_cpu(var, smp_processor_id())
+#define __raw_get_cpu_var(var) per_cpu(var, raw_smp_processor_id())
 
 /* A macro to avoid #include hell... */
 #define percpu_modcopy(pcpudst, src, size) \
@@ -30,6 +31,7 @@ do { \
 
 #define per_cpu(var, cpu) (*((void)(cpu), &per_cpu__##var))
 #define __get_cpu_var(var) per_cpu__##var
+#define __raw_get_cpu_var(var) per_cpu__##var
 
 #endif /* SMP */
 
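The diffstat above is limited to include/asm-generic. Per the commit message, the architectures with an optimized per-cpu implementation get a matching definition in their own asm/percpu.h that expands like __get_cpu_var(). A minimal sketch of that shape (illustrative only, not the verbatim hunk from those files):

	/* asm-<arch>/percpu.h, optimized implementation (sketch only):
	 * the raw variant shares the fast __get_cpu_var() expansion,
	 * since no preemption-debug check on the CPU id is needed here.
	 */
	#define __raw_get_cpu_var(var)	__get_cpu_var(var)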