about summary refs log tree commit diff stats
path: root/include/asm-sparc64/percpu.h
diff options
context:
space:
mode:
author	travis@sgi.com <travis@sgi.com>	2008-01-30 07:32:52 -0500
committer	Ingo Molnar <mingo@elte.hu>	2008-01-30 07:32:52 -0500
commit	5280e004fc22314122c84978c0b6a741cf96dc0f (patch)
tree	008b96d81a924be764629f62f98fa5f7c9e04773 /include/asm-sparc64/percpu.h
parent	b32ef636a59aad12f9f9b5dc34c93222842c58ba (diff)
percpu: move arch XX_PER_CPU_XX definitions into linux/percpu.h
- Special consideration for IA64: Add the ability to specify arch specific per cpu flags - remove .data.percpu attribute from DEFINE_PER_CPU for non-smp case. The arch definitions are all the same. So move them into linux/percpu.h. We cannot move DECLARE_PER_CPU since some include files just include asm/percpu.h to avoid include recursion problems. Cc: Rusty Russell <rusty@rustcorp.com.au> Cc: Andi Kleen <ak@suse.de> Signed-off-by: Christoph Lameter <clameter@sgi.com> Signed-off-by: Mike Travis <travis@sgi.com> Signed-off-by: Thomas Gleixner <tglx@linutronix.de> Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'include/asm-sparc64/percpu.h')
-rw-r--r--	include/asm-sparc64/percpu.h	16
1 files changed, 0 insertions, 16 deletions
diff --git a/include/asm-sparc64/percpu.h b/include/asm-sparc64/percpu.h
index a1f53a4da405..c7e52decba98 100644
--- a/include/asm-sparc64/percpu.h
+++ b/include/asm-sparc64/percpu.h
@@ -16,15 +16,6 @@ extern unsigned long __per_cpu_shift;
16 (__per_cpu_base + ((unsigned long)(__cpu) << __per_cpu_shift)) 16 (__per_cpu_base + ((unsigned long)(__cpu) << __per_cpu_shift))
17#define per_cpu_offset(x) (__per_cpu_offset(x)) 17#define per_cpu_offset(x) (__per_cpu_offset(x))
18 18
19/* Separate out the type, so (int[3], foo) works. */
20#define DEFINE_PER_CPU(type, name) \
21 __attribute__((__section__(".data.percpu"))) __typeof__(type) per_cpu__##name
22
23#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \
24 __attribute__((__section__(".data.percpu.shared_aligned"))) \
25 __typeof__(type) per_cpu__##name \
26 ____cacheline_aligned_in_smp
27
28/* var is in discarded region: offset to particular copy we want */ 19/* var is in discarded region: offset to particular copy we want */
29#define per_cpu(var, cpu) (*RELOC_HIDE(&per_cpu__##var, __per_cpu_offset(cpu))) 20#define per_cpu(var, cpu) (*RELOC_HIDE(&per_cpu__##var, __per_cpu_offset(cpu)))
30#define __get_cpu_var(var) (*RELOC_HIDE(&per_cpu__##var, __local_per_cpu_offset)) 21#define __get_cpu_var(var) (*RELOC_HIDE(&per_cpu__##var, __local_per_cpu_offset))
@@ -41,10 +32,6 @@ do { \
41#else /* ! SMP */ 32#else /* ! SMP */
42 33
43#define real_setup_per_cpu_areas() do { } while (0) 34#define real_setup_per_cpu_areas() do { } while (0)
44#define DEFINE_PER_CPU(type, name) \
45 __typeof__(type) per_cpu__##name
46#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \
47 DEFINE_PER_CPU(type, name)
48 35
49#define per_cpu(var, cpu) (*((void)cpu, &per_cpu__##var)) 36#define per_cpu(var, cpu) (*((void)cpu, &per_cpu__##var))
50#define __get_cpu_var(var) per_cpu__##var 37#define __get_cpu_var(var) per_cpu__##var
@@ -54,7 +41,4 @@ do { \
54 41
55#define DECLARE_PER_CPU(type, name) extern __typeof__(type) per_cpu__##name 42#define DECLARE_PER_CPU(type, name) extern __typeof__(type) per_cpu__##name
56 43
57#define EXPORT_PER_CPU_SYMBOL(var) EXPORT_SYMBOL(per_cpu__##var)
58#define EXPORT_PER_CPU_SYMBOL_GPL(var) EXPORT_SYMBOL_GPL(per_cpu__##var)
59
60#endif /* __ARCH_SPARC64_PERCPU__ */ 44#endif /* __ARCH_SPARC64_PERCPU__ */