author     travis@sgi.com <travis@sgi.com>   2008-01-30 07:32:52 -0500
committer  Ingo Molnar <mingo@elte.hu>       2008-01-30 07:32:52 -0500
commit     5280e004fc22314122c84978c0b6a741cf96dc0f
tree       008b96d81a924be764629f62f98fa5f7c9e04773   /include/asm-x86/percpu_32.h
parent     b32ef636a59aad12f9f9b5dc34c93222842c58ba
percpu: move arch XX_PER_CPU_XX definitions into linux/percpu.h
- Special consideration for IA64: add the ability to specify arch-specific
  per-cpu flags.
- Remove the .data.percpu attribute from DEFINE_PER_CPU for the non-SMP case.

The arch definitions are all the same, so move them into linux/percpu.h.
We cannot move DECLARE_PER_CPU, since some include files include
asm/percpu.h directly to avoid include recursion problems.

Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: Andi Kleen <ak@suse.de>
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Mike Travis <travis@sgi.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
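To make the description concrete, here is a rough sketch (an editorial reconstruction, not the verbatim linux/percpu.h hunk from this series) of the consolidated generic definitions the message describes. It assumes a PER_CPU_ATTRIBUTES hook as the carrier for the arch-specific flags mentioned above (the IA64 case); in the SMP case the variables still land in .data.percpu, while the non-SMP case drops the section attribute entirely.

/*
 * Sketch of the generic definitions in linux/percpu.h after the move.
 * PER_CPU_ATTRIBUTES is assumed to default to empty unless an arch
 * (e.g. IA64) overrides it with its own per-cpu flags.
 */
#ifndef PER_CPU_ATTRIBUTES
#define PER_CPU_ATTRIBUTES
#endif

#ifdef CONFIG_SMP
#define DEFINE_PER_CPU(type, name)					\
	__attribute__((__section__(".data.percpu")))			\
	PER_CPU_ATTRIBUTES __typeof__(type) per_cpu__##name

#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name)			\
	__attribute__((__section__(".data.percpu.shared_aligned")))	\
	PER_CPU_ATTRIBUTES __typeof__(type) per_cpu__##name		\
	____cacheline_aligned_in_smp
#else	/* !SMP: no .data.percpu section, a plain variable is enough */
#define DEFINE_PER_CPU(type, name)					\
	PER_CPU_ATTRIBUTES __typeof__(type) per_cpu__##name

#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name)			\
	DEFINE_PER_CPU(type, name)
#endif

#define EXPORT_PER_CPU_SYMBOL(var) EXPORT_SYMBOL(per_cpu__##var)
#define EXPORT_PER_CPU_SYMBOL_GPL(var) EXPORT_SYMBOL_GPL(per_cpu__##var)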
Diffstat (limited to 'include/asm-x86/percpu_32.h')
-rw-r--r--   include/asm-x86/percpu_32.h   |  12 ------------
1 file changed, 0 insertions(+), 12 deletions(-)
diff --git a/include/asm-x86/percpu_32.h b/include/asm-x86/percpu_32.h
index 3949586bf94e..77bd0045f331 100644
--- a/include/asm-x86/percpu_32.h
+++ b/include/asm-x86/percpu_32.h
@@ -47,16 +47,7 @@ extern unsigned long __per_cpu_offset[];
 
 #define per_cpu_offset(x) (__per_cpu_offset[x])
 
-/* Separate out the type, so (int[3], foo) works. */
 #define DECLARE_PER_CPU(type, name) extern __typeof__(type) per_cpu__##name
-#define DEFINE_PER_CPU(type, name) \
-	__attribute__((__section__(".data.percpu"))) __typeof__(type) per_cpu__##name
-
-#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name)	\
-	__attribute__((__section__(".data.percpu.shared_aligned"))) \
-	__typeof__(type) per_cpu__##name \
-	____cacheline_aligned_in_smp
-
 /* We can use this directly for local CPU (faster). */
 DECLARE_PER_CPU(unsigned long, this_cpu_off);
 
@@ -81,9 +72,6 @@ do { \
 		       (src), (size));		\
 } while (0)
 
-#define EXPORT_PER_CPU_SYMBOL(var) EXPORT_SYMBOL(per_cpu__##var)
-#define EXPORT_PER_CPU_SYMBOL_GPL(var) EXPORT_SYMBOL_GPL(per_cpu__##var)
-
 /* fs segment starts at (positive) offset == __per_cpu_offset[cpu] */
 #define __percpu_seg "%%fs:"
 #else	/* !SMP */
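For reference, a small, hypothetical consumer of these macros (example_counter and the helpers below are illustrative, not part of this patch): call sites look the same before and after the change, since only the header that provides the definitions moves.

#include <linux/module.h>
#include <linux/percpu.h>

/* Hypothetical per-cpu variable, defined and exported as before. */
DEFINE_PER_CPU(unsigned long, example_counter);
EXPORT_PER_CPU_SYMBOL(example_counter);

static void bump_example_counter(void)
{
	/* get_cpu_var() disables preemption and selects this CPU's copy. */
	get_cpu_var(example_counter)++;
	put_cpu_var(example_counter);
}

static unsigned long read_example_counter(int cpu)
{
	/* per_cpu() indexes a specific CPU's instance. */
	return per_cpu(example_counter, cpu);
}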