path: root/include
author	travis@sgi.com <travis@sgi.com>	2008-01-30 07:32:52 -0500
committer	Ingo Molnar <mingo@elte.hu>	2008-01-30 07:32:52 -0500
commit	5280e004fc22314122c84978c0b6a741cf96dc0f (patch)
tree	008b96d81a924be764629f62f98fa5f7c9e04773 /include
parent	b32ef636a59aad12f9f9b5dc34c93222842c58ba (diff)
percpu: move arch XX_PER_CPU_XX definitions into linux/percpu.h
- Special consideration for IA64: Add the ability to specify
  arch specific per cpu flags

- remove .data.percpu attribute from DEFINE_PER_CPU for non-smp case.

The arch definitions are all the same. So move them into linux/percpu.h.

We cannot move DECLARE_PER_CPU since some include files just include
asm/percpu.h to avoid include recursion problems.

Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: Andi Kleen <ak@suse.de>
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Mike Travis <travis@sgi.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
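For reference, a minimal sketch of how the consolidated API is used from
architecture-independent code (the variable name my_counter and the helper
bump_all_counters are illustrative only, not part of this patch;
DEFINE_PER_CPU, per_cpu and for_each_possible_cpu are existing kernel
interfaces):

#include <linux/percpu.h>
#include <linux/cpumask.h>

/* One instance of the counter per CPU; illustrative name. */
static DEFINE_PER_CPU(unsigned long, my_counter);

static void bump_all_counters(void)
{
	int cpu;

	/* per_cpu() offsets into the copy that belongs to each CPU. */
	for_each_possible_cpu(cpu)
		per_cpu(my_counter, cpu)++;
}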
Diffstat (limited to 'include')
-rw-r--r--	include/asm-generic/percpu.h	18
-rw-r--r--	include/asm-ia64/percpu.h	24
-rw-r--r--	include/asm-powerpc/percpu.h	17
-rw-r--r--	include/asm-s390/percpu.h	18
-rw-r--r--	include/asm-sparc64/percpu.h	16
-rw-r--r--	include/asm-x86/percpu_32.h	12
-rw-r--r--	include/asm-x86/percpu_64.h	17
-rw-r--r--	include/linux/percpu.h	24
8 files changed, 26 insertions(+), 120 deletions(-)
diff --git a/include/asm-generic/percpu.h b/include/asm-generic/percpu.h
index b5e53b9ab1f7..e038f13594e5 100644
--- a/include/asm-generic/percpu.h
+++ b/include/asm-generic/percpu.h
@@ -9,15 +9,6 @@ extern unsigned long __per_cpu_offset[NR_CPUS];
 
 #define per_cpu_offset(x) (__per_cpu_offset[x])
 
-/* Separate out the type, so (int[3], foo) works. */
-#define DEFINE_PER_CPU(type, name) \
-	__attribute__((__section__(".data.percpu"))) __typeof__(type) per_cpu__##name
-
-#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \
-	__attribute__((__section__(".data.percpu.shared_aligned"))) \
-	__typeof__(type) per_cpu__##name \
-	____cacheline_aligned_in_smp
-
 /* var is in discarded region: offset to particular copy we want */
 #define per_cpu(var, cpu) (*({ \
 	extern int simple_identifier_##var(void); \
@@ -35,12 +26,6 @@ do { \
 } while (0)
 #else /* ! SMP */
 
-#define DEFINE_PER_CPU(type, name) \
-	__typeof__(type) per_cpu__##name
-
-#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \
-	DEFINE_PER_CPU(type, name)
-
 #define per_cpu(var, cpu) (*((void)(cpu), &per_cpu__##var))
 #define __get_cpu_var(var) per_cpu__##var
 #define __raw_get_cpu_var(var) per_cpu__##var
@@ -49,7 +34,4 @@ do { \
 
 #define DECLARE_PER_CPU(type, name) extern __typeof__(type) per_cpu__##name
 
-#define EXPORT_PER_CPU_SYMBOL(var) EXPORT_SYMBOL(per_cpu__##var)
-#define EXPORT_PER_CPU_SYMBOL_GPL(var) EXPORT_SYMBOL_GPL(per_cpu__##var)
-
 #endif /* _ASM_GENERIC_PERCPU_H_ */
diff --git a/include/asm-ia64/percpu.h b/include/asm-ia64/percpu.h
index c4f1e328a5ba..0095bcf79848 100644
--- a/include/asm-ia64/percpu.h
+++ b/include/asm-ia64/percpu.h
@@ -16,28 +16,11 @@
 #include <linux/threads.h>
 
 #ifdef HAVE_MODEL_SMALL_ATTRIBUTE
-# define __SMALL_ADDR_AREA __attribute__((__model__ (__small__)))
-#else
-# define __SMALL_ADDR_AREA
+# define PER_CPU_ATTRIBUTES __attribute__((__model__ (__small__)))
 #endif
 
 #define DECLARE_PER_CPU(type, name) \
-	extern __SMALL_ADDR_AREA __typeof__(type) per_cpu__##name
-
-/* Separate out the type, so (int[3], foo) works. */
-#define DEFINE_PER_CPU(type, name) \
-	__attribute__((__section__(".data.percpu"))) \
-	__SMALL_ADDR_AREA __typeof__(type) per_cpu__##name
-
-#ifdef CONFIG_SMP
-#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \
-	__attribute__((__section__(".data.percpu.shared_aligned"))) \
-	__SMALL_ADDR_AREA __typeof__(type) per_cpu__##name \
-	____cacheline_aligned_in_smp
-#else
-#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \
-	DEFINE_PER_CPU(type, name)
-#endif
+	extern PER_CPU_ATTRIBUTES __typeof__(type) per_cpu__##name
 
 /*
  * Pretty much a literal copy of asm-generic/percpu.h, except that percpu_modcopy() is an
@@ -68,9 +51,6 @@ extern void *per_cpu_init(void);
 
 #endif /* SMP */
 
-#define EXPORT_PER_CPU_SYMBOL(var) EXPORT_SYMBOL(per_cpu__##var)
-#define EXPORT_PER_CPU_SYMBOL_GPL(var) EXPORT_SYMBOL_GPL(per_cpu__##var)
-
 /*
  * Be extremely careful when taking the address of this variable! Due to virtual
  * remapping, it is different from the canonical address returned by __get_cpu_var(var)!
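The ia64 hunk above is the heart of the new override scheme: an architecture
that needs extra qualifiers on its per-cpu variables defines
PER_CPU_ATTRIBUTES, and linux/percpu.h supplies an empty fallback. Shown side
by side as a sketch (both fragments are taken from this patch):

/* include/asm-ia64/percpu.h: only the attribute stays arch-specific. */
#ifdef HAVE_MODEL_SMALL_ATTRIBUTE
# define PER_CPU_ATTRIBUTES __attribute__((__model__ (__small__)))
#endif

/* include/linux/percpu.h: fallback for every arch that defines nothing. */
#ifndef PER_CPU_ATTRIBUTES
#define PER_CPU_ATTRIBUTES
#endif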
diff --git a/include/asm-powerpc/percpu.h b/include/asm-powerpc/percpu.h
index 6b229626d3ff..cc1cbf656b02 100644
--- a/include/asm-powerpc/percpu.h
+++ b/include/asm-powerpc/percpu.h
@@ -16,15 +16,6 @@
 #define __my_cpu_offset() get_paca()->data_offset
 #define per_cpu_offset(x) (__per_cpu_offset(x))
 
-/* Separate out the type, so (int[3], foo) works. */
-#define DEFINE_PER_CPU(type, name) \
-	__attribute__((__section__(".data.percpu"))) __typeof__(type) per_cpu__##name
-
-#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \
-	__attribute__((__section__(".data.percpu.shared_aligned"))) \
-	__typeof__(type) per_cpu__##name \
-	____cacheline_aligned_in_smp
-
 /* var is in discarded region: offset to particular copy we want */
 #define per_cpu(var, cpu) (*RELOC_HIDE(&per_cpu__##var, __per_cpu_offset(cpu)))
 #define __get_cpu_var(var) (*RELOC_HIDE(&per_cpu__##var, __my_cpu_offset()))
@@ -43,11 +34,6 @@ extern void setup_per_cpu_areas(void);
 
 #else /* ! SMP */
 
-#define DEFINE_PER_CPU(type, name) \
-	__typeof__(type) per_cpu__##name
-#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \
-	DEFINE_PER_CPU(type, name)
-
 #define per_cpu(var, cpu) (*((void)(cpu), &per_cpu__##var))
 #define __get_cpu_var(var) per_cpu__##var
 #define __raw_get_cpu_var(var) per_cpu__##var
@@ -56,9 +42,6 @@ extern void setup_per_cpu_areas(void);
 
 #define DECLARE_PER_CPU(type, name) extern __typeof__(type) per_cpu__##name
 
-#define EXPORT_PER_CPU_SYMBOL(var) EXPORT_SYMBOL(per_cpu__##var)
-#define EXPORT_PER_CPU_SYMBOL_GPL(var) EXPORT_SYMBOL_GPL(per_cpu__##var)
-
 #else
 #include <asm-generic/percpu.h>
 #endif
diff --git a/include/asm-s390/percpu.h b/include/asm-s390/percpu.h
index f94d0d3cdb2f..2d676a873858 100644
--- a/include/asm-s390/percpu.h
+++ b/include/asm-s390/percpu.h
@@ -34,16 +34,6 @@
 
 extern unsigned long __per_cpu_offset[NR_CPUS];
 
-/* Separate out the type, so (int[3], foo) works. */
-#define DEFINE_PER_CPU(type, name) \
-	__attribute__((__section__(".data.percpu"))) \
-	__typeof__(type) per_cpu__##name
-
-#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \
-	__attribute__((__section__(".data.percpu.shared_aligned"))) \
-	__typeof__(type) per_cpu__##name \
-	____cacheline_aligned_in_smp
-
 #define __get_cpu_var(var) __reloc_hide(var,S390_lowcore.percpu_offset)
 #define __raw_get_cpu_var(var) __reloc_hide(var,S390_lowcore.percpu_offset)
 #define per_cpu(var,cpu) __reloc_hide(var,__per_cpu_offset[cpu])
@@ -60,11 +50,6 @@ do { \
 
 #else /* ! SMP */
 
-#define DEFINE_PER_CPU(type, name) \
-	__typeof__(type) per_cpu__##name
-#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \
-	DEFINE_PER_CPU(type, name)
-
 #define __get_cpu_var(var) __reloc_hide(var,0)
 #define __raw_get_cpu_var(var) __reloc_hide(var,0)
 #define per_cpu(var,cpu) __reloc_hide(var,0)
@@ -73,7 +58,4 @@ do { \
 
 #define DECLARE_PER_CPU(type, name) extern __typeof__(type) per_cpu__##name
 
-#define EXPORT_PER_CPU_SYMBOL(var) EXPORT_SYMBOL(per_cpu__##var)
-#define EXPORT_PER_CPU_SYMBOL_GPL(var) EXPORT_SYMBOL_GPL(per_cpu__##var)
-
 #endif /* __ARCH_S390_PERCPU__ */
diff --git a/include/asm-sparc64/percpu.h b/include/asm-sparc64/percpu.h
index a1f53a4da405..c7e52decba98 100644
--- a/include/asm-sparc64/percpu.h
+++ b/include/asm-sparc64/percpu.h
@@ -16,15 +16,6 @@ extern unsigned long __per_cpu_shift;
 	(__per_cpu_base + ((unsigned long)(__cpu) << __per_cpu_shift))
 #define per_cpu_offset(x) (__per_cpu_offset(x))
 
-/* Separate out the type, so (int[3], foo) works. */
-#define DEFINE_PER_CPU(type, name) \
-	__attribute__((__section__(".data.percpu"))) __typeof__(type) per_cpu__##name
-
-#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \
-	__attribute__((__section__(".data.percpu.shared_aligned"))) \
-	__typeof__(type) per_cpu__##name \
-	____cacheline_aligned_in_smp
-
 /* var is in discarded region: offset to particular copy we want */
 #define per_cpu(var, cpu) (*RELOC_HIDE(&per_cpu__##var, __per_cpu_offset(cpu)))
 #define __get_cpu_var(var) (*RELOC_HIDE(&per_cpu__##var, __local_per_cpu_offset))
@@ -41,10 +32,6 @@ do { \
 #else /* ! SMP */
 
 #define real_setup_per_cpu_areas() do { } while (0)
-#define DEFINE_PER_CPU(type, name) \
-	__typeof__(type) per_cpu__##name
-#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \
-	DEFINE_PER_CPU(type, name)
 
 #define per_cpu(var, cpu) (*((void)cpu, &per_cpu__##var))
 #define __get_cpu_var(var) per_cpu__##var
@@ -54,7 +41,4 @@ do { \
 
 #define DECLARE_PER_CPU(type, name) extern __typeof__(type) per_cpu__##name
 
-#define EXPORT_PER_CPU_SYMBOL(var) EXPORT_SYMBOL(per_cpu__##var)
-#define EXPORT_PER_CPU_SYMBOL_GPL(var) EXPORT_SYMBOL_GPL(per_cpu__##var)
-
 #endif /* __ARCH_SPARC64_PERCPU__ */
diff --git a/include/asm-x86/percpu_32.h b/include/asm-x86/percpu_32.h
index 3949586bf94e..77bd0045f331 100644
--- a/include/asm-x86/percpu_32.h
+++ b/include/asm-x86/percpu_32.h
@@ -47,16 +47,7 @@ extern unsigned long __per_cpu_offset[];
 
 #define per_cpu_offset(x) (__per_cpu_offset[x])
 
-/* Separate out the type, so (int[3], foo) works. */
 #define DECLARE_PER_CPU(type, name) extern __typeof__(type) per_cpu__##name
-#define DEFINE_PER_CPU(type, name) \
-	__attribute__((__section__(".data.percpu"))) __typeof__(type) per_cpu__##name
-
-#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \
-	__attribute__((__section__(".data.percpu.shared_aligned"))) \
-	__typeof__(type) per_cpu__##name \
-	____cacheline_aligned_in_smp
-
 /* We can use this directly for local CPU (faster). */
 DECLARE_PER_CPU(unsigned long, this_cpu_off);
 
@@ -81,9 +72,6 @@ do { \
 		(src), (size)); \
 } while (0)
 
-#define EXPORT_PER_CPU_SYMBOL(var) EXPORT_SYMBOL(per_cpu__##var)
-#define EXPORT_PER_CPU_SYMBOL_GPL(var) EXPORT_SYMBOL_GPL(per_cpu__##var)
-
 /* fs segment starts at (positive) offset == __per_cpu_offset[cpu] */
 #define __percpu_seg "%%fs:"
 #else /* !SMP */
diff --git a/include/asm-x86/percpu_64.h b/include/asm-x86/percpu_64.h
index 5abd48270101..24fe7075248d 100644
--- a/include/asm-x86/percpu_64.h
+++ b/include/asm-x86/percpu_64.h
@@ -16,15 +16,6 @@
 
 #define per_cpu_offset(x) (__per_cpu_offset(x))
 
-/* Separate out the type, so (int[3], foo) works. */
-#define DEFINE_PER_CPU(type, name) \
-	__attribute__((__section__(".data.percpu"))) __typeof__(type) per_cpu__##name
-
-#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \
-	__attribute__((__section__(".data.percpu.shared_aligned"))) \
-	__typeof__(type) per_cpu__##name \
-	____cacheline_internodealigned_in_smp
-
 /* var is in discarded region: offset to particular copy we want */
 #define per_cpu(var, cpu) (*({ \
 	extern int simple_identifier_##var(void); \
@@ -49,11 +40,6 @@ extern void setup_per_cpu_areas(void);
 
 #else /* ! SMP */
 
-#define DEFINE_PER_CPU(type, name) \
-	__typeof__(type) per_cpu__##name
-#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \
-	DEFINE_PER_CPU(type, name)
-
 #define per_cpu(var, cpu) (*((void)(cpu), &per_cpu__##var))
 #define __get_cpu_var(var) per_cpu__##var
 #define __raw_get_cpu_var(var) per_cpu__##var
@@ -62,7 +48,4 @@ extern void setup_per_cpu_areas(void);
 
 #define DECLARE_PER_CPU(type, name) extern __typeof__(type) per_cpu__##name
 
-#define EXPORT_PER_CPU_SYMBOL(var) EXPORT_SYMBOL(per_cpu__##var)
-#define EXPORT_PER_CPU_SYMBOL_GPL(var) EXPORT_SYMBOL_GPL(per_cpu__##var)
-
 #endif /* _ASM_X8664_PERCPU_H_ */
diff --git a/include/linux/percpu.h b/include/linux/percpu.h
index 926adaae0f96..00412bb494c4 100644
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
@@ -9,6 +9,30 @@
 
 #include <asm/percpu.h>
 
+#ifndef PER_CPU_ATTRIBUTES
+#define PER_CPU_ATTRIBUTES
+#endif
+
+#ifdef CONFIG_SMP
+#define DEFINE_PER_CPU(type, name) \
+	__attribute__((__section__(".data.percpu"))) \
+	PER_CPU_ATTRIBUTES __typeof__(type) per_cpu__##name
+
+#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \
+	__attribute__((__section__(".data.percpu.shared_aligned"))) \
+	PER_CPU_ATTRIBUTES __typeof__(type) per_cpu__##name \
+	____cacheline_aligned_in_smp
+#else
+#define DEFINE_PER_CPU(type, name) \
+	PER_CPU_ATTRIBUTES __typeof__(type) per_cpu__##name
+
+#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \
+	DEFINE_PER_CPU(type, name)
+#endif
+
+#define EXPORT_PER_CPU_SYMBOL(var) EXPORT_SYMBOL(per_cpu__##var)
+#define EXPORT_PER_CPU_SYMBOL_GPL(var) EXPORT_SYMBOL_GPL(per_cpu__##var)
+
 /* Enough to cover all DEFINE_PER_CPUs in kernel, including modules. */
 #ifndef PERCPU_ENOUGH_ROOM
 #ifdef CONFIG_MODULES
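With the consolidated definitions above, DEFINE_PER_CPU(int, foo) expands
roughly as follows (a sketch assuming PER_CPU_ATTRIBUTES is empty, which is
the case on every arch except ia64 with HAVE_MODEL_SMALL_ATTRIBUTE):

/* SMP build: the variable is placed in the .data.percpu section. */
__attribute__((__section__(".data.percpu"))) __typeof__(int) per_cpu__foo;

/* Non-SMP build: an ordinary variable, no special section. */
__typeof__(int) per_cpu__foo;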