Diffstat:
-rw-r--r--  arch/alpha/kernel/vmlinux.lds.S      5
-rw-r--r--  arch/arm/kernel/vmlinux.lds.S        1
-rw-r--r--  arch/cris/arch-v32/vmlinux.lds.S     5
-rw-r--r--  arch/frv/kernel/vmlinux.lds.S        5
-rw-r--r--  arch/i386/kernel/vmlinux.lds.S       1
-rw-r--r--  arch/ia64/kernel/vmlinux.lds.S       1
-rw-r--r--  arch/m32r/kernel/vmlinux.lds.S       5
-rw-r--r--  arch/mips/kernel/vmlinux.lds.S       5
-rw-r--r--  arch/parisc/kernel/vmlinux.lds.S     7
-rw-r--r--  arch/powerpc/kernel/vmlinux.lds.S    1
-rw-r--r--  arch/ppc/kernel/vmlinux.lds.S        5
-rw-r--r--  arch/s390/kernel/vmlinux.lds.S       5
-rw-r--r--  arch/sh/kernel/vmlinux.lds.S         5
-rw-r--r--  arch/sh64/kernel/vmlinux.lds.S       5
-rw-r--r--  arch/sparc/kernel/vmlinux.lds.S      5
-rw-r--r--  arch/sparc64/kernel/vmlinux.lds.S    6
-rw-r--r--  arch/x86_64/kernel/vmlinux.lds.S     6
-rw-r--r--  arch/xtensa/kernel/vmlinux.lds.S     5
-rw-r--r--  include/asm-generic/percpu.h         8
-rw-r--r--  include/asm-generic/vmlinux.lds.h    8
-rw-r--r--  include/asm-i386/percpu.h            5
-rw-r--r--  include/asm-ia64/percpu.h           10
-rw-r--r--  include/asm-powerpc/percpu.h         7
-rw-r--r--  include/asm-s390/percpu.h            7
-rw-r--r--  include/asm-sparc64/percpu.h         7
-rw-r--r--  include/asm-x86_64/percpu.h          7

26 files changed, 84 insertions, 53 deletions
diff --git a/arch/alpha/kernel/vmlinux.lds.S b/arch/alpha/kernel/vmlinux.lds.S
index 449e76f118d3..6f4f0378e708 100644
--- a/arch/alpha/kernel/vmlinux.lds.S
+++ b/arch/alpha/kernel/vmlinux.lds.S
@@ -69,10 +69,7 @@ SECTIONS
 	. = ALIGN(8);
 	SECURITY_INIT
 
-	. = ALIGN(8192);
-	__per_cpu_start = .;
-	.data.percpu : { *(.data.percpu) }
-	__per_cpu_end = .;
+	PERCPU(8192)
 
 	. = ALIGN(2*8192);
 	__init_end = .;
diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
index 2b7a8f5d8cf2..5ff5406666b4 100644
--- a/arch/arm/kernel/vmlinux.lds.S
+++ b/arch/arm/kernel/vmlinux.lds.S
@@ -66,6 +66,7 @@ SECTIONS
 		. = ALIGN(4096);
 		__per_cpu_start = .;
 			*(.data.percpu)
+			*(.data.percpu.shared_aligned)
 		__per_cpu_end = .;
 #ifndef CONFIG_XIP_KERNEL
 	__init_begin = _stext;
diff --git a/arch/cris/arch-v32/vmlinux.lds.S b/arch/cris/arch-v32/vmlinux.lds.S
index dfa25e1542b9..651a77f2ccc4 100644
--- a/arch/cris/arch-v32/vmlinux.lds.S
+++ b/arch/cris/arch-v32/vmlinux.lds.S
@@ -91,10 +91,7 @@ SECTIONS
 	}
 	SECURITY_INIT
 
-	. = ALIGN (8192);
-	__per_cpu_start = .;
-	.data.percpu : { *(.data.percpu) }
-	__per_cpu_end = .;
+	PERCPU(8192)
 
 #ifdef CONFIG_BLK_DEV_INITRD
 	.init.ramfs : {
diff --git a/arch/frv/kernel/vmlinux.lds.S b/arch/frv/kernel/vmlinux.lds.S
index 481dc1374640..3b71e0c86399 100644
--- a/arch/frv/kernel/vmlinux.lds.S
+++ b/arch/frv/kernel/vmlinux.lds.S
@@ -57,10 +57,7 @@ SECTIONS
 	__alt_instructions_end = .;
 	.altinstr_replacement : { *(.altinstr_replacement) }
 
-	. = ALIGN(4096);
-	__per_cpu_start = .;
-	.data.percpu : { *(.data.percpu) }
-	__per_cpu_end = .;
+	PERCPU(4096)
 
 #ifdef CONFIG_BLK_DEV_INITRD
 	. = ALIGN(4096);
diff --git a/arch/i386/kernel/vmlinux.lds.S b/arch/i386/kernel/vmlinux.lds.S
index 00f1bc47d3a2..4dc44b8007cc 100644
--- a/arch/i386/kernel/vmlinux.lds.S
+++ b/arch/i386/kernel/vmlinux.lds.S
@@ -181,6 +181,7 @@ SECTIONS
   .data.percpu : AT(ADDR(.data.percpu) - LOAD_OFFSET) {
 	__per_cpu_start = .;
 	*(.data.percpu)
+	*(.data.percpu.shared_aligned)
 	__per_cpu_end = .;
   }
   . = ALIGN(4096);
diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
index 5a65965c8b53..860f251d2fc2 100644
--- a/arch/ia64/kernel/vmlinux.lds.S
+++ b/arch/ia64/kernel/vmlinux.lds.S
@@ -206,6 +206,7 @@ SECTIONS
 	{
 		__per_cpu_start = .;
 		*(.data.percpu)
+		*(.data.percpu.shared_aligned)
 		__per_cpu_end = .;
 	}
 	. = __phys_per_cpu_start + PERCPU_PAGE_SIZE;	/* ensure percpu data fits
diff --git a/arch/m32r/kernel/vmlinux.lds.S b/arch/m32r/kernel/vmlinux.lds.S
index 4e2d5b9f0a9a..942a8c7a4417 100644
--- a/arch/m32r/kernel/vmlinux.lds.S
+++ b/arch/m32r/kernel/vmlinux.lds.S
@@ -110,10 +110,7 @@ SECTIONS
   __initramfs_end = .;
 #endif
 
-  . = ALIGN(4096);
-  __per_cpu_start = .;
-  .data.percpu : { *(.data.percpu) }
-  __per_cpu_end = .;
+  PERCPU(4096)
   . = ALIGN(4096);
   __init_end = .;
   /* freed after init ends here */
diff --git a/arch/mips/kernel/vmlinux.lds.S b/arch/mips/kernel/vmlinux.lds.S
index 9b9992cd562a..bc9bae2a73f4 100644
--- a/arch/mips/kernel/vmlinux.lds.S
+++ b/arch/mips/kernel/vmlinux.lds.S
@@ -119,10 +119,7 @@ SECTIONS
   .init.ramfs : { *(.init.ramfs) }
   __initramfs_end = .;
 #endif
-  . = ALIGN(_PAGE_SIZE);
-  __per_cpu_start = .;
-  .data.percpu : { *(.data.percpu) }
-  __per_cpu_end = .;
+  PERCPU(_PAGE_SIZE)
   . = ALIGN(_PAGE_SIZE);
   __init_end = .;
   /* freed after init ends here */
diff --git a/arch/parisc/kernel/vmlinux.lds.S b/arch/parisc/kernel/vmlinux.lds.S
index 4d96ba4b9849..d4e6a93c8d9a 100644
--- a/arch/parisc/kernel/vmlinux.lds.S
+++ b/arch/parisc/kernel/vmlinux.lds.S
@@ -181,10 +181,9 @@ SECTIONS
   .init.ramfs : { *(.init.ramfs) }
   __initramfs_end = .;
 #endif
-  . = ALIGN(ASM_PAGE_SIZE);
-  __per_cpu_start = .;
-  .data.percpu : { *(.data.percpu) }
-  __per_cpu_end = .;
+
+  PERCPU(ASM_PAGE_SIZE)
+
   . = ALIGN(ASM_PAGE_SIZE);
   __init_end = .;
   /* freed after init ends here */
diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S
index ae4acd84143d..39fda6e6aa47 100644
--- a/arch/powerpc/kernel/vmlinux.lds.S
+++ b/arch/powerpc/kernel/vmlinux.lds.S
@@ -144,6 +144,7 @@ SECTIONS
 	.data.percpu : {
 		__per_cpu_start = .;
 		*(.data.percpu)
+		*(.data.percpu.shared_aligned)
 		__per_cpu_end = .;
 	}
 
diff --git a/arch/ppc/kernel/vmlinux.lds.S b/arch/ppc/kernel/vmlinux.lds.S
index 19db8746ff14..c0aac3ff9e91 100644
--- a/arch/ppc/kernel/vmlinux.lds.S
+++ b/arch/ppc/kernel/vmlinux.lds.S
@@ -130,10 +130,7 @@ SECTIONS
   __ftr_fixup : { *(__ftr_fixup) }
   __stop___ftr_fixup = .;
 
-  . = ALIGN(4096);
-  __per_cpu_start = .;
-  .data.percpu : { *(.data.percpu) }
-  __per_cpu_end = .;
+  PERCPU(4096)
 
 #ifdef CONFIG_BLK_DEV_INITRD
   . = ALIGN(4096);
diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S
index 7158a804a5e4..61ffd50fac40 100644
--- a/arch/s390/kernel/vmlinux.lds.S
+++ b/arch/s390/kernel/vmlinux.lds.S
@@ -107,10 +107,7 @@ SECTIONS
 	. = ALIGN(2);
 	__initramfs_end = .;
 #endif
-	. = ALIGN(4096);
-	__per_cpu_start = .;
-	.data.percpu : { *(.data.percpu) }
-	__per_cpu_end = .;
+	PERCPU(4096)
 	. = ALIGN(4096);
 	__init_end = .;
 	/* freed after init ends here */
diff --git a/arch/sh/kernel/vmlinux.lds.S b/arch/sh/kernel/vmlinux.lds.S
index 0696402f446a..5ba216180b30 100644
--- a/arch/sh/kernel/vmlinux.lds.S
+++ b/arch/sh/kernel/vmlinux.lds.S
@@ -60,10 +60,7 @@ SECTIONS
   . = ALIGN(PAGE_SIZE);
   __nosave_end = .;
 
-  . = ALIGN(PAGE_SIZE);
-  __per_cpu_start = .;
-  .data.percpu : { *(.data.percpu) }
-  __per_cpu_end = .;
+  PERCPU(PAGE_SIZE)
   .data.cacheline_aligned : { *(.data.cacheline_aligned) }
 
   _edata = .;			/* End of data section */
diff --git a/arch/sh64/kernel/vmlinux.lds.S b/arch/sh64/kernel/vmlinux.lds.S
index 02aea86c5907..8ac9c7c5f848 100644
--- a/arch/sh64/kernel/vmlinux.lds.S
+++ b/arch/sh64/kernel/vmlinux.lds.S
@@ -87,7 +87,10 @@ SECTIONS
 
   . = ALIGN(PAGE_SIZE);
   __per_cpu_start = .;
-  .data.percpu : C_PHYS(.data.percpu) { *(.data.percpu) }
+  .data.percpu : C_PHYS(.data.percpu) {
+	*(.data.percpu)
+	*(.data.percpu.shared_aligned)
+  }
   __per_cpu_end = . ;
   .data.cacheline_aligned : C_PHYS(.data.cacheline_aligned) { *(.data.cacheline_aligned) }
 
diff --git a/arch/sparc/kernel/vmlinux.lds.S b/arch/sparc/kernel/vmlinux.lds.S
index f75a1b822789..47583887abc6 100644
--- a/arch/sparc/kernel/vmlinux.lds.S
+++ b/arch/sparc/kernel/vmlinux.lds.S
@@ -65,10 +65,7 @@ SECTIONS
   __initramfs_end = .;
 #endif
 
-  . = ALIGN(4096);
-  __per_cpu_start = .;
-  .data.percpu : { *(.data.percpu) }
-  __per_cpu_end = .;
+  PERCPU(4096)
   . = ALIGN(4096);
   __init_end = .;
   . = ALIGN(32);
diff --git a/arch/sparc64/kernel/vmlinux.lds.S b/arch/sparc64/kernel/vmlinux.lds.S
index 3ad10f3027e4..481861764deb 100644
--- a/arch/sparc64/kernel/vmlinux.lds.S
+++ b/arch/sparc64/kernel/vmlinux.lds.S
@@ -90,10 +90,8 @@ SECTIONS
   __initramfs_end = .;
 #endif
 
-  . = ALIGN(PAGE_SIZE);
-  __per_cpu_start = .;
-  .data.percpu : { *(.data.percpu) }
-  __per_cpu_end = .;
+  PERCPU(PAGE_SIZE)
+
   . = ALIGN(PAGE_SIZE);
   __init_end = .;
   __bss_start = .;
diff --git a/arch/x86_64/kernel/vmlinux.lds.S b/arch/x86_64/kernel/vmlinux.lds.S
index dbccfda8364f..22590690a8a0 100644
--- a/arch/x86_64/kernel/vmlinux.lds.S
+++ b/arch/x86_64/kernel/vmlinux.lds.S
@@ -194,10 +194,8 @@ SECTIONS
   __initramfs_end = .;
 #endif
 
-  . = ALIGN(4096);
-  __per_cpu_start = .;
-  .data.percpu : AT(ADDR(.data.percpu) - LOAD_OFFSET) { *(.data.percpu) }
-  __per_cpu_end = .;
+  PERCPU(4096)
+
   . = ALIGN(4096);
   __init_end = .;
 
diff --git a/arch/xtensa/kernel/vmlinux.lds.S b/arch/xtensa/kernel/vmlinux.lds.S
index b0582c3c5f8d..3e31512109f9 100644
--- a/arch/xtensa/kernel/vmlinux.lds.S
+++ b/arch/xtensa/kernel/vmlinux.lds.S
@@ -190,10 +190,7 @@ SECTIONS
   __initramfs_end = .;
 #endif
 
-  . = ALIGN(4096);
-  __per_cpu_start = .;
-  .data.percpu : { *(.data.percpu) }
-  __per_cpu_end = .;
+  PERCPU(4096)
 
 
   /* We need this dummy segment here */
diff --git a/include/asm-generic/percpu.h b/include/asm-generic/percpu.h
index d984a9041436..d85172e9ed45 100644
--- a/include/asm-generic/percpu.h
+++ b/include/asm-generic/percpu.h
@@ -14,6 +14,11 @@ extern unsigned long __per_cpu_offset[NR_CPUS];
 #define DEFINE_PER_CPU(type, name) \
     __attribute__((__section__(".data.percpu"))) __typeof__(type) per_cpu__##name
 
+#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \
+    __attribute__((__section__(".data.percpu.shared_aligned"))) \
+    __typeof__(type) per_cpu__##name \
+    ____cacheline_aligned_in_smp
+
 /* var is in discarded region: offset to particular copy we want */
 #define per_cpu(var, cpu) (*({				\
 	extern int simple_identifier_##var(void);	\
@@ -34,6 +39,9 @@ do { \
 #define DEFINE_PER_CPU(type, name) \
     __typeof__(type) per_cpu__##name
 
+#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \
+    DEFINE_PER_CPU(type, name)
+
 #define per_cpu(var, cpu)			(*((void)(cpu), &per_cpu__##var))
 #define __get_cpu_var(var)			per_cpu__##var
 #define __raw_get_cpu_var(var)			per_cpu__##var
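
[Illustration, not part of the patch: a minimal usage sketch of the new
interface; the variable names below are hypothetical. Per-CPU data that
remote CPUs write frequently can now be given a cache line of its own, so
it does not false-share with neighbouring per-CPU variables. In the second
hunk above (the UP side of the header), the new macro simply falls back to
DEFINE_PER_CPU, since there is no false sharing to avoid.]

	#include <linux/percpu.h>

	/* Hypothetical: updated from other CPUs, so isolate its cache line. */
	DEFINE_PER_CPU_SHARED_ALIGNED(long, example_remote_hits);

	/* Hypothetical: strictly CPU-local data keeps the plain definition. */
	DEFINE_PER_CPU(long, example_local_count);
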
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 84155eb67f1d..a2b09ed852ad 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -245,3 +245,11 @@
 		*(.initcall7.init)					\
 		*(.initcall7s.init)
 
+#define PERCPU(align)							\
+	. = ALIGN(align);						\
+	__per_cpu_start = .;						\
+	.data.percpu : AT(ADDR(.data.percpu) - LOAD_OFFSET) {		\
+		*(.data.percpu)						\
+		*(.data.percpu.shared_aligned)				\
+	}								\
+	__per_cpu_end = .;
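
[Illustration, not part of the patch: with this helper, an architecture's
PERCPU(4096) line expands to roughly the linker-script fragment below.
LOAD_OFFSET is taken as 0 here, the asm-generic default assumed for
architectures that do not define their own.]

	. = ALIGN(4096);
	__per_cpu_start = .;
	.data.percpu : AT(ADDR(.data.percpu) - 0) {
		*(.data.percpu)
		*(.data.percpu.shared_aligned)
	}
	__per_cpu_end = .;
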
diff --git a/include/asm-i386/percpu.h b/include/asm-i386/percpu.h
index f54830b5d5ac..a7ebd436f3cc 100644
--- a/include/asm-i386/percpu.h
+++ b/include/asm-i386/percpu.h
@@ -54,6 +54,11 @@ extern unsigned long __per_cpu_offset[];
 #define DEFINE_PER_CPU(type, name) \
     __attribute__((__section__(".data.percpu"))) __typeof__(type) per_cpu__##name
 
+#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \
+    __attribute__((__section__(".data.percpu.shared_aligned"))) \
+    __typeof__(type) per_cpu__##name \
+    ____cacheline_aligned_in_smp
+
 /* We can use this directly for local CPU (faster). */
 DECLARE_PER_CPU(unsigned long, this_cpu_off);
 
diff --git a/include/asm-ia64/percpu.h b/include/asm-ia64/percpu.h
index fbe5cf3ab8dc..43a7aac414e0 100644
--- a/include/asm-ia64/percpu.h
+++ b/include/asm-ia64/percpu.h
@@ -29,6 +29,16 @@
 	__attribute__((__section__(".data.percpu")))		\
 	__SMALL_ADDR_AREA __typeof__(type) per_cpu__##name
 
+#ifdef CONFIG_SMP
+#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name)			\
+	__attribute__((__section__(".data.percpu.shared_aligned")))	\
+	__SMALL_ADDR_AREA __typeof__(type) per_cpu__##name		\
+	____cacheline_aligned_in_smp
+#else
+#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name)	\
+	DEFINE_PER_CPU(type, name)
+#endif
+
 /*
  * Pretty much a literal copy of asm-generic/percpu.h, except that percpu_modcopy() is an
  * external routine, to avoid include-hell.
diff --git a/include/asm-powerpc/percpu.h b/include/asm-powerpc/percpu.h
index 2f2e3024fa61..73dc8ba4010d 100644
--- a/include/asm-powerpc/percpu.h
+++ b/include/asm-powerpc/percpu.h
@@ -20,6 +20,11 @@
 #define DEFINE_PER_CPU(type, name) \
     __attribute__((__section__(".data.percpu"))) __typeof__(type) per_cpu__##name
 
+#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \
+    __attribute__((__section__(".data.percpu.shared_aligned"))) \
+    __typeof__(type) per_cpu__##name \
+    ____cacheline_aligned_in_smp
+
 /* var is in discarded region: offset to particular copy we want */
 #define per_cpu(var, cpu) (*RELOC_HIDE(&per_cpu__##var, __per_cpu_offset(cpu)))
 #define __get_cpu_var(var) (*RELOC_HIDE(&per_cpu__##var, __my_cpu_offset()))
@@ -40,6 +45,8 @@ extern void setup_per_cpu_areas(void);
 
 #define DEFINE_PER_CPU(type, name) \
     __typeof__(type) per_cpu__##name
+#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \
+    DEFINE_PER_CPU(type, name)
 
 #define per_cpu(var, cpu)			(*((void)(cpu), &per_cpu__##var))
 #define __get_cpu_var(var)			per_cpu__##var
diff --git a/include/asm-s390/percpu.h b/include/asm-s390/percpu.h
index 9ea7f1023e57..545857e64443 100644
--- a/include/asm-s390/percpu.h
+++ b/include/asm-s390/percpu.h
@@ -41,6 +41,11 @@ extern unsigned long __per_cpu_offset[NR_CPUS];
     __attribute__((__section__(".data.percpu"))) \
     __typeof__(type) per_cpu__##name
 
+#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \
+    __attribute__((__section__(".data.percpu.shared_aligned"))) \
+    __typeof__(type) per_cpu__##name \
+    ____cacheline_aligned_in_smp
+
 #define __get_cpu_var(var) __reloc_hide(var,S390_lowcore.percpu_offset)
 #define __raw_get_cpu_var(var) __reloc_hide(var,S390_lowcore.percpu_offset)
 #define per_cpu(var,cpu) __reloc_hide(var,__per_cpu_offset[cpu])
@@ -59,6 +64,8 @@ do { \
 
 #define DEFINE_PER_CPU(type, name) \
     __typeof__(type) per_cpu__##name
+#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \
+    DEFINE_PER_CPU(type, name)
 
 #define __get_cpu_var(var) __reloc_hide(var,0)
 #define __raw_get_cpu_var(var) __reloc_hide(var,0)
diff --git a/include/asm-sparc64/percpu.h b/include/asm-sparc64/percpu.h
index 88db872ce2f8..caf8750792ff 100644
--- a/include/asm-sparc64/percpu.h
+++ b/include/asm-sparc64/percpu.h
@@ -18,6 +18,11 @@ extern unsigned long __per_cpu_shift;
 #define DEFINE_PER_CPU(type, name) \
     __attribute__((__section__(".data.percpu"))) __typeof__(type) per_cpu__##name
 
+#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \
+    __attribute__((__section__(".data.percpu.shared_aligned"))) \
+    __typeof__(type) per_cpu__##name \
+    ____cacheline_aligned_in_smp
+
 register unsigned long __local_per_cpu_offset asm("g5");
 
 /* var is in discarded region: offset to particular copy we want */
@@ -38,6 +43,8 @@ do { \
 #define real_setup_per_cpu_areas()		do { } while (0)
 #define DEFINE_PER_CPU(type, name) \
     __typeof__(type) per_cpu__##name
+#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \
+    DEFINE_PER_CPU(type, name)
 
 #define per_cpu(var, cpu)			(*((void)cpu, &per_cpu__##var))
 #define __get_cpu_var(var)			per_cpu__##var
diff --git a/include/asm-x86_64/percpu.h b/include/asm-x86_64/percpu.h
index c6fbb67eac90..5abd48270101 100644
--- a/include/asm-x86_64/percpu.h
+++ b/include/asm-x86_64/percpu.h
@@ -20,6 +20,11 @@
 #define DEFINE_PER_CPU(type, name) \
     __attribute__((__section__(".data.percpu"))) __typeof__(type) per_cpu__##name
 
+#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \
+    __attribute__((__section__(".data.percpu.shared_aligned"))) \
+    __typeof__(type) per_cpu__##name \
+    ____cacheline_internodealigned_in_smp
+
 /* var is in discarded region: offset to particular copy we want */
 #define per_cpu(var, cpu) (*({				\
 	extern int simple_identifier_##var(void);	\
@@ -46,6 +51,8 @@ extern void setup_per_cpu_areas(void);
 
 #define DEFINE_PER_CPU(type, name) \
     __typeof__(type) per_cpu__##name
+#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \
+    DEFINE_PER_CPU(type, name)
 
 #define per_cpu(var, cpu)			(*((void)(cpu), &per_cpu__##var))
 #define __get_cpu_var(var)			per_cpu__##var