 Makefile                               |   2
 arch/alpha/include/asm/percpu.h        | 100
 arch/alpha/include/asm/tlbflush.h      |   1
 arch/alpha/kernel/vmlinux.lds.S        |   1
 arch/arm/kernel/vmlinux.lds.S          |   1
 arch/avr32/kernel/vmlinux.lds.S        |   1
 arch/blackfin/kernel/vmlinux.lds.S     |   1
 arch/blackfin/mm/sram-alloc.c          |   6
 arch/cris/include/asm/mmu_context.h    |   3
 arch/cris/kernel/vmlinux.lds.S         |   1
 arch/cris/mm/fault.c                   |   2
 arch/frv/kernel/vmlinux.lds.S          |   2
 arch/h8300/kernel/vmlinux.lds.S        |   1
 arch/ia64/Kconfig                      |   3
 arch/ia64/kernel/smp.c                 |   3
 arch/ia64/kernel/vmlinux.lds.S         |   1
 arch/ia64/sn/kernel/setup.c            |   2
 arch/m32r/kernel/vmlinux.lds.S         |   1
 arch/m68k/kernel/vmlinux-std.lds       |   1
 arch/m68k/kernel/vmlinux-sun3.lds      |   1
 arch/m68knommu/kernel/vmlinux.lds.S    |   1
 arch/microblaze/kernel/vmlinux.lds.S   |   2
 arch/mips/kernel/vmlinux.lds.S         |   1
 arch/mn10300/kernel/vmlinux.lds.S      |   1
 arch/parisc/kernel/vmlinux.lds.S       |   1
 arch/powerpc/Kconfig                   |   3
 arch/powerpc/kernel/vmlinux.lds.S      |   1
 arch/powerpc/mm/stab.c                 |   2
 arch/powerpc/platforms/ps3/smp.c       |   2
 arch/s390/include/asm/percpu.h         |  32
 arch/s390/kernel/vmlinux.lds.S         |   1
 arch/sh/kernel/vmlinux.lds.S           |   1
 arch/sparc/Kconfig                     |   3
 arch/sparc/kernel/vmlinux.lds.S        |   1
 arch/um/kernel/dyn.lds.S               |   2
 arch/um/kernel/uml.lds.S               |   2
 arch/x86/Kconfig                       |   3
 arch/x86/kernel/cpu/cpu_debug.c        |   4
 arch/x86/kernel/cpu/mcheck/mce.c       |   8
 arch/x86/kernel/cpu/mcheck/mce_amd.c   |   2
 arch/x86/kernel/cpu/perf_counter.c     |  14
 arch/xtensa/kernel/vmlinux.lds.S       |   1
 block/as-iosched.c                     |  10
 block/cfq-iosched.c                    |  10
 drivers/cpufreq/cpufreq_conservative.c |  12
 drivers/cpufreq/cpufreq_ondemand.c     |  15
 drivers/xen/events.c                   |  13
 include/asm-generic/vmlinux.lds.h      |   8
 include/linux/percpu-defs.h            |  66
 include/linux/percpu.h                 |  12
 init/main.c                            |  24
 kernel/module.c                        |   6
 kernel/perf_counter.c                  |   6
 kernel/sched.c                         |   4
 kernel/trace/trace_events.c            |   6
 lib/Kconfig.debug                      |  15
 mm/Makefile                            |   2
 mm/allocpercpu.c                       |  28
 mm/kmemleak-test.c                     |   6
 mm/page-writeback.c                    |   5
 mm/percpu.c                            |  40
 mm/quicklist.c                         |   2
 mm/slub.c                              |   4
 net/ipv4/syncookies.c                  |   5
 net/ipv6/syncookies.c                  |   5
 net/rds/ib_stats.c                     |   2
 net/rds/iw_stats.c                     |   2
 net/rds/page.c                         |   2
 scripts/module-common.lds              |   8
 69 files changed, 301 insertions(+), 239 deletions(-)
diff --git a/Makefile b/Makefile
index d1216fea0c92..2f503380e149 100644
--- a/Makefile
+++ b/Makefile
@@ -327,7 +327,7 @@ CHECKFLAGS := -D__linux__ -Dlinux -D__STDC__ -Dunix -D__unix__ \
 MODFLAGS       = -DMODULE
 CFLAGS_MODULE  = $(MODFLAGS)
 AFLAGS_MODULE  = $(MODFLAGS)
-LDFLAGS_MODULE =
+LDFLAGS_MODULE = -T $(srctree)/scripts/module-common.lds
 CFLAGS_KERNEL  =
 AFLAGS_KERNEL  =
 CFLAGS_GCOV    = -fprofile-arcs -ftest-coverage
diff --git a/arch/alpha/include/asm/percpu.h b/arch/alpha/include/asm/percpu.h
index b663f1f10b6a..2c12378e3aa9 100644
--- a/arch/alpha/include/asm/percpu.h
+++ b/arch/alpha/include/asm/percpu.h
@@ -1,102 +1,18 @@
 #ifndef __ALPHA_PERCPU_H
 #define __ALPHA_PERCPU_H
 
-#include <linux/compiler.h>
-#include <linux/threads.h>
-#include <linux/percpu-defs.h>
-
-/*
- * Determine the real variable name from the name visible in the
- * kernel sources.
- */
-#define per_cpu_var(var) per_cpu__##var
-
-#ifdef CONFIG_SMP
-
-/*
- * per_cpu_offset() is the offset that has to be added to a
- * percpu variable to get to the instance for a certain processor.
- */
-extern unsigned long __per_cpu_offset[NR_CPUS];
-
-#define per_cpu_offset(x) (__per_cpu_offset[x])
-
-#define __my_cpu_offset per_cpu_offset(raw_smp_processor_id())
-#ifdef CONFIG_DEBUG_PREEMPT
-#define my_cpu_offset per_cpu_offset(smp_processor_id())
-#else
-#define my_cpu_offset __my_cpu_offset
-#endif
-
-#ifndef MODULE
-#define SHIFT_PERCPU_PTR(var, offset) RELOC_HIDE(&per_cpu_var(var), (offset))
-#define PER_CPU_DEF_ATTRIBUTES
-#else
 /*
- * To calculate addresses of locally defined variables, GCC uses 32-bit
- * displacement from the GP. Which doesn't work for per cpu variables in
- * modules, as an offset to the kernel per cpu area is way above 4G.
+ * To calculate addresses of locally defined variables, GCC uses
+ * 32-bit displacement from the GP. Which doesn't work for per cpu
+ * variables in modules, as an offset to the kernel per cpu area is
+ * way above 4G.
  *
- * This forces allocation of a GOT entry for per cpu variable using
- * ldq instruction with a 'literal' relocation.
- */
-#define SHIFT_PERCPU_PTR(var, offset) ({			\
-	extern int simple_identifier_##var(void);		\
-	unsigned long __ptr, tmp_gp;				\
-	asm (  "br %1, 1f				      \n\
-	1:	ldgp %1, 0(%1)				      \n\
-		ldq %0, per_cpu__" #var"(%1)\t!literal"		\
-		: "=&r"(__ptr), "=&r"(tmp_gp));			\
-	(typeof(&per_cpu_var(var)))(__ptr + (offset)); })
-
-#define PER_CPU_DEF_ATTRIBUTES	__used
-
-#endif /* MODULE */
-
-/*
- * A percpu variable may point to a discarded regions. The following are
- * established ways to produce a usable pointer from the percpu variable
- * offset.
+ * Always use weak definitions for percpu variables in modules.
  */
-#define per_cpu(var, cpu) \
-	(*SHIFT_PERCPU_PTR(var, per_cpu_offset(cpu)))
-#define __get_cpu_var(var) \
-	(*SHIFT_PERCPU_PTR(var, my_cpu_offset))
-#define __raw_get_cpu_var(var) \
-	(*SHIFT_PERCPU_PTR(var, __my_cpu_offset))
-
-#else /* ! SMP */
-
-#define per_cpu(var, cpu)		(*((void)(cpu), &per_cpu_var(var)))
-#define __get_cpu_var(var)		per_cpu_var(var)
-#define __raw_get_cpu_var(var)		per_cpu_var(var)
-
-#define PER_CPU_DEF_ATTRIBUTES
-
-#endif /* SMP */
-
-#ifdef CONFIG_SMP
-#define PER_CPU_BASE_SECTION ".data.percpu"
-#else
-#define PER_CPU_BASE_SECTION ".data"
-#endif
-
-#ifdef CONFIG_SMP
-
-#ifdef MODULE
-#define PER_CPU_SHARED_ALIGNED_SECTION ""
-#else
-#define PER_CPU_SHARED_ALIGNED_SECTION ".shared_aligned"
-#endif
-#define PER_CPU_FIRST_SECTION ".first"
-
-#else
-
-#define PER_CPU_SHARED_ALIGNED_SECTION ""
-#define PER_CPU_FIRST_SECTION ""
-
-#endif
+#if defined(MODULE) && defined(CONFIG_SMP)
+#define ARCH_NEEDS_WEAK_PER_CPU
 #endif
 
-#define PER_CPU_ATTRIBUTES
+#include <asm-generic/percpu.h>
 
 #endif /* __ALPHA_PERCPU_H */
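
Note: the hunk above drops alpha's hand-rolled ldgp/ldq "!literal" pointer
arithmetic in favor of the generic weak-symbol mechanism. A minimal sketch of
why a weak definition solves the same problem -- the identifier names below
are illustrative, not from this patch:

/* A weak definition may be overridden, so the compiler cannot resolve it
 * locally and emits a GOT-based external reference instead of a 32-bit
 * GP-relative displacement -- exactly what module code needs when the
 * kernel percpu area sits more than 4G away. */
int __attribute__((weak, section(".data.percpu"))) per_cpu__foo;

static int *foo_this_cpu(unsigned long cpu_offset)
{
	/* &per_cpu__foo is fetched through the GOT at module load time */
	return (int *)((unsigned long)&per_cpu__foo + cpu_offset);
}
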
diff --git a/arch/alpha/include/asm/tlbflush.h b/arch/alpha/include/asm/tlbflush.h
index 9d87aaa08c0d..e89e0c2e15b1 100644
--- a/arch/alpha/include/asm/tlbflush.h
+++ b/arch/alpha/include/asm/tlbflush.h
@@ -2,6 +2,7 @@
 #define _ALPHA_TLBFLUSH_H
 
 #include <linux/mm.h>
+#include <linux/sched.h>
 #include <asm/compiler.h>
 #include <asm/pgalloc.h>
 
diff --git a/arch/alpha/kernel/vmlinux.lds.S b/arch/alpha/kernel/vmlinux.lds.S
index b9d6568e5f7f..75fe1d6877e9 100644
--- a/arch/alpha/kernel/vmlinux.lds.S
+++ b/arch/alpha/kernel/vmlinux.lds.S
@@ -139,6 +139,7 @@ SECTIONS
 	EXIT_TEXT
 	EXIT_DATA
 	*(.exitcall.exit)
+	*(.discard)
 	}
 
 	.mdebug 0 : {
diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
index 69371028a202..5cc4812c9763 100644
--- a/arch/arm/kernel/vmlinux.lds.S
+++ b/arch/arm/kernel/vmlinux.lds.S
@@ -83,6 +83,7 @@ SECTIONS
 		EXIT_TEXT
 		EXIT_DATA
 		*(.exitcall.exit)
+		*(.discard)
 		*(.ARM.exidx.exit.text)
 		*(.ARM.extab.exit.text)
 #ifndef CONFIG_HOTPLUG_CPU
diff --git a/arch/avr32/kernel/vmlinux.lds.S b/arch/avr32/kernel/vmlinux.lds.S
index 7910d41eb886..b8324608ec0c 100644
--- a/arch/avr32/kernel/vmlinux.lds.S
+++ b/arch/avr32/kernel/vmlinux.lds.S
@@ -131,6 +131,7 @@ SECTIONS
 	/DISCARD/	: {
 		EXIT_DATA
 		*(.exitcall.exit)
+		*(.discard)
 	}
 
 	DWARF_DEBUG
diff --git a/arch/blackfin/kernel/vmlinux.lds.S b/arch/blackfin/kernel/vmlinux.lds.S
index 6ac307ca0d80..6e8eabd8f0a6 100644
--- a/arch/blackfin/kernel/vmlinux.lds.S
+++ b/arch/blackfin/kernel/vmlinux.lds.S
@@ -280,5 +280,6 @@ SECTIONS
 	/DISCARD/ :
 	{
 		*(.exitcall.exit)
+		*(.discard)
 	}
 }
diff --git a/arch/blackfin/mm/sram-alloc.c b/arch/blackfin/mm/sram-alloc.c
index 0bc3c4ef0aad..99e4dbb1dfd1 100644
--- a/arch/blackfin/mm/sram-alloc.c
+++ b/arch/blackfin/mm/sram-alloc.c
@@ -42,9 +42,9 @@
 #include <asm/mem_map.h>
 #include "blackfin_sram.h"
 
-static DEFINE_PER_CPU(spinlock_t, l1sram_lock) ____cacheline_aligned_in_smp;
-static DEFINE_PER_CPU(spinlock_t, l1_data_sram_lock) ____cacheline_aligned_in_smp;
-static DEFINE_PER_CPU(spinlock_t, l1_inst_sram_lock) ____cacheline_aligned_in_smp;
+static DEFINE_PER_CPU_SHARED_ALIGNED(spinlock_t, l1sram_lock);
+static DEFINE_PER_CPU_SHARED_ALIGNED(spinlock_t, l1_data_sram_lock);
+static DEFINE_PER_CPU_SHARED_ALIGNED(spinlock_t, l1_inst_sram_lock);
 static spinlock_t l2_sram_lock ____cacheline_aligned_in_smp;
 
 /* the data structure for L1 scratchpad and DATA SRAM */
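
Note: DEFINE_PER_CPU_SHARED_ALIGNED both applies the cacheline alignment and
places the variable in a dedicated percpu subsection, so aligned variables
are grouped together instead of padding out unrelated percpu data. A rough
sketch of the SMP expansion, assuming the section names visible in the alpha
header above (the real macro lives in include/linux/percpu-defs.h):

__attribute__((section(".data.percpu.shared_aligned")))
__typeof__(spinlock_t) per_cpu__l1sram_lock ____cacheline_aligned_in_smp;
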
diff --git a/arch/cris/include/asm/mmu_context.h b/arch/cris/include/asm/mmu_context.h
index 72ba08dcfd18..1d45fd6365b7 100644
--- a/arch/cris/include/asm/mmu_context.h
+++ b/arch/cris/include/asm/mmu_context.h
@@ -17,7 +17,8 @@ extern void switch_mm(struct mm_struct *prev, struct mm_struct *next,
  * registers like cr3 on the i386
  */
 
-extern volatile DEFINE_PER_CPU(pgd_t *,current_pgd); /* defined in arch/cris/mm/fault.c */
+/* defined in arch/cris/mm/fault.c */
+DECLARE_PER_CPU(pgd_t *, current_pgd);
 
 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
 {
diff --git a/arch/cris/kernel/vmlinux.lds.S b/arch/cris/kernel/vmlinux.lds.S
index 0d2adfc794d4..a3175ebb38cc 100644
--- a/arch/cris/kernel/vmlinux.lds.S
+++ b/arch/cris/kernel/vmlinux.lds.S
@@ -145,6 +145,7 @@ SECTIONS
 		EXIT_TEXT
 		EXIT_DATA
 		*(.exitcall.exit)
+		*(.discard)
 	}
 
 	dram_end = dram_start + (CONFIG_ETRAX_DRAM_SIZE - __CONFIG_ETRAX_VMEM_SIZE)*1024*1024;
diff --git a/arch/cris/mm/fault.c b/arch/cris/mm/fault.c
index f925115e3250..4a7cdd9ea1ee 100644
--- a/arch/cris/mm/fault.c
+++ b/arch/cris/mm/fault.c
@@ -29,7 +29,7 @@ extern void die_if_kernel(const char *, struct pt_regs *, long);
 
 /* current active page directory */
 
-volatile DEFINE_PER_CPU(pgd_t *,current_pgd);
+DEFINE_PER_CPU(pgd_t *, current_pgd);
 unsigned long cris_signal_return_page;
 
 /*
diff --git a/arch/frv/kernel/vmlinux.lds.S b/arch/frv/kernel/vmlinux.lds.S
index 22d9787406ed..64b5a5e4d35e 100644
--- a/arch/frv/kernel/vmlinux.lds.S
+++ b/arch/frv/kernel/vmlinux.lds.S
@@ -177,6 +177,8 @@ SECTIONS
 	.debug_ranges		0 : { *(.debug_ranges) }
 
 	.comment 0 : { *(.comment) }
+
+	/DISCARD/ : { *(.discard) }
 }
 
 __kernel_image_size_no_bss = __bss_start - __kernel_image_start;
diff --git a/arch/h8300/kernel/vmlinux.lds.S b/arch/h8300/kernel/vmlinux.lds.S
index 43a87b9085b6..03d6c0df33db 100644
--- a/arch/h8300/kernel/vmlinux.lds.S
+++ b/arch/h8300/kernel/vmlinux.lds.S
@@ -154,6 +154,7 @@ SECTIONS
 	}
 	/DISCARD/ : {
 		*(.exitcall.exit)
+		*(.discard)
 	}
 	.romfs :
 	{
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index 170042b420d4..328d2f8b8c3f 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -89,6 +89,9 @@ config GENERIC_TIME_VSYSCALL
 	bool
 	default y
 
+config HAVE_LEGACY_PER_CPU_AREA
+	def_bool y
+
 config HAVE_SETUP_PER_CPU_AREA
 	def_bool y
 
diff --git a/arch/ia64/kernel/smp.c b/arch/ia64/kernel/smp.c
index f0c521b0ba4c..93ebfea43c6c 100644
--- a/arch/ia64/kernel/smp.c
+++ b/arch/ia64/kernel/smp.c
@@ -58,7 +58,8 @@ static struct local_tlb_flush_counts {
 	unsigned int count;
 } __attribute__((__aligned__(32))) local_tlb_flush_counts[NR_CPUS];
 
-static DEFINE_PER_CPU(unsigned short, shadow_flush_counts[NR_CPUS]) ____cacheline_aligned;
+static DEFINE_PER_CPU_SHARED_ALIGNED(unsigned short [NR_CPUS],
+				     shadow_flush_counts);
 
 #define IPI_CALL_FUNC		0
 #define IPI_CPU_STOP		1
diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
index 4a95e86b9ac2..13d958975874 100644
--- a/arch/ia64/kernel/vmlinux.lds.S
+++ b/arch/ia64/kernel/vmlinux.lds.S
@@ -29,6 +29,7 @@ SECTIONS
 	EXIT_TEXT
 	EXIT_DATA
 	*(.exitcall.exit)
+	*(.discard)
 	*(.IA_64.unwind.exit.text)
 	*(.IA_64.unwind_info.exit.text)
 	}
diff --git a/arch/ia64/sn/kernel/setup.c b/arch/ia64/sn/kernel/setup.c
index e456f062f241..ece1bf994499 100644
--- a/arch/ia64/sn/kernel/setup.c
+++ b/arch/ia64/sn/kernel/setup.c
@@ -71,7 +71,7 @@ EXPORT_SYMBOL(sn_rtc_cycles_per_second);
 DEFINE_PER_CPU(struct sn_hub_info_s, __sn_hub_info);
 EXPORT_PER_CPU_SYMBOL(__sn_hub_info);
 
-DEFINE_PER_CPU(short, __sn_cnodeid_to_nasid[MAX_COMPACT_NODES]);
+DEFINE_PER_CPU(short [MAX_COMPACT_NODES], __sn_cnodeid_to_nasid);
 EXPORT_PER_CPU_SYMBOL(__sn_cnodeid_to_nasid);
 
 DEFINE_PER_CPU(struct nodepda_s *, __sn_nodepda);
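
Note: this hunk and the similar ones below (stab.c, ps3/smp.c, cpu_debug.c,
mce_amd.c, perf_counter.c, xen/events.c) move the array bounds out of the
name and into the type argument. __typeof__() carries an array type without
complaint, while keeping the name a plain identifier lets the generic macros
paste it uniformly into derived symbols (per_cpu__<name>, and the
__pcpu_scope_/__pcpu_unique_ dummies introduced in percpu-defs.h further
down). A sketch of the resulting usage:

DEFINE_PER_CPU(short [MAX_COMPACT_NODES], __sn_cnodeid_to_nasid);

static short cnodeid_to_nasid(int cnode, int cpu)
{
	/* element access is unchanged */
	return per_cpu(__sn_cnodeid_to_nasid[cnode], cpu);
}
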
diff --git a/arch/m32r/kernel/vmlinux.lds.S b/arch/m32r/kernel/vmlinux.lds.S
index 4179adf6c624..480a49944cfd 100644
--- a/arch/m32r/kernel/vmlinux.lds.S
+++ b/arch/m32r/kernel/vmlinux.lds.S
@@ -125,6 +125,7 @@ SECTIONS
 	EXIT_TEXT
 	EXIT_DATA
 	*(.exitcall.exit)
+	*(.discard)
 	}
 
 	/* Stabs debugging sections.  */
diff --git a/arch/m68k/kernel/vmlinux-std.lds b/arch/m68k/kernel/vmlinux-std.lds
index 01d212bb05a6..905a797ada93 100644
--- a/arch/m68k/kernel/vmlinux-std.lds
+++ b/arch/m68k/kernel/vmlinux-std.lds
@@ -87,6 +87,7 @@ SECTIONS
 	EXIT_TEXT
 	EXIT_DATA
 	*(.exitcall.exit)
+	*(.discard)
 	}
 
 	/* Stabs debugging sections.  */
diff --git a/arch/m68k/kernel/vmlinux-sun3.lds b/arch/m68k/kernel/vmlinux-sun3.lds
index c192f773db96..47d04be322aa 100644
--- a/arch/m68k/kernel/vmlinux-sun3.lds
+++ b/arch/m68k/kernel/vmlinux-sun3.lds
@@ -82,6 +82,7 @@ __init_begin = .;
 	EXIT_TEXT
 	EXIT_DATA
 	*(.exitcall.exit)
+	*(.discard)
 	}
 
 	.crap : {
diff --git a/arch/m68knommu/kernel/vmlinux.lds.S b/arch/m68knommu/kernel/vmlinux.lds.S
index b7fe505e358d..68111a61a77f 100644
--- a/arch/m68knommu/kernel/vmlinux.lds.S
+++ b/arch/m68knommu/kernel/vmlinux.lds.S
@@ -188,6 +188,7 @@ SECTIONS {
 		EXIT_TEXT
 		EXIT_DATA
 		*(.exitcall.exit)
+		*(.discard)
 	}
 
 	.bss : {
diff --git a/arch/microblaze/kernel/vmlinux.lds.S b/arch/microblaze/kernel/vmlinux.lds.S
index d34d38dcd12c..a207543c5927 100644
--- a/arch/microblaze/kernel/vmlinux.lds.S
+++ b/arch/microblaze/kernel/vmlinux.lds.S
@@ -162,4 +162,6 @@ SECTIONS {
 	}
 	. = ALIGN(4096);
 	_end = .;
+
+	/DISCARD/ : { *(.discard) }
 }
diff --git a/arch/mips/kernel/vmlinux.lds.S b/arch/mips/kernel/vmlinux.lds.S
index 58738c8d754f..45901609b741 100644
--- a/arch/mips/kernel/vmlinux.lds.S
+++ b/arch/mips/kernel/vmlinux.lds.S
@@ -179,6 +179,7 @@ SECTIONS
 	/* Sections to be discarded */
 	/DISCARD/ : {
 		*(.exitcall.exit)
+		*(.discard)
 
 		/* ABI crap starts here */
 		*(.MIPS.options)
diff --git a/arch/mn10300/kernel/vmlinux.lds.S b/arch/mn10300/kernel/vmlinux.lds.S
index bcebcefb4ad7..5609d4962a55 100644
--- a/arch/mn10300/kernel/vmlinux.lds.S
+++ b/arch/mn10300/kernel/vmlinux.lds.S
@@ -118,6 +118,7 @@ SECTIONS
 	/* Sections to be discarded */
 	/DISCARD/ : {
 		EXIT_CALL
+		*(.discard)
 	}
 
 	STABS_DEBUG
diff --git a/arch/parisc/kernel/vmlinux.lds.S b/arch/parisc/kernel/vmlinux.lds.S
index fd2cc4fd2b65..ccf58341845a 100644
--- a/arch/parisc/kernel/vmlinux.lds.S
+++ b/arch/parisc/kernel/vmlinux.lds.S
@@ -240,6 +240,7 @@ SECTIONS
 	/* Sections to be discarded */
 	/DISCARD/ : {
 		*(.exitcall.exit)
+		*(.discard)
 #ifdef CONFIG_64BIT
 		/* temporary hack until binutils is fixed to not emit these
 		 * for static binaries
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index d00131ca0835..61bbffa2fe60 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -46,6 +46,9 @@ config GENERIC_HARDIRQS_NO__DO_IRQ
 	bool
 	default y
 
+config HAVE_LEGACY_PER_CPU_AREA
+	def_bool PPC64
+
 config HAVE_SETUP_PER_CPU_AREA
 	def_bool PPC64
 
diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S
index 8ef8a14abc95..7fca9355fd3d 100644
--- a/arch/powerpc/kernel/vmlinux.lds.S
+++ b/arch/powerpc/kernel/vmlinux.lds.S
@@ -40,6 +40,7 @@ SECTIONS
 	/* Sections to be discarded. */
 	/DISCARD/ : {
 	*(.exitcall.exit)
+	*(.discard)
 	EXIT_DATA
 	}
 
diff --git a/arch/powerpc/mm/stab.c b/arch/powerpc/mm/stab.c
index 98cd1dc2ae75..6e9b69c99856 100644
--- a/arch/powerpc/mm/stab.c
+++ b/arch/powerpc/mm/stab.c
@@ -31,7 +31,7 @@ struct stab_entry {
 
 #define NR_STAB_CACHE_ENTRIES 8
 static DEFINE_PER_CPU(long, stab_cache_ptr);
-static DEFINE_PER_CPU(long, stab_cache[NR_STAB_CACHE_ENTRIES]);
+static DEFINE_PER_CPU(long [NR_STAB_CACHE_ENTRIES], stab_cache);
 
 /*
  * Create a segment table entry for the given esid/vsid pair.
diff --git a/arch/powerpc/platforms/ps3/smp.c b/arch/powerpc/platforms/ps3/smp.c
index f6e04bcc70ef..51ffde40af2b 100644
--- a/arch/powerpc/platforms/ps3/smp.c
+++ b/arch/powerpc/platforms/ps3/smp.c
@@ -37,7 +37,7 @@
  */
 
 #define MSG_COUNT 4
-static DEFINE_PER_CPU(unsigned int, ps3_ipi_virqs[MSG_COUNT]);
+static DEFINE_PER_CPU(unsigned int [MSG_COUNT], ps3_ipi_virqs);
 
 static void do_message_pass(int target, int msg)
 {
diff --git a/arch/s390/include/asm/percpu.h b/arch/s390/include/asm/percpu.h
index 408d60b4f75b..f7ad8719d02d 100644
--- a/arch/s390/include/asm/percpu.h
+++ b/arch/s390/include/asm/percpu.h
@@ -1,37 +1,21 @@
 #ifndef __ARCH_S390_PERCPU__
 #define __ARCH_S390_PERCPU__
 
-#include <linux/compiler.h>
-#include <asm/lowcore.h>
-
 /*
  * s390 uses its own implementation for per cpu data, the offset of
  * the cpu local data area is cached in the cpu's lowcore memory.
- * For 64 bit module code s390 forces the use of a GOT slot for the
- * address of the per cpu variable. This is needed because the module
- * may be more than 4G above the per cpu area.
  */
-#if defined(__s390x__) && defined(MODULE)
-
-#define SHIFT_PERCPU_PTR(ptr,offset) (({		\
-	extern int simple_identifier_##var(void);	\
-	unsigned long *__ptr;				\
-	asm ( "larl %0, %1@GOTENT"			\
-	      : "=a" (__ptr) : "X" (ptr) );		\
-	(typeof(ptr))((*__ptr) + (offset)); }))
-
-#else
-
-#define SHIFT_PERCPU_PTR(ptr, offset) (({		\
-	extern int simple_identifier_##var(void);	\
-	unsigned long __ptr;				\
-	asm ( "" : "=a" (__ptr) : "0" (ptr) );		\
-	(typeof(ptr)) (__ptr + (offset)); }))
+#define __my_cpu_offset S390_lowcore.percpu_offset
 
+/*
+ * For 64 bit module code, the module may be more than 4G above the
+ * per cpu area, use weak definitions to force the compiler to
+ * generate external references.
+ */
+#if defined(CONFIG_SMP) && defined(__s390x__) && defined(MODULE)
+#define ARCH_NEEDS_WEAK_PER_CPU
 #endif
 
-#define __my_cpu_offset S390_lowcore.percpu_offset
-
 #include <asm-generic/percpu.h>
 
 #endif /* __ARCH_S390_PERCPU__ */
diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S
index a53db23ee092..98867dfea469 100644
--- a/arch/s390/kernel/vmlinux.lds.S
+++ b/arch/s390/kernel/vmlinux.lds.S
@@ -161,6 +161,7 @@ SECTIONS
 	/DISCARD/ : {
 		EXIT_DATA
 		*(.exitcall.exit)
+		*(.discard)
 	}
 
 	/* Debugging sections.	*/
diff --git a/arch/sh/kernel/vmlinux.lds.S b/arch/sh/kernel/vmlinux.lds.S
index f53c76acaede..766976d27b21 100644
--- a/arch/sh/kernel/vmlinux.lds.S
+++ b/arch/sh/kernel/vmlinux.lds.S
@@ -171,6 +171,7 @@ SECTIONS
 	 */
 	/DISCARD/ : {
 		*(.exitcall.exit)
+		*(.discard)
 	}
 
 	STABS_DEBUG
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index 3f8b6a92eabd..4f6ed0f113f0 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -95,9 +95,6 @@ config AUDIT_ARCH
 config HAVE_SETUP_PER_CPU_AREA
 	def_bool y if SPARC64
 
-config HAVE_DYNAMIC_PER_CPU_AREA
-	def_bool y if SPARC64
-
 config GENERIC_HARDIRQS_NO__DO_IRQ
 	bool
 	def_bool y if SPARC64
diff --git a/arch/sparc/kernel/vmlinux.lds.S b/arch/sparc/kernel/vmlinux.lds.S
index fcbbd000ec08..d63cf914667d 100644
--- a/arch/sparc/kernel/vmlinux.lds.S
+++ b/arch/sparc/kernel/vmlinux.lds.S
@@ -175,6 +175,7 @@ SECTIONS
 		EXIT_TEXT
 		EXIT_DATA
 		*(.exitcall.exit)
+		*(.discard)
 	}
 
 	STABS_DEBUG
diff --git a/arch/um/kernel/dyn.lds.S b/arch/um/kernel/dyn.lds.S
index 9975e1ab44fb..2916d6eadffd 100644
--- a/arch/um/kernel/dyn.lds.S
+++ b/arch/um/kernel/dyn.lds.S
@@ -156,4 +156,6 @@ SECTIONS
 	STABS_DEBUG
 
 	DWARF_DEBUG
+
+	/DISCARD/ : { *(.discard) }
 }
diff --git a/arch/um/kernel/uml.lds.S b/arch/um/kernel/uml.lds.S
index 11b835248b86..1f8a622cabe1 100644
--- a/arch/um/kernel/uml.lds.S
+++ b/arch/um/kernel/uml.lds.S
@@ -100,4 +100,6 @@ SECTIONS
 	STABS_DEBUG
 
 	DWARF_DEBUG
+
+	/DISCARD/ : { *(.discard) }
 }
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index c07f72205909..646fcabb0ad7 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -149,9 +149,6 @@ config ARCH_HAS_CACHE_LINE_SIZE
 config HAVE_SETUP_PER_CPU_AREA
 	def_bool y
 
-config HAVE_DYNAMIC_PER_CPU_AREA
-	def_bool y
-
 config HAVE_CPUMASK_OF_CPU_MAP
 	def_bool X86_64_SMP
 
diff --git a/arch/x86/kernel/cpu/cpu_debug.c b/arch/x86/kernel/cpu/cpu_debug.c
index 6b2a52dd0403..dca325c03999 100644
--- a/arch/x86/kernel/cpu/cpu_debug.c
+++ b/arch/x86/kernel/cpu/cpu_debug.c
@@ -30,8 +30,8 @@
 #include <asm/apic.h>
 #include <asm/desc.h>
 
-static DEFINE_PER_CPU(struct cpu_cpuX_base, cpu_arr[CPU_REG_ALL_BIT]);
-static DEFINE_PER_CPU(struct cpu_private *, priv_arr[MAX_CPU_FILES]);
+static DEFINE_PER_CPU(struct cpu_cpuX_base [CPU_REG_ALL_BIT], cpu_arr);
+static DEFINE_PER_CPU(struct cpu_private * [MAX_CPU_FILES], priv_arr);
 static DEFINE_PER_CPU(int, cpu_priv_count);
 
 static DEFINE_MUTEX(cpu_debug_lock);
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index af425b83202b..20d498351105 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -1091,7 +1091,7 @@ void mce_log_therm_throt_event(__u64 status)
  */
 static int check_interval = 5 * 60; /* 5 minutes */
 
-static DEFINE_PER_CPU(int, next_interval); /* in jiffies */
+static DEFINE_PER_CPU(int, mce_next_interval); /* in jiffies */
 static DEFINE_PER_CPU(struct timer_list, mce_timer);
 
 static void mcheck_timer(unsigned long data)
@@ -1110,7 +1110,7 @@ static void mcheck_timer(unsigned long data)
 	 * Alert userspace if needed. If we logged an MCE, reduce the
 	 * polling interval, otherwise increase the polling interval.
 	 */
-	n = &__get_cpu_var(next_interval);
+	n = &__get_cpu_var(mce_next_interval);
 	if (mce_notify_irq())
 		*n = max(*n/2, HZ/100);
 	else
@@ -1311,7 +1311,7 @@ static void mce_cpu_features(struct cpuinfo_x86 *c)
 static void mce_init_timer(void)
 {
 	struct timer_list *t = &__get_cpu_var(mce_timer);
-	int *n = &__get_cpu_var(next_interval);
+	int *n = &__get_cpu_var(mce_next_interval);
 
 	if (mce_ignore_ce)
 		return;
@@ -1914,7 +1914,7 @@ mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
 	case CPU_DOWN_FAILED:
 	case CPU_DOWN_FAILED_FROZEN:
 		t->expires = round_jiffies(jiffies +
-						__get_cpu_var(next_interval));
+					   __get_cpu_var(mce_next_interval));
 		add_timer_on(t, cpu);
 		smp_call_function_single(cpu, mce_reenable_cpu, &action, 1);
 		break;
diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c
index ddae21620bda..bd2a2fa84628 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_amd.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c
@@ -69,7 +69,7 @@ struct threshold_bank {
 	struct threshold_block	*blocks;
 	cpumask_var_t		cpus;
 };
-static DEFINE_PER_CPU(struct threshold_bank *, threshold_banks[NR_BANKS]);
+static DEFINE_PER_CPU(struct threshold_bank * [NR_BANKS], threshold_banks);
 
 #ifdef CONFIG_SMP
 static unsigned char shared_bank[NR_BANKS] = {
diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c
index d4cf4ce19aac..13bd6d6cf0bd 100644
--- a/arch/x86/kernel/cpu/perf_counter.c
+++ b/arch/x86/kernel/cpu/perf_counter.c
@@ -862,7 +862,7 @@ amd_pmu_disable_counter(struct hw_perf_counter *hwc, int idx)
 	x86_pmu_disable_counter(hwc, idx);
 }
 
-static DEFINE_PER_CPU(u64, prev_left[X86_PMC_IDX_MAX]);
+static DEFINE_PER_CPU(u64 [X86_PMC_IDX_MAX], pmc_prev_left);
 
 /*
  * Set the next IRQ period, based on the hwc->period_left value.
@@ -901,7 +901,7 @@ x86_perf_counter_set_period(struct perf_counter *counter,
 	if (left > x86_pmu.max_period)
 		left = x86_pmu.max_period;
 
-	per_cpu(prev_left[idx], smp_processor_id()) = left;
+	per_cpu(pmc_prev_left[idx], smp_processor_id()) = left;
 
 	/*
 	 * The hw counter starts counting from this counter offset,
@@ -1086,7 +1086,7 @@ void perf_counter_print_debug(void)
 		rdmsrl(x86_pmu.eventsel + idx, pmc_ctrl);
 		rdmsrl(x86_pmu.perfctr + idx, pmc_count);
 
-		prev_left = per_cpu(prev_left[idx], cpu);
+		prev_left = per_cpu(pmc_prev_left[idx], cpu);
 
 		pr_info("CPU#%d: gen-PMC%d ctrl:  %016llx\n",
 			cpu, idx, pmc_ctrl);
@@ -1559,8 +1559,8 @@ void callchain_store(struct perf_callchain_entry *entry, u64 ip)
 		entry->ip[entry->nr++] = ip;
 }
 
-static DEFINE_PER_CPU(struct perf_callchain_entry, irq_entry);
-static DEFINE_PER_CPU(struct perf_callchain_entry, nmi_entry);
+static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_irq_entry);
+static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_nmi_entry);
 
 
 static void
@@ -1707,9 +1707,9 @@ struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
 	struct perf_callchain_entry *entry;
 
 	if (in_nmi())
-		entry = &__get_cpu_var(nmi_entry);
+		entry = &__get_cpu_var(pmc_nmi_entry);
 	else
-		entry = &__get_cpu_var(irq_entry);
+		entry = &__get_cpu_var(pmc_irq_entry);
 
 	entry->nr = 0;
 
diff --git a/arch/xtensa/kernel/vmlinux.lds.S b/arch/xtensa/kernel/vmlinux.lds.S
index 41c159cd872f..b1e24638acd7 100644
--- a/arch/xtensa/kernel/vmlinux.lds.S
+++ b/arch/xtensa/kernel/vmlinux.lds.S
@@ -287,6 +287,7 @@ SECTIONS
 	EXIT_TEXT
 	EXIT_DATA
 	*(.exitcall.exit)
+	*(.discard)
 	}
 
 	.xt.lit : { *(.xt.lit) }
diff --git a/block/as-iosched.c b/block/as-iosched.c
index 7a12cf6ee1d3..ce8ba57c6557 100644
--- a/block/as-iosched.c
+++ b/block/as-iosched.c
@@ -146,7 +146,7 @@ enum arq_state {
 #define RQ_STATE(rq)	((enum arq_state)(rq)->elevator_private2)
 #define RQ_SET_STATE(rq, state)	((rq)->elevator_private2 = (void *) state)
 
-static DEFINE_PER_CPU(unsigned long, ioc_count);
+static DEFINE_PER_CPU(unsigned long, as_ioc_count);
 static struct completion *ioc_gone;
 static DEFINE_SPINLOCK(ioc_gone_lock);
 
@@ -161,7 +161,7 @@ static void as_antic_stop(struct as_data *ad);
 static void free_as_io_context(struct as_io_context *aic)
 {
 	kfree(aic);
-	elv_ioc_count_dec(ioc_count);
+	elv_ioc_count_dec(as_ioc_count);
 	if (ioc_gone) {
 		/*
 		 * AS scheduler is exiting, grab exit lock and check
@@ -169,7 +169,7 @@ static void free_as_io_context(struct as_io_context *aic)
 		 * complete ioc_gone and set it back to NULL.
 		 */
 		spin_lock(&ioc_gone_lock);
-		if (ioc_gone && !elv_ioc_count_read(ioc_count)) {
+		if (ioc_gone && !elv_ioc_count_read(as_ioc_count)) {
 			complete(ioc_gone);
 			ioc_gone = NULL;
 		}
@@ -211,7 +211,7 @@ static struct as_io_context *alloc_as_io_context(void)
 		ret->seek_total = 0;
 		ret->seek_samples = 0;
 		ret->seek_mean = 0;
-		elv_ioc_count_inc(ioc_count);
+		elv_ioc_count_inc(as_ioc_count);
 	}
 
 	return ret;
@@ -1507,7 +1507,7 @@ static void __exit as_exit(void)
 	ioc_gone = &all_gone;
 	/* ioc_gone's update must be visible before reading ioc_count */
 	smp_wmb();
-	if (elv_ioc_count_read(ioc_count))
+	if (elv_ioc_count_read(as_ioc_count))
 		wait_for_completion(&all_gone);
 	synchronize_rcu();
 }
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 87276eb83f7f..85208dd1d05b 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -48,7 +48,7 @@ static int cfq_slice_idle = HZ / 125;
 static struct kmem_cache *cfq_pool;
 static struct kmem_cache *cfq_ioc_pool;
 
-static DEFINE_PER_CPU(unsigned long, ioc_count);
+static DEFINE_PER_CPU(unsigned long, cfq_ioc_count);
 static struct completion *ioc_gone;
 static DEFINE_SPINLOCK(ioc_gone_lock);
 
@@ -1427,7 +1427,7 @@ static void cfq_cic_free_rcu(struct rcu_head *head)
 	cic = container_of(head, struct cfq_io_context, rcu_head);
 
 	kmem_cache_free(cfq_ioc_pool, cic);
-	elv_ioc_count_dec(ioc_count);
+	elv_ioc_count_dec(cfq_ioc_count);
 
 	if (ioc_gone) {
 		/*
@@ -1436,7 +1436,7 @@ static void cfq_cic_free_rcu(struct rcu_head *head)
 		 * complete ioc_gone and set it back to NULL
 		 */
 		spin_lock(&ioc_gone_lock);
-		if (ioc_gone && !elv_ioc_count_read(ioc_count)) {
+		if (ioc_gone && !elv_ioc_count_read(cfq_ioc_count)) {
 			complete(ioc_gone);
 			ioc_gone = NULL;
 		}
@@ -1562,7 +1562,7 @@ cfq_alloc_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
 		INIT_HLIST_NODE(&cic->cic_list);
 		cic->dtor = cfq_free_io_context;
 		cic->exit = cfq_exit_io_context;
-		elv_ioc_count_inc(ioc_count);
+		elv_ioc_count_inc(cfq_ioc_count);
 	}
 
 	return cic;
@@ -2668,7 +2668,7 @@ static void __exit cfq_exit(void)
 	 * this also protects us from entering cfq_slab_kill() with
 	 * pending RCU callbacks
 	 */
-	if (elv_ioc_count_read(ioc_count))
+	if (elv_ioc_count_read(cfq_ioc_count))
 		wait_for_completion(&all_gone);
 	cfq_slab_kill();
 }
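
Note: the as_/cfq_ prefixes here are not cosmetic. Both I/O schedulers used
to define a static percpu variable called ioc_count, and with weak percpu
definitions two weak per_cpu__ioc_count symbols from different objects would
be silently merged into one shared counter. The new macros catch this at
link time, roughly as sketched below (two hypothetical translation units):

/* a.c */
static DEFINE_PER_CPU(unsigned long, ioc_count);
/* emits weak per_cpu__ioc_count plus a strong __pcpu_unique_ioc_count */

/* b.c */
static DEFINE_PER_CPU(unsigned long, ioc_count);
/* emits a second strong __pcpu_unique_ioc_count: a "multiple definition"
 * link error instead of a silent merge -- hence the renames above. */
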
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index 7fc58af748b4..a7ef465c83b9 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -65,7 +65,7 @@ struct cpu_dbs_info_s {
 	int cpu;
 	unsigned int enable:1;
 };
-static DEFINE_PER_CPU(struct cpu_dbs_info_s, cpu_dbs_info);
+static DEFINE_PER_CPU(struct cpu_dbs_info_s, cs_cpu_dbs_info);
 
 static unsigned int dbs_enable;	/* number of CPUs using this policy */
 
@@ -138,7 +138,7 @@ dbs_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
 			     void *data)
 {
 	struct cpufreq_freqs *freq = data;
-	struct cpu_dbs_info_s *this_dbs_info = &per_cpu(cpu_dbs_info,
+	struct cpu_dbs_info_s *this_dbs_info = &per_cpu(cs_cpu_dbs_info,
 							freq->cpu);
 
 	struct cpufreq_policy *policy;
@@ -298,7 +298,7 @@ static ssize_t store_ignore_nice_load(struct cpufreq_policy *policy,
 	/* we need to re-evaluate prev_cpu_idle */
 	for_each_online_cpu(j) {
 		struct cpu_dbs_info_s *dbs_info;
-		dbs_info = &per_cpu(cpu_dbs_info, j);
+		dbs_info = &per_cpu(cs_cpu_dbs_info, j);
 		dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
 						&dbs_info->prev_cpu_wall);
 		if (dbs_tuners_ins.ignore_nice)
@@ -388,7 +388,7 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
 		cputime64_t cur_wall_time, cur_idle_time;
 		unsigned int idle_time, wall_time;
 
-		j_dbs_info = &per_cpu(cpu_dbs_info, j);
+		j_dbs_info = &per_cpu(cs_cpu_dbs_info, j);
 
 		cur_idle_time = get_cpu_idle_time(j, &cur_wall_time);
 
@@ -528,7 +528,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 	unsigned int j;
 	int rc;
 
-	this_dbs_info = &per_cpu(cpu_dbs_info, cpu);
+	this_dbs_info = &per_cpu(cs_cpu_dbs_info, cpu);
 
 	switch (event) {
 	case CPUFREQ_GOV_START:
@@ -548,7 +548,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 
 		for_each_cpu(j, policy->cpus) {
 			struct cpu_dbs_info_s *j_dbs_info;
-			j_dbs_info = &per_cpu(cpu_dbs_info, j);
+			j_dbs_info = &per_cpu(cs_cpu_dbs_info, j);
 			j_dbs_info->cur_policy = policy;
 
 			j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index 1911d1729353..36f292a7bd01 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -73,7 +73,7 @@ struct cpu_dbs_info_s {
 	unsigned int enable:1,
 		sample_type:1;
 };
-static DEFINE_PER_CPU(struct cpu_dbs_info_s, cpu_dbs_info);
+static DEFINE_PER_CPU(struct cpu_dbs_info_s, od_cpu_dbs_info);
 
 static unsigned int dbs_enable;	/* number of CPUs using this policy */
 
@@ -151,7 +151,8 @@ static unsigned int powersave_bias_target(struct cpufreq_policy *policy,
 	unsigned int freq_hi, freq_lo;
 	unsigned int index = 0;
 	unsigned int jiffies_total, jiffies_hi, jiffies_lo;
-	struct cpu_dbs_info_s *dbs_info = &per_cpu(cpu_dbs_info, policy->cpu);
+	struct cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info,
+						   policy->cpu);
 
 	if (!dbs_info->freq_table) {
 		dbs_info->freq_lo = 0;
@@ -196,7 +197,7 @@ static void ondemand_powersave_bias_init(void)
 {
 	int i;
 	for_each_online_cpu(i) {
-		struct cpu_dbs_info_s *dbs_info = &per_cpu(cpu_dbs_info, i);
+		struct cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, i);
 		dbs_info->freq_table = cpufreq_frequency_get_table(i);
 		dbs_info->freq_lo = 0;
 	}
@@ -297,7 +298,7 @@ static ssize_t store_ignore_nice_load(struct cpufreq_policy *policy,
 	/* we need to re-evaluate prev_cpu_idle */
 	for_each_online_cpu(j) {
 		struct cpu_dbs_info_s *dbs_info;
-		dbs_info = &per_cpu(cpu_dbs_info, j);
+		dbs_info = &per_cpu(od_cpu_dbs_info, j);
 		dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
 						&dbs_info->prev_cpu_wall);
 		if (dbs_tuners_ins.ignore_nice)
@@ -391,7 +392,7 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
 		unsigned int load, load_freq;
 		int freq_avg;
 
-		j_dbs_info = &per_cpu(cpu_dbs_info, j);
+		j_dbs_info = &per_cpu(od_cpu_dbs_info, j);
 
 		cur_idle_time = get_cpu_idle_time(j, &cur_wall_time);
 
@@ -548,7 +549,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 	unsigned int j;
 	int rc;
 
-	this_dbs_info = &per_cpu(cpu_dbs_info, cpu);
+	this_dbs_info = &per_cpu(od_cpu_dbs_info, cpu);
 
 	switch (event) {
 	case CPUFREQ_GOV_START:
@@ -570,7 +571,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 
 		for_each_cpu(j, policy->cpus) {
 			struct cpu_dbs_info_s *j_dbs_info;
-			j_dbs_info = &per_cpu(cpu_dbs_info, j);
+			j_dbs_info = &per_cpu(od_cpu_dbs_info, j);
 			j_dbs_info->cur_policy = policy;
 
 			j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
diff --git a/drivers/xen/events.c b/drivers/xen/events.c
index 891d2e90753a..7d2987e9b1bb 100644
--- a/drivers/xen/events.c
+++ b/drivers/xen/events.c
@@ -47,10 +47,10 @@
 static DEFINE_SPINLOCK(irq_mapping_update_lock);
 
 /* IRQ <-> VIRQ mapping. */
-static DEFINE_PER_CPU(int, virq_to_irq[NR_VIRQS]) = {[0 ... NR_VIRQS-1] = -1};
+static DEFINE_PER_CPU(int [NR_VIRQS], virq_to_irq) = {[0 ... NR_VIRQS-1] = -1};
 
 /* IRQ <-> IPI mapping */
-static DEFINE_PER_CPU(int, ipi_to_irq[XEN_NR_IPIS]) = {[0 ... XEN_NR_IPIS-1] = -1};
+static DEFINE_PER_CPU(int [XEN_NR_IPIS], ipi_to_irq) = {[0 ... XEN_NR_IPIS-1] = -1};
 
 /* Interrupt types. */
 enum xen_irq_type {
@@ -602,6 +602,8 @@ irqreturn_t xen_debug_interrupt(int irq, void *dev_id)
 	return IRQ_HANDLED;
 }
 
+static DEFINE_PER_CPU(unsigned, xed_nesting_count);
+
 /*
  * Search the CPUs pending events bitmasks.  For each one found, map
  * the event number to an irq, and feed it into do_IRQ() for
@@ -617,7 +619,6 @@ void xen_evtchn_do_upcall(struct pt_regs *regs)
 	struct pt_regs *old_regs = set_irq_regs(regs);
 	struct shared_info *s = HYPERVISOR_shared_info;
 	struct vcpu_info *vcpu_info = __get_cpu_var(xen_vcpu);
-	static DEFINE_PER_CPU(unsigned, nesting_count);
 	unsigned count;
 
 	exit_idle();
@@ -628,7 +629,7 @@ void xen_evtchn_do_upcall(struct pt_regs *regs)
 
 		vcpu_info->evtchn_upcall_pending = 0;
 
-		if (__get_cpu_var(nesting_count)++)
+		if (__get_cpu_var(xed_nesting_count)++)
 			goto out;
 
 #ifndef CONFIG_X86 /* No need for a barrier -- XCHG is a barrier on x86. */
@@ -653,8 +654,8 @@ void xen_evtchn_do_upcall(struct pt_regs *regs)
 
 		BUG_ON(!irqs_disabled());
 
-		count = __get_cpu_var(nesting_count);
-		__get_cpu_var(nesting_count) = 0;
+		count = __get_cpu_var(xed_nesting_count);
+		__get_cpu_var(xed_nesting_count) = 0;
 	} while(count != 1);
 
 out:
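
Note: besides the rename, xed_nesting_count moves out of
xen_evtchn_do_upcall() entirely. A weak symbol cannot have function-local
scope, so the weak-percpu rules (spelled out in include/linux/percpu-defs.h
below) forbid the old pattern. A sketch, with an illustrative function name:

void some_upcall(void)
{
	/* no longer builds once percpu definitions may become weak */
	static DEFINE_PER_CPU(unsigned, nesting_count);

	__get_cpu_var(nesting_count)++;
}

The definition therefore moves to file scope, with a subsystem prefix to
keep the symbol globally unique.
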
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index dccdbed05848..c5c18ac878ab 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -629,6 +629,14 @@
 #define INIT_RAM_FS
 #endif
 
+#define DISCARDS							\
+	/DISCARD/ : {							\
+	EXIT_TEXT							\
+	EXIT_DATA							\
+	*(.exitcall.exit)						\
+	*(.discard)							\
+	}
+
 /**
  * PERCPU_VADDR - define output section for percpu area
  * @vaddr: explicit base address (optional)
diff --git a/include/linux/percpu-defs.h b/include/linux/percpu-defs.h
index 68438e18fff4..aefc2f12b48c 100644
--- a/include/linux/percpu-defs.h
+++ b/include/linux/percpu-defs.h
@@ -10,22 +10,70 @@
 /*
  * Base implementations of per-CPU variable declarations and definitions, where
  * the section in which the variable is to be placed is provided by the
- * 'section' argument.  This may be used to affect the parameters governing the
+ * 'sec' argument.  This may be used to affect the parameters governing the
  * variable's storage.
  *
  * NOTE!  The sections for the DECLARE and for the DEFINE must match, lest
  * linkage errors occur due the compiler generating the wrong code to access
  * that section.
  */
-#define DECLARE_PER_CPU_SECTION(type, name, section)			\
-	extern								\
-	__attribute__((__section__(PER_CPU_BASE_SECTION section)))	\
-	PER_CPU_ATTRIBUTES __typeof__(type) per_cpu__##name
-
-#define DEFINE_PER_CPU_SECTION(type, name, section)			\
-	__attribute__((__section__(PER_CPU_BASE_SECTION section)))	\
-	PER_CPU_ATTRIBUTES PER_CPU_DEF_ATTRIBUTES			\
+#define __PCPU_ATTRS(sec)						\
+	__attribute__((section(PER_CPU_BASE_SECTION sec)))		\
+	PER_CPU_ATTRIBUTES
+
+#define __PCPU_DUMMY_ATTRS						\
+	__attribute__((section(".discard"), unused))
+
+/*
+ * s390 and alpha modules require percpu variables to be defined as
+ * weak to force the compiler to generate GOT based external
+ * references for them.  This is necessary because percpu sections
+ * will be located outside of the usually addressable area.
+ *
+ * This definition puts the following two extra restrictions when
+ * defining percpu variables.
+ *
+ * 1. The symbol must be globally unique, even the static ones.
+ * 2. Static percpu variables cannot be defined inside a function.
+ *
+ * Archs which need weak percpu definitions should define
+ * ARCH_NEEDS_WEAK_PER_CPU in asm/percpu.h when necessary.
+ *
+ * To ensure that the generic code observes the above two
+ * restrictions, if CONFIG_DEBUG_FORCE_WEAK_PER_CPU is set weak
+ * definition is used for all cases.
+ */
+#if defined(ARCH_NEEDS_WEAK_PER_CPU) || defined(CONFIG_DEBUG_FORCE_WEAK_PER_CPU)
+/*
+ * __pcpu_scope_* dummy variable is used to enforce scope.  It
+ * receives the static modifier when it's used in front of
+ * DEFINE_PER_CPU() and will trigger build failure if
+ * DECLARE_PER_CPU() is used for the same variable.
+ *
+ * __pcpu_unique_* dummy variable is used to enforce symbol uniqueness
+ * such that hidden weak symbol collision, which will cause unrelated
+ * variables to share the same address, can be detected during build.
+ */
+#define DECLARE_PER_CPU_SECTION(type, name, sec)			\
+	extern __PCPU_DUMMY_ATTRS char __pcpu_scope_##name;		\
+	extern __PCPU_ATTRS(sec) __typeof__(type) per_cpu__##name
+
+#define DEFINE_PER_CPU_SECTION(type, name, sec)				\
+	__PCPU_DUMMY_ATTRS char __pcpu_scope_##name;			\
+	__PCPU_DUMMY_ATTRS char __pcpu_unique_##name;			\
+	__PCPU_ATTRS(sec) PER_CPU_DEF_ATTRIBUTES __weak			\
+	__typeof__(type) per_cpu__##name
+#else
+/*
+ * Normal declaration and definition macros.
+ */
+#define DECLARE_PER_CPU_SECTION(type, name, sec)			\
+	extern __PCPU_ATTRS(sec) __typeof__(type) per_cpu__##name
+
+#define DEFINE_PER_CPU_SECTION(type, name, sec)				\
+	__PCPU_ATTRS(sec) PER_CPU_DEF_ATTRIBUTES			\
 	__typeof__(type) per_cpu__##name
+#endif
 
 /*
  * Variant on the per-CPU variable declaration/definition theme used for
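
Note: concretely, on a weak-percpu configuration DEFINE_PER_CPU(int, foo)
now expands to roughly the following sketch (modulo attribute spelling and
the SMP base section name):

__attribute__((section(".discard"), unused)) char __pcpu_scope_foo;
__attribute__((section(".discard"), unused)) char __pcpu_unique_foo;
__attribute__((section(".data.percpu"))) __attribute__((weak)) int per_cpu__foo;

A leading "static" attaches to __pcpu_scope_foo, so a static definition
combined with an extern DECLARE_PER_CPU() of the same name fails the build.
The dummies living in ".discard" is also why every /DISCARD/ rule earlier in
this patch learns to eat *(.discard).
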
diff --git a/include/linux/percpu.h b/include/linux/percpu.h
index 26fd9d12f050..e5000343dd61 100644
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
@@ -34,7 +34,7 @@
 
 #ifdef CONFIG_SMP
 
-#ifdef CONFIG_HAVE_DYNAMIC_PER_CPU_AREA
+#ifndef CONFIG_HAVE_LEGACY_PER_CPU_AREA
 
 /* minimum unit size, also is the maximum supported allocation size */
 #define PCPU_MIN_UNIT_SIZE		PFN_ALIGN(64 << 10)
@@ -80,7 +80,7 @@ extern ssize_t __init pcpu_embed_first_chunk(
 
 extern void *__alloc_reserved_percpu(size_t size, size_t align);
 
-#else /* CONFIG_HAVE_DYNAMIC_PER_CPU_AREA */
+#else /* CONFIG_HAVE_LEGACY_PER_CPU_AREA */
 
 struct percpu_data {
 	void *ptrs[1];
@@ -99,11 +99,15 @@ struct percpu_data {
 	(__typeof__(ptr))__p->ptrs[(cpu)];	\
 })
 
-#endif /* CONFIG_HAVE_DYNAMIC_PER_CPU_AREA */
+#endif /* CONFIG_HAVE_LEGACY_PER_CPU_AREA */
 
 extern void *__alloc_percpu(size_t size, size_t align);
 extern void free_percpu(void *__pdata);
 
+#ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA
+extern void __init setup_per_cpu_areas(void);
+#endif
+
 #else /* CONFIG_SMP */
 
 #define per_cpu_ptr(ptr, cpu) ({ (void)(cpu); (ptr); })
@@ -124,6 +128,8 @@ static inline void free_percpu(void *p)
 	kfree(p);
 }
 
+static inline void __init setup_per_cpu_areas(void) { }
+
 #endif /* CONFIG_SMP */
 
 #define alloc_percpu(type)	(type *)__alloc_percpu(sizeof(type), \
diff --git a/init/main.c b/init/main.c
index 2c5ade79eb81..71832858c5a2 100644
--- a/init/main.c
+++ b/init/main.c
@@ -353,7 +353,6 @@ static void __init smp_init(void)
 #define smp_init()	do { } while (0)
 #endif
 
-static inline void setup_per_cpu_areas(void) { }
 static inline void setup_nr_cpu_ids(void) { }
 static inline void smp_prepare_cpus(unsigned int maxcpus) { }
 
@@ -374,29 +373,6 @@ static void __init setup_nr_cpu_ids(void)
 	nr_cpu_ids = find_last_bit(cpumask_bits(cpu_possible_mask),NR_CPUS) + 1;
 }
 
-#ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA
-unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
-
-EXPORT_SYMBOL(__per_cpu_offset);
-
-static void __init setup_per_cpu_areas(void)
-{
-	unsigned long size, i;
-	char *ptr;
-	unsigned long nr_possible_cpus = num_possible_cpus();
-
-	/* Copy section for each CPU (we discard the original) */
-	size = ALIGN(PERCPU_ENOUGH_ROOM, PAGE_SIZE);
-	ptr = alloc_bootmem_pages(size * nr_possible_cpus);
-
-	for_each_possible_cpu(i) {
-		__per_cpu_offset[i] = ptr - __per_cpu_start;
-		memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
-		ptr += size;
-	}
-}
-#endif /* CONFIG_HAVE_SETUP_PER_CPU_AREA */
-
 /* Called by boot processor to activate the rest. */
 static void __init smp_init(void)
 {
diff --git a/kernel/module.c b/kernel/module.c
index 38928fcaff2b..f5934954fa99 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -364,7 +364,7 @@ EXPORT_SYMBOL_GPL(find_module);
 
 #ifdef CONFIG_SMP
 
-#ifdef CONFIG_HAVE_DYNAMIC_PER_CPU_AREA
+#ifndef CONFIG_HAVE_LEGACY_PER_CPU_AREA
 
 static void *percpu_modalloc(unsigned long size, unsigned long align,
 			     const char *name)
@@ -389,7 +389,7 @@ static void percpu_modfree(void *freeme)
 	free_percpu(freeme);
 }
 
-#else /* ... !CONFIG_HAVE_DYNAMIC_PER_CPU_AREA */
+#else /* ... CONFIG_HAVE_LEGACY_PER_CPU_AREA */
 
 /* Number of blocks used and allocated. */
 static unsigned int pcpu_num_used, pcpu_num_allocated;
@@ -535,7 +535,7 @@ static int percpu_modinit(void)
 }
 __initcall(percpu_modinit);
 
-#endif /* CONFIG_HAVE_DYNAMIC_PER_CPU_AREA */
+#endif /* CONFIG_HAVE_LEGACY_PER_CPU_AREA */
 
 static unsigned int find_pcpusec(Elf_Ehdr *hdr,
 				 Elf_Shdr *sechdrs,
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index d55a50da2347..fc3b97410bbf 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -98,16 +98,16 @@ hw_perf_group_sched_in(struct perf_counter *group_leader,
98 98
99void __weak perf_counter_print_debug(void) { } 99void __weak perf_counter_print_debug(void) { }
100 100
101static DEFINE_PER_CPU(int, disable_count); 101static DEFINE_PER_CPU(int, perf_disable_count);
102 102
103void __perf_disable(void) 103void __perf_disable(void)
104{ 104{
105 __get_cpu_var(disable_count)++; 105 __get_cpu_var(perf_disable_count)++;
106} 106}
107 107
108bool __perf_enable(void) 108bool __perf_enable(void)
109{ 109{
110 return !--__get_cpu_var(disable_count); 110 return !--__get_cpu_var(perf_disable_count);
111} 111}
112 112
113void perf_disable(void) 113void perf_disable(void)
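The prefix added here is rule 1 of the new DEBUG_FORCE_WEAK_PER_CPU option (see the lib/Kconfig.debug hunk below) in action: once percpu variables may be emitted weak, their names share one global namespace even when declared static. A hedged illustration (file names hypothetical):

	/* foo.c */
	static DEFINE_PER_CPU(int, disable_count);

	/* bar.c -- same name in another file.  With weak definitions the
	 * linker would quietly merge the two copies into one; the .discard
	 * dummy symbols added by this series turn that into a link error.
	 * A subsystem prefix, as in perf_disable_count, avoids it. */
	static DEFINE_PER_CPU(int, disable_count);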
diff --git a/kernel/sched.c b/kernel/sched.c
index 7c9098d186e6..34fd81d21784 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -318,12 +318,12 @@ struct task_group root_task_group;
318/* Default task group's sched entity on each cpu */ 318/* Default task group's sched entity on each cpu */
319static DEFINE_PER_CPU(struct sched_entity, init_sched_entity); 319static DEFINE_PER_CPU(struct sched_entity, init_sched_entity);
320/* Default task group's cfs_rq on each cpu */ 320/* Default task group's cfs_rq on each cpu */
321static DEFINE_PER_CPU(struct cfs_rq, init_cfs_rq) ____cacheline_aligned_in_smp; 321static DEFINE_PER_CPU_SHARED_ALIGNED(struct cfs_rq, init_cfs_rq);
322#endif /* CONFIG_FAIR_GROUP_SCHED */ 322#endif /* CONFIG_FAIR_GROUP_SCHED */
323 323
324#ifdef CONFIG_RT_GROUP_SCHED 324#ifdef CONFIG_RT_GROUP_SCHED
325static DEFINE_PER_CPU(struct sched_rt_entity, init_sched_rt_entity); 325static DEFINE_PER_CPU(struct sched_rt_entity, init_sched_rt_entity);
326static DEFINE_PER_CPU(struct rt_rq, init_rt_rq) ____cacheline_aligned_in_smp; 326static DEFINE_PER_CPU_SHARED_ALIGNED(struct rt_rq, init_rt_rq);
327#endif /* CONFIG_RT_GROUP_SCHED */ 327#endif /* CONFIG_RT_GROUP_SCHED */
328#else /* !CONFIG_USER_SCHED */ 328#else /* !CONFIG_USER_SCHED */
329#define root_task_group init_task_group 329#define root_task_group init_task_group
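DEFINE_PER_CPU_SHARED_ALIGNED replaces the open-coded ____cacheline_aligned_in_smp so that aligned variables are also grouped into their own percpu subsection, keeping their padding away from unrelated neighbours. A simplified sketch of the macro, per the include/linux/percpu-defs.h added by this series (details may differ):

	#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name)		   \
		DEFINE_PER_CPU_SECTION(type, name,			   \
				       PER_CPU_SHARED_ALIGNED_SECTION)	   \
		____cacheline_aligned_in_smp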
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 53c8fd376a88..8c193c2eb68a 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -1334,7 +1334,7 @@ static __init void event_trace_self_tests(void)
1334 1334
1335#ifdef CONFIG_FUNCTION_TRACER 1335#ifdef CONFIG_FUNCTION_TRACER
1336 1336
1337static DEFINE_PER_CPU(atomic_t, test_event_disable); 1337static DEFINE_PER_CPU(atomic_t, ftrace_test_event_disable);
1338 1338
1339static void 1339static void
1340function_test_events_call(unsigned long ip, unsigned long parent_ip) 1340function_test_events_call(unsigned long ip, unsigned long parent_ip)
@@ -1350,7 +1350,7 @@ function_test_events_call(unsigned long ip, unsigned long parent_ip)
1350 pc = preempt_count(); 1350 pc = preempt_count();
1351 resched = ftrace_preempt_disable(); 1351 resched = ftrace_preempt_disable();
1352 cpu = raw_smp_processor_id(); 1352 cpu = raw_smp_processor_id();
1353 disabled = atomic_inc_return(&per_cpu(test_event_disable, cpu)); 1353 disabled = atomic_inc_return(&per_cpu(ftrace_test_event_disable, cpu));
1354 1354
1355 if (disabled != 1) 1355 if (disabled != 1)
1356 goto out; 1356 goto out;
@@ -1368,7 +1368,7 @@ function_test_events_call(unsigned long ip, unsigned long parent_ip)
1368 trace_nowake_buffer_unlock_commit(event, flags, pc); 1368 trace_nowake_buffer_unlock_commit(event, flags, pc);
1369 1369
1370 out: 1370 out:
1371 atomic_dec(&per_cpu(test_event_disable, cpu)); 1371 atomic_dec(&per_cpu(ftrace_test_event_disable, cpu));
1372 ftrace_preempt_enable(resched); 1372 ftrace_preempt_enable(resched);
1373} 1373}
1374 1374
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 12327b2bb785..43173c4e0ade 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -790,6 +790,21 @@ config DEBUG_BLOCK_EXT_DEVT
790 790
791 Say N if you are unsure. 791 Say N if you are unsure.
792 792
793config DEBUG_FORCE_WEAK_PER_CPU
794 bool "Force weak per-cpu definitions"
795 depends on DEBUG_KERNEL
796 help
797 s390 and alpha require percpu variables in modules to be
 798 defined weak to work around an addressing range issue which
799 puts the following two restrictions on percpu variable
800 definitions.
801
802 1. percpu symbols must be unique whether static or not
803 2. percpu variables can't be defined inside a function
804
805 To ensure that generic code follows the above rules, this
806 option forces all percpu variables to be defined as weak.
807
793config LKDTM 808config LKDTM
794 tristate "Linux Kernel Dump Test Tool Module" 809 tristate "Linux Kernel Dump Test Tool Module"
795 depends on DEBUG_KERNEL 810 depends on DEBUG_KERNEL
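The restrictions listed above are easy to trip over in otherwise portable code, which is what this option is for. A hedged illustration (all names hypothetical) of what a DEBUG_FORCE_WEAK_PER_CPU build is meant to catch:

	#include <linux/percpu.h>

	/* Rule 1: percpu names share one global namespace, static or not,
	 * so a generic name risks colliding with another subsystem's. */
	static DEFINE_PER_CPU(int, count);		/* fragile */
	static DEFINE_PER_CPU(int, mydrv_count);	/* prefixed, safe */

	void mydrv_tick(void)
	{
		/* Rule 2: percpu variables cannot be function-local. */
		static DEFINE_PER_CPU(int, mydrv_ticks);	/* rejected */
	}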
diff --git a/mm/Makefile b/mm/Makefile
index 5e0bd6426693..c77c6487552f 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -33,7 +33,7 @@ obj-$(CONFIG_FAILSLAB) += failslab.o
33obj-$(CONFIG_MEMORY_HOTPLUG) += memory_hotplug.o 33obj-$(CONFIG_MEMORY_HOTPLUG) += memory_hotplug.o
34obj-$(CONFIG_FS_XIP) += filemap_xip.o 34obj-$(CONFIG_FS_XIP) += filemap_xip.o
35obj-$(CONFIG_MIGRATION) += migrate.o 35obj-$(CONFIG_MIGRATION) += migrate.o
36ifdef CONFIG_HAVE_DYNAMIC_PER_CPU_AREA 36ifndef CONFIG_HAVE_LEGACY_PER_CPU_AREA
37obj-$(CONFIG_SMP) += percpu.o 37obj-$(CONFIG_SMP) += percpu.o
38else 38else
39obj-$(CONFIG_SMP) += allocpercpu.o 39obj-$(CONFIG_SMP) += allocpercpu.o
diff --git a/mm/allocpercpu.c b/mm/allocpercpu.c
index dfdee6a47359..df34ceae0c67 100644
--- a/mm/allocpercpu.c
+++ b/mm/allocpercpu.c
@@ -5,6 +5,8 @@
5 */ 5 */
6#include <linux/mm.h> 6#include <linux/mm.h>
7#include <linux/module.h> 7#include <linux/module.h>
8#include <linux/bootmem.h>
9#include <asm/sections.h>
8 10
9#ifndef cache_line_size 11#ifndef cache_line_size
10#define cache_line_size() L1_CACHE_BYTES 12#define cache_line_size() L1_CACHE_BYTES
@@ -147,3 +149,29 @@ void free_percpu(void *__pdata)
147 kfree(__percpu_disguise(__pdata)); 149 kfree(__percpu_disguise(__pdata));
148} 150}
149EXPORT_SYMBOL_GPL(free_percpu); 151EXPORT_SYMBOL_GPL(free_percpu);
152
153/*
154 * Generic percpu area setup.
155 */
156#ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA
157unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
158
159EXPORT_SYMBOL(__per_cpu_offset);
160
161void __init setup_per_cpu_areas(void)
162{
163 unsigned long size, i;
164 char *ptr;
165 unsigned long nr_possible_cpus = num_possible_cpus();
166
167 /* Copy section for each CPU (we discard the original) */
168 size = ALIGN(PERCPU_ENOUGH_ROOM, PAGE_SIZE);
169 ptr = alloc_bootmem_pages(size * nr_possible_cpus);
170
171 for_each_possible_cpu(i) {
172 __per_cpu_offset[i] = ptr - __per_cpu_start;
173 memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
174 ptr += size;
175 }
176}
177#endif /* CONFIG_HAVE_SETUP_PER_CPU_AREA */
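The legacy setup_per_cpu_areas() moved here from init/main.c is half of the addressing scheme; the other half is the accessor, which adds __per_cpu_offset[cpu] to a variable's link-time address to reach that CPU's copy. A simplified model (my_per_cpu is hypothetical; the real per_cpu() wraps the pointer math in SHIFT_PERCPU_PTR()/RELOC_HIDE()):

	#define my_per_cpu(var, cpu)					\
		(*(__typeof__(&per_cpu_var(var)))			\
		 ((char *)&per_cpu_var(var) + __per_cpu_offset[cpu]))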
diff --git a/mm/kmemleak-test.c b/mm/kmemleak-test.c
index d5292fc6f523..177a5169bbde 100644
--- a/mm/kmemleak-test.c
+++ b/mm/kmemleak-test.c
@@ -36,7 +36,7 @@ struct test_node {
36}; 36};
37 37
38static LIST_HEAD(test_list); 38static LIST_HEAD(test_list);
39static DEFINE_PER_CPU(void *, test_pointer); 39static DEFINE_PER_CPU(void *, kmemleak_test_pointer);
40 40
41/* 41/*
42 * Some very simple testing. This function needs to be extended for 42 * Some very simple testing. This function needs to be extended for
@@ -86,9 +86,9 @@ static int __init kmemleak_test_init(void)
86 } 86 }
87 87
88 for_each_possible_cpu(i) { 88 for_each_possible_cpu(i) {
89 per_cpu(test_pointer, i) = kmalloc(129, GFP_KERNEL); 89 per_cpu(kmemleak_test_pointer, i) = kmalloc(129, GFP_KERNEL);
90 pr_info("kmemleak: kmalloc(129) = %p\n", 90 pr_info("kmemleak: kmalloc(129) = %p\n",
91 per_cpu(test_pointer, i)); 91 per_cpu(kmemleak_test_pointer, i));
92 } 92 }
93 93
94 return 0; 94 return 0;
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 7687879253b9..3c7f5e1afe5f 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -610,6 +610,8 @@ void set_page_dirty_balance(struct page *page, int page_mkwrite)
610 } 610 }
611} 611}
612 612
613static DEFINE_PER_CPU(unsigned long, bdp_ratelimits) = 0;
614
613/** 615/**
614 * balance_dirty_pages_ratelimited_nr - balance dirty memory state 616 * balance_dirty_pages_ratelimited_nr - balance dirty memory state
615 * @mapping: address_space which was dirtied 617 * @mapping: address_space which was dirtied
@@ -627,7 +629,6 @@ void set_page_dirty_balance(struct page *page, int page_mkwrite)
627void balance_dirty_pages_ratelimited_nr(struct address_space *mapping, 629void balance_dirty_pages_ratelimited_nr(struct address_space *mapping,
628 unsigned long nr_pages_dirtied) 630 unsigned long nr_pages_dirtied)
629{ 631{
630 static DEFINE_PER_CPU(unsigned long, ratelimits) = 0;
631 unsigned long ratelimit; 632 unsigned long ratelimit;
632 unsigned long *p; 633 unsigned long *p;
633 634
@@ -640,7 +641,7 @@ void balance_dirty_pages_ratelimited_nr(struct address_space *mapping,
640 * tasks in balance_dirty_pages(). Period. 641 * tasks in balance_dirty_pages(). Period.
641 */ 642 */
642 preempt_disable(); 643 preempt_disable();
643 p = &__get_cpu_var(ratelimits); 644 p = &__get_cpu_var(bdp_ratelimits);
644 *p += nr_pages_dirtied; 645 *p += nr_pages_dirtied;
645 if (unlikely(*p >= ratelimit)) { 646 if (unlikely(*p >= ratelimit)) {
646 *p = 0; 647 *p = 0;
diff --git a/mm/percpu.c b/mm/percpu.c
index b70f2acd8853..b14984566f5a 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -43,7 +43,7 @@
43 * 43 *
 44 * To use this allocator, arch code should do the following. 44 * To use this allocator, arch code should do the following.
45 * 45 *
46 * - define CONFIG_HAVE_DYNAMIC_PER_CPU_AREA 46 * - drop CONFIG_HAVE_LEGACY_PER_CPU_AREA
47 * 47 *
48 * - define __addr_to_pcpu_ptr() and __pcpu_ptr_to_addr() to translate 48 * - define __addr_to_pcpu_ptr() and __pcpu_ptr_to_addr() to translate
49 * regular address to percpu pointer and back if they need to be 49 * regular address to percpu pointer and back if they need to be
@@ -1275,3 +1275,41 @@ ssize_t __init pcpu_embed_first_chunk(size_t static_size, size_t reserved_size,
1275 reserved_size, dyn_size, 1275 reserved_size, dyn_size,
1276 pcpue_unit_size, pcpue_ptr, NULL); 1276 pcpue_unit_size, pcpue_ptr, NULL);
1277} 1277}
1278
1279/*
1280 * Generic percpu area setup.
1281 *
1282 * The embedding helper is used because its behavior closely resembles
1283 * the original non-dynamic generic percpu area setup. This is
1284 * important because many archs have addressing restrictions and might
1285 * fail if the percpu area is located far away from the previous
1286 * location. As an added bonus, in non-NUMA cases, embedding is
1287 * generally a good idea TLB-wise because the percpu area can piggy back
1288 * on the physical linear memory mapping which uses large page
1289 * mappings on applicable archs.
1290 */
1291#ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA
1292unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
1293EXPORT_SYMBOL(__per_cpu_offset);
1294
1295void __init setup_per_cpu_areas(void)
1296{
1297 size_t static_size = __per_cpu_end - __per_cpu_start;
1298 ssize_t unit_size;
1299 unsigned long delta;
1300 unsigned int cpu;
1301
1302 /*
1303 * Always reserve area for module percpu variables. That's
1304 * what the legacy allocator did.
1305 */
1306 unit_size = pcpu_embed_first_chunk(static_size, PERCPU_MODULE_RESERVE,
1307 PERCPU_DYNAMIC_RESERVE, -1);
1308 if (unit_size < 0)
1309 panic("Failed to initialize percpu areas.");
1310
1311 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
1312 for_each_possible_cpu(cpu)
1313 __per_cpu_offset[cpu] = delta + cpu * unit_size;
1314}
1315#endif /* CONFIG_HAVE_SETUP_PER_CPU_AREA */
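With the embed-based default above, architectures without CONFIG_HAVE_SETUP_PER_CPU_AREA now boot with a fully dynamic first chunk, so runtime percpu allocation works out of the box. A hedged usage sketch (struct foo_stats is hypothetical):

	struct foo_stats { unsigned long hits; };

	static struct foo_stats *foo_alloc_stats(void)
	{
		struct foo_stats *stats = alloc_percpu(struct foo_stats);
		int cpu;

		if (!stats)
			return NULL;
		for_each_possible_cpu(cpu)
			per_cpu_ptr(stats, cpu)->hits = 0;
		return stats;	/* released later with free_percpu() */
	}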
diff --git a/mm/quicklist.c b/mm/quicklist.c
index e66d07d1b4ff..6eedf7e473d1 100644
--- a/mm/quicklist.c
+++ b/mm/quicklist.c
@@ -19,7 +19,7 @@
19#include <linux/module.h> 19#include <linux/module.h>
20#include <linux/quicklist.h> 20#include <linux/quicklist.h>
21 21
22DEFINE_PER_CPU(struct quicklist, quicklist)[CONFIG_NR_QUICK]; 22DEFINE_PER_CPU(struct quicklist [CONFIG_NR_QUICK], quicklist);
23 23
24#define FRACTION_OF_NODE_MEM 16 24#define FRACTION_OF_NODE_MEM 16
25 25
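The array dimension moves inside the type argument because the reworked DEFINE_PER_CPU() can expand to more than the bare variable (for example the weak-definition dummies), so a trailing [N] outside the macro would no longer attach to the right declaration. Same object, two spellings:

	/* old: array suffix bolted onto the macro expansion */
	DEFINE_PER_CPU(struct quicklist, quicklist)[CONFIG_NR_QUICK];
	/* new: the macro sees the complete array type and name */
	DEFINE_PER_CPU(struct quicklist [CONFIG_NR_QUICK], quicklist);

The mm/slub.c and syncookies hunks below make the same conversion.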
diff --git a/mm/slub.c b/mm/slub.c
index 819f056b39c6..ffc895cc3a68 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2092,8 +2092,8 @@ init_kmem_cache_node(struct kmem_cache_node *n, struct kmem_cache *s)
2092 */ 2092 */
2093#define NR_KMEM_CACHE_CPU 100 2093#define NR_KMEM_CACHE_CPU 100
2094 2094
2095static DEFINE_PER_CPU(struct kmem_cache_cpu, 2095static DEFINE_PER_CPU(struct kmem_cache_cpu [NR_KMEM_CACHE_CPU],
2096 kmem_cache_cpu)[NR_KMEM_CACHE_CPU]; 2096 kmem_cache_cpu);
2097 2097
2098static DEFINE_PER_CPU(struct kmem_cache_cpu *, kmem_cache_cpu_free); 2098static DEFINE_PER_CPU(struct kmem_cache_cpu *, kmem_cache_cpu_free);
2099static DECLARE_BITMAP(kmem_cach_cpu_free_init_once, CONFIG_NR_CPUS); 2099static DECLARE_BITMAP(kmem_cach_cpu_free_init_once, CONFIG_NR_CPUS);
diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
index cd2b97f1b6e1..a6e0e077ac33 100644
--- a/net/ipv4/syncookies.c
+++ b/net/ipv4/syncookies.c
@@ -37,12 +37,13 @@ __initcall(init_syncookies);
37#define COOKIEBITS 24 /* Upper bits store count */ 37#define COOKIEBITS 24 /* Upper bits store count */
38#define COOKIEMASK (((__u32)1 << COOKIEBITS) - 1) 38#define COOKIEMASK (((__u32)1 << COOKIEBITS) - 1)
39 39
40static DEFINE_PER_CPU(__u32, cookie_scratch)[16 + 5 + SHA_WORKSPACE_WORDS]; 40static DEFINE_PER_CPU(__u32 [16 + 5 + SHA_WORKSPACE_WORDS],
41 ipv4_cookie_scratch);
41 42
42static u32 cookie_hash(__be32 saddr, __be32 daddr, __be16 sport, __be16 dport, 43static u32 cookie_hash(__be32 saddr, __be32 daddr, __be16 sport, __be16 dport,
43 u32 count, int c) 44 u32 count, int c)
44{ 45{
45 __u32 *tmp = __get_cpu_var(cookie_scratch); 46 __u32 *tmp = __get_cpu_var(ipv4_cookie_scratch);
46 47
47 memcpy(tmp + 4, syncookie_secret[c], sizeof(syncookie_secret[c])); 48 memcpy(tmp + 4, syncookie_secret[c], sizeof(syncookie_secret[c]));
48 tmp[0] = (__force u32)saddr; 49 tmp[0] = (__force u32)saddr;
diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c
index 8c2513982b61..6b6ae913b5d4 100644
--- a/net/ipv6/syncookies.c
+++ b/net/ipv6/syncookies.c
@@ -74,12 +74,13 @@ static inline struct sock *get_cookie_sock(struct sock *sk, struct sk_buff *skb,
74 return child; 74 return child;
75} 75}
76 76
77static DEFINE_PER_CPU(__u32, cookie_scratch)[16 + 5 + SHA_WORKSPACE_WORDS]; 77static DEFINE_PER_CPU(__u32 [16 + 5 + SHA_WORKSPACE_WORDS],
78 ipv6_cookie_scratch);
78 79
79static u32 cookie_hash(struct in6_addr *saddr, struct in6_addr *daddr, 80static u32 cookie_hash(struct in6_addr *saddr, struct in6_addr *daddr,
80 __be16 sport, __be16 dport, u32 count, int c) 81 __be16 sport, __be16 dport, u32 count, int c)
81{ 82{
82 __u32 *tmp = __get_cpu_var(cookie_scratch); 83 __u32 *tmp = __get_cpu_var(ipv6_cookie_scratch);
83 84
84 /* 85 /*
85 * we have 320 bits of information to hash, copy in the remaining 86 * we have 320 bits of information to hash, copy in the remaining
diff --git a/net/rds/ib_stats.c b/net/rds/ib_stats.c
index 02e3e3d50d4a..301ae51ae409 100644
--- a/net/rds/ib_stats.c
+++ b/net/rds/ib_stats.c
@@ -37,7 +37,7 @@
37#include "rds.h" 37#include "rds.h"
38#include "ib.h" 38#include "ib.h"
39 39
40DEFINE_PER_CPU(struct rds_ib_statistics, rds_ib_stats) ____cacheline_aligned; 40DEFINE_PER_CPU_SHARED_ALIGNED(struct rds_ib_statistics, rds_ib_stats);
41 41
42static char *rds_ib_stat_names[] = { 42static char *rds_ib_stat_names[] = {
43 "ib_connect_raced", 43 "ib_connect_raced",
diff --git a/net/rds/iw_stats.c b/net/rds/iw_stats.c
index ccc7e8f0bf0e..fafea3cc92d7 100644
--- a/net/rds/iw_stats.c
+++ b/net/rds/iw_stats.c
@@ -37,7 +37,7 @@
37#include "rds.h" 37#include "rds.h"
38#include "iw.h" 38#include "iw.h"
39 39
40DEFINE_PER_CPU(struct rds_iw_statistics, rds_iw_stats) ____cacheline_aligned; 40DEFINE_PER_CPU_SHARED_ALIGNED(struct rds_iw_statistics, rds_iw_stats);
41 41
42static char *rds_iw_stat_names[] = { 42static char *rds_iw_stat_names[] = {
43 "iw_connect_raced", 43 "iw_connect_raced",
diff --git a/net/rds/page.c b/net/rds/page.c
index c460743a89ad..de7bb84bcd78 100644
--- a/net/rds/page.c
+++ b/net/rds/page.c
@@ -39,7 +39,7 @@ struct rds_page_remainder {
39 unsigned long r_offset; 39 unsigned long r_offset;
40}; 40};
41 41
42DEFINE_PER_CPU(struct rds_page_remainder, rds_page_remainders) ____cacheline_aligned; 42DEFINE_PER_CPU_SHARED_ALIGNED(struct rds_page_remainder, rds_page_remainders);
43 43
44/* 44/*
45 * returns 0 on success or -errno on failure. 45 * returns 0 on success or -errno on failure.
diff --git a/scripts/module-common.lds b/scripts/module-common.lds
new file mode 100644
index 000000000000..47a1f9ae0ede
--- /dev/null
+++ b/scripts/module-common.lds
@@ -0,0 +1,8 @@
1/*
2 * Common module linker script, always used when linking a module.
3 * Archs are free to supply their own linker scripts. ld will
4 * combine them automatically.
5 */
6SECTIONS {
7 /DISCARD/ : { *(.discard) }
8}
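Every module is now linked with this script, so anything a header places in a section named .discard disappears from the final .ko. The percpu definition macros in this series rely on that for one-shot dummy symbols that exist only to force duplicate-name link errors. A hedged sketch of such a throwaway symbol (modeled on the __pcpu_unique_* dummies; exact spelling may differ):

	/* resolved by the linker like any other symbol, then dropped
	 * by the /DISCARD/ rule above */
	__attribute__((section(".discard"), unused)) char __pcpu_unique_foo;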