author    Linus Torvalds <torvalds@linux-foundation.org>    2009-09-15 12:39:44 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>    2009-09-15 12:39:44 -0400
commit    ada3fa15057205b7d3f727bba5cd26b5912e350f (patch)
tree      60962fc9e4021b92f484d1a58e72cd3906d4f3db /arch
parent    2f82af08fcc7dc01a7e98a49a5995a77e32a2925 (diff)
parent    5579fd7e6aed8860ea0c8e3f11897493153b10ad (diff)
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/percpu
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/percpu: (46 commits)
  powerpc64: convert to dynamic percpu allocator
  sparc64: use embedding percpu first chunk allocator
  percpu: kill lpage first chunk allocator
  x86,percpu: use embedding for 64bit NUMA and page for 32bit NUMA
  percpu: update embedding first chunk allocator to handle sparse units
  percpu: use group information to allocate vmap areas sparsely
  vmalloc: implement pcpu_get_vm_areas()
  vmalloc: separate out insert_vmalloc_vm()
  percpu: add chunk->base_addr
  percpu: add pcpu_unit_offsets[]
  percpu: introduce pcpu_alloc_info and pcpu_group_info
  percpu: move pcpu_lpage_build_unit_map() and pcpul_lpage_dump_cfg() upward
  percpu: add @align to pcpu_fc_alloc_fn_t
  percpu: make @dyn_size mandatory for pcpu_setup_first_chunk()
  percpu: drop @static_size from first chunk allocators
  percpu: generalize first chunk allocator selection
  percpu: build first chunk allocators selectively
  percpu: rename 4k first chunk allocator to page
  percpu: improve boot messages
  percpu: fix pcpu_reclaim() locking
  ...

Fix trivial conflict as per Tejun Heo in kernel/sched.c
Diffstat (limited to 'arch')
-rw-r--r-- arch/alpha/include/asm/percpu.h | 100
-rw-r--r-- arch/alpha/include/asm/tlbflush.h | 1
-rw-r--r-- arch/alpha/kernel/vmlinux.lds.S | 9
-rw-r--r-- arch/arm/kernel/vmlinux.lds.S | 1
-rw-r--r-- arch/avr32/kernel/vmlinux.lds.S | 9
-rw-r--r-- arch/blackfin/kernel/vmlinux.lds.S | 5
-rw-r--r-- arch/blackfin/mm/sram-alloc.c | 6
-rw-r--r-- arch/cris/include/asm/mmu_context.h | 3
-rw-r--r-- arch/cris/kernel/vmlinux.lds.S | 9
-rw-r--r-- arch/cris/mm/fault.c | 2
-rw-r--r-- arch/frv/kernel/vmlinux.lds.S | 2
-rw-r--r-- arch/h8300/kernel/vmlinux.lds.S | 5
-rw-r--r-- arch/ia64/Kconfig | 3
-rw-r--r-- arch/ia64/kernel/setup.c | 6
-rw-r--r-- arch/ia64/kernel/smp.c | 3
-rw-r--r-- arch/ia64/kernel/vmlinux.lds.S | 16
-rw-r--r-- arch/ia64/sn/kernel/setup.c | 2
-rw-r--r-- arch/m32r/kernel/vmlinux.lds.S | 10
-rw-r--r-- arch/m68k/kernel/vmlinux-std.lds | 10
-rw-r--r-- arch/m68k/kernel/vmlinux-sun3.lds | 9
-rw-r--r-- arch/m68knommu/kernel/vmlinux.lds.S | 7
-rw-r--r-- arch/microblaze/kernel/vmlinux.lds.S | 6
-rw-r--r-- arch/mips/kernel/vmlinux.lds.S | 21
-rw-r--r-- arch/mn10300/kernel/vmlinux.lds.S | 8
-rw-r--r-- arch/parisc/kernel/vmlinux.lds.S | 8
-rw-r--r-- arch/powerpc/Kconfig | 3
-rw-r--r-- arch/powerpc/kernel/setup_64.c | 61
-rw-r--r-- arch/powerpc/kernel/vmlinux.lds.S | 9
-rw-r--r-- arch/powerpc/mm/stab.c | 2
-rw-r--r-- arch/powerpc/platforms/ps3/smp.c | 2
-rw-r--r-- arch/s390/include/asm/percpu.h | 32
-rw-r--r-- arch/s390/kernel/vmlinux.lds.S | 9
-rw-r--r-- arch/sh/kernel/vmlinux.lds.S | 10
-rw-r--r-- arch/sparc/Kconfig | 2
-rw-r--r-- arch/sparc/kernel/smp_64.c | 132
-rw-r--r-- arch/sparc/kernel/vmlinux.lds.S | 8
-rw-r--r-- arch/um/include/asm/common.lds.S | 5
-rw-r--r-- arch/um/kernel/dyn.lds.S | 2
-rw-r--r-- arch/um/kernel/uml.lds.S | 2
-rw-r--r-- arch/x86/Kconfig | 5
-rw-r--r-- arch/x86/include/asm/percpu.h | 9
-rw-r--r-- arch/x86/kernel/cpu/cpu_debug.c | 4
-rw-r--r-- arch/x86/kernel/cpu/mcheck/mce.c | 8
-rw-r--r-- arch/x86/kernel/cpu/mcheck/mce_amd.c | 2
-rw-r--r-- arch/x86/kernel/cpu/perf_counter.c | 14
-rw-r--r-- arch/x86/kernel/setup_percpu.c | 364
-rw-r--r-- arch/x86/kernel/vmlinux.lds.S | 11
-rw-r--r-- arch/x86/mm/pageattr.c | 21
-rw-r--r-- arch/xtensa/kernel/vmlinux.lds.S | 13
49 files changed, 246 insertions, 745 deletions
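
Most of the per-arch churn below falls into two recurring per-cpu definition conversions. A condensed sketch of both follows; the identifiers here are illustrative only and do not come from any single hunk (the real instances are in the blackfin, ia64, powerpc and x86 hunks further down):

#include <linux/percpu.h>
#include <linux/spinlock.h>

/*
 * Before: the array dimension or alignment attribute was attached to the
 * variable name:
 *   static DEFINE_PER_CPU(long, example_cache[8]);
 *   static DEFINE_PER_CPU(spinlock_t, example_lock) ____cacheline_aligned_in_smp;
 *
 * After: the dimension moves into the type argument and the open-coded
 * alignment becomes DEFINE_PER_CPU_SHARED_ALIGNED(), so the name stays a
 * plain identifier that the percpu macros can decorate.
 */
static DEFINE_PER_CPU(long [8], example_cache);
static DEFINE_PER_CPU_SHARED_ALIGNED(spinlock_t, example_lock);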
diff --git a/arch/alpha/include/asm/percpu.h b/arch/alpha/include/asm/percpu.h
index b663f1f10b6a..2c12378e3aa9 100644
--- a/arch/alpha/include/asm/percpu.h
+++ b/arch/alpha/include/asm/percpu.h
@@ -1,102 +1,18 @@
1#ifndef __ALPHA_PERCPU_H 1#ifndef __ALPHA_PERCPU_H
2#define __ALPHA_PERCPU_H 2#define __ALPHA_PERCPU_H
3 3
4#include <linux/compiler.h>
5#include <linux/threads.h>
6#include <linux/percpu-defs.h>
7
8/*
9 * Determine the real variable name from the name visible in the
10 * kernel sources.
11 */
12#define per_cpu_var(var) per_cpu__##var
13
14#ifdef CONFIG_SMP
15
16/*
17 * per_cpu_offset() is the offset that has to be added to a
18 * percpu variable to get to the instance for a certain processor.
19 */
20extern unsigned long __per_cpu_offset[NR_CPUS];
21
22#define per_cpu_offset(x) (__per_cpu_offset[x])
23
24#define __my_cpu_offset per_cpu_offset(raw_smp_processor_id())
25#ifdef CONFIG_DEBUG_PREEMPT
26#define my_cpu_offset per_cpu_offset(smp_processor_id())
27#else
28#define my_cpu_offset __my_cpu_offset
29#endif
30
31#ifndef MODULE
32#define SHIFT_PERCPU_PTR(var, offset) RELOC_HIDE(&per_cpu_var(var), (offset))
33#define PER_CPU_DEF_ATTRIBUTES
34#else
35/* 4/*
36 * To calculate addresses of locally defined variables, GCC uses 32-bit 5 * To calculate addresses of locally defined variables, GCC uses
37 * displacement from the GP. Which doesn't work for per cpu variables in 6 * 32-bit displacement from the GP. Which doesn't work for per cpu
38 * modules, as an offset to the kernel per cpu area is way above 4G. 7 * variables in modules, as an offset to the kernel per cpu area is
8 * way above 4G.
39 * 9 *
40 * This forces allocation of a GOT entry for per cpu variable using 10 * Always use weak definitions for percpu variables in modules.
41 * ldq instruction with a 'literal' relocation.
42 */
43#define SHIFT_PERCPU_PTR(var, offset) ({ \
44 extern int simple_identifier_##var(void); \
45 unsigned long __ptr, tmp_gp; \
46 asm ( "br %1, 1f \n\
47 1: ldgp %1, 0(%1) \n\
48 ldq %0, per_cpu__" #var"(%1)\t!literal" \
49 : "=&r"(__ptr), "=&r"(tmp_gp)); \
50 (typeof(&per_cpu_var(var)))(__ptr + (offset)); })
51
52#define PER_CPU_DEF_ATTRIBUTES __used
53
54#endif /* MODULE */
55
56/*
57 * A percpu variable may point to a discarded regions. The following are
58 * established ways to produce a usable pointer from the percpu variable
59 * offset.
60 */ 11 */
61#define per_cpu(var, cpu) \ 12#if defined(MODULE) && defined(CONFIG_SMP)
62 (*SHIFT_PERCPU_PTR(var, per_cpu_offset(cpu))) 13#define ARCH_NEEDS_WEAK_PER_CPU
63#define __get_cpu_var(var) \
64 (*SHIFT_PERCPU_PTR(var, my_cpu_offset))
65#define __raw_get_cpu_var(var) \
66 (*SHIFT_PERCPU_PTR(var, __my_cpu_offset))
67
68#else /* ! SMP */
69
70#define per_cpu(var, cpu) (*((void)(cpu), &per_cpu_var(var)))
71#define __get_cpu_var(var) per_cpu_var(var)
72#define __raw_get_cpu_var(var) per_cpu_var(var)
73
74#define PER_CPU_DEF_ATTRIBUTES
75
76#endif /* SMP */
77
78#ifdef CONFIG_SMP
79#define PER_CPU_BASE_SECTION ".data.percpu"
80#else
81#define PER_CPU_BASE_SECTION ".data"
82#endif
83
84#ifdef CONFIG_SMP
85
86#ifdef MODULE
87#define PER_CPU_SHARED_ALIGNED_SECTION ""
88#else
89#define PER_CPU_SHARED_ALIGNED_SECTION ".shared_aligned"
90#endif
91#define PER_CPU_FIRST_SECTION ".first"
92
93#else
94
95#define PER_CPU_SHARED_ALIGNED_SECTION ""
96#define PER_CPU_FIRST_SECTION ""
97
98#endif 14#endif
99 15
100#define PER_CPU_ATTRIBUTES 16#include <asm-generic/percpu.h>
101 17
102#endif /* __ALPHA_PERCPU_H */ 18#endif /* __ALPHA_PERCPU_H */
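
The net effect of the alpha conversion above is that asm/percpu.h shrinks to a single arch hook: ARCH_NEEDS_WEAK_PER_CPU tells the generic percpu code that, for SMP module builds, per-cpu variables must be emitted as weak symbols so GCC cannot assume a 32-bit GP-relative reference will reach them. A rough illustration of what that buys module code (the variable and helper are hypothetical; the real macro plumbing lives in include/linux/percpu-defs.h):

#include <linux/percpu.h>

/* In an SMP alpha module (and 64-bit s390, further down), this definition
 * ends up weak rather than file-local. */
static DEFINE_PER_CPU(int, example_counter);

static void example_bump(void)
{
        /*
         * Because the symbol is not known to be local, GCC emits a full
         * GOT/literal reference instead of a 32-bit GP-relative one, so
         * the access works even when the per-cpu area sits more than 4G
         * away from the module.
         */
        __get_cpu_var(example_counter)++;
}

A side effect is that even static per-cpu variables can become global symbols, which is why several per-cpu variables elsewhere in this diff are renamed to globally unique names (mce_next_interval, pmc_prev_left, and so on).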
diff --git a/arch/alpha/include/asm/tlbflush.h b/arch/alpha/include/asm/tlbflush.h
index 9d87aaa08c0d..e89e0c2e15b1 100644
--- a/arch/alpha/include/asm/tlbflush.h
+++ b/arch/alpha/include/asm/tlbflush.h
@@ -2,6 +2,7 @@
2#define _ALPHA_TLBFLUSH_H 2#define _ALPHA_TLBFLUSH_H
3 3
4#include <linux/mm.h> 4#include <linux/mm.h>
5#include <linux/sched.h>
5#include <asm/compiler.h> 6#include <asm/compiler.h>
6#include <asm/pgalloc.h> 7#include <asm/pgalloc.h>
7 8
diff --git a/arch/alpha/kernel/vmlinux.lds.S b/arch/alpha/kernel/vmlinux.lds.S
index b9d6568e5f7f..6dc03c35caa0 100644
--- a/arch/alpha/kernel/vmlinux.lds.S
+++ b/arch/alpha/kernel/vmlinux.lds.S
@@ -134,13 +134,6 @@ SECTIONS
134 __bss_stop = .; 134 __bss_stop = .;
135 _end = .; 135 _end = .;
136 136
137 /* Sections to be discarded */
138 /DISCARD/ : {
139 EXIT_TEXT
140 EXIT_DATA
141 *(.exitcall.exit)
142 }
143
144 .mdebug 0 : { 137 .mdebug 0 : {
145 *(.mdebug) 138 *(.mdebug)
146 } 139 }
@@ -150,4 +143,6 @@ SECTIONS
150 143
151 STABS_DEBUG 144 STABS_DEBUG
152 DWARF_DEBUG 145 DWARF_DEBUG
146
147 DISCARDS
153} 148}
diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
index 69371028a202..5cc4812c9763 100644
--- a/arch/arm/kernel/vmlinux.lds.S
+++ b/arch/arm/kernel/vmlinux.lds.S
@@ -83,6 +83,7 @@ SECTIONS
83 EXIT_TEXT 83 EXIT_TEXT
84 EXIT_DATA 84 EXIT_DATA
85 *(.exitcall.exit) 85 *(.exitcall.exit)
86 *(.discard)
86 *(.ARM.exidx.exit.text) 87 *(.ARM.exidx.exit.text)
87 *(.ARM.extab.exit.text) 88 *(.ARM.extab.exit.text)
88#ifndef CONFIG_HOTPLUG_CPU 89#ifndef CONFIG_HOTPLUG_CPU
diff --git a/arch/avr32/kernel/vmlinux.lds.S b/arch/avr32/kernel/vmlinux.lds.S
index 7910d41eb886..c4b56654349a 100644
--- a/arch/avr32/kernel/vmlinux.lds.S
+++ b/arch/avr32/kernel/vmlinux.lds.S
@@ -124,14 +124,11 @@ SECTIONS
124 _end = .; 124 _end = .;
125 } 125 }
126 126
127 DWARF_DEBUG
128
127 /* When something in the kernel is NOT compiled as a module, the module 129 /* When something in the kernel is NOT compiled as a module, the module
128 * cleanup code and data are put into these segments. Both can then be 130 * cleanup code and data are put into these segments. Both can then be
129 * thrown away, as cleanup code is never called unless it's a module. 131 * thrown away, as cleanup code is never called unless it's a module.
130 */ 132 */
131 /DISCARD/ : { 133 DISCARDS
132 EXIT_DATA
133 *(.exitcall.exit)
134 }
135
136 DWARF_DEBUG
137} 134}
diff --git a/arch/blackfin/kernel/vmlinux.lds.S b/arch/blackfin/kernel/vmlinux.lds.S
index 6ac307ca0d80..d7ffe299b979 100644
--- a/arch/blackfin/kernel/vmlinux.lds.S
+++ b/arch/blackfin/kernel/vmlinux.lds.S
@@ -277,8 +277,5 @@ SECTIONS
277 277
278 DWARF_DEBUG 278 DWARF_DEBUG
279 279
280 /DISCARD/ : 280 DISCARDS
281 {
282 *(.exitcall.exit)
283 }
284} 281}
diff --git a/arch/blackfin/mm/sram-alloc.c b/arch/blackfin/mm/sram-alloc.c
index 0bc3c4ef0aad..99e4dbb1dfd1 100644
--- a/arch/blackfin/mm/sram-alloc.c
+++ b/arch/blackfin/mm/sram-alloc.c
@@ -42,9 +42,9 @@
42#include <asm/mem_map.h> 42#include <asm/mem_map.h>
43#include "blackfin_sram.h" 43#include "blackfin_sram.h"
44 44
45static DEFINE_PER_CPU(spinlock_t, l1sram_lock) ____cacheline_aligned_in_smp; 45static DEFINE_PER_CPU_SHARED_ALIGNED(spinlock_t, l1sram_lock);
46static DEFINE_PER_CPU(spinlock_t, l1_data_sram_lock) ____cacheline_aligned_in_smp; 46static DEFINE_PER_CPU_SHARED_ALIGNED(spinlock_t, l1_data_sram_lock);
47static DEFINE_PER_CPU(spinlock_t, l1_inst_sram_lock) ____cacheline_aligned_in_smp; 47static DEFINE_PER_CPU_SHARED_ALIGNED(spinlock_t, l1_inst_sram_lock);
48static spinlock_t l2_sram_lock ____cacheline_aligned_in_smp; 48static spinlock_t l2_sram_lock ____cacheline_aligned_in_smp;
49 49
50/* the data structure for L1 scratchpad and DATA SRAM */ 50/* the data structure for L1 scratchpad and DATA SRAM */
diff --git a/arch/cris/include/asm/mmu_context.h b/arch/cris/include/asm/mmu_context.h
index 72ba08dcfd18..1d45fd6365b7 100644
--- a/arch/cris/include/asm/mmu_context.h
+++ b/arch/cris/include/asm/mmu_context.h
@@ -17,7 +17,8 @@ extern void switch_mm(struct mm_struct *prev, struct mm_struct *next,
17 * registers like cr3 on the i386 17 * registers like cr3 on the i386
18 */ 18 */
19 19
20extern volatile DEFINE_PER_CPU(pgd_t *,current_pgd); /* defined in arch/cris/mm/fault.c */ 20/* defined in arch/cris/mm/fault.c */
21DECLARE_PER_CPU(pgd_t *, current_pgd);
21 22
22static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) 23static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
23{ 24{
diff --git a/arch/cris/kernel/vmlinux.lds.S b/arch/cris/kernel/vmlinux.lds.S
index 0d2adfc794d4..6c81836b9229 100644
--- a/arch/cris/kernel/vmlinux.lds.S
+++ b/arch/cris/kernel/vmlinux.lds.S
@@ -140,12 +140,7 @@ SECTIONS
140 _end = .; 140 _end = .;
141 __end = .; 141 __end = .;
142 142
143 /* Sections to be discarded */
144 /DISCARD/ : {
145 EXIT_TEXT
146 EXIT_DATA
147 *(.exitcall.exit)
148 }
149
150 dram_end = dram_start + (CONFIG_ETRAX_DRAM_SIZE - __CONFIG_ETRAX_VMEM_SIZE)*1024*1024; 143 dram_end = dram_start + (CONFIG_ETRAX_DRAM_SIZE - __CONFIG_ETRAX_VMEM_SIZE)*1024*1024;
144
145 DISCARDS
151} 146}
diff --git a/arch/cris/mm/fault.c b/arch/cris/mm/fault.c
index f925115e3250..4a7cdd9ea1ee 100644
--- a/arch/cris/mm/fault.c
+++ b/arch/cris/mm/fault.c
@@ -29,7 +29,7 @@ extern void die_if_kernel(const char *, struct pt_regs *, long);
29 29
30/* current active page directory */ 30/* current active page directory */
31 31
32volatile DEFINE_PER_CPU(pgd_t *,current_pgd); 32DEFINE_PER_CPU(pgd_t *, current_pgd);
33unsigned long cris_signal_return_page; 33unsigned long cris_signal_return_page;
34 34
35/* 35/*
diff --git a/arch/frv/kernel/vmlinux.lds.S b/arch/frv/kernel/vmlinux.lds.S
index 22d9787406ed..7dbf41f68b52 100644
--- a/arch/frv/kernel/vmlinux.lds.S
+++ b/arch/frv/kernel/vmlinux.lds.S
@@ -177,6 +177,8 @@ SECTIONS
177 .debug_ranges 0 : { *(.debug_ranges) } 177 .debug_ranges 0 : { *(.debug_ranges) }
178 178
179 .comment 0 : { *(.comment) } 179 .comment 0 : { *(.comment) }
180
181 DISCARDS
180} 182}
181 183
182__kernel_image_size_no_bss = __bss_start - __kernel_image_start; 184__kernel_image_size_no_bss = __bss_start - __kernel_image_start;
diff --git a/arch/h8300/kernel/vmlinux.lds.S b/arch/h8300/kernel/vmlinux.lds.S
index 43a87b9085b6..662b02ecb86e 100644
--- a/arch/h8300/kernel/vmlinux.lds.S
+++ b/arch/h8300/kernel/vmlinux.lds.S
@@ -152,9 +152,6 @@ SECTIONS
152 __end = . ; 152 __end = . ;
153 __ramstart = .; 153 __ramstart = .;
154 } 154 }
155 /DISCARD/ : {
156 *(.exitcall.exit)
157 }
158 .romfs : 155 .romfs :
159 { 156 {
160 *(.romfs*) 157 *(.romfs*)
@@ -165,4 +162,6 @@ SECTIONS
165 COMMAND_START = . - 0x200 ; 162 COMMAND_START = . - 0x200 ;
166 __ramend = . ; 163 __ramend = . ;
167 } 164 }
165
166 DISCARDS
168} 167}
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index e6246119932a..011a1cdf0eb5 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -89,6 +89,9 @@ config GENERIC_TIME_VSYSCALL
89 bool 89 bool
90 default y 90 default y
91 91
92config HAVE_LEGACY_PER_CPU_AREA
93 def_bool y
94
92config HAVE_SETUP_PER_CPU_AREA 95config HAVE_SETUP_PER_CPU_AREA
93 def_bool y 96 def_bool y
94 97
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index 1b23ec126b63..1de86c96801d 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -855,11 +855,17 @@ identify_cpu (struct cpuinfo_ia64 *c)
855 c->unimpl_pa_mask = ~((1L<<63) | ((1L << phys_addr_size) - 1)); 855 c->unimpl_pa_mask = ~((1L<<63) | ((1L << phys_addr_size) - 1));
856} 856}
857 857
858/*
859 * In UP configuration, setup_per_cpu_areas() is defined in
860 * include/linux/percpu.h
861 */
862#ifdef CONFIG_SMP
858void __init 863void __init
859setup_per_cpu_areas (void) 864setup_per_cpu_areas (void)
860{ 865{
861 /* start_kernel() requires this... */ 866 /* start_kernel() requires this... */
862} 867}
868#endif
863 869
864/* 870/*
865 * Do the following calculations: 871 * Do the following calculations:
diff --git a/arch/ia64/kernel/smp.c b/arch/ia64/kernel/smp.c
index f0c521b0ba4c..93ebfea43c6c 100644
--- a/arch/ia64/kernel/smp.c
+++ b/arch/ia64/kernel/smp.c
@@ -58,7 +58,8 @@ static struct local_tlb_flush_counts {
58 unsigned int count; 58 unsigned int count;
59} __attribute__((__aligned__(32))) local_tlb_flush_counts[NR_CPUS]; 59} __attribute__((__aligned__(32))) local_tlb_flush_counts[NR_CPUS];
60 60
61static DEFINE_PER_CPU(unsigned short, shadow_flush_counts[NR_CPUS]) ____cacheline_aligned; 61static DEFINE_PER_CPU_SHARED_ALIGNED(unsigned short [NR_CPUS],
62 shadow_flush_counts);
62 63
63#define IPI_CALL_FUNC 0 64#define IPI_CALL_FUNC 0
64#define IPI_CPU_STOP 1 65#define IPI_CPU_STOP 1
diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
index 4a95e86b9ac2..eb4214d1c5af 100644
--- a/arch/ia64/kernel/vmlinux.lds.S
+++ b/arch/ia64/kernel/vmlinux.lds.S
@@ -24,14 +24,14 @@ PHDRS {
24} 24}
25SECTIONS 25SECTIONS
26{ 26{
27 /* Sections to be discarded */ 27 /* unwind exit sections must be discarded before the rest of the
28 sections get included. */
28 /DISCARD/ : { 29 /DISCARD/ : {
29 EXIT_TEXT
30 EXIT_DATA
31 *(.exitcall.exit)
32 *(.IA_64.unwind.exit.text) 30 *(.IA_64.unwind.exit.text)
33 *(.IA_64.unwind_info.exit.text) 31 *(.IA_64.unwind_info.exit.text)
34 } 32 *(.comment)
33 *(.note)
34 }
35 35
36 v = PAGE_OFFSET; /* this symbol is here to make debugging easier... */ 36 v = PAGE_OFFSET; /* this symbol is here to make debugging easier... */
37 phys_start = _start - LOAD_OFFSET; 37 phys_start = _start - LOAD_OFFSET;
@@ -316,7 +316,7 @@ SECTIONS
316 .debug_funcnames 0 : { *(.debug_funcnames) } 316 .debug_funcnames 0 : { *(.debug_funcnames) }
317 .debug_typenames 0 : { *(.debug_typenames) } 317 .debug_typenames 0 : { *(.debug_typenames) }
318 .debug_varnames 0 : { *(.debug_varnames) } 318 .debug_varnames 0 : { *(.debug_varnames) }
319 /* These must appear regardless of . */ 319
320 /DISCARD/ : { *(.comment) } 320 /* Default discards */
321 /DISCARD/ : { *(.note) } 321 DISCARDS
322} 322}
diff --git a/arch/ia64/sn/kernel/setup.c b/arch/ia64/sn/kernel/setup.c
index e456f062f241..ece1bf994499 100644
--- a/arch/ia64/sn/kernel/setup.c
+++ b/arch/ia64/sn/kernel/setup.c
@@ -71,7 +71,7 @@ EXPORT_SYMBOL(sn_rtc_cycles_per_second);
71DEFINE_PER_CPU(struct sn_hub_info_s, __sn_hub_info); 71DEFINE_PER_CPU(struct sn_hub_info_s, __sn_hub_info);
72EXPORT_PER_CPU_SYMBOL(__sn_hub_info); 72EXPORT_PER_CPU_SYMBOL(__sn_hub_info);
73 73
74DEFINE_PER_CPU(short, __sn_cnodeid_to_nasid[MAX_COMPACT_NODES]); 74DEFINE_PER_CPU(short [MAX_COMPACT_NODES], __sn_cnodeid_to_nasid);
75EXPORT_PER_CPU_SYMBOL(__sn_cnodeid_to_nasid); 75EXPORT_PER_CPU_SYMBOL(__sn_cnodeid_to_nasid);
76 76
77DEFINE_PER_CPU(struct nodepda_s *, __sn_nodepda); 77DEFINE_PER_CPU(struct nodepda_s *, __sn_nodepda);
diff --git a/arch/m32r/kernel/vmlinux.lds.S b/arch/m32r/kernel/vmlinux.lds.S
index 4179adf6c624..de5e21cca6a5 100644
--- a/arch/m32r/kernel/vmlinux.lds.S
+++ b/arch/m32r/kernel/vmlinux.lds.S
@@ -120,13 +120,6 @@ SECTIONS
120 120
121 _end = . ; 121 _end = . ;
122 122
123 /* Sections to be discarded */
124 /DISCARD/ : {
125 EXIT_TEXT
126 EXIT_DATA
127 *(.exitcall.exit)
128 }
129
130 /* Stabs debugging sections. */ 123 /* Stabs debugging sections. */
131 .stab 0 : { *(.stab) } 124 .stab 0 : { *(.stab) }
132 .stabstr 0 : { *(.stabstr) } 125 .stabstr 0 : { *(.stabstr) }
@@ -135,4 +128,7 @@ SECTIONS
135 .stab.index 0 : { *(.stab.index) } 128 .stab.index 0 : { *(.stab.index) }
136 .stab.indexstr 0 : { *(.stab.indexstr) } 129 .stab.indexstr 0 : { *(.stab.indexstr) }
137 .comment 0 : { *(.comment) } 130 .comment 0 : { *(.comment) }
131
132 /* Sections to be discarded */
133 DISCARDS
138} 134}
diff --git a/arch/m68k/kernel/vmlinux-std.lds b/arch/m68k/kernel/vmlinux-std.lds
index 01d212bb05a6..47eac19e8f61 100644
--- a/arch/m68k/kernel/vmlinux-std.lds
+++ b/arch/m68k/kernel/vmlinux-std.lds
@@ -82,13 +82,6 @@ SECTIONS
82 82
83 _end = . ; 83 _end = . ;
84 84
85 /* Sections to be discarded */
86 /DISCARD/ : {
87 EXIT_TEXT
88 EXIT_DATA
89 *(.exitcall.exit)
90 }
91
92 /* Stabs debugging sections. */ 85 /* Stabs debugging sections. */
93 .stab 0 : { *(.stab) } 86 .stab 0 : { *(.stab) }
94 .stabstr 0 : { *(.stabstr) } 87 .stabstr 0 : { *(.stabstr) }
@@ -97,4 +90,7 @@ SECTIONS
97 .stab.index 0 : { *(.stab.index) } 90 .stab.index 0 : { *(.stab.index) }
98 .stab.indexstr 0 : { *(.stab.indexstr) } 91 .stab.indexstr 0 : { *(.stab.indexstr) }
99 .comment 0 : { *(.comment) } 92 .comment 0 : { *(.comment) }
93
94 /* Sections to be discarded */
95 DISCARDS
100} 96}
diff --git a/arch/m68k/kernel/vmlinux-sun3.lds b/arch/m68k/kernel/vmlinux-sun3.lds
index c192f773db96..03efaf04d7d7 100644
--- a/arch/m68k/kernel/vmlinux-sun3.lds
+++ b/arch/m68k/kernel/vmlinux-sun3.lds
@@ -77,13 +77,6 @@ __init_begin = .;
77 77
78 _end = . ; 78 _end = . ;
79 79
80 /* Sections to be discarded */
81 /DISCARD/ : {
82 EXIT_TEXT
83 EXIT_DATA
84 *(.exitcall.exit)
85 }
86
87 .crap : { 80 .crap : {
88 /* Stabs debugging sections. */ 81 /* Stabs debugging sections. */
89 *(.stab) 82 *(.stab)
@@ -96,4 +89,6 @@ __init_begin = .;
96 *(.note) 89 *(.note)
97 } 90 }
98 91
92 /* Sections to be discarded */
93 DISCARDS
99} 94}
diff --git a/arch/m68knommu/kernel/vmlinux.lds.S b/arch/m68knommu/kernel/vmlinux.lds.S
index b7fe505e358d..2736a5e309c0 100644
--- a/arch/m68knommu/kernel/vmlinux.lds.S
+++ b/arch/m68knommu/kernel/vmlinux.lds.S
@@ -184,12 +184,6 @@ SECTIONS {
184 __init_end = .; 184 __init_end = .;
185 } > INIT 185 } > INIT
186 186
187 /DISCARD/ : {
188 EXIT_TEXT
189 EXIT_DATA
190 *(.exitcall.exit)
191 }
192
193 .bss : { 187 .bss : {
194 . = ALIGN(4); 188 . = ALIGN(4);
195 _sbss = . ; 189 _sbss = . ;
@@ -200,5 +194,6 @@ SECTIONS {
200 _end = . ; 194 _end = . ;
201 } > BSS 195 } > BSS
202 196
197 DISCARDS
203} 198}
204 199
diff --git a/arch/microblaze/kernel/vmlinux.lds.S b/arch/microblaze/kernel/vmlinux.lds.S
index d34d38dcd12c..ec5fa91a48d8 100644
--- a/arch/microblaze/kernel/vmlinux.lds.S
+++ b/arch/microblaze/kernel/vmlinux.lds.S
@@ -23,8 +23,8 @@ SECTIONS {
23 _stext = . ; 23 _stext = . ;
24 *(.text .text.*) 24 *(.text .text.*)
25 *(.fixup) 25 *(.fixup)
26 26 EXIT_TEXT
27 *(.exitcall.exit) 27 EXIT_CALL
28 SCHED_TEXT 28 SCHED_TEXT
29 LOCK_TEXT 29 LOCK_TEXT
30 KPROBES_TEXT 30 KPROBES_TEXT
@@ -162,4 +162,6 @@ SECTIONS {
162 } 162 }
163 . = ALIGN(4096); 163 . = ALIGN(4096);
164 _end = .; 164 _end = .;
165
166 DISCARDS
165} 167}
diff --git a/arch/mips/kernel/vmlinux.lds.S b/arch/mips/kernel/vmlinux.lds.S
index 58738c8d754f..1474c18fb777 100644
--- a/arch/mips/kernel/vmlinux.lds.S
+++ b/arch/mips/kernel/vmlinux.lds.S
@@ -176,17 +176,6 @@ SECTIONS
176 176
177 _end = . ; 177 _end = . ;
178 178
179 /* Sections to be discarded */
180 /DISCARD/ : {
181 *(.exitcall.exit)
182
183 /* ABI crap starts here */
184 *(.MIPS.options)
185 *(.options)
186 *(.pdr)
187 *(.reginfo)
188 }
189
190 /* These mark the ABI of the kernel for debuggers. */ 179 /* These mark the ABI of the kernel for debuggers. */
191 .mdebug.abi32 : { 180 .mdebug.abi32 : {
192 KEEP(*(.mdebug.abi32)) 181 KEEP(*(.mdebug.abi32))
@@ -212,4 +201,14 @@ SECTIONS
212 *(.gptab.bss) 201 *(.gptab.bss)
213 *(.gptab.sbss) 202 *(.gptab.sbss)
214 } 203 }
204
205 /* Sections to be discarded */
206 DISCARDS
207 /DISCARD/ : {
208 /* ABI crap starts here */
209 *(.MIPS.options)
210 *(.options)
211 *(.pdr)
212 *(.reginfo)
213 }
215} 214}
diff --git a/arch/mn10300/kernel/vmlinux.lds.S b/arch/mn10300/kernel/vmlinux.lds.S
index f4aa07934654..76f41bdb79c4 100644
--- a/arch/mn10300/kernel/vmlinux.lds.S
+++ b/arch/mn10300/kernel/vmlinux.lds.S
@@ -115,12 +115,10 @@ SECTIONS
115 . = ALIGN(PAGE_SIZE); 115 . = ALIGN(PAGE_SIZE);
116 pg0 = .; 116 pg0 = .;
117 117
118 /* Sections to be discarded */
119 /DISCARD/ : {
120 EXIT_CALL
121 }
122
123 STABS_DEBUG 118 STABS_DEBUG
124 119
125 DWARF_DEBUG 120 DWARF_DEBUG
121
122 /* Sections to be discarded */
123 DISCARDS
126} 124}
diff --git a/arch/parisc/kernel/vmlinux.lds.S b/arch/parisc/kernel/vmlinux.lds.S
index fd2cc4fd2b65..aea1784edbd1 100644
--- a/arch/parisc/kernel/vmlinux.lds.S
+++ b/arch/parisc/kernel/vmlinux.lds.S
@@ -237,9 +237,12 @@ SECTIONS
237 /* freed after init ends here */ 237 /* freed after init ends here */
238 _end = . ; 238 _end = . ;
239 239
240 STABS_DEBUG
241 .note 0 : { *(.note) }
242
240 /* Sections to be discarded */ 243 /* Sections to be discarded */
244 DISCARDS
241 /DISCARD/ : { 245 /DISCARD/ : {
242 *(.exitcall.exit)
243#ifdef CONFIG_64BIT 246#ifdef CONFIG_64BIT
244 /* temporary hack until binutils is fixed to not emit these 247 /* temporary hack until binutils is fixed to not emit these
245 * for static binaries 248 * for static binaries
@@ -252,7 +255,4 @@ SECTIONS
252 *(.gnu.hash) 255 *(.gnu.hash)
253#endif 256#endif
254 } 257 }
255
256 STABS_DEBUG
257 .note 0 : { *(.note) }
258} 258}
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index d00131ca0835..2c42e1526d03 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -49,6 +49,9 @@ config GENERIC_HARDIRQS_NO__DO_IRQ
49config HAVE_SETUP_PER_CPU_AREA 49config HAVE_SETUP_PER_CPU_AREA
50 def_bool PPC64 50 def_bool PPC64
51 51
52config NEED_PER_CPU_EMBED_FIRST_CHUNK
53 def_bool PPC64
54
52config IRQ_PER_CPU 55config IRQ_PER_CPU
53 bool 56 bool
54 default y 57 default y
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 1f6816003ebe..aa6e4500635f 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -57,6 +57,7 @@
57#include <asm/cache.h> 57#include <asm/cache.h>
58#include <asm/page.h> 58#include <asm/page.h>
59#include <asm/mmu.h> 59#include <asm/mmu.h>
60#include <asm/mmu-hash64.h>
60#include <asm/firmware.h> 61#include <asm/firmware.h>
61#include <asm/xmon.h> 62#include <asm/xmon.h>
62#include <asm/udbg.h> 63#include <asm/udbg.h>
@@ -569,25 +570,53 @@ void cpu_die(void)
569} 570}
570 571
571#ifdef CONFIG_SMP 572#ifdef CONFIG_SMP
572void __init setup_per_cpu_areas(void) 573#define PCPU_DYN_SIZE ()
574
575static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size, size_t align)
573{ 576{
574 int i; 577 return __alloc_bootmem_node(NODE_DATA(cpu_to_node(cpu)), size, align,
575 unsigned long size; 578 __pa(MAX_DMA_ADDRESS));
576 char *ptr; 579}
577
578 /* Copy section for each CPU (we discard the original) */
579 size = ALIGN(__per_cpu_end - __per_cpu_start, PAGE_SIZE);
580#ifdef CONFIG_MODULES
581 if (size < PERCPU_ENOUGH_ROOM)
582 size = PERCPU_ENOUGH_ROOM;
583#endif
584 580
585 for_each_possible_cpu(i) { 581static void __init pcpu_fc_free(void *ptr, size_t size)
586 ptr = alloc_bootmem_pages_node(NODE_DATA(cpu_to_node(i)), size); 582{
583 free_bootmem(__pa(ptr), size);
584}
587 585
588 paca[i].data_offset = ptr - __per_cpu_start; 586static int pcpu_cpu_distance(unsigned int from, unsigned int to)
589 memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start); 587{
590 } 588 if (cpu_to_node(from) == cpu_to_node(to))
589 return LOCAL_DISTANCE;
590 else
591 return REMOTE_DISTANCE;
592}
593
594void __init setup_per_cpu_areas(void)
595{
596 const size_t dyn_size = PERCPU_MODULE_RESERVE + PERCPU_DYNAMIC_RESERVE;
597 size_t atom_size;
598 unsigned long delta;
599 unsigned int cpu;
600 int rc;
601
602 /*
603 * Linear mapping is one of 4K, 1M and 16M. For 4K, no need
604 * to group units. For larger mappings, use 1M atom which
605 * should be large enough to contain a number of units.
606 */
607 if (mmu_linear_psize == MMU_PAGE_4K)
608 atom_size = PAGE_SIZE;
609 else
610 atom_size = 1 << 20;
611
612 rc = pcpu_embed_first_chunk(0, dyn_size, atom_size, pcpu_cpu_distance,
613 pcpu_fc_alloc, pcpu_fc_free);
614 if (rc < 0)
615 panic("cannot initialize percpu area (err=%d)", rc);
616
617 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
618 for_each_possible_cpu(cpu)
619 paca[cpu].data_offset = delta + pcpu_unit_offsets[cpu];
591} 620}
592#endif 621#endif
593 622
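
The powerpc conversion above and the sparc64 and x86 conversions below all share the same shape: supply bootmem-backed alloc/free helpers plus a CPU distance callback, let pcpu_embed_first_chunk() build the first chunk, then record each CPU's offset. A stripped-down sketch of that common pattern (the atom size and the final offset bookkeeping are arch specific, and error handling is abbreviated):

#include <linux/percpu.h>
#include <linux/bootmem.h>

static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size, size_t align)
{
        /* NUMA-aware bootmem allocation for this cpu's unit */
        return __alloc_bootmem_node(NODE_DATA(cpu_to_node(cpu)), size, align,
                                    __pa(MAX_DMA_ADDRESS));
}

static void __init pcpu_fc_free(void *ptr, size_t size)
{
        free_bootmem(__pa(ptr), size);
}

static int __init pcpu_cpu_distance(unsigned int from, unsigned int to)
{
        /* CPUs on the same node end up in the same percpu group */
        return cpu_to_node(from) == cpu_to_node(to) ?
                LOCAL_DISTANCE : REMOTE_DISTANCE;
}

void __init setup_per_cpu_areas(void)
{
        unsigned long delta;
        unsigned int cpu;
        int rc;

        rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
                                    PERCPU_DYNAMIC_RESERVE,
                                    PAGE_SIZE,          /* atom size, arch specific */
                                    pcpu_cpu_distance,
                                    pcpu_fc_alloc, pcpu_fc_free);
        if (rc < 0)
                panic("cannot initialize percpu area (err=%d)", rc);

        /* turn the chunk layout into per-cpu offsets; powerpc stores this
         * in paca[cpu].data_offset, sparc64 and x86 in their offset arrays */
        delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
        for_each_possible_cpu(cpu)
                __per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
}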
diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S
index 8ef8a14abc95..244e3658983c 100644
--- a/arch/powerpc/kernel/vmlinux.lds.S
+++ b/arch/powerpc/kernel/vmlinux.lds.S
@@ -37,12 +37,6 @@ jiffies = jiffies_64 + 4;
37#endif 37#endif
38SECTIONS 38SECTIONS
39{ 39{
40 /* Sections to be discarded. */
41 /DISCARD/ : {
42 *(.exitcall.exit)
43 EXIT_DATA
44 }
45
46 . = KERNELBASE; 40 . = KERNELBASE;
47 41
48/* 42/*
@@ -298,4 +292,7 @@ SECTIONS
298 . = ALIGN(PAGE_SIZE); 292 . = ALIGN(PAGE_SIZE);
299 _end = . ; 293 _end = . ;
300 PROVIDE32 (end = .); 294 PROVIDE32 (end = .);
295
296 /* Sections to be discarded. */
297 DISCARDS
301} 298}
diff --git a/arch/powerpc/mm/stab.c b/arch/powerpc/mm/stab.c
index ab5fb48b3e90..687fddaa24c5 100644
--- a/arch/powerpc/mm/stab.c
+++ b/arch/powerpc/mm/stab.c
@@ -31,7 +31,7 @@ struct stab_entry {
31 31
32#define NR_STAB_CACHE_ENTRIES 8 32#define NR_STAB_CACHE_ENTRIES 8
33static DEFINE_PER_CPU(long, stab_cache_ptr); 33static DEFINE_PER_CPU(long, stab_cache_ptr);
34static DEFINE_PER_CPU(long, stab_cache[NR_STAB_CACHE_ENTRIES]); 34static DEFINE_PER_CPU(long [NR_STAB_CACHE_ENTRIES], stab_cache);
35 35
36/* 36/*
37 * Create a segment table entry for the given esid/vsid pair. 37 * Create a segment table entry for the given esid/vsid pair.
diff --git a/arch/powerpc/platforms/ps3/smp.c b/arch/powerpc/platforms/ps3/smp.c
index f6e04bcc70ef..51ffde40af2b 100644
--- a/arch/powerpc/platforms/ps3/smp.c
+++ b/arch/powerpc/platforms/ps3/smp.c
@@ -37,7 +37,7 @@
37 */ 37 */
38 38
39#define MSG_COUNT 4 39#define MSG_COUNT 4
40static DEFINE_PER_CPU(unsigned int, ps3_ipi_virqs[MSG_COUNT]); 40static DEFINE_PER_CPU(unsigned int [MSG_COUNT], ps3_ipi_virqs);
41 41
42static void do_message_pass(int target, int msg) 42static void do_message_pass(int target, int msg)
43{ 43{
diff --git a/arch/s390/include/asm/percpu.h b/arch/s390/include/asm/percpu.h
index 408d60b4f75b..f7ad8719d02d 100644
--- a/arch/s390/include/asm/percpu.h
+++ b/arch/s390/include/asm/percpu.h
@@ -1,37 +1,21 @@
1#ifndef __ARCH_S390_PERCPU__ 1#ifndef __ARCH_S390_PERCPU__
2#define __ARCH_S390_PERCPU__ 2#define __ARCH_S390_PERCPU__
3 3
4#include <linux/compiler.h>
5#include <asm/lowcore.h>
6
7/* 4/*
8 * s390 uses its own implementation for per cpu data, the offset of 5 * s390 uses its own implementation for per cpu data, the offset of
9 * the cpu local data area is cached in the cpu's lowcore memory. 6 * the cpu local data area is cached in the cpu's lowcore memory.
10 * For 64 bit module code s390 forces the use of a GOT slot for the
11 * address of the per cpu variable. This is needed because the module
12 * may be more than 4G above the per cpu area.
13 */ 7 */
14#if defined(__s390x__) && defined(MODULE) 8#define __my_cpu_offset S390_lowcore.percpu_offset
15
16#define SHIFT_PERCPU_PTR(ptr,offset) (({ \
17 extern int simple_identifier_##var(void); \
18 unsigned long *__ptr; \
19 asm ( "larl %0, %1@GOTENT" \
20 : "=a" (__ptr) : "X" (ptr) ); \
21 (typeof(ptr))((*__ptr) + (offset)); }))
22
23#else
24
25#define SHIFT_PERCPU_PTR(ptr, offset) (({ \
26 extern int simple_identifier_##var(void); \
27 unsigned long __ptr; \
28 asm ( "" : "=a" (__ptr) : "0" (ptr) ); \
29 (typeof(ptr)) (__ptr + (offset)); }))
30 9
10/*
11 * For 64 bit module code, the module may be more than 4G above the
12 * per cpu area, use weak definitions to force the compiler to
13 * generate external references.
14 */
15#if defined(CONFIG_SMP) && defined(__s390x__) && defined(MODULE)
16#define ARCH_NEEDS_WEAK_PER_CPU
31#endif 17#endif
32 18
33#define __my_cpu_offset S390_lowcore.percpu_offset
34
35#include <asm-generic/percpu.h> 19#include <asm-generic/percpu.h>
36 20
37#endif /* __ARCH_S390_PERCPU__ */ 21#endif /* __ARCH_S390_PERCPU__ */
diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S
index 7315f9e67e1d..bc15ef93e656 100644
--- a/arch/s390/kernel/vmlinux.lds.S
+++ b/arch/s390/kernel/vmlinux.lds.S
@@ -84,13 +84,10 @@ SECTIONS
84 84
85 _end = . ; 85 _end = . ;
86 86
87 /* Sections to be discarded */
88 /DISCARD/ : {
89 EXIT_DATA
90 *(.exitcall.exit)
91 }
92
93 /* Debugging sections. */ 87 /* Debugging sections. */
94 STABS_DEBUG 88 STABS_DEBUG
95 DWARF_DEBUG 89 DWARF_DEBUG
90
91 /* Sections to be discarded */
92 DISCARDS
96} 93}
diff --git a/arch/sh/kernel/vmlinux.lds.S b/arch/sh/kernel/vmlinux.lds.S
index f53c76acaede..0ce254bca92f 100644
--- a/arch/sh/kernel/vmlinux.lds.S
+++ b/arch/sh/kernel/vmlinux.lds.S
@@ -163,16 +163,14 @@ SECTIONS
163 _end = . ; 163 _end = . ;
164 } 164 }
165 165
166 STABS_DEBUG
167 DWARF_DEBUG
168
166 /* 169 /*
167 * When something in the kernel is NOT compiled as a module, the 170 * When something in the kernel is NOT compiled as a module, the
168 * module cleanup code and data are put into these segments. Both 171 * module cleanup code and data are put into these segments. Both
169 * can then be thrown away, as cleanup code is never called unless 172 * can then be thrown away, as cleanup code is never called unless
170 * it's a module. 173 * it's a module.
171 */ 174 */
172 /DISCARD/ : { 175 DISCARDS
173 *(.exitcall.exit)
174 }
175
176 STABS_DEBUG
177 DWARF_DEBUG
178} 176}
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index 2bd5c287538a..86b82348b97c 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -99,7 +99,7 @@ config AUDIT_ARCH
99config HAVE_SETUP_PER_CPU_AREA 99config HAVE_SETUP_PER_CPU_AREA
100 def_bool y if SPARC64 100 def_bool y if SPARC64
101 101
102config HAVE_DYNAMIC_PER_CPU_AREA 102config NEED_PER_CPU_EMBED_FIRST_CHUNK
103 def_bool y if SPARC64 103 def_bool y if SPARC64
104 104
105config GENERIC_HARDIRQS_NO__DO_IRQ 105config GENERIC_HARDIRQS_NO__DO_IRQ
diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
index 3691907a43b4..ff68373ce6d6 100644
--- a/arch/sparc/kernel/smp_64.c
+++ b/arch/sparc/kernel/smp_64.c
@@ -1389,8 +1389,8 @@ void smp_send_stop(void)
1389 * RETURNS: 1389 * RETURNS:
1390 * Pointer to the allocated area on success, NULL on failure. 1390 * Pointer to the allocated area on success, NULL on failure.
1391 */ 1391 */
1392static void * __init pcpu_alloc_bootmem(unsigned int cpu, unsigned long size, 1392static void * __init pcpu_alloc_bootmem(unsigned int cpu, size_t size,
1393 unsigned long align) 1393 size_t align)
1394{ 1394{
1395 const unsigned long goal = __pa(MAX_DMA_ADDRESS); 1395 const unsigned long goal = __pa(MAX_DMA_ADDRESS);
1396#ifdef CONFIG_NEED_MULTIPLE_NODES 1396#ifdef CONFIG_NEED_MULTIPLE_NODES
@@ -1415,127 +1415,35 @@ static void * __init pcpu_alloc_bootmem(unsigned int cpu, unsigned long size,
1415#endif 1415#endif
1416} 1416}
1417 1417
1418static size_t pcpur_size __initdata; 1418static void __init pcpu_free_bootmem(void *ptr, size_t size)
1419static void **pcpur_ptrs __initdata;
1420
1421static struct page * __init pcpur_get_page(unsigned int cpu, int pageno)
1422{ 1419{
1423 size_t off = (size_t)pageno << PAGE_SHIFT; 1420 free_bootmem(__pa(ptr), size);
1424
1425 if (off >= pcpur_size)
1426 return NULL;
1427
1428 return virt_to_page(pcpur_ptrs[cpu] + off);
1429} 1421}
1430 1422
1431#define PCPU_CHUNK_SIZE (4UL * 1024UL * 1024UL) 1423static int pcpu_cpu_distance(unsigned int from, unsigned int to)
1432
1433static void __init pcpu_map_range(unsigned long start, unsigned long end,
1434 struct page *page)
1435{ 1424{
1436 unsigned long pfn = page_to_pfn(page); 1425 if (cpu_to_node(from) == cpu_to_node(to))
1437 unsigned long pte_base; 1426 return LOCAL_DISTANCE;
1438 1427 else
1439 BUG_ON((pfn<<PAGE_SHIFT)&(PCPU_CHUNK_SIZE - 1UL)); 1428 return REMOTE_DISTANCE;
1440
1441 pte_base = (_PAGE_VALID | _PAGE_SZ4MB_4U |
1442 _PAGE_CP_4U | _PAGE_CV_4U |
1443 _PAGE_P_4U | _PAGE_W_4U);
1444 if (tlb_type == hypervisor)
1445 pte_base = (_PAGE_VALID | _PAGE_SZ4MB_4V |
1446 _PAGE_CP_4V | _PAGE_CV_4V |
1447 _PAGE_P_4V | _PAGE_W_4V);
1448
1449 while (start < end) {
1450 pgd_t *pgd = pgd_offset_k(start);
1451 unsigned long this_end;
1452 pud_t *pud;
1453 pmd_t *pmd;
1454 pte_t *pte;
1455
1456 pud = pud_offset(pgd, start);
1457 if (pud_none(*pud)) {
1458 pmd_t *new;
1459
1460 new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
1461 pud_populate(&init_mm, pud, new);
1462 }
1463
1464 pmd = pmd_offset(pud, start);
1465 if (!pmd_present(*pmd)) {
1466 pte_t *new;
1467
1468 new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
1469 pmd_populate_kernel(&init_mm, pmd, new);
1470 }
1471
1472 pte = pte_offset_kernel(pmd, start);
1473 this_end = (start + PMD_SIZE) & PMD_MASK;
1474 if (this_end > end)
1475 this_end = end;
1476
1477 while (start < this_end) {
1478 unsigned long paddr = pfn << PAGE_SHIFT;
1479
1480 pte_val(*pte) = (paddr | pte_base);
1481
1482 start += PAGE_SIZE;
1483 pte++;
1484 pfn++;
1485 }
1486 }
1487} 1429}
1488 1430
1489void __init setup_per_cpu_areas(void) 1431void __init setup_per_cpu_areas(void)
1490{ 1432{
1491 size_t dyn_size, static_size = __per_cpu_end - __per_cpu_start; 1433 unsigned long delta;
1492 static struct vm_struct vm; 1434 unsigned int cpu;
1493 unsigned long delta, cpu; 1435 int rc;
1494 size_t pcpu_unit_size;
1495 size_t ptrs_size;
1496
1497 pcpur_size = PFN_ALIGN(static_size + PERCPU_MODULE_RESERVE +
1498 PERCPU_DYNAMIC_RESERVE);
1499 dyn_size = pcpur_size - static_size - PERCPU_MODULE_RESERVE;
1500
1501 1436
1502 ptrs_size = PFN_ALIGN(nr_cpu_ids * sizeof(pcpur_ptrs[0])); 1437 rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
1503 pcpur_ptrs = alloc_bootmem(ptrs_size); 1438 PERCPU_DYNAMIC_RESERVE, 4 << 20,
1504 1439 pcpu_cpu_distance, pcpu_alloc_bootmem,
1505 for_each_possible_cpu(cpu) { 1440 pcpu_free_bootmem);
1506 pcpur_ptrs[cpu] = pcpu_alloc_bootmem(cpu, PCPU_CHUNK_SIZE, 1441 if (rc)
1507 PCPU_CHUNK_SIZE); 1442 panic("failed to initialize first chunk (%d)", rc);
1508
1509 free_bootmem(__pa(pcpur_ptrs[cpu] + pcpur_size),
1510 PCPU_CHUNK_SIZE - pcpur_size);
1511
1512 memcpy(pcpur_ptrs[cpu], __per_cpu_load, static_size);
1513 }
1514
1515 /* allocate address and map */
1516 vm.flags = VM_ALLOC;
1517 vm.size = nr_cpu_ids * PCPU_CHUNK_SIZE;
1518 vm_area_register_early(&vm, PCPU_CHUNK_SIZE);
1519
1520 for_each_possible_cpu(cpu) {
1521 unsigned long start = (unsigned long) vm.addr;
1522 unsigned long end;
1523
1524 start += cpu * PCPU_CHUNK_SIZE;
1525 end = start + PCPU_CHUNK_SIZE;
1526 pcpu_map_range(start, end, virt_to_page(pcpur_ptrs[cpu]));
1527 }
1528
1529 pcpu_unit_size = pcpu_setup_first_chunk(pcpur_get_page, static_size,
1530 PERCPU_MODULE_RESERVE, dyn_size,
1531 PCPU_CHUNK_SIZE, vm.addr, NULL);
1532
1533 free_bootmem(__pa(pcpur_ptrs), ptrs_size);
1534 1443
1535 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start; 1444 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
1536 for_each_possible_cpu(cpu) { 1445 for_each_possible_cpu(cpu)
1537 __per_cpu_offset(cpu) = delta + cpu * pcpu_unit_size; 1446 __per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
1538 }
1539 1447
1540 /* Setup %g5 for the boot cpu. */ 1448 /* Setup %g5 for the boot cpu. */
1541 __local_per_cpu_offset = __per_cpu_offset(smp_processor_id()); 1449 __local_per_cpu_offset = __per_cpu_offset(smp_processor_id());
diff --git a/arch/sparc/kernel/vmlinux.lds.S b/arch/sparc/kernel/vmlinux.lds.S
index fcbbd000ec08..866390feb683 100644
--- a/arch/sparc/kernel/vmlinux.lds.S
+++ b/arch/sparc/kernel/vmlinux.lds.S
@@ -171,12 +171,8 @@ SECTIONS
171 } 171 }
172 _end = . ; 172 _end = . ;
173 173
174 /DISCARD/ : {
175 EXIT_TEXT
176 EXIT_DATA
177 *(.exitcall.exit)
178 }
179
180 STABS_DEBUG 174 STABS_DEBUG
181 DWARF_DEBUG 175 DWARF_DEBUG
176
177 DISCARDS
182} 178}
diff --git a/arch/um/include/asm/common.lds.S b/arch/um/include/asm/common.lds.S
index cb0248616d49..37ecc5577a9a 100644
--- a/arch/um/include/asm/common.lds.S
+++ b/arch/um/include/asm/common.lds.S
@@ -123,8 +123,3 @@
123 __initramfs_end = .; 123 __initramfs_end = .;
124 } 124 }
125 125
126 /* Sections to be discarded */
127 /DISCARD/ : {
128 *(.exitcall.exit)
129 }
130
diff --git a/arch/um/kernel/dyn.lds.S b/arch/um/kernel/dyn.lds.S
index 9975e1ab44fb..715a188c0472 100644
--- a/arch/um/kernel/dyn.lds.S
+++ b/arch/um/kernel/dyn.lds.S
@@ -156,4 +156,6 @@ SECTIONS
156 STABS_DEBUG 156 STABS_DEBUG
157 157
158 DWARF_DEBUG 158 DWARF_DEBUG
159
160 DISCARDS
159} 161}
diff --git a/arch/um/kernel/uml.lds.S b/arch/um/kernel/uml.lds.S
index 11b835248b86..2ebd39765db8 100644
--- a/arch/um/kernel/uml.lds.S
+++ b/arch/um/kernel/uml.lds.S
@@ -100,4 +100,6 @@ SECTIONS
100 STABS_DEBUG 100 STABS_DEBUG
101 101
102 DWARF_DEBUG 102 DWARF_DEBUG
103
104 DISCARDS
103} 105}
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index a800b0faaad6..e98e81a04971 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -150,7 +150,10 @@ config ARCH_HAS_CACHE_LINE_SIZE
150config HAVE_SETUP_PER_CPU_AREA 150config HAVE_SETUP_PER_CPU_AREA
151 def_bool y 151 def_bool y
152 152
153config HAVE_DYNAMIC_PER_CPU_AREA 153config NEED_PER_CPU_EMBED_FIRST_CHUNK
154 def_bool y
155
156config NEED_PER_CPU_PAGE_FIRST_CHUNK
154 def_bool y 157 def_bool y
155 158
156config HAVE_CPUMASK_OF_CPU_MAP 159config HAVE_CPUMASK_OF_CPU_MAP
diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h
index 04eacefcfd26..b65a36defeb7 100644
--- a/arch/x86/include/asm/percpu.h
+++ b/arch/x86/include/asm/percpu.h
@@ -168,15 +168,6 @@ do { \
168/* We can use this directly for local CPU (faster). */ 168/* We can use this directly for local CPU (faster). */
169DECLARE_PER_CPU(unsigned long, this_cpu_off); 169DECLARE_PER_CPU(unsigned long, this_cpu_off);
170 170
171#ifdef CONFIG_NEED_MULTIPLE_NODES
172void *pcpu_lpage_remapped(void *kaddr);
173#else
174static inline void *pcpu_lpage_remapped(void *kaddr)
175{
176 return NULL;
177}
178#endif
179
180#endif /* !__ASSEMBLY__ */ 171#endif /* !__ASSEMBLY__ */
181 172
182#ifdef CONFIG_SMP 173#ifdef CONFIG_SMP
diff --git a/arch/x86/kernel/cpu/cpu_debug.c b/arch/x86/kernel/cpu/cpu_debug.c
index 6b2a52dd0403..dca325c03999 100644
--- a/arch/x86/kernel/cpu/cpu_debug.c
+++ b/arch/x86/kernel/cpu/cpu_debug.c
@@ -30,8 +30,8 @@
30#include <asm/apic.h> 30#include <asm/apic.h>
31#include <asm/desc.h> 31#include <asm/desc.h>
32 32
33static DEFINE_PER_CPU(struct cpu_cpuX_base, cpu_arr[CPU_REG_ALL_BIT]); 33static DEFINE_PER_CPU(struct cpu_cpuX_base [CPU_REG_ALL_BIT], cpu_arr);
34static DEFINE_PER_CPU(struct cpu_private *, priv_arr[MAX_CPU_FILES]); 34static DEFINE_PER_CPU(struct cpu_private * [MAX_CPU_FILES], priv_arr);
35static DEFINE_PER_CPU(int, cpu_priv_count); 35static DEFINE_PER_CPU(int, cpu_priv_count);
36 36
37static DEFINE_MUTEX(cpu_debug_lock); 37static DEFINE_MUTEX(cpu_debug_lock);
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index 9bfe9d2ea615..fdd51b554355 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -1101,7 +1101,7 @@ void mce_log_therm_throt_event(__u64 status)
1101 */ 1101 */
1102static int check_interval = 5 * 60; /* 5 minutes */ 1102static int check_interval = 5 * 60; /* 5 minutes */
1103 1103
1104static DEFINE_PER_CPU(int, next_interval); /* in jiffies */ 1104static DEFINE_PER_CPU(int, mce_next_interval); /* in jiffies */
1105static DEFINE_PER_CPU(struct timer_list, mce_timer); 1105static DEFINE_PER_CPU(struct timer_list, mce_timer);
1106 1106
1107static void mcheck_timer(unsigned long data) 1107static void mcheck_timer(unsigned long data)
@@ -1120,7 +1120,7 @@ static void mcheck_timer(unsigned long data)
1120 * Alert userspace if needed. If we logged an MCE, reduce the 1120 * Alert userspace if needed. If we logged an MCE, reduce the
1121 * polling interval, otherwise increase the polling interval. 1121 * polling interval, otherwise increase the polling interval.
1122 */ 1122 */
1123 n = &__get_cpu_var(next_interval); 1123 n = &__get_cpu_var(mce_next_interval);
1124 if (mce_notify_irq()) 1124 if (mce_notify_irq())
1125 *n = max(*n/2, HZ/100); 1125 *n = max(*n/2, HZ/100);
1126 else 1126 else
@@ -1335,7 +1335,7 @@ static void mce_cpu_features(struct cpuinfo_x86 *c)
1335static void mce_init_timer(void) 1335static void mce_init_timer(void)
1336{ 1336{
1337 struct timer_list *t = &__get_cpu_var(mce_timer); 1337 struct timer_list *t = &__get_cpu_var(mce_timer);
1338 int *n = &__get_cpu_var(next_interval); 1338 int *n = &__get_cpu_var(mce_next_interval);
1339 1339
1340 if (mce_ignore_ce) 1340 if (mce_ignore_ce)
1341 return; 1341 return;
@@ -1935,7 +1935,7 @@ mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
1935 case CPU_DOWN_FAILED: 1935 case CPU_DOWN_FAILED:
1936 case CPU_DOWN_FAILED_FROZEN: 1936 case CPU_DOWN_FAILED_FROZEN:
1937 t->expires = round_jiffies(jiffies + 1937 t->expires = round_jiffies(jiffies +
1938 __get_cpu_var(next_interval)); 1938 __get_cpu_var(mce_next_interval));
1939 add_timer_on(t, cpu); 1939 add_timer_on(t, cpu);
1940 smp_call_function_single(cpu, mce_reenable_cpu, &action, 1); 1940 smp_call_function_single(cpu, mce_reenable_cpu, &action, 1);
1941 break; 1941 break;
diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c
index 1fecba404fd8..8cd5224943b5 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_amd.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c
@@ -69,7 +69,7 @@ struct threshold_bank {
69 struct threshold_block *blocks; 69 struct threshold_block *blocks;
70 cpumask_var_t cpus; 70 cpumask_var_t cpus;
71}; 71};
72static DEFINE_PER_CPU(struct threshold_bank *, threshold_banks[NR_BANKS]); 72static DEFINE_PER_CPU(struct threshold_bank * [NR_BANKS], threshold_banks);
73 73
74#ifdef CONFIG_SMP 74#ifdef CONFIG_SMP
75static unsigned char shared_bank[NR_BANKS] = { 75static unsigned char shared_bank[NR_BANKS] = {
diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c
index f9cd0849bd42..2732e2c1e4d3 100644
--- a/arch/x86/kernel/cpu/perf_counter.c
+++ b/arch/x86/kernel/cpu/perf_counter.c
@@ -1211,7 +1211,7 @@ amd_pmu_disable_counter(struct hw_perf_counter *hwc, int idx)
1211 x86_pmu_disable_counter(hwc, idx); 1211 x86_pmu_disable_counter(hwc, idx);
1212} 1212}
1213 1213
1214static DEFINE_PER_CPU(u64, prev_left[X86_PMC_IDX_MAX]); 1214static DEFINE_PER_CPU(u64 [X86_PMC_IDX_MAX], pmc_prev_left);
1215 1215
1216/* 1216/*
1217 * Set the next IRQ period, based on the hwc->period_left value. 1217 * Set the next IRQ period, based on the hwc->period_left value.
@@ -1253,7 +1253,7 @@ x86_perf_counter_set_period(struct perf_counter *counter,
1253 if (left > x86_pmu.max_period) 1253 if (left > x86_pmu.max_period)
1254 left = x86_pmu.max_period; 1254 left = x86_pmu.max_period;
1255 1255
1256 per_cpu(prev_left[idx], smp_processor_id()) = left; 1256 per_cpu(pmc_prev_left[idx], smp_processor_id()) = left;
1257 1257
1258 /* 1258 /*
1259 * The hw counter starts counting from this counter offset, 1259 * The hw counter starts counting from this counter offset,
@@ -1470,7 +1470,7 @@ void perf_counter_print_debug(void)
1470 rdmsrl(x86_pmu.eventsel + idx, pmc_ctrl); 1470 rdmsrl(x86_pmu.eventsel + idx, pmc_ctrl);
1471 rdmsrl(x86_pmu.perfctr + idx, pmc_count); 1471 rdmsrl(x86_pmu.perfctr + idx, pmc_count);
1472 1472
1473 prev_left = per_cpu(prev_left[idx], cpu); 1473 prev_left = per_cpu(pmc_prev_left[idx], cpu);
1474 1474
1475 pr_info("CPU#%d: gen-PMC%d ctrl: %016llx\n", 1475 pr_info("CPU#%d: gen-PMC%d ctrl: %016llx\n",
1476 cpu, idx, pmc_ctrl); 1476 cpu, idx, pmc_ctrl);
@@ -2110,8 +2110,8 @@ void callchain_store(struct perf_callchain_entry *entry, u64 ip)
2110 entry->ip[entry->nr++] = ip; 2110 entry->ip[entry->nr++] = ip;
2111} 2111}
2112 2112
2113static DEFINE_PER_CPU(struct perf_callchain_entry, irq_entry); 2113static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_irq_entry);
2114static DEFINE_PER_CPU(struct perf_callchain_entry, nmi_entry); 2114static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_nmi_entry);
2115static DEFINE_PER_CPU(int, in_nmi_frame); 2115static DEFINE_PER_CPU(int, in_nmi_frame);
2116 2116
2117 2117
@@ -2264,9 +2264,9 @@ struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
2264 struct perf_callchain_entry *entry; 2264 struct perf_callchain_entry *entry;
2265 2265
2266 if (in_nmi()) 2266 if (in_nmi())
2267 entry = &__get_cpu_var(nmi_entry); 2267 entry = &__get_cpu_var(pmc_nmi_entry);
2268 else 2268 else
2269 entry = &__get_cpu_var(irq_entry); 2269 entry = &__get_cpu_var(pmc_irq_entry);
2270 2270
2271 entry->nr = 0; 2271 entry->nr = 0;
2272 2272
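
As the perf_counter and MCE hunks above show, only the definitions move to the new array form; readers keep indexing the variable name inside per_cpu() and __get_cpu_var(). A tiny illustrative fragment (record_period() and its arguments are made up for illustration):

static DEFINE_PER_CPU(u64 [X86_PMC_IDX_MAX], pmc_prev_left);

static void record_period(int idx, u64 left, int cpu)
{
        /* this CPU's slot */
        per_cpu(pmc_prev_left[idx], smp_processor_id()) = left;
        /* another CPU's slot */
        left = per_cpu(pmc_prev_left[idx], cpu);
}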
diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
index 07d81916f212..d559af913e1f 100644
--- a/arch/x86/kernel/setup_percpu.c
+++ b/arch/x86/kernel/setup_percpu.c
@@ -55,6 +55,7 @@ EXPORT_SYMBOL(__per_cpu_offset);
55#define PERCPU_FIRST_CHUNK_RESERVE 0 55#define PERCPU_FIRST_CHUNK_RESERVE 0
56#endif 56#endif
57 57
58#ifdef CONFIG_X86_32
58/** 59/**
59 * pcpu_need_numa - determine percpu allocation needs to consider NUMA 60 * pcpu_need_numa - determine percpu allocation needs to consider NUMA
60 * 61 *
@@ -83,6 +84,7 @@ static bool __init pcpu_need_numa(void)
83#endif 84#endif
84 return false; 85 return false;
85} 86}
87#endif
86 88
87/** 89/**
88 * pcpu_alloc_bootmem - NUMA friendly alloc_bootmem wrapper for percpu 90 * pcpu_alloc_bootmem - NUMA friendly alloc_bootmem wrapper for percpu
@@ -124,308 +126,35 @@ static void * __init pcpu_alloc_bootmem(unsigned int cpu, unsigned long size,
124} 126}
125 127
126/* 128/*
127 * Large page remap allocator 129 * Helpers for first chunk memory allocation
128 *
129 * This allocator uses PMD page as unit. A PMD page is allocated for
130 * each cpu and each is remapped into vmalloc area using PMD mapping.
131 * As PMD page is quite large, only part of it is used for the first
132 * chunk. Unused part is returned to the bootmem allocator.
133 *
134 * So, the PMD pages are mapped twice - once to the physical mapping
135 * and to the vmalloc area for the first percpu chunk. The double
136 * mapping does add one more PMD TLB entry pressure but still is much
137 * better than only using 4k mappings while still being NUMA friendly.
138 */ 130 */
139#ifdef CONFIG_NEED_MULTIPLE_NODES 131static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size, size_t align)
140struct pcpul_ent {
141 unsigned int cpu;
142 void *ptr;
143};
144
145static size_t pcpul_size;
146static struct pcpul_ent *pcpul_map;
147static struct vm_struct pcpul_vm;
148
149static struct page * __init pcpul_get_page(unsigned int cpu, int pageno)
150{ 132{
151 size_t off = (size_t)pageno << PAGE_SHIFT; 133 return pcpu_alloc_bootmem(cpu, size, align);
152
153 if (off >= pcpul_size)
154 return NULL;
155
156 return virt_to_page(pcpul_map[cpu].ptr + off);
157} 134}
158 135
159static ssize_t __init setup_pcpu_lpage(size_t static_size, bool chosen) 136static void __init pcpu_fc_free(void *ptr, size_t size)
160{ 137{
161 size_t map_size, dyn_size; 138 free_bootmem(__pa(ptr), size);
162 unsigned int cpu;
163 int i, j;
164 ssize_t ret;
165
166 if (!chosen) {
167 size_t vm_size = VMALLOC_END - VMALLOC_START;
168 size_t tot_size = nr_cpu_ids * PMD_SIZE;
169
170 /* on non-NUMA, embedding is better */
171 if (!pcpu_need_numa())
172 return -EINVAL;
173
174 /* don't consume more than 20% of vmalloc area */
175 if (tot_size > vm_size / 5) {
176 pr_info("PERCPU: too large chunk size %zuMB for "
177 "large page remap\n", tot_size >> 20);
178 return -EINVAL;
179 }
180 }
181
182 /* need PSE */
183 if (!cpu_has_pse) {
184 pr_warning("PERCPU: lpage allocator requires PSE\n");
185 return -EINVAL;
186 }
187
188 /*
189 * Currently supports only single page. Supporting multiple
190 * pages won't be too difficult if it ever becomes necessary.
191 */
192 pcpul_size = PFN_ALIGN(static_size + PERCPU_MODULE_RESERVE +
193 PERCPU_DYNAMIC_RESERVE);
194 if (pcpul_size > PMD_SIZE) {
195 pr_warning("PERCPU: static data is larger than large page, "
196 "can't use large page\n");
197 return -EINVAL;
198 }
199 dyn_size = pcpul_size - static_size - PERCPU_FIRST_CHUNK_RESERVE;
200
201 /* allocate pointer array and alloc large pages */
202 map_size = PFN_ALIGN(nr_cpu_ids * sizeof(pcpul_map[0]));
203 pcpul_map = alloc_bootmem(map_size);
204
205 for_each_possible_cpu(cpu) {
206 pcpul_map[cpu].cpu = cpu;
207 pcpul_map[cpu].ptr = pcpu_alloc_bootmem(cpu, PMD_SIZE,
208 PMD_SIZE);
209 if (!pcpul_map[cpu].ptr) {
210 pr_warning("PERCPU: failed to allocate large page "
211 "for cpu%u\n", cpu);
212 goto enomem;
213 }
214
215 /*
216 * Only use pcpul_size bytes and give back the rest.
217 *
218 * Ingo: The 2MB up-rounding bootmem is needed to make
219 * sure the partial 2MB page is still fully RAM - it's
220 * not well-specified to have a PAT-incompatible area
221 * (unmapped RAM, device memory, etc.) in that hole.
222 */
223 free_bootmem(__pa(pcpul_map[cpu].ptr + pcpul_size),
224 PMD_SIZE - pcpul_size);
225
226 memcpy(pcpul_map[cpu].ptr, __per_cpu_load, static_size);
227 }
228
229 /* allocate address and map */
230 pcpul_vm.flags = VM_ALLOC;
231 pcpul_vm.size = nr_cpu_ids * PMD_SIZE;
232 vm_area_register_early(&pcpul_vm, PMD_SIZE);
233
234 for_each_possible_cpu(cpu) {
235 pmd_t *pmd, pmd_v;
236
237 pmd = populate_extra_pmd((unsigned long)pcpul_vm.addr +
238 cpu * PMD_SIZE);
239 pmd_v = pfn_pmd(page_to_pfn(virt_to_page(pcpul_map[cpu].ptr)),
240 PAGE_KERNEL_LARGE);
241 set_pmd(pmd, pmd_v);
242 }
243
244 /* we're ready, commit */
245 pr_info("PERCPU: Remapped at %p with large pages, static data "
246 "%zu bytes\n", pcpul_vm.addr, static_size);
247
248 ret = pcpu_setup_first_chunk(pcpul_get_page, static_size,
249 PERCPU_FIRST_CHUNK_RESERVE, dyn_size,
250 PMD_SIZE, pcpul_vm.addr, NULL);
251
252 /* sort pcpul_map array for pcpu_lpage_remapped() */
253 for (i = 0; i < nr_cpu_ids - 1; i++)
254 for (j = i + 1; j < nr_cpu_ids; j++)
255 if (pcpul_map[i].ptr > pcpul_map[j].ptr) {
256 struct pcpul_ent tmp = pcpul_map[i];
257 pcpul_map[i] = pcpul_map[j];
258 pcpul_map[j] = tmp;
259 }
260
261 return ret;
262
263enomem:
264 for_each_possible_cpu(cpu)
265 if (pcpul_map[cpu].ptr)
266 free_bootmem(__pa(pcpul_map[cpu].ptr), pcpul_size);
267 free_bootmem(__pa(pcpul_map), map_size);
268 return -ENOMEM;
269} 139}
270 140
-/**
- * pcpu_lpage_remapped - determine whether a kaddr is in pcpul recycled area
- * @kaddr: the kernel address in question
- *
- * Determine whether @kaddr falls in the pcpul recycled area. This is
- * used by pageattr to detect VM aliases and break up the pcpu PMD
- * mapping such that the same physical page is not mapped under
- * different attributes.
- *
- * The recycled area is always at the tail of a partially used PMD
- * page.
- *
- * RETURNS:
- * Address of corresponding remapped pcpu address if match is found;
- * otherwise, NULL.
- */
-void *pcpu_lpage_remapped(void *kaddr)
+static int __init pcpu_cpu_distance(unsigned int from, unsigned int to)
 {
-	void *pmd_addr = (void *)((unsigned long)kaddr & PMD_MASK);
-	unsigned long offset = (unsigned long)kaddr & ~PMD_MASK;
-	int left = 0, right = nr_cpu_ids - 1;
-	int pos;
-
-	/* pcpul in use at all? */
-	if (!pcpul_map)
-		return NULL;
-
-	/* okay, perform binary search */
-	while (left <= right) {
-		pos = (left + right) / 2;
-
-		if (pcpul_map[pos].ptr < pmd_addr)
-			left = pos + 1;
-		else if (pcpul_map[pos].ptr > pmd_addr)
-			right = pos - 1;
-		else {
-			/* it shouldn't be in the area for the first chunk */
-			WARN_ON(offset < pcpul_size);
-
-			return pcpul_vm.addr +
-				pcpul_map[pos].cpu * PMD_SIZE + offset;
-		}
-	}
-
-	return NULL;
-}
+#ifdef CONFIG_NEED_MULTIPLE_NODES
+	if (early_cpu_to_node(from) == early_cpu_to_node(to))
+		return LOCAL_DISTANCE;
+	else
+		return REMOTE_DISTANCE;
 #else
-static ssize_t __init setup_pcpu_lpage(size_t static_size, bool chosen)
-{
-	return -EINVAL;
-}
+	return LOCAL_DISTANCE;
 #endif
-
-/*
- * Embedding allocator
- *
- * The first chunk is sized to just contain the static area plus
- * module and dynamic reserves and embedded into linear physical
- * mapping so that it can use PMD mapping without additional TLB
- * pressure.
- */
-static ssize_t __init setup_pcpu_embed(size_t static_size, bool chosen)
-{
-	size_t reserve = PERCPU_MODULE_RESERVE + PERCPU_DYNAMIC_RESERVE;
-
-	/*
-	 * If large page isn't supported, there's no benefit in doing
-	 * this. Also, embedding allocation doesn't play well with
-	 * NUMA.
-	 */
-	if (!chosen && (!cpu_has_pse || pcpu_need_numa()))
-		return -EINVAL;
-
-	return pcpu_embed_first_chunk(static_size, PERCPU_FIRST_CHUNK_RESERVE,
-				      reserve - PERCPU_FIRST_CHUNK_RESERVE, -1);
 }
 
-/*
- * 4k page allocator
- *
- * This is the basic allocator. Static percpu area is allocated
- * page-by-page and most of initialization is done by the generic
- * setup function.
- */
-static struct page **pcpu4k_pages __initdata;
-static int pcpu4k_nr_static_pages __initdata;
-
-static struct page * __init pcpu4k_get_page(unsigned int cpu, int pageno)
-{
-	if (pageno < pcpu4k_nr_static_pages)
-		return pcpu4k_pages[cpu * pcpu4k_nr_static_pages + pageno];
-	return NULL;
-}
-
-static void __init pcpu4k_populate_pte(unsigned long addr)
+static void __init pcpup_populate_pte(unsigned long addr)
 {
 	populate_extra_pte(addr);
 }
 
-static ssize_t __init setup_pcpu_4k(size_t static_size)
-{
-	size_t pages_size;
-	unsigned int cpu;
-	int i, j;
-	ssize_t ret;
-
-	pcpu4k_nr_static_pages = PFN_UP(static_size);
-
-	/* unaligned allocations can't be freed, round up to page size */
-	pages_size = PFN_ALIGN(pcpu4k_nr_static_pages * nr_cpu_ids
-			       * sizeof(pcpu4k_pages[0]));
-	pcpu4k_pages = alloc_bootmem(pages_size);
-
-	/* allocate and copy */
-	j = 0;
-	for_each_possible_cpu(cpu)
-		for (i = 0; i < pcpu4k_nr_static_pages; i++) {
-			void *ptr;
-
-			ptr = pcpu_alloc_bootmem(cpu, PAGE_SIZE, PAGE_SIZE);
-			if (!ptr) {
-				pr_warning("PERCPU: failed to allocate "
-					   "4k page for cpu%u\n", cpu);
-				goto enomem;
-			}
-
-			memcpy(ptr, __per_cpu_load + i * PAGE_SIZE, PAGE_SIZE);
-			pcpu4k_pages[j++] = virt_to_page(ptr);
-		}
-
-	/* we're ready, commit */
-	pr_info("PERCPU: Allocated %d 4k pages, static data %zu bytes\n",
-		pcpu4k_nr_static_pages, static_size);
-
-	ret = pcpu_setup_first_chunk(pcpu4k_get_page, static_size,
-				     PERCPU_FIRST_CHUNK_RESERVE, -1,
-				     -1, NULL, pcpu4k_populate_pte);
-	goto out_free_ar;
-
-enomem:
-	while (--j >= 0)
-		free_bootmem(__pa(page_address(pcpu4k_pages[j])), PAGE_SIZE);
-	ret = -ENOMEM;
-out_free_ar:
-	free_bootmem(__pa(pcpu4k_pages), pages_size);
-	return ret;
-}
-
-/* for explicit first chunk allocator selection */
-static char pcpu_chosen_alloc[16] __initdata;
-
-static int __init percpu_alloc_setup(char *str)
-{
-	strncpy(pcpu_chosen_alloc, str, sizeof(pcpu_chosen_alloc) - 1);
-	return 0;
-}
-early_param("percpu_alloc", percpu_alloc_setup);
-
 static inline void setup_percpu_segment(int cpu)
 {
 #ifdef CONFIG_X86_32
@@ -441,52 +170,49 @@ static inline void setup_percpu_segment(int cpu)
 
 void __init setup_per_cpu_areas(void)
 {
-	size_t static_size = __per_cpu_end - __per_cpu_start;
 	unsigned int cpu;
 	unsigned long delta;
-	size_t pcpu_unit_size;
-	ssize_t ret;
+	int rc;
 
 	pr_info("NR_CPUS:%d nr_cpumask_bits:%d nr_cpu_ids:%d nr_node_ids:%d\n",
 		NR_CPUS, nr_cpumask_bits, nr_cpu_ids, nr_node_ids);
 
 	/*
-	 * Allocate percpu area. If PSE is supported, try to make use
-	 * of large page mappings. Please read comments on top of
-	 * each allocator for details.
+	 * Allocate percpu area. Embedding allocator is our favorite;
+	 * however, on NUMA configurations, it can result in very
+	 * sparse unit mapping and vmalloc area isn't spacious enough
+	 * on 32bit. Use page in that case.
 	 */
-	ret = -EINVAL;
-	if (strlen(pcpu_chosen_alloc)) {
-		if (strcmp(pcpu_chosen_alloc, "4k")) {
-			if (!strcmp(pcpu_chosen_alloc, "lpage"))
-				ret = setup_pcpu_lpage(static_size, true);
-			else if (!strcmp(pcpu_chosen_alloc, "embed"))
-				ret = setup_pcpu_embed(static_size, true);
-			else
-				pr_warning("PERCPU: unknown allocator %s "
-					   "specified\n", pcpu_chosen_alloc);
-			if (ret < 0)
-				pr_warning("PERCPU: %s allocator failed (%zd), "
-					   "falling back to 4k\n",
-					   pcpu_chosen_alloc, ret);
-		}
-	} else {
-		ret = setup_pcpu_lpage(static_size, false);
-		if (ret < 0)
-			ret = setup_pcpu_embed(static_size, false);
+#ifdef CONFIG_X86_32
+	if (pcpu_chosen_fc == PCPU_FC_AUTO && pcpu_need_numa())
+		pcpu_chosen_fc = PCPU_FC_PAGE;
+#endif
+	rc = -EINVAL;
+	if (pcpu_chosen_fc != PCPU_FC_PAGE) {
+		const size_t atom_size = cpu_has_pse ? PMD_SIZE : PAGE_SIZE;
+		const size_t dyn_size = PERCPU_MODULE_RESERVE +
+			PERCPU_DYNAMIC_RESERVE - PERCPU_FIRST_CHUNK_RESERVE;
+
+		rc = pcpu_embed_first_chunk(PERCPU_FIRST_CHUNK_RESERVE,
+					    dyn_size, atom_size,
+					    pcpu_cpu_distance,
+					    pcpu_fc_alloc, pcpu_fc_free);
+		if (rc < 0)
+			pr_warning("PERCPU: %s allocator failed (%d), "
+				   "falling back to page size\n",
+				   pcpu_fc_names[pcpu_chosen_fc], rc);
 	}
-	if (ret < 0)
-		ret = setup_pcpu_4k(static_size);
-	if (ret < 0)
-		panic("cannot allocate static percpu area (%zu bytes, err=%zd)",
-		      static_size, ret);
-
-	pcpu_unit_size = ret;
+	if (rc < 0)
+		rc = pcpu_page_first_chunk(PERCPU_FIRST_CHUNK_RESERVE,
+					   pcpu_fc_alloc, pcpu_fc_free,
+					   pcpup_populate_pte);
+	if (rc < 0)
+		panic("cannot initialize percpu area (err=%d)", rc);
 
 	/* alrighty, percpu areas up and running */
 	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
 	for_each_possible_cpu(cpu) {
-		per_cpu_offset(cpu) = delta + cpu * pcpu_unit_size;
+		per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
 		per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
 		per_cpu(cpu_number, cpu) = cpu;
 		setup_percpu_segment(cpu);
diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
index 9fc178255c04..0ccb57d5ee35 100644
--- a/arch/x86/kernel/vmlinux.lds.S
+++ b/arch/x86/kernel/vmlinux.lds.S
@@ -348,15 +348,12 @@ SECTIONS
 	_end = .;
 }
 
-	/* Sections to be discarded */
-	/DISCARD/ : {
-		*(.exitcall.exit)
-		*(.eh_frame)
-		*(.discard)
-	}
-
 	STABS_DEBUG
 	DWARF_DEBUG
+
+	/* Sections to be discarded */
+	DISCARDS
+	/DISCARD/ : { *(.eh_frame) }
 }
 
 
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index e245775ec856..24952fdc7e40 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -12,6 +12,7 @@
 #include <linux/seq_file.h>
 #include <linux/debugfs.h>
 #include <linux/pfn.h>
+#include <linux/percpu.h>
 
 #include <asm/e820.h>
 #include <asm/processor.h>
@@ -686,7 +687,7 @@ static int cpa_process_alias(struct cpa_data *cpa)
 {
 	struct cpa_data alias_cpa;
 	unsigned long laddr = (unsigned long)__va(cpa->pfn << PAGE_SHIFT);
-	unsigned long vaddr, remapped;
+	unsigned long vaddr;
 	int ret;
 
 	if (cpa->pfn >= max_pfn_mapped)
@@ -744,24 +745,6 @@ static int cpa_process_alias(struct cpa_data *cpa)
 	}
 #endif
 
-	/*
-	 * If the PMD page was partially used for per-cpu remapping,
-	 * the recycled area needs to be split and modified. Because
-	 * the area is always proper subset of a PMD page
-	 * cpa->numpages is guaranteed to be 1 for these areas, so
-	 * there's no need to loop over and check for further remaps.
-	 */
-	remapped = (unsigned long)pcpu_lpage_remapped((void *)laddr);
-	if (remapped) {
-		WARN_ON(cpa->numpages > 1);
-		alias_cpa = *cpa;
-		alias_cpa.vaddr = &remapped;
-		alias_cpa.flags &= ~(CPA_PAGES_ARRAY | CPA_ARRAY);
-		ret = __change_page_attr_set_clr(&alias_cpa, 0);
-		if (ret)
-			return ret;
-	}
-
 	return 0;
 }
 
diff --git a/arch/xtensa/kernel/vmlinux.lds.S b/arch/xtensa/kernel/vmlinux.lds.S
index 41c159cd872f..921b6ff3b645 100644
--- a/arch/xtensa/kernel/vmlinux.lds.S
+++ b/arch/xtensa/kernel/vmlinux.lds.S
@@ -280,15 +280,6 @@ SECTIONS
 		*(.ResetVector.text)
 	}
 
-	/* Sections to be discarded */
-	/DISCARD/ :
-	{
-		*(.exit.literal)
-		EXIT_TEXT
-		EXIT_DATA
-		*(.exitcall.exit)
-	}
-
 	.xt.lit : { *(.xt.lit) }
 	.xt.prop : { *(.xt.prop) }
 
@@ -321,4 +312,8 @@ SECTIONS
 		*(.xt.lit)
 		*(.gnu.linkonce.p*)
 	}
+
+	/* Sections to be discarded */
+	DISCARDS
+	/DISCARD/ : { *(.exit.literal) }
 }