author    Linus Torvalds <torvalds@linux-foundation.org>    2009-06-28 14:05:28 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>    2009-06-28 14:05:28 -0400
commit    8326e284f8deb75eee3d32b973464dd96e120843
tree      a2c4e18f4e7984680946cab0303e5369f175d4f5
parent    187dd317f0169142e4adf6263852f93c3b6f6a3c
parent    e888d7facd1f1460a638151036d15b6cfb3ccc74
Merge branch 'x86-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'x86-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  x86, delay: tsc based udelay should have rdtsc_barrier
  x86, setup: correct include file in <asm/boot.h>
  x86, setup: Fix typo "CONFIG_x86_64" in <asm/boot.h>
  x86, mce: percpu mcheck_timer should be pinned
  x86: Add sysctl to allow panic on IOCK NMI error
  x86: Fix uv bau sending buffer initialization
  x86, mce: Fix mce resume on 32bit
  x86: Move init_gbpages() to setup_arch()
  x86: ensure percpu lpage doesn't consume too much vmalloc space
  x86: implement percpu_alloc kernel parameter
  x86: fix pageattr handling for lpage percpu allocator and re-enable it
  x86: reorganize cpa_process_alias()
  x86: prepare setup_pcpu_lpage() for pageattr fix
  x86: rename remap percpu first chunk allocator to lpage
  x86: fix duplicate free in setup_pcpu_remap() failure path
  percpu: fix too lazy vunmap cache flushing
  x86: Set cpu_llc_id on AMD CPUs
 Documentation/kernel-parameters.txt |   6
 arch/x86/include/asm/boot.h         |   6
 arch/x86/include/asm/percpu.h       |  10
 arch/x86/kernel/cpu/amd.c           |   4
 arch/x86/kernel/cpu/mcheck/mce.c    |   4
 arch/x86/kernel/dumpstack.c         |   1
 arch/x86/kernel/setup.c             |  16
 arch/x86/kernel/setup_percpu.c      | 219
 arch/x86/kernel/tlb_uv.c            |   9
 arch/x86/kernel/traps.c             |   3
 arch/x86/lib/delay.c                |   3
 arch/x86/mm/init.c                  |  17
 arch/x86/mm/pageattr.c              |  65
 arch/x86/power/cpu.c                |   2
 include/linux/kernel.h              |   1
 kernel/sysctl.c                     |   8
 mm/percpu.c                         |  24
 17 files changed, 275 insertions(+), 123 deletions(-)
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 7e3c9f3fb3cd..d77fbd8b79ac 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -1915,6 +1915,12 @@ and is between 256 and 4096 characters. It is defined in the file
 			Format: { 0 | 1 }
 			See arch/parisc/kernel/pdc_chassis.c
 
+	percpu_alloc=	[X86] Select which percpu first chunk allocator to use.
+			Allowed values are one of "lpage", "embed" and "4k".
+			See comments in arch/x86/kernel/setup_percpu.c for
+			details on each allocator.  This parameter is primarily
+			for debugging and performance comparison.
+
 	pf.		[PARIDE]
 			See Documentation/blockdev/paride.txt.
 
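The handler that consumes this parameter is added to arch/x86/kernel/setup_percpu.c later in this diff. As a rough, simplified userspace model only (not the kernel code; the real strncpy()/strcmp() dispatch is in the setup_percpu.c hunks below), the selection boils down to:

#include <stdio.h>
#include <string.h>

/* Simplified model of the percpu_alloc= handling added in this merge:
 * the boot string is copied into a small static buffer and later
 * compared against "lpage", "embed" and "4k" to pick the first chunk
 * allocator, falling back down the chain on failure. */
static char pcpu_chosen_alloc[16];

static void percpu_alloc_setup(const char *str)
{
	strncpy(pcpu_chosen_alloc, str, sizeof(pcpu_chosen_alloc) - 1);
}

int main(void)
{
	percpu_alloc_setup("embed");	/* e.g. booting with percpu_alloc=embed */

	if (!strcmp(pcpu_chosen_alloc, "lpage"))
		puts("would try the large page remap allocator first");
	else if (!strcmp(pcpu_chosen_alloc, "embed"))
		puts("would try the embedding allocator first");
	else if (!strcmp(pcpu_chosen_alloc, "4k"))
		puts("would go straight to the 4k allocator");
	else
		puts("unknown allocator, default probing order is used");
	return 0;
}

Per the setup_percpu.c changes below, an explicitly chosen allocator that fails still falls back to the 4k allocator.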
diff --git a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h
index 418e632d4a80..7a1065958ba9 100644
--- a/arch/x86/include/asm/boot.h
+++ b/arch/x86/include/asm/boot.h
@@ -8,7 +8,7 @@
 
 #ifdef __KERNEL__
 
-#include <asm/page_types.h>
+#include <asm/pgtable_types.h>
 
 /* Physical address where kernel should be loaded. */
 #define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
@@ -16,10 +16,10 @@
 				& ~(CONFIG_PHYSICAL_ALIGN - 1))
 
 /* Minimum kernel alignment, as a power of two */
-#ifdef CONFIG_x86_64
+#ifdef CONFIG_X86_64
 #define MIN_KERNEL_ALIGN_LG2	PMD_SHIFT
 #else
-#define MIN_KERNEL_ALIGN_LG2	(PAGE_SHIFT+1)
+#define MIN_KERNEL_ALIGN_LG2	(PAGE_SHIFT + THREAD_ORDER)
 #endif
 #define MIN_KERNEL_ALIGN	(_AC(1, UL) << MIN_KERNEL_ALIGN_LG2)
 
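As a quick sanity check of the new MIN_KERNEL_ALIGN values, here is a standalone sketch with the kernel constants hard-coded as assumptions (PMD_SHIFT 21 for 2 MiB pages on x86_64, PAGE_SHIFT 12, THREAD_ORDER 1 for 8 KiB stacks on 32-bit):

#include <stdio.h>

/* Hypothetical stand-ins for the kernel constants used by <asm/boot.h>. */
#define PMD_SHIFT	21	/* x86_64: 2 MiB PMD mappings */
#define PAGE_SHIFT	12	/* 4 KiB pages */
#define THREAD_ORDER	1	/* 32-bit: 8 KiB kernel stacks */

int main(void)
{
	unsigned long align64 = 1UL << PMD_SHIFT;
	unsigned long align32 = 1UL << (PAGE_SHIFT + THREAD_ORDER);

	/* prints 2097152 (2 MiB) and 8192 (8 KiB) respectively */
	printf("MIN_KERNEL_ALIGN: x86_64=%lu, 32-bit=%lu\n", align64, align32);
	return 0;
}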
diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h
index 02ecb30982a3..103f1ddb0d85 100644
--- a/arch/x86/include/asm/percpu.h
+++ b/arch/x86/include/asm/percpu.h
@@ -42,6 +42,7 @@
 
 #else /* ...!ASSEMBLY */
 
+#include <linux/kernel.h>
 #include <linux/stringify.h>
 
 #ifdef CONFIG_SMP
@@ -155,6 +156,15 @@ do { \
 /* We can use this directly for local CPU (faster). */
 DECLARE_PER_CPU(unsigned long, this_cpu_off);
 
+#ifdef CONFIG_NEED_MULTIPLE_NODES
+void *pcpu_lpage_remapped(void *kaddr);
+#else
+static inline void *pcpu_lpage_remapped(void *kaddr)
+{
+	return NULL;
+}
+#endif
+
 #endif /* !__ASSEMBLY__ */
 
 #ifdef CONFIG_SMP
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index e5b27d8f1b47..28e5f5956042 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -258,13 +258,15 @@ static void __cpuinit amd_detect_cmp(struct cpuinfo_x86 *c)
 {
 #ifdef CONFIG_X86_HT
 	unsigned bits;
+	int cpu = smp_processor_id();
 
 	bits = c->x86_coreid_bits;
-
 	/* Low order bits define the core id (index of core in socket) */
 	c->cpu_core_id = c->initial_apicid & ((1 << bits)-1);
 	/* Convert the initial APIC ID into the socket ID */
 	c->phys_proc_id = c->initial_apicid >> bits;
+	/* use socket ID also for last level cache */
+	per_cpu(cpu_llc_id, cpu) = c->phys_proc_id;
 #endif
 }
 
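A worked example of the APIC-ID arithmetic above, as a standalone sketch with made-up values (in the kernel the inputs come from struct cpuinfo_x86, and after this change the socket id also lands in the per-cpu cpu_llc_id):

#include <stdio.h>

int main(void)
{
	/* Hypothetical AMD package with 2 core-id bits per socket. */
	unsigned bits = 2;
	unsigned initial_apicid = 0x6;		/* 0b110: socket 1, core 2 */

	unsigned cpu_core_id = initial_apicid & ((1 << bits) - 1);
	unsigned phys_proc_id = initial_apicid >> bits;

	/* After this patch the socket id is also used as the LLC id. */
	unsigned cpu_llc_id = phys_proc_id;

	printf("core=%u socket=%u llc=%u\n", cpu_core_id, phys_proc_id, cpu_llc_id);
	return 0;
}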
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index 284d1de968bc..af425b83202b 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -1117,7 +1117,7 @@ static void mcheck_timer(unsigned long data)
 	*n = min(*n*2, (int)round_jiffies_relative(check_interval*HZ));
 
 	t->expires = jiffies + *n;
-	add_timer(t);
+	add_timer_on(t, smp_processor_id());
 }
 
 static void mce_do_trigger(struct work_struct *work)
@@ -1321,7 +1321,7 @@ static void mce_init_timer(void)
 		return;
 	setup_timer(t, mcheck_timer, smp_processor_id());
 	t->expires = round_jiffies(jiffies + *n);
-	add_timer(t);
+	add_timer_on(t, smp_processor_id());
 }
 
 /*
diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
index 95ea5fa7d444..c8405718a4c3 100644
--- a/arch/x86/kernel/dumpstack.c
+++ b/arch/x86/kernel/dumpstack.c
@@ -22,6 +22,7 @@
 #include "dumpstack.h"
 
 int panic_on_unrecovered_nmi;
+int panic_on_io_nmi;
 unsigned int code_bytes = 64;
 int kstack_depth_to_print = 3 * STACKSLOTS_PER_LINE;
 static int die_counter;
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index be5ae80f897f..de2cab132844 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -289,6 +289,20 @@ void * __init extend_brk(size_t size, size_t align)
 	return ret;
 }
 
+#ifdef CONFIG_X86_64
+static void __init init_gbpages(void)
+{
+	if (direct_gbpages && cpu_has_gbpages)
+		printk(KERN_INFO "Using GB pages for direct mapping\n");
+	else
+		direct_gbpages = 0;
+}
+#else
+static inline void init_gbpages(void)
+{
+}
+#endif
+
 static void __init reserve_brk(void)
 {
 	if (_brk_end > _brk_start)
@@ -871,6 +885,8 @@ void __init setup_arch(char **cmdline_p)
 
 	reserve_brk();
 
+	init_gbpages();
+
 	/* max_pfn_mapped is updated here */
 	max_low_pfn_mapped = init_memory_mapping(0, max_low_pfn<<PAGE_SHIFT);
 	max_pfn_mapped = max_low_pfn_mapped;
diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
index 9c3f0823e6aa..29a3eef7cf4a 100644
--- a/arch/x86/kernel/setup_percpu.c
+++ b/arch/x86/kernel/setup_percpu.c
@@ -124,7 +124,7 @@ static void * __init pcpu_alloc_bootmem(unsigned int cpu, unsigned long size,
 }
 
 /*
- * Remap allocator
+ * Large page remap allocator
  *
  * This allocator uses PMD page as unit.  A PMD page is allocated for
  * each cpu and each is remapped into vmalloc area using PMD mapping.
@@ -137,105 +137,185 @@ static void * __init pcpu_alloc_bootmem(unsigned int cpu, unsigned long size,
  * better than only using 4k mappings while still being NUMA friendly.
  */
 #ifdef CONFIG_NEED_MULTIPLE_NODES
-static size_t pcpur_size __initdata;
-static void **pcpur_ptrs __initdata;
+struct pcpul_ent {
+	unsigned int	cpu;
+	void		*ptr;
+};
+
+static size_t pcpul_size;
+static struct pcpul_ent *pcpul_map;
+static struct vm_struct pcpul_vm;
 
-static struct page * __init pcpur_get_page(unsigned int cpu, int pageno)
+static struct page * __init pcpul_get_page(unsigned int cpu, int pageno)
 {
 	size_t off = (size_t)pageno << PAGE_SHIFT;
 
-	if (off >= pcpur_size)
+	if (off >= pcpul_size)
 		return NULL;
 
-	return virt_to_page(pcpur_ptrs[cpu] + off);
+	return virt_to_page(pcpul_map[cpu].ptr + off);
 }
 
-static ssize_t __init setup_pcpu_remap(size_t static_size)
+static ssize_t __init setup_pcpu_lpage(size_t static_size, bool chosen)
 {
-	static struct vm_struct vm;
-	size_t ptrs_size, dyn_size;
+	size_t map_size, dyn_size;
 	unsigned int cpu;
+	int i, j;
 	ssize_t ret;
 
-	/*
-	 * If large page isn't supported, there's no benefit in doing
-	 * this.  Also, on non-NUMA, embedding is better.
-	 *
-	 * NOTE: disabled for now.
-	 */
-	if (true || !cpu_has_pse || !pcpu_need_numa())
+	if (!chosen) {
+		size_t vm_size = VMALLOC_END - VMALLOC_START;
+		size_t tot_size = num_possible_cpus() * PMD_SIZE;
+
+		/* on non-NUMA, embedding is better */
+		if (!pcpu_need_numa())
+			return -EINVAL;
+
+		/* don't consume more than 20% of vmalloc area */
+		if (tot_size > vm_size / 5) {
+			pr_info("PERCPU: too large chunk size %zuMB for "
+				"large page remap\n", tot_size >> 20);
+			return -EINVAL;
+		}
+	}
+
+	/* need PSE */
+	if (!cpu_has_pse) {
+		pr_warning("PERCPU: lpage allocator requires PSE\n");
 		return -EINVAL;
+	}
 
 	/*
 	 * Currently supports only single page.  Supporting multiple
 	 * pages won't be too difficult if it ever becomes necessary.
 	 */
-	pcpur_size = PFN_ALIGN(static_size + PERCPU_MODULE_RESERVE +
+	pcpul_size = PFN_ALIGN(static_size + PERCPU_MODULE_RESERVE +
 			       PERCPU_DYNAMIC_RESERVE);
-	if (pcpur_size > PMD_SIZE) {
+	if (pcpul_size > PMD_SIZE) {
 		pr_warning("PERCPU: static data is larger than large page, "
 			   "can't use large page\n");
 		return -EINVAL;
 	}
-	dyn_size = pcpur_size - static_size - PERCPU_FIRST_CHUNK_RESERVE;
+	dyn_size = pcpul_size - static_size - PERCPU_FIRST_CHUNK_RESERVE;
 
 	/* allocate pointer array and alloc large pages */
-	ptrs_size = PFN_ALIGN(num_possible_cpus() * sizeof(pcpur_ptrs[0]));
-	pcpur_ptrs = alloc_bootmem(ptrs_size);
+	map_size = PFN_ALIGN(num_possible_cpus() * sizeof(pcpul_map[0]));
+	pcpul_map = alloc_bootmem(map_size);
 
 	for_each_possible_cpu(cpu) {
-		pcpur_ptrs[cpu] = pcpu_alloc_bootmem(cpu, PMD_SIZE, PMD_SIZE);
-		if (!pcpur_ptrs[cpu])
+		pcpul_map[cpu].cpu = cpu;
+		pcpul_map[cpu].ptr = pcpu_alloc_bootmem(cpu, PMD_SIZE,
+							PMD_SIZE);
+		if (!pcpul_map[cpu].ptr) {
+			pr_warning("PERCPU: failed to allocate large page "
+				   "for cpu%u\n", cpu);
 			goto enomem;
+		}
 
 		/*
-		 * Only use pcpur_size bytes and give back the rest.
+		 * Only use pcpul_size bytes and give back the rest.
 		 *
 		 * Ingo: The 2MB up-rounding bootmem is needed to make
 		 * sure the partial 2MB page is still fully RAM - it's
 		 * not well-specified to have a PAT-incompatible area
 		 * (unmapped RAM, device memory, etc.) in that hole.
 		 */
-		free_bootmem(__pa(pcpur_ptrs[cpu] + pcpur_size),
-			     PMD_SIZE - pcpur_size);
+		free_bootmem(__pa(pcpul_map[cpu].ptr + pcpul_size),
+			     PMD_SIZE - pcpul_size);
 
-		memcpy(pcpur_ptrs[cpu], __per_cpu_load, static_size);
+		memcpy(pcpul_map[cpu].ptr, __per_cpu_load, static_size);
 	}
 
 	/* allocate address and map */
-	vm.flags = VM_ALLOC;
-	vm.size = num_possible_cpus() * PMD_SIZE;
-	vm_area_register_early(&vm, PMD_SIZE);
+	pcpul_vm.flags = VM_ALLOC;
+	pcpul_vm.size = num_possible_cpus() * PMD_SIZE;
+	vm_area_register_early(&pcpul_vm, PMD_SIZE);
 
 	for_each_possible_cpu(cpu) {
-		pmd_t *pmd;
+		pmd_t *pmd, pmd_v;
 
-		pmd = populate_extra_pmd((unsigned long)vm.addr
-					 + cpu * PMD_SIZE);
-		set_pmd(pmd, pfn_pmd(page_to_pfn(virt_to_page(pcpur_ptrs[cpu])),
-				     PAGE_KERNEL_LARGE));
+		pmd = populate_extra_pmd((unsigned long)pcpul_vm.addr +
+					 cpu * PMD_SIZE);
+		pmd_v = pfn_pmd(page_to_pfn(virt_to_page(pcpul_map[cpu].ptr)),
+				PAGE_KERNEL_LARGE);
+		set_pmd(pmd, pmd_v);
 	}
 
 	/* we're ready, commit */
 	pr_info("PERCPU: Remapped at %p with large pages, static data "
-		"%zu bytes\n", vm.addr, static_size);
+		"%zu bytes\n", pcpul_vm.addr, static_size);
 
-	ret = pcpu_setup_first_chunk(pcpur_get_page, static_size,
+	ret = pcpu_setup_first_chunk(pcpul_get_page, static_size,
 				     PERCPU_FIRST_CHUNK_RESERVE, dyn_size,
-				     PMD_SIZE, vm.addr, NULL);
-	goto out_free_ar;
+				     PMD_SIZE, pcpul_vm.addr, NULL);
+
+	/* sort pcpul_map array for pcpu_lpage_remapped() */
+	for (i = 0; i < num_possible_cpus() - 1; i++)
+		for (j = i + 1; j < num_possible_cpus(); j++)
+			if (pcpul_map[i].ptr > pcpul_map[j].ptr) {
+				struct pcpul_ent tmp = pcpul_map[i];
+				pcpul_map[i] = pcpul_map[j];
+				pcpul_map[j] = tmp;
+			}
+
+	return ret;
 
 enomem:
 	for_each_possible_cpu(cpu)
-		if (pcpur_ptrs[cpu])
-			free_bootmem(__pa(pcpur_ptrs[cpu]), PMD_SIZE);
-	ret = -ENOMEM;
-out_free_ar:
-	free_bootmem(__pa(pcpur_ptrs), ptrs_size);
-	return ret;
+		if (pcpul_map[cpu].ptr)
+			free_bootmem(__pa(pcpul_map[cpu].ptr), pcpul_size);
+	free_bootmem(__pa(pcpul_map), map_size);
+	return -ENOMEM;
+}
+
+/**
+ * pcpu_lpage_remapped - determine whether a kaddr is in pcpul recycled area
+ * @kaddr: the kernel address in question
+ *
+ * Determine whether @kaddr falls in the pcpul recycled area.  This is
+ * used by pageattr to detect VM aliases and break up the pcpu PMD
+ * mapping such that the same physical page is not mapped under
+ * different attributes.
+ *
+ * The recycled area is always at the tail of a partially used PMD
+ * page.
+ *
+ * RETURNS:
+ * Address of corresponding remapped pcpu address if match is found;
+ * otherwise, NULL.
+ */
+void *pcpu_lpage_remapped(void *kaddr)
+{
+	void *pmd_addr = (void *)((unsigned long)kaddr & PMD_MASK);
+	unsigned long offset = (unsigned long)kaddr & ~PMD_MASK;
+	int left = 0, right = num_possible_cpus() - 1;
+	int pos;
+
+	/* pcpul in use at all? */
+	if (!pcpul_map)
+		return NULL;
+
+	/* okay, perform binary search */
+	while (left <= right) {
+		pos = (left + right) / 2;
+
+		if (pcpul_map[pos].ptr < pmd_addr)
+			left = pos + 1;
+		else if (pcpul_map[pos].ptr > pmd_addr)
+			right = pos - 1;
+		else {
+			/* it shouldn't be in the area for the first chunk */
+			WARN_ON(offset < pcpul_size);
+
+			return pcpul_vm.addr +
+				pcpul_map[pos].cpu * PMD_SIZE + offset;
+		}
+	}
+
+	return NULL;
 }
 #else
-static ssize_t __init setup_pcpu_remap(size_t static_size)
+static ssize_t __init setup_pcpu_lpage(size_t static_size, bool chosen)
 {
 	return -EINVAL;
 }
@@ -249,7 +329,7 @@ static ssize_t __init setup_pcpu_remap(size_t static_size)
  * mapping so that it can use PMD mapping without additional TLB
  * pressure.
  */
-static ssize_t __init setup_pcpu_embed(size_t static_size)
+static ssize_t __init setup_pcpu_embed(size_t static_size, bool chosen)
 {
 	size_t reserve = PERCPU_MODULE_RESERVE + PERCPU_DYNAMIC_RESERVE;
 
@@ -258,7 +338,7 @@ static ssize_t __init setup_pcpu_embed(size_t static_size)
 	 * this.  Also, embedding allocation doesn't play well with
 	 * NUMA.
 	 */
-	if (!cpu_has_pse || pcpu_need_numa())
+	if (!chosen && (!cpu_has_pse || pcpu_need_numa()))
 		return -EINVAL;
 
 	return pcpu_embed_first_chunk(static_size, PERCPU_FIRST_CHUNK_RESERVE,
@@ -308,8 +388,11 @@ static ssize_t __init setup_pcpu_4k(size_t static_size)
 		void *ptr;
 
 		ptr = pcpu_alloc_bootmem(cpu, PAGE_SIZE, PAGE_SIZE);
-		if (!ptr)
+		if (!ptr) {
+			pr_warning("PERCPU: failed to allocate "
+				   "4k page for cpu%u\n", cpu);
 			goto enomem;
+		}
 
 		memcpy(ptr, __per_cpu_load + i * PAGE_SIZE, PAGE_SIZE);
 		pcpu4k_pages[j++] = virt_to_page(ptr);
@@ -333,6 +416,16 @@ out_free_ar:
 	return ret;
 }
 
+/* for explicit first chunk allocator selection */
+static char pcpu_chosen_alloc[16] __initdata;
+
+static int __init percpu_alloc_setup(char *str)
+{
+	strncpy(pcpu_chosen_alloc, str, sizeof(pcpu_chosen_alloc) - 1);
+	return 0;
+}
+early_param("percpu_alloc", percpu_alloc_setup);
+
 static inline void setup_percpu_segment(int cpu)
 {
 #ifdef CONFIG_X86_32
@@ -346,11 +439,6 @@ static inline void setup_percpu_segment(int cpu)
 #endif
 }
 
-/*
- * Great future plan:
- * Declare PDA itself and support (irqstack,tss,pgd) as per cpu data.
- * Always point %gs to its beginning
- */
 void __init setup_per_cpu_areas(void)
 {
 	size_t static_size = __per_cpu_end - __per_cpu_start;
@@ -367,9 +455,26 @@ void __init setup_per_cpu_areas(void)
 	 * of large page mappings.  Please read comments on top of
 	 * each allocator for details.
 	 */
-	ret = setup_pcpu_remap(static_size);
-	if (ret < 0)
-		ret = setup_pcpu_embed(static_size);
+	ret = -EINVAL;
+	if (strlen(pcpu_chosen_alloc)) {
+		if (strcmp(pcpu_chosen_alloc, "4k")) {
+			if (!strcmp(pcpu_chosen_alloc, "lpage"))
+				ret = setup_pcpu_lpage(static_size, true);
+			else if (!strcmp(pcpu_chosen_alloc, "embed"))
+				ret = setup_pcpu_embed(static_size, true);
+			else
+				pr_warning("PERCPU: unknown allocator %s "
+					   "specified\n", pcpu_chosen_alloc);
+			if (ret < 0)
+				pr_warning("PERCPU: %s allocator failed (%zd), "
+					   "falling back to 4k\n",
+					   pcpu_chosen_alloc, ret);
+		}
+	} else {
+		ret = setup_pcpu_lpage(static_size, false);
+		if (ret < 0)
+			ret = setup_pcpu_embed(static_size, false);
+	}
 	if (ret < 0)
 		ret = setup_pcpu_4k(static_size);
 	if (ret < 0)
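setup_pcpu_lpage() sorts pcpul_map by chunk address so pcpu_lpage_remapped() can binary-search it when pageattr asks about the recycled tail of a PMD page. A standalone sketch of that lookup pattern, using mock chunk addresses and an assumed 2 MiB PMD_SIZE instead of the real bootmem allocations:

#include <stdio.h>

#define PMD_SHIFT	21
#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE - 1))

/* Mirrors struct pcpul_ent from the hunk above. */
struct ent {
	unsigned int cpu;
	void *ptr;		/* start of the PMD-sized chunk */
};

/* Mock map, sorted by ptr as setup_pcpu_lpage() guarantees. */
static struct ent map[] = {
	{ 1, (void *)(4 * PMD_SIZE) },
	{ 0, (void *)(7 * PMD_SIZE) },
	{ 2, (void *)(9 * PMD_SIZE) },
};

static int lookup_cpu(void *kaddr)
{
	unsigned long pmd_addr = (unsigned long)kaddr & PMD_MASK;
	int left = 0, right = (int)(sizeof(map) / sizeof(map[0])) - 1;

	while (left <= right) {
		int pos = (left + right) / 2;
		unsigned long p = (unsigned long)map[pos].ptr;

		if (p < pmd_addr)
			left = pos + 1;
		else if (p > pmd_addr)
			right = pos - 1;
		else
			return (int)map[pos].cpu;
	}
	return -1;	/* address is not inside any recycled chunk */
}

int main(void)
{
	printf("cpu for addr in chunk 7: %d\n",
	       lookup_cpu((void *)(7 * PMD_SIZE + 123)));
	printf("cpu for unrelated addr:  %d\n",
	       lookup_cpu((void *)(2 * PMD_SIZE)));
	return 0;
}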
diff --git a/arch/x86/kernel/tlb_uv.c b/arch/x86/kernel/tlb_uv.c
index 124d40c575df..8ccabb8a2f6a 100644
--- a/arch/x86/kernel/tlb_uv.c
+++ b/arch/x86/kernel/tlb_uv.c
@@ -711,7 +711,6 @@ uv_activation_descriptor_init(int node, int pnode)
 	unsigned long pa;
 	unsigned long m;
 	unsigned long n;
-	unsigned long mmr_image;
 	struct bau_desc *adp;
 	struct bau_desc *ad2;
 
@@ -727,12 +726,8 @@ uv_activation_descriptor_init(int node, int pnode)
 	n = pa >> uv_nshift;
 	m = pa & uv_mmask;
 
-	mmr_image = uv_read_global_mmr64(pnode, UVH_LB_BAU_SB_DESCRIPTOR_BASE);
-	if (mmr_image) {
-		uv_write_global_mmr64(pnode, (unsigned long)
-				      UVH_LB_BAU_SB_DESCRIPTOR_BASE,
-				      (n << UV_DESC_BASE_PNODE_SHIFT | m));
-	}
+	uv_write_global_mmr64(pnode, UVH_LB_BAU_SB_DESCRIPTOR_BASE,
+			      (n << UV_DESC_BASE_PNODE_SHIFT | m));
 
 	/*
 	 * initializing all 8 (UV_ITEMS_PER_DESCRIPTOR) descriptors for each
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index a0f48f5671c0..5204332f475d 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -346,6 +346,9 @@ io_check_error(unsigned char reason, struct pt_regs *regs)
 	printk(KERN_EMERG "NMI: IOCK error (debug interrupt?)\n");
 	show_registers(regs);
 
+	if (panic_on_io_nmi)
+		panic("NMI IOCK error: Not continuing");
+
 	/* Re-enable the IOCK line, wait for a few seconds */
 	reason = (reason & 0xf) | 8;
 	outb(reason, 0x61);
diff --git a/arch/x86/lib/delay.c b/arch/x86/lib/delay.c
index f4568605d7d5..ff485d361182 100644
--- a/arch/x86/lib/delay.c
+++ b/arch/x86/lib/delay.c
@@ -55,8 +55,10 @@ static void delay_tsc(unsigned long loops)
 
 	preempt_disable();
 	cpu = smp_processor_id();
+	rdtsc_barrier();
 	rdtscl(bclock);
 	for (;;) {
+		rdtsc_barrier();
 		rdtscl(now);
 		if ((now - bclock) >= loops)
 			break;
@@ -78,6 +80,7 @@ static void delay_tsc(unsigned long loops)
 		if (unlikely(cpu != smp_processor_id())) {
 			loops -= (now - bclock);
 			cpu = smp_processor_id();
+			rdtsc_barrier();
 			rdtscl(bclock);
 		}
 	}
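rdtsc_barrier() keeps the TSC read from being reordered with surrounding loads (it is an alternatives-patched LFENCE/MFENCE in the kernel). A userspace x86 sketch of the same fenced-read delay loop, using compiler intrinsics rather than the kernel's rdtscl()/rdtsc_barrier() helpers (assumes GCC/Clang on x86-64):

#include <stdio.h>
#include <stdint.h>
#include <x86intrin.h>		/* __rdtsc(), _mm_lfence() */

/* Serialize earlier loads, then read the TSC; roughly what the kernel's
 * rdtsc_barrier(); rdtscl(); pair does in delay_tsc(). */
static inline uint64_t fenced_rdtsc(void)
{
	_mm_lfence();
	return __rdtsc();
}

int main(void)
{
	uint64_t start = fenced_rdtsc();
	uint64_t now;

	/* Busy-wait for a fixed number of TSC ticks, as delay_tsc() does. */
	do {
		now = fenced_rdtsc();
	} while (now - start < 1000000);

	printf("waited %llu cycles\n", (unsigned long long)(now - start));
	return 0;
}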
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index f53b57e4086f..47ce9a2ce5e7 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -177,20 +177,6 @@ static int __meminit save_mr(struct map_range *mr, int nr_range,
 	return nr_range;
 }
 
-#ifdef CONFIG_X86_64
-static void __init init_gbpages(void)
-{
-	if (direct_gbpages && cpu_has_gbpages)
-		printk(KERN_INFO "Using GB pages for direct mapping\n");
-	else
-		direct_gbpages = 0;
-}
-#else
-static inline void init_gbpages(void)
-{
-}
-#endif
-
 /*
  * Setup the direct mapping of the physical memory at PAGE_OFFSET.
  * This runs before bootmem is initialized and gets pages directly from
@@ -210,9 +196,6 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
 
 	printk(KERN_INFO "init_memory_mapping: %016lx-%016lx\n", start, end);
 
-	if (!after_bootmem)
-		init_gbpages();
-
 #if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_KMEMCHECK)
 	/*
 	 * For CONFIG_DEBUG_PAGEALLOC, identity mapping will use small pages.
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 3cfe9ced8a4c..1b734d7a8966 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -11,6 +11,7 @@
 #include <linux/interrupt.h>
 #include <linux/seq_file.h>
 #include <linux/debugfs.h>
+#include <linux/pfn.h>
 
 #include <asm/e820.h>
 #include <asm/processor.h>
@@ -681,8 +682,9 @@ static int __change_page_attr_set_clr(struct cpa_data *cpa, int checkalias);
 static int cpa_process_alias(struct cpa_data *cpa)
 {
 	struct cpa_data alias_cpa;
-	int ret = 0;
-	unsigned long temp_cpa_vaddr, vaddr;
+	unsigned long laddr = (unsigned long)__va(cpa->pfn << PAGE_SHIFT);
+	unsigned long vaddr, remapped;
+	int ret;
 
 	if (cpa->pfn >= max_pfn_mapped)
 		return 0;
@@ -706,42 +708,55 @@ static int cpa_process_alias(struct cpa_data *cpa)
 		    PAGE_OFFSET + (max_pfn_mapped << PAGE_SHIFT)))) {
 
 		alias_cpa = *cpa;
-		temp_cpa_vaddr = (unsigned long) __va(cpa->pfn << PAGE_SHIFT);
-		alias_cpa.vaddr = &temp_cpa_vaddr;
+		alias_cpa.vaddr = &laddr;
 		alias_cpa.flags &= ~(CPA_PAGES_ARRAY | CPA_ARRAY);
 
-
 		ret = __change_page_attr_set_clr(&alias_cpa, 0);
+		if (ret)
+			return ret;
 	}
 
 #ifdef CONFIG_X86_64
-	if (ret)
-		return ret;
 	/*
-	 * No need to redo, when the primary call touched the high
-	 * mapping already:
-	 */
-	if (within(vaddr, (unsigned long) _text, _brk_end))
-		return 0;
-
-	/*
-	 * If the physical address is inside the kernel map, we need
+	 * If the primary call didn't touch the high mapping already
+	 * and the physical address is inside the kernel map, we need
 	 * to touch the high mapped kernel as well:
 	 */
-	if (!within(cpa->pfn, highmap_start_pfn(), highmap_end_pfn()))
-		return 0;
-
-	alias_cpa = *cpa;
-	temp_cpa_vaddr = (cpa->pfn << PAGE_SHIFT) + __START_KERNEL_map - phys_base;
-	alias_cpa.vaddr = &temp_cpa_vaddr;
-	alias_cpa.flags &= ~(CPA_PAGES_ARRAY | CPA_ARRAY);
+	if (!within(vaddr, (unsigned long)_text, _brk_end) &&
+	    within(cpa->pfn, highmap_start_pfn(), highmap_end_pfn())) {
+		unsigned long temp_cpa_vaddr = (cpa->pfn << PAGE_SHIFT) +
+					       __START_KERNEL_map - phys_base;
+		alias_cpa = *cpa;
+		alias_cpa.vaddr = &temp_cpa_vaddr;
+		alias_cpa.flags &= ~(CPA_PAGES_ARRAY | CPA_ARRAY);
+
+		/*
+		 * The high mapping range is imprecise, so ignore the
+		 * return value.
+		 */
+		__change_page_attr_set_clr(&alias_cpa, 0);
+	}
+#endif
 
 	/*
-	 * The high mapping range is imprecise, so ignore the return value.
+	 * If the PMD page was partially used for per-cpu remapping,
+	 * the recycled area needs to be split and modified.  Because
+	 * the area is always proper subset of a PMD page
+	 * cpa->numpages is guaranteed to be 1 for these areas, so
+	 * there's no need to loop over and check for further remaps.
 	 */
-	__change_page_attr_set_clr(&alias_cpa, 0);
-#endif
-	return ret;
+	remapped = (unsigned long)pcpu_lpage_remapped((void *)laddr);
+	if (remapped) {
+		WARN_ON(cpa->numpages > 1);
+		alias_cpa = *cpa;
+		alias_cpa.vaddr = &remapped;
+		alias_cpa.flags &= ~(CPA_PAGES_ARRAY | CPA_ARRAY);
+		ret = __change_page_attr_set_clr(&alias_cpa, 0);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
 }
 
 static int __change_page_attr_set_clr(struct cpa_data *cpa, int checkalias)
diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
index d277ef1eea51..b3d20b9cac63 100644
--- a/arch/x86/power/cpu.c
+++ b/arch/x86/power/cpu.c
@@ -244,7 +244,7 @@ static void __restore_processor_state(struct saved_context *ctxt)
 	do_fpu_end();
 	mtrr_ap_init();
 
-#ifdef CONFIG_X86_32
+#ifdef CONFIG_X86_OLD_MCE
 	mcheck_init(&boot_cpu_data);
 #endif
 }
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index fac104e7186a..d6320a3e8def 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -303,6 +303,7 @@ extern int oops_in_progress; /* If set, an oops, panic(), BUG() or die() is in
 extern int panic_timeout;
 extern int panic_on_oops;
 extern int panic_on_unrecovered_nmi;
+extern int panic_on_io_nmi;
 extern const char *print_tainted(void);
 extern void add_taint(unsigned flag);
 extern int test_taint(unsigned flag);
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index c428ba161db1..98e02328c67d 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -747,6 +747,14 @@ static struct ctl_table kern_table[] = {
 		.proc_handler	= &proc_dointvec,
 	},
 	{
+		.ctl_name	= CTL_UNNUMBERED,
+		.procname	= "panic_on_io_nmi",
+		.data		= &panic_on_io_nmi,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= &proc_dointvec,
+	},
+	{
 		.ctl_name	= KERN_BOOTLOADER_TYPE,
 		.procname	= "bootloader_type",
 		.data		= &bootloader_type,
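With this table entry the flag shows up as /proc/sys/kernel/panic_on_io_nmi (path assumed from the procname and its placement in kern_table). A minimal userspace sketch of enabling it at run time, equivalent to `sysctl kernel.panic_on_io_nmi=1`:

#include <stdio.h>

int main(void)
{
	/* Path follows from procname "panic_on_io_nmi" in kern_table. */
	FILE *f = fopen("/proc/sys/kernel/panic_on_io_nmi", "w");

	if (!f) {
		perror("panic_on_io_nmi");
		return 1;
	}
	fputs("1\n", f);	/* panic when an IOCK NMI error is raised */
	fclose(f);
	return 0;
}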
diff --git a/mm/percpu.c b/mm/percpu.c
index c0b2c1a76e81..b70f2acd8853 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -549,14 +549,14 @@ static void pcpu_free_area(struct pcpu_chunk *chunk, int freeme)
  * @chunk: chunk of interest
  * @page_start: page index of the first page to unmap
  * @page_end: page index of the last page to unmap + 1
- * @flush: whether to flush cache and tlb or not
+ * @flush_tlb: whether to flush tlb or not
  *
  * For each cpu, unmap pages [@page_start,@page_end) out of @chunk.
  * If @flush is true, vcache is flushed before unmapping and tlb
  * after.
  */
 static void pcpu_unmap(struct pcpu_chunk *chunk, int page_start, int page_end,
-		       bool flush)
+		       bool flush_tlb)
 {
 	unsigned int last = num_possible_cpus() - 1;
 	unsigned int cpu;
@@ -569,9 +569,8 @@ static void pcpu_unmap(struct pcpu_chunk *chunk, int page_start, int page_end,
 	 * the whole region at once rather than doing it for each cpu.
 	 * This could be an overkill but is more scalable.
 	 */
-	if (flush)
-		flush_cache_vunmap(pcpu_chunk_addr(chunk, 0, page_start),
-				   pcpu_chunk_addr(chunk, last, page_end));
+	flush_cache_vunmap(pcpu_chunk_addr(chunk, 0, page_start),
+			   pcpu_chunk_addr(chunk, last, page_end));
 
 	for_each_possible_cpu(cpu)
 		unmap_kernel_range_noflush(
@@ -579,7 +578,7 @@ static void pcpu_unmap(struct pcpu_chunk *chunk, int page_start, int page_end,
 			(page_end - page_start) << PAGE_SHIFT);
 
 	/* ditto as flush_cache_vunmap() */
-	if (flush)
+	if (flush_tlb)
 		flush_tlb_kernel_range(pcpu_chunk_addr(chunk, 0, page_start),
 				       pcpu_chunk_addr(chunk, last, page_end));
 }
@@ -1234,6 +1233,7 @@ static struct page * __init pcpue_get_page(unsigned int cpu, int pageno)
 ssize_t __init pcpu_embed_first_chunk(size_t static_size, size_t reserved_size,
 				      ssize_t dyn_size, ssize_t unit_size)
 {
+	size_t chunk_size;
 	unsigned int cpu;
 
 	/* determine parameters and allocate */
@@ -1248,11 +1248,15 @@ ssize_t __init pcpu_embed_first_chunk(size_t static_size, size_t reserved_size,
 	} else
 		pcpue_unit_size = max_t(size_t, pcpue_size, PCPU_MIN_UNIT_SIZE);
 
-	pcpue_ptr = __alloc_bootmem_nopanic(
-					num_possible_cpus() * pcpue_unit_size,
-					PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
-	if (!pcpue_ptr)
+	chunk_size = pcpue_unit_size * num_possible_cpus();
+
+	pcpue_ptr = __alloc_bootmem_nopanic(chunk_size, PAGE_SIZE,
+					    __pa(MAX_DMA_ADDRESS));
+	if (!pcpue_ptr) {
+		pr_warning("PERCPU: failed to allocate %zu bytes for "
+			   "embedding\n", chunk_size);
 		return -ENOMEM;
+	}
 
 	/* return the leftover and copy */
 	for_each_possible_cpu(cpu) {