Diffstat (limited to 'arch')
 arch/blackfin/include/asm/percpu.h | 10
 arch/x86/include/asm/linkage.h     | 16
 arch/x86/kernel/cpu/amd.c          | 52
 arch/x86/kernel/cpu/intel.c        | 25
 arch/x86/kernel/setup_percpu.c     | 73
 arch/x86/kernel/smpboot.c          | 78
 arch/x86/kernel/tlb_uv.c           |  2
 arch/x86/mm/init.c                 |  2
 arch/x86/mm/init_32.c              | 23
 arch/x86/mm/init_64.c              |  8
 arch/x86/mm/ioremap.c              | 21
 arch/x86/mm/kmmio.c                | 15
 arch/x86/mm/memtest.c              |  3
13 files changed, 182 insertions(+), 146 deletions(-)
diff --git a/arch/blackfin/include/asm/percpu.h b/arch/blackfin/include/asm/percpu.h
index 797c0c165069..c94c7bc88c71 100644
--- a/arch/blackfin/include/asm/percpu.h
+++ b/arch/blackfin/include/asm/percpu.h
@@ -3,14 +3,4 @@
 
 #include <asm-generic/percpu.h>
 
-#ifdef CONFIG_MODULES
-#define PERCPU_MODULE_RESERVE 8192
-#else
-#define PERCPU_MODULE_RESERVE 0
-#endif
-
-#define PERCPU_ENOUGH_ROOM \
-        (ALIGN(__per_cpu_end - __per_cpu_start, SMP_CACHE_BYTES) + \
-         PERCPU_MODULE_RESERVE)
-
 #endif /* __ARCH_BLACKFIN_PERCPU__ */
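
The blackfin-private reserve can go because the generic percpu layer of the same series supplies it. A minimal sketch of the shared definition this now relies on, from the include/linux/percpu.h of the era (treat the exact constant as an assumption, though it matches blackfin's old 8192):

        #ifdef CONFIG_MODULES
        #define PERCPU_MODULE_RESERVE   (8 << 10)  /* 8KiB, same as the old blackfin value */
        #else
        #define PERCPU_MODULE_RESERVE   0
        #endif

PERCPU_ENOUGH_ROOM is likewise computed generically from __per_cpu_start/__per_cpu_end plus this reserve, so the arch copy was redundant.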
diff --git a/arch/x86/include/asm/linkage.h b/arch/x86/include/asm/linkage.h
index 9320e2a8a26a..a0d70b46c27c 100644
--- a/arch/x86/include/asm/linkage.h
+++ b/arch/x86/include/asm/linkage.h
@@ -4,11 +4,6 @@
 #undef notrace
 #define notrace __attribute__((no_instrument_function))
 
-#ifdef CONFIG_X86_64
-#define __ALIGN .p2align 4,,15
-#define __ALIGN_STR ".p2align 4,,15"
-#endif
-
 #ifdef CONFIG_X86_32
 #define asmlinkage CPP_ASMLINKAGE __attribute__((regparm(0)))
 /*
@@ -50,16 +45,25 @@
         __asmlinkage_protect_n(ret, "g" (arg1), "g" (arg2), "g" (arg3), \
                                "g" (arg4), "g" (arg5), "g" (arg6))
 
-#endif
+#endif /* CONFIG_X86_32 */
+
+#ifdef __ASSEMBLY__
 
 #define GLOBAL(name)    \
         .globl name;    \
         name:
 
+#ifdef CONFIG_X86_64
+#define __ALIGN .p2align 4,,15
+#define __ALIGN_STR ".p2align 4,,15"
+#endif
+
 #ifdef CONFIG_X86_ALIGNMENT_16
 #define __ALIGN .align 16,0x90
 #define __ALIGN_STR ".align 16,0x90"
 #endif
 
+#endif /* __ASSEMBLY__ */
+
 #endif /* _ASM_X86_LINKAGE_H */
 
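
The x86_64 __ALIGN definitions are not deleted here, only moved under #ifdef __ASSEMBLY__ next to GLOBAL(), since both are consumed by assembly-only macros. For context, a sketch of how the generic include/linux/linkage.h of the era consumes them (the fallback values are an assumption from memory, not from this patch):

        /* generic side: falls back only when the arch defined nothing */
        #ifndef __ALIGN
        #define __ALIGN         .align 4,0x90
        #define __ALIGN_STR     ".align 4,0x90"
        #endif

        #ifdef __ASSEMBLY__
        #define ALIGN   __ALIGN
        #define ENTRY(name) \
                .globl name; \
                ALIGN; \
                name:
        #endif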
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index 25423a5b80ed..f47df59016c5 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -5,6 +5,7 @@
 #include <asm/io.h>
 #include <asm/processor.h>
 #include <asm/apic.h>
+#include <asm/cpu.h>
 
 #ifdef CONFIG_X86_64
 # include <asm/numa_64.h>
@@ -141,6 +142,55 @@ static void __cpuinit init_amd_k6(struct cpuinfo_x86 *c)
         }
 }
 
+static void __cpuinit amd_k7_smp_check(struct cpuinfo_x86 *c)
+{
+#ifdef CONFIG_SMP
+        /* called from identify_secondary_cpu()? */
+        if (c->cpu_index == boot_cpu_id)
+                return;
+
+        /*
+         * Certain Athlons might work (for various values of 'work') in SMP
+         * but they are not certified as MP capable.
+         */
+        /* Athlon 660/661 is valid. */
+        if ((c->x86_model == 6) && ((c->x86_mask == 0) ||
+            (c->x86_mask == 1)))
+                goto valid_k7;
+
+        /* Duron 670 is valid */
+        if ((c->x86_model == 7) && (c->x86_mask == 0))
+                goto valid_k7;
+
+        /*
+         * Athlon 662, Duron 671, and Athlon >model 7 have the MP capability
+         * bit.  It's worth noting that the A5 stepping (662) of some
+         * Athlon XPs have the MP bit set.
+         * See http://www.heise.de/newsticker/data/jow-18.10.01-000 for
+         * more.
+         */
+        if (((c->x86_model == 6) && (c->x86_mask >= 2)) ||
+            ((c->x86_model == 7) && (c->x86_mask >= 1)) ||
+            (c->x86_model > 7))
+                if (cpu_has_mp)
+                        goto valid_k7;
+
+        /* If we get here, this is not a certified SMP capable AMD system. */
+
+        /*
+         * Don't taint if we are running an SMP kernel on a single non-MP
+         * approved Athlon.
+         */
+        WARN_ONCE(1, "WARNING: This combination of AMD"
+                  " processors is not suitable for SMP.\n");
+        if (!test_taint(TAINT_UNSAFE_SMP))
+                add_taint(TAINT_UNSAFE_SMP);
+
+valid_k7:
+        ;
+#endif
+}
+
 static void __cpuinit init_amd_k7(struct cpuinfo_x86 *c)
 {
         u32 l, h;
@@ -175,6 +225,8 @@ static void __cpuinit init_amd_k7(struct cpuinfo_x86 *c)
         }
 
         set_cpu_cap(c, X86_FEATURE_K7);
+
+        amd_k7_smp_check(c);
 }
 #endif
 
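
The part numbers quoted in the comments ("Athlon 660", "Duron 670") are the CPUID family/model/stepping digits run together, which is exactly what the model/mask tests encode. Purely as an illustration (this helper is hypothetical, not part of the patch):

        /* "Athlon 660" = family 6, model 6, stepping 0; "Duron 671" = 6/7/1 */
        static unsigned int k7_fms(const struct cpuinfo_x86 *c)
        {
                return c->x86 * 100 + c->x86_model * 10 + c->x86_mask;
        }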
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index 1a89a2b68d15..c1c04bf0df77 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -14,6 +14,7 @@
 #include <asm/uaccess.h>
 #include <asm/ds.h>
 #include <asm/bugs.h>
+#include <asm/cpu.h>
 
 #ifdef CONFIG_X86_64
 #include <asm/topology.h>
@@ -116,6 +117,28 @@ static void __cpuinit trap_init_f00f_bug(void)
 }
 #endif
 
+static void __cpuinit intel_smp_check(struct cpuinfo_x86 *c)
+{
+#ifdef CONFIG_SMP
+        /* called from identify_secondary_cpu()? */
+        if (c->cpu_index == boot_cpu_id)
+                return;
+
+        /*
+         * Mask B, Pentium, but not Pentium MMX
+         */
+        if (c->x86 == 5 &&
+            c->x86_mask >= 1 && c->x86_mask <= 4 &&
+            c->x86_model <= 3) {
+                /*
+                 * Remember we have B step Pentia with bugs
+                 */
+                WARN_ONCE(1, "WARNING: SMP operation may be unreliable"
+                          " with B stepping processors.\n");
+        }
+#endif
+}
+
 static void __cpuinit intel_workarounds(struct cpuinfo_x86 *c)
 {
         unsigned long lo, hi;
@@ -192,6 +215,8 @@ static void __cpuinit intel_workarounds(struct cpuinfo_x86 *c)
 #ifdef CONFIG_X86_NUMAQ
         numaq_tsc_disable();
 #endif
+
+        intel_smp_check(c);
 }
 #else
 static void __cpuinit intel_workarounds(struct cpuinfo_x86 *c)
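
Behaviourally, the move from the old smp_checks() printk to WARN_ONCE() means the message now fires, with a backtrace, the first time a matching secondary CPU is identified rather than once after boot completes. Roughly, WARN_ONCE() from asm-generic/bug.h behaves like this paraphrase (a sketch, not the kernel's exact macro):

        #define WARN_ONCE_SKETCH(cond, fmt...) ({               \
                static bool __warned;                           \
                if ((cond) && !__warned) {                      \
                        __warned = true;                        \
                        WARN(1, fmt);   /* message + stack trace */ \
                }                                               \
                (cond);                                         \
        })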
diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
index c29f301d3885..efa615f2bf43 100644
--- a/arch/x86/kernel/setup_percpu.c
+++ b/arch/x86/kernel/setup_percpu.c
@@ -42,6 +42,19 @@ unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
 };
 EXPORT_SYMBOL(__per_cpu_offset);
 
+/*
+ * On x86_64 symbols referenced from code should be reachable using
+ * 32bit relocations.  Reserve space for static percpu variables in
+ * modules so that they are always served from the first chunk which
+ * is located at the percpu segment base.  On x86_32, anything can
+ * address anywhere.  No need to reserve space in the first chunk.
+ */
+#ifdef CONFIG_X86_64
+#define PERCPU_FIRST_CHUNK_RESERVE      PERCPU_MODULE_RESERVE
+#else
+#define PERCPU_FIRST_CHUNK_RESERVE      0
+#endif
+
 /**
  * pcpu_need_numa - determine percpu allocation needs to consider NUMA
  *
@@ -141,7 +154,7 @@ static ssize_t __init setup_pcpu_remap(size_t static_size)
 {
         static struct vm_struct vm;
         pg_data_t *last;
-        size_t ptrs_size;
+        size_t ptrs_size, dyn_size;
         unsigned int cpu;
         ssize_t ret;
 
@@ -169,12 +182,14 @@ proceed:
          * Currently supports only single page.  Supporting multiple
          * pages won't be too difficult if it ever becomes necessary.
          */
-        pcpur_size = PFN_ALIGN(static_size + PERCPU_DYNAMIC_RESERVE);
+        pcpur_size = PFN_ALIGN(static_size + PERCPU_MODULE_RESERVE +
+                               PERCPU_DYNAMIC_RESERVE);
         if (pcpur_size > PMD_SIZE) {
                 pr_warning("PERCPU: static data is larger than large page, "
                            "can't use large page\n");
                 return -EINVAL;
         }
+        dyn_size = pcpur_size - static_size - PERCPU_FIRST_CHUNK_RESERVE;
 
         /* allocate pointer array and alloc large pages */
         ptrs_size = PFN_ALIGN(num_possible_cpus() * sizeof(pcpur_ptrs[0]));
@@ -217,8 +232,9 @@ proceed:
         pr_info("PERCPU: Remapped at %p with large pages, static data "
                 "%zu bytes\n", vm.addr, static_size);
 
-        ret = pcpu_setup_first_chunk(pcpur_get_page, static_size, PMD_SIZE,
-                                     pcpur_size - static_size, vm.addr, NULL);
+        ret = pcpu_setup_first_chunk(pcpur_get_page, static_size,
+                                     PERCPU_FIRST_CHUNK_RESERVE,
+                                     PMD_SIZE, dyn_size, vm.addr, NULL);
         goto out_free_ar;
 
 enomem:
@@ -241,24 +257,31 @@ static ssize_t __init setup_pcpu_remap(size_t static_size)
  * Embedding allocator
  *
  * The first chunk is sized to just contain the static area plus
- * PERCPU_DYNAMIC_RESERVE and allocated as a contiguous area using
- * bootmem allocator and used as-is without being mapped into vmalloc
- * area.  This enables the first chunk to piggy back on the linear
- * physical PMD mapping and doesn't add any additional pressure to
- * TLB.
+ * module and dynamic reserves, and allocated as a contiguous area
+ * using bootmem allocator and used as-is without being mapped into
+ * vmalloc area.  This enables the first chunk to piggy back on the
+ * linear physical PMD mapping and doesn't add any additional pressure
+ * to TLB.  Note that if the needed size is smaller than the minimum
+ * unit size, the leftover is returned to the bootmem allocator.
  */
 static void *pcpue_ptr __initdata;
+static size_t pcpue_size __initdata;
 static size_t pcpue_unit_size __initdata;
 
 static struct page * __init pcpue_get_page(unsigned int cpu, int pageno)
 {
-        return virt_to_page(pcpue_ptr + cpu * pcpue_unit_size
-                            + ((size_t)pageno << PAGE_SHIFT));
+        size_t off = (size_t)pageno << PAGE_SHIFT;
+
+        if (off >= pcpue_size)
+                return NULL;
+
+        return virt_to_page(pcpue_ptr + cpu * pcpue_unit_size + off);
 }
 
 static ssize_t __init setup_pcpu_embed(size_t static_size)
 {
         unsigned int cpu;
+        size_t dyn_size;
 
         /*
          * If large page isn't supported, there's no benefit in doing
@@ -269,25 +292,32 @@ static ssize_t __init setup_pcpu_embed(size_t static_size)
                 return -EINVAL;
 
         /* allocate and copy */
-        pcpue_unit_size = PFN_ALIGN(static_size + PERCPU_DYNAMIC_RESERVE);
-        pcpue_unit_size = max_t(size_t, pcpue_unit_size, PCPU_MIN_UNIT_SIZE);
+        pcpue_size = PFN_ALIGN(static_size + PERCPU_MODULE_RESERVE +
+                               PERCPU_DYNAMIC_RESERVE);
+        pcpue_unit_size = max_t(size_t, pcpue_size, PCPU_MIN_UNIT_SIZE);
+        dyn_size = pcpue_size - static_size - PERCPU_FIRST_CHUNK_RESERVE;
+
         pcpue_ptr = pcpu_alloc_bootmem(0, num_possible_cpus() * pcpue_unit_size,
                                        PAGE_SIZE);
         if (!pcpue_ptr)
                 return -ENOMEM;
 
-        for_each_possible_cpu(cpu)
-                memcpy(pcpue_ptr + cpu * pcpue_unit_size, __per_cpu_load,
-                       static_size);
+        for_each_possible_cpu(cpu) {
+                void *ptr = pcpue_ptr + cpu * pcpue_unit_size;
+
+                free_bootmem(__pa(ptr + pcpue_size),
+                             pcpue_unit_size - pcpue_size);
+                memcpy(ptr, __per_cpu_load, static_size);
+        }
 
         /* we're ready, commit */
         pr_info("PERCPU: Embedded %zu pages at %p, static data %zu bytes\n",
-                pcpue_unit_size >> PAGE_SHIFT, pcpue_ptr, static_size);
+                pcpue_size >> PAGE_SHIFT, pcpue_ptr, static_size);
 
         return pcpu_setup_first_chunk(pcpue_get_page, static_size,
-                                      pcpue_unit_size,
-                                      pcpue_unit_size - static_size, pcpue_ptr,
-                                      NULL);
+                                      PERCPU_FIRST_CHUNK_RESERVE,
+                                      pcpue_unit_size, dyn_size,
+                                      pcpue_ptr, NULL);
 }
 
 /*
@@ -344,7 +374,8 @@ static ssize_t __init setup_pcpu_4k(size_t static_size)
         pr_info("PERCPU: Allocated %d 4k pages, static data %zu bytes\n",
                 pcpu4k_nr_static_pages, static_size);
 
-        ret = pcpu_setup_first_chunk(pcpu4k_get_page, static_size, 0, 0, NULL,
+        ret = pcpu_setup_first_chunk(pcpu4k_get_page, static_size,
+                                     PERCPU_FIRST_CHUNK_RESERVE, -1, -1, NULL,
                                      pcpu4k_populate_pte);
         goto out_free_ar;
 
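
All three first-chunk setup paths now pass a reserved-size argument between static_size and the unit/dynamic sizes. Inferred purely from the call sites above (the authoritative prototype lives in include/linux/percpu.h, and -1 appears to mean "let the allocator pick"), the implied signature is:

        ssize_t __init pcpu_setup_first_chunk(pcpu_get_page_fn_t get_page_fn,
                                              size_t static_size,
                                              size_t reserved_size,  /* new: module reserve in first chunk */
                                              ssize_t unit_size,     /* -1 = auto */
                                              ssize_t dyn_size,      /* -1 = auto */
                                              void *base_addr,
                                              pcpu_populate_pte_fn_t populate_pte_fn);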
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 249334f5080a..ef7d10170c30 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -114,10 +114,6 @@ EXPORT_PER_CPU_SYMBOL(cpu_info);
 
 atomic_t init_deasserted;
 
-
-/* Set if we find a B stepping CPU */
-static int __cpuinitdata smp_b_stepping;
-
 #if defined(CONFIG_NUMA) && defined(CONFIG_X86_32)
 
 /* which logical CPUs are on which nodes */
@@ -271,8 +267,6 @@ static void __cpuinit smp_callin(void)
         cpumask_set_cpu(cpuid, cpu_callin_mask);
 }
 
-static int __cpuinitdata unsafe_smp;
-
 /*
  * Activate a secondary processor.
  */
@@ -340,76 +334,6 @@ notrace static void __cpuinit start_secondary(void *unused)
         cpu_idle();
 }
 
-static void __cpuinit smp_apply_quirks(struct cpuinfo_x86 *c)
-{
-        /*
-         * Mask B, Pentium, but not Pentium MMX
-         */
-        if (c->x86_vendor == X86_VENDOR_INTEL &&
-            c->x86 == 5 &&
-            c->x86_mask >= 1 && c->x86_mask <= 4 &&
-            c->x86_model <= 3)
-                /*
-                 * Remember we have B step Pentia with bugs
-                 */
-                smp_b_stepping = 1;
-
-        /*
-         * Certain Athlons might work (for various values of 'work') in SMP
-         * but they are not certified as MP capable.
-         */
-        if ((c->x86_vendor == X86_VENDOR_AMD) && (c->x86 == 6)) {
-
-                if (num_possible_cpus() == 1)
-                        goto valid_k7;
-
-                /* Athlon 660/661 is valid. */
-                if ((c->x86_model == 6) && ((c->x86_mask == 0) ||
-                    (c->x86_mask == 1)))
-                        goto valid_k7;
-
-                /* Duron 670 is valid */
-                if ((c->x86_model == 7) && (c->x86_mask == 0))
-                        goto valid_k7;
-
-                /*
-                 * Athlon 662, Duron 671, and Athlon >model 7 have capability
-                 * bit. It's worth noting that the A5 stepping (662) of some
-                 * Athlon XP's have the MP bit set.
-                 * See http://www.heise.de/newsticker/data/jow-18.10.01-000 for
-                 * more.
-                 */
-                if (((c->x86_model == 6) && (c->x86_mask >= 2)) ||
-                    ((c->x86_model == 7) && (c->x86_mask >= 1)) ||
-                    (c->x86_model > 7))
-                        if (cpu_has_mp)
-                                goto valid_k7;
-
-                /* If we get here, not a certified SMP capable AMD system. */
-                unsafe_smp = 1;
-        }
-
-valid_k7:
-        ;
-}
-
-static void __cpuinit smp_checks(void)
-{
-        if (smp_b_stepping)
-                printk(KERN_WARNING "WARNING: SMP operation may be unreliable"
-                       "with B stepping processors.\n");
-
-        /*
-         * Don't taint if we are running SMP kernel on a single non-MP
-         * approved Athlon
-         */
-        if (unsafe_smp && num_online_cpus() > 1) {
-                printk(KERN_INFO "WARNING: This combination of AMD"
-                       "processors is not suitable for SMP.\n");
-                add_taint(TAINT_UNSAFE_SMP);
-        }
-}
-
 /*
  * The bootstrap kernel entry code has set these up.  Save them for
  * a given CPU
@@ -423,7 +347,6 @@ void __cpuinit smp_store_cpu_info(int id)
         c->cpu_index = id;
         if (id != 0)
                 identify_secondary_cpu(c);
-        smp_apply_quirks(c);
 }
 
 
@@ -1193,7 +1116,6 @@ void __init native_smp_cpus_done(unsigned int max_cpus)
         pr_debug("Boot done.\n");
 
         impress_friends();
-        smp_checks();
 #ifdef CONFIG_X86_IO_APIC
         setup_ioapic_dest();
 #endif
diff --git a/arch/x86/kernel/tlb_uv.c b/arch/x86/kernel/tlb_uv.c
index f04549afcfe9..d038b9c45cf8 100644
--- a/arch/x86/kernel/tlb_uv.c
+++ b/arch/x86/kernel/tlb_uv.c
@@ -314,8 +314,6 @@ const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
         int locals = 0;
         struct bau_desc *bau_desc;
 
-        WARN_ON(!in_atomic());
-
         cpumask_andnot(flush_mask, cpumask, cpumask_of(cpu));
 
         uv_cpu = uv_blade_processor_id();
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index 6d63e3d1253d..15219e0d1243 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -134,8 +134,8 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
 {
         unsigned long page_size_mask = 0;
         unsigned long start_pfn, end_pfn;
+        unsigned long ret = 0;
         unsigned long pos;
-        unsigned long ret;
 
         struct map_range mr[NR_RANGE_MR];
         int nr_range, i;
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index d7f5060ab21c..749559ed80f5 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -806,11 +806,6 @@ static unsigned long __init setup_node_bootmem(int nodeid,
 {
         unsigned long bootmap_size;
 
-        if (start_pfn > max_low_pfn)
-                return bootmap;
-        if (end_pfn > max_low_pfn)
-                end_pfn = max_low_pfn;
-
         /* don't touch min_low_pfn */
         bootmap_size = init_bootmem_node(NODE_DATA(nodeid),
                                          bootmap >> PAGE_SHIFT,
@@ -843,13 +838,23 @@ void __init setup_bootmem_allocator(void)
                  max_pfn_mapped<<PAGE_SHIFT);
         printk(KERN_INFO "  low ram: 0 - %08lx\n", max_low_pfn<<PAGE_SHIFT);
 
+        for_each_online_node(nodeid) {
+                unsigned long start_pfn, end_pfn;
+
 #ifdef CONFIG_NEED_MULTIPLE_NODES
-        for_each_online_node(nodeid)
-                bootmap = setup_node_bootmem(nodeid, node_start_pfn[nodeid],
-                                             node_end_pfn[nodeid], bootmap);
+                start_pfn = node_start_pfn[nodeid];
+                end_pfn = node_end_pfn[nodeid];
+                if (start_pfn > max_low_pfn)
+                        continue;
+                if (end_pfn > max_low_pfn)
+                        end_pfn = max_low_pfn;
 #else
-        bootmap = setup_node_bootmem(0, 0, max_low_pfn, bootmap);
+                start_pfn = 0;
+                end_pfn = max_low_pfn;
 #endif
+                bootmap = setup_node_bootmem(nodeid, start_pfn, end_pfn,
+                                             bootmap);
+        }
 
         after_bootmem = 1;
 }
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 66d6be85df82..1753e8020df6 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -85,7 +85,7 @@ early_param("gbpages", parse_direct_gbpages_on);
 pteval_t __supported_pte_mask __read_mostly = ~_PAGE_IOMAP;
 EXPORT_SYMBOL_GPL(__supported_pte_mask);
 
-static int do_not_nx __cpuinitdata;
+static int disable_nx __cpuinitdata;
 
 /*
  * noexec=on|off
@@ -100,9 +100,9 @@ static int __init nonx_setup(char *str)
                 return -EINVAL;
         if (!strncmp(str, "on", 2)) {
                 __supported_pte_mask |= _PAGE_NX;
-                do_not_nx = 0;
+                disable_nx = 0;
         } else if (!strncmp(str, "off", 3)) {
-                do_not_nx = 1;
+                disable_nx = 1;
                 __supported_pte_mask &= ~_PAGE_NX;
         }
         return 0;
@@ -114,7 +114,7 @@ void __cpuinit check_efer(void)
         unsigned long efer;
 
         rdmsrl(MSR_EFER, efer);
-        if (!(efer & EFER_NX) || do_not_nx)
+        if (!(efer & EFER_NX) || disable_nx)
                 __supported_pte_mask &= ~_PAGE_NX;
 }
 
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index 62773abdf088..aca924a30ee6 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -87,6 +87,8 @@ bool __virt_addr_valid(unsigned long x)
                 return false;
         if (__vmalloc_start_set && is_vmalloc_addr((void *) x))
                 return false;
+        if (x >= FIXADDR_START)
+                return false;
         return pfn_valid((x - PAGE_OFFSET) >> PAGE_SHIFT);
 }
 EXPORT_SYMBOL(__virt_addr_valid);
@@ -504,13 +506,19 @@ static inline pte_t * __init early_ioremap_pte(unsigned long addr)
         return &bm_pte[pte_index(addr)];
 }
 
+static unsigned long slot_virt[FIX_BTMAPS_SLOTS] __initdata;
+
 void __init early_ioremap_init(void)
 {
         pmd_t *pmd;
+        int i;
 
         if (early_ioremap_debug)
                 printk(KERN_INFO "early_ioremap_init()\n");
 
+        for (i = 0; i < FIX_BTMAPS_SLOTS; i++)
+                slot_virt[i] = fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);
+
         pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
         memset(bm_pte, 0, sizeof(bm_pte));
         pmd_populate_kernel(&init_mm, pmd, bm_pte);
@@ -577,6 +585,7 @@ static inline void __init early_clear_fixmap(enum fixed_addresses idx)
 
 static void __iomem *prev_map[FIX_BTMAPS_SLOTS] __initdata;
 static unsigned long prev_size[FIX_BTMAPS_SLOTS] __initdata;
+
 static int __init check_early_ioremap_leak(void)
 {
         int count = 0;
@@ -598,7 +607,8 @@ static int __init check_early_ioremap_leak(void)
 }
 late_initcall(check_early_ioremap_leak);
 
-static void __init __iomem *__early_ioremap(unsigned long phys_addr, unsigned long size, pgprot_t prot)
+static void __init __iomem *
+__early_ioremap(unsigned long phys_addr, unsigned long size, pgprot_t prot)
 {
         unsigned long offset, last_addr;
         unsigned int nrpages;
@@ -664,9 +674,9 @@ static void __init __iomem *__early_ioremap(unsigned long phys_addr, unsigned lo
                 --nrpages;
         }
         if (early_ioremap_debug)
-                printk(KERN_CONT "%08lx + %08lx\n", offset, fix_to_virt(idx0));
+                printk(KERN_CONT "%08lx + %08lx\n", offset, slot_virt[slot]);
 
-        prev_map[slot] = (void __iomem *)(offset + fix_to_virt(idx0));
+        prev_map[slot] = (void __iomem *)(offset + slot_virt[slot]);
         return prev_map[slot];
 }
 
672 682
@@ -734,8 +744,3 @@ void __init early_iounmap(void __iomem *addr, unsigned long size)
734 } 744 }
735 prev_map[slot] = NULL; 745 prev_map[slot] = NULL;
736} 746}
737
738void __this_fixmap_does_not_exist(void)
739{
740 WARN_ON(1);
741}
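
The removed __this_fixmap_does_not_exist() stub existed only because __early_ioremap() used to call fix_to_virt() with a runtime-variable index. fix_to_virt() is built so that a non-constant or out-of-range index leaves behind a call to a deliberately undefined function, turning misuse into a link error; defining the stub on x86 had weakened that into a runtime WARN_ON. Precomputing slot_virt[] in early_ioremap_init() removes the variable-index call. A paraphrase of the check (a sketch of the era's asm/fixmap.h, not verbatim):

        extern void __this_fixmap_does_not_exist(void); /* intentionally left undefined */

        static __always_inline unsigned long fix_to_virt(const unsigned int idx)
        {
                /* for a compile-time-constant, in-range idx this branch folds away */
                if (idx >= __end_of_fixed_addresses)
                        __this_fixmap_does_not_exist();
                return __fix_to_virt(idx);      /* FIXADDR_TOP - (idx << PAGE_SHIFT) */
        }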
diff --git a/arch/x86/mm/kmmio.c b/arch/x86/mm/kmmio.c
index 9f205030d9aa..6a518dd08a36 100644
--- a/arch/x86/mm/kmmio.c
+++ b/arch/x86/mm/kmmio.c
@@ -451,23 +451,24 @@ static void rcu_free_kmmio_fault_pages(struct rcu_head *head)
 
 static void remove_kmmio_fault_pages(struct rcu_head *head)
 {
-        struct kmmio_delayed_release *dr = container_of(
-                                                head,
-                                                struct kmmio_delayed_release,
-                                                rcu);
+        struct kmmio_delayed_release *dr =
+                container_of(head, struct kmmio_delayed_release, rcu);
         struct kmmio_fault_page *p = dr->release_list;
         struct kmmio_fault_page **prevp = &dr->release_list;
         unsigned long flags;
+
         spin_lock_irqsave(&kmmio_lock, flags);
         while (p) {
-                if (!p->count)
+                if (!p->count) {
                         list_del_rcu(&p->list);
-                else
+                        prevp = &p->release_next;
+                } else {
                         *prevp = p->release_next;
-                        prevp = &p->release_next;
+                }
                 p = p->release_next;
         }
         spin_unlock_irqrestore(&kmmio_lock, flags);
+
         /* This is the real RCU destroy call. */
         call_rcu(&dr->rcu, rcu_free_kmmio_fault_pages);
 }
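
The while-loop change here is a real bug fix, not just restyling: the old code advanced prevp unconditionally, so splicing an entry out of the release list left prevp pointing into the removed element. The corrected shape is the standard filter-in-place pattern for a singly linked list, sketched generically below (illustrative, not taken from the patch):

        struct node { struct node *next; bool keep; };

        static void filter(struct node **prevp)
        {
                struct node *p = *prevp;

                while (p) {
                        if (p->keep)
                                prevp = &p->next;  /* keep: advance the link cursor */
                        else
                                *prevp = p->next;  /* drop: splice out; cursor stays put */
                        p = p->next;
                }
        }

In remove_kmmio_fault_pages(), "keep" corresponds to !p->count, i.e. pages still queued for release.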
diff --git a/arch/x86/mm/memtest.c b/arch/x86/mm/memtest.c
index 0bcd7883d036..605c8be06217 100644
--- a/arch/x86/mm/memtest.c
+++ b/arch/x86/mm/memtest.c
@@ -100,6 +100,9 @@ static int __init parse_memtest(char *arg)
 {
         if (arg)
                 memtest_pattern = simple_strtoul(arg, NULL, 0);
+        else
+                memtest_pattern = ARRAY_SIZE(patterns);
+
         return 0;
 }
 
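
With the new else branch, a bare memtest boot parameter selects every entry in patterns[]; previously the else branch was absent, so a bare memtest left memtest_pattern at its zero initializer. Usage, assuming the option name is unchanged:

        memtest=4       # cycle through the first 4 test patterns
        memtest         # after this patch: run all compiled-in patterns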