path: root/arch/x86
author    Linus Torvalds <torvalds@linux-foundation.org>  2009-12-14 15:36:46 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>  2009-12-14 15:36:46 -0500
commit    75b08038ceb62f3bd8935346679920f97c3cf9f6 (patch)
tree      66cbc62bb569996c90877bbf010285828f669c9a /arch/x86
parent    fb1beb29b5c531b12485d7c32174a77120590481 (diff)
parent    70fe440718d9f42bf963c2cffe12008eb5556165 (diff)
Merge branch 'x86-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'x86-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  x86, mce: Clean up thermal init by introducing intel_thermal_supported()
  x86, mce: Thermal monitoring depends on APIC being enabled
  x86: Gart: fix breakage due to IOMMU initialization cleanup
  x86: Move swiotlb initialization before dma32_free_bootmem
  x86: Fix build warning in arch/x86/mm/mmio-mod.c
  x86: Remove usedac in feature-removal-schedule.txt
  x86: Fix duplicated UV BAU interrupt vector
  nvram: Fix write beyond end condition; prove to gcc copy is safe
  mm: Adjust do_pages_stat() so gcc can see copy_from_user() is safe
  x86: Limit the number of processor bootup messages
  x86: Remove enabling x2apic message for every CPU
  doc: Add documentation for bootloader_{type,version}
  x86, msr: Add support for non-contiguous cpumasks
  x86: Use find_e820() instead of hard coded trampoline address
  x86, AMD: Fix stale cpuid4_info shared_map data in shared_cpu_map cpumasks

Trivial percpu-naming-introduced conflicts in arch/x86/kernel/cpu/intel_cacheinfo.c
Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/include/asm/irq_vectors.h     |  2
-rw-r--r--  arch/x86/include/asm/msr.h             |  3
-rw-r--r--  arch/x86/include/asm/trampoline.h      |  1
-rw-r--r--  arch/x86/kernel/aperture_64.c          | 11
-rw-r--r--  arch/x86/kernel/apic/apic.c            |  2
-rw-r--r--  arch/x86/kernel/cpu/addon_cpuid_features.c | 15
-rw-r--r--  arch/x86/kernel/cpu/amd.c              |  2
-rw-r--r--  arch/x86/kernel/cpu/common.c           |  8
-rw-r--r--  arch/x86/kernel/cpu/intel.c            |  2
-rw-r--r--  arch/x86/kernel/cpu/intel_cacheinfo.c  | 13
-rw-r--r--  arch/x86/kernel/cpu/mcheck/therm_throt.c | 20
-rw-r--r--  arch/x86/kernel/e820.c                 | 11
-rw-r--r--  arch/x86/kernel/head32.c               |  2
-rw-r--r--  arch/x86/kernel/head64.c               |  2
-rw-r--r--  arch/x86/kernel/mpparse.c              |  3
-rw-r--r--  arch/x86/kernel/pci-dma.c              |  5
-rw-r--r--  arch/x86/kernel/pci-gart_64.c          |  3
-rw-r--r--  arch/x86/kernel/setup.c                | 13
-rw-r--r--  arch/x86/kernel/smpboot.c              | 45
-rw-r--r--  arch/x86/kernel/trampoline.c           | 20
-rw-r--r--  arch/x86/lib/msr.c                     | 26
-rw-r--r--  arch/x86/mm/mmio-mod.c                 |  2
22 files changed, 133 insertions, 78 deletions
diff --git a/arch/x86/include/asm/irq_vectors.h b/arch/x86/include/asm/irq_vectors.h
index 6a635bd39867..4611f085cd43 100644
--- a/arch/x86/include/asm/irq_vectors.h
+++ b/arch/x86/include/asm/irq_vectors.h
@@ -113,7 +113,7 @@
  */
 #define LOCAL_PENDING_VECTOR		0xec
 
-#define UV_BAU_MESSAGE			0xec
+#define UV_BAU_MESSAGE			0xea
 
 /*
  * Self IPI vector for machine checks
diff --git a/arch/x86/include/asm/msr.h b/arch/x86/include/asm/msr.h
index 5bef931f8b14..2d228fc9b4b7 100644
--- a/arch/x86/include/asm/msr.h
+++ b/arch/x86/include/asm/msr.h
@@ -244,6 +244,9 @@ do { \
 
 #define write_rdtscp_aux(val) wrmsr(0xc0000103, (val), 0)
 
+struct msr *msrs_alloc(void);
+void msrs_free(struct msr *msrs);
+
 #ifdef CONFIG_SMP
 int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
 int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
diff --git a/arch/x86/include/asm/trampoline.h b/arch/x86/include/asm/trampoline.h
index 90f06c25221d..cb507bb05d79 100644
--- a/arch/x86/include/asm/trampoline.h
+++ b/arch/x86/include/asm/trampoline.h
@@ -16,7 +16,6 @@ extern unsigned long initial_code;
 extern unsigned long initial_gs;
 
 #define TRAMPOLINE_SIZE roundup(trampoline_end - trampoline_data, PAGE_SIZE)
-#define TRAMPOLINE_BASE 0x6000
 
 extern unsigned long setup_trampoline(void);
 extern void __init reserve_trampoline_memory(void);
diff --git a/arch/x86/kernel/aperture_64.c b/arch/x86/kernel/aperture_64.c
index e0dfb6856aa2..3704997e8b25 100644
--- a/arch/x86/kernel/aperture_64.c
+++ b/arch/x86/kernel/aperture_64.c
@@ -280,7 +280,8 @@ void __init early_gart_iommu_check(void)
  * or BIOS forget to put that in reserved.
  * try to update e820 to make that region as reserved.
  */
-	int i, fix, slot;
+	u32 agp_aper_base = 0, agp_aper_order = 0;
+	int i, fix, slot, valid_agp = 0;
 	u32 ctl;
 	u32 aper_size = 0, aper_order = 0, last_aper_order = 0;
 	u64 aper_base = 0, last_aper_base = 0;
@@ -290,6 +291,8 @@ void __init early_gart_iommu_check(void)
 		return;
 
 	/* This is mostly duplicate of iommu_hole_init */
+	agp_aper_base = search_agp_bridge(&agp_aper_order, &valid_agp);
+
 	fix = 0;
 	for (i = 0; i < ARRAY_SIZE(bus_dev_ranges); i++) {
 		int bus;
@@ -342,10 +345,10 @@ void __init early_gart_iommu_check(void)
 		}
 	}
 
-	if (!fix)
+	if (valid_agp)
 		return;
 
-	/* different nodes have different setting, disable them all at first*/
+	/* disable them all at first */
 	for (i = 0; i < ARRAY_SIZE(bus_dev_ranges); i++) {
 		int bus;
 		int dev_base, dev_limit;
@@ -458,8 +461,6 @@ out:
 
 	if (aper_alloc) {
 		/* Got the aperture from the AGP bridge */
-	} else if (!valid_agp) {
-		/* Do nothing */
 	} else if ((!no_iommu && max_pfn > MAX_DMA32_PFN) ||
 		   force_iommu ||
 		   valid_agp ||
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index efb2b9cd132c..aa57c079c98f 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -1341,7 +1341,7 @@ void enable_x2apic(void)
 
 	rdmsr(MSR_IA32_APICBASE, msr, msr2);
 	if (!(msr & X2APIC_ENABLE)) {
-		pr_info("Enabling x2apic\n");
+		printk_once(KERN_INFO "Enabling x2apic\n");
 		wrmsr(MSR_IA32_APICBASE, msr | X2APIC_ENABLE, 0);
 	}
 }
diff --git a/arch/x86/kernel/cpu/addon_cpuid_features.c b/arch/x86/kernel/cpu/addon_cpuid_features.c
index c965e5212714..468489b57aae 100644
--- a/arch/x86/kernel/cpu/addon_cpuid_features.c
+++ b/arch/x86/kernel/cpu/addon_cpuid_features.c
@@ -74,6 +74,7 @@ void __cpuinit detect_extended_topology(struct cpuinfo_x86 *c)
 	unsigned int eax, ebx, ecx, edx, sub_index;
 	unsigned int ht_mask_width, core_plus_mask_width;
 	unsigned int core_select_mask, core_level_siblings;
+	static bool printed;
 
 	if (c->cpuid_level < 0xb)
 		return;
@@ -127,12 +128,14 @@ void __cpuinit detect_extended_topology(struct cpuinfo_x86 *c)
 
 	c->x86_max_cores = (core_level_siblings / smp_num_siblings);
 
-
-	printk(KERN_INFO  "CPU: Physical Processor ID: %d\n",
-	       c->phys_proc_id);
-	if (c->x86_max_cores > 1)
-		printk(KERN_INFO  "CPU: Processor Core ID: %d\n",
-		       c->cpu_core_id);
+	if (!printed) {
+		printk(KERN_INFO  "CPU: Physical Processor ID: %d\n",
+		       c->phys_proc_id);
+		if (c->x86_max_cores > 1)
+			printk(KERN_INFO  "CPU: Processor Core ID: %d\n",
+			       c->cpu_core_id);
+		printed = 1;
+	}
 	return;
 #endif
 }
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index 7128b3799cec..8dc3ea145c97 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -375,8 +375,6 @@ static void __cpuinit srat_detect_node(struct cpuinfo_x86 *c)
 		node = nearby_node(apicid);
 	}
 	numa_set_node(cpu, node);
-
-	printk(KERN_INFO "CPU %d/0x%x -> Node %d\n", cpu, apicid, node);
 #endif
 }
 
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 20399b7b0c3f..4868e4a951ee 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -427,6 +427,7 @@ void __cpuinit detect_ht(struct cpuinfo_x86 *c)
 #ifdef CONFIG_X86_HT
 	u32 eax, ebx, ecx, edx;
 	int index_msb, core_bits;
+	static bool printed;
 
 	if (!cpu_has(c, X86_FEATURE_HT))
 		return;
@@ -442,7 +443,7 @@ void __cpuinit detect_ht(struct cpuinfo_x86 *c)
 	smp_num_siblings = (ebx & 0xff0000) >> 16;
 
 	if (smp_num_siblings == 1) {
-		printk(KERN_INFO "CPU: Hyper-Threading is disabled\n");
+		printk_once(KERN_INFO "CPU0: Hyper-Threading is disabled\n");
 		goto out;
 	}
 
@@ -469,11 +470,12 @@ void __cpuinit detect_ht(struct cpuinfo_x86 *c)
 				       ((1 << core_bits) - 1);
 
 out:
-	if ((c->x86_max_cores * smp_num_siblings) > 1) {
+	if (!printed && (c->x86_max_cores * smp_num_siblings) > 1) {
 		printk(KERN_INFO  "CPU: Physical Processor ID: %d\n",
 		       c->phys_proc_id);
 		printk(KERN_INFO  "CPU: Processor Core ID: %d\n",
 		       c->cpu_core_id);
+		printed = 1;
 	}
 #endif
 }
@@ -1115,7 +1117,7 @@ void __cpuinit cpu_init(void)
 	if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask))
 		panic("CPU#%d already initialized!\n", cpu);
 
-	printk(KERN_INFO "Initializing CPU#%d\n", cpu);
+	pr_debug("Initializing CPU#%d\n", cpu);
 
 	clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);
 
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index c900b73f9224..9c31e8b09d2c 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -270,8 +270,6 @@ static void __cpuinit srat_detect_node(struct cpuinfo_x86 *c)
 		node = cpu_to_node(cpu);
 	}
 	numa_set_node(cpu, node);
-
-	printk(KERN_INFO "CPU %d/0x%x -> Node %d\n", cpu, apicid, node);
 #endif
 }
 
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index 0c06bca2a1dc..fc6c8ef92dcc 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -507,18 +507,19 @@ static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
 {
 	struct _cpuid4_info *this_leaf, *sibling_leaf;
 	unsigned long num_threads_sharing;
-	int index_msb, i;
+	int index_msb, i, sibling;
 	struct cpuinfo_x86 *c = &cpu_data(cpu);
 
 	if ((index == 3) && (c->x86_vendor == X86_VENDOR_AMD)) {
-		struct cpuinfo_x86 *d;
-		for_each_online_cpu(i) {
+		for_each_cpu(i, c->llc_shared_map) {
 			if (!per_cpu(ici_cpuid4_info, i))
 				continue;
-			d = &cpu_data(i);
 			this_leaf = CPUID4_INFO_IDX(i, index);
-			cpumask_copy(to_cpumask(this_leaf->shared_cpu_map),
-				     d->llc_shared_map);
+			for_each_cpu(sibling, c->llc_shared_map) {
+				if (!cpu_online(sibling))
+					continue;
+				set_bit(sibling, this_leaf->shared_cpu_map);
+			}
 		}
 		return;
 	}
diff --git a/arch/x86/kernel/cpu/mcheck/therm_throt.c b/arch/x86/kernel/cpu/mcheck/therm_throt.c
index 4fef985fc221..81c499eceb21 100644
--- a/arch/x86/kernel/cpu/mcheck/therm_throt.c
+++ b/arch/x86/kernel/cpu/mcheck/therm_throt.c
@@ -256,6 +256,16 @@ asmlinkage void smp_thermal_interrupt(struct pt_regs *regs)
 	ack_APIC_irq();
 }
 
+/* Thermal monitoring depends on APIC, ACPI and clock modulation */
+static int intel_thermal_supported(struct cpuinfo_x86 *c)
+{
+	if (!cpu_has_apic)
+		return 0;
+	if (!cpu_has(c, X86_FEATURE_ACPI) || !cpu_has(c, X86_FEATURE_ACC))
+		return 0;
+	return 1;
+}
+
 void __init mcheck_intel_therm_init(void)
 {
 	/*
@@ -263,8 +273,7 @@ void __init mcheck_intel_therm_init(void)
 	 * LVT value on BSP and use that value to restore APs' thermal LVT
 	 * entry BIOS programmed later
 	 */
-	if (cpu_has(&boot_cpu_data, X86_FEATURE_ACPI) &&
-	    cpu_has(&boot_cpu_data, X86_FEATURE_ACC))
+	if (intel_thermal_supported(&boot_cpu_data))
 		lvtthmr_init = apic_read(APIC_LVTTHMR);
 }
 
@@ -274,8 +283,7 @@ void intel_init_thermal(struct cpuinfo_x86 *c)
 	int tm2 = 0;
 	u32 l, h;
 
-	/* Thermal monitoring depends on ACPI and clock modulation*/
-	if (!cpu_has(c, X86_FEATURE_ACPI) || !cpu_has(c, X86_FEATURE_ACC))
+	if (!intel_thermal_supported(c))
 		return;
 
 	/*
@@ -339,8 +347,8 @@ void intel_init_thermal(struct cpuinfo_x86 *c)
 	l = apic_read(APIC_LVTTHMR);
 	apic_write(APIC_LVTTHMR, l & ~APIC_LVT_MASKED);
 
-	printk(KERN_INFO "CPU%d: Thermal monitoring enabled (%s)\n",
-	       cpu, tm2 ? "TM2" : "TM1");
+	printk_once(KERN_INFO "CPU0: Thermal monitoring enabled (%s)\n",
+		    tm2 ? "TM2" : "TM1");
 
 	/* enable thermal throttle processing */
 	atomic_set(&therm_throt_en, 1);
diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
index d17d482a04f4..f50447d961c0 100644
--- a/arch/x86/kernel/e820.c
+++ b/arch/x86/kernel/e820.c
@@ -732,7 +732,16 @@ struct early_res {
 	char overlap_ok;
 };
 static struct early_res early_res[MAX_EARLY_RES] __initdata = {
-	{ 0, PAGE_SIZE, "BIOS data page" },	/* BIOS data page */
+	{ 0, PAGE_SIZE, "BIOS data page", 1 },	/* BIOS data page */
+#ifdef CONFIG_X86_32
+	/*
+	 * But first pinch a few for the stack/trampoline stuff
+	 * FIXME: Don't need the extra page at 4K, but need to fix
+	 * trampoline before removing it. (see the GDT stuff)
+	 */
+	{ PAGE_SIZE, PAGE_SIZE, "EX TRAMPOLINE", 1 },
+#endif
+
 	{}
 };
 
diff --git a/arch/x86/kernel/head32.c b/arch/x86/kernel/head32.c
index 4f8e2507e8f3..5051b94c9069 100644
--- a/arch/x86/kernel/head32.c
+++ b/arch/x86/kernel/head32.c
@@ -29,8 +29,6 @@ static void __init i386_default_early_setup(void)
 
 void __init i386_start_kernel(void)
 {
-	reserve_trampoline_memory();
-
 	reserve_early(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS");
 
 #ifdef CONFIG_BLK_DEV_INITRD
diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
index 0b06cd778fd9..b5a9896ca1e7 100644
--- a/arch/x86/kernel/head64.c
+++ b/arch/x86/kernel/head64.c
@@ -98,8 +98,6 @@ void __init x86_64_start_reservations(char *real_mode_data)
 {
 	copy_bootdata(__va(real_mode_data));
 
-	reserve_trampoline_memory();
-
 	reserve_early(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS");
 
 #ifdef CONFIG_BLK_DEV_INITRD
diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c
index 35a57c963df9..40b54ceb68b5 100644
--- a/arch/x86/kernel/mpparse.c
+++ b/arch/x86/kernel/mpparse.c
@@ -945,9 +945,6 @@ void __init early_reserve_e820_mpc_new(void)
 {
 	if (enable_update_mptable && alloc_mptable) {
 		u64 startt = 0;
-#ifdef CONFIG_X86_TRAMPOLINE
-		startt = TRAMPOLINE_BASE;
-#endif
 		mpc_new_phys = early_reserve_e820(startt, mpc_new_length, 4);
 	}
 }
diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c
index afcc58b69c7c..fcc2f2bfa39c 100644
--- a/arch/x86/kernel/pci-dma.c
+++ b/arch/x86/kernel/pci-dma.c
@@ -120,11 +120,14 @@ static void __init dma32_free_bootmem(void)
 
 void __init pci_iommu_alloc(void)
 {
+	int use_swiotlb;
+
+	use_swiotlb = pci_swiotlb_init();
 #ifdef CONFIG_X86_64
 	/* free the range so iommu could get some range less than 4G */
 	dma32_free_bootmem();
 #endif
-	if (pci_swiotlb_init())
+	if (use_swiotlb)
 		return;
 
 	gart_iommu_hole_init();
diff --git a/arch/x86/kernel/pci-gart_64.c b/arch/x86/kernel/pci-gart_64.c
index e6a0d402f171..56c0e730d3fe 100644
--- a/arch/x86/kernel/pci-gart_64.c
+++ b/arch/x86/kernel/pci-gart_64.c
@@ -710,7 +710,8 @@ static void gart_iommu_shutdown(void)
 	struct pci_dev *dev;
 	int i;
 
-	if (no_agp)
+	/* don't shutdown it if there is AGP installed */
+	if (!no_agp)
 		return;
 
 	for (i = 0; i < num_k8_northbridges; i++) {
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 946a311a25c9..f7b8b9894b22 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -73,6 +73,7 @@
 
 #include <asm/mtrr.h>
 #include <asm/apic.h>
+#include <asm/trampoline.h>
 #include <asm/e820.h>
 #include <asm/mpspec.h>
 #include <asm/setup.h>
@@ -875,6 +876,13 @@ void __init setup_arch(char **cmdline_p)
 
 	reserve_brk();
 
+	/*
+	 * Find and reserve possible boot-time SMP configuration:
+	 */
+	find_smp_config();
+
+	reserve_trampoline_memory();
+
 #ifdef CONFIG_ACPI_SLEEP
 	/*
 	 * Reserve low memory region for sleep support.
@@ -921,11 +929,6 @@ void __init setup_arch(char **cmdline_p)
 
 	early_acpi_boot_init();
 
-	/*
-	 * Find and reserve possible boot-time SMP configuration:
-	 */
-	find_smp_config();
-
 #ifdef CONFIG_ACPI_NUMA
 	/*
 	 * Parse SRAT to discover nodes.
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 29e6744f51e3..678d0b8c26f3 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -671,6 +671,26 @@ static void __cpuinit do_fork_idle(struct work_struct *work)
 	complete(&c_idle->done);
 }
 
+/* reduce the number of lines printed when booting a large cpu count system */
+static void __cpuinit announce_cpu(int cpu, int apicid)
+{
+	static int current_node = -1;
+	int node = cpu_to_node(cpu);
+
+	if (system_state == SYSTEM_BOOTING) {
+		if (node != current_node) {
+			if (current_node > (-1))
+				pr_cont(" Ok.\n");
+			current_node = node;
+			pr_info("Booting Node %3d, Processors ", node);
+		}
+		pr_cont(" #%d%s", cpu, cpu == (nr_cpu_ids - 1) ? " Ok.\n" : "");
+		return;
+	} else
+		pr_info("Booting Node %d Processor %d APIC 0x%x\n",
+			node, cpu, apicid);
+}
+
 /*
  * NOTE - on most systems this is a PHYSICAL apic ID, but on multiquad
  * (ie clustered apic addressing mode), this is a LOGICAL apic ID.
@@ -737,9 +757,8 @@ do_rest:
 	/* start_ip had better be page-aligned! */
 	start_ip = setup_trampoline();
 
 	/* So we see what's up */
-	printk(KERN_INFO "Booting processor %d APIC 0x%x ip 0x%lx\n",
-			  cpu, apicid, start_ip);
+	announce_cpu(cpu, apicid);
 
 	/*
 	 * This grunge runs the startup process for
@@ -788,21 +807,17 @@ do_rest:
 			udelay(100);
 		}
 
-	if (cpumask_test_cpu(cpu, cpu_callin_mask)) {
-		/* number CPUs logically, starting from 1 (BSP is 0) */
-		pr_debug("OK.\n");
-		printk(KERN_INFO "CPU%d: ", cpu);
-		print_cpu_info(&cpu_data(cpu));
-		pr_debug("CPU has booted.\n");
-	} else {
+	if (cpumask_test_cpu(cpu, cpu_callin_mask))
+		pr_debug("CPU%d: has booted.\n", cpu);
+	else {
 		boot_error = 1;
 		if (*((volatile unsigned char *)trampoline_base)
 				== 0xA5)
 			/* trampoline started but...? */
-			printk(KERN_ERR "Stuck ??\n");
+			pr_err("CPU%d: Stuck ??\n", cpu);
 		else
 			/* trampoline code not run */
-			printk(KERN_ERR "Not responding.\n");
+			pr_err("CPU%d: Not responding.\n", cpu);
 		if (apic->inquire_remote_apic)
 			apic->inquire_remote_apic(apicid);
 	}
@@ -1293,14 +1308,16 @@ void native_cpu_die(unsigned int cpu)
 	for (i = 0; i < 10; i++) {
 		/* They ack this in play_dead by setting CPU_DEAD */
 		if (per_cpu(cpu_state, cpu) == CPU_DEAD) {
-			printk(KERN_INFO "CPU %d is now offline\n", cpu);
+			if (system_state == SYSTEM_RUNNING)
+				pr_info("CPU %u is now offline\n", cpu);
+
 			if (1 == num_online_cpus())
 				alternatives_smp_switch(0);
 			return;
 		}
 		msleep(100);
 	}
-	printk(KERN_ERR "CPU %u didn't die...\n", cpu);
+	pr_err("CPU %u didn't die...\n", cpu);
 }
 
 void play_dead_common(void)
diff --git a/arch/x86/kernel/trampoline.c b/arch/x86/kernel/trampoline.c
index cd022121cab6..c652ef62742d 100644
--- a/arch/x86/kernel/trampoline.c
+++ b/arch/x86/kernel/trampoline.c
@@ -12,21 +12,19 @@
 #endif
 
 /* ready for x86_64 and x86 */
-unsigned char *__trampinitdata trampoline_base = __va(TRAMPOLINE_BASE);
+unsigned char *__trampinitdata trampoline_base;
 
 void __init reserve_trampoline_memory(void)
 {
-#ifdef CONFIG_X86_32
-	/*
-	 * But first pinch a few for the stack/trampoline stuff
-	 * FIXME: Don't need the extra page at 4K, but need to fix
-	 * trampoline before removing it. (see the GDT stuff)
-	 */
-	reserve_early(PAGE_SIZE, PAGE_SIZE + PAGE_SIZE, "EX TRAMPOLINE");
-#endif
+	unsigned long mem;
+
 	/* Has to be in very low memory so we can execute real-mode AP code. */
-	reserve_early(TRAMPOLINE_BASE, TRAMPOLINE_BASE + TRAMPOLINE_SIZE,
-		      "TRAMPOLINE");
+	mem = find_e820_area(0, 1<<20, TRAMPOLINE_SIZE, PAGE_SIZE);
+	if (mem == -1L)
+		panic("Cannot allocate trampoline\n");
+
+	trampoline_base = __va(mem);
+	reserve_early(mem, mem + TRAMPOLINE_SIZE, "TRAMPOLINE");
 }
 
 /*
diff --git a/arch/x86/lib/msr.c b/arch/x86/lib/msr.c
index 41628b104b9e..872834177937 100644
--- a/arch/x86/lib/msr.c
+++ b/arch/x86/lib/msr.c
@@ -7,7 +7,6 @@ struct msr_info {
 	u32 msr_no;
 	struct msr reg;
 	struct msr *msrs;
-	int off;
 	int err;
 };
 
@@ -18,7 +17,7 @@ static void __rdmsr_on_cpu(void *info)
 	int this_cpu = raw_smp_processor_id();
 
 	if (rv->msrs)
-		reg = &rv->msrs[this_cpu - rv->off];
+		reg = per_cpu_ptr(rv->msrs, this_cpu);
 	else
 		reg = &rv->reg;
 
@@ -32,7 +31,7 @@ static void __wrmsr_on_cpu(void *info)
 	int this_cpu = raw_smp_processor_id();
 
 	if (rv->msrs)
-		reg = &rv->msrs[this_cpu - rv->off];
+		reg = per_cpu_ptr(rv->msrs, this_cpu);
 	else
 		reg = &rv->reg;
 
@@ -80,7 +79,6 @@ static void __rwmsr_on_cpus(const struct cpumask *mask, u32 msr_no,
 
 	memset(&rv, 0, sizeof(rv));
 
-	rv.off = cpumask_first(mask);
 	rv.msrs = msrs;
 	rv.msr_no = msr_no;
 
@@ -120,6 +118,26 @@ void wrmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs)
 }
 EXPORT_SYMBOL(wrmsr_on_cpus);
 
+struct msr *msrs_alloc(void)
+{
+	struct msr *msrs = NULL;
+
+	msrs = alloc_percpu(struct msr);
+	if (!msrs) {
+		pr_warning("%s: error allocating msrs\n", __func__);
+		return NULL;
+	}
+
+	return msrs;
+}
+EXPORT_SYMBOL(msrs_alloc);
+
+void msrs_free(struct msr *msrs)
+{
+	free_percpu(msrs);
+}
+EXPORT_SYMBOL(msrs_free);
+
 /* These "safe" variants are slower and should be used when the target MSR
    may not actually exist. */
 static void __rdmsr_safe_on_cpu(void *info)
diff --git a/arch/x86/mm/mmio-mod.c b/arch/x86/mm/mmio-mod.c
index 4c765e9c4664..34a3291ca103 100644
--- a/arch/x86/mm/mmio-mod.c
+++ b/arch/x86/mm/mmio-mod.c
@@ -20,7 +20,7 @@
  * Derived from the read-mod example from relay-examples by Tom Zanussi.
  */
 
-#define pr_fmt(fmt) "mmiotrace: "
+#define pr_fmt(fmt) "mmiotrace: " fmt
 
 #define DEBUG 1
 