author     Linus Torvalds <torvalds@linux-foundation.org>  2009-12-14 15:36:46 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>  2009-12-14 15:36:46 -0500
commit     75b08038ceb62f3bd8935346679920f97c3cf9f6 (patch)
tree       66cbc62bb569996c90877bbf010285828f669c9a /arch/x86/kernel/cpu
parent     fb1beb29b5c531b12485d7c32174a77120590481 (diff)
parent     70fe440718d9f42bf963c2cffe12008eb5556165 (diff)
Merge branch 'x86-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'x86-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
x86, mce: Clean up thermal init by introducing intel_thermal_supported()
x86, mce: Thermal monitoring depends on APIC being enabled
x86: Gart: fix breakage due to IOMMU initialization cleanup
x86: Move swiotlb initialization before dma32_free_bootmem
x86: Fix build warning in arch/x86/mm/mmio-mod.c
x86: Remove usedac in feature-removal-schedule.txt
x86: Fix duplicated UV BAU interrupt vector
nvram: Fix write beyond end condition; prove to gcc copy is safe
mm: Adjust do_pages_stat() so gcc can see copy_from_user() is safe
x86: Limit the number of processor bootup messages
x86: Remove enabling x2apic message for every CPU
doc: Add documentation for bootloader_{type,version}
x86, msr: Add support for non-contiguous cpumasks
x86: Use find_e820() instead of hard coded trampoline address
x86, AMD: Fix stale cpuid4_info shared_map data in shared_cpu_map cpumasks
Trivial percpu-naming-introduced conflicts in arch/x86/kernel/cpu/intel_cacheinfo.c
Diffstat (limited to 'arch/x86/kernel/cpu')
-rw-r--r--  arch/x86/kernel/cpu/addon_cpuid_features.c | 15
-rw-r--r--  arch/x86/kernel/cpu/amd.c                   |  2
-rw-r--r--  arch/x86/kernel/cpu/common.c                |  8
-rw-r--r--  arch/x86/kernel/cpu/intel.c                 |  2
-rw-r--r--  arch/x86/kernel/cpu/intel_cacheinfo.c       | 13
-rw-r--r--  arch/x86/kernel/cpu/mcheck/therm_throt.c    | 20
6 files changed, 35 insertions, 25 deletions
diff --git a/arch/x86/kernel/cpu/addon_cpuid_features.c b/arch/x86/kernel/cpu/addon_cpuid_features.c
index c965e5212714..468489b57aae 100644
--- a/arch/x86/kernel/cpu/addon_cpuid_features.c
+++ b/arch/x86/kernel/cpu/addon_cpuid_features.c
@@ -74,6 +74,7 @@ void __cpuinit detect_extended_topology(struct cpuinfo_x86 *c)
 	unsigned int eax, ebx, ecx, edx, sub_index;
 	unsigned int ht_mask_width, core_plus_mask_width;
 	unsigned int core_select_mask, core_level_siblings;
+	static bool printed;
 
 	if (c->cpuid_level < 0xb)
 		return;
@@ -127,12 +128,14 @@ void __cpuinit detect_extended_topology(struct cpuinfo_x86 *c)
 
 	c->x86_max_cores = (core_level_siblings / smp_num_siblings);
 
-
-	printk(KERN_INFO "CPU: Physical Processor ID: %d\n",
-	       c->phys_proc_id);
-	if (c->x86_max_cores > 1)
-		printk(KERN_INFO "CPU: Processor Core ID: %d\n",
-		       c->cpu_core_id);
+	if (!printed) {
+		printk(KERN_INFO "CPU: Physical Processor ID: %d\n",
+		       c->phys_proc_id);
+		if (c->x86_max_cores > 1)
+			printk(KERN_INFO "CPU: Processor Core ID: %d\n",
+			       c->cpu_core_id);
+		printed = 1;
+	}
 	return;
 #endif
 }
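
The hunk above is the core idiom of the "Limit the number of processor bootup messages" change: a function-local static flag lets the boot CPU print the multi-line topology banner once and silences every CPU that runs the same detection code afterwards. A minimal userspace sketch of that idiom follows; report_topology() and its arguments are invented names, not kernel code, and the bare flag is only safe because the callers run one at a time, as they do during CPU bring-up.

#include <stdbool.h>
#include <stdio.h>

static void report_topology(int cpu, int package_id, int core_id)
{
	static bool printed;	/* zero-initialized, shared by every call */

	if (!printed) {
		printf("CPU: Physical Processor ID: %d\n", package_id);
		printf("CPU: Processor Core ID: %d\n", core_id);
		printed = true;
	}
	/* per-CPU bookkeeping would continue here for every caller */
	(void)cpu;
}

int main(void)
{
	for (int cpu = 0; cpu < 4; cpu++)
		report_topology(cpu, 0, cpu);	/* banner printed only once */
	return 0;
}
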
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index 7128b3799cec..8dc3ea145c97 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -375,8 +375,6 @@ static void __cpuinit srat_detect_node(struct cpuinfo_x86 *c)
 		node = nearby_node(apicid);
 	}
 	numa_set_node(cpu, node);
-
-	printk(KERN_INFO "CPU %d/0x%x -> Node %d\n", cpu, apicid, node);
 #endif
 }
 
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 20399b7b0c3f..4868e4a951ee 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -427,6 +427,7 @@ void __cpuinit detect_ht(struct cpuinfo_x86 *c)
 #ifdef CONFIG_X86_HT
 	u32 eax, ebx, ecx, edx;
 	int index_msb, core_bits;
+	static bool printed;
 
 	if (!cpu_has(c, X86_FEATURE_HT))
 		return;
@@ -442,7 +443,7 @@ void __cpuinit detect_ht(struct cpuinfo_x86 *c)
 	smp_num_siblings = (ebx & 0xff0000) >> 16;
 
 	if (smp_num_siblings == 1) {
-		printk(KERN_INFO "CPU: Hyper-Threading is disabled\n");
+		printk_once(KERN_INFO "CPU0: Hyper-Threading is disabled\n");
 		goto out;
 	}
 
@@ -469,11 +470,12 @@ void __cpuinit detect_ht(struct cpuinfo_x86 *c)
 		       ((1 << core_bits) - 1);
 
 out:
-	if ((c->x86_max_cores * smp_num_siblings) > 1) {
+	if (!printed && (c->x86_max_cores * smp_num_siblings) > 1) {
 		printk(KERN_INFO "CPU: Physical Processor ID: %d\n",
 		       c->phys_proc_id);
 		printk(KERN_INFO "CPU: Processor Core ID: %d\n",
 		       c->cpu_core_id);
+		printed = 1;
 	}
 #endif
 }
@@ -1115,7 +1117,7 @@ void __cpuinit cpu_init(void)
 	if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask))
 		panic("CPU#%d already initialized!\n", cpu);
 
-	printk(KERN_INFO "Initializing CPU#%d\n", cpu);
+	pr_debug("Initializing CPU#%d\n", cpu);
 
 	clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);
 
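
The common.c hunks add two related tools for the same goal: printk_once() for a message that should appear exactly once per boot (the Hyper-Threading notice, now labelled CPU0), and pr_debug() to demote the per-CPU "Initializing CPU#%d" line to debug logging. printk_once() is essentially the static-flag latch wrapped in a macro; a rough userspace emulation is sketched below, where print_once() is an invented name that only mimics the once-per-call-site behaviour, not the kernel's log levels.

#include <stdbool.h>
#include <stdio.h>

#define print_once(...)					\
	do {						\
		static bool __done;			\
		if (!__done) {				\
			__done = true;			\
			printf(__VA_ARGS__);		\
		}					\
	} while (0)

int main(void)
{
	/* only the first iteration produces output */
	for (int cpu = 0; cpu < 8; cpu++)
		print_once("CPU0: Hyper-Threading is disabled\n");
	return 0;
}

Because the static flag lives inside the macro body, each call site gets its own latch, which is exactly what a once-per-boot message needs.
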
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index c900b73f9224..9c31e8b09d2c 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -270,8 +270,6 @@ static void __cpuinit srat_detect_node(struct cpuinfo_x86 *c)
 		node = cpu_to_node(cpu);
 	}
 	numa_set_node(cpu, node);
-
-	printk(KERN_INFO "CPU %d/0x%x -> Node %d\n", cpu, apicid, node);
 #endif
 }
 
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index 0c06bca2a1dc..fc6c8ef92dcc 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -507,18 +507,19 @@ static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
 {
 	struct _cpuid4_info *this_leaf, *sibling_leaf;
 	unsigned long num_threads_sharing;
-	int index_msb, i;
+	int index_msb, i, sibling;
 	struct cpuinfo_x86 *c = &cpu_data(cpu);
 
 	if ((index == 3) && (c->x86_vendor == X86_VENDOR_AMD)) {
-		struct cpuinfo_x86 *d;
-		for_each_online_cpu(i) {
+		for_each_cpu(i, c->llc_shared_map) {
 			if (!per_cpu(ici_cpuid4_info, i))
 				continue;
-			d = &cpu_data(i);
 			this_leaf = CPUID4_INFO_IDX(i, index);
-			cpumask_copy(to_cpumask(this_leaf->shared_cpu_map),
-				     d->llc_shared_map);
+			for_each_cpu(sibling, c->llc_shared_map) {
+				if (!cpu_online(sibling))
+					continue;
+				set_bit(sibling, this_leaf->shared_cpu_map);
+			}
 		}
 		return;
 	}
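
The intel_cacheinfo.c change (the "stale cpuid4_info shared_map data" fix) stops copying another CPU's cached llc_shared_map wholesale and instead sets one bit per online sibling in the current last-level-cache domain, so CPUs that were taken offline no longer leave stale bits in shared_cpu_map. The sketch below models that rebuild in plain C with 64-bit masks; build_l3_shared_maps(), for_each_set_cpu() and the mask layout are illustrative inventions, not the kernel API.

#include <stdint.h>
#include <stdio.h>

/* Iterate over the CPUs whose bits are set in a 64-bit mask. */
#define for_each_set_cpu(cpu, mask) \
	for ((cpu) = 0; (cpu) < 64; (cpu)++) \
		if ((mask) & (1ULL << (cpu)))

static uint64_t shared_cpu_map[64];	/* per-CPU "who shares my L3" mask */

static void build_l3_shared_maps(uint64_t llc_siblings, uint64_t online)
{
	int i, sibling;

	/* Only online CPUs have cache info allocated, mirroring the
	 * per_cpu(ici_cpuid4_info, i) check in the hunk above. */
	for_each_set_cpu(i, llc_siblings) {
		if (!(online & (1ULL << i)))
			continue;
		for_each_set_cpu(sibling, llc_siblings) {
			if (!(online & (1ULL << sibling)))
				continue;	/* skip offlined siblings */
			shared_cpu_map[i] |= 1ULL << sibling;
		}
	}
}

int main(void)
{
	/* CPUs 0-3 share one L3; CPU 2 is currently offline. */
	build_l3_shared_maps(0x0f, 0x0b);
	printf("CPU0 shares its L3 with mask 0x%llx\n",
	       (unsigned long long)shared_cpu_map[0]);	/* prints 0xb */
	return 0;
}

With CPU 2 offline, CPU 0's map comes out as 0xb (CPUs 0, 1 and 3) instead of inheriting whatever an earlier wholesale copy happened to contain.
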
diff --git a/arch/x86/kernel/cpu/mcheck/therm_throt.c b/arch/x86/kernel/cpu/mcheck/therm_throt.c
index 4fef985fc221..81c499eceb21 100644
--- a/arch/x86/kernel/cpu/mcheck/therm_throt.c
+++ b/arch/x86/kernel/cpu/mcheck/therm_throt.c
@@ -256,6 +256,16 @@ asmlinkage void smp_thermal_interrupt(struct pt_regs *regs)
 	ack_APIC_irq();
 }
 
+/* Thermal monitoring depends on APIC, ACPI and clock modulation */
+static int intel_thermal_supported(struct cpuinfo_x86 *c)
+{
+	if (!cpu_has_apic)
+		return 0;
+	if (!cpu_has(c, X86_FEATURE_ACPI) || !cpu_has(c, X86_FEATURE_ACC))
+		return 0;
+	return 1;
+}
+
 void __init mcheck_intel_therm_init(void)
 {
 	/*
@@ -263,8 +273,7 @@ void __init mcheck_intel_therm_init(void)
 	 * LVT value on BSP and use that value to restore APs' thermal LVT
 	 * entry BIOS programmed later
 	 */
-	if (cpu_has(&boot_cpu_data, X86_FEATURE_ACPI) &&
-	    cpu_has(&boot_cpu_data, X86_FEATURE_ACC))
+	if (intel_thermal_supported(&boot_cpu_data))
 		lvtthmr_init = apic_read(APIC_LVTTHMR);
 }
 
@@ -274,8 +283,7 @@ void intel_init_thermal(struct cpuinfo_x86 *c)
 	int tm2 = 0;
 	u32 l, h;
 
-	/* Thermal monitoring depends on ACPI and clock modulation*/
-	if (!cpu_has(c, X86_FEATURE_ACPI) || !cpu_has(c, X86_FEATURE_ACC))
+	if (!intel_thermal_supported(c))
 		return;
 
 	/*
@@ -339,8 +347,8 @@ void intel_init_thermal(struct cpuinfo_x86 *c)
 	l = apic_read(APIC_LVTTHMR);
 	apic_write(APIC_LVTTHMR, l & ~APIC_LVT_MASKED);
 
-	printk(KERN_INFO "CPU%d: Thermal monitoring enabled (%s)\n",
-	       cpu, tm2 ? "TM2" : "TM1");
+	printk_once(KERN_INFO "CPU0: Thermal monitoring enabled (%s)\n",
+		    tm2 ? "TM2" : "TM1");
 
 	/* enable thermal throttle processing */
 	atomic_set(&therm_throt_en, 1);
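
The therm_throt.c hunks cover both thermal items in the shortlog: the open-coded ACPI/ACC feature tests are folded into a single intel_thermal_supported() predicate, and that predicate also adds the APIC check the early mcheck_intel_therm_init() path was missing, so the thermal LVT register is never read when no local APIC is available. The sketch below shows the shape of that refactoring outside the kernel; struct cpu_features, thermal_supported() and the printf() placeholders are stand-ins for cpuinfo_x86, the cpu_has() tests and the real register accesses.

#include <stdbool.h>
#include <stdio.h>

struct cpu_features {
	bool apic;	/* local APIC present and enabled */
	bool acpi;	/* thermal monitor (X86_FEATURE_ACPI) */
	bool acc;	/* clock modulation (X86_FEATURE_ACC) */
};

/* Thermal monitoring depends on APIC, ACPI and clock modulation. */
static bool thermal_supported(const struct cpu_features *c)
{
	return c->apic && c->acpi && c->acc;
}

static void early_thermal_init(const struct cpu_features *boot_cpu)
{
	/* both call sites now share one capability check */
	if (thermal_supported(boot_cpu))
		printf("saving BSP thermal LVT for the APs\n");
}

static void thermal_init(const struct cpu_features *c)
{
	if (!thermal_supported(c))
		return;
	printf("Thermal monitoring enabled\n");
}

int main(void)
{
	struct cpu_features cpu = { .apic = true, .acpi = true, .acc = true };

	early_thermal_init(&cpu);
	thermal_init(&cpu);
	return 0;
}
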