Diffstat (limited to 'arch/x86/kernel/cpu')
 -rw-r--r--  arch/x86/kernel/cpu/addon_cpuid_features.c        |  54
 -rw-r--r--  arch/x86/kernel/cpu/amd.c                          |   2
 -rw-r--r--  arch/x86/kernel/cpu/common.c                       | 257
 -rw-r--r--  arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c         |   2
 -rw-r--r--  arch/x86/kernel/cpu/cpufreq/e_powersaver.c         |   6
 -rw-r--r--  arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c   |   6
 -rw-r--r--  arch/x86/kernel/cpu/intel.c                        |  17
 -rw-r--r--  arch/x86/kernel/cpu/intel_cacheinfo.c              |  63
 -rw-r--r--  arch/x86/kernel/cpu/mcheck/mce_amd_64.c            |  21
 -rw-r--r--  arch/x86/kernel/cpu/mcheck/mce_intel_64.c          |   7
 -rw-r--r--  arch/x86/kernel/cpu/mcheck/p4.c                    |   4
 -rw-r--r--  arch/x86/kernel/cpu/perfctr-watchdog.c             |   2
 -rw-r--r--  arch/x86/kernel/cpu/proc.c                         |  20
 13 files changed, 233 insertions(+), 228 deletions(-)
diff --git a/arch/x86/kernel/cpu/addon_cpuid_features.c b/arch/x86/kernel/cpu/addon_cpuid_features.c
index 2cf23634b6d9..6882a735d9c0 100644
--- a/arch/x86/kernel/cpu/addon_cpuid_features.c
+++ b/arch/x86/kernel/cpu/addon_cpuid_features.c
@@ -7,7 +7,7 @@
 #include <asm/pat.h>
 #include <asm/processor.h>
 
-#include <mach_apic.h>
+#include <asm/apic.h>
 
 struct cpuid_bit {
 	u16 feature;
@@ -69,7 +69,7 @@ void __cpuinit init_scattered_cpuid_features(struct cpuinfo_x86 *c)
  */
 void __cpuinit detect_extended_topology(struct cpuinfo_x86 *c)
 {
-#ifdef CONFIG_X86_SMP
+#ifdef CONFIG_SMP
 	unsigned int eax, ebx, ecx, edx, sub_index;
 	unsigned int ht_mask_width, core_plus_mask_width;
 	unsigned int core_select_mask, core_level_siblings;
@@ -116,22 +116,14 @@ void __cpuinit detect_extended_topology(struct cpuinfo_x86 *c)
 
 	core_select_mask = (~(-1 << core_plus_mask_width)) >> ht_mask_width;
 
-#ifdef CONFIG_X86_32
-	c->cpu_core_id = phys_pkg_id(c->initial_apicid, ht_mask_width)
+	c->cpu_core_id = apic->phys_pkg_id(c->initial_apicid, ht_mask_width)
 						 & core_select_mask;
-	c->phys_proc_id = phys_pkg_id(c->initial_apicid, core_plus_mask_width);
+	c->phys_proc_id = apic->phys_pkg_id(c->initial_apicid, core_plus_mask_width);
 	/*
 	 * Reinit the apicid, now that we have extended initial_apicid.
 	 */
-	c->apicid = phys_pkg_id(c->initial_apicid, 0);
-#else
-	c->cpu_core_id = phys_pkg_id(ht_mask_width) & core_select_mask;
-	c->phys_proc_id = phys_pkg_id(core_plus_mask_width);
-	/*
-	 * Reinit the apicid, now that we have extended initial_apicid.
-	 */
-	c->apicid = phys_pkg_id(0);
-#endif
+	c->apicid = apic->phys_pkg_id(c->initial_apicid, 0);
+
 	c->x86_max_cores = (core_level_siblings / smp_num_siblings);
 
 
@@ -143,37 +135,3 @@ void __cpuinit detect_extended_topology(struct cpuinfo_x86 *c)
 	return;
 #endif
 }
-
-#ifdef CONFIG_X86_PAT
-void __cpuinit validate_pat_support(struct cpuinfo_x86 *c)
-{
-	if (!cpu_has_pat)
-		pat_disable("PAT not supported by CPU.");
-
-	switch (c->x86_vendor) {
-	case X86_VENDOR_INTEL:
-		/*
-		 * There is a known erratum on Pentium III and Core Solo
-		 * and Core Duo CPUs.
-		 * " Page with PAT set to WC while associated MTRR is UC
-		 * may consolidate to UC "
-		 * Because of this erratum, it is better to stick with
-		 * setting WC in MTRR rather than using PAT on these CPUs.
-		 *
-		 * Enable PAT WC only on P4, Core 2 or later CPUs.
-		 */
-		if (c->x86 > 0x6 || (c->x86 == 6 && c->x86_model >= 15))
-			return;
-
-		pat_disable("PAT WC disabled due to known CPU erratum.");
-		return;
-
-	case X86_VENDOR_AMD:
-	case X86_VENDOR_CENTAUR:
-	case X86_VENDOR_TRANSMETA:
-		return;
-	}
-
-	pat_disable("PAT disabled. Not yet verified on this CPU type.");
-}
-#endif
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index 7c878f6aa919..25423a5b80ed 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -12,8 +12,6 @@
 # include <asm/cacheflush.h>
 #endif
 
-#include <mach_apic.h>
-
 #include "cpu.h"
 
 #ifdef CONFIG_X86_32
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 83492b1f93b1..826d5c876278 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -21,14 +21,14 @@
 #include <asm/asm.h>
 #include <asm/numa.h>
 #include <asm/smp.h>
-#ifdef CONFIG_X86_LOCAL_APIC
-#include <asm/mpspec.h>
+#include <asm/cpu.h>
+#include <asm/cpumask.h>
 #include <asm/apic.h>
-#include <mach_apic.h>
-#include <asm/genapic.h>
+
+#ifdef CONFIG_X86_LOCAL_APIC
+#include <asm/uv/uv.h>
 #endif
 
-#include <asm/pda.h>
 #include <asm/pgtable.h>
 #include <asm/processor.h>
 #include <asm/desc.h>
@@ -37,6 +37,7 @@
 #include <asm/sections.h>
 #include <asm/setup.h>
 #include <asm/hypervisor.h>
+#include <asm/stackprotector.h>
 
 #include "cpu.h"
 
@@ -50,6 +51,15 @@ cpumask_var_t cpu_initialized_mask;
 /* representing cpus for which sibling maps can be computed */
 cpumask_var_t cpu_sibling_setup_mask;
 
+/* correctly size the local cpu masks */
+void __init setup_cpu_local_masks(void)
+{
+	alloc_bootmem_cpumask_var(&cpu_initialized_mask);
+	alloc_bootmem_cpumask_var(&cpu_callin_mask);
+	alloc_bootmem_cpumask_var(&cpu_callout_mask);
+	alloc_bootmem_cpumask_var(&cpu_sibling_setup_mask);
+}
+
 #else /* CONFIG_X86_32 */
 
 cpumask_t cpu_callin_map;
@@ -62,23 +72,23 @@ cpumask_t cpu_sibling_setup_map;
 
 static struct cpu_dev *this_cpu __cpuinitdata;
 
+DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
 #ifdef CONFIG_X86_64
-/* We need valid kernel segments for data and code in long mode too
- * IRET will check the segment types kkeil 2000/10/28
- * Also sysret mandates a special GDT layout
- */
-/* The TLS descriptors are currently at a different place compared to i386.
-   Hopefully nobody expects them at a fixed place (Wine?) */
-DEFINE_PER_CPU(struct gdt_page, gdt_page) = { .gdt = {
+	/*
+	 * We need valid kernel segments for data and code in long mode too
+	 * IRET will check the segment types kkeil 2000/10/28
+	 * Also sysret mandates a special GDT layout
+	 *
+	 * The TLS descriptors are currently at a different place compared to i386.
+	 * Hopefully nobody expects them at a fixed place (Wine?)
+	 */
 	[GDT_ENTRY_KERNEL32_CS] = { { { 0x0000ffff, 0x00cf9b00 } } },
 	[GDT_ENTRY_KERNEL_CS] = { { { 0x0000ffff, 0x00af9b00 } } },
 	[GDT_ENTRY_KERNEL_DS] = { { { 0x0000ffff, 0x00cf9300 } } },
 	[GDT_ENTRY_DEFAULT_USER32_CS] = { { { 0x0000ffff, 0x00cffb00 } } },
 	[GDT_ENTRY_DEFAULT_USER_DS] = { { { 0x0000ffff, 0x00cff300 } } },
 	[GDT_ENTRY_DEFAULT_USER_CS] = { { { 0x0000ffff, 0x00affb00 } } },
-} };
 #else
-DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
 	[GDT_ENTRY_KERNEL_CS] = { { { 0x0000ffff, 0x00cf9a00 } } },
 	[GDT_ENTRY_KERNEL_DS] = { { { 0x0000ffff, 0x00cf9200 } } },
 	[GDT_ENTRY_DEFAULT_USER_CS] = { { { 0x0000ffff, 0x00cffa00 } } },
@@ -110,9 +120,10 @@ DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
 	[GDT_ENTRY_APMBIOS_BASE+2] = { { { 0x0000ffff, 0x00409200 } } },
 
 	[GDT_ENTRY_ESPFIX_SS] = { { { 0x00000000, 0x00c09200 } } },
-	[GDT_ENTRY_PERCPU] = { { { 0x00000000, 0x00000000 } } },
-} };
+	[GDT_ENTRY_PERCPU] = { { { 0x0000ffff, 0x00cf9200 } } },
+	GDT_STACK_CANARY_INIT
 #endif
+} };
 EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
 
 #ifdef CONFIG_X86_32
@@ -213,6 +224,49 @@ static inline void squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
 #endif
 
 /*
+ * Some CPU features depend on higher CPUID levels, which may not always
+ * be available due to CPUID level capping or broken virtualization
+ * software. Add those features to this table to auto-disable them.
+ */
+struct cpuid_dependent_feature {
+	u32 feature;
+	u32 level;
+};
+static const struct cpuid_dependent_feature __cpuinitconst
+cpuid_dependent_features[] = {
+	{ X86_FEATURE_MWAIT,		0x00000005 },
+	{ X86_FEATURE_DCA,		0x00000009 },
+	{ X86_FEATURE_XSAVE,		0x0000000d },
+	{ 0, 0 }
+};
+
+static void __cpuinit filter_cpuid_features(struct cpuinfo_x86 *c, bool warn)
+{
+	const struct cpuid_dependent_feature *df;
+	for (df = cpuid_dependent_features; df->feature; df++) {
+		/*
+		 * Note: cpuid_level is set to -1 if unavailable, but
+		 * extended_extended_level is set to 0 if unavailable
+		 * and the legitimate extended levels are all negative
+		 * when signed; hence the weird messing around with
+		 * signs here...
+		 */
+		if (cpu_has(c, df->feature) &&
+		    ((s32)df->level < 0 ?
+		     (u32)df->level > (u32)c->extended_cpuid_level :
+		     (s32)df->level > (s32)c->cpuid_level)) {
+			clear_cpu_cap(c, df->feature);
+			if (warn)
+				printk(KERN_WARNING
+				       "CPU: CPU feature %s disabled "
+				       "due to lack of CPUID level 0x%x\n",
+				       x86_cap_flags[df->feature],
+				       df->level);
+		}
+	}
+}
+
+/*
  * Naming convention should be: <Name> [(<Codename>)]
  * This table only is used unless init_<vendor>() below doesn't set it;
  * in particular, if CPUID levels 0x80000002..4 are supported, this isn't used
@@ -242,18 +296,29 @@ static char __cpuinit *table_lookup_model(struct cpuinfo_x86 *c)
 
 __u32 cleared_cpu_caps[NCAPINTS] __cpuinitdata;
 
+void load_percpu_segment(int cpu)
+{
+#ifdef CONFIG_X86_32
+	loadsegment(fs, __KERNEL_PERCPU);
+#else
+	loadsegment(gs, 0);
+	wrmsrl(MSR_GS_BASE, (unsigned long)per_cpu(irq_stack_union.gs_base, cpu));
+#endif
+	load_stack_canary_segment();
+}
+
 /* Current gdt points %fs at the "master" per-cpu area: after this,
  * it's on the real one. */
-void switch_to_new_gdt(void)
+void switch_to_new_gdt(int cpu)
 {
 	struct desc_ptr gdt_descr;
 
-	gdt_descr.address = (long)get_cpu_gdt_table(smp_processor_id());
+	gdt_descr.address = (long)get_cpu_gdt_table(cpu);
 	gdt_descr.size = GDT_SIZE - 1;
 	load_gdt(&gdt_descr);
-#ifdef CONFIG_X86_32
-	asm("mov %0, %%fs" : : "r" (__KERNEL_PERCPU) : "memory");
-#endif
+	/* Reload the per-cpu base */
+
+	load_percpu_segment(cpu);
 }
 
 static struct cpu_dev *cpu_devs[X86_VENDOR_NUM] = {};
@@ -383,11 +448,7 @@ void __cpuinit detect_ht(struct cpuinfo_x86 *c)
 	}
 
 	index_msb = get_count_order(smp_num_siblings);
-#ifdef CONFIG_X86_64
-	c->phys_proc_id = phys_pkg_id(index_msb);
-#else
-	c->phys_proc_id = phys_pkg_id(c->initial_apicid, index_msb);
-#endif
+	c->phys_proc_id = apic->phys_pkg_id(c->initial_apicid, index_msb);
 
 	smp_num_siblings = smp_num_siblings / c->x86_max_cores;
 
@@ -395,13 +456,8 @@ void __cpuinit detect_ht(struct cpuinfo_x86 *c)
 
 	core_bits = get_count_order(c->x86_max_cores);
 
-#ifdef CONFIG_X86_64
-	c->cpu_core_id = phys_pkg_id(index_msb) &
+	c->cpu_core_id = apic->phys_pkg_id(c->initial_apicid, index_msb) &
 					       ((1 << core_bits) - 1);
-#else
-	c->cpu_core_id = phys_pkg_id(c->initial_apicid, index_msb) &
-					       ((1 << core_bits) - 1);
-#endif
 }
 
 out:
@@ -570,11 +626,10 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
 	if (this_cpu->c_early_init)
 		this_cpu->c_early_init(c);
 
-	validate_pat_support(c);
-
 #ifdef CONFIG_SMP
 	c->cpu_index = boot_cpu_id;
 #endif
+	filter_cpuid_features(c, false);
 }
 
 void __init early_cpu_init(void)
@@ -637,7 +692,7 @@ static void __cpuinit generic_identify(struct cpuinfo_x86 *c)
 	c->initial_apicid = (cpuid_ebx(1) >> 24) & 0xFF;
 #ifdef CONFIG_X86_32
 # ifdef CONFIG_X86_HT
-	c->apicid = phys_pkg_id(c->initial_apicid, 0);
+	c->apicid = apic->phys_pkg_id(c->initial_apicid, 0);
 # else
 	c->apicid = c->initial_apicid;
 # endif
@@ -684,7 +739,7 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
 		this_cpu->c_identify(c);
 
 #ifdef CONFIG_X86_64
-	c->apicid = phys_pkg_id(0);
+	c->apicid = apic->phys_pkg_id(c->initial_apicid, 0);
 #endif
 
 	/*
@@ -708,6 +763,9 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
 	 * we do "generic changes."
 	 */
 
+	/* Filter out anything that depends on CPUID levels we don't have */
+	filter_cpuid_features(c, true);
+
 	/* If the model name is still unset, do table lookup. */
 	if (!c->x86_model_id[0]) {
 		char *p;
@@ -877,54 +935,22 @@ static __init int setup_disablecpuid(char *arg)
 __setup("clearcpuid=", setup_disablecpuid);
 
 #ifdef CONFIG_X86_64
-struct x8664_pda **_cpu_pda __read_mostly;
-EXPORT_SYMBOL(_cpu_pda);
-
 struct desc_ptr idt_descr = { 256 * 16 - 1, (unsigned long) idt_table };
 
-static char boot_cpu_stack[IRQSTACKSIZE] __page_aligned_bss;
+DEFINE_PER_CPU_FIRST(union irq_stack_union,
+		     irq_stack_union) __aligned(PAGE_SIZE);
+DEFINE_PER_CPU(char *, irq_stack_ptr) =
+	init_per_cpu_var(irq_stack_union.irq_stack) + IRQ_STACK_SIZE - 64;
 
-void __cpuinit pda_init(int cpu)
-{
-	struct x8664_pda *pda = cpu_pda(cpu);
+DEFINE_PER_CPU(unsigned long, kernel_stack) =
+	(unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
+EXPORT_PER_CPU_SYMBOL(kernel_stack);
 
-	/* Setup up data that may be needed in __get_free_pages early */
-	loadsegment(fs, 0);
-	loadsegment(gs, 0);
-	/* Memory clobbers used to order PDA accessed */
-	mb();
-	wrmsrl(MSR_GS_BASE, pda);
-	mb();
-
-	pda->cpunumber = cpu;
-	pda->irqcount = -1;
-	pda->kernelstack = (unsigned long)stack_thread_info() -
-		PDA_STACKOFFSET + THREAD_SIZE;
-	pda->active_mm = &init_mm;
-	pda->mmu_state = 0;
-
-	if (cpu == 0) {
-		/* others are initialized in smpboot.c */
-		pda->pcurrent = &init_task;
-		pda->irqstackptr = boot_cpu_stack;
-		pda->irqstackptr += IRQSTACKSIZE - 64;
-	} else {
-		if (!pda->irqstackptr) {
-			pda->irqstackptr = (char *)
-				__get_free_pages(GFP_ATOMIC, IRQSTACK_ORDER);
-			if (!pda->irqstackptr)
-				panic("cannot allocate irqstack for cpu %d",
-				      cpu);
-			pda->irqstackptr += IRQSTACKSIZE - 64;
-		}
+DEFINE_PER_CPU(unsigned int, irq_count) = -1;
 
-		if (pda->nodenumber == 0 && cpu_to_node(cpu) != NUMA_NO_NODE)
-			pda->nodenumber = cpu_to_node(cpu);
-	}
-}
-
-static char boot_exception_stacks[(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ +
-	DEBUG_STKSZ] __page_aligned_bss;
+static DEFINE_PER_CPU_PAGE_ALIGNED(char, exception_stacks
+	[(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ])
+	__aligned(PAGE_SIZE);
 
 extern asmlinkage void ignore_sysret(void);
 
@@ -957,16 +983,21 @@ unsigned long kernel_eflags;
  */
 DEFINE_PER_CPU(struct orig_ist, orig_ist);
 
-#else
+#else	/* x86_64 */
 
-/* Make sure %fs is initialized properly in idle threads */
+#ifdef CONFIG_CC_STACKPROTECTOR
+DEFINE_PER_CPU(unsigned long, stack_canary);
+#endif
+
+/* Make sure %fs and %gs are initialized properly in idle threads */
 struct pt_regs * __cpuinit idle_regs(struct pt_regs *regs)
 {
 	memset(regs, 0, sizeof(struct pt_regs));
 	regs->fs = __KERNEL_PERCPU;
+	regs->gs = __KERNEL_STACK_CANARY;
 	return regs;
 }
-#endif
+#endif	/* x86_64 */
 
 /*
  * cpu_init() initializes state that is per-CPU. Some data is already
@@ -982,15 +1013,14 @@ void __cpuinit cpu_init(void)
 	struct tss_struct *t = &per_cpu(init_tss, cpu);
 	struct orig_ist *orig_ist = &per_cpu(orig_ist, cpu);
 	unsigned long v;
-	char *estacks = NULL;
 	struct task_struct *me;
 	int i;
 
-	/* CPU 0 is initialised in head64.c */
-	if (cpu != 0)
-		pda_init(cpu);
-	else
-		estacks = boot_exception_stacks;
+#ifdef CONFIG_NUMA
+	if (cpu != 0 && percpu_read(node_number) == 0 &&
+	    cpu_to_node(cpu) != NUMA_NO_NODE)
+		percpu_write(node_number, cpu_to_node(cpu));
+#endif
 
 	me = current;
 
@@ -1006,7 +1036,9 @@ void __cpuinit cpu_init(void)
 	 * and set up the GDT descriptor:
 	 */
 
-	switch_to_new_gdt();
+	switch_to_new_gdt(cpu);
+	loadsegment(fs, 0);
+
 	load_idt((const struct desc_ptr *)&idt_descr);
 
 	memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
@@ -1017,25 +1049,20 @@ void __cpuinit cpu_init(void)
 	barrier();
 
 	check_efer();
-	if (cpu != 0 && x2apic)
+	if (cpu != 0)
 		enable_x2apic();
 
 	/*
 	 * set up and load the per-CPU TSS
 	 */
 	if (!orig_ist->ist[0]) {
-		static const unsigned int order[N_EXCEPTION_STACKS] = {
-		  [0 ... N_EXCEPTION_STACKS - 1] = EXCEPTION_STACK_ORDER,
-		  [DEBUG_STACK - 1] = DEBUG_STACK_ORDER
+		static const unsigned int sizes[N_EXCEPTION_STACKS] = {
+		  [0 ... N_EXCEPTION_STACKS - 1] = EXCEPTION_STKSZ,
+		  [DEBUG_STACK - 1] = DEBUG_STKSZ
 		};
+		char *estacks = per_cpu(exception_stacks, cpu);
 		for (v = 0; v < N_EXCEPTION_STACKS; v++) {
-			if (cpu) {
-				estacks = (char *)__get_free_pages(GFP_ATOMIC, order[v]);
-				if (!estacks)
-					panic("Cannot allocate exception "
-					      "stack %ld %d\n", v, cpu);
-			}
-			estacks += PAGE_SIZE << order[v];
+			estacks += sizes[v];
 			orig_ist->ist[v] = t->x86_tss.ist[v] =
 					(unsigned long)estacks;
 		}
@@ -1069,22 +1096,19 @@ void __cpuinit cpu_init(void)
 	 */
 	if (kgdb_connected && arch_kgdb_ops.correct_hw_break)
 		arch_kgdb_ops.correct_hw_break();
-	else {
+	else
 #endif
-	/*
-	 * Clear all 6 debug registers:
-	 */
-
-	set_debugreg(0UL, 0);
-	set_debugreg(0UL, 1);
-	set_debugreg(0UL, 2);
-	set_debugreg(0UL, 3);
-	set_debugreg(0UL, 6);
-	set_debugreg(0UL, 7);
-#ifdef CONFIG_KGDB
-	/* If the kgdb is connected no debug regs should be altered. */
+	{
+		/*
+		 * Clear all 6 debug registers:
+		 */
+		set_debugreg(0UL, 0);
+		set_debugreg(0UL, 1);
+		set_debugreg(0UL, 2);
+		set_debugreg(0UL, 3);
+		set_debugreg(0UL, 6);
+		set_debugreg(0UL, 7);
 	}
-#endif
 
 	fpu_init();
 
@@ -1114,7 +1138,7 @@ void __cpuinit cpu_init(void)
 	clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);
 
 	load_idt(&idt_descr);
-	switch_to_new_gdt();
+	switch_to_new_gdt(cpu);
 
 	/*
 	 * Set up and load the per-CPU TSS and LDT
@@ -1135,9 +1159,6 @@ void __cpuinit cpu_init(void)
 	__set_tss_desc(cpu, GDT_ENTRY_DOUBLEFAULT_TSS, &doublefault_tss);
 #endif
 
-	/* Clear %gs. */
-	asm volatile ("mov %0, %%gs" : : "r" (0));
-
 	/* Clear all 6 debug registers: */
 	set_debugreg(0, 0);
 	set_debugreg(0, 1);
diff --git a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
index 3babe1f1e912..23da96e57b17 100644
--- a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
+++ b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
@@ -601,7 +601,7 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
 	if (!data)
 		return -ENOMEM;
 
-	data->acpi_data = percpu_ptr(acpi_perf_data, cpu);
+	data->acpi_data = per_cpu_ptr(acpi_perf_data, cpu);
 	per_cpu(drv_data, cpu) = data;
 
 	if (cpu_has(c, X86_FEATURE_CONSTANT_TSC))
diff --git a/arch/x86/kernel/cpu/cpufreq/e_powersaver.c b/arch/x86/kernel/cpu/cpufreq/e_powersaver.c
index 3f83ea12c47a..35a257dd4bb7 100644
--- a/arch/x86/kernel/cpu/cpufreq/e_powersaver.c
+++ b/arch/x86/kernel/cpu/cpufreq/e_powersaver.c
@@ -204,12 +204,12 @@ static int eps_cpu_init(struct cpufreq_policy *policy)
 	}
 	/* Enable Enhanced PowerSaver */
 	rdmsrl(MSR_IA32_MISC_ENABLE, val);
-	if (!(val & 1 << 16)) {
-		val |= 1 << 16;
+	if (!(val & MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP)) {
+		val |= MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP;
 		wrmsrl(MSR_IA32_MISC_ENABLE, val);
 		/* Can be locked at 0 */
 		rdmsrl(MSR_IA32_MISC_ENABLE, val);
-		if (!(val & 1 << 16)) {
+		if (!(val & MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP)) {
 			printk(KERN_INFO "eps: Can't enable Enhanced PowerSaver\n");
 			return -ENODEV;
 		}
diff --git a/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c b/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c
index f08998278a3a..c9f1fdc02830 100644
--- a/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c
+++ b/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c
@@ -390,14 +390,14 @@ static int centrino_cpu_init(struct cpufreq_policy *policy)
 	   enable it if not. */
 	rdmsr(MSR_IA32_MISC_ENABLE, l, h);
 
-	if (!(l & (1<<16))) {
-		l |= (1<<16);
+	if (!(l & MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP)) {
+		l |= MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP;
 		dprintk("trying to enable Enhanced SpeedStep (%x)\n", l);
 		wrmsr(MSR_IA32_MISC_ENABLE, l, h);
 
 		/* check to see if it stuck */
 		rdmsr(MSR_IA32_MISC_ENABLE, l, h);
-		if (!(l & (1<<16))) {
+		if (!(l & MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP)) {
 			printk(KERN_INFO PFX
 				"couldn't enable Enhanced SpeedStep\n");
 			return -ENODEV;
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index 5fff00c70de0..1a89a2b68d15 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -25,7 +25,6 @@
 #ifdef CONFIG_X86_LOCAL_APIC
 #include <asm/mpspec.h>
 #include <asm/apic.h>
-#include <mach_apic.h>
 #endif
 
 static void __cpuinit early_init_intel(struct cpuinfo_x86 *c)
@@ -69,6 +68,18 @@ static void __cpuinit early_init_intel(struct cpuinfo_x86 *c)
 		sched_clock_stable = 1;
 	}
 
+	/*
+	 * There is a known erratum on Pentium III and Core Solo
+	 * and Core Duo CPUs.
+	 * " Page with PAT set to WC while associated MTRR is UC
+	 * may consolidate to UC "
+	 * Because of this erratum, it is better to stick with
+	 * setting WC in MTRR rather than using PAT on these CPUs.
+	 *
+	 * Enable PAT WC only on P4, Core 2 or later CPUs.
+	 */
+	if (c->x86 == 6 && c->x86_model < 15)
+		clear_cpu_cap(c, X86_FEATURE_PAT);
 }
 
 #ifdef CONFIG_X86_32
@@ -141,10 +152,10 @@ static void __cpuinit intel_workarounds(struct cpuinfo_x86 *c)
 	 */
 	if ((c->x86 == 15) && (c->x86_model == 1) && (c->x86_mask == 1)) {
 		rdmsr(MSR_IA32_MISC_ENABLE, lo, hi);
-		if ((lo & (1<<9)) == 0) {
+		if ((lo & MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE) == 0) {
 			printk (KERN_INFO "CPU: C0 stepping P4 Xeon detected.\n");
 			printk (KERN_INFO "CPU: Disabling hardware prefetching (Errata 037)\n");
-			lo |= (1<<9);	/* Disable hw prefetching */
+			lo |= MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE;
 			wrmsr (MSR_IA32_MISC_ENABLE, lo, hi);
 		}
 	}
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index da299eb85fc0..7293508d8f5c 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -147,7 +147,16 @@ struct _cpuid4_info {
 	union _cpuid4_leaf_ecx ecx;
 	unsigned long size;
 	unsigned long can_disable;
-	cpumask_t shared_cpu_map;	/* future?: only cpus/node is needed */
+	DECLARE_BITMAP(shared_cpu_map, NR_CPUS);
+};
+
+/* subset of above _cpuid4_info w/o shared_cpu_map */
+struct _cpuid4_info_regs {
+	union _cpuid4_leaf_eax eax;
+	union _cpuid4_leaf_ebx ebx;
+	union _cpuid4_leaf_ecx ecx;
+	unsigned long size;
+	unsigned long can_disable;
 };
 
 #ifdef CONFIG_PCI
@@ -278,7 +287,7 @@ amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax,
 }
 
 static void __cpuinit
-amd_check_l3_disable(int index, struct _cpuid4_info *this_leaf)
+amd_check_l3_disable(int index, struct _cpuid4_info_regs *this_leaf)
 {
 	if (index < 3)
 		return;
@@ -286,7 +295,8 @@ amd_check_l3_disable(int index, struct _cpuid4_info *this_leaf)
 }
 
 static int
-__cpuinit cpuid4_cache_lookup(int index, struct _cpuid4_info *this_leaf)
+__cpuinit cpuid4_cache_lookup_regs(int index,
+				   struct _cpuid4_info_regs *this_leaf)
 {
 	union _cpuid4_leaf_eax eax;
 	union _cpuid4_leaf_ebx ebx;
@@ -314,6 +324,15 @@ __cpuinit cpuid4_cache_lookup(int index, struct _cpuid4_info *this_leaf)
 	return 0;
 }
 
+static int
+__cpuinit cpuid4_cache_lookup(int index, struct _cpuid4_info *this_leaf)
+{
+	struct _cpuid4_info_regs *leaf_regs =
+		(struct _cpuid4_info_regs *)this_leaf;
+
+	return cpuid4_cache_lookup_regs(index, leaf_regs);
+}
+
 static int __cpuinit find_num_cache_leaves(void)
 {
 	unsigned int eax, ebx, ecx, edx;
@@ -353,11 +372,10 @@ unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
 		 * parameters cpuid leaf to find the cache details
 		 */
 		for (i = 0; i < num_cache_leaves; i++) {
-			struct _cpuid4_info this_leaf;
-
+			struct _cpuid4_info_regs this_leaf;
 			int retval;
 
-			retval = cpuid4_cache_lookup(i, &this_leaf);
+			retval = cpuid4_cache_lookup_regs(i, &this_leaf);
 			if (retval >= 0) {
 				switch(this_leaf.eax.split.level) {
 				    case 1:
@@ -506,17 +524,20 @@ static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
 	num_threads_sharing = 1 + this_leaf->eax.split.num_threads_sharing;
 
 	if (num_threads_sharing == 1)
-		cpu_set(cpu, this_leaf->shared_cpu_map);
+		cpumask_set_cpu(cpu, to_cpumask(this_leaf->shared_cpu_map));
 	else {
 		index_msb = get_count_order(num_threads_sharing);
 
 		for_each_online_cpu(i) {
 			if (cpu_data(i).apicid >> index_msb ==
 			    c->apicid >> index_msb) {
-				cpu_set(i, this_leaf->shared_cpu_map);
+				cpumask_set_cpu(i,
+					to_cpumask(this_leaf->shared_cpu_map));
 				if (i != cpu && per_cpu(cpuid4_info, i)) {
-					sibling_leaf = CPUID4_INFO_IDX(i, index);
-					cpu_set(cpu, sibling_leaf->shared_cpu_map);
+					sibling_leaf =
+						CPUID4_INFO_IDX(i, index);
+					cpumask_set_cpu(cpu, to_cpumask(
+						sibling_leaf->shared_cpu_map));
 				}
 			}
 		}
@@ -528,9 +549,10 @@ static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index)
 	int sibling;
 
 	this_leaf = CPUID4_INFO_IDX(cpu, index);
-	for_each_cpu_mask_nr(sibling, this_leaf->shared_cpu_map) {
+	for_each_cpu(sibling, to_cpumask(this_leaf->shared_cpu_map)) {
 		sibling_leaf = CPUID4_INFO_IDX(sibling, index);
-		cpu_clear(cpu, sibling_leaf->shared_cpu_map);
+		cpumask_clear_cpu(cpu,
+				  to_cpumask(sibling_leaf->shared_cpu_map));
 	}
 }
 #else
@@ -635,8 +657,9 @@ static ssize_t show_shared_cpu_map_func(struct _cpuid4_info *this_leaf,
 	int n = 0;
 
 	if (len > 1) {
-		cpumask_t *mask = &this_leaf->shared_cpu_map;
+		const struct cpumask *mask;
 
+		mask = to_cpumask(this_leaf->shared_cpu_map);
 		n = type?
 			cpulist_scnprintf(buf, len-2, mask) :
 			cpumask_scnprintf(buf, len-2, mask);
@@ -699,7 +722,8 @@ static struct pci_dev *get_k8_northbridge(int node)
 
 static ssize_t show_cache_disable(struct _cpuid4_info *this_leaf, char *buf)
 {
-	int node = cpu_to_node(first_cpu(this_leaf->shared_cpu_map));
+	const struct cpumask *mask = to_cpumask(this_leaf->shared_cpu_map);
+	int node = cpu_to_node(cpumask_first(mask));
 	struct pci_dev *dev = NULL;
 	ssize_t ret = 0;
 	int i;
@@ -733,7 +757,8 @@ static ssize_t
 store_cache_disable(struct _cpuid4_info *this_leaf, const char *buf,
 		    size_t count)
 {
-	int node = cpu_to_node(first_cpu(this_leaf->shared_cpu_map));
+	const struct cpumask *mask = to_cpumask(this_leaf->shared_cpu_map);
+	int node = cpu_to_node(cpumask_first(mask));
 	struct pci_dev *dev = NULL;
 	unsigned int ret, index, val;
 
@@ -878,7 +903,7 @@ err_out:
 	return -ENOMEM;
 }
 
-static cpumask_t cache_dev_map = CPU_MASK_NONE;
+static DECLARE_BITMAP(cache_dev_map, NR_CPUS);
 
 /* Add/Remove cache interface for CPU device */
 static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
@@ -918,7 +943,7 @@ static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
 		}
 		kobject_uevent(&(this_object->kobj), KOBJ_ADD);
 	}
-	cpu_set(cpu, cache_dev_map);
+	cpumask_set_cpu(cpu, to_cpumask(cache_dev_map));
 
 	kobject_uevent(per_cpu(cache_kobject, cpu), KOBJ_ADD);
 	return 0;
@@ -931,9 +956,9 @@ static void __cpuinit cache_remove_dev(struct sys_device * sys_dev)
 
 	if (per_cpu(cpuid4_info, cpu) == NULL)
 		return;
-	if (!cpu_isset(cpu, cache_dev_map))
+	if (!cpumask_test_cpu(cpu, to_cpumask(cache_dev_map)))
 		return;
-	cpu_clear(cpu, cache_dev_map);
+	cpumask_clear_cpu(cpu, to_cpumask(cache_dev_map));
 
 	for (i = 0; i < num_cache_leaves; i++)
 		kobject_put(&(INDEX_KOBJECT_PTR(cpu,i)->kobj));
diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd_64.c b/arch/x86/kernel/cpu/mcheck/mce_amd_64.c
index f2ee0ae29bd6..9817506dd469 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_amd_64.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_amd_64.c
@@ -67,7 +67,7 @@ static struct threshold_block threshold_defaults = {
 struct threshold_bank {
 	struct kobject *kobj;
 	struct threshold_block *blocks;
-	cpumask_t cpus;
+	cpumask_var_t cpus;
 };
 static DEFINE_PER_CPU(struct threshold_bank *, threshold_banks[NR_BANKS]);
 
@@ -481,7 +481,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
 
 #ifdef CONFIG_SMP
 	if (cpu_data(cpu).cpu_core_id && shared_bank[bank]) {	/* symlink */
-		i = first_cpu(per_cpu(cpu_core_map, cpu));
+		i = cpumask_first(&per_cpu(cpu_core_map, cpu));
 
 		/* first core not up yet */
 		if (cpu_data(i).cpu_core_id)
@@ -501,7 +501,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
 		if (err)
 			goto out;
 
-		b->cpus = per_cpu(cpu_core_map, cpu);
+		cpumask_copy(b->cpus, &per_cpu(cpu_core_map, cpu));
 		per_cpu(threshold_banks, cpu)[bank] = b;
 		goto out;
 	}
@@ -512,15 +512,20 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
 		err = -ENOMEM;
 		goto out;
 	}
+	if (!alloc_cpumask_var(&b->cpus, GFP_KERNEL)) {
+		kfree(b);
+		err = -ENOMEM;
+		goto out;
+	}
 
 	b->kobj = kobject_create_and_add(name, &per_cpu(device_mce, cpu).kobj);
 	if (!b->kobj)
 		goto out_free;
 
 #ifndef CONFIG_SMP
-	b->cpus = CPU_MASK_ALL;
+	cpumask_setall(b->cpus);
 #else
-	b->cpus = per_cpu(cpu_core_map, cpu);
+	cpumask_copy(b->cpus, &per_cpu(cpu_core_map, cpu));
 #endif
 
 	per_cpu(threshold_banks, cpu)[bank] = b;
@@ -529,7 +534,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
 	if (err)
 		goto out_free;
 
-	for_each_cpu_mask_nr(i, b->cpus) {
+	for_each_cpu(i, b->cpus) {
 		if (i == cpu)
 			continue;
 
@@ -545,6 +550,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
 
 out_free:
 	per_cpu(threshold_banks, cpu)[bank] = NULL;
+	free_cpumask_var(b->cpus);
 	kfree(b);
 out:
 	return err;
@@ -619,7 +625,7 @@ static void threshold_remove_bank(unsigned int cpu, int bank)
 #endif
 
 	/* remove all sibling symlinks before unregistering */
-	for_each_cpu_mask_nr(i, b->cpus) {
+	for_each_cpu(i, b->cpus) {
 		if (i == cpu)
 			continue;
 
@@ -632,6 +638,7 @@ static void threshold_remove_bank(unsigned int cpu, int bank)
 free_out:
 	kobject_del(b->kobj);
 	kobject_put(b->kobj);
+	free_cpumask_var(b->cpus);
 	kfree(b);
 	per_cpu(threshold_banks, cpu)[bank] = NULL;
 }
diff --git a/arch/x86/kernel/cpu/mcheck/mce_intel_64.c b/arch/x86/kernel/cpu/mcheck/mce_intel_64.c
index f44c36624360..aa5e287c98e0 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_intel_64.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_intel_64.c
@@ -7,6 +7,7 @@
 #include <linux/interrupt.h>
 #include <linux/percpu.h>
 #include <asm/processor.h>
+#include <asm/apic.h>
 #include <asm/msr.h>
 #include <asm/mce.h>
 #include <asm/hw_irq.h>
@@ -48,13 +49,13 @@ static void intel_init_thermal(struct cpuinfo_x86 *c)
 	 */
 	rdmsr(MSR_IA32_MISC_ENABLE, l, h);
 	h = apic_read(APIC_LVTTHMR);
-	if ((l & (1 << 3)) && (h & APIC_DM_SMI)) {
+	if ((l & MSR_IA32_MISC_ENABLE_TM1) && (h & APIC_DM_SMI)) {
 		printk(KERN_DEBUG
 		       "CPU%d: Thermal monitoring handled by SMI\n", cpu);
 		return;
 	}
 
-	if (cpu_has(c, X86_FEATURE_TM2) && (l & (1 << 13)))
+	if (cpu_has(c, X86_FEATURE_TM2) && (l & MSR_IA32_MISC_ENABLE_TM2))
 		tm2 = 1;
 
 	if (h & APIC_VECTOR_MASK) {
@@ -72,7 +73,7 @@ static void intel_init_thermal(struct cpuinfo_x86 *c)
 	wrmsr(MSR_IA32_THERM_INTERRUPT, l | 0x03, h);
 
 	rdmsr(MSR_IA32_MISC_ENABLE, l, h);
-	wrmsr(MSR_IA32_MISC_ENABLE, l | (1 << 3), h);
+	wrmsr(MSR_IA32_MISC_ENABLE, l | MSR_IA32_MISC_ENABLE_TM1, h);
 
 	l = apic_read(APIC_LVTTHMR);
 	apic_write(APIC_LVTTHMR, l & ~APIC_LVT_MASKED);
diff --git a/arch/x86/kernel/cpu/mcheck/p4.c b/arch/x86/kernel/cpu/mcheck/p4.c
index 9b60fce09f75..f53bdcbaf382 100644
--- a/arch/x86/kernel/cpu/mcheck/p4.c
+++ b/arch/x86/kernel/cpu/mcheck/p4.c
@@ -85,7 +85,7 @@ static void intel_init_thermal(struct cpuinfo_x86 *c)
 	 */
 	rdmsr(MSR_IA32_MISC_ENABLE, l, h);
 	h = apic_read(APIC_LVTTHMR);
-	if ((l & (1<<3)) && (h & APIC_DM_SMI)) {
+	if ((l & MSR_IA32_MISC_ENABLE_TM1) && (h & APIC_DM_SMI)) {
 		printk(KERN_DEBUG "CPU%d: Thermal monitoring handled by SMI\n",
 		       cpu);
 		return; /* -EBUSY */
@@ -111,7 +111,7 @@ static void intel_init_thermal(struct cpuinfo_x86 *c)
 	vendor_thermal_interrupt = intel_thermal_interrupt;
 
 	rdmsr(MSR_IA32_MISC_ENABLE, l, h);
-	wrmsr(MSR_IA32_MISC_ENABLE, l | (1<<3), h);
+	wrmsr(MSR_IA32_MISC_ENABLE, l | MSR_IA32_MISC_ENABLE_TM1, h);
 
 	l = apic_read(APIC_LVTTHMR);
 	apic_write(APIC_LVTTHMR, l & ~APIC_LVT_MASKED);
diff --git a/arch/x86/kernel/cpu/perfctr-watchdog.c b/arch/x86/kernel/cpu/perfctr-watchdog.c
index 9abd48b22674..f6c70a164e32 100644
--- a/arch/x86/kernel/cpu/perfctr-watchdog.c
+++ b/arch/x86/kernel/cpu/perfctr-watchdog.c
@@ -19,7 +19,7 @@
 #include <linux/nmi.h>
 #include <linux/kprobes.h>
 
-#include <asm/apic.h>
+#include <asm/genapic.h>
 #include <asm/intel_arch_perfmon.h>
 
 struct nmi_watchdog_ctlblk {
diff --git a/arch/x86/kernel/cpu/proc.c b/arch/x86/kernel/cpu/proc.c
index 01b1244ef1c0..d67e0e48bc2d 100644
--- a/arch/x86/kernel/cpu/proc.c
+++ b/arch/x86/kernel/cpu/proc.c
@@ -7,11 +7,10 @@
 /*
  * Get CPU information for use by the procfs.
  */
-#ifdef CONFIG_X86_32
 static void show_cpuinfo_core(struct seq_file *m, struct cpuinfo_x86 *c,
 			      unsigned int cpu)
 {
-#ifdef CONFIG_X86_HT
+#ifdef CONFIG_SMP
 	if (c->x86_max_cores * smp_num_siblings > 1) {
 		seq_printf(m, "physical id\t: %d\n", c->phys_proc_id);
 		seq_printf(m, "siblings\t: %d\n",
@@ -24,6 +23,7 @@ static void show_cpuinfo_core(struct seq_file *m, struct cpuinfo_x86 *c,
 #endif
 }
 
+#ifdef CONFIG_X86_32
 static void show_cpuinfo_misc(struct seq_file *m, struct cpuinfo_x86 *c)
 {
 	/*
@@ -50,22 +50,6 @@ static void show_cpuinfo_misc(struct seq_file *m, struct cpuinfo_x86 *c)
 		   c->wp_works_ok ? "yes" : "no");
 }
 #else
-static void show_cpuinfo_core(struct seq_file *m, struct cpuinfo_x86 *c,
-			      unsigned int cpu)
-{
-#ifdef CONFIG_SMP
-	if (c->x86_max_cores * smp_num_siblings > 1) {
-		seq_printf(m, "physical id\t: %d\n", c->phys_proc_id);
-		seq_printf(m, "siblings\t: %d\n",
-			   cpus_weight(per_cpu(cpu_core_map, cpu)));
-		seq_printf(m, "core id\t\t: %d\n", c->cpu_core_id);
-		seq_printf(m, "cpu cores\t: %d\n", c->booted_cores);
-		seq_printf(m, "apicid\t\t: %d\n", c->apicid);
-		seq_printf(m, "initial apicid\t: %d\n", c->initial_apicid);
-	}
-#endif
-}
-
 static void show_cpuinfo_misc(struct seq_file *m, struct cpuinfo_x86 *c)
 {
 	seq_printf(m,
