diff options
Diffstat (limited to 'arch/x86/kernel/cpu')
| -rw-r--r-- | arch/x86/kernel/cpu/addon_cpuid_features.c | 54 | ||||
| -rw-r--r-- | arch/x86/kernel/cpu/amd.c | 54 | ||||
| -rw-r--r-- | arch/x86/kernel/cpu/common.c | 268 | ||||
| -rw-r--r-- | arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c | 2 | ||||
| -rw-r--r-- | arch/x86/kernel/cpu/cpufreq/e_powersaver.c | 6 | ||||
| -rw-r--r-- | arch/x86/kernel/cpu/cpufreq/p4-clockmod.c | 2 | ||||
| -rw-r--r-- | arch/x86/kernel/cpu/cpufreq/powernow-k8.c | 13 | ||||
| -rw-r--r-- | arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c | 6 | ||||
| -rw-r--r-- | arch/x86/kernel/cpu/cpufreq/speedstep-ich.c | 2 | ||||
| -rw-r--r-- | arch/x86/kernel/cpu/intel.c | 42 | ||||
| -rw-r--r-- | arch/x86/kernel/cpu/intel_cacheinfo.c | 69 | ||||
| -rw-r--r-- | arch/x86/kernel/cpu/mcheck/Makefile | 1 | ||||
| -rw-r--r-- | arch/x86/kernel/cpu/mcheck/mce_32.c | 14 | ||||
| -rw-r--r-- | arch/x86/kernel/cpu/mcheck/mce_64.c | 540 | ||||
| -rw-r--r-- | arch/x86/kernel/cpu/mcheck/mce_amd_64.c | 43 | ||||
| -rw-r--r-- | arch/x86/kernel/cpu/mcheck/mce_intel_64.c | 214 | ||||
| -rw-r--r-- | arch/x86/kernel/cpu/mcheck/p4.c | 4 | ||||
| -rw-r--r-- | arch/x86/kernel/cpu/mcheck/threshold.c | 29 | ||||
| -rw-r--r-- | arch/x86/kernel/cpu/perfctr-watchdog.c | 2 | ||||
| -rw-r--r-- | arch/x86/kernel/cpu/proc.c | 26 |
20 files changed, 972 insertions, 419 deletions
diff --git a/arch/x86/kernel/cpu/addon_cpuid_features.c b/arch/x86/kernel/cpu/addon_cpuid_features.c index 2cf23634b6d9..6882a735d9c0 100644 --- a/arch/x86/kernel/cpu/addon_cpuid_features.c +++ b/arch/x86/kernel/cpu/addon_cpuid_features.c | |||
| @@ -7,7 +7,7 @@ | |||
| 7 | #include <asm/pat.h> | 7 | #include <asm/pat.h> |
| 8 | #include <asm/processor.h> | 8 | #include <asm/processor.h> |
| 9 | 9 | ||
| 10 | #include <mach_apic.h> | 10 | #include <asm/apic.h> |
| 11 | 11 | ||
| 12 | struct cpuid_bit { | 12 | struct cpuid_bit { |
| 13 | u16 feature; | 13 | u16 feature; |
| @@ -69,7 +69,7 @@ void __cpuinit init_scattered_cpuid_features(struct cpuinfo_x86 *c) | |||
| 69 | */ | 69 | */ |
| 70 | void __cpuinit detect_extended_topology(struct cpuinfo_x86 *c) | 70 | void __cpuinit detect_extended_topology(struct cpuinfo_x86 *c) |
| 71 | { | 71 | { |
| 72 | #ifdef CONFIG_X86_SMP | 72 | #ifdef CONFIG_SMP |
| 73 | unsigned int eax, ebx, ecx, edx, sub_index; | 73 | unsigned int eax, ebx, ecx, edx, sub_index; |
| 74 | unsigned int ht_mask_width, core_plus_mask_width; | 74 | unsigned int ht_mask_width, core_plus_mask_width; |
| 75 | unsigned int core_select_mask, core_level_siblings; | 75 | unsigned int core_select_mask, core_level_siblings; |
| @@ -116,22 +116,14 @@ void __cpuinit detect_extended_topology(struct cpuinfo_x86 *c) | |||
| 116 | 116 | ||
| 117 | core_select_mask = (~(-1 << core_plus_mask_width)) >> ht_mask_width; | 117 | core_select_mask = (~(-1 << core_plus_mask_width)) >> ht_mask_width; |
| 118 | 118 | ||
| 119 | #ifdef CONFIG_X86_32 | 119 | c->cpu_core_id = apic->phys_pkg_id(c->initial_apicid, ht_mask_width) |
| 120 | c->cpu_core_id = phys_pkg_id(c->initial_apicid, ht_mask_width) | ||
| 121 | & core_select_mask; | 120 | & core_select_mask; |
| 122 | c->phys_proc_id = phys_pkg_id(c->initial_apicid, core_plus_mask_width); | 121 | c->phys_proc_id = apic->phys_pkg_id(c->initial_apicid, core_plus_mask_width); |
| 123 | /* | 122 | /* |
| 124 | * Reinit the apicid, now that we have extended initial_apicid. | 123 | * Reinit the apicid, now that we have extended initial_apicid. |
| 125 | */ | 124 | */ |
| 126 | c->apicid = phys_pkg_id(c->initial_apicid, 0); | 125 | c->apicid = apic->phys_pkg_id(c->initial_apicid, 0); |
| 127 | #else | 126 | |
| 128 | c->cpu_core_id = phys_pkg_id(ht_mask_width) & core_select_mask; | ||
| 129 | c->phys_proc_id = phys_pkg_id(core_plus_mask_width); | ||
| 130 | /* | ||
| 131 | * Reinit the apicid, now that we have extended initial_apicid. | ||
| 132 | */ | ||
| 133 | c->apicid = phys_pkg_id(0); | ||
| 134 | #endif | ||
| 135 | c->x86_max_cores = (core_level_siblings / smp_num_siblings); | 127 | c->x86_max_cores = (core_level_siblings / smp_num_siblings); |
| 136 | 128 | ||
| 137 | 129 | ||
| @@ -143,37 +135,3 @@ void __cpuinit detect_extended_topology(struct cpuinfo_x86 *c) | |||
| 143 | return; | 135 | return; |
| 144 | #endif | 136 | #endif |
| 145 | } | 137 | } |
| 146 | |||
| 147 | #ifdef CONFIG_X86_PAT | ||
| 148 | void __cpuinit validate_pat_support(struct cpuinfo_x86 *c) | ||
| 149 | { | ||
| 150 | if (!cpu_has_pat) | ||
| 151 | pat_disable("PAT not supported by CPU."); | ||
| 152 | |||
| 153 | switch (c->x86_vendor) { | ||
| 154 | case X86_VENDOR_INTEL: | ||
| 155 | /* | ||
| 156 | * There is a known erratum on Pentium III and Core Solo | ||
| 157 | * and Core Duo CPUs. | ||
| 158 | * " Page with PAT set to WC while associated MTRR is UC | ||
| 159 | * may consolidate to UC " | ||
| 160 | * Because of this erratum, it is better to stick with | ||
| 161 | * setting WC in MTRR rather than using PAT on these CPUs. | ||
| 162 | * | ||
| 163 | * Enable PAT WC only on P4, Core 2 or later CPUs. | ||
| 164 | */ | ||
| 165 | if (c->x86 > 0x6 || (c->x86 == 6 && c->x86_model >= 15)) | ||
| 166 | return; | ||
| 167 | |||
| 168 | pat_disable("PAT WC disabled due to known CPU erratum."); | ||
| 169 | return; | ||
| 170 | |||
| 171 | case X86_VENDOR_AMD: | ||
| 172 | case X86_VENDOR_CENTAUR: | ||
| 173 | case X86_VENDOR_TRANSMETA: | ||
| 174 | return; | ||
| 175 | } | ||
| 176 | |||
| 177 | pat_disable("PAT disabled. Not yet verified on this CPU type."); | ||
| 178 | } | ||
| 179 | #endif | ||
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index 7c878f6aa919..f47df59016c5 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c | |||
| @@ -5,6 +5,7 @@ | |||
| 5 | #include <asm/io.h> | 5 | #include <asm/io.h> |
| 6 | #include <asm/processor.h> | 6 | #include <asm/processor.h> |
| 7 | #include <asm/apic.h> | 7 | #include <asm/apic.h> |
| 8 | #include <asm/cpu.h> | ||
| 8 | 9 | ||
| 9 | #ifdef CONFIG_X86_64 | 10 | #ifdef CONFIG_X86_64 |
| 10 | # include <asm/numa_64.h> | 11 | # include <asm/numa_64.h> |
| @@ -12,8 +13,6 @@ | |||
| 12 | # include <asm/cacheflush.h> | 13 | # include <asm/cacheflush.h> |
| 13 | #endif | 14 | #endif |
| 14 | 15 | ||
| 15 | #include <mach_apic.h> | ||
| 16 | |||
| 17 | #include "cpu.h" | 16 | #include "cpu.h" |
| 18 | 17 | ||
| 19 | #ifdef CONFIG_X86_32 | 18 | #ifdef CONFIG_X86_32 |
| @@ -143,6 +142,55 @@ static void __cpuinit init_amd_k6(struct cpuinfo_x86 *c) | |||
| 143 | } | 142 | } |
| 144 | } | 143 | } |
| 145 | 144 | ||
| 145 | static void __cpuinit amd_k7_smp_check(struct cpuinfo_x86 *c) | ||
| 146 | { | ||
| 147 | #ifdef CONFIG_SMP | ||
| 148 | /* calling is from identify_secondary_cpu() ? */ | ||
| 149 | if (c->cpu_index == boot_cpu_id) | ||
| 150 | return; | ||
| 151 | |||
| 152 | /* | ||
| 153 | * Certain Athlons might work (for various values of 'work') in SMP | ||
| 154 | * but they are not certified as MP capable. | ||
| 155 | */ | ||
| 156 | /* Athlon 660/661 is valid. */ | ||
| 157 | if ((c->x86_model == 6) && ((c->x86_mask == 0) || | ||
| 158 | (c->x86_mask == 1))) | ||
| 159 | goto valid_k7; | ||
| 160 | |||
| 161 | /* Duron 670 is valid */ | ||
| 162 | if ((c->x86_model == 7) && (c->x86_mask == 0)) | ||
| 163 | goto valid_k7; | ||
| 164 | |||
| 165 | /* | ||
| 166 | * Athlon 662, Duron 671, and Athlon >model 7 have capability | ||
| 167 | * bit. It's worth noting that the A5 stepping (662) of some | ||
| 168 | * Athlon XP's have the MP bit set. | ||
| 169 | * See http://www.heise.de/newsticker/data/jow-18.10.01-000 for | ||
| 170 | * more. | ||
| 171 | */ | ||
| 172 | if (((c->x86_model == 6) && (c->x86_mask >= 2)) || | ||
| 173 | ((c->x86_model == 7) && (c->x86_mask >= 1)) || | ||
| 174 | (c->x86_model > 7)) | ||
| 175 | if (cpu_has_mp) | ||
| 176 | goto valid_k7; | ||
| 177 | |||
| 178 | /* If we get here, not a certified SMP capable AMD system. */ | ||
| 179 | |||
| 180 | /* | ||
| 181 | * Don't taint if we are running SMP kernel on a single non-MP | ||
| 182 | * approved Athlon | ||
| 183 | */ | ||
| 184 | WARN_ONCE(1, "WARNING: This combination of AMD" | ||
| 185 | "processors is not suitable for SMP.\n"); | ||
| 186 | if (!test_taint(TAINT_UNSAFE_SMP)) | ||
| 187 | add_taint(TAINT_UNSAFE_SMP); | ||
| 188 | |||
| 189 | valid_k7: | ||
| 190 | ; | ||
| 191 | #endif | ||
| 192 | } | ||
| 193 | |||
| 146 | static void __cpuinit init_amd_k7(struct cpuinfo_x86 *c) | 194 | static void __cpuinit init_amd_k7(struct cpuinfo_x86 *c) |
| 147 | { | 195 | { |
| 148 | u32 l, h; | 196 | u32 l, h; |
| @@ -177,6 +225,8 @@ static void __cpuinit init_amd_k7(struct cpuinfo_x86 *c) | |||
| 177 | } | 225 | } |
| 178 | 226 | ||
| 179 | set_cpu_cap(c, X86_FEATURE_K7); | 227 | set_cpu_cap(c, X86_FEATURE_K7); |
| 228 | |||
| 229 | amd_k7_smp_check(c); | ||
| 180 | } | 230 | } |
| 181 | #endif | 231 | #endif |
| 182 | 232 | ||
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index 83492b1f93b1..d7dd3c294e2a 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c | |||
| @@ -21,14 +21,14 @@ | |||
| 21 | #include <asm/asm.h> | 21 | #include <asm/asm.h> |
| 22 | #include <asm/numa.h> | 22 | #include <asm/numa.h> |
| 23 | #include <asm/smp.h> | 23 | #include <asm/smp.h> |
| 24 | #ifdef CONFIG_X86_LOCAL_APIC | 24 | #include <asm/cpu.h> |
| 25 | #include <asm/mpspec.h> | 25 | #include <asm/cpumask.h> |
| 26 | #include <asm/apic.h> | 26 | #include <asm/apic.h> |
| 27 | #include <mach_apic.h> | 27 | |
| 28 | #include <asm/genapic.h> | 28 | #ifdef CONFIG_X86_LOCAL_APIC |
| 29 | #include <asm/uv/uv.h> | ||
| 29 | #endif | 30 | #endif |
| 30 | 31 | ||
| 31 | #include <asm/pda.h> | ||
| 32 | #include <asm/pgtable.h> | 32 | #include <asm/pgtable.h> |
| 33 | #include <asm/processor.h> | 33 | #include <asm/processor.h> |
| 34 | #include <asm/desc.h> | 34 | #include <asm/desc.h> |
| @@ -37,11 +37,10 @@ | |||
| 37 | #include <asm/sections.h> | 37 | #include <asm/sections.h> |
| 38 | #include <asm/setup.h> | 38 | #include <asm/setup.h> |
| 39 | #include <asm/hypervisor.h> | 39 | #include <asm/hypervisor.h> |
| 40 | #include <asm/stackprotector.h> | ||
| 40 | 41 | ||
| 41 | #include "cpu.h" | 42 | #include "cpu.h" |
| 42 | 43 | ||
| 43 | #ifdef CONFIG_X86_64 | ||
| 44 | |||
| 45 | /* all of these masks are initialized in setup_cpu_local_masks() */ | 44 | /* all of these masks are initialized in setup_cpu_local_masks() */ |
| 46 | cpumask_var_t cpu_callin_mask; | 45 | cpumask_var_t cpu_callin_mask; |
| 47 | cpumask_var_t cpu_callout_mask; | 46 | cpumask_var_t cpu_callout_mask; |
| @@ -50,35 +49,34 @@ cpumask_var_t cpu_initialized_mask; | |||
| 50 | /* representing cpus for which sibling maps can be computed */ | 49 | /* representing cpus for which sibling maps can be computed */ |
| 51 | cpumask_var_t cpu_sibling_setup_mask; | 50 | cpumask_var_t cpu_sibling_setup_mask; |
| 52 | 51 | ||
| 53 | #else /* CONFIG_X86_32 */ | 52 | /* correctly size the local cpu masks */ |
| 54 | 53 | void __init setup_cpu_local_masks(void) | |
| 55 | cpumask_t cpu_callin_map; | 54 | { |
| 56 | cpumask_t cpu_callout_map; | 55 | alloc_bootmem_cpumask_var(&cpu_initialized_mask); |
| 57 | cpumask_t cpu_initialized; | 56 | alloc_bootmem_cpumask_var(&cpu_callin_mask); |
| 58 | cpumask_t cpu_sibling_setup_map; | 57 | alloc_bootmem_cpumask_var(&cpu_callout_mask); |
| 59 | 58 | alloc_bootmem_cpumask_var(&cpu_sibling_setup_mask); | |
| 60 | #endif /* CONFIG_X86_32 */ | 59 | } |
| 61 | |||
| 62 | 60 | ||
| 63 | static struct cpu_dev *this_cpu __cpuinitdata; | 61 | static struct cpu_dev *this_cpu __cpuinitdata; |
| 64 | 62 | ||
| 63 | DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = { | ||
| 65 | #ifdef CONFIG_X86_64 | 64 | #ifdef CONFIG_X86_64 |
| 66 | /* We need valid kernel segments for data and code in long mode too | 65 | /* |
| 67 | * IRET will check the segment types kkeil 2000/10/28 | 66 | * We need valid kernel segments for data and code in long mode too |
| 68 | * Also sysret mandates a special GDT layout | 67 | * IRET will check the segment types kkeil 2000/10/28 |
| 69 | */ | 68 | * Also sysret mandates a special GDT layout |
| 70 | /* The TLS descriptors are currently at a different place compared to i386. | 69 | * |
| 71 | Hopefully nobody expects them at a fixed place (Wine?) */ | 70 | * The TLS descriptors are currently at a different place compared to i386. |
| 72 | DEFINE_PER_CPU(struct gdt_page, gdt_page) = { .gdt = { | 71 | * Hopefully nobody expects them at a fixed place (Wine?) |
| 72 | */ | ||
| 73 | [GDT_ENTRY_KERNEL32_CS] = { { { 0x0000ffff, 0x00cf9b00 } } }, | 73 | [GDT_ENTRY_KERNEL32_CS] = { { { 0x0000ffff, 0x00cf9b00 } } }, |
| 74 | [GDT_ENTRY_KERNEL_CS] = { { { 0x0000ffff, 0x00af9b00 } } }, | 74 | [GDT_ENTRY_KERNEL_CS] = { { { 0x0000ffff, 0x00af9b00 } } }, |
| 75 | [GDT_ENTRY_KERNEL_DS] = { { { 0x0000ffff, 0x00cf9300 } } }, | 75 | [GDT_ENTRY_KERNEL_DS] = { { { 0x0000ffff, 0x00cf9300 } } }, |
| 76 | [GDT_ENTRY_DEFAULT_USER32_CS] = { { { 0x0000ffff, 0x00cffb00 } } }, | 76 | [GDT_ENTRY_DEFAULT_USER32_CS] = { { { 0x0000ffff, 0x00cffb00 } } }, |
| 77 | [GDT_ENTRY_DEFAULT_USER_DS] = { { { 0x0000ffff, 0x00cff300 } } }, | 77 | [GDT_ENTRY_DEFAULT_USER_DS] = { { { 0x0000ffff, 0x00cff300 } } }, |
| 78 | [GDT_ENTRY_DEFAULT_USER_CS] = { { { 0x0000ffff, 0x00affb00 } } }, | 78 | [GDT_ENTRY_DEFAULT_USER_CS] = { { { 0x0000ffff, 0x00affb00 } } }, |
| 79 | } }; | ||
| 80 | #else | 79 | #else |
| 81 | DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = { | ||
| 82 | [GDT_ENTRY_KERNEL_CS] = { { { 0x0000ffff, 0x00cf9a00 } } }, | 80 | [GDT_ENTRY_KERNEL_CS] = { { { 0x0000ffff, 0x00cf9a00 } } }, |
| 83 | [GDT_ENTRY_KERNEL_DS] = { { { 0x0000ffff, 0x00cf9200 } } }, | 81 | [GDT_ENTRY_KERNEL_DS] = { { { 0x0000ffff, 0x00cf9200 } } }, |
| 84 | [GDT_ENTRY_DEFAULT_USER_CS] = { { { 0x0000ffff, 0x00cffa00 } } }, | 82 | [GDT_ENTRY_DEFAULT_USER_CS] = { { { 0x0000ffff, 0x00cffa00 } } }, |
| @@ -110,9 +108,10 @@ DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = { | |||
| 110 | [GDT_ENTRY_APMBIOS_BASE+2] = { { { 0x0000ffff, 0x00409200 } } }, | 108 | [GDT_ENTRY_APMBIOS_BASE+2] = { { { 0x0000ffff, 0x00409200 } } }, |
| 111 | 109 | ||
| 112 | [GDT_ENTRY_ESPFIX_SS] = { { { 0x00000000, 0x00c09200 } } }, | 110 | [GDT_ENTRY_ESPFIX_SS] = { { { 0x00000000, 0x00c09200 } } }, |
| 113 | [GDT_ENTRY_PERCPU] = { { { 0x00000000, 0x00000000 } } }, | 111 | [GDT_ENTRY_PERCPU] = { { { 0x0000ffff, 0x00cf9200 } } }, |
| 114 | } }; | 112 | GDT_STACK_CANARY_INIT |
| 115 | #endif | 113 | #endif |
| 114 | } }; | ||
| 116 | EXPORT_PER_CPU_SYMBOL_GPL(gdt_page); | 115 | EXPORT_PER_CPU_SYMBOL_GPL(gdt_page); |
| 117 | 116 | ||
| 118 | #ifdef CONFIG_X86_32 | 117 | #ifdef CONFIG_X86_32 |
| @@ -213,6 +212,49 @@ static inline void squash_the_stupid_serial_number(struct cpuinfo_x86 *c) | |||
| 213 | #endif | 212 | #endif |
| 214 | 213 | ||
| 215 | /* | 214 | /* |
| 215 | * Some CPU features depend on higher CPUID levels, which may not always | ||
| 216 | * be available due to CPUID level capping or broken virtualization | ||
| 217 | * software. Add those features to this table to auto-disable them. | ||
| 218 | */ | ||
| 219 | struct cpuid_dependent_feature { | ||
| 220 | u32 feature; | ||
| 221 | u32 level; | ||
| 222 | }; | ||
| 223 | static const struct cpuid_dependent_feature __cpuinitconst | ||
| 224 | cpuid_dependent_features[] = { | ||
| 225 | { X86_FEATURE_MWAIT, 0x00000005 }, | ||
| 226 | { X86_FEATURE_DCA, 0x00000009 }, | ||
| 227 | { X86_FEATURE_XSAVE, 0x0000000d }, | ||
| 228 | { 0, 0 } | ||
| 229 | }; | ||
| 230 | |||
| 231 | static void __cpuinit filter_cpuid_features(struct cpuinfo_x86 *c, bool warn) | ||
| 232 | { | ||
| 233 | const struct cpuid_dependent_feature *df; | ||
| 234 | for (df = cpuid_dependent_features; df->feature; df++) { | ||
| 235 | /* | ||
| 236 | * Note: cpuid_level is set to -1 if unavailable, but | ||
| 237 | * extended_extended_level is set to 0 if unavailable | ||
| 238 | * and the legitimate extended levels are all negative | ||
| 239 | * when signed; hence the weird messing around with | ||
| 240 | * signs here... | ||
| 241 | */ | ||
| 242 | if (cpu_has(c, df->feature) && | ||
| 243 | ((s32)df->level < 0 ? | ||
| 244 | (u32)df->level > (u32)c->extended_cpuid_level : | ||
| 245 | (s32)df->level > (s32)c->cpuid_level)) { | ||
| 246 | clear_cpu_cap(c, df->feature); | ||
| 247 | if (warn) | ||
| 248 | printk(KERN_WARNING | ||
| 249 | "CPU: CPU feature %s disabled " | ||
| 250 | "due to lack of CPUID level 0x%x\n", | ||
| 251 | x86_cap_flags[df->feature], | ||
| 252 | df->level); | ||
| 253 | } | ||
| 254 | } | ||
| 255 | } | ||
| 256 | |||
| 257 | /* | ||
| 216 | * Naming convention should be: <Name> [(<Codename>)] | 258 | * Naming convention should be: <Name> [(<Codename>)] |
| 217 | * This table only is used unless init_<vendor>() below doesn't set it; | 259 | * This table only is used unless init_<vendor>() below doesn't set it; |
| 218 | * in particular, if CPUID levels 0x80000002..4 are supported, this isn't used | 260 | * in particular, if CPUID levels 0x80000002..4 are supported, this isn't used |
| @@ -242,18 +284,29 @@ static char __cpuinit *table_lookup_model(struct cpuinfo_x86 *c) | |||
| 242 | 284 | ||
| 243 | __u32 cleared_cpu_caps[NCAPINTS] __cpuinitdata; | 285 | __u32 cleared_cpu_caps[NCAPINTS] __cpuinitdata; |
| 244 | 286 | ||
| 287 | void load_percpu_segment(int cpu) | ||
| 288 | { | ||
| 289 | #ifdef CONFIG_X86_32 | ||
| 290 | loadsegment(fs, __KERNEL_PERCPU); | ||
| 291 | #else | ||
| 292 | loadsegment(gs, 0); | ||
| 293 | wrmsrl(MSR_GS_BASE, (unsigned long)per_cpu(irq_stack_union.gs_base, cpu)); | ||
| 294 | #endif | ||
| 295 | load_stack_canary_segment(); | ||
| 296 | } | ||
| 297 | |||
| 245 | /* Current gdt points %fs at the "master" per-cpu area: after this, | 298 | /* Current gdt points %fs at the "master" per-cpu area: after this, |
| 246 | * it's on the real one. */ | 299 | * it's on the real one. */ |
| 247 | void switch_to_new_gdt(void) | 300 | void switch_to_new_gdt(int cpu) |
| 248 | { | 301 | { |
| 249 | struct desc_ptr gdt_descr; | 302 | struct desc_ptr gdt_descr; |
| 250 | 303 | ||
| 251 | gdt_descr.address = (long)get_cpu_gdt_table(smp_processor_id()); | 304 | gdt_descr.address = (long)get_cpu_gdt_table(cpu); |
| 252 | gdt_descr.size = GDT_SIZE - 1; | 305 | gdt_descr.size = GDT_SIZE - 1; |
| 253 | load_gdt(&gdt_descr); | 306 | load_gdt(&gdt_descr); |
| 254 | #ifdef CONFIG_X86_32 | 307 | /* Reload the per-cpu base */ |
| 255 | asm("mov %0, %%fs" : : "r" (__KERNEL_PERCPU) : "memory"); | 308 | |
| 256 | #endif | 309 | load_percpu_segment(cpu); |
| 257 | } | 310 | } |
| 258 | 311 | ||
| 259 | static struct cpu_dev *cpu_devs[X86_VENDOR_NUM] = {}; | 312 | static struct cpu_dev *cpu_devs[X86_VENDOR_NUM] = {}; |
| @@ -383,11 +436,7 @@ void __cpuinit detect_ht(struct cpuinfo_x86 *c) | |||
| 383 | } | 436 | } |
| 384 | 437 | ||
| 385 | index_msb = get_count_order(smp_num_siblings); | 438 | index_msb = get_count_order(smp_num_siblings); |
| 386 | #ifdef CONFIG_X86_64 | 439 | c->phys_proc_id = apic->phys_pkg_id(c->initial_apicid, index_msb); |
| 387 | c->phys_proc_id = phys_pkg_id(index_msb); | ||
| 388 | #else | ||
| 389 | c->phys_proc_id = phys_pkg_id(c->initial_apicid, index_msb); | ||
| 390 | #endif | ||
| 391 | 440 | ||
| 392 | smp_num_siblings = smp_num_siblings / c->x86_max_cores; | 441 | smp_num_siblings = smp_num_siblings / c->x86_max_cores; |
| 393 | 442 | ||
| @@ -395,13 +444,8 @@ void __cpuinit detect_ht(struct cpuinfo_x86 *c) | |||
| 395 | 444 | ||
| 396 | core_bits = get_count_order(c->x86_max_cores); | 445 | core_bits = get_count_order(c->x86_max_cores); |
| 397 | 446 | ||
| 398 | #ifdef CONFIG_X86_64 | 447 | c->cpu_core_id = apic->phys_pkg_id(c->initial_apicid, index_msb) & |
| 399 | c->cpu_core_id = phys_pkg_id(index_msb) & | ||
| 400 | ((1 << core_bits) - 1); | ||
| 401 | #else | ||
| 402 | c->cpu_core_id = phys_pkg_id(c->initial_apicid, index_msb) & | ||
| 403 | ((1 << core_bits) - 1); | 448 | ((1 << core_bits) - 1); |
| 404 | #endif | ||
| 405 | } | 449 | } |
| 406 | 450 | ||
| 407 | out: | 451 | out: |
| @@ -570,11 +614,10 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c) | |||
| 570 | if (this_cpu->c_early_init) | 614 | if (this_cpu->c_early_init) |
| 571 | this_cpu->c_early_init(c); | 615 | this_cpu->c_early_init(c); |
| 572 | 616 | ||
| 573 | validate_pat_support(c); | ||
| 574 | |||
| 575 | #ifdef CONFIG_SMP | 617 | #ifdef CONFIG_SMP |
| 576 | c->cpu_index = boot_cpu_id; | 618 | c->cpu_index = boot_cpu_id; |
| 577 | #endif | 619 | #endif |
| 620 | filter_cpuid_features(c, false); | ||
| 578 | } | 621 | } |
| 579 | 622 | ||
| 580 | void __init early_cpu_init(void) | 623 | void __init early_cpu_init(void) |
| @@ -637,7 +680,7 @@ static void __cpuinit generic_identify(struct cpuinfo_x86 *c) | |||
| 637 | c->initial_apicid = (cpuid_ebx(1) >> 24) & 0xFF; | 680 | c->initial_apicid = (cpuid_ebx(1) >> 24) & 0xFF; |
| 638 | #ifdef CONFIG_X86_32 | 681 | #ifdef CONFIG_X86_32 |
| 639 | # ifdef CONFIG_X86_HT | 682 | # ifdef CONFIG_X86_HT |
| 640 | c->apicid = phys_pkg_id(c->initial_apicid, 0); | 683 | c->apicid = apic->phys_pkg_id(c->initial_apicid, 0); |
| 641 | # else | 684 | # else |
| 642 | c->apicid = c->initial_apicid; | 685 | c->apicid = c->initial_apicid; |
| 643 | # endif | 686 | # endif |
| @@ -684,7 +727,7 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c) | |||
| 684 | this_cpu->c_identify(c); | 727 | this_cpu->c_identify(c); |
| 685 | 728 | ||
| 686 | #ifdef CONFIG_X86_64 | 729 | #ifdef CONFIG_X86_64 |
| 687 | c->apicid = phys_pkg_id(0); | 730 | c->apicid = apic->phys_pkg_id(c->initial_apicid, 0); |
| 688 | #endif | 731 | #endif |
| 689 | 732 | ||
| 690 | /* | 733 | /* |
| @@ -708,6 +751,9 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c) | |||
| 708 | * we do "generic changes." | 751 | * we do "generic changes." |
| 709 | */ | 752 | */ |
| 710 | 753 | ||
| 754 | /* Filter out anything that depends on CPUID levels we don't have */ | ||
| 755 | filter_cpuid_features(c, true); | ||
| 756 | |||
| 711 | /* If the model name is still unset, do table lookup. */ | 757 | /* If the model name is still unset, do table lookup. */ |
| 712 | if (!c->x86_model_id[0]) { | 758 | if (!c->x86_model_id[0]) { |
| 713 | char *p; | 759 | char *p; |
| @@ -766,6 +812,7 @@ static void vgetcpu_set_mode(void) | |||
| 766 | void __init identify_boot_cpu(void) | 812 | void __init identify_boot_cpu(void) |
| 767 | { | 813 | { |
| 768 | identify_cpu(&boot_cpu_data); | 814 | identify_cpu(&boot_cpu_data); |
| 815 | init_c1e_mask(); | ||
| 769 | #ifdef CONFIG_X86_32 | 816 | #ifdef CONFIG_X86_32 |
| 770 | sysenter_setup(); | 817 | sysenter_setup(); |
| 771 | enable_sep_cpu(); | 818 | enable_sep_cpu(); |
| @@ -877,54 +924,22 @@ static __init int setup_disablecpuid(char *arg) | |||
| 877 | __setup("clearcpuid=", setup_disablecpuid); | 924 | __setup("clearcpuid=", setup_disablecpuid); |
| 878 | 925 | ||
| 879 | #ifdef CONFIG_X86_64 | 926 | #ifdef CONFIG_X86_64 |
| 880 | struct x8664_pda **_cpu_pda __read_mostly; | ||
| 881 | EXPORT_SYMBOL(_cpu_pda); | ||
| 882 | |||
| 883 | struct desc_ptr idt_descr = { 256 * 16 - 1, (unsigned long) idt_table }; | 927 | struct desc_ptr idt_descr = { 256 * 16 - 1, (unsigned long) idt_table }; |
| 884 | 928 | ||
| 885 | static char boot_cpu_stack[IRQSTACKSIZE] __page_aligned_bss; | 929 | DEFINE_PER_CPU_FIRST(union irq_stack_union, |
| 930 | irq_stack_union) __aligned(PAGE_SIZE); | ||
| 931 | DEFINE_PER_CPU(char *, irq_stack_ptr) = | ||
| 932 | init_per_cpu_var(irq_stack_union.irq_stack) + IRQ_STACK_SIZE - 64; | ||
| 886 | 933 | ||
| 887 | void __cpuinit pda_init(int cpu) | 934 | DEFINE_PER_CPU(unsigned long, kernel_stack) = |
| 888 | { | 935 | (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE; |
| 889 | struct x8664_pda *pda = cpu_pda(cpu); | 936 | EXPORT_PER_CPU_SYMBOL(kernel_stack); |
| 890 | 937 | ||
| 891 | /* Setup up data that may be needed in __get_free_pages early */ | 938 | DEFINE_PER_CPU(unsigned int, irq_count) = -1; |
| 892 | loadsegment(fs, 0); | ||
| 893 | loadsegment(gs, 0); | ||
| 894 | /* Memory clobbers used to order PDA accessed */ | ||
| 895 | mb(); | ||
| 896 | wrmsrl(MSR_GS_BASE, pda); | ||
| 897 | mb(); | ||
| 898 | |||
| 899 | pda->cpunumber = cpu; | ||
| 900 | pda->irqcount = -1; | ||
| 901 | pda->kernelstack = (unsigned long)stack_thread_info() - | ||
| 902 | PDA_STACKOFFSET + THREAD_SIZE; | ||
| 903 | pda->active_mm = &init_mm; | ||
| 904 | pda->mmu_state = 0; | ||
| 905 | |||
| 906 | if (cpu == 0) { | ||
| 907 | /* others are initialized in smpboot.c */ | ||
| 908 | pda->pcurrent = &init_task; | ||
| 909 | pda->irqstackptr = boot_cpu_stack; | ||
| 910 | pda->irqstackptr += IRQSTACKSIZE - 64; | ||
| 911 | } else { | ||
| 912 | if (!pda->irqstackptr) { | ||
| 913 | pda->irqstackptr = (char *) | ||
| 914 | __get_free_pages(GFP_ATOMIC, IRQSTACK_ORDER); | ||
| 915 | if (!pda->irqstackptr) | ||
| 916 | panic("cannot allocate irqstack for cpu %d", | ||
| 917 | cpu); | ||
| 918 | pda->irqstackptr += IRQSTACKSIZE - 64; | ||
| 919 | } | ||
| 920 | |||
| 921 | if (pda->nodenumber == 0 && cpu_to_node(cpu) != NUMA_NO_NODE) | ||
| 922 | pda->nodenumber = cpu_to_node(cpu); | ||
| 923 | } | ||
| 924 | } | ||
| 925 | 939 | ||
| 926 | static char boot_exception_stacks[(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + | 940 | static DEFINE_PER_CPU_PAGE_ALIGNED(char, exception_stacks |
| 927 | DEBUG_STKSZ] __page_aligned_bss; | 941 | [(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ]) |
| 942 | __aligned(PAGE_SIZE); | ||
| 928 | 943 | ||
| 929 | extern asmlinkage void ignore_sysret(void); | 944 | extern asmlinkage void ignore_sysret(void); |
| 930 | 945 | ||
| @@ -957,16 +972,21 @@ unsigned long kernel_eflags; | |||
| 957 | */ | 972 | */ |
| 958 | DEFINE_PER_CPU(struct orig_ist, orig_ist); | 973 | DEFINE_PER_CPU(struct orig_ist, orig_ist); |
| 959 | 974 | ||
| 960 | #else | 975 | #else /* x86_64 */ |
| 976 | |||
| 977 | #ifdef CONFIG_CC_STACKPROTECTOR | ||
| 978 | DEFINE_PER_CPU(unsigned long, stack_canary); | ||
| 979 | #endif | ||
| 961 | 980 | ||
| 962 | /* Make sure %fs is initialized properly in idle threads */ | 981 | /* Make sure %fs and %gs are initialized properly in idle threads */ |
| 963 | struct pt_regs * __cpuinit idle_regs(struct pt_regs *regs) | 982 | struct pt_regs * __cpuinit idle_regs(struct pt_regs *regs) |
| 964 | { | 983 | { |
| 965 | memset(regs, 0, sizeof(struct pt_regs)); | 984 | memset(regs, 0, sizeof(struct pt_regs)); |
| 966 | regs->fs = __KERNEL_PERCPU; | 985 | regs->fs = __KERNEL_PERCPU; |
| 986 | regs->gs = __KERNEL_STACK_CANARY; | ||
| 967 | return regs; | 987 | return regs; |
| 968 | } | 988 | } |
| 969 | #endif | 989 | #endif /* x86_64 */ |
| 970 | 990 | ||
| 971 | /* | 991 | /* |
| 972 | * cpu_init() initializes state that is per-CPU. Some data is already | 992 | * cpu_init() initializes state that is per-CPU. Some data is already |
| @@ -982,15 +1002,14 @@ void __cpuinit cpu_init(void) | |||
| 982 | struct tss_struct *t = &per_cpu(init_tss, cpu); | 1002 | struct tss_struct *t = &per_cpu(init_tss, cpu); |
| 983 | struct orig_ist *orig_ist = &per_cpu(orig_ist, cpu); | 1003 | struct orig_ist *orig_ist = &per_cpu(orig_ist, cpu); |
| 984 | unsigned long v; | 1004 | unsigned long v; |
| 985 | char *estacks = NULL; | ||
| 986 | struct task_struct *me; | 1005 | struct task_struct *me; |
| 987 | int i; | 1006 | int i; |
| 988 | 1007 | ||
| 989 | /* CPU 0 is initialised in head64.c */ | 1008 | #ifdef CONFIG_NUMA |
| 990 | if (cpu != 0) | 1009 | if (cpu != 0 && percpu_read(node_number) == 0 && |
| 991 | pda_init(cpu); | 1010 | cpu_to_node(cpu) != NUMA_NO_NODE) |
| 992 | else | 1011 | percpu_write(node_number, cpu_to_node(cpu)); |
| 993 | estacks = boot_exception_stacks; | 1012 | #endif |
| 994 | 1013 | ||
| 995 | me = current; | 1014 | me = current; |
| 996 | 1015 | ||
| @@ -1006,7 +1025,9 @@ void __cpuinit cpu_init(void) | |||
| 1006 | * and set up the GDT descriptor: | 1025 | * and set up the GDT descriptor: |
| 1007 | */ | 1026 | */ |
| 1008 | 1027 | ||
| 1009 | switch_to_new_gdt(); | 1028 | switch_to_new_gdt(cpu); |
| 1029 | loadsegment(fs, 0); | ||
| 1030 | |||
| 1010 | load_idt((const struct desc_ptr *)&idt_descr); | 1031 | load_idt((const struct desc_ptr *)&idt_descr); |
| 1011 | 1032 | ||
| 1012 | memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8); | 1033 | memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8); |
| @@ -1017,25 +1038,20 @@ void __cpuinit cpu_init(void) | |||
| 1017 | barrier(); | 1038 | barrier(); |
| 1018 | 1039 | ||
| 1019 | check_efer(); | 1040 | check_efer(); |
| 1020 | if (cpu != 0 && x2apic) | 1041 | if (cpu != 0) |
| 1021 | enable_x2apic(); | 1042 | enable_x2apic(); |
| 1022 | 1043 | ||
| 1023 | /* | 1044 | /* |
| 1024 | * set up and load the per-CPU TSS | 1045 | * set up and load the per-CPU TSS |
| 1025 | */ | 1046 | */ |
| 1026 | if (!orig_ist->ist[0]) { | 1047 | if (!orig_ist->ist[0]) { |
| 1027 | static const unsigned int order[N_EXCEPTION_STACKS] = { | 1048 | static const unsigned int sizes[N_EXCEPTION_STACKS] = { |
| 1028 | [0 ... N_EXCEPTION_STACKS - 1] = EXCEPTION_STACK_ORDER, | 1049 | [0 ... N_EXCEPTION_STACKS - 1] = EXCEPTION_STKSZ, |
| 1029 | [DEBUG_STACK - 1] = DEBUG_STACK_ORDER | 1050 | [DEBUG_STACK - 1] = DEBUG_STKSZ |
| 1030 | }; | 1051 | }; |
| 1052 | char *estacks = per_cpu(exception_stacks, cpu); | ||
| 1031 | for (v = 0; v < N_EXCEPTION_STACKS; v++) { | 1053 | for (v = 0; v < N_EXCEPTION_STACKS; v++) { |
| 1032 | if (cpu) { | 1054 | estacks += sizes[v]; |
| 1033 | estacks = (char *)__get_free_pages(GFP_ATOMIC, order[v]); | ||
| 1034 | if (!estacks) | ||
| 1035 | panic("Cannot allocate exception " | ||
| 1036 | "stack %ld %d\n", v, cpu); | ||
| 1037 | } | ||
| 1038 | estacks += PAGE_SIZE << order[v]; | ||
| 1039 | orig_ist->ist[v] = t->x86_tss.ist[v] = | 1055 | orig_ist->ist[v] = t->x86_tss.ist[v] = |
| 1040 | (unsigned long)estacks; | 1056 | (unsigned long)estacks; |
| 1041 | } | 1057 | } |
| @@ -1069,22 +1085,19 @@ void __cpuinit cpu_init(void) | |||
| 1069 | */ | 1085 | */ |
| 1070 | if (kgdb_connected && arch_kgdb_ops.correct_hw_break) | 1086 | if (kgdb_connected && arch_kgdb_ops.correct_hw_break) |
| 1071 | arch_kgdb_ops.correct_hw_break(); | 1087 | arch_kgdb_ops.correct_hw_break(); |
| 1072 | else { | 1088 | else |
| 1073 | #endif | 1089 | #endif |
| 1074 | /* | 1090 | { |
| 1075 | * Clear all 6 debug registers: | 1091 | /* |
| 1076 | */ | 1092 | * Clear all 6 debug registers: |
| 1077 | 1093 | */ | |
| 1078 | set_debugreg(0UL, 0); | 1094 | set_debugreg(0UL, 0); |
| 1079 | set_debugreg(0UL, 1); | 1095 | set_debugreg(0UL, 1); |
| 1080 | set_debugreg(0UL, 2); | 1096 | set_debugreg(0UL, 2); |
| 1081 | set_debugreg(0UL, 3); | 1097 | set_debugreg(0UL, 3); |
| 1082 | set_debugreg(0UL, 6); | 1098 | set_debugreg(0UL, 6); |
| 1083 | set_debugreg(0UL, 7); | 1099 | set_debugreg(0UL, 7); |
| 1084 | #ifdef CONFIG_KGDB | ||
| 1085 | /* If the kgdb is connected no debug regs should be altered. */ | ||
| 1086 | } | 1100 | } |
| 1087 | #endif | ||
| 1088 | 1101 | ||
| 1089 | fpu_init(); | 1102 | fpu_init(); |
| 1090 | 1103 | ||
| @@ -1114,7 +1127,7 @@ void __cpuinit cpu_init(void) | |||
| 1114 | clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE); | 1127 | clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE); |
| 1115 | 1128 | ||
| 1116 | load_idt(&idt_descr); | 1129 | load_idt(&idt_descr); |
| 1117 | switch_to_new_gdt(); | 1130 | switch_to_new_gdt(cpu); |
| 1118 | 1131 | ||
| 1119 | /* | 1132 | /* |
| 1120 | * Set up and load the per-CPU TSS and LDT | 1133 | * Set up and load the per-CPU TSS and LDT |
| @@ -1135,9 +1148,6 @@ void __cpuinit cpu_init(void) | |||
| 1135 | __set_tss_desc(cpu, GDT_ENTRY_DOUBLEFAULT_TSS, &doublefault_tss); | 1148 | __set_tss_desc(cpu, GDT_ENTRY_DOUBLEFAULT_TSS, &doublefault_tss); |
| 1136 | #endif | 1149 | #endif |
| 1137 | 1150 | ||
| 1138 | /* Clear %gs. */ | ||
| 1139 | asm volatile ("mov %0, %%gs" : : "r" (0)); | ||
| 1140 | |||
| 1141 | /* Clear all 6 debug registers: */ | 1151 | /* Clear all 6 debug registers: */ |
| 1142 | set_debugreg(0, 0); | 1152 | set_debugreg(0, 0); |
| 1143 | set_debugreg(0, 1); | 1153 | set_debugreg(0, 1); |
diff --git a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c index 4b1c319d30c3..22590cf688ae 100644 --- a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c +++ b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c | |||
| @@ -601,7 +601,7 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy) | |||
| 601 | if (!data) | 601 | if (!data) |
| 602 | return -ENOMEM; | 602 | return -ENOMEM; |
| 603 | 603 | ||
| 604 | data->acpi_data = percpu_ptr(acpi_perf_data, cpu); | 604 | data->acpi_data = per_cpu_ptr(acpi_perf_data, cpu); |
| 605 | per_cpu(drv_data, cpu) = data; | 605 | per_cpu(drv_data, cpu) = data; |
| 606 | 606 | ||
| 607 | if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) | 607 | if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) |
diff --git a/arch/x86/kernel/cpu/cpufreq/e_powersaver.c b/arch/x86/kernel/cpu/cpufreq/e_powersaver.c index c2f930d86640..41ab3f064cb1 100644 --- a/arch/x86/kernel/cpu/cpufreq/e_powersaver.c +++ b/arch/x86/kernel/cpu/cpufreq/e_powersaver.c | |||
| @@ -204,12 +204,12 @@ static int eps_cpu_init(struct cpufreq_policy *policy) | |||
| 204 | } | 204 | } |
| 205 | /* Enable Enhanced PowerSaver */ | 205 | /* Enable Enhanced PowerSaver */ |
| 206 | rdmsrl(MSR_IA32_MISC_ENABLE, val); | 206 | rdmsrl(MSR_IA32_MISC_ENABLE, val); |
| 207 | if (!(val & 1 << 16)) { | 207 | if (!(val & MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP)) { |
| 208 | val |= 1 << 16; | 208 | val |= MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP; |
| 209 | wrmsrl(MSR_IA32_MISC_ENABLE, val); | 209 | wrmsrl(MSR_IA32_MISC_ENABLE, val); |
| 210 | /* Can be locked at 0 */ | 210 | /* Can be locked at 0 */ |
| 211 | rdmsrl(MSR_IA32_MISC_ENABLE, val); | 211 | rdmsrl(MSR_IA32_MISC_ENABLE, val); |
| 212 | if (!(val & 1 << 16)) { | 212 | if (!(val & MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP)) { |
| 213 | printk(KERN_INFO "eps: Can't enable Enhanced PowerSaver\n"); | 213 | printk(KERN_INFO "eps: Can't enable Enhanced PowerSaver\n"); |
| 214 | return -ENODEV; | 214 | return -ENODEV; |
| 215 | } | 215 | } |
diff --git a/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c b/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c index 3178c3acd97e..d8341d17c189 100644 --- a/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c +++ b/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c | |||
| @@ -203,7 +203,7 @@ static int cpufreq_p4_cpu_init(struct cpufreq_policy *policy) | |||
| 203 | unsigned int i; | 203 | unsigned int i; |
| 204 | 204 | ||
| 205 | #ifdef CONFIG_SMP | 205 | #ifdef CONFIG_SMP |
| 206 | cpumask_copy(policy->cpus, &per_cpu(cpu_sibling_map, policy->cpu)); | 206 | cpumask_copy(policy->cpus, cpu_sibling_mask(policy->cpu)); |
| 207 | #endif | 207 | #endif |
| 208 | 208 | ||
| 209 | /* Errata workaround */ | 209 | /* Errata workaround */ |
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c index 6428aa17b40e..e8fd76f98883 100644 --- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c +++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c | |||
| @@ -56,7 +56,10 @@ static DEFINE_PER_CPU(struct powernow_k8_data *, powernow_data); | |||
| 56 | static int cpu_family = CPU_OPTERON; | 56 | static int cpu_family = CPU_OPTERON; |
| 57 | 57 | ||
| 58 | #ifndef CONFIG_SMP | 58 | #ifndef CONFIG_SMP |
| 59 | DEFINE_PER_CPU(cpumask_t, cpu_core_map); | 59 | static inline const struct cpumask *cpu_core_mask(int cpu) |
| 60 | { | ||
| 61 | return cpumask_of(0); | ||
| 62 | } | ||
| 60 | #endif | 63 | #endif |
| 61 | 64 | ||
| 62 | /* Return a frequency in MHz, given an input fid */ | 65 | /* Return a frequency in MHz, given an input fid */ |
| @@ -654,7 +657,7 @@ static int fill_powernow_table(struct powernow_k8_data *data, struct pst_s *pst, | |||
| 654 | 657 | ||
| 655 | dprintk("cfid 0x%x, cvid 0x%x\n", data->currfid, data->currvid); | 658 | dprintk("cfid 0x%x, cvid 0x%x\n", data->currfid, data->currvid); |
| 656 | data->powernow_table = powernow_table; | 659 | data->powernow_table = powernow_table; |
| 657 | if (first_cpu(per_cpu(cpu_core_map, data->cpu)) == data->cpu) | 660 | if (cpumask_first(cpu_core_mask(data->cpu)) == data->cpu) |
| 658 | print_basics(data); | 661 | print_basics(data); |
| 659 | 662 | ||
| 660 | for (j = 0; j < data->numps; j++) | 663 | for (j = 0; j < data->numps; j++) |
| @@ -808,7 +811,7 @@ static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data) | |||
| 808 | 811 | ||
| 809 | /* fill in data */ | 812 | /* fill in data */ |
| 810 | data->numps = data->acpi_data.state_count; | 813 | data->numps = data->acpi_data.state_count; |
| 811 | if (first_cpu(per_cpu(cpu_core_map, data->cpu)) == data->cpu) | 814 | if (cpumask_first(cpu_core_mask(data->cpu)) == data->cpu) |
| 812 | print_basics(data); | 815 | print_basics(data); |
| 813 | powernow_k8_acpi_pst_values(data, 0); | 816 | powernow_k8_acpi_pst_values(data, 0); |
| 814 | 817 | ||
| @@ -1224,7 +1227,7 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol) | |||
| 1224 | if (cpu_family == CPU_HW_PSTATE) | 1227 | if (cpu_family == CPU_HW_PSTATE) |
| 1225 | cpumask_copy(pol->cpus, cpumask_of(pol->cpu)); | 1228 | cpumask_copy(pol->cpus, cpumask_of(pol->cpu)); |
| 1226 | else | 1229 | else |
| 1227 | cpumask_copy(pol->cpus, &per_cpu(cpu_core_map, pol->cpu)); | 1230 | cpumask_copy(pol->cpus, cpu_core_mask(pol->cpu)); |
| 1228 | data->available_cores = pol->cpus; | 1231 | data->available_cores = pol->cpus; |
| 1229 | 1232 | ||
| 1230 | if (cpu_family == CPU_HW_PSTATE) | 1233 | if (cpu_family == CPU_HW_PSTATE) |
| @@ -1286,7 +1289,7 @@ static unsigned int powernowk8_get (unsigned int cpu) | |||
| 1286 | unsigned int khz = 0; | 1289 | unsigned int khz = 0; |
| 1287 | unsigned int first; | 1290 | unsigned int first; |
| 1288 | 1291 | ||
| 1289 | first = first_cpu(per_cpu(cpu_core_map, cpu)); | 1292 | first = cpumask_first(cpu_core_mask(cpu)); |
| 1290 | data = per_cpu(powernow_data, first); | 1293 | data = per_cpu(powernow_data, first); |
| 1291 | 1294 | ||
| 1292 | if (!data) | 1295 | if (!data) |
diff --git a/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c b/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c index f08998278a3a..c9f1fdc02830 100644 --- a/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c +++ b/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c | |||
| @@ -390,14 +390,14 @@ static int centrino_cpu_init(struct cpufreq_policy *policy) | |||
| 390 | enable it if not. */ | 390 | enable it if not. */ |
| 391 | rdmsr(MSR_IA32_MISC_ENABLE, l, h); | 391 | rdmsr(MSR_IA32_MISC_ENABLE, l, h); |
| 392 | 392 | ||
| 393 | if (!(l & (1<<16))) { | 393 | if (!(l & MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP)) { |
| 394 | l |= (1<<16); | 394 | l |= MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP; |
| 395 | dprintk("trying to enable Enhanced SpeedStep (%x)\n", l); | 395 | dprintk("trying to enable Enhanced SpeedStep (%x)\n", l); |
| 396 | wrmsr(MSR_IA32_MISC_ENABLE, l, h); | 396 | wrmsr(MSR_IA32_MISC_ENABLE, l, h); |
| 397 | 397 | ||
| 398 | /* check to see if it stuck */ | 398 | /* check to see if it stuck */ |
| 399 | rdmsr(MSR_IA32_MISC_ENABLE, l, h); | 399 | rdmsr(MSR_IA32_MISC_ENABLE, l, h); |
| 400 | if (!(l & (1<<16))) { | 400 | if (!(l & MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP)) { |
| 401 | printk(KERN_INFO PFX | 401 | printk(KERN_INFO PFX |
| 402 | "couldn't enable Enhanced SpeedStep\n"); | 402 | "couldn't enable Enhanced SpeedStep\n"); |
| 403 | return -ENODEV; | 403 | return -ENODEV; |
diff --git a/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c b/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c index dedc1e98f168..1f0ec83d343b 100644 --- a/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c +++ b/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c | |||
| @@ -322,7 +322,7 @@ static int speedstep_cpu_init(struct cpufreq_policy *policy) | |||
| 322 | 322 | ||
| 323 | /* only run on CPU to be set, or on its sibling */ | 323 | /* only run on CPU to be set, or on its sibling */ |
| 324 | #ifdef CONFIG_SMP | 324 | #ifdef CONFIG_SMP |
| 325 | cpumask_copy(policy->cpus, &per_cpu(cpu_sibling_map, policy->cpu)); | 325 | cpumask_copy(policy->cpus, cpu_sibling_mask(policy->cpu)); |
| 326 | #endif | 326 | #endif |
| 327 | 327 | ||
| 328 | cpus_allowed = current->cpus_allowed; | 328 | cpus_allowed = current->cpus_allowed; |
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c index 24ff26a38ade..191117f1ad51 100644 --- a/arch/x86/kernel/cpu/intel.c +++ b/arch/x86/kernel/cpu/intel.c | |||
| @@ -13,6 +13,7 @@ | |||
| 13 | #include <asm/uaccess.h> | 13 | #include <asm/uaccess.h> |
| 14 | #include <asm/ds.h> | 14 | #include <asm/ds.h> |
| 15 | #include <asm/bugs.h> | 15 | #include <asm/bugs.h> |
| 16 | #include <asm/cpu.h> | ||
| 16 | 17 | ||
| 17 | #ifdef CONFIG_X86_64 | 18 | #ifdef CONFIG_X86_64 |
| 18 | #include <asm/topology.h> | 19 | #include <asm/topology.h> |
| @@ -24,7 +25,6 @@ | |||
| 24 | #ifdef CONFIG_X86_LOCAL_APIC | 25 | #ifdef CONFIG_X86_LOCAL_APIC |
| 25 | #include <asm/mpspec.h> | 26 | #include <asm/mpspec.h> |
| 26 | #include <asm/apic.h> | 27 | #include <asm/apic.h> |
| 27 | #include <mach_apic.h> | ||
| 28 | #endif | 28 | #endif |
| 29 | 29 | ||
| 30 | static void __cpuinit early_init_intel(struct cpuinfo_x86 *c) | 30 | static void __cpuinit early_init_intel(struct cpuinfo_x86 *c) |
| @@ -63,6 +63,18 @@ static void __cpuinit early_init_intel(struct cpuinfo_x86 *c) | |||
| 63 | set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC); | 63 | set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC); |
| 64 | } | 64 | } |
| 65 | 65 | ||
| 66 | /* | ||
| 67 | * There is a known erratum on Pentium III and Core Solo | ||
| 68 | * and Core Duo CPUs. | ||
| 69 | * " Page with PAT set to WC while associated MTRR is UC | ||
| 70 | * may consolidate to UC " | ||
| 71 | * Because of this erratum, it is better to stick with | ||
| 72 | * setting WC in MTRR rather than using PAT on these CPUs. | ||
| 73 | * | ||
| 74 | * Enable PAT WC only on P4, Core 2 or later CPUs. | ||
| 75 | */ | ||
| 76 | if (c->x86 == 6 && c->x86_model < 15) | ||
| 77 | clear_cpu_cap(c, X86_FEATURE_PAT); | ||
| 66 | } | 78 | } |
| 67 | 79 | ||
| 68 | #ifdef CONFIG_X86_32 | 80 | #ifdef CONFIG_X86_32 |
| @@ -99,6 +111,28 @@ static void __cpuinit trap_init_f00f_bug(void) | |||
| 99 | } | 111 | } |
| 100 | #endif | 112 | #endif |
| 101 | 113 | ||
| 114 | static void __cpuinit intel_smp_check(struct cpuinfo_x86 *c) | ||
| 115 | { | ||
| 116 | #ifdef CONFIG_SMP | ||
| 117 | /* calling is from identify_secondary_cpu() ? */ | ||
| 118 | if (c->cpu_index == boot_cpu_id) | ||
| 119 | return; | ||
| 120 | |||
| 121 | /* | ||
| 122 | * Mask B, Pentium, but not Pentium MMX | ||
| 123 | */ | ||
| 124 | if (c->x86 == 5 && | ||
| 125 | c->x86_mask >= 1 && c->x86_mask <= 4 && | ||
| 126 | c->x86_model <= 3) { | ||
| 127 | /* | ||
| 128 | * Remember we have B step Pentia with bugs | ||
| 129 | */ | ||
| 130 | WARN_ONCE(1, "WARNING: SMP operation may be unreliable" | ||
| 131 | "with B stepping processors.\n"); | ||
| 132 | } | ||
| 133 | #endif | ||
| 134 | } | ||
| 135 | |||
| 102 | static void __cpuinit intel_workarounds(struct cpuinfo_x86 *c) | 136 | static void __cpuinit intel_workarounds(struct cpuinfo_x86 *c) |
| 103 | { | 137 | { |
| 104 | unsigned long lo, hi; | 138 | unsigned long lo, hi; |
| @@ -135,10 +169,10 @@ static void __cpuinit intel_workarounds(struct cpuinfo_x86 *c) | |||
| 135 | */ | 169 | */ |
| 136 | if ((c->x86 == 15) && (c->x86_model == 1) && (c->x86_mask == 1)) { | 170 | if ((c->x86 == 15) && (c->x86_model == 1) && (c->x86_mask == 1)) { |
| 137 | rdmsr(MSR_IA32_MISC_ENABLE, lo, hi); | 171 | rdmsr(MSR_IA32_MISC_ENABLE, lo, hi); |
| 138 | if ((lo & (1<<9)) == 0) { | 172 | if ((lo & MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE) == 0) { |
| 139 | printk (KERN_INFO "CPU: C0 stepping P4 Xeon detected.\n"); | 173 | printk (KERN_INFO "CPU: C0 stepping P4 Xeon detected.\n"); |
| 140 | printk (KERN_INFO "CPU: Disabling hardware prefetching (Errata 037)\n"); | 174 | printk (KERN_INFO "CPU: Disabling hardware prefetching (Errata 037)\n"); |
| 141 | lo |= (1<<9); /* Disable hw prefetching */ | 175 | lo |= MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE; |
| 142 | wrmsr (MSR_IA32_MISC_ENABLE, lo, hi); | 176 | wrmsr (MSR_IA32_MISC_ENABLE, lo, hi); |
| 143 | } | 177 | } |
| 144 | } | 178 | } |
| @@ -175,6 +209,8 @@ static void __cpuinit intel_workarounds(struct cpuinfo_x86 *c) | |||
| 175 | #ifdef CONFIG_X86_NUMAQ | 209 | #ifdef CONFIG_X86_NUMAQ |
| 176 | numaq_tsc_disable(); | 210 | numaq_tsc_disable(); |
| 177 | #endif | 211 | #endif |
| 212 | |||
| 213 | intel_smp_check(c); | ||
| 178 | } | 214 | } |
| 179 | #else | 215 | #else |
| 180 | static void __cpuinit intel_workarounds(struct cpuinfo_x86 *c) | 216 | static void __cpuinit intel_workarounds(struct cpuinfo_x86 *c) |
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c index da299eb85fc0..8e6ce2c146d6 100644 --- a/arch/x86/kernel/cpu/intel_cacheinfo.c +++ b/arch/x86/kernel/cpu/intel_cacheinfo.c | |||
| @@ -147,10 +147,19 @@ struct _cpuid4_info { | |||
| 147 | union _cpuid4_leaf_ecx ecx; | 147 | union _cpuid4_leaf_ecx ecx; |
| 148 | unsigned long size; | 148 | unsigned long size; |
| 149 | unsigned long can_disable; | 149 | unsigned long can_disable; |
| 150 | cpumask_t shared_cpu_map; /* future?: only cpus/node is needed */ | 150 | DECLARE_BITMAP(shared_cpu_map, NR_CPUS); |
| 151 | }; | 151 | }; |
| 152 | 152 | ||
| 153 | #ifdef CONFIG_PCI | 153 | /* subset of above _cpuid4_info w/o shared_cpu_map */ |
| 154 | struct _cpuid4_info_regs { | ||
| 155 | union _cpuid4_leaf_eax eax; | ||
| 156 | union _cpuid4_leaf_ebx ebx; | ||
| 157 | union _cpuid4_leaf_ecx ecx; | ||
| 158 | unsigned long size; | ||
| 159 | unsigned long can_disable; | ||
| 160 | }; | ||
| 161 | |||
| 162 | #if defined(CONFIG_PCI) && defined(CONFIG_SYSFS) | ||
| 154 | static struct pci_device_id k8_nb_id[] = { | 163 | static struct pci_device_id k8_nb_id[] = { |
| 155 | { PCI_DEVICE(PCI_VENDOR_ID_AMD, 0x1103) }, | 164 | { PCI_DEVICE(PCI_VENDOR_ID_AMD, 0x1103) }, |
| 156 | { PCI_DEVICE(PCI_VENDOR_ID_AMD, 0x1203) }, | 165 | { PCI_DEVICE(PCI_VENDOR_ID_AMD, 0x1203) }, |
| @@ -278,7 +287,7 @@ amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax, | |||
| 278 | } | 287 | } |
| 279 | 288 | ||
| 280 | static void __cpuinit | 289 | static void __cpuinit |
| 281 | amd_check_l3_disable(int index, struct _cpuid4_info *this_leaf) | 290 | amd_check_l3_disable(int index, struct _cpuid4_info_regs *this_leaf) |
| 282 | { | 291 | { |
| 283 | if (index < 3) | 292 | if (index < 3) |
| 284 | return; | 293 | return; |
| @@ -286,7 +295,8 @@ amd_check_l3_disable(int index, struct _cpuid4_info *this_leaf) | |||
| 286 | } | 295 | } |
| 287 | 296 | ||
| 288 | static int | 297 | static int |
| 289 | __cpuinit cpuid4_cache_lookup(int index, struct _cpuid4_info *this_leaf) | 298 | __cpuinit cpuid4_cache_lookup_regs(int index, |
| 299 | struct _cpuid4_info_regs *this_leaf) | ||
| 290 | { | 300 | { |
| 291 | union _cpuid4_leaf_eax eax; | 301 | union _cpuid4_leaf_eax eax; |
| 292 | union _cpuid4_leaf_ebx ebx; | 302 | union _cpuid4_leaf_ebx ebx; |
| @@ -353,11 +363,10 @@ unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c) | |||
| 353 | * parameters cpuid leaf to find the cache details | 363 | * parameters cpuid leaf to find the cache details |
| 354 | */ | 364 | */ |
| 355 | for (i = 0; i < num_cache_leaves; i++) { | 365 | for (i = 0; i < num_cache_leaves; i++) { |
| 356 | struct _cpuid4_info this_leaf; | 366 | struct _cpuid4_info_regs this_leaf; |
| 357 | |||
| 358 | int retval; | 367 | int retval; |
| 359 | 368 | ||
| 360 | retval = cpuid4_cache_lookup(i, &this_leaf); | 369 | retval = cpuid4_cache_lookup_regs(i, &this_leaf); |
| 361 | if (retval >= 0) { | 370 | if (retval >= 0) { |
| 362 | switch(this_leaf.eax.split.level) { | 371 | switch(this_leaf.eax.split.level) { |
| 363 | case 1: | 372 | case 1: |
| @@ -490,6 +499,8 @@ unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c) | |||
| 490 | return l2; | 499 | return l2; |
| 491 | } | 500 | } |
| 492 | 501 | ||
| 502 | #ifdef CONFIG_SYSFS | ||
| 503 | |||
| 493 | /* pointer to _cpuid4_info array (for each cache leaf) */ | 504 | /* pointer to _cpuid4_info array (for each cache leaf) */ |
| 494 | static DEFINE_PER_CPU(struct _cpuid4_info *, cpuid4_info); | 505 | static DEFINE_PER_CPU(struct _cpuid4_info *, cpuid4_info); |
| 495 | #define CPUID4_INFO_IDX(x, y) (&((per_cpu(cpuid4_info, x))[y])) | 506 | #define CPUID4_INFO_IDX(x, y) (&((per_cpu(cpuid4_info, x))[y])) |
| @@ -506,17 +517,20 @@ static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index) | |||
| 506 | num_threads_sharing = 1 + this_leaf->eax.split.num_threads_sharing; | 517 | num_threads_sharing = 1 + this_leaf->eax.split.num_threads_sharing; |
| 507 | 518 | ||
| 508 | if (num_threads_sharing == 1) | 519 | if (num_threads_sharing == 1) |
| 509 | cpu_set(cpu, this_leaf->shared_cpu_map); | 520 | cpumask_set_cpu(cpu, to_cpumask(this_leaf->shared_cpu_map)); |
| 510 | else { | 521 | else { |
| 511 | index_msb = get_count_order(num_threads_sharing); | 522 | index_msb = get_count_order(num_threads_sharing); |
| 512 | 523 | ||
| 513 | for_each_online_cpu(i) { | 524 | for_each_online_cpu(i) { |
| 514 | if (cpu_data(i).apicid >> index_msb == | 525 | if (cpu_data(i).apicid >> index_msb == |
| 515 | c->apicid >> index_msb) { | 526 | c->apicid >> index_msb) { |
| 516 | cpu_set(i, this_leaf->shared_cpu_map); | 527 | cpumask_set_cpu(i, |
| 528 | to_cpumask(this_leaf->shared_cpu_map)); | ||
| 517 | if (i != cpu && per_cpu(cpuid4_info, i)) { | 529 | if (i != cpu && per_cpu(cpuid4_info, i)) { |
| 518 | sibling_leaf = CPUID4_INFO_IDX(i, index); | 530 | sibling_leaf = |
| 519 | cpu_set(cpu, sibling_leaf->shared_cpu_map); | 531 | CPUID4_INFO_IDX(i, index); |
| 532 | cpumask_set_cpu(cpu, to_cpumask( | ||
| 533 | sibling_leaf->shared_cpu_map)); | ||
| 520 | } | 534 | } |
| 521 | } | 535 | } |
| 522 | } | 536 | } |
| @@ -528,9 +542,10 @@ static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index) | |||
| 528 | int sibling; | 542 | int sibling; |
| 529 | 543 | ||
| 530 | this_leaf = CPUID4_INFO_IDX(cpu, index); | 544 | this_leaf = CPUID4_INFO_IDX(cpu, index); |
| 531 | for_each_cpu_mask_nr(sibling, this_leaf->shared_cpu_map) { | 545 | for_each_cpu(sibling, to_cpumask(this_leaf->shared_cpu_map)) { |
| 532 | sibling_leaf = CPUID4_INFO_IDX(sibling, index); | 546 | sibling_leaf = CPUID4_INFO_IDX(sibling, index); |
| 533 | cpu_clear(cpu, sibling_leaf->shared_cpu_map); | 547 | cpumask_clear_cpu(cpu, |
| 548 | to_cpumask(sibling_leaf->shared_cpu_map)); | ||
| 534 | } | 549 | } |
| 535 | } | 550 | } |
| 536 | #else | 551 | #else |
| @@ -549,6 +564,15 @@ static void __cpuinit free_cache_attributes(unsigned int cpu) | |||
| 549 | per_cpu(cpuid4_info, cpu) = NULL; | 564 | per_cpu(cpuid4_info, cpu) = NULL; |
| 550 | } | 565 | } |
| 551 | 566 | ||
| 567 | static int | ||
| 568 | __cpuinit cpuid4_cache_lookup(int index, struct _cpuid4_info *this_leaf) | ||
| 569 | { | ||
| 570 | struct _cpuid4_info_regs *leaf_regs = | ||
| 571 | (struct _cpuid4_info_regs *)this_leaf; | ||
| 572 | |||
| 573 | return cpuid4_cache_lookup_regs(index, leaf_regs); | ||
| 574 | } | ||
| 575 | |||
| 552 | static void __cpuinit get_cpu_leaves(void *_retval) | 576 | static void __cpuinit get_cpu_leaves(void *_retval) |
| 553 | { | 577 | { |
| 554 | int j, *retval = _retval, cpu = smp_processor_id(); | 578 | int j, *retval = _retval, cpu = smp_processor_id(); |
| @@ -590,8 +614,6 @@ static int __cpuinit detect_cache_attributes(unsigned int cpu) | |||
| 590 | return retval; | 614 | return retval; |
| 591 | } | 615 | } |
| 592 | 616 | ||
| 593 | #ifdef CONFIG_SYSFS | ||
| 594 | |||
| 595 | #include <linux/kobject.h> | 617 | #include <linux/kobject.h> |
| 596 | #include <linux/sysfs.h> | 618 | #include <linux/sysfs.h> |
| 597 | 619 | ||
| @@ -635,8 +657,9 @@ static ssize_t show_shared_cpu_map_func(struct _cpuid4_info *this_leaf, | |||
| 635 | int n = 0; | 657 | int n = 0; |
| 636 | 658 | ||
| 637 | if (len > 1) { | 659 | if (len > 1) { |
| 638 | cpumask_t *mask = &this_leaf->shared_cpu_map; | 660 | const struct cpumask *mask; |
| 639 | 661 | ||
| 662 | mask = to_cpumask(this_leaf->shared_cpu_map); | ||
| 640 | n = type? | 663 | n = type? |
| 641 | cpulist_scnprintf(buf, len-2, mask) : | 664 | cpulist_scnprintf(buf, len-2, mask) : |
| 642 | cpumask_scnprintf(buf, len-2, mask); | 665 | cpumask_scnprintf(buf, len-2, mask); |
| @@ -699,7 +722,8 @@ static struct pci_dev *get_k8_northbridge(int node) | |||
| 699 | 722 | ||
| 700 | static ssize_t show_cache_disable(struct _cpuid4_info *this_leaf, char *buf) | 723 | static ssize_t show_cache_disable(struct _cpuid4_info *this_leaf, char *buf) |
| 701 | { | 724 | { |
| 702 | int node = cpu_to_node(first_cpu(this_leaf->shared_cpu_map)); | 725 | const struct cpumask *mask = to_cpumask(this_leaf->shared_cpu_map); |
| 726 | int node = cpu_to_node(cpumask_first(mask)); | ||
| 703 | struct pci_dev *dev = NULL; | 727 | struct pci_dev *dev = NULL; |
| 704 | ssize_t ret = 0; | 728 | ssize_t ret = 0; |
| 705 | int i; | 729 | int i; |
| @@ -733,7 +757,8 @@ static ssize_t | |||
| 733 | store_cache_disable(struct _cpuid4_info *this_leaf, const char *buf, | 757 | store_cache_disable(struct _cpuid4_info *this_leaf, const char *buf, |
| 734 | size_t count) | 758 | size_t count) |
| 735 | { | 759 | { |
| 736 | int node = cpu_to_node(first_cpu(this_leaf->shared_cpu_map)); | 760 | const struct cpumask *mask = to_cpumask(this_leaf->shared_cpu_map); |
| 761 | int node = cpu_to_node(cpumask_first(mask)); | ||
| 737 | struct pci_dev *dev = NULL; | 762 | struct pci_dev *dev = NULL; |
| 738 | unsigned int ret, index, val; | 763 | unsigned int ret, index, val; |
| 739 | 764 | ||
| @@ -878,7 +903,7 @@ err_out: | |||
| 878 | return -ENOMEM; | 903 | return -ENOMEM; |
| 879 | } | 904 | } |
| 880 | 905 | ||
| 881 | static cpumask_t cache_dev_map = CPU_MASK_NONE; | 906 | static DECLARE_BITMAP(cache_dev_map, NR_CPUS); |
| 882 | 907 | ||
| 883 | /* Add/Remove cache interface for CPU device */ | 908 | /* Add/Remove cache interface for CPU device */ |
| 884 | static int __cpuinit cache_add_dev(struct sys_device * sys_dev) | 909 | static int __cpuinit cache_add_dev(struct sys_device * sys_dev) |
| @@ -918,7 +943,7 @@ static int __cpuinit cache_add_dev(struct sys_device * sys_dev) | |||
| 918 | } | 943 | } |
| 919 | kobject_uevent(&(this_object->kobj), KOBJ_ADD); | 944 | kobject_uevent(&(this_object->kobj), KOBJ_ADD); |
| 920 | } | 945 | } |
| 921 | cpu_set(cpu, cache_dev_map); | 946 | cpumask_set_cpu(cpu, to_cpumask(cache_dev_map)); |
| 922 | 947 | ||
| 923 | kobject_uevent(per_cpu(cache_kobject, cpu), KOBJ_ADD); | 948 | kobject_uevent(per_cpu(cache_kobject, cpu), KOBJ_ADD); |
| 924 | return 0; | 949 | return 0; |
| @@ -931,9 +956,9 @@ static void __cpuinit cache_remove_dev(struct sys_device * sys_dev) | |||
| 931 | 956 | ||
| 932 | if (per_cpu(cpuid4_info, cpu) == NULL) | 957 | if (per_cpu(cpuid4_info, cpu) == NULL) |
| 933 | return; | 958 | return; |
| 934 | if (!cpu_isset(cpu, cache_dev_map)) | 959 | if (!cpumask_test_cpu(cpu, to_cpumask(cache_dev_map))) |
| 935 | return; | 960 | return; |
| 936 | cpu_clear(cpu, cache_dev_map); | 961 | cpumask_clear_cpu(cpu, to_cpumask(cache_dev_map)); |
| 937 | 962 | ||
| 938 | for (i = 0; i < num_cache_leaves; i++) | 963 | for (i = 0; i < num_cache_leaves; i++) |
| 939 | kobject_put(&(INDEX_KOBJECT_PTR(cpu,i)->kobj)); | 964 | kobject_put(&(INDEX_KOBJECT_PTR(cpu,i)->kobj)); |
diff --git a/arch/x86/kernel/cpu/mcheck/Makefile b/arch/x86/kernel/cpu/mcheck/Makefile index d7d2323bbb69..b2f89829bbe8 100644 --- a/arch/x86/kernel/cpu/mcheck/Makefile +++ b/arch/x86/kernel/cpu/mcheck/Makefile | |||
| @@ -4,3 +4,4 @@ obj-$(CONFIG_X86_32) += k7.o p4.o p5.o p6.o winchip.o | |||
| 4 | obj-$(CONFIG_X86_MCE_INTEL) += mce_intel_64.o | 4 | obj-$(CONFIG_X86_MCE_INTEL) += mce_intel_64.o |
| 5 | obj-$(CONFIG_X86_MCE_AMD) += mce_amd_64.o | 5 | obj-$(CONFIG_X86_MCE_AMD) += mce_amd_64.o |
| 6 | obj-$(CONFIG_X86_MCE_NONFATAL) += non-fatal.o | 6 | obj-$(CONFIG_X86_MCE_NONFATAL) += non-fatal.o |
| 7 | obj-$(CONFIG_X86_MCE_THRESHOLD) += threshold.o | ||
diff --git a/arch/x86/kernel/cpu/mcheck/mce_32.c b/arch/x86/kernel/cpu/mcheck/mce_32.c index dfaebce3633e..3552119b091d 100644 --- a/arch/x86/kernel/cpu/mcheck/mce_32.c +++ b/arch/x86/kernel/cpu/mcheck/mce_32.c | |||
| @@ -60,20 +60,6 @@ void mcheck_init(struct cpuinfo_x86 *c) | |||
| 60 | } | 60 | } |
| 61 | } | 61 | } |
| 62 | 62 | ||
| 63 | static unsigned long old_cr4 __initdata; | ||
| 64 | |||
| 65 | void __init stop_mce(void) | ||
| 66 | { | ||
| 67 | old_cr4 = read_cr4(); | ||
| 68 | clear_in_cr4(X86_CR4_MCE); | ||
| 69 | } | ||
| 70 | |||
| 71 | void __init restart_mce(void) | ||
| 72 | { | ||
| 73 | if (old_cr4 & X86_CR4_MCE) | ||
| 74 | set_in_cr4(X86_CR4_MCE); | ||
| 75 | } | ||
| 76 | |||
| 77 | static int __init mcheck_disable(char *str) | 63 | static int __init mcheck_disable(char *str) |
| 78 | { | 64 | { |
| 79 | mce_disabled = 1; | 65 | mce_disabled = 1; |
diff --git a/arch/x86/kernel/cpu/mcheck/mce_64.c b/arch/x86/kernel/cpu/mcheck/mce_64.c index fe79985ce0f2..863f89568b1a 100644 --- a/arch/x86/kernel/cpu/mcheck/mce_64.c +++ b/arch/x86/kernel/cpu/mcheck/mce_64.c | |||
| @@ -3,6 +3,8 @@ | |||
| 3 | * K8 parts Copyright 2002,2003 Andi Kleen, SuSE Labs. | 3 | * K8 parts Copyright 2002,2003 Andi Kleen, SuSE Labs. |
| 4 | * Rest from unknown author(s). | 4 | * Rest from unknown author(s). |
| 5 | * 2004 Andi Kleen. Rewrote most of it. | 5 | * 2004 Andi Kleen. Rewrote most of it. |
| 6 | * Copyright 2008 Intel Corporation | ||
| 7 | * Author: Andi Kleen | ||
| 6 | */ | 8 | */ |
| 7 | 9 | ||
| 8 | #include <linux/init.h> | 10 | #include <linux/init.h> |
| @@ -24,6 +26,9 @@ | |||
| 24 | #include <linux/ctype.h> | 26 | #include <linux/ctype.h> |
| 25 | #include <linux/kmod.h> | 27 | #include <linux/kmod.h> |
| 26 | #include <linux/kdebug.h> | 28 | #include <linux/kdebug.h> |
| 29 | #include <linux/kobject.h> | ||
| 30 | #include <linux/sysfs.h> | ||
| 31 | #include <linux/ratelimit.h> | ||
| 27 | #include <asm/processor.h> | 32 | #include <asm/processor.h> |
| 28 | #include <asm/msr.h> | 33 | #include <asm/msr.h> |
| 29 | #include <asm/mce.h> | 34 | #include <asm/mce.h> |
| @@ -32,7 +37,6 @@ | |||
| 32 | #include <asm/idle.h> | 37 | #include <asm/idle.h> |
| 33 | 38 | ||
| 34 | #define MISC_MCELOG_MINOR 227 | 39 | #define MISC_MCELOG_MINOR 227 |
| 35 | #define NR_SYSFS_BANKS 6 | ||
| 36 | 40 | ||
| 37 | atomic_t mce_entry; | 41 | atomic_t mce_entry; |
| 38 | 42 | ||
| @@ -47,7 +51,7 @@ static int mce_dont_init; | |||
| 47 | */ | 51 | */ |
| 48 | static int tolerant = 1; | 52 | static int tolerant = 1; |
| 49 | static int banks; | 53 | static int banks; |
| 50 | static unsigned long bank[NR_SYSFS_BANKS] = { [0 ... NR_SYSFS_BANKS-1] = ~0UL }; | 54 | static u64 *bank; |
| 51 | static unsigned long notify_user; | 55 | static unsigned long notify_user; |
| 52 | static int rip_msr; | 56 | static int rip_msr; |
| 53 | static int mce_bootlog = -1; | 57 | static int mce_bootlog = -1; |
| @@ -58,6 +62,19 @@ static char *trigger_argv[2] = { trigger, NULL }; | |||
| 58 | 62 | ||
| 59 | static DECLARE_WAIT_QUEUE_HEAD(mce_wait); | 63 | static DECLARE_WAIT_QUEUE_HEAD(mce_wait); |
| 60 | 64 | ||
| 65 | /* MCA banks polled by the periodic polling timer for corrected events */ | ||

| 66 | DEFINE_PER_CPU(mce_banks_t, mce_poll_banks) = { | ||
| 67 | [0 ... BITS_TO_LONGS(MAX_NR_BANKS)-1] = ~0UL | ||
| 68 | }; | ||
| 69 | |||
| 70 | /* Do initial initialization of a struct mce */ | ||
| 71 | void mce_setup(struct mce *m) | ||
| 72 | { | ||
| 73 | memset(m, 0, sizeof(struct mce)); | ||
| 74 | m->cpu = smp_processor_id(); | ||
| 75 | rdtscll(m->tsc); | ||
| 76 | } | ||
| 77 | |||
| 61 | /* | 78 | /* |
| 62 | * Lockless MCE logging infrastructure. | 79 | * Lockless MCE logging infrastructure. |
| 63 | * This avoids deadlocks on printk locks without having to break locks. Also | 80 | * This avoids deadlocks on printk locks without having to break locks. Also |
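Editor's note: the hunk above introduces a per-CPU mce_poll_banks bitmap whose bits all start set via a designated range initializer ([0 ... N-1] = ~0UL, a GNU C extension), plus mce_setup() to centralise zeroing a struct mce and stamping it with CPU and TSC. A small standalone example of the range-initializer idiom (GCC/Clang only; MAX_NR_BANKS here is just a sample value):

#include <stdio.h>

#define MAX_NR_BANKS 128
#define BITS_PER_LONG (8 * sizeof(unsigned long))
#define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

/* "Poll every bank until told otherwise": all bits start out set. */
static unsigned long poll_banks[BITS_TO_LONGS(MAX_NR_BANKS)] = {
    [0 ... BITS_TO_LONGS(MAX_NR_BANKS) - 1] = ~0UL
};

int main(void)
{
    int bank = 10;
    int polled = (poll_banks[bank / BITS_PER_LONG] >> (bank % BITS_PER_LONG)) & 1;

    printf("bank %d polled by default: %d\n", bank, polled);
    return 0;
}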
| @@ -119,11 +136,11 @@ static void print_mce(struct mce *m) | |||
| 119 | print_symbol("{%s}", m->ip); | 136 | print_symbol("{%s}", m->ip); |
| 120 | printk("\n"); | 137 | printk("\n"); |
| 121 | } | 138 | } |
| 122 | printk(KERN_EMERG "TSC %Lx ", m->tsc); | 139 | printk(KERN_EMERG "TSC %llx ", m->tsc); |
| 123 | if (m->addr) | 140 | if (m->addr) |
| 124 | printk("ADDR %Lx ", m->addr); | 141 | printk("ADDR %llx ", m->addr); |
| 125 | if (m->misc) | 142 | if (m->misc) |
| 126 | printk("MISC %Lx ", m->misc); | 143 | printk("MISC %llx ", m->misc); |
| 127 | printk("\n"); | 144 | printk("\n"); |
| 128 | printk(KERN_EMERG "This is not a software problem!\n"); | 145 | printk(KERN_EMERG "This is not a software problem!\n"); |
| 129 | printk(KERN_EMERG "Run through mcelog --ascii to decode " | 146 | printk(KERN_EMERG "Run through mcelog --ascii to decode " |
| @@ -149,8 +166,10 @@ static void mce_panic(char *msg, struct mce *backup, unsigned long start) | |||
| 149 | panic(msg); | 166 | panic(msg); |
| 150 | } | 167 | } |
| 151 | 168 | ||
| 152 | static int mce_available(struct cpuinfo_x86 *c) | 169 | int mce_available(struct cpuinfo_x86 *c) |
| 153 | { | 170 | { |
| 171 | if (mce_dont_init) | ||
| 172 | return 0; | ||
| 154 | return cpu_has(c, X86_FEATURE_MCE) && cpu_has(c, X86_FEATURE_MCA); | 173 | return cpu_has(c, X86_FEATURE_MCE) && cpu_has(c, X86_FEATURE_MCA); |
| 155 | } | 174 | } |
| 156 | 175 | ||
| @@ -172,7 +191,77 @@ static inline void mce_get_rip(struct mce *m, struct pt_regs *regs) | |||
| 172 | } | 191 | } |
| 173 | 192 | ||
| 174 | /* | 193 | /* |
| 175 | * The actual machine check handler | 194 | * Poll for corrected events or events that happened before reset. |
| 195 | * Those are just logged through /dev/mcelog. | ||
| 196 | * | ||
| 197 | * This is executed in standard interrupt context. | ||
| 198 | */ | ||
| 199 | void machine_check_poll(enum mcp_flags flags, mce_banks_t *b) | ||
| 200 | { | ||
| 201 | struct mce m; | ||
| 202 | int i; | ||
| 203 | |||
| 204 | mce_setup(&m); | ||
| 205 | |||
| 206 | rdmsrl(MSR_IA32_MCG_STATUS, m.mcgstatus); | ||
| 207 | for (i = 0; i < banks; i++) { | ||
| 208 | if (!bank[i] || !test_bit(i, *b)) | ||
| 209 | continue; | ||
| 210 | |||
| 211 | m.misc = 0; | ||
| 212 | m.addr = 0; | ||
| 213 | m.bank = i; | ||
| 214 | m.tsc = 0; | ||
| 215 | |||
| 216 | barrier(); | ||
| 217 | rdmsrl(MSR_IA32_MC0_STATUS + i*4, m.status); | ||
| 218 | if (!(m.status & MCI_STATUS_VAL)) | ||
| 219 | continue; | ||
| 220 | |||
| 221 | /* | ||
| 222 | * Uncorrected events are handled by the exception handler | ||
| 223 | * when it is enabled. But when the exception is disabled log | ||
| 224 | * everything. | ||
| 225 | * | ||
| 226 | * TBD do the same check for MCI_STATUS_EN here? | ||
| 227 | */ | ||
| 228 | if ((m.status & MCI_STATUS_UC) && !(flags & MCP_UC)) | ||
| 229 | continue; | ||
| 230 | |||
| 231 | if (m.status & MCI_STATUS_MISCV) | ||
| 232 | rdmsrl(MSR_IA32_MC0_MISC + i*4, m.misc); | ||
| 233 | if (m.status & MCI_STATUS_ADDRV) | ||
| 234 | rdmsrl(MSR_IA32_MC0_ADDR + i*4, m.addr); | ||
| 235 | |||
| 236 | if (!(flags & MCP_TIMESTAMP)) | ||
| 237 | m.tsc = 0; | ||
| 238 | /* | ||
| 239 | * Don't get the IP here because it's unlikely to | ||
| 240 | * have anything to do with the actual error location. | ||
| 241 | */ | ||
| 242 | |||
| 243 | mce_log(&m); | ||
| 244 | add_taint(TAINT_MACHINE_CHECK); | ||
| 245 | |||
| 246 | /* | ||
| 247 | * Clear state for this bank. | ||
| 248 | */ | ||
| 249 | wrmsrl(MSR_IA32_MC0_STATUS+4*i, 0); | ||
| 250 | } | ||
| 251 | |||
| 252 | /* | ||
| 253 | * Don't clear MCG_STATUS here because it's only defined for | ||
| 254 | * exceptions. | ||
| 255 | */ | ||
| 256 | } | ||
| 257 | |||
| 258 | /* | ||
| 259 | * The actual machine check handler. This only handles real | ||
| 260 | * exceptions when something got corrupted coming in through int 18. | ||
| 261 | * | ||
| 262 | * This is executed in NMI context not subject to normal locking rules. This | ||
| 263 | * implies that most kernel services cannot be safely used. Don't even | ||
| 264 | * think about putting a printk in there! | ||
| 176 | */ | 265 | */ |
| 177 | void do_machine_check(struct pt_regs * regs, long error_code) | 266 | void do_machine_check(struct pt_regs * regs, long error_code) |
| 178 | { | 267 | { |
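Editor's note: machine_check_poll() above separates corrected-event polling from the #MC exception path: walk each bank, skip banks that are disabled or outside the caller's bitmap, skip invalid entries, leave uncorrected errors to the exception handler unless MCP_UC is passed, then log and clear. That control flow, reduced to a userspace sketch — read_status(), clear_status(), log_event() and the status bits are stand-ins for the MSR accessors and MCI_STATUS_* flags, not the real definitions:

#include <stdint.h>
#include <stdio.h>

#define STATUS_VAL (1ULL << 63)    /* entry in the bank is valid          */
#define STATUS_UC  (1ULL << 61)    /* error is uncorrected                */
#define FLAG_UC    0x1             /* caller wants uncorrected events too */

/* Stand-ins for the per-bank MSR accessors and mce_log(). */
static uint64_t fake_status[8] = { [2] = STATUS_VAL, [5] = STATUS_VAL | STATUS_UC };

static uint64_t read_status(int bank)  { return fake_status[bank]; }
static void clear_status(int bank)     { fake_status[bank] = 0; }
static void log_event(int bank, uint64_t s)
{
    printf("bank %d: status %#llx\n", bank, (unsigned long long)s);
}

static void poll_banks(int nbanks, unsigned flags)
{
    for (int i = 0; i < nbanks; i++) {
        uint64_t status = read_status(i);

        if (!(status & STATUS_VAL))
            continue;                        /* nothing latched here          */
        if ((status & STATUS_UC) && !(flags & FLAG_UC))
            continue;                        /* leave UC to the #MC handler   */
        log_event(i, status);
        clear_status(i);                     /* ack so it is not logged twice */
    }
}

int main(void)
{
    poll_banks(8, 0);          /* corrected-only pass, like the periodic timer */
    poll_banks(8, FLAG_UC);    /* boot-time pass that also picks up leftovers  */
    return 0;
}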
| @@ -190,17 +279,18 @@ void do_machine_check(struct pt_regs * regs, long error_code) | |||
| 190 | * error. | 279 | * error. |
| 191 | */ | 280 | */ |
| 192 | int kill_it = 0; | 281 | int kill_it = 0; |
| 282 | DECLARE_BITMAP(toclear, MAX_NR_BANKS); | ||
| 193 | 283 | ||
| 194 | atomic_inc(&mce_entry); | 284 | atomic_inc(&mce_entry); |
| 195 | 285 | ||
| 196 | if ((regs | 286 | if (notify_die(DIE_NMI, "machine check", regs, error_code, |
| 197 | && notify_die(DIE_NMI, "machine check", regs, error_code, | ||
| 198 | 18, SIGKILL) == NOTIFY_STOP) | 287 | 18, SIGKILL) == NOTIFY_STOP) |
| 199 | || !banks) | 288 | goto out2; |
| 289 | if (!banks) | ||
| 200 | goto out2; | 290 | goto out2; |
| 201 | 291 | ||
| 202 | memset(&m, 0, sizeof(struct mce)); | 292 | mce_setup(&m); |
| 203 | m.cpu = smp_processor_id(); | 293 | |
| 204 | rdmsrl(MSR_IA32_MCG_STATUS, m.mcgstatus); | 294 | rdmsrl(MSR_IA32_MCG_STATUS, m.mcgstatus); |
| 205 | /* if the restart IP is not valid, we're done for */ | 295 | /* if the restart IP is not valid, we're done for */ |
| 206 | if (!(m.mcgstatus & MCG_STATUS_RIPV)) | 296 | if (!(m.mcgstatus & MCG_STATUS_RIPV)) |
| @@ -210,18 +300,32 @@ void do_machine_check(struct pt_regs * regs, long error_code) | |||
| 210 | barrier(); | 300 | barrier(); |
| 211 | 301 | ||
| 212 | for (i = 0; i < banks; i++) { | 302 | for (i = 0; i < banks; i++) { |
| 213 | if (i < NR_SYSFS_BANKS && !bank[i]) | 303 | __clear_bit(i, toclear); |
| 304 | if (!bank[i]) | ||
| 214 | continue; | 305 | continue; |
| 215 | 306 | ||
| 216 | m.misc = 0; | 307 | m.misc = 0; |
| 217 | m.addr = 0; | 308 | m.addr = 0; |
| 218 | m.bank = i; | 309 | m.bank = i; |
| 219 | m.tsc = 0; | ||
| 220 | 310 | ||
| 221 | rdmsrl(MSR_IA32_MC0_STATUS + i*4, m.status); | 311 | rdmsrl(MSR_IA32_MC0_STATUS + i*4, m.status); |
| 222 | if ((m.status & MCI_STATUS_VAL) == 0) | 312 | if ((m.status & MCI_STATUS_VAL) == 0) |
| 223 | continue; | 313 | continue; |
| 224 | 314 | ||
| 315 | /* | ||
| 316 | * Non-uncorrected (i.e. corrected) errors are handled by machine_check_poll. | ||
| 317 | * Leave them alone. | ||
| 318 | */ | ||
| 319 | if ((m.status & MCI_STATUS_UC) == 0) | ||
| 320 | continue; | ||
| 321 | |||
| 322 | /* | ||
| 323 | * Set taint even when machine check was not enabled. | ||
| 324 | */ | ||
| 325 | add_taint(TAINT_MACHINE_CHECK); | ||
| 326 | |||
| 327 | __set_bit(i, toclear); | ||
| 328 | |||
| 225 | if (m.status & MCI_STATUS_EN) { | 329 | if (m.status & MCI_STATUS_EN) { |
| 226 | /* if PCC was set, there's no way out */ | 330 | /* if PCC was set, there's no way out */ |
| 227 | no_way_out |= !!(m.status & MCI_STATUS_PCC); | 331 | no_way_out |= !!(m.status & MCI_STATUS_PCC); |
| @@ -235,6 +339,12 @@ void do_machine_check(struct pt_regs * regs, long error_code) | |||
| 235 | no_way_out = 1; | 339 | no_way_out = 1; |
| 236 | kill_it = 1; | 340 | kill_it = 1; |
| 237 | } | 341 | } |
| 342 | } else { | ||
| 343 | /* | ||
| 344 | * Machine check event was not enabled. Clear, but | ||
| 345 | * ignore. | ||
| 346 | */ | ||
| 347 | continue; | ||
| 238 | } | 348 | } |
| 239 | 349 | ||
| 240 | if (m.status & MCI_STATUS_MISCV) | 350 | if (m.status & MCI_STATUS_MISCV) |
| @@ -243,10 +353,7 @@ void do_machine_check(struct pt_regs * regs, long error_code) | |||
| 243 | rdmsrl(MSR_IA32_MC0_ADDR + i*4, m.addr); | 353 | rdmsrl(MSR_IA32_MC0_ADDR + i*4, m.addr); |
| 244 | 354 | ||
| 245 | mce_get_rip(&m, regs); | 355 | mce_get_rip(&m, regs); |
| 246 | if (error_code >= 0) | 356 | mce_log(&m); |
| 247 | rdtscll(m.tsc); | ||
| 248 | if (error_code != -2) | ||
| 249 | mce_log(&m); | ||
| 250 | 357 | ||
| 251 | /* Did this bank cause the exception? */ | 358 | /* Did this bank cause the exception? */ |
| 252 | /* Assume that the bank with uncorrectable errors did it, | 359 | /* Assume that the bank with uncorrectable errors did it, |
| @@ -255,14 +362,8 @@ void do_machine_check(struct pt_regs * regs, long error_code) | |||
| 255 | panicm = m; | 362 | panicm = m; |
| 256 | panicm_found = 1; | 363 | panicm_found = 1; |
| 257 | } | 364 | } |
| 258 | |||
| 259 | add_taint(TAINT_MACHINE_CHECK); | ||
| 260 | } | 365 | } |
| 261 | 366 | ||
| 262 | /* Never do anything final in the polling timer */ | ||
| 263 | if (!regs) | ||
| 264 | goto out; | ||
| 265 | |||
| 266 | /* If we didn't find an uncorrectable error, pick | 367 | /* If we didn't find an uncorrectable error, pick |
| 267 | the last one (shouldn't happen, just being safe). */ | 368 | the last one (shouldn't happen, just being safe). */ |
| 268 | if (!panicm_found) | 369 | if (!panicm_found) |
| @@ -309,10 +410,11 @@ void do_machine_check(struct pt_regs * regs, long error_code) | |||
| 309 | /* notify userspace ASAP */ | 410 | /* notify userspace ASAP */ |
| 310 | set_thread_flag(TIF_MCE_NOTIFY); | 411 | set_thread_flag(TIF_MCE_NOTIFY); |
| 311 | 412 | ||
| 312 | out: | ||
| 313 | /* the last thing we do is clear state */ | 413 | /* the last thing we do is clear state */ |
| 314 | for (i = 0; i < banks; i++) | 414 | for (i = 0; i < banks; i++) { |
| 315 | wrmsrl(MSR_IA32_MC0_STATUS+4*i, 0); | 415 | if (test_bit(i, toclear)) |
| 416 | wrmsrl(MSR_IA32_MC0_STATUS+4*i, 0); | ||
| 417 | } | ||
| 316 | wrmsrl(MSR_IA32_MCG_STATUS, 0); | 418 | wrmsrl(MSR_IA32_MCG_STATUS, 0); |
| 317 | out2: | 419 | out2: |
| 318 | atomic_dec(&mce_entry); | 420 | atomic_dec(&mce_entry); |
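Editor's note: across the do_machine_check() hunks above, the handler now records which banks it actually examined in a local toclear bitmap and clears only those status registers at the end, rather than unconditionally zeroing every bank (which could wipe corrected events the poller still owns). The shape of that pattern, with a plain 64-bit mask standing in for DECLARE_BITMAP() and a placeholder in place of the VAL/UC checks:

#include <stdint.h>
#include <stdio.h>

static void handle_banks(int nbanks)
{
    uint64_t toclear = 0;                /* which banks this pass touched */

    for (int i = 0; i < nbanks; i++) {
        int relevant = (i % 3 == 0);     /* placeholder for the VAL/UC checks */

        if (!relevant)
            continue;
        toclear |= 1ULL << i;            /* like __set_bit(i, toclear) */
        /* ... decode, log, maybe mark the bank as the panic source ... */
    }

    /* Last step: clear state, but only in the banks we own. */
    for (int i = 0; i < nbanks; i++)
        if (toclear & (1ULL << i))
            printf("clearing bank %d\n", i);
}

int main(void)
{
    handle_banks(6);
    return 0;
}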
| @@ -332,15 +434,13 @@ void do_machine_check(struct pt_regs * regs, long error_code) | |||
| 332 | * and historically has been the register value of the | 434 | * and historically has been the register value of the |
| 333 | * MSR_IA32_THERMAL_STATUS (Intel) msr. | 435 | * MSR_IA32_THERMAL_STATUS (Intel) msr. |
| 334 | */ | 436 | */ |
| 335 | void mce_log_therm_throt_event(unsigned int cpu, __u64 status) | 437 | void mce_log_therm_throt_event(__u64 status) |
| 336 | { | 438 | { |
| 337 | struct mce m; | 439 | struct mce m; |
| 338 | 440 | ||
| 339 | memset(&m, 0, sizeof(m)); | 441 | mce_setup(&m); |
| 340 | m.cpu = cpu; | ||
| 341 | m.bank = MCE_THERMAL_BANK; | 442 | m.bank = MCE_THERMAL_BANK; |
| 342 | m.status = status; | 443 | m.status = status; |
| 343 | rdtscll(m.tsc); | ||
| 344 | mce_log(&m); | 444 | mce_log(&m); |
| 345 | } | 445 | } |
| 346 | #endif /* CONFIG_X86_MCE_INTEL */ | 446 | #endif /* CONFIG_X86_MCE_INTEL */ |
| @@ -353,18 +453,18 @@ void mce_log_therm_throt_event(unsigned int cpu, __u64 status) | |||
| 353 | 453 | ||
| 354 | static int check_interval = 5 * 60; /* 5 minutes */ | 454 | static int check_interval = 5 * 60; /* 5 minutes */ |
| 355 | static int next_interval; /* in jiffies */ | 455 | static int next_interval; /* in jiffies */ |
| 356 | static void mcheck_timer(struct work_struct *work); | 456 | static void mcheck_timer(unsigned long); |
| 357 | static DECLARE_DELAYED_WORK(mcheck_work, mcheck_timer); | 457 | static DEFINE_PER_CPU(struct timer_list, mce_timer); |
| 358 | 458 | ||
| 359 | static void mcheck_check_cpu(void *info) | 459 | static void mcheck_timer(unsigned long data) |
| 360 | { | 460 | { |
| 361 | if (mce_available(¤t_cpu_data)) | 461 | struct timer_list *t = &per_cpu(mce_timer, data); |
| 362 | do_machine_check(NULL, 0); | ||
| 363 | } | ||
| 364 | 462 | ||
| 365 | static void mcheck_timer(struct work_struct *work) | 463 | WARN_ON(smp_processor_id() != data); |
| 366 | { | 464 | |
| 367 | on_each_cpu(mcheck_check_cpu, NULL, 1); | 465 | if (mce_available(¤t_cpu_data)) |
| 466 | machine_check_poll(MCP_TIMESTAMP, | ||
| 467 | &__get_cpu_var(mce_poll_banks)); | ||
| 368 | 468 | ||
| 369 | /* | 469 | /* |
| 370 | * Alert userspace if needed. If we logged an MCE, reduce the | 470 | * Alert userspace if needed. If we logged an MCE, reduce the |
| @@ -377,31 +477,41 @@ static void mcheck_timer(struct work_struct *work) | |||
| 377 | (int)round_jiffies_relative(check_interval*HZ)); | 477 | (int)round_jiffies_relative(check_interval*HZ)); |
| 378 | } | 478 | } |
| 379 | 479 | ||
| 380 | schedule_delayed_work(&mcheck_work, next_interval); | 480 | t->expires = jiffies + next_interval; |
| 481 | add_timer(t); | ||
| 482 | } | ||
| 483 | |||
| 484 | static void mce_do_trigger(struct work_struct *work) | ||
| 485 | { | ||
| 486 | call_usermodehelper(trigger, trigger_argv, NULL, UMH_NO_WAIT); | ||
| 381 | } | 487 | } |
| 382 | 488 | ||
| 489 | static DECLARE_WORK(mce_trigger_work, mce_do_trigger); | ||
| 490 | |||
| 383 | /* | 491 | /* |
| 384 | * This is only called from process context. This is where we do | 492 | * Notify the user(s) about new machine check events. |
| 385 | * anything we need to alert userspace about new MCEs. This is called | 493 | * Can be called from interrupt context, but not from machine check/NMI |
| 386 | * directly from the poller and also from entry.S and idle, thanks to | 494 | * context. |
| 387 | * TIF_MCE_NOTIFY. | ||
| 388 | */ | 495 | */ |
| 389 | int mce_notify_user(void) | 496 | int mce_notify_user(void) |
| 390 | { | 497 | { |
| 498 | /* Not more than two messages every minute */ | ||
| 499 | static DEFINE_RATELIMIT_STATE(ratelimit, 60*HZ, 2); | ||
| 500 | |||
| 391 | clear_thread_flag(TIF_MCE_NOTIFY); | 501 | clear_thread_flag(TIF_MCE_NOTIFY); |
| 392 | if (test_and_clear_bit(0, ¬ify_user)) { | 502 | if (test_and_clear_bit(0, ¬ify_user)) { |
| 393 | static unsigned long last_print; | ||
| 394 | unsigned long now = jiffies; | ||
| 395 | |||
| 396 | wake_up_interruptible(&mce_wait); | 503 | wake_up_interruptible(&mce_wait); |
| 397 | if (trigger[0]) | ||
| 398 | call_usermodehelper(trigger, trigger_argv, NULL, | ||
| 399 | UMH_NO_WAIT); | ||
| 400 | 504 | ||
| 401 | if (time_after_eq(now, last_print + (check_interval*HZ))) { | 505 | /* |
| 402 | last_print = now; | 506 | * There is no risk of missing notifications because |
| 507 | * work_pending is always cleared before the function is | ||
| 508 | * executed. | ||
| 509 | */ | ||
| 510 | if (trigger[0] && !work_pending(&mce_trigger_work)) | ||
| 511 | schedule_work(&mce_trigger_work); | ||
| 512 | |||
| 513 | if (__ratelimit(&ratelimit)) | ||
| 403 | printk(KERN_INFO "Machine check events logged\n"); | 514 | printk(KERN_INFO "Machine check events logged\n"); |
| 404 | } | ||
| 405 | 515 | ||
| 406 | return 1; | 516 | return 1; |
| 407 | } | 517 | } |
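Editor's note: mce_notify_user() above now throttles its console message with DEFINE_RATELIMIT_STATE(ratelimit, 60*HZ, 2) — at most two messages per minute — and defers the /dev/mcelog trigger to a workqueue so the usermode helper never runs from interrupt context. The rate-limit behaviour can be mimicked with a tiny window-based limiter in userspace C; this is an analogue of __ratelimit(), not the kernel implementation:

#include <stdio.h>
#include <time.h>

struct ratelimit {
    time_t window_start;
    int interval;      /* window length in seconds    */
    int burst;         /* messages allowed per window */
    int used;
};

/* Return 1 if the caller may print now, 0 if it should stay quiet. */
static int ratelimit_ok(struct ratelimit *rl)
{
    time_t now = time(NULL);

    if (now - rl->window_start >= rl->interval) {
        rl->window_start = now;          /* new window, refill the budget */
        rl->used = 0;
    }
    if (rl->used >= rl->burst)
        return 0;
    rl->used++;
    return 1;
}

int main(void)
{
    struct ratelimit rl = { .interval = 60, .burst = 2 };

    for (int i = 0; i < 5; i++)
        if (ratelimit_ok(&rl))
            printf("Machine check events logged (%d)\n", i);
    return 0;
}

The separate work_pending() check in the kernel hunk serves a different purpose: it simply avoids queuing the trigger work a second time while a previous run is still pending.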
| @@ -425,63 +535,78 @@ static struct notifier_block mce_idle_notifier = { | |||
| 425 | 535 | ||
| 426 | static __init int periodic_mcheck_init(void) | 536 | static __init int periodic_mcheck_init(void) |
| 427 | { | 537 | { |
| 428 | next_interval = check_interval * HZ; | 538 | idle_notifier_register(&mce_idle_notifier); |
| 429 | if (next_interval) | 539 | return 0; |
| 430 | schedule_delayed_work(&mcheck_work, | ||
| 431 | round_jiffies_relative(next_interval)); | ||
| 432 | idle_notifier_register(&mce_idle_notifier); | ||
| 433 | return 0; | ||
| 434 | } | 540 | } |
| 435 | __initcall(periodic_mcheck_init); | 541 | __initcall(periodic_mcheck_init); |
| 436 | 542 | ||
| 437 | |||
| 438 | /* | 543 | /* |
| 439 | * Initialize Machine Checks for a CPU. | 544 | * Initialize Machine Checks for a CPU. |
| 440 | */ | 545 | */ |
| 441 | static void mce_init(void *dummy) | 546 | static int mce_cap_init(void) |
| 442 | { | 547 | { |
| 443 | u64 cap; | 548 | u64 cap; |
| 444 | int i; | 549 | unsigned b; |
| 445 | 550 | ||
| 446 | rdmsrl(MSR_IA32_MCG_CAP, cap); | 551 | rdmsrl(MSR_IA32_MCG_CAP, cap); |
| 447 | banks = cap & 0xff; | 552 | b = cap & 0xff; |
| 448 | if (banks > MCE_EXTENDED_BANK) { | 553 | if (b > MAX_NR_BANKS) { |
| 449 | banks = MCE_EXTENDED_BANK; | 554 | printk(KERN_WARNING |
| 450 | printk(KERN_INFO "MCE: warning: using only %d banks\n", | 555 | "MCE: Using only %u machine check banks out of %u\n", |
| 451 | MCE_EXTENDED_BANK); | 556 | MAX_NR_BANKS, b); |
| 557 | b = MAX_NR_BANKS; | ||
| 452 | } | 558 | } |
| 559 | |||
| 560 | /* Don't support asymmetric configurations today */ | ||
| 561 | WARN_ON(banks != 0 && b != banks); | ||
| 562 | banks = b; | ||
| 563 | if (!bank) { | ||
| 564 | bank = kmalloc(banks * sizeof(u64), GFP_KERNEL); | ||
| 565 | if (!bank) | ||
| 566 | return -ENOMEM; | ||
| 567 | memset(bank, 0xff, banks * sizeof(u64)); | ||
| 568 | } | ||
| 569 | |||
| 453 | /* Use accurate RIP reporting if available. */ | 570 | /* Use accurate RIP reporting if available. */ |
| 454 | if ((cap & (1<<9)) && ((cap >> 16) & 0xff) >= 9) | 571 | if ((cap & (1<<9)) && ((cap >> 16) & 0xff) >= 9) |
| 455 | rip_msr = MSR_IA32_MCG_EIP; | 572 | rip_msr = MSR_IA32_MCG_EIP; |
| 456 | 573 | ||
| 457 | /* Log the machine checks left over from the previous reset. | 574 | return 0; |
| 458 | This also clears all registers */ | 575 | } |
| 459 | do_machine_check(NULL, mce_bootlog ? -1 : -2); | 576 | |
| 577 | static void mce_init(void *dummy) | ||
| 578 | { | ||
| 579 | u64 cap; | ||
| 580 | int i; | ||
| 581 | mce_banks_t all_banks; | ||
| 582 | |||
| 583 | /* | ||
| 584 | * Log the machine checks left over from the previous reset. | ||
| 585 | */ | ||
| 586 | bitmap_fill(all_banks, MAX_NR_BANKS); | ||
| 587 | machine_check_poll(MCP_UC, &all_banks); | ||
| 460 | 588 | ||
| 461 | set_in_cr4(X86_CR4_MCE); | 589 | set_in_cr4(X86_CR4_MCE); |
| 462 | 590 | ||
| 591 | rdmsrl(MSR_IA32_MCG_CAP, cap); | ||
| 463 | if (cap & MCG_CTL_P) | 592 | if (cap & MCG_CTL_P) |
| 464 | wrmsr(MSR_IA32_MCG_CTL, 0xffffffff, 0xffffffff); | 593 | wrmsr(MSR_IA32_MCG_CTL, 0xffffffff, 0xffffffff); |
| 465 | 594 | ||
| 466 | for (i = 0; i < banks; i++) { | 595 | for (i = 0; i < banks; i++) { |
| 467 | if (i < NR_SYSFS_BANKS) | 596 | wrmsrl(MSR_IA32_MC0_CTL+4*i, bank[i]); |
| 468 | wrmsrl(MSR_IA32_MC0_CTL+4*i, bank[i]); | ||
| 469 | else | ||
| 470 | wrmsrl(MSR_IA32_MC0_CTL+4*i, ~0UL); | ||
| 471 | |||
| 472 | wrmsrl(MSR_IA32_MC0_STATUS+4*i, 0); | 597 | wrmsrl(MSR_IA32_MC0_STATUS+4*i, 0); |
| 473 | } | 598 | } |
| 474 | } | 599 | } |
| 475 | 600 | ||
| 476 | /* Add per CPU specific workarounds here */ | 601 | /* Add per CPU specific workarounds here */ |
| 477 | static void __cpuinit mce_cpu_quirks(struct cpuinfo_x86 *c) | 602 | static void mce_cpu_quirks(struct cpuinfo_x86 *c) |
| 478 | { | 603 | { |
| 479 | /* This should be disabled by the BIOS, but isn't always */ | 604 | /* This should be disabled by the BIOS, but isn't always */ |
| 480 | if (c->x86_vendor == X86_VENDOR_AMD) { | 605 | if (c->x86_vendor == X86_VENDOR_AMD) { |
| 481 | if(c->x86 == 15) | 606 | if (c->x86 == 15 && banks > 4) |
| 482 | /* disable GART TBL walk error reporting, which trips off | 607 | /* disable GART TBL walk error reporting, which trips off |
| 483 | incorrectly with the IOMMU & 3ware & Cerberus. */ | 608 | incorrectly with the IOMMU & 3ware & Cerberus. */ |
| 484 | clear_bit(10, &bank[4]); | 609 | clear_bit(10, (unsigned long *)&bank[4]); |
| 485 | if(c->x86 <= 17 && mce_bootlog < 0) | 610 | if(c->x86 <= 17 && mce_bootlog < 0) |
| 486 | /* Lots of broken BIOS around that don't clear them | 611 | /* Lots of broken BIOS around that don't clear them |
| 487 | by default and leave crap in there. Don't log. */ | 612 | by default and leave crap in there. Don't log. */ |
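Editor's note: mce_cap_init() above reads the bank count from MCG_CAP, clamps it to MAX_NR_BANKS with a warning, refuses asymmetric configurations, and allocates the per-bank control array once, defaulting every control word to all-ones. The same shape in a userspace sketch, with a hypothetical read_cap() standing in for rdmsrl(MSR_IA32_MCG_CAP, ...):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define MAX_NR_BANKS 32

static int banks;          /* bank count shared by all CPUs          */
static uint64_t *bank;     /* one control word per bank, ~0 = enable */

/* Stand-in for the MCG_CAP read: pretend the CPU reports 40 banks. */
static uint64_t read_cap(void) { return 40; }

static int cap_init(void)
{
    unsigned b = read_cap() & 0xff;

    if (b > MAX_NR_BANKS) {
        fprintf(stderr, "using only %d of %u banks\n", MAX_NR_BANKS, b);
        b = MAX_NR_BANKS;
    }
    banks = b;

    if (!bank) {                                  /* allocate once, lazily */
        bank = malloc(banks * sizeof(*bank));
        if (!bank)
            return -1;
        memset(bank, 0xff, banks * sizeof(*bank));   /* default: everything on */
    }
    return 0;
}

int main(void)
{
    if (cap_init())
        return 1;
    printf("%d banks, bank[0] = %#llx\n", banks, (unsigned long long)bank[0]);
    free(bank);
    return 0;
}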
| @@ -504,20 +629,38 @@ static void mce_cpu_features(struct cpuinfo_x86 *c) | |||
| 504 | } | 629 | } |
| 505 | } | 630 | } |
| 506 | 631 | ||
| 632 | static void mce_init_timer(void) | ||
| 633 | { | ||
| 634 | struct timer_list *t = &__get_cpu_var(mce_timer); | ||
| 635 | |||
| 636 | /* data race harmless because everyone sets to the same value */ | ||
| 637 | if (!next_interval) | ||
| 638 | next_interval = check_interval * HZ; | ||
| 639 | if (!next_interval) | ||
| 640 | return; | ||
| 641 | setup_timer(t, mcheck_timer, smp_processor_id()); | ||
| 642 | t->expires = round_jiffies(jiffies + next_interval); | ||
| 643 | add_timer(t); | ||
| 644 | } | ||
| 645 | |||
| 507 | /* | 646 | /* |
| 508 | * Called for each booted CPU to set up machine checks. | 647 | * Called for each booted CPU to set up machine checks. |
| 509 | * Must be called with preempt off. | 648 | * Must be called with preempt off. |
| 510 | */ | 649 | */ |
| 511 | void __cpuinit mcheck_init(struct cpuinfo_x86 *c) | 650 | void __cpuinit mcheck_init(struct cpuinfo_x86 *c) |
| 512 | { | 651 | { |
| 513 | mce_cpu_quirks(c); | 652 | if (!mce_available(c)) |
| 653 | return; | ||
| 514 | 654 | ||
| 515 | if (mce_dont_init || | 655 | if (mce_cap_init() < 0) { |
| 516 | !mce_available(c)) | 656 | mce_dont_init = 1; |
| 517 | return; | 657 | return; |
| 658 | } | ||
| 659 | mce_cpu_quirks(c); | ||
| 518 | 660 | ||
| 519 | mce_init(NULL); | 661 | mce_init(NULL); |
| 520 | mce_cpu_features(c); | 662 | mce_cpu_features(c); |
| 663 | mce_init_timer(); | ||
| 521 | } | 664 | } |
| 522 | 665 | ||
| 523 | /* | 666 | /* |
| @@ -573,7 +716,7 @@ static ssize_t mce_read(struct file *filp, char __user *ubuf, size_t usize, | |||
| 573 | { | 716 | { |
| 574 | unsigned long *cpu_tsc; | 717 | unsigned long *cpu_tsc; |
| 575 | static DEFINE_MUTEX(mce_read_mutex); | 718 | static DEFINE_MUTEX(mce_read_mutex); |
| 576 | unsigned next; | 719 | unsigned prev, next; |
| 577 | char __user *buf = ubuf; | 720 | char __user *buf = ubuf; |
| 578 | int i, err; | 721 | int i, err; |
| 579 | 722 | ||
| @@ -592,25 +735,32 @@ static ssize_t mce_read(struct file *filp, char __user *ubuf, size_t usize, | |||
| 592 | } | 735 | } |
| 593 | 736 | ||
| 594 | err = 0; | 737 | err = 0; |
| 595 | for (i = 0; i < next; i++) { | 738 | prev = 0; |
| 596 | unsigned long start = jiffies; | 739 | do { |
| 597 | 740 | for (i = prev; i < next; i++) { | |
| 598 | while (!mcelog.entry[i].finished) { | 741 | unsigned long start = jiffies; |
| 599 | if (time_after_eq(jiffies, start + 2)) { | 742 | |
| 600 | memset(mcelog.entry + i,0, sizeof(struct mce)); | 743 | while (!mcelog.entry[i].finished) { |
| 601 | goto timeout; | 744 | if (time_after_eq(jiffies, start + 2)) { |
| 745 | memset(mcelog.entry + i, 0, | ||
| 746 | sizeof(struct mce)); | ||
| 747 | goto timeout; | ||
| 748 | } | ||
| 749 | cpu_relax(); | ||
| 602 | } | 750 | } |
| 603 | cpu_relax(); | 751 | smp_rmb(); |
| 752 | err |= copy_to_user(buf, mcelog.entry + i, | ||
| 753 | sizeof(struct mce)); | ||
| 754 | buf += sizeof(struct mce); | ||
| 755 | timeout: | ||
| 756 | ; | ||
| 604 | } | 757 | } |
| 605 | smp_rmb(); | ||
| 606 | err |= copy_to_user(buf, mcelog.entry + i, sizeof(struct mce)); | ||
| 607 | buf += sizeof(struct mce); | ||
| 608 | timeout: | ||
| 609 | ; | ||
| 610 | } | ||
| 611 | 758 | ||
| 612 | memset(mcelog.entry, 0, next * sizeof(struct mce)); | 759 | memset(mcelog.entry + prev, 0, |
| 613 | mcelog.next = 0; | 760 | (next - prev) * sizeof(struct mce)); |
| 761 | prev = next; | ||
| 762 | next = cmpxchg(&mcelog.next, prev, 0); | ||
| 763 | } while (next != prev); | ||
| 614 | 764 | ||
| 615 | synchronize_sched(); | 765 | synchronize_sched(); |
| 616 | 766 | ||
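Editor's note: the reworked mce_read() above drains the log in rounds: copy entries [prev, next), then cmpxchg mcelog.next from prev back to 0; if a producer appended entries in the meantime, the cmpxchg returns the newer index and the loop copies the remainder, so nothing added between the copy and the reset is lost. A userspace analogue using C11 atomics — the ring and entry type are invented for the sketch, and the kernel's finished-flag/timeout handling is omitted:

#include <stdatomic.h>
#include <stdio.h>

#define LOG_LEN 32

struct entry { int data; };

static struct entry ring[LOG_LEN];
static atomic_uint next_idx;       /* producer append index, like mcelog.next */

static void drain(void)
{
    unsigned prev = 0, next = atomic_load(&next_idx);

    do {
        for (unsigned i = prev; i < next; i++)
            printf("consumed entry %u (data %d)\n", i, ring[i].data);

        prev = next;
        unsigned expected = prev;
        /* Swing the index back to 0; on failure 'expected' holds the
           producer's newer index and the loop copies the remainder. */
        atomic_compare_exchange_strong(&next_idx, &expected, 0);
        next = expected;
    } while (next != prev);
}

int main(void)
{
    for (unsigned i = 0; i < 5; i++)
        ring[i].data = (int)i;
    atomic_store(&next_idx, 5);
    drain();
    return 0;
}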
| @@ -680,20 +830,6 @@ static struct miscdevice mce_log_device = { | |||
| 680 | &mce_chrdev_ops, | 830 | &mce_chrdev_ops, |
| 681 | }; | 831 | }; |
| 682 | 832 | ||
| 683 | static unsigned long old_cr4 __initdata; | ||
| 684 | |||
| 685 | void __init stop_mce(void) | ||
| 686 | { | ||
| 687 | old_cr4 = read_cr4(); | ||
| 688 | clear_in_cr4(X86_CR4_MCE); | ||
| 689 | } | ||
| 690 | |||
| 691 | void __init restart_mce(void) | ||
| 692 | { | ||
| 693 | if (old_cr4 & X86_CR4_MCE) | ||
| 694 | set_in_cr4(X86_CR4_MCE); | ||
| 695 | } | ||
| 696 | |||
| 697 | /* | 833 | /* |
| 698 | * Old style boot options parsing. Only for compatibility. | 834 | * Old style boot options parsing. Only for compatibility. |
| 699 | */ | 835 | */ |
| @@ -703,8 +839,7 @@ static int __init mcheck_disable(char *str) | |||
| 703 | return 1; | 839 | return 1; |
| 704 | } | 840 | } |
| 705 | 841 | ||
| 706 | /* mce=off disables machine check. Note you can re-enable it later | 842 | /* mce=off disables machine check. |
| 707 | using sysfs. | ||
| 708 | mce=TOLERANCELEVEL (number, see above) | 843 | mce=TOLERANCELEVEL (number, see above) |
| 709 | mce=bootlog Log MCEs from before booting. Disabled by default on AMD. | 844 | mce=bootlog Log MCEs from before booting. Disabled by default on AMD. |
| 710 | mce=nobootlog Don't log MCEs from before booting. */ | 845 | mce=nobootlog Don't log MCEs from before booting. */ |
| @@ -728,6 +863,29 @@ __setup("mce=", mcheck_enable); | |||
| 728 | * Sysfs support | 863 | * Sysfs support |
| 729 | */ | 864 | */ |
| 730 | 865 | ||
| 866 | /* | ||
| 867 | * Disable machine checks on suspend and shutdown. We can't really handle | ||
| 868 | * them later. | ||
| 869 | */ | ||
| 870 | static int mce_disable(void) | ||
| 871 | { | ||
| 872 | int i; | ||
| 873 | |||
| 874 | for (i = 0; i < banks; i++) | ||
| 875 | wrmsrl(MSR_IA32_MC0_CTL + i*4, 0); | ||
| 876 | return 0; | ||
| 877 | } | ||
| 878 | |||
| 879 | static int mce_suspend(struct sys_device *dev, pm_message_t state) | ||
| 880 | { | ||
| 881 | return mce_disable(); | ||
| 882 | } | ||
| 883 | |||
| 884 | static int mce_shutdown(struct sys_device *dev) | ||
| 885 | { | ||
| 886 | return mce_disable(); | ||
| 887 | } | ||
| 888 | |||
| 731 | /* On resume clear all MCE state. Don't want to see leftovers from the BIOS. | 889 | /* On resume clear all MCE state. Don't want to see leftovers from the BIOS. |
| 732 | Only one CPU is active at this time, the others get readded later using | 890 | Only one CPU is active at this time, the others get readded later using |
| 733 | CPU hotplug. */ | 891 | CPU hotplug. */ |
| @@ -738,20 +896,24 @@ static int mce_resume(struct sys_device *dev) | |||
| 738 | return 0; | 896 | return 0; |
| 739 | } | 897 | } |
| 740 | 898 | ||
| 899 | static void mce_cpu_restart(void *data) | ||
| 900 | { | ||
| 901 | del_timer_sync(&__get_cpu_var(mce_timer)); | ||
| 902 | if (mce_available(¤t_cpu_data)) | ||
| 903 | mce_init(NULL); | ||
| 904 | mce_init_timer(); | ||
| 905 | } | ||
| 906 | |||
| 741 | /* Reinit MCEs after user configuration changes */ | 907 | /* Reinit MCEs after user configuration changes */ |
| 742 | static void mce_restart(void) | 908 | static void mce_restart(void) |
| 743 | { | 909 | { |
| 744 | if (next_interval) | ||
| 745 | cancel_delayed_work(&mcheck_work); | ||
| 746 | /* Timer race is harmless here */ | ||
| 747 | on_each_cpu(mce_init, NULL, 1); | ||
| 748 | next_interval = check_interval * HZ; | 910 | next_interval = check_interval * HZ; |
| 749 | if (next_interval) | 911 | on_each_cpu(mce_cpu_restart, NULL, 1); |
| 750 | schedule_delayed_work(&mcheck_work, | ||
| 751 | round_jiffies_relative(next_interval)); | ||
| 752 | } | 912 | } |
| 753 | 913 | ||
| 754 | static struct sysdev_class mce_sysclass = { | 914 | static struct sysdev_class mce_sysclass = { |
| 915 | .suspend = mce_suspend, | ||
| 916 | .shutdown = mce_shutdown, | ||
| 755 | .resume = mce_resume, | 917 | .resume = mce_resume, |
| 756 | .name = "machinecheck", | 918 | .name = "machinecheck", |
| 757 | }; | 919 | }; |
| @@ -778,16 +940,26 @@ void (*threshold_cpu_callback)(unsigned long action, unsigned int cpu) __cpuinit | |||
| 778 | } \ | 940 | } \ |
| 779 | static SYSDEV_ATTR(name, 0644, show_ ## name, set_ ## name); | 941 | static SYSDEV_ATTR(name, 0644, show_ ## name, set_ ## name); |
| 780 | 942 | ||
| 781 | /* | 943 | static struct sysdev_attribute *bank_attrs; |
| 782 | * TBD should generate these dynamically based on number of available banks. | 944 | |
| 783 | * Have only 6 contol banks in /sysfs until then. | 945 | static ssize_t show_bank(struct sys_device *s, struct sysdev_attribute *attr, |
| 784 | */ | 946 | char *buf) |
| 785 | ACCESSOR(bank0ctl,bank[0],mce_restart()) | 947 | { |
| 786 | ACCESSOR(bank1ctl,bank[1],mce_restart()) | 948 | u64 b = bank[attr - bank_attrs]; |
| 787 | ACCESSOR(bank2ctl,bank[2],mce_restart()) | 949 | return sprintf(buf, "%llx\n", b); |
| 788 | ACCESSOR(bank3ctl,bank[3],mce_restart()) | 950 | } |
| 789 | ACCESSOR(bank4ctl,bank[4],mce_restart()) | 951 | |
| 790 | ACCESSOR(bank5ctl,bank[5],mce_restart()) | 952 | static ssize_t set_bank(struct sys_device *s, struct sysdev_attribute *attr, |
| 953 | const char *buf, size_t siz) | ||
| 954 | { | ||
| 955 | char *end; | ||
| 956 | u64 new = simple_strtoull(buf, &end, 0); | ||
| 957 | if (end == buf) | ||
| 958 | return -EINVAL; | ||
| 959 | bank[attr - bank_attrs] = new; | ||
| 960 | mce_restart(); | ||
| 961 | return end-buf; | ||
| 962 | } | ||
| 791 | 963 | ||
| 792 | static ssize_t show_trigger(struct sys_device *s, struct sysdev_attribute *attr, | 964 | static ssize_t show_trigger(struct sys_device *s, struct sysdev_attribute *attr, |
| 793 | char *buf) | 965 | char *buf) |
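Editor's note: show_bank()/set_bank() above serve every bankN file with a single pair of handlers by recovering the bank number from the attribute pointer itself: all bank attributes live in one array, so attr - bank_attrs is the index into the parallel bank[] control array. The trick in isolation, with a simplified struct standing in for sysdev_attribute:

#include <stdint.h>
#include <stdio.h>

#define NBANKS 4

struct attr { const char *name; };

static uint64_t bank_ctl[NBANKS];
static struct attr bank_attrs[NBANKS] = {
    { "bank0" }, { "bank1" }, { "bank2" }, { "bank3" }
};

/* One handler for every attribute: the index is the pointer offset. */
static uint64_t show_bank(const struct attr *attr)
{
    return bank_ctl[attr - bank_attrs];
}

int main(void)
{
    bank_ctl[2] = 0xff;
    printf("%s = %#llx\n", bank_attrs[2].name,
           (unsigned long long)show_bank(&bank_attrs[2]));
    return 0;
}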
| @@ -814,13 +986,11 @@ static SYSDEV_ATTR(trigger, 0644, show_trigger, set_trigger); | |||
| 814 | static SYSDEV_INT_ATTR(tolerant, 0644, tolerant); | 986 | static SYSDEV_INT_ATTR(tolerant, 0644, tolerant); |
| 815 | ACCESSOR(check_interval,check_interval,mce_restart()) | 987 | ACCESSOR(check_interval,check_interval,mce_restart()) |
| 816 | static struct sysdev_attribute *mce_attributes[] = { | 988 | static struct sysdev_attribute *mce_attributes[] = { |
| 817 | &attr_bank0ctl, &attr_bank1ctl, &attr_bank2ctl, | ||
| 818 | &attr_bank3ctl, &attr_bank4ctl, &attr_bank5ctl, | ||
| 819 | &attr_tolerant.attr, &attr_check_interval, &attr_trigger, | 989 | &attr_tolerant.attr, &attr_check_interval, &attr_trigger, |
| 820 | NULL | 990 | NULL |
| 821 | }; | 991 | }; |
| 822 | 992 | ||
| 823 | static cpumask_t mce_device_initialized = CPU_MASK_NONE; | 993 | static cpumask_var_t mce_device_initialized; |
| 824 | 994 | ||
| 825 | /* Per cpu sysdev init. All of the cpus still share the same ctl bank */ | 995 | /* Per cpu sysdev init. All of the cpus still share the same ctl bank */ |
| 826 | static __cpuinit int mce_create_device(unsigned int cpu) | 996 | static __cpuinit int mce_create_device(unsigned int cpu) |
| @@ -845,11 +1015,22 @@ static __cpuinit int mce_create_device(unsigned int cpu) | |||
| 845 | if (err) | 1015 | if (err) |
| 846 | goto error; | 1016 | goto error; |
| 847 | } | 1017 | } |
| 848 | cpu_set(cpu, mce_device_initialized); | 1018 | for (i = 0; i < banks; i++) { |
| 1019 | err = sysdev_create_file(&per_cpu(device_mce, cpu), | ||
| 1020 | &bank_attrs[i]); | ||
| 1021 | if (err) | ||
| 1022 | goto error2; | ||
| 1023 | } | ||
| 1024 | cpumask_set_cpu(cpu, mce_device_initialized); | ||
| 849 | 1025 | ||
| 850 | return 0; | 1026 | return 0; |
| 1027 | error2: | ||
| 1028 | while (--i >= 0) { | ||
| 1029 | sysdev_remove_file(&per_cpu(device_mce, cpu), | ||
| 1030 | &bank_attrs[i]); | ||
| 1031 | } | ||
| 851 | error: | 1032 | error: |
| 852 | while (i--) { | 1033 | while (--i >= 0) { |
| 853 | sysdev_remove_file(&per_cpu(device_mce,cpu), | 1034 | sysdev_remove_file(&per_cpu(device_mce,cpu), |
| 854 | mce_attributes[i]); | 1035 | mce_attributes[i]); |
| 855 | } | 1036 | } |
| @@ -862,14 +1043,44 @@ static __cpuinit void mce_remove_device(unsigned int cpu) | |||
| 862 | { | 1043 | { |
| 863 | int i; | 1044 | int i; |
| 864 | 1045 | ||
| 865 | if (!cpu_isset(cpu, mce_device_initialized)) | 1046 | if (!cpumask_test_cpu(cpu, mce_device_initialized)) |
| 866 | return; | 1047 | return; |
| 867 | 1048 | ||
| 868 | for (i = 0; mce_attributes[i]; i++) | 1049 | for (i = 0; mce_attributes[i]; i++) |
| 869 | sysdev_remove_file(&per_cpu(device_mce,cpu), | 1050 | sysdev_remove_file(&per_cpu(device_mce,cpu), |
| 870 | mce_attributes[i]); | 1051 | mce_attributes[i]); |
| 1052 | for (i = 0; i < banks; i++) | ||
| 1053 | sysdev_remove_file(&per_cpu(device_mce, cpu), | ||
| 1054 | &bank_attrs[i]); | ||
| 871 | sysdev_unregister(&per_cpu(device_mce,cpu)); | 1055 | sysdev_unregister(&per_cpu(device_mce,cpu)); |
| 872 | cpu_clear(cpu, mce_device_initialized); | 1056 | cpumask_clear_cpu(cpu, mce_device_initialized); |
| 1057 | } | ||
| 1058 | |||
| 1059 | /* Make sure there are no machine checks on offlined CPUs. */ | ||
| 1060 | static void mce_disable_cpu(void *h) | ||
| 1061 | { | ||
| 1062 | int i; | ||
| 1063 | unsigned long action = *(unsigned long *)h; | ||
| 1064 | |||
| 1065 | if (!mce_available(¤t_cpu_data)) | ||
| 1066 | return; | ||
| 1067 | if (!(action & CPU_TASKS_FROZEN)) | ||
| 1068 | cmci_clear(); | ||
| 1069 | for (i = 0; i < banks; i++) | ||
| 1070 | wrmsrl(MSR_IA32_MC0_CTL + i*4, 0); | ||
| 1071 | } | ||
| 1072 | |||
| 1073 | static void mce_reenable_cpu(void *h) | ||
| 1074 | { | ||
| 1075 | int i; | ||
| 1076 | unsigned long action = *(unsigned long *)h; | ||
| 1077 | |||
| 1078 | if (!mce_available(¤t_cpu_data)) | ||
| 1079 | return; | ||
| 1080 | if (!(action & CPU_TASKS_FROZEN)) | ||
| 1081 | cmci_reenable(); | ||
| 1082 | for (i = 0; i < banks; i++) | ||
| 1083 | wrmsrl(MSR_IA32_MC0_CTL + i*4, bank[i]); | ||
| 873 | } | 1084 | } |
| 874 | 1085 | ||
| 875 | /* Get notified when a cpu comes on/off. Be hotplug friendly. */ | 1086 | /* Get notified when a cpu comes on/off. Be hotplug friendly. */ |
| @@ -877,6 +1088,7 @@ static int __cpuinit mce_cpu_callback(struct notifier_block *nfb, | |||
| 877 | unsigned long action, void *hcpu) | 1088 | unsigned long action, void *hcpu) |
| 878 | { | 1089 | { |
| 879 | unsigned int cpu = (unsigned long)hcpu; | 1090 | unsigned int cpu = (unsigned long)hcpu; |
| 1091 | struct timer_list *t = &per_cpu(mce_timer, cpu); | ||
| 880 | 1092 | ||
| 881 | switch (action) { | 1093 | switch (action) { |
| 882 | case CPU_ONLINE: | 1094 | case CPU_ONLINE: |
| @@ -891,6 +1103,21 @@ static int __cpuinit mce_cpu_callback(struct notifier_block *nfb, | |||
| 891 | threshold_cpu_callback(action, cpu); | 1103 | threshold_cpu_callback(action, cpu); |
| 892 | mce_remove_device(cpu); | 1104 | mce_remove_device(cpu); |
| 893 | break; | 1105 | break; |
| 1106 | case CPU_DOWN_PREPARE: | ||
| 1107 | case CPU_DOWN_PREPARE_FROZEN: | ||
| 1108 | del_timer_sync(t); | ||
| 1109 | smp_call_function_single(cpu, mce_disable_cpu, &action, 1); | ||
| 1110 | break; | ||
| 1111 | case CPU_DOWN_FAILED: | ||
| 1112 | case CPU_DOWN_FAILED_FROZEN: | ||
| 1113 | t->expires = round_jiffies(jiffies + next_interval); | ||
| 1114 | add_timer_on(t, cpu); | ||
| 1115 | smp_call_function_single(cpu, mce_reenable_cpu, &action, 1); | ||
| 1116 | break; | ||
| 1117 | case CPU_POST_DEAD: | ||
| 1118 | /* intentionally ignoring frozen here */ | ||
| 1119 | cmci_rediscover(cpu); | ||
| 1120 | break; | ||
| 894 | } | 1121 | } |
| 895 | return NOTIFY_OK; | 1122 | return NOTIFY_OK; |
| 896 | } | 1123 | } |
| @@ -899,6 +1126,34 @@ static struct notifier_block mce_cpu_notifier __cpuinitdata = { | |||
| 899 | .notifier_call = mce_cpu_callback, | 1126 | .notifier_call = mce_cpu_callback, |
| 900 | }; | 1127 | }; |
| 901 | 1128 | ||
| 1129 | static __init int mce_init_banks(void) | ||
| 1130 | { | ||
| 1131 | int i; | ||
| 1132 | |||
| 1133 | bank_attrs = kzalloc(sizeof(struct sysdev_attribute) * banks, | ||
| 1134 | GFP_KERNEL); | ||
| 1135 | if (!bank_attrs) | ||
| 1136 | return -ENOMEM; | ||
| 1137 | |||
| 1138 | for (i = 0; i < banks; i++) { | ||
| 1139 | struct sysdev_attribute *a = &bank_attrs[i]; | ||
| 1140 | a->attr.name = kasprintf(GFP_KERNEL, "bank%d", i); | ||
| 1141 | if (!a->attr.name) | ||
| 1142 | goto nomem; | ||
| 1143 | a->attr.mode = 0644; | ||
| 1144 | a->show = show_bank; | ||
| 1145 | a->store = set_bank; | ||
| 1146 | } | ||
| 1147 | return 0; | ||
| 1148 | |||
| 1149 | nomem: | ||
| 1150 | while (--i >= 0) | ||
| 1151 | kfree(bank_attrs[i].attr.name); | ||
| 1152 | kfree(bank_attrs); | ||
| 1153 | bank_attrs = NULL; | ||
| 1154 | return -ENOMEM; | ||
| 1155 | } | ||
| 1156 | |||
| 902 | static __init int mce_init_device(void) | 1157 | static __init int mce_init_device(void) |
| 903 | { | 1158 | { |
| 904 | int err; | 1159 | int err; |
| @@ -906,6 +1161,13 @@ static __init int mce_init_device(void) | |||
| 906 | 1161 | ||
| 907 | if (!mce_available(&boot_cpu_data)) | 1162 | if (!mce_available(&boot_cpu_data)) |
| 908 | return -EIO; | 1163 | return -EIO; |
| 1164 | |||
| 1165 | alloc_cpumask_var(&mce_device_initialized, GFP_KERNEL); | ||
| 1166 | |||
| 1167 | err = mce_init_banks(); | ||
| 1168 | if (err) | ||
| 1169 | return err; | ||
| 1170 | |||
| 909 | err = sysdev_class_register(&mce_sysclass); | 1171 | err = sysdev_class_register(&mce_sysclass); |
| 910 | if (err) | 1172 | if (err) |
| 911 | return err; | 1173 | return err; |
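Editor's note: mce_init_banks() above builds the per-bank attributes at runtime — kzalloc an array sized by the detected bank count, kasprintf each "bank%d" name, and unwind with kfree on failure — which is what lets the sysfs interface drop the six hard-coded bankNctl files. A userspace sketch of the same allocate/name/unwind shape; asprintf (a GNU extension) stands in for kasprintf:

#define _GNU_SOURCE        /* for asprintf() */
#include <stdio.h>
#include <stdlib.h>

struct attr { char *name; };

static struct attr *make_bank_attrs(int banks)
{
    struct attr *a = calloc(banks, sizeof(*a));
    int i;

    if (!a)
        return NULL;

    for (i = 0; i < banks; i++)
        if (asprintf(&a[i].name, "bank%d", i) < 0)
            goto nomem;
    return a;

nomem:
    while (--i >= 0)               /* unwind only the names created so far */
        free(a[i].name);
    free(a);
    return NULL;
}

int main(void)
{
    struct attr *attrs = make_bank_attrs(6);

    if (!attrs)
        return 1;
    printf("created %s .. %s\n", attrs[0].name, attrs[5].name);
    for (int i = 0; i < 6; i++)
        free(attrs[i].name);
    free(attrs);
    return 0;
}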
diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd_64.c b/arch/x86/kernel/cpu/mcheck/mce_amd_64.c index f2ee0ae29bd6..1f429ee3477d 100644 --- a/arch/x86/kernel/cpu/mcheck/mce_amd_64.c +++ b/arch/x86/kernel/cpu/mcheck/mce_amd_64.c | |||
| @@ -67,7 +67,7 @@ static struct threshold_block threshold_defaults = { | |||
| 67 | struct threshold_bank { | 67 | struct threshold_bank { |
| 68 | struct kobject *kobj; | 68 | struct kobject *kobj; |
| 69 | struct threshold_block *blocks; | 69 | struct threshold_block *blocks; |
| 70 | cpumask_t cpus; | 70 | cpumask_var_t cpus; |
| 71 | }; | 71 | }; |
| 72 | static DEFINE_PER_CPU(struct threshold_bank *, threshold_banks[NR_BANKS]); | 72 | static DEFINE_PER_CPU(struct threshold_bank *, threshold_banks[NR_BANKS]); |
| 73 | 73 | ||
| @@ -79,6 +79,8 @@ static unsigned char shared_bank[NR_BANKS] = { | |||
| 79 | 79 | ||
| 80 | static DEFINE_PER_CPU(unsigned char, bank_map); /* see which banks are on */ | 80 | static DEFINE_PER_CPU(unsigned char, bank_map); /* see which banks are on */ |
| 81 | 81 | ||
| 82 | static void amd_threshold_interrupt(void); | ||
| 83 | |||
| 82 | /* | 84 | /* |
| 83 | * CPU Initialization | 85 | * CPU Initialization |
| 84 | */ | 86 | */ |
| @@ -174,6 +176,8 @@ void mce_amd_feature_init(struct cpuinfo_x86 *c) | |||
| 174 | tr.reset = 0; | 176 | tr.reset = 0; |
| 175 | tr.old_limit = 0; | 177 | tr.old_limit = 0; |
| 176 | threshold_restart_bank(&tr); | 178 | threshold_restart_bank(&tr); |
| 179 | |||
| 180 | mce_threshold_vector = amd_threshold_interrupt; | ||
| 177 | } | 181 | } |
| 178 | } | 182 | } |
| 179 | } | 183 | } |
| @@ -187,19 +191,13 @@ void mce_amd_feature_init(struct cpuinfo_x86 *c) | |||
| 187 | * the interrupt goes off when error_count reaches threshold_limit. | 191 | * the interrupt goes off when error_count reaches threshold_limit. |
| 188 | * the handler will simply log mcelog w/ software defined bank number. | 192 | * the handler will simply log mcelog w/ software defined bank number. |
| 189 | */ | 193 | */ |
| 190 | asmlinkage void mce_threshold_interrupt(void) | 194 | static void amd_threshold_interrupt(void) |
| 191 | { | 195 | { |
| 192 | unsigned int bank, block; | 196 | unsigned int bank, block; |
| 193 | struct mce m; | 197 | struct mce m; |
| 194 | u32 low = 0, high = 0, address = 0; | 198 | u32 low = 0, high = 0, address = 0; |
| 195 | 199 | ||
| 196 | ack_APIC_irq(); | 200 | mce_setup(&m); |
| 197 | exit_idle(); | ||
| 198 | irq_enter(); | ||
| 199 | |||
| 200 | memset(&m, 0, sizeof(m)); | ||
| 201 | rdtscll(m.tsc); | ||
| 202 | m.cpu = smp_processor_id(); | ||
| 203 | 201 | ||
| 204 | /* assume first bank caused it */ | 202 | /* assume first bank caused it */ |
| 205 | for (bank = 0; bank < NR_BANKS; ++bank) { | 203 | for (bank = 0; bank < NR_BANKS; ++bank) { |
| @@ -233,7 +231,8 @@ asmlinkage void mce_threshold_interrupt(void) | |||
| 233 | 231 | ||
| 234 | /* Log the machine check that caused the threshold | 232 | /* Log the machine check that caused the threshold |
| 235 | event. */ | 233 | event. */ |
| 236 | do_machine_check(NULL, 0); | 234 | machine_check_poll(MCP_TIMESTAMP, |
| 235 | &__get_cpu_var(mce_poll_banks)); | ||
| 237 | 236 | ||
| 238 | if (high & MASK_OVERFLOW_HI) { | 237 | if (high & MASK_OVERFLOW_HI) { |
| 239 | rdmsrl(address, m.misc); | 238 | rdmsrl(address, m.misc); |
| @@ -243,13 +242,10 @@ asmlinkage void mce_threshold_interrupt(void) | |||
| 243 | + bank * NR_BLOCKS | 242 | + bank * NR_BLOCKS |
| 244 | + block; | 243 | + block; |
| 245 | mce_log(&m); | 244 | mce_log(&m); |
| 246 | goto out; | 245 | return; |
| 247 | } | 246 | } |
| 248 | } | 247 | } |
| 249 | } | 248 | } |
| 250 | out: | ||
| 251 | inc_irq_stat(irq_threshold_count); | ||
| 252 | irq_exit(); | ||
| 253 | } | 249 | } |
| 254 | 250 | ||
| 255 | /* | 251 | /* |
| @@ -481,7 +477,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank) | |||
| 481 | 477 | ||
| 482 | #ifdef CONFIG_SMP | 478 | #ifdef CONFIG_SMP |
| 483 | if (cpu_data(cpu).cpu_core_id && shared_bank[bank]) { /* symlink */ | 479 | if (cpu_data(cpu).cpu_core_id && shared_bank[bank]) { /* symlink */ |
| 484 | i = first_cpu(per_cpu(cpu_core_map, cpu)); | 480 | i = cpumask_first(cpu_core_mask(cpu)); |
| 485 | 481 | ||
| 486 | /* first core not up yet */ | 482 | /* first core not up yet */ |
| 487 | if (cpu_data(i).cpu_core_id) | 483 | if (cpu_data(i).cpu_core_id) |
| @@ -501,7 +497,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank) | |||
| 501 | if (err) | 497 | if (err) |
| 502 | goto out; | 498 | goto out; |
| 503 | 499 | ||
| 504 | b->cpus = per_cpu(cpu_core_map, cpu); | 500 | cpumask_copy(b->cpus, cpu_core_mask(cpu)); |
| 505 | per_cpu(threshold_banks, cpu)[bank] = b; | 501 | per_cpu(threshold_banks, cpu)[bank] = b; |
| 506 | goto out; | 502 | goto out; |
| 507 | } | 503 | } |
| @@ -512,15 +508,20 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank) | |||
| 512 | err = -ENOMEM; | 508 | err = -ENOMEM; |
| 513 | goto out; | 509 | goto out; |
| 514 | } | 510 | } |
| 511 | if (!alloc_cpumask_var(&b->cpus, GFP_KERNEL)) { | ||
| 512 | kfree(b); | ||
| 513 | err = -ENOMEM; | ||
| 514 | goto out; | ||
| 515 | } | ||
| 515 | 516 | ||
| 516 | b->kobj = kobject_create_and_add(name, &per_cpu(device_mce, cpu).kobj); | 517 | b->kobj = kobject_create_and_add(name, &per_cpu(device_mce, cpu).kobj); |
| 517 | if (!b->kobj) | 518 | if (!b->kobj) |
| 518 | goto out_free; | 519 | goto out_free; |
| 519 | 520 | ||
| 520 | #ifndef CONFIG_SMP | 521 | #ifndef CONFIG_SMP |
| 521 | b->cpus = CPU_MASK_ALL; | 522 | cpumask_setall(b->cpus); |
| 522 | #else | 523 | #else |
| 523 | b->cpus = per_cpu(cpu_core_map, cpu); | 524 | cpumask_copy(b->cpus, cpu_core_mask(cpu)); |
| 524 | #endif | 525 | #endif |
| 525 | 526 | ||
| 526 | per_cpu(threshold_banks, cpu)[bank] = b; | 527 | per_cpu(threshold_banks, cpu)[bank] = b; |
| @@ -529,7 +530,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank) | |||
| 529 | if (err) | 530 | if (err) |
| 530 | goto out_free; | 531 | goto out_free; |
| 531 | 532 | ||
| 532 | for_each_cpu_mask_nr(i, b->cpus) { | 533 | for_each_cpu(i, b->cpus) { |
| 533 | if (i == cpu) | 534 | if (i == cpu) |
| 534 | continue; | 535 | continue; |
| 535 | 536 | ||
| @@ -545,6 +546,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank) | |||
| 545 | 546 | ||
| 546 | out_free: | 547 | out_free: |
| 547 | per_cpu(threshold_banks, cpu)[bank] = NULL; | 548 | per_cpu(threshold_banks, cpu)[bank] = NULL; |
| 549 | free_cpumask_var(b->cpus); | ||
| 548 | kfree(b); | 550 | kfree(b); |
| 549 | out: | 551 | out: |
| 550 | return err; | 552 | return err; |
| @@ -619,7 +621,7 @@ static void threshold_remove_bank(unsigned int cpu, int bank) | |||
| 619 | #endif | 621 | #endif |
| 620 | 622 | ||
| 621 | /* remove all sibling symlinks before unregistering */ | 623 | /* remove all sibling symlinks before unregistering */ |
| 622 | for_each_cpu_mask_nr(i, b->cpus) { | 624 | for_each_cpu(i, b->cpus) { |
| 623 | if (i == cpu) | 625 | if (i == cpu) |
| 624 | continue; | 626 | continue; |
| 625 | 627 | ||
| @@ -632,6 +634,7 @@ static void threshold_remove_bank(unsigned int cpu, int bank) | |||
| 632 | free_out: | 634 | free_out: |
| 633 | kobject_del(b->kobj); | 635 | kobject_del(b->kobj); |
| 634 | kobject_put(b->kobj); | 636 | kobject_put(b->kobj); |
| 637 | free_cpumask_var(b->cpus); | ||
| 635 | kfree(b); | 638 | kfree(b); |
| 636 | per_cpu(threshold_banks, cpu)[bank] = NULL; | 639 | per_cpu(threshold_banks, cpu)[bank] = NULL; |
| 637 | } | 640 | } |
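Editor's note: with the threshold handling split out, the low-level interrupt entry calls a single indirection and mce_amd_feature_init() merely installs amd_threshold_interrupt() into mce_threshold_vector (the Intel CMCI code below does the same with its own handler). A minimal sketch of that install-a-handler-at-init pattern; the default handler name and the is_amd switch are invented for illustration, since the new threshold.c is only partially shown in this diff:

#include <stdio.h>

/* Default handler, used until a vendor-specific one is installed. */
static void default_threshold_interrupt(void)
{
    printf("unexpected threshold interrupt\n");
}

/* The single vector the low-level interrupt entry code would call. */
static void (*threshold_vector)(void) = default_threshold_interrupt;

static void amd_threshold_interrupt(void)
{
    printf("AMD handler: poll the banks, log overflows\n");
}

static void vendor_feature_init(int is_amd)
{
    if (is_amd)
        threshold_vector = amd_threshold_interrupt;   /* install handler */
}

int main(void)
{
    threshold_vector();        /* before init: default path  */
    vendor_feature_init(1);
    threshold_vector();        /* after init: vendor handler */
    return 0;
}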
diff --git a/arch/x86/kernel/cpu/mcheck/mce_intel_64.c b/arch/x86/kernel/cpu/mcheck/mce_intel_64.c index f44c36624360..96b2a85545aa 100644 --- a/arch/x86/kernel/cpu/mcheck/mce_intel_64.c +++ b/arch/x86/kernel/cpu/mcheck/mce_intel_64.c | |||
| @@ -1,17 +1,21 @@ | |||
| 1 | /* | 1 | /* |
| 2 | * Intel specific MCE features. | 2 | * Intel specific MCE features. |
| 3 | * Copyright 2004 Zwane Mwaikambo <zwane@linuxpower.ca> | 3 | * Copyright 2004 Zwane Mwaikambo <zwane@linuxpower.ca> |
| 4 | * Copyright (C) 2008, 2009 Intel Corporation | ||
| 5 | * Author: Andi Kleen | ||
| 4 | */ | 6 | */ |
| 5 | 7 | ||
| 6 | #include <linux/init.h> | 8 | #include <linux/init.h> |
| 7 | #include <linux/interrupt.h> | 9 | #include <linux/interrupt.h> |
| 8 | #include <linux/percpu.h> | 10 | #include <linux/percpu.h> |
| 9 | #include <asm/processor.h> | 11 | #include <asm/processor.h> |
| 12 | #include <asm/apic.h> | ||
| 10 | #include <asm/msr.h> | 13 | #include <asm/msr.h> |
| 11 | #include <asm/mce.h> | 14 | #include <asm/mce.h> |
| 12 | #include <asm/hw_irq.h> | 15 | #include <asm/hw_irq.h> |
| 13 | #include <asm/idle.h> | 16 | #include <asm/idle.h> |
| 14 | #include <asm/therm_throt.h> | 17 | #include <asm/therm_throt.h> |
| 18 | #include <asm/apic.h> | ||
| 15 | 19 | ||
| 16 | asmlinkage void smp_thermal_interrupt(void) | 20 | asmlinkage void smp_thermal_interrupt(void) |
| 17 | { | 21 | { |
| @@ -24,7 +28,7 @@ asmlinkage void smp_thermal_interrupt(void) | |||
| 24 | 28 | ||
| 25 | rdmsrl(MSR_IA32_THERM_STATUS, msr_val); | 29 | rdmsrl(MSR_IA32_THERM_STATUS, msr_val); |
| 26 | if (therm_throt_process(msr_val & 1)) | 30 | if (therm_throt_process(msr_val & 1)) |
| 27 | mce_log_therm_throt_event(smp_processor_id(), msr_val); | 31 | mce_log_therm_throt_event(msr_val); |
| 28 | 32 | ||
| 29 | inc_irq_stat(irq_thermal_count); | 33 | inc_irq_stat(irq_thermal_count); |
| 30 | irq_exit(); | 34 | irq_exit(); |
| @@ -48,13 +52,13 @@ static void intel_init_thermal(struct cpuinfo_x86 *c) | |||
| 48 | */ | 52 | */ |
| 49 | rdmsr(MSR_IA32_MISC_ENABLE, l, h); | 53 | rdmsr(MSR_IA32_MISC_ENABLE, l, h); |
| 50 | h = apic_read(APIC_LVTTHMR); | 54 | h = apic_read(APIC_LVTTHMR); |
| 51 | if ((l & (1 << 3)) && (h & APIC_DM_SMI)) { | 55 | if ((l & MSR_IA32_MISC_ENABLE_TM1) && (h & APIC_DM_SMI)) { |
| 52 | printk(KERN_DEBUG | 56 | printk(KERN_DEBUG |
| 53 | "CPU%d: Thermal monitoring handled by SMI\n", cpu); | 57 | "CPU%d: Thermal monitoring handled by SMI\n", cpu); |
| 54 | return; | 58 | return; |
| 55 | } | 59 | } |
| 56 | 60 | ||
| 57 | if (cpu_has(c, X86_FEATURE_TM2) && (l & (1 << 13))) | 61 | if (cpu_has(c, X86_FEATURE_TM2) && (l & MSR_IA32_MISC_ENABLE_TM2)) |
| 58 | tm2 = 1; | 62 | tm2 = 1; |
| 59 | 63 | ||
| 60 | if (h & APIC_VECTOR_MASK) { | 64 | if (h & APIC_VECTOR_MASK) { |
| @@ -72,7 +76,7 @@ static void intel_init_thermal(struct cpuinfo_x86 *c) | |||
| 72 | wrmsr(MSR_IA32_THERM_INTERRUPT, l | 0x03, h); | 76 | wrmsr(MSR_IA32_THERM_INTERRUPT, l | 0x03, h); |
| 73 | 77 | ||
| 74 | rdmsr(MSR_IA32_MISC_ENABLE, l, h); | 78 | rdmsr(MSR_IA32_MISC_ENABLE, l, h); |
| 75 | wrmsr(MSR_IA32_MISC_ENABLE, l | (1 << 3), h); | 79 | wrmsr(MSR_IA32_MISC_ENABLE, l | MSR_IA32_MISC_ENABLE_TM1, h); |
| 76 | 80 | ||
| 77 | l = apic_read(APIC_LVTTHMR); | 81 | l = apic_read(APIC_LVTTHMR); |
| 78 | apic_write(APIC_LVTTHMR, l & ~APIC_LVT_MASKED); | 82 | apic_write(APIC_LVTTHMR, l & ~APIC_LVT_MASKED); |
| @@ -84,7 +88,209 @@ static void intel_init_thermal(struct cpuinfo_x86 *c) | |||
| 84 | return; | 88 | return; |
| 85 | } | 89 | } |
| 86 | 90 | ||
| 91 | /* | ||
| 92 | * Support for Intel Corrected Machine Check Interrupts (CMCI). This allows | ||
| 93 | * the CPU to raise an interrupt when a corrected machine check happened. | ||
| 94 | * Normally we pick those up using a regular polling timer. | ||
| 95 | * Also supports reliable discovery of shared banks. | ||
| 96 | */ | ||
| 97 | |||
| 98 | static DEFINE_PER_CPU(mce_banks_t, mce_banks_owned); | ||
| 99 | |||
| 100 | /* | ||
| 101 | * cmci_discover_lock protects against parallel discovery attempts | ||
| 102 | * which could race against each other. | ||
| 103 | */ | ||
| 104 | static DEFINE_SPINLOCK(cmci_discover_lock); | ||
| 105 | |||
| 106 | #define CMCI_THRESHOLD 1 | ||
| 107 | |||
| 108 | static int cmci_supported(int *banks) | ||
| 109 | { | ||
| 110 | u64 cap; | ||
| 111 | |||
| 112 | /* | ||
| 113 | * Vendor check is not strictly needed, but the initial | ||
| 114 | * initialization is vendor keyed and this | ||
| 115 | * makes sure none of the backdoors are entered otherwise. | ||
| 116 | */ | ||
| 117 | if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) | ||
| 118 | return 0; | ||
| 119 | if (!cpu_has_apic || lapic_get_maxlvt() < 6) | ||
| 120 | return 0; | ||
| 121 | rdmsrl(MSR_IA32_MCG_CAP, cap); | ||
| 122 | *banks = min_t(unsigned, MAX_NR_BANKS, cap & 0xff); | ||
| 123 | return !!(cap & MCG_CMCI_P); | ||
| 124 | } | ||
| 125 | |||
| 126 | /* | ||
| 127 | * The interrupt handler. This is called on every event. | ||
| 128 | * Just call the poller directly to log any events. | ||
| 129 | * This could in theory increase the threshold under high load, | ||
| 130 | * but doesn't for now. | ||
| 131 | */ | ||
| 132 | static void intel_threshold_interrupt(void) | ||
| 133 | { | ||
| 134 | machine_check_poll(MCP_TIMESTAMP, &__get_cpu_var(mce_banks_owned)); | ||
| 135 | mce_notify_user(); | ||
| 136 | } | ||
| 137 | |||
| 138 | static void print_update(char *type, int *hdr, int num) | ||
| 139 | { | ||
| 140 | if (*hdr == 0) | ||
| 141 | printk(KERN_INFO "CPU %d MCA banks", smp_processor_id()); | ||
| 142 | *hdr = 1; | ||
| 143 | printk(KERN_CONT " %s:%d", type, num); | ||
| 144 | } | ||
| 145 | |||
| 146 | /* | ||
| 147 | * Enable CMCI (Corrected Machine Check Interrupt) for available MCE banks | ||
| 148 | * on this CPU. Use the algorithm recommended in the SDM to discover shared | ||
| 149 | * banks. | ||
| 150 | */ | ||
| 151 | static void cmci_discover(int banks, int boot) | ||
| 152 | { | ||
| 153 | unsigned long *owned = (void *)&__get_cpu_var(mce_banks_owned); | ||
| 154 | int hdr = 0; | ||
| 155 | int i; | ||
| 156 | |||
| 157 | spin_lock(&cmci_discover_lock); | ||
| 158 | for (i = 0; i < banks; i++) { | ||
| 159 | u64 val; | ||
| 160 | |||
| 161 | if (test_bit(i, owned)) | ||
| 162 | continue; | ||
| 163 | |||
| 164 | rdmsrl(MSR_IA32_MC0_CTL2 + i, val); | ||
| 165 | |||
| 166 | /* Already owned by someone else? */ | ||
| 167 | if (val & CMCI_EN) { | ||
| 168 | if (test_and_clear_bit(i, owned) || boot) | ||
| 169 | print_update("SHD", &hdr, i); | ||
| 170 | __clear_bit(i, __get_cpu_var(mce_poll_banks)); | ||
| 171 | continue; | ||
| 172 | } | ||
| 173 | |||
| 174 | val |= CMCI_EN | CMCI_THRESHOLD; | ||
| 175 | wrmsrl(MSR_IA32_MC0_CTL2 + i, val); | ||
| 176 | rdmsrl(MSR_IA32_MC0_CTL2 + i, val); | ||
| 177 | |||
| 178 | /* Did the enable bit stick? -- the bank supports CMCI */ | ||
| 179 | if (val & CMCI_EN) { | ||
| 180 | if (!test_and_set_bit(i, owned) || boot) | ||
| 181 | print_update("CMCI", &hdr, i); | ||
| 182 | __clear_bit(i, __get_cpu_var(mce_poll_banks)); | ||
| 183 | } else { | ||
| 184 | WARN_ON(!test_bit(i, __get_cpu_var(mce_poll_banks))); | ||
| 185 | } | ||
| 186 | } | ||
| 187 | spin_unlock(&cmci_discover_lock); | ||
| 188 | if (hdr) | ||
| 189 | printk(KERN_CONT "\n"); | ||
| 190 | } | ||
| 191 | |||
| 192 | /* | ||
| 193 | * Just in case we missed an event during initialization check | ||
| 194 | * all the CMCI owned banks. | ||
| 195 | */ | ||
| 196 | void cmci_recheck(void) | ||
| 197 | { | ||
| 198 | unsigned long flags; | ||
| 199 | int banks; | ||
| 200 | |||
| 201 | if (!mce_available(¤t_cpu_data) || !cmci_supported(&banks)) | ||
| 202 | return; | ||
| 203 | local_irq_save(flags); | ||
| 204 | machine_check_poll(MCP_TIMESTAMP, &__get_cpu_var(mce_banks_owned)); | ||
| 205 | local_irq_restore(flags); | ||
| 206 | } | ||
| 207 | |||
| 208 | /* | ||
| 209 | * Disable CMCI on this CPU for all banks it owns when it goes down. | ||
| 210 | * This allows other CPUs to claim the banks on rediscovery. | ||
| 211 | */ | ||
| 212 | void cmci_clear(void) | ||
| 213 | { | ||
| 214 | int i; | ||
| 215 | int banks; | ||
| 216 | u64 val; | ||
| 217 | |||
| 218 | if (!cmci_supported(&banks)) | ||
| 219 | return; | ||
| 220 | spin_lock(&cmci_discover_lock); | ||
| 221 | for (i = 0; i < banks; i++) { | ||
| 222 | if (!test_bit(i, __get_cpu_var(mce_banks_owned))) | ||
| 223 | continue; | ||
| 224 | /* Disable CMCI */ | ||
| 225 | rdmsrl(MSR_IA32_MC0_CTL2 + i, val); | ||
| 226 | val &= ~(CMCI_EN|CMCI_THRESHOLD_MASK); | ||
| 227 | wrmsrl(MSR_IA32_MC0_CTL2 + i, val); | ||
| 228 | __clear_bit(i, __get_cpu_var(mce_banks_owned)); | ||
| 229 | } | ||
| 230 | spin_unlock(&cmci_discover_lock); | ||
| 231 | } | ||
| 232 | |||
| 233 | /* | ||
| 234 | * After a CPU went down, cycle through all the others and rediscover. | ||
| 235 | * Must run in process context. | ||
| 236 | */ | ||
| 237 | void cmci_rediscover(int dying) | ||
| 238 | { | ||
| 239 | int banks; | ||
| 240 | int cpu; | ||
| 241 | cpumask_var_t old; | ||
| 242 | |||
| 243 | if (!cmci_supported(&banks)) | ||
| 244 | return; | ||
| 245 | if (!alloc_cpumask_var(&old, GFP_KERNEL)) | ||
| 246 | return; | ||
| 247 | cpumask_copy(old, ¤t->cpus_allowed); | ||
| 248 | |||
| 249 | for_each_online_cpu (cpu) { | ||
| 250 | if (cpu == dying) | ||
| 251 | continue; | ||
| 252 | if (set_cpus_allowed_ptr(current, cpumask_of(cpu))) | ||
| 253 | continue; | ||
| 254 | /* Recheck banks in case CPUs don't all have the same number */ | ||
| 255 | if (cmci_supported(&banks)) | ||
| 256 | cmci_discover(banks, 0); | ||
| 257 | } | ||
| 258 | |||
| 259 | set_cpus_allowed_ptr(current, old); | ||
| 260 | free_cpumask_var(old); | ||
| 261 | } | ||
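cmci_clear() and cmci_rediscover() are the two halves of the hotplug handshake: the CPU going away drops its claim on the banks, and once it is dead the survivors re-run discovery so the orphaned banks get a new owner. A sketch of how a hotplug notifier could drive them; the callback and helper names here are hypothetical, the real hook sits in the MCE core:

#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/smp.h>

/* Hypothetical helper: runs on the outgoing CPU via IPI. */
static void example_cmci_clear_on_cpu(void *unused)
{
	cmci_clear();
}

static int example_mce_cpu_callback(struct notifier_block *nfb,
				    unsigned long action, void *hcpu)
{
	int cpu = (unsigned long)hcpu;

	switch (action) {
	case CPU_DOWN_PREPARE:
		/* Release this CPU's CMCI banks so others can claim them. */
		smp_call_function_single(cpu, example_cmci_clear_on_cpu, NULL, 1);
		break;
	case CPU_DEAD:
		/* Have the surviving CPUs pick up the orphaned banks. */
		cmci_rediscover(cpu);
		break;
	}
	return NOTIFY_OK;
}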
| 262 | |||
| 263 | /* | ||
| 264 | * Reenable CMCI on this CPU in case a CPU down failed. | ||
| 265 | */ | ||
| 266 | void cmci_reenable(void) | ||
| 267 | { | ||
| 268 | int banks; | ||
| 269 | if (cmci_supported(&banks)) | ||
| 270 | cmci_discover(banks, 0); | ||
| 271 | } | ||
| 272 | |||
| 273 | static __cpuinit void intel_init_cmci(void) | ||
| 274 | { | ||
| 275 | int banks; | ||
| 276 | |||
| 277 | if (!cmci_supported(&banks)) | ||
| 278 | return; | ||
| 279 | |||
| 280 | mce_threshold_vector = intel_threshold_interrupt; | ||
| 281 | cmci_discover(banks, 1); | ||
| 282 | /* | ||
| 283 | * For CPU #0 this runs while the APIC is still disabled, but | ||
| 284 | * that's ok because only the vector is set up. We do another | ||
| 285 | * check of the banks for CPU #0 later, to make sure we don't | ||
| 286 | * miss any events. | ||
| 287 | */ | ||
| 288 | apic_write(APIC_LVTCMCI, THRESHOLD_APIC_VECTOR|APIC_DM_FIXED); | ||
| 289 | cmci_recheck(); | ||
| 290 | } | ||
| 291 | |||
| 87 | void mce_intel_feature_init(struct cpuinfo_x86 *c) | 292 | void mce_intel_feature_init(struct cpuinfo_x86 *c) |
| 88 | { | 293 | { |
| 89 | intel_init_thermal(c); | 294 | intel_init_thermal(c); |
| 295 | intel_init_cmci(); | ||
| 90 | } | 296 | } |
diff --git a/arch/x86/kernel/cpu/mcheck/p4.c b/arch/x86/kernel/cpu/mcheck/p4.c index 9b60fce09f75..f53bdcbaf382 100644 --- a/arch/x86/kernel/cpu/mcheck/p4.c +++ b/arch/x86/kernel/cpu/mcheck/p4.c | |||
| @@ -85,7 +85,7 @@ static void intel_init_thermal(struct cpuinfo_x86 *c) | |||
| 85 | */ | 85 | */ |
| 86 | rdmsr(MSR_IA32_MISC_ENABLE, l, h); | 86 | rdmsr(MSR_IA32_MISC_ENABLE, l, h); |
| 87 | h = apic_read(APIC_LVTTHMR); | 87 | h = apic_read(APIC_LVTTHMR); |
| 88 | if ((l & (1<<3)) && (h & APIC_DM_SMI)) { | 88 | if ((l & MSR_IA32_MISC_ENABLE_TM1) && (h & APIC_DM_SMI)) { |
| 89 | printk(KERN_DEBUG "CPU%d: Thermal monitoring handled by SMI\n", | 89 | printk(KERN_DEBUG "CPU%d: Thermal monitoring handled by SMI\n", |
| 90 | cpu); | 90 | cpu); |
| 91 | return; /* -EBUSY */ | 91 | return; /* -EBUSY */ |
| @@ -111,7 +111,7 @@ static void intel_init_thermal(struct cpuinfo_x86 *c) | |||
| 111 | vendor_thermal_interrupt = intel_thermal_interrupt; | 111 | vendor_thermal_interrupt = intel_thermal_interrupt; |
| 112 | 112 | ||
| 113 | rdmsr(MSR_IA32_MISC_ENABLE, l, h); | 113 | rdmsr(MSR_IA32_MISC_ENABLE, l, h); |
| 114 | wrmsr(MSR_IA32_MISC_ENABLE, l | (1<<3), h); | 114 | wrmsr(MSR_IA32_MISC_ENABLE, l | MSR_IA32_MISC_ENABLE_TM1, h); |
| 115 | 115 | ||
| 116 | l = apic_read(APIC_LVTTHMR); | 116 | l = apic_read(APIC_LVTTHMR); |
| 117 | apic_write(APIC_LVTTHMR, l & ~APIC_LVT_MASKED); | 117 | apic_write(APIC_LVTTHMR, l & ~APIC_LVT_MASKED); |
diff --git a/arch/x86/kernel/cpu/mcheck/threshold.c b/arch/x86/kernel/cpu/mcheck/threshold.c new file mode 100644 index 000000000000..23ee9e730f78 --- /dev/null +++ b/arch/x86/kernel/cpu/mcheck/threshold.c | |||
| @@ -0,0 +1,29 @@ | |||
| 1 | /* | ||
| 2 | * Common corrected MCE threshold handler code: | ||
| 3 | */ | ||
| 4 | #include <linux/interrupt.h> | ||
| 5 | #include <linux/kernel.h> | ||
| 6 | |||
| 7 | #include <asm/irq_vectors.h> | ||
| 8 | #include <asm/apic.h> | ||
| 9 | #include <asm/idle.h> | ||
| 10 | #include <asm/mce.h> | ||
| 11 | |||
| 12 | static void default_threshold_interrupt(void) | ||
| 13 | { | ||
| 14 | printk(KERN_ERR "Unexpected threshold interrupt at vector %x\n", | ||
| 15 | THRESHOLD_APIC_VECTOR); | ||
| 16 | } | ||
| 17 | |||
| 18 | void (*mce_threshold_vector)(void) = default_threshold_interrupt; | ||
| 19 | |||
| 20 | asmlinkage void mce_threshold_interrupt(void) | ||
| 21 | { | ||
| 22 | exit_idle(); | ||
| 23 | irq_enter(); | ||
| 24 | inc_irq_stat(irq_threshold_count); | ||
| 25 | mce_threshold_vector(); | ||
| 26 | irq_exit(); | ||
| 27 | /* Ack only at the end to avoid potential reentry */ | ||
| 28 | ack_APIC_irq(); | ||
| 29 | } | ||
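mce_threshold_vector is a plain function pointer: the interrupt entry always lands in mce_threshold_interrupt(), and vendor code swaps in its own handler, exactly as intel_init_cmci() does above with intel_threshold_interrupt. A minimal sketch of such a hookup, with illustrative names:

/* Hypothetical vendor handler for corrected-error threshold interrupts. */
static void example_threshold_interrupt(void)
{
	/* Poll the banks this CPU owns and log any corrected errors. */
}

static void example_threshold_init(void)
{
	/* Route THRESHOLD_APIC_VECTOR events to the handler above. */
	mce_threshold_vector = example_threshold_interrupt;
}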
diff --git a/arch/x86/kernel/cpu/perfctr-watchdog.c b/arch/x86/kernel/cpu/perfctr-watchdog.c index 9abd48b22674..f6c70a164e32 100644 --- a/arch/x86/kernel/cpu/perfctr-watchdog.c +++ b/arch/x86/kernel/cpu/perfctr-watchdog.c | |||
| @@ -19,7 +19,7 @@ | |||
| 19 | #include <linux/nmi.h> | 19 | #include <linux/nmi.h> |
| 20 | #include <linux/kprobes.h> | 20 | #include <linux/kprobes.h> |
| 21 | 21 | ||
| 22 | #include <asm/apic.h> | 22 | #include <asm/genapic.h> |
| 23 | #include <asm/intel_arch_perfmon.h> | 23 | #include <asm/intel_arch_perfmon.h> |
| 24 | 24 | ||
| 25 | struct nmi_watchdog_ctlblk { | 25 | struct nmi_watchdog_ctlblk { |
diff --git a/arch/x86/kernel/cpu/proc.c b/arch/x86/kernel/cpu/proc.c index 01b1244ef1c0..f93047fed791 100644 --- a/arch/x86/kernel/cpu/proc.c +++ b/arch/x86/kernel/cpu/proc.c | |||
| @@ -7,15 +7,14 @@ | |||
| 7 | /* | 7 | /* |
| 8 | * Get CPU information for use by the procfs. | 8 | * Get CPU information for use by the procfs. |
| 9 | */ | 9 | */ |
| 10 | #ifdef CONFIG_X86_32 | ||
| 11 | static void show_cpuinfo_core(struct seq_file *m, struct cpuinfo_x86 *c, | 10 | static void show_cpuinfo_core(struct seq_file *m, struct cpuinfo_x86 *c, |
| 12 | unsigned int cpu) | 11 | unsigned int cpu) |
| 13 | { | 12 | { |
| 14 | #ifdef CONFIG_X86_HT | 13 | #ifdef CONFIG_SMP |
| 15 | if (c->x86_max_cores * smp_num_siblings > 1) { | 14 | if (c->x86_max_cores * smp_num_siblings > 1) { |
| 16 | seq_printf(m, "physical id\t: %d\n", c->phys_proc_id); | 15 | seq_printf(m, "physical id\t: %d\n", c->phys_proc_id); |
| 17 | seq_printf(m, "siblings\t: %d\n", | 16 | seq_printf(m, "siblings\t: %d\n", |
| 18 | cpus_weight(per_cpu(cpu_core_map, cpu))); | 17 | cpumask_weight(cpu_sibling_mask(cpu))); |
| 19 | seq_printf(m, "core id\t\t: %d\n", c->cpu_core_id); | 18 | seq_printf(m, "core id\t\t: %d\n", c->cpu_core_id); |
| 20 | seq_printf(m, "cpu cores\t: %d\n", c->booted_cores); | 19 | seq_printf(m, "cpu cores\t: %d\n", c->booted_cores); |
| 21 | seq_printf(m, "apicid\t\t: %d\n", c->apicid); | 20 | seq_printf(m, "apicid\t\t: %d\n", c->apicid); |
| @@ -24,6 +23,7 @@ static void show_cpuinfo_core(struct seq_file *m, struct cpuinfo_x86 *c, | |||
| 24 | #endif | 23 | #endif |
| 25 | } | 24 | } |
| 26 | 25 | ||
| 26 | #ifdef CONFIG_X86_32 | ||
| 27 | static void show_cpuinfo_misc(struct seq_file *m, struct cpuinfo_x86 *c) | 27 | static void show_cpuinfo_misc(struct seq_file *m, struct cpuinfo_x86 *c) |
| 28 | { | 28 | { |
| 29 | /* | 29 | /* |
| @@ -50,22 +50,6 @@ static void show_cpuinfo_misc(struct seq_file *m, struct cpuinfo_x86 *c) | |||
| 50 | c->wp_works_ok ? "yes" : "no"); | 50 | c->wp_works_ok ? "yes" : "no"); |
| 51 | } | 51 | } |
| 52 | #else | 52 | #else |
| 53 | static void show_cpuinfo_core(struct seq_file *m, struct cpuinfo_x86 *c, | ||
| 54 | unsigned int cpu) | ||
| 55 | { | ||
| 56 | #ifdef CONFIG_SMP | ||
| 57 | if (c->x86_max_cores * smp_num_siblings > 1) { | ||
| 58 | seq_printf(m, "physical id\t: %d\n", c->phys_proc_id); | ||
| 59 | seq_printf(m, "siblings\t: %d\n", | ||
| 60 | cpus_weight(per_cpu(cpu_core_map, cpu))); | ||
| 61 | seq_printf(m, "core id\t\t: %d\n", c->cpu_core_id); | ||
| 62 | seq_printf(m, "cpu cores\t: %d\n", c->booted_cores); | ||
| 63 | seq_printf(m, "apicid\t\t: %d\n", c->apicid); | ||
| 64 | seq_printf(m, "initial apicid\t: %d\n", c->initial_apicid); | ||
| 65 | } | ||
| 66 | #endif | ||
| 67 | } | ||
| 68 | |||
| 69 | static void show_cpuinfo_misc(struct seq_file *m, struct cpuinfo_x86 *c) | 53 | static void show_cpuinfo_misc(struct seq_file *m, struct cpuinfo_x86 *c) |
| 70 | { | 54 | { |
| 71 | seq_printf(m, | 55 | seq_printf(m, |
| @@ -159,9 +143,9 @@ static int show_cpuinfo(struct seq_file *m, void *v) | |||
| 159 | static void *c_start(struct seq_file *m, loff_t *pos) | 143 | static void *c_start(struct seq_file *m, loff_t *pos) |
| 160 | { | 144 | { |
| 161 | if (*pos == 0) /* just in case, cpu 0 is not the first */ | 145 | if (*pos == 0) /* just in case, cpu 0 is not the first */ |
| 162 | *pos = first_cpu(cpu_online_map); | 146 | *pos = cpumask_first(cpu_online_mask); |
| 163 | else | 147 | else |
| 164 | *pos = next_cpu_nr(*pos - 1, cpu_online_map); | 148 | *pos = cpumask_next(*pos - 1, cpu_online_mask); |
| 165 | if ((*pos) < nr_cpu_ids) | 149 | if ((*pos) < nr_cpu_ids) |
| 166 | return &cpu_data(*pos); | 150 | return &cpu_data(*pos); |
| 167 | return NULL; | 151 | return NULL; |
