Diffstat (limited to 'arch/x86/kernel/cpu')
34 files changed, 3396 insertions, 1785 deletions
diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
index 82db7f45e2de..4e242f9a06e4 100644
--- a/arch/x86/kernel/cpu/Makefile
+++ b/arch/x86/kernel/cpu/Makefile
@@ -14,11 +14,12 @@ obj-y += vmware.o hypervisor.o
 obj-$(CONFIG_X86_32) += bugs.o cmpxchg.o
 obj-$(CONFIG_X86_64) += bugs_64.o
 
+obj-$(CONFIG_X86_CPU_DEBUG) += cpu_debug.o
+
 obj-$(CONFIG_CPU_SUP_INTEL) += intel.o
 obj-$(CONFIG_CPU_SUP_AMD) += amd.o
 obj-$(CONFIG_CPU_SUP_CYRIX_32) += cyrix.o
-obj-$(CONFIG_CPU_SUP_CENTAUR_32) += centaur.o
-obj-$(CONFIG_CPU_SUP_CENTAUR_64) += centaur_64.o
+obj-$(CONFIG_CPU_SUP_CENTAUR) += centaur.o
 obj-$(CONFIG_CPU_SUP_TRANSMETA_32) += transmeta.o
 obj-$(CONFIG_CPU_SUP_UMC_32) += umc.o
 
diff --git a/arch/x86/kernel/cpu/addon_cpuid_features.c b/arch/x86/kernel/cpu/addon_cpuid_features.c
index 2cf23634b6d9..8220ae69849d 100644
--- a/arch/x86/kernel/cpu/addon_cpuid_features.c
+++ b/arch/x86/kernel/cpu/addon_cpuid_features.c
@@ -7,7 +7,7 @@
 #include <asm/pat.h>
 #include <asm/processor.h>
 
-#include <mach_apic.h>
+#include <asm/apic.h>
 
 struct cpuid_bit {
 	u16 feature;
@@ -29,7 +29,7 @@ void __cpuinit init_scattered_cpuid_features(struct cpuinfo_x86 *c)
 	u32 regs[4];
 	const struct cpuid_bit *cb;
 
-	static const struct cpuid_bit cpuid_bits[] = {
+	static const struct cpuid_bit __cpuinitconst cpuid_bits[] = {
 		{ X86_FEATURE_IDA, CR_EAX, 1, 0x00000006 },
 		{ 0, 0, 0, 0 }
 	};
@@ -69,7 +69,7 @@ void __cpuinit init_scattered_cpuid_features(struct cpuinfo_x86 *c)
  */
 void __cpuinit detect_extended_topology(struct cpuinfo_x86 *c)
 {
-#ifdef CONFIG_X86_SMP
+#ifdef CONFIG_SMP
 	unsigned int eax, ebx, ecx, edx, sub_index;
 	unsigned int ht_mask_width, core_plus_mask_width;
 	unsigned int core_select_mask, core_level_siblings;
@@ -116,22 +116,14 @@ void __cpuinit detect_extended_topology(struct cpuinfo_x86 *c)
 
 	core_select_mask = (~(-1 << core_plus_mask_width)) >> ht_mask_width;
 
-#ifdef CONFIG_X86_32
-	c->cpu_core_id = phys_pkg_id(c->initial_apicid, ht_mask_width)
+	c->cpu_core_id = apic->phys_pkg_id(c->initial_apicid, ht_mask_width)
 				 & core_select_mask;
-	c->phys_proc_id = phys_pkg_id(c->initial_apicid, core_plus_mask_width);
+	c->phys_proc_id = apic->phys_pkg_id(c->initial_apicid, core_plus_mask_width);
 	/*
 	 * Reinit the apicid, now that we have extended initial_apicid.
 	 */
-	c->apicid = phys_pkg_id(c->initial_apicid, 0);
-#else
-	c->cpu_core_id = phys_pkg_id(ht_mask_width) & core_select_mask;
-	c->phys_proc_id = phys_pkg_id(core_plus_mask_width);
-	/*
-	 * Reinit the apicid, now that we have extended initial_apicid.
-	 */
-	c->apicid = phys_pkg_id(0);
-#endif
+	c->apicid = apic->phys_pkg_id(c->initial_apicid, 0);
+
 	c->x86_max_cores = (core_level_siblings / smp_num_siblings);
 
 
@@ -143,37 +135,3 @@ void __cpuinit detect_extended_topology(struct cpuinfo_x86 *c)
 	return;
 #endif
 }
-
-#ifdef CONFIG_X86_PAT
-void __cpuinit validate_pat_support(struct cpuinfo_x86 *c)
-{
-	if (!cpu_has_pat)
-		pat_disable("PAT not supported by CPU.");
-
-	switch (c->x86_vendor) {
-	case X86_VENDOR_INTEL:
-		/*
-		 * There is a known erratum on Pentium III and Core Solo
-		 * and Core Duo CPUs.
-		 * " Page with PAT set to WC while associated MTRR is UC
-		 * may consolidate to UC "
-		 * Because of this erratum, it is better to stick with
-		 * setting WC in MTRR rather than using PAT on these CPUs.
-		 *
-		 * Enable PAT WC only on P4, Core 2 or later CPUs.
-		 */
-		if (c->x86 > 0x6 || (c->x86 == 6 && c->x86_model >= 15))
-			return;
-
-		pat_disable("PAT WC disabled due to known CPU erratum.");
-		return;
-
-	case X86_VENDOR_AMD:
-	case X86_VENDOR_CENTAUR:
-	case X86_VENDOR_TRANSMETA:
-		return;
-	}
-
-	pat_disable("PAT disabled. Not yet verified on this CPU type.");
-}
-#endif
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index 7c878f6aa919..7e4a459daa64 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -5,6 +5,7 @@
 #include <asm/io.h>
 #include <asm/processor.h>
 #include <asm/apic.h>
+#include <asm/cpu.h>
 
 #ifdef CONFIG_X86_64
 # include <asm/numa_64.h>
@@ -12,8 +13,6 @@
 # include <asm/cacheflush.h>
 #endif
 
-#include <mach_apic.h>
-
 #include "cpu.h"
 
 #ifdef CONFIG_X86_32
@@ -143,6 +142,55 @@ static void __cpuinit init_amd_k6(struct cpuinfo_x86 *c)
 	}
 }
 
+static void __cpuinit amd_k7_smp_check(struct cpuinfo_x86 *c)
+{
+#ifdef CONFIG_SMP
+	/* calling is from identify_secondary_cpu() ? */
+	if (c->cpu_index == boot_cpu_id)
+		return;
+
+	/*
+	 * Certain Athlons might work (for various values of 'work') in SMP
+	 * but they are not certified as MP capable.
+	 */
+	/* Athlon 660/661 is valid. */
+	if ((c->x86_model == 6) && ((c->x86_mask == 0) ||
+	    (c->x86_mask == 1)))
+		goto valid_k7;
+
+	/* Duron 670 is valid */
+	if ((c->x86_model == 7) && (c->x86_mask == 0))
+		goto valid_k7;
+
+	/*
+	 * Athlon 662, Duron 671, and Athlon >model 7 have capability
+	 * bit. It's worth noting that the A5 stepping (662) of some
+	 * Athlon XP's have the MP bit set.
+	 * See http://www.heise.de/newsticker/data/jow-18.10.01-000 for
+	 * more.
+	 */
+	if (((c->x86_model == 6) && (c->x86_mask >= 2)) ||
+	    ((c->x86_model == 7) && (c->x86_mask >= 1)) ||
+	    (c->x86_model > 7))
+		if (cpu_has_mp)
+			goto valid_k7;
+
+	/* If we get here, not a certified SMP capable AMD system. */
+
+	/*
+	 * Don't taint if we are running SMP kernel on a single non-MP
+	 * approved Athlon
+	 */
+	WARN_ONCE(1, "WARNING: This combination of AMD"
+		"processors is not suitable for SMP.\n");
+	if (!test_taint(TAINT_UNSAFE_SMP))
+		add_taint(TAINT_UNSAFE_SMP);
+
+valid_k7:
+	;
+#endif
+}
+
 static void __cpuinit init_amd_k7(struct cpuinfo_x86 *c)
 {
 	u32 l, h;
@@ -177,6 +225,8 @@ static void __cpuinit init_amd_k7(struct cpuinfo_x86 *c)
 	}
 
 	set_cpu_cap(c, X86_FEATURE_K7);
+
+	amd_k7_smp_check(c);
 }
 #endif
 
@@ -452,7 +502,7 @@ static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 *c, unsigned int
 }
 #endif
 
-static struct cpu_dev amd_cpu_dev __cpuinitdata = {
+static const struct cpu_dev __cpuinitconst amd_cpu_dev = {
 	.c_vendor = "AMD",
 	.c_ident = { "AuthenticAMD" },
 #ifdef CONFIG_X86_32
diff --git a/arch/x86/kernel/cpu/centaur.c b/arch/x86/kernel/cpu/centaur.c
index 89bfdd9cacc6..c95e831bb095 100644
--- a/arch/x86/kernel/cpu/centaur.c
+++ b/arch/x86/kernel/cpu/centaur.c
@@ -1,11 +1,11 @@
+#include <linux/bitops.h>
 #include <linux/kernel.h>
 #include <linux/init.h>
-#include <linux/bitops.h>
 
 #include <asm/processor.h>
-#include <asm/msr.h>
 #include <asm/e820.h>
 #include <asm/mtrr.h>
+#include <asm/msr.h>
 
 #include "cpu.h"
 
@@ -276,7 +276,7 @@ static void __cpuinit init_c3(struct cpuinfo_x86 *c)
 	 */
 		c->x86_capability[5] = cpuid_edx(0xC0000001);
 	}
-
+#ifdef CONFIG_X86_32
 	/* Cyrix III family needs CX8 & PGE explicitly enabled. */
 	if (c->x86_model >= 6 && c->x86_model <= 9) {
 		rdmsr(MSR_VIA_FCR, lo, hi);
@@ -288,6 +288,11 @@ static void __cpuinit init_c3(struct cpuinfo_x86 *c)
 	/* Before Nehemiah, the C3's had 3dNOW! */
 	if (c->x86_model >= 6 && c->x86_model < 9)
 		set_cpu_cap(c, X86_FEATURE_3DNOW);
+#endif
+	if (c->x86 == 0x6 && c->x86_model >= 0xf) {
+		c->x86_cache_alignment = c->x86_clflush_size * 2;
+		set_cpu_cap(c, X86_FEATURE_REP_GOOD);
+	}
 
 	display_cacheinfo(c);
 }
@@ -316,16 +321,25 @@ enum {
 static void __cpuinit early_init_centaur(struct cpuinfo_x86 *c)
 {
 	switch (c->x86) {
+#ifdef CONFIG_X86_32
 	case 5:
 		/* Emulate MTRRs using Centaur's MCR. */
 		set_cpu_cap(c, X86_FEATURE_CENTAUR_MCR);
 		break;
+#endif
+	case 6:
+		if (c->x86_model >= 0xf)
+			set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
+		break;
 	}
+#ifdef CONFIG_X86_64
+	set_cpu_cap(c, X86_FEATURE_SYSENTER32);
+#endif
 }
 
 static void __cpuinit init_centaur(struct cpuinfo_x86 *c)
 {
-
+#ifdef CONFIG_X86_32
 	char *name;
 	u32 fcr_set = 0;
 	u32 fcr_clr = 0;
@@ -337,8 +351,10 @@ static void __cpuinit init_centaur(struct cpuinfo_x86 *c)
 	 * 3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway
 	 */
 	clear_cpu_cap(c, 0*32+31);
-
+#endif
+	early_init_centaur(c);
 	switch (c->x86) {
+#ifdef CONFIG_X86_32
 	case 5:
 		switch (c->x86_model) {
 		case 4:
@@ -442,16 +458,20 @@ static void __cpuinit init_centaur(struct cpuinfo_x86 *c)
 		}
 		sprintf(c->x86_model_id, "WinChip %s", name);
 		break;
-
+#endif
 	case 6:
 		init_c3(c);
 		break;
 	}
+#ifdef CONFIG_X86_64
+	set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
+#endif
 }
 
 static unsigned int __cpuinit
 centaur_size_cache(struct cpuinfo_x86 *c, unsigned int size)
 {
+#ifdef CONFIG_X86_32
 	/* VIA C3 CPUs (670-68F) need further shifting. */
 	if ((c->x86 == 6) && ((c->x86_model == 7) || (c->x86_model == 8)))
 		size >>= 8;
@@ -464,11 +484,11 @@ centaur_size_cache(struct cpuinfo_x86 *c, unsigned int size)
 	if ((c->x86 == 6) && (c->x86_model == 9) &&
 	    (c->x86_mask == 1) && (size == 65))
 		size -= 1;
-
+#endif
 	return size;
 }
 
-static struct cpu_dev centaur_cpu_dev __cpuinitdata = {
+static const struct cpu_dev __cpuinitconst centaur_cpu_dev = {
 	.c_vendor = "Centaur",
 	.c_ident = { "CentaurHauls" },
 	.c_early_init = early_init_centaur,
diff --git a/arch/x86/kernel/cpu/centaur_64.c b/arch/x86/kernel/cpu/centaur_64.c
deleted file mode 100644
index a1625f5a1e78..000000000000
--- a/arch/x86/kernel/cpu/centaur_64.c
+++ /dev/null
@@ -1,37 +0,0 @@
-#include <linux/init.h>
-#include <linux/smp.h>
-
-#include <asm/cpufeature.h>
-#include <asm/processor.h>
-
-#include "cpu.h"
-
-static void __cpuinit early_init_centaur(struct cpuinfo_x86 *c)
-{
-	if (c->x86 == 0x6 && c->x86_model >= 0xf)
-		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
-
-	set_cpu_cap(c, X86_FEATURE_SYSENTER32);
-}
-
-static void __cpuinit init_centaur(struct cpuinfo_x86 *c)
-{
-	early_init_centaur(c);
-
-	if (c->x86 == 0x6 && c->x86_model >= 0xf) {
-		c->x86_cache_alignment = c->x86_clflush_size * 2;
-		set_cpu_cap(c, X86_FEATURE_REP_GOOD);
-	}
-	set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
-}
-
-static struct cpu_dev centaur_cpu_dev __cpuinitdata = {
-	.c_vendor = "Centaur",
-	.c_ident = { "CentaurHauls" },
-	.c_early_init = early_init_centaur,
-	.c_init = init_centaur,
-	.c_x86_vendor = X86_VENDOR_CENTAUR,
-};
-
-cpu_dev_register(centaur_cpu_dev);
-
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 83492b1f93b1..c4f667896c28 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -1,118 +1,117 @@
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/string.h>
 #include <linux/bootmem.h>
+#include <linux/linkage.h>
 #include <linux/bitops.h>
+#include <linux/kernel.h>
 #include <linux/module.h>
-#include <linux/kgdb.h>
-#include <linux/topology.h>
+#include <linux/percpu.h>
+#include <linux/string.h>
 #include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/init.h>
+#include <linux/kgdb.h>
 #include <linux/smp.h>
-#include <linux/percpu.h>
-#include <asm/i387.h>
-#include <asm/msr.h>
-#include <asm/io.h>
-#include <asm/linkage.h>
+#include <linux/io.h>
+
+#include <asm/stackprotector.h>
 #include <asm/mmu_context.h>
+#include <asm/hypervisor.h>
+#include <asm/processor.h>
+#include <asm/sections.h>
+#include <asm/topology.h>
+#include <asm/cpumask.h>
+#include <asm/pgtable.h>
+#include <asm/atomic.h>
+#include <asm/proto.h>
+#include <asm/setup.h>
+#include <asm/apic.h>
+#include <asm/desc.h>
+#include <asm/i387.h>
 #include <asm/mtrr.h>
+#include <asm/numa.h>
+#include <asm/asm.h>
+#include <asm/cpu.h>
 #include <asm/mce.h>
+#include <asm/msr.h>
 #include <asm/pat.h>
-#include <asm/asm.h>
-#include <asm/numa.h>
 #include <asm/smp.h>
+
 #ifdef CONFIG_X86_LOCAL_APIC
-#include <asm/mpspec.h>
-#include <asm/apic.h>
-#include <mach_apic.h>
-#include <asm/genapic.h>
+#include <asm/uv/uv.h>
 #endif
 
-#include <asm/pda.h>
-#include <asm/pgtable.h>
-#include <asm/processor.h>
-#include <asm/desc.h>
-#include <asm/atomic.h>
-#include <asm/proto.h>
-#include <asm/sections.h>
-#include <asm/setup.h>
-#include <asm/hypervisor.h>
-
 #include "cpu.h"
 
-#ifdef CONFIG_X86_64
-
 /* all of these masks are initialized in setup_cpu_local_masks() */
-cpumask_var_t cpu_callin_mask;
-cpumask_var_t cpu_callout_mask;
 cpumask_var_t cpu_initialized_mask;
+cpumask_var_t cpu_callout_mask;
+cpumask_var_t cpu_callin_mask;
 
 /* representing cpus for which sibling maps can be computed */
 cpumask_var_t cpu_sibling_setup_mask;
 
-#else /* CONFIG_X86_32 */
-
-cpumask_t cpu_callin_map;
-cpumask_t cpu_callout_map;
-cpumask_t cpu_initialized;
-cpumask_t cpu_sibling_setup_map;
-
-#endif /* CONFIG_X86_32 */
-
+/* correctly size the local cpu masks */
+void __init setup_cpu_local_masks(void)
+{
+	alloc_bootmem_cpumask_var(&cpu_initialized_mask);
+	alloc_bootmem_cpumask_var(&cpu_callin_mask);
+	alloc_bootmem_cpumask_var(&cpu_callout_mask);
+	alloc_bootmem_cpumask_var(&cpu_sibling_setup_mask);
+}
 
-static struct cpu_dev *this_cpu __cpuinitdata;
+static const struct cpu_dev *this_cpu __cpuinitdata;
 
+DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
 #ifdef CONFIG_X86_64
-/* We need valid kernel segments for data and code in long mode too
- * IRET will check the segment types kkeil 2000/10/28
- * Also sysret mandates a special GDT layout
- */
-/* The TLS descriptors are currently at a different place compared to i386.
-   Hopefully nobody expects them at a fixed place (Wine?) */
-DEFINE_PER_CPU(struct gdt_page, gdt_page) = { .gdt = {
-	[GDT_ENTRY_KERNEL32_CS] = { { { 0x0000ffff, 0x00cf9b00 } } },
-	[GDT_ENTRY_KERNEL_CS] = { { { 0x0000ffff, 0x00af9b00 } } },
-	[GDT_ENTRY_KERNEL_DS] = { { { 0x0000ffff, 0x00cf9300 } } },
-	[GDT_ENTRY_DEFAULT_USER32_CS] = { { { 0x0000ffff, 0x00cffb00 } } },
-	[GDT_ENTRY_DEFAULT_USER_DS] = { { { 0x0000ffff, 0x00cff300 } } },
-	[GDT_ENTRY_DEFAULT_USER_CS] = { { { 0x0000ffff, 0x00affb00 } } },
-} };
+	/*
+	 * We need valid kernel segments for data and code in long mode too
+	 * IRET will check the segment types kkeil 2000/10/28
+	 * Also sysret mandates a special GDT layout
+	 *
	 * TLS descriptors are currently at a different place compared to i386.
+	 * Hopefully nobody expects them at a fixed place (Wine?)
+	 */
+	[GDT_ENTRY_KERNEL32_CS] = { { { 0x0000ffff, 0x00cf9b00 } } },
+	[GDT_ENTRY_KERNEL_CS] = { { { 0x0000ffff, 0x00af9b00 } } },
+	[GDT_ENTRY_KERNEL_DS] = { { { 0x0000ffff, 0x00cf9300 } } },
+	[GDT_ENTRY_DEFAULT_USER32_CS] = { { { 0x0000ffff, 0x00cffb00 } } },
+	[GDT_ENTRY_DEFAULT_USER_DS] = { { { 0x0000ffff, 0x00cff300 } } },
+	[GDT_ENTRY_DEFAULT_USER_CS] = { { { 0x0000ffff, 0x00affb00 } } },
 #else
-DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
-	[GDT_ENTRY_KERNEL_CS] = { { { 0x0000ffff, 0x00cf9a00 } } },
-	[GDT_ENTRY_KERNEL_DS] = { { { 0x0000ffff, 0x00cf9200 } } },
-	[GDT_ENTRY_DEFAULT_USER_CS] = { { { 0x0000ffff, 0x00cffa00 } } },
-	[GDT_ENTRY_DEFAULT_USER_DS] = { { { 0x0000ffff, 0x00cff200 } } },
+	[GDT_ENTRY_KERNEL_CS] = { { { 0x0000ffff, 0x00cf9a00 } } },
+	[GDT_ENTRY_KERNEL_DS] = { { { 0x0000ffff, 0x00cf9200 } } },
+	[GDT_ENTRY_DEFAULT_USER_CS] = { { { 0x0000ffff, 0x00cffa00 } } },
+	[GDT_ENTRY_DEFAULT_USER_DS] = { { { 0x0000ffff, 0x00cff200 } } },
 	/*
 	 * Segments used for calling PnP BIOS have byte granularity.
 	 * They code segments and data segments have fixed 64k limits,
 	 * the transfer segment sizes are set at run time.
 	 */
 	/* 32-bit code */
 	[GDT_ENTRY_PNPBIOS_CS32] = { { { 0x0000ffff, 0x00409a00 } } },
 	/* 16-bit code */
 	[GDT_ENTRY_PNPBIOS_CS16] = { { { 0x0000ffff, 0x00009a00 } } },
 	/* 16-bit data */
 	[GDT_ENTRY_PNPBIOS_DS] = { { { 0x0000ffff, 0x00009200 } } },
 	/* 16-bit data */
 	[GDT_ENTRY_PNPBIOS_TS1] = { { { 0x00000000, 0x00009200 } } },
 	/* 16-bit data */
 	[GDT_ENTRY_PNPBIOS_TS2] = { { { 0x00000000, 0x00009200 } } },
 	/*
 	 * The APM segments have byte granularity and their bases
 	 * are set at run time. All have 64k limits.
 	 */
 	/* 32-bit code */
 	[GDT_ENTRY_APMBIOS_BASE] = { { { 0x0000ffff, 0x00409a00 } } },
 	/* 16-bit code */
 	[GDT_ENTRY_APMBIOS_BASE+1] = { { { 0x0000ffff, 0x00009a00 } } },
 	/* data */
 	[GDT_ENTRY_APMBIOS_BASE+2] = { { { 0x0000ffff, 0x00409200 } } },
 
 	[GDT_ENTRY_ESPFIX_SS] = { { { 0x00000000, 0x00c09200 } } },
-	[GDT_ENTRY_PERCPU] = { { { 0x00000000, 0x00000000 } } },
-} };
+	[GDT_ENTRY_PERCPU] = { { { 0x0000ffff, 0x00cf9200 } } },
+	GDT_STACK_CANARY_INIT
 #endif
+} };
 EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
 
 #ifdef CONFIG_X86_32
@@ -153,16 +152,17 @@ static inline int flag_is_changeable_p(u32 flag)
 	 * the CPUID. Add "volatile" to not allow gcc to
 	 * optimize the subsequent calls to this function.
 	 */
-	asm volatile ("pushfl\n\t"
-		      "pushfl\n\t"
-		      "popl %0\n\t"
-		      "movl %0,%1\n\t"
-		      "xorl %2,%0\n\t"
-		      "pushl %0\n\t"
-		      "popfl\n\t"
-		      "pushfl\n\t"
-		      "popl %0\n\t"
-		      "popfl\n\t"
+	asm volatile ("pushfl \n\t"
+		      "pushfl \n\t"
+		      "popl %0 \n\t"
+		      "movl %0, %1 \n\t"
+		      "xorl %2, %0 \n\t"
+		      "pushl %0 \n\t"
+		      "popfl \n\t"
+		      "pushfl \n\t"
+		      "popl %0 \n\t"
+		      "popfl \n\t"
+
 		      : "=&r" (f1), "=&r" (f2)
 		      : "ir" (flag));
 
@@ -177,18 +177,22 @@ static int __cpuinit have_cpuid_p(void)
 
 static void __cpuinit squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
 {
-	if (cpu_has(c, X86_FEATURE_PN) && disable_x86_serial_nr) {
-		/* Disable processor serial number */
-		unsigned long lo, hi;
-		rdmsr(MSR_IA32_BBL_CR_CTL, lo, hi);
-		lo |= 0x200000;
-		wrmsr(MSR_IA32_BBL_CR_CTL, lo, hi);
-		printk(KERN_NOTICE "CPU serial number disabled.\n");
-		clear_cpu_cap(c, X86_FEATURE_PN);
-
-		/* Disabling the serial number may affect the cpuid level */
-		c->cpuid_level = cpuid_eax(0);
-	}
+	unsigned long lo, hi;
+
+	if (!cpu_has(c, X86_FEATURE_PN) || !disable_x86_serial_nr)
+		return;
+
+	/* Disable processor serial number: */
+
+	rdmsr(MSR_IA32_BBL_CR_CTL, lo, hi);
+	lo |= 0x200000;
+	wrmsr(MSR_IA32_BBL_CR_CTL, lo, hi);
+
+	printk(KERN_NOTICE "CPU serial number disabled.\n");
+	clear_cpu_cap(c, X86_FEATURE_PN);
+
+	/* Disabling the serial number may affect the cpuid level */
+	c->cpuid_level = cpuid_eax(0);
 }
 
 static int __init x86_serial_nr_setup(char *s)
@@ -213,16 +217,64 @@ static inline void squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
 #endif
 
 /*
+ * Some CPU features depend on higher CPUID levels, which may not always
+ * be available due to CPUID level capping or broken virtualization
+ * software. Add those features to this table to auto-disable them.
+ */
+struct cpuid_dependent_feature {
+	u32 feature;
+	u32 level;
+};
+
+static const struct cpuid_dependent_feature __cpuinitconst
+cpuid_dependent_features[] = {
+	{ X86_FEATURE_MWAIT,	0x00000005 },
+	{ X86_FEATURE_DCA,	0x00000009 },
+	{ X86_FEATURE_XSAVE,	0x0000000d },
+	{ 0, 0 }
+};
+
+static void __cpuinit filter_cpuid_features(struct cpuinfo_x86 *c, bool warn)
+{
+	const struct cpuid_dependent_feature *df;
+
+	for (df = cpuid_dependent_features; df->feature; df++) {
+
+		if (!cpu_has(c, df->feature))
+			continue;
+		/*
+		 * Note: cpuid_level is set to -1 if unavailable, but
+		 * extended_extended_level is set to 0 if unavailable
+		 * and the legitimate extended levels are all negative
+		 * when signed; hence the weird messing around with
+		 * signs here...
+		 */
+		if (!((s32)df->level < 0 ?
+		     (u32)df->level > (u32)c->extended_cpuid_level :
+		     (s32)df->level > (s32)c->cpuid_level))
+			continue;
+
+		clear_cpu_cap(c, df->feature);
+		if (!warn)
+			continue;
+
+		printk(KERN_WARNING
+		       "CPU: CPU feature %s disabled, no CPUID level 0x%x\n",
+				x86_cap_flags[df->feature], df->level);
+	}
+}
+
+/*
  * Naming convention should be: <Name> [(<Codename>)]
  * This table only is used unless init_<vendor>() below doesn't set it;
- * in particular, if CPUID levels 0x80000002..4 are supported, this isn't used
- *
+ * in particular, if CPUID levels 0x80000002..4 are supported, this
+ * isn't used
  */
 
 /* Look up CPU names by table lookup. */
-static char __cpuinit *table_lookup_model(struct cpuinfo_x86 *c)
+static const char *__cpuinit table_lookup_model(struct cpuinfo_x86 *c)
 {
-	struct cpu_model_info *info;
+	const struct cpu_model_info *info;
 
 	if (c->x86_model >= 16)
 		return NULL;	/* Range check */
@@ -242,21 +294,34 @@ static char __cpuinit *table_lookup_model(struct cpuinfo_x86 *c)
 
 __u32 cleared_cpu_caps[NCAPINTS] __cpuinitdata;
 
-/* Current gdt points %fs at the "master" per-cpu area: after this,
- * it's on the real one. */
-void switch_to_new_gdt(void)
+void load_percpu_segment(int cpu)
+{
+#ifdef CONFIG_X86_32
+	loadsegment(fs, __KERNEL_PERCPU);
+#else
+	loadsegment(gs, 0);
+	wrmsrl(MSR_GS_BASE, (unsigned long)per_cpu(irq_stack_union.gs_base, cpu));
+#endif
+	load_stack_canary_segment();
+}
+
+/*
+ * Current gdt points %fs at the "master" per-cpu area: after this,
+ * it's on the real one.
+ */
+void switch_to_new_gdt(int cpu)
 {
 	struct desc_ptr gdt_descr;
 
-	gdt_descr.address = (long)get_cpu_gdt_table(smp_processor_id());
+	gdt_descr.address = (long)get_cpu_gdt_table(cpu);
 	gdt_descr.size = GDT_SIZE - 1;
 	load_gdt(&gdt_descr);
-#ifdef CONFIG_X86_32
-	asm("mov %0, %%fs" : : "r" (__KERNEL_PERCPU) : "memory");
-#endif
+	/* Reload the per-cpu base */
+
+	load_percpu_segment(cpu);
 }
 
-static struct cpu_dev *cpu_devs[X86_VENDOR_NUM] = {};
+static const struct cpu_dev *__cpuinitdata cpu_devs[X86_VENDOR_NUM] = {};
 
 static void __cpuinit default_init(struct cpuinfo_x86 *c)
 {
@@ -275,7 +340,7 @@ static void __cpuinit default_init(struct cpuinfo_x86 *c)
 #endif
 }
 
-static struct cpu_dev __cpuinitdata default_cpu = {
+static const struct cpu_dev __cpuinitconst default_cpu = {
 	.c_init = default_init,
 	.c_vendor = "Unknown",
 	.c_x86_vendor = X86_VENDOR_UNKNOWN,
@@ -289,22 +354,24 @@ static void __cpuinit get_model_name(struct cpuinfo_x86 *c)
 	if (c->extended_cpuid_level < 0x80000004)
 		return;
 
-	v = (unsigned int *) c->x86_model_id;
+	v = (unsigned int *)c->x86_model_id;
 	cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
 	cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
 	cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
 	c->x86_model_id[48] = 0;
 
-	/* Intel chips right-justify this string for some dumb reason;
-	   undo that brain damage */
+	/*
+	 * Intel chips right-justify this string for some dumb reason;
+	 * undo that brain damage:
+	 */
 	p = q = &c->x86_model_id[0];
 	while (*p == ' ')
 		p++;
 	if (p != q) {
 		while (*p)
 			*q++ = *p++;
 		while (q <= &c->x86_model_id[48])
 			*q++ = '\0';	/* Zero-pad the rest */
 	}
 }
 
@@ -373,36 +440,30 @@ void __cpuinit detect_ht(struct cpuinfo_x86 *c)
 
 	if (smp_num_siblings == 1) {
 		printk(KERN_INFO "CPU: Hyper-Threading is disabled\n");
-	} else if (smp_num_siblings > 1) {
+		goto out;
+	}
 
-		if (smp_num_siblings > nr_cpu_ids) {
-			printk(KERN_WARNING "CPU: Unsupported number of siblings %d",
-			       smp_num_siblings);
-			smp_num_siblings = 1;
-			return;
-		}
+	if (smp_num_siblings <= 1)
+		goto out;
 
-		index_msb = get_count_order(smp_num_siblings);
-#ifdef CONFIG_X86_64
-		c->phys_proc_id = phys_pkg_id(index_msb);
-#else
-		c->phys_proc_id = phys_pkg_id(c->initial_apicid, index_msb);
-#endif
+	if (smp_num_siblings > nr_cpu_ids) {
+		pr_warning("CPU: Unsupported number of siblings %d",
+			   smp_num_siblings);
+		smp_num_siblings = 1;
+		return;
+	}
 
-		smp_num_siblings = smp_num_siblings / c->x86_max_cores;
+	index_msb = get_count_order(smp_num_siblings);
+	c->phys_proc_id = apic->phys_pkg_id(c->initial_apicid, index_msb);
 
-		index_msb = get_count_order(smp_num_siblings);
+	smp_num_siblings = smp_num_siblings / c->x86_max_cores;
 
-		core_bits = get_count_order(c->x86_max_cores);
+	index_msb = get_count_order(smp_num_siblings);
 
-#ifdef CONFIG_X86_64
-		c->cpu_core_id = phys_pkg_id(index_msb) &
-			((1 << core_bits) - 1);
-#else
-		c->cpu_core_id = phys_pkg_id(c->initial_apicid, index_msb) &
-			((1 << core_bits) - 1);
-#endif
-	}
+	core_bits = get_count_order(c->x86_max_cores);
+
+	c->cpu_core_id = apic->phys_pkg_id(c->initial_apicid, index_msb) &
+				       ((1 << core_bits) - 1);
 
 out:
 	if ((c->x86_max_cores * smp_num_siblings) > 1) {
@@ -417,8 +478,8 @@ out:
 static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c)
 {
 	char *v = c->x86_vendor_id;
-	int i;
 	static int printed;
+	int i;
 
 	for (i = 0; i < X86_VENDOR_NUM; i++) {
 		if (!cpu_devs[i])
@@ -427,6 +488,7 @@ static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c)
 		if (!strcmp(v, cpu_devs[i]->c_ident[0]) ||
 		    (cpu_devs[i]->c_ident[1] &&
 		     !strcmp(v, cpu_devs[i]->c_ident[1]))) {
+
 			this_cpu = cpu_devs[i];
 			c->x86_vendor = this_cpu->c_x86_vendor;
 			return;
@@ -435,7 +497,9 @@ static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c)
 
 	if (!printed) {
 		printed++;
-		printk(KERN_ERR "CPU: vendor_id '%s' unknown, using generic init.\n", v);
+		printk(KERN_ERR
+		       "CPU: vendor_id '%s' unknown, using generic init.\n", v);
+
 		printk(KERN_ERR "CPU: Your system may be unstable.\n");
 	}
 
@@ -455,14 +519,17 @@ void __cpuinit cpu_detect(struct cpuinfo_x86 *c)
 	/* Intel-defined flags: level 0x00000001 */
 	if (c->cpuid_level >= 0x00000001) {
 		u32 junk, tfms, cap0, misc;
+
 		cpuid(0x00000001, &tfms, &misc, &junk, &cap0);
 		c->x86 = (tfms >> 8) & 0xf;
 		c->x86_model = (tfms >> 4) & 0xf;
 		c->x86_mask = tfms & 0xf;
+
 		if (c->x86 == 0xf)
 			c->x86 += (tfms >> 20) & 0xff;
 		if (c->x86 >= 0x6)
 			c->x86_model += ((tfms >> 16) & 0xf) << 4;
+
 		if (cap0 & (1<<19)) {
 			c->x86_clflush_size = ((misc >> 8) & 0xff) * 8;
 			c->x86_cache_alignment = c->x86_clflush_size;
@@ -478,6 +545,7 @@ static void __cpuinit get_cpu_cap(struct cpuinfo_x86 *c)
 	/* Intel-defined flags: level 0x00000001 */
 	if (c->cpuid_level >= 0x00000001) {
 		u32 capability, excap;
+
 		cpuid(0x00000001, &tfms, &ebx, &excap, &capability);
 		c->x86_capability[0] = capability;
 		c->x86_capability[4] = excap;
@@ -486,6 +554,7 @@ static void __cpuinit get_cpu_cap(struct cpuinfo_x86 *c)
 	/* AMD-defined flags: level 0x80000001 */
 	xlvl = cpuid_eax(0x80000000);
 	c->extended_cpuid_level = xlvl;
+
 	if ((xlvl & 0xffff0000) == 0x80000000) {
 		if (xlvl >= 0x80000001) {
 			c->x86_capability[1] = cpuid_edx(0x80000001);
@@ -493,13 +562,15 @@ static void __cpuinit get_cpu_cap(struct cpuinfo_x86 *c)
 		}
 	}
 
-#ifdef CONFIG_X86_64
 	if (c->extended_cpuid_level >= 0x80000008) {
 		u32 eax = cpuid_eax(0x80000008);
 
 		c->x86_virt_bits = (eax >> 8) & 0xff;
 		c->x86_phys_bits = eax & 0xff;
 	}
+#ifdef CONFIG_X86_32
+	else if (cpu_has(c, X86_FEATURE_PAE) || cpu_has(c, X86_FEATURE_PSE36))
+		c->x86_phys_bits = 36;
 #endif
 
 	if (c->extended_cpuid_level >= 0x80000007)
@@ -546,8 +617,12 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
 {
 #ifdef CONFIG_X86_64
 	c->x86_clflush_size = 64;
+	c->x86_phys_bits = 36;
+	c->x86_virt_bits = 48;
 #else
 	c->x86_clflush_size = 32;
+	c->x86_phys_bits = 32;
+	c->x86_virt_bits = 32;
 #endif
 	c->x86_cache_alignment = c->x86_clflush_size;
 
@@ -570,21 +645,20 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
 	if (this_cpu->c_early_init)
 		this_cpu->c_early_init(c);
 
-	validate_pat_support(c);
-
 #ifdef CONFIG_SMP
 	c->cpu_index = boot_cpu_id;
 #endif
+	filter_cpuid_features(c, false);
 }
 
 void __init early_cpu_init(void)
 {
-	struct cpu_dev **cdev;
+	const struct cpu_dev *const *cdev;
 	int count = 0;
 
-	printk("KERNEL supported cpus:\n");
+	printk(KERN_INFO "KERNEL supported cpus:\n");
 	for (cdev = __x86_cpu_dev_start; cdev < __x86_cpu_dev_end; cdev++) {
-		struct cpu_dev *cpudev = *cdev;
+		const struct cpu_dev *cpudev = *cdev;
 		unsigned int j;
 
 		if (count >= X86_VENDOR_NUM)
@@ -595,7 +669,7 @@ void __init early_cpu_init(void)
 		for (j = 0; j < 2; j++) {
 			if (!cpudev->c_ident[j])
 				continue;
-			printk(" %s %s\n", cpudev->c_vendor,
+			printk(KERN_INFO " %s %s\n", cpudev->c_vendor,
 				cpudev->c_ident[j]);
 		}
 	}
@@ -637,7 +711,7 @@ static void __cpuinit generic_identify(struct cpuinfo_x86 *c)
 	c->initial_apicid = (cpuid_ebx(1) >> 24) & 0xFF;
 #ifdef CONFIG_X86_32
 # ifdef CONFIG_X86_HT
-	c->apicid = phys_pkg_id(c->initial_apicid, 0);
+	c->apicid = apic->phys_pkg_id(c->initial_apicid, 0);
 # else
 	c->apicid = c->initial_apicid;
 # endif
@@ -671,9 +745,13 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
 	c->x86_coreid_bits = 0;
 #ifdef CONFIG_X86_64
 	c->x86_clflush_size = 64;
+	c->x86_phys_bits = 36;
+	c->x86_virt_bits = 48;
 #else
 	c->cpuid_level = -1;	/* CPUID not detected */
 	c->x86_clflush_size = 32;
+	c->x86_phys_bits = 32;
+	c->x86_virt_bits = 32;
 #endif
 	c->x86_cache_alignment = c->x86_clflush_size;
 	memset(&c->x86_capability, 0, sizeof c->x86_capability);
@@ -684,7 +762,7 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
 		this_cpu->c_identify(c);
 
 #ifdef CONFIG_X86_64
-	c->apicid = phys_pkg_id(0);
+	c->apicid = apic->phys_pkg_id(c->initial_apicid, 0);
 #endif
 
 	/*
@@ -704,13 +782,16 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
 	squash_the_stupid_serial_number(c);
 
 	/*
-	 * The vendor-specific functions might have changed features. Now
-	 * we do "generic changes."
+	 * The vendor-specific functions might have changed features.
+	 * Now we do "generic changes."
 	 */
 
+	/* Filter out anything that depends on CPUID levels we don't have */
+	filter_cpuid_features(c, true);
+
 	/* If the model name is still unset, do table lookup. */
 	if (!c->x86_model_id[0]) {
-		char *p;
+		const char *p;
 		p = table_lookup_model(c);
 		if (p)
 			strcpy(c->x86_model_id, p);
@@ -766,6 +847,7 @@ static void vgetcpu_set_mode(void)
 void __init identify_boot_cpu(void)
 {
 	identify_cpu(&boot_cpu_data);
+	init_c1e_mask();
 #ifdef CONFIG_X86_32
 	sysenter_setup();
 	enable_sep_cpu();
@@ -785,11 +867,11 @@ void __cpuinit identify_secondary_cpu(struct cpuinfo_x86 *c)
 }
 
 struct msr_range {
 	unsigned min;
 	unsigned max;
 };
 
-static struct msr_range msr_range_array[] __cpuinitdata = {
+static const struct msr_range msr_range_array[] __cpuinitconst = {
 	{ 0x00000000, 0x00000418},
 	{ 0xc0000000, 0xc000040b},
 	{ 0xc0010000, 0xc0010142},
@@ -798,14 +880,15 @@ static struct msr_range msr_range_array[] __cpuinitdata = {
 
 static void __cpuinit print_cpu_msr(void)
 {
+	unsigned index_min, index_max;
 	unsigned index;
 	u64 val;
 	int i;
-	unsigned index_min, index_max;
 
 	for (i = 0; i < ARRAY_SIZE(msr_range_array); i++) {
 		index_min = msr_range_array[i].min;
 		index_max = msr_range_array[i].max;
+
 		for (index = index_min; index < index_max; index++) {
 			if (rdmsrl_amd_safe(index, &val))
 				continue;
@@ -815,6 +898,7 @@ static void __cpuinit print_cpu_msr(void)
 }
 
 static int show_msr __cpuinitdata;
+
 static __init int setup_show_msr(char *arg)
 {
 	int num;
@@ -836,12 +920,14 @@ __setup("noclflush", setup_noclflush);
 
 void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)
 {
-	char *vendor = NULL;
+	const char *vendor = NULL;
 
-	if (c->x86_vendor < X86_VENDOR_NUM)
+	if (c->x86_vendor < X86_VENDOR_NUM) {
 		vendor = this_cpu->c_vendor;
-	else if (c->cpuid_level >= 0)
-		vendor = c->x86_vendor_id;
+	} else {
+		if (c->cpuid_level >= 0)
+			vendor = c->x86_vendor_id;
+	}
 
 	if (vendor && !strstr(c->x86_model_id, vendor))
 		printk(KERN_CONT "%s ", vendor);
@@ -868,65 +954,45 @@ void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)
 static __init int setup_disablecpuid(char *arg)
 {
 	int bit;
+
 	if (get_option(&arg, &bit) && bit < NCAPINTS*32)
 		setup_clear_cpu_cap(bit);
 	else
 		return 0;
+
 	return 1;
 }
 __setup("clearcpuid=", setup_disablecpuid);
 
 #ifdef CONFIG_X86_64
-struct x8664_pda **_cpu_pda __read_mostly;
-EXPORT_SYMBOL(_cpu_pda);
-
 struct desc_ptr idt_descr = { 256 * 16 - 1, (unsigned long) idt_table };
 
-static char boot_cpu_stack[IRQSTACKSIZE] __page_aligned_bss;
+DEFINE_PER_CPU_FIRST(union irq_stack_union,
+		     irq_stack_union) __aligned(PAGE_SIZE);
 
-void __cpuinit pda_init(int cpu)
-{
-	struct x8664_pda *pda = cpu_pda(cpu);
+DEFINE_PER_CPU(char *, irq_stack_ptr) =
+	init_per_cpu_var(irq_stack_union.irq_stack) + IRQ_STACK_SIZE - 64;
 
-	/* Setup up data that may be needed in __get_free_pages early */
-	loadsegment(fs, 0);
-	loadsegment(gs, 0);
-	/* Memory clobbers used to order PDA accessed */
-	mb();
-	wrmsrl(MSR_GS_BASE, pda);
-	mb();
-
-	pda->cpunumber = cpu;
-	pda->irqcount = -1;
-	pda->kernelstack = (unsigned long)stack_thread_info() -
-		PDA_STACKOFFSET + THREAD_SIZE;
-	pda->active_mm = &init_mm;
-	pda->mmu_state = 0;
-
-	if (cpu == 0) {
-		/* others are initialized in smpboot.c */
-		pda->pcurrent = &init_task;
-		pda->irqstackptr = boot_cpu_stack;
-		pda->irqstackptr += IRQSTACKSIZE - 64;
-	} else {
-		if (!pda->irqstackptr) {
-			pda->irqstackptr = (char *)
-				__get_free_pages(GFP_ATOMIC, IRQSTACK_ORDER);
-			if (!pda->irqstackptr)
-				panic("cannot allocate irqstack for cpu %d",
-					cpu);
-			pda->irqstackptr += IRQSTACKSIZE - 64;
-		}
+DEFINE_PER_CPU(unsigned long, kernel_stack) =
+	(unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
+EXPORT_PER_CPU_SYMBOL(kernel_stack);
 
-		if (pda->nodenumber == 0 && cpu_to_node(cpu) != NUMA_NO_NODE)
-			pda->nodenumber = cpu_to_node(cpu);
-	}
-}
+DEFINE_PER_CPU(unsigned int, irq_count) = -1;
 
-static char boot_exception_stacks[(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ +
-	DEBUG_STKSZ] __page_aligned_bss;
+/*
+ * Special IST stacks which the CPU switches to when it calls
+ * an IST-marked descriptor entry. Up to 7 stacks (hardware
+ * limit), all of them are 4K, except the debug stack which
+ * is 8K.
+ */
+static const unsigned int exception_stack_sizes[N_EXCEPTION_STACKS] = {
+	  [0 ... N_EXCEPTION_STACKS - 1]	= EXCEPTION_STKSZ,
+	  [DEBUG_STACK - 1]			= DEBUG_STKSZ
+};
 
-extern asmlinkage void ignore_sysret(void);
+static DEFINE_PER_CPU_PAGE_ALIGNED(char, exception_stacks
+	[(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ])
+	__aligned(PAGE_SIZE);
 
 /* May not be marked __init: used by software suspend */
 void syscall_init(void)
@@ -957,16 +1023,38 @@ unsigned long kernel_eflags;
  */
 DEFINE_PER_CPU(struct orig_ist, orig_ist);
 
-#else
+#else /* CONFIG_X86_64 */
+
+#ifdef CONFIG_CC_STACKPROTECTOR
+DEFINE_PER_CPU(unsigned long, stack_canary);
+#endif
 
-/* Make sure %fs is initialized properly in idle threads */
+/* Make sure %fs and %gs are initialized properly in idle threads */
 struct pt_regs * __cpuinit idle_regs(struct pt_regs *regs)
 {
 	memset(regs, 0, sizeof(struct pt_regs));
 	regs->fs = __KERNEL_PERCPU;
+	regs->gs = __KERNEL_STACK_CANARY;
+
 	return regs;
 }
-#endif
+#endif /* CONFIG_X86_64 */
+
+/*
+ * Clear all 6 debug registers:
+ */
+static void clear_all_debug_regs(void)
+{
+	int i;
+
+	for (i = 0; i < 8; i++) {
+		/* Ignore db4, db5 */
+		if ((i == 4) || (i == 5))
+			continue;
+
+		set_debugreg(0, i);
+	}
+}
 
 /*
  * cpu_init() initializes state that is per-CPU. Some data is already
@@ -976,21 +1064,25 @@ struct pt_regs * __cpuinit idle_regs(struct pt_regs *regs)
 * A lot of state is already set up in PDA init for 64 bit
 */
 #ifdef CONFIG_X86_64
+
 void __cpuinit cpu_init(void)
 {
-	int cpu = stack_smp_processor_id();
-	struct tss_struct *t = &per_cpu(init_tss, cpu);
-	struct orig_ist *orig_ist = &per_cpu(orig_ist, cpu);
-	unsigned long v;
-	char *estacks = NULL;
+	struct orig_ist *orig_ist;
 	struct task_struct *me;
+	struct tss_struct *t;
+	unsigned long v;
+	int cpu;
 	int i;
 
-	/* CPU 0 is initialised in head64.c */
-	if (cpu != 0)
-		pda_init(cpu);
-	else
-		estacks = boot_exception_stacks;
+	cpu = stack_smp_processor_id();
+	t = &per_cpu(init_tss, cpu);
+	orig_ist = &per_cpu(orig_ist, cpu);
+
+#ifdef CONFIG_NUMA
+	if (cpu != 0 && percpu_read(node_number) == 0 &&
+	    cpu_to_node(cpu) != NUMA_NO_NODE)
+		percpu_write(node_number, cpu_to_node(cpu));
+#endif
 
 	me = current;
 
@@ -1006,7 +1098,9 @@ void __cpuinit cpu_init(void)
 	 * and set up the GDT descriptor:
 	 */
 
-	switch_to_new_gdt();
+	switch_to_new_gdt(cpu);
+	loadsegment(fs, 0);
+
 	load_idt((const struct desc_ptr *)&idt_descr);
 
 	memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
@@ -1017,31 +1111,24 @@ void __cpuinit cpu_init(void)
 	barrier();
 
 	check_efer();
-	if (cpu != 0 && x2apic)
+	if (cpu != 0)
 		enable_x2apic();
 
 	/*
 	 * set up and load the per-CPU TSS
| 1025 | */ | 1119 | */ |
| 1026 | if (!orig_ist->ist[0]) { | 1120 | if (!orig_ist->ist[0]) { |
| 1027 | static const unsigned int order[N_EXCEPTION_STACKS] = { | 1121 | char *estacks = per_cpu(exception_stacks, cpu); |
| 1028 | [0 ... N_EXCEPTION_STACKS - 1] = EXCEPTION_STACK_ORDER, | 1122 | |
| 1029 | [DEBUG_STACK - 1] = DEBUG_STACK_ORDER | ||
| 1030 | }; | ||
| 1031 | for (v = 0; v < N_EXCEPTION_STACKS; v++) { | 1123 | for (v = 0; v < N_EXCEPTION_STACKS; v++) { |
| 1032 | if (cpu) { | 1124 | estacks += exception_stack_sizes[v]; |
| 1033 | estacks = (char *)__get_free_pages(GFP_ATOMIC, order[v]); | ||
| 1034 | if (!estacks) | ||
| 1035 | panic("Cannot allocate exception " | ||
| 1036 | "stack %ld %d\n", v, cpu); | ||
| 1037 | } | ||
| 1038 | estacks += PAGE_SIZE << order[v]; | ||
| 1039 | orig_ist->ist[v] = t->x86_tss.ist[v] = | 1125 | orig_ist->ist[v] = t->x86_tss.ist[v] = |
| 1040 | (unsigned long)estacks; | 1126 | (unsigned long)estacks; |
| 1041 | } | 1127 | } |
| 1042 | } | 1128 | } |
| 1043 | 1129 | ||
| 1044 | t->x86_tss.io_bitmap_base = offsetof(struct tss_struct, io_bitmap); | 1130 | t->x86_tss.io_bitmap_base = offsetof(struct tss_struct, io_bitmap); |
| 1131 | |||
| 1045 | /* | 1132 | /* |
| 1046 | * <= is required because the CPU will access up to | 1133 | * <= is required because the CPU will access up to |
| 1047 | * 8 bits beyond the end of the IO permission bitmap. | 1134 | * 8 bits beyond the end of the IO permission bitmap. |
| @@ -1051,8 +1138,7 @@ void __cpuinit cpu_init(void) | |||
| 1051 | 1138 | ||
| 1052 | atomic_inc(&init_mm.mm_count); | 1139 | atomic_inc(&init_mm.mm_count); |
| 1053 | me->active_mm = &init_mm; | 1140 | me->active_mm = &init_mm; |
| 1054 | if (me->mm) | 1141 | BUG_ON(me->mm); |
| 1055 | BUG(); | ||
| 1056 | enter_lazy_tlb(&init_mm, me); | 1142 | enter_lazy_tlb(&init_mm, me); |
| 1057 | 1143 | ||
| 1058 | load_sp0(t, ¤t->thread); | 1144 | load_sp0(t, ¤t->thread); |
| @@ -1069,22 +1155,9 @@ void __cpuinit cpu_init(void) | |||
| 1069 | */ | 1155 | */ |
| 1070 | if (kgdb_connected && arch_kgdb_ops.correct_hw_break) | 1156 | if (kgdb_connected && arch_kgdb_ops.correct_hw_break) |
| 1071 | arch_kgdb_ops.correct_hw_break(); | 1157 | arch_kgdb_ops.correct_hw_break(); |
| 1072 | else { | 1158 | else |
| 1073 | #endif | ||
| 1074 | /* | ||
| 1075 | * Clear all 6 debug registers: | ||
| 1076 | */ | ||
| 1077 | |||
| 1078 | set_debugreg(0UL, 0); | ||
| 1079 | set_debugreg(0UL, 1); | ||
| 1080 | set_debugreg(0UL, 2); | ||
| 1081 | set_debugreg(0UL, 3); | ||
| 1082 | set_debugreg(0UL, 6); | ||
| 1083 | set_debugreg(0UL, 7); | ||
| 1084 | #ifdef CONFIG_KGDB | ||
| 1085 | /* If the kgdb is connected no debug regs should be altered. */ | ||
| 1086 | } | ||
| 1087 | #endif | 1159 | #endif |
| 1160 | clear_all_debug_regs(); | ||
| 1088 | 1161 | ||
| 1089 | fpu_init(); | 1162 | fpu_init(); |
| 1090 | 1163 | ||
| @@ -1105,7 +1178,8 @@ void __cpuinit cpu_init(void) | |||
| 1105 | 1178 | ||
| 1106 | if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) { | 1179 | if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) { |
| 1107 | printk(KERN_WARNING "CPU#%d already initialized!\n", cpu); | 1180 | printk(KERN_WARNING "CPU#%d already initialized!\n", cpu); |
| 1108 | for (;;) local_irq_enable(); | 1181 | for (;;) |
| 1182 | local_irq_enable(); | ||
| 1109 | } | 1183 | } |
| 1110 | 1184 | ||
| 1111 | printk(KERN_INFO "Initializing CPU#%d\n", cpu); | 1185 | printk(KERN_INFO "Initializing CPU#%d\n", cpu); |
| @@ -1114,15 +1188,14 @@ void __cpuinit cpu_init(void) | |||
| 1114 | clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE); | 1188 | clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE); |
| 1115 | 1189 | ||
| 1116 | load_idt(&idt_descr); | 1190 | load_idt(&idt_descr); |
| 1117 | switch_to_new_gdt(); | 1191 | switch_to_new_gdt(cpu); |
| 1118 | 1192 | ||
| 1119 | /* | 1193 | /* |
| 1120 | * Set up and load the per-CPU TSS and LDT | 1194 | * Set up and load the per-CPU TSS and LDT |
| 1121 | */ | 1195 | */ |
| 1122 | atomic_inc(&init_mm.mm_count); | 1196 | atomic_inc(&init_mm.mm_count); |
| 1123 | curr->active_mm = &init_mm; | 1197 | curr->active_mm = &init_mm; |
| 1124 | if (curr->mm) | 1198 | BUG_ON(curr->mm); |
| 1125 | BUG(); | ||
| 1126 | enter_lazy_tlb(&init_mm, curr); | 1199 | enter_lazy_tlb(&init_mm, curr); |
| 1127 | 1200 | ||
| 1128 | load_sp0(t, thread); | 1201 | load_sp0(t, thread); |
| @@ -1135,16 +1208,7 @@ void __cpuinit cpu_init(void) | |||
| 1135 | __set_tss_desc(cpu, GDT_ENTRY_DOUBLEFAULT_TSS, &doublefault_tss); | 1208 | __set_tss_desc(cpu, GDT_ENTRY_DOUBLEFAULT_TSS, &doublefault_tss); |
| 1136 | #endif | 1209 | #endif |
| 1137 | 1210 | ||
| 1138 | /* Clear %gs. */ | 1211 | clear_all_debug_regs(); |
| 1139 | asm volatile ("mov %0, %%gs" : : "r" (0)); | ||
| 1140 | |||
| 1141 | /* Clear all 6 debug registers: */ | ||
| 1142 | set_debugreg(0, 0); | ||
| 1143 | set_debugreg(0, 1); | ||
| 1144 | set_debugreg(0, 2); | ||
| 1145 | set_debugreg(0, 3); | ||
| 1146 | set_debugreg(0, 6); | ||
| 1147 | set_debugreg(0, 7); | ||
| 1148 | 1212 | ||
| 1149 | /* | 1213 | /* |
| 1150 | * Force FPU initialization: | 1214 | * Force FPU initialization: |
| @@ -1164,6 +1228,4 @@ void __cpuinit cpu_init(void) | |||
| 1164 | 1228 | ||
| 1165 | xsave_init(); | 1229 | xsave_init(); |
| 1166 | } | 1230 | } |
| 1167 | |||
| 1168 | |||
| 1169 | #endif | 1231 | #endif |
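The per-CPU irq_stack_union introduced above is declared in arch/x86/include/asm/processor.h, which is outside this diff. A rough sketch of its assumed shape, inferred from how irq_stack_ptr is initialized above, would be:

	union irq_stack_union {
		char irq_stack[IRQ_STACK_SIZE];
		/*
		 * Assumed overlay (declaration not shown in this diff): the low
		 * bytes of the IRQ stack double as the per-CPU GS base area,
		 * with the stack-protector canary at a small fixed offset.
		 */
		struct {
			char		gs_base[40];
			unsigned long	stack_canary;
		};
	};

irq_stack_ptr then points IRQ_STACK_SIZE - 64 bytes into irq_stack, mirroring the pda->irqstackptr setup removed earlier in this hunk.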
diff --git a/arch/x86/kernel/cpu/cpu.h b/arch/x86/kernel/cpu/cpu.h index de4094a39210..6de9a908e400 100644 --- a/arch/x86/kernel/cpu/cpu.h +++ b/arch/x86/kernel/cpu/cpu.h | |||
| @@ -3,33 +3,34 @@ | |||
| 3 | #define ARCH_X86_CPU_H | 3 | #define ARCH_X86_CPU_H |
| 4 | 4 | ||
| 5 | struct cpu_model_info { | 5 | struct cpu_model_info { |
| 6 | int vendor; | 6 | int vendor; |
| 7 | int family; | 7 | int family; |
| 8 | char *model_names[16]; | 8 | const char *model_names[16]; |
| 9 | }; | 9 | }; |
| 10 | 10 | ||
| 11 | /* attempt to consolidate cpu attributes */ | 11 | /* attempt to consolidate cpu attributes */ |
| 12 | struct cpu_dev { | 12 | struct cpu_dev { |
| 13 | char * c_vendor; | 13 | const char *c_vendor; |
| 14 | 14 | ||
| 15 | /* some have two possibilities for cpuid string */ | 15 | /* some have two possibilities for cpuid string */ |
| 16 | char * c_ident[2]; | 16 | const char *c_ident[2]; |
| 17 | 17 | ||
| 18 | struct cpu_model_info c_models[4]; | 18 | struct cpu_model_info c_models[4]; |
| 19 | 19 | ||
| 20 | void (*c_early_init)(struct cpuinfo_x86 *c); | 20 | void (*c_early_init)(struct cpuinfo_x86 *); |
| 21 | void (*c_init)(struct cpuinfo_x86 * c); | 21 | void (*c_init)(struct cpuinfo_x86 *); |
| 22 | void (*c_identify)(struct cpuinfo_x86 * c); | 22 | void (*c_identify)(struct cpuinfo_x86 *); |
| 23 | unsigned int (*c_size_cache)(struct cpuinfo_x86 * c, unsigned int size); | 23 | unsigned int (*c_size_cache)(struct cpuinfo_x86 *, unsigned int); |
| 24 | int c_x86_vendor; | 24 | int c_x86_vendor; |
| 25 | }; | 25 | }; |
| 26 | 26 | ||
| 27 | #define cpu_dev_register(cpu_devX) \ | 27 | #define cpu_dev_register(cpu_devX) \ |
| 28 | static struct cpu_dev *__cpu_dev_##cpu_devX __used \ | 28 | static const struct cpu_dev *const __cpu_dev_##cpu_devX __used \ |
| 29 | __attribute__((__section__(".x86_cpu_dev.init"))) = \ | 29 | __attribute__((__section__(".x86_cpu_dev.init"))) = \ |
| 30 | &cpu_devX; | 30 | &cpu_devX; |
| 31 | 31 | ||
| 32 | extern struct cpu_dev *__x86_cpu_dev_start[], *__x86_cpu_dev_end[]; | 32 | extern const struct cpu_dev *const __x86_cpu_dev_start[], |
| 33 | *const __x86_cpu_dev_end[]; | ||
| 33 | 34 | ||
| 34 | extern void display_cacheinfo(struct cpuinfo_x86 *c); | 35 | extern void display_cacheinfo(struct cpuinfo_x86 *c); |
| 35 | 36 | ||
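With cpu.h constified as above, a vendor driver registers itself against the same layout. A hypothetical sketch (the "Dummy" names are placeholders, not kernel code; only the field names come from the struct above):

	static void __cpuinit init_dummy(struct cpuinfo_x86 *c)
	{
		/* vendor-specific setup would go here */
	}

	static const struct cpu_dev __cpuinitconst dummy_cpu_dev = {
		.c_vendor	= "Dummy",
		.c_ident	= { "GenuineDummy" },
		.c_init		= init_dummy,
		.c_x86_vendor	= X86_VENDOR_UNKNOWN,
	};

	cpu_dev_register(dummy_cpu_dev);

The cyrix.c hunk later in this diff shows the same pattern applied to a real driver.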
diff --git a/arch/x86/kernel/cpu/cpu_debug.c b/arch/x86/kernel/cpu/cpu_debug.c new file mode 100755 index 000000000000..46e29ab96c6a --- /dev/null +++ b/arch/x86/kernel/cpu/cpu_debug.c | |||
| @@ -0,0 +1,901 @@ | |||
| 1 | /* | ||
| 2 | * CPU x86 architecture debug code | ||
| 3 | * | ||
| 4 | * Copyright(C) 2009 Jaswinder Singh Rajput | ||
| 5 | * | ||
| 6 | * For licensing details see kernel-base/COPYING | ||
| 7 | */ | ||
| 8 | |||
| 9 | #include <linux/interrupt.h> | ||
| 10 | #include <linux/compiler.h> | ||
| 11 | #include <linux/seq_file.h> | ||
| 12 | #include <linux/debugfs.h> | ||
| 13 | #include <linux/kprobes.h> | ||
| 14 | #include <linux/uaccess.h> | ||
| 15 | #include <linux/kernel.h> | ||
| 16 | #include <linux/module.h> | ||
| 17 | #include <linux/percpu.h> | ||
| 18 | #include <linux/signal.h> | ||
| 19 | #include <linux/errno.h> | ||
| 20 | #include <linux/sched.h> | ||
| 21 | #include <linux/types.h> | ||
| 22 | #include <linux/init.h> | ||
| 23 | #include <linux/slab.h> | ||
| 24 | #include <linux/smp.h> | ||
| 25 | |||
| 26 | #include <asm/cpu_debug.h> | ||
| 27 | #include <asm/paravirt.h> | ||
| 28 | #include <asm/system.h> | ||
| 29 | #include <asm/traps.h> | ||
| 30 | #include <asm/apic.h> | ||
| 31 | #include <asm/desc.h> | ||
| 32 | |||
| 33 | static DEFINE_PER_CPU(struct cpu_cpuX_base, cpu_arr[CPU_REG_ALL_BIT]); | ||
| 34 | static DEFINE_PER_CPU(struct cpu_private *, priv_arr[MAX_CPU_FILES]); | ||
| 35 | static DEFINE_PER_CPU(unsigned, cpu_modelflag); | ||
| 36 | static DEFINE_PER_CPU(int, cpu_priv_count); | ||
| 37 | static DEFINE_PER_CPU(unsigned, cpu_model); | ||
| 38 | |||
| 39 | static DEFINE_MUTEX(cpu_debug_lock); | ||
| 40 | |||
| 41 | static struct dentry *cpu_debugfs_dir; | ||
| 42 | |||
| 43 | static struct cpu_debug_base cpu_base[] = { | ||
| 44 | { "mc", CPU_MC, 0 }, | ||
| 45 | { "monitor", CPU_MONITOR, 0 }, | ||
| 46 | { "time", CPU_TIME, 0 }, | ||
| 47 | { "pmc", CPU_PMC, 1 }, | ||
| 48 | { "platform", CPU_PLATFORM, 0 }, | ||
| 49 | { "apic", CPU_APIC, 0 }, | ||
| 50 | { "poweron", CPU_POWERON, 0 }, | ||
| 51 | { "control", CPU_CONTROL, 0 }, | ||
| 52 | { "features", CPU_FEATURES, 0 }, | ||
| 53 | { "lastbranch", CPU_LBRANCH, 0 }, | ||
| 54 | { "bios", CPU_BIOS, 0 }, | ||
| 55 | { "freq", CPU_FREQ, 0 }, | ||
| 56 | { "mtrr", CPU_MTRR, 0 }, | ||
| 57 | { "perf", CPU_PERF, 0 }, | ||
| 58 | { "cache", CPU_CACHE, 0 }, | ||
| 59 | { "sysenter", CPU_SYSENTER, 0 }, | ||
| 60 | { "therm", CPU_THERM, 0 }, | ||
| 61 | { "misc", CPU_MISC, 0 }, | ||
| 62 | { "debug", CPU_DEBUG, 0 }, | ||
| 63 | { "pat", CPU_PAT, 0 }, | ||
| 64 | { "vmx", CPU_VMX, 0 }, | ||
| 65 | { "call", CPU_CALL, 0 }, | ||
| 66 | { "base", CPU_BASE, 0 }, | ||
| 67 | { "ver", CPU_VER, 0 }, | ||
| 68 | { "conf", CPU_CONF, 0 }, | ||
| 69 | { "smm", CPU_SMM, 0 }, | ||
| 70 | { "svm", CPU_SVM, 0 }, | ||
| 71 | { "osvm", CPU_OSVM, 0 }, | ||
| 72 | { "tss", CPU_TSS, 0 }, | ||
| 73 | { "cr", CPU_CR, 0 }, | ||
| 74 | { "dt", CPU_DT, 0 }, | ||
| 75 | { "registers", CPU_REG_ALL, 0 }, | ||
| 76 | }; | ||
| 77 | |||
| 78 | static struct cpu_file_base cpu_file[] = { | ||
| 79 | { "index", CPU_REG_ALL, 0 }, | ||
| 80 | { "value", CPU_REG_ALL, 1 }, | ||
| 81 | }; | ||
| 82 | |||
| 83 | /* Intel register ranges */ | ||
| 84 | static struct cpu_debug_range cpu_intel_range[] = { | ||
| 85 | { 0x00000000, 0x00000001, CPU_MC, CPU_INTEL_ALL }, | ||
| 86 | { 0x00000006, 0x00000007, CPU_MONITOR, CPU_CX_AT_XE }, | ||
| 87 | { 0x00000010, 0x00000010, CPU_TIME, CPU_INTEL_ALL }, | ||
| 88 | { 0x00000011, 0x00000013, CPU_PMC, CPU_INTEL_PENTIUM }, | ||
| 89 | { 0x00000017, 0x00000017, CPU_PLATFORM, CPU_PX_CX_AT_XE }, | ||
| 90 | { 0x0000001B, 0x0000001B, CPU_APIC, CPU_P6_CX_AT_XE }, | ||
| 91 | |||
| 92 | { 0x0000002A, 0x0000002A, CPU_POWERON, CPU_PX_CX_AT_XE }, | ||
| 93 | { 0x0000002B, 0x0000002B, CPU_POWERON, CPU_INTEL_XEON }, | ||
| 94 | { 0x0000002C, 0x0000002C, CPU_FREQ, CPU_INTEL_XEON }, | ||
| 95 | { 0x0000003A, 0x0000003A, CPU_CONTROL, CPU_CX_AT_XE }, | ||
| 96 | |||
| 97 | { 0x00000040, 0x00000043, CPU_LBRANCH, CPU_PM_CX_AT_XE }, | ||
| 98 | { 0x00000044, 0x00000047, CPU_LBRANCH, CPU_PM_CO_AT }, | ||
| 99 | { 0x00000060, 0x00000063, CPU_LBRANCH, CPU_C2_AT }, | ||
| 100 | { 0x00000064, 0x00000067, CPU_LBRANCH, CPU_INTEL_ATOM }, | ||
| 101 | |||
| 102 | { 0x00000079, 0x00000079, CPU_BIOS, CPU_P6_CX_AT_XE }, | ||
| 103 | { 0x00000088, 0x0000008A, CPU_CACHE, CPU_INTEL_P6 }, | ||
| 104 | { 0x0000008B, 0x0000008B, CPU_BIOS, CPU_P6_CX_AT_XE }, | ||
| 105 | { 0x0000009B, 0x0000009B, CPU_MONITOR, CPU_INTEL_XEON }, | ||
| 106 | |||
| 107 | { 0x000000C1, 0x000000C2, CPU_PMC, CPU_P6_CX_AT }, | ||
| 108 | { 0x000000CD, 0x000000CD, CPU_FREQ, CPU_CX_AT }, | ||
| 109 | { 0x000000E7, 0x000000E8, CPU_PERF, CPU_CX_AT }, | ||
| 110 | { 0x000000FE, 0x000000FE, CPU_MTRR, CPU_P6_CX_XE }, | ||
| 111 | |||
| 112 | { 0x00000116, 0x00000116, CPU_CACHE, CPU_INTEL_P6 }, | ||
| 113 | { 0x00000118, 0x00000118, CPU_CACHE, CPU_INTEL_P6 }, | ||
| 114 | { 0x00000119, 0x00000119, CPU_CACHE, CPU_INTEL_PX }, | ||
| 115 | { 0x0000011A, 0x0000011B, CPU_CACHE, CPU_INTEL_P6 }, | ||
| 116 | { 0x0000011E, 0x0000011E, CPU_CACHE, CPU_PX_CX_AT }, | ||
| 117 | |||
| 118 | { 0x00000174, 0x00000176, CPU_SYSENTER, CPU_P6_CX_AT_XE }, | ||
| 119 | { 0x00000179, 0x0000017A, CPU_MC, CPU_PX_CX_AT_XE }, | ||
| 120 | { 0x0000017B, 0x0000017B, CPU_MC, CPU_P6_XE }, | ||
| 121 | { 0x00000186, 0x00000187, CPU_PMC, CPU_P6_CX_AT }, | ||
| 122 | { 0x00000198, 0x00000199, CPU_PERF, CPU_PM_CX_AT_XE }, | ||
| 123 | { 0x0000019A, 0x0000019A, CPU_TIME, CPU_PM_CX_AT_XE }, | ||
| 124 | { 0x0000019B, 0x0000019D, CPU_THERM, CPU_PM_CX_AT_XE }, | ||
| 125 | { 0x000001A0, 0x000001A0, CPU_MISC, CPU_PM_CX_AT_XE }, | ||
| 126 | |||
| 127 | { 0x000001C9, 0x000001C9, CPU_LBRANCH, CPU_PM_CX_AT }, | ||
| 128 | { 0x000001D7, 0x000001D8, CPU_LBRANCH, CPU_INTEL_XEON }, | ||
| 129 | { 0x000001D9, 0x000001D9, CPU_DEBUG, CPU_CX_AT_XE }, | ||
| 130 | { 0x000001DA, 0x000001DA, CPU_LBRANCH, CPU_INTEL_XEON }, | ||
| 131 | { 0x000001DB, 0x000001DB, CPU_LBRANCH, CPU_P6_XE }, | ||
| 132 | { 0x000001DC, 0x000001DC, CPU_LBRANCH, CPU_INTEL_P6 }, | ||
| 133 | { 0x000001DD, 0x000001DE, CPU_LBRANCH, CPU_PX_CX_AT_XE }, | ||
| 134 | { 0x000001E0, 0x000001E0, CPU_LBRANCH, CPU_INTEL_P6 }, | ||
| 135 | |||
| 136 | { 0x00000200, 0x0000020F, CPU_MTRR, CPU_P6_CX_XE }, | ||
| 137 | { 0x00000250, 0x00000250, CPU_MTRR, CPU_P6_CX_XE }, | ||
| 138 | { 0x00000258, 0x00000259, CPU_MTRR, CPU_P6_CX_XE }, | ||
| 139 | { 0x00000268, 0x0000026F, CPU_MTRR, CPU_P6_CX_XE }, | ||
| 140 | { 0x00000277, 0x00000277, CPU_PAT, CPU_C2_AT_XE }, | ||
| 141 | { 0x000002FF, 0x000002FF, CPU_MTRR, CPU_P6_CX_XE }, | ||
| 142 | |||
| 143 | { 0x00000300, 0x00000308, CPU_PMC, CPU_INTEL_XEON }, | ||
| 144 | { 0x00000309, 0x0000030B, CPU_PMC, CPU_C2_AT_XE }, | ||
| 145 | { 0x0000030C, 0x00000311, CPU_PMC, CPU_INTEL_XEON }, | ||
| 146 | { 0x00000345, 0x00000345, CPU_PMC, CPU_C2_AT }, | ||
| 147 | { 0x00000360, 0x00000371, CPU_PMC, CPU_INTEL_XEON }, | ||
| 148 | { 0x0000038D, 0x00000390, CPU_PMC, CPU_C2_AT }, | ||
| 149 | { 0x000003A0, 0x000003BE, CPU_PMC, CPU_INTEL_XEON }, | ||
| 150 | { 0x000003C0, 0x000003CD, CPU_PMC, CPU_INTEL_XEON }, | ||
| 151 | { 0x000003E0, 0x000003E1, CPU_PMC, CPU_INTEL_XEON }, | ||
| 152 | { 0x000003F0, 0x000003F0, CPU_PMC, CPU_INTEL_XEON }, | ||
| 153 | { 0x000003F1, 0x000003F1, CPU_PMC, CPU_C2_AT_XE }, | ||
| 154 | { 0x000003F2, 0x000003F2, CPU_PMC, CPU_INTEL_XEON }, | ||
| 155 | |||
| 156 | { 0x00000400, 0x00000402, CPU_MC, CPU_PM_CX_AT_XE }, | ||
| 157 | { 0x00000403, 0x00000403, CPU_MC, CPU_INTEL_XEON }, | ||
| 158 | { 0x00000404, 0x00000406, CPU_MC, CPU_PM_CX_AT_XE }, | ||
| 159 | { 0x00000407, 0x00000407, CPU_MC, CPU_INTEL_XEON }, | ||
| 160 | { 0x00000408, 0x0000040A, CPU_MC, CPU_PM_CX_AT_XE }, | ||
| 161 | { 0x0000040B, 0x0000040B, CPU_MC, CPU_INTEL_XEON }, | ||
| 162 | { 0x0000040C, 0x0000040E, CPU_MC, CPU_PM_CX_XE }, | ||
| 163 | { 0x0000040F, 0x0000040F, CPU_MC, CPU_INTEL_XEON }, | ||
| 164 | { 0x00000410, 0x00000412, CPU_MC, CPU_PM_CX_AT_XE }, | ||
| 165 | { 0x00000413, 0x00000417, CPU_MC, CPU_CX_AT_XE }, | ||
| 166 | { 0x00000480, 0x0000048B, CPU_VMX, CPU_CX_AT_XE }, | ||
| 167 | |||
| 168 | { 0x00000600, 0x00000600, CPU_DEBUG, CPU_PM_CX_AT_XE }, | ||
| 169 | { 0x00000680, 0x0000068F, CPU_LBRANCH, CPU_INTEL_XEON }, | ||
| 170 | { 0x000006C0, 0x000006CF, CPU_LBRANCH, CPU_INTEL_XEON }, | ||
| 171 | |||
| 172 | { 0x000107CC, 0x000107D3, CPU_PMC, CPU_INTEL_XEON_MP }, | ||
| 173 | |||
| 174 | { 0xC0000080, 0xC0000080, CPU_FEATURES, CPU_INTEL_XEON }, | ||
| 175 | { 0xC0000081, 0xC0000082, CPU_CALL, CPU_INTEL_XEON }, | ||
| 176 | { 0xC0000084, 0xC0000084, CPU_CALL, CPU_INTEL_XEON }, | ||
| 177 | { 0xC0000100, 0xC0000102, CPU_BASE, CPU_INTEL_XEON }, | ||
| 178 | }; | ||
| 179 | |||
| 180 | /* AMD register ranges */ | ||
| 181 | static struct cpu_debug_range cpu_amd_range[] = { | ||
| 182 | { 0x00000000, 0x00000001, CPU_MC, CPU_K10_PLUS, }, | ||
| 183 | { 0x00000010, 0x00000010, CPU_TIME, CPU_K8_PLUS, }, | ||
| 184 | { 0x0000001B, 0x0000001B, CPU_APIC, CPU_K8_PLUS, }, | ||
| 185 | { 0x0000002A, 0x0000002A, CPU_POWERON, CPU_K7_PLUS }, | ||
| 186 | { 0x0000008B, 0x0000008B, CPU_VER, CPU_K8_PLUS }, | ||
| 187 | { 0x000000FE, 0x000000FE, CPU_MTRR, CPU_K8_PLUS, }, | ||
| 188 | |||
| 189 | { 0x00000174, 0x00000176, CPU_SYSENTER, CPU_K8_PLUS, }, | ||
| 190 | { 0x00000179, 0x0000017B, CPU_MC, CPU_K8_PLUS, }, | ||
| 191 | { 0x000001D9, 0x000001D9, CPU_DEBUG, CPU_K8_PLUS, }, | ||
| 192 | { 0x000001DB, 0x000001DE, CPU_LBRANCH, CPU_K8_PLUS, }, | ||
| 193 | |||
| 194 | { 0x00000200, 0x0000020F, CPU_MTRR, CPU_K8_PLUS, }, | ||
| 195 | { 0x00000250, 0x00000250, CPU_MTRR, CPU_K8_PLUS, }, | ||
| 196 | { 0x00000258, 0x00000259, CPU_MTRR, CPU_K8_PLUS, }, | ||
| 197 | { 0x00000268, 0x0000026F, CPU_MTRR, CPU_K8_PLUS, }, | ||
| 198 | { 0x00000277, 0x00000277, CPU_PAT, CPU_K8_PLUS, }, | ||
| 199 | { 0x000002FF, 0x000002FF, CPU_MTRR, CPU_K8_PLUS, }, | ||
| 200 | |||
| 201 | { 0x00000400, 0x00000413, CPU_MC, CPU_K8_PLUS, }, | ||
| 202 | |||
| 203 | { 0xC0000080, 0xC0000080, CPU_FEATURES, CPU_AMD_ALL, }, | ||
| 204 | { 0xC0000081, 0xC0000084, CPU_CALL, CPU_K8_PLUS, }, | ||
| 205 | { 0xC0000100, 0xC0000102, CPU_BASE, CPU_K8_PLUS, }, | ||
| 206 | { 0xC0000103, 0xC0000103, CPU_TIME, CPU_K10_PLUS, }, | ||
| 207 | |||
| 208 | { 0xC0010000, 0xC0010007, CPU_PMC, CPU_K8_PLUS, }, | ||
| 209 | { 0xC0010010, 0xC0010010, CPU_CONF, CPU_K7_PLUS, }, | ||
| 210 | { 0xC0010015, 0xC0010015, CPU_CONF, CPU_K7_PLUS, }, | ||
| 211 | { 0xC0010016, 0xC001001A, CPU_MTRR, CPU_K8_PLUS, }, | ||
| 212 | { 0xC001001D, 0xC001001D, CPU_MTRR, CPU_K8_PLUS, }, | ||
| 213 | { 0xC001001F, 0xC001001F, CPU_CONF, CPU_K8_PLUS, }, | ||
| 214 | { 0xC0010030, 0xC0010035, CPU_BIOS, CPU_K8_PLUS, }, | ||
| 215 | { 0xC0010044, 0xC0010048, CPU_MC, CPU_K8_PLUS, }, | ||
| 216 | { 0xC0010050, 0xC0010056, CPU_SMM, CPU_K0F_PLUS, }, | ||
| 217 | { 0xC0010058, 0xC0010058, CPU_CONF, CPU_K10_PLUS, }, | ||
| 218 | { 0xC0010060, 0xC0010060, CPU_CACHE, CPU_AMD_11, }, | ||
| 219 | { 0xC0010061, 0xC0010068, CPU_SMM, CPU_K10_PLUS, }, | ||
| 220 | { 0xC0010069, 0xC001006B, CPU_SMM, CPU_AMD_11, }, | ||
| 221 | { 0xC0010070, 0xC0010071, CPU_SMM, CPU_K10_PLUS, }, | ||
| 222 | { 0xC0010111, 0xC0010113, CPU_SMM, CPU_K8_PLUS, }, | ||
| 223 | { 0xC0010114, 0xC0010118, CPU_SVM, CPU_K10_PLUS, }, | ||
| 224 | { 0xC0010140, 0xC0010141, CPU_OSVM, CPU_K10_PLUS, }, | ||
| 225 | { 0xC0011022, 0xC0011023, CPU_CONF, CPU_K10_PLUS, }, | ||
| 226 | }; | ||
| 227 | |||
| 228 | |||
| 229 | /* Intel */ | ||
| 230 | static int get_intel_modelflag(unsigned model) | ||
| 231 | { | ||
| 232 | int flag; | ||
| 233 | |||
| 234 | switch (model) { | ||
| 235 | case 0x0501: | ||
| 236 | case 0x0502: | ||
| 237 | case 0x0504: | ||
| 238 | flag = CPU_INTEL_PENTIUM; | ||
| 239 | break; | ||
| 240 | case 0x0601: | ||
| 241 | case 0x0603: | ||
| 242 | case 0x0605: | ||
| 243 | case 0x0607: | ||
| 244 | case 0x0608: | ||
| 245 | case 0x060A: | ||
| 246 | case 0x060B: | ||
| 247 | flag = CPU_INTEL_P6; | ||
| 248 | break; | ||
| 249 | case 0x0609: | ||
| 250 | case 0x060D: | ||
| 251 | flag = CPU_INTEL_PENTIUM_M; | ||
| 252 | break; | ||
| 253 | case 0x060E: | ||
| 254 | flag = CPU_INTEL_CORE; | ||
| 255 | break; | ||
| 256 | case 0x060F: | ||
| 257 | case 0x0617: | ||
| 258 | flag = CPU_INTEL_CORE2; | ||
| 259 | break; | ||
| 260 | case 0x061C: | ||
| 261 | flag = CPU_INTEL_ATOM; | ||
| 262 | break; | ||
| 263 | case 0x0F00: | ||
| 264 | case 0x0F01: | ||
| 265 | case 0x0F02: | ||
| 266 | case 0x0F03: | ||
| 267 | case 0x0F04: | ||
| 268 | flag = CPU_INTEL_XEON_P4; | ||
| 269 | break; | ||
| 270 | case 0x0F06: | ||
| 271 | flag = CPU_INTEL_XEON_MP; | ||
| 272 | break; | ||
| 273 | default: | ||
| 274 | flag = CPU_NONE; | ||
| 275 | break; | ||
| 276 | } | ||
| 277 | |||
| 278 | return flag; | ||
| 279 | } | ||
| 280 | |||
| 281 | /* AMD */ | ||
| 282 | static int get_amd_modelflag(unsigned model) | ||
| 283 | { | ||
| 284 | int flag; | ||
| 285 | |||
| 286 | switch (model >> 8) { | ||
| 287 | case 0x6: | ||
| 288 | flag = CPU_AMD_K6; | ||
| 289 | break; | ||
| 290 | case 0x7: | ||
| 291 | flag = CPU_AMD_K7; | ||
| 292 | break; | ||
| 293 | case 0x8: | ||
| 294 | flag = CPU_AMD_K8; | ||
| 295 | break; | ||
| 296 | case 0xf: | ||
| 297 | flag = CPU_AMD_0F; | ||
| 298 | break; | ||
| 299 | case 0x10: | ||
| 300 | flag = CPU_AMD_10; | ||
| 301 | break; | ||
| 302 | case 0x11: | ||
| 303 | flag = CPU_AMD_11; | ||
| 304 | break; | ||
| 305 | default: | ||
| 306 | flag = CPU_NONE; | ||
| 307 | break; | ||
| 308 | } | ||
| 309 | |||
| 310 | return flag; | ||
| 311 | } | ||
| 312 | |||
| 313 | static int get_cpu_modelflag(unsigned cpu) | ||
| 314 | { | ||
| 315 | int flag; | ||
| 316 | |||
| 317 | flag = per_cpu(cpu_model, cpu); | ||
| 318 | |||
| 319 | switch (flag >> 16) { | ||
| 320 | case X86_VENDOR_INTEL: | ||
| 321 | flag = get_intel_modelflag(flag); | ||
| 322 | break; | ||
| 323 | case X86_VENDOR_AMD: | ||
| 324 | flag = get_amd_modelflag(flag & 0xffff); | ||
| 325 | break; | ||
| 326 | default: | ||
| 327 | flag = CPU_NONE; | ||
| 328 | break; | ||
| 329 | } | ||
| 330 | |||
| 331 | return flag; | ||
| 332 | } | ||
| 333 | |||
| 334 | static int get_cpu_range_count(unsigned cpu) | ||
| 335 | { | ||
| 336 | int index; | ||
| 337 | |||
| 338 | switch (per_cpu(cpu_model, cpu) >> 16) { | ||
| 339 | case X86_VENDOR_INTEL: | ||
| 340 | index = ARRAY_SIZE(cpu_intel_range); | ||
| 341 | break; | ||
| 342 | case X86_VENDOR_AMD: | ||
| 343 | index = ARRAY_SIZE(cpu_amd_range); | ||
| 344 | break; | ||
| 345 | default: | ||
| 346 | index = 0; | ||
| 347 | break; | ||
| 348 | } | ||
| 349 | |||
| 350 | return index; | ||
| 351 | } | ||
| 352 | |||
| 353 | static int is_typeflag_valid(unsigned cpu, unsigned flag) | ||
| 354 | { | ||
| 355 | unsigned vendor, modelflag; | ||
| 356 | int i, index; | ||
| 357 | |||
| 358 | /* Standard registers should always be valid */ | ||
| 359 | if (flag >= CPU_TSS) | ||
| 360 | return 1; | ||
| 361 | |||
| 362 | modelflag = per_cpu(cpu_modelflag, cpu); | ||
| 363 | vendor = per_cpu(cpu_model, cpu) >> 16; | ||
| 364 | index = get_cpu_range_count(cpu); | ||
| 365 | |||
| 366 | for (i = 0; i < index; i++) { | ||
| 367 | switch (vendor) { | ||
| 368 | case X86_VENDOR_INTEL: | ||
| 369 | if ((cpu_intel_range[i].model & modelflag) && | ||
| 370 | (cpu_intel_range[i].flag & flag)) | ||
| 371 | return 1; | ||
| 372 | break; | ||
| 373 | case X86_VENDOR_AMD: | ||
| 374 | if ((cpu_amd_range[i].model & modelflag) && | ||
| 375 | (cpu_amd_range[i].flag & flag)) | ||
| 376 | return 1; | ||
| 377 | break; | ||
| 378 | } | ||
| 379 | } | ||
| 380 | |||
| 381 | /* Invalid */ | ||
| 382 | return 0; | ||
| 383 | } | ||
| 384 | |||
| 385 | static unsigned get_cpu_range(unsigned cpu, unsigned *min, unsigned *max, | ||
| 386 | int index, unsigned flag) | ||
| 387 | { | ||
| 388 | unsigned modelflag; | ||
| 389 | |||
| 390 | modelflag = per_cpu(cpu_modelflag, cpu); | ||
| 391 | *max = 0; | ||
| 392 | switch (per_cpu(cpu_model, cpu) >> 16) { | ||
| 393 | case X86_VENDOR_INTEL: | ||
| 394 | if ((cpu_intel_range[index].model & modelflag) && | ||
| 395 | (cpu_intel_range[index].flag & flag)) { | ||
| 396 | *min = cpu_intel_range[index].min; | ||
| 397 | *max = cpu_intel_range[index].max; | ||
| 398 | } | ||
| 399 | break; | ||
| 400 | case X86_VENDOR_AMD: | ||
| 401 | if ((cpu_amd_range[index].model & modelflag) && | ||
| 402 | (cpu_amd_range[index].flag & flag)) { | ||
| 403 | *min = cpu_amd_range[index].min; | ||
| 404 | *max = cpu_amd_range[index].max; | ||
| 405 | } | ||
| 406 | break; | ||
| 407 | } | ||
| 408 | |||
| 409 | return *max; | ||
| 410 | } | ||
| 411 | |||
| 412 | /* This function can also be called with seq = NULL for printk */ | ||
| 413 | static void print_cpu_data(struct seq_file *seq, unsigned type, | ||
| 414 | u32 low, u32 high) | ||
| 415 | { | ||
| 416 | struct cpu_private *priv; | ||
| 417 | u64 val = high; | ||
| 418 | |||
| 419 | if (seq) { | ||
| 420 | priv = seq->private; | ||
| 421 | if (priv->file) { | ||
| 422 | val = (val << 32) | low; | ||
| 423 | seq_printf(seq, "0x%llx\n", val); | ||
| 424 | } else | ||
| 425 | seq_printf(seq, " %08x: %08x_%08x\n", | ||
| 426 | type, high, low); | ||
| 427 | } else | ||
| 428 | printk(KERN_INFO " %08x: %08x_%08x\n", type, high, low); | ||
| 429 | } | ||
| 430 | |||
| 431 | /* This function can also be called with seq = NULL for printk */ | ||
| 432 | static void print_msr(struct seq_file *seq, unsigned cpu, unsigned flag) | ||
| 433 | { | ||
| 434 | unsigned msr, msr_min, msr_max; | ||
| 435 | struct cpu_private *priv; | ||
| 436 | u32 low, high; | ||
| 437 | int i, range; | ||
| 438 | |||
| 439 | if (seq) { | ||
| 440 | priv = seq->private; | ||
| 441 | if (priv->file) { | ||
| 442 | if (!rdmsr_safe_on_cpu(priv->cpu, priv->reg, | ||
| 443 | &low, &high)) | ||
| 444 | print_cpu_data(seq, priv->reg, low, high); | ||
| 445 | return; | ||
| 446 | } | ||
| 447 | } | ||
| 448 | |||
| 449 | range = get_cpu_range_count(cpu); | ||
| 450 | |||
| 451 | for (i = 0; i < range; i++) { | ||
| 452 | if (!get_cpu_range(cpu, &msr_min, &msr_max, i, flag)) | ||
| 453 | continue; | ||
| 454 | |||
| 455 | for (msr = msr_min; msr <= msr_max; msr++) { | ||
| 456 | if (rdmsr_safe_on_cpu(cpu, msr, &low, &high)) | ||
| 457 | continue; | ||
| 458 | print_cpu_data(seq, msr, low, high); | ||
| 459 | } | ||
| 460 | } | ||
| 461 | } | ||
| 462 | |||
| 463 | static void print_tss(void *arg) | ||
| 464 | { | ||
| 465 | struct pt_regs *regs = task_pt_regs(current); | ||
| 466 | struct seq_file *seq = arg; | ||
| 467 | unsigned int seg; | ||
| 468 | |||
| 469 | seq_printf(seq, " RAX\t: %016lx\n", regs->ax); | ||
| 470 | seq_printf(seq, " RBX\t: %016lx\n", regs->bx); | ||
| 471 | seq_printf(seq, " RCX\t: %016lx\n", regs->cx); | ||
| 472 | seq_printf(seq, " RDX\t: %016lx\n", regs->dx); | ||
| 473 | |||
| 474 | seq_printf(seq, " RSI\t: %016lx\n", regs->si); | ||
| 475 | seq_printf(seq, " RDI\t: %016lx\n", regs->di); | ||
| 476 | seq_printf(seq, " RBP\t: %016lx\n", regs->bp); | ||
| 477 | seq_printf(seq, " ESP\t: %016lx\n", regs->sp); | ||
| 478 | |||
| 479 | #ifdef CONFIG_X86_64 | ||
| 480 | seq_printf(seq, " R08\t: %016lx\n", regs->r8); | ||
| 481 | seq_printf(seq, " R09\t: %016lx\n", regs->r9); | ||
| 482 | seq_printf(seq, " R10\t: %016lx\n", regs->r10); | ||
| 483 | seq_printf(seq, " R11\t: %016lx\n", regs->r11); | ||
| 484 | seq_printf(seq, " R12\t: %016lx\n", regs->r12); | ||
| 485 | seq_printf(seq, " R13\t: %016lx\n", regs->r13); | ||
| 486 | seq_printf(seq, " R14\t: %016lx\n", regs->r14); | ||
| 487 | seq_printf(seq, " R15\t: %016lx\n", regs->r15); | ||
| 488 | #endif | ||
| 489 | |||
| 490 | asm("movl %%cs,%0" : "=r" (seg)); | ||
| 491 | seq_printf(seq, " CS\t: %04x\n", seg); | ||
| 492 | asm("movl %%ds,%0" : "=r" (seg)); | ||
| 493 | seq_printf(seq, " DS\t: %04x\n", seg); | ||
| 494 | seq_printf(seq, " SS\t: %04lx\n", regs->ss & 0xffff); | ||
| 495 | asm("movl %%es,%0" : "=r" (seg)); | ||
| 496 | seq_printf(seq, " ES\t: %04x\n", seg); | ||
| 497 | asm("movl %%fs,%0" : "=r" (seg)); | ||
| 498 | seq_printf(seq, " FS\t: %04x\n", seg); | ||
| 499 | asm("movl %%gs,%0" : "=r" (seg)); | ||
| 500 | seq_printf(seq, " GS\t: %04x\n", seg); | ||
| 501 | |||
| 502 | seq_printf(seq, " EFLAGS\t: %016lx\n", regs->flags); | ||
| 503 | |||
| 504 | seq_printf(seq, " EIP\t: %016lx\n", regs->ip); | ||
| 505 | } | ||
| 506 | |||
| 507 | static void print_cr(void *arg) | ||
| 508 | { | ||
| 509 | struct seq_file *seq = arg; | ||
| 510 | |||
| 511 | seq_printf(seq, " cr0\t: %016lx\n", read_cr0()); | ||
| 512 | seq_printf(seq, " cr2\t: %016lx\n", read_cr2()); | ||
| 513 | seq_printf(seq, " cr3\t: %016lx\n", read_cr3()); | ||
| 514 | seq_printf(seq, " cr4\t: %016lx\n", read_cr4_safe()); | ||
| 515 | #ifdef CONFIG_X86_64 | ||
| 516 | seq_printf(seq, " cr8\t: %016lx\n", read_cr8()); | ||
| 517 | #endif | ||
| 518 | } | ||
| 519 | |||
| 520 | static void print_desc_ptr(char *str, struct seq_file *seq, struct desc_ptr dt) | ||
| 521 | { | ||
| 522 | seq_printf(seq, " %s\t: %016llx\n", str, (u64)(dt.address | dt.size)); | ||
| 523 | } | ||
| 524 | |||
| 525 | static void print_dt(void *seq) | ||
| 526 | { | ||
| 527 | struct desc_ptr dt; | ||
| 528 | unsigned long ldt; | ||
| 529 | |||
| 530 | /* IDT */ | ||
| 531 | store_idt((struct desc_ptr *)&dt); | ||
| 532 | print_desc_ptr("IDT", seq, dt); | ||
| 533 | |||
| 534 | /* GDT */ | ||
| 535 | store_gdt((struct desc_ptr *)&dt); | ||
| 536 | print_desc_ptr("GDT", seq, dt); | ||
| 537 | |||
| 538 | /* LDT */ | ||
| 539 | store_ldt(ldt); | ||
| 540 | seq_printf(seq, " LDT\t: %016lx\n", ldt); | ||
| 541 | |||
| 542 | /* TR */ | ||
| 543 | store_tr(ldt); | ||
| 544 | seq_printf(seq, " TR\t: %016lx\n", ldt); | ||
| 545 | } | ||
| 546 | |||
| 547 | static void print_dr(void *arg) | ||
| 548 | { | ||
| 549 | struct seq_file *seq = arg; | ||
| 550 | unsigned long dr; | ||
| 551 | int i; | ||
| 552 | |||
| 553 | for (i = 0; i < 8; i++) { | ||
| 554 | /* Ignore db4, db5 */ | ||
| 555 | if ((i == 4) || (i == 5)) | ||
| 556 | continue; | ||
| 557 | get_debugreg(dr, i); | ||
| 558 | seq_printf(seq, " dr%d\t: %016lx\n", i, dr); | ||
| 559 | } | ||
| 560 | |||
| 561 | seq_printf(seq, "\n MSR\t:\n"); | ||
| 562 | } | ||
| 563 | |||
| 564 | static void print_apic(void *arg) | ||
| 565 | { | ||
| 566 | struct seq_file *seq = arg; | ||
| 567 | |||
| 568 | #ifdef CONFIG_X86_LOCAL_APIC | ||
| 569 | seq_printf(seq, " LAPIC\t:\n"); | ||
| 570 | seq_printf(seq, " ID\t\t: %08x\n", apic_read(APIC_ID) >> 24); | ||
| 571 | seq_printf(seq, " LVR\t\t: %08x\n", apic_read(APIC_LVR)); | ||
| 572 | seq_printf(seq, " TASKPRI\t: %08x\n", apic_read(APIC_TASKPRI)); | ||
| 573 | seq_printf(seq, " ARBPRI\t\t: %08x\n", apic_read(APIC_ARBPRI)); | ||
| 574 | seq_printf(seq, " PROCPRI\t: %08x\n", apic_read(APIC_PROCPRI)); | ||
| 575 | seq_printf(seq, " LDR\t\t: %08x\n", apic_read(APIC_LDR)); | ||
| 576 | seq_printf(seq, " DFR\t\t: %08x\n", apic_read(APIC_DFR)); | ||
| 577 | seq_printf(seq, " SPIV\t\t: %08x\n", apic_read(APIC_SPIV)); | ||
| 578 | seq_printf(seq, " ISR\t\t: %08x\n", apic_read(APIC_ISR)); | ||
| 579 | seq_printf(seq, " ESR\t\t: %08x\n", apic_read(APIC_ESR)); | ||
| 580 | seq_printf(seq, " ICR\t\t: %08x\n", apic_read(APIC_ICR)); | ||
| 581 | seq_printf(seq, " ICR2\t\t: %08x\n", apic_read(APIC_ICR2)); | ||
| 582 | seq_printf(seq, " LVTT\t\t: %08x\n", apic_read(APIC_LVTT)); | ||
| 583 | seq_printf(seq, " LVTTHMR\t: %08x\n", apic_read(APIC_LVTTHMR)); | ||
| 584 | seq_printf(seq, " LVTPC\t\t: %08x\n", apic_read(APIC_LVTPC)); | ||
| 585 | seq_printf(seq, " LVT0\t\t: %08x\n", apic_read(APIC_LVT0)); | ||
| 586 | seq_printf(seq, " LVT1\t\t: %08x\n", apic_read(APIC_LVT1)); | ||
| 587 | seq_printf(seq, " LVTERR\t\t: %08x\n", apic_read(APIC_LVTERR)); | ||
| 588 | seq_printf(seq, " TMICT\t\t: %08x\n", apic_read(APIC_TMICT)); | ||
| 589 | seq_printf(seq, " TMCCT\t\t: %08x\n", apic_read(APIC_TMCCT)); | ||
| 590 | seq_printf(seq, " TDCR\t\t: %08x\n", apic_read(APIC_TDCR)); | ||
| 591 | #endif /* CONFIG_X86_LOCAL_APIC */ | ||
| 592 | |||
| 593 | seq_printf(seq, "\n MSR\t:\n"); | ||
| 594 | } | ||
| 595 | |||
| 596 | static int cpu_seq_show(struct seq_file *seq, void *v) | ||
| 597 | { | ||
| 598 | struct cpu_private *priv = seq->private; | ||
| 599 | |||
| 600 | if (priv == NULL) | ||
| 601 | return -EINVAL; | ||
| 602 | |||
| 603 | switch (cpu_base[priv->type].flag) { | ||
| 604 | case CPU_TSS: | ||
| 605 | smp_call_function_single(priv->cpu, print_tss, seq, 1); | ||
| 606 | break; | ||
| 607 | case CPU_CR: | ||
| 608 | smp_call_function_single(priv->cpu, print_cr, seq, 1); | ||
| 609 | break; | ||
| 610 | case CPU_DT: | ||
| 611 | smp_call_function_single(priv->cpu, print_dt, seq, 1); | ||
| 612 | break; | ||
| 613 | case CPU_DEBUG: | ||
| 614 | if (priv->file == CPU_INDEX_BIT) | ||
| 615 | smp_call_function_single(priv->cpu, print_dr, seq, 1); | ||
| 616 | print_msr(seq, priv->cpu, cpu_base[priv->type].flag); | ||
| 617 | break; | ||
| 618 | case CPU_APIC: | ||
| 619 | if (priv->file == CPU_INDEX_BIT) | ||
| 620 | smp_call_function_single(priv->cpu, print_apic, seq, 1); | ||
| 621 | print_msr(seq, priv->cpu, cpu_base[priv->type].flag); | ||
| 622 | break; | ||
| 623 | |||
| 624 | default: | ||
| 625 | print_msr(seq, priv->cpu, cpu_base[priv->type].flag); | ||
| 626 | break; | ||
| 627 | } | ||
| 628 | seq_printf(seq, "\n"); | ||
| 629 | |||
| 630 | return 0; | ||
| 631 | } | ||
| 632 | |||
| 633 | static void *cpu_seq_start(struct seq_file *seq, loff_t *pos) | ||
| 634 | { | ||
| 635 | if (*pos == 0) /* One time is enough ;-) */ | ||
| 636 | return seq; | ||
| 637 | |||
| 638 | return NULL; | ||
| 639 | } | ||
| 640 | |||
| 641 | static void *cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos) | ||
| 642 | { | ||
| 643 | (*pos)++; | ||
| 644 | |||
| 645 | return cpu_seq_start(seq, pos); | ||
| 646 | } | ||
| 647 | |||
| 648 | static void cpu_seq_stop(struct seq_file *seq, void *v) | ||
| 649 | { | ||
| 650 | } | ||
| 651 | |||
| 652 | static const struct seq_operations cpu_seq_ops = { | ||
| 653 | .start = cpu_seq_start, | ||
| 654 | .next = cpu_seq_next, | ||
| 655 | .stop = cpu_seq_stop, | ||
| 656 | .show = cpu_seq_show, | ||
| 657 | }; | ||
| 658 | |||
| 659 | static int cpu_seq_open(struct inode *inode, struct file *file) | ||
| 660 | { | ||
| 661 | struct cpu_private *priv = inode->i_private; | ||
| 662 | struct seq_file *seq; | ||
| 663 | int err; | ||
| 664 | |||
| 665 | err = seq_open(file, &cpu_seq_ops); | ||
| 666 | if (!err) { | ||
| 667 | seq = file->private_data; | ||
| 668 | seq->private = priv; | ||
| 669 | } | ||
| 670 | |||
| 671 | return err; | ||
| 672 | } | ||
| 673 | |||
| 674 | static int write_msr(struct cpu_private *priv, u64 val) | ||
| 675 | { | ||
| 676 | u32 low, high; | ||
| 677 | |||
| 678 | high = (val >> 32) & 0xffffffff; | ||
| 679 | low = val & 0xffffffff; | ||
| 680 | |||
| 681 | if (!wrmsr_safe_on_cpu(priv->cpu, priv->reg, low, high)) | ||
| 682 | return 0; | ||
| 683 | |||
| 684 | return -EPERM; | ||
| 685 | } | ||
| 686 | |||
| 687 | static int write_cpu_register(struct cpu_private *priv, const char *buf) | ||
| 688 | { | ||
| 689 | int ret = -EPERM; | ||
| 690 | u64 val; | ||
| 691 | |||
| 692 | ret = strict_strtoull(buf, 0, &val); | ||
| 693 | if (ret < 0) | ||
| 694 | return ret; | ||
| 695 | |||
| 696 | /* Supporting only MSRs */ | ||
| 697 | if (priv->type < CPU_TSS_BIT) | ||
| 698 | return write_msr(priv, val); | ||
| 699 | |||
| 700 | return ret; | ||
| 701 | } | ||
| 702 | |||
| 703 | static ssize_t cpu_write(struct file *file, const char __user *ubuf, | ||
| 704 | size_t count, loff_t *off) | ||
| 705 | { | ||
| 706 | struct seq_file *seq = file->private_data; | ||
| 707 | struct cpu_private *priv = seq->private; | ||
| 708 | char buf[19]; | ||
| 709 | |||
| 710 | if ((priv == NULL) || (count >= sizeof(buf))) | ||
| 711 | return -EINVAL; | ||
| 712 | |||
| 713 | if (copy_from_user(&buf, ubuf, count)) | ||
| 714 | return -EFAULT; | ||
| 715 | |||
| 716 | buf[count] = 0; | ||
| 717 | |||
| 718 | if ((cpu_base[priv->type].write) && (cpu_file[priv->file].write)) | ||
| 719 | if (!write_cpu_register(priv, buf)) | ||
| 720 | return count; | ||
| 721 | |||
| 722 | return -EACCES; | ||
| 723 | } | ||
| 724 | |||
| 725 | static const struct file_operations cpu_fops = { | ||
| 726 | .owner = THIS_MODULE, | ||
| 727 | .open = cpu_seq_open, | ||
| 728 | .read = seq_read, | ||
| 729 | .write = cpu_write, | ||
| 730 | .llseek = seq_lseek, | ||
| 731 | .release = seq_release, | ||
| 732 | }; | ||
| 733 | |||
| 734 | static int cpu_create_file(unsigned cpu, unsigned type, unsigned reg, | ||
| 735 | unsigned file, struct dentry *dentry) | ||
| 736 | { | ||
| 737 | struct cpu_private *priv = NULL; | ||
| 738 | |||
| 739 | /* Already initialized */ | ||
| 740 | if (file == CPU_INDEX_BIT) | ||
| 741 | if (per_cpu(cpu_arr[type].init, cpu)) | ||
| 742 | return 0; | ||
| 743 | |||
| 744 | priv = kzalloc(sizeof(*priv), GFP_KERNEL); | ||
| 745 | if (priv == NULL) | ||
| 746 | return -ENOMEM; | ||
| 747 | |||
| 748 | priv->cpu = cpu; | ||
| 749 | priv->type = type; | ||
| 750 | priv->reg = reg; | ||
| 751 | priv->file = file; | ||
| 752 | mutex_lock(&cpu_debug_lock); | ||
| 753 | per_cpu(priv_arr[type], cpu) = priv; | ||
| 754 | per_cpu(cpu_priv_count, cpu)++; | ||
| 755 | mutex_unlock(&cpu_debug_lock); | ||
| 756 | |||
| 757 | if (file) | ||
| 758 | debugfs_create_file(cpu_file[file].name, S_IRUGO, | ||
| 759 | dentry, (void *)priv, &cpu_fops); | ||
| 760 | else { | ||
| 761 | debugfs_create_file(cpu_base[type].name, S_IRUGO, | ||
| 762 | per_cpu(cpu_arr[type].dentry, cpu), | ||
| 763 | (void *)priv, &cpu_fops); | ||
| 764 | mutex_lock(&cpu_debug_lock); | ||
| 765 | per_cpu(cpu_arr[type].init, cpu) = 1; | ||
| 766 | mutex_unlock(&cpu_debug_lock); | ||
| 767 | } | ||
| 768 | |||
| 769 | return 0; | ||
| 770 | } | ||
| 771 | |||
| 772 | static int cpu_init_regfiles(unsigned cpu, unsigned int type, unsigned reg, | ||
| 773 | struct dentry *dentry) | ||
| 774 | { | ||
| 775 | unsigned file; | ||
| 776 | int err = 0; | ||
| 777 | |||
| 778 | for (file = 0; file < ARRAY_SIZE(cpu_file); file++) { | ||
| 779 | err = cpu_create_file(cpu, type, reg, file, dentry); | ||
| 780 | if (err) | ||
| 781 | return err; | ||
| 782 | } | ||
| 783 | |||
| 784 | return err; | ||
| 785 | } | ||
| 786 | |||
| 787 | static int cpu_init_msr(unsigned cpu, unsigned type, struct dentry *dentry) | ||
| 788 | { | ||
| 789 | struct dentry *cpu_dentry = NULL; | ||
| 790 | unsigned reg, reg_min, reg_max; | ||
| 791 | int i, range, err = 0; | ||
| 792 | char reg_dir[12]; | ||
| 793 | u32 low, high; | ||
| 794 | |||
| 795 | range = get_cpu_range_count(cpu); | ||
| 796 | |||
| 797 | for (i = 0; i < range; i++) { | ||
| 798 | if (!get_cpu_range(cpu, ®_min, ®_max, i, | ||
| 799 | cpu_base[type].flag)) | ||
| 800 | continue; | ||
| 801 | |||
| 802 | for (reg = reg_min; reg <= reg_max; reg++) { | ||
| 803 | if (rdmsr_safe_on_cpu(cpu, reg, &low, &high)) | ||
| 804 | continue; | ||
| 805 | |||
| 806 | sprintf(reg_dir, "0x%x", reg); | ||
| 807 | cpu_dentry = debugfs_create_dir(reg_dir, dentry); | ||
| 808 | err = cpu_init_regfiles(cpu, type, reg, cpu_dentry); | ||
| 809 | if (err) | ||
| 810 | return err; | ||
| 811 | } | ||
| 812 | } | ||
| 813 | |||
| 814 | return err; | ||
| 815 | } | ||
| 816 | |||
| 817 | static int cpu_init_allreg(unsigned cpu, struct dentry *dentry) | ||
| 818 | { | ||
| 819 | struct dentry *cpu_dentry = NULL; | ||
| 820 | unsigned type; | ||
| 821 | int err = 0; | ||
| 822 | |||
| 823 | for (type = 0; type < ARRAY_SIZE(cpu_base) - 1; type++) { | ||
| 824 | if (!is_typeflag_valid(cpu, cpu_base[type].flag)) | ||
| 825 | continue; | ||
| 826 | cpu_dentry = debugfs_create_dir(cpu_base[type].name, dentry); | ||
| 827 | per_cpu(cpu_arr[type].dentry, cpu) = cpu_dentry; | ||
| 828 | |||
| 829 | if (type < CPU_TSS_BIT) | ||
| 830 | err = cpu_init_msr(cpu, type, cpu_dentry); | ||
| 831 | else | ||
| 832 | err = cpu_create_file(cpu, type, 0, CPU_INDEX_BIT, | ||
| 833 | cpu_dentry); | ||
| 834 | if (err) | ||
| 835 | return err; | ||
| 836 | } | ||
| 837 | |||
| 838 | return err; | ||
| 839 | } | ||
| 840 | |||
| 841 | static int cpu_init_cpu(void) | ||
| 842 | { | ||
| 843 | struct dentry *cpu_dentry = NULL; | ||
| 844 | struct cpuinfo_x86 *cpui; | ||
| 845 | char cpu_dir[12]; | ||
| 846 | unsigned cpu; | ||
| 847 | int err = 0; | ||
| 848 | |||
| 849 | for (cpu = 0; cpu < nr_cpu_ids; cpu++) { | ||
| 850 | cpui = &cpu_data(cpu); | ||
| 851 | if (!cpu_has(cpui, X86_FEATURE_MSR)) | ||
| 852 | continue; | ||
| 853 | per_cpu(cpu_model, cpu) = ((cpui->x86_vendor << 16) | | ||
| 854 | (cpui->x86 << 8) | | ||
| 855 | (cpui->x86_model)); | ||
| 856 | per_cpu(cpu_modelflag, cpu) = get_cpu_modelflag(cpu); | ||
| 857 | |||
| 858 | sprintf(cpu_dir, "cpu%d", cpu); | ||
| 859 | cpu_dentry = debugfs_create_dir(cpu_dir, cpu_debugfs_dir); | ||
| 860 | err = cpu_init_allreg(cpu, cpu_dentry); | ||
| 861 | |||
| 862 | pr_info("cpu%d(%d) debug files %d\n", | ||
| 863 | cpu, nr_cpu_ids, per_cpu(cpu_priv_count, cpu)); | ||
| 864 | if (per_cpu(cpu_priv_count, cpu) > MAX_CPU_FILES) { | ||
| 865 | pr_err("Register files count %d exceeds limit %d\n", | ||
| 866 | per_cpu(cpu_priv_count, cpu), MAX_CPU_FILES); | ||
| 867 | per_cpu(cpu_priv_count, cpu) = MAX_CPU_FILES; | ||
| 868 | err = -ENFILE; | ||
| 869 | } | ||
| 870 | if (err) | ||
| 871 | return err; | ||
| 872 | } | ||
| 873 | |||
| 874 | return err; | ||
| 875 | } | ||
| 876 | |||
| 877 | static int __init cpu_debug_init(void) | ||
| 878 | { | ||
| 879 | cpu_debugfs_dir = debugfs_create_dir("cpu", arch_debugfs_dir); | ||
| 880 | |||
| 881 | return cpu_init_cpu(); | ||
| 882 | } | ||
| 883 | |||
| 884 | static void __exit cpu_debug_exit(void) | ||
| 885 | { | ||
| 886 | int i, cpu; | ||
| 887 | |||
| 888 | if (cpu_debugfs_dir) | ||
| 889 | debugfs_remove_recursive(cpu_debugfs_dir); | ||
| 890 | |||
| 891 | for (cpu = 0; cpu < nr_cpu_ids; cpu++) | ||
| 892 | for (i = 0; i < per_cpu(cpu_priv_count, cpu); i++) | ||
| 893 | kfree(per_cpu(priv_arr[i], cpu)); | ||
| 894 | } | ||
| 895 | |||
| 896 | module_init(cpu_debug_init); | ||
| 897 | module_exit(cpu_debug_exit); | ||
| 898 | |||
| 899 | MODULE_AUTHOR("Jaswinder Singh Rajput"); | ||
| 900 | MODULE_DESCRIPTION("CPU Debug module"); | ||
| 901 | MODULE_LICENSE("GPL"); | ||
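The struct cpu_private used throughout cpu_debug.c is defined in asm/cpu_debug.h, which is not part of this hunk. Judging from the accesses above (priv->cpu, priv->type, priv->reg, priv->file), its assumed shape is roughly:

	struct cpu_private {
		unsigned	cpu;	/* CPU the debugfs entry belongs to */
		unsigned	type;	/* index into cpu_base[] */
		unsigned	reg;	/* MSR address, for the per-MSR files */
		unsigned	file;	/* index into cpu_file[] (index/value) */
	};

The exact types are an assumption; only the field names and their use are taken from the code above.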
diff --git a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c index 3babe1f1e912..05209b5cc6ca 100644 --- a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c +++ b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c | |||
| @@ -33,7 +33,7 @@ | |||
| 33 | #include <linux/cpufreq.h> | 33 | #include <linux/cpufreq.h> |
| 34 | #include <linux/compiler.h> | 34 | #include <linux/compiler.h> |
| 35 | #include <linux/dmi.h> | 35 | #include <linux/dmi.h> |
| 36 | #include <linux/ftrace.h> | 36 | #include <trace/power.h> |
| 37 | 37 | ||
| 38 | #include <linux/acpi.h> | 38 | #include <linux/acpi.h> |
| 39 | #include <linux/io.h> | 39 | #include <linux/io.h> |
| @@ -72,6 +72,8 @@ struct acpi_cpufreq_data { | |||
| 72 | 72 | ||
| 73 | static DEFINE_PER_CPU(struct acpi_cpufreq_data *, drv_data); | 73 | static DEFINE_PER_CPU(struct acpi_cpufreq_data *, drv_data); |
| 74 | 74 | ||
| 75 | DEFINE_TRACE(power_mark); | ||
| 76 | |||
| 75 | /* acpi_perf_data is a pointer to percpu data. */ | 77 | /* acpi_perf_data is a pointer to percpu data. */ |
| 76 | static struct acpi_processor_performance *acpi_perf_data; | 78 | static struct acpi_processor_performance *acpi_perf_data; |
| 77 | 79 | ||
| @@ -601,7 +603,7 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy) | |||
| 601 | if (!data) | 603 | if (!data) |
| 602 | return -ENOMEM; | 604 | return -ENOMEM; |
| 603 | 605 | ||
| 604 | data->acpi_data = percpu_ptr(acpi_perf_data, cpu); | 606 | data->acpi_data = per_cpu_ptr(acpi_perf_data, cpu); |
| 605 | per_cpu(drv_data, cpu) = data; | 607 | per_cpu(drv_data, cpu) = data; |
| 606 | 608 | ||
| 607 | if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) | 609 | if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) |
diff --git a/arch/x86/kernel/cpu/cpufreq/e_powersaver.c b/arch/x86/kernel/cpu/cpufreq/e_powersaver.c index 3f83ea12c47a..35a257dd4bb7 100644 --- a/arch/x86/kernel/cpu/cpufreq/e_powersaver.c +++ b/arch/x86/kernel/cpu/cpufreq/e_powersaver.c | |||
| @@ -204,12 +204,12 @@ static int eps_cpu_init(struct cpufreq_policy *policy) | |||
| 204 | } | 204 | } |
| 205 | /* Enable Enhanced PowerSaver */ | 205 | /* Enable Enhanced PowerSaver */ |
| 206 | rdmsrl(MSR_IA32_MISC_ENABLE, val); | 206 | rdmsrl(MSR_IA32_MISC_ENABLE, val); |
| 207 | if (!(val & 1 << 16)) { | 207 | if (!(val & MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP)) { |
| 208 | val |= 1 << 16; | 208 | val |= MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP; |
| 209 | wrmsrl(MSR_IA32_MISC_ENABLE, val); | 209 | wrmsrl(MSR_IA32_MISC_ENABLE, val); |
| 210 | /* Can be locked at 0 */ | 210 | /* Can be locked at 0 */ |
| 211 | rdmsrl(MSR_IA32_MISC_ENABLE, val); | 211 | rdmsrl(MSR_IA32_MISC_ENABLE, val); |
| 212 | if (!(val & 1 << 16)) { | 212 | if (!(val & MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP)) { |
| 213 | printk(KERN_INFO "eps: Can't enable Enhanced PowerSaver\n"); | 213 | printk(KERN_INFO "eps: Can't enable Enhanced PowerSaver\n"); |
| 214 | return -ENODEV; | 214 | return -ENODEV; |
| 215 | } | 215 | } |
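MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP comes from asm/msr-index.h, which is not part of this diff. Since it replaces the open-coded (1 << 16) one-for-one here and in the speedstep-centrino.c hunk below, its definition is presumably along these lines:

	/* Assumed definition (not in this diff): bit 16 of IA32_MISC_ENABLE. */
	#define MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP	(1ULL << 16)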
diff --git a/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c b/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c index 41ed94915f97..6ac55bd341ae 100644 --- a/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c +++ b/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c | |||
| @@ -211,7 +211,7 @@ static int cpufreq_p4_cpu_init(struct cpufreq_policy *policy) | |||
| 211 | unsigned int i; | 211 | unsigned int i; |
| 212 | 212 | ||
| 213 | #ifdef CONFIG_SMP | 213 | #ifdef CONFIG_SMP |
| 214 | cpumask_copy(policy->cpus, &per_cpu(cpu_sibling_map, policy->cpu)); | 214 | cpumask_copy(policy->cpus, cpu_sibling_mask(policy->cpu)); |
| 215 | #endif | 215 | #endif |
| 216 | 216 | ||
| 217 | /* Errata workaround */ | 217 | /* Errata workaround */ |
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c index a15ac94e0b9b..4709ead2db52 100644 --- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c +++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c | |||
| @@ -54,7 +54,10 @@ static DEFINE_PER_CPU(struct powernow_k8_data *, powernow_data); | |||
| 54 | static int cpu_family = CPU_OPTERON; | 54 | static int cpu_family = CPU_OPTERON; |
| 55 | 55 | ||
| 56 | #ifndef CONFIG_SMP | 56 | #ifndef CONFIG_SMP |
| 57 | DEFINE_PER_CPU(cpumask_t, cpu_core_map); | 57 | static inline const struct cpumask *cpu_core_mask(int cpu) |
| 58 | { | ||
| 59 | return cpumask_of(0); | ||
| 60 | } | ||
| 58 | #endif | 61 | #endif |
| 59 | 62 | ||
| 60 | /* Return a frequency in MHz, given an input fid */ | 63 | /* Return a frequency in MHz, given an input fid */ |
| @@ -699,7 +702,7 @@ static int fill_powernow_table(struct powernow_k8_data *data, | |||
| 699 | 702 | ||
| 700 | dprintk("cfid 0x%x, cvid 0x%x\n", data->currfid, data->currvid); | 703 | dprintk("cfid 0x%x, cvid 0x%x\n", data->currfid, data->currvid); |
| 701 | data->powernow_table = powernow_table; | 704 | data->powernow_table = powernow_table; |
| 702 | if (first_cpu(per_cpu(cpu_core_map, data->cpu)) == data->cpu) | 705 | if (cpumask_first(cpu_core_mask(data->cpu)) == data->cpu) |
| 703 | print_basics(data); | 706 | print_basics(data); |
| 704 | 707 | ||
| 705 | for (j = 0; j < data->numps; j++) | 708 | for (j = 0; j < data->numps; j++) |
| @@ -862,7 +865,7 @@ static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data) | |||
| 862 | 865 | ||
| 863 | /* fill in data */ | 866 | /* fill in data */ |
| 864 | data->numps = data->acpi_data.state_count; | 867 | data->numps = data->acpi_data.state_count; |
| 865 | if (first_cpu(per_cpu(cpu_core_map, data->cpu)) == data->cpu) | 868 | if (cpumask_first(cpu_core_mask(data->cpu)) == data->cpu) |
| 866 | print_basics(data); | 869 | print_basics(data); |
| 867 | powernow_k8_acpi_pst_values(data, 0); | 870 | powernow_k8_acpi_pst_values(data, 0); |
| 868 | 871 | ||
| @@ -1300,7 +1303,7 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol) | |||
| 1300 | if (cpu_family == CPU_HW_PSTATE) | 1303 | if (cpu_family == CPU_HW_PSTATE) |
| 1301 | cpumask_copy(pol->cpus, cpumask_of(pol->cpu)); | 1304 | cpumask_copy(pol->cpus, cpumask_of(pol->cpu)); |
| 1302 | else | 1305 | else |
| 1303 | cpumask_copy(pol->cpus, &per_cpu(cpu_core_map, pol->cpu)); | 1306 | cpumask_copy(pol->cpus, cpu_core_mask(pol->cpu)); |
| 1304 | data->available_cores = pol->cpus; | 1307 | data->available_cores = pol->cpus; |
| 1305 | 1308 | ||
| 1306 | if (cpu_family == CPU_HW_PSTATE) | 1309 | if (cpu_family == CPU_HW_PSTATE) |
| @@ -1365,7 +1368,7 @@ static unsigned int powernowk8_get(unsigned int cpu) | |||
| 1365 | unsigned int khz = 0; | 1368 | unsigned int khz = 0; |
| 1366 | unsigned int first; | 1369 | unsigned int first; |
| 1367 | 1370 | ||
| 1368 | first = first_cpu(per_cpu(cpu_core_map, cpu)); | 1371 | first = cpumask_first(cpu_core_mask(cpu)); |
| 1369 | data = per_cpu(powernow_data, first); | 1372 | data = per_cpu(powernow_data, first); |
| 1370 | 1373 | ||
| 1371 | if (!data) | 1374 | if (!data) |
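On CONFIG_SMP kernels, the cpu_core_mask() helper used above is provided by the x86 smp/topology headers rather than this file; the !SMP stub added here only has to mimic it for the single-CPU case. A sketch of the SMP-side counterpart (header location and exact form are assumptions, not shown in this diff, though the old code above did read &per_cpu(cpu_core_map, cpu) directly):

	static inline const struct cpumask *cpu_core_mask(int cpu)
	{
		/* cpu_core_map is the per-CPU core-siblings mask kept by smpboot */
		return &per_cpu(cpu_core_map, cpu);
	}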
diff --git a/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c b/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c index f08998278a3a..c9f1fdc02830 100644 --- a/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c +++ b/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c | |||
| @@ -390,14 +390,14 @@ static int centrino_cpu_init(struct cpufreq_policy *policy) | |||
| 390 | enable it if not. */ | 390 | enable it if not. */ |
| 391 | rdmsr(MSR_IA32_MISC_ENABLE, l, h); | 391 | rdmsr(MSR_IA32_MISC_ENABLE, l, h); |
| 392 | 392 | ||
| 393 | if (!(l & (1<<16))) { | 393 | if (!(l & MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP)) { |
| 394 | l |= (1<<16); | 394 | l |= MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP; |
| 395 | dprintk("trying to enable Enhanced SpeedStep (%x)\n", l); | 395 | dprintk("trying to enable Enhanced SpeedStep (%x)\n", l); |
| 396 | wrmsr(MSR_IA32_MISC_ENABLE, l, h); | 396 | wrmsr(MSR_IA32_MISC_ENABLE, l, h); |
| 397 | 397 | ||
| 398 | /* check to see if it stuck */ | 398 | /* check to see if it stuck */ |
| 399 | rdmsr(MSR_IA32_MISC_ENABLE, l, h); | 399 | rdmsr(MSR_IA32_MISC_ENABLE, l, h); |
| 400 | if (!(l & (1<<16))) { | 400 | if (!(l & MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP)) { |
| 401 | printk(KERN_INFO PFX | 401 | printk(KERN_INFO PFX |
| 402 | "couldn't enable Enhanced SpeedStep\n"); | 402 | "couldn't enable Enhanced SpeedStep\n"); |
| 403 | return -ENODEV; | 403 | return -ENODEV; |
diff --git a/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c b/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c index 8bbb11adb315..016c1a4fa3fc 100644 --- a/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c +++ b/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c | |||
| @@ -321,7 +321,7 @@ static int speedstep_cpu_init(struct cpufreq_policy *policy) | |||
| 321 | 321 | ||
| 322 | /* only run on CPU to be set, or on its sibling */ | 322 | /* only run on CPU to be set, or on its sibling */ |
| 323 | #ifdef CONFIG_SMP | 323 | #ifdef CONFIG_SMP |
| 324 | cpumask_copy(policy->cpus, &per_cpu(cpu_sibling_map, policy->cpu)); | 324 | cpumask_copy(policy->cpus, cpu_sibling_mask(policy->cpu)); |
| 325 | #endif | 325 | #endif |
| 326 | 326 | ||
| 327 | cpus_allowed = current->cpus_allowed; | 327 | cpus_allowed = current->cpus_allowed; |
diff --git a/arch/x86/kernel/cpu/cyrix.c b/arch/x86/kernel/cpu/cyrix.c index ffd0f5ed071a..593171e967ef 100644 --- a/arch/x86/kernel/cpu/cyrix.c +++ b/arch/x86/kernel/cpu/cyrix.c | |||
| @@ -61,23 +61,23 @@ static void __cpuinit do_cyrix_devid(unsigned char *dir0, unsigned char *dir1) | |||
| 61 | */ | 61 | */ |
| 62 | static unsigned char Cx86_dir0_msb __cpuinitdata = 0; | 62 | static unsigned char Cx86_dir0_msb __cpuinitdata = 0; |
| 63 | 63 | ||
| 64 | static char Cx86_model[][9] __cpuinitdata = { | 64 | static const char __cpuinitconst Cx86_model[][9] = { |
| 65 | "Cx486", "Cx486", "5x86 ", "6x86", "MediaGX ", "6x86MX ", | 65 | "Cx486", "Cx486", "5x86 ", "6x86", "MediaGX ", "6x86MX ", |
| 66 | "M II ", "Unknown" | 66 | "M II ", "Unknown" |
| 67 | }; | 67 | }; |
| 68 | static char Cx486_name[][5] __cpuinitdata = { | 68 | static const char __cpuinitconst Cx486_name[][5] = { |
| 69 | "SLC", "DLC", "SLC2", "DLC2", "SRx", "DRx", | 69 | "SLC", "DLC", "SLC2", "DLC2", "SRx", "DRx", |
| 70 | "SRx2", "DRx2" | 70 | "SRx2", "DRx2" |
| 71 | }; | 71 | }; |
| 72 | static char Cx486S_name[][4] __cpuinitdata = { | 72 | static const char __cpuinitconst Cx486S_name[][4] = { |
| 73 | "S", "S2", "Se", "S2e" | 73 | "S", "S2", "Se", "S2e" |
| 74 | }; | 74 | }; |
| 75 | static char Cx486D_name[][4] __cpuinitdata = { | 75 | static const char __cpuinitconst Cx486D_name[][4] = { |
| 76 | "DX", "DX2", "?", "?", "?", "DX4" | 76 | "DX", "DX2", "?", "?", "?", "DX4" |
| 77 | }; | 77 | }; |
| 78 | static char Cx86_cb[] __cpuinitdata = "?.5x Core/Bus Clock"; | 78 | static char Cx86_cb[] __cpuinitdata = "?.5x Core/Bus Clock"; |
| 79 | static char cyrix_model_mult1[] __cpuinitdata = "12??43"; | 79 | static const char __cpuinitconst cyrix_model_mult1[] = "12??43"; |
| 80 | static char cyrix_model_mult2[] __cpuinitdata = "12233445"; | 80 | static const char __cpuinitconst cyrix_model_mult2[] = "12233445"; |
| 81 | 81 | ||
| 82 | /* | 82 | /* |
| 83 | * Reset the slow-loop (SLOP) bit on the 686(L) which is set by some old | 83 | * Reset the slow-loop (SLOP) bit on the 686(L) which is set by some old |
| @@ -435,7 +435,7 @@ static void __cpuinit cyrix_identify(struct cpuinfo_x86 *c) | |||
| 435 | } | 435 | } |
| 436 | } | 436 | } |
| 437 | 437 | ||
| 438 | static struct cpu_dev cyrix_cpu_dev __cpuinitdata = { | 438 | static const struct cpu_dev __cpuinitconst cyrix_cpu_dev = { |
| 439 | .c_vendor = "Cyrix", | 439 | .c_vendor = "Cyrix", |
| 440 | .c_ident = { "CyrixInstead" }, | 440 | .c_ident = { "CyrixInstead" }, |
| 441 | .c_early_init = early_init_cyrix, | 441 | .c_early_init = early_init_cyrix, |
| @@ -446,7 +446,7 @@ static struct cpu_dev cyrix_cpu_dev __cpuinitdata = { | |||
| 446 | 446 | ||
| 447 | cpu_dev_register(cyrix_cpu_dev); | 447 | cpu_dev_register(cyrix_cpu_dev); |
| 448 | 448 | ||
| 449 | static struct cpu_dev nsc_cpu_dev __cpuinitdata = { | 449 | static const struct cpu_dev __cpuinitconst nsc_cpu_dev = { |
| 450 | .c_vendor = "NSC", | 450 | .c_vendor = "NSC", |
| 451 | .c_ident = { "Geode by NSC" }, | 451 | .c_ident = { "Geode by NSC" }, |
| 452 | .c_init = init_nsc, | 452 | .c_init = init_nsc, |
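
The cyrix.c changes are one instance of a conversion applied across this series: init-only lookup tables that are never written gain const and move from __cpuinitdata to __cpuinitconst, so they land in read-only init memory that is discarded after boot. A hedged sketch with made-up contents (writable data such as Cx86_cb above stays __cpuinitdata):

static const char __cpuinitconst example_names[][8] = {
	"Alpha", "Beta", "Gamma",	/* read-only CPU setup strings */
};
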
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c index 5fff00c70de0..7437fa133c02 100644 --- a/arch/x86/kernel/cpu/intel.c +++ b/arch/x86/kernel/cpu/intel.c | |||
| @@ -14,6 +14,7 @@ | |||
| 14 | #include <asm/uaccess.h> | 14 | #include <asm/uaccess.h> |
| 15 | #include <asm/ds.h> | 15 | #include <asm/ds.h> |
| 16 | #include <asm/bugs.h> | 16 | #include <asm/bugs.h> |
| 17 | #include <asm/cpu.h> | ||
| 17 | 18 | ||
| 18 | #ifdef CONFIG_X86_64 | 19 | #ifdef CONFIG_X86_64 |
| 19 | #include <asm/topology.h> | 20 | #include <asm/topology.h> |
| @@ -25,7 +26,6 @@ | |||
| 25 | #ifdef CONFIG_X86_LOCAL_APIC | 26 | #ifdef CONFIG_X86_LOCAL_APIC |
| 26 | #include <asm/mpspec.h> | 27 | #include <asm/mpspec.h> |
| 27 | #include <asm/apic.h> | 28 | #include <asm/apic.h> |
| 28 | #include <mach_apic.h> | ||
| 29 | #endif | 29 | #endif |
| 30 | 30 | ||
| 31 | static void __cpuinit early_init_intel(struct cpuinfo_x86 *c) | 31 | static void __cpuinit early_init_intel(struct cpuinfo_x86 *c) |
| @@ -55,6 +55,11 @@ static void __cpuinit early_init_intel(struct cpuinfo_x86 *c) | |||
| 55 | c->x86_cache_alignment = 128; | 55 | c->x86_cache_alignment = 128; |
| 56 | #endif | 56 | #endif |
| 57 | 57 | ||
| 58 | /* CPUID workaround for 0F33/0F34 CPU */ | ||
| 59 | if (c->x86 == 0xF && c->x86_model == 0x3 | ||
| 60 | && (c->x86_mask == 0x3 || c->x86_mask == 0x4)) | ||
| 61 | c->x86_phys_bits = 36; | ||
| 62 | |||
| 58 | /* | 63 | /* |
| 59 | * c->x86_power is 8000_0007 edx. Bit 8 is TSC runs at constant rate | 64 | * c->x86_power is 8000_0007 edx. Bit 8 is TSC runs at constant rate |
| 60 | * with P/T states and does not stop in deep C-states. | 65 | * with P/T states and does not stop in deep C-states. |
| @@ -69,6 +74,18 @@ static void __cpuinit early_init_intel(struct cpuinfo_x86 *c) | |||
| 69 | sched_clock_stable = 1; | 74 | sched_clock_stable = 1; |
| 70 | } | 75 | } |
| 71 | 76 | ||
| 77 | /* | ||
| 78 | * There is a known erratum on Pentium III and Core Solo | ||
| 79 | * and Core Duo CPUs. | ||
| 80 | * " Page with PAT set to WC while associated MTRR is UC | ||
| 81 | * may consolidate to UC " | ||
| 82 | * Because of this erratum, it is better to stick with | ||
| 83 | * setting WC in MTRR rather than using PAT on these CPUs. | ||
| 84 | * | ||
| 85 | * Enable PAT WC only on P4, Core 2 or later CPUs. | ||
| 86 | */ | ||
| 87 | if (c->x86 == 6 && c->x86_model < 15) | ||
| 88 | clear_cpu_cap(c, X86_FEATURE_PAT); | ||
| 72 | } | 89 | } |
| 73 | 90 | ||
| 74 | #ifdef CONFIG_X86_32 | 91 | #ifdef CONFIG_X86_32 |
| @@ -105,6 +122,28 @@ static void __cpuinit trap_init_f00f_bug(void) | |||
| 105 | } | 122 | } |
| 106 | #endif | 123 | #endif |
| 107 | 124 | ||
| 125 | static void __cpuinit intel_smp_check(struct cpuinfo_x86 *c) | ||
| 126 | { | ||
| 127 | #ifdef CONFIG_SMP | ||
| 128 | /* called from identify_secondary_cpu()? */ | ||
| 129 | if (c->cpu_index == boot_cpu_id) | ||
| 130 | return; | ||
| 131 | |||
| 132 | /* | ||
| 133 | * Mask B, Pentium, but not Pentium MMX | ||
| 134 | */ | ||
| 135 | if (c->x86 == 5 && | ||
| 136 | c->x86_mask >= 1 && c->x86_mask <= 4 && | ||
| 137 | c->x86_model <= 3) { | ||
| 138 | /* | ||
| 139 | * Remember we have B step Pentia with bugs | ||
| 140 | */ | ||
| 141 | WARN_ONCE(1, "WARNING: SMP operation may be unreliable " | ||
| 142 | "with B stepping processors.\n"); | ||
| 143 | } | ||
| 144 | #endif | ||
| 145 | } | ||
| 146 | |||
| 108 | static void __cpuinit intel_workarounds(struct cpuinfo_x86 *c) | 147 | static void __cpuinit intel_workarounds(struct cpuinfo_x86 *c) |
| 109 | { | 148 | { |
| 110 | unsigned long lo, hi; | 149 | unsigned long lo, hi; |
| @@ -141,10 +180,10 @@ static void __cpuinit intel_workarounds(struct cpuinfo_x86 *c) | |||
| 141 | */ | 180 | */ |
| 142 | if ((c->x86 == 15) && (c->x86_model == 1) && (c->x86_mask == 1)) { | 181 | if ((c->x86 == 15) && (c->x86_model == 1) && (c->x86_mask == 1)) { |
| 143 | rdmsr(MSR_IA32_MISC_ENABLE, lo, hi); | 182 | rdmsr(MSR_IA32_MISC_ENABLE, lo, hi); |
| 144 | if ((lo & (1<<9)) == 0) { | 183 | if ((lo & MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE) == 0) { |
| 145 | printk (KERN_INFO "CPU: C0 stepping P4 Xeon detected.\n"); | 184 | printk (KERN_INFO "CPU: C0 stepping P4 Xeon detected.\n"); |
| 146 | printk (KERN_INFO "CPU: Disabling hardware prefetching (Errata 037)\n"); | 185 | printk (KERN_INFO "CPU: Disabling hardware prefetching (Errata 037)\n"); |
| 147 | lo |= (1<<9); /* Disable hw prefetching */ | 186 | lo |= MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE; |
| 148 | wrmsr (MSR_IA32_MISC_ENABLE, lo, hi); | 187 | wrmsr (MSR_IA32_MISC_ENABLE, lo, hi); |
| 149 | } | 188 | } |
| 150 | } | 189 | } |
| @@ -181,6 +220,8 @@ static void __cpuinit intel_workarounds(struct cpuinfo_x86 *c) | |||
| 181 | #ifdef CONFIG_X86_NUMAQ | 220 | #ifdef CONFIG_X86_NUMAQ |
| 182 | numaq_tsc_disable(); | 221 | numaq_tsc_disable(); |
| 183 | #endif | 222 | #endif |
| 223 | |||
| 224 | intel_smp_check(c); | ||
| 184 | } | 225 | } |
| 185 | #else | 226 | #else |
| 186 | static void __cpuinit intel_workarounds(struct cpuinfo_x86 *c) | 227 | static void __cpuinit intel_workarounds(struct cpuinfo_x86 *c) |
| @@ -380,7 +421,7 @@ static unsigned int __cpuinit intel_size_cache(struct cpuinfo_x86 *c, unsigned i | |||
| 380 | } | 421 | } |
| 381 | #endif | 422 | #endif |
| 382 | 423 | ||
| 383 | static struct cpu_dev intel_cpu_dev __cpuinitdata = { | 424 | static const struct cpu_dev __cpuinitconst intel_cpu_dev = { |
| 384 | .c_vendor = "Intel", | 425 | .c_vendor = "Intel", |
| 385 | .c_ident = { "GenuineIntel" }, | 426 | .c_ident = { "GenuineIntel" }, |
| 386 | #ifdef CONFIG_X86_32 | 427 | #ifdef CONFIG_X86_32 |
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c index da299eb85fc0..483eda96e102 100644 --- a/arch/x86/kernel/cpu/intel_cacheinfo.c +++ b/arch/x86/kernel/cpu/intel_cacheinfo.c | |||
| @@ -32,7 +32,7 @@ struct _cache_table | |||
| 32 | }; | 32 | }; |
| 33 | 33 | ||
| 34 | /* all the cache descriptor types we care about (no TLB or trace cache entries) */ | 34 | /* all the cache descriptor types we care about (no TLB or trace cache entries) */ |
| 35 | static struct _cache_table cache_table[] __cpuinitdata = | 35 | static const struct _cache_table __cpuinitconst cache_table[] = |
| 36 | { | 36 | { |
| 37 | { 0x06, LVL_1_INST, 8 }, /* 4-way set assoc, 32 byte line size */ | 37 | { 0x06, LVL_1_INST, 8 }, /* 4-way set assoc, 32 byte line size */ |
| 38 | { 0x08, LVL_1_INST, 16 }, /* 4-way set assoc, 32 byte line size */ | 38 | { 0x08, LVL_1_INST, 16 }, /* 4-way set assoc, 32 byte line size */ |
| @@ -147,10 +147,19 @@ struct _cpuid4_info { | |||
| 147 | union _cpuid4_leaf_ecx ecx; | 147 | union _cpuid4_leaf_ecx ecx; |
| 148 | unsigned long size; | 148 | unsigned long size; |
| 149 | unsigned long can_disable; | 149 | unsigned long can_disable; |
| 150 | cpumask_t shared_cpu_map; /* future?: only cpus/node is needed */ | 150 | DECLARE_BITMAP(shared_cpu_map, NR_CPUS); |
| 151 | }; | 151 | }; |
| 152 | 152 | ||
| 153 | #ifdef CONFIG_PCI | 153 | /* subset of above _cpuid4_info w/o shared_cpu_map */ |
| 154 | struct _cpuid4_info_regs { | ||
| 155 | union _cpuid4_leaf_eax eax; | ||
| 156 | union _cpuid4_leaf_ebx ebx; | ||
| 157 | union _cpuid4_leaf_ecx ecx; | ||
| 158 | unsigned long size; | ||
| 159 | unsigned long can_disable; | ||
| 160 | }; | ||
| 161 | |||
| 162 | #if defined(CONFIG_PCI) && defined(CONFIG_SYSFS) | ||
| 154 | static struct pci_device_id k8_nb_id[] = { | 163 | static struct pci_device_id k8_nb_id[] = { |
| 155 | { PCI_DEVICE(PCI_VENDOR_ID_AMD, 0x1103) }, | 164 | { PCI_DEVICE(PCI_VENDOR_ID_AMD, 0x1103) }, |
| 156 | { PCI_DEVICE(PCI_VENDOR_ID_AMD, 0x1203) }, | 165 | { PCI_DEVICE(PCI_VENDOR_ID_AMD, 0x1203) }, |
| @@ -197,15 +206,15 @@ union l3_cache { | |||
| 197 | unsigned val; | 206 | unsigned val; |
| 198 | }; | 207 | }; |
| 199 | 208 | ||
| 200 | static unsigned short assocs[] __cpuinitdata = { | 209 | static const unsigned short __cpuinitconst assocs[] = { |
| 201 | [1] = 1, [2] = 2, [4] = 4, [6] = 8, | 210 | [1] = 1, [2] = 2, [4] = 4, [6] = 8, |
| 202 | [8] = 16, [0xa] = 32, [0xb] = 48, | 211 | [8] = 16, [0xa] = 32, [0xb] = 48, |
| 203 | [0xc] = 64, | 212 | [0xc] = 64, |
| 204 | [0xf] = 0xffff // ?? | 213 | [0xf] = 0xffff // ?? |
| 205 | }; | 214 | }; |
| 206 | 215 | ||
| 207 | static unsigned char levels[] __cpuinitdata = { 1, 1, 2, 3 }; | 216 | static const unsigned char __cpuinitconst levels[] = { 1, 1, 2, 3 }; |
| 208 | static unsigned char types[] __cpuinitdata = { 1, 2, 3, 3 }; | 217 | static const unsigned char __cpuinitconst types[] = { 1, 2, 3, 3 }; |
| 209 | 218 | ||
| 210 | static void __cpuinit | 219 | static void __cpuinit |
| 211 | amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax, | 220 | amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax, |
| @@ -278,7 +287,7 @@ amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax, | |||
| 278 | } | 287 | } |
| 279 | 288 | ||
| 280 | static void __cpuinit | 289 | static void __cpuinit |
| 281 | amd_check_l3_disable(int index, struct _cpuid4_info *this_leaf) | 290 | amd_check_l3_disable(int index, struct _cpuid4_info_regs *this_leaf) |
| 282 | { | 291 | { |
| 283 | if (index < 3) | 292 | if (index < 3) |
| 284 | return; | 293 | return; |
| @@ -286,7 +295,8 @@ amd_check_l3_disable(int index, struct _cpuid4_info *this_leaf) | |||
| 286 | } | 295 | } |
| 287 | 296 | ||
| 288 | static int | 297 | static int |
| 289 | __cpuinit cpuid4_cache_lookup(int index, struct _cpuid4_info *this_leaf) | 298 | __cpuinit cpuid4_cache_lookup_regs(int index, |
| 299 | struct _cpuid4_info_regs *this_leaf) | ||
| 290 | { | 300 | { |
| 291 | union _cpuid4_leaf_eax eax; | 301 | union _cpuid4_leaf_eax eax; |
| 292 | union _cpuid4_leaf_ebx ebx; | 302 | union _cpuid4_leaf_ebx ebx; |
| @@ -353,11 +363,10 @@ unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c) | |||
| 353 | * parameters cpuid leaf to find the cache details | 363 | * parameters cpuid leaf to find the cache details |
| 354 | */ | 364 | */ |
| 355 | for (i = 0; i < num_cache_leaves; i++) { | 365 | for (i = 0; i < num_cache_leaves; i++) { |
| 356 | struct _cpuid4_info this_leaf; | 366 | struct _cpuid4_info_regs this_leaf; |
| 357 | |||
| 358 | int retval; | 367 | int retval; |
| 359 | 368 | ||
| 360 | retval = cpuid4_cache_lookup(i, &this_leaf); | 369 | retval = cpuid4_cache_lookup_regs(i, &this_leaf); |
| 361 | if (retval >= 0) { | 370 | if (retval >= 0) { |
| 362 | switch(this_leaf.eax.split.level) { | 371 | switch(this_leaf.eax.split.level) { |
| 363 | case 1: | 372 | case 1: |
| @@ -490,6 +499,8 @@ unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c) | |||
| 490 | return l2; | 499 | return l2; |
| 491 | } | 500 | } |
| 492 | 501 | ||
| 502 | #ifdef CONFIG_SYSFS | ||
| 503 | |||
| 493 | /* pointer to _cpuid4_info array (for each cache leaf) */ | 504 | /* pointer to _cpuid4_info array (for each cache leaf) */ |
| 494 | static DEFINE_PER_CPU(struct _cpuid4_info *, cpuid4_info); | 505 | static DEFINE_PER_CPU(struct _cpuid4_info *, cpuid4_info); |
| 495 | #define CPUID4_INFO_IDX(x, y) (&((per_cpu(cpuid4_info, x))[y])) | 506 | #define CPUID4_INFO_IDX(x, y) (&((per_cpu(cpuid4_info, x))[y])) |
| @@ -506,17 +517,20 @@ static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index) | |||
| 506 | num_threads_sharing = 1 + this_leaf->eax.split.num_threads_sharing; | 517 | num_threads_sharing = 1 + this_leaf->eax.split.num_threads_sharing; |
| 507 | 518 | ||
| 508 | if (num_threads_sharing == 1) | 519 | if (num_threads_sharing == 1) |
| 509 | cpu_set(cpu, this_leaf->shared_cpu_map); | 520 | cpumask_set_cpu(cpu, to_cpumask(this_leaf->shared_cpu_map)); |
| 510 | else { | 521 | else { |
| 511 | index_msb = get_count_order(num_threads_sharing); | 522 | index_msb = get_count_order(num_threads_sharing); |
| 512 | 523 | ||
| 513 | for_each_online_cpu(i) { | 524 | for_each_online_cpu(i) { |
| 514 | if (cpu_data(i).apicid >> index_msb == | 525 | if (cpu_data(i).apicid >> index_msb == |
| 515 | c->apicid >> index_msb) { | 526 | c->apicid >> index_msb) { |
| 516 | cpu_set(i, this_leaf->shared_cpu_map); | 527 | cpumask_set_cpu(i, |
| 528 | to_cpumask(this_leaf->shared_cpu_map)); | ||
| 517 | if (i != cpu && per_cpu(cpuid4_info, i)) { | 529 | if (i != cpu && per_cpu(cpuid4_info, i)) { |
| 518 | sibling_leaf = CPUID4_INFO_IDX(i, index); | 530 | sibling_leaf = |
| 519 | cpu_set(cpu, sibling_leaf->shared_cpu_map); | 531 | CPUID4_INFO_IDX(i, index); |
| 532 | cpumask_set_cpu(cpu, to_cpumask( | ||
| 533 | sibling_leaf->shared_cpu_map)); | ||
| 520 | } | 534 | } |
| 521 | } | 535 | } |
| 522 | } | 536 | } |
| @@ -528,9 +542,10 @@ static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index) | |||
| 528 | int sibling; | 542 | int sibling; |
| 529 | 543 | ||
| 530 | this_leaf = CPUID4_INFO_IDX(cpu, index); | 544 | this_leaf = CPUID4_INFO_IDX(cpu, index); |
| 531 | for_each_cpu_mask_nr(sibling, this_leaf->shared_cpu_map) { | 545 | for_each_cpu(sibling, to_cpumask(this_leaf->shared_cpu_map)) { |
| 532 | sibling_leaf = CPUID4_INFO_IDX(sibling, index); | 546 | sibling_leaf = CPUID4_INFO_IDX(sibling, index); |
| 533 | cpu_clear(cpu, sibling_leaf->shared_cpu_map); | 547 | cpumask_clear_cpu(cpu, |
| 548 | to_cpumask(sibling_leaf->shared_cpu_map)); | ||
| 534 | } | 549 | } |
| 535 | } | 550 | } |
| 536 | #else | 551 | #else |
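
The shared_cpu_map conversion above follows the pattern used throughout this patch set: the member becomes a raw bitmap sized by NR_CPUS, and every access wraps it in to_cpumask() so the struct cpumask API can be used without embedding a full cpumask_t in the structure. A small illustrative sketch (names are invented):

struct example_leaf {
	DECLARE_BITMAP(shared_cpu_map, NR_CPUS);	/* raw bitmap, not cpumask_t */
};

static void example_mark(struct example_leaf *leaf, int cpu)
{
	cpumask_set_cpu(cpu, to_cpumask(leaf->shared_cpu_map));
}

static int example_is_shared(struct example_leaf *leaf, int cpu)
{
	return cpumask_test_cpu(cpu, to_cpumask(leaf->shared_cpu_map));
}

static void example_unmark(struct example_leaf *leaf, int cpu)
{
	cpumask_clear_cpu(cpu, to_cpumask(leaf->shared_cpu_map));
}
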
| @@ -549,6 +564,15 @@ static void __cpuinit free_cache_attributes(unsigned int cpu) | |||
| 549 | per_cpu(cpuid4_info, cpu) = NULL; | 564 | per_cpu(cpuid4_info, cpu) = NULL; |
| 550 | } | 565 | } |
| 551 | 566 | ||
| 567 | static int | ||
| 568 | __cpuinit cpuid4_cache_lookup(int index, struct _cpuid4_info *this_leaf) | ||
| 569 | { | ||
| 570 | struct _cpuid4_info_regs *leaf_regs = | ||
| 571 | (struct _cpuid4_info_regs *)this_leaf; | ||
| 572 | |||
| 573 | return cpuid4_cache_lookup_regs(index, leaf_regs); | ||
| 574 | } | ||
| 575 | |||
| 552 | static void __cpuinit get_cpu_leaves(void *_retval) | 576 | static void __cpuinit get_cpu_leaves(void *_retval) |
| 553 | { | 577 | { |
| 554 | int j, *retval = _retval, cpu = smp_processor_id(); | 578 | int j, *retval = _retval, cpu = smp_processor_id(); |
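
The cpuid4_cache_lookup() wrapper added above depends on struct _cpuid4_info_regs being a strict prefix of struct _cpuid4_info: the same leading members in the same order, with only shared_cpu_map trailing in the larger struct. That is what makes the pointer cast legitimate. A reduced sketch of the idiom with illustrative types:

struct small {			/* common prefix, usable early in boot */
	unsigned long a;
	unsigned long b;
};

struct big {			/* superset, only needed by the sysfs code */
	unsigned long a;
	unsigned long b;
	unsigned long extra[4];
};

static unsigned long read_a(struct small *s)
{
	return s->a;
}

static unsigned long example(struct big *bp)
{
	/* valid because struct small's layout is a prefix of struct big */
	return read_a((struct small *)bp);
}
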
| @@ -590,8 +614,6 @@ static int __cpuinit detect_cache_attributes(unsigned int cpu) | |||
| 590 | return retval; | 614 | return retval; |
| 591 | } | 615 | } |
| 592 | 616 | ||
| 593 | #ifdef CONFIG_SYSFS | ||
| 594 | |||
| 595 | #include <linux/kobject.h> | 617 | #include <linux/kobject.h> |
| 596 | #include <linux/sysfs.h> | 618 | #include <linux/sysfs.h> |
| 597 | 619 | ||
| @@ -635,8 +657,9 @@ static ssize_t show_shared_cpu_map_func(struct _cpuid4_info *this_leaf, | |||
| 635 | int n = 0; | 657 | int n = 0; |
| 636 | 658 | ||
| 637 | if (len > 1) { | 659 | if (len > 1) { |
| 638 | cpumask_t *mask = &this_leaf->shared_cpu_map; | 660 | const struct cpumask *mask; |
| 639 | 661 | ||
| 662 | mask = to_cpumask(this_leaf->shared_cpu_map); | ||
| 640 | n = type? | 663 | n = type? |
| 641 | cpulist_scnprintf(buf, len-2, mask) : | 664 | cpulist_scnprintf(buf, len-2, mask) : |
| 642 | cpumask_scnprintf(buf, len-2, mask); | 665 | cpumask_scnprintf(buf, len-2, mask); |
| @@ -699,7 +722,8 @@ static struct pci_dev *get_k8_northbridge(int node) | |||
| 699 | 722 | ||
| 700 | static ssize_t show_cache_disable(struct _cpuid4_info *this_leaf, char *buf) | 723 | static ssize_t show_cache_disable(struct _cpuid4_info *this_leaf, char *buf) |
| 701 | { | 724 | { |
| 702 | int node = cpu_to_node(first_cpu(this_leaf->shared_cpu_map)); | 725 | const struct cpumask *mask = to_cpumask(this_leaf->shared_cpu_map); |
| 726 | int node = cpu_to_node(cpumask_first(mask)); | ||
| 703 | struct pci_dev *dev = NULL; | 727 | struct pci_dev *dev = NULL; |
| 704 | ssize_t ret = 0; | 728 | ssize_t ret = 0; |
| 705 | int i; | 729 | int i; |
| @@ -733,7 +757,8 @@ static ssize_t | |||
| 733 | store_cache_disable(struct _cpuid4_info *this_leaf, const char *buf, | 757 | store_cache_disable(struct _cpuid4_info *this_leaf, const char *buf, |
| 734 | size_t count) | 758 | size_t count) |
| 735 | { | 759 | { |
| 736 | int node = cpu_to_node(first_cpu(this_leaf->shared_cpu_map)); | 760 | const struct cpumask *mask = to_cpumask(this_leaf->shared_cpu_map); |
| 761 | int node = cpu_to_node(cpumask_first(mask)); | ||
| 737 | struct pci_dev *dev = NULL; | 762 | struct pci_dev *dev = NULL; |
| 738 | unsigned int ret, index, val; | 763 | unsigned int ret, index, val; |
| 739 | 764 | ||
| @@ -878,7 +903,7 @@ err_out: | |||
| 878 | return -ENOMEM; | 903 | return -ENOMEM; |
| 879 | } | 904 | } |
| 880 | 905 | ||
| 881 | static cpumask_t cache_dev_map = CPU_MASK_NONE; | 906 | static DECLARE_BITMAP(cache_dev_map, NR_CPUS); |
| 882 | 907 | ||
| 883 | /* Add/Remove cache interface for CPU device */ | 908 | /* Add/Remove cache interface for CPU device */ |
| 884 | static int __cpuinit cache_add_dev(struct sys_device * sys_dev) | 909 | static int __cpuinit cache_add_dev(struct sys_device * sys_dev) |
| @@ -918,7 +943,7 @@ static int __cpuinit cache_add_dev(struct sys_device * sys_dev) | |||
| 918 | } | 943 | } |
| 919 | kobject_uevent(&(this_object->kobj), KOBJ_ADD); | 944 | kobject_uevent(&(this_object->kobj), KOBJ_ADD); |
| 920 | } | 945 | } |
| 921 | cpu_set(cpu, cache_dev_map); | 946 | cpumask_set_cpu(cpu, to_cpumask(cache_dev_map)); |
| 922 | 947 | ||
| 923 | kobject_uevent(per_cpu(cache_kobject, cpu), KOBJ_ADD); | 948 | kobject_uevent(per_cpu(cache_kobject, cpu), KOBJ_ADD); |
| 924 | return 0; | 949 | return 0; |
| @@ -931,9 +956,9 @@ static void __cpuinit cache_remove_dev(struct sys_device * sys_dev) | |||
| 931 | 956 | ||
| 932 | if (per_cpu(cpuid4_info, cpu) == NULL) | 957 | if (per_cpu(cpuid4_info, cpu) == NULL) |
| 933 | return; | 958 | return; |
| 934 | if (!cpu_isset(cpu, cache_dev_map)) | 959 | if (!cpumask_test_cpu(cpu, to_cpumask(cache_dev_map))) |
| 935 | return; | 960 | return; |
| 936 | cpu_clear(cpu, cache_dev_map); | 961 | cpumask_clear_cpu(cpu, to_cpumask(cache_dev_map)); |
| 937 | 962 | ||
| 938 | for (i = 0; i < num_cache_leaves; i++) | 963 | for (i = 0; i < num_cache_leaves; i++) |
| 939 | kobject_put(&(INDEX_KOBJECT_PTR(cpu,i)->kobj)); | 964 | kobject_put(&(INDEX_KOBJECT_PTR(cpu,i)->kobj)); |
diff --git a/arch/x86/kernel/cpu/mcheck/Makefile b/arch/x86/kernel/cpu/mcheck/Makefile index d7d2323bbb69..b2f89829bbe8 100644 --- a/arch/x86/kernel/cpu/mcheck/Makefile +++ b/arch/x86/kernel/cpu/mcheck/Makefile | |||
| @@ -4,3 +4,4 @@ obj-$(CONFIG_X86_32) += k7.o p4.o p5.o p6.o winchip.o | |||
| 4 | obj-$(CONFIG_X86_MCE_INTEL) += mce_intel_64.o | 4 | obj-$(CONFIG_X86_MCE_INTEL) += mce_intel_64.o |
| 5 | obj-$(CONFIG_X86_MCE_AMD) += mce_amd_64.o | 5 | obj-$(CONFIG_X86_MCE_AMD) += mce_amd_64.o |
| 6 | obj-$(CONFIG_X86_MCE_NONFATAL) += non-fatal.o | 6 | obj-$(CONFIG_X86_MCE_NONFATAL) += non-fatal.o |
| 7 | obj-$(CONFIG_X86_MCE_THRESHOLD) += threshold.o | ||
diff --git a/arch/x86/kernel/cpu/mcheck/mce_32.c b/arch/x86/kernel/cpu/mcheck/mce_32.c index dfaebce3633e..3552119b091d 100644 --- a/arch/x86/kernel/cpu/mcheck/mce_32.c +++ b/arch/x86/kernel/cpu/mcheck/mce_32.c | |||
| @@ -60,20 +60,6 @@ void mcheck_init(struct cpuinfo_x86 *c) | |||
| 60 | } | 60 | } |
| 61 | } | 61 | } |
| 62 | 62 | ||
| 63 | static unsigned long old_cr4 __initdata; | ||
| 64 | |||
| 65 | void __init stop_mce(void) | ||
| 66 | { | ||
| 67 | old_cr4 = read_cr4(); | ||
| 68 | clear_in_cr4(X86_CR4_MCE); | ||
| 69 | } | ||
| 70 | |||
| 71 | void __init restart_mce(void) | ||
| 72 | { | ||
| 73 | if (old_cr4 & X86_CR4_MCE) | ||
| 74 | set_in_cr4(X86_CR4_MCE); | ||
| 75 | } | ||
| 76 | |||
| 77 | static int __init mcheck_disable(char *str) | 63 | static int __init mcheck_disable(char *str) |
| 78 | { | 64 | { |
| 79 | mce_disabled = 1; | 65 | mce_disabled = 1; |
diff --git a/arch/x86/kernel/cpu/mcheck/mce_64.c b/arch/x86/kernel/cpu/mcheck/mce_64.c index fe79985ce0f2..863f89568b1a 100644 --- a/arch/x86/kernel/cpu/mcheck/mce_64.c +++ b/arch/x86/kernel/cpu/mcheck/mce_64.c | |||
| @@ -3,6 +3,8 @@ | |||
| 3 | * K8 parts Copyright 2002,2003 Andi Kleen, SuSE Labs. | 3 | * K8 parts Copyright 2002,2003 Andi Kleen, SuSE Labs. |
| 4 | * Rest from unknown author(s). | 4 | * Rest from unknown author(s). |
| 5 | * 2004 Andi Kleen. Rewrote most of it. | 5 | * 2004 Andi Kleen. Rewrote most of it. |
| 6 | * Copyright 2008 Intel Corporation | ||
| 7 | * Author: Andi Kleen | ||
| 6 | */ | 8 | */ |
| 7 | 9 | ||
| 8 | #include <linux/init.h> | 10 | #include <linux/init.h> |
| @@ -24,6 +26,9 @@ | |||
| 24 | #include <linux/ctype.h> | 26 | #include <linux/ctype.h> |
| 25 | #include <linux/kmod.h> | 27 | #include <linux/kmod.h> |
| 26 | #include <linux/kdebug.h> | 28 | #include <linux/kdebug.h> |
| 29 | #include <linux/kobject.h> | ||
| 30 | #include <linux/sysfs.h> | ||
| 31 | #include <linux/ratelimit.h> | ||
| 27 | #include <asm/processor.h> | 32 | #include <asm/processor.h> |
| 28 | #include <asm/msr.h> | 33 | #include <asm/msr.h> |
| 29 | #include <asm/mce.h> | 34 | #include <asm/mce.h> |
| @@ -32,7 +37,6 @@ | |||
| 32 | #include <asm/idle.h> | 37 | #include <asm/idle.h> |
| 33 | 38 | ||
| 34 | #define MISC_MCELOG_MINOR 227 | 39 | #define MISC_MCELOG_MINOR 227 |
| 35 | #define NR_SYSFS_BANKS 6 | ||
| 36 | 40 | ||
| 37 | atomic_t mce_entry; | 41 | atomic_t mce_entry; |
| 38 | 42 | ||
| @@ -47,7 +51,7 @@ static int mce_dont_init; | |||
| 47 | */ | 51 | */ |
| 48 | static int tolerant = 1; | 52 | static int tolerant = 1; |
| 49 | static int banks; | 53 | static int banks; |
| 50 | static unsigned long bank[NR_SYSFS_BANKS] = { [0 ... NR_SYSFS_BANKS-1] = ~0UL }; | 54 | static u64 *bank; |
| 51 | static unsigned long notify_user; | 55 | static unsigned long notify_user; |
| 52 | static int rip_msr; | 56 | static int rip_msr; |
| 53 | static int mce_bootlog = -1; | 57 | static int mce_bootlog = -1; |
| @@ -58,6 +62,19 @@ static char *trigger_argv[2] = { trigger, NULL }; | |||
| 58 | 62 | ||
| 59 | static DECLARE_WAIT_QUEUE_HEAD(mce_wait); | 63 | static DECLARE_WAIT_QUEUE_HEAD(mce_wait); |
| 60 | 64 | ||
| 65 | /* MCA banks polled by the periodic polling timer for corrected events */ | ||
| 66 | DEFINE_PER_CPU(mce_banks_t, mce_poll_banks) = { | ||
| 67 | [0 ... BITS_TO_LONGS(MAX_NR_BANKS)-1] = ~0UL | ||
| 68 | }; | ||
| 69 | |||
| 70 | /* Do initial initialization of a struct mce */ | ||
| 71 | void mce_setup(struct mce *m) | ||
| 72 | { | ||
| 73 | memset(m, 0, sizeof(struct mce)); | ||
| 74 | m->cpu = smp_processor_id(); | ||
| 75 | rdtscll(m->tsc); | ||
| 76 | } | ||
| 77 | |||
| 61 | /* | 78 | /* |
| 62 | * Lockless MCE logging infrastructure. | 79 | * Lockless MCE logging infrastructure. |
| 63 | * This avoids deadlocks on printk locks without having to break locks. Also | 80 | * This avoids deadlocks on printk locks without having to break locks. Also |
| @@ -119,11 +136,11 @@ static void print_mce(struct mce *m) | |||
| 119 | print_symbol("{%s}", m->ip); | 136 | print_symbol("{%s}", m->ip); |
| 120 | printk("\n"); | 137 | printk("\n"); |
| 121 | } | 138 | } |
| 122 | printk(KERN_EMERG "TSC %Lx ", m->tsc); | 139 | printk(KERN_EMERG "TSC %llx ", m->tsc); |
| 123 | if (m->addr) | 140 | if (m->addr) |
| 124 | printk("ADDR %Lx ", m->addr); | 141 | printk("ADDR %llx ", m->addr); |
| 125 | if (m->misc) | 142 | if (m->misc) |
| 126 | printk("MISC %Lx ", m->misc); | 143 | printk("MISC %llx ", m->misc); |
| 127 | printk("\n"); | 144 | printk("\n"); |
| 128 | printk(KERN_EMERG "This is not a software problem!\n"); | 145 | printk(KERN_EMERG "This is not a software problem!\n"); |
| 129 | printk(KERN_EMERG "Run through mcelog --ascii to decode " | 146 | printk(KERN_EMERG "Run through mcelog --ascii to decode " |
| @@ -149,8 +166,10 @@ static void mce_panic(char *msg, struct mce *backup, unsigned long start) | |||
| 149 | panic(msg); | 166 | panic(msg); |
| 150 | } | 167 | } |
| 151 | 168 | ||
| 152 | static int mce_available(struct cpuinfo_x86 *c) | 169 | int mce_available(struct cpuinfo_x86 *c) |
| 153 | { | 170 | { |
| 171 | if (mce_dont_init) | ||
| 172 | return 0; | ||
| 154 | return cpu_has(c, X86_FEATURE_MCE) && cpu_has(c, X86_FEATURE_MCA); | 173 | return cpu_has(c, X86_FEATURE_MCE) && cpu_has(c, X86_FEATURE_MCA); |
| 155 | } | 174 | } |
| 156 | 175 | ||
| @@ -172,7 +191,77 @@ static inline void mce_get_rip(struct mce *m, struct pt_regs *regs) | |||
| 172 | } | 191 | } |
| 173 | 192 | ||
| 174 | /* | 193 | /* |
| 175 | * The actual machine check handler | 194 | * Poll for corrected events or events that happened before reset. |
| 195 | * Those are just logged through /dev/mcelog. | ||
| 196 | * | ||
| 197 | * This is executed in standard interrupt context. | ||
| 198 | */ | ||
| 199 | void machine_check_poll(enum mcp_flags flags, mce_banks_t *b) | ||
| 200 | { | ||
| 201 | struct mce m; | ||
| 202 | int i; | ||
| 203 | |||
| 204 | mce_setup(&m); | ||
| 205 | |||
| 206 | rdmsrl(MSR_IA32_MCG_STATUS, m.mcgstatus); | ||
| 207 | for (i = 0; i < banks; i++) { | ||
| 208 | if (!bank[i] || !test_bit(i, *b)) | ||
| 209 | continue; | ||
| 210 | |||
| 211 | m.misc = 0; | ||
| 212 | m.addr = 0; | ||
| 213 | m.bank = i; | ||
| 214 | m.tsc = 0; | ||
| 215 | |||
| 216 | barrier(); | ||
| 217 | rdmsrl(MSR_IA32_MC0_STATUS + i*4, m.status); | ||
| 218 | if (!(m.status & MCI_STATUS_VAL)) | ||
| 219 | continue; | ||
| 220 | |||
| 221 | /* | ||
| 222 | * Uncorrected events are handled by the exception handler | ||
| 223 | * when it is enabled. But when the exception is disabled log | ||
| 224 | * everything. | ||
| 225 | * | ||
| 226 | * TBD do the same check for MCI_STATUS_EN here? | ||
| 227 | */ | ||
| 228 | if ((m.status & MCI_STATUS_UC) && !(flags & MCP_UC)) | ||
| 229 | continue; | ||
| 230 | |||
| 231 | if (m.status & MCI_STATUS_MISCV) | ||
| 232 | rdmsrl(MSR_IA32_MC0_MISC + i*4, m.misc); | ||
| 233 | if (m.status & MCI_STATUS_ADDRV) | ||
| 234 | rdmsrl(MSR_IA32_MC0_ADDR + i*4, m.addr); | ||
| 235 | |||
| 236 | if (!(flags & MCP_TIMESTAMP)) | ||
| 237 | m.tsc = 0; | ||
| 238 | /* | ||
| 239 | * Don't get the IP here because it's unlikely to | ||
| 240 | * have anything to do with the actual error location. | ||
| 241 | */ | ||
| 242 | |||
| 243 | mce_log(&m); | ||
| 244 | add_taint(TAINT_MACHINE_CHECK); | ||
| 245 | |||
| 246 | /* | ||
| 247 | * Clear state for this bank. | ||
| 248 | */ | ||
| 249 | wrmsrl(MSR_IA32_MC0_STATUS+4*i, 0); | ||
| 250 | } | ||
| 251 | |||
| 252 | /* | ||
| 253 | * Don't clear MCG_STATUS here because it's only defined for | ||
| 254 | * exceptions. | ||
| 255 | */ | ||
| 256 | } | ||
| 257 | |||
| 258 | /* | ||
| 259 | * The actual machine check handler. This only handles real | ||
| 260 | * exceptions when something got corrupted coming in through int 18. | ||
| 261 | * | ||
| 262 | * This is executed in NMI context not subject to normal locking rules. This | ||
| 263 | * implies that most kernel services cannot be safely used. Don't even | ||
| 264 | * think about putting a printk in there! | ||
| 176 | */ | 265 | */ |
| 177 | void do_machine_check(struct pt_regs * regs, long error_code) | 266 | void do_machine_check(struct pt_regs * regs, long error_code) |
| 178 | { | 267 | { |
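
machine_check_poll() introduced in the hunk above is also how boot-time leftovers get flushed: mce_init() later in this patch fills a bank mask and polls every bank, accepting uncorrected (MCP_UC) records, instead of calling do_machine_check() with magic error codes. Roughly, as a sketch mirroring that caller:

static void example_flush_leftover_events(void)
{
	mce_banks_t all_banks;

	/* poll every bank once; the records predate this boot, so no
	 * timestamp is requested */
	bitmap_fill(all_banks, MAX_NR_BANKS);
	machine_check_poll(MCP_UC, &all_banks);
}
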
| @@ -190,17 +279,18 @@ void do_machine_check(struct pt_regs * regs, long error_code) | |||
| 190 | * error. | 279 | * error. |
| 191 | */ | 280 | */ |
| 192 | int kill_it = 0; | 281 | int kill_it = 0; |
| 282 | DECLARE_BITMAP(toclear, MAX_NR_BANKS); | ||
| 193 | 283 | ||
| 194 | atomic_inc(&mce_entry); | 284 | atomic_inc(&mce_entry); |
| 195 | 285 | ||
| 196 | if ((regs | 286 | if (notify_die(DIE_NMI, "machine check", regs, error_code, |
| 197 | && notify_die(DIE_NMI, "machine check", regs, error_code, | ||
| 198 | 18, SIGKILL) == NOTIFY_STOP) | 287 | 18, SIGKILL) == NOTIFY_STOP) |
| 199 | || !banks) | 288 | goto out2; |
| 289 | if (!banks) | ||
| 200 | goto out2; | 290 | goto out2; |
| 201 | 291 | ||
| 202 | memset(&m, 0, sizeof(struct mce)); | 292 | mce_setup(&m); |
| 203 | m.cpu = smp_processor_id(); | 293 | |
| 204 | rdmsrl(MSR_IA32_MCG_STATUS, m.mcgstatus); | 294 | rdmsrl(MSR_IA32_MCG_STATUS, m.mcgstatus); |
| 205 | /* if the restart IP is not valid, we're done for */ | 295 | /* if the restart IP is not valid, we're done for */ |
| 206 | if (!(m.mcgstatus & MCG_STATUS_RIPV)) | 296 | if (!(m.mcgstatus & MCG_STATUS_RIPV)) |
| @@ -210,18 +300,32 @@ void do_machine_check(struct pt_regs * regs, long error_code) | |||
| 210 | barrier(); | 300 | barrier(); |
| 211 | 301 | ||
| 212 | for (i = 0; i < banks; i++) { | 302 | for (i = 0; i < banks; i++) { |
| 213 | if (i < NR_SYSFS_BANKS && !bank[i]) | 303 | __clear_bit(i, toclear); |
| 304 | if (!bank[i]) | ||
| 214 | continue; | 305 | continue; |
| 215 | 306 | ||
| 216 | m.misc = 0; | 307 | m.misc = 0; |
| 217 | m.addr = 0; | 308 | m.addr = 0; |
| 218 | m.bank = i; | 309 | m.bank = i; |
| 219 | m.tsc = 0; | ||
| 220 | 310 | ||
| 221 | rdmsrl(MSR_IA32_MC0_STATUS + i*4, m.status); | 311 | rdmsrl(MSR_IA32_MC0_STATUS + i*4, m.status); |
| 222 | if ((m.status & MCI_STATUS_VAL) == 0) | 312 | if ((m.status & MCI_STATUS_VAL) == 0) |
| 223 | continue; | 313 | continue; |
| 224 | 314 | ||
| 315 | /* | ||
| 316 | * Non-uncorrected errors are handled by machine_check_poll. | ||
| 317 | * Leave them alone. | ||
| 318 | */ | ||
| 319 | if ((m.status & MCI_STATUS_UC) == 0) | ||
| 320 | continue; | ||
| 321 | |||
| 322 | /* | ||
| 323 | * Set taint even when machine check was not enabled. | ||
| 324 | */ | ||
| 325 | add_taint(TAINT_MACHINE_CHECK); | ||
| 326 | |||
| 327 | __set_bit(i, toclear); | ||
| 328 | |||
| 225 | if (m.status & MCI_STATUS_EN) { | 329 | if (m.status & MCI_STATUS_EN) { |
| 226 | /* if PCC was set, there's no way out */ | 330 | /* if PCC was set, there's no way out */ |
| 227 | no_way_out |= !!(m.status & MCI_STATUS_PCC); | 331 | no_way_out |= !!(m.status & MCI_STATUS_PCC); |
| @@ -235,6 +339,12 @@ void do_machine_check(struct pt_regs * regs, long error_code) | |||
| 235 | no_way_out = 1; | 339 | no_way_out = 1; |
| 236 | kill_it = 1; | 340 | kill_it = 1; |
| 237 | } | 341 | } |
| 342 | } else { | ||
| 343 | /* | ||
| 344 | * Machine check event was not enabled. Clear, but | ||
| 345 | * ignore. | ||
| 346 | */ | ||
| 347 | continue; | ||
| 238 | } | 348 | } |
| 239 | 349 | ||
| 240 | if (m.status & MCI_STATUS_MISCV) | 350 | if (m.status & MCI_STATUS_MISCV) |
| @@ -243,10 +353,7 @@ void do_machine_check(struct pt_regs * regs, long error_code) | |||
| 243 | rdmsrl(MSR_IA32_MC0_ADDR + i*4, m.addr); | 353 | rdmsrl(MSR_IA32_MC0_ADDR + i*4, m.addr); |
| 244 | 354 | ||
| 245 | mce_get_rip(&m, regs); | 355 | mce_get_rip(&m, regs); |
| 246 | if (error_code >= 0) | 356 | mce_log(&m); |
| 247 | rdtscll(m.tsc); | ||
| 248 | if (error_code != -2) | ||
| 249 | mce_log(&m); | ||
| 250 | 357 | ||
| 251 | /* Did this bank cause the exception? */ | 358 | /* Did this bank cause the exception? */ |
| 252 | /* Assume that the bank with uncorrectable errors did it, | 359 | /* Assume that the bank with uncorrectable errors did it, |
| @@ -255,14 +362,8 @@ void do_machine_check(struct pt_regs * regs, long error_code) | |||
| 255 | panicm = m; | 362 | panicm = m; |
| 256 | panicm_found = 1; | 363 | panicm_found = 1; |
| 257 | } | 364 | } |
| 258 | |||
| 259 | add_taint(TAINT_MACHINE_CHECK); | ||
| 260 | } | 365 | } |
| 261 | 366 | ||
| 262 | /* Never do anything final in the polling timer */ | ||
| 263 | if (!regs) | ||
| 264 | goto out; | ||
| 265 | |||
| 266 | /* If we didn't find an uncorrectable error, pick | 367 | /* If we didn't find an uncorrectable error, pick |
| 267 | the last one (shouldn't happen, just being safe). */ | 368 | the last one (shouldn't happen, just being safe). */ |
| 268 | if (!panicm_found) | 369 | if (!panicm_found) |
| @@ -309,10 +410,11 @@ void do_machine_check(struct pt_regs * regs, long error_code) | |||
| 309 | /* notify userspace ASAP */ | 410 | /* notify userspace ASAP */ |
| 310 | set_thread_flag(TIF_MCE_NOTIFY); | 411 | set_thread_flag(TIF_MCE_NOTIFY); |
| 311 | 412 | ||
| 312 | out: | ||
| 313 | /* the last thing we do is clear state */ | 413 | /* the last thing we do is clear state */ |
| 314 | for (i = 0; i < banks; i++) | 414 | for (i = 0; i < banks; i++) { |
| 315 | wrmsrl(MSR_IA32_MC0_STATUS+4*i, 0); | 415 | if (test_bit(i, toclear)) |
| 416 | wrmsrl(MSR_IA32_MC0_STATUS+4*i, 0); | ||
| 417 | } | ||
| 316 | wrmsrl(MSR_IA32_MCG_STATUS, 0); | 418 | wrmsrl(MSR_IA32_MCG_STATUS, 0); |
| 317 | out2: | 419 | out2: |
| 318 | atomic_dec(&mce_entry); | 420 | atomic_dec(&mce_entry); |
| @@ -332,15 +434,13 @@ void do_machine_check(struct pt_regs * regs, long error_code) | |||
| 332 | * and historically has been the register value of the | 434 | * and historically has been the register value of the |
| 333 | * MSR_IA32_THERMAL_STATUS (Intel) msr. | 435 | * MSR_IA32_THERMAL_STATUS (Intel) msr. |
| 334 | */ | 436 | */ |
| 335 | void mce_log_therm_throt_event(unsigned int cpu, __u64 status) | 437 | void mce_log_therm_throt_event(__u64 status) |
| 336 | { | 438 | { |
| 337 | struct mce m; | 439 | struct mce m; |
| 338 | 440 | ||
| 339 | memset(&m, 0, sizeof(m)); | 441 | mce_setup(&m); |
| 340 | m.cpu = cpu; | ||
| 341 | m.bank = MCE_THERMAL_BANK; | 442 | m.bank = MCE_THERMAL_BANK; |
| 342 | m.status = status; | 443 | m.status = status; |
| 343 | rdtscll(m.tsc); | ||
| 344 | mce_log(&m); | 444 | mce_log(&m); |
| 345 | } | 445 | } |
| 346 | #endif /* CONFIG_X86_MCE_INTEL */ | 446 | #endif /* CONFIG_X86_MCE_INTEL */ |
| @@ -353,18 +453,18 @@ void mce_log_therm_throt_event(unsigned int cpu, __u64 status) | |||
| 353 | 453 | ||
| 354 | static int check_interval = 5 * 60; /* 5 minutes */ | 454 | static int check_interval = 5 * 60; /* 5 minutes */ |
| 355 | static int next_interval; /* in jiffies */ | 455 | static int next_interval; /* in jiffies */ |
| 356 | static void mcheck_timer(struct work_struct *work); | 456 | static void mcheck_timer(unsigned long); |
| 357 | static DECLARE_DELAYED_WORK(mcheck_work, mcheck_timer); | 457 | static DEFINE_PER_CPU(struct timer_list, mce_timer); |
| 358 | 458 | ||
| 359 | static void mcheck_check_cpu(void *info) | 459 | static void mcheck_timer(unsigned long data) |
| 360 | { | 460 | { |
| 361 | if (mce_available(¤t_cpu_data)) | 461 | struct timer_list *t = &per_cpu(mce_timer, data); |
| 362 | do_machine_check(NULL, 0); | ||
| 363 | } | ||
| 364 | 462 | ||
| 365 | static void mcheck_timer(struct work_struct *work) | 463 | WARN_ON(smp_processor_id() != data); |
| 366 | { | 464 | |
| 367 | on_each_cpu(mcheck_check_cpu, NULL, 1); | 465 | if (mce_available(¤t_cpu_data)) |
| 466 | machine_check_poll(MCP_TIMESTAMP, | ||
| 467 | &__get_cpu_var(mce_poll_banks)); | ||
| 368 | 468 | ||
| 369 | /* | 469 | /* |
| 370 | * Alert userspace if needed. If we logged an MCE, reduce the | 470 | * Alert userspace if needed. If we logged an MCE, reduce the |
| @@ -377,31 +477,41 @@ static void mcheck_timer(struct work_struct *work) | |||
| 377 | (int)round_jiffies_relative(check_interval*HZ)); | 477 | (int)round_jiffies_relative(check_interval*HZ)); |
| 378 | } | 478 | } |
| 379 | 479 | ||
| 380 | schedule_delayed_work(&mcheck_work, next_interval); | 480 | t->expires = jiffies + next_interval; |
| 481 | add_timer(t); | ||
| 482 | } | ||
| 483 | |||
| 484 | static void mce_do_trigger(struct work_struct *work) | ||
| 485 | { | ||
| 486 | call_usermodehelper(trigger, trigger_argv, NULL, UMH_NO_WAIT); | ||
| 381 | } | 487 | } |
| 382 | 488 | ||
| 489 | static DECLARE_WORK(mce_trigger_work, mce_do_trigger); | ||
| 490 | |||
| 383 | /* | 491 | /* |
| 384 | * This is only called from process context. This is where we do | 492 | * Notify the user(s) about new machine check events. |
| 385 | * anything we need to alert userspace about new MCEs. This is called | 493 | * Can be called from interrupt context, but not from machine check/NMI |
| 386 | * directly from the poller and also from entry.S and idle, thanks to | 494 | * context. |
| 387 | * TIF_MCE_NOTIFY. | ||
| 388 | */ | 495 | */ |
| 389 | int mce_notify_user(void) | 496 | int mce_notify_user(void) |
| 390 | { | 497 | { |
| 498 | /* Not more than two messages every minute */ | ||
| 499 | static DEFINE_RATELIMIT_STATE(ratelimit, 60*HZ, 2); | ||
| 500 | |||
| 391 | clear_thread_flag(TIF_MCE_NOTIFY); | 501 | clear_thread_flag(TIF_MCE_NOTIFY); |
| 392 | if (test_and_clear_bit(0, ¬ify_user)) { | 502 | if (test_and_clear_bit(0, ¬ify_user)) { |
| 393 | static unsigned long last_print; | ||
| 394 | unsigned long now = jiffies; | ||
| 395 | |||
| 396 | wake_up_interruptible(&mce_wait); | 503 | wake_up_interruptible(&mce_wait); |
| 397 | if (trigger[0]) | ||
| 398 | call_usermodehelper(trigger, trigger_argv, NULL, | ||
| 399 | UMH_NO_WAIT); | ||
| 400 | 504 | ||
| 401 | if (time_after_eq(now, last_print + (check_interval*HZ))) { | 505 | /* |
| 402 | last_print = now; | 506 | * There is no risk of missing notifications because |
| 507 | * work_pending is always cleared before the function is | ||
| 508 | * executed. | ||
| 509 | */ | ||
| 510 | if (trigger[0] && !work_pending(&mce_trigger_work)) | ||
| 511 | schedule_work(&mce_trigger_work); | ||
| 512 | |||
| 513 | if (__ratelimit(&ratelimit)) | ||
| 403 | printk(KERN_INFO "Machine check events logged\n"); | 514 | printk(KERN_INFO "Machine check events logged\n"); |
| 404 | } | ||
| 405 | 515 | ||
| 406 | return 1; | 516 | return 1; |
| 407 | } | 517 | } |
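
Two generic idioms carry the notification rework above: the usermode helper, which may sleep, is deferred to a work item so mce_notify_user() can run from interrupt context, and the log message is throttled with the ratelimit helper instead of a hand-rolled jiffies check. A stand-alone sketch with illustrative names and a hypothetical trigger path:

static void example_do_trigger(struct work_struct *work)
{
	static char *argv[2] = { "/sbin/example-trigger", NULL };

	/* runs in process context, may sleep */
	call_usermodehelper(argv[0], argv, NULL, UMH_NO_WAIT);
}

static DECLARE_WORK(example_trigger_work, example_do_trigger);

static void example_notify(void)
{
	/* at most two messages per minute */
	static DEFINE_RATELIMIT_STATE(rs, 60*HZ, 2);

	if (!work_pending(&example_trigger_work))
		schedule_work(&example_trigger_work);

	if (__ratelimit(&rs))
		printk(KERN_INFO "example: events logged\n");
}
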
| @@ -425,63 +535,78 @@ static struct notifier_block mce_idle_notifier = { | |||
| 425 | 535 | ||
| 426 | static __init int periodic_mcheck_init(void) | 536 | static __init int periodic_mcheck_init(void) |
| 427 | { | 537 | { |
| 428 | next_interval = check_interval * HZ; | 538 | idle_notifier_register(&mce_idle_notifier); |
| 429 | if (next_interval) | 539 | return 0; |
| 430 | schedule_delayed_work(&mcheck_work, | ||
| 431 | round_jiffies_relative(next_interval)); | ||
| 432 | idle_notifier_register(&mce_idle_notifier); | ||
| 433 | return 0; | ||
| 434 | } | 540 | } |
| 435 | __initcall(periodic_mcheck_init); | 541 | __initcall(periodic_mcheck_init); |
| 436 | 542 | ||
| 437 | |||
| 438 | /* | 543 | /* |
| 439 | * Initialize Machine Checks for a CPU. | 544 | * Initialize Machine Checks for a CPU. |
| 440 | */ | 545 | */ |
| 441 | static void mce_init(void *dummy) | 546 | static int mce_cap_init(void) |
| 442 | { | 547 | { |
| 443 | u64 cap; | 548 | u64 cap; |
| 444 | int i; | 549 | unsigned b; |
| 445 | 550 | ||
| 446 | rdmsrl(MSR_IA32_MCG_CAP, cap); | 551 | rdmsrl(MSR_IA32_MCG_CAP, cap); |
| 447 | banks = cap & 0xff; | 552 | b = cap & 0xff; |
| 448 | if (banks > MCE_EXTENDED_BANK) { | 553 | if (b > MAX_NR_BANKS) { |
| 449 | banks = MCE_EXTENDED_BANK; | 554 | printk(KERN_WARNING |
| 450 | printk(KERN_INFO "MCE: warning: using only %d banks\n", | 555 | "MCE: Using only %u machine check banks out of %u\n", |
| 451 | MCE_EXTENDED_BANK); | 556 | MAX_NR_BANKS, b); |
| 557 | b = MAX_NR_BANKS; | ||
| 452 | } | 558 | } |
| 559 | |||
| 560 | /* Don't support asymmetric configurations today */ | ||
| 561 | WARN_ON(banks != 0 && b != banks); | ||
| 562 | banks = b; | ||
| 563 | if (!bank) { | ||
| 564 | bank = kmalloc(banks * sizeof(u64), GFP_KERNEL); | ||
| 565 | if (!bank) | ||
| 566 | return -ENOMEM; | ||
| 567 | memset(bank, 0xff, banks * sizeof(u64)); | ||
| 568 | } | ||
| 569 | |||
| 453 | /* Use accurate RIP reporting if available. */ | 570 | /* Use accurate RIP reporting if available. */ |
| 454 | if ((cap & (1<<9)) && ((cap >> 16) & 0xff) >= 9) | 571 | if ((cap & (1<<9)) && ((cap >> 16) & 0xff) >= 9) |
| 455 | rip_msr = MSR_IA32_MCG_EIP; | 572 | rip_msr = MSR_IA32_MCG_EIP; |
| 456 | 573 | ||
| 457 | /* Log the machine checks left over from the previous reset. | 574 | return 0; |
| 458 | This also clears all registers */ | 575 | } |
| 459 | do_machine_check(NULL, mce_bootlog ? -1 : -2); | 576 | |
| 577 | static void mce_init(void *dummy) | ||
| 578 | { | ||
| 579 | u64 cap; | ||
| 580 | int i; | ||
| 581 | mce_banks_t all_banks; | ||
| 582 | |||
| 583 | /* | ||
| 584 | * Log the machine checks left over from the previous reset. | ||
| 585 | */ | ||
| 586 | bitmap_fill(all_banks, MAX_NR_BANKS); | ||
| 587 | machine_check_poll(MCP_UC, &all_banks); | ||
| 460 | 588 | ||
| 461 | set_in_cr4(X86_CR4_MCE); | 589 | set_in_cr4(X86_CR4_MCE); |
| 462 | 590 | ||
| 591 | rdmsrl(MSR_IA32_MCG_CAP, cap); | ||
| 463 | if (cap & MCG_CTL_P) | 592 | if (cap & MCG_CTL_P) |
| 464 | wrmsr(MSR_IA32_MCG_CTL, 0xffffffff, 0xffffffff); | 593 | wrmsr(MSR_IA32_MCG_CTL, 0xffffffff, 0xffffffff); |
| 465 | 594 | ||
| 466 | for (i = 0; i < banks; i++) { | 595 | for (i = 0; i < banks; i++) { |
| 467 | if (i < NR_SYSFS_BANKS) | 596 | wrmsrl(MSR_IA32_MC0_CTL+4*i, bank[i]); |
| 468 | wrmsrl(MSR_IA32_MC0_CTL+4*i, bank[i]); | ||
| 469 | else | ||
| 470 | wrmsrl(MSR_IA32_MC0_CTL+4*i, ~0UL); | ||
| 471 | |||
| 472 | wrmsrl(MSR_IA32_MC0_STATUS+4*i, 0); | 597 | wrmsrl(MSR_IA32_MC0_STATUS+4*i, 0); |
| 473 | } | 598 | } |
| 474 | } | 599 | } |
| 475 | 600 | ||
| 476 | /* Add per CPU specific workarounds here */ | 601 | /* Add per CPU specific workarounds here */ |
| 477 | static void __cpuinit mce_cpu_quirks(struct cpuinfo_x86 *c) | 602 | static void mce_cpu_quirks(struct cpuinfo_x86 *c) |
| 478 | { | 603 | { |
| 479 | /* This should be disabled by the BIOS, but isn't always */ | 604 | /* This should be disabled by the BIOS, but isn't always */ |
| 480 | if (c->x86_vendor == X86_VENDOR_AMD) { | 605 | if (c->x86_vendor == X86_VENDOR_AMD) { |
| 481 | if(c->x86 == 15) | 606 | if (c->x86 == 15 && banks > 4) |
| 482 | /* disable GART TBL walk error reporting, which trips off | 607 | /* disable GART TBL walk error reporting, which trips off |
| 483 | incorrectly with the IOMMU & 3ware & Cerberus. */ | 608 | incorrectly with the IOMMU & 3ware & Cerberus. */ |
| 484 | clear_bit(10, &bank[4]); | 609 | clear_bit(10, (unsigned long *)&bank[4]); |
| 485 | if(c->x86 <= 17 && mce_bootlog < 0) | 610 | if(c->x86 <= 17 && mce_bootlog < 0) |
| 486 | /* Lots of broken BIOS around that don't clear them | 611 | /* Lots of broken BIOS around that don't clear them |
| 487 | by default and leave crap in there. Don't log. */ | 612 | by default and leave crap in there. Don't log. */ |
| @@ -504,20 +629,38 @@ static void mce_cpu_features(struct cpuinfo_x86 *c) | |||
| 504 | } | 629 | } |
| 505 | } | 630 | } |
| 506 | 631 | ||
| 632 | static void mce_init_timer(void) | ||
| 633 | { | ||
| 634 | struct timer_list *t = &__get_cpu_var(mce_timer); | ||
| 635 | |||
| 636 | /* the data race is harmless because every CPU sets the same value */ | ||
| 637 | if (!next_interval) | ||
| 638 | next_interval = check_interval * HZ; | ||
| 639 | if (!next_interval) | ||
| 640 | return; | ||
| 641 | setup_timer(t, mcheck_timer, smp_processor_id()); | ||
| 642 | t->expires = round_jiffies(jiffies + next_interval); | ||
| 643 | add_timer(t); | ||
| 644 | } | ||
| 645 | |||
| 507 | /* | 646 | /* |
| 508 | * Called for each booted CPU to set up machine checks. | 647 | * Called for each booted CPU to set up machine checks. |
| 509 | * Must be called with preempt off. | 648 | * Must be called with preempt off. |
| 510 | */ | 649 | */ |
| 511 | void __cpuinit mcheck_init(struct cpuinfo_x86 *c) | 650 | void __cpuinit mcheck_init(struct cpuinfo_x86 *c) |
| 512 | { | 651 | { |
| 513 | mce_cpu_quirks(c); | 652 | if (!mce_available(c)) |
| 653 | return; | ||
| 514 | 654 | ||
| 515 | if (mce_dont_init || | 655 | if (mce_cap_init() < 0) { |
| 516 | !mce_available(c)) | 656 | mce_dont_init = 1; |
| 517 | return; | 657 | return; |
| 658 | } | ||
| 659 | mce_cpu_quirks(c); | ||
| 518 | 660 | ||
| 519 | mce_init(NULL); | 661 | mce_init(NULL); |
| 520 | mce_cpu_features(c); | 662 | mce_cpu_features(c); |
| 663 | mce_init_timer(); | ||
| 521 | } | 664 | } |
| 522 | 665 | ||
| 523 | /* | 666 | /* |
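
The polling rework replaces one global delayed work item with a per-CPU timer armed by mce_init_timer() above, so each CPU polls only its own banks and re-arms its own timer. A reduced sketch of the pattern (illustrative names, same timer API):

static DEFINE_PER_CPU(struct timer_list, example_timer);

static void example_timer_fn(unsigned long data)
{
	struct timer_list *t = &per_cpu(example_timer, data);

	WARN_ON(smp_processor_id() != data);
	/* ... poll this CPU's banks here ... */

	t->expires = jiffies + check_interval * HZ;
	add_timer(t);				/* re-arm on the same CPU */
}

static void example_start_timer(void)
{
	struct timer_list *t = &__get_cpu_var(example_timer);

	setup_timer(t, example_timer_fn, smp_processor_id());
	t->expires = round_jiffies(jiffies + check_interval * HZ);
	add_timer(t);
}
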
| @@ -573,7 +716,7 @@ static ssize_t mce_read(struct file *filp, char __user *ubuf, size_t usize, | |||
| 573 | { | 716 | { |
| 574 | unsigned long *cpu_tsc; | 717 | unsigned long *cpu_tsc; |
| 575 | static DEFINE_MUTEX(mce_read_mutex); | 718 | static DEFINE_MUTEX(mce_read_mutex); |
| 576 | unsigned next; | 719 | unsigned prev, next; |
| 577 | char __user *buf = ubuf; | 720 | char __user *buf = ubuf; |
| 578 | int i, err; | 721 | int i, err; |
| 579 | 722 | ||
| @@ -592,25 +735,32 @@ static ssize_t mce_read(struct file *filp, char __user *ubuf, size_t usize, | |||
| 592 | } | 735 | } |
| 593 | 736 | ||
| 594 | err = 0; | 737 | err = 0; |
| 595 | for (i = 0; i < next; i++) { | 738 | prev = 0; |
| 596 | unsigned long start = jiffies; | 739 | do { |
| 597 | 740 | for (i = prev; i < next; i++) { | |
| 598 | while (!mcelog.entry[i].finished) { | 741 | unsigned long start = jiffies; |
| 599 | if (time_after_eq(jiffies, start + 2)) { | 742 | |
| 600 | memset(mcelog.entry + i,0, sizeof(struct mce)); | 743 | while (!mcelog.entry[i].finished) { |
| 601 | goto timeout; | 744 | if (time_after_eq(jiffies, start + 2)) { |
| 745 | memset(mcelog.entry + i, 0, | ||
| 746 | sizeof(struct mce)); | ||
| 747 | goto timeout; | ||
| 748 | } | ||
| 749 | cpu_relax(); | ||
| 602 | } | 750 | } |
| 603 | cpu_relax(); | 751 | smp_rmb(); |
| 752 | err |= copy_to_user(buf, mcelog.entry + i, | ||
| 753 | sizeof(struct mce)); | ||
| 754 | buf += sizeof(struct mce); | ||
| 755 | timeout: | ||
| 756 | ; | ||
| 604 | } | 757 | } |
| 605 | smp_rmb(); | ||
| 606 | err |= copy_to_user(buf, mcelog.entry + i, sizeof(struct mce)); | ||
| 607 | buf += sizeof(struct mce); | ||
| 608 | timeout: | ||
| 609 | ; | ||
| 610 | } | ||
| 611 | 758 | ||
| 612 | memset(mcelog.entry, 0, next * sizeof(struct mce)); | 759 | memset(mcelog.entry + prev, 0, |
| 613 | mcelog.next = 0; | 760 | (next - prev) * sizeof(struct mce)); |
| 761 | prev = next; | ||
| 762 | next = cmpxchg(&mcelog.next, prev, 0); | ||
| 763 | } while (next != prev); | ||
| 614 | 764 | ||
| 615 | synchronize_sched(); | 765 | synchronize_sched(); |
| 616 | 766 | ||
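
The mce_read() rework above is a drain loop for a lockless, append-only log: copy everything up to the snapshot of mcelog.next, then try to cmpxchg() the index back to zero; if a machine check appended a record in the meantime, the swap fails, returns the newer index, and the loop picks up the new entries. The core of that pattern reduced to a sketch (hypothetical consume callback):

static unsigned example_drain(struct mce_log *log, void (*consume)(struct mce *))
{
	unsigned prev = 0, next = log->next;
	unsigned copied = 0;

	do {
		unsigned i;

		for (i = prev; i < next; i++) {
			consume(&log->entry[i]);
			copied++;
		}
		memset(log->entry + prev, 0, (next - prev) * sizeof(struct mce));
		prev = next;
		next = cmpxchg(&log->next, prev, 0);	/* retry if it moved */
	} while (next != prev);

	return copied;
}
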
| @@ -680,20 +830,6 @@ static struct miscdevice mce_log_device = { | |||
| 680 | &mce_chrdev_ops, | 830 | &mce_chrdev_ops, |
| 681 | }; | 831 | }; |
| 682 | 832 | ||
| 683 | static unsigned long old_cr4 __initdata; | ||
| 684 | |||
| 685 | void __init stop_mce(void) | ||
| 686 | { | ||
| 687 | old_cr4 = read_cr4(); | ||
| 688 | clear_in_cr4(X86_CR4_MCE); | ||
| 689 | } | ||
| 690 | |||
| 691 | void __init restart_mce(void) | ||
| 692 | { | ||
| 693 | if (old_cr4 & X86_CR4_MCE) | ||
| 694 | set_in_cr4(X86_CR4_MCE); | ||
| 695 | } | ||
| 696 | |||
| 697 | /* | 833 | /* |
| 698 | * Old style boot options parsing. Only for compatibility. | 834 | * Old style boot options parsing. Only for compatibility. |
| 699 | */ | 835 | */ |
| @@ -703,8 +839,7 @@ static int __init mcheck_disable(char *str) | |||
| 703 | return 1; | 839 | return 1; |
| 704 | } | 840 | } |
| 705 | 841 | ||
| 706 | /* mce=off disables machine check. Note you can re-enable it later | 842 | /* mce=off disables machine check. |
| 707 | using sysfs. | ||
| 708 | mce=TOLERANCELEVEL (number, see above) | 843 | mce=TOLERANCELEVEL (number, see above) |
| 709 | mce=bootlog Log MCEs from before booting. Disabled by default on AMD. | 844 | mce=bootlog Log MCEs from before booting. Disabled by default on AMD. |
| 710 | mce=nobootlog Don't log MCEs from before booting. */ | 845 | mce=nobootlog Don't log MCEs from before booting. */ |
| @@ -728,6 +863,29 @@ __setup("mce=", mcheck_enable); | |||
| 728 | * Sysfs support | 863 | * Sysfs support |
| 729 | */ | 864 | */ |
| 730 | 865 | ||
| 866 | /* | ||
| 867 | * Disable machine checks on suspend and shutdown. We can't really handle | ||
| 868 | * them later. | ||
| 869 | */ | ||
| 870 | static int mce_disable(void) | ||
| 871 | { | ||
| 872 | int i; | ||
| 873 | |||
| 874 | for (i = 0; i < banks; i++) | ||
| 875 | wrmsrl(MSR_IA32_MC0_CTL + i*4, 0); | ||
| 876 | return 0; | ||
| 877 | } | ||
| 878 | |||
| 879 | static int mce_suspend(struct sys_device *dev, pm_message_t state) | ||
| 880 | { | ||
| 881 | return mce_disable(); | ||
| 882 | } | ||
| 883 | |||
| 884 | static int mce_shutdown(struct sys_device *dev) | ||
| 885 | { | ||
| 886 | return mce_disable(); | ||
| 887 | } | ||
| 888 | |||
| 731 | /* On resume clear all MCE state. Don't want to see leftovers from the BIOS. | 889 | /* On resume clear all MCE state. Don't want to see leftovers from the BIOS. |
| 732 | Only one CPU is active at this time, the others get readded later using | 890 | Only one CPU is active at this time, the others get readded later using |
| 733 | CPU hotplug. */ | 891 | CPU hotplug. */ |
| @@ -738,20 +896,24 @@ static int mce_resume(struct sys_device *dev) | |||
| 738 | return 0; | 896 | return 0; |
| 739 | } | 897 | } |
| 740 | 898 | ||
| 899 | static void mce_cpu_restart(void *data) | ||
| 900 | { | ||
| 901 | del_timer_sync(&__get_cpu_var(mce_timer)); | ||
| 902 | if (mce_available(¤t_cpu_data)) | ||
| 903 | mce_init(NULL); | ||
| 904 | mce_init_timer(); | ||
| 905 | } | ||
| 906 | |||
| 741 | /* Reinit MCEs after user configuration changes */ | 907 | /* Reinit MCEs after user configuration changes */ |
| 742 | static void mce_restart(void) | 908 | static void mce_restart(void) |
| 743 | { | 909 | { |
| 744 | if (next_interval) | ||
| 745 | cancel_delayed_work(&mcheck_work); | ||
| 746 | /* Timer race is harmless here */ | ||
| 747 | on_each_cpu(mce_init, NULL, 1); | ||
| 748 | next_interval = check_interval * HZ; | 910 | next_interval = check_interval * HZ; |
| 749 | if (next_interval) | 911 | on_each_cpu(mce_cpu_restart, NULL, 1); |
| 750 | schedule_delayed_work(&mcheck_work, | ||
| 751 | round_jiffies_relative(next_interval)); | ||
| 752 | } | 912 | } |
| 753 | 913 | ||
| 754 | static struct sysdev_class mce_sysclass = { | 914 | static struct sysdev_class mce_sysclass = { |
| 915 | .suspend = mce_suspend, | ||
| 916 | .shutdown = mce_shutdown, | ||
| 755 | .resume = mce_resume, | 917 | .resume = mce_resume, |
| 756 | .name = "machinecheck", | 918 | .name = "machinecheck", |
| 757 | }; | 919 | }; |
| @@ -778,16 +940,26 @@ void (*threshold_cpu_callback)(unsigned long action, unsigned int cpu) __cpuinit | |||
| 778 | } \ | 940 | } \ |
| 779 | static SYSDEV_ATTR(name, 0644, show_ ## name, set_ ## name); | 941 | static SYSDEV_ATTR(name, 0644, show_ ## name, set_ ## name); |
| 780 | 942 | ||
| 781 | /* | 943 | static struct sysdev_attribute *bank_attrs; |
| 782 | * TBD should generate these dynamically based on number of available banks. | 944 | |
| 783 | * Have only 6 contol banks in /sysfs until then. | 945 | static ssize_t show_bank(struct sys_device *s, struct sysdev_attribute *attr, |
| 784 | */ | 946 | char *buf) |
| 785 | ACCESSOR(bank0ctl,bank[0],mce_restart()) | 947 | { |
| 786 | ACCESSOR(bank1ctl,bank[1],mce_restart()) | 948 | u64 b = bank[attr - bank_attrs]; |
| 787 | ACCESSOR(bank2ctl,bank[2],mce_restart()) | 949 | return sprintf(buf, "%llx\n", b); |
| 788 | ACCESSOR(bank3ctl,bank[3],mce_restart()) | 950 | } |
| 789 | ACCESSOR(bank4ctl,bank[4],mce_restart()) | 951 | |
| 790 | ACCESSOR(bank5ctl,bank[5],mce_restart()) | 952 | static ssize_t set_bank(struct sys_device *s, struct sysdev_attribute *attr, |
| 953 | const char *buf, size_t siz) | ||
| 954 | { | ||
| 955 | char *end; | ||
| 956 | u64 new = simple_strtoull(buf, &end, 0); | ||
| 957 | if (end == buf) | ||
| 958 | return -EINVAL; | ||
| 959 | bank[attr - bank_attrs] = new; | ||
| 960 | mce_restart(); | ||
| 961 | return end-buf; | ||
| 962 | } | ||
| 791 | 963 | ||
| 792 | static ssize_t show_trigger(struct sys_device *s, struct sysdev_attribute *attr, | 964 | static ssize_t show_trigger(struct sys_device *s, struct sysdev_attribute *attr, |
| 793 | char *buf) | 965 | char *buf) |
| @@ -814,13 +986,11 @@ static SYSDEV_ATTR(trigger, 0644, show_trigger, set_trigger); | |||
| 814 | static SYSDEV_INT_ATTR(tolerant, 0644, tolerant); | 986 | static SYSDEV_INT_ATTR(tolerant, 0644, tolerant); |
| 815 | ACCESSOR(check_interval,check_interval,mce_restart()) | 987 | ACCESSOR(check_interval,check_interval,mce_restart()) |
| 816 | static struct sysdev_attribute *mce_attributes[] = { | 988 | static struct sysdev_attribute *mce_attributes[] = { |
| 817 | &attr_bank0ctl, &attr_bank1ctl, &attr_bank2ctl, | ||
| 818 | &attr_bank3ctl, &attr_bank4ctl, &attr_bank5ctl, | ||
| 819 | &attr_tolerant.attr, &attr_check_interval, &attr_trigger, | 989 | &attr_tolerant.attr, &attr_check_interval, &attr_trigger, |
| 820 | NULL | 990 | NULL |
| 821 | }; | 991 | }; |
| 822 | 992 | ||
| 823 | static cpumask_t mce_device_initialized = CPU_MASK_NONE; | 993 | static cpumask_var_t mce_device_initialized; |
| 824 | 994 | ||
| 825 | /* Per cpu sysdev init. All of the cpus still share the same ctl bank */ | 995 | /* Per cpu sysdev init. All of the cpus still share the same ctl bank */ |
| 826 | static __cpuinit int mce_create_device(unsigned int cpu) | 996 | static __cpuinit int mce_create_device(unsigned int cpu) |
| @@ -845,11 +1015,22 @@ static __cpuinit int mce_create_device(unsigned int cpu) | |||
| 845 | if (err) | 1015 | if (err) |
| 846 | goto error; | 1016 | goto error; |
| 847 | } | 1017 | } |
| 848 | cpu_set(cpu, mce_device_initialized); | 1018 | for (i = 0; i < banks; i++) { |
| 1019 | err = sysdev_create_file(&per_cpu(device_mce, cpu), | ||
| 1020 | &bank_attrs[i]); | ||
| 1021 | if (err) | ||
| 1022 | goto error2; | ||
| 1023 | } | ||
| 1024 | cpumask_set_cpu(cpu, mce_device_initialized); | ||
| 849 | 1025 | ||
| 850 | return 0; | 1026 | return 0; |
| 1027 | error2: | ||
| 1028 | while (--i >= 0) { | ||
| 1029 | sysdev_remove_file(&per_cpu(device_mce, cpu), | ||
| 1030 | &bank_attrs[i]); | ||
| 1031 | } | ||
| 851 | error: | 1032 | error: |
| 852 | while (i--) { | 1033 | while (--i >= 0) { |
| 853 | sysdev_remove_file(&per_cpu(device_mce,cpu), | 1034 | sysdev_remove_file(&per_cpu(device_mce,cpu), |
| 854 | mce_attributes[i]); | 1035 | mce_attributes[i]); |
| 855 | } | 1036 | } |
| @@ -862,14 +1043,44 @@ static __cpuinit void mce_remove_device(unsigned int cpu) | |||
| 862 | { | 1043 | { |
| 863 | int i; | 1044 | int i; |
| 864 | 1045 | ||
| 865 | if (!cpu_isset(cpu, mce_device_initialized)) | 1046 | if (!cpumask_test_cpu(cpu, mce_device_initialized)) |
| 866 | return; | 1047 | return; |
| 867 | 1048 | ||
| 868 | for (i = 0; mce_attributes[i]; i++) | 1049 | for (i = 0; mce_attributes[i]; i++) |
| 869 | sysdev_remove_file(&per_cpu(device_mce,cpu), | 1050 | sysdev_remove_file(&per_cpu(device_mce,cpu), |
| 870 | mce_attributes[i]); | 1051 | mce_attributes[i]); |
| 1052 | for (i = 0; i < banks; i++) | ||
| 1053 | sysdev_remove_file(&per_cpu(device_mce, cpu), | ||
| 1054 | &bank_attrs[i]); | ||
| 871 | sysdev_unregister(&per_cpu(device_mce,cpu)); | 1055 | sysdev_unregister(&per_cpu(device_mce,cpu)); |
| 872 | cpu_clear(cpu, mce_device_initialized); | 1056 | cpumask_clear_cpu(cpu, mce_device_initialized); |
| 1057 | } | ||
| 1058 | |||
| 1059 | /* Make sure there are no machine checks on offlined CPUs. */ | ||
| 1060 | static void mce_disable_cpu(void *h) | ||
| 1061 | { | ||
| 1062 | int i; | ||
| 1063 | unsigned long action = *(unsigned long *)h; | ||
| 1064 | |||
| 1065 | if (!mce_available(¤t_cpu_data)) | ||
| 1066 | return; | ||
| 1067 | if (!(action & CPU_TASKS_FROZEN)) | ||
| 1068 | cmci_clear(); | ||
| 1069 | for (i = 0; i < banks; i++) | ||
| 1070 | wrmsrl(MSR_IA32_MC0_CTL + i*4, 0); | ||
| 1071 | } | ||
| 1072 | |||
| 1073 | static void mce_reenable_cpu(void *h) | ||
| 1074 | { | ||
| 1075 | int i; | ||
| 1076 | unsigned long action = *(unsigned long *)h; | ||
| 1077 | |||
| 1078 | if (!mce_available(¤t_cpu_data)) | ||
| 1079 | return; | ||
| 1080 | if (!(action & CPU_TASKS_FROZEN)) | ||
| 1081 | cmci_reenable(); | ||
| 1082 | for (i = 0; i < banks; i++) | ||
| 1083 | wrmsrl(MSR_IA32_MC0_CTL + i*4, bank[i]); | ||
| 873 | } | 1084 | } |
| 874 | 1085 | ||
| 875 | /* Get notified when a cpu comes on/off. Be hotplug friendly. */ | 1086 | /* Get notified when a cpu comes on/off. Be hotplug friendly. */ |
| @@ -877,6 +1088,7 @@ static int __cpuinit mce_cpu_callback(struct notifier_block *nfb, | |||
| 877 | unsigned long action, void *hcpu) | 1088 | unsigned long action, void *hcpu) |
| 878 | { | 1089 | { |
| 879 | unsigned int cpu = (unsigned long)hcpu; | 1090 | unsigned int cpu = (unsigned long)hcpu; |
| 1091 | struct timer_list *t = &per_cpu(mce_timer, cpu); | ||
| 880 | 1092 | ||
| 881 | switch (action) { | 1093 | switch (action) { |
| 882 | case CPU_ONLINE: | 1094 | case CPU_ONLINE: |
| @@ -891,6 +1103,21 @@ static int __cpuinit mce_cpu_callback(struct notifier_block *nfb, | |||
| 891 | threshold_cpu_callback(action, cpu); | 1103 | threshold_cpu_callback(action, cpu); |
| 892 | mce_remove_device(cpu); | 1104 | mce_remove_device(cpu); |
| 893 | break; | 1105 | break; |
| 1106 | case CPU_DOWN_PREPARE: | ||
| 1107 | case CPU_DOWN_PREPARE_FROZEN: | ||
| 1108 | del_timer_sync(t); | ||
| 1109 | smp_call_function_single(cpu, mce_disable_cpu, &action, 1); | ||
| 1110 | break; | ||
| 1111 | case CPU_DOWN_FAILED: | ||
| 1112 | case CPU_DOWN_FAILED_FROZEN: | ||
| 1113 | t->expires = round_jiffies(jiffies + next_interval); | ||
| 1114 | add_timer_on(t, cpu); | ||
| 1115 | smp_call_function_single(cpu, mce_reenable_cpu, &action, 1); | ||
| 1116 | break; | ||
| 1117 | case CPU_POST_DEAD: | ||
| 1118 | /* intentionally ignoring frozen here */ | ||
| 1119 | cmci_rediscover(cpu); | ||
| 1120 | break; | ||
| 894 | } | 1121 | } |
| 895 | return NOTIFY_OK; | 1122 | return NOTIFY_OK; |
| 896 | } | 1123 | } |
| @@ -899,6 +1126,34 @@ static struct notifier_block mce_cpu_notifier __cpuinitdata = { | |||
| 899 | .notifier_call = mce_cpu_callback, | 1126 | .notifier_call = mce_cpu_callback, |
| 900 | }; | 1127 | }; |
| 901 | 1128 | ||
| 1129 | static __init int mce_init_banks(void) | ||
| 1130 | { | ||
| 1131 | int i; | ||
| 1132 | |||
| 1133 | bank_attrs = kzalloc(sizeof(struct sysdev_attribute) * banks, | ||
| 1134 | GFP_KERNEL); | ||
| 1135 | if (!bank_attrs) | ||
| 1136 | return -ENOMEM; | ||
| 1137 | |||
| 1138 | for (i = 0; i < banks; i++) { | ||
| 1139 | struct sysdev_attribute *a = &bank_attrs[i]; | ||
| 1140 | a->attr.name = kasprintf(GFP_KERNEL, "bank%d", i); | ||
| 1141 | if (!a->attr.name) | ||
| 1142 | goto nomem; | ||
| 1143 | a->attr.mode = 0644; | ||
| 1144 | a->show = show_bank; | ||
| 1145 | a->store = set_bank; | ||
| 1146 | } | ||
| 1147 | return 0; | ||
| 1148 | |||
| 1149 | nomem: | ||
| 1150 | while (--i >= 0) | ||
| 1151 | kfree(bank_attrs[i].attr.name); | ||
| 1152 | kfree(bank_attrs); | ||
| 1153 | bank_attrs = NULL; | ||
| 1154 | return -ENOMEM; | ||
| 1155 | } | ||
| 1156 | |||
| 902 | static __init int mce_init_device(void) | 1157 | static __init int mce_init_device(void) |
| 903 | { | 1158 | { |
| 904 | int err; | 1159 | int err; |
| @@ -906,6 +1161,13 @@ static __init int mce_init_device(void) | |||
| 906 | 1161 | ||
| 907 | if (!mce_available(&boot_cpu_data)) | 1162 | if (!mce_available(&boot_cpu_data)) |
| 908 | return -EIO; | 1163 | return -EIO; |
| 1164 | |||
| 1165 | alloc_cpumask_var(&mce_device_initialized, GFP_KERNEL); | ||
| 1166 | |||
| 1167 | err = mce_init_banks(); | ||
| 1168 | if (err) | ||
| 1169 | return err; | ||
| 1170 | |||
| 909 | err = sysdev_class_register(&mce_sysclass); | 1171 | err = sysdev_class_register(&mce_sysclass); |
| 910 | if (err) | 1172 | if (err) |
| 911 | return err; | 1173 | return err; |
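
The hunks above convert mce_device_initialized from an on-stack cpumask_t to a dynamically allocated cpumask_var_t and switch all accesses to the cpumask_* helpers. A minimal sketch of that allocate/use/free pattern is below; the identifiers are hypothetical and only the cpumask API calls themselves are taken from the kernel:

    #include <linux/cpumask.h>
    #include <linux/gfp.h>

    static cpumask_var_t my_initialized_mask;      /* hypothetical mask */

    static int __init my_mask_init(void)
    {
            /* With CONFIG_CPUMASK_OFFSTACK=y this allocates the bitmap;
             * otherwise it is a thin wrapper around a fixed-size mask. */
            if (!alloc_cpumask_var(&my_initialized_mask, GFP_KERNEL))
                    return -ENOMEM;
            cpumask_clear(my_initialized_mask);
            return 0;
    }

    static void my_mask_mark(unsigned int cpu)
    {
            if (!cpumask_test_cpu(cpu, my_initialized_mask))
                    cpumask_set_cpu(cpu, my_initialized_mask);
    }

    static void my_mask_exit(void)
    {
            free_cpumask_var(my_initialized_mask);
    }
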
diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd_64.c b/arch/x86/kernel/cpu/mcheck/mce_amd_64.c index f2ee0ae29bd6..56dde9c4bc96 100644 --- a/arch/x86/kernel/cpu/mcheck/mce_amd_64.c +++ b/arch/x86/kernel/cpu/mcheck/mce_amd_64.c | |||
| @@ -67,7 +67,7 @@ static struct threshold_block threshold_defaults = { | |||
| 67 | struct threshold_bank { | 67 | struct threshold_bank { |
| 68 | struct kobject *kobj; | 68 | struct kobject *kobj; |
| 69 | struct threshold_block *blocks; | 69 | struct threshold_block *blocks; |
| 70 | cpumask_t cpus; | 70 | cpumask_var_t cpus; |
| 71 | }; | 71 | }; |
| 72 | static DEFINE_PER_CPU(struct threshold_bank *, threshold_banks[NR_BANKS]); | 72 | static DEFINE_PER_CPU(struct threshold_bank *, threshold_banks[NR_BANKS]); |
| 73 | 73 | ||
| @@ -79,6 +79,8 @@ static unsigned char shared_bank[NR_BANKS] = { | |||
| 79 | 79 | ||
| 80 | static DEFINE_PER_CPU(unsigned char, bank_map); /* see which banks are on */ | 80 | static DEFINE_PER_CPU(unsigned char, bank_map); /* see which banks are on */ |
| 81 | 81 | ||
| 82 | static void amd_threshold_interrupt(void); | ||
| 83 | |||
| 82 | /* | 84 | /* |
| 83 | * CPU Initialization | 85 | * CPU Initialization |
| 84 | */ | 86 | */ |
| @@ -90,7 +92,8 @@ struct thresh_restart { | |||
| 90 | }; | 92 | }; |
| 91 | 93 | ||
| 92 | /* must be called with correct cpu affinity */ | 94 | /* must be called with correct cpu affinity */ |
| 93 | static long threshold_restart_bank(void *_tr) | 95 | /* Called via smp_call_function_single() */ |
| 96 | static void threshold_restart_bank(void *_tr) | ||
| 94 | { | 97 | { |
| 95 | struct thresh_restart *tr = _tr; | 98 | struct thresh_restart *tr = _tr; |
| 96 | u32 mci_misc_hi, mci_misc_lo; | 99 | u32 mci_misc_hi, mci_misc_lo; |
| @@ -117,7 +120,6 @@ static long threshold_restart_bank(void *_tr) | |||
| 117 | 120 | ||
| 118 | mci_misc_hi |= MASK_COUNT_EN_HI; | 121 | mci_misc_hi |= MASK_COUNT_EN_HI; |
| 119 | wrmsr(tr->b->address, mci_misc_lo, mci_misc_hi); | 122 | wrmsr(tr->b->address, mci_misc_lo, mci_misc_hi); |
| 120 | return 0; | ||
| 121 | } | 123 | } |
| 122 | 124 | ||
| 123 | /* cpu init entry point, called from mce.c with preempt off */ | 125 | /* cpu init entry point, called from mce.c with preempt off */ |
| @@ -174,6 +176,8 @@ void mce_amd_feature_init(struct cpuinfo_x86 *c) | |||
| 174 | tr.reset = 0; | 176 | tr.reset = 0; |
| 175 | tr.old_limit = 0; | 177 | tr.old_limit = 0; |
| 176 | threshold_restart_bank(&tr); | 178 | threshold_restart_bank(&tr); |
| 179 | |||
| 180 | mce_threshold_vector = amd_threshold_interrupt; | ||
| 177 | } | 181 | } |
| 178 | } | 182 | } |
| 179 | } | 183 | } |
| @@ -187,19 +191,13 @@ void mce_amd_feature_init(struct cpuinfo_x86 *c) | |||
| 187 | * the interrupt goes off when error_count reaches threshold_limit. | 191 | * the interrupt goes off when error_count reaches threshold_limit. |
| 188 | * the handler will simply log mcelog w/ software defined bank number. | 192 | * the handler will simply log mcelog w/ software defined bank number. |
| 189 | */ | 193 | */ |
| 190 | asmlinkage void mce_threshold_interrupt(void) | 194 | static void amd_threshold_interrupt(void) |
| 191 | { | 195 | { |
| 192 | unsigned int bank, block; | 196 | unsigned int bank, block; |
| 193 | struct mce m; | 197 | struct mce m; |
| 194 | u32 low = 0, high = 0, address = 0; | 198 | u32 low = 0, high = 0, address = 0; |
| 195 | 199 | ||
| 196 | ack_APIC_irq(); | 200 | mce_setup(&m); |
| 197 | exit_idle(); | ||
| 198 | irq_enter(); | ||
| 199 | |||
| 200 | memset(&m, 0, sizeof(m)); | ||
| 201 | rdtscll(m.tsc); | ||
| 202 | m.cpu = smp_processor_id(); | ||
| 203 | 201 | ||
| 204 | /* assume first bank caused it */ | 202 | /* assume first bank caused it */ |
| 205 | for (bank = 0; bank < NR_BANKS; ++bank) { | 203 | for (bank = 0; bank < NR_BANKS; ++bank) { |
| @@ -233,7 +231,8 @@ asmlinkage void mce_threshold_interrupt(void) | |||
| 233 | 231 | ||
| 234 | /* Log the machine check that caused the threshold | 232 | /* Log the machine check that caused the threshold |
| 235 | event. */ | 233 | event. */ |
| 236 | do_machine_check(NULL, 0); | 234 | machine_check_poll(MCP_TIMESTAMP, |
| 235 | &__get_cpu_var(mce_poll_banks)); | ||
| 237 | 236 | ||
| 238 | if (high & MASK_OVERFLOW_HI) { | 237 | if (high & MASK_OVERFLOW_HI) { |
| 239 | rdmsrl(address, m.misc); | 238 | rdmsrl(address, m.misc); |
| @@ -243,13 +242,10 @@ asmlinkage void mce_threshold_interrupt(void) | |||
| 243 | + bank * NR_BLOCKS | 242 | + bank * NR_BLOCKS |
| 244 | + block; | 243 | + block; |
| 245 | mce_log(&m); | 244 | mce_log(&m); |
| 246 | goto out; | 245 | return; |
| 247 | } | 246 | } |
| 248 | } | 247 | } |
| 249 | } | 248 | } |
| 250 | out: | ||
| 251 | inc_irq_stat(irq_threshold_count); | ||
| 252 | irq_exit(); | ||
| 253 | } | 249 | } |
| 254 | 250 | ||
| 255 | /* | 251 | /* |
| @@ -283,7 +279,7 @@ static ssize_t store_interrupt_enable(struct threshold_block *b, | |||
| 283 | tr.b = b; | 279 | tr.b = b; |
| 284 | tr.reset = 0; | 280 | tr.reset = 0; |
| 285 | tr.old_limit = 0; | 281 | tr.old_limit = 0; |
| 286 | work_on_cpu(b->cpu, threshold_restart_bank, &tr); | 282 | smp_call_function_single(b->cpu, threshold_restart_bank, &tr, 1); |
| 287 | 283 | ||
| 288 | return end - buf; | 284 | return end - buf; |
| 289 | } | 285 | } |
| @@ -305,23 +301,32 @@ static ssize_t store_threshold_limit(struct threshold_block *b, | |||
| 305 | tr.b = b; | 301 | tr.b = b; |
| 306 | tr.reset = 0; | 302 | tr.reset = 0; |
| 307 | 303 | ||
| 308 | work_on_cpu(b->cpu, threshold_restart_bank, &tr); | 304 | smp_call_function_single(b->cpu, threshold_restart_bank, &tr, 1); |
| 309 | 305 | ||
| 310 | return end - buf; | 306 | return end - buf; |
| 311 | } | 307 | } |
| 312 | 308 | ||
| 313 | static long local_error_count(void *_b) | 309 | struct threshold_block_cross_cpu { |
| 310 | struct threshold_block *tb; | ||
| 311 | long retval; | ||
| 312 | }; | ||
| 313 | |||
| 314 | static void local_error_count_handler(void *_tbcc) | ||
| 314 | { | 315 | { |
| 315 | struct threshold_block *b = _b; | 316 | struct threshold_block_cross_cpu *tbcc = _tbcc; |
| 317 | struct threshold_block *b = tbcc->tb; | ||
| 316 | u32 low, high; | 318 | u32 low, high; |
| 317 | 319 | ||
| 318 | rdmsr(b->address, low, high); | 320 | rdmsr(b->address, low, high); |
| 319 | return (high & 0xFFF) - (THRESHOLD_MAX - b->threshold_limit); | 321 | tbcc->retval = (high & 0xFFF) - (THRESHOLD_MAX - b->threshold_limit); |
| 320 | } | 322 | } |
| 321 | 323 | ||
| 322 | static ssize_t show_error_count(struct threshold_block *b, char *buf) | 324 | static ssize_t show_error_count(struct threshold_block *b, char *buf) |
| 323 | { | 325 | { |
| 324 | return sprintf(buf, "%lx\n", work_on_cpu(b->cpu, local_error_count, b)); | 326 | struct threshold_block_cross_cpu tbcc = { .tb = b, }; |
| 327 | |||
| 328 | smp_call_function_single(b->cpu, local_error_count_handler, &tbcc, 1); | ||
| 329 | return sprintf(buf, "%lx\n", tbcc.retval); | ||
| 325 | } | 330 | } |
| 326 | 331 | ||
| 327 | static ssize_t store_error_count(struct threshold_block *b, | 332 | static ssize_t store_error_count(struct threshold_block *b, |
| @@ -329,7 +334,7 @@ static ssize_t store_error_count(struct threshold_block *b, | |||
| 329 | { | 334 | { |
| 330 | struct thresh_restart tr = { .b = b, .reset = 1, .old_limit = 0 }; | 335 | struct thresh_restart tr = { .b = b, .reset = 1, .old_limit = 0 }; |
| 331 | 336 | ||
| 332 | work_on_cpu(b->cpu, threshold_restart_bank, &tr); | 337 | smp_call_function_single(b->cpu, threshold_restart_bank, &tr, 1); |
| 333 | return 1; | 338 | return 1; |
| 334 | } | 339 | } |
| 335 | 340 | ||
| @@ -398,7 +403,7 @@ static __cpuinit int allocate_threshold_blocks(unsigned int cpu, | |||
| 398 | if ((bank >= NR_BANKS) || (block >= NR_BLOCKS)) | 403 | if ((bank >= NR_BANKS) || (block >= NR_BLOCKS)) |
| 399 | return 0; | 404 | return 0; |
| 400 | 405 | ||
| 401 | if (rdmsr_safe(address, &low, &high)) | 406 | if (rdmsr_safe_on_cpu(cpu, address, &low, &high)) |
| 402 | return 0; | 407 | return 0; |
| 403 | 408 | ||
| 404 | if (!(high & MASK_VALID_HI)) { | 409 | if (!(high & MASK_VALID_HI)) { |
| @@ -462,12 +467,11 @@ out_free: | |||
| 462 | return err; | 467 | return err; |
| 463 | } | 468 | } |
| 464 | 469 | ||
| 465 | static __cpuinit long local_allocate_threshold_blocks(void *_bank) | 470 | static __cpuinit long |
| 471 | local_allocate_threshold_blocks(int cpu, unsigned int bank) | ||
| 466 | { | 472 | { |
| 467 | unsigned int *bank = _bank; | 473 | return allocate_threshold_blocks(cpu, bank, 0, |
| 468 | 474 | MSR_IA32_MC0_MISC + bank * 4); | |
| 469 | return allocate_threshold_blocks(smp_processor_id(), *bank, 0, | ||
| 470 | MSR_IA32_MC0_MISC + *bank * 4); | ||
| 471 | } | 475 | } |
| 472 | 476 | ||
| 473 | /* symlinks sibling shared banks to first core. first core owns dir/files. */ | 477 | /* symlinks sibling shared banks to first core. first core owns dir/files. */ |
| @@ -481,7 +485,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank) | |||
| 481 | 485 | ||
| 482 | #ifdef CONFIG_SMP | 486 | #ifdef CONFIG_SMP |
| 483 | if (cpu_data(cpu).cpu_core_id && shared_bank[bank]) { /* symlink */ | 487 | if (cpu_data(cpu).cpu_core_id && shared_bank[bank]) { /* symlink */ |
| 484 | i = first_cpu(per_cpu(cpu_core_map, cpu)); | 488 | i = cpumask_first(cpu_core_mask(cpu)); |
| 485 | 489 | ||
| 486 | /* first core not up yet */ | 490 | /* first core not up yet */ |
| 487 | if (cpu_data(i).cpu_core_id) | 491 | if (cpu_data(i).cpu_core_id) |
| @@ -501,7 +505,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank) | |||
| 501 | if (err) | 505 | if (err) |
| 502 | goto out; | 506 | goto out; |
| 503 | 507 | ||
| 504 | b->cpus = per_cpu(cpu_core_map, cpu); | 508 | cpumask_copy(b->cpus, cpu_core_mask(cpu)); |
| 505 | per_cpu(threshold_banks, cpu)[bank] = b; | 509 | per_cpu(threshold_banks, cpu)[bank] = b; |
| 506 | goto out; | 510 | goto out; |
| 507 | } | 511 | } |
| @@ -512,24 +516,29 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank) | |||
| 512 | err = -ENOMEM; | 516 | err = -ENOMEM; |
| 513 | goto out; | 517 | goto out; |
| 514 | } | 518 | } |
| 519 | if (!alloc_cpumask_var(&b->cpus, GFP_KERNEL)) { | ||
| 520 | kfree(b); | ||
| 521 | err = -ENOMEM; | ||
| 522 | goto out; | ||
| 523 | } | ||
| 515 | 524 | ||
| 516 | b->kobj = kobject_create_and_add(name, &per_cpu(device_mce, cpu).kobj); | 525 | b->kobj = kobject_create_and_add(name, &per_cpu(device_mce, cpu).kobj); |
| 517 | if (!b->kobj) | 526 | if (!b->kobj) |
| 518 | goto out_free; | 527 | goto out_free; |
| 519 | 528 | ||
| 520 | #ifndef CONFIG_SMP | 529 | #ifndef CONFIG_SMP |
| 521 | b->cpus = CPU_MASK_ALL; | 530 | cpumask_setall(b->cpus); |
| 522 | #else | 531 | #else |
| 523 | b->cpus = per_cpu(cpu_core_map, cpu); | 532 | cpumask_copy(b->cpus, cpu_core_mask(cpu)); |
| 524 | #endif | 533 | #endif |
| 525 | 534 | ||
| 526 | per_cpu(threshold_banks, cpu)[bank] = b; | 535 | per_cpu(threshold_banks, cpu)[bank] = b; |
| 527 | 536 | ||
| 528 | err = work_on_cpu(cpu, local_allocate_threshold_blocks, &bank); | 537 | err = local_allocate_threshold_blocks(cpu, bank); |
| 529 | if (err) | 538 | if (err) |
| 530 | goto out_free; | 539 | goto out_free; |
| 531 | 540 | ||
| 532 | for_each_cpu_mask_nr(i, b->cpus) { | 541 | for_each_cpu(i, b->cpus) { |
| 533 | if (i == cpu) | 542 | if (i == cpu) |
| 534 | continue; | 543 | continue; |
| 535 | 544 | ||
| @@ -545,6 +554,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank) | |||
| 545 | 554 | ||
| 546 | out_free: | 555 | out_free: |
| 547 | per_cpu(threshold_banks, cpu)[bank] = NULL; | 556 | per_cpu(threshold_banks, cpu)[bank] = NULL; |
| 557 | free_cpumask_var(b->cpus); | ||
| 548 | kfree(b); | 558 | kfree(b); |
| 549 | out: | 559 | out: |
| 550 | return err; | 560 | return err; |
| @@ -619,7 +629,7 @@ static void threshold_remove_bank(unsigned int cpu, int bank) | |||
| 619 | #endif | 629 | #endif |
| 620 | 630 | ||
| 621 | /* remove all sibling symlinks before unregistering */ | 631 | /* remove all sibling symlinks before unregistering */ |
| 622 | for_each_cpu_mask_nr(i, b->cpus) { | 632 | for_each_cpu(i, b->cpus) { |
| 623 | if (i == cpu) | 633 | if (i == cpu) |
| 624 | continue; | 634 | continue; |
| 625 | 635 | ||
| @@ -632,6 +642,7 @@ static void threshold_remove_bank(unsigned int cpu, int bank) | |||
| 632 | free_out: | 642 | free_out: |
| 633 | kobject_del(b->kobj); | 643 | kobject_del(b->kobj); |
| 634 | kobject_put(b->kobj); | 644 | kobject_put(b->kobj); |
| 645 | free_cpumask_var(b->cpus); | ||
| 635 | kfree(b); | 646 | kfree(b); |
| 636 | per_cpu(threshold_banks, cpu)[bank] = NULL; | 647 | per_cpu(threshold_banks, cpu)[bank] = NULL; |
| 637 | } | 648 | } |
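
Because smp_call_function_single() handlers return void, the mce_amd_64.c changes above pass a small struct and let the remote handler fill in the result (see threshold_block_cross_cpu and local_error_count_handler). The same idiom in isolation, with hypothetical names; the handler runs on the target CPU in interrupt context and must not sleep:

    #include <linux/types.h>
    #include <linux/smp.h>
    #include <asm/msr.h>

    struct remote_msr_read {                /* hypothetical result carrier */
            u32 msr;
            u64 val;
    };

    /* Runs on the target CPU via smp_call_function_single(). */
    static void remote_msr_read_handler(void *info)
    {
            struct remote_msr_read *rm = info;

            rdmsrl(rm->msr, rm->val);       /* result goes back through *rm */
    }

    static u64 read_msr_on_cpu(int cpu, u32 msr)
    {
            struct remote_msr_read rm = { .msr = msr, .val = 0 };

            /* wait == 1: rm.val is valid once the call returns */
            smp_call_function_single(cpu, remote_msr_read_handler, &rm, 1);
            return rm.val;
    }
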
diff --git a/arch/x86/kernel/cpu/mcheck/mce_intel_64.c b/arch/x86/kernel/cpu/mcheck/mce_intel_64.c index f44c36624360..d6b72df89d69 100644 --- a/arch/x86/kernel/cpu/mcheck/mce_intel_64.c +++ b/arch/x86/kernel/cpu/mcheck/mce_intel_64.c | |||
| @@ -1,17 +1,21 @@ | |||
| 1 | /* | 1 | /* |
| 2 | * Intel specific MCE features. | 2 | * Intel specific MCE features. |
| 3 | * Copyright 2004 Zwane Mwaikambo <zwane@linuxpower.ca> | 3 | * Copyright 2004 Zwane Mwaikambo <zwane@linuxpower.ca> |
| 4 | * Copyright (C) 2008, 2009 Intel Corporation | ||
| 5 | * Author: Andi Kleen | ||
| 4 | */ | 6 | */ |
| 5 | 7 | ||
| 6 | #include <linux/init.h> | 8 | #include <linux/init.h> |
| 7 | #include <linux/interrupt.h> | 9 | #include <linux/interrupt.h> |
| 8 | #include <linux/percpu.h> | 10 | #include <linux/percpu.h> |
| 9 | #include <asm/processor.h> | 11 | #include <asm/processor.h> |
| 12 | #include <asm/apic.h> | ||
| 10 | #include <asm/msr.h> | 13 | #include <asm/msr.h> |
| 11 | #include <asm/mce.h> | 14 | #include <asm/mce.h> |
| 12 | #include <asm/hw_irq.h> | 15 | #include <asm/hw_irq.h> |
| 13 | #include <asm/idle.h> | 16 | #include <asm/idle.h> |
| 14 | #include <asm/therm_throt.h> | 17 | #include <asm/therm_throt.h> |
| 18 | #include <asm/apic.h> | ||
| 15 | 19 | ||
| 16 | asmlinkage void smp_thermal_interrupt(void) | 20 | asmlinkage void smp_thermal_interrupt(void) |
| 17 | { | 21 | { |
| @@ -24,7 +28,7 @@ asmlinkage void smp_thermal_interrupt(void) | |||
| 24 | 28 | ||
| 25 | rdmsrl(MSR_IA32_THERM_STATUS, msr_val); | 29 | rdmsrl(MSR_IA32_THERM_STATUS, msr_val); |
| 26 | if (therm_throt_process(msr_val & 1)) | 30 | if (therm_throt_process(msr_val & 1)) |
| 27 | mce_log_therm_throt_event(smp_processor_id(), msr_val); | 31 | mce_log_therm_throt_event(msr_val); |
| 28 | 32 | ||
| 29 | inc_irq_stat(irq_thermal_count); | 33 | inc_irq_stat(irq_thermal_count); |
| 30 | irq_exit(); | 34 | irq_exit(); |
| @@ -48,13 +52,13 @@ static void intel_init_thermal(struct cpuinfo_x86 *c) | |||
| 48 | */ | 52 | */ |
| 49 | rdmsr(MSR_IA32_MISC_ENABLE, l, h); | 53 | rdmsr(MSR_IA32_MISC_ENABLE, l, h); |
| 50 | h = apic_read(APIC_LVTTHMR); | 54 | h = apic_read(APIC_LVTTHMR); |
| 51 | if ((l & (1 << 3)) && (h & APIC_DM_SMI)) { | 55 | if ((l & MSR_IA32_MISC_ENABLE_TM1) && (h & APIC_DM_SMI)) { |
| 52 | printk(KERN_DEBUG | 56 | printk(KERN_DEBUG |
| 53 | "CPU%d: Thermal monitoring handled by SMI\n", cpu); | 57 | "CPU%d: Thermal monitoring handled by SMI\n", cpu); |
| 54 | return; | 58 | return; |
| 55 | } | 59 | } |
| 56 | 60 | ||
| 57 | if (cpu_has(c, X86_FEATURE_TM2) && (l & (1 << 13))) | 61 | if (cpu_has(c, X86_FEATURE_TM2) && (l & MSR_IA32_MISC_ENABLE_TM2)) |
| 58 | tm2 = 1; | 62 | tm2 = 1; |
| 59 | 63 | ||
| 60 | if (h & APIC_VECTOR_MASK) { | 64 | if (h & APIC_VECTOR_MASK) { |
| @@ -72,7 +76,7 @@ static void intel_init_thermal(struct cpuinfo_x86 *c) | |||
| 72 | wrmsr(MSR_IA32_THERM_INTERRUPT, l | 0x03, h); | 76 | wrmsr(MSR_IA32_THERM_INTERRUPT, l | 0x03, h); |
| 73 | 77 | ||
| 74 | rdmsr(MSR_IA32_MISC_ENABLE, l, h); | 78 | rdmsr(MSR_IA32_MISC_ENABLE, l, h); |
| 75 | wrmsr(MSR_IA32_MISC_ENABLE, l | (1 << 3), h); | 79 | wrmsr(MSR_IA32_MISC_ENABLE, l | MSR_IA32_MISC_ENABLE_TM1, h); |
| 76 | 80 | ||
| 77 | l = apic_read(APIC_LVTTHMR); | 81 | l = apic_read(APIC_LVTTHMR); |
| 78 | apic_write(APIC_LVTTHMR, l & ~APIC_LVT_MASKED); | 82 | apic_write(APIC_LVTTHMR, l & ~APIC_LVT_MASKED); |
| @@ -84,7 +88,209 @@ static void intel_init_thermal(struct cpuinfo_x86 *c) | |||
| 84 | return; | 88 | return; |
| 85 | } | 89 | } |
| 86 | 90 | ||
| 91 | /* | ||
| 92 | * Support for Intel Corrected Machine Check Interrupts (CMCI). This allows | ||
| 93 | * the CPU to raise an interrupt when a corrected machine check happened. | ||
| 94 | * Normally we pick those up using a regular polling timer. | ||
| 95 | * Also supports reliable discovery of shared banks. | ||
| 96 | */ | ||
| 97 | |||
| 98 | static DEFINE_PER_CPU(mce_banks_t, mce_banks_owned); | ||
| 99 | |||
| 100 | /* | ||
| 101 | * cmci_discover_lock protects against parallel discovery attempts | ||
| 102 | * which could race against each other. | ||
| 103 | */ | ||
| 104 | static DEFINE_SPINLOCK(cmci_discover_lock); | ||
| 105 | |||
| 106 | #define CMCI_THRESHOLD 1 | ||
| 107 | |||
| 108 | static int cmci_supported(int *banks) | ||
| 109 | { | ||
| 110 | u64 cap; | ||
| 111 | |||
| 112 | /* | ||
| 113 | * The vendor check is not strictly needed, but the initial | ||
| 114 | * setup is vendor keyed, and this makes sure none of the | ||
| 115 | * backdoor paths are entered otherwise. | ||
| 116 | */ | ||
| 117 | if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) | ||
| 118 | return 0; | ||
| 119 | if (!cpu_has_apic || lapic_get_maxlvt() < 6) | ||
| 120 | return 0; | ||
| 121 | rdmsrl(MSR_IA32_MCG_CAP, cap); | ||
| 122 | *banks = min_t(unsigned, MAX_NR_BANKS, cap & 0xff); | ||
| 123 | return !!(cap & MCG_CMCI_P); | ||
| 124 | } | ||
| 125 | |||
| 126 | /* | ||
| 127 | * The interrupt handler. This is called on every event. | ||
| 128 | * Just call the poller directly to log any events. | ||
| 129 | * This could in theory increase the threshold under high load, | ||
| 130 | * but doesn't for now. | ||
| 131 | */ | ||
| 132 | static void intel_threshold_interrupt(void) | ||
| 133 | { | ||
| 134 | machine_check_poll(MCP_TIMESTAMP, &__get_cpu_var(mce_banks_owned)); | ||
| 135 | mce_notify_user(); | ||
| 136 | } | ||
| 137 | |||
| 138 | static void print_update(char *type, int *hdr, int num) | ||
| 139 | { | ||
| 140 | if (*hdr == 0) | ||
| 141 | printk(KERN_INFO "CPU %d MCA banks", smp_processor_id()); | ||
| 142 | *hdr = 1; | ||
| 143 | printk(KERN_CONT " %s:%d", type, num); | ||
| 144 | } | ||
| 145 | |||
| 146 | /* | ||
| 147 | * Enable CMCI (Corrected Machine Check Interrupt) for available MCE banks | ||
| 148 | * on this CPU. Use the algorithm recommended in the SDM to discover shared | ||
| 149 | * banks. | ||
| 150 | */ | ||
| 151 | static void cmci_discover(int banks, int boot) | ||
| 152 | { | ||
| 153 | unsigned long *owned = (void *)&__get_cpu_var(mce_banks_owned); | ||
| 154 | int hdr = 0; | ||
| 155 | int i; | ||
| 156 | |||
| 157 | spin_lock(&cmci_discover_lock); | ||
| 158 | for (i = 0; i < banks; i++) { | ||
| 159 | u64 val; | ||
| 160 | |||
| 161 | if (test_bit(i, owned)) | ||
| 162 | continue; | ||
| 163 | |||
| 164 | rdmsrl(MSR_IA32_MC0_CTL2 + i, val); | ||
| 165 | |||
| 166 | /* Already owned by someone else? */ | ||
| 167 | if (val & CMCI_EN) { | ||
| 168 | if (test_and_clear_bit(i, owned) || boot) | ||
| 169 | print_update("SHD", &hdr, i); | ||
| 170 | __clear_bit(i, __get_cpu_var(mce_poll_banks)); | ||
| 171 | continue; | ||
| 172 | } | ||
| 173 | |||
| 174 | val |= CMCI_EN | CMCI_THRESHOLD; | ||
| 175 | wrmsrl(MSR_IA32_MC0_CTL2 + i, val); | ||
| 176 | rdmsrl(MSR_IA32_MC0_CTL2 + i, val); | ||
| 177 | |||
| 178 | /* Did the enable bit stick? -- the bank supports CMCI */ | ||
| 179 | if (val & CMCI_EN) { | ||
| 180 | if (!test_and_set_bit(i, owned) || boot) | ||
| 181 | print_update("CMCI", &hdr, i); | ||
| 182 | __clear_bit(i, __get_cpu_var(mce_poll_banks)); | ||
| 183 | } else { | ||
| 184 | WARN_ON(!test_bit(i, __get_cpu_var(mce_poll_banks))); | ||
| 185 | } | ||
| 186 | } | ||
| 187 | spin_unlock(&cmci_discover_lock); | ||
| 188 | if (hdr) | ||
| 189 | printk(KERN_CONT "\n"); | ||
| 190 | } | ||
| 191 | |||
| 192 | /* | ||
| 193 | * Just in case we missed an event during initialization check | ||
| 194 | * all the CMCI owned banks. | ||
| 195 | */ | ||
| 196 | void cmci_recheck(void) | ||
| 197 | { | ||
| 198 | unsigned long flags; | ||
| 199 | int banks; | ||
| 200 | |||
| 201 | if (!mce_available(¤t_cpu_data) || !cmci_supported(&banks)) | ||
| 202 | return; | ||
| 203 | local_irq_save(flags); | ||
| 204 | machine_check_poll(MCP_TIMESTAMP, &__get_cpu_var(mce_banks_owned)); | ||
| 205 | local_irq_restore(flags); | ||
| 206 | } | ||
| 207 | |||
| 208 | /* | ||
| 209 | * Disable CMCI on this CPU for all banks it owns when it goes down. | ||
| 210 | * This allows other CPUs to claim the banks on rediscovery. | ||
| 211 | */ | ||
| 212 | void cmci_clear(void) | ||
| 213 | { | ||
| 214 | int i; | ||
| 215 | int banks; | ||
| 216 | u64 val; | ||
| 217 | |||
| 218 | if (!cmci_supported(&banks)) | ||
| 219 | return; | ||
| 220 | spin_lock(&cmci_discover_lock); | ||
| 221 | for (i = 0; i < banks; i++) { | ||
| 222 | if (!test_bit(i, __get_cpu_var(mce_banks_owned))) | ||
| 223 | continue; | ||
| 224 | /* Disable CMCI */ | ||
| 225 | rdmsrl(MSR_IA32_MC0_CTL2 + i, val); | ||
| 226 | val &= ~(CMCI_EN|CMCI_THRESHOLD_MASK); | ||
| 227 | wrmsrl(MSR_IA32_MC0_CTL2 + i, val); | ||
| 228 | __clear_bit(i, __get_cpu_var(mce_banks_owned)); | ||
| 229 | } | ||
| 230 | spin_unlock(&cmci_discover_lock); | ||
| 231 | } | ||
| 232 | |||
| 233 | /* | ||
| 234 | * After a CPU went down, cycle through all the other CPUs and rediscover | ||
| 235 | * the banks it owned. Must run in process context. | ||
| 236 | */ | ||
| 237 | void cmci_rediscover(int dying) | ||
| 238 | { | ||
| 239 | int banks; | ||
| 240 | int cpu; | ||
| 241 | cpumask_var_t old; | ||
| 242 | |||
| 243 | if (!cmci_supported(&banks)) | ||
| 244 | return; | ||
| 245 | if (!alloc_cpumask_var(&old, GFP_KERNEL)) | ||
| 246 | return; | ||
| 247 | cpumask_copy(old, ¤t->cpus_allowed); | ||
| 248 | |||
| 249 | for_each_online_cpu (cpu) { | ||
| 250 | if (cpu == dying) | ||
| 251 | continue; | ||
| 252 | if (set_cpus_allowed_ptr(current, cpumask_of(cpu))) | ||
| 253 | continue; | ||
| 254 | /* Recheck banks in case CPUs don't all have the same number of banks */ | ||
| 255 | if (cmci_supported(&banks)) | ||
| 256 | cmci_discover(banks, 0); | ||
| 257 | } | ||
| 258 | |||
| 259 | set_cpus_allowed_ptr(current, old); | ||
| 260 | free_cpumask_var(old); | ||
| 261 | } | ||
| 262 | |||
| 263 | /* | ||
| 264 | * Reenable CMCI on this CPU in case a CPU down failed. | ||
| 265 | */ | ||
| 266 | void cmci_reenable(void) | ||
| 267 | { | ||
| 268 | int banks; | ||
| 269 | if (cmci_supported(&banks)) | ||
| 270 | cmci_discover(banks, 0); | ||
| 271 | } | ||
| 272 | |||
| 273 | static void intel_init_cmci(void) | ||
| 274 | { | ||
| 275 | int banks; | ||
| 276 | |||
| 277 | if (!cmci_supported(&banks)) | ||
| 278 | return; | ||
| 279 | |||
| 280 | mce_threshold_vector = intel_threshold_interrupt; | ||
| 281 | cmci_discover(banks, 1); | ||
| 282 | /* | ||
| 283 | * For CPU #0 this runs with the APIC still disabled, but that's | ||
| 284 | * ok because only the vector is set up. We still do another | ||
| 285 | * check for the banks later for CPU #0 just to make sure | ||
| 286 | * to not miss any events. | ||
| 287 | */ | ||
| 288 | apic_write(APIC_LVTCMCI, THRESHOLD_APIC_VECTOR|APIC_DM_FIXED); | ||
| 289 | cmci_recheck(); | ||
| 290 | } | ||
| 291 | |||
| 87 | void mce_intel_feature_init(struct cpuinfo_x86 *c) | 292 | void mce_intel_feature_init(struct cpuinfo_x86 *c) |
| 88 | { | 293 | { |
| 89 | intel_init_thermal(c); | 294 | intel_init_thermal(c); |
| 295 | intel_init_cmci(); | ||
| 90 | } | 296 | } |
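
cmci_discover() above claims a bank by setting CMCI_EN in MSR_IA32_MC0_CTL2 + bank and reading the register back: if the bit was already set the bank belongs to a sibling CPU, and if the freshly written bit does not stick the bank has no CMCI support and stays in the polling set. A reduced sketch of just that probe, assuming the CMCI_EN/CMCI_THRESHOLD definitions used by the code above and ignoring the locking and bitmap updates the real function does:

    /* Returns 1 if this CPU now owns CMCI for 'bank', 0 otherwise. */
    static int cmci_probe_bank(int bank)
    {
            u64 val;

            rdmsrl(MSR_IA32_MC0_CTL2 + bank, val);
            if (val & CMCI_EN)
                    return 0;                       /* owned by another CPU */

            val |= CMCI_EN | CMCI_THRESHOLD;        /* try to claim the bank */
            wrmsrl(MSR_IA32_MC0_CTL2 + bank, val);
            rdmsrl(MSR_IA32_MC0_CTL2 + bank, val);

            return (val & CMCI_EN) != 0;            /* did the enable bit stick? */
    }
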
diff --git a/arch/x86/kernel/cpu/mcheck/p4.c b/arch/x86/kernel/cpu/mcheck/p4.c index 9b60fce09f75..f53bdcbaf382 100644 --- a/arch/x86/kernel/cpu/mcheck/p4.c +++ b/arch/x86/kernel/cpu/mcheck/p4.c | |||
| @@ -85,7 +85,7 @@ static void intel_init_thermal(struct cpuinfo_x86 *c) | |||
| 85 | */ | 85 | */ |
| 86 | rdmsr(MSR_IA32_MISC_ENABLE, l, h); | 86 | rdmsr(MSR_IA32_MISC_ENABLE, l, h); |
| 87 | h = apic_read(APIC_LVTTHMR); | 87 | h = apic_read(APIC_LVTTHMR); |
| 88 | if ((l & (1<<3)) && (h & APIC_DM_SMI)) { | 88 | if ((l & MSR_IA32_MISC_ENABLE_TM1) && (h & APIC_DM_SMI)) { |
| 89 | printk(KERN_DEBUG "CPU%d: Thermal monitoring handled by SMI\n", | 89 | printk(KERN_DEBUG "CPU%d: Thermal monitoring handled by SMI\n", |
| 90 | cpu); | 90 | cpu); |
| 91 | return; /* -EBUSY */ | 91 | return; /* -EBUSY */ |
| @@ -111,7 +111,7 @@ static void intel_init_thermal(struct cpuinfo_x86 *c) | |||
| 111 | vendor_thermal_interrupt = intel_thermal_interrupt; | 111 | vendor_thermal_interrupt = intel_thermal_interrupt; |
| 112 | 112 | ||
| 113 | rdmsr(MSR_IA32_MISC_ENABLE, l, h); | 113 | rdmsr(MSR_IA32_MISC_ENABLE, l, h); |
| 114 | wrmsr(MSR_IA32_MISC_ENABLE, l | (1<<3), h); | 114 | wrmsr(MSR_IA32_MISC_ENABLE, l | MSR_IA32_MISC_ENABLE_TM1, h); |
| 115 | 115 | ||
| 116 | l = apic_read(APIC_LVTTHMR); | 116 | l = apic_read(APIC_LVTTHMR); |
| 117 | apic_write(APIC_LVTTHMR, l & ~APIC_LVT_MASKED); | 117 | apic_write(APIC_LVTTHMR, l & ~APIC_LVT_MASKED); |
diff --git a/arch/x86/kernel/cpu/mcheck/threshold.c b/arch/x86/kernel/cpu/mcheck/threshold.c new file mode 100644 index 000000000000..23ee9e730f78 --- /dev/null +++ b/arch/x86/kernel/cpu/mcheck/threshold.c | |||
| @@ -0,0 +1,29 @@ | |||
| 1 | /* | ||
| 2 | * Common corrected MCE threshold handler code: | ||
| 3 | */ | ||
| 4 | #include <linux/interrupt.h> | ||
| 5 | #include <linux/kernel.h> | ||
| 6 | |||
| 7 | #include <asm/irq_vectors.h> | ||
| 8 | #include <asm/apic.h> | ||
| 9 | #include <asm/idle.h> | ||
| 10 | #include <asm/mce.h> | ||
| 11 | |||
| 12 | static void default_threshold_interrupt(void) | ||
| 13 | { | ||
| 14 | printk(KERN_ERR "Unexpected threshold interrupt at vector %x\n", | ||
| 15 | THRESHOLD_APIC_VECTOR); | ||
| 16 | } | ||
| 17 | |||
| 18 | void (*mce_threshold_vector)(void) = default_threshold_interrupt; | ||
| 19 | |||
| 20 | asmlinkage void mce_threshold_interrupt(void) | ||
| 21 | { | ||
| 22 | exit_idle(); | ||
| 23 | irq_enter(); | ||
| 24 | inc_irq_stat(irq_threshold_count); | ||
| 25 | mce_threshold_vector(); | ||
| 26 | irq_exit(); | ||
| 27 | /* Ack only at the end to avoid potential reentry */ | ||
| 28 | ack_APIC_irq(); | ||
| 29 | } | ||
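
The new threshold.c separates the interrupt entry path (idle exit, irq accounting, APIC ack) from the vendor-specific work through the mce_threshold_vector function pointer, which mce_amd_feature_init() and intel_init_cmci() override. The dispatch idiom on its own, with hypothetical names:

    #include <linux/kernel.h>

    /* Default action until a vendor driver installs its handler. */
    static void default_handler(void)
    {
            printk(KERN_ERR "unexpected event, no handler installed\n");
    }

    static void (*event_vector)(void) = default_handler;

    static void vendor_handler(void)
    {
            /* vendor-specific handling would go here */
    }

    static void vendor_init(void)
    {
            event_vector = vendor_handler;  /* installed once at CPU init */
    }
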
diff --git a/arch/x86/kernel/cpu/mtrr/Makefile b/arch/x86/kernel/cpu/mtrr/Makefile index 191fc0533649..f4361b56f8e9 100644 --- a/arch/x86/kernel/cpu/mtrr/Makefile +++ b/arch/x86/kernel/cpu/mtrr/Makefile | |||
| @@ -1,3 +1,3 @@ | |||
| 1 | obj-y := main.o if.o generic.o state.o | 1 | obj-y := main.o if.o generic.o state.o cleanup.o |
| 2 | obj-$(CONFIG_X86_32) += amd.o cyrix.o centaur.o | 2 | obj-$(CONFIG_X86_32) += amd.o cyrix.o centaur.o |
| 3 | 3 | ||
diff --git a/arch/x86/kernel/cpu/mtrr/cleanup.c b/arch/x86/kernel/cpu/mtrr/cleanup.c new file mode 100644 index 000000000000..ce0fe4b5c04f --- /dev/null +++ b/arch/x86/kernel/cpu/mtrr/cleanup.c | |||
| @@ -0,0 +1,1101 @@ | |||
| 1 | /* MTRR (Memory Type Range Register) cleanup | ||
| 2 | |||
| 3 | Copyright (C) 2009 Yinghai Lu | ||
| 4 | |||
| 5 | This library is free software; you can redistribute it and/or | ||
| 6 | modify it under the terms of the GNU Library General Public | ||
| 7 | License as published by the Free Software Foundation; either | ||
| 8 | version 2 of the License, or (at your option) any later version. | ||
| 9 | |||
| 10 | This library is distributed in the hope that it will be useful, | ||
| 11 | but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 12 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
| 13 | Library General Public License for more details. | ||
| 14 | |||
| 15 | You should have received a copy of the GNU Library General Public | ||
| 16 | License along with this library; if not, write to the Free | ||
| 17 | Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | ||
| 18 | */ | ||
| 19 | |||
| 20 | #include <linux/module.h> | ||
| 21 | #include <linux/init.h> | ||
| 22 | #include <linux/pci.h> | ||
| 23 | #include <linux/smp.h> | ||
| 24 | #include <linux/cpu.h> | ||
| 25 | #include <linux/mutex.h> | ||
| 26 | #include <linux/sort.h> | ||
| 27 | |||
| 28 | #include <asm/e820.h> | ||
| 29 | #include <asm/mtrr.h> | ||
| 30 | #include <asm/uaccess.h> | ||
| 31 | #include <asm/processor.h> | ||
| 32 | #include <asm/msr.h> | ||
| 33 | #include <asm/kvm_para.h> | ||
| 34 | #include "mtrr.h" | ||
| 35 | |||
| 36 | /* should be related to MTRR_VAR_RANGES nums */ | ||
| 37 | #define RANGE_NUM 256 | ||
| 38 | |||
| 39 | struct res_range { | ||
| 40 | unsigned long start; | ||
| 41 | unsigned long end; | ||
| 42 | }; | ||
| 43 | |||
| 44 | static int __init | ||
| 45 | add_range(struct res_range *range, int nr_range, unsigned long start, | ||
| 46 | unsigned long end) | ||
| 47 | { | ||
| 48 | /* out of slots */ | ||
| 49 | if (nr_range >= RANGE_NUM) | ||
| 50 | return nr_range; | ||
| 51 | |||
| 52 | range[nr_range].start = start; | ||
| 53 | range[nr_range].end = end; | ||
| 54 | |||
| 55 | nr_range++; | ||
| 56 | |||
| 57 | return nr_range; | ||
| 58 | } | ||
| 59 | |||
| 60 | static int __init | ||
| 61 | add_range_with_merge(struct res_range *range, int nr_range, unsigned long start, | ||
| 62 | unsigned long end) | ||
| 63 | { | ||
| 64 | int i; | ||
| 65 | |||
| 66 | /* try to merge it with old one */ | ||
| 67 | for (i = 0; i < nr_range; i++) { | ||
| 68 | unsigned long final_start, final_end; | ||
| 69 | unsigned long common_start, common_end; | ||
| 70 | |||
| 71 | if (!range[i].end) | ||
| 72 | continue; | ||
| 73 | |||
| 74 | common_start = max(range[i].start, start); | ||
| 75 | common_end = min(range[i].end, end); | ||
| 76 | if (common_start > common_end + 1) | ||
| 77 | continue; | ||
| 78 | |||
| 79 | final_start = min(range[i].start, start); | ||
| 80 | final_end = max(range[i].end, end); | ||
| 81 | |||
| 82 | range[i].start = final_start; | ||
| 83 | range[i].end = final_end; | ||
| 84 | return nr_range; | ||
| 85 | } | ||
| 86 | |||
| 87 | /* need to add that */ | ||
| 88 | return add_range(range, nr_range, start, end); | ||
| 89 | } | ||
| 90 | |||
| 91 | static void __init | ||
| 92 | subtract_range(struct res_range *range, unsigned long start, unsigned long end) | ||
| 93 | { | ||
| 94 | int i, j; | ||
| 95 | |||
| 96 | for (j = 0; j < RANGE_NUM; j++) { | ||
| 97 | if (!range[j].end) | ||
| 98 | continue; | ||
| 99 | |||
| 100 | if (start <= range[j].start && end >= range[j].end) { | ||
| 101 | range[j].start = 0; | ||
| 102 | range[j].end = 0; | ||
| 103 | continue; | ||
| 104 | } | ||
| 105 | |||
| 106 | if (start <= range[j].start && end < range[j].end && | ||
| 107 | range[j].start < end + 1) { | ||
| 108 | range[j].start = end + 1; | ||
| 109 | continue; | ||
| 110 | } | ||
| 111 | |||
| 112 | |||
| 113 | if (start > range[j].start && end >= range[j].end && | ||
| 114 | range[j].end > start - 1) { | ||
| 115 | range[j].end = start - 1; | ||
| 116 | continue; | ||
| 117 | } | ||
| 118 | |||
| 119 | if (start > range[j].start && end < range[j].end) { | ||
| 120 | /* find the new spare */ | ||
| 121 | for (i = 0; i < RANGE_NUM; i++) { | ||
| 122 | if (range[i].end == 0) | ||
| 123 | break; | ||
| 124 | } | ||
| 125 | if (i < RANGE_NUM) { | ||
| 126 | range[i].end = range[j].end; | ||
| 127 | range[i].start = end + 1; | ||
| 128 | } else { | ||
| 129 | printk(KERN_ERR "ran out of slots in ranges\n"); | ||
| 130 | } | ||
| 131 | range[j].end = start - 1; | ||
| 132 | continue; | ||
| 133 | } | ||
| 134 | } | ||
| 135 | } | ||
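
subtract_range() above handles four overlap cases: the subtracted span swallows an entry completely, clips its start, clips its end, or splits it in two (the split consumes a spare slot). A short usage sketch with made-up page-frame numbers, carving the 640K-1M legacy hole out of a single 0-4G range:

    static struct res_range example[RANGE_NUM] __initdata;

    static void __init example_usage(void)
    {
            int nr = 0;

            /* one RAM range covering pfns 0x0 - 0xfffff (0 - 4G) */
            nr = add_range_with_merge(example, nr, 0x0, 0xfffff);

            /* punch out pfns 0xa0 - 0xff (640K - 1M) */
            subtract_range(example, 0xa0, 0xff);

            /* example[] now holds [0x0, 0x9f] and [0x100, 0xfffff] */
    }
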
| 136 | |||
| 137 | static int __init cmp_range(const void *x1, const void *x2) | ||
| 138 | { | ||
| 139 | const struct res_range *r1 = x1; | ||
| 140 | const struct res_range *r2 = x2; | ||
| 141 | long start1, start2; | ||
| 142 | |||
| 143 | start1 = r1->start; | ||
| 144 | start2 = r2->start; | ||
| 145 | |||
| 146 | return start1 - start2; | ||
| 147 | } | ||
| 148 | |||
| 149 | struct var_mtrr_range_state { | ||
| 150 | unsigned long base_pfn; | ||
| 151 | unsigned long size_pfn; | ||
| 152 | mtrr_type type; | ||
| 153 | }; | ||
| 154 | |||
| 155 | static struct var_mtrr_range_state __initdata range_state[RANGE_NUM]; | ||
| 156 | static int __initdata debug_print; | ||
| 157 | |||
| 158 | static int __init | ||
| 159 | x86_get_mtrr_mem_range(struct res_range *range, int nr_range, | ||
| 160 | unsigned long extra_remove_base, | ||
| 161 | unsigned long extra_remove_size) | ||
| 162 | { | ||
| 163 | unsigned long base, size; | ||
| 164 | mtrr_type type; | ||
| 165 | int i; | ||
| 166 | |||
| 167 | for (i = 0; i < num_var_ranges; i++) { | ||
| 168 | type = range_state[i].type; | ||
| 169 | if (type != MTRR_TYPE_WRBACK) | ||
| 170 | continue; | ||
| 171 | base = range_state[i].base_pfn; | ||
| 172 | size = range_state[i].size_pfn; | ||
| 173 | nr_range = add_range_with_merge(range, nr_range, base, | ||
| 174 | base + size - 1); | ||
| 175 | } | ||
| 176 | if (debug_print) { | ||
| 177 | printk(KERN_DEBUG "After WB checking\n"); | ||
| 178 | for (i = 0; i < nr_range; i++) | ||
| 179 | printk(KERN_DEBUG "MTRR MAP PFN: %016lx - %016lx\n", | ||
| 180 | range[i].start, range[i].end + 1); | ||
| 181 | } | ||
| 182 | |||
| 183 | /* take out UC ranges */ | ||
| 184 | for (i = 0; i < num_var_ranges; i++) { | ||
| 185 | type = range_state[i].type; | ||
| 186 | if (type != MTRR_TYPE_UNCACHABLE && | ||
| 187 | type != MTRR_TYPE_WRPROT) | ||
| 188 | continue; | ||
| 189 | size = range_state[i].size_pfn; | ||
| 190 | if (!size) | ||
| 191 | continue; | ||
| 192 | base = range_state[i].base_pfn; | ||
| 193 | if (base < (1<<(20-PAGE_SHIFT)) && mtrr_state.have_fixed && | ||
| 194 | (mtrr_state.enabled & 1)) { | ||
| 195 | /* Var MTRR contains UC entry below 1M? Skip it: */ | ||
| 196 | printk(KERN_WARNING "WARNING: BIOS bug: VAR MTRR %d " | ||
| 197 | "contains strange UC entry under 1M, check " | ||
| 198 | "with your system vendor!\n", i); | ||
| 199 | if (base + size <= (1<<(20-PAGE_SHIFT))) | ||
| 200 | continue; | ||
| 201 | size -= (1<<(20-PAGE_SHIFT)) - base; | ||
| 202 | base = 1<<(20-PAGE_SHIFT); | ||
| 203 | } | ||
| 204 | subtract_range(range, base, base + size - 1); | ||
| 205 | } | ||
| 206 | if (extra_remove_size) | ||
| 207 | subtract_range(range, extra_remove_base, | ||
| 208 | extra_remove_base + extra_remove_size - 1); | ||
| 209 | |||
| 210 | /* get new range num */ | ||
| 211 | nr_range = 0; | ||
| 212 | for (i = 0; i < RANGE_NUM; i++) { | ||
| 213 | if (!range[i].end) | ||
| 214 | continue; | ||
| 215 | nr_range++; | ||
| 216 | } | ||
| 217 | if (debug_print) { | ||
| 218 | printk(KERN_DEBUG "After UC checking\n"); | ||
| 219 | for (i = 0; i < nr_range; i++) | ||
| 220 | printk(KERN_DEBUG "MTRR MAP PFN: %016lx - %016lx\n", | ||
| 221 | range[i].start, range[i].end + 1); | ||
| 222 | } | ||
| 223 | |||
| 224 | /* sort the ranges */ | ||
| 225 | sort(range, nr_range, sizeof(struct res_range), cmp_range, NULL); | ||
| 226 | if (debug_print) { | ||
| 227 | printk(KERN_DEBUG "After sorting\n"); | ||
| 228 | for (i = 0; i < nr_range; i++) | ||
| 229 | printk(KERN_DEBUG "MTRR MAP PFN: %016lx - %016lx\n", | ||
| 230 | range[i].start, range[i].end + 1); | ||
| 231 | } | ||
| 232 | |||
| 233 | /* clear those that are not used */ | ||
| 234 | for (i = nr_range; i < RANGE_NUM; i++) | ||
| 235 | memset(&range[i], 0, sizeof(range[i])); | ||
| 236 | |||
| 237 | return nr_range; | ||
| 238 | } | ||
| 239 | |||
| 240 | static struct res_range __initdata range[RANGE_NUM]; | ||
| 241 | static int __initdata nr_range; | ||
| 242 | |||
| 243 | #ifdef CONFIG_MTRR_SANITIZER | ||
| 244 | |||
| 245 | static unsigned long __init sum_ranges(struct res_range *range, int nr_range) | ||
| 246 | { | ||
| 247 | unsigned long sum; | ||
| 248 | int i; | ||
| 249 | |||
| 250 | sum = 0; | ||
| 251 | for (i = 0; i < nr_range; i++) | ||
| 252 | sum += range[i].end + 1 - range[i].start; | ||
| 253 | |||
| 254 | return sum; | ||
| 255 | } | ||
| 256 | |||
| 257 | static int enable_mtrr_cleanup __initdata = | ||
| 258 | CONFIG_MTRR_SANITIZER_ENABLE_DEFAULT; | ||
| 259 | |||
| 260 | static int __init disable_mtrr_cleanup_setup(char *str) | ||
| 261 | { | ||
| 262 | enable_mtrr_cleanup = 0; | ||
| 263 | return 0; | ||
| 264 | } | ||
| 265 | early_param("disable_mtrr_cleanup", disable_mtrr_cleanup_setup); | ||
| 266 | |||
| 267 | static int __init enable_mtrr_cleanup_setup(char *str) | ||
| 268 | { | ||
| 269 | enable_mtrr_cleanup = 1; | ||
| 270 | return 0; | ||
| 271 | } | ||
| 272 | early_param("enable_mtrr_cleanup", enable_mtrr_cleanup_setup); | ||
| 273 | |||
| 274 | static int __init mtrr_cleanup_debug_setup(char *str) | ||
| 275 | { | ||
| 276 | debug_print = 1; | ||
| 277 | return 0; | ||
| 278 | } | ||
| 279 | early_param("mtrr_cleanup_debug", mtrr_cleanup_debug_setup); | ||
| 280 | |||
| 281 | struct var_mtrr_state { | ||
| 282 | unsigned long range_startk; | ||
| 283 | unsigned long range_sizek; | ||
| 284 | unsigned long chunk_sizek; | ||
| 285 | unsigned long gran_sizek; | ||
| 286 | unsigned int reg; | ||
| 287 | }; | ||
| 288 | |||
| 289 | static void __init | ||
| 290 | set_var_mtrr(unsigned int reg, unsigned long basek, unsigned long sizek, | ||
| 291 | unsigned char type, unsigned int address_bits) | ||
| 292 | { | ||
| 293 | u32 base_lo, base_hi, mask_lo, mask_hi; | ||
| 294 | u64 base, mask; | ||
| 295 | |||
| 296 | if (!sizek) { | ||
| 297 | fill_mtrr_var_range(reg, 0, 0, 0, 0); | ||
| 298 | return; | ||
| 299 | } | ||
| 300 | |||
| 301 | mask = (1ULL << address_bits) - 1; | ||
| 302 | mask &= ~((((u64)sizek) << 10) - 1); | ||
| 303 | |||
| 304 | base = ((u64)basek) << 10; | ||
| 305 | |||
| 306 | base |= type; | ||
| 307 | mask |= 0x800; | ||
| 308 | |||
| 309 | base_lo = base & ((1ULL<<32) - 1); | ||
| 310 | base_hi = base >> 32; | ||
| 311 | |||
| 312 | mask_lo = mask & ((1ULL<<32) - 1); | ||
| 313 | mask_hi = mask >> 32; | ||
| 314 | |||
| 315 | fill_mtrr_var_range(reg, base_lo, base_hi, mask_lo, mask_hi); | ||
| 316 | } | ||
| 317 | |||
| 318 | static void __init | ||
| 319 | save_var_mtrr(unsigned int reg, unsigned long basek, unsigned long sizek, | ||
| 320 | unsigned char type) | ||
| 321 | { | ||
| 322 | range_state[reg].base_pfn = basek >> (PAGE_SHIFT - 10); | ||
| 323 | range_state[reg].size_pfn = sizek >> (PAGE_SHIFT - 10); | ||
| 324 | range_state[reg].type = type; | ||
| 325 | } | ||
| 326 | |||
| 327 | static void __init | ||
| 328 | set_var_mtrr_all(unsigned int address_bits) | ||
| 329 | { | ||
| 330 | unsigned long basek, sizek; | ||
| 331 | unsigned char type; | ||
| 332 | unsigned int reg; | ||
| 333 | |||
| 334 | for (reg = 0; reg < num_var_ranges; reg++) { | ||
| 335 | basek = range_state[reg].base_pfn << (PAGE_SHIFT - 10); | ||
| 336 | sizek = range_state[reg].size_pfn << (PAGE_SHIFT - 10); | ||
| 337 | type = range_state[reg].type; | ||
| 338 | |||
| 339 | set_var_mtrr(reg, basek, sizek, type, address_bits); | ||
| 340 | } | ||
| 341 | } | ||
| 342 | |||
| 343 | static unsigned long to_size_factor(unsigned long sizek, char *factorp) | ||
| 344 | { | ||
| 345 | char factor; | ||
| 346 | unsigned long base = sizek; | ||
| 347 | |||
| 348 | if (base & ((1<<10) - 1)) { | ||
| 349 | /* not MB alignment */ | ||
| 350 | factor = 'K'; | ||
| 351 | } else if (base & ((1<<20) - 1)) { | ||
| 352 | factor = 'M'; | ||
| 353 | base >>= 10; | ||
| 354 | } else { | ||
| 355 | factor = 'G'; | ||
| 356 | base >>= 20; | ||
| 357 | } | ||
| 358 | |||
| 359 | *factorp = factor; | ||
| 360 | |||
| 361 | return base; | ||
| 362 | } | ||
| 363 | |||
| 364 | static unsigned int __init | ||
| 365 | range_to_mtrr(unsigned int reg, unsigned long range_startk, | ||
| 366 | unsigned long range_sizek, unsigned char type) | ||
| 367 | { | ||
| 368 | if (!range_sizek || (reg >= num_var_ranges)) | ||
| 369 | return reg; | ||
| 370 | |||
| 371 | while (range_sizek) { | ||
| 372 | unsigned long max_align, align; | ||
| 373 | unsigned long sizek; | ||
| 374 | |||
| 375 | /* Compute the maximum size I can make a range */ | ||
| 376 | if (range_startk) | ||
| 377 | max_align = ffs(range_startk) - 1; | ||
| 378 | else | ||
| 379 | max_align = 32; | ||
| 380 | align = fls(range_sizek) - 1; | ||
| 381 | if (align > max_align) | ||
| 382 | align = max_align; | ||
| 383 | |||
| 384 | sizek = 1 << align; | ||
| 385 | if (debug_print) { | ||
| 386 | char start_factor = 'K', size_factor = 'K'; | ||
| 387 | unsigned long start_base, size_base; | ||
| 388 | |||
| 389 | start_base = to_size_factor(range_startk, | ||
| 390 | &start_factor), | ||
| 391 | size_base = to_size_factor(sizek, &size_factor), | ||
| 392 | |||
| 393 | printk(KERN_DEBUG "Setting variable MTRR %d, " | ||
| 394 | "base: %ld%cB, range: %ld%cB, type %s\n", | ||
| 395 | reg, start_base, start_factor, | ||
| 396 | size_base, size_factor, | ||
| 397 | (type == MTRR_TYPE_UNCACHABLE) ? "UC" : | ||
| 398 | ((type == MTRR_TYPE_WRBACK) ? "WB" : "Other") | ||
| 399 | ); | ||
| 400 | } | ||
| 401 | save_var_mtrr(reg++, range_startk, sizek, type); | ||
| 402 | range_startk += sizek; | ||
| 403 | range_sizek -= sizek; | ||
| 404 | if (reg >= num_var_ranges) | ||
| 405 | break; | ||
| 406 | } | ||
| 407 | return reg; | ||
| 408 | } | ||
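
range_to_mtrr() above carves a range greedily: each step emits the largest power-of-two chunk allowed by both the current base alignment (ffs(range_startk) - 1) and the remaining size (fls(range_sizek) - 1). As a worked example with assumed numbers, a 3.5 GB write-back range starting at 0 (range_sizek = 3670016 KB) becomes three variable MTRRs: 2 GB at base 0, then 1 GB at base 2 GB, then 512 MB at base 3 GB; the alignment constraint keeps holding because the base always advances by the size that was just emitted.
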
| 409 | |||
| 410 | static unsigned __init | ||
| 411 | range_to_mtrr_with_hole(struct var_mtrr_state *state, unsigned long basek, | ||
| 412 | unsigned long sizek) | ||
| 413 | { | ||
| 414 | unsigned long hole_basek, hole_sizek; | ||
| 415 | unsigned long second_basek, second_sizek; | ||
| 416 | unsigned long range0_basek, range0_sizek; | ||
| 417 | unsigned long range_basek, range_sizek; | ||
| 418 | unsigned long chunk_sizek; | ||
| 419 | unsigned long gran_sizek; | ||
| 420 | |||
| 421 | hole_basek = 0; | ||
| 422 | hole_sizek = 0; | ||
| 423 | second_basek = 0; | ||
| 424 | second_sizek = 0; | ||
| 425 | chunk_sizek = state->chunk_sizek; | ||
| 426 | gran_sizek = state->gran_sizek; | ||
| 427 | |||
| 428 | /* align to gran size, to prevent small blocks from using up MTRRs */ | ||
| 429 | range_basek = ALIGN(state->range_startk, gran_sizek); | ||
| 430 | if ((range_basek > basek) && basek) | ||
| 431 | return second_sizek; | ||
| 432 | state->range_sizek -= (range_basek - state->range_startk); | ||
| 433 | range_sizek = ALIGN(state->range_sizek, gran_sizek); | ||
| 434 | |||
| 435 | while (range_sizek > state->range_sizek) { | ||
| 436 | range_sizek -= gran_sizek; | ||
| 437 | if (!range_sizek) | ||
| 438 | return 0; | ||
| 439 | } | ||
| 440 | state->range_sizek = range_sizek; | ||
| 441 | |||
| 442 | /* try to append some small hole */ | ||
| 443 | range0_basek = state->range_startk; | ||
| 444 | range0_sizek = ALIGN(state->range_sizek, chunk_sizek); | ||
| 445 | |||
| 446 | /* no increase */ | ||
| 447 | if (range0_sizek == state->range_sizek) { | ||
| 448 | if (debug_print) | ||
| 449 | printk(KERN_DEBUG "rangeX: %016lx - %016lx\n", | ||
| 450 | range0_basek<<10, | ||
| 451 | (range0_basek + state->range_sizek)<<10); | ||
| 452 | state->reg = range_to_mtrr(state->reg, range0_basek, | ||
| 453 | state->range_sizek, MTRR_TYPE_WRBACK); | ||
| 454 | return 0; | ||
| 455 | } | ||
| 456 | |||
| 457 | /* only cut back when it is not the last range */ | ||
| 458 | if (sizek) { | ||
| 459 | while (range0_basek + range0_sizek > (basek + sizek)) { | ||
| 460 | if (range0_sizek >= chunk_sizek) | ||
| 461 | range0_sizek -= chunk_sizek; | ||
| 462 | else | ||
| 463 | range0_sizek = 0; | ||
| 464 | |||
| 465 | if (!range0_sizek) | ||
| 466 | break; | ||
| 467 | } | ||
| 468 | } | ||
| 469 | |||
| 470 | second_try: | ||
| 471 | range_basek = range0_basek + range0_sizek; | ||
| 472 | |||
| 473 | /* one hole in the middle */ | ||
| 474 | if (range_basek > basek && range_basek <= (basek + sizek)) | ||
| 475 | second_sizek = range_basek - basek; | ||
| 476 | |||
| 477 | if (range0_sizek > state->range_sizek) { | ||
| 478 | |||
| 479 | /* one hole in middle or at end */ | ||
| 480 | hole_sizek = range0_sizek - state->range_sizek - second_sizek; | ||
| 481 | |||
| 482 | /* hole size should be less than half of range0 size */ | ||
| 483 | if (hole_sizek >= (range0_sizek >> 1) && | ||
| 484 | range0_sizek >= chunk_sizek) { | ||
| 485 | range0_sizek -= chunk_sizek; | ||
| 486 | second_sizek = 0; | ||
| 487 | hole_sizek = 0; | ||
| 488 | |||
| 489 | goto second_try; | ||
| 490 | } | ||
| 491 | } | ||
| 492 | |||
| 493 | if (range0_sizek) { | ||
| 494 | if (debug_print) | ||
| 495 | printk(KERN_DEBUG "range0: %016lx - %016lx\n", | ||
| 496 | range0_basek<<10, | ||
| 497 | (range0_basek + range0_sizek)<<10); | ||
| 498 | state->reg = range_to_mtrr(state->reg, range0_basek, | ||
| 499 | range0_sizek, MTRR_TYPE_WRBACK); | ||
| 500 | } | ||
| 501 | |||
| 502 | if (range0_sizek < state->range_sizek) { | ||
| 503 | /* need to handle the leftover range */ | ||
| 504 | range_sizek = state->range_sizek - range0_sizek; | ||
| 505 | |||
| 506 | if (debug_print) | ||
| 507 | printk(KERN_DEBUG "range: %016lx - %016lx\n", | ||
| 508 | range_basek<<10, | ||
| 509 | (range_basek + range_sizek)<<10); | ||
| 510 | state->reg = range_to_mtrr(state->reg, range_basek, | ||
| 511 | range_sizek, MTRR_TYPE_WRBACK); | ||
| 512 | } | ||
| 513 | |||
| 514 | if (hole_sizek) { | ||
| 515 | hole_basek = range_basek - hole_sizek - second_sizek; | ||
| 516 | if (debug_print) | ||
| 517 | printk(KERN_DEBUG "hole: %016lx - %016lx\n", | ||
| 518 | hole_basek<<10, | ||
| 519 | (hole_basek + hole_sizek)<<10); | ||
| 520 | state->reg = range_to_mtrr(state->reg, hole_basek, | ||
| 521 | hole_sizek, MTRR_TYPE_UNCACHABLE); | ||
| 522 | } | ||
| 523 | |||
| 524 | return second_sizek; | ||
| 525 | } | ||
| 526 | |||
| 527 | static void __init | ||
| 528 | set_var_mtrr_range(struct var_mtrr_state *state, unsigned long base_pfn, | ||
| 529 | unsigned long size_pfn) | ||
| 530 | { | ||
| 531 | unsigned long basek, sizek; | ||
| 532 | unsigned long second_sizek = 0; | ||
| 533 | |||
| 534 | if (state->reg >= num_var_ranges) | ||
| 535 | return; | ||
| 536 | |||
| 537 | basek = base_pfn << (PAGE_SHIFT - 10); | ||
| 538 | sizek = size_pfn << (PAGE_SHIFT - 10); | ||
| 539 | |||
| 540 | /* See if I can merge with the last range */ | ||
| 541 | if ((basek <= 1024) || | ||
| 542 | (state->range_startk + state->range_sizek == basek)) { | ||
| 543 | unsigned long endk = basek + sizek; | ||
| 544 | state->range_sizek = endk - state->range_startk; | ||
| 545 | return; | ||
| 546 | } | ||
| 547 | /* Write the range mtrrs */ | ||
| 548 | if (state->range_sizek != 0) | ||
| 549 | second_sizek = range_to_mtrr_with_hole(state, basek, sizek); | ||
| 550 | |||
| 551 | /* Allocate an msr */ | ||
| 552 | state->range_startk = basek + second_sizek; | ||
| 553 | state->range_sizek = sizek - second_sizek; | ||
| 554 | } | ||
| 555 | |||
| 556 | /* minimum size of an mtrr block that can take a hole */ | ||
| 557 | static u64 mtrr_chunk_size __initdata = (256ULL<<20); | ||
| 558 | |||
| 559 | static int __init parse_mtrr_chunk_size_opt(char *p) | ||
| 560 | { | ||
| 561 | if (!p) | ||
| 562 | return -EINVAL; | ||
| 563 | mtrr_chunk_size = memparse(p, &p); | ||
| 564 | return 0; | ||
| 565 | } | ||
| 566 | early_param("mtrr_chunk_size", parse_mtrr_chunk_size_opt); | ||
| 567 | |||
| 568 | /* granularity of an mtrr block */ | ||
| 569 | static u64 mtrr_gran_size __initdata; | ||
| 570 | |||
| 571 | static int __init parse_mtrr_gran_size_opt(char *p) | ||
| 572 | { | ||
| 573 | if (!p) | ||
| 574 | return -EINVAL; | ||
| 575 | mtrr_gran_size = memparse(p, &p); | ||
| 576 | return 0; | ||
| 577 | } | ||
| 578 | early_param("mtrr_gran_size", parse_mtrr_gran_size_opt); | ||
| 579 | |||
| 580 | static int nr_mtrr_spare_reg __initdata = | ||
| 581 | CONFIG_MTRR_SANITIZER_SPARE_REG_NR_DEFAULT; | ||
| 582 | |||
| 583 | static int __init parse_mtrr_spare_reg(char *arg) | ||
| 584 | { | ||
| 585 | if (arg) | ||
| 586 | nr_mtrr_spare_reg = simple_strtoul(arg, NULL, 0); | ||
| 587 | return 0; | ||
| 588 | } | ||
| 589 | |||
| 590 | early_param("mtrr_spare_reg_nr", parse_mtrr_spare_reg); | ||
| 591 | |||
| 592 | static int __init | ||
| 593 | x86_setup_var_mtrrs(struct res_range *range, int nr_range, | ||
| 594 | u64 chunk_size, u64 gran_size) | ||
| 595 | { | ||
| 596 | struct var_mtrr_state var_state; | ||
| 597 | int i; | ||
| 598 | int num_reg; | ||
| 599 | |||
| 600 | var_state.range_startk = 0; | ||
| 601 | var_state.range_sizek = 0; | ||
| 602 | var_state.reg = 0; | ||
| 603 | var_state.chunk_sizek = chunk_size >> 10; | ||
| 604 | var_state.gran_sizek = gran_size >> 10; | ||
| 605 | |||
| 606 | memset(range_state, 0, sizeof(range_state)); | ||
| 607 | |||
| 608 | /* Write the range etc */ | ||
| 609 | for (i = 0; i < nr_range; i++) | ||
| 610 | set_var_mtrr_range(&var_state, range[i].start, | ||
| 611 | range[i].end - range[i].start + 1); | ||
| 612 | |||
| 613 | /* Write the last range */ | ||
| 614 | if (var_state.range_sizek != 0) | ||
| 615 | range_to_mtrr_with_hole(&var_state, 0, 0); | ||
| 616 | |||
| 617 | num_reg = var_state.reg; | ||
| 618 | /* Clear out the extra MTRR's */ | ||
| 619 | while (var_state.reg < num_var_ranges) { | ||
| 620 | save_var_mtrr(var_state.reg, 0, 0, 0); | ||
| 621 | var_state.reg++; | ||
| 622 | } | ||
| 623 | |||
| 624 | return num_reg; | ||
| 625 | } | ||
| 626 | |||
| 627 | struct mtrr_cleanup_result { | ||
| 628 | unsigned long gran_sizek; | ||
| 629 | unsigned long chunk_sizek; | ||
| 630 | unsigned long lose_cover_sizek; | ||
| 631 | unsigned int num_reg; | ||
| 632 | int bad; | ||
| 633 | }; | ||
| 634 | |||
| 635 | /* | ||
| 636 | * gran_size: 64K, 128K, 256K, 512K, 1M, 2M, ..., 2G | ||
| 637 | * chunk size: gran_size, ..., 2G | ||
| 638 | * so we need 16 + 15 + ... + 1 = (1 + 16) * 16 / 2 = 136 results | ||
| 639 | */ | ||
| 640 | #define NUM_RESULT 136 | ||
| 641 | #define PSHIFT (PAGE_SHIFT - 10) | ||
| 642 | |||
| 643 | static struct mtrr_cleanup_result __initdata result[NUM_RESULT]; | ||
| 644 | static unsigned long __initdata min_loss_pfn[RANGE_NUM]; | ||
| 645 | |||
| 646 | static void __init print_out_mtrr_range_state(void) | ||
| 647 | { | ||
| 648 | int i; | ||
| 649 | char start_factor = 'K', size_factor = 'K'; | ||
| 650 | unsigned long start_base, size_base; | ||
| 651 | mtrr_type type; | ||
| 652 | |||
| 653 | for (i = 0; i < num_var_ranges; i++) { | ||
| 654 | |||
| 655 | size_base = range_state[i].size_pfn << (PAGE_SHIFT - 10); | ||
| 656 | if (!size_base) | ||
| 657 | continue; | ||
| 658 | |||
| 659 | size_base = to_size_factor(size_base, &size_factor), | ||
| 660 | start_base = range_state[i].base_pfn << (PAGE_SHIFT - 10); | ||
| 661 | start_base = to_size_factor(start_base, &start_factor), | ||
| 662 | type = range_state[i].type; | ||
| 663 | |||
| 664 | printk(KERN_DEBUG "reg %d, base: %ld%cB, range: %ld%cB, type %s\n", | ||
| 665 | i, start_base, start_factor, | ||
| 666 | size_base, size_factor, | ||
| 667 | (type == MTRR_TYPE_UNCACHABLE) ? "UC" : | ||
| 668 | ((type == MTRR_TYPE_WRPROT) ? "WP" : | ||
| 669 | ((type == MTRR_TYPE_WRBACK) ? "WB" : "Other")) | ||
| 670 | ); | ||
| 671 | } | ||
| 672 | } | ||
| 673 | |||
| 674 | static int __init mtrr_need_cleanup(void) | ||
| 675 | { | ||
| 676 | int i; | ||
| 677 | mtrr_type type; | ||
| 678 | unsigned long size; | ||
| 679 | /* one extra slot counts the zero-size (unused) entries */ | ||
| 680 | int num[MTRR_NUM_TYPES + 1]; | ||
| 681 | |||
| 682 | /* count the number of entries of each type */ | ||
| 683 | memset(num, 0, sizeof(num)); | ||
| 684 | for (i = 0; i < num_var_ranges; i++) { | ||
| 685 | type = range_state[i].type; | ||
| 686 | size = range_state[i].size_pfn; | ||
| 687 | if (type >= MTRR_NUM_TYPES) | ||
| 688 | continue; | ||
| 689 | if (!size) | ||
| 690 | type = MTRR_NUM_TYPES; | ||
| 691 | if (type == MTRR_TYPE_WRPROT) | ||
| 692 | type = MTRR_TYPE_UNCACHABLE; | ||
| 693 | num[type]++; | ||
| 694 | } | ||
| 695 | |||
| 696 | /* check if we got UC entries */ | ||
| 697 | if (!num[MTRR_TYPE_UNCACHABLE]) | ||
| 698 | return 0; | ||
| 699 | |||
| 700 | /* check if we only had WB and UC */ | ||
| 701 | if (num[MTRR_TYPE_WRBACK] + num[MTRR_TYPE_UNCACHABLE] != | ||
| 702 | num_var_ranges - num[MTRR_NUM_TYPES]) | ||
| 703 | return 0; | ||
| 704 | |||
| 705 | return 1; | ||
| 706 | } | ||
| 707 | |||
| 708 | static unsigned long __initdata range_sums; | ||
| 709 | static void __init mtrr_calc_range_state(u64 chunk_size, u64 gran_size, | ||
| 710 | unsigned long extra_remove_base, | ||
| 711 | unsigned long extra_remove_size, | ||
| 712 | int i) | ||
| 713 | { | ||
| 714 | int num_reg; | ||
| 715 | static struct res_range range_new[RANGE_NUM]; | ||
| 716 | static int nr_range_new; | ||
| 717 | unsigned long range_sums_new; | ||
| 718 | |||
| 719 | /* convert ranges to var ranges state */ | ||
| 720 | num_reg = x86_setup_var_mtrrs(range, nr_range, | ||
| 721 | chunk_size, gran_size); | ||
| 722 | |||
| 723 | /* we got new setting in range_state, check it */ | ||
| 724 | memset(range_new, 0, sizeof(range_new)); | ||
| 725 | nr_range_new = x86_get_mtrr_mem_range(range_new, 0, | ||
| 726 | extra_remove_base, extra_remove_size); | ||
| 727 | range_sums_new = sum_ranges(range_new, nr_range_new); | ||
| 728 | |||
| 729 | result[i].chunk_sizek = chunk_size >> 10; | ||
| 730 | result[i].gran_sizek = gran_size >> 10; | ||
| 731 | result[i].num_reg = num_reg; | ||
| 732 | if (range_sums < range_sums_new) { | ||
| 733 | result[i].lose_cover_sizek = | ||
| 734 | (range_sums_new - range_sums) << PSHIFT; | ||
| 735 | result[i].bad = 1; | ||
| 736 | } else | ||
| 737 | result[i].lose_cover_sizek = | ||
| 738 | (range_sums - range_sums_new) << PSHIFT; | ||
| 739 | |||
| 740 | /* double check it */ | ||
| 741 | if (!result[i].bad && !result[i].lose_cover_sizek) { | ||
| 742 | if (nr_range_new != nr_range || | ||
| 743 | memcmp(range, range_new, sizeof(range))) | ||
| 744 | result[i].bad = 1; | ||
| 745 | } | ||
| 746 | |||
| 747 | if (!result[i].bad && (range_sums - range_sums_new < | ||
| 748 | min_loss_pfn[num_reg])) { | ||
| 749 | min_loss_pfn[num_reg] = | ||
| 750 | range_sums - range_sums_new; | ||
| 751 | } | ||
| 752 | } | ||
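Loss is tracked in KB: PSHIFT is PAGE_SHIFT - 10, so with 4 KB pages a shortfall of, say, 16 pages becomes 16 << 2 = 64 (KB) in lose_cover_sizek. A candidate is marked bad either when it covers more RAM than the original layout (range_sums_new > range_sums) or when a nominally loss-free result fails to reproduce the original range list exactly.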
| 753 | |||
| 754 | static void __init mtrr_print_out_one_result(int i) | ||
| 755 | { | ||
| 756 | char gran_factor, chunk_factor, lose_factor; | ||
| 757 | unsigned long gran_base, chunk_base, lose_base; | ||
| 758 | |||
| 759 | gran_base = to_size_factor(result[i].gran_sizek, &gran_factor), | ||
| 760 | chunk_base = to_size_factor(result[i].chunk_sizek, &chunk_factor), | ||
| 761 | lose_base = to_size_factor(result[i].lose_cover_sizek, &lose_factor), | ||
| 762 | printk(KERN_INFO "%sgran_size: %ld%c \tchunk_size: %ld%c \t", | ||
| 763 | result[i].bad ? "*BAD*" : " ", | ||
| 764 | gran_base, gran_factor, chunk_base, chunk_factor); | ||
| 765 | printk(KERN_CONT "num_reg: %d \tlose cover RAM: %s%ld%c\n", | ||
| 766 | result[i].num_reg, result[i].bad ? "-" : "", | ||
| 767 | lose_base, lose_factor); | ||
| 768 | } | ||
| 769 | |||
| 770 | static int __init mtrr_search_optimal_index(void) | ||
| 771 | { | ||
| 772 | int i; | ||
| 773 | int num_reg_good; | ||
| 774 | int index_good; | ||
| 775 | |||
| 776 | if (nr_mtrr_spare_reg >= num_var_ranges) | ||
| 777 | nr_mtrr_spare_reg = num_var_ranges - 1; | ||
| 778 | num_reg_good = -1; | ||
| 779 | for (i = num_var_ranges - nr_mtrr_spare_reg; i > 0; i--) { | ||
| 780 | if (!min_loss_pfn[i]) | ||
| 781 | num_reg_good = i; | ||
| 782 | } | ||
| 783 | |||
| 784 | index_good = -1; | ||
| 785 | if (num_reg_good != -1) { | ||
| 786 | for (i = 0; i < NUM_RESULT; i++) { | ||
| 787 | if (!result[i].bad && | ||
| 788 | result[i].num_reg == num_reg_good && | ||
| 789 | !result[i].lose_cover_sizek) { | ||
| 790 | index_good = i; | ||
| 791 | break; | ||
| 792 | } | ||
| 793 | } | ||
| 794 | } | ||
| 795 | |||
| 796 | return index_good; | ||
| 797 | } | ||
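A minimal standalone sketch of this selection rule, with made-up loss numbers (min_loss[i] holds the smallest coverage loss seen for any candidate that uses i variable MTRRs); the downward scan keeps the smallest register count that still achieves zero loss:

    #include <stdio.h>

    int main(void)
    {
            /* index = number of variable MTRRs a candidate layout would use */
            unsigned long min_loss[8] = { -1UL, -1UL, 0, 0, 512, -1UL, -1UL, -1UL };
            int nr_var = 8, nr_spare = 1, best = -1, i;

            for (i = nr_var - nr_spare; i > 0; i--)
                    if (!min_loss[i])
                            best = i;       /* last hit = fewest registers */

            printf("use %d variable MTRRs\n", best);  /* prints: use 2 variable MTRRs */
            return 0;
    }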
| 798 | |||
| 799 | |||
| 800 | int __init mtrr_cleanup(unsigned address_bits) | ||
| 801 | { | ||
| 802 | unsigned long extra_remove_base, extra_remove_size; | ||
| 803 | unsigned long base, size, def, dummy; | ||
| 804 | mtrr_type type; | ||
| 805 | u64 chunk_size, gran_size; | ||
| 806 | int index_good; | ||
| 807 | int i; | ||
| 808 | |||
| 809 | if (!is_cpu(INTEL) || enable_mtrr_cleanup < 1) | ||
| 810 | return 0; | ||
| 811 | rdmsr(MTRRdefType_MSR, def, dummy); | ||
| 812 | def &= 0xff; | ||
| 813 | if (def != MTRR_TYPE_UNCACHABLE) | ||
| 814 | return 0; | ||
| 815 | |||
| 816 | /* get it and store it aside */ | ||
| 817 | memset(range_state, 0, sizeof(range_state)); | ||
| 818 | for (i = 0; i < num_var_ranges; i++) { | ||
| 819 | mtrr_if->get(i, &base, &size, &type); | ||
| 820 | range_state[i].base_pfn = base; | ||
| 821 | range_state[i].size_pfn = size; | ||
| 822 | range_state[i].type = type; | ||
| 823 | } | ||
| 824 | |||
| 825 | /* check if we need to handle it and can handle it */ | ||
| 826 | if (!mtrr_need_cleanup()) | ||
| 827 | return 0; | ||
| 828 | |||
| 829 | /* print original var MTRRs at first, for debugging: */ | ||
| 830 | printk(KERN_DEBUG "original variable MTRRs\n"); | ||
| 831 | print_out_mtrr_range_state(); | ||
| 832 | |||
| 833 | memset(range, 0, sizeof(range)); | ||
| 834 | extra_remove_size = 0; | ||
| 835 | extra_remove_base = 1 << (32 - PAGE_SHIFT); | ||
| 836 | if (mtrr_tom2) | ||
| 837 | extra_remove_size = | ||
| 838 | (mtrr_tom2 >> PAGE_SHIFT) - extra_remove_base; | ||
| 839 | nr_range = x86_get_mtrr_mem_range(range, 0, extra_remove_base, | ||
| 840 | extra_remove_size); | ||
| 841 | /* | ||
| 842 | * [0, 1M) should always be covered by var mtrr with WB | ||
| 843 | * and fixed mtrrs should take effect before var mtrr for it | ||
| 844 | */ | ||
| 845 | nr_range = add_range_with_merge(range, nr_range, 0, | ||
| 846 | (1ULL<<(20 - PAGE_SHIFT)) - 1); | ||
| 847 | /* sort the ranges */ | ||
| 848 | sort(range, nr_range, sizeof(struct res_range), cmp_range, NULL); | ||
| 849 | |||
| 850 | range_sums = sum_ranges(range, nr_range); | ||
| 851 | printk(KERN_INFO "total RAM covered: %ldM\n", | ||
| 852 | range_sums >> (20 - PAGE_SHIFT)); | ||
| 853 | |||
| 854 | if (mtrr_chunk_size && mtrr_gran_size) { | ||
| 855 | i = 0; | ||
| 856 | mtrr_calc_range_state(mtrr_chunk_size, mtrr_gran_size, | ||
| 857 | extra_remove_base, extra_remove_size, i); | ||
| 858 | |||
| 859 | mtrr_print_out_one_result(i); | ||
| 860 | |||
| 861 | if (!result[i].bad) { | ||
| 862 | set_var_mtrr_all(address_bits); | ||
| 863 | printk(KERN_DEBUG "New variable MTRRs\n"); | ||
| 864 | print_out_mtrr_range_state(); | ||
| 865 | return 1; | ||
| 866 | } | ||
| 867 | printk(KERN_INFO "invalid mtrr_gran_size or mtrr_chunk_size, " | ||
| 868 | "will find optimal one\n"); | ||
| 869 | } | ||
| 870 | |||
| 871 | i = 0; | ||
| 872 | memset(min_loss_pfn, 0xff, sizeof(min_loss_pfn)); | ||
| 873 | memset(result, 0, sizeof(result)); | ||
| 874 | for (gran_size = (1ULL<<16); gran_size < (1ULL<<32); gran_size <<= 1) { | ||
| 875 | |||
| 876 | for (chunk_size = gran_size; chunk_size < (1ULL<<32); | ||
| 877 | chunk_size <<= 1) { | ||
| 878 | |||
| 879 | if (i >= NUM_RESULT) | ||
| 880 | continue; | ||
| 881 | |||
| 882 | mtrr_calc_range_state(chunk_size, gran_size, | ||
| 883 | extra_remove_base, extra_remove_size, i); | ||
| 884 | if (debug_print) { | ||
| 885 | mtrr_print_out_one_result(i); | ||
| 886 | printk(KERN_INFO "\n"); | ||
| 887 | } | ||
| 888 | |||
| 889 | i++; | ||
| 890 | } | ||
| 891 | } | ||
| 892 | |||
| 893 | /* try to find the optimal index */ | ||
| 894 | index_good = mtrr_search_optimal_index(); | ||
| 895 | |||
| 896 | if (index_good != -1) { | ||
| 897 | printk(KERN_INFO "Found optimal setting for mtrr clean up\n"); | ||
| 898 | i = index_good; | ||
| 899 | mtrr_print_out_one_result(i); | ||
| 900 | |||
| 901 | /* convert ranges to var ranges state */ | ||
| 902 | chunk_size = result[i].chunk_sizek; | ||
| 903 | chunk_size <<= 10; | ||
| 904 | gran_size = result[i].gran_sizek; | ||
| 905 | gran_size <<= 10; | ||
| 906 | x86_setup_var_mtrrs(range, nr_range, chunk_size, gran_size); | ||
| 907 | set_var_mtrr_all(address_bits); | ||
| 908 | printk(KERN_DEBUG "New variable MTRRs\n"); | ||
| 909 | print_out_mtrr_range_state(); | ||
| 910 | return 1; | ||
| 911 | } else { | ||
| 912 | /* print out all */ | ||
| 913 | for (i = 0; i < NUM_RESULT; i++) | ||
| 914 | mtrr_print_out_one_result(i); | ||
| 915 | } | ||
| 916 | |||
| 917 | printk(KERN_INFO "mtrr_cleanup: cannot find optimal value\n"); | ||
| 918 | printk(KERN_INFO "please specify mtrr_gran_size/mtrr_chunk_size\n"); | ||
| 919 | |||
| 920 | return 0; | ||
| 921 | } | ||
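The nested loops above walk gran_size through 64K, 128K, ..., 2G (16 values) and chunk_size from gran_size up to 2G, so the number of candidate layouts is 16 + 15 + ... + 1 = 136, matching NUM_RESULT. If the automatic search fails, the table it prints can be used to pick a pair by hand and pass it at boot, e.g. (values purely illustrative) mtrr_gran_size=64M mtrr_chunk_size=256M; both options are parsed with memparse() and therefore accept K/M/G suffixes.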
| 922 | #else | ||
| 923 | int __init mtrr_cleanup(unsigned address_bits) | ||
| 924 | { | ||
| 925 | return 0; | ||
| 926 | } | ||
| 927 | #endif | ||
| 928 | |||
| 929 | static int disable_mtrr_trim; | ||
| 930 | |||
| 931 | static int __init disable_mtrr_trim_setup(char *str) | ||
| 932 | { | ||
| 933 | disable_mtrr_trim = 1; | ||
| 934 | return 0; | ||
| 935 | } | ||
| 936 | early_param("disable_mtrr_trim", disable_mtrr_trim_setup); | ||
| 937 | |||
| 938 | /* | ||
| 939 | * Newer AMD K8s and later CPUs have a special magic MSR way to force WB | ||
| 940 | * for memory >4GB. Check for that here. | ||
| 941 | * Note this won't check whether the MTRRs below 4GB (where the magic bit | ||
| 942 | * doesn't apply) are wrong, but so far we don't know of any such case in the wild. | ||
| 943 | */ | ||
| 944 | #define Tom2Enabled (1U << 21) | ||
| 945 | #define Tom2ForceMemTypeWB (1U << 22) | ||
| 946 | |||
| 947 | int __init amd_special_default_mtrr(void) | ||
| 948 | { | ||
| 949 | u32 l, h; | ||
| 950 | |||
| 951 | if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD) | ||
| 952 | return 0; | ||
| 953 | if (boot_cpu_data.x86 < 0xf || boot_cpu_data.x86 > 0x11) | ||
| 954 | return 0; | ||
| 955 | /* In case some hypervisor doesn't pass SYSCFG through */ | ||
| 956 | if (rdmsr_safe(MSR_K8_SYSCFG, &l, &h) < 0) | ||
| 957 | return 0; | ||
| 958 | /* | ||
| 959 | * Memory between 4GB and top of mem is forced WB by this magic bit. | ||
| 960 | * Reserved before K8RevF, but should be zero there. | ||
| 961 | */ | ||
| 962 | if ((l & (Tom2Enabled | Tom2ForceMemTypeWB)) == | ||
| 963 | (Tom2Enabled | Tom2ForceMemTypeWB)) | ||
| 964 | return 1; | ||
| 965 | return 0; | ||
| 966 | } | ||
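The two bits tested here sit in the low word of SYSCFG: Tom2Enabled | Tom2ForceMemTypeWB = (1 << 21) | (1 << 22) = 0x00600000. Only when both are set does the kernel treat memory between 4GB and the top of memory as write-back without requiring a covering variable MTRR.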
| 967 | |||
| 968 | static u64 __init real_trim_memory(unsigned long start_pfn, | ||
| 969 | unsigned long limit_pfn) | ||
| 970 | { | ||
| 971 | u64 trim_start, trim_size; | ||
| 972 | trim_start = start_pfn; | ||
| 973 | trim_start <<= PAGE_SHIFT; | ||
| 974 | trim_size = limit_pfn; | ||
| 975 | trim_size <<= PAGE_SHIFT; | ||
| 976 | trim_size -= trim_start; | ||
| 977 | |||
| 978 | return e820_update_range(trim_start, trim_size, E820_RAM, | ||
| 979 | E820_RESERVED); | ||
| 980 | } | ||
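A standalone illustration of the pfn-to-byte arithmetic above, with hypothetical sample frame numbers and 4 KB pages (the kernel itself hands the resulting span to e820_update_range()):

    #include <stdio.h>
    #include <stdint.h>

    #define PAGE_SHIFT 12

    int main(void)
    {
            uint64_t start_pfn = 0x80000;           /* first frame past 2 GiB */
            uint64_t limit_pfn = 0x100000;          /* first frame past 4 GiB */
            uint64_t trim_start = start_pfn << PAGE_SHIFT;
            uint64_t trim_size  = (limit_pfn << PAGE_SHIFT) - trim_start;

            printf("reserve [%#llx, +%lluMB)\n",
                   (unsigned long long)trim_start,
                   (unsigned long long)(trim_size >> 20));
            /* prints: reserve [0x80000000, +2048MB) */
            return 0;
    }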
| 981 | /** | ||
| 982 | * mtrr_trim_uncached_memory - trim RAM not covered by MTRRs | ||
| 983 | * @end_pfn: ending page frame number | ||
| 984 | * | ||
| 985 | * Some buggy BIOSes don't set up the MTRRs properly for systems with certain | ||
| 986 | * memory configurations. This routine checks that the highest MTRR matches | ||
| 987 | * the end of memory, to make sure the MTRRs having a write back type cover | ||
| 988 | * all of the memory the kernel is intending to use. If not, it'll trim any | ||
| 989 | * memory off the end by adjusting end_pfn, removing it from the kernel's | ||
| 990 | * allocation pools, and warning the user with an obnoxious message. | ||
| 991 | */ | ||
| 992 | int __init mtrr_trim_uncached_memory(unsigned long end_pfn) | ||
| 993 | { | ||
| 994 | unsigned long i, base, size, highest_pfn = 0, def, dummy; | ||
| 995 | mtrr_type type; | ||
| 996 | u64 total_trim_size; | ||
| 997 | |||
| 998 | /* extra one for all 0 */ | ||
| 999 | int num[MTRR_NUM_TYPES + 1]; | ||
| 1000 | /* | ||
| 1001 | * Make sure we only trim uncachable memory on machines that | ||
| 1002 | * support the Intel MTRR architecture: | ||
| 1003 | */ | ||
| 1004 | if (!is_cpu(INTEL) || disable_mtrr_trim) | ||
| 1005 | return 0; | ||
| 1006 | rdmsr(MTRRdefType_MSR, def, dummy); | ||
| 1007 | def &= 0xff; | ||
| 1008 | if (def != MTRR_TYPE_UNCACHABLE) | ||
| 1009 | return 0; | ||
| 1010 | |||
| 1011 | /* get it and store it aside */ | ||
| 1012 | memset(range_state, 0, sizeof(range_state)); | ||
| 1013 | for (i = 0; i < num_var_ranges; i++) { | ||
| 1014 | mtrr_if->get(i, &base, &size, &type); | ||
| 1015 | range_state[i].base_pfn = base; | ||
| 1016 | range_state[i].size_pfn = size; | ||
| 1017 | range_state[i].type = type; | ||
| 1018 | } | ||
| 1019 | |||
| 1020 | /* Find highest cached pfn */ | ||
| 1021 | for (i = 0; i < num_var_ranges; i++) { | ||
| 1022 | type = range_state[i].type; | ||
| 1023 | if (type != MTRR_TYPE_WRBACK) | ||
| 1024 | continue; | ||
| 1025 | base = range_state[i].base_pfn; | ||
| 1026 | size = range_state[i].size_pfn; | ||
| 1027 | if (highest_pfn < base + size) | ||
| 1028 | highest_pfn = base + size; | ||
| 1029 | } | ||
| 1030 | |||
| 1031 | /* kvm/qemu doesn't have mtrr set right, don't trim them all */ | ||
| 1032 | if (!highest_pfn) { | ||
| 1033 | printk(KERN_INFO "CPU MTRRs all blank - virtualized system.\n"); | ||
| 1034 | return 0; | ||
| 1035 | } | ||
| 1036 | |||
| 1037 | /* check entries number */ | ||
| 1038 | memset(num, 0, sizeof(num)); | ||
| 1039 | for (i = 0; i < num_var_ranges; i++) { | ||
| 1040 | type = range_state[i].type; | ||
| 1041 | if (type >= MTRR_NUM_TYPES) | ||
| 1042 | continue; | ||
| 1043 | size = range_state[i].size_pfn; | ||
| 1044 | if (!size) | ||
| 1045 | type = MTRR_NUM_TYPES; | ||
| 1046 | num[type]++; | ||
| 1047 | } | ||
| 1048 | |||
| 1049 | /* no entry for WB? */ | ||
| 1050 | if (!num[MTRR_TYPE_WRBACK]) | ||
| 1051 | return 0; | ||
| 1052 | |||
| 1053 | /* check if we only had WB and UC */ | ||
| 1054 | if (num[MTRR_TYPE_WRBACK] + num[MTRR_TYPE_UNCACHABLE] != | ||
| 1055 | num_var_ranges - num[MTRR_NUM_TYPES]) | ||
| 1056 | return 0; | ||
| 1057 | |||
| 1058 | memset(range, 0, sizeof(range)); | ||
| 1059 | nr_range = 0; | ||
| 1060 | if (mtrr_tom2) { | ||
| 1061 | range[nr_range].start = (1ULL<<(32 - PAGE_SHIFT)); | ||
| 1062 | range[nr_range].end = (mtrr_tom2 >> PAGE_SHIFT) - 1; | ||
| 1063 | if (highest_pfn < range[nr_range].end + 1) | ||
| 1064 | highest_pfn = range[nr_range].end + 1; | ||
| 1065 | nr_range++; | ||
| 1066 | } | ||
| 1067 | nr_range = x86_get_mtrr_mem_range(range, nr_range, 0, 0); | ||
| 1068 | |||
| 1069 | total_trim_size = 0; | ||
| 1070 | /* check the head */ | ||
| 1071 | if (range[0].start) | ||
| 1072 | total_trim_size += real_trim_memory(0, range[0].start); | ||
| 1073 | /* check the holes */ | ||
| 1074 | for (i = 0; i < nr_range - 1; i++) { | ||
| 1075 | if (range[i].end + 1 < range[i+1].start) | ||
| 1076 | total_trim_size += real_trim_memory(range[i].end + 1, | ||
| 1077 | range[i+1].start); | ||
| 1078 | } | ||
| 1079 | /* check the top */ | ||
| 1080 | i = nr_range - 1; | ||
| 1081 | if (range[i].end + 1 < end_pfn) | ||
| 1082 | total_trim_size += real_trim_memory(range[i].end + 1, | ||
| 1083 | end_pfn); | ||
| 1084 | |||
| 1085 | if (total_trim_size) { | ||
| 1086 | printk(KERN_WARNING "WARNING: BIOS bug: CPU MTRRs don't cover" | ||
| 1087 | " all of memory, losing %lluMB of RAM.\n", | ||
| 1088 | total_trim_size >> 20); | ||
| 1089 | |||
| 1090 | if (!changed_by_mtrr_cleanup) | ||
| 1091 | WARN_ON(1); | ||
| 1092 | |||
| 1093 | printk(KERN_INFO "update e820 for mtrr\n"); | ||
| 1094 | update_e820(); | ||
| 1095 | |||
| 1096 | return 1; | ||
| 1097 | } | ||
| 1098 | |||
| 1099 | return 0; | ||
| 1100 | } | ||
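A concrete, made-up failure case: if the variable MTRRs mark only [0, 3.5GB) as write-back while the e820 map hands the kernel RAM up to 4GB, the "check the top" step trims the last 512MB, the warning fires as "losing 512MB of RAM", and update_e820() reserves the tail so it is never handed to the allocator.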
| 1101 | |||
diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c index 0c0a455fe95c..0b776c09aff3 100644 --- a/arch/x86/kernel/cpu/mtrr/generic.c +++ b/arch/x86/kernel/cpu/mtrr/generic.c | |||
| @@ -33,13 +33,31 @@ u64 mtrr_tom2; | |||
| 33 | struct mtrr_state_type mtrr_state = {}; | 33 | struct mtrr_state_type mtrr_state = {}; |
| 34 | EXPORT_SYMBOL_GPL(mtrr_state); | 34 | EXPORT_SYMBOL_GPL(mtrr_state); |
| 35 | 35 | ||
| 36 | static int __initdata mtrr_show; | 36 | /** |
| 37 | static int __init mtrr_debug(char *opt) | 37 | * BIOS is expected to clear MtrrFixDramModEn bit, see for example |
| 38 | * "BIOS and Kernel Developer's Guide for the AMD Athlon 64 and AMD | ||
| 39 | * Opteron Processors" (26094 Rev. 3.30 February 2006), section | ||
| 40 | * "13.2.1.2 SYSCFG Register": "The MtrrFixDramModEn bit should be set | ||
| 41 | * to 1 during BIOS initialization of the fixed MTRRs, then cleared to | ||
| 42 | * 0 for operation." | ||
| 43 | */ | ||
| 44 | static inline void k8_check_syscfg_dram_mod_en(void) | ||
| 38 | { | 45 | { |
| 39 | mtrr_show = 1; | 46 | u32 lo, hi; |
| 40 | return 0; | 47 | |
| 48 | if (!((boot_cpu_data.x86_vendor == X86_VENDOR_AMD) && | ||
| 49 | (boot_cpu_data.x86 >= 0x0f))) | ||
| 50 | return; | ||
| 51 | |||
| 52 | rdmsr(MSR_K8_SYSCFG, lo, hi); | ||
| 53 | if (lo & K8_MTRRFIXRANGE_DRAM_MODIFY) { | ||
| 54 | printk(KERN_ERR FW_WARN "MTRR: CPU %u: SYSCFG[MtrrFixDramModEn]" | ||
| 55 | " not cleared by BIOS, clearing this bit\n", | ||
| 56 | smp_processor_id()); | ||
| 57 | lo &= ~K8_MTRRFIXRANGE_DRAM_MODIFY; | ||
| 58 | mtrr_wrmsr(MSR_K8_SYSCFG, lo, hi); | ||
| 59 | } | ||
| 41 | } | 60 | } |
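Per the BKDG wording quoted above, MtrrFixDramModEn (SYSCFG bit 19, K8_MTRRFIXRANGE_DRAM_MODIFY in the kernel headers) is only meant to be set transiently by the BIOS; both get_fixed_ranges() and set_fixed_ranges() below now call this check first, so firmware that forgot to clear the bit cannot skew the fixed-range contents the kernel reads back or writes out.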
| 42 | early_param("mtrr.show", mtrr_debug); | ||
| 43 | 61 | ||
| 44 | /* | 62 | /* |
| 45 | * Returns the effective MTRR type for the region | 63 | * Returns the effective MTRR type for the region |
| @@ -174,6 +192,8 @@ get_fixed_ranges(mtrr_type * frs) | |||
| 174 | unsigned int *p = (unsigned int *) frs; | 192 | unsigned int *p = (unsigned int *) frs; |
| 175 | int i; | 193 | int i; |
| 176 | 194 | ||
| 195 | k8_check_syscfg_dram_mod_en(); | ||
| 196 | |||
| 177 | rdmsr(MTRRfix64K_00000_MSR, p[0], p[1]); | 197 | rdmsr(MTRRfix64K_00000_MSR, p[0], p[1]); |
| 178 | 198 | ||
| 179 | for (i = 0; i < 2; i++) | 199 | for (i = 0; i < 2; i++) |
| @@ -188,18 +208,94 @@ void mtrr_save_fixed_ranges(void *info) | |||
| 188 | get_fixed_ranges(mtrr_state.fixed_ranges); | 208 | get_fixed_ranges(mtrr_state.fixed_ranges); |
| 189 | } | 209 | } |
| 190 | 210 | ||
| 191 | static void print_fixed(unsigned base, unsigned step, const mtrr_type*types) | 211 | static unsigned __initdata last_fixed_start; |
| 212 | static unsigned __initdata last_fixed_end; | ||
| 213 | static mtrr_type __initdata last_fixed_type; | ||
| 214 | |||
| 215 | static void __init print_fixed_last(void) | ||
| 216 | { | ||
| 217 | if (!last_fixed_end) | ||
| 218 | return; | ||
| 219 | |||
| 220 | printk(KERN_DEBUG " %05X-%05X %s\n", last_fixed_start, | ||
| 221 | last_fixed_end - 1, mtrr_attrib_to_str(last_fixed_type)); | ||
| 222 | |||
| 223 | last_fixed_end = 0; | ||
| 224 | } | ||
| 225 | |||
| 226 | static void __init update_fixed_last(unsigned base, unsigned end, | ||
| 227 | mtrr_type type) | ||
| 228 | { | ||
| 229 | last_fixed_start = base; | ||
| 230 | last_fixed_end = end; | ||
| 231 | last_fixed_type = type; | ||
| 232 | } | ||
| 233 | |||
| 234 | static void __init print_fixed(unsigned base, unsigned step, | ||
| 235 | const mtrr_type *types) | ||
| 192 | { | 236 | { |
| 193 | unsigned i; | 237 | unsigned i; |
| 194 | 238 | ||
| 195 | for (i = 0; i < 8; ++i, ++types, base += step) | 239 | for (i = 0; i < 8; ++i, ++types, base += step) { |
| 196 | printk(KERN_INFO "MTRR %05X-%05X %s\n", | 240 | if (last_fixed_end == 0) { |
| 197 | base, base + step - 1, mtrr_attrib_to_str(*types)); | 241 | update_fixed_last(base, base + step, *types); |
| 242 | continue; | ||
| 243 | } | ||
| 244 | if (last_fixed_end == base && last_fixed_type == *types) { | ||
| 245 | last_fixed_end = base + step; | ||
| 246 | continue; | ||
| 247 | } | ||
| 248 | /* new segments: gap or different type */ | ||
| 249 | print_fixed_last(); | ||
| 250 | update_fixed_last(base, base + step, *types); | ||
| 251 | } | ||
| 198 | } | 252 | } |
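The run-length merging above replaces the old dump of one line per slot (88 lines for the eleven fixed-range MSRs, 8 slots each) with one line per contiguous same-type run; on a typical PC the fixed-range report collapses to something like:

    MTRR fixed ranges enabled:
      00000-9FFFF write-back
      A0000-BFFFF uncachable
      C0000-FFFFF write-protect

(the type names come from mtrr_attrib_to_str(), so the exact strings may differ).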
| 199 | 253 | ||
| 200 | static void prepare_set(void); | 254 | static void prepare_set(void); |
| 201 | static void post_set(void); | 255 | static void post_set(void); |
| 202 | 256 | ||
| 257 | static void __init print_mtrr_state(void) | ||
| 258 | { | ||
| 259 | unsigned int i; | ||
| 260 | int high_width; | ||
| 261 | |||
| 262 | printk(KERN_DEBUG "MTRR default type: %s\n", | ||
| 263 | mtrr_attrib_to_str(mtrr_state.def_type)); | ||
| 264 | if (mtrr_state.have_fixed) { | ||
| 265 | printk(KERN_DEBUG "MTRR fixed ranges %sabled:\n", | ||
| 266 | mtrr_state.enabled & 1 ? "en" : "dis"); | ||
| 267 | print_fixed(0x00000, 0x10000, mtrr_state.fixed_ranges + 0); | ||
| 268 | for (i = 0; i < 2; ++i) | ||
| 269 | print_fixed(0x80000 + i * 0x20000, 0x04000, mtrr_state.fixed_ranges + (i + 1) * 8); | ||
| 270 | for (i = 0; i < 8; ++i) | ||
| 271 | print_fixed(0xC0000 + i * 0x08000, 0x01000, mtrr_state.fixed_ranges + (i + 3) * 8); | ||
| 272 | |||
| 273 | /* tail */ | ||
| 274 | print_fixed_last(); | ||
| 275 | } | ||
| 276 | printk(KERN_DEBUG "MTRR variable ranges %sabled:\n", | ||
| 277 | mtrr_state.enabled & 2 ? "en" : "dis"); | ||
| 278 | high_width = ((size_or_mask ? ffs(size_or_mask) - 1 : 32) - (32 - PAGE_SHIFT) + 3) / 4; | ||
| 279 | for (i = 0; i < num_var_ranges; ++i) { | ||
| 280 | if (mtrr_state.var_ranges[i].mask_lo & (1 << 11)) | ||
| 281 | printk(KERN_DEBUG " %u base %0*X%05X000 mask %0*X%05X000 %s\n", | ||
| 282 | i, | ||
| 283 | high_width, | ||
| 284 | mtrr_state.var_ranges[i].base_hi, | ||
| 285 | mtrr_state.var_ranges[i].base_lo >> 12, | ||
| 286 | high_width, | ||
| 287 | mtrr_state.var_ranges[i].mask_hi, | ||
| 288 | mtrr_state.var_ranges[i].mask_lo >> 12, | ||
| 289 | mtrr_attrib_to_str(mtrr_state.var_ranges[i].base_lo & 0xff)); | ||
| 290 | else | ||
| 291 | printk(KERN_DEBUG " %u disabled\n", i); | ||
| 292 | } | ||
| 293 | if (mtrr_tom2) { | ||
| 294 | printk(KERN_DEBUG "TOM2: %016llx aka %lldM\n", | ||
| 295 | mtrr_tom2, mtrr_tom2>>20); | ||
| 296 | } | ||
| 297 | } | ||
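high_width is the number of hex digits needed for the physical-address bits above the low 32: with 36 physical address bits, for example, ffs(size_or_mask) - 1 is 24, so high_width = (24 - 20 + 3) / 4 = 1, and a 2GB write-back range at base 0 prints as something like " 0 base 000000000 mask F80000000 write-back" (nine hex digits, i.e. 36 bits, with the low 12 bits always shown as 000).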
| 298 | |||
| 203 | /* Grab all of the MTRR state for this CPU into *state */ | 299 | /* Grab all of the MTRR state for this CPU into *state */ |
| 204 | void __init get_mtrr_state(void) | 300 | void __init get_mtrr_state(void) |
| 205 | { | 301 | { |
| @@ -231,41 +327,9 @@ void __init get_mtrr_state(void) | |||
| 231 | mtrr_tom2 |= low; | 327 | mtrr_tom2 |= low; |
| 232 | mtrr_tom2 &= 0xffffff800000ULL; | 328 | mtrr_tom2 &= 0xffffff800000ULL; |
| 233 | } | 329 | } |
| 234 | if (mtrr_show) { | 330 | |
| 235 | int high_width; | 331 | print_mtrr_state(); |
| 236 | 332 | ||
| 237 | printk(KERN_INFO "MTRR default type: %s\n", mtrr_attrib_to_str(mtrr_state.def_type)); | ||
| 238 | if (mtrr_state.have_fixed) { | ||
| 239 | printk(KERN_INFO "MTRR fixed ranges %sabled:\n", | ||
| 240 | mtrr_state.enabled & 1 ? "en" : "dis"); | ||
| 241 | print_fixed(0x00000, 0x10000, mtrr_state.fixed_ranges + 0); | ||
| 242 | for (i = 0; i < 2; ++i) | ||
| 243 | print_fixed(0x80000 + i * 0x20000, 0x04000, mtrr_state.fixed_ranges + (i + 1) * 8); | ||
| 244 | for (i = 0; i < 8; ++i) | ||
| 245 | print_fixed(0xC0000 + i * 0x08000, 0x01000, mtrr_state.fixed_ranges + (i + 3) * 8); | ||
| 246 | } | ||
| 247 | printk(KERN_INFO "MTRR variable ranges %sabled:\n", | ||
| 248 | mtrr_state.enabled & 2 ? "en" : "dis"); | ||
| 249 | high_width = ((size_or_mask ? ffs(size_or_mask) - 1 : 32) - (32 - PAGE_SHIFT) + 3) / 4; | ||
| 250 | for (i = 0; i < num_var_ranges; ++i) { | ||
| 251 | if (mtrr_state.var_ranges[i].mask_lo & (1 << 11)) | ||
| 252 | printk(KERN_INFO "MTRR %u base %0*X%05X000 mask %0*X%05X000 %s\n", | ||
| 253 | i, | ||
| 254 | high_width, | ||
| 255 | mtrr_state.var_ranges[i].base_hi, | ||
| 256 | mtrr_state.var_ranges[i].base_lo >> 12, | ||
| 257 | high_width, | ||
| 258 | mtrr_state.var_ranges[i].mask_hi, | ||
| 259 | mtrr_state.var_ranges[i].mask_lo >> 12, | ||
| 260 | mtrr_attrib_to_str(mtrr_state.var_ranges[i].base_lo & 0xff)); | ||
| 261 | else | ||
| 262 | printk(KERN_INFO "MTRR %u disabled\n", i); | ||
| 263 | } | ||
| 264 | if (mtrr_tom2) { | ||
| 265 | printk(KERN_INFO "TOM2: %016llx aka %lldM\n", | ||
| 266 | mtrr_tom2, mtrr_tom2>>20); | ||
| 267 | } | ||
| 268 | } | ||
| 269 | mtrr_state_set = 1; | 333 | mtrr_state_set = 1; |
| 270 | 334 | ||
| 271 | /* PAT setup for BP. We need to go through sync steps here */ | 335 | /* PAT setup for BP. We need to go through sync steps here */ |
| @@ -308,27 +372,10 @@ void mtrr_wrmsr(unsigned msr, unsigned a, unsigned b) | |||
| 308 | } | 372 | } |
| 309 | 373 | ||
| 310 | /** | 374 | /** |
| 311 | * Enable and allow read/write of extended fixed-range MTRR bits on K8 CPUs | ||
| 312 | * see AMD publication no. 24593, chapter 3.2.1 for more information | ||
| 313 | */ | ||
| 314 | static inline void k8_enable_fixed_iorrs(void) | ||
| 315 | { | ||
| 316 | unsigned lo, hi; | ||
| 317 | |||
| 318 | rdmsr(MSR_K8_SYSCFG, lo, hi); | ||
| 319 | mtrr_wrmsr(MSR_K8_SYSCFG, lo | ||
| 320 | | K8_MTRRFIXRANGE_DRAM_ENABLE | ||
| 321 | | K8_MTRRFIXRANGE_DRAM_MODIFY, hi); | ||
| 322 | } | ||
| 323 | |||
| 324 | /** | ||
| 325 | * set_fixed_range - checks & updates a fixed-range MTRR if it differs from the value it should have | 375 | * set_fixed_range - checks & updates a fixed-range MTRR if it differs from the value it should have |
| 326 | * @msr: MSR address of the MTRR which should be checked and updated | 376 | * @msr: MSR address of the MTRR which should be checked and updated |
| 327 | * @changed: pointer which indicates whether the MTRR needed to be changed | 377 | * @changed: pointer which indicates whether the MTRR needed to be changed |
| 328 | * @msrwords: pointer to the MSR values which the MSR should have | 378 | * @msrwords: pointer to the MSR values which the MSR should have |
| 329 | * | ||
| 330 | * If K8 extentions are wanted, update the K8 SYSCFG MSR also. | ||
| 331 | * See AMD publication no. 24593, chapter 7.8.1, page 233 for more information. | ||
| 332 | */ | 379 | */ |
| 333 | static void set_fixed_range(int msr, bool *changed, unsigned int *msrwords) | 380 | static void set_fixed_range(int msr, bool *changed, unsigned int *msrwords) |
| 334 | { | 381 | { |
| @@ -337,10 +384,6 @@ static void set_fixed_range(int msr, bool *changed, unsigned int *msrwords) | |||
| 337 | rdmsr(msr, lo, hi); | 384 | rdmsr(msr, lo, hi); |
| 338 | 385 | ||
| 339 | if (lo != msrwords[0] || hi != msrwords[1]) { | 386 | if (lo != msrwords[0] || hi != msrwords[1]) { |
| 340 | if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD && | ||
| 341 | (boot_cpu_data.x86 >= 0x0f && boot_cpu_data.x86 <= 0x11) && | ||
| 342 | ((msrwords[0] | msrwords[1]) & K8_MTRR_RDMEM_WRMEM_MASK)) | ||
| 343 | k8_enable_fixed_iorrs(); | ||
| 344 | mtrr_wrmsr(msr, msrwords[0], msrwords[1]); | 387 | mtrr_wrmsr(msr, msrwords[0], msrwords[1]); |
| 345 | *changed = true; | 388 | *changed = true; |
| 346 | } | 389 | } |
| @@ -376,22 +419,31 @@ static void generic_get_mtrr(unsigned int reg, unsigned long *base, | |||
| 376 | { | 419 | { |
| 377 | unsigned int mask_lo, mask_hi, base_lo, base_hi; | 420 | unsigned int mask_lo, mask_hi, base_lo, base_hi; |
| 378 | unsigned int tmp, hi; | 421 | unsigned int tmp, hi; |
| 422 | int cpu; | ||
| 423 | |||
| 424 | /* | ||
| 425 | * get_mtrr doesn't need to update mtrr_state, also it could be called | ||
| 426 | * from any cpu, so try to print it out directly. | ||
| 427 | */ | ||
| 428 | cpu = get_cpu(); | ||
| 379 | 429 | ||
| 380 | rdmsr(MTRRphysMask_MSR(reg), mask_lo, mask_hi); | 430 | rdmsr(MTRRphysMask_MSR(reg), mask_lo, mask_hi); |
| 431 | |||
| 381 | if ((mask_lo & 0x800) == 0) { | 432 | if ((mask_lo & 0x800) == 0) { |
| 382 | /* Invalid (i.e. free) range */ | 433 | /* Invalid (i.e. free) range */ |
| 383 | *base = 0; | 434 | *base = 0; |
| 384 | *size = 0; | 435 | *size = 0; |
| 385 | *type = 0; | 436 | *type = 0; |
| 386 | return; | 437 | goto out_put_cpu; |
| 387 | } | 438 | } |
| 388 | 439 | ||
| 389 | rdmsr(MTRRphysBase_MSR(reg), base_lo, base_hi); | 440 | rdmsr(MTRRphysBase_MSR(reg), base_lo, base_hi); |
| 390 | 441 | ||
| 391 | /* Work out the shifted address mask. */ | 442 | /* Work out the shifted address mask: */ |
| 392 | tmp = mask_hi << (32 - PAGE_SHIFT) | mask_lo >> PAGE_SHIFT; | 443 | tmp = mask_hi << (32 - PAGE_SHIFT) | mask_lo >> PAGE_SHIFT; |
| 393 | mask_lo = size_or_mask | tmp; | 444 | mask_lo = size_or_mask | tmp; |
| 394 | /* Expand tmp with high bits to all 1s*/ | 445 | |
| 446 | /* Expand tmp with high bits to all 1s: */ | ||
| 395 | hi = fls(tmp); | 447 | hi = fls(tmp); |
| 396 | if (hi > 0) { | 448 | if (hi > 0) { |
| 397 | tmp |= ~((1<<(hi - 1)) - 1); | 449 | tmp |= ~((1<<(hi - 1)) - 1); |
| @@ -402,11 +454,16 @@ static void generic_get_mtrr(unsigned int reg, unsigned long *base, | |||
| 402 | } | 454 | } |
| 403 | } | 455 | } |
| 404 | 456 | ||
| 405 | /* This works correctly if size is a power of two, i.e. a | 457 | /* |
| 406 | contiguous range. */ | 458 | * This works correctly if size is a power of two, i.e. a |
| 459 | * contiguous range: | ||
| 460 | */ | ||
| 407 | *size = -mask_lo; | 461 | *size = -mask_lo; |
| 408 | *base = base_hi << (32 - PAGE_SHIFT) | base_lo >> PAGE_SHIFT; | 462 | *base = base_hi << (32 - PAGE_SHIFT) | base_lo >> PAGE_SHIFT; |
| 409 | *type = base_lo & 0xff; | 463 | *type = base_lo & 0xff; |
| 464 | |||
| 465 | out_put_cpu: | ||
| 466 | put_cpu(); | ||
| 410 | } | 467 | } |
| 411 | 468 | ||
| 412 | /** | 469 | /** |
| @@ -419,6 +476,8 @@ static int set_fixed_ranges(mtrr_type * frs) | |||
| 419 | bool changed = false; | 476 | bool changed = false; |
| 420 | int block=-1, range; | 477 | int block=-1, range; |
| 421 | 478 | ||
| 479 | k8_check_syscfg_dram_mod_en(); | ||
| 480 | |||
| 422 | while (fixed_range_blocks[++block].ranges) | 481 | while (fixed_range_blocks[++block].ranges) |
| 423 | for (range=0; range < fixed_range_blocks[block].ranges; range++) | 482 | for (range=0; range < fixed_range_blocks[block].ranges; range++) |
| 424 | set_fixed_range(fixed_range_blocks[block].base_msr + range, | 483 | set_fixed_range(fixed_range_blocks[block].base_msr + range, |
diff --git a/arch/x86/kernel/cpu/mtrr/if.c b/arch/x86/kernel/cpu/mtrr/if.c index 4c4214690dd1..fb73a52913a4 100644 --- a/arch/x86/kernel/cpu/mtrr/if.c +++ b/arch/x86/kernel/cpu/mtrr/if.c | |||
| @@ -377,10 +377,6 @@ static const struct file_operations mtrr_fops = { | |||
| 377 | .release = mtrr_close, | 377 | .release = mtrr_close, |
| 378 | }; | 378 | }; |
| 379 | 379 | ||
| 380 | |||
| 381 | static struct proc_dir_entry *proc_root_mtrr; | ||
| 382 | |||
| 383 | |||
| 384 | static int mtrr_seq_show(struct seq_file *seq, void *offset) | 380 | static int mtrr_seq_show(struct seq_file *seq, void *offset) |
| 385 | { | 381 | { |
| 386 | char factor; | 382 | char factor; |
| @@ -423,11 +419,7 @@ static int __init mtrr_if_init(void) | |||
| 423 | (!cpu_has(c, X86_FEATURE_CENTAUR_MCR))) | 419 | (!cpu_has(c, X86_FEATURE_CENTAUR_MCR))) |
| 424 | return -ENODEV; | 420 | return -ENODEV; |
| 425 | 421 | ||
| 426 | proc_root_mtrr = | 422 | proc_create("mtrr", S_IWUSR | S_IRUGO, NULL, &mtrr_fops); |
| 427 | proc_create("mtrr", S_IWUSR | S_IRUGO, NULL, &mtrr_fops); | ||
| 428 | |||
| 429 | if (proc_root_mtrr) | ||
| 430 | proc_root_mtrr->owner = THIS_MODULE; | ||
| 431 | return 0; | 423 | return 0; |
| 432 | } | 424 | } |
| 433 | 425 | ||
diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c index 236a401b8259..03cda01f57c7 100644 --- a/arch/x86/kernel/cpu/mtrr/main.c +++ b/arch/x86/kernel/cpu/mtrr/main.c | |||
| @@ -574,7 +574,7 @@ struct mtrr_value { | |||
| 574 | unsigned long lsize; | 574 | unsigned long lsize; |
| 575 | }; | 575 | }; |
| 576 | 576 | ||
| 577 | static struct mtrr_value mtrr_state[MTRR_MAX_VAR_RANGES]; | 577 | static struct mtrr_value mtrr_value[MTRR_MAX_VAR_RANGES]; |
| 578 | 578 | ||
| 579 | static int mtrr_save(struct sys_device * sysdev, pm_message_t state) | 579 | static int mtrr_save(struct sys_device * sysdev, pm_message_t state) |
| 580 | { | 580 | { |
| @@ -582,9 +582,9 @@ static int mtrr_save(struct sys_device * sysdev, pm_message_t state) | |||
| 582 | 582 | ||
| 583 | for (i = 0; i < num_var_ranges; i++) { | 583 | for (i = 0; i < num_var_ranges; i++) { |
| 584 | mtrr_if->get(i, | 584 | mtrr_if->get(i, |
| 585 | &mtrr_state[i].lbase, | 585 | &mtrr_value[i].lbase, |
| 586 | &mtrr_state[i].lsize, | 586 | &mtrr_value[i].lsize, |
| 587 | &mtrr_state[i].ltype); | 587 | &mtrr_value[i].ltype); |
| 588 | } | 588 | } |
| 589 | return 0; | 589 | return 0; |
| 590 | } | 590 | } |
| @@ -594,11 +594,11 @@ static int mtrr_restore(struct sys_device * sysdev) | |||
| 594 | int i; | 594 | int i; |
| 595 | 595 | ||
| 596 | for (i = 0; i < num_var_ranges; i++) { | 596 | for (i = 0; i < num_var_ranges; i++) { |
| 597 | if (mtrr_state[i].lsize) | 597 | if (mtrr_value[i].lsize) |
| 598 | set_mtrr(i, | 598 | set_mtrr(i, |
| 599 | mtrr_state[i].lbase, | 599 | mtrr_value[i].lbase, |
| 600 | mtrr_state[i].lsize, | 600 | mtrr_value[i].lsize, |
| 601 | mtrr_state[i].ltype); | 601 | mtrr_value[i].ltype); |
| 602 | } | 602 | } |
| 603 | return 0; | 603 | return 0; |
| 604 | } | 604 | } |
| @@ -610,1058 +610,7 @@ static struct sysdev_driver mtrr_sysdev_driver = { | |||
| 610 | .resume = mtrr_restore, | 610 | .resume = mtrr_restore, |
| 611 | }; | 611 | }; |
| 612 | 612 | ||
| 613 | /* should be related to MTRR_VAR_RANGES nums */ | 613 | int __initdata changed_by_mtrr_cleanup; |
| 614 | #define RANGE_NUM 256 | ||
| 615 | |||
| 616 | struct res_range { | ||
| 617 | unsigned long start; | ||
| 618 | unsigned long end; | ||
| 619 | }; | ||
| 620 | |||
| 621 | static int __init | ||
| 622 | add_range(struct res_range *range, int nr_range, unsigned long start, | ||
| 623 | unsigned long end) | ||
| 624 | { | ||
| 625 | /* out of slots */ | ||
| 626 | if (nr_range >= RANGE_NUM) | ||
| 627 | return nr_range; | ||
| 628 | |||
| 629 | range[nr_range].start = start; | ||
| 630 | range[nr_range].end = end; | ||
| 631 | |||
| 632 | nr_range++; | ||
| 633 | |||
| 634 | return nr_range; | ||
| 635 | } | ||
| 636 | |||
| 637 | static int __init | ||
| 638 | add_range_with_merge(struct res_range *range, int nr_range, unsigned long start, | ||
| 639 | unsigned long end) | ||
| 640 | { | ||
| 641 | int i; | ||
| 642 | |||
| 643 | /* try to merge it with old one */ | ||
| 644 | for (i = 0; i < nr_range; i++) { | ||
| 645 | unsigned long final_start, final_end; | ||
| 646 | unsigned long common_start, common_end; | ||
| 647 | |||
| 648 | if (!range[i].end) | ||
| 649 | continue; | ||
| 650 | |||
| 651 | common_start = max(range[i].start, start); | ||
| 652 | common_end = min(range[i].end, end); | ||
| 653 | if (common_start > common_end + 1) | ||
| 654 | continue; | ||
| 655 | |||
| 656 | final_start = min(range[i].start, start); | ||
| 657 | final_end = max(range[i].end, end); | ||
| 658 | |||
| 659 | range[i].start = final_start; | ||
| 660 | range[i].end = final_end; | ||
| 661 | return nr_range; | ||
| 662 | } | ||
| 663 | |||
| 664 | /* need to add that */ | ||
| 665 | return add_range(range, nr_range, start, end); | ||
| 666 | } | ||
| 667 | |||
| 668 | static void __init | ||
| 669 | subtract_range(struct res_range *range, unsigned long start, unsigned long end) | ||
| 670 | { | ||
| 671 | int i, j; | ||
| 672 | |||
| 673 | for (j = 0; j < RANGE_NUM; j++) { | ||
| 674 | if (!range[j].end) | ||
| 675 | continue; | ||
| 676 | |||
| 677 | if (start <= range[j].start && end >= range[j].end) { | ||
| 678 | range[j].start = 0; | ||
| 679 | range[j].end = 0; | ||
| 680 | continue; | ||
| 681 | } | ||
| 682 | |||
| 683 | if (start <= range[j].start && end < range[j].end && | ||
| 684 | range[j].start < end + 1) { | ||
| 685 | range[j].start = end + 1; | ||
| 686 | continue; | ||
| 687 | } | ||
| 688 | |||
| 689 | |||
| 690 | if (start > range[j].start && end >= range[j].end && | ||
| 691 | range[j].end > start - 1) { | ||
| 692 | range[j].end = start - 1; | ||
| 693 | continue; | ||
| 694 | } | ||
| 695 | |||
| 696 | if (start > range[j].start && end < range[j].end) { | ||
| 697 | /* find the new spare */ | ||
| 698 | for (i = 0; i < RANGE_NUM; i++) { | ||
| 699 | if (range[i].end == 0) | ||
| 700 | break; | ||
| 701 | } | ||
| 702 | if (i < RANGE_NUM) { | ||
| 703 | range[i].end = range[j].end; | ||
| 704 | range[i].start = end + 1; | ||
| 705 | } else { | ||
| 706 | printk(KERN_ERR "run out of slots in ranges\n"); | ||
| 707 | } | ||
| 708 | range[j].end = start - 1; | ||
| 709 | continue; | ||
| 710 | } | ||
| 711 | } | ||
| 712 | } | ||
| 713 | |||
| 714 | static int __init cmp_range(const void *x1, const void *x2) | ||
| 715 | { | ||
| 716 | const struct res_range *r1 = x1; | ||
| 717 | const struct res_range *r2 = x2; | ||
| 718 | long start1, start2; | ||
| 719 | |||
| 720 | start1 = r1->start; | ||
| 721 | start2 = r2->start; | ||
| 722 | |||
| 723 | return start1 - start2; | ||
| 724 | } | ||
| 725 | |||
| 726 | struct var_mtrr_range_state { | ||
| 727 | unsigned long base_pfn; | ||
| 728 | unsigned long size_pfn; | ||
| 729 | mtrr_type type; | ||
| 730 | }; | ||
| 731 | |||
| 732 | static struct var_mtrr_range_state __initdata range_state[RANGE_NUM]; | ||
| 733 | static int __initdata debug_print; | ||
| 734 | |||
| 735 | static int __init | ||
| 736 | x86_get_mtrr_mem_range(struct res_range *range, int nr_range, | ||
| 737 | unsigned long extra_remove_base, | ||
| 738 | unsigned long extra_remove_size) | ||
| 739 | { | ||
| 740 | unsigned long i, base, size; | ||
| 741 | mtrr_type type; | ||
| 742 | |||
| 743 | for (i = 0; i < num_var_ranges; i++) { | ||
| 744 | type = range_state[i].type; | ||
| 745 | if (type != MTRR_TYPE_WRBACK) | ||
| 746 | continue; | ||
| 747 | base = range_state[i].base_pfn; | ||
| 748 | size = range_state[i].size_pfn; | ||
| 749 | nr_range = add_range_with_merge(range, nr_range, base, | ||
| 750 | base + size - 1); | ||
| 751 | } | ||
| 752 | if (debug_print) { | ||
| 753 | printk(KERN_DEBUG "After WB checking\n"); | ||
| 754 | for (i = 0; i < nr_range; i++) | ||
| 755 | printk(KERN_DEBUG "MTRR MAP PFN: %016lx - %016lx\n", | ||
| 756 | range[i].start, range[i].end + 1); | ||
| 757 | } | ||
| 758 | |||
| 759 | /* take out UC ranges */ | ||
| 760 | for (i = 0; i < num_var_ranges; i++) { | ||
| 761 | type = range_state[i].type; | ||
| 762 | if (type != MTRR_TYPE_UNCACHABLE && | ||
| 763 | type != MTRR_TYPE_WRPROT) | ||
| 764 | continue; | ||
| 765 | size = range_state[i].size_pfn; | ||
| 766 | if (!size) | ||
| 767 | continue; | ||
| 768 | base = range_state[i].base_pfn; | ||
| 769 | subtract_range(range, base, base + size - 1); | ||
| 770 | } | ||
| 771 | if (extra_remove_size) | ||
| 772 | subtract_range(range, extra_remove_base, | ||
| 773 | extra_remove_base + extra_remove_size - 1); | ||
| 774 | |||
| 775 | /* get new range num */ | ||
| 776 | nr_range = 0; | ||
| 777 | for (i = 0; i < RANGE_NUM; i++) { | ||
| 778 | if (!range[i].end) | ||
| 779 | continue; | ||
| 780 | nr_range++; | ||
| 781 | } | ||
| 782 | if (debug_print) { | ||
| 783 | printk(KERN_DEBUG "After UC checking\n"); | ||
| 784 | for (i = 0; i < nr_range; i++) | ||
| 785 | printk(KERN_DEBUG "MTRR MAP PFN: %016lx - %016lx\n", | ||
| 786 | range[i].start, range[i].end + 1); | ||
| 787 | } | ||
| 788 | |||
| 789 | /* sort the ranges */ | ||
| 790 | sort(range, nr_range, sizeof(struct res_range), cmp_range, NULL); | ||
| 791 | if (debug_print) { | ||
| 792 | printk(KERN_DEBUG "After sorting\n"); | ||
| 793 | for (i = 0; i < nr_range; i++) | ||
| 794 | printk(KERN_DEBUG "MTRR MAP PFN: %016lx - %016lx\n", | ||
| 795 | range[i].start, range[i].end + 1); | ||
| 796 | } | ||
| 797 | |||
| 798 | /* clear those that are not used */ | ||
| 799 | for (i = nr_range; i < RANGE_NUM; i++) | ||
| 800 | memset(&range[i], 0, sizeof(range[i])); | ||
| 801 | |||
| 802 | return nr_range; | ||
| 803 | } | ||
| 804 | |||
| 805 | static struct res_range __initdata range[RANGE_NUM]; | ||
| 806 | static int __initdata nr_range; | ||
| 807 | |||
| 808 | #ifdef CONFIG_MTRR_SANITIZER | ||
| 809 | |||
| 810 | static unsigned long __init sum_ranges(struct res_range *range, int nr_range) | ||
| 811 | { | ||
| 812 | unsigned long sum; | ||
| 813 | int i; | ||
| 814 | |||
| 815 | sum = 0; | ||
| 816 | for (i = 0; i < nr_range; i++) | ||
| 817 | sum += range[i].end + 1 - range[i].start; | ||
| 818 | |||
| 819 | return sum; | ||
| 820 | } | ||
| 821 | |||
| 822 | static int enable_mtrr_cleanup __initdata = | ||
| 823 | CONFIG_MTRR_SANITIZER_ENABLE_DEFAULT; | ||
| 824 | |||
| 825 | static int __init disable_mtrr_cleanup_setup(char *str) | ||
| 826 | { | ||
| 827 | enable_mtrr_cleanup = 0; | ||
| 828 | return 0; | ||
| 829 | } | ||
| 830 | early_param("disable_mtrr_cleanup", disable_mtrr_cleanup_setup); | ||
| 831 | |||
| 832 | static int __init enable_mtrr_cleanup_setup(char *str) | ||
| 833 | { | ||
| 834 | enable_mtrr_cleanup = 1; | ||
| 835 | return 0; | ||
| 836 | } | ||
| 837 | early_param("enable_mtrr_cleanup", enable_mtrr_cleanup_setup); | ||
| 838 | |||
| 839 | static int __init mtrr_cleanup_debug_setup(char *str) | ||
| 840 | { | ||
| 841 | debug_print = 1; | ||
| 842 | return 0; | ||
| 843 | } | ||
| 844 | early_param("mtrr_cleanup_debug", mtrr_cleanup_debug_setup); | ||
| 845 | |||
| 846 | struct var_mtrr_state { | ||
| 847 | unsigned long range_startk; | ||
| 848 | unsigned long range_sizek; | ||
| 849 | unsigned long chunk_sizek; | ||
| 850 | unsigned long gran_sizek; | ||
| 851 | unsigned int reg; | ||
| 852 | }; | ||
| 853 | |||
| 854 | static void __init | ||
| 855 | set_var_mtrr(unsigned int reg, unsigned long basek, unsigned long sizek, | ||
| 856 | unsigned char type, unsigned int address_bits) | ||
| 857 | { | ||
| 858 | u32 base_lo, base_hi, mask_lo, mask_hi; | ||
| 859 | u64 base, mask; | ||
| 860 | |||
| 861 | if (!sizek) { | ||
| 862 | fill_mtrr_var_range(reg, 0, 0, 0, 0); | ||
| 863 | return; | ||
| 864 | } | ||
| 865 | |||
| 866 | mask = (1ULL << address_bits) - 1; | ||
| 867 | mask &= ~((((u64)sizek) << 10) - 1); | ||
| 868 | |||
| 869 | base = ((u64)basek) << 10; | ||
| 870 | |||
| 871 | base |= type; | ||
| 872 | mask |= 0x800; | ||
| 873 | |||
| 874 | base_lo = base & ((1ULL<<32) - 1); | ||
| 875 | base_hi = base >> 32; | ||
| 876 | |||
| 877 | mask_lo = mask & ((1ULL<<32) - 1); | ||
| 878 | mask_hi = mask >> 32; | ||
| 879 | |||
| 880 | fill_mtrr_var_range(reg, base_lo, base_hi, mask_lo, mask_hi); | ||
| 881 | } | ||
| 882 | |||
| 883 | static void __init | ||
| 884 | save_var_mtrr(unsigned int reg, unsigned long basek, unsigned long sizek, | ||
| 885 | unsigned char type) | ||
| 886 | { | ||
| 887 | range_state[reg].base_pfn = basek >> (PAGE_SHIFT - 10); | ||
| 888 | range_state[reg].size_pfn = sizek >> (PAGE_SHIFT - 10); | ||
| 889 | range_state[reg].type = type; | ||
| 890 | } | ||
| 891 | |||
| 892 | static void __init | ||
| 893 | set_var_mtrr_all(unsigned int address_bits) | ||
| 894 | { | ||
| 895 | unsigned long basek, sizek; | ||
| 896 | unsigned char type; | ||
| 897 | unsigned int reg; | ||
| 898 | |||
| 899 | for (reg = 0; reg < num_var_ranges; reg++) { | ||
| 900 | basek = range_state[reg].base_pfn << (PAGE_SHIFT - 10); | ||
| 901 | sizek = range_state[reg].size_pfn << (PAGE_SHIFT - 10); | ||
| 902 | type = range_state[reg].type; | ||
| 903 | |||
| 904 | set_var_mtrr(reg, basek, sizek, type, address_bits); | ||
| 905 | } | ||
| 906 | } | ||
| 907 | |||
| 908 | static unsigned long to_size_factor(unsigned long sizek, char *factorp) | ||
| 909 | { | ||
| 910 | char factor; | ||
| 911 | unsigned long base = sizek; | ||
| 912 | |||
| 913 | if (base & ((1<<10) - 1)) { | ||
| 914 | /* not MB alignment */ | ||
| 915 | factor = 'K'; | ||
| 916 | } else if (base & ((1<<20) - 1)){ | ||
| 917 | factor = 'M'; | ||
| 918 | base >>= 10; | ||
| 919 | } else { | ||
| 920 | factor = 'G'; | ||
| 921 | base >>= 20; | ||
| 922 | } | ||
| 923 | |||
| 924 | *factorp = factor; | ||
| 925 | |||
| 926 | return base; | ||
| 927 | } | ||
| 928 | |||
| 929 | static unsigned int __init | ||
| 930 | range_to_mtrr(unsigned int reg, unsigned long range_startk, | ||
| 931 | unsigned long range_sizek, unsigned char type) | ||
| 932 | { | ||
| 933 | if (!range_sizek || (reg >= num_var_ranges)) | ||
| 934 | return reg; | ||
| 935 | |||
| 936 | while (range_sizek) { | ||
| 937 | unsigned long max_align, align; | ||
| 938 | unsigned long sizek; | ||
| 939 | |||
| 940 | /* Compute the maximum size I can make a range */ | ||
| 941 | if (range_startk) | ||
| 942 | max_align = ffs(range_startk) - 1; | ||
| 943 | else | ||
| 944 | max_align = 32; | ||
| 945 | align = fls(range_sizek) - 1; | ||
| 946 | if (align > max_align) | ||
| 947 | align = max_align; | ||
| 948 | |||
| 949 | sizek = 1 << align; | ||
| 950 | if (debug_print) { | ||
| 951 | char start_factor = 'K', size_factor = 'K'; | ||
| 952 | unsigned long start_base, size_base; | ||
| 953 | |||
| 954 | start_base = to_size_factor(range_startk, &start_factor), | ||
| 955 | size_base = to_size_factor(sizek, &size_factor), | ||
| 956 | |||
| 957 | printk(KERN_DEBUG "Setting variable MTRR %d, " | ||
| 958 | "base: %ld%cB, range: %ld%cB, type %s\n", | ||
| 959 | reg, start_base, start_factor, | ||
| 960 | size_base, size_factor, | ||
| 961 | (type == MTRR_TYPE_UNCACHABLE)?"UC": | ||
| 962 | ((type == MTRR_TYPE_WRBACK)?"WB":"Other") | ||
| 963 | ); | ||
| 964 | } | ||
| 965 | save_var_mtrr(reg++, range_startk, sizek, type); | ||
| 966 | range_startk += sizek; | ||
| 967 | range_sizek -= sizek; | ||
| 968 | if (reg >= num_var_ranges) | ||
| 969 | break; | ||
| 970 | } | ||
| 971 | return reg; | ||
| 972 | } | ||
| 973 | |||
| 974 | static unsigned __init | ||
| 975 | range_to_mtrr_with_hole(struct var_mtrr_state *state, unsigned long basek, | ||
| 976 | unsigned long sizek) | ||
| 977 | { | ||
| 978 | unsigned long hole_basek, hole_sizek; | ||
| 979 | unsigned long second_basek, second_sizek; | ||
| 980 | unsigned long range0_basek, range0_sizek; | ||
| 981 | unsigned long range_basek, range_sizek; | ||
| 982 | unsigned long chunk_sizek; | ||
| 983 | unsigned long gran_sizek; | ||
| 984 | |||
| 985 | hole_basek = 0; | ||
| 986 | hole_sizek = 0; | ||
| 987 | second_basek = 0; | ||
| 988 | second_sizek = 0; | ||
| 989 | chunk_sizek = state->chunk_sizek; | ||
| 990 | gran_sizek = state->gran_sizek; | ||
| 991 | |||
| 992 | /* align with gran size, prevent small blocks from using up MTRRs */ | ||
| 993 | range_basek = ALIGN(state->range_startk, gran_sizek); | ||
| 994 | if ((range_basek > basek) && basek) | ||
| 995 | return second_sizek; | ||
| 996 | state->range_sizek -= (range_basek - state->range_startk); | ||
| 997 | range_sizek = ALIGN(state->range_sizek, gran_sizek); | ||
| 998 | |||
| 999 | while (range_sizek > state->range_sizek) { | ||
| 1000 | range_sizek -= gran_sizek; | ||
| 1001 | if (!range_sizek) | ||
| 1002 | return 0; | ||
| 1003 | } | ||
| 1004 | state->range_sizek = range_sizek; | ||
| 1005 | |||
| 1006 | /* try to append some small hole */ | ||
| 1007 | range0_basek = state->range_startk; | ||
| 1008 | range0_sizek = ALIGN(state->range_sizek, chunk_sizek); | ||
| 1009 | |||
| 1010 | /* no increase */ | ||
| 1011 | if (range0_sizek == state->range_sizek) { | ||
| 1012 | if (debug_print) | ||
| 1013 | printk(KERN_DEBUG "rangeX: %016lx - %016lx\n", | ||
| 1014 | range0_basek<<10, | ||
| 1015 | (range0_basek + state->range_sizek)<<10); | ||
| 1016 | state->reg = range_to_mtrr(state->reg, range0_basek, | ||
| 1017 | state->range_sizek, MTRR_TYPE_WRBACK); | ||
| 1018 | return 0; | ||
| 1019 | } | ||
| 1020 | |||
| 1021 | /* only cut back, when it is not the last */ | ||
| 1022 | if (sizek) { | ||
| 1023 | while (range0_basek + range0_sizek > (basek + sizek)) { | ||
| 1024 | if (range0_sizek >= chunk_sizek) | ||
| 1025 | range0_sizek -= chunk_sizek; | ||
| 1026 | else | ||
| 1027 | range0_sizek = 0; | ||
| 1028 | |||
| 1029 | if (!range0_sizek) | ||
| 1030 | break; | ||
| 1031 | } | ||
| 1032 | } | ||
| 1033 | |||
| 1034 | second_try: | ||
| 1035 | range_basek = range0_basek + range0_sizek; | ||
| 1036 | |||
| 1037 | /* one hole in the middle */ | ||
| 1038 | if (range_basek > basek && range_basek <= (basek + sizek)) | ||
| 1039 | second_sizek = range_basek - basek; | ||
| 1040 | |||
| 1041 | if (range0_sizek > state->range_sizek) { | ||
| 1042 | |||
| 1043 | /* one hole in middle or at end */ | ||
| 1044 | hole_sizek = range0_sizek - state->range_sizek - second_sizek; | ||
| 1045 | |||
| 1046 | /* hole size should be less than half of range0 size */ | ||
| 1047 | if (hole_sizek >= (range0_sizek >> 1) && | ||
| 1048 | range0_sizek >= chunk_sizek) { | ||
| 1049 | range0_sizek -= chunk_sizek; | ||
| 1050 | second_sizek = 0; | ||
| 1051 | hole_sizek = 0; | ||
| 1052 | |||
| 1053 | goto second_try; | ||
| 1054 | } | ||
| 1055 | } | ||
| 1056 | |||
| 1057 | if (range0_sizek) { | ||
| 1058 | if (debug_print) | ||
| 1059 | printk(KERN_DEBUG "range0: %016lx - %016lx\n", | ||
| 1060 | range0_basek<<10, | ||
| 1061 | (range0_basek + range0_sizek)<<10); | ||
| 1062 | state->reg = range_to_mtrr(state->reg, range0_basek, | ||
| 1063 | range0_sizek, MTRR_TYPE_WRBACK); | ||
| 1064 | } | ||
| 1065 | |||
| 1066 | if (range0_sizek < state->range_sizek) { | ||
| 1067 | /* need to handle left over */ | ||
| 1068 | range_sizek = state->range_sizek - range0_sizek; | ||
| 1069 | |||
| 1070 | if (debug_print) | ||
| 1071 | printk(KERN_DEBUG "range: %016lx - %016lx\n", | ||
| 1072 | range_basek<<10, | ||
| 1073 | (range_basek + range_sizek)<<10); | ||
| 1074 | state->reg = range_to_mtrr(state->reg, range_basek, | ||
| 1075 | range_sizek, MTRR_TYPE_WRBACK); | ||
| 1076 | } | ||
| 1077 | |||
| 1078 | if (hole_sizek) { | ||
| 1079 | hole_basek = range_basek - hole_sizek - second_sizek; | ||
| 1080 | if (debug_print) | ||
| 1081 | printk(KERN_DEBUG "hole: %016lx - %016lx\n", | ||
| 1082 | hole_basek<<10, | ||
| 1083 | (hole_basek + hole_sizek)<<10); | ||
| 1084 | state->reg = range_to_mtrr(state->reg, hole_basek, | ||
| 1085 | hole_sizek, MTRR_TYPE_UNCACHABLE); | ||
| 1086 | } | ||
| 1087 | |||
| 1088 | return second_sizek; | ||
| 1089 | } | ||
| 1090 | |||
| 1091 | static void __init | ||
| 1092 | set_var_mtrr_range(struct var_mtrr_state *state, unsigned long base_pfn, | ||
| 1093 | unsigned long size_pfn) | ||
| 1094 | { | ||
| 1095 | unsigned long basek, sizek; | ||
| 1096 | unsigned long second_sizek = 0; | ||
| 1097 | |||
| 1098 | if (state->reg >= num_var_ranges) | ||
| 1099 | return; | ||
| 1100 | |||
| 1101 | basek = base_pfn << (PAGE_SHIFT - 10); | ||
| 1102 | sizek = size_pfn << (PAGE_SHIFT - 10); | ||
| 1103 | |||
| 1104 | /* See if I can merge with the last range */ | ||
| 1105 | if ((basek <= 1024) || | ||
| 1106 | (state->range_startk + state->range_sizek == basek)) { | ||
| 1107 | unsigned long endk = basek + sizek; | ||
| 1108 | state->range_sizek = endk - state->range_startk; | ||
| 1109 | return; | ||
| 1110 | } | ||
| 1111 | /* Write the range mtrrs */ | ||
| 1112 | if (state->range_sizek != 0) | ||
| 1113 | second_sizek = range_to_mtrr_with_hole(state, basek, sizek); | ||
| 1114 | |||
| 1115 | /* Allocate an msr */ | ||
| 1116 | state->range_startk = basek + second_sizek; | ||
| 1117 | state->range_sizek = sizek - second_sizek; | ||
| 1118 | } | ||
| 1119 | |||
| 1120 | /* minimum size of mtrr block that can take a hole */ | ||
| 1121 | static u64 mtrr_chunk_size __initdata = (256ULL<<20); | ||
| 1122 | |||
| 1123 | static int __init parse_mtrr_chunk_size_opt(char *p) | ||
| 1124 | { | ||
| 1125 | if (!p) | ||
| 1126 | return -EINVAL; | ||
| 1127 | mtrr_chunk_size = memparse(p, &p); | ||
| 1128 | return 0; | ||
| 1129 | } | ||
| 1130 | early_param("mtrr_chunk_size", parse_mtrr_chunk_size_opt); | ||
| 1131 | |||
| 1132 | /* granularity of mtrr block */ | ||
| 1133 | static u64 mtrr_gran_size __initdata; | ||
| 1134 | |||
| 1135 | static int __init parse_mtrr_gran_size_opt(char *p) | ||
| 1136 | { | ||
| 1137 | if (!p) | ||
| 1138 | return -EINVAL; | ||
| 1139 | mtrr_gran_size = memparse(p, &p); | ||
| 1140 | return 0; | ||
| 1141 | } | ||
| 1142 | early_param("mtrr_gran_size", parse_mtrr_gran_size_opt); | ||
| 1143 | |||
| 1144 | static int nr_mtrr_spare_reg __initdata = | ||
| 1145 | CONFIG_MTRR_SANITIZER_SPARE_REG_NR_DEFAULT; | ||
| 1146 | |||
| 1147 | static int __init parse_mtrr_spare_reg(char *arg) | ||
| 1148 | { | ||
| 1149 | if (arg) | ||
| 1150 | nr_mtrr_spare_reg = simple_strtoul(arg, NULL, 0); | ||
| 1151 | return 0; | ||
| 1152 | } | ||
| 1153 | |||
| 1154 | early_param("mtrr_spare_reg_nr", parse_mtrr_spare_reg); | ||
| 1155 | |||
| 1156 | static int __init | ||
| 1157 | x86_setup_var_mtrrs(struct res_range *range, int nr_range, | ||
| 1158 | u64 chunk_size, u64 gran_size) | ||
| 1159 | { | ||
| 1160 | struct var_mtrr_state var_state; | ||
| 1161 | int i; | ||
| 1162 | int num_reg; | ||
| 1163 | |||
| 1164 | var_state.range_startk = 0; | ||
| 1165 | var_state.range_sizek = 0; | ||
| 1166 | var_state.reg = 0; | ||
| 1167 | var_state.chunk_sizek = chunk_size >> 10; | ||
| 1168 | var_state.gran_sizek = gran_size >> 10; | ||
| 1169 | |||
| 1170 | memset(range_state, 0, sizeof(range_state)); | ||
| 1171 | |||
| 1172 | /* Write the range etc */ | ||
| 1173 | for (i = 0; i < nr_range; i++) | ||
| 1174 | set_var_mtrr_range(&var_state, range[i].start, | ||
| 1175 | range[i].end - range[i].start + 1); | ||
| 1176 | |||
| 1177 | /* Write the last range */ | ||
| 1178 | if (var_state.range_sizek != 0) | ||
| 1179 | range_to_mtrr_with_hole(&var_state, 0, 0); | ||
| 1180 | |||
| 1181 | num_reg = var_state.reg; | ||
| 1182 | /* Clear out the extra MTRR's */ | ||
| 1183 | while (var_state.reg < num_var_ranges) { | ||
| 1184 | save_var_mtrr(var_state.reg, 0, 0, 0); | ||
| 1185 | var_state.reg++; | ||
| 1186 | } | ||
| 1187 | |||
| 1188 | return num_reg; | ||
| 1189 | } | ||
| 1190 | |||
| 1191 | struct mtrr_cleanup_result { | ||
| 1192 | unsigned long gran_sizek; | ||
| 1193 | unsigned long chunk_sizek; | ||
| 1194 | unsigned long lose_cover_sizek; | ||
| 1195 | unsigned int num_reg; | ||
| 1196 | int bad; | ||
| 1197 | }; | ||
| 1198 | |||
| 1199 | /* | ||
| 1200 | * gran_size: 64K, 128K, 256K, 512K, 1M, 2M, ..., 2G | ||
| 1201 | * chunk size: gran_size, ..., 2G | ||
| 1202 | * so we need (1+16)*8 | ||
| 1203 | */ | ||
| 1204 | #define NUM_RESULT 136 | ||
| 1205 | #define PSHIFT (PAGE_SHIFT - 10) | ||
| 1206 | |||
| 1207 | static struct mtrr_cleanup_result __initdata result[NUM_RESULT]; | ||
| 1208 | static unsigned long __initdata min_loss_pfn[RANGE_NUM]; | ||
| 1209 | |||
| 1210 | static void __init print_out_mtrr_range_state(void) | ||
| 1211 | { | ||
| 1212 | int i; | ||
| 1213 | char start_factor = 'K', size_factor = 'K'; | ||
| 1214 | unsigned long start_base, size_base; | ||
| 1215 | mtrr_type type; | ||
| 1216 | |||
| 1217 | for (i = 0; i < num_var_ranges; i++) { | ||
| 1218 | |||
| 1219 | size_base = range_state[i].size_pfn << (PAGE_SHIFT - 10); | ||
| 1220 | if (!size_base) | ||
| 1221 | continue; | ||
| 1222 | |||
| 1223 | size_base = to_size_factor(size_base, &size_factor), | ||
| 1224 | start_base = range_state[i].base_pfn << (PAGE_SHIFT - 10); | ||
| 1225 | start_base = to_size_factor(start_base, &start_factor), | ||
| 1226 | type = range_state[i].type; | ||
| 1227 | |||
| 1228 | printk(KERN_DEBUG "reg %d, base: %ld%cB, range: %ld%cB, type %s\n", | ||
| 1229 | i, start_base, start_factor, | ||
| 1230 | size_base, size_factor, | ||
| 1231 | (type == MTRR_TYPE_UNCACHABLE) ? "UC" : | ||
| 1232 | ((type == MTRR_TYPE_WRPROT) ? "WP" : | ||
| 1233 | ((type == MTRR_TYPE_WRBACK) ? "WB" : "Other")) | ||
| 1234 | ); | ||
| 1235 | } | ||
| 1236 | } | ||
| 1237 | |||
| 1238 | static int __init mtrr_need_cleanup(void) | ||
| 1239 | { | ||
| 1240 | int i; | ||
| 1241 | mtrr_type type; | ||
| 1242 | unsigned long size; | ||
| 1243 | /* extra one for all 0 */ | ||
| 1244 | int num[MTRR_NUM_TYPES + 1]; | ||
| 1245 | |||
| 1246 | /* check entries number */ | ||
| 1247 | memset(num, 0, sizeof(num)); | ||
| 1248 | for (i = 0; i < num_var_ranges; i++) { | ||
| 1249 | type = range_state[i].type; | ||
| 1250 | size = range_state[i].size_pfn; | ||
| 1251 | if (type >= MTRR_NUM_TYPES) | ||
| 1252 | continue; | ||
| 1253 | if (!size) | ||
| 1254 | type = MTRR_NUM_TYPES; | ||
| 1255 | if (type == MTRR_TYPE_WRPROT) | ||
| 1256 | type = MTRR_TYPE_UNCACHABLE; | ||
| 1257 | num[type]++; | ||
| 1258 | } | ||
| 1259 | |||
| 1260 | /* check if we got UC entries */ | ||
| 1261 | if (!num[MTRR_TYPE_UNCACHABLE]) | ||
| 1262 | return 0; | ||
| 1263 | |||
| 1264 | /* check if we only had WB and UC */ | ||
| 1265 | if (num[MTRR_TYPE_WRBACK] + num[MTRR_TYPE_UNCACHABLE] != | ||
| 1266 | num_var_ranges - num[MTRR_NUM_TYPES]) | ||
| 1267 | return 0; | ||
| 1268 | |||
| 1269 | return 1; | ||
| 1270 | } | ||
| 1271 | |||
| 1272 | static unsigned long __initdata range_sums; | ||
| 1273 | static void __init mtrr_calc_range_state(u64 chunk_size, u64 gran_size, | ||
| 1274 | unsigned long extra_remove_base, | ||
| 1275 | unsigned long extra_remove_size, | ||
| 1276 | int i) | ||
| 1277 | { | ||
| 1278 | int num_reg; | ||
| 1279 | static struct res_range range_new[RANGE_NUM]; | ||
| 1280 | static int nr_range_new; | ||
| 1281 | unsigned long range_sums_new; | ||
| 1282 | |||
| 1283 | /* convert ranges to var ranges state */ | ||
| 1284 | num_reg = x86_setup_var_mtrrs(range, nr_range, | ||
| 1285 | chunk_size, gran_size); | ||
| 1286 | |||
| 1287 | /* we got new setting in range_state, check it */ | ||
| 1288 | memset(range_new, 0, sizeof(range_new)); | ||
| 1289 | nr_range_new = x86_get_mtrr_mem_range(range_new, 0, | ||
| 1290 | extra_remove_base, extra_remove_size); | ||
| 1291 | range_sums_new = sum_ranges(range_new, nr_range_new); | ||
| 1292 | |||
| 1293 | result[i].chunk_sizek = chunk_size >> 10; | ||
| 1294 | result[i].gran_sizek = gran_size >> 10; | ||
| 1295 | result[i].num_reg = num_reg; | ||
| 1296 | if (range_sums < range_sums_new) { | ||
| 1297 | result[i].lose_cover_sizek = | ||
| 1298 | (range_sums_new - range_sums) << PSHIFT; | ||
| 1299 | result[i].bad = 1; | ||
| 1300 | } else | ||
| 1301 | result[i].lose_cover_sizek = | ||
| 1302 | (range_sums - range_sums_new) << PSHIFT; | ||
| 1303 | |||
| 1304 | /* double check it */ | ||
| 1305 | if (!result[i].bad && !result[i].lose_cover_sizek) { | ||
| 1306 | if (nr_range_new != nr_range || | ||
| 1307 | memcmp(range, range_new, sizeof(range))) | ||
| 1308 | result[i].bad = 1; | ||
| 1309 | } | ||
| 1310 | |||
| 1311 | if (!result[i].bad && (range_sums - range_sums_new < | ||
| 1312 | min_loss_pfn[num_reg])) { | ||
| 1313 | min_loss_pfn[num_reg] = | ||
| 1314 | range_sums - range_sums_new; | ||
| 1315 | } | ||
| 1316 | } | ||
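Note on units: range_sums and range_sums_new above are page counts, while lose_cover_sizek is in KiB. The conversion relies on PSHIFT being defined earlier in this file as (PAGE_SHIFT - 10) — an assumption here, since the definition is outside this hunk — so with 4 KiB pages PSHIFT is 2 and, for example, a coverage loss of 256 pages is recorded as 256 << 2 = 1024 KiB, i.e. exactly 1 MiB.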
| 1317 | |||
| 1318 | static void __init mtrr_print_out_one_result(int i) | ||
| 1319 | { | ||
| 1320 | char gran_factor, chunk_factor, lose_factor; | ||
| 1321 | unsigned long gran_base, chunk_base, lose_base; | ||
| 1322 | |||
| 1323 | gran_base = to_size_factor(result[i].gran_sizek, &gran_factor); | ||
| 1324 | chunk_base = to_size_factor(result[i].chunk_sizek, &chunk_factor); | ||
| 1325 | lose_base = to_size_factor(result[i].lose_cover_sizek, &lose_factor); | ||
| 1326 | printk(KERN_INFO "%sgran_size: %ld%c \tchunk_size: %ld%c \t", | ||
| 1327 | result[i].bad ? "*BAD*" : " ", | ||
| 1328 | gran_base, gran_factor, chunk_base, chunk_factor); | ||
| 1329 | printk(KERN_CONT "num_reg: %d \tlose cover RAM: %s%ld%c\n", | ||
| 1330 | result[i].num_reg, result[i].bad ? "-" : "", | ||
| 1331 | lose_base, lose_factor); | ||
| 1332 | } | ||
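to_size_factor() is defined earlier in this file and is not visible in this hunk. Judging from the call sites above, it takes a size in KiB, scales it down to the largest unit that divides it evenly, and reports the unit character through the second argument. A sketch consistent with that usage (the exact upstream body may differ):

	static unsigned long __init to_size_factor(unsigned long sizek, char *factorp)
	{
		unsigned long base = sizek;
		char factor;

		if (base & ((1 << 10) - 1)) {		/* not a whole number of MiB */
			factor = 'K';
		} else if (base & ((1 << 20) - 1)) {	/* whole MiB, not whole GiB */
			factor = 'M';
			base >>= 10;
		} else {				/* whole number of GiB */
			factor = 'G';
			base >>= 20;
		}

		*factorp = factor;
		return base;
	}

So 2097152 KiB prints as "2G", while 1536 KiB stays "1536K" because it is not MiB-aligned.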
| 1333 | |||
| 1334 | static int __init mtrr_search_optimal_index(void) | ||
| 1335 | { | ||
| 1336 | int i; | ||
| 1337 | int num_reg_good; | ||
| 1338 | int index_good; | ||
| 1339 | |||
| 1340 | if (nr_mtrr_spare_reg >= num_var_ranges) | ||
| 1341 | nr_mtrr_spare_reg = num_var_ranges - 1; | ||
| 1342 | num_reg_good = -1; | ||
| 1343 | for (i = num_var_ranges - nr_mtrr_spare_reg; i > 0; i--) { | ||
| 1344 | if (!min_loss_pfn[i]) | ||
| 1345 | num_reg_good = i; | ||
| 1346 | } | ||
| 1347 | |||
| 1348 | index_good = -1; | ||
| 1349 | if (num_reg_good != -1) { | ||
| 1350 | for (i = 0; i < NUM_RESULT; i++) { | ||
| 1351 | if (!result[i].bad && | ||
| 1352 | result[i].num_reg == num_reg_good && | ||
| 1353 | !result[i].lose_cover_sizek) { | ||
| 1354 | index_good = i; | ||
| 1355 | break; | ||
| 1356 | } | ||
| 1357 | } | ||
| 1358 | } | ||
| 1359 | |||
| 1360 | return index_good; | ||
| 1361 | } | ||
| 1362 | |||
| 1363 | |||
| 1364 | static int __init mtrr_cleanup(unsigned address_bits) | ||
| 1365 | { | ||
| 1366 | unsigned long extra_remove_base, extra_remove_size; | ||
| 1367 | unsigned long base, size, def, dummy; | ||
| 1368 | mtrr_type type; | ||
| 1369 | u64 chunk_size, gran_size; | ||
| 1370 | int index_good; | ||
| 1371 | int i; | ||
| 1372 | |||
| 1373 | if (!is_cpu(INTEL) || enable_mtrr_cleanup < 1) | ||
| 1374 | return 0; | ||
| 1375 | rdmsr(MTRRdefType_MSR, def, dummy); | ||
| 1376 | def &= 0xff; | ||
| 1377 | if (def != MTRR_TYPE_UNCACHABLE) | ||
| 1378 | return 0; | ||
| 1379 | |||
| 1380 | /* get the variable MTRRs and store them aside */ | ||
| 1381 | memset(range_state, 0, sizeof(range_state)); | ||
| 1382 | for (i = 0; i < num_var_ranges; i++) { | ||
| 1383 | mtrr_if->get(i, &base, &size, &type); | ||
| 1384 | range_state[i].base_pfn = base; | ||
| 1385 | range_state[i].size_pfn = size; | ||
| 1386 | range_state[i].type = type; | ||
| 1387 | } | ||
| 1388 | |||
| 1389 | /* check whether we need to handle it and are able to handle it */ | ||
| 1390 | if (!mtrr_need_cleanup()) | ||
| 1391 | return 0; | ||
| 1392 | |||
| 1393 | /* print the original variable MTRRs first, for debugging: */ | ||
| 1394 | printk(KERN_DEBUG "original variable MTRRs\n"); | ||
| 1395 | print_out_mtrr_range_state(); | ||
| 1396 | |||
| 1397 | memset(range, 0, sizeof(range)); | ||
| 1398 | extra_remove_size = 0; | ||
| 1399 | extra_remove_base = 1 << (32 - PAGE_SHIFT); | ||
| 1400 | if (mtrr_tom2) | ||
| 1401 | extra_remove_size = | ||
| 1402 | (mtrr_tom2 >> PAGE_SHIFT) - extra_remove_base; | ||
| 1403 | nr_range = x86_get_mtrr_mem_range(range, 0, extra_remove_base, | ||
| 1404 | extra_remove_size); | ||
| 1405 | /* | ||
| 1406 | * [0, 1M) should always be covered by a variable MTRR with WB, | ||
| 1407 | * and the fixed MTRRs take effect before the variable MTRRs for that range | ||
| 1408 | */ | ||
| 1409 | nr_range = add_range_with_merge(range, nr_range, 0, | ||
| 1410 | (1ULL<<(20 - PAGE_SHIFT)) - 1); | ||
| 1411 | /* sort the ranges */ | ||
| 1412 | sort(range, nr_range, sizeof(struct res_range), cmp_range, NULL); | ||
| 1413 | |||
| 1414 | range_sums = sum_ranges(range, nr_range); | ||
| 1415 | printk(KERN_INFO "total RAM coverred: %ldM\n", | ||
| 1416 | range_sums >> (20 - PAGE_SHIFT)); | ||
| 1417 | |||
| 1418 | if (mtrr_chunk_size && mtrr_gran_size) { | ||
| 1419 | i = 0; | ||
| 1420 | mtrr_calc_range_state(mtrr_chunk_size, mtrr_gran_size, | ||
| 1421 | extra_remove_base, extra_remove_size, i); | ||
| 1422 | |||
| 1423 | mtrr_print_out_one_result(i); | ||
| 1424 | |||
| 1425 | if (!result[i].bad) { | ||
| 1426 | set_var_mtrr_all(address_bits); | ||
| 1427 | return 1; | ||
| 1428 | } | ||
| 1429 | printk(KERN_INFO "invalid mtrr_gran_size or mtrr_chunk_size, " | ||
| 1430 | "will find optimal one\n"); | ||
| 1431 | } | ||
| 1432 | |||
| 1433 | i = 0; | ||
| 1434 | memset(min_loss_pfn, 0xff, sizeof(min_loss_pfn)); | ||
| 1435 | memset(result, 0, sizeof(result)); | ||
| 1436 | for (gran_size = (1ULL<<16); gran_size < (1ULL<<32); gran_size <<= 1) { | ||
| 1437 | |||
| 1438 | for (chunk_size = gran_size; chunk_size < (1ULL<<32); | ||
| 1439 | chunk_size <<= 1) { | ||
| 1440 | |||
| 1441 | if (i >= NUM_RESULT) | ||
| 1442 | continue; | ||
| 1443 | |||
| 1444 | mtrr_calc_range_state(chunk_size, gran_size, | ||
| 1445 | extra_remove_base, extra_remove_size, i); | ||
| 1446 | if (debug_print) { | ||
| 1447 | mtrr_print_out_one_result(i); | ||
| 1448 | printk(KERN_INFO "\n"); | ||
| 1449 | } | ||
| 1450 | |||
| 1451 | i++; | ||
| 1452 | } | ||
| 1453 | } | ||
| 1454 | |||
| 1455 | /* try to find the optimal index */ | ||
| 1456 | index_good = mtrr_search_optimal_index(); | ||
| 1457 | |||
| 1458 | if (index_good != -1) { | ||
| 1459 | printk(KERN_INFO "Found optimal setting for mtrr clean up\n"); | ||
| 1460 | i = index_good; | ||
| 1461 | mtrr_print_out_one_result(i); | ||
| 1462 | |||
| 1463 | /* convert ranges to var ranges state */ | ||
| 1464 | chunk_size = result[i].chunk_sizek; | ||
| 1465 | chunk_size <<= 10; | ||
| 1466 | gran_size = result[i].gran_sizek; | ||
| 1467 | gran_size <<= 10; | ||
| 1468 | x86_setup_var_mtrrs(range, nr_range, chunk_size, gran_size); | ||
| 1469 | set_var_mtrr_all(address_bits); | ||
| 1470 | printk(KERN_DEBUG "New variable MTRRs\n"); | ||
| 1471 | print_out_mtrr_range_state(); | ||
| 1472 | return 1; | ||
| 1473 | } else { | ||
| 1474 | /* print out all */ | ||
| 1475 | for (i = 0; i < NUM_RESULT; i++) | ||
| 1476 | mtrr_print_out_one_result(i); | ||
| 1477 | } | ||
| 1478 | |||
| 1479 | printk(KERN_INFO "mtrr_cleanup: can not find optimal value\n"); | ||
| 1480 | printk(KERN_INFO "please specify mtrr_gran_size/mtrr_chunk_size\n"); | ||
| 1481 | |||
| 1482 | return 0; | ||
| 1483 | } | ||
| 1484 | #else | ||
| 1485 | static int __init mtrr_cleanup(unsigned address_bits) | ||
| 1486 | { | ||
| 1487 | return 0; | ||
| 1488 | } | ||
| 1489 | #endif | ||
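For reference, the whole pass is driven from the kernel command line: mtrr_cleanup() only runs when enable_mtrr_cleanup is raised above zero, and mtrr_gran_size=/mtrr_chunk_size= feed the fast path above instead of the exhaustive search. The parameter names come from the option parsing elsewhere in this file (and the MTRR sanitizer documentation); the sizes below are only an example of a tuning run:

	enable_mtrr_cleanup mtrr_gran_size=64M mtrr_chunk_size=256M mtrr_spare_reg_nr=1

mtrr_spare_reg_nr= asks the search to leave that many variable registers free for later runtime use, which is what nr_mtrr_spare_reg in mtrr_search_optimal_index() accounts for.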
| 1490 | |||
| 1491 | static int __initdata changed_by_mtrr_cleanup; | ||
| 1492 | |||
| 1493 | static int disable_mtrr_trim; | ||
| 1494 | |||
| 1495 | static int __init disable_mtrr_trim_setup(char *str) | ||
| 1496 | { | ||
| 1497 | disable_mtrr_trim = 1; | ||
| 1498 | return 0; | ||
| 1499 | } | ||
| 1500 | early_param("disable_mtrr_trim", disable_mtrr_trim_setup); | ||
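Trimming can likewise be vetoed from the command line: booting with

	disable_mtrr_trim

keeps mtrr_trim_uncached_memory() below from shrinking end_pfn even when the MTRRs leave the top of RAM uncached (useful when the BIOS layout is known to be intentional).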
| 1501 | |||
| 1502 | /* | ||
| 1503 | * Newer AMD K8s and later CPUs have a special magic MSR way to force WB | ||
| 1504 | * for memory >4GB. Check for that here. | ||
| 1505 | * Note this won't check whether the MTRRs below 4GB (where the magic bit | ||
| 1506 | * doesn't apply) are wrong, but so far we don't know of any such case in the wild. | ||
| 1507 | */ | ||
| 1508 | #define Tom2Enabled (1U << 21) | ||
| 1509 | #define Tom2ForceMemTypeWB (1U << 22) | ||
| 1510 | |||
| 1511 | int __init amd_special_default_mtrr(void) | ||
| 1512 | { | ||
| 1513 | u32 l, h; | ||
| 1514 | |||
| 1515 | if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD) | ||
| 1516 | return 0; | ||
| 1517 | if (boot_cpu_data.x86 < 0xf || boot_cpu_data.x86 > 0x11) | ||
| 1518 | return 0; | ||
| 1519 | /* In case some hypervisor doesn't pass SYSCFG through */ | ||
| 1520 | if (rdmsr_safe(MSR_K8_SYSCFG, &l, &h) < 0) | ||
| 1521 | return 0; | ||
| 1522 | /* | ||
| 1523 | * Memory between 4GB and top of mem is forced WB by this magic bit. | ||
| 1524 | * Reserved before K8RevF, but should be zero there. | ||
| 1525 | */ | ||
| 1526 | if ((l & (Tom2Enabled | Tom2ForceMemTypeWB)) == | ||
| 1527 | (Tom2Enabled | Tom2ForceMemTypeWB)) | ||
| 1528 | return 1; | ||
| 1529 | return 0; | ||
| 1530 | } | ||
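Background on the two bits tested above (stated from AMD's SYSCFG documentation, not from this hunk; the kernel's own names are MSR_K8_SYSCFG, Tom2Enabled and Tom2ForceMemTypeWB): SYSCFG is MSR 0xC001_0010, bit 21 enables the TOM2 (top-of-memory-2) register, and bit 22 forces the [4GB, TOM2) range to write-back. Both must be set, because TOM2 being enabled by itself says nothing about the memory type. A condensed restatement with illustrative macro names:

	#define AMD_SYSCFG_MSR        0xc0010010   /* SYSCFG MSR index on AMD parts       */
	#define SYSCFG_TOM2_EN        (1U << 21)   /* TOM2 register is enabled            */
	#define SYSCFG_TOM2_FORCE_WB  (1U << 22)   /* [4GB, TOM2) is forced write-back    */

	/* equivalent predicate on the low 32 bits of the MSR value */
	static inline int tom2_forces_wb(unsigned int syscfg_lo)
	{
		return (syscfg_lo & (SYSCFG_TOM2_EN | SYSCFG_TOM2_FORCE_WB)) ==
		       (SYSCFG_TOM2_EN | SYSCFG_TOM2_FORCE_WB);
	}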
| 1531 | |||
| 1532 | static u64 __init real_trim_memory(unsigned long start_pfn, | ||
| 1533 | unsigned long limit_pfn) | ||
| 1534 | { | ||
| 1535 | u64 trim_start, trim_size; | ||
| 1536 | trim_start = start_pfn; | ||
| 1537 | trim_start <<= PAGE_SHIFT; | ||
| 1538 | trim_size = limit_pfn; | ||
| 1539 | trim_size <<= PAGE_SHIFT; | ||
| 1540 | trim_size -= trim_start; | ||
| 1541 | |||
| 1542 | return e820_update_range(trim_start, trim_size, E820_RAM, | ||
| 1543 | E820_RESERVED); | ||
| 1544 | } | ||
| 1545 | /** | ||
| 1546 | * mtrr_trim_uncached_memory - trim RAM not covered by MTRRs | ||
| 1547 | * @end_pfn: ending page frame number | ||
| 1548 | * | ||
| 1549 | * Some buggy BIOSes don't set up the MTRRs properly for systems with certain | ||
| 1550 | * memory configurations. This routine checks that the highest MTRR matches | ||
| 1551 | * the end of memory, to make sure the MTRRs of write-back type cover | ||
| 1552 | * all of the memory the kernel intends to use. If not, it trims any | ||
| 1553 | * memory off the end by adjusting end_pfn, removing it from the kernel's | ||
| 1554 | * allocation pools and warning the user with an obnoxious message. | ||
| 1555 | */ | ||
| 1556 | int __init mtrr_trim_uncached_memory(unsigned long end_pfn) | ||
| 1557 | { | ||
| 1558 | unsigned long i, base, size, highest_pfn = 0, def, dummy; | ||
| 1559 | mtrr_type type; | ||
| 1560 | u64 total_trim_size; | ||
| 1561 | |||
| 1562 | /* one extra slot for zero-size entries */ | ||
| 1563 | int num[MTRR_NUM_TYPES + 1]; | ||
| 1564 | /* | ||
| 1565 | * Make sure we only trim uncachable memory on machines that | ||
| 1566 | * support the Intel MTRR architecture: | ||
| 1567 | */ | ||
| 1568 | if (!is_cpu(INTEL) || disable_mtrr_trim) | ||
| 1569 | return 0; | ||
| 1570 | rdmsr(MTRRdefType_MSR, def, dummy); | ||
| 1571 | def &= 0xff; | ||
| 1572 | if (def != MTRR_TYPE_UNCACHABLE) | ||
| 1573 | return 0; | ||
| 1574 | |||
| 1575 | /* get the variable MTRRs and store them aside */ | ||
| 1576 | memset(range_state, 0, sizeof(range_state)); | ||
| 1577 | for (i = 0; i < num_var_ranges; i++) { | ||
| 1578 | mtrr_if->get(i, &base, &size, &type); | ||
| 1579 | range_state[i].base_pfn = base; | ||
| 1580 | range_state[i].size_pfn = size; | ||
| 1581 | range_state[i].type = type; | ||
| 1582 | } | ||
| 1583 | |||
| 1584 | /* Find highest cached pfn */ | ||
| 1585 | for (i = 0; i < num_var_ranges; i++) { | ||
| 1586 | type = range_state[i].type; | ||
| 1587 | if (type != MTRR_TYPE_WRBACK) | ||
| 1588 | continue; | ||
| 1589 | base = range_state[i].base_pfn; | ||
| 1590 | size = range_state[i].size_pfn; | ||
| 1591 | if (highest_pfn < base + size) | ||
| 1592 | highest_pfn = base + size; | ||
| 1593 | } | ||
| 1594 | |||
| 1595 | /* kvm/qemu don't set the MTRRs up; don't trim all of memory away */ | ||
| 1596 | if (!highest_pfn) { | ||
| 1597 | printk(KERN_INFO "CPU MTRRs all blank - virtualized system.\n"); | ||
| 1598 | return 0; | ||
| 1599 | } | ||
| 1600 | |||
| 1601 | /* count the entries of each type */ | ||
| 1602 | memset(num, 0, sizeof(num)); | ||
| 1603 | for (i = 0; i < num_var_ranges; i++) { | ||
| 1604 | type = range_state[i].type; | ||
| 1605 | if (type >= MTRR_NUM_TYPES) | ||
| 1606 | continue; | ||
| 1607 | size = range_state[i].size_pfn; | ||
| 1608 | if (!size) | ||
| 1609 | type = MTRR_NUM_TYPES; | ||
| 1610 | num[type]++; | ||
| 1611 | } | ||
| 1612 | |||
| 1613 | /* no entry for WB? */ | ||
| 1614 | if (!num[MTRR_TYPE_WRBACK]) | ||
| 1615 | return 0; | ||
| 1616 | |||
| 1617 | /* check if we only have WB and UC */ | ||
| 1618 | if (num[MTRR_TYPE_WRBACK] + num[MTRR_TYPE_UNCACHABLE] != | ||
| 1619 | num_var_ranges - num[MTRR_NUM_TYPES]) | ||
| 1620 | return 0; | ||
| 1621 | |||
| 1622 | memset(range, 0, sizeof(range)); | ||
| 1623 | nr_range = 0; | ||
| 1624 | if (mtrr_tom2) { | ||
| 1625 | range[nr_range].start = (1ULL<<(32 - PAGE_SHIFT)); | ||
| 1626 | range[nr_range].end = (mtrr_tom2 >> PAGE_SHIFT) - 1; | ||
| 1627 | if (highest_pfn < range[nr_range].end + 1) | ||
| 1628 | highest_pfn = range[nr_range].end + 1; | ||
| 1629 | nr_range++; | ||
| 1630 | } | ||
| 1631 | nr_range = x86_get_mtrr_mem_range(range, nr_range, 0, 0); | ||
| 1632 | |||
| 1633 | total_trim_size = 0; | ||
| 1634 | /* check the head */ | ||
| 1635 | if (range[0].start) | ||
| 1636 | total_trim_size += real_trim_memory(0, range[0].start); | ||
| 1637 | /* check the holes */ | ||
| 1638 | for (i = 0; i < nr_range - 1; i++) { | ||
| 1639 | if (range[i].end + 1 < range[i+1].start) | ||
| 1640 | total_trim_size += real_trim_memory(range[i].end + 1, | ||
| 1641 | range[i+1].start); | ||
| 1642 | } | ||
| 1643 | /* check the top */ | ||
| 1644 | i = nr_range - 1; | ||
| 1645 | if (range[i].end + 1 < end_pfn) | ||
| 1646 | total_trim_size += real_trim_memory(range[i].end + 1, | ||
| 1647 | end_pfn); | ||
| 1648 | |||
| 1649 | if (total_trim_size) { | ||
| 1650 | printk(KERN_WARNING "WARNING: BIOS bug: CPU MTRRs don't cover" | ||
| 1651 | " all of memory, losing %lluMB of RAM.\n", | ||
| 1652 | total_trim_size >> 20); | ||
| 1653 | |||
| 1654 | if (!changed_by_mtrr_cleanup) | ||
| 1655 | WARN_ON(1); | ||
| 1656 | |||
| 1657 | printk(KERN_INFO "update e820 for mtrr\n"); | ||
| 1658 | update_e820(); | ||
| 1659 | |||
| 1660 | return 1; | ||
| 1661 | } | ||
| 1662 | |||
| 1663 | return 0; | ||
| 1664 | } | ||
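The trimming above falls into three cases: a head below the first covered range, holes between covered ranges, and a tail above the last one. A standalone arithmetic check of what the warning would report for a hypothetical layout — WB coverage over [0, 3GB) and [4GB, 6GB) on a machine whose usable RAM runs to 8GB (illustration only, not kernel code):

	#include <stdio.h>

	int main(void)
	{
		unsigned long long gb = 1ULL << 30;
		unsigned long long covered[2][2] = { { 0, 3 * gb }, { 4 * gb, 6 * gb } };
		unsigned long long end = 8 * gb, trimmed = 0;

		trimmed += covered[0][0] - 0;             /* head: nothing below 0    */
		trimmed += covered[1][0] - covered[0][1]; /* hole: [3GB, 4GB) -> 1GB  */
		trimmed += end - covered[1][1];           /* tail: [6GB, 8GB) -> 2GB  */
		printf("losing %lluMB of RAM\n", trimmed >> 20);  /* prints 3072 */
		return 0;
	}

The real routine does the same bookkeeping in page frames and hands each stretch to real_trim_memory(), which marks it E820_RESERVED rather than merely counting it.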
| 1665 | 614 | ||
| 1666 | /** | 615 | /** |
| 1667 | * mtrr_bp_init - initialize mtrrs on the boot CPU | 616 | * mtrr_bp_init - initialize mtrrs on the boot CPU |
diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.h b/arch/x86/kernel/cpu/mtrr/mtrr.h index ffd60409cc6d..77f67f7b347a 100644 --- a/arch/x86/kernel/cpu/mtrr/mtrr.h +++ b/arch/x86/kernel/cpu/mtrr/mtrr.h | |||
| @@ -79,6 +79,7 @@ extern struct mtrr_ops * mtrr_if; | |||
| 79 | 79 | ||
| 80 | extern unsigned int num_var_ranges; | 80 | extern unsigned int num_var_ranges; |
| 81 | extern u64 mtrr_tom2; | 81 | extern u64 mtrr_tom2; |
| 82 | extern struct mtrr_state_type mtrr_state; | ||
| 82 | 83 | ||
| 83 | void mtrr_state_warn(void); | 84 | void mtrr_state_warn(void); |
| 84 | const char *mtrr_attrib_to_str(int x); | 85 | const char *mtrr_attrib_to_str(int x); |
| @@ -88,3 +89,6 @@ void mtrr_wrmsr(unsigned, unsigned, unsigned); | |||
| 88 | int amd_init_mtrr(void); | 89 | int amd_init_mtrr(void); |
| 89 | int cyrix_init_mtrr(void); | 90 | int cyrix_init_mtrr(void); |
| 90 | int centaur_init_mtrr(void); | 91 | int centaur_init_mtrr(void); |
| 92 | |||
| 93 | extern int changed_by_mtrr_cleanup; | ||
| 94 | extern int mtrr_cleanup(unsigned address_bits); | ||
diff --git a/arch/x86/kernel/cpu/perfctr-watchdog.c b/arch/x86/kernel/cpu/perfctr-watchdog.c index 9abd48b22674..f6c70a164e32 100644 --- a/arch/x86/kernel/cpu/perfctr-watchdog.c +++ b/arch/x86/kernel/cpu/perfctr-watchdog.c | |||
| @@ -19,7 +19,7 @@ | |||
| 19 | #include <linux/nmi.h> | 19 | #include <linux/nmi.h> |
| 20 | #include <linux/kprobes.h> | 20 | #include <linux/kprobes.h> |
| 21 | 21 | ||
| 22 | #include <asm/apic.h> | 22 | #include <asm/genapic.h> |
| 23 | #include <asm/intel_arch_perfmon.h> | 23 | #include <asm/intel_arch_perfmon.h> |
| 24 | 24 | ||
| 25 | struct nmi_watchdog_ctlblk { | 25 | struct nmi_watchdog_ctlblk { |
diff --git a/arch/x86/kernel/cpu/proc.c b/arch/x86/kernel/cpu/proc.c index 01b1244ef1c0..f93047fed791 100644 --- a/arch/x86/kernel/cpu/proc.c +++ b/arch/x86/kernel/cpu/proc.c | |||
| @@ -7,15 +7,14 @@ | |||
| 7 | /* | 7 | /* |
| 8 | * Get CPU information for use by the procfs. | 8 | * Get CPU information for use by the procfs. |
| 9 | */ | 9 | */ |
| 10 | #ifdef CONFIG_X86_32 | ||
| 11 | static void show_cpuinfo_core(struct seq_file *m, struct cpuinfo_x86 *c, | 10 | static void show_cpuinfo_core(struct seq_file *m, struct cpuinfo_x86 *c, |
| 12 | unsigned int cpu) | 11 | unsigned int cpu) |
| 13 | { | 12 | { |
| 14 | #ifdef CONFIG_X86_HT | 13 | #ifdef CONFIG_SMP |
| 15 | if (c->x86_max_cores * smp_num_siblings > 1) { | 14 | if (c->x86_max_cores * smp_num_siblings > 1) { |
| 16 | seq_printf(m, "physical id\t: %d\n", c->phys_proc_id); | 15 | seq_printf(m, "physical id\t: %d\n", c->phys_proc_id); |
| 17 | seq_printf(m, "siblings\t: %d\n", | 16 | seq_printf(m, "siblings\t: %d\n", |
| 18 | cpus_weight(per_cpu(cpu_core_map, cpu))); | 17 | cpumask_weight(cpu_sibling_mask(cpu))); |
| 19 | seq_printf(m, "core id\t\t: %d\n", c->cpu_core_id); | 18 | seq_printf(m, "core id\t\t: %d\n", c->cpu_core_id); |
| 20 | seq_printf(m, "cpu cores\t: %d\n", c->booted_cores); | 19 | seq_printf(m, "cpu cores\t: %d\n", c->booted_cores); |
| 21 | seq_printf(m, "apicid\t\t: %d\n", c->apicid); | 20 | seq_printf(m, "apicid\t\t: %d\n", c->apicid); |
| @@ -24,6 +23,7 @@ static void show_cpuinfo_core(struct seq_file *m, struct cpuinfo_x86 *c, | |||
| 24 | #endif | 23 | #endif |
| 25 | } | 24 | } |
| 26 | 25 | ||
| 26 | #ifdef CONFIG_X86_32 | ||
| 27 | static void show_cpuinfo_misc(struct seq_file *m, struct cpuinfo_x86 *c) | 27 | static void show_cpuinfo_misc(struct seq_file *m, struct cpuinfo_x86 *c) |
| 28 | { | 28 | { |
| 29 | /* | 29 | /* |
| @@ -50,22 +50,6 @@ static void show_cpuinfo_misc(struct seq_file *m, struct cpuinfo_x86 *c) | |||
| 50 | c->wp_works_ok ? "yes" : "no"); | 50 | c->wp_works_ok ? "yes" : "no"); |
| 51 | } | 51 | } |
| 52 | #else | 52 | #else |
| 53 | static void show_cpuinfo_core(struct seq_file *m, struct cpuinfo_x86 *c, | ||
| 54 | unsigned int cpu) | ||
| 55 | { | ||
| 56 | #ifdef CONFIG_SMP | ||
| 57 | if (c->x86_max_cores * smp_num_siblings > 1) { | ||
| 58 | seq_printf(m, "physical id\t: %d\n", c->phys_proc_id); | ||
| 59 | seq_printf(m, "siblings\t: %d\n", | ||
| 60 | cpus_weight(per_cpu(cpu_core_map, cpu))); | ||
| 61 | seq_printf(m, "core id\t\t: %d\n", c->cpu_core_id); | ||
| 62 | seq_printf(m, "cpu cores\t: %d\n", c->booted_cores); | ||
| 63 | seq_printf(m, "apicid\t\t: %d\n", c->apicid); | ||
| 64 | seq_printf(m, "initial apicid\t: %d\n", c->initial_apicid); | ||
| 65 | } | ||
| 66 | #endif | ||
| 67 | } | ||
| 68 | |||
| 69 | static void show_cpuinfo_misc(struct seq_file *m, struct cpuinfo_x86 *c) | 53 | static void show_cpuinfo_misc(struct seq_file *m, struct cpuinfo_x86 *c) |
| 70 | { | 54 | { |
| 71 | seq_printf(m, | 55 | seq_printf(m, |
| @@ -159,9 +143,9 @@ static int show_cpuinfo(struct seq_file *m, void *v) | |||
| 159 | static void *c_start(struct seq_file *m, loff_t *pos) | 143 | static void *c_start(struct seq_file *m, loff_t *pos) |
| 160 | { | 144 | { |
| 161 | if (*pos == 0) /* just in case, cpu 0 is not the first */ | 145 | if (*pos == 0) /* just in case, cpu 0 is not the first */ |
| 162 | *pos = first_cpu(cpu_online_map); | 146 | *pos = cpumask_first(cpu_online_mask); |
| 163 | else | 147 | else |
| 164 | *pos = next_cpu_nr(*pos - 1, cpu_online_map); | 148 | *pos = cpumask_next(*pos - 1, cpu_online_mask); |
| 165 | if ((*pos) < nr_cpu_ids) | 149 | if ((*pos) < nr_cpu_ids) |
| 166 | return &cpu_data(*pos); | 150 | return &cpu_data(*pos); |
| 167 | return NULL; | 151 | return NULL; |
diff --git a/arch/x86/kernel/cpu/transmeta.c b/arch/x86/kernel/cpu/transmeta.c index 52b3fefbd5af..bb62b3e5caad 100644 --- a/arch/x86/kernel/cpu/transmeta.c +++ b/arch/x86/kernel/cpu/transmeta.c | |||
| @@ -98,7 +98,7 @@ static void __cpuinit init_transmeta(struct cpuinfo_x86 *c) | |||
| 98 | #endif | 98 | #endif |
| 99 | } | 99 | } |
| 100 | 100 | ||
| 101 | static struct cpu_dev transmeta_cpu_dev __cpuinitdata = { | 101 | static const struct cpu_dev __cpuinitconst transmeta_cpu_dev = { |
| 102 | .c_vendor = "Transmeta", | 102 | .c_vendor = "Transmeta", |
| 103 | .c_ident = { "GenuineTMx86", "TransmetaCPU" }, | 103 | .c_ident = { "GenuineTMx86", "TransmetaCPU" }, |
| 104 | .c_early_init = early_init_transmeta, | 104 | .c_early_init = early_init_transmeta, |
diff --git a/arch/x86/kernel/cpu/umc.c b/arch/x86/kernel/cpu/umc.c index e777f79e0960..fd2c37bf7acb 100644 --- a/arch/x86/kernel/cpu/umc.c +++ b/arch/x86/kernel/cpu/umc.c | |||
| @@ -8,7 +8,7 @@ | |||
| 8 | * so no special init takes place. | 8 | * so no special init takes place. |
| 9 | */ | 9 | */ |
| 10 | 10 | ||
| 11 | static struct cpu_dev umc_cpu_dev __cpuinitdata = { | 11 | static const struct cpu_dev __cpuinitconst umc_cpu_dev = { |
| 12 | .c_vendor = "UMC", | 12 | .c_vendor = "UMC", |
| 13 | .c_ident = { "UMC UMC UMC" }, | 13 | .c_ident = { "UMC UMC UMC" }, |
| 14 | .c_models = { | 14 | .c_models = { |
