Diffstat (limited to 'arch/x86/kernel/cpu')
-rw-r--r--  arch/x86/kernel/cpu/amd.c                   |   9
-rw-r--r--  arch/x86/kernel/cpu/common.c                |  87
-rw-r--r--  arch/x86/kernel/cpu/intel_cacheinfo.c       | 715
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce-internal.h   |  11
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce-severity.c   |  66
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce.c            | 154
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce_amd.c        |  11
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce_intel.c      |  63
-rw-r--r--  arch/x86/kernel/cpu/microcode/amd.c         |   1
-rw-r--r--  arch/x86/kernel/cpu/microcode/core_early.c  |  75
-rw-r--r--  arch/x86/kernel/cpu/microcode/intel.c       |   4
-rw-r--r--  arch/x86/kernel/cpu/microcode/intel_early.c | 345
-rw-r--r--  arch/x86/kernel/cpu/microcode/intel_lib.c   |  22
-rw-r--r--  arch/x86/kernel/cpu/mkcapflags.sh           |   2
-rw-r--r--  arch/x86/kernel/cpu/perf_event.c            |  18
-rw-r--r--  arch/x86/kernel/cpu/perf_event_intel.c      |  10
16 files changed, 656 insertions, 937 deletions
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index a220239cea65..fd470ebf924e 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -5,6 +5,7 @@
5 | 5 | ||
6 | #include <linux/io.h> | 6 | #include <linux/io.h> |
7 | #include <linux/sched.h> | 7 | #include <linux/sched.h> |
8 | #include <linux/random.h> | ||
8 | #include <asm/processor.h> | 9 | #include <asm/processor.h> |
9 | #include <asm/apic.h> | 10 | #include <asm/apic.h> |
10 | #include <asm/cpu.h> | 11 | #include <asm/cpu.h> |
@@ -488,6 +489,9 @@ static void bsp_init_amd(struct cpuinfo_x86 *c)
488 | 489 | ||
489 | va_align.mask = (upperbit - 1) & PAGE_MASK; | 490 | va_align.mask = (upperbit - 1) & PAGE_MASK; |
490 | va_align.flags = ALIGN_VA_32 | ALIGN_VA_64; | 491 | va_align.flags = ALIGN_VA_32 | ALIGN_VA_64; |
492 | |||
493 | /* A random value per boot for bit slice [12:upper_bit) */ | ||
494 | va_align.bits = get_random_int() & va_align.mask; | ||
491 | } | 495 | } |
492 | } | 496 | } |
493 | 497 | ||
@@ -711,6 +715,11 @@ static void init_amd(struct cpuinfo_x86 *c)
711 | set_cpu_bug(c, X86_BUG_AMD_APIC_C1E); | 715 | set_cpu_bug(c, X86_BUG_AMD_APIC_C1E); |
712 | 716 | ||
713 | rdmsr_safe(MSR_AMD64_PATCH_LEVEL, &c->microcode, &dummy); | 717 | rdmsr_safe(MSR_AMD64_PATCH_LEVEL, &c->microcode, &dummy); |
718 | |||
719 | /* 3DNow or LM implies PREFETCHW */ | ||
720 | if (!cpu_has(c, X86_FEATURE_3DNOWPREFETCH)) | ||
721 | if (cpu_has(c, X86_FEATURE_3DNOW) || cpu_has(c, X86_FEATURE_LM)) | ||
722 | set_cpu_cap(c, X86_FEATURE_3DNOWPREFETCH); | ||
714 | } | 723 | } |
715 | 724 | ||
716 | #ifdef CONFIG_X86_32 | 725 | #ifdef CONFIG_X86_32 |
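A note on the PREFETCHW hunk above: the new bit is only synthesized when CPUID does not already advertise it, and either 3DNow! or long mode is present. A standalone model of that decision (illustrative only, not kernel code; the helper name is made up):

#include <stdbool.h>
#include <stdio.h>

/* Models the check added to init_amd(): treat PREFETCH/PREFETCHW as usable
 * when the explicit CPUID bit is absent but 3DNow! or long mode is there. */
static bool has_prefetchw(bool bit_3dnowprefetch, bool bit_3dnow, bool bit_lm)
{
	return bit_3dnowprefetch || bit_3dnow || bit_lm;
}

int main(void)
{
	/* A 64-bit AMD CPU without the explicit bit still ends up with it. */
	printf("%d\n", has_prefetchw(false, false, true));	/* prints 1 */
	return 0;
}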
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 2346c95c6ab1..3f70538012e2 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -959,38 +959,37 @@ static void identify_cpu(struct cpuinfo_x86 *c)
959 | #endif | 959 | #endif |
960 | } | 960 | } |
961 | 961 | ||
962 | #ifdef CONFIG_X86_64 | 962 | /* |
963 | #ifdef CONFIG_IA32_EMULATION | 963 | * Set up the CPU state needed to execute SYSENTER/SYSEXIT instructions |
964 | /* May not be __init: called during resume */ | 964 | * on 32-bit kernels: |
965 | static void syscall32_cpu_init(void) | 965 | */ |
966 | { | ||
967 | /* Load these always in case some future AMD CPU supports | ||
968 | SYSENTER from compat mode too. */ | ||
969 | wrmsrl_safe(MSR_IA32_SYSENTER_CS, (u64)__KERNEL_CS); | ||
970 | wrmsrl_safe(MSR_IA32_SYSENTER_ESP, 0ULL); | ||
971 | wrmsrl_safe(MSR_IA32_SYSENTER_EIP, (u64)ia32_sysenter_target); | ||
972 | |||
973 | wrmsrl(MSR_CSTAR, ia32_cstar_target); | ||
974 | } | ||
975 | #endif /* CONFIG_IA32_EMULATION */ | ||
976 | #endif /* CONFIG_X86_64 */ | ||
977 | |||
978 | #ifdef CONFIG_X86_32 | 966 | #ifdef CONFIG_X86_32 |
979 | void enable_sep_cpu(void) | 967 | void enable_sep_cpu(void) |
980 | { | 968 | { |
981 | int cpu = get_cpu(); | 969 | struct tss_struct *tss; |
982 | struct tss_struct *tss = &per_cpu(init_tss, cpu); | 970 | int cpu; |
983 | 971 | ||
984 | if (!boot_cpu_has(X86_FEATURE_SEP)) { | 972 | cpu = get_cpu(); |
985 | put_cpu(); | 973 | tss = &per_cpu(cpu_tss, cpu); |
986 | return; | 974 | |
987 | } | 975 | if (!boot_cpu_has(X86_FEATURE_SEP)) |
976 | goto out; | ||
977 | |||
978 | /* | ||
979 | * We cache MSR_IA32_SYSENTER_CS's value in the TSS's ss1 field -- | ||
980 | * see the big comment in struct x86_hw_tss's definition. | ||
981 | */ | ||
988 | 982 | ||
989 | tss->x86_tss.ss1 = __KERNEL_CS; | 983 | tss->x86_tss.ss1 = __KERNEL_CS; |
990 | tss->x86_tss.sp1 = sizeof(struct tss_struct) + (unsigned long) tss; | 984 | wrmsr(MSR_IA32_SYSENTER_CS, tss->x86_tss.ss1, 0); |
991 | wrmsr(MSR_IA32_SYSENTER_CS, __KERNEL_CS, 0); | 985 | |
992 | wrmsr(MSR_IA32_SYSENTER_ESP, tss->x86_tss.sp1, 0); | 986 | wrmsr(MSR_IA32_SYSENTER_ESP, |
993 | wrmsr(MSR_IA32_SYSENTER_EIP, (unsigned long) ia32_sysenter_target, 0); | 987 | (unsigned long)tss + offsetofend(struct tss_struct, SYSENTER_stack), |
988 | 0); | ||
989 | |||
990 | wrmsr(MSR_IA32_SYSENTER_EIP, (unsigned long)ia32_sysenter_target, 0); | ||
991 | |||
992 | out: | ||
994 | put_cpu(); | 993 | put_cpu(); |
995 | } | 994 | } |
996 | #endif | 995 | #endif |
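The rewritten enable_sep_cpu() above programs MSR_IA32_SYSENTER_ESP with the address just past the TSS's SYSENTER_stack member, computed via offsetofend(). A small self-contained demonstration of that macro (toy struct, not the real tss_struct):

#include <stddef.h>
#include <stdio.h>

/* offsetofend() = offset of the member plus its size, i.e. the first byte
 * past the member -- exactly the "top of stack" value the MSR should hold. */
#define offsetofend(TYPE, MEMBER) \
	(offsetof(TYPE, MEMBER) + sizeof(((TYPE *)0)->MEMBER))

struct toy_tss {
	unsigned long hw_state[26];		/* stand-in for x86_hw_tss */
	unsigned long SYSENTER_stack[64];	/* stand-in for the real field */
};

int main(void)
{
	size_t top = offsetofend(struct toy_tss, SYSENTER_stack);

	printf("SYSENTER stack top sits %zu bytes into the struct\n", top);
	return 0;
}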
@@ -1118,7 +1117,7 @@ static __init int setup_disablecpuid(char *arg)
1118 | __setup("clearcpuid=", setup_disablecpuid); | 1117 | __setup("clearcpuid=", setup_disablecpuid); |
1119 | 1118 | ||
1120 | DEFINE_PER_CPU(unsigned long, kernel_stack) = | 1119 | DEFINE_PER_CPU(unsigned long, kernel_stack) = |
1121 | (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE; | 1120 | (unsigned long)&init_thread_union + THREAD_SIZE; |
1122 | EXPORT_PER_CPU_SYMBOL(kernel_stack); | 1121 | EXPORT_PER_CPU_SYMBOL(kernel_stack); |
1123 | 1122 | ||
1124 | #ifdef CONFIG_X86_64 | 1123 | #ifdef CONFIG_X86_64 |
@@ -1130,8 +1129,8 @@ DEFINE_PER_CPU_FIRST(union irq_stack_union,
1130 | irq_stack_union) __aligned(PAGE_SIZE) __visible; | 1129 | irq_stack_union) __aligned(PAGE_SIZE) __visible; |
1131 | 1130 | ||
1132 | /* | 1131 | /* |
1133 | * The following four percpu variables are hot. Align current_task to | 1132 | * The following percpu variables are hot. Align current_task to |
1134 | * cacheline size such that all four fall in the same cacheline. | 1133 | * cacheline size such that they fall in the same cacheline. |
1135 | */ | 1134 | */ |
1136 | DEFINE_PER_CPU(struct task_struct *, current_task) ____cacheline_aligned = | 1135 | DEFINE_PER_CPU(struct task_struct *, current_task) ____cacheline_aligned = |
1137 | &init_task; | 1136 | &init_task; |
@@ -1171,10 +1170,23 @@ void syscall_init(void)
1171 | */ | 1170 | */ |
1172 | wrmsrl(MSR_STAR, ((u64)__USER32_CS)<<48 | ((u64)__KERNEL_CS)<<32); | 1171 | wrmsrl(MSR_STAR, ((u64)__USER32_CS)<<48 | ((u64)__KERNEL_CS)<<32); |
1173 | wrmsrl(MSR_LSTAR, system_call); | 1172 | wrmsrl(MSR_LSTAR, system_call); |
1174 | wrmsrl(MSR_CSTAR, ignore_sysret); | ||
1175 | 1173 | ||
1176 | #ifdef CONFIG_IA32_EMULATION | 1174 | #ifdef CONFIG_IA32_EMULATION |
1177 | syscall32_cpu_init(); | 1175 | wrmsrl(MSR_CSTAR, ia32_cstar_target); |
1176 | /* | ||
1177 | * This only works on Intel CPUs. | ||
1178 | * On AMD CPUs these MSRs are 32-bit, CPU truncates MSR_IA32_SYSENTER_EIP. | ||
1179 | * This does not cause SYSENTER to jump to the wrong location, because | ||
1180 | * AMD doesn't allow SYSENTER in long mode (either 32- or 64-bit). | ||
1181 | */ | ||
1182 | wrmsrl_safe(MSR_IA32_SYSENTER_CS, (u64)__KERNEL_CS); | ||
1183 | wrmsrl_safe(MSR_IA32_SYSENTER_ESP, 0ULL); | ||
1184 | wrmsrl_safe(MSR_IA32_SYSENTER_EIP, (u64)ia32_sysenter_target); | ||
1185 | #else | ||
1186 | wrmsrl(MSR_CSTAR, ignore_sysret); | ||
1187 | wrmsrl_safe(MSR_IA32_SYSENTER_CS, (u64)GDT_ENTRY_INVALID_SEG); | ||
1188 | wrmsrl_safe(MSR_IA32_SYSENTER_ESP, 0ULL); | ||
1189 | wrmsrl_safe(MSR_IA32_SYSENTER_EIP, 0ULL); | ||
1178 | #endif | 1190 | #endif |
1179 | 1191 | ||
1180 | /* Flags to clear on syscall */ | 1192 | /* Flags to clear on syscall */ |
@@ -1226,6 +1238,15 @@ DEFINE_PER_CPU(int, __preempt_count) = INIT_PREEMPT_COUNT;
1226 | EXPORT_PER_CPU_SYMBOL(__preempt_count); | 1238 | EXPORT_PER_CPU_SYMBOL(__preempt_count); |
1227 | DEFINE_PER_CPU(struct task_struct *, fpu_owner_task); | 1239 | DEFINE_PER_CPU(struct task_struct *, fpu_owner_task); |
1228 | 1240 | ||
1241 | /* | ||
1242 | * On x86_32, vm86 modifies tss.sp0, so sp0 isn't a reliable way to find | ||
1243 | * the top of the kernel stack. Use an extra percpu variable to track the | ||
1244 | * top of the kernel stack directly. | ||
1245 | */ | ||
1246 | DEFINE_PER_CPU(unsigned long, cpu_current_top_of_stack) = | ||
1247 | (unsigned long)&init_thread_union + THREAD_SIZE; | ||
1248 | EXPORT_PER_CPU_SYMBOL(cpu_current_top_of_stack); | ||
1249 | |||
1229 | #ifdef CONFIG_CC_STACKPROTECTOR | 1250 | #ifdef CONFIG_CC_STACKPROTECTOR |
1230 | DEFINE_PER_CPU_ALIGNED(struct stack_canary, stack_canary); | 1251 | DEFINE_PER_CPU_ALIGNED(struct stack_canary, stack_canary); |
1231 | #endif | 1252 | #endif |
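The hunk above only defines cpu_current_top_of_stack; its readers live outside this directory and so are not part of this diffstat. As a sketch (kernel-context code, assumed rather than quoted from the series), such a per-CPU variable would be consumed like this:

/* Sketch only, not part of this patch. */
#include <linux/percpu.h>

DECLARE_PER_CPU(unsigned long, cpu_current_top_of_stack);

static inline unsigned long current_top_of_stack(void)
{
	/* On 32-bit, vm86 may rewrite tss.sp0, so trust the dedicated
	 * per-CPU copy instead of the TSS field. */
	return this_cpu_read(cpu_current_top_of_stack);
}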
@@ -1307,7 +1328,7 @@ void cpu_init(void)
1307 | */ | 1328 | */ |
1308 | load_ucode_ap(); | 1329 | load_ucode_ap(); |
1309 | 1330 | ||
1310 | t = &per_cpu(init_tss, cpu); | 1331 | t = &per_cpu(cpu_tss, cpu); |
1311 | oist = &per_cpu(orig_ist, cpu); | 1332 | oist = &per_cpu(orig_ist, cpu); |
1312 | 1333 | ||
1313 | #ifdef CONFIG_NUMA | 1334 | #ifdef CONFIG_NUMA |
@@ -1391,7 +1412,7 @@ void cpu_init(void)
1391 | { | 1412 | { |
1392 | int cpu = smp_processor_id(); | 1413 | int cpu = smp_processor_id(); |
1393 | struct task_struct *curr = current; | 1414 | struct task_struct *curr = current; |
1394 | struct tss_struct *t = &per_cpu(init_tss, cpu); | 1415 | struct tss_struct *t = &per_cpu(cpu_tss, cpu); |
1395 | struct thread_struct *thread = &curr->thread; | 1416 | struct thread_struct *thread = &curr->thread; |
1396 | 1417 | ||
1397 | wait_for_master_cpu(cpu); | 1418 | wait_for_master_cpu(cpu); |
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index 659643376dbf..edcb0e28c336 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -7,16 +7,14 @@
7 | * Andi Kleen / Andreas Herrmann : CPUID4 emulation on AMD. | 7 | * Andi Kleen / Andreas Herrmann : CPUID4 emulation on AMD. |
8 | */ | 8 | */ |
9 | 9 | ||
10 | #include <linux/init.h> | ||
11 | #include <linux/slab.h> | 10 | #include <linux/slab.h> |
12 | #include <linux/device.h> | 11 | #include <linux/cacheinfo.h> |
13 | #include <linux/compiler.h> | ||
14 | #include <linux/cpu.h> | 12 | #include <linux/cpu.h> |
15 | #include <linux/sched.h> | 13 | #include <linux/sched.h> |
14 | #include <linux/sysfs.h> | ||
16 | #include <linux/pci.h> | 15 | #include <linux/pci.h> |
17 | 16 | ||
18 | #include <asm/processor.h> | 17 | #include <asm/processor.h> |
19 | #include <linux/smp.h> | ||
20 | #include <asm/amd_nb.h> | 18 | #include <asm/amd_nb.h> |
21 | #include <asm/smp.h> | 19 | #include <asm/smp.h> |
22 | 20 | ||
@@ -116,10 +114,10 @@ static const struct _cache_table cache_table[] =
116 | 114 | ||
117 | 115 | ||
118 | enum _cache_type { | 116 | enum _cache_type { |
119 | CACHE_TYPE_NULL = 0, | 117 | CTYPE_NULL = 0, |
120 | CACHE_TYPE_DATA = 1, | 118 | CTYPE_DATA = 1, |
121 | CACHE_TYPE_INST = 2, | 119 | CTYPE_INST = 2, |
122 | CACHE_TYPE_UNIFIED = 3 | 120 | CTYPE_UNIFIED = 3 |
123 | }; | 121 | }; |
124 | 122 | ||
125 | union _cpuid4_leaf_eax { | 123 | union _cpuid4_leaf_eax { |
@@ -159,11 +157,6 @@ struct _cpuid4_info_regs {
159 | struct amd_northbridge *nb; | 157 | struct amd_northbridge *nb; |
160 | }; | 158 | }; |
161 | 159 | ||
162 | struct _cpuid4_info { | ||
163 | struct _cpuid4_info_regs base; | ||
164 | DECLARE_BITMAP(shared_cpu_map, NR_CPUS); | ||
165 | }; | ||
166 | |||
167 | unsigned short num_cache_leaves; | 160 | unsigned short num_cache_leaves; |
168 | 161 | ||
169 | /* AMD doesn't have CPUID4. Emulate it here to report the same | 162 | /* AMD doesn't have CPUID4. Emulate it here to report the same |
@@ -220,6 +213,13 @@ static const unsigned short assocs[] = {
220 | static const unsigned char levels[] = { 1, 1, 2, 3 }; | 213 | static const unsigned char levels[] = { 1, 1, 2, 3 }; |
221 | static const unsigned char types[] = { 1, 2, 3, 3 }; | 214 | static const unsigned char types[] = { 1, 2, 3, 3 }; |
222 | 215 | ||
216 | static const enum cache_type cache_type_map[] = { | ||
217 | [CTYPE_NULL] = CACHE_TYPE_NOCACHE, | ||
218 | [CTYPE_DATA] = CACHE_TYPE_DATA, | ||
219 | [CTYPE_INST] = CACHE_TYPE_INST, | ||
220 | [CTYPE_UNIFIED] = CACHE_TYPE_UNIFIED, | ||
221 | }; | ||
222 | |||
223 | static void | 223 | static void |
224 | amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax, | 224 | amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax, |
225 | union _cpuid4_leaf_ebx *ebx, | 225 | union _cpuid4_leaf_ebx *ebx, |
@@ -291,14 +291,8 @@ amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax,
291 | (ebx->split.ways_of_associativity + 1) - 1; | 291 | (ebx->split.ways_of_associativity + 1) - 1; |
292 | } | 292 | } |
293 | 293 | ||
294 | struct _cache_attr { | ||
295 | struct attribute attr; | ||
296 | ssize_t (*show)(struct _cpuid4_info *, char *, unsigned int); | ||
297 | ssize_t (*store)(struct _cpuid4_info *, const char *, size_t count, | ||
298 | unsigned int); | ||
299 | }; | ||
300 | |||
301 | #if defined(CONFIG_AMD_NB) && defined(CONFIG_SYSFS) | 294 | #if defined(CONFIG_AMD_NB) && defined(CONFIG_SYSFS) |
295 | |||
302 | /* | 296 | /* |
303 | * L3 cache descriptors | 297 | * L3 cache descriptors |
304 | */ | 298 | */ |
@@ -325,20 +319,6 @@ static void amd_calc_l3_indices(struct amd_northbridge *nb)
325 | l3->indices = (max(max3(sc0, sc1, sc2), sc3) << 10) - 1; | 319 | l3->indices = (max(max3(sc0, sc1, sc2), sc3) << 10) - 1; |
326 | } | 320 | } |
327 | 321 | ||
328 | static void amd_init_l3_cache(struct _cpuid4_info_regs *this_leaf, int index) | ||
329 | { | ||
330 | int node; | ||
331 | |||
332 | /* only for L3, and not in virtualized environments */ | ||
333 | if (index < 3) | ||
334 | return; | ||
335 | |||
336 | node = amd_get_nb_id(smp_processor_id()); | ||
337 | this_leaf->nb = node_to_amd_nb(node); | ||
338 | if (this_leaf->nb && !this_leaf->nb->l3_cache.indices) | ||
339 | amd_calc_l3_indices(this_leaf->nb); | ||
340 | } | ||
341 | |||
342 | /* | 322 | /* |
343 | * check whether a slot used for disabling an L3 index is occupied. | 323 | * check whether a slot used for disabling an L3 index is occupied. |
344 | * @l3: L3 cache descriptor | 324 | * @l3: L3 cache descriptor |
@@ -359,15 +339,13 @@ int amd_get_l3_disable_slot(struct amd_northbridge *nb, unsigned slot)
359 | return -1; | 339 | return -1; |
360 | } | 340 | } |
361 | 341 | ||
362 | static ssize_t show_cache_disable(struct _cpuid4_info *this_leaf, char *buf, | 342 | static ssize_t show_cache_disable(struct cacheinfo *this_leaf, char *buf, |
363 | unsigned int slot) | 343 | unsigned int slot) |
364 | { | 344 | { |
365 | int index; | 345 | int index; |
346 | struct amd_northbridge *nb = this_leaf->priv; | ||
366 | 347 | ||
367 | if (!this_leaf->base.nb || !amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE)) | 348 | index = amd_get_l3_disable_slot(nb, slot); |
368 | return -EINVAL; | ||
369 | |||
370 | index = amd_get_l3_disable_slot(this_leaf->base.nb, slot); | ||
371 | if (index >= 0) | 349 | if (index >= 0) |
372 | return sprintf(buf, "%d\n", index); | 350 | return sprintf(buf, "%d\n", index); |
373 | 351 | ||
@@ -376,9 +354,10 @@ static ssize_t show_cache_disable(struct _cpuid4_info *this_leaf, char *buf,
376 | 354 | ||
377 | #define SHOW_CACHE_DISABLE(slot) \ | 355 | #define SHOW_CACHE_DISABLE(slot) \ |
378 | static ssize_t \ | 356 | static ssize_t \ |
379 | show_cache_disable_##slot(struct _cpuid4_info *this_leaf, char *buf, \ | 357 | cache_disable_##slot##_show(struct device *dev, \ |
380 | unsigned int cpu) \ | 358 | struct device_attribute *attr, char *buf) \ |
381 | { \ | 359 | { \ |
360 | struct cacheinfo *this_leaf = dev_get_drvdata(dev); \ | ||
382 | return show_cache_disable(this_leaf, buf, slot); \ | 361 | return show_cache_disable(this_leaf, buf, slot); \ |
383 | } | 362 | } |
384 | SHOW_CACHE_DISABLE(0) | 363 | SHOW_CACHE_DISABLE(0) |
@@ -446,25 +425,23 @@ int amd_set_l3_disable_slot(struct amd_northbridge *nb, int cpu, unsigned slot,
446 | return 0; | 425 | return 0; |
447 | } | 426 | } |
448 | 427 | ||
449 | static ssize_t store_cache_disable(struct _cpuid4_info *this_leaf, | 428 | static ssize_t store_cache_disable(struct cacheinfo *this_leaf, |
450 | const char *buf, size_t count, | 429 | const char *buf, size_t count, |
451 | unsigned int slot) | 430 | unsigned int slot) |
452 | { | 431 | { |
453 | unsigned long val = 0; | 432 | unsigned long val = 0; |
454 | int cpu, err = 0; | 433 | int cpu, err = 0; |
434 | struct amd_northbridge *nb = this_leaf->priv; | ||
455 | 435 | ||
456 | if (!capable(CAP_SYS_ADMIN)) | 436 | if (!capable(CAP_SYS_ADMIN)) |
457 | return -EPERM; | 437 | return -EPERM; |
458 | 438 | ||
459 | if (!this_leaf->base.nb || !amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE)) | 439 | cpu = cpumask_first(&this_leaf->shared_cpu_map); |
460 | return -EINVAL; | ||
461 | |||
462 | cpu = cpumask_first(to_cpumask(this_leaf->shared_cpu_map)); | ||
463 | 440 | ||
464 | if (kstrtoul(buf, 10, &val) < 0) | 441 | if (kstrtoul(buf, 10, &val) < 0) |
465 | return -EINVAL; | 442 | return -EINVAL; |
466 | 443 | ||
467 | err = amd_set_l3_disable_slot(this_leaf->base.nb, cpu, slot, val); | 444 | err = amd_set_l3_disable_slot(nb, cpu, slot, val); |
468 | if (err) { | 445 | if (err) { |
469 | if (err == -EEXIST) | 446 | if (err == -EEXIST) |
470 | pr_warning("L3 slot %d in use/index already disabled!\n", | 447 | pr_warning("L3 slot %d in use/index already disabled!\n", |
@@ -476,41 +453,36 @@ static ssize_t store_cache_disable(struct _cpuid4_info *this_leaf,
476 | 453 | ||
477 | #define STORE_CACHE_DISABLE(slot) \ | 454 | #define STORE_CACHE_DISABLE(slot) \ |
478 | static ssize_t \ | 455 | static ssize_t \ |
479 | store_cache_disable_##slot(struct _cpuid4_info *this_leaf, \ | 456 | cache_disable_##slot##_store(struct device *dev, \ |
480 | const char *buf, size_t count, \ | 457 | struct device_attribute *attr, \ |
481 | unsigned int cpu) \ | 458 | const char *buf, size_t count) \ |
482 | { \ | 459 | { \ |
460 | struct cacheinfo *this_leaf = dev_get_drvdata(dev); \ | ||
483 | return store_cache_disable(this_leaf, buf, count, slot); \ | 461 | return store_cache_disable(this_leaf, buf, count, slot); \ |
484 | } | 462 | } |
485 | STORE_CACHE_DISABLE(0) | 463 | STORE_CACHE_DISABLE(0) |
486 | STORE_CACHE_DISABLE(1) | 464 | STORE_CACHE_DISABLE(1) |
487 | 465 | ||
488 | static struct _cache_attr cache_disable_0 = __ATTR(cache_disable_0, 0644, | 466 | static ssize_t subcaches_show(struct device *dev, |
489 | show_cache_disable_0, store_cache_disable_0); | 467 | struct device_attribute *attr, char *buf) |
490 | static struct _cache_attr cache_disable_1 = __ATTR(cache_disable_1, 0644, | ||
491 | show_cache_disable_1, store_cache_disable_1); | ||
492 | |||
493 | static ssize_t | ||
494 | show_subcaches(struct _cpuid4_info *this_leaf, char *buf, unsigned int cpu) | ||
495 | { | 468 | { |
496 | if (!this_leaf->base.nb || !amd_nb_has_feature(AMD_NB_L3_PARTITIONING)) | 469 | struct cacheinfo *this_leaf = dev_get_drvdata(dev); |
497 | return -EINVAL; | 470 | int cpu = cpumask_first(&this_leaf->shared_cpu_map); |
498 | 471 | ||
499 | return sprintf(buf, "%x\n", amd_get_subcaches(cpu)); | 472 | return sprintf(buf, "%x\n", amd_get_subcaches(cpu)); |
500 | } | 473 | } |
501 | 474 | ||
502 | static ssize_t | 475 | static ssize_t subcaches_store(struct device *dev, |
503 | store_subcaches(struct _cpuid4_info *this_leaf, const char *buf, size_t count, | 476 | struct device_attribute *attr, |
504 | unsigned int cpu) | 477 | const char *buf, size_t count) |
505 | { | 478 | { |
479 | struct cacheinfo *this_leaf = dev_get_drvdata(dev); | ||
480 | int cpu = cpumask_first(&this_leaf->shared_cpu_map); | ||
506 | unsigned long val; | 481 | unsigned long val; |
507 | 482 | ||
508 | if (!capable(CAP_SYS_ADMIN)) | 483 | if (!capable(CAP_SYS_ADMIN)) |
509 | return -EPERM; | 484 | return -EPERM; |
510 | 485 | ||
511 | if (!this_leaf->base.nb || !amd_nb_has_feature(AMD_NB_L3_PARTITIONING)) | ||
512 | return -EINVAL; | ||
513 | |||
514 | if (kstrtoul(buf, 16, &val) < 0) | 486 | if (kstrtoul(buf, 16, &val) < 0) |
515 | return -EINVAL; | 487 | return -EINVAL; |
516 | 488 | ||
@@ -520,9 +492,92 @@ store_subcaches(struct _cpuid4_info *this_leaf, const char *buf, size_t count,
520 | return count; | 492 | return count; |
521 | } | 493 | } |
522 | 494 | ||
523 | static struct _cache_attr subcaches = | 495 | static DEVICE_ATTR_RW(cache_disable_0); |
524 | __ATTR(subcaches, 0644, show_subcaches, store_subcaches); | 496 | static DEVICE_ATTR_RW(cache_disable_1); |
497 | static DEVICE_ATTR_RW(subcaches); | ||
498 | |||
499 | static umode_t | ||
500 | cache_private_attrs_is_visible(struct kobject *kobj, | ||
501 | struct attribute *attr, int unused) | ||
502 | { | ||
503 | struct device *dev = kobj_to_dev(kobj); | ||
504 | struct cacheinfo *this_leaf = dev_get_drvdata(dev); | ||
505 | umode_t mode = attr->mode; | ||
506 | |||
507 | if (!this_leaf->priv) | ||
508 | return 0; | ||
509 | |||
510 | if ((attr == &dev_attr_subcaches.attr) && | ||
511 | amd_nb_has_feature(AMD_NB_L3_PARTITIONING)) | ||
512 | return mode; | ||
513 | |||
514 | if ((attr == &dev_attr_cache_disable_0.attr || | ||
515 | attr == &dev_attr_cache_disable_1.attr) && | ||
516 | amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE)) | ||
517 | return mode; | ||
518 | |||
519 | return 0; | ||
520 | } | ||
521 | |||
522 | static struct attribute_group cache_private_group = { | ||
523 | .is_visible = cache_private_attrs_is_visible, | ||
524 | }; | ||
525 | |||
526 | static void init_amd_l3_attrs(void) | ||
527 | { | ||
528 | int n = 1; | ||
529 | static struct attribute **amd_l3_attrs; | ||
530 | |||
531 | if (amd_l3_attrs) /* already initialized */ | ||
532 | return; | ||
533 | |||
534 | if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE)) | ||
535 | n += 2; | ||
536 | if (amd_nb_has_feature(AMD_NB_L3_PARTITIONING)) | ||
537 | n += 1; | ||
538 | |||
539 | amd_l3_attrs = kcalloc(n, sizeof(*amd_l3_attrs), GFP_KERNEL); | ||
540 | if (!amd_l3_attrs) | ||
541 | return; | ||
542 | |||
543 | n = 0; | ||
544 | if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE)) { | ||
545 | amd_l3_attrs[n++] = &dev_attr_cache_disable_0.attr; | ||
546 | amd_l3_attrs[n++] = &dev_attr_cache_disable_1.attr; | ||
547 | } | ||
548 | if (amd_nb_has_feature(AMD_NB_L3_PARTITIONING)) | ||
549 | amd_l3_attrs[n++] = &dev_attr_subcaches.attr; | ||
525 | 550 | ||
551 | cache_private_group.attrs = amd_l3_attrs; | ||
552 | } | ||
553 | |||
554 | const struct attribute_group * | ||
555 | cache_get_priv_group(struct cacheinfo *this_leaf) | ||
556 | { | ||
557 | struct amd_northbridge *nb = this_leaf->priv; | ||
558 | |||
559 | if (this_leaf->level < 3 || !nb) | ||
560 | return NULL; | ||
561 | |||
562 | if (nb && nb->l3_cache.indices) | ||
563 | init_amd_l3_attrs(); | ||
564 | |||
565 | return &cache_private_group; | ||
566 | } | ||
567 | |||
568 | static void amd_init_l3_cache(struct _cpuid4_info_regs *this_leaf, int index) | ||
569 | { | ||
570 | int node; | ||
571 | |||
572 | /* only for L3, and not in virtualized environments */ | ||
573 | if (index < 3) | ||
574 | return; | ||
575 | |||
576 | node = amd_get_nb_id(smp_processor_id()); | ||
577 | this_leaf->nb = node_to_amd_nb(node); | ||
578 | if (this_leaf->nb && !this_leaf->nb->l3_cache.indices) | ||
579 | amd_calc_l3_indices(this_leaf->nb); | ||
580 | } | ||
526 | #else | 581 | #else |
527 | #define amd_init_l3_cache(x, y) | 582 | #define amd_init_l3_cache(x, y) |
528 | #endif /* CONFIG_AMD_NB && CONFIG_SYSFS */ | 583 | #endif /* CONFIG_AMD_NB && CONFIG_SYSFS */ |
@@ -546,7 +601,7 @@ cpuid4_cache_lookup_regs(int index, struct _cpuid4_info_regs *this_leaf)
546 | cpuid_count(4, index, &eax.full, &ebx.full, &ecx.full, &edx); | 601 | cpuid_count(4, index, &eax.full, &ebx.full, &ecx.full, &edx); |
547 | } | 602 | } |
548 | 603 | ||
549 | if (eax.split.type == CACHE_TYPE_NULL) | 604 | if (eax.split.type == CTYPE_NULL) |
550 | return -EIO; /* better error ? */ | 605 | return -EIO; /* better error ? */ |
551 | 606 | ||
552 | this_leaf->eax = eax; | 607 | this_leaf->eax = eax; |
@@ -575,7 +630,7 @@ static int find_num_cache_leaves(struct cpuinfo_x86 *c)
575 | /* Do cpuid(op) loop to find out num_cache_leaves */ | 630 | /* Do cpuid(op) loop to find out num_cache_leaves */ |
576 | cpuid_count(op, i, &eax, &ebx, &ecx, &edx); | 631 | cpuid_count(op, i, &eax, &ebx, &ecx, &edx); |
577 | cache_eax.full = eax; | 632 | cache_eax.full = eax; |
578 | } while (cache_eax.split.type != CACHE_TYPE_NULL); | 633 | } while (cache_eax.split.type != CTYPE_NULL); |
579 | return i; | 634 | return i; |
580 | } | 635 | } |
581 | 636 | ||
@@ -626,9 +681,9 @@ unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c)
626 | 681 | ||
627 | switch (this_leaf.eax.split.level) { | 682 | switch (this_leaf.eax.split.level) { |
628 | case 1: | 683 | case 1: |
629 | if (this_leaf.eax.split.type == CACHE_TYPE_DATA) | 684 | if (this_leaf.eax.split.type == CTYPE_DATA) |
630 | new_l1d = this_leaf.size/1024; | 685 | new_l1d = this_leaf.size/1024; |
631 | else if (this_leaf.eax.split.type == CACHE_TYPE_INST) | 686 | else if (this_leaf.eax.split.type == CTYPE_INST) |
632 | new_l1i = this_leaf.size/1024; | 687 | new_l1i = this_leaf.size/1024; |
633 | break; | 688 | break; |
634 | case 2: | 689 | case 2: |
@@ -747,55 +802,52 @@ unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c)
747 | return l2; | 802 | return l2; |
748 | } | 803 | } |
749 | 804 | ||
750 | #ifdef CONFIG_SYSFS | 805 | static int __cache_amd_cpumap_setup(unsigned int cpu, int index, |
751 | 806 | struct _cpuid4_info_regs *base) | |
752 | /* pointer to _cpuid4_info array (for each cache leaf) */ | ||
753 | static DEFINE_PER_CPU(struct _cpuid4_info *, ici_cpuid4_info); | ||
754 | #define CPUID4_INFO_IDX(x, y) (&((per_cpu(ici_cpuid4_info, x))[y])) | ||
755 | |||
756 | #ifdef CONFIG_SMP | ||
757 | |||
758 | static int cache_shared_amd_cpu_map_setup(unsigned int cpu, int index) | ||
759 | { | 807 | { |
760 | struct _cpuid4_info *this_leaf; | 808 | struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu); |
809 | struct cacheinfo *this_leaf; | ||
761 | int i, sibling; | 810 | int i, sibling; |
762 | 811 | ||
763 | if (cpu_has_topoext) { | 812 | if (cpu_has_topoext) { |
764 | unsigned int apicid, nshared, first, last; | 813 | unsigned int apicid, nshared, first, last; |
765 | 814 | ||
766 | if (!per_cpu(ici_cpuid4_info, cpu)) | 815 | this_leaf = this_cpu_ci->info_list + index; |
767 | return 0; | 816 | nshared = base->eax.split.num_threads_sharing + 1; |
768 | |||
769 | this_leaf = CPUID4_INFO_IDX(cpu, index); | ||
770 | nshared = this_leaf->base.eax.split.num_threads_sharing + 1; | ||
771 | apicid = cpu_data(cpu).apicid; | 817 | apicid = cpu_data(cpu).apicid; |
772 | first = apicid - (apicid % nshared); | 818 | first = apicid - (apicid % nshared); |
773 | last = first + nshared - 1; | 819 | last = first + nshared - 1; |
774 | 820 | ||
775 | for_each_online_cpu(i) { | 821 | for_each_online_cpu(i) { |
822 | this_cpu_ci = get_cpu_cacheinfo(i); | ||
823 | if (!this_cpu_ci->info_list) | ||
824 | continue; | ||
825 | |||
776 | apicid = cpu_data(i).apicid; | 826 | apicid = cpu_data(i).apicid; |
777 | if ((apicid < first) || (apicid > last)) | 827 | if ((apicid < first) || (apicid > last)) |
778 | continue; | 828 | continue; |
779 | if (!per_cpu(ici_cpuid4_info, i)) | 829 | |
780 | continue; | 830 | this_leaf = this_cpu_ci->info_list + index; |
781 | this_leaf = CPUID4_INFO_IDX(i, index); | ||
782 | 831 | ||
783 | for_each_online_cpu(sibling) { | 832 | for_each_online_cpu(sibling) { |
784 | apicid = cpu_data(sibling).apicid; | 833 | apicid = cpu_data(sibling).apicid; |
785 | if ((apicid < first) || (apicid > last)) | 834 | if ((apicid < first) || (apicid > last)) |
786 | continue; | 835 | continue; |
787 | set_bit(sibling, this_leaf->shared_cpu_map); | 836 | cpumask_set_cpu(sibling, |
837 | &this_leaf->shared_cpu_map); | ||
788 | } | 838 | } |
789 | } | 839 | } |
790 | } else if (index == 3) { | 840 | } else if (index == 3) { |
791 | for_each_cpu(i, cpu_llc_shared_mask(cpu)) { | 841 | for_each_cpu(i, cpu_llc_shared_mask(cpu)) { |
792 | if (!per_cpu(ici_cpuid4_info, i)) | 842 | this_cpu_ci = get_cpu_cacheinfo(i); |
843 | if (!this_cpu_ci->info_list) | ||
793 | continue; | 844 | continue; |
794 | this_leaf = CPUID4_INFO_IDX(i, index); | 845 | this_leaf = this_cpu_ci->info_list + index; |
795 | for_each_cpu(sibling, cpu_llc_shared_mask(cpu)) { | 846 | for_each_cpu(sibling, cpu_llc_shared_mask(cpu)) { |
796 | if (!cpu_online(sibling)) | 847 | if (!cpu_online(sibling)) |
797 | continue; | 848 | continue; |
798 | set_bit(sibling, this_leaf->shared_cpu_map); | 849 | cpumask_set_cpu(sibling, |
850 | &this_leaf->shared_cpu_map); | ||
799 | } | 851 | } |
800 | } | 852 | } |
801 | } else | 853 | } else |
@@ -804,457 +856,86 @@ static int cache_shared_amd_cpu_map_setup(unsigned int cpu, int index)
804 | return 1; | 856 | return 1; |
805 | } | 857 | } |
806 | 858 | ||
807 | static void cache_shared_cpu_map_setup(unsigned int cpu, int index) | 859 | static void __cache_cpumap_setup(unsigned int cpu, int index, |
860 | struct _cpuid4_info_regs *base) | ||
808 | { | 861 | { |
809 | struct _cpuid4_info *this_leaf, *sibling_leaf; | 862 | struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu); |
863 | struct cacheinfo *this_leaf, *sibling_leaf; | ||
810 | unsigned long num_threads_sharing; | 864 | unsigned long num_threads_sharing; |
811 | int index_msb, i; | 865 | int index_msb, i; |
812 | struct cpuinfo_x86 *c = &cpu_data(cpu); | 866 | struct cpuinfo_x86 *c = &cpu_data(cpu); |
813 | 867 | ||
814 | if (c->x86_vendor == X86_VENDOR_AMD) { | 868 | if (c->x86_vendor == X86_VENDOR_AMD) { |
815 | if (cache_shared_amd_cpu_map_setup(cpu, index)) | 869 | if (__cache_amd_cpumap_setup(cpu, index, base)) |
816 | return; | 870 | return; |
817 | } | 871 | } |
818 | 872 | ||
819 | this_leaf = CPUID4_INFO_IDX(cpu, index); | 873 | this_leaf = this_cpu_ci->info_list + index; |
820 | num_threads_sharing = 1 + this_leaf->base.eax.split.num_threads_sharing; | 874 | num_threads_sharing = 1 + base->eax.split.num_threads_sharing; |
821 | 875 | ||
876 | cpumask_set_cpu(cpu, &this_leaf->shared_cpu_map); | ||
822 | if (num_threads_sharing == 1) | 877 | if (num_threads_sharing == 1) |
823 | cpumask_set_cpu(cpu, to_cpumask(this_leaf->shared_cpu_map)); | 878 | return; |
824 | else { | ||
825 | index_msb = get_count_order(num_threads_sharing); | ||
826 | |||
827 | for_each_online_cpu(i) { | ||
828 | if (cpu_data(i).apicid >> index_msb == | ||
829 | c->apicid >> index_msb) { | ||
830 | cpumask_set_cpu(i, | ||
831 | to_cpumask(this_leaf->shared_cpu_map)); | ||
832 | if (i != cpu && per_cpu(ici_cpuid4_info, i)) { | ||
833 | sibling_leaf = | ||
834 | CPUID4_INFO_IDX(i, index); | ||
835 | cpumask_set_cpu(cpu, to_cpumask( | ||
836 | sibling_leaf->shared_cpu_map)); | ||
837 | } | ||
838 | } | ||
839 | } | ||
840 | } | ||
841 | } | ||
842 | static void cache_remove_shared_cpu_map(unsigned int cpu, int index) | ||
843 | { | ||
844 | struct _cpuid4_info *this_leaf, *sibling_leaf; | ||
845 | int sibling; | ||
846 | |||
847 | this_leaf = CPUID4_INFO_IDX(cpu, index); | ||
848 | for_each_cpu(sibling, to_cpumask(this_leaf->shared_cpu_map)) { | ||
849 | sibling_leaf = CPUID4_INFO_IDX(sibling, index); | ||
850 | cpumask_clear_cpu(cpu, | ||
851 | to_cpumask(sibling_leaf->shared_cpu_map)); | ||
852 | } | ||
853 | } | ||
854 | #else | ||
855 | static void cache_shared_cpu_map_setup(unsigned int cpu, int index) | ||
856 | { | ||
857 | } | ||
858 | |||
859 | static void cache_remove_shared_cpu_map(unsigned int cpu, int index) | ||
860 | { | ||
861 | } | ||
862 | #endif | ||
863 | |||
864 | static void free_cache_attributes(unsigned int cpu) | ||
865 | { | ||
866 | int i; | ||
867 | |||
868 | for (i = 0; i < num_cache_leaves; i++) | ||
869 | cache_remove_shared_cpu_map(cpu, i); | ||
870 | |||
871 | kfree(per_cpu(ici_cpuid4_info, cpu)); | ||
872 | per_cpu(ici_cpuid4_info, cpu) = NULL; | ||
873 | } | ||
874 | |||
875 | static void get_cpu_leaves(void *_retval) | ||
876 | { | ||
877 | int j, *retval = _retval, cpu = smp_processor_id(); | ||
878 | 879 | ||
879 | /* Do cpuid and store the results */ | 880 | index_msb = get_count_order(num_threads_sharing); |
880 | for (j = 0; j < num_cache_leaves; j++) { | ||
881 | struct _cpuid4_info *this_leaf = CPUID4_INFO_IDX(cpu, j); | ||
882 | 881 | ||
883 | *retval = cpuid4_cache_lookup_regs(j, &this_leaf->base); | 882 | for_each_online_cpu(i) |
884 | if (unlikely(*retval < 0)) { | 883 | if (cpu_data(i).apicid >> index_msb == c->apicid >> index_msb) { |
885 | int i; | 884 | struct cpu_cacheinfo *sib_cpu_ci = get_cpu_cacheinfo(i); |
886 | 885 | ||
887 | for (i = 0; i < j; i++) | 886 | if (i == cpu || !sib_cpu_ci->info_list) |
888 | cache_remove_shared_cpu_map(cpu, i); | 887 | continue;/* skip if itself or no cacheinfo */ |
889 | break; | 888 | sibling_leaf = sib_cpu_ci->info_list + index; |
889 | cpumask_set_cpu(i, &this_leaf->shared_cpu_map); | ||
890 | cpumask_set_cpu(cpu, &sibling_leaf->shared_cpu_map); | ||
890 | } | 891 | } |
891 | cache_shared_cpu_map_setup(cpu, j); | ||
892 | } | ||
893 | } | 892 | } |
894 | 893 | ||
895 | static int detect_cache_attributes(unsigned int cpu) | 894 | static void ci_leaf_init(struct cacheinfo *this_leaf, |
895 | struct _cpuid4_info_regs *base) | ||
896 | { | 896 | { |
897 | int retval; | 897 | this_leaf->level = base->eax.split.level; |
898 | 898 | this_leaf->type = cache_type_map[base->eax.split.type]; | |
899 | if (num_cache_leaves == 0) | 899 | this_leaf->coherency_line_size = |
900 | return -ENOENT; | 900 | base->ebx.split.coherency_line_size + 1; |
901 | 901 | this_leaf->ways_of_associativity = | |
902 | per_cpu(ici_cpuid4_info, cpu) = kzalloc( | 902 | base->ebx.split.ways_of_associativity + 1; |
903 | sizeof(struct _cpuid4_info) * num_cache_leaves, GFP_KERNEL); | 903 | this_leaf->size = base->size; |
904 | if (per_cpu(ici_cpuid4_info, cpu) == NULL) | 904 | this_leaf->number_of_sets = base->ecx.split.number_of_sets + 1; |
905 | return -ENOMEM; | 905 | this_leaf->physical_line_partition = |
906 | 906 | base->ebx.split.physical_line_partition + 1; | |
907 | smp_call_function_single(cpu, get_cpu_leaves, &retval, true); | 907 | this_leaf->priv = base->nb; |
908 | if (retval) { | ||
909 | kfree(per_cpu(ici_cpuid4_info, cpu)); | ||
910 | per_cpu(ici_cpuid4_info, cpu) = NULL; | ||
911 | } | ||
912 | |||
913 | return retval; | ||
914 | } | 908 | } |
915 | 909 | ||
916 | #include <linux/kobject.h> | 910 | static int __init_cache_level(unsigned int cpu) |
917 | #include <linux/sysfs.h> | ||
918 | #include <linux/cpu.h> | ||
919 | |||
920 | /* pointer to kobject for cpuX/cache */ | ||
921 | static DEFINE_PER_CPU(struct kobject *, ici_cache_kobject); | ||
922 | |||
923 | struct _index_kobject { | ||
924 | struct kobject kobj; | ||
925 | unsigned int cpu; | ||
926 | unsigned short index; | ||
927 | }; | ||
928 | |||
929 | /* pointer to array of kobjects for cpuX/cache/indexY */ | ||
930 | static DEFINE_PER_CPU(struct _index_kobject *, ici_index_kobject); | ||
931 | #define INDEX_KOBJECT_PTR(x, y) (&((per_cpu(ici_index_kobject, x))[y])) | ||
932 | |||
933 | #define show_one_plus(file_name, object, val) \ | ||
934 | static ssize_t show_##file_name(struct _cpuid4_info *this_leaf, char *buf, \ | ||
935 | unsigned int cpu) \ | ||
936 | { \ | ||
937 | return sprintf(buf, "%lu\n", (unsigned long)this_leaf->object + val); \ | ||
938 | } | ||
939 | |||
940 | show_one_plus(level, base.eax.split.level, 0); | ||
941 | show_one_plus(coherency_line_size, base.ebx.split.coherency_line_size, 1); | ||
942 | show_one_plus(physical_line_partition, base.ebx.split.physical_line_partition, 1); | ||
943 | show_one_plus(ways_of_associativity, base.ebx.split.ways_of_associativity, 1); | ||
944 | show_one_plus(number_of_sets, base.ecx.split.number_of_sets, 1); | ||
945 | |||
946 | static ssize_t show_size(struct _cpuid4_info *this_leaf, char *buf, | ||
947 | unsigned int cpu) | ||
948 | { | ||
949 | return sprintf(buf, "%luK\n", this_leaf->base.size / 1024); | ||
950 | } | ||
951 | |||
952 | static ssize_t show_shared_cpu_map_func(struct _cpuid4_info *this_leaf, | ||
953 | int type, char *buf) | ||
954 | { | ||
955 | const struct cpumask *mask = to_cpumask(this_leaf->shared_cpu_map); | ||
956 | int ret; | ||
957 | |||
958 | if (type) | ||
959 | ret = scnprintf(buf, PAGE_SIZE - 1, "%*pbl", | ||
960 | cpumask_pr_args(mask)); | ||
961 | else | ||
962 | ret = scnprintf(buf, PAGE_SIZE - 1, "%*pb", | ||
963 | cpumask_pr_args(mask)); | ||
964 | buf[ret++] = '\n'; | ||
965 | buf[ret] = '\0'; | ||
966 | return ret; | ||
967 | } | ||
968 | |||
969 | static inline ssize_t show_shared_cpu_map(struct _cpuid4_info *leaf, char *buf, | ||
970 | unsigned int cpu) | ||
971 | { | 911 | { |
972 | return show_shared_cpu_map_func(leaf, 0, buf); | 912 | struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu); |
973 | } | ||
974 | |||
975 | static inline ssize_t show_shared_cpu_list(struct _cpuid4_info *leaf, char *buf, | ||
976 | unsigned int cpu) | ||
977 | { | ||
978 | return show_shared_cpu_map_func(leaf, 1, buf); | ||
979 | } | ||
980 | 913 | ||
981 | static ssize_t show_type(struct _cpuid4_info *this_leaf, char *buf, | 914 | if (!num_cache_leaves) |
982 | unsigned int cpu) | ||
983 | { | ||
984 | switch (this_leaf->base.eax.split.type) { | ||
985 | case CACHE_TYPE_DATA: | ||
986 | return sprintf(buf, "Data\n"); | ||
987 | case CACHE_TYPE_INST: | ||
988 | return sprintf(buf, "Instruction\n"); | ||
989 | case CACHE_TYPE_UNIFIED: | ||
990 | return sprintf(buf, "Unified\n"); | ||
991 | default: | ||
992 | return sprintf(buf, "Unknown\n"); | ||
993 | } | ||
994 | } | ||
995 | |||
996 | #define to_object(k) container_of(k, struct _index_kobject, kobj) | ||
997 | #define to_attr(a) container_of(a, struct _cache_attr, attr) | ||
998 | |||
999 | #define define_one_ro(_name) \ | ||
1000 | static struct _cache_attr _name = \ | ||
1001 | __ATTR(_name, 0444, show_##_name, NULL) | ||
1002 | |||
1003 | define_one_ro(level); | ||
1004 | define_one_ro(type); | ||
1005 | define_one_ro(coherency_line_size); | ||
1006 | define_one_ro(physical_line_partition); | ||
1007 | define_one_ro(ways_of_associativity); | ||
1008 | define_one_ro(number_of_sets); | ||
1009 | define_one_ro(size); | ||
1010 | define_one_ro(shared_cpu_map); | ||
1011 | define_one_ro(shared_cpu_list); | ||
1012 | |||
1013 | static struct attribute *default_attrs[] = { | ||
1014 | &type.attr, | ||
1015 | &level.attr, | ||
1016 | &coherency_line_size.attr, | ||
1017 | &physical_line_partition.attr, | ||
1018 | &ways_of_associativity.attr, | ||
1019 | &number_of_sets.attr, | ||
1020 | &size.attr, | ||
1021 | &shared_cpu_map.attr, | ||
1022 | &shared_cpu_list.attr, | ||
1023 | NULL | ||
1024 | }; | ||
1025 | |||
1026 | #ifdef CONFIG_AMD_NB | ||
1027 | static struct attribute **amd_l3_attrs(void) | ||
1028 | { | ||
1029 | static struct attribute **attrs; | ||
1030 | int n; | ||
1031 | |||
1032 | if (attrs) | ||
1033 | return attrs; | ||
1034 | |||
1035 | n = ARRAY_SIZE(default_attrs); | ||
1036 | |||
1037 | if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE)) | ||
1038 | n += 2; | ||
1039 | |||
1040 | if (amd_nb_has_feature(AMD_NB_L3_PARTITIONING)) | ||
1041 | n += 1; | ||
1042 | |||
1043 | attrs = kzalloc(n * sizeof (struct attribute *), GFP_KERNEL); | ||
1044 | if (attrs == NULL) | ||
1045 | return attrs = default_attrs; | ||
1046 | |||
1047 | for (n = 0; default_attrs[n]; n++) | ||
1048 | attrs[n] = default_attrs[n]; | ||
1049 | |||
1050 | if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE)) { | ||
1051 | attrs[n++] = &cache_disable_0.attr; | ||
1052 | attrs[n++] = &cache_disable_1.attr; | ||
1053 | } | ||
1054 | |||
1055 | if (amd_nb_has_feature(AMD_NB_L3_PARTITIONING)) | ||
1056 | attrs[n++] = &subcaches.attr; | ||
1057 | |||
1058 | return attrs; | ||
1059 | } | ||
1060 | #endif | ||
1061 | |||
1062 | static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf) | ||
1063 | { | ||
1064 | struct _cache_attr *fattr = to_attr(attr); | ||
1065 | struct _index_kobject *this_leaf = to_object(kobj); | ||
1066 | ssize_t ret; | ||
1067 | |||
1068 | ret = fattr->show ? | ||
1069 | fattr->show(CPUID4_INFO_IDX(this_leaf->cpu, this_leaf->index), | ||
1070 | buf, this_leaf->cpu) : | ||
1071 | 0; | ||
1072 | return ret; | ||
1073 | } | ||
1074 | |||
1075 | static ssize_t store(struct kobject *kobj, struct attribute *attr, | ||
1076 | const char *buf, size_t count) | ||
1077 | { | ||
1078 | struct _cache_attr *fattr = to_attr(attr); | ||
1079 | struct _index_kobject *this_leaf = to_object(kobj); | ||
1080 | ssize_t ret; | ||
1081 | |||
1082 | ret = fattr->store ? | ||
1083 | fattr->store(CPUID4_INFO_IDX(this_leaf->cpu, this_leaf->index), | ||
1084 | buf, count, this_leaf->cpu) : | ||
1085 | 0; | ||
1086 | return ret; | ||
1087 | } | ||
1088 | |||
1089 | static const struct sysfs_ops sysfs_ops = { | ||
1090 | .show = show, | ||
1091 | .store = store, | ||
1092 | }; | ||
1093 | |||
1094 | static struct kobj_type ktype_cache = { | ||
1095 | .sysfs_ops = &sysfs_ops, | ||
1096 | .default_attrs = default_attrs, | ||
1097 | }; | ||
1098 | |||
1099 | static struct kobj_type ktype_percpu_entry = { | ||
1100 | .sysfs_ops = &sysfs_ops, | ||
1101 | }; | ||
1102 | |||
1103 | static void cpuid4_cache_sysfs_exit(unsigned int cpu) | ||
1104 | { | ||
1105 | kfree(per_cpu(ici_cache_kobject, cpu)); | ||
1106 | kfree(per_cpu(ici_index_kobject, cpu)); | ||
1107 | per_cpu(ici_cache_kobject, cpu) = NULL; | ||
1108 | per_cpu(ici_index_kobject, cpu) = NULL; | ||
1109 | free_cache_attributes(cpu); | ||
1110 | } | ||
1111 | |||
1112 | static int cpuid4_cache_sysfs_init(unsigned int cpu) | ||
1113 | { | ||
1114 | int err; | ||
1115 | |||
1116 | if (num_cache_leaves == 0) | ||
1117 | return -ENOENT; | 915 | return -ENOENT; |
1118 | 916 | if (!this_cpu_ci) | |
1119 | err = detect_cache_attributes(cpu); | 917 | return -EINVAL; |
1120 | if (err) | 918 | this_cpu_ci->num_levels = 3; |
1121 | return err; | 919 | this_cpu_ci->num_leaves = num_cache_leaves; |
1122 | |||
1123 | /* Allocate all required memory */ | ||
1124 | per_cpu(ici_cache_kobject, cpu) = | ||
1125 | kzalloc(sizeof(struct kobject), GFP_KERNEL); | ||
1126 | if (unlikely(per_cpu(ici_cache_kobject, cpu) == NULL)) | ||
1127 | goto err_out; | ||
1128 | |||
1129 | per_cpu(ici_index_kobject, cpu) = kzalloc( | ||
1130 | sizeof(struct _index_kobject) * num_cache_leaves, GFP_KERNEL); | ||
1131 | if (unlikely(per_cpu(ici_index_kobject, cpu) == NULL)) | ||
1132 | goto err_out; | ||
1133 | |||
1134 | return 0; | 920 | return 0; |
1135 | |||
1136 | err_out: | ||
1137 | cpuid4_cache_sysfs_exit(cpu); | ||
1138 | return -ENOMEM; | ||
1139 | } | 921 | } |
1140 | 922 | ||
1141 | static DECLARE_BITMAP(cache_dev_map, NR_CPUS); | 923 | static int __populate_cache_leaves(unsigned int cpu) |
1142 | |||
1143 | /* Add/Remove cache interface for CPU device */ | ||
1144 | static int cache_add_dev(struct device *dev) | ||
1145 | { | 924 | { |
1146 | unsigned int cpu = dev->id; | 925 | unsigned int idx, ret; |
1147 | unsigned long i, j; | 926 | struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu); |
1148 | struct _index_kobject *this_object; | 927 | struct cacheinfo *this_leaf = this_cpu_ci->info_list; |
1149 | struct _cpuid4_info *this_leaf; | 928 | struct _cpuid4_info_regs id4_regs = {}; |
1150 | int retval; | ||
1151 | |||
1152 | retval = cpuid4_cache_sysfs_init(cpu); | ||
1153 | if (unlikely(retval < 0)) | ||
1154 | return retval; | ||
1155 | |||
1156 | retval = kobject_init_and_add(per_cpu(ici_cache_kobject, cpu), | ||
1157 | &ktype_percpu_entry, | ||
1158 | &dev->kobj, "%s", "cache"); | ||
1159 | if (retval < 0) { | ||
1160 | cpuid4_cache_sysfs_exit(cpu); | ||
1161 | return retval; | ||
1162 | } | ||
1163 | 929 | ||
1164 | for (i = 0; i < num_cache_leaves; i++) { | 930 | for (idx = 0; idx < this_cpu_ci->num_leaves; idx++) { |
1165 | this_object = INDEX_KOBJECT_PTR(cpu, i); | 931 | ret = cpuid4_cache_lookup_regs(idx, &id4_regs); |
1166 | this_object->cpu = cpu; | 932 | if (ret) |
1167 | this_object->index = i; | 933 | return ret; |
1168 | 934 | ci_leaf_init(this_leaf++, &id4_regs); | |
1169 | this_leaf = CPUID4_INFO_IDX(cpu, i); | 935 | __cache_cpumap_setup(cpu, idx, &id4_regs); |
1170 | |||
1171 | ktype_cache.default_attrs = default_attrs; | ||
1172 | #ifdef CONFIG_AMD_NB | ||
1173 | if (this_leaf->base.nb) | ||
1174 | ktype_cache.default_attrs = amd_l3_attrs(); | ||
1175 | #endif | ||
1176 | retval = kobject_init_and_add(&(this_object->kobj), | ||
1177 | &ktype_cache, | ||
1178 | per_cpu(ici_cache_kobject, cpu), | ||
1179 | "index%1lu", i); | ||
1180 | if (unlikely(retval)) { | ||
1181 | for (j = 0; j < i; j++) | ||
1182 | kobject_put(&(INDEX_KOBJECT_PTR(cpu, j)->kobj)); | ||
1183 | kobject_put(per_cpu(ici_cache_kobject, cpu)); | ||
1184 | cpuid4_cache_sysfs_exit(cpu); | ||
1185 | return retval; | ||
1186 | } | ||
1187 | kobject_uevent(&(this_object->kobj), KOBJ_ADD); | ||
1188 | } | 936 | } |
1189 | cpumask_set_cpu(cpu, to_cpumask(cache_dev_map)); | ||
1190 | |||
1191 | kobject_uevent(per_cpu(ici_cache_kobject, cpu), KOBJ_ADD); | ||
1192 | return 0; | 937 | return 0; |
1193 | } | 938 | } |
1194 | 939 | ||
1195 | static void cache_remove_dev(struct device *dev) | 940 | DEFINE_SMP_CALL_CACHE_FUNCTION(init_cache_level) |
1196 | { | 941 | DEFINE_SMP_CALL_CACHE_FUNCTION(populate_cache_leaves) |
1197 | unsigned int cpu = dev->id; | ||
1198 | unsigned long i; | ||
1199 | |||
1200 | if (per_cpu(ici_cpuid4_info, cpu) == NULL) | ||
1201 | return; | ||
1202 | if (!cpumask_test_cpu(cpu, to_cpumask(cache_dev_map))) | ||
1203 | return; | ||
1204 | cpumask_clear_cpu(cpu, to_cpumask(cache_dev_map)); | ||
1205 | |||
1206 | for (i = 0; i < num_cache_leaves; i++) | ||
1207 | kobject_put(&(INDEX_KOBJECT_PTR(cpu, i)->kobj)); | ||
1208 | kobject_put(per_cpu(ici_cache_kobject, cpu)); | ||
1209 | cpuid4_cache_sysfs_exit(cpu); | ||
1210 | } | ||
1211 | |||
1212 | static int cacheinfo_cpu_callback(struct notifier_block *nfb, | ||
1213 | unsigned long action, void *hcpu) | ||
1214 | { | ||
1215 | unsigned int cpu = (unsigned long)hcpu; | ||
1216 | struct device *dev; | ||
1217 | |||
1218 | dev = get_cpu_device(cpu); | ||
1219 | switch (action) { | ||
1220 | case CPU_ONLINE: | ||
1221 | case CPU_ONLINE_FROZEN: | ||
1222 | cache_add_dev(dev); | ||
1223 | break; | ||
1224 | case CPU_DEAD: | ||
1225 | case CPU_DEAD_FROZEN: | ||
1226 | cache_remove_dev(dev); | ||
1227 | break; | ||
1228 | } | ||
1229 | return NOTIFY_OK; | ||
1230 | } | ||
1231 | |||
1232 | static struct notifier_block cacheinfo_cpu_notifier = { | ||
1233 | .notifier_call = cacheinfo_cpu_callback, | ||
1234 | }; | ||
1235 | |||
1236 | static int __init cache_sysfs_init(void) | ||
1237 | { | ||
1238 | int i, err = 0; | ||
1239 | |||
1240 | if (num_cache_leaves == 0) | ||
1241 | return 0; | ||
1242 | |||
1243 | cpu_notifier_register_begin(); | ||
1244 | for_each_online_cpu(i) { | ||
1245 | struct device *dev = get_cpu_device(i); | ||
1246 | |||
1247 | err = cache_add_dev(dev); | ||
1248 | if (err) | ||
1249 | goto out; | ||
1250 | } | ||
1251 | __register_hotcpu_notifier(&cacheinfo_cpu_notifier); | ||
1252 | |||
1253 | out: | ||
1254 | cpu_notifier_register_done(); | ||
1255 | return err; | ||
1256 | } | ||
1257 | |||
1258 | device_initcall(cache_sysfs_init); | ||
1259 | |||
1260 | #endif | ||
diff --git a/arch/x86/kernel/cpu/mcheck/mce-internal.h b/arch/x86/kernel/cpu/mcheck/mce-internal.h
index 10b46906767f..fe32074b865b 100644
--- a/arch/x86/kernel/cpu/mcheck/mce-internal.h
+++ b/arch/x86/kernel/cpu/mcheck/mce-internal.h
@@ -14,6 +14,7 @@ enum severity_level {
14 | }; | 14 | }; |
15 | 15 | ||
16 | #define ATTR_LEN 16 | 16 | #define ATTR_LEN 16 |
17 | #define INITIAL_CHECK_INTERVAL 5 * 60 /* 5 minutes */ | ||
17 | 18 | ||
18 | /* One object for each MCE bank, shared by all CPUs */ | 19 | /* One object for each MCE bank, shared by all CPUs */ |
19 | struct mce_bank { | 20 | struct mce_bank { |
@@ -23,20 +24,20 @@ struct mce_bank {
23 | char attrname[ATTR_LEN]; /* attribute name */ | 24 | char attrname[ATTR_LEN]; /* attribute name */ |
24 | }; | 25 | }; |
25 | 26 | ||
26 | int mce_severity(struct mce *a, int tolerant, char **msg, bool is_excp); | 27 | extern int (*mce_severity)(struct mce *a, int tolerant, char **msg, bool is_excp); |
27 | struct dentry *mce_get_debugfs_dir(void); | 28 | struct dentry *mce_get_debugfs_dir(void); |
28 | 29 | ||
29 | extern struct mce_bank *mce_banks; | 30 | extern struct mce_bank *mce_banks; |
30 | extern mce_banks_t mce_banks_ce_disabled; | 31 | extern mce_banks_t mce_banks_ce_disabled; |
31 | 32 | ||
32 | #ifdef CONFIG_X86_MCE_INTEL | 33 | #ifdef CONFIG_X86_MCE_INTEL |
33 | unsigned long mce_intel_adjust_timer(unsigned long interval); | 34 | unsigned long cmci_intel_adjust_timer(unsigned long interval); |
34 | void mce_intel_cmci_poll(void); | 35 | bool mce_intel_cmci_poll(void); |
35 | void mce_intel_hcpu_update(unsigned long cpu); | 36 | void mce_intel_hcpu_update(unsigned long cpu); |
36 | void cmci_disable_bank(int bank); | 37 | void cmci_disable_bank(int bank); |
37 | #else | 38 | #else |
38 | # define mce_intel_adjust_timer mce_adjust_timer_default | 39 | # define cmci_intel_adjust_timer mce_adjust_timer_default |
39 | static inline void mce_intel_cmci_poll(void) { } | 40 | static inline bool mce_intel_cmci_poll(void) { return false; } |
40 | static inline void mce_intel_hcpu_update(unsigned long cpu) { } | 41 | static inline void mce_intel_hcpu_update(unsigned long cpu) { } |
41 | static inline void cmci_disable_bank(int bank) { } | 42 | static inline void cmci_disable_bank(int bank) { } |
42 | #endif | 43 | #endif |
diff --git a/arch/x86/kernel/cpu/mcheck/mce-severity.c b/arch/x86/kernel/cpu/mcheck/mce-severity.c
index 8bb433043a7f..9c682c222071 100644
--- a/arch/x86/kernel/cpu/mcheck/mce-severity.c
+++ b/arch/x86/kernel/cpu/mcheck/mce-severity.c
@@ -186,7 +186,61 @@ static int error_context(struct mce *m)
186 | return ((m->cs & 3) == 3) ? IN_USER : IN_KERNEL; | 186 | return ((m->cs & 3) == 3) ? IN_USER : IN_KERNEL; |
187 | } | 187 | } |
188 | 188 | ||
189 | int mce_severity(struct mce *m, int tolerant, char **msg, bool is_excp) | 189 | /* |
190 | * See AMD Error Scope Hierarchy table in a newer BKDG. For example | ||
191 | * 49125_15h_Models_30h-3Fh_BKDG.pdf, section "RAS Features" | ||
192 | */ | ||
193 | static int mce_severity_amd(struct mce *m, int tolerant, char **msg, bool is_excp) | ||
194 | { | ||
195 | enum context ctx = error_context(m); | ||
196 | |||
197 | /* Processor Context Corrupt, no need to fumble too much, die! */ | ||
198 | if (m->status & MCI_STATUS_PCC) | ||
199 | return MCE_PANIC_SEVERITY; | ||
200 | |||
201 | if (m->status & MCI_STATUS_UC) { | ||
202 | |||
203 | /* | ||
204 | * On older systems where overflow_recov flag is not present, we | ||
205 | * should simply panic if an error overflow occurs. If | ||
206 | * overflow_recov flag is present and set, then software can try | ||
207 | * to at least kill process to prolong system operation. | ||
208 | */ | ||
209 | if (mce_flags.overflow_recov) { | ||
210 | /* software can try to contain */ | ||
211 | if (!(m->mcgstatus & MCG_STATUS_RIPV) && (ctx == IN_KERNEL)) | ||
212 | return MCE_PANIC_SEVERITY; | ||
213 | |||
214 | /* kill current process */ | ||
215 | return MCE_AR_SEVERITY; | ||
216 | } else { | ||
217 | /* at least one error was not logged */ | ||
218 | if (m->status & MCI_STATUS_OVER) | ||
219 | return MCE_PANIC_SEVERITY; | ||
220 | } | ||
221 | |||
222 | /* | ||
223 | * For any other case, return MCE_UC_SEVERITY so that we log the | ||
224 | * error and exit #MC handler. | ||
225 | */ | ||
226 | return MCE_UC_SEVERITY; | ||
227 | } | ||
228 | |||
229 | /* | ||
230 | * deferred error: poll handler catches these and adds to mce_ring so | ||
231 | * memory-failure can take recovery actions. | ||
232 | */ | ||
233 | if (m->status & MCI_STATUS_DEFERRED) | ||
234 | return MCE_DEFERRED_SEVERITY; | ||
235 | |||
236 | /* | ||
237 | * corrected error: poll handler catches these and passes responsibility | ||
238 | * of decoding the error to EDAC | ||
239 | */ | ||
240 | return MCE_KEEP_SEVERITY; | ||
241 | } | ||
242 | |||
243 | static int mce_severity_intel(struct mce *m, int tolerant, char **msg, bool is_excp) | ||
190 | { | 244 | { |
191 | enum exception excp = (is_excp ? EXCP_CONTEXT : NO_EXCP); | 245 | enum exception excp = (is_excp ? EXCP_CONTEXT : NO_EXCP); |
192 | enum context ctx = error_context(m); | 246 | enum context ctx = error_context(m); |
@@ -216,6 +270,16 @@ int mce_severity(struct mce *m, int tolerant, char **msg, bool is_excp)
216 | } | 270 | } |
217 | } | 271 | } |
218 | 272 | ||
273 | /* Default to mce_severity_intel */ | ||
274 | int (*mce_severity)(struct mce *m, int tolerant, char **msg, bool is_excp) = | ||
275 | mce_severity_intel; | ||
276 | |||
277 | void __init mcheck_vendor_init_severity(void) | ||
278 | { | ||
279 | if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) | ||
280 | mce_severity = mce_severity_amd; | ||
281 | } | ||
282 | |||
219 | #ifdef CONFIG_DEBUG_FS | 283 | #ifdef CONFIG_DEBUG_FS |
220 | static void *s_start(struct seq_file *f, loff_t *pos) | 284 | static void *s_start(struct seq_file *f, loff_t *pos) |
221 | { | 285 | { |
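The net effect of the mce-severity.c changes above: mce_severity becomes a function pointer that defaults to the Intel grader and is switched once, at boot, to the AMD grader, so existing call sites need no change. A standalone model of that dispatch pattern (the names mirror the patch, but this is not kernel code):

#include <stdio.h>

typedef int (*severity_fn)(int status);

static int mce_severity_intel(int status) { return 1; }
static int mce_severity_amd(int status)   { return 2; }

/* Default to the Intel grader, as the patch does. */
static severity_fn mce_severity = mce_severity_intel;

static void mcheck_vendor_init_severity(int vendor_is_amd)
{
	if (vendor_is_amd)
		mce_severity = mce_severity_amd;	/* one-time switch at boot */
}

int main(void)
{
	mcheck_vendor_init_severity(1);
	printf("%d\n", mce_severity(0));	/* prints 2: the AMD grader runs */
	return 0;
}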
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index 3c036cb4a370..e535533d5ab8 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -60,11 +60,12 @@ static DEFINE_MUTEX(mce_chrdev_read_mutex);
60 | #define CREATE_TRACE_POINTS | 60 | #define CREATE_TRACE_POINTS |
61 | #include <trace/events/mce.h> | 61 | #include <trace/events/mce.h> |
62 | 62 | ||
63 | #define SPINUNIT 100 /* 100ns */ | 63 | #define SPINUNIT 100 /* 100ns */ |
64 | 64 | ||
65 | DEFINE_PER_CPU(unsigned, mce_exception_count); | 65 | DEFINE_PER_CPU(unsigned, mce_exception_count); |
66 | 66 | ||
67 | struct mce_bank *mce_banks __read_mostly; | 67 | struct mce_bank *mce_banks __read_mostly; |
68 | struct mce_vendor_flags mce_flags __read_mostly; | ||
68 | 69 | ||
69 | struct mca_config mca_cfg __read_mostly = { | 70 | struct mca_config mca_cfg __read_mostly = { |
70 | .bootlog = -1, | 71 | .bootlog = -1, |
@@ -89,9 +90,6 @@ static DECLARE_WAIT_QUEUE_HEAD(mce_chrdev_wait);
89 | static DEFINE_PER_CPU(struct mce, mces_seen); | 90 | static DEFINE_PER_CPU(struct mce, mces_seen); |
90 | static int cpu_missing; | 91 | static int cpu_missing; |
91 | 92 | ||
92 | /* CMCI storm detection filter */ | ||
93 | static DEFINE_PER_CPU(unsigned long, mce_polled_error); | ||
94 | |||
95 | /* | 93 | /* |
96 | * MCA banks polled by the period polling timer for corrected events. | 94 | * MCA banks polled by the period polling timer for corrected events. |
97 | * With Intel CMCI, this only has MCA banks which do not support CMCI (if any). | 95 | * With Intel CMCI, this only has MCA banks which do not support CMCI (if any). |
@@ -622,8 +620,9 @@ DEFINE_PER_CPU(unsigned, mce_poll_count);
622 | * is already totally * confused. In this case it's likely it will | 620 | * is already totally * confused. In this case it's likely it will |
623 | * not fully execute the machine check handler either. | 621 | * not fully execute the machine check handler either. |
624 | */ | 622 | */ |
625 | void machine_check_poll(enum mcp_flags flags, mce_banks_t *b) | 623 | bool machine_check_poll(enum mcp_flags flags, mce_banks_t *b) |
626 | { | 624 | { |
625 | bool error_logged = false; | ||
627 | struct mce m; | 626 | struct mce m; |
628 | int severity; | 627 | int severity; |
629 | int i; | 628 | int i; |
@@ -646,7 +645,7 @@ void machine_check_poll(enum mcp_flags flags, mce_banks_t *b)
646 | if (!(m.status & MCI_STATUS_VAL)) | 645 | if (!(m.status & MCI_STATUS_VAL)) |
647 | continue; | 646 | continue; |
648 | 647 | ||
649 | this_cpu_write(mce_polled_error, 1); | 648 | |
650 | /* | 649 | /* |
651 | * Uncorrected or signalled events are handled by the exception | 650 | * Uncorrected or signalled events are handled by the exception |
652 | * handler when it is enabled, so don't process those here. | 651 | * handler when it is enabled, so don't process those here. |
@@ -679,8 +678,10 @@ void machine_check_poll(enum mcp_flags flags, mce_banks_t *b)
679 | * Don't get the IP here because it's unlikely to | 678 | * Don't get the IP here because it's unlikely to |
680 | * have anything to do with the actual error location. | 679 | * have anything to do with the actual error location. |
681 | */ | 680 | */ |
682 | if (!(flags & MCP_DONTLOG) && !mca_cfg.dont_log_ce) | 681 | if (!(flags & MCP_DONTLOG) && !mca_cfg.dont_log_ce) { |
682 | error_logged = true; | ||
683 | mce_log(&m); | 683 | mce_log(&m); |
684 | } | ||
684 | 685 | ||
685 | /* | 686 | /* |
686 | * Clear state for this bank. | 687 | * Clear state for this bank. |
@@ -694,6 +695,8 @@ void machine_check_poll(enum mcp_flags flags, mce_banks_t *b) | |||
694 | */ | 695 | */ |
695 | 696 | ||
696 | sync_core(); | 697 | sync_core(); |
698 | |||
699 | return error_logged; | ||
697 | } | 700 | } |
698 | EXPORT_SYMBOL_GPL(machine_check_poll); | 701 | EXPORT_SYMBOL_GPL(machine_check_poll); |
699 | 702 | ||
@@ -813,7 +816,7 @@ static void mce_reign(void) | |||
813 | * other CPUs. | 816 | * other CPUs. |
814 | */ | 817 | */ |
815 | if (m && global_worst >= MCE_PANIC_SEVERITY && mca_cfg.tolerant < 3) | 818 | if (m && global_worst >= MCE_PANIC_SEVERITY && mca_cfg.tolerant < 3) |
816 | mce_panic("Fatal Machine check", m, msg); | 819 | mce_panic("Fatal machine check", m, msg); |
817 | 820 | ||
818 | /* | 821 | /* |
819 | * For UC somewhere we let the CPU who detects it handle it. | 822 | * For UC somewhere we let the CPU who detects it handle it. |
@@ -826,7 +829,7 @@ static void mce_reign(void) | |||
826 | * source or one CPU is hung. Panic. | 829 | * source or one CPU is hung. Panic. |
827 | */ | 830 | */ |
828 | if (global_worst <= MCE_KEEP_SEVERITY && mca_cfg.tolerant < 3) | 831 | if (global_worst <= MCE_KEEP_SEVERITY && mca_cfg.tolerant < 3) |
829 | mce_panic("Machine check from unknown source", NULL, NULL); | 832 | mce_panic("Fatal machine check from unknown source", NULL, NULL); |
830 | 833 | ||
831 | /* | 834 | /* |
832 | * Now clear all the mces_seen so that they don't reappear on | 835 | * Now clear all the mces_seen so that they don't reappear on |
@@ -1258,7 +1261,7 @@ void mce_log_therm_throt_event(__u64 status) | |||
1258 | * poller finds an MCE, poll 2x faster. When the poller finds no more | 1261 | * poller finds an MCE, poll 2x faster. When the poller finds no more |
1259 | * errors, poll 2x slower (up to check_interval seconds). | 1262 | * errors, poll 2x slower (up to check_interval seconds). |
1260 | */ | 1263 | */ |
1261 | static unsigned long check_interval = 5 * 60; /* 5 minutes */ | 1264 | static unsigned long check_interval = INITIAL_CHECK_INTERVAL; |
1262 | 1265 | ||
1263 | static DEFINE_PER_CPU(unsigned long, mce_next_interval); /* in jiffies */ | 1266 | static DEFINE_PER_CPU(unsigned long, mce_next_interval); /* in jiffies */ |
1264 | static DEFINE_PER_CPU(struct timer_list, mce_timer); | 1267 | static DEFINE_PER_CPU(struct timer_list, mce_timer); |
@@ -1268,49 +1271,57 @@ static unsigned long mce_adjust_timer_default(unsigned long interval) | |||
1268 | return interval; | 1271 | return interval; |
1269 | } | 1272 | } |
1270 | 1273 | ||
1271 | static unsigned long (*mce_adjust_timer)(unsigned long interval) = | 1274 | static unsigned long (*mce_adjust_timer)(unsigned long interval) = mce_adjust_timer_default; |
1272 | mce_adjust_timer_default; | ||
1273 | 1275 | ||
1274 | static int cmc_error_seen(void) | 1276 | static void __restart_timer(struct timer_list *t, unsigned long interval) |
1275 | { | 1277 | { |
1276 | unsigned long *v = this_cpu_ptr(&mce_polled_error); | 1278 | unsigned long when = jiffies + interval; |
1279 | unsigned long flags; | ||
1280 | |||
1281 | local_irq_save(flags); | ||
1277 | 1282 | ||
1278 | return test_and_clear_bit(0, v); | 1283 | if (timer_pending(t)) { |
1284 | if (time_before(when, t->expires)) | ||
1285 | mod_timer_pinned(t, when); | ||
1286 | } else { | ||
1287 | t->expires = round_jiffies(when); | ||
1288 | add_timer_on(t, smp_processor_id()); | ||
1289 | } | ||
1290 | |||
1291 | local_irq_restore(flags); | ||
1279 | } | 1292 | } |
1280 | 1293 | ||
1281 | static void mce_timer_fn(unsigned long data) | 1294 | static void mce_timer_fn(unsigned long data) |
1282 | { | 1295 | { |
1283 | struct timer_list *t = this_cpu_ptr(&mce_timer); | 1296 | struct timer_list *t = this_cpu_ptr(&mce_timer); |
1297 | int cpu = smp_processor_id(); | ||
1284 | unsigned long iv; | 1298 | unsigned long iv; |
1285 | int notify; | ||
1286 | 1299 | ||
1287 | WARN_ON(smp_processor_id() != data); | 1300 | WARN_ON(cpu != data); |
1301 | |||
1302 | iv = __this_cpu_read(mce_next_interval); | ||
1288 | 1303 | ||
1289 | if (mce_available(this_cpu_ptr(&cpu_info))) { | 1304 | if (mce_available(this_cpu_ptr(&cpu_info))) { |
1290 | machine_check_poll(MCP_TIMESTAMP, | 1305 | machine_check_poll(MCP_TIMESTAMP, this_cpu_ptr(&mce_poll_banks)); |
1291 | this_cpu_ptr(&mce_poll_banks)); | 1306 | |
1292 | mce_intel_cmci_poll(); | 1307 | if (mce_intel_cmci_poll()) { |
1308 | iv = mce_adjust_timer(iv); | ||
1309 | goto done; | ||
1310 | } | ||
1293 | } | 1311 | } |
1294 | 1312 | ||
1295 | /* | 1313 | /* |
1296 | * Alert userspace if needed. If we logged an MCE, reduce the | 1314 | * Alert userspace if needed. If we logged an MCE, reduce the polling |
1297 | * polling interval, otherwise increase the polling interval. | 1315 | * interval, otherwise increase the polling interval. |
1298 | */ | 1316 | */ |
1299 | iv = __this_cpu_read(mce_next_interval); | 1317 | if (mce_notify_irq()) |
1300 | notify = mce_notify_irq(); | ||
1301 | notify |= cmc_error_seen(); | ||
1302 | if (notify) { | ||
1303 | iv = max(iv / 2, (unsigned long) HZ/100); | 1318 | iv = max(iv / 2, (unsigned long) HZ/100); |
1304 | } else { | 1319 | else |
1305 | iv = min(iv * 2, round_jiffies_relative(check_interval * HZ)); | 1320 | iv = min(iv * 2, round_jiffies_relative(check_interval * HZ)); |
1306 | iv = mce_adjust_timer(iv); | 1321 | |
1307 | } | 1322 | done: |
1308 | __this_cpu_write(mce_next_interval, iv); | 1323 | __this_cpu_write(mce_next_interval, iv); |
1309 | /* Might have become 0 after CMCI storm subsided */ | 1324 | __restart_timer(t, iv); |
1310 | if (iv) { | ||
1311 | t->expires = jiffies + iv; | ||
1312 | add_timer_on(t, smp_processor_id()); | ||
1313 | } | ||
1314 | } | 1325 | } |
1315 | 1326 | ||
1316 | /* | 1327 | /* |
@@ -1319,16 +1330,10 @@ static void mce_timer_fn(unsigned long data) | |||
1319 | void mce_timer_kick(unsigned long interval) | 1330 | void mce_timer_kick(unsigned long interval) |
1320 | { | 1331 | { |
1321 | struct timer_list *t = this_cpu_ptr(&mce_timer); | 1332 | struct timer_list *t = this_cpu_ptr(&mce_timer); |
1322 | unsigned long when = jiffies + interval; | ||
1323 | unsigned long iv = __this_cpu_read(mce_next_interval); | 1333 | unsigned long iv = __this_cpu_read(mce_next_interval); |
1324 | 1334 | ||
1325 | if (timer_pending(t)) { | 1335 | __restart_timer(t, interval); |
1326 | if (time_before(when, t->expires)) | 1336 | |
1327 | mod_timer_pinned(t, when); | ||
1328 | } else { | ||
1329 | t->expires = round_jiffies(when); | ||
1330 | add_timer_on(t, smp_processor_id()); | ||
1331 | } | ||
1332 | if (interval < iv) | 1337 | if (interval < iv) |
1333 | __this_cpu_write(mce_next_interval, interval); | 1338 | __this_cpu_write(mce_next_interval, interval); |
1334 | } | 1339 | } |
@@ -1525,45 +1530,46 @@ static int __mcheck_cpu_apply_quirks(struct cpuinfo_x86 *c) | |||
1525 | * Various K7s with broken bank 0 around. Always disable | 1530 | * Various K7s with broken bank 0 around. Always disable |
1526 | * by default. | 1531 | * by default. |
1527 | */ | 1532 | */ |
1528 | if (c->x86 == 6 && cfg->banks > 0) | 1533 | if (c->x86 == 6 && cfg->banks > 0) |
1529 | mce_banks[0].ctl = 0; | 1534 | mce_banks[0].ctl = 0; |
1530 | 1535 | ||
1531 | /* | 1536 | /* |
1532 | * Turn off MC4_MISC thresholding banks on those models since | 1537 | * overflow_recov is supported for F15h Models 00h-0fh |
1533 | * they're not supported there. | 1538 | * even though we don't have a CPUID bit for it. |
1534 | */ | 1539 | */ |
1535 | if (c->x86 == 0x15 && | 1540 | if (c->x86 == 0x15 && c->x86_model <= 0xf) |
1536 | (c->x86_model >= 0x10 && c->x86_model <= 0x1f)) { | 1541 | mce_flags.overflow_recov = 1; |
1537 | int i; | 1542 | |
1538 | u64 val, hwcr; | 1543 | /* |
1539 | bool need_toggle; | 1544 | * Turn off MC4_MISC thresholding banks on those models since |
1540 | u32 msrs[] = { | 1545 | * they're not supported there. |
1546 | */ | ||
1547 | if (c->x86 == 0x15 && | ||
1548 | (c->x86_model >= 0x10 && c->x86_model <= 0x1f)) { | ||
1549 | int i; | ||
1550 | u64 hwcr; | ||
1551 | bool need_toggle; | ||
1552 | u32 msrs[] = { | ||
1541 | 0x00000413, /* MC4_MISC0 */ | 1553 | 0x00000413, /* MC4_MISC0 */ |
1542 | 0xc0000408, /* MC4_MISC1 */ | 1554 | 0xc0000408, /* MC4_MISC1 */ |
1543 | }; | 1555 | }; |
1544 | 1556 | ||
1545 | rdmsrl(MSR_K7_HWCR, hwcr); | 1557 | rdmsrl(MSR_K7_HWCR, hwcr); |
1546 | 1558 | ||
1547 | /* McStatusWrEn has to be set */ | 1559 | /* McStatusWrEn has to be set */ |
1548 | need_toggle = !(hwcr & BIT(18)); | 1560 | need_toggle = !(hwcr & BIT(18)); |
1549 | 1561 | ||
1550 | if (need_toggle) | 1562 | if (need_toggle) |
1551 | wrmsrl(MSR_K7_HWCR, hwcr | BIT(18)); | 1563 | wrmsrl(MSR_K7_HWCR, hwcr | BIT(18)); |
1552 | 1564 | ||
1553 | for (i = 0; i < ARRAY_SIZE(msrs); i++) { | 1565 | /* Clear CntP bit safely */ |
1554 | rdmsrl(msrs[i], val); | 1566 | for (i = 0; i < ARRAY_SIZE(msrs); i++) |
1567 | msr_clear_bit(msrs[i], 62); | ||
1555 | 1568 | ||
1556 | /* CntP bit set? */ | 1569 | /* restore old settings */ |
1557 | if (val & BIT_64(62)) { | 1570 | if (need_toggle) |
1558 | val &= ~BIT_64(62); | 1571 | wrmsrl(MSR_K7_HWCR, hwcr); |
1559 | wrmsrl(msrs[i], val); | 1572 | } |
1560 | } | ||
1561 | } | ||
1562 | |||
1563 | /* restore old settings */ | ||
1564 | if (need_toggle) | ||
1565 | wrmsrl(MSR_K7_HWCR, hwcr); | ||
1566 | } | ||
1567 | } | 1573 | } |
1568 | 1574 | ||
1569 | if (c->x86_vendor == X86_VENDOR_INTEL) { | 1575 | if (c->x86_vendor == X86_VENDOR_INTEL) { |
@@ -1629,10 +1635,11 @@ static void __mcheck_cpu_init_vendor(struct cpuinfo_x86 *c) | |||
1629 | switch (c->x86_vendor) { | 1635 | switch (c->x86_vendor) { |
1630 | case X86_VENDOR_INTEL: | 1636 | case X86_VENDOR_INTEL: |
1631 | mce_intel_feature_init(c); | 1637 | mce_intel_feature_init(c); |
1632 | mce_adjust_timer = mce_intel_adjust_timer; | 1638 | mce_adjust_timer = cmci_intel_adjust_timer; |
1633 | break; | 1639 | break; |
1634 | case X86_VENDOR_AMD: | 1640 | case X86_VENDOR_AMD: |
1635 | mce_amd_feature_init(c); | 1641 | mce_amd_feature_init(c); |
1642 | mce_flags.overflow_recov = cpuid_ebx(0x80000007) & 0x1; | ||
1636 | break; | 1643 | break; |
1637 | default: | 1644 | default: |
1638 | break; | 1645 | break; |
@@ -2017,6 +2024,7 @@ __setup("mce", mcheck_enable); | |||
2017 | int __init mcheck_init(void) | 2024 | int __init mcheck_init(void) |
2018 | { | 2025 | { |
2019 | mcheck_intel_therm_init(); | 2026 | mcheck_intel_therm_init(); |
2027 | mcheck_vendor_init_severity(); | ||
2020 | 2028 | ||
2021 | return 0; | 2029 | return 0; |
2022 | } | 2030 | } |
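The mce.c hunks above replace the open-coded timer re-arm with __restart_timer() and keep the adaptive polling policy: the interval is halved whenever an event was logged (with a floor of HZ/100) and doubled on a quiet run (capped at check_interval seconds). A stand-alone sketch of that back-off policy, with round_jiffies_relative() dropped for brevity and the constants only mirroring the kernel names:

/* Sketch of the poll back-off used by mce_timer_fn(); an illustration,
 * not the kernel implementation. Intervals are in jiffies. */
#define HZ              1000UL
#define CHECK_INTERVAL  (5UL * 60)      /* seconds; role of INITIAL_CHECK_INTERVAL */

static unsigned long next_poll_interval(unsigned long iv, int logged_error)
{
        if (logged_error) {
                /* Something was logged: poll twice as fast, floor at 10ms. */
                iv /= 2;
                if (iv < HZ / 100)
                        iv = HZ / 100;
        } else {
                /* Quiet run: back off, cap at check_interval seconds. */
                iv *= 2;
                if (iv > CHECK_INTERVAL * HZ)
                        iv = CHECK_INTERVAL * HZ;
        }

        return iv;
}

Returning machine_check_poll()'s "error logged" result is what lets the timer feed this policy directly instead of going through the old per-CPU mce_polled_error flag.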
diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c index f1c3769bbd64..55ad9b37cae8 100644 --- a/arch/x86/kernel/cpu/mcheck/mce_amd.c +++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c | |||
@@ -79,7 +79,7 @@ static inline bool is_shared_bank(int bank) | |||
79 | return (bank == 4); | 79 | return (bank == 4); |
80 | } | 80 | } |
81 | 81 | ||
82 | static const char * const bank4_names(struct threshold_block *b) | 82 | static const char *bank4_names(const struct threshold_block *b) |
83 | { | 83 | { |
84 | switch (b->address) { | 84 | switch (b->address) { |
85 | /* MSR4_MISC0 */ | 85 | /* MSR4_MISC0 */ |
@@ -250,6 +250,7 @@ void mce_amd_feature_init(struct cpuinfo_x86 *c) | |||
250 | if (!b.interrupt_capable) | 250 | if (!b.interrupt_capable) |
251 | goto init; | 251 | goto init; |
252 | 252 | ||
253 | b.interrupt_enable = 1; | ||
253 | new = (high & MASK_LVTOFF_HI) >> 20; | 254 | new = (high & MASK_LVTOFF_HI) >> 20; |
254 | offset = setup_APIC_mce(offset, new); | 255 | offset = setup_APIC_mce(offset, new); |
255 | 256 | ||
@@ -322,6 +323,8 @@ static void amd_threshold_interrupt(void) | |||
322 | log: | 323 | log: |
323 | mce_setup(&m); | 324 | mce_setup(&m); |
324 | rdmsrl(MSR_IA32_MCx_STATUS(bank), m.status); | 325 | rdmsrl(MSR_IA32_MCx_STATUS(bank), m.status); |
326 | if (!(m.status & MCI_STATUS_VAL)) | ||
327 | return; | ||
325 | m.misc = ((u64)high << 32) | low; | 328 | m.misc = ((u64)high << 32) | low; |
326 | m.bank = bank; | 329 | m.bank = bank; |
327 | mce_log(&m); | 330 | mce_log(&m); |
@@ -497,10 +500,12 @@ static int allocate_threshold_blocks(unsigned int cpu, unsigned int bank, | |||
497 | b->interrupt_capable = lvt_interrupt_supported(bank, high); | 500 | b->interrupt_capable = lvt_interrupt_supported(bank, high); |
498 | b->threshold_limit = THRESHOLD_MAX; | 501 | b->threshold_limit = THRESHOLD_MAX; |
499 | 502 | ||
500 | if (b->interrupt_capable) | 503 | if (b->interrupt_capable) { |
501 | threshold_ktype.default_attrs[2] = &interrupt_enable.attr; | 504 | threshold_ktype.default_attrs[2] = &interrupt_enable.attr; |
502 | else | 505 | b->interrupt_enable = 1; |
506 | } else { | ||
503 | threshold_ktype.default_attrs[2] = NULL; | 507 | threshold_ktype.default_attrs[2] = NULL; |
508 | } | ||
504 | 509 | ||
505 | INIT_LIST_HEAD(&b->miscj); | 510 | INIT_LIST_HEAD(&b->miscj); |
506 | 511 | ||
diff --git a/arch/x86/kernel/cpu/mcheck/mce_intel.c b/arch/x86/kernel/cpu/mcheck/mce_intel.c index b3c97bafc123..b4a41cf030ed 100644 --- a/arch/x86/kernel/cpu/mcheck/mce_intel.c +++ b/arch/x86/kernel/cpu/mcheck/mce_intel.c | |||
@@ -39,6 +39,15 @@ | |||
39 | static DEFINE_PER_CPU(mce_banks_t, mce_banks_owned); | 39 | static DEFINE_PER_CPU(mce_banks_t, mce_banks_owned); |
40 | 40 | ||
41 | /* | 41 | /* |
42 | * CMCI storm detection backoff counter | ||
43 | * | ||
44 | * During storm, we reset this counter to INITIAL_CHECK_INTERVAL in case we've | ||
45 | * encountered an error. If not, we decrement it by one. We signal the end of | ||
46 | * the CMCI storm when it reaches 0. | ||
47 | */ | ||
48 | static DEFINE_PER_CPU(int, cmci_backoff_cnt); | ||
49 | |||
50 | /* | ||
42 | * cmci_discover_lock protects against parallel discovery attempts | 51 | * cmci_discover_lock protects against parallel discovery attempts |
43 | * which could race against each other. | 52 | * which could race against each other. |
44 | */ | 53 | */ |
@@ -46,7 +55,7 @@ static DEFINE_RAW_SPINLOCK(cmci_discover_lock); | |||
46 | 55 | ||
47 | #define CMCI_THRESHOLD 1 | 56 | #define CMCI_THRESHOLD 1 |
48 | #define CMCI_POLL_INTERVAL (30 * HZ) | 57 | #define CMCI_POLL_INTERVAL (30 * HZ) |
49 | #define CMCI_STORM_INTERVAL (1 * HZ) | 58 | #define CMCI_STORM_INTERVAL (HZ) |
50 | #define CMCI_STORM_THRESHOLD 15 | 59 | #define CMCI_STORM_THRESHOLD 15 |
51 | 60 | ||
52 | static DEFINE_PER_CPU(unsigned long, cmci_time_stamp); | 61 | static DEFINE_PER_CPU(unsigned long, cmci_time_stamp); |
@@ -82,11 +91,21 @@ static int cmci_supported(int *banks) | |||
82 | return !!(cap & MCG_CMCI_P); | 91 | return !!(cap & MCG_CMCI_P); |
83 | } | 92 | } |
84 | 93 | ||
85 | void mce_intel_cmci_poll(void) | 94 | bool mce_intel_cmci_poll(void) |
86 | { | 95 | { |
87 | if (__this_cpu_read(cmci_storm_state) == CMCI_STORM_NONE) | 96 | if (__this_cpu_read(cmci_storm_state) == CMCI_STORM_NONE) |
88 | return; | 97 | return false; |
89 | machine_check_poll(MCP_TIMESTAMP, this_cpu_ptr(&mce_banks_owned)); | 98 | |
99 | /* | ||
100 | * Reset the counter if we've logged an error in the last poll | ||
101 | * during the storm. | ||
102 | */ | ||
103 | if (machine_check_poll(MCP_TIMESTAMP, this_cpu_ptr(&mce_banks_owned))) | ||
104 | this_cpu_write(cmci_backoff_cnt, INITIAL_CHECK_INTERVAL); | ||
105 | else | ||
106 | this_cpu_dec(cmci_backoff_cnt); | ||
107 | |||
108 | return true; | ||
90 | } | 109 | } |
91 | 110 | ||
92 | void mce_intel_hcpu_update(unsigned long cpu) | 111 | void mce_intel_hcpu_update(unsigned long cpu) |
@@ -97,31 +116,32 @@ void mce_intel_hcpu_update(unsigned long cpu) | |||
97 | per_cpu(cmci_storm_state, cpu) = CMCI_STORM_NONE; | 116 | per_cpu(cmci_storm_state, cpu) = CMCI_STORM_NONE; |
98 | } | 117 | } |
99 | 118 | ||
100 | unsigned long mce_intel_adjust_timer(unsigned long interval) | 119 | unsigned long cmci_intel_adjust_timer(unsigned long interval) |
101 | { | 120 | { |
102 | int r; | 121 | if ((this_cpu_read(cmci_backoff_cnt) > 0) && |
103 | 122 | (__this_cpu_read(cmci_storm_state) == CMCI_STORM_ACTIVE)) { | |
104 | if (interval < CMCI_POLL_INTERVAL) | 123 | mce_notify_irq(); |
105 | return interval; | 124 | return CMCI_STORM_INTERVAL; |
125 | } | ||
106 | 126 | ||
107 | switch (__this_cpu_read(cmci_storm_state)) { | 127 | switch (__this_cpu_read(cmci_storm_state)) { |
108 | case CMCI_STORM_ACTIVE: | 128 | case CMCI_STORM_ACTIVE: |
129 | |||
109 | /* | 130 | /* |
110 | * We switch back to interrupt mode once the poll timer has | 131 | * We switch back to interrupt mode once the poll timer has |
111 | * silenced itself. That means no events recorded and the | 132 | * silenced itself. That means no events recorded and the timer |
112 | * timer interval is back to our poll interval. | 133 | * interval is back to our poll interval. |
113 | */ | 134 | */ |
114 | __this_cpu_write(cmci_storm_state, CMCI_STORM_SUBSIDED); | 135 | __this_cpu_write(cmci_storm_state, CMCI_STORM_SUBSIDED); |
115 | r = atomic_sub_return(1, &cmci_storm_on_cpus); | 136 | if (!atomic_sub_return(1, &cmci_storm_on_cpus)) |
116 | if (r == 0) | ||
117 | pr_notice("CMCI storm subsided: switching to interrupt mode\n"); | 137 | pr_notice("CMCI storm subsided: switching to interrupt mode\n"); |
138 | |||
118 | /* FALLTHROUGH */ | 139 | /* FALLTHROUGH */ |
119 | 140 | ||
120 | case CMCI_STORM_SUBSIDED: | 141 | case CMCI_STORM_SUBSIDED: |
121 | /* | 142 | /* |
122 | * We wait for all cpus to go back to SUBSIDED | 143 | * We wait for all CPUs to go back to SUBSIDED state. When that |
123 | * state. When that happens we switch back to | 144 | * happens we switch back to interrupt mode. |
124 | * interrupt mode. | ||
125 | */ | 145 | */ |
126 | if (!atomic_read(&cmci_storm_on_cpus)) { | 146 | if (!atomic_read(&cmci_storm_on_cpus)) { |
127 | __this_cpu_write(cmci_storm_state, CMCI_STORM_NONE); | 147 | __this_cpu_write(cmci_storm_state, CMCI_STORM_NONE); |
@@ -130,10 +150,8 @@ unsigned long mce_intel_adjust_timer(unsigned long interval) | |||
130 | } | 150 | } |
131 | return CMCI_POLL_INTERVAL; | 151 | return CMCI_POLL_INTERVAL; |
132 | default: | 152 | default: |
133 | /* | 153 | |
134 | * We have shiny weather. Let the poll do whatever it | 154 | /* We have shiny weather. Let the poll do whatever it thinks. */ |
135 | * thinks. | ||
136 | */ | ||
137 | return interval; | 155 | return interval; |
138 | } | 156 | } |
139 | } | 157 | } |
@@ -178,7 +196,8 @@ static bool cmci_storm_detect(void) | |||
178 | cmci_storm_disable_banks(); | 196 | cmci_storm_disable_banks(); |
179 | __this_cpu_write(cmci_storm_state, CMCI_STORM_ACTIVE); | 197 | __this_cpu_write(cmci_storm_state, CMCI_STORM_ACTIVE); |
180 | r = atomic_add_return(1, &cmci_storm_on_cpus); | 198 | r = atomic_add_return(1, &cmci_storm_on_cpus); |
181 | mce_timer_kick(CMCI_POLL_INTERVAL); | 199 | mce_timer_kick(CMCI_STORM_INTERVAL); |
200 | this_cpu_write(cmci_backoff_cnt, INITIAL_CHECK_INTERVAL); | ||
182 | 201 | ||
183 | if (r == 1) | 202 | if (r == 1) |
184 | pr_notice("CMCI storm detected: switching to poll mode\n"); | 203 | pr_notice("CMCI storm detected: switching to poll mode\n"); |
@@ -195,6 +214,7 @@ static void intel_threshold_interrupt(void) | |||
195 | { | 214 | { |
196 | if (cmci_storm_detect()) | 215 | if (cmci_storm_detect()) |
197 | return; | 216 | return; |
217 | |||
198 | machine_check_poll(MCP_TIMESTAMP, this_cpu_ptr(&mce_banks_owned)); | 218 | machine_check_poll(MCP_TIMESTAMP, this_cpu_ptr(&mce_banks_owned)); |
199 | mce_notify_irq(); | 219 | mce_notify_irq(); |
200 | } | 220 | } |
@@ -286,6 +306,7 @@ void cmci_recheck(void) | |||
286 | 306 | ||
287 | if (!mce_available(raw_cpu_ptr(&cpu_info)) || !cmci_supported(&banks)) | 307 | if (!mce_available(raw_cpu_ptr(&cpu_info)) || !cmci_supported(&banks)) |
288 | return; | 308 | return; |
309 | |||
289 | local_irq_save(flags); | 310 | local_irq_save(flags); |
290 | machine_check_poll(MCP_TIMESTAMP, this_cpu_ptr(&mce_banks_owned)); | 311 | machine_check_poll(MCP_TIMESTAMP, this_cpu_ptr(&mce_banks_owned)); |
291 | local_irq_restore(flags); | 312 | local_irq_restore(flags); |
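The mce_intel.c changes introduce the per-CPU cmci_backoff_cnt: while a storm is active, each poll that logs an error resets the counter to INITIAL_CHECK_INTERVAL, a quiet poll decrements it, and cmci_intel_adjust_timer() keeps the fast CMCI_STORM_INTERVAL only while the counter is still positive. A single-CPU sketch of that mechanism, with illustrative names rather than the kernel's:

/* Stand-alone sketch of the CMCI storm back-off counter; not kernel code. */
#define BACKOFF_START   (5 * 60)        /* role of INITIAL_CHECK_INTERVAL */
#define STORM_INTERVAL  1               /* role of CMCI_STORM_INTERVAL (1 * HZ) */

static int backoff_cnt;
static int storm_active;

/* Poll path: 'logged' is what machine_check_poll() reported for this run. */
static void storm_poll_result(int logged)
{
        if (logged)
                backoff_cnt = BACKOFF_START;    /* still seeing errors: restart back-off */
        else if (backoff_cnt > 0)
                backoff_cnt--;                  /* quiet poll: count down toward storm end */
}

/* Timer re-arm path: keep the fast interval only while the storm persists. */
static unsigned long storm_adjust_interval(unsigned long interval)
{
        if (backoff_cnt > 0 && storm_active)
                return STORM_INTERVAL;

        return interval;        /* storm subsiding: fall back to the regular cadence */
}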
diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c index bfbbe6195e2d..12829c3ced3c 100644 --- a/arch/x86/kernel/cpu/microcode/amd.c +++ b/arch/x86/kernel/cpu/microcode/amd.c | |||
@@ -21,7 +21,6 @@ | |||
21 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | 21 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
22 | 22 | ||
23 | #include <linux/firmware.h> | 23 | #include <linux/firmware.h> |
24 | #include <linux/pci_ids.h> | ||
25 | #include <linux/uaccess.h> | 24 | #include <linux/uaccess.h> |
26 | #include <linux/vmalloc.h> | 25 | #include <linux/vmalloc.h> |
27 | #include <linux/kernel.h> | 26 | #include <linux/kernel.h> |
diff --git a/arch/x86/kernel/cpu/microcode/core_early.c b/arch/x86/kernel/cpu/microcode/core_early.c index d45df4bd16ab..a413a69cbd74 100644 --- a/arch/x86/kernel/cpu/microcode/core_early.c +++ b/arch/x86/kernel/cpu/microcode/core_early.c | |||
@@ -23,57 +23,6 @@ | |||
23 | #include <asm/processor.h> | 23 | #include <asm/processor.h> |
24 | #include <asm/cmdline.h> | 24 | #include <asm/cmdline.h> |
25 | 25 | ||
26 | #define QCHAR(a, b, c, d) ((a) + ((b) << 8) + ((c) << 16) + ((d) << 24)) | ||
27 | #define CPUID_INTEL1 QCHAR('G', 'e', 'n', 'u') | ||
28 | #define CPUID_INTEL2 QCHAR('i', 'n', 'e', 'I') | ||
29 | #define CPUID_INTEL3 QCHAR('n', 't', 'e', 'l') | ||
30 | #define CPUID_AMD1 QCHAR('A', 'u', 't', 'h') | ||
31 | #define CPUID_AMD2 QCHAR('e', 'n', 't', 'i') | ||
32 | #define CPUID_AMD3 QCHAR('c', 'A', 'M', 'D') | ||
33 | |||
34 | #define CPUID_IS(a, b, c, ebx, ecx, edx) \ | ||
35 | (!((ebx ^ (a))|(edx ^ (b))|(ecx ^ (c)))) | ||
36 | |||
37 | /* | ||
38 | * In early loading microcode phase on BSP, boot_cpu_data is not set up yet. | ||
39 | * x86_vendor() gets vendor id for BSP. | ||
40 | * | ||
41 | * In 32 bit AP case, accessing boot_cpu_data needs linear address. To simplify | ||
42 | * coding, we still use x86_vendor() to get vendor id for AP. | ||
43 | * | ||
44 | * x86_vendor() gets vendor information directly through cpuid. | ||
45 | */ | ||
46 | static int x86_vendor(void) | ||
47 | { | ||
48 | u32 eax = 0x00000000; | ||
49 | u32 ebx, ecx = 0, edx; | ||
50 | |||
51 | native_cpuid(&eax, &ebx, &ecx, &edx); | ||
52 | |||
53 | if (CPUID_IS(CPUID_INTEL1, CPUID_INTEL2, CPUID_INTEL3, ebx, ecx, edx)) | ||
54 | return X86_VENDOR_INTEL; | ||
55 | |||
56 | if (CPUID_IS(CPUID_AMD1, CPUID_AMD2, CPUID_AMD3, ebx, ecx, edx)) | ||
57 | return X86_VENDOR_AMD; | ||
58 | |||
59 | return X86_VENDOR_UNKNOWN; | ||
60 | } | ||
61 | |||
62 | static int x86_family(void) | ||
63 | { | ||
64 | u32 eax = 0x00000001; | ||
65 | u32 ebx, ecx = 0, edx; | ||
66 | int x86; | ||
67 | |||
68 | native_cpuid(&eax, &ebx, &ecx, &edx); | ||
69 | |||
70 | x86 = (eax >> 8) & 0xf; | ||
71 | if (x86 == 15) | ||
72 | x86 += (eax >> 20) & 0xff; | ||
73 | |||
74 | return x86; | ||
75 | } | ||
76 | |||
77 | static bool __init check_loader_disabled_bsp(void) | 26 | static bool __init check_loader_disabled_bsp(void) |
78 | { | 27 | { |
79 | #ifdef CONFIG_X86_32 | 28 | #ifdef CONFIG_X86_32 |
@@ -96,7 +45,7 @@ static bool __init check_loader_disabled_bsp(void) | |||
96 | 45 | ||
97 | void __init load_ucode_bsp(void) | 46 | void __init load_ucode_bsp(void) |
98 | { | 47 | { |
99 | int vendor, x86; | 48 | int vendor, family; |
100 | 49 | ||
101 | if (check_loader_disabled_bsp()) | 50 | if (check_loader_disabled_bsp()) |
102 | return; | 51 | return; |
@@ -105,15 +54,15 @@ void __init load_ucode_bsp(void) | |||
105 | return; | 54 | return; |
106 | 55 | ||
107 | vendor = x86_vendor(); | 56 | vendor = x86_vendor(); |
108 | x86 = x86_family(); | 57 | family = x86_family(); |
109 | 58 | ||
110 | switch (vendor) { | 59 | switch (vendor) { |
111 | case X86_VENDOR_INTEL: | 60 | case X86_VENDOR_INTEL: |
112 | if (x86 >= 6) | 61 | if (family >= 6) |
113 | load_ucode_intel_bsp(); | 62 | load_ucode_intel_bsp(); |
114 | break; | 63 | break; |
115 | case X86_VENDOR_AMD: | 64 | case X86_VENDOR_AMD: |
116 | if (x86 >= 0x10) | 65 | if (family >= 0x10) |
117 | load_ucode_amd_bsp(); | 66 | load_ucode_amd_bsp(); |
118 | break; | 67 | break; |
119 | default: | 68 | default: |
@@ -132,7 +81,7 @@ static bool check_loader_disabled_ap(void) | |||
132 | 81 | ||
133 | void load_ucode_ap(void) | 82 | void load_ucode_ap(void) |
134 | { | 83 | { |
135 | int vendor, x86; | 84 | int vendor, family; |
136 | 85 | ||
137 | if (check_loader_disabled_ap()) | 86 | if (check_loader_disabled_ap()) |
138 | return; | 87 | return; |
@@ -141,15 +90,15 @@ void load_ucode_ap(void) | |||
141 | return; | 90 | return; |
142 | 91 | ||
143 | vendor = x86_vendor(); | 92 | vendor = x86_vendor(); |
144 | x86 = x86_family(); | 93 | family = x86_family(); |
145 | 94 | ||
146 | switch (vendor) { | 95 | switch (vendor) { |
147 | case X86_VENDOR_INTEL: | 96 | case X86_VENDOR_INTEL: |
148 | if (x86 >= 6) | 97 | if (family >= 6) |
149 | load_ucode_intel_ap(); | 98 | load_ucode_intel_ap(); |
150 | break; | 99 | break; |
151 | case X86_VENDOR_AMD: | 100 | case X86_VENDOR_AMD: |
152 | if (x86 >= 0x10) | 101 | if (family >= 0x10) |
153 | load_ucode_amd_ap(); | 102 | load_ucode_amd_ap(); |
154 | break; | 103 | break; |
155 | default: | 104 | default: |
@@ -179,18 +128,18 @@ int __init save_microcode_in_initrd(void) | |||
179 | 128 | ||
180 | void reload_early_microcode(void) | 129 | void reload_early_microcode(void) |
181 | { | 130 | { |
182 | int vendor, x86; | 131 | int vendor, family; |
183 | 132 | ||
184 | vendor = x86_vendor(); | 133 | vendor = x86_vendor(); |
185 | x86 = x86_family(); | 134 | family = x86_family(); |
186 | 135 | ||
187 | switch (vendor) { | 136 | switch (vendor) { |
188 | case X86_VENDOR_INTEL: | 137 | case X86_VENDOR_INTEL: |
189 | if (x86 >= 6) | 138 | if (family >= 6) |
190 | reload_ucode_intel(); | 139 | reload_ucode_intel(); |
191 | break; | 140 | break; |
192 | case X86_VENDOR_AMD: | 141 | case X86_VENDOR_AMD: |
193 | if (x86 >= 0x10) | 142 | if (family >= 0x10) |
194 | reload_ucode_amd(); | 143 | reload_ucode_amd(); |
195 | break; | 144 | break; |
196 | default: | 145 | default: |
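core_early.c drops its private vendor/family helpers in favor of shared ones, and the call sites now say "family" instead of "x86". The decoding those helpers perform is the standard CPUID leaf 1 signature layout; for reference, a stand-alone version of the family and model extraction (function names here are illustrative, not the kernel's):

/* Standard CPUID(1).EAX signature decoding, as done by the removed helpers
 * and by the shared __x86_family()/x86_model() that replace them. */
static unsigned int sig_to_family(unsigned int sig)
{
        unsigned int fam = (sig >> 8) & 0xf;

        if (fam == 0xf)                         /* extended family */
                fam += (sig >> 20) & 0xff;

        return fam;
}

static unsigned int sig_to_model(unsigned int sig)
{
        unsigned int fam   = sig_to_family(sig);
        unsigned int model = (sig >> 4) & 0xf;

        if (fam == 0x6 || fam == 0xf)           /* extended model */
                model += ((sig >> 16) & 0xf) << 4;

        return model;
}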
diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c index 746e7fd08aad..a41beadb3db9 100644 --- a/arch/x86/kernel/cpu/microcode/intel.c +++ b/arch/x86/kernel/cpu/microcode/intel.c | |||
@@ -124,7 +124,7 @@ static int get_matching_mc(struct microcode_intel *mc_intel, int cpu) | |||
124 | cpf = cpu_sig.pf; | 124 | cpf = cpu_sig.pf; |
125 | crev = cpu_sig.rev; | 125 | crev = cpu_sig.rev; |
126 | 126 | ||
127 | return get_matching_microcode(csig, cpf, mc_intel, crev); | 127 | return get_matching_microcode(csig, cpf, crev, mc_intel); |
128 | } | 128 | } |
129 | 129 | ||
130 | static int apply_microcode_intel(int cpu) | 130 | static int apply_microcode_intel(int cpu) |
@@ -226,7 +226,7 @@ static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size, | |||
226 | 226 | ||
227 | csig = uci->cpu_sig.sig; | 227 | csig = uci->cpu_sig.sig; |
228 | cpf = uci->cpu_sig.pf; | 228 | cpf = uci->cpu_sig.pf; |
229 | if (get_matching_microcode(csig, cpf, mc, new_rev)) { | 229 | if (get_matching_microcode(csig, cpf, new_rev, mc)) { |
230 | vfree(new_mc); | 230 | vfree(new_mc); |
231 | new_rev = mc_header.rev; | 231 | new_rev = mc_header.rev; |
232 | new_mc = mc; | 232 | new_mc = mc; |
diff --git a/arch/x86/kernel/cpu/microcode/intel_early.c b/arch/x86/kernel/cpu/microcode/intel_early.c index 420eb933189c..2f49ab4ac0ae 100644 --- a/arch/x86/kernel/cpu/microcode/intel_early.c +++ b/arch/x86/kernel/cpu/microcode/intel_early.c | |||
@@ -16,6 +16,14 @@ | |||
16 | * as published by the Free Software Foundation; either version | 16 | * as published by the Free Software Foundation; either version |
17 | * 2 of the License, or (at your option) any later version. | 17 | * 2 of the License, or (at your option) any later version. |
18 | */ | 18 | */ |
19 | |||
20 | /* | ||
21 | * This needs to be before all headers so that pr_debug in printk.h doesn't turn | ||
22 | * printk calls into no_printk(). | ||
23 | * | ||
24 | *#define DEBUG | ||
25 | */ | ||
26 | |||
19 | #include <linux/module.h> | 27 | #include <linux/module.h> |
20 | #include <linux/mm.h> | 28 | #include <linux/mm.h> |
21 | #include <linux/slab.h> | 29 | #include <linux/slab.h> |
@@ -28,6 +36,9 @@ | |||
28 | #include <asm/tlbflush.h> | 36 | #include <asm/tlbflush.h> |
29 | #include <asm/setup.h> | 37 | #include <asm/setup.h> |
30 | 38 | ||
39 | #undef pr_fmt | ||
40 | #define pr_fmt(fmt) "microcode: " fmt | ||
41 | |||
31 | static unsigned long mc_saved_in_initrd[MAX_UCODE_COUNT]; | 42 | static unsigned long mc_saved_in_initrd[MAX_UCODE_COUNT]; |
32 | static struct mc_saved_data { | 43 | static struct mc_saved_data { |
33 | unsigned int mc_saved_count; | 44 | unsigned int mc_saved_count; |
@@ -35,50 +46,45 @@ static struct mc_saved_data { | |||
35 | } mc_saved_data; | 46 | } mc_saved_data; |
36 | 47 | ||
37 | static enum ucode_state | 48 | static enum ucode_state |
38 | generic_load_microcode_early(struct microcode_intel **mc_saved_p, | 49 | load_microcode_early(struct microcode_intel **saved, |
39 | unsigned int mc_saved_count, | 50 | unsigned int num_saved, struct ucode_cpu_info *uci) |
40 | struct ucode_cpu_info *uci) | ||
41 | { | 51 | { |
42 | struct microcode_intel *ucode_ptr, *new_mc = NULL; | 52 | struct microcode_intel *ucode_ptr, *new_mc = NULL; |
43 | int new_rev = uci->cpu_sig.rev; | 53 | struct microcode_header_intel *mc_hdr; |
44 | enum ucode_state state = UCODE_OK; | 54 | int new_rev, ret, i; |
45 | unsigned int mc_size; | ||
46 | struct microcode_header_intel *mc_header; | ||
47 | unsigned int csig = uci->cpu_sig.sig; | ||
48 | unsigned int cpf = uci->cpu_sig.pf; | ||
49 | int i; | ||
50 | 55 | ||
51 | for (i = 0; i < mc_saved_count; i++) { | 56 | new_rev = uci->cpu_sig.rev; |
52 | ucode_ptr = mc_saved_p[i]; | ||
53 | 57 | ||
54 | mc_header = (struct microcode_header_intel *)ucode_ptr; | 58 | for (i = 0; i < num_saved; i++) { |
55 | mc_size = get_totalsize(mc_header); | 59 | ucode_ptr = saved[i]; |
56 | if (get_matching_microcode(csig, cpf, ucode_ptr, new_rev)) { | 60 | mc_hdr = (struct microcode_header_intel *)ucode_ptr; |
57 | new_rev = mc_header->rev; | ||
58 | new_mc = ucode_ptr; | ||
59 | } | ||
60 | } | ||
61 | 61 | ||
62 | if (!new_mc) { | 62 | ret = get_matching_microcode(uci->cpu_sig.sig, |
63 | state = UCODE_NFOUND; | 63 | uci->cpu_sig.pf, |
64 | goto out; | 64 | new_rev, |
65 | ucode_ptr); | ||
66 | if (!ret) | ||
67 | continue; | ||
68 | |||
69 | new_rev = mc_hdr->rev; | ||
70 | new_mc = ucode_ptr; | ||
65 | } | 71 | } |
66 | 72 | ||
73 | if (!new_mc) | ||
74 | return UCODE_NFOUND; | ||
75 | |||
67 | uci->mc = (struct microcode_intel *)new_mc; | 76 | uci->mc = (struct microcode_intel *)new_mc; |
68 | out: | 77 | return UCODE_OK; |
69 | return state; | ||
70 | } | 78 | } |
71 | 79 | ||
72 | static void | 80 | static inline void |
73 | microcode_pointer(struct microcode_intel **mc_saved, | 81 | copy_initrd_ptrs(struct microcode_intel **mc_saved, unsigned long *initrd, |
74 | unsigned long *mc_saved_in_initrd, | 82 | unsigned long off, int num_saved) |
75 | unsigned long initrd_start, int mc_saved_count) | ||
76 | { | 83 | { |
77 | int i; | 84 | int i; |
78 | 85 | ||
79 | for (i = 0; i < mc_saved_count; i++) | 86 | for (i = 0; i < num_saved; i++) |
80 | mc_saved[i] = (struct microcode_intel *) | 87 | mc_saved[i] = (struct microcode_intel *)(initrd[i] + off); |
81 | (mc_saved_in_initrd[i] + initrd_start); | ||
82 | } | 88 | } |
83 | 89 | ||
84 | #ifdef CONFIG_X86_32 | 90 | #ifdef CONFIG_X86_32 |
@@ -102,55 +108,27 @@ microcode_phys(struct microcode_intel **mc_saved_tmp, | |||
102 | #endif | 108 | #endif |
103 | 109 | ||
104 | static enum ucode_state | 110 | static enum ucode_state |
105 | load_microcode(struct mc_saved_data *mc_saved_data, | 111 | load_microcode(struct mc_saved_data *mc_saved_data, unsigned long *initrd, |
106 | unsigned long *mc_saved_in_initrd, | 112 | unsigned long initrd_start, struct ucode_cpu_info *uci) |
107 | unsigned long initrd_start, | ||
108 | struct ucode_cpu_info *uci) | ||
109 | { | 113 | { |
110 | struct microcode_intel *mc_saved_tmp[MAX_UCODE_COUNT]; | 114 | struct microcode_intel *mc_saved_tmp[MAX_UCODE_COUNT]; |
111 | unsigned int count = mc_saved_data->mc_saved_count; | 115 | unsigned int count = mc_saved_data->mc_saved_count; |
112 | 116 | ||
113 | if (!mc_saved_data->mc_saved) { | 117 | if (!mc_saved_data->mc_saved) { |
114 | microcode_pointer(mc_saved_tmp, mc_saved_in_initrd, | 118 | copy_initrd_ptrs(mc_saved_tmp, initrd, initrd_start, count); |
115 | initrd_start, count); | ||
116 | 119 | ||
117 | return generic_load_microcode_early(mc_saved_tmp, count, uci); | 120 | return load_microcode_early(mc_saved_tmp, count, uci); |
118 | } else { | 121 | } else { |
119 | #ifdef CONFIG_X86_32 | 122 | #ifdef CONFIG_X86_32 |
120 | microcode_phys(mc_saved_tmp, mc_saved_data); | 123 | microcode_phys(mc_saved_tmp, mc_saved_data); |
121 | return generic_load_microcode_early(mc_saved_tmp, count, uci); | 124 | return load_microcode_early(mc_saved_tmp, count, uci); |
122 | #else | 125 | #else |
123 | return generic_load_microcode_early(mc_saved_data->mc_saved, | 126 | return load_microcode_early(mc_saved_data->mc_saved, |
124 | count, uci); | 127 | count, uci); |
125 | #endif | 128 | #endif |
126 | } | 129 | } |
127 | } | 130 | } |
128 | 131 | ||
129 | static u8 get_x86_family(unsigned long sig) | ||
130 | { | ||
131 | u8 x86; | ||
132 | |||
133 | x86 = (sig >> 8) & 0xf; | ||
134 | |||
135 | if (x86 == 0xf) | ||
136 | x86 += (sig >> 20) & 0xff; | ||
137 | |||
138 | return x86; | ||
139 | } | ||
140 | |||
141 | static u8 get_x86_model(unsigned long sig) | ||
142 | { | ||
143 | u8 x86, x86_model; | ||
144 | |||
145 | x86 = get_x86_family(sig); | ||
146 | x86_model = (sig >> 4) & 0xf; | ||
147 | |||
148 | if (x86 == 0x6 || x86 == 0xf) | ||
149 | x86_model += ((sig >> 16) & 0xf) << 4; | ||
150 | |||
151 | return x86_model; | ||
152 | } | ||
153 | |||
154 | /* | 132 | /* |
155 | * Given CPU signature and a microcode patch, this function finds if the | 133 | * Given CPU signature and a microcode patch, this function finds if the |
156 | * microcode patch has matching family and model with the CPU. | 134 | * microcode patch has matching family and model with the CPU. |
@@ -159,42 +137,40 @@ static enum ucode_state | |||
159 | matching_model_microcode(struct microcode_header_intel *mc_header, | 137 | matching_model_microcode(struct microcode_header_intel *mc_header, |
160 | unsigned long sig) | 138 | unsigned long sig) |
161 | { | 139 | { |
162 | u8 x86, x86_model; | 140 | unsigned int fam, model; |
163 | u8 x86_ucode, x86_model_ucode; | 141 | unsigned int fam_ucode, model_ucode; |
164 | struct extended_sigtable *ext_header; | 142 | struct extended_sigtable *ext_header; |
165 | unsigned long total_size = get_totalsize(mc_header); | 143 | unsigned long total_size = get_totalsize(mc_header); |
166 | unsigned long data_size = get_datasize(mc_header); | 144 | unsigned long data_size = get_datasize(mc_header); |
167 | int ext_sigcount, i; | 145 | int ext_sigcount, i; |
168 | struct extended_signature *ext_sig; | 146 | struct extended_signature *ext_sig; |
169 | 147 | ||
170 | x86 = get_x86_family(sig); | 148 | fam = __x86_family(sig); |
171 | x86_model = get_x86_model(sig); | 149 | model = x86_model(sig); |
172 | 150 | ||
173 | x86_ucode = get_x86_family(mc_header->sig); | 151 | fam_ucode = __x86_family(mc_header->sig); |
174 | x86_model_ucode = get_x86_model(mc_header->sig); | 152 | model_ucode = x86_model(mc_header->sig); |
175 | 153 | ||
176 | if (x86 == x86_ucode && x86_model == x86_model_ucode) | 154 | if (fam == fam_ucode && model == model_ucode) |
177 | return UCODE_OK; | 155 | return UCODE_OK; |
178 | 156 | ||
179 | /* Look for ext. headers: */ | 157 | /* Look for ext. headers: */ |
180 | if (total_size <= data_size + MC_HEADER_SIZE) | 158 | if (total_size <= data_size + MC_HEADER_SIZE) |
181 | return UCODE_NFOUND; | 159 | return UCODE_NFOUND; |
182 | 160 | ||
183 | ext_header = (struct extended_sigtable *) | 161 | ext_header = (void *) mc_header + data_size + MC_HEADER_SIZE; |
184 | mc_header + data_size + MC_HEADER_SIZE; | 162 | ext_sig = (void *)ext_header + EXT_HEADER_SIZE; |
185 | ext_sigcount = ext_header->count; | 163 | ext_sigcount = ext_header->count; |
186 | ext_sig = (void *)ext_header + EXT_HEADER_SIZE; | ||
187 | 164 | ||
188 | for (i = 0; i < ext_sigcount; i++) { | 165 | for (i = 0; i < ext_sigcount; i++) { |
189 | x86_ucode = get_x86_family(ext_sig->sig); | 166 | fam_ucode = __x86_family(ext_sig->sig); |
190 | x86_model_ucode = get_x86_model(ext_sig->sig); | 167 | model_ucode = x86_model(ext_sig->sig); |
191 | 168 | ||
192 | if (x86 == x86_ucode && x86_model == x86_model_ucode) | 169 | if (fam == fam_ucode && model == model_ucode) |
193 | return UCODE_OK; | 170 | return UCODE_OK; |
194 | 171 | ||
195 | ext_sig++; | 172 | ext_sig++; |
196 | } | 173 | } |
197 | |||
198 | return UCODE_NFOUND; | 174 | return UCODE_NFOUND; |
199 | } | 175 | } |
200 | 176 | ||
@@ -204,7 +180,7 @@ save_microcode(struct mc_saved_data *mc_saved_data, | |||
204 | unsigned int mc_saved_count) | 180 | unsigned int mc_saved_count) |
205 | { | 181 | { |
206 | int i, j; | 182 | int i, j; |
207 | struct microcode_intel **mc_saved_p; | 183 | struct microcode_intel **saved_ptr; |
208 | int ret; | 184 | int ret; |
209 | 185 | ||
210 | if (!mc_saved_count) | 186 | if (!mc_saved_count) |
@@ -213,39 +189,45 @@ save_microcode(struct mc_saved_data *mc_saved_data, | |||
213 | /* | 189 | /* |
214 | * Copy new microcode data. | 190 | * Copy new microcode data. |
215 | */ | 191 | */ |
216 | mc_saved_p = kmalloc(mc_saved_count*sizeof(struct microcode_intel *), | 192 | saved_ptr = kcalloc(mc_saved_count, sizeof(struct microcode_intel *), GFP_KERNEL); |
217 | GFP_KERNEL); | 193 | if (!saved_ptr) |
218 | if (!mc_saved_p) | ||
219 | return -ENOMEM; | 194 | return -ENOMEM; |
220 | 195 | ||
221 | for (i = 0; i < mc_saved_count; i++) { | 196 | for (i = 0; i < mc_saved_count; i++) { |
222 | struct microcode_intel *mc = mc_saved_src[i]; | 197 | struct microcode_header_intel *mc_hdr; |
223 | struct microcode_header_intel *mc_header = &mc->hdr; | 198 | struct microcode_intel *mc; |
224 | unsigned long mc_size = get_totalsize(mc_header); | 199 | unsigned long size; |
225 | mc_saved_p[i] = kmalloc(mc_size, GFP_KERNEL); | 200 | |
226 | if (!mc_saved_p[i]) { | ||
227 | ret = -ENOMEM; | ||
228 | goto err; | ||
229 | } | ||
230 | if (!mc_saved_src[i]) { | 201 | if (!mc_saved_src[i]) { |
231 | ret = -EINVAL; | 202 | ret = -EINVAL; |
232 | goto err; | 203 | goto err; |
233 | } | 204 | } |
234 | memcpy(mc_saved_p[i], mc, mc_size); | 205 | |
206 | mc = mc_saved_src[i]; | ||
207 | mc_hdr = &mc->hdr; | ||
208 | size = get_totalsize(mc_hdr); | ||
209 | |||
210 | saved_ptr[i] = kmalloc(size, GFP_KERNEL); | ||
211 | if (!saved_ptr[i]) { | ||
212 | ret = -ENOMEM; | ||
213 | goto err; | ||
214 | } | ||
215 | |||
216 | memcpy(saved_ptr[i], mc, size); | ||
235 | } | 217 | } |
236 | 218 | ||
237 | /* | 219 | /* |
238 | * Point to newly saved microcode. | 220 | * Point to newly saved microcode. |
239 | */ | 221 | */ |
240 | mc_saved_data->mc_saved = mc_saved_p; | 222 | mc_saved_data->mc_saved = saved_ptr; |
241 | mc_saved_data->mc_saved_count = mc_saved_count; | 223 | mc_saved_data->mc_saved_count = mc_saved_count; |
242 | 224 | ||
243 | return 0; | 225 | return 0; |
244 | 226 | ||
245 | err: | 227 | err: |
246 | for (j = 0; j <= i; j++) | 228 | for (j = 0; j <= i; j++) |
247 | kfree(mc_saved_p[j]); | 229 | kfree(saved_ptr[j]); |
248 | kfree(mc_saved_p); | 230 | kfree(saved_ptr); |
249 | 231 | ||
250 | return ret; | 232 | return ret; |
251 | } | 233 | } |
@@ -257,48 +239,45 @@ err: | |||
257 | * - or if it is a newly discovered microcode patch. | 239 | * - or if it is a newly discovered microcode patch. |
258 | * | 240 | * |
259 | * The microcode patch should have matching model with CPU. | 241 | * The microcode patch should have matching model with CPU. |
242 | * | ||
243 | * Returns: The updated number @num_saved of saved microcode patches. | ||
260 | */ | 244 | */ |
261 | static void _save_mc(struct microcode_intel **mc_saved, u8 *ucode_ptr, | 245 | static unsigned int _save_mc(struct microcode_intel **mc_saved, |
262 | unsigned int *mc_saved_count_p) | 246 | u8 *ucode_ptr, unsigned int num_saved) |
263 | { | 247 | { |
264 | int i; | 248 | struct microcode_header_intel *mc_hdr, *mc_saved_hdr; |
265 | int found = 0; | 249 | unsigned int sig, pf, new_rev; |
266 | unsigned int mc_saved_count = *mc_saved_count_p; | 250 | int found = 0, i; |
267 | struct microcode_header_intel *mc_header; | 251 | |
252 | mc_hdr = (struct microcode_header_intel *)ucode_ptr; | ||
253 | |||
254 | for (i = 0; i < num_saved; i++) { | ||
255 | mc_saved_hdr = (struct microcode_header_intel *)mc_saved[i]; | ||
256 | sig = mc_saved_hdr->sig; | ||
257 | pf = mc_saved_hdr->pf; | ||
258 | new_rev = mc_hdr->rev; | ||
259 | |||
260 | if (!get_matching_sig(sig, pf, new_rev, ucode_ptr)) | ||
261 | continue; | ||
262 | |||
263 | found = 1; | ||
264 | |||
265 | if (!revision_is_newer(mc_hdr, new_rev)) | ||
266 | continue; | ||
268 | 267 | ||
269 | mc_header = (struct microcode_header_intel *)ucode_ptr; | ||
270 | for (i = 0; i < mc_saved_count; i++) { | ||
271 | unsigned int sig, pf; | ||
272 | unsigned int new_rev; | ||
273 | struct microcode_header_intel *mc_saved_header = | ||
274 | (struct microcode_header_intel *)mc_saved[i]; | ||
275 | sig = mc_saved_header->sig; | ||
276 | pf = mc_saved_header->pf; | ||
277 | new_rev = mc_header->rev; | ||
278 | |||
279 | if (get_matching_sig(sig, pf, ucode_ptr, new_rev)) { | ||
280 | found = 1; | ||
281 | if (update_match_revision(mc_header, new_rev)) { | ||
282 | /* | ||
283 | * Found an older ucode saved before. | ||
284 | * Replace the older one with this newer | ||
285 | * one. | ||
286 | */ | ||
287 | mc_saved[i] = | ||
288 | (struct microcode_intel *)ucode_ptr; | ||
289 | break; | ||
290 | } | ||
291 | } | ||
292 | } | ||
293 | if (i >= mc_saved_count && !found) | ||
294 | /* | 268 | /* |
295 | * This ucode is first time discovered in ucode file. | 269 | * Found an older ucode saved earlier. Replace it with |
296 | * Save it to memory. | 270 | * this newer one. |
297 | */ | 271 | */ |
298 | mc_saved[mc_saved_count++] = | 272 | mc_saved[i] = (struct microcode_intel *)ucode_ptr; |
299 | (struct microcode_intel *)ucode_ptr; | 273 | break; |
274 | } | ||
275 | |||
276 | /* Newly detected microcode, save it to memory. */ | ||
277 | if (i >= num_saved && !found) | ||
278 | mc_saved[num_saved++] = (struct microcode_intel *)ucode_ptr; | ||
300 | 279 | ||
301 | *mc_saved_count_p = mc_saved_count; | 280 | return num_saved; |
302 | } | 281 | } |
303 | 282 | ||
304 | /* | 283 | /* |
@@ -346,7 +325,7 @@ get_matching_model_microcode(int cpu, unsigned long start, | |||
346 | continue; | 325 | continue; |
347 | } | 326 | } |
348 | 327 | ||
349 | _save_mc(mc_saved_tmp, ucode_ptr, &mc_saved_count); | 328 | mc_saved_count = _save_mc(mc_saved_tmp, ucode_ptr, mc_saved_count); |
350 | 329 | ||
351 | ucode_ptr += mc_size; | 330 | ucode_ptr += mc_size; |
352 | } | 331 | } |
@@ -372,7 +351,7 @@ out: | |||
372 | static int collect_cpu_info_early(struct ucode_cpu_info *uci) | 351 | static int collect_cpu_info_early(struct ucode_cpu_info *uci) |
373 | { | 352 | { |
374 | unsigned int val[2]; | 353 | unsigned int val[2]; |
375 | u8 x86, x86_model; | 354 | unsigned int family, model; |
376 | struct cpu_signature csig; | 355 | struct cpu_signature csig; |
377 | unsigned int eax, ebx, ecx, edx; | 356 | unsigned int eax, ebx, ecx, edx; |
378 | 357 | ||
@@ -387,10 +366,10 @@ static int collect_cpu_info_early(struct ucode_cpu_info *uci) | |||
387 | native_cpuid(&eax, &ebx, &ecx, &edx); | 366 | native_cpuid(&eax, &ebx, &ecx, &edx); |
388 | csig.sig = eax; | 367 | csig.sig = eax; |
389 | 368 | ||
390 | x86 = get_x86_family(csig.sig); | 369 | family = __x86_family(csig.sig); |
391 | x86_model = get_x86_model(csig.sig); | 370 | model = x86_model(csig.sig); |
392 | 371 | ||
393 | if ((x86_model >= 5) || (x86 > 6)) { | 372 | if ((model >= 5) || (family > 6)) { |
394 | /* get processor flags from MSR 0x17 */ | 373 | /* get processor flags from MSR 0x17 */ |
395 | native_rdmsr(MSR_IA32_PLATFORM_ID, val[0], val[1]); | 374 | native_rdmsr(MSR_IA32_PLATFORM_ID, val[0], val[1]); |
396 | csig.pf = 1 << ((val[1] >> 18) & 7); | 375 | csig.pf = 1 << ((val[1] >> 18) & 7); |
@@ -429,8 +408,7 @@ static void __ref show_saved_mc(void) | |||
429 | sig = uci.cpu_sig.sig; | 408 | sig = uci.cpu_sig.sig; |
430 | pf = uci.cpu_sig.pf; | 409 | pf = uci.cpu_sig.pf; |
431 | rev = uci.cpu_sig.rev; | 410 | rev = uci.cpu_sig.rev; |
432 | pr_debug("CPU%d: sig=0x%x, pf=0x%x, rev=0x%x\n", | 411 | pr_debug("CPU: sig=0x%x, pf=0x%x, rev=0x%x\n", sig, pf, rev); |
433 | smp_processor_id(), sig, pf, rev); | ||
434 | 412 | ||
435 | for (i = 0; i < mc_saved_data.mc_saved_count; i++) { | 413 | for (i = 0; i < mc_saved_data.mc_saved_count; i++) { |
436 | struct microcode_header_intel *mc_saved_header; | 414 | struct microcode_header_intel *mc_saved_header; |
@@ -457,8 +435,7 @@ static void __ref show_saved_mc(void) | |||
457 | if (total_size <= data_size + MC_HEADER_SIZE) | 435 | if (total_size <= data_size + MC_HEADER_SIZE) |
458 | continue; | 436 | continue; |
459 | 437 | ||
460 | ext_header = (struct extended_sigtable *) | 438 | ext_header = (void *) mc_saved_header + data_size + MC_HEADER_SIZE; |
461 | mc_saved_header + data_size + MC_HEADER_SIZE; | ||
462 | ext_sigcount = ext_header->count; | 439 | ext_sigcount = ext_header->count; |
463 | ext_sig = (void *)ext_header + EXT_HEADER_SIZE; | 440 | ext_sig = (void *)ext_header + EXT_HEADER_SIZE; |
464 | 441 | ||
@@ -515,8 +492,7 @@ int save_mc_for_early(u8 *mc) | |||
515 | * Save the microcode patch mc in mc_save_tmp structure if it's a newer | 492 | * Save the microcode patch mc in mc_save_tmp structure if it's a newer |
516 | * version. | 493 | * version. |
517 | */ | 494 | */ |
518 | 495 | mc_saved_count = _save_mc(mc_saved_tmp, mc, mc_saved_count); | |
519 | _save_mc(mc_saved_tmp, mc, &mc_saved_count); | ||
520 | 496 | ||
521 | /* | 497 | /* |
522 | * Save the mc_save_tmp in global mc_saved_data. | 498 | * Save the mc_save_tmp in global mc_saved_data. |
@@ -548,12 +524,10 @@ EXPORT_SYMBOL_GPL(save_mc_for_early); | |||
548 | 524 | ||
549 | static __initdata char ucode_name[] = "kernel/x86/microcode/GenuineIntel.bin"; | 525 | static __initdata char ucode_name[] = "kernel/x86/microcode/GenuineIntel.bin"; |
550 | static __init enum ucode_state | 526 | static __init enum ucode_state |
551 | scan_microcode(unsigned long start, unsigned long end, | 527 | scan_microcode(struct mc_saved_data *mc_saved_data, unsigned long *initrd, |
552 | struct mc_saved_data *mc_saved_data, | 528 | unsigned long start, unsigned long size, |
553 | unsigned long *mc_saved_in_initrd, | 529 | struct ucode_cpu_info *uci) |
554 | struct ucode_cpu_info *uci) | ||
555 | { | 530 | { |
556 | unsigned int size = end - start + 1; | ||
557 | struct cpio_data cd; | 531 | struct cpio_data cd; |
558 | long offset = 0; | 532 | long offset = 0; |
559 | #ifdef CONFIG_X86_32 | 533 | #ifdef CONFIG_X86_32 |
@@ -569,10 +543,8 @@ scan_microcode(unsigned long start, unsigned long end, | |||
569 | if (!cd.data) | 543 | if (!cd.data) |
570 | return UCODE_ERROR; | 544 | return UCODE_ERROR; |
571 | 545 | ||
572 | |||
573 | return get_matching_model_microcode(0, start, cd.data, cd.size, | 546 | return get_matching_model_microcode(0, start, cd.data, cd.size, |
574 | mc_saved_data, mc_saved_in_initrd, | 547 | mc_saved_data, initrd, uci); |
575 | uci); | ||
576 | } | 548 | } |
577 | 549 | ||
578 | /* | 550 | /* |
@@ -704,7 +676,7 @@ int __init save_microcode_in_initrd_intel(void) | |||
704 | if (count == 0) | 676 | if (count == 0) |
705 | return ret; | 677 | return ret; |
706 | 678 | ||
707 | microcode_pointer(mc_saved, mc_saved_in_initrd, initrd_start, count); | 679 | copy_initrd_ptrs(mc_saved, mc_saved_in_initrd, initrd_start, count); |
708 | ret = save_microcode(&mc_saved_data, mc_saved, count); | 680 | ret = save_microcode(&mc_saved_data, mc_saved, count); |
709 | if (ret) | 681 | if (ret) |
710 | pr_err("Cannot save microcode patches from initrd.\n"); | 682 | pr_err("Cannot save microcode patches from initrd.\n"); |
@@ -716,52 +688,44 @@ int __init save_microcode_in_initrd_intel(void) | |||
716 | 688 | ||
717 | static void __init | 689 | static void __init |
718 | _load_ucode_intel_bsp(struct mc_saved_data *mc_saved_data, | 690 | _load_ucode_intel_bsp(struct mc_saved_data *mc_saved_data, |
719 | unsigned long *mc_saved_in_initrd, | 691 | unsigned long *initrd, |
720 | unsigned long initrd_start_early, | 692 | unsigned long start, unsigned long size) |
721 | unsigned long initrd_end_early, | ||
722 | struct ucode_cpu_info *uci) | ||
723 | { | 693 | { |
694 | struct ucode_cpu_info uci; | ||
724 | enum ucode_state ret; | 695 | enum ucode_state ret; |
725 | 696 | ||
726 | collect_cpu_info_early(uci); | 697 | collect_cpu_info_early(&uci); |
727 | scan_microcode(initrd_start_early, initrd_end_early, mc_saved_data, | ||
728 | mc_saved_in_initrd, uci); | ||
729 | 698 | ||
730 | ret = load_microcode(mc_saved_data, mc_saved_in_initrd, | 699 | ret = scan_microcode(mc_saved_data, initrd, start, size, &uci); |
731 | initrd_start_early, uci); | 700 | if (ret != UCODE_OK) |
701 | return; | ||
732 | 702 | ||
733 | if (ret == UCODE_OK) | 703 | ret = load_microcode(mc_saved_data, initrd, start, &uci); |
734 | apply_microcode_early(uci, true); | 704 | if (ret != UCODE_OK) |
705 | return; | ||
706 | |||
707 | apply_microcode_early(&uci, true); | ||
735 | } | 708 | } |
736 | 709 | ||
737 | void __init | 710 | void __init load_ucode_intel_bsp(void) |
738 | load_ucode_intel_bsp(void) | ||
739 | { | 711 | { |
740 | u64 ramdisk_image, ramdisk_size; | 712 | u64 start, size; |
741 | unsigned long initrd_start_early, initrd_end_early; | ||
742 | struct ucode_cpu_info uci; | ||
743 | #ifdef CONFIG_X86_32 | 713 | #ifdef CONFIG_X86_32 |
744 | struct boot_params *boot_params_p; | 714 | struct boot_params *p; |
745 | 715 | ||
746 | boot_params_p = (struct boot_params *)__pa_nodebug(&boot_params); | 716 | p = (struct boot_params *)__pa_nodebug(&boot_params); |
747 | ramdisk_image = boot_params_p->hdr.ramdisk_image; | 717 | start = p->hdr.ramdisk_image; |
748 | ramdisk_size = boot_params_p->hdr.ramdisk_size; | 718 | size = p->hdr.ramdisk_size; |
749 | initrd_start_early = ramdisk_image; | ||
750 | initrd_end_early = initrd_start_early + ramdisk_size; | ||
751 | 719 | ||
752 | _load_ucode_intel_bsp( | 720 | _load_ucode_intel_bsp( |
753 | (struct mc_saved_data *)__pa_nodebug(&mc_saved_data), | 721 | (struct mc_saved_data *)__pa_nodebug(&mc_saved_data), |
754 | (unsigned long *)__pa_nodebug(&mc_saved_in_initrd), | 722 | (unsigned long *)__pa_nodebug(&mc_saved_in_initrd), |
755 | initrd_start_early, initrd_end_early, &uci); | 723 | start, size); |
756 | #else | 724 | #else |
757 | ramdisk_image = boot_params.hdr.ramdisk_image; | 725 | start = boot_params.hdr.ramdisk_image + PAGE_OFFSET; |
758 | ramdisk_size = boot_params.hdr.ramdisk_size; | 726 | size = boot_params.hdr.ramdisk_size; |
759 | initrd_start_early = ramdisk_image + PAGE_OFFSET; | 727 | |
760 | initrd_end_early = initrd_start_early + ramdisk_size; | 728 | _load_ucode_intel_bsp(&mc_saved_data, mc_saved_in_initrd, start, size); |
761 | |||
762 | _load_ucode_intel_bsp(&mc_saved_data, mc_saved_in_initrd, | ||
763 | initrd_start_early, initrd_end_early, | ||
764 | &uci); | ||
765 | #endif | 729 | #endif |
766 | } | 730 | } |
767 | 731 | ||
@@ -771,6 +735,7 @@ void load_ucode_intel_ap(void) | |||
771 | struct ucode_cpu_info uci; | 735 | struct ucode_cpu_info uci; |
772 | unsigned long *mc_saved_in_initrd_p; | 736 | unsigned long *mc_saved_in_initrd_p; |
773 | unsigned long initrd_start_addr; | 737 | unsigned long initrd_start_addr; |
738 | enum ucode_state ret; | ||
774 | #ifdef CONFIG_X86_32 | 739 | #ifdef CONFIG_X86_32 |
775 | unsigned long *initrd_start_p; | 740 | unsigned long *initrd_start_p; |
776 | 741 | ||
@@ -793,8 +758,12 @@ void load_ucode_intel_ap(void) | |||
793 | return; | 758 | return; |
794 | 759 | ||
795 | collect_cpu_info_early(&uci); | 760 | collect_cpu_info_early(&uci); |
796 | load_microcode(mc_saved_data_p, mc_saved_in_initrd_p, | 761 | ret = load_microcode(mc_saved_data_p, mc_saved_in_initrd_p, |
797 | initrd_start_addr, &uci); | 762 | initrd_start_addr, &uci); |
763 | |||
764 | if (ret != UCODE_OK) | ||
765 | return; | ||
766 | |||
798 | apply_microcode_early(&uci, true); | 767 | apply_microcode_early(&uci, true); |
799 | } | 768 | } |
800 | 769 | ||
@@ -808,8 +777,8 @@ void reload_ucode_intel(void) | |||
808 | 777 | ||
809 | collect_cpu_info_early(&uci); | 778 | collect_cpu_info_early(&uci); |
810 | 779 | ||
811 | ret = generic_load_microcode_early(mc_saved_data.mc_saved, | 780 | ret = load_microcode_early(mc_saved_data.mc_saved, |
812 | mc_saved_data.mc_saved_count, &uci); | 781 | mc_saved_data.mc_saved_count, &uci); |
813 | if (ret != UCODE_OK) | 782 | if (ret != UCODE_OK) |
814 | return; | 783 | return; |
815 | 784 | ||
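The reworked _save_mc() above returns the updated count instead of writing it back through a pointer, but the policy is unchanged: a patch whose signature matches an already-saved entry replaces that entry only if its revision is newer, and a signature seen for the first time is appended. A compressed sketch of that save-or-replace logic, with same_signature()/is_newer() as hypothetical stand-ins for get_matching_sig() and revision_is_newer():

/* Sketch of the _save_mc() policy; helpers are hypothetical stand-ins. */
static unsigned int save_mc_sketch(void **saved, unsigned int num_saved, void *patch)
{
        unsigned int i;
        int found = 0;

        for (i = 0; i < num_saved; i++) {
                if (!same_signature(saved[i], patch))
                        continue;

                found = 1;

                if (!is_newer(patch, saved[i]))
                        continue;

                saved[i] = patch;       /* replace the older saved copy */
                break;
        }

        if (i >= num_saved && !found)
                saved[num_saved++] = patch;     /* first time seen: append */

        return num_saved;
}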
diff --git a/arch/x86/kernel/cpu/microcode/intel_lib.c b/arch/x86/kernel/cpu/microcode/intel_lib.c index ce69320d0179..cd47a510a3f1 100644 --- a/arch/x86/kernel/cpu/microcode/intel_lib.c +++ b/arch/x86/kernel/cpu/microcode/intel_lib.c | |||
@@ -38,12 +38,6 @@ update_match_cpu(unsigned int csig, unsigned int cpf, | |||
38 | return (!sigmatch(sig, csig, pf, cpf)) ? 0 : 1; | 38 | return (!sigmatch(sig, csig, pf, cpf)) ? 0 : 1; |
39 | } | 39 | } |
40 | 40 | ||
41 | int | ||
42 | update_match_revision(struct microcode_header_intel *mc_header, int rev) | ||
43 | { | ||
44 | return (mc_header->rev <= rev) ? 0 : 1; | ||
45 | } | ||
46 | |||
47 | int microcode_sanity_check(void *mc, int print_err) | 41 | int microcode_sanity_check(void *mc, int print_err) |
48 | { | 42 | { |
49 | unsigned long total_size, data_size, ext_table_size; | 43 | unsigned long total_size, data_size, ext_table_size; |
@@ -128,10 +122,9 @@ int microcode_sanity_check(void *mc, int print_err) | |||
128 | EXPORT_SYMBOL_GPL(microcode_sanity_check); | 122 | EXPORT_SYMBOL_GPL(microcode_sanity_check); |
129 | 123 | ||
130 | /* | 124 | /* |
131 | * return 0 - no update found | 125 | * Returns 1 if update has been found, 0 otherwise. |
132 | * return 1 - found update | ||
133 | */ | 126 | */ |
134 | int get_matching_sig(unsigned int csig, int cpf, void *mc, int rev) | 127 | int get_matching_sig(unsigned int csig, int cpf, int rev, void *mc) |
135 | { | 128 | { |
136 | struct microcode_header_intel *mc_header = mc; | 129 | struct microcode_header_intel *mc_header = mc; |
137 | struct extended_sigtable *ext_header; | 130 | struct extended_sigtable *ext_header; |
@@ -159,16 +152,15 @@ int get_matching_sig(unsigned int csig, int cpf, void *mc, int rev) | |||
159 | } | 152 | } |
160 | 153 | ||
161 | /* | 154 | /* |
162 | * return 0 - no update found | 155 | * Returns 1 if update has been found, 0 otherwise. |
163 | * return 1 - found update | ||
164 | */ | 156 | */ |
165 | int get_matching_microcode(unsigned int csig, int cpf, void *mc, int rev) | 157 | int get_matching_microcode(unsigned int csig, int cpf, int rev, void *mc) |
166 | { | 158 | { |
167 | struct microcode_header_intel *mc_header = mc; | 159 | struct microcode_header_intel *mc_hdr = mc; |
168 | 160 | ||
169 | if (!update_match_revision(mc_header, rev)) | 161 | if (!revision_is_newer(mc_hdr, rev)) |
170 | return 0; | 162 | return 0; |
171 | 163 | ||
172 | return get_matching_sig(csig, cpf, mc, rev); | 164 | return get_matching_sig(csig, cpf, rev, mc); |
173 | } | 165 | } |
174 | EXPORT_SYMBOL_GPL(get_matching_microcode); | 166 | EXPORT_SYMBOL_GPL(get_matching_microcode); |
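With update_match_revision() gone, intel_lib.c phrases the match as two questions asked in order: is the patch revision newer than what the CPU already runs, and does the patch's signature/platform-flags table cover this CPU? The callers shown earlier (intel.c, intel_early.c) follow the new (csig, cpf, rev, mc) argument order. A rough sketch of that flow, with the header reduced to the fields it needs and the extended signature table omitted:

/* Abridged sketch of the Intel microcode matching flow; not the real layout. */
struct mc_header_sketch {
        unsigned int rev;       /* patch revision */
        unsigned int sig;       /* CPU signature the patch targets */
        unsigned int pf;        /* platform-flags mask */
};

static int revision_is_newer_sketch(const struct mc_header_sketch *hdr, int rev)
{
        return hdr->rev > rev;
}

static int sig_matches_sketch(unsigned int csig, unsigned int cpf,
                              const struct mc_header_sketch *hdr)
{
        /* The real code also walks the extended signature table, if present. */
        return hdr->sig == csig && (hdr->pf & cpf);
}

static int matching_microcode_sketch(unsigned int csig, unsigned int cpf, int rev,
                                     const struct mc_header_sketch *hdr)
{
        if (!revision_is_newer_sketch(hdr, rev))
                return 0;               /* nothing newer than what is loaded */

        return sig_matches_sketch(csig, cpf, hdr);
}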
diff --git a/arch/x86/kernel/cpu/mkcapflags.sh b/arch/x86/kernel/cpu/mkcapflags.sh index 36d99a337b49..3f20710a5b23 100644 --- a/arch/x86/kernel/cpu/mkcapflags.sh +++ b/arch/x86/kernel/cpu/mkcapflags.sh | |||
@@ -6,7 +6,7 @@ | |||
6 | IN=$1 | 6 | IN=$1 |
7 | OUT=$2 | 7 | OUT=$2 |
8 | 8 | ||
9 | function dump_array() | 9 | dump_array() |
10 | { | 10 | { |
11 | ARRAY=$1 | 11 | ARRAY=$1 |
12 | SIZE=$2 | 12 | SIZE=$2 |
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c index b71a7f86d68a..e2888a3ad1e3 100644 --- a/arch/x86/kernel/cpu/perf_event.c +++ b/arch/x86/kernel/cpu/perf_event.c | |||
@@ -2147,24 +2147,24 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs) | |||
2147 | static unsigned long code_segment_base(struct pt_regs *regs) | 2147 | static unsigned long code_segment_base(struct pt_regs *regs) |
2148 | { | 2148 | { |
2149 | /* | 2149 | /* |
2150 | * For IA32 we look at the GDT/LDT segment base to convert the | ||
2151 | * effective IP to a linear address. | ||
2152 | */ | ||
2153 | |||
2154 | #ifdef CONFIG_X86_32 | ||
2155 | /* | ||
2150 | * If we are in VM86 mode, add the segment offset to convert to a | 2156 | * If we are in VM86 mode, add the segment offset to convert to a |
2151 | * linear address. | 2157 | * linear address. |
2152 | */ | 2158 | */ |
2153 | if (regs->flags & X86_VM_MASK) | 2159 | if (regs->flags & X86_VM_MASK) |
2154 | return 0x10 * regs->cs; | 2160 | return 0x10 * regs->cs; |
2155 | 2161 | ||
2156 | /* | ||
2157 | * For IA32 we look at the GDT/LDT segment base to convert the | ||
2158 | * effective IP to a linear address. | ||
2159 | */ | ||
2160 | #ifdef CONFIG_X86_32 | ||
2161 | if (user_mode(regs) && regs->cs != __USER_CS) | 2162 | if (user_mode(regs) && regs->cs != __USER_CS) |
2162 | return get_segment_base(regs->cs); | 2163 | return get_segment_base(regs->cs); |
2163 | #else | 2164 | #else |
2164 | if (test_thread_flag(TIF_IA32)) { | 2165 | if (user_mode(regs) && !user_64bit_mode(regs) && |
2165 | if (user_mode(regs) && regs->cs != __USER32_CS) | 2166 | regs->cs != __USER32_CS) |
2166 | return get_segment_base(regs->cs); | 2167 | return get_segment_base(regs->cs); |
2167 | } | ||
2168 | #endif | 2168 | #endif |
2169 | return 0; | 2169 | return 0; |
2170 | } | 2170 | } |
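On 64-bit kernels the sampling code stops keying on the per-task TIF_IA32 flag and instead asks the saved register state whether the sampled context is 32-bit, via the existing user_64bit_mode() helper from asm/ptrace.h; presumably this classifies each sample by its own CS rather than by a thread-wide flag, which matters when a 64-bit task executes 32-bit code. Condensed, the 64-bit branch now reads as below; the usage line after it is an assumption about a typical consumer, not part of this hunk:

    /* 64-bit kernel: a compat (32-bit) user context needs its CS base added in. */
    if (user_mode(regs) && !user_64bit_mode(regs) && regs->cs != __USER32_CS)
            return get_segment_base(regs->cs);
    return 0;

    /* Assumed usage: turn a sample's effective IP into a linear address. */
    unsigned long linear_ip = regs->ip + code_segment_base(regs);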
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c index 498b6d967138..258990688a5e 100644 --- a/arch/x86/kernel/cpu/perf_event_intel.c +++ b/arch/x86/kernel/cpu/perf_event_intel.c | |||
@@ -212,11 +212,11 @@ static struct event_constraint intel_hsw_event_constraints[] = { | |||
212 | INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */ | 212 | INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */ |
213 | INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */ | 213 | INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */ |
214 | /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */ | 214 | /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */ |
215 | INTEL_EVENT_CONSTRAINT(0x08a3, 0x4), | 215 | INTEL_UEVENT_CONSTRAINT(0x08a3, 0x4), |
216 | /* CYCLE_ACTIVITY.STALLS_L1D_PENDING */ | 216 | /* CYCLE_ACTIVITY.STALLS_L1D_PENDING */ |
217 | INTEL_EVENT_CONSTRAINT(0x0ca3, 0x4), | 217 | INTEL_UEVENT_CONSTRAINT(0x0ca3, 0x4), |
218 | /* CYCLE_ACTIVITY.CYCLES_NO_EXECUTE */ | 218 | /* CYCLE_ACTIVITY.CYCLES_NO_EXECUTE */ |
219 | INTEL_EVENT_CONSTRAINT(0x04a3, 0xf), | 219 | INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf), |
220 | EVENT_CONSTRAINT_END | 220 | EVENT_CONSTRAINT_END |
221 | }; | 221 | }; |
222 | 222 | ||
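The three CYCLE_ACTIVITY entries encode a umask in the upper byte of their code (0x08a3, 0x0ca3, 0x04a3), so they need the constraint macro that matches on event code plus umask; the event-only macro would have compared just the low byte (0xa3) and applied one counter mask to every CYCLE_ACTIVITY variant. A small sketch of the difference, using the architectural PERFEVTSEL layout (event select in bits 7:0, unit mask in bits 15:8); the helper names and mask constants below are illustrative, not taken from the perf headers:

    #define EVSEL_EVENT_MASK   0x000000ffULL    /* event select, bits 7:0  */
    #define EVSEL_UMASK_MASK   0x0000ff00ULL    /* unit mask,    bits 15:8 */

    /* Roughly what INTEL_EVENT_CONSTRAINT(0x08a3, ...) matches: */
    static int matches_event_only(u64 config)
    {
            return (config & EVSEL_EVENT_MASK) == 0xa3;     /* any umask */
    }

    /* Roughly what INTEL_UEVENT_CONSTRAINT(0x08a3, ...) matches: */
    static int matches_event_and_umask(u64 config)
    {
            u64 mask = EVSEL_EVENT_MASK | EVSEL_UMASK_MASK;

            return (config & mask) == 0x08a3;               /* exact umask 0x08 */
    }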
@@ -1649,11 +1649,11 @@ intel_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event | |||
1649 | if (c) | 1649 | if (c) |
1650 | return c; | 1650 | return c; |
1651 | 1651 | ||
1652 | c = intel_pebs_constraints(event); | 1652 | c = intel_shared_regs_constraints(cpuc, event); |
1653 | if (c) | 1653 | if (c) |
1654 | return c; | 1654 | return c; |
1655 | 1655 | ||
1656 | c = intel_shared_regs_constraints(cpuc, event); | 1656 | c = intel_pebs_constraints(event); |
1657 | if (c) | 1657 | if (c) |
1658 | return c; | 1658 | return c; |
1659 | 1659 | ||
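The final hunk swaps two lookups so shared-register constraints are consulted before PEBS constraints when intel_get_event_constraints() resolves an event. The resulting cascade, as a sketch only; the steps outside the hunk (the BTS check above and the generic fallback below) are assumptions based on the usual shape of this function:

    c = intel_bts_constraints(event);               /* assumed preceding step */
    if (c)
            return c;

    c = intel_shared_regs_constraints(cpuc, event); /* now consulted first... */
    if (c)
            return c;

    c = intel_pebs_constraints(event);              /* ...then PEBS */
    if (c)
            return c;

    return x86_get_event_constraints(cpuc, event);  /* assumed final fallback */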