Diffstat (limited to 'arch/x86/kernel/cpu')
 arch/x86/kernel/cpu/amd.c                 |  77
 arch/x86/kernel/cpu/common.c              |  26
 arch/x86/kernel/cpu/cpu.h                 |   2
 arch/x86/kernel/cpu/cpufreq/pcc-cpufreq.c |  18
 arch/x86/kernel/cpu/intel.c               |   7
 arch/x86/kernel/cpu/intel_cacheinfo.c     |  14
 arch/x86/kernel/cpu/mcheck/mce-severity.c |   1
 arch/x86/kernel/cpu/mcheck/mce.c          |   1
 arch/x86/kernel/cpu/mcheck/mce_amd.c      |  36
 arch/x86/kernel/cpu/mcheck/therm_throt.c  |   5
 arch/x86/kernel/cpu/mtrr/cleanup.c        |   2
 arch/x86/kernel/cpu/mtrr/generic.c        | 128
 arch/x86/kernel/cpu/perf_event.c          | 280
 arch/x86/kernel/cpu/perf_event_amd.c      |   4
 arch/x86/kernel/cpu/perf_event_intel.c    |   8
 arch/x86/kernel/cpu/perf_event_intel_ds.c |  13
 arch/x86/kernel/cpu/perf_event_p4.c       | 298
 arch/x86/kernel/cpu/perfctr-watchdog.c    |   9
 arch/x86/kernel/cpu/scattered.c           |   6
 19 files changed, 647 insertions(+), 288 deletions(-)
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index ba5f62f45f01..9e093f8fe78c 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -148,7 +148,7 @@ static void __cpuinit amd_k7_smp_check(struct cpuinfo_x86 *c)
 {
 #ifdef CONFIG_SMP
 	/* calling is from identify_secondary_cpu() ? */
-	if (c->cpu_index == boot_cpu_id)
+	if (!c->cpu_index)
 		return;
 
 	/*
@@ -253,37 +253,51 @@ static int __cpuinit nearby_node(int apicid)
 #endif
 
 /*
- * Fixup core topology information for AMD multi-node processors.
- * Assumption: Number of cores in each internal node is the same.
+ * Fixup core topology information for
+ * (1) AMD multi-node processors
+ *     Assumption: Number of cores in each internal node is the same.
+ * (2) AMD processors supporting compute units
  */
 #ifdef CONFIG_X86_HT
-static void __cpuinit amd_fixup_dcm(struct cpuinfo_x86 *c)
+static void __cpuinit amd_get_topology(struct cpuinfo_x86 *c)
 {
-	unsigned long long value;
-	u32 nodes, cores_per_node;
+	u32 nodes;
+	u8 node_id;
 	int cpu = smp_processor_id();
 
-	if (!cpu_has(c, X86_FEATURE_NODEID_MSR))
-		return;
+	/* get information required for multi-node processors */
+	if (cpu_has(c, X86_FEATURE_TOPOEXT)) {
+		u32 eax, ebx, ecx, edx;
 
-	/* fixup topology information only once for a core */
-	if (cpu_has(c, X86_FEATURE_AMD_DCM))
-		return;
+		cpuid(0x8000001e, &eax, &ebx, &ecx, &edx);
+		nodes = ((ecx >> 8) & 7) + 1;
+		node_id = ecx & 7;
 
-	rdmsrl(MSR_FAM10H_NODE_ID, value);
+		/* get compute unit information */
+		smp_num_siblings = ((ebx >> 8) & 3) + 1;
+		c->compute_unit_id = ebx & 0xff;
+	} else if (cpu_has(c, X86_FEATURE_NODEID_MSR)) {
+		u64 value;
 
-	nodes = ((value >> 3) & 7) + 1;
-	if (nodes == 1)
+		rdmsrl(MSR_FAM10H_NODE_ID, value);
+		nodes = ((value >> 3) & 7) + 1;
+		node_id = value & 7;
+	} else
 		return;
 
-	set_cpu_cap(c, X86_FEATURE_AMD_DCM);
-	cores_per_node = c->x86_max_cores / nodes;
+	/* fixup multi-node processor information */
+	if (nodes > 1) {
+		u32 cores_per_node;
+
+		set_cpu_cap(c, X86_FEATURE_AMD_DCM);
+		cores_per_node = c->x86_max_cores / nodes;
 
-	/* store NodeID, use llc_shared_map to store sibling info */
-	per_cpu(cpu_llc_id, cpu) = value & 7;
+		/* store NodeID, use llc_shared_map to store sibling info */
+		per_cpu(cpu_llc_id, cpu) = node_id;
 
-	/* fixup core id to be in range from 0 to (cores_per_node - 1) */
-	c->cpu_core_id = c->cpu_core_id % cores_per_node;
+		/* core id to be in range from 0 to (cores_per_node - 1) */
+		c->cpu_core_id = c->cpu_core_id % cores_per_node;
+	}
 }
 #endif
 
@@ -304,9 +318,7 @@ static void __cpuinit amd_detect_cmp(struct cpuinfo_x86 *c)
 	c->phys_proc_id = c->initial_apicid >> bits;
 	/* use socket ID also for last level cache */
 	per_cpu(cpu_llc_id, cpu) = c->phys_proc_id;
-	/* fixup topology information on multi-node processors */
-	if ((c->x86 == 0x10) && (c->x86_model == 9))
-		amd_fixup_dcm(c);
+	amd_get_topology(c);
 #endif
 }
 
@@ -412,6 +424,23 @@ static void __cpuinit early_init_amd(struct cpuinfo_x86 *c)
 		set_cpu_cap(c, X86_FEATURE_EXTD_APICID);
 	}
 #endif
+
+	/* We need to do the following only once */
+	if (c != &boot_cpu_data)
+		return;
+
+	if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) {
+
+		if (c->x86 > 0x10 ||
+		    (c->x86 == 0x10 && c->x86_model >= 0x2)) {
+			u64 val;
+
+			rdmsrl(MSR_K7_HWCR, val);
+			if (!(val & BIT(24)))
+				printk(KERN_WARNING FW_BUG "TSC doesn't count "
+					"with P0 frequency!\n");
+		}
+	}
 }
 
 static void __cpuinit init_amd(struct cpuinfo_x86 *c)
@@ -523,7 +552,7 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
 #endif
 
 	if (c->extended_cpuid_level >= 0x80000006) {
-		if ((c->x86 >= 0x0f) && (cpuid_edx(0x80000006) & 0xf000))
+		if (cpuid_edx(0x80000006) & 0xf000)
 			num_cache_leaves = 4;
 		else
 			num_cache_leaves = 3;
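
The interesting part of the amd.c change is the new CPUID-based decode in amd_get_topology(). As a rough illustration of the same bit layout, here is a minimal user-space sketch; the cpuid() wrapper below is a stand-in for the kernel helper, and leaf 0x8000001e only returns meaningful data on AMD parts that advertise X86_FEATURE_TOPOEXT.

#include <stdint.h>
#include <stdio.h>

/* Illustrative cpuid wrapper; the kernel uses its own cpuid() helper. */
static void cpuid(uint32_t leaf, uint32_t *a, uint32_t *b,
		  uint32_t *c, uint32_t *d)
{
	__asm__ volatile("cpuid"
			 : "=a"(*a), "=b"(*b), "=c"(*c), "=d"(*d)
			 : "a"(leaf), "c"(0));
}

int main(void)
{
	uint32_t eax, ebx, ecx, edx;

	cpuid(0x8000001e, &eax, &ebx, &ecx, &edx);

	/* Same field extraction as amd_get_topology() in the hunk above */
	printf("nodes per processor: %u\n", ((ecx >> 8) & 7) + 1);
	printf("node id:             %u\n", ecx & 7);
	printf("siblings per unit:   %u\n", ((ebx >> 8) & 3) + 1);
	printf("compute unit id:     %u\n", ebx & 0xff);
	return 0;
}
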
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 490dac63c2d2..4b68bda30938 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -545,7 +545,7 @@ void __cpuinit cpu_detect(struct cpuinfo_x86 *c)
 	}
 }
 
-static void __cpuinit get_cpu_cap(struct cpuinfo_x86 *c)
+void __cpuinit get_cpu_cap(struct cpuinfo_x86 *c)
 {
 	u32 tfms, xlvl;
 	u32 ebx;
@@ -665,7 +665,7 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
 		this_cpu->c_early_init(c);
 
 #ifdef CONFIG_SMP
-	c->cpu_index = boot_cpu_id;
+	c->cpu_index = 0;
 #endif
 	filter_cpuid_features(c, false);
 }
@@ -704,16 +704,21 @@ void __init early_cpu_init(void)
 }
 
 /*
- * The NOPL instruction is supposed to exist on all CPUs with
- * family >= 6; unfortunately, that's not true in practice because
- * of early VIA chips and (more importantly) broken virtualizers that
- * are not easy to detect. In the latter case it doesn't even *fail*
- * reliably, so probing for it doesn't even work. Disable it completely
+ * The NOPL instruction is supposed to exist on all CPUs of family >= 6;
+ * unfortunately, that's not true in practice because of early VIA
+ * chips and (more importantly) broken virtualizers that are not easy
+ * to detect. In the latter case it doesn't even *fail* reliably, so
+ * probing for it doesn't even work. Disable it completely on 32-bit
 * unless we can find a reliable way to detect all the broken cases.
+ * Enable it explicitly on 64-bit for non-constant inputs of cpu_has().
 */
 static void __cpuinit detect_nopl(struct cpuinfo_x86 *c)
 {
+#ifdef CONFIG_X86_32
 	clear_cpu_cap(c, X86_FEATURE_NOPL);
+#else
+	set_cpu_cap(c, X86_FEATURE_NOPL);
+#endif
 }
 
 static void __cpuinit generic_identify(struct cpuinfo_x86 *c)
@@ -1264,13 +1269,6 @@ void __cpuinit cpu_init(void)
 	clear_all_debug_regs();
 	dbg_restore_debug_regs();
 
-	/*
-	 * Force FPU initialization:
-	 */
-	current_thread_info()->status = 0;
-	clear_used_math();
-	mxcsr_feature_mask_init();
-
 	fpu_init();
 	xsave_init();
 }
diff --git a/arch/x86/kernel/cpu/cpu.h b/arch/x86/kernel/cpu/cpu.h
index 3624e8a0f71b..e765633f210e 100644
--- a/arch/x86/kernel/cpu/cpu.h
+++ b/arch/x86/kernel/cpu/cpu.h
@@ -32,6 +32,8 @@ struct cpu_dev {
 extern const struct cpu_dev *const __x86_cpu_dev_start[],
 			    *const __x86_cpu_dev_end[];
 
+extern void get_cpu_cap(struct cpuinfo_x86 *c);
 extern void cpu_detect_cache_sizes(struct cpuinfo_x86 *c);
+extern void get_cpu_cap(struct cpuinfo_x86 *c);
 
 #endif
diff --git a/arch/x86/kernel/cpu/cpufreq/pcc-cpufreq.c b/arch/x86/kernel/cpu/cpufreq/pcc-cpufreq.c
index 994230d4dc4e..4f6f679f2799 100644
--- a/arch/x86/kernel/cpu/cpufreq/pcc-cpufreq.c
+++ b/arch/x86/kernel/cpu/cpufreq/pcc-cpufreq.c
@@ -368,16 +368,22 @@ static int __init pcc_cpufreq_do_osc(acpi_handle *handle)
 		return -ENODEV;
 
 	out_obj = output.pointer;
-	if (out_obj->type != ACPI_TYPE_BUFFER)
-		return -ENODEV;
+	if (out_obj->type != ACPI_TYPE_BUFFER) {
+		ret = -ENODEV;
+		goto out_free;
+	}
 
 	errors = *((u32 *)out_obj->buffer.pointer) & ~(1 << 0);
-	if (errors)
-		return -ENODEV;
+	if (errors) {
+		ret = -ENODEV;
+		goto out_free;
+	}
 
 	supported = *((u32 *)(out_obj->buffer.pointer + 4));
-	if (!(supported & 0x1))
-		return -ENODEV;
+	if (!(supported & 0x1)) {
+		ret = -ENODEV;
+		goto out_free;
+	}
 
 out_free:
 	kfree(output.pointer);
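
The pcc-cpufreq hunks are a leak fix: each early return -ENODEV used to skip the kfree(output.pointer) at the out_free label. A minimal sketch of the single-exit cleanup idiom the patch converges on; type_ok()/bits_ok() are hypothetical placeholders, not driver functions.

#include <stdlib.h>

/* Hypothetical helpers standing in for the driver's ACPI result checks. */
static int type_ok(const char *buf) { return buf != NULL; }
static int bits_ok(const char *buf) { return buf[0] == 0; }

static int do_osc_like(void)
{
	int ret = 0;
	char *buf = calloc(1, 64);

	if (!buf)
		return -1;

	if (!type_ok(buf)) {
		ret = -1;
		goto out_free;	/* error paths still reach the free */
	}
	if (!bits_ok(buf)) {
		ret = -1;
		goto out_free;
	}

out_free:
	free(buf);		/* one exit point frees the buffer exactly once */
	return ret;
}

int main(void)
{
	return do_osc_like();
}
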
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index 85f69cdeae10..d16c2c53d6bf 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -39,6 +39,7 @@ static void __cpuinit early_init_intel(struct cpuinfo_x86 *c)
 		misc_enable &= ~MSR_IA32_MISC_ENABLE_LIMIT_CPUID;
 		wrmsrl(MSR_IA32_MISC_ENABLE, misc_enable);
 		c->cpuid_level = cpuid_eax(0);
+		get_cpu_cap(c);
 	}
 }
 
@@ -169,7 +170,7 @@ static void __cpuinit intel_smp_check(struct cpuinfo_x86 *c)
 {
 #ifdef CONFIG_SMP
 	/* calling is from identify_secondary_cpu() ? */
-	if (c->cpu_index == boot_cpu_id)
+	if (!c->cpu_index)
 		return;
 
 	/*
@@ -283,9 +284,7 @@ static void __cpuinit srat_detect_node(struct cpuinfo_x86 *c)
 	/* Don't do the funky fallback heuristics the AMD version employs
 	   for now. */
 	node = apicid_to_node[apicid];
-	if (node == NUMA_NO_NODE)
-		node = first_node(node_online_map);
-	else if (!node_online(node)) {
+	if (node == NUMA_NO_NODE || !node_online(node)) {
 		/* reuse the value from init_cpu_to_node() */
 		node = cpu_to_node(cpu);
 	}
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index 898c2f4eab88..12cd823c8d03 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -17,7 +17,7 @@
 
 #include <asm/processor.h>
 #include <linux/smp.h>
-#include <asm/k8.h>
+#include <asm/amd_nb.h>
 #include <asm/smp.h>
 
 #define LVL_1_INST	1
@@ -306,7 +306,7 @@ struct _cache_attr {
 	ssize_t (*store)(struct _cpuid4_info *, const char *, size_t count);
 };
 
-#ifdef CONFIG_CPU_SUP_AMD
+#ifdef CONFIG_AMD_NB
 
 /*
 * L3 cache descriptors
@@ -369,7 +369,7 @@ static void __cpuinit amd_check_l3_disable(struct _cpuid4_info_regs *this_leaf,
 		return;
 
 	/* not in virtualized environments */
-	if (num_k8_northbridges == 0)
+	if (k8_northbridges.num == 0)
 		return;
 
 	/*
@@ -377,7 +377,7 @@ static void __cpuinit amd_check_l3_disable(struct _cpuid4_info_regs *this_leaf,
 	 * never freed but this is done only on shutdown so it doesn't matter.
 	 */
 	if (!l3_caches) {
-		int size = num_k8_northbridges * sizeof(struct amd_l3_cache *);
+		int size = k8_northbridges.num * sizeof(struct amd_l3_cache *);
 
 		l3_caches = kzalloc(size, GFP_ATOMIC);
 		if (!l3_caches)
@@ -556,12 +556,12 @@ static struct _cache_attr cache_disable_0 = __ATTR(cache_disable_0, 0644,
 static struct _cache_attr cache_disable_1 = __ATTR(cache_disable_1, 0644,
 		show_cache_disable_1, store_cache_disable_1);
 
-#else	/* CONFIG_CPU_SUP_AMD */
+#else	/* CONFIG_AMD_NB */
 static void __cpuinit
 amd_check_l3_disable(struct _cpuid4_info_regs *this_leaf, int index)
 {
 };
-#endif /* CONFIG_CPU_SUP_AMD */
+#endif /* CONFIG_AMD_NB */
 
 static int
 __cpuinit cpuid4_cache_lookup_regs(int index,
@@ -1000,7 +1000,7 @@ static struct attribute *default_attrs[] = {
 
 static struct attribute *default_l3_attrs[] = {
 	DEFAULT_SYSFS_CACHE_ATTRS,
-#ifdef CONFIG_CPU_SUP_AMD
+#ifdef CONFIG_AMD_NB
 	&cache_disable_0.attr,
 	&cache_disable_1.attr,
 #endif
diff --git a/arch/x86/kernel/cpu/mcheck/mce-severity.c b/arch/x86/kernel/cpu/mcheck/mce-severity.c
index 8a85dd1b1aa1..1e8d66c1336a 100644
--- a/arch/x86/kernel/cpu/mcheck/mce-severity.c
+++ b/arch/x86/kernel/cpu/mcheck/mce-severity.c
@@ -192,6 +192,7 @@ static const struct file_operations severities_coverage_fops = {
 	.release	= seq_release,
 	.read		= seq_read,
 	.write		= severities_coverage_write,
+	.llseek		= seq_lseek,
 };
 
 static int __init severities_debugfs_init(void)
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index ed41562909fe..7a35b72d7c03 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -1665,6 +1665,7 @@ struct file_operations mce_chrdev_ops = {
 	.read			= mce_read,
 	.poll			= mce_poll,
 	.unlocked_ioctl		= mce_ioctl,
+	.llseek			= no_llseek,
 };
 EXPORT_SYMBOL_GPL(mce_chrdev_ops);
 
diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c
index 5e975298fa81..80c482382d5c 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_amd.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c
@@ -131,7 +131,8 @@ void mce_amd_feature_init(struct cpuinfo_x86 *c)
 	u32 low = 0, high = 0, address = 0;
 	unsigned int bank, block;
 	struct thresh_restart tr;
-	u8 lvt_off;
+	int lvt_off = -1;
+	u8 offset;
 
 	for (bank = 0; bank < NR_BANKS; ++bank) {
 		for (block = 0; block < NR_BLOCKS; ++block) {
@@ -141,6 +142,7 @@ void mce_amd_feature_init(struct cpuinfo_x86 *c)
 				address = (low & MASK_BLKPTR_LO) >> 21;
 				if (!address)
 					break;
+
 				address += MCG_XBLK_ADDR;
 			} else
 				++address;
@@ -148,12 +150,8 @@ void mce_amd_feature_init(struct cpuinfo_x86 *c)
 			if (rdmsr_safe(address, &low, &high))
 				break;
 
-			if (!(high & MASK_VALID_HI)) {
-				if (block)
-					continue;
-				else
-					break;
-			}
+			if (!(high & MASK_VALID_HI))
+				continue;
 
 			if (!(high & MASK_CNTP_HI)  ||
 			     (high & MASK_LOCKED_HI))
@@ -165,8 +163,28 @@ void mce_amd_feature_init(struct cpuinfo_x86 *c)
 			if (shared_bank[bank] && c->cpu_core_id)
 				break;
 #endif
-			lvt_off = setup_APIC_eilvt_mce(THRESHOLD_APIC_VECTOR,
-						       APIC_EILVT_MSG_FIX, 0);
+			offset = (high & MASK_LVTOFF_HI) >> 20;
+			if (lvt_off < 0) {
+				if (setup_APIC_eilvt(offset,
+						     THRESHOLD_APIC_VECTOR,
+						     APIC_EILVT_MSG_FIX, 0)) {
+					pr_err(FW_BUG "cpu %d, failed to "
+					       "setup threshold interrupt "
+					       "for bank %d, block %d "
+					       "(MSR%08X=0x%x%08x)",
+					       smp_processor_id(), bank, block,
+					       address, high, low);
+					continue;
+				}
+				lvt_off = offset;
+			} else if (lvt_off != offset) {
+				pr_err(FW_BUG "cpu %d, invalid threshold "
+				       "interrupt offset %d for bank %d,"
+				       "block %d (MSR%08X=0x%x%08x)",
+				       smp_processor_id(), lvt_off, bank,
+				       block, address, high, low);
+				continue;
+			}
 
 			high &= ~MASK_LVTOFF_HI;
 			high |= lvt_off << 20;
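
The mce_amd.c rework reads the firmware-programmed LVT offset out of bits 23:20 of the high half of each threshold-block MSR and insists that every bank/block on a CPU agrees on it. A toy sketch of that extraction and consistency check; the MSR values below are fabricated for illustration, while the mask matches the `>> 20` decode in the hunk above.

#include <stdio.h>
#include <stdint.h>

#define MASK_LVTOFF_HI 0x00F00000	/* bits 23:20 of the MSR high word */

int main(void)
{
	/* fabricated high halves of three threshold-block MSRs */
	uint32_t high[] = { 0x01200000, 0x01200000, 0x01500000 };
	int lvt_off = -1;

	for (int i = 0; i < 3; i++) {
		unsigned offset = (high[i] & MASK_LVTOFF_HI) >> 20;

		if (lvt_off < 0)
			lvt_off = offset;	/* first block sets the offset */
		else if (lvt_off != (int)offset)
			printf("block %d: inconsistent LVT offset %u != %d\n",
			       i, offset, lvt_off);
	}
	return 0;
}
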
diff --git a/arch/x86/kernel/cpu/mcheck/therm_throt.c b/arch/x86/kernel/cpu/mcheck/therm_throt.c
index d9368eeda309..4b683267eca5 100644
--- a/arch/x86/kernel/cpu/mcheck/therm_throt.c
+++ b/arch/x86/kernel/cpu/mcheck/therm_throt.c
@@ -216,7 +216,7 @@ static __cpuinit int thermal_throttle_add_dev(struct sys_device *sys_dev,
 		err = sysfs_add_file_to_group(&sys_dev->kobj,
 					      &attr_core_power_limit_count.attr,
 					      thermal_attr_group.name);
-	if (cpu_has(c, X86_FEATURE_PTS))
+	if (cpu_has(c, X86_FEATURE_PTS)) {
 		err = sysfs_add_file_to_group(&sys_dev->kobj,
 					      &attr_package_throttle_count.attr,
 					      thermal_attr_group.name);
@@ -224,6 +224,7 @@ static __cpuinit int thermal_throttle_add_dev(struct sys_device *sys_dev,
 			err = sysfs_add_file_to_group(&sys_dev->kobj,
 					&attr_package_power_limit_count.attr,
 					thermal_attr_group.name);
+	}
 
 	return err;
 }
@@ -349,7 +350,7 @@ static void intel_thermal_interrupt(void)
 
 static void unexpected_thermal_interrupt(void)
 {
-	printk(KERN_ERR "CPU%d: Unexpected LVT TMR interrupt!\n",
+	printk(KERN_ERR "CPU%d: Unexpected LVT thermal interrupt!\n",
 			smp_processor_id());
 	add_taint(TAINT_MACHINE_CHECK);
 }
diff --git a/arch/x86/kernel/cpu/mtrr/cleanup.c b/arch/x86/kernel/cpu/mtrr/cleanup.c
index c5f59d071425..ac140c7be396 100644
--- a/arch/x86/kernel/cpu/mtrr/cleanup.c
+++ b/arch/x86/kernel/cpu/mtrr/cleanup.c
@@ -827,7 +827,7 @@ int __init amd_special_default_mtrr(void)
 
 	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
 		return 0;
-	if (boot_cpu_data.x86 < 0xf || boot_cpu_data.x86 > 0x11)
+	if (boot_cpu_data.x86 < 0xf)
 		return 0;
 	/* In case some hypervisor doesn't pass SYSCFG through: */
 	if (rdmsr_safe(MSR_K8_SYSCFG, &l, &h) < 0)
diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c
index 7d28d7d03885..9f27228ceffd 100644
--- a/arch/x86/kernel/cpu/mtrr/generic.c
+++ b/arch/x86/kernel/cpu/mtrr/generic.c
@@ -64,18 +64,59 @@ static inline void k8_check_syscfg_dram_mod_en(void)
 	}
 }
 
+/* Get the size of contiguous MTRR range */
+static u64 get_mtrr_size(u64 mask)
+{
+	u64 size;
+
+	mask >>= PAGE_SHIFT;
+	mask |= size_or_mask;
+	size = -mask;
+	size <<= PAGE_SHIFT;
+	return size;
+}
+
 /*
- * Returns the effective MTRR type for the region
- * Error returns:
- * - 0xFE - when the range is "not entirely covered" by _any_ var range MTRR
- * - 0xFF - when MTRR is not enabled
+ * Check and return the effective type for MTRR-MTRR type overlap.
+ * Returns 1 if the effective type is UNCACHEABLE, else returns 0
 */
-u8 mtrr_type_lookup(u64 start, u64 end)
+static int check_type_overlap(u8 *prev, u8 *curr)
+{
+	if (*prev == MTRR_TYPE_UNCACHABLE || *curr == MTRR_TYPE_UNCACHABLE) {
+		*prev = MTRR_TYPE_UNCACHABLE;
+		*curr = MTRR_TYPE_UNCACHABLE;
+		return 1;
+	}
+
+	if ((*prev == MTRR_TYPE_WRBACK && *curr == MTRR_TYPE_WRTHROUGH) ||
+	    (*prev == MTRR_TYPE_WRTHROUGH && *curr == MTRR_TYPE_WRBACK)) {
+		*prev = MTRR_TYPE_WRTHROUGH;
+		*curr = MTRR_TYPE_WRTHROUGH;
+	}
+
+	if (*prev != *curr) {
+		*prev = MTRR_TYPE_UNCACHABLE;
+		*curr = MTRR_TYPE_UNCACHABLE;
+		return 1;
+	}
+
+	return 0;
+}
+
+/*
+ * Error/Semi-error returns:
+ * 0xFF - when MTRR is not enabled
+ * *repeat == 1 implies [start:end] spanned across MTRR range and type returned
+ *		corresponds only to [start:*partial_end].
+ *		Caller has to lookup again for [*partial_end:end].
+ */
+static u8 __mtrr_type_lookup(u64 start, u64 end, u64 *partial_end, int *repeat)
 {
 	int i;
 	u64 base, mask;
 	u8 prev_match, curr_match;
 
+	*repeat = 0;
 	if (!mtrr_state_set)
 		return 0xFF;
 
@@ -126,8 +167,34 @@ u8 mtrr_type_lookup(u64 start, u64 end)
 
 		start_state = ((start & mask) == (base & mask));
 		end_state = ((end & mask) == (base & mask));
-		if (start_state != end_state)
-			return 0xFE;
+
+		if (start_state != end_state) {
+			/*
+			 * We have start:end spanning across an MTRR.
+			 * We split the region into
+			 * either
+			 * (start:mtrr_end) (mtrr_end:end)
+			 * or
+			 * (start:mtrr_start) (mtrr_start:end)
+			 * depending on kind of overlap.
+			 * Return the type for first region and a pointer to
+			 * the start of second region so that caller will
+			 * lookup again on the second region.
+			 * Note: This way we handle multiple overlaps as well.
+			 */
+			if (start_state)
+				*partial_end = base + get_mtrr_size(mask);
+			else
+				*partial_end = base;
+
+			if (unlikely(*partial_end <= start)) {
+				WARN_ON(1);
+				*partial_end = start + PAGE_SIZE;
+			}
+
+			end = *partial_end - 1; /* end is inclusive */
+			*repeat = 1;
+		}
 
 		if ((start & mask) != (base & mask))
 			continue;
@@ -138,21 +205,8 @@ u8 mtrr_type_lookup(u64 start, u64 end)
 			continue;
 		}
 
-		if (prev_match == MTRR_TYPE_UNCACHABLE ||
-		    curr_match == MTRR_TYPE_UNCACHABLE) {
-			return MTRR_TYPE_UNCACHABLE;
-		}
-
-		if ((prev_match == MTRR_TYPE_WRBACK &&
-		     curr_match == MTRR_TYPE_WRTHROUGH) ||
-		    (prev_match == MTRR_TYPE_WRTHROUGH &&
-		     curr_match == MTRR_TYPE_WRBACK)) {
-			prev_match = MTRR_TYPE_WRTHROUGH;
-			curr_match = MTRR_TYPE_WRTHROUGH;
-		}
-
-		if (prev_match != curr_match)
-			return MTRR_TYPE_UNCACHABLE;
+		if (check_type_overlap(&prev_match, &curr_match))
+			return curr_match;
 	}
 
 	if (mtrr_tom2) {
@@ -166,6 +220,36 @@ u8 mtrr_type_lookup(u64 start, u64 end)
 	return mtrr_state.def_type;
 }
 
+/*
+ * Returns the effective MTRR type for the region
+ * Error return:
+ * 0xFF - when MTRR is not enabled
+ */
+u8 mtrr_type_lookup(u64 start, u64 end)
+{
+	u8 type, prev_type;
+	int repeat;
+	u64 partial_end;
+
+	type = __mtrr_type_lookup(start, end, &partial_end, &repeat);
+
+	/*
+	 * Common path is with repeat = 0.
+	 * However, we can have cases where [start:end] spans across some
+	 * MTRR range. Do repeated lookups for that case here.
+	 */
+	while (repeat) {
+		prev_type = type;
+		start = partial_end;
+		type = __mtrr_type_lookup(start, end, &partial_end, &repeat);
+
+		if (check_type_overlap(&prev_type, &type))
+			return type;
+	}
+
+	return type;
+}
+
 /* Get the MSR pair relating to a var range */
 static void
 get_mtrr_var_range(unsigned int index, struct mtrr_var_range *vr)
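
check_type_overlap() above encodes the architectural rule for combining two variable-range MTRR types that cover the same address: UC dominates, WB combined with WT degrades to WT, and any remaining mismatch is treated as UC. A standalone sketch of the same merge table, with type values as in the x86 MTRR definitions:

#include <stdio.h>
#include <stdint.h>

/* MTRR memory types (numeric values per the x86 SDM / asm/mtrr.h) */
enum { UNCACHABLE = 0, WRCOMB = 1, WRTHROUGH = 4, WRPROT = 5, WRBACK = 6 };

/* Same rules as check_type_overlap(): returns 1 when the result is final. */
static int merge(uint8_t *prev, uint8_t *curr)
{
	if (*prev == UNCACHABLE || *curr == UNCACHABLE) {
		*prev = *curr = UNCACHABLE;	/* UC always wins */
		return 1;
	}

	if ((*prev == WRBACK && *curr == WRTHROUGH) ||
	    (*prev == WRTHROUGH && *curr == WRBACK))
		*prev = *curr = WRTHROUGH;	/* WB + WT degrades to WT */

	if (*prev != *curr) {
		*prev = *curr = UNCACHABLE;	/* any other conflict -> UC */
		return 1;
	}
	return 0;
}

int main(void)
{
	uint8_t a = WRBACK, b = WRTHROUGH;

	merge(&a, &b);
	printf("WB + WT -> %u (WT)\n", a);	/* prints 4 */
	return 0;
}
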
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 03a5b0385ad6..fe73c1844a9a 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -531,7 +531,7 @@ static int x86_pmu_hw_config(struct perf_event *event)
 /*
 * Setup the hardware configuration for a given attr_type
 */
-static int __hw_perf_event_init(struct perf_event *event)
+static int __x86_pmu_event_init(struct perf_event *event)
 {
 	int err;
 
@@ -584,7 +584,7 @@ static void x86_pmu_disable_all(void)
 	}
 }
 
-void hw_perf_disable(void)
+static void x86_pmu_disable(struct pmu *pmu)
 {
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 
@@ -619,7 +619,7 @@ static void x86_pmu_enable_all(int added)
 	}
 }
 
-static const struct pmu pmu;
+static struct pmu pmu;
 
 static inline int is_x86_event(struct perf_event *event)
 {
@@ -801,10 +801,10 @@ static inline int match_prev_assignment(struct hw_perf_event *hwc,
 		hwc->last_tag == cpuc->tags[i];
 }
 
-static int x86_pmu_start(struct perf_event *event);
-static void x86_pmu_stop(struct perf_event *event);
+static void x86_pmu_start(struct perf_event *event, int flags);
+static void x86_pmu_stop(struct perf_event *event, int flags);
 
-void hw_perf_enable(void)
+static void x86_pmu_enable(struct pmu *pmu)
 {
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 	struct perf_event *event;
@@ -840,7 +840,14 @@ void hw_perf_enable(void)
 			    match_prev_assignment(hwc, cpuc, i))
 				continue;
 
-			x86_pmu_stop(event);
+			/*
+			 * Ensure we don't accidentally enable a stopped
+			 * counter simply because we rescheduled.
+			 */
+			if (hwc->state & PERF_HES_STOPPED)
+				hwc->state |= PERF_HES_ARCH;
+
+			x86_pmu_stop(event, PERF_EF_UPDATE);
 		}
 
 		for (i = 0; i < cpuc->n_events; i++) {
@@ -852,7 +859,10 @@ void hw_perf_enable(void)
 			else if (i < n_running)
 				continue;
 
-			x86_pmu_start(event);
+			if (hwc->state & PERF_HES_ARCH)
+				continue;
+
+			x86_pmu_start(event, PERF_EF_RELOAD);
 		}
 		cpuc->n_added = 0;
 		perf_events_lapic_init();
@@ -953,15 +963,12 @@ static void x86_pmu_enable_event(struct perf_event *event)
 }
 
 /*
- * activate a single event
+ * Add a single event to the PMU.
 *
 * The event is added to the group of enabled events
 * but only if it can be scehduled with existing events.
- *
- * Called with PMU disabled. If successful and return value 1,
- * then guaranteed to call perf_enable() and hw_perf_enable()
 */
-static int x86_pmu_enable(struct perf_event *event)
+static int x86_pmu_add(struct perf_event *event, int flags)
 {
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 	struct hw_perf_event *hwc;
@@ -970,58 +977,67 @@ static int x86_pmu_enable(struct perf_event *event)
 
 	hwc = &event->hw;
 
+	perf_pmu_disable(event->pmu);
 	n0 = cpuc->n_events;
-	n = collect_events(cpuc, event, false);
-	if (n < 0)
-		return n;
+	ret = n = collect_events(cpuc, event, false);
+	if (ret < 0)
+		goto out;
+
+	hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
+	if (!(flags & PERF_EF_START))
+		hwc->state |= PERF_HES_ARCH;
 
 	/*
	 * If group events scheduling transaction was started,
	 * skip the schedulability test here, it will be peformed
-	 * at commit time(->commit_txn) as a whole
+	 * at commit time (->commit_txn) as a whole
	 */
 	if (cpuc->group_flag & PERF_EVENT_TXN)
-		goto out;
+		goto done_collect;
 
 	ret = x86_pmu.schedule_events(cpuc, n, assign);
 	if (ret)
-		return ret;
+		goto out;
 	/*
	 * copy new assignment, now we know it is possible
	 * will be used by hw_perf_enable()
	 */
 	memcpy(cpuc->assign, assign, n*sizeof(int));
 
-out:
+done_collect:
 	cpuc->n_events = n;
 	cpuc->n_added += n - n0;
 	cpuc->n_txn += n - n0;
 
-	return 0;
+	ret = 0;
+out:
+	perf_pmu_enable(event->pmu);
+	return ret;
 }
 
-static int x86_pmu_start(struct perf_event *event)
+static void x86_pmu_start(struct perf_event *event, int flags)
 {
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 	int idx = event->hw.idx;
 
-	if (idx == -1)
-		return -EAGAIN;
+	if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
+		return;
+
+	if (WARN_ON_ONCE(idx == -1))
+		return;
+
+	if (flags & PERF_EF_RELOAD) {
+		WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));
+		x86_perf_event_set_period(event);
+	}
+
+	event->hw.state = 0;
 
-	x86_perf_event_set_period(event);
 	cpuc->events[idx] = event;
 	__set_bit(idx, cpuc->active_mask);
 	__set_bit(idx, cpuc->running);
 	x86_pmu.enable(event);
 	perf_event_update_userpage(event);
-
-	return 0;
-}
-
-static void x86_pmu_unthrottle(struct perf_event *event)
-{
-	int ret = x86_pmu_start(event);
-	WARN_ON_ONCE(ret);
 }
 
 void perf_event_print_debug(void)
@@ -1078,27 +1094,29 @@ void perf_event_print_debug(void)
 	local_irq_restore(flags);
 }
 
-static void x86_pmu_stop(struct perf_event *event)
+static void x86_pmu_stop(struct perf_event *event, int flags)
 {
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 	struct hw_perf_event *hwc = &event->hw;
-	int idx = hwc->idx;
 
-	if (!__test_and_clear_bit(idx, cpuc->active_mask))
-		return;
-
-	x86_pmu.disable(event);
-
-	/*
-	 * Drain the remaining delta count out of a event
-	 * that we are disabling:
-	 */
-	x86_perf_event_update(event);
+	if (__test_and_clear_bit(hwc->idx, cpuc->active_mask)) {
+		x86_pmu.disable(event);
+		cpuc->events[hwc->idx] = NULL;
+		WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
+		hwc->state |= PERF_HES_STOPPED;
+	}
 
-	cpuc->events[idx] = NULL;
+	if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
+		/*
+		 * Drain the remaining delta count out of a event
+		 * that we are disabling:
+		 */
+		x86_perf_event_update(event);
+		hwc->state |= PERF_HES_UPTODATE;
+	}
 }
 
-static void x86_pmu_disable(struct perf_event *event)
+static void x86_pmu_del(struct perf_event *event, int flags)
 {
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 	int i;
@@ -1111,7 +1129,7 @@ static void x86_pmu_disable(struct perf_event *event)
 	if (cpuc->group_flag & PERF_EVENT_TXN)
 		return;
 
-	x86_pmu_stop(event);
+	x86_pmu_stop(event, PERF_EF_UPDATE);
 
 	for (i = 0; i < cpuc->n_events; i++) {
 		if (event == cpuc->event_list[i]) {
@@ -1134,7 +1152,6 @@ static int x86_pmu_handle_irq(struct pt_regs *regs)
 	struct perf_sample_data data;
 	struct cpu_hw_events *cpuc;
 	struct perf_event *event;
-	struct hw_perf_event *hwc;
 	int idx, handled = 0;
 	u64 val;
 
@@ -1155,7 +1172,6 @@ static int x86_pmu_handle_irq(struct pt_regs *regs)
 		}
 
 		event = cpuc->events[idx];
-		hwc = &event->hw;
 
 		val = x86_perf_event_update(event);
 		if (val & (1ULL << (x86_pmu.cntval_bits - 1)))
@@ -1171,7 +1187,7 @@ static int x86_pmu_handle_irq(struct pt_regs *regs)
 			continue;
 
 		if (perf_event_overflow(event, 1, &data, regs))
-			x86_pmu_stop(event);
+			x86_pmu_stop(event, 0);
 	}
 
 	if (handled)
@@ -1180,25 +1196,6 @@ static int x86_pmu_handle_irq(struct pt_regs *regs)
 	return handled;
 }
 
-void smp_perf_pending_interrupt(struct pt_regs *regs)
-{
-	irq_enter();
-	ack_APIC_irq();
-	inc_irq_stat(apic_pending_irqs);
-	perf_event_do_pending();
-	irq_exit();
-}
-
-void set_perf_event_pending(void)
-{
-#ifdef CONFIG_X86_LOCAL_APIC
-	if (!x86_pmu.apic || !x86_pmu_initialized())
-		return;
-
-	apic->send_IPI_self(LOCAL_PENDING_VECTOR);
-#endif
-}
-
 void perf_events_lapic_init(void)
 {
 	if (!x86_pmu.apic || !x86_pmu_initialized())
@@ -1388,7 +1385,6 @@ void __init init_hw_perf_events(void)
 		x86_pmu.num_counters = X86_PMC_MAX_GENERIC;
 	}
 	x86_pmu.intel_ctrl = (1 << x86_pmu.num_counters) - 1;
-	perf_max_events = x86_pmu.num_counters;
 
 	if (x86_pmu.num_counters_fixed > X86_PMC_MAX_FIXED) {
 		WARN(1, KERN_ERR "hw perf events fixed %d > max(%d), clipping!",
@@ -1424,6 +1420,7 @@ void __init init_hw_perf_events(void)
 	pr_info("... fixed-purpose events: %d\n", x86_pmu.num_counters_fixed);
 	pr_info("... event mask: %016Lx\n", x86_pmu.intel_ctrl);
 
+	perf_pmu_register(&pmu);
 	perf_cpu_notifier(x86_pmu_notifier);
 }
 
@@ -1437,10 +1434,11 @@ static inline void x86_pmu_read(struct perf_event *event)
 * Set the flag to make pmu::enable() not perform the
 * schedulability test, it will be performed at commit time
 */
-static void x86_pmu_start_txn(const struct pmu *pmu)
+static void x86_pmu_start_txn(struct pmu *pmu)
 {
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 
+	perf_pmu_disable(pmu);
 	cpuc->group_flag |= PERF_EVENT_TXN;
 	cpuc->n_txn = 0;
 }
@@ -1450,7 +1448,7 @@ static void x86_pmu_start_txn(const struct pmu *pmu)
 * Clear the flag and pmu::enable() will perform the
 * schedulability test.
 */
-static void x86_pmu_cancel_txn(const struct pmu *pmu)
+static void x86_pmu_cancel_txn(struct pmu *pmu)
 {
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 
@@ -1460,6 +1458,7 @@ static void x86_pmu_cancel_txn(const struct pmu *pmu)
	 */
 	cpuc->n_added -= cpuc->n_txn;
 	cpuc->n_events -= cpuc->n_txn;
+	perf_pmu_enable(pmu);
 }
 
 /*
@@ -1467,7 +1466,7 @@ static void x86_pmu_cancel_txn(const struct pmu *pmu)
 * Perform the group schedulability test as a whole
 * Return 0 if success
 */
-static int x86_pmu_commit_txn(const struct pmu *pmu)
+static int x86_pmu_commit_txn(struct pmu *pmu)
 {
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 	int assign[X86_PMC_IDX_MAX];
@@ -1489,22 +1488,10 @@ static int x86_pmu_commit_txn(const struct pmu *pmu)
 	memcpy(cpuc->assign, assign, n*sizeof(int));
 
 	cpuc->group_flag &= ~PERF_EVENT_TXN;
-
+	perf_pmu_enable(pmu);
 	return 0;
 }
 
-static const struct pmu pmu = {
-	.enable		= x86_pmu_enable,
-	.disable	= x86_pmu_disable,
-	.start		= x86_pmu_start,
-	.stop		= x86_pmu_stop,
-	.read		= x86_pmu_read,
-	.unthrottle	= x86_pmu_unthrottle,
-	.start_txn	= x86_pmu_start_txn,
-	.cancel_txn	= x86_pmu_cancel_txn,
-	.commit_txn	= x86_pmu_commit_txn,
-};
-
 /*
 * validate that we can schedule this event
 */
@@ -1579,12 +1566,22 @@ out:
 	return ret;
 }
 
-const struct pmu *hw_perf_event_init(struct perf_event *event)
+int x86_pmu_event_init(struct perf_event *event)
 {
-	const struct pmu *tmp;
+	struct pmu *tmp;
 	int err;
 
-	err = __hw_perf_event_init(event);
+	switch (event->attr.type) {
+	case PERF_TYPE_RAW:
+	case PERF_TYPE_HARDWARE:
+	case PERF_TYPE_HW_CACHE:
+		break;
+
+	default:
+		return -ENOENT;
+	}
+
+	err = __x86_pmu_event_init(event);
 	if (!err) {
 		/*
		 * we temporarily connect event to its pmu
@@ -1604,26 +1601,31 @@ const struct pmu *hw_perf_event_init(struct perf_event *event)
 	if (err) {
 		if (event->destroy)
 			event->destroy(event);
-		return ERR_PTR(err);
 	}
 
-	return &pmu;
+	return err;
 }
 
-/*
- * callchain support
- */
+static struct pmu pmu = {
+	.pmu_enable	= x86_pmu_enable,
+	.pmu_disable	= x86_pmu_disable,
 
-static inline
-void callchain_store(struct perf_callchain_entry *entry, u64 ip)
-{
-	if (entry->nr < PERF_MAX_STACK_DEPTH)
-		entry->ip[entry->nr++] = ip;
-}
+	.event_init	= x86_pmu_event_init,
 
-static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_irq_entry);
-static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_nmi_entry);
+	.add		= x86_pmu_add,
+	.del		= x86_pmu_del,
+	.start		= x86_pmu_start,
+	.stop		= x86_pmu_stop,
+	.read		= x86_pmu_read,
 
+	.start_txn	= x86_pmu_start_txn,
+	.cancel_txn	= x86_pmu_cancel_txn,
+	.commit_txn	= x86_pmu_commit_txn,
+};
+
+/*
+ * callchain support
+ */
 
 static void
 backtrace_warning_symbol(void *data, char *msg, unsigned long symbol)
@@ -1645,7 +1647,7 @@ static void backtrace_address(void *data, unsigned long addr, int reliable)
 {
 	struct perf_callchain_entry *entry = data;
 
-	callchain_store(entry, addr);
+	perf_callchain_store(entry, addr);
 }
 
 static const struct stacktrace_ops backtrace_ops = {
@@ -1656,11 +1658,15 @@ static const struct stacktrace_ops backtrace_ops = {
 	.walk_stack		= print_context_stack_bp,
 };
 
-static void
-perf_callchain_kernel(struct pt_regs *regs, struct perf_callchain_entry *entry)
+void
+perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
 {
-	callchain_store(entry, PERF_CONTEXT_KERNEL);
-	callchain_store(entry, regs->ip);
+	if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
+		/* TODO: We don't support guest os callchain now */
+		return;
+	}
+
+	perf_callchain_store(entry, regs->ip);
 
 	dump_trace(NULL, regs, NULL, regs->bp, &backtrace_ops, entry);
 }
@@ -1689,7 +1695,7 @@ perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry)
 		if (fp < compat_ptr(regs->sp))
 			break;
 
-		callchain_store(entry, frame.return_address);
+		perf_callchain_store(entry, frame.return_address);
 		fp = compat_ptr(frame.next_frame);
 	}
 	return 1;
@@ -1702,19 +1708,20 @@ perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry)
 }
 #endif
 
-static void
-perf_callchain_user(struct pt_regs *regs, struct perf_callchain_entry *entry)
+void
+perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
 {
 	struct stack_frame frame;
 	const void __user *fp;
 
-	if (!user_mode(regs))
-		regs = task_pt_regs(current);
+	if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
+		/* TODO: We don't support guest os callchain now */
+		return;
+	}
 
 	fp = (void __user *)regs->bp;
 
-	callchain_store(entry, PERF_CONTEXT_USER);
-	callchain_store(entry, regs->ip);
+	perf_callchain_store(entry, regs->ip);
 
 	if (perf_callchain_user32(regs, entry))
 		return;
@@ -1731,52 +1738,11 @@ perf_callchain_user(struct pt_regs *regs, struct perf_callchain_entry *entry)
 		if ((unsigned long)fp < regs->sp)
 			break;
 
-		callchain_store(entry, frame.return_address);
+		perf_callchain_store(entry, frame.return_address);
 		fp = frame.next_frame;
 	}
 }
 
-static void
-perf_do_callchain(struct pt_regs *regs, struct perf_callchain_entry *entry)
-{
-	int is_user;
-
-	if (!regs)
-		return;
-
-	is_user = user_mode(regs);
-
-	if (is_user && current->state != TASK_RUNNING)
-		return;
-
-	if (!is_user)
-		perf_callchain_kernel(regs, entry);
-
-	if (current->mm)
-		perf_callchain_user(regs, entry);
-}
-
-struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
-{
-	struct perf_callchain_entry *entry;
-
-	if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
-		/* TODO: We don't support guest os callchain now */
-		return NULL;
-	}
-
-	if (in_nmi())
-		entry = &__get_cpu_var(pmc_nmi_entry);
-	else
-		entry = &__get_cpu_var(pmc_irq_entry);
-
-	entry->nr = 0;
-
-	perf_do_callchain(regs, entry);
-
-	return entry;
-}
-
 unsigned long perf_instruction_pointer(struct pt_regs *regs)
 {
 	unsigned long ip;
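
The perf_event.c rework retires hw_perf_enable()/hw_perf_disable() and the enable/disable/unthrottle callbacks in favor of a struct pmu registered with perf_pmu_register(), with an add/del/start/stop quartet tracking each event's scheduling state in hwc->state. Below is a condensed model of that state machine; the PERF_HES_* values are meant to mirror include/linux/perf_event.h of this era, but treat the whole block as an illustrative sketch, not kernel code.

#include <stdio.h>

#define PERF_HES_STOPPED	0x01	/* counter is not on hardware */
#define PERF_HES_UPTODATE	0x02	/* event->count is current */
#define PERF_HES_ARCH		0x04	/* arch wants it kept stopped */

struct hw_event { int state; };

/* x86_pmu_add(): a freshly added event starts stopped and counted */
static void add(struct hw_event *e, int start_now)
{
	e->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
	if (!start_now)
		e->state |= PERF_HES_ARCH;
}

/* x86_pmu_start(): event goes live on hardware, all flags clear */
static void start(struct hw_event *e)
{
	e->state = 0;
}

/* x86_pmu_stop(PERF_EF_UPDATE): off hardware with counts drained */
static void stop(struct hw_event *e)
{
	e->state |= PERF_HES_STOPPED;
	e->state |= PERF_HES_UPTODATE;
}

int main(void)
{
	struct hw_event e;

	add(&e, 1);
	start(&e);
	stop(&e);
	printf("state after stop: %#x\n", e.state);	/* 0x3 */
	return 0;
}
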
diff --git a/arch/x86/kernel/cpu/perf_event_amd.c b/arch/x86/kernel/cpu/perf_event_amd.c
index c2897b7b4a3b..46d58448c3af 100644
--- a/arch/x86/kernel/cpu/perf_event_amd.c
+++ b/arch/x86/kernel/cpu/perf_event_amd.c
@@ -52,7 +52,7 @@ static __initconst const u64 amd_hw_cache_event_ids
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0040, /* Data Cache Accesses */
-		[ C(RESULT_MISS) ] = 0x0046, /* L1 DTLB and L2 DLTB Miss */
+		[ C(RESULT_MISS) ] = 0x0746, /* L1_DTLB_AND_L2_DLTB_MISS.ALL */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0,
@@ -66,7 +66,7 @@ static __initconst const u64 amd_hw_cache_event_ids
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0080, /* Instruction fecthes */
-		[ C(RESULT_MISS) ] = 0x0085, /* Instr. fetch ITLB misses */
+		[ C(RESULT_MISS) ] = 0x0385, /* L1_ITLB_AND_L2_ITLB_MISS.ALL */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c index ee05c90012d2..c8f5c088cad1 100644 --- a/arch/x86/kernel/cpu/perf_event_intel.c +++ b/arch/x86/kernel/cpu/perf_event_intel.c | |||
@@ -713,18 +713,18 @@ static int intel_pmu_handle_irq(struct pt_regs *regs) | |||
713 | struct cpu_hw_events *cpuc; | 713 | struct cpu_hw_events *cpuc; |
714 | int bit, loops; | 714 | int bit, loops; |
715 | u64 status; | 715 | u64 status; |
716 | int handled = 0; | 716 | int handled; |
717 | 717 | ||
718 | perf_sample_data_init(&data, 0); | 718 | perf_sample_data_init(&data, 0); |
719 | 719 | ||
720 | cpuc = &__get_cpu_var(cpu_hw_events); | 720 | cpuc = &__get_cpu_var(cpu_hw_events); |
721 | 721 | ||
722 | intel_pmu_disable_all(); | 722 | intel_pmu_disable_all(); |
723 | intel_pmu_drain_bts_buffer(); | 723 | handled = intel_pmu_drain_bts_buffer(); |
724 | status = intel_pmu_get_status(); | 724 | status = intel_pmu_get_status(); |
725 | if (!status) { | 725 | if (!status) { |
726 | intel_pmu_enable_all(0); | 726 | intel_pmu_enable_all(0); |
727 | return 0; | 727 | return handled; |
728 | } | 728 | } |
729 | 729 | ||
730 | loops = 0; | 730 | loops = 0; |
@@ -763,7 +763,7 @@ again: | |||
763 | data.period = event->hw.last_period; | 763 | data.period = event->hw.last_period; |
764 | 764 | ||
765 | if (perf_event_overflow(event, 1, &data, regs)) | 765 | if (perf_event_overflow(event, 1, &data, regs)) |
766 | x86_pmu_stop(event); | 766 | x86_pmu_stop(event, 0); |
767 | } | 767 | } |
768 | 768 | ||
769 | /* | 769 | /* |
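The point of threading the BTS drain's return value through "handled" is that a PMI raised only for a full BTS buffer used to return 0, which the NMI core could then report as an unknown NMI. A simplified model of the accounting, with stub functions standing in for the real drain and status read:

#include <stdio.h>

static int drain_bts_buffer(void) { return 1; }          /* pretend we drained */
static unsigned long long get_status(void) { return 0; } /* no counter overflow */

static int handle_pmi(void)
{
        int handled = drain_bts_buffer();

        if (!get_status())
                return handled; /* BTS-only PMI still counts as handled */

        /* ... service counter overflows, incrementing handled ... */
        return handled;
}

int main(void)
{
        printf("handled=%d\n", handle_pmi());
        return 0;
}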
diff --git a/arch/x86/kernel/cpu/perf_event_intel_ds.c b/arch/x86/kernel/cpu/perf_event_intel_ds.c index 18018d1311cd..4977f9c400e5 100644 --- a/arch/x86/kernel/cpu/perf_event_intel_ds.c +++ b/arch/x86/kernel/cpu/perf_event_intel_ds.c | |||
@@ -214,7 +214,7 @@ static void intel_pmu_disable_bts(void) | |||
214 | update_debugctlmsr(debugctlmsr); | 214 | update_debugctlmsr(debugctlmsr); |
215 | } | 215 | } |
216 | 216 | ||
217 | static void intel_pmu_drain_bts_buffer(void) | 217 | static int intel_pmu_drain_bts_buffer(void) |
218 | { | 218 | { |
219 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 219 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); |
220 | struct debug_store *ds = cpuc->ds; | 220 | struct debug_store *ds = cpuc->ds; |
@@ -231,16 +231,16 @@ static void intel_pmu_drain_bts_buffer(void) | |||
231 | struct pt_regs regs; | 231 | struct pt_regs regs; |
232 | 232 | ||
233 | if (!event) | 233 | if (!event) |
234 | return; | 234 | return 0; |
235 | 235 | ||
236 | if (!ds) | 236 | if (!ds) |
237 | return; | 237 | return 0; |
238 | 238 | ||
239 | at = (struct bts_record *)(unsigned long)ds->bts_buffer_base; | 239 | at = (struct bts_record *)(unsigned long)ds->bts_buffer_base; |
240 | top = (struct bts_record *)(unsigned long)ds->bts_index; | 240 | top = (struct bts_record *)(unsigned long)ds->bts_index; |
241 | 241 | ||
242 | if (top <= at) | 242 | if (top <= at) |
243 | return; | 243 | return 0; |
244 | 244 | ||
245 | ds->bts_index = ds->bts_buffer_base; | 245 | ds->bts_index = ds->bts_buffer_base; |
246 | 246 | ||
@@ -256,7 +256,7 @@ static void intel_pmu_drain_bts_buffer(void) | |||
256 | perf_prepare_sample(&header, &data, event, ®s); | 256 | perf_prepare_sample(&header, &data, event, ®s); |
257 | 257 | ||
258 | if (perf_output_begin(&handle, event, header.size * (top - at), 1, 1)) | 258 | if (perf_output_begin(&handle, event, header.size * (top - at), 1, 1)) |
259 | return; | 259 | return 1; |
260 | 260 | ||
261 | for (; at < top; at++) { | 261 | for (; at < top; at++) { |
262 | data.ip = at->from; | 262 | data.ip = at->from; |
@@ -270,6 +270,7 @@ static void intel_pmu_drain_bts_buffer(void) | |||
270 | /* There's new data available. */ | 270 | /* There's new data available. */ |
271 | event->hw.interrupts++; | 271 | event->hw.interrupts++; |
272 | event->pending_kill = POLL_IN; | 272 | event->pending_kill = POLL_IN; |
273 | return 1; | ||
273 | } | 274 | } |
274 | 275 | ||
275 | /* | 276 | /* |
@@ -491,7 +492,7 @@ static void __intel_pmu_pebs_event(struct perf_event *event, | |||
491 | regs.flags &= ~PERF_EFLAGS_EXACT; | 492 | regs.flags &= ~PERF_EFLAGS_EXACT; |
492 | 493 | ||
493 | if (perf_event_overflow(event, 1, &data, ®s)) | 494 | if (perf_event_overflow(event, 1, &data, ®s)) |
494 | x86_pmu_stop(event); | 495 | x86_pmu_stop(event, 0); |
495 | } | 496 | } |
496 | 497 | ||
497 | static void intel_pmu_drain_pebs_core(struct pt_regs *iregs) | 498 | static void intel_pmu_drain_pebs_core(struct pt_regs *iregs) |
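On the producer side, the drain emits one perf sample per branch record between the buffer base and ds->bts_index, reserving output space for all of them in a single perf_output_begin() call. A toy illustration of that sizing arithmetic (the record layout matches the driver's struct bts_record; the sample size is an assumption for the demo):

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

struct bts_record {
        uint64_t from, to, flags;
};

int main(void)
{
        struct bts_record buf[4] = {
                { 0x1000, 0x2000, 0 },
                { 0x2004, 0x3000, 0 },
        };
        struct bts_record *at  = buf;           /* ds->bts_buffer_base */
        struct bts_record *top = buf + 2;       /* ds->bts_index */
        size_t header_size = 64;                /* assumed per-sample size */

        printf("reserve %zu bytes for %td records\n",
               header_size * (top - at), top - at);
        return 0;
}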
diff --git a/arch/x86/kernel/cpu/perf_event_p4.c b/arch/x86/kernel/cpu/perf_event_p4.c index b560db3305be..81400b93e694 100644 --- a/arch/x86/kernel/cpu/perf_event_p4.c +++ b/arch/x86/kernel/cpu/perf_event_p4.c | |||
@@ -18,6 +18,8 @@ | |||
18 | struct p4_event_bind { | 18 | struct p4_event_bind { |
19 | unsigned int opcode; /* Event code and ESCR selector */ | 19 | unsigned int opcode; /* Event code and ESCR selector */ |
20 | unsigned int escr_msr[2]; /* ESCR MSR for this event */ | 20 | unsigned int escr_msr[2]; /* ESCR MSR for this event */ |
21 | unsigned int escr_emask; /* valid ESCR EventMask bits */ | ||
22 | unsigned int shared; /* event is shared across threads */ | ||
21 | char cntr[2][P4_CNTR_LIMIT]; /* counter index (offset), -1 on absence */ | 23 | char cntr[2][P4_CNTR_LIMIT]; /* counter index (offset), -1 on absence */ |
22 | }; | 24 | }; |
23 | 25 | ||
@@ -66,231 +68,435 @@ static struct p4_event_bind p4_event_bind_map[] = { | |||
66 | [P4_EVENT_TC_DELIVER_MODE] = { | 68 | [P4_EVENT_TC_DELIVER_MODE] = { |
67 | .opcode = P4_OPCODE(P4_EVENT_TC_DELIVER_MODE), | 69 | .opcode = P4_OPCODE(P4_EVENT_TC_DELIVER_MODE), |
68 | .escr_msr = { MSR_P4_TC_ESCR0, MSR_P4_TC_ESCR1 }, | 70 | .escr_msr = { MSR_P4_TC_ESCR0, MSR_P4_TC_ESCR1 }, |
71 | .escr_emask = | ||
72 | P4_ESCR_EMASK_BIT(P4_EVENT_TC_DELIVER_MODE, DD) | | ||
73 | P4_ESCR_EMASK_BIT(P4_EVENT_TC_DELIVER_MODE, DB) | | ||
74 | P4_ESCR_EMASK_BIT(P4_EVENT_TC_DELIVER_MODE, DI) | | ||
75 | P4_ESCR_EMASK_BIT(P4_EVENT_TC_DELIVER_MODE, BD) | | ||
76 | P4_ESCR_EMASK_BIT(P4_EVENT_TC_DELIVER_MODE, BB) | | ||
77 | P4_ESCR_EMASK_BIT(P4_EVENT_TC_DELIVER_MODE, BI) | | ||
78 | P4_ESCR_EMASK_BIT(P4_EVENT_TC_DELIVER_MODE, ID), | ||
79 | .shared = 1, | ||
69 | .cntr = { {4, 5, -1}, {6, 7, -1} }, | 80 | .cntr = { {4, 5, -1}, {6, 7, -1} }, |
70 | }, | 81 | }, |
71 | [P4_EVENT_BPU_FETCH_REQUEST] = { | 82 | [P4_EVENT_BPU_FETCH_REQUEST] = { |
72 | .opcode = P4_OPCODE(P4_EVENT_BPU_FETCH_REQUEST), | 83 | .opcode = P4_OPCODE(P4_EVENT_BPU_FETCH_REQUEST), |
73 | .escr_msr = { MSR_P4_BPU_ESCR0, MSR_P4_BPU_ESCR1 }, | 84 | .escr_msr = { MSR_P4_BPU_ESCR0, MSR_P4_BPU_ESCR1 }, |
85 | .escr_emask = | ||
86 | P4_ESCR_EMASK_BIT(P4_EVENT_BPU_FETCH_REQUEST, TCMISS), | ||
74 | .cntr = { {0, -1, -1}, {2, -1, -1} }, | 87 | .cntr = { {0, -1, -1}, {2, -1, -1} }, |
75 | }, | 88 | }, |
76 | [P4_EVENT_ITLB_REFERENCE] = { | 89 | [P4_EVENT_ITLB_REFERENCE] = { |
77 | .opcode = P4_OPCODE(P4_EVENT_ITLB_REFERENCE), | 90 | .opcode = P4_OPCODE(P4_EVENT_ITLB_REFERENCE), |
78 | .escr_msr = { MSR_P4_ITLB_ESCR0, MSR_P4_ITLB_ESCR1 }, | 91 | .escr_msr = { MSR_P4_ITLB_ESCR0, MSR_P4_ITLB_ESCR1 }, |
92 | .escr_emask = | ||
93 | P4_ESCR_EMASK_BIT(P4_EVENT_ITLB_REFERENCE, HIT) | | ||
94 | P4_ESCR_EMASK_BIT(P4_EVENT_ITLB_REFERENCE, MISS) | | ||
95 | P4_ESCR_EMASK_BIT(P4_EVENT_ITLB_REFERENCE, HIT_UK), | ||
79 | .cntr = { {0, -1, -1}, {2, -1, -1} }, | 96 | .cntr = { {0, -1, -1}, {2, -1, -1} }, |
80 | }, | 97 | }, |
81 | [P4_EVENT_MEMORY_CANCEL] = { | 98 | [P4_EVENT_MEMORY_CANCEL] = { |
82 | .opcode = P4_OPCODE(P4_EVENT_MEMORY_CANCEL), | 99 | .opcode = P4_OPCODE(P4_EVENT_MEMORY_CANCEL), |
83 | .escr_msr = { MSR_P4_DAC_ESCR0, MSR_P4_DAC_ESCR1 }, | 100 | .escr_msr = { MSR_P4_DAC_ESCR0, MSR_P4_DAC_ESCR1 }, |
101 | .escr_emask = | ||
102 | P4_ESCR_EMASK_BIT(P4_EVENT_MEMORY_CANCEL, ST_RB_FULL) | | ||
103 | P4_ESCR_EMASK_BIT(P4_EVENT_MEMORY_CANCEL, 64K_CONF), | ||
84 | .cntr = { {8, 9, -1}, {10, 11, -1} }, | 104 | .cntr = { {8, 9, -1}, {10, 11, -1} }, |
85 | }, | 105 | }, |
86 | [P4_EVENT_MEMORY_COMPLETE] = { | 106 | [P4_EVENT_MEMORY_COMPLETE] = { |
87 | .opcode = P4_OPCODE(P4_EVENT_MEMORY_COMPLETE), | 107 | .opcode = P4_OPCODE(P4_EVENT_MEMORY_COMPLETE), |
88 | .escr_msr = { MSR_P4_SAAT_ESCR0 , MSR_P4_SAAT_ESCR1 }, | 108 | .escr_msr = { MSR_P4_SAAT_ESCR0 , MSR_P4_SAAT_ESCR1 }, |
109 | .escr_emask = | ||
110 | P4_ESCR_EMASK_BIT(P4_EVENT_MEMORY_COMPLETE, LSC) | | ||
111 | P4_ESCR_EMASK_BIT(P4_EVENT_MEMORY_COMPLETE, SSC), | ||
89 | .cntr = { {8, 9, -1}, {10, 11, -1} }, | 112 | .cntr = { {8, 9, -1}, {10, 11, -1} }, |
90 | }, | 113 | }, |
91 | [P4_EVENT_LOAD_PORT_REPLAY] = { | 114 | [P4_EVENT_LOAD_PORT_REPLAY] = { |
92 | .opcode = P4_OPCODE(P4_EVENT_LOAD_PORT_REPLAY), | 115 | .opcode = P4_OPCODE(P4_EVENT_LOAD_PORT_REPLAY), |
93 | .escr_msr = { MSR_P4_SAAT_ESCR0, MSR_P4_SAAT_ESCR1 }, | 116 | .escr_msr = { MSR_P4_SAAT_ESCR0, MSR_P4_SAAT_ESCR1 }, |
117 | .escr_emask = | ||
118 | P4_ESCR_EMASK_BIT(P4_EVENT_LOAD_PORT_REPLAY, SPLIT_LD), | ||
94 | .cntr = { {8, 9, -1}, {10, 11, -1} }, | 119 | .cntr = { {8, 9, -1}, {10, 11, -1} }, |
95 | }, | 120 | }, |
96 | [P4_EVENT_STORE_PORT_REPLAY] = { | 121 | [P4_EVENT_STORE_PORT_REPLAY] = { |
97 | .opcode = P4_OPCODE(P4_EVENT_STORE_PORT_REPLAY), | 122 | .opcode = P4_OPCODE(P4_EVENT_STORE_PORT_REPLAY), |
98 | .escr_msr = { MSR_P4_SAAT_ESCR0 , MSR_P4_SAAT_ESCR1 }, | 123 | .escr_msr = { MSR_P4_SAAT_ESCR0 , MSR_P4_SAAT_ESCR1 }, |
124 | .escr_emask = | ||
125 | P4_ESCR_EMASK_BIT(P4_EVENT_STORE_PORT_REPLAY, SPLIT_ST), | ||
99 | .cntr = { {8, 9, -1}, {10, 11, -1} }, | 126 | .cntr = { {8, 9, -1}, {10, 11, -1} }, |
100 | }, | 127 | }, |
101 | [P4_EVENT_MOB_LOAD_REPLAY] = { | 128 | [P4_EVENT_MOB_LOAD_REPLAY] = { |
102 | .opcode = P4_OPCODE(P4_EVENT_MOB_LOAD_REPLAY), | 129 | .opcode = P4_OPCODE(P4_EVENT_MOB_LOAD_REPLAY), |
103 | .escr_msr = { MSR_P4_MOB_ESCR0, MSR_P4_MOB_ESCR1 }, | 130 | .escr_msr = { MSR_P4_MOB_ESCR0, MSR_P4_MOB_ESCR1 }, |
131 | .escr_emask = | ||
132 | P4_ESCR_EMASK_BIT(P4_EVENT_MOB_LOAD_REPLAY, NO_STA) | | ||
133 | P4_ESCR_EMASK_BIT(P4_EVENT_MOB_LOAD_REPLAY, NO_STD) | | ||
134 | P4_ESCR_EMASK_BIT(P4_EVENT_MOB_LOAD_REPLAY, PARTIAL_DATA) | | ||
135 | P4_ESCR_EMASK_BIT(P4_EVENT_MOB_LOAD_REPLAY, UNALGN_ADDR), | ||
104 | .cntr = { {0, -1, -1}, {2, -1, -1} }, | 136 | .cntr = { {0, -1, -1}, {2, -1, -1} }, |
105 | }, | 137 | }, |
106 | [P4_EVENT_PAGE_WALK_TYPE] = { | 138 | [P4_EVENT_PAGE_WALK_TYPE] = { |
107 | .opcode = P4_OPCODE(P4_EVENT_PAGE_WALK_TYPE), | 139 | .opcode = P4_OPCODE(P4_EVENT_PAGE_WALK_TYPE), |
108 | .escr_msr = { MSR_P4_PMH_ESCR0, MSR_P4_PMH_ESCR1 }, | 140 | .escr_msr = { MSR_P4_PMH_ESCR0, MSR_P4_PMH_ESCR1 }, |
141 | .escr_emask = | ||
142 | P4_ESCR_EMASK_BIT(P4_EVENT_PAGE_WALK_TYPE, DTMISS) | | ||
143 | P4_ESCR_EMASK_BIT(P4_EVENT_PAGE_WALK_TYPE, ITMISS), | ||
144 | .shared = 1, | ||
109 | .cntr = { {0, -1, -1}, {2, -1, -1} }, | 145 | .cntr = { {0, -1, -1}, {2, -1, -1} }, |
110 | }, | 146 | }, |
111 | [P4_EVENT_BSQ_CACHE_REFERENCE] = { | 147 | [P4_EVENT_BSQ_CACHE_REFERENCE] = { |
112 | .opcode = P4_OPCODE(P4_EVENT_BSQ_CACHE_REFERENCE), | 148 | .opcode = P4_OPCODE(P4_EVENT_BSQ_CACHE_REFERENCE), |
113 | .escr_msr = { MSR_P4_BSU_ESCR0, MSR_P4_BSU_ESCR1 }, | 149 | .escr_msr = { MSR_P4_BSU_ESCR0, MSR_P4_BSU_ESCR1 }, |
150 | .escr_emask = | ||
151 | P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, RD_2ndL_HITS) | | ||
152 | P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, RD_2ndL_HITE) | | ||
153 | P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, RD_2ndL_HITM) | | ||
154 | P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, RD_3rdL_HITS) | | ||
155 | P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, RD_3rdL_HITE) | | ||
156 | P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, RD_3rdL_HITM) | | ||
157 | P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, RD_2ndL_MISS) | | ||
158 | P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, RD_3rdL_MISS) | | ||
159 | P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, WR_2ndL_MISS), | ||
114 | .cntr = { {0, -1, -1}, {2, -1, -1} }, | 160 | .cntr = { {0, -1, -1}, {2, -1, -1} }, |
115 | }, | 161 | }, |
116 | [P4_EVENT_IOQ_ALLOCATION] = { | 162 | [P4_EVENT_IOQ_ALLOCATION] = { |
117 | .opcode = P4_OPCODE(P4_EVENT_IOQ_ALLOCATION), | 163 | .opcode = P4_OPCODE(P4_EVENT_IOQ_ALLOCATION), |
118 | .escr_msr = { MSR_P4_FSB_ESCR0, MSR_P4_FSB_ESCR1 }, | 164 | .escr_msr = { MSR_P4_FSB_ESCR0, MSR_P4_FSB_ESCR1 }, |
165 | .escr_emask = | ||
166 | P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ALLOCATION, DEFAULT) | | ||
167 | P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ALLOCATION, ALL_READ) | | ||
168 | P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ALLOCATION, ALL_WRITE) | | ||
169 | P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ALLOCATION, MEM_UC) | | ||
170 | P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ALLOCATION, MEM_WC) | | ||
171 | P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ALLOCATION, MEM_WT) | | ||
172 | P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ALLOCATION, MEM_WP) | | ||
173 | P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ALLOCATION, MEM_WB) | | ||
174 | P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ALLOCATION, OWN) | | ||
175 | P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ALLOCATION, OTHER) | | ||
176 | P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ALLOCATION, PREFETCH), | ||
119 | .cntr = { {0, -1, -1}, {2, -1, -1} }, | 177 | .cntr = { {0, -1, -1}, {2, -1, -1} }, |
120 | }, | 178 | }, |
121 | [P4_EVENT_IOQ_ACTIVE_ENTRIES] = { /* shared ESCR */ | 179 | [P4_EVENT_IOQ_ACTIVE_ENTRIES] = { /* shared ESCR */ |
122 | .opcode = P4_OPCODE(P4_EVENT_IOQ_ACTIVE_ENTRIES), | 180 | .opcode = P4_OPCODE(P4_EVENT_IOQ_ACTIVE_ENTRIES), |
123 | .escr_msr = { MSR_P4_FSB_ESCR1, MSR_P4_FSB_ESCR1 }, | 181 | .escr_msr = { MSR_P4_FSB_ESCR1, MSR_P4_FSB_ESCR1 }, |
182 | .escr_emask = | ||
183 | P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ACTIVE_ENTRIES, DEFAULT) | | ||
184 | P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ACTIVE_ENTRIES, ALL_READ) | | ||
185 | P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ACTIVE_ENTRIES, ALL_WRITE) | | ||
186 | P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ACTIVE_ENTRIES, MEM_UC) | | ||
187 | P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ACTIVE_ENTRIES, MEM_WC) | | ||
188 | P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ACTIVE_ENTRIES, MEM_WT) | | ||
189 | P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ACTIVE_ENTRIES, MEM_WP) | | ||
190 | P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ACTIVE_ENTRIES, MEM_WB) | | ||
191 | P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ACTIVE_ENTRIES, OWN) | | ||
192 | P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ACTIVE_ENTRIES, OTHER) | | ||
193 | P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ACTIVE_ENTRIES, PREFETCH), | ||
124 | .cntr = { {2, -1, -1}, {3, -1, -1} }, | 194 | .cntr = { {2, -1, -1}, {3, -1, -1} }, |
125 | }, | 195 | }, |
126 | [P4_EVENT_FSB_DATA_ACTIVITY] = { | 196 | [P4_EVENT_FSB_DATA_ACTIVITY] = { |
127 | .opcode = P4_OPCODE(P4_EVENT_FSB_DATA_ACTIVITY), | 197 | .opcode = P4_OPCODE(P4_EVENT_FSB_DATA_ACTIVITY), |
128 | .escr_msr = { MSR_P4_FSB_ESCR0, MSR_P4_FSB_ESCR1 }, | 198 | .escr_msr = { MSR_P4_FSB_ESCR0, MSR_P4_FSB_ESCR1 }, |
199 | .escr_emask = | ||
200 | P4_ESCR_EMASK_BIT(P4_EVENT_FSB_DATA_ACTIVITY, DRDY_DRV) | | ||
201 | P4_ESCR_EMASK_BIT(P4_EVENT_FSB_DATA_ACTIVITY, DRDY_OWN) | | ||
202 | P4_ESCR_EMASK_BIT(P4_EVENT_FSB_DATA_ACTIVITY, DRDY_OTHER) | | ||
203 | P4_ESCR_EMASK_BIT(P4_EVENT_FSB_DATA_ACTIVITY, DBSY_DRV) | | ||
204 | P4_ESCR_EMASK_BIT(P4_EVENT_FSB_DATA_ACTIVITY, DBSY_OWN) | | ||
205 | P4_ESCR_EMASK_BIT(P4_EVENT_FSB_DATA_ACTIVITY, DBSY_OTHER), | ||
206 | .shared = 1, | ||
129 | .cntr = { {0, -1, -1}, {2, -1, -1} }, | 207 | .cntr = { {0, -1, -1}, {2, -1, -1} }, |
130 | }, | 208 | }, |
131 | [P4_EVENT_BSQ_ALLOCATION] = { /* shared ESCR, broken CCCR1 */ | 209 | [P4_EVENT_BSQ_ALLOCATION] = { /* shared ESCR, broken CCCR1 */ |
132 | .opcode = P4_OPCODE(P4_EVENT_BSQ_ALLOCATION), | 210 | .opcode = P4_OPCODE(P4_EVENT_BSQ_ALLOCATION), |
133 | .escr_msr = { MSR_P4_BSU_ESCR0, MSR_P4_BSU_ESCR0 }, | 211 | .escr_msr = { MSR_P4_BSU_ESCR0, MSR_P4_BSU_ESCR0 }, |
212 | .escr_emask = | ||
213 | P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ALLOCATION, REQ_TYPE0) | | ||
214 | P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ALLOCATION, REQ_TYPE1) | | ||
215 | P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ALLOCATION, REQ_LEN0) | | ||
216 | P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ALLOCATION, REQ_LEN1) | | ||
217 | P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ALLOCATION, REQ_IO_TYPE) | | ||
218 | P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ALLOCATION, REQ_LOCK_TYPE) | | ||
219 | P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ALLOCATION, REQ_CACHE_TYPE) | | ||
220 | P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ALLOCATION, REQ_SPLIT_TYPE) | | ||
221 | P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ALLOCATION, REQ_DEM_TYPE) | | ||
222 | P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ALLOCATION, REQ_ORD_TYPE) | | ||
223 | P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ALLOCATION, MEM_TYPE0) | | ||
224 | P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ALLOCATION, MEM_TYPE1) | | ||
225 | P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ALLOCATION, MEM_TYPE2), | ||
134 | .cntr = { {0, -1, -1}, {1, -1, -1} }, | 226 | .cntr = { {0, -1, -1}, {1, -1, -1} }, |
135 | }, | 227 | }, |
136 | [P4_EVENT_BSQ_ACTIVE_ENTRIES] = { /* shared ESCR */ | 228 | [P4_EVENT_BSQ_ACTIVE_ENTRIES] = { /* shared ESCR */ |
137 | .opcode = P4_OPCODE(P4_EVENT_BSQ_ACTIVE_ENTRIES), | 229 | .opcode = P4_OPCODE(P4_EVENT_BSQ_ACTIVE_ENTRIES), |
138 | .escr_msr = { MSR_P4_BSU_ESCR1 , MSR_P4_BSU_ESCR1 }, | 230 | .escr_msr = { MSR_P4_BSU_ESCR1 , MSR_P4_BSU_ESCR1 }, |
231 | .escr_emask = | ||
232 | P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ACTIVE_ENTRIES, REQ_TYPE0) | | ||
233 | P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ACTIVE_ENTRIES, REQ_TYPE1) | | ||
234 | P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ACTIVE_ENTRIES, REQ_LEN0) | | ||
235 | P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ACTIVE_ENTRIES, REQ_LEN1) | | ||
236 | P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ACTIVE_ENTRIES, REQ_IO_TYPE) | | ||
237 | P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ACTIVE_ENTRIES, REQ_LOCK_TYPE) | | ||
238 | P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ACTIVE_ENTRIES, REQ_CACHE_TYPE) | | ||
239 | P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ACTIVE_ENTRIES, REQ_SPLIT_TYPE) | | ||
240 | P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ACTIVE_ENTRIES, REQ_DEM_TYPE) | | ||
241 | P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ACTIVE_ENTRIES, REQ_ORD_TYPE) | | ||
242 | P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ACTIVE_ENTRIES, MEM_TYPE0) | | ||
243 | P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ACTIVE_ENTRIES, MEM_TYPE1) | | ||
244 | P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ACTIVE_ENTRIES, MEM_TYPE2), | ||
139 | .cntr = { {2, -1, -1}, {3, -1, -1} }, | 245 | .cntr = { {2, -1, -1}, {3, -1, -1} }, |
140 | }, | 246 | }, |
141 | [P4_EVENT_SSE_INPUT_ASSIST] = { | 247 | [P4_EVENT_SSE_INPUT_ASSIST] = { |
142 | .opcode = P4_OPCODE(P4_EVENT_SSE_INPUT_ASSIST), | 248 | .opcode = P4_OPCODE(P4_EVENT_SSE_INPUT_ASSIST), |
143 | .escr_msr = { MSR_P4_FIRM_ESCR0, MSR_P4_FIRM_ESCR1 }, | 249 | .escr_msr = { MSR_P4_FIRM_ESCR0, MSR_P4_FIRM_ESCR1 }, |
250 | .escr_emask = | ||
251 | P4_ESCR_EMASK_BIT(P4_EVENT_SSE_INPUT_ASSIST, ALL), | ||
252 | .shared = 1, | ||
144 | .cntr = { {8, 9, -1}, {10, 11, -1} }, | 253 | .cntr = { {8, 9, -1}, {10, 11, -1} }, |
145 | }, | 254 | }, |
146 | [P4_EVENT_PACKED_SP_UOP] = { | 255 | [P4_EVENT_PACKED_SP_UOP] = { |
147 | .opcode = P4_OPCODE(P4_EVENT_PACKED_SP_UOP), | 256 | .opcode = P4_OPCODE(P4_EVENT_PACKED_SP_UOP), |
148 | .escr_msr = { MSR_P4_FIRM_ESCR0, MSR_P4_FIRM_ESCR1 }, | 257 | .escr_msr = { MSR_P4_FIRM_ESCR0, MSR_P4_FIRM_ESCR1 }, |
258 | .escr_emask = | ||
259 | P4_ESCR_EMASK_BIT(P4_EVENT_PACKED_SP_UOP, ALL), | ||
260 | .shared = 1, | ||
149 | .cntr = { {8, 9, -1}, {10, 11, -1} }, | 261 | .cntr = { {8, 9, -1}, {10, 11, -1} }, |
150 | }, | 262 | }, |
151 | [P4_EVENT_PACKED_DP_UOP] = { | 263 | [P4_EVENT_PACKED_DP_UOP] = { |
152 | .opcode = P4_OPCODE(P4_EVENT_PACKED_DP_UOP), | 264 | .opcode = P4_OPCODE(P4_EVENT_PACKED_DP_UOP), |
153 | .escr_msr = { MSR_P4_FIRM_ESCR0, MSR_P4_FIRM_ESCR1 }, | 265 | .escr_msr = { MSR_P4_FIRM_ESCR0, MSR_P4_FIRM_ESCR1 }, |
266 | .escr_emask = | ||
267 | P4_ESCR_EMASK_BIT(P4_EVENT_PACKED_DP_UOP, ALL), | ||
268 | .shared = 1, | ||
154 | .cntr = { {8, 9, -1}, {10, 11, -1} }, | 269 | .cntr = { {8, 9, -1}, {10, 11, -1} }, |
155 | }, | 270 | }, |
156 | [P4_EVENT_SCALAR_SP_UOP] = { | 271 | [P4_EVENT_SCALAR_SP_UOP] = { |
157 | .opcode = P4_OPCODE(P4_EVENT_SCALAR_SP_UOP), | 272 | .opcode = P4_OPCODE(P4_EVENT_SCALAR_SP_UOP), |
158 | .escr_msr = { MSR_P4_FIRM_ESCR0, MSR_P4_FIRM_ESCR1 }, | 273 | .escr_msr = { MSR_P4_FIRM_ESCR0, MSR_P4_FIRM_ESCR1 }, |
274 | .escr_emask = | ||
275 | P4_ESCR_EMASK_BIT(P4_EVENT_SCALAR_SP_UOP, ALL), | ||
276 | .shared = 1, | ||
159 | .cntr = { {8, 9, -1}, {10, 11, -1} }, | 277 | .cntr = { {8, 9, -1}, {10, 11, -1} }, |
160 | }, | 278 | }, |
161 | [P4_EVENT_SCALAR_DP_UOP] = { | 279 | [P4_EVENT_SCALAR_DP_UOP] = { |
162 | .opcode = P4_OPCODE(P4_EVENT_SCALAR_DP_UOP), | 280 | .opcode = P4_OPCODE(P4_EVENT_SCALAR_DP_UOP), |
163 | .escr_msr = { MSR_P4_FIRM_ESCR0, MSR_P4_FIRM_ESCR1 }, | 281 | .escr_msr = { MSR_P4_FIRM_ESCR0, MSR_P4_FIRM_ESCR1 }, |
282 | .escr_emask = | ||
283 | P4_ESCR_EMASK_BIT(P4_EVENT_SCALAR_DP_UOP, ALL), | ||
284 | .shared = 1, | ||
164 | .cntr = { {8, 9, -1}, {10, 11, -1} }, | 285 | .cntr = { {8, 9, -1}, {10, 11, -1} }, |
165 | }, | 286 | }, |
166 | [P4_EVENT_64BIT_MMX_UOP] = { | 287 | [P4_EVENT_64BIT_MMX_UOP] = { |
167 | .opcode = P4_OPCODE(P4_EVENT_64BIT_MMX_UOP), | 288 | .opcode = P4_OPCODE(P4_EVENT_64BIT_MMX_UOP), |
168 | .escr_msr = { MSR_P4_FIRM_ESCR0, MSR_P4_FIRM_ESCR1 }, | 289 | .escr_msr = { MSR_P4_FIRM_ESCR0, MSR_P4_FIRM_ESCR1 }, |
290 | .escr_emask = | ||
291 | P4_ESCR_EMASK_BIT(P4_EVENT_64BIT_MMX_UOP, ALL), | ||
292 | .shared = 1, | ||
169 | .cntr = { {8, 9, -1}, {10, 11, -1} }, | 293 | .cntr = { {8, 9, -1}, {10, 11, -1} }, |
170 | }, | 294 | }, |
171 | [P4_EVENT_128BIT_MMX_UOP] = { | 295 | [P4_EVENT_128BIT_MMX_UOP] = { |
172 | .opcode = P4_OPCODE(P4_EVENT_128BIT_MMX_UOP), | 296 | .opcode = P4_OPCODE(P4_EVENT_128BIT_MMX_UOP), |
173 | .escr_msr = { MSR_P4_FIRM_ESCR0, MSR_P4_FIRM_ESCR1 }, | 297 | .escr_msr = { MSR_P4_FIRM_ESCR0, MSR_P4_FIRM_ESCR1 }, |
298 | .escr_emask = | ||
299 | P4_ESCR_EMASK_BIT(P4_EVENT_128BIT_MMX_UOP, ALL), | ||
300 | .shared = 1, | ||
174 | .cntr = { {8, 9, -1}, {10, 11, -1} }, | 301 | .cntr = { {8, 9, -1}, {10, 11, -1} }, |
175 | }, | 302 | }, |
176 | [P4_EVENT_X87_FP_UOP] = { | 303 | [P4_EVENT_X87_FP_UOP] = { |
177 | .opcode = P4_OPCODE(P4_EVENT_X87_FP_UOP), | 304 | .opcode = P4_OPCODE(P4_EVENT_X87_FP_UOP), |
178 | .escr_msr = { MSR_P4_FIRM_ESCR0, MSR_P4_FIRM_ESCR1 }, | 305 | .escr_msr = { MSR_P4_FIRM_ESCR0, MSR_P4_FIRM_ESCR1 }, |
306 | .escr_emask = | ||
307 | P4_ESCR_EMASK_BIT(P4_EVENT_X87_FP_UOP, ALL), | ||
308 | .shared = 1, | ||
179 | .cntr = { {8, 9, -1}, {10, 11, -1} }, | 309 | .cntr = { {8, 9, -1}, {10, 11, -1} }, |
180 | }, | 310 | }, |
181 | [P4_EVENT_TC_MISC] = { | 311 | [P4_EVENT_TC_MISC] = { |
182 | .opcode = P4_OPCODE(P4_EVENT_TC_MISC), | 312 | .opcode = P4_OPCODE(P4_EVENT_TC_MISC), |
183 | .escr_msr = { MSR_P4_TC_ESCR0, MSR_P4_TC_ESCR1 }, | 313 | .escr_msr = { MSR_P4_TC_ESCR0, MSR_P4_TC_ESCR1 }, |
314 | .escr_emask = | ||
315 | P4_ESCR_EMASK_BIT(P4_EVENT_TC_MISC, FLUSH), | ||
184 | .cntr = { {4, 5, -1}, {6, 7, -1} }, | 316 | .cntr = { {4, 5, -1}, {6, 7, -1} }, |
185 | }, | 317 | }, |
186 | [P4_EVENT_GLOBAL_POWER_EVENTS] = { | 318 | [P4_EVENT_GLOBAL_POWER_EVENTS] = { |
187 | .opcode = P4_OPCODE(P4_EVENT_GLOBAL_POWER_EVENTS), | 319 | .opcode = P4_OPCODE(P4_EVENT_GLOBAL_POWER_EVENTS), |
188 | .escr_msr = { MSR_P4_FSB_ESCR0, MSR_P4_FSB_ESCR1 }, | 320 | .escr_msr = { MSR_P4_FSB_ESCR0, MSR_P4_FSB_ESCR1 }, |
321 | .escr_emask = | ||
322 | P4_ESCR_EMASK_BIT(P4_EVENT_GLOBAL_POWER_EVENTS, RUNNING), | ||
189 | .cntr = { {0, -1, -1}, {2, -1, -1} }, | 323 | .cntr = { {0, -1, -1}, {2, -1, -1} }, |
190 | }, | 324 | }, |
191 | [P4_EVENT_TC_MS_XFER] = { | 325 | [P4_EVENT_TC_MS_XFER] = { |
192 | .opcode = P4_OPCODE(P4_EVENT_TC_MS_XFER), | 326 | .opcode = P4_OPCODE(P4_EVENT_TC_MS_XFER), |
193 | .escr_msr = { MSR_P4_MS_ESCR0, MSR_P4_MS_ESCR1 }, | 327 | .escr_msr = { MSR_P4_MS_ESCR0, MSR_P4_MS_ESCR1 }, |
328 | .escr_emask = | ||
329 | P4_ESCR_EMASK_BIT(P4_EVENT_TC_MS_XFER, CISC), | ||
194 | .cntr = { {4, 5, -1}, {6, 7, -1} }, | 330 | .cntr = { {4, 5, -1}, {6, 7, -1} }, |
195 | }, | 331 | }, |
196 | [P4_EVENT_UOP_QUEUE_WRITES] = { | 332 | [P4_EVENT_UOP_QUEUE_WRITES] = { |
197 | .opcode = P4_OPCODE(P4_EVENT_UOP_QUEUE_WRITES), | 333 | .opcode = P4_OPCODE(P4_EVENT_UOP_QUEUE_WRITES), |
198 | .escr_msr = { MSR_P4_MS_ESCR0, MSR_P4_MS_ESCR1 }, | 334 | .escr_msr = { MSR_P4_MS_ESCR0, MSR_P4_MS_ESCR1 }, |
335 | .escr_emask = | ||
336 | P4_ESCR_EMASK_BIT(P4_EVENT_UOP_QUEUE_WRITES, FROM_TC_BUILD) | | ||
337 | P4_ESCR_EMASK_BIT(P4_EVENT_UOP_QUEUE_WRITES, FROM_TC_DELIVER) | | ||
338 | P4_ESCR_EMASK_BIT(P4_EVENT_UOP_QUEUE_WRITES, FROM_ROM), | ||
199 | .cntr = { {4, 5, -1}, {6, 7, -1} }, | 339 | .cntr = { {4, 5, -1}, {6, 7, -1} }, |
200 | }, | 340 | }, |
201 | [P4_EVENT_RETIRED_MISPRED_BRANCH_TYPE] = { | 341 | [P4_EVENT_RETIRED_MISPRED_BRANCH_TYPE] = { |
202 | .opcode = P4_OPCODE(P4_EVENT_RETIRED_MISPRED_BRANCH_TYPE), | 342 | .opcode = P4_OPCODE(P4_EVENT_RETIRED_MISPRED_BRANCH_TYPE), |
203 | .escr_msr = { MSR_P4_TBPU_ESCR0 , MSR_P4_TBPU_ESCR0 }, | 343 | .escr_msr = { MSR_P4_TBPU_ESCR0 , MSR_P4_TBPU_ESCR0 }, |
344 | .escr_emask = | ||
345 | P4_ESCR_EMASK_BIT(P4_EVENT_RETIRED_MISPRED_BRANCH_TYPE, CONDITIONAL) | | ||
346 | P4_ESCR_EMASK_BIT(P4_EVENT_RETIRED_MISPRED_BRANCH_TYPE, CALL) | | ||
347 | P4_ESCR_EMASK_BIT(P4_EVENT_RETIRED_MISPRED_BRANCH_TYPE, RETURN) | | ||
348 | P4_ESCR_EMASK_BIT(P4_EVENT_RETIRED_MISPRED_BRANCH_TYPE, INDIRECT), | ||
204 | .cntr = { {4, 5, -1}, {6, 7, -1} }, | 349 | .cntr = { {4, 5, -1}, {6, 7, -1} }, |
205 | }, | 350 | }, |
206 | [P4_EVENT_RETIRED_BRANCH_TYPE] = { | 351 | [P4_EVENT_RETIRED_BRANCH_TYPE] = { |
207 | .opcode = P4_OPCODE(P4_EVENT_RETIRED_BRANCH_TYPE), | 352 | .opcode = P4_OPCODE(P4_EVENT_RETIRED_BRANCH_TYPE), |
208 | .escr_msr = { MSR_P4_TBPU_ESCR0 , MSR_P4_TBPU_ESCR1 }, | 353 | .escr_msr = { MSR_P4_TBPU_ESCR0 , MSR_P4_TBPU_ESCR1 }, |
354 | .escr_emask = | ||
355 | P4_ESCR_EMASK_BIT(P4_EVENT_RETIRED_BRANCH_TYPE, CONDITIONAL) | | ||
356 | P4_ESCR_EMASK_BIT(P4_EVENT_RETIRED_BRANCH_TYPE, CALL) | | ||
357 | P4_ESCR_EMASK_BIT(P4_EVENT_RETIRED_BRANCH_TYPE, RETURN) | | ||
358 | P4_ESCR_EMASK_BIT(P4_EVENT_RETIRED_BRANCH_TYPE, INDIRECT), | ||
209 | .cntr = { {4, 5, -1}, {6, 7, -1} }, | 359 | .cntr = { {4, 5, -1}, {6, 7, -1} }, |
210 | }, | 360 | }, |
211 | [P4_EVENT_RESOURCE_STALL] = { | 361 | [P4_EVENT_RESOURCE_STALL] = { |
212 | .opcode = P4_OPCODE(P4_EVENT_RESOURCE_STALL), | 362 | .opcode = P4_OPCODE(P4_EVENT_RESOURCE_STALL), |
213 | .escr_msr = { MSR_P4_ALF_ESCR0, MSR_P4_ALF_ESCR1 }, | 363 | .escr_msr = { MSR_P4_ALF_ESCR0, MSR_P4_ALF_ESCR1 }, |
364 | .escr_emask = | ||
365 | P4_ESCR_EMASK_BIT(P4_EVENT_RESOURCE_STALL, SBFULL), | ||
214 | .cntr = { {12, 13, 16}, {14, 15, 17} }, | 366 | .cntr = { {12, 13, 16}, {14, 15, 17} }, |
215 | }, | 367 | }, |
216 | [P4_EVENT_WC_BUFFER] = { | 368 | [P4_EVENT_WC_BUFFER] = { |
217 | .opcode = P4_OPCODE(P4_EVENT_WC_BUFFER), | 369 | .opcode = P4_OPCODE(P4_EVENT_WC_BUFFER), |
218 | .escr_msr = { MSR_P4_DAC_ESCR0, MSR_P4_DAC_ESCR1 }, | 370 | .escr_msr = { MSR_P4_DAC_ESCR0, MSR_P4_DAC_ESCR1 }, |
371 | .escr_emask = | ||
372 | P4_ESCR_EMASK_BIT(P4_EVENT_WC_BUFFER, WCB_EVICTS) | | ||
373 | P4_ESCR_EMASK_BIT(P4_EVENT_WC_BUFFER, WCB_FULL_EVICTS), | ||
374 | .shared = 1, | ||
219 | .cntr = { {8, 9, -1}, {10, 11, -1} }, | 375 | .cntr = { {8, 9, -1}, {10, 11, -1} }, |
220 | }, | 376 | }, |
221 | [P4_EVENT_B2B_CYCLES] = { | 377 | [P4_EVENT_B2B_CYCLES] = { |
222 | .opcode = P4_OPCODE(P4_EVENT_B2B_CYCLES), | 378 | .opcode = P4_OPCODE(P4_EVENT_B2B_CYCLES), |
223 | .escr_msr = { MSR_P4_FSB_ESCR0, MSR_P4_FSB_ESCR1 }, | 379 | .escr_msr = { MSR_P4_FSB_ESCR0, MSR_P4_FSB_ESCR1 }, |
380 | .escr_emask = 0, | ||
224 | .cntr = { {0, -1, -1}, {2, -1, -1} }, | 381 | .cntr = { {0, -1, -1}, {2, -1, -1} }, |
225 | }, | 382 | }, |
226 | [P4_EVENT_BNR] = { | 383 | [P4_EVENT_BNR] = { |
227 | .opcode = P4_OPCODE(P4_EVENT_BNR), | 384 | .opcode = P4_OPCODE(P4_EVENT_BNR), |
228 | .escr_msr = { MSR_P4_FSB_ESCR0, MSR_P4_FSB_ESCR1 }, | 385 | .escr_msr = { MSR_P4_FSB_ESCR0, MSR_P4_FSB_ESCR1 }, |
386 | .escr_emask = 0, | ||
229 | .cntr = { {0, -1, -1}, {2, -1, -1} }, | 387 | .cntr = { {0, -1, -1}, {2, -1, -1} }, |
230 | }, | 388 | }, |
231 | [P4_EVENT_SNOOP] = { | 389 | [P4_EVENT_SNOOP] = { |
232 | .opcode = P4_OPCODE(P4_EVENT_SNOOP), | 390 | .opcode = P4_OPCODE(P4_EVENT_SNOOP), |
233 | .escr_msr = { MSR_P4_FSB_ESCR0, MSR_P4_FSB_ESCR1 }, | 391 | .escr_msr = { MSR_P4_FSB_ESCR0, MSR_P4_FSB_ESCR1 }, |
392 | .escr_emask = 0, | ||
234 | .cntr = { {0, -1, -1}, {2, -1, -1} }, | 393 | .cntr = { {0, -1, -1}, {2, -1, -1} }, |
235 | }, | 394 | }, |
236 | [P4_EVENT_RESPONSE] = { | 395 | [P4_EVENT_RESPONSE] = { |
237 | .opcode = P4_OPCODE(P4_EVENT_RESPONSE), | 396 | .opcode = P4_OPCODE(P4_EVENT_RESPONSE), |
238 | .escr_msr = { MSR_P4_FSB_ESCR0, MSR_P4_FSB_ESCR1 }, | 397 | .escr_msr = { MSR_P4_FSB_ESCR0, MSR_P4_FSB_ESCR1 }, |
398 | .escr_emask = 0, | ||
239 | .cntr = { {0, -1, -1}, {2, -1, -1} }, | 399 | .cntr = { {0, -1, -1}, {2, -1, -1} }, |
240 | }, | 400 | }, |
241 | [P4_EVENT_FRONT_END_EVENT] = { | 401 | [P4_EVENT_FRONT_END_EVENT] = { |
242 | .opcode = P4_OPCODE(P4_EVENT_FRONT_END_EVENT), | 402 | .opcode = P4_OPCODE(P4_EVENT_FRONT_END_EVENT), |
243 | .escr_msr = { MSR_P4_CRU_ESCR2, MSR_P4_CRU_ESCR3 }, | 403 | .escr_msr = { MSR_P4_CRU_ESCR2, MSR_P4_CRU_ESCR3 }, |
404 | .escr_emask = | ||
405 | P4_ESCR_EMASK_BIT(P4_EVENT_FRONT_END_EVENT, NBOGUS) | | ||
406 | P4_ESCR_EMASK_BIT(P4_EVENT_FRONT_END_EVENT, BOGUS), | ||
244 | .cntr = { {12, 13, 16}, {14, 15, 17} }, | 407 | .cntr = { {12, 13, 16}, {14, 15, 17} }, |
245 | }, | 408 | }, |
246 | [P4_EVENT_EXECUTION_EVENT] = { | 409 | [P4_EVENT_EXECUTION_EVENT] = { |
247 | .opcode = P4_OPCODE(P4_EVENT_EXECUTION_EVENT), | 410 | .opcode = P4_OPCODE(P4_EVENT_EXECUTION_EVENT), |
248 | .escr_msr = { MSR_P4_CRU_ESCR2, MSR_P4_CRU_ESCR3 }, | 411 | .escr_msr = { MSR_P4_CRU_ESCR2, MSR_P4_CRU_ESCR3 }, |
412 | .escr_emask = | ||
413 | P4_ESCR_EMASK_BIT(P4_EVENT_EXECUTION_EVENT, NBOGUS0) | | ||
414 | P4_ESCR_EMASK_BIT(P4_EVENT_EXECUTION_EVENT, NBOGUS1) | | ||
415 | P4_ESCR_EMASK_BIT(P4_EVENT_EXECUTION_EVENT, NBOGUS2) | | ||
416 | P4_ESCR_EMASK_BIT(P4_EVENT_EXECUTION_EVENT, NBOGUS3) | | ||
417 | P4_ESCR_EMASK_BIT(P4_EVENT_EXECUTION_EVENT, BOGUS0) | | ||
418 | P4_ESCR_EMASK_BIT(P4_EVENT_EXECUTION_EVENT, BOGUS1) | | ||
419 | P4_ESCR_EMASK_BIT(P4_EVENT_EXECUTION_EVENT, BOGUS2) | | ||
420 | P4_ESCR_EMASK_BIT(P4_EVENT_EXECUTION_EVENT, BOGUS3), | ||
249 | .cntr = { {12, 13, 16}, {14, 15, 17} }, | 421 | .cntr = { {12, 13, 16}, {14, 15, 17} }, |
250 | }, | 422 | }, |
251 | [P4_EVENT_REPLAY_EVENT] = { | 423 | [P4_EVENT_REPLAY_EVENT] = { |
252 | .opcode = P4_OPCODE(P4_EVENT_REPLAY_EVENT), | 424 | .opcode = P4_OPCODE(P4_EVENT_REPLAY_EVENT), |
253 | .escr_msr = { MSR_P4_CRU_ESCR2, MSR_P4_CRU_ESCR3 }, | 425 | .escr_msr = { MSR_P4_CRU_ESCR2, MSR_P4_CRU_ESCR3 }, |
426 | .escr_emask = | ||
427 | P4_ESCR_EMASK_BIT(P4_EVENT_REPLAY_EVENT, NBOGUS) | | ||
428 | P4_ESCR_EMASK_BIT(P4_EVENT_REPLAY_EVENT, BOGUS), | ||
254 | .cntr = { {12, 13, 16}, {14, 15, 17} }, | 429 | .cntr = { {12, 13, 16}, {14, 15, 17} }, |
255 | }, | 430 | }, |
256 | [P4_EVENT_INSTR_RETIRED] = { | 431 | [P4_EVENT_INSTR_RETIRED] = { |
257 | .opcode = P4_OPCODE(P4_EVENT_INSTR_RETIRED), | 432 | .opcode = P4_OPCODE(P4_EVENT_INSTR_RETIRED), |
258 | .escr_msr = { MSR_P4_CRU_ESCR0, MSR_P4_CRU_ESCR1 }, | 433 | .escr_msr = { MSR_P4_CRU_ESCR0, MSR_P4_CRU_ESCR1 }, |
434 | .escr_emask = | ||
435 | P4_ESCR_EMASK_BIT(P4_EVENT_INSTR_RETIRED, NBOGUSNTAG) | | ||
436 | P4_ESCR_EMASK_BIT(P4_EVENT_INSTR_RETIRED, NBOGUSTAG) | | ||
437 | P4_ESCR_EMASK_BIT(P4_EVENT_INSTR_RETIRED, BOGUSNTAG) | | ||
438 | P4_ESCR_EMASK_BIT(P4_EVENT_INSTR_RETIRED, BOGUSTAG), | ||
259 | .cntr = { {12, 13, 16}, {14, 15, 17} }, | 439 | .cntr = { {12, 13, 16}, {14, 15, 17} }, |
260 | }, | 440 | }, |
261 | [P4_EVENT_UOPS_RETIRED] = { | 441 | [P4_EVENT_UOPS_RETIRED] = { |
262 | .opcode = P4_OPCODE(P4_EVENT_UOPS_RETIRED), | 442 | .opcode = P4_OPCODE(P4_EVENT_UOPS_RETIRED), |
263 | .escr_msr = { MSR_P4_CRU_ESCR0, MSR_P4_CRU_ESCR1 }, | 443 | .escr_msr = { MSR_P4_CRU_ESCR0, MSR_P4_CRU_ESCR1 }, |
444 | .escr_emask = | ||
445 | P4_ESCR_EMASK_BIT(P4_EVENT_UOPS_RETIRED, NBOGUS) | | ||
446 | P4_ESCR_EMASK_BIT(P4_EVENT_UOPS_RETIRED, BOGUS), | ||
264 | .cntr = { {12, 13, 16}, {14, 15, 17} }, | 447 | .cntr = { {12, 13, 16}, {14, 15, 17} }, |
265 | }, | 448 | }, |
266 | [P4_EVENT_UOP_TYPE] = { | 449 | [P4_EVENT_UOP_TYPE] = { |
267 | .opcode = P4_OPCODE(P4_EVENT_UOP_TYPE), | 450 | .opcode = P4_OPCODE(P4_EVENT_UOP_TYPE), |
268 | .escr_msr = { MSR_P4_RAT_ESCR0, MSR_P4_RAT_ESCR1 }, | 451 | .escr_msr = { MSR_P4_RAT_ESCR0, MSR_P4_RAT_ESCR1 }, |
452 | .escr_emask = | ||
453 | P4_ESCR_EMASK_BIT(P4_EVENT_UOP_TYPE, TAGLOADS) | | ||
454 | P4_ESCR_EMASK_BIT(P4_EVENT_UOP_TYPE, TAGSTORES), | ||
269 | .cntr = { {12, 13, 16}, {14, 15, 17} }, | 455 | .cntr = { {12, 13, 16}, {14, 15, 17} }, |
270 | }, | 456 | }, |
271 | [P4_EVENT_BRANCH_RETIRED] = { | 457 | [P4_EVENT_BRANCH_RETIRED] = { |
272 | .opcode = P4_OPCODE(P4_EVENT_BRANCH_RETIRED), | 458 | .opcode = P4_OPCODE(P4_EVENT_BRANCH_RETIRED), |
273 | .escr_msr = { MSR_P4_CRU_ESCR2, MSR_P4_CRU_ESCR3 }, | 459 | .escr_msr = { MSR_P4_CRU_ESCR2, MSR_P4_CRU_ESCR3 }, |
460 | .escr_emask = | ||
461 | P4_ESCR_EMASK_BIT(P4_EVENT_BRANCH_RETIRED, MMNP) | | ||
462 | P4_ESCR_EMASK_BIT(P4_EVENT_BRANCH_RETIRED, MMNM) | | ||
463 | P4_ESCR_EMASK_BIT(P4_EVENT_BRANCH_RETIRED, MMTP) | | ||
464 | P4_ESCR_EMASK_BIT(P4_EVENT_BRANCH_RETIRED, MMTM), | ||
274 | .cntr = { {12, 13, 16}, {14, 15, 17} }, | 465 | .cntr = { {12, 13, 16}, {14, 15, 17} }, |
275 | }, | 466 | }, |
276 | [P4_EVENT_MISPRED_BRANCH_RETIRED] = { | 467 | [P4_EVENT_MISPRED_BRANCH_RETIRED] = { |
277 | .opcode = P4_OPCODE(P4_EVENT_MISPRED_BRANCH_RETIRED), | 468 | .opcode = P4_OPCODE(P4_EVENT_MISPRED_BRANCH_RETIRED), |
278 | .escr_msr = { MSR_P4_CRU_ESCR0, MSR_P4_CRU_ESCR1 }, | 469 | .escr_msr = { MSR_P4_CRU_ESCR0, MSR_P4_CRU_ESCR1 }, |
470 | .escr_emask = | ||
471 | P4_ESCR_EMASK_BIT(P4_EVENT_MISPRED_BRANCH_RETIRED, NBOGUS), | ||
279 | .cntr = { {12, 13, 16}, {14, 15, 17} }, | 472 | .cntr = { {12, 13, 16}, {14, 15, 17} }, |
280 | }, | 473 | }, |
281 | [P4_EVENT_X87_ASSIST] = { | 474 | [P4_EVENT_X87_ASSIST] = { |
282 | .opcode = P4_OPCODE(P4_EVENT_X87_ASSIST), | 475 | .opcode = P4_OPCODE(P4_EVENT_X87_ASSIST), |
283 | .escr_msr = { MSR_P4_CRU_ESCR2, MSR_P4_CRU_ESCR3 }, | 476 | .escr_msr = { MSR_P4_CRU_ESCR2, MSR_P4_CRU_ESCR3 }, |
477 | .escr_emask = | ||
478 | P4_ESCR_EMASK_BIT(P4_EVENT_X87_ASSIST, FPSU) | | ||
479 | P4_ESCR_EMASK_BIT(P4_EVENT_X87_ASSIST, FPSO) | | ||
480 | P4_ESCR_EMASK_BIT(P4_EVENT_X87_ASSIST, POAO) | | ||
481 | P4_ESCR_EMASK_BIT(P4_EVENT_X87_ASSIST, POAU) | | ||
482 | P4_ESCR_EMASK_BIT(P4_EVENT_X87_ASSIST, PREA), | ||
284 | .cntr = { {12, 13, 16}, {14, 15, 17} }, | 483 | .cntr = { {12, 13, 16}, {14, 15, 17} }, |
285 | }, | 484 | }, |
286 | [P4_EVENT_MACHINE_CLEAR] = { | 485 | [P4_EVENT_MACHINE_CLEAR] = { |
287 | .opcode = P4_OPCODE(P4_EVENT_MACHINE_CLEAR), | 486 | .opcode = P4_OPCODE(P4_EVENT_MACHINE_CLEAR), |
288 | .escr_msr = { MSR_P4_CRU_ESCR2, MSR_P4_CRU_ESCR3 }, | 487 | .escr_msr = { MSR_P4_CRU_ESCR2, MSR_P4_CRU_ESCR3 }, |
488 | .escr_emask = | ||
489 | P4_ESCR_EMASK_BIT(P4_EVENT_MACHINE_CLEAR, CLEAR) | | ||
490 | P4_ESCR_EMASK_BIT(P4_EVENT_MACHINE_CLEAR, MOCLEAR) | | ||
491 | P4_ESCR_EMASK_BIT(P4_EVENT_MACHINE_CLEAR, SMCLEAR), | ||
289 | .cntr = { {12, 13, 16}, {14, 15, 17} }, | 492 | .cntr = { {12, 13, 16}, {14, 15, 17} }, |
290 | }, | 493 | }, |
291 | [P4_EVENT_INSTR_COMPLETED] = { | 494 | [P4_EVENT_INSTR_COMPLETED] = { |
292 | .opcode = P4_OPCODE(P4_EVENT_INSTR_COMPLETED), | 495 | .opcode = P4_OPCODE(P4_EVENT_INSTR_COMPLETED), |
293 | .escr_msr = { MSR_P4_CRU_ESCR0, MSR_P4_CRU_ESCR1 }, | 496 | .escr_msr = { MSR_P4_CRU_ESCR0, MSR_P4_CRU_ESCR1 }, |
497 | .escr_emask = | ||
498 | P4_ESCR_EMASK_BIT(P4_EVENT_INSTR_COMPLETED, NBOGUS) | | ||
499 | P4_ESCR_EMASK_BIT(P4_EVENT_INSTR_COMPLETED, BOGUS), | ||
294 | .cntr = { {12, 13, 16}, {14, 15, 17} }, | 500 | .cntr = { {12, 13, 16}, {14, 15, 17} }, |
295 | }, | 501 | }, |
296 | }; | 502 | }; |
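With escr_emask populated, every event in the bind map now carries the set of ESCR EventMask bits that are architecturally defined for it, and a raw config is accepted only if its mask bits are a subset of that set. A toy model of the subset test (bit positions and names here are illustrative assumptions, not the kernel's P4_ESCR_EMASK_BIT encoding):

#include <stdio.h>
#include <stdint.h>

#define EMASK_BIT(n)    (1u << (n))

static int validate_emask(uint32_t requested, uint32_t valid)
{
        /* any requested bit outside the defined set rejects the config */
        return (requested & ~valid) ? -1 : 0;
}

int main(void)
{
        uint32_t valid = EMASK_BIT(0) | EMASK_BIT(1);   /* say, NBOGUS | BOGUS */

        printf("%d\n", validate_emask(EMASK_BIT(1), valid));   /* 0: ok */
        printf("%d\n", validate_emask(EMASK_BIT(2), valid));   /* -1: invalid */
        return 0;
}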
@@ -428,29 +634,73 @@ static u64 p4_pmu_event_map(int hw_event) | |||
428 | return config; | 634 | return config; |
429 | } | 635 | } |
430 | 636 | ||
637 | /* check cpu model specifics */ | ||
638 | static bool p4_event_match_cpu_model(unsigned int event_idx) | ||
639 | { | ||
640 | /* INSTR_COMPLETED event only exists for models 3, 4 and 6 (Prescott) */ | ||
641 | if (event_idx == P4_EVENT_INSTR_COMPLETED) { | ||
642 | if (boot_cpu_data.x86_model != 3 && | ||
643 | boot_cpu_data.x86_model != 4 && | ||
644 | boot_cpu_data.x86_model != 6) | ||
645 | return false; | ||
646 | } | ||
647 | |||
648 | /* | ||
649 | * For info: | ||
650 | * - IQ_ESCR0 and IQ_ESCR1 are available only on models 1 and 2 | ||
651 | */ | ||
652 | |||
653 | return true; | ||
654 | } | ||
655 | |||
431 | static int p4_validate_raw_event(struct perf_event *event) | 656 | static int p4_validate_raw_event(struct perf_event *event) |
432 | { | 657 | { |
433 | unsigned int v; | 658 | unsigned int v, emask; |
434 | 659 | ||
435 | /* user data may have an out-of-bounds event index */ | 660 | /* User data may have an out-of-bounds event index */ |
436 | v = p4_config_unpack_event(event->attr.config); | 661 | v = p4_config_unpack_event(event->attr.config); |
437 | if (v >= ARRAY_SIZE(p4_event_bind_map)) { | 662 | if (v >= ARRAY_SIZE(p4_event_bind_map)) |
438 | pr_warning("P4 PMU: Unknown event code: %d\n", v); | 663 | return -EINVAL; |
664 | |||
665 | /* It may be unsupported: */ | ||
666 | if (!p4_event_match_cpu_model(v)) | ||
439 | return -EINVAL; | 667 | return -EINVAL; |
668 | |||
669 | /* | ||
670 | * NOTE: P4_CCCR_THREAD_ANY does not have the same meaning as | ||
671 | * in Architectural Performance Monitoring: it selects not | ||
672 | * _which_ logical cpu to count on but rather _when_, i.e. it | ||
673 | * depends on logical cpu state -- count the event if one cpu is | ||
674 | * active, none, both or any -- so we just allow the user to pass | ||
675 | * any desired value. | ||
676 | * | ||
677 | * In turn we always set the Tx_OS/Tx_USR bits bound to the logical | ||
678 | * cpu, without propagating them to the other cpu | ||
679 | */ | ||
680 | |||
681 | /* | ||
682 | * if an event is shared across the logical threads, | ||
683 | * the user needs special permissions to be able to use it | ||
684 | */ | ||
685 | if (p4_event_bind_map[v].shared) { | ||
686 | if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN)) | ||
687 | return -EACCES; | ||
440 | } | 688 | } |
441 | 689 | ||
690 | /* ESCR EventMask bits may be invalid */ | ||
691 | emask = p4_config_unpack_escr(event->attr.config) & P4_ESCR_EVENTMASK_MASK; | ||
692 | if (emask & ~p4_event_bind_map[v].escr_emask) | ||
693 | return -EINVAL; | ||
694 | |||
442 | /* | 695 | /* |
443 | * it may have some screwed PEBS bits | 696 | * it may have some invalid PEBS bits |
444 | */ | 697 | */ |
445 | if (p4_config_pebs_has(event->attr.config, P4_PEBS_CONFIG_ENABLE)) { | 698 | if (p4_config_pebs_has(event->attr.config, P4_PEBS_CONFIG_ENABLE)) |
446 | pr_warning("P4 PMU: PEBS are not supported yet\n"); | ||
447 | return -EINVAL; | 699 | return -EINVAL; |
448 | } | 700 | |
449 | v = p4_config_unpack_metric(event->attr.config); | 701 | v = p4_config_unpack_metric(event->attr.config); |
450 | if (v >= ARRAY_SIZE(p4_pebs_bind_map)) { | 702 | if (v >= ARRAY_SIZE(p4_pebs_bind_map)) |
451 | pr_warning("P4 PMU: Unknown metric code: %d\n", v); | ||
452 | return -EINVAL; | 703 | return -EINVAL; |
453 | } | ||
454 | 704 | ||
455 | return 0; | 705 | return 0; |
456 | } | 706 | } |
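The .shared flag added to the bind map feeds the permission check above: an event whose ESCR is shared between sibling hyper-threads can observe the other thread's activity, so unprivileged use is refused under paranoid settings. A simplified stand-in for the perf_paranoid_cpu()/capable() pair (plain flags model the sysctl and capability state):

#include <stdio.h>

#define EACCES 13

static int paranoid_cpu   = 1;  /* kernel.perf_event_paranoid > 0 */
static int cap_sys_admin  = 0;  /* caller lacks CAP_SYS_ADMIN */

static int check_shared_event(int shared)
{
        if (shared && paranoid_cpu && !cap_sys_admin)
                return -EACCES;
        return 0;
}

int main(void)
{
        printf("shared, unprivileged: %d\n", check_shared_event(1));
        printf("non-shared:           %d\n", check_shared_event(0));
        return 0;
}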
@@ -478,27 +728,21 @@ static int p4_hw_config(struct perf_event *event) | |||
478 | 728 | ||
479 | if (event->attr.type == PERF_TYPE_RAW) { | 729 | if (event->attr.type == PERF_TYPE_RAW) { |
480 | 730 | ||
731 | /* | ||
732 | * Clear bits we reserve to be managed by the kernel itself | ||
733 | * and never allow to be set from user space | ||
734 | */ | ||
735 | event->attr.config &= P4_CONFIG_MASK; | ||
736 | |||
481 | rc = p4_validate_raw_event(event); | 737 | rc = p4_validate_raw_event(event); |
482 | if (rc) | 738 | if (rc) |
483 | goto out; | 739 | goto out; |
484 | 740 | ||
485 | /* | 741 | /* |
486 | * We don't control raw events so it's up to the caller | ||
487 | * to pass sane values (and we don't count the thread number | ||
488 | * on HT machine but allow HT-compatible specifics to be | ||
489 | * passed on) | ||
490 | * | ||
491 | * Note that for RAW events we allow user to use P4_CCCR_RESERVED | 742 | * Note that for RAW events we allow user to use P4_CCCR_RESERVED |
492 | * bits since we keep additional info here (for cache events, etc.) | 743 | * bits since we keep additional info here (for cache events, etc.) |
493 | * | ||
494 | * XXX: HT wide things should check perf_paranoid_cpu() && | ||
495 | * CAP_SYS_ADMIN | ||
496 | */ | 744 | */ |
497 | event->hw.config |= event->attr.config & | 745 | event->hw.config |= event->attr.config; |
498 | (p4_config_pack_escr(P4_ESCR_MASK_HT) | | ||
499 | p4_config_pack_cccr(P4_CCCR_MASK_HT | P4_CCCR_RESERVED)); | ||
500 | |||
501 | event->hw.config &= ~P4_CCCR_FORCE_OVF; | ||
502 | } | 746 | } |
503 | 747 | ||
504 | rc = x86_setup_perfctr(event); | 748 | rc = x86_setup_perfctr(event); |
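Masking event->attr.config with P4_CONFIG_MASK before validation means kernel-owned bits (such as the P4_CCCR_FORCE_OVF clearing the deleted lines did by hand) can no longer be injected from user space at all. The sanitize-then-validate ordering is the point; a sketch with a placeholder mask, since P4_CONFIG_MASK's real layout lives in perf_event_p4.h:

#include <stdio.h>
#include <stdint.h>

#define KERNEL_OWNED_BITS       0xff00000000000000ull  /* placeholder mask */
#define CONFIG_MASK             (~KERNEL_OWNED_BITS)

static int validate(uint64_t config)
{
        /* by the time validation runs, reserved bits are already gone */
        return (config & KERNEL_OWNED_BITS) ? -1 : 0;
}

int main(void)
{
        uint64_t user_config = 0xff00000000001234ull;

        user_config &= CONFIG_MASK;             /* sanitize first */
        printf("validate: %d (config=%#llx)\n",
               validate(user_config), (unsigned long long)user_config);
        return 0;
}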
@@ -660,8 +904,12 @@ static int p4_pmu_handle_irq(struct pt_regs *regs) | |||
660 | for (idx = 0; idx < x86_pmu.num_counters; idx++) { | 904 | for (idx = 0; idx < x86_pmu.num_counters; idx++) { |
661 | int overflow; | 905 | int overflow; |
662 | 906 | ||
663 | if (!test_bit(idx, cpuc->active_mask)) | 907 | if (!test_bit(idx, cpuc->active_mask)) { |
908 | /* catch in-flight IRQs */ | ||
909 | if (__test_and_clear_bit(idx, cpuc->running)) | ||
910 | handled++; | ||
664 | continue; | 911 | continue; |
912 | } | ||
665 | 913 | ||
666 | event = cpuc->events[idx]; | 914 | event = cpuc->events[idx]; |
667 | hwc = &event->hw; | 915 | hwc = &event->hw; |
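The new running-bit test handles a race: a counter can overflow, be disabled (clearing its active_mask bit), and only then have its NMI delivered, which previously left the interrupt unaccounted for. A toy model of the test-and-clear crediting, with plain bitmasks in place of the kernel's per-cpu cpuc state:

#include <stdio.h>

static unsigned long active_mask;       /* counters currently armed */
static unsigned long running;           /* counters recently running */

static int test_and_clear_bit(int nr, unsigned long *addr)
{
        int was_set = (*addr >> nr) & 1;

        *addr &= ~(1ul << nr);
        return was_set;
}

static int handle_idx(int idx)
{
        if (!((active_mask >> idx) & 1)) {
                /* not active any more, but the PMI may still be ours */
                return test_and_clear_bit(idx, &running);
        }
        /* ... normal overflow processing would go here ... */
        return 1;
}

int main(void)
{
        running = 1ul << 3;     /* counter 3 torn down with an IRQ in flight */
        printf("handled=%d\n", handle_idx(3));
        printf("handled=%d\n", handle_idx(3)); /* credited only once */
        return 0;
}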
diff --git a/arch/x86/kernel/cpu/perfctr-watchdog.c b/arch/x86/kernel/cpu/perfctr-watchdog.c index fb329e9f8494..d9f4ff8fcd69 100644 --- a/arch/x86/kernel/cpu/perfctr-watchdog.c +++ b/arch/x86/kernel/cpu/perfctr-watchdog.c | |||
@@ -700,11 +700,10 @@ static void probe_nmi_watchdog(void) | |||
700 | { | 700 | { |
701 | switch (boot_cpu_data.x86_vendor) { | 701 | switch (boot_cpu_data.x86_vendor) { |
702 | case X86_VENDOR_AMD: | 702 | case X86_VENDOR_AMD: |
703 | if (boot_cpu_data.x86 != 6 && boot_cpu_data.x86 != 15 && | 703 | if (boot_cpu_data.x86 == 6 || |
704 | boot_cpu_data.x86 != 16 && boot_cpu_data.x86 != 17) | 704 | (boot_cpu_data.x86 >= 0xf && boot_cpu_data.x86 <= 0x15)) |
705 | return; | 705 | wd_ops = &k7_wd_ops; |
706 | wd_ops = &k7_wd_ops; | 706 | return; |
707 | break; | ||
708 | case X86_VENDOR_INTEL: | 707 | case X86_VENDOR_INTEL: |
709 | /* Work around CPUs where perfctr1 doesn't have a working enable | 708 | /* Work around CPUs where perfctr1 doesn't have a working enable |
710 | * bit as described in the following errata: | 709 | * bit as described in the following errata: |
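Note the rewritten AMD branch flips the sense of the test and widens it: where the old code listed families 6, 15, 16 and 17 in decimal, the new one accepts family 6 plus the contiguous range 0xf-0x15 (15-21 decimal), picking up families 18 through 21, and returns either way instead of falling through. A quick check of the predicate:

#include <stdio.h>

static int k7_wd_usable(int family)
{
        return family == 6 || (family >= 0xf && family <= 0x15);
}

int main(void)
{
        int f;

        for (f = 6; f <= 0x16; f++)
                printf("family %#4x -> %s\n", f, k7_wd_usable(f) ? "yes" : "no");
        return 0;
}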
diff --git a/arch/x86/kernel/cpu/scattered.c b/arch/x86/kernel/cpu/scattered.c index d49079515122..c7f64e6f537a 100644 --- a/arch/x86/kernel/cpu/scattered.c +++ b/arch/x86/kernel/cpu/scattered.c | |||
@@ -44,6 +44,12 @@ void __cpuinit init_scattered_cpuid_features(struct cpuinfo_x86 *c) | |||
44 | { X86_FEATURE_LBRV, CR_EDX, 1, 0x8000000a, 0 }, | 44 | { X86_FEATURE_LBRV, CR_EDX, 1, 0x8000000a, 0 }, |
45 | { X86_FEATURE_SVML, CR_EDX, 2, 0x8000000a, 0 }, | 45 | { X86_FEATURE_SVML, CR_EDX, 2, 0x8000000a, 0 }, |
46 | { X86_FEATURE_NRIPS, CR_EDX, 3, 0x8000000a, 0 }, | 46 | { X86_FEATURE_NRIPS, CR_EDX, 3, 0x8000000a, 0 }, |
47 | { X86_FEATURE_TSCRATEMSR, CR_EDX, 4, 0x8000000a, 0 }, | ||
48 | { X86_FEATURE_VMCBCLEAN, CR_EDX, 5, 0x8000000a, 0 }, | ||
49 | { X86_FEATURE_FLUSHBYASID, CR_EDX, 6, 0x8000000a, 0 }, | ||
50 | { X86_FEATURE_DECODEASSISTS, CR_EDX, 7, 0x8000000a, 0 }, | ||
51 | { X86_FEATURE_PAUSEFILTER, CR_EDX,10, 0x8000000a, 0 }, | ||
52 | { X86_FEATURE_PFTHRESHOLD, CR_EDX,12, 0x8000000a, 0 }, | ||
47 | { 0, 0, 0, 0, 0 } | 53 | { 0, 0, 0, 0, 0 } |
48 | }; | 54 | }; |
49 | 55 | ||
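Each row of this table names a feature bit as (flag, register, bit, CPUID level): the six additions are all SVM capabilities in EDX of leaf 0x8000000a, e.g. PAUSEFILTER at bit 10. A hypothetical userspace probe of the same bit, using GCC's <cpuid.h> rather than the kernel's table walk:

#include <stdio.h>
#include <cpuid.h>

int main(void)
{
        unsigned int eax, ebx, ecx, edx;

        /* __get_cpuid() returns 0 if the leaf is not supported */
        if (!__get_cpuid(0x8000000a, &eax, &ebx, &ecx, &edx)) {
                printf("no SVM feature leaf\n");
                return 1;
        }

        /* bit 10 of EDX <-> X86_FEATURE_PAUSEFILTER in the table above */
        printf("pause filter: %s\n", (edx >> 10) & 1 ? "yes" : "no");
        return 0;
}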