author     Ingo Molnar <mingo@kernel.org>   2015-03-27 05:10:47 -0400
committer  Ingo Molnar <mingo@kernel.org>   2015-03-27 05:10:47 -0400
commit     b381e63b48a0b6befc7b4e55408c39012a0dcf8c (patch)
tree       92e70447e1b0cf721ef40515db6b97cb881c5066 /arch/x86
parent     4e6d7c2aa95158315902647963b359b32da5c295 (diff)
parent     ccd41c86ad4d464d0ed4e48d80759ff85c2115b0 (diff)
Merge branch 'perf/core' into perf/timer, before applying new changes
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/include/asm/cpufeature.h                      9
-rw-r--r--  arch/x86/include/asm/processor.h                       3
-rw-r--r--  arch/x86/kernel/cpu/Makefile                           2
-rw-r--r--  arch/x86/kernel/cpu/common.c                          39
-rw-r--r--  arch/x86/kernel/cpu/perf_event.c                      82
-rw-r--r--  arch/x86/kernel/cpu/perf_event.h                      21
-rw-r--r--  arch/x86/kernel/cpu/perf_event_amd_ibs.c              12
-rw-r--r--  arch/x86/kernel/cpu/perf_event_intel.c               304
-rw-r--r--  arch/x86/kernel/cpu/perf_event_intel_cqm.c          1379
-rw-r--r--  arch/x86/kernel/cpu/perf_event_intel_lbr.c           307
-rw-r--r--  arch/x86/kernel/cpu/perf_event_intel_uncore_snbep.c    3
-rw-r--r--  arch/x86/kernel/kprobes/core.c                         9
12 files changed, 2018 insertions, 152 deletions
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
index 90a54851aedc..361922dcc9b1 100644
--- a/arch/x86/include/asm/cpufeature.h
+++ b/arch/x86/include/asm/cpufeature.h
@@ -12,7 +12,7 @@ | |||
12 | #include <asm/disabled-features.h> | 12 | #include <asm/disabled-features.h> |
13 | #endif | 13 | #endif |
14 | 14 | ||
15 | #define NCAPINTS 11 /* N 32-bit words worth of info */ | 15 | #define NCAPINTS 13 /* N 32-bit words worth of info */ |
16 | #define NBUGINTS 1 /* N 32-bit bug flags */ | 16 | #define NBUGINTS 1 /* N 32-bit bug flags */ |
17 | 17 | ||
18 | /* | 18 | /* |
@@ -226,6 +226,7 @@ | |||
226 | #define X86_FEATURE_ERMS ( 9*32+ 9) /* Enhanced REP MOVSB/STOSB */ | 226 | #define X86_FEATURE_ERMS ( 9*32+ 9) /* Enhanced REP MOVSB/STOSB */ |
227 | #define X86_FEATURE_INVPCID ( 9*32+10) /* Invalidate Processor Context ID */ | 227 | #define X86_FEATURE_INVPCID ( 9*32+10) /* Invalidate Processor Context ID */ |
228 | #define X86_FEATURE_RTM ( 9*32+11) /* Restricted Transactional Memory */ | 228 | #define X86_FEATURE_RTM ( 9*32+11) /* Restricted Transactional Memory */ |
229 | #define X86_FEATURE_CQM ( 9*32+12) /* Cache QoS Monitoring */ | ||
229 | #define X86_FEATURE_MPX ( 9*32+14) /* Memory Protection Extension */ | 230 | #define X86_FEATURE_MPX ( 9*32+14) /* Memory Protection Extension */ |
230 | #define X86_FEATURE_AVX512F ( 9*32+16) /* AVX-512 Foundation */ | 231 | #define X86_FEATURE_AVX512F ( 9*32+16) /* AVX-512 Foundation */ |
231 | #define X86_FEATURE_RDSEED ( 9*32+18) /* The RDSEED instruction */ | 232 | #define X86_FEATURE_RDSEED ( 9*32+18) /* The RDSEED instruction */ |
@@ -242,6 +243,12 @@ | |||
242 | #define X86_FEATURE_XGETBV1 (10*32+ 2) /* XGETBV with ECX = 1 */ | 243 | #define X86_FEATURE_XGETBV1 (10*32+ 2) /* XGETBV with ECX = 1 */ |
243 | #define X86_FEATURE_XSAVES (10*32+ 3) /* XSAVES/XRSTORS */ | 244 | #define X86_FEATURE_XSAVES (10*32+ 3) /* XSAVES/XRSTORS */ |
244 | 245 | ||
246 | /* Intel-defined CPU QoS Sub-leaf, CPUID level 0x0000000F:0 (edx), word 11 */ | ||
247 | #define X86_FEATURE_CQM_LLC (11*32+ 1) /* LLC QoS if 1 */ | ||
248 | |||
249 | /* Intel-defined CPU QoS Sub-leaf, CPUID level 0x0000000F:1 (edx), word 12 */ | ||
250 | #define X86_FEATURE_CQM_OCCUP_LLC (12*32+ 0) /* LLC occupancy monitoring if 1 */ | ||
251 | |||
245 | /* | 252 | /* |
246 | * BUG word(s) | 253 | * BUG word(s) |
247 | */ | 254 | */ |
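Editor's note: the new flags above follow the kernel's (word * 32 + bit) feature encoding — word 11 carries the EDX output of CPUID 0xF sub-leaf 0 and word 12 the EDX output of sub-leaf 1, which is why NCAPINTS grows from 11 to 13. A minimal stand-alone sketch of how such a flag decomposes into a word index and bit position (the helper name and array are illustrative, not the kernel's cpu_has()/boot_cpu_has()):

```c
#include <stdbool.h>
#include <stdint.h>

#define NCAPINTS                  13               /* matches the bumped value above */
#define X86_FEATURE_CQM_LLC       (11 * 32 + 1)    /* CPUID 0xF:0, EDX bit 1 */
#define X86_FEATURE_CQM_OCCUP_LLC (12 * 32 + 0)    /* CPUID 0xF:1, EDX bit 0 */

/* Test one feature flag against an x86_capability-style array of 32-bit words. */
static bool cap_has(const uint32_t caps[NCAPINTS], unsigned int feature)
{
	return caps[feature / 32] & (1u << (feature % 32));
}
```

get_cpu_cap() in the common.c hunk below stores the two EDX values into x86_capability[11] and x86_capability[12], so cpu_has(c, X86_FEATURE_CQM_LLC) and cpu_has(c, X86_FEATURE_CQM_OCCUP_LLC) resolve exactly this way.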
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index ec1c93588cef..a12d50e04d7a 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -109,6 +109,9 @@ struct cpuinfo_x86 { | |||
109 | /* in KB - valid for CPUS which support this call: */ | 109 | /* in KB - valid for CPUS which support this call: */ |
110 | int x86_cache_size; | 110 | int x86_cache_size; |
111 | int x86_cache_alignment; /* In bytes */ | 111 | int x86_cache_alignment; /* In bytes */ |
112 | /* Cache QoS architectural values: */ | ||
113 | int x86_cache_max_rmid; /* max index */ | ||
114 | int x86_cache_occ_scale; /* scale to bytes */ | ||
112 | int x86_power; | 115 | int x86_power; |
113 | unsigned long loops_per_jiffy; | 116 | unsigned long loops_per_jiffy; |
114 | /* cpuid returned max cores value: */ | 117 | /* cpuid returned max cores value: */ |
diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
index 80091ae54c2b..6c1ca139f736 100644
--- a/arch/x86/kernel/cpu/Makefile
+++ b/arch/x86/kernel/cpu/Makefile
@@ -39,7 +39,7 @@ obj-$(CONFIG_CPU_SUP_AMD) += perf_event_amd_iommu.o | |||
39 | endif | 39 | endif |
40 | obj-$(CONFIG_CPU_SUP_INTEL) += perf_event_p6.o perf_event_knc.o perf_event_p4.o | 40 | obj-$(CONFIG_CPU_SUP_INTEL) += perf_event_p6.o perf_event_knc.o perf_event_p4.o |
41 | obj-$(CONFIG_CPU_SUP_INTEL) += perf_event_intel_lbr.o perf_event_intel_ds.o perf_event_intel.o | 41 | obj-$(CONFIG_CPU_SUP_INTEL) += perf_event_intel_lbr.o perf_event_intel_ds.o perf_event_intel.o |
42 | obj-$(CONFIG_CPU_SUP_INTEL) += perf_event_intel_rapl.o | 42 | obj-$(CONFIG_CPU_SUP_INTEL) += perf_event_intel_rapl.o perf_event_intel_cqm.o |
43 | 43 | ||
44 | obj-$(CONFIG_PERF_EVENTS_INTEL_UNCORE) += perf_event_intel_uncore.o \ | 44 | obj-$(CONFIG_PERF_EVENTS_INTEL_UNCORE) += perf_event_intel_uncore.o \ |
45 | perf_event_intel_uncore_snb.o \ | 45 | perf_event_intel_uncore_snb.o \ |
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 2346c95c6ab1..1cd4a1a44b95 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -646,6 +646,30 @@ void get_cpu_cap(struct cpuinfo_x86 *c) | |||
646 | c->x86_capability[10] = eax; | 646 | c->x86_capability[10] = eax; |
647 | } | 647 | } |
648 | 648 | ||
649 | /* Additional Intel-defined flags: level 0x0000000F */ | ||
650 | if (c->cpuid_level >= 0x0000000F) { | ||
651 | u32 eax, ebx, ecx, edx; | ||
652 | |||
653 | /* QoS sub-leaf, EAX=0Fh, ECX=0 */ | ||
654 | cpuid_count(0x0000000F, 0, &eax, &ebx, &ecx, &edx); | ||
655 | c->x86_capability[11] = edx; | ||
656 | if (cpu_has(c, X86_FEATURE_CQM_LLC)) { | ||
657 | /* will be overridden if occupancy monitoring exists */ | ||
658 | c->x86_cache_max_rmid = ebx; | ||
659 | |||
660 | /* QoS sub-leaf, EAX=0Fh, ECX=1 */ | ||
661 | cpuid_count(0x0000000F, 1, &eax, &ebx, &ecx, &edx); | ||
662 | c->x86_capability[12] = edx; | ||
663 | if (cpu_has(c, X86_FEATURE_CQM_OCCUP_LLC)) { | ||
664 | c->x86_cache_max_rmid = ecx; | ||
665 | c->x86_cache_occ_scale = ebx; | ||
666 | } | ||
667 | } else { | ||
668 | c->x86_cache_max_rmid = -1; | ||
669 | c->x86_cache_occ_scale = -1; | ||
670 | } | ||
671 | } | ||
672 | |||
649 | /* AMD-defined flags: level 0x80000001 */ | 673 | /* AMD-defined flags: level 0x80000001 */ |
650 | xlvl = cpuid_eax(0x80000000); | 674 | xlvl = cpuid_eax(0x80000000); |
651 | c->extended_cpuid_level = xlvl; | 675 | c->extended_cpuid_level = xlvl; |
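Editor's note: the hunk above enumerates CPUID leaf 0xF — sub-leaf 0's EDX advertises L3 QoS monitoring and its EBX the highest RMID across all resources, while sub-leaf 1's EDX advertises the occupancy event, its ECX the max RMID for L3 and its EBX the factor that scales counter values to bytes. A hedged user-space sketch of the same enumeration, using the compiler-provided __get_cpuid_count() from <cpuid.h> instead of the kernel's cpuid_count():

```c
#include <stdio.h>
#include <cpuid.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	/* Sub-leaf 0: EDX bit 1 = L3 cache QoS monitoring, EBX = max RMID of any resource. */
	if (!__get_cpuid_count(0x0f, 0, &eax, &ebx, &ecx, &edx) || !(edx & (1u << 1))) {
		puts("L3 cache QoS monitoring not enumerated");
		return 0;
	}

	/* Sub-leaf 1: EDX bit 0 = occupancy event, ECX = max RMID, EBX = bytes per count. */
	__get_cpuid_count(0x0f, 1, &eax, &ebx, &ecx, &edx);
	printf("max RMID: %u, occupancy scale: %u bytes, occupancy event %ssupported\n",
	       ecx, ebx, (edx & 1u) ? "" : "not ");
	return 0;
}
```

This mirrors the cpuid_count() calls above, whose results land in c->x86_cache_max_rmid and c->x86_cache_occ_scale.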
@@ -834,6 +858,20 @@ static void generic_identify(struct cpuinfo_x86 *c) | |||
834 | detect_nopl(c); | 858 | detect_nopl(c); |
835 | } | 859 | } |
836 | 860 | ||
861 | static void x86_init_cache_qos(struct cpuinfo_x86 *c) | ||
862 | { | ||
863 | /* | ||
864 | * The heavy lifting of max_rmid and cache_occ_scale is handled | ||
865 | * in get_cpu_cap(). Here we just set the max_rmid for the boot_cpu | ||
866 | * in case CQM bits really aren't there in this CPU. | ||
867 | */ | ||
868 | if (c != &boot_cpu_data) { | ||
869 | boot_cpu_data.x86_cache_max_rmid = | ||
870 | min(boot_cpu_data.x86_cache_max_rmid, | ||
871 | c->x86_cache_max_rmid); | ||
872 | } | ||
873 | } | ||
874 | |||
837 | /* | 875 | /* |
838 | * This does the hard work of actually picking apart the CPU stuff... | 876 | * This does the hard work of actually picking apart the CPU stuff... |
839 | */ | 877 | */ |
@@ -923,6 +961,7 @@ static void identify_cpu(struct cpuinfo_x86 *c) | |||
923 | 961 | ||
924 | init_hypervisor(c); | 962 | init_hypervisor(c); |
925 | x86_init_rdrand(c); | 963 | x86_init_rdrand(c); |
964 | x86_init_cache_qos(c); | ||
926 | 965 | ||
927 | /* | 966 | /* |
928 | * Clear/Set all flags overriden by options, need do it | 967 | * Clear/Set all flags overriden by options, need do it |
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index b71a7f86d68a..ac41b3ad1fc9 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -399,39 +399,41 @@ int x86_pmu_hw_config(struct perf_event *event) | |||
399 | 399 | ||
400 | if (event->attr.precise_ip > precise) | 400 | if (event->attr.precise_ip > precise) |
401 | return -EOPNOTSUPP; | 401 | return -EOPNOTSUPP; |
402 | /* | 402 | } |
403 | * check that PEBS LBR correction does not conflict with | 403 | /* |
404 | * whatever the user is asking with attr->branch_sample_type | 404 | * check that PEBS LBR correction does not conflict with |
405 | */ | 405 | * whatever the user is asking with attr->branch_sample_type |
406 | if (event->attr.precise_ip > 1 && | 406 | */ |
407 | x86_pmu.intel_cap.pebs_format < 2) { | 407 | if (event->attr.precise_ip > 1 && x86_pmu.intel_cap.pebs_format < 2) { |
408 | u64 *br_type = &event->attr.branch_sample_type; | 408 | u64 *br_type = &event->attr.branch_sample_type; |
409 | 409 | ||
410 | if (has_branch_stack(event)) { | 410 | if (has_branch_stack(event)) { |
411 | if (!precise_br_compat(event)) | 411 | if (!precise_br_compat(event)) |
412 | return -EOPNOTSUPP; | 412 | return -EOPNOTSUPP; |
413 | 413 | ||
414 | /* branch_sample_type is compatible */ | 414 | /* branch_sample_type is compatible */ |
415 | 415 | ||
416 | } else { | 416 | } else { |
417 | /* | 417 | /* |
418 | * user did not specify branch_sample_type | 418 | * user did not specify branch_sample_type |
419 | * | 419 | * |
420 | * For PEBS fixups, we capture all | 420 | * For PEBS fixups, we capture all |
421 | * the branches at the priv level of the | 421 | * the branches at the priv level of the |
422 | * event. | 422 | * event. |
423 | */ | 423 | */ |
424 | *br_type = PERF_SAMPLE_BRANCH_ANY; | 424 | *br_type = PERF_SAMPLE_BRANCH_ANY; |
425 | 425 | ||
426 | if (!event->attr.exclude_user) | 426 | if (!event->attr.exclude_user) |
427 | *br_type |= PERF_SAMPLE_BRANCH_USER; | 427 | *br_type |= PERF_SAMPLE_BRANCH_USER; |
428 | 428 | ||
429 | if (!event->attr.exclude_kernel) | 429 | if (!event->attr.exclude_kernel) |
430 | *br_type |= PERF_SAMPLE_BRANCH_KERNEL; | 430 | *br_type |= PERF_SAMPLE_BRANCH_KERNEL; |
431 | } | ||
432 | } | 431 | } |
433 | } | 432 | } |
434 | 433 | ||
434 | if (event->attr.branch_sample_type & PERF_SAMPLE_BRANCH_CALL_STACK) | ||
435 | event->attach_state |= PERF_ATTACH_TASK_DATA; | ||
436 | |||
435 | /* | 437 | /* |
436 | * Generate PMC IRQs: | 438 | * Generate PMC IRQs: |
437 | * (keep 'enabled' bit clear for now) | 439 | * (keep 'enabled' bit clear for now) |
@@ -449,6 +451,12 @@ int x86_pmu_hw_config(struct perf_event *event) | |||
449 | if (event->attr.type == PERF_TYPE_RAW) | 451 | if (event->attr.type == PERF_TYPE_RAW) |
450 | event->hw.config |= event->attr.config & X86_RAW_EVENT_MASK; | 452 | event->hw.config |= event->attr.config & X86_RAW_EVENT_MASK; |
451 | 453 | ||
454 | if (event->attr.sample_period && x86_pmu.limit_period) { | ||
455 | if (x86_pmu.limit_period(event, event->attr.sample_period) > | ||
456 | event->attr.sample_period) | ||
457 | return -EINVAL; | ||
458 | } | ||
459 | |||
452 | return x86_setup_perfctr(event); | 460 | return x86_setup_perfctr(event); |
453 | } | 461 | } |
454 | 462 | ||
@@ -986,6 +994,9 @@ int x86_perf_event_set_period(struct perf_event *event) | |||
986 | if (left > x86_pmu.max_period) | 994 | if (left > x86_pmu.max_period) |
987 | left = x86_pmu.max_period; | 995 | left = x86_pmu.max_period; |
988 | 996 | ||
997 | if (x86_pmu.limit_period) | ||
998 | left = x86_pmu.limit_period(event, left); | ||
999 | |||
989 | per_cpu(pmc_prev_left[idx], smp_processor_id()) = left; | 1000 | per_cpu(pmc_prev_left[idx], smp_processor_id()) = left; |
990 | 1001 | ||
991 | /* | 1002 | /* |
@@ -1033,7 +1044,6 @@ static int x86_pmu_add(struct perf_event *event, int flags) | |||
1033 | 1044 | ||
1034 | hwc = &event->hw; | 1045 | hwc = &event->hw; |
1035 | 1046 | ||
1036 | perf_pmu_disable(event->pmu); | ||
1037 | n0 = cpuc->n_events; | 1047 | n0 = cpuc->n_events; |
1038 | ret = n = collect_events(cpuc, event, false); | 1048 | ret = n = collect_events(cpuc, event, false); |
1039 | if (ret < 0) | 1049 | if (ret < 0) |
@@ -1071,7 +1081,6 @@ done_collect: | |||
1071 | 1081 | ||
1072 | ret = 0; | 1082 | ret = 0; |
1073 | out: | 1083 | out: |
1074 | perf_pmu_enable(event->pmu); | ||
1075 | return ret; | 1084 | return ret; |
1076 | } | 1085 | } |
1077 | 1086 | ||
@@ -1914,10 +1923,10 @@ static const struct attribute_group *x86_pmu_attr_groups[] = { | |||
1914 | NULL, | 1923 | NULL, |
1915 | }; | 1924 | }; |
1916 | 1925 | ||
1917 | static void x86_pmu_flush_branch_stack(void) | 1926 | static void x86_pmu_sched_task(struct perf_event_context *ctx, bool sched_in) |
1918 | { | 1927 | { |
1919 | if (x86_pmu.flush_branch_stack) | 1928 | if (x86_pmu.sched_task) |
1920 | x86_pmu.flush_branch_stack(); | 1929 | x86_pmu.sched_task(ctx, sched_in); |
1921 | } | 1930 | } |
1922 | 1931 | ||
1923 | void perf_check_microcode(void) | 1932 | void perf_check_microcode(void) |
@@ -1949,7 +1958,8 @@ static struct pmu pmu = { | |||
1949 | .commit_txn = x86_pmu_commit_txn, | 1958 | .commit_txn = x86_pmu_commit_txn, |
1950 | 1959 | ||
1951 | .event_idx = x86_pmu_event_idx, | 1960 | .event_idx = x86_pmu_event_idx, |
1952 | .flush_branch_stack = x86_pmu_flush_branch_stack, | 1961 | .sched_task = x86_pmu_sched_task, |
1962 | .task_ctx_size = sizeof(struct x86_perf_task_context), | ||
1953 | }; | 1963 | }; |
1954 | 1964 | ||
1955 | void arch_perf_update_userpage(struct perf_event *event, | 1965 | void arch_perf_update_userpage(struct perf_event *event, |
diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h
index df525d2be1e8..87e5081f4cdc 100644
--- a/arch/x86/kernel/cpu/perf_event.h
+++ b/arch/x86/kernel/cpu/perf_event.h
@@ -451,6 +451,7 @@ struct x86_pmu { | |||
451 | struct x86_pmu_quirk *quirks; | 451 | struct x86_pmu_quirk *quirks; |
452 | int perfctr_second_write; | 452 | int perfctr_second_write; |
453 | bool late_ack; | 453 | bool late_ack; |
454 | unsigned (*limit_period)(struct perf_event *event, unsigned l); | ||
454 | 455 | ||
455 | /* | 456 | /* |
456 | * sysfs attrs | 457 | * sysfs attrs |
@@ -472,7 +473,8 @@ struct x86_pmu { | |||
472 | void (*cpu_dead)(int cpu); | 473 | void (*cpu_dead)(int cpu); |
473 | 474 | ||
474 | void (*check_microcode)(void); | 475 | void (*check_microcode)(void); |
475 | void (*flush_branch_stack)(void); | 476 | void (*sched_task)(struct perf_event_context *ctx, |
477 | bool sched_in); | ||
476 | 478 | ||
477 | /* | 479 | /* |
478 | * Intel Arch Perfmon v2+ | 480 | * Intel Arch Perfmon v2+ |
@@ -515,6 +517,13 @@ struct x86_pmu { | |||
515 | struct perf_guest_switch_msr *(*guest_get_msrs)(int *nr); | 517 | struct perf_guest_switch_msr *(*guest_get_msrs)(int *nr); |
516 | }; | 518 | }; |
517 | 519 | ||
520 | struct x86_perf_task_context { | ||
521 | u64 lbr_from[MAX_LBR_ENTRIES]; | ||
522 | u64 lbr_to[MAX_LBR_ENTRIES]; | ||
523 | int lbr_callstack_users; | ||
524 | int lbr_stack_state; | ||
525 | }; | ||
526 | |||
518 | #define x86_add_quirk(func_) \ | 527 | #define x86_add_quirk(func_) \ |
519 | do { \ | 528 | do { \ |
520 | static struct x86_pmu_quirk __quirk __initdata = { \ | 529 | static struct x86_pmu_quirk __quirk __initdata = { \ |
@@ -546,6 +555,12 @@ static struct perf_pmu_events_attr event_attr_##v = { \ | |||
546 | 555 | ||
547 | extern struct x86_pmu x86_pmu __read_mostly; | 556 | extern struct x86_pmu x86_pmu __read_mostly; |
548 | 557 | ||
558 | static inline bool x86_pmu_has_lbr_callstack(void) | ||
559 | { | ||
560 | return x86_pmu.lbr_sel_map && | ||
561 | x86_pmu.lbr_sel_map[PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT] > 0; | ||
562 | } | ||
563 | |||
549 | DECLARE_PER_CPU(struct cpu_hw_events, cpu_hw_events); | 564 | DECLARE_PER_CPU(struct cpu_hw_events, cpu_hw_events); |
550 | 565 | ||
551 | int x86_perf_event_set_period(struct perf_event *event); | 566 | int x86_perf_event_set_period(struct perf_event *event); |
@@ -727,6 +742,8 @@ void intel_pmu_pebs_disable_all(void); | |||
727 | 742 | ||
728 | void intel_ds_init(void); | 743 | void intel_ds_init(void); |
729 | 744 | ||
745 | void intel_pmu_lbr_sched_task(struct perf_event_context *ctx, bool sched_in); | ||
746 | |||
730 | void intel_pmu_lbr_reset(void); | 747 | void intel_pmu_lbr_reset(void); |
731 | 748 | ||
732 | void intel_pmu_lbr_enable(struct perf_event *event); | 749 | void intel_pmu_lbr_enable(struct perf_event *event); |
@@ -747,6 +764,8 @@ void intel_pmu_lbr_init_atom(void); | |||
747 | 764 | ||
748 | void intel_pmu_lbr_init_snb(void); | 765 | void intel_pmu_lbr_init_snb(void); |
749 | 766 | ||
767 | void intel_pmu_lbr_init_hsw(void); | ||
768 | |||
750 | int intel_pmu_setup_lbr_filter(struct perf_event *event); | 769 | int intel_pmu_setup_lbr_filter(struct perf_event *event); |
751 | 770 | ||
752 | int p4_pmu_init(void); | 771 | int p4_pmu_init(void); |
diff --git a/arch/x86/kernel/cpu/perf_event_amd_ibs.c b/arch/x86/kernel/cpu/perf_event_amd_ibs.c
index a61f5c6911da..989d3c215d2b 100644
--- a/arch/x86/kernel/cpu/perf_event_amd_ibs.c
+++ b/arch/x86/kernel/cpu/perf_event_amd_ibs.c
@@ -796,7 +796,7 @@ static int setup_ibs_ctl(int ibs_eilvt_off) | |||
796 | * the IBS interrupt vector is handled by perf_ibs_cpu_notifier that | 796 | * the IBS interrupt vector is handled by perf_ibs_cpu_notifier that |
797 | * is using the new offset. | 797 | * is using the new offset. |
798 | */ | 798 | */ |
799 | static int force_ibs_eilvt_setup(void) | 799 | static void force_ibs_eilvt_setup(void) |
800 | { | 800 | { |
801 | int offset; | 801 | int offset; |
802 | int ret; | 802 | int ret; |
@@ -811,26 +811,24 @@ static int force_ibs_eilvt_setup(void) | |||
811 | 811 | ||
812 | if (offset == APIC_EILVT_NR_MAX) { | 812 | if (offset == APIC_EILVT_NR_MAX) { |
813 | printk(KERN_DEBUG "No EILVT entry available\n"); | 813 | printk(KERN_DEBUG "No EILVT entry available\n"); |
814 | return -EBUSY; | 814 | return; |
815 | } | 815 | } |
816 | 816 | ||
817 | ret = setup_ibs_ctl(offset); | 817 | ret = setup_ibs_ctl(offset); |
818 | if (ret) | 818 | if (ret) |
819 | goto out; | 819 | goto out; |
820 | 820 | ||
821 | if (!ibs_eilvt_valid()) { | 821 | if (!ibs_eilvt_valid()) |
822 | ret = -EFAULT; | ||
823 | goto out; | 822 | goto out; |
824 | } | ||
825 | 823 | ||
826 | pr_info("IBS: LVT offset %d assigned\n", offset); | 824 | pr_info("IBS: LVT offset %d assigned\n", offset); |
827 | 825 | ||
828 | return 0; | 826 | return; |
829 | out: | 827 | out: |
830 | preempt_disable(); | 828 | preempt_disable(); |
831 | put_eilvt(offset); | 829 | put_eilvt(offset); |
832 | preempt_enable(); | 830 | preempt_enable(); |
833 | return ret; | 831 | return; |
834 | } | 832 | } |
835 | 833 | ||
836 | static void ibs_eilvt_setup(void) | 834 | static void ibs_eilvt_setup(void) |
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index 498b6d967138..fc6dbc46af4a 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -220,6 +220,15 @@ static struct event_constraint intel_hsw_event_constraints[] = { | |||
220 | EVENT_CONSTRAINT_END | 220 | EVENT_CONSTRAINT_END |
221 | }; | 221 | }; |
222 | 222 | ||
223 | struct event_constraint intel_bdw_event_constraints[] = { | ||
224 | FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */ | ||
225 | FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */ | ||
226 | FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */ | ||
227 | INTEL_UEVENT_CONSTRAINT(0x148, 0x4), /* L1D_PEND_MISS.PENDING */ | ||
228 | INTEL_EVENT_CONSTRAINT(0xa3, 0x4), /* CYCLE_ACTIVITY.* */ | ||
229 | EVENT_CONSTRAINT_END | ||
230 | }; | ||
231 | |||
223 | static u64 intel_pmu_event_map(int hw_event) | 232 | static u64 intel_pmu_event_map(int hw_event) |
224 | { | 233 | { |
225 | return intel_perfmon_event_map[hw_event]; | 234 | return intel_perfmon_event_map[hw_event]; |
@@ -415,6 +424,202 @@ static __initconst const u64 snb_hw_cache_event_ids | |||
415 | 424 | ||
416 | }; | 425 | }; |
417 | 426 | ||
427 | /* | ||
428 | * Notes on the events: | ||
429 | * - data reads do not include code reads (comparable to earlier tables) | ||
430 | * - data counts include speculative execution (except L1 write, dtlb, bpu) | ||
431 | * - remote node access includes remote memory, remote cache, remote mmio. | ||
432 | * - prefetches are not included in the counts because they are not | ||
433 | * reliably counted. | ||
434 | */ | ||
435 | |||
436 | #define HSW_DEMAND_DATA_RD BIT_ULL(0) | ||
437 | #define HSW_DEMAND_RFO BIT_ULL(1) | ||
438 | #define HSW_ANY_RESPONSE BIT_ULL(16) | ||
439 | #define HSW_SUPPLIER_NONE BIT_ULL(17) | ||
440 | #define HSW_L3_MISS_LOCAL_DRAM BIT_ULL(22) | ||
441 | #define HSW_L3_MISS_REMOTE_HOP0 BIT_ULL(27) | ||
442 | #define HSW_L3_MISS_REMOTE_HOP1 BIT_ULL(28) | ||
443 | #define HSW_L3_MISS_REMOTE_HOP2P BIT_ULL(29) | ||
444 | #define HSW_L3_MISS (HSW_L3_MISS_LOCAL_DRAM| \ | ||
445 | HSW_L3_MISS_REMOTE_HOP0|HSW_L3_MISS_REMOTE_HOP1| \ | ||
446 | HSW_L3_MISS_REMOTE_HOP2P) | ||
447 | #define HSW_SNOOP_NONE BIT_ULL(31) | ||
448 | #define HSW_SNOOP_NOT_NEEDED BIT_ULL(32) | ||
449 | #define HSW_SNOOP_MISS BIT_ULL(33) | ||
450 | #define HSW_SNOOP_HIT_NO_FWD BIT_ULL(34) | ||
451 | #define HSW_SNOOP_HIT_WITH_FWD BIT_ULL(35) | ||
452 | #define HSW_SNOOP_HITM BIT_ULL(36) | ||
453 | #define HSW_SNOOP_NON_DRAM BIT_ULL(37) | ||
454 | #define HSW_ANY_SNOOP (HSW_SNOOP_NONE| \ | ||
455 | HSW_SNOOP_NOT_NEEDED|HSW_SNOOP_MISS| \ | ||
456 | HSW_SNOOP_HIT_NO_FWD|HSW_SNOOP_HIT_WITH_FWD| \ | ||
457 | HSW_SNOOP_HITM|HSW_SNOOP_NON_DRAM) | ||
458 | #define HSW_SNOOP_DRAM (HSW_ANY_SNOOP & ~HSW_SNOOP_NON_DRAM) | ||
459 | #define HSW_DEMAND_READ HSW_DEMAND_DATA_RD | ||
460 | #define HSW_DEMAND_WRITE HSW_DEMAND_RFO | ||
461 | #define HSW_L3_MISS_REMOTE (HSW_L3_MISS_REMOTE_HOP0|\ | ||
462 | HSW_L3_MISS_REMOTE_HOP1|HSW_L3_MISS_REMOTE_HOP2P) | ||
463 | #define HSW_LLC_ACCESS HSW_ANY_RESPONSE | ||
464 | |||
465 | #define BDW_L3_MISS_LOCAL BIT(26) | ||
466 | #define BDW_L3_MISS (BDW_L3_MISS_LOCAL| \ | ||
467 | HSW_L3_MISS_REMOTE_HOP0|HSW_L3_MISS_REMOTE_HOP1| \ | ||
468 | HSW_L3_MISS_REMOTE_HOP2P) | ||
469 | |||
470 | |||
471 | static __initconst const u64 hsw_hw_cache_event_ids | ||
472 | [PERF_COUNT_HW_CACHE_MAX] | ||
473 | [PERF_COUNT_HW_CACHE_OP_MAX] | ||
474 | [PERF_COUNT_HW_CACHE_RESULT_MAX] = | ||
475 | { | ||
476 | [ C(L1D ) ] = { | ||
477 | [ C(OP_READ) ] = { | ||
478 | [ C(RESULT_ACCESS) ] = 0x81d0, /* MEM_UOPS_RETIRED.ALL_LOADS */ | ||
479 | [ C(RESULT_MISS) ] = 0x151, /* L1D.REPLACEMENT */ | ||
480 | }, | ||
481 | [ C(OP_WRITE) ] = { | ||
482 | [ C(RESULT_ACCESS) ] = 0x82d0, /* MEM_UOPS_RETIRED.ALL_STORES */ | ||
483 | [ C(RESULT_MISS) ] = 0x0, | ||
484 | }, | ||
485 | [ C(OP_PREFETCH) ] = { | ||
486 | [ C(RESULT_ACCESS) ] = 0x0, | ||
487 | [ C(RESULT_MISS) ] = 0x0, | ||
488 | }, | ||
489 | }, | ||
490 | [ C(L1I ) ] = { | ||
491 | [ C(OP_READ) ] = { | ||
492 | [ C(RESULT_ACCESS) ] = 0x0, | ||
493 | [ C(RESULT_MISS) ] = 0x280, /* ICACHE.MISSES */ | ||
494 | }, | ||
495 | [ C(OP_WRITE) ] = { | ||
496 | [ C(RESULT_ACCESS) ] = -1, | ||
497 | [ C(RESULT_MISS) ] = -1, | ||
498 | }, | ||
499 | [ C(OP_PREFETCH) ] = { | ||
500 | [ C(RESULT_ACCESS) ] = 0x0, | ||
501 | [ C(RESULT_MISS) ] = 0x0, | ||
502 | }, | ||
503 | }, | ||
504 | [ C(LL ) ] = { | ||
505 | [ C(OP_READ) ] = { | ||
506 | [ C(RESULT_ACCESS) ] = 0x1b7, /* OFFCORE_RESPONSE */ | ||
507 | [ C(RESULT_MISS) ] = 0x1b7, /* OFFCORE_RESPONSE */ | ||
508 | }, | ||
509 | [ C(OP_WRITE) ] = { | ||
510 | [ C(RESULT_ACCESS) ] = 0x1b7, /* OFFCORE_RESPONSE */ | ||
511 | [ C(RESULT_MISS) ] = 0x1b7, /* OFFCORE_RESPONSE */ | ||
512 | }, | ||
513 | [ C(OP_PREFETCH) ] = { | ||
514 | [ C(RESULT_ACCESS) ] = 0x0, | ||
515 | [ C(RESULT_MISS) ] = 0x0, | ||
516 | }, | ||
517 | }, | ||
518 | [ C(DTLB) ] = { | ||
519 | [ C(OP_READ) ] = { | ||
520 | [ C(RESULT_ACCESS) ] = 0x81d0, /* MEM_UOPS_RETIRED.ALL_LOADS */ | ||
521 | [ C(RESULT_MISS) ] = 0x108, /* DTLB_LOAD_MISSES.MISS_CAUSES_A_WALK */ | ||
522 | }, | ||
523 | [ C(OP_WRITE) ] = { | ||
524 | [ C(RESULT_ACCESS) ] = 0x82d0, /* MEM_UOPS_RETIRED.ALL_STORES */ | ||
525 | [ C(RESULT_MISS) ] = 0x149, /* DTLB_STORE_MISSES.MISS_CAUSES_A_WALK */ | ||
526 | }, | ||
527 | [ C(OP_PREFETCH) ] = { | ||
528 | [ C(RESULT_ACCESS) ] = 0x0, | ||
529 | [ C(RESULT_MISS) ] = 0x0, | ||
530 | }, | ||
531 | }, | ||
532 | [ C(ITLB) ] = { | ||
533 | [ C(OP_READ) ] = { | ||
534 | [ C(RESULT_ACCESS) ] = 0x6085, /* ITLB_MISSES.STLB_HIT */ | ||
535 | [ C(RESULT_MISS) ] = 0x185, /* ITLB_MISSES.MISS_CAUSES_A_WALK */ | ||
536 | }, | ||
537 | [ C(OP_WRITE) ] = { | ||
538 | [ C(RESULT_ACCESS) ] = -1, | ||
539 | [ C(RESULT_MISS) ] = -1, | ||
540 | }, | ||
541 | [ C(OP_PREFETCH) ] = { | ||
542 | [ C(RESULT_ACCESS) ] = -1, | ||
543 | [ C(RESULT_MISS) ] = -1, | ||
544 | }, | ||
545 | }, | ||
546 | [ C(BPU ) ] = { | ||
547 | [ C(OP_READ) ] = { | ||
548 | [ C(RESULT_ACCESS) ] = 0xc4, /* BR_INST_RETIRED.ALL_BRANCHES */ | ||
549 | [ C(RESULT_MISS) ] = 0xc5, /* BR_MISP_RETIRED.ALL_BRANCHES */ | ||
550 | }, | ||
551 | [ C(OP_WRITE) ] = { | ||
552 | [ C(RESULT_ACCESS) ] = -1, | ||
553 | [ C(RESULT_MISS) ] = -1, | ||
554 | }, | ||
555 | [ C(OP_PREFETCH) ] = { | ||
556 | [ C(RESULT_ACCESS) ] = -1, | ||
557 | [ C(RESULT_MISS) ] = -1, | ||
558 | }, | ||
559 | }, | ||
560 | [ C(NODE) ] = { | ||
561 | [ C(OP_READ) ] = { | ||
562 | [ C(RESULT_ACCESS) ] = 0x1b7, /* OFFCORE_RESPONSE */ | ||
563 | [ C(RESULT_MISS) ] = 0x1b7, /* OFFCORE_RESPONSE */ | ||
564 | }, | ||
565 | [ C(OP_WRITE) ] = { | ||
566 | [ C(RESULT_ACCESS) ] = 0x1b7, /* OFFCORE_RESPONSE */ | ||
567 | [ C(RESULT_MISS) ] = 0x1b7, /* OFFCORE_RESPONSE */ | ||
568 | }, | ||
569 | [ C(OP_PREFETCH) ] = { | ||
570 | [ C(RESULT_ACCESS) ] = 0x0, | ||
571 | [ C(RESULT_MISS) ] = 0x0, | ||
572 | }, | ||
573 | }, | ||
574 | }; | ||
575 | |||
576 | static __initconst const u64 hsw_hw_cache_extra_regs | ||
577 | [PERF_COUNT_HW_CACHE_MAX] | ||
578 | [PERF_COUNT_HW_CACHE_OP_MAX] | ||
579 | [PERF_COUNT_HW_CACHE_RESULT_MAX] = | ||
580 | { | ||
581 | [ C(LL ) ] = { | ||
582 | [ C(OP_READ) ] = { | ||
583 | [ C(RESULT_ACCESS) ] = HSW_DEMAND_READ| | ||
584 | HSW_LLC_ACCESS, | ||
585 | [ C(RESULT_MISS) ] = HSW_DEMAND_READ| | ||
586 | HSW_L3_MISS|HSW_ANY_SNOOP, | ||
587 | }, | ||
588 | [ C(OP_WRITE) ] = { | ||
589 | [ C(RESULT_ACCESS) ] = HSW_DEMAND_WRITE| | ||
590 | HSW_LLC_ACCESS, | ||
591 | [ C(RESULT_MISS) ] = HSW_DEMAND_WRITE| | ||
592 | HSW_L3_MISS|HSW_ANY_SNOOP, | ||
593 | }, | ||
594 | [ C(OP_PREFETCH) ] = { | ||
595 | [ C(RESULT_ACCESS) ] = 0x0, | ||
596 | [ C(RESULT_MISS) ] = 0x0, | ||
597 | }, | ||
598 | }, | ||
599 | [ C(NODE) ] = { | ||
600 | [ C(OP_READ) ] = { | ||
601 | [ C(RESULT_ACCESS) ] = HSW_DEMAND_READ| | ||
602 | HSW_L3_MISS_LOCAL_DRAM| | ||
603 | HSW_SNOOP_DRAM, | ||
604 | [ C(RESULT_MISS) ] = HSW_DEMAND_READ| | ||
605 | HSW_L3_MISS_REMOTE| | ||
606 | HSW_SNOOP_DRAM, | ||
607 | }, | ||
608 | [ C(OP_WRITE) ] = { | ||
609 | [ C(RESULT_ACCESS) ] = HSW_DEMAND_WRITE| | ||
610 | HSW_L3_MISS_LOCAL_DRAM| | ||
611 | HSW_SNOOP_DRAM, | ||
612 | [ C(RESULT_MISS) ] = HSW_DEMAND_WRITE| | ||
613 | HSW_L3_MISS_REMOTE| | ||
614 | HSW_SNOOP_DRAM, | ||
615 | }, | ||
616 | [ C(OP_PREFETCH) ] = { | ||
617 | [ C(RESULT_ACCESS) ] = 0x0, | ||
618 | [ C(RESULT_MISS) ] = 0x0, | ||
619 | }, | ||
620 | }, | ||
621 | }; | ||
622 | |||
418 | static __initconst const u64 westmere_hw_cache_event_ids | 623 | static __initconst const u64 westmere_hw_cache_event_ids |
419 | [PERF_COUNT_HW_CACHE_MAX] | 624 | [PERF_COUNT_HW_CACHE_MAX] |
420 | [PERF_COUNT_HW_CACHE_OP_MAX] | 625 | [PERF_COUNT_HW_CACHE_OP_MAX] |
@@ -1029,20 +1234,6 @@ static __initconst const u64 slm_hw_cache_event_ids | |||
1029 | }, | 1234 | }, |
1030 | }; | 1235 | }; |
1031 | 1236 | ||
1032 | static inline bool intel_pmu_needs_lbr_smpl(struct perf_event *event) | ||
1033 | { | ||
1034 | /* user explicitly requested branch sampling */ | ||
1035 | if (has_branch_stack(event)) | ||
1036 | return true; | ||
1037 | |||
1038 | /* implicit branch sampling to correct PEBS skid */ | ||
1039 | if (x86_pmu.intel_cap.pebs_trap && event->attr.precise_ip > 1 && | ||
1040 | x86_pmu.intel_cap.pebs_format < 2) | ||
1041 | return true; | ||
1042 | |||
1043 | return false; | ||
1044 | } | ||
1045 | |||
1046 | static void intel_pmu_disable_all(void) | 1237 | static void intel_pmu_disable_all(void) |
1047 | { | 1238 | { |
1048 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); | 1239 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); |
@@ -1207,7 +1398,7 @@ static void intel_pmu_disable_event(struct perf_event *event) | |||
1207 | * must disable before any actual event | 1398 | * must disable before any actual event |
1208 | * because any event may be combined with LBR | 1399 | * because any event may be combined with LBR |
1209 | */ | 1400 | */ |
1210 | if (intel_pmu_needs_lbr_smpl(event)) | 1401 | if (needs_branch_stack(event)) |
1211 | intel_pmu_lbr_disable(event); | 1402 | intel_pmu_lbr_disable(event); |
1212 | 1403 | ||
1213 | if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) { | 1404 | if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) { |
@@ -1268,7 +1459,7 @@ static void intel_pmu_enable_event(struct perf_event *event) | |||
1268 | * must enabled before any actual event | 1459 | * must enabled before any actual event |
1269 | * because any event may be combined with LBR | 1460 | * because any event may be combined with LBR |
1270 | */ | 1461 | */ |
1271 | if (intel_pmu_needs_lbr_smpl(event)) | 1462 | if (needs_branch_stack(event)) |
1272 | intel_pmu_lbr_enable(event); | 1463 | intel_pmu_lbr_enable(event); |
1273 | 1464 | ||
1274 | if (event->attr.exclude_host) | 1465 | if (event->attr.exclude_host) |
@@ -1747,7 +1938,7 @@ static int intel_pmu_hw_config(struct perf_event *event) | |||
1747 | if (event->attr.precise_ip && x86_pmu.pebs_aliases) | 1938 | if (event->attr.precise_ip && x86_pmu.pebs_aliases) |
1748 | x86_pmu.pebs_aliases(event); | 1939 | x86_pmu.pebs_aliases(event); |
1749 | 1940 | ||
1750 | if (intel_pmu_needs_lbr_smpl(event)) { | 1941 | if (needs_branch_stack(event)) { |
1751 | ret = intel_pmu_setup_lbr_filter(event); | 1942 | ret = intel_pmu_setup_lbr_filter(event); |
1752 | if (ret) | 1943 | if (ret) |
1753 | return ret; | 1944 | return ret; |
@@ -1905,6 +2096,32 @@ hsw_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event) | |||
1905 | return c; | 2096 | return c; |
1906 | } | 2097 | } |
1907 | 2098 | ||
2099 | /* | ||
2100 | * Broadwell: | ||
2101 | * | ||
2102 | * The INST_RETIRED.ALL period always needs to have lowest 6 bits cleared | ||
2103 | * (BDM55) and it must not use a period smaller than 100 (BDM11). We combine | ||
2104 | * the two to enforce a minimum period of 128 (the smallest value that has bits | ||
2105 | * 0-5 cleared and >= 100). | ||
2106 | * | ||
2107 | * Because of how the code in x86_perf_event_set_period() works, the truncation | ||
2108 | * of the lower 6 bits is 'harmless' as we'll occasionally add a longer period | ||
2109 | * to make up for the 'lost' events due to carrying the 'error' in period_left. | ||
2110 | * | ||
2111 | * Therefore the effective (average) period matches the requested period, | ||
2112 | * despite coarser hardware granularity. | ||
2113 | */ | ||
2114 | static unsigned bdw_limit_period(struct perf_event *event, unsigned left) | ||
2115 | { | ||
2116 | if ((event->hw.config & INTEL_ARCH_EVENT_MASK) == | ||
2117 | X86_CONFIG(.event=0xc0, .umask=0x01)) { | ||
2118 | if (left < 128) | ||
2119 | left = 128; | ||
2120 | left &= ~0x3fu; | ||
2121 | } | ||
2122 | return left; | ||
2123 | } | ||
2124 | |||
1908 | PMU_FORMAT_ATTR(event, "config:0-7" ); | 2125 | PMU_FORMAT_ATTR(event, "config:0-7" ); |
1909 | PMU_FORMAT_ATTR(umask, "config:8-15" ); | 2126 | PMU_FORMAT_ATTR(umask, "config:8-15" ); |
1910 | PMU_FORMAT_ATTR(edge, "config:18" ); | 2127 | PMU_FORMAT_ATTR(edge, "config:18" ); |
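Editor's note: bdw_limit_period() above implements the BDM11/BDM55 workaround by clamping the INST_RETIRED.ALL period to at least 128 and clearing its low six bits; x86_pmu_hw_config() (in the perf_event.c hunk earlier in this patch) rejects a requested sample_period that the callback would have to enlarge, and x86_perf_event_set_period() reapplies the limit on every reload. A stand-alone sketch of the same arithmetic with worked values (the function name is illustrative):

```c
/*
 * Sketch of the Broadwell period fixup: clamp to >= 128, then clear
 * bits 0-5 so the result is a multiple of 64 that is still >= 100.
 * Worked examples: 100 -> 128, 128 -> 128, 250 -> 192, 1000 -> 960.
 */
static unsigned int bdw_period_fixup(unsigned int left)
{
	if (left < 128)
		left = 128;
	return left & ~0x3fu;
}
```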
@@ -2044,18 +2261,6 @@ static void intel_pmu_cpu_dying(int cpu) | |||
2044 | fini_debug_store_on_cpu(cpu); | 2261 | fini_debug_store_on_cpu(cpu); |
2045 | } | 2262 | } |
2046 | 2263 | ||
2047 | static void intel_pmu_flush_branch_stack(void) | ||
2048 | { | ||
2049 | /* | ||
2050 | * Intel LBR does not tag entries with the | ||
2051 | * PID of the current task, then we need to | ||
2052 | * flush it on ctxsw | ||
2053 | * For now, we simply reset it | ||
2054 | */ | ||
2055 | if (x86_pmu.lbr_nr) | ||
2056 | intel_pmu_lbr_reset(); | ||
2057 | } | ||
2058 | |||
2059 | PMU_FORMAT_ATTR(offcore_rsp, "config1:0-63"); | 2264 | PMU_FORMAT_ATTR(offcore_rsp, "config1:0-63"); |
2060 | 2265 | ||
2061 | PMU_FORMAT_ATTR(ldlat, "config1:0-15"); | 2266 | PMU_FORMAT_ATTR(ldlat, "config1:0-15"); |
@@ -2107,7 +2312,7 @@ static __initconst const struct x86_pmu intel_pmu = { | |||
2107 | .cpu_starting = intel_pmu_cpu_starting, | 2312 | .cpu_starting = intel_pmu_cpu_starting, |
2108 | .cpu_dying = intel_pmu_cpu_dying, | 2313 | .cpu_dying = intel_pmu_cpu_dying, |
2109 | .guest_get_msrs = intel_guest_get_msrs, | 2314 | .guest_get_msrs = intel_guest_get_msrs, |
2110 | .flush_branch_stack = intel_pmu_flush_branch_stack, | 2315 | .sched_task = intel_pmu_lbr_sched_task, |
2111 | }; | 2316 | }; |
2112 | 2317 | ||
2113 | static __init void intel_clovertown_quirk(void) | 2318 | static __init void intel_clovertown_quirk(void) |
@@ -2546,10 +2751,10 @@ __init int intel_pmu_init(void) | |||
2546 | case 69: /* 22nm Haswell ULT */ | 2751 | case 69: /* 22nm Haswell ULT */ |
2547 | case 70: /* 22nm Haswell + GT3e (Intel Iris Pro graphics) */ | 2752 | case 70: /* 22nm Haswell + GT3e (Intel Iris Pro graphics) */ |
2548 | x86_pmu.late_ack = true; | 2753 | x86_pmu.late_ack = true; |
2549 | memcpy(hw_cache_event_ids, snb_hw_cache_event_ids, sizeof(hw_cache_event_ids)); | 2754 | memcpy(hw_cache_event_ids, hsw_hw_cache_event_ids, sizeof(hw_cache_event_ids)); |
2550 | memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs, sizeof(hw_cache_extra_regs)); | 2755 | memcpy(hw_cache_extra_regs, hsw_hw_cache_extra_regs, sizeof(hw_cache_extra_regs)); |
2551 | 2756 | ||
2552 | intel_pmu_lbr_init_snb(); | 2757 | intel_pmu_lbr_init_hsw(); |
2553 | 2758 | ||
2554 | x86_pmu.event_constraints = intel_hsw_event_constraints; | 2759 | x86_pmu.event_constraints = intel_hsw_event_constraints; |
2555 | x86_pmu.pebs_constraints = intel_hsw_pebs_event_constraints; | 2760 | x86_pmu.pebs_constraints = intel_hsw_pebs_event_constraints; |
@@ -2566,6 +2771,39 @@ __init int intel_pmu_init(void) | |||
2566 | pr_cont("Haswell events, "); | 2771 | pr_cont("Haswell events, "); |
2567 | break; | 2772 | break; |
2568 | 2773 | ||
2774 | case 61: /* 14nm Broadwell Core-M */ | ||
2775 | case 86: /* 14nm Broadwell Xeon D */ | ||
2776 | x86_pmu.late_ack = true; | ||
2777 | memcpy(hw_cache_event_ids, hsw_hw_cache_event_ids, sizeof(hw_cache_event_ids)); | ||
2778 | memcpy(hw_cache_extra_regs, hsw_hw_cache_extra_regs, sizeof(hw_cache_extra_regs)); | ||
2779 | |||
2780 | /* L3_MISS_LOCAL_DRAM is BIT(26) in Broadwell */ | ||
2781 | hw_cache_extra_regs[C(LL)][C(OP_READ)][C(RESULT_MISS)] = HSW_DEMAND_READ | | ||
2782 | BDW_L3_MISS|HSW_SNOOP_DRAM; | ||
2783 | hw_cache_extra_regs[C(LL)][C(OP_WRITE)][C(RESULT_MISS)] = HSW_DEMAND_WRITE|BDW_L3_MISS| | ||
2784 | HSW_SNOOP_DRAM; | ||
2785 | hw_cache_extra_regs[C(NODE)][C(OP_READ)][C(RESULT_ACCESS)] = HSW_DEMAND_READ| | ||
2786 | BDW_L3_MISS_LOCAL|HSW_SNOOP_DRAM; | ||
2787 | hw_cache_extra_regs[C(NODE)][C(OP_WRITE)][C(RESULT_ACCESS)] = HSW_DEMAND_WRITE| | ||
2788 | BDW_L3_MISS_LOCAL|HSW_SNOOP_DRAM; | ||
2789 | |||
2790 | intel_pmu_lbr_init_snb(); | ||
2791 | |||
2792 | x86_pmu.event_constraints = intel_bdw_event_constraints; | ||
2793 | x86_pmu.pebs_constraints = intel_hsw_pebs_event_constraints; | ||
2794 | x86_pmu.extra_regs = intel_snbep_extra_regs; | ||
2795 | x86_pmu.pebs_aliases = intel_pebs_aliases_snb; | ||
2796 | /* all extra regs are per-cpu when HT is on */ | ||
2797 | x86_pmu.er_flags |= ERF_HAS_RSP_1; | ||
2798 | x86_pmu.er_flags |= ERF_NO_HT_SHARING; | ||
2799 | |||
2800 | x86_pmu.hw_config = hsw_hw_config; | ||
2801 | x86_pmu.get_event_constraints = hsw_get_event_constraints; | ||
2802 | x86_pmu.cpu_events = hsw_events_attrs; | ||
2803 | x86_pmu.limit_period = bdw_limit_period; | ||
2804 | pr_cont("Broadwell events, "); | ||
2805 | break; | ||
2806 | |||
2569 | default: | 2807 | default: |
2570 | switch (x86_pmu.version) { | 2808 | switch (x86_pmu.version) { |
2571 | case 1: | 2809 | case 1: |
diff --git a/arch/x86/kernel/cpu/perf_event_intel_cqm.c b/arch/x86/kernel/cpu/perf_event_intel_cqm.c
new file mode 100644
index 000000000000..e4d1b8b738fa
--- /dev/null
+++ b/arch/x86/kernel/cpu/perf_event_intel_cqm.c
@@ -0,0 +1,1379 @@ | |||
1 | /* | ||
2 | * Intel Cache Quality-of-Service Monitoring (CQM) support. | ||
3 | * | ||
4 | * Based very, very heavily on work by Peter Zijlstra. | ||
5 | */ | ||
6 | |||
7 | #include <linux/perf_event.h> | ||
8 | #include <linux/slab.h> | ||
9 | #include <asm/cpu_device_id.h> | ||
10 | #include "perf_event.h" | ||
11 | |||
12 | #define MSR_IA32_PQR_ASSOC 0x0c8f | ||
13 | #define MSR_IA32_QM_CTR 0x0c8e | ||
14 | #define MSR_IA32_QM_EVTSEL 0x0c8d | ||
15 | |||
16 | static unsigned int cqm_max_rmid = -1; | ||
17 | static unsigned int cqm_l3_scale; /* supposedly cacheline size */ | ||
18 | |||
19 | struct intel_cqm_state { | ||
20 | raw_spinlock_t lock; | ||
21 | int rmid; | ||
22 | int cnt; | ||
23 | }; | ||
24 | |||
25 | static DEFINE_PER_CPU(struct intel_cqm_state, cqm_state); | ||
26 | |||
27 | /* | ||
28 | * Protects cache_groups and cqm_rmid_free_lru and cqm_rmid_limbo_lru. | ||
29 | * Also protects event->hw.cqm_rmid | ||
30 | * | ||
31 | * Hold either for stability, both for modification of ->hw.cqm_rmid. | ||
32 | */ | ||
33 | static DEFINE_MUTEX(cache_mutex); | ||
34 | static DEFINE_RAW_SPINLOCK(cache_lock); | ||
35 | |||
36 | /* | ||
37 | * Groups of events that have the same target(s), one RMID per group. | ||
38 | */ | ||
39 | static LIST_HEAD(cache_groups); | ||
40 | |||
41 | /* | ||
42 | * Mask of CPUs for reading CQM values. We only need one per-socket. | ||
43 | */ | ||
44 | static cpumask_t cqm_cpumask; | ||
45 | |||
46 | #define RMID_VAL_ERROR (1ULL << 63) | ||
47 | #define RMID_VAL_UNAVAIL (1ULL << 62) | ||
48 | |||
49 | #define QOS_L3_OCCUP_EVENT_ID (1 << 0) | ||
50 | |||
51 | #define QOS_EVENT_MASK QOS_L3_OCCUP_EVENT_ID | ||
52 | |||
53 | /* | ||
54 | * This is central to the rotation algorithm in __intel_cqm_rmid_rotate(). | ||
55 | * | ||
56 | * This rmid is always free and is guaranteed to have an associated | ||
57 | * near-zero occupancy value, i.e. no cachelines are tagged with this | ||
58 | * RMID, once __intel_cqm_rmid_rotate() returns. | ||
59 | */ | ||
60 | static unsigned int intel_cqm_rotation_rmid; | ||
61 | |||
62 | #define INVALID_RMID (-1) | ||
63 | |||
64 | /* | ||
65 | * Is @rmid valid for programming the hardware? | ||
66 | * | ||
67 | * rmid 0 is reserved by the hardware for all non-monitored tasks, which | ||
68 | * means that we should never come across an rmid with that value. | ||
69 | * Likewise, an rmid value of -1 is used to indicate "no rmid currently | ||
70 | * assigned" and is used as part of the rotation code. | ||
71 | */ | ||
72 | static inline bool __rmid_valid(unsigned int rmid) | ||
73 | { | ||
74 | if (!rmid || rmid == INVALID_RMID) | ||
75 | return false; | ||
76 | |||
77 | return true; | ||
78 | } | ||
79 | |||
80 | static u64 __rmid_read(unsigned int rmid) | ||
81 | { | ||
82 | u64 val; | ||
83 | |||
84 | /* | ||
85 | * Ignore the SDM, this thing is _NOTHING_ like a regular perfcnt, | ||
86 | * it just says that to increase confusion. | ||
87 | */ | ||
88 | wrmsr(MSR_IA32_QM_EVTSEL, QOS_L3_OCCUP_EVENT_ID, rmid); | ||
89 | rdmsrl(MSR_IA32_QM_CTR, val); | ||
90 | |||
91 | /* | ||
92 | * Aside from the ERROR and UNAVAIL bits, assume this thing returns | ||
93 | * the number of cachelines tagged with @rmid. | ||
94 | */ | ||
95 | return val; | ||
96 | } | ||
97 | |||
98 | enum rmid_recycle_state { | ||
99 | RMID_YOUNG = 0, | ||
100 | RMID_AVAILABLE, | ||
101 | RMID_DIRTY, | ||
102 | }; | ||
103 | |||
104 | struct cqm_rmid_entry { | ||
105 | unsigned int rmid; | ||
106 | enum rmid_recycle_state state; | ||
107 | struct list_head list; | ||
108 | unsigned long queue_time; | ||
109 | }; | ||
110 | |||
111 | /* | ||
112 | * cqm_rmid_free_lru - A least recently used list of RMIDs. | ||
113 | * | ||
114 | * Oldest entry at the head, newest (most recently used) entry at the | ||
115 | * tail. This list is never traversed, it's only used to keep track of | ||
116 | * the lru order. That is, we only pick entries of the head or insert | ||
117 | * them on the tail. | ||
118 | * | ||
119 | * All entries on the list are 'free', and their RMIDs are not currently | ||
120 | * in use. To mark an RMID as in use, remove its entry from the lru | ||
121 | * list. | ||
122 | * | ||
123 | * | ||
124 | * cqm_rmid_limbo_lru - list of currently unused but (potentially) dirty RMIDs. | ||
125 | * | ||
126 | * This list contains RMIDs that no one is currently using but that | ||
127 | * may have a non-zero occupancy value associated with them. The | ||
128 | * rotation worker moves RMIDs from the limbo list to the free list once | ||
129 | * the occupancy value drops below __intel_cqm_threshold. | ||
130 | * | ||
131 | * Both lists are protected by cache_mutex. | ||
132 | */ | ||
133 | static LIST_HEAD(cqm_rmid_free_lru); | ||
134 | static LIST_HEAD(cqm_rmid_limbo_lru); | ||
135 | |||
136 | /* | ||
137 | * We use a simple array of pointers so that we can lookup a struct | ||
138 | * cqm_rmid_entry in O(1). This alleviates the callers of __get_rmid() | ||
139 | * and __put_rmid() from having to worry about dealing with struct | ||
140 | * cqm_rmid_entry - they just deal with rmids, i.e. integers. | ||
141 | * | ||
142 | * Once this array is initialized it is read-only. No locks are required | ||
143 | * to access it. | ||
144 | * | ||
145 | * All entries for all RMIDs can be looked up in this array at all | ||
146 | * times. | ||
147 | */ | ||
148 | static struct cqm_rmid_entry **cqm_rmid_ptrs; | ||
149 | |||
150 | static inline struct cqm_rmid_entry *__rmid_entry(int rmid) | ||
151 | { | ||
152 | struct cqm_rmid_entry *entry; | ||
153 | |||
154 | entry = cqm_rmid_ptrs[rmid]; | ||
155 | WARN_ON(entry->rmid != rmid); | ||
156 | |||
157 | return entry; | ||
158 | } | ||
159 | |||
160 | /* | ||
161 | * Returns < 0 on fail. | ||
162 | * | ||
163 | * We expect to be called with cache_mutex held. | ||
164 | */ | ||
165 | static int __get_rmid(void) | ||
166 | { | ||
167 | struct cqm_rmid_entry *entry; | ||
168 | |||
169 | lockdep_assert_held(&cache_mutex); | ||
170 | |||
171 | if (list_empty(&cqm_rmid_free_lru)) | ||
172 | return INVALID_RMID; | ||
173 | |||
174 | entry = list_first_entry(&cqm_rmid_free_lru, struct cqm_rmid_entry, list); | ||
175 | list_del(&entry->list); | ||
176 | |||
177 | return entry->rmid; | ||
178 | } | ||
179 | |||
180 | static void __put_rmid(unsigned int rmid) | ||
181 | { | ||
182 | struct cqm_rmid_entry *entry; | ||
183 | |||
184 | lockdep_assert_held(&cache_mutex); | ||
185 | |||
186 | WARN_ON(!__rmid_valid(rmid)); | ||
187 | entry = __rmid_entry(rmid); | ||
188 | |||
189 | entry->queue_time = jiffies; | ||
190 | entry->state = RMID_YOUNG; | ||
191 | |||
192 | list_add_tail(&entry->list, &cqm_rmid_limbo_lru); | ||
193 | } | ||
194 | |||
195 | static int intel_cqm_setup_rmid_cache(void) | ||
196 | { | ||
197 | struct cqm_rmid_entry *entry; | ||
198 | unsigned int nr_rmids; | ||
199 | int r = 0; | ||
200 | |||
201 | nr_rmids = cqm_max_rmid + 1; | ||
202 | cqm_rmid_ptrs = kmalloc(sizeof(struct cqm_rmid_entry *) * | ||
203 | nr_rmids, GFP_KERNEL); | ||
204 | if (!cqm_rmid_ptrs) | ||
205 | return -ENOMEM; | ||
206 | |||
207 | for (; r <= cqm_max_rmid; r++) { | ||
208 | struct cqm_rmid_entry *entry; | ||
209 | |||
210 | entry = kmalloc(sizeof(*entry), GFP_KERNEL); | ||
211 | if (!entry) | ||
212 | goto fail; | ||
213 | |||
214 | INIT_LIST_HEAD(&entry->list); | ||
215 | entry->rmid = r; | ||
216 | cqm_rmid_ptrs[r] = entry; | ||
217 | |||
218 | list_add_tail(&entry->list, &cqm_rmid_free_lru); | ||
219 | } | ||
220 | |||
221 | /* | ||
222 | * RMID 0 is special and is always allocated. It's used for all | ||
223 | * tasks that are not monitored. | ||
224 | */ | ||
225 | entry = __rmid_entry(0); | ||
226 | list_del(&entry->list); | ||
227 | |||
228 | mutex_lock(&cache_mutex); | ||
229 | intel_cqm_rotation_rmid = __get_rmid(); | ||
230 | mutex_unlock(&cache_mutex); | ||
231 | |||
232 | return 0; | ||
233 | fail: | ||
234 | while (r--) | ||
235 | kfree(cqm_rmid_ptrs[r]); | ||
236 | |||
237 | kfree(cqm_rmid_ptrs); | ||
238 | return -ENOMEM; | ||
239 | } | ||
240 | |||
241 | /* | ||
242 | * Determine if @a and @b measure the same set of tasks. | ||
243 | * | ||
244 | * If @a and @b measure the same set of tasks then we want to share a | ||
245 | * single RMID. | ||
246 | */ | ||
247 | static bool __match_event(struct perf_event *a, struct perf_event *b) | ||
248 | { | ||
249 | /* Per-cpu and task events don't mix */ | ||
250 | if ((a->attach_state & PERF_ATTACH_TASK) != | ||
251 | (b->attach_state & PERF_ATTACH_TASK)) | ||
252 | return false; | ||
253 | |||
254 | #ifdef CONFIG_CGROUP_PERF | ||
255 | if (a->cgrp != b->cgrp) | ||
256 | return false; | ||
257 | #endif | ||
258 | |||
259 | /* If not task event, we're machine wide */ | ||
260 | if (!(b->attach_state & PERF_ATTACH_TASK)) | ||
261 | return true; | ||
262 | |||
263 | /* | ||
264 | * Events that target same task are placed into the same cache group. | ||
265 | */ | ||
266 | if (a->hw.target == b->hw.target) | ||
267 | return true; | ||
268 | |||
269 | /* | ||
270 | * Are we an inherited event? | ||
271 | */ | ||
272 | if (b->parent == a) | ||
273 | return true; | ||
274 | |||
275 | return false; | ||
276 | } | ||
277 | |||
278 | #ifdef CONFIG_CGROUP_PERF | ||
279 | static inline struct perf_cgroup *event_to_cgroup(struct perf_event *event) | ||
280 | { | ||
281 | if (event->attach_state & PERF_ATTACH_TASK) | ||
282 | return perf_cgroup_from_task(event->hw.target); | ||
283 | |||
284 | return event->cgrp; | ||
285 | } | ||
286 | #endif | ||
287 | |||
288 | /* | ||
289 | * Determine if @a's tasks intersect with @b's tasks | ||
290 | * | ||
291 | * There are combinations of events that we explicitly prohibit, | ||
292 | * | ||
293 | * PROHIBITS | ||
294 | * system-wide -> cgroup and task | ||
295 | * cgroup -> system-wide | ||
296 | * -> task in cgroup | ||
297 | * task -> system-wide | ||
298 | * -> task in cgroup | ||
299 | * | ||
300 | * Call this function before allocating an RMID. | ||
301 | */ | ||
302 | static bool __conflict_event(struct perf_event *a, struct perf_event *b) | ||
303 | { | ||
304 | #ifdef CONFIG_CGROUP_PERF | ||
305 | /* | ||
306 | * We can have any number of cgroups but only one system-wide | ||
307 | * event at a time. | ||
308 | */ | ||
309 | if (a->cgrp && b->cgrp) { | ||
310 | struct perf_cgroup *ac = a->cgrp; | ||
311 | struct perf_cgroup *bc = b->cgrp; | ||
312 | |||
313 | /* | ||
314 | * This condition should have been caught in | ||
315 | * __match_event() and we should be sharing an RMID. | ||
316 | */ | ||
317 | WARN_ON_ONCE(ac == bc); | ||
318 | |||
319 | if (cgroup_is_descendant(ac->css.cgroup, bc->css.cgroup) || | ||
320 | cgroup_is_descendant(bc->css.cgroup, ac->css.cgroup)) | ||
321 | return true; | ||
322 | |||
323 | return false; | ||
324 | } | ||
325 | |||
326 | if (a->cgrp || b->cgrp) { | ||
327 | struct perf_cgroup *ac, *bc; | ||
328 | |||
329 | /* | ||
330 | * cgroup and system-wide events are mutually exclusive | ||
331 | */ | ||
332 | if ((a->cgrp && !(b->attach_state & PERF_ATTACH_TASK)) || | ||
333 | (b->cgrp && !(a->attach_state & PERF_ATTACH_TASK))) | ||
334 | return true; | ||
335 | |||
336 | /* | ||
337 | * Ensure neither event is part of the other's cgroup | ||
338 | */ | ||
339 | ac = event_to_cgroup(a); | ||
340 | bc = event_to_cgroup(b); | ||
341 | if (ac == bc) | ||
342 | return true; | ||
343 | |||
344 | /* | ||
345 | * Must have cgroup and non-intersecting task events. | ||
346 | */ | ||
347 | if (!ac || !bc) | ||
348 | return false; | ||
349 | |||
350 | /* | ||
351 | * We have cgroup and task events, and the task belongs | ||
352 | * to a cgroup. Check for for overlap. | ||
353 | */ | ||
354 | if (cgroup_is_descendant(ac->css.cgroup, bc->css.cgroup) || | ||
355 | cgroup_is_descendant(bc->css.cgroup, ac->css.cgroup)) | ||
356 | return true; | ||
357 | |||
358 | return false; | ||
359 | } | ||
360 | #endif | ||
361 | /* | ||
362 | * If one of them is not a task, same story as above with cgroups. | ||
363 | */ | ||
364 | if (!(a->attach_state & PERF_ATTACH_TASK) || | ||
365 | !(b->attach_state & PERF_ATTACH_TASK)) | ||
366 | return true; | ||
367 | |||
368 | /* | ||
369 | * Must be non-overlapping. | ||
370 | */ | ||
371 | return false; | ||
372 | } | ||
373 | |||
374 | struct rmid_read { | ||
375 | unsigned int rmid; | ||
376 | atomic64_t value; | ||
377 | }; | ||
378 | |||
379 | static void __intel_cqm_event_count(void *info); | ||
380 | |||
381 | /* | ||
382 | * Exchange the RMID of a group of events. | ||
383 | */ | ||
384 | static unsigned int | ||
385 | intel_cqm_xchg_rmid(struct perf_event *group, unsigned int rmid) | ||
386 | { | ||
387 | struct perf_event *event; | ||
388 | unsigned int old_rmid = group->hw.cqm_rmid; | ||
389 | struct list_head *head = &group->hw.cqm_group_entry; | ||
390 | |||
391 | lockdep_assert_held(&cache_mutex); | ||
392 | |||
393 | /* | ||
394 | * If our RMID is being deallocated, perform a read now. | ||
395 | */ | ||
396 | if (__rmid_valid(old_rmid) && !__rmid_valid(rmid)) { | ||
397 | struct rmid_read rr = { | ||
398 | .value = ATOMIC64_INIT(0), | ||
399 | .rmid = old_rmid, | ||
400 | }; | ||
401 | |||
402 | on_each_cpu_mask(&cqm_cpumask, __intel_cqm_event_count, | ||
403 | &rr, 1); | ||
404 | local64_set(&group->count, atomic64_read(&rr.value)); | ||
405 | } | ||
406 | |||
407 | raw_spin_lock_irq(&cache_lock); | ||
408 | |||
409 | group->hw.cqm_rmid = rmid; | ||
410 | list_for_each_entry(event, head, hw.cqm_group_entry) | ||
411 | event->hw.cqm_rmid = rmid; | ||
412 | |||
413 | raw_spin_unlock_irq(&cache_lock); | ||
414 | |||
415 | return old_rmid; | ||
416 | } | ||
417 | |||
418 | /* | ||
419 | * If we fail to assign a new RMID for intel_cqm_rotation_rmid because | ||
420 | * cachelines are still tagged with RMIDs in limbo, we progressively | ||
421 | * increment the threshold until we find an RMID in limbo with <= | ||
422 | * __intel_cqm_threshold lines tagged. This is designed to mitigate the | ||
423 | * problem where cachelines tagged with an RMID are not steadily being | ||
424 | * evicted. | ||
425 | * | ||
426 | * On successful rotations we decrease the threshold back towards zero. | ||
427 | * | ||
428 | * __intel_cqm_max_threshold provides an upper bound on the threshold, | ||
429 | * and is measured in bytes because it's exposed to userland. | ||
430 | */ | ||
431 | static unsigned int __intel_cqm_threshold; | ||
432 | static unsigned int __intel_cqm_max_threshold; | ||
433 | |||
434 | /* | ||
435 | * Test whether an RMID has a zero occupancy value on this cpu. | ||
436 | */ | ||
437 | static void intel_cqm_stable(void *arg) | ||
438 | { | ||
439 | struct cqm_rmid_entry *entry; | ||
440 | |||
441 | list_for_each_entry(entry, &cqm_rmid_limbo_lru, list) { | ||
442 | if (entry->state != RMID_AVAILABLE) | ||
443 | break; | ||
444 | |||
445 | if (__rmid_read(entry->rmid) > __intel_cqm_threshold) | ||
446 | entry->state = RMID_DIRTY; | ||
447 | } | ||
448 | } | ||
449 | |||
450 | /* | ||
451 | * If we have group events waiting for an RMID that don't conflict with | ||
452 | * events already running, assign @rmid. | ||
453 | */ | ||
454 | static bool intel_cqm_sched_in_event(unsigned int rmid) | ||
455 | { | ||
456 | struct perf_event *leader, *event; | ||
457 | |||
458 | lockdep_assert_held(&cache_mutex); | ||
459 | |||
460 | leader = list_first_entry(&cache_groups, struct perf_event, | ||
461 | hw.cqm_groups_entry); | ||
462 | event = leader; | ||
463 | |||
464 | list_for_each_entry_continue(event, &cache_groups, | ||
465 | hw.cqm_groups_entry) { | ||
466 | if (__rmid_valid(event->hw.cqm_rmid)) | ||
467 | continue; | ||
468 | |||
469 | if (__conflict_event(event, leader)) | ||
470 | continue; | ||
471 | |||
472 | intel_cqm_xchg_rmid(event, rmid); | ||
473 | return true; | ||
474 | } | ||
475 | |||
476 | return false; | ||
477 | } | ||
478 | |||
479 | /* | ||
480 | * Initially use this constant for both the limbo queue time and the | ||
481 | * rotation timer interval, pmu::hrtimer_interval_ms. | ||
482 | * | ||
483 | * They don't need to be the same, but the two are related since if you | ||
484 | * rotate faster than you recycle RMIDs, you may run out of available | ||
485 | * RMIDs. | ||
486 | */ | ||
487 | #define RMID_DEFAULT_QUEUE_TIME 250 /* ms */ | ||
488 | |||
489 | static unsigned int __rmid_queue_time_ms = RMID_DEFAULT_QUEUE_TIME; | ||
490 | |||
491 | /* | ||
492 | * intel_cqm_rmid_stabilize - move RMIDs from limbo to free list | ||
493 | * @nr_available: number of freeable RMIDs on the limbo list | ||
494 | * | ||
495 | * Quiescent state; wait for all 'freed' RMIDs to become unused, i.e. no | ||
496 | * cachelines are tagged with those RMIDs. After this we can reuse them | ||
497 | * and know that the current set of active RMIDs is stable. | ||
498 | * | ||
499 | * Return %true or %false depending on whether stabilization needs to be | ||
500 | * reattempted. | ||
501 | * | ||
502 | * If we return %true then @nr_available is updated to indicate the | ||
503 | * number of RMIDs on the limbo list that have been queued for the | ||
504 | * minimum queue time (RMID_AVAILABLE), but whose data occupancy values | ||
505 | * are above __intel_cqm_threshold. | ||
506 | */ | ||
507 | static bool intel_cqm_rmid_stabilize(unsigned int *available) | ||
508 | { | ||
509 | struct cqm_rmid_entry *entry, *tmp; | ||
510 | |||
511 | lockdep_assert_held(&cache_mutex); | ||
512 | |||
513 | *available = 0; | ||
514 | list_for_each_entry(entry, &cqm_rmid_limbo_lru, list) { | ||
515 | unsigned long min_queue_time; | ||
516 | unsigned long now = jiffies; | ||
517 | |||
518 | /* | ||
519 | * We hold RMIDs placed into limbo for a minimum queue | ||
520 | * time. Before the minimum queue time has elapsed we do | ||
521 | * not recycle RMIDs. | ||
522 | * | ||
523 | * The reasoning is that until a sufficient time has | ||
524 | * passed since we stopped using an RMID, any RMID | ||
525 | * placed onto the limbo list will likely still have | ||
526 | * data tagged in the cache, which means we'll probably | ||
527 | * fail to recycle it anyway. | ||
528 | * | ||
529 | * We can save ourselves an expensive IPI by skipping | ||
530 | * any RMIDs that have not been queued for the minimum | ||
531 | * time. | ||
532 | */ | ||
533 | min_queue_time = entry->queue_time + | ||
534 | msecs_to_jiffies(__rmid_queue_time_ms); | ||
535 | |||
536 | if (time_after(min_queue_time, now)) | ||
537 | break; | ||
538 | |||
539 | entry->state = RMID_AVAILABLE; | ||
540 | (*available)++; | ||
541 | } | ||
542 | |||
543 | /* | ||
544 | * Fast return if none of the RMIDs on the limbo list have been | ||
545 | * sitting on the queue for the minimum queue time. | ||
546 | */ | ||
547 | if (!*available) | ||
548 | return false; | ||
549 | |||
550 | /* | ||
551 | * Test whether an RMID is free for each package. | ||
552 | */ | ||
553 | on_each_cpu_mask(&cqm_cpumask, intel_cqm_stable, NULL, true); | ||
554 | |||
555 | list_for_each_entry_safe(entry, tmp, &cqm_rmid_limbo_lru, list) { | ||
556 | /* | ||
557 | * Exhausted all RMIDs that have waited min queue time. | ||
558 | */ | ||
559 | if (entry->state == RMID_YOUNG) | ||
560 | break; | ||
561 | |||
562 | if (entry->state == RMID_DIRTY) | ||
563 | continue; | ||
564 | |||
565 | list_del(&entry->list); /* remove from limbo */ | ||
566 | |||
567 | /* | ||
568 | * The rotation RMID gets priority if it's | ||
569 | * currently invalid. In which case, skip adding | ||
570 | * the RMID to the free lru. | ||
571 | */ | ||
572 | if (!__rmid_valid(intel_cqm_rotation_rmid)) { | ||
573 | intel_cqm_rotation_rmid = entry->rmid; | ||
574 | continue; | ||
575 | } | ||
576 | |||
577 | /* | ||
578 | * If we have groups waiting for RMIDs, hand | ||
579 | * them one now provided they don't conflict. | ||
580 | */ | ||
581 | if (intel_cqm_sched_in_event(entry->rmid)) | ||
582 | continue; | ||
583 | |||
584 | /* | ||
585 | * Otherwise place it onto the free list. | ||
586 | */ | ||
587 | list_add_tail(&entry->list, &cqm_rmid_free_lru); | ||
588 | } | ||
589 | |||
590 | |||
591 | return __rmid_valid(intel_cqm_rotation_rmid); | ||
592 | } | ||
593 | |||
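The per-package test issued through on_each_cpu_mask() in intel_cqm_rmid_stabilize() above is done by intel_cqm_stable(), defined earlier in this file. As a rough sketch of what that IPI callback amounts to (hedged; it reuses helpers and globals already visible in this driver, and the real implementation may differ in detail):

    static void intel_cqm_stable_sketch(void *arg)
    {
            struct cqm_rmid_entry *entry;

            /*
             * Entries are queued in order, so the first one still marked
             * RMID_YOUNG ends the scan: nothing after it has waited the
             * minimum queue time either.
             */
            list_for_each_entry(entry, &cqm_rmid_limbo_lru, list) {
                    if (entry->state != RMID_AVAILABLE)
                            break;

                    /* Too much data still tagged on this package? */
                    if (__rmid_read(entry->rmid) > __intel_cqm_threshold)
                            entry->state = RMID_DIRTY;
            }
    }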
594 | /* | ||
595 | * Pick a victim group and move it to the tail of the group list. | ||
596 | * @next: The first group without an RMID | ||
597 | */ | ||
598 | static void __intel_cqm_pick_and_rotate(struct perf_event *next) | ||
599 | { | ||
600 | struct perf_event *rotor; | ||
601 | unsigned int rmid; | ||
602 | |||
603 | lockdep_assert_held(&cache_mutex); | ||
604 | |||
605 | rotor = list_first_entry(&cache_groups, struct perf_event, | ||
606 | hw.cqm_groups_entry); | ||
607 | |||
608 | /* | ||
609 | * The group at the front of the list should always have a valid | ||
610 | * RMID. If it doesn't then no groups have RMIDs assigned and we | ||
611 | * don't need to rotate the list. | ||
612 | */ | ||
613 | if (next == rotor) | ||
614 | return; | ||
615 | |||
616 | rmid = intel_cqm_xchg_rmid(rotor, INVALID_RMID); | ||
617 | __put_rmid(rmid); | ||
618 | |||
619 | list_rotate_left(&cache_groups); | ||
620 | } | ||
621 | |||
622 | /* | ||
623 | * Deallocate the RMIDs from any events that conflict with @event, and | ||
624 | * place them on the back of the group list. | ||
625 | */ | ||
626 | static void intel_cqm_sched_out_conflicting_events(struct perf_event *event) | ||
627 | { | ||
628 | struct perf_event *group, *g; | ||
629 | unsigned int rmid; | ||
630 | |||
631 | lockdep_assert_held(&cache_mutex); | ||
632 | |||
633 | list_for_each_entry_safe(group, g, &cache_groups, hw.cqm_groups_entry) { | ||
634 | if (group == event) | ||
635 | continue; | ||
636 | |||
637 | rmid = group->hw.cqm_rmid; | ||
638 | |||
639 | /* | ||
640 | * Skip events that don't have a valid RMID. | ||
641 | */ | ||
642 | if (!__rmid_valid(rmid)) | ||
643 | continue; | ||
644 | |||
645 | /* | ||
646 | * No conflict? No problem! Leave the event alone. | ||
647 | */ | ||
648 | if (!__conflict_event(group, event)) | ||
649 | continue; | ||
650 | |||
651 | intel_cqm_xchg_rmid(group, INVALID_RMID); | ||
652 | __put_rmid(rmid); | ||
653 | } | ||
654 | } | ||
655 | |||
656 | /* | ||
657 | * Attempt to rotate the groups and assign new RMIDs. | ||
658 | * | ||
659 | * We rotate for two reasons, | ||
660 | * 1. To handle the scheduling of conflicting events | ||
661 | * 2. To recycle RMIDs | ||
662 | * | ||
663 | * Rotating RMIDs is complicated because the hardware doesn't give us | ||
664 | * any clues. | ||
665 | * | ||
666 | * There are problems with the hardware interface; when you change the | ||
667 | * task:RMID map cachelines retain their 'old' tags, giving a skewed | ||
668 | * picture. In order to work around this, we must always keep one free | ||
669 | * RMID - intel_cqm_rotation_rmid. | ||
670 | * | ||
671 | * Rotation works by taking away an RMID from a group (the old RMID), | ||
672 | * and assigning the free RMID to another group (the new RMID). We must | ||
673 | * then wait for the old RMID to not be used (no cachelines tagged). | ||
674 | * This ensures that all cachelines are tagged with 'active' RMIDs. At | ||
675 | * this point we can start reading values for the new RMID and treat the | ||
676 | * old RMID as the free RMID for the next rotation. | ||
677 | * | ||
678 | * Return %true or %false depending on whether we did any rotating. | ||
679 | */ | ||
680 | static bool __intel_cqm_rmid_rotate(void) | ||
681 | { | ||
682 | struct perf_event *group, *start = NULL; | ||
683 | unsigned int threshold_limit; | ||
684 | unsigned int nr_needed = 0; | ||
685 | unsigned int nr_available; | ||
686 | bool rotated = false; | ||
687 | |||
688 | mutex_lock(&cache_mutex); | ||
689 | |||
690 | again: | ||
691 | /* | ||
692 | * Fast path through this function if there are no groups and no | ||
693 | * RMIDs that need cleaning. | ||
694 | */ | ||
695 | if (list_empty(&cache_groups) && list_empty(&cqm_rmid_limbo_lru)) | ||
696 | goto out; | ||
697 | |||
698 | list_for_each_entry(group, &cache_groups, hw.cqm_groups_entry) { | ||
699 | if (!__rmid_valid(group->hw.cqm_rmid)) { | ||
700 | if (!start) | ||
701 | start = group; | ||
702 | nr_needed++; | ||
703 | } | ||
704 | } | ||
705 | |||
706 | /* | ||
707 | * We have some event groups, but they all have RMIDs assigned | ||
708 | * and no RMIDs need cleaning. | ||
709 | */ | ||
710 | if (!nr_needed && list_empty(&cqm_rmid_limbo_lru)) | ||
711 | goto out; | ||
712 | |||
713 | if (!nr_needed) | ||
714 | goto stabilize; | ||
715 | |||
716 | /* | ||
717 | * We have more event groups without RMIDs than available RMIDs, | ||
718 | * or we have event groups that conflict with the ones currently | ||
719 | * scheduled. | ||
720 | * | ||
721 | * We force deallocate the rmid of the group at the head of | ||
722 | * cache_groups. The first event group without an RMID then gets | ||
723 | * assigned intel_cqm_rotation_rmid. This ensures we always make | ||
724 | * forward progress. | ||
725 | * | ||
726 | * Rotate the cache_groups list so the previous head is now the | ||
727 | * tail. | ||
728 | */ | ||
729 | __intel_cqm_pick_and_rotate(start); | ||
730 | |||
731 | /* | ||
732 | * If the rotation is going to succeed, reduce the threshold so | ||
733 | * that we don't needlessly reuse dirty RMIDs. | ||
734 | */ | ||
735 | if (__rmid_valid(intel_cqm_rotation_rmid)) { | ||
736 | intel_cqm_xchg_rmid(start, intel_cqm_rotation_rmid); | ||
737 | intel_cqm_rotation_rmid = __get_rmid(); | ||
738 | |||
739 | intel_cqm_sched_out_conflicting_events(start); | ||
740 | |||
741 | if (__intel_cqm_threshold) | ||
742 | __intel_cqm_threshold--; | ||
743 | } | ||
744 | |||
745 | rotated = true; | ||
746 | |||
747 | stabilize: | ||
748 | /* | ||
749 | * We now need to stabilize the RMID we freed above (if any) to | ||
750 | * ensure that the next time we rotate we have an RMID with zero | ||
751 | * occupancy value. | ||
752 | * | ||
753 | * Alternatively, if we didn't need to perform any rotation, | ||
754 | * we'll have a bunch of RMIDs in limbo that need stabilizing. | ||
755 | */ | ||
756 | threshold_limit = __intel_cqm_max_threshold / cqm_l3_scale; | ||
757 | |||
758 | while (intel_cqm_rmid_stabilize(&nr_available) && | ||
759 | __intel_cqm_threshold < threshold_limit) { | ||
760 | unsigned int steal_limit; | ||
761 | |||
762 | /* | ||
763 | * Don't spin if nobody is actively waiting for an RMID, | ||
764 | * the rotation worker will be kicked as soon as an | ||
765 | * event needs an RMID anyway. | ||
766 | */ | ||
767 | if (!nr_needed) | ||
768 | break; | ||
769 | |||
770 | /* Allow max 25% of RMIDs to be in limbo. */ | ||
771 | steal_limit = (cqm_max_rmid + 1) / 4; | ||
772 | |||
773 | /* | ||
774 | * We failed to stabilize any RMIDs so our rotation | ||
775 | * logic is now stuck. In order to make forward progress | ||
776 | * we have a few options: | ||
777 | * | ||
778 | * 1. rotate ("steal") another RMID | ||
779 | * 2. increase the threshold | ||
780 | * 3. do nothing | ||
781 | * | ||
782 | * We do 1. until we hit the steal limit, then fall back to 2. | ||
783 | * | ||
784 | * The steal limit prevents all RMIDs ending up on the | ||
785 | * limbo list. This can happen if every RMID has a | ||
786 | * non-zero occupancy above threshold_limit, and the | ||
787 | * occupancy values aren't dropping fast enough. | ||
788 | * | ||
789 | * Note that there is prioritisation at work here - we'd | ||
790 | * rather increase the number of RMIDs on the limbo list | ||
791 | * than increase the threshold, because increasing the | ||
792 | * threshold skews the event data (because we reuse | ||
793 | * dirty RMIDs) - threshold bumps are a last resort. | ||
794 | */ | ||
795 | if (nr_available < steal_limit) | ||
796 | goto again; | ||
797 | |||
798 | __intel_cqm_threshold++; | ||
799 | } | ||
800 | |||
801 | out: | ||
802 | mutex_unlock(&cache_mutex); | ||
803 | return rotated; | ||
804 | } | ||
805 | |||
806 | static void intel_cqm_rmid_rotate(struct work_struct *work); | ||
807 | |||
808 | static DECLARE_DELAYED_WORK(intel_cqm_rmid_work, intel_cqm_rmid_rotate); | ||
809 | |||
810 | static struct pmu intel_cqm_pmu; | ||
811 | |||
812 | static void intel_cqm_rmid_rotate(struct work_struct *work) | ||
813 | { | ||
814 | unsigned long delay; | ||
815 | |||
816 | __intel_cqm_rmid_rotate(); | ||
817 | |||
818 | delay = msecs_to_jiffies(intel_cqm_pmu.hrtimer_interval_ms); | ||
819 | schedule_delayed_work(&intel_cqm_rmid_work, delay); | ||
820 | } | ||
821 | |||
822 | /* | ||
823 | * Find a group and set up the RMID. | ||
824 | * | ||
825 | * If we're part of a group, we use the group's RMID. | ||
826 | */ | ||
827 | static void intel_cqm_setup_event(struct perf_event *event, | ||
828 | struct perf_event **group) | ||
829 | { | ||
830 | struct perf_event *iter; | ||
831 | unsigned int rmid; | ||
832 | bool conflict = false; | ||
833 | |||
834 | list_for_each_entry(iter, &cache_groups, hw.cqm_groups_entry) { | ||
835 | rmid = iter->hw.cqm_rmid; | ||
836 | |||
837 | if (__match_event(iter, event)) { | ||
838 | /* All tasks in a group share an RMID */ | ||
839 | event->hw.cqm_rmid = rmid; | ||
840 | *group = iter; | ||
841 | return; | ||
842 | } | ||
843 | |||
844 | /* | ||
845 | * We only care about conflicts for events that are | ||
846 | * actually scheduled in (and hence have a valid RMID). | ||
847 | */ | ||
848 | if (__conflict_event(iter, event) && __rmid_valid(rmid)) | ||
849 | conflict = true; | ||
850 | } | ||
851 | |||
852 | if (conflict) | ||
853 | rmid = INVALID_RMID; | ||
854 | else | ||
855 | rmid = __get_rmid(); | ||
856 | |||
857 | event->hw.cqm_rmid = rmid; | ||
858 | } | ||
859 | |||
860 | static void intel_cqm_event_read(struct perf_event *event) | ||
861 | { | ||
862 | unsigned long flags; | ||
863 | unsigned int rmid; | ||
864 | u64 val; | ||
865 | |||
866 | /* | ||
867 | * Task events are handled by intel_cqm_event_count(). | ||
868 | */ | ||
869 | if (event->cpu == -1) | ||
870 | return; | ||
871 | |||
872 | raw_spin_lock_irqsave(&cache_lock, flags); | ||
873 | rmid = event->hw.cqm_rmid; | ||
874 | |||
875 | if (!__rmid_valid(rmid)) | ||
876 | goto out; | ||
877 | |||
878 | val = __rmid_read(rmid); | ||
879 | |||
880 | /* | ||
881 | * Ignore this reading on error states and do not update the value. | ||
882 | */ | ||
883 | if (val & (RMID_VAL_ERROR | RMID_VAL_UNAVAIL)) | ||
884 | goto out; | ||
885 | |||
886 | local64_set(&event->count, val); | ||
887 | out: | ||
888 | raw_spin_unlock_irqrestore(&cache_lock, flags); | ||
889 | } | ||
890 | |||
891 | static void __intel_cqm_event_count(void *info) | ||
892 | { | ||
893 | struct rmid_read *rr = info; | ||
894 | u64 val; | ||
895 | |||
896 | val = __rmid_read(rr->rmid); | ||
897 | |||
898 | if (val & (RMID_VAL_ERROR | RMID_VAL_UNAVAIL)) | ||
899 | return; | ||
900 | |||
901 | atomic64_add(val, &rr->value); | ||
902 | } | ||
903 | |||
904 | static inline bool cqm_group_leader(struct perf_event *event) | ||
905 | { | ||
906 | return !list_empty(&event->hw.cqm_groups_entry); | ||
907 | } | ||
908 | |||
909 | static u64 intel_cqm_event_count(struct perf_event *event) | ||
910 | { | ||
911 | unsigned long flags; | ||
912 | struct rmid_read rr = { | ||
913 | .value = ATOMIC64_INIT(0), | ||
914 | }; | ||
915 | |||
916 | /* | ||
917 | * We only need to worry about task events. System-wide events | ||
918 | * are handled as usual, i.e. entirely with | ||
919 | * intel_cqm_event_read(). | ||
920 | */ | ||
921 | if (event->cpu != -1) | ||
922 | return __perf_event_count(event); | ||
923 | |||
924 | /* | ||
925 | * Only the group leader gets to report values. This stops us | ||
926 | * reporting duplicate values to userspace, and gives us a clear | ||
927 | * rule for which task gets to report the values. | ||
928 | * | ||
929 | * Note that it is impossible to attribute these values to | ||
930 | * specific packages - we forfeit that ability when we create | ||
931 | * task events. | ||
932 | */ | ||
933 | if (!cqm_group_leader(event)) | ||
934 | return 0; | ||
935 | |||
936 | /* | ||
937 | * Notice that we don't perform the reading of an RMID | ||
938 | * atomically, because we can't hold a spin lock across the | ||
939 | * IPIs. | ||
940 | * | ||
941 | * Speculatively perform the read, since @event might be | ||
942 | * assigned a different (possibly invalid) RMID while we're | ||
943 | * busy performing the IPI calls. It's therefore necessary to | ||
944 | * check @event's RMID afterwards, and if it has changed, | ||
945 | * discard the result of the read. | ||
946 | */ | ||
947 | rr.rmid = ACCESS_ONCE(event->hw.cqm_rmid); | ||
948 | |||
949 | if (!__rmid_valid(rr.rmid)) | ||
950 | goto out; | ||
951 | |||
952 | on_each_cpu_mask(&cqm_cpumask, __intel_cqm_event_count, &rr, 1); | ||
953 | |||
954 | raw_spin_lock_irqsave(&cache_lock, flags); | ||
955 | if (event->hw.cqm_rmid == rr.rmid) | ||
956 | local64_set(&event->count, atomic64_read(&rr.value)); | ||
957 | raw_spin_unlock_irqrestore(&cache_lock, flags); | ||
958 | out: | ||
959 | return __perf_event_count(event); | ||
960 | } | ||
961 | |||
962 | static void intel_cqm_event_start(struct perf_event *event, int mode) | ||
963 | { | ||
964 | struct intel_cqm_state *state = this_cpu_ptr(&cqm_state); | ||
965 | unsigned int rmid = event->hw.cqm_rmid; | ||
966 | unsigned long flags; | ||
967 | |||
968 | if (!(event->hw.cqm_state & PERF_HES_STOPPED)) | ||
969 | return; | ||
970 | |||
971 | event->hw.cqm_state &= ~PERF_HES_STOPPED; | ||
972 | |||
973 | raw_spin_lock_irqsave(&state->lock, flags); | ||
974 | |||
975 | if (state->cnt++) | ||
976 | WARN_ON_ONCE(state->rmid != rmid); | ||
977 | else | ||
978 | WARN_ON_ONCE(state->rmid); | ||
979 | |||
980 | state->rmid = rmid; | ||
981 | wrmsrl(MSR_IA32_PQR_ASSOC, state->rmid); | ||
982 | |||
983 | raw_spin_unlock_irqrestore(&state->lock, flags); | ||
984 | } | ||
985 | |||
986 | static void intel_cqm_event_stop(struct perf_event *event, int mode) | ||
987 | { | ||
988 | struct intel_cqm_state *state = this_cpu_ptr(&cqm_state); | ||
989 | unsigned long flags; | ||
990 | |||
991 | if (event->hw.cqm_state & PERF_HES_STOPPED) | ||
992 | return; | ||
993 | |||
994 | event->hw.cqm_state |= PERF_HES_STOPPED; | ||
995 | |||
996 | raw_spin_lock_irqsave(&state->lock, flags); | ||
997 | intel_cqm_event_read(event); | ||
998 | |||
999 | if (!--state->cnt) { | ||
1000 | state->rmid = 0; | ||
1001 | wrmsrl(MSR_IA32_PQR_ASSOC, 0); | ||
1002 | } else { | ||
1003 | WARN_ON_ONCE(!state->rmid); | ||
1004 | } | ||
1005 | |||
1006 | raw_spin_unlock_irqrestore(&state->lock, flags); | ||
1007 | } | ||
1008 | |||
1009 | static int intel_cqm_event_add(struct perf_event *event, int mode) | ||
1010 | { | ||
1011 | unsigned long flags; | ||
1012 | unsigned int rmid; | ||
1013 | |||
1014 | raw_spin_lock_irqsave(&cache_lock, flags); | ||
1015 | |||
1016 | event->hw.cqm_state = PERF_HES_STOPPED; | ||
1017 | rmid = event->hw.cqm_rmid; | ||
1018 | |||
1019 | if (__rmid_valid(rmid) && (mode & PERF_EF_START)) | ||
1020 | intel_cqm_event_start(event, mode); | ||
1021 | |||
1022 | raw_spin_unlock_irqrestore(&cache_lock, flags); | ||
1023 | |||
1024 | return 0; | ||
1025 | } | ||
1026 | |||
1027 | static void intel_cqm_event_del(struct perf_event *event, int mode) | ||
1028 | { | ||
1029 | intel_cqm_event_stop(event, mode); | ||
1030 | } | ||
1031 | |||
1032 | static void intel_cqm_event_destroy(struct perf_event *event) | ||
1033 | { | ||
1034 | struct perf_event *group_other = NULL; | ||
1035 | |||
1036 | mutex_lock(&cache_mutex); | ||
1037 | |||
1038 | /* | ||
1039 | * If there's another event in this group... | ||
1040 | */ | ||
1041 | if (!list_empty(&event->hw.cqm_group_entry)) { | ||
1042 | group_other = list_first_entry(&event->hw.cqm_group_entry, | ||
1043 | struct perf_event, | ||
1044 | hw.cqm_group_entry); | ||
1045 | list_del(&event->hw.cqm_group_entry); | ||
1046 | } | ||
1047 | |||
1048 | /* | ||
1049 | * And we're the group leader.. | ||
1050 | */ | ||
1051 | if (cqm_group_leader(event)) { | ||
1052 | /* | ||
1053 | * If there was a group_other, make that leader, otherwise | ||
1054 | * destroy the group and return the RMID. | ||
1055 | */ | ||
1056 | if (group_other) { | ||
1057 | list_replace(&event->hw.cqm_groups_entry, | ||
1058 | &group_other->hw.cqm_groups_entry); | ||
1059 | } else { | ||
1060 | unsigned int rmid = event->hw.cqm_rmid; | ||
1061 | |||
1062 | if (__rmid_valid(rmid)) | ||
1063 | __put_rmid(rmid); | ||
1064 | list_del(&event->hw.cqm_groups_entry); | ||
1065 | } | ||
1066 | } | ||
1067 | |||
1068 | mutex_unlock(&cache_mutex); | ||
1069 | } | ||
1070 | |||
1071 | static int intel_cqm_event_init(struct perf_event *event) | ||
1072 | { | ||
1073 | struct perf_event *group = NULL; | ||
1074 | bool rotate = false; | ||
1075 | |||
1076 | if (event->attr.type != intel_cqm_pmu.type) | ||
1077 | return -ENOENT; | ||
1078 | |||
1079 | if (event->attr.config & ~QOS_EVENT_MASK) | ||
1080 | return -EINVAL; | ||
1081 | |||
1082 | /* unsupported modes and filters */ | ||
1083 | if (event->attr.exclude_user || | ||
1084 | event->attr.exclude_kernel || | ||
1085 | event->attr.exclude_hv || | ||
1086 | event->attr.exclude_idle || | ||
1087 | event->attr.exclude_host || | ||
1088 | event->attr.exclude_guest || | ||
1089 | event->attr.sample_period) /* no sampling */ | ||
1090 | return -EINVAL; | ||
1091 | |||
1092 | INIT_LIST_HEAD(&event->hw.cqm_group_entry); | ||
1093 | INIT_LIST_HEAD(&event->hw.cqm_groups_entry); | ||
1094 | |||
1095 | event->destroy = intel_cqm_event_destroy; | ||
1096 | |||
1097 | mutex_lock(&cache_mutex); | ||
1098 | |||
1099 | /* Will also set rmid */ | ||
1100 | intel_cqm_setup_event(event, &group); | ||
1101 | |||
1102 | if (group) { | ||
1103 | list_add_tail(&event->hw.cqm_group_entry, | ||
1104 | &group->hw.cqm_group_entry); | ||
1105 | } else { | ||
1106 | list_add_tail(&event->hw.cqm_groups_entry, | ||
1107 | &cache_groups); | ||
1108 | |||
1109 | /* | ||
1110 | * All RMIDs are either in use or have recently been | ||
1111 | * used. Kick the rotation worker to clean/free some. | ||
1112 | * | ||
1113 | * We only do this for the group leader, rather than for | ||
1114 | * every event in a group to save on needless work. | ||
1115 | */ | ||
1116 | if (!__rmid_valid(event->hw.cqm_rmid)) | ||
1117 | rotate = true; | ||
1118 | } | ||
1119 | |||
1120 | mutex_unlock(&cache_mutex); | ||
1121 | |||
1122 | if (rotate) | ||
1123 | schedule_delayed_work(&intel_cqm_rmid_work, 0); | ||
1124 | |||
1125 | return 0; | ||
1126 | } | ||
1127 | |||
1128 | EVENT_ATTR_STR(llc_occupancy, intel_cqm_llc, "event=0x01"); | ||
1129 | EVENT_ATTR_STR(llc_occupancy.per-pkg, intel_cqm_llc_pkg, "1"); | ||
1130 | EVENT_ATTR_STR(llc_occupancy.unit, intel_cqm_llc_unit, "Bytes"); | ||
1131 | EVENT_ATTR_STR(llc_occupancy.scale, intel_cqm_llc_scale, NULL); | ||
1132 | EVENT_ATTR_STR(llc_occupancy.snapshot, intel_cqm_llc_snapshot, "1"); | ||
1133 | |||
1134 | static struct attribute *intel_cqm_events_attr[] = { | ||
1135 | EVENT_PTR(intel_cqm_llc), | ||
1136 | EVENT_PTR(intel_cqm_llc_pkg), | ||
1137 | EVENT_PTR(intel_cqm_llc_unit), | ||
1138 | EVENT_PTR(intel_cqm_llc_scale), | ||
1139 | EVENT_PTR(intel_cqm_llc_snapshot), | ||
1140 | NULL, | ||
1141 | }; | ||
1142 | |||
1143 | static struct attribute_group intel_cqm_events_group = { | ||
1144 | .name = "events", | ||
1145 | .attrs = intel_cqm_events_attr, | ||
1146 | }; | ||
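With the attribute group above in place, the event is advertised to user space as llc_occupancy under the intel_cqm PMU registered at the bottom of this file. As a hedged usage note (tool behaviour is outside this patch): something along the lines of "perf stat -e intel_cqm/llc_occupancy/ -a sleep 1" would be expected to read it, with the .unit/.scale attributes reporting occupancy in bytes and the .per-pkg/.snapshot attributes marking the value as a per-package snapshot rather than an accumulating count.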
1147 | |||
1148 | PMU_FORMAT_ATTR(event, "config:0-7"); | ||
1149 | static struct attribute *intel_cqm_formats_attr[] = { | ||
1150 | &format_attr_event.attr, | ||
1151 | NULL, | ||
1152 | }; | ||
1153 | |||
1154 | static struct attribute_group intel_cqm_format_group = { | ||
1155 | .name = "format", | ||
1156 | .attrs = intel_cqm_formats_attr, | ||
1157 | }; | ||
1158 | |||
1159 | static ssize_t | ||
1160 | max_recycle_threshold_show(struct device *dev, struct device_attribute *attr, | ||
1161 | char *page) | ||
1162 | { | ||
1163 | ssize_t rv; | ||
1164 | |||
1165 | mutex_lock(&cache_mutex); | ||
1166 | rv = snprintf(page, PAGE_SIZE-1, "%u\n", __intel_cqm_max_threshold); | ||
1167 | mutex_unlock(&cache_mutex); | ||
1168 | |||
1169 | return rv; | ||
1170 | } | ||
1171 | |||
1172 | static ssize_t | ||
1173 | max_recycle_threshold_store(struct device *dev, | ||
1174 | struct device_attribute *attr, | ||
1175 | const char *buf, size_t count) | ||
1176 | { | ||
1177 | unsigned int bytes, cachelines; | ||
1178 | int ret; | ||
1179 | |||
1180 | ret = kstrtouint(buf, 0, &bytes); | ||
1181 | if (ret) | ||
1182 | return ret; | ||
1183 | |||
1184 | mutex_lock(&cache_mutex); | ||
1185 | |||
1186 | __intel_cqm_max_threshold = bytes; | ||
1187 | cachelines = bytes / cqm_l3_scale; | ||
1188 | |||
1189 | /* | ||
1190 | * The new maximum takes effect immediately. | ||
1191 | */ | ||
1192 | if (__intel_cqm_threshold > cachelines) | ||
1193 | __intel_cqm_threshold = cachelines; | ||
1194 | |||
1195 | mutex_unlock(&cache_mutex); | ||
1196 | |||
1197 | return count; | ||
1198 | } | ||
1199 | |||
1200 | static DEVICE_ATTR_RW(max_recycle_threshold); | ||
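Because this attribute hangs off the PMU's attr_groups, it should surface as a writable sysfs file once the PMU below is registered, presumably at /sys/bus/event_source/devices/intel_cqm/max_recycle_threshold (path hedged, derived from the usual perf PMU sysfs layout). Writing a byte value there, e.g. "echo 32768 > .../max_recycle_threshold" as root, sets the upper bound on the dirty-occupancy threshold used while recycling RMIDs; the store above converts bytes to cachelines via cqm_l3_scale and immediately clamps the current threshold if it exceeds the new maximum.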
1201 | |||
1202 | static struct attribute *intel_cqm_attrs[] = { | ||
1203 | &dev_attr_max_recycle_threshold.attr, | ||
1204 | NULL, | ||
1205 | }; | ||
1206 | |||
1207 | static const struct attribute_group intel_cqm_group = { | ||
1208 | .attrs = intel_cqm_attrs, | ||
1209 | }; | ||
1210 | |||
1211 | static const struct attribute_group *intel_cqm_attr_groups[] = { | ||
1212 | &intel_cqm_events_group, | ||
1213 | &intel_cqm_format_group, | ||
1214 | &intel_cqm_group, | ||
1215 | NULL, | ||
1216 | }; | ||
1217 | |||
1218 | static struct pmu intel_cqm_pmu = { | ||
1219 | .hrtimer_interval_ms = RMID_DEFAULT_QUEUE_TIME, | ||
1220 | .attr_groups = intel_cqm_attr_groups, | ||
1221 | .task_ctx_nr = perf_sw_context, | ||
1222 | .event_init = intel_cqm_event_init, | ||
1223 | .add = intel_cqm_event_add, | ||
1224 | .del = intel_cqm_event_del, | ||
1225 | .start = intel_cqm_event_start, | ||
1226 | .stop = intel_cqm_event_stop, | ||
1227 | .read = intel_cqm_event_read, | ||
1228 | .count = intel_cqm_event_count, | ||
1229 | }; | ||
1230 | |||
1231 | static inline void cqm_pick_event_reader(int cpu) | ||
1232 | { | ||
1233 | int phys_id = topology_physical_package_id(cpu); | ||
1234 | int i; | ||
1235 | |||
1236 | for_each_cpu(i, &cqm_cpumask) { | ||
1237 | if (phys_id == topology_physical_package_id(i)) | ||
1238 | return; /* already got reader for this socket */ | ||
1239 | } | ||
1240 | |||
1241 | cpumask_set_cpu(cpu, &cqm_cpumask); | ||
1242 | } | ||
1243 | |||
1244 | static void intel_cqm_cpu_prepare(unsigned int cpu) | ||
1245 | { | ||
1246 | struct intel_cqm_state *state = &per_cpu(cqm_state, cpu); | ||
1247 | struct cpuinfo_x86 *c = &cpu_data(cpu); | ||
1248 | |||
1249 | raw_spin_lock_init(&state->lock); | ||
1250 | state->rmid = 0; | ||
1251 | state->cnt = 0; | ||
1252 | |||
1253 | WARN_ON(c->x86_cache_max_rmid != cqm_max_rmid); | ||
1254 | WARN_ON(c->x86_cache_occ_scale != cqm_l3_scale); | ||
1255 | } | ||
1256 | |||
1257 | static void intel_cqm_cpu_exit(unsigned int cpu) | ||
1258 | { | ||
1259 | int phys_id = topology_physical_package_id(cpu); | ||
1260 | int i; | ||
1261 | |||
1262 | /* | ||
1263 | * Is @cpu a designated cqm reader? | ||
1264 | */ | ||
1265 | if (!cpumask_test_and_clear_cpu(cpu, &cqm_cpumask)) | ||
1266 | return; | ||
1267 | |||
1268 | for_each_online_cpu(i) { | ||
1269 | if (i == cpu) | ||
1270 | continue; | ||
1271 | |||
1272 | if (phys_id == topology_physical_package_id(i)) { | ||
1273 | cpumask_set_cpu(i, &cqm_cpumask); | ||
1274 | break; | ||
1275 | } | ||
1276 | } | ||
1277 | } | ||
1278 | |||
1279 | static int intel_cqm_cpu_notifier(struct notifier_block *nb, | ||
1280 | unsigned long action, void *hcpu) | ||
1281 | { | ||
1282 | unsigned int cpu = (unsigned long)hcpu; | ||
1283 | |||
1284 | switch (action & ~CPU_TASKS_FROZEN) { | ||
1285 | case CPU_UP_PREPARE: | ||
1286 | intel_cqm_cpu_prepare(cpu); | ||
1287 | break; | ||
1288 | case CPU_DOWN_PREPARE: | ||
1289 | intel_cqm_cpu_exit(cpu); | ||
1290 | break; | ||
1291 | case CPU_STARTING: | ||
1292 | cqm_pick_event_reader(cpu); | ||
1293 | break; | ||
1294 | } | ||
1295 | |||
1296 | return NOTIFY_OK; | ||
1297 | } | ||
1298 | |||
1299 | static const struct x86_cpu_id intel_cqm_match[] = { | ||
1300 | { .vendor = X86_VENDOR_INTEL, .feature = X86_FEATURE_CQM_OCCUP_LLC }, | ||
1301 | {} | ||
1302 | }; | ||
1303 | |||
1304 | static int __init intel_cqm_init(void) | ||
1305 | { | ||
1306 | char *str, scale[20]; | ||
1307 | int i, cpu, ret; | ||
1308 | |||
1309 | if (!x86_match_cpu(intel_cqm_match)) | ||
1310 | return -ENODEV; | ||
1311 | |||
1312 | cqm_l3_scale = boot_cpu_data.x86_cache_occ_scale; | ||
1313 | |||
1314 | /* | ||
1315 | * It's possible that not all resources support the same number | ||
1316 | * of RMIDs. Instead of making scheduling much more complicated | ||
1317 | * (where we have to match a task's RMID to a cpu that supports | ||
1318 | * that many RMIDs), just find the minimum number of RMIDs | ||
1319 | * supported across all cpus. | ||
1320 | * | ||
1321 | * Also, check that the scales match on all cpus. | ||
1322 | */ | ||
1323 | cpu_notifier_register_begin(); | ||
1324 | |||
1325 | for_each_online_cpu(cpu) { | ||
1326 | struct cpuinfo_x86 *c = &cpu_data(cpu); | ||
1327 | |||
1328 | if (c->x86_cache_max_rmid < cqm_max_rmid) | ||
1329 | cqm_max_rmid = c->x86_cache_max_rmid; | ||
1330 | |||
1331 | if (c->x86_cache_occ_scale != cqm_l3_scale) { | ||
1332 | pr_err("Multiple LLC scale values, disabling\n"); | ||
1333 | ret = -EINVAL; | ||
1334 | goto out; | ||
1335 | } | ||
1336 | } | ||
1337 | |||
1338 | /* | ||
1339 | * A reasonable upper limit on the max threshold is the number | ||
1340 | * of lines tagged per RMID if all RMIDs have the same number of | ||
1341 | * lines tagged in the LLC. | ||
1342 | * | ||
1343 | * For a 35MB LLC and 56 RMIDs, this is ~1.8% of the LLC. | ||
1344 | */ | ||
1345 | __intel_cqm_max_threshold = | ||
1346 | boot_cpu_data.x86_cache_size * 1024 / (cqm_max_rmid + 1); | ||
1347 | |||
1348 | snprintf(scale, sizeof(scale), "%u", cqm_l3_scale); | ||
1349 | str = kstrdup(scale, GFP_KERNEL); | ||
1350 | if (!str) { | ||
1351 | ret = -ENOMEM; | ||
1352 | goto out; | ||
1353 | } | ||
1354 | |||
1355 | event_attr_intel_cqm_llc_scale.event_str = str; | ||
1356 | |||
1357 | ret = intel_cqm_setup_rmid_cache(); | ||
1358 | if (ret) | ||
1359 | goto out; | ||
1360 | |||
1361 | for_each_online_cpu(i) { | ||
1362 | intel_cqm_cpu_prepare(i); | ||
1363 | cqm_pick_event_reader(i); | ||
1364 | } | ||
1365 | |||
1366 | __perf_cpu_notifier(intel_cqm_cpu_notifier); | ||
1367 | |||
1368 | ret = perf_pmu_register(&intel_cqm_pmu, "intel_cqm", -1); | ||
1369 | if (ret) | ||
1370 | pr_err("Intel CQM perf registration failed: %d\n", ret); | ||
1371 | else | ||
1372 | pr_info("Intel CQM monitoring enabled\n"); | ||
1373 | |||
1374 | out: | ||
1375 | cpu_notifier_register_done(); | ||
1376 | |||
1377 | return ret; | ||
1378 | } | ||
1379 | device_initcall(intel_cqm_init); | ||
diff --git a/arch/x86/kernel/cpu/perf_event_intel_lbr.c b/arch/x86/kernel/cpu/perf_event_intel_lbr.c index 58f1a94beaf0..0473874109cb 100644 --- a/arch/x86/kernel/cpu/perf_event_intel_lbr.c +++ b/arch/x86/kernel/cpu/perf_event_intel_lbr.c | |||
@@ -39,6 +39,7 @@ static enum { | |||
39 | #define LBR_IND_JMP_BIT 6 /* do not capture indirect jumps */ | 39 | #define LBR_IND_JMP_BIT 6 /* do not capture indirect jumps */ |
40 | #define LBR_REL_JMP_BIT 7 /* do not capture relative jumps */ | 40 | #define LBR_REL_JMP_BIT 7 /* do not capture relative jumps */ |
41 | #define LBR_FAR_BIT 8 /* do not capture far branches */ | 41 | #define LBR_FAR_BIT 8 /* do not capture far branches */ |
42 | #define LBR_CALL_STACK_BIT 9 /* enable call stack */ | ||
42 | 43 | ||
43 | #define LBR_KERNEL (1 << LBR_KERNEL_BIT) | 44 | #define LBR_KERNEL (1 << LBR_KERNEL_BIT) |
44 | #define LBR_USER (1 << LBR_USER_BIT) | 45 | #define LBR_USER (1 << LBR_USER_BIT) |
@@ -49,6 +50,7 @@ static enum { | |||
49 | #define LBR_REL_JMP (1 << LBR_REL_JMP_BIT) | 50 | #define LBR_REL_JMP (1 << LBR_REL_JMP_BIT) |
50 | #define LBR_IND_JMP (1 << LBR_IND_JMP_BIT) | 51 | #define LBR_IND_JMP (1 << LBR_IND_JMP_BIT) |
51 | #define LBR_FAR (1 << LBR_FAR_BIT) | 52 | #define LBR_FAR (1 << LBR_FAR_BIT) |
53 | #define LBR_CALL_STACK (1 << LBR_CALL_STACK_BIT) | ||
52 | 54 | ||
53 | #define LBR_PLM (LBR_KERNEL | LBR_USER) | 55 | #define LBR_PLM (LBR_KERNEL | LBR_USER) |
54 | 56 | ||
@@ -69,33 +71,31 @@ static enum { | |||
69 | #define LBR_FROM_FLAG_IN_TX (1ULL << 62) | 71 | #define LBR_FROM_FLAG_IN_TX (1ULL << 62) |
70 | #define LBR_FROM_FLAG_ABORT (1ULL << 61) | 72 | #define LBR_FROM_FLAG_ABORT (1ULL << 61) |
71 | 73 | ||
72 | #define for_each_branch_sample_type(x) \ | ||
73 | for ((x) = PERF_SAMPLE_BRANCH_USER; \ | ||
74 | (x) < PERF_SAMPLE_BRANCH_MAX; (x) <<= 1) | ||
75 | |||
76 | /* | 74 | /* |
77 | * x86 control flow change classification | 75 | * x86 control flow change classification |
78 | * x86 control flow changes include branches, interrupts, traps, faults | 76 | * x86 control flow changes include branches, interrupts, traps, faults |
79 | */ | 77 | */ |
80 | enum { | 78 | enum { |
81 | X86_BR_NONE = 0, /* unknown */ | 79 | X86_BR_NONE = 0, /* unknown */ |
82 | 80 | ||
83 | X86_BR_USER = 1 << 0, /* branch target is user */ | 81 | X86_BR_USER = 1 << 0, /* branch target is user */ |
84 | X86_BR_KERNEL = 1 << 1, /* branch target is kernel */ | 82 | X86_BR_KERNEL = 1 << 1, /* branch target is kernel */ |
85 | 83 | ||
86 | X86_BR_CALL = 1 << 2, /* call */ | 84 | X86_BR_CALL = 1 << 2, /* call */ |
87 | X86_BR_RET = 1 << 3, /* return */ | 85 | X86_BR_RET = 1 << 3, /* return */ |
88 | X86_BR_SYSCALL = 1 << 4, /* syscall */ | 86 | X86_BR_SYSCALL = 1 << 4, /* syscall */ |
89 | X86_BR_SYSRET = 1 << 5, /* syscall return */ | 87 | X86_BR_SYSRET = 1 << 5, /* syscall return */ |
90 | X86_BR_INT = 1 << 6, /* sw interrupt */ | 88 | X86_BR_INT = 1 << 6, /* sw interrupt */ |
91 | X86_BR_IRET = 1 << 7, /* return from interrupt */ | 89 | X86_BR_IRET = 1 << 7, /* return from interrupt */ |
92 | X86_BR_JCC = 1 << 8, /* conditional */ | 90 | X86_BR_JCC = 1 << 8, /* conditional */ |
93 | X86_BR_JMP = 1 << 9, /* jump */ | 91 | X86_BR_JMP = 1 << 9, /* jump */ |
94 | X86_BR_IRQ = 1 << 10,/* hw interrupt or trap or fault */ | 92 | X86_BR_IRQ = 1 << 10,/* hw interrupt or trap or fault */ |
95 | X86_BR_IND_CALL = 1 << 11,/* indirect calls */ | 93 | X86_BR_IND_CALL = 1 << 11,/* indirect calls */ |
96 | X86_BR_ABORT = 1 << 12,/* transaction abort */ | 94 | X86_BR_ABORT = 1 << 12,/* transaction abort */ |
97 | X86_BR_IN_TX = 1 << 13,/* in transaction */ | 95 | X86_BR_IN_TX = 1 << 13,/* in transaction */ |
98 | X86_BR_NO_TX = 1 << 14,/* not in transaction */ | 96 | X86_BR_NO_TX = 1 << 14,/* not in transaction */ |
97 | X86_BR_ZERO_CALL = 1 << 15,/* zero length call */ | ||
98 | X86_BR_CALL_STACK = 1 << 16,/* call stack */ | ||
99 | }; | 99 | }; |
100 | 100 | ||
101 | #define X86_BR_PLM (X86_BR_USER | X86_BR_KERNEL) | 101 | #define X86_BR_PLM (X86_BR_USER | X86_BR_KERNEL) |
@@ -112,13 +112,15 @@ enum { | |||
112 | X86_BR_JMP |\ | 112 | X86_BR_JMP |\ |
113 | X86_BR_IRQ |\ | 113 | X86_BR_IRQ |\ |
114 | X86_BR_ABORT |\ | 114 | X86_BR_ABORT |\ |
115 | X86_BR_IND_CALL) | 115 | X86_BR_IND_CALL |\ |
116 | X86_BR_ZERO_CALL) | ||
116 | 117 | ||
117 | #define X86_BR_ALL (X86_BR_PLM | X86_BR_ANY) | 118 | #define X86_BR_ALL (X86_BR_PLM | X86_BR_ANY) |
118 | 119 | ||
119 | #define X86_BR_ANY_CALL \ | 120 | #define X86_BR_ANY_CALL \ |
120 | (X86_BR_CALL |\ | 121 | (X86_BR_CALL |\ |
121 | X86_BR_IND_CALL |\ | 122 | X86_BR_IND_CALL |\ |
123 | X86_BR_ZERO_CALL |\ | ||
122 | X86_BR_SYSCALL |\ | 124 | X86_BR_SYSCALL |\ |
123 | X86_BR_IRQ |\ | 125 | X86_BR_IRQ |\ |
124 | X86_BR_INT) | 126 | X86_BR_INT) |
@@ -132,14 +134,23 @@ static void intel_pmu_lbr_filter(struct cpu_hw_events *cpuc); | |||
132 | 134 | ||
133 | static void __intel_pmu_lbr_enable(void) | 135 | static void __intel_pmu_lbr_enable(void) |
134 | { | 136 | { |
135 | u64 debugctl; | ||
136 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); | 137 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); |
138 | u64 debugctl, lbr_select = 0; | ||
137 | 139 | ||
138 | if (cpuc->lbr_sel) | 140 | if (cpuc->lbr_sel) { |
139 | wrmsrl(MSR_LBR_SELECT, cpuc->lbr_sel->config); | 141 | lbr_select = cpuc->lbr_sel->config; |
142 | wrmsrl(MSR_LBR_SELECT, lbr_select); | ||
143 | } | ||
140 | 144 | ||
141 | rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl); | 145 | rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl); |
142 | debugctl |= (DEBUGCTLMSR_LBR | DEBUGCTLMSR_FREEZE_LBRS_ON_PMI); | 146 | debugctl |= DEBUGCTLMSR_LBR; |
147 | /* | ||
148 | * LBR callstack does not work well with FREEZE_LBRS_ON_PMI. | ||
149 | * If FREEZE_LBRS_ON_PMI is set, a PMI near call/return instructions | ||
150 | * may cause a superfluous increase/decrease of LBR_TOS. | ||
151 | */ | ||
152 | if (!(lbr_select & LBR_CALL_STACK)) | ||
153 | debugctl |= DEBUGCTLMSR_FREEZE_LBRS_ON_PMI; | ||
143 | wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctl); | 154 | wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctl); |
144 | } | 155 | } |
145 | 156 | ||
@@ -181,9 +192,116 @@ void intel_pmu_lbr_reset(void) | |||
181 | intel_pmu_lbr_reset_64(); | 192 | intel_pmu_lbr_reset_64(); |
182 | } | 193 | } |
183 | 194 | ||
195 | /* | ||
196 | * TOS = most recently recorded branch | ||
197 | */ | ||
198 | static inline u64 intel_pmu_lbr_tos(void) | ||
199 | { | ||
200 | u64 tos; | ||
201 | |||
202 | rdmsrl(x86_pmu.lbr_tos, tos); | ||
203 | return tos; | ||
204 | } | ||
205 | |||
206 | enum { | ||
207 | LBR_NONE, | ||
208 | LBR_VALID, | ||
209 | }; | ||
210 | |||
211 | static void __intel_pmu_lbr_restore(struct x86_perf_task_context *task_ctx) | ||
212 | { | ||
213 | int i; | ||
214 | unsigned lbr_idx, mask; | ||
215 | u64 tos; | ||
216 | |||
217 | if (task_ctx->lbr_callstack_users == 0 || | ||
218 | task_ctx->lbr_stack_state == LBR_NONE) { | ||
219 | intel_pmu_lbr_reset(); | ||
220 | return; | ||
221 | } | ||
222 | |||
223 | mask = x86_pmu.lbr_nr - 1; | ||
224 | tos = intel_pmu_lbr_tos(); | ||
225 | for (i = 0; i < x86_pmu.lbr_nr; i++) { | ||
226 | lbr_idx = (tos - i) & mask; | ||
227 | wrmsrl(x86_pmu.lbr_from + lbr_idx, task_ctx->lbr_from[i]); | ||
228 | wrmsrl(x86_pmu.lbr_to + lbr_idx, task_ctx->lbr_to[i]); | ||
229 | } | ||
230 | task_ctx->lbr_stack_state = LBR_NONE; | ||
231 | } | ||
232 | |||
233 | static void __intel_pmu_lbr_save(struct x86_perf_task_context *task_ctx) | ||
234 | { | ||
235 | int i; | ||
236 | unsigned lbr_idx, mask; | ||
237 | u64 tos; | ||
238 | |||
239 | if (task_ctx->lbr_callstack_users == 0) { | ||
240 | task_ctx->lbr_stack_state = LBR_NONE; | ||
241 | return; | ||
242 | } | ||
243 | |||
244 | mask = x86_pmu.lbr_nr - 1; | ||
245 | tos = intel_pmu_lbr_tos(); | ||
246 | for (i = 0; i < x86_pmu.lbr_nr; i++) { | ||
247 | lbr_idx = (tos - i) & mask; | ||
248 | rdmsrl(x86_pmu.lbr_from + lbr_idx, task_ctx->lbr_from[i]); | ||
249 | rdmsrl(x86_pmu.lbr_to + lbr_idx, task_ctx->lbr_to[i]); | ||
250 | } | ||
251 | task_ctx->lbr_stack_state = LBR_VALID; | ||
252 | } | ||
253 | |||
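Both helpers above treat ctx->task_ctx_data as a struct x86_perf_task_context. That structure lives in perf_event.h (touched elsewhere in this commit, per the diffstat); a sketch of the fields assumed here, with MAX_LBR_ENTRIES assumed to be at least x86_pmu.lbr_nr, would be:

    struct x86_perf_task_context {
            u64 lbr_from[MAX_LBR_ENTRIES];  /* saved LBR "from" addresses  */
            u64 lbr_to[MAX_LBR_ENTRIES];    /* saved LBR "to" addresses    */
            int lbr_callstack_users;        /* events using callstack mode */
            int lbr_stack_state;            /* LBR_NONE or LBR_VALID       */
    };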
254 | void intel_pmu_lbr_sched_task(struct perf_event_context *ctx, bool sched_in) | ||
255 | { | ||
256 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); | ||
257 | struct x86_perf_task_context *task_ctx; | ||
258 | |||
259 | if (!x86_pmu.lbr_nr) | ||
260 | return; | ||
261 | |||
262 | /* | ||
263 | * If LBR callstack feature is enabled and the stack was saved when | ||
264 | * the task was scheduled out, restore the stack. Otherwise flush | ||
265 | * the LBR stack. | ||
266 | */ | ||
267 | task_ctx = ctx ? ctx->task_ctx_data : NULL; | ||
268 | if (task_ctx) { | ||
269 | if (sched_in) { | ||
270 | __intel_pmu_lbr_restore(task_ctx); | ||
271 | cpuc->lbr_context = ctx; | ||
272 | } else { | ||
273 | __intel_pmu_lbr_save(task_ctx); | ||
274 | } | ||
275 | return; | ||
276 | } | ||
277 | |||
278 | /* | ||
279 | * When sampling the branch stack in system-wide mode, it may be | ||
280 | * necessary to flush the stack on context switch. This happens | ||
281 | * when the branch stack does not tag its entries with the pid | ||
282 | * of the current task. Otherwise it becomes impossible to | ||
283 | * associate a branch entry with a task. This ambiguity is more | ||
284 | * likely to appear when the branch stack supports priv level | ||
285 | * filtering and the user sets it to monitor only at the user | ||
286 | * level (which could be a useful measurement in system-wide | ||
287 | * mode). In that case, the risk is high of having a branch | ||
288 | * stack with branches from multiple tasks. | ||
289 | */ | ||
290 | if (sched_in) { | ||
291 | intel_pmu_lbr_reset(); | ||
292 | cpuc->lbr_context = ctx; | ||
293 | } | ||
294 | } | ||
295 | |||
296 | static inline bool branch_user_callstack(unsigned br_sel) | ||
297 | { | ||
298 | return (br_sel & X86_BR_USER) && (br_sel & X86_BR_CALL_STACK); | ||
299 | } | ||
300 | |||
184 | void intel_pmu_lbr_enable(struct perf_event *event) | 301 | void intel_pmu_lbr_enable(struct perf_event *event) |
185 | { | 302 | { |
186 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); | 303 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); |
304 | struct x86_perf_task_context *task_ctx; | ||
187 | 305 | ||
188 | if (!x86_pmu.lbr_nr) | 306 | if (!x86_pmu.lbr_nr) |
189 | return; | 307 | return; |
@@ -198,18 +316,33 @@ void intel_pmu_lbr_enable(struct perf_event *event) | |||
198 | } | 316 | } |
199 | cpuc->br_sel = event->hw.branch_reg.reg; | 317 | cpuc->br_sel = event->hw.branch_reg.reg; |
200 | 318 | ||
319 | if (branch_user_callstack(cpuc->br_sel) && event->ctx && | ||
320 | event->ctx->task_ctx_data) { | ||
321 | task_ctx = event->ctx->task_ctx_data; | ||
322 | task_ctx->lbr_callstack_users++; | ||
323 | } | ||
324 | |||
201 | cpuc->lbr_users++; | 325 | cpuc->lbr_users++; |
326 | perf_sched_cb_inc(event->ctx->pmu); | ||
202 | } | 327 | } |
203 | 328 | ||
204 | void intel_pmu_lbr_disable(struct perf_event *event) | 329 | void intel_pmu_lbr_disable(struct perf_event *event) |
205 | { | 330 | { |
206 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); | 331 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); |
332 | struct x86_perf_task_context *task_ctx; | ||
207 | 333 | ||
208 | if (!x86_pmu.lbr_nr) | 334 | if (!x86_pmu.lbr_nr) |
209 | return; | 335 | return; |
210 | 336 | ||
337 | if (branch_user_callstack(cpuc->br_sel) && event->ctx && | ||
338 | event->ctx->task_ctx_data) { | ||
339 | task_ctx = event->ctx->task_ctx_data; | ||
340 | task_ctx->lbr_callstack_users--; | ||
341 | } | ||
342 | |||
211 | cpuc->lbr_users--; | 343 | cpuc->lbr_users--; |
212 | WARN_ON_ONCE(cpuc->lbr_users < 0); | 344 | WARN_ON_ONCE(cpuc->lbr_users < 0); |
345 | perf_sched_cb_dec(event->ctx->pmu); | ||
213 | 346 | ||
214 | if (cpuc->enabled && !cpuc->lbr_users) { | 347 | if (cpuc->enabled && !cpuc->lbr_users) { |
215 | __intel_pmu_lbr_disable(); | 348 | __intel_pmu_lbr_disable(); |
@@ -234,18 +367,6 @@ void intel_pmu_lbr_disable_all(void) | |||
234 | __intel_pmu_lbr_disable(); | 367 | __intel_pmu_lbr_disable(); |
235 | } | 368 | } |
236 | 369 | ||
237 | /* | ||
238 | * TOS = most recently recorded branch | ||
239 | */ | ||
240 | static inline u64 intel_pmu_lbr_tos(void) | ||
241 | { | ||
242 | u64 tos; | ||
243 | |||
244 | rdmsrl(x86_pmu.lbr_tos, tos); | ||
245 | |||
246 | return tos; | ||
247 | } | ||
248 | |||
249 | static void intel_pmu_lbr_read_32(struct cpu_hw_events *cpuc) | 370 | static void intel_pmu_lbr_read_32(struct cpu_hw_events *cpuc) |
250 | { | 371 | { |
251 | unsigned long mask = x86_pmu.lbr_nr - 1; | 372 | unsigned long mask = x86_pmu.lbr_nr - 1; |
@@ -350,7 +471,7 @@ void intel_pmu_lbr_read(void) | |||
350 | * - in case there is no HW filter | 471 | * - in case there is no HW filter |
351 | * - in case the HW filter has errata or limitations | 472 | * - in case the HW filter has errata or limitations |
352 | */ | 473 | */ |
353 | static void intel_pmu_setup_sw_lbr_filter(struct perf_event *event) | 474 | static int intel_pmu_setup_sw_lbr_filter(struct perf_event *event) |
354 | { | 475 | { |
355 | u64 br_type = event->attr.branch_sample_type; | 476 | u64 br_type = event->attr.branch_sample_type; |
356 | int mask = 0; | 477 | int mask = 0; |
@@ -387,11 +508,21 @@ static void intel_pmu_setup_sw_lbr_filter(struct perf_event *event) | |||
387 | if (br_type & PERF_SAMPLE_BRANCH_COND) | 508 | if (br_type & PERF_SAMPLE_BRANCH_COND) |
388 | mask |= X86_BR_JCC; | 509 | mask |= X86_BR_JCC; |
389 | 510 | ||
511 | if (br_type & PERF_SAMPLE_BRANCH_CALL_STACK) { | ||
512 | if (!x86_pmu_has_lbr_callstack()) | ||
513 | return -EOPNOTSUPP; | ||
514 | if (mask & ~(X86_BR_USER | X86_BR_KERNEL)) | ||
515 | return -EINVAL; | ||
516 | mask |= X86_BR_CALL | X86_BR_IND_CALL | X86_BR_RET | | ||
517 | X86_BR_CALL_STACK; | ||
518 | } | ||
519 | |||
390 | /* | 520 | /* |
391 | * stash actual user request into reg, it may | 521 | * stash actual user request into reg, it may |
392 | * be used by fixup code for some CPU | 522 | * be used by fixup code for some CPU |
393 | */ | 523 | */ |
394 | event->hw.branch_reg.reg = mask; | 524 | event->hw.branch_reg.reg = mask; |
525 | return 0; | ||
395 | } | 526 | } |
396 | 527 | ||
397 | /* | 528 | /* |
@@ -403,14 +534,14 @@ static int intel_pmu_setup_hw_lbr_filter(struct perf_event *event) | |||
403 | { | 534 | { |
404 | struct hw_perf_event_extra *reg; | 535 | struct hw_perf_event_extra *reg; |
405 | u64 br_type = event->attr.branch_sample_type; | 536 | u64 br_type = event->attr.branch_sample_type; |
406 | u64 mask = 0, m; | 537 | u64 mask = 0, v; |
407 | u64 v; | 538 | int i; |
408 | 539 | ||
409 | for_each_branch_sample_type(m) { | 540 | for (i = 0; i < PERF_SAMPLE_BRANCH_MAX_SHIFT; i++) { |
410 | if (!(br_type & m)) | 541 | if (!(br_type & (1ULL << i))) |
411 | continue; | 542 | continue; |
412 | 543 | ||
413 | v = x86_pmu.lbr_sel_map[m]; | 544 | v = x86_pmu.lbr_sel_map[i]; |
414 | if (v == LBR_NOT_SUPP) | 545 | if (v == LBR_NOT_SUPP) |
415 | return -EOPNOTSUPP; | 546 | return -EOPNOTSUPP; |
416 | 547 | ||
@@ -420,8 +551,12 @@ static int intel_pmu_setup_hw_lbr_filter(struct perf_event *event) | |||
420 | reg = &event->hw.branch_reg; | 551 | reg = &event->hw.branch_reg; |
421 | reg->idx = EXTRA_REG_LBR; | 552 | reg->idx = EXTRA_REG_LBR; |
422 | 553 | ||
423 | /* LBR_SELECT operates in suppress mode so invert mask */ | 554 | /* |
424 | reg->config = ~mask & x86_pmu.lbr_sel_mask; | 555 | * The first 9 bits (LBR_SEL_MASK) in LBR_SELECT operate |
556 | * in suppress mode. So LBR_SELECT should be set to | ||
557 | * (~mask & LBR_SEL_MASK) | (mask & ~LBR_SEL_MASK) | ||
558 | */ | ||
559 | reg->config = mask ^ x86_pmu.lbr_sel_mask; | ||
425 | 560 | ||
426 | return 0; | 561 | return 0; |
427 | } | 562 | } |
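The XOR above leans on a small identity: for any 'mask' and a suppress-mode field M (per the comment, the low 9 bits of LBR_SELECT), mask ^ M equals (~mask & M) | (mask & ~M), so requested branch types inside M end up cleared (not suppressed) while bits outside M, such as LBR_CALL_STACK, pass through set. A standalone check of that identity, using an illustrative 9-bit M rather than the real LBR_SEL_MASK definition:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
            const uint64_t M = 0x1ff;       /* illustrative 9-bit suppress-mode field */
            uint64_t mask = (1u << 1) | (1u << 3) | (1u << 9);  /* arbitrary request */

            /* mask ^ M == (~mask & M) | (mask & ~M) holds for any mask */
            assert((mask ^ M) == ((~mask & M) | (mask & ~M)));
            return 0;
    }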
@@ -439,7 +574,9 @@ int intel_pmu_setup_lbr_filter(struct perf_event *event) | |||
439 | /* | 574 | /* |
440 | * setup SW LBR filter | 575 | * setup SW LBR filter |
441 | */ | 576 | */ |
442 | intel_pmu_setup_sw_lbr_filter(event); | 577 | ret = intel_pmu_setup_sw_lbr_filter(event); |
578 | if (ret) | ||
579 | return ret; | ||
443 | 580 | ||
444 | /* | 581 | /* |
445 | * setup HW LBR filter, if any | 582 | * setup HW LBR filter, if any |
@@ -568,6 +705,12 @@ static int branch_type(unsigned long from, unsigned long to, int abort) | |||
568 | ret = X86_BR_INT; | 705 | ret = X86_BR_INT; |
569 | break; | 706 | break; |
570 | case 0xe8: /* call near rel */ | 707 | case 0xe8: /* call near rel */ |
708 | insn_get_immediate(&insn); | ||
709 | if (insn.immediate1.value == 0) { | ||
710 | /* zero length call */ | ||
711 | ret = X86_BR_ZERO_CALL; | ||
712 | break; | ||
713 | } | ||
571 | case 0x9a: /* call far absolute */ | 714 | case 0x9a: /* call far absolute */ |
572 | ret = X86_BR_CALL; | 715 | ret = X86_BR_CALL; |
573 | break; | 716 | break; |
@@ -678,35 +821,49 @@ intel_pmu_lbr_filter(struct cpu_hw_events *cpuc) | |||
678 | /* | 821 | /* |
679 | * Map interface branch filters onto LBR filters | 822 | * Map interface branch filters onto LBR filters |
680 | */ | 823 | */ |
681 | static const int nhm_lbr_sel_map[PERF_SAMPLE_BRANCH_MAX] = { | 824 | static const int nhm_lbr_sel_map[PERF_SAMPLE_BRANCH_MAX_SHIFT] = { |
682 | [PERF_SAMPLE_BRANCH_ANY] = LBR_ANY, | 825 | [PERF_SAMPLE_BRANCH_ANY_SHIFT] = LBR_ANY, |
683 | [PERF_SAMPLE_BRANCH_USER] = LBR_USER, | 826 | [PERF_SAMPLE_BRANCH_USER_SHIFT] = LBR_USER, |
684 | [PERF_SAMPLE_BRANCH_KERNEL] = LBR_KERNEL, | 827 | [PERF_SAMPLE_BRANCH_KERNEL_SHIFT] = LBR_KERNEL, |
685 | [PERF_SAMPLE_BRANCH_HV] = LBR_IGN, | 828 | [PERF_SAMPLE_BRANCH_HV_SHIFT] = LBR_IGN, |
686 | [PERF_SAMPLE_BRANCH_ANY_RETURN] = LBR_RETURN | LBR_REL_JMP | 829 | [PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT] = LBR_RETURN | LBR_REL_JMP |
687 | | LBR_IND_JMP | LBR_FAR, | 830 | | LBR_IND_JMP | LBR_FAR, |
688 | /* | 831 | /* |
689 | * NHM/WSM erratum: must include REL_JMP+IND_JMP to get CALL branches | 832 | * NHM/WSM erratum: must include REL_JMP+IND_JMP to get CALL branches |
690 | */ | 833 | */ |
691 | [PERF_SAMPLE_BRANCH_ANY_CALL] = | 834 | [PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT] = |
692 | LBR_REL_CALL | LBR_IND_CALL | LBR_REL_JMP | LBR_IND_JMP | LBR_FAR, | 835 | LBR_REL_CALL | LBR_IND_CALL | LBR_REL_JMP | LBR_IND_JMP | LBR_FAR, |
693 | /* | 836 | /* |
694 | * NHM/WSM erratum: must include IND_JMP to capture IND_CALL | 837 | * NHM/WSM erratum: must include IND_JMP to capture IND_CALL |
695 | */ | 838 | */ |
696 | [PERF_SAMPLE_BRANCH_IND_CALL] = LBR_IND_CALL | LBR_IND_JMP, | 839 | [PERF_SAMPLE_BRANCH_IND_CALL_SHIFT] = LBR_IND_CALL | LBR_IND_JMP, |
697 | [PERF_SAMPLE_BRANCH_COND] = LBR_JCC, | 840 | [PERF_SAMPLE_BRANCH_COND_SHIFT] = LBR_JCC, |
698 | }; | 841 | }; |
699 | 842 | ||
700 | static const int snb_lbr_sel_map[PERF_SAMPLE_BRANCH_MAX] = { | 843 | static const int snb_lbr_sel_map[PERF_SAMPLE_BRANCH_MAX_SHIFT] = { |
701 | [PERF_SAMPLE_BRANCH_ANY] = LBR_ANY, | 844 | [PERF_SAMPLE_BRANCH_ANY_SHIFT] = LBR_ANY, |
702 | [PERF_SAMPLE_BRANCH_USER] = LBR_USER, | 845 | [PERF_SAMPLE_BRANCH_USER_SHIFT] = LBR_USER, |
703 | [PERF_SAMPLE_BRANCH_KERNEL] = LBR_KERNEL, | 846 | [PERF_SAMPLE_BRANCH_KERNEL_SHIFT] = LBR_KERNEL, |
704 | [PERF_SAMPLE_BRANCH_HV] = LBR_IGN, | 847 | [PERF_SAMPLE_BRANCH_HV_SHIFT] = LBR_IGN, |
705 | [PERF_SAMPLE_BRANCH_ANY_RETURN] = LBR_RETURN | LBR_FAR, | 848 | [PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT] = LBR_RETURN | LBR_FAR, |
706 | [PERF_SAMPLE_BRANCH_ANY_CALL] = LBR_REL_CALL | LBR_IND_CALL | 849 | [PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT] = LBR_REL_CALL | LBR_IND_CALL |
707 | | LBR_FAR, | 850 | | LBR_FAR, |
708 | [PERF_SAMPLE_BRANCH_IND_CALL] = LBR_IND_CALL, | 851 | [PERF_SAMPLE_BRANCH_IND_CALL_SHIFT] = LBR_IND_CALL, |
709 | [PERF_SAMPLE_BRANCH_COND] = LBR_JCC, | 852 | [PERF_SAMPLE_BRANCH_COND_SHIFT] = LBR_JCC, |
853 | }; | ||
854 | |||
855 | static const int hsw_lbr_sel_map[PERF_SAMPLE_BRANCH_MAX_SHIFT] = { | ||
856 | [PERF_SAMPLE_BRANCH_ANY_SHIFT] = LBR_ANY, | ||
857 | [PERF_SAMPLE_BRANCH_USER_SHIFT] = LBR_USER, | ||
858 | [PERF_SAMPLE_BRANCH_KERNEL_SHIFT] = LBR_KERNEL, | ||
859 | [PERF_SAMPLE_BRANCH_HV_SHIFT] = LBR_IGN, | ||
860 | [PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT] = LBR_RETURN | LBR_FAR, | ||
861 | [PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT] = LBR_REL_CALL | LBR_IND_CALL | ||
862 | | LBR_FAR, | ||
863 | [PERF_SAMPLE_BRANCH_IND_CALL_SHIFT] = LBR_IND_CALL, | ||
864 | [PERF_SAMPLE_BRANCH_COND_SHIFT] = LBR_JCC, | ||
865 | [PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT] = LBR_REL_CALL | LBR_IND_CALL | ||
866 | | LBR_RETURN | LBR_CALL_STACK, | ||
710 | }; | 867 | }; |
711 | 868 | ||
712 | /* core */ | 869 | /* core */ |
@@ -765,6 +922,20 @@ void __init intel_pmu_lbr_init_snb(void) | |||
765 | pr_cont("16-deep LBR, "); | 922 | pr_cont("16-deep LBR, "); |
766 | } | 923 | } |
767 | 924 | ||
925 | /* haswell */ | ||
926 | void intel_pmu_lbr_init_hsw(void) | ||
927 | { | ||
928 | x86_pmu.lbr_nr = 16; | ||
929 | x86_pmu.lbr_tos = MSR_LBR_TOS; | ||
930 | x86_pmu.lbr_from = MSR_LBR_NHM_FROM; | ||
931 | x86_pmu.lbr_to = MSR_LBR_NHM_TO; | ||
932 | |||
933 | x86_pmu.lbr_sel_mask = LBR_SEL_MASK; | ||
934 | x86_pmu.lbr_sel_map = hsw_lbr_sel_map; | ||
935 | |||
936 | pr_cont("16-deep LBR, "); | ||
937 | } | ||
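With hsw_lbr_sel_map routing PERF_SAMPLE_BRANCH_CALL_STACK to LBR_CALL_STACK and intel_pmu_lbr_init_hsw() selecting that map, a user-space consumer would be expected to request the mode roughly as in the sketch below (a hedged example, not part of this patch; it assumes PERF_SAMPLE_BRANCH_CALL_STACK is exported through the uapi headers elsewhere in this series):

    #include <linux/perf_event.h>
    #include <string.h>

    /* Configure a perf_event_attr for user-level LBR call-stack sampling. */
    static void lbr_callstack_attr(struct perf_event_attr *attr)
    {
            memset(attr, 0, sizeof(*attr));
            attr->size = sizeof(*attr);
            attr->type = PERF_TYPE_HARDWARE;
            attr->config = PERF_COUNT_HW_CPU_CYCLES;
            attr->sample_period = 100000;
            attr->sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_BRANCH_STACK;
            /*
             * Only the USER/KERNEL privilege bits may accompany CALL_STACK,
             * per intel_pmu_setup_sw_lbr_filter() above.
             */
            attr->branch_sample_type = PERF_SAMPLE_BRANCH_USER |
                                       PERF_SAMPLE_BRANCH_CALL_STACK;
    }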
938 | |||
768 | /* atom */ | 939 | /* atom */ |
769 | void __init intel_pmu_lbr_init_atom(void) | 940 | void __init intel_pmu_lbr_init_atom(void) |
770 | { | 941 | { |
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore_snbep.c b/arch/x86/kernel/cpu/perf_event_intel_uncore_snbep.c index 21af6149edf2..12d9548457e7 100644 --- a/arch/x86/kernel/cpu/perf_event_intel_uncore_snbep.c +++ b/arch/x86/kernel/cpu/perf_event_intel_uncore_snbep.c | |||
@@ -1132,8 +1132,7 @@ static int snbep_pci2phy_map_init(int devid) | |||
1132 | } | 1132 | } |
1133 | } | 1133 | } |
1134 | 1134 | ||
1135 | if (ubox_dev) | 1135 | pci_dev_put(ubox_dev); |
1136 | pci_dev_put(ubox_dev); | ||
1137 | 1136 | ||
1138 | return err ? pcibios_err_to_errno(err) : 0; | 1137 | return err ? pcibios_err_to_errno(err) : 0; |
1139 | } | 1138 | } |
diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c index 4e3d5a9621fe..03189d86357d 100644 --- a/arch/x86/kernel/kprobes/core.c +++ b/arch/x86/kernel/kprobes/core.c | |||
@@ -354,6 +354,7 @@ int __copy_instruction(u8 *dest, u8 *src) | |||
354 | { | 354 | { |
355 | struct insn insn; | 355 | struct insn insn; |
356 | kprobe_opcode_t buf[MAX_INSN_SIZE]; | 356 | kprobe_opcode_t buf[MAX_INSN_SIZE]; |
357 | int length; | ||
357 | unsigned long recovered_insn = | 358 | unsigned long recovered_insn = |
358 | recover_probed_instruction(buf, (unsigned long)src); | 359 | recover_probed_instruction(buf, (unsigned long)src); |
359 | 360 | ||
@@ -361,16 +362,18 @@ int __copy_instruction(u8 *dest, u8 *src) | |||
361 | return 0; | 362 | return 0; |
362 | kernel_insn_init(&insn, (void *)recovered_insn, MAX_INSN_SIZE); | 363 | kernel_insn_init(&insn, (void *)recovered_insn, MAX_INSN_SIZE); |
363 | insn_get_length(&insn); | 364 | insn_get_length(&insn); |
365 | length = insn.length; | ||
366 | |||
364 | /* Another subsystem puts a breakpoint, failed to recover */ | 367 | /* Another subsystem puts a breakpoint, failed to recover */ |
365 | if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION) | 368 | if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION) |
366 | return 0; | 369 | return 0; |
367 | memcpy(dest, insn.kaddr, insn.length); | 370 | memcpy(dest, insn.kaddr, length); |
368 | 371 | ||
369 | #ifdef CONFIG_X86_64 | 372 | #ifdef CONFIG_X86_64 |
370 | if (insn_rip_relative(&insn)) { | 373 | if (insn_rip_relative(&insn)) { |
371 | s64 newdisp; | 374 | s64 newdisp; |
372 | u8 *disp; | 375 | u8 *disp; |
373 | kernel_insn_init(&insn, dest, insn.length); | 376 | kernel_insn_init(&insn, dest, length); |
374 | insn_get_displacement(&insn); | 377 | insn_get_displacement(&insn); |
375 | /* | 378 | /* |
376 | * The copied instruction uses the %rip-relative addressing | 379 | * The copied instruction uses the %rip-relative addressing |
@@ -394,7 +397,7 @@ int __copy_instruction(u8 *dest, u8 *src) | |||
394 | *(s32 *) disp = (s32) newdisp; | 397 | *(s32 *) disp = (s32) newdisp; |
395 | } | 398 | } |
396 | #endif | 399 | #endif |
397 | return insn.length; | 400 | return length; |
398 | } | 401 | } |
399 | 402 | ||
400 | static int arch_copy_kprobe(struct kprobe *p) | 403 | static int arch_copy_kprobe(struct kprobe *p) |