-rw-r--r--  arch/x86/include/asm/perf_event.h          |   8
-rw-r--r--  arch/x86/kernel/cpu/perf_event.h           |  48
-rw-r--r--  arch/x86/kernel/cpu/perf_event_intel_ds.c  | 107
3 files changed, 85 insertions(+), 78 deletions(-)
diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h
index 8249df45d2f2..8dfc9fd094a3 100644
--- a/arch/x86/include/asm/perf_event.h
+++ b/arch/x86/include/asm/perf_event.h
@@ -51,6 +51,14 @@
 	 ARCH_PERFMON_EVENTSEL_EDGE  |	\
 	 ARCH_PERFMON_EVENTSEL_INV   |	\
 	 ARCH_PERFMON_EVENTSEL_CMASK)
+#define X86_ALL_EVENT_FLAGS			\
+	(ARCH_PERFMON_EVENTSEL_EDGE |		\
+	 ARCH_PERFMON_EVENTSEL_INV |		\
+	 ARCH_PERFMON_EVENTSEL_CMASK |		\
+	 ARCH_PERFMON_EVENTSEL_ANY |		\
+	 ARCH_PERFMON_EVENTSEL_PIN_CONTROL |	\
+	 HSW_IN_TX |				\
+	 HSW_IN_TX_CHECKPOINTED)
 #define AMD64_RAW_EVENT_MASK		\
 	(X86_RAW_EVENT_MASK          |	\
 	 AMD64_EVENTSEL_EVENT)
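
For reference, a minimal user-space sketch (not part of the patch; bit positions follow the Intel SDM event-select layout, with HSW_IN_TX/HSW_IN_TX_CHECKPOINTED assumed at bits 32/33 as elsewhere in perf_event.h) showing which config bits X86_ALL_EVENT_FLAGS covers, decoded from the raw 0x108001c2 config used later in this patch:

#include <stdio.h>
#include <stdint.h>

#define EVENTSEL_EDGE        (1ULL << 18)
#define EVENTSEL_PIN_CONTROL (1ULL << 19)
#define EVENTSEL_ANY         (1ULL << 21)
#define EVENTSEL_INV         (1ULL << 23)
#define EVENTSEL_CMASK       (0xFFULL << 24)
#define IN_TX                (1ULL << 32)
#define IN_TX_CHECKPOINTED   (1ULL << 33)

int main(void)
{
	/* UOPS_RETIRED.ALL with inv=1, cmask=16: the "cycles:p" proxy
	 * whitelisted by the constraint tables in this patch. */
	uint64_t config = 0x108001c2ULL;

	printf("edge=%d inv=%d any=%d cmask=%u\n",
	       !!(config & EVENTSEL_EDGE),
	       !!(config & EVENTSEL_INV),
	       !!(config & EVENTSEL_ANY),
	       (unsigned)((config & EVENTSEL_CMASK) >> 24));
	return 0;
}

Running this prints "edge=0 inv=1 any=0 cmask=16", i.e. only the inv and cmask flag bits are set on top of event 0xc2/umask 0x01.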
diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h
index 8ade93111e03..fc5eb390b368 100644
--- a/arch/x86/kernel/cpu/perf_event.h
+++ b/arch/x86/kernel/cpu/perf_event.h
@@ -67,8 +67,10 @@ struct event_constraint {
  */
 #define PERF_X86_EVENT_PEBS_LDLAT	0x1 /* ld+ldlat data address sampling */
 #define PERF_X86_EVENT_PEBS_ST		0x2 /* st data address sampling */
-#define PERF_X86_EVENT_PEBS_ST_HSW	0x4 /* haswell style st data sampling */
+#define PERF_X86_EVENT_PEBS_ST_HSW	0x4 /* haswell style datala, store */
 #define PERF_X86_EVENT_COMMITTED	0x8 /* event passed commit_txn */
+#define PERF_X86_EVENT_PEBS_LD_HSW	0x10 /* haswell style datala, load */
+#define PERF_X86_EVENT_PEBS_NA_HSW	0x20 /* haswell style datala, unknown */
 
 struct amd_nb {
 	int nb_id;  /* NorthBridge id */
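
These are software-only bits in event->hw.flags, not hardware bits. A compile-time sanity sketch (hypothetical, not in the patch) makes the non-overlap explicit, since the PEBS handler further down tests the three HSW datala flavors or-ed together as one mask:

#define PERF_X86_EVENT_PEBS_ST_HSW	0x4
#define PERF_X86_EVENT_PEBS_COMMITTED	0x8	/* stand-in name for the 0x8 bit */
#define PERF_X86_EVENT_PEBS_LD_HSW	0x10
#define PERF_X86_EVENT_PEBS_NA_HSW	0x20

_Static_assert((PERF_X86_EVENT_PEBS_ST_HSW &
		(PERF_X86_EVENT_PEBS_LD_HSW | PERF_X86_EVENT_PEBS_NA_HSW)) == 0,
	       "HSW datala flags must be distinct bits");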
@@ -252,18 +254,52 @@ struct cpu_hw_events {
 	EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK)
 
 #define INTEL_PLD_CONSTRAINT(c, n)	\
-	__EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK, \
+	__EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
 			   HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_LDLAT)
 
 #define INTEL_PST_CONSTRAINT(c, n)	\
-	__EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK, \
+	__EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
 			  HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_ST)
 
-/* DataLA version of store sampling without extra enable bit. */
-#define INTEL_PST_HSW_CONSTRAINT(c, n)	\
-	__EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK, \
+/* Event constraint, but match on all event flags too. */
+#define INTEL_FLAGS_EVENT_CONSTRAINT(c, n) \
+	EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS)
+
+/* Check only flags, but allow all event/umask */
+#define INTEL_ALL_EVENT_CONSTRAINT(code, n) \
+	EVENT_CONSTRAINT(code, n, X86_ALL_EVENT_FLAGS)
+
+/* Check flags and event code, and set the HSW store flag */
+#define INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_ST(code, n) \
+	__EVENT_CONSTRAINT(code, n, \
+			  ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS, \
+			  HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_ST_HSW)
+
+/* Check flags and event code, and set the HSW load flag */
+#define INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD(code, n) \
+	__EVENT_CONSTRAINT(code, n, \
+			  ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS, \
+			  HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_LD_HSW)
+
+/* Check flags and event code/umask, and set the HSW store flag */
+#define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(code, n) \
+	__EVENT_CONSTRAINT(code, n, \
+			  INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
 			  HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_ST_HSW)
 
+/* Check flags and event code/umask, and set the HSW load flag */
+#define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(code, n) \
+	__EVENT_CONSTRAINT(code, n, \
+			  INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
+			  HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_LD_HSW)
+
+/* Check flags and event code/umask, and set the HSW N/A flag */
+#define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_NA(code, n) \
+	__EVENT_CONSTRAINT(code, n, \
+			  INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
+			  HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_NA_HSW)
+
+
 /*
  * We define the end marker as having a weight of -1
  * to enable blacklisting of events using a counter bitmask
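
A condensed sketch of how these entries are used (a stand-in for struct event_constraint; the real lookup lives in the x86 PEBS constraint code). An event's raw config matches an entry when (config & cmask) == code, so folding X86_ALL_EVENT_FLAGS into cmask makes stray edge/inv/cmask/any/in_tx bits cause a mismatch instead of being silently ignored:

/* Hypothetical stand-in for struct event_constraint. */
struct constraint_sketch {
	unsigned long long code;	/* expected config bits */
	unsigned long long cmask;	/* bits that must match */
	int weight;			/* counter weight; 0 ends a table (simplified) */
	int flags;			/* e.g. PERF_X86_EVENT_PEBS_LD_HSW */
};

/* With X86_ALL_EVENT_FLAGS folded into cmask, a config that sets
 * edge/inv/cmask/any/in_tx bits no longer matches a table entry
 * that did not declare them. */
static int constraint_matches(const struct constraint_sketch *c,
			      unsigned long long config)
{
	return (config & c->cmask) == c->code;
}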
diff --git a/arch/x86/kernel/cpu/perf_event_intel_ds.c b/arch/x86/kernel/cpu/perf_event_intel_ds.c
index 696ade311ded..aca77e99e676 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_ds.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_ds.c
@@ -569,28 +569,10 @@ struct event_constraint intel_atom_pebs_event_constraints[] = {
 };
 
 struct event_constraint intel_slm_pebs_event_constraints[] = {
-	INTEL_UEVENT_CONSTRAINT(0x0103, 0x1), /* REHABQ.LD_BLOCK_ST_FORWARD_PS */
-	INTEL_UEVENT_CONSTRAINT(0x0803, 0x1), /* REHABQ.LD_SPLITS_PS */
-	INTEL_UEVENT_CONSTRAINT(0x0204, 0x1), /* MEM_UOPS_RETIRED.L2_HIT_LOADS_PS */
-	INTEL_UEVENT_CONSTRAINT(0x0404, 0x1), /* MEM_UOPS_RETIRED.L2_MISS_LOADS_PS */
-	INTEL_UEVENT_CONSTRAINT(0x0804, 0x1), /* MEM_UOPS_RETIRED.DTLB_MISS_LOADS_PS */
-	INTEL_UEVENT_CONSTRAINT(0x2004, 0x1), /* MEM_UOPS_RETIRED.HITM_PS */
-	INTEL_UEVENT_CONSTRAINT(0x00c0, 0x1), /* INST_RETIRED.ANY_PS */
-	INTEL_UEVENT_CONSTRAINT(0x00c4, 0x1), /* BR_INST_RETIRED.ALL_BRANCHES_PS */
-	INTEL_UEVENT_CONSTRAINT(0x7ec4, 0x1), /* BR_INST_RETIRED.JCC_PS */
-	INTEL_UEVENT_CONSTRAINT(0xbfc4, 0x1), /* BR_INST_RETIRED.FAR_BRANCH_PS */
-	INTEL_UEVENT_CONSTRAINT(0xebc4, 0x1), /* BR_INST_RETIRED.NON_RETURN_IND_PS */
-	INTEL_UEVENT_CONSTRAINT(0xf7c4, 0x1), /* BR_INST_RETIRED.RETURN_PS */
-	INTEL_UEVENT_CONSTRAINT(0xf9c4, 0x1), /* BR_INST_RETIRED.CALL_PS */
-	INTEL_UEVENT_CONSTRAINT(0xfbc4, 0x1), /* BR_INST_RETIRED.IND_CALL_PS */
-	INTEL_UEVENT_CONSTRAINT(0xfdc4, 0x1), /* BR_INST_RETIRED.REL_CALL_PS */
-	INTEL_UEVENT_CONSTRAINT(0xfec4, 0x1), /* BR_INST_RETIRED.TAKEN_JCC_PS */
-	INTEL_UEVENT_CONSTRAINT(0x00c5, 0x1), /* BR_INST_MISP_RETIRED.ALL_BRANCHES_PS */
-	INTEL_UEVENT_CONSTRAINT(0x7ec5, 0x1), /* BR_INST_MISP_RETIRED.JCC_PS */
-	INTEL_UEVENT_CONSTRAINT(0xebc5, 0x1), /* BR_INST_MISP_RETIRED.NON_RETURN_IND_PS */
-	INTEL_UEVENT_CONSTRAINT(0xf7c5, 0x1), /* BR_INST_MISP_RETIRED.RETURN_PS */
-	INTEL_UEVENT_CONSTRAINT(0xfbc5, 0x1), /* BR_INST_MISP_RETIRED.IND_CALL_PS */
-	INTEL_UEVENT_CONSTRAINT(0xfec5, 0x1), /* BR_INST_MISP_RETIRED.TAKEN_JCC_PS */
+	/* UOPS_RETIRED.ALL, inv=1, cmask=16 (cycles:p). */
+	INTEL_FLAGS_EVENT_CONSTRAINT(0x108001c2, 0xf),
+	/* Allow all events as PEBS with no flags */
+	INTEL_ALL_EVENT_CONSTRAINT(0, 0x1),
 	EVENT_CONSTRAINT_END
 };
 
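Table order matters here: a sketch of the first-match scan (assuming the usual for_each_event_constraint-style loop, terminator test simplified, reusing constraint_sketch from the sketch above). The specific inv=1/cmask=16 entry must precede the catch-all INTEL_ALL_EVENT_CONSTRAINT(0, ...) entry, which matches any event whose flag bits are all clear; the matched entry's flags are what tag the event for the sampling code at the end of this patch:

#include <stddef.h>

/* First matching entry wins; NULL means fall back to the default
 * PEBS constraint. */
static const struct constraint_sketch *
find_pebs_constraint(const struct constraint_sketch *table,
		     unsigned long long config)
{
	const struct constraint_sketch *c;

	for (c = table; c->weight; c++)	/* weight 0: end marker (simplified) */
		if ((config & c->cmask) == c->code)
			return c;
	return NULL;
}
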
@@ -626,68 +608,44 @@ struct event_constraint intel_westmere_pebs_event_constraints[] = {
 
 struct event_constraint intel_snb_pebs_event_constraints[] = {
 	INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PRECDIST */
-	INTEL_UEVENT_CONSTRAINT(0x01c2, 0xf), /* UOPS_RETIRED.ALL */
-	INTEL_UEVENT_CONSTRAINT(0x02c2, 0xf), /* UOPS_RETIRED.RETIRE_SLOTS */
-	INTEL_EVENT_CONSTRAINT(0xc4, 0xf),    /* BR_INST_RETIRED.* */
-	INTEL_EVENT_CONSTRAINT(0xc5, 0xf),    /* BR_MISP_RETIRED.* */
 	INTEL_PLD_CONSTRAINT(0x01cd, 0x8),    /* MEM_TRANS_RETIRED.LAT_ABOVE_THR */
 	INTEL_PST_CONSTRAINT(0x02cd, 0x8),    /* MEM_TRANS_RETIRED.PRECISE_STORES */
-	INTEL_EVENT_CONSTRAINT(0xd0, 0xf),    /* MEM_UOP_RETIRED.* */
-	INTEL_EVENT_CONSTRAINT(0xd1, 0xf),    /* MEM_LOAD_UOPS_RETIRED.* */
-	INTEL_EVENT_CONSTRAINT(0xd2, 0xf),    /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
-	INTEL_EVENT_CONSTRAINT(0xd3, 0xf),    /* MEM_LOAD_UOPS_LLC_MISS_RETIRED.* */
-	INTEL_UEVENT_CONSTRAINT(0x02d4, 0xf), /* MEM_LOAD_UOPS_MISC_RETIRED.LLC_MISS */
+	/* UOPS_RETIRED.ALL, inv=1, cmask=16 (cycles:p). */
+	INTEL_FLAGS_EVENT_CONSTRAINT(0x108001c2, 0xf),
+	/* Allow all events as PEBS with no flags */
+	INTEL_ALL_EVENT_CONSTRAINT(0, 0xf),
 	EVENT_CONSTRAINT_END
 };
 
 struct event_constraint intel_ivb_pebs_event_constraints[] = {
 	INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PRECDIST */
-	INTEL_UEVENT_CONSTRAINT(0x01c2, 0xf), /* UOPS_RETIRED.ALL */
-	INTEL_UEVENT_CONSTRAINT(0x02c2, 0xf), /* UOPS_RETIRED.RETIRE_SLOTS */
-	INTEL_EVENT_CONSTRAINT(0xc4, 0xf),    /* BR_INST_RETIRED.* */
-	INTEL_EVENT_CONSTRAINT(0xc5, 0xf),    /* BR_MISP_RETIRED.* */
 	INTEL_PLD_CONSTRAINT(0x01cd, 0x8),    /* MEM_TRANS_RETIRED.LAT_ABOVE_THR */
 	INTEL_PST_CONSTRAINT(0x02cd, 0x8),    /* MEM_TRANS_RETIRED.PRECISE_STORES */
-	INTEL_EVENT_CONSTRAINT(0xd0, 0xf),    /* MEM_UOP_RETIRED.* */
-	INTEL_EVENT_CONSTRAINT(0xd1, 0xf),    /* MEM_LOAD_UOPS_RETIRED.* */
-	INTEL_EVENT_CONSTRAINT(0xd2, 0xf),    /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
-	INTEL_EVENT_CONSTRAINT(0xd3, 0xf),    /* MEM_LOAD_UOPS_LLC_MISS_RETIRED.* */
+	/* UOPS_RETIRED.ALL, inv=1, cmask=16 (cycles:p). */
+	INTEL_FLAGS_EVENT_CONSTRAINT(0x108001c2, 0xf),
+	/* Allow all events as PEBS with no flags */
+	INTEL_ALL_EVENT_CONSTRAINT(0, 0xf),
 	EVENT_CONSTRAINT_END
 };
 
 struct event_constraint intel_hsw_pebs_event_constraints[] = {
 	INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PRECDIST */
-	INTEL_PST_HSW_CONSTRAINT(0x01c2, 0xf), /* UOPS_RETIRED.ALL */
-	INTEL_UEVENT_CONSTRAINT(0x02c2, 0xf), /* UOPS_RETIRED.RETIRE_SLOTS */
-	INTEL_EVENT_CONSTRAINT(0xc4, 0xf),    /* BR_INST_RETIRED.* */
-	INTEL_UEVENT_CONSTRAINT(0x01c5, 0xf), /* BR_MISP_RETIRED.CONDITIONAL */
-	INTEL_UEVENT_CONSTRAINT(0x04c5, 0xf), /* BR_MISP_RETIRED.ALL_BRANCHES */
-	INTEL_UEVENT_CONSTRAINT(0x20c5, 0xf), /* BR_MISP_RETIRED.NEAR_TAKEN */
-	INTEL_PLD_CONSTRAINT(0x01cd, 0x8),    /* MEM_TRANS_RETIRED.* */
-	/* MEM_UOPS_RETIRED.STLB_MISS_LOADS */
-	INTEL_UEVENT_CONSTRAINT(0x11d0, 0xf),
-	/* MEM_UOPS_RETIRED.STLB_MISS_STORES */
-	INTEL_UEVENT_CONSTRAINT(0x12d0, 0xf),
-	INTEL_UEVENT_CONSTRAINT(0x21d0, 0xf), /* MEM_UOPS_RETIRED.LOCK_LOADS */
-	INTEL_UEVENT_CONSTRAINT(0x41d0, 0xf), /* MEM_UOPS_RETIRED.SPLIT_LOADS */
-	/* MEM_UOPS_RETIRED.SPLIT_STORES */
-	INTEL_UEVENT_CONSTRAINT(0x42d0, 0xf),
-	INTEL_UEVENT_CONSTRAINT(0x81d0, 0xf), /* MEM_UOPS_RETIRED.ALL_LOADS */
-	INTEL_PST_HSW_CONSTRAINT(0x82d0, 0xf), /* MEM_UOPS_RETIRED.ALL_STORES */
-	INTEL_UEVENT_CONSTRAINT(0x01d1, 0xf), /* MEM_LOAD_UOPS_RETIRED.L1_HIT */
-	INTEL_UEVENT_CONSTRAINT(0x02d1, 0xf), /* MEM_LOAD_UOPS_RETIRED.L2_HIT */
-	INTEL_UEVENT_CONSTRAINT(0x04d1, 0xf), /* MEM_LOAD_UOPS_RETIRED.L3_HIT */
-	/* MEM_LOAD_UOPS_RETIRED.HIT_LFB */
-	INTEL_UEVENT_CONSTRAINT(0x40d1, 0xf),
-	/* MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_MISS */
-	INTEL_UEVENT_CONSTRAINT(0x01d2, 0xf),
-	/* MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HIT */
-	INTEL_UEVENT_CONSTRAINT(0x02d2, 0xf),
-	/* MEM_LOAD_UOPS_LLC_MISS_RETIRED.LOCAL_DRAM */
-	INTEL_UEVENT_CONSTRAINT(0x01d3, 0xf),
-	INTEL_UEVENT_CONSTRAINT(0x04c8, 0xf), /* HLE_RETIRED.Abort */
-	INTEL_UEVENT_CONSTRAINT(0x04c9, 0xf), /* RTM_RETIRED.Abort */
-
+	INTEL_PLD_CONSTRAINT(0x01cd, 0xf),    /* MEM_TRANS_RETIRED.* */
+	/* UOPS_RETIRED.ALL, inv=1, cmask=16 (cycles:p). */
+	INTEL_FLAGS_EVENT_CONSTRAINT(0x108001c2, 0xf),
+	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_NA(0x01c2, 0xf), /* UOPS_RETIRED.ALL */
+	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x11d0, 0xf), /* MEM_UOPS_RETIRED.STLB_MISS_LOADS */
+	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x21d0, 0xf), /* MEM_UOPS_RETIRED.LOCK_LOADS */
+	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x41d0, 0xf), /* MEM_UOPS_RETIRED.SPLIT_LOADS */
+	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x81d0, 0xf), /* MEM_UOPS_RETIRED.ALL_LOADS */
+	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x12d0, 0xf), /* MEM_UOPS_RETIRED.STLB_MISS_STORES */
+	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x42d0, 0xf), /* MEM_UOPS_RETIRED.SPLIT_STORES */
+	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x82d0, 0xf), /* MEM_UOPS_RETIRED.ALL_STORES */
+	INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD(0xd1, 0xf),    /* MEM_LOAD_UOPS_RETIRED.* */
+	INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD(0xd2, 0xf),    /* MEM_LOAD_UOPS_L3_HIT_RETIRED.* */
+	INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD(0xd3, 0xf),    /* MEM_LOAD_UOPS_L3_MISS_RETIRED.* */
+	/* Allow all events as PEBS with no flags */
+	INTEL_ALL_EVENT_CONSTRAINT(0, 0xf),
 	EVENT_CONSTRAINT_END
 };
 
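The hex codes in these tables pack the architectural unit-mask and event-select fields into one value, the same layout INTEL_ARCH_EVENT_MASK matches against. A stand-alone decode of one entry (hypothetical helper, not from the patch):

#include <stdio.h>

int main(void)
{
	unsigned int code = 0x81d0;	/* MEM_UOPS_RETIRED.ALL_LOADS above */

	/* low byte: event select; next byte: unit mask */
	printf("event=0x%02x umask=0x%02x\n",
	       code & 0xff, (code >> 8) & 0xff);	/* event=0xd0 umask=0x81 */
	return 0;
}
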
@@ -880,7 +838,9 @@ static void __intel_pmu_pebs_event(struct perf_event *event,
 
 	fll = event->hw.flags & PERF_X86_EVENT_PEBS_LDLAT;
 	fst = event->hw.flags & (PERF_X86_EVENT_PEBS_ST |
-				 PERF_X86_EVENT_PEBS_ST_HSW);
+				 PERF_X86_EVENT_PEBS_ST_HSW |
+				 PERF_X86_EVENT_PEBS_LD_HSW |
+				 PERF_X86_EVENT_PEBS_NA_HSW);
 
 	perf_sample_data_init(&data, 0, event->hw.last_period);
 
@@ -903,7 +863,10 @@ static void __intel_pmu_pebs_event(struct perf_event *event,
 	if (sample_type & PERF_SAMPLE_DATA_SRC) {
 		if (fll)
 			data.data_src.val = load_latency_data(pebs->dse);
-		else if (event->hw.flags & PERF_X86_EVENT_PEBS_ST_HSW)
+		else if (event->hw.flags &
+			 (PERF_X86_EVENT_PEBS_ST_HSW|
+			  PERF_X86_EVENT_PEBS_LD_HSW|
+			  PERF_X86_EVENT_PEBS_NA_HSW))
 			data.data_src.val =
 				precise_store_data_hsw(event, pebs->dse);
 		else
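
Putting the two hunks above together, a condensed sketch (simplified from __intel_pmu_pebs_event; flag values as defined earlier in this patch) of how the per-event flags now select the data-source decoder, with all three HSW datala flavors routed to the same parser:

#define PERF_X86_EVENT_PEBS_LDLAT	0x1
#define PERF_X86_EVENT_PEBS_ST		0x2
#define PERF_X86_EVENT_PEBS_ST_HSW	0x4
#define PERF_X86_EVENT_PEBS_LD_HSW	0x10
#define PERF_X86_EVENT_PEBS_NA_HSW	0x20

enum pebs_decoder { DECODE_LDLAT, DECODE_HSW_DATALA, DECODE_PRECISE_ST };

/* Mirrors the if/else chain above: load latency first, then any of
 * the three HSW datala flavors, else the older precise-store path. */
static enum pebs_decoder pick_decoder(unsigned long hw_flags)
{
	if (hw_flags & PERF_X86_EVENT_PEBS_LDLAT)
		return DECODE_LDLAT;		/* load_latency_data() */
	if (hw_flags & (PERF_X86_EVENT_PEBS_ST_HSW |
			PERF_X86_EVENT_PEBS_LD_HSW |
			PERF_X86_EVENT_PEBS_NA_HSW))
		return DECODE_HSW_DATALA;	/* precise_store_data_hsw() */
	return DECODE_PRECISE_ST;		/* precise_store_data() */
}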