author		Yan, Zheng <zheng.z.yan@intel.com>	2015-05-06 15:33:48 -0400
committer	Ingo Molnar <mingo@kernel.org>		2015-06-07 10:08:40 -0400
commit		43cf76312faefed098c057082abac8a3d521e1dc (patch)
tree		3da139a6fbaae870eb54c1e9d4d2f697695d5c02
parent		851559e35fd5ab637783ba395e55edd50f761229 (diff)
perf/x86/intel: Introduce setup_pebs_sample_data()
Move code that sets up the PEBS sample data to a separate function.

Signed-off-by: Yan, Zheng <zheng.z.yan@intel.com>
Signed-off-by: Kan Liang <kan.liang@intel.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: acme@infradead.org
Cc: eranian@google.com
Link: http://lkml.kernel.org/r/1430940834-8964-3-git-send-email-kan.liang@intel.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
-rw-r--r--	arch/x86/kernel/cpu/perf_event_intel_ds.c	95
1 file changed, 52 insertions(+), 43 deletions(-)
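
In outline, the change splits the old __intel_pmu_pebs_event() into a pure
setup helper plus a thin caller. A condensed sketch of the resulting shape
(the helper body is elided here; the full change is in the diff below):

	/* Fills *data and *regs from one PEBS record; no counter state changes. */
	static void setup_pebs_sample_data(struct perf_event *event,
					   struct pt_regs *iregs, void *__pebs,
					   struct perf_sample_data *data,
					   struct pt_regs *regs);

	static void __intel_pmu_pebs_event(struct perf_event *event,
					   struct pt_regs *iregs, void *__pebs)
	{
		struct perf_sample_data data;
		struct pt_regs regs;

		/* Counter reprogramming stays in the caller... */
		if (!intel_pmu_save_and_restart(event))
			return;

		/* ...while all record decoding moves into the helper. */
		setup_pebs_sample_data(event, iregs, __pebs, &data, &regs);

		if (perf_event_overflow(event, &data, &regs))
			x86_pmu_stop(event, 0);
	}
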
diff --git a/arch/x86/kernel/cpu/perf_event_intel_ds.c b/arch/x86/kernel/cpu/perf_event_intel_ds.c
index 4802d5dec222..a5fe561c4902 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_ds.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_ds.c
@@ -853,8 +853,10 @@ static inline u64 intel_hsw_transaction(struct pebs_record_hsw *pebs)
 	return txn;
 }
 
-static void __intel_pmu_pebs_event(struct perf_event *event,
-				   struct pt_regs *iregs, void *__pebs)
+static void setup_pebs_sample_data(struct perf_event *event,
+				   struct pt_regs *iregs, void *__pebs,
+				   struct perf_sample_data *data,
+				   struct pt_regs *regs)
 {
 #define PERF_X86_EVENT_PEBS_HSW_PREC \
 	(PERF_X86_EVENT_PEBS_ST_HSW | \
@@ -866,30 +868,25 @@ static void __intel_pmu_pebs_event(struct perf_event *event,
 	 */
 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 	struct pebs_record_hsw *pebs = __pebs;
-	struct perf_sample_data data;
-	struct pt_regs regs;
 	u64 sample_type;
 	int fll, fst, dsrc;
 	int fl = event->hw.flags;
 
-	if (!intel_pmu_save_and_restart(event))
-		return;
-
 	sample_type = event->attr.sample_type;
 	dsrc = sample_type & PERF_SAMPLE_DATA_SRC;
 
 	fll = fl & PERF_X86_EVENT_PEBS_LDLAT;
 	fst = fl & (PERF_X86_EVENT_PEBS_ST | PERF_X86_EVENT_PEBS_HSW_PREC);
 
-	perf_sample_data_init(&data, 0, event->hw.last_period);
+	perf_sample_data_init(data, 0, event->hw.last_period);
 
-	data.period = event->hw.last_period;
+	data->period = event->hw.last_period;
 
 	/*
 	 * Use latency for weight (only avail with PEBS-LL)
 	 */
 	if (fll && (sample_type & PERF_SAMPLE_WEIGHT))
-		data.weight = pebs->lat;
+		data->weight = pebs->lat;
 
 	/*
 	 * data.data_src encodes the data source
@@ -902,7 +899,7 @@ static void __intel_pmu_pebs_event(struct perf_event *event,
 			val = precise_datala_hsw(event, pebs->dse);
 		else if (fst)
 			val = precise_store_data(pebs->dse);
-		data.data_src.val = val;
+		data->data_src.val = val;
 	}
 
 	/*
@@ -915,58 +912,70 @@ static void __intel_pmu_pebs_event(struct perf_event *event,
 	 * PERF_SAMPLE_IP and PERF_SAMPLE_CALLCHAIN to function properly.
 	 * A possible PERF_SAMPLE_REGS will have to transfer all regs.
 	 */
-	regs = *iregs;
-	regs.flags = pebs->flags;
-	set_linear_ip(&regs, pebs->ip);
-	regs.bp = pebs->bp;
-	regs.sp = pebs->sp;
+	*regs = *iregs;
+	regs->flags = pebs->flags;
+	set_linear_ip(regs, pebs->ip);
+	regs->bp = pebs->bp;
+	regs->sp = pebs->sp;
 
 	if (sample_type & PERF_SAMPLE_REGS_INTR) {
-		regs.ax = pebs->ax;
-		regs.bx = pebs->bx;
-		regs.cx = pebs->cx;
-		regs.dx = pebs->dx;
-		regs.si = pebs->si;
-		regs.di = pebs->di;
-		regs.bp = pebs->bp;
-		regs.sp = pebs->sp;
+		regs->ax = pebs->ax;
+		regs->bx = pebs->bx;
+		regs->cx = pebs->cx;
+		regs->dx = pebs->dx;
+		regs->si = pebs->si;
+		regs->di = pebs->di;
+		regs->bp = pebs->bp;
+		regs->sp = pebs->sp;
 
-		regs.flags = pebs->flags;
+		regs->flags = pebs->flags;
 #ifndef CONFIG_X86_32
-		regs.r8 = pebs->r8;
-		regs.r9 = pebs->r9;
-		regs.r10 = pebs->r10;
-		regs.r11 = pebs->r11;
-		regs.r12 = pebs->r12;
-		regs.r13 = pebs->r13;
-		regs.r14 = pebs->r14;
-		regs.r15 = pebs->r15;
+		regs->r8 = pebs->r8;
+		regs->r9 = pebs->r9;
+		regs->r10 = pebs->r10;
+		regs->r11 = pebs->r11;
+		regs->r12 = pebs->r12;
+		regs->r13 = pebs->r13;
+		regs->r14 = pebs->r14;
+		regs->r15 = pebs->r15;
 #endif
 	}
 
 	if (event->attr.precise_ip > 1 && x86_pmu.intel_cap.pebs_format >= 2) {
-		regs.ip = pebs->real_ip;
-		regs.flags |= PERF_EFLAGS_EXACT;
-	} else if (event->attr.precise_ip > 1 && intel_pmu_pebs_fixup_ip(&regs))
-		regs.flags |= PERF_EFLAGS_EXACT;
+		regs->ip = pebs->real_ip;
+		regs->flags |= PERF_EFLAGS_EXACT;
+	} else if (event->attr.precise_ip > 1 && intel_pmu_pebs_fixup_ip(regs))
+		regs->flags |= PERF_EFLAGS_EXACT;
 	else
-		regs.flags &= ~PERF_EFLAGS_EXACT;
+		regs->flags &= ~PERF_EFLAGS_EXACT;
 
 	if ((sample_type & PERF_SAMPLE_ADDR) &&
 	    x86_pmu.intel_cap.pebs_format >= 1)
-		data.addr = pebs->dla;
+		data->addr = pebs->dla;
 
 	if (x86_pmu.intel_cap.pebs_format >= 2) {
 		/* Only set the TSX weight when no memory weight. */
 		if ((sample_type & PERF_SAMPLE_WEIGHT) && !fll)
-			data.weight = intel_hsw_weight(pebs);
+			data->weight = intel_hsw_weight(pebs);
 
 		if (sample_type & PERF_SAMPLE_TRANSACTION)
-			data.txn = intel_hsw_transaction(pebs);
+			data->txn = intel_hsw_transaction(pebs);
 	}
 
 	if (has_branch_stack(event))
-		data.br_stack = &cpuc->lbr_stack;
+		data->br_stack = &cpuc->lbr_stack;
+}
+
+static void __intel_pmu_pebs_event(struct perf_event *event,
+				   struct pt_regs *iregs, void *__pebs)
+{
+	struct perf_sample_data data;
+	struct pt_regs regs;
+
+	if (!intel_pmu_save_and_restart(event))
+		return;
+
+	setup_pebs_sample_data(event, iregs, __pebs, &data, &regs);
 
 	if (perf_event_overflow(event, &data, &regs))
 		x86_pmu_stop(event, 0);
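
The point of the split is that sample-data construction no longer assumes
exactly one PEBS record per interrupt. A hypothetical drain loop, presumably
the kind of caller later patches in this series introduce (not part of this
patch; drain_pebs_sketch and its parameters are illustrative names), could
reuse the helper once per record:

	static void drain_pebs_sketch(struct perf_event *event,
				      struct pt_regs *iregs,
				      void *base, void *top, size_t record_size)
	{
		struct perf_sample_data data;
		struct pt_regs regs;
		void *at;

		/* One setup + one overflow delivery per record in the buffer. */
		for (at = base; at < top; at += record_size) {
			setup_pebs_sample_data(event, iregs, at, &data, &regs);
			if (perf_event_overflow(event, &data, &regs))
				x86_pmu_stop(event, 0);
		}
	}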