author		Robert Richter <robert.richter@amd.com>	2012-04-02 14:19:08 -0400
committer	Ingo Molnar <mingo@kernel.org>	2012-05-09 09:23:12 -0400
commit		fd0d000b2c34aa43d4e92dcf0dfaeda7e123008a (patch)
tree		8b81831cf37f1be6dd3cc9be772952d5c835a550
parent		c75841a398d667d9968245b9519d93cedbfb4780 (diff)
perf: Pass last sampling period to perf_sample_data_init()
We always need to pass the last sample period to perf_sample_data_init(), otherwise the event distribution will be wrong. Thus, modify the function interface to take the required period as an argument. So basically a pattern like this:

        perf_sample_data_init(&data, ~0ULL);
        data.period = event->hw.last_period;

will now look like this:

        perf_sample_data_init(&data, ~0ULL, event->hw.last_period);

This avoids an uninitialized data.period and simplifies the code.

Signed-off-by: Robert Richter <robert.richter@amd.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/r/1333390758-10893-3-git-send-email-robert.richter@amd.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
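For context, the new calling convention in a PMU overflow handler looks roughly like the following. This is an illustrative sketch only; example_pmu_handle_overflow() and its surroundings are made-up placeholder names, while perf_sample_data_init(), perf_event_overflow() and hw.last_period are the interfaces this patch touches:

        /* Illustrative sketch, not code from this patch. */
        static void example_pmu_handle_overflow(struct perf_event *event,
                                                struct pt_regs *regs)
        {
                struct hw_perf_event *hwc = &event->hw;
                struct perf_sample_data data;

                /*
                 * The period is now supplied at init time, so it can no
                 * longer be forgotten and left uninitialized.
                 */
                perf_sample_data_init(&data, 0, hwc->last_period);

                if (perf_event_overflow(event, &data, regs))
                        ;       /* arch code would typically stop the event here */
        }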
-rw-r--r--	arch/alpha/kernel/perf_event.c             |  3
-rw-r--r--	arch/arm/kernel/perf_event_v6.c            |  4
-rw-r--r--	arch/arm/kernel/perf_event_v7.c            |  4
-rw-r--r--	arch/arm/kernel/perf_event_xscale.c        |  8
-rw-r--r--	arch/mips/kernel/perf_event_mipsxx.c       |  2
-rw-r--r--	arch/powerpc/perf/core-book3s.c            |  3
-rw-r--r--	arch/powerpc/perf/core-fsl-emb.c           |  3
-rw-r--r--	arch/sparc/kernel/perf_event.c             |  4
-rw-r--r--	arch/x86/kernel/cpu/perf_event.c           |  4
-rw-r--r--	arch/x86/kernel/cpu/perf_event_amd_ibs.c   |  3
-rw-r--r--	arch/x86/kernel/cpu/perf_event_intel.c     |  4
-rw-r--r--	arch/x86/kernel/cpu/perf_event_intel_ds.c  |  6
-rw-r--r--	arch/x86/kernel/cpu/perf_event_p4.c        |  6
-rw-r--r--	include/linux/perf_event.h                 |  5
-rw-r--r--	kernel/events/core.c                       |  9
15 files changed, 25 insertions(+), 43 deletions(-)
diff --git a/arch/alpha/kernel/perf_event.c b/arch/alpha/kernel/perf_event.c
index 0dae252f7a33..d821b17047e0 100644
--- a/arch/alpha/kernel/perf_event.c
+++ b/arch/alpha/kernel/perf_event.c
@@ -824,7 +824,6 @@ static void alpha_perf_event_irq_handler(unsigned long la_ptr,
 
 	idx = la_ptr;
 
-	perf_sample_data_init(&data, 0);
 	for (j = 0; j < cpuc->n_events; j++) {
 		if (cpuc->current_idx[j] == idx)
 			break;
@@ -848,7 +847,7 @@ static void alpha_perf_event_irq_handler(unsigned long la_ptr,
 
 	hwc = &event->hw;
 	alpha_perf_event_update(event, hwc, idx, alpha_pmu->pmc_max_period[idx]+1);
-	data.period = event->hw.last_period;
+	perf_sample_data_init(&data, 0, hwc->last_period);
 
 	if (alpha_perf_event_set_period(event, hwc, idx)) {
 		if (perf_event_overflow(event, &data, regs)) {
diff --git a/arch/arm/kernel/perf_event_v6.c b/arch/arm/kernel/perf_event_v6.c
index b78af0cc6ef3..ab627a740fa3 100644
--- a/arch/arm/kernel/perf_event_v6.c
+++ b/arch/arm/kernel/perf_event_v6.c
@@ -489,8 +489,6 @@ armv6pmu_handle_irq(int irq_num,
 	 */
 	armv6_pmcr_write(pmcr);
 
-	perf_sample_data_init(&data, 0);
-
 	cpuc = &__get_cpu_var(cpu_hw_events);
 	for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
 		struct perf_event *event = cpuc->events[idx];
@@ -509,7 +507,7 @@ armv6pmu_handle_irq(int irq_num,
 
 		hwc = &event->hw;
 		armpmu_event_update(event, hwc, idx);
-		data.period = event->hw.last_period;
+		perf_sample_data_init(&data, 0, hwc->last_period);
 		if (!armpmu_event_set_period(event, hwc, idx))
 			continue;
 
diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c
index 00755d82e2f2..d3c536068162 100644
--- a/arch/arm/kernel/perf_event_v7.c
+++ b/arch/arm/kernel/perf_event_v7.c
@@ -1077,8 +1077,6 @@ static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
 	 */
 	regs = get_irq_regs();
 
-	perf_sample_data_init(&data, 0);
-
 	cpuc = &__get_cpu_var(cpu_hw_events);
 	for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
 		struct perf_event *event = cpuc->events[idx];
@@ -1097,7 +1095,7 @@ static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
 
 		hwc = &event->hw;
 		armpmu_event_update(event, hwc, idx);
-		data.period = event->hw.last_period;
+		perf_sample_data_init(&data, 0, hwc->last_period);
 		if (!armpmu_event_set_period(event, hwc, idx))
 			continue;
 
diff --git a/arch/arm/kernel/perf_event_xscale.c b/arch/arm/kernel/perf_event_xscale.c
index 71a21e6712f5..e34e7254e652 100644
--- a/arch/arm/kernel/perf_event_xscale.c
+++ b/arch/arm/kernel/perf_event_xscale.c
@@ -248,8 +248,6 @@ xscale1pmu_handle_irq(int irq_num, void *dev)
 
 	regs = get_irq_regs();
 
-	perf_sample_data_init(&data, 0);
-
 	cpuc = &__get_cpu_var(cpu_hw_events);
 	for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
 		struct perf_event *event = cpuc->events[idx];
@@ -263,7 +261,7 @@ xscale1pmu_handle_irq(int irq_num, void *dev)
 
 		hwc = &event->hw;
 		armpmu_event_update(event, hwc, idx);
-		data.period = event->hw.last_period;
+		perf_sample_data_init(&data, 0, hwc->last_period);
 		if (!armpmu_event_set_period(event, hwc, idx))
 			continue;
 
@@ -588,8 +586,6 @@ xscale2pmu_handle_irq(int irq_num, void *dev)
 
 	regs = get_irq_regs();
 
-	perf_sample_data_init(&data, 0);
-
 	cpuc = &__get_cpu_var(cpu_hw_events);
 	for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
 		struct perf_event *event = cpuc->events[idx];
@@ -603,7 +599,7 @@ xscale2pmu_handle_irq(int irq_num, void *dev)
 
 		hwc = &event->hw;
 		armpmu_event_update(event, hwc, idx);
-		data.period = event->hw.last_period;
+		perf_sample_data_init(&data, 0, hwc->last_period);
 		if (!armpmu_event_set_period(event, hwc, idx))
 			continue;
 
diff --git a/arch/mips/kernel/perf_event_mipsxx.c b/arch/mips/kernel/perf_event_mipsxx.c
index 811084f4e422..ab73fa2fb9b5 100644
--- a/arch/mips/kernel/perf_event_mipsxx.c
+++ b/arch/mips/kernel/perf_event_mipsxx.c
@@ -1325,7 +1325,7 @@ static int mipsxx_pmu_handle_shared_irq(void)
 
 	regs = get_irq_regs();
 
-	perf_sample_data_init(&data, 0);
+	perf_sample_data_init(&data, 0, 0);
 
 	switch (counters) {
 #define HANDLE_COUNTER(n)	\
diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
index 02aee03e713c..8f84bcba18da 100644
--- a/arch/powerpc/perf/core-book3s.c
+++ b/arch/powerpc/perf/core-book3s.c
@@ -1299,8 +1299,7 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
 	if (record) {
 		struct perf_sample_data data;
 
-		perf_sample_data_init(&data, ~0ULL);
-		data.period = event->hw.last_period;
+		perf_sample_data_init(&data, ~0ULL, event->hw.last_period);
 
 		if (event->attr.sample_type & PERF_SAMPLE_ADDR)
 			perf_get_data_addr(regs, &data.addr);
diff --git a/arch/powerpc/perf/core-fsl-emb.c b/arch/powerpc/perf/core-fsl-emb.c
index 0a6d2a9d569c..106c53354675 100644
--- a/arch/powerpc/perf/core-fsl-emb.c
+++ b/arch/powerpc/perf/core-fsl-emb.c
@@ -613,8 +613,7 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
 	if (record) {
 		struct perf_sample_data data;
 
-		perf_sample_data_init(&data, 0);
-		data.period = event->hw.last_period;
+		perf_sample_data_init(&data, 0, event->hw.last_period);
 
 		if (perf_event_overflow(event, &data, regs))
 			fsl_emb_pmu_stop(event, 0);
diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c
index 28559ce5eeb5..5713957dcb8a 100644
--- a/arch/sparc/kernel/perf_event.c
+++ b/arch/sparc/kernel/perf_event.c
@@ -1296,8 +1296,6 @@ static int __kprobes perf_event_nmi_handler(struct notifier_block *self,
 
 	regs = args->regs;
 
-	perf_sample_data_init(&data, 0);
-
 	cpuc = &__get_cpu_var(cpu_hw_events);
 
 	/* If the PMU has the TOE IRQ enable bits, we need to do a
@@ -1321,7 +1319,7 @@ static int __kprobes perf_event_nmi_handler(struct notifier_block *self,
 		if (val & (1ULL << 31))
 			continue;
 
-		data.period = event->hw.last_period;
+		perf_sample_data_init(&data, 0, hwc->last_period);
 		if (!sparc_perf_event_set_period(event, hwc, idx))
 			continue;
 
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index e33e9cf160eb..e049d6da0183 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -1183,8 +1183,6 @@ int x86_pmu_handle_irq(struct pt_regs *regs)
 	int idx, handled = 0;
 	u64 val;
 
-	perf_sample_data_init(&data, 0);
-
 	cpuc = &__get_cpu_var(cpu_hw_events);
 
 	/*
@@ -1219,7 +1217,7 @@ int x86_pmu_handle_irq(struct pt_regs *regs)
 		 * event overflow
 		 */
 		handled++;
-		data.period = event->hw.last_period;
+		perf_sample_data_init(&data, 0, event->hw.last_period);
 
 		if (!x86_perf_event_set_period(event))
 			continue;
diff --git a/arch/x86/kernel/cpu/perf_event_amd_ibs.c b/arch/x86/kernel/cpu/perf_event_amd_ibs.c
index c8f69bea6624..2317228b5299 100644
--- a/arch/x86/kernel/cpu/perf_event_amd_ibs.c
+++ b/arch/x86/kernel/cpu/perf_event_amd_ibs.c
@@ -398,8 +398,7 @@ static int perf_ibs_handle_irq(struct perf_ibs *perf_ibs, struct pt_regs *iregs)
 	}
 
 	perf_ibs_event_update(perf_ibs, event, config);
-	perf_sample_data_init(&data, 0);
-	data.period = event->hw.last_period;
+	perf_sample_data_init(&data, 0, hwc->last_period);
 
 	if (event->attr.sample_type & PERF_SAMPLE_RAW) {
 		ibs_data.caps = ibs_caps;
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index 26b3e2fef104..166546ec6aef 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -1027,8 +1027,6 @@ static int intel_pmu_handle_irq(struct pt_regs *regs)
 	u64 status;
 	int handled;
 
-	perf_sample_data_init(&data, 0);
-
 	cpuc = &__get_cpu_var(cpu_hw_events);
 
 	/*
@@ -1082,7 +1080,7 @@ again:
 		if (!intel_pmu_save_and_restart(event))
 			continue;
 
-		data.period = event->hw.last_period;
+		perf_sample_data_init(&data, 0, event->hw.last_period);
 
 		if (has_branch_stack(event))
 			data.br_stack = &cpuc->lbr_stack;
diff --git a/arch/x86/kernel/cpu/perf_event_intel_ds.c b/arch/x86/kernel/cpu/perf_event_intel_ds.c
index 7f64df19e7dd..5a3edc27f6e5 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_ds.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_ds.c
@@ -316,8 +316,7 @@ int intel_pmu_drain_bts_buffer(void)
 
 	ds->bts_index = ds->bts_buffer_base;
 
-	perf_sample_data_init(&data, 0);
-	data.period = event->hw.last_period;
+	perf_sample_data_init(&data, 0, event->hw.last_period);
 	regs.ip = 0;
 
 	/*
@@ -564,8 +563,7 @@ static void __intel_pmu_pebs_event(struct perf_event *event,
 	if (!intel_pmu_save_and_restart(event))
 		return;
 
-	perf_sample_data_init(&data, 0);
-	data.period = event->hw.last_period;
+	perf_sample_data_init(&data, 0, event->hw.last_period);
 
 	/*
 	 * We use the interrupt regs as a base because the PEBS record
diff --git a/arch/x86/kernel/cpu/perf_event_p4.c b/arch/x86/kernel/cpu/perf_event_p4.c
index a2dfacfd7103..47124a73dd73 100644
--- a/arch/x86/kernel/cpu/perf_event_p4.c
+++ b/arch/x86/kernel/cpu/perf_event_p4.c
@@ -1005,8 +1005,6 @@ static int p4_pmu_handle_irq(struct pt_regs *regs)
 	int idx, handled = 0;
 	u64 val;
 
-	perf_sample_data_init(&data, 0);
-
 	cpuc = &__get_cpu_var(cpu_hw_events);
 
 	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
@@ -1034,10 +1032,12 @@ static int p4_pmu_handle_irq(struct pt_regs *regs)
 		handled += overflow;
 
 		/* event overflow for sure */
-		data.period = event->hw.last_period;
+		perf_sample_data_init(&data, 0, hwc->last_period);
 
 		if (!x86_perf_event_set_period(event))
 			continue;
+
+
 		if (perf_event_overflow(event, &data, regs))
 			x86_pmu_stop(event, 0);
 	}
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index ddbb6a901f65..f32578634d9d 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -1132,11 +1132,14 @@ struct perf_sample_data {
 	struct perf_branch_stack	*br_stack;
 };
 
-static inline void perf_sample_data_init(struct perf_sample_data *data, u64 addr)
+static inline void perf_sample_data_init(struct perf_sample_data *data,
+					 u64 addr, u64 period)
 {
+	/* remaining struct members initialized in perf_prepare_sample() */
 	data->addr = addr;
 	data->raw = NULL;
 	data->br_stack = NULL;
+	data->period = period;
 }
 
 extern void perf_output_sample(struct perf_output_handle *handle,
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 9789a56b7d54..00c58df9f4e2 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -4957,7 +4957,7 @@ void __perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
 	if (rctx < 0)
 		return;
 
-	perf_sample_data_init(&data, addr);
+	perf_sample_data_init(&data, addr, 0);
 
 	do_perf_sw_event(PERF_TYPE_SOFTWARE, event_id, nr, &data, regs);
 
@@ -5215,7 +5215,7 @@ void perf_tp_event(u64 addr, u64 count, void *record, int entry_size,
 		.data = record,
 	};
 
-	perf_sample_data_init(&data, addr);
+	perf_sample_data_init(&data, addr, 0);
 	data.raw = &raw;
 
 	hlist_for_each_entry_rcu(event, node, head, hlist_entry) {
@@ -5318,7 +5318,7 @@ void perf_bp_event(struct perf_event *bp, void *data)
 	struct perf_sample_data sample;
 	struct pt_regs *regs = data;
 
-	perf_sample_data_init(&sample, bp->attr.bp_addr);
+	perf_sample_data_init(&sample, bp->attr.bp_addr, 0);
 
 	if (!bp->hw.state && !perf_exclude_event(bp, regs))
 		perf_swevent_event(bp, 1, &sample, regs);
@@ -5344,8 +5344,7 @@ static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer)
 
 	event->pmu->read(event);
 
-	perf_sample_data_init(&data, 0);
-	data.period = event->hw.last_period;
+	perf_sample_data_init(&data, 0, event->hw.last_period);
 	regs = get_irq_regs();
 
 	if (regs && !perf_exclude_event(event, regs)) {