author     Robert Richter <robert.richter@amd.com>    2012-04-02 14:19:08 -0400
committer  Ingo Molnar <mingo@kernel.org>             2012-05-09 09:23:12 -0400
commit     fd0d000b2c34aa43d4e92dcf0dfaeda7e123008a (patch)
tree       8b81831cf37f1be6dd3cc9be772952d5c835a550 /arch/arm/kernel
parent     c75841a398d667d9968245b9519d93cedbfb4780 (diff)
perf: Pass last sampling period to perf_sample_data_init()
We always need to pass the last sample period to
perf_sample_data_init(); otherwise the event distribution will be
wrong. Thus, modify the function interface to take the required
period as an argument. So basically a pattern like this:

    perf_sample_data_init(&data, ~0ULL);
    data.period = event->hw.last_period;

now becomes:

    perf_sample_data_init(&data, ~0ULL, event->hw.last_period);

This avoids an uninitialized data.period and simplifies the code.
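For reference, a minimal sketch of how the extended initializer might
look after this change. Only the added period parameter is what this
patch confirms; the helper's location and the other fields cleared
here are assumptions for illustration:

    /* Sketch only: assumed shape of perf_sample_data_init() in
     * include/linux/perf_event.h once the period argument is added. */
    static inline void perf_sample_data_init(struct perf_sample_data *data,
                                             u64 addr, u64 period)
    {
            /* clear fields that callers may not set explicitly */
            data->addr = addr;
            data->raw = NULL;
            /* the last sampling period is now passed in directly,
             * instead of each caller assigning data->period afterwards */
            data->period = period;
    }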
Signed-off-by: Robert Richter <robert.richter@amd.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/r/1333390758-10893-3-git-send-email-robert.richter@amd.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'arch/arm/kernel')
-rw-r--r--  arch/arm/kernel/perf_event_v6.c      | 4
-rw-r--r--  arch/arm/kernel/perf_event_v7.c      | 4
-rw-r--r--  arch/arm/kernel/perf_event_xscale.c  | 8
3 files changed, 4 insertions, 12 deletions
diff --git a/arch/arm/kernel/perf_event_v6.c b/arch/arm/kernel/perf_event_v6.c
index b78af0cc6ef3..ab627a740fa3 100644
--- a/arch/arm/kernel/perf_event_v6.c
+++ b/arch/arm/kernel/perf_event_v6.c
@@ -489,8 +489,6 @@ armv6pmu_handle_irq(int irq_num,
 	 */
 	armv6_pmcr_write(pmcr);
 
-	perf_sample_data_init(&data, 0);
-
 	cpuc = &__get_cpu_var(cpu_hw_events);
 	for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
 		struct perf_event *event = cpuc->events[idx];
@@ -509,7 +507,7 @@ armv6pmu_handle_irq(int irq_num,
 
 		hwc = &event->hw;
 		armpmu_event_update(event, hwc, idx);
-		data.period = event->hw.last_period;
+		perf_sample_data_init(&data, 0, hwc->last_period);
 		if (!armpmu_event_set_period(event, hwc, idx))
 			continue;
 
diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c
index 00755d82e2f2..d3c536068162 100644
--- a/arch/arm/kernel/perf_event_v7.c
+++ b/arch/arm/kernel/perf_event_v7.c
@@ -1077,8 +1077,6 @@ static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
 	 */
 	regs = get_irq_regs();
 
-	perf_sample_data_init(&data, 0);
-
 	cpuc = &__get_cpu_var(cpu_hw_events);
 	for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
 		struct perf_event *event = cpuc->events[idx];
@@ -1097,7 +1095,7 @@ static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
 
 		hwc = &event->hw;
 		armpmu_event_update(event, hwc, idx);
-		data.period = event->hw.last_period;
+		perf_sample_data_init(&data, 0, hwc->last_period);
 		if (!armpmu_event_set_period(event, hwc, idx))
 			continue;
 
diff --git a/arch/arm/kernel/perf_event_xscale.c b/arch/arm/kernel/perf_event_xscale.c
index 71a21e6712f5..e34e7254e652 100644
--- a/arch/arm/kernel/perf_event_xscale.c
+++ b/arch/arm/kernel/perf_event_xscale.c
@@ -248,8 +248,6 @@ xscale1pmu_handle_irq(int irq_num, void *dev)
 
 	regs = get_irq_regs();
 
-	perf_sample_data_init(&data, 0);
-
 	cpuc = &__get_cpu_var(cpu_hw_events);
 	for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
 		struct perf_event *event = cpuc->events[idx];
@@ -263,7 +261,7 @@
 
 		hwc = &event->hw;
 		armpmu_event_update(event, hwc, idx);
-		data.period = event->hw.last_period;
+		perf_sample_data_init(&data, 0, hwc->last_period);
 		if (!armpmu_event_set_period(event, hwc, idx))
 			continue;
 
@@ -588,8 +586,6 @@ xscale2pmu_handle_irq(int irq_num, void *dev)
 
 	regs = get_irq_regs();
 
-	perf_sample_data_init(&data, 0);
-
 	cpuc = &__get_cpu_var(cpu_hw_events);
 	for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
 		struct perf_event *event = cpuc->events[idx];
@@ -603,7 +599,7 @@
 
 		hwc = &event->hw;
 		armpmu_event_update(event, hwc, idx);
-		data.period = event->hw.last_period;
+		perf_sample_data_init(&data, 0, hwc->last_period);
 		if (!armpmu_event_set_period(event, hwc, idx))
 			continue;
 