aboutsummaryrefslogtreecommitdiffstats
path: root/kernel/events
diff options
context:
space:
mode:
authorPeter Zijlstra <peterz@infradead.org>2013-11-27 08:54:38 -0500
committerIngo Molnar <mingo@kernel.org>2013-12-17 09:21:33 -0500
commitbad7192b842c83e580747ca57104dd51fe08c223 (patch)
tree5a73fe2fc627384fa6ab621cc59433dc922f1155 /kernel/events
parent7fd565e27547c913b83b46d94662103be81a88ec (diff)
perf: Fix PERF_EVENT_IOC_PERIOD to force-reset the period
Vince Weaver reports that, on all architectures apart from ARM,
PERF_EVENT_IOC_PERIOD doesn't actually update the period until the next
event fires. This is counter-intuitive behaviour and is better dealt with
in the core code.

This patch ensures that the period is forcefully reset when dealing with
such a request in the core code. A subsequent patch removes the
equivalent hack from the ARM back-end.

Reported-by: Vince Weaver <vincent.weaver@maine.edu>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Will Deacon <will.deacon@arm.com>
Link: http://lkml.kernel.org/r/1385560479-11014-1-git-send-email-will.deacon@arm.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel/events')
-rw-r--r--	kernel/events/core.c	16	++++++++++++++-
1 file changed, 15 insertions(+), 1 deletion(-)
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 403b781daafb..89d34f9bb8cb 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -3527,7 +3527,7 @@ static void perf_event_for_each(struct perf_event *event,
 static int perf_event_period(struct perf_event *event, u64 __user *arg)
 {
 	struct perf_event_context *ctx = event->ctx;
-	int ret = 0;
+	int ret = 0, active;
 	u64 value;
 
 	if (!is_sampling_event(event))
@@ -3551,6 +3551,20 @@ static int perf_event_period(struct perf_event *event, u64 __user *arg)
 		event->attr.sample_period = value;
 		event->hw.sample_period = value;
 	}
+
+	active = (event->state == PERF_EVENT_STATE_ACTIVE);
+	if (active) {
+		perf_pmu_disable(ctx->pmu);
+		event->pmu->stop(event, PERF_EF_UPDATE);
+	}
+
+	local64_set(&event->hw.period_left, 0);
+
+	if (active) {
+		event->pmu->start(event, PERF_EF_RELOAD);
+		perf_pmu_enable(ctx->pmu);
+	}
+
 unlock:
 	raw_spin_unlock_irq(&ctx->lock);
 