author	Peter Zijlstra <a.p.zijlstra@chello.nl>	2011-02-15 16:26:07 -0500
committer	Ingo Molnar <mingo@elte.hu>	2011-02-16 07:25:29 -0500
commit	4fe757dd48a9e95e1a071291f15dda5421dacb66 (patch)
tree	9981eaf986d477d096cdb0388e0f95a80eeb2c38 /kernel/perf_event.c
parent	7d44ec193d95416d1342cdd86392a1eeb7461186 (diff)
perf: Fix throttle logic
It was possible to call pmu::start() on an already running event. In
particular this led to some wreckage, as the hrtimer events would
re-initialize active timers.

This was due to throttled events being activated again by scheduling:
scheduling in a context would add and force-start events, resulting in
running events with a possible throttle status. The next tick to hit
that task would then try to unthrottle the event and call ->start() on
an already running event.

Reported-by: Jeff Moyer <jmoyer@redhat.com>
Cc: <stable@kernel.org>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
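For context, here is a minimal userspace toy model of the ordering problem
described above. It is illustrative only: the names (toy_event, toy_pmu_start,
toy_sched_in, toy_tick) are invented for this sketch and are not kernel code;
only the MAX_INTERRUPTS sentinel and the "clear the throttle state before
starting" idea mirror the patch below.

/*
 * Toy model (illustrative, not kernel code) of the throttle/unthrottle
 * ordering bug: if sched-in force-starts a throttled event without
 * clearing hw.interrupts, the next tick's unthrottle path would call
 * start() on an already running event.
 */
#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

#define MAX_INTERRUPTS (~0ULL)

struct toy_event {
	unsigned long long interrupts;	/* models event->hw.interrupts */
	bool running;			/* models the PMU-side active state */
};

static void toy_pmu_start(struct toy_event *e)
{
	assert(!e->running);		/* starting twice is the bug */
	e->running = true;
}

/* sched-in path: with the fix, clear the throttle state before starting */
static void toy_sched_in(struct toy_event *e, bool with_fix)
{
	if (with_fix && e->interrupts == MAX_INTERRUPTS)
		e->interrupts = 0;	/* unthrottle here, as the patch does */
	toy_pmu_start(e);
}

/* tick path: unthrottles a throttled event by (re)starting it */
static void toy_tick(struct toy_event *e)
{
	if (e->interrupts == MAX_INTERRUPTS) {
		e->interrupts = 0;
		toy_pmu_start(e);	/* double start without the fix */
	}
}

int main(void)
{
	struct toy_event e = { .interrupts = MAX_INTERRUPTS, .running = false };

	toy_sched_in(&e, true);		/* fixed path: the tick below is a no-op */
	toy_tick(&e);
	printf("ok: running=%d interrupts=%llu\n", e.running, e.interrupts);
	return 0;
}

Passing with_fix = false reproduces the double start (the assert fires),
which corresponds to the ->start() on an already running event that the
patch prevents.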
Diffstat (limited to 'kernel/perf_event.c')
-rw-r--r--	kernel/perf_event.c	19
1 file changed, 15 insertions(+), 4 deletions(-)
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 999835b6112b..656222fcf767 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -782,6 +782,10 @@ retry:
 	raw_spin_unlock_irq(&ctx->lock);
 }
 
+#define MAX_INTERRUPTS (~0ULL)
+
+static void perf_log_throttle(struct perf_event *event, int enable);
+
 static int
 event_sched_in(struct perf_event *event,
 		 struct perf_cpu_context *cpuctx,
@@ -794,6 +798,17 @@ event_sched_in(struct perf_event *event,
 
 	event->state = PERF_EVENT_STATE_ACTIVE;
 	event->oncpu = smp_processor_id();
+
+	/*
+	 * Unthrottle events, since we scheduled we might have missed several
+	 * ticks already, also for a heavily scheduling task there is little
+	 * guarantee it'll get a tick in a timely manner.
+	 */
+	if (unlikely(event->hw.interrupts == MAX_INTERRUPTS)) {
+		perf_log_throttle(event, 1);
+		event->hw.interrupts = 0;
+	}
+
 	/*
 	 * The new state must be visible before we turn it on in the hardware:
 	 */
@@ -1596,10 +1611,6 @@ void __perf_event_task_sched_in(struct task_struct *task)
 	}
 }
 
-#define MAX_INTERRUPTS (~0ULL)
-
-static void perf_log_throttle(struct perf_event *event, int enable);
-
 static u64 perf_calculate_period(struct perf_event *event, u64 nsec, u64 count)
 {
 	u64 frequency = event->attr.sample_freq;