diff options
author | Peter Zijlstra <a.p.zijlstra@chello.nl> | 2009-05-23 12:28:55 -0400 |
---|---|---|
committer | Ingo Molnar <mingo@elte.hu> | 2009-05-23 13:37:44 -0400 |
commit | e220d2dcb944c5c488b6855d15ec66d76900514f (patch) | |
tree | bbaa8ead4276d59d6d73d49a28fd6e1e1ed3259a /kernel | |
parent | c6eb13847ba081552d2af644219bddeff7110caf (diff) |
perf_counter: Fix dynamic irq_period logging
We call perf_adjust_freq() from perf_counter_task_tick() which
is called under the rq->lock, causing lock recursion.
However, it's no longer required to be called under the
rq->lock, so remove it from under it.
Also, fix up some related comments.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: John Kacur <jkacur@redhat.com>
LKML-Reference: <20090523163012.476197912@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel')
-rw-r--r-- | kernel/perf_counter.c | 3 | ||||
-rw-r--r-- | kernel/sched.c | 3 |
2 files changed, 4 insertions, 2 deletions
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index c10055416dea..2f410ea2cb39 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c | |||
@@ -2559,7 +2559,8 @@ void perf_counter_munmap(unsigned long addr, unsigned long len, | |||
2559 | } | 2559 | } |
2560 | 2560 | ||
2561 | /* | 2561 | /* |
2562 | * | 2562 | * Log irq_period changes so that analyzing tools can re-normalize the |
2563 | * event flow. | ||
2563 | */ | 2564 | */ |
2564 | 2565 | ||
2565 | static void perf_log_period(struct perf_counter *counter, u64 period) | 2566 | static void perf_log_period(struct perf_counter *counter, u64 period) |
diff --git a/kernel/sched.c b/kernel/sched.c index 4c0d58bce6b2..ad079f07c9c8 100644 --- a/kernel/sched.c +++ b/kernel/sched.c | |||
@@ -4875,9 +4875,10 @@ void scheduler_tick(void) | |||
4875 | update_rq_clock(rq); | 4875 | update_rq_clock(rq); |
4876 | update_cpu_load(rq); | 4876 | update_cpu_load(rq); |
4877 | curr->sched_class->task_tick(rq, curr, 0); | 4877 | curr->sched_class->task_tick(rq, curr, 0); |
4878 | perf_counter_task_tick(curr, cpu); | ||
4879 | spin_unlock(&rq->lock); | 4878 | spin_unlock(&rq->lock); |
4880 | 4879 | ||
4880 | perf_counter_task_tick(curr, cpu); | ||
4881 | |||
4881 | #ifdef CONFIG_SMP | 4882 | #ifdef CONFIG_SMP |
4882 | rq->idle_at_tick = idle_cpu(cpu); | 4883 | rq->idle_at_tick = idle_cpu(cpu); |
4883 | trigger_load_balance(rq, cpu); | 4884 | trigger_load_balance(rq, cpu); |