author    Peter Zijlstra <a.p.zijlstra@chello.nl>  2009-04-06 05:45:12 -0400
committer Ingo Molnar <mingo@elte.hu>              2009-04-07 04:49:01 -0400
commit    849691a6cd40270ff5f4a8846d5f6bf8df663ffc
tree      b61157f375905d21bf0facae603e4247e1de9007 /kernel/sched.c
parent    a39d6f2556c4a19f58f538c6aa28bf8faca4fcb8
perf_counter: remove rq->lock usage
Now that all the task runtime clock users are gone, remove the ugly
rq->lock usage from perf counters, which solves the nasty deadlock
seen when a software task clock counter was read from an NMI overflow
context.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
LKML-Reference: <20090406094518.531137582@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
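The deadlock the message refers to comes from taking a non-recursive spinlock in NMI context: if the NMI fires while the interrupted code on the same CPU already holds rq->lock, the removed curr_rq_lock_irq_save() helper spins on that lock forever, and an NMI cannot be masked or preempted to let the holder finish. The following is a minimal userspace analogy, not kernel code and not part of this commit: a signal handler stands in for the NMI, a pthread spinlock stands in for rq->lock, and all names in it are hypothetical; it only illustrates the lock-recursion pattern that the hunk below removes.

/*
 * Hypothetical userspace analogy, NOT kernel code: a signal handler plays
 * the NMI and a pthread spinlock plays rq->lock.  If the "NMI" arrives
 * while the lock is already held by the code it interrupted, an
 * unconditional lock there could never succeed -- the deadlock that the
 * removed curr_rq_lock_irq_save() helper made possible.
 */
#include <pthread.h>
#include <signal.h>
#include <unistd.h>

static pthread_spinlock_t lock;		/* stand-in for rq->lock */

static void nmi_like_handler(int sig)
{
	static const char msg[] = "lock already held: spinning here would deadlock\n";

	(void)sig;
	/* trylock instead of lock: a real NMI has no one to wait for */
	if (pthread_spin_trylock(&lock) != 0) {
		write(STDOUT_FILENO, msg, sizeof(msg) - 1);
		return;
	}
	pthread_spin_unlock(&lock);
}

int main(void)
{
	pthread_spin_init(&lock, PTHREAD_PROCESS_PRIVATE);
	signal(SIGALRM, nmi_like_handler);

	pthread_spin_lock(&lock);	/* interrupted code holds the lock ... */
	raise(SIGALRM);			/* ... when the "NMI" fires */
	pthread_spin_unlock(&lock);
	return 0;
}

In the kernel the fix is simply to stop taking rq->lock on this path now that nothing needs it, which is what the diff below does.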
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--  kernel/sched.c  |  20 --------------------
1 file changed, 0 insertions(+), 20 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index f76e3c0188a2..0de2f814fb18 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -997,26 +997,6 @@ static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
 	}
 }
 
-void curr_rq_lock_irq_save(unsigned long *flags)
-	__acquires(rq->lock)
-{
-	struct rq *rq;
-
-	local_irq_save(*flags);
-	rq = cpu_rq(smp_processor_id());
-	spin_lock(&rq->lock);
-}
-
-void curr_rq_unlock_irq_restore(unsigned long *flags)
-	__releases(rq->lock)
-{
-	struct rq *rq;
-
-	rq = cpu_rq(smp_processor_id());
-	spin_unlock(&rq->lock);
-	local_irq_restore(*flags);
-}
-
 void task_rq_unlock_wait(struct task_struct *p)
 {
 	struct rq *rq = task_rq(p);