path: root/kernel/sched/sched.h
author	Peter Zijlstra <peterz@infradead.org>	2015-06-11 08:46:54 -0400
committer	Thomas Gleixner <tglx@linutronix.de>	2015-06-18 18:25:27 -0400
commit	cbce1a686700595de65ee363b9b3283ae85d8fc5 (patch)
tree	f5ab615f38d916532fe96df9914aa2df926ba98a /kernel/sched/sched.h
parent	a24fc60d63da2b0b31bf7c876d12a51ed4b778bd (diff)
sched,lockdep: Employ lock pinning
Employ the new lockdep lock pinning annotation to ensure no 'accidental'
lock-breaks happen with rq->lock.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: ktkhai@parallels.com
Cc: rostedt@goodmis.org
Cc: juri.lelli@gmail.com
Cc: pang.xunlei@linaro.org
Cc: oleg@redhat.com
Cc: wanpeng.li@linux.intel.com
Cc: umgwanakikbuti@gmail.com
Link: http://lkml.kernel.org/r/20150611124744.003233193@infradead.org
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
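For background, here is a minimal sketch (not part of this commit; example_lock_break() is a hypothetical helper) of the discipline the pinning annotation enforces, assuming the void-returning lockdep_pin_lock()/lockdep_unpin_lock() API introduced earlier in this series: while a lock is pinned, releasing it triggers a lockdep warning, so any deliberate lock-break must be bracketed by an unpin/pin pair.

/* Hypothetical illustration only, not code from this patch. */
static void example_lock_break(struct rq *rq)
{
	raw_spin_lock(&rq->lock);
	lockdep_pin_lock(&rq->lock);	/* rq->lock must stay held while pinned */

	/* ... work under rq->lock ... */

	lockdep_unpin_lock(&rq->lock);	/* announce the intentional lock-break */
	raw_spin_unlock(&rq->lock);

	/* ... work without rq->lock held ... */

	raw_spin_lock(&rq->lock);
	lockdep_pin_lock(&rq->lock);	/* re-pin for the remainder */

	lockdep_unpin_lock(&rq->lock);
	raw_spin_unlock(&rq->lock);
}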
Diffstat (limited to 'kernel/sched/sched.h')
-rw-r--r--	kernel/sched/sched.h	10
1 file changed, 8 insertions, 2 deletions
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 62949ab06bc2..ef02d11654cd 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1439,8 +1439,10 @@ static inline struct rq *__task_rq_lock(struct task_struct *p)
 	for (;;) {
 		rq = task_rq(p);
 		raw_spin_lock(&rq->lock);
-		if (likely(rq == task_rq(p) && !task_on_rq_migrating(p)))
+		if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) {
+			lockdep_pin_lock(&rq->lock);
 			return rq;
+		}
 		raw_spin_unlock(&rq->lock);
 
 		while (unlikely(task_on_rq_migrating(p)))
@@ -1477,8 +1479,10 @@ static inline struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
 		 * If we observe the new cpu in task_rq_lock, the acquire will
 		 * pair with the WMB to ensure we must then also see migrating.
 		 */
-		if (likely(rq == task_rq(p) && !task_on_rq_migrating(p)))
+		if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) {
+			lockdep_pin_lock(&rq->lock);
 			return rq;
+		}
 		raw_spin_unlock(&rq->lock);
 		raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
 
@@ -1490,6 +1494,7 @@ static inline struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
 static inline void __task_rq_unlock(struct rq *rq)
 	__releases(rq->lock)
 {
+	lockdep_unpin_lock(&rq->lock);
 	raw_spin_unlock(&rq->lock);
 }
 
@@ -1498,6 +1503,7 @@ task_rq_unlock(struct rq *rq, struct task_struct *p, unsigned long *flags)
 	__releases(rq->lock)
 	__releases(p->pi_lock)
 {
+	lockdep_unpin_lock(&rq->lock);
 	raw_spin_unlock(&rq->lock);
 	raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
 }
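With this change, the helpers above pin rq->lock on acquisition and unpin it just before release, so their callers hold a pinned rq->lock for the whole critical section. A caller-side sketch (example_caller() is hypothetical, not from this patch):

/* Hypothetical illustration of what callers now get for free. */
static void example_caller(struct task_struct *p)
{
	unsigned long flags;
	struct rq *rq;

	/* Acquires p->pi_lock and rq->lock, and pins rq->lock. */
	rq = task_rq_lock(p, &flags);

	/* Any code here that drops rq->lock without first calling
	 * lockdep_unpin_lock(&rq->lock) now produces a lockdep warning,
	 * catching 'accidental' lock-breaks. */

	/* Unpins rq->lock, then releases both locks. */
	task_rq_unlock(rq, p, &flags);
}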