author	Peter Zijlstra <peterz@infradead.org>	2015-08-01 13:25:08 -0400
committer	Ingo Molnar <mingo@kernel.org>	2016-05-05 03:23:59 -0400
commit	e7904a28f5331c21d17af638cb477c83662e3cb6 (patch)
tree	4fd496dcdf41f61964125682664f57d50f3527e2 /kernel/sched
parent	eb58075149b7f0300ff19142e6245fe75db2a081 (diff)
locking/lockdep, sched/core: Implement a better lock pinning scheme
The problem with the existing lock pinning is that each pin is of value 1; this means you can simply unpin if you know it's pinned, without having any extra information.

This scheme generates a random (16 bit) cookie for each pin and requires this same cookie to unpin. This means you have to keep the cookie in context.

No objsize difference for !LOCKDEP kernels.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
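For illustration, the usage pattern after this patch looks roughly like the sketch below. It is condensed from the hunks that follow, not literal kernel code; the "drop rq->lock temporarily" branch is a placeholder standing in for call sites such as idle_balance() or push_dl_task().

	struct pin_cookie cookie;

	raw_spin_lock(&rq->lock);
	cookie = lockdep_pin_lock(&rq->lock);		/* pin rq->lock, keep the cookie */

	/* ... rq->lock must remain held here ... */

	if (need_to_drop_lock_temporarily) {		/* placeholder condition */
		lockdep_unpin_lock(&rq->lock, cookie);	/* unpinning requires the same cookie */
		/* drop and re-acquire rq->lock ... */
		lockdep_repin_lock(&rq->lock, cookie);	/* re-pin with the old cookie */
	}

	lockdep_unpin_lock(&rq->lock, cookie);
	raw_spin_unlock(&rq->lock);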
Diffstat (limited to 'kernel/sched')
-rw-r--r--	kernel/sched/core.c	79
-rw-r--r--	kernel/sched/deadline.c	11
-rw-r--r--	kernel/sched/fair.c	6
-rw-r--r--	kernel/sched/idle_task.c	2
-rw-r--r--	kernel/sched/rt.c	6
-rw-r--r--	kernel/sched/sched.h	8
-rw-r--r--	kernel/sched/stop_task.c	2
7 files changed, 61 insertions(+), 53 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 7c7db60115b4..71c5a753e6e9 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -184,7 +184,7 @@ struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf)
 		rq = task_rq(p);
 		raw_spin_lock(&rq->lock);
 		if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) {
-			lockdep_pin_lock(&rq->lock);
+			rf->cookie = lockdep_pin_lock(&rq->lock);
 			return rq;
 		}
 		raw_spin_unlock(&rq->lock);
@@ -224,7 +224,7 @@ struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf)
 		 * pair with the WMB to ensure we must then also see migrating.
 		 */
 		if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) {
-			lockdep_pin_lock(&rq->lock);
+			rf->cookie = lockdep_pin_lock(&rq->lock);
 			return rq;
 		}
 		raw_spin_unlock(&rq->lock);
@@ -1193,9 +1193,9 @@ static int __set_cpus_allowed_ptr(struct task_struct *p,
 		 * OK, since we're going to drop the lock immediately
 		 * afterwards anyway.
 		 */
-		lockdep_unpin_lock(&rq->lock);
+		lockdep_unpin_lock(&rq->lock, rf.cookie);
 		rq = move_queued_task(rq, p, dest_cpu);
-		lockdep_pin_lock(&rq->lock);
+		lockdep_repin_lock(&rq->lock, rf.cookie);
 	}
 out:
 	task_rq_unlock(rq, p, &rf);
@@ -1669,8 +1669,8 @@ static inline void ttwu_activate(struct rq *rq, struct task_struct *p, int en_fl
 /*
  * Mark the task runnable and perform wakeup-preemption.
  */
-static void
-ttwu_do_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
+static void ttwu_do_wakeup(struct rq *rq, struct task_struct *p, int wake_flags,
+			   struct pin_cookie cookie)
 {
 	check_preempt_curr(rq, p, wake_flags);
 	p->state = TASK_RUNNING;
@@ -1682,9 +1682,9 @@ ttwu_do_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
 		 * Our task @p is fully woken up and running; so its safe to
 		 * drop the rq->lock, hereafter rq is only used for statistics.
 		 */
-		lockdep_unpin_lock(&rq->lock);
+		lockdep_unpin_lock(&rq->lock, cookie);
 		p->sched_class->task_woken(rq, p);
-		lockdep_pin_lock(&rq->lock);
+		lockdep_repin_lock(&rq->lock, cookie);
 	}
 
 	if (rq->idle_stamp) {
@@ -1702,7 +1702,8 @@ ttwu_do_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
 }
 
 static void
-ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags)
+ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags,
+		 struct pin_cookie cookie)
 {
 	lockdep_assert_held(&rq->lock);
 
@@ -1712,7 +1713,7 @@ ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags)
 #endif
 
 	ttwu_activate(rq, p, ENQUEUE_WAKEUP | ENQUEUE_WAKING);
-	ttwu_do_wakeup(rq, p, wake_flags);
+	ttwu_do_wakeup(rq, p, wake_flags, cookie);
 }
 
 /*
@@ -1731,7 +1732,7 @@ static int ttwu_remote(struct task_struct *p, int wake_flags)
 	if (task_on_rq_queued(p)) {
 		/* check_preempt_curr() may use rq clock */
 		update_rq_clock(rq);
-		ttwu_do_wakeup(rq, p, wake_flags);
+		ttwu_do_wakeup(rq, p, wake_flags, rf.cookie);
 		ret = 1;
 	}
 	__task_rq_unlock(rq, &rf);
@@ -1744,6 +1745,7 @@ void sched_ttwu_pending(void)
 {
 	struct rq *rq = this_rq();
 	struct llist_node *llist = llist_del_all(&rq->wake_list);
+	struct pin_cookie cookie;
 	struct task_struct *p;
 	unsigned long flags;
 
@@ -1751,15 +1753,15 @@ void sched_ttwu_pending(void)
 		return;
 
 	raw_spin_lock_irqsave(&rq->lock, flags);
-	lockdep_pin_lock(&rq->lock);
+	cookie = lockdep_pin_lock(&rq->lock);
 
 	while (llist) {
 		p = llist_entry(llist, struct task_struct, wake_entry);
 		llist = llist_next(llist);
-		ttwu_do_activate(rq, p, 0);
+		ttwu_do_activate(rq, p, 0, cookie);
 	}
 
-	lockdep_unpin_lock(&rq->lock);
+	lockdep_unpin_lock(&rq->lock, cookie);
 	raw_spin_unlock_irqrestore(&rq->lock, flags);
 }
 
@@ -1846,6 +1848,7 @@ bool cpus_share_cache(int this_cpu, int that_cpu)
 static void ttwu_queue(struct task_struct *p, int cpu)
 {
 	struct rq *rq = cpu_rq(cpu);
+	struct pin_cookie cookie;
 
 #if defined(CONFIG_SMP)
 	if (sched_feat(TTWU_QUEUE) && !cpus_share_cache(smp_processor_id(), cpu)) {
@@ -1856,9 +1859,9 @@ static void ttwu_queue(struct task_struct *p, int cpu)
 #endif
 
 	raw_spin_lock(&rq->lock);
-	lockdep_pin_lock(&rq->lock);
-	ttwu_do_activate(rq, p, 0);
-	lockdep_unpin_lock(&rq->lock);
+	cookie = lockdep_pin_lock(&rq->lock);
+	ttwu_do_activate(rq, p, 0, cookie);
+	lockdep_unpin_lock(&rq->lock, cookie);
 	raw_spin_unlock(&rq->lock);
 }
 
@@ -2055,7 +2058,7 @@ out:
  * ensure that this_rq() is locked, @p is bound to this_rq() and not
  * the current task.
  */
-static void try_to_wake_up_local(struct task_struct *p)
+static void try_to_wake_up_local(struct task_struct *p, struct pin_cookie cookie)
 {
 	struct rq *rq = task_rq(p);
 
@@ -2072,11 +2075,11 @@ static void try_to_wake_up_local(struct task_struct *p)
 		 * disabled avoiding further scheduler activity on it and we've
 		 * not yet picked a replacement task.
 		 */
-		lockdep_unpin_lock(&rq->lock);
+		lockdep_unpin_lock(&rq->lock, cookie);
 		raw_spin_unlock(&rq->lock);
 		raw_spin_lock(&p->pi_lock);
 		raw_spin_lock(&rq->lock);
-		lockdep_pin_lock(&rq->lock);
+		lockdep_repin_lock(&rq->lock, cookie);
 	}
 
 	if (!(p->state & TASK_NORMAL))
@@ -2087,7 +2090,7 @@ static void try_to_wake_up_local(struct task_struct *p)
 	if (!task_on_rq_queued(p))
 		ttwu_activate(rq, p, ENQUEUE_WAKEUP);
 
-	ttwu_do_wakeup(rq, p, 0);
+	ttwu_do_wakeup(rq, p, 0, cookie);
 	if (schedstat_enabled())
 		ttwu_stat(p, smp_processor_id(), 0);
 out:
@@ -2515,9 +2518,9 @@ void wake_up_new_task(struct task_struct *p)
 		 * Nothing relies on rq->lock after this, so its fine to
 		 * drop it.
 		 */
-		lockdep_unpin_lock(&rq->lock);
+		lockdep_unpin_lock(&rq->lock, rf.cookie);
 		p->sched_class->task_woken(rq, p);
-		lockdep_pin_lock(&rq->lock);
+		lockdep_repin_lock(&rq->lock, rf.cookie);
 	}
 #endif
 	task_rq_unlock(rq, p, &rf);
@@ -2782,7 +2785,7 @@ asmlinkage __visible void schedule_tail(struct task_struct *prev)
  */
 static __always_inline struct rq *
 context_switch(struct rq *rq, struct task_struct *prev,
-	       struct task_struct *next)
+	       struct task_struct *next, struct pin_cookie cookie)
 {
 	struct mm_struct *mm, *oldmm;
 
@@ -2814,7 +2817,7 @@ context_switch(struct rq *rq, struct task_struct *prev,
 	 * of the scheduler it's an obvious special-case), so we
 	 * do an early lockdep release here:
 	 */
-	lockdep_unpin_lock(&rq->lock);
+	lockdep_unpin_lock(&rq->lock, cookie);
 	spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
 
 	/* Here we just switch the register state and the stack. */
@@ -3154,7 +3157,7 @@ static inline void schedule_debug(struct task_struct *prev)
  * Pick up the highest-prio task:
  */
 static inline struct task_struct *
-pick_next_task(struct rq *rq, struct task_struct *prev)
+pick_next_task(struct rq *rq, struct task_struct *prev, struct pin_cookie cookie)
 {
 	const struct sched_class *class = &fair_sched_class;
 	struct task_struct *p;
@@ -3165,20 +3168,20 @@ pick_next_task(struct rq *rq, struct task_struct *prev)
 	 */
 	if (likely(prev->sched_class == class &&
 		   rq->nr_running == rq->cfs.h_nr_running)) {
-		p = fair_sched_class.pick_next_task(rq, prev);
+		p = fair_sched_class.pick_next_task(rq, prev, cookie);
 		if (unlikely(p == RETRY_TASK))
 			goto again;
 
 		/* assumes fair_sched_class->next == idle_sched_class */
 		if (unlikely(!p))
-			p = idle_sched_class.pick_next_task(rq, prev);
+			p = idle_sched_class.pick_next_task(rq, prev, cookie);
 
 		return p;
 	}
 
 again:
 	for_each_class(class) {
-		p = class->pick_next_task(rq, prev);
+		p = class->pick_next_task(rq, prev, cookie);
 		if (p) {
 			if (unlikely(p == RETRY_TASK))
 				goto again;
@@ -3232,6 +3235,7 @@ static void __sched notrace __schedule(bool preempt)
 {
 	struct task_struct *prev, *next;
 	unsigned long *switch_count;
+	struct pin_cookie cookie;
 	struct rq *rq;
 	int cpu;
 
@@ -3265,7 +3269,7 @@ static void __sched notrace __schedule(bool preempt)
 	 */
 	smp_mb__before_spinlock();
 	raw_spin_lock(&rq->lock);
-	lockdep_pin_lock(&rq->lock);
+	cookie = lockdep_pin_lock(&rq->lock);
 
 	rq->clock_skip_update <<= 1; /* promote REQ to ACT */
 
@@ -3287,7 +3291,7 @@ static void __sched notrace __schedule(bool preempt)
 
 			to_wakeup = wq_worker_sleeping(prev);
 			if (to_wakeup)
-				try_to_wake_up_local(to_wakeup);
+				try_to_wake_up_local(to_wakeup, cookie);
 		}
 	}
 	switch_count = &prev->nvcsw;
@@ -3296,7 +3300,7 @@ static void __sched notrace __schedule(bool preempt)
 	if (task_on_rq_queued(prev))
 		update_rq_clock(rq);
 
-	next = pick_next_task(rq, prev);
+	next = pick_next_task(rq, prev, cookie);
 	clear_tsk_need_resched(prev);
 	clear_preempt_need_resched();
 	rq->clock_skip_update = 0;
@@ -3307,9 +3311,9 @@ static void __sched notrace __schedule(bool preempt)
 		++*switch_count;
 
 		trace_sched_switch(preempt, prev, next);
-		rq = context_switch(rq, prev, next); /* unlocks the rq */
+		rq = context_switch(rq, prev, next, cookie); /* unlocks the rq */
 	} else {
-		lockdep_unpin_lock(&rq->lock);
+		lockdep_unpin_lock(&rq->lock, cookie);
 		raw_spin_unlock_irq(&rq->lock);
 	}
 
@@ -5392,6 +5396,7 @@ static void migrate_tasks(struct rq *dead_rq)
 {
 	struct rq *rq = dead_rq;
 	struct task_struct *next, *stop = rq->stop;
+	struct pin_cookie cookie;
 	int dest_cpu;
 
 	/*
@@ -5423,8 +5428,8 @@ static void migrate_tasks(struct rq *dead_rq)
 		/*
 		 * pick_next_task assumes pinned rq->lock.
 		 */
-		lockdep_pin_lock(&rq->lock);
-		next = pick_next_task(rq, &fake_task);
+		cookie = lockdep_pin_lock(&rq->lock);
+		next = pick_next_task(rq, &fake_task, cookie);
 		BUG_ON(!next);
 		next->sched_class->put_prev_task(rq, next);
 
@@ -5437,7 +5442,7 @@ static void migrate_tasks(struct rq *dead_rq)
 		 * because !cpu_active at this point, which means load-balance
 		 * will not interfere. Also, stop-machine.
 		 */
-		lockdep_unpin_lock(&rq->lock);
+		lockdep_unpin_lock(&rq->lock, cookie);
 		raw_spin_unlock(&rq->lock);
 		raw_spin_lock(&next->pi_lock);
 		raw_spin_lock(&rq->lock);
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index 738e3c84dfe1..ba53a87bb978 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -670,9 +670,9 @@ static enum hrtimer_restart dl_task_timer(struct hrtimer *timer)
 		 * Nothing relies on rq->lock after this, so its safe to drop
 		 * rq->lock.
 		 */
-		lockdep_unpin_lock(&rq->lock);
+		lockdep_unpin_lock(&rq->lock, rf.cookie);
 		push_dl_task(rq);
-		lockdep_pin_lock(&rq->lock);
+		lockdep_repin_lock(&rq->lock, rf.cookie);
 	}
 #endif
 
@@ -1125,7 +1125,8 @@ static struct sched_dl_entity *pick_next_dl_entity(struct rq *rq,
 	return rb_entry(left, struct sched_dl_entity, rb_node);
 }
 
-struct task_struct *pick_next_task_dl(struct rq *rq, struct task_struct *prev)
+struct task_struct *
+pick_next_task_dl(struct rq *rq, struct task_struct *prev, struct pin_cookie cookie)
 {
 	struct sched_dl_entity *dl_se;
 	struct task_struct *p;
@@ -1140,9 +1141,9 @@ struct task_struct *pick_next_task_dl(struct rq *rq, struct task_struct *prev)
 		 * disabled avoiding further scheduler activity on it and we're
 		 * being very careful to re-start the picking loop.
 		 */
-		lockdep_unpin_lock(&rq->lock);
+		lockdep_unpin_lock(&rq->lock, cookie);
 		pull_dl_task(rq);
-		lockdep_pin_lock(&rq->lock);
+		lockdep_repin_lock(&rq->lock, cookie);
 		/*
 		 * pull_rt_task() can drop (and re-acquire) rq->lock; this
 		 * means a stop task can slip in, in which case we need to
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index b8a33abce650..91395e1552ae 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -5542,7 +5542,7 @@ preempt:
 }
 
 static struct task_struct *
-pick_next_task_fair(struct rq *rq, struct task_struct *prev)
+pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct pin_cookie cookie)
 {
 	struct cfs_rq *cfs_rq = &rq->cfs;
 	struct sched_entity *se;
@@ -5655,9 +5655,9 @@ idle:
 	 * further scheduler activity on it and we're being very careful to
 	 * re-start the picking loop.
 	 */
-	lockdep_unpin_lock(&rq->lock);
+	lockdep_unpin_lock(&rq->lock, cookie);
 	new_tasks = idle_balance(rq);
-	lockdep_pin_lock(&rq->lock);
+	lockdep_repin_lock(&rq->lock, cookie);
 	/*
 	 * Because idle_balance() releases (and re-acquires) rq->lock, it is
 	 * possible for any higher priority task to appear. In that case we
diff --git a/kernel/sched/idle_task.c b/kernel/sched/idle_task.c
index 47ce94931f1b..2ce5458bbe1d 100644
--- a/kernel/sched/idle_task.c
+++ b/kernel/sched/idle_task.c
@@ -24,7 +24,7 @@ static void check_preempt_curr_idle(struct rq *rq, struct task_struct *p, int fl
 }
 
 static struct task_struct *
-pick_next_task_idle(struct rq *rq, struct task_struct *prev)
+pick_next_task_idle(struct rq *rq, struct task_struct *prev, struct pin_cookie cookie)
 {
 	put_prev_task(rq, prev);
 
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 19e13060fcd5..68deaf901a12 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -1524,7 +1524,7 @@ static struct task_struct *_pick_next_task_rt(struct rq *rq)
 }
 
 static struct task_struct *
-pick_next_task_rt(struct rq *rq, struct task_struct *prev)
+pick_next_task_rt(struct rq *rq, struct task_struct *prev, struct pin_cookie cookie)
 {
 	struct task_struct *p;
 	struct rt_rq *rt_rq = &rq->rt;
@@ -1536,9 +1536,9 @@ pick_next_task_rt(struct rq *rq, struct task_struct *prev)
 		 * disabled avoiding further scheduler activity on it and we're
 		 * being very careful to re-start the picking loop.
 		 */
-		lockdep_unpin_lock(&rq->lock);
+		lockdep_unpin_lock(&rq->lock, cookie);
 		pull_rt_task(rq);
-		lockdep_pin_lock(&rq->lock);
+		lockdep_repin_lock(&rq->lock, cookie);
 		/*
 		 * pull_rt_task() can drop (and re-acquire) rq->lock; this
 		 * means a dl or stop task can slip in, in which case we need
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index a5eecb1e5e4b..0b6a838e9e73 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1202,7 +1202,8 @@ struct sched_class {
 	 * tasks.
 	 */
 	struct task_struct * (*pick_next_task) (struct rq *rq,
-						struct task_struct *prev);
+						struct task_struct *prev,
+						struct pin_cookie cookie);
 	void (*put_prev_task) (struct rq *rq, struct task_struct *p);
 
 #ifdef CONFIG_SMP
@@ -1453,6 +1454,7 @@ static inline void sched_avg_update(struct rq *rq) { }
 
 struct rq_flags {
 	unsigned long flags;
+	struct pin_cookie cookie;
 };
 
 struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf)
@@ -1464,7 +1466,7 @@ struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf)
 static inline void __task_rq_unlock(struct rq *rq, struct rq_flags *rf)
 	__releases(rq->lock)
 {
-	lockdep_unpin_lock(&rq->lock);
+	lockdep_unpin_lock(&rq->lock, rf->cookie);
 	raw_spin_unlock(&rq->lock);
 }
 
@@ -1473,7 +1475,7 @@ task_rq_unlock(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
 	__releases(rq->lock)
 	__releases(p->pi_lock)
 {
-	lockdep_unpin_lock(&rq->lock);
+	lockdep_unpin_lock(&rq->lock, rf->cookie);
 	raw_spin_unlock(&rq->lock);
 	raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags);
 }
diff --git a/kernel/sched/stop_task.c b/kernel/sched/stop_task.c
index cbc67da10954..604297a08b3a 100644
--- a/kernel/sched/stop_task.c
+++ b/kernel/sched/stop_task.c
@@ -24,7 +24,7 @@ check_preempt_curr_stop(struct rq *rq, struct task_struct *p, int flags)
 }
 
 static struct task_struct *
-pick_next_task_stop(struct rq *rq, struct task_struct *prev)
+pick_next_task_stop(struct rq *rq, struct task_struct *prev, struct pin_cookie cookie)
 {
 	struct task_struct *stop = rq->stop;
 