-rw-r--r--	kernel/sched/core.c     | 42
-rw-r--r--	kernel/sched/deadline.c |  8
-rw-r--r--	kernel/sched/fair.c     | 11
-rw-r--r--	kernel/sched/rt.c       |  8
-rw-r--r--	kernel/sched/sched.h    | 10
5 files changed, 71 insertions, 8 deletions
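The patch below applies lock pinning to rq->lock: lockdep_pin_lock() is called wherever the lock must stay held across a region, and an explicit lockdep_unpin_lock()/lockdep_pin_lock() pair brackets every call that is allowed to drop and re-take it. A minimal sketch of that pattern, for orientation only (the function and callee names here are made up; only the locking calls come from the patch):

static void example_pinned_section(struct rq *rq)
{
	raw_spin_lock(&rq->lock);
	lockdep_pin_lock(&rq->lock);		/* from here on, releasing rq->lock triggers a lockdep warning */

	/* ... work that relies on rq->lock being held continuously ... */

	lockdep_unpin_lock(&rq->lock);		/* we know the callee may drop and re-take rq->lock */
	helper_that_may_drop_rq_lock(rq);	/* hypothetical stand-in for pull_rt_task(), idle_balance(), ... */
	lockdep_pin_lock(&rq->lock);		/* rq->lock is held again; re-arm the check */

	lockdep_unpin_lock(&rq->lock);		/* deliberate final release */
	raw_spin_unlock(&rq->lock);
}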
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 1ddc129c5f66..c74191aa4e6a 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1201,8 +1201,15 @@ int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
 		stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg);
 		tlb_migrate_finish(p->mm);
 		return 0;
-	} else if (task_on_rq_queued(p))
+	} else if (task_on_rq_queued(p)) {
+		/*
+		 * OK, since we're going to drop the lock immediately
+		 * afterwards anyway.
+		 */
+		lockdep_unpin_lock(&rq->lock);
 		rq = move_queued_task(rq, p, dest_cpu);
+		lockdep_pin_lock(&rq->lock);
+	}
 out:
 	task_rq_unlock(rq, p, &flags);
 
@@ -1562,6 +1569,8 @@ out:
 static inline
 int select_task_rq(struct task_struct *p, int cpu, int sd_flags, int wake_flags)
 {
+	lockdep_assert_held(&p->pi_lock);
+
 	if (p->nr_cpus_allowed > 1)
 		cpu = p->sched_class->select_task_rq(p, cpu, sd_flags, wake_flags);
 
@@ -1652,9 +1661,12 @@ ttwu_do_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
 #ifdef CONFIG_SMP
 	if (p->sched_class->task_woken) {
 		/*
-		 * XXX can drop rq->lock; most likely ok.
+		 * Our task @p is fully woken up and running; so its safe to
+		 * drop the rq->lock, hereafter rq is only used for statistics.
 		 */
+		lockdep_unpin_lock(&rq->lock);
 		p->sched_class->task_woken(rq, p);
+		lockdep_pin_lock(&rq->lock);
 	}
 
 	if (rq->idle_stamp) {
@@ -1674,6 +1686,8 @@ ttwu_do_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
 static void
 ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags)
 {
+	lockdep_assert_held(&rq->lock);
+
 #ifdef CONFIG_SMP
 	if (p->sched_contributes_to_load)
 		rq->nr_uninterruptible--;
@@ -1718,6 +1732,7 @@ void sched_ttwu_pending(void)
 		return;
 
 	raw_spin_lock_irqsave(&rq->lock, flags);
+	lockdep_pin_lock(&rq->lock);
 
 	while (llist) {
 		p = llist_entry(llist, struct task_struct, wake_entry);
@@ -1725,6 +1740,7 @@ void sched_ttwu_pending(void)
 		ttwu_do_activate(rq, p, 0);
 	}
 
+	lockdep_unpin_lock(&rq->lock);
 	raw_spin_unlock_irqrestore(&rq->lock, flags);
 }
 
@@ -1821,7 +1837,9 @@ static void ttwu_queue(struct task_struct *p, int cpu)
 #endif
 
 	raw_spin_lock(&rq->lock);
+	lockdep_pin_lock(&rq->lock);
 	ttwu_do_activate(rq, p, 0);
+	lockdep_unpin_lock(&rq->lock);
 	raw_spin_unlock(&rq->lock);
 }
 
@@ -1916,9 +1934,17 @@ static void try_to_wake_up_local(struct task_struct *p)
 	lockdep_assert_held(&rq->lock);
 
 	if (!raw_spin_trylock(&p->pi_lock)) {
+		/*
+		 * This is OK, because current is on_cpu, which avoids it being
+		 * picked for load-balance and preemption/IRQs are still
+		 * disabled avoiding further scheduler activity on it and we've
+		 * not yet picked a replacement task.
+		 */
+		lockdep_unpin_lock(&rq->lock);
 		raw_spin_unlock(&rq->lock);
 		raw_spin_lock(&p->pi_lock);
 		raw_spin_lock(&rq->lock);
+		lockdep_pin_lock(&rq->lock);
 	}
 
 	if (!(p->state & TASK_NORMAL))
@@ -2530,6 +2556,7 @@ context_switch(struct rq *rq, struct task_struct *prev,
 	 * of the scheduler it's an obvious special-case), so we
 	 * do an early lockdep release here:
 	 */
+	lockdep_unpin_lock(&rq->lock);
 	spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
 
 	context_tracking_task_switch(prev, next);
@@ -2953,6 +2980,7 @@ static void __sched __schedule(void)
 	 */
 	smp_mb__before_spinlock();
 	raw_spin_lock_irq(&rq->lock);
+	lockdep_pin_lock(&rq->lock);
 
 	rq->clock_skip_update <<= 1; /* promote REQ to ACT */
 
@@ -2995,8 +3023,10 @@ static void __sched __schedule(void)
 
 		rq = context_switch(rq, prev, next); /* unlocks the rq */
 		cpu = cpu_of(rq);
-	} else
+	} else {
+		lockdep_unpin_lock(&rq->lock);
 		raw_spin_unlock_irq(&rq->lock);
+	}
 
 	balance_callback(rq);
 }
@@ -5065,6 +5095,11 @@ static void migrate_tasks(struct rq *dead_rq)
 		if (rq->nr_running == 1)
 			break;
 
+		/*
+		 * Ensure rq->lock covers the entire task selection
+		 * until the migration.
+		 */
+		lockdep_pin_lock(&rq->lock);
 		next = pick_next_task(rq, &fake_task);
 		BUG_ON(!next);
 		next->sched_class->put_prev_task(rq, next);
@@ -5072,6 +5107,7 @@ static void migrate_tasks(struct rq *dead_rq)
 		/* Find suitable destination for @next, with force if needed. */
 		dest_cpu = select_fallback_rq(dead_rq->cpu, next);
 
+		lockdep_unpin_lock(&rq->lock);
 		rq = __migrate_task(rq, next, dest_cpu);
 		if (rq != dead_rq) {
 			raw_spin_unlock(&rq->lock);
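With the core.c hunks above, __schedule() pins rq->lock right after acquiring it, and the pin is only dropped in context_switch() or just before the final unlock. Any code reached from pick_next_task() that may legitimately drop rq->lock therefore has to unpin around that call, which is what the deadline, rt and fair hunks below add. Schematically (the predicate and pull helper here are placeholders for the real need_pull_*_task()/pull_*_task() and idle_balance() calls in the following files):

	/* inside a sched class ->pick_next_task(), rq->lock held and pinned by __schedule() */
	if (class_needs_to_pull(rq, prev)) {	/* placeholder predicate */
		lockdep_unpin_lock(&rq->lock);	/* the pull path drops and re-takes rq->lock */
		class_pull_tasks(rq);		/* placeholder for pull_dl_task()/pull_rt_task()/idle_balance() */
		lockdep_pin_lock(&rq->lock);	/* rq->lock is held again when the pull returns */
	}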
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index 6318f43971c9..e8146415a688 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -1151,7 +1151,15 @@ struct task_struct *pick_next_task_dl(struct rq *rq, struct task_struct *prev)
 	dl_rq = &rq->dl;
 
 	if (need_pull_dl_task(rq, prev)) {
+		/*
+		 * This is OK, because current is on_cpu, which avoids it being
+		 * picked for load-balance and preemption/IRQs are still
+		 * disabled avoiding further scheduler activity on it and we're
+		 * being very careful to re-start the picking loop.
+		 */
+		lockdep_unpin_lock(&rq->lock);
 		pull_dl_task(rq);
+		lockdep_pin_lock(&rq->lock);
 		/*
 		 * pull_rt_task() can drop (and re-acquire) rq->lock; this
 		 * means a stop task can slip in, in which case we need to
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 7210ae848909..509ef63d0d6f 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -5392,7 +5392,15 @@ simple:
 	return p;
 
 idle:
+	/*
+	 * This is OK, because current is on_cpu, which avoids it being picked
+	 * for load-balance and preemption/IRQs are still disabled avoiding
+	 * further scheduler activity on it and we're being very careful to
+	 * re-start the picking loop.
+	 */
+	lockdep_unpin_lock(&rq->lock);
 	new_tasks = idle_balance(rq);
+	lockdep_pin_lock(&rq->lock);
 	/*
 	 * Because idle_balance() releases (and re-acquires) rq->lock, it is
 	 * possible for any higher priority task to appear. In that case we
@@ -7426,9 +7434,6 @@ static int idle_balance(struct rq *this_rq)
 		goto out;
 	}
 
-	/*
-	 * Drop the rq->lock, but keep IRQ/preempt disabled.
-	 */
 	raw_spin_unlock(&this_rq->lock);
 
 	update_blocked_averages(this_cpu);
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 460f85888b74..0d193a243e96 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -1478,7 +1478,15 @@ pick_next_task_rt(struct rq *rq, struct task_struct *prev)
 	struct rt_rq *rt_rq = &rq->rt;
 
 	if (need_pull_rt_task(rq, prev)) {
+		/*
+		 * This is OK, because current is on_cpu, which avoids it being
+		 * picked for load-balance and preemption/IRQs are still
+		 * disabled avoiding further scheduler activity on it and we're
+		 * being very careful to re-start the picking loop.
+		 */
+		lockdep_unpin_lock(&rq->lock);
 		pull_rt_task(rq);
+		lockdep_pin_lock(&rq->lock);
 		/*
 		 * pull_rt_task() can drop (and re-acquire) rq->lock; this
 		 * means a dl or stop task can slip in, in which case we need
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 62949ab06bc2..ef02d11654cd 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1439,8 +1439,10 @@ static inline struct rq *__task_rq_lock(struct task_struct *p)
 	for (;;) {
 		rq = task_rq(p);
 		raw_spin_lock(&rq->lock);
-		if (likely(rq == task_rq(p) && !task_on_rq_migrating(p)))
+		if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) {
+			lockdep_pin_lock(&rq->lock);
 			return rq;
+		}
 		raw_spin_unlock(&rq->lock);
 
 		while (unlikely(task_on_rq_migrating(p)))
@@ -1477,8 +1479,10 @@ static inline struct rq *task_rq_lock(struct task_struct *p, unsigned long *flag
 		 * If we observe the new cpu in task_rq_lock, the acquire will
 		 * pair with the WMB to ensure we must then also see migrating.
 		 */
-		if (likely(rq == task_rq(p) && !task_on_rq_migrating(p)))
+		if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) {
+			lockdep_pin_lock(&rq->lock);
 			return rq;
+		}
 		raw_spin_unlock(&rq->lock);
 		raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
 
@@ -1490,6 +1494,7 @@ static inline struct rq *task_rq_lock(struct task_struct *p, unsigned long *flag
 static inline void __task_rq_unlock(struct rq *rq)
 	__releases(rq->lock)
 {
+	lockdep_unpin_lock(&rq->lock);
 	raw_spin_unlock(&rq->lock);
 }
 
@@ -1498,6 +1503,7 @@ task_rq_unlock(struct rq *rq, struct task_struct *p, unsigned long *flags)
 	__releases(rq->lock)
 	__releases(p->pi_lock)
 {
+	lockdep_unpin_lock(&rq->lock);
 	raw_spin_unlock(&rq->lock);
 	raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
 }
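After the sched.h hunks, callers that take and release a task's runqueue lock through __task_rq_lock()/task_rq_lock() and the matching unlock helpers get the pin and unpin for free; only code that releases rq->lock by some other path needs a manual lockdep_unpin_lock() first. A hypothetical caller, purely for illustration:

static void example_task_rq_user(struct task_struct *p)
{
	unsigned long flags;
	struct rq *rq;

	rq = task_rq_lock(p, &flags);	/* returns with p->pi_lock and rq->lock held, rq->lock pinned */

	/*
	 * Inspect or update p's runqueue state here; dropping rq->lock
	 * without unpinning it first would now produce a lockdep splat.
	 */

	task_rq_unlock(rq, p, &flags);	/* unpins rq->lock, then releases both locks */
}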