diff options
author | Linus Torvalds <torvalds@linux-foundation.org> | 2014-04-19 13:40:51 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2014-04-19 13:40:51 -0400 |
commit | 8f98f6f5d68028018d2362c13b5527c920d9115f (patch) | |
tree | a64304991bba83bf3d59daf047b200c76b85da9c /kernel | |
parent | 8de3f7a70572c55201f02b9db804e8eef13cb840 (diff) | |
parent | a1d9a3231eac4117cadaf4b6bba5b2902c15a33e (diff) |
Merge branch 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull scheduler fixes from Ingo Molnar:
"Two fixes:
- a SCHED_DEADLINE task selection fix
- a sched/numa related lockdep splat fix"
* 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
sched: Check for stop task appearance when balancing happens
sched/numa: Fix task_numa_free() lockdep splat
Diffstat (limited to 'kernel')
-rw-r--r-- | kernel/sched/deadline.c | 11 | ||||
-rw-r--r-- | kernel/sched/fair.c | 16 | ||||
-rw-r--r-- | kernel/sched/rt.c | 7 | ||||
-rw-r--r-- | kernel/sched/sched.h | 9 |
4 files changed, 32 insertions, 11 deletions
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c index 27ef40925525..b08095786cb8 100644 --- a/kernel/sched/deadline.c +++ b/kernel/sched/deadline.c | |||
@@ -1021,8 +1021,17 @@ struct task_struct *pick_next_task_dl(struct rq *rq, struct task_struct *prev) | |||
1021 | 1021 | ||
1022 | dl_rq = &rq->dl; | 1022 | dl_rq = &rq->dl; |
1023 | 1023 | ||
1024 | if (need_pull_dl_task(rq, prev)) | 1024 | if (need_pull_dl_task(rq, prev)) { |
1025 | pull_dl_task(rq); | 1025 | pull_dl_task(rq); |
1026 | /* ||
1027 | * pull_dl_task() can drop (and re-acquire) rq->lock; this | ||
1028 | * means a stop task can slip in, in which case we need to | ||
1029 | * re-start task selection. | ||
1030 | */ | ||
1031 | if (rq->stop && rq->stop->on_rq) | ||
1032 | return RETRY_TASK; | ||
1033 | } | ||
1034 | |||
1026 | /* | 1035 | /* |
1027 | * When prev is DL, we may throttle it in put_prev_task(). | 1036 | * When prev is DL, we may throttle it in put_prev_task(). |
1028 | * So, we update time before we check for dl_nr_running. | 1037 | * So, we update time before we check for dl_nr_running. |
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 7e9bd0b1fa9e..7570dd969c28 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c | |||
@@ -1497,7 +1497,7 @@ static void task_numa_placement(struct task_struct *p) | |||
1497 | /* If the task is part of a group prevent parallel updates to group stats */ | 1497 | /* If the task is part of a group prevent parallel updates to group stats */ |
1498 | if (p->numa_group) { | 1498 | if (p->numa_group) { |
1499 | group_lock = &p->numa_group->lock; | 1499 | group_lock = &p->numa_group->lock; |
1500 | spin_lock(group_lock); | 1500 | spin_lock_irq(group_lock); |
1501 | } | 1501 | } |
1502 | 1502 | ||
1503 | /* Find the node with the highest number of faults */ | 1503 | /* Find the node with the highest number of faults */ |
@@ -1572,7 +1572,7 @@ static void task_numa_placement(struct task_struct *p) | |||
1572 | } | 1572 | } |
1573 | } | 1573 | } |
1574 | 1574 | ||
1575 | spin_unlock(group_lock); | 1575 | spin_unlock_irq(group_lock); |
1576 | } | 1576 | } |
1577 | 1577 | ||
1578 | /* Preferred node as the node with the most faults */ | 1578 | /* Preferred node as the node with the most faults */ |
@@ -1677,7 +1677,8 @@ static void task_numa_group(struct task_struct *p, int cpupid, int flags, | |||
1677 | if (!join) | 1677 | if (!join) |
1678 | return; | 1678 | return; |
1679 | 1679 | ||
1680 | double_lock(&my_grp->lock, &grp->lock); | 1680 | BUG_ON(irqs_disabled()); |
1681 | double_lock_irq(&my_grp->lock, &grp->lock); | ||
1681 | 1682 | ||
1682 | for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++) { | 1683 | for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++) { |
1683 | my_grp->faults[i] -= p->numa_faults_memory[i]; | 1684 | my_grp->faults[i] -= p->numa_faults_memory[i]; |
@@ -1691,7 +1692,7 @@ static void task_numa_group(struct task_struct *p, int cpupid, int flags, | |||
1691 | grp->nr_tasks++; | 1692 | grp->nr_tasks++; |
1692 | 1693 | ||
1693 | spin_unlock(&my_grp->lock); | 1694 | spin_unlock(&my_grp->lock); |
1694 | spin_unlock(&grp->lock); | 1695 | spin_unlock_irq(&grp->lock); |
1695 | 1696 | ||
1696 | rcu_assign_pointer(p->numa_group, grp); | 1697 | rcu_assign_pointer(p->numa_group, grp); |
1697 | 1698 | ||
@@ -1710,14 +1711,14 @@ void task_numa_free(struct task_struct *p) | |||
1710 | void *numa_faults = p->numa_faults_memory; | 1711 | void *numa_faults = p->numa_faults_memory; |
1711 | 1712 | ||
1712 | if (grp) { | 1713 | if (grp) { |
1713 | spin_lock(&grp->lock); | 1714 | spin_lock_irq(&grp->lock); |
1714 | for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++) | 1715 | for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++) |
1715 | grp->faults[i] -= p->numa_faults_memory[i]; | 1716 | grp->faults[i] -= p->numa_faults_memory[i]; |
1716 | grp->total_faults -= p->total_numa_faults; | 1717 | grp->total_faults -= p->total_numa_faults; |
1717 | 1718 | ||
1718 | list_del(&p->numa_entry); | 1719 | list_del(&p->numa_entry); |
1719 | grp->nr_tasks--; | 1720 | grp->nr_tasks--; |
1720 | spin_unlock(&grp->lock); | 1721 | spin_unlock_irq(&grp->lock); |
1721 | rcu_assign_pointer(p->numa_group, NULL); | 1722 | rcu_assign_pointer(p->numa_group, NULL); |
1722 | put_numa_group(grp); | 1723 | put_numa_group(grp); |
1723 | } | 1724 | } |
@@ -6727,7 +6728,8 @@ static int idle_balance(struct rq *this_rq) | |||
6727 | out: | 6728 | out: |
6728 | /* Is there a task of a high priority class? */ | 6729 | /* Is there a task of a high priority class? */ |
6729 | if (this_rq->nr_running != this_rq->cfs.h_nr_running && | 6730 | if (this_rq->nr_running != this_rq->cfs.h_nr_running && |
6730 | (this_rq->dl.dl_nr_running || | 6731 | ((this_rq->stop && this_rq->stop->on_rq) || |
6732 | this_rq->dl.dl_nr_running || | ||
6731 | (this_rq->rt.rt_nr_running && !rt_rq_throttled(&this_rq->rt)))) | 6733 | (this_rq->rt.rt_nr_running && !rt_rq_throttled(&this_rq->rt)))) |
6732 | pulled_task = -1; | 6734 | pulled_task = -1; |
6733 | 6735 | ||
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c index d8cdf1618551..bd2267ad404f 100644 --- a/kernel/sched/rt.c +++ b/kernel/sched/rt.c | |||
@@ -1362,10 +1362,11 @@ pick_next_task_rt(struct rq *rq, struct task_struct *prev) | |||
1362 | pull_rt_task(rq); | 1362 | pull_rt_task(rq); |
1363 | /* | 1363 | /* |
1364 | * pull_rt_task() can drop (and re-acquire) rq->lock; this | 1364 | * pull_rt_task() can drop (and re-acquire) rq->lock; this |
1365 | * means a dl task can slip in, in which case we need to | 1365 | * means a dl or stop task can slip in, in which case we need |
1366 | * re-start task selection. | 1366 | * to re-start task selection. |
1367 | */ | 1367 | */ |
1368 | if (unlikely(rq->dl.dl_nr_running)) | 1368 | if (unlikely((rq->stop && rq->stop->on_rq) || |
1369 | rq->dl.dl_nr_running)) | ||
1369 | return RETRY_TASK; | 1370 | return RETRY_TASK; |
1370 | } | 1371 | } |
1371 | 1372 | ||
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index c9007f28d3a2..456e492a3dca 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h | |||
@@ -1385,6 +1385,15 @@ static inline void double_lock(spinlock_t *l1, spinlock_t *l2) | |||
1385 | spin_lock_nested(l2, SINGLE_DEPTH_NESTING); | 1385 | spin_lock_nested(l2, SINGLE_DEPTH_NESTING); |
1386 | } | 1386 | } |
1387 | 1387 | ||
1388 | static inline void double_lock_irq(spinlock_t *l1, spinlock_t *l2) | ||
1389 | { | ||
1390 | if (l1 > l2) | ||
1391 | swap(l1, l2); | ||
1392 | |||
1393 | spin_lock_irq(l1); | ||
1394 | spin_lock_nested(l2, SINGLE_DEPTH_NESTING); | ||
1395 | } | ||
1396 | |||
1388 | static inline void double_raw_lock(raw_spinlock_t *l1, raw_spinlock_t *l2) | 1397 | static inline void double_raw_lock(raw_spinlock_t *l1, raw_spinlock_t *l2) |
1389 | { | 1398 | { |
1390 | if (l1 > l2) | 1399 | if (l1 > l2) |