author     Linus Torvalds <torvalds@linux-foundation.org>  2009-02-02 22:26:29 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>  2009-02-02 22:26:29 -0500
commit     31c952dcf83d5b0fd57b514cbe8a1664647c26e7 (patch)
tree       fc424cd78e818327a3949c2707c8487506ef1408 /kernel
parent     9e6235e997bf091326b2f3ac92217c2ac2e27eb5 (diff)
parent     3d398703ef06fd97b4c28c86b580546d5b57e7b7 (diff)
Merge branch 'sched-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'sched-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
sched_rt: don't use first_cpu on cpumask created with cpumask_and
sched: fix buddie group latency
sched: clear buddies more aggressively
sched: symmetric sync vs avg_overlap
sched: fix sync wakeups
cpuset: fix possible deadlock in async_rebuild_sched_domains
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/cpuset.c      13
-rw-r--r--  kernel/sched.c       10
-rw-r--r--  kernel/sched_fair.c  32
-rw-r--r--  kernel/sched_rt.c     4
4 files changed, 45 insertions, 14 deletions
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index a85678865c5e..f76db9dcaa05 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -61,6 +61,14 @@
 #include <linux/cgroup.h>
 
 /*
+ * Workqueue for cpuset related tasks.
+ *
+ * Using kevent workqueue may cause deadlock when memory_migrate
+ * is set. So we create a separate workqueue thread for cpuset.
+ */
+static struct workqueue_struct *cpuset_wq;
+
+/*
  * Tracks how many cpusets are currently defined in system.
  * When there is only one cpuset (the root cpuset) we can
  * short circuit some hooks.
@@ -831,7 +839,7 @@ static DECLARE_WORK(rebuild_sched_domains_work, do_rebuild_sched_domains);
  */
 static void async_rebuild_sched_domains(void)
 {
-	schedule_work(&rebuild_sched_domains_work);
+	queue_work(cpuset_wq, &rebuild_sched_domains_work);
 }
 
 /*
@@ -2111,6 +2119,9 @@ void __init cpuset_init_smp(void)
 
 	hotcpu_notifier(cpuset_track_online_cpus, 0);
 	hotplug_memory_notifier(cpuset_track_online_nodes, 10);
+
+	cpuset_wq = create_singlethread_workqueue("cpuset");
+	BUG_ON(!cpuset_wq);
 }
 
 /**
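The cpuset change above uses a standard 2.6-era remedy: if a work item can block waiting on something that itself needs the shared kevent workqueue, running it there can deadlock, so the subsystem gets its own single-threaded workqueue. Below is a minimal sketch of that pattern; the workqueue calls (DECLARE_WORK(), create_singlethread_workqueue(), queue_work()) are the real 2.6 API, while example_wq, do_rebuild() and the other example_* names are hypothetical.

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/workqueue.h>

static struct workqueue_struct *example_wq;	/* private worker thread */

static void do_rebuild(struct work_struct *work)
{
	/* long-running work that may wait on things which other
	 * items on the shared kevent thread could be blocked on */
}
static DECLARE_WORK(rebuild_work, do_rebuild);

static void async_rebuild(void)
{
	/* queue to the private thread, never the shared kevent one */
	queue_work(example_wq, &rebuild_work);
}

static int __init example_init(void)
{
	example_wq = create_singlethread_workqueue("example");
	if (!example_wq)
		return -ENOMEM;
	return 0;
}

In-tree, example_init() would be wired up from an existing init path, much as cpuset_init_smp() creates cpuset_wq above.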
diff --git a/kernel/sched.c b/kernel/sched.c
index 52bbf1c842a8..242d0d47a70d 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -2266,6 +2266,16 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync)
 	if (!sched_feat(SYNC_WAKEUPS))
 		sync = 0;
 
+	if (!sync) {
+		if (current->se.avg_overlap < sysctl_sched_migration_cost &&
+		    p->se.avg_overlap < sysctl_sched_migration_cost)
+			sync = 1;
+	} else {
+		if (current->se.avg_overlap >= sysctl_sched_migration_cost ||
+		    p->se.avg_overlap >= sysctl_sched_migration_cost)
+			sync = 0;
+	}
+
 #ifdef CONFIG_SMP
 	if (sched_feat(LB_WAKEUP_UPDATE)) {
 		struct sched_domain *sd;
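This hunk centralises and symmetrises the avg_overlap heuristic previously scattered through sched_fair.c (removed in the hunks below): a wakeup not flagged sync is promoted to sync when both waker and wakee historically run for less than sysctl_sched_migration_cost before blocking, and a flagged sync wakeup is demoted when either of them runs longer. A standalone model of just that decision, with MIGRATION_COST standing in for the sysctl (whose default in this era was 500000 ns) and plain integers standing in for the sched_entity fields:

#include <stdio.h>

/* stand-in for sysctl_sched_migration_cost (default 500000 ns) */
#define MIGRATION_COST 500000ULL

/*
 * Adjusted sync hint for current waking p: promote to sync when
 * both tasks historically overlap little with their wakers,
 * demote when either overlaps a lot.
 */
static int adjust_sync(unsigned long long curr_overlap,
		       unsigned long long p_overlap, int sync)
{
	if (!sync) {
		if (curr_overlap < MIGRATION_COST &&
		    p_overlap < MIGRATION_COST)
			sync = 1;
	} else {
		if (curr_overlap >= MIGRATION_COST ||
		    p_overlap >= MIGRATION_COST)
			sync = 0;
	}
	return sync;
}

int main(void)
{
	/* short-overlap pair: promoted to sync */
	printf("%d\n", adjust_sync(100000, 200000, 0));	/* 1 */
	/* long-overlap waker: claimed sync is distrusted */
	printf("%d\n", adjust_sync(900000, 200000, 1));	/* 0 */
	return 0;
}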
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 5cc1c162044f..a7e50ba185ac 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -719,7 +719,7 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int wakeup)
 	__enqueue_entity(cfs_rq, se);
 }
 
-static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
+static void __clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
 	if (cfs_rq->last == se)
 		cfs_rq->last = NULL;
@@ -728,6 +728,12 @@ static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
 		cfs_rq->next = NULL;
 }
 
+static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
+{
+	for_each_sched_entity(se)
+		__clear_buddies(cfs_rq_of(se), se);
+}
+
 static void
 dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int sleep)
 {
@@ -768,8 +774,14 @@ check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
 
 	ideal_runtime = sched_slice(cfs_rq, curr);
 	delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
-	if (delta_exec > ideal_runtime)
+	if (delta_exec > ideal_runtime) {
 		resched_task(rq_of(cfs_rq)->curr);
+		/*
+		 * The current task ran long enough, ensure it doesn't get
+		 * re-elected due to buddy favours.
+		 */
+		clear_buddies(cfs_rq, curr);
+	}
 }
 
 static void
@@ -1179,20 +1191,15 @@ wake_affine(struct sched_domain *this_sd, struct rq *this_rq,
 		  int idx, unsigned long load, unsigned long this_load,
 		  unsigned int imbalance)
 {
-	struct task_struct *curr = this_rq->curr;
-	struct task_group *tg;
 	unsigned long tl = this_load;
 	unsigned long tl_per_task;
+	struct task_group *tg;
 	unsigned long weight;
 	int balanced;
 
 	if (!(this_sd->flags & SD_WAKE_AFFINE) || !sched_feat(AFFINE_WAKEUPS))
 		return 0;
 
-	if (sync && (curr->se.avg_overlap > sysctl_sched_migration_cost ||
-			p->se.avg_overlap > sysctl_sched_migration_cost))
-		sync = 0;
-
 	/*
 	 * If sync wakeup then subtract the (maximum possible)
 	 * effect of the currently running task from the load
@@ -1419,9 +1426,7 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int sync)
 	if (!sched_feat(WAKEUP_PREEMPT))
 		return;
 
-	if (sched_feat(WAKEUP_OVERLAP) && (sync ||
-			(se->avg_overlap < sysctl_sched_migration_cost &&
-			 pse->avg_overlap < sysctl_sched_migration_cost))) {
+	if (sched_feat(WAKEUP_OVERLAP) && sync) {
 		resched_task(curr);
 		return;
 	}
@@ -1452,6 +1457,11 @@ static struct task_struct *pick_next_task_fair(struct rq *rq)
 
 	do {
 		se = pick_next_entity(cfs_rq);
+		/*
+		 * If se was a buddy, clear it so that it will have to earn
+		 * the favour again.
+		 */
+		__clear_buddies(cfs_rq, se);
 		set_next_entity(cfs_rq, se);
 		cfs_rq = group_cfs_rq(se);
 	} while (cfs_rq);
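Two of the buddy fixes come together here: clear_buddies() now walks for_each_sched_entity(), because with group scheduling an entity can be the ->next or ->last buddy of a cfs_rq at every level of the hierarchy, and pick_next_task_fair() clears the buddy hint at each level it descends, so a picked entity has to earn the favour again. A toy model of the hierarchy walk, with a deliberately simplified struct layout (the real sched_entity carries many more fields, and the parent link only exists under CONFIG_FAIR_GROUP_SCHED):

#include <stddef.h>

/* deliberately simplified layouts */
struct cfs_rq;
struct sched_entity {
	struct sched_entity *parent;	/* NULL at the top level */
	struct cfs_rq *cfs_rq;		/* runqueue this entity is queued on */
};
struct cfs_rq {
	struct sched_entity *next;	/* wakeup buddy */
	struct sched_entity *last;	/* last-ran buddy */
};

/* drop the buddy hints of one runqueue if they point at se */
static void __clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	if (cfs_rq->last == se)
		cfs_rq->last = NULL;
	if (cfs_rq->next == se)
		cfs_rq->next = NULL;
}

/* clear se at every level of the hierarchy, as the kernel's
 * for_each_sched_entity() loop does */
static void clear_buddies(struct sched_entity *se)
{
	for (; se; se = se->parent)
		__clear_buddies(se->cfs_rq, se);
}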
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 954e1a81b796..bac1061cea2f 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -968,8 +968,8 @@ static inline int pick_optimal_cpu(int this_cpu, cpumask_t *mask)
 	if ((this_cpu != -1) && cpu_isset(this_cpu, *mask))
 		return this_cpu;
 
-	first = first_cpu(*mask);
-	if (first != NR_CPUS)
+	first = cpumask_first(mask);
+	if (first < nr_cpu_ids)
 		return first;
 
 	return -1;
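The sched_rt fix addresses a sentinel mismatch: cpumask_and() only writes the first nr_cpu_ids bits of its destination, so on a mask where NR_CPUS exceeds the number of possible CPUs the remaining bits are never initialized; first_cpu(), which scans all NR_CPUS bits, could then find a garbage bit, pass the `first != NR_CPUS` test, and return a nonexistent CPU. cpumask_first() scans only nr_cpu_ids bits and reports an empty mask as nr_cpu_ids, hence the new `first < nr_cpu_ids` check. A sketch of the corrected check in isolation; first_candidate() is a hypothetical helper, the cpumask calls are the real API:

#include <linux/cpumask.h>

/* hypothetical helper: lowest set CPU in *mask, or -1 if none */
static int first_candidate(const struct cpumask *mask)
{
	/* scans only the first nr_cpu_ids bits, so bits that
	 * cpumask_and() never wrote are never looked at */
	unsigned int first = cpumask_first(mask);

	/* empty masks come back as nr_cpu_ids, not NR_CPUS */
	return first < nr_cpu_ids ? (int)first : -1;
}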