author		Linus Torvalds <torvalds@linux-foundation.org>	2011-05-28 15:56:46 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2011-05-28 15:56:46 -0400
commit		08a8b79600101fd6e13dcf05409b330e7f5b0478 (patch)
tree		461cf4061eb33d96966c5c348029bc3b5cb523bf /kernel
parent		1ba4b8cb94e59b17fd0142a509eb583695c36db6 (diff)
parent		1e1b6c511d1b23cb7c3b619d82fc7bd9f620565d (diff)
Merge branch 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
cpuset: Fix cpuset_cpus_allowed_fallback(), don't update tsk->rt.nr_cpus_allowed
sched: Fix ->min_vruntime calculation in dequeue_entity()
sched: Fix ttwu() for __ARCH_WANT_INTERRUPTS_ON_CTXSW
sched: More sched_domain iterations fixes
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/cpuset.c		|  4
-rw-r--r--	kernel/kthread.c	|  4
-rw-r--r--	kernel/sched.c		| 56
-rw-r--r--	kernel/sched_fair.c	|  5
-rw-r--r--	kernel/sched_rt.c	| 10
-rw-r--r--	kernel/sched_stats.h	|  4
6 files changed, 57 insertions(+), 26 deletions(-)
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 1ceeb049c827..9c9b7545c810 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -2190,7 +2190,7 @@ int cpuset_cpus_allowed_fallback(struct task_struct *tsk)
 	rcu_read_lock();
 	cs = task_cs(tsk);
 	if (cs)
-		cpumask_copy(&tsk->cpus_allowed, cs->cpus_allowed);
+		do_set_cpus_allowed(tsk, cs->cpus_allowed);
 	rcu_read_unlock();
 
 	/*
@@ -2217,7 +2217,7 @@ int cpuset_cpus_allowed_fallback(struct task_struct *tsk)
 	 * Like above we can temporary set any mask and rely on
 	 * set_cpus_allowed_ptr() as synchronization point.
 	 */
-	cpumask_copy(&tsk->cpus_allowed, cpu_possible_mask);
+	do_set_cpus_allowed(tsk, cpu_possible_mask);
 	cpu = cpumask_any(cpu_active_mask);
 	}
 
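Note on the cpuset change: both fallback paths open-coded cpumask_copy(&tsk->cpus_allowed, ...) without touching tsk->rt.nr_cpus_allowed, leaving the counter stale. The RT scheduler keys its push/pull migration accounting off that counter, so the two fields must change together; routing through do_set_cpus_allowed() (added to kernel/sched.c below) restores that invariant. A simplified sketch of the dependency, modeled loosely on this era's kernel/sched_rt.c rather than quoted from it:

	/* RT push/pull accounting keys off nr_cpus_allowed (simplified sketch) */
	static void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
	{
		struct task_struct *p = rt_task_of(rt_se);

		rt_rq->rt_nr_total++;
		if (p->rt.nr_cpus_allowed > 1)	/* a stale count miscounts migratory tasks */
			rt_rq->rt_nr_migratory++;

		update_rt_migration(rt_rq);
	}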
diff --git a/kernel/kthread.c b/kernel/kthread.c
index 3b34d2732bce..4ba7cccb4994 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -202,8 +202,8 @@ void kthread_bind(struct task_struct *p, unsigned int cpu)
 		return;
 	}
 
-	p->cpus_allowed = cpumask_of_cpu(cpu);
-	p->rt.nr_cpus_allowed = 1;
+	/* It's safe because the task is inactive. */
+	do_set_cpus_allowed(p, cpumask_of(cpu));
 	p->flags |= PF_THREAD_BOUND;
 }
 EXPORT_SYMBOL(kthread_bind);
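Note on the kthread change: the old code assigned a struct by value (cpumask_of_cpu() returned a cpumask, not a pointer) and hand-set rt.nr_cpus_allowed; the new code passes cpumask_of(cpu), a pointer to a constant single-CPU mask, through the common helper. No locking is needed because a freshly created kthread has never run. A typical caller, sketched here for illustration (worker_fn and start_pinned_worker are hypothetical names, not from this patch):

	#include <linux/kthread.h>
	#include <linux/sched.h>

	static int worker_fn(void *data)
	{
		while (!kthread_should_stop())
			schedule_timeout_interruptible(HZ);	/* periodic work would go here */
		return 0;
	}

	static struct task_struct *start_pinned_worker(int cpu)
	{
		struct task_struct *t = kthread_create(worker_fn, NULL, "pinned/%d", cpu);

		if (!IS_ERR(t)) {
			kthread_bind(t, cpu);	/* safe: t is inactive until woken */
			wake_up_process(t);
		}
		return t;
	}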
diff --git a/kernel/sched.c b/kernel/sched.c
index 5e43e9dc65d1..cbb3a0eee58e 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -2573,7 +2573,26 @@ static void ttwu_queue_remote(struct task_struct *p, int cpu)
 	if (!next)
 		smp_send_reschedule(cpu);
 }
-#endif
+
+#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
+static int ttwu_activate_remote(struct task_struct *p, int wake_flags)
+{
+	struct rq *rq;
+	int ret = 0;
+
+	rq = __task_rq_lock(p);
+	if (p->on_cpu) {
+		ttwu_activate(rq, p, ENQUEUE_WAKEUP);
+		ttwu_do_wakeup(rq, p, wake_flags);
+		ret = 1;
+	}
+	__task_rq_unlock(rq);
+
+	return ret;
+
+}
+#endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */
+#endif /* CONFIG_SMP */
 
 static void ttwu_queue(struct task_struct *p, int cpu)
 {
@@ -2631,17 +2650,17 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
 	while (p->on_cpu) {
 #ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
 		/*
-		 * If called from interrupt context we could have landed in the
-		 * middle of schedule(), in this case we should take care not
-		 * to spin on ->on_cpu if p is current, since that would
-		 * deadlock.
+		 * In case the architecture enables interrupts in
+		 * context_switch(), we cannot busy wait, since that
+		 * would lead to deadlocks when an interrupt hits and
+		 * tries to wake up @prev. So bail and do a complete
+		 * remote wakeup.
 		 */
-		if (p == current) {
-			ttwu_queue(p, cpu);
+		if (ttwu_activate_remote(p, wake_flags))
 			goto stat;
-		}
-#endif
+#else
 		cpu_relax();
+#endif
 	}
 	/*
 	 * Pairs with the smp_wmb() in finish_lock_switch().
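Note on the ttwu changes: when an architecture defines __ARCH_WANT_INTERRUPTS_ON_CTXSW, interrupts are enabled while context_switch() is still running, i.e. while prev->on_cpu is still set. A wakeup of prev from such an interrupt would spin on ->on_cpu forever, because the spinning itself prevents the switch underneath it from completing. The new ttwu_activate_remote() sidesteps the spin by finishing the wakeup under the remote runqueue lock instead. A rough timeline of the deadlock being avoided (a reconstruction for illustration, not code from the patch):

	/*
	 * CPU0, task A                        CPU0, interrupt context
	 * --------------------                -----------------------
	 * schedule()
	 *   prev->on_cpu still set
	 *   context_switch()  <- arch enables IRQs mid-switch
	 *                                     try_to_wake_up(prev)
	 *                                       while (prev->on_cpu)
	 *                                         cpu_relax();  // spins forever: the
	 *                                                       // switch that clears
	 *                                                       // on_cpu was interrupted
	 */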
@@ -5841,7 +5860,7 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu)
 	idle->state = TASK_RUNNING;
 	idle->se.exec_start = sched_clock();
 
-	cpumask_copy(&idle->cpus_allowed, cpumask_of(cpu));
+	do_set_cpus_allowed(idle, cpumask_of(cpu));
 	/*
 	 * We're having a chicken and egg problem, even though we are
 	 * holding rq->lock, the cpu isn't yet set to this cpu so the
@@ -5929,6 +5948,16 @@ static inline void sched_init_granularity(void)
 }
 
 #ifdef CONFIG_SMP
+void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
+{
+	if (p->sched_class && p->sched_class->set_cpus_allowed)
+		p->sched_class->set_cpus_allowed(p, new_mask);
+	else {
+		cpumask_copy(&p->cpus_allowed, new_mask);
+		p->rt.nr_cpus_allowed = cpumask_weight(new_mask);
+	}
+}
+
 /*
  * This is how migration works:
  *
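Note on the new helper: do_set_cpus_allowed() makes the class-hook-vs-open-coded decision in one place, so every writer of ->cpus_allowed also maintains rt.nr_cpus_allowed (the RT class hook updates both itself). The p->sched_class NULL check, which the old set_cpus_allowed_ptr() code did not have, likely guards the early init_idle() call above, which now runs before the idle task's sched_class is assigned. The invariant the default path maintains, expressed as a hedged sanity-check sketch (not in the patch):

	#include <linux/cpumask.h>
	#include <linux/sched.h>

	/* Holds after do_set_cpus_allowed(p, mask) on the non-hook path. */
	static void check_allowed_in_sync(struct task_struct *p)
	{
		WARN_ON(p->rt.nr_cpus_allowed != cpumask_weight(&p->cpus_allowed));
	}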
@@ -5974,12 +6003,7 @@ int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
 		goto out;
 	}
 
-	if (p->sched_class->set_cpus_allowed)
-		p->sched_class->set_cpus_allowed(p, new_mask);
-	else {
-		cpumask_copy(&p->cpus_allowed, new_mask);
-		p->rt.nr_cpus_allowed = cpumask_weight(new_mask);
-	}
+	do_set_cpus_allowed(p, new_mask);
 
 	/* Can the task run on the task's current CPU? If so, we're done */
 	if (cpumask_test_cpu(task_cpu(p), new_mask))
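set_cpus_allowed_ptr() itself is unchanged in behavior; it now delegates to the helper while keeping its own locking and migration logic. For reference, a sketch of an ordinary caller (pin_task_to_two_cpus is a hypothetical name):

	#include <linux/cpumask.h>
	#include <linux/sched.h>

	static int pin_task_to_two_cpus(struct task_struct *p)
	{
		cpumask_var_t mask;
		int ret;

		if (!alloc_cpumask_var(&mask, GFP_KERNEL))
			return -ENOMEM;

		cpumask_clear(mask);
		cpumask_set_cpu(0, mask);
		cpumask_set_cpu(1, mask);
		ret = set_cpus_allowed_ptr(p, mask);	/* funnels into do_set_cpus_allowed() */

		free_cpumask_var(mask);
		return ret;
	}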
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index e32a9b70ee9c..433491c2dc8f 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1076,8 +1076,6 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 	se->on_rq = 0;
 	update_cfs_load(cfs_rq, 0);
 	account_entity_dequeue(cfs_rq, se);
-	update_min_vruntime(cfs_rq);
-	update_cfs_shares(cfs_rq);
 
 	/*
 	 * Normalize the entity after updating the min_vruntime because the
@@ -1086,6 +1084,9 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 	 */
 	if (!(flags & DEQUEUE_SLEEP))
 		se->vruntime -= cfs_rq->min_vruntime;
+
+	update_min_vruntime(cfs_rq);
+	update_cfs_shares(cfs_rq);
 }
 
 /*
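Note on the dequeue_entity() change: with the old ordering, removing the entity from the tree and then calling update_min_vruntime() let cfs_rq->min_vruntime advance past the dequeued entity's vruntime (nothing holds it back anymore). The later "se->vruntime -= cfs_rq->min_vruntime" then subtracted that inflated value, skewing the entity's normalized vruntime and hence its placement when requeued. Normalizing first, against the min_vruntime the entity was actually queued under, removes the skew. A toy userspace illustration with made-up numbers:

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint64_t se_vruntime  = 1000;	/* entity being dequeued; it was leftmost */
		uint64_t min_vruntime = 1000;	/* held back by that entity */

		/* Old order: recompute min_vruntime first; with the entity gone
		 * it can jump to the next waiter, say 5000. */
		uint64_t advanced = 5000;
		printf("old order: %llu\n",
		       (unsigned long long)(se_vruntime - advanced));	/* u64 wrap: huge */

		/* New order: normalize against the pre-dequeue value. */
		printf("new order: %llu\n",
		       (unsigned long long)(se_vruntime - min_vruntime));	/* 0 */
		return 0;
	}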
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 64b2a37c07d0..88725c939e0b 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -1263,6 +1263,7 @@ static int find_lowest_rq(struct task_struct *task)
 	if (!cpumask_test_cpu(this_cpu, lowest_mask))
 		this_cpu = -1; /* Skip this_cpu opt if not among lowest */
 
+	rcu_read_lock();
 	for_each_domain(cpu, sd) {
 		if (sd->flags & SD_WAKE_AFFINE) {
 			int best_cpu;
@@ -1272,15 +1273,20 @@ static int find_lowest_rq(struct task_struct *task)
 			 * remote processor.
 			 */
 			if (this_cpu != -1 &&
-			    cpumask_test_cpu(this_cpu, sched_domain_span(sd)))
+			    cpumask_test_cpu(this_cpu, sched_domain_span(sd))) {
+				rcu_read_unlock();
 				return this_cpu;
+			}
 
 			best_cpu = cpumask_first_and(lowest_mask,
 						     sched_domain_span(sd));
-			if (best_cpu < nr_cpu_ids)
+			if (best_cpu < nr_cpu_ids) {
+				rcu_read_unlock();
 				return best_cpu;
+			}
 		}
 	}
+	rcu_read_unlock();
 
 	/*
 	 * And finally, if there were no matches within the domains
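Note on the sched_rt change: sched-domain trees are attached and detached under RCU, so every for_each_domain() walk must sit inside an RCU read-side critical section, and every early return out of the walk must drop the lock first. The general pattern, as a self-contained sketch (struct item and item_list are illustrative, not kernel types):

	#include <linux/rculist.h>
	#include <linux/types.h>

	struct item {
		struct list_head node;
		int id;
		bool match;
	};

	static LIST_HEAD(item_list);

	static int find_first_match(void)
	{
		struct item *it;
		int ret = -1;

		rcu_read_lock();
		list_for_each_entry_rcu(it, &item_list, node) {
			if (it->match) {
				ret = it->id;
				break;	/* single exit keeps the unlock unmissable */
			}
		}
		rcu_read_unlock();
		return ret;
	}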
diff --git a/kernel/sched_stats.h b/kernel/sched_stats.h
index 48ddf431db0e..331e01bcd026 100644
--- a/kernel/sched_stats.h
+++ b/kernel/sched_stats.h
@@ -37,7 +37,7 @@ static int show_schedstat(struct seq_file *seq, void *v)
 
 #ifdef CONFIG_SMP
 		/* domain-specific stats */
-		preempt_disable();
+		rcu_read_lock();
 		for_each_domain(cpu, sd) {
 			enum cpu_idle_type itype;
 
@@ -64,7 +64,7 @@ static int show_schedstat(struct seq_file *seq, void *v)
 				sd->ttwu_wake_remote, sd->ttwu_move_affine,
 				sd->ttwu_move_balance);
 		}
-		preempt_enable();
+		rcu_read_unlock();
 #endif
 	}
 	kfree(mask_str);
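Note on the sched_stats change: for_each_domain() dereferences rq->sd via rcu_dereference(), so the documented protection is rcu_read_lock(), not preempt_disable(). Under classic non-preemptible RCU the two happen to coincide, but with preemptible RCU readers may be preempted, and disabling preemption neither marks a read-side critical section nor holds off rcu grace periods there. The canonical reader shape, as a sketch (struct config and cur_config are illustrative names):

	#include <linux/rcupdate.h>

	struct config {
		int value;
	};

	static struct config __rcu *cur_config;

	static int read_config_value(void)
	{
		struct config *c;
		int v = 0;

		rcu_read_lock();
		c = rcu_dereference(cur_config);	/* pointer fetch with RCU semantics */
		if (c)
			v = c->value;
		rcu_read_unlock();
		return v;
	}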