author     Linus Torvalds <torvalds@linux-foundation.org>	2010-02-01 13:44:36 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>	2010-02-01 13:44:36 -0500
commit     8ea85c2817301adb986b3b86dc20414595b776be
tree       d918bd0c2427a4e907475271863f88530eef4bf1 /kernel
parent     bdd846678387d1313f689c9d341bd9743c9aa451
parent     9d3cfc4c1d17c6d3bc1373e3b954c56b92607755
Merge branch 'sched-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'sched-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  sched: Correct printk whitespace in warning from cpu down task check
  sched: Fix incorrect sanity check
  sched: Fix fork vs hotplug vs cpuset namespaces
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/cpu.c    10
-rw-r--r--  kernel/fork.c   15
-rw-r--r--  kernel/sched.c  39
3 files changed, 32 insertions(+), 32 deletions(-)
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 1c8ddd6ee940..677f25376a38 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -151,13 +151,13 @@ static inline void check_for_tasks(int cpu)
 
 	write_lock_irq(&tasklist_lock);
 	for_each_process(p) {
-		if (task_cpu(p) == cpu &&
+		if (task_cpu(p) == cpu && p->state == TASK_RUNNING &&
 		    (!cputime_eq(p->utime, cputime_zero) ||
 		     !cputime_eq(p->stime, cputime_zero)))
-			printk(KERN_WARNING "Task %s (pid = %d) is on cpu %d\
-				(state = %ld, flags = %x) \n",
+			printk(KERN_WARNING "Task %s (pid = %d) is on cpu %d "
+				"(state = %ld, flags = %x)\n",
 				 p->comm, task_pid_nr(p), cpu,
 				 p->state, p->flags);
 	}
 	write_unlock_irq(&tasklist_lock);
 }
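
The printk fix above replaces a backslash line continuation inside the format string with ordinary adjacent-literal concatenation. A minimal userspace sketch of the difference, under the assumption that printf stands in for printk and the task values are invented:

/*
 * Illustrative only: with the old style, the backslash-newline keeps the
 * string literal going across lines, so the next line's indentation ends up
 * inside the message.  With the new style, adjacent string literals are
 * concatenated by the compiler and the spacing is exactly what was written.
 */
#include <stdio.h>

int main(void)
{
	/* Old style: indentation leaks into the output. */
	printf("Task %s (pid = %d) is on cpu %d\
		(state = %ld, flags = %x) \n",
	       "demo", 1234, 3, 0L, 0x40u);

	/* New style: a clean single-space separator, newline where intended. */
	printf("Task %s (pid = %d) is on cpu %d "
	       "(state = %ld, flags = %x)\n",
	       "demo", 1234, 3, 0L, 0x40u);

	return 0;
}
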
diff --git a/kernel/fork.c b/kernel/fork.c
index 5b2959b3ffc2..f88bd984df35 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1241,21 +1241,6 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 	/* Need tasklist lock for parent etc handling! */
 	write_lock_irq(&tasklist_lock);
 
-	/*
-	 * The task hasn't been attached yet, so its cpus_allowed mask will
-	 * not be changed, nor will its assigned CPU.
-	 *
-	 * The cpus_allowed mask of the parent may have changed after it was
-	 * copied first time - so re-copy it here, then check the child's CPU
-	 * to ensure it is on a valid CPU (and if not, just force it back to
-	 * parent's CPU). This avoids alot of nasty races.
-	 */
-	p->cpus_allowed = current->cpus_allowed;
-	p->rt.nr_cpus_allowed = current->rt.nr_cpus_allowed;
-	if (unlikely(!cpu_isset(task_cpu(p), p->cpus_allowed) ||
-			!cpu_online(task_cpu(p))))
-		set_task_cpu(p, smp_processor_id());
-
 	/* CLONE_PARENT re-uses the old parent */
 	if (clone_flags & (CLONE_PARENT|CLONE_THREAD)) {
 		p->real_parent = current->real_parent;
diff --git a/kernel/sched.c b/kernel/sched.c
index 4508fe7048be..3a8fb30a91b1 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -2320,14 +2320,12 @@ static int select_fallback_rq(int cpu, struct task_struct *p)
 }
 
 /*
- * Called from:
+ * Gets called from 3 sites (exec, fork, wakeup), since it is called without
+ * holding rq->lock we need to ensure ->cpus_allowed is stable, this is done
+ * by:
  *
- *  - fork, @p is stable because it isn't on the tasklist yet
- *
- *  - exec, @p is unstable, retry loop
- *
- *  - wake-up, we serialize ->cpus_allowed against TASK_WAKING so
- *    we should be good.
+ *  exec:             is unstable, retry loop
+ *  fork & wake-up:   serialize ->cpus_allowed against TASK_WAKING
  */
 static inline
 int select_task_rq(struct task_struct *p, int sd_flags, int wake_flags)
@@ -2620,9 +2618,6 @@ void sched_fork(struct task_struct *p, int clone_flags)
 	if (p->sched_class->task_fork)
 		p->sched_class->task_fork(p);
 
-#ifdef CONFIG_SMP
-	cpu = select_task_rq(p, SD_BALANCE_FORK, 0);
-#endif
 	set_task_cpu(p, cpu);
 
 #if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
@@ -2652,6 +2647,21 @@ void wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
 {
 	unsigned long flags;
 	struct rq *rq;
+	int cpu = get_cpu();
+
+#ifdef CONFIG_SMP
+	/*
+	 * Fork balancing, do it here and not earlier because:
+	 *  - cpus_allowed can change in the fork path
+	 *  - any previously selected cpu might disappear through hotplug
+	 *
+	 * We still have TASK_WAKING but PF_STARTING is gone now, meaning
+	 * ->cpus_allowed is stable, we have preemption disabled, meaning
+	 * cpu_online_mask is stable.
+	 */
+	cpu = select_task_rq(p, SD_BALANCE_FORK, 0);
+	set_task_cpu(p, cpu);
+#endif
 
 	rq = task_rq_lock(p, &flags);
 	BUG_ON(p->state != TASK_WAKING);
@@ -2665,6 +2675,7 @@ void wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
 		p->sched_class->task_woken(rq, p);
 #endif
 	task_rq_unlock(rq, &flags);
+	put_cpu();
 }
 
 #ifdef CONFIG_PREEMPT_NOTIFIERS
@@ -7139,14 +7150,18 @@ int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
 	 * the ->cpus_allowed mask from under waking tasks, which would be
 	 * possible when we change rq->lock in ttwu(), so synchronize against
 	 * TASK_WAKING to avoid that.
+	 *
+	 * Make an exception for freshly cloned tasks, since cpuset namespaces
+	 * might move the task about, we have to validate the target in
+	 * wake_up_new_task() anyway since the cpu might have gone away.
 	 */
 again:
-	while (p->state == TASK_WAKING)
+	while (p->state == TASK_WAKING && !(p->flags & PF_STARTING))
 		cpu_relax();
 
 	rq = task_rq_lock(p, &flags);
 
-	if (p->state == TASK_WAKING) {
+	if (p->state == TASK_WAKING && !(p->flags & PF_STARTING)) {
 		task_rq_unlock(rq, &flags);
 		goto again;
 	}
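
Taken together, the fork.c and sched.c hunks defer fork-time CPU selection from copy_process()/sched_fork() to wake_up_new_task(), where cpus_allowed and the online mask can no longer shift underneath the child. A rough userspace sketch of why the later decision point is the safer one; this is a toy model, not kernel code, and fake_cpu_online, allowed and pick_cpu are invented stand-ins for cpu_online_mask, p->cpus_allowed and select_task_rq():

/*
 * Toy model of the race being fixed: a CPU chosen at fork time can be taken
 * offline (hotplug) or dropped from the allowed set (cpuset namespace move)
 * before the child is first woken, so the real pick now happens at wakeup,
 * when both are stable.  All names here are invented for illustration.
 */
#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS 4

static bool fake_cpu_online[NR_CPUS] = { true, true, true, true };

/* Stand-in for select_task_rq(): first CPU that is online and allowed. */
static int pick_cpu(const bool allowed[NR_CPUS])
{
	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		if (fake_cpu_online[cpu] && allowed[cpu])
			return cpu;
	return -1;
}

int main(void)
{
	bool allowed[NR_CPUS] = { false, false, true, true };

	/* Old ordering: pick the CPU early, at sched_fork() time. */
	int early = pick_cpu(allowed);

	/* Before the first wakeup, CPU 2 is hotplugged out and a cpuset
	 * move drops it from the allowed mask. */
	fake_cpu_online[2] = false;
	allowed[2] = false;

	/* New ordering: decide in wake_up_new_task(), after the fork path
	 * can no longer change the mask and with hotplug settled. */
	int late = pick_cpu(allowed);

	printf("early pick: cpu %d, still online: %s\n",
	       early, fake_cpu_online[early] ? "yes" : "no");
	printf("late pick:  cpu %d, still online: %s\n",
	       late, fake_cpu_online[late] ? "yes" : "no");
	return 0;
}

Run as written, the early pick lands on a CPU that is no longer online by wakeup time, while the late pick does not; in the real kernel the late decision is additionally protected by get_cpu()/put_cpu() and the PF_STARTING exception added to set_cpus_allowed_ptr() above.
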