Diffstat (limited to 'kernel/sched.c')
 kernel/sched.c | 44 +++++++++++++++++++++++++++++++-------------
 1 file changed, 31 insertions(+), 13 deletions(-)
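The hunks below move fork-time balancing out of sched_fork() and into wake_up_new_task(), bracketed by get_cpu()/put_cpu() so that cpu_online_mask cannot change while the target cpu is chosen. A minimal sketch of that pattern, for illustration only; pick_stable_cpu() is a hypothetical helper and is not part of this patch:

#include <linux/smp.h>
#include <linux/cpumask.h>

/* Hypothetical helper, for illustration only: not part of the patch. */
static int pick_stable_cpu(void)
{
	int cpu = get_cpu();	/* preempt_disable() + smp_processor_id() */

	/*
	 * With preemption disabled, cpu_online_mask cannot change between
	 * get_cpu() and put_cpu(), so the cpu chosen here is valid for the
	 * duration of this critical section.
	 */
	if (!cpu_online(cpu))
		cpu = cpumask_any(cpu_online_mask);

	put_cpu();		/* preempt_enable() */
	return cpu;
}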
diff --git a/kernel/sched.c b/kernel/sched.c
index c535cc4f6428..3a8fb30a91b1 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -2320,14 +2320,12 @@ static int select_fallback_rq(int cpu, struct task_struct *p)
 }
 
 /*
- * Called from:
+ * Gets called from 3 sites (exec, fork, wakeup), since it is called without
+ * holding rq->lock we need to ensure ->cpus_allowed is stable, this is done
+ * by:
  *
- *  - fork, @p is stable because it isn't on the tasklist yet
- *
- *  - exec, @p is unstable, retry loop
- *
- *  - wake-up, we serialize ->cpus_allowed against TASK_WAKING so
- *    we should be good.
+ *  exec:             is unstable, retry loop
+ *  fork & wake-up:   serialize ->cpus_allowed against TASK_WAKING
  */
 static inline
 int select_task_rq(struct task_struct *p, int sd_flags, int wake_flags)
@@ -2620,9 +2618,6 @@ void sched_fork(struct task_struct *p, int clone_flags)
 	if (p->sched_class->task_fork)
 		p->sched_class->task_fork(p);
 
-#ifdef CONFIG_SMP
-	cpu = select_task_rq(p, SD_BALANCE_FORK, 0);
-#endif
 	set_task_cpu(p, cpu);
 
 #if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
@@ -2652,6 +2647,21 @@ void wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
 {
 	unsigned long flags;
 	struct rq *rq;
+	int cpu = get_cpu();
+
+#ifdef CONFIG_SMP
+	/*
+	 * Fork balancing, do it here and not earlier because:
+	 *  - cpus_allowed can change in the fork path
+	 *  - any previously selected cpu might disappear through hotplug
+	 *
+	 * We still have TASK_WAKING but PF_STARTING is gone now, meaning
+	 * ->cpus_allowed is stable, we have preemption disabled, meaning
+	 * cpu_online_mask is stable.
+	 */
+	cpu = select_task_rq(p, SD_BALANCE_FORK, 0);
+	set_task_cpu(p, cpu);
+#endif
 
 	rq = task_rq_lock(p, &flags);
 	BUG_ON(p->state != TASK_WAKING);
@@ -2665,6 +2675,7 @@ void wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
 		p->sched_class->task_woken(rq, p);
 #endif
 	task_rq_unlock(rq, &flags);
+	put_cpu();
 }
 
 #ifdef CONFIG_PREEMPT_NOTIFIERS
@@ -5530,8 +5541,11 @@ need_resched_nonpreemptible:
 
 	post_schedule(rq);
 
-	if (unlikely(reacquire_kernel_lock(current) < 0))
+	if (unlikely(reacquire_kernel_lock(current) < 0)) {
+		prev = rq->curr;
+		switch_count = &prev->nivcsw;
 		goto need_resched_nonpreemptible;
+	}
 
 	preempt_enable_no_resched();
 	if (need_resched())
@@ -7136,14 +7150,18 @@ int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
 	 * the ->cpus_allowed mask from under waking tasks, which would be
 	 * possible when we change rq->lock in ttwu(), so synchronize against
 	 * TASK_WAKING to avoid that.
+	 *
+	 * Make an exception for freshly cloned tasks, since cpuset namespaces
+	 * might move the task about, we have to validate the target in
+	 * wake_up_new_task() anyway since the cpu might have gone away.
 	 */
 again:
-	while (p->state == TASK_WAKING)
+	while (p->state == TASK_WAKING && !(p->flags & PF_STARTING))
 		cpu_relax();
 
 	rq = task_rq_lock(p, &flags);
 
-	if (p->state == TASK_WAKING) {
+	if (p->state == TASK_WAKING && !(p->flags & PF_STARTING)) {
 		task_rq_unlock(rq, &flags);
 		goto again;
 	}
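The last hunk relaxes the TASK_WAKING wait in set_cpus_allowed_ptr() for freshly cloned tasks, since wake_up_new_task() revalidates the target cpu anyway. A condensed sketch of the resulting rule, assuming the 2.6.33-era task flags; wait_needed() is a hypothetical name used only to isolate the condition:

#include <linux/sched.h>

/* Hypothetical wrapper around the condition used twice in the hunk above. */
static bool wait_needed(struct task_struct *p)
{
	/*
	 * Wait for a concurrent wake-up to settle (TASK_WAKING), except for
	 * freshly cloned tasks that still carry PF_STARTING: their target
	 * cpu is revalidated in wake_up_new_task(), so changing
	 * ->cpus_allowed under them is safe.
	 */
	return p->state == TASK_WAKING && !(p->flags & PF_STARTING);
}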