Diffstat (limited to 'kernel/sched.c')
-rw-r--r-- | kernel/sched.c | 44 +++++++++++++++++++++++++++++++-------------
1 file changed, 31 insertions(+), 13 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 6cee227b1459..f96be9370b75 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -2286,14 +2286,12 @@ static int select_fallback_rq(int cpu, struct task_struct *p)
 }
 
 /*
- * Called from:
+ * Gets called from 3 sites (exec, fork, wakeup), since it is called without
+ * holding rq->lock we need to ensure ->cpus_allowed is stable, this is done
+ * by:
  *
- *  - fork, @p is stable because it isn't on the tasklist yet
- *
- *  - exec, @p is unstable, retry loop
- *
- *  - wake-up, we serialize ->cpus_allowed against TASK_WAKING so
- *    we should be good.
+ *  exec:           is unstable, retry loop
+ *  fork & wake-up: serialize ->cpus_allowed against TASK_WAKING
  */
 static inline
 int select_task_rq(struct task_struct *p, int sd_flags, int wake_flags)
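The "retry loop" for exec that the rewritten comment mentions deserves a concrete shape. Below is a minimal sketch of that pattern, not the kernel's actual sched_exec(); pick_cpu_for_exec() is a hypothetical name and the revalidation detail is an assumption: choose a cpu with rq->lock unheld, then re-check ->cpus_allowed under the lock and retry if it changed.

/* Illustrative sketch only -- pick_cpu_for_exec() is not a kernel function. */
static int pick_cpu_for_exec(struct task_struct *p)
{
	unsigned long flags;
	struct rq *rq;
	int cpu;

again:
	/* no rq->lock held here, so ->cpus_allowed may change under us */
	cpu = select_task_rq(p, SD_BALANCE_EXEC, 0);

	rq = task_rq_lock(p, &flags);
	if (!cpumask_test_cpu(cpu, &p->cpus_allowed)) {
		/* the mask moved while we were unlocked: try again */
		task_rq_unlock(rq, &flags);
		goto again;
	}
	task_rq_unlock(rq, &flags);

	return cpu;
}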
@@ -2586,9 +2584,6 @@ void sched_fork(struct task_struct *p, int clone_flags)
 	if (p->sched_class->task_fork)
 		p->sched_class->task_fork(p);
 
-#ifdef CONFIG_SMP
-	cpu = select_task_rq(p, SD_BALANCE_FORK, 0);
-#endif
 	set_task_cpu(p, cpu);
 
 #if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
@@ -2618,6 +2613,21 @@ void wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
 {
 	unsigned long flags;
 	struct rq *rq;
+	int cpu __maybe_unused = get_cpu();
+
+#ifdef CONFIG_SMP
+	/*
+	 * Fork balancing, do it here and not earlier because:
+	 *  - cpus_allowed can change in the fork path
+	 *  - any previously selected cpu might disappear through hotplug
+	 *
+	 * We still have TASK_WAKING but PF_STARTING is gone now, meaning
+	 * ->cpus_allowed is stable, we have preemption disabled, meaning
+	 * cpu_online_mask is stable.
+	 */
+	cpu = select_task_rq(p, SD_BALANCE_FORK, 0);
+	set_task_cpu(p, cpu);
+#endif
 
 	rq = task_rq_lock(p, &flags);
 	BUG_ON(p->state != TASK_WAKING);
@@ -2631,6 +2641,7 @@ void wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
 		p->sched_class->task_woken(rq, p);
 #endif
 	task_rq_unlock(rq, &flags);
+	put_cpu();
 }
 
 #ifdef CONFIG_PREEMPT_NOTIFIERS
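Read together with the previous hunk, this put_cpu() closes the preemption-disabled bracket opened by get_cpu() at the top of wake_up_new_task(). A stripped-down sketch of the resulting shape, with the body between the two calls elided and the names taken from the hunks above:

void wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
{
	int cpu __maybe_unused = get_cpu();	/* preemption off from here */

#ifdef CONFIG_SMP
	/*
	 * With preemption disabled, hotplug's stop-machine step cannot
	 * run on this cpu, so cpu_online_mask stays stable.
	 */
	cpu = select_task_rq(p, SD_BALANCE_FORK, 0);
	set_task_cpu(p, cpu);
#endif

	/* ... take rq->lock, activate the task, drop the lock ... */

	put_cpu();				/* preemption back on */
}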
@@ -3687,8 +3698,11 @@ need_resched_nonpreemptible:
 
 	post_schedule(rq);
 
-	if (unlikely(reacquire_kernel_lock(current) < 0))
+	if (unlikely(reacquire_kernel_lock(current) < 0)) {
+		prev = rq->curr;
+		switch_count = &prev->nivcsw;
 		goto need_resched_nonpreemptible;
+	}
 
 	preempt_enable_no_resched();
 	if (need_resched())
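The two added assignments make sense once you see where schedule() initializes those variables. In a simplified skeleton of schedule()'s control flow from this era (most of the body elided; treat it as a sketch, not the full function), prev and switch_count are set under the need_resched: label, while the BKL retry jumps back only to need_resched_nonpreemptible:, below that initialization and possibly after a context switch has replaced rq->curr:

need_resched:
	preempt_disable();
	rq = cpu_rq(smp_processor_id());
	prev = rq->curr;			/* initialized only here */
	switch_count = &prev->nivcsw;

need_resched_nonpreemptible:
	/* ... may context_switch(), after which rq->curr != old prev ... */

	if (unlikely(reacquire_kernel_lock(current) < 0)) {
		prev = rq->curr;		/* refresh the stale values */
		switch_count = &prev->nivcsw;
		goto need_resched_nonpreemptible;
	}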
@@ -5293,14 +5307,18 @@ int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
 	 * the ->cpus_allowed mask from under waking tasks, which would be
 	 * possible when we change rq->lock in ttwu(), so synchronize against
 	 * TASK_WAKING to avoid that.
+	 *
+	 * Make an exception for freshly cloned tasks, since cpuset namespaces
+	 * might move the task about, we have to validate the target in
+	 * wake_up_new_task() anyway since the cpu might have gone away.
 	 */
 again:
-	while (p->state == TASK_WAKING)
+	while (p->state == TASK_WAKING && !(p->flags & PF_STARTING))
 		cpu_relax();
 
 	rq = task_rq_lock(p, &flags);
 
-	if (p->state == TASK_WAKING) {
+	if (p->state == TASK_WAKING && !(p->flags & PF_STARTING)) {
 		task_rq_unlock(rq, &flags);
 		goto again;
 	}
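The PF_STARTING exception works because a freshly cloned task is left in TASK_WAKING by the fork path and stays there until wake_up_new_task() runs, which can be arbitrarily later; spinning on TASK_WAKING alone could therefore wait unboundedly on a task that is not in ttwu() at all. Since wake_up_new_task() now re-runs select_task_rq() and revalidates the target cpu, the spin may safely skip such tasks. The added condition, written out as a hypothetical helper that is not part of the patch:

/* Hypothetical helper, equivalent to the condition added above. */
static inline bool must_wait_for_wakeup(struct task_struct *p)
{
	/*
	 * Spin only on tasks mid-wakeup in ttwu(); a task still in its
	 * clone window (PF_STARTING) has its cpu revalidated by
	 * wake_up_new_task() and need not be waited for.
	 */
	return p->state == TASK_WAKING && !(p->flags & PF_STARTING);
}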