Diffstat (limited to 'kernel/sched/core.c')
-rw-r--r--	kernel/sched/core.c	38
1 file changed, 34 insertions(+), 4 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 0869b20fba81..e053c31d96da 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -951,8 +951,13 @@ struct migration_arg {
 static struct rq *__migrate_task(struct rq *rq, struct rq_flags *rf,
 				 struct task_struct *p, int dest_cpu)
 {
-	if (unlikely(!cpu_active(dest_cpu)))
-		return rq;
+	if (p->flags & PF_KTHREAD) {
+		if (unlikely(!cpu_online(dest_cpu)))
+			return rq;
+	} else {
+		if (unlikely(!cpu_active(dest_cpu)))
+			return rq;
+	}
 
 	/* Affinity changed (again). */
 	if (!cpumask_test_cpu(dest_cpu, &p->cpus_allowed))
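
The first hunk lets kernel threads be migrated to CPUs that are online but not yet active, while ordinary tasks remain restricted to active CPUs. A minimal sketch of that check factored into a standalone helper follows; the helper name is hypothetical and not part of this patch:

/*
 * Illustrative helper (hypothetical name): is @dest_cpu a valid
 * migration target for @p under the rules introduced above?
 */
static bool dest_cpu_is_valid(struct task_struct *p, int dest_cpu)
{
	/* Per-CPU kthreads may need to run on a CPU that is online
	 * but not yet active, e.g. during CPU bring-up. */
	if (p->flags & PF_KTHREAD)
		return cpu_online(dest_cpu);

	/* Everything else stays on fully active CPUs. */
	return cpu_active(dest_cpu);
}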
@@ -2635,6 +2640,16 @@ static struct rq *finish_task_switch(struct task_struct *prev)
 	prev_state = prev->state;
 	vtime_task_switch(prev);
 	perf_event_task_sched_in(prev, current);
+	/*
+	 * The membarrier system call requires a full memory barrier
+	 * after storing to rq->curr, before going back to user-space.
+	 *
+	 * TODO: This smp_mb__after_unlock_lock can go away if PPC end
+	 * up adding a full barrier to switch_mm(), or we should figure
+	 * out if a smp_mb__after_unlock_lock is really the proper API
+	 * to use.
+	 */
+	smp_mb__after_unlock_lock();
 	finish_lock_switch(rq, prev);
 	finish_arch_post_lock_switch();
 
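
The barrier added here backs the guarantee of the membarrier(2) system call: user-space must be able to rely on a full memory barrier between the scheduler's store to rq->curr and the task's return to user-space. A minimal user-space sketch of the call this pairs with, assuming a kernel that exposes MEMBARRIER_CMD_SHARED (there is no glibc wrapper, so it goes through syscall(2)):

#include <linux/membarrier.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Issue a system-wide membarrier: every thread running at the time of
 * the call passes through a full memory barrier before it returns. */
static int membarrier(int cmd, int flags)
{
	return syscall(__NR_membarrier, cmd, flags);
}

int main(void)
{
	/* Acts as a barrier on behalf of all currently running threads. */
	return membarrier(MEMBARRIER_CMD_SHARED, 0) ? 1 : 0;
}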
@@ -3324,6 +3339,21 @@ static void __sched notrace __schedule(bool preempt)
 	if (likely(prev != next)) {
 		rq->nr_switches++;
 		rq->curr = next;
+		/*
+		 * The membarrier system call requires each architecture
+		 * to have a full memory barrier after updating
+		 * rq->curr, before returning to user-space. For TSO
+		 * (e.g. x86), the architecture must provide its own
+		 * barrier in switch_mm(). For weakly ordered machines
+		 * for which spin_unlock() acts as a full memory
+		 * barrier, finish_lock_switch() in common code takes
+		 * care of this barrier. For weakly ordered machines for
+		 * which spin_unlock() acts as a RELEASE barrier (only
+		 * arm64 and PowerPC), arm64 has a full barrier in
+		 * switch_to(), and PowerPC has
+		 * smp_mb__after_unlock_lock() before
+		 * finish_lock_switch().
+		 */
 		++*switch_count;
 
 		trace_sched_switch(preempt, prev, next);
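
The comment spells out which mechanism supplies the required barrier on each class of architecture; the PowerPC case is the one that needs smp_mb__after_unlock_lock(), which upgrades an UNLOCK followed by a LOCK into a full barrier. Roughly how that primitive is defined, as a simplified sketch (the exact location and config guard vary between kernel versions):

#ifdef CONFIG_PPC
/* spin_unlock()/spin_lock() is only RELEASE/ACQUIRE on PowerPC, so an
 * explicit smp_mb() is needed to make the pair fully ordered. */
#define smp_mb__after_unlock_lock()	smp_mb()
#else
/* Elsewhere, UNLOCK followed by LOCK already acts as a full memory
 * barrier, so this is a no-op. */
#define smp_mb__after_unlock_lock()	do { } while (0)
#endif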
@@ -3352,8 +3382,8 @@ void __noreturn do_task_dead(void)
 	 * To avoid it, we have to wait for releasing tsk->pi_lock which
 	 * is held by try_to_wake_up()
 	 */
-	smp_mb();
-	raw_spin_unlock_wait(&current->pi_lock);
+	raw_spin_lock_irq(&current->pi_lock);
+	raw_spin_unlock_irq(&current->pi_lock);
 
 	/* Causes final put_task_struct in finish_task_switch(): */
 	__set_current_state(TASK_DEAD);
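
The last hunk replaces smp_mb() plus raw_spin_unlock_wait() with a plain lock/unlock of pi_lock: the acquisition cannot succeed until a concurrent try_to_wake_up() holding pi_lock has released it, which gives the same wait-plus-ordering effect without the subtle unlock_wait semantics. A small user-space analogue of the lock/unlock-as-wait idiom, using pthreads purely for illustration (this is not kernel code):

#include <pthread.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/* Wait for any thread currently inside the critical section to leave:
 * lock() blocks until the holder unlocks, and the acquire/release pair
 * orders our subsequent accesses after the holder's. */
static void wait_for_holder(void)
{
	pthread_mutex_lock(&lock);
	pthread_mutex_unlock(&lock);
}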