diff options
| -rw-r--r-- | kernel/sched/core.c | 22 |
1 file changed, 22 insertions, 0 deletions
diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 2a906f20fba7..44817c640e99 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c | |||
| @@ -2016,6 +2016,28 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags) | |||
| 2016 | success = 1; /* we're going to change ->state */ | 2016 | success = 1; /* we're going to change ->state */ |
| 2017 | cpu = task_cpu(p); | 2017 | cpu = task_cpu(p); |
| 2018 | 2018 | ||
| 2019 | /* | ||
| 2020 | * Ensure we load p->on_rq _after_ p->state, otherwise it would | ||
| 2021 | * be possible to, falsely, observe p->on_rq == 0 and get stuck | ||
| 2022 | * in smp_cond_load_acquire() below. | ||
| 2023 | * | ||
| 2024 | * sched_ttwu_pending() try_to_wake_up() | ||
| 2025 | * [S] p->on_rq = 1; [L] p->state | ||
| 2026 | * UNLOCK rq->lock -----. | ||
| 2027 | * \ | ||
| 2028 | * +--- RMB | ||
| 2029 | * schedule() / | ||
| 2030 | * LOCK rq->lock -----' | ||
| 2031 | * UNLOCK rq->lock | ||
| 2032 | * | ||
| 2033 | * [task p] | ||
| 2034 | * [S] p->state = UNINTERRUPTIBLE [L] p->on_rq | ||
| 2035 | * | ||
| 2036 | * Pairs with the UNLOCK+LOCK on rq->lock from the | ||
| 2037 | * last wakeup of our task and the schedule that got our task | ||
| 2038 | * current. | ||
| 2039 | */ | ||
| 2040 | smp_rmb(); | ||
| 2019 | if (p->on_rq && ttwu_remote(p, wake_flags)) | 2041 | if (p->on_rq && ttwu_remote(p, wake_flags)) |
| 2020 | goto stat; | 2042 | goto stat; |
| 2021 | 2043 | ||
