diff options
| author | Linus Torvalds <torvalds@linux-foundation.org> | 2018-05-13 13:53:08 -0400 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2018-05-13 13:53:08 -0400 |
| commit | 66e1c94db3cd4e094de66a6be68c3ab6d17e0c52 (patch) | |
| tree | 920eecb13e08704407ce3aa9739699366b3ef130 /include/linux | |
| parent | 86a4ac433b927a610c09aa6cfb1926d94a6b37b7 (diff) | |
| parent | e0f6d1a526b6adfa9ca3b336b83ece0eed345033 (diff) | |
Merge branch 'x86-pti-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86/pti updates from Thomas Gleixner:
"A mixed bag of fixes and updates for the ghosts which are hunting us.
The scheduler fixes have been pulled into that branch to avoid
conflicts.
- A set of fixes to address a kthread_parkme() race which caused lost
wakeups and loss of state.
- A deadlock fix for stop_machine() solved by moving the wakeups
outside of the stopper_lock held region.
- A set of Spectre V1 array access restrictions. The possible
problematic spots were discovered by Dan Carpenter's new checks in
smatch.
- Removal of an unused file which was forgotten when the rest of that
functionality was removed"
* 'x86-pti-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
x86/vdso: Remove unused file
perf/x86/cstate: Fix possible Spectre-v1 indexing for pkg_msr
perf/x86/msr: Fix possible Spectre-v1 indexing in the MSR driver
perf/x86: Fix possible Spectre-v1 indexing for x86_pmu::event_map()
perf/x86: Fix possible Spectre-v1 indexing for hw_perf_event cache_*
perf/core: Fix possible Spectre-v1 indexing for ->aux_pages[]
sched/autogroup: Fix possible Spectre-v1 indexing for sched_prio_to_weight[]
sched/core: Fix possible Spectre-v1 indexing for sched_prio_to_weight[]
sched/core: Introduce set_special_state()
kthread, sched/wait: Fix kthread_parkme() completion issue
kthread, sched/wait: Fix kthread_parkme() wait-loop
sched/fair: Fix the update of blocked load when newly idle
stop_machine, sched: Fix migrate_swap() vs. active_balance() deadlock
Diffstat (limited to 'include/linux')
| -rw-r--r-- | include/linux/kthread.h | 1 | ||||
| -rw-r--r-- | include/linux/sched.h | 50 | ||||
| -rw-r--r-- | include/linux/sched/signal.h | 2 |
3 files changed, 47 insertions, 6 deletions
diff --git a/include/linux/kthread.h b/include/linux/kthread.h index c1961761311d..2803264c512f 100644 --- a/include/linux/kthread.h +++ b/include/linux/kthread.h | |||
| @@ -62,6 +62,7 @@ void *kthread_probe_data(struct task_struct *k); | |||
| 62 | int kthread_park(struct task_struct *k); | 62 | int kthread_park(struct task_struct *k); |
| 63 | void kthread_unpark(struct task_struct *k); | 63 | void kthread_unpark(struct task_struct *k); |
| 64 | void kthread_parkme(void); | 64 | void kthread_parkme(void); |
| 65 | void kthread_park_complete(struct task_struct *k); | ||
| 65 | 66 | ||
| 66 | int kthreadd(void *unused); | 67 | int kthreadd(void *unused); |
| 67 | extern struct task_struct *kthreadd_task; | 68 | extern struct task_struct *kthreadd_task; |
diff --git a/include/linux/sched.h b/include/linux/sched.h index b3d697f3b573..c2413703f45d 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h | |||
| @@ -112,17 +112,36 @@ struct task_group; | |||
| 112 | 112 | ||
| 113 | #ifdef CONFIG_DEBUG_ATOMIC_SLEEP | 113 | #ifdef CONFIG_DEBUG_ATOMIC_SLEEP |
| 114 | 114 | ||
| 115 | /* | ||
| 116 | * Special states are those that do not use the normal wait-loop pattern. See | ||
| 117 | * the comment with set_special_state(). | ||
| 118 | */ | ||
| 119 | #define is_special_task_state(state) \ | ||
| 120 | ((state) & (__TASK_STOPPED | __TASK_TRACED | TASK_DEAD)) | ||
| 121 | |||
| 115 | #define __set_current_state(state_value) \ | 122 | #define __set_current_state(state_value) \ |
| 116 | do { \ | 123 | do { \ |
| 124 | WARN_ON_ONCE(is_special_task_state(state_value));\ | ||
| 117 | current->task_state_change = _THIS_IP_; \ | 125 | current->task_state_change = _THIS_IP_; \ |
| 118 | current->state = (state_value); \ | 126 | current->state = (state_value); \ |
| 119 | } while (0) | 127 | } while (0) |
| 128 | |||
| 120 | #define set_current_state(state_value) \ | 129 | #define set_current_state(state_value) \ |
| 121 | do { \ | 130 | do { \ |
| 131 | WARN_ON_ONCE(is_special_task_state(state_value));\ | ||
| 122 | current->task_state_change = _THIS_IP_; \ | 132 | current->task_state_change = _THIS_IP_; \ |
| 123 | smp_store_mb(current->state, (state_value)); \ | 133 | smp_store_mb(current->state, (state_value)); \ |
| 124 | } while (0) | 134 | } while (0) |
| 125 | 135 | ||
| 136 | #define set_special_state(state_value) \ | ||
| 137 | do { \ | ||
| 138 | unsigned long flags; /* may shadow */ \ | ||
| 139 | WARN_ON_ONCE(!is_special_task_state(state_value)); \ | ||
| 140 | raw_spin_lock_irqsave(¤t->pi_lock, flags); \ | ||
| 141 | current->task_state_change = _THIS_IP_; \ | ||
| 142 | current->state = (state_value); \ | ||
| 143 | raw_spin_unlock_irqrestore(¤t->pi_lock, flags); \ | ||
| 144 | } while (0) | ||
| 126 | #else | 145 | #else |
| 127 | /* | 146 | /* |
| 128 | * set_current_state() includes a barrier so that the write of current->state | 147 | * set_current_state() includes a barrier so that the write of current->state |
| @@ -144,8 +163,8 @@ struct task_group; | |||
| 144 | * | 163 | * |
| 145 | * The above is typically ordered against the wakeup, which does: | 164 | * The above is typically ordered against the wakeup, which does: |
| 146 | * | 165 | * |
| 147 | * need_sleep = false; | 166 | * need_sleep = false; |
| 148 | * wake_up_state(p, TASK_UNINTERRUPTIBLE); | 167 | * wake_up_state(p, TASK_UNINTERRUPTIBLE); |
| 149 | * | 168 | * |
| 150 | * Where wake_up_state() (and all other wakeup primitives) imply enough | 169 | * Where wake_up_state() (and all other wakeup primitives) imply enough |
| 151 | * barriers to order the store of the variable against wakeup. | 170 | * barriers to order the store of the variable against wakeup. |
| @@ -154,12 +173,33 @@ struct task_group; | |||
| 154 | * once it observes the TASK_UNINTERRUPTIBLE store the waking CPU can issue a | 173 | * once it observes the TASK_UNINTERRUPTIBLE store the waking CPU can issue a |
| 155 | * TASK_RUNNING store which can collide with __set_current_state(TASK_RUNNING). | 174 | * TASK_RUNNING store which can collide with __set_current_state(TASK_RUNNING). |
| 156 | * | 175 | * |
| 157 | * This is obviously fine, since they both store the exact same value. | 176 | * However, with slightly different timing the wakeup TASK_RUNNING store can |
| 177 | * also collide with the TASK_UNINTERRUPTIBLE store. Losing that store is not | ||
| 178 | * a problem either because that will result in one extra go around the loop | ||
| 179 | * and our @cond test will save the day. | ||
| 158 | * | 180 | * |
| 159 | * Also see the comments of try_to_wake_up(). | 181 | * Also see the comments of try_to_wake_up(). |
| 160 | */ | 182 | */ |
| 161 | #define __set_current_state(state_value) do { current->state = (state_value); } while (0) | 183 | #define __set_current_state(state_value) \ |
| 162 | #define set_current_state(state_value) smp_store_mb(current->state, (state_value)) | 184 | current->state = (state_value) |
| 185 | |||
| 186 | #define set_current_state(state_value) \ | ||
| 187 | smp_store_mb(current->state, (state_value)) | ||
| 188 | |||
| 189 | /* | ||
| 190 | * set_special_state() should be used for those states when the blocking task | ||
| 191 | * can not use the regular condition based wait-loop. In that case we must | ||
| 192 | * serialize against wakeups such that any possible in-flight TASK_RUNNING stores | ||
| 193 | * will not collide with our state change. | ||
| 194 | */ | ||
| 195 | #define set_special_state(state_value) \ | ||
| 196 | do { \ | ||
| 197 | unsigned long flags; /* may shadow */ \ | ||
| 198 | raw_spin_lock_irqsave(¤t->pi_lock, flags); \ | ||
| 199 | current->state = (state_value); \ | ||
| 200 | raw_spin_unlock_irqrestore(¤t->pi_lock, flags); \ | ||
| 201 | } while (0) | ||
| 202 | |||
| 163 | #endif | 203 | #endif |
| 164 | 204 | ||
| 165 | /* Task command name length: */ | 205 | /* Task command name length: */ |
diff --git a/include/linux/sched/signal.h b/include/linux/sched/signal.h index a7ce74c74e49..113d1ad1ced7 100644 --- a/include/linux/sched/signal.h +++ b/include/linux/sched/signal.h | |||
| @@ -280,7 +280,7 @@ static inline void kernel_signal_stop(void) | |||
| 280 | { | 280 | { |
| 281 | spin_lock_irq(¤t->sighand->siglock); | 281 | spin_lock_irq(¤t->sighand->siglock); |
| 282 | if (current->jobctl & JOBCTL_STOP_DEQUEUED) | 282 | if (current->jobctl & JOBCTL_STOP_DEQUEUED) |
| 283 | __set_current_state(TASK_STOPPED); | 283 | set_special_state(TASK_STOPPED); |
| 284 | spin_unlock_irq(¤t->sighand->siglock); | 284 | spin_unlock_irq(¤t->sighand->siglock); |
| 285 | 285 | ||
| 286 | schedule(); | 286 | schedule(); |
