aboutsummaryrefslogtreecommitdiffstats
path: root/kernel/kthread.c
diff options
context:
space:
mode:
Diffstat (limited to 'kernel/kthread.c')
-rw-r--r--kernel/kthread.c30
1 file changed, 24 insertions, 6 deletions
diff --git a/kernel/kthread.c b/kernel/kthread.c
index 481951bf091d..750cb8082694 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -177,9 +177,20 @@ void *kthread_probe_data(struct task_struct *task)
177static void __kthread_parkme(struct kthread *self) 177static void __kthread_parkme(struct kthread *self)
178{ 178{
179 for (;;) { 179 for (;;) {
180 set_current_state(TASK_PARKED); 180 /*
181 * TASK_PARKED is a special state; we must serialize against
182 * possible pending wakeups to avoid store-store collisions on
183 * task->state.
184 *
185 * Such a collision might possibly result in the task state
186 * changing from TASK_PARKED and us failing the
187 * wait_task_inactive() in kthread_park().
188 */
189 set_special_state(TASK_PARKED);
181 if (!test_bit(KTHREAD_SHOULD_PARK, &self->flags)) 190 if (!test_bit(KTHREAD_SHOULD_PARK, &self->flags))
182 break; 191 break;
192
193 complete_all(&self->parked);
183 schedule(); 194 schedule();
184 } 195 }
185 __set_current_state(TASK_RUNNING); 196 __set_current_state(TASK_RUNNING);
@@ -191,11 +202,6 @@ void kthread_parkme(void)
191} 202}
192EXPORT_SYMBOL_GPL(kthread_parkme); 203EXPORT_SYMBOL_GPL(kthread_parkme);
193 204
194void kthread_park_complete(struct task_struct *k)
195{
196 complete_all(&to_kthread(k)->parked);
197}
198
199static int kthread(void *_create) 205static int kthread(void *_create)
200{ 206{
201 /* Copy data: it's on kthread's stack */ 207 /* Copy data: it's on kthread's stack */
@@ -461,6 +467,9 @@ void kthread_unpark(struct task_struct *k)
461 467
462 reinit_completion(&kthread->parked); 468 reinit_completion(&kthread->parked);
463 clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags); 469 clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
470 /*
471 * __kthread_parkme() will either see !SHOULD_PARK or get the wakeup.
472 */
464 wake_up_state(k, TASK_PARKED); 473 wake_up_state(k, TASK_PARKED);
465} 474}
466EXPORT_SYMBOL_GPL(kthread_unpark); 475EXPORT_SYMBOL_GPL(kthread_unpark);
@@ -487,7 +496,16 @@ int kthread_park(struct task_struct *k)
487 set_bit(KTHREAD_SHOULD_PARK, &kthread->flags); 496 set_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
488 if (k != current) { 497 if (k != current) {
489 wake_up_process(k); 498 wake_up_process(k);
499 /*
500 * Wait for __kthread_parkme() to complete(), this means we
501 * _will_ have TASK_PARKED and are about to call schedule().
502 */
490 wait_for_completion(&kthread->parked); 503 wait_for_completion(&kthread->parked);
504 /*
505 * Now wait for that schedule() to complete and the task to
506 * get scheduled out.
507 */
508 WARN_ON_ONCE(!wait_task_inactive(k, TASK_PARKED));
491 } 509 }
492 510
493 return 0; 511 return 0;