aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorOleg Nesterov <oleg@redhat.com>2016-11-29 12:51:07 -0500
committerThomas Gleixner <tglx@linutronix.de>2016-12-08 08:36:19 -0500
commitcf380a4a96e2260742051fa7fc831596bb26cc8b (patch)
tree8f27e5d81c85ab8de3c4a2f92eee70d7ad092022
parentefb29fbfa50c490dac64a9418ebe553be82df781 (diff)
kthread: Don't use to_live_kthread() in kthread_[un]park()
Now that to_kthread() is always valid, change kthread_park() and kthread_unpark() to use it and kill to_live_kthread(). The conversion of kthread_unpark() is trivial. If KTHREAD_IS_PARKED is set then the task has called complete(&self->parked) and there the function cannot race against a concurrent kthread_stop() and exit. kthread_park() is more tricky, because its semantics are not well defined. It returns -ENOSYS if the thread exited but this can never happen and as Roman pointed out kthread_park() can obviously block forever if it would race with the exiting kthread. The usage of kthread_park() in cpuhp code (cpu.c, smpboot.c, stop_machine.c) is fine. It can never see an exiting/exited kthread, smpboot_destroy_threads() clears *ht->store, smpboot_park_thread() checks it is not NULL under the same smpboot_threads_lock. cpuhp_threads and cpu_stop_threads never exit, so other callers are fine too. But it has two more users: - watchdog_park_threads(): The code is actually correct, get_online_cpus() ensures that kthread_park() can't race with itself (note that kthread_park() can't handle this race correctly), but it should not use kthread_park() directly. - drivers/gpu/drm/amd/scheduler/gpu_scheduler.c should not use kthread_park() either. kthread_park() must not be called after amd_sched_fini() which does kthread_stop(), otherwise even to_live_kthread() is not safe because task_struct can be already freed and sched->thread can point to nowhere. The usage of kthread_park/unpark should either be restricted to core code which is properly protected against the exit race or made more robust so it is safe to use it in drivers. To catch eventual exit issues, add a WARN_ON(PF_EXITING) for now. 
Signed-off-by: Oleg Nesterov <oleg@redhat.com> Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org> Reviewed-by: Thomas Gleixner <tglx@linutronix.de> Cc: Chunming Zhou <David1.Zhou@amd.com> Cc: Roman Pen <roman.penyaev@profitbricks.com> Cc: Petr Mladek <pmladek@suse.com> Cc: Andy Lutomirski <luto@amacapital.net> Cc: Tejun Heo <tj@kernel.org> Cc: Andy Lutomirski <luto@kernel.org> Cc: Alex Deucher <alexander.deucher@amd.com> Cc: Andrew Morton <akpm@linux-foundation.org> Link: http://lkml.kernel.org/r/20161129175107.GA5339@redhat.com Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-rw-r--r--kernel/kthread.c69
1 files changed, 24 insertions, 45 deletions
diff --git a/kernel/kthread.c b/kernel/kthread.c
index 4dcbc8b5d6b6..01d27164e5b7 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -78,21 +78,6 @@ void free_kthread_struct(struct task_struct *k)
78 kfree(to_kthread(k)); 78 kfree(to_kthread(k));
79} 79}
80 80
81#define __to_kthread(vfork) \
82 container_of(vfork, struct kthread, exited)
83
84/*
85 * TODO: kill it and use to_kthread(). But we still need the users
86 * like kthread_stop() which has to sync with the exiting kthread.
87 */
88static struct kthread *to_live_kthread(struct task_struct *k)
89{
90 struct completion *vfork = ACCESS_ONCE(k->vfork_done);
91 if (likely(vfork))
92 return __to_kthread(vfork);
93 return NULL;
94}
95
96/** 81/**
97 * kthread_should_stop - should this kthread return now? 82 * kthread_should_stop - should this kthread return now?
98 * 83 *
@@ -441,8 +426,18 @@ struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data),
441 return p; 426 return p;
442} 427}
443 428
444static void __kthread_unpark(struct task_struct *k, struct kthread *kthread) 429/**
430 * kthread_unpark - unpark a thread created by kthread_create().
431 * @k: thread created by kthread_create().
432 *
433 * Sets kthread_should_park() for @k to return false, wakes it, and
434 * waits for it to return. If the thread is marked percpu then its
435 * bound to the cpu again.
436 */
437void kthread_unpark(struct task_struct *k)
445{ 438{
439 struct kthread *kthread = to_kthread(k);
440
446 clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags); 441 clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
447 /* 442 /*
448 * We clear the IS_PARKED bit here as we don't wait 443 * We clear the IS_PARKED bit here as we don't wait
@@ -460,22 +455,6 @@ static void __kthread_unpark(struct task_struct *k, struct kthread *kthread)
460 wake_up_state(k, TASK_PARKED); 455 wake_up_state(k, TASK_PARKED);
461 } 456 }
462} 457}
463
464/**
465 * kthread_unpark - unpark a thread created by kthread_create().
466 * @k: thread created by kthread_create().
467 *
468 * Sets kthread_should_park() for @k to return false, wakes it, and
469 * waits for it to return. If the thread is marked percpu then its
470 * bound to the cpu again.
471 */
472void kthread_unpark(struct task_struct *k)
473{
474 struct kthread *kthread = to_live_kthread(k);
475
476 if (kthread)
477 __kthread_unpark(k, kthread);
478}
479EXPORT_SYMBOL_GPL(kthread_unpark); 458EXPORT_SYMBOL_GPL(kthread_unpark);
480 459
481/** 460/**
@@ -492,20 +471,20 @@ EXPORT_SYMBOL_GPL(kthread_unpark);
492 */ 471 */
493int kthread_park(struct task_struct *k) 472int kthread_park(struct task_struct *k)
494{ 473{
495 struct kthread *kthread = to_live_kthread(k); 474 struct kthread *kthread = to_kthread(k);
496 int ret = -ENOSYS; 475
497 476 if (WARN_ON(k->flags & PF_EXITING))
498 if (kthread) { 477 return -ENOSYS;
499 if (!test_bit(KTHREAD_IS_PARKED, &kthread->flags)) { 478
500 set_bit(KTHREAD_SHOULD_PARK, &kthread->flags); 479 if (!test_bit(KTHREAD_IS_PARKED, &kthread->flags)) {
501 if (k != current) { 480 set_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
502 wake_up_process(k); 481 if (k != current) {
503 wait_for_completion(&kthread->parked); 482 wake_up_process(k);
504 } 483 wait_for_completion(&kthread->parked);
505 } 484 }
506 ret = 0;
507 } 485 }
508 return ret; 486
487 return 0;
509} 488}
510EXPORT_SYMBOL_GPL(kthread_park); 489EXPORT_SYMBOL_GPL(kthread_park);
511 490
@@ -534,7 +513,7 @@ int kthread_stop(struct task_struct *k)
534 get_task_struct(k); 513 get_task_struct(k);
535 kthread = to_kthread(k); 514 kthread = to_kthread(k);
536 set_bit(KTHREAD_SHOULD_STOP, &kthread->flags); 515 set_bit(KTHREAD_SHOULD_STOP, &kthread->flags);
537 __kthread_unpark(k, kthread); 516 kthread_unpark(k);
538 wake_up_process(k); 517 wake_up_process(k);
539 wait_for_completion(&kthread->exited); 518 wait_for_completion(&kthread->exited);
540 ret = k->exit_code; 519 ret = k->exit_code;