author      Oleg Nesterov <oleg@redhat.com>                    2009-06-17 19:27:45 -0400
committer   Linus Torvalds <torvalds@linux-foundation.org>     2009-06-18 16:03:54 -0400
commit      371cbb387e33651b4c1326457116568ff01ac422 (patch)
tree        4678d02d44fc9764f316e5d84e5d307cc14a0923
parent      63706172f332fd3f6e7458ebfb35fa6de9c21dc5 (diff)
kthreads: simplify migration_thread() exit path
Now that kthread_stop() can be used even if the task has already exited,
we can kill the "wait_to_die:" loop in migration_thread(). But we must
pin rq->migration_thread after creation.
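As an illustration (not part of this patch), here is a minimal sketch of the "pin, then stop" pattern the change applies to rq->migration_thread: the creator takes a task_struct reference right after kthread_create(), so kthread_stop() and put_task_struct() remain safe even if the thread has already exited on its own. All demo_* names are hypothetical.

/*
 * Minimal illustrative sketch (not from this patch) of the "pin, then stop"
 * pattern.  The demo_* names are hypothetical; kthread_create(),
 * get_task_struct(), wake_up_process(), kthread_should_stop(),
 * kthread_stop() and put_task_struct() are the real kernel APIs involved.
 */
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/err.h>

static struct task_struct *demo_task;

static int demo_thread_fn(void *data)
{
	/* Like migration_thread(), this thread may also return on its own
	 * (e.g. when its CPU goes offline) instead of looping until told
	 * to die. */
	while (!kthread_should_stop())
		msleep(100);
	return 0;
}

static int __init demo_init(void)
{
	demo_task = kthread_create(demo_thread_fn, NULL, "demo_thread");
	if (IS_ERR(demo_task))
		return PTR_ERR(demo_task);

	/* Pin the task_struct before publishing the pointer, as
	 * CPU_UP_PREPARE now does for rq->migration_thread. */
	get_task_struct(demo_task);
	wake_up_process(demo_task);
	return 0;
}

static void __exit demo_exit(void)
{
	/* Safe even if the thread already exited: our reference keeps the
	 * task_struct alive until we drop it after kthread_stop(). */
	kthread_stop(demo_task);
	put_task_struct(demo_task);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");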
Actually, I don't think CPU_UP_CANCELED or CPU_DEAD should wait for
->migration_thread exit. Perhaps we can simplify this code a bit more.
migration_call() can set ->should_stop and forget about this thread. But
we need a new helper in kthread.c for that.
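The helper hinted at above does not exist; purely as a hypothetical sketch, a non-waiting counterpart to kthread_stop() in kthread.c might look like the following. The name kthread_request_stop() and the reliance on kthread.c's file-local struct kthread and to_kthread() accessor (from the parent commit's rework) are assumptions, not an existing API.

/*
 * Hypothetical helper for kthread.c, sketched from the suggestion above.
 * It does not exist in the kernel; the name and the use of kthread.c's
 * internal struct kthread / to_kthread() are assumptions.
 */
void kthread_request_stop(struct task_struct *k)
{
	struct kthread *kthread = to_kthread(k);

	/* Same signalling as kthread_stop(), but without waiting on
	 * &kthread->exited: the caller asks the thread to stop and then
	 * forgets about it (dropping any task_struct reference it holds). */
	kthread->should_stop = 1;
	wake_up_process(k);
}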
Signed-off-by: Oleg Nesterov <oleg@redhat.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: "Eric W. Biederman" <ebiederm@xmission.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Pavel Emelyanov <xemul@openvz.org>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: Vitaliy Gusev <vgusev@openvz.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--   kernel/sched.c | 14
1 file changed, 4 insertions, 10 deletions
diff --git a/kernel/sched.c b/kernel/sched.c
index 8fb88a906aaa..247fd0fedd0b 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -7045,7 +7045,7 @@ static int migration_thread(void *data)
 
 		if (cpu_is_offline(cpu)) {
 			spin_unlock_irq(&rq->lock);
-			goto wait_to_die;
+			break;
 		}
 
 		if (rq->active_balance) {
@@ -7071,16 +7071,7 @@ static int migration_thread(void *data)
 		complete(&req->done);
 	}
 	__set_current_state(TASK_RUNNING);
-	return 0;
 
-wait_to_die:
-	/* Wait for kthread_stop */
-	set_current_state(TASK_INTERRUPTIBLE);
-	while (!kthread_should_stop()) {
-		schedule();
-		set_current_state(TASK_INTERRUPTIBLE);
-	}
-	__set_current_state(TASK_RUNNING);
 	return 0;
 }
 
@@ -7494,6 +7485,7 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
 		rq = task_rq_lock(p, &flags);
 		__setscheduler(rq, p, SCHED_FIFO, MAX_RT_PRIO-1);
 		task_rq_unlock(rq, &flags);
+		get_task_struct(p);
 		cpu_rq(cpu)->migration_thread = p;
 		break;
 
@@ -7524,6 +7516,7 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
 		kthread_bind(cpu_rq(cpu)->migration_thread,
 			     cpumask_any(cpu_online_mask));
 		kthread_stop(cpu_rq(cpu)->migration_thread);
+		put_task_struct(cpu_rq(cpu)->migration_thread);
 		cpu_rq(cpu)->migration_thread = NULL;
 		break;
 
@@ -7533,6 +7526,7 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
 		migrate_live_tasks(cpu);
 		rq = cpu_rq(cpu);
 		kthread_stop(rq->migration_thread);
+		put_task_struct(rq->migration_thread);
 		rq->migration_thread = NULL;
 		/* Idle task back to normal (off runqueue, low prio) */
 		spin_lock_irq(&rq->lock);