author    Tejun Heo <tj@kernel.org>    2012-09-18 15:48:43 -0400
committer Tejun Heo <tj@kernel.org>    2012-09-19 13:13:12 -0400
commit    ed48ece27cd3d5ee0354c32bbaec0f3e1d4715c3 (patch)
tree      9ead3fba10ccd3118e6c4f38ed61cbf2bb2cbb3f /kernel/workqueue.c
parent    960bd11bf2daf669d0d910428fd9ef5a15c3d7cb (diff)
workqueue: reimplement work_on_cpu() using system_wq
The existing work_on_cpu() implementation is hugely inefficient.  On each
invocation it creates a new kthread, executes that single function and then
lets the kthread die.  Now that system_wq can handle concurrent executions,
there's no advantage to doing this.

Reimplement work_on_cpu() using system_wq, which makes it simpler and far
more efficient.

stable: While this isn't a fix in itself, it's needed to fix a
workqueue-related bug in cpufreq/powernow-k8.  AFAICS, this shouldn't break
other existing users.

Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-by: Jiri Kosina <jkosina@suse.cz>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Bjorn Helgaas <bhelgaas@google.com>
Cc: Len Brown <lenb@kernel.org>
Cc: Rafael J. Wysocki <rjw@sisk.pl>
Cc: stable@vger.kernel.org
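For context, a minimal caller-side sketch of the interface this patch reimplements
(not part of the patch itself): work_on_cpu() takes a CPU number, a long (*fn)(void *)
callback and an opaque argument, runs the callback on that CPU, and returns its
return value.  The read_node_id() and example_caller() functions below are
hypothetical names used purely for illustration.

#include <linux/workqueue.h>	/* work_on_cpu() */
#include <linux/topology.h>	/* numa_node_id() */
#include <linux/printk.h>	/* pr_info() */

/* Hypothetical callback: executes on the CPU handed to work_on_cpu(). */
static long read_node_id(void *arg)
{
	return numa_node_id();
}

/* Hypothetical caller: work_on_cpu() may sleep, so process context only. */
static void example_caller(unsigned int cpu)
{
	/* Blocks until read_node_id() has finished running on @cpu. */
	long node = work_on_cpu(cpu, read_node_id, NULL);

	pr_info("cpu %u sits on NUMA node %ld\n", cpu, node);
}

With the reimplementation, such a call no longer creates and destroys a kthread;
the callback simply rides on system_wq's per-CPU workers.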
Diffstat (limited to 'kernel/workqueue.c')
-rw-r--r--	kernel/workqueue.c	25
1 file changed, 8 insertions(+), 17 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index b80065a2450a..3c5a79e2134c 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -3576,18 +3576,17 @@ static int __devinit workqueue_cpu_down_callback(struct notifier_block *nfb,
 #ifdef CONFIG_SMP
 
 struct work_for_cpu {
-	struct completion completion;
+	struct work_struct work;
 	long (*fn)(void *);
 	void *arg;
 	long ret;
 };
 
-static int do_work_for_cpu(void *_wfc)
+static void work_for_cpu_fn(struct work_struct *work)
 {
-	struct work_for_cpu *wfc = _wfc;
+	struct work_for_cpu *wfc = container_of(work, struct work_for_cpu, work);
+
 	wfc->ret = wfc->fn(wfc->arg);
-	complete(&wfc->completion);
-	return 0;
 }
 
 /**
@@ -3602,19 +3601,11 @@ static int do_work_for_cpu(void *_wfc)
  */
 long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg)
 {
-	struct task_struct *sub_thread;
-	struct work_for_cpu wfc = {
-		.completion = COMPLETION_INITIALIZER_ONSTACK(wfc.completion),
-		.fn = fn,
-		.arg = arg,
-	};
+	struct work_for_cpu wfc = { .fn = fn, .arg = arg };
 
-	sub_thread = kthread_create(do_work_for_cpu, &wfc, "work_for_cpu");
-	if (IS_ERR(sub_thread))
-		return PTR_ERR(sub_thread);
-	kthread_bind(sub_thread, cpu);
-	wake_up_process(sub_thread);
-	wait_for_completion(&wfc.completion);
+	INIT_WORK_ONSTACK(&wfc.work, work_for_cpu_fn);
+	schedule_work_on(cpu, &wfc.work);
+	flush_work(&wfc.work);
 	return wfc.ret;
 }
 EXPORT_SYMBOL_GPL(work_on_cpu);