aboutsummaryrefslogtreecommitdiffstats
path: root/kernel
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2012-09-19 14:00:07 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2012-09-19 14:00:07 -0400
commitc5c473e29c641380aef4a9d1f9c39de49219980f (patch)
tree7cc1d52fa7757ecd0903fc6e86bb22188d2a8bbd /kernel
parent925a6f0bf8bd122d5d2429af7f0ca0fecf4ae71f (diff)
parent6889125b8b4e09c5e53e6ecab3433bed1ce198c9 (diff)
Merge branch 'for-3.6-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/wq
Pull workqueue / powernow-k8 fix from Tejun Heo:
 "This is the fix for the bug where cpufreq/powernow-k8 was tripping
  BUG_ON() in try_to_wake_up_local() by migrating workqueue worker to a
  different CPU.

    https://bugzilla.kernel.org/show_bug.cgi?id=47301

  As discussed, the fix is now two parts - one to reimplement
  work_on_cpu() so that it doesn't create a new kthread each time and
  the actual fix which makes powernow-k8 use work_on_cpu() instead of
  performing manual migration.

  While pretty late in the merge cycle, both changes are on the safer
  side.  Jiri and I verified two existing users of work_on_cpu() and
  Duncan confirmed that the powernow-k8 fix survived about 18 hours of
  testing."

* 'for-3.6-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/wq:
  cpufreq/powernow-k8: workqueue user shouldn't migrate the kworker to another CPU
  workqueue: reimplement work_on_cpu() using system_wq
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/workqueue.c | 25
1 file changed, 8 insertions(+), 17 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index b80065a2450a..3c5a79e2134c 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -3576,18 +3576,17 @@ static int __devinit workqueue_cpu_down_callback(struct notifier_block *nfb,
3576#ifdef CONFIG_SMP 3576#ifdef CONFIG_SMP
3577 3577
3578struct work_for_cpu { 3578struct work_for_cpu {
3579 struct completion completion; 3579 struct work_struct work;
3580 long (*fn)(void *); 3580 long (*fn)(void *);
3581 void *arg; 3581 void *arg;
3582 long ret; 3582 long ret;
3583}; 3583};
3584 3584
3585static int do_work_for_cpu(void *_wfc) 3585static void work_for_cpu_fn(struct work_struct *work)
3586{ 3586{
3587 struct work_for_cpu *wfc = _wfc; 3587 struct work_for_cpu *wfc = container_of(work, struct work_for_cpu, work);
3588
3588 wfc->ret = wfc->fn(wfc->arg); 3589 wfc->ret = wfc->fn(wfc->arg);
3589 complete(&wfc->completion);
3590 return 0;
3591} 3590}
3592 3591
3593/** 3592/**
@@ -3602,19 +3601,11 @@ static int do_work_for_cpu(void *_wfc)
3602 */ 3601 */
3603long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg) 3602long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg)
3604{ 3603{
3605 struct task_struct *sub_thread; 3604 struct work_for_cpu wfc = { .fn = fn, .arg = arg };
3606 struct work_for_cpu wfc = {
3607 .completion = COMPLETION_INITIALIZER_ONSTACK(wfc.completion),
3608 .fn = fn,
3609 .arg = arg,
3610 };
3611 3605
3612 sub_thread = kthread_create(do_work_for_cpu, &wfc, "work_for_cpu"); 3606 INIT_WORK_ONSTACK(&wfc.work, work_for_cpu_fn);
3613 if (IS_ERR(sub_thread)) 3607 schedule_work_on(cpu, &wfc.work);
3614 return PTR_ERR(sub_thread); 3608 flush_work(&wfc.work);
3615 kthread_bind(sub_thread, cpu);
3616 wake_up_process(sub_thread);
3617 wait_for_completion(&wfc.completion);
3618 return wfc.ret; 3609 return wfc.ret;
3619} 3610}
3620EXPORT_SYMBOL_GPL(work_on_cpu); 3611EXPORT_SYMBOL_GPL(work_on_cpu);