Diffstat (limited to 'kernel/workqueue.c')
-rw-r--r--	kernel/workqueue.c	37	++++++++++++++++++-------------------
1 file changed, 18 insertions(+), 19 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 1e1373bcb3e3..3c5a79e2134c 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -1349,8 +1349,16 @@ static void busy_worker_rebind_fn(struct work_struct *work)
 	struct worker *worker = container_of(work, struct worker, rebind_work);
 	struct global_cwq *gcwq = worker->pool->gcwq;
 
-	if (worker_maybe_bind_and_lock(worker))
-		worker_clr_flags(worker, WORKER_REBIND);
+	worker_maybe_bind_and_lock(worker);
+
+	/*
+	 * %WORKER_REBIND must be cleared even if the above binding failed;
+	 * otherwise, we may confuse the next CPU_UP cycle or oops / get
+	 * stuck by calling idle_worker_rebind() prematurely.  If CPU went
+	 * down again in between, %WORKER_UNBOUND would be set, so clearing
+	 * %WORKER_REBIND is always safe.
+	 */
+	worker_clr_flags(worker, WORKER_REBIND);
 
 	spin_unlock_irq(&gcwq->lock);
 }
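
The hunk above makes the key behavioral change of this fix: worker_clr_flags() now runs unconditionally instead of only when worker_maybe_bind_and_lock() succeeds. The bug class is a general one: when a cleanup step is gated on an operation that can fail, the failure path leaks state. A minimal userspace sketch of the before/after pattern, with illustrative stand-ins (try_bind, FLAG_REBIND, and FLAG_UNBOUND are not kernel APIs):

#include <stdbool.h>
#include <stdio.h>

#define FLAG_REBIND  0x1	/* stand-in for %WORKER_REBIND */
#define FLAG_UNBOUND 0x2	/* stand-in for %WORKER_UNBOUND */

struct fake_worker { unsigned int flags; };

/* Pretend binding fails, e.g. the CPU went back down in between. */
static bool try_bind(struct fake_worker *w) { (void)w; return false; }

int main(void)
{
	struct fake_worker w = { .flags = FLAG_REBIND };

	/* Old pattern: the flag is cleared only on success, so a failed
	 * bind leaks FLAG_REBIND into the next CPU_UP cycle. */
	if (try_bind(&w))
		w.flags &= ~FLAG_REBIND;
	printf("old pattern: flags = %#x (FLAG_REBIND leaked)\n", w.flags);

	/* New pattern: attempt the bind, then clear unconditionally.
	 * Per the new comment, this is safe because a downed CPU sets
	 * the UNBOUND flag instead. */
	w.flags = FLAG_REBIND | FLAG_UNBOUND;
	try_bind(&w);
	w.flags &= ~FLAG_REBIND;
	printf("new pattern: flags = %#x\n", w.flags);
	return 0;
}
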
@@ -3568,18 +3576,17 @@ static int __devinit workqueue_cpu_down_callback(struct notifier_block *nfb,
 #ifdef CONFIG_SMP
 
 struct work_for_cpu {
-	struct completion completion;
+	struct work_struct work;
 	long (*fn)(void *);
 	void *arg;
 	long ret;
 };
 
-static int do_work_for_cpu(void *_wfc)
+static void work_for_cpu_fn(struct work_struct *work)
 {
-	struct work_for_cpu *wfc = _wfc;
+	struct work_for_cpu *wfc = container_of(work, struct work_for_cpu, work);
+
 	wfc->ret = wfc->fn(wfc->arg);
-	complete(&wfc->completion);
-	return 0;
 }
 
 /**
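
Note the change in how the callback recovers its context: the old do_work_for_cpu() was handed the work_for_cpu pointer directly as void *_wfc, while work_for_cpu_fn() receives only the embedded work_struct and climbs back to the enclosing struct with container_of(). A standalone sketch of that recovery step, using a userspace stand-in for container_of() (the struct layout mirrors the hunk above; work_struct here is a dummy, not the kernel type):

#include <stddef.h>
#include <stdio.h>

/* Userspace stand-in for the kernel's container_of(): recover the
 * enclosing struct from a pointer to one of its members. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct work_struct { int dummy; };

struct work_for_cpu {
	struct work_struct work;
	long (*fn)(void *);
	void *arg;
	long ret;
};

int main(void)
{
	struct work_for_cpu wfc = { .ret = 7 };
	struct work_struct *w = &wfc.work;	/* what the callback gets */

	/* Same recovery as work_for_cpu_fn() in the hunk above. */
	struct work_for_cpu *back = container_of(w, struct work_for_cpu, work);

	printf("ret = %ld\n", back->ret);	/* prints: ret = 7 */
	return 0;
}
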
@@ -3594,19 +3601,11 @@ static int do_work_for_cpu(void *_wfc)
  */
 long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg)
 {
-	struct task_struct *sub_thread;
-	struct work_for_cpu wfc = {
-		.completion = COMPLETION_INITIALIZER_ONSTACK(wfc.completion),
-		.fn = fn,
-		.arg = arg,
-	};
+	struct work_for_cpu wfc = { .fn = fn, .arg = arg };
 
-	sub_thread = kthread_create(do_work_for_cpu, &wfc, "work_for_cpu");
-	if (IS_ERR(sub_thread))
-		return PTR_ERR(sub_thread);
-	kthread_bind(sub_thread, cpu);
-	wake_up_process(sub_thread);
-	wait_for_completion(&wfc.completion);
+	INIT_WORK_ONSTACK(&wfc.work, work_for_cpu_fn);
+	schedule_work_on(cpu, &wfc.work);
+	flush_work(&wfc.work);
 	return wfc.ret;
 }
 EXPORT_SYMBOL_GPL(work_on_cpu);
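
For callers, nothing changes: work_on_cpu() keeps its signature and still runs fn(arg) on the requested CPU and returns the long result, but it now does so by queueing an on-stack work item on that CPU's workqueue and flushing it, instead of creating, binding, and waking a dedicated kthread per call. That also removes the only failure path, since the old kthread_create() could return an error while the new queue-and-flush sequence simply blocks until fn has run. A hedged sketch of a caller (query_cpu_state and the values are hypothetical, not part of this diff):

#include <linux/workqueue.h>

/* Hypothetical callback: runs on the target CPU in a kworker. */
static long query_cpu_state(void *arg)
{
	unsigned int *what = arg;
	/* ... read per-CPU state here ... */
	return (long)*what;
}

static long example(unsigned int cpu)
{
	unsigned int what = 42;

	/*
	 * Queues a work item on @cpu's workqueue and flushes it, so
	 * this sleeps: call from process context only.
	 */
	return work_on_cpu(cpu, query_cpu_state, &what);
}
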