 kernel/workqueue.c | 44 +++++++++++++++++++++++-------------------
 1 file changed, 25 insertions(+), 19 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index d80dbdceadb8..1d1933cf3778 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -64,6 +64,7 @@ struct workqueue_struct {
 
 /* All the per-cpu workqueues on the system, for hotplug cpu to add/remove
    threads to each one as cpus come/go. */
+static long migrate_sequence __read_mostly;
 static DEFINE_MUTEX(workqueue_mutex);
 static LIST_HEAD(workqueues);
 
@@ -421,13 +422,7 @@ static void flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
 		 * Probably keventd trying to flush its own queue. So simply run
 		 * it by hand rather than deadlocking.
 		 */
-		preempt_enable();
-		/*
-		 * We can still touch *cwq here because we are keventd, and
-		 * hot-unplug will be waiting us to exit.
-		 */
 		run_workqueue(cwq);
-		preempt_disable();
 	} else {
 		struct wq_barrier barr;
 		int active = 0;
@@ -439,11 +434,8 @@ static void flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
 		}
 		spin_unlock_irq(&cwq->lock);
 
-		if (active) {
-			preempt_enable();
+		if (active)
 			wait_for_completion(&barr.done);
-			preempt_disable();
-		}
 	}
 }
 
@@ -462,17 +454,21 @@ static void flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
  */
 void fastcall flush_workqueue(struct workqueue_struct *wq)
 {
-	preempt_disable();	/* CPU hotplug */
 	if (is_single_threaded(wq)) {
 		/* Always use first cpu's area. */
 		flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, singlethread_cpu));
 	} else {
+		long sequence;
 		int cpu;
+again:
+		sequence = migrate_sequence;
 
-		for_each_online_cpu(cpu)
+		for_each_possible_cpu(cpu)
 			flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu));
+
+		if (unlikely(sequence != migrate_sequence))
+			goto again;
 	}
-	preempt_enable();
 }
 EXPORT_SYMBOL_GPL(flush_workqueue);
 
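The hunk above replaces the preemption-based hotplug protection with a retry loop keyed to migrate_sequence, which take_over_work() bumps in the final hunk of this patch whenever pending work is moved off a dead CPU. The sketch below is a minimal, standalone illustration of that pattern, not kernel code; NR_QUEUES, flush_one() and migrate_all() are made-up stand-ins.

/*
 * Standalone sketch (not kernel code) of the migrate_sequence pattern:
 * the flusher snapshots a counter, walks every queue, and starts over
 * if the counter changed, i.e. if work was migrated while flushing.
 */
#include <stdio.h>

#define NR_QUEUES 4

static long migrate_seq;		/* bumped whenever work is migrated */

static void flush_one(int cpu)
{
	printf("flush queue %d\n", cpu);
}

static void flush_all(void)
{
	long sequence;
again:
	sequence = migrate_seq;		/* snapshot before flushing */

	for (int cpu = 0; cpu < NR_QUEUES; cpu++)
		flush_one(cpu);

	if (sequence != migrate_seq)	/* work moved under us; redo */
		goto again;
}

static void migrate_all(int dead_cpu)
{
	/* stand-in for take_over_work(): move the work, bump the counter */
	printf("moving work off queue %d\n", dead_cpu);
	migrate_seq++;
}

int main(void)
{
	migrate_all(2);
	flush_all();
	return 0;
}

With the counter-based retry, flushers no longer have to toggle preemption around run_workqueue() and wait_for_completion(), both of which can sleep; a concurrent migration merely costs one extra pass over the queues.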
@@ -544,17 +540,21 @@ out:
 }
 EXPORT_SYMBOL_GPL(flush_work);
 
-static struct task_struct *create_workqueue_thread(struct workqueue_struct *wq,
-						   int cpu)
+static void init_cpu_workqueue(struct workqueue_struct *wq, int cpu)
 {
 	struct cpu_workqueue_struct *cwq = per_cpu_ptr(wq->cpu_wq, cpu);
-	struct task_struct *p;
 
-	spin_lock_init(&cwq->lock);
 	cwq->wq = wq;
-	cwq->thread = NULL;
+	spin_lock_init(&cwq->lock);
 	INIT_LIST_HEAD(&cwq->worklist);
 	init_waitqueue_head(&cwq->more_work);
+}
+
+static struct task_struct *create_workqueue_thread(struct workqueue_struct *wq,
+						   int cpu)
+{
+	struct cpu_workqueue_struct *cwq = per_cpu_ptr(wq->cpu_wq, cpu);
+	struct task_struct *p;
 
 	if (is_single_threaded(wq))
 		p = kthread_create(worker_thread, cwq, "%s", wq->name);
@@ -589,6 +589,7 @@ struct workqueue_struct *__create_workqueue(const char *name,
 	mutex_lock(&workqueue_mutex);
 	if (singlethread) {
 		INIT_LIST_HEAD(&wq->list);
+		init_cpu_workqueue(wq, singlethread_cpu);
 		p = create_workqueue_thread(wq, singlethread_cpu);
 		if (!p)
 			destroy = 1;
@@ -596,7 +597,11 @@ struct workqueue_struct *__create_workqueue(const char *name,
 			wake_up_process(p);
 	} else {
 		list_add(&wq->list, &workqueues);
-		for_each_online_cpu(cpu) {
+		for_each_possible_cpu(cpu) {
+			init_cpu_workqueue(wq, cpu);
+			if (!cpu_online(cpu))
+				continue;
+
 			p = create_workqueue_thread(wq, cpu);
 			if (p) {
 				kthread_bind(p, cpu);
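The two hunks above split per-CPU initialization (init_cpu_workqueue()) away from thread creation, so __create_workqueue() can set up queue state for every possible CPU while only spawning worker threads on CPUs that are currently online. A standalone sketch of that split, using made-up helpers (NR_POSSIBLE_CPUS, cpu_is_online(), start_worker()) rather than the kernel's cpumask API:

/*
 * Standalone sketch (not kernel code): initialize state for every
 * possible CPU, but start a worker only for CPUs that are online.
 */
#include <stdbool.h>
#include <stdio.h>

#define NR_POSSIBLE_CPUS 4

struct cpu_queue {
	int cpu;
	bool has_worker;
};

static struct cpu_queue queues[NR_POSSIBLE_CPUS];

static bool cpu_is_online(int cpu)
{
	return cpu != 3;			/* pretend CPU 3 is offline */
}

static void init_queue(struct cpu_queue *q, int cpu)
{
	q->cpu = cpu;				/* always safe: only sets up state */
	q->has_worker = false;
}

static void start_worker(struct cpu_queue *q)
{
	q->has_worker = true;			/* stand-in for kthread_create() */
	printf("worker started for cpu %d\n", q->cpu);
}

int main(void)
{
	for (int cpu = 0; cpu < NR_POSSIBLE_CPUS; cpu++) {
		init_queue(&queues[cpu], cpu);	/* every possible CPU */
		if (!cpu_is_online(cpu))
			continue;		/* offline: thread comes later */
		start_worker(&queues[cpu]);
	}
	return 0;
}

Initializing every possible CPU's queue up front is what makes the for_each_possible_cpu() walk in flush_workqueue() above safe even for CPUs that have never been brought online.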
@@ -831,6 +836,7 @@ static void take_over_work(struct workqueue_struct *wq, unsigned int cpu)
 
 	spin_lock_irq(&cwq->lock);
 	list_replace_init(&cwq->worklist, &list);
+	migrate_sequence++;
 
 	while (!list_empty(&list)) {
 		printk("Taking work for %s\n", wq->name);