author     Andrew Morton <akpm@osdl.org>           2006-08-14 02:24:26 -0400
committer  Greg Kroah-Hartman <gregkh@suse.de>     2006-08-14 15:54:29 -0400
commit     9b41ea7289a589993d3daabc61f999b4147872c4 (patch)
tree       35a79670ca4493832f1da026a047e3fc9eec0910 /kernel/workqueue.c
parent     2b25742556b1a351ce4821f6feddcba23bdd930b (diff)
[PATCH] workqueue: remove lock_cpu_hotplug()
Use a private lock instead.  It protects all per-cpu data structures in
workqueue.c, including the workqueues list.

Fix a bug in schedule_on_each_cpu(): it was forgetting to lock down the
per-cpu resources.

Unfixed long-standing bug: if someone unplugs the CPU identified by
`singlethread_cpu' the kernel will get very sick.

Cc: Dave Jones <davej@codemonkey.org.uk>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
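As a quick orientation before the diff, here is a minimal sketch of the locking
pattern the patch adopts.  It is illustrative only: the names my_mutex,
my_percpu_walk() and my_cpu_callback() are hypothetical stand-ins for
workqueue_mutex and the workqueue code.  A private mutex guards the per-cpu
state; every walk over the online CPUs takes it, and the hotplug notifier holds
it across the whole unplug, from CPU_DOWN_PREPARE until the operation either
completes (CPU_DEAD) or is backed out (CPU_DOWN_FAILED).  The patch does the
symmetric thing on the bring-up path, locking at CPU_UP_PREPARE and unlocking
at CPU_ONLINE or CPU_UP_CANCELED.

/* Illustrative sketch only -- all names here are hypothetical. */
#include <linux/cpu.h>
#include <linux/mutex.h>
#include <linux/notifier.h>

static DEFINE_MUTEX(my_mutex);		/* guards all of the per-cpu state */

/* Any walk over the online CPUs excludes hotplug for its duration. */
static void my_percpu_walk(void (*fn)(int cpu))
{
	int cpu;

	mutex_lock(&my_mutex);
	for_each_online_cpu(cpu)
		fn(cpu);
	mutex_unlock(&my_mutex);
}

/*
 * The notifier pins the mutex across the whole unplug: taken at
 * CPU_DOWN_PREPARE, released when the unplug finishes (CPU_DEAD)
 * or is aborted (CPU_DOWN_FAILED), so no walker can observe a
 * half-removed CPU.
 */
static int my_cpu_callback(struct notifier_block *nfb,
			   unsigned long action, void *hcpu)
{
	switch (action) {
	case CPU_DOWN_PREPARE:
		mutex_lock(&my_mutex);
		break;
	case CPU_DOWN_FAILED:
	case CPU_DEAD:
		mutex_unlock(&my_mutex);
		break;
	}
	return NOTIFY_OK;
}

One ordering detail worth noticing in the diff: schedule_on_each_cpu() drops
the mutex before calling flush_workqueue(), because flush_workqueue() itself
takes workqueue_mutex on the multi-threaded path and kernel mutexes are not
recursive; holding it across the flush would self-deadlock.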
Diffstat (limited to 'kernel/workqueue.c')
-rw-r--r--	kernel/workqueue.c	33
1 file changed, 21 insertions(+), 12 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 448e8f7b342d..835fe28b87a8 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -68,7 +68,7 @@ struct workqueue_struct {
 
 /* All the per-cpu workqueues on the system, for hotplug cpu to add/remove
    threads to each one as cpus come/go. */
-static DEFINE_SPINLOCK(workqueue_lock);
+static DEFINE_MUTEX(workqueue_mutex);
 static LIST_HEAD(workqueues);
 
 static int singlethread_cpu;
@@ -320,10 +320,10 @@ void fastcall flush_workqueue(struct workqueue_struct *wq)
 	} else {
 		int cpu;
 
-		lock_cpu_hotplug();
+		mutex_lock(&workqueue_mutex);
 		for_each_online_cpu(cpu)
 			flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu));
-		unlock_cpu_hotplug();
+		mutex_unlock(&workqueue_mutex);
 	}
 }
 EXPORT_SYMBOL_GPL(flush_workqueue);
@@ -371,8 +371,7 @@ struct workqueue_struct *__create_workqueue(const char *name,
 	}
 
 	wq->name = name;
-	/* We don't need the distraction of CPUs appearing and vanishing. */
-	lock_cpu_hotplug();
+	mutex_lock(&workqueue_mutex);
 	if (singlethread) {
 		INIT_LIST_HEAD(&wq->list);
 		p = create_workqueue_thread(wq, singlethread_cpu);
@@ -381,9 +380,7 @@ struct workqueue_struct *__create_workqueue(const char *name,
 		else
 			wake_up_process(p);
 	} else {
-		spin_lock(&workqueue_lock);
 		list_add(&wq->list, &workqueues);
-		spin_unlock(&workqueue_lock);
 		for_each_online_cpu(cpu) {
 			p = create_workqueue_thread(wq, cpu);
 			if (p) {
@@ -393,7 +390,7 @@ struct workqueue_struct *__create_workqueue(const char *name,
 				destroy = 1;
 		}
 	}
-	unlock_cpu_hotplug();
+	mutex_unlock(&workqueue_mutex);
 
 	/*
 	 * Was there any error during startup? If yes then clean up:
@@ -434,17 +431,15 @@ void destroy_workqueue(struct workqueue_struct *wq)
 	flush_workqueue(wq);
 
 	/* We don't need the distraction of CPUs appearing and vanishing. */
-	lock_cpu_hotplug();
+	mutex_lock(&workqueue_mutex);
 	if (is_single_threaded(wq))
 		cleanup_workqueue_thread(wq, singlethread_cpu);
 	else {
 		for_each_online_cpu(cpu)
 			cleanup_workqueue_thread(wq, cpu);
-		spin_lock(&workqueue_lock);
 		list_del(&wq->list);
-		spin_unlock(&workqueue_lock);
 	}
-	unlock_cpu_hotplug();
+	mutex_unlock(&workqueue_mutex);
 	free_percpu(wq->cpu_wq);
 	kfree(wq);
 }
@@ -515,11 +510,13 @@ int schedule_on_each_cpu(void (*func)(void *info), void *info)
 	if (!works)
 		return -ENOMEM;
 
+	mutex_lock(&workqueue_mutex);
 	for_each_online_cpu(cpu) {
 		INIT_WORK(per_cpu_ptr(works, cpu), func, info);
 		__queue_work(per_cpu_ptr(keventd_wq->cpu_wq, cpu),
 				per_cpu_ptr(works, cpu));
 	}
+	mutex_unlock(&workqueue_mutex);
 	flush_workqueue(keventd_wq);
 	free_percpu(works);
 	return 0;
@@ -635,6 +632,7 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
 
 	switch (action) {
 	case CPU_UP_PREPARE:
+		mutex_lock(&workqueue_mutex);
 		/* Create a new workqueue thread for it. */
 		list_for_each_entry(wq, &workqueues, list) {
 			if (!create_workqueue_thread(wq, hotcpu)) {
@@ -653,6 +651,7 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
 			kthread_bind(cwq->thread, hotcpu);
 			wake_up_process(cwq->thread);
 		}
+		mutex_unlock(&workqueue_mutex);
 		break;
 
 	case CPU_UP_CANCELED:
@@ -664,6 +663,15 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
 						any_online_cpu(cpu_online_map));
 			cleanup_workqueue_thread(wq, hotcpu);
 		}
+		mutex_unlock(&workqueue_mutex);
+		break;
+
+	case CPU_DOWN_PREPARE:
+		mutex_lock(&workqueue_mutex);
+		break;
+
+	case CPU_DOWN_FAILED:
+		mutex_unlock(&workqueue_mutex);
 		break;
 
 	case CPU_DEAD:
@@ -671,6 +679,7 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
 			cleanup_workqueue_thread(wq, hotcpu);
 		list_for_each_entry(wq, &workqueues, list)
 			take_over_work(wq, hotcpu);
+		mutex_unlock(&workqueue_mutex);
 		break;
 	}
 