diff options
Diffstat (limited to 'kernel/workqueue.c')
-rw-r--r-- | kernel/workqueue.c | 40 |
1 file changed, 34 insertions, 6 deletions
diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 2bd5aee1c736..82c4fa70595c 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c | |||
@@ -29,7 +29,8 @@ | |||
29 | #include <linux/kthread.h> | 29 | #include <linux/kthread.h> |
30 | 30 | ||
31 | /* | 31 | /* |
32 | * The per-CPU workqueue (if single thread, we always use cpu 0's). | 32 | * The per-CPU workqueue (if single thread, we always use the first |
33 | * possible cpu). | ||
33 | * | 34 | * |
34 | * The sequence counters are for flush_scheduled_work(). It wants to wait | 35 | * The sequence counters are for flush_scheduled_work(). It wants to wait |
35 | * until until all currently-scheduled works are completed, but it doesn't | 36 | * until until all currently-scheduled works are completed, but it doesn't |
@@ -69,6 +70,8 @@ struct workqueue_struct { | |||
69 | static DEFINE_SPINLOCK(workqueue_lock); | 70 | static DEFINE_SPINLOCK(workqueue_lock); |
70 | static LIST_HEAD(workqueues); | 71 | static LIST_HEAD(workqueues); |
71 | 72 | ||
73 | static int singlethread_cpu; | ||
74 | |||
72 | /* If it's single threaded, it isn't in the list of workqueues. */ | 75 | /* If it's single threaded, it isn't in the list of workqueues. */ |
73 | static inline int is_single_threaded(struct workqueue_struct *wq) | 76 | static inline int is_single_threaded(struct workqueue_struct *wq) |
74 | { | 77 | { |
@@ -102,7 +105,7 @@ int fastcall queue_work(struct workqueue_struct *wq, struct work_struct *work) | |||
102 | 105 | ||
103 | if (!test_and_set_bit(0, &work->pending)) { | 106 | if (!test_and_set_bit(0, &work->pending)) { |
104 | if (unlikely(is_single_threaded(wq))) | 107 | if (unlikely(is_single_threaded(wq))) |
105 | cpu = any_online_cpu(cpu_online_map); | 108 | cpu = singlethread_cpu; |
106 | BUG_ON(!list_empty(&work->entry)); | 109 | BUG_ON(!list_empty(&work->entry)); |
107 | __queue_work(per_cpu_ptr(wq->cpu_wq, cpu), work); | 110 | __queue_work(per_cpu_ptr(wq->cpu_wq, cpu), work); |
108 | ret = 1; | 111 | ret = 1; |
@@ -118,7 +121,7 @@ static void delayed_work_timer_fn(unsigned long __data) | |||
118 | int cpu = smp_processor_id(); | 121 | int cpu = smp_processor_id(); |
119 | 122 | ||
120 | if (unlikely(is_single_threaded(wq))) | 123 | if (unlikely(is_single_threaded(wq))) |
121 | cpu = any_online_cpu(cpu_online_map); | 124 | cpu = singlethread_cpu; |
122 | 125 | ||
123 | __queue_work(per_cpu_ptr(wq->cpu_wq, cpu), work); | 126 | __queue_work(per_cpu_ptr(wq->cpu_wq, cpu), work); |
124 | } | 127 | } |
@@ -267,7 +270,7 @@ void fastcall flush_workqueue(struct workqueue_struct *wq) | |||
267 | 270 | ||
268 | if (is_single_threaded(wq)) { | 271 | if (is_single_threaded(wq)) { |
269 | /* Always use first cpu's area. */ | 272 | /* Always use first cpu's area. */ |
270 | flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, any_online_cpu(cpu_online_map))); | 273 | flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, singlethread_cpu)); |
271 | } else { | 274 | } else { |
272 | int cpu; | 275 | int cpu; |
273 | 276 | ||
@@ -315,12 +318,17 @@ struct workqueue_struct *__create_workqueue(const char *name, | |||
315 | return NULL; | 318 | return NULL; |
316 | 319 | ||
317 | wq->cpu_wq = alloc_percpu(struct cpu_workqueue_struct); | 320 | wq->cpu_wq = alloc_percpu(struct cpu_workqueue_struct); |
321 | if (!wq->cpu_wq) { | ||
322 | kfree(wq); | ||
323 | return NULL; | ||
324 | } | ||
325 | |||
318 | wq->name = name; | 326 | wq->name = name; |
319 | /* We don't need the distraction of CPUs appearing and vanishing. */ | 327 | /* We don't need the distraction of CPUs appearing and vanishing. */ |
320 | lock_cpu_hotplug(); | 328 | lock_cpu_hotplug(); |
321 | if (singlethread) { | 329 | if (singlethread) { |
322 | INIT_LIST_HEAD(&wq->list); | 330 | INIT_LIST_HEAD(&wq->list); |
323 | p = create_workqueue_thread(wq, any_online_cpu(cpu_online_map)); | 331 | p = create_workqueue_thread(wq, singlethread_cpu); |
324 | if (!p) | 332 | if (!p) |
325 | destroy = 1; | 333 | destroy = 1; |
326 | else | 334 | else |
@@ -374,7 +382,7 @@ void destroy_workqueue(struct workqueue_struct *wq) | |||
374 | /* We don't need the distraction of CPUs appearing and vanishing. */ | 382 | /* We don't need the distraction of CPUs appearing and vanishing. */ |
375 | lock_cpu_hotplug(); | 383 | lock_cpu_hotplug(); |
376 | if (is_single_threaded(wq)) | 384 | if (is_single_threaded(wq)) |
377 | cleanup_workqueue_thread(wq, any_online_cpu(cpu_online_map)); | 385 | cleanup_workqueue_thread(wq, singlethread_cpu); |
378 | else { | 386 | else { |
379 | for_each_online_cpu(cpu) | 387 | for_each_online_cpu(cpu) |
380 | cleanup_workqueue_thread(wq, cpu); | 388 | cleanup_workqueue_thread(wq, cpu); |
@@ -419,6 +427,25 @@ int schedule_delayed_work_on(int cpu, | |||
419 | return ret; | 427 | return ret; |
420 | } | 428 | } |
421 | 429 | ||
430 | int schedule_on_each_cpu(void (*func) (void *info), void *info) | ||
431 | { | ||
432 | int cpu; | ||
433 | struct work_struct *work; | ||
434 | |||
435 | work = kmalloc(NR_CPUS * sizeof(struct work_struct), GFP_KERNEL); | ||
436 | |||
437 | if (!work) | ||
438 | return -ENOMEM; | ||
439 | for_each_online_cpu(cpu) { | ||
440 | INIT_WORK(work + cpu, func, info); | ||
441 | __queue_work(per_cpu_ptr(keventd_wq->cpu_wq, cpu), | ||
442 | work + cpu); | ||
443 | } | ||
444 | flush_workqueue(keventd_wq); | ||
445 | kfree(work); | ||
446 | return 0; | ||
447 | } | ||
448 | |||
422 | void flush_scheduled_work(void) | 449 | void flush_scheduled_work(void) |
423 | { | 450 | { |
424 | flush_workqueue(keventd_wq); | 451 | flush_workqueue(keventd_wq); |
@@ -543,6 +570,7 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb, | |||
543 | 570 | ||
544 | void init_workqueues(void) | 571 | void init_workqueues(void) |
545 | { | 572 | { |
573 | singlethread_cpu = first_cpu(cpu_possible_map); | ||
546 | hotcpu_notifier(workqueue_cpu_callback, 0); | 574 | hotcpu_notifier(workqueue_cpu_callback, 0); |
547 | keventd_wq = create_workqueue("events"); | 575 | keventd_wq = create_workqueue("events"); |
548 | BUG_ON(!keventd_wq); | 576 | BUG_ON(!keventd_wq); |