diff options
| author | Anton Altaparmakov <aia21@cantab.net> | 2005-12-05 10:48:41 -0500 |
|---|---|---|
| committer | Anton Altaparmakov <aia21@cantab.net> | 2005-12-05 10:48:41 -0500 |
| commit | 292d4ed32e35df4755052b5002e533348d1648fd (patch) | |
| tree | 8522e6bab962696bd25a6c02fb068c674a09b7ee /kernel/workqueue.c | |
| parent | 3c6af7fa787f21f8873a050568ed892312899eb5 (diff) | |
| parent | e4f5c82a92c2a546a16af1614114eec19120e40a (diff) | |
Merge branch 'master' of /usr/src/ntfs-2.6/
Diffstat (limited to 'kernel/workqueue.c')
| -rw-r--r-- | kernel/workqueue.c | 12 |
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 42df83d7fad2..2bd5aee1c736 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
| @@ -102,7 +102,7 @@ int fastcall queue_work(struct workqueue_struct *wq, struct work_struct *work) | |||
| 102 | 102 | ||
| 103 | if (!test_and_set_bit(0, &work->pending)) { | 103 | if (!test_and_set_bit(0, &work->pending)) { |
| 104 | if (unlikely(is_single_threaded(wq))) | 104 | if (unlikely(is_single_threaded(wq))) |
| 105 | cpu = 0; | 105 | cpu = any_online_cpu(cpu_online_map); |
| 106 | BUG_ON(!list_empty(&work->entry)); | 106 | BUG_ON(!list_empty(&work->entry)); |
| 107 | __queue_work(per_cpu_ptr(wq->cpu_wq, cpu), work); | 107 | __queue_work(per_cpu_ptr(wq->cpu_wq, cpu), work); |
| 108 | ret = 1; | 108 | ret = 1; |
| @@ -118,7 +118,7 @@ static void delayed_work_timer_fn(unsigned long __data) | |||
| 118 | int cpu = smp_processor_id(); | 118 | int cpu = smp_processor_id(); |
| 119 | 119 | ||
| 120 | if (unlikely(is_single_threaded(wq))) | 120 | if (unlikely(is_single_threaded(wq))) |
| 121 | cpu = 0; | 121 | cpu = any_online_cpu(cpu_online_map); |
| 122 | 122 | ||
| 123 | __queue_work(per_cpu_ptr(wq->cpu_wq, cpu), work); | 123 | __queue_work(per_cpu_ptr(wq->cpu_wq, cpu), work); |
| 124 | } | 124 | } |
| @@ -266,8 +266,8 @@ void fastcall flush_workqueue(struct workqueue_struct *wq) | |||
| 266 | might_sleep(); | 266 | might_sleep(); |
| 267 | 267 | ||
| 268 | if (is_single_threaded(wq)) { | 268 | if (is_single_threaded(wq)) { |
| 269 | /* Always use cpu 0's area. */ | 269 | /* Always use first cpu's area. */ |
| 270 | flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, 0)); | 270 | flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, any_online_cpu(cpu_online_map))); |
| 271 | } else { | 271 | } else { |
| 272 | int cpu; | 272 | int cpu; |
| 273 | 273 | ||
| @@ -320,7 +320,7 @@ struct workqueue_struct *__create_workqueue(const char *name, | |||
| 320 | lock_cpu_hotplug(); | 320 | lock_cpu_hotplug(); |
| 321 | if (singlethread) { | 321 | if (singlethread) { |
| 322 | INIT_LIST_HEAD(&wq->list); | 322 | INIT_LIST_HEAD(&wq->list); |
| 323 | p = create_workqueue_thread(wq, 0); | 323 | p = create_workqueue_thread(wq, any_online_cpu(cpu_online_map)); |
| 324 | if (!p) | 324 | if (!p) |
| 325 | destroy = 1; | 325 | destroy = 1; |
| 326 | else | 326 | else |
| @@ -374,7 +374,7 @@ void destroy_workqueue(struct workqueue_struct *wq) | |||
| 374 | /* We don't need the distraction of CPUs appearing and vanishing. */ | 374 | /* We don't need the distraction of CPUs appearing and vanishing. */ |
| 375 | lock_cpu_hotplug(); | 375 | lock_cpu_hotplug(); |
| 376 | if (is_single_threaded(wq)) | 376 | if (is_single_threaded(wq)) |
| 377 | cleanup_workqueue_thread(wq, 0); | 377 | cleanup_workqueue_thread(wq, any_online_cpu(cpu_online_map)); |
| 378 | else { | 378 | else { |
| 379 | for_each_online_cpu(cpu) | 379 | for_each_online_cpu(cpu) |
| 380 | cleanup_workqueue_thread(wq, cpu); | 380 | cleanup_workqueue_thread(wq, cpu); |
