path: root/kernel/workqueue.c
author	Ingo Molnar <mingo@elte.hu>	2008-07-25 05:37:07 -0400
committer	Ingo Molnar <mingo@elte.hu>	2008-07-25 05:37:07 -0400
commit	0e2f65ee30eee2db054f7fd73f462c5da33ec963 (patch)
tree	26c61eb7745da0c0d9135e9d12088f570cb8530d /kernel/workqueue.c
parent	da7878d75b8520c9ae00d27dfbbce546a7bfdfbb (diff)
parent	fb2e405fc1fc8b20d9c78eaa1c7fd5a297efde43 (diff)
Merge branch 'linus' into x86/pebs
Conflicts:

	arch/x86/Kconfig.cpu
	arch/x86/kernel/cpu/intel.c
	arch/x86/kernel/setup_64.c

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/workqueue.c')
-rw-r--r--	kernel/workqueue.c	47
1 file changed, 42 insertions(+), 5 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 29fc39f1029c..6fd158b21026 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -13,7 +13,7 @@
  * Kai Petzke <wpp@marie.physik.tu-berlin.de>
  * Theodore Ts'o <tytso@mit.edu>
  *
- * Made to use alloc_percpu by Christoph Lameter <clameter@sgi.com>.
+ * Made to use alloc_percpu by Christoph Lameter.
  */
 
 #include <linux/module.h>
@@ -140,7 +140,6 @@ static void insert_work(struct cpu_workqueue_struct *cwq,
 	wake_up(&cwq->more_work);
 }
 
-/* Preempt must be disabled. */
 static void __queue_work(struct cpu_workqueue_struct *cwq,
 			 struct work_struct *work)
 {
@@ -175,6 +174,31 @@ int queue_work(struct workqueue_struct *wq, struct work_struct *work)
 }
 EXPORT_SYMBOL_GPL(queue_work);
 
+/**
+ * queue_work_on - queue work on specific cpu
+ * @cpu: CPU number to execute work on
+ * @wq: workqueue to use
+ * @work: work to queue
+ *
+ * Returns 0 if @work was already on a queue, non-zero otherwise.
+ *
+ * We queue the work to a specific CPU, the caller must ensure it
+ * can't go away.
+ */
+int
+queue_work_on(int cpu, struct workqueue_struct *wq, struct work_struct *work)
+{
+	int ret = 0;
+
+	if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
+		BUG_ON(!list_empty(&work->entry));
+		__queue_work(wq_per_cpu(wq, cpu), work);
+		ret = 1;
+	}
+	return ret;
+}
+EXPORT_SYMBOL_GPL(queue_work_on);
+
 static void delayed_work_timer_fn(unsigned long __data)
 {
 	struct delayed_work *dwork = (struct delayed_work *)__data;
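The hunk above brings in the new queue_work_on() API, which queues a work item on one specific CPU of a caller-created workqueue; per the kerneldoc, the caller must keep that CPU from going away while the work is pending. A minimal usage sketch follows (the workqueue, work item, and handler names are hypothetical and not part of this commit):

#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/smp.h>

static struct workqueue_struct *example_wq;	/* hypothetical example workqueue */
static struct work_struct example_work;		/* hypothetical work item */

static void example_work_fn(struct work_struct *work)
{
	/* Runs in the workqueue thread bound to the CPU chosen below. */
	pr_info("example work running on CPU %d\n", smp_processor_id());
}

static int __init example_init(void)
{
	example_wq = create_workqueue("example_wq");
	if (!example_wq)
		return -ENOMEM;

	INIT_WORK(&example_work, example_work_fn);
	/* Queue on CPU 0; returns 0 if the work was already pending. */
	queue_work_on(0, example_wq, &example_work);
	return 0;
}

static void __exit example_exit(void)
{
	flush_workqueue(example_wq);
	destroy_workqueue(example_wq);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");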
@@ -397,7 +421,7 @@ void flush_workqueue(struct workqueue_struct *wq)
 	might_sleep();
 	lock_acquire(&wq->lockdep_map, 0, 0, 0, 2, _THIS_IP_);
 	lock_release(&wq->lockdep_map, 1, _THIS_IP_);
-	for_each_cpu_mask(cpu, *cpu_map)
+	for_each_cpu_mask_nr(cpu, *cpu_map)
 		flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu));
 }
 EXPORT_SYMBOL_GPL(flush_workqueue);
@@ -477,7 +501,7 @@ static void wait_on_work(struct work_struct *work)
 	wq = cwq->wq;
 	cpu_map = wq_cpu_map(wq);
 
-	for_each_cpu_mask(cpu, *cpu_map)
+	for_each_cpu_mask_nr(cpu, *cpu_map)
 		wait_on_cpu_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
 }
 
@@ -553,6 +577,19 @@ int schedule_work(struct work_struct *work)
 }
 EXPORT_SYMBOL(schedule_work);
 
+/*
+ * schedule_work_on - put work task on a specific cpu
+ * @cpu: cpu to put the work task on
+ * @work: job to be done
+ *
+ * This puts a job on a specific cpu
+ */
+int schedule_work_on(int cpu, struct work_struct *work)
+{
+	return queue_work_on(cpu, keventd_wq, work);
+}
+EXPORT_SYMBOL(schedule_work_on);
+
 /**
  * schedule_delayed_work - put work task in global workqueue after delay
  * @dwork: job to be done
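schedule_work_on(), added just above, is the convenience wrapper around queue_work_on() that targets the shared kernel workqueue (keventd_wq, serviced by the per-CPU events/N threads) instead of a caller-created one. A short sketch, again with hypothetical names:

#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/smp.h>

static struct work_struct poke_work;	/* hypothetical work item */

static void poke_fn(struct work_struct *work)
{
	pr_info("events thread handled this job on CPU %d\n", smp_processor_id());
}

static void poke_cpu(int cpu)
{
	INIT_WORK(&poke_work, poke_fn);
	/* Runs poke_fn() in the events/<cpu> kernel thread. */
	schedule_work_on(cpu, &poke_work);
}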
@@ -813,7 +850,7 @@ void destroy_workqueue(struct workqueue_struct *wq)
 	list_del(&wq->list);
 	spin_unlock(&workqueue_lock);
 
-	for_each_cpu_mask(cpu, *cpu_map)
+	for_each_cpu_mask_nr(cpu, *cpu_map)
 		cleanup_workqueue_thread(per_cpu_ptr(wq->cpu_wq, cpu));
 	put_online_cpus();
 
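The conversions from for_each_cpu_mask() to for_each_cpu_mask_nr() in the flush_workqueue(), wait_on_work() and destroy_workqueue() hunks above come in with the merge: the _nr variant walks the mask only up to nr_cpu_ids (the number of possible CPUs determined at boot) rather than the compile-time NR_CPUS limit, so kernels built with a large NR_CPUS stop scanning guaranteed-empty mask bits. A conceptual sketch of the difference only; handle_cpu() is a placeholder and these are not the exact macro definitions from <linux/cpumask.h>:

/* Old: examines bit positions 0 .. NR_CPUS-1 of the mask. */
for_each_cpu_mask(cpu, *cpu_map)
	handle_cpu(cpu);

/* New: stops at nr_cpu_ids, one past the highest possible CPU number. */
for_each_cpu_mask_nr(cpu, *cpu_map)
	handle_cpu(cpu);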