diff options
author | Zhang Rui <rui.zhang@intel.com> | 2008-07-24 00:28:39 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2008-07-24 13:47:23 -0400 |
commit | c1a220e7acf8ad2c03504891f4a70cd9c32c904b (patch) | |
tree | 902104a5a5debb5b881d7af4110ad1258ea5b0bd /kernel/workqueue.c | |
parent | 0d83304c7e7bd3b05be90281b3a47841bc8f057a (diff) |
pm: introduce new interfaces schedule_work_on() and queue_work_on()
This interface allows adding a job on a specific cpu.
Although a work struct on a cpu will be scheduled to another cpu if the cpu
dies, there is a recursion if a work task tries to offline the cpu it's
running on. We need to schedule the task to a specific cpu in this case.
http://bugzilla.kernel.org/show_bug.cgi?id=10897
[oleg@tv-sign.ru: cleanups]
Signed-off-by: Zhang Rui <rui.zhang@intel.com>
Tested-by: Rus <harbour@sfinx.od.ua>
Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
Acked-by: Pavel Machek <pavel@ucw.cz>
Signed-off-by: Oleg Nesterov <oleg@tv-sign.ru>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'kernel/workqueue.c')
-rw-r--r-- | kernel/workqueue.c | 39 |
1 files changed, 38 insertions, 1 deletions
diff --git a/kernel/workqueue.c b/kernel/workqueue.c index a6d36346d10a..6fd158b21026 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c | |||
@@ -140,7 +140,6 @@ static void insert_work(struct cpu_workqueue_struct *cwq, | |||
140 | wake_up(&cwq->more_work); | 140 | wake_up(&cwq->more_work); |
141 | } | 141 | } |
142 | 142 | ||
143 | /* Preempt must be disabled. */ | ||
144 | static void __queue_work(struct cpu_workqueue_struct *cwq, | 143 | static void __queue_work(struct cpu_workqueue_struct *cwq, |
145 | struct work_struct *work) | 144 | struct work_struct *work) |
146 | { | 145 | { |
@@ -175,6 +174,31 @@ int queue_work(struct workqueue_struct *wq, struct work_struct *work) | |||
175 | } | 174 | } |
176 | EXPORT_SYMBOL_GPL(queue_work); | 175 | EXPORT_SYMBOL_GPL(queue_work); |
177 | 176 | ||
177 | /** | ||
178 | * queue_work_on - queue work on specific cpu | ||
179 | * @cpu: CPU number to execute work on | ||
180 | * @wq: workqueue to use | ||
181 | * @work: work to queue | ||
182 | * | ||
183 | * Returns 0 if @work was already on a queue, non-zero otherwise. | ||
184 | * | ||
185 | * We queue the work to a specific CPU, the caller must ensure it | ||
186 | * can't go away. | ||
187 | */ | ||
188 | int | ||
189 | queue_work_on(int cpu, struct workqueue_struct *wq, struct work_struct *work) | ||
190 | { | ||
191 | int ret = 0; | ||
192 | |||
193 | if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) { | ||
194 | BUG_ON(!list_empty(&work->entry)); | ||
195 | __queue_work(wq_per_cpu(wq, cpu), work); | ||
196 | ret = 1; | ||
197 | } | ||
198 | return ret; | ||
199 | } | ||
200 | EXPORT_SYMBOL_GPL(queue_work_on); | ||
201 | |||
178 | static void delayed_work_timer_fn(unsigned long __data) | 202 | static void delayed_work_timer_fn(unsigned long __data) |
179 | { | 203 | { |
180 | struct delayed_work *dwork = (struct delayed_work *)__data; | 204 | struct delayed_work *dwork = (struct delayed_work *)__data; |
@@ -553,6 +577,19 @@ int schedule_work(struct work_struct *work) | |||
553 | } | 577 | } |
554 | EXPORT_SYMBOL(schedule_work); | 578 | EXPORT_SYMBOL(schedule_work); |
555 | 579 | ||
580 | /* | ||
581 | * schedule_work_on - put work task on a specific cpu | ||
582 | * @cpu: cpu to put the work task on | ||
583 | * @work: job to be done | ||
584 | * | ||
585 | * This puts a job on a specific cpu | ||
586 | */ | ||
587 | int schedule_work_on(int cpu, struct work_struct *work) | ||
588 | { | ||
589 | return queue_work_on(cpu, keventd_wq, work); | ||
590 | } | ||
591 | EXPORT_SYMBOL(schedule_work_on); | ||
592 | |||
556 | /** | 593 | /** |
557 | * schedule_delayed_work - put work task in global workqueue after delay | 594 | * schedule_delayed_work - put work task in global workqueue after delay |
558 | * @dwork: job to be done | 595 | * @dwork: job to be done |