author		Oleg Nesterov <oleg@tv-sign.ru>	2007-05-09 05:34:16 -0400
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-05-09 15:30:52 -0400
commit		ed7c0feede39d70092d048ec30f59bb1df69eec6 (patch)
tree		67ff89df1a5db2037da34ddf17002b4c7ff218df
parent		06ba38a9a0f6ceffe70343f684c5a690e3710ef4 (diff)
make queue_delayed_work() friendly to flush_work()
Currently typeof(delayed_work->work.data) is

	"struct workqueue_struct"	when the timer is pending
	"struct cpu_workqueue_struct"	when the work is queued
This makes it impossible to use flush_work(&delayed_work->work) in addition
to cancel_delayed_work()/cancel_rearming_delayed_work(), which is not good.
Change queue_delayed_work()/delayed_work_timer_fn() to use the cwq, not the wq.
This complicates (and uglifies) these functions a little bit, but allows us to
use flush_work(&dwork->work) and imho makes the whole code more consistent.
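As an aside, the trick that set_wq_data()/get_wq_data() rely on is packing the
cwq pointer and the status flags into a single word, which works because the
pointer is aligned and its low bits are free. A minimal userspace sketch of
that packing (simplified names and a two-bit flag mask assumed; this is not
the kernel code itself):

	#include <assert.h>
	#include <stdio.h>

	#define PENDING_BIT	0	/* stand-in for WORK_STRUCT_PENDING */
	#define FLAG_MASK	3UL	/* stand-in for WORK_STRUCT_FLAG_MASK */

	struct cpu_workqueue { int cpu; };	/* stand-in for cpu_workqueue_struct */

	/* Pack the cwq pointer with the pending flag, keeping existing flag bits. */
	static unsigned long pack_wq_data(struct cpu_workqueue *cwq, unsigned long old)
	{
		unsigned long new = (unsigned long)cwq | (1UL << PENDING_BIT);
		new |= FLAG_MASK & old;
		return new;
	}

	/* Strip the flag bits to recover the typed pointer. */
	static struct cpu_workqueue *unpack_wq_data(unsigned long data)
	{
		return (struct cpu_workqueue *)(data & ~FLAG_MASK);
	}

	int main(void)
	{
		static struct cpu_workqueue cwq = { .cpu = 1 };	/* aligned; low bits clear */
		unsigned long data = pack_wq_data(&cwq, 0);

		assert(unpack_wq_data(data) == &cwq);	/* typed pointer recovered */
		printf("pending=%lu cpu=%d\n", data & 1UL, unpack_wq_data(data)->cpu);
		return 0;
	}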
Also, document the fact that cancel_rearming_delayed_work() doesn't guarantee
the completion of work->func() upon return.
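A hedged sketch of what that means for a keventd caller (my_dwork is a
hypothetical delayed work, not from this patch):

	/* cancel_rearming_delayed_work() may return while work->func() is
	 * still running; flush keventd when completion must be guaranteed. */
	cancel_rearming_delayed_work(&my_dwork);
	flush_scheduled_work();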
Signed-off-by: Oleg Nesterov <oleg@tv-sign.ru>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--	kernel/workqueue.c	28
1 file changed, 18 insertions(+), 10 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 7d1ebfc1a995..d107e1c3b071 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -90,18 +90,20 @@ static const cpumask_t *wq_cpu_map(struct workqueue_struct *wq)
  * Set the workqueue on which a work item is to be run
  * - Must *only* be called if the pending flag is set
  */
-static inline void set_wq_data(struct work_struct *work, void *wq)
+static inline void set_wq_data(struct work_struct *work,
+				struct cpu_workqueue_struct *cwq)
 {
 	unsigned long new;
 
 	BUG_ON(!work_pending(work));
 
-	new = (unsigned long) wq | (1UL << WORK_STRUCT_PENDING);
+	new = (unsigned long) cwq | (1UL << WORK_STRUCT_PENDING);
 	new |= WORK_STRUCT_FLAG_MASK & *work_data_bits(work);
 	atomic_long_set(&work->data, new);
 }
 
-static inline void *get_wq_data(struct work_struct *work)
+static inline
+struct cpu_workqueue_struct *get_wq_data(struct work_struct *work)
 {
 	return (void *) (atomic_long_read(&work->data) & WORK_STRUCT_WQ_DATA_MASK);
 }
@@ -157,7 +159,8 @@ EXPORT_SYMBOL_GPL(queue_work);
 void delayed_work_timer_fn(unsigned long __data)
 {
 	struct delayed_work *dwork = (struct delayed_work *)__data;
-	struct workqueue_struct *wq = get_wq_data(&dwork->work);
+	struct cpu_workqueue_struct *cwq = get_wq_data(&dwork->work);
+	struct workqueue_struct *wq = cwq->wq;
 	int cpu = smp_processor_id();
 
 	if (unlikely(is_single_threaded(wq)))
@@ -189,8 +192,9 @@ int fastcall queue_delayed_work(struct workqueue_struct *wq,
 	BUG_ON(timer_pending(timer));
 	BUG_ON(!list_empty(&work->entry));
 
-	/* This stores wq for the moment, for the timer_fn */
-	set_wq_data(work, wq);
+	/* This stores cwq for the moment, for the timer_fn */
+	set_wq_data(work,
+			per_cpu_ptr(wq->cpu_wq, raw_smp_processor_id()));
 	timer->expires = jiffies + delay;
 	timer->data = (unsigned long)dwork;
 	timer->function = delayed_work_timer_fn;
@@ -221,8 +225,9 @@ int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
 	BUG_ON(timer_pending(timer));
 	BUG_ON(!list_empty(&work->entry));
 
-	/* This stores wq for the moment, for the timer_fn */
-	set_wq_data(work, wq);
+	/* This stores cwq for the moment, for the timer_fn */
+	set_wq_data(work,
+			per_cpu_ptr(wq->cpu_wq, raw_smp_processor_id()));
 	timer->expires = jiffies + delay;
 	timer->data = (unsigned long)dwork;
 	timer->function = delayed_work_timer_fn;
@@ -562,9 +567,12 @@ void flush_work_keventd(struct work_struct *work)
 EXPORT_SYMBOL(flush_work_keventd);
 
 /**
- * cancel_rearming_delayed_workqueue - reliably kill off a delayed work whose handler rearms the delayed work.
+ * cancel_rearming_delayed_workqueue - kill off a delayed work whose handler rearms the delayed work.
  * @wq: the controlling workqueue structure
  * @dwork: the delayed work struct
+ *
+ * Note that the work callback function may still be running on return from
+ * cancel_delayed_work(). Run flush_workqueue() or flush_work() to wait on it.
  */
 void cancel_rearming_delayed_workqueue(struct workqueue_struct *wq,
 				       struct delayed_work *dwork)
@@ -579,7 +587,7 @@ void cancel_rearming_delayed_workqueue(struct workqueue_struct *wq,
 EXPORT_SYMBOL(cancel_rearming_delayed_workqueue);
 
 /**
- * cancel_rearming_delayed_work - reliably kill off a delayed keventd work whose handler rearms the delayed work.
+ * cancel_rearming_delayed_work - kill off a delayed keventd work whose handler rearms the delayed work.
  * @dwork: the delayed work struct
  */
 void cancel_rearming_delayed_work(struct delayed_work *dwork)
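For context, a sketch of how a caller might drive this API end to end under
the documented rules (wq, my_dwork, my_func and "mywq" are illustrative names,
not from this patch; cancel first, then flush to wait out a running handler):

	#include <linux/errno.h>
	#include <linux/init.h>
	#include <linux/module.h>
	#include <linux/workqueue.h>

	static struct workqueue_struct *wq;

	static void my_func(struct work_struct *work)
	{
		struct delayed_work *dwork =
			container_of(work, struct delayed_work, work);

		/* ... do the periodic work, then rearm ourselves ... */
		queue_delayed_work(wq, dwork, HZ);
	}

	static DECLARE_DELAYED_WORK(my_dwork, my_func);

	static int __init my_init(void)
	{
		wq = create_workqueue("mywq");
		if (!wq)
			return -ENOMEM;
		queue_delayed_work(wq, &my_dwork, HZ);
		return 0;
	}

	static void __exit my_exit(void)
	{
		/* stop the rearming work, then wait for any running instance */
		cancel_rearming_delayed_workqueue(wq, &my_dwork);
		flush_workqueue(wq);
		destroy_workqueue(wq);
	}

	module_init(my_init);
	module_exit(my_exit);
	MODULE_LICENSE("GPL");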