diff options
| author | Oleg Nesterov <oleg@redhat.com> | 2010-04-23 11:40:40 -0400 |
|---|---|---|
| committer | Tejun Heo <tj@kernel.org> | 2010-04-30 02:57:25 -0400 |
| commit | 4d707b9f48e2c4aa94b96f1133813b73df71fb55 (patch) | |
| tree | 2e8e6c44e55bcea9ae2de200ebb3edaac81c2a88 /kernel/workqueue.c | |
| parent | eef6a7d5c2f38adadab8240fabf43730fe796482 (diff) | |
workqueue: change cancel_work_sync() to clear work->data
In short: change cancel_work_sync(work) to mark this work as "never
queued" upon return.
When cancel_work_sync(work) succeeds, we know that this work can't be
queued or running, and since we own WORK_STRUCT_PENDING nobody can change
the bits in work->data under us. This means we can also clear the "cwq"
part along with the _PENDING bit locklessly before return; unless the work
is queued, nobody can assume get_wq_data() is stable even under cwq->lock.
This change can speedup the subsequent cancel/flush requests, and as
Dmitry pointed out this simplifies the usage of work_struct's which
can be queued on different workqueues. Consider this pseudo code from
the input subsystem:
struct workqueue_struct *WQ;
struct work_struct *WORK;
for (;;) {
WQ = create_workqueue();
...
if (condition())
queue_work(WQ, WORK);
...
cancel_work_sync(WORK);
destroy_workqueue(WQ);
}
If condition() returns T and then F, cancel_work_sync() will crash the
kernel because WORK->data still points to the already destroyed workqueue.
With this patch the code like above becomes correct.
Suggested-by: Dmitry Torokhov <dmitry.torokhov@gmail.com>
Signed-off-by: Oleg Nesterov <oleg@redhat.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
Diffstat (limited to 'kernel/workqueue.c')
| -rw-r--r-- | kernel/workqueue.c | 12 |
1 file changed, 11 insertions(+), 1 deletion(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 0225fea89340..77dabbf64b8f 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c | |||
| @@ -229,6 +229,16 @@ static inline void set_wq_data(struct work_struct *work, | |||
| 229 | atomic_long_set(&work->data, new); | 229 | atomic_long_set(&work->data, new); |
| 230 | } | 230 | } |
| 231 | 231 | ||
| 232 | /* | ||
| 233 | * Clear WORK_STRUCT_PENDING and the workqueue on which it was queued. | ||
| 234 | */ | ||
| 235 | static inline void clear_wq_data(struct work_struct *work) | ||
| 236 | { | ||
| 237 | unsigned long flags = *work_data_bits(work) & | ||
| 238 | (1UL << WORK_STRUCT_STATIC); | ||
| 239 | atomic_long_set(&work->data, flags); | ||
| 240 | } | ||
| 241 | |||
| 232 | static inline | 242 | static inline |
| 233 | struct cpu_workqueue_struct *get_wq_data(struct work_struct *work) | 243 | struct cpu_workqueue_struct *get_wq_data(struct work_struct *work) |
| 234 | { | 244 | { |
| @@ -671,7 +681,7 @@ static int __cancel_work_timer(struct work_struct *work, | |||
| 671 | wait_on_work(work); | 681 | wait_on_work(work); |
| 672 | } while (unlikely(ret < 0)); | 682 | } while (unlikely(ret < 0)); |
| 673 | 683 | ||
| 674 | work_clear_pending(work); | 684 | clear_wq_data(work); |
| 675 | return ret; | 685 | return ret; |
| 676 | } | 686 | } |
| 677 | 687 | ||
