author    Tejun Heo <tj@kernel.org>  2012-08-21 16:18:24 -0400
committer Tejun Heo <tj@kernel.org>  2012-08-21 16:18:24 -0400
commit    57b30ae77bf00d2318df711ef9a4d2a9be0a3a2a (patch)
tree      d6e084bf0e2b82bb39302ee0e94e6f3f04762dbc /include/linux/workqueue.h
parent    e7c2f967445dd2041f0f8e3179cca22bb8bb7f79 (diff)
workqueue: reimplement cancel_delayed_work() using try_to_grab_pending()
cancel_delayed_work() can't be called from IRQ handlers due to its use of
del_timer_sync() and can't cancel work items which are already transferred
from timer to worklist.

Also, unlike other flush and cancel functions, a canceled delayed_work would
still point to the last associated cpu_workqueue.  If the workqueue is
destroyed afterwards and the work item is re-used on a different workqueue,
the queueing code can oops trying to dereference the already freed
cpu_workqueue.

This patch reimplements cancel_delayed_work() using try_to_grab_pending()
and set_work_cpu_and_clear_pending().  This allows the function to be called
from IRQ handlers and makes its behavior consistent with other flush / cancel
functions.

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
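For reference, a sketch of what the reimplemented function looks like on the
kernel/workqueue.c side (the function body is not part of this header diff,
so treat the details below as an approximation of the approach the message
describes, not a verbatim copy of the patch):

bool cancel_delayed_work(struct delayed_work *dwork)
{
	unsigned long flags;
	int ret;

	/*
	 * try_to_grab_pending() steals the pending state whether the
	 * item is still on the timer or already transferred to a
	 * worklist; retry if another CPU is racing to queue it.
	 */
	do {
		ret = try_to_grab_pending(&dwork->work, true, &flags);
	} while (unlikely(ret == -EAGAIN));

	if (unlikely(ret < 0))
		return false;

	/*
	 * Record the last CPU and clear PENDING in one step so that a
	 * later re-queue never chases a stale cpu_workqueue pointer.
	 */
	set_work_cpu_and_clear_pending(&dwork->work, work_cpu(&dwork->work));
	local_irq_restore(flags);
	return ret;
}

Note that try_to_grab_pending() returns with IRQs disabled and flags saved,
which is why nothing in this path sleeps and the function becomes safe to
call from IRQ context.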
Diffstat (limited to 'include/linux/workqueue.h')
-rw-r--r--  include/linux/workqueue.h | 17 +----------------
1 file changed, 1 insertion(+), 16 deletions(-)
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index d86b320319e0..4898289564ab 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -420,6 +420,7 @@ extern bool flush_work(struct work_struct *work);
 extern bool cancel_work_sync(struct work_struct *work);
 
 extern bool flush_delayed_work(struct delayed_work *dwork);
+extern bool cancel_delayed_work(struct delayed_work *dwork);
 extern bool cancel_delayed_work_sync(struct delayed_work *dwork);
 
 extern void workqueue_set_max_active(struct workqueue_struct *wq,
@@ -429,22 +430,6 @@ extern unsigned int work_cpu(struct work_struct *work);
 extern unsigned int work_busy(struct work_struct *work);
 
 /*
- * Kill off a pending schedule_delayed_work().  Note that the work callback
- * function may still be running on return from cancel_delayed_work(), unless
- * it returns 1 and the work doesn't re-arm itself.  Run flush_workqueue() or
- * cancel_work_sync() to wait on it.
- */
-static inline bool cancel_delayed_work(struct delayed_work *work)
-{
-	bool ret;
-
-	ret = del_timer_sync(&work->timer);
-	if (ret)
-		work_clear_pending(&work->work);
-	return ret;
-}
-
-/*
  * Like above, but uses del_timer() instead of del_timer_sync().  This means,
  * if it returns 0 the timer function may be running and the queueing is in
  * progress.
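With the inline del_timer_sync()-based version gone, cancellation no longer
sleeps.  A hypothetical driver fragment illustrating the kind of call site
this change enables (my_dev, timeout_work and my_dev_irq are illustrative
names, not from this patch):

#include <linux/interrupt.h>
#include <linux/workqueue.h>

struct my_dev {
	struct delayed_work timeout_work;	/* armed when a command is issued */
};

static irqreturn_t my_dev_irq(int irq, void *data)
{
	struct my_dev *dev = data;

	/*
	 * The command completed, so its timeout is moot.  After this
	 * patch, cancel_delayed_work() may be called directly from the
	 * IRQ handler; previously the del_timer_sync() inside it made
	 * that illegal.
	 */
	cancel_delayed_work(&dev->timeout_work);
	return IRQ_HANDLED;
}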