aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorTejun Heo <tj@kernel.org>2012-08-21 16:18:24 -0400
committerTejun Heo <tj@kernel.org>2012-08-21 16:18:24 -0400
commite0aecdd874d78b7129a64b056c20e529e2c916df (patch)
tree0eacde209b1f46beb5293537c85ab8217c7023f4
parentf991b318cc6627a493b0d317a565bb7c3271f36b (diff)
workqueue: use irqsafe timer for delayed_work
Up to now, for delayed_works, try_to_grab_pending() couldn't be used from IRQ handlers because IRQs may happen while delayed_work_timer_fn() is in progress leading to indefinite -EAGAIN.

This patch makes delayed_work use the new TIMER_IRQSAFE flag for delayed_work->timer. This makes try_to_grab_pending() and thus mod_delayed_work_on() safe to call from IRQ handlers.

Signed-off-by: Tejun Heo <tj@kernel.org>
-rw-r--r--include/linux/workqueue.h8
-rw-r--r--kernel/workqueue.c20
2 files changed, 16 insertions, 12 deletions
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index e84ebb69607..d86b320319e 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -136,7 +136,8 @@ struct execute_work {
 #define __DELAYED_WORK_INITIALIZER(n, f, tflags) {		\
 	.work = __WORK_INITIALIZER((n).work, (f)),		\
 	.timer = __TIMER_INITIALIZER(delayed_work_timer_fn,	\
-				     0, (unsigned long)&(n), (tflags)), \
+				     0, (unsigned long)&(n),	\
+				     (tflags) | TIMER_IRQSAFE),	\
 	}
 
 #define DECLARE_WORK(n, f)					\
@@ -214,7 +215,8 @@ static inline unsigned int work_static(struct work_struct *work) { return 0; }
 	do {							\
 		INIT_WORK(&(_work)->work, (_func));		\
 		__setup_timer(&(_work)->timer, delayed_work_timer_fn, \
-			      (unsigned long)(_work), (_tflags)); \
+			      (unsigned long)(_work),		\
+			      (_tflags) | TIMER_IRQSAFE);	\
 	} while (0)
 
 #define __INIT_DELAYED_WORK_ONSTACK(_work, _func, _tflags)	\
@@ -223,7 +225,7 @@ static inline unsigned int work_static(struct work_struct *work) { return 0; }
 		__setup_timer_on_stack(&(_work)->timer,		\
 				       delayed_work_timer_fn,	\
 				       (unsigned long)(_work),	\
-				       (_tflags));		\
+				       (_tflags) | TIMER_IRQSAFE); \
 	} while (0)
 
 #define INIT_DELAYED_WORK(_work, _func)				\
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 85bd3409b9f..b394df8beae 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -1048,16 +1048,14 @@ static void cwq_dec_nr_in_flight(struct cpu_workqueue_struct *cwq, int color,
  *		for arbitrarily long
  *
  * On >= 0 return, the caller owns @work's PENDING bit.  To avoid getting
- * preempted while holding PENDING and @work off queue, preemption must be
- * disabled on entry.  This ensures that we don't return -EAGAIN while
- * another task is preempted in this function.
+ * interrupted while holding PENDING and @work off queue, irq must be
+ * disabled on entry.  This, combined with delayed_work->timer being
+ * irqsafe, ensures that we return -EAGAIN for finite short period of time.
  *
  * On successful return, >= 0, irq is disabled and the caller is
  * responsible for releasing it using local_irq_restore(*@flags).
  *
- * This function is safe to call from any context other than IRQ handler.
- * An IRQ handler may run on top of delayed_work_timer_fn() which can make
- * this function return -EAGAIN perpetually.
+ * This function is safe to call from any context including IRQ handler.
  */
 static int try_to_grab_pending(struct work_struct *work, bool is_dwork,
 			       unsigned long *flags)
@@ -1072,6 +1070,11 @@ static int try_to_grab_pending(struct work_struct *work, bool is_dwork,
 	if (is_dwork) {
 		struct delayed_work *dwork = to_delayed_work(work);
 
+		/*
+		 * dwork->timer is irqsafe.  If del_timer() fails, it's
+		 * guaranteed that the timer is not queued anywhere and not
+		 * running on the local CPU.
+		 */
 		if (likely(del_timer(&dwork->timer)))
 			return 1;
 	}
@@ -1327,9 +1330,8 @@ void delayed_work_timer_fn(unsigned long __data)
 	struct delayed_work *dwork = (struct delayed_work *)__data;
 	struct cpu_workqueue_struct *cwq = get_work_cwq(&dwork->work);
 
-	local_irq_disable();
+	/* should have been called from irqsafe timer with irq already off */
 	__queue_work(dwork->cpu, cwq->wq, &dwork->work);
-	local_irq_enable();
 }
 EXPORT_SYMBOL_GPL(delayed_work_timer_fn);
 
@@ -1444,7 +1446,7 @@ EXPORT_SYMBOL_GPL(queue_delayed_work);
  * Returns %false if @dwork was idle and queued, %true if @dwork was
  * pending and its timer was modified.
  *
- * This function is safe to call from any context other than IRQ handler.
+ * This function is safe to call from any context including IRQ handler.
  * See try_to_grab_pending() for details.
  */
 bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq,