path: root/kernel
author	Lai Jiangshan <laijs@cn.fujitsu.com>	2013-02-06 21:04:53 -0500
committer	Tejun Heo <tj@kernel.org>	2013-02-06 21:04:53 -0500
commit	60c057bca22285efefbba033624763a778f243bf (patch)
tree	8e469c390b5b60ad6b4d7c94bc07522f857032bc /kernel
parent	038366c5cf23ae737b9f72169dd8ade2d105755b (diff)
workqueue: add delayed_work->wq to simplify reentrancy handling
To avoid executing the same work item from multiple CPUs concurrently, a work_struct records the last pool it was on in its ->data so that, on the next queueing, the pool can be queried to determine whether the work item is still executing or not.

A delayed_work goes through a timer before actually being queued on the target workqueue, and the timer needs to know the target workqueue and CPU. This is currently achieved by modifying delayed_work->work.data so that it points to the cwq, which in turn points to the target workqueue and the last CPU the work item was on. __queue_delayed_work() extracts the last CPU from delayed_work->work.data and then combines it with the target workqueue to create new work.data.

The only thing this rather ugly hack achieves is encoding the target workqueue into delayed_work->work.data without using a separate field, which could be a trade-off one chooses to make; unfortunately, it entangles work->data management between the regular workqueue and delayed_work code by setting the cwq pointer before the work item is actually queued, and it becomes a hindrance to further improvements of work->data handling.

This can easily be made sane by adding a target workqueue field to delayed_work. While delayed_work is used widely in the kernel and this does make it a bit larger (<5%), I think this is the right trade-off, especially given the prospect of much saner handling of work->data, which currently involves quite tricky memory barrier dancing, and I don't expect to see any measurable effect.

Add delayed_work->wq and drop the delayed_work->work.data overloading.

tj: Rewrote the description.

Signed-off-by: Lai Jiangshan <laijs@cn.fujitsu.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
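For reference, a minimal sketch of the resulting delayed_work layout. The struct change itself lives in include/linux/workqueue.h and is outside the kernel/-limited diffstat below, so this is an approximation based on how the diff below uses dwork->wq and dwork->cpu, not part of the shown patch:

struct delayed_work {
	struct work_struct work;
	struct timer_list timer;

	/* assumed placement: target workqueue and CPU the timer uses to queue ->work */
	struct workqueue_struct *wq;
	int cpu;
};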
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/workqueue.c	32
1 file changed, 3 insertions(+), 29 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index a229a56f3a32..41a502ce3802 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -1339,10 +1339,9 @@ EXPORT_SYMBOL_GPL(queue_work);
 void delayed_work_timer_fn(unsigned long __data)
 {
 	struct delayed_work *dwork = (struct delayed_work *)__data;
-	struct cpu_workqueue_struct *cwq = get_work_cwq(&dwork->work);
 
 	/* should have been called from irqsafe timer with irq already off */
-	__queue_work(dwork->cpu, cwq->wq, &dwork->work);
+	__queue_work(dwork->cpu, dwork->wq, &dwork->work);
 }
 EXPORT_SYMBOL_GPL(delayed_work_timer_fn);
 
@@ -1351,7 +1350,6 @@ static void __queue_delayed_work(int cpu, struct workqueue_struct *wq,
 {
 	struct timer_list *timer = &dwork->timer;
 	struct work_struct *work = &dwork->work;
-	unsigned int lcpu;
 
 	WARN_ON_ONCE(timer->function != delayed_work_timer_fn ||
 		     timer->data != (unsigned long)dwork);
@@ -1371,30 +1369,7 @@ static void __queue_delayed_work(int cpu, struct workqueue_struct *wq,
 
 	timer_stats_timer_set_start_info(&dwork->timer);
 
-	/*
-	 * This stores cwq for the moment, for the timer_fn.  Note that the
-	 * work's pool is preserved to allow reentrance detection for
-	 * delayed works.
-	 */
-	if (!(wq->flags & WQ_UNBOUND)) {
-		struct worker_pool *pool = get_work_pool(work);
-
-		/*
-		 * If we cannot get the last pool from @work directly,
-		 * select the last CPU such that it avoids unnecessarily
-		 * triggering non-reentrancy check in __queue_work().
-		 */
-		lcpu = cpu;
-		if (pool)
-			lcpu = pool->cpu;
-		if (lcpu == WORK_CPU_UNBOUND)
-			lcpu = raw_smp_processor_id();
-	} else {
-		lcpu = WORK_CPU_UNBOUND;
-	}
-
-	set_work_cwq(work, get_cwq(lcpu, wq), 0);
-
+	dwork->wq = wq;
 	dwork->cpu = cpu;
 	timer->expires = jiffies + delay;
 
@@ -2944,8 +2919,7 @@ bool flush_delayed_work(struct delayed_work *dwork)
 {
 	local_irq_disable();
 	if (del_timer_sync(&dwork->timer))
-		__queue_work(dwork->cpu,
-			     get_work_cwq(&dwork->work)->wq, &dwork->work);
+		__queue_work(dwork->cpu, dwork->wq, &dwork->work);
 	local_irq_enable();
 	return flush_work(&dwork->work);
 }
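For context, a minimal hypothetical caller sketch; my_wq, my_dwork, my_work_fn and kick_it() are illustrative names, not part of this patch. After this change, __queue_delayed_work() records the target workqueue in dwork->wq, and delayed_work_timer_fn() and flush_delayed_work() read it back directly instead of digging it out of work->data:

#include <linux/workqueue.h>

static void my_work_fn(struct work_struct *work)
{
	/* runs on my_wq once the timer fires and the work is queued */
}
static DECLARE_DELAYED_WORK(my_dwork, my_work_fn);

static void kick_it(struct workqueue_struct *my_wq)
{
	/* __queue_delayed_work() stores my_wq in my_dwork.wq for the timer to use */
	queue_delayed_work(my_wq, &my_dwork, HZ);
}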