author	Tejun Heo <tj@kernel.org>	2012-08-21 16:18:24 -0400
committer	Tejun Heo <tj@kernel.org>	2012-08-21 16:18:24 -0400
commit	e7c2f967445dd2041f0f8e3179cca22bb8bb7f79 (patch)
tree	cb6c1d3593d2497e740d313f55592f41e8ae2039 /block
parent	e0aecdd874d78b7129a64b056c20e529e2c916df (diff)
workqueue: use mod_delayed_work() instead of __cancel + queue
Now that mod_delayed_work() is safe to call from IRQ handlers, __cancel_delayed_work() followed by queue_delayed_work() can be replaced with mod_delayed_work(). Most conversions are straightforward except for the following.

* net/core/link_watch.c: linkwatch_schedule_work() was doing quite an elaborate dance around its delayed_work. Collapse it such that linkwatch_work is queued for immediate execution if LW_URGENT and the existing timer is kept otherwise.

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Tomi Valkeinen <tomi.valkeinen@ti.com>
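For reference, the conversion pattern applied throughout this series looks like the minimal sketch below. The workqueue, work item, and function names (example_wq, example_dwork, example_reschedule) are hypothetical, not taken from this commit; the point is only the single mod_delayed_work() call replacing the two-step cancel-and-queue.

#include <linux/workqueue.h>

static struct workqueue_struct *example_wq;	/* hypothetical workqueue */
static struct delayed_work example_dwork;	/* hypothetical work item */

static void example_reschedule(unsigned long delay)
{
	/*
	 * Before: cancel any pending timer, then queue again:
	 *
	 *	__cancel_delayed_work(&example_dwork);
	 *	queue_delayed_work(example_wq, &example_dwork, delay);
	 *
	 * After: mod_delayed_work() adjusts the timer if the work is
	 * already pending, queues it otherwise, and is safe to call
	 * from IRQ handlers.
	 */
	mod_delayed_work(example_wq, &example_dwork, delay);
}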
Diffstat (limited to 'block')
-rw-r--r--	block/blk-core.c	6
-rw-r--r--	block/blk-throttle.c	7
2 files changed, 3 insertions(+), 10 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index 4b4dbdfbca89..4b8b606dbb01 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -319,10 +319,8 @@ EXPORT_SYMBOL(__blk_run_queue);
  */
 void blk_run_queue_async(struct request_queue *q)
 {
-	if (likely(!blk_queue_stopped(q))) {
-		__cancel_delayed_work(&q->delay_work);
-		queue_delayed_work(kblockd_workqueue, &q->delay_work, 0);
-	}
+	if (likely(!blk_queue_stopped(q)))
+		mod_delayed_work(kblockd_workqueue, &q->delay_work, 0);
 }
 EXPORT_SYMBOL(blk_run_queue_async);
 
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index 5a58e779912b..a9664fa0b609 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -929,12 +929,7 @@ throtl_schedule_delayed_work(struct throtl_data *td, unsigned long delay)
 
 	/* schedule work if limits changed even if no bio is queued */
 	if (total_nr_queued(td) || td->limits_changed) {
-		/*
-		 * We might have a work scheduled to be executed in future.
-		 * Cancel that and schedule a new one.
-		 */
-		__cancel_delayed_work(dwork);
-		queue_delayed_work(kthrotld_workqueue, dwork, delay);
+		mod_delayed_work(kthrotld_workqueue, dwork, delay);
 		throtl_log(td, "schedule work. delay=%lu jiffies=%lu",
 			   delay, jiffies);
 	}