author		Tejun Heo <tj@kernel.org>	2012-08-21 16:18:24 -0400
committer	Tejun Heo <tj@kernel.org>	2012-08-21 16:18:24 -0400
commit		e7c2f967445dd2041f0f8e3179cca22bb8bb7f79 (patch)
tree		cb6c1d3593d2497e740d313f55592f41e8ae2039
parent		e0aecdd874d78b7129a64b056c20e529e2c916df (diff)
workqueue: use mod_delayed_work() instead of __cancel + queue
Now that mod_delayed_work() is safe to call from IRQ handlers,
__cancel_delayed_work() followed by queue_delayed_work() can be
replaced with mod_delayed_work().  Most conversions are
straightforward except for the following.

* net/core/link_watch.c: linkwatch_schedule_work() was doing quite an
  elaborate dance around its delayed_work.  Collapse it such that
  linkwatch_work is queued for immediate execution if LW_URGENT and the
  existing timer is kept otherwise.

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Tomi Valkeinen <tomi.valkeinen@ti.com>
-rw-r--r--	block/blk-core.c			|  6
-rw-r--r--	block/blk-throttle.c			|  7
-rw-r--r--	drivers/block/floppy.c			|  3
-rw-r--r--	drivers/infiniband/core/mad.c		| 14
-rw-r--r--	drivers/input/keyboard/qt2160.c		|  3
-rw-r--r--	drivers/input/mouse/synaptics_i2c.c	|  7
-rw-r--r--	net/core/link_watch.c			| 21
7 files changed, 17 insertions(+), 44 deletions(-)
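
Every conversion in this patch follows the same shape. A minimal sketch of
the before/after pattern (the names my_wq, my_dwork, and my_delay are
placeholders for illustration, not identifiers from the patch):

	/* Before: two-step pattern -- cancel the pending timer, then requeue. */
	__cancel_delayed_work(&my_dwork);
	queue_delayed_work(my_wq, &my_dwork, my_delay);

	/* After: one call that atomically retargets a pending delayed_work to
	 * the new expiry, or queues it if it is not pending; callable from IRQ
	 * context.
	 */
	mod_delayed_work(my_wq, &my_dwork, my_delay);
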
diff --git a/block/blk-core.c b/block/blk-core.c
index 4b4dbdfbca89..4b8b606dbb01 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -319,10 +319,8 @@ EXPORT_SYMBOL(__blk_run_queue);
  */
 void blk_run_queue_async(struct request_queue *q)
 {
-	if (likely(!blk_queue_stopped(q))) {
-		__cancel_delayed_work(&q->delay_work);
-		queue_delayed_work(kblockd_workqueue, &q->delay_work, 0);
-	}
+	if (likely(!blk_queue_stopped(q)))
+		mod_delayed_work(kblockd_workqueue, &q->delay_work, 0);
 }
 EXPORT_SYMBOL(blk_run_queue_async);
 
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index 5a58e779912b..a9664fa0b609 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -929,12 +929,7 @@ throtl_schedule_delayed_work(struct throtl_data *td, unsigned long delay)
 
 	/* schedule work if limits changed even if no bio is queued */
 	if (total_nr_queued(td) || td->limits_changed) {
-		/*
-		 * We might have a work scheduled to be executed in future.
-		 * Cancel that and schedule a new one.
-		 */
-		__cancel_delayed_work(dwork);
-		queue_delayed_work(kthrotld_workqueue, dwork, delay);
+		mod_delayed_work(kthrotld_workqueue, dwork, delay);
 		throtl_log(td, "schedule work. delay=%lu jiffies=%lu",
 			   delay, jiffies);
 	}
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index a7d6347aaa79..55a5bc002c06 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -672,7 +672,6 @@ static void __reschedule_timeout(int drive, const char *message)
 
 	if (drive == current_reqD)
 		drive = current_drive;
-	__cancel_delayed_work(&fd_timeout);
 
 	if (drive < 0 || drive >= N_DRIVE) {
 		delay = 20UL * HZ;
@@ -680,7 +679,7 @@ static void __reschedule_timeout(int drive, const char *message)
 	} else
 		delay = UDP->timeout;
 
-	queue_delayed_work(floppy_wq, &fd_timeout, delay);
+	mod_delayed_work(floppy_wq, &fd_timeout, delay);
 	if (UDP->flags & FD_DEBUG)
 		DPRINT("reschedule timeout %s\n", message);
 	timeout_message = message;
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index b0d0bc8a6fb6..b5938147fc89 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -2013,13 +2013,11 @@ static void adjust_timeout(struct ib_mad_agent_private *mad_agent_priv)
 		if (time_after(mad_agent_priv->timeout,
 			       mad_send_wr->timeout)) {
 			mad_agent_priv->timeout = mad_send_wr->timeout;
-			__cancel_delayed_work(&mad_agent_priv->timed_work);
 			delay = mad_send_wr->timeout - jiffies;
 			if ((long)delay <= 0)
 				delay = 1;
-			queue_delayed_work(mad_agent_priv->qp_info->
-					   port_priv->wq,
-					   &mad_agent_priv->timed_work, delay);
+			mod_delayed_work(mad_agent_priv->qp_info->port_priv->wq,
+					 &mad_agent_priv->timed_work, delay);
 		}
 	}
 }
@@ -2052,11 +2050,9 @@ static void wait_for_response(struct ib_mad_send_wr_private *mad_send_wr)
 	list_add(&mad_send_wr->agent_list, list_item);
 
 	/* Reschedule a work item if we have a shorter timeout */
-	if (mad_agent_priv->wait_list.next == &mad_send_wr->agent_list) {
-		__cancel_delayed_work(&mad_agent_priv->timed_work);
-		queue_delayed_work(mad_agent_priv->qp_info->port_priv->wq,
-				   &mad_agent_priv->timed_work, delay);
-	}
+	if (mad_agent_priv->wait_list.next == &mad_send_wr->agent_list)
+		mod_delayed_work(mad_agent_priv->qp_info->port_priv->wq,
+				 &mad_agent_priv->timed_work, delay);
 }
 
 void ib_reset_mad_timeout(struct ib_mad_send_wr_private *mad_send_wr,
diff --git a/drivers/input/keyboard/qt2160.c b/drivers/input/keyboard/qt2160.c
index e7a5e36e1203..76b7d430d03a 100644
--- a/drivers/input/keyboard/qt2160.c
+++ b/drivers/input/keyboard/qt2160.c
@@ -156,8 +156,7 @@ static irqreturn_t qt2160_irq(int irq, void *_qt2160)
 
 	spin_lock_irqsave(&qt2160->lock, flags);
 
-	__cancel_delayed_work(&qt2160->dwork);
-	schedule_delayed_work(&qt2160->dwork, 0);
+	mod_delayed_work(system_wq, &qt2160->dwork, 0);
 
 	spin_unlock_irqrestore(&qt2160->lock, flags);
 
diff --git a/drivers/input/mouse/synaptics_i2c.c b/drivers/input/mouse/synaptics_i2c.c
index f14675702c0f..063a174d3a88 100644
--- a/drivers/input/mouse/synaptics_i2c.c
+++ b/drivers/input/mouse/synaptics_i2c.c
@@ -376,12 +376,7 @@ static void synaptics_i2c_reschedule_work(struct synaptics_i2c *touch,
 
 	spin_lock_irqsave(&touch->lock, flags);
 
-	/*
-	 * If work is already scheduled then subsequent schedules will not
-	 * change the scheduled time that's why we have to cancel it first.
-	 */
-	__cancel_delayed_work(&touch->dwork);
-	schedule_delayed_work(&touch->dwork, delay);
+	mod_delayed_work(system_wq, &touch->dwork, delay);
 
 	spin_unlock_irqrestore(&touch->lock, flags);
 }
diff --git a/net/core/link_watch.c b/net/core/link_watch.c
index c3519c6d1b16..8e397a69005a 100644
--- a/net/core/link_watch.c
+++ b/net/core/link_watch.c
@@ -120,22 +120,13 @@ static void linkwatch_schedule_work(int urgent)
 		delay = 0;
 
 	/*
-	 * This is true if we've scheduled it immeditately or if we don't
-	 * need an immediate execution and it's already pending.
+	 * If urgent, schedule immediate execution; otherwise, don't
+	 * override the existing timer.
 	 */
-	if (schedule_delayed_work(&linkwatch_work, delay) == !delay)
-		return;
-
-	/* Don't bother if there is nothing urgent. */
-	if (!test_bit(LW_URGENT, &linkwatch_flags))
-		return;
-
-	/* It's already running which is good enough. */
-	if (!__cancel_delayed_work(&linkwatch_work))
-		return;
-
-	/* Otherwise we reschedule it again for immediate execution. */
-	schedule_delayed_work(&linkwatch_work, 0);
+	if (test_bit(LW_URGENT, &linkwatch_flags))
+		mod_delayed_work(system_wq, &linkwatch_work, 0);
+	else
+		schedule_delayed_work(&linkwatch_work, delay);
 }
 
 
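
For readers unfamiliar with the semantics the conversions rely on, here is a
hypothetical, self-contained module sketch (none of these names come from the
patch): repeated mod_delayed_work() calls simply retarget the pending expiry,
with no cancel + requeue pair needed.

	#include <linux/module.h>
	#include <linux/workqueue.h>
	#include <linux/jiffies.h>

	static void demo_fn(struct work_struct *work)
	{
		pr_info("demo: delayed work ran at jiffies=%lu\n", jiffies);
	}

	static DECLARE_DELAYED_WORK(demo_dwork, demo_fn);

	static int __init demo_init(void)
	{
		/* Queue for two seconds out... */
		mod_delayed_work(system_wq, &demo_dwork, 2 * HZ);
		/* ...then retarget to one second.  No cancel is needed, and
		 * this call would also be safe from an IRQ handler.
		 */
		mod_delayed_work(system_wq, &demo_dwork, 1 * HZ);
		return 0;
	}

	static void __exit demo_exit(void)
	{
		cancel_delayed_work_sync(&demo_dwork);
	}

	module_init(demo_init);
	module_exit(demo_exit);
	MODULE_LICENSE("GPL");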