author     Jens Axboe <jaxboe@fusionio.com>  2011-03-02 11:08:00 -0500
committer  Jens Axboe <jaxboe@fusionio.com>  2011-03-10 02:45:54 -0500
commit     3cca6dc1c81e2407928dc4c6105252146fd3924f (patch)
tree       b78b0d93e7c02abdc37e1d5a6204ab6b94d56fd4 /block/blk-core.c
parent     53f22956effe1c9e7961b8c6e4362ecca5e460b7 (diff)
block: add API for delaying work/request_fn a little bit

Currently we use plugging for that, but as plugging is going away, we
need an alternative mechanism.

Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
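A minimal usage sketch of the new API (hypothetical driver: mydrv_request_fn(), mydrv_can_dispatch() and mydrv_dispatch() are illustrative names, not part of this commit). When the request_fn hits a temporary resource shortage, it can requeue the request and ask the block layer to re-invoke it a little later instead of relying on plugging:

static void mydrv_request_fn(struct request_queue *q)
{
	struct request *rq;

	/* request_fn is entered with q->queue_lock held */
	while ((rq = blk_fetch_request(q)) != NULL) {
		if (!mydrv_can_dispatch(q->queuedata)) {
			/*
			 * Out of resources for the moment: put the
			 * request back and have the block layer call
			 * us again in ~3 msecs via blk_delay_queue().
			 */
			blk_requeue_request(q, rq);
			blk_delay_queue(q, 3);
			break;
		}
		mydrv_dispatch(q->queuedata, rq);
	}
}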
Diffstat (limited to 'block/blk-core.c')
 block/blk-core.c | 29 +++++++++++++++++++++++++++++
 1 file changed, 29 insertions(+), 0 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index 3cc17e6064d6..e958c7a1e462 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -197,6 +197,32 @@ void blk_dump_rq_flags(struct request *rq, char *msg)
 }
 EXPORT_SYMBOL(blk_dump_rq_flags);
 
+static void blk_delay_work(struct work_struct *work)
+{
+	struct request_queue *q;
+
+	q = container_of(work, struct request_queue, delay_work.work);
+	spin_lock_irq(q->queue_lock);
+	q->request_fn(q);
+	spin_unlock_irq(q->queue_lock);
+}
+
+/**
+ * blk_delay_queue - restart queueing after defined interval
+ * @q:		The &struct request_queue in question
+ * @msecs:	Delay in msecs
+ *
+ * Description:
+ *   Sometimes queueing needs to be postponed for a little while, to allow
+ *   resources to come back. This function will make sure that queueing is
+ *   restarted around the specified time.
+ */
+void blk_delay_queue(struct request_queue *q, unsigned long msecs)
+{
+	schedule_delayed_work(&q->delay_work, msecs_to_jiffies(msecs));
+}
+EXPORT_SYMBOL(blk_delay_queue);
+
 /*
  * "plug" the device if there are no outstanding requests: this will
  * force the transfer to start only after we have put all the requests
@@ -363,6 +389,7 @@ EXPORT_SYMBOL(blk_start_queue);
 void blk_stop_queue(struct request_queue *q)
 {
 	blk_remove_plug(q);
+	cancel_delayed_work(&q->delay_work);
 	queue_flag_set(QUEUE_FLAG_STOPPED, q);
 }
 EXPORT_SYMBOL(blk_stop_queue);
@@ -387,6 +414,7 @@ void blk_sync_queue(struct request_queue *q)
 	del_timer_sync(&q->timeout);
 	cancel_work_sync(&q->unplug_work);
 	throtl_shutdown_timer_wq(q);
+	cancel_delayed_work_sync(&q->delay_work);
 }
 EXPORT_SYMBOL(blk_sync_queue);
 
@@ -534,6 +562,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
 	INIT_LIST_HEAD(&q->flush_queue[1]);
 	INIT_LIST_HEAD(&q->flush_data_in_flight);
 	INIT_WORK(&q->unplug_work, blk_unplug_work);
+	INIT_DELAYED_WORK(&q->delay_work, blk_delay_work);
 
 	kobject_init(&q->kobj, &blk_queue_ktype);
 
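One design note on the two cancellation sites above: blk_stop_queue() is called with q->queue_lock held, so it uses the non-blocking cancel_delayed_work(), while blk_sync_queue() runs in sleepable context and uses cancel_delayed_work_sync(), which also waits for an already-running blk_delay_work() to finish. A teardown sketch under those assumptions (mydrv_remove() and struct mydrv are hypothetical) showing the order a driver would follow:

static void mydrv_remove(struct mydrv *drv)
{
	struct request_queue *q = drv->queue;
	unsigned long flags;

	/* Stop the queue; cancels pending (not yet running) delay work */
	spin_lock_irqsave(q->queue_lock, flags);
	blk_stop_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);

	/* Wait for in-flight timers and work, including blk_delay_work() */
	blk_sync_queue(q);
}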