path: root/block/elevator.c
author    Christoph Hellwig <hch@infradead.org>  2011-04-18 05:41:33 -0400
committer Jens Axboe <jaxboe@fusionio.com>       2011-04-18 05:41:33 -0400
commit    24ecfbe27f65563909b14492afda2f1c21f7c044 (patch)
tree      a7e51d903c400d0925f87be5f3069a5a44e0af24 /block/elevator.c
parent    4521cc4ed5173f92714f6999a69910c3385fed68 (diff)
block: add blk_run_queue_async
Instead of overloading __blk_run_queue to force an offload to kblockd,
add a new blk_run_queue_async helper to do it explicitly. I've kept
the blk_queue_stopped check for now, but I suspect it's not needed as
the check we do when the workqueue item runs should be enough.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
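The helper itself lands in blk-core.c, which is outside this elevator.c-only
diffstat. As a rough sketch of what the commit message describes (the exact
body may differ; kblockd_workqueue and the queue's delay_work member are
assumed from the blk-core.c of this era):

	void blk_run_queue_async(struct request_queue *q)
	{
		/*
		 * Keep the stopped check for now, per the commit message;
		 * the work item re-checks queue state when it runs anyway.
		 */
		if (likely(!blk_queue_stopped(q)))
			queue_delayed_work(kblockd_workqueue, &q->delay_work, 0);
	}

Callers that previously wrote __blk_run_queue(q, true) to force the kblockd
offload would now call blk_run_queue_async(q) instead.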
Diffstat (limited to 'block/elevator.c')
-rw-r--r--   block/elevator.c   4
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/block/elevator.c b/block/elevator.c
index 0cdb4e7ebab4..6f6abc08bb56 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -642,7 +642,7 @@ void elv_quiesce_start(struct request_queue *q)
 	 */
 	elv_drain_elevator(q);
 	while (q->rq.elvpriv) {
-		__blk_run_queue(q, false);
+		__blk_run_queue(q);
 		spin_unlock_irq(q->queue_lock);
 		msleep(10);
 		spin_lock_irq(q->queue_lock);
@@ -695,7 +695,7 @@ void __elv_add_request(struct request_queue *q, struct request *rq, int where)
 		 * with anything.  There's no point in delaying queue
 		 * processing.
 		 */
-		__blk_run_queue(q, false);
+		__blk_run_queue(q);
 		break;
 
 	case ELEVATOR_INSERT_SORT_MERGE:
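For reference, the interface change behind these hunks (made in blk-core.c
and the block headers, not shown in this elevator.c-limited view) replaces
the bool flag with a dedicated helper, roughly:

	/* before this commit */
	void __blk_run_queue(struct request_queue *q, bool force_kblockd);

	/* after this commit */
	void __blk_run_queue(struct request_queue *q);
	void blk_run_queue_async(struct request_queue *q);

so the elevator code above simply drops the now-removed false argument.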