author     Tejun Heo <tj@kernel.org>             2009-04-22 22:05:17 -0400
committer  Jens Axboe <jens.axboe@oracle.com>    2009-04-28 01:37:33 -0400
commit     a7f557923441186a3cdbabc54f1bcacf42b63bf5
tree       48e37802174cb98be43d89eb8bc4e466f4d79a02 /block/elevator.c
parent     a538cd03be6f363d039daa94199c28cfbd508455
block: kill blk_start_queueing()
blk_start_queueing() is identical to __blk_run_queue() except that it
doesn't check for recursion. None of the current users depends on
blk_start_queueing() running request_fn directly. Replace usages of
blk_start_queueing() with [__]blk_run_queue() and kill it.
[ Impact: removal of mostly duplicate interface function ]
Signed-off-by: Tejun Heo <tj@kernel.org>
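
For context, the distinction the message describes can be sketched roughly as
follows. This is a simplified, from-memory approximation of the helpers as they
existed in block/blk-core.c around this commit, not the verbatim code: both end
up invoking q->request_fn(q), but __blk_run_queue() first checks whether it is
already inside request_fn and defers the work to kblockd instead of recursing.

/*
 * Simplified sketch of the two helpers around this commit
 * (block/blk-core.c); details are approximate, not verbatim.
 */
void blk_start_queueing(struct request_queue *q)
{
	if (blk_queue_plugged(q))
		__generic_unplug_device(q);	/* unplugging runs request_fn */
	else if (!blk_queue_stopped(q))
		q->request_fn(q);		/* no recursion protection */
}

void __blk_run_queue(struct request_queue *q)
{
	blk_remove_plug(q);
	if (blk_queue_stopped(q) || elv_queue_empty(q))
		return;
	/* recurse only once: if already inside request_fn, punt to kblockd */
	if (!queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) {
		q->request_fn(q);
		queue_flag_clear(QUEUE_FLAG_REENTER, q);
	} else {
		kblockd_schedule_work(q, &q->unplug_work);
	}
}

Since none of the converted call sites relies on request_fn being invoked
synchronously, the extra deferral path in __blk_run_queue() is harmless, which
is what makes the one-for-one replacement below safe.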
Diffstat (limited to 'block/elevator.c')
 block/elevator.c | 7 +++----
 1 file changed, 3 insertions(+), 4 deletions(-)
diff --git a/block/elevator.c b/block/elevator.c
index 7073a9072577..2e0fb21485b7 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -599,7 +599,7 @@ void elv_quiesce_start(struct request_queue *q)
 	 */
 	elv_drain_elevator(q);
 	while (q->rq.elvpriv) {
-		blk_start_queueing(q);
+		__blk_run_queue(q);
 		spin_unlock_irq(q->queue_lock);
 		msleep(10);
 		spin_lock_irq(q->queue_lock);
@@ -643,8 +643,7 @@ void elv_insert(struct request_queue *q, struct request *rq, int where)
 		 * with anything.  There's no point in delaying queue
 		 * processing.
 		 */
-		blk_remove_plug(q);
-		blk_start_queueing(q);
+		__blk_run_queue(q);
 		break;
 
 	case ELEVATOR_INSERT_SORT:
@@ -971,7 +970,7 @@ void elv_completed_request(struct request_queue *q, struct request *rq)
 		    blk_ordered_cur_seq(q) == QUEUE_ORDSEQ_DRAIN &&
 		    (!next || blk_ordered_req_seq(next) > QUEUE_ORDSEQ_DRAIN)) {
 			blk_ordered_complete_seq(q, QUEUE_ORDSEQ_DRAIN, 0);
-			blk_start_queueing(q);
+			__blk_run_queue(q);
 		}
 	}
 }