author     Tejun Heo <tj@kernel.org>               2009-04-22 22:05:17 -0400
committer  Jens Axboe <jens.axboe@oracle.com>      2009-04-28 01:37:33 -0400
commit     a7f557923441186a3cdbabc54f1bcacf42b63bf5 (patch)
tree       48e37802174cb98be43d89eb8bc4e466f4d79a02 /block
parent     a538cd03be6f363d039daa94199c28cfbd508455 (diff)
block: kill blk_start_queueing()
blk_start_queueing() is identical to __blk_run_queue() except that it
doesn't check for recursion. None of the current users depends on
blk_start_queueing() running request_fn directly. Replace usages of
blk_start_queueing() with [__]blk_run_queue() and kill it.
[ Impact: removal of mostly duplicate interface function ]
Signed-off-by: Tejun Heo <tj@kernel.org>
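For context before the diff: the recursion check the message refers to lives in __blk_run_queue(). The sketch below is a paraphrase of the function as it stood at the parent commit (a538cd03), reconstructed here for illustration rather than quoted from the tree; the helpers used (blk_remove_plug(), queue_flag_test_and_set(), kblockd_schedule_work()) are real 2.6.30-era block-layer APIs.

/*
 * Approximate 2.6.30-era __blk_run_queue(), simplified; consult the
 * actual tree for the authoritative version. Called with the queue
 * lock held.
 */
void __blk_run_queue(struct request_queue *q)
{
        blk_remove_plug(q);

        if (unlikely(blk_queue_stopped(q)))
                return;
        if (elv_queue_empty(q))
                return;

        /*
         * Recurse into ->request_fn() at most once to avoid
         * overrunning the stack; if the queue is already being run,
         * punt the work to kblockd instead.
         */
        if (!queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) {
                q->request_fn(q);
                queue_flag_clear(QUEUE_FLAG_REENTER, q);
        } else {
                queue_flag_set(QUEUE_FLAG_PLUGGED, q);
                kblockd_schedule_work(q, &q->unplug_work);
        }
}

blk_start_queueing() (deleted below) performed the same plug handling and ->request_fn() invocation but without the REENTER guard; since no caller depended on the unguarded direct call, the guarded variant can stand in everywhere.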
Diffstat (limited to 'block')
 block/as-iosched.c  |  6 +-----
 block/blk-core.c    | 28 ++--------------------------
 block/cfq-iosched.c |  6 +++---
 block/elevator.c    |  7 +++----
4 files changed, 9 insertions(+), 38 deletions(-)
diff --git a/block/as-iosched.c b/block/as-iosched.c
index c48fa670d221..45bd07059c28 100644
--- a/block/as-iosched.c
+++ b/block/as-iosched.c
@@ -1312,12 +1312,8 @@ static void as_merged_requests(struct request_queue *q, struct request *req,
 static void as_work_handler(struct work_struct *work)
 {
        struct as_data *ad = container_of(work, struct as_data, antic_work);
-       struct request_queue *q = ad->q;
-       unsigned long flags;
 
-       spin_lock_irqsave(q->queue_lock, flags);
-       blk_start_queueing(q);
-       spin_unlock_irqrestore(q->queue_lock, flags);
+       blk_run_queue(ad->q);
 }
 
 static int as_may_queue(struct request_queue *q, int rw)
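The as-iosched hunk can drop its open-coded locking because blk_run_queue() is the spinlock-taking wrapper around __blk_run_queue(). Approximately, as the wrapper stood at this commit (paraphrased, not quoted):

void blk_run_queue(struct request_queue *q)
{
        unsigned long flags;

        /* Take the queue lock, run the queue, release. */
        spin_lock_irqsave(q->queue_lock, flags);
        __blk_run_queue(q);
        spin_unlock_irqrestore(q->queue_lock, flags);
}

Callers already holding q->queue_lock (everything below) use __blk_run_queue() directly; callers outside the lock, like the work handler above, use blk_run_queue().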
diff --git a/block/blk-core.c b/block/blk-core.c
index 02f53bc00e4c..8b4a0af7d69f 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -433,9 +433,7 @@ EXPORT_SYMBOL(__blk_run_queue);
  *
  * Description:
  *    Invoke request handling on this queue, if it has pending work to do.
- *    May be used to restart queueing when a request has completed. Also
- *    See @blk_start_queueing.
- *
+ *    May be used to restart queueing when a request has completed.
  */
 void blk_run_queue(struct request_queue *q)
 {
@@ -895,28 +893,6 @@ struct request *blk_get_request(struct request_queue *q, int rw, gfp_t gfp_mask)
 EXPORT_SYMBOL(blk_get_request);
 
 /**
- * blk_start_queueing - initiate dispatch of requests to device
- * @q: request queue to kick into gear
- *
- * This is basically a helper to remove the need to know whether a queue
- * is plugged or not if someone just wants to initiate dispatch of requests
- * for this queue. Should be used to start queueing on a device outside
- * of ->request_fn() context. Also see @blk_run_queue.
- *
- * The queue lock must be held with interrupts disabled.
- */
-void blk_start_queueing(struct request_queue *q)
-{
-       if (!blk_queue_plugged(q)) {
-               if (unlikely(blk_queue_stopped(q)))
-                       return;
-               q->request_fn(q);
-       } else
-               __generic_unplug_device(q);
-}
-EXPORT_SYMBOL(blk_start_queueing);
-
-/**
  * blk_requeue_request - put a request back on queue
  * @q: request queue where request should be inserted
  * @rq: request to be inserted
@@ -984,7 +960,7 @@ void blk_insert_request(struct request_queue *q, struct request *rq,
 
        drive_stat_acct(rq, 1);
        __elv_add_request(q, rq, where, 0);
-       blk_start_queueing(q);
+       __blk_run_queue(q);
        spin_unlock_irqrestore(q->queue_lock, flags);
 }
 EXPORT_SYMBOL(blk_insert_request);
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index a55a9bd75bd1..def0c698a4bc 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -2088,7 +2088,7 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
                if (blk_rq_bytes(rq) > PAGE_CACHE_SIZE ||
                    cfqd->busy_queues > 1) {
                        del_timer(&cfqd->idle_slice_timer);
-                       blk_start_queueing(cfqd->queue);
+                       __blk_run_queue(cfqd->queue);
                }
                cfq_mark_cfqq_must_dispatch(cfqq);
        }
@@ -2100,7 +2100,7 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
                 * this new queue is RT and the current one is BE
                 */
                cfq_preempt_queue(cfqd, cfqq);
-               blk_start_queueing(cfqd->queue);
+               __blk_run_queue(cfqd->queue);
        }
 }
 
@@ -2345,7 +2345,7 @@ static void cfq_kick_queue(struct work_struct *work)
        struct request_queue *q = cfqd->queue;
 
        spin_lock_irq(q->queue_lock);
-       blk_start_queueing(q);
+       __blk_run_queue(cfqd->queue);
        spin_unlock_irq(q->queue_lock);
 }
 
diff --git a/block/elevator.c b/block/elevator.c
index 7073a9072577..2e0fb21485b7 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -599,7 +599,7 @@ void elv_quiesce_start(struct request_queue *q)
         */
        elv_drain_elevator(q);
        while (q->rq.elvpriv) {
-               blk_start_queueing(q);
+               __blk_run_queue(q);
                spin_unlock_irq(q->queue_lock);
                msleep(10);
                spin_lock_irq(q->queue_lock);
@@ -643,8 +643,7 @@ void elv_insert(struct request_queue *q, struct request *rq, int where)
                 * with anything. There's no point in delaying queue
                 * processing.
                 */
-               blk_remove_plug(q);
-               blk_start_queueing(q);
+               __blk_run_queue(q);
                break;
 
        case ELEVATOR_INSERT_SORT:
@@ -971,7 +970,7 @@ void elv_completed_request(struct request_queue *q, struct request *rq)
                    blk_ordered_cur_seq(q) == QUEUE_ORDSEQ_DRAIN &&
                    (!next || blk_ordered_req_seq(next) > QUEUE_ORDSEQ_DRAIN)) {
                        blk_ordered_complete_seq(q, QUEUE_ORDSEQ_DRAIN, 0);
-                       blk_start_queueing(q);
+                       __blk_run_queue(q);
                }
        }
 }
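A note on the ELEVATOR_INSERT_BACK hunk above: the old code had to call blk_remove_plug() before blk_start_queueing() to force the direct-dispatch path, while __blk_run_queue() begins by unplugging the queue itself, so the single call is equivalent. Schematically (assuming the __blk_run_queue() sketch near the top; not a literal quote of the tree):

/* Old ELEVATOR_INSERT_BACK sequence: unplug, then dispatch directly. */
blk_remove_plug(q);
blk_start_queueing(q);

/*
 * New sequence: __blk_run_queue() performs blk_remove_plug(q) first,
 * then runs ->request_fn() under the recursion guard, so one call
 * covers both steps (the queue lock is held in either case).
 */
__blk_run_queue(q);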