author     Christoph Hellwig <hch@lst.de>  2017-06-16 12:15:26 -0400
committer  Jens Axboe <axboe@kernel.dk>    2017-06-18 12:08:55 -0400
commit     5bbf4e5a8e3a780874b2ed77bd1bd57850f3f6da (patch)
tree       06835c621f167a8cc563d57a6d6e29d127078967 /block/bfq-iosched.c
parent     44e8c2bff80bb384a608406009948f90a78bf8a3 (diff)
blk-mq-sched: unify request prepare methods
This patch makes sure we always allocate requests in the core blk-mq code and use a common prepare_request method to initialize them for both mq I/O schedulers. For Kyber an additional limit_depth method is added that is called before allocating the request.

Also, because none of the initializations can really fail, the new method does not return an error; instead the bfq finish method is hardened to deal with the no-IOC case.

Last but not least, this removes the abuse of RQF_QUEUED by the blk-mq scheduling code, as RQF_ELVPRIV is all that is needed now.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
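The hooks referred to above end up with roughly the following shape on the mq elevator ops. This is a hedged sketch of the relevant fields only; field order and neighbouring members are approximate, and the comments paraphrase the description above rather than the header itself:

struct elevator_mq_ops {
        /* ... */
        /* called before the request is allocated so a scheduler can
         * restrict the allocation depth (used by Kyber)
         */
        void (*limit_depth)(unsigned int op, struct blk_mq_alloc_data *data);
        /* common initialization of scheduler-private request data;
         * replaces get_rq_priv and cannot fail, hence no return value
         */
        void (*prepare_request)(struct request *rq, struct bio *bio);
        /* teardown counterpart; must tolerate a request that never got
         * an io_context attached (the no-IOC case mentioned above)
         */
        void (*finish_request)(struct request *rq);
        /* ... */
};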
Diffstat (limited to 'block/bfq-iosched.c')
-rw-r--r--   block/bfq-iosched.c | 19
1 file changed, 12 insertions(+), 7 deletions(-)
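On the allocation side, the core blk-mq code then drives these hooks along the following lines. This is a rough illustrative sketch of the call pattern described in the commit message, not an excerpt of the patch; the exact placement in the allocation path is simplified:

/* simplified sketch of the core request-allocation path */
struct elevator_queue *e = q->elevator;

if (e && e->type->ops.mq.prepare_request) {
        /* schedulers with an icq cache get an io_context assigned first */
        if (e->type->icq_cache && rq_ioc(bio))
                blk_mq_sched_assign_ioc(rq, bio);

        /* common prepare hook for both mq schedulers; no error to handle */
        e->type->ops.mq.prepare_request(rq, bio);
        rq->rq_flags |= RQF_ELVPRIV;
}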
diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
index f037b005faa1..60d32700f104 100644
--- a/block/bfq-iosched.c
+++ b/block/bfq-iosched.c
@@ -4292,8 +4292,14 @@ static void bfq_put_rq_priv_body(struct bfq_queue *bfqq)
 
 static void bfq_finish_request(struct request *rq)
 {
-        struct bfq_queue *bfqq = RQ_BFQQ(rq);
-        struct bfq_data *bfqd = bfqq->bfqd;
+        struct bfq_queue *bfqq;
+        struct bfq_data *bfqd;
+
+        if (!rq->elv.icq)
+                return;
+
+        bfqq = RQ_BFQQ(rq);
+        bfqd = bfqq->bfqd;
 
         if (rq->rq_flags & RQF_STARTED)
                 bfqg_stats_update_completion(bfqq_group(bfqq),
@@ -4394,9 +4400,9 @@ static struct bfq_queue *bfq_get_bfqq_handle_split(struct bfq_data *bfqd,
 /*
  * Allocate bfq data structures associated with this request.
  */
-static int bfq_get_rq_private(struct request_queue *q, struct request *rq,
-                              struct bio *bio)
+static void bfq_prepare_request(struct request *rq, struct bio *bio)
 {
+        struct request_queue *q = rq->q;
         struct bfq_data *bfqd = q->elevator->elevator_data;
         struct bfq_io_cq *bic;
         const int is_sync = rq_is_sync(rq);
@@ -4405,7 +4411,7 @@ static int bfq_get_rq_private(struct request_queue *q, struct request *rq,
         bool split = false;
 
         if (!rq->elv.icq)
-                return 1;
+                return;
         bic = icq_to_bic(rq->elv.icq);
 
         spin_lock_irq(&bfqd->lock);
@@ -4466,7 +4472,6 @@ static int bfq_get_rq_private(struct request_queue *q, struct request *rq,
         bfq_handle_burst(bfqd, bfqq);
 
         spin_unlock_irq(&bfqd->lock);
-        return 0;
 }
 
 static void bfq_idle_slice_timer_body(struct bfq_queue *bfqq)
@@ -4945,7 +4950,7 @@ static struct elv_fs_entry bfq_attrs[] = {
 
 static struct elevator_type iosched_bfq_mq = {
         .ops.mq = {
-                .get_rq_priv            = bfq_get_rq_private,
+                .prepare_request        = bfq_prepare_request,
                 .finish_request         = bfq_finish_request,
                 .exit_icq               = bfq_exit_icq,
                 .insert_requests        = bfq_insert_requests,