author:    Christoph Hellwig <hch@lst.de>  2017-06-16 12:15:25 -0400
committer: Jens Axboe <axboe@kernel.dk>    2017-06-18 12:08:55 -0400
commit:    44e8c2bff80bb384a608406009948f90a78bf8a3 (patch)
tree:      416c4aea5fb5c635bdc94170b8fb14ab284f21a4
parent:    9f2107382636cf9a71951eb71ec04f2fb3641b37 (diff)
blk-mq: refactor blk_mq_sched_assign_ioc
blk_mq_sched_assign_ioc now only handles the assignment of the ioc if
the scheduler needs it (bfq only at the moment).  The call to the
per-request initializer is moved out of it so that it can be merged
with a similar call for the kyber I/O scheduler.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
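
At a glance, the signature change at the core of this patch, taken from
the block/blk-mq-sched.h hunk below:

	/* before: the caller passes the queue explicitly */
	void blk_mq_sched_assign_ioc(struct request_queue *q, struct request *rq,
			struct bio *bio);

	/* after: the queue comes from rq->q, the io_context from rq_ioc(bio) */
	void blk_mq_sched_assign_ioc(struct request *rq, struct bio *bio);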
 block/blk-mq-sched.c | 28
 block/blk-mq-sched.h |  3
 block/blk-mq.c       | 14
 3 files changed, 17 insertions(+), 28 deletions(-)
diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
index 22601e5c6f19..254d1c164567 100644
--- a/block/blk-mq-sched.c
+++ b/block/blk-mq-sched.c
@@ -31,12 +31,10 @@ void blk_mq_sched_free_hctx_data(struct request_queue *q,
 }
 EXPORT_SYMBOL_GPL(blk_mq_sched_free_hctx_data);
 
-static void __blk_mq_sched_assign_ioc(struct request_queue *q,
-		struct request *rq,
-		struct bio *bio,
-		struct io_context *ioc)
+void blk_mq_sched_assign_ioc(struct request *rq, struct bio *bio)
 {
-	struct elevator_queue *e = q->elevator;
+	struct request_queue *q = rq->q;
+	struct io_context *ioc = rq_ioc(bio);
 	struct io_cq *icq;
 
 	spin_lock_irq(q->queue_lock);
@@ -48,26 +46,8 @@ static void __blk_mq_sched_assign_ioc(struct request_queue *q,
 		if (!icq)
 			return;
 	}
-
-	rq->elv.icq = icq;
-	if (e && e->type->ops.mq.get_rq_priv &&
-	    e->type->ops.mq.get_rq_priv(q, rq, bio)) {
-		rq->elv.icq = NULL;
-		return;
-	}
-
-	rq->rq_flags |= RQF_ELVPRIV;
 	get_io_context(icq->ioc);
-}
-
-void blk_mq_sched_assign_ioc(struct request_queue *q, struct request *rq,
-		struct bio *bio)
-{
-	struct io_context *ioc;
-
-	ioc = rq_ioc(bio);
-	if (ioc)
-		__blk_mq_sched_assign_ioc(q, rq, bio, ioc);
+	rq->elv.icq = icq;
 }
 
 void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
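
For readability, here is the refactored helper pieced together from the
new side of the two hunks above.  The icq lookup in the middle sits
outside the hunk context, so the ioc_lookup_icq()/ioc_create_icq() calls
shown are an assumption based on the surviving context lines:

	void blk_mq_sched_assign_ioc(struct request *rq, struct bio *bio)
	{
		struct request_queue *q = rq->q;
		struct io_context *ioc = rq_ioc(bio);
		struct io_cq *icq;

		spin_lock_irq(q->queue_lock);
		icq = ioc_lookup_icq(ioc, q);	/* assumed: outside the hunks */
		spin_unlock_irq(q->queue_lock);

		if (!icq) {
			icq = ioc_create_icq(ioc, q, GFP_ATOMIC);	/* assumed */
			if (!icq)
				return;
		}

		/*
		 * Only take the ioc reference and publish the icq; the
		 * get_rq_priv() call and RQF_ELVPRIV now live in the caller.
		 */
		get_io_context(icq->ioc);
		rq->elv.icq = icq;
	}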
diff --git a/block/blk-mq-sched.h b/block/blk-mq-sched.h
index f34e6a522105..e117edd039b1 100644
--- a/block/blk-mq-sched.h
+++ b/block/blk-mq-sched.h
@@ -7,8 +7,7 @@
 void blk_mq_sched_free_hctx_data(struct request_queue *q,
 				 void (*exit)(struct blk_mq_hw_ctx *));
 
-void blk_mq_sched_assign_ioc(struct request_queue *q, struct request *rq,
-		struct bio *bio);
+void blk_mq_sched_assign_ioc(struct request *rq, struct bio *bio);
 
 void blk_mq_sched_request_inserted(struct request *rq);
 bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio,
diff --git a/block/blk-mq.c b/block/blk-mq.c
index e056725679a8..2f380ab7a603 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -315,8 +315,18 @@ allocated:
 
 	if (!op_is_flush(op)) {
 		rq->elv.icq = NULL;
-		if (e && e->type->icq_cache)
-			blk_mq_sched_assign_ioc(q, rq, bio);
+		if (e && e->type->ops.mq.get_rq_priv) {
+			if (e->type->icq_cache && rq_ioc(bio))
+				blk_mq_sched_assign_ioc(rq, bio);
+
+			if (e->type->ops.mq.get_rq_priv(q, rq, bio)) {
+				if (rq->elv.icq)
+					put_io_context(rq->elv.icq->ioc);
+				rq->elv.icq = NULL;
+			} else {
+				rq->rq_flags |= RQF_ELVPRIV;
+			}
+		}
 	}
 	data->hctx->queued++;
 	return rq;
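
Shown contiguously, the new side of this hunk is the merged per-request
initialization path the commit message describes: the ioc assignment and
the scheduler's private initializer now sit together at the allocation
site (code verbatim from the hunk, comments added here for orientation):

	if (!op_is_flush(op)) {
		rq->elv.icq = NULL;
		if (e && e->type->ops.mq.get_rq_priv) {
			/* only schedulers with an icq_cache (bfq) need an ioc */
			if (e->type->icq_cache && rq_ioc(bio))
				blk_mq_sched_assign_ioc(rq, bio);

			if (e->type->ops.mq.get_rq_priv(q, rq, bio)) {
				/* init failed: drop the reference taken above */
				if (rq->elv.icq)
					put_io_context(rq->elv.icq->ioc);
				rq->elv.icq = NULL;
			} else {
				rq->rq_flags |= RQF_ELVPRIV;
			}
		}
	}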