author     Linus Torvalds <torvalds@linux-foundation.org>  2016-07-26 18:03:07 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2016-07-26 18:03:07 -0400
commit     d05d7f40791ccbb6e543cc5dd6a6aa08fc71d635 (patch)
tree       dc0039fe490a41a70de10d58fe8e6136db46463a /block/blk-mq.c
parent     75a442efb1ca613f8d1cc71a32c2c9b0aefae4a5 (diff)
parent     17007f3994cdb4643355c73f54f0adad006cf59e (diff)
Merge branch 'for-4.8/core' of git://git.kernel.dk/linux-block
Pull core block updates from Jens Axboe:

 - the big change is the cleanup from Mike Christie, cleaning up our
   uses of command types and modified flags. This is what will throw
   some merge conflicts

 - regression fix for the above for btrfs, from Vincent

 - following up to the above, better packing of struct request from
   Christoph

 - a 2038 fix for blktrace from Arnd

 - a few trivial/spelling fixes from Bart Van Assche

 - a front merge check fix from Damien, which could cause issues on
   SMR drives

 - Atari partition fix from Gabriel

 - convert cfq to highres timers, since jiffies isn't granular enough
   for some devices these days. From Jan and Jeff

 - CFQ priority boost fix for idle classes, from me

 - cleanup series from Ming, improving our bio/bvec iteration

 - a direct issue fix for blk-mq from Omar

 - fix for plug merging not involving the IO scheduler, like we do for
   other types of merges. From Tahsin

 - expose DAX type internally and through sysfs. From Toshi and Yigal

* 'for-4.8/core' of git://git.kernel.dk/linux-block: (76 commits)
  block: Fix front merge check
  block: do not merge requests without consulting with io scheduler
  block: Fix spelling in a source code comment
  block: expose QUEUE_FLAG_DAX in sysfs
  block: add QUEUE_FLAG_DAX for devices to advertise their DAX support
  Btrfs: fix comparison in __btrfs_map_block()
  block: atari: Return early for unsupported sector size
  Doc: block: Fix a typo in queue-sysfs.txt
  cfq-iosched: Charge at least 1 jiffie instead of 1 ns
  cfq-iosched: Fix regression in bonnie++ rewrite performance
  cfq-iosched: Convert slice_resid from u64 to s64
  block: Convert fifo_time from ulong to u64
  blktrace: avoid using timespec
  block/blk-cgroup.c: Declare local symbols static
  block/bio-integrity.c: Add #include "blk.h"
  block/partition-generic.c: Remove a set-but-not-used variable
  block: bio: kill BIO_MAX_SIZE
  cfq-iosched: temporarily boost queue priority for idle classes
  block: drbd: avoid to use BIO_MAX_SIZE
  block: bio: remove BIO_MAX_SECTORS
  ...
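The command-type cleanup is the theme of the blk-mq diff below: the old fused "rw" bitmask is split into an operation (op) plus modifier flags (op_flags), applied to a request via req_set_op_attrs(). A minimal, self-contained sketch of the idea; every DEMO_* name here is an illustrative stand-in, not the kernel's own definition:

    #include <assert.h>
    #include <stdio.h>

    /* Illustrative stand-ins for the kernel's REQ_OP_* values and REQ_*
     * flags; the real definitions live in include/linux/blk_types.h. */
    enum demo_op { DEMO_OP_READ = 0, DEMO_OP_WRITE = 1, DEMO_OP_FLUSH = 2 };
    #define DEMO_REQ_SYNC (1U << 0)
    #define DEMO_REQ_FUA  (1U << 1)

    struct demo_request {
            int op;               /* exactly one operation */
            unsigned int flags;   /* zero or more modifier flags */
    };

    /* Rough analogue of req_set_op_attrs(): record the op and OR in the
     * attribute flags in one step. */
    static void demo_set_op_attrs(struct demo_request *rq, int op,
                                  unsigned int flags)
    {
            rq->op = op;
            rq->flags |= flags;
    }

    int main(void)
    {
            struct demo_request rq = { 0 };

            /* Before the cleanup, "write + sync" traveled as one fused
             * bitmask; now the op and its attributes are handed around
             * separately and can't clobber each other. */
            demo_set_op_attrs(&rq, DEMO_OP_WRITE, DEMO_REQ_SYNC);
            assert(rq.op == DEMO_OP_WRITE);
            printf("op=%d flags=%#x\n", rq.op, rq.flags);
            return 0;
    }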
Diffstat (limited to 'block/blk-mq.c')
-rw-r--r--  block/blk-mq.c | 42
1 file changed, 22 insertions(+), 20 deletions(-)
diff --git a/block/blk-mq.c b/block/blk-mq.c
index f9b9049b1284..2a1920c6d6e5 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -159,16 +159,17 @@ bool blk_mq_can_queue(struct blk_mq_hw_ctx *hctx)
 EXPORT_SYMBOL(blk_mq_can_queue);
 
 static void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx,
-		struct request *rq, unsigned int rw_flags)
+		struct request *rq, int op,
+		unsigned int op_flags)
 {
 	if (blk_queue_io_stat(q))
-		rw_flags |= REQ_IO_STAT;
+		op_flags |= REQ_IO_STAT;
 
 	INIT_LIST_HEAD(&rq->queuelist);
 	/* csd/requeue_work/fifo_time is initialized before use */
 	rq->q = q;
 	rq->mq_ctx = ctx;
-	rq->cmd_flags |= rw_flags;
+	req_set_op_attrs(rq, op, op_flags);
 	/* do not touch atomic flags, it needs atomic ops against the timer */
 	rq->cpu = -1;
 	INIT_HLIST_NODE(&rq->hash);
@@ -203,11 +204,11 @@ static void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx,
 	rq->end_io_data = NULL;
 	rq->next_rq = NULL;
 
-	ctx->rq_dispatched[rw_is_sync(rw_flags)]++;
+	ctx->rq_dispatched[rw_is_sync(op, op_flags)]++;
 }
 
 static struct request *
-__blk_mq_alloc_request(struct blk_mq_alloc_data *data, int rw)
+__blk_mq_alloc_request(struct blk_mq_alloc_data *data, int op, int op_flags)
 {
 	struct request *rq;
 	unsigned int tag;
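rw_is_sync() now takes the operation and the flags as separate arguments: a read counts as synchronous regardless of flags, while a write is synchronous only when REQ_SYNC is set. A hedged sketch of that predicate with illustrative names (the real helper lives in include/linux/blkdev.h):

    #include <stdbool.h>
    #include <stdio.h>

    enum demo_op { DEMO_OP_READ, DEMO_OP_WRITE };
    #define DEMO_REQ_SYNC (1U << 0)

    /* Sketch of the two-argument rw_is_sync(): the op says what the
     * request does, the flags say how it should be treated. */
    static bool demo_rw_is_sync(int op, unsigned int flags)
    {
            return op == DEMO_OP_READ || (flags & DEMO_REQ_SYNC);
    }

    int main(void)
    {
            printf("read, no flags:  %d\n", demo_rw_is_sync(DEMO_OP_READ, 0));  /* 1 */
            printf("write, no flags: %d\n", demo_rw_is_sync(DEMO_OP_WRITE, 0)); /* 0 */
            printf("write, REQ_SYNC: %d\n",
                   demo_rw_is_sync(DEMO_OP_WRITE, DEMO_REQ_SYNC));              /* 1 */
            return 0;
    }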
@@ -222,7 +223,7 @@ __blk_mq_alloc_request(struct blk_mq_alloc_data *data, int rw)
 		}
 
 		rq->tag = tag;
-		blk_mq_rq_ctx_init(data->q, data->ctx, rq, rw);
+		blk_mq_rq_ctx_init(data->q, data->ctx, rq, op, op_flags);
 		return rq;
 	}
 
@@ -246,7 +247,7 @@ struct request *blk_mq_alloc_request(struct request_queue *q, int rw,
 	hctx = q->mq_ops->map_queue(q, ctx->cpu);
 	blk_mq_set_alloc_data(&alloc_data, q, flags, ctx, hctx);
 
-	rq = __blk_mq_alloc_request(&alloc_data, rw);
+	rq = __blk_mq_alloc_request(&alloc_data, rw, 0);
 	if (!rq && !(flags & BLK_MQ_REQ_NOWAIT)) {
 		__blk_mq_run_hw_queue(hctx);
 		blk_mq_put_ctx(ctx);
@@ -254,7 +255,7 @@ struct request *blk_mq_alloc_request(struct request_queue *q, int rw,
 		ctx = blk_mq_get_ctx(q);
 		hctx = q->mq_ops->map_queue(q, ctx->cpu);
 		blk_mq_set_alloc_data(&alloc_data, q, flags, ctx, hctx);
-		rq = __blk_mq_alloc_request(&alloc_data, rw);
+		rq = __blk_mq_alloc_request(&alloc_data, rw, 0);
 		ctx = alloc_data.ctx;
 	}
 	blk_mq_put_ctx(ctx);
@@ -784,7 +785,7 @@ static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
 		switch (ret) {
 		case BLK_MQ_RQ_QUEUE_OK:
 			queued++;
-			continue;
+			break;
 		case BLK_MQ_RQ_QUEUE_BUSY:
 			list_add(&rq->queuelist, &rq_list);
 			__blk_mq_requeue_request(rq);
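The continue → break change in __blk_mq_run_hw_queue()'s dispatch switch means the successfully-queued case now reaches the code after the switch, which only acts on the BUSY result, instead of jumping straight to the next loop iteration. The observable behavior should be the same; every case simply exits the switch the same way. A condensed sketch of that loop shape, with simplified types standing in for the real request list:

    #include <stdio.h>

    enum { QUEUE_OK, QUEUE_BUSY, QUEUE_ERROR };

    /* Condensed shape of the dispatch loop: drain a list of requests,
     * stopping early when the driver reports it is busy. */
    static void demo_dispatch(const int *results, int n)
    {
            for (int i = 0; i < n; i++) {
                    int ret = results[i];

                    switch (ret) {
                    case QUEUE_OK:
                            printf("queued %d\n", i);
                            break;  /* was "continue"; now falls to the check below */
                    case QUEUE_BUSY:
                            printf("requeue %d\n", i);
                            break;
                    default:
                            printf("error %d\n", i);
                            break;
                    }

                    /* Shared post-switch handling: a busy driver stops the
                     * drain. For QUEUE_OK this test is false, so "break"
                     * behaves exactly like the old "continue". */
                    if (ret == QUEUE_BUSY)
                            break;
            }
    }

    int main(void)
    {
            int results[] = { QUEUE_OK, QUEUE_OK, QUEUE_BUSY, QUEUE_OK };
            demo_dispatch(results, 4);
            return 0;
    }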
@@ -1169,28 +1170,29 @@ static struct request *blk_mq_map_request(struct request_queue *q,
 	struct blk_mq_hw_ctx *hctx;
 	struct blk_mq_ctx *ctx;
 	struct request *rq;
-	int rw = bio_data_dir(bio);
+	int op = bio_data_dir(bio);
+	int op_flags = 0;
 	struct blk_mq_alloc_data alloc_data;
 
 	blk_queue_enter_live(q);
 	ctx = blk_mq_get_ctx(q);
 	hctx = q->mq_ops->map_queue(q, ctx->cpu);
 
-	if (rw_is_sync(bio->bi_rw))
-		rw |= REQ_SYNC;
+	if (rw_is_sync(bio_op(bio), bio->bi_rw))
+		op_flags |= REQ_SYNC;
 
-	trace_block_getrq(q, bio, rw);
+	trace_block_getrq(q, bio, op);
 	blk_mq_set_alloc_data(&alloc_data, q, BLK_MQ_REQ_NOWAIT, ctx, hctx);
-	rq = __blk_mq_alloc_request(&alloc_data, rw);
+	rq = __blk_mq_alloc_request(&alloc_data, op, op_flags);
 	if (unlikely(!rq)) {
 		__blk_mq_run_hw_queue(hctx);
 		blk_mq_put_ctx(ctx);
-		trace_block_sleeprq(q, bio, rw);
+		trace_block_sleeprq(q, bio, op);
 
 		ctx = blk_mq_get_ctx(q);
 		hctx = q->mq_ops->map_queue(q, ctx->cpu);
 		blk_mq_set_alloc_data(&alloc_data, q, 0, ctx, hctx);
-		rq = __blk_mq_alloc_request(&alloc_data, rw);
+		rq = __blk_mq_alloc_request(&alloc_data, op, op_flags);
 		ctx = alloc_data.ctx;
 		hctx = alloc_data.hctx;
 	}
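The allocation pattern in blk_mq_map_request() survives the renames: first try to take a tag without sleeping (BLK_MQ_REQ_NOWAIT), and if that fails, kick the hardware queue so in-flight requests can complete and return tags, then retry in blocking mode. A hedged userspace sketch of that two-phase attempt; the stub functions stand in for the real tag machinery:

    #include <stdbool.h>
    #include <stdio.h>

    /* Stubs standing in for the real tag allocator and queue kick. */
    static int tags_free = 0;

    static bool demo_alloc_tag(bool nowait)
    {
            if (tags_free > 0) {
                    tags_free--;
                    return true;
            }
            if (nowait)
                    return false;   /* refuse to sleep, report failure */
            return true;            /* blocking mode: assume we eventually get one */
    }

    static void demo_run_hw_queue(void)
    {
            /* Completing in-flight requests returns their tags to the pool. */
            tags_free++;
            printf("ran hw queue, freed a tag\n");
    }

    int main(void)
    {
            /* Phase 1: opportunistic, non-blocking attempt. */
            if (!demo_alloc_tag(true)) {
                    /* Phase 2: make forward progress, then retry and be
                     * willing to wait. */
                    demo_run_hw_queue();
                    if (demo_alloc_tag(false))
                            printf("got a tag on retry\n");
            }
            return 0;
    }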
@@ -1244,8 +1246,8 @@ static int blk_mq_direct_issue_request(struct request *rq, blk_qc_t *cookie)
  */
 static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
 {
-	const int is_sync = rw_is_sync(bio->bi_rw);
-	const int is_flush_fua = bio->bi_rw & (REQ_FLUSH | REQ_FUA);
+	const int is_sync = rw_is_sync(bio_op(bio), bio->bi_rw);
+	const int is_flush_fua = bio->bi_rw & (REQ_PREFLUSH | REQ_FUA);
 	struct blk_map_ctx data;
 	struct request *rq;
 	unsigned int request_count = 0;
@@ -1338,8 +1340,8 @@ done:
  */
 static blk_qc_t blk_sq_make_request(struct request_queue *q, struct bio *bio)
 {
-	const int is_sync = rw_is_sync(bio->bi_rw);
-	const int is_flush_fua = bio->bi_rw & (REQ_FLUSH | REQ_FUA);
+	const int is_sync = rw_is_sync(bio_op(bio), bio->bi_rw);
+	const int is_flush_fua = bio->bi_rw & (REQ_PREFLUSH | REQ_FUA);
 	struct blk_plug *plug;
 	unsigned int request_count = 0;
 	struct blk_map_ctx data;
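Both make_request variants pick up the REQ_FLUSH → REQ_PREFLUSH rename from this merge: the bit still marks a bio that wants the device write cache flushed before its data is written, with REQ_FUA covering the write itself. A small sketch of the classification, again with illustrative bit values rather than the kernel's:

    #include <stdio.h>

    /* Illustrative bit positions; the kernel's real values differ. */
    #define DEMO_REQ_PREFLUSH (1U << 0)  /* flush the write cache first */
    #define DEMO_REQ_FUA      (1U << 1)  /* force unit access for this write */

    int main(void)
    {
            unsigned int bi_rw = DEMO_REQ_PREFLUSH | DEMO_REQ_FUA;

            /* Same test as in blk_mq_make_request()/blk_sq_make_request():
             * flush/FUA bios take the flush machinery instead of the normal
             * plug/merge path. */
            const int is_flush_fua = bi_rw & (DEMO_REQ_PREFLUSH | DEMO_REQ_FUA);

            printf("flush/fua bio: %s\n", is_flush_fua ? "yes" : "no");
            return 0;
    }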