Diffstat (limited to 'block/blk-mq.c')
 block/blk-mq.c | 28 +++++++++++-----------------
 1 file changed, 11 insertions(+), 17 deletions(-)
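In short, the diff below drops the separate (op, op_flags) argument pair from blk_mq_rq_ctx_init() and __blk_mq_alloc_request(): callers now pass a single unsigned int that already carries both the operation and its flags (e.g. bio->bi_opf), rw_is_sync(op, op_flags) checks become op_is_sync(op), and request init collapses to a plain rq->cmd_flags = op store. As a rough, standalone illustration of that kind of single-word op encoding (placeholder REQ_* values and a hypothetical sketch_op_is_sync() helper, not this tree's actual definitions):

/*
 * Illustrative sketch only -- not the kernel's definition.  Assumes the
 * post-patch layout where the low bits of the word hold the REQ_OP_* value
 * and the remaining bits hold REQ_* flags such as REQ_SYNC.
 */
#include <stdbool.h>
#include <stdio.h>

#define REQ_OP_BITS   8
#define REQ_OP_MASK   ((1u << REQ_OP_BITS) - 1)
#define REQ_OP_READ   0u
#define REQ_OP_WRITE  1u
#define REQ_SYNC      (1u << REQ_OP_BITS)   /* placeholder bit position */

/* Reads are treated as synchronous; writes only if REQ_SYNC is set. */
static bool sketch_op_is_sync(unsigned int op)
{
        return (op & REQ_OP_MASK) == REQ_OP_READ || (op & REQ_SYNC);
}

int main(void)
{
        printf("read:       %d\n", sketch_op_is_sync(REQ_OP_READ));
        printf("write:      %d\n", sketch_op_is_sync(REQ_OP_WRITE));
        printf("sync write: %d\n", sketch_op_is_sync(REQ_OP_WRITE | REQ_SYNC));
        return 0;
}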
diff --git a/block/blk-mq.c b/block/blk-mq.c
index b49c6658eb05..2da1a0ee3318 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -139,14 +139,13 @@ bool blk_mq_can_queue(struct blk_mq_hw_ctx *hctx)
 EXPORT_SYMBOL(blk_mq_can_queue);
 
 static void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx,
-                               struct request *rq, int op,
-                               unsigned int op_flags)
+                               struct request *rq, unsigned int op)
 {
         INIT_LIST_HEAD(&rq->queuelist);
         /* csd/requeue_work/fifo_time is initialized before use */
         rq->q = q;
         rq->mq_ctx = ctx;
-        req_set_op_attrs(rq, op, op_flags);
+        rq->cmd_flags = op;
         if (blk_queue_io_stat(q))
                 rq->rq_flags |= RQF_IO_STAT;
         /* do not touch atomic flags, it needs atomic ops against the timer */
@@ -183,11 +182,11 @@ static void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx,
         rq->end_io_data = NULL;
         rq->next_rq = NULL;
 
-        ctx->rq_dispatched[rw_is_sync(op, op_flags)]++;
+        ctx->rq_dispatched[op_is_sync(op)]++;
 }
 
 static struct request *
-__blk_mq_alloc_request(struct blk_mq_alloc_data *data, int op, int op_flags)
+__blk_mq_alloc_request(struct blk_mq_alloc_data *data, unsigned int op)
 {
         struct request *rq;
         unsigned int tag;
@@ -202,7 +201,7 @@ __blk_mq_alloc_request(struct blk_mq_alloc_data *data, int op, int op_flags)
                 }
 
                 rq->tag = tag;
-                blk_mq_rq_ctx_init(data->q, data->ctx, rq, op, op_flags);
+                blk_mq_rq_ctx_init(data->q, data->ctx, rq, op);
                 return rq;
         }
 
@@ -225,7 +224,7 @@ struct request *blk_mq_alloc_request(struct request_queue *q, int rw,
         ctx = blk_mq_get_ctx(q);
         hctx = blk_mq_map_queue(q, ctx->cpu);
         blk_mq_set_alloc_data(&alloc_data, q, flags, ctx, hctx);
-        rq = __blk_mq_alloc_request(&alloc_data, rw, 0);
+        rq = __blk_mq_alloc_request(&alloc_data, rw);
         blk_mq_put_ctx(ctx);
 
         if (!rq) {
@@ -277,7 +276,7 @@ struct request *blk_mq_alloc_request_hctx(struct request_queue *q, int rw,
         ctx = __blk_mq_get_ctx(q, cpumask_first(hctx->cpumask));
 
         blk_mq_set_alloc_data(&alloc_data, q, flags, ctx, hctx);
-        rq = __blk_mq_alloc_request(&alloc_data, rw, 0);
+        rq = __blk_mq_alloc_request(&alloc_data, rw);
         if (!rq) {
                 ret = -EWOULDBLOCK;
                 goto out_queue_exit;
@@ -1196,19 +1195,14 @@ static struct request *blk_mq_map_request(struct request_queue *q,
         struct blk_mq_hw_ctx *hctx;
         struct blk_mq_ctx *ctx;
         struct request *rq;
-        int op = bio_data_dir(bio);
-        int op_flags = 0;
 
         blk_queue_enter_live(q);
         ctx = blk_mq_get_ctx(q);
         hctx = blk_mq_map_queue(q, ctx->cpu);
 
-        if (rw_is_sync(bio_op(bio), bio->bi_opf))
-                op_flags |= REQ_SYNC;
-
-        trace_block_getrq(q, bio, op);
+        trace_block_getrq(q, bio, bio->bi_opf);
         blk_mq_set_alloc_data(data, q, 0, ctx, hctx);
-        rq = __blk_mq_alloc_request(data, op, op_flags);
+        rq = __blk_mq_alloc_request(data, bio->bi_opf);
 
         data->hctx->queued++;
         return rq;
@@ -1256,7 +1250,7 @@ static int blk_mq_direct_issue_request(struct request *rq, blk_qc_t *cookie)
  */
 static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
 {
-        const int is_sync = rw_is_sync(bio_op(bio), bio->bi_opf);
+        const int is_sync = op_is_sync(bio->bi_opf);
         const int is_flush_fua = bio->bi_opf & (REQ_PREFLUSH | REQ_FUA);
         struct blk_mq_alloc_data data;
         struct request *rq;
@@ -1350,7 +1344,7 @@ done:
  */
 static blk_qc_t blk_sq_make_request(struct request_queue *q, struct bio *bio)
 {
-        const int is_sync = rw_is_sync(bio_op(bio), bio->bi_opf);
+        const int is_sync = op_is_sync(bio->bi_opf);
         const int is_flush_fua = bio->bi_opf & (REQ_PREFLUSH | REQ_FUA);
         struct blk_plug *plug;
         unsigned int request_count = 0;
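For context on the caller-side effect of the hunks above: before this change a caller carried op and op_flags separately and the init helper merged them; afterwards the caller hands over one already-combined word and the helper is a plain store. A minimal standalone sketch of that difference (hypothetical sketch_* names and placeholder bit layout, not kernel code):

/* Illustrative sketch of the before/after calling convention. */
#include <stdio.h>

#define REQ_OP_BITS  8
#define REQ_OP_MASK  ((1u << REQ_OP_BITS) - 1)

struct sketch_request {
        unsigned int cmd_flags;   /* op in the low bits, flags above them */
};

/* Old style: op and flags arrive separately and are combined here. */
static void init_old(struct sketch_request *rq, unsigned int op,
                     unsigned int op_flags)
{
        rq->cmd_flags = op | op_flags;
}

/* New style: the caller already combined them, so this is a plain store. */
static void init_new(struct sketch_request *rq, unsigned int op)
{
        rq->cmd_flags = op;
}

int main(void)
{
        struct sketch_request a = {0}, b = {0};

        init_old(&a, 1, 1u << REQ_OP_BITS);
        init_new(&b, 1 | (1u << REQ_OP_BITS));
        printf("old-style word 0x%x, new-style word 0x%x\n",
               a.cmd_flags, b.cmd_flags);
        return 0;
}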