 block/blk-mq.c | 30 ++++++++++++++++--------------
 1 file changed, 16 insertions(+), 14 deletions(-)
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 29cbc1b5fbdb..3393f29faa9e 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -159,16 +159,17 @@ bool blk_mq_can_queue(struct blk_mq_hw_ctx *hctx)
 EXPORT_SYMBOL(blk_mq_can_queue);
 
 static void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx,
-		struct request *rq, unsigned int rw_flags)
+		struct request *rq, int op,
+		unsigned int op_flags)
 {
 	if (blk_queue_io_stat(q))
-		rw_flags |= REQ_IO_STAT;
+		op_flags |= REQ_IO_STAT;
 
 	INIT_LIST_HEAD(&rq->queuelist);
 	/* csd/requeue_work/fifo_time is initialized before use */
 	rq->q = q;
 	rq->mq_ctx = ctx;
-	rq->cmd_flags |= rw_flags;
+	req_set_op_attrs(rq, op, op_flags);
 	/* do not touch atomic flags, it needs atomic ops against the timer */
 	rq->cpu = -1;
 	INIT_HLIST_NODE(&rq->hash);
@@ -203,11 +204,11 @@ static void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx,
 	rq->end_io_data = NULL;
 	rq->next_rq = NULL;
 
-	ctx->rq_dispatched[rw_is_sync(rw_flags)]++;
+	ctx->rq_dispatched[rw_is_sync(op | op_flags)]++;
 }
 
 static struct request *
-__blk_mq_alloc_request(struct blk_mq_alloc_data *data, int rw)
+__blk_mq_alloc_request(struct blk_mq_alloc_data *data, int op, int op_flags)
 {
 	struct request *rq;
 	unsigned int tag;
@@ -222,7 +223,7 @@ __blk_mq_alloc_request(struct blk_mq_alloc_data *data, int rw)
 	}
 
 	rq->tag = tag;
-	blk_mq_rq_ctx_init(data->q, data->ctx, rq, rw);
+	blk_mq_rq_ctx_init(data->q, data->ctx, rq, op, op_flags);
 	return rq;
 }
 
@@ -246,7 +247,7 @@ struct request *blk_mq_alloc_request(struct request_queue *q, int rw,
 	hctx = q->mq_ops->map_queue(q, ctx->cpu);
 	blk_mq_set_alloc_data(&alloc_data, q, flags, ctx, hctx);
 
-	rq = __blk_mq_alloc_request(&alloc_data, rw);
+	rq = __blk_mq_alloc_request(&alloc_data, rw, 0);
 	if (!rq && !(flags & BLK_MQ_REQ_NOWAIT)) {
 		__blk_mq_run_hw_queue(hctx);
 		blk_mq_put_ctx(ctx);
@@ -254,7 +255,7 @@ struct request *blk_mq_alloc_request(struct request_queue *q, int rw,
 		ctx = blk_mq_get_ctx(q);
 		hctx = q->mq_ops->map_queue(q, ctx->cpu);
 		blk_mq_set_alloc_data(&alloc_data, q, flags, ctx, hctx);
-		rq = __blk_mq_alloc_request(&alloc_data, rw);
+		rq = __blk_mq_alloc_request(&alloc_data, rw, 0);
 		ctx = alloc_data.ctx;
 	}
 	blk_mq_put_ctx(ctx);
@@ -1169,7 +1170,8 @@ static struct request *blk_mq_map_request(struct request_queue *q,
 	struct blk_mq_hw_ctx *hctx;
 	struct blk_mq_ctx *ctx;
 	struct request *rq;
-	int rw = bio_data_dir(bio);
+	int op = bio_data_dir(bio);
+	int op_flags = 0;
 	struct blk_mq_alloc_data alloc_data;
 
 	blk_queue_enter_live(q);
@@ -1177,20 +1179,20 @@ static struct request *blk_mq_map_request(struct request_queue *q,
 	hctx = q->mq_ops->map_queue(q, ctx->cpu);
 
 	if (rw_is_sync(bio->bi_rw))
-		rw |= REQ_SYNC;
+		op_flags |= REQ_SYNC;
 
-	trace_block_getrq(q, bio, rw);
+	trace_block_getrq(q, bio, op);
 	blk_mq_set_alloc_data(&alloc_data, q, BLK_MQ_REQ_NOWAIT, ctx, hctx);
-	rq = __blk_mq_alloc_request(&alloc_data, rw);
+	rq = __blk_mq_alloc_request(&alloc_data, op, op_flags);
 	if (unlikely(!rq)) {
 		__blk_mq_run_hw_queue(hctx);
 		blk_mq_put_ctx(ctx);
-		trace_block_sleeprq(q, bio, rw);
+		trace_block_sleeprq(q, bio, op);
 
 		ctx = blk_mq_get_ctx(q);
 		hctx = q->mq_ops->map_queue(q, ctx->cpu);
 		blk_mq_set_alloc_data(&alloc_data, q, 0, ctx, hctx);
-		rq = __blk_mq_alloc_request(&alloc_data, rw);
+		rq = __blk_mq_alloc_request(&alloc_data, op, op_flags);
 		ctx = alloc_data.ctx;
 		hctx = alloc_data.hctx;
 	}
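
Note: the patch above separates the operation (read vs. write, from bio_data_dir()) from its modifier flags (REQ_SYNC, REQ_IO_STAT), which previously travelled together in one rw_flags word, and recombines them via req_set_op_attrs() when the request is initialized. The standalone sketch below illustrates that split with mock types; the mock_* names and bit values are invented for illustration and are not the kernel's actual definitions.

/*
 * Standalone sketch (not kernel code): an op/op_flags split and a
 * helper in the spirit of req_set_op_attrs() that folds both back
 * into a single cmd_flags word. Bit positions are assumptions.
 */
#include <stdio.h>

#define MOCK_OP_READ		0u
#define MOCK_OP_WRITE		1u

#define MOCK_REQ_SYNC		(1u << 4)	/* hypothetical flag bits */
#define MOCK_REQ_IO_STAT	(1u << 5)

struct mock_request {
	unsigned int cmd_flags;	/* op in the low bits, flags above */
};

/* analogous to req_set_op_attrs(rq, op, op_flags) in the patch */
static void mock_set_op_attrs(struct mock_request *rq,
			      unsigned int op, unsigned int op_flags)
{
	rq->cmd_flags = op | op_flags;
}

int main(void)
{
	struct mock_request rq = { 0 };

	/* what blk_mq_rq_ctx_init() now does with its two new arguments */
	mock_set_op_attrs(&rq, MOCK_OP_WRITE,
			  MOCK_REQ_SYNC | MOCK_REQ_IO_STAT);
	printf("cmd_flags = 0x%x\n", rq.cmd_flags);
	return 0;
}

Callers that still deal in a combined legacy rw word, such as blk_mq_alloc_request(), simply pass 0 for the flags half, which is why the two __blk_mq_alloc_request() call sites in that function gain a trailing 0 argument.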