Diffstat (limited to 'block/blk-mq.c')
-rw-r--r--	block/blk-mq.c	36
1 file changed, 23 insertions(+), 13 deletions(-)
diff --git a/block/blk-mq.c b/block/blk-mq.c
index b9230c522c6b..43eb3156e110 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -210,24 +210,23 @@ static void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx,
 }
 
 static struct request *
-__blk_mq_alloc_request(struct request_queue *q, struct blk_mq_hw_ctx *hctx,
-		struct blk_mq_ctx *ctx, int rw, gfp_t gfp, bool reserved)
+__blk_mq_alloc_request(struct blk_mq_alloc_data *data, int rw)
 {
 	struct request *rq;
 	unsigned int tag;
 
-	tag = blk_mq_get_tag(hctx, &ctx->last_tag, gfp, reserved);
+	tag = blk_mq_get_tag(data);
 	if (tag != BLK_MQ_TAG_FAIL) {
-		rq = hctx->tags->rqs[tag];
+		rq = data->hctx->tags->rqs[tag];
 
 		rq->cmd_flags = 0;
-		if (blk_mq_tag_busy(hctx)) {
+		if (blk_mq_tag_busy(data->hctx)) {
 			rq->cmd_flags = REQ_MQ_INFLIGHT;
-			atomic_inc(&hctx->nr_active);
+			atomic_inc(&data->hctx->nr_active);
 		}
 
 		rq->tag = tag;
-		blk_mq_rq_ctx_init(q, ctx, rq, rw);
+		blk_mq_rq_ctx_init(data->q, data->ctx, rq, rw);
 		return rq;
 	}
 
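
Everything __blk_mq_alloc_request() used to take as separate arguments now travels in a struct blk_mq_alloc_data. That structure and its initializer, blk_mq_set_alloc_data(), are added in block/blk-mq.h by the same change and do not appear in this diff; the following sketch is inferred from the call sites above and should be read as an assumption, not as part of the hunk:

/*
 * Assumed layout, reconstructed from how data->q, data->gfp,
 * data->reserved, data->ctx and data->hctx are used above.
 */
struct blk_mq_alloc_data {
	/* input parameters */
	struct request_queue *q;
	gfp_t gfp;
	bool reserved;

	/* input & output: remapped if the allocation sleeps */
	struct blk_mq_ctx *ctx;
	struct blk_mq_hw_ctx *hctx;
};

static inline void blk_mq_set_alloc_data(struct blk_mq_alloc_data *data,
		struct request_queue *q, gfp_t gfp, bool reserved,
		struct blk_mq_ctx *ctx, struct blk_mq_hw_ctx *hctx)
{
	data->q = q;
	data->gfp = gfp;
	data->reserved = reserved;
	data->ctx = ctx;
	data->hctx = hctx;
}
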
@@ -240,22 +239,27 @@ struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp,
 	struct blk_mq_ctx *ctx;
 	struct blk_mq_hw_ctx *hctx;
 	struct request *rq;
+	struct blk_mq_alloc_data alloc_data;
 
 	if (blk_mq_queue_enter(q))
 		return NULL;
 
 	ctx = blk_mq_get_ctx(q);
 	hctx = q->mq_ops->map_queue(q, ctx->cpu);
+	blk_mq_set_alloc_data(&alloc_data, q, gfp & ~__GFP_WAIT,
+			reserved, ctx, hctx);
 
-	rq = __blk_mq_alloc_request(q, hctx, ctx, rw, gfp & ~__GFP_WAIT,
-			reserved);
+	rq = __blk_mq_alloc_request(&alloc_data, rw);
 	if (!rq && (gfp & __GFP_WAIT)) {
 		__blk_mq_run_hw_queue(hctx);
 		blk_mq_put_ctx(ctx);
 
 		ctx = blk_mq_get_ctx(q);
 		hctx = q->mq_ops->map_queue(q, ctx->cpu);
-		rq = __blk_mq_alloc_request(q, hctx, ctx, rw, gfp, reserved);
+		blk_mq_set_alloc_data(&alloc_data, q, gfp, reserved, ctx,
+				hctx);
+		rq = __blk_mq_alloc_request(&alloc_data, rw);
+		ctx = alloc_data.ctx;
 	}
 	blk_mq_put_ctx(ctx);
 	return rq;
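
Note the new ctx = alloc_data.ctx; assignment on the __GFP_WAIT retry path: blk_mq_get_tag() now receives the whole blk_mq_alloc_data, so a tag allocation that has to sleep can wake on a different CPU and write the remapped software/hardware contexts back into the struct. The caller must then resynchronize from alloc_data instead of trusting its pre-sleep pointers. A minimal sketch of such a wait loop, assuming the shape of the tag code (the real loop lives in block/blk-mq-tag.c and is not part of this diff; __try_get_tag() is a hypothetical placeholder):

/*
 * Sketch of how a sleeping tag allocation can remap ctx/hctx.
 * Assumed shape only; not taken from this diff.
 */
static unsigned int example_tag_wait(struct blk_mq_alloc_data *data)
{
	unsigned int tag;

	/* __try_get_tag() stands in for the real tag-bitmap lookup */
	while ((tag = __try_get_tag(data->hctx)) == BLK_MQ_TAG_FAIL) {
		if (!(data->gfp & __GFP_WAIT))
			break;

		blk_mq_put_ctx(data->ctx);
		io_schedule();	/* may wake up on a different CPU */

		/* remap to the contexts of the CPU we woke up on */
		data->ctx = blk_mq_get_ctx(data->q);
		data->hctx = data->q->mq_ops->map_queue(data->q,
							data->ctx->cpu);
	}
	return tag;
}
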
@@ -1136,6 +1140,7 @@ static struct request *blk_mq_map_request(struct request_queue *q,
 	struct blk_mq_ctx *ctx;
 	struct request *rq;
 	int rw = bio_data_dir(bio);
+	struct blk_mq_alloc_data alloc_data;
 
 	if (unlikely(blk_mq_queue_enter(q))) {
 		bio_endio(bio, -EIO);
@@ -1149,7 +1154,9 @@ static struct request *blk_mq_map_request(struct request_queue *q,
 		rw |= REQ_SYNC;
 
 	trace_block_getrq(q, bio, rw);
-	rq = __blk_mq_alloc_request(q, hctx, ctx, rw, GFP_ATOMIC, false);
+	blk_mq_set_alloc_data(&alloc_data, q, GFP_ATOMIC, false, ctx,
+			hctx);
+	rq = __blk_mq_alloc_request(&alloc_data, rw);
 	if (unlikely(!rq)) {
 		__blk_mq_run_hw_queue(hctx);
 		blk_mq_put_ctx(ctx);
@@ -1157,8 +1164,11 @@ static struct request *blk_mq_map_request(struct request_queue *q,
 
 		ctx = blk_mq_get_ctx(q);
 		hctx = q->mq_ops->map_queue(q, ctx->cpu);
-		rq = __blk_mq_alloc_request(q, hctx, ctx, rw,
-				__GFP_WAIT|GFP_ATOMIC, false);
+		blk_mq_set_alloc_data(&alloc_data, q,
+				__GFP_WAIT|GFP_ATOMIC, false, ctx, hctx);
+		rq = __blk_mq_alloc_request(&alloc_data, rw);
+		ctx = alloc_data.ctx;
+		hctx = alloc_data.hctx;
 	}
 
 	hctx->queued++;
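
In blk_mq_map_request() both ctx and hctx are refreshed from alloc_data after the retry, because hctx->queued++ runs immediately afterwards; with the old calling convention a request allocated after a sleep could bump the counter of a hardware queue it was never mapped to. Boiled down, the pattern this change establishes looks roughly like the following (an illustrative condensation, not kernel code; map_request_pattern() is a made-up name):

static struct request *map_request_pattern(struct request_queue *q,
					   int rw, gfp_t gfp)
{
	struct blk_mq_alloc_data data;
	struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
	struct blk_mq_hw_ctx *hctx = q->mq_ops->map_queue(q, ctx->cpu);
	struct request *rq;

	blk_mq_set_alloc_data(&data, q, gfp, false, ctx, hctx);
	rq = __blk_mq_alloc_request(&data, rw);

	/*
	 * The allocation may have slept and migrated CPUs: the
	 * pre-sleep ctx/hctx may no longer be the contexts the
	 * request was set up on, so always resynchronize here.
	 */
	ctx = data.ctx;
	hctx = data.hctx;

	if (rq)
		hctx->queued++;	/* now hits the right hardware queue */
	blk_mq_put_ctx(ctx);
	return rq;
}
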