author     Christoph Hellwig <hch@lst.de>              2014-05-27 14:59:47 -0400
committer  Jens Axboe <axboe@fb.com>                   2014-05-28 11:49:21 -0400
commit     5dee857720db15e2c8ef0c03f7eeac00c4c63cb2 (patch)
tree       7607431cfe22086dadbf8c74f2b25ee8555f5bbd /block
parent     4ce01dd1a07d9cf3eaf44fbf4ea9a61b11badccc (diff)
blk-mq: initialize request in __blk_mq_alloc_request
Both callers of __blk_mq_alloc_request want to initialize the request, so
lift it into the common path.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@fb.com>
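To make the motivation concrete, here is a minimal, self-contained C sketch of
the pattern the patch applies (the example_* names are hypothetical and are not
the blk-mq code itself): when every caller of an allocation helper immediately
performs the same initialization, that initialization can be lifted into the
helper, and the callers shrink to a single call plus an error check.

    #include <stddef.h>

    /* Simplified stand-ins for the blk-mq request and its helpers. */
    struct example_request {
            int tag;
            int rw_flags;
    };

    static struct example_request example_pool[4];

    /* Pretend tag allocator; returns a negative value on failure. */
    static int example_get_tag(void)
    {
            return 0;
    }

    /* The per-request setup that both callers previously repeated. */
    static void example_rq_ctx_init(struct example_request *rq, int rw)
    {
            rq->rw_flags = rw;
    }

    /* After the refactor the allocator both grabs a tag and initializes
     * the request, so no caller has to remember the init step. */
    static struct example_request *example_alloc_request(int rw)
    {
            int tag = example_get_tag();

            if (tag < 0)
                    return NULL;

            example_pool[tag].tag = tag;
            example_rq_ctx_init(&example_pool[tag], rw);
            return &example_pool[tag];
    }

In the patch below, __blk_mq_alloc_request() takes over the blk_mq_rq_ctx_init()
call that blk_mq_alloc_request_pinned() and blk_mq_map_request() previously made
after every successful allocation.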
Diffstat (limited to 'block')
-rw-r--r--   block/blk-mq.c   62
1 file changed, 30 insertions(+), 32 deletions(-)
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 63d581d72a70..04ef7ecb3c7f 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -92,30 +92,6 @@ static void blk_mq_hctx_clear_pending(struct blk_mq_hw_ctx *hctx,
 	clear_bit(CTX_TO_BIT(hctx, ctx), &bm->word);
 }
 
-static struct request *__blk_mq_alloc_request(struct blk_mq_hw_ctx *hctx,
-					      struct blk_mq_ctx *ctx,
-					      gfp_t gfp, bool reserved)
-{
-	struct request *rq;
-	unsigned int tag;
-
-	tag = blk_mq_get_tag(hctx, &ctx->last_tag, gfp, reserved);
-	if (tag != BLK_MQ_TAG_FAIL) {
-		rq = hctx->tags->rqs[tag];
-
-		rq->cmd_flags = 0;
-		if (blk_mq_tag_busy(hctx)) {
-			rq->cmd_flags = REQ_MQ_INFLIGHT;
-			atomic_inc(&hctx->nr_active);
-		}
-
-		rq->tag = tag;
-		return rq;
-	}
-
-	return NULL;
-}
-
 static int blk_mq_queue_enter(struct request_queue *q)
 {
 	int ret;
@@ -263,6 +239,32 @@ static void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx,
 	ctx->rq_dispatched[rw_is_sync(rw_flags)]++;
 }
 
+static struct request *
+__blk_mq_alloc_request(struct request_queue *q, struct blk_mq_hw_ctx *hctx,
+		struct blk_mq_ctx *ctx, int rw, gfp_t gfp, bool reserved)
+{
+	struct request *rq;
+	unsigned int tag;
+
+	tag = blk_mq_get_tag(hctx, &ctx->last_tag, gfp, reserved);
+	if (tag != BLK_MQ_TAG_FAIL) {
+		rq = hctx->tags->rqs[tag];
+
+		rq->cmd_flags = 0;
+		if (blk_mq_tag_busy(hctx)) {
+			rq->cmd_flags = REQ_MQ_INFLIGHT;
+			atomic_inc(&hctx->nr_active);
+		}
+
+		rq->tag = tag;
+		blk_mq_rq_ctx_init(q, ctx, rq, rw);
+		return rq;
+	}
+
+	return NULL;
+}
+
+
 static struct request *blk_mq_alloc_request_pinned(struct request_queue *q,
 						   int rw, gfp_t gfp,
 						   bool reserved)
@@ -273,12 +275,10 @@ static struct request *blk_mq_alloc_request_pinned(struct request_queue *q,
 		struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
 		struct blk_mq_hw_ctx *hctx = q->mq_ops->map_queue(q, ctx->cpu);
 
-		rq = __blk_mq_alloc_request(hctx, ctx, gfp & ~__GFP_WAIT,
+		rq = __blk_mq_alloc_request(q, hctx, ctx, rw, gfp & ~__GFP_WAIT,
 						reserved);
-		if (rq) {
-			blk_mq_rq_ctx_init(q, ctx, rq, rw);
+		if (rq)
 			break;
-		}
 
 		if (gfp & __GFP_WAIT) {
 			__blk_mq_run_hw_queue(hctx);
@@ -1178,10 +1178,8 @@ static struct request *blk_mq_map_request(struct request_queue *q,
 		rw |= REQ_SYNC;
 
 	trace_block_getrq(q, bio, rw);
-	rq = __blk_mq_alloc_request(hctx, ctx, GFP_ATOMIC, false);
-	if (likely(rq))
-		blk_mq_rq_ctx_init(q, ctx, rq, rw);
-	else {
+	rq = __blk_mq_alloc_request(q, hctx, ctx, rw, GFP_ATOMIC, false);
+	if (unlikely(!rq)) {
 		blk_mq_put_ctx(ctx);
 		trace_block_sleeprq(q, bio, rw);
 		rq = blk_mq_alloc_request_pinned(q, rw, __GFP_WAIT|GFP_ATOMIC,