author     Christoph Hellwig <hch@lst.de>    2014-02-10 11:29:00 -0500
committer  Jens Axboe <axboe@fb.com>         2014-02-10 11:29:00 -0500
commit     18741986a4b1dc4b1f171634c4191abc3b0fa023 (patch)
tree       d0f632fa9b205d5fbcc76ff1cf8cba63112c7da8 /block/blk-mq.c
parent     ce2c350b2cfe5b5ca5023a6b1ec4d21821d39add (diff)
blk-mq: rework flush sequencing logic
Switch to using a preallocated flush_rq for blk-mq, similar to what's done
with the old request path. This allows us to set up the request properly
with a tag from the actually allowed range and ->rq_disk as needed by some
drivers. To make life easier we also switch to dynamic allocation of
->flush_rq for the old path.

This effectively reverts most of "blk-mq: fix for flush deadlock" and
"blk-mq: Don't reserve a tag for flush request".

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@fb.com>
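As a rough illustration of the approach (not part of the patch itself): each queue gets a single flush request allocated up front, sized so the driver's per-command payload fits behind the struct request, and that one request is re-initialized for every flush sequence instead of competing for a tag at flush time. The helper name and its cmd_size parameter below are hypothetical stand-ins for the reg->cmd_size plumbing visible in the diff.

#include <linux/blkdev.h>       /* struct request */
#include <linux/cache.h>        /* cache_line_size() */
#include <linux/kernel.h>       /* round_up() */
#include <linux/slab.h>         /* kzalloc() */

/* Hypothetical sketch of the preallocation idea; error handling elided. */
static struct request *alloc_flush_rq(unsigned int cmd_size)
{
        /*
         * Room for the request itself plus the driver pdu, rounded up to a
         * cache line, mirroring what the patch does in blk_mq_init_queue().
         */
        size_t size = round_up(sizeof(struct request) + cmd_size,
                               cache_line_size());

        return kzalloc(size, GFP_KERNEL);  /* zeroed; re-inited per flush */
}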
Diffstat (limited to 'block/blk-mq.c')
-rw-r--r--  block/blk-mq.c  54
1 file changed, 19 insertions(+), 35 deletions(-)
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 14c8f35946e1..a59b0565e940 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -194,27 +194,9 @@ static void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx,
 }
 
 static struct request *__blk_mq_alloc_request(struct blk_mq_hw_ctx *hctx,
-                                              gfp_t gfp, bool reserved,
-                                              int rw)
+                                              gfp_t gfp, bool reserved)
 {
-        struct request *req;
-        bool is_flush = false;
-        /*
-         * flush need allocate a request, leave at least one request for
-         * non-flush IO to avoid deadlock
-         */
-        if ((rw & REQ_FLUSH) && !(rw & REQ_FLUSH_SEQ)) {
-                if (atomic_inc_return(&hctx->pending_flush) >=
-                    hctx->queue_depth - hctx->reserved_tags - 1) {
-                        atomic_dec(&hctx->pending_flush);
-                        return NULL;
-                }
-                is_flush = true;
-        }
-        req = blk_mq_alloc_rq(hctx, gfp, reserved);
-        if (!req && is_flush)
-                atomic_dec(&hctx->pending_flush);
-        return req;
+        return blk_mq_alloc_rq(hctx, gfp, reserved);
 }
 
 static struct request *blk_mq_alloc_request_pinned(struct request_queue *q,
@@ -227,7 +209,7 @@ static struct request *blk_mq_alloc_request_pinned(struct request_queue *q,
                 struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
                 struct blk_mq_hw_ctx *hctx = q->mq_ops->map_queue(q, ctx->cpu);
 
-                rq = __blk_mq_alloc_request(hctx, gfp & ~__GFP_WAIT, reserved, rw);
+                rq = __blk_mq_alloc_request(hctx, gfp & ~__GFP_WAIT, reserved);
                 if (rq) {
                         blk_mq_rq_ctx_init(q, ctx, rq, rw);
                         break;
@@ -244,15 +226,14 @@ static struct request *blk_mq_alloc_request_pinned(struct request_queue *q,
         return rq;
 }
 
-struct request *blk_mq_alloc_request(struct request_queue *q, int rw,
-                                     gfp_t gfp, bool reserved)
+struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp)
 {
         struct request *rq;
 
         if (blk_mq_queue_enter(q))
                 return NULL;
 
-        rq = blk_mq_alloc_request_pinned(q, rw, gfp, reserved);
+        rq = blk_mq_alloc_request_pinned(q, rw, gfp, false);
         if (rq)
                 blk_mq_put_ctx(rq->mq_ctx);
         return rq;
@@ -276,7 +257,7 @@ EXPORT_SYMBOL(blk_mq_alloc_reserved_request);
 /*
  * Re-init and set pdu, if we have it
  */
-static void blk_mq_rq_init(struct blk_mq_hw_ctx *hctx, struct request *rq)
+void blk_mq_rq_init(struct blk_mq_hw_ctx *hctx, struct request *rq)
 {
         blk_rq_init(hctx->queue, rq);
 
@@ -290,9 +271,6 @@ static void __blk_mq_free_request(struct blk_mq_hw_ctx *hctx,
         const int tag = rq->tag;
         struct request_queue *q = rq->q;
 
-        if ((rq->cmd_flags & REQ_FLUSH) && !(rq->cmd_flags & REQ_FLUSH_SEQ))
-                atomic_dec(&hctx->pending_flush);
-
         blk_mq_rq_init(hctx, rq);
         blk_mq_put_tag(hctx->tags, tag);
 
@@ -946,14 +924,14 @@ static void blk_mq_make_request(struct request_queue *q, struct bio *bio)
         hctx = q->mq_ops->map_queue(q, ctx->cpu);
 
         trace_block_getrq(q, bio, rw);
-        rq = __blk_mq_alloc_request(hctx, GFP_ATOMIC, false, bio->bi_rw);
+        rq = __blk_mq_alloc_request(hctx, GFP_ATOMIC, false);
         if (likely(rq))
-                blk_mq_rq_ctx_init(q, ctx, rq, bio->bi_rw);
+                blk_mq_rq_ctx_init(q, ctx, rq, rw);
         else {
                 blk_mq_put_ctx(ctx);
                 trace_block_sleeprq(q, bio, rw);
-                rq = blk_mq_alloc_request_pinned(q, bio->bi_rw,
-                                __GFP_WAIT|GFP_ATOMIC, false);
+                rq = blk_mq_alloc_request_pinned(q, rw, __GFP_WAIT|GFP_ATOMIC,
+                                                 false);
                 ctx = rq->mq_ctx;
                 hctx = q->mq_ops->map_queue(q, ctx->cpu);
         }
@@ -1230,9 +1208,7 @@ static int blk_mq_init_hw_queues(struct request_queue *q,
                 hctx->queue_num = i;
                 hctx->flags = reg->flags;
                 hctx->queue_depth = reg->queue_depth;
-                hctx->reserved_tags = reg->reserved_tags;
                 hctx->cmd_size = reg->cmd_size;
-                atomic_set(&hctx->pending_flush, 0);
 
                 blk_mq_init_cpu_notifier(&hctx->cpu_notifier,
                                         blk_mq_hctx_notify, hctx);
@@ -1412,9 +1388,14 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_reg *reg,
         blk_mq_init_flush(q);
         blk_mq_init_cpu_queues(q, reg->nr_hw_queues);
 
-        if (blk_mq_init_hw_queues(q, reg, driver_data))
+        q->flush_rq = kzalloc(round_up(sizeof(struct request) + reg->cmd_size,
+                                cache_line_size()), GFP_KERNEL);
+        if (!q->flush_rq)
                 goto err_hw;
 
+        if (blk_mq_init_hw_queues(q, reg, driver_data))
+                goto err_flush_rq;
+
         blk_mq_map_swqueue(q);
 
         mutex_lock(&all_q_mutex);
@@ -1422,6 +1403,9 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_reg *reg,
         mutex_unlock(&all_q_mutex);
 
         return q;
+
+err_flush_rq:
+        kfree(q->flush_rq);
 err_hw:
         kfree(q->mq_map);
 err_map:
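The exported allocation interface also gets simpler in this patch: blk_mq_alloc_request() loses its reserved argument (reserved tags keep their own blk_mq_alloc_reserved_request() entry point), so any caller of the old four-argument form needs a matching update. Below is a minimal, hypothetical caller against the new three-argument signature; the helper name and its use are illustrative only and not taken from the patch.

#include <linux/blkdev.h>
#include <linux/blk-mq.h>

/* Hypothetical driver helper, not part of this commit. */
static struct request *example_get_rq(struct request_queue *q)
{
        /* GFP_KERNEL includes __GFP_WAIT, so this may block for a tag. */
        struct request *rq = blk_mq_alloc_request(q, WRITE, GFP_KERNEL);

        if (!rq)
                return NULL;

        /* ... set up cmd_flags, the driver pdu, ->rq_disk, etc. before queueing ... */
        return rq;
}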