aboutsummaryrefslogtreecommitdiffstats
path: root/block/blk-mq.c
diff options
context:
space:
mode:
authorChristoph Hellwig <hch@lst.de>2013-10-28 15:33:58 -0400
committerJens Axboe <axboe@kernel.dk>2013-10-28 15:33:58 -0400
commit3228f48be2d19b2dd90db96ec16a40187a2946f3 (patch)
tree2f148dd8e82b287fc7b682cabc5614e3046d8949 /block/blk-mq.c
parent280d45f6c35d8d7a0fe20c36caf426e3ac139cf9 (diff)
blk-mq: fix for flush deadlock
The flush state machine takes in a struct request, which then is submitted multiple times to the underlying driver. The old block code reuses the same request for each of those, so it does not have an issue with tapping into the request pool. The new one on the other hand allocates a new request for each of the actual steps of the flush sequence. If we have already allocated all of the tags for IO, we will fail allocating the flush request. Set aside a reserved request just for flushes. Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'block/blk-mq.c')
-rw-r--r--block/blk-mq.c14
1 file changed, 12 insertions, 2 deletions
diff --git a/block/blk-mq.c b/block/blk-mq.c
index ac804c635040..2dc8de86d0d2 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -210,14 +210,15 @@ static struct request *blk_mq_alloc_request_pinned(struct request_queue *q,
210 return rq; 210 return rq;
211} 211}
212 212
213struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp) 213struct request *blk_mq_alloc_request(struct request_queue *q, int rw,
214 gfp_t gfp, bool reserved)
214{ 215{
215 struct request *rq; 216 struct request *rq;
216 217
217 if (blk_mq_queue_enter(q)) 218 if (blk_mq_queue_enter(q))
218 return NULL; 219 return NULL;
219 220
220 rq = blk_mq_alloc_request_pinned(q, rw, gfp, false); 221 rq = blk_mq_alloc_request_pinned(q, rw, gfp, reserved);
221 blk_mq_put_ctx(rq->mq_ctx); 222 blk_mq_put_ctx(rq->mq_ctx);
222 return rq; 223 return rq;
223} 224}
@@ -1327,6 +1328,15 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_reg *reg,
1327 reg->queue_depth = BLK_MQ_MAX_DEPTH; 1328 reg->queue_depth = BLK_MQ_MAX_DEPTH;
1328 } 1329 }
1329 1330
1331 /*
1332 * Set aside a tag for flush requests. It will only be used while
1333 * another flush request is in progress but outside the driver.
1334 *
1335 * TODO: only allocate if flushes are supported
1336 */
1337 reg->queue_depth++;
1338 reg->reserved_tags++;
1339
1330 if (reg->queue_depth < (reg->reserved_tags + BLK_MQ_TAG_MIN)) 1340 if (reg->queue_depth < (reg->reserved_tags + BLK_MQ_TAG_MIN))
1331 return ERR_PTR(-EINVAL); 1341 return ERR_PTR(-EINVAL);
1332 1342