author     David Hildenbrand <dahi@linux.vnet.ibm.com>   2014-09-18 05:04:31 -0400
committer  Jens Axboe <axboe@fb.com>                     2014-09-22 13:55:14 -0400
commit     683d0e126232d898a481daa3a4ca032c2b1a9660
tree       ce6bf69626d6b1b11cd3fb69bb31db506fe77704 /block
parent     538b75341835e3c2041ff066408de10d24fdc830
blk-mq: Avoid race condition with uninitialized requests
This patch should fix the bug reported in
https://lkml.org/lkml/2014/9/11/249.
We have to initialize at least the atomic_flags and the cmd_flags when
allocating storage for the requests.
Otherwise blk_mq_timeout_check() might dereference uninitialized
pointers when racing with the creation of a request.
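As a rough illustration only (a simplified model, not the kernel code: struct and function names such as blk_mq_tags_sketch and timeout_scan_sketch are made up, and the fields are modeled loosely on block/blk-mq.c around v3.17), the sketch below shows why a tag-by-tag scan is dangerous when the preallocated request storage is never zeroed:

/*
 * Hedged sketch of the problem. The real timeout path walks every tag
 * slot and inspects the request stored there; if that storage came from
 * freshly allocated, non-zeroed pages, the flags are random memory.
 */
struct request {
	unsigned long atomic_flags;	/* "started" and friends */
	unsigned int  cmd_flags;	/* in-flight / flush indicators */
	void *end_io_data;		/* garbage until the request is set up */
};

struct blk_mq_tags_sketch {
	struct request **rqs;		/* one preallocated request per tag */
	unsigned int nr_tags;
};

static void timeout_scan_sketch(struct blk_mq_tags_sketch *tags)
{
	for (unsigned int tag = 0; tag < tags->nr_tags; tag++) {
		struct request *rq = tags->rqs[tag];

		/*
		 * If atomic_flags was never initialized, this can mistake
		 * random memory for a started request and go on to follow
		 * whatever pointers happen to sit in it.
		 */
		if (rq->atomic_flags & 1UL)	/* illustrative "started" bit */
			(void)rq->end_io_data;	/* would be dereferenced here */
	}
}

Zeroing atomic_flags and cmd_flags when the request storage is carved out in blk_mq_init_rq_map() removes exactly this hazard.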
Also move the reset of cmd_flags from the allocation path to the point
where a request is freed, so we never end up with stale flush-request
indicators that could trigger dereferences of invalid pointers in
blk_mq_timeout_check().
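To make that window concrete, here is a small userspace analogy (the names slot, alloc_old, scan and STALE_FLUSH are invented for the example and only model the ordering, not the blk-mq code). With the old placement the slot becomes visible before its flags are cleared, so a concurrent scan can still see the previous user's flush state; clearing the flags when the slot is freed removes the window entirely:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define STALE_FLUSH 0x1			/* stands in for leftover flush bits */

struct slot {
	atomic_int in_use;		/* stands in for "tag is taken" */
	atomic_int cmd_flags;
};

static struct slot slot;

/* Allocator modeled on the OLD code: publish the slot, clear flags later. */
static void *alloc_old(void *arg)
{
	(void)arg;
	atomic_store(&slot.in_use, 1);		/* slot becomes visible ... */
	/* <-- window: a scanner running now still sees STALE_FLUSH */
	atomic_store(&slot.cmd_flags, 0);	/* ... flags cleared only here */
	return NULL;
}

/* Timeout-style scanner: inspects any slot that looks taken. */
static void *scan(void *arg)
{
	(void)arg;
	if (atomic_load(&slot.in_use) &&
	    (atomic_load(&slot.cmd_flags) & STALE_FLUSH))
		puts("scanner saw stale flush state on a just-allocated slot");
	return NULL;
}

int main(void)
{
	pthread_t a, s;

	/* The previous user left flush state behind. With the fix, this
	 * would have been cleared when the slot was freed, so the window
	 * below cannot open in the first place. */
	atomic_store(&slot.cmd_flags, STALE_FLUSH);

	pthread_create(&a, NULL, alloc_old, NULL);
	pthread_create(&s, NULL, scan, NULL);
	pthread_join(a, NULL);
	pthread_join(s, NULL);
	return 0;
}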
Cc: stable@vger.kernel.org
Signed-off-by: David Hildenbrand <dahi@linux.vnet.ibm.com>
Reported-by: Paulo De Rezende Pinatti <ppinatti@linux.vnet.ibm.com>
Tested-by: Paulo De Rezende Pinatti <ppinatti@linux.vnet.ibm.com>
Acked-by: Christian Borntraeger <borntraeger@de.ibm.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
Diffstat (limited to 'block')
 -rw-r--r--  block/blk-mq.c | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/block/blk-mq.c b/block/blk-mq.c
index a13c40ca8230..1583ed28ea03 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -203,7 +203,6 @@ __blk_mq_alloc_request(struct blk_mq_alloc_data *data, int rw)
 	if (tag != BLK_MQ_TAG_FAIL) {
 		rq = data->hctx->tags->rqs[tag];
 
-		rq->cmd_flags = 0;
 		if (blk_mq_tag_busy(data->hctx)) {
 			rq->cmd_flags = REQ_MQ_INFLIGHT;
 			atomic_inc(&data->hctx->nr_active);
@@ -258,6 +257,7 @@ static void __blk_mq_free_request(struct blk_mq_hw_ctx *hctx,
 
 	if (rq->cmd_flags & REQ_MQ_INFLIGHT)
 		atomic_dec(&hctx->nr_active);
+	rq->cmd_flags = 0;
 
 	clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
 	blk_mq_put_tag(hctx, tag, &ctx->last_tag);
@@ -1410,6 +1410,8 @@ static struct blk_mq_tags *blk_mq_init_rq_map(struct blk_mq_tag_set *set,
 		left -= to_do * rq_size;
 		for (j = 0; j < to_do; j++) {
 			tags->rqs[i] = p;
+			tags->rqs[i]->atomic_flags = 0;
+			tags->rqs[i]->cmd_flags = 0;
 			if (set->ops->init_request) {
 				if (set->ops->init_request(set->driver_data,
 						tags->rqs[i], hctx_idx, i,