| author | Jens Axboe <axboe@fb.com> | 2014-09-22 13:57:32 -0400 |
|---|---|---|
| committer | Jens Axboe <axboe@fb.com> | 2014-09-22 13:57:32 -0400 |
| commit | 6d11fb454b161a4565c57be6f1c5527235741003 | |
| tree | c238ed3df2f654181c2a0746478a33b32214cc60 /block/blk-mq.c | |
| parent | b207892b061da7608878e273ae22ba9bf9be264b | |
| parent | 8b95741569eabc5eb17da71d1d3668cdb0bef86c | |
Merge branch 'for-linus' into for-3.18/core
Moving patches from for-linus to 3.18 instead; pull in the changes
that will go to Linus today.
Diffstat (limited to 'block/blk-mq.c')

 -rw-r--r--  block/blk-mq.c | 28 +++++++++++++++++-----------
 1 file changed, 17 insertions(+), 11 deletions(-)
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 067e600002d3..e743d28620b2 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -203,7 +203,6 @@ __blk_mq_alloc_request(struct blk_mq_alloc_data *data, int rw)
 	if (tag != BLK_MQ_TAG_FAIL) {
 		rq = data->hctx->tags->rqs[tag];
 
-		rq->cmd_flags = 0;
 		if (blk_mq_tag_busy(data->hctx)) {
 			rq->cmd_flags = REQ_MQ_INFLIGHT;
 			atomic_inc(&data->hctx->nr_active);
@@ -262,6 +261,7 @@ static void __blk_mq_free_request(struct blk_mq_hw_ctx *hctx,
 
 	if (rq->cmd_flags & REQ_MQ_INFLIGHT)
 		atomic_dec(&hctx->nr_active);
+	rq->cmd_flags = 0;
 
 	clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
 	blk_mq_put_tag(hctx, tag, &ctx->last_tag);
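These first two hunks move the `rq->cmd_flags = 0` reset from the allocation path to the free path: a request now carries clean flags for the whole time it sits in the tag pool, and the REQ_MQ_INFLIGHT accounting in __blk_mq_free_request() reads the flags before they are wiped. Below is a minimal userspace sketch of the same "clear on put, assume clean on get" invariant; the names (`pooled_obj`, `obj_pool_get`, `obj_pool_put`) are invented for illustration and are not the kernel API.

```c
#include <stdio.h>

#define FLAG_INFLIGHT 0x1u

struct pooled_obj {
	unsigned int flags;
};

static struct pooled_obj pool[4];	/* all-zero at program start */
static int nr_active;

static struct pooled_obj *obj_pool_get(int tag)
{
	struct pooled_obj *obj = &pool[tag];

	/* No reset here: the pool invariant is that objects are already
	 * clean, so we only set what this acquisition needs. */
	obj->flags |= FLAG_INFLIGHT;
	nr_active++;
	return obj;
}

static void obj_pool_put(struct pooled_obj *obj)
{
	/* Accounting must read the flags before they are wiped... */
	if (obj->flags & FLAG_INFLIGHT)
		nr_active--;
	/* ...and the reset on the put side restores the invariant. */
	obj->flags = 0;
}

int main(void)
{
	struct pooled_obj *obj = obj_pool_get(0);

	obj_pool_put(obj);
	printf("active after put: %d\n", nr_active);	/* prints 0 */
	return 0;
}
```

The ordering inside obj_pool_put() mirrors the hunk above: test the flag for accounting first, then reset it.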
@@ -397,6 +397,12 @@ static void blk_mq_start_request(struct request *rq, bool last)
 	blk_add_timer(rq);
 
 	/*
+	 * Ensure that ->deadline is visible before set the started
+	 * flag and clear the completed flag.
+	 */
+	smp_mb__before_atomic();
+
+	/*
 	 * Mark us as started and clear complete. Complete might have been
 	 * set if requeue raced with timeout, which then marked it as
 	 * complete. So be sure to clear complete again when we start
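The added smp_mb__before_atomic() orders the store to rq->deadline (done inside blk_add_timer()) before the set_bit()/clear_bit() calls that follow, so a timeout checker that sees the started bit also sees a valid deadline. Here is a rough userspace analogue using C11 atomics; release/acquire is used as the closest portable stand-in for the kernel's stronger full barrier, and every name in the sketch is invented.

```c
#include <stdatomic.h>
#include <stdio.h>

#define FLAG_STARTED 0x1u

static long deadline;		/* plain data field */
static atomic_uint flags;	/* status bits */

static void start_request(long dl)
{
	deadline = dl;
	/* Publish the deadline store before the flag store, so a
	 * checker that sees FLAG_STARTED also sees the deadline. */
	atomic_fetch_or_explicit(&flags, FLAG_STARTED, memory_order_release);
}

static void check_timeout(long now)
{
	if (atomic_load_explicit(&flags, memory_order_acquire) & FLAG_STARTED) {
		/* Safe: this acquire pairs with the release above. */
		if (now > deadline)
			puts("timed out");
	}
}

int main(void)
{
	start_request(100);
	check_timeout(150);	/* prints "timed out" */
	return 0;
}
```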
@@ -477,7 +483,11 @@ static void blk_mq_requeue_work(struct work_struct *work)
 		blk_mq_insert_request(rq, false, false, false);
 	}
 
-	blk_mq_run_queues(q, false);
+	/*
+	 * Use the start variant of queue running here, so that running
+	 * the requeue work will kick stopped queues.
+	 */
+	blk_mq_start_hw_queues(q);
 }
 
 void blk_mq_add_to_requeue_list(struct request *rq, bool at_head)
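The comment in the hunk states the reason for the switch: blk_mq_run_queues() leaves stopped hardware queues alone, while blk_mq_start_hw_queues() clears the stopped state first and then runs the queue, so requeued requests parked on a stopped queue still get dispatched. A toy model of that difference, with invented types and functions (`hw_queue`, `run_queue`, `start_queue`):

```c
#include <stdbool.h>
#include <stdio.h>

struct hw_queue {
	bool stopped;
	int pending;
};

static void run_queue(struct hw_queue *q)
{
	if (q->stopped)		/* mirrors the BLK_MQ_S_STOPPED check */
		return;
	while (q->pending > 0) {
		q->pending--;
		puts("dispatched one request");
	}
}

static void start_queue(struct hw_queue *q)
{
	q->stopped = false;	/* kick a stopped queue back to life */
	run_queue(q);
}

int main(void)
{
	struct hw_queue q = { .stopped = true, .pending = 2 };

	run_queue(&q);		/* no output: the queue is stopped */
	start_queue(&q);	/* dispatches both requeued requests */
	return 0;
}
```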
@@ -961,14 +971,9 @@ void blk_mq_insert_request(struct request *rq, bool at_head, bool run_queue,
 
 	hctx = q->mq_ops->map_queue(q, ctx->cpu);
 
-	if (rq->cmd_flags & (REQ_FLUSH | REQ_FUA) &&
-	    !(rq->cmd_flags & (REQ_FLUSH_SEQ))) {
-		blk_insert_flush(rq);
-	} else {
-		spin_lock(&ctx->lock);
-		__blk_mq_insert_request(hctx, rq, at_head);
-		spin_unlock(&ctx->lock);
-	}
+	spin_lock(&ctx->lock);
+	__blk_mq_insert_request(hctx, rq, at_head);
+	spin_unlock(&ctx->lock);
 
 	if (run_queue)
 		blk_mq_run_hw_queue(hctx, async);
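After this hunk, blk_mq_insert_request() has a single unconditional path: take the per-CPU software queue lock, link the request, release the lock. (The dropped REQ_FLUSH/REQ_FUA branch suggests flush requests are steered through the flush machinery before they reach this function, but the merge itself does not say so.) A bare-bones sketch of the remaining locking pattern; `sw_queue` and `sw_queue_insert` are invented names:

```c
#include <pthread.h>
#include <stdio.h>

struct sw_queue {
	pthread_mutex_t lock;
	int depth;		/* stand-in for the pending request list */
};

static void sw_queue_insert(struct sw_queue *ctx, int at_head)
{
	/* One unconditional critical section: no special case bypasses
	 * the lock, so the queue state stays consistent. */
	pthread_mutex_lock(&ctx->lock);
	ctx->depth++;
	(void)at_head;		/* head-vs-tail placement elided here */
	pthread_mutex_unlock(&ctx->lock);
}

int main(void)
{
	struct sw_queue ctx = { PTHREAD_MUTEX_INITIALIZER, 0 };

	sw_queue_insert(&ctx, 0);
	printf("queued: %d\n", ctx.depth);
	return 0;
}
```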
@@ -1408,6 +1413,8 @@ static struct blk_mq_tags *blk_mq_init_rq_map(struct blk_mq_tag_set *set,
 		left -= to_do * rq_size;
 		for (j = 0; j < to_do; j++) {
 			tags->rqs[i] = p;
+			tags->rqs[i]->atomic_flags = 0;
+			tags->rqs[i]->cmd_flags = 0;
 			if (set->ops->init_request) {
 				if (set->ops->init_request(set->driver_data,
 						tags->rqs[i], hctx_idx, i,
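The two added stores zero atomic_flags and cmd_flags the moment each preallocated request is wired into tags->rqs[]. The likely motivation (inferred here, not stated in the merge) is that code walking the whole tag map could otherwise read uninitialized flag words from slots that were never handed to a caller. A userspace sketch of pre-zeroing only the fields a scanner inspects; all names are invented:

```c
#include <stdio.h>
#include <stdlib.h>

struct req {
	unsigned long atomic_flags;
	unsigned int cmd_flags;
	char payload[64];	/* rest of the request, left untouched */
};

int main(void)
{
	int depth = 8;
	/* malloc'ed memory is uninitialized, like the page-allocator
	 * backing used in blk_mq_init_rq_map() */
	struct req **rqs = malloc(depth * sizeof(*rqs));
	struct req *backing = malloc(depth * sizeof(*backing));

	if (!rqs || !backing)
		return 1;

	for (int i = 0; i < depth; i++) {
		rqs[i] = &backing[i];
		/* Zero only the fields a scanner might inspect, so a
		 * walk over rqs[] sees sane flags even for slots that
		 * were never allocated to a caller. */
		rqs[i]->atomic_flags = 0;
		rqs[i]->cmd_flags = 0;
	}

	printf("slot 3 flags: %lu/%u\n", rqs[3]->atomic_flags,
	       rqs[3]->cmd_flags);
	free(backing);
	free(rqs);
	return 0;
}
```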
@@ -1960,7 +1967,6 @@ out_unwind:
 	while (--i >= 0)
 		blk_mq_free_rq_map(set, set->tags[i], i);
 
-	set->tags = NULL;
 	return -ENOMEM;
 }
 
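Finally, the unwind path no longer clears set->tags. Presumably this is an ownership fix: the tags array is allocated elsewhere, so NULLing the pointer in this error path would leave whoever owns the allocation unable to free it. A sketch of that ownership split, with invented names (`tag_set`, `alloc_maps`):

```c
#include <stdio.h>
#include <stdlib.h>

struct tag_set {
	void **tags;
	int nr;
};

static int alloc_maps(struct tag_set *set)
{
	int i;

	for (i = 0; i < set->nr; i++) {
		set->tags[i] = malloc(32);
		if (!set->tags[i])
			goto out_unwind;
	}
	return 0;

out_unwind:
	while (--i >= 0)
		free(set->tags[i]);
	/* Do NOT clear set->tags here: the array belongs to the
	 * caller, which still needs the pointer to free it. */
	return -1;
}

int main(void)
{
	struct tag_set set = { .nr = 4 };

	set.tags = calloc(set.nr, sizeof(*set.tags));
	if (!set.tags)
		return 1;

	if (alloc_maps(&set) == 0)
		for (int i = 0; i < set.nr; i++)
			free(set.tags[i]);	/* success-path cleanup */
	free(set.tags);		/* caller frees its own array either way */
	return 0;
}
```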