diff options
author | Shaohua Li <shli@kernel.org> | 2014-05-30 10:06:42 -0400 |
---|---|---|
committer | Jens Axboe <axboe@fb.com> | 2014-05-30 10:06:42 -0400 |
commit | 2230237500821aedfcf2bba2a79d9cbca389233c (patch) | |
tree | b4b7bee385293f8cc318fb309909ca3e701b903b /block | |
parent | da52f22fa924b4a21d8e11fbfd3eeebd7a90a366 (diff) |
blk-mq: blk_mq_tag_to_rq should handle flush request
The flush request is special: it borrows its tag from the parent
request. Hence blk_mq_tag_to_rq needs special handling to return
the flush request for that tag.
Signed-off-by: Shaohua Li <shli@fusionio.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
Diffstat (limited to 'block')
-rw-r--r-- | block/blk-flush.c | 4 | ||||
-rw-r--r-- | block/blk-mq.c | 12 |
2 files changed, 12 insertions, 4 deletions
diff --git a/block/blk-flush.c b/block/blk-flush.c index ef608b35d9be..ff87c664b7df 100644 --- a/block/blk-flush.c +++ b/block/blk-flush.c | |||
@@ -223,8 +223,10 @@ static void flush_end_io(struct request *flush_rq, int error) | |||
223 | struct request *rq, *n; | 223 | struct request *rq, *n; |
224 | unsigned long flags = 0; | 224 | unsigned long flags = 0; |
225 | 225 | ||
226 | if (q->mq_ops) | 226 | if (q->mq_ops) { |
227 | spin_lock_irqsave(&q->mq_flush_lock, flags); | 227 | spin_lock_irqsave(&q->mq_flush_lock, flags); |
228 | q->flush_rq->cmd_flags = 0; | ||
229 | } | ||
228 | 230 | ||
229 | running = &q->flush_queue[q->flush_running_idx]; | 231 | running = &q->flush_queue[q->flush_running_idx]; |
230 | BUG_ON(q->flush_pending_idx == q->flush_running_idx); | 232 | BUG_ON(q->flush_pending_idx == q->flush_running_idx); |
diff --git a/block/blk-mq.c b/block/blk-mq.c index 6160128085fc..21f952ab3581 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c | |||
@@ -541,9 +541,15 @@ void blk_mq_kick_requeue_list(struct request_queue *q) | |||
541 | } | 541 | } |
542 | EXPORT_SYMBOL(blk_mq_kick_requeue_list); | 542 | EXPORT_SYMBOL(blk_mq_kick_requeue_list); |
543 | 543 | ||
544 | struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag) | 544 | struct request *blk_mq_tag_to_rq(struct blk_mq_hw_ctx *hctx, unsigned int tag) |
545 | { | 545 | { |
546 | return tags->rqs[tag]; | 546 | struct request_queue *q = hctx->queue; |
547 | |||
548 | if ((q->flush_rq->cmd_flags & REQ_FLUSH_SEQ) && | ||
549 | q->flush_rq->tag == tag) | ||
550 | return q->flush_rq; | ||
551 | |||
552 | return hctx->tags->rqs[tag]; | ||
547 | } | 553 | } |
548 | EXPORT_SYMBOL(blk_mq_tag_to_rq); | 554 | EXPORT_SYMBOL(blk_mq_tag_to_rq); |
549 | 555 | ||
@@ -572,7 +578,7 @@ static void blk_mq_timeout_check(void *__data, unsigned long *free_tags) | |||
572 | if (tag >= hctx->tags->nr_tags) | 578 | if (tag >= hctx->tags->nr_tags) |
573 | break; | 579 | break; |
574 | 580 | ||
575 | rq = blk_mq_tag_to_rq(hctx->tags, tag++); | 581 | rq = blk_mq_tag_to_rq(hctx, tag++); |
576 | if (rq->q != hctx->queue) | 582 | if (rq->q != hctx->queue) |
577 | continue; | 583 | continue; |
578 | if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags)) | 584 | if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags)) |