author     Christoph Hellwig <hch@lst.de>  2014-09-16 17:44:07 -0400
committer  Jens Axboe <axboe@fb.com>       2014-09-22 13:55:19 -0400
commit     a57a178a490345c7236b0077b3de005754389ed6 (patch)
tree       7d5fa8c1c9df7a70e367f78297545c253c890926
parent     683d0e126232d898a481daa3a4ca032c2b1a9660 (diff)
blk-mq: avoid infinite recursion with the FUA flag
We should not insert requests into the flush state machine from blk_mq_insert_request. All incoming flush requests come through blk_{m,s}q_make_request and are handled there, while blk_execute_rq_nowait should only be called for BLOCK_PC requests. All other callers deal with requests that already went through the flush state machine and shouldn't be reinserted into it.

Reported-by: Robert Elliott <Elliott@hp.com>
Debugged-by: Ming Lei <ming.lei@canonical.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@fb.com>
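For context, the make_request-level routing the message refers to looks roughly like the sketch below. This is a simplified, non-verbatim rendering of blk_mq_make_request() from kernels of this era; request allocation and plugging details are elided.

/*
 * Simplified sketch (not the verbatim upstream function): flush/FUA
 * bios are diverted into the flush state machine here, once, at the
 * make_request level, via blk_insert_flush().
 */
static void blk_mq_make_request(struct request_queue *q, struct bio *bio)
{
	const int is_flush_fua = bio->bi_rw & (REQ_FLUSH | REQ_FUA);
	struct request *rq;

	/* ... allocate rq and map it to a hardware context ... */

	if (unlikely(is_flush_fua)) {
		blk_mq_bio_to_request(rq, bio);
		blk_insert_flush(rq);	/* hand off to the flush state machine */
		return;
	}

	/* ... normal (non-flush) insertion and plugging paths ... */
}

Since every incoming flush enters the state machine at this point, a second entry in blk_mq_insert_request() is redundant, and reinserting a request that already went through the machine is what triggered the recursion.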
-rw-r--r--  block/blk-exec.c |  1 +
-rw-r--r--  block/blk-mq.c   | 11 +++--------
2 files changed, 4 insertions(+), 8 deletions(-)
diff --git a/block/blk-exec.c b/block/blk-exec.c
index f4d27b12c90b..9924725fa50d 100644
--- a/block/blk-exec.c
+++ b/block/blk-exec.c
@@ -56,6 +56,7 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
 	bool is_pm_resume;
 
 	WARN_ON(irqs_disabled());
+	WARN_ON(rq->cmd_type == REQ_TYPE_FS);
 
 	rq->rq_disk = bd_disk;
 	rq->end_io = done;
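The WARN_ON added above documents the contract stated in the commit message: blk_execute_rq_nowait() is for passthrough (BLOCK_PC) requests, never filesystem (REQ_TYPE_FS) ones. Below is a minimal sketch of a conforming caller, loosely modeled on how the SCSI midlayer issues passthrough requests in this era; the names submit_passthrough() and my_done() are made up for illustration.

#include <linux/blkdev.h>
#include <linux/gfp.h>

/* Completion callback: release the request (refcounting/locking details
 * of real callers are elided in this sketch). */
static void my_done(struct request *rq, int error)
{
	__blk_put_request(rq->q, rq);
}

static int submit_passthrough(struct request_queue *q)
{
	struct request *rq = blk_get_request(q, READ, GFP_KERNEL);

	if (!rq)
		return -ENOMEM;

	rq->cmd_type = REQ_TYPE_BLOCK_PC;	/* not REQ_TYPE_FS: bypasses flush machinery */
	/* ... fill in rq->cmd[], rq->timeout, data buffers, etc. ... */

	blk_execute_rq_nowait(q, NULL, rq, 0, my_done);
	return 0;
}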
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 1583ed28ea03..a7d70a1bbf36 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -963,14 +963,9 @@ void blk_mq_insert_request(struct request *rq, bool at_head, bool run_queue,
 
 	hctx = q->mq_ops->map_queue(q, ctx->cpu);
 
-	if (rq->cmd_flags & (REQ_FLUSH | REQ_FUA) &&
-	    !(rq->cmd_flags & (REQ_FLUSH_SEQ))) {
-		blk_insert_flush(rq);
-	} else {
-		spin_lock(&ctx->lock);
-		__blk_mq_insert_request(hctx, rq, at_head);
-		spin_unlock(&ctx->lock);
-	}
+	spin_lock(&ctx->lock);
+	__blk_mq_insert_request(hctx, rq, at_head);
+	spin_unlock(&ctx->lock);
 
 	if (run_queue)
 		blk_mq_run_hw_queue(hctx, async);