author	Ming Lei <tom.leiming@gmail.com>	2014-06-09 12:16:41 -0400
committer	Jens Axboe <axboe@fb.com>	2014-06-09 12:20:06 -0400
commit	2b8393b43ec672bb263009cd74c056ab01d6ac17 (patch)
tree	0cfe6b32335a14310ffec05da4e227b9ffea15ca /block
parent	3ee3237239583a6555db4f297d00eebdbb6d76ad (diff)
blk-mq: add timer in blk_mq_start_request
This makes the mq path consistent with the non-mq case and avoids updating rq->deadline twice for mq.

The removed comment said "We do this early, to ensure we are on the right CPU.", but no per-cpu state is used in blk_add_timer(), so that isn't necessary. Even when inserting from the plug list, there is no such guarantee at all.

Signed-off-by: Ming Lei <tom.leiming@gmail.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
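For reference, blk_add_timer() in block/blk-timeout.c already performs the deadline setup that this patch removes from the mq path, which is why calling it from blk_mq_start_request() is sufficient. A simplified sketch of that helper, with the timeout-list handling and timer-pending checks elided, looks roughly like:

	/* Simplified sketch of blk_add_timer(); not the exact kernel source. */
	void blk_add_timer(struct request *req)
	{
		struct request_queue *q = req->q;

		/* Fall back to the queue-wide default when the request has no timeout. */
		if (!req->timeout)
			req->timeout = q->rq_timeout;

		/* The single place where rq->deadline is computed. */
		req->deadline = jiffies + req->timeout;

		/* Make sure the queue's timeout timer fires by this deadline. */
		mod_timer(&q->timeout, round_jiffies_up(req->deadline));
	}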
Diffstat (limited to 'block')
-rw-r--r--	block/blk-mq.c	17
1 file changed, 1 insertion(+), 16 deletions(-)
diff --git a/block/blk-mq.c b/block/blk-mq.c
index a5ea37d7e820..e11f5f8e0313 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -411,16 +411,7 @@ static void blk_mq_start_request(struct request *rq, bool last)
 	if (unlikely(blk_bidi_rq(rq)))
 		rq->next_rq->resid_len = blk_rq_bytes(rq->next_rq);
 
-	/*
-	 * Just mark start time and set the started bit. Due to memory
-	 * ordering, we know we'll see the correct deadline as long as
-	 * REQ_ATOM_STARTED is seen. Use the default queue timeout,
-	 * unless one has been set in the request.
-	 */
-	if (!rq->timeout)
-		rq->deadline = jiffies + q->rq_timeout;
-	else
-		rq->deadline = jiffies + rq->timeout;
+	blk_add_timer(rq);
 
 	/*
 	 * Mark us as started and clear complete. Complete might have been
@@ -972,11 +963,6 @@ static void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx,
 		list_add_tail(&rq->queuelist, &ctx->rq_list);
 
 	blk_mq_hctx_mark_pending(hctx, ctx);
-
-	/*
-	 * We do this early, to ensure we are on the right CPU.
-	 */
-	blk_add_timer(rq);
 }
 
 void blk_mq_insert_request(struct request *rq, bool at_head, bool run_queue,
@@ -1219,7 +1205,6 @@ static void blk_mq_make_request(struct request_queue *q, struct bio *bio)
 
 	blk_mq_bio_to_request(rq, bio);
 	blk_mq_start_request(rq, true);
-	blk_add_timer(rq);
 
 	/*
 	 * For OK queue, we are done. For error, kill it. Any other