author     Christoph Hellwig <hch@infradead.org>   2014-02-11 11:27:14 -0500
committer  Jens Axboe <axboe@fb.com>               2014-02-11 11:34:08 -0500
commit     49f5baa5109897b8cee491e8a7c4d74052b6bc1e (patch)
tree       9e0cf5285be207bde015af948e8fd7b89760ffcd /block
parent     1e93b8c274268038c93763dca65a73b42a081e10 (diff)
blk-mq: pair blk_mq_start_request / blk_mq_requeue_request
Make sure we have a proper pairing between starting and requeueing requests. Move the DMA drain and REQ_END setup into blk_mq_start_request, and make sure blk_mq_requeue_request properly undoes them, giving us a pair of functions to prepare and unprepare a request without leaving side effects. Together this ensures we always clean up properly after BLK_MQ_RQ_QUEUE_BUSY returns from ->queue_rq.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@fb.com>
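For context, a simplified sketch of how the dispatch loop reads with this patch applied (not the verbatim kernel source; the error cases and queued-request accounting of the real __blk_mq_run_hw_queue() are left out): blk_mq_start_request() now does all of the per-request preparation, and the BLK_MQ_RQ_QUEUE_BUSY path undoes it through blk_mq_requeue_request() before putting the request back on the list.

/*
 * Simplified sketch of the dispatch loop in __blk_mq_run_hw_queue()
 * with this patch applied; error handling is omitted.
 */
while (!list_empty(&rq_list)) {
        struct request *rq;
        int ret;

        rq = list_first_entry(&rq_list, struct request, queuelist);
        list_del_init(&rq->queuelist);

        /* Prepare: timeout, STARTED bit, DMA drain segment, REQ_END. */
        blk_mq_start_request(rq, list_empty(&rq_list));

        ret = q->mq_ops->queue_rq(hctx, rq);
        if (ret == BLK_MQ_RQ_QUEUE_BUSY) {
                /* Unprepare: REQ_END and the drain segment are undone. */
                blk_mq_requeue_request(rq);
                list_add(&rq->queuelist, &rq_list);
                break;
        }
}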
Diffstat (limited to 'block')
-rw-r--r--  block/blk-mq.c  49
1 file changed, 26 insertions, 23 deletions
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 0480710a8b45..1fa9dd153fde 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -377,7 +377,7 @@ void blk_mq_complete_request(struct request *rq)
 }
 EXPORT_SYMBOL(blk_mq_complete_request);
 
-static void blk_mq_start_request(struct request *rq)
+static void blk_mq_start_request(struct request *rq, bool last)
 {
         struct request_queue *q = rq->q;
 
@@ -390,6 +390,25 @@ static void blk_mq_start_request(struct request *rq)
          */
         rq->deadline = jiffies + q->rq_timeout;
         set_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
+
+        if (q->dma_drain_size && blk_rq_bytes(rq)) {
+                /*
+                 * Make sure space for the drain appears. We know we can do
+                 * this because max_hw_segments has been adjusted to be one
+                 * fewer than the device can handle.
+                 */
+                rq->nr_phys_segments++;
+        }
+
+        /*
+         * Flag the last request in the series so that drivers know when IO
+         * should be kicked off, if they don't do it on a per-request basis.
+         *
+         * Note: the flag isn't the only condition drivers should do kick off.
+         * If drive is busy, the last request might not have the bit set.
+         */
+        if (last)
+                rq->cmd_flags |= REQ_END;
 }
 
 static void blk_mq_requeue_request(struct request *rq)
@@ -398,6 +417,11 @@ static void blk_mq_requeue_request(struct request *rq)
 
         trace_block_rq_requeue(q, rq);
         clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
+
+        rq->cmd_flags &= ~REQ_END;
+
+        if (q->dma_drain_size && blk_rq_bytes(rq))
+                rq->nr_phys_segments--;
 }
 
 struct blk_mq_timeout_data {
@@ -565,29 +589,8 @@ static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
 
                 rq = list_first_entry(&rq_list, struct request, queuelist);
                 list_del_init(&rq->queuelist);
-                blk_mq_start_request(rq);
 
-                if (q->dma_drain_size && blk_rq_bytes(rq)) {
-                        /*
-                         * make sure space for the drain appears we
-                         * know we can do this because max_hw_segments
-                         * has been adjusted to be one fewer than the
-                         * device can handle
-                         */
-                        rq->nr_phys_segments++;
-                }
-
-                /*
-                 * Last request in the series. Flag it as such, this
-                 * enables drivers to know when IO should be kicked off,
-                 * if they don't do it on a per-request basis.
-                 *
-                 * Note: the flag isn't the only condition drivers
-                 * should do kick off. If drive is busy, the last
-                 * request might not have the bit set.
-                 */
-                if (list_empty(&rq_list))
-                        rq->cmd_flags |= REQ_END;
+                blk_mq_start_request(rq, list_empty(&rq_list));
 
                 ret = q->mq_ops->queue_rq(hctx, rq);
                 switch (ret) {
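As a driver-side illustration of the REQ_END comment added above, a hedged sketch of what a ->queue_rq implementation might look like against this interface. struct my_dev and the my_* helpers are made up for the example; only the blk-mq calls and flags come from the patch. Note that on BLK_MQ_RQ_QUEUE_BUSY the core, not the driver, performs the cleanup via blk_mq_requeue_request().

/* Hypothetical driver; my_dev, my_hw_queue_full, my_submit and my_kick_hw
 * are placeholders, not kernel APIs. */
static int my_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *rq)
{
        struct my_dev *dev = hctx->driver_data;

        if (my_hw_queue_full(dev)) {
                /*
                 * Returning BUSY makes the core unprepare the request
                 * (clear REQ_END, drop the drain segment) and retry later.
                 */
                return BLK_MQ_RQ_QUEUE_BUSY;
        }

        my_submit(dev, rq);

        /*
         * Kick the hardware when the last request of the series is queued.
         * As the added comment notes, the last dispatched request may lack
         * the flag, so this must not be the driver's only kick-off trigger.
         */
        if (rq->cmd_flags & REQ_END)
                my_kick_hw(dev);

        return BLK_MQ_RQ_QUEUE_OK;
}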