author	Jens Axboe <axboe@fb.com>	2014-12-11 15:58:39 -0500
committer	Jens Axboe <axboe@fb.com>	2014-12-11 15:58:39 -0500
commit	fe54303ee2be293c1c5c7a53a152453789cabc2f (patch)
tree	359ec0be601521afe7cde6dcbd47e6a66e1364a2 /drivers/block
parent	285dffc9101244ac65c29672a1fb3fe614b52238 (diff)
NVMe: fix retry/error logic in nvme_queue_rq()
The logic around retrying and erroring IO in nvme_queue_rq() is broken
in a few ways:

- If we fail allocating dma memory for a discard, we return retry. We
  have the 'iod' stored in ->special, but we free the 'iod'.

- For a normal request, if we fail dma mapping of setting up prps, we
  have the same iod situation. Additionally, we haven't set the callback
  for the request yet, so we also potentially leak IOMMU resources.

Get rid of the ->special 'iod' store. The retry is uncommon enough that
it's not worth optimizing for or holding on to resources to attempt to
speed it up. Additionally, it's usually best practice to free any
request related resources when doing retries.

Acked-by: Keith Busch <keith.busch@intel.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
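For context, the blk-mq contract the fix relies on: a ->queue_rq() handler returns BLK_MQ_RQ_QUEUE_OK when the command is submitted, BLK_MQ_RQ_QUEUE_ERROR to fail the request, and BLK_MQ_RQ_QUEUE_BUSY to have it requeued and prepped again later. The sketch below is not the driver's actual code; the my_* helpers, the my_iod type and the simplified queue_rq signature are hypothetical stand-ins. It only illustrates the pattern the patch enforces: every resource taken during the call is released before returning BUSY, instead of being parked in req->special for a later retry.

#include <linux/blk-mq.h>
#include <linux/blkdev.h>
#include <linux/gfp.h>

/* Hypothetical per-command context, standing in for struct nvme_iod. */
struct my_iod;

struct my_iod *my_alloc_iod(struct request *req, gfp_t gfp);
int my_map_data(struct request *req, struct my_iod *iod);	/* 0 on success */
void my_unmap_and_free_iod(struct my_iod *iod);
void my_submit(struct my_iod *iod);

static int my_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *req)
{
	struct my_iod *iod;

	iod = my_alloc_iod(req, GFP_ATOMIC);
	if (!iod)
		return BLK_MQ_RQ_QUEUE_BUSY;	/* nothing held yet, safe to retry */

	if (my_map_data(req, iod))		/* e.g. DMA mapping failed */
		goto retry_cmd;

	blk_mq_start_request(req);
	my_submit(iod);
	return BLK_MQ_RQ_QUEUE_OK;

 retry_cmd:
	/* Give everything back; a requeued request is re-prepped from scratch. */
	my_unmap_and_free_iod(iod);
	return BLK_MQ_RQ_QUEUE_BUSY;
}

Freeing on the retry path means re-allocating when the request comes back, but, as the commit message notes, the retry case is rare enough that the simpler, leak-free unwind is preferred.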
Diffstat (limited to 'drivers/block')
-rw-r--r--	drivers/block/nvme-core.c	39
1 file changed, 16 insertions(+), 23 deletions(-)
diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c
index 95f2310255ce..e92bdf4c68fc 100644
--- a/drivers/block/nvme-core.c
+++ b/drivers/block/nvme-core.c
@@ -621,24 +621,15 @@ static int nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
 	struct nvme_cmd_info *cmd = blk_mq_rq_to_pdu(req);
 	struct nvme_iod *iod;
 	int psegs = req->nr_phys_segments;
-	int result = BLK_MQ_RQ_QUEUE_BUSY;
 	enum dma_data_direction dma_dir;
 	unsigned size = !(req->cmd_flags & REQ_DISCARD) ? blk_rq_bytes(req) :
 						sizeof(struct nvme_dsm_range);
 
-	/*
-	 * Requeued IO has already been prepped
-	 */
-	iod = req->special;
-	if (iod)
-		goto submit_iod;
-
 	iod = nvme_alloc_iod(psegs, size, ns->dev, GFP_ATOMIC);
 	if (!iod)
-		return result;
+		return BLK_MQ_RQ_QUEUE_BUSY;
 
 	iod->private = req;
-	req->special = iod;
 
 	if (req->cmd_flags & REQ_DISCARD) {
 		void *range;
@@ -651,7 +642,7 @@ static int nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
 						GFP_ATOMIC,
 						&iod->first_dma);
 		if (!range)
-			goto finish_cmd;
+			goto retry_cmd;
 		iod_list(iod)[0] = (__le64 *)range;
 		iod->npages = 0;
 	} else if (psegs) {
@@ -659,22 +650,22 @@ static int nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
 
 		sg_init_table(iod->sg, psegs);
 		iod->nents = blk_rq_map_sg(req->q, req, iod->sg);
-		if (!iod->nents) {
-			result = BLK_MQ_RQ_QUEUE_ERROR;
-			goto finish_cmd;
-		}
+		if (!iod->nents)
+			goto error_cmd;
 
 		if (!dma_map_sg(nvmeq->q_dmadev, iod->sg, iod->nents, dma_dir))
-			goto finish_cmd;
+			goto retry_cmd;
 
-		if (blk_rq_bytes(req) != nvme_setup_prps(nvmeq->dev, iod,
-						blk_rq_bytes(req), GFP_ATOMIC))
-			goto finish_cmd;
+		if (blk_rq_bytes(req) !=
+		    nvme_setup_prps(nvmeq->dev, iod, blk_rq_bytes(req), GFP_ATOMIC)) {
+			dma_unmap_sg(&nvmeq->dev->pci_dev->dev, iod->sg,
+					iod->nents, dma_dir);
+			goto retry_cmd;
+		}
 	}
 
 	blk_mq_start_request(req);
 
-submit_iod:
 	nvme_set_info(cmd, iod, req_completion);
 	spin_lock_irq(&nvmeq->q_lock);
 	if (req->cmd_flags & REQ_DISCARD)
@@ -688,10 +679,12 @@ static int nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
 	spin_unlock_irq(&nvmeq->q_lock);
 	return BLK_MQ_RQ_QUEUE_OK;
 
- finish_cmd:
-	nvme_finish_cmd(nvmeq, req->tag, NULL);
+ error_cmd:
 	nvme_free_iod(nvmeq->dev, iod);
-	return result;
+	return BLK_MQ_RQ_QUEUE_ERROR;
+ retry_cmd:
+	nvme_free_iod(nvmeq->dev, iod);
+	return BLK_MQ_RQ_QUEUE_BUSY;
 }
 
 static int nvme_process_cq(struct nvme_queue *nvmeq)