diff options
author | Christoph Hellwig <hch@lst.de> | 2018-01-17 16:04:38 -0500 |
---|---|---|
committer | Jens Axboe <axboe@kernel.dk> | 2018-01-17 16:05:35 -0500 |
commit | b0f2853b56a2acaff19cca2c6a608f8ec268d21a (patch) | |
tree | b37a803e6a8ca66d6cc831f2d1992b3f7ef33732 | |
parent | 20469a37aed12a886d0deda5a07c04037923144a (diff) |
nvme-pci: take sglist coalescing in dma_map_sg into account
Some iommu implementations can merge physically and/or virtually
contiguous segments inside dma_map_sg. The NVMe SGL support does not take
this into account and will warn because of falling off a loop. Pass the
number of mapped segments to nvme_pci_setup_sgls so that the SGL setup
can take the number of mapped segments into account.
Reported-by: Fangjian (Turing) <f.fangjian@huawei.com>
Fixes: a7a7cbe3 ("nvme-pci: add SGL support")
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Keith Busch <keith.busch@intel.com>
Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
-rw-r--r-- | drivers/nvme/host/pci.c | 21 |
1 files changed, 9 insertions, 12 deletions
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index a7e94cc3c70e..4276ebfff22b 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c | |||
@@ -725,20 +725,19 @@ static void nvme_pci_sgl_set_seg(struct nvme_sgl_desc *sge, | |||
725 | } | 725 | } |
726 | 726 | ||
727 | static blk_status_t nvme_pci_setup_sgls(struct nvme_dev *dev, | 727 | static blk_status_t nvme_pci_setup_sgls(struct nvme_dev *dev, |
728 | struct request *req, struct nvme_rw_command *cmd) | 728 | struct request *req, struct nvme_rw_command *cmd, int entries) |
729 | { | 729 | { |
730 | struct nvme_iod *iod = blk_mq_rq_to_pdu(req); | 730 | struct nvme_iod *iod = blk_mq_rq_to_pdu(req); |
731 | int length = blk_rq_payload_bytes(req); | ||
732 | struct dma_pool *pool; | 731 | struct dma_pool *pool; |
733 | struct nvme_sgl_desc *sg_list; | 732 | struct nvme_sgl_desc *sg_list; |
734 | struct scatterlist *sg = iod->sg; | 733 | struct scatterlist *sg = iod->sg; |
735 | int entries = iod->nents, i = 0; | ||
736 | dma_addr_t sgl_dma; | 734 | dma_addr_t sgl_dma; |
735 | int i = 0; | ||
737 | 736 | ||
738 | /* setting the transfer type as SGL */ | 737 | /* setting the transfer type as SGL */ |
739 | cmd->flags = NVME_CMD_SGL_METABUF; | 738 | cmd->flags = NVME_CMD_SGL_METABUF; |
740 | 739 | ||
741 | if (length == sg_dma_len(sg)) { | 740 | if (entries == 1) { |
742 | nvme_pci_sgl_set_data(&cmd->dptr.sgl, sg); | 741 | nvme_pci_sgl_set_data(&cmd->dptr.sgl, sg); |
743 | return BLK_STS_OK; | 742 | return BLK_STS_OK; |
744 | } | 743 | } |
@@ -778,13 +777,9 @@ static blk_status_t nvme_pci_setup_sgls(struct nvme_dev *dev, | |||
778 | } | 777 | } |
779 | 778 | ||
780 | nvme_pci_sgl_set_data(&sg_list[i++], sg); | 779 | nvme_pci_sgl_set_data(&sg_list[i++], sg); |
781 | |||
782 | length -= sg_dma_len(sg); | ||
783 | sg = sg_next(sg); | 780 | sg = sg_next(sg); |
784 | entries--; | 781 | } while (--entries > 0); |
785 | } while (length > 0); | ||
786 | 782 | ||
787 | WARN_ON(entries > 0); | ||
788 | return BLK_STS_OK; | 783 | return BLK_STS_OK; |
789 | } | 784 | } |
790 | 785 | ||
@@ -796,6 +791,7 @@ static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req, | |||
796 | enum dma_data_direction dma_dir = rq_data_dir(req) ? | 791 | enum dma_data_direction dma_dir = rq_data_dir(req) ? |
797 | DMA_TO_DEVICE : DMA_FROM_DEVICE; | 792 | DMA_TO_DEVICE : DMA_FROM_DEVICE; |
798 | blk_status_t ret = BLK_STS_IOERR; | 793 | blk_status_t ret = BLK_STS_IOERR; |
794 | int nr_mapped; | ||
799 | 795 | ||
800 | sg_init_table(iod->sg, blk_rq_nr_phys_segments(req)); | 796 | sg_init_table(iod->sg, blk_rq_nr_phys_segments(req)); |
801 | iod->nents = blk_rq_map_sg(q, req, iod->sg); | 797 | iod->nents = blk_rq_map_sg(q, req, iod->sg); |
@@ -803,12 +799,13 @@ static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req, | |||
803 | goto out; | 799 | goto out; |
804 | 800 | ||
805 | ret = BLK_STS_RESOURCE; | 801 | ret = BLK_STS_RESOURCE; |
806 | if (!dma_map_sg_attrs(dev->dev, iod->sg, iod->nents, dma_dir, | 802 | nr_mapped = dma_map_sg_attrs(dev->dev, iod->sg, iod->nents, dma_dir, |
807 | DMA_ATTR_NO_WARN)) | 803 | DMA_ATTR_NO_WARN); |
804 | if (!nr_mapped) | ||
808 | goto out; | 805 | goto out; |
809 | 806 | ||
810 | if (iod->use_sgl) | 807 | if (iod->use_sgl) |
811 | ret = nvme_pci_setup_sgls(dev, req, &cmnd->rw); | 808 | ret = nvme_pci_setup_sgls(dev, req, &cmnd->rw, nr_mapped); |
812 | else | 809 | else |
813 | ret = nvme_pci_setup_prps(dev, req, &cmnd->rw); | 810 | ret = nvme_pci_setup_prps(dev, req, &cmnd->rw); |
814 | 811 | ||