diff options
author | Christoph Hellwig <hch@lst.de> | 2015-10-26 04:12:51 -0400 |
---|---|---|
committer | Jens Axboe <axboe@fb.com> | 2015-12-22 11:38:34 -0500 |
commit | bf68405705bd35c09ec1f7528718dce5af88daff (patch) | |
tree | 5a2f78fcb3663462754ffe8706ae1814bf959130 | |
parent | eee417b0697827a6e120199b126b447af3c81b47 (diff) |
nvme: meta_sg doesn't have to be an array
Signed-off-by: Christoph Hellwig <hch@lst.de>
Acked-by: Keith Busch <keith.busch@intel.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
-rw-r--r-- | drivers/nvme/host/pci.c | 12 |
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index ec768b64ab77..24d695a2f6c4 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c | |||
@@ -174,7 +174,7 @@ struct nvme_iod { | |||
174 | int nents; /* Used in scatterlist */ | 174 | int nents; /* Used in scatterlist */ |
175 | int length; /* Of data, in bytes */ | 175 | int length; /* Of data, in bytes */ |
176 | dma_addr_t first_dma; | 176 | dma_addr_t first_dma; |
177 | struct scatterlist meta_sg[1]; /* metadata requires single contiguous buffer */ | 177 | struct scatterlist meta_sg; /* metadata requires single contiguous buffer */ |
178 | struct scatterlist sg[0]; | 178 | struct scatterlist sg[0]; |
179 | }; | 179 | }; |
180 | 180 | ||
@@ -594,21 +594,21 @@ static int nvme_map_data(struct nvme_dev *dev, struct nvme_iod *iod, | |||
594 | if (blk_rq_count_integrity_sg(q, req->bio) != 1) | 594 | if (blk_rq_count_integrity_sg(q, req->bio) != 1) |
595 | goto out_unmap; | 595 | goto out_unmap; |
596 | 596 | ||
597 | sg_init_table(iod->meta_sg, 1); | 597 | sg_init_table(&iod->meta_sg, 1); |
598 | if (blk_rq_map_integrity_sg(q, req->bio, iod->meta_sg) != 1) | 598 | if (blk_rq_map_integrity_sg(q, req->bio, &iod->meta_sg) != 1) |
599 | goto out_unmap; | 599 | goto out_unmap; |
600 | 600 | ||
601 | if (rq_data_dir(req)) | 601 | if (rq_data_dir(req)) |
602 | nvme_dif_remap(req, nvme_dif_prep); | 602 | nvme_dif_remap(req, nvme_dif_prep); |
603 | 603 | ||
604 | if (!dma_map_sg(dev->dev, iod->meta_sg, 1, dma_dir)) | 604 | if (!dma_map_sg(dev->dev, &iod->meta_sg, 1, dma_dir)) |
605 | goto out_unmap; | 605 | goto out_unmap; |
606 | } | 606 | } |
607 | 607 | ||
608 | cmnd->rw.prp1 = cpu_to_le64(sg_dma_address(iod->sg)); | 608 | cmnd->rw.prp1 = cpu_to_le64(sg_dma_address(iod->sg)); |
609 | cmnd->rw.prp2 = cpu_to_le64(iod->first_dma); | 609 | cmnd->rw.prp2 = cpu_to_le64(iod->first_dma); |
610 | if (blk_integrity_rq(req)) | 610 | if (blk_integrity_rq(req)) |
611 | cmnd->rw.metadata = cpu_to_le64(sg_dma_address(iod->meta_sg)); | 611 | cmnd->rw.metadata = cpu_to_le64(sg_dma_address(&iod->meta_sg)); |
612 | return BLK_MQ_RQ_QUEUE_OK; | 612 | return BLK_MQ_RQ_QUEUE_OK; |
613 | 613 | ||
614 | out_unmap: | 614 | out_unmap: |
@@ -628,7 +628,7 @@ static void nvme_unmap_data(struct nvme_dev *dev, struct nvme_iod *iod) | |||
628 | if (blk_integrity_rq(req)) { | 628 | if (blk_integrity_rq(req)) { |
629 | if (!rq_data_dir(req)) | 629 | if (!rq_data_dir(req)) |
630 | nvme_dif_remap(req, nvme_dif_complete); | 630 | nvme_dif_remap(req, nvme_dif_complete); |
631 | dma_unmap_sg(dev->dev, iod->meta_sg, 1, dma_dir); | 631 | dma_unmap_sg(dev->dev, &iod->meta_sg, 1, dma_dir); |
632 | } | 632 | } |
633 | } | 633 | } |
634 | 634 | ||