about summary refs log tree commit diff stats
path: root/drivers/block
diff options
context:
space:
mode:
authorMatthew Wilcox <matthew.r.wilcox@intel.com>2012-01-06 15:52:56 -0500
committerMatthew Wilcox <matthew.r.wilcox@intel.com>2012-01-10 14:54:22 -0500
commit1c2ad9faaf662b4a525348775deca3ac8e6c35a0 (patch)
tree3b378e4edca145b682cd4952c105ff9e31b3d465 /drivers/block
parentfe304c43c6d63e29ed4fc46a874d7a74313788c5 (diff)
NVMe: Simplify nvme_unmap_user_pages
By using the iod->nents field (the same way other I/O paths do), we can avoid recalculating the number of sg entries at unmap time, and make nvme_unmap_user_pages() easier to call. Also, use the 'write' parameter instead of assuming DMA_FROM_DEVICE. Signed-off-by: Matthew Wilcox <matthew.r.wilcox@intel.com>
Diffstat (limited to 'drivers/block')
-rw-r--r--drivers/block/nvme.c19
1 file changed, 9 insertions(+), 10 deletions(-)
diff --git a/drivers/block/nvme.c b/drivers/block/nvme.c
index 71fc9030b4df..3cf82c27a544 100644
--- a/drivers/block/nvme.c
+++ b/drivers/block/nvme.c
@@ -1046,6 +1046,7 @@ static struct nvme_iod *nvme_map_user_pages(struct nvme_dev *dev, int write,
1046 offset = 0; 1046 offset = 0;
1047 } 1047 }
1048 sg_mark_end(&sg[i - 1]); 1048 sg_mark_end(&sg[i - 1]);
1049 iod->nents = count;
1049 1050
1050 err = -ENOMEM; 1051 err = -ENOMEM;
1051 nents = dma_map_sg(&dev->pci_dev->dev, sg, count, 1052 nents = dma_map_sg(&dev->pci_dev->dev, sg, count,
@@ -1066,16 +1067,15 @@ static struct nvme_iod *nvme_map_user_pages(struct nvme_dev *dev, int write,
1066} 1067}
1067 1068
1068static void nvme_unmap_user_pages(struct nvme_dev *dev, int write, 1069static void nvme_unmap_user_pages(struct nvme_dev *dev, int write,
1069 unsigned long addr, int length, struct nvme_iod *iod) 1070 struct nvme_iod *iod)
1070{ 1071{
1071 struct scatterlist *sg = iod->sg; 1072 int i;
1072 int i, count;
1073 1073
1074 count = DIV_ROUND_UP(offset_in_page(addr) + length, PAGE_SIZE); 1074 dma_unmap_sg(&dev->pci_dev->dev, iod->sg, iod->nents,
1075 dma_unmap_sg(&dev->pci_dev->dev, sg, count, DMA_FROM_DEVICE); 1075 write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
1076 1076
1077 for (i = 0; i < count; i++) 1077 for (i = 0; i < iod->nents; i++)
1078 put_page(sg_page(&sg[i])); 1078 put_page(sg_page(&iod->sg[i]));
1079} 1079}
1080 1080
1081static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio) 1081static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
@@ -1132,7 +1132,7 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
1132 else 1132 else
1133 status = nvme_submit_sync_cmd(nvmeq, &c, NULL, NVME_IO_TIMEOUT); 1133 status = nvme_submit_sync_cmd(nvmeq, &c, NULL, NVME_IO_TIMEOUT);
1134 1134
1135 nvme_unmap_user_pages(dev, io.opcode & 1, io.addr, length, iod); 1135 nvme_unmap_user_pages(dev, io.opcode & 1, iod);
1136 nvme_free_iod(dev, iod); 1136 nvme_free_iod(dev, iod);
1137 return status; 1137 return status;
1138} 1138}
@@ -1180,8 +1180,7 @@ static int nvme_user_admin_cmd(struct nvme_ns *ns,
1180 status = nvme_submit_admin_cmd(dev, &c, NULL); 1180 status = nvme_submit_admin_cmd(dev, &c, NULL);
1181 1181
1182 if (cmd.data_len) { 1182 if (cmd.data_len) {
1183 nvme_unmap_user_pages(dev, cmd.opcode & 1, cmd.addr, 1183 nvme_unmap_user_pages(dev, cmd.opcode & 1, iod);
1184 cmd.data_len, iod);
1185 nvme_free_iod(dev, iod); 1184 nvme_free_iod(dev, iod);
1186 } 1185 }
1187 return status; 1186 return status;