author	Matthew Wilcox <matthew.r.wilcox@intel.com>	2011-02-10 10:47:55 -0500
committer	Matthew Wilcox <matthew.r.wilcox@intel.com>	2011-11-04 15:52:57 -0400
commit	d567760c409f981d35fc755b51d5bf56a99a467b (patch)
tree	12db12b955792272e27640afb20e30f456b200af
parent	99802a7aee2b3dd720e382c52b892cc6a8122b11 (diff)
NVMe: Pass the nvme_dev to nvme_free_prps and nvme_setup_prps
We were passing the nvme_queue to access the q_dmadev for the
dma_alloc_coherent calls, but since we moved to the dma pool API,
we really only need the nvme_dev.

Signed-off-by: Matthew Wilcox <matthew.r.wilcox@intel.com>
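[Editor's note: for context, the point of the conversion is that dma_alloc_coherent() takes a struct device, which the driver used to reach through nvmeq->q_dmadev, whereas a struct dma_pool remembers its own device at dma_pool_create() time. A minimal sketch of the resulting pattern follows; alloc_prp_list() is a made-up helper for illustration, and prp_page_pool is assumed to be the dma_pool hanging off nvme_dev in this era of the driver.]

/*
 * Illustrative only -- not part of this patch.
 */
static __le64 *alloc_prp_list(struct nvme_dev *dev, dma_addr_t *dma)
{
	/* Before the pool conversion, a struct device was needed,
	 * reached through the queue:
	 *
	 *	dma_alloc_coherent(nvmeq->q_dmadev, PAGE_SIZE, dma,
	 *			   GFP_ATOMIC);
	 *
	 * The dma pool API carries its device internally, so the
	 * nvme_dev that owns the pool is all a caller must pass:
	 */
	return dma_pool_alloc(dev->prp_page_pool, GFP_ATOMIC, dma);
}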
 drivers/block/nvme.c | 24 +++++++++++-------------
 1 file changed, 11 insertions(+), 13 deletions(-)
diff --git a/drivers/block/nvme.c b/drivers/block/nvme.c
index cd7aeba8310b..2948043483fe 100644
--- a/drivers/block/nvme.c
+++ b/drivers/block/nvme.c
@@ -256,10 +256,9 @@ struct nvme_prps {
 	__le64 *list[0];
 };
 
-static void nvme_free_prps(struct nvme_queue *nvmeq, struct nvme_prps *prps)
+static void nvme_free_prps(struct nvme_dev *dev, struct nvme_prps *prps)
 {
 	const int last_prp = PAGE_SIZE / 8 - 1;
-	struct nvme_dev *dev = nvmeq->dev;
 	int i;
 	dma_addr_t prp_dma;
 
@@ -295,7 +294,7 @@ static struct nvme_bio *alloc_nbio(unsigned nseg, gfp_t gfp)
 
 static void free_nbio(struct nvme_queue *nvmeq, struct nvme_bio *nbio)
 {
-	nvme_free_prps(nvmeq, nbio->prps);
+	nvme_free_prps(nvmeq->dev, nbio->prps);
 	kfree(nbio);
 }
 
@@ -316,11 +315,10 @@ static void bio_completion(struct nvme_queue *nvmeq, void *ctx,
 }
 
 /* length is in bytes */
-static struct nvme_prps *nvme_setup_prps(struct nvme_queue *nvmeq,
+static struct nvme_prps *nvme_setup_prps(struct nvme_dev *dev,
 					struct nvme_common_command *cmd,
 					struct scatterlist *sg, int length)
 {
-	struct nvme_dev *dev = nvmeq->dev;
 	struct dma_pool *pool;
 	int dma_len = sg_dma_len(sg);
 	u64 dma_addr = sg_dma_address(sg);
@@ -458,7 +456,7 @@ static int nvme_submit_bio_queue(struct nvme_queue *nvmeq, struct nvme_ns *ns,
 	cmnd->rw.flags = 1;
 	cmnd->rw.command_id = cmdid;
 	cmnd->rw.nsid = cpu_to_le32(ns->ns_id);
-	nbio->prps = nvme_setup_prps(nvmeq, &cmnd->common, nbio->sg,
+	nbio->prps = nvme_setup_prps(nvmeq->dev, &cmnd->common, nbio->sg,
 						bio->bi_size);
 	cmnd->rw.slba = cpu_to_le64(bio->bi_sector >> (ns->lba_shift - 9));
 	cmnd->rw.length = cpu_to_le16((bio->bi_size >> ns->lba_shift) - 1);
@@ -939,10 +937,10 @@ static int nvme_submit_user_admin_command(struct nvme_dev *dev,
 	nents = nvme_map_user_pages(dev, 0, addr, length, &sg);
 	if (nents < 0)
 		return nents;
-	prps = nvme_setup_prps(dev->queues[0], &cmd->common, sg, length);
+	prps = nvme_setup_prps(dev, &cmd->common, sg, length);
 	err = nvme_submit_admin_cmd(dev, cmd, NULL);
 	nvme_unmap_user_pages(dev, 0, addr, length, sg, nents);
-	nvme_free_prps(dev->queues[0], prps);
+	nvme_free_prps(dev, prps);
 	return err ? -EIO : 0;
 }
 
@@ -1000,10 +998,10 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
 	c.rw.reftag = cpu_to_le32(io.reftag);	/* XXX: endian? */
 	c.rw.apptag = cpu_to_le16(io.apptag);
 	c.rw.appmask = cpu_to_le16(io.appmask);
-	nvmeq = get_nvmeq(ns);
 	/* XXX: metadata */
-	prps = nvme_setup_prps(nvmeq, &c.common, sg, length);
+	prps = nvme_setup_prps(dev, &c.common, sg, length);
 
+	nvmeq = get_nvmeq(ns);
 	/* Since nvme_submit_sync_cmd sleeps, we can't keep preemption
 	 * disabled.  We may be preempted at any point, and be rescheduled
 	 * to a different CPU.  That will cause cacheline bouncing, but no
@@ -1013,7 +1011,7 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
 	status = nvme_submit_sync_cmd(nvmeq, &c, &result, IO_TIMEOUT);
 
 	nvme_unmap_user_pages(dev, io.opcode & 1, io.addr, length, sg, nents);
-	nvme_free_prps(nvmeq, prps);
+	nvme_free_prps(dev, prps);
 	put_user(result, &uio->result);
 	return status;
 }
@@ -1041,11 +1039,11 @@ static int nvme_download_firmware(struct nvme_ns *ns,
 	c.dlfw.opcode = nvme_admin_download_fw;
 	c.dlfw.numd = cpu_to_le32(dlfw.length);
 	c.dlfw.offset = cpu_to_le32(dlfw.offset);
-	prps = nvme_setup_prps(dev->queues[0], &c.common, sg, dlfw.length * 4);
+	prps = nvme_setup_prps(dev, &c.common, sg, dlfw.length * 4);
 
 	status = nvme_submit_admin_cmd(dev, &c, NULL);
 	nvme_unmap_user_pages(dev, 0, dlfw.addr, dlfw.length * 4, sg, nents);
-	nvme_free_prps(dev->queues[0], prps);
+	nvme_free_prps(dev, prps);
 	return status;
 }
 
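[Editor's note: one side effect worth calling out in the nvme_submit_io() hunk above: because nvme_setup_prps() no longer takes a queue, the get_nvmeq() call can move after the PRP setup. A condensed sketch of the resulting ordering, assuming (per the preemption comment in that hunk) that get_nvmeq() selects a per-CPU queue, e.g. via get_cpu():]

	/* PRP lists are built before any queue is chosen ... */
	prps = nvme_setup_prps(dev, &c.common, sg, length);

	/* ... and the per-CPU queue is looked up only when needed
	 * for the actual submission. */
	nvmeq = get_nvmeq(ns);
	status = nvme_submit_sync_cmd(nvmeq, &c, &result, IO_TIMEOUT);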