author	Matthew Wilcox <matthew.r.wilcox@intel.com>	2011-01-26 10:02:29 -0500
committer	Matthew Wilcox <matthew.r.wilcox@intel.com>	2011-11-04 15:52:52 -0400
commit	ff22b54fda2078fc3cd1bcdcb7a5ce5d08fd6591 (patch)
tree	09c90d334daae2949bd58402e90f6d641f688bd7 /drivers/block/nvme.c
parent	7b4fe9b1cb4b9a6f4ae23a12ef96d08d96e2a5da (diff)
NVMe: Add nvme_setup_prps()
Generalise the code from nvme_identify() that sets PRP1 & PRP2 so that
it's usable for commands sent by nvme_submit_bio_queue().

Signed-off-by: Matthew Wilcox <matthew.r.wilcox@intel.com>
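For reference, the PRP rules the new helper encodes: PRP1 may point anywhere
within a page and the transfer implicitly runs to the end of that page; PRP2,
which must be page-aligned, covers at most one further page; anything spanning
more than two pages needs a PRP list, which this patch deliberately leaves for
later. A minimal userspace sketch of that arithmetic for a physically
contiguous buffer (setup_prps() below is illustrative only, not the driver
function, and assumes 4 KiB pages):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096

/* Hypothetical helper: compute PRP1/PRP2 for `length` bytes at `addr`.
 * Returns -1 when the transfer would need a PRP list. */
static int setup_prps(uint64_t addr, int length, uint64_t *prp1, uint64_t *prp2)
{
	int offset = addr & (PAGE_SIZE - 1);

	*prp1 = addr;			/* may be offset into its page */
	length -= PAGE_SIZE - offset;	/* bytes left after the first page */
	if (length <= 0)
		return 0;		/* fits in one page: PRP2 unused */
	if (length <= PAGE_SIZE) {
		/* second page is the page-aligned successor of the first */
		*prp2 = (addr - offset) + PAGE_SIZE;
		return 0;
	}
	return -1;			/* more than two pages: PRP list */
}

int main(void)
{
	uint64_t prp1 = 0, prp2 = 0;

	/* A 4096-byte transfer starting 512 bytes into a page spills
	 * into a second page, so both PRP entries are needed. */
	if (setup_prps(0x10000200ULL, 4096, &prp1, &prp2) == 0)
		printf("prp1=%#llx prp2=%#llx\n",
		       (unsigned long long)prp1, (unsigned long long)prp2);
	return 0;
}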
Diffstat (limited to 'drivers/block/nvme.c')
 drivers/block/nvme.c | 70 ++++++++++++++++++++++++++++++++++++++++++----------------------
 1 file changed, 46 insertions(+), 24 deletions(-)
diff --git a/drivers/block/nvme.c b/drivers/block/nvme.c
index c0ef1dd1cc90..1e57737b1760 100644
--- a/drivers/block/nvme.c
+++ b/drivers/block/nvme.c
@@ -240,6 +240,36 @@ static void bio_completion(struct nvme_queue *nvmeq, void *ctx,
 	bio_endio(bio, status ? -EIO : 0);
 }
 
+/* length is in bytes */
+static void nvme_setup_prps(struct nvme_common_command *cmd,
+					struct scatterlist *sg, int length)
+{
+	int dma_len = sg_dma_len(sg);
+	u64 dma_addr = sg_dma_address(sg);
+	int offset = offset_in_page(dma_addr);
+
+	cmd->prp1 = cpu_to_le64(dma_addr);
+	length -= (PAGE_SIZE - offset);
+	if (length <= 0)
+		return;
+
+	dma_len -= (PAGE_SIZE - offset);
+	if (dma_len) {
+		dma_addr += (PAGE_SIZE - offset);
+	} else {
+		sg = sg_next(sg);
+		dma_addr = sg_dma_address(sg);
+		dma_len = sg_dma_len(sg);
+	}
+
+	if (length <= PAGE_SIZE) {
+		cmd->prp2 = cpu_to_le64(dma_addr);
+		return;
+	}
+
+	/* XXX: support PRP lists */
+}
+
 static int nvme_map_bio(struct device *dev, struct nvme_req_info *info,
 		struct bio *bio, enum dma_data_direction dma_dir, int psegs)
 {
@@ -261,7 +291,7 @@ static int nvme_map_bio(struct device *dev, struct nvme_req_info *info,
 static int nvme_submit_bio_queue(struct nvme_queue *nvmeq, struct nvme_ns *ns,
 								struct bio *bio)
 {
-	struct nvme_rw_command *cmnd;
+	struct nvme_command *cmnd;
 	struct nvme_req_info *info;
 	enum dma_data_direction dma_dir;
 	int cmdid;
@@ -290,27 +320,26 @@ static int nvme_submit_bio_queue(struct nvme_queue *nvmeq, struct nvme_ns *ns,
 		dsmgmt |= NVME_RW_DSM_FREQ_PREFETCH;
 
 	spin_lock_irqsave(&nvmeq->q_lock, flags);
-	cmnd = &nvmeq->sq_cmds[nvmeq->sq_tail].rw;
+	cmnd = &nvmeq->sq_cmds[nvmeq->sq_tail];
 
 	if (bio_data_dir(bio)) {
-		cmnd->opcode = nvme_cmd_write;
+		cmnd->rw.opcode = nvme_cmd_write;
 		dma_dir = DMA_TO_DEVICE;
 	} else {
-		cmnd->opcode = nvme_cmd_read;
+		cmnd->rw.opcode = nvme_cmd_read;
 		dma_dir = DMA_FROM_DEVICE;
 	}
 
 	nvme_map_bio(nvmeq->q_dmadev, info, bio, dma_dir, psegs);
 
-	cmnd->flags = 1;
-	cmnd->command_id = cmdid;
-	cmnd->nsid = cpu_to_le32(ns->ns_id);
-	cmnd->prp1 = cpu_to_le64(sg_phys(info->sg));
-	/* XXX: Support more than one PRP */
-	cmnd->slba = cpu_to_le64(bio->bi_sector >> (ns->lba_shift - 9));
-	cmnd->length = cpu_to_le16((bio->bi_size >> ns->lba_shift) - 1);
-	cmnd->control = cpu_to_le16(control);
-	cmnd->dsmgmt = cpu_to_le32(dsmgmt);
+	cmnd->rw.flags = 1;
+	cmnd->rw.command_id = cmdid;
+	cmnd->rw.nsid = cpu_to_le32(ns->ns_id);
+	nvme_setup_prps(&cmnd->common, info->sg, bio->bi_size);
+	cmnd->rw.slba = cpu_to_le64(bio->bi_sector >> (ns->lba_shift - 9));
+	cmnd->rw.length = cpu_to_le16((bio->bi_size >> ns->lba_shift) - 1);
+	cmnd->rw.control = cpu_to_le16(control);
+	cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt);
 
 	writel(nvmeq->sq_tail, nvmeq->q_db);
 	if (++nvmeq->sq_tail == nvmeq->q_depth)
@@ -667,8 +696,9 @@ static int nvme_identify(struct nvme_ns *ns, unsigned long addr, int cns)
 		goto put_pages;
 	}
 	sg_init_table(sg, count);
-	for (i = 0; i < count; i++)
-		sg_set_page(&sg[i], pages[i], PAGE_SIZE, 0);
+	sg_set_page(&sg[0], pages[0], PAGE_SIZE - offset, offset);
+	if (count > 1)
+		sg_set_page(&sg[1], pages[1], offset, 0);
 	nents = dma_map_sg(&dev->pci_dev->dev, sg, count, DMA_FROM_DEVICE);
 	if (!nents)
 		goto put_pages;
@@ -676,15 +706,7 @@ static int nvme_identify(struct nvme_ns *ns, unsigned long addr, int cns)
 	memset(&c, 0, sizeof(c));
 	c.identify.opcode = nvme_admin_identify;
 	c.identify.nsid = cns ? 0 : cpu_to_le32(ns->ns_id);
-	c.identify.prp1 = cpu_to_le64(sg_dma_address(&sg[0]) + offset);
-	if (count > 1) {
-		u64 dma_addr;
-		if (nents > 1)
-			dma_addr = sg_dma_address(&sg[1]);
-		else
-			dma_addr = sg_dma_address(&sg[0]) + PAGE_SIZE;
-		c.identify.prp2 = cpu_to_le64(dma_addr);
-	}
+	nvme_setup_prps(&c.common, sg, 4096);
 	c.identify.cns = cpu_to_le32(cns);
 
 	err = nvme_submit_admin_cmd(dev, &c, NULL);