author     Christoph Hellwig <hch@lst.de>  2017-01-13 06:29:12 -0500
committer  Jens Axboe <axboe@fb.com>       2017-01-13 17:17:04 -0500
commit     b131c61d62266eb21b0f125f63f3d07e5670d726 (patch)
tree       df1987a2b5fc7f625efe66f6b80367f5efe1faab
parent     fd102b125e174edbea34e6e7a2d371bc7901c53d (diff)

nvme: use blk_rq_payload_bytes
The new blk_rq_payload_bytes generalizes the payload length hacks
that nvme_map_len did before.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Hannes Reinecke <hare@suse.com>
Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
Signed-off-by: Jens Axboe <axboe@fb.com>
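For context, blk_rq_payload_bytes is the generic block-layer helper that replaces the per-driver special-casing removed below. A minimal sketch of what it does, assuming the RQF_SPECIAL_PAYLOAD / special_vec convention the block layer uses for requests that carry a driver-built payload (a simplified illustration, not a copy of the block-layer source):

	static inline unsigned int blk_rq_payload_bytes(struct request *rq)
	{
		/*
		 * Requests with a driver-built payload (e.g. discard) report
		 * the length of that payload rather than the data length.
		 */
		if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
			return rq->special_vec.bv_len;
		return blk_rq_bytes(rq);
	}

With that, the driver-local nvme_map_len() helper (deleted from nvme.h below) becomes redundant: a discard request carries its single struct nvme_dsm_range as the special payload, so the same call covers both regular I/O and discards.
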
-rw-r--r--  drivers/nvme/host/fc.c   |  5 ++---
-rw-r--r--  drivers/nvme/host/nvme.h |  8 --------
-rw-r--r--  drivers/nvme/host/pci.c  | 19 ++++++++-----------
-rw-r--r--  drivers/nvme/host/rdma.c | 13 +++++--------
4 files changed, 15 insertions(+), 30 deletions(-)
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index aa0bc60810a7..fcc9dcfdf675 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -1654,13 +1654,12 @@ nvme_fc_map_data(struct nvme_fc_ctrl *ctrl, struct request *rq,
 		struct nvme_fc_fcp_op *op)
 {
 	struct nvmefc_fcp_req *freq = &op->fcp_req;
-	u32 map_len = nvme_map_len(rq);
 	enum dma_data_direction dir;
 	int ret;
 
 	freq->sg_cnt = 0;
 
-	if (!map_len)
+	if (!blk_rq_payload_bytes(rq))
 		return 0;
 
 	freq->sg_table.sgl = freq->first_sgl;
@@ -1854,7 +1853,7 @@ nvme_fc_queue_rq(struct blk_mq_hw_ctx *hctx,
 	if (ret)
 		return ret;
 
-	data_len = nvme_map_len(rq);
+	data_len = blk_rq_payload_bytes(rq);
 	if (data_len)
 		io_dir = ((rq_data_dir(rq) == WRITE) ?
 				NVMEFC_FCP_WRITE : NVMEFC_FCP_READ);
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 6377e14586dc..aead6d08ed2c 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -225,14 +225,6 @@ static inline u64 nvme_block_nr(struct nvme_ns *ns, sector_t sector)
 	return (sector >> (ns->lba_shift - 9));
 }
 
-static inline unsigned nvme_map_len(struct request *rq)
-{
-	if (req_op(rq) == REQ_OP_DISCARD)
-		return sizeof(struct nvme_dsm_range);
-	else
-		return blk_rq_bytes(rq);
-}
-
 static inline void nvme_cleanup_cmd(struct request *req)
 {
 	if (req->rq_flags & RQF_SPECIAL_PAYLOAD) {
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 19beeb7b2ac2..3faefabf339c 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -306,11 +306,11 @@ static __le64 **iod_list(struct request *req)
 	return (__le64 **)(iod->sg + blk_rq_nr_phys_segments(req));
 }
 
-static int nvme_init_iod(struct request *rq, unsigned size,
-		struct nvme_dev *dev)
+static int nvme_init_iod(struct request *rq, struct nvme_dev *dev)
 {
 	struct nvme_iod *iod = blk_mq_rq_to_pdu(rq);
 	int nseg = blk_rq_nr_phys_segments(rq);
+	unsigned int size = blk_rq_payload_bytes(rq);
 
 	if (nseg > NVME_INT_PAGES || size > NVME_INT_BYTES(dev)) {
 		iod->sg = kmalloc(nvme_iod_alloc_size(dev, size, nseg), GFP_ATOMIC);
@@ -420,12 +420,11 @@ static void nvme_dif_complete(u32 p, u32 v, struct t10_pi_tuple *pi)
 }
 #endif
 
-static bool nvme_setup_prps(struct nvme_dev *dev, struct request *req,
-		int total_len)
+static bool nvme_setup_prps(struct nvme_dev *dev, struct request *req)
 {
 	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
 	struct dma_pool *pool;
-	int length = total_len;
+	int length = blk_rq_payload_bytes(req);
 	struct scatterlist *sg = iod->sg;
 	int dma_len = sg_dma_len(sg);
 	u64 dma_addr = sg_dma_address(sg);
@@ -501,7 +500,7 @@ static bool nvme_setup_prps(struct nvme_dev *dev, struct request *req,
 }
 
 static int nvme_map_data(struct nvme_dev *dev, struct request *req,
-		unsigned size, struct nvme_command *cmnd)
+		struct nvme_command *cmnd)
 {
 	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
 	struct request_queue *q = req->q;
@@ -519,7 +518,7 @@ static int nvme_map_data(struct nvme_dev *dev, struct request *req,
 			DMA_ATTR_NO_WARN))
 		goto out;
 
-	if (!nvme_setup_prps(dev, req, size))
+	if (!nvme_setup_prps(dev, req))
 		goto out_unmap;
 
 	ret = BLK_MQ_RQ_QUEUE_ERROR;
@@ -580,7 +579,6 @@ static int nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
 	struct nvme_dev *dev = nvmeq->dev;
 	struct request *req = bd->rq;
 	struct nvme_command cmnd;
-	unsigned map_len;
 	int ret = BLK_MQ_RQ_QUEUE_OK;
 
 	/*
@@ -600,13 +598,12 @@ static int nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
 	if (ret != BLK_MQ_RQ_QUEUE_OK)
 		return ret;
 
-	map_len = nvme_map_len(req);
-	ret = nvme_init_iod(req, map_len, dev);
+	ret = nvme_init_iod(req, dev);
 	if (ret != BLK_MQ_RQ_QUEUE_OK)
 		goto out_free_cmd;
 
 	if (blk_rq_nr_phys_segments(req))
-		ret = nvme_map_data(dev, req, map_len, &cmnd);
+		ret = nvme_map_data(dev, req, &cmnd);
 
 	if (ret != BLK_MQ_RQ_QUEUE_OK)
 		goto out_cleanup_iod;
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 34e564857716..557f29b1f1bb 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -981,8 +981,7 @@ static int nvme_rdma_map_sg_fr(struct nvme_rdma_queue *queue,
 }
 
 static int nvme_rdma_map_data(struct nvme_rdma_queue *queue,
-		struct request *rq, unsigned int map_len,
-		struct nvme_command *c)
+		struct request *rq, struct nvme_command *c)
 {
 	struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
 	struct nvme_rdma_device *dev = queue->device;
@@ -1014,9 +1013,9 @@ static int nvme_rdma_map_data(struct nvme_rdma_queue *queue,
 	}
 
 	if (count == 1) {
-		if (rq_data_dir(rq) == WRITE &&
-		    map_len <= nvme_rdma_inline_data_size(queue) &&
-		    nvme_rdma_queue_idx(queue))
+		if (rq_data_dir(rq) == WRITE && nvme_rdma_queue_idx(queue) &&
+		    blk_rq_payload_bytes(rq) <=
+				nvme_rdma_inline_data_size(queue))
 			return nvme_rdma_map_sg_inline(queue, req, c);
 
 		if (dev->pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY)
@@ -1444,7 +1443,6 @@ static int nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
 	struct nvme_command *c = sqe->data;
 	bool flush = false;
 	struct ib_device *dev;
-	unsigned int map_len;
 	int ret;
 
 	WARN_ON_ONCE(rq->tag < 0);
@@ -1462,8 +1460,7 @@ static int nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
 
 	blk_mq_start_request(rq);
 
-	map_len = nvme_map_len(rq);
-	ret = nvme_rdma_map_data(queue, rq, map_len, c);
+	ret = nvme_rdma_map_data(queue, rq, c);
 	if (ret < 0) {
 		dev_err(queue->ctrl->ctrl.device,
 			"Failed to map data (%d)\n", ret);