diff options
author | Minwoo Im <minwoo.im.dev@gmail.com> | 2017-12-20 02:30:50 -0500 |
---|---|---|
committer | Christoph Hellwig <hch@lst.de> | 2017-12-29 04:31:04 -0500 |
commit | 955b1b5a00ba694159a7d3763412597f707c294d (patch) | |
tree | 84c57107b104ec0174f39b925a4a500b0d676761 | |
parent | 6d0e4827b72afc71349784336d5eb6df4df106e6 (diff) |
nvme-pci: move use_sgl initialization to nvme_init_iod()
The "use_sgl" flag of "struct nvme_iod" has been read in nvme_init_iod()
without ever having been set to any value at that point. "use_sgl" was
only set in nvme_pci_setup_prps() or nvme_pci_setup_sgls(), both of which
run later than nvme_init_iod().
Make "iod->use_sgl" be set in the proper place, nvme_init_iod().
Also move nvme_pci_use_sgls() up above nvme_init_iod() so that it can be
called by nvme_init_iod().
Signed-off-by: Minwoo Im <minwoo.im.dev@gmail.com>
Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
Signed-off-by: Christoph Hellwig <hch@lst.de>
-rw-r--r-- | drivers/nvme/host/pci.c | 42 |
1 file changed, 20 insertions(+), 22 deletions(-)
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index f5800c3c9082..d53550e612bc 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c | |||
@@ -448,12 +448,31 @@ static void **nvme_pci_iod_list(struct request *req) | |||
448 | return (void **)(iod->sg + blk_rq_nr_phys_segments(req)); | 448 | return (void **)(iod->sg + blk_rq_nr_phys_segments(req)); |
449 | } | 449 | } |
450 | 450 | ||
451 | static inline bool nvme_pci_use_sgls(struct nvme_dev *dev, struct request *req) | ||
452 | { | ||
453 | struct nvme_iod *iod = blk_mq_rq_to_pdu(req); | ||
454 | unsigned int avg_seg_size; | ||
455 | |||
456 | avg_seg_size = DIV_ROUND_UP(blk_rq_payload_bytes(req), | ||
457 | blk_rq_nr_phys_segments(req)); | ||
458 | |||
459 | if (!(dev->ctrl.sgls & ((1 << 0) | (1 << 1)))) | ||
460 | return false; | ||
461 | if (!iod->nvmeq->qid) | ||
462 | return false; | ||
463 | if (!sgl_threshold || avg_seg_size < sgl_threshold) | ||
464 | return false; | ||
465 | return true; | ||
466 | } | ||
467 | |||
451 | static blk_status_t nvme_init_iod(struct request *rq, struct nvme_dev *dev) | 468 | static blk_status_t nvme_init_iod(struct request *rq, struct nvme_dev *dev) |
452 | { | 469 | { |
453 | struct nvme_iod *iod = blk_mq_rq_to_pdu(rq); | 470 | struct nvme_iod *iod = blk_mq_rq_to_pdu(rq); |
454 | int nseg = blk_rq_nr_phys_segments(rq); | 471 | int nseg = blk_rq_nr_phys_segments(rq); |
455 | unsigned int size = blk_rq_payload_bytes(rq); | 472 | unsigned int size = blk_rq_payload_bytes(rq); |
456 | 473 | ||
474 | iod->use_sgl = nvme_pci_use_sgls(dev, rq); | ||
475 | |||
457 | if (nseg > NVME_INT_PAGES || size > NVME_INT_BYTES(dev)) { | 476 | if (nseg > NVME_INT_PAGES || size > NVME_INT_BYTES(dev)) { |
458 | size_t alloc_size = nvme_pci_iod_alloc_size(dev, size, nseg, | 477 | size_t alloc_size = nvme_pci_iod_alloc_size(dev, size, nseg, |
459 | iod->use_sgl); | 478 | iod->use_sgl); |
@@ -604,8 +623,6 @@ static blk_status_t nvme_pci_setup_prps(struct nvme_dev *dev, | |||
604 | dma_addr_t prp_dma; | 623 | dma_addr_t prp_dma; |
605 | int nprps, i; | 624 | int nprps, i; |
606 | 625 | ||
607 | iod->use_sgl = false; | ||
608 | |||
609 | length -= (page_size - offset); | 626 | length -= (page_size - offset); |
610 | if (length <= 0) { | 627 | if (length <= 0) { |
611 | iod->first_dma = 0; | 628 | iod->first_dma = 0; |
@@ -715,8 +732,6 @@ static blk_status_t nvme_pci_setup_sgls(struct nvme_dev *dev, | |||
715 | int entries = iod->nents, i = 0; | 732 | int entries = iod->nents, i = 0; |
716 | dma_addr_t sgl_dma; | 733 | dma_addr_t sgl_dma; |
717 | 734 | ||
718 | iod->use_sgl = true; | ||
719 | |||
720 | /* setting the transfer type as SGL */ | 735 | /* setting the transfer type as SGL */ |
721 | cmd->flags = NVME_CMD_SGL_METABUF; | 736 | cmd->flags = NVME_CMD_SGL_METABUF; |
722 | 737 | ||
@@ -770,23 +785,6 @@ static blk_status_t nvme_pci_setup_sgls(struct nvme_dev *dev, | |||
770 | return BLK_STS_OK; | 785 | return BLK_STS_OK; |
771 | } | 786 | } |
772 | 787 | ||
773 | static inline bool nvme_pci_use_sgls(struct nvme_dev *dev, struct request *req) | ||
774 | { | ||
775 | struct nvme_iod *iod = blk_mq_rq_to_pdu(req); | ||
776 | unsigned int avg_seg_size; | ||
777 | |||
778 | avg_seg_size = DIV_ROUND_UP(blk_rq_payload_bytes(req), | ||
779 | blk_rq_nr_phys_segments(req)); | ||
780 | |||
781 | if (!(dev->ctrl.sgls & ((1 << 0) | (1 << 1)))) | ||
782 | return false; | ||
783 | if (!iod->nvmeq->qid) | ||
784 | return false; | ||
785 | if (!sgl_threshold || avg_seg_size < sgl_threshold) | ||
786 | return false; | ||
787 | return true; | ||
788 | } | ||
789 | |||
790 | static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req, | 788 | static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req, |
791 | struct nvme_command *cmnd) | 789 | struct nvme_command *cmnd) |
792 | { | 790 | { |
@@ -806,7 +804,7 @@ static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req, | |||
806 | DMA_ATTR_NO_WARN)) | 804 | DMA_ATTR_NO_WARN)) |
807 | goto out; | 805 | goto out; |
808 | 806 | ||
809 | if (nvme_pci_use_sgls(dev, req)) | 807 | if (iod->use_sgl) |
810 | ret = nvme_pci_setup_sgls(dev, req, &cmnd->rw); | 808 | ret = nvme_pci_setup_sgls(dev, req, &cmnd->rw); |
811 | else | 809 | else |
812 | ret = nvme_pci_setup_prps(dev, req, &cmnd->rw); | 810 | ret = nvme_pci_setup_prps(dev, req, &cmnd->rw); |