author	Keith Busch <keith.busch@intel.com>	2017-07-12 15:59:07 -0400
committer	Jens Axboe <axboe@kernel.dk>	2017-07-20 10:41:56 -0400
commit	86eea2895d11dde9bf43fa2046331e84154e00f4
tree	8a67f36be148096c4beb15f4a0cc5c9c2d727a4e
parent	f99cb7af40f99703bacf1640dc8a4b09062c1f0f
nvme-pci: Remove nvme_setup_prps BUG_ON
This patch replaces the invalid nvme SGL kernel panic with a warning and
returns an appropriate error. The warning will occur only on the first
occurrence, and the SGL details will be printed to help debug how the
request was allowed to form.

Signed-off-by: Keith Busch <keith.busch@intel.com>
Reviewed-by: Johannes Thumshirn <jthumshirn@suse.de>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Sagi Grimberg <sagi@grimberg.me>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
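For context, a minimal caller-side sketch (not part of the patch; the real caller is nvme_map_data() in the diff below, and the function name here is illustrative) of how the new blk_status_t return is consumed, assuming the usual blk-mq semantics where BLK_STS_RESOURCE leads to the request being requeued and BLK_STS_IOERR fails it:

	/*
	 * Sketch only: act on the blk_status_t that nvme_setup_prps() now
	 * returns instead of the old bool.  map_one_request() is a made-up name.
	 */
	static blk_status_t map_one_request(struct nvme_dev *dev, struct request *req)
	{
		blk_status_t ret;

		ret = nvme_setup_prps(dev, req);
		if (ret != BLK_STS_OK)
			return ret;	/* RESOURCE -> requeued, IOERR -> request fails */

		/* ... build and submit the NVMe command ... */
		return BLK_STS_OK;
	}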
-rw-r--r--	drivers/nvme/host/pci.c	33
1 file changed, 25 insertions(+), 8 deletions(-)
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 454f97bbbb2c..1e5c9f2ddba6 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -539,7 +539,7 @@ static void nvme_dif_complete(u32 p, u32 v, struct t10_pi_tuple *pi)
 }
 #endif
 
-static bool nvme_setup_prps(struct nvme_dev *dev, struct request *req)
+static blk_status_t nvme_setup_prps(struct nvme_dev *dev, struct request *req)
 {
 	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
 	struct dma_pool *pool;
@@ -556,7 +556,7 @@ static bool nvme_setup_prps(struct nvme_dev *dev, struct request *req)
 
 	length -= (page_size - offset);
 	if (length <= 0)
-		return true;
+		return BLK_STS_OK;
 
 	dma_len -= (page_size - offset);
 	if (dma_len) {
@@ -569,7 +569,7 @@ static bool nvme_setup_prps(struct nvme_dev *dev, struct request *req)
 
 	if (length <= page_size) {
 		iod->first_dma = dma_addr;
-		return true;
+		return BLK_STS_OK;
 	}
 
 	nprps = DIV_ROUND_UP(length, page_size);
@@ -585,7 +585,7 @@ static bool nvme_setup_prps(struct nvme_dev *dev, struct request *req)
 	if (!prp_list) {
 		iod->first_dma = dma_addr;
 		iod->npages = -1;
-		return false;
+		return BLK_STS_RESOURCE;
 	}
 	list[0] = prp_list;
 	iod->first_dma = prp_dma;
@@ -595,7 +595,7 @@ static bool nvme_setup_prps(struct nvme_dev *dev, struct request *req)
 			__le64 *old_prp_list = prp_list;
 			prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma);
 			if (!prp_list)
-				return false;
+				return BLK_STS_RESOURCE;
 			list[iod->npages++] = prp_list;
 			prp_list[0] = old_prp_list[i - 1];
 			old_prp_list[i - 1] = cpu_to_le64(prp_dma);
@@ -609,13 +609,29 @@ static bool nvme_setup_prps(struct nvme_dev *dev, struct request *req)
 			break;
 		if (dma_len > 0)
 			continue;
-		BUG_ON(dma_len < 0);
+		if (unlikely(dma_len < 0))
+			goto bad_sgl;
 		sg = sg_next(sg);
 		dma_addr = sg_dma_address(sg);
 		dma_len = sg_dma_len(sg);
 	}
 
-	return true;
+	return BLK_STS_OK;
+
+ bad_sgl:
+	if (WARN_ONCE(1, "Invalid SGL for payload:%d nents:%d\n",
+				blk_rq_payload_bytes(req), iod->nents)) {
+		for_each_sg(iod->sg, sg, iod->nents, i) {
+			dma_addr_t phys = sg_phys(sg);
+			pr_warn("sg[%d] phys_addr:%pad offset:%d length:%d "
+				"dma_address:%pad dma_length:%d\n", i, &phys,
+					sg->offset, sg->length,
+					&sg_dma_address(sg),
+					sg_dma_len(sg));
+		}
+	}
+	return BLK_STS_IOERR;
+
 }
 
 static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
@@ -637,7 +653,8 @@ static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
 				DMA_ATTR_NO_WARN))
 		goto out;
 
-	if (!nvme_setup_prps(dev, req))
+	ret = nvme_setup_prps(dev, req);
+	if (ret != BLK_STS_OK)
 		goto out_unmap;
 
 	ret = BLK_STS_IOERR;