Diffstat (limited to 'drivers/nvme/host/pci.c')
-rw-r--r--	drivers/nvme/host/pci.c	48
1 file changed, 27 insertions(+), 21 deletions(-)
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 4a2121335f48..3f5a04c586ce 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -24,6 +24,7 @@
 #include <linux/mm.h>
 #include <linux/module.h>
 #include <linux/mutex.h>
+#include <linux/once.h>
 #include <linux/pci.h>
 #include <linux/poison.h>
 #include <linux/t10-pi.h>
@@ -93,7 +94,7 @@ struct nvme_dev {
 	struct mutex shutdown_lock;
 	bool subsystem;
 	void __iomem *cmb;
-	dma_addr_t cmb_dma_addr;
+	pci_bus_addr_t cmb_bus_addr;
 	u64 cmb_size;
 	u32 cmbsz;
 	u32 cmbloc;
@@ -540,6 +541,20 @@ static void nvme_dif_complete(u32 p, u32 v, struct t10_pi_tuple *pi)
 }
 #endif
 
+static void nvme_print_sgl(struct scatterlist *sgl, int nents)
+{
+	int i;
+	struct scatterlist *sg;
+
+	for_each_sg(sgl, sg, nents, i) {
+		dma_addr_t phys = sg_phys(sg);
+		pr_warn("sg[%d] phys_addr:%pad offset:%d length:%d "
+			"dma_address:%pad dma_length:%d\n",
+			i, &phys, sg->offset, sg->length, &sg_dma_address(sg),
+			sg_dma_len(sg));
+	}
+}
+
 static blk_status_t nvme_setup_prps(struct nvme_dev *dev, struct request *req)
 {
 	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
@@ -622,19 +637,10 @@ static blk_status_t nvme_setup_prps(struct nvme_dev *dev, struct request *req)
 	return BLK_STS_OK;
 
  bad_sgl:
-	if (WARN_ONCE(1, "Invalid SGL for payload:%d nents:%d\n",
-			blk_rq_payload_bytes(req), iod->nents)) {
-		for_each_sg(iod->sg, sg, iod->nents, i) {
-			dma_addr_t phys = sg_phys(sg);
-			pr_warn("sg[%d] phys_addr:%pad offset:%d length:%d "
-				"dma_address:%pad dma_length:%d\n", i, &phys,
-					sg->offset, sg->length,
-					&sg_dma_address(sg),
-					sg_dma_len(sg));
-		}
-	}
+	WARN(DO_ONCE(nvme_print_sgl, iod->sg, iod->nents),
+			"Invalid SGL for payload:%d nents:%d\n",
+			blk_rq_payload_bytes(req), iod->nents);
 	return BLK_STS_IOERR;
-
 }
 
 static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
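
Context note (not part of the patch): DO_ONCE() from <linux/once.h> runs the function it is given at most once per call site and evaluates to true only on that first run, so the WARN() above prints the full scatterlist dump the first time a bad SGL is seen and stays quiet afterwards. A minimal sketch of the pattern, with a hypothetical dump_value() helper standing in for nvme_print_sgl():

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/once.h>
#include <linux/bug.h>

/* Hypothetical helper: prints diagnostic detail, intended to run only once. */
static void dump_value(int value)
{
	pr_warn("bad value: %d\n", value);
}

static int check_value(int value)
{
	if (likely(value >= 0))
		return 0;

	/*
	 * DO_ONCE() returns true only on the call that actually runs
	 * dump_value(), so the dump and the WARN() backtrace are both
	 * emitted exactly once; later failures just return the error.
	 */
	WARN(DO_ONCE(dump_value, value), "invalid value:%d\n", value);
	return -EINVAL;
}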
@@ -1220,7 +1226,7 @@ static int nvme_alloc_sq_cmds(struct nvme_dev *dev, struct nvme_queue *nvmeq,
 	if (qid && dev->cmb && use_cmb_sqes && NVME_CMB_SQS(dev->cmbsz)) {
 		unsigned offset = (qid - 1) * roundup(SQ_SIZE(depth),
 						      dev->ctrl.page_size);
-		nvmeq->sq_dma_addr = dev->cmb_dma_addr + offset;
+		nvmeq->sq_dma_addr = dev->cmb_bus_addr + offset;
 		nvmeq->sq_cmds_io = dev->cmb + offset;
 	} else {
 		nvmeq->sq_cmds = dma_alloc_coherent(dev->dev, SQ_SIZE(depth),
@@ -1313,11 +1319,11 @@ static int nvme_create_queue(struct nvme_queue *nvmeq, int qid)
 	if (result < 0)
 		goto release_cq;
 
+	nvme_init_queue(nvmeq, qid);
 	result = queue_request_irq(nvmeq);
 	if (result < 0)
 		goto release_sq;
 
-	nvme_init_queue(nvmeq, qid);
 	return result;
 
  release_sq:
@@ -1464,6 +1470,7 @@ static int nvme_pci_configure_admin_queue(struct nvme_dev *dev)
 		return result;
 
 	nvmeq->cq_vector = 0;
+	nvme_init_queue(nvmeq, 0);
 	result = queue_request_irq(nvmeq);
 	if (result) {
 		nvmeq->cq_vector = -1;
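
Context note (not part of the patch): the two hunks above move nvme_init_queue() in front of queue_request_irq(), because the handler can fire as soon as the vector is requested and must never observe an uninitialized queue; the standalone init call in nvme_reset_work() is dropped further down for the same reason. A sketch of the ordering, using the driver's existing static helpers and a hypothetical wrapper name:

/*
 * Sketch mirroring the nvme_create_queue() hunk above: bring the queue's
 * software state up before interrupts for it can be delivered.
 */
static int nvme_bring_up_queue(struct nvme_queue *nvmeq, int qid)
{
	/* Resets sq_tail/cq_head/cq_phase and clears the CQ entries. */
	nvme_init_queue(nvmeq, qid);
	/* Only now may nvme_irq() run against this queue. */
	return queue_request_irq(nvmeq);
}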
@@ -1520,7 +1527,7 @@ static void __iomem *nvme_map_cmb(struct nvme_dev *dev)
 	resource_size_t bar_size;
 	struct pci_dev *pdev = to_pci_dev(dev->dev);
 	void __iomem *cmb;
-	dma_addr_t dma_addr;
+	int bar;
 
 	dev->cmbsz = readl(dev->bar + NVME_REG_CMBSZ);
 	if (!(NVME_CMB_SZ(dev->cmbsz)))
@@ -1533,7 +1540,8 @@ static void __iomem *nvme_map_cmb(struct nvme_dev *dev)
 	szu = (u64)1 << (12 + 4 * NVME_CMB_SZU(dev->cmbsz));
 	size = szu * NVME_CMB_SZ(dev->cmbsz);
 	offset = szu * NVME_CMB_OFST(dev->cmbloc);
-	bar_size = pci_resource_len(pdev, NVME_CMB_BIR(dev->cmbloc));
+	bar = NVME_CMB_BIR(dev->cmbloc);
+	bar_size = pci_resource_len(pdev, bar);
 
 	if (offset > bar_size)
 		return NULL;
@@ -1546,12 +1554,11 @@ static void __iomem *nvme_map_cmb(struct nvme_dev *dev)
 	if (size > bar_size - offset)
 		size = bar_size - offset;
 
-	dma_addr = pci_resource_start(pdev, NVME_CMB_BIR(dev->cmbloc)) + offset;
-	cmb = ioremap_wc(dma_addr, size);
+	cmb = ioremap_wc(pci_resource_start(pdev, bar) + offset, size);
 	if (!cmb)
 		return NULL;
 
-	dev->cmb_dma_addr = dma_addr;
+	dev->cmb_bus_addr = pci_bus_address(pdev, bar) + offset;
 	dev->cmb_size = size;
 	return cmb;
 }
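
Context note (not part of the patch): pci_resource_start() yields the CPU physical address of a BAR, which is what ioremap_wc() needs, while pci_bus_address() yields the address of the same BAR as seen from the PCI bus, which is what the controller itself must be told about (here, the SQ base inside the CMB). On many systems the two are identical, but behind a host bridge that translates addresses they differ, hence the rename from cmb_dma_addr to cmb_bus_addr. A hedged sketch of the pattern, using a hypothetical map_bar_window() name:

#include <linux/pci.h>
#include <linux/io.h>

/*
 * Hypothetical illustration: map part of a BAR for CPU access and, at the
 * same time, record the bus address the device should be programmed with.
 */
static void __iomem *map_bar_window(struct pci_dev *pdev, int bar,
				    resource_size_t offset,
				    resource_size_t size,
				    pci_bus_addr_t *bus_addr)
{
	/* CPU physical address: the input ioremap_wc() expects. */
	void __iomem *va = ioremap_wc(pci_resource_start(pdev, bar) + offset,
				      size);

	if (!va)
		return NULL;

	/* Bus address: what gets written into device registers or commands. */
	*bus_addr = pci_bus_address(pdev, bar) + offset;
	return va;
}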
@@ -2156,7 +2163,6 @@ static void nvme_reset_work(struct work_struct *work)
 	if (result)
 		goto out;
 
-	nvme_init_queue(dev->queues[0], 0);
 	result = nvme_alloc_admin_tags(dev);
 	if (result)
 		goto out;