Diffstat (limited to 'drivers/nvme')
-rw-r--r--   drivers/nvme/host/core.c         | 14
-rw-r--r--   drivers/nvme/host/pci.c          | 77
-rw-r--r--   drivers/nvme/host/scsi.c         |  4
-rw-r--r--   drivers/nvme/target/admin-cmd.c  |  8
-rw-r--r--   drivers/nvme/target/core.c       |  2
-rw-r--r--   drivers/nvme/target/discovery.c  |  4
6 files changed, 71 insertions, 38 deletions
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 329381a28edf..79e679d12f3b 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -554,7 +554,7 @@ int nvme_identify_ctrl(struct nvme_ctrl *dev, struct nvme_id_ctrl **id)
 
         /* gcc-4.4.4 (at least) has issues with initializers and anon unions */
         c.identify.opcode = nvme_admin_identify;
-        c.identify.cns = cpu_to_le32(1);
+        c.identify.cns = cpu_to_le32(NVME_ID_CNS_CTRL);
 
         *id = kmalloc(sizeof(struct nvme_id_ctrl), GFP_KERNEL);
         if (!*id)
@@ -572,7 +572,7 @@ static int nvme_identify_ns_list(struct nvme_ctrl *dev, unsigned nsid, __le32 *n
         struct nvme_command c = { };
 
         c.identify.opcode = nvme_admin_identify;
-        c.identify.cns = cpu_to_le32(2);
+        c.identify.cns = cpu_to_le32(NVME_ID_CNS_NS_ACTIVE_LIST);
         c.identify.nsid = cpu_to_le32(nsid);
         return nvme_submit_sync_cmd(dev->admin_q, &c, ns_list, 0x1000);
 }
@@ -900,9 +900,9 @@ static int nvme_revalidate_ns(struct nvme_ns *ns, struct nvme_id_ns **id)
                 return -ENODEV;
         }
 
-        if (ns->ctrl->vs >= NVME_VS(1, 1))
+        if (ns->ctrl->vs >= NVME_VS(1, 1, 0))
                 memcpy(ns->eui, (*id)->eui64, sizeof(ns->eui));
-        if (ns->ctrl->vs >= NVME_VS(1, 2))
+        if (ns->ctrl->vs >= NVME_VS(1, 2, 0))
                 memcpy(ns->uuid, (*id)->nguid, sizeof(ns->uuid));
 
         return 0;
@@ -1086,6 +1086,8 @@ static int nvme_wait_ready(struct nvme_ctrl *ctrl, u64 cap, bool enabled)
         int ret;
 
         while ((ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &csts)) == 0) {
+                if (csts == ~0)
+                        return -ENODEV;
                 if ((csts & NVME_CSTS_RDY) == bit)
                         break;
 
@@ -1240,7 +1242,7 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
         }
         page_shift = NVME_CAP_MPSMIN(cap) + 12;
 
-        if (ctrl->vs >= NVME_VS(1, 1))
+        if (ctrl->vs >= NVME_VS(1, 1, 0))
                 ctrl->subsystem = NVME_CAP_NSSRC(cap);
 
         ret = nvme_identify_ctrl(ctrl, &id);
@@ -1840,7 +1842,7 @@ static void nvme_scan_work(struct work_struct *work)
                 return;
 
         nn = le32_to_cpu(id->nn);
-        if (ctrl->vs >= NVME_VS(1, 1) &&
+        if (ctrl->vs >= NVME_VS(1, 1, 0) &&
             !(ctrl->quirks & NVME_QUIRK_IDENTIFY_CNS)) {
                 if (!nvme_scan_ns_list(ctrl, nn))
                         goto done;
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 0fc99f0f2571..0248d0e21fee 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -99,6 +99,7 @@ struct nvme_dev {
         dma_addr_t cmb_dma_addr;
         u64 cmb_size;
         u32 cmbsz;
+        u32 cmbloc;
         struct nvme_ctrl ctrl;
         struct completion ioq_wait;
 };
@@ -893,7 +894,7 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
893 "I/O %d QID %d timeout, reset controller\n", 894 "I/O %d QID %d timeout, reset controller\n",
894 req->tag, nvmeq->qid); 895 req->tag, nvmeq->qid);
895 nvme_dev_disable(dev, false); 896 nvme_dev_disable(dev, false);
896 queue_work(nvme_workq, &dev->reset_work); 897 nvme_reset(dev);
897 898
898 /* 899 /*
899 * Mark the request as handled, since the inline shutdown 900 * Mark the request as handled, since the inline shutdown
@@ -1214,7 +1215,7 @@ static int nvme_configure_admin_queue(struct nvme_dev *dev)
         u64 cap = lo_hi_readq(dev->bar + NVME_REG_CAP);
         struct nvme_queue *nvmeq;
 
-        dev->subsystem = readl(dev->bar + NVME_REG_VS) >= NVME_VS(1, 1) ?
+        dev->subsystem = readl(dev->bar + NVME_REG_VS) >= NVME_VS(1, 1, 0) ?
                                                 NVME_CAP_NSSRC(cap) : 0;
 
         if (dev->subsystem &&
@@ -1291,7 +1292,7 @@ static void nvme_watchdog_timer(unsigned long data)
 
         /* Skip controllers under certain specific conditions. */
         if (nvme_should_reset(dev, csts)) {
-                if (queue_work(nvme_workq, &dev->reset_work))
+                if (!nvme_reset(dev))
                         dev_warn(dev->dev,
                                 "Failed status: 0x%x, reset controller.\n",
                                 csts);
@@ -1331,28 +1332,37 @@ static int nvme_create_io_queues(struct nvme_dev *dev)
         return ret >= 0 ? 0 : ret;
 }
 
+static ssize_t nvme_cmb_show(struct device *dev,
+                             struct device_attribute *attr,
+                             char *buf)
+{
+        struct nvme_dev *ndev = to_nvme_dev(dev_get_drvdata(dev));
+
+        return snprintf(buf, PAGE_SIZE, "cmbloc : x%08x\ncmbsz : x%08x\n",
+                        ndev->cmbloc, ndev->cmbsz);
+}
+static DEVICE_ATTR(cmb, S_IRUGO, nvme_cmb_show, NULL);
+
 static void __iomem *nvme_map_cmb(struct nvme_dev *dev)
 {
         u64 szu, size, offset;
-        u32 cmbloc;
         resource_size_t bar_size;
         struct pci_dev *pdev = to_pci_dev(dev->dev);
         void __iomem *cmb;
         dma_addr_t dma_addr;
 
-        if (!use_cmb_sqes)
-                return NULL;
-
         dev->cmbsz = readl(dev->bar + NVME_REG_CMBSZ);
         if (!(NVME_CMB_SZ(dev->cmbsz)))
                 return NULL;
+        dev->cmbloc = readl(dev->bar + NVME_REG_CMBLOC);
 
-        cmbloc = readl(dev->bar + NVME_REG_CMBLOC);
+        if (!use_cmb_sqes)
+                return NULL;
 
         szu = (u64)1 << (12 + 4 * NVME_CMB_SZU(dev->cmbsz));
         size = szu * NVME_CMB_SZ(dev->cmbsz);
-        offset = szu * NVME_CMB_OFST(cmbloc);
-        bar_size = pci_resource_len(pdev, NVME_CMB_BIR(cmbloc));
+        offset = szu * NVME_CMB_OFST(dev->cmbloc);
+        bar_size = pci_resource_len(pdev, NVME_CMB_BIR(dev->cmbloc));
 
         if (offset > bar_size)
                 return NULL;
@@ -1365,7 +1375,7 @@ static void __iomem *nvme_map_cmb(struct nvme_dev *dev)
         if (size > bar_size - offset)
                 size = bar_size - offset;
 
-        dma_addr = pci_resource_start(pdev, NVME_CMB_BIR(cmbloc)) + offset;
+        dma_addr = pci_resource_start(pdev, NVME_CMB_BIR(dev->cmbloc)) + offset;
         cmb = ioremap_wc(dma_addr, size);
         if (!cmb)
                 return NULL;
@@ -1511,9 +1521,9 @@ static int nvme_delete_queue(struct nvme_queue *nvmeq, u8 opcode)
         return 0;
 }
 
-static void nvme_disable_io_queues(struct nvme_dev *dev)
+static void nvme_disable_io_queues(struct nvme_dev *dev, int queues)
 {
-        int pass, queues = dev->online_queues - 1;
+        int pass;
         unsigned long timeout;
         u8 opcode = nvme_admin_delete_sq;
 
@@ -1616,9 +1626,25 @@ static int nvme_pci_enable(struct nvme_dev *dev)
                         dev->q_depth);
         }
 
-        if (readl(dev->bar + NVME_REG_VS) >= NVME_VS(1, 2))
+        /*
+         * CMBs can currently only exist on >=1.2 PCIe devices. We only
+         * populate sysfs if a CMB is implemented. Note that we add the
+         * CMB attribute to the nvme_ctrl kobj which removes the need to remove
+         * it on exit. Since nvme_dev_attrs_group has no name we can pass
+         * NULL as final argument to sysfs_add_file_to_group.
+         */
+
+        if (readl(dev->bar + NVME_REG_VS) >= NVME_VS(1, 2, 0)) {
                 dev->cmb = nvme_map_cmb(dev);
 
+                if (dev->cmbsz) {
+                        if (sysfs_add_file_to_group(&dev->ctrl.device->kobj,
+                                                    &dev_attr_cmb.attr, NULL))
+                                dev_warn(dev->dev,
+                                         "failed to add sysfs attribute for CMB\n");
+                }
+        }
+
         pci_enable_pcie_error_reporting(pdev);
         pci_save_state(pdev);
         return 0;
@@ -1649,7 +1675,7 @@ static void nvme_pci_disable(struct nvme_dev *dev)
 
 static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
 {
-        int i;
+        int i, queues;
         u32 csts = -1;
 
         del_timer_sync(&dev->watchdog_timer);
@@ -1660,6 +1686,7 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
                 csts = readl(dev->bar + NVME_REG_CSTS);
         }
 
+        queues = dev->online_queues - 1;
         for (i = dev->queue_count - 1; i > 0; i--)
                 nvme_suspend_queue(dev->queues[i]);
 
@@ -1671,7 +1698,7 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
                 if (dev->queue_count)
                         nvme_suspend_queue(dev->queues[0]);
         } else {
-                nvme_disable_io_queues(dev);
+                nvme_disable_io_queues(dev, queues);
                 nvme_disable_admin_queue(dev, shutdown);
         }
         nvme_pci_disable(dev);
@@ -1818,11 +1845,10 @@ static int nvme_reset(struct nvme_dev *dev)
 {
         if (!dev->ctrl.admin_q || blk_queue_dying(dev->ctrl.admin_q))
                 return -ENODEV;
-
+        if (work_busy(&dev->reset_work))
+                return -ENODEV;
         if (!queue_work(nvme_workq, &dev->reset_work))
                 return -EBUSY;
-
-        flush_work(&dev->reset_work);
         return 0;
 }
 
@@ -1846,7 +1872,12 @@ static int nvme_pci_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val)
 
 static int nvme_pci_reset_ctrl(struct nvme_ctrl *ctrl)
 {
-        return nvme_reset(to_nvme_dev(ctrl));
+        struct nvme_dev *dev = to_nvme_dev(ctrl);
+        int ret = nvme_reset(dev);
+
+        if (!ret)
+                flush_work(&dev->reset_work);
+        return ret;
 }
 
 static const struct nvme_ctrl_ops nvme_pci_ctrl_ops = {
@@ -1940,7 +1971,7 @@ static void nvme_reset_notify(struct pci_dev *pdev, bool prepare)
         if (prepare)
                 nvme_dev_disable(dev, false);
         else
-                queue_work(nvme_workq, &dev->reset_work);
+                nvme_reset(dev);
 }
 
 static void nvme_shutdown(struct pci_dev *pdev)
@@ -2009,7 +2040,7 @@ static int nvme_resume(struct device *dev)
         struct pci_dev *pdev = to_pci_dev(dev);
         struct nvme_dev *ndev = pci_get_drvdata(pdev);
 
-        queue_work(nvme_workq, &ndev->reset_work);
+        nvme_reset(ndev);
         return 0;
 }
 #endif
@@ -2048,7 +2079,7 @@ static pci_ers_result_t nvme_slot_reset(struct pci_dev *pdev)
 
         dev_info(dev->ctrl.device, "restart after slot reset\n");
         pci_restore_state(pdev);
-        queue_work(nvme_workq, &dev->reset_work);
+        nvme_reset(dev);
         return PCI_ERS_RESULT_RECOVERED;
 }
 
diff --git a/drivers/nvme/host/scsi.c b/drivers/nvme/host/scsi.c
index c2a0a1c7d05d..3eaa4d27801e 100644
--- a/drivers/nvme/host/scsi.c
+++ b/drivers/nvme/host/scsi.c
@@ -606,7 +606,7 @@ static int nvme_fill_device_id_eui64(struct nvme_ns *ns, struct sg_io_hdr *hdr,
         eui = id_ns->eui64;
         len = sizeof(id_ns->eui64);
 
-        if (ns->ctrl->vs >= NVME_VS(1, 2)) {
+        if (ns->ctrl->vs >= NVME_VS(1, 2, 0)) {
                 if (bitmap_empty(eui, len * 8)) {
                         eui = id_ns->nguid;
                         len = sizeof(id_ns->nguid);
@@ -679,7 +679,7 @@ static int nvme_trans_device_id_page(struct nvme_ns *ns, struct sg_io_hdr *hdr,
 {
         int res;
 
-        if (ns->ctrl->vs >= NVME_VS(1, 1)) {
+        if (ns->ctrl->vs >= NVME_VS(1, 1, 0)) {
                 res = nvme_fill_device_id_eui64(ns, hdr, resp, alloc_len);
                 if (res != -EOPNOTSUPP)
                         return res;
diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c
index 7ab9c9381b98..6fe4c48a21e4 100644
--- a/drivers/nvme/target/admin-cmd.c
+++ b/drivers/nvme/target/admin-cmd.c
@@ -199,7 +199,7 @@ static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
          */
 
         /* we support multiple ports and multiples hosts: */
-        id->mic = (1 << 0) | (1 << 1);
+        id->cmic = (1 << 0) | (1 << 1);
 
         /* no limit on data transfer sizes for now */
         id->mdts = 0;
@@ -511,13 +511,13 @@ int nvmet_parse_admin_cmd(struct nvmet_req *req)
         case nvme_admin_identify:
                 req->data_len = 4096;
                 switch (le32_to_cpu(cmd->identify.cns)) {
-                case 0x00:
+                case NVME_ID_CNS_NS:
                         req->execute = nvmet_execute_identify_ns;
                         return 0;
-                case 0x01:
+                case NVME_ID_CNS_CTRL:
                         req->execute = nvmet_execute_identify_ctrl;
                         return 0;
-                case 0x02:
+                case NVME_ID_CNS_NS_ACTIVE_LIST:
                         req->execute = nvmet_execute_identify_nslist;
                         return 0;
                 }
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index 6559d5afa7bf..b4cacb6f0258 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -882,7 +882,7 @@ struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn,
         if (!subsys)
                 return NULL;
 
-        subsys->ver = (1 << 16) | (2 << 8) | 1; /* NVMe 1.2.1 */
+        subsys->ver = NVME_VS(1, 2, 1); /* NVMe 1.2.1 */
 
         switch (type) {
         case NVME_NQN_NVME:
diff --git a/drivers/nvme/target/discovery.c b/drivers/nvme/target/discovery.c
index 6f65646e89cf..12f39eea569f 100644
--- a/drivers/nvme/target/discovery.c
+++ b/drivers/nvme/target/discovery.c
@@ -54,7 +54,7 @@ static void nvmet_format_discovery_entry(struct nvmf_disc_rsp_page_hdr *hdr,
         /* we support only dynamic controllers */
         e->cntlid = cpu_to_le16(NVME_CNTLID_DYNAMIC);
         e->asqsz = cpu_to_le16(NVMF_AQ_DEPTH);
-        e->nqntype = type;
+        e->subtype = type;
         memcpy(e->trsvcid, port->disc_addr.trsvcid, NVMF_TRSVCID_SIZE);
         memcpy(e->traddr, port->disc_addr.traddr, NVMF_TRADDR_SIZE);
         memcpy(e->tsas.common, port->disc_addr.tsas.common, NVMF_TSAS_SIZE);
@@ -187,7 +187,7 @@ int nvmet_parse_discovery_cmd(struct nvmet_req *req)
         case nvme_admin_identify:
                 req->data_len = 4096;
                 switch (le32_to_cpu(cmd->identify.cns)) {
-                case 0x01:
+                case NVME_ID_CNS_CTRL:
                         req->execute =
                                 nvmet_execute_identify_disc_ctrl;
                         return 0;