author     Jens Axboe <axboe@kernel.dk>    2017-08-11 10:07:19 -0400
committer  Jens Axboe <axboe@kernel.dk>    2017-08-11 10:07:19 -0400
commit     4a8b53be64045fe74398c178041463318f96d5e9
tree       99b075717c313e31529d25690d28b1245803b560
parent     d4acf3650c7c968f46ad932b9a25d1cc24cf4998
parent     a082b426286d1ead97fb87646ea361d528be023d
Merge branch 'nvme-4.13' of git://git.infradead.org/nvme into for-linus
Pull NVMe fixes from Christoph:
"A few more small fixes - the fc/lpfc update is the biggest by far."
 drivers/nvme/host/core.c         |  35
 drivers/nvme/host/pci.c          |  18
 drivers/nvme/target/fc.c         | 212
 drivers/scsi/lpfc/lpfc_attr.c    |   4
 drivers/scsi/lpfc/lpfc_debugfs.c |   5
 drivers/scsi/lpfc/lpfc_nvmet.c   |  30
 drivers/scsi/lpfc/lpfc_nvmet.h   |   1
 include/linux/nvme-fc-driver.h   |   7
 8 files changed, 261 insertions(+), 51 deletions(-)
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index c49f1f8b2e57..37046ac2c441 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -336,7 +336,7 @@ static int nvme_get_stream_params(struct nvme_ctrl *ctrl,
336 | 336 | ||
337 | c.directive.opcode = nvme_admin_directive_recv; | 337 | c.directive.opcode = nvme_admin_directive_recv; |
338 | c.directive.nsid = cpu_to_le32(nsid); | 338 | c.directive.nsid = cpu_to_le32(nsid); |
339 | c.directive.numd = cpu_to_le32(sizeof(*s)); | 339 | c.directive.numd = cpu_to_le32((sizeof(*s) >> 2) - 1); |
340 | c.directive.doper = NVME_DIR_RCV_ST_OP_PARAM; | 340 | c.directive.doper = NVME_DIR_RCV_ST_OP_PARAM; |
341 | c.directive.dtype = NVME_DIR_STREAMS; | 341 | c.directive.dtype = NVME_DIR_STREAMS; |
342 | 342 | ||
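Context for the fix above, not part of the commit: NUMD in NVMe admin commands such as Directive Receive is a zero's-based count of dwords, so passing the raw byte length of the parameter buffer over-reports the transfer size. A minimal user-space sketch of the corrected conversion (bytes_to_numd() is an illustrative helper, not a kernel function):

#include <stdio.h>

/* NUMD is a zero's-based dword count: divide the byte length by four
 * and subtract one.  The old code passed the byte count unchanged. */
static unsigned int bytes_to_numd(unsigned int len)
{
	return (len >> 2) - 1;	/* assumes len is a non-zero multiple of 4 */
}

int main(void)
{
	/* e.g. a 32-byte parameter structure is 8 dwords -> NUMD = 7 */
	printf("numd for 32 bytes = %u\n", bytes_to_numd(32));
	return 0;
}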
@@ -1509,7 +1509,7 @@ static void nvme_set_queue_limits(struct nvme_ctrl *ctrl,
1509 | blk_queue_write_cache(q, vwc, vwc); | 1509 | blk_queue_write_cache(q, vwc, vwc); |
1510 | } | 1510 | } |
1511 | 1511 | ||
1512 | static void nvme_configure_apst(struct nvme_ctrl *ctrl) | 1512 | static int nvme_configure_apst(struct nvme_ctrl *ctrl) |
1513 | { | 1513 | { |
1514 | /* | 1514 | /* |
1515 | * APST (Autonomous Power State Transition) lets us program a | 1515 | * APST (Autonomous Power State Transition) lets us program a |
@@ -1538,16 +1538,16 @@ static void nvme_configure_apst(struct nvme_ctrl *ctrl)
1538 | * then don't do anything. | 1538 | * then don't do anything. |
1539 | */ | 1539 | */ |
1540 | if (!ctrl->apsta) | 1540 | if (!ctrl->apsta) |
1541 | return; | 1541 | return 0; |
1542 | 1542 | ||
1543 | if (ctrl->npss > 31) { | 1543 | if (ctrl->npss > 31) { |
1544 | dev_warn(ctrl->device, "NPSS is invalid; not using APST\n"); | 1544 | dev_warn(ctrl->device, "NPSS is invalid; not using APST\n"); |
1545 | return; | 1545 | return 0; |
1546 | } | 1546 | } |
1547 | 1547 | ||
1548 | table = kzalloc(sizeof(*table), GFP_KERNEL); | 1548 | table = kzalloc(sizeof(*table), GFP_KERNEL); |
1549 | if (!table) | 1549 | if (!table) |
1550 | return; | 1550 | return 0; |
1551 | 1551 | ||
1552 | if (!ctrl->apst_enabled || ctrl->ps_max_latency_us == 0) { | 1552 | if (!ctrl->apst_enabled || ctrl->ps_max_latency_us == 0) { |
1553 | /* Turn off APST. */ | 1553 | /* Turn off APST. */ |
@@ -1629,6 +1629,7 @@ static void nvme_configure_apst(struct nvme_ctrl *ctrl)
1629 | dev_err(ctrl->device, "failed to set APST feature (%d)\n", ret); | 1629 | dev_err(ctrl->device, "failed to set APST feature (%d)\n", ret); |
1630 | 1630 | ||
1631 | kfree(table); | 1631 | kfree(table); |
1632 | return ret; | ||
1632 | } | 1633 | } |
1633 | 1634 | ||
1634 | static void nvme_set_latency_tolerance(struct device *dev, s32 val) | 1635 | static void nvme_set_latency_tolerance(struct device *dev, s32 val) |
@@ -1835,13 +1836,16 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
1835 | * In fabrics we need to verify the cntlid matches the | 1836 | * In fabrics we need to verify the cntlid matches the |
1836 | * admin connect | 1837 | * admin connect |
1837 | */ | 1838 | */ |
1838 | if (ctrl->cntlid != le16_to_cpu(id->cntlid)) | 1839 | if (ctrl->cntlid != le16_to_cpu(id->cntlid)) { |
1839 | ret = -EINVAL; | 1840 | ret = -EINVAL; |
1841 | goto out_free; | ||
1842 | } | ||
1840 | 1843 | ||
1841 | if (!ctrl->opts->discovery_nqn && !ctrl->kas) { | 1844 | if (!ctrl->opts->discovery_nqn && !ctrl->kas) { |
1842 | dev_err(ctrl->device, | 1845 | dev_err(ctrl->device, |
1843 | "keep-alive support is mandatory for fabrics\n"); | 1846 | "keep-alive support is mandatory for fabrics\n"); |
1844 | ret = -EINVAL; | 1847 | ret = -EINVAL; |
1848 | goto out_free; | ||
1845 | } | 1849 | } |
1846 | } else { | 1850 | } else { |
1847 | ctrl->cntlid = le16_to_cpu(id->cntlid); | 1851 | ctrl->cntlid = le16_to_cpu(id->cntlid); |
@@ -1856,11 +1860,20 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
1856 | else if (!ctrl->apst_enabled && prev_apst_enabled) | 1860 | else if (!ctrl->apst_enabled && prev_apst_enabled) |
1857 | dev_pm_qos_hide_latency_tolerance(ctrl->device); | 1861 | dev_pm_qos_hide_latency_tolerance(ctrl->device); |
1858 | 1862 | ||
1859 | nvme_configure_apst(ctrl); | 1863 | ret = nvme_configure_apst(ctrl); |
1860 | nvme_configure_directives(ctrl); | 1864 | if (ret < 0) |
1865 | return ret; | ||
1866 | |||
1867 | ret = nvme_configure_directives(ctrl); | ||
1868 | if (ret < 0) | ||
1869 | return ret; | ||
1861 | 1870 | ||
1862 | ctrl->identified = true; | 1871 | ctrl->identified = true; |
1863 | 1872 | ||
1873 | return 0; | ||
1874 | |||
1875 | out_free: | ||
1876 | kfree(id); | ||
1864 | return ret; | 1877 | return ret; |
1865 | } | 1878 | } |
1866 | EXPORT_SYMBOL_GPL(nvme_init_identify); | 1879 | EXPORT_SYMBOL_GPL(nvme_init_identify); |
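The hunks above change nvme_configure_apst() to return an int, have nvme_init_identify() check the configuration helpers' return values, and route the fabrics validation failures through a new out_free label so the identify buffer is freed on those paths. A stripped-down sketch of that allocate/check/unwind shape, using stand-in helpers rather than the real kernel functions:

#include <stdio.h>
#include <stdlib.h>

/* Stand-ins for the configure helpers, which now report failures. */
static int configure_apst(void)       { return 0; }
static int configure_directives(void) { return 0; }

static int init_identify(void)
{
	void *id = malloc(4096);	/* models the identify buffer */
	int ret;

	if (!id)
		return -1;

	ret = configure_apst();
	if (ret < 0)
		goto out_free;

	ret = configure_directives();
	if (ret < 0)
		goto out_free;

	free(id);
	return 0;

out_free:
	free(id);
	return ret;
}

int main(void)
{
	printf("init_identify() = %d\n", init_identify());
	return 0;
}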
@@ -2004,9 +2017,11 @@ static ssize_t wwid_show(struct device *dev, struct device_attribute *attr,
2004 | if (memchr_inv(ns->eui, 0, sizeof(ns->eui))) | 2017 | if (memchr_inv(ns->eui, 0, sizeof(ns->eui))) |
2005 | return sprintf(buf, "eui.%8phN\n", ns->eui); | 2018 | return sprintf(buf, "eui.%8phN\n", ns->eui); |
2006 | 2019 | ||
2007 | while (ctrl->serial[serial_len - 1] == ' ') | 2020 | while (serial_len > 0 && (ctrl->serial[serial_len - 1] == ' ' || |
2021 | ctrl->serial[serial_len - 1] == '\0')) | ||
2008 | serial_len--; | 2022 | serial_len--; |
2009 | while (ctrl->model[model_len - 1] == ' ') | 2023 | while (model_len > 0 && (ctrl->model[model_len - 1] == ' ' || |
2024 | ctrl->model[model_len - 1] == '\0')) | ||
2010 | model_len--; | 2025 | model_len--; |
2011 | 2026 | ||
2012 | return sprintf(buf, "nvme.%04x-%*phN-%*phN-%08x\n", ctrl->vid, | 2027 | return sprintf(buf, "nvme.%04x-%*phN-%*phN-%08x\n", ctrl->vid, |
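The wwid_show() hunk above hardens the trimming loops: they now stop at the start of the field and treat trailing NUL padding like trailing spaces, so an all-padding serial or model string cannot drive the index negative. A self-contained sketch of that trimming (trimmed_len() is illustrative, not the kernel's code):

#include <stdio.h>

/* Drop trailing spaces and NUL padding, never walking past the start
 * of the buffer, as the added bounds check ensures. */
static size_t trimmed_len(const char *field, size_t len)
{
	while (len > 0 && (field[len - 1] == ' ' || field[len - 1] == '\0'))
		len--;
	return len;
}

int main(void)
{
	const char serial[8] = { 'S', 'N', '1', ' ', ' ', '\0', '\0', '\0' };
	const char empty[4]  = { '\0', '\0', '\0', '\0' };

	printf("serial -> %zu, empty -> %zu\n",
	       trimmed_len(serial, sizeof(serial)),	/* 3 */
	       trimmed_len(empty, sizeof(empty)));	/* 0, no underflow */
	return 0;
}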
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index cd888a47d0fc..74a124a06264 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -1558,11 +1558,9 @@ static inline void nvme_release_cmb(struct nvme_dev *dev)
1558 | if (dev->cmb) { | 1558 | if (dev->cmb) { |
1559 | iounmap(dev->cmb); | 1559 | iounmap(dev->cmb); |
1560 | dev->cmb = NULL; | 1560 | dev->cmb = NULL; |
1561 | if (dev->cmbsz) { | 1561 | sysfs_remove_file_from_group(&dev->ctrl.device->kobj, |
1562 | sysfs_remove_file_from_group(&dev->ctrl.device->kobj, | 1562 | &dev_attr_cmb.attr, NULL); |
1563 | &dev_attr_cmb.attr, NULL); | 1563 | dev->cmbsz = 0; |
1564 | dev->cmbsz = 0; | ||
1565 | } | ||
1566 | } | 1564 | } |
1567 | } | 1565 | } |
1568 | 1566 | ||
@@ -1953,16 +1951,14 @@ static int nvme_pci_enable(struct nvme_dev *dev)
1953 | 1951 | ||
1954 | /* | 1952 | /* |
1955 | * CMBs can currently only exist on >=1.2 PCIe devices. We only | 1953 | * CMBs can currently only exist on >=1.2 PCIe devices. We only |
1956 | * populate sysfs if a CMB is implemented. Note that we add the | 1954 | * populate sysfs if a CMB is implemented. Since nvme_dev_attrs_group |
1957 | * CMB attribute to the nvme_ctrl kobj which removes the need to remove | 1955 | * has no name we can pass NULL as final argument to |
1958 | * it on exit. Since nvme_dev_attrs_group has no name we can pass | 1956 | * sysfs_add_file_to_group. |
1959 | * NULL as final argument to sysfs_add_file_to_group. | ||
1960 | */ | 1957 | */ |
1961 | 1958 | ||
1962 | if (readl(dev->bar + NVME_REG_VS) >= NVME_VS(1, 2, 0)) { | 1959 | if (readl(dev->bar + NVME_REG_VS) >= NVME_VS(1, 2, 0)) { |
1963 | dev->cmb = nvme_map_cmb(dev); | 1960 | dev->cmb = nvme_map_cmb(dev); |
1964 | 1961 | if (dev->cmb) { | |
1965 | if (dev->cmbsz) { | ||
1966 | if (sysfs_add_file_to_group(&dev->ctrl.device->kobj, | 1962 | if (sysfs_add_file_to_group(&dev->ctrl.device->kobj, |
1967 | &dev_attr_cmb.attr, NULL)) | 1963 | &dev_attr_cmb.attr, NULL)) |
1968 | dev_warn(dev->ctrl.device, | 1964 | dev_warn(dev->ctrl.device, |
diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c
index 31ca55dfcb1d..1b7f2520a20d 100644
--- a/drivers/nvme/target/fc.c
+++ b/drivers/nvme/target/fc.c
@@ -114,6 +114,11 @@ struct nvmet_fc_tgtport {
114 | struct kref ref; | 114 | struct kref ref; |
115 | }; | 115 | }; |
116 | 116 | ||
117 | struct nvmet_fc_defer_fcp_req { | ||
118 | struct list_head req_list; | ||
119 | struct nvmefc_tgt_fcp_req *fcp_req; | ||
120 | }; | ||
121 | |||
117 | struct nvmet_fc_tgt_queue { | 122 | struct nvmet_fc_tgt_queue { |
118 | bool ninetypercent; | 123 | bool ninetypercent; |
119 | u16 qid; | 124 | u16 qid; |
@@ -132,6 +137,8 @@ struct nvmet_fc_tgt_queue {
132 | struct nvmet_fc_tgt_assoc *assoc; | 137 | struct nvmet_fc_tgt_assoc *assoc; |
133 | struct nvmet_fc_fcp_iod *fod; /* array of fcp_iods */ | 138 | struct nvmet_fc_fcp_iod *fod; /* array of fcp_iods */ |
134 | struct list_head fod_list; | 139 | struct list_head fod_list; |
140 | struct list_head pending_cmd_list; | ||
141 | struct list_head avail_defer_list; | ||
135 | struct workqueue_struct *work_q; | 142 | struct workqueue_struct *work_q; |
136 | struct kref ref; | 143 | struct kref ref; |
137 | } __aligned(sizeof(unsigned long long)); | 144 | } __aligned(sizeof(unsigned long long)); |
@@ -223,6 +230,8 @@ static void nvmet_fc_tgt_q_put(struct nvmet_fc_tgt_queue *queue);
223 | static int nvmet_fc_tgt_q_get(struct nvmet_fc_tgt_queue *queue); | 230 | static int nvmet_fc_tgt_q_get(struct nvmet_fc_tgt_queue *queue); |
224 | static void nvmet_fc_tgtport_put(struct nvmet_fc_tgtport *tgtport); | 231 | static void nvmet_fc_tgtport_put(struct nvmet_fc_tgtport *tgtport); |
225 | static int nvmet_fc_tgtport_get(struct nvmet_fc_tgtport *tgtport); | 232 | static int nvmet_fc_tgtport_get(struct nvmet_fc_tgtport *tgtport); |
233 | static void nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport, | ||
234 | struct nvmet_fc_fcp_iod *fod); | ||
226 | 235 | ||
227 | 236 | ||
228 | /* *********************** FC-NVME DMA Handling **************************** */ | 237 | /* *********************** FC-NVME DMA Handling **************************** */ |
@@ -463,9 +472,9 @@ static struct nvmet_fc_fcp_iod *
463 | nvmet_fc_alloc_fcp_iod(struct nvmet_fc_tgt_queue *queue) | 472 | nvmet_fc_alloc_fcp_iod(struct nvmet_fc_tgt_queue *queue) |
464 | { | 473 | { |
465 | static struct nvmet_fc_fcp_iod *fod; | 474 | static struct nvmet_fc_fcp_iod *fod; |
466 | unsigned long flags; | ||
467 | 475 | ||
468 | spin_lock_irqsave(&queue->qlock, flags); | 476 | lockdep_assert_held(&queue->qlock); |
477 | |||
469 | fod = list_first_entry_or_null(&queue->fod_list, | 478 | fod = list_first_entry_or_null(&queue->fod_list, |
470 | struct nvmet_fc_fcp_iod, fcp_list); | 479 | struct nvmet_fc_fcp_iod, fcp_list); |
471 | if (fod) { | 480 | if (fod) { |
@@ -477,17 +486,37 @@ nvmet_fc_alloc_fcp_iod(struct nvmet_fc_tgt_queue *queue)
477 | * will "inherit" that reference. | 486 | * will "inherit" that reference. |
478 | */ | 487 | */ |
479 | } | 488 | } |
480 | spin_unlock_irqrestore(&queue->qlock, flags); | ||
481 | return fod; | 489 | return fod; |
482 | } | 490 | } |
483 | 491 | ||
484 | 492 | ||
485 | static void | 493 | static void |
494 | nvmet_fc_queue_fcp_req(struct nvmet_fc_tgtport *tgtport, | ||
495 | struct nvmet_fc_tgt_queue *queue, | ||
496 | struct nvmefc_tgt_fcp_req *fcpreq) | ||
497 | { | ||
498 | struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private; | ||
499 | |||
500 | /* | ||
501 | * put all admin cmds on hw queue id 0. All io commands go to | ||
502 | * the respective hw queue based on a modulo basis | ||
503 | */ | ||
504 | fcpreq->hwqid = queue->qid ? | ||
505 | ((queue->qid - 1) % tgtport->ops->max_hw_queues) : 0; | ||
506 | |||
507 | if (tgtport->ops->target_features & NVMET_FCTGTFEAT_CMD_IN_ISR) | ||
508 | queue_work_on(queue->cpu, queue->work_q, &fod->work); | ||
509 | else | ||
510 | nvmet_fc_handle_fcp_rqst(tgtport, fod); | ||
511 | } | ||
512 | |||
513 | static void | ||
486 | nvmet_fc_free_fcp_iod(struct nvmet_fc_tgt_queue *queue, | 514 | nvmet_fc_free_fcp_iod(struct nvmet_fc_tgt_queue *queue, |
487 | struct nvmet_fc_fcp_iod *fod) | 515 | struct nvmet_fc_fcp_iod *fod) |
488 | { | 516 | { |
489 | struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq; | 517 | struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq; |
490 | struct nvmet_fc_tgtport *tgtport = fod->tgtport; | 518 | struct nvmet_fc_tgtport *tgtport = fod->tgtport; |
519 | struct nvmet_fc_defer_fcp_req *deferfcp; | ||
491 | unsigned long flags; | 520 | unsigned long flags; |
492 | 521 | ||
493 | fc_dma_sync_single_for_cpu(tgtport->dev, fod->rspdma, | 522 | fc_dma_sync_single_for_cpu(tgtport->dev, fod->rspdma, |
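The new nvmet_fc_queue_fcp_req() helper above centralizes hw queue selection: the admin queue (qid 0) is pinned to hw queue 0 and I/O queues are spread across the LLDD's hw queues by modulo. A stand-alone illustration of that mapping (select_hwqid() is illustrative, not a kernel symbol):

#include <stdio.h>

/* Same arithmetic as the fcpreq->hwqid assignment in the patch. */
static unsigned int select_hwqid(unsigned int qid, unsigned int max_hw_queues)
{
	return qid ? (qid - 1) % max_hw_queues : 0;
}

int main(void)
{
	unsigned int qid;

	/* e.g. with 4 hw queues: qid 0 -> 0, qids 1..5 -> 0,1,2,3,0 */
	for (qid = 0; qid <= 5; qid++)
		printf("nvme qid %u -> hwqid %u\n", qid, select_hwqid(qid, 4));
	return 0;
}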
@@ -495,21 +524,56 @@ nvmet_fc_free_fcp_iod(struct nvmet_fc_tgt_queue *queue,
495 | 524 | ||
496 | fcpreq->nvmet_fc_private = NULL; | 525 | fcpreq->nvmet_fc_private = NULL; |
497 | 526 | ||
498 | spin_lock_irqsave(&queue->qlock, flags); | ||
499 | list_add_tail(&fod->fcp_list, &fod->queue->fod_list); | ||
500 | fod->active = false; | 527 | fod->active = false; |
501 | fod->abort = false; | 528 | fod->abort = false; |
502 | fod->aborted = false; | 529 | fod->aborted = false; |
503 | fod->writedataactive = false; | 530 | fod->writedataactive = false; |
504 | fod->fcpreq = NULL; | 531 | fod->fcpreq = NULL; |
532 | |||
533 | tgtport->ops->fcp_req_release(&tgtport->fc_target_port, fcpreq); | ||
534 | |||
535 | spin_lock_irqsave(&queue->qlock, flags); | ||
536 | deferfcp = list_first_entry_or_null(&queue->pending_cmd_list, | ||
537 | struct nvmet_fc_defer_fcp_req, req_list); | ||
538 | if (!deferfcp) { | ||
539 | list_add_tail(&fod->fcp_list, &fod->queue->fod_list); | ||
540 | spin_unlock_irqrestore(&queue->qlock, flags); | ||
541 | |||
542 | /* Release reference taken at queue lookup and fod allocation */ | ||
543 | nvmet_fc_tgt_q_put(queue); | ||
544 | return; | ||
545 | } | ||
546 | |||
547 | /* Re-use the fod for the next pending cmd that was deferred */ | ||
548 | list_del(&deferfcp->req_list); | ||
549 | |||
550 | fcpreq = deferfcp->fcp_req; | ||
551 | |||
552 | /* deferfcp can be reused for another IO at a later date */ | ||
553 | list_add_tail(&deferfcp->req_list, &queue->avail_defer_list); | ||
554 | |||
505 | spin_unlock_irqrestore(&queue->qlock, flags); | 555 | spin_unlock_irqrestore(&queue->qlock, flags); |
506 | 556 | ||
557 | /* Save NVME CMD IO in fod */ | ||
558 | memcpy(&fod->cmdiubuf, fcpreq->rspaddr, fcpreq->rsplen); | ||
559 | |||
560 | /* Setup new fcpreq to be processed */ | ||
561 | fcpreq->rspaddr = NULL; | ||
562 | fcpreq->rsplen = 0; | ||
563 | fcpreq->nvmet_fc_private = fod; | ||
564 | fod->fcpreq = fcpreq; | ||
565 | fod->active = true; | ||
566 | |||
567 | /* inform LLDD IO is now being processed */ | ||
568 | tgtport->ops->defer_rcv(&tgtport->fc_target_port, fcpreq); | ||
569 | |||
570 | /* Submit deferred IO for processing */ | ||
571 | nvmet_fc_queue_fcp_req(tgtport, queue, fcpreq); | ||
572 | |||
507 | /* | 573 | /* |
508 | * release the reference taken at queue lookup and fod allocation | 574 | * Leave the queue lookup get reference taken when |
575 | * fod was originally allocated. | ||
509 | */ | 576 | */ |
510 | nvmet_fc_tgt_q_put(queue); | ||
511 | |||
512 | tgtport->ops->fcp_req_release(&tgtport->fc_target_port, fcpreq); | ||
513 | } | 577 | } |
514 | 578 | ||
515 | static int | 579 | static int |
@@ -569,6 +633,8 @@ nvmet_fc_alloc_target_queue(struct nvmet_fc_tgt_assoc *assoc,
569 | queue->port = assoc->tgtport->port; | 633 | queue->port = assoc->tgtport->port; |
570 | queue->cpu = nvmet_fc_queue_to_cpu(assoc->tgtport, qid); | 634 | queue->cpu = nvmet_fc_queue_to_cpu(assoc->tgtport, qid); |
571 | INIT_LIST_HEAD(&queue->fod_list); | 635 | INIT_LIST_HEAD(&queue->fod_list); |
636 | INIT_LIST_HEAD(&queue->avail_defer_list); | ||
637 | INIT_LIST_HEAD(&queue->pending_cmd_list); | ||
572 | atomic_set(&queue->connected, 0); | 638 | atomic_set(&queue->connected, 0); |
573 | atomic_set(&queue->sqtail, 0); | 639 | atomic_set(&queue->sqtail, 0); |
574 | atomic_set(&queue->rsn, 1); | 640 | atomic_set(&queue->rsn, 1); |
@@ -638,6 +704,7 @@ nvmet_fc_delete_target_queue(struct nvmet_fc_tgt_queue *queue)
638 | { | 704 | { |
639 | struct nvmet_fc_tgtport *tgtport = queue->assoc->tgtport; | 705 | struct nvmet_fc_tgtport *tgtport = queue->assoc->tgtport; |
640 | struct nvmet_fc_fcp_iod *fod = queue->fod; | 706 | struct nvmet_fc_fcp_iod *fod = queue->fod; |
707 | struct nvmet_fc_defer_fcp_req *deferfcp; | ||
641 | unsigned long flags; | 708 | unsigned long flags; |
642 | int i, writedataactive; | 709 | int i, writedataactive; |
643 | bool disconnect; | 710 | bool disconnect; |
@@ -666,6 +733,35 @@ nvmet_fc_delete_target_queue(struct nvmet_fc_tgt_queue *queue)
666 | } | 733 | } |
667 | } | 734 | } |
668 | } | 735 | } |
736 | |||
737 | /* Cleanup defer'ed IOs in queue */ | ||
738 | list_for_each_entry(deferfcp, &queue->avail_defer_list, req_list) { | ||
739 | list_del(&deferfcp->req_list); | ||
740 | kfree(deferfcp); | ||
741 | } | ||
742 | |||
743 | for (;;) { | ||
744 | deferfcp = list_first_entry_or_null(&queue->pending_cmd_list, | ||
745 | struct nvmet_fc_defer_fcp_req, req_list); | ||
746 | if (!deferfcp) | ||
747 | break; | ||
748 | |||
749 | list_del(&deferfcp->req_list); | ||
750 | spin_unlock_irqrestore(&queue->qlock, flags); | ||
751 | |||
752 | tgtport->ops->defer_rcv(&tgtport->fc_target_port, | ||
753 | deferfcp->fcp_req); | ||
754 | |||
755 | tgtport->ops->fcp_abort(&tgtport->fc_target_port, | ||
756 | deferfcp->fcp_req); | ||
757 | |||
758 | tgtport->ops->fcp_req_release(&tgtport->fc_target_port, | ||
759 | deferfcp->fcp_req); | ||
760 | |||
761 | kfree(deferfcp); | ||
762 | |||
763 | spin_lock_irqsave(&queue->qlock, flags); | ||
764 | } | ||
669 | spin_unlock_irqrestore(&queue->qlock, flags); | 765 | spin_unlock_irqrestore(&queue->qlock, flags); |
670 | 766 | ||
671 | flush_workqueue(queue->work_q); | 767 | flush_workqueue(queue->work_q); |
@@ -2172,11 +2268,38 @@ nvmet_fc_handle_fcp_rqst_work(struct work_struct *work)
2172 | * Pass a FC-NVME FCP CMD IU received from the FC link to the nvmet-fc | 2268 | * Pass a FC-NVME FCP CMD IU received from the FC link to the nvmet-fc |
2173 | * layer for processing. | 2269 | * layer for processing. |
2174 | * | 2270 | * |
2175 | * The nvmet-fc layer will copy cmd payload to an internal structure for | 2271 | * The nvmet_fc layer allocates a local job structure (struct |
2176 | * processing. As such, upon completion of the routine, the LLDD may | 2272 | * nvmet_fc_fcp_iod) from the queue for the io and copies the |
2177 | * immediately free/reuse the CMD IU buffer passed in the call. | 2273 | * CMD IU buffer to the job structure. As such, on a successful |
2274 | * completion (returns 0), the LLDD may immediately free/reuse | ||
2275 | * the CMD IU buffer passed in the call. | ||
2276 | * | ||
2277 | * However, in some circumstances, due to the packetized nature of FC | ||
2278 | * and the api of the FC LLDD which may issue a hw command to send the | ||
2279 | * response, but the LLDD may not get the hw completion for that command | ||
2280 | * and upcall the nvmet_fc layer before a new command may be | ||
2281 | * asynchronously received - its possible for a command to be received | ||
2282 | * before the LLDD and nvmet_fc have recycled the job structure. It gives | ||
2283 | * the appearance of more commands received than fits in the sq. | ||
2284 | * To alleviate this scenario, a temporary queue is maintained in the | ||
2285 | * transport for pending LLDD requests waiting for a queue job structure. | ||
2286 | * In these "overrun" cases, a temporary queue element is allocated | ||
2287 | * the LLDD request and CMD iu buffer information remembered, and the | ||
2288 | * routine returns a -EOVERFLOW status. Subsequently, when a queue job | ||
2289 | * structure is freed, it is immediately reallocated for anything on the | ||
2290 | * pending request list. The LLDDs defer_rcv() callback is called, | ||
2291 | * informing the LLDD that it may reuse the CMD IU buffer, and the io | ||
2292 | * is then started normally with the transport. | ||
2178 | * | 2293 | * |
2179 | * If this routine returns error, the lldd should abort the exchange. | 2294 | * The LLDD, when receiving an -EOVERFLOW completion status, is to treat |
2295 | * the completion as successful but must not reuse the CMD IU buffer | ||
2296 | * until the LLDD's defer_rcv() callback has been called for the | ||
2297 | * corresponding struct nvmefc_tgt_fcp_req pointer. | ||
2298 | * | ||
2299 | * If there is any other condition in which an error occurs, the | ||
2300 | * transport will return a non-zero status indicating the error. | ||
2301 | * In all cases other than -EOVERFLOW, the transport has not accepted the | ||
2302 | * request and the LLDD should abort the exchange. | ||
2180 | * | 2303 | * |
2181 | * @target_port: pointer to the (registered) target port the FCP CMD IU | 2304 | * @target_port: pointer to the (registered) target port the FCP CMD IU |
2182 | * was received on. | 2305 | * was received on. |
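The rewritten comment above defines a three-way contract for the LLDD around the CMD IU buffer. A compact user-space model of that contract (the enum and classify_rcv_rc() are illustrative, not part of the nvmet-fc API):

#include <errno.h>
#include <stdio.h>

/* Buffer handling implied by nvmet_fc_rcv_fcp_req()'s return value. */
enum cmdiu_policy {
	REUSE_NOW,		/* 0: transport copied the CMD IU */
	HOLD_UNTIL_DEFER_RCV,	/* -EOVERFLOW: transport still references it */
	ABORT_AND_REUSE,	/* other errors: abort the exchange */
};

static enum cmdiu_policy classify_rcv_rc(int rc)
{
	if (rc == 0)
		return REUSE_NOW;
	if (rc == -EOVERFLOW)
		return HOLD_UNTIL_DEFER_RCV;
	return ABORT_AND_REUSE;
}

int main(void)
{
	printf("%d %d %d\n", classify_rcv_rc(0), classify_rcv_rc(-EOVERFLOW),
	       classify_rcv_rc(-ENOENT));
	return 0;
}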
@@ -2194,6 +2317,8 @@ nvmet_fc_rcv_fcp_req(struct nvmet_fc_target_port *target_port,
2194 | struct nvme_fc_cmd_iu *cmdiu = cmdiubuf; | 2317 | struct nvme_fc_cmd_iu *cmdiu = cmdiubuf; |
2195 | struct nvmet_fc_tgt_queue *queue; | 2318 | struct nvmet_fc_tgt_queue *queue; |
2196 | struct nvmet_fc_fcp_iod *fod; | 2319 | struct nvmet_fc_fcp_iod *fod; |
2320 | struct nvmet_fc_defer_fcp_req *deferfcp; | ||
2321 | unsigned long flags; | ||
2197 | 2322 | ||
2198 | /* validate iu, so the connection id can be used to find the queue */ | 2323 | /* validate iu, so the connection id can be used to find the queue */ |
2199 | if ((cmdiubuf_len != sizeof(*cmdiu)) || | 2324 | if ((cmdiubuf_len != sizeof(*cmdiu)) || |
@@ -2214,29 +2339,60 @@ nvmet_fc_rcv_fcp_req(struct nvmet_fc_target_port *target_port,
2214 | * when the fod is freed. | 2339 | * when the fod is freed. |
2215 | */ | 2340 | */ |
2216 | 2341 | ||
2342 | spin_lock_irqsave(&queue->qlock, flags); | ||
2343 | |||
2217 | fod = nvmet_fc_alloc_fcp_iod(queue); | 2344 | fod = nvmet_fc_alloc_fcp_iod(queue); |
2218 | if (!fod) { | 2345 | if (fod) { |
2346 | spin_unlock_irqrestore(&queue->qlock, flags); | ||
2347 | |||
2348 | fcpreq->nvmet_fc_private = fod; | ||
2349 | fod->fcpreq = fcpreq; | ||
2350 | |||
2351 | memcpy(&fod->cmdiubuf, cmdiubuf, cmdiubuf_len); | ||
2352 | |||
2353 | nvmet_fc_queue_fcp_req(tgtport, queue, fcpreq); | ||
2354 | |||
2355 | return 0; | ||
2356 | } | ||
2357 | |||
2358 | if (!tgtport->ops->defer_rcv) { | ||
2359 | spin_unlock_irqrestore(&queue->qlock, flags); | ||
2219 | /* release the queue lookup reference */ | 2360 | /* release the queue lookup reference */ |
2220 | nvmet_fc_tgt_q_put(queue); | 2361 | nvmet_fc_tgt_q_put(queue); |
2221 | return -ENOENT; | 2362 | return -ENOENT; |
2222 | } | 2363 | } |
2223 | 2364 | ||
2224 | fcpreq->nvmet_fc_private = fod; | 2365 | deferfcp = list_first_entry_or_null(&queue->avail_defer_list, |
2225 | fod->fcpreq = fcpreq; | 2366 | struct nvmet_fc_defer_fcp_req, req_list); |
2226 | /* | 2367 | if (deferfcp) { |
2227 | * put all admin cmds on hw queue id 0. All io commands go to | 2368 | /* Just re-use one that was previously allocated */ |
2228 | * the respective hw queue based on a modulo basis | 2369 | list_del(&deferfcp->req_list); |
2229 | */ | 2370 | } else { |
2230 | fcpreq->hwqid = queue->qid ? | 2371 | spin_unlock_irqrestore(&queue->qlock, flags); |
2231 | ((queue->qid - 1) % tgtport->ops->max_hw_queues) : 0; | ||
2232 | memcpy(&fod->cmdiubuf, cmdiubuf, cmdiubuf_len); | ||
2233 | 2372 | ||
2234 | if (tgtport->ops->target_features & NVMET_FCTGTFEAT_CMD_IN_ISR) | 2373 | /* Now we need to dynamically allocate one */ |
2235 | queue_work_on(queue->cpu, queue->work_q, &fod->work); | 2374 | deferfcp = kmalloc(sizeof(*deferfcp), GFP_KERNEL); |
2236 | else | 2375 | if (!deferfcp) { |
2237 | nvmet_fc_handle_fcp_rqst(tgtport, fod); | 2376 | /* release the queue lookup reference */ |
2377 | nvmet_fc_tgt_q_put(queue); | ||
2378 | return -ENOMEM; | ||
2379 | } | ||
2380 | spin_lock_irqsave(&queue->qlock, flags); | ||
2381 | } | ||
2238 | 2382 | ||
2239 | return 0; | 2383 | /* For now, use rspaddr / rsplen to save payload information */ |
2384 | fcpreq->rspaddr = cmdiubuf; | ||
2385 | fcpreq->rsplen = cmdiubuf_len; | ||
2386 | deferfcp->fcp_req = fcpreq; | ||
2387 | |||
2388 | /* defer processing till a fod becomes available */ | ||
2389 | list_add_tail(&deferfcp->req_list, &queue->pending_cmd_list); | ||
2390 | |||
2391 | /* NOTE: the queue lookup reference is still valid */ | ||
2392 | |||
2393 | spin_unlock_irqrestore(&queue->qlock, flags); | ||
2394 | |||
2395 | return -EOVERFLOW; | ||
2240 | } | 2396 | } |
2241 | EXPORT_SYMBOL_GPL(nvmet_fc_rcv_fcp_req); | 2397 | EXPORT_SYMBOL_GPL(nvmet_fc_rcv_fcp_req); |
2242 | 2398 | ||
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 4ed48ed38e79..7ee1a94c0b33 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -205,8 +205,10 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
205 | atomic_read(&tgtp->xmt_ls_rsp_error)); | 205 | atomic_read(&tgtp->xmt_ls_rsp_error)); |
206 | 206 | ||
207 | len += snprintf(buf+len, PAGE_SIZE-len, | 207 | len += snprintf(buf+len, PAGE_SIZE-len, |
208 | "FCP: Rcv %08x Release %08x Drop %08x\n", | 208 | "FCP: Rcv %08x Defer %08x Release %08x " |
209 | "Drop %08x\n", | ||
209 | atomic_read(&tgtp->rcv_fcp_cmd_in), | 210 | atomic_read(&tgtp->rcv_fcp_cmd_in), |
211 | atomic_read(&tgtp->rcv_fcp_cmd_defer), | ||
210 | atomic_read(&tgtp->xmt_fcp_release), | 212 | atomic_read(&tgtp->xmt_fcp_release), |
211 | atomic_read(&tgtp->rcv_fcp_cmd_drop)); | 213 | atomic_read(&tgtp->rcv_fcp_cmd_drop)); |
212 | 214 | ||
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index 5cc8b0f7d885..744f3f395b64 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -782,8 +782,11 @@ lpfc_debugfs_nvmestat_data(struct lpfc_vport *vport, char *buf, int size)
782 | atomic_read(&tgtp->xmt_ls_rsp_error)); | 782 | atomic_read(&tgtp->xmt_ls_rsp_error)); |
783 | 783 | ||
784 | len += snprintf(buf + len, size - len, | 784 | len += snprintf(buf + len, size - len, |
785 | "FCP: Rcv %08x Drop %08x\n", | 785 | "FCP: Rcv %08x Defer %08x Release %08x " |
786 | "Drop %08x\n", | ||
786 | atomic_read(&tgtp->rcv_fcp_cmd_in), | 787 | atomic_read(&tgtp->rcv_fcp_cmd_in), |
788 | atomic_read(&tgtp->rcv_fcp_cmd_defer), | ||
789 | atomic_read(&tgtp->xmt_fcp_release), | ||
787 | atomic_read(&tgtp->rcv_fcp_cmd_drop)); | 790 | atomic_read(&tgtp->rcv_fcp_cmd_drop)); |
788 | 791 | ||
789 | if (atomic_read(&tgtp->rcv_fcp_cmd_in) != | 792 | if (atomic_read(&tgtp->rcv_fcp_cmd_in) != |
diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c
index fbeec344c6cc..bbbd0f84160d 100644
--- a/drivers/scsi/lpfc/lpfc_nvmet.c
+++ b/drivers/scsi/lpfc/lpfc_nvmet.c
@@ -841,12 +841,31 @@ lpfc_nvmet_xmt_fcp_release(struct nvmet_fc_target_port *tgtport,
841 | lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf); | 841 | lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf); |
842 | } | 842 | } |
843 | 843 | ||
844 | static void | ||
845 | lpfc_nvmet_defer_rcv(struct nvmet_fc_target_port *tgtport, | ||
846 | struct nvmefc_tgt_fcp_req *rsp) | ||
847 | { | ||
848 | struct lpfc_nvmet_tgtport *tgtp; | ||
849 | struct lpfc_nvmet_rcv_ctx *ctxp = | ||
850 | container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req); | ||
851 | struct rqb_dmabuf *nvmebuf = ctxp->rqb_buffer; | ||
852 | struct lpfc_hba *phba = ctxp->phba; | ||
853 | |||
854 | lpfc_nvmeio_data(phba, "NVMET DEFERRCV: xri x%x sz %d CPU %02x\n", | ||
855 | ctxp->oxid, ctxp->size, smp_processor_id()); | ||
856 | |||
857 | tgtp = phba->targetport->private; | ||
858 | atomic_inc(&tgtp->rcv_fcp_cmd_defer); | ||
859 | lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */ | ||
860 | } | ||
861 | |||
844 | static struct nvmet_fc_target_template lpfc_tgttemplate = { | 862 | static struct nvmet_fc_target_template lpfc_tgttemplate = { |
845 | .targetport_delete = lpfc_nvmet_targetport_delete, | 863 | .targetport_delete = lpfc_nvmet_targetport_delete, |
846 | .xmt_ls_rsp = lpfc_nvmet_xmt_ls_rsp, | 864 | .xmt_ls_rsp = lpfc_nvmet_xmt_ls_rsp, |
847 | .fcp_op = lpfc_nvmet_xmt_fcp_op, | 865 | .fcp_op = lpfc_nvmet_xmt_fcp_op, |
848 | .fcp_abort = lpfc_nvmet_xmt_fcp_abort, | 866 | .fcp_abort = lpfc_nvmet_xmt_fcp_abort, |
849 | .fcp_req_release = lpfc_nvmet_xmt_fcp_release, | 867 | .fcp_req_release = lpfc_nvmet_xmt_fcp_release, |
868 | .defer_rcv = lpfc_nvmet_defer_rcv, | ||
850 | 869 | ||
851 | .max_hw_queues = 1, | 870 | .max_hw_queues = 1, |
852 | .max_sgl_segments = LPFC_NVMET_DEFAULT_SEGS, | 871 | .max_sgl_segments = LPFC_NVMET_DEFAULT_SEGS, |
@@ -1504,6 +1523,17 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
1504 | return; | 1523 | return; |
1505 | } | 1524 | } |
1506 | 1525 | ||
1526 | /* Processing of FCP command is deferred */ | ||
1527 | if (rc == -EOVERFLOW) { | ||
1528 | lpfc_nvmeio_data(phba, | ||
1529 | "NVMET RCV BUSY: xri x%x sz %d from %06x\n", | ||
1530 | oxid, size, sid); | ||
1531 | /* defer reposting rcv buffer till .defer_rcv callback */ | ||
1532 | ctxp->rqb_buffer = nvmebuf; | ||
1533 | atomic_inc(&tgtp->rcv_fcp_cmd_out); | ||
1534 | return; | ||
1535 | } | ||
1536 | |||
1507 | atomic_inc(&tgtp->rcv_fcp_cmd_drop); | 1537 | atomic_inc(&tgtp->rcv_fcp_cmd_drop); |
1508 | lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, | 1538 | lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, |
1509 | "6159 FCP Drop IO x%x: err x%x: x%x x%x x%x\n", | 1539 | "6159 FCP Drop IO x%x: err x%x: x%x x%x x%x\n", |
diff --git a/drivers/scsi/lpfc/lpfc_nvmet.h b/drivers/scsi/lpfc/lpfc_nvmet.h
index e675ef17be08..48a76788b003 100644
--- a/drivers/scsi/lpfc/lpfc_nvmet.h
+++ b/drivers/scsi/lpfc/lpfc_nvmet.h
@@ -49,6 +49,7 @@ struct lpfc_nvmet_tgtport {
49 | atomic_t rcv_fcp_cmd_in; | 49 | atomic_t rcv_fcp_cmd_in; |
50 | atomic_t rcv_fcp_cmd_out; | 50 | atomic_t rcv_fcp_cmd_out; |
51 | atomic_t rcv_fcp_cmd_drop; | 51 | atomic_t rcv_fcp_cmd_drop; |
52 | atomic_t rcv_fcp_cmd_defer; | ||
52 | atomic_t xmt_fcp_release; | 53 | atomic_t xmt_fcp_release; |
53 | 54 | ||
54 | /* Stats counters - lpfc_nvmet_xmt_fcp_op */ | 55 | /* Stats counters - lpfc_nvmet_xmt_fcp_op */ |
diff --git a/include/linux/nvme-fc-driver.h b/include/linux/nvme-fc-driver.h
index 6c8c5d8041b7..2591878c1d48 100644
--- a/include/linux/nvme-fc-driver.h
+++ b/include/linux/nvme-fc-driver.h
@@ -346,6 +346,11 @@ struct nvme_fc_remote_port {
346 | * indicating an FC transport Aborted status. | 346 | * indicating an FC transport Aborted status. |
347 | * Entrypoint is Mandatory. | 347 | * Entrypoint is Mandatory. |
348 | * | 348 | * |
349 | * @defer_rcv: Called by the transport to signal the LLLD that it has | ||
350 | * begun processing of a previously received NVME CMD IU. The LLDD | ||
351 | * is now free to re-use the rcv buffer associated with the | ||
352 | * nvmefc_tgt_fcp_req. | ||
353 | * | ||
349 | * @max_hw_queues: indicates the maximum number of hw queues the LLDD | 354 | * @max_hw_queues: indicates the maximum number of hw queues the LLDD |
350 | * supports for cpu affinitization. | 355 | * supports for cpu affinitization. |
351 | * Value is Mandatory. Must be at least 1. | 356 | * Value is Mandatory. Must be at least 1. |
@@ -846,6 +851,8 @@ struct nvmet_fc_target_template {
846 | struct nvmefc_tgt_fcp_req *fcpreq); | 851 | struct nvmefc_tgt_fcp_req *fcpreq); |
847 | void (*fcp_req_release)(struct nvmet_fc_target_port *tgtport, | 852 | void (*fcp_req_release)(struct nvmet_fc_target_port *tgtport, |
848 | struct nvmefc_tgt_fcp_req *fcpreq); | 853 | struct nvmefc_tgt_fcp_req *fcpreq); |
854 | void (*defer_rcv)(struct nvmet_fc_target_port *tgtport, | ||
855 | struct nvmefc_tgt_fcp_req *fcpreq); | ||
849 | 856 | ||
850 | u32 max_hw_queues; | 857 | u32 max_hw_queues; |
851 | u16 max_sgl_segments; | 858 | u16 max_sgl_segments; |