Diffstat (limited to 'drivers/scsi/lpfc/lpfc_bsg.c')
-rw-r--r--  drivers/scsi/lpfc/lpfc_bsg.c  2111
1 file changed, 1731 insertions, 380 deletions
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
index 853e5042f39c..7fb0ba4cbfa7 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.c
+++ b/drivers/scsi/lpfc/lpfc_bsg.c
@@ -23,6 +23,7 @@
23#include <linux/pci.h> 23#include <linux/pci.h>
24#include <linux/slab.h> 24#include <linux/slab.h>
25#include <linux/delay.h> 25#include <linux/delay.h>
26#include <linux/list.h>
26 27
27#include <scsi/scsi.h> 28#include <scsi/scsi.h>
28#include <scsi/scsi_host.h> 29#include <scsi/scsi_host.h>
@@ -79,8 +80,7 @@ struct lpfc_bsg_iocb {
79struct lpfc_bsg_mbox { 80struct lpfc_bsg_mbox {
80 LPFC_MBOXQ_t *pmboxq; 81 LPFC_MBOXQ_t *pmboxq;
81 MAILBOX_t *mb; 82 MAILBOX_t *mb;
82 struct lpfc_dmabuf *rxbmp; /* for BIU diags */ 83 struct lpfc_dmabuf *dmabuffers; /* for BIU diags */
83 struct lpfc_dmabufext *dmp; /* for BIU diags */
84 uint8_t *ext; /* extended mailbox data */ 84 uint8_t *ext; /* extended mailbox data */
85 uint32_t mbOffset; /* from app */ 85 uint32_t mbOffset; /* from app */
86 uint32_t inExtWLen; /* from app */ 86 uint32_t inExtWLen; /* from app */
@@ -332,6 +332,8 @@ lpfc_bsg_send_mgmt_cmd(struct fc_bsg_job *job)
332 cmd->ulpLe = 1; 332 cmd->ulpLe = 1;
333 cmd->ulpClass = CLASS3; 333 cmd->ulpClass = CLASS3;
334 cmd->ulpContext = ndlp->nlp_rpi; 334 cmd->ulpContext = ndlp->nlp_rpi;
335 if (phba->sli_rev == LPFC_SLI_REV4)
336 cmd->ulpContext = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
335 cmd->ulpOwner = OWN_CHIP; 337 cmd->ulpOwner = OWN_CHIP;
336 cmdiocbq->vport = phba->pport; 338 cmdiocbq->vport = phba->pport;
337 cmdiocbq->context3 = bmp; 339 cmdiocbq->context3 = bmp;
@@ -1336,6 +1338,10 @@ lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct fc_bsg_job *job, uint32_t tag,
1336 } 1338 }
1337 1339
1338 icmd->un.ulpWord[3] = ndlp->nlp_rpi; 1340 icmd->un.ulpWord[3] = ndlp->nlp_rpi;
1341 if (phba->sli_rev == LPFC_SLI_REV4)
1342 icmd->ulpContext =
1343 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
1344
1339 /* The exchange is done, mark the entry as invalid */ 1345 /* The exchange is done, mark the entry as invalid */
1340 phba->ct_ctx[tag].flags &= ~UNSOL_VALID; 1346 phba->ct_ctx[tag].flags &= ~UNSOL_VALID;
1341 } else 1347 } else
@@ -1463,11 +1469,91 @@ send_mgmt_rsp_exit:
1463} 1469}
1464 1470
1465/** 1471/**
1466 * lpfc_bsg_diag_mode - process a LPFC_BSG_VENDOR_DIAG_MODE bsg vendor command 1472 * lpfc_bsg_diag_mode_enter - prepare for entering device diag loopback mode
1473 * @phba: Pointer to HBA context object.
1467 * @job: LPFC_BSG_VENDOR_DIAG_MODE 1474 * @job: LPFC_BSG_VENDOR_DIAG_MODE
1468 * 1475 *
1469 * This function is responsible for placing a port into diagnostic loopback 1476 * This function is responsible for preparing the driver for diag loopback
1470 * mode in order to perform a diagnostic loopback test. 1477 * on the device.
1478 */
1479static int
1480lpfc_bsg_diag_mode_enter(struct lpfc_hba *phba, struct fc_bsg_job *job)
1481{
1482 struct lpfc_vport **vports;
1483 struct Scsi_Host *shost;
1484 struct lpfc_sli *psli;
1485 struct lpfc_sli_ring *pring;
1486 int i = 0;
1487
1488 psli = &phba->sli;
1489 if (!psli)
1490 return -ENODEV;
1491
1492 pring = &psli->ring[LPFC_FCP_RING];
1493 if (!pring)
1494 return -ENODEV;
1495
1496 if ((phba->link_state == LPFC_HBA_ERROR) ||
1497 (psli->sli_flag & LPFC_BLOCK_MGMT_IO) ||
1498 (!(psli->sli_flag & LPFC_SLI_ACTIVE)))
1499 return -EACCES;
1500
1501 vports = lpfc_create_vport_work_array(phba);
1502 if (vports) {
1503 for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
1504 shost = lpfc_shost_from_vport(vports[i]);
1505 scsi_block_requests(shost);
1506 }
1507 lpfc_destroy_vport_work_array(phba, vports);
1508 } else {
1509 shost = lpfc_shost_from_vport(phba->pport);
1510 scsi_block_requests(shost);
1511 }
1512
1513 while (pring->txcmplq_cnt) {
1514 if (i++ > 500) /* wait up to 5 seconds */
1515 break;
1516 msleep(10);
1517 }
1518 return 0;
1519}
1520
1521/**
1522 * lpfc_bsg_diag_mode_exit - exit process from device diag loopback mode
1523 * @phba: Pointer to HBA context object.
1524 * @job: LPFC_BSG_VENDOR_DIAG_MODE
1525 *
1526 * This function is responsible for the driver's exit processing of setting up
1527 * diag loopback mode on the device.
1528 */
1529static void
1530lpfc_bsg_diag_mode_exit(struct lpfc_hba *phba)
1531{
1532 struct Scsi_Host *shost;
1533 struct lpfc_vport **vports;
1534 int i;
1535
1536 vports = lpfc_create_vport_work_array(phba);
1537 if (vports) {
1538 for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
1539 shost = lpfc_shost_from_vport(vports[i]);
1540 scsi_unblock_requests(shost);
1541 }
1542 lpfc_destroy_vport_work_array(phba, vports);
1543 } else {
1544 shost = lpfc_shost_from_vport(phba->pport);
1545 scsi_unblock_requests(shost);
1546 }
1547 return;
1548}
1549
1550/**
1551 * lpfc_sli3_bsg_diag_loopback_mode - process an sli3 bsg vendor command
1552 * @phba: Pointer to HBA context object.
1553 * @job: LPFC_BSG_VENDOR_DIAG_MODE
1554 *
1555 * This function is responsible for placing an sli3 port into diagnostic
1556 * loopback mode in order to perform a diagnostic loopback test.
1471 * All new scsi requests are blocked, a small delay is used to allow the 1557 * All new scsi requests are blocked, a small delay is used to allow the
1472 * scsi requests to complete then the link is brought down. If the link is 1558 * scsi requests to complete then the link is brought down. If the link is
1473 * is placed in loopback mode then scsi requests are again allowed 1559 * is placed in loopback mode then scsi requests are again allowed
@@ -1475,17 +1561,11 @@ send_mgmt_rsp_exit:
1475 * All of this is done in-line. 1561 * All of this is done in-line.
1476 */ 1562 */
1477static int 1563static int
1478lpfc_bsg_diag_mode(struct fc_bsg_job *job) 1564lpfc_sli3_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct fc_bsg_job *job)
1479{ 1565{
1480 struct Scsi_Host *shost = job->shost;
1481 struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
1482 struct lpfc_hba *phba = vport->phba;
1483 struct diag_mode_set *loopback_mode; 1566 struct diag_mode_set *loopback_mode;
1484 struct lpfc_sli *psli = &phba->sli;
1485 struct lpfc_sli_ring *pring = &psli->ring[LPFC_FCP_RING];
1486 uint32_t link_flags; 1567 uint32_t link_flags;
1487 uint32_t timeout; 1568 uint32_t timeout;
1488 struct lpfc_vport **vports;
1489 LPFC_MBOXQ_t *pmboxq; 1569 LPFC_MBOXQ_t *pmboxq;
1490 int mbxstatus; 1570 int mbxstatus;
1491 int i = 0; 1571 int i = 0;
@@ -1494,53 +1574,33 @@ lpfc_bsg_diag_mode(struct fc_bsg_job *job)
1494 /* no data to return just the return code */ 1574 /* no data to return just the return code */
1495 job->reply->reply_payload_rcv_len = 0; 1575 job->reply->reply_payload_rcv_len = 0;
1496 1576
1497 if (job->request_len < 1577 if (job->request_len < sizeof(struct fc_bsg_request) +
1498 sizeof(struct fc_bsg_request) + sizeof(struct diag_mode_set)) { 1578 sizeof(struct diag_mode_set)) {
1499 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, 1579 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
1500 "2738 Received DIAG MODE request below minimum " 1580 "2738 Received DIAG MODE request size:%d "
1501 "size\n"); 1581 "below the minimum size:%d\n",
1582 job->request_len,
1583 (int)(sizeof(struct fc_bsg_request) +
1584 sizeof(struct diag_mode_set)));
1502 rc = -EINVAL; 1585 rc = -EINVAL;
1503 goto job_error; 1586 goto job_error;
1504 } 1587 }
1505 1588
1589 rc = lpfc_bsg_diag_mode_enter(phba, job);
1590 if (rc)
1591 goto job_error;
1592
1593 /* bring the link to diagnostic mode */
1506 loopback_mode = (struct diag_mode_set *) 1594 loopback_mode = (struct diag_mode_set *)
1507 job->request->rqst_data.h_vendor.vendor_cmd; 1595 job->request->rqst_data.h_vendor.vendor_cmd;
1508 link_flags = loopback_mode->type; 1596 link_flags = loopback_mode->type;
1509 timeout = loopback_mode->timeout * 100; 1597 timeout = loopback_mode->timeout * 100;
1510 1598
1511 if ((phba->link_state == LPFC_HBA_ERROR) ||
1512 (psli->sli_flag & LPFC_BLOCK_MGMT_IO) ||
1513 (!(psli->sli_flag & LPFC_SLI_ACTIVE))) {
1514 rc = -EACCES;
1515 goto job_error;
1516 }
1517
1518 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 1599 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1519 if (!pmboxq) { 1600 if (!pmboxq) {
1520 rc = -ENOMEM; 1601 rc = -ENOMEM;
1521 goto job_error; 1602 goto loopback_mode_exit;
1522 }
1523
1524 vports = lpfc_create_vport_work_array(phba);
1525 if (vports) {
1526 for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
1527 shost = lpfc_shost_from_vport(vports[i]);
1528 scsi_block_requests(shost);
1529 }
1530
1531 lpfc_destroy_vport_work_array(phba, vports);
1532 } else {
1533 shost = lpfc_shost_from_vport(phba->pport);
1534 scsi_block_requests(shost);
1535 } 1603 }
1536
1537 while (pring->txcmplq_cnt) {
1538 if (i++ > 500) /* wait up to 5 seconds */
1539 break;
1540
1541 msleep(10);
1542 }
1543
1544 memset((void *)pmboxq, 0, sizeof(LPFC_MBOXQ_t)); 1604 memset((void *)pmboxq, 0, sizeof(LPFC_MBOXQ_t));
1545 pmboxq->u.mb.mbxCommand = MBX_DOWN_LINK; 1605 pmboxq->u.mb.mbxCommand = MBX_DOWN_LINK;
1546 pmboxq->u.mb.mbxOwner = OWN_HOST; 1606 pmboxq->u.mb.mbxOwner = OWN_HOST;
@@ -1594,22 +1654,186 @@ lpfc_bsg_diag_mode(struct fc_bsg_job *job)
1594 rc = -ENODEV; 1654 rc = -ENODEV;
1595 1655
1596loopback_mode_exit: 1656loopback_mode_exit:
1597 vports = lpfc_create_vport_work_array(phba); 1657 lpfc_bsg_diag_mode_exit(phba);
1598 if (vports) { 1658
1599 for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) { 1659 /*
1600 shost = lpfc_shost_from_vport(vports[i]); 1660 * Let SLI layer release mboxq if mbox command completed after timeout.
1601 scsi_unblock_requests(shost); 1661 */
1662 if (mbxstatus != MBX_TIMEOUT)
1663 mempool_free(pmboxq, phba->mbox_mem_pool);
1664
1665job_error:
1666 /* make error code available to userspace */
1667 job->reply->result = rc;
1668 /* complete the job back to userspace if no error */
1669 if (rc == 0)
1670 job->job_done(job);
1671 return rc;
1672}
1673
1674/**
1675 * lpfc_sli4_bsg_set_link_diag_state - set sli4 link diag state
1676 * @phba: Pointer to HBA context object.
1677 * @diag: Flag to set the link to diag or normal operation state.
1678 *
1679 * This function is responsible for issuing a sli4 mailbox command for setting
1680 * link to either diag state or normal operation state.
1681 */
1682static int
1683lpfc_sli4_bsg_set_link_diag_state(struct lpfc_hba *phba, uint32_t diag)
1684{
1685 LPFC_MBOXQ_t *pmboxq;
1686 struct lpfc_mbx_set_link_diag_state *link_diag_state;
1687 uint32_t req_len, alloc_len;
1688 int mbxstatus = MBX_SUCCESS, rc;
1689
1690 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1691 if (!pmboxq)
1692 return -ENOMEM;
1693
1694 req_len = (sizeof(struct lpfc_mbx_set_link_diag_state) -
1695 sizeof(struct lpfc_sli4_cfg_mhdr));
1696 alloc_len = lpfc_sli4_config(phba, pmboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
1697 LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_STATE,
1698 req_len, LPFC_SLI4_MBX_EMBED);
1699 if (alloc_len != req_len) {
1700 rc = -ENOMEM;
1701 goto link_diag_state_set_out;
1702 }
1703 link_diag_state = &pmboxq->u.mqe.un.link_diag_state;
1704 bf_set(lpfc_mbx_set_diag_state_link_num, &link_diag_state->u.req,
1705 phba->sli4_hba.link_state.number);
1706 bf_set(lpfc_mbx_set_diag_state_link_type, &link_diag_state->u.req,
1707 phba->sli4_hba.link_state.type);
1708 if (diag)
1709 bf_set(lpfc_mbx_set_diag_state_diag,
1710 &link_diag_state->u.req, 1);
1711 else
1712 bf_set(lpfc_mbx_set_diag_state_diag,
1713 &link_diag_state->u.req, 0);
1714
1715 mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO);
1716
1717 if ((mbxstatus == MBX_SUCCESS) && (pmboxq->u.mb.mbxStatus == 0))
1718 rc = 0;
1719 else
1720 rc = -ENODEV;
1721
1722link_diag_state_set_out:
1723 if (pmboxq && (mbxstatus != MBX_TIMEOUT))
1724 mempool_free(pmboxq, phba->mbox_mem_pool);
1725
1726 return rc;
1727}
1728
1729/**
1730 * lpfc_sli4_bsg_diag_loopback_mode - process an sli4 bsg vendor command
1731 * @phba: Pointer to HBA context object.
1732 * @job: LPFC_BSG_VENDOR_DIAG_MODE
1733 *
1734 * This function is responsible for placing an sli4 port into diagnostic
1735 * loopback mode in order to perform a diagnostic loopback test.
1736 */
1737static int
1738lpfc_sli4_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct fc_bsg_job *job)
1739{
1740 struct diag_mode_set *loopback_mode;
1741 uint32_t link_flags, timeout, req_len, alloc_len;
1742 struct lpfc_mbx_set_link_diag_loopback *link_diag_loopback;
1743 LPFC_MBOXQ_t *pmboxq = NULL;
1744 int mbxstatus, i, rc = 0;
1745
1746 /* no data to return just the return code */
1747 job->reply->reply_payload_rcv_len = 0;
1748
1749 if (job->request_len < sizeof(struct fc_bsg_request) +
1750 sizeof(struct diag_mode_set)) {
1751 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
1752 "3011 Received DIAG MODE request size:%d "
1753 "below the minimum size:%d\n",
1754 job->request_len,
1755 (int)(sizeof(struct fc_bsg_request) +
1756 sizeof(struct diag_mode_set)));
1757 rc = -EINVAL;
1758 goto job_error;
1759 }
1760
1761 rc = lpfc_bsg_diag_mode_enter(phba, job);
1762 if (rc)
1763 goto job_error;
1764
1765 /* bring the link to diagnostic mode */
1766 loopback_mode = (struct diag_mode_set *)
1767 job->request->rqst_data.h_vendor.vendor_cmd;
1768 link_flags = loopback_mode->type;
1769 timeout = loopback_mode->timeout * 100;
1770
1771 rc = lpfc_sli4_bsg_set_link_diag_state(phba, 1);
1772 if (rc)
1773 goto loopback_mode_exit;
1774
1775 /* wait for link down before proceeding */
1776 i = 0;
1777 while (phba->link_state != LPFC_LINK_DOWN) {
1778 if (i++ > timeout) {
1779 rc = -ETIMEDOUT;
1780 goto loopback_mode_exit;
1781 }
1782 msleep(10);
1783 }
1784 /* set up loopback mode */
1785 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1786 if (!pmboxq) {
1787 rc = -ENOMEM;
1788 goto loopback_mode_exit;
1789 }
1790 req_len = (sizeof(struct lpfc_mbx_set_link_diag_loopback) -
1791 sizeof(struct lpfc_sli4_cfg_mhdr));
1792 alloc_len = lpfc_sli4_config(phba, pmboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
1793 LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_LOOPBACK,
1794 req_len, LPFC_SLI4_MBX_EMBED);
1795 if (alloc_len != req_len) {
1796 rc = -ENOMEM;
1797 goto loopback_mode_exit;
1798 }
1799 link_diag_loopback = &pmboxq->u.mqe.un.link_diag_loopback;
1800 bf_set(lpfc_mbx_set_diag_state_link_num,
1801 &link_diag_loopback->u.req, phba->sli4_hba.link_state.number);
1802 bf_set(lpfc_mbx_set_diag_state_link_type,
1803 &link_diag_loopback->u.req, phba->sli4_hba.link_state.type);
1804 if (link_flags == INTERNAL_LOOP_BACK)
1805 bf_set(lpfc_mbx_set_diag_lpbk_type,
1806 &link_diag_loopback->u.req,
1807 LPFC_DIAG_LOOPBACK_TYPE_INTERNAL);
1808 else
1809 bf_set(lpfc_mbx_set_diag_lpbk_type,
1810 &link_diag_loopback->u.req,
1811 LPFC_DIAG_LOOPBACK_TYPE_EXTERNAL);
1812
1813 mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO);
1814 if ((mbxstatus != MBX_SUCCESS) || (pmboxq->u.mb.mbxStatus))
1815 rc = -ENODEV;
1816 else {
1817 phba->link_flag |= LS_LOOPBACK_MODE;
1818 /* wait for the link attention interrupt */
1819 msleep(100);
1820 i = 0;
1821 while (phba->link_state != LPFC_HBA_READY) {
1822 if (i++ > timeout) {
1823 rc = -ETIMEDOUT;
1824 break;
1825 }
1826 msleep(10);
1602 } 1827 }
1603 lpfc_destroy_vport_work_array(phba, vports);
1604 } else {
1605 shost = lpfc_shost_from_vport(phba->pport);
1606 scsi_unblock_requests(shost);
1607 } 1828 }
1608 1829
1830loopback_mode_exit:
1831 lpfc_bsg_diag_mode_exit(phba);
1832
1609 /* 1833 /*
1610 * Let SLI layer release mboxq if mbox command completed after timeout. 1834 * Let SLI layer release mboxq if mbox command completed after timeout.
1611 */ 1835 */
1612 if (mbxstatus != MBX_TIMEOUT) 1836 if (pmboxq && (mbxstatus != MBX_TIMEOUT))
1613 mempool_free(pmboxq, phba->mbox_mem_pool); 1837 mempool_free(pmboxq, phba->mbox_mem_pool);
1614 1838
1615job_error: 1839job_error:
@@ -1622,6 +1846,234 @@ job_error:
1622} 1846}
1623 1847
1624/** 1848/**
1849 * lpfc_bsg_diag_loopback_mode - bsg vendor command for diag loopback mode
1850 * @job: LPFC_BSG_VENDOR_DIAG_MODE
1851 *
1852 * This function is responsible for checking and dispatching the bsg diag
1853 * command from the user to the proper driver action routine.
1854 */
1855static int
1856lpfc_bsg_diag_loopback_mode(struct fc_bsg_job *job)
1857{
1858 struct Scsi_Host *shost;
1859 struct lpfc_vport *vport;
1860 struct lpfc_hba *phba;
1861 int rc;
1862
1863 shost = job->shost;
1864 if (!shost)
1865 return -ENODEV;
1866 vport = (struct lpfc_vport *)job->shost->hostdata;
1867 if (!vport)
1868 return -ENODEV;
1869 phba = vport->phba;
1870 if (!phba)
1871 return -ENODEV;
1872
1873 if (phba->sli_rev < LPFC_SLI_REV4)
1874 rc = lpfc_sli3_bsg_diag_loopback_mode(phba, job);
1875 else if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
1876 LPFC_SLI_INTF_IF_TYPE_2)
1877 rc = lpfc_sli4_bsg_diag_loopback_mode(phba, job);
1878 else
1879 rc = -ENODEV;
1880
1881 return rc;
1882
1883}
1884
1885/**
1886 * lpfc_sli4_bsg_diag_mode_end - sli4 bsg vendor command for ending diag mode
1887 * @job: LPFC_BSG_VENDOR_DIAG_MODE_END
1888 *
1889 * This function is responsible for checking and dispatching the bsg diag
1890 * command from the user to the proper driver action routine.
1891 */
1892static int
1893lpfc_sli4_bsg_diag_mode_end(struct fc_bsg_job *job)
1894{
1895 struct Scsi_Host *shost;
1896 struct lpfc_vport *vport;
1897 struct lpfc_hba *phba;
1898 int rc;
1899
1900 shost = job->shost;
1901 if (!shost)
1902 return -ENODEV;
1903 vport = (struct lpfc_vport *)job->shost->hostdata;
1904 if (!vport)
1905 return -ENODEV;
1906 phba = vport->phba;
1907 if (!phba)
1908 return -ENODEV;
1909
1910 if (phba->sli_rev < LPFC_SLI_REV4)
1911 return -ENODEV;
1912 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
1913 LPFC_SLI_INTF_IF_TYPE_2)
1914 return -ENODEV;
1915
1916 rc = lpfc_sli4_bsg_set_link_diag_state(phba, 0);
1917
1918 if (!rc)
1919 rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
1920
1921 return rc;
1922}
1923
1924/**
1925 * lpfc_sli4_bsg_link_diag_test - sli4 bsg vendor command for diag link test
1926 * @job: LPFC_BSG_VENDOR_DIAG_LINK_TEST
1927 *
1928 * This function performs the SLI4 diag link test requested by the user
1929 * application.
1930 */
1931static int
1932lpfc_sli4_bsg_link_diag_test(struct fc_bsg_job *job)
1933{
1934 struct Scsi_Host *shost;
1935 struct lpfc_vport *vport;
1936 struct lpfc_hba *phba;
1937 LPFC_MBOXQ_t *pmboxq;
1938 struct sli4_link_diag *link_diag_test_cmd;
1939 uint32_t req_len, alloc_len;
1940 uint32_t timeout;
1941 struct lpfc_mbx_run_link_diag_test *run_link_diag_test;
1942 union lpfc_sli4_cfg_shdr *shdr;
1943 uint32_t shdr_status, shdr_add_status;
1944 struct diag_status *diag_status_reply;
1945 int mbxstatus, rc = 0;
1946
1947 shost = job->shost;
1948 if (!shost) {
1949 rc = -ENODEV;
1950 goto job_error;
1951 }
1952 vport = (struct lpfc_vport *)job->shost->hostdata;
1953 if (!vport) {
1954 rc = -ENODEV;
1955 goto job_error;
1956 }
1957 phba = vport->phba;
1958 if (!phba) {
1959 rc = -ENODEV;
1960 goto job_error;
1961 }
1962
1963 if (phba->sli_rev < LPFC_SLI_REV4) {
1964 rc = -ENODEV;
1965 goto job_error;
1966 }
1967 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
1968 LPFC_SLI_INTF_IF_TYPE_2) {
1969 rc = -ENODEV;
1970 goto job_error;
1971 }
1972
1973 if (job->request_len < sizeof(struct fc_bsg_request) +
1974 sizeof(struct sli4_link_diag)) {
1975 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
1976 "3013 Received LINK DIAG TEST request "
1977 " size:%d below the minimum size:%d\n",
1978 job->request_len,
1979 (int)(sizeof(struct fc_bsg_request) +
1980 sizeof(struct sli4_link_diag)));
1981 rc = -EINVAL;
1982 goto job_error;
1983 }
1984
1985 rc = lpfc_bsg_diag_mode_enter(phba, job);
1986 if (rc)
1987 goto job_error;
1988
1989 link_diag_test_cmd = (struct sli4_link_diag *)
1990 job->request->rqst_data.h_vendor.vendor_cmd;
1991 timeout = link_diag_test_cmd->timeout * 100;
1992
1993 rc = lpfc_sli4_bsg_set_link_diag_state(phba, 1);
1994
1995 if (rc)
1996 goto job_error;
1997
1998 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1999 if (!pmboxq) {
2000 rc = -ENOMEM;
2001 goto link_diag_test_exit;
2002 }
2003
2004 req_len = (sizeof(struct lpfc_mbx_set_link_diag_state) -
2005 sizeof(struct lpfc_sli4_cfg_mhdr));
2006 alloc_len = lpfc_sli4_config(phba, pmboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
2007 LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_STATE,
2008 req_len, LPFC_SLI4_MBX_EMBED);
2009 if (alloc_len != req_len) {
2010 rc = -ENOMEM;
2011 goto link_diag_test_exit;
2012 }
2013 run_link_diag_test = &pmboxq->u.mqe.un.link_diag_test;
2014 bf_set(lpfc_mbx_run_diag_test_link_num, &run_link_diag_test->u.req,
2015 phba->sli4_hba.link_state.number);
2016 bf_set(lpfc_mbx_run_diag_test_link_type, &run_link_diag_test->u.req,
2017 phba->sli4_hba.link_state.type);
2018 bf_set(lpfc_mbx_run_diag_test_test_id, &run_link_diag_test->u.req,
2019 link_diag_test_cmd->test_id);
2020 bf_set(lpfc_mbx_run_diag_test_loops, &run_link_diag_test->u.req,
2021 link_diag_test_cmd->loops);
2022 bf_set(lpfc_mbx_run_diag_test_test_ver, &run_link_diag_test->u.req,
2023 link_diag_test_cmd->test_version);
2024 bf_set(lpfc_mbx_run_diag_test_err_act, &run_link_diag_test->u.req,
2025 link_diag_test_cmd->error_action);
2026
2027 mbxstatus = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
2028
2029 shdr = (union lpfc_sli4_cfg_shdr *)
2030 &pmboxq->u.mqe.un.sli4_config.header.cfg_shdr;
2031 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
2032 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
2033 if (shdr_status || shdr_add_status || mbxstatus) {
2034 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
2035 "3010 Run link diag test mailbox failed with "
2036 "mbx_status x%x status x%x, add_status x%x\n",
2037 mbxstatus, shdr_status, shdr_add_status);
2038 }
2039
2040 diag_status_reply = (struct diag_status *)
2041 job->reply->reply_data.vendor_reply.vendor_rsp;
2042
2043 if (job->reply_len <
2044 sizeof(struct fc_bsg_request) + sizeof(struct diag_status)) {
2045 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
2046 "3012 Received Run link diag test reply "
2047 "below minimum size (%d): reply_len:%d\n",
2048 (int)(sizeof(struct fc_bsg_request) +
2049 sizeof(struct diag_status)),
2050 job->reply_len);
2051 rc = -EINVAL;
2052 goto job_error;
2053 }
2054
2055 diag_status_reply->mbox_status = mbxstatus;
2056 diag_status_reply->shdr_status = shdr_status;
2057 diag_status_reply->shdr_add_status = shdr_add_status;
2058
2059link_diag_test_exit:
2060 rc = lpfc_sli4_bsg_set_link_diag_state(phba, 0);
2061
2062 if (pmboxq)
2063 mempool_free(pmboxq, phba->mbox_mem_pool);
2064
2065 lpfc_bsg_diag_mode_exit(phba);
2066
2067job_error:
2068 /* make error code available to userspace */
2069 job->reply->result = rc;
2070 /* complete the job back to userspace if no error */
2071 if (rc == 0)
2072 job->job_done(job);
2073 return rc;
2074}
2075
2076/**
1625 * lpfcdiag_loop_self_reg - obtains a remote port login id 2077 * lpfcdiag_loop_self_reg - obtains a remote port login id
1626 * @phba: Pointer to HBA context object 2078 * @phba: Pointer to HBA context object
1627 * @rpi: Pointer to a remote port login id 2079 * @rpi: Pointer to a remote port login id
@@ -1851,6 +2303,86 @@ err_get_xri_exit:
1851} 2303}
1852 2304
1853/** 2305/**
2306 * lpfc_bsg_dma_page_alloc - allocate a bsg mbox page sized dma buffers
2307 * @phba: Pointer to HBA context object
2308 *
2309 * This function allocates a BSG_MBOX_SIZE (4KB) page-sized dma buffer and
2310 * returns a pointer to the buffer.
2311 **/
2312static struct lpfc_dmabuf *
2313lpfc_bsg_dma_page_alloc(struct lpfc_hba *phba)
2314{
2315 struct lpfc_dmabuf *dmabuf;
2316 struct pci_dev *pcidev = phba->pcidev;
2317
2318 /* allocate dma buffer struct */
2319 dmabuf = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
2320 if (!dmabuf)
2321 return NULL;
2322
2323 INIT_LIST_HEAD(&dmabuf->list);
2324
2325 /* now, allocate dma buffer */
2326 dmabuf->virt = dma_alloc_coherent(&pcidev->dev, BSG_MBOX_SIZE,
2327 &(dmabuf->phys), GFP_KERNEL);
2328
2329 if (!dmabuf->virt) {
2330 kfree(dmabuf);
2331 return NULL;
2332 }
2333 memset((uint8_t *)dmabuf->virt, 0, BSG_MBOX_SIZE);
2334
2335 return dmabuf;
2336}
2337
2338/**
2339 * lpfc_bsg_dma_page_free - free a bsg mbox page sized dma buffer
2340 * @phba: Pointer to HBA context object.
2341 * @dmabuf: Pointer to the bsg mbox page sized dma buffer descriptor.
2342 *
2343 * This routine frees a dma buffer and its associated buffer
2344 * descriptor referred to by @dmabuf.
2345 **/
2346static void
2347lpfc_bsg_dma_page_free(struct lpfc_hba *phba, struct lpfc_dmabuf *dmabuf)
2348{
2349 struct pci_dev *pcidev = phba->pcidev;
2350
2351 if (!dmabuf)
2352 return;
2353
2354 if (dmabuf->virt)
2355 dma_free_coherent(&pcidev->dev, BSG_MBOX_SIZE,
2356 dmabuf->virt, dmabuf->phys);
2357 kfree(dmabuf);
2358 return;
2359}
2360
2361/**
2362 * lpfc_bsg_dma_page_list_free - free a list of bsg mbox page sized dma buffers
2363 * @phba: Pointer to HBA context object.
2364 * @dmabuf_list: Pointer to a list of bsg mbox page sized dma buffer descs.
2365 *
2366 * This routine frees all dma buffers and their associated buffer
2367 * descriptors referred to by @dmabuf_list.
2368 **/
2369static void
2370lpfc_bsg_dma_page_list_free(struct lpfc_hba *phba,
2371 struct list_head *dmabuf_list)
2372{
2373 struct lpfc_dmabuf *dmabuf, *next_dmabuf;
2374
2375 if (list_empty(dmabuf_list))
2376 return;
2377
2378 list_for_each_entry_safe(dmabuf, next_dmabuf, dmabuf_list, list) {
2379 list_del_init(&dmabuf->list);
2380 lpfc_bsg_dma_page_free(phba, dmabuf);
2381 }
2382 return;
2383}
2384
2385/**
1854 * diag_cmd_data_alloc - fills in a bde struct with dma buffers 2386 * diag_cmd_data_alloc - fills in a bde struct with dma buffers
1855 * @phba: Pointer to HBA context object 2387 * @phba: Pointer to HBA context object
1856 * @bpl: Pointer to 64 bit bde structure 2388 * @bpl: Pointer to 64 bit bde structure
@@ -2067,7 +2599,7 @@ err_post_rxbufs_exit:
2067} 2599}
2068 2600
2069/** 2601/**
2070 * lpfc_bsg_diag_test - with a port in loopback issues a Ct cmd to itself 2602 * lpfc_bsg_diag_loopback_run - run loopback on a port by issuing a ct cmd to itself
2071 * @job: LPFC_BSG_VENDOR_DIAG_TEST fc_bsg_job 2603 * @job: LPFC_BSG_VENDOR_DIAG_TEST fc_bsg_job
2072 * 2604 *
2073 * This function receives a user data buffer to be transmitted and received on 2605 * This function receives a user data buffer to be transmitted and received on
@@ -2086,7 +2618,7 @@ err_post_rxbufs_exit:
2086 * of loopback mode. 2618 * of loopback mode.
2087 **/ 2619 **/
2088static int 2620static int
2089lpfc_bsg_diag_test(struct fc_bsg_job *job) 2621lpfc_bsg_diag_loopback_run(struct fc_bsg_job *job)
2090{ 2622{
2091 struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata; 2623 struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
2092 struct lpfc_hba *phba = vport->phba; 2624 struct lpfc_hba *phba = vport->phba;
@@ -2411,7 +2943,7 @@ job_error:
2411} 2943}
2412 2944
2413/** 2945/**
2414 * lpfc_bsg_wake_mbox_wait - lpfc_bsg_issue_mbox mbox completion handler 2946 * lpfc_bsg_issue_mbox_cmpl - lpfc_bsg_issue_mbox mbox completion handler
2415 * @phba: Pointer to HBA context object. 2947 * @phba: Pointer to HBA context object.
2416 * @pmboxq: Pointer to mailbox command. 2948 * @pmboxq: Pointer to mailbox command.
2417 * 2949 *
@@ -2422,15 +2954,13 @@ job_error:
2422 * of the mailbox. 2954 * of the mailbox.
2423 **/ 2955 **/
2424void 2956void
2425lpfc_bsg_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq) 2957lpfc_bsg_issue_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
2426{ 2958{
2427 struct bsg_job_data *dd_data; 2959 struct bsg_job_data *dd_data;
2428 struct fc_bsg_job *job; 2960 struct fc_bsg_job *job;
2429 struct lpfc_mbx_nembed_cmd *nembed_sge;
2430 uint32_t size; 2961 uint32_t size;
2431 unsigned long flags; 2962 unsigned long flags;
2432 uint8_t *to; 2963 uint8_t *pmb, *pmb_buf;
2433 uint8_t *from;
2434 2964
2435 spin_lock_irqsave(&phba->ct_ev_lock, flags); 2965 spin_lock_irqsave(&phba->ct_ev_lock, flags);
2436 dd_data = pmboxq->context1; 2966 dd_data = pmboxq->context1;
@@ -2440,62 +2970,21 @@ lpfc_bsg_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
2440 return; 2970 return;
2441 } 2971 }
2442 2972
2443 /* build the outgoing buffer to do an sg copy 2973 /*
2444 * the format is the response mailbox followed by any extended 2974 * The outgoing buffer is referenced directly from the dma buffer;
2445 * mailbox data 2975 * we just need to get the header part from the mailboxq structure.
2446 */ 2976 */
2447 from = (uint8_t *)&pmboxq->u.mb; 2977 pmb = (uint8_t *)&pmboxq->u.mb;
2448 to = (uint8_t *)dd_data->context_un.mbox.mb; 2978 pmb_buf = (uint8_t *)dd_data->context_un.mbox.mb;
2449 memcpy(to, from, sizeof(MAILBOX_t)); 2979 memcpy(pmb_buf, pmb, sizeof(MAILBOX_t));
2450 if (pmboxq->u.mb.mbxStatus == MBX_SUCCESS) {
2451 /* copy the extended data if any, count is in words */
2452 if (dd_data->context_un.mbox.outExtWLen) {
2453 from = (uint8_t *)dd_data->context_un.mbox.ext;
2454 to += sizeof(MAILBOX_t);
2455 size = dd_data->context_un.mbox.outExtWLen *
2456 sizeof(uint32_t);
2457 memcpy(to, from, size);
2458 } else if (pmboxq->u.mb.mbxCommand == MBX_RUN_BIU_DIAG64) {
2459 from = (uint8_t *)dd_data->context_un.mbox.
2460 dmp->dma.virt;
2461 to += sizeof(MAILBOX_t);
2462 size = dd_data->context_un.mbox.dmp->size;
2463 memcpy(to, from, size);
2464 } else if ((phba->sli_rev == LPFC_SLI_REV4) &&
2465 (pmboxq->u.mb.mbxCommand == MBX_DUMP_MEMORY)) {
2466 from = (uint8_t *)dd_data->context_un.mbox.dmp->dma.
2467 virt;
2468 to += sizeof(MAILBOX_t);
2469 size = pmboxq->u.mb.un.varWords[5];
2470 memcpy(to, from, size);
2471 } else if ((phba->sli_rev == LPFC_SLI_REV4) &&
2472 (pmboxq->u.mb.mbxCommand == MBX_SLI4_CONFIG)) {
2473 nembed_sge = (struct lpfc_mbx_nembed_cmd *)
2474 &pmboxq->u.mb.un.varWords[0];
2475
2476 from = (uint8_t *)dd_data->context_un.mbox.dmp->dma.
2477 virt;
2478 to += sizeof(MAILBOX_t);
2479 size = nembed_sge->sge[0].length;
2480 memcpy(to, from, size);
2481 } else if (pmboxq->u.mb.mbxCommand == MBX_READ_EVENT_LOG) {
2482 from = (uint8_t *)dd_data->context_un.
2483 mbox.dmp->dma.virt;
2484 to += sizeof(MAILBOX_t);
2485 size = dd_data->context_un.mbox.dmp->size;
2486 memcpy(to, from, size);
2487 }
2488 }
2489 2980
2490 from = (uint8_t *)dd_data->context_un.mbox.mb;
2491 job = dd_data->context_un.mbox.set_job; 2981 job = dd_data->context_un.mbox.set_job;
2492 if (job) { 2982 if (job) {
2493 size = job->reply_payload.payload_len; 2983 size = job->reply_payload.payload_len;
2494 job->reply->reply_payload_rcv_len = 2984 job->reply->reply_payload_rcv_len =
2495 sg_copy_from_buffer(job->reply_payload.sg_list, 2985 sg_copy_from_buffer(job->reply_payload.sg_list,
2496 job->reply_payload.sg_cnt, 2986 job->reply_payload.sg_cnt,
2497 from, size); 2987 pmb_buf, size);
2498 job->reply->result = 0;
2499 /* need to hold the lock until we set job->dd_data to NULL 2988 /* need to hold the lock until we set job->dd_data to NULL
2500 * to hold off the timeout handler returning to the mid-layer 2989 * to hold off the timeout handler returning to the mid-layer
2501 * while we are still processing the job. 2990 * while we are still processing the job.
@@ -2503,28 +2992,19 @@ lpfc_bsg_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
2503 job->dd_data = NULL; 2992 job->dd_data = NULL;
2504 dd_data->context_un.mbox.set_job = NULL; 2993 dd_data->context_un.mbox.set_job = NULL;
2505 spin_unlock_irqrestore(&phba->ct_ev_lock, flags); 2994 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
2506 job->job_done(job);
2507 } else { 2995 } else {
2508 dd_data->context_un.mbox.set_job = NULL; 2996 dd_data->context_un.mbox.set_job = NULL;
2509 spin_unlock_irqrestore(&phba->ct_ev_lock, flags); 2997 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
2510 } 2998 }
2511 2999
2512 kfree(dd_data->context_un.mbox.mb);
2513 mempool_free(dd_data->context_un.mbox.pmboxq, phba->mbox_mem_pool); 3000 mempool_free(dd_data->context_un.mbox.pmboxq, phba->mbox_mem_pool);
2514 kfree(dd_data->context_un.mbox.ext); 3001 lpfc_bsg_dma_page_free(phba, dd_data->context_un.mbox.dmabuffers);
2515 if (dd_data->context_un.mbox.dmp) {
2516 dma_free_coherent(&phba->pcidev->dev,
2517 dd_data->context_un.mbox.dmp->size,
2518 dd_data->context_un.mbox.dmp->dma.virt,
2519 dd_data->context_un.mbox.dmp->dma.phys);
2520 kfree(dd_data->context_un.mbox.dmp);
2521 }
2522 if (dd_data->context_un.mbox.rxbmp) {
2523 lpfc_mbuf_free(phba, dd_data->context_un.mbox.rxbmp->virt,
2524 dd_data->context_un.mbox.rxbmp->phys);
2525 kfree(dd_data->context_un.mbox.rxbmp);
2526 }
2527 kfree(dd_data); 3002 kfree(dd_data);
3003
3004 if (job) {
3005 job->reply->result = 0;
3006 job->job_done(job);
3007 }
2528 return; 3008 return;
2529} 3009}
2530 3010
@@ -2619,6 +3099,1006 @@ static int lpfc_bsg_check_cmd_access(struct lpfc_hba *phba,
2619} 3099}
2620 3100
2621/** 3101/**
3102 * lpfc_bsg_mbox_ext_session_reset - clean up context of multi-buffer mbox session
3103 * @phba: Pointer to HBA context object.
3104 *
3105 * This routine cleans up and resets the BSG handling of a multi-buffer mbox
3106 * command session.
3107 **/
3108static void
3109lpfc_bsg_mbox_ext_session_reset(struct lpfc_hba *phba)
3110{
3111 if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_IDLE)
3112 return;
3113
3114 /* free all memory, including dma buffers */
3115 lpfc_bsg_dma_page_list_free(phba,
3116 &phba->mbox_ext_buf_ctx.ext_dmabuf_list);
3117 lpfc_bsg_dma_page_free(phba, phba->mbox_ext_buf_ctx.mbx_dmabuf);
3118 /* multi-buffer write mailbox command pass-through complete */
3119 memset((char *)&phba->mbox_ext_buf_ctx, 0,
3120 sizeof(struct lpfc_mbox_ext_buf_ctx));
3121 INIT_LIST_HEAD(&phba->mbox_ext_buf_ctx.ext_dmabuf_list);
3122
3123 return;
3124}
3125
3126/**
3127 * lpfc_bsg_issue_mbox_ext_handle_job - job handler for multi-buffer mbox cmpl
3128 * @phba: Pointer to HBA context object.
3129 * @pmboxq: Pointer to mailbox command.
3130 *
3131 * This routine handles BSG job completion for mailbox commands with
3132 * multiple external buffers.
3133 **/
3134static struct fc_bsg_job *
3135lpfc_bsg_issue_mbox_ext_handle_job(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
3136{
3137 struct bsg_job_data *dd_data;
3138 struct fc_bsg_job *job;
3139 uint8_t *pmb, *pmb_buf;
3140 unsigned long flags;
3141 uint32_t size;
3142 int rc = 0;
3143
3144 spin_lock_irqsave(&phba->ct_ev_lock, flags);
3145 dd_data = pmboxq->context1;
3146 /* has the job already timed out? */
3147 if (!dd_data) {
3148 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
3149 job = NULL;
3150 goto job_done_out;
3151 }
3152
3153 /*
3154 * The outgoing buffer is referenced directly from the dma buffer;
3155 * we just need to get the header part from the mailboxq structure.
3156 */
3157 pmb = (uint8_t *)&pmboxq->u.mb;
3158 pmb_buf = (uint8_t *)dd_data->context_un.mbox.mb;
3159 memcpy(pmb_buf, pmb, sizeof(MAILBOX_t));
3160
3161 job = dd_data->context_un.mbox.set_job;
3162 if (job) {
3163 size = job->reply_payload.payload_len;
3164 job->reply->reply_payload_rcv_len =
3165 sg_copy_from_buffer(job->reply_payload.sg_list,
3166 job->reply_payload.sg_cnt,
3167 pmb_buf, size);
3168 /* result for successful */
3169 job->reply->result = 0;
3170 job->dd_data = NULL;
3171 /* need to hold the lock until we set job->dd_data to NULL
3172 * to hold off the timeout handler in the mid-layer from taking
3173 * any action.
3174 */
3175 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
3176 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3177 "2937 SLI_CONFIG ext-buffer maibox command "
3178 "(x%x/x%x) complete bsg job done, bsize:%d\n",
3179 phba->mbox_ext_buf_ctx.nembType,
3180 phba->mbox_ext_buf_ctx.mboxType, size);
3181 } else
3182 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
3183
3184job_done_out:
3185 if (!job)
3186 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
3187 "2938 SLI_CONFIG ext-buffer maibox "
3188 "command (x%x/x%x) failure, rc:x%x\n",
3189 phba->mbox_ext_buf_ctx.nembType,
3190 phba->mbox_ext_buf_ctx.mboxType, rc);
3191 /* state change */
3192 phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_DONE;
3193 kfree(dd_data);
3194
3195 return job;
3196}
3197
3198/**
3199 * lpfc_bsg_issue_read_mbox_ext_cmpl - compl handler for multi-buffer read mbox
3200 * @phba: Pointer to HBA context object.
3201 * @pmboxq: Pointer to mailbox command.
3202 *
3203 * This is completion handler function for mailbox read commands with multiple
3204 * external buffers.
3205 **/
3206static void
3207lpfc_bsg_issue_read_mbox_ext_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
3208{
3209 struct fc_bsg_job *job;
3210
3211 /* handle the BSG job with mailbox command */
3212 if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_ABTS)
3213 pmboxq->u.mb.mbxStatus = MBXERR_ERROR;
3214
3215 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3216 "2939 SLI_CONFIG ext-buffer rd maibox command "
3217 "complete, ctxState:x%x, mbxStatus:x%x\n",
3218 phba->mbox_ext_buf_ctx.state, pmboxq->u.mb.mbxStatus);
3219
3220 job = lpfc_bsg_issue_mbox_ext_handle_job(phba, pmboxq);
3221
3222 if (pmboxq->u.mb.mbxStatus || phba->mbox_ext_buf_ctx.numBuf == 1)
3223 lpfc_bsg_mbox_ext_session_reset(phba);
3224
3225 /* free base driver mailbox structure memory */
3226 mempool_free(pmboxq, phba->mbox_mem_pool);
3227
3228 /* complete the bsg job if we have it */
3229 if (job)
3230 job->job_done(job);
3231
3232 return;
3233}
3234
3235/**
3236 * lpfc_bsg_issue_write_mbox_ext_cmpl - cmpl handler for multi-buffer write mbox
3237 * @phba: Pointer to HBA context object.
3238 * @pmboxq: Pointer to mailbox command.
3239 *
3240 * This is completion handler function for mailbox write commands with multiple
3241 * external buffers.
3242 **/
3243static void
3244lpfc_bsg_issue_write_mbox_ext_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
3245{
3246 struct fc_bsg_job *job;
3247
3248 /* handle the BSG job with the mailbox command */
3249 if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_ABTS)
3250 pmboxq->u.mb.mbxStatus = MBXERR_ERROR;
3251
3252 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3253 "2940 SLI_CONFIG ext-buffer wr maibox command "
3254 "complete, ctxState:x%x, mbxStatus:x%x\n",
3255 phba->mbox_ext_buf_ctx.state, pmboxq->u.mb.mbxStatus);
3256
3257 job = lpfc_bsg_issue_mbox_ext_handle_job(phba, pmboxq);
3258
3259 /* free all memory, including dma buffers */
3260 mempool_free(pmboxq, phba->mbox_mem_pool);
3261 lpfc_bsg_mbox_ext_session_reset(phba);
3262
3263 /* complete the bsg job if we have it */
3264 if (job)
3265 job->job_done(job);
3266
3267 return;
3268}
3269
3270static void
3271lpfc_bsg_sli_cfg_dma_desc_setup(struct lpfc_hba *phba, enum nemb_type nemb_tp,
3272 uint32_t index, struct lpfc_dmabuf *mbx_dmabuf,
3273 struct lpfc_dmabuf *ext_dmabuf)
3274{
3275 struct lpfc_sli_config_mbox *sli_cfg_mbx;
3276
3277 /* pointer to the start of mailbox command */
3278 sli_cfg_mbx = (struct lpfc_sli_config_mbox *)mbx_dmabuf->virt;
3279
3280 if (nemb_tp == nemb_mse) {
3281 if (index == 0) {
3282 sli_cfg_mbx->un.sli_config_emb0_subsys.
3283 mse[index].pa_hi =
3284 putPaddrHigh(mbx_dmabuf->phys +
3285 sizeof(MAILBOX_t));
3286 sli_cfg_mbx->un.sli_config_emb0_subsys.
3287 mse[index].pa_lo =
3288 putPaddrLow(mbx_dmabuf->phys +
3289 sizeof(MAILBOX_t));
3290 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3291 "2943 SLI_CONFIG(mse)[%d], "
3292 "bufLen:%d, addrHi:x%x, addrLo:x%x\n",
3293 index,
3294 sli_cfg_mbx->un.sli_config_emb0_subsys.
3295 mse[index].buf_len,
3296 sli_cfg_mbx->un.sli_config_emb0_subsys.
3297 mse[index].pa_hi,
3298 sli_cfg_mbx->un.sli_config_emb0_subsys.
3299 mse[index].pa_lo);
3300 } else {
3301 sli_cfg_mbx->un.sli_config_emb0_subsys.
3302 mse[index].pa_hi =
3303 putPaddrHigh(ext_dmabuf->phys);
3304 sli_cfg_mbx->un.sli_config_emb0_subsys.
3305 mse[index].pa_lo =
3306 putPaddrLow(ext_dmabuf->phys);
3307 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3308 "2944 SLI_CONFIG(mse)[%d], "
3309 "bufLen:%d, addrHi:x%x, addrLo:x%x\n",
3310 index,
3311 sli_cfg_mbx->un.sli_config_emb0_subsys.
3312 mse[index].buf_len,
3313 sli_cfg_mbx->un.sli_config_emb0_subsys.
3314 mse[index].pa_hi,
3315 sli_cfg_mbx->un.sli_config_emb0_subsys.
3316 mse[index].pa_lo);
3317 }
3318 } else {
3319 if (index == 0) {
3320 sli_cfg_mbx->un.sli_config_emb1_subsys.
3321 hbd[index].pa_hi =
3322 putPaddrHigh(mbx_dmabuf->phys +
3323 sizeof(MAILBOX_t));
3324 sli_cfg_mbx->un.sli_config_emb1_subsys.
3325 hbd[index].pa_lo =
3326 putPaddrLow(mbx_dmabuf->phys +
3327 sizeof(MAILBOX_t));
3328 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3329 "3007 SLI_CONFIG(hbd)[%d], "
3330 "bufLen:%d, addrHi:x%x, addrLo:x%x\n",
3331 index,
3332 bsg_bf_get(lpfc_mbox_sli_config_ecmn_hbd_len,
3333 &sli_cfg_mbx->un.
3334 sli_config_emb1_subsys.hbd[index]),
3335 sli_cfg_mbx->un.sli_config_emb1_subsys.
3336 hbd[index].pa_hi,
3337 sli_cfg_mbx->un.sli_config_emb1_subsys.
3338 hbd[index].pa_lo);
3339
3340 } else {
3341 sli_cfg_mbx->un.sli_config_emb1_subsys.
3342 hbd[index].pa_hi =
3343 putPaddrHigh(ext_dmabuf->phys);
3344 sli_cfg_mbx->un.sli_config_emb1_subsys.
3345 hbd[index].pa_lo =
3346 putPaddrLow(ext_dmabuf->phys);
3347 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3348 "3008 SLI_CONFIG(hbd)[%d], "
3349 "bufLen:%d, addrHi:x%x, addrLo:x%x\n",
3350 index,
3351 bsg_bf_get(lpfc_mbox_sli_config_ecmn_hbd_len,
3352 &sli_cfg_mbx->un.
3353 sli_config_emb1_subsys.hbd[index]),
3354 sli_cfg_mbx->un.sli_config_emb1_subsys.
3355 hbd[index].pa_hi,
3356 sli_cfg_mbx->un.sli_config_emb1_subsys.
3357 hbd[index].pa_lo);
3358 }
3359 }
3360 return;
3361}
3362
3363/**
3364 * lpfc_bsg_sli_cfg_mse_read_cmd_ext - sli_config non-embedded mailbox cmd read
3365 * @phba: Pointer to HBA context object.
3366 * @mb: Pointer to a BSG mailbox object.
3367 * @nemb_tp: Enumerate of non-embedded mailbox command type.
3368 * @dmabuff: Pointer to a DMA buffer descriptor.
3369 *
3370 * This routine performs SLI_CONFIG (0x9B) read mailbox command operation with
3371 * non-embedded external buffers.
3372 **/
3373static int
3374lpfc_bsg_sli_cfg_read_cmd_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
3375 enum nemb_type nemb_tp,
3376 struct lpfc_dmabuf *dmabuf)
3377{
3378 struct lpfc_sli_config_mbox *sli_cfg_mbx;
3379 struct dfc_mbox_req *mbox_req;
3380 struct lpfc_dmabuf *curr_dmabuf, *next_dmabuf;
3381 uint32_t ext_buf_cnt, ext_buf_index;
3382 struct lpfc_dmabuf *ext_dmabuf = NULL;
3383 struct bsg_job_data *dd_data = NULL;
3384 LPFC_MBOXQ_t *pmboxq = NULL;
3385 MAILBOX_t *pmb;
3386 uint8_t *pmbx;
3387 int rc, i;
3388
3389 mbox_req =
3390 (struct dfc_mbox_req *)job->request->rqst_data.h_vendor.vendor_cmd;
3391
3392 /* pointer to the start of mailbox command */
3393 sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt;
3394
3395 if (nemb_tp == nemb_mse) {
3396 ext_buf_cnt = bsg_bf_get(lpfc_mbox_hdr_mse_cnt,
3397 &sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr);
3398 if (ext_buf_cnt > LPFC_MBX_SLI_CONFIG_MAX_MSE) {
3399 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
3400 "2945 Handled SLI_CONFIG(mse) rd, "
3401 "ext_buf_cnt(%d) out of range(%d)\n",
3402 ext_buf_cnt,
3403 LPFC_MBX_SLI_CONFIG_MAX_MSE);
3404 rc = -ERANGE;
3405 goto job_error;
3406 }
3407 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3408 "2941 Handled SLI_CONFIG(mse) rd, "
3409 "ext_buf_cnt:%d\n", ext_buf_cnt);
3410 } else {
3411 /* sanity check on interface type for support */
3412 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
3413 LPFC_SLI_INTF_IF_TYPE_2) {
3414 rc = -ENODEV;
3415 goto job_error;
3416 }
3417 /* nemb_tp == nemb_hbd */
3418 ext_buf_cnt = sli_cfg_mbx->un.sli_config_emb1_subsys.hbd_count;
3419 if (ext_buf_cnt > LPFC_MBX_SLI_CONFIG_MAX_HBD) {
3420 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
3421 "2946 Handled SLI_CONFIG(hbd) rd, "
3422 "ext_buf_cnt(%d) out of range(%d)\n",
3423 ext_buf_cnt,
3424 LPFC_MBX_SLI_CONFIG_MAX_HBD);
3425 rc = -ERANGE;
3426 goto job_error;
3427 }
3428 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3429 "2942 Handled SLI_CONFIG(hbd) rd, "
3430 "ext_buf_cnt:%d\n", ext_buf_cnt);
3431 }
3432
3433 /* reject non-embedded mailbox command with no external buffer */
3434 if (ext_buf_cnt == 0) {
3435 rc = -EPERM;
3436 goto job_error;
3437 } else if (ext_buf_cnt > 1) {
3438 /* additional external read buffers */
3439 for (i = 1; i < ext_buf_cnt; i++) {
3440 ext_dmabuf = lpfc_bsg_dma_page_alloc(phba);
3441 if (!ext_dmabuf) {
3442 rc = -ENOMEM;
3443 goto job_error;
3444 }
3445 list_add_tail(&ext_dmabuf->list,
3446 &phba->mbox_ext_buf_ctx.ext_dmabuf_list);
3447 }
3448 }
3449
3450 /* bsg tracking structure */
3451 dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
3452 if (!dd_data) {
3453 rc = -ENOMEM;
3454 goto job_error;
3455 }
3456
3457 /* mailbox command structure for base driver */
3458 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
3459 if (!pmboxq) {
3460 rc = -ENOMEM;
3461 goto job_error;
3462 }
3463 memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
3464
3465 /* for the first external buffer */
3466 lpfc_bsg_sli_cfg_dma_desc_setup(phba, nemb_tp, 0, dmabuf, dmabuf);
3467
3468 /* for the rest of external buffer descriptors if any */
3469 if (ext_buf_cnt > 1) {
3470 ext_buf_index = 1;
3471 list_for_each_entry_safe(curr_dmabuf, next_dmabuf,
3472 &phba->mbox_ext_buf_ctx.ext_dmabuf_list, list) {
3473 lpfc_bsg_sli_cfg_dma_desc_setup(phba, nemb_tp,
3474 ext_buf_index, dmabuf,
3475 curr_dmabuf);
3476 ext_buf_index++;
3477 }
3478 }
3479
3480 /* construct base driver mbox command */
3481 pmb = &pmboxq->u.mb;
3482 pmbx = (uint8_t *)dmabuf->virt;
3483 memcpy(pmb, pmbx, sizeof(*pmb));
3484 pmb->mbxOwner = OWN_HOST;
3485 pmboxq->vport = phba->pport;
3486
3487 /* multi-buffer handling context */
3488 phba->mbox_ext_buf_ctx.nembType = nemb_tp;
3489 phba->mbox_ext_buf_ctx.mboxType = mbox_rd;
3490 phba->mbox_ext_buf_ctx.numBuf = ext_buf_cnt;
3491 phba->mbox_ext_buf_ctx.mbxTag = mbox_req->extMboxTag;
3492 phba->mbox_ext_buf_ctx.seqNum = mbox_req->extSeqNum;
3493 phba->mbox_ext_buf_ctx.mbx_dmabuf = dmabuf;
3494
3495 /* callback for multi-buffer read mailbox command */
3496 pmboxq->mbox_cmpl = lpfc_bsg_issue_read_mbox_ext_cmpl;
3497
3498 /* context fields to callback function */
3499 pmboxq->context1 = dd_data;
3500 dd_data->type = TYPE_MBOX;
3501 dd_data->context_un.mbox.pmboxq = pmboxq;
3502 dd_data->context_un.mbox.mb = (MAILBOX_t *)pmbx;
3503 dd_data->context_un.mbox.set_job = job;
3504 job->dd_data = dd_data;
3505
3506 /* state change */
3507 phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_PORT;
3508
3509 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
3510 if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY)) {
3511 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3512 "2947 Issued SLI_CONFIG ext-buffer "
3513 "maibox command, rc:x%x\n", rc);
3514 return 1;
3515 }
3516 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
3517 "2948 Failed to issue SLI_CONFIG ext-buffer "
3518 "maibox command, rc:x%x\n", rc);
3519 rc = -EPIPE;
3520
3521job_error:
3522 if (pmboxq)
3523 mempool_free(pmboxq, phba->mbox_mem_pool);
3524 lpfc_bsg_dma_page_list_free(phba,
3525 &phba->mbox_ext_buf_ctx.ext_dmabuf_list);
3526 kfree(dd_data);
3527 phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_IDLE;
3528 return rc;
3529}
3530
3531/**
3532 * lpfc_bsg_sli_cfg_write_cmd_ext - sli_config non-embedded mailbox cmd write
3533 * @phba: Pointer to HBA context object.
3534 * @mb: Pointer to a BSG mailbox object.
3535 * @dmabuff: Pointer to a DMA buffer descriptor.
3536 *
3537 * This routine performs SLI_CONFIG (0x9B) write mailbox command operation with
3538 * non-embedded external buffers.
3539 **/
3540static int
3541lpfc_bsg_sli_cfg_write_cmd_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
3542 enum nemb_type nemb_tp,
3543 struct lpfc_dmabuf *dmabuf)
3544{
3545 struct dfc_mbox_req *mbox_req;
3546 struct lpfc_sli_config_mbox *sli_cfg_mbx;
3547 uint32_t ext_buf_cnt;
3548 struct bsg_job_data *dd_data = NULL;
3549 LPFC_MBOXQ_t *pmboxq = NULL;
3550 MAILBOX_t *pmb;
3551 uint8_t *mbx;
3552 int rc = 0, i;
3553
3554 mbox_req =
3555 (struct dfc_mbox_req *)job->request->rqst_data.h_vendor.vendor_cmd;
3556
3557 /* pointer to the start of mailbox command */
3558 sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt;
3559
3560 if (nemb_tp == nemb_mse) {
3561 ext_buf_cnt = bsg_bf_get(lpfc_mbox_hdr_mse_cnt,
3562 &sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr);
3563 if (ext_buf_cnt > LPFC_MBX_SLI_CONFIG_MAX_MSE) {
3564 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
3565 "2953 Handled SLI_CONFIG(mse) wr, "
3566 "ext_buf_cnt(%d) out of range(%d)\n",
3567 ext_buf_cnt,
3568 LPFC_MBX_SLI_CONFIG_MAX_MSE);
3569 return -ERANGE;
3570 }
3571 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3572 "2949 Handled SLI_CONFIG(mse) wr, "
3573 "ext_buf_cnt:%d\n", ext_buf_cnt);
3574 } else {
3575 /* sanity check on interface type for support */
3576 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
3577 LPFC_SLI_INTF_IF_TYPE_2)
3578 return -ENODEV;
3579 /* nemb_tp == nemb_hbd */
3580 ext_buf_cnt = sli_cfg_mbx->un.sli_config_emb1_subsys.hbd_count;
3581 if (ext_buf_cnt > LPFC_MBX_SLI_CONFIG_MAX_HBD) {
3582 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
3583 "2954 Handled SLI_CONFIG(hbd) wr, "
3584 "ext_buf_cnt(%d) out of range(%d)\n",
3585 ext_buf_cnt,
3586 LPFC_MBX_SLI_CONFIG_MAX_HBD);
3587 return -ERANGE;
3588 }
3589 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3590 "2950 Handled SLI_CONFIG(hbd) wr, "
3591 "ext_buf_cnt:%d\n", ext_buf_cnt);
3592 }
3593
3594 if (ext_buf_cnt == 0)
3595 return -EPERM;
3596
3597 /* for the first external buffer */
3598 lpfc_bsg_sli_cfg_dma_desc_setup(phba, nemb_tp, 0, dmabuf, dmabuf);
3599
3600 /* log for looking forward */
3601 for (i = 1; i < ext_buf_cnt; i++) {
3602 if (nemb_tp == nemb_mse)
3603 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3604 "2951 SLI_CONFIG(mse), buf[%d]-length:%d\n",
3605 i, sli_cfg_mbx->un.sli_config_emb0_subsys.
3606 mse[i].buf_len);
3607 else
3608 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3609 "2952 SLI_CONFIG(hbd), buf[%d]-length:%d\n",
3610 i, bsg_bf_get(lpfc_mbox_sli_config_ecmn_hbd_len,
3611 &sli_cfg_mbx->un.sli_config_emb1_subsys.
3612 hbd[i]));
3613 }
3614
3615 /* multi-buffer handling context */
3616 phba->mbox_ext_buf_ctx.nembType = nemb_tp;
3617 phba->mbox_ext_buf_ctx.mboxType = mbox_wr;
3618 phba->mbox_ext_buf_ctx.numBuf = ext_buf_cnt;
3619 phba->mbox_ext_buf_ctx.mbxTag = mbox_req->extMboxTag;
3620 phba->mbox_ext_buf_ctx.seqNum = mbox_req->extSeqNum;
3621 phba->mbox_ext_buf_ctx.mbx_dmabuf = dmabuf;
3622
3623 if (ext_buf_cnt == 1) {
3624 /* bsg tracking structure */
3625 dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
3626 if (!dd_data) {
3627 rc = -ENOMEM;
3628 goto job_error;
3629 }
3630
3631 /* mailbox command structure for base driver */
3632 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
3633 if (!pmboxq) {
3634 rc = -ENOMEM;
3635 goto job_error;
3636 }
3637 memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
3638 pmb = &pmboxq->u.mb;
3639 mbx = (uint8_t *)dmabuf->virt;
3640 memcpy(pmb, mbx, sizeof(*pmb));
3641 pmb->mbxOwner = OWN_HOST;
3642 pmboxq->vport = phba->pport;
3643
3644 /* callback for multi-buffer write mailbox command */
3645 pmboxq->mbox_cmpl = lpfc_bsg_issue_write_mbox_ext_cmpl;
3646
3647 /* context fields to callback function */
3648 pmboxq->context1 = dd_data;
3649 dd_data->type = TYPE_MBOX;
3650 dd_data->context_un.mbox.pmboxq = pmboxq;
3651 dd_data->context_un.mbox.mb = (MAILBOX_t *)mbx;
3652 dd_data->context_un.mbox.set_job = job;
3653 job->dd_data = dd_data;
3654
3655 /* state change */
3656 phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_PORT;
3657
3658 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
3659 if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY)) {
3660 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3661 "2955 Issued SLI_CONFIG ext-buffer "
3662 "maibox command, rc:x%x\n", rc);
3663 return 1;
3664 }
3665 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
3666 "2956 Failed to issue SLI_CONFIG ext-buffer "
3667 "maibox command, rc:x%x\n", rc);
3668 rc = -EPIPE;
3669 }
3670
3671job_error:
3672 if (pmboxq)
3673 mempool_free(pmboxq, phba->mbox_mem_pool);
3674 kfree(dd_data);
3675
3676 return rc;
3677}
3678
3679/**
3680 * lpfc_bsg_handle_sli_cfg_mbox - handle sli-cfg mailbox cmd with ext buffer
3681 * @phba: Pointer to HBA context object.
3682 * @mb: Pointer to a BSG mailbox object.
3683 * @dmabuff: Pointer to a DMA buffer descriptor.
3684 *
3685 * This routine handles SLI_CONFIG (0x9B) mailbox command with non-embedded
3686 * external buffers, including both 0x9B with non-embedded MSEs and 0x9B
3687 * with embedded subsystem 0x1 and opcodes with external HBDs.
3688 **/
3689static int
3690lpfc_bsg_handle_sli_cfg_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
3691 struct lpfc_dmabuf *dmabuf)
3692{
3693 struct lpfc_sli_config_mbox *sli_cfg_mbx;
3694 uint32_t subsys;
3695 uint32_t opcode;
3696 int rc = SLI_CONFIG_NOT_HANDLED;
3697
3698 /* state change */
3699 phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_HOST;
3700
3701 sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt;
3702
3703 if (!bsg_bf_get(lpfc_mbox_hdr_emb,
3704 &sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr)) {
3705 subsys = bsg_bf_get(lpfc_emb0_subcmnd_subsys,
3706 &sli_cfg_mbx->un.sli_config_emb0_subsys);
3707 opcode = bsg_bf_get(lpfc_emb0_subcmnd_opcode,
3708 &sli_cfg_mbx->un.sli_config_emb0_subsys);
3709 if (subsys == SLI_CONFIG_SUBSYS_FCOE) {
3710 switch (opcode) {
3711 case FCOE_OPCODE_READ_FCF:
3712 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3713 "2957 Handled SLI_CONFIG "
3714 "subsys_fcoe, opcode:x%x\n",
3715 opcode);
3716 rc = lpfc_bsg_sli_cfg_read_cmd_ext(phba, job,
3717 nemb_mse, dmabuf);
3718 break;
3719 case FCOE_OPCODE_ADD_FCF:
3720 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3721 "2958 Handled SLI_CONFIG "
3722 "subsys_fcoe, opcode:x%x\n",
3723 opcode);
3724 rc = lpfc_bsg_sli_cfg_write_cmd_ext(phba, job,
3725 nemb_mse, dmabuf);
3726 break;
3727 default:
3728 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3729 "2959 Not handled SLI_CONFIG "
3730 "subsys_fcoe, opcode:x%x\n",
3731 opcode);
3732 rc = SLI_CONFIG_NOT_HANDLED;
3733 break;
3734 }
3735 } else {
3736 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3737 "2977 Handled SLI_CONFIG "
3738 "subsys:x%d, opcode:x%x\n",
3739 subsys, opcode);
3740 rc = SLI_CONFIG_NOT_HANDLED;
3741 }
3742 } else {
3743 subsys = bsg_bf_get(lpfc_emb1_subcmnd_subsys,
3744 &sli_cfg_mbx->un.sli_config_emb1_subsys);
3745 opcode = bsg_bf_get(lpfc_emb1_subcmnd_opcode,
3746 &sli_cfg_mbx->un.sli_config_emb1_subsys);
3747 if (subsys == SLI_CONFIG_SUBSYS_COMN) {
3748 switch (opcode) {
3749 case COMN_OPCODE_READ_OBJECT:
3750 case COMN_OPCODE_READ_OBJECT_LIST:
3751 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3752 "2960 Handled SLI_CONFIG "
3753 "subsys_comn, opcode:x%x\n",
3754 opcode);
3755 rc = lpfc_bsg_sli_cfg_read_cmd_ext(phba, job,
3756 nemb_hbd, dmabuf);
3757 break;
3758 case COMN_OPCODE_WRITE_OBJECT:
3759 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3760 "2961 Handled SLI_CONFIG "
3761 "subsys_comn, opcode:x%x\n",
3762 opcode);
3763 rc = lpfc_bsg_sli_cfg_write_cmd_ext(phba, job,
3764 nemb_hbd, dmabuf);
3765 break;
3766 default:
3767 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3768 "2962 Not handled SLI_CONFIG "
3769 "subsys_comn, opcode:x%x\n",
3770 opcode);
3771 rc = SLI_CONFIG_NOT_HANDLED;
3772 break;
3773 }
3774 } else {
3775 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3776 "2978 Handled SLI_CONFIG "
3777 "subsys:x%d, opcode:x%x\n",
3778 subsys, opcode);
3779 rc = SLI_CONFIG_NOT_HANDLED;
3780 }
3781 }
3782 return rc;
3783}
3784
3785/**
3786 * lpfc_bsg_mbox_ext_abort - request to abort mbox command with ext buffers
3787 * @phba: Pointer to HBA context object.
3788 *
3789 * This routine is for requesting to abort a pass-through mailbox command with
3790 * multiple external buffers due to error condition.
3791 **/
3792static void
3793lpfc_bsg_mbox_ext_abort(struct lpfc_hba *phba)
3794{
3795 if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_PORT)
3796 phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_ABTS;
3797 else
3798 lpfc_bsg_mbox_ext_session_reset(phba);
3799 return;
3800}
3801
3802/**
3803 * lpfc_bsg_read_ebuf_get - get the next mailbox read external buffer
3804 * @phba: Pointer to HBA context object.
3805 * @dmabuf: Pointer to a DMA buffer descriptor.
3806 *
3807 * This routine extracts the next mailbox read external buffer back to
3808 * user space through BSG.
3809 **/
3810static int
3811lpfc_bsg_read_ebuf_get(struct lpfc_hba *phba, struct fc_bsg_job *job)
3812{
3813 struct lpfc_sli_config_mbox *sli_cfg_mbx;
3814 struct lpfc_dmabuf *dmabuf;
3815 uint8_t *pbuf;
3816 uint32_t size;
3817 uint32_t index;
3818
3819 index = phba->mbox_ext_buf_ctx.seqNum;
3820 phba->mbox_ext_buf_ctx.seqNum++;
3821
3822 sli_cfg_mbx = (struct lpfc_sli_config_mbox *)
3823 phba->mbox_ext_buf_ctx.mbx_dmabuf->virt;
3824
3825 if (phba->mbox_ext_buf_ctx.nembType == nemb_mse) {
3826 size = bsg_bf_get(lpfc_mbox_sli_config_mse_len,
3827 &sli_cfg_mbx->un.sli_config_emb0_subsys.mse[index]);
3828 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3829 "2963 SLI_CONFIG (mse) ext-buffer rd get "
3830 "buffer[%d], size:%d\n", index, size);
3831 } else {
3832 size = bsg_bf_get(lpfc_mbox_sli_config_ecmn_hbd_len,
3833 &sli_cfg_mbx->un.sli_config_emb1_subsys.hbd[index]);
3834 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3835 "2964 SLI_CONFIG (hbd) ext-buffer rd get "
3836 "buffer[%d], size:%d\n", index, size);
3837 }
3838 if (list_empty(&phba->mbox_ext_buf_ctx.ext_dmabuf_list))
3839 return -EPIPE;
3840 dmabuf = list_first_entry(&phba->mbox_ext_buf_ctx.ext_dmabuf_list,
3841 struct lpfc_dmabuf, list);
3842 list_del_init(&dmabuf->list);
3843 pbuf = (uint8_t *)dmabuf->virt;
3844 job->reply->reply_payload_rcv_len =
3845 sg_copy_from_buffer(job->reply_payload.sg_list,
3846 job->reply_payload.sg_cnt,
3847 pbuf, size);
3848
3849 lpfc_bsg_dma_page_free(phba, dmabuf);
3850
3851 if (phba->mbox_ext_buf_ctx.seqNum == phba->mbox_ext_buf_ctx.numBuf) {
3852 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3853 "2965 SLI_CONFIG (hbd) ext-buffer rd mbox "
3854 "command session done\n");
3855 lpfc_bsg_mbox_ext_session_reset(phba);
3856 }
3857
3858 job->reply->result = 0;
3859 job->job_done(job);
3860
3861 return SLI_CONFIG_HANDLED;
3862}
3863
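The read path above hands exactly one external buffer back per BSG request and tears the session down once seqNum reaches numBuf. A compilable user-space sketch of that sequencing, with illustrative types in place of the driver's context and DMA buffers, follows:

#include <stdio.h>

#define NUM_BUF 3

struct rd_session {
	int seq_num;                 /* next buffer to hand back */
	int num_buf;                 /* total buffers in this session */
	const char *bufs[NUM_BUF];   /* stand-ins for the DMA buffers */
};

/* returns 1 and copies out one buffer, or 0 when the session is exhausted */
static int read_ebuf_get(struct rd_session *s, const char **out)
{
	if (s->seq_num >= s->num_buf)
		return 0;
	*out = s->bufs[s->seq_num++];
	if (s->seq_num == s->num_buf)
		printf("session done, resetting context\n");
	return 1;
}

int main(void)
{
	struct rd_session s = { 0, NUM_BUF, { "ebuf0", "ebuf1", "ebuf2" } };
	const char *buf;

	while (read_ebuf_get(&s, &buf))
		printf("got %s\n", buf);
	return 0;
}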
3864/**
3865 * lpfc_bsg_write_ebuf_set - set the next mailbox write external buffer
3866 * @phba: Pointer to HBA context object.
3867 * @dmabuf: Pointer to a DMA buffer descriptor.
3868 *
3869 * This routine sets up the next mailbox write external buffer obtained
3870 * from user space through BSG.
3871 **/
3872static int
3873lpfc_bsg_write_ebuf_set(struct lpfc_hba *phba, struct fc_bsg_job *job,
3874 struct lpfc_dmabuf *dmabuf)
3875{
3876 struct lpfc_sli_config_mbox *sli_cfg_mbx;
3877 struct bsg_job_data *dd_data = NULL;
3878 LPFC_MBOXQ_t *pmboxq = NULL;
3879 MAILBOX_t *pmb;
3880 enum nemb_type nemb_tp;
3881 uint8_t *pbuf;
3882 uint32_t size;
3883 uint32_t index;
3884 int rc;
3885
3886 index = phba->mbox_ext_buf_ctx.seqNum;
3887 phba->mbox_ext_buf_ctx.seqNum++;
3888 nemb_tp = phba->mbox_ext_buf_ctx.nembType;
3889
3890 sli_cfg_mbx = (struct lpfc_sli_config_mbox *)
3891 phba->mbox_ext_buf_ctx.mbx_dmabuf->virt;
3892
3893 dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
3894 if (!dd_data) {
3895 rc = -ENOMEM;
3896 goto job_error;
3897 }
3898
3899 pbuf = (uint8_t *)dmabuf->virt;
3900 size = job->request_payload.payload_len;
3901 sg_copy_to_buffer(job->request_payload.sg_list,
3902 job->request_payload.sg_cnt,
3903 pbuf, size);
3904
3905 if (phba->mbox_ext_buf_ctx.nembType == nemb_mse) {
3906 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3907 "2966 SLI_CONFIG (mse) ext-buffer wr set "
3908 "buffer[%d], size:%d\n",
3909 phba->mbox_ext_buf_ctx.seqNum, size);
3910
3911 } else {
3912 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3913 "2967 SLI_CONFIG (hbd) ext-buffer wr set "
3914 "buffer[%d], size:%d\n",
3915 phba->mbox_ext_buf_ctx.seqNum, size);
3916
3917 }
3918
3919 /* set up external buffer descriptor and add to external buffer list */
3920 lpfc_bsg_sli_cfg_dma_desc_setup(phba, nemb_tp, index,
3921 phba->mbox_ext_buf_ctx.mbx_dmabuf,
3922 dmabuf);
3923 list_add_tail(&dmabuf->list, &phba->mbox_ext_buf_ctx.ext_dmabuf_list);
3924
3925 if (phba->mbox_ext_buf_ctx.seqNum == phba->mbox_ext_buf_ctx.numBuf) {
3926 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3927 "2968 SLI_CONFIG ext-buffer wr all %d "
3928 "ebuffers received\n",
3929 phba->mbox_ext_buf_ctx.numBuf);
3930 /* mailbox command structure for base driver */
3931 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
3932 if (!pmboxq) {
3933 rc = -ENOMEM;
3934 goto job_error;
3935 }
3936 memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
3937 pbuf = (uint8_t *)phba->mbox_ext_buf_ctx.mbx_dmabuf->virt;
3938 pmb = &pmboxq->u.mb;
3939 memcpy(pmb, pbuf, sizeof(*pmb));
3940 pmb->mbxOwner = OWN_HOST;
3941 pmboxq->vport = phba->pport;
3942
3943 /* callback for multi-buffer write mailbox command */
3944 pmboxq->mbox_cmpl = lpfc_bsg_issue_write_mbox_ext_cmpl;
3945
3946 /* context fields to callback function */
3947 pmboxq->context1 = dd_data;
3948 dd_data->type = TYPE_MBOX;
3949 dd_data->context_un.mbox.pmboxq = pmboxq;
3950 dd_data->context_un.mbox.mb = (MAILBOX_t *)pbuf;
3951 dd_data->context_un.mbox.set_job = job;
3952 job->dd_data = dd_data;
3953
3954 /* state change */
3955 phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_PORT;
3956
3957 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
3958 if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY)) {
3959 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3960 "2969 Issued SLI_CONFIG ext-buffer "
3961					"mailbox command, rc:x%x\n", rc);
3962 return 1;
3963 }
3964 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
3965 "2970 Failed to issue SLI_CONFIG ext-buffer "
3966				"mailbox command, rc:x%x\n", rc);
3967 rc = -EPIPE;
3968 goto job_error;
3969 }
3970
3971	/* wait for additional external buffers */
3972 job->reply->result = 0;
3973 job->job_done(job);
3974 return SLI_CONFIG_HANDLED;
3975
3976job_error:
3977 lpfc_bsg_dma_page_free(phba, dmabuf);
3978 kfree(dd_data);
3979
3980 return rc;
3981}
3982
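The write path is the mirror image: each BSG request contributes one buffer, and the mailbox command is only issued to the port once all advertised buffers have arrived. A hedged stand-alone model of that accumulation (names and types are stand-ins, not driver structures):

#include <stdio.h>

struct wr_session {
	int seq_num;
	int num_buf;
};

static int issue_mbox(void)
{
	printf("all buffers received, issuing SLI_CONFIG mailbox\n");
	return 0;
}

/* returns 1 once the command has been issued, 0 while still collecting */
static int write_ebuf_set(struct wr_session *s)
{
	s->seq_num++;
	printf("queued ext buffer %d of %d\n", s->seq_num, s->num_buf);
	if (s->seq_num == s->num_buf)
		return issue_mbox() == 0;
	return 0;
}

int main(void)
{
	struct wr_session s = { 0, 3 };

	while (!write_ebuf_set(&s))
		;
	return 0;
}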
3983/**
3984 * lpfc_bsg_handle_sli_cfg_ebuf - handle ext buffer with sli-cfg mailbox cmd
3985 * @phba: Pointer to HBA context object.
3986 * @job: Pointer to the fc_bsg_job object.
3987 * @dmabuf: Pointer to a DMA buffer descriptor.
3988 *
3989 * This routine handles the external buffer with SLI_CONFIG (0x9B) mailbox
3990 * command with multiple non-embedded external buffers.
3991 **/
3992static int
3993lpfc_bsg_handle_sli_cfg_ebuf(struct lpfc_hba *phba, struct fc_bsg_job *job,
3994 struct lpfc_dmabuf *dmabuf)
3995{
3996 int rc;
3997
3998 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3999 "2971 SLI_CONFIG buffer (type:x%x)\n",
4000 phba->mbox_ext_buf_ctx.mboxType);
4001
4002 if (phba->mbox_ext_buf_ctx.mboxType == mbox_rd) {
4003 if (phba->mbox_ext_buf_ctx.state != LPFC_BSG_MBOX_DONE) {
4004 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
4005 "2972 SLI_CONFIG rd buffer state "
4006 "mismatch:x%x\n",
4007 phba->mbox_ext_buf_ctx.state);
4008 lpfc_bsg_mbox_ext_abort(phba);
4009 return -EPIPE;
4010 }
4011 rc = lpfc_bsg_read_ebuf_get(phba, job);
4012 if (rc == SLI_CONFIG_HANDLED)
4013 lpfc_bsg_dma_page_free(phba, dmabuf);
4014 } else { /* phba->mbox_ext_buf_ctx.mboxType == mbox_wr */
4015 if (phba->mbox_ext_buf_ctx.state != LPFC_BSG_MBOX_HOST) {
4016 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
4017 "2973 SLI_CONFIG wr buffer state "
4018 "mismatch:x%x\n",
4019 phba->mbox_ext_buf_ctx.state);
4020 lpfc_bsg_mbox_ext_abort(phba);
4021 return -EPIPE;
4022 }
4023 rc = lpfc_bsg_write_ebuf_set(phba, job, dmabuf);
4024 }
4025 return rc;
4026}
4027
4028/**
4029 * lpfc_bsg_handle_sli_cfg_ext - handle sli-cfg mailbox with external buffer
4030 * @phba: Pointer to HBA context object.
4031 * @job: Pointer to the fc_bsg_job object.
4032 * @dmabuf: Pointer to a DMA buffer descriptor.
4033 *
4034 * This routine checks and handles non-embedded multi-buffer SLI_CONFIG
4035 * (0x9B) mailbox commands and external buffers.
4036 **/
4037static int
4038lpfc_bsg_handle_sli_cfg_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
4039 struct lpfc_dmabuf *dmabuf)
4040{
4041 struct dfc_mbox_req *mbox_req;
4042 int rc;
4043
4044 mbox_req =
4045 (struct dfc_mbox_req *)job->request->rqst_data.h_vendor.vendor_cmd;
4046
4047 /* mbox command with/without single external buffer */
4048 if (mbox_req->extMboxTag == 0 && mbox_req->extSeqNum == 0)
4049 return SLI_CONFIG_NOT_HANDLED;
4050
4051 /* mbox command and first external buffer */
4052 if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_IDLE) {
4053 if (mbox_req->extSeqNum == 1) {
4054 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4055 "2974 SLI_CONFIG mailbox: tag:%d, "
4056 "seq:%d\n", mbox_req->extMboxTag,
4057 mbox_req->extSeqNum);
4058 rc = lpfc_bsg_handle_sli_cfg_mbox(phba, job, dmabuf);
4059 return rc;
4060 } else
4061 goto sli_cfg_ext_error;
4062 }
4063
4064 /*
4065 * handle additional external buffers
4066 */
4067
4068 /* check broken pipe conditions */
4069 if (mbox_req->extMboxTag != phba->mbox_ext_buf_ctx.mbxTag)
4070 goto sli_cfg_ext_error;
4071 if (mbox_req->extSeqNum > phba->mbox_ext_buf_ctx.numBuf)
4072 goto sli_cfg_ext_error;
4073 if (mbox_req->extSeqNum != phba->mbox_ext_buf_ctx.seqNum + 1)
4074 goto sli_cfg_ext_error;
4075
4076 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4077 "2975 SLI_CONFIG mailbox external buffer: "
4078 "extSta:x%x, tag:%d, seq:%d\n",
4079 phba->mbox_ext_buf_ctx.state, mbox_req->extMboxTag,
4080 mbox_req->extSeqNum);
4081 rc = lpfc_bsg_handle_sli_cfg_ebuf(phba, job, dmabuf);
4082 return rc;
4083
4084sli_cfg_ext_error:
4085 /* all other cases, broken pipe */
4086 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
4087 "2976 SLI_CONFIG mailbox broken pipe: "
4088 "ctxSta:x%x, ctxNumBuf:%d "
4089 "ctxTag:%d, ctxSeq:%d, tag:%d, seq:%d\n",
4090 phba->mbox_ext_buf_ctx.state,
4091 phba->mbox_ext_buf_ctx.numBuf,
4092 phba->mbox_ext_buf_ctx.mbxTag,
4093 phba->mbox_ext_buf_ctx.seqNum,
4094 mbox_req->extMboxTag, mbox_req->extSeqNum);
4095
4096 lpfc_bsg_mbox_ext_session_reset(phba);
4097
4098 return -EPIPE;
4099}
4100
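The broken-pipe checks above boil down to three conditions on each follow-up buffer: same tag, sequence number within numBuf, and strictly one past the last accepted buffer; a fresh session must start at sequence 1. A small self-contained model of those checks (illustrative only):

#include <stdbool.h>
#include <stdio.h>

struct ext_ctx { bool idle; int tag; int num_buf; int seq_num; };

static bool ext_req_ok(const struct ext_ctx *ctx, int req_tag, int req_seq)
{
	if (ctx->idle)                       /* new session must start at seq 1 */
		return req_seq == 1;
	if (req_tag != ctx->tag)             /* wrong session tag */
		return false;
	if (req_seq > ctx->num_buf)          /* beyond the advertised count */
		return false;
	return req_seq == ctx->seq_num + 1;  /* strictly in order */
}

int main(void)
{
	struct ext_ctx ctx = { false, 7, 4, 2 };

	printf("%d\n", ext_req_ok(&ctx, 7, 3)); /* 1: next in sequence */
	printf("%d\n", ext_req_ok(&ctx, 7, 5)); /* 0: past numBuf, broken pipe */
	return 0;
}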
4101/**
2622 * lpfc_bsg_issue_mbox - issues a mailbox command on behalf of an app 4102 * lpfc_bsg_issue_mbox - issues a mailbox command on behalf of an app
2623 * @phba: Pointer to HBA context object. 4103 * @phba: Pointer to HBA context object.
2624 * @mb: Pointer to a mailbox object. 4104 * @mb: Pointer to a mailbox object.
@@ -2638,22 +4118,21 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
2638 LPFC_MBOXQ_t *pmboxq = NULL; /* internal mailbox queue */ 4118 LPFC_MBOXQ_t *pmboxq = NULL; /* internal mailbox queue */
2639 MAILBOX_t *pmb; /* shortcut to the pmboxq mailbox */ 4119 MAILBOX_t *pmb; /* shortcut to the pmboxq mailbox */
2640 /* a 4k buffer to hold the mb and extended data from/to the bsg */ 4120 /* a 4k buffer to hold the mb and extended data from/to the bsg */
2641 MAILBOX_t *mb = NULL; 4121 uint8_t *pmbx = NULL;
2642 struct bsg_job_data *dd_data = NULL; /* bsg data tracking structure */ 4122 struct bsg_job_data *dd_data = NULL; /* bsg data tracking structure */
2643 uint32_t size; 4123 struct lpfc_dmabuf *dmabuf = NULL;
2644 struct lpfc_dmabuf *rxbmp = NULL; /* for biu diag */ 4124 struct dfc_mbox_req *mbox_req;
2645 struct lpfc_dmabufext *dmp = NULL; /* for biu diag */
2646 struct ulp_bde64 *rxbpl = NULL;
2647 struct dfc_mbox_req *mbox_req = (struct dfc_mbox_req *)
2648 job->request->rqst_data.h_vendor.vendor_cmd;
2649 struct READ_EVENT_LOG_VAR *rdEventLog; 4125 struct READ_EVENT_LOG_VAR *rdEventLog;
2650 uint32_t transmit_length, receive_length, mode; 4126 uint32_t transmit_length, receive_length, mode;
4127 struct lpfc_mbx_sli4_config *sli4_config;
2651 struct lpfc_mbx_nembed_cmd *nembed_sge; 4128 struct lpfc_mbx_nembed_cmd *nembed_sge;
2652 struct mbox_header *header; 4129 struct mbox_header *header;
2653 struct ulp_bde64 *bde; 4130 struct ulp_bde64 *bde;
2654 uint8_t *ext = NULL; 4131 uint8_t *ext = NULL;
2655 int rc = 0; 4132 int rc = 0;
2656 uint8_t *from; 4133 uint8_t *from;
4134 uint32_t size;
4135
2657 4136
2658 /* in case no data is transferred */ 4137 /* in case no data is transferred */
2659 job->reply->reply_payload_rcv_len = 0; 4138 job->reply->reply_payload_rcv_len = 0;
@@ -2665,6 +4144,18 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
2665 goto job_done; 4144 goto job_done;
2666 } 4145 }
2667 4146
4147 /*
4148 * Don't allow mailbox commands to be sent when blocked or when in
4149 * the middle of discovery
4150 */
4151 if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) {
4152 rc = -EAGAIN;
4153 goto job_done;
4154 }
4155
4156 mbox_req =
4157 (struct dfc_mbox_req *)job->request->rqst_data.h_vendor.vendor_cmd;
4158
2668 /* check if requested extended data lengths are valid */ 4159 /* check if requested extended data lengths are valid */
2669 if ((mbox_req->inExtWLen > BSG_MBOX_SIZE/sizeof(uint32_t)) || 4160 if ((mbox_req->inExtWLen > BSG_MBOX_SIZE/sizeof(uint32_t)) ||
2670 (mbox_req->outExtWLen > BSG_MBOX_SIZE/sizeof(uint32_t))) { 4161 (mbox_req->outExtWLen > BSG_MBOX_SIZE/sizeof(uint32_t))) {
@@ -2672,6 +4163,32 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
2672 goto job_done; 4163 goto job_done;
2673 } 4164 }
2674 4165
4166 dmabuf = lpfc_bsg_dma_page_alloc(phba);
4167 if (!dmabuf || !dmabuf->virt) {
4168 rc = -ENOMEM;
4169 goto job_done;
4170 }
4171
4172 /* Get the mailbox command or external buffer from BSG */
4173 pmbx = (uint8_t *)dmabuf->virt;
4174 size = job->request_payload.payload_len;
4175 sg_copy_to_buffer(job->request_payload.sg_list,
4176 job->request_payload.sg_cnt, pmbx, size);
4177
4178 /* Handle possible SLI_CONFIG with non-embedded payloads */
4179 if (phba->sli_rev == LPFC_SLI_REV4) {
4180 rc = lpfc_bsg_handle_sli_cfg_ext(phba, job, dmabuf);
4181 if (rc == SLI_CONFIG_HANDLED)
4182 goto job_cont;
4183 if (rc)
4184 goto job_done;
4185 /* SLI_CONFIG_NOT_HANDLED for other mailbox commands */
4186 }
4187
4188 rc = lpfc_bsg_check_cmd_access(phba, (MAILBOX_t *)pmbx, vport);
4189 if (rc != 0)
4190 goto job_done; /* must be negative */
4191
2675 /* allocate our bsg tracking structure */ 4192 /* allocate our bsg tracking structure */
2676 dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL); 4193 dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
2677 if (!dd_data) { 4194 if (!dd_data) {
@@ -2681,12 +4198,6 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
2681 goto job_done; 4198 goto job_done;
2682 } 4199 }
2683 4200
2684 mb = kzalloc(BSG_MBOX_SIZE, GFP_KERNEL);
2685 if (!mb) {
2686 rc = -ENOMEM;
2687 goto job_done;
2688 }
2689
2690 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 4201 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2691 if (!pmboxq) { 4202 if (!pmboxq) {
2692 rc = -ENOMEM; 4203 rc = -ENOMEM;
@@ -2694,17 +4205,8 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
2694 } 4205 }
2695 memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t)); 4206 memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
2696 4207
2697 size = job->request_payload.payload_len;
2698 sg_copy_to_buffer(job->request_payload.sg_list,
2699 job->request_payload.sg_cnt,
2700 mb, size);
2701
2702 rc = lpfc_bsg_check_cmd_access(phba, mb, vport);
2703 if (rc != 0)
2704 goto job_done; /* must be negative */
2705
2706 pmb = &pmboxq->u.mb; 4208 pmb = &pmboxq->u.mb;
2707 memcpy(pmb, mb, sizeof(*pmb)); 4209 memcpy(pmb, pmbx, sizeof(*pmb));
2708 pmb->mbxOwner = OWN_HOST; 4210 pmb->mbxOwner = OWN_HOST;
2709 pmboxq->vport = vport; 4211 pmboxq->vport = vport;
2710 4212
@@ -2721,30 +4223,13 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
2721 "0x%x while in stopped state.\n", 4223 "0x%x while in stopped state.\n",
2722 pmb->mbxCommand); 4224 pmb->mbxCommand);
2723 4225
2724 /* Don't allow mailbox commands to be sent when blocked
2725 * or when in the middle of discovery
2726 */
2727 if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) {
2728 rc = -EAGAIN;
2729 goto job_done;
2730 }
2731
2732 /* extended mailbox commands will need an extended buffer */ 4226 /* extended mailbox commands will need an extended buffer */
2733 if (mbox_req->inExtWLen || mbox_req->outExtWLen) { 4227 if (mbox_req->inExtWLen || mbox_req->outExtWLen) {
2734 ext = kzalloc(MAILBOX_EXT_SIZE, GFP_KERNEL);
2735 if (!ext) {
2736 rc = -ENOMEM;
2737 goto job_done;
2738 }
2739
2740 /* any data for the device? */ 4228 /* any data for the device? */
2741 if (mbox_req->inExtWLen) { 4229 if (mbox_req->inExtWLen) {
2742 from = (uint8_t *)mb; 4230 from = pmbx;
2743 from += sizeof(MAILBOX_t); 4231 ext = from + sizeof(MAILBOX_t);
2744 memcpy((uint8_t *)ext, from,
2745 mbox_req->inExtWLen * sizeof(uint32_t));
2746 } 4232 }
2747
2748 pmboxq->context2 = ext; 4233 pmboxq->context2 = ext;
2749 pmboxq->in_ext_byte_len = 4234 pmboxq->in_ext_byte_len =
2750 mbox_req->inExtWLen * sizeof(uint32_t); 4235 mbox_req->inExtWLen * sizeof(uint32_t);
@@ -2768,46 +4253,17 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
2768 rc = -ERANGE; 4253 rc = -ERANGE;
2769 goto job_done; 4254 goto job_done;
2770 } 4255 }
2771
2772 rxbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
2773 if (!rxbmp) {
2774 rc = -ENOMEM;
2775 goto job_done;
2776 }
2777
2778 rxbmp->virt = lpfc_mbuf_alloc(phba, 0, &rxbmp->phys);
2779 if (!rxbmp->virt) {
2780 rc = -ENOMEM;
2781 goto job_done;
2782 }
2783
2784 INIT_LIST_HEAD(&rxbmp->list);
2785 rxbpl = (struct ulp_bde64 *) rxbmp->virt;
2786 dmp = diag_cmd_data_alloc(phba, rxbpl, transmit_length, 0);
2787 if (!dmp) {
2788 rc = -ENOMEM;
2789 goto job_done;
2790 }
2791
2792 INIT_LIST_HEAD(&dmp->dma.list);
2793 pmb->un.varBIUdiag.un.s2.xmit_bde64.addrHigh = 4256 pmb->un.varBIUdiag.un.s2.xmit_bde64.addrHigh =
2794 putPaddrHigh(dmp->dma.phys); 4257 putPaddrHigh(dmabuf->phys + sizeof(MAILBOX_t));
2795 pmb->un.varBIUdiag.un.s2.xmit_bde64.addrLow = 4258 pmb->un.varBIUdiag.un.s2.xmit_bde64.addrLow =
2796 putPaddrLow(dmp->dma.phys); 4259 putPaddrLow(dmabuf->phys + sizeof(MAILBOX_t));
2797 4260
2798 pmb->un.varBIUdiag.un.s2.rcv_bde64.addrHigh = 4261 pmb->un.varBIUdiag.un.s2.rcv_bde64.addrHigh =
2799 putPaddrHigh(dmp->dma.phys + 4262 putPaddrHigh(dmabuf->phys + sizeof(MAILBOX_t)
2800 pmb->un.varBIUdiag.un.s2. 4263 + pmb->un.varBIUdiag.un.s2.xmit_bde64.tus.f.bdeSize);
2801 xmit_bde64.tus.f.bdeSize);
2802 pmb->un.varBIUdiag.un.s2.rcv_bde64.addrLow = 4264 pmb->un.varBIUdiag.un.s2.rcv_bde64.addrLow =
2803 putPaddrLow(dmp->dma.phys + 4265 putPaddrLow(dmabuf->phys + sizeof(MAILBOX_t)
2804 pmb->un.varBIUdiag.un.s2. 4266 + pmb->un.varBIUdiag.un.s2.xmit_bde64.tus.f.bdeSize);
2805 xmit_bde64.tus.f.bdeSize);
2806
2807 /* copy the transmit data found in the mailbox extension area */
2808 from = (uint8_t *)mb;
2809 from += sizeof(MAILBOX_t);
2810 memcpy((uint8_t *)dmp->dma.virt, from, transmit_length);
2811 } else if (pmb->mbxCommand == MBX_READ_EVENT_LOG) { 4267 } else if (pmb->mbxCommand == MBX_READ_EVENT_LOG) {
2812 rdEventLog = &pmb->un.varRdEventLog; 4268 rdEventLog = &pmb->un.varRdEventLog;
2813 receive_length = rdEventLog->rcv_bde64.tus.f.bdeSize; 4269 receive_length = rdEventLog->rcv_bde64.tus.f.bdeSize;
@@ -2823,33 +4279,10 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
2823 4279
2824 /* mode zero uses a bde like biu diags command */ 4280 /* mode zero uses a bde like biu diags command */
2825 if (mode == 0) { 4281 if (mode == 0) {
2826 4282 pmb->un.varWords[3] = putPaddrLow(dmabuf->phys
2827 /* rebuild the command for sli4 using our own buffers 4283 + sizeof(MAILBOX_t));
2828 * like we do for biu diags 4284 pmb->un.varWords[4] = putPaddrHigh(dmabuf->phys
2829 */ 4285 + sizeof(MAILBOX_t));
2830
2831 rxbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
2832 if (!rxbmp) {
2833 rc = -ENOMEM;
2834 goto job_done;
2835 }
2836
2837 rxbmp->virt = lpfc_mbuf_alloc(phba, 0, &rxbmp->phys);
2838 rxbpl = (struct ulp_bde64 *) rxbmp->virt;
2839 if (rxbpl) {
2840 INIT_LIST_HEAD(&rxbmp->list);
2841 dmp = diag_cmd_data_alloc(phba, rxbpl,
2842 receive_length, 0);
2843 }
2844
2845 if (!dmp) {
2846 rc = -ENOMEM;
2847 goto job_done;
2848 }
2849
2850 INIT_LIST_HEAD(&dmp->dma.list);
2851 pmb->un.varWords[3] = putPaddrLow(dmp->dma.phys);
2852 pmb->un.varWords[4] = putPaddrHigh(dmp->dma.phys);
2853 } 4286 }
2854 } else if (phba->sli_rev == LPFC_SLI_REV4) { 4287 } else if (phba->sli_rev == LPFC_SLI_REV4) {
2855 if (pmb->mbxCommand == MBX_DUMP_MEMORY) { 4288 if (pmb->mbxCommand == MBX_DUMP_MEMORY) {
@@ -2860,36 +4293,14 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
2860 /* receive length cannot be greater than mailbox 4293 /* receive length cannot be greater than mailbox
2861 * extension size 4294 * extension size
2862 */ 4295 */
2863 if ((receive_length == 0) || 4296 if (receive_length == 0) {
2864 (receive_length > MAILBOX_EXT_SIZE)) {
2865 rc = -ERANGE; 4297 rc = -ERANGE;
2866 goto job_done; 4298 goto job_done;
2867 } 4299 }
2868 4300 pmb->un.varWords[3] = putPaddrLow(dmabuf->phys
2869 rxbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 4301 + sizeof(MAILBOX_t));
2870 if (!rxbmp) { 4302 pmb->un.varWords[4] = putPaddrHigh(dmabuf->phys
2871 rc = -ENOMEM; 4303 + sizeof(MAILBOX_t));
2872 goto job_done;
2873 }
2874
2875 rxbmp->virt = lpfc_mbuf_alloc(phba, 0, &rxbmp->phys);
2876 if (!rxbmp->virt) {
2877 rc = -ENOMEM;
2878 goto job_done;
2879 }
2880
2881 INIT_LIST_HEAD(&rxbmp->list);
2882 rxbpl = (struct ulp_bde64 *) rxbmp->virt;
2883 dmp = diag_cmd_data_alloc(phba, rxbpl, receive_length,
2884 0);
2885 if (!dmp) {
2886 rc = -ENOMEM;
2887 goto job_done;
2888 }
2889
2890 INIT_LIST_HEAD(&dmp->dma.list);
2891 pmb->un.varWords[3] = putPaddrLow(dmp->dma.phys);
2892 pmb->un.varWords[4] = putPaddrHigh(dmp->dma.phys);
2893 } else if ((pmb->mbxCommand == MBX_UPDATE_CFG) && 4304 } else if ((pmb->mbxCommand == MBX_UPDATE_CFG) &&
2894 pmb->un.varUpdateCfg.co) { 4305 pmb->un.varUpdateCfg.co) {
2895 bde = (struct ulp_bde64 *)&pmb->un.varWords[4]; 4306 bde = (struct ulp_bde64 *)&pmb->un.varWords[4];
@@ -2899,102 +4310,53 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
2899 rc = -ERANGE; 4310 rc = -ERANGE;
2900 goto job_done; 4311 goto job_done;
2901 } 4312 }
2902 4313 bde->addrHigh = putPaddrHigh(dmabuf->phys
2903 rxbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 4314 + sizeof(MAILBOX_t));
2904 if (!rxbmp) { 4315 bde->addrLow = putPaddrLow(dmabuf->phys
2905 rc = -ENOMEM; 4316 + sizeof(MAILBOX_t));
2906 goto job_done;
2907 }
2908
2909 rxbmp->virt = lpfc_mbuf_alloc(phba, 0, &rxbmp->phys);
2910 if (!rxbmp->virt) {
2911 rc = -ENOMEM;
2912 goto job_done;
2913 }
2914
2915 INIT_LIST_HEAD(&rxbmp->list);
2916 rxbpl = (struct ulp_bde64 *) rxbmp->virt;
2917 dmp = diag_cmd_data_alloc(phba, rxbpl,
2918 bde->tus.f.bdeSize, 0);
2919 if (!dmp) {
2920 rc = -ENOMEM;
2921 goto job_done;
2922 }
2923
2924 INIT_LIST_HEAD(&dmp->dma.list);
2925 bde->addrHigh = putPaddrHigh(dmp->dma.phys);
2926 bde->addrLow = putPaddrLow(dmp->dma.phys);
2927
2928 /* copy the transmit data found in the mailbox
2929 * extension area
2930 */
2931 from = (uint8_t *)mb;
2932 from += sizeof(MAILBOX_t);
2933 memcpy((uint8_t *)dmp->dma.virt, from,
2934 bde->tus.f.bdeSize);
2935 } else if (pmb->mbxCommand == MBX_SLI4_CONFIG) { 4317 } else if (pmb->mbxCommand == MBX_SLI4_CONFIG) {
2936 /* rebuild the command for sli4 using our own buffers 4318 /* Handling non-embedded SLI_CONFIG mailbox command */
2937 * like we do for biu diags 4319 sli4_config = &pmboxq->u.mqe.un.sli4_config;
2938 */ 4320 if (!bf_get(lpfc_mbox_hdr_emb,
2939 header = (struct mbox_header *)&pmb->un.varWords[0]; 4321 &sli4_config->header.cfg_mhdr)) {
2940 nembed_sge = (struct lpfc_mbx_nembed_cmd *) 4322 /* rebuild the command for sli4 using our
2941 &pmb->un.varWords[0]; 4323 * own buffers like we do for biu diags
2942 receive_length = nembed_sge->sge[0].length; 4324 */
2943 4325 header = (struct mbox_header *)
2944 /* receive length cannot be greater than mailbox 4326 &pmb->un.varWords[0];
2945 * extension size 4327 nembed_sge = (struct lpfc_mbx_nembed_cmd *)
2946 */ 4328 &pmb->un.varWords[0];
2947 if ((receive_length == 0) || 4329 receive_length = nembed_sge->sge[0].length;
2948 (receive_length > MAILBOX_EXT_SIZE)) { 4330
2949 rc = -ERANGE; 4331 /* receive length cannot be greater than
2950 goto job_done; 4332 * mailbox extension size
2951 } 4333 */
2952 4334 if ((receive_length == 0) ||
2953 rxbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 4335 (receive_length > MAILBOX_EXT_SIZE)) {
2954 if (!rxbmp) { 4336 rc = -ERANGE;
2955 rc = -ENOMEM; 4337 goto job_done;
2956 goto job_done; 4338 }
2957 }
2958
2959 rxbmp->virt = lpfc_mbuf_alloc(phba, 0, &rxbmp->phys);
2960 if (!rxbmp->virt) {
2961 rc = -ENOMEM;
2962 goto job_done;
2963 }
2964 4339
2965 INIT_LIST_HEAD(&rxbmp->list); 4340 nembed_sge->sge[0].pa_hi =
2966 rxbpl = (struct ulp_bde64 *) rxbmp->virt; 4341 putPaddrHigh(dmabuf->phys
2967 dmp = diag_cmd_data_alloc(phba, rxbpl, receive_length, 4342 + sizeof(MAILBOX_t));
2968 0); 4343 nembed_sge->sge[0].pa_lo =
2969 if (!dmp) { 4344 putPaddrLow(dmabuf->phys
2970 rc = -ENOMEM; 4345 + sizeof(MAILBOX_t));
2971 goto job_done;
2972 } 4346 }
2973
2974 INIT_LIST_HEAD(&dmp->dma.list);
2975 nembed_sge->sge[0].pa_hi = putPaddrHigh(dmp->dma.phys);
2976 nembed_sge->sge[0].pa_lo = putPaddrLow(dmp->dma.phys);
2977 /* copy the transmit data found in the mailbox
2978 * extension area
2979 */
2980 from = (uint8_t *)mb;
2981 from += sizeof(MAILBOX_t);
2982 memcpy((uint8_t *)dmp->dma.virt, from,
2983 header->cfg_mhdr.payload_length);
2984 } 4347 }
2985 } 4348 }
2986 4349
2987 dd_data->context_un.mbox.rxbmp = rxbmp; 4350 dd_data->context_un.mbox.dmabuffers = dmabuf;
2988 dd_data->context_un.mbox.dmp = dmp;
2989 4351
2990 /* setup wake call as IOCB callback */ 4352 /* setup wake call as IOCB callback */
2991 pmboxq->mbox_cmpl = lpfc_bsg_wake_mbox_wait; 4353 pmboxq->mbox_cmpl = lpfc_bsg_issue_mbox_cmpl;
2992 4354
2993 /* setup context field to pass wait_queue pointer to wake function */ 4355 /* setup context field to pass wait_queue pointer to wake function */
2994 pmboxq->context1 = dd_data; 4356 pmboxq->context1 = dd_data;
2995 dd_data->type = TYPE_MBOX; 4357 dd_data->type = TYPE_MBOX;
2996 dd_data->context_un.mbox.pmboxq = pmboxq; 4358 dd_data->context_un.mbox.pmboxq = pmboxq;
2997 dd_data->context_un.mbox.mb = mb; 4359 dd_data->context_un.mbox.mb = (MAILBOX_t *)pmbx;
2998 dd_data->context_un.mbox.set_job = job; 4360 dd_data->context_un.mbox.set_job = job;
2999 dd_data->context_un.mbox.ext = ext; 4361 dd_data->context_un.mbox.ext = ext;
3000 dd_data->context_un.mbox.mbOffset = mbox_req->mbOffset; 4362 dd_data->context_un.mbox.mbOffset = mbox_req->mbOffset;
@@ -3011,11 +4373,11 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
3011 } 4373 }
3012 4374
3013 /* job finished, copy the data */ 4375 /* job finished, copy the data */
3014 memcpy(mb, pmb, sizeof(*pmb)); 4376 memcpy(pmbx, pmb, sizeof(*pmb));
3015 job->reply->reply_payload_rcv_len = 4377 job->reply->reply_payload_rcv_len =
3016 sg_copy_from_buffer(job->reply_payload.sg_list, 4378 sg_copy_from_buffer(job->reply_payload.sg_list,
3017 job->reply_payload.sg_cnt, 4379 job->reply_payload.sg_cnt,
3018 mb, size); 4380 pmbx, size);
3019 /* not waiting mbox already done */ 4381 /* not waiting mbox already done */
3020 rc = 0; 4382 rc = 0;
3021 goto job_done; 4383 goto job_done;
@@ -3027,22 +4389,12 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
3027 4389
3028job_done: 4390job_done:
3029 /* common exit for error or job completed inline */ 4391 /* common exit for error or job completed inline */
3030 kfree(mb);
3031 if (pmboxq) 4392 if (pmboxq)
3032 mempool_free(pmboxq, phba->mbox_mem_pool); 4393 mempool_free(pmboxq, phba->mbox_mem_pool);
3033 kfree(ext); 4394 lpfc_bsg_dma_page_free(phba, dmabuf);
3034 if (dmp) {
3035 dma_free_coherent(&phba->pcidev->dev,
3036 dmp->size, dmp->dma.virt,
3037 dmp->dma.phys);
3038 kfree(dmp);
3039 }
3040 if (rxbmp) {
3041 lpfc_mbuf_free(phba, rxbmp->virt, rxbmp->phys);
3042 kfree(rxbmp);
3043 }
3044 kfree(dd_data); 4395 kfree(dd_data);
3045 4396
4397job_cont:
3046 return rc; 4398 return rc;
3047} 4399}
3048 4400
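The rework above replaces the separate BIU-diag buffers with a single BSG DMA page: the raw mailbox occupies the first sizeof(MAILBOX_t) bytes and any extension or payload data follows immediately, so every BDE/SGE address becomes the page's physical address plus that fixed offset. A user-space sketch of the layout arithmetic (sizes and addresses are illustrative, not the driver's):

#include <stdint.h>
#include <stdio.h>

#define MBOX_SIZE 256u   /* stand-in for sizeof(MAILBOX_t) */

struct dma_page {
	uint64_t phys;       /* bus address of the page */
	uint8_t  virt[4096]; /* kernel virtual mapping, modelled as an array */
};

int main(void)
{
	struct dma_page page = { .phys = 0x1f000000ull };
	uint8_t *mbox = page.virt;                 /* mailbox command */
	uint8_t *ext  = page.virt + MBOX_SIZE;     /* extension payload */
	uint64_t ext_phys = page.phys + MBOX_SIZE; /* what goes into the BDE/SGE */

	printf("mbox at %p, ext at %p, ext_phys 0x%llx\n",
	       (void *)mbox, (void *)ext, (unsigned long long)ext_phys);
	return 0;
}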
@@ -3055,37 +4407,28 @@ lpfc_bsg_mbox_cmd(struct fc_bsg_job *job)
3055{ 4407{
3056 struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata; 4408 struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
3057 struct lpfc_hba *phba = vport->phba; 4409 struct lpfc_hba *phba = vport->phba;
4410 struct dfc_mbox_req *mbox_req;
3058 int rc = 0; 4411 int rc = 0;
3059 4412
3060 /* in case no data is transferred */ 4413 /* mix-and-match backward compatibility */
3061 job->reply->reply_payload_rcv_len = 0; 4414 job->reply->reply_payload_rcv_len = 0;
3062 if (job->request_len < 4415 if (job->request_len <
3063 sizeof(struct fc_bsg_request) + sizeof(struct dfc_mbox_req)) { 4416 sizeof(struct fc_bsg_request) + sizeof(struct dfc_mbox_req)) {
3064 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, 4417 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3065 "2737 Received MBOX_REQ request below " 4418				"2737 Mix-and-match backward compatibility "
3066 "minimum size\n"); 4419 "between MBOX_REQ old size:%d and "
3067 rc = -EINVAL; 4420 "new request size:%d\n",
3068 goto job_error; 4421 (int)(job->request_len -
3069 } 4422 sizeof(struct fc_bsg_request)),
3070 4423 (int)sizeof(struct dfc_mbox_req));
3071 if (job->request_payload.payload_len != BSG_MBOX_SIZE) { 4424 mbox_req = (struct dfc_mbox_req *)
3072 rc = -EINVAL; 4425 job->request->rqst_data.h_vendor.vendor_cmd;
3073 goto job_error; 4426 mbox_req->extMboxTag = 0;
3074 } 4427 mbox_req->extSeqNum = 0;
3075
3076 if (job->reply_payload.payload_len != BSG_MBOX_SIZE) {
3077 rc = -EINVAL;
3078 goto job_error;
3079 }
3080
3081 if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) {
3082 rc = -EAGAIN;
3083 goto job_error;
3084 } 4428 }
3085 4429
3086 rc = lpfc_bsg_issue_mbox(phba, job, vport); 4430 rc = lpfc_bsg_issue_mbox(phba, job, vport);
3087 4431
3088job_error:
3089 if (rc == 0) { 4432 if (rc == 0) {
3090 /* job done */ 4433 /* job done */
3091 job->reply->result = 0; 4434 job->reply->result = 0;
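The mix-and-match handling above accepts an older, shorter dfc_mbox_req and simply treats the missing extMboxTag/extSeqNum fields as zero instead of rejecting the job. A stand-alone illustration of that idea with made-up structure layouts (not the real dfc_mbox_req):

#include <stddef.h>
#include <stdio.h>
#include <string.h>

struct old_mbox_req { unsigned int command; unsigned int mbOffset; };
struct new_mbox_req {
	unsigned int command;
	unsigned int mbOffset;
	unsigned int extMboxTag;   /* new in the multi-buffer interface */
	unsigned int extSeqNum;    /* new in the multi-buffer interface */
};

/* copy whatever the caller provided; newer fields default to zero */
static void normalize_req(const void *raw, size_t len, struct new_mbox_req *out)
{
	memset(out, 0, sizeof(*out));
	memcpy(out, raw, len < sizeof(*out) ? len : sizeof(*out));
}

int main(void)
{
	struct old_mbox_req old = { .command = 0x9B, .mbOffset = 0 };
	struct new_mbox_req req;

	normalize_req(&old, sizeof(old), &req);
	printf("tag=%u seq=%u\n", req.extMboxTag, req.extSeqNum); /* 0 0 */
	return 0;
}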
@@ -3416,10 +4759,16 @@ lpfc_bsg_hst_vendor(struct fc_bsg_job *job)
3416 rc = lpfc_bsg_send_mgmt_rsp(job); 4759 rc = lpfc_bsg_send_mgmt_rsp(job);
3417 break; 4760 break;
3418 case LPFC_BSG_VENDOR_DIAG_MODE: 4761 case LPFC_BSG_VENDOR_DIAG_MODE:
3419 rc = lpfc_bsg_diag_mode(job); 4762 rc = lpfc_bsg_diag_loopback_mode(job);
4763 break;
4764 case LPFC_BSG_VENDOR_DIAG_MODE_END:
4765 rc = lpfc_sli4_bsg_diag_mode_end(job);
4766 break;
4767 case LPFC_BSG_VENDOR_DIAG_RUN_LOOPBACK:
4768 rc = lpfc_bsg_diag_loopback_run(job);
3420 break; 4769 break;
3421 case LPFC_BSG_VENDOR_DIAG_TEST: 4770 case LPFC_BSG_VENDOR_LINK_DIAG_TEST:
3422 rc = lpfc_bsg_diag_test(job); 4771 rc = lpfc_sli4_bsg_link_diag_test(job);
3423 break; 4772 break;
3424 case LPFC_BSG_VENDOR_GET_MGMT_REV: 4773 case LPFC_BSG_VENDOR_GET_MGMT_REV:
3425 rc = lpfc_bsg_get_dfc_rev(job); 4774 rc = lpfc_bsg_get_dfc_rev(job);
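For context, the dispatch above is reached from user space through the FC host's bsg node. A rough sketch of how a tool might submit a vendor command via SG_IO/sg_io_v4 follows; the device path, buffer sizes, and the placeholder vendor command value are assumptions (the real opcodes and payload layouts come from the driver's lpfc_bsg.h), and uapi header locations may vary by distribution:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/bsg.h>
#include <scsi/sg.h>
#include <scsi/scsi_bsg_fc.h>

#define LPFC_VENDOR_CMD_PLACEHOLDER 0 /* substitute a real LPFC_BSG_VENDOR_* code */

int main(void)
{
	unsigned char req_buf[sizeof(struct fc_bsg_request) + sizeof(__u32)];
	unsigned char rsp[64], din[512];
	struct fc_bsg_request *req = (struct fc_bsg_request *)req_buf;
	struct sg_io_v4 io;
	int fd;

	memset(req_buf, 0, sizeof(req_buf));
	req->msgcode = FC_BSG_HST_VENDOR;
	/* the first u32 of the vendor payload selects the lpfc vendor command */
	req->rqst_data.h_vendor.vendor_cmd[0] = LPFC_VENDOR_CMD_PLACEHOLDER;

	memset(&io, 0, sizeof(io));
	io.guard = 'Q';
	io.protocol = BSG_PROTOCOL_SCSI;
	io.subprotocol = BSG_SUB_PROTOCOL_SCSI_TRANSPORT;
	io.request = (unsigned long)req_buf;
	io.request_len = sizeof(req_buf);
	io.din_xferp = (unsigned long)din;        /* reply payload from the driver */
	io.din_xfer_len = sizeof(din);
	io.response = (unsigned long)rsp;         /* fc_bsg_reply area */
	io.max_response_len = sizeof(rsp);

	fd = open("/dev/bsg/fc_host0", O_RDWR);   /* assumed bsg node name */
	if (fd < 0)
		return 1;
	if (ioctl(fd, SG_IO, &io) < 0)
		perror("SG_IO");
	close(fd);
	return 0;
}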
@@ -3538,6 +4887,8 @@ lpfc_bsg_timeout(struct fc_bsg_job *job)
3538 /* the mbox completion handler can now be run */ 4887 /* the mbox completion handler can now be run */
3539 spin_unlock_irqrestore(&phba->ct_ev_lock, flags); 4888 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
3540 job->job_done(job); 4889 job->job_done(job);
4890 if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_PORT)
4891 phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_ABTS;
3541 break; 4892 break;
3542 case TYPE_MENLO: 4893 case TYPE_MENLO:
3543 menlo = &dd_data->context_un.menlo; 4894 menlo = &dd_data->context_un.menlo;