path: root/drivers/scsi/lpfc/lpfc_sli.c
author     James Smart <James.Smart@Emulex.Com>                       2009-05-22 14:52:52 -0400
committer  James Bottomley <James.Bottomley@HansenPartnership.com>    2009-06-08 12:24:50 -0400
commit     04c684968487eb4f98728363a97b8da48f3bb958 (patch)
tree       33f59839ca26a1904c4e2d2895598f543266feb0 /drivers/scsi/lpfc/lpfc_sli.c
parent     4f774513f7b3fe96648b8936f60f835e6ceaa88e (diff)
[SCSI] lpfc 8.3.2 : Addition of SLI4 Interface - Mailbox handling
The mailbox commands themselves are the same as, or very similar to, their SLI3 counterparts. This patch genericizes mailbox command handling and adds support for the new SLI4 mailbox queue.

Signed-off-by: James Smart <james.smart@emulex.com>
Signed-off-by: James Bottomley <James.Bottomley@HansenPartnership.com>
Diffstat (limited to 'drivers/scsi/lpfc/lpfc_sli.c')
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.c | 505
1 file changed, 450 insertions(+), 55 deletions(-)
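The "genericize" in the log message comes down to routing all mailbox access through one per-command union, so the same LPFC_MBOXQ_t can carry either the legacy SLI-3 MAILBOX_t view (pmb->u.mb) or the new SLI-4 MQE view (pmb->u.mqe), as the hunks below show. A minimal stand-alone sketch of that idea follows; the types and the word-0 opcode decode are invented stand-ins for illustration only, not the real lpfc definitions.

#include <stdint.h>
#include <stdio.h>

struct mailbox_legacy {                 /* stand-in for the SLI-3 MAILBOX_t */
	uint8_t  mbxCommand;
	uint16_t mbxStatus;
};

struct mailbox_mqe {                    /* stand-in for the SLI-4 struct lpfc_mqe */
	uint32_t words[64];
};

struct mboxq {                          /* stand-in for LPFC_MBOXQ_t */
	union {
		struct mailbox_legacy mb;   /* legacy view */
		struct mailbox_mqe    mqe;  /* SLI-4 view */
	} u;
};

/* Pick the decode that matches the active SLI revision. */
static uint32_t mbox_command(const struct mboxq *q, int sli_rev)
{
	if (sli_rev == 4)
		return q->u.mqe.words[0] & 0xff;   /* assumed opcode position */
	return q->u.mb.mbxCommand;
}

int main(void)
{
	struct mboxq q = { 0 };

	q.u.mb.mbxCommand = 0x22;              /* arbitrary demo opcode */
	printf("SLI-3 decode: 0x%x\n", mbox_command(&q, 3));
	return 0;
}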
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index cf42ada3ffcd..b53af9936282 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -43,24 +43,7 @@
 #include "lpfc_logmsg.h"
 #include "lpfc_compat.h"
 #include "lpfc_debugfs.h"
-
-/*
- * Define macro to log: Mailbox command x%x cannot issue Data
- * This allows multiple uses of lpfc_msgBlk0311
- * w/o perturbing log msg utility.
- */
-#define LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag) \
-	lpfc_printf_log(phba, \
-		KERN_INFO, \
-		LOG_MBOX | LOG_SLI, \
-		"(%d):0311 Mailbox command x%x cannot " \
-		"issue Data: x%x x%x x%x\n", \
-		pmbox->vport ? pmbox->vport->vpi : 0, \
-		pmbox->mb.mbxCommand, \
-		phba->pport->port_state, \
-		psli->sli_flag, \
-		flag)
-
+#include "lpfc_vport.h"
 
 /* There are only four IOCB completion types. */
 typedef enum _lpfc_iocb_type {
@@ -843,7 +826,7 @@ lpfc_sli_ring_map(struct lpfc_hba *phba)
 	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 	if (!pmb)
 		return -ENOMEM;
-	pmbox = &pmb->mb;
+	pmbox = &pmb->u.mb;
 	phba->link_state = LPFC_INIT_MBX_CMDS;
 	for (i = 0; i < psli->num_rings; i++) {
 		lpfc_config_ring(phba, i, pmb);
@@ -1652,6 +1635,15 @@ lpfc_sli_chk_mbx_command(uint8_t mbxCommand)
 	case MBX_HEARTBEAT:
 	case MBX_PORT_CAPABILITIES:
 	case MBX_PORT_IOV_CONTROL:
+	case MBX_SLI4_CONFIG:
+	case MBX_SLI4_REQ_FTRS:
+	case MBX_REG_FCFI:
+	case MBX_UNREG_FCFI:
+	case MBX_REG_VFI:
+	case MBX_UNREG_VFI:
+	case MBX_INIT_VPI:
+	case MBX_INIT_VFI:
+	case MBX_RESUME_RPI:
 		ret = mbxCommand;
 		break;
 	default:
@@ -1672,7 +1664,7 @@ lpfc_sli_chk_mbx_command(uint8_t mbxCommand)
  * will wake up thread waiting on the wait queue pointed by context1
  * of the mailbox.
  **/
-static void
+void
 lpfc_sli_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
 {
 	wait_queue_head_t *pdone_q;
@@ -1706,7 +1698,7 @@ void
 lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 {
 	struct lpfc_dmabuf *mp;
-	uint16_t rpi;
+	uint16_t rpi, vpi;
 	int rc;
 
 	mp = (struct lpfc_dmabuf *) (pmb->context1);
@@ -1716,24 +1708,30 @@ lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 		kfree(mp);
 	}
 
+	if ((pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) &&
+	    (phba->sli_rev == LPFC_SLI_REV4))
+		lpfc_sli4_free_rpi(phba, pmb->u.mb.un.varUnregLogin.rpi);
+
 	/*
 	 * If a REG_LOGIN succeeded after node is destroyed or node
 	 * is in re-discovery driver need to cleanup the RPI.
 	 */
 	if (!(phba->pport->load_flag & FC_UNLOADING) &&
-	    pmb->mb.mbxCommand == MBX_REG_LOGIN64 &&
-	    !pmb->mb.mbxStatus) {
-
-		rpi = pmb->mb.un.varWords[0];
-		lpfc_unreg_login(phba, pmb->mb.un.varRegLogin.vpi, rpi, pmb);
+	    pmb->u.mb.mbxCommand == MBX_REG_LOGIN64 &&
+	    !pmb->u.mb.mbxStatus) {
+		rpi = pmb->u.mb.un.varWords[0];
+		vpi = pmb->u.mb.un.varRegLogin.vpi - phba->vpi_base;
+		lpfc_unreg_login(phba, vpi, rpi, pmb);
 		pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
 		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
 		if (rc != MBX_NOT_FINISHED)
 			return;
 	}
 
-	mempool_free(pmb, phba->mbox_mem_pool);
-	return;
+	if (bf_get(lpfc_mqe_command, &pmb->u.mqe) == MBX_SLI4_CONFIG)
+		lpfc_sli4_mbox_cmd_free(phba, pmb);
+	else
+		mempool_free(pmb, phba->mbox_mem_pool);
 }
 
 /**
@@ -1770,7 +1768,7 @@ lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
 		if (pmb == NULL)
 			break;
 
-		pmbox = &pmb->mb;
+		pmbox = &pmb->u.mb;
 
 		if (pmbox->mbxCommand != MBX_HEARTBEAT) {
 			if (pmb->vport) {
@@ -1799,9 +1797,10 @@ lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
 			/* Unknow mailbox command compl */
 			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
 					"(%d):0323 Unknown Mailbox command "
-					"%x Cmpl\n",
+					"x%x (x%x) Cmpl\n",
 					pmb->vport ? pmb->vport->vpi : 0,
-					pmbox->mbxCommand);
+					pmbox->mbxCommand,
+					lpfc_sli4_mbox_opcode_get(phba, pmb));
 			phba->link_state = LPFC_HBA_ERROR;
 			phba->work_hs = HS_FFER3;
 			lpfc_handle_eratt(phba);
@@ -1816,29 +1815,29 @@ lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
 						LOG_MBOX | LOG_SLI,
 						"(%d):0305 Mbox cmd cmpl "
 						"error - RETRYing Data: x%x "
-						"x%x x%x x%x\n",
+						"(x%x) x%x x%x x%x\n",
 						pmb->vport ? pmb->vport->vpi :0,
 						pmbox->mbxCommand,
+						lpfc_sli4_mbox_opcode_get(phba,
+								pmb),
 						pmbox->mbxStatus,
 						pmbox->un.varWords[0],
 						pmb->vport->port_state);
 					pmbox->mbxStatus = 0;
 					pmbox->mbxOwner = OWN_HOST;
-					spin_lock_irq(&phba->hbalock);
-					phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
-					spin_unlock_irq(&phba->hbalock);
 					rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
-					if (rc == MBX_SUCCESS)
+					if (rc != MBX_NOT_FINISHED)
 						continue;
 				}
 			}
 
 		/* Mailbox cmd <cmd> Cmpl <cmpl> */
 		lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
-				"(%d):0307 Mailbox cmd x%x Cmpl x%p "
+				"(%d):0307 Mailbox cmd x%x (x%x) Cmpl x%p "
 				"Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x\n",
 				pmb->vport ? pmb->vport->vpi : 0,
 				pmbox->mbxCommand,
+				lpfc_sli4_mbox_opcode_get(phba, pmb),
 				pmb->mbox_cmpl,
 				*((uint32_t *) pmbox),
 				pmbox->un.varWords[0],
@@ -3377,10 +3376,10 @@ lpfc_sli_brdkill(struct lpfc_hba *phba)
 	}
 	spin_lock_irq(&phba->hbalock);
 	psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
+	psli->mbox_active = NULL;
 	phba->link_flag &= ~LS_IGNORE_ERATT;
 	spin_unlock_irq(&phba->hbalock);
 
-	psli->mbox_active = NULL;
 	lpfc_hba_down_post(phba);
 	phba->link_state = LPFC_HBA_ERROR;
 
@@ -3790,7 +3789,7 @@ lpfc_sli_hbq_setup(struct lpfc_hba *phba)
 	if (!pmb)
 		return -ENOMEM;
 
-	pmbox = &pmb->mb;
+	pmbox = &pmb->u.mb;
 
 	/* Initialize the struct lpfc_sli_hbq structure for each hbq */
 	phba->link_state = LPFC_INIT_MBX_CMDS;
@@ -3917,33 +3916,43 @@ lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode)
 			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 				"0442 Adapter failed to init, mbxCmd x%x "
 				"CONFIG_PORT, mbxStatus x%x Data: x%x\n",
-				pmb->mb.mbxCommand, pmb->mb.mbxStatus, 0);
+				pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus, 0);
 			spin_lock_irq(&phba->hbalock);
-			phba->sli.sli_flag &= ~LPFC_SLI2_ACTIVE;
+			phba->sli.sli_flag &= ~LPFC_SLI_ACTIVE;
 			spin_unlock_irq(&phba->hbalock);
 			rc = -ENXIO;
-		} else
+		} else {
+			/* Allow asynchronous mailbox command to go through */
+			spin_lock_irq(&phba->hbalock);
+			phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
+			spin_unlock_irq(&phba->hbalock);
 			done = 1;
+		}
 	}
 	if (!done) {
 		rc = -EINVAL;
 		goto do_prep_failed;
 	}
-	if (pmb->mb.un.varCfgPort.sli_mode == 3) {
-		if (!pmb->mb.un.varCfgPort.cMA) {
+	if (pmb->u.mb.un.varCfgPort.sli_mode == 3) {
+		if (!pmb->u.mb.un.varCfgPort.cMA) {
 			rc = -ENXIO;
 			goto do_prep_failed;
 		}
-		if (phba->max_vpi && pmb->mb.un.varCfgPort.gmv) {
+		if (phba->max_vpi && pmb->u.mb.un.varCfgPort.gmv) {
 			phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED;
-			phba->max_vpi = pmb->mb.un.varCfgPort.max_vpi;
+			phba->max_vpi = pmb->u.mb.un.varCfgPort.max_vpi;
+			phba->max_vports = (phba->max_vpi > phba->max_vports) ?
+				phba->max_vpi : phba->max_vports;
+
 		} else
 			phba->max_vpi = 0;
-		if (pmb->mb.un.varCfgPort.gerbm)
+		if (pmb->u.mb.un.varCfgPort.gdss)
+			phba->sli3_options |= LPFC_SLI3_DSS_ENABLED;
+		if (pmb->u.mb.un.varCfgPort.gerbm)
 			phba->sli3_options |= LPFC_SLI3_HBQ_ENABLED;
-		if (pmb->mb.un.varCfgPort.gcrp)
+		if (pmb->u.mb.un.varCfgPort.gcrp)
 			phba->sli3_options |= LPFC_SLI3_CRP_ENABLED;
-		if (pmb->mb.un.varCfgPort.ginb) {
+		if (pmb->u.mb.un.varCfgPort.ginb) {
 			phba->sli3_options |= LPFC_SLI3_INB_ENABLED;
 			phba->hbq_get = phba->mbox->us.s3_inb_pgp.hbq_get;
 			phba->port_gp = phba->mbox->us.s3_inb_pgp.port;
@@ -3959,7 +3968,7 @@ lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode)
 	}
 
 	if (phba->cfg_enable_bg) {
-		if (pmb->mb.un.varCfgPort.gbg)
+		if (pmb->u.mb.un.varCfgPort.gbg)
 			phba->sli3_options |= LPFC_SLI3_BG_ENABLED;
 		else
 			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -4054,8 +4063,9 @@ lpfc_sli_hba_setup(struct lpfc_hba *phba)
 		if (rc)
 			goto lpfc_sli_hba_setup_error;
 	}
-
+	spin_lock_irq(&phba->hbalock);
 	phba->sli.sli_flag |= LPFC_PROCESS_LA;
+	spin_unlock_irq(&phba->hbalock);
 
 	rc = lpfc_config_port_post(phba);
 	if (rc)
@@ -4596,7 +4606,7 @@ void
 lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
 {
 	LPFC_MBOXQ_t *pmbox = phba->sli.mbox_active;
-	MAILBOX_t *mb = &pmbox->mb;
+	MAILBOX_t *mb = &pmbox->u.mb;
 	struct lpfc_sli *psli = &phba->sli;
 	struct lpfc_sli_ring *pring;
 
@@ -6414,6 +6424,52 @@ lpfc_sli_queue_setup(struct lpfc_hba *phba)
 }
 
 /**
+ * lpfc_sli_mbox_sys_flush - Flush mailbox command sub-system
+ * @phba: Pointer to HBA context object.
+ *
+ * This routine flushes the mailbox command subsystem. It will unconditionally
+ * flush all the mailbox commands in the three possible stages in the mailbox
+ * command sub-system: pending mailbox command queue; the outstanding mailbox
+ * command; and completed mailbox command queue. It is caller's responsibility
+ * to make sure that the driver is in the proper state to flush the mailbox
+ * command sub-system. Namely, the posting of mailbox commands into the
+ * pending mailbox command queue from the various clients must be stopped;
+ * either the HBA is in a state that it will never works on the outstanding
+ * mailbox command (such as in EEH or ERATT conditions) or the outstanding
+ * mailbox command has been completed.
+ **/
+static void
+lpfc_sli_mbox_sys_flush(struct lpfc_hba *phba)
+{
+	LIST_HEAD(completions);
+	struct lpfc_sli *psli = &phba->sli;
+	LPFC_MBOXQ_t *pmb;
+	unsigned long iflag;
+
+	/* Flush all the mailbox commands in the mbox system */
+	spin_lock_irqsave(&phba->hbalock, iflag);
+	/* The pending mailbox command queue */
+	list_splice_init(&phba->sli.mboxq, &completions);
+	/* The outstanding active mailbox command */
+	if (psli->mbox_active) {
+		list_add_tail(&psli->mbox_active->list, &completions);
+		psli->mbox_active = NULL;
+		psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
+	}
+	/* The completed mailbox command queue */
+	list_splice_init(&phba->sli.mboxq_cmpl, &completions);
+	spin_unlock_irqrestore(&phba->hbalock, iflag);
+
+	/* Return all flushed mailbox commands with MBX_NOT_FINISHED status */
+	while (!list_empty(&completions)) {
+		list_remove_head(&completions, pmb, LPFC_MBOXQ_t, list);
+		pmb->u.mb.mbxStatus = MBX_NOT_FINISHED;
+		if (pmb->mbox_cmpl)
+			pmb->mbox_cmpl(phba, pmb);
+	}
+}
+
+/**
  * lpfc_sli_host_down - Vport cleanup function
  * @vport: Pointer to virtual port object.
  *
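The new lpfc_sli_mbox_sys_flush() above uses a splice-then-complete pattern: every command in the three stages (pending queue, active command, completed queue) is moved onto a private list while the lock is held, and the completions are then run outside the lock with an MBX_NOT_FINISHED status. A rough userspace sketch of that pattern, with simplified stand-in types instead of the lpfc list and mailbox structures:

#include <stdio.h>

struct cmd {
	struct cmd *next;
	int status;
	void (*cmpl)(struct cmd *);
};

enum { CMD_NOT_FINISHED = 1 };          /* illustrative status value */

/* Append a whole chain onto *tail and return the new tail. */
static struct cmd **splice(struct cmd **tail, struct cmd **src)
{
	*tail = *src;
	*src = NULL;
	while (*tail)
		tail = &(*tail)->next;
	return tail;
}

static void flush_all(struct cmd **pending, struct cmd **active, struct cmd **done)
{
	struct cmd *flushed = NULL, **tail = &flushed;
	struct cmd *c;

	/* In the driver this gathering happens under phba->hbalock. */
	tail = splice(tail, pending);
	tail = splice(tail, active);
	tail = splice(tail, done);

	/* Complete everything outside the lock with a "not finished" status. */
	for (c = flushed; c; c = c->next) {
		c->status = CMD_NOT_FINISHED;
		if (c->cmpl)
			c->cmpl(c);
	}
}

static void say_done(struct cmd *c) { printf("flushed, status %d\n", c->status); }

int main(void)
{
	struct cmd a = { .cmpl = say_done }, b = { .cmpl = say_done };
	struct cmd *pending = &a, *active = &b, *done = NULL;

	flush_all(&pending, &active, &done);
	return 0;
}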
@@ -6506,9 +6562,11 @@ lpfc_sli_hba_down(struct lpfc_hba *phba)
 	struct lpfc_sli *psli = &phba->sli;
 	struct lpfc_sli_ring *pring;
 	struct lpfc_dmabuf *buf_ptr;
-	LPFC_MBOXQ_t *pmb;
-	int i;
 	unsigned long flags = 0;
+	int i;
+
+	/* Shutdown the mailbox command sub-system */
+	lpfc_sli_mbox_sys_shutdown(phba);
 
 	lpfc_hba_down_prep(phba);
 
@@ -7773,7 +7831,7 @@ lpfc_sli_sp_intr_handler(int irq, void *dev_id)
 
 		if ((work_ha_copy & HA_MBATT) && (phba->sli.mbox_active)) {
 			pmb = phba->sli.mbox_active;
-			pmbox = &pmb->mb;
+			pmbox = &pmb->u.mb;
 			mbox = phba->mbox;
 			vport = pmb->vport;
 
@@ -8170,6 +8228,183 @@ lpfc_sli4_iocb_param_transfer(struct lpfc_iocbq *pIocbIn,
 }
 
 /**
+ * lpfc_sli4_sp_handle_async_event - Handle an asynchroous event
+ * @phba: Pointer to HBA context object.
+ * @cqe: Pointer to mailbox completion queue entry.
+ *
+ * This routine process a mailbox completion queue entry with asynchrous
+ * event.
+ *
+ * Return: true if work posted to worker thread, otherwise false.
+ **/
+static bool
+lpfc_sli4_sp_handle_async_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
+{
+	struct lpfc_cq_event *cq_event;
+	unsigned long iflags;
+
+	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+			"0392 Async Event: word0:x%x, word1:x%x, "
+			"word2:x%x, word3:x%x\n", mcqe->word0,
+			mcqe->mcqe_tag0, mcqe->mcqe_tag1, mcqe->trailer);
+
+	/* Allocate a new internal CQ_EVENT entry */
+	cq_event = lpfc_sli4_cq_event_alloc(phba);
+	if (!cq_event) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"0394 Failed to allocate CQ_EVENT entry\n");
+		return false;
+	}
+
+	/* Move the CQE into an asynchronous event entry */
+	memcpy(&cq_event->cqe, mcqe, sizeof(struct lpfc_mcqe));
+	spin_lock_irqsave(&phba->hbalock, iflags);
+	list_add_tail(&cq_event->list, &phba->sli4_hba.sp_asynce_work_queue);
+	/* Set the async event flag */
+	phba->hba_flag |= ASYNC_EVENT;
+	spin_unlock_irqrestore(&phba->hbalock, iflags);
+
+	return true;
+}
+
+/**
+ * lpfc_sli4_sp_handle_mbox_event - Handle a mailbox completion event
+ * @phba: Pointer to HBA context object.
+ * @cqe: Pointer to mailbox completion queue entry.
+ *
+ * This routine process a mailbox completion queue entry with mailbox
+ * completion event.
+ *
+ * Return: true if work posted to worker thread, otherwise false.
+ **/
+static bool
+lpfc_sli4_sp_handle_mbox_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
+{
+	uint32_t mcqe_status;
+	MAILBOX_t *mbox, *pmbox;
+	struct lpfc_mqe *mqe;
+	struct lpfc_vport *vport;
+	struct lpfc_nodelist *ndlp;
+	struct lpfc_dmabuf *mp;
+	unsigned long iflags;
+	LPFC_MBOXQ_t *pmb;
+	bool workposted = false;
+	int rc;
+
+	/* If not a mailbox complete MCQE, out by checking mailbox consume */
+	if (!bf_get(lpfc_trailer_completed, mcqe))
+		goto out_no_mqe_complete;
+
+	/* Get the reference to the active mbox command */
+	spin_lock_irqsave(&phba->hbalock, iflags);
+	pmb = phba->sli.mbox_active;
+	if (unlikely(!pmb)) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
+				"1832 No pending MBOX command to handle\n");
+		spin_unlock_irqrestore(&phba->hbalock, iflags);
+		goto out_no_mqe_complete;
+	}
+	spin_unlock_irqrestore(&phba->hbalock, iflags);
+	mqe = &pmb->u.mqe;
+	pmbox = (MAILBOX_t *)&pmb->u.mqe;
+	mbox = phba->mbox;
+	vport = pmb->vport;
+
+	/* Reset heartbeat timer */
+	phba->last_completion_time = jiffies;
+	del_timer(&phba->sli.mbox_tmo);
+
+	/* Move mbox data to caller's mailbox region, do endian swapping */
+	if (pmb->mbox_cmpl && mbox)
+		lpfc_sli_pcimem_bcopy(mbox, mqe, sizeof(struct lpfc_mqe));
+	/* Set the mailbox status with SLI4 range 0x4000 */
+	mcqe_status = bf_get(lpfc_mcqe_status, mcqe);
+	if (mcqe_status != MB_CQE_STATUS_SUCCESS)
+		bf_set(lpfc_mqe_status, mqe,
+		       (LPFC_MBX_ERROR_RANGE | mcqe_status));
+
+	if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
+		pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;
+		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_MBOX_VPORT,
+				      "MBOX dflt rpi: status:x%x rpi:x%x",
+				      mcqe_status,
+				      pmbox->un.varWords[0], 0);
+		if (mcqe_status == MB_CQE_STATUS_SUCCESS) {
+			mp = (struct lpfc_dmabuf *)(pmb->context1);
+			ndlp = (struct lpfc_nodelist *)pmb->context2;
+			/* Reg_LOGIN of dflt RPI was successful. Now lets get
+			 * RID of the PPI using the same mbox buffer.
+			 */
+			lpfc_unreg_login(phba, vport->vpi,
+					 pmbox->un.varWords[0], pmb);
+			pmb->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
+			pmb->context1 = mp;
+			pmb->context2 = ndlp;
+			pmb->vport = vport;
+			rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
+			if (rc != MBX_BUSY)
+				lpfc_printf_log(phba, KERN_ERR, LOG_MBOX |
+						LOG_SLI, "0385 rc should "
+						"have been MBX_BUSY\n");
+			if (rc != MBX_NOT_FINISHED)
+				goto send_current_mbox;
+		}
+	}
+	spin_lock_irqsave(&phba->pport->work_port_lock, iflags);
+	phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
+	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags);
+
+	/* There is mailbox completion work to do */
+	spin_lock_irqsave(&phba->hbalock, iflags);
+	__lpfc_mbox_cmpl_put(phba, pmb);
+	phba->work_ha |= HA_MBATT;
+	spin_unlock_irqrestore(&phba->hbalock, iflags);
+	workposted = true;
+
+send_current_mbox:
+	spin_lock_irqsave(&phba->hbalock, iflags);
+	/* Release the mailbox command posting token */
+	phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
+	/* Setting active mailbox pointer need to be in sync to flag clear */
+	phba->sli.mbox_active = NULL;
+	spin_unlock_irqrestore(&phba->hbalock, iflags);
+	/* Wake up worker thread to post the next pending mailbox command */
+	lpfc_worker_wake_up(phba);
+out_no_mqe_complete:
+	if (bf_get(lpfc_trailer_consumed, mcqe))
+		lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq);
+	return workposted;
+}
+
+/**
+ * lpfc_sli4_sp_handle_mcqe - Process a mailbox completion queue entry
+ * @phba: Pointer to HBA context object.
+ * @cqe: Pointer to mailbox completion queue entry.
+ *
+ * This routine process a mailbox completion queue entry, it invokes the
+ * proper mailbox complete handling or asynchrous event handling routine
+ * according to the MCQE's async bit.
+ *
+ * Return: true if work posted to worker thread, otherwise false.
+ **/
+static bool
+lpfc_sli4_sp_handle_mcqe(struct lpfc_hba *phba, struct lpfc_cqe *cqe)
+{
+	struct lpfc_mcqe mcqe;
+	bool workposted;
+
+	/* Copy the mailbox MCQE and convert endian order as needed */
+	lpfc_sli_pcimem_bcopy(cqe, &mcqe, sizeof(struct lpfc_mcqe));
+
+	/* Invoke the proper event handling routine */
+	if (!bf_get(lpfc_trailer_async, &mcqe))
+		workposted = lpfc_sli4_sp_handle_mbox_event(phba, &mcqe);
+	else
+		workposted = lpfc_sli4_sp_handle_async_event(phba, &mcqe);
+	return workposted;
+}
+
+/**
  * lpfc_sli4_sp_handle_els_wcqe - Handle els work-queue completion event
  * @phba: Pointer to HBA context object.
  * @wcqe: Pointer to work-queue completion queue entry.
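The three handlers added above form one dispatch path: lpfc_sli4_sp_handle_mcqe() snapshots the completion queue entry out of the ring and branches on the trailer's async bit to either the mailbox-completion or the asynchronous-event handler. A compact stand-alone sketch of that shape; the bit position and the types are invented for illustration, the driver itself uses bf_get() accessors on struct lpfc_mcqe:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct mcqe { uint32_t word[4]; };            /* stand-in for struct lpfc_mcqe */
#define TRAILER_ASYNC_BIT (1u << 30)          /* illustrative, not the real layout */

static bool handle_async_event(const struct mcqe *e) { (void)e; puts("async"); return true; }
static bool handle_mbox_event(const struct mcqe *e)  { (void)e; puts("mbox");  return false; }

static bool handle_mcqe(const uint32_t ring_entry[4])
{
	struct mcqe local;

	/* Copy the entry out of the ring before acting on it (the driver also
	 * byte-swaps here via lpfc_sli_pcimem_bcopy on big-endian hosts). */
	memcpy(&local, ring_entry, sizeof(local));

	if (local.word[3] & TRAILER_ASYNC_BIT)
		return handle_async_event(&local);
	return handle_mbox_event(&local);
}

int main(void)
{
	uint32_t entry[4] = { 0, 0, 0, TRAILER_ASYNC_BIT };

	return handle_mcqe(entry) ? 0 : 1;
}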
@@ -9247,6 +9482,112 @@ out:
 }
 
 /**
+ * lpfc_mq_create - Create a mailbox Queue on the HBA
+ * @phba: HBA structure that indicates port to create a queue on.
+ * @mq: The queue structure to use to create the mailbox queue.
+ *
+ * This function creates a mailbox queue, as detailed in @mq, on a port,
+ * described by @phba by sending a MQ_CREATE mailbox command to the HBA.
+ *
+ * The @phba struct is used to send mailbox command to HBA. The @cq struct
+ * is used to get the entry count and entry size that are necessary to
+ * determine the number of pages to allocate and use for this queue. This
+ * function will send the MQ_CREATE mailbox command to the HBA to setup the
+ * mailbox queue. This function is asynchronous and will wait for the mailbox
+ * command to finish before continuing.
+ *
+ * On success this function will return a zero. If unable to allocate enough
+ * memory this function will return ENOMEM. If the queue create mailbox command
+ * fails this function will return ENXIO.
+ **/
+uint32_t
+lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq,
+	       struct lpfc_queue *cq, uint32_t subtype)
+{
+	struct lpfc_mbx_mq_create *mq_create;
+	struct lpfc_dmabuf *dmabuf;
+	LPFC_MBOXQ_t *mbox;
+	int rc, length, status = 0;
+	uint32_t shdr_status, shdr_add_status;
+	union lpfc_sli4_cfg_shdr *shdr;
+
+	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!mbox)
+		return -ENOMEM;
+	length = (sizeof(struct lpfc_mbx_mq_create) -
+		  sizeof(struct lpfc_sli4_cfg_mhdr));
+	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
+			 LPFC_MBOX_OPCODE_MQ_CREATE,
+			 length, LPFC_SLI4_MBX_EMBED);
+	mq_create = &mbox->u.mqe.un.mq_create;
+	bf_set(lpfc_mbx_mq_create_num_pages, &mq_create->u.request,
+	       mq->page_count);
+	bf_set(lpfc_mq_context_cq_id, &mq_create->u.request.context,
+	       cq->queue_id);
+	bf_set(lpfc_mq_context_valid, &mq_create->u.request.context, 1);
+	switch (mq->entry_count) {
+	default:
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"0362 Unsupported MQ count. (%d)\n",
+				mq->entry_count);
+		if (mq->entry_count < 16)
+			return -EINVAL;
+		/* otherwise default to smallest count (drop through) */
+	case 16:
+		bf_set(lpfc_mq_context_count, &mq_create->u.request.context,
+		       LPFC_MQ_CNT_16);
+		break;
+	case 32:
+		bf_set(lpfc_mq_context_count, &mq_create->u.request.context,
+		       LPFC_MQ_CNT_32);
+		break;
+	case 64:
+		bf_set(lpfc_mq_context_count, &mq_create->u.request.context,
+		       LPFC_MQ_CNT_64);
+		break;
+	case 128:
+		bf_set(lpfc_mq_context_count, &mq_create->u.request.context,
+		       LPFC_MQ_CNT_128);
+		break;
+	}
+	list_for_each_entry(dmabuf, &mq->page_list, list) {
+		mq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
+			putPaddrLow(dmabuf->phys);
+		mq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
+			putPaddrHigh(dmabuf->phys);
+	}
+	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
+	/* The IOCTL status is embedded in the mailbox subheader. */
+	shdr = (union lpfc_sli4_cfg_shdr *) &mq_create->header.cfg_shdr;
+	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+	if (shdr_status || shdr_add_status || rc) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"2502 MQ_CREATE mailbox failed with "
+				"status x%x add_status x%x, mbx status x%x\n",
+				shdr_status, shdr_add_status, rc);
+		status = -ENXIO;
+		goto out;
+	}
+	mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id, &mq_create->u.response);
+	if (mq->queue_id == 0xFFFF) {
+		status = -ENXIO;
+		goto out;
+	}
+	mq->type = LPFC_MQ;
+	mq->subtype = subtype;
+	mq->host_index = 0;
+	mq->hba_index = 0;
+
+	/* link the mq onto the parent cq child list */
+	list_add_tail(&mq->list, &cq->child_list);
+out:
+	if (rc != MBX_TIMEOUT)
+		mempool_free(mbox, phba->mbox_mem_pool);
+	return status;
+}
+
+/**
  * lpfc_wq_create - Create a Work Queue on the HBA
  * @phba: HBA structure that indicates port to create a queue on.
  * @wq: The queue structure to use to create the work queue.
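lpfc_mq_create() above follows the convention shared by the SLI-4 queue-create routines: build an embedded SLI4_CONFIG mailbox, issue it by polling, and treat the operation as failed if either the mailbox return code or the status/add_status words in the config sub-header are non-zero. A small stand-alone sketch of that two-level status check, using illustrative types rather than the real lpfc config header:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in: a command can fail at the transport level (rc) or
 * report failure in a status header written back by the firmware. */
struct cfg_shdr_response {
	uint32_t status;
	uint32_t add_status;
};

static int check_sli4_cfg_status(int rc, const struct cfg_shdr_response *resp)
{
	/* Mirror of the driver's convention: any of the three being non-zero
	 * is reported and turned into -ENXIO for the caller. */
	if (resp->status || resp->add_status || rc) {
		fprintf(stderr, "cfg failed: status 0x%x add_status 0x%x rc %d\n",
			resp->status, resp->add_status, rc);
		return -ENXIO;
	}
	return 0;
}

int main(void)
{
	struct cfg_shdr_response ok = { 0, 0 }, bad = { 0x2, 0x0 };

	printf("%d %d\n", check_sli4_cfg_status(0, &ok),
	       check_sli4_cfg_status(0, &bad));
	return 0;
}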
@@ -9615,6 +9956,60 @@ lpfc_cq_destroy(struct lpfc_hba *phba, struct lpfc_queue *cq)
 }
 
 /**
+ * lpfc_mq_destroy - Destroy a Mailbox Queue on the HBA
+ * @qm: The queue structure associated with the queue to destroy.
+ *
+ * This function destroys a queue, as detailed in @mq by sending an mailbox
+ * command, specific to the type of queue, to the HBA.
+ *
+ * The @mq struct is used to get the queue ID of the queue to destroy.
+ *
+ * On success this function will return a zero. If the queue destroy mailbox
+ * command fails this function will return ENXIO.
+ **/
+uint32_t
+lpfc_mq_destroy(struct lpfc_hba *phba, struct lpfc_queue *mq)
+{
+	LPFC_MBOXQ_t *mbox;
+	int rc, length, status = 0;
+	uint32_t shdr_status, shdr_add_status;
+	union lpfc_sli4_cfg_shdr *shdr;
+
+	if (!mq)
+		return -ENODEV;
+	mbox = mempool_alloc(mq->phba->mbox_mem_pool, GFP_KERNEL);
+	if (!mbox)
+		return -ENOMEM;
+	length = (sizeof(struct lpfc_mbx_mq_destroy) -
+		  sizeof(struct lpfc_sli4_cfg_mhdr));
+	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
+			 LPFC_MBOX_OPCODE_MQ_DESTROY,
+			 length, LPFC_SLI4_MBX_EMBED);
+	bf_set(lpfc_mbx_mq_destroy_q_id, &mbox->u.mqe.un.mq_destroy.u.request,
+	       mq->queue_id);
+	mbox->vport = mq->phba->pport;
+	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+	rc = lpfc_sli_issue_mbox(mq->phba, mbox, MBX_POLL);
+	/* The IOCTL status is embedded in the mailbox subheader. */
+	shdr = (union lpfc_sli4_cfg_shdr *)
+		&mbox->u.mqe.un.mq_destroy.header.cfg_shdr;
+	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+	if (shdr_status || shdr_add_status || rc) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"2507 MQ_DESTROY mailbox failed with "
+				"status x%x add_status x%x, mbx status x%x\n",
+				shdr_status, shdr_add_status, rc);
+		status = -ENXIO;
+	}
+	/* Remove mq from any list */
+	list_del_init(&mq->list);
+	if (rc != MBX_TIMEOUT)
+		mempool_free(mbox, mq->phba->mbox_mem_pool);
+	return status;
+}
+
+/**
  * lpfc_wq_destroy - Destroy a Work Queue on the HBA
  * @wq: The queue structure associated with the queue to destroy.
  *
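One detail lpfc_mq_create() and lpfc_mq_destroy() share is that the mailbox buffer is returned to the mempool only when the command did not time out; a timed-out mailbox may still be referenced by the port or by the timeout handling path, so it is deliberately left allocated. A stripped-down sketch of that ownership rule; the types, names, and the stated rationale here are illustrative assumptions, not taken from this patch:

#include <stdlib.h>

enum mbx_rc { MBX_OK, MBX_TIMED_OUT, MBX_FAILED };   /* illustrative codes */

struct mbox_buf { unsigned char payload[256]; };

static void release_after_issue(struct mbox_buf *mbox, enum mbx_rc rc)
{
	/* If the command timed out, ownership of the buffer stays with the
	 * outstanding command (it may still be completed or cleaned up
	 * later), so it is not freed here. */
	if (rc != MBX_TIMED_OUT)
		free(mbox);
}

int main(void)
{
	release_after_issue(malloc(sizeof(struct mbox_buf)), MBX_OK);
	return 0;
}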