Diffstat (limited to 'drivers/scsi/lpfc/lpfc_sli.c')
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.c | 1263
1 file changed, 1234 insertions(+), 29 deletions(-)
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index e2d07d97fa8b..706bb22a6e8e 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -29,9 +29,12 @@
29#include <scsi/scsi_device.h> 29#include <scsi/scsi_device.h>
30#include <scsi/scsi_host.h> 30#include <scsi/scsi_host.h>
31#include <scsi/scsi_transport_fc.h> 31#include <scsi/scsi_transport_fc.h>
32#include <scsi/fc/fc_fs.h>
32 33
34#include "lpfc_hw4.h"
33#include "lpfc_hw.h" 35#include "lpfc_hw.h"
34#include "lpfc_sli.h" 36#include "lpfc_sli.h"
37#include "lpfc_sli4.h"
35#include "lpfc_nl.h" 38#include "lpfc_nl.h"
36#include "lpfc_disc.h" 39#include "lpfc_disc.h"
37#include "lpfc_scsi.h" 40#include "lpfc_scsi.h"
@@ -121,6 +124,76 @@ __lpfc_sli_get_iocbq(struct lpfc_hba *phba)
121} 124}
122 125
123/** 126/**
127 * __lpfc_clear_active_sglq - Remove the active sglq for this XRI.
128 * @phba: Pointer to HBA context object.
129 * @xritag: XRI value.
130 *
131 * This function clears the sglq pointer from the array of active
132 * sglq's. The xritag that is passed in is used to index into the
133 * array. Before the xritag can be used it needs to be adjusted
134 * by subtracting the xribase.
135 *
136 * Returns sglq pointer on success, NULL on failure.
137 **/
138static struct lpfc_sglq *
139__lpfc_clear_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
140{
141 uint16_t adj_xri;
142 struct lpfc_sglq *sglq;
143 adj_xri = xritag - phba->sli4_hba.max_cfg_param.xri_base;
144 if (adj_xri > phba->sli4_hba.max_cfg_param.max_xri)
145 return NULL;
146 sglq = phba->sli4_hba.lpfc_sglq_active_list[adj_xri];
147 phba->sli4_hba.lpfc_sglq_active_list[adj_xri] = NULL;
148 return sglq;
149}
150
151/**
152 * __lpfc_get_active_sglq - Get the active sglq for this XRI.
153 * @phba: Pointer to HBA context object.
154 * @xritag: XRI value.
155 *
156 * This function returns the sglq pointer from the array of active
157 * sglq's. The xritag that is passed in is used to index into the
158 * array. Before the xritag can be used it needs to be adjusted
159 * by subtracting the xribase.
160 *
161 * Returns sglq pointer on success, NULL on failure.
162 **/
163static struct lpfc_sglq *
164__lpfc_get_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
165{
166 uint16_t adj_xri;
167 struct lpfc_sglq *sglq;
168 adj_xri = xritag - phba->sli4_hba.max_cfg_param.xri_base;
169 if (adj_xri > phba->sli4_hba.max_cfg_param.max_xri)
170 return NULL;
171 sglq = phba->sli4_hba.lpfc_sglq_active_list[adj_xri];
172 return sglq;
173}
174
175/**
176 * __lpfc_sli_get_sglq - Allocates an iocb object from sgl pool
177 * @phba: Pointer to HBA context object.
178 *
179 * This function is called with hbalock held. It gets a new driver
180 * sglq object from the sglq list. If the list is not empty, the call
181 * succeeds and it returns a pointer to the newly allocated sglq
182 * object; otherwise it returns NULL.
183 **/
184static struct lpfc_sglq *
185__lpfc_sli_get_sglq(struct lpfc_hba *phba)
186{
187 struct list_head *lpfc_sgl_list = &phba->sli4_hba.lpfc_sgl_list;
188 struct lpfc_sglq *sglq = NULL;
189 uint16_t adj_xri;
190 list_remove_head(lpfc_sgl_list, sglq, struct lpfc_sglq, list);
191 adj_xri = sglq->sli4_xritag - phba->sli4_hba.max_cfg_param.xri_base;
192 phba->sli4_hba.lpfc_sglq_active_list[adj_xri] = sglq;
193 return sglq;
194}
195
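For readers new to the XRI bookkeeping, here is a minimal standalone model of the get/clear pattern these three helpers implement. It is an illustrative sketch only (names and sizes are hypothetical, not driver code): the tag is adjusted by a base before indexing the active array, with the same bounds check as above.

#include <stddef.h>
#include <stdint.h>

#define XRI_BASE 100	/* stands in for max_cfg_param.xri_base */
#define MAX_XRI  16	/* stands in for max_cfg_param.max_xri */

static void *active[MAX_XRI + 1];

static void *get_active(uint16_t xritag)
{
	uint16_t adj = xritag - XRI_BASE;	/* adjust by the base first */

	if (adj > MAX_XRI)			/* reject out-of-range tags */
		return NULL;
	return active[adj];
}

static void *clear_active(uint16_t xritag)
{
	uint16_t adj = xritag - XRI_BASE;
	void *p;

	if (adj > MAX_XRI)
		return NULL;
	p = active[adj];
	active[adj] = NULL;			/* slot is free again */
	return p;
}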
196/**
124 * lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool 197 * lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
125 * @phba: Pointer to HBA context object. 198 * @phba: Pointer to HBA context object.
126 * 199 *
@@ -298,6 +371,14 @@ lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd)
298 case CMD_GEN_REQUEST64_CR: 371 case CMD_GEN_REQUEST64_CR:
299 case CMD_GEN_REQUEST64_CX: 372 case CMD_GEN_REQUEST64_CX:
300 case CMD_XMIT_ELS_RSP64_CX: 373 case CMD_XMIT_ELS_RSP64_CX:
374 case DSSCMD_IWRITE64_CR:
375 case DSSCMD_IWRITE64_CX:
376 case DSSCMD_IREAD64_CR:
377 case DSSCMD_IREAD64_CX:
378 case DSSCMD_INVALIDATE_DEK:
379 case DSSCMD_SET_KEK:
380 case DSSCMD_GET_KEK_ID:
381 case DSSCMD_GEN_XFER:
301 type = LPFC_SOL_IOCB; 382 type = LPFC_SOL_IOCB;
302 break; 383 break;
303 case CMD_ABORT_XRI_CN: 384 case CMD_ABORT_XRI_CN:
@@ -2629,6 +2710,56 @@ lpfc_sli_brdready_s3(struct lpfc_hba *phba, uint32_t mask)
2629 return retval; 2710 return retval;
2630} 2711}
2631 2712
2713/**
2714 * lpfc_sli_brdready_s4 - Check for sli4 host ready status
2715 * @phba: Pointer to HBA context object.
2716 * @mask: Bit mask to be checked.
2717 *
2718 * This function checks the host status register to see if the HBA is
2719 * ready. It will wait in a loop for the HBA to become ready. If the
2720 * HBA is not ready, the function resets the HBA PCI function and
2721 * checks again. The function returns 1 when the HBA fails to become
2722 * ready, otherwise it returns zero.
2723 **/
2724static int
2725lpfc_sli_brdready_s4(struct lpfc_hba *phba, uint32_t mask)
2726{
2727 uint32_t status;
2728 int retval = 0;
2729
2730 /* Read the HBA Host Status Register */
2731 status = lpfc_sli4_post_status_check(phba);
2732
2733 if (status) {
2734 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
2735 lpfc_sli_brdrestart(phba);
2736 status = lpfc_sli4_post_status_check(phba);
2737 }
2738
2739 /* Check to see if any errors occurred during init */
2740 if (status) {
2741 phba->link_state = LPFC_HBA_ERROR;
2742 retval = 1;
2743 } else
2744 phba->sli4_hba.intr_enable = 0;
2745
2746 return retval;
2747}
2748
2749/**
2750 * lpfc_sli_brdready - Wrapper func for checking the HBA readiness
2751 * @phba: Pointer to HBA context object.
2752 * @mask: Bit mask to be checked.
2753 *
2754 * This routine wraps the actual SLI3 or SLI4 HBA readiness check routine
2755 * selected via the API jump table function pointer in the lpfc_hba struct.
2756 **/
2757int
2758lpfc_sli_brdready(struct lpfc_hba *phba, uint32_t mask)
2759{
2760 return phba->lpfc_sli_brdready(phba, mask);
2761}
2762
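The _s3/_s4 suffixes throughout this patch rely on per-revision function pointers installed in the HBA structure, so callers stay revision-agnostic. A hedged sketch of that jump-table dispatch pattern, with generic stand-in names rather than the lpfc definitions:

#include <stdio.h>

struct hba;
typedef int (*brdready_fn)(struct hba *, unsigned int);

struct hba {
	brdready_fn brdready;	/* filled in once at setup time */
};

static int brdready_s3(struct hba *h, unsigned int mask) { return 0; }
static int brdready_s4(struct hba *h, unsigned int mask) { return 0; }

/* Wrapper: callers never care which SLI revision is underneath. */
static int hba_brdready(struct hba *h, unsigned int mask)
{
	return h->brdready(h, mask);
}

int main(void)
{
	/* e.g. an SLI4 part would get the _s4 routine at table setup */
	struct hba h = { .brdready = brdready_s4 };

	return hba_brdready(&h, 0x1);
}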
2632#define BARRIER_TEST_PATTERN (0xdeadbeef) 2763#define BARRIER_TEST_PATTERN (0xdeadbeef)
2633 2764
2634/** 2765/**
@@ -2863,7 +2994,66 @@ lpfc_sli_brdreset(struct lpfc_hba *phba)
2863} 2994}
2864 2995
2865/** 2996/**
2866 * lpfc_sli_brdrestart - Restart the HBA 2997 * lpfc_sli4_brdreset - Reset a sli-4 HBA
2998 * @phba: Pointer to HBA context object.
2999 *
3000 * This function resets a SLI4 HBA. It disables PCI-layer parity checking
3001 * and SERR reporting while it resets the device. The caller is not required to hold
3002 * any locks.
3003 *
3004 * This function returns 0 always.
3005 **/
3006int
3007lpfc_sli4_brdreset(struct lpfc_hba *phba)
3008{
3009 struct lpfc_sli *psli = &phba->sli;
3010 uint16_t cfg_value;
3011 uint8_t qindx;
3012
3013 /* Reset HBA */
3014 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3015 "0295 Reset HBA Data: x%x x%x\n",
3016 phba->pport->port_state, psli->sli_flag);
3017
3018 /* perform board reset */
3019 phba->fc_eventTag = 0;
3020 phba->pport->fc_myDID = 0;
3021 phba->pport->fc_prevDID = 0;
3022
3023 /* Turn off parity checking and serr during the physical reset */
3024 pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value);
3025 pci_write_config_word(phba->pcidev, PCI_COMMAND,
3026 (cfg_value &
3027 ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
3028
3029 spin_lock_irq(&phba->hbalock);
3030 psli->sli_flag &= ~(LPFC_PROCESS_LA);
3031 phba->fcf.fcf_flag = 0;
3032 /* Clean up the child queue list for the CQs */
3033 list_del_init(&phba->sli4_hba.mbx_wq->list);
3034 list_del_init(&phba->sli4_hba.els_wq->list);
3035 list_del_init(&phba->sli4_hba.hdr_rq->list);
3036 list_del_init(&phba->sli4_hba.dat_rq->list);
3037 list_del_init(&phba->sli4_hba.mbx_cq->list);
3038 list_del_init(&phba->sli4_hba.els_cq->list);
3039 list_del_init(&phba->sli4_hba.rxq_cq->list);
3040 for (qindx = 0; qindx < phba->cfg_fcp_wq_count; qindx++)
3041 list_del_init(&phba->sli4_hba.fcp_wq[qindx]->list);
3042 for (qindx = 0; qindx < phba->cfg_fcp_eq_count; qindx++)
3043 list_del_init(&phba->sli4_hba.fcp_cq[qindx]->list);
3044 spin_unlock_irq(&phba->hbalock);
3045
3046 /* Now physically reset the device */
3047 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3048 "0389 Performing PCI function reset!\n");
3049 /* Perform FCoE PCI function reset */
3050 lpfc_pci_function_reset(phba);
3051
3052 return 0;
3053}
3054
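Turning off parity and SERR in PCI_COMMAND around a destructive reset, as done above, is a common kernel pattern. A minimal sketch under the assumption of a valid struct pci_dev; this sketch restores the saved bits immediately afterwards, whereas the driver leaves that to its own reset path:

#include <linux/pci.h>

/* Quiesce PCI error reporting around a reset. Sketch only. */
static void reset_quietly(struct pci_dev *pdev)
{
	u16 cmd;

	pci_read_config_word(pdev, PCI_COMMAND, &cmd);
	pci_write_config_word(pdev, PCI_COMMAND,
			      cmd & ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR));

	/* ... perform the physical reset here ... */

	pci_write_config_word(pdev, PCI_COMMAND, cmd);	/* restore */
}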
3055/**
3056 * lpfc_sli_brdrestart_s3 - Restart a sli-3 hba
2867 * @phba: Pointer to HBA context object. 3057 * @phba: Pointer to HBA context object.
2868 * 3058 *
2869 * This function is called in the SLI initialization code path to 3059 * This function is called in the SLI initialization code path to
@@ -2875,8 +3065,8 @@ lpfc_sli_brdreset(struct lpfc_hba *phba)
2875 * The function does not guarantee completion of MBX_RESTART mailbox 3065 * The function does not guarantee completion of MBX_RESTART mailbox
2876 * command before the return of this function. 3066 * command before the return of this function.
2877 **/ 3067 **/
2878int 3068static int
2879lpfc_sli_brdrestart(struct lpfc_hba *phba) 3069lpfc_sli_brdrestart_s3(struct lpfc_hba *phba)
2880{ 3070{
2881 MAILBOX_t *mb; 3071 MAILBOX_t *mb;
2882 struct lpfc_sli *psli; 3072 struct lpfc_sli *psli;
@@ -2915,7 +3105,7 @@ lpfc_sli_brdrestart(struct lpfc_hba *phba)
2915 lpfc_sli_brdreset(phba); 3105 lpfc_sli_brdreset(phba);
2916 phba->pport->stopped = 0; 3106 phba->pport->stopped = 0;
2917 phba->link_state = LPFC_INIT_START; 3107 phba->link_state = LPFC_INIT_START;
2918 3108 phba->hba_flag = 0;
2919 spin_unlock_irq(&phba->hbalock); 3109 spin_unlock_irq(&phba->hbalock);
2920 3110
2921 memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets)); 3111 memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
@@ -2930,6 +3120,55 @@ lpfc_sli_brdrestart(struct lpfc_hba *phba)
2930} 3120}
2931 3121
2932/** 3122/**
3123 * lpfc_sli_brdrestart_s4 - Restart the sli-4 hba
3124 * @phba: Pointer to HBA context object.
3125 *
3126 * This function is called in the SLI initialization code path to restart
3127 * a SLI4 HBA. The caller is not required to hold any lock.
3128 * At the end of the function, it calls lpfc_hba_down_post function to
3129 * free any pending commands.
3130 **/
3131static int
3132lpfc_sli_brdrestart_s4(struct lpfc_hba *phba)
3133{
3134 struct lpfc_sli *psli = &phba->sli;
3135
3136
3137 /* Restart HBA */
3138 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3139 "0296 Restart HBA Data: x%x x%x\n",
3140 phba->pport->port_state, psli->sli_flag);
3141
3142 lpfc_sli4_brdreset(phba);
3143
3144 spin_lock_irq(&phba->hbalock);
3145 phba->pport->stopped = 0;
3146 phba->link_state = LPFC_INIT_START;
3147 phba->hba_flag = 0;
3148 spin_unlock_irq(&phba->hbalock);
3149
3150 memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
3151 psli->stats_start = get_seconds();
3152
3153 lpfc_hba_down_post(phba);
3154
3155 return 0;
3156}
3157
3158/**
3159 * lpfc_sli_brdrestart - Wrapper func for restarting hba
3160 * @phba: Pointer to HBA context object.
3161 *
3162 * This routine wraps the actual SLI3 or SLI4 HBA restart routine selected
3163 * via the API jump table function pointer in the lpfc_hba struct.
3164 **/
3165int
3166lpfc_sli_brdrestart(struct lpfc_hba *phba)
3167{
3168 return phba->lpfc_sli_brdrestart(phba);
3169}
3170
3171/**
2933 * lpfc_sli_chipset_init - Wait for the restart of the HBA after a restart 3172 * lpfc_sli_chipset_init - Wait for the restart of the HBA after a restart
2934 * @phba: Pointer to HBA context object. 3173 * @phba: Pointer to HBA context object.
2935 * 3174 *
@@ -3353,6 +3592,488 @@ lpfc_sli_hba_setup_error:
3353 return rc; 3592 return rc;
3354} 3593}
3355 3594
3595/**
3596 * lpfc_sli4_read_fcoe_params - Read fcoe params from conf region
3597 * @phba: Pointer to HBA context object.
3598 * @mboxq: mailbox pointer.
3599 * This function issues a dump mailbox command to read config region
3600 * 23, parses the records in the region, and populates the driver
3601 * data structures.
3602 **/
3603static int
3604lpfc_sli4_read_fcoe_params(struct lpfc_hba *phba,
3605 LPFC_MBOXQ_t *mboxq)
3606{
3607 struct lpfc_dmabuf *mp;
3608 struct lpfc_mqe *mqe;
3609 uint32_t data_length;
3610 int rc;
3611
3612 /* Program the default value of vlan_id and fc_map */
3613 phba->valid_vlan = 0;
3614 phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
3615 phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
3616 phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;
3617
3618 mqe = &mboxq->u.mqe;
3619 if (lpfc_dump_fcoe_param(phba, mboxq))
3620 return -ENOMEM;
3621
3622 mp = (struct lpfc_dmabuf *) mboxq->context1;
3623 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
3624
3625 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
3626 "(%d):2571 Mailbox cmd x%x Status x%x "
3627 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x "
3628 "x%x x%x x%x x%x x%x x%x x%x x%x x%x "
3629 "CQ: x%x x%x x%x x%x\n",
3630 mboxq->vport ? mboxq->vport->vpi : 0,
3631 bf_get(lpfc_mqe_command, mqe),
3632 bf_get(lpfc_mqe_status, mqe),
3633 mqe->un.mb_words[0], mqe->un.mb_words[1],
3634 mqe->un.mb_words[2], mqe->un.mb_words[3],
3635 mqe->un.mb_words[4], mqe->un.mb_words[5],
3636 mqe->un.mb_words[6], mqe->un.mb_words[7],
3637 mqe->un.mb_words[8], mqe->un.mb_words[9],
3638 mqe->un.mb_words[10], mqe->un.mb_words[11],
3639 mqe->un.mb_words[12], mqe->un.mb_words[13],
3640 mqe->un.mb_words[14], mqe->un.mb_words[15],
3641 mqe->un.mb_words[16], mqe->un.mb_words[50],
3642 mboxq->mcqe.word0,
3643 mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1,
3644 mboxq->mcqe.trailer);
3645
3646 if (rc) {
3647 lpfc_mbuf_free(phba, mp->virt, mp->phys);
3648 kfree(mp);
3649 return -EIO;
3650 }
3651 data_length = mqe->un.mb_words[5];
3652 if (data_length > DMP_FCOEPARAM_RGN_SIZE)
3653 return -EIO;
3654
3655 lpfc_parse_fcoe_conf(phba, mp->virt, data_length);
3656 lpfc_mbuf_free(phba, mp->virt, mp->phys);
3657 kfree(mp);
3658 return 0;
3659}
3660
3661/**
3662 * lpfc_sli4_read_rev - Issue READ_REV and collect vpd data
3663 * @phba: pointer to lpfc hba data structure.
3664 * @mboxq: pointer to the LPFC_MBOXQ_t structure.
3665 * @vpd: pointer to the memory to hold resulting port vpd data.
3666 * @vpd_size: On input, the number of bytes allocated to @vpd.
3667 * On output, the number of data bytes in @vpd.
3668 *
3669 * This routine executes a READ_REV SLI4 mailbox command. In
3670 * addition, this routine gets the port vpd data.
3671 *
3672 * Return codes
3673 * 0 - successful
3674 * ENOMEM - could not allocate memory.
3675 **/
3676static int
3677lpfc_sli4_read_rev(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
3678 uint8_t *vpd, uint32_t *vpd_size)
3679{
3680 int rc = 0;
3681 uint32_t dma_size;
3682 struct lpfc_dmabuf *dmabuf;
3683 struct lpfc_mqe *mqe;
3684
3685 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
3686 if (!dmabuf)
3687 return -ENOMEM;
3688
3689 /*
3690 * Get a DMA buffer for the vpd data resulting from the READ_REV
3691 * mailbox command.
3692 */
3693 dma_size = *vpd_size;
3694 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
3695 dma_size,
3696 &dmabuf->phys,
3697 GFP_KERNEL);
3698 if (!dmabuf->virt) {
3699 kfree(dmabuf);
3700 return -ENOMEM;
3701 }
3702 memset(dmabuf->virt, 0, dma_size);
3703
3704 /*
3705 * The SLI4 implementation of READ_REV conflicts at word1,
3706 * bits 31:16 and SLI4 adds vpd functionality not present
3707 * in SLI3. This code corrects the conflicts.
3708 */
3709 lpfc_read_rev(phba, mboxq);
3710 mqe = &mboxq->u.mqe;
3711 mqe->un.read_rev.vpd_paddr_high = putPaddrHigh(dmabuf->phys);
3712 mqe->un.read_rev.vpd_paddr_low = putPaddrLow(dmabuf->phys);
3713 mqe->un.read_rev.word1 &= 0x0000FFFF;
3714 bf_set(lpfc_mbx_rd_rev_vpd, &mqe->un.read_rev, 1);
3715 bf_set(lpfc_mbx_rd_rev_avail_len, &mqe->un.read_rev, dma_size);
3716
3717 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
3718 if (rc) {
3719 dma_free_coherent(&phba->pcidev->dev, dma_size,
3720 dmabuf->virt, dmabuf->phys);
3721 return -EIO;
3722 }
3723
3724 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
3725 "(%d):0380 Mailbox cmd x%x Status x%x "
3726 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x "
3727 "x%x x%x x%x x%x x%x x%x x%x x%x x%x "
3728 "CQ: x%x x%x x%x x%x\n",
3729 mboxq->vport ? mboxq->vport->vpi : 0,
3730 bf_get(lpfc_mqe_command, mqe),
3731 bf_get(lpfc_mqe_status, mqe),
3732 mqe->un.mb_words[0], mqe->un.mb_words[1],
3733 mqe->un.mb_words[2], mqe->un.mb_words[3],
3734 mqe->un.mb_words[4], mqe->un.mb_words[5],
3735 mqe->un.mb_words[6], mqe->un.mb_words[7],
3736 mqe->un.mb_words[8], mqe->un.mb_words[9],
3737 mqe->un.mb_words[10], mqe->un.mb_words[11],
3738 mqe->un.mb_words[12], mqe->un.mb_words[13],
3739 mqe->un.mb_words[14], mqe->un.mb_words[15],
3740 mqe->un.mb_words[16], mqe->un.mb_words[50],
3741 mboxq->mcqe.word0,
3742 mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1,
3743 mboxq->mcqe.trailer);
3744
3745 /*
3746 * The available vpd length cannot be bigger than the
3747 * DMA buffer passed to the port. Catch the less than
3748 * case and update the caller's size.
3749 */
3750 if (mqe->un.read_rev.avail_vpd_len < *vpd_size)
3751 *vpd_size = mqe->un.read_rev.avail_vpd_len;
3752
3753 lpfc_sli_pcimem_bcopy(dmabuf->virt, vpd, *vpd_size);
3754 dma_free_coherent(&phba->pcidev->dev, dma_size,
3755 dmabuf->virt, dmabuf->phys);
3756 kfree(dmabuf);
3757 return 0;
3758}
3759
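The READ_REV handling above follows the usual coherent-DMA buffer lifecycle: allocate, hand the bus address to the port, copy the data out, free. A minimal kernel-style sketch of that lifecycle (assumes a valid struct device; the post_to_hw step is a hypothetical placeholder):

#include <linux/dma-mapping.h>
#include <linux/errno.h>

/* Allocate a DMA-coherent payload buffer, let the hardware DMA into
 * it, then free it. Sketch only; error paths trimmed.
 */
static int payload_roundtrip(struct device *dev, size_t size)
{
	dma_addr_t phys;
	void *virt;

	virt = dma_alloc_coherent(dev, size, &phys, GFP_KERNEL);
	if (!virt)
		return -ENOMEM;

	/* post_to_hw(phys, size);  hardware fills virt via DMA */

	dma_free_coherent(dev, size, virt, phys);
	return 0;
}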
3760/**
3761 * lpfc_sli4_arm_cqeq_intr - Arm sli-4 device completion and event queues
3762 * @phba: pointer to lpfc hba data structure.
3763 *
3764 * This routine is called to explicitly arm the SLI4 device's completion and
3765 * event queues
3766 **/
3767static void
3768lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba)
3769{
3770 uint8_t fcp_eqidx;
3771
3772 lpfc_sli4_cq_release(phba->sli4_hba.mbx_cq, LPFC_QUEUE_REARM);
3773 lpfc_sli4_cq_release(phba->sli4_hba.els_cq, LPFC_QUEUE_REARM);
3774 lpfc_sli4_cq_release(phba->sli4_hba.rxq_cq, LPFC_QUEUE_REARM);
3775 for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++)
3776 lpfc_sli4_cq_release(phba->sli4_hba.fcp_cq[fcp_eqidx],
3777 LPFC_QUEUE_REARM);
3778 lpfc_sli4_eq_release(phba->sli4_hba.sp_eq, LPFC_QUEUE_REARM);
3779 for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++)
3780 lpfc_sli4_eq_release(phba->sli4_hba.fp_eq[fcp_eqidx],
3781 LPFC_QUEUE_REARM);
3782}
3783
3784/**
3785 * lpfc_sli4_hba_setup - SLI4 device initialization PCI function
3786 * @phba: Pointer to HBA context object.
3787 *
3788 * This function is the main SLI4 device initialization PCI function. This
3789 * function is called by the HBA initialization code, HBA reset code and
3790 * HBA error attention handler code. Caller is not required to hold any
3791 * locks.
3792 **/
3793int
3794lpfc_sli4_hba_setup(struct lpfc_hba *phba)
3795{
3796 int rc;
3797 LPFC_MBOXQ_t *mboxq;
3798 struct lpfc_mqe *mqe;
3799 uint8_t *vpd;
3800 uint32_t vpd_size;
3801 uint32_t ftr_rsp = 0;
3802 struct Scsi_Host *shost = lpfc_shost_from_vport(phba->pport);
3803 struct lpfc_vport *vport = phba->pport;
3804 struct lpfc_dmabuf *mp;
3805
3806 /* Perform a PCI function reset to start from clean */
3807 rc = lpfc_pci_function_reset(phba);
3808 if (unlikely(rc))
3809 return -ENODEV;
3810
3811	/* Check the HBA Host Status Register for readiness */
3812 rc = lpfc_sli4_post_status_check(phba);
3813 if (unlikely(rc))
3814 return -ENODEV;
3815 else {
3816 spin_lock_irq(&phba->hbalock);
3817 phba->sli.sli_flag |= LPFC_SLI_ACTIVE;
3818 spin_unlock_irq(&phba->hbalock);
3819 }
3820
3821 /*
3822 * Allocate a single mailbox container for initializing the
3823 * port.
3824 */
3825 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
3826 if (!mboxq)
3827 return -ENOMEM;
3828
3829 /*
3830 * Continue initialization with default values even if driver failed
3831 * to read FCoE param config regions
3832 */
3833 if (lpfc_sli4_read_fcoe_params(phba, mboxq))
3834 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
3835 "2570 Failed to read FCoE parameters \n");
3836
3837 /* Issue READ_REV to collect vpd and FW information. */
3838 vpd_size = PAGE_SIZE;
3839 vpd = kzalloc(vpd_size, GFP_KERNEL);
3840 if (!vpd) {
3841 rc = -ENOMEM;
3842 goto out_free_mbox;
3843 }
3844
3845 rc = lpfc_sli4_read_rev(phba, mboxq, vpd, &vpd_size);
3846 if (unlikely(rc))
3847 goto out_free_vpd;
3848
3849 mqe = &mboxq->u.mqe;
3850 if ((bf_get(lpfc_mbx_rd_rev_sli_lvl,
3851 &mqe->un.read_rev) != LPFC_SLI_REV4) ||
3852 (bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev) == 0)) {
3853 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
3854 "0376 READ_REV Error. SLI Level %d "
3855 "FCoE enabled %d\n",
3856 bf_get(lpfc_mbx_rd_rev_sli_lvl, &mqe->un.read_rev),
3857 bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev));
3858 rc = -EIO;
3859 goto out_free_vpd;
3860 }
3861 /* Single threaded at this point, no need for lock */
3862 spin_lock_irq(&phba->hbalock);
3863 phba->hba_flag |= HBA_FCOE_SUPPORT;
3864 spin_unlock_irq(&phba->hbalock);
3865 /*
3866 * Evaluate the read rev and vpd data. Populate the driver
3867 * state with the results. If this routine fails, the failure
3868 * is not fatal as the driver will use generic values.
3869 */
3870 rc = lpfc_parse_vpd(phba, vpd, vpd_size);
3871 if (unlikely(!rc)) {
3872 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
3873 "0377 Error %d parsing vpd. "
3874 "Using defaults.\n", rc);
3875 rc = 0;
3876 }
3877
3878 /* By now, we should determine the SLI revision, hard code for now */
3879 phba->sli_rev = LPFC_SLI_REV4;
3880
3881 /*
3882 * Discover the port's supported feature set and match it against the
3883 * hosts requests.
3884 */
3885 lpfc_request_features(phba, mboxq);
3886 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
3887 if (unlikely(rc)) {
3888 rc = -EIO;
3889 goto out_free_vpd;
3890 }
3891
3892 /*
3893 * The port must support FCP initiator mode as this is the
3894 * only mode running in the host.
3895 */
3896 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_fcpi, &mqe->un.req_ftrs))) {
3897 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
3898 "0378 No support for fcpi mode.\n");
3899 ftr_rsp++;
3900 }
3901
3902 /*
3903 * If the port cannot support the host's requested features
3904 * then turn off the global config parameters to disable the
3905 * feature in the driver. This is not a fatal error.
3906 */
3907 if ((phba->cfg_enable_bg) &&
3908 !(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs)))
3909 ftr_rsp++;
3910
3911 if (phba->max_vpi && phba->cfg_enable_npiv &&
3912 !(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs)))
3913 ftr_rsp++;
3914
3915 if (ftr_rsp) {
3916 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
3917 "0379 Feature Mismatch Data: x%08x %08x "
3918 "x%x x%x x%x\n", mqe->un.req_ftrs.word2,
3919 mqe->un.req_ftrs.word3, phba->cfg_enable_bg,
3920 phba->cfg_enable_npiv, phba->max_vpi);
3921 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs)))
3922 phba->cfg_enable_bg = 0;
3923 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs)))
3924 phba->cfg_enable_npiv = 0;
3925 }
3926
3927 /* These SLI3 features are assumed in SLI4 */
3928 spin_lock_irq(&phba->hbalock);
3929 phba->sli3_options |= (LPFC_SLI3_NPIV_ENABLED | LPFC_SLI3_HBQ_ENABLED);
3930 spin_unlock_irq(&phba->hbalock);
3931
3932 /* Read the port's service parameters. */
3933 lpfc_read_sparam(phba, mboxq, vport->vpi);
3934 mboxq->vport = vport;
3935 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
3936 mp = (struct lpfc_dmabuf *) mboxq->context1;
3937 if (rc == MBX_SUCCESS) {
3938 memcpy(&vport->fc_sparam, mp->virt, sizeof(struct serv_parm));
3939 rc = 0;
3940 }
3941
3942 /*
3943 * This memory was allocated by the lpfc_read_sparam routine. Release
3944 * it to the mbuf pool.
3945 */
3946 lpfc_mbuf_free(phba, mp->virt, mp->phys);
3947 kfree(mp);
3948 mboxq->context1 = NULL;
3949 if (unlikely(rc)) {
3950 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
3951 "0382 READ_SPARAM command failed "
3952 "status %d, mbxStatus x%x\n",
3953 rc, bf_get(lpfc_mqe_status, mqe));
3954 phba->link_state = LPFC_HBA_ERROR;
3955 rc = -EIO;
3956 goto out_free_vpd;
3957 }
3958
3959 if (phba->cfg_soft_wwnn)
3960 u64_to_wwn(phba->cfg_soft_wwnn,
3961 vport->fc_sparam.nodeName.u.wwn);
3962 if (phba->cfg_soft_wwpn)
3963 u64_to_wwn(phba->cfg_soft_wwpn,
3964 vport->fc_sparam.portName.u.wwn);
3965 memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
3966 sizeof(struct lpfc_name));
3967 memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
3968 sizeof(struct lpfc_name));
3969
3970 /* Update the fc_host data structures with new wwn. */
3971 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
3972 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
3973
3974 /* Register SGL pool to the device using non-embedded mailbox command */
3975 rc = lpfc_sli4_post_sgl_list(phba);
3976 if (unlikely(rc)) {
3977 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
3978 "0582 Error %d during sgl post operation", rc);
3979 rc = -ENODEV;
3980 goto out_free_vpd;
3981 }
3982
3983 /* Register SCSI SGL pool to the device */
3984 rc = lpfc_sli4_repost_scsi_sgl_list(phba);
3985 if (unlikely(rc)) {
3986 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
3987 "0383 Error %d during scsi sgl post opeation",
3988 rc);
3989 /* Some Scsi buffers were moved to the abort scsi list */
3990 /* A pci function reset will repost them */
3991 rc = -ENODEV;
3992 goto out_free_vpd;
3993 }
3994
3995 /* Post the rpi header region to the device. */
3996 rc = lpfc_sli4_post_all_rpi_hdrs(phba);
3997 if (unlikely(rc)) {
3998 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
3999 "0393 Error %d during rpi post operation\n",
4000 rc);
4001 rc = -ENODEV;
4002 goto out_free_vpd;
4003 }
4004 /* Temporary initialization of lpfc_fip_flag to non-fip */
4005 bf_set(lpfc_fip_flag, &phba->sli4_hba.sli4_flags, 0);
4006
4007 /* Set up all the queues to the device */
4008 rc = lpfc_sli4_queue_setup(phba);
4009 if (unlikely(rc)) {
4010 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
4011 "0381 Error %d during queue setup.\n ", rc);
4012 goto out_stop_timers;
4013 }
4014
4015 /* Arm the CQs and then EQs on device */
4016 lpfc_sli4_arm_cqeq_intr(phba);
4017
4018 /* Indicate device interrupt mode */
4019 phba->sli4_hba.intr_enable = 1;
4020
4021 /* Allow asynchronous mailbox command to go through */
4022 spin_lock_irq(&phba->hbalock);
4023 phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
4024 spin_unlock_irq(&phba->hbalock);
4025
4026 /* Post receive buffers to the device */
4027 lpfc_sli4_rb_setup(phba);
4028
4029 /* Start the ELS watchdog timer */
4030 /*
4031 * The driver for SLI4 is not yet ready to process timeouts
4032 * or interrupts. Once it is, the comment bars can be removed.
4033 */
4034 /* mod_timer(&vport->els_tmofunc,
4035 * jiffies + HZ * (phba->fc_ratov*2)); */
4036
4037 /* Start heart beat timer */
4038 mod_timer(&phba->hb_tmofunc,
4039 jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
4040 phba->hb_outstanding = 0;
4041 phba->last_completion_time = jiffies;
4042
4043 /* Start error attention (ERATT) polling timer */
4044 mod_timer(&phba->eratt_poll, jiffies + HZ * LPFC_ERATT_POLL_INTERVAL);
4045
4046 /*
4047 * The port is ready, set the host's link state to LINK_DOWN
4048 * in preparation for link interrupts.
4049 */
4050 lpfc_init_link(phba, mboxq, phba->cfg_topology, phba->cfg_link_speed);
4051 mboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
4052 lpfc_set_loopback_flag(phba);
4053 /* Change driver state to LPFC_LINK_DOWN right before init link */
4054 spin_lock_irq(&phba->hbalock);
4055 phba->link_state = LPFC_LINK_DOWN;
4056 spin_unlock_irq(&phba->hbalock);
4057 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
4058 if (unlikely(rc != MBX_NOT_FINISHED)) {
4059 kfree(vpd);
4060 return 0;
4061 } else
4062 rc = -EIO;
4063
4064 /* Unset all the queues set up in this routine when error out */
4065 if (rc)
4066 lpfc_sli4_queue_unset(phba);
4067
4068out_stop_timers:
4069 if (rc)
4070 lpfc_stop_hba_timers(phba);
4071out_free_vpd:
4072 kfree(vpd);
4073out_free_mbox:
4074 mempool_free(mboxq, phba->mbox_mem_pool);
4075 return rc;
4076}
3356 4077
3357/** 4078/**
3358 * lpfc_mbox_timeout - Timeout call back function for mbox timer 4079 * lpfc_mbox_timeout - Timeout call back function for mbox timer
@@ -3812,13 +4533,420 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
3812 4533
3813out_not_finished: 4534out_not_finished:
3814 if (processing_queue) { 4535 if (processing_queue) {
3815 pmbox->mb.mbxStatus = MBX_NOT_FINISHED; 4536 pmbox->u.mb.mbxStatus = MBX_NOT_FINISHED;
3816 lpfc_mbox_cmpl_put(phba, pmbox); 4537 lpfc_mbox_cmpl_put(phba, pmbox);
3817 } 4538 }
3818 return MBX_NOT_FINISHED; 4539 return MBX_NOT_FINISHED;
3819} 4540}
3820 4541
3821/** 4542/**
4543 * lpfc_sli4_post_sync_mbox - Post an SLI4 mailbox to the bootstrap mailbox
4544 * @phba: Pointer to HBA context object.
4545 * @mboxq: Pointer to mailbox object.
4546 *
4547 * The function posts a mailbox to the port. The mailbox is expected
4548 * to be completely filled in and ready for the port to operate on it.
4549 * This routine executes a synchronous completion operation on the
4550 * mailbox by polling for its completion.
4551 *
4552 * The caller must not be holding any locks when calling this routine.
4553 *
4554 * Returns:
4555 * MBX_SUCCESS - mailbox posted successfully
4556 * Any of the MBX error values.
4557 **/
4558static int
4559lpfc_sli4_post_sync_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
4560{
4561 int rc = MBX_SUCCESS;
4562 unsigned long iflag;
4563 uint32_t db_ready;
4564 uint32_t mcqe_status;
4565 uint32_t mbx_cmnd;
4566 unsigned long timeout;
4567 struct lpfc_sli *psli = &phba->sli;
4568 struct lpfc_mqe *mb = &mboxq->u.mqe;
4569 struct lpfc_bmbx_create *mbox_rgn;
4570 struct dma_address *dma_address;
4571 struct lpfc_register bmbx_reg;
4572
4573 /*
4574 * Only one mailbox can be active to the bootstrap mailbox region
4575 * at a time and there is no queueing provided.
4576 */
4577 spin_lock_irqsave(&phba->hbalock, iflag);
4578 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
4579 spin_unlock_irqrestore(&phba->hbalock, iflag);
4580 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
4581 "(%d):2532 Mailbox command x%x (x%x) "
4582 "cannot issue Data: x%x x%x\n",
4583 mboxq->vport ? mboxq->vport->vpi : 0,
4584 mboxq->u.mb.mbxCommand,
4585 lpfc_sli4_mbox_opcode_get(phba, mboxq),
4586 psli->sli_flag, MBX_POLL);
4587 return MBXERR_ERROR;
4588 }
4589 /* The server grabs the token and owns it until release */
4590 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
4591 phba->sli.mbox_active = mboxq;
4592 spin_unlock_irqrestore(&phba->hbalock, iflag);
4593
4594 /*
4595 * Initialize the bootstrap memory region to avoid stale data areas
4596 * in the mailbox post. Then copy the caller's mailbox contents to
4597 * the bmbx mailbox region.
4598 */
4599 mbx_cmnd = bf_get(lpfc_mqe_command, mb);
4600 memset(phba->sli4_hba.bmbx.avirt, 0, sizeof(struct lpfc_bmbx_create));
4601 lpfc_sli_pcimem_bcopy(mb, phba->sli4_hba.bmbx.avirt,
4602 sizeof(struct lpfc_mqe));
4603
4604 /* Post the high mailbox dma address to the port and wait for ready. */
4605 dma_address = &phba->sli4_hba.bmbx.dma_address;
4606 writel(dma_address->addr_hi, phba->sli4_hba.BMBXregaddr);
4607
4608 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, mbx_cmnd)
4609 * 1000) + jiffies;
4610 do {
4611 bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr);
4612 db_ready = bf_get(lpfc_bmbx_rdy, &bmbx_reg);
4613 if (!db_ready)
4614 msleep(2);
4615
4616 if (time_after(jiffies, timeout)) {
4617 rc = MBXERR_ERROR;
4618 goto exit;
4619 }
4620 } while (!db_ready);
4621
4622 /* Post the low mailbox dma address to the port. */
4623 writel(dma_address->addr_lo, phba->sli4_hba.BMBXregaddr);
4624 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, mbx_cmnd)
4625 * 1000) + jiffies;
4626 do {
4627 bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr);
4628 db_ready = bf_get(lpfc_bmbx_rdy, &bmbx_reg);
4629 if (!db_ready)
4630 msleep(2);
4631
4632 if (time_after(jiffies, timeout)) {
4633 rc = MBXERR_ERROR;
4634 goto exit;
4635 }
4636 } while (!db_ready);
4637
4638 /*
4639 * Read the CQ to ensure the mailbox has completed.
4640 * If so, update the mailbox status so that the upper layers
4641 * can complete the request normally.
4642 */
4643 lpfc_sli_pcimem_bcopy(phba->sli4_hba.bmbx.avirt, mb,
4644 sizeof(struct lpfc_mqe));
4645 mbox_rgn = (struct lpfc_bmbx_create *) phba->sli4_hba.bmbx.avirt;
4646 lpfc_sli_pcimem_bcopy(&mbox_rgn->mcqe, &mboxq->mcqe,
4647 sizeof(struct lpfc_mcqe));
4648 mcqe_status = bf_get(lpfc_mcqe_status, &mbox_rgn->mcqe);
4649
4650 /* Prefix the mailbox status with range x4000 to note SLI4 status. */
4651 if (mcqe_status != MB_CQE_STATUS_SUCCESS) {
4652 bf_set(lpfc_mqe_status, mb, LPFC_MBX_ERROR_RANGE | mcqe_status);
4653 rc = MBXERR_ERROR;
4654 }
4655
4656 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
4657 "(%d):0356 Mailbox cmd x%x (x%x) Status x%x "
4658 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x"
4659 " x%x x%x CQ: x%x x%x x%x x%x\n",
4660 mboxq->vport ? mboxq->vport->vpi : 0,
4661 mbx_cmnd, lpfc_sli4_mbox_opcode_get(phba, mboxq),
4662 bf_get(lpfc_mqe_status, mb),
4663 mb->un.mb_words[0], mb->un.mb_words[1],
4664 mb->un.mb_words[2], mb->un.mb_words[3],
4665 mb->un.mb_words[4], mb->un.mb_words[5],
4666 mb->un.mb_words[6], mb->un.mb_words[7],
4667 mb->un.mb_words[8], mb->un.mb_words[9],
4668 mb->un.mb_words[10], mb->un.mb_words[11],
4669 mb->un.mb_words[12], mboxq->mcqe.word0,
4670 mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1,
4671 mboxq->mcqe.trailer);
4672exit:
4673	/* We are holding the token; no lock needed when releasing it */
4674 spin_lock_irqsave(&phba->hbalock, iflag);
4675 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
4676 phba->sli.mbox_active = NULL;
4677 spin_unlock_irqrestore(&phba->hbalock, iflag);
4678 return rc;
4679}
4680
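Both bootstrap doorbell waits above use the standard jiffies-deadline polling idiom. A sketch of that pattern in isolation (the register address and ready bit are illustrative placeholders, not lpfc registers):

#include <linux/jiffies.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/errno.h>

/* Poll a ready bit until it sets or a millisecond deadline passes. */
static int wait_doorbell_ready(void __iomem *regaddr, u32 ready_bit,
			       unsigned int tmo_ms)
{
	unsigned long deadline = jiffies + msecs_to_jiffies(tmo_ms);

	while (!(readl(regaddr) & ready_bit)) {
		if (time_after(jiffies, deadline))
			return -ETIMEDOUT;
		msleep(2);	/* give the port a moment; don't busy-spin */
	}
	return 0;
}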
4681/**
4682 * lpfc_sli_issue_mbox_s4 - Issue an SLI4 mailbox command to firmware
4683 * @phba: Pointer to HBA context object.
4684 * @pmbox: Pointer to mailbox object.
4685 * @flag: Flag indicating how the mailbox need to be processed.
4686 *
4687 * This function is called by discovery code and HBA management code to submit
4688 * a mailbox command to firmware with SLI-4 interface spec.
4689 *
4690 * Return codes: the caller owns the mailbox command after the return of
4691 * the function.
4692 **/
4693static int
4694lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
4695 uint32_t flag)
4696{
4697 struct lpfc_sli *psli = &phba->sli;
4698 unsigned long iflags;
4699 int rc;
4700
4701 /* Detect polling mode and jump to a handler */
4702 if (!phba->sli4_hba.intr_enable) {
4703 if (flag == MBX_POLL)
4704 rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
4705 else
4706 rc = -EIO;
4707 if (rc != MBX_SUCCESS)
4708 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
4709 "(%d):2541 Mailbox command x%x "
4710 "(x%x) cannot issue Data: x%x x%x\n",
4711 mboxq->vport ? mboxq->vport->vpi : 0,
4712 mboxq->u.mb.mbxCommand,
4713 lpfc_sli4_mbox_opcode_get(phba, mboxq),
4714 psli->sli_flag, flag);
4715 return rc;
4716 } else if (flag == MBX_POLL) {
4717 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
4718 "(%d):2542 Mailbox command x%x (x%x) "
4719 "cannot issue Data: x%x x%x\n",
4720 mboxq->vport ? mboxq->vport->vpi : 0,
4721 mboxq->u.mb.mbxCommand,
4722 lpfc_sli4_mbox_opcode_get(phba, mboxq),
4723 psli->sli_flag, flag);
4724 return -EIO;
4725 }
4726
4727	/* Now, interrupt mode asynchronous mailbox command */
4728 rc = lpfc_mbox_cmd_check(phba, mboxq);
4729 if (rc) {
4730 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
4731 "(%d):2543 Mailbox command x%x (x%x) "
4732 "cannot issue Data: x%x x%x\n",
4733 mboxq->vport ? mboxq->vport->vpi : 0,
4734 mboxq->u.mb.mbxCommand,
4735 lpfc_sli4_mbox_opcode_get(phba, mboxq),
4736 psli->sli_flag, flag);
4737 goto out_not_finished;
4738 }
4739 rc = lpfc_mbox_dev_check(phba);
4740 if (unlikely(rc)) {
4741 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
4742 "(%d):2544 Mailbox command x%x (x%x) "
4743 "cannot issue Data: x%x x%x\n",
4744 mboxq->vport ? mboxq->vport->vpi : 0,
4745 mboxq->u.mb.mbxCommand,
4746 lpfc_sli4_mbox_opcode_get(phba, mboxq),
4747 psli->sli_flag, flag);
4748 goto out_not_finished;
4749 }
4750
4751 /* Put the mailbox command to the driver internal FIFO */
4752 psli->slistat.mbox_busy++;
4753 spin_lock_irqsave(&phba->hbalock, iflags);
4754 lpfc_mbox_put(phba, mboxq);
4755 spin_unlock_irqrestore(&phba->hbalock, iflags);
4756 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
4757 "(%d):0354 Mbox cmd issue - Enqueue Data: "
4758 "x%x (x%x) x%x x%x x%x\n",
4759 mboxq->vport ? mboxq->vport->vpi : 0xffffff,
4760 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
4761 lpfc_sli4_mbox_opcode_get(phba, mboxq),
4762 phba->pport->port_state,
4763 psli->sli_flag, MBX_NOWAIT);
4764 /* Wake up worker thread to transport mailbox command from head */
4765 lpfc_worker_wake_up(phba);
4766
4767 return MBX_BUSY;
4768
4769out_not_finished:
4770 return MBX_NOT_FINISHED;
4771}
4772
4773/**
4774 * lpfc_sli4_post_async_mbox - Post an SLI4 mailbox command to device
4775 * @phba: Pointer to HBA context object.
4776 *
4777 * This function is called by worker thread to send a mailbox command to
4778 * SLI4 HBA firmware.
4779 *
4780 **/
4781int
4782lpfc_sli4_post_async_mbox(struct lpfc_hba *phba)
4783{
4784 struct lpfc_sli *psli = &phba->sli;
4785 LPFC_MBOXQ_t *mboxq;
4786 int rc = MBX_SUCCESS;
4787 unsigned long iflags;
4788 struct lpfc_mqe *mqe;
4789 uint32_t mbx_cmnd;
4790
4791	/* Check interrupt mode before posting async mailbox command */
4792 if (unlikely(!phba->sli4_hba.intr_enable))
4793 return MBX_NOT_FINISHED;
4794
4795 /* Check for mailbox command service token */
4796 spin_lock_irqsave(&phba->hbalock, iflags);
4797 if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
4798 spin_unlock_irqrestore(&phba->hbalock, iflags);
4799 return MBX_NOT_FINISHED;
4800 }
4801 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
4802 spin_unlock_irqrestore(&phba->hbalock, iflags);
4803 return MBX_NOT_FINISHED;
4804 }
4805 if (unlikely(phba->sli.mbox_active)) {
4806 spin_unlock_irqrestore(&phba->hbalock, iflags);
4807 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
4808 "0384 There is pending active mailbox cmd\n");
4809 return MBX_NOT_FINISHED;
4810 }
4811 /* Take the mailbox command service token */
4812 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
4813
4814 /* Get the next mailbox command from head of queue */
4815 mboxq = lpfc_mbox_get(phba);
4816
4817	/* If no more mailbox commands are waiting to post, we're done */
4818 if (!mboxq) {
4819 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
4820 spin_unlock_irqrestore(&phba->hbalock, iflags);
4821 return MBX_SUCCESS;
4822 }
4823 phba->sli.mbox_active = mboxq;
4824 spin_unlock_irqrestore(&phba->hbalock, iflags);
4825
4826 /* Check device readiness for posting mailbox command */
4827 rc = lpfc_mbox_dev_check(phba);
4828 if (unlikely(rc))
4829 /* Driver clean routine will clean up pending mailbox */
4830 goto out_not_finished;
4831
4832 /* Prepare the mbox command to be posted */
4833 mqe = &mboxq->u.mqe;
4834 mbx_cmnd = bf_get(lpfc_mqe_command, mqe);
4835
4836 /* Start timer for the mbox_tmo and log some mailbox post messages */
4837 mod_timer(&psli->mbox_tmo, (jiffies +
4838 (HZ * lpfc_mbox_tmo_val(phba, mbx_cmnd))));
4839
4840 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
4841 "(%d):0355 Mailbox cmd x%x (x%x) issue Data: "
4842 "x%x x%x\n",
4843 mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd,
4844 lpfc_sli4_mbox_opcode_get(phba, mboxq),
4845 phba->pport->port_state, psli->sli_flag);
4846
4847 if (mbx_cmnd != MBX_HEARTBEAT) {
4848 if (mboxq->vport) {
4849 lpfc_debugfs_disc_trc(mboxq->vport,
4850 LPFC_DISC_TRC_MBOX_VPORT,
4851 "MBOX Send vport: cmd:x%x mb:x%x x%x",
4852 mbx_cmnd, mqe->un.mb_words[0],
4853 mqe->un.mb_words[1]);
4854 } else {
4855 lpfc_debugfs_disc_trc(phba->pport,
4856 LPFC_DISC_TRC_MBOX,
4857 "MBOX Send: cmd:x%x mb:x%x x%x",
4858 mbx_cmnd, mqe->un.mb_words[0],
4859 mqe->un.mb_words[1]);
4860 }
4861 }
4862 psli->slistat.mbox_cmd++;
4863
4864 /* Post the mailbox command to the port */
4865 rc = lpfc_sli4_mq_put(phba->sli4_hba.mbx_wq, mqe);
4866 if (rc != MBX_SUCCESS) {
4867 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
4868 "(%d):2533 Mailbox command x%x (x%x) "
4869 "cannot issue Data: x%x x%x\n",
4870 mboxq->vport ? mboxq->vport->vpi : 0,
4871 mboxq->u.mb.mbxCommand,
4872 lpfc_sli4_mbox_opcode_get(phba, mboxq),
4873 psli->sli_flag, MBX_NOWAIT);
4874 goto out_not_finished;
4875 }
4876
4877 return rc;
4878
4879out_not_finished:
4880 spin_lock_irqsave(&phba->hbalock, iflags);
4881 mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
4882 __lpfc_mbox_cmpl_put(phba, mboxq);
4883 /* Release the token */
4884 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
4885 phba->sli.mbox_active = NULL;
4886 spin_unlock_irqrestore(&phba->hbalock, iflags);
4887
4888 return MBX_NOT_FINISHED;
4889}
4890
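The synchronous and asynchronous mailbox paths above both serialize on the LPFC_SLI_MBOX_ACTIVE token under hbalock. A sketch of that take/release pattern with generic stand-in fields:

#include <linux/spinlock.h>
#include <linux/errno.h>

#define MBOX_ACTIVE	0x1	/* illustrative token bit */

struct mbox_ctx {
	spinlock_t lock;
	unsigned int flags;
};

/* Try to take the single-command token; return 0 on success. */
static int mbox_token_take(struct mbox_ctx *c)
{
	unsigned long iflags;
	int busy;

	spin_lock_irqsave(&c->lock, iflags);
	busy = c->flags & MBOX_ACTIVE;
	if (!busy)
		c->flags |= MBOX_ACTIVE;	/* we own the mailbox now */
	spin_unlock_irqrestore(&c->lock, iflags);
	return busy ? -EBUSY : 0;
}

static void mbox_token_release(struct mbox_ctx *c)
{
	unsigned long iflags;

	spin_lock_irqsave(&c->lock, iflags);
	c->flags &= ~MBOX_ACTIVE;
	spin_unlock_irqrestore(&c->lock, iflags);
}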
4891/**
4892 * lpfc_sli_issue_mbox - Wrapper func for issuing mailbox command
4893 * @phba: Pointer to HBA context object.
4894 * @pmbox: Pointer to mailbox object.
4895 * @flag: Flag indicating how the mailbox need to be processed.
4896 *
4897 * This routine wraps the actual SLI3 or SLI4 mailbox issuing routine
4898 * selected via the API jump table function pointer in the lpfc_hba struct.
4899 *
4900 * Return codes: the caller owns the mailbox command after the return of
4901 * the function.
4902 **/
4903int
4904lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
4905{
4906 return phba->lpfc_sli_issue_mbox(phba, pmbox, flag);
4907}
4908
4909/**
4910 * lpfc_mbox_api_table_setup - Set up mbox api function jump table
4911 * @phba: The hba struct for which this call is being executed.
4912 * @dev_grp: The HBA PCI-Device group number.
4913 *
4914 * This routine sets up the mbox interface API function jump table in @phba
4915 * struct.
4916 * Returns: 0 - success, -ENODEV - failure.
4917 **/
4918int
4919lpfc_mbox_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
4920{
4921
4922 switch (dev_grp) {
4923 case LPFC_PCI_DEV_LP:
4924 phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s3;
4925 phba->lpfc_sli_handle_slow_ring_event =
4926 lpfc_sli_handle_slow_ring_event_s3;
4927 phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s3;
4928 phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s3;
4929 phba->lpfc_sli_brdready = lpfc_sli_brdready_s3;
4930 break;
4931 case LPFC_PCI_DEV_OC:
4932 phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s4;
4933 phba->lpfc_sli_handle_slow_ring_event =
4934 lpfc_sli_handle_slow_ring_event_s4;
4935 phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s4;
4936 phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s4;
4937 phba->lpfc_sli_brdready = lpfc_sli_brdready_s4;
4938 break;
4939 default:
4940 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4941 "1420 Invalid HBA PCI-device group: 0x%x\n",
4942 dev_grp);
4943 return -ENODEV;
4944 break;
4945 }
4946 return 0;
4947}
4948
4949/**
3822 * __lpfc_sli_ringtx_put - Add an iocb to the txq 4950 * __lpfc_sli_ringtx_put - Add an iocb to the txq
3823 * @phba: Pointer to HBA context object. 4951 * @phba: Pointer to HBA context object.
3824 * @pring: Pointer to driver SLI ring object. 4952 * @pring: Pointer to driver SLI ring object.
@@ -4501,28 +5629,42 @@ lpfc_sli_hba_down(struct lpfc_hba *phba)
4501 5629
4502 /* Return any active mbox cmds */ 5630 /* Return any active mbox cmds */
4503 del_timer_sync(&psli->mbox_tmo); 5631 del_timer_sync(&psli->mbox_tmo);
4504 spin_lock_irqsave(&phba->hbalock, flags);
4505 5632
4506 spin_lock(&phba->pport->work_port_lock); 5633 spin_lock_irqsave(&phba->pport->work_port_lock, flags);
4507 phba->pport->work_port_events &= ~WORKER_MBOX_TMO; 5634 phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
4508 spin_unlock(&phba->pport->work_port_lock); 5635 spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);
4509 5636
4510 /* Return any pending or completed mbox cmds */ 5637 return 1;
4511 list_splice_init(&phba->sli.mboxq, &completions); 5638}
4512 if (psli->mbox_active) { 5639
4513 list_add_tail(&psli->mbox_active->list, &completions); 5640/**
4514 psli->mbox_active = NULL; 5641 * lpfc_sli4_hba_down - PCI function resource cleanup for the SLI4 HBA
4515 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 5642 * @phba: Pointer to HBA context object.
4516 } 5643 *
4517 list_splice_init(&phba->sli.mboxq_cmpl, &completions); 5644 * This function cleans up all queues, iocb, buffers, mailbox commands while
4518 spin_unlock_irqrestore(&phba->hbalock, flags); 5645 * shutting down the SLI4 HBA FCoE function. This function is called with no
5646 * lock held and always returns 1.
5647 *
5648 * This function does the following to cleanup driver FCoE function resources:
5649 * - Free discovery resources for each virtual port
5650 * - Cleanup any pending fabric iocbs
5651 * - Iterate through the iocb txq and free each entry in the list.
5652 * - Free up any buffer posted to the HBA.
5653 * - Clean up all the queue entries: WQ, RQ, MQ, EQ, CQ, etc.
5654 * - Free mailbox commands in the mailbox queue.
5655 **/
5656int
5657lpfc_sli4_hba_down(struct lpfc_hba *phba)
5658{
5659 /* Stop the SLI4 device port */
5660 lpfc_stop_port(phba);
5661
5662 /* Tear down the queues in the HBA */
5663 lpfc_sli4_queue_unset(phba);
5664
5665 /* unregister default FCFI from the HBA */
5666 lpfc_sli4_fcfi_unreg(phba, phba->fcf.fcfi);
4519 5667
4520 while (!list_empty(&completions)) {
4521 list_remove_head(&completions, pmb, LPFC_MBOXQ_t, list);
4522 pmb->mb.mbxStatus = MBX_NOT_FINISHED;
4523 if (pmb->mbox_cmpl)
4524 pmb->mbox_cmpl(phba,pmb);
4525 }
4526 return 1; 5668 return 1;
4527} 5669}
4528 5670
@@ -4853,7 +5995,10 @@ lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
4853 iabt = &abtsiocbp->iocb; 5995 iabt = &abtsiocbp->iocb;
4854 iabt->un.acxri.abortType = ABORT_TYPE_ABTS; 5996 iabt->un.acxri.abortType = ABORT_TYPE_ABTS;
4855 iabt->un.acxri.abortContextTag = icmd->ulpContext; 5997 iabt->un.acxri.abortContextTag = icmd->ulpContext;
4856 iabt->un.acxri.abortIoTag = icmd->ulpIoTag; 5998 if (phba->sli_rev == LPFC_SLI_REV4)
5999 iabt->un.acxri.abortIoTag = cmdiocb->sli4_xritag;
6000 else
6001 iabt->un.acxri.abortIoTag = icmd->ulpIoTag;
4857 iabt->ulpLe = 1; 6002 iabt->ulpLe = 1;
4858 iabt->ulpClass = icmd->ulpClass; 6003 iabt->ulpClass = icmd->ulpClass;
4859 6004
@@ -4869,7 +6014,7 @@ lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
4869 "abort cmd iotag x%x\n", 6014 "abort cmd iotag x%x\n",
4870 iabt->un.acxri.abortContextTag, 6015 iabt->un.acxri.abortContextTag,
4871 iabt->un.acxri.abortIoTag, abtsiocbp->iotag); 6016 iabt->un.acxri.abortIoTag, abtsiocbp->iotag);
4872 retval = __lpfc_sli_issue_iocb(phba, pring, abtsiocbp, 0); 6017 retval = __lpfc_sli_issue_iocb(phba, pring->ringno, abtsiocbp, 0);
4873 6018
4874 if (retval) 6019 if (retval)
4875 __lpfc_sli_release_iocbq(phba, abtsiocbp); 6020 __lpfc_sli_release_iocbq(phba, abtsiocbp);
@@ -5052,7 +6197,10 @@ lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
5052 cmd = &iocbq->iocb; 6197 cmd = &iocbq->iocb;
5053 abtsiocb->iocb.un.acxri.abortType = ABORT_TYPE_ABTS; 6198 abtsiocb->iocb.un.acxri.abortType = ABORT_TYPE_ABTS;
5054 abtsiocb->iocb.un.acxri.abortContextTag = cmd->ulpContext; 6199 abtsiocb->iocb.un.acxri.abortContextTag = cmd->ulpContext;
5055 abtsiocb->iocb.un.acxri.abortIoTag = cmd->ulpIoTag; 6200 if (phba->sli_rev == LPFC_SLI_REV4)
6201 abtsiocb->iocb.un.acxri.abortIoTag = iocbq->sli4_xritag;
6202 else
6203 abtsiocb->iocb.un.acxri.abortIoTag = cmd->ulpIoTag;
5056 abtsiocb->iocb.ulpLe = 1; 6204 abtsiocb->iocb.ulpLe = 1;
5057 abtsiocb->iocb.ulpClass = cmd->ulpClass; 6205 abtsiocb->iocb.ulpClass = cmd->ulpClass;
5058 abtsiocb->vport = phba->pport; 6206 abtsiocb->vport = phba->pport;
@@ -5064,7 +6212,8 @@ lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
5064 6212
5065 /* Setup callback routine and issue the command. */ 6213 /* Setup callback routine and issue the command. */
5066 abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl; 6214 abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
5067 ret_val = lpfc_sli_issue_iocb(phba, pring, abtsiocb, 0); 6215 ret_val = lpfc_sli_issue_iocb(phba, pring->ringno,
6216 abtsiocb, 0);
5068 if (ret_val == IOCB_ERROR) { 6217 if (ret_val == IOCB_ERROR) {
5069 lpfc_sli_release_iocbq(phba, abtsiocb); 6218 lpfc_sli_release_iocbq(phba, abtsiocb);
5070 errcnt++; 6219 errcnt++;
@@ -5145,7 +6294,7 @@ lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba,
5145 **/ 6294 **/
5146int 6295int
5147lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba, 6296lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
5148 struct lpfc_sli_ring *pring, 6297 uint32_t ring_number,
5149 struct lpfc_iocbq *piocb, 6298 struct lpfc_iocbq *piocb,
5150 struct lpfc_iocbq *prspiocbq, 6299 struct lpfc_iocbq *prspiocbq,
5151 uint32_t timeout) 6300 uint32_t timeout)
@@ -5176,7 +6325,7 @@ lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
5176 readl(phba->HCregaddr); /* flush */ 6325 readl(phba->HCregaddr); /* flush */
5177 } 6326 }
5178 6327
5179 retval = lpfc_sli_issue_iocb(phba, pring, piocb, 0); 6328 retval = lpfc_sli_issue_iocb(phba, ring_number, piocb, 0);
5180 if (retval == IOCB_SUCCESS) { 6329 if (retval == IOCB_SUCCESS) {
5181 timeout_req = timeout * HZ; 6330 timeout_req = timeout * HZ;
5182 timeleft = wait_event_timeout(done_q, 6331 timeleft = wait_event_timeout(done_q,
@@ -5385,6 +6534,58 @@ lpfc_sli_eratt_read(struct lpfc_hba *phba)
5385} 6534}
5386 6535
5387/** 6536/**
6537 * lpfc_sli4_eratt_read - read sli-4 error attention events
6538 * @phba: Pointer to HBA context.
6539 *
6540 * This function is called to read the SLI4 device error attention registers
6541 * for possible error attention events. The caller must hold the hostlock
6542 * with spin_lock_irq().
6543 *
6544 * This function returns 1 when there is Error Attention in the Host Attention
6545 * Register and returns 0 otherwise.
6546 **/
6547static int
6548lpfc_sli4_eratt_read(struct lpfc_hba *phba)
6549{
6550 uint32_t uerr_sta_hi, uerr_sta_lo;
6551 uint32_t onlnreg0, onlnreg1;
6552
6553 /* For now, use the SLI4 device internal unrecoverable error
6554 * registers for error attention. This can be changed later.
6555 */
6556 onlnreg0 = readl(phba->sli4_hba.ONLINE0regaddr);
6557 onlnreg1 = readl(phba->sli4_hba.ONLINE1regaddr);
6558 if ((onlnreg0 != LPFC_ONLINE_NERR) || (onlnreg1 != LPFC_ONLINE_NERR)) {
6559 uerr_sta_lo = readl(phba->sli4_hba.UERRLOregaddr);
6560 uerr_sta_hi = readl(phba->sli4_hba.UERRHIregaddr);
6561 if (uerr_sta_lo || uerr_sta_hi) {
6562 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6563 "1423 HBA Unrecoverable error: "
6564 "uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, "
6565 "online0_reg=0x%x, online1_reg=0x%x\n",
6566 uerr_sta_lo, uerr_sta_hi,
6567 onlnreg0, onlnreg1);
6568 /* TEMP: as the driver error recover logic is not
6569 * fully developed, we just log the error message
6570 * and the device error attention action is now
6571 * temporarily disabled.
6572 */
6573 return 0;
6574 phba->work_status[0] = uerr_sta_lo;
6575 phba->work_status[1] = uerr_sta_hi;
6576 spin_lock_irq(&phba->hbalock);
6577 /* Set the driver HA work bitmap */
6578 phba->work_ha |= HA_ERATT;
6579 /* Indicate polling handles this ERATT */
6580 phba->hba_flag |= HBA_ERATT_HANDLED;
6581 spin_unlock_irq(&phba->hbalock);
6582 return 1;
6583 }
6584 }
6585 return 0;
6586}
6587
6588/**
5388 * lpfc_sli_check_eratt - check error attention events 6589 * lpfc_sli_check_eratt - check error attention events
5389 * @phba: Pointer to HBA context. 6590 * @phba: Pointer to HBA context.
5390 * 6591 *
@@ -5434,6 +6635,10 @@ lpfc_sli_check_eratt(struct lpfc_hba *phba)
5434 /* Read chip Host Attention (HA) register */ 6635 /* Read chip Host Attention (HA) register */
5435 ha_copy = lpfc_sli_eratt_read(phba); 6636 ha_copy = lpfc_sli_eratt_read(phba);
5436 break; 6637 break;
6638 case LPFC_SLI_REV4:
6639		/* Read device Unrecoverable Error (UERR) registers */
6640 ha_copy = lpfc_sli4_eratt_read(phba);
6641 break;
5437 default: 6642 default:
5438 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6643 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5439 "0299 Invalid SLI revision (%d)\n", 6644 "0299 Invalid SLI revision (%d)\n",