Diffstat (limited to 'drivers/scsi/lpfc/lpfc_sli.c')

 drivers/scsi/lpfc/lpfc_sli.c | 414 ++++++++++++++++++++++++++++++++--------
 1 file changed, 350 insertions(+), 64 deletions(-)
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 35e3b96d4e07..049fb9a17b3f 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -23,6 +23,7 @@
 #include <linux/pci.h>
 #include <linux/interrupt.h>
 #include <linux/delay.h>
+#include <linux/slab.h>
 
 #include <scsi/scsi.h>
 #include <scsi/scsi_cmnd.h>
@@ -494,7 +495,7 @@ __lpfc_clear_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
  *
  * Returns sglq pointer = success, NULL = Failure.
  **/
-static struct lpfc_sglq *
+struct lpfc_sglq *
 __lpfc_get_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
 {
         uint16_t adj_xri;
@@ -526,6 +527,7 @@ __lpfc_sli_get_sglq(struct lpfc_hba *phba)
                 return NULL;
         adj_xri = sglq->sli4_xritag - phba->sli4_hba.max_cfg_param.xri_base;
         phba->sli4_hba.lpfc_sglq_active_list[adj_xri] = sglq;
+        sglq->state = SGL_ALLOCATED;
         return sglq;
 }
 
@@ -580,15 +582,18 @@ __lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
         else
                 sglq = __lpfc_clear_active_sglq(phba, iocbq->sli4_xritag);
         if (sglq) {
-                if (iocbq->iocb_flag & LPFC_EXCHANGE_BUSY) {
+                if ((iocbq->iocb_flag & LPFC_EXCHANGE_BUSY) &&
+                        (sglq->state != SGL_XRI_ABORTED)) {
                         spin_lock_irqsave(&phba->sli4_hba.abts_sgl_list_lock,
                                         iflag);
                         list_add(&sglq->list,
                                 &phba->sli4_hba.lpfc_abts_els_sgl_list);
                         spin_unlock_irqrestore(
                                 &phba->sli4_hba.abts_sgl_list_lock, iflag);
-                } else
+                } else {
+                        sglq->state = SGL_FREED;
                         list_add(&sglq->list, &phba->sli4_hba.lpfc_sgl_list);
+                }
         }
 
 
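[Editor's note: the hunks above introduce a small state machine on the SGL queue entry (SGL_ALLOCATED on get, SGL_XRI_ABORTED while an XRI abort is outstanding, SGL_FREED on release). A minimal userspace sketch of the release decision follows; release_target() and the flag value are illustrative stand-ins, not lpfc code:]

    #include <stdio.h>

    enum sgl_state { SGL_FREED, SGL_ALLOCATED, SGL_XRI_ABORTED };
    #define LPFC_EXCHANGE_BUSY 0x1  /* illustrative value */

    /* Decide which list a released sglq belongs on (mirrors the hunk above). */
    static const char *release_target(unsigned int iocb_flag, enum sgl_state state)
    {
            /* Park on the aborted list only while the XRI abort is pending. */
            if ((iocb_flag & LPFC_EXCHANGE_BUSY) && state != SGL_XRI_ABORTED)
                    return "lpfc_abts_els_sgl_list";
            /* Otherwise the entry goes back to the free list as SGL_FREED. */
            return "lpfc_sgl_list";
    }

    int main(void)
    {
            printf("%s\n", release_target(LPFC_EXCHANGE_BUSY, SGL_ALLOCATED));
            printf("%s\n", release_target(LPFC_EXCHANGE_BUSY, SGL_XRI_ABORTED));
            printf("%s\n", release_target(0, SGL_ALLOCATED));
            return 0;
    }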
@@ -2258,41 +2263,56 @@ lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
                                         spin_unlock_irqrestore(&phba->hbalock,
                                                 iflag);
                                 }
-                                if ((phba->sli_rev == LPFC_SLI_REV4) &&
-                                    (saveq->iocb_flag & LPFC_EXCHANGE_BUSY)) {
-                                        /* Set cmdiocb flag for the exchange
-                                         * busy so sgl (xri) will not be
-                                         * released until the abort xri is
-                                         * received from hba, clear the
-                                         * LPFC_DRIVER_ABORTED bit in case
-                                         * it was driver initiated abort.
-                                         */
-                                        spin_lock_irqsave(&phba->hbalock,
-                                                iflag);
-                                        cmdiocbp->iocb_flag &=
-                                                ~LPFC_DRIVER_ABORTED;
-                                        cmdiocbp->iocb_flag |=
-                                                LPFC_EXCHANGE_BUSY;
-                                        spin_unlock_irqrestore(&phba->hbalock,
-                                                iflag);
-                                        cmdiocbp->iocb.ulpStatus =
-                                                IOSTAT_LOCAL_REJECT;
-                                        cmdiocbp->iocb.un.ulpWord[4] =
-                                                IOERR_ABORT_REQUESTED;
-                                        /*
-                                         * For SLI4, irsiocb contains NO_XRI
-                                         * in sli_xritag, it shall not affect
-                                         * releasing sgl (xri) process.
-                                         */
-                                        saveq->iocb.ulpStatus =
-                                                IOSTAT_LOCAL_REJECT;
-                                        saveq->iocb.un.ulpWord[4] =
-                                                IOERR_SLI_ABORTED;
-                                        spin_lock_irqsave(&phba->hbalock,
-                                                iflag);
-                                        saveq->iocb_flag |= LPFC_DELAY_MEM_FREE;
-                                        spin_unlock_irqrestore(&phba->hbalock,
-                                                iflag);
+                                if (phba->sli_rev == LPFC_SLI_REV4) {
+                                        if (saveq->iocb_flag &
+                                            LPFC_EXCHANGE_BUSY) {
+                                                /* Set cmdiocb flag for the
+                                                 * exchange busy so sgl (xri)
+                                                 * will not be released until
+                                                 * the abort xri is received
+                                                 * from hba.
+                                                 */
+                                                spin_lock_irqsave(
+                                                        &phba->hbalock, iflag);
+                                                cmdiocbp->iocb_flag |=
+                                                        LPFC_EXCHANGE_BUSY;
+                                                spin_unlock_irqrestore(
+                                                        &phba->hbalock, iflag);
+                                        }
+                                        if (cmdiocbp->iocb_flag &
+                                            LPFC_DRIVER_ABORTED) {
+                                                /*
+                                                 * Clear LPFC_DRIVER_ABORTED
+                                                 * bit in case it was driver
+                                                 * initiated abort.
+                                                 */
+                                                spin_lock_irqsave(
+                                                        &phba->hbalock, iflag);
+                                                cmdiocbp->iocb_flag &=
+                                                        ~LPFC_DRIVER_ABORTED;
+                                                spin_unlock_irqrestore(
+                                                        &phba->hbalock, iflag);
+                                                cmdiocbp->iocb.ulpStatus =
+                                                        IOSTAT_LOCAL_REJECT;
+                                                cmdiocbp->iocb.un.ulpWord[4] =
+                                                        IOERR_ABORT_REQUESTED;
+                                                /*
+                                                 * For SLI4, irsiocb contains
+                                                 * NO_XRI in sli_xritag, it
+                                                 * shall not affect releasing
+                                                 * sgl (xri) process.
+                                                 */
+                                                saveq->iocb.ulpStatus =
+                                                        IOSTAT_LOCAL_REJECT;
+                                                saveq->iocb.un.ulpWord[4] =
+                                                        IOERR_SLI_ABORTED;
+                                                spin_lock_irqsave(
+                                                        &phba->hbalock, iflag);
+                                                saveq->iocb_flag |=
+                                                        LPFC_DELAY_MEM_FREE;
+                                                spin_unlock_irqrestore(
+                                                        &phba->hbalock, iflag);
+                                        }
                                 }
                         }
                         (cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq);
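[Editor's note: the restructuring above splits one combined test into two independent ones: EXCHANGE_BUSY is propagated whenever the response carries it, and DRIVER_ABORTED is cleared only when it was actually set, each update made under the lock. A toy sketch of that pattern, with pthread_mutex standing in for spin_lock_irqsave; names and flag values are illustrative:]

    #include <pthread.h>
    #include <stdio.h>

    #define LPFC_EXCHANGE_BUSY  0x1  /* illustrative values */
    #define LPFC_DRIVER_ABORTED 0x2

    static pthread_mutex_t hbalock = PTHREAD_MUTEX_INITIALIZER;

    static void complete_iocb(unsigned int *cmd_flag, unsigned int rsp_flag)
    {
            /* Propagate EXCHANGE_BUSY from the response, independently... */
            if (rsp_flag & LPFC_EXCHANGE_BUSY) {
                    pthread_mutex_lock(&hbalock);
                    *cmd_flag |= LPFC_EXCHANGE_BUSY;
                    pthread_mutex_unlock(&hbalock);
            }
            /* ...from clearing DRIVER_ABORTED when it was actually set. */
            if (*cmd_flag & LPFC_DRIVER_ABORTED) {
                    pthread_mutex_lock(&hbalock);
                    *cmd_flag &= ~LPFC_DRIVER_ABORTED;
                    pthread_mutex_unlock(&hbalock);
            }
    }

    int main(void)
    {
            unsigned int cmd_flag = LPFC_DRIVER_ABORTED;

            complete_iocb(&cmd_flag, LPFC_EXCHANGE_BUSY);
            printf("cmd_flag = 0x%x\n", cmd_flag); /* 0x1: busy set, aborted cleared */
            return 0;
    }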
@@ -2515,14 +2535,16 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
 
                         cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring,
                                                          &rspiocbq);
-                        if ((cmdiocbq) && (cmdiocbq->iocb_cmpl)) {
-                                spin_unlock_irqrestore(&phba->hbalock,
-                                                       iflag);
-                                (cmdiocbq->iocb_cmpl)(phba, cmdiocbq,
-                                                      &rspiocbq);
-                                spin_lock_irqsave(&phba->hbalock,
-                                                  iflag);
-                        }
+                        if (unlikely(!cmdiocbq))
+                                break;
+                        if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED)
+                                cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
+                        if (cmdiocbq->iocb_cmpl) {
+                                spin_unlock_irqrestore(&phba->hbalock, iflag);
+                                (cmdiocbq->iocb_cmpl)(phba, cmdiocbq,
+                                                      &rspiocbq);
+                                spin_lock_irqsave(&phba->hbalock, iflag);
+                        }
                         break;
                 case LPFC_UNSOL_IOCB:
                         spin_unlock_irqrestore(&phba->hbalock, iflag);
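[Editor's note: both the old and new code drop hbalock across the completion callback and retake it afterwards, since the callback may itself acquire the lock. A minimal sketch of that idiom, with pthread stand-ins and illustrative names:]

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t hbalock = PTHREAD_MUTEX_INITIALIZER;

    static void iocb_cmpl(int tag)
    {
            /* The completion handler may take hbalock itself, so the
             * caller must not hold it across this call. */
            pthread_mutex_lock(&hbalock);
            printf("completed iocb %d\n", tag);
            pthread_mutex_unlock(&hbalock);
    }

    static void handle_ring_event(void)
    {
            int tag;

            pthread_mutex_lock(&hbalock);
            tag = 7;                        /* look up the command under the lock */
            pthread_mutex_unlock(&hbalock); /* drop it across the callback */
            iocb_cmpl(tag);
            pthread_mutex_lock(&hbalock);   /* retake before touching ring state */
            printf("ring state updated\n");
            pthread_mutex_unlock(&hbalock);
    }

    int main(void)
    {
            handle_ring_event();
            return 0;
    }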
@@ -3091,6 +3113,12 @@ lpfc_sli_brdready_s3(struct lpfc_hba *phba, uint32_t mask)
 
         /* Check to see if any errors occurred during init */
         if ((status & HS_FFERM) || (i >= 20)) {
+                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                                "2751 Adapter failed to restart, "
+                                "status reg x%x, FW Data: A8 x%x AC x%x\n",
+                                status,
+                                readl(phba->MBslimaddr + 0xa8),
+                                readl(phba->MBslimaddr + 0xac));
                 phba->link_state = LPFC_HBA_ERROR;
                 retval = 1;
         }
@@ -3278,6 +3306,9 @@ lpfc_sli_brdkill(struct lpfc_hba *phba)
         if (retval != MBX_SUCCESS) {
                 if (retval != MBX_BUSY)
                         mempool_free(pmb, phba->mbox_mem_pool);
+                lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+                        "2752 KILL_BOARD command failed retval %d\n",
+                        retval);
                 spin_lock_irq(&phba->hbalock);
                 phba->link_flag &= ~LS_IGNORE_ERATT;
                 spin_unlock_irq(&phba->hbalock);
@@ -4035,7 +4066,7 @@ lpfc_sli_hba_setup(struct lpfc_hba *phba)
 
 lpfc_sli_hba_setup_error:
         phba->link_state = LPFC_HBA_ERROR;
-        lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                         "0445 Firmware initialization failed\n");
         return rc;
 }
@@ -4388,7 +4419,13 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
         spin_unlock_irq(&phba->hbalock);
 
         /* Read the port's service parameters. */
-        lpfc_read_sparam(phba, mboxq, vport->vpi);
+        rc = lpfc_read_sparam(phba, mboxq, vport->vpi);
+        if (rc) {
+                phba->link_state = LPFC_HBA_ERROR;
+                rc = -ENOMEM;
+                goto out_free_vpd;
+        }
+
         mboxq->vport = vport;
         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
         mp = (struct lpfc_dmabuf *) mboxq->context1;
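[Editor's note: the new error path above follows the kernel's goto-unwind idiom: check the helper's return code, record an error, and jump to a label that frees earlier allocations. A compact userspace sketch under hypothetical names (read_sparam, hba_setup), not the lpfc API:]

    #include <stdio.h>
    #include <stdlib.h>

    /* Stand-in for lpfc_read_sparam(): allocates, may fail. */
    static int read_sparam(char **buf)
    {
            *buf = malloc(64);
            return *buf ? 0 : -1;
    }

    static int hba_setup(void)
    {
            char *vpd = malloc(128);        /* earlier allocation to unwind */
            char *sparam = NULL;
            int rc;

            if (!vpd)
                    return -1;
            rc = read_sparam(&sparam);
            if (rc) {               /* as in the hunk: check, set error, unwind */
                    rc = -1;        /* the driver uses -ENOMEM here */
                    goto out_free_vpd;
            }
            printf("setup ok\n");
            free(sparam);
    out_free_vpd:
            free(vpd);
            return rc;
    }

    int main(void)
    {
            return hba_setup() ? 1 : 0;
    }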
@@ -4483,6 +4520,10 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
         /* Post receive buffers to the device */
         lpfc_sli4_rb_setup(phba);
 
+        /* Reset HBA FCF states after HBA reset */
+        phba->fcf.fcf_flag = 0;
+        phba->fcf.current_rec.flag = 0;
+
         /* Start the ELS watchdog timer */
         mod_timer(&vport->els_tmofunc,
                   jiffies + HZ * (phba->fc_ratov * 2));
@@ -7436,6 +7477,7 @@ lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba,
 {
         wait_queue_head_t *pdone_q;
         unsigned long iflags;
+        struct lpfc_scsi_buf *lpfc_cmd;
 
         spin_lock_irqsave(&phba->hbalock, iflags);
         cmdiocbq->iocb_flag |= LPFC_IO_WAKE;
@@ -7443,6 +7485,14 @@ lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba,
                 memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb,
                        &rspiocbq->iocb, sizeof(IOCB_t));
 
+        /* Set the exchange busy flag for task management commands */
+        if ((cmdiocbq->iocb_flag & LPFC_IO_FCP) &&
+                !(cmdiocbq->iocb_flag & LPFC_IO_LIBDFC)) {
+                lpfc_cmd = container_of(cmdiocbq, struct lpfc_scsi_buf,
+                        cur_iocbq);
+                lpfc_cmd->exch_busy = rspiocbq->iocb_flag & LPFC_EXCHANGE_BUSY;
+        }
+
         pdone_q = cmdiocbq->context_un.wait_queue;
         if (pdone_q)
                 wake_up(pdone_q);
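[Editor's note: container_of() above recovers the lpfc_scsi_buf from its embedded cur_iocbq member. A self-contained sketch with toy types showing how the macro walks back from a member pointer to its containing struct:]

    #include <stddef.h>
    #include <stdio.h>

    /* Same arithmetic the kernel macro uses, minus the type checking. */
    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct toy_iocbq { int tag; };

    struct toy_scsi_buf {
            int exch_busy;
            struct toy_iocbq cur_iocbq;     /* embedded, like lpfc's cur_iocbq */
    };

    int main(void)
    {
            struct toy_scsi_buf buf = { .exch_busy = 0, .cur_iocbq = { 42 } };
            struct toy_iocbq *iocbq = &buf.cur_iocbq;

            /* Walk back from the embedded member to the containing buffer. */
            struct toy_scsi_buf *owner =
                    container_of(iocbq, struct toy_scsi_buf, cur_iocbq);
            owner->exch_busy = 1;
            printf("tag=%d exch_busy=%d\n", owner->cur_iocbq.tag, owner->exch_busy);
            return 0;
    }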
@@ -9061,6 +9111,12 @@ lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc_hba *phba,
         /* Fake the irspiocb and copy necessary response information */
         lpfc_sli4_iocb_param_transfer(phba, &irspiocbq, cmdiocbq, wcqe);
 
+        if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED) {
+                spin_lock_irqsave(&phba->hbalock, iflags);
+                cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
+                spin_unlock_irqrestore(&phba->hbalock, iflags);
+        }
+
         /* Pass the cmd_iocb and the rsp state to the upper layer */
         (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, &irspiocbq);
 }
@@ -11941,15 +11997,19 @@ lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *phba,
 }
 
 /**
- * lpfc_sli4_read_fcf_record - Read the driver's default FCF Record.
+ * lpfc_sli4_fcf_scan_read_fcf_rec - Read hba fcf record for fcf scan.
  * @phba: pointer to lpfc hba data structure.
  * @fcf_index: FCF table entry offset.
  *
- * This routine is invoked to read up to @fcf_num of FCF record from the
- * device starting with the given @fcf_index.
+ * This routine is invoked to scan the entire FCF table by reading FCF
+ * record and processing it one at a time starting from the @fcf_index
+ * for initial FCF discovery or fast FCF failover rediscovery.
+ *
+ * Return 0 if the mailbox command is submitted successfully, non-zero
+ * otherwise.
  **/
 int
-lpfc_sli4_read_fcf_record(struct lpfc_hba *phba, uint16_t fcf_index)
+lpfc_sli4_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
 {
         int rc = 0, error;
         LPFC_MBOXQ_t *mboxq;
@@ -11961,17 +12021,17 @@ lpfc_sli4_read_fcf_record(struct lpfc_hba *phba, uint16_t fcf_index)
                                 "2000 Failed to allocate mbox for "
                                 "READ_FCF cmd\n");
                 error = -ENOMEM;
-                goto fail_fcfscan;
+                goto fail_fcf_scan;
         }
         /* Construct the read FCF record mailbox command */
-        rc = lpfc_sli4_mbx_read_fcf_record(phba, mboxq, fcf_index);
+        rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
         if (rc) {
                 error = -EINVAL;
-                goto fail_fcfscan;
+                goto fail_fcf_scan;
         }
         /* Issue the mailbox command asynchronously */
         mboxq->vport = phba->pport;
-        mboxq->mbox_cmpl = lpfc_mbx_cmpl_read_fcf_record;
+        mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_scan_read_fcf_rec;
         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
         if (rc == MBX_NOT_FINISHED)
                 error = -EIO;
@@ -11979,9 +12039,13 @@ lpfc_sli4_read_fcf_record(struct lpfc_hba *phba, uint16_t fcf_index)
                 spin_lock_irq(&phba->hbalock);
                 phba->hba_flag |= FCF_DISC_INPROGRESS;
                 spin_unlock_irq(&phba->hbalock);
+                /* Reset FCF round robin index bmask for new scan */
+                if (fcf_index == LPFC_FCOE_FCF_GET_FIRST)
+                        memset(phba->fcf.fcf_rr_bmask, 0,
+                               sizeof(*phba->fcf.fcf_rr_bmask));
                 error = 0;
         }
-fail_fcfscan:
+fail_fcf_scan:
         if (error) {
                 if (mboxq)
                         lpfc_sli4_mbox_cmd_free(phba, mboxq);
@@ -11994,6 +12058,181 @@ fail_fcfscan:
 }
 
 /**
+ * lpfc_sli4_fcf_rr_read_fcf_rec - Read hba fcf record for round robin fcf.
+ * @phba: pointer to lpfc hba data structure.
+ * @fcf_index: FCF table entry offset.
+ *
+ * This routine is invoked to read an FCF record indicated by @fcf_index
+ * and to use it for FLOGI round robin FCF failover.
+ *
+ * Return 0 if the mailbox command is submitted successfully, non-zero
+ * otherwise.
+ **/
+int
+lpfc_sli4_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
+{
+        int rc = 0, error;
+        LPFC_MBOXQ_t *mboxq;
+
+        mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+        if (!mboxq) {
+                lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT,
+                                "2763 Failed to allocate mbox for "
+                                "READ_FCF cmd\n");
+                error = -ENOMEM;
+                goto fail_fcf_read;
+        }
+        /* Construct the read FCF record mailbox command */
+        rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
+        if (rc) {
+                error = -EINVAL;
+                goto fail_fcf_read;
+        }
+        /* Issue the mailbox command asynchronously */
+        mboxq->vport = phba->pport;
+        mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_rr_read_fcf_rec;
+        rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
+        if (rc == MBX_NOT_FINISHED)
+                error = -EIO;
+        else
+                error = 0;
+
+fail_fcf_read:
+        if (error && mboxq)
+                lpfc_sli4_mbox_cmd_free(phba, mboxq);
+        return error;
+}
+
+/**
+ * lpfc_sli4_read_fcf_rec - Read hba fcf record for update eligible fcf bmask.
+ * @phba: pointer to lpfc hba data structure.
+ * @fcf_index: FCF table entry offset.
+ *
+ * This routine is invoked to read an FCF record indicated by @fcf_index to
+ * determine whether it's eligible for FLOGI round robin failover list.
+ *
+ * Return 0 if the mailbox command is submitted successfully, non-zero
+ * otherwise.
+ **/
+int
+lpfc_sli4_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
+{
+        int rc = 0, error;
+        LPFC_MBOXQ_t *mboxq;
+
+        mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+        if (!mboxq) {
+                lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT,
+                                "2758 Failed to allocate mbox for "
+                                "READ_FCF cmd\n");
+                error = -ENOMEM;
+                goto fail_fcf_read;
+        }
+        /* Construct the read FCF record mailbox command */
+        rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
+        if (rc) {
+                error = -EINVAL;
+                goto fail_fcf_read;
+        }
+        /* Issue the mailbox command asynchronously */
+        mboxq->vport = phba->pport;
+        mboxq->mbox_cmpl = lpfc_mbx_cmpl_read_fcf_rec;
+        rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
+        if (rc == MBX_NOT_FINISHED)
+                error = -EIO;
+        else
+                error = 0;
+
+fail_fcf_read:
+        if (error && mboxq)
+                lpfc_sli4_mbox_cmd_free(phba, mboxq);
+        return error;
+}
+
+/**
+ * lpfc_sli4_fcf_rr_next_index_get - Get next eligible fcf record index
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is to get the next eligible FCF record index in a round
+ * robin fashion. If the next eligible FCF record index equals to the
+ * initial round robin FCF record index, LPFC_FCOE_FCF_NEXT_NONE (0xFFFF)
+ * shall be returned, otherwise, the next eligible FCF record's index
+ * shall be returned.
+ **/
+uint16_t
+lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *phba)
+{
+        uint16_t next_fcf_index;
+
+        /* Search from the currently registered FCF index */
+        next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
+                                       LPFC_SLI4_FCF_TBL_INDX_MAX,
+                                       phba->fcf.current_rec.fcf_indx);
+        /* Wrap around condition on phba->fcf.fcf_rr_bmask */
+        if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX)
+                next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
+                                               LPFC_SLI4_FCF_TBL_INDX_MAX, 0);
+        /* Round robin failover stop condition */
+        if (next_fcf_index == phba->fcf.fcf_rr_init_indx)
+                return LPFC_FCOE_FCF_NEXT_NONE;
+
+        return next_fcf_index;
+}
+
+/**
+ * lpfc_sli4_fcf_rr_index_set - Set bmask with eligible fcf record index
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine sets the FCF record index in to the eligible bmask for
+ * round robin failover search. It checks to make sure that the index
+ * does not go beyond the range of the driver allocated bmask dimension
+ * before setting the bit.
+ *
+ * Returns 0 if the index bit successfully set, otherwise, it returns
+ * -EINVAL.
+ **/
+int
+lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *phba, uint16_t fcf_index)
+{
+        if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
+                lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
+                                "2610 HBA FCF index reached driver's "
+                                "book keeping dimension: fcf_index:%d, "
+                                "driver_bmask_max:%d\n",
+                                fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX);
+                return -EINVAL;
+        }
+        /* Set the eligible FCF record index bmask */
+        set_bit(fcf_index, phba->fcf.fcf_rr_bmask);
+
+        return 0;
+}
+
+/**
+ * lpfc_sli4_fcf_rr_index_clear - Clear bmask from eligible fcf record index
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine clears the FCF record index from the eligible bmask for
+ * round robin failover search. It checks to make sure that the index
+ * does not go beyond the range of the driver allocated bmask dimension
+ * before clearing the bit.
+ **/
+void
+lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *phba, uint16_t fcf_index)
+{
+        if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
+                lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
+                                "2762 HBA FCF index goes beyond driver's "
+                                "book keeping dimension: fcf_index:%d, "
+                                "driver_bmask_max:%d\n",
+                                fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX);
+                return;
+        }
+        /* Clear the eligible FCF record index bmask */
+        clear_bit(fcf_index, phba->fcf.fcf_rr_bmask);
+}
+
+/**
  * lpfc_mbx_cmpl_redisc_fcf_table - completion routine for rediscover FCF table
  * @phba: pointer to lpfc hba data structure.
  *
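[Editor's note: lpfc_sli4_fcf_rr_next_index_get() above implements a wrap-around bitmap scan with a stop condition. A userspace model of the same algorithm; the int array, the sizes, and the helper next_set_bit() are stand-ins for the kernel bitmap and find_next_bit():]

    #include <stdio.h>

    #define TBL_INDX_MAX  16     /* stands in for LPFC_SLI4_FCF_TBL_INDX_MAX */
    #define FCF_NEXT_NONE 0xFFFF /* stands in for LPFC_FCOE_FCF_NEXT_NONE */

    /* Stand-in for find_next_bit(): first set entry at or after 'start'. */
    static unsigned int next_set_bit(const int *bmask, unsigned int size,
                                     unsigned int start)
    {
            unsigned int i;

            for (i = start; i < size; i++)
                    if (bmask[i])
                            return i;
            return size;    /* "not found" is the bitmap size, as in the kernel */
    }

    static unsigned int rr_next_index(const int *bmask, unsigned int current_indx,
                                      unsigned int init_indx)
    {
            unsigned int next = next_set_bit(bmask, TBL_INDX_MAX, current_indx);

            if (next >= TBL_INDX_MAX)       /* wrap around to bit 0 */
                    next = next_set_bit(bmask, TBL_INDX_MAX, 0);
            if (next == init_indx)          /* full circle: stop the failover */
                    return FCF_NEXT_NONE;
            return next;
    }

    int main(void)
    {
            int bmask[TBL_INDX_MAX] = { 0 };

            bmask[2] = bmask[5] = bmask[9] = 1;     /* eligible FCF indexes */
            printf("next after 6: %u\n", rr_next_index(bmask, 6, 2));   /* 9 */
            printf("next after 10: %u\n", rr_next_index(bmask, 10, 2)); /* 0xFFFF */
            return 0;
    }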
@@ -12014,21 +12253,40 @@ lpfc_mbx_cmpl_redisc_fcf_table(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
                                  &redisc_fcf->header.cfg_shdr.response);
         if (shdr_status || shdr_add_status) {
-                lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+                lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
                                 "2746 Requesting for FCF rediscovery failed "
                                 "status x%x add_status x%x\n",
                                 shdr_status, shdr_add_status);
-                /*
-                 * Request failed, last resort to re-try current
-                 * registered FCF entry
-                 */
-                lpfc_retry_pport_discovery(phba);
-        } else
+                if (phba->fcf.fcf_flag & FCF_ACVL_DISC) {
+                        spin_lock_irq(&phba->hbalock);
+                        phba->fcf.fcf_flag &= ~FCF_ACVL_DISC;
+                        spin_unlock_irq(&phba->hbalock);
+                        /*
+                         * CVL event triggered FCF rediscover request failed,
+                         * last resort to re-try current registered FCF entry.
+                         */
+                        lpfc_retry_pport_discovery(phba);
+                } else {
+                        spin_lock_irq(&phba->hbalock);
+                        phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
+                        spin_unlock_irq(&phba->hbalock);
+                        /*
+                         * DEAD FCF event triggered FCF rediscover request
+                         * failed, last resort to fail over as a link down
+                         * to FCF registration.
+                         */
+                        lpfc_sli4_fcf_dead_failthrough(phba);
+                }
+        } else {
+                lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+                                "2775 Start FCF rediscovery quiescent period "
+                                "wait timer before scaning FCF table\n");
                 /*
                  * Start FCF rediscovery wait timer for pending FCF
                  * before rescan FCF record table.
                  */
                 lpfc_fcf_redisc_wait_start_timer(phba);
+        }
 
         mempool_free(mbox, phba->mbox_mem_pool);
 }
@@ -12047,6 +12305,9 @@ lpfc_sli4_redisc_fcf_table(struct lpfc_hba *phba)
         struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf;
         int rc, length;
 
+        /* Cancel retry delay timers to all vports before FCF rediscover */
+        lpfc_cancel_all_vport_retry_delay_timer(phba);
+
         mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
         if (!mbox) {
                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
@@ -12078,6 +12339,31 @@ lpfc_sli4_redisc_fcf_table(struct lpfc_hba *phba)
 }
 
 /**
+ * lpfc_sli4_fcf_dead_failthrough - Failthrough routine to fcf dead event
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This function is the failover routine as a last resort to the FCF DEAD
+ * event when driver failed to perform fast FCF failover.
+ **/
+void
+lpfc_sli4_fcf_dead_failthrough(struct lpfc_hba *phba)
+{
+        uint32_t link_state;
+
+        /*
+         * Last resort as FCF DEAD event failover will treat this as
+         * a link down, but save the link state because we don't want
+         * it to be changed to Link Down unless it is already down.
+         */
+        link_state = phba->link_state;
+        lpfc_linkdown(phba);
+        phba->link_state = link_state;
+
+        /* Unregister FCF if no devices connected to it */
+        lpfc_unregister_unused_fcf(phba);
+}
+
+/**
  * lpfc_sli_read_link_ste - Read region 23 to decide if link is disabled.
  * @phba: pointer to lpfc hba data structure.
  *
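[Editor's note: lpfc_sli4_fcf_dead_failthrough() above reuses the link-down teardown while preserving the recorded link state by save/restore. A toy illustration; the enum values and function names are stand-ins, not driver code:]

    #include <stdio.h>

    enum link_state { LPFC_LINK_DOWN, LPFC_LINK_UP, LPFC_HBA_ERROR };

    static enum link_state link_state = LPFC_HBA_ERROR;

    static void linkdown(void)
    {
            /* Shared teardown path; unconditionally records a down link. */
            link_state = LPFC_LINK_DOWN;
            printf("fabric bindings torn down\n");
    }

    static void fcf_dead_failthrough(void)
    {
            /* Save, run the teardown for its side effects, then restore
             * so an existing error state is not overwritten. */
            enum link_state saved = link_state;

            linkdown();
            link_state = saved;
    }

    int main(void)
    {
            fcf_dead_failthrough();
            printf("state preserved: %s\n",
                   link_state == LPFC_HBA_ERROR ? "yes" : "no");
            return 0;
    }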