Diffstat (limited to 'drivers/scsi/lpfc/lpfc_sli.c')
 -rw-r--r--  drivers/scsi/lpfc/lpfc_sli.c  1911
 1 file changed, 1361 insertions(+), 550 deletions(-)
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 43cbe336f1f8..049fb9a17b3f 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -23,6 +23,7 @@
 #include <linux/pci.h>
 #include <linux/interrupt.h>
 #include <linux/delay.h>
+#include <linux/slab.h>
 
 #include <scsi/scsi.h>
 #include <scsi/scsi_cmnd.h>
@@ -30,6 +31,7 @@
 #include <scsi/scsi_host.h>
 #include <scsi/scsi_transport_fc.h>
 #include <scsi/fc/fc_fs.h>
+#include <linux/aer.h>
 
 #include "lpfc_hw4.h"
 #include "lpfc_hw.h"
@@ -58,8 +60,11 @@ typedef enum _lpfc_iocb_type {
 static int lpfc_sli_issue_mbox_s4(struct lpfc_hba *, LPFC_MBOXQ_t *,
				  uint32_t);
 static int lpfc_sli4_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *,
			      uint8_t *, uint32_t *);
-
+static struct lpfc_iocbq *lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *,
+							 struct lpfc_iocbq *);
+static void lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *,
+				      struct hbq_dmabuf *);
 static IOCB_t *
 lpfc_get_iocb_from_iocbq(struct lpfc_iocbq *iocbq)
 {
@@ -259,6 +264,9 @@ lpfc_sli4_eq_release(struct lpfc_queue *q, bool arm)
 	bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
 	bf_set(lpfc_eqcq_doorbell_eqid, &doorbell, q->queue_id);
 	writel(doorbell.word0, q->phba->sli4_hba.EQCQDBregaddr);
+	/* PCI read to flush PCI pipeline on re-arming for INTx mode */
+	if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM))
+		readl(q->phba->sli4_hba.EQCQDBregaddr);
 	return released;
 }
 
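A note on the readl() added above: PCI memory writes are posted, so the doorbell value can sit in a bridge buffer while the driver goes on assuming the EQ is re-armed; in INTx mode that window can leave the level-triggered interrupt line stuck. Reading any register on the same device flushes the posted writes ahead of it. A minimal sketch of the idiom, not the driver's exact code:

    /* Flush a posted MMIO doorbell write by reading it back. */
    static void ring_doorbell(void __iomem *db, u32 val, bool flush)
    {
            writel(val, db);         /* posted: may linger in a bridge buffer */
            if (flush)
                    (void)readl(db); /* read forces prior writes to the device */
    }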
@@ -487,7 +495,7 @@ __lpfc_clear_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
 *
 * Returns sglq ponter = success, NULL = Failure.
 **/
-static struct lpfc_sglq *
+struct lpfc_sglq *
 __lpfc_get_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
 {
 	uint16_t adj_xri;
@@ -515,8 +523,11 @@ __lpfc_sli_get_sglq(struct lpfc_hba *phba)
 	struct lpfc_sglq *sglq = NULL;
 	uint16_t adj_xri;
 	list_remove_head(lpfc_sgl_list, sglq, struct lpfc_sglq, list);
+	if (!sglq)
+		return NULL;
 	adj_xri = sglq->sli4_xritag - phba->sli4_hba.max_cfg_param.xri_base;
 	phba->sli4_hba.lpfc_sglq_active_list[adj_xri] = sglq;
+	sglq->state = SGL_ALLOCATED;
 	return sglq;
 }
 
@@ -571,18 +582,18 @@ __lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
 	else
 		sglq = __lpfc_clear_active_sglq(phba, iocbq->sli4_xritag);
 	if (sglq) {
-		if (iocbq->iocb_flag & LPFC_DRIVER_ABORTED
-			|| ((iocbq->iocb.ulpStatus == IOSTAT_LOCAL_REJECT)
-			&& (iocbq->iocb.un.ulpWord[4]
-				== IOERR_SLI_ABORTED))) {
+		if ((iocbq->iocb_flag & LPFC_EXCHANGE_BUSY) &&
+			(sglq->state != SGL_XRI_ABORTED)) {
 			spin_lock_irqsave(&phba->sli4_hba.abts_sgl_list_lock,
					iflag);
 			list_add(&sglq->list,
				&phba->sli4_hba.lpfc_abts_els_sgl_list);
 			spin_unlock_irqrestore(
				&phba->sli4_hba.abts_sgl_list_lock, iflag);
-		} else
+		} else {
+			sglq->state = SGL_FREED;
 			list_add(&sglq->list, &phba->sli4_hba.lpfc_sgl_list);
+		}
 	}
 
 
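This hunk stops inferring "exchange still owned by hardware" from ulpStatus/ulpWord[4] and keys the decision on the explicit LPFC_EXCHANGE_BUSY flag plus the new per-SGL state, so an XRI is only recycled once the HBA has surrendered it. The decision condensed into one predicate (hypothetical helper; the flag and state names come from this diff):

    /* True while the sgl must park on the aborted list. */
    static bool sgl_still_busy(u32 iocb_flag, int sgl_state)
    {
            return (iocb_flag & LPFC_EXCHANGE_BUSY) &&
                   (sgl_state != SGL_XRI_ABORTED);
    }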
@@ -755,10 +766,6 @@ lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd)
 	case DSSCMD_IWRITE64_CX:
 	case DSSCMD_IREAD64_CR:
 	case DSSCMD_IREAD64_CX:
-	case DSSCMD_INVALIDATE_DEK:
-	case DSSCMD_SET_KEK:
-	case DSSCMD_GET_KEK_ID:
-	case DSSCMD_GEN_XFER:
 		type = LPFC_SOL_IOCB;
 		break;
 	case CMD_ABORT_XRI_CN:
@@ -767,6 +774,7 @@ lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd)
 	case CMD_CLOSE_XRI_CX:
 	case CMD_XRI_ABORTED_CX:
 	case CMD_ABORT_MXRI64_CN:
+	case CMD_XMIT_BLS_RSP64_CX:
 		type = LPFC_ABORT_IOCB;
 		break;
 	case CMD_RCV_SEQUENCE_CX:
@@ -1373,7 +1381,7 @@ lpfc_sli_hbq_to_firmware_s4(struct lpfc_hba *phba, uint32_t hbqno,
 /* HBQ for ELS and CT traffic. */
 static struct lpfc_hbq_init lpfc_els_hbq = {
 	.rn = 1,
-	.entry_count = 200,
+	.entry_count = 256,
 	.mask_count = 0,
 	.profile = 0,
 	.ring_mask = (1 << LPFC_ELS_RING),
@@ -1472,8 +1480,11 @@ err:
 int
 lpfc_sli_hbqbuf_add_hbqs(struct lpfc_hba *phba, uint32_t qno)
 {
-	return(lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
-					 lpfc_hbq_defs[qno]->add_count));
+	if (phba->sli_rev == LPFC_SLI_REV4)
+		return 0;
+	else
+		return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
+					lpfc_hbq_defs[qno]->add_count);
 }
 
 /**
@@ -1488,8 +1499,12 @@ lpfc_sli_hbqbuf_add_hbqs(struct lpfc_hba *phba, uint32_t qno)
 static int
 lpfc_sli_hbqbuf_init_hbqs(struct lpfc_hba *phba, uint32_t qno)
 {
-	return(lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
-					 lpfc_hbq_defs[qno]->init_count));
+	if (phba->sli_rev == LPFC_SLI_REV4)
+		return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
+					lpfc_hbq_defs[qno]->entry_count);
+	else
+		return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
+					lpfc_hbq_defs[qno]->init_count);
 }
 
 /**
@@ -1700,6 +1715,7 @@ lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 	struct lpfc_dmabuf *mp;
 	uint16_t rpi, vpi;
 	int rc;
+	struct lpfc_vport *vport = pmb->vport;
 
 	mp = (struct lpfc_dmabuf *) (pmb->context1);
 
@@ -1728,6 +1744,18 @@ lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 		return;
 	}
 
+	/* Unreg VPI, if the REG_VPI succeed after VLink failure */
+	if ((pmb->u.mb.mbxCommand == MBX_REG_VPI) &&
+	    !(phba->pport->load_flag & FC_UNLOADING) &&
+	    !pmb->u.mb.mbxStatus) {
+		lpfc_unreg_vpi(phba, pmb->u.mb.un.varRegVpi.vpi, pmb);
+		pmb->vport = vport;
+		pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
+		if (rc != MBX_NOT_FINISHED)
+			return;
+	}
+
 	if (bf_get(lpfc_mqe_command, &pmb->u.mqe) == MBX_SLI4_CONFIG)
 		lpfc_sli4_mbox_cmd_free(phba, pmb);
 	else
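The REG_VPI fix-up above recycles the completing mailbox for the follow-up UNREG_VPI: lpfc_unreg_vpi() re-initializes pmb, which is why vport and mbox_cmpl must be restored before reissuing, and the early return hands ownership of pmb to the new command. The pattern in isolation (a sketch reusing the function names from the hunk):

    lpfc_unreg_vpi(phba, pmb->u.mb.un.varRegVpi.vpi, pmb); /* re-inits pmb */
    pmb->vport = vport;                 /* restore fields the re-init cleared */
    pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
    if (lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT) != MBX_NOT_FINISHED)
            return;                     /* pmb now belongs to the new command */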
@@ -1794,7 +1822,7 @@ lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
 		 */
 		if (lpfc_sli_chk_mbx_command(pmbox->mbxCommand) ==
		    MBX_SHUTDOWN) {
-			/* Unknow mailbox command compl */
+			/* Unknown mailbox command compl */
 			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
					"(%d):0323 Unknown Mailbox command "
					"x%x (x%x) Cmpl\n",
@@ -2068,8 +2096,8 @@ lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
 	if ((irsp->ulpCommand == CMD_RCV_ELS_REQ64_CX) ||
	    (irsp->ulpCommand == CMD_RCV_ELS_REQ_CX) ||
	    (irsp->ulpCommand == CMD_IOCB_RCV_ELS64_CX)) {
-		Rctl = FC_ELS_REQ;
-		Type = FC_ELS_DATA;
+		Rctl = FC_RCTL_ELS_REQ;
+		Type = FC_TYPE_ELS;
 	} else {
 		w5p = (WORD5 *)&(saveq->iocb.un.ulpWord[5]);
 		Rctl = w5p->hcsw.Rctl;
@@ -2079,8 +2107,8 @@ lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
 	if ((Rctl == 0) && (pring->ringno == LPFC_ELS_RING) &&
	    (irsp->ulpCommand == CMD_RCV_SEQUENCE64_CX ||
	     irsp->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) {
-		Rctl = FC_ELS_REQ;
-		Type = FC_ELS_DATA;
+		Rctl = FC_RCTL_ELS_REQ;
+		Type = FC_TYPE_ELS;
 		w5p->hcsw.Rctl = Rctl;
 		w5p->hcsw.Type = Type;
 	}
@@ -2211,9 +2239,15 @@ lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			 * All other are passed to the completion callback.
			 */
			if (pring->ringno == LPFC_ELS_RING) {
-				if (cmdiocbp->iocb_flag & LPFC_DRIVER_ABORTED) {
+				if ((phba->sli_rev < LPFC_SLI_REV4) &&
+				    (cmdiocbp->iocb_flag &
+							LPFC_DRIVER_ABORTED)) {
+					spin_lock_irqsave(&phba->hbalock,
+							  iflag);
					cmdiocbp->iocb_flag &=
						~LPFC_DRIVER_ABORTED;
+					spin_unlock_irqrestore(&phba->hbalock,
+							       iflag);
					saveq->iocb.ulpStatus =
						IOSTAT_LOCAL_REJECT;
					saveq->iocb.un.ulpWord[4] =
@@ -2223,7 +2257,62 @@ lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
					 * of DMAing payload, so don't free data
					 * buffer till after a hbeat.
					 */
+					spin_lock_irqsave(&phba->hbalock,
+							  iflag);
					saveq->iocb_flag |= LPFC_DELAY_MEM_FREE;
+					spin_unlock_irqrestore(&phba->hbalock,
+							       iflag);
+				}
+				if (phba->sli_rev == LPFC_SLI_REV4) {
+					if (saveq->iocb_flag &
+					    LPFC_EXCHANGE_BUSY) {
+						/* Set cmdiocb flag for the
+						 * exchange busy so sgl (xri)
+						 * will not be released until
+						 * the abort xri is received
+						 * from hba.
+						 */
+						spin_lock_irqsave(
+							&phba->hbalock, iflag);
+						cmdiocbp->iocb_flag |=
+							LPFC_EXCHANGE_BUSY;
+						spin_unlock_irqrestore(
+							&phba->hbalock, iflag);
+					}
+					if (cmdiocbp->iocb_flag &
+					    LPFC_DRIVER_ABORTED) {
+						/*
+						 * Clear LPFC_DRIVER_ABORTED
+						 * bit in case it was driver
+						 * initiated abort.
+						 */
+						spin_lock_irqsave(
+							&phba->hbalock, iflag);
+						cmdiocbp->iocb_flag &=
+							~LPFC_DRIVER_ABORTED;
+						spin_unlock_irqrestore(
+							&phba->hbalock, iflag);
+						cmdiocbp->iocb.ulpStatus =
+							IOSTAT_LOCAL_REJECT;
+						cmdiocbp->iocb.un.ulpWord[4] =
+							IOERR_ABORT_REQUESTED;
+						/*
+						 * For SLI4, irsiocb contains
+						 * NO_XRI in sli_xritag, it
+						 * shall not affect releasing
+						 * sgl (xri) process.
+						 */
+						saveq->iocb.ulpStatus =
+							IOSTAT_LOCAL_REJECT;
+						saveq->iocb.un.ulpWord[4] =
+							IOERR_SLI_ABORTED;
+						spin_lock_irqsave(
+							&phba->hbalock, iflag);
+						saveq->iocb_flag |=
+							LPFC_DELAY_MEM_FREE;
+						spin_unlock_irqrestore(
+							&phba->hbalock, iflag);
+					}
				}
			}
			(cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq);
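For SLI-4, two independent facts ride on the response: the driver aborted the command (LPFC_DRIVER_ABORTED) and the hardware still owns the exchange (LPFC_EXCHANGE_BUSY, derived from the WCQE XB bit). The block above clears the first and rewrites both IOCBs to a LOCAL_REJECT status, and copies the second onto the command IOCB so its XRI is not released early. Stripped of the locking, the logic reduces to this hypothetical helper:

    static void sli4_abort_fixup(struct lpfc_iocbq *cmd, struct lpfc_iocbq *rsp)
    {
            if (rsp->iocb_flag & LPFC_EXCHANGE_BUSY)
                    cmd->iocb_flag |= LPFC_EXCHANGE_BUSY;  /* hold the XRI */
            if (cmd->iocb_flag & LPFC_DRIVER_ABORTED) {
                    cmd->iocb_flag &= ~LPFC_DRIVER_ABORTED;
                    cmd->iocb.ulpStatus = IOSTAT_LOCAL_REJECT;
                    cmd->iocb.un.ulpWord[4] = IOERR_ABORT_REQUESTED;
            }
    }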
@@ -2324,168 +2413,6 @@ void lpfc_poll_eratt(unsigned long ptr)
 	return;
 }
 
-/**
- * lpfc_sli_poll_fcp_ring - Handle FCP ring completion in polling mode
- * @phba: Pointer to HBA context object.
- *
- * This function is called from lpfc_queuecommand, lpfc_poll_timeout,
- * lpfc_abort_handler and lpfc_slave_configure when FCP_RING_POLLING
- * is enabled.
- *
- * The caller does not hold any lock.
- * The function processes each response iocb in the response ring until it
- * finds an iocb with LE bit set and chains all the iocbs upto the iocb with
- * LE bit set. The function will call the completion handler of the command iocb
- * if the response iocb indicates a completion for a command iocb or it is
- * an abort completion.
- **/
-void lpfc_sli_poll_fcp_ring(struct lpfc_hba *phba)
-{
-	struct lpfc_sli *psli = &phba->sli;
-	struct lpfc_sli_ring *pring = &psli->ring[LPFC_FCP_RING];
-	IOCB_t *irsp = NULL;
-	IOCB_t *entry = NULL;
-	struct lpfc_iocbq *cmdiocbq = NULL;
-	struct lpfc_iocbq rspiocbq;
-	struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
-	uint32_t status;
-	uint32_t portRspPut, portRspMax;
-	int type;
-	uint32_t rsp_cmpl = 0;
-	uint32_t ha_copy;
-	unsigned long iflags;
-
-	pring->stats.iocb_event++;
-
-	/*
-	 * The next available response entry should never exceed the maximum
-	 * entries.  If it does, treat it as an adapter hardware error.
-	 */
-	portRspMax = pring->numRiocb;
-	portRspPut = le32_to_cpu(pgp->rspPutInx);
-	if (unlikely(portRspPut >= portRspMax)) {
-		lpfc_sli_rsp_pointers_error(phba, pring);
-		return;
-	}
-
-	rmb();
-	while (pring->rspidx != portRspPut) {
-		entry = lpfc_resp_iocb(phba, pring);
-		if (++pring->rspidx >= portRspMax)
-			pring->rspidx = 0;
-
-		lpfc_sli_pcimem_bcopy((uint32_t *) entry,
-				      (uint32_t *) &rspiocbq.iocb,
-				      phba->iocb_rsp_size);
-		irsp = &rspiocbq.iocb;
-		type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK);
-		pring->stats.iocb_rsp++;
-		rsp_cmpl++;
-
-		if (unlikely(irsp->ulpStatus)) {
-			/* Rsp ring <ringno> error: IOCB */
-			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
-					"0326 Rsp Ring %d error: IOCB Data: "
-					"x%x x%x x%x x%x x%x x%x x%x x%x\n",
-					pring->ringno,
-					irsp->un.ulpWord[0],
-					irsp->un.ulpWord[1],
-					irsp->un.ulpWord[2],
-					irsp->un.ulpWord[3],
-					irsp->un.ulpWord[4],
-					irsp->un.ulpWord[5],
-					*(uint32_t *)&irsp->un1,
-					*((uint32_t *)&irsp->un1 + 1));
-		}
-
-		switch (type) {
-		case LPFC_ABORT_IOCB:
-		case LPFC_SOL_IOCB:
-			/*
-			 * Idle exchange closed via ABTS from port.  No iocb
-			 * resources need to be recovered.
-			 */
-			if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) {
-				lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
-						"0314 IOCB cmd 0x%x "
-						"processed. Skipping "
-						"completion",
-						irsp->ulpCommand);
-				break;
-			}
-
-			spin_lock_irqsave(&phba->hbalock, iflags);
-			cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring,
-							 &rspiocbq);
-			spin_unlock_irqrestore(&phba->hbalock, iflags);
-			if ((cmdiocbq) && (cmdiocbq->iocb_cmpl)) {
-				(cmdiocbq->iocb_cmpl)(phba, cmdiocbq,
-						      &rspiocbq);
-			}
-			break;
-		default:
-			if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
-				char adaptermsg[LPFC_MAX_ADPTMSG];
-				memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
-				memcpy(&adaptermsg[0], (uint8_t *) irsp,
-				       MAX_MSG_DATA);
-				dev_warn(&((phba->pcidev)->dev),
-					 "lpfc%d: %s\n",
-					 phba->brd_no, adaptermsg);
-			} else {
-				/* Unknown IOCB command */
-				lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
-						"0321 Unknown IOCB command "
-						"Data: x%x, x%x x%x x%x x%x\n",
-						type, irsp->ulpCommand,
-						irsp->ulpStatus,
-						irsp->ulpIoTag,
-						irsp->ulpContext);
-			}
-			break;
-		}
-
-		/*
-		 * The response IOCB has been processed.  Update the ring
-		 * pointer in SLIM.  If the port response put pointer has not
-		 * been updated, sync the pgp->rspPutInx and fetch the new port
-		 * response put pointer.
-		 */
-		writel(pring->rspidx, &phba->host_gp[pring->ringno].rspGetInx);
-
-		if (pring->rspidx == portRspPut)
-			portRspPut = le32_to_cpu(pgp->rspPutInx);
-	}
-
-	ha_copy = readl(phba->HAregaddr);
-	ha_copy >>= (LPFC_FCP_RING * 4);
-
-	if ((rsp_cmpl > 0) && (ha_copy & HA_R0RE_REQ)) {
-		spin_lock_irqsave(&phba->hbalock, iflags);
-		pring->stats.iocb_rsp_full++;
-		status = ((CA_R0ATT | CA_R0RE_RSP) << (LPFC_FCP_RING * 4));
-		writel(status, phba->CAregaddr);
-		readl(phba->CAregaddr);
-		spin_unlock_irqrestore(&phba->hbalock, iflags);
-	}
-	if ((ha_copy & HA_R0CE_RSP) &&
-	    (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
-		spin_lock_irqsave(&phba->hbalock, iflags);
-		pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
-		pring->stats.iocb_cmd_empty++;
-
-		/* Force update of the local copy of cmdGetInx */
-		pring->local_getidx = le32_to_cpu(pgp->cmdGetInx);
-		lpfc_sli_resume_iocb(phba, pring);
-
-		if ((pring->lpfc_sli_cmd_available))
-			(pring->lpfc_sli_cmd_available) (phba, pring);
-
-		spin_unlock_irqrestore(&phba->hbalock, iflags);
-	}
-
-	return;
-}
 
 /**
 * lpfc_sli_handle_fast_ring_event - Handle ring events on FCP ring
@@ -2502,9 +2429,9 @@ void lpfc_sli_poll_fcp_ring(struct lpfc_hba *phba)
 * an abort completion. The function will call lpfc_sli_process_unsol_iocb
 * function if this is an unsolicited iocb.
 * This routine presumes LPFC_FCP_RING handling and doesn't bother
- * to check it explicitly. This function always returns 1.
- **/
-static int
+ * to check it explicitly.
+ */
+int
 lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
				struct lpfc_sli_ring *pring, uint32_t mask)
 {
@@ -2534,6 +2461,11 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
 		spin_unlock_irqrestore(&phba->hbalock, iflag);
 		return 1;
 	}
+	if (phba->fcp_ring_in_use) {
+		spin_unlock_irqrestore(&phba->hbalock, iflag);
+		return 1;
+	} else
+		phba->fcp_ring_in_use = 1;
 
 	rmb();
 	while (pring->rspidx != portRspPut) {
@@ -2603,18 +2535,15 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
 
 			cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring,
							 &rspiocbq);
-			if ((cmdiocbq) && (cmdiocbq->iocb_cmpl)) {
-				if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
-					(cmdiocbq->iocb_cmpl)(phba, cmdiocbq,
-							      &rspiocbq);
-				} else {
-					spin_unlock_irqrestore(&phba->hbalock,
-							       iflag);
-					(cmdiocbq->iocb_cmpl)(phba, cmdiocbq,
-							      &rspiocbq);
-					spin_lock_irqsave(&phba->hbalock,
-							  iflag);
-				}
+			if (unlikely(!cmdiocbq))
+				break;
+			if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED)
+				cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
+			if (cmdiocbq->iocb_cmpl) {
+				spin_unlock_irqrestore(&phba->hbalock, iflag);
+				(cmdiocbq->iocb_cmpl)(phba, cmdiocbq,
+						      &rspiocbq);
+				spin_lock_irqsave(&phba->hbalock, iflag);
 			}
 			break;
 		case LPFC_UNSOL_IOCB:
@@ -2675,6 +2604,7 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
 
 	}
 
+	phba->fcp_ring_in_use = 0;
 	spin_unlock_irqrestore(&phba->hbalock, iflag);
 	return rc;
 }
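fcp_ring_in_use is a re-entrancy guard, not a lock: with the fast-ring handler now exported, two contexts can reach it, and the flag (tested and set under hbalock) makes the late arrival back off instead of processing the same ring entries twice. The shape of the guard in a self-contained kernel-style sketch:

    static int try_claim_ring(spinlock_t *lock, int *busy)
    {
            unsigned long flags;

            spin_lock_irqsave(lock, flags);
            if (*busy) {
                    spin_unlock_irqrestore(lock, flags);
                    return 0;       /* someone else is draining the ring */
            }
            *busy = 1;              /* the winner clears it when finished */
            spin_unlock_irqrestore(lock, flags);
            return 1;
    }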
@@ -3018,16 +2948,39 @@ lpfc_sli_handle_slow_ring_event_s4(struct lpfc_hba *phba,
			       struct lpfc_sli_ring *pring, uint32_t mask)
 {
 	struct lpfc_iocbq *irspiocbq;
+	struct hbq_dmabuf *dmabuf;
+	struct lpfc_cq_event *cq_event;
 	unsigned long iflag;
 
-	while (!list_empty(&phba->sli4_hba.sp_rspiocb_work_queue)) {
+	spin_lock_irqsave(&phba->hbalock, iflag);
+	phba->hba_flag &= ~HBA_SP_QUEUE_EVT;
+	spin_unlock_irqrestore(&phba->hbalock, iflag);
+	while (!list_empty(&phba->sli4_hba.sp_queue_event)) {
 		/* Get the response iocb from the head of work queue */
 		spin_lock_irqsave(&phba->hbalock, iflag);
-		list_remove_head(&phba->sli4_hba.sp_rspiocb_work_queue,
-				 irspiocbq, struct lpfc_iocbq, list);
+		list_remove_head(&phba->sli4_hba.sp_queue_event,
+				 cq_event, struct lpfc_cq_event, list);
 		spin_unlock_irqrestore(&phba->hbalock, iflag);
-		/* Process the response iocb */
-		lpfc_sli_sp_handle_rspiocb(phba, pring, irspiocbq);
+
+		switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) {
+		case CQE_CODE_COMPL_WQE:
+			irspiocbq = container_of(cq_event, struct lpfc_iocbq,
+						 cq_event);
+			/* Translate ELS WCQE to response IOCBQ */
+			irspiocbq = lpfc_sli4_els_wcqe_to_rspiocbq(phba,
+								   irspiocbq);
+			if (irspiocbq)
+				lpfc_sli_sp_handle_rspiocb(phba, pring,
+							   irspiocbq);
+			break;
+		case CQE_CODE_RECEIVE:
+			dmabuf = container_of(cq_event, struct hbq_dmabuf,
+					      cq_event);
+			lpfc_sli4_handle_received_buffer(phba, dmabuf);
+			break;
+		default:
+			break;
+		}
 	}
 }
 
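The rework above replaces the iocb-only work queue with a generic sp_queue_event list: each queued item is a struct lpfc_cq_event embedded in a larger object, and once the CQE code is known, container_of() recovers the outer struct lpfc_iocbq or struct hbq_dmabuf. The embedding trick in miniature (hypothetical types; container_of() comes from <linux/kernel.h>):

    struct cq_event { int code; };
    struct iocbq { int iotag; struct cq_event ev; };

    static struct iocbq *to_iocbq(struct cq_event *evp)
    {
            return container_of(evp, struct iocbq, ev); /* back to the outer object */
    }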
@@ -3160,6 +3113,12 @@ lpfc_sli_brdready_s3(struct lpfc_hba *phba, uint32_t mask)
 
 	/* Check to see if any errors occurred during init */
 	if ((status & HS_FFERM) || (i >= 20)) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"2751 Adapter failed to restart, "
+				"status reg x%x, FW Data: A8 x%x AC x%x\n",
+				status,
+				readl(phba->MBslimaddr + 0xa8),
+				readl(phba->MBslimaddr + 0xac));
 		phba->link_state = LPFC_HBA_ERROR;
 		retval = 1;
 	}
@@ -3347,6 +3306,9 @@ lpfc_sli_brdkill(struct lpfc_hba *phba)
 	if (retval != MBX_SUCCESS) {
 		if (retval != MBX_BUSY)
 			mempool_free(pmb, phba->mbox_mem_pool);
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"2752 KILL_BOARD command failed retval %d\n",
+				retval);
 		spin_lock_irq(&phba->hbalock);
 		phba->link_flag &= ~LS_IGNORE_ERATT;
 		spin_unlock_irq(&phba->hbalock);
@@ -3416,6 +3378,7 @@ lpfc_sli_brdreset(struct lpfc_hba *phba)
 
 	/* perform board reset */
 	phba->fc_eventTag = 0;
+	phba->link_events = 0;
 	phba->pport->fc_myDID = 0;
 	phba->pport->fc_prevDID = 0;
 
@@ -3476,6 +3439,7 @@ lpfc_sli4_brdreset(struct lpfc_hba *phba)
 
 	/* perform board reset */
 	phba->fc_eventTag = 0;
+	phba->link_events = 0;
 	phba->pport->fc_myDID = 0;
 	phba->pport->fc_prevDID = 0;
 
@@ -3495,7 +3459,6 @@ lpfc_sli4_brdreset(struct lpfc_hba *phba)
 	list_del_init(&phba->sli4_hba.dat_rq->list);
 	list_del_init(&phba->sli4_hba.mbx_cq->list);
 	list_del_init(&phba->sli4_hba.els_cq->list);
-	list_del_init(&phba->sli4_hba.rxq_cq->list);
 	for (qindx = 0; qindx < phba->cfg_fcp_wq_count; qindx++)
 		list_del_init(&phba->sli4_hba.fcp_wq[qindx]->list);
 	for (qindx = 0; qindx < phba->cfg_fcp_eq_count; qindx++)
@@ -3531,9 +3494,13 @@ lpfc_sli_brdrestart_s3(struct lpfc_hba *phba)
 	struct lpfc_sli *psli;
 	volatile uint32_t word0;
 	void __iomem *to_slim;
+	uint32_t hba_aer_enabled;
 
 	spin_lock_irq(&phba->hbalock);
 
+	/* Take PCIe device Advanced Error Reporting (AER) state */
+	hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED;
+
 	psli = &phba->sli;
 
 	/* Restart HBA */
@@ -3573,6 +3540,10 @@ lpfc_sli_brdrestart_s3(struct lpfc_hba *phba)
 	/* Give the INITFF and Post time to settle. */
 	mdelay(100);
 
+	/* Reset HBA AER if it was enabled, note hba_flag was reset above */
+	if (hba_aer_enabled)
+		pci_disable_pcie_error_reporting(phba->pcidev);
+
 	lpfc_hba_down_post(phba);
 
 	return 0;
@@ -4042,6 +4013,24 @@ lpfc_sli_hba_setup(struct lpfc_hba *phba)
 	if (rc)
 		goto lpfc_sli_hba_setup_error;
 
+	/* Enable PCIe device Advanced Error Reporting (AER) if configured */
+	if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) {
+		rc = pci_enable_pcie_error_reporting(phba->pcidev);
+		if (!rc) {
+			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+					"2709 This device supports "
+					"Advanced Error Reporting (AER)\n");
+			spin_lock_irq(&phba->hbalock);
+			phba->hba_flag |= HBA_AER_ENABLED;
+			spin_unlock_irq(&phba->hbalock);
+		} else {
+			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+					"2708 This device does not support "
+					"Advanced Error Reporting (AER)\n");
+			phba->cfg_aer_support = 0;
+		}
+	}
+
 	if (phba->sli_rev == 3) {
 		phba->iocb_cmd_size = SLI3_IOCB_CMD_SIZE;
 		phba->iocb_rsp_size = SLI3_IOCB_RSP_SIZE;
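AER support is opt-in through cfg_aer_support and is attempted once per HBA setup. pci_enable_pcie_error_reporting() (declared in the <linux/aer.h> include added at the top of this diff) fails when the device or its upstream port lacks the AER capability, and the driver then clears the knob so the configuration reflects reality. Condensed usage sketch:

    if (phba->cfg_aer_support && !(phba->hba_flag & HBA_AER_ENABLED)) {
            if (!pci_enable_pcie_error_reporting(phba->pcidev))
                    phba->hba_flag |= HBA_AER_ENABLED; /* driver takes hbalock */
            else
                    phba->cfg_aer_support = 0;         /* no AER capability */
    }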
@@ -4077,7 +4066,7 @@ lpfc_sli_hba_setup(struct lpfc_hba *phba)
 
 lpfc_sli_hba_setup_error:
 	phba->link_state = LPFC_HBA_ERROR;
-	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"0445 Firmware initialization failed\n");
 	return rc;
 }
@@ -4163,7 +4152,7 @@ lpfc_sli4_read_fcoe_params(struct lpfc_hba *phba,
 * addition, this routine gets the port vpd data.
 *
 * Return codes
- *	0 - sucessful
+ *	0 - successful
 *	ENOMEM - could not allocated memory.
 **/
 static int
@@ -4211,6 +4200,7 @@ lpfc_sli4_read_rev(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
 	if (rc) {
 		dma_free_coherent(&phba->pcidev->dev, dma_size,
				  dmabuf->virt, dmabuf->phys);
+		kfree(dmabuf);
 		return -EIO;
 	}
 
@@ -4243,7 +4233,6 @@ lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba)
 
 	lpfc_sli4_cq_release(phba->sli4_hba.mbx_cq, LPFC_QUEUE_REARM);
 	lpfc_sli4_cq_release(phba->sli4_hba.els_cq, LPFC_QUEUE_REARM);
-	lpfc_sli4_cq_release(phba->sli4_hba.rxq_cq, LPFC_QUEUE_REARM);
 	for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++)
 		lpfc_sli4_cq_release(phba->sli4_hba.fcp_cq[fcp_eqidx],
				     LPFC_QUEUE_REARM);
@@ -4322,6 +4311,13 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
 	phba->sli_rev = bf_get(lpfc_mbx_rd_rev_sli_lvl, &mqe->un.read_rev);
 	if (bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev))
 		phba->hba_flag |= HBA_FCOE_SUPPORT;
+
+	if (bf_get(lpfc_mbx_rd_rev_cee_ver, &mqe->un.read_rev) ==
+		LPFC_DCBX_CEE_MODE)
+		phba->hba_flag |= HBA_FIP_SUPPORT;
+	else
+		phba->hba_flag &= ~HBA_FIP_SUPPORT;
+
 	if (phba->sli_rev != LPFC_SLI_REV4 ||
	    !(phba->hba_flag & HBA_FCOE_SUPPORT)) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
@@ -4423,7 +4419,13 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
 	spin_unlock_irq(&phba->hbalock);
 
 	/* Read the port's service parameters. */
-	lpfc_read_sparam(phba, mboxq, vport->vpi);
+	rc = lpfc_read_sparam(phba, mboxq, vport->vpi);
+	if (rc) {
+		phba->link_state = LPFC_HBA_ERROR;
+		rc = -ENOMEM;
+		goto out_free_vpd;
+	}
+
 	mboxq->vport = vport;
 	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
 	mp = (struct lpfc_dmabuf *) mboxq->context1;
@@ -4468,7 +4470,8 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
 	rc = lpfc_sli4_post_sgl_list(phba);
 	if (unlikely(rc)) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
-				"0582 Error %d during sgl post operation", rc);
+				"0582 Error %d during sgl post operation\n",
+				rc);
 		rc = -ENODEV;
 		goto out_free_vpd;
 	}
@@ -4477,8 +4480,8 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
 	rc = lpfc_sli4_repost_scsi_sgl_list(phba);
 	if (unlikely(rc)) {
 		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
-				"0383 Error %d during scsi sgl post opeation",
-				rc);
+				"0383 Error %d during scsi sgl post "
+				"operation\n", rc);
 		/* Some Scsi buffers were moved to the abort scsi list */
 		/* A pci function reset will repost them */
 		rc = -ENODEV;
@@ -4494,10 +4497,6 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
 		rc = -ENODEV;
 		goto out_free_vpd;
 	}
-	if (phba->cfg_enable_fip)
-		bf_set(lpfc_fip_flag, &phba->sli4_hba.sli4_flags, 1);
-	else
-		bf_set(lpfc_fip_flag, &phba->sli4_hba.sli4_flags, 0);
 
 	/* Set up all the queues to the device */
 	rc = lpfc_sli4_queue_setup(phba);
@@ -4521,6 +4520,10 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
 	/* Post receive buffers to the device */
 	lpfc_sli4_rb_setup(phba);
 
+	/* Reset HBA FCF states after HBA reset */
+	phba->fcf.fcf_flag = 0;
+	phba->fcf.current_rec.flag = 0;
+
 	/* Start the ELS watchdog timer */
 	mod_timer(&vport->els_tmofunc,
		  jiffies + HZ * (phba->fc_ratov * 2));
@@ -5669,7 +5672,7 @@ __lpfc_sli_issue_iocb_s3(struct lpfc_hba *phba, uint32_t ring_number,
 	case CMD_GEN_REQUEST64_CX:
 		if (!(phba->sli.sli_flag & LPFC_MENLO_MAINT) ||
			(piocb->iocb.un.genreq64.w5.hcsw.Rctl !=
-			FC_FCP_CMND) ||
+			FC_RCTL_DD_UNSOL_CMD) ||
			(piocb->iocb.un.genreq64.w5.hcsw.Type !=
			MENLO_TRANSPORT_TYPE))
 
@@ -5777,19 +5780,19 @@ lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
 
 		for (i = 0; i < numBdes; i++) {
 			/* Should already be byte swapped. */
 			sgl->addr_hi = bpl->addrHigh;
 			sgl->addr_lo = bpl->addrLow;
-			/* swap the size field back to the cpu so we
-			 * can assign it to the sgl.
-			 */
-			bde.tus.w = le32_to_cpu(bpl->tus.w);
-			bf_set(lpfc_sli4_sge_len, sgl, bde.tus.f.bdeSize);
+
 			if ((i+1) == numBdes)
 				bf_set(lpfc_sli4_sge_last, sgl, 1);
 			else
 				bf_set(lpfc_sli4_sge_last, sgl, 0);
 			sgl->word2 = cpu_to_le32(sgl->word2);
-			sgl->word3 = cpu_to_le32(sgl->word3);
+			/* swap the size field back to the cpu so we
+			 * can assign it to the sgl.
+			 */
+			bde.tus.w = le32_to_cpu(bpl->tus.w);
+			sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize);
 			bpl++;
 			sgl++;
 		}
@@ -5802,11 +5805,10 @@ lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
			cpu_to_le32(icmd->un.genreq64.bdl.addrHigh);
 		sgl->addr_lo =
			cpu_to_le32(icmd->un.genreq64.bdl.addrLow);
-		bf_set(lpfc_sli4_sge_len, sgl,
-			icmd->un.genreq64.bdl.bdeSize);
 		bf_set(lpfc_sli4_sge_last, sgl, 1);
 		sgl->word2 = cpu_to_le32(sgl->word2);
-		sgl->word3 = cpu_to_le32(sgl->word3);
+		sgl->sge_len =
+			cpu_to_le32(icmd->un.genreq64.bdl.bdeSize);
 	}
 	return sglq->sli4_xritag;
 }
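The reordering above fixes an endianness hazard: the old code set a bit-field into word3 and then byte-swapped it, which goes wrong once the word has already been converted, so on big-endian hosts the SGE length could reach the port swapped. The new code sets the last-SGE bit, swaps word2 exactly once, then assigns sge_len from a CPU-order copy of the BDE length. The safe order in a self-contained form (plain C with glibc endian helpers; the 24-bit size mask mirrors the bdeSize field of ulp_bde64):

    #include <endian.h>
    #include <stdint.h>

    struct sge { uint32_t word2; uint32_t sge_len; }; /* device expects LE */

    static void fill_sge_len(struct sge *sgl, uint32_t tus_le)
    {
            uint32_t tus = le32toh(tus_le);   /* bring to CPU order first   */
            uint32_t len = tus & 0x00ffffff;  /* extract the 24-bit size    */
            sgl->sge_len = htole32(len);      /* convert back exactly once  */
    }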
@@ -5849,7 +5851,7 @@ static int
 lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
		union lpfc_wqe *wqe)
 {
-	uint32_t payload_len = 0;
+	uint32_t xmit_len = 0, total_len = 0;
 	uint8_t ct = 0;
 	uint32_t fip;
 	uint32_t abort_tag;
@@ -5857,12 +5859,15 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
 	uint8_t cmnd;
 	uint16_t xritag;
 	struct ulp_bde64 *bpl = NULL;
+	uint32_t els_id = ELS_ID_DEFAULT;
+	int numBdes, i;
+	struct ulp_bde64 bde;
 
-	fip = bf_get(lpfc_fip_flag, &phba->sli4_hba.sli4_flags);
+	fip = phba->hba_flag & HBA_FIP_SUPPORT;
 	/* The fcp commands will set command type */
 	if (iocbq->iocb_flag & LPFC_IO_FCP)
 		command_type = FCP_COMMAND;
-	else if (fip && (iocbq->iocb_flag & LPFC_FIP_ELS))
+	else if (fip && (iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK))
 		command_type = ELS_COMMAND_FIP;
 	else
 		command_type = ELS_COMMAND_NON_FIP;
@@ -5874,6 +5879,8 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
 	wqe->words[7] = 0; /* The ct field has moved so reset */
 	/* words0-2 bpl convert bde */
 	if (iocbq->iocb.un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) {
+		numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize /
+				sizeof(struct ulp_bde64);
 		bpl = (struct ulp_bde64 *)
			((struct lpfc_dmabuf *)iocbq->context3)->virt;
 		if (!bpl)
@@ -5886,9 +5893,14 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
		 * can assign it to the sgl.
		 */
 		wqe->generic.bde.tus.w = le32_to_cpu(bpl->tus.w);
-		payload_len = wqe->generic.bde.tus.f.bdeSize;
+		xmit_len = wqe->generic.bde.tus.f.bdeSize;
+		total_len = 0;
+		for (i = 0; i < numBdes; i++) {
+			bde.tus.w = le32_to_cpu(bpl[i].tus.w);
+			total_len += bde.tus.f.bdeSize;
+		}
 	} else
-		payload_len = iocbq->iocb.un.fcpi64.bdl.bdeSize;
+		xmit_len = iocbq->iocb.un.fcpi64.bdl.bdeSize;
 
 	iocbq->iocb.ulpIoTag = iocbq->iotag;
 	cmnd = iocbq->iocb.ulpCommand;
@@ -5902,7 +5914,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
				iocbq->iocb.ulpCommand);
 			return IOCB_ERROR;
 		}
-		wqe->els_req.payload_len = payload_len;
+		wqe->els_req.payload_len = xmit_len;
 		/* Els_reguest64 has a TMO */
 		bf_set(wqe_tmo, &wqe->els_req.wqe_com,
			iocbq->iocb.ulpTimeout);
@@ -5923,7 +5935,22 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
 		bf_set(lpfc_wqe_gen_ct, &wqe->generic, ct);
 		bf_set(lpfc_wqe_gen_pu, &wqe->generic, 0);
 		/* CCP CCPE PV PRI in word10 were set in the memcpy */
+
+		if (command_type == ELS_COMMAND_FIP) {
+			els_id = ((iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK)
+					>> LPFC_FIP_ELS_ID_SHIFT);
+		}
+		bf_set(lpfc_wqe_gen_els_id, &wqe->generic, els_id);
+
 		break;
+	case CMD_XMIT_SEQUENCE64_CX:
+		bf_set(lpfc_wqe_gen_context, &wqe->generic,
+		       iocbq->iocb.un.ulpWord[3]);
+		wqe->generic.word3 = 0;
+		bf_set(wqe_rcvoxid, &wqe->generic, iocbq->iocb.ulpContext);
+		/* The entire sequence is transmitted for this IOCB */
+		xmit_len = total_len;
+		cmnd = CMD_XMIT_SEQUENCE64_CR;
 	case CMD_XMIT_SEQUENCE64_CR:
 		/* word3 iocb=io_tag32 wqe=payload_offset */
 		/* payload offset used for multilpe outstanding
@@ -5933,7 +5960,8 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
 		/* word4 relative_offset memcpy */
 		/* word5 r_ctl/df_ctl memcpy */
 		bf_set(lpfc_wqe_gen_pu, &wqe->generic, 0);
-		wqe->xmit_sequence.xmit_len = payload_len;
+		wqe->xmit_sequence.xmit_len = xmit_len;
+		command_type = OTHER_COMMAND;
 		break;
 	case CMD_XMIT_BCAST64_CN:
 		/* word3 iocb=iotag32 wqe=payload_len */
@@ -5962,7 +5990,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
 	case CMD_FCP_IREAD64_CR:
 		/* FCP_CMD is always the 1st sgl entry */
 		wqe->fcp_iread.payload_len =
-			payload_len + sizeof(struct fcp_rsp);
+			xmit_len + sizeof(struct fcp_rsp);
 
 		/* word 4 (xfer length) should have been set on the memcpy */
 
@@ -5999,7 +6027,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
		 * sgl[1] = rsp.
		 *
		 */
-		wqe->gen_req.command_len = payload_len;
+		wqe->gen_req.command_len = xmit_len;
 		/* Word4 parameter copied in the memcpy */
 		/* Word5 [rctl, type, df_ctl, la] copied in memcpy */
 		/* word6 context tag copied in memcpy */
@@ -6051,12 +6079,10 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
 		else
 			bf_set(abort_cmd_ia, &wqe->abort_cmd, 0);
 		bf_set(abort_cmd_criteria, &wqe->abort_cmd, T_XRI_TAG);
-		abort_tag = iocbq->iocb.un.acxri.abortIoTag;
 		wqe->words[5] = 0;
 		bf_set(lpfc_wqe_gen_ct, &wqe->generic,
			((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
 		abort_tag = iocbq->iocb.un.acxri.abortIoTag;
-		wqe->generic.abort_tag = abort_tag;
 		/*
		 * The abort handler will send us CMD_ABORT_XRI_CN or
		 * CMD_CLOSE_XRI_CN and the fw only accepts CMD_ABORT_XRI_CX
@@ -6066,6 +6092,38 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
 		command_type = OTHER_COMMAND;
 		xritag = 0;
 		break;
+	case CMD_XMIT_BLS_RSP64_CX:
+		/* As BLS ABTS-ACC WQE is very different from other WQEs,
+		 * we re-construct this WQE here based on information in
+		 * iocbq from scratch.
+		 */
+		memset(wqe, 0, sizeof(union lpfc_wqe));
+		/* OX_ID is invariable to who sent ABTS to CT exchange */
+		bf_set(xmit_bls_rsp64_oxid, &wqe->xmit_bls_rsp,
+		       bf_get(lpfc_abts_oxid, &iocbq->iocb.un.bls_acc));
+		if (bf_get(lpfc_abts_orig, &iocbq->iocb.un.bls_acc) ==
+		    LPFC_ABTS_UNSOL_INT) {
+			/* ABTS sent by initiator to CT exchange, the
+			 * RX_ID field will be filled with the newly
+			 * allocated responder XRI.
+			 */
+			bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp,
+			       iocbq->sli4_xritag);
+		} else {
+			/* ABTS sent by responder to CT exchange, the
+			 * RX_ID field will be filled with the responder
+			 * RX_ID from ABTS.
+			 */
+			bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp,
+			       bf_get(lpfc_abts_rxid, &iocbq->iocb.un.bls_acc));
+		}
+		bf_set(xmit_bls_rsp64_seqcnthi, &wqe->xmit_bls_rsp, 0xffff);
+		bf_set(wqe_xmit_bls_pt, &wqe->xmit_bls_rsp.wqe_dest, 0x1);
+		bf_set(wqe_ctxt_tag, &wqe->xmit_bls_rsp.wqe_com,
+		       iocbq->iocb.ulpContext);
+		/* Overwrite the pre-set comnd type with OTHER_COMMAND */
+		command_type = OTHER_COMMAND;
+		break;
 	case CMD_XRI_ABORTED_CX:
 	case CMD_CREATE_XRI_CR: /* Do we expect to use this? */
 		/* words0-2 are all 0's no bde */
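The new CMD_XMIT_BLS_RSP64_CX case builds the BLS ABTS-accept WQE from scratch because it shares almost no layout with the other WQE types. The interesting branch is the RX_ID: an ABTS from the exchange initiator gets the freshly allocated responder XRI, while an ABTS from the responder echoes back the RX_ID it carried. As a bare decision function (the names here are illustrative, not from the driver):

    static u16 bls_rsp_rxid(bool abts_from_initiator, u16 new_resp_xri,
                            u16 rxid_from_abts)
    {
            /* initiator-sent ABTS: we respond with our newly allocated XRI;
             * responder-sent ABTS: echo the RX_ID the ABTS carried. */
            return abts_from_initiator ? new_resp_xri : rxid_from_abts;
    }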
@@ -6120,11 +6178,10 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
 	uint16_t xritag;
 	union lpfc_wqe wqe;
 	struct lpfc_sli_ring *pring = &phba->sli.ring[ring_number];
-	uint32_t fcp_wqidx;
 
 	if (piocb->sli4_xritag == NO_XRI) {
 		if (piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN ||
			piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN)
 			sglq = NULL;
 		else {
 			sglq = __lpfc_sli_get_sglq(phba);
@@ -6154,9 +6211,18 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
 	if (lpfc_sli4_iocb2wqe(phba, piocb, &wqe))
 		return IOCB_ERROR;
 
-	if (piocb->iocb_flag & LPFC_IO_FCP) {
-		fcp_wqidx = lpfc_sli4_scmd_to_wqidx_distr(phba);
-		if (lpfc_sli4_wq_put(phba->sli4_hba.fcp_wq[fcp_wqidx], &wqe))
+	if ((piocb->iocb_flag & LPFC_IO_FCP) ||
+	    (piocb->iocb_flag & LPFC_USE_FCPWQIDX)) {
+		/*
+		 * For FCP command IOCB, get a new WQ index to distribute
+		 * WQE across the WQsr. On the other hand, for abort IOCB,
+		 * it carries the same WQ index to the original command
+		 * IOCB.
+		 */
+		if (piocb->iocb_flag & LPFC_IO_FCP)
+			piocb->fcp_wqidx = lpfc_sli4_scmd_to_wqidx_distr(phba);
+		if (lpfc_sli4_wq_put(phba->sli4_hba.fcp_wq[piocb->fcp_wqidx],
+				     &wqe))
 			return IOCB_ERROR;
 	} else {
 		if (lpfc_sli4_wq_put(phba->sli4_hba.els_wq, &wqe))
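New FCP commands are spread across the work queues by lpfc_sli4_scmd_to_wqidx_distr(), but the chosen index is now recorded in piocb->fcp_wqidx so that an abort flagged LPFC_USE_FCPWQIDX lands on the same WQ as the WQE it targets, presumably so the abort cannot be posted on a queue ahead of its victim. Distribution plus pinning, schematically:

    static unsigned int pick_wq(unsigned int *rr, unsigned int nr_wq)
    {
            return (*rr)++ % nr_wq;   /* round-robin for new commands */
    }

    /* command: cmd->wqidx = pick_wq(&counter, nr_wq);
     * abort:   abt->wqidx = cmd->wqidx;   -- never re-distributed */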
@@ -6449,31 +6515,37 @@ lpfc_sli_setup(struct lpfc_hba *phba)
6449 pring->iotag_max = 4096; 6515 pring->iotag_max = 4096;
6450 pring->lpfc_sli_rcv_async_status = 6516 pring->lpfc_sli_rcv_async_status =
6451 lpfc_sli_async_event_handler; 6517 lpfc_sli_async_event_handler;
6452 pring->num_mask = 4; 6518 pring->num_mask = LPFC_MAX_RING_MASK;
6453 pring->prt[0].profile = 0; /* Mask 0 */ 6519 pring->prt[0].profile = 0; /* Mask 0 */
6454 pring->prt[0].rctl = FC_ELS_REQ; 6520 pring->prt[0].rctl = FC_RCTL_ELS_REQ;
6455 pring->prt[0].type = FC_ELS_DATA; 6521 pring->prt[0].type = FC_TYPE_ELS;
6456 pring->prt[0].lpfc_sli_rcv_unsol_event = 6522 pring->prt[0].lpfc_sli_rcv_unsol_event =
6457 lpfc_els_unsol_event; 6523 lpfc_els_unsol_event;
6458 pring->prt[1].profile = 0; /* Mask 1 */ 6524 pring->prt[1].profile = 0; /* Mask 1 */
6459 pring->prt[1].rctl = FC_ELS_RSP; 6525 pring->prt[1].rctl = FC_RCTL_ELS_REP;
6460 pring->prt[1].type = FC_ELS_DATA; 6526 pring->prt[1].type = FC_TYPE_ELS;
6461 pring->prt[1].lpfc_sli_rcv_unsol_event = 6527 pring->prt[1].lpfc_sli_rcv_unsol_event =
6462 lpfc_els_unsol_event; 6528 lpfc_els_unsol_event;
6463 pring->prt[2].profile = 0; /* Mask 2 */ 6529 pring->prt[2].profile = 0; /* Mask 2 */
6464 /* NameServer Inquiry */ 6530 /* NameServer Inquiry */
6465 pring->prt[2].rctl = FC_UNSOL_CTL; 6531 pring->prt[2].rctl = FC_RCTL_DD_UNSOL_CTL;
6466 /* NameServer */ 6532 /* NameServer */
6467 pring->prt[2].type = FC_COMMON_TRANSPORT_ULP; 6533 pring->prt[2].type = FC_TYPE_CT;
6468 pring->prt[2].lpfc_sli_rcv_unsol_event = 6534 pring->prt[2].lpfc_sli_rcv_unsol_event =
6469 lpfc_ct_unsol_event; 6535 lpfc_ct_unsol_event;
6470 pring->prt[3].profile = 0; /* Mask 3 */ 6536 pring->prt[3].profile = 0; /* Mask 3 */
6471 /* NameServer response */ 6537 /* NameServer response */
6472 pring->prt[3].rctl = FC_SOL_CTL; 6538 pring->prt[3].rctl = FC_RCTL_DD_SOL_CTL;
6473 /* NameServer */ 6539 /* NameServer */
6474 pring->prt[3].type = FC_COMMON_TRANSPORT_ULP; 6540 pring->prt[3].type = FC_TYPE_CT;
6475 pring->prt[3].lpfc_sli_rcv_unsol_event = 6541 pring->prt[3].lpfc_sli_rcv_unsol_event =
6476 lpfc_ct_unsol_event; 6542 lpfc_ct_unsol_event;
6543 /* abort unsolicited sequence */
6544 pring->prt[4].profile = 0; /* Mask 4 */
6545 pring->prt[4].rctl = FC_RCTL_BA_ABTS;
6546 pring->prt[4].type = FC_TYPE_BLS;
6547 pring->prt[4].lpfc_sli_rcv_unsol_event =
6548 lpfc_sli4_ct_abort_unsol_event;
6477 break; 6549 break;
6478 } 6550 }
6479 totiocbsize += (pring->numCiocb * pring->sizeCiocb) + 6551 totiocbsize += (pring->numCiocb * pring->sizeCiocb) +
@@ -6976,8 +7048,18 @@ lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
6976 abort_iotag = cmdiocb->iocb.un.acxri.abortIoTag; 7048 abort_iotag = cmdiocb->iocb.un.acxri.abortIoTag;
6977 7049
6978 spin_lock_irq(&phba->hbalock); 7050 spin_lock_irq(&phba->hbalock);
6979 if (abort_iotag != 0 && abort_iotag <= phba->sli.last_iotag) 7051 if (phba->sli_rev < LPFC_SLI_REV4) {
6980 abort_iocb = phba->sli.iocbq_lookup[abort_iotag]; 7052 if (abort_iotag != 0 &&
7053 abort_iotag <= phba->sli.last_iotag)
7054 abort_iocb =
7055 phba->sli.iocbq_lookup[abort_iotag];
7056 } else
7057 /* For sli4 the abort_tag is the XRI,
7058 * so the abort routine puts the iotag of the iocb
7059 * being aborted in the context field of the abort
7060 * IOCB.
7061 */
7062 abort_iocb = phba->sli.iocbq_lookup[abort_context];
6981 7063
6982 lpfc_printf_log(phba, KERN_INFO, LOG_ELS | LOG_SLI, 7064 lpfc_printf_log(phba, KERN_INFO, LOG_ELS | LOG_SLI,
6983 "0327 Cannot abort els iocb %p " 7065 "0327 Cannot abort els iocb %p "
@@ -6991,9 +7073,18 @@ lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
6991 * might have completed already. Do not free it again. 7073 * might have completed already. Do not free it again.
6992 */ 7074 */
6993 if (irsp->ulpStatus == IOSTAT_LOCAL_REJECT) { 7075 if (irsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
6994 spin_unlock_irq(&phba->hbalock); 7076 if (irsp->un.ulpWord[4] != IOERR_NO_XRI) {
6995 lpfc_sli_release_iocbq(phba, cmdiocb); 7077 spin_unlock_irq(&phba->hbalock);
6996 return; 7078 lpfc_sli_release_iocbq(phba, cmdiocb);
7079 return;
7080 }
7081 /* For SLI4 the ulpContext field for abort IOCB
7082 * holds the iotag of the IOCB being aborted so
7083 * the local abort_context needs to be reset to
7084 * match the aborted IOCBs ulpContext.
7085 */
7086 if (abort_iocb && phba->sli_rev == LPFC_SLI_REV4)
7087 abort_context = abort_iocb->iocb.ulpContext;
6997 } 7088 }
6998 /* 7089 /*
6999 * make sure we have the right iocbq before taking it 7090 * make sure we have the right iocbq before taking it
@@ -7003,7 +7094,14 @@ lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
7003 abort_iocb->iocb.ulpContext != abort_context || 7094 abort_iocb->iocb.ulpContext != abort_context ||
7004 (abort_iocb->iocb_flag & LPFC_DRIVER_ABORTED) == 0) 7095 (abort_iocb->iocb_flag & LPFC_DRIVER_ABORTED) == 0)
7005 spin_unlock_irq(&phba->hbalock); 7096 spin_unlock_irq(&phba->hbalock);
7006 else { 7097 else if (phba->sli_rev < LPFC_SLI_REV4) {
7098 /*
7099 * leave the SLI4 aborted command on the txcmplq
7100 * list and the command complete WCQE's XB bit
7101 * will tell whether the SGL (XRI) can be released
7102 * immediately or to the aborted SGL list for the
7103 * following abort XRI from the HBA.
7104 */
7007 list_del_init(&abort_iocb->list); 7105 list_del_init(&abort_iocb->list);
7008 pring->txcmplq_cnt--; 7106 pring->txcmplq_cnt--;
7009 spin_unlock_irq(&phba->hbalock); 7107 spin_unlock_irq(&phba->hbalock);
@@ -7012,11 +7110,13 @@ lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
7012 * payload, so don't free data buffer till after 7110 * payload, so don't free data buffer till after
7013 * a hbeat. 7111 * a hbeat.
7014 */ 7112 */
7113 spin_lock_irq(&phba->hbalock);
7015 abort_iocb->iocb_flag |= LPFC_DELAY_MEM_FREE; 7114 abort_iocb->iocb_flag |= LPFC_DELAY_MEM_FREE;
7016
7017 abort_iocb->iocb_flag &= ~LPFC_DRIVER_ABORTED; 7115 abort_iocb->iocb_flag &= ~LPFC_DRIVER_ABORTED;
7116 spin_unlock_irq(&phba->hbalock);
7117
7018 abort_iocb->iocb.ulpStatus = IOSTAT_LOCAL_REJECT; 7118 abort_iocb->iocb.ulpStatus = IOSTAT_LOCAL_REJECT;
7019 abort_iocb->iocb.un.ulpWord[4] = IOERR_SLI_ABORTED; 7119 abort_iocb->iocb.un.ulpWord[4] = IOERR_ABORT_REQUESTED;
7020 (abort_iocb->iocb_cmpl)(phba, abort_iocb, abort_iocb); 7120 (abort_iocb->iocb_cmpl)(phba, abort_iocb, abort_iocb);
7021 } 7121 }
7022 } 7122 }
@@ -7105,20 +7205,27 @@ lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
7105 return 0; 7205 return 0;
7106 7206
7107 /* This signals the response to set the correct status 7207 /* This signals the response to set the correct status
7108 * before calling the completion handler. 7208 * before calling the completion handler
7109 */ 7209 */
7110 cmdiocb->iocb_flag |= LPFC_DRIVER_ABORTED; 7210 cmdiocb->iocb_flag |= LPFC_DRIVER_ABORTED;
7111 7211
7112 iabt = &abtsiocbp->iocb; 7212 iabt = &abtsiocbp->iocb;
7113 iabt->un.acxri.abortType = ABORT_TYPE_ABTS; 7213 iabt->un.acxri.abortType = ABORT_TYPE_ABTS;
7114 iabt->un.acxri.abortContextTag = icmd->ulpContext; 7214 iabt->un.acxri.abortContextTag = icmd->ulpContext;
7115 if (phba->sli_rev == LPFC_SLI_REV4) 7215 if (phba->sli_rev == LPFC_SLI_REV4) {
7116 iabt->un.acxri.abortIoTag = cmdiocb->sli4_xritag; 7216 iabt->un.acxri.abortIoTag = cmdiocb->sli4_xritag;
7217 iabt->un.acxri.abortContextTag = cmdiocb->iotag;
7218 }
7117 else 7219 else
7118 iabt->un.acxri.abortIoTag = icmd->ulpIoTag; 7220 iabt->un.acxri.abortIoTag = icmd->ulpIoTag;
7119 iabt->ulpLe = 1; 7221 iabt->ulpLe = 1;
7120 iabt->ulpClass = icmd->ulpClass; 7222 iabt->ulpClass = icmd->ulpClass;
7121 7223
7224 /* ABTS WQE must go to the same WQ as the WQE to be aborted */
7225 abtsiocbp->fcp_wqidx = cmdiocb->fcp_wqidx;
7226 if (cmdiocb->iocb_flag & LPFC_IO_FCP)
7227 abtsiocbp->iocb_flag |= LPFC_USE_FCPWQIDX;
7228
7122 if (phba->link_state >= LPFC_LINK_UP) 7229 if (phba->link_state >= LPFC_LINK_UP)
7123 iabt->ulpCommand = CMD_ABORT_XRI_CN; 7230 iabt->ulpCommand = CMD_ABORT_XRI_CN;
7124 else 7231 else
@@ -7322,6 +7429,11 @@ lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
7322 abtsiocb->iocb.ulpClass = cmd->ulpClass; 7429 abtsiocb->iocb.ulpClass = cmd->ulpClass;
7323 abtsiocb->vport = phba->pport; 7430 abtsiocb->vport = phba->pport;
7324 7431
7432 /* ABTS WQE must go to the same WQ as the WQE to be aborted */
7433 abtsiocb->fcp_wqidx = iocbq->fcp_wqidx;
7434 if (iocbq->iocb_flag & LPFC_IO_FCP)
7435 abtsiocb->iocb_flag |= LPFC_USE_FCPWQIDX;
7436
7325 if (lpfc_is_link_up(phba)) 7437 if (lpfc_is_link_up(phba))
7326 abtsiocb->iocb.ulpCommand = CMD_ABORT_XRI_CN; 7438 abtsiocb->iocb.ulpCommand = CMD_ABORT_XRI_CN;
7327 else 7439 else
@@ -7365,6 +7477,7 @@ lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba,
7365{ 7477{
7366 wait_queue_head_t *pdone_q; 7478 wait_queue_head_t *pdone_q;
7367 unsigned long iflags; 7479 unsigned long iflags;
7480 struct lpfc_scsi_buf *lpfc_cmd;
7368 7481
7369 spin_lock_irqsave(&phba->hbalock, iflags); 7482 spin_lock_irqsave(&phba->hbalock, iflags);
7370 cmdiocbq->iocb_flag |= LPFC_IO_WAKE; 7483 cmdiocbq->iocb_flag |= LPFC_IO_WAKE;
@@ -7372,6 +7485,14 @@ lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba,
7372 memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb, 7485 memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb,
7373 &rspiocbq->iocb, sizeof(IOCB_t)); 7486 &rspiocbq->iocb, sizeof(IOCB_t));
7374 7487
7488 /* Set the exchange busy flag for task management commands */
7489 if ((cmdiocbq->iocb_flag & LPFC_IO_FCP) &&
7490 !(cmdiocbq->iocb_flag & LPFC_IO_LIBDFC)) {
7491 lpfc_cmd = container_of(cmdiocbq, struct lpfc_scsi_buf,
7492 cur_iocbq);
7493 lpfc_cmd->exch_busy = rspiocbq->iocb_flag & LPFC_EXCHANGE_BUSY;
7494 }
7495
7375 pdone_q = cmdiocbq->context_un.wait_queue; 7496 pdone_q = cmdiocbq->context_un.wait_queue;
7376 if (pdone_q) 7497 if (pdone_q)
7377 wake_up(pdone_q); 7498 wake_up(pdone_q);
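The exchange-busy hunk relies on container_of() to step from the embedded cur_iocbq member back to its enclosing lpfc_scsi_buf. A self-contained sketch of that pointer arithmetic, with stand-in struct names:

    #include <stddef.h>
    #include <stdio.h>

    /* container_of() reduced to its core: subtract the member offset
     * from the member's address to recover the enclosing object. */
    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct fake_iocbq { int tag; };
    struct fake_scsi_buf {
        int exch_busy;
        struct fake_iocbq cur_iocbq;   /* embedded member */
    };

    int main(void)
    {
        struct fake_scsi_buf buf = { 0, { 42 } };
        struct fake_iocbq *iocbq = &buf.cur_iocbq;
        struct fake_scsi_buf *owner =
            container_of(iocbq, struct fake_scsi_buf, cur_iocbq);
        printf("%d\n", owner == &buf);   /* prints 1 */
        return 0;
    }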
@@ -7687,31 +7808,28 @@ static int
7687lpfc_sli4_eratt_read(struct lpfc_hba *phba) 7808lpfc_sli4_eratt_read(struct lpfc_hba *phba)
7688{ 7809{
7689 uint32_t uerr_sta_hi, uerr_sta_lo; 7810 uint32_t uerr_sta_hi, uerr_sta_lo;
7690 uint32_t onlnreg0, onlnreg1;
7691 7811
7692 /* For now, use the SLI4 device internal unrecoverable error 7812 /* For now, use the SLI4 device internal unrecoverable error
7693 * registers for error attention. This can be changed later. 7813 * registers for error attention. This can be changed later.
7694 */ 7814 */
7695 onlnreg0 = readl(phba->sli4_hba.ONLINE0regaddr); 7815 uerr_sta_lo = readl(phba->sli4_hba.UERRLOregaddr);
7696 onlnreg1 = readl(phba->sli4_hba.ONLINE1regaddr); 7816 uerr_sta_hi = readl(phba->sli4_hba.UERRHIregaddr);
7697 if ((onlnreg0 != LPFC_ONLINE_NERR) || (onlnreg1 != LPFC_ONLINE_NERR)) { 7817 if ((~phba->sli4_hba.ue_mask_lo & uerr_sta_lo) ||
7698 uerr_sta_lo = readl(phba->sli4_hba.UERRLOregaddr); 7818 (~phba->sli4_hba.ue_mask_hi & uerr_sta_hi)) {
7699 uerr_sta_hi = readl(phba->sli4_hba.UERRHIregaddr); 7819 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7700 if (uerr_sta_lo || uerr_sta_hi) { 7820 "1423 HBA Unrecoverable error: "
7701 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7821 "uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, "
7702 "1423 HBA Unrecoverable error: " 7822 "ue_mask_lo_reg=0x%x, ue_mask_hi_reg=0x%x\n",
7703 "uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, " 7823 uerr_sta_lo, uerr_sta_hi,
7704 "online0_reg=0x%x, online1_reg=0x%x\n", 7824 phba->sli4_hba.ue_mask_lo,
7705 uerr_sta_lo, uerr_sta_hi, 7825 phba->sli4_hba.ue_mask_hi);
7706 onlnreg0, onlnreg1); 7826 phba->work_status[0] = uerr_sta_lo;
7707 phba->work_status[0] = uerr_sta_lo; 7827 phba->work_status[1] = uerr_sta_hi;
7708 phba->work_status[1] = uerr_sta_hi; 7828 /* Set the driver HA work bitmap */
7709 /* Set the driver HA work bitmap */ 7829 phba->work_ha |= HA_ERATT;
7710 phba->work_ha |= HA_ERATT; 7830 /* Indicate polling handles this ERATT */
7711 /* Indicate polling handles this ERATT */ 7831 phba->hba_flag |= HBA_ERATT_HANDLED;
7712 phba->hba_flag |= HBA_ERATT_HANDLED; 7832 return 1;
7713 return 1;
7714 }
7715 } 7833 }
7716 return 0; 7834 return 0;
7717} 7835}
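The rewritten check treats a status bit as a real error only when its ue_mask bit is clear, i.e. (~mask & status) != 0. A tiny runnable sketch with made-up register values:

    #include <stdint.h>
    #include <stdio.h>

    static int has_unmasked_error(uint32_t status, uint32_t mask)
    {
        return (~mask & status) != 0;    /* masked bits never count */
    }

    int main(void)
    {
        printf("%d\n", has_unmasked_error(0x4, 0x4)); /* 0: bit is masked */
        printf("%d\n", has_unmasked_error(0x4, 0x0)); /* 1: unrecoverable */
        return 0;
    }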
@@ -7834,7 +7952,7 @@ irqreturn_t
7834lpfc_sli_sp_intr_handler(int irq, void *dev_id) 7952lpfc_sli_sp_intr_handler(int irq, void *dev_id)
7835{ 7953{
7836 struct lpfc_hba *phba; 7954 struct lpfc_hba *phba;
7837 uint32_t ha_copy; 7955 uint32_t ha_copy, hc_copy;
7838 uint32_t work_ha_copy; 7956 uint32_t work_ha_copy;
7839 unsigned long status; 7957 unsigned long status;
7840 unsigned long iflag; 7958 unsigned long iflag;
@@ -7892,8 +8010,13 @@ lpfc_sli_sp_intr_handler(int irq, void *dev_id)
7892 } 8010 }
7893 8011
7894 /* Clear up only attention source related to slow-path */ 8012 /* Clear up only attention source related to slow-path */
8013 hc_copy = readl(phba->HCregaddr);
8014 writel(hc_copy & ~(HC_MBINT_ENA | HC_R2INT_ENA |
8015 HC_LAINT_ENA | HC_ERINT_ENA),
8016 phba->HCregaddr);
7895 writel((ha_copy & (HA_MBATT | HA_R2_CLR_MSK)), 8017 writel((ha_copy & (HA_MBATT | HA_R2_CLR_MSK)),
7896 phba->HAregaddr); 8018 phba->HAregaddr);
8019 writel(hc_copy, phba->HCregaddr);
7897 readl(phba->HAregaddr); /* flush */ 8020 readl(phba->HAregaddr); /* flush */
7898 spin_unlock_irqrestore(&phba->hbalock, iflag); 8021 spin_unlock_irqrestore(&phba->hbalock, iflag);
7899 } else 8022 } else
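Both interrupt-handler hunks follow the same save/mask/clear/restore shape. A sketch of that sequence with plain pointers modeling the MMIO registers (readl/writel here are local stand-ins, not the kernel accessors):

    #include <stdint.h>

    static uint32_t readl(volatile uint32_t *r)          { return *r; }
    static void writel(uint32_t v, volatile uint32_t *r) { *r = v; }

    static void clear_attentions(volatile uint32_t *hc, volatile uint32_t *ha,
                                 uint32_t enable_bits, uint32_t attn_bits)
    {
        uint32_t hc_copy = readl(hc);
        writel(hc_copy & ~enable_bits, hc); /* quiesce interrupt sources */
        writel(attn_bits, ha);              /* clear the attention bits */
        writel(hc_copy, hc);                /* restore the saved enables */
        (void)readl(ha);                    /* read back to flush writes */
    }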
@@ -8049,7 +8172,7 @@ lpfc_sli_sp_intr_handler(int irq, void *dev_id)
8049 KERN_ERR, 8172 KERN_ERR,
8050 LOG_MBOX | LOG_SLI, 8173 LOG_MBOX | LOG_SLI,
8051 "0350 rc should have" 8174 "0350 rc should have"
8052 "been MBX_BUSY"); 8175 "been MBX_BUSY\n");
8053 if (rc != MBX_NOT_FINISHED) 8176 if (rc != MBX_NOT_FINISHED)
8054 goto send_current_mbox; 8177 goto send_current_mbox;
8055 } 8178 }
@@ -8078,7 +8201,7 @@ send_current_mbox:
8078 if (rc != MBX_SUCCESS) 8201 if (rc != MBX_SUCCESS)
8079 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | 8202 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX |
8080 LOG_SLI, "0349 rc should be " 8203 LOG_SLI, "0349 rc should be "
8081 "MBX_SUCCESS"); 8204 "MBX_SUCCESS\n");
8082 } 8205 }
8083 8206
8084 spin_lock_irqsave(&phba->hbalock, iflag); 8207 spin_lock_irqsave(&phba->hbalock, iflag);
@@ -8203,6 +8326,7 @@ lpfc_sli_intr_handler(int irq, void *dev_id)
8203 struct lpfc_hba *phba; 8326 struct lpfc_hba *phba;
8204 irqreturn_t sp_irq_rc, fp_irq_rc; 8327 irqreturn_t sp_irq_rc, fp_irq_rc;
8205 unsigned long status1, status2; 8328 unsigned long status1, status2;
8329 uint32_t hc_copy;
8206 8330
8207 /* 8331 /*
8208 * Get the driver's phba structure from the dev_id and 8332 * Get the driver's phba structure from the dev_id and
@@ -8240,7 +8364,12 @@ lpfc_sli_intr_handler(int irq, void *dev_id)
8240 } 8364 }
8241 8365
8242 /* Clear attention sources except link and error attentions */ 8366 /* Clear attention sources except link and error attentions */
8367 hc_copy = readl(phba->HCregaddr);
8368 writel(hc_copy & ~(HC_MBINT_ENA | HC_R0INT_ENA | HC_R1INT_ENA
8369 | HC_R2INT_ENA | HC_LAINT_ENA | HC_ERINT_ENA),
8370 phba->HCregaddr);
8243 writel((phba->ha_copy & ~(HA_LATT | HA_ERATT)), phba->HAregaddr); 8371 writel((phba->ha_copy & ~(HA_LATT | HA_ERATT)), phba->HAregaddr);
8372 writel(hc_copy, phba->HCregaddr);
8244 readl(phba->HAregaddr); /* flush */ 8373 readl(phba->HAregaddr); /* flush */
8245 spin_unlock(&phba->hbalock); 8374 spin_unlock(&phba->hbalock);
8246 8375
@@ -8342,17 +8471,28 @@ void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *phba)
8342 } 8471 }
8343} 8472}
8344 8473
8474/**
8475 * lpfc_sli4_iocb_param_transfer - Transfer pIocbOut and cmpl status to pIocbIn
8476 * @phba: pointer to lpfc hba data structure
8477 * @pIocbIn: pointer to the rspiocbq
8478 * @pIocbOut: pointer to the cmdiocbq
8479 * @wcqe: pointer to the complete wcqe
8480 *
8481 * This routine transfers the fields of a command iocbq to a response iocbq
8482 * by copying all the IOCB fields from command iocbq and transferring the
8483 * completion status information from the complete wcqe.
8484 **/
8345static void 8485static void
8346lpfc_sli4_iocb_param_transfer(struct lpfc_iocbq *pIocbIn, 8486lpfc_sli4_iocb_param_transfer(struct lpfc_hba *phba,
8487 struct lpfc_iocbq *pIocbIn,
8347 struct lpfc_iocbq *pIocbOut, 8488 struct lpfc_iocbq *pIocbOut,
8348 struct lpfc_wcqe_complete *wcqe) 8489 struct lpfc_wcqe_complete *wcqe)
8349{ 8490{
8491 unsigned long iflags;
8350 size_t offset = offsetof(struct lpfc_iocbq, iocb); 8492 size_t offset = offsetof(struct lpfc_iocbq, iocb);
8351 8493
8352 memcpy((char *)pIocbIn + offset, (char *)pIocbOut + offset, 8494 memcpy((char *)pIocbIn + offset, (char *)pIocbOut + offset,
8353 sizeof(struct lpfc_iocbq) - offset); 8495 sizeof(struct lpfc_iocbq) - offset);
8354 memset(&pIocbIn->sli4_info, 0,
8355 sizeof(struct lpfc_sli4_rspiocb_info));
8356 /* Map WCQE parameters into irspiocb parameters */ 8496 /* Map WCQE parameters into irspiocb parameters */
8357 pIocbIn->iocb.ulpStatus = bf_get(lpfc_wcqe_c_status, wcqe); 8497 pIocbIn->iocb.ulpStatus = bf_get(lpfc_wcqe_c_status, wcqe);
8358 if (pIocbOut->iocb_flag & LPFC_IO_FCP) 8498 if (pIocbOut->iocb_flag & LPFC_IO_FCP)
@@ -8362,18 +8502,60 @@ lpfc_sli4_iocb_param_transfer(struct lpfc_iocbq *pIocbIn,
8362 wcqe->total_data_placed; 8502 wcqe->total_data_placed;
8363 else 8503 else
8364 pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter; 8504 pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
8365 else 8505 else {
8366 pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter; 8506 pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
8367 /* Load in additional WCQE parameters */ 8507 pIocbIn->iocb.un.genreq64.bdl.bdeSize = wcqe->total_data_placed;
8368 pIocbIn->sli4_info.hw_status = bf_get(lpfc_wcqe_c_hw_status, wcqe); 8508 }
8369 pIocbIn->sli4_info.bfield = 0; 8509
8370 if (bf_get(lpfc_wcqe_c_xb, wcqe)) 8510 /* Pick up HBA exchange busy condition */
8371 pIocbIn->sli4_info.bfield |= LPFC_XB; 8511 if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
8372 if (bf_get(lpfc_wcqe_c_pv, wcqe)) { 8512 spin_lock_irqsave(&phba->hbalock, iflags);
8373 pIocbIn->sli4_info.bfield |= LPFC_PV; 8513 pIocbIn->iocb_flag |= LPFC_EXCHANGE_BUSY;
8374 pIocbIn->sli4_info.priority = 8514 spin_unlock_irqrestore(&phba->hbalock, iflags);
8375 bf_get(lpfc_wcqe_c_priority, wcqe); 8515 }
8516}
8517
8518/**
8519 * lpfc_sli4_els_wcqe_to_rspiocbq - Get response iocbq from els wcqe
8520 * @phba: Pointer to HBA context object.
8521 * @wcqe: Pointer to work-queue completion queue entry.
8522 *
 8523 * This routine handles an ELS work-queue completion event and constructs
 8524 * a pseudo response ELS IOCBQ from the SLI4 ELS WCQE for the common
8525 * discovery engine to handle.
8526 *
8527 * Return: Pointer to the receive IOCBQ, NULL otherwise.
8528 **/
8529static struct lpfc_iocbq *
8530lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *phba,
8531 struct lpfc_iocbq *irspiocbq)
8532{
8533 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
8534 struct lpfc_iocbq *cmdiocbq;
8535 struct lpfc_wcqe_complete *wcqe;
8536 unsigned long iflags;
8537
8538 wcqe = &irspiocbq->cq_event.cqe.wcqe_cmpl;
8539 spin_lock_irqsave(&phba->hbalock, iflags);
8540 pring->stats.iocb_event++;
8541 /* Look up the ELS command IOCB and create pseudo response IOCB */
8542 cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
8543 bf_get(lpfc_wcqe_c_request_tag, wcqe));
8544 spin_unlock_irqrestore(&phba->hbalock, iflags);
8545
8546 if (unlikely(!cmdiocbq)) {
8547 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
8548 "0386 ELS complete with no corresponding "
8549 "cmdiocb: iotag (%d)\n",
8550 bf_get(lpfc_wcqe_c_request_tag, wcqe));
8551 lpfc_sli_release_iocbq(phba, irspiocbq);
8552 return NULL;
8376 } 8553 }
8554
8555 /* Fake the irspiocbq and copy necessary response information */
8556 lpfc_sli4_iocb_param_transfer(phba, irspiocbq, cmdiocbq, wcqe);
8557
8558 return irspiocbq;
8377} 8559}
8378 8560
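The transfer routine's memcpy starts at offsetof(struct lpfc_iocbq, iocb), so the list linkage ahead of that member survives the copy. The same trick in isolation, with a stand-in struct:

    #include <stddef.h>
    #include <string.h>

    struct fake_iocbq {
        void *list_link;     /* anything before 'iocb' is preserved */
        int iocb;            /* the copy starts at this member */
        int extra_state;     /* ...and runs to the end of the struct */
    };

    static void transfer_tail(struct fake_iocbq *dst,
                              const struct fake_iocbq *src)
    {
        size_t offset = offsetof(struct fake_iocbq, iocb);
        memcpy((char *)dst + offset, (const char *)src + offset,
               sizeof(*dst) - offset);
    }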
8379/** 8561/**
@@ -8566,45 +8748,26 @@ static bool
8566lpfc_sli4_sp_handle_els_wcqe(struct lpfc_hba *phba, 8748lpfc_sli4_sp_handle_els_wcqe(struct lpfc_hba *phba,
8567 struct lpfc_wcqe_complete *wcqe) 8749 struct lpfc_wcqe_complete *wcqe)
8568{ 8750{
8569 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
8570 struct lpfc_iocbq *cmdiocbq;
8571 struct lpfc_iocbq *irspiocbq; 8751 struct lpfc_iocbq *irspiocbq;
8572 unsigned long iflags; 8752 unsigned long iflags;
8573 bool workposted = false;
8574 8753
8575 spin_lock_irqsave(&phba->hbalock, iflags); 8754 /* Get an irspiocbq for later ELS response processing use */
8576 pring->stats.iocb_event++;
8577 /* Look up the ELS command IOCB and create pseudo response IOCB */
8578 cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
8579 bf_get(lpfc_wcqe_c_request_tag, wcqe));
8580 spin_unlock_irqrestore(&phba->hbalock, iflags);
8581
8582 if (unlikely(!cmdiocbq)) {
8583 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
8584 "0386 ELS complete with no corresponding "
8585 "cmdiocb: iotag (%d)\n",
8586 bf_get(lpfc_wcqe_c_request_tag, wcqe));
8587 return workposted;
8588 }
8589
8590 /* Fake the irspiocbq and copy necessary response information */
8591 irspiocbq = lpfc_sli_get_iocbq(phba); 8755 irspiocbq = lpfc_sli_get_iocbq(phba);
8592 if (!irspiocbq) { 8756 if (!irspiocbq) {
8593 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 8757 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8594 "0387 Failed to allocate an iocbq\n"); 8758 "0387 Failed to allocate an iocbq\n");
8595 return workposted; 8759 return false;
8596 } 8760 }
8597 lpfc_sli4_iocb_param_transfer(irspiocbq, cmdiocbq, wcqe);
8598 8761
8599 /* Add the irspiocb to the response IOCB work list */ 8762 /* Save off the slow-path queue event for work thread to process */
8763 memcpy(&irspiocbq->cq_event.cqe.wcqe_cmpl, wcqe, sizeof(*wcqe));
8600 spin_lock_irqsave(&phba->hbalock, iflags); 8764 spin_lock_irqsave(&phba->hbalock, iflags);
8601 list_add_tail(&irspiocbq->list, &phba->sli4_hba.sp_rspiocb_work_queue); 8765 list_add_tail(&irspiocbq->cq_event.list,
8602 /* Indicate ELS ring attention */ 8766 &phba->sli4_hba.sp_queue_event);
8603 phba->work_ha |= (HA_R0ATT << (4*LPFC_ELS_RING)); 8767 phba->hba_flag |= HBA_SP_QUEUE_EVT;
8604 spin_unlock_irqrestore(&phba->hbalock, iflags); 8768 spin_unlock_irqrestore(&phba->hbalock, iflags);
8605 workposted = true;
8606 8769
8607 return workposted; 8770 return true;
8608} 8771}
8609 8772
8610/** 8773/**
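Instead of faking a ring attention, the reworked handler just queues the event and sets HBA_SP_QUEUE_EVT for the worker thread. A minimal producer/consumer sketch of that hand-off (a mutex and a LIFO list stand in for hbalock and the driver's FIFO sp_queue_event list):

    #include <pthread.h>
    #include <stddef.h>

    #define SP_QUEUE_EVT 0x1
    struct cq_event { struct cq_event *next; };

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static struct cq_event *sp_queue;
    static unsigned int hba_flag;

    static void post_event(struct cq_event *evt)   /* interrupt side */
    {
        pthread_mutex_lock(&lock);
        evt->next = sp_queue;
        sp_queue = evt;
        hba_flag |= SP_QUEUE_EVT;          /* tell the worker to run */
        pthread_mutex_unlock(&lock);
    }

    static struct cq_event *drain_one(void)        /* worker side */
    {
        pthread_mutex_lock(&lock);
        struct cq_event *evt = sp_queue;
        if (evt)
            sp_queue = evt->next;
        if (!sp_queue)
            hba_flag &= ~SP_QUEUE_EVT;     /* nothing left to process */
        pthread_mutex_unlock(&lock);
        return evt;
    }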
@@ -8690,52 +8853,6 @@ lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba,
8690} 8853}
8691 8854
8692/** 8855/**
8693 * lpfc_sli4_sp_handle_wcqe - Process a work-queue completion queue entry
8694 * @phba: Pointer to HBA context object.
8695 * @cq: Pointer to the completion queue.
8696 * @wcqe: Pointer to a completion queue entry.
8697 *
8698 * This routine process a slow-path work-queue completion queue entry.
8699 *
8700 * Return: true if work posted to worker thread, otherwise false.
8701 **/
8702static bool
8703lpfc_sli4_sp_handle_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
8704 struct lpfc_cqe *cqe)
8705{
8706 struct lpfc_wcqe_complete wcqe;
8707 bool workposted = false;
8708
8709 /* Copy the work queue CQE and convert endian order if needed */
8710 lpfc_sli_pcimem_bcopy(cqe, &wcqe, sizeof(struct lpfc_cqe));
8711
8712 /* Check and process for different type of WCQE and dispatch */
8713 switch (bf_get(lpfc_wcqe_c_code, &wcqe)) {
8714 case CQE_CODE_COMPL_WQE:
8715 /* Process the WQ complete event */
8716 workposted = lpfc_sli4_sp_handle_els_wcqe(phba,
8717 (struct lpfc_wcqe_complete *)&wcqe);
8718 break;
8719 case CQE_CODE_RELEASE_WQE:
8720 /* Process the WQ release event */
8721 lpfc_sli4_sp_handle_rel_wcqe(phba,
8722 (struct lpfc_wcqe_release *)&wcqe);
8723 break;
8724 case CQE_CODE_XRI_ABORTED:
8725 /* Process the WQ XRI abort event */
8726 workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
8727 (struct sli4_wcqe_xri_aborted *)&wcqe);
8728 break;
8729 default:
8730 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8731 "0388 Not a valid WCQE code: x%x\n",
8732 bf_get(lpfc_wcqe_c_code, &wcqe));
8733 break;
8734 }
8735 return workposted;
8736}
8737
8738/**
8739 * lpfc_sli4_sp_handle_rcqe - Process a receive-queue completion queue entry 8856 * lpfc_sli4_sp_handle_rcqe - Process a receive-queue completion queue entry
8740 * @phba: Pointer to HBA context object. 8857 * @phba: Pointer to HBA context object.
8741 * @rcqe: Pointer to receive-queue completion queue entry. 8858 * @rcqe: Pointer to receive-queue completion queue entry.
@@ -8745,9 +8862,8 @@ lpfc_sli4_sp_handle_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
8745 * Return: true if work posted to worker thread, otherwise false. 8862 * Return: true if work posted to worker thread, otherwise false.
8746 **/ 8863 **/
8747static bool 8864static bool
8748lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_cqe *cqe) 8865lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
8749{ 8866{
8750 struct lpfc_rcqe rcqe;
8751 bool workposted = false; 8867 bool workposted = false;
8752 struct lpfc_queue *hrq = phba->sli4_hba.hdr_rq; 8868 struct lpfc_queue *hrq = phba->sli4_hba.hdr_rq;
8753 struct lpfc_queue *drq = phba->sli4_hba.dat_rq; 8869 struct lpfc_queue *drq = phba->sli4_hba.dat_rq;
@@ -8755,31 +8871,28 @@ lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_cqe *cqe)
8755 uint32_t status; 8871 uint32_t status;
8756 unsigned long iflags; 8872 unsigned long iflags;
8757 8873
8758 /* Copy the receive queue CQE and convert endian order if needed */ 8874 if (bf_get(lpfc_rcqe_rq_id, rcqe) != hrq->queue_id)
8759 lpfc_sli_pcimem_bcopy(cqe, &rcqe, sizeof(struct lpfc_rcqe));
8760 lpfc_sli4_rq_release(hrq, drq);
8761 if (bf_get(lpfc_rcqe_code, &rcqe) != CQE_CODE_RECEIVE)
8762 goto out;
8763 if (bf_get(lpfc_rcqe_rq_id, &rcqe) != hrq->queue_id)
8764 goto out; 8875 goto out;
8765 8876
8766 status = bf_get(lpfc_rcqe_status, &rcqe); 8877 status = bf_get(lpfc_rcqe_status, rcqe);
8767 switch (status) { 8878 switch (status) {
8768 case FC_STATUS_RQ_BUF_LEN_EXCEEDED: 8879 case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
8769 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 8880 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8770 "2537 Receive Frame Truncated!!\n"); 8881 "2537 Receive Frame Truncated!!\n");
8771 case FC_STATUS_RQ_SUCCESS: 8882 case FC_STATUS_RQ_SUCCESS:
8883 lpfc_sli4_rq_release(hrq, drq);
8772 spin_lock_irqsave(&phba->hbalock, iflags); 8884 spin_lock_irqsave(&phba->hbalock, iflags);
8773 dma_buf = lpfc_sli_hbqbuf_get(&phba->hbqs[0].hbq_buffer_list); 8885 dma_buf = lpfc_sli_hbqbuf_get(&phba->hbqs[0].hbq_buffer_list);
8774 if (!dma_buf) { 8886 if (!dma_buf) {
8775 spin_unlock_irqrestore(&phba->hbalock, iflags); 8887 spin_unlock_irqrestore(&phba->hbalock, iflags);
8776 goto out; 8888 goto out;
8777 } 8889 }
8778 memcpy(&dma_buf->rcqe, &rcqe, sizeof(rcqe)); 8890 memcpy(&dma_buf->cq_event.cqe.rcqe_cmpl, rcqe, sizeof(*rcqe));
8779 /* save off the frame for the work thread to process */ 8891
8780 list_add_tail(&dma_buf->dbuf.list, &phba->rb_pend_list); 8892 list_add_tail(&dma_buf->cq_event.list,
8893 &phba->sli4_hba.sp_queue_event);
8781 /* Frame received */ 8894 /* Frame received */
8782 phba->hba_flag |= HBA_RECEIVE_BUFFER; 8895 phba->hba_flag |= HBA_SP_QUEUE_EVT;
8783 spin_unlock_irqrestore(&phba->hbalock, iflags); 8896 spin_unlock_irqrestore(&phba->hbalock, iflags);
8784 workposted = true; 8897 workposted = true;
8785 break; 8898 break;
@@ -8794,7 +8907,58 @@ lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_cqe *cqe)
8794 } 8907 }
8795out: 8908out:
8796 return workposted; 8909 return workposted;
8910}
8911
8912/**
8913 * lpfc_sli4_sp_handle_cqe - Process a slow path completion queue entry
8914 * @phba: Pointer to HBA context object.
8915 * @cq: Pointer to the completion queue.
8916 * @wcqe: Pointer to a completion queue entry.
8917 *
 8918 * This routine processes a slow-path work-queue or receive-queue
 8919 * completion queue entry.
8920 *
8921 * Return: true if work posted to worker thread, otherwise false.
8922 **/
8923static bool
8924lpfc_sli4_sp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
8925 struct lpfc_cqe *cqe)
8926{
8927 struct lpfc_cqe cqevt;
8928 bool workposted = false;
8929
8930 /* Copy the work queue CQE and convert endian order if needed */
8931 lpfc_sli_pcimem_bcopy(cqe, &cqevt, sizeof(struct lpfc_cqe));
8797 8932
8933 /* Check and process for different type of WCQE and dispatch */
8934 switch (bf_get(lpfc_cqe_code, &cqevt)) {
8935 case CQE_CODE_COMPL_WQE:
8936 /* Process the WQ/RQ complete event */
8937 workposted = lpfc_sli4_sp_handle_els_wcqe(phba,
8938 (struct lpfc_wcqe_complete *)&cqevt);
8939 break;
8940 case CQE_CODE_RELEASE_WQE:
8941 /* Process the WQ release event */
8942 lpfc_sli4_sp_handle_rel_wcqe(phba,
8943 (struct lpfc_wcqe_release *)&cqevt);
8944 break;
8945 case CQE_CODE_XRI_ABORTED:
8946 /* Process the WQ XRI abort event */
8947 workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
8948 (struct sli4_wcqe_xri_aborted *)&cqevt);
8949 break;
8950 case CQE_CODE_RECEIVE:
8951 /* Process the RQ event */
8952 workposted = lpfc_sli4_sp_handle_rcqe(phba,
8953 (struct lpfc_rcqe *)&cqevt);
8954 break;
8955 default:
8956 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8957 "0388 Not a valid WCQE code: x%x\n",
8958 bf_get(lpfc_cqe_code, &cqevt));
8959 break;
8960 }
8961 return workposted;
8798} 8962}
8799 8963
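The consolidated lpfc_sli4_sp_handle_cqe() pattern is: snapshot the entry first, then dispatch on its code field. In miniature, with invented codes and handlers:

    #include <string.h>

    enum { CQE_COMPL_WQE = 1, CQE_RELEASE_WQE = 2, CQE_RECEIVE = 4 };
    struct cqe { int code; int payload; };

    static int handle_els_wcqe(const struct cqe *c) { (void)c; return 1; }
    static int handle_rcqe(const struct cqe *c)     { (void)c; return 1; }

    static int handle_cqe(const struct cqe *hw_entry)
    {
        struct cqe snap;
        /* copy first: the queue slot may be recycled while we work */
        memcpy(&snap, hw_entry, sizeof(snap));
        switch (snap.code) {
        case CQE_COMPL_WQE:   return handle_els_wcqe(&snap);
        case CQE_RECEIVE:     return handle_rcqe(&snap);
        case CQE_RELEASE_WQE: return 0;  /* consumed inline, no worker */
        default:              return 0;  /* unknown code: log and drop */
        }
    }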
8800/** 8964/**
@@ -8819,8 +8983,7 @@ lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe)
8819 int ecount = 0; 8983 int ecount = 0;
8820 uint16_t cqid; 8984 uint16_t cqid;
8821 8985
8822 if (bf_get(lpfc_eqe_major_code, eqe) != 0 || 8986 if (bf_get(lpfc_eqe_major_code, eqe) != 0) {
8823 bf_get(lpfc_eqe_minor_code, eqe) != 0) {
8824 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 8987 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8825 "0359 Not a valid slow-path completion " 8988 "0359 Not a valid slow-path completion "
8826 "event: majorcode=x%x, minorcode=x%x\n", 8989 "event: majorcode=x%x, minorcode=x%x\n",
@@ -8858,14 +9021,7 @@ lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe)
8858 break; 9021 break;
8859 case LPFC_WCQ: 9022 case LPFC_WCQ:
8860 while ((cqe = lpfc_sli4_cq_get(cq))) { 9023 while ((cqe = lpfc_sli4_cq_get(cq))) {
8861 workposted |= lpfc_sli4_sp_handle_wcqe(phba, cq, cqe); 9024 workposted |= lpfc_sli4_sp_handle_cqe(phba, cq, cqe);
8862 if (!(++ecount % LPFC_GET_QE_REL_INT))
8863 lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
8864 }
8865 break;
8866 case LPFC_RCQ:
8867 while ((cqe = lpfc_sli4_cq_get(cq))) {
8868 workposted |= lpfc_sli4_sp_handle_rcqe(phba, cqe);
8869 if (!(++ecount % LPFC_GET_QE_REL_INT)) 9025 if (!(++ecount % LPFC_GET_QE_REL_INT))
8870 lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM); 9026 lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
8871 } 9027 }
@@ -8953,7 +9109,13 @@ lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc_hba *phba,
8953 } 9109 }
8954 9110
8955 /* Fake the irspiocb and copy necessary response information */ 9111 /* Fake the irspiocb and copy necessary response information */
8956 lpfc_sli4_iocb_param_transfer(&irspiocbq, cmdiocbq, wcqe); 9112 lpfc_sli4_iocb_param_transfer(phba, &irspiocbq, cmdiocbq, wcqe);
9113
9114 if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED) {
9115 spin_lock_irqsave(&phba->hbalock, iflags);
9116 cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
9117 spin_unlock_irqrestore(&phba->hbalock, iflags);
9118 }
8957 9119
8958 /* Pass the cmd_iocb and the rsp state to the upper layer */ 9120 /* Pass the cmd_iocb and the rsp state to the upper layer */
8959 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, &irspiocbq); 9121 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, &irspiocbq);
@@ -9059,8 +9221,7 @@ lpfc_sli4_fp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
9059 uint16_t cqid; 9221 uint16_t cqid;
9060 int ecount = 0; 9222 int ecount = 0;
9061 9223
9062 if (unlikely(bf_get(lpfc_eqe_major_code, eqe) != 0) || 9224 if (unlikely(bf_get(lpfc_eqe_major_code, eqe) != 0)) {
9063 unlikely(bf_get(lpfc_eqe_minor_code, eqe) != 0)) {
9064 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 9225 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
9065 "0366 Not a valid fast-path completion " 9226 "0366 Not a valid fast-path completion "
9066 "event: majorcode=x%x, minorcode=x%x\n", 9227 "event: majorcode=x%x, minorcode=x%x\n",
@@ -10427,8 +10588,7 @@ lpfc_sli4_next_xritag(struct lpfc_hba *phba)
10427 return xritag; 10588 return xritag;
10428 } 10589 }
10429 spin_unlock_irq(&phba->hbalock); 10590 spin_unlock_irq(&phba->hbalock);
10430 10591 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
10431 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
10432 "2004 Failed to allocate XRI.last XRITAG is %d" 10592 "2004 Failed to allocate XRI.last XRITAG is %d"
10433 " Max XRI is %d, Used XRI is %d\n", 10593 " Max XRI is %d, Used XRI is %d\n",
10434 phba->sli4_hba.next_xri, 10594 phba->sli4_hba.next_xri,
@@ -10492,15 +10652,7 @@ lpfc_sli4_post_sgl_list(struct lpfc_hba *phba)
10492 lpfc_sli4_mbox_cmd_free(phba, mbox); 10652 lpfc_sli4_mbox_cmd_free(phba, mbox);
10493 return -ENOMEM; 10653 return -ENOMEM;
10494 } 10654 }
10495
10496 /* Get the first SGE entry from the non-embedded DMA memory */ 10655 /* Get the first SGE entry from the non-embedded DMA memory */
10497 if (unlikely(!mbox->sge_array)) {
10498 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
10499 "2525 Failed to get the non-embedded SGE "
10500 "virtual address\n");
10501 lpfc_sli4_mbox_cmd_free(phba, mbox);
10502 return -ENOMEM;
10503 }
10504 viraddr = mbox->sge_array->addr[0]; 10656 viraddr = mbox->sge_array->addr[0];
10505 10657
10506 /* Set up the SGL pages in the non-embedded DMA pages */ 10658 /* Set up the SGL pages in the non-embedded DMA pages */
@@ -10524,8 +10676,7 @@ lpfc_sli4_post_sgl_list(struct lpfc_hba *phba)
10524 sgl_pg_pairs++; 10676 sgl_pg_pairs++;
10525 } 10677 }
10526 bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start); 10678 bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
10527 pg_pairs = (pg_pairs > 0) ? (pg_pairs - 1) : pg_pairs; 10679 bf_set(lpfc_post_sgl_pages_xricnt, sgl, els_xri_cnt);
10528 bf_set(lpfc_post_sgl_pages_xricnt, sgl, pg_pairs);
10529 /* Perform endian conversion if necessary */ 10680 /* Perform endian conversion if necessary */
10530 sgl->word0 = cpu_to_le32(sgl->word0); 10681 sgl->word0 = cpu_to_le32(sgl->word0);
10531 10682
@@ -10607,15 +10758,7 @@ lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *phba, struct list_head *sblist,
10607 lpfc_sli4_mbox_cmd_free(phba, mbox); 10758 lpfc_sli4_mbox_cmd_free(phba, mbox);
10608 return -ENOMEM; 10759 return -ENOMEM;
10609 } 10760 }
10610
10611 /* Get the first SGE entry from the non-embedded DMA memory */ 10761 /* Get the first SGE entry from the non-embedded DMA memory */
10612 if (unlikely(!mbox->sge_array)) {
10613 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
10614 "2565 Failed to get the non-embedded SGE "
10615 "virtual address\n");
10616 lpfc_sli4_mbox_cmd_free(phba, mbox);
10617 return -ENOMEM;
10618 }
10619 viraddr = mbox->sge_array->addr[0]; 10762 viraddr = mbox->sge_array->addr[0];
10620 10763
10621 /* Set up the SGL pages in the non-embedded DMA pages */ 10764 /* Set up the SGL pages in the non-embedded DMA pages */
@@ -10802,6 +10945,105 @@ lpfc_fc_frame_to_vport(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr,
10802} 10945}
10803 10946
10804/** 10947/**
10948 * lpfc_update_rcv_time_stamp - Update vport's rcv seq time stamp
10949 * @vport: The vport to work on.
10950 *
10951 * This function updates the receive sequence time stamp for this vport. The
10952 * receive sequence time stamp indicates the time that the last frame of
10953 * the sequence that has been idle for the longest amount of time was received.
10954 * The driver uses this time stamp to determine whether any received sequences
10955 * have timed out.
10956 **/
10957void
10958lpfc_update_rcv_time_stamp(struct lpfc_vport *vport)
10959{
10960 struct lpfc_dmabuf *h_buf;
10961 struct hbq_dmabuf *dmabuf = NULL;
10962
10963 /* get the oldest sequence on the rcv list */
10964 h_buf = list_get_first(&vport->rcv_buffer_list,
10965 struct lpfc_dmabuf, list);
10966 if (!h_buf)
10967 return;
10968 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
10969 vport->rcv_buffer_time_stamp = dmabuf->time_stamp;
10970}
10971
10972/**
10973 * lpfc_cleanup_rcv_buffers - Cleans up all outstanding receive sequences.
10974 * @vport: The vport that the received sequences were sent to.
10975 *
10976 * This function cleans up all outstanding received sequences. This is called
10977 * by the driver when a link event or user action invalidates all the received
10978 * sequences.
10979 **/
10980void
10981lpfc_cleanup_rcv_buffers(struct lpfc_vport *vport)
10982{
10983 struct lpfc_dmabuf *h_buf, *hnext;
10984 struct lpfc_dmabuf *d_buf, *dnext;
10985 struct hbq_dmabuf *dmabuf = NULL;
10986
10987 /* start with the oldest sequence on the rcv list */
10988 list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) {
10989 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
10990 list_del_init(&dmabuf->hbuf.list);
10991 list_for_each_entry_safe(d_buf, dnext,
10992 &dmabuf->dbuf.list, list) {
10993 list_del_init(&d_buf->list);
10994 lpfc_in_buf_free(vport->phba, d_buf);
10995 }
10996 lpfc_in_buf_free(vport->phba, &dmabuf->dbuf);
10997 }
10998}
10999
11000/**
11001 * lpfc_rcv_seq_check_edtov - Cleans up timed out receive sequences.
11002 * @vport: The vport that the received sequences were sent to.
11003 *
11004 * This function determines whether any received sequences have timed out by
11005 * first checking the vport's rcv_buffer_time_stamp. If this time_stamp
11006 * indicates that there is at least one timed-out sequence, this routine will
11007 * go through the received sequences one at a time from most inactive to most
11008 * active to determine which ones need to be cleaned up. Once it has determined
11009 * that a sequence needs to be cleaned up, it will simply free up the resources
11010 * without sending an abort.
11011 **/
11012void
11013lpfc_rcv_seq_check_edtov(struct lpfc_vport *vport)
11014{
11015 struct lpfc_dmabuf *h_buf, *hnext;
11016 struct lpfc_dmabuf *d_buf, *dnext;
11017 struct hbq_dmabuf *dmabuf = NULL;
11018 unsigned long timeout;
11019 int abort_count = 0;
11020
11021 timeout = (msecs_to_jiffies(vport->phba->fc_edtov) +
11022 vport->rcv_buffer_time_stamp);
11023 if (list_empty(&vport->rcv_buffer_list) ||
11024 time_before(jiffies, timeout))
11025 return;
11026 /* start with the oldest sequence on the rcv list */
11027 list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) {
11028 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
11029 timeout = (msecs_to_jiffies(vport->phba->fc_edtov) +
11030 dmabuf->time_stamp);
11031 if (time_before(jiffies, timeout))
11032 break;
11033 abort_count++;
11034 list_del_init(&dmabuf->hbuf.list);
11035 list_for_each_entry_safe(d_buf, dnext,
11036 &dmabuf->dbuf.list, list) {
11037 list_del_init(&d_buf->list);
11038 lpfc_in_buf_free(vport->phba, d_buf);
11039 }
11040 lpfc_in_buf_free(vport->phba, &dmabuf->dbuf);
11041 }
11042 if (abort_count)
11043 lpfc_update_rcv_time_stamp(vport);
11044}
11045
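The timeout test above is the classic jiffies idiom: a sequence is stale once "now" passes its time stamp plus E_D_TOV. A wrap-safe userspace rendition, with tick types simplified to 32 bits:

    #include <stdbool.h>
    #include <stdint.h>

    /* signed subtraction makes the comparison survive counter wrap,
     * matching the kernel's time_before() */
    static bool time_before(uint32_t a, uint32_t b)
    {
        return (int32_t)(a - b) < 0;
    }

    static bool seq_timed_out(uint32_t now, uint32_t stamp,
                              uint32_t edtov_ticks)
    {
        return !time_before(now, stamp + edtov_ticks);
    }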
11046/**
10805 * lpfc_fc_frame_add - Adds a frame to the vport's list of received sequences 11047 * lpfc_fc_frame_add - Adds a frame to the vport's list of received sequences
10806 * @dmabuf: pointer to a dmabuf that describes the hdr and data of the FC frame 11048 * @dmabuf: pointer to a dmabuf that describes the hdr and data of the FC frame
10807 * 11049 *
@@ -10823,6 +11065,8 @@ lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
10823 struct hbq_dmabuf *seq_dmabuf = NULL; 11065 struct hbq_dmabuf *seq_dmabuf = NULL;
10824 struct hbq_dmabuf *temp_dmabuf = NULL; 11066 struct hbq_dmabuf *temp_dmabuf = NULL;
10825 11067
11068 INIT_LIST_HEAD(&dmabuf->dbuf.list);
11069 dmabuf->time_stamp = jiffies;
10826 new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt; 11070 new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
10827 /* Use the hdr_buf to find the sequence that this frame belongs to */ 11071 /* Use the hdr_buf to find the sequence that this frame belongs to */
10828 list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) { 11072 list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
@@ -10841,13 +11085,27 @@ lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
10841 * Queue the buffer on the vport's rcv_buffer_list. 11085 * Queue the buffer on the vport's rcv_buffer_list.
10842 */ 11086 */
10843 list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list); 11087 list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
11088 lpfc_update_rcv_time_stamp(vport);
10844 return dmabuf; 11089 return dmabuf;
10845 } 11090 }
10846 temp_hdr = seq_dmabuf->hbuf.virt; 11091 temp_hdr = seq_dmabuf->hbuf.virt;
10847 if (new_hdr->fh_seq_cnt < temp_hdr->fh_seq_cnt) { 11092 if (be16_to_cpu(new_hdr->fh_seq_cnt) <
10848 list_add(&seq_dmabuf->dbuf.list, &dmabuf->dbuf.list); 11093 be16_to_cpu(temp_hdr->fh_seq_cnt)) {
11094 list_del_init(&seq_dmabuf->hbuf.list);
11095 list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
11096 list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list);
11097 lpfc_update_rcv_time_stamp(vport);
10849 return dmabuf; 11098 return dmabuf;
10850 } 11099 }
11100 /* move this sequence to the tail to indicate a young sequence */
11101 list_move_tail(&seq_dmabuf->hbuf.list, &vport->rcv_buffer_list);
11102 seq_dmabuf->time_stamp = jiffies;
11103 lpfc_update_rcv_time_stamp(vport);
11104 if (list_empty(&seq_dmabuf->dbuf.list)) {
11105 temp_hdr = dmabuf->hbuf.virt;
11106 list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list);
11107 return seq_dmabuf;
11108 }
10851 /* find the correct place in the sequence to insert this frame */ 11109 /* find the correct place in the sequence to insert this frame */
10852 list_for_each_entry_reverse(d_buf, &seq_dmabuf->dbuf.list, list) { 11110 list_for_each_entry_reverse(d_buf, &seq_dmabuf->dbuf.list, list) {
10853 temp_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf); 11111 temp_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
@@ -10856,7 +11114,8 @@ lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
10856 * If the frame's sequence count is greater than the frame on 11114 * If the frame's sequence count is greater than the frame on
10857 * the list then insert the frame right after this frame 11115 * the list then insert the frame right after this frame
10858 */ 11116 */
10859 if (new_hdr->fh_seq_cnt > temp_hdr->fh_seq_cnt) { 11117 if (be16_to_cpu(new_hdr->fh_seq_cnt) >
11118 be16_to_cpu(temp_hdr->fh_seq_cnt)) {
10860 list_add(&dmabuf->dbuf.list, &temp_dmabuf->dbuf.list); 11119 list_add(&dmabuf->dbuf.list, &temp_dmabuf->dbuf.list);
10861 return seq_dmabuf; 11120 return seq_dmabuf;
10862 } 11121 }
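The be16_to_cpu() additions matter because fh_seq_cnt is big-endian on the wire; comparing the raw fields misorders frames on little-endian hosts. A runnable demonstration, using ntohs() in place of be16_to_cpu():

    #include <arpa/inet.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint16_t a_be = htons(256);  /* sequence count 256 */
        uint16_t b_be = htons(2);    /* sequence count 2   */
        /* raw compare claims 256 < 2 on a little-endian CPU */
        printf("raw:     %d\n", a_be < b_be);
        /* decoded compare gets it right */
        printf("decoded: %d\n", ntohs(a_be) < ntohs(b_be));
        return 0;
    }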
@@ -10865,6 +11124,210 @@ lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
10865} 11124}
10866 11125
10867/** 11126/**
11127 * lpfc_sli4_abort_partial_seq - Abort partially assembled unsol sequence
11128 * @vport: pointer to a virtual port
11129 * @dmabuf: pointer to a dmabuf that describes the FC sequence
11130 *
11131 * This function tries to abort the partially assembled sequence described
11132 * by the information in the basic abort @dmabuf. It checks whether such a
11133 * partially assembled sequence is held by the driver. If so, it frees up all
11134 * the frames from the partially assembled sequence.
11135 *
11136 * Return
11137 * true -- if a matching partially assembled sequence is present and all of
11138 * its frames have been freed;
11139 * false -- if no matching partially assembled sequence is present, so
11140 * nothing was aborted in the lower layer driver
11141 **/
11142static bool
11143lpfc_sli4_abort_partial_seq(struct lpfc_vport *vport,
11144 struct hbq_dmabuf *dmabuf)
11145{
11146 struct fc_frame_header *new_hdr;
11147 struct fc_frame_header *temp_hdr;
11148 struct lpfc_dmabuf *d_buf, *n_buf, *h_buf;
11149 struct hbq_dmabuf *seq_dmabuf = NULL;
11150
11151 /* Use the hdr_buf to find the sequence that matches this frame */
11152 INIT_LIST_HEAD(&dmabuf->dbuf.list);
11153 INIT_LIST_HEAD(&dmabuf->hbuf.list);
11154 new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
11155 list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
11156 temp_hdr = (struct fc_frame_header *)h_buf->virt;
11157 if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) ||
11158 (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) ||
11159 (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3)))
11160 continue;
11161 /* found a pending sequence that matches this frame */
11162 seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
11163 break;
11164 }
11165
11166 /* Free up all the frames from the partially assembled sequence */
11167 if (seq_dmabuf) {
11168 list_for_each_entry_safe(d_buf, n_buf,
11169 &seq_dmabuf->dbuf.list, list) {
11170 list_del_init(&d_buf->list);
11171 lpfc_in_buf_free(vport->phba, d_buf);
11172 }
11173 return true;
11174 }
11175 return false;
11176}
11177
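The lookup loop above keys a sequence on three fields: SEQ_ID, OX_ID, and the 24-bit S_ID (hence the 3-byte memcmp). The predicate in isolation, with a trimmed-down header struct:

    #include <stdbool.h>
    #include <stdint.h>
    #include <string.h>

    struct fc_hdr {
        uint8_t  fh_s_id[3];   /* 24-bit source address */
        uint8_t  fh_seq_id;
        uint16_t fh_ox_id;     /* big-endian on the wire */
    };

    static bool same_sequence(const struct fc_hdr *a, const struct fc_hdr *b)
    {
        return a->fh_seq_id == b->fh_seq_id &&
               a->fh_ox_id == b->fh_ox_id &&  /* raw compare suffices
                                                 for pure equality */
               memcmp(a->fh_s_id, b->fh_s_id, 3) == 0;
    }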
11178/**
11179 * lpfc_sli4_seq_abort_acc_cmpl - Accept seq abort iocb complete handler
11180 * @phba: Pointer to HBA context object.
11181 * @cmd_iocbq: pointer to the command iocbq structure.
11182 * @rsp_iocbq: pointer to the response iocbq structure.
11183 *
11184 * This function handles the sequence abort accept iocb command complete
11185 * event. It properly releases the memory allocated to the sequence abort
11186 * accept iocb.
11187 **/
11188static void
11189lpfc_sli4_seq_abort_acc_cmpl(struct lpfc_hba *phba,
11190 struct lpfc_iocbq *cmd_iocbq,
11191 struct lpfc_iocbq *rsp_iocbq)
11192{
11193 if (cmd_iocbq)
11194 lpfc_sli_release_iocbq(phba, cmd_iocbq);
11195}
11196
11197/**
11198 * lpfc_sli4_seq_abort_acc - Accept sequence abort
11199 * @phba: Pointer to HBA context object.
11200 * @fc_hdr: pointer to a FC frame header.
11201 *
11202 * This function sends a basic accept to a previous unsol sequence abort
11203 * event after aborting the sequence handling.
11204 **/
11205static void
11206lpfc_sli4_seq_abort_acc(struct lpfc_hba *phba,
11207 struct fc_frame_header *fc_hdr)
11208{
11209 struct lpfc_iocbq *ctiocb = NULL;
11210 struct lpfc_nodelist *ndlp;
11211 uint16_t oxid, rxid;
11212 uint32_t sid, fctl;
11213 IOCB_t *icmd;
11214
11215 if (!lpfc_is_link_up(phba))
11216 return;
11217
11218 sid = sli4_sid_from_fc_hdr(fc_hdr);
11219 oxid = be16_to_cpu(fc_hdr->fh_ox_id);
11220 rxid = be16_to_cpu(fc_hdr->fh_rx_id);
11221
11222 ndlp = lpfc_findnode_did(phba->pport, sid);
11223 if (!ndlp) {
11224 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
11225 "1268 Find ndlp returned NULL for oxid:x%x "
11226 "SID:x%x\n", oxid, sid);
11227 return;
11228 }
11229
11230 /* Allocate buffer for acc iocb */
11231 ctiocb = lpfc_sli_get_iocbq(phba);
11232 if (!ctiocb)
11233 return;
11234
11235 /* Extract the F_CTL field from FC_HDR */
11236 fctl = sli4_fctl_from_fc_hdr(fc_hdr);
11237
11238 icmd = &ctiocb->iocb;
11239 icmd->un.xseq64.bdl.bdeSize = 0;
11240 icmd->un.xseq64.bdl.ulpIoTag32 = 0;
11241 icmd->un.xseq64.w5.hcsw.Dfctl = 0;
11242 icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_ACC;
11243 icmd->un.xseq64.w5.hcsw.Type = FC_TYPE_BLS;
11244
11245 /* Fill in the rest of iocb fields */
11246 icmd->ulpCommand = CMD_XMIT_BLS_RSP64_CX;
11247 icmd->ulpBdeCount = 0;
11248 icmd->ulpLe = 1;
11249 icmd->ulpClass = CLASS3;
11250 icmd->ulpContext = ndlp->nlp_rpi;
11251
11252 ctiocb->iocb_cmpl = NULL;
11253 ctiocb->vport = phba->pport;
11254 ctiocb->iocb_cmpl = lpfc_sli4_seq_abort_acc_cmpl;
11255
11256 if (fctl & FC_FC_EX_CTX) {
11257 /* ABTS sent by responder to CT exchange, construction
11258 * of BA_ACC will use OX_ID from ABTS for the XRI_TAG
11259 * field and RX_ID from ABTS for RX_ID field.
11260 */
11261 bf_set(lpfc_abts_orig, &icmd->un.bls_acc, LPFC_ABTS_UNSOL_RSP);
11262 bf_set(lpfc_abts_rxid, &icmd->un.bls_acc, rxid);
11263 ctiocb->sli4_xritag = oxid;
11264 } else {
11265 /* ABTS sent by initiator to CT exchange, construction
11266 * of BA_ACC will need to allocate a new XRI as for the
11267 * XRI_TAG and RX_ID fields.
11268 */
11269 bf_set(lpfc_abts_orig, &icmd->un.bls_acc, LPFC_ABTS_UNSOL_INT);
11270 bf_set(lpfc_abts_rxid, &icmd->un.bls_acc, NO_XRI);
11271 ctiocb->sli4_xritag = NO_XRI;
11272 }
11273 bf_set(lpfc_abts_oxid, &icmd->un.bls_acc, oxid);
11274
11275 /* Xmit CT abts accept on exchange <xid> */
11276 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
11277 "1200 Xmit CT ABTS ACC on exchange x%x Data: x%x\n",
11278 CMD_XMIT_BLS_RSP64_CX, phba->link_state);
11279 lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0);
11280}
11281
11282/**
11283 * lpfc_sli4_handle_unsol_abort - Handle sli-4 unsolicited abort event
11284 * @vport: Pointer to the vport on which this sequence was received
11285 * @dmabuf: pointer to a dmabuf that describes the FC sequence
11286 *
11287 * This function handles an SLI-4 unsolicited abort event. If the unsolicited
11288 * receive sequence is only partially assembled by the driver, it shall abort
11289 * the partially assembled frames for the sequence. Otherwise, if the
11290 * unsolicited receive sequence has been completely assembled and passed to
11291 * the Upper Layer Protocol (ULP), it then marks the per-OX_ID status of the
11292 * unsolicited sequence as aborted. After that, it will issue a basic
11293 * accept (BA_ACC) for the abort.
11294 **/
11295void
11296lpfc_sli4_handle_unsol_abort(struct lpfc_vport *vport,
11297 struct hbq_dmabuf *dmabuf)
11298{
11299 struct lpfc_hba *phba = vport->phba;
11300 struct fc_frame_header fc_hdr;
11301 uint32_t fctl;
11302 bool abts_par;
11303
11304 /* Make a copy of fc_hdr before the dmabuf being released */
11305 memcpy(&fc_hdr, dmabuf->hbuf.virt, sizeof(struct fc_frame_header));
11306 fctl = sli4_fctl_from_fc_hdr(&fc_hdr);
11307
11308 if (fctl & FC_FC_EX_CTX) {
11309 /*
11310 * ABTS sent by responder to exchange, just free the buffer
11311 */
11312 lpfc_in_buf_free(phba, &dmabuf->dbuf);
11313 } else {
11314 /*
11315 * ABTS sent by initiator to exchange, need to do cleanup
11316 */
11317 /* Try to abort partially assembled seq */
11318 abts_par = lpfc_sli4_abort_partial_seq(vport, dmabuf);
11319
11320 /* Send abort to ULP if partially seq abort failed */
11321 if (abts_par == false)
11322 lpfc_sli4_send_seq_to_ulp(vport, dmabuf);
11323 else
11324 lpfc_in_buf_free(phba, &dmabuf->dbuf);
11325 }
11326 /* Send basic accept (BA_ACC) to the abort requester */
11327 lpfc_sli4_seq_abort_acc(phba, &fc_hdr);
11328}
11329
11330/**
10868 * lpfc_seq_complete - Indicates if a sequence is complete 11331 * lpfc_seq_complete - Indicates if a sequence is complete
10869 * @dmabuf: pointer to a dmabuf that describes the FC sequence 11332 * @dmabuf: pointer to a dmabuf that describes the FC sequence
10870 * 11333 *
@@ -10899,7 +11362,7 @@ lpfc_seq_complete(struct hbq_dmabuf *dmabuf)
10899 seq_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf); 11362 seq_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
10900 hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt; 11363 hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
10901 /* If there is a hole in the sequence count then fail. */ 11364 /* If there is a hole in the sequence count then fail. */
10902 if (++seq_count != hdr->fh_seq_cnt) 11365 if (++seq_count != be16_to_cpu(hdr->fh_seq_cnt))
10903 return 0; 11366 return 0;
10904 fctl = (hdr->fh_f_ctl[0] << 16 | 11367 fctl = (hdr->fh_f_ctl[0] << 16 |
10905 hdr->fh_f_ctl[1] << 8 | 11368 hdr->fh_f_ctl[1] << 8 |
@@ -10931,14 +11394,14 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
10931 struct lpfc_iocbq *first_iocbq, *iocbq; 11394 struct lpfc_iocbq *first_iocbq, *iocbq;
10932 struct fc_frame_header *fc_hdr; 11395 struct fc_frame_header *fc_hdr;
10933 uint32_t sid; 11396 uint32_t sid;
11397 struct ulp_bde64 *pbde;
10934 11398
10935 fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt; 11399 fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
10936 /* remove from receive buffer list */ 11400 /* remove from receive buffer list */
10937 list_del_init(&seq_dmabuf->hbuf.list); 11401 list_del_init(&seq_dmabuf->hbuf.list);
11402 lpfc_update_rcv_time_stamp(vport);
10938 /* get the Remote Port's SID */ 11403 /* get the Remote Port's SID */
10939 sid = (fc_hdr->fh_s_id[0] << 16 | 11404 sid = sli4_sid_from_fc_hdr(fc_hdr);
10940 fc_hdr->fh_s_id[1] << 8 |
10941 fc_hdr->fh_s_id[2]);
10942 /* Get an iocbq struct to fill in. */ 11405 /* Get an iocbq struct to fill in. */
10943 first_iocbq = lpfc_sli_get_iocbq(vport->phba); 11406 first_iocbq = lpfc_sli_get_iocbq(vport->phba);
10944 if (first_iocbq) { 11407 if (first_iocbq) {
@@ -10957,7 +11420,8 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
10957 LPFC_DATA_BUF_SIZE; 11420 LPFC_DATA_BUF_SIZE;
10958 first_iocbq->iocb.un.rcvels.remoteID = sid; 11421 first_iocbq->iocb.un.rcvels.remoteID = sid;
10959 first_iocbq->iocb.unsli3.rcvsli3.acc_len += 11422 first_iocbq->iocb.unsli3.rcvsli3.acc_len +=
10960 bf_get(lpfc_rcqe_length, &seq_dmabuf->rcqe); 11423 bf_get(lpfc_rcqe_length,
11424 &seq_dmabuf->cq_event.cqe.rcqe_cmpl);
10961 } 11425 }
10962 iocbq = first_iocbq; 11426 iocbq = first_iocbq;
10963 /* 11427 /*
@@ -10972,10 +11436,12 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
10972 if (!iocbq->context3) { 11436 if (!iocbq->context3) {
10973 iocbq->context3 = d_buf; 11437 iocbq->context3 = d_buf;
10974 iocbq->iocb.ulpBdeCount++; 11438 iocbq->iocb.ulpBdeCount++;
10975 iocbq->iocb.unsli3.rcvsli3.bde2.tus.f.bdeSize = 11439 pbde = (struct ulp_bde64 *)
10976 LPFC_DATA_BUF_SIZE; 11440 &iocbq->iocb.unsli3.sli3Words[4];
11441 pbde->tus.f.bdeSize = LPFC_DATA_BUF_SIZE;
10977 first_iocbq->iocb.unsli3.rcvsli3.acc_len += 11442 first_iocbq->iocb.unsli3.rcvsli3.acc_len +=
10978 bf_get(lpfc_rcqe_length, &seq_dmabuf->rcqe); 11443 bf_get(lpfc_rcqe_length,
11444 &seq_dmabuf->cq_event.cqe.rcqe_cmpl);
10979 } else { 11445 } else {
10980 iocbq = lpfc_sli_get_iocbq(vport->phba); 11446 iocbq = lpfc_sli_get_iocbq(vport->phba);
10981 if (!iocbq) { 11447 if (!iocbq) {
@@ -10994,7 +11460,8 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
10994 iocbq->iocb.un.cont64[0].tus.f.bdeSize = 11460 iocbq->iocb.un.cont64[0].tus.f.bdeSize =
10995 LPFC_DATA_BUF_SIZE; 11461 LPFC_DATA_BUF_SIZE;
10996 first_iocbq->iocb.unsli3.rcvsli3.acc_len += 11462 first_iocbq->iocb.unsli3.rcvsli3.acc_len +=
10997 bf_get(lpfc_rcqe_length, &seq_dmabuf->rcqe); 11463 bf_get(lpfc_rcqe_length,
11464 &seq_dmabuf->cq_event.cqe.rcqe_cmpl);
10998 iocbq->iocb.un.rcvels.remoteID = sid; 11465 iocbq->iocb.un.rcvels.remoteID = sid;
10999 list_add_tail(&iocbq->list, &first_iocbq->list); 11466 list_add_tail(&iocbq->list, &first_iocbq->list);
11000 } 11467 }
@@ -11002,6 +11469,43 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
11002 return first_iocbq; 11469 return first_iocbq;
11003} 11470}
11004 11471
11472static void
11473lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *vport,
11474 struct hbq_dmabuf *seq_dmabuf)
11475{
11476 struct fc_frame_header *fc_hdr;
11477 struct lpfc_iocbq *iocbq, *curr_iocb, *next_iocb;
11478 struct lpfc_hba *phba = vport->phba;
11479
11480 fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
11481 iocbq = lpfc_prep_seq(vport, seq_dmabuf);
11482 if (!iocbq) {
11483 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11484 "2707 Ring %d handler: Failed to allocate "
11485 "iocb Rctl x%x Type x%x received\n",
11486 LPFC_ELS_RING,
11487 fc_hdr->fh_r_ctl, fc_hdr->fh_type);
11488 return;
11489 }
11490 if (!lpfc_complete_unsol_iocb(phba,
11491 &phba->sli.ring[LPFC_ELS_RING],
11492 iocbq, fc_hdr->fh_r_ctl,
11493 fc_hdr->fh_type))
11494 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
11495 "2540 Ring %d handler: unexpected Rctl "
11496 "x%x Type x%x received\n",
11497 LPFC_ELS_RING,
11498 fc_hdr->fh_r_ctl, fc_hdr->fh_type);
11499
11500 /* Free iocb created in lpfc_prep_seq */
11501 list_for_each_entry_safe(curr_iocb, next_iocb,
11502 &iocbq->list, list) {
11503 list_del_init(&curr_iocb->list);
11504 lpfc_sli_release_iocbq(phba, curr_iocb);
11505 }
11506 lpfc_sli_release_iocbq(phba, iocbq);
11507}
11508
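The cleanup loop uses list_for_each_entry_safe() because it frees nodes mid-walk. The underlying idea in plain C: save the next pointer before releasing the current node.

    #include <stdlib.h>

    struct iocb_node { struct iocb_node *next; };

    static void release_chain(struct iocb_node *head)
    {
        struct iocb_node *cur = head;
        while (cur) {
            struct iocb_node *next = cur->next; /* saved before free */
            free(cur);
            cur = next;
        }
    }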
11005/** 11509/**
11006 * lpfc_sli4_handle_received_buffer - Handle received buffers from firmware 11510 * lpfc_sli4_handle_received_buffer - Handle received buffers from firmware
11007 * @phba: Pointer to HBA context object. 11511 * @phba: Pointer to HBA context object.
@@ -11014,67 +11518,48 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
11014 * Worker thread calls lpfc_sli4_handle_received_buffer, which will call the 11518 * Worker thread calls lpfc_sli4_handle_received_buffer, which will call the
11015 * appropriate receive function when the final frame in a sequence is received. 11519 * appropriate receive function when the final frame in a sequence is received.
11016 **/ 11520 **/
11017int 11521void
11018lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba) 11522lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba,
11523 struct hbq_dmabuf *dmabuf)
11019{ 11524{
11020 LIST_HEAD(cmplq); 11525 struct hbq_dmabuf *seq_dmabuf;
11021 struct hbq_dmabuf *dmabuf, *seq_dmabuf;
11022 struct fc_frame_header *fc_hdr; 11526 struct fc_frame_header *fc_hdr;
11023 struct lpfc_vport *vport; 11527 struct lpfc_vport *vport;
11024 uint32_t fcfi; 11528 uint32_t fcfi;
11025 struct lpfc_iocbq *iocbq;
11026
11027 /* Clear hba flag and get all received buffers into the cmplq */
11028 spin_lock_irq(&phba->hbalock);
11029 phba->hba_flag &= ~HBA_RECEIVE_BUFFER;
11030 list_splice_init(&phba->rb_pend_list, &cmplq);
11031 spin_unlock_irq(&phba->hbalock);
11032 11529
11033 /* Process each received buffer */ 11530 /* Process each received buffer */
11034 while ((dmabuf = lpfc_sli_hbqbuf_get(&cmplq)) != NULL) { 11531 fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
11035 fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt; 11532 /* check to see if this a valid type of frame */
11036 /* check to see if this a valid type of frame */ 11533 if (lpfc_fc_frame_check(phba, fc_hdr)) {
11037 if (lpfc_fc_frame_check(phba, fc_hdr)) { 11534 lpfc_in_buf_free(phba, &dmabuf->dbuf);
11038 lpfc_in_buf_free(phba, &dmabuf->dbuf); 11535 return;
11039 continue; 11536 }
11040 } 11537 fcfi = bf_get(lpfc_rcqe_fcf_id, &dmabuf->cq_event.cqe.rcqe_cmpl);
11041 fcfi = bf_get(lpfc_rcqe_fcf_id, &dmabuf->rcqe); 11538 vport = lpfc_fc_frame_to_vport(phba, fc_hdr, fcfi);
11042 vport = lpfc_fc_frame_to_vport(phba, fc_hdr, fcfi); 11539 if (!vport || !(vport->vpi_state & LPFC_VPI_REGISTERED)) {
11043 if (!vport) { 11540 /* throw out the frame */
11044 /* throw out the frame */ 11541 lpfc_in_buf_free(phba, &dmabuf->dbuf);
11045 lpfc_in_buf_free(phba, &dmabuf->dbuf); 11542 return;
11046 continue; 11543 }
11047 } 11544 /* Handle the basic abort sequence (BA_ABTS) event */
11048 /* Link this frame */ 11545 if (fc_hdr->fh_r_ctl == FC_RCTL_BA_ABTS) {
11049 seq_dmabuf = lpfc_fc_frame_add(vport, dmabuf); 11546 lpfc_sli4_handle_unsol_abort(vport, dmabuf);
11050 if (!seq_dmabuf) { 11547 return;
11051 /* unable to add frame to vport - throw it out */ 11548 }
11052 lpfc_in_buf_free(phba, &dmabuf->dbuf); 11549
11053 continue; 11550 /* Link this frame */
11054 } 11551 seq_dmabuf = lpfc_fc_frame_add(vport, dmabuf);
11055 /* If not last frame in sequence continue processing frames. */ 11552 if (!seq_dmabuf) {
11056 if (!lpfc_seq_complete(seq_dmabuf)) { 11553 /* unable to add frame to vport - throw it out */
11057 /* 11554 lpfc_in_buf_free(phba, &dmabuf->dbuf);
11058 * When saving off frames post a new one and mark this 11555 return;
11059 * frame to be freed when it is finished. 11556 }
11060 **/ 11557 /* If not last frame in sequence continue processing frames. */
11061 lpfc_sli_hbqbuf_fill_hbqs(phba, LPFC_ELS_HBQ, 1); 11558 if (!lpfc_seq_complete(seq_dmabuf))
11062 dmabuf->tag = -1; 11559 return;
11063 continue; 11560
11064 } 11561 /* Send the complete sequence to the upper layer protocol */
11065 fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt; 11562 lpfc_sli4_send_seq_to_ulp(vport, seq_dmabuf);
11066 iocbq = lpfc_prep_seq(vport, seq_dmabuf);
11067 if (!lpfc_complete_unsol_iocb(phba,
11068 &phba->sli.ring[LPFC_ELS_RING],
11069 iocbq, fc_hdr->fh_r_ctl,
11070 fc_hdr->fh_type))
11071 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
11072 "2540 Ring %d handler: unexpected Rctl "
11073 "x%x Type x%x received\n",
11074 LPFC_ELS_RING,
11075 fc_hdr->fh_r_ctl, fc_hdr->fh_type);
11076 };
11077 return 0;
11078} 11563}
11079 11564
11080/** 11565/**
@@ -11091,7 +11576,7 @@ lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba)
11091 * sequential. 11576 * sequential.
11092 * 11577 *
11093 * Return codes 11578 * Return codes
11094 * 0 - sucessful 11579 * 0 - successful
11095 * EIO - The mailbox failed to complete successfully. 11580 * EIO - The mailbox failed to complete successfully.
11096 * When this error occurs, the driver is not guaranteed 11581 * When this error occurs, the driver is not guaranteed
11097 * to have any rpi regions posted to the device and 11582 * to have any rpi regions posted to the device and
@@ -11129,7 +11614,7 @@ lpfc_sli4_post_all_rpi_hdrs(struct lpfc_hba *phba)
11129 * maps up to 64 rpi context regions. 11614 * maps up to 64 rpi context regions.
11130 * 11615 *
11131 * Return codes 11616 * Return codes
11132 * 0 - sucessful 11617 * 0 - successful
11133 * ENOMEM - No available memory 11618 * ENOMEM - No available memory
11134 * EIO - The mailbox failed to complete successfully. 11619 * EIO - The mailbox failed to complete successfully.
11135 **/ 11620 **/
@@ -11191,7 +11676,7 @@ lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page)
11191 * PAGE_SIZE modulo 64 rpi context headers. 11676 * PAGE_SIZE modulo 64 rpi context headers.
11192 * 11677 *
11193 * Returns 11678 * Returns
11194 * A nonzero rpi defined as rpi_base <= rpi < max_rpi if sucessful 11679 * A nonzero rpi defined as rpi_base <= rpi < max_rpi if successful
11195 * LPFC_RPI_ALLOC_ERROR if no rpis are available. 11680 * LPFC_RPI_ALLOC_ERROR if no rpis are available.
11196 **/ 11681 **/
11197int 11682int
@@ -11334,6 +11819,7 @@ lpfc_sli4_init_vpi(struct lpfc_hba *phba, uint16_t vpi)
11334{ 11819{
11335 LPFC_MBOXQ_t *mboxq; 11820 LPFC_MBOXQ_t *mboxq;
11336 int rc = 0; 11821 int rc = 0;
11822 int retval = MBX_SUCCESS;
11337 uint32_t mbox_tmo; 11823 uint32_t mbox_tmo;
11338 11824
11339 if (vpi == 0) 11825 if (vpi == 0)
@@ -11344,16 +11830,17 @@ lpfc_sli4_init_vpi(struct lpfc_hba *phba, uint16_t vpi)
11344 lpfc_init_vpi(phba, mboxq, vpi); 11830 lpfc_init_vpi(phba, mboxq, vpi);
11345 mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_INIT_VPI); 11831 mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_INIT_VPI);
11346 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo); 11832 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
11347 if (rc != MBX_TIMEOUT)
11348 mempool_free(mboxq, phba->mbox_mem_pool);
11349 if (rc != MBX_SUCCESS) { 11833 if (rc != MBX_SUCCESS) {
11350 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 11834 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11351 "2022 INIT VPI Mailbox failed " 11835 "2022 INIT VPI Mailbox failed "
11352 "status %d, mbxStatus x%x\n", rc, 11836 "status %d, mbxStatus x%x\n", rc,
11353 bf_get(lpfc_mqe_status, &mboxq->u.mqe)); 11837 bf_get(lpfc_mqe_status, &mboxq->u.mqe));
11354 rc = -EIO; 11838 retval = -EIO;
11355 } 11839 }
11356 return rc; 11840 if (rc != MBX_TIMEOUT)
11841 mempool_free(mboxq, phba->mbox_mem_pool);
11842
11843 return retval;
11357} 11844}
11358 11845
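The reshuffled init_vpi exit path fixes an ordering bug: the old code could free the mailbox and then read its status for the log message. The rule in miniature, with illustrative error values:

    #include <stdlib.h>

    struct mbox { int status; };

    static int finish_cmd(struct mbox *m)
    {
        int retval = (m->status == 0) ? 0 : -5; /* read while still valid */
        free(m);                                /* only then release it */
        return retval;
    }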
11359/** 11846/**
@@ -11438,13 +11925,6 @@ lpfc_sli4_add_fcf_record(struct lpfc_hba *phba, struct fcf_record *fcf_record)
11438 */ 11925 */
11439 lpfc_sli4_mbx_sge_get(mboxq, 0, &sge); 11926 lpfc_sli4_mbx_sge_get(mboxq, 0, &sge);
11440 phys_addr = getPaddr(sge.pa_hi, sge.pa_lo); 11927 phys_addr = getPaddr(sge.pa_hi, sge.pa_lo);
11441 if (unlikely(!mboxq->sge_array)) {
11442 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
11443 "2526 Failed to get the non-embedded SGE "
11444 "virtual address\n");
11445 lpfc_sli4_mbox_cmd_free(phba, mboxq);
11446 return -ENOMEM;
11447 }
11448 virt_addr = mboxq->sge_array->addr[0]; 11928 virt_addr = mboxq->sge_array->addr[0];
11449 /* 11929 /*
11450 * Configure the FCF record for FCFI 0. This is the driver's 11930 * Configure the FCF record for FCFI 0. This is the driver's
@@ -11517,24 +11997,22 @@ lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *phba,
11517} 11997}
11518 11998
11519/** 11999/**
11520 * lpfc_sli4_read_fcf_record - Read the driver's default FCF Record. 12000 * lpfc_sli4_fcf_scan_read_fcf_rec - Read hba fcf record for fcf scan.
11521 * @phba: pointer to lpfc hba data structure. 12001 * @phba: pointer to lpfc hba data structure.
11522 * @fcf_index: FCF table entry offset. 12002 * @fcf_index: FCF table entry offset.
11523 * 12003 *
11524 * This routine is invoked to read up to @fcf_num of FCF record from the 12004 * This routine is invoked to scan the entire FCF table by reading FCF
11525 * device starting with the given @fcf_index. 12005 * records and processing them one at a time, starting from @fcf_index,
12006 * for initial FCF discovery or fast FCF failover rediscovery.
12007 *
 12008 * Return 0 if the mailbox command is submitted successfully, non-zero
 12009 * otherwise.
11526 **/ 12010 **/
11527int 12011int
11528lpfc_sli4_read_fcf_record(struct lpfc_hba *phba, uint16_t fcf_index) 12012lpfc_sli4_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
11529{ 12013{
11530 int rc = 0, error; 12014 int rc = 0, error;
11531 LPFC_MBOXQ_t *mboxq; 12015 LPFC_MBOXQ_t *mboxq;
11532 void *virt_addr;
11533 dma_addr_t phys_addr;
11534 uint8_t *bytep;
11535 struct lpfc_mbx_sge sge;
11536 uint32_t alloc_len, req_len;
11537 struct lpfc_mbx_read_fcf_tbl *read_fcf;
11538 12016
11539 phba->fcoe_eventtag_at_fcf_scan = phba->fcoe_eventtag; 12017 phba->fcoe_eventtag_at_fcf_scan = phba->fcoe_eventtag;
11540 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 12018 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
@@ -11542,59 +12020,347 @@ lpfc_sli4_read_fcf_record(struct lpfc_hba *phba, uint16_t fcf_index)
11542 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12020 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11543 "2000 Failed to allocate mbox for " 12021 "2000 Failed to allocate mbox for "
11544 "READ_FCF cmd\n"); 12022 "READ_FCF cmd\n");
11545 return -ENOMEM; 12023 error = -ENOMEM;
12024 goto fail_fcf_scan;
11546 } 12025 }
12026 /* Construct the read FCF record mailbox command */
12027 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
12028 if (rc) {
12029 error = -EINVAL;
12030 goto fail_fcf_scan;
12031 }
12032 /* Issue the mailbox command asynchronously */
12033 mboxq->vport = phba->pport;
12034 mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_scan_read_fcf_rec;
12035 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
12036 if (rc == MBX_NOT_FINISHED)
12037 error = -EIO;
12038 else {
12039 spin_lock_irq(&phba->hbalock);
12040 phba->hba_flag |= FCF_DISC_INPROGRESS;
12041 spin_unlock_irq(&phba->hbalock);
12042 /* Reset FCF round robin index bmask for new scan */
12043 if (fcf_index == LPFC_FCOE_FCF_GET_FIRST)
12044 memset(phba->fcf.fcf_rr_bmask, 0,
12045 sizeof(*phba->fcf.fcf_rr_bmask));
12046 error = 0;
12047 }
12048fail_fcf_scan:
12049 if (error) {
12050 if (mboxq)
12051 lpfc_sli4_mbox_cmd_free(phba, mboxq);
12052 /* FCF scan failed, clear FCF_DISC_INPROGRESS flag */
12053 spin_lock_irq(&phba->hbalock);
12054 phba->hba_flag &= ~FCF_DISC_INPROGRESS;
12055 spin_unlock_irq(&phba->hbalock);
12056 }
12057 return error;
12058}
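
lpfc_sli4_fcf_scan_read_fcf_rec() issues the READ_FCF mailbox without waiting, marks the scan in progress under the hba lock, and resets the round robin bmask when the scan starts from LPFC_FCOE_FCF_GET_FIRST; on any failure it frees the mailbox and clears the flag again. The two variants that follow reuse the same issue-async-or-unwind shape with different completion handlers. A minimal user-space sketch of that shape, with a pthread mutex standing in for the hba spinlock and stubbed issue/flag names:

#include <errno.h>
#include <pthread.h>
#include <stdint.h>
#include <string.h>

#define FCF_GET_FIRST       0xFFFF   /* models LPFC_FCOE_FCF_GET_FIRST */
#define FCF_DISC_INPROGRESS 0x1      /* models the hba_flag bit */

struct hba {
        pthread_mutex_t lock;        /* stands in for phba->hbalock */
        uint32_t flag;               /* stands in for phba->hba_flag */
        unsigned long rr_bmask[4];   /* stands in for fcf.fcf_rr_bmask */
};

/* Stub: pretend the command was queued to the port successfully. */
static int issue_mbox_nowait(struct hba *hba, uint16_t fcf_index)
{
        (void)hba; (void)fcf_index;
        return 0;
}

static int fcf_scan_read_fcf_rec(struct hba *hba, uint16_t fcf_index)
{
        if (issue_mbox_nowait(hba, fcf_index)) {
                /*
                 * Unwind: no scan is in flight after a failed issue
                 * (mirrors the fail_fcf_scan label, which clears the
                 * flag even if it was never set).
                 */
                pthread_mutex_lock(&hba->lock);
                hba->flag &= ~FCF_DISC_INPROGRESS;
                pthread_mutex_unlock(&hba->lock);
                return -EIO;
        }
        pthread_mutex_lock(&hba->lock);
        hba->flag |= FCF_DISC_INPROGRESS;    /* scan is now in flight */
        pthread_mutex_unlock(&hba->lock);
        /* A scan from the first entry begins a fresh round robin pass. */
        if (fcf_index == FCF_GET_FIRST)
                memset(hba->rr_bmask, 0, sizeof(hba->rr_bmask));
        return 0;
}

int main(void)
{
        struct hba hba = { .lock = PTHREAD_MUTEX_INITIALIZER };
        return fcf_scan_read_fcf_rec(&hba, FCF_GET_FIRST);
}
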
11547 12059
11548 req_len = sizeof(struct fcf_record) + 12060/**
11549 sizeof(union lpfc_sli4_cfg_shdr) + 2 * sizeof(uint32_t); 12061 * lpfc_sli4_fcf_rr_read_fcf_rec - Read hba fcf record for round robin fcf.
12062 * @phba: pointer to lpfc hba data structure.
12063 * @fcf_index: FCF table entry offset.
12064 *
12065 * This routine is invoked to read an FCF record indicated by @fcf_index
12066 * and to use it for FLOGI round robin FCF failover.
12067 *
 12068 * Return 0 if the mailbox command is submitted successfully, non-zero
12069 * otherwise.
12070 **/
12071int
12072lpfc_sli4_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
12073{
12074 int rc = 0, error;
12075 LPFC_MBOXQ_t *mboxq;
11550 12076
11551 /* Set up READ_FCF SLI4_CONFIG mailbox-ioctl command */ 12077 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
11552 alloc_len = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE, 12078 if (!mboxq) {
11553 LPFC_MBOX_OPCODE_FCOE_READ_FCF_TABLE, req_len, 12079 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT,
11554 LPFC_SLI4_MBX_NEMBED); 12080 "2763 Failed to allocate mbox for "
12081 "READ_FCF cmd\n");
12082 error = -ENOMEM;
12083 goto fail_fcf_read;
12084 }
12085 /* Construct the read FCF record mailbox command */
12086 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
12087 if (rc) {
12088 error = -EINVAL;
12089 goto fail_fcf_read;
12090 }
12091 /* Issue the mailbox command asynchronously */
12092 mboxq->vport = phba->pport;
12093 mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_rr_read_fcf_rec;
12094 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
12095 if (rc == MBX_NOT_FINISHED)
12096 error = -EIO;
12097 else
12098 error = 0;
11555 12099
11556 if (alloc_len < req_len) { 12100fail_fcf_read:
11557 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12101 if (error && mboxq)
11558 "0291 Allocated DMA memory size (x%x) is "
11559 "less than the requested DMA memory "
11560 "size (x%x)\n", alloc_len, req_len);
11561 lpfc_sli4_mbox_cmd_free(phba, mboxq); 12102 lpfc_sli4_mbox_cmd_free(phba, mboxq);
11562 return -ENOMEM; 12103 return error;
12104}
12105
12106/**
 12107 * lpfc_sli4_read_fcf_rec - Read hba fcf record to update eligible fcf bmask.
 12108 * @phba: pointer to lpfc hba data structure.
 12109 * @fcf_index: FCF table entry offset.
 12110 *
 12111 * This routine is invoked to read the FCF record indicated by @fcf_index to
 12112 * determine whether it's eligible for the FLOGI round robin failover list.
 12113 *
 12114 * Return 0 if the mailbox command is submitted successfully, non-zero
 12115 * otherwise.
12116 **/
12117int
12118lpfc_sli4_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
12119{
12120 int rc = 0, error;
12121 LPFC_MBOXQ_t *mboxq;
12122
12123 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
12124 if (!mboxq) {
12125 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT,
12126 "2758 Failed to allocate mbox for "
12127 "READ_FCF cmd\n");
12128 error = -ENOMEM;
12129 goto fail_fcf_read;
12130 }
12131 /* Construct the read FCF record mailbox command */
12132 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
12133 if (rc) {
12134 error = -EINVAL;
12135 goto fail_fcf_read;
11563 } 12136 }
12137 /* Issue the mailbox command asynchronously */
12138 mboxq->vport = phba->pport;
12139 mboxq->mbox_cmpl = lpfc_mbx_cmpl_read_fcf_rec;
12140 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
12141 if (rc == MBX_NOT_FINISHED)
12142 error = -EIO;
12143 else
12144 error = 0;
11564 12145
11565 /* Get the first SGE entry from the non-embedded DMA memory. This 12146fail_fcf_read:
11566 * routine only uses a single SGE. 12147 if (error && mboxq)
11567 */
11568 lpfc_sli4_mbx_sge_get(mboxq, 0, &sge);
11569 phys_addr = getPaddr(sge.pa_hi, sge.pa_lo);
11570 if (unlikely(!mboxq->sge_array)) {
11571 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
11572 "2527 Failed to get the non-embedded SGE "
11573 "virtual address\n");
11574 lpfc_sli4_mbox_cmd_free(phba, mboxq); 12148 lpfc_sli4_mbox_cmd_free(phba, mboxq);
12149 return error;
12150}
12151
12152/**
12153 * lpfc_sli4_fcf_rr_next_index_get - Get next eligible fcf record index
12154 * @phba: pointer to lpfc hba data structure.
12155 *
 12156 * This routine gets the next eligible FCF record index in a round
 12157 * robin fashion. If the next eligible FCF record index equals the
 12158 * initial round robin FCF record index, LPFC_FCOE_FCF_NEXT_NONE (0xFFFF)
 12159 * is returned; otherwise, the index of the next eligible FCF record
 12160 * is returned.
12161 **/
12162uint16_t
12163lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *phba)
12164{
12165 uint16_t next_fcf_index;
12166
12167 /* Search from the currently registered FCF index */
12168 next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
12169 LPFC_SLI4_FCF_TBL_INDX_MAX,
12170 phba->fcf.current_rec.fcf_indx);
12171 /* Wrap around condition on phba->fcf.fcf_rr_bmask */
12172 if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX)
12173 next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
12174 LPFC_SLI4_FCF_TBL_INDX_MAX, 0);
12175 /* Round robin failover stop condition */
12176 if (next_fcf_index == phba->fcf.fcf_rr_init_indx)
12177 return LPFC_FCOE_FCF_NEXT_NONE;
12178
12179 return next_fcf_index;
12180}
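
The wrap-around walk above relies on find_next_bit() returning the bitmap size when no set bit remains at or after the start offset. Note the search is inclusive of the start index, so a failed FCF's bit is cleared (lpfc_sli4_fcf_rr_index_clear) before the next index is requested. A self-contained model with a small find_next_bit-style helper over a 64-entry table (the table size and names are illustrative, not the driver's):

#include <stdint.h>
#include <stdio.h>

#define TBL_MAX   64u      /* illustrative; models LPFC_SLI4_FCF_TBL_INDX_MAX */
#define NEXT_NONE 0xFFFFu  /* models LPFC_FCOE_FCF_NEXT_NONE */

/* find_next_bit-style helper: first set bit at or after @start, else TBL_MAX. */
static unsigned int find_next_bit64(uint64_t map, unsigned int start)
{
        for (unsigned int i = start; i < TBL_MAX; i++)
                if (map & ((uint64_t)1 << i))
                        return i;
        return TBL_MAX;
}

/*
 * Round robin walk: search upward from the current index, wrap to bit 0
 * when the top is reached, and report "none left" once the walk comes
 * back to the index that started this failover pass.
 */
static unsigned int rr_next_index(uint64_t eligible, unsigned int current,
                                  unsigned int init)
{
        unsigned int next = find_next_bit64(eligible, current);

        if (next >= TBL_MAX)                     /* wrap-around condition */
                next = find_next_bit64(eligible, 0);
        if (next == init)                        /* full circle: stop */
                return NEXT_NONE;
        return next;
}

int main(void)
{
        /* Pass started at index 3; index 10 just failed and was cleared. */
        uint64_t eligible = (1ULL << 3) | (1ULL << 42);

        printf("next: %u\n", rr_next_index(eligible, 10, 3));  /* 42 */
        eligible &= ~(1ULL << 42);               /* 42 failed as well */
        printf("next: %u\n", rr_next_index(eligible, 42, 3));  /* 65535 */
        return 0;
}
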
12181
12182/**
12183 * lpfc_sli4_fcf_rr_index_set - Set bmask with eligible fcf record index
12184 * @phba: pointer to lpfc hba data structure.
12185 *
 12186 * This routine sets the FCF record index into the eligible bmask for
12187 * round robin failover search. It checks to make sure that the index
12188 * does not go beyond the range of the driver allocated bmask dimension
12189 * before setting the bit.
12190 *
 12191 * Returns 0 if the index bit is successfully set; otherwise, it returns
12192 * -EINVAL.
12193 **/
12194int
12195lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *phba, uint16_t fcf_index)
12196{
12197 if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
12198 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
12199 "2610 HBA FCF index reached driver's "
12200 "book keeping dimension: fcf_index:%d, "
12201 "driver_bmask_max:%d\n",
12202 fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX);
12203 return -EINVAL;
12204 }
12205 /* Set the eligible FCF record index bmask */
12206 set_bit(fcf_index, phba->fcf.fcf_rr_bmask);
12207
12208 return 0;
12209}
12210
12211/**
 12212 * lpfc_sli4_fcf_rr_index_clear - Clear bmask from eligible fcf record index
12213 * @phba: pointer to lpfc hba data structure.
12214 *
12215 * This routine clears the FCF record index from the eligible bmask for
12216 * round robin failover search. It checks to make sure that the index
12217 * does not go beyond the range of the driver allocated bmask dimension
12218 * before clearing the bit.
12219 **/
12220void
12221lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *phba, uint16_t fcf_index)
12222{
12223 if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
12224 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
12225 "2762 HBA FCF index goes beyond driver's "
12226 "book keeping dimension: fcf_index:%d, "
12227 "driver_bmask_max:%d\n",
12228 fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX);
12229 return;
12230 }
12231 /* Clear the eligible FCF record index bmask */
12232 clear_bit(fcf_index, phba->fcf.fcf_rr_bmask);
12233}
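
Both bmask helpers guard the index against LPFC_SLI4_FCF_TBL_INDX_MAX before touching the bitmap, since the bmask was dimensioned from that constant at allocation time. A user-space sketch of the same guard; plain bit operations stand in for the kernel's atomic set_bit()/clear_bit(), and the table size is illustrative:

#include <errno.h>
#include <limits.h>
#include <stdio.h>

#define TBL_INDX_MAX  64u  /* illustrative; models LPFC_SLI4_FCF_TBL_INDX_MAX */
#define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)

static unsigned long rr_bmask[(TBL_INDX_MAX + BITS_PER_LONG - 1) / BITS_PER_LONG];

/* Reject indices beyond the dimension the bitmap was sized for. */
static int rr_index_set(unsigned int idx)
{
        if (idx >= TBL_INDX_MAX) {
                fprintf(stderr, "fcf index %u beyond bmask max %u\n",
                        idx, TBL_INDX_MAX);
                return -EINVAL;
        }
        rr_bmask[idx / BITS_PER_LONG] |= 1UL << (idx % BITS_PER_LONG);
        return 0;
}

/* Same guard on clear, but errors are only logged, not returned. */
static void rr_index_clear(unsigned int idx)
{
        if (idx >= TBL_INDX_MAX) {
                fprintf(stderr, "fcf index %u beyond bmask max %u\n",
                        idx, TBL_INDX_MAX);
                return;
        }
        rr_bmask[idx / BITS_PER_LONG] &= ~(1UL << (idx % BITS_PER_LONG));
}

int main(void)
{
        rr_index_set(42);                            /* in range */
        rr_index_clear(42);                          /* in range */
        return rr_index_set(64) == -EINVAL ? 0 : 1;  /* out of range */
}
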
12234
12235/**
12236 * lpfc_mbx_cmpl_redisc_fcf_table - completion routine for rediscover FCF table
12237 * @phba: pointer to lpfc hba data structure.
12238 *
12239 * This routine is the completion routine for the rediscover FCF table mailbox
 12240 * command. If the mailbox command failed, this routine falls back per the
 12241 * triggering event; otherwise it starts the FCF rediscovery wait timer.
12242 **/
12243void
12244lpfc_mbx_cmpl_redisc_fcf_table(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
12245{
12246 struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf;
12247 uint32_t shdr_status, shdr_add_status;
12248
12249 redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl;
12250
12251 shdr_status = bf_get(lpfc_mbox_hdr_status,
12252 &redisc_fcf->header.cfg_shdr.response);
12253 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
12254 &redisc_fcf->header.cfg_shdr.response);
12255 if (shdr_status || shdr_add_status) {
12256 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
12257 "2746 Requesting for FCF rediscovery failed "
12258 "status x%x add_status x%x\n",
12259 shdr_status, shdr_add_status);
12260 if (phba->fcf.fcf_flag & FCF_ACVL_DISC) {
12261 spin_lock_irq(&phba->hbalock);
12262 phba->fcf.fcf_flag &= ~FCF_ACVL_DISC;
12263 spin_unlock_irq(&phba->hbalock);
12264 /*
12265 * CVL event triggered FCF rediscover request failed,
12266 * last resort to re-try current registered FCF entry.
12267 */
12268 lpfc_retry_pport_discovery(phba);
12269 } else {
12270 spin_lock_irq(&phba->hbalock);
12271 phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
12272 spin_unlock_irq(&phba->hbalock);
12273 /*
12274 * DEAD FCF event triggered FCF rediscover request
 12275 * failed; as a last resort, fail over by treating it as
 12276 * a link down for FCF registration.
12277 */
12278 lpfc_sli4_fcf_dead_failthrough(phba);
12279 }
12280 } else {
12281 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
12282 "2775 Start FCF rediscovery quiescent period "
12283 "wait timer before scaning FCF table\n");
12284 /*
12285 * Start FCF rediscovery wait timer for pending FCF
12286 * before rescan FCF record table.
12287 */
12288 lpfc_fcf_redisc_wait_start_timer(phba);
12289 }
12290
12291 mempool_free(mbox, phba->mbox_mem_pool);
12292}
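
The completion handler above branches on which event requested the rediscovery: a failed CVL-triggered request retries the currently registered FCF, a failed DEAD-triggered request falls through to link-down handling, and success only arms the quiescent-period timer before the table is rescanned. A stubbed model of that decision tree (the helpers print instead of performing real failover):

#include <stdbool.h>
#include <stdio.h>

/* Which discovery event requested the rediscovery (models fcf_flag bits). */
enum redisc_reason { REDISC_FOR_CVL, REDISC_FOR_DEAD_FCF };

/* Stubs: print the action instead of performing real failover. */
static void retry_pport_discovery(void)   { puts("retry registered FCF"); }
static void fcf_dead_failthrough(void)    { puts("fail over as link down"); }
static void start_redisc_wait_timer(void) { puts("arm quiescent timer"); }

/*
 * On success only arm the quiescent-period timer before rescanning; on
 * failure fall back according to the event that asked for rediscovery.
 */
static void redisc_fcf_cmpl(bool mbox_failed, enum redisc_reason why)
{
        if (!mbox_failed) {
                start_redisc_wait_timer();
                return;
        }
        if (why == REDISC_FOR_CVL)
                retry_pport_discovery();    /* CVL path: last-resort retry */
        else
                fcf_dead_failthrough();     /* DEAD path: treat as link down */
}

int main(void)
{
        redisc_fcf_cmpl(false, REDISC_FOR_CVL);      /* success path */
        redisc_fcf_cmpl(true, REDISC_FOR_DEAD_FCF);  /* failure path */
        return 0;
}
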
12293
12294/**
 12295 * lpfc_sli4_redisc_fcf_table - Request to rediscover entire FCF table by port.
 12296 * @phba: pointer to lpfc hba data structure.
 12297 *
 12298 * This routine is invoked to request rediscovery of the entire FCF table
 12299 * by the port.
12300 **/
12301int
12302lpfc_sli4_redisc_fcf_table(struct lpfc_hba *phba)
12303{
12304 LPFC_MBOXQ_t *mbox;
12305 struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf;
12306 int rc, length;
12307
 12308 /* Cancel retry delay timers for all vports before FCF rediscovery */
12309 lpfc_cancel_all_vport_retry_delay_timer(phba);
12310
12311 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
12312 if (!mbox) {
12313 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
12314 "2745 Failed to allocate mbox for "
12315 "requesting FCF rediscover.\n");
11575 return -ENOMEM; 12316 return -ENOMEM;
11576 } 12317 }
11577 virt_addr = mboxq->sge_array->addr[0];
11578 read_fcf = (struct lpfc_mbx_read_fcf_tbl *)virt_addr;
11579 12318
11580 /* Set up command fields */ 12319 length = (sizeof(struct lpfc_mbx_redisc_fcf_tbl) -
11581 bf_set(lpfc_mbx_read_fcf_tbl_indx, &read_fcf->u.request, fcf_index); 12320 sizeof(struct lpfc_sli4_cfg_mhdr));
11582 /* Perform necessary endian conversion */ 12321 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
11583 bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr); 12322 LPFC_MBOX_OPCODE_FCOE_REDISCOVER_FCF,
11584 lpfc_sli_pcimem_bcopy(bytep, bytep, sizeof(uint32_t)); 12323 length, LPFC_SLI4_MBX_EMBED);
11585 mboxq->vport = phba->pport; 12324
11586 mboxq->mbox_cmpl = lpfc_mbx_cmpl_read_fcf_record; 12325 redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl;
11587 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); 12326 /* Set count to 0 for invalidating the entire FCF database */
12327 bf_set(lpfc_mbx_redisc_fcf_count, redisc_fcf, 0);
12328
12329 /* Issue the mailbox command asynchronously */
12330 mbox->vport = phba->pport;
12331 mbox->mbox_cmpl = lpfc_mbx_cmpl_redisc_fcf_table;
12332 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
12333
11588 if (rc == MBX_NOT_FINISHED) { 12334 if (rc == MBX_NOT_FINISHED) {
11589 lpfc_sli4_mbox_cmd_free(phba, mboxq); 12335 mempool_free(mbox, phba->mbox_mem_pool);
11590 error = -EIO; 12336 return -EIO;
11591 } else {
11592 spin_lock_irq(&phba->hbalock);
11593 phba->hba_flag |= FCF_DISC_INPROGRESS;
11594 spin_unlock_irq(&phba->hbalock);
11595 error = 0;
11596 } 12337 }
11597 return error; 12338 return 0;
12339}
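
Because REDISCOVER_FCF fits in the mailbox itself, the command is built with LPFC_SLI4_MBX_EMBED and the requested length is the payload size after the config header, hence the sizeof(...) - sizeof(struct lpfc_sli4_cfg_mhdr) arithmetic. A compile-and-run sketch of that length computation with stand-in structures (the field layout here is illustrative; the real layouts live in lpfc_hw4.h):

#include <stdint.h>
#include <stdio.h>

/* Stand-ins for the SLI4 config header and the REDISCOVER_FCF payload. */
struct cfg_mhdr {
        uint32_t words[4];
};

struct redisc_fcf_tbl {
        struct cfg_mhdr header;
        uint32_t fcf_count;   /* 0 = invalidate the entire FCF database */
        uint32_t rsvd;
};

int main(void)
{
        /*
         * An embedded SLI4_CONFIG command carries only the opcode-specific
         * payload after the config header, so the requested length is the
         * structure size minus the header size.
         */
        size_t length = sizeof(struct redisc_fcf_tbl) -
                        sizeof(struct cfg_mhdr);

        printf("embedded payload length: %zu bytes\n", length);  /* 8 */
        return 0;
}
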
12340
12341/**
12342 * lpfc_sli4_fcf_dead_failthrough - Failthrough routine to fcf dead event
12343 * @phba: pointer to lpfc hba data structure.
12344 *
 12345 * This function is the last-resort failover routine for the FCF DEAD
 12346 * event, used when the driver failed to perform fast FCF failover.
12347 **/
12348void
12349lpfc_sli4_fcf_dead_failthrough(struct lpfc_hba *phba)
12350{
12351 uint32_t link_state;
12352
12353 /*
 12354 * As a last resort, FCF DEAD event failover treats this as a
 12355 * link down; save the link state first because we don't want
 12356 * it changed to Link Down unless it is already down.
12357 */
12358 link_state = phba->link_state;
12359 lpfc_linkdown(phba);
12360 phba->link_state = link_state;
12361
12362 /* Unregister FCF if no devices connected to it */
12363 lpfc_unregister_unused_fcf(phba);
11598} 12364}
11599 12365
11600/** 12366/**
@@ -11725,3 +12491,48 @@ out:
11725 kfree(rgn23_data); 12491 kfree(rgn23_data);
11726 return; 12492 return;
11727} 12493}
12494
12495/**
12496 * lpfc_cleanup_pending_mbox - Free up vport discovery mailbox commands.
12497 * @vport: pointer to vport data structure.
12498 *
 12499 * This function iterates through the mailbox queue and cleans up all
 12500 * REG_LOGIN and REG_VPI mailbox commands associated with the vport. It
 12501 * is called when the driver wants to restart discovery of the vport due
 12502 * to a Clear Virtual Link event.
12503 **/
12504void
12505lpfc_cleanup_pending_mbox(struct lpfc_vport *vport)
12506{
12507 struct lpfc_hba *phba = vport->phba;
12508 LPFC_MBOXQ_t *mb, *nextmb;
12509 struct lpfc_dmabuf *mp;
12510
12511 spin_lock_irq(&phba->hbalock);
12512 list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
12513 if (mb->vport != vport)
12514 continue;
12515
12516 if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) &&
12517 (mb->u.mb.mbxCommand != MBX_REG_VPI))
12518 continue;
12519
12520 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
12521 mp = (struct lpfc_dmabuf *) (mb->context1);
12522 if (mp) {
12523 __lpfc_mbuf_free(phba, mp->virt, mp->phys);
12524 kfree(mp);
12525 }
12526 }
12527 list_del(&mb->list);
12528 mempool_free(mb, phba->mbox_mem_pool);
12529 }
12530 mb = phba->sli.mbox_active;
12531 if (mb && (mb->vport == vport)) {
12532 if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) ||
12533 (mb->u.mb.mbxCommand == MBX_REG_VPI))
12534 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
12535 }
12536 spin_unlock_irq(&phba->hbalock);
12537}
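
lpfc_cleanup_pending_mbox() uses list_for_each_entry_safe() precisely because it deletes entries while walking: the next pointer is captured before the current node is unlinked and freed. A user-space model of the same safe-deletion idiom on a minimal singly linked queue (the kernel list is doubly linked and protected by hbalock, but the stash-next-before-free rule is identical; the READ_FCF opcode value below is invented for the demo):

#include <stdlib.h>

struct mbox {
        struct mbox *next;
        int vport_id;
        int cmd;
};

/* REG_LOGIN64/REG_VPI match lpfc opcodes; READ_FCF is illustrative. */
enum { REG_LOGIN64 = 0x93, REG_VPI = 0x96, READ_FCF = 0xA0 };

static void cleanup_pending_mbox(struct mbox **head, int vport_id)
{
        struct mbox **pp = head;   /* link that points at the current entry */
        struct mbox *mb, *nextmb;

        for (mb = *head; mb; mb = nextmb) {
                nextmb = mb->next;               /* stash before any free */
                if (mb->vport_id != vport_id ||
                    (mb->cmd != REG_LOGIN64 && mb->cmd != REG_VPI)) {
                        pp = &mb->next;          /* keep this entry */
                        continue;
                }
                *pp = nextmb;                    /* unlink, like list_del() */
                free(mb);
        }
}

int main(void)
{
        struct mbox *head = NULL, *mb;
        int cmds[] = { REG_LOGIN64, READ_FCF, REG_VPI };

        for (int i = 0; i < 3; i++) {
                mb = calloc(1, sizeof(*mb));
                mb->cmd = cmds[i];
                mb->vport_id = 7;
                mb->next = head;                 /* push front */
                head = mb;
        }
        cleanup_pending_mbox(&head, 7);  /* frees REG_LOGIN64 and REG_VPI */
        return head && head->cmd == READ_FCF ? 0 : 1;
}
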
12538