author     James Smart <James.Smart@Emulex.Com>       2009-10-02 15:17:02 -0400
committer  James Bottomley <James.Bottomley@suse.de>  2009-12-04 13:01:42 -0500
commit     45ed119035b27f240345b06e090d559874e3677a (patch)
tree       14466c52a644d73ea90f30b885cfe4e3fc88d12e /drivers/scsi/lpfc/lpfc_sli.c
parent     0d87841997125971b7a39d21d1435054f91884c3 (diff)
[SCSI] lpfc 8.3.5: fix fcp command polling, add FIP mode, performance optimisations and devloss timeout fixes
This patch includes the following changes:
- Fixed panic/hang when using polling mode for FCP commands
- Added support for READ_REV mailbox bits indicating the FIP mode of the HBA
- Optimized performance of slow-path handling of ELS responses
- Added code to clean up orphaned unsolicited receive sequences
- Fixed devloss timeout when multiple initiators are in the same zone
Signed-off-by: James Smart <james.smart@emulex.com>
Signed-off-by: James Bottomley <James.Bottomley@suse.de>
Diffstat (limited to 'drivers/scsi/lpfc/lpfc_sli.c')
-rw-r--r--   drivers/scsi/lpfc/lpfc_sli.c   464
1 file changed, 229 insertions, 235 deletions
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 42d0f1948a7a..c4b19d094d39 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -59,7 +59,9 @@ typedef enum _lpfc_iocb_type { | |||
59 | static int lpfc_sli_issue_mbox_s4(struct lpfc_hba *, LPFC_MBOXQ_t *, | 59 | static int lpfc_sli_issue_mbox_s4(struct lpfc_hba *, LPFC_MBOXQ_t *, |
60 | uint32_t); | 60 | uint32_t); |
61 | static int lpfc_sli4_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *, | 61 | static int lpfc_sli4_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *, |
62 | uint8_t *, uint32_t *); | 62 | uint8_t *, uint32_t *); |
63 | static struct lpfc_iocbq *lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *, | ||
64 | struct lpfc_iocbq *); | ||
63 | static void lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *, | 65 | static void lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *, |
64 | struct hbq_dmabuf *); | 66 | struct hbq_dmabuf *); |
65 | static IOCB_t * | 67 | static IOCB_t * |
@@ -2329,168 +2331,6 @@ void lpfc_poll_eratt(unsigned long ptr) | |||
2329 | return; | 2331 | return; |
2330 | } | 2332 | } |
2331 | 2333 | ||
2332 | /** | ||
2333 | * lpfc_sli_poll_fcp_ring - Handle FCP ring completion in polling mode | ||
2334 | * @phba: Pointer to HBA context object. | ||
2335 | * | ||
2336 | * This function is called from lpfc_queuecommand, lpfc_poll_timeout, | ||
2337 | * lpfc_abort_handler and lpfc_slave_configure when FCP_RING_POLLING | ||
2338 | * is enabled. | ||
2339 | * | ||
2340 | * The caller does not hold any lock. | ||
2341 | * The function processes each response iocb in the response ring until it | ||
2342 | * finds an iocb with LE bit set and chains all the iocbs upto the iocb with | ||
2343 | * LE bit set. The function will call the completion handler of the command iocb | ||
2344 | * if the response iocb indicates a completion for a command iocb or it is | ||
2345 | * an abort completion. | ||
2346 | **/ | ||
2347 | void lpfc_sli_poll_fcp_ring(struct lpfc_hba *phba) | ||
2348 | { | ||
2349 | struct lpfc_sli *psli = &phba->sli; | ||
2350 | struct lpfc_sli_ring *pring = &psli->ring[LPFC_FCP_RING]; | ||
2351 | IOCB_t *irsp = NULL; | ||
2352 | IOCB_t *entry = NULL; | ||
2353 | struct lpfc_iocbq *cmdiocbq = NULL; | ||
2354 | struct lpfc_iocbq rspiocbq; | ||
2355 | struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno]; | ||
2356 | uint32_t status; | ||
2357 | uint32_t portRspPut, portRspMax; | ||
2358 | int type; | ||
2359 | uint32_t rsp_cmpl = 0; | ||
2360 | uint32_t ha_copy; | ||
2361 | unsigned long iflags; | ||
2362 | |||
2363 | pring->stats.iocb_event++; | ||
2364 | |||
2365 | /* | ||
2366 | * The next available response entry should never exceed the maximum | ||
2367 | * entries. If it does, treat it as an adapter hardware error. | ||
2368 | */ | ||
2369 | portRspMax = pring->numRiocb; | ||
2370 | portRspPut = le32_to_cpu(pgp->rspPutInx); | ||
2371 | if (unlikely(portRspPut >= portRspMax)) { | ||
2372 | lpfc_sli_rsp_pointers_error(phba, pring); | ||
2373 | return; | ||
2374 | } | ||
2375 | |||
2376 | rmb(); | ||
2377 | while (pring->rspidx != portRspPut) { | ||
2378 | entry = lpfc_resp_iocb(phba, pring); | ||
2379 | if (++pring->rspidx >= portRspMax) | ||
2380 | pring->rspidx = 0; | ||
2381 | |||
2382 | lpfc_sli_pcimem_bcopy((uint32_t *) entry, | ||
2383 | (uint32_t *) &rspiocbq.iocb, | ||
2384 | phba->iocb_rsp_size); | ||
2385 | irsp = &rspiocbq.iocb; | ||
2386 | type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK); | ||
2387 | pring->stats.iocb_rsp++; | ||
2388 | rsp_cmpl++; | ||
2389 | |||
2390 | if (unlikely(irsp->ulpStatus)) { | ||
2391 | /* Rsp ring <ringno> error: IOCB */ | ||
2392 | lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, | ||
2393 | "0326 Rsp Ring %d error: IOCB Data: " | ||
2394 | "x%x x%x x%x x%x x%x x%x x%x x%x\n", | ||
2395 | pring->ringno, | ||
2396 | irsp->un.ulpWord[0], | ||
2397 | irsp->un.ulpWord[1], | ||
2398 | irsp->un.ulpWord[2], | ||
2399 | irsp->un.ulpWord[3], | ||
2400 | irsp->un.ulpWord[4], | ||
2401 | irsp->un.ulpWord[5], | ||
2402 | *(uint32_t *)&irsp->un1, | ||
2403 | *((uint32_t *)&irsp->un1 + 1)); | ||
2404 | } | ||
2405 | |||
2406 | switch (type) { | ||
2407 | case LPFC_ABORT_IOCB: | ||
2408 | case LPFC_SOL_IOCB: | ||
2409 | /* | ||
2410 | * Idle exchange closed via ABTS from port. No iocb | ||
2411 | * resources need to be recovered. | ||
2412 | */ | ||
2413 | if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) { | ||
2414 | lpfc_printf_log(phba, KERN_INFO, LOG_SLI, | ||
2415 | "0314 IOCB cmd 0x%x " | ||
2416 | "processed. Skipping " | ||
2417 | "completion", | ||
2418 | irsp->ulpCommand); | ||
2419 | break; | ||
2420 | } | ||
2421 | |||
2422 | spin_lock_irqsave(&phba->hbalock, iflags); | ||
2423 | cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring, | ||
2424 | &rspiocbq); | ||
2425 | spin_unlock_irqrestore(&phba->hbalock, iflags); | ||
2426 | if ((cmdiocbq) && (cmdiocbq->iocb_cmpl)) { | ||
2427 | (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, | ||
2428 | &rspiocbq); | ||
2429 | } | ||
2430 | break; | ||
2431 | default: | ||
2432 | if (irsp->ulpCommand == CMD_ADAPTER_MSG) { | ||
2433 | char adaptermsg[LPFC_MAX_ADPTMSG]; | ||
2434 | memset(adaptermsg, 0, LPFC_MAX_ADPTMSG); | ||
2435 | memcpy(&adaptermsg[0], (uint8_t *) irsp, | ||
2436 | MAX_MSG_DATA); | ||
2437 | dev_warn(&((phba->pcidev)->dev), | ||
2438 | "lpfc%d: %s\n", | ||
2439 | phba->brd_no, adaptermsg); | ||
2440 | } else { | ||
2441 | /* Unknown IOCB command */ | ||
2442 | lpfc_printf_log(phba, KERN_ERR, LOG_SLI, | ||
2443 | "0321 Unknown IOCB command " | ||
2444 | "Data: x%x, x%x x%x x%x x%x\n", | ||
2445 | type, irsp->ulpCommand, | ||
2446 | irsp->ulpStatus, | ||
2447 | irsp->ulpIoTag, | ||
2448 | irsp->ulpContext); | ||
2449 | } | ||
2450 | break; | ||
2451 | } | ||
2452 | |||
2453 | /* | ||
2454 | * The response IOCB has been processed. Update the ring | ||
2455 | * pointer in SLIM. If the port response put pointer has not | ||
2456 | * been updated, sync the pgp->rspPutInx and fetch the new port | ||
2457 | * response put pointer. | ||
2458 | */ | ||
2459 | writel(pring->rspidx, &phba->host_gp[pring->ringno].rspGetInx); | ||
2460 | |||
2461 | if (pring->rspidx == portRspPut) | ||
2462 | portRspPut = le32_to_cpu(pgp->rspPutInx); | ||
2463 | } | ||
2464 | |||
2465 | ha_copy = readl(phba->HAregaddr); | ||
2466 | ha_copy >>= (LPFC_FCP_RING * 4); | ||
2467 | |||
2468 | if ((rsp_cmpl > 0) && (ha_copy & HA_R0RE_REQ)) { | ||
2469 | spin_lock_irqsave(&phba->hbalock, iflags); | ||
2470 | pring->stats.iocb_rsp_full++; | ||
2471 | status = ((CA_R0ATT | CA_R0RE_RSP) << (LPFC_FCP_RING * 4)); | ||
2472 | writel(status, phba->CAregaddr); | ||
2473 | readl(phba->CAregaddr); | ||
2474 | spin_unlock_irqrestore(&phba->hbalock, iflags); | ||
2475 | } | ||
2476 | if ((ha_copy & HA_R0CE_RSP) && | ||
2477 | (pring->flag & LPFC_CALL_RING_AVAILABLE)) { | ||
2478 | spin_lock_irqsave(&phba->hbalock, iflags); | ||
2479 | pring->flag &= ~LPFC_CALL_RING_AVAILABLE; | ||
2480 | pring->stats.iocb_cmd_empty++; | ||
2481 | |||
2482 | /* Force update of the local copy of cmdGetInx */ | ||
2483 | pring->local_getidx = le32_to_cpu(pgp->cmdGetInx); | ||
2484 | lpfc_sli_resume_iocb(phba, pring); | ||
2485 | |||
2486 | if ((pring->lpfc_sli_cmd_available)) | ||
2487 | (pring->lpfc_sli_cmd_available) (phba, pring); | ||
2488 | |||
2489 | spin_unlock_irqrestore(&phba->hbalock, iflags); | ||
2490 | } | ||
2491 | |||
2492 | return; | ||
2493 | } | ||
2494 | 2334 | ||
2495 | /** | 2335 | /** |
2496 | * lpfc_sli_handle_fast_ring_event - Handle ring events on FCP ring | 2336 | * lpfc_sli_handle_fast_ring_event - Handle ring events on FCP ring |
@@ -2507,9 +2347,9 @@ void lpfc_sli_poll_fcp_ring(struct lpfc_hba *phba) | |||
2507 | * an abort completion. The function will call lpfc_sli_process_unsol_iocb | 2347 | * an abort completion. The function will call lpfc_sli_process_unsol_iocb |
2508 | * function if this is an unsolicited iocb. | 2348 | * function if this is an unsolicited iocb. |
2509 | * This routine presumes LPFC_FCP_RING handling and doesn't bother | 2349 | * This routine presumes LPFC_FCP_RING handling and doesn't bother |
2510 | * to check it explicitly. This function always returns 1. | 2350 | * to check it explicitly. |
2511 | **/ | 2351 | */ |
2512 | static int | 2352 | int |
2513 | lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba, | 2353 | lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba, |
2514 | struct lpfc_sli_ring *pring, uint32_t mask) | 2354 | struct lpfc_sli_ring *pring, uint32_t mask) |
2515 | { | 2355 | { |
@@ -2539,6 +2379,11 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba, | |||
2539 | spin_unlock_irqrestore(&phba->hbalock, iflag); | 2379 | spin_unlock_irqrestore(&phba->hbalock, iflag); |
2540 | return 1; | 2380 | return 1; |
2541 | } | 2381 | } |
2382 | if (phba->fcp_ring_in_use) { | ||
2383 | spin_unlock_irqrestore(&phba->hbalock, iflag); | ||
2384 | return 1; | ||
2385 | } else | ||
2386 | phba->fcp_ring_in_use = 1; | ||
2542 | 2387 | ||
2543 | rmb(); | 2388 | rmb(); |
2544 | while (pring->rspidx != portRspPut) { | 2389 | while (pring->rspidx != portRspPut) { |
@@ -2609,10 +2454,6 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba, | |||
2609 | cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring, | 2454 | cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring, |
2610 | &rspiocbq); | 2455 | &rspiocbq); |
2611 | if ((cmdiocbq) && (cmdiocbq->iocb_cmpl)) { | 2456 | if ((cmdiocbq) && (cmdiocbq->iocb_cmpl)) { |
2612 | if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) { | ||
2613 | (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, | ||
2614 | &rspiocbq); | ||
2615 | } else { | ||
2616 | spin_unlock_irqrestore(&phba->hbalock, | 2457 | spin_unlock_irqrestore(&phba->hbalock, |
2617 | iflag); | 2458 | iflag); |
2618 | (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, | 2459 | (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, |
@@ -2620,7 +2461,6 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba, | |||
2620 | spin_lock_irqsave(&phba->hbalock, | 2461 | spin_lock_irqsave(&phba->hbalock, |
2621 | iflag); | 2462 | iflag); |
2622 | } | 2463 | } |
2623 | } | ||
2624 | break; | 2464 | break; |
2625 | case LPFC_UNSOL_IOCB: | 2465 | case LPFC_UNSOL_IOCB: |
2626 | spin_unlock_irqrestore(&phba->hbalock, iflag); | 2466 | spin_unlock_irqrestore(&phba->hbalock, iflag); |
@@ -2680,6 +2520,7 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba, | |||
2680 | 2520 | ||
2681 | } | 2521 | } |
2682 | 2522 | ||
2523 | phba->fcp_ring_in_use = 0; | ||
2683 | spin_unlock_irqrestore(&phba->hbalock, iflag); | 2524 | spin_unlock_irqrestore(&phba->hbalock, iflag); |
2684 | return rc; | 2525 | return rc; |
2685 | } | 2526 | } |
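The hunks above implement the polling-mode fix from the commit message: the dedicated lpfc_sli_poll_fcp_ring() routine is deleted, lpfc_sli_handle_fast_ring_event() loses its static qualifier so polling callers outside this file can presumably invoke it directly, and the new phba->fcp_ring_in_use flag keeps the interrupt path and a polling caller from walking the same FCP response ring at once. Below is a minimal stand-alone model of that guard; the struct, mutex, and counter are illustrative stand-ins rather than the lpfc structures, and the real driver uses its hbalock spinlock and drops it around completion callbacks.

/*
 * Stand-alone model of the fcp_ring_in_use guard: only one context at a
 * time may drain the ring, and a late arrival simply returns so the
 * current owner can finish.  Types and names are stand-ins.
 */
#include <pthread.h>
#include <stdio.h>

struct fake_hba {
	pthread_mutex_t lock;	/* stands in for phba->hbalock */
	int ring_in_use;	/* stands in for phba->fcp_ring_in_use */
	int pending;		/* response entries left to process */
};

static int handle_fast_ring_event(struct fake_hba *hba)
{
	pthread_mutex_lock(&hba->lock);
	if (hba->ring_in_use) {		/* another context owns the ring */
		pthread_mutex_unlock(&hba->lock);
		return 1;
	}
	hba->ring_in_use = 1;

	while (hba->pending > 0)	/* drain the ring (greatly simplified) */
		hba->pending--;

	hba->ring_in_use = 0;		/* release ownership before unlocking */
	pthread_mutex_unlock(&hba->lock);
	return 1;
}

int main(void)
{
	struct fake_hba hba = { .ring_in_use = 0, .pending = 8 };

	pthread_mutex_init(&hba.lock, NULL);
	handle_fast_ring_event(&hba);	/* e.g. driven by a poll timer */
	printf("entries left after poll: %d\n", hba.pending);
	pthread_mutex_destroy(&hba.lock);
	return 0;
}

As in the last hunk above, the flag is cleared while the lock is still held, so the next caller always observes a consistent ring state.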
@@ -3027,10 +2868,13 @@ lpfc_sli_handle_slow_ring_event_s4(struct lpfc_hba *phba, | |||
3027 | struct lpfc_cq_event *cq_event; | 2868 | struct lpfc_cq_event *cq_event; |
3028 | unsigned long iflag; | 2869 | unsigned long iflag; |
3029 | 2870 | ||
3030 | while (!list_empty(&phba->sli4_hba.sp_rspiocb_work_queue)) { | 2871 | spin_lock_irqsave(&phba->hbalock, iflag); |
2872 | phba->hba_flag &= ~HBA_SP_QUEUE_EVT; | ||
2873 | spin_unlock_irqrestore(&phba->hbalock, iflag); | ||
2874 | while (!list_empty(&phba->sli4_hba.sp_queue_event)) { | ||
3031 | /* Get the response iocb from the head of work queue */ | 2875 | /* Get the response iocb from the head of work queue */ |
3032 | spin_lock_irqsave(&phba->hbalock, iflag); | 2876 | spin_lock_irqsave(&phba->hbalock, iflag); |
3033 | list_remove_head(&phba->sli4_hba.sp_rspiocb_work_queue, | 2877 | list_remove_head(&phba->sli4_hba.sp_queue_event, |
3034 | cq_event, struct lpfc_cq_event, list); | 2878 | cq_event, struct lpfc_cq_event, list); |
3035 | spin_unlock_irqrestore(&phba->hbalock, iflag); | 2879 | spin_unlock_irqrestore(&phba->hbalock, iflag); |
3036 | 2880 | ||
@@ -3038,7 +2882,12 @@ lpfc_sli_handle_slow_ring_event_s4(struct lpfc_hba *phba, | |||
3038 | case CQE_CODE_COMPL_WQE: | 2882 | case CQE_CODE_COMPL_WQE: |
3039 | irspiocbq = container_of(cq_event, struct lpfc_iocbq, | 2883 | irspiocbq = container_of(cq_event, struct lpfc_iocbq, |
3040 | cq_event); | 2884 | cq_event); |
3041 | lpfc_sli_sp_handle_rspiocb(phba, pring, irspiocbq); | 2885 | /* Translate ELS WCQE to response IOCBQ */ |
2886 | irspiocbq = lpfc_sli4_els_wcqe_to_rspiocbq(phba, | ||
2887 | irspiocbq); | ||
2888 | if (irspiocbq) | ||
2889 | lpfc_sli_sp_handle_rspiocb(phba, pring, | ||
2890 | irspiocbq); | ||
3042 | break; | 2891 | break; |
3043 | case CQE_CODE_RECEIVE: | 2892 | case CQE_CODE_RECEIVE: |
3044 | dmabuf = container_of(cq_event, struct hbq_dmabuf, | 2893 | dmabuf = container_of(cq_event, struct hbq_dmabuf, |
@@ -4368,6 +4217,13 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba) | |||
4368 | phba->sli_rev = bf_get(lpfc_mbx_rd_rev_sli_lvl, &mqe->un.read_rev); | 4217 | phba->sli_rev = bf_get(lpfc_mbx_rd_rev_sli_lvl, &mqe->un.read_rev); |
4369 | if (bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev)) | 4218 | if (bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev)) |
4370 | phba->hba_flag |= HBA_FCOE_SUPPORT; | 4219 | phba->hba_flag |= HBA_FCOE_SUPPORT; |
4220 | |||
4221 | if (bf_get(lpfc_mbx_rd_rev_cee_ver, &mqe->un.read_rev) == | ||
4222 | LPFC_DCBX_CEE_MODE) | ||
4223 | phba->hba_flag |= HBA_FIP_SUPPORT; | ||
4224 | else | ||
4225 | phba->hba_flag &= ~HBA_FIP_SUPPORT; | ||
4226 | |||
4371 | if (phba->sli_rev != LPFC_SLI_REV4 || | 4227 | if (phba->sli_rev != LPFC_SLI_REV4 || |
4372 | !(phba->hba_flag & HBA_FCOE_SUPPORT)) { | 4228 | !(phba->hba_flag & HBA_FCOE_SUPPORT)) { |
4373 | lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, | 4229 | lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, |
@@ -4541,10 +4397,6 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba) | |||
4541 | rc = -ENODEV; | 4397 | rc = -ENODEV; |
4542 | goto out_free_vpd; | 4398 | goto out_free_vpd; |
4543 | } | 4399 | } |
4544 | if (phba->cfg_enable_fip) | ||
4545 | bf_set(lpfc_fip_flag, &phba->sli4_hba.sli4_flags, 1); | ||
4546 | else | ||
4547 | bf_set(lpfc_fip_flag, &phba->sli4_hba.sli4_flags, 0); | ||
4548 | 4400 | ||
4549 | /* Set up all the queues to the device */ | 4401 | /* Set up all the queues to the device */ |
4550 | rc = lpfc_sli4_queue_setup(phba); | 4402 | rc = lpfc_sli4_queue_setup(phba); |
@@ -5905,7 +5757,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, | |||
5905 | uint16_t xritag; | 5757 | uint16_t xritag; |
5906 | struct ulp_bde64 *bpl = NULL; | 5758 | struct ulp_bde64 *bpl = NULL; |
5907 | 5759 | ||
5908 | fip = bf_get(lpfc_fip_flag, &phba->sli4_hba.sli4_flags); | 5760 | fip = phba->hba_flag & HBA_FIP_SUPPORT; |
5909 | /* The fcp commands will set command type */ | 5761 | /* The fcp commands will set command type */ |
5910 | if (iocbq->iocb_flag & LPFC_IO_FCP) | 5762 | if (iocbq->iocb_flag & LPFC_IO_FCP) |
5911 | command_type = FCP_COMMAND; | 5763 | command_type = FCP_COMMAND; |
@@ -7046,8 +6898,18 @@ lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, | |||
7046 | abort_iotag = cmdiocb->iocb.un.acxri.abortIoTag; | 6898 | abort_iotag = cmdiocb->iocb.un.acxri.abortIoTag; |
7047 | 6899 | ||
7048 | spin_lock_irq(&phba->hbalock); | 6900 | spin_lock_irq(&phba->hbalock); |
7049 | if (abort_iotag != 0 && abort_iotag <= phba->sli.last_iotag) | 6901 | if (phba->sli_rev < LPFC_SLI_REV4) { |
7050 | abort_iocb = phba->sli.iocbq_lookup[abort_iotag]; | 6902 | if (abort_iotag != 0 && |
6903 | abort_iotag <= phba->sli.last_iotag) | ||
6904 | abort_iocb = | ||
6905 | phba->sli.iocbq_lookup[abort_iotag]; | ||
6906 | } else | ||
6907 | /* For sli4 the abort_tag is the XRI, | ||
6908 | * so the abort routine puts the iotag of the iocb | ||
6909 | * being aborted in the context field of the abort | ||
6910 | * IOCB. | ||
6911 | */ | ||
6912 | abort_iocb = phba->sli.iocbq_lookup[abort_context]; | ||
7051 | 6913 | ||
7052 | lpfc_printf_log(phba, KERN_INFO, LOG_ELS | LOG_SLI, | 6914 | lpfc_printf_log(phba, KERN_INFO, LOG_ELS | LOG_SLI, |
7053 | "0327 Cannot abort els iocb %p " | 6915 | "0327 Cannot abort els iocb %p " |
@@ -7061,9 +6923,18 @@ lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, | |||
7061 | * might have completed already. Do not free it again. | 6923 | * might have completed already. Do not free it again. |
7062 | */ | 6924 | */ |
7063 | if (irsp->ulpStatus == IOSTAT_LOCAL_REJECT) { | 6925 | if (irsp->ulpStatus == IOSTAT_LOCAL_REJECT) { |
7064 | spin_unlock_irq(&phba->hbalock); | 6926 | if (irsp->un.ulpWord[4] != IOERR_NO_XRI) { |
7065 | lpfc_sli_release_iocbq(phba, cmdiocb); | 6927 | spin_unlock_irq(&phba->hbalock); |
7066 | return; | 6928 | lpfc_sli_release_iocbq(phba, cmdiocb); |
6929 | return; | ||
6930 | } | ||
6931 | /* For SLI4 the ulpContext field for abort IOCB | ||
6932 | * holds the iotag of the IOCB being aborted so | ||
6933 | * the local abort_context needs to be reset to | ||
6934 | * match the aborted IOCBs ulpContext. | ||
6935 | */ | ||
6936 | if (abort_iocb && phba->sli_rev == LPFC_SLI_REV4) | ||
6937 | abort_context = abort_iocb->iocb.ulpContext; | ||
7067 | } | 6938 | } |
7068 | /* | 6939 | /* |
7069 | * make sure we have the right iocbq before taking it | 6940 | * make sure we have the right iocbq before taking it |
@@ -7182,8 +7053,10 @@ lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, | |||
7182 | iabt = &abtsiocbp->iocb; | 7053 | iabt = &abtsiocbp->iocb; |
7183 | iabt->un.acxri.abortType = ABORT_TYPE_ABTS; | 7054 | iabt->un.acxri.abortType = ABORT_TYPE_ABTS; |
7184 | iabt->un.acxri.abortContextTag = icmd->ulpContext; | 7055 | iabt->un.acxri.abortContextTag = icmd->ulpContext; |
7185 | if (phba->sli_rev == LPFC_SLI_REV4) | 7056 | if (phba->sli_rev == LPFC_SLI_REV4) { |
7186 | iabt->un.acxri.abortIoTag = cmdiocb->sli4_xritag; | 7057 | iabt->un.acxri.abortIoTag = cmdiocb->sli4_xritag; |
7058 | iabt->un.acxri.abortContextTag = cmdiocb->iotag; | ||
7059 | } | ||
7187 | else | 7060 | else |
7188 | iabt->un.acxri.abortIoTag = icmd->ulpIoTag; | 7061 | iabt->un.acxri.abortIoTag = icmd->ulpIoTag; |
7189 | iabt->ulpLe = 1; | 7062 | iabt->ulpLe = 1; |
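These two abort hunks work as a pair for SLI4: when the abort is issued, abortIoTag now carries the exchange's XRI while abortContextTag carries the iotag of the IOCB being aborted, and the completion handler above resolves the victim through iocbq_lookup[] using that context value (SLI3 keeps the old iotag-based lookup). A small stand-alone model of that bookkeeping follows; the structures, table size, and tag values are illustrative assumptions, not the lpfc definitions.

/*
 * Stand-alone model of the SLI4 abort-tag bookkeeping: the abort request
 * carries the XRI in abortIoTag and the victim's iotag in abortContextTag,
 * so the completion handler can index the lookup table by that context
 * value.  Structures, tags, and sizes here are illustrative stand-ins.
 */
#include <stdio.h>

#define LAST_IOTAG 16

struct fake_iocb {
	int iotag;
	int xritag;
};

struct fake_abort {
	int abortIoTag;		/* SLI4: XRI of the exchange being aborted */
	int abortContextTag;	/* SLI4: iotag of the IOCB being aborted */
};

static struct fake_iocb *iocbq_lookup[LAST_IOTAG + 1];

int main(void)
{
	struct fake_iocb victim = { .iotag = 5, .xritag = 0x123 };
	struct fake_abort abt;
	struct fake_iocb *aborted;

	iocbq_lookup[victim.iotag] = &victim;

	/* issue side, as in the lpfc_sli_issue_abort_iotag hunk (SLI_REV4) */
	abt.abortIoTag = victim.xritag;
	abt.abortContextTag = victim.iotag;

	/* completion side, as in the lpfc_sli_abort_els_cmpl hunk (SLI_REV4) */
	aborted = iocbq_lookup[abt.abortContextTag];
	printf("aborted iotag %d, xri 0x%x\n", aborted->iotag, aborted->xritag);
	return 0;
}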
@@ -8421,7 +8294,6 @@ lpfc_sli4_iocb_param_transfer(struct lpfc_iocbq *pIocbIn, | |||
8421 | 8294 | ||
8422 | memcpy((char *)pIocbIn + offset, (char *)pIocbOut + offset, | 8295 | memcpy((char *)pIocbIn + offset, (char *)pIocbOut + offset, |
8423 | sizeof(struct lpfc_iocbq) - offset); | 8296 | sizeof(struct lpfc_iocbq) - offset); |
8424 | pIocbIn->cq_event.cqe.wcqe_cmpl = *wcqe; | ||
8425 | /* Map WCQE parameters into irspiocb parameters */ | 8297 | /* Map WCQE parameters into irspiocb parameters */ |
8426 | pIocbIn->iocb.ulpStatus = bf_get(lpfc_wcqe_c_status, wcqe); | 8298 | pIocbIn->iocb.ulpStatus = bf_get(lpfc_wcqe_c_status, wcqe); |
8427 | if (pIocbOut->iocb_flag & LPFC_IO_FCP) | 8299 | if (pIocbOut->iocb_flag & LPFC_IO_FCP) |
@@ -8436,6 +8308,49 @@ lpfc_sli4_iocb_param_transfer(struct lpfc_iocbq *pIocbIn, | |||
8436 | } | 8308 | } |
8437 | 8309 | ||
8438 | /** | 8310 | /** |
8311 | * lpfc_sli4_els_wcqe_to_rspiocbq - Get response iocbq from els wcqe | ||
8312 | * @phba: Pointer to HBA context object. | ||
8313 | * @wcqe: Pointer to work-queue completion queue entry. | ||
8314 | * | ||
8315 | * This routine handles an ELS work-queue completion event and construct | ||
8316 | * a pseudo response ELS IODBQ from the SLI4 ELS WCQE for the common | ||
8317 | * discovery engine to handle. | ||
8318 | * | ||
8319 | * Return: Pointer to the receive IOCBQ, NULL otherwise. | ||
8320 | **/ | ||
8321 | static struct lpfc_iocbq * | ||
8322 | lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *phba, | ||
8323 | struct lpfc_iocbq *irspiocbq) | ||
8324 | { | ||
8325 | struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING]; | ||
8326 | struct lpfc_iocbq *cmdiocbq; | ||
8327 | struct lpfc_wcqe_complete *wcqe; | ||
8328 | unsigned long iflags; | ||
8329 | |||
8330 | wcqe = &irspiocbq->cq_event.cqe.wcqe_cmpl; | ||
8331 | spin_lock_irqsave(&phba->hbalock, iflags); | ||
8332 | pring->stats.iocb_event++; | ||
8333 | /* Look up the ELS command IOCB and create pseudo response IOCB */ | ||
8334 | cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring, | ||
8335 | bf_get(lpfc_wcqe_c_request_tag, wcqe)); | ||
8336 | spin_unlock_irqrestore(&phba->hbalock, iflags); | ||
8337 | |||
8338 | if (unlikely(!cmdiocbq)) { | ||
8339 | lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, | ||
8340 | "0386 ELS complete with no corresponding " | ||
8341 | "cmdiocb: iotag (%d)\n", | ||
8342 | bf_get(lpfc_wcqe_c_request_tag, wcqe)); | ||
8343 | lpfc_sli_release_iocbq(phba, irspiocbq); | ||
8344 | return NULL; | ||
8345 | } | ||
8346 | |||
8347 | /* Fake the irspiocbq and copy necessary response information */ | ||
8348 | lpfc_sli4_iocb_param_transfer(irspiocbq, cmdiocbq, wcqe); | ||
8349 | |||
8350 | return irspiocbq; | ||
8351 | } | ||
8352 | |||
8353 | /** | ||
8439 | * lpfc_sli4_sp_handle_async_event - Handle an asynchroous event | 8354 | * lpfc_sli4_sp_handle_async_event - Handle an asynchroous event |
8440 | * @phba: Pointer to HBA context object. | 8355 | * @phba: Pointer to HBA context object. |
8441 | * @cqe: Pointer to mailbox completion queue entry. | 8356 | * @cqe: Pointer to mailbox completion queue entry. |
@@ -8625,46 +8540,26 @@ static bool | |||
8625 | lpfc_sli4_sp_handle_els_wcqe(struct lpfc_hba *phba, | 8540 | lpfc_sli4_sp_handle_els_wcqe(struct lpfc_hba *phba, |
8626 | struct lpfc_wcqe_complete *wcqe) | 8541 | struct lpfc_wcqe_complete *wcqe) |
8627 | { | 8542 | { |
8628 | struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING]; | ||
8629 | struct lpfc_iocbq *cmdiocbq; | ||
8630 | struct lpfc_iocbq *irspiocbq; | 8543 | struct lpfc_iocbq *irspiocbq; |
8631 | unsigned long iflags; | 8544 | unsigned long iflags; |
8632 | bool workposted = false; | ||
8633 | 8545 | ||
8634 | spin_lock_irqsave(&phba->hbalock, iflags); | 8546 | /* Get an irspiocbq for later ELS response processing use */ |
8635 | pring->stats.iocb_event++; | ||
8636 | /* Look up the ELS command IOCB and create pseudo response IOCB */ | ||
8637 | cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring, | ||
8638 | bf_get(lpfc_wcqe_c_request_tag, wcqe)); | ||
8639 | spin_unlock_irqrestore(&phba->hbalock, iflags); | ||
8640 | |||
8641 | if (unlikely(!cmdiocbq)) { | ||
8642 | lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, | ||
8643 | "0386 ELS complete with no corresponding " | ||
8644 | "cmdiocb: iotag (%d)\n", | ||
8645 | bf_get(lpfc_wcqe_c_request_tag, wcqe)); | ||
8646 | return workposted; | ||
8647 | } | ||
8648 | |||
8649 | /* Fake the irspiocbq and copy necessary response information */ | ||
8650 | irspiocbq = lpfc_sli_get_iocbq(phba); | 8547 | irspiocbq = lpfc_sli_get_iocbq(phba); |
8651 | if (!irspiocbq) { | 8548 | if (!irspiocbq) { |
8652 | lpfc_printf_log(phba, KERN_ERR, LOG_SLI, | 8549 | lpfc_printf_log(phba, KERN_ERR, LOG_SLI, |
8653 | "0387 Failed to allocate an iocbq\n"); | 8550 | "0387 Failed to allocate an iocbq\n"); |
8654 | return workposted; | 8551 | return false; |
8655 | } | 8552 | } |
8656 | lpfc_sli4_iocb_param_transfer(irspiocbq, cmdiocbq, wcqe); | ||
8657 | 8553 | ||
8658 | /* Add the irspiocb to the response IOCB work list */ | 8554 | /* Save off the slow-path queue event for work thread to process */ |
8555 | memcpy(&irspiocbq->cq_event.cqe.wcqe_cmpl, wcqe, sizeof(*wcqe)); | ||
8659 | spin_lock_irqsave(&phba->hbalock, iflags); | 8556 | spin_lock_irqsave(&phba->hbalock, iflags); |
8660 | list_add_tail(&irspiocbq->cq_event.list, | 8557 | list_add_tail(&irspiocbq->cq_event.list, |
8661 | &phba->sli4_hba.sp_rspiocb_work_queue); | 8558 | &phba->sli4_hba.sp_queue_event); |
8662 | /* Indicate ELS ring attention */ | 8559 | phba->hba_flag |= HBA_SP_QUEUE_EVT; |
8663 | phba->work_ha |= (HA_R0ATT << (4*LPFC_ELS_RING)); | ||
8664 | spin_unlock_irqrestore(&phba->hbalock, iflags); | 8560 | spin_unlock_irqrestore(&phba->hbalock, iflags); |
8665 | workposted = true; | ||
8666 | 8561 | ||
8667 | return workposted; | 8562 | return true; |
8668 | } | 8563 | } |
8669 | 8564 | ||
8670 | /** | 8565 | /** |
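Read together with the lpfc_sli_handle_slow_ring_event_s4() hunk near the top of this diff, the rewrite above is the slow-path ELS optimisation from the commit message: interrupt context now only grabs a spare iocbq, copies the raw WCQE into it, queues it on the renamed sp_queue_event list and raises HBA_SP_QUEUE_EVT, while the iotag lookup and IOCB translation (the new lpfc_sli4_els_wcqe_to_rspiocbq()) run later in the worker thread. The sketch below is a minimal single-threaded model of that handoff; the list, flag, and tag values are stand-ins, and the real driver's locking and FIFO ordering are omitted.

/*
 * Single-threaded model of the slow-path handoff: the "interrupt" side
 * only saves the raw completion and flags the worker; the worker clears
 * the flag, drains the queue, and does the expensive translation.
 * Names, types, and the LIFO ordering here are simplifications.
 */
#include <stdio.h>
#include <stdlib.h>

struct queue_event {
	unsigned int raw_wcqe;		/* stands in for cq_event.cqe.wcqe_cmpl */
	struct queue_event *next;
};

static struct queue_event *sp_queue_event;	/* stands in for the sp_queue_event list */
static int hba_sp_queue_evt;			/* stands in for HBA_SP_QUEUE_EVT */

/* Fast path: copy the raw completion and let the worker do the rest. */
static void handle_els_wcqe(unsigned int wcqe)
{
	struct queue_event *evt = malloc(sizeof(*evt));

	if (!evt)
		return;			/* the driver logs and bails out here too */
	evt->raw_wcqe = wcqe;
	evt->next = sp_queue_event;
	sp_queue_event = evt;
	hba_sp_queue_evt = 1;
}

/* Worker: clear the flag first, then translate every queued event. */
static void handle_slow_ring_event(void)
{
	hba_sp_queue_evt = 0;
	while (sp_queue_event) {
		struct queue_event *evt = sp_queue_event;

		sp_queue_event = evt->next;
		printf("translated wcqe with request tag %u\n", evt->raw_wcqe);
		free(evt);
	}
}

int main(void)
{
	handle_els_wcqe(7);
	handle_els_wcqe(9);
	if (hba_sp_queue_evt)
		handle_slow_ring_event();
	return 0;
}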
@@ -8769,8 +8664,6 @@ lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe) | |||
8769 | unsigned long iflags; | 8664 | unsigned long iflags; |
8770 | 8665 | ||
8771 | lpfc_sli4_rq_release(hrq, drq); | 8666 | lpfc_sli4_rq_release(hrq, drq); |
8772 | if (bf_get(lpfc_rcqe_code, rcqe) != CQE_CODE_RECEIVE) | ||
8773 | goto out; | ||
8774 | if (bf_get(lpfc_rcqe_rq_id, rcqe) != hrq->queue_id) | 8667 | if (bf_get(lpfc_rcqe_rq_id, rcqe) != hrq->queue_id) |
8775 | goto out; | 8668 | goto out; |
8776 | 8669 | ||
@@ -8789,9 +8682,9 @@ lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe) | |||
8789 | memcpy(&dma_buf->cq_event.cqe.rcqe_cmpl, rcqe, sizeof(*rcqe)); | 8682 | memcpy(&dma_buf->cq_event.cqe.rcqe_cmpl, rcqe, sizeof(*rcqe)); |
8790 | /* save off the frame for the word thread to process */ | 8683 | /* save off the frame for the word thread to process */ |
8791 | list_add_tail(&dma_buf->cq_event.list, | 8684 | list_add_tail(&dma_buf->cq_event.list, |
8792 | &phba->sli4_hba.sp_rspiocb_work_queue); | 8685 | &phba->sli4_hba.sp_queue_event); |
8793 | /* Frame received */ | 8686 | /* Frame received */ |
8794 | phba->hba_flag |= HBA_RECEIVE_BUFFER; | 8687 | phba->hba_flag |= HBA_SP_QUEUE_EVT; |
8795 | spin_unlock_irqrestore(&phba->hbalock, iflags); | 8688 | spin_unlock_irqrestore(&phba->hbalock, iflags); |
8796 | workposted = true; | 8689 | workposted = true; |
8797 | break; | 8690 | break; |
@@ -8806,7 +8699,6 @@ lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe) | |||
8806 | } | 8699 | } |
8807 | out: | 8700 | out: |
8808 | return workposted; | 8701 | return workposted; |
8809 | |||
8810 | } | 8702 | } |
8811 | 8703 | ||
8812 | /** | 8704 | /** |
@@ -8824,38 +8716,38 @@ static bool | |||
8824 | lpfc_sli4_sp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq, | 8716 | lpfc_sli4_sp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq, |
8825 | struct lpfc_cqe *cqe) | 8717 | struct lpfc_cqe *cqe) |
8826 | { | 8718 | { |
8827 | struct lpfc_wcqe_complete wcqe; | 8719 | struct lpfc_cqe cqevt; |
8828 | bool workposted = false; | 8720 | bool workposted = false; |
8829 | 8721 | ||
8830 | /* Copy the work queue CQE and convert endian order if needed */ | 8722 | /* Copy the work queue CQE and convert endian order if needed */ |
8831 | lpfc_sli_pcimem_bcopy(cqe, &wcqe, sizeof(struct lpfc_cqe)); | 8723 | lpfc_sli_pcimem_bcopy(cqe, &cqevt, sizeof(struct lpfc_cqe)); |
8832 | 8724 | ||
8833 | /* Check and process for different type of WCQE and dispatch */ | 8725 | /* Check and process for different type of WCQE and dispatch */ |
8834 | switch (bf_get(lpfc_wcqe_c_code, &wcqe)) { | 8726 | switch (bf_get(lpfc_cqe_code, &cqevt)) { |
8835 | case CQE_CODE_COMPL_WQE: | 8727 | case CQE_CODE_COMPL_WQE: |
8836 | /* Process the WQ complete event */ | 8728 | /* Process the WQ/RQ complete event */ |
8837 | workposted = lpfc_sli4_sp_handle_els_wcqe(phba, | 8729 | workposted = lpfc_sli4_sp_handle_els_wcqe(phba, |
8838 | (struct lpfc_wcqe_complete *)&wcqe); | 8730 | (struct lpfc_wcqe_complete *)&cqevt); |
8839 | break; | 8731 | break; |
8840 | case CQE_CODE_RELEASE_WQE: | 8732 | case CQE_CODE_RELEASE_WQE: |
8841 | /* Process the WQ release event */ | 8733 | /* Process the WQ release event */ |
8842 | lpfc_sli4_sp_handle_rel_wcqe(phba, | 8734 | lpfc_sli4_sp_handle_rel_wcqe(phba, |
8843 | (struct lpfc_wcqe_release *)&wcqe); | 8735 | (struct lpfc_wcqe_release *)&cqevt); |
8844 | break; | 8736 | break; |
8845 | case CQE_CODE_XRI_ABORTED: | 8737 | case CQE_CODE_XRI_ABORTED: |
8846 | /* Process the WQ XRI abort event */ | 8738 | /* Process the WQ XRI abort event */ |
8847 | workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq, | 8739 | workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq, |
8848 | (struct sli4_wcqe_xri_aborted *)&wcqe); | 8740 | (struct sli4_wcqe_xri_aborted *)&cqevt); |
8849 | break; | 8741 | break; |
8850 | case CQE_CODE_RECEIVE: | 8742 | case CQE_CODE_RECEIVE: |
8851 | /* Process the RQ event */ | 8743 | /* Process the RQ event */ |
8852 | workposted = lpfc_sli4_sp_handle_rcqe(phba, | 8744 | workposted = lpfc_sli4_sp_handle_rcqe(phba, |
8853 | (struct lpfc_rcqe *)&wcqe); | 8745 | (struct lpfc_rcqe *)&cqevt); |
8854 | break; | 8746 | break; |
8855 | default: | 8747 | default: |
8856 | lpfc_printf_log(phba, KERN_ERR, LOG_SLI, | 8748 | lpfc_printf_log(phba, KERN_ERR, LOG_SLI, |
8857 | "0388 Not a valid WCQE code: x%x\n", | 8749 | "0388 Not a valid WCQE code: x%x\n", |
8858 | bf_get(lpfc_wcqe_c_code, &wcqe)); | 8750 | bf_get(lpfc_cqe_code, &cqevt)); |
8859 | break; | 8751 | break; |
8860 | } | 8752 | } |
8861 | return workposted; | 8753 | return workposted; |
@@ -10841,6 +10733,105 @@ lpfc_fc_frame_to_vport(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr, | |||
10841 | } | 10733 | } |
10842 | 10734 | ||
10843 | /** | 10735 | /** |
10736 | * lpfc_update_rcv_time_stamp - Update vport's rcv seq time stamp | ||
10737 | * @vport: The vport to work on. | ||
10738 | * | ||
10739 | * This function updates the receive sequence time stamp for this vport. The | ||
10740 | * receive sequence time stamp indicates the time that the last frame of the | ||
10741 | * the sequence that has been idle for the longest amount of time was received. | ||
10742 | * the driver uses this time stamp to indicate if any received sequences have | ||
10743 | * timed out. | ||
10744 | **/ | ||
10745 | void | ||
10746 | lpfc_update_rcv_time_stamp(struct lpfc_vport *vport) | ||
10747 | { | ||
10748 | struct lpfc_dmabuf *h_buf; | ||
10749 | struct hbq_dmabuf *dmabuf = NULL; | ||
10750 | |||
10751 | /* get the oldest sequence on the rcv list */ | ||
10752 | h_buf = list_get_first(&vport->rcv_buffer_list, | ||
10753 | struct lpfc_dmabuf, list); | ||
10754 | if (!h_buf) | ||
10755 | return; | ||
10756 | dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf); | ||
10757 | vport->rcv_buffer_time_stamp = dmabuf->time_stamp; | ||
10758 | } | ||
10759 | |||
10760 | /** | ||
10761 | * lpfc_cleanup_rcv_buffers - Cleans up all outstanding receive sequences. | ||
10762 | * @vport: The vport that the received sequences were sent to. | ||
10763 | * | ||
10764 | * This function cleans up all outstanding received sequences. This is called | ||
10765 | * by the driver when a link event or user action invalidates all the received | ||
10766 | * sequences. | ||
10767 | **/ | ||
10768 | void | ||
10769 | lpfc_cleanup_rcv_buffers(struct lpfc_vport *vport) | ||
10770 | { | ||
10771 | struct lpfc_dmabuf *h_buf, *hnext; | ||
10772 | struct lpfc_dmabuf *d_buf, *dnext; | ||
10773 | struct hbq_dmabuf *dmabuf = NULL; | ||
10774 | |||
10775 | /* start with the oldest sequence on the rcv list */ | ||
10776 | list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) { | ||
10777 | dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf); | ||
10778 | list_del_init(&dmabuf->hbuf.list); | ||
10779 | list_for_each_entry_safe(d_buf, dnext, | ||
10780 | &dmabuf->dbuf.list, list) { | ||
10781 | list_del_init(&d_buf->list); | ||
10782 | lpfc_in_buf_free(vport->phba, d_buf); | ||
10783 | } | ||
10784 | lpfc_in_buf_free(vport->phba, &dmabuf->dbuf); | ||
10785 | } | ||
10786 | } | ||
10787 | |||
10788 | /** | ||
10789 | * lpfc_rcv_seq_check_edtov - Cleans up timed out receive sequences. | ||
10790 | * @vport: The vport that the received sequences were sent to. | ||
10791 | * | ||
10792 | * This function determines whether any received sequences have timed out by | ||
10793 | * first checking the vport's rcv_buffer_time_stamp. If this time_stamp | ||
10794 | * indicates that there is at least one timed out sequence this routine will | ||
10795 | * go through the received sequences one at a time from most inactive to most | ||
10796 | * active to determine which ones need to be cleaned up. Once it has determined | ||
10797 | * that a sequence needs to be cleaned up it will simply free up the resources | ||
10798 | * without sending an abort. | ||
10799 | **/ | ||
10800 | void | ||
10801 | lpfc_rcv_seq_check_edtov(struct lpfc_vport *vport) | ||
10802 | { | ||
10803 | struct lpfc_dmabuf *h_buf, *hnext; | ||
10804 | struct lpfc_dmabuf *d_buf, *dnext; | ||
10805 | struct hbq_dmabuf *dmabuf = NULL; | ||
10806 | unsigned long timeout; | ||
10807 | int abort_count = 0; | ||
10808 | |||
10809 | timeout = (msecs_to_jiffies(vport->phba->fc_edtov) + | ||
10810 | vport->rcv_buffer_time_stamp); | ||
10811 | if (list_empty(&vport->rcv_buffer_list) || | ||
10812 | time_before(jiffies, timeout)) | ||
10813 | return; | ||
10814 | /* start with the oldest sequence on the rcv list */ | ||
10815 | list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) { | ||
10816 | dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf); | ||
10817 | timeout = (msecs_to_jiffies(vport->phba->fc_edtov) + | ||
10818 | dmabuf->time_stamp); | ||
10819 | if (time_before(jiffies, timeout)) | ||
10820 | break; | ||
10821 | abort_count++; | ||
10822 | list_del_init(&dmabuf->hbuf.list); | ||
10823 | list_for_each_entry_safe(d_buf, dnext, | ||
10824 | &dmabuf->dbuf.list, list) { | ||
10825 | list_del_init(&d_buf->list); | ||
10826 | lpfc_in_buf_free(vport->phba, d_buf); | ||
10827 | } | ||
10828 | lpfc_in_buf_free(vport->phba, &dmabuf->dbuf); | ||
10829 | } | ||
10830 | if (abort_count) | ||
10831 | lpfc_update_rcv_time_stamp(vport); | ||
10832 | } | ||
10833 | |||
10834 | /** | ||
10844 | * lpfc_fc_frame_add - Adds a frame to the vport's list of received sequences | 10835 | * lpfc_fc_frame_add - Adds a frame to the vport's list of received sequences |
10845 | * @dmabuf: pointer to a dmabuf that describes the hdr and data of the FC frame | 10836 | * @dmabuf: pointer to a dmabuf that describes the hdr and data of the FC frame |
10846 | * | 10837 | * |
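The three helpers added in this hunk implement the orphaned-sequence cleanup from the commit message: lpfc_fc_frame_add() stamps each partial sequence with jiffies and keeps the per-vport list ordered oldest-first, lpfc_update_rcv_time_stamp() tracks the oldest stamp, and lpfc_rcv_seq_check_edtov() frees anything idle longer than E_D_TOV (its periodic caller lives outside this file and is not shown here). The following is a small stand-alone model of that sweep with a fake tick counter; the list type, tick values, and timeout are illustrative assumptions, and the trailing loop plays the role lpfc_cleanup_rcv_buffers() has in the driver.

/*
 * Stand-alone model of the idle-sequence sweep: sequences are kept
 * oldest-first, each stamped when its last frame arrived, and the sweep
 * frees everything older than the timeout, stopping at the first entry
 * that is still young.  Ticks and the timeout are stand-ins for jiffies
 * and E_D_TOV.
 */
#include <stdio.h>
#include <stdlib.h>

struct rcv_seq {
	unsigned long time_stamp;	/* tick when the last frame arrived */
	struct rcv_seq *next;
};

static struct rcv_seq *rcv_list;	/* oldest sequence first */

static void rcv_seq_check_timeout(unsigned long now, unsigned long timeout)
{
	while (rcv_list && now - rcv_list->time_stamp > timeout) {
		struct rcv_seq *stale = rcv_list;

		rcv_list = stale->next;	/* drop the timed-out sequence */
		printf("freeing sequence stamped at tick %lu\n",
		       stale->time_stamp);
		free(stale);
	}
}

int main(void)
{
	/* Two sequences: one stale (tick 10), one recent (tick 90). */
	struct rcv_seq *young = malloc(sizeof(*young));
	struct rcv_seq *old = malloc(sizeof(*old));

	young->time_stamp = 90;
	young->next = NULL;
	old->time_stamp = 10;
	old->next = young;
	rcv_list = old;

	rcv_seq_check_timeout(100, 50);	/* frees only the tick-10 sequence */

	while (rcv_list) {		/* model of lpfc_cleanup_rcv_buffers */
		struct rcv_seq *next = rcv_list->next;

		free(rcv_list);
		rcv_list = next;
	}
	return 0;
}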
@@ -10863,6 +10854,7 @@ lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf) | |||
10863 | struct hbq_dmabuf *temp_dmabuf = NULL; | 10854 | struct hbq_dmabuf *temp_dmabuf = NULL; |
10864 | 10855 | ||
10865 | INIT_LIST_HEAD(&dmabuf->dbuf.list); | 10856 | INIT_LIST_HEAD(&dmabuf->dbuf.list); |
10857 | dmabuf->time_stamp = jiffies; | ||
10866 | new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt; | 10858 | new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt; |
10867 | /* Use the hdr_buf to find the sequence that this frame belongs to */ | 10859 | /* Use the hdr_buf to find the sequence that this frame belongs to */ |
10868 | list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) { | 10860 | list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) { |
@@ -10881,6 +10873,7 @@ lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf) | |||
10881 | * Queue the buffer on the vport's rcv_buffer_list. | 10873 | * Queue the buffer on the vport's rcv_buffer_list. |
10882 | */ | 10874 | */ |
10883 | list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list); | 10875 | list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list); |
10876 | lpfc_update_rcv_time_stamp(vport); | ||
10884 | return dmabuf; | 10877 | return dmabuf; |
10885 | } | 10878 | } |
10886 | temp_hdr = seq_dmabuf->hbuf.virt; | 10879 | temp_hdr = seq_dmabuf->hbuf.virt; |
@@ -10888,8 +10881,13 @@ lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf) | |||
10888 | list_del_init(&seq_dmabuf->hbuf.list); | 10881 | list_del_init(&seq_dmabuf->hbuf.list); |
10889 | list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list); | 10882 | list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list); |
10890 | list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list); | 10883 | list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list); |
10884 | lpfc_update_rcv_time_stamp(vport); | ||
10891 | return dmabuf; | 10885 | return dmabuf; |
10892 | } | 10886 | } |
10887 | /* move this sequence to the tail to indicate a young sequence */ | ||
10888 | list_move_tail(&seq_dmabuf->hbuf.list, &vport->rcv_buffer_list); | ||
10889 | seq_dmabuf->time_stamp = jiffies; | ||
10890 | lpfc_update_rcv_time_stamp(vport); | ||
10893 | /* find the correct place in the sequence to insert this frame */ | 10891 | /* find the correct place in the sequence to insert this frame */ |
10894 | list_for_each_entry_reverse(d_buf, &seq_dmabuf->dbuf.list, list) { | 10892 | list_for_each_entry_reverse(d_buf, &seq_dmabuf->dbuf.list, list) { |
10895 | temp_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf); | 10893 | temp_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf); |
@@ -11148,6 +11146,7 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf) | |||
11148 | fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt; | 11146 | fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt; |
11149 | /* remove from receive buffer list */ | 11147 | /* remove from receive buffer list */ |
11150 | list_del_init(&seq_dmabuf->hbuf.list); | 11148 | list_del_init(&seq_dmabuf->hbuf.list); |
11149 | lpfc_update_rcv_time_stamp(vport); | ||
11151 | /* get the Remote Port's SID */ | 11150 | /* get the Remote Port's SID */ |
11152 | sid = sli4_sid_from_fc_hdr(fc_hdr); | 11151 | sid = sli4_sid_from_fc_hdr(fc_hdr); |
11153 | /* Get an iocbq struct to fill in. */ | 11152 | /* Get an iocbq struct to fill in. */ |
@@ -11274,11 +11273,6 @@ lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba, | |||
11274 | struct lpfc_vport *vport; | 11273 | struct lpfc_vport *vport; |
11275 | uint32_t fcfi; | 11274 | uint32_t fcfi; |
11276 | 11275 | ||
11277 | /* Clear hba flag and get all received buffers into the cmplq */ | ||
11278 | spin_lock_irq(&phba->hbalock); | ||
11279 | phba->hba_flag &= ~HBA_RECEIVE_BUFFER; | ||
11280 | spin_unlock_irq(&phba->hbalock); | ||
11281 | |||
11282 | /* Process each received buffer */ | 11276 | /* Process each received buffer */ |
11283 | fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt; | 11277 | fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt; |
11284 | /* check to see if this a valid type of frame */ | 11278 | /* check to see if this a valid type of frame */ |
@@ -11309,9 +11303,9 @@ lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba, | |||
11309 | /* If not last frame in sequence continue processing frames. */ | 11303 | /* If not last frame in sequence continue processing frames. */ |
11310 | if (!lpfc_seq_complete(seq_dmabuf)) { | 11304 | if (!lpfc_seq_complete(seq_dmabuf)) { |
11311 | /* | 11305 | /* |
11312 | * When saving off frames post a new one and mark this | 11306 | * When saving off frames post a new one and mark this |
11313 | * frame to be freed when it is finished. | 11307 | * frame to be freed when it is finished. |
11314 | **/ | 11308 | **/ |
11315 | lpfc_sli_hbqbuf_fill_hbqs(phba, LPFC_ELS_HBQ, 1); | 11309 | lpfc_sli_hbqbuf_fill_hbqs(phba, LPFC_ELS_HBQ, 1); |
11316 | dmabuf->tag = -1; | 11310 | dmabuf->tag = -1; |
11317 | return; | 11311 | return; |