author     James Smart <James.Smart@Emulex.Com>       2009-10-02 15:16:39 -0400
committer  James Bottomley <James.Bottomley@suse.de>  2009-12-04 13:01:39 -0500
commit     4d9ab994e214d35107017c342aca42477b137316 (patch)
tree       0ee7dd76ce9938eceeac20e4dab287194dc42c41 /drivers/scsi/lpfc/lpfc_sli.c
parent     1796e72291b2b6aafaec5954e666d0b5a95da935 (diff)
[SCSI] lpfc 8.3.5: fix reset path, ELS ordering and discovery issues
This patch includes the following fixes:
- Fixed panic during HBA reset.
- Fixed FCoE event tag passed in resume_rpi.
- Fixed out-of-order ELS commands.
- Fixed discovery issues found during VLAN testing.
- Fixed UNREG_VPI failure on extended link pull.
- Fixed crash while processing unsolicited FC frames.
- Cleared the retry count in the delayed ELS handler.
- Fixed discovery failure during quick link bounce.
Signed-off-by: James Smart <james.smart@emulex.com>
Signed-off-by: James Bottomley <James.Bottomley@suse.de>
Diffstat (limited to 'drivers/scsi/lpfc/lpfc_sli.c')
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.c  293
1 file changed, 153 insertions(+), 140 deletions(-)
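
The structural change behind several of these fixes is that slow-path events are no longer queued as raw iocbs or pending receive buffers: a struct lpfc_cq_event is embedded in both struct lpfc_iocbq and struct hbq_dmabuf, both event types are posted to the same sp_rspiocb_work_queue, and the worker thread recovers the containing object with container_of() and dispatches on the CQE code, so ELS completions and unsolicited frames are handled in arrival order. A simplified sketch of that dispatch pattern follows (names taken from the patch below; locking and error handling elided, so this is illustrative rather than the driver code verbatim):

	struct lpfc_cq_event *cq_event;
	struct lpfc_iocbq *irspiocbq;
	struct hbq_dmabuf *dmabuf;

	while (!list_empty(&phba->sli4_hba.sp_rspiocb_work_queue)) {
		/* Dequeue one slow-path completion-queue event (hbalock elided) */
		list_remove_head(&phba->sli4_hba.sp_rspiocb_work_queue,
				 cq_event, struct lpfc_cq_event, list);

		switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) {
		case CQE_CODE_COMPL_WQE:
			/* ELS/CT work-queue completion: recover the iocbq wrapper */
			irspiocbq = container_of(cq_event, struct lpfc_iocbq, cq_event);
			lpfc_sli_sp_handle_rspiocb(phba, pring, irspiocbq);
			break;
		case CQE_CODE_RECEIVE:
			/* Unsolicited FC frame: recover the receive buffer */
			dmabuf = container_of(cq_event, struct hbq_dmabuf, cq_event);
			lpfc_sli4_handle_received_buffer(phba, dmabuf);
			break;
		default:
			break;
		}
	}

Keeping both event types on one ordered queue appears to be the mechanism behind the out-of-order ELS fix listed above, and it also removes the separate rxq_cq/rb_pend_list path that the rest of the patch deletes.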
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 43cbe336f1f8..8d884d8e18be 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -3018,16 +3018,31 @@ lpfc_sli_handle_slow_ring_event_s4(struct lpfc_hba *phba,
 			 struct lpfc_sli_ring *pring, uint32_t mask)
 {
 	struct lpfc_iocbq *irspiocbq;
+	struct hbq_dmabuf *dmabuf;
+	struct lpfc_cq_event *cq_event;
 	unsigned long iflag;
 
 	while (!list_empty(&phba->sli4_hba.sp_rspiocb_work_queue)) {
 		/* Get the response iocb from the head of work queue */
 		spin_lock_irqsave(&phba->hbalock, iflag);
 		list_remove_head(&phba->sli4_hba.sp_rspiocb_work_queue,
-				 irspiocbq, struct lpfc_iocbq, list);
+				 cq_event, struct lpfc_cq_event, list);
 		spin_unlock_irqrestore(&phba->hbalock, iflag);
-		/* Process the response iocb */
-		lpfc_sli_sp_handle_rspiocb(phba, pring, irspiocbq);
+
+		switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) {
+		case CQE_CODE_COMPL_WQE:
+			irspiocbq = container_of(cq_event, struct lpfc_iocbq,
+						 cq_event);
+			lpfc_sli_sp_handle_rspiocb(phba, pring, irspiocbq);
+			break;
+		case CQE_CODE_RECEIVE:
+			dmabuf = container_of(cq_event, struct hbq_dmabuf,
+					      cq_event);
+			lpfc_sli4_handle_received_buffer(phba, dmabuf);
+			break;
+		default:
+			break;
+		}
 	}
 }
 
@@ -3416,6 +3431,7 @@ lpfc_sli_brdreset(struct lpfc_hba *phba)
 
 	/* perform board reset */
 	phba->fc_eventTag = 0;
+	phba->link_events = 0;
 	phba->pport->fc_myDID = 0;
 	phba->pport->fc_prevDID = 0;
 
@@ -3476,6 +3492,7 @@ lpfc_sli4_brdreset(struct lpfc_hba *phba)
 
 	/* perform board reset */
 	phba->fc_eventTag = 0;
+	phba->link_events = 0;
 	phba->pport->fc_myDID = 0;
 	phba->pport->fc_prevDID = 0;
 
@@ -3495,7 +3512,6 @@ lpfc_sli4_brdreset(struct lpfc_hba *phba)
 	list_del_init(&phba->sli4_hba.dat_rq->list);
 	list_del_init(&phba->sli4_hba.mbx_cq->list);
 	list_del_init(&phba->sli4_hba.els_cq->list);
-	list_del_init(&phba->sli4_hba.rxq_cq->list);
 	for (qindx = 0; qindx < phba->cfg_fcp_wq_count; qindx++)
 		list_del_init(&phba->sli4_hba.fcp_wq[qindx]->list);
 	for (qindx = 0; qindx < phba->cfg_fcp_eq_count; qindx++)
@@ -4243,7 +4259,6 @@ lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba)
 
 	lpfc_sli4_cq_release(phba->sli4_hba.mbx_cq, LPFC_QUEUE_REARM);
 	lpfc_sli4_cq_release(phba->sli4_hba.els_cq, LPFC_QUEUE_REARM);
-	lpfc_sli4_cq_release(phba->sli4_hba.rxq_cq, LPFC_QUEUE_REARM);
 	for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++)
 		lpfc_sli4_cq_release(phba->sli4_hba.fcp_cq[fcp_eqidx],
 				     LPFC_QUEUE_REARM);
@@ -8351,8 +8366,7 @@ lpfc_sli4_iocb_param_transfer(struct lpfc_iocbq *pIocbIn,
 
 	memcpy((char *)pIocbIn + offset, (char *)pIocbOut + offset,
 	       sizeof(struct lpfc_iocbq) - offset);
-	memset(&pIocbIn->sli4_info, 0,
-	       sizeof(struct lpfc_sli4_rspiocb_info));
+	pIocbIn->cq_event.cqe.wcqe_cmpl = *wcqe;
 	/* Map WCQE parameters into irspiocb parameters */
 	pIocbIn->iocb.ulpStatus = bf_get(lpfc_wcqe_c_status, wcqe);
 	if (pIocbOut->iocb_flag & LPFC_IO_FCP)
@@ -8364,16 +8378,6 @@ lpfc_sli4_iocb_param_transfer(struct lpfc_iocbq *pIocbIn,
 		pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
 	else
 		pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
-	/* Load in additional WCQE parameters */
-	pIocbIn->sli4_info.hw_status = bf_get(lpfc_wcqe_c_hw_status, wcqe);
-	pIocbIn->sli4_info.bfield = 0;
-	if (bf_get(lpfc_wcqe_c_xb, wcqe))
-		pIocbIn->sli4_info.bfield |= LPFC_XB;
-	if (bf_get(lpfc_wcqe_c_pv, wcqe)) {
-		pIocbIn->sli4_info.bfield |= LPFC_PV;
-		pIocbIn->sli4_info.priority =
-			bf_get(lpfc_wcqe_c_priority, wcqe);
-	}
 }
 
 /**
@@ -8598,7 +8602,8 @@ lpfc_sli4_sp_handle_els_wcqe(struct lpfc_hba *phba,
 
 	/* Add the irspiocb to the response IOCB work list */
 	spin_lock_irqsave(&phba->hbalock, iflags);
-	list_add_tail(&irspiocbq->list, &phba->sli4_hba.sp_rspiocb_work_queue);
+	list_add_tail(&irspiocbq->cq_event.list,
+		      &phba->sli4_hba.sp_rspiocb_work_queue);
 	/* Indicate ELS ring attention */
 	phba->work_ha |= (HA_R0ATT << (4*LPFC_ELS_RING));
 	spin_unlock_irqrestore(&phba->hbalock, iflags);
@@ -8690,52 +8695,6 @@ lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba,
 }
 
 /**
- * lpfc_sli4_sp_handle_wcqe - Process a work-queue completion queue entry
- * @phba: Pointer to HBA context object.
- * @cq: Pointer to the completion queue.
- * @wcqe: Pointer to a completion queue entry.
- *
- * This routine process a slow-path work-queue completion queue entry.
- *
- * Return: true if work posted to worker thread, otherwise false.
- **/
-static bool
-lpfc_sli4_sp_handle_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
-			 struct lpfc_cqe *cqe)
-{
-	struct lpfc_wcqe_complete wcqe;
-	bool workposted = false;
-
-	/* Copy the work queue CQE and convert endian order if needed */
-	lpfc_sli_pcimem_bcopy(cqe, &wcqe, sizeof(struct lpfc_cqe));
-
-	/* Check and process for different type of WCQE and dispatch */
-	switch (bf_get(lpfc_wcqe_c_code, &wcqe)) {
-	case CQE_CODE_COMPL_WQE:
-		/* Process the WQ complete event */
-		workposted = lpfc_sli4_sp_handle_els_wcqe(phba,
-				(struct lpfc_wcqe_complete *)&wcqe);
-		break;
-	case CQE_CODE_RELEASE_WQE:
-		/* Process the WQ release event */
-		lpfc_sli4_sp_handle_rel_wcqe(phba,
-				(struct lpfc_wcqe_release *)&wcqe);
-		break;
-	case CQE_CODE_XRI_ABORTED:
-		/* Process the WQ XRI abort event */
-		workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
-				(struct sli4_wcqe_xri_aborted *)&wcqe);
-		break;
-	default:
-		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
-				"0388 Not a valid WCQE code: x%x\n",
-				bf_get(lpfc_wcqe_c_code, &wcqe));
-		break;
-	}
-	return workposted;
-}
-
-/**
  * lpfc_sli4_sp_handle_rcqe - Process a receive-queue completion queue entry
  * @phba: Pointer to HBA context object.
  * @rcqe: Pointer to receive-queue completion queue entry.
@@ -8745,9 +8704,8 @@ lpfc_sli4_sp_handle_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
  * Return: true if work posted to worker thread, otherwise false.
  **/
 static bool
-lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_cqe *cqe)
+lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
 {
-	struct lpfc_rcqe rcqe;
 	bool workposted = false;
 	struct lpfc_queue *hrq = phba->sli4_hba.hdr_rq;
 	struct lpfc_queue *drq = phba->sli4_hba.dat_rq;
@@ -8755,15 +8713,13 @@ lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_cqe *cqe)
 	uint32_t status;
 	unsigned long iflags;
 
-	/* Copy the receive queue CQE and convert endian order if needed */
-	lpfc_sli_pcimem_bcopy(cqe, &rcqe, sizeof(struct lpfc_rcqe));
 	lpfc_sli4_rq_release(hrq, drq);
-	if (bf_get(lpfc_rcqe_code, &rcqe) != CQE_CODE_RECEIVE)
+	if (bf_get(lpfc_rcqe_code, rcqe) != CQE_CODE_RECEIVE)
 		goto out;
-	if (bf_get(lpfc_rcqe_rq_id, &rcqe) != hrq->queue_id)
+	if (bf_get(lpfc_rcqe_rq_id, rcqe) != hrq->queue_id)
 		goto out;
 
-	status = bf_get(lpfc_rcqe_status, &rcqe);
+	status = bf_get(lpfc_rcqe_status, rcqe);
 	switch (status) {
 	case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
@@ -8775,9 +8731,10 @@ lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_cqe *cqe)
 			spin_unlock_irqrestore(&phba->hbalock, iflags);
 			goto out;
 		}
-		memcpy(&dma_buf->rcqe, &rcqe, sizeof(rcqe));
+		memcpy(&dma_buf->cq_event.cqe.rcqe_cmpl, rcqe, sizeof(*rcqe));
 		/* save off the frame for the word thread to process */
-		list_add_tail(&dma_buf->dbuf.list, &phba->rb_pend_list);
+		list_add_tail(&dma_buf->cq_event.list,
+			      &phba->sli4_hba.sp_rspiocb_work_queue);
 		/* Frame received */
 		phba->hba_flag |= HBA_RECEIVE_BUFFER;
 		spin_unlock_irqrestore(&phba->hbalock, iflags);
@@ -8798,6 +8755,58 @@ out:
 }
 
 /**
+ * lpfc_sli4_sp_handle_cqe - Process a slow path completion queue entry
+ * @phba: Pointer to HBA context object.
+ * @cq: Pointer to the completion queue.
+ * @wcqe: Pointer to a completion queue entry.
+ *
+ * This routine process a slow-path work-queue or recieve queue completion queue
+ * entry.
+ *
+ * Return: true if work posted to worker thread, otherwise false.
+ **/
+static bool
+lpfc_sli4_sp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
+			 struct lpfc_cqe *cqe)
+{
+	struct lpfc_wcqe_complete wcqe;
+	bool workposted = false;
+
+	/* Copy the work queue CQE and convert endian order if needed */
+	lpfc_sli_pcimem_bcopy(cqe, &wcqe, sizeof(struct lpfc_cqe));
+
+	/* Check and process for different type of WCQE and dispatch */
+	switch (bf_get(lpfc_wcqe_c_code, &wcqe)) {
+	case CQE_CODE_COMPL_WQE:
+		/* Process the WQ complete event */
+		workposted = lpfc_sli4_sp_handle_els_wcqe(phba,
+				(struct lpfc_wcqe_complete *)&wcqe);
+		break;
+	case CQE_CODE_RELEASE_WQE:
+		/* Process the WQ release event */
+		lpfc_sli4_sp_handle_rel_wcqe(phba,
+				(struct lpfc_wcqe_release *)&wcqe);
+		break;
+	case CQE_CODE_XRI_ABORTED:
+		/* Process the WQ XRI abort event */
+		workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
+				(struct sli4_wcqe_xri_aborted *)&wcqe);
+		break;
+	case CQE_CODE_RECEIVE:
+		/* Process the RQ event */
+		workposted = lpfc_sli4_sp_handle_rcqe(phba,
+				(struct lpfc_rcqe *)&wcqe);
+		break;
+	default:
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"0388 Not a valid WCQE code: x%x\n",
+				bf_get(lpfc_wcqe_c_code, &wcqe));
+		break;
+	}
+	return workposted;
+}
+
+/**
  * lpfc_sli4_sp_handle_eqe - Process a slow-path event queue entry
  * @phba: Pointer to HBA context object.
  * @eqe: Pointer to fast-path event queue entry.
@@ -8858,14 +8867,7 @@ lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe)
 		break;
 	case LPFC_WCQ:
 		while ((cqe = lpfc_sli4_cq_get(cq))) {
-			workposted |= lpfc_sli4_sp_handle_wcqe(phba, cq, cqe);
-			if (!(++ecount % LPFC_GET_QE_REL_INT))
-				lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
-		}
-		break;
-	case LPFC_RCQ:
-		while ((cqe = lpfc_sli4_cq_get(cq))) {
-			workposted |= lpfc_sli4_sp_handle_rcqe(phba, cqe);
+			workposted |= lpfc_sli4_sp_handle_cqe(phba, cq, cqe);
 			if (!(++ecount % LPFC_GET_QE_REL_INT))
 				lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
 		}
@@ -10823,6 +10825,7 @@ lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
 	struct hbq_dmabuf *seq_dmabuf = NULL;
 	struct hbq_dmabuf *temp_dmabuf = NULL;
 
+	INIT_LIST_HEAD(&dmabuf->dbuf.list);
 	new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
 	/* Use the hdr_buf to find the sequence that this frame belongs to */
 	list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
@@ -10845,7 +10848,9 @@ lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
 	}
 	temp_hdr = seq_dmabuf->hbuf.virt;
 	if (new_hdr->fh_seq_cnt < temp_hdr->fh_seq_cnt) {
-		list_add(&seq_dmabuf->dbuf.list, &dmabuf->dbuf.list);
+		list_del_init(&seq_dmabuf->hbuf.list);
+		list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
+		list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list);
 		return dmabuf;
 	}
 	/* find the correct place in the sequence to insert this frame */
@@ -10957,7 +10962,8 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
 							LPFC_DATA_BUF_SIZE;
 		first_iocbq->iocb.un.rcvels.remoteID = sid;
 		first_iocbq->iocb.unsli3.rcvsli3.acc_len +=
-			bf_get(lpfc_rcqe_length, &seq_dmabuf->rcqe);
+			bf_get(lpfc_rcqe_length,
+			       &seq_dmabuf->cq_event.cqe.rcqe_cmpl);
 	}
 	iocbq = first_iocbq;
 	/*
@@ -10975,7 +10981,8 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
 			iocbq->iocb.unsli3.rcvsli3.bde2.tus.f.bdeSize =
 							LPFC_DATA_BUF_SIZE;
 			first_iocbq->iocb.unsli3.rcvsli3.acc_len +=
-				bf_get(lpfc_rcqe_length, &seq_dmabuf->rcqe);
+				bf_get(lpfc_rcqe_length,
+				       &seq_dmabuf->cq_event.cqe.rcqe_cmpl);
 		} else {
 			iocbq = lpfc_sli_get_iocbq(vport->phba);
 			if (!iocbq) {
@@ -10994,7 +11001,8 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
 			iocbq->iocb.un.cont64[0].tus.f.bdeSize =
 							LPFC_DATA_BUF_SIZE;
 			first_iocbq->iocb.unsli3.rcvsli3.acc_len +=
-				bf_get(lpfc_rcqe_length, &seq_dmabuf->rcqe);
+				bf_get(lpfc_rcqe_length,
+				       &seq_dmabuf->cq_event.cqe.rcqe_cmpl);
 			iocbq->iocb.un.rcvels.remoteID = sid;
 			list_add_tail(&iocbq->list, &first_iocbq->list);
 		}
@@ -11014,11 +11022,11 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
  * Worker thread calls lpfc_sli4_handle_received_buffer, which will call the
  * appropriate receive function when the final frame in a sequence is received.
  **/
-int
-lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba)
+void
+lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba,
+				 struct hbq_dmabuf *dmabuf)
 {
-	LIST_HEAD(cmplq);
-	struct hbq_dmabuf *dmabuf, *seq_dmabuf;
+	struct hbq_dmabuf *seq_dmabuf;
 	struct fc_frame_header *fc_hdr;
 	struct lpfc_vport *vport;
 	uint32_t fcfi;
@@ -11027,54 +11035,50 @@ lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba)
 	/* Clear hba flag and get all received buffers into the cmplq */
 	spin_lock_irq(&phba->hbalock);
 	phba->hba_flag &= ~HBA_RECEIVE_BUFFER;
-	list_splice_init(&phba->rb_pend_list, &cmplq);
 	spin_unlock_irq(&phba->hbalock);
 
 	/* Process each received buffer */
-	while ((dmabuf = lpfc_sli_hbqbuf_get(&cmplq)) != NULL) {
-		fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
-		/* check to see if this a valid type of frame */
-		if (lpfc_fc_frame_check(phba, fc_hdr)) {
-			lpfc_in_buf_free(phba, &dmabuf->dbuf);
-			continue;
-		}
-		fcfi = bf_get(lpfc_rcqe_fcf_id, &dmabuf->rcqe);
-		vport = lpfc_fc_frame_to_vport(phba, fc_hdr, fcfi);
-		if (!vport) {
-			/* throw out the frame */
-			lpfc_in_buf_free(phba, &dmabuf->dbuf);
-			continue;
-		}
-		/* Link this frame */
-		seq_dmabuf = lpfc_fc_frame_add(vport, dmabuf);
-		if (!seq_dmabuf) {
-			/* unable to add frame to vport - throw it out */
-			lpfc_in_buf_free(phba, &dmabuf->dbuf);
-			continue;
-		}
-		/* If not last frame in sequence continue processing frames. */
-		if (!lpfc_seq_complete(seq_dmabuf)) {
-			/*
-			 * When saving off frames post a new one and mark this
-			 * frame to be freed when it is finished.
-			 **/
-			lpfc_sli_hbqbuf_fill_hbqs(phba, LPFC_ELS_HBQ, 1);
-			dmabuf->tag = -1;
-			continue;
-		}
-		fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
-		iocbq = lpfc_prep_seq(vport, seq_dmabuf);
-		if (!lpfc_complete_unsol_iocb(phba,
-					      &phba->sli.ring[LPFC_ELS_RING],
-					      iocbq, fc_hdr->fh_r_ctl,
-					      fc_hdr->fh_type))
-			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
-				"2540 Ring %d handler: unexpected Rctl "
-				"x%x Type x%x received\n",
-				LPFC_ELS_RING,
-				fc_hdr->fh_r_ctl, fc_hdr->fh_type);
-	};
-	return 0;
+	fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
+	/* check to see if this a valid type of frame */
+	if (lpfc_fc_frame_check(phba, fc_hdr)) {
+		lpfc_in_buf_free(phba, &dmabuf->dbuf);
+		return;
+	}
+	fcfi = bf_get(lpfc_rcqe_fcf_id, &dmabuf->cq_event.cqe.rcqe_cmpl);
+	vport = lpfc_fc_frame_to_vport(phba, fc_hdr, fcfi);
+	if (!vport) {
+		/* throw out the frame */
+		lpfc_in_buf_free(phba, &dmabuf->dbuf);
+		return;
+	}
+	/* Link this frame */
+	seq_dmabuf = lpfc_fc_frame_add(vport, dmabuf);
+	if (!seq_dmabuf) {
+		/* unable to add frame to vport - throw it out */
+		lpfc_in_buf_free(phba, &dmabuf->dbuf);
+		return;
+	}
+	/* If not last frame in sequence continue processing frames. */
+	if (!lpfc_seq_complete(seq_dmabuf)) {
+		/*
+		 * When saving off frames post a new one and mark this
+		 * frame to be freed when it is finished.
+		 **/
+		lpfc_sli_hbqbuf_fill_hbqs(phba, LPFC_ELS_HBQ, 1);
+		dmabuf->tag = -1;
+		return;
+	}
+	fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
+	iocbq = lpfc_prep_seq(vport, seq_dmabuf);
+	if (!lpfc_complete_unsol_iocb(phba,
+				      &phba->sli.ring[LPFC_ELS_RING],
+				      iocbq, fc_hdr->fh_r_ctl,
+				      fc_hdr->fh_type))
+		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+			"2540 Ring %d handler: unexpected Rctl "
+			"x%x Type x%x received\n",
+			LPFC_ELS_RING,
+			fc_hdr->fh_r_ctl, fc_hdr->fh_type);
 }
 
 /**
@@ -11542,7 +11546,8 @@ lpfc_sli4_read_fcf_record(struct lpfc_hba *phba, uint16_t fcf_index)
 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 				"2000 Failed to allocate mbox for "
 				"READ_FCF cmd\n");
-		return -ENOMEM;
+		error = -ENOMEM;
+		goto fail_fcfscan;
 	}
 
 	req_len = sizeof(struct fcf_record) +
@@ -11558,8 +11563,8 @@ lpfc_sli4_read_fcf_record(struct lpfc_hba *phba, uint16_t fcf_index)
 				"0291 Allocated DMA memory size (x%x) is "
 				"less than the requested DMA memory "
 				"size (x%x)\n", alloc_len, req_len);
-		lpfc_sli4_mbox_cmd_free(phba, mboxq);
-		return -ENOMEM;
+		error = -ENOMEM;
+		goto fail_fcfscan;
 	}
 
 	/* Get the first SGE entry from the non-embedded DMA memory. This
@@ -11571,8 +11576,8 @@ lpfc_sli4_read_fcf_record(struct lpfc_hba *phba, uint16_t fcf_index)
 		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
 				"2527 Failed to get the non-embedded SGE "
 				"virtual address\n");
-		lpfc_sli4_mbox_cmd_free(phba, mboxq);
-		return -ENOMEM;
+		error = -ENOMEM;
+		goto fail_fcfscan;
 	}
 	virt_addr = mboxq->sge_array->addr[0];
 	read_fcf = (struct lpfc_mbx_read_fcf_tbl *)virt_addr;
@@ -11586,7 +11591,6 @@ lpfc_sli4_read_fcf_record(struct lpfc_hba *phba, uint16_t fcf_index)
 	mboxq->mbox_cmpl = lpfc_mbx_cmpl_read_fcf_record;
 	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
 	if (rc == MBX_NOT_FINISHED) {
-		lpfc_sli4_mbox_cmd_free(phba, mboxq);
 		error = -EIO;
 	} else {
 		spin_lock_irq(&phba->hbalock);
@@ -11594,6 +11598,15 @@ lpfc_sli4_read_fcf_record(struct lpfc_hba *phba, uint16_t fcf_index)
 		spin_unlock_irq(&phba->hbalock);
 		error = 0;
 	}
+fail_fcfscan:
+	if (error) {
+		if (mboxq)
+			lpfc_sli4_mbox_cmd_free(phba, mboxq);
+		/* FCF scan failed, clear FCF_DISC_INPROGRESS flag */
+		spin_lock_irq(&phba->hbalock);
+		phba->hba_flag &= ~FCF_DISC_INPROGRESS;
+		spin_unlock_irq(&phba->hbalock);
+	}
 	return error;
 }
11599 | 11612 | ||