Diffstat (limited to 'drivers/scsi/lpfc/lpfc_sli.c')
-rw-r--r-- | drivers/scsi/lpfc/lpfc_sli.c | 264 |
1 file changed, 195 insertions, 69 deletions
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index ff04daf18f48..acc43b061ba1 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -4139,8 +4139,11 @@ lpfc_sli4_read_fcoe_params(struct lpfc_hba *phba, | |||
4139 | return -EIO; | 4139 | return -EIO; |
4140 | } | 4140 | } |
4141 | data_length = mqe->un.mb_words[5]; | 4141 | data_length = mqe->un.mb_words[5]; |
4142 | if (data_length > DMP_FCOEPARAM_RGN_SIZE) | 4142 | if (data_length > DMP_FCOEPARAM_RGN_SIZE) { |
4143 | lpfc_mbuf_free(phba, mp->virt, mp->phys); | ||
4144 | kfree(mp); | ||
4143 | return -EIO; | 4145 | return -EIO; |
4146 | } | ||
4144 | 4147 | ||
4145 | lpfc_parse_fcoe_conf(phba, mp->virt, data_length); | 4148 | lpfc_parse_fcoe_conf(phba, mp->virt, data_length); |
4146 | lpfc_mbuf_free(phba, mp->virt, mp->phys); | 4149 | lpfc_mbuf_free(phba, mp->virt, mp->phys); |
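A minimal user-space sketch of the rule the added lines above enforce: every early return taken after an allocation must release that allocation first, otherwise the DMP_FCOEPARAM_RGN_SIZE check leaks the buffer. read_region(), parse_region() and MAX_REGION_SIZE are invented names for illustration, not driver APIs.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define MAX_REGION_SIZE 128

/* Stand-in for lpfc_parse_fcoe_conf(): consume the buffer contents. */
static int parse_region(const unsigned char *buf, size_t len)
{
	return (buf && len) ? 0 : -1;
}

static int read_region(size_t data_length)
{
	unsigned char *buf = malloc(MAX_REGION_SIZE);

	if (!buf)
		return -1;
	if (data_length > MAX_REGION_SIZE) {
		free(buf);	/* without this, the early return leaks buf */
		return -1;
	}
	memset(buf, 0, data_length);
	parse_region(buf, data_length);
	free(buf);		/* normal path frees the buffer as before */
	return 0;
}

int main(void)
{
	printf("ok=%d too_big=%d\n", read_region(64), read_region(4096));
	return 0;
}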
@@ -4211,27 +4214,6 @@ lpfc_sli4_read_rev(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq, | |||
4211 | return -EIO; | 4214 | return -EIO; |
4212 | } | 4215 | } |
4213 | 4216 | ||
4214 | lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, | ||
4215 | "(%d):0380 Mailbox cmd x%x Status x%x " | ||
4216 | "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x " | ||
4217 | "x%x x%x x%x x%x x%x x%x x%x x%x x%x " | ||
4218 | "CQ: x%x x%x x%x x%x\n", | ||
4219 | mboxq->vport ? mboxq->vport->vpi : 0, | ||
4220 | bf_get(lpfc_mqe_command, mqe), | ||
4221 | bf_get(lpfc_mqe_status, mqe), | ||
4222 | mqe->un.mb_words[0], mqe->un.mb_words[1], | ||
4223 | mqe->un.mb_words[2], mqe->un.mb_words[3], | ||
4224 | mqe->un.mb_words[4], mqe->un.mb_words[5], | ||
4225 | mqe->un.mb_words[6], mqe->un.mb_words[7], | ||
4226 | mqe->un.mb_words[8], mqe->un.mb_words[9], | ||
4227 | mqe->un.mb_words[10], mqe->un.mb_words[11], | ||
4228 | mqe->un.mb_words[12], mqe->un.mb_words[13], | ||
4229 | mqe->un.mb_words[14], mqe->un.mb_words[15], | ||
4230 | mqe->un.mb_words[16], mqe->un.mb_words[50], | ||
4231 | mboxq->mcqe.word0, | ||
4232 | mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1, | ||
4233 | mboxq->mcqe.trailer); | ||
4234 | |||
4235 | /* | 4217 | /* |
4236 | * The available vpd length cannot be bigger than the | 4218 | * The available vpd length cannot be bigger than the |
4237 | * DMA buffer passed to the port. Catch the less than | 4219 | * DMA buffer passed to the port. Catch the less than |
@@ -4337,21 +4319,18 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba) | |||
4337 | goto out_free_vpd; | 4319 | goto out_free_vpd; |
4338 | 4320 | ||
4339 | mqe = &mboxq->u.mqe; | 4321 | mqe = &mboxq->u.mqe; |
4340 | if ((bf_get(lpfc_mbx_rd_rev_sli_lvl, | 4322 | phba->sli_rev = bf_get(lpfc_mbx_rd_rev_sli_lvl, &mqe->un.read_rev); |
4341 | &mqe->un.read_rev) != LPFC_SLI_REV4) || | 4323 | if (bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev)) |
4342 | (bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev) == 0)) { | 4324 | phba->hba_flag |= HBA_FCOE_SUPPORT; |
4325 | if (phba->sli_rev != LPFC_SLI_REV4 || | ||
4326 | !(phba->hba_flag & HBA_FCOE_SUPPORT)) { | ||
4343 | lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, | 4327 | lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, |
4344 | "0376 READ_REV Error. SLI Level %d " | 4328 | "0376 READ_REV Error. SLI Level %d " |
4345 | "FCoE enabled %d\n", | 4329 | "FCoE enabled %d\n", |
4346 | bf_get(lpfc_mbx_rd_rev_sli_lvl, &mqe->un.read_rev), | 4330 | phba->sli_rev, phba->hba_flag & HBA_FCOE_SUPPORT); |
4347 | bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev)); | ||
4348 | rc = -EIO; | 4331 | rc = -EIO; |
4349 | goto out_free_vpd; | 4332 | goto out_free_vpd; |
4350 | } | 4333 | } |
4351 | /* Single threaded at this point, no need for lock */ | ||
4352 | spin_lock_irq(&phba->hbalock); | ||
4353 | phba->hba_flag |= HBA_FCOE_SUPPORT; | ||
4354 | spin_unlock_irq(&phba->hbalock); | ||
4355 | /* | 4334 | /* |
4356 | * Evaluate the read rev and vpd data. Populate the driver | 4335 | * Evaluate the read rev and vpd data. Populate the driver |
4357 | * state with the results. If this routine fails, the failure | 4336 | * state with the results. If this routine fails, the failure |
@@ -4365,8 +4344,32 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba) | |||
4365 | rc = 0; | 4344 | rc = 0; |
4366 | } | 4345 | } |
4367 | 4346 | ||
4368 | /* By now, we should determine the SLI revision, hard code for now */ | 4347 | /* Save information as VPD data */ |
4369 | phba->sli_rev = LPFC_SLI_REV4; | 4348 | phba->vpd.rev.biuRev = mqe->un.read_rev.first_hw_rev; |
4349 | phba->vpd.rev.smRev = mqe->un.read_rev.second_hw_rev; | ||
4350 | phba->vpd.rev.endecRev = mqe->un.read_rev.third_hw_rev; | ||
4351 | phba->vpd.rev.fcphHigh = bf_get(lpfc_mbx_rd_rev_fcph_high, | ||
4352 | &mqe->un.read_rev); | ||
4353 | phba->vpd.rev.fcphLow = bf_get(lpfc_mbx_rd_rev_fcph_low, | ||
4354 | &mqe->un.read_rev); | ||
4355 | phba->vpd.rev.feaLevelHigh = bf_get(lpfc_mbx_rd_rev_ftr_lvl_high, | ||
4356 | &mqe->un.read_rev); | ||
4357 | phba->vpd.rev.feaLevelLow = bf_get(lpfc_mbx_rd_rev_ftr_lvl_low, | ||
4358 | &mqe->un.read_rev); | ||
4359 | phba->vpd.rev.sli1FwRev = mqe->un.read_rev.fw_id_rev; | ||
4360 | memcpy(phba->vpd.rev.sli1FwName, mqe->un.read_rev.fw_name, 16); | ||
4361 | phba->vpd.rev.sli2FwRev = mqe->un.read_rev.ulp_fw_id_rev; | ||
4362 | memcpy(phba->vpd.rev.sli2FwName, mqe->un.read_rev.ulp_fw_name, 16); | ||
4363 | phba->vpd.rev.opFwRev = mqe->un.read_rev.fw_id_rev; | ||
4364 | memcpy(phba->vpd.rev.opFwName, mqe->un.read_rev.fw_name, 16); | ||
4365 | lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, | ||
4366 | "(%d):0380 READ_REV Status x%x " | ||
4367 | "fw_rev:%s fcphHi:%x fcphLo:%x flHi:%x flLo:%x\n", | ||
4368 | mboxq->vport ? mboxq->vport->vpi : 0, | ||
4369 | bf_get(lpfc_mqe_status, mqe), | ||
4370 | phba->vpd.rev.opFwName, | ||
4371 | phba->vpd.rev.fcphHigh, phba->vpd.rev.fcphLow, | ||
4372 | phba->vpd.rev.feaLevelHigh, phba->vpd.rev.feaLevelLow); | ||
4370 | 4373 | ||
4371 | /* | 4374 | /* |
4372 | * Discover the port's supported feature set and match it against the | 4375 | * Discover the port's supported feature set and match it against the |
@@ -4491,8 +4494,10 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba) | |||
4491 | rc = -ENODEV; | 4494 | rc = -ENODEV; |
4492 | goto out_free_vpd; | 4495 | goto out_free_vpd; |
4493 | } | 4496 | } |
4494 | /* Temporary initialization of lpfc_fip_flag to non-fip */ | 4497 | if (phba->cfg_enable_fip) |
4495 | bf_set(lpfc_fip_flag, &phba->sli4_hba.sli4_flags, 0); | 4498 | bf_set(lpfc_fip_flag, &phba->sli4_hba.sli4_flags, 1); |
4499 | else | ||
4500 | bf_set(lpfc_fip_flag, &phba->sli4_hba.sli4_flags, 0); | ||
4496 | 4501 | ||
4497 | /* Set up all the queues to the device */ | 4502 | /* Set up all the queues to the device */ |
4498 | rc = lpfc_sli4_queue_setup(phba); | 4503 | rc = lpfc_sli4_queue_setup(phba); |
@@ -5030,6 +5035,92 @@ out_not_finished: | |||
5030 | } | 5035 | } |
5031 | 5036 | ||
5032 | /** | 5037 | /** |
5038 | * lpfc_sli4_async_mbox_block - Block posting SLI4 asynchronous mailbox command | ||
5039 | * @phba: Pointer to HBA context object. | ||
5040 | * | ||
5041 | * The function blocks the posting of SLI4 asynchronous mailbox commands from ||
5042 | * the driver internal pending mailbox queue. It then tries to wait out any ||
5043 | * outstanding mailbox command before returning. ||
5044 | * | ||
5045 | * Returns: | ||
5046 | * 0 - any outstanding mailbox command completed; 1 - the wait for the ||
5047 | * outstanding mailbox command timed out. ||
5048 | **/ | ||
5049 | static int | ||
5050 | lpfc_sli4_async_mbox_block(struct lpfc_hba *phba) | ||
5051 | { | ||
5052 | struct lpfc_sli *psli = &phba->sli; | ||
5053 | uint8_t actcmd = MBX_HEARTBEAT; | ||
5054 | int rc = 0; | ||
5055 | unsigned long timeout; | ||
5056 | |||
5057 | /* Mark the asynchronous mailbox command posting as blocked */ | ||
5058 | spin_lock_irq(&phba->hbalock); | ||
5059 | psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK; | ||
5060 | if (phba->sli.mbox_active) | ||
5061 | actcmd = phba->sli.mbox_active->u.mb.mbxCommand; | ||
5062 | spin_unlock_irq(&phba->hbalock); | ||
5063 | /* Determine how long we might wait for the active mailbox | ||
5064 | * command to be gracefully completed by firmware. | ||
5065 | */ | ||
5066 | timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, actcmd) * 1000) + | ||
5067 | jiffies; | ||
5068 | /* Wait for the outstanding mailbox command to complete */ ||
5069 | while (phba->sli.mbox_active) { | ||
5070 | /* Check active mailbox complete status every 2ms */ | ||
5071 | msleep(2); | ||
5072 | if (time_after(jiffies, timeout)) { | ||
5073 | /* Timeout, mark the outstanding cmd as not complete */ ||
5074 | rc = 1; | ||
5075 | break; | ||
5076 | } | ||
5077 | } | ||
5078 | |||
5079 | /* Cannot cleanly block async mailbox commands, fail it */ ||
5080 | if (rc) { | ||
5081 | spin_lock_irq(&phba->hbalock); | ||
5082 | psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK; | ||
5083 | spin_unlock_irq(&phba->hbalock); | ||
5084 | } | ||
5085 | return rc; | ||
5086 | } | ||
5087 | |||
5088 | /** | ||
5089 | * lpfc_sli4_async_mbox_unblock - Unblock posting of SLI4 async mailbox commands ||
5090 | * @phba: Pointer to HBA context object. | ||
5091 | * | ||
5092 | * The function unblocks and resumes posting of SLI4 asynchronous mailbox ||
5093 | * commands from the driver internal pending mailbox queue. It makes sure ||
5094 | * that there is no outstanding mailbox command before resuming posting of ||
5095 | * asynchronous mailbox commands. If, for any reason, there is an outstanding ||
5096 | * mailbox command, it tries to wait it out before resuming asynchronous ||
5097 | * mailbox command posting. ||
5098 | **/ | ||
5099 | static void | ||
5100 | lpfc_sli4_async_mbox_unblock(struct lpfc_hba *phba) | ||
5101 | { | ||
5102 | struct lpfc_sli *psli = &phba->sli; | ||
5103 | |||
5104 | spin_lock_irq(&phba->hbalock); | ||
5105 | if (!(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) { | ||
5106 | /* Asynchronous mailbox posting is not blocked, do nothing */ | ||
5107 | spin_unlock_irq(&phba->hbalock); | ||
5108 | return; | ||
5109 | } | ||
5110 | |||
5111 | /* The outstanding synchronous mailbox command is guaranteed to complete, ||
5112 | * either successfully or by timeout; after a timeout the outstanding ||
5113 | * mailbox command is always removed, so just unblock posting of async ||
5114 | * mailbox commands and resume. ||
5115 | */ ||
5116 | psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK; | ||
5117 | spin_unlock_irq(&phba->hbalock); | ||
5118 | |||
5119 | /* Wake up worker thread to post asynchronous mailbox commands */ ||
5120 | lpfc_worker_wake_up(phba); | ||
5121 | } | ||
5122 | |||
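A rough user-space analogue of the block/poll/unblock sequence implemented by the two new helpers above, assuming a mutex in place of hbalock; struct mbox_ctrl, mbox_block() and mbox_unblock() are invented names, and the 2 ms poll against a millisecond deadline only mirrors the shape of the driver code, not its mailbox handling.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

struct mbox_ctrl {
	pthread_mutex_t lock;		/* stands in for phba->hbalock */
	bool async_blocked;		/* analogue of LPFC_SLI_ASYNC_MBX_BLK */
	bool mbox_active;		/* analogue of phba->sli.mbox_active */
};

/* Block async posting, then wait (up to timeout_ms) for the active command. */
static int mbox_block(struct mbox_ctrl *c, long timeout_ms)
{
	struct timespec start, now;
	int rc = 0;

	pthread_mutex_lock(&c->lock);
	c->async_blocked = true;
	pthread_mutex_unlock(&c->lock);

	clock_gettime(CLOCK_MONOTONIC, &start);
	while (c->mbox_active) {
		usleep(2000);		/* check completion every 2 ms */
		clock_gettime(CLOCK_MONOTONIC, &now);
		if ((now.tv_sec - start.tv_sec) * 1000 +
		    (now.tv_nsec - start.tv_nsec) / 1000000 > timeout_ms) {
			rc = 1;		/* timed out waiting */
			break;
		}
	}
	if (rc) {			/* could not block cleanly, undo it */
		pthread_mutex_lock(&c->lock);
		c->async_blocked = false;
		pthread_mutex_unlock(&c->lock);
	}
	return rc;
}

static void mbox_unblock(struct mbox_ctrl *c)
{
	pthread_mutex_lock(&c->lock);
	c->async_blocked = false;
	pthread_mutex_unlock(&c->lock);
	/* a real driver would wake its worker thread here */
}

int main(void)
{
	struct mbox_ctrl c = { PTHREAD_MUTEX_INITIALIZER, false, false };

	if (!mbox_block(&c, 100)) {
		/* the synchronous command would be issued here */
		mbox_unblock(&c);
	}
	printf("async posting blocked and unblocked\n");
	return 0;
}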
5123 | /** | ||
5033 | * lpfc_sli4_post_sync_mbox - Post an SLI4 mailbox to the bootstrap mailbox | 5124 | * lpfc_sli4_post_sync_mbox - Post an SLI4 mailbox to the bootstrap mailbox |
5034 | * @phba: Pointer to HBA context object. | 5125 | * @phba: Pointer to HBA context object. |
5035 | * @mboxq: Pointer to mailbox object. | 5126 | * @mboxq: Pointer to mailbox object. |
@@ -5204,14 +5295,35 @@ lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq, | |||
5204 | psli->sli_flag, flag); | 5295 | psli->sli_flag, flag); |
5205 | return rc; | 5296 | return rc; |
5206 | } else if (flag == MBX_POLL) { | 5297 | } else if (flag == MBX_POLL) { |
5207 | lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, | 5298 | lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI, |
5208 | "(%d):2542 Mailbox command x%x (x%x) " | 5299 | "(%d):2542 Try to issue mailbox command " |
5209 | "cannot issue Data: x%x x%x\n", | 5300 | "x%x (x%x) synchronously ahead of async" |
5301 | "mailbox command queue: x%x x%x\n", | ||
5210 | mboxq->vport ? mboxq->vport->vpi : 0, | 5302 | mboxq->vport ? mboxq->vport->vpi : 0, |
5211 | mboxq->u.mb.mbxCommand, | 5303 | mboxq->u.mb.mbxCommand, |
5212 | lpfc_sli4_mbox_opcode_get(phba, mboxq), | 5304 | lpfc_sli4_mbox_opcode_get(phba, mboxq), |
5213 | psli->sli_flag, flag); | 5305 | psli->sli_flag, flag); |
5214 | return -EIO; | 5306 | /* Try to block the asynchronous mailbox posting */ |
5307 | rc = lpfc_sli4_async_mbox_block(phba); | ||
5308 | if (!rc) { | ||
5309 | /* Successfully blocked, now issue sync mbox cmd */ | ||
5310 | rc = lpfc_sli4_post_sync_mbox(phba, mboxq); | ||
5311 | if (rc != MBX_SUCCESS) | ||
5312 | lpfc_printf_log(phba, KERN_ERR, | ||
5313 | LOG_MBOX | LOG_SLI, | ||
5314 | "(%d):2597 Mailbox command " | ||
5315 | "x%x (x%x) cannot issue " | ||
5316 | "Data: x%x x%x\n", | ||
5317 | mboxq->vport ? | ||
5318 | mboxq->vport->vpi : 0, | ||
5319 | mboxq->u.mb.mbxCommand, | ||
5320 | lpfc_sli4_mbox_opcode_get(phba, | ||
5321 | mboxq), | ||
5322 | psli->sli_flag, flag); | ||
5323 | /* Unblock the async mailbox posting afterward */ | ||
5324 | lpfc_sli4_async_mbox_unblock(phba); | ||
5325 | } | ||
5326 | return rc; | ||
5215 | } | 5327 | } |
5216 | 5328 | ||
5217 | /* Now, interrupt mode asynchronous mailbox command */ | 5329 |
@@ -5749,18 +5861,13 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, | |||
5749 | 5861 | ||
5750 | fip = bf_get(lpfc_fip_flag, &phba->sli4_hba.sli4_flags); | 5862 | fip = bf_get(lpfc_fip_flag, &phba->sli4_hba.sli4_flags); |
5751 | /* The fcp commands will set command type */ | 5863 | /* The fcp commands will set command type */ |
5752 | if ((!(iocbq->iocb_flag & LPFC_IO_FCP)) && (!fip)) | 5864 | if (iocbq->iocb_flag & LPFC_IO_FCP) |
5753 | command_type = ELS_COMMAND_NON_FIP; | ||
5754 | else if (!(iocbq->iocb_flag & LPFC_IO_FCP)) | ||
5755 | command_type = ELS_COMMAND_FIP; | ||
5756 | else if (iocbq->iocb_flag & LPFC_IO_FCP) | ||
5757 | command_type = FCP_COMMAND; | 5865 | command_type = FCP_COMMAND; |
5758 | else { | 5866 | else if (fip && (iocbq->iocb_flag & LPFC_FIP_ELS)) |
5759 | lpfc_printf_log(phba, KERN_ERR, LOG_SLI, | 5867 | command_type = ELS_COMMAND_FIP; |
5760 | "2019 Invalid cmd 0x%x\n", | 5868 | else |
5761 | iocbq->iocb.ulpCommand); | 5869 | command_type = ELS_COMMAND_NON_FIP; |
5762 | return IOCB_ERROR; | 5870 | |
5763 | } | ||
5764 | /* Some of the fields are in the right position already */ | 5871 | /* Some of the fields are in the right position already */ |
5765 | memcpy(wqe, &iocbq->iocb, sizeof(union lpfc_wqe)); | 5872 | memcpy(wqe, &iocbq->iocb, sizeof(union lpfc_wqe)); |
5766 | abort_tag = (uint32_t) iocbq->iotag; | 5873 | abort_tag = (uint32_t) iocbq->iotag; |
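The rewritten selection above derives the command type purely from flags: FCP I/O first, FIP ELS only when both the global FIP setting and the iocb's LPFC_FIP_ELS flag are set, and plain ELS otherwise, which removes the old unreachable "Invalid cmd" branch. A stand-alone sketch of that decision, with invented flag and enum names:

#include <stdbool.h>
#include <stdio.h>

enum cmd_type { ELS_NON_FIP, ELS_FIP, FCP };	/* illustrative values only */

#define IO_FCP	(1u << 0)	/* stands in for LPFC_IO_FCP */
#define FIP_ELS	(1u << 1)	/* stands in for LPFC_FIP_ELS */

static enum cmd_type classify(unsigned int iocb_flag, bool fip_enabled)
{
	if (iocb_flag & IO_FCP)
		return FCP;			/* FCP commands set their own type */
	if (fip_enabled && (iocb_flag & FIP_ELS))
		return ELS_FIP;			/* FIP ELS only while FIP is active */
	return ELS_NON_FIP;			/* everything else is plain ELS */
}

int main(void)
{
	printf("%d %d %d\n",
	       classify(IO_FCP, true),		/* -> FCP */
	       classify(FIP_ELS, true),		/* -> ELS_FIP */
	       classify(FIP_ELS, false));	/* FIP flag ignored: ELS_NON_FIP */
	return 0;
}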
@@ -5814,11 +5921,6 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, | |||
5814 | bf_set(lpfc_wqe_gen_context, &wqe->generic, | 5921 | bf_set(lpfc_wqe_gen_context, &wqe->generic, |
5815 | iocbq->iocb.ulpContext); | 5922 | iocbq->iocb.ulpContext); |
5816 | 5923 | ||
5817 | if (iocbq->vport->fc_myDID != 0) { | ||
5818 | bf_set(els_req64_sid, &wqe->els_req, | ||
5819 | iocbq->vport->fc_myDID); | ||
5820 | bf_set(els_req64_sp, &wqe->els_req, 1); | ||
5821 | } | ||
5822 | bf_set(lpfc_wqe_gen_ct, &wqe->generic, ct); | 5924 | bf_set(lpfc_wqe_gen_ct, &wqe->generic, ct); |
5823 | bf_set(lpfc_wqe_gen_pu, &wqe->generic, 0); | 5925 | bf_set(lpfc_wqe_gen_pu, &wqe->generic, 0); |
5824 | /* CCP CCPE PV PRI in word10 were set in the memcpy */ | 5926 | /* CCP CCPE PV PRI in word10 were set in the memcpy */ |
@@ -5877,14 +5979,19 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, | |||
5877 | * is set and we are sending our 2nd or greater command on | 5979 | * is set and we are sending our 2nd or greater command on |
5878 | * this exchange. | 5980 | * this exchange. |
5879 | */ | 5981 | */ |
5982 | /* Always open the exchange */ | ||
5983 | bf_set(wqe_xc, &wqe->fcp_iread.wqe_com, 0); | ||
5880 | 5984 | ||
5881 | /* ALLOW read & write to fall through to ICMD64 */ | 5985 | wqe->words[10] &= 0xffff0000; /* zero out ebde count */ |
5986 | bf_set(lpfc_wqe_gen_pu, &wqe->generic, iocbq->iocb.ulpPU); | ||
5987 | break; | ||
5882 | case CMD_FCP_ICMND64_CR: | 5988 | case CMD_FCP_ICMND64_CR: |
5883 | /* Always open the exchange */ | 5989 | /* Always open the exchange */ |
5884 | bf_set(wqe_xc, &wqe->fcp_iread.wqe_com, 0); | 5990 | bf_set(wqe_xc, &wqe->fcp_iread.wqe_com, 0); |
5885 | 5991 | ||
5992 | wqe->words[4] = 0; | ||
5886 | wqe->words[10] &= 0xffff0000; /* zero out ebde count */ | 5993 | wqe->words[10] &= 0xffff0000; /* zero out ebde count */ |
5887 | bf_set(lpfc_wqe_gen_pu, &wqe->generic, iocbq->iocb.ulpPU); | 5994 | bf_set(lpfc_wqe_gen_pu, &wqe->generic, 0); |
5888 | break; | 5995 | break; |
5889 | case CMD_GEN_REQUEST64_CR: | 5996 | case CMD_GEN_REQUEST64_CR: |
5890 | /* word3 command length is described as byte offset to the | 5997 | /* word3 command length is described as byte offset to the |
@@ -7247,6 +7354,32 @@ lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba, | |||
7247 | } | 7354 | } |
7248 | 7355 | ||
7249 | /** | 7356 | /** |
7357 | * lpfc_chk_iocb_flg - Test IOCB flag with lock held. | ||
7358 | * @phba: Pointer to HBA context object. ||
7359 | * @piocbq: Pointer to command iocb. | ||
7360 | * @flag: Flag to test. | ||
7361 | * | ||
7362 | * This routine grabs the hbalock and then tests the iocb_flag to ||
7363 | * see if the passed-in flag is set. ||
7364 | * Returns: | ||
7365 | * 1 if flag is set. | ||
7366 | * 0 if flag is not set. | ||
7367 | **/ | ||
7368 | static int | ||
7369 | lpfc_chk_iocb_flg(struct lpfc_hba *phba, | ||
7370 | struct lpfc_iocbq *piocbq, uint32_t flag) | ||
7371 | { | ||
7372 | unsigned long iflags; | ||
7373 | int ret; | ||
7374 | |||
7375 | spin_lock_irqsave(&phba->hbalock, iflags); | ||
7376 | ret = piocbq->iocb_flag & flag; | ||
7377 | spin_unlock_irqrestore(&phba->hbalock, iflags); | ||
7378 | return ret; | ||
7379 | |||
7380 | } | ||
7381 | |||
7382 | /** | ||
7250 | * lpfc_sli_issue_iocb_wait - Synchronous function to issue iocb commands | 7383 | * lpfc_sli_issue_iocb_wait - Synchronous function to issue iocb commands |
7251 | * @phba: Pointer to HBA context object.. | 7384 | * @phba: Pointer to HBA context object.. |
7252 | * @pring: Pointer to sli ring. | 7385 | * @pring: Pointer to sli ring. |
@@ -7313,7 +7446,7 @@ lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba, | |||
7313 | if (retval == IOCB_SUCCESS) { | 7446 | if (retval == IOCB_SUCCESS) { |
7314 | timeout_req = timeout * HZ; | 7447 | timeout_req = timeout * HZ; |
7315 | timeleft = wait_event_timeout(done_q, | 7448 | timeleft = wait_event_timeout(done_q, |
7316 | piocb->iocb_flag & LPFC_IO_WAKE, | 7449 | lpfc_chk_iocb_flg(phba, piocb, LPFC_IO_WAKE), |
7317 | timeout_req); | 7450 | timeout_req); |
7318 | 7451 | ||
7319 | if (piocb->iocb_flag & LPFC_IO_WAKE) { | 7452 | if (piocb->iocb_flag & LPFC_IO_WAKE) { |
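lpfc_chk_iocb_flg() makes wait_event_timeout() sample iocb_flag only while holding hbalock, the same lock under which the completion path sets LPFC_IO_WAKE, so the waiter cannot read the flag mid-update. A user-space sketch of that discipline with a pthread mutex/condvar pair; every name here is invented for illustration.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t done = PTHREAD_COND_INITIALIZER;
static bool io_wake;			/* analogue of LPFC_IO_WAKE */

static void *completer(void *arg)
{
	(void)arg;
	usleep(10000);			/* pretend the command completes */
	pthread_mutex_lock(&lock);
	io_wake = true;			/* set the flag under the lock... */
	pthread_cond_signal(&done);	/* ...then wake the waiter */
	pthread_mutex_unlock(&lock);
	return NULL;
}

/* Wait for io_wake, re-checking it only while the lock is held. */
static int wait_for_wake(int timeout_s)
{
	struct timespec deadline;
	int rc = 0;

	clock_gettime(CLOCK_REALTIME, &deadline);
	deadline.tv_sec += timeout_s;

	pthread_mutex_lock(&lock);
	while (!io_wake && rc == 0)
		rc = pthread_cond_timedwait(&done, &lock, &deadline);
	pthread_mutex_unlock(&lock);
	return io_wake ? 0 : -1;	/* 0 on wake-up, -1 on timeout */
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, completer, NULL);
	printf("wait rc=%d\n", wait_for_wake(2));
	pthread_join(t, NULL);
	return 0;
}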
@@ -7498,20 +7631,16 @@ lpfc_sli_eratt_read(struct lpfc_hba *phba) | |||
7498 | if ((HS_FFER1 & phba->work_hs) && | 7631 | if ((HS_FFER1 & phba->work_hs) && |
7499 | ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 | | 7632 | ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 | |
7500 | HS_FFER6 | HS_FFER7) & phba->work_hs)) { | 7633 | HS_FFER6 | HS_FFER7) & phba->work_hs)) { |
7501 | spin_lock_irq(&phba->hbalock); | ||
7502 | phba->hba_flag |= DEFER_ERATT; | 7634 | phba->hba_flag |= DEFER_ERATT; |
7503 | spin_unlock_irq(&phba->hbalock); | ||
7504 | /* Clear all interrupt enable conditions */ | 7635 | /* Clear all interrupt enable conditions */ |
7505 | writel(0, phba->HCregaddr); | 7636 | writel(0, phba->HCregaddr); |
7506 | readl(phba->HCregaddr); | 7637 | readl(phba->HCregaddr); |
7507 | } | 7638 | } |
7508 | 7639 | ||
7509 | /* Set the driver HA work bitmap */ | 7640 | /* Set the driver HA work bitmap */ |
7510 | spin_lock_irq(&phba->hbalock); | ||
7511 | phba->work_ha |= HA_ERATT; | 7641 | phba->work_ha |= HA_ERATT; |
7512 | /* Indicate polling handles this ERATT */ | 7642 | /* Indicate polling handles this ERATT */ |
7513 | phba->hba_flag |= HBA_ERATT_HANDLED; | 7643 | phba->hba_flag |= HBA_ERATT_HANDLED; |
7514 | spin_unlock_irq(&phba->hbalock); | ||
7515 | return 1; | 7644 | return 1; |
7516 | } | 7645 | } |
7517 | return 0; | 7646 | return 0; |
@@ -7557,12 +7686,10 @@ lpfc_sli4_eratt_read(struct lpfc_hba *phba) | |||
7557 | return 0; | 7686 | return 0; |
7558 | phba->work_status[0] = uerr_sta_lo; | 7687 | phba->work_status[0] = uerr_sta_lo; |
7559 | phba->work_status[1] = uerr_sta_hi; | 7688 | phba->work_status[1] = uerr_sta_hi; |
7560 | spin_lock_irq(&phba->hbalock); | ||
7561 | /* Set the driver HA work bitmap */ | 7689 | /* Set the driver HA work bitmap */ |
7562 | phba->work_ha |= HA_ERATT; | 7690 | phba->work_ha |= HA_ERATT; |
7563 | /* Indicate polling handles this ERATT */ | 7691 | /* Indicate polling handles this ERATT */ |
7564 | phba->hba_flag |= HBA_ERATT_HANDLED; | 7692 | phba->hba_flag |= HBA_ERATT_HANDLED; |
7565 | spin_unlock_irq(&phba->hbalock); | ||
7566 | return 1; | 7693 | return 1; |
7567 | } | 7694 | } |
7568 | } | 7695 | } |
@@ -9245,6 +9372,7 @@ lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t entry_size, | |||
9245 | kfree(dmabuf); | 9372 | kfree(dmabuf); |
9246 | goto out_fail; | 9373 | goto out_fail; |
9247 | } | 9374 | } |
9375 | memset(dmabuf->virt, 0, PAGE_SIZE); | ||
9248 | dmabuf->buffer_tag = x; | 9376 | dmabuf->buffer_tag = x; |
9249 | list_add_tail(&dmabuf->list, &queue->page_list); | 9377 | list_add_tail(&dmabuf->list, &queue->page_list); |
9250 | /* initialize queue's entry array */ | 9378 | /* initialize queue's entry array */ |
@@ -9667,7 +9795,7 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq, | |||
9667 | /* link the wq onto the parent cq child list */ | 9795 | /* link the wq onto the parent cq child list */ |
9668 | list_add_tail(&wq->list, &cq->child_list); | 9796 | list_add_tail(&wq->list, &cq->child_list); |
9669 | out: | 9797 | out: |
9670 | if (rc == MBX_TIMEOUT) | 9798 | if (rc != MBX_TIMEOUT) |
9671 | mempool_free(mbox, phba->mbox_mem_pool); | 9799 | mempool_free(mbox, phba->mbox_mem_pool); |
9672 | return status; | 9800 | return status; |
9673 | } | 9801 | } |
@@ -11020,10 +11148,7 @@ lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page) | |||
11020 | rpi_page->start_rpi); | 11148 | rpi_page->start_rpi); |
11021 | hdr_tmpl->rpi_paddr_lo = putPaddrLow(rpi_page->dmabuf->phys); | 11149 | hdr_tmpl->rpi_paddr_lo = putPaddrLow(rpi_page->dmabuf->phys); |
11022 | hdr_tmpl->rpi_paddr_hi = putPaddrHigh(rpi_page->dmabuf->phys); | 11150 | hdr_tmpl->rpi_paddr_hi = putPaddrHigh(rpi_page->dmabuf->phys); |
11023 | if (!phba->sli4_hba.intr_enable) | 11151 | rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); |
11024 | rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); | ||
11025 | else | ||
11026 | rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo); | ||
11027 | shdr = (union lpfc_sli4_cfg_shdr *) &hdr_tmpl->header.cfg_shdr; | 11152 | shdr = (union lpfc_sli4_cfg_shdr *) &hdr_tmpl->header.cfg_shdr; |
11028 | shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); | 11153 | shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); |
11029 | shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); | 11154 | shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); |
@@ -11363,6 +11488,7 @@ lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *phba, | |||
11363 | bf_set(lpfc_fcf_record_fc_map_1, fcf_record, phba->fc_map[1]); | 11488 | bf_set(lpfc_fcf_record_fc_map_1, fcf_record, phba->fc_map[1]); |
11364 | bf_set(lpfc_fcf_record_fc_map_2, fcf_record, phba->fc_map[2]); | 11489 | bf_set(lpfc_fcf_record_fc_map_2, fcf_record, phba->fc_map[2]); |
11365 | bf_set(lpfc_fcf_record_fcf_valid, fcf_record, 1); | 11490 | bf_set(lpfc_fcf_record_fcf_valid, fcf_record, 1); |
11491 | bf_set(lpfc_fcf_record_fcf_avail, fcf_record, 1); | ||
11366 | bf_set(lpfc_fcf_record_fcf_index, fcf_record, fcf_index); | 11492 | bf_set(lpfc_fcf_record_fcf_index, fcf_record, fcf_index); |
11367 | bf_set(lpfc_fcf_record_mac_addr_prov, fcf_record, | 11493 | bf_set(lpfc_fcf_record_mac_addr_prov, fcf_record, |
11368 | LPFC_FCF_FPMA | LPFC_FCF_SPMA); | 11494 | LPFC_FCF_FPMA | LPFC_FCF_SPMA); |