Diffstat (limited to 'drivers/scsi/lpfc/lpfc_sli.c')
 drivers/scsi/lpfc/lpfc_sli.c | 269 ++++++++++++++++++++++++++++++----------
 1 file changed, 219 insertions(+), 50 deletions(-)
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 049fb9a17b3f..7a61455140b6 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -212,7 +212,7 @@ lpfc_sli4_eq_get(struct lpfc_queue *q)
 	struct lpfc_eqe *eqe = q->qe[q->hba_index].eqe;
 
 	/* If the next EQE is not valid then we are done */
-	if (!bf_get(lpfc_eqe_valid, eqe))
+	if (!bf_get_le32(lpfc_eqe_valid, eqe))
 		return NULL;
 	/* If the host has not yet processed the next entry then we are done */
 	if (((q->hba_index + 1) % q->entry_count) == q->host_index)
@@ -247,7 +247,7 @@ lpfc_sli4_eq_release(struct lpfc_queue *q, bool arm)
 	/* while there are valid entries */
 	while (q->hba_index != q->host_index) {
 		temp_eqe = q->qe[q->host_index].eqe;
-		bf_set(lpfc_eqe_valid, temp_eqe, 0);
+		bf_set_le32(lpfc_eqe_valid, temp_eqe, 0);
 		released++;
 		q->host_index = ((q->host_index + 1) % q->entry_count);
 	}
@@ -285,7 +285,7 @@ lpfc_sli4_cq_get(struct lpfc_queue *q)
 	struct lpfc_cqe *cqe;
 
 	/* If the next CQE is not valid then we are done */
-	if (!bf_get(lpfc_cqe_valid, q->qe[q->hba_index].cqe))
+	if (!bf_get_le32(lpfc_cqe_valid, q->qe[q->hba_index].cqe))
 		return NULL;
 	/* If the host has not yet processed the next entry then we are done */
 	if (((q->hba_index + 1) % q->entry_count) == q->host_index)
@@ -321,7 +321,7 @@ lpfc_sli4_cq_release(struct lpfc_queue *q, bool arm)
 	/* while there are valid entries */
 	while (q->hba_index != q->host_index) {
 		temp_qe = q->qe[q->host_index].cqe;
-		bf_set(lpfc_cqe_valid, temp_qe, 0);
+		bf_set_le32(lpfc_cqe_valid, temp_qe, 0);
 		released++;
 		q->host_index = ((q->host_index + 1) % q->entry_count);
 	}
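The bf_get()/bf_set() to bf_get_le32()/bf_set_le32() conversions above are an endianness fix: SLI4 EQ/CQ entries live in DMA memory that the port writes in little-endian byte order, so the plain host-endian bitfield macros would misread the valid bit and event codes on big-endian hosts such as ppc64. A minimal sketch of the little-endian accessor pattern, assuming the usual lpfc name##_SHIFT/name##_MASK/name##_WORD convention (illustrative, not the verbatim lpfc_hw4.h macros):

    /* Read a bitfield from a little-endian 32-bit queue-entry word. */
    #define bf_get_le32(name, ptr) \
    	((le32_to_cpu((ptr)->name##_WORD) >> name##_SHIFT) & name##_MASK)

    /* Write a bitfield into a little-endian 32-bit queue-entry word,
     * preserving the other bits of that word. */
    #define bf_set_le32(name, ptr, value) \
    	((ptr)->name##_WORD = cpu_to_le32(((((value) & name##_MASK) << \
    		name##_SHIFT) | (le32_to_cpu((ptr)->name##_WORD) & \
    		~(name##_MASK << name##_SHIFT)))))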
@@ -1659,6 +1659,8 @@ lpfc_sli_chk_mbx_command(uint8_t mbxCommand)
 	case MBX_INIT_VPI:
 	case MBX_INIT_VFI:
 	case MBX_RESUME_RPI:
+	case MBX_READ_EVENT_LOG_STATUS:
+	case MBX_READ_EVENT_LOG:
 		ret = mbxCommand;
 		break;
 	default:
@@ -4296,7 +4298,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
4296 "2570 Failed to read FCoE parameters\n"); 4298 "2570 Failed to read FCoE parameters\n");
4297 4299
4298 /* Issue READ_REV to collect vpd and FW information. */ 4300 /* Issue READ_REV to collect vpd and FW information. */
4299 vpd_size = PAGE_SIZE; 4301 vpd_size = SLI4_PAGE_SIZE;
4300 vpd = kzalloc(vpd_size, GFP_KERNEL); 4302 vpd = kzalloc(vpd_size, GFP_KERNEL);
4301 if (!vpd) { 4303 if (!vpd) {
4302 rc = -ENOMEM; 4304 rc = -ENOMEM;
@@ -4891,9 +4893,34 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
 	mb->mbxOwner = OWN_CHIP;
 
 	if (psli->sli_flag & LPFC_SLI_ACTIVE) {
-		/* First copy command data to host SLIM area */
+		/* Populate mbox extension offset word. */
+		if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len) {
+			*(((uint32_t *)mb) + pmbox->mbox_offset_word)
+				= (uint8_t *)phba->mbox_ext
+				  - (uint8_t *)phba->mbox;
+		}
+
+		/* Copy the mailbox extension data */
+		if (pmbox->in_ext_byte_len && pmbox->context2) {
+			lpfc_sli_pcimem_bcopy(pmbox->context2,
+				(uint8_t *)phba->mbox_ext,
+				pmbox->in_ext_byte_len);
+		}
+		/* Copy command data to host SLIM area */
 		lpfc_sli_pcimem_bcopy(mb, phba->mbox, MAILBOX_CMD_SIZE);
 	} else {
+		/* Populate mbox extension offset word. */
+		if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len)
+			*(((uint32_t *)mb) + pmbox->mbox_offset_word)
+				= MAILBOX_HBA_EXT_OFFSET;
+
+		/* Copy the mailbox extension data */
+		if (pmbox->in_ext_byte_len && pmbox->context2) {
+			lpfc_memcpy_to_slim(phba->MBslimaddr +
+				MAILBOX_HBA_EXT_OFFSET,
+				pmbox->context2, pmbox->in_ext_byte_len);
+
+		}
 		if (mb->mbxCommand == MBX_CONFIG_PORT) {
 			/* copy command data into host mbox for cmpl */
 			lpfc_sli_pcimem_bcopy(mb, phba->mbox, MAILBOX_CMD_SIZE);
@@ -5003,15 +5030,22 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
 		if (psli->sli_flag & LPFC_SLI_ACTIVE) {
 			/* copy results back to user */
 			lpfc_sli_pcimem_bcopy(phba->mbox, mb, MAILBOX_CMD_SIZE);
+			/* Copy the mailbox extension data */
+			if (pmbox->out_ext_byte_len && pmbox->context2) {
+				lpfc_sli_pcimem_bcopy(phba->mbox_ext,
+						      pmbox->context2,
+						      pmbox->out_ext_byte_len);
+			}
 		} else {
 			/* First copy command data */
 			lpfc_memcpy_from_slim(mb, phba->MBslimaddr,
 							MAILBOX_CMD_SIZE);
-			if ((mb->mbxCommand == MBX_DUMP_MEMORY) &&
-				pmbox->context2) {
-				lpfc_memcpy_from_slim((void *)pmbox->context2,
-				      phba->MBslimaddr + DMP_RSP_OFFSET,
-				      mb->un.varDmp.word_cnt);
+			/* Copy the mailbox extension data */
+			if (pmbox->out_ext_byte_len && pmbox->context2) {
+				lpfc_memcpy_from_slim(pmbox->context2,
+					phba->MBslimaddr +
+					MAILBOX_HBA_EXT_OFFSET,
+					pmbox->out_ext_byte_len);
 			}
 		}
 
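The two hunks above (together with the interrupt-path hunk further down) replace the old MBX_DUMP_MEMORY special case with a generic mailbox-extension mechanism: a command that carries extra payload declares it through in_ext_byte_len/out_ext_byte_len and points context2 at the caller's buffer, and the issue path copies it into either the host-resident phba->mbox_ext area (SLI active) or a fixed SLIM offset (MAILBOX_HBA_EXT_OFFSET), then copies the response extension back out. A hedged caller-side sketch using the fields this patch introduces (issue_mbox_with_ext is a hypothetical helper, not a driver function):

    /* Hypothetical: issue a mailbox command that carries extension data. */
    static int issue_mbox_with_ext(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
    			       void *ext_buf, uint32_t in_len, uint32_t out_len)
    {
    	pmbox->context2 = ext_buf;         /* extension payload buffer */
    	pmbox->in_ext_byte_len = in_len;   /* bytes copied out to the port */
    	pmbox->out_ext_byte_len = out_len; /* bytes copied back to ext_buf */
    	/* pmbox->mbox_offset_word selects the command word that receives
    	 * the extension area's offset; it is command-specific. */
    	return lpfc_sli_issue_mbox(phba, pmbox, MBX_POLL);
    }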
@@ -7104,13 +7138,11 @@ lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 		 */
 		list_del_init(&abort_iocb->list);
 		pring->txcmplq_cnt--;
-		spin_unlock_irq(&phba->hbalock);
 
 		/* Firmware could still be in progress of DMAing
 		 * payload, so don't free data buffer till after
 		 * a hbeat.
 		 */
-		spin_lock_irq(&phba->hbalock);
 		abort_iocb->iocb_flag |= LPFC_DELAY_MEM_FREE;
 		abort_iocb->iocb_flag &= ~LPFC_DRIVER_ABORTED;
 		spin_unlock_irq(&phba->hbalock);
@@ -7118,7 +7150,8 @@ lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 		abort_iocb->iocb.ulpStatus = IOSTAT_LOCAL_REJECT;
 		abort_iocb->iocb.un.ulpWord[4] = IOERR_ABORT_REQUESTED;
 		(abort_iocb->iocb_cmpl)(phba, abort_iocb, abort_iocb);
-	}
+	} else
+		spin_unlock_irq(&phba->hbalock);
 	}
 
 	lpfc_sli_release_iocbq(phba, cmdiocb);
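The locking rework above fixes an unlock/relock window: the old code dropped hbalock after taking the aborted IOCB off txcmplq and re-acquired it to update the flags, so another context could briefly observe the IOCB off the list but still marked LPFC_DRIVER_ABORTED; the else path also lacked a matching unlock. Reconstructed shape of the corrected critical section (simplified; the guarding condition is paraphrased from the surrounding function, which these hunks do not show):

    spin_lock_irq(&phba->hbalock);
    if (abort_iocb->iocb_flag & LPFC_DRIVER_ABORTED) {
    	list_del_init(&abort_iocb->list);
    	pring->txcmplq_cnt--;
    	/* Lock stays held: flag updates are atomic with list removal. */
    	abort_iocb->iocb_flag |= LPFC_DELAY_MEM_FREE;
    	abort_iocb->iocb_flag &= ~LPFC_DRIVER_ABORTED;
    	spin_unlock_irq(&phba->hbalock);   /* drop before calling back */
    	abort_iocb->iocb.ulpStatus = IOSTAT_LOCAL_REJECT;
    	abort_iocb->iocb.un.ulpWord[4] = IOERR_ABORT_REQUESTED;
    	(abort_iocb->iocb_cmpl)(phba, abort_iocb, abort_iocb);
    } else
    	spin_unlock_irq(&phba->hbalock);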
@@ -8133,6 +8166,12 @@ lpfc_sli_sp_intr_handler(int irq, void *dev_id)
 				if (pmb->mbox_cmpl) {
 					lpfc_sli_pcimem_bcopy(mbox, pmbox,
 							MAILBOX_CMD_SIZE);
+					if (pmb->out_ext_byte_len &&
+						pmb->context2)
+						lpfc_sli_pcimem_bcopy(
+						phba->mbox_ext,
+						pmb->context2,
+						pmb->out_ext_byte_len);
 				}
 				if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
 					pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;
@@ -8983,17 +9022,17 @@ lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe)
 	int ecount = 0;
 	uint16_t cqid;
 
-	if (bf_get(lpfc_eqe_major_code, eqe) != 0) {
+	if (bf_get_le32(lpfc_eqe_major_code, eqe) != 0) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
 				"0359 Not a valid slow-path completion "
 				"event: majorcode=x%x, minorcode=x%x\n",
-				bf_get(lpfc_eqe_major_code, eqe),
-				bf_get(lpfc_eqe_minor_code, eqe));
+				bf_get_le32(lpfc_eqe_major_code, eqe),
+				bf_get_le32(lpfc_eqe_minor_code, eqe));
 		return;
 	}
 
 	/* Get the reference to the corresponding CQ */
-	cqid = bf_get(lpfc_eqe_resource_id, eqe);
+	cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
 
 	/* Search for completion queue pointer matching this cqid */
 	speq = phba->sli4_hba.sp_eq;
@@ -9221,12 +9260,12 @@ lpfc_sli4_fp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
 	uint16_t cqid;
 	int ecount = 0;
 
-	if (unlikely(bf_get(lpfc_eqe_major_code, eqe) != 0)) {
+	if (unlikely(bf_get_le32(lpfc_eqe_major_code, eqe) != 0)) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
 				"0366 Not a valid fast-path completion "
 				"event: majorcode=x%x, minorcode=x%x\n",
-				bf_get(lpfc_eqe_major_code, eqe),
-				bf_get(lpfc_eqe_minor_code, eqe));
+				bf_get_le32(lpfc_eqe_major_code, eqe),
+				bf_get_le32(lpfc_eqe_minor_code, eqe));
 		return;
 	}
 
@@ -9239,7 +9278,7 @@ lpfc_sli4_fp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
 	}
 
 	/* Get the reference to the corresponding CQ */
-	cqid = bf_get(lpfc_eqe_resource_id, eqe);
+	cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
 	if (unlikely(cqid != cq->queue_id)) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
 				"0368 Miss-matched fast-path completion "
@@ -9506,7 +9545,7 @@ lpfc_sli4_queue_free(struct lpfc_queue *queue)
 	while (!list_empty(&queue->page_list)) {
 		list_remove_head(&queue->page_list, dmabuf, struct lpfc_dmabuf,
 				 list);
-		dma_free_coherent(&queue->phba->pcidev->dev, PAGE_SIZE,
+		dma_free_coherent(&queue->phba->pcidev->dev, SLI4_PAGE_SIZE,
 				  dmabuf->virt, dmabuf->phys);
 		kfree(dmabuf);
 	}
@@ -9532,13 +9571,17 @@ lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t entry_size,
 	struct lpfc_dmabuf *dmabuf;
 	int x, total_qe_count;
 	void *dma_pointer;
+	uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
 
+	if (!phba->sli4_hba.pc_sli4_params.supported)
+		hw_page_size = SLI4_PAGE_SIZE;
 
 	queue = kzalloc(sizeof(struct lpfc_queue) +
 			(sizeof(union sli4_qe) * entry_count), GFP_KERNEL);
 	if (!queue)
 		return NULL;
-	queue->page_count = (PAGE_ALIGN(entry_size * entry_count))/PAGE_SIZE;
+	queue->page_count = (ALIGN(entry_size * entry_count,
+			hw_page_size))/hw_page_size;
 	INIT_LIST_HEAD(&queue->list);
 	INIT_LIST_HEAD(&queue->page_list);
 	INIT_LIST_HEAD(&queue->child_list);
@@ -9547,19 +9590,19 @@ lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t entry_size,
 		if (!dmabuf)
 			goto out_fail;
 		dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
-						  PAGE_SIZE, &dmabuf->phys,
+						  hw_page_size, &dmabuf->phys,
 						  GFP_KERNEL);
 		if (!dmabuf->virt) {
 			kfree(dmabuf);
 			goto out_fail;
 		}
-		memset(dmabuf->virt, 0, PAGE_SIZE);
+		memset(dmabuf->virt, 0, hw_page_size);
 		dmabuf->buffer_tag = x;
 		list_add_tail(&dmabuf->list, &queue->page_list);
 		/* initialize queue's entry array */
 		dma_pointer = dmabuf->virt;
 		for (; total_qe_count < entry_count &&
-		     dma_pointer < (PAGE_SIZE + dmabuf->virt);
+		     dma_pointer < (hw_page_size + dmabuf->virt);
 		     total_qe_count++, dma_pointer += entry_size) {
 			queue->qe[total_qe_count].address = dma_pointer;
 		}
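From here on, every queue create/alloc routine repeats one idiom: use the page size the port reports in pc_sli4_params.if_page_sz when the parameters page is supported, otherwise fall back to the fixed SLI4_PAGE_SIZE. This decouples SLI4 queue-page math from the host's PAGE_SIZE, so the driver behaves identically on 64 KB-page architectures. A condensed sketch of the selection and page-count arithmetic used above (helper names are illustrative, not driver functions):

    #include <linux/kernel.h>	/* ALIGN() */

    /* Illustrative: pick the page size the SLI4 port expects. */
    static uint32_t hw_page_size_of(struct lpfc_hba *phba)
    {
    	if (!phba->sli4_hba.pc_sli4_params.supported)
    		return SLI4_PAGE_SIZE;		/* fixed fallback size */
    	return phba->sli4_hba.pc_sli4_params.if_page_sz;
    }

    /* Illustrative: pages needed to hold entry_count queue entries. */
    static uint32_t queue_pages_needed(uint32_t entry_size,
    				   uint32_t entry_count,
    				   uint32_t hw_page_size)
    {
    	return ALIGN(entry_size * entry_count, hw_page_size) / hw_page_size;
    }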
@@ -9604,6 +9647,10 @@ lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint16_t imax)
 	uint32_t shdr_status, shdr_add_status;
 	union lpfc_sli4_cfg_shdr *shdr;
 	uint16_t dmult;
+	uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
+
+	if (!phba->sli4_hba.pc_sli4_params.supported)
+		hw_page_size = SLI4_PAGE_SIZE;
 
 	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 	if (!mbox)
@@ -9653,6 +9700,7 @@ lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint16_t imax)
 		break;
 	}
 	list_for_each_entry(dmabuf, &eq->page_list, list) {
+		memset(dmabuf->virt, 0, hw_page_size);
 		eq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
 					putPaddrLow(dmabuf->phys);
 		eq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
@@ -9715,6 +9763,11 @@ lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
 	int rc, length, status = 0;
 	uint32_t shdr_status, shdr_add_status;
 	union lpfc_sli4_cfg_shdr *shdr;
+	uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
+
+	if (!phba->sli4_hba.pc_sli4_params.supported)
+		hw_page_size = SLI4_PAGE_SIZE;
+
 
 	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 	if (!mbox)
@@ -9752,6 +9805,7 @@ lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
 		break;
 	}
 	list_for_each_entry(dmabuf, &cq->page_list, list) {
+		memset(dmabuf->virt, 0, hw_page_size);
 		cq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
 					putPaddrLow(dmabuf->phys);
 		cq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
@@ -9791,9 +9845,70 @@ out:
 }
 
 /**
+ * lpfc_mq_create_fb_init - Send MCC_CREATE without async events registration
+ * @phba: HBA structure that indicates port to create a queue on.
+ * @mq: The queue structure to use to create the mailbox queue.
+ * @mbox: An allocated pointer to type LPFC_MBOXQ_t
+ * @cq: The completion queue to associate with this cq.
+ *
+ * This function provides failback (fb) functionality when the
+ * mq_create_ext fails on older FW generations.  It's purpose is identical
+ * to mq_create_ext otherwise.
+ *
+ * This routine cannot fail as all attributes were previously accessed and
+ * initialized in mq_create_ext.
+ **/
+static void
+lpfc_mq_create_fb_init(struct lpfc_hba *phba, struct lpfc_queue *mq,
+		       LPFC_MBOXQ_t *mbox, struct lpfc_queue *cq)
+{
+	struct lpfc_mbx_mq_create *mq_create;
+	struct lpfc_dmabuf *dmabuf;
+	int length;
+
+	length = (sizeof(struct lpfc_mbx_mq_create) -
+		  sizeof(struct lpfc_sli4_cfg_mhdr));
+	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
+			 LPFC_MBOX_OPCODE_MQ_CREATE,
+			 length, LPFC_SLI4_MBX_EMBED);
+	mq_create = &mbox->u.mqe.un.mq_create;
+	bf_set(lpfc_mbx_mq_create_num_pages, &mq_create->u.request,
+	       mq->page_count);
+	bf_set(lpfc_mq_context_cq_id, &mq_create->u.request.context,
+	       cq->queue_id);
+	bf_set(lpfc_mq_context_valid, &mq_create->u.request.context, 1);
+	switch (mq->entry_count) {
+	case 16:
+		bf_set(lpfc_mq_context_count, &mq_create->u.request.context,
+		       LPFC_MQ_CNT_16);
+		break;
+	case 32:
+		bf_set(lpfc_mq_context_count, &mq_create->u.request.context,
+		       LPFC_MQ_CNT_32);
+		break;
+	case 64:
+		bf_set(lpfc_mq_context_count, &mq_create->u.request.context,
+		       LPFC_MQ_CNT_64);
+		break;
+	case 128:
+		bf_set(lpfc_mq_context_count, &mq_create->u.request.context,
+		       LPFC_MQ_CNT_128);
+		break;
+	}
+	list_for_each_entry(dmabuf, &mq->page_list, list) {
+		mq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
+			putPaddrLow(dmabuf->phys);
+		mq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
+			putPaddrHigh(dmabuf->phys);
+	}
+}
+
+/**
  * lpfc_mq_create - Create a mailbox Queue on the HBA
  * @phba: HBA structure that indicates port to create a queue on.
  * @mq: The queue structure to use to create the mailbox queue.
+ * @cq: The completion queue to associate with this cq.
+ * @subtype: The queue's subtype.
  *
  * This function creates a mailbox queue, as detailed in @mq, on a port,
  * described by @phba by sending a MQ_CREATE mailbox command to the HBA.
@@ -9809,31 +9924,43 @@ out:
 * memory this function will return ENOMEM. If the queue create mailbox command
 * fails this function will return ENXIO.
 **/
-uint32_t
+int32_t
 lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq,
 	       struct lpfc_queue *cq, uint32_t subtype)
 {
 	struct lpfc_mbx_mq_create *mq_create;
+	struct lpfc_mbx_mq_create_ext *mq_create_ext;
 	struct lpfc_dmabuf *dmabuf;
 	LPFC_MBOXQ_t *mbox;
 	int rc, length, status = 0;
 	uint32_t shdr_status, shdr_add_status;
 	union lpfc_sli4_cfg_shdr *shdr;
+	uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
+
+	if (!phba->sli4_hba.pc_sli4_params.supported)
+		hw_page_size = SLI4_PAGE_SIZE;
 
 	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 	if (!mbox)
 		return -ENOMEM;
-	length = (sizeof(struct lpfc_mbx_mq_create) -
+	length = (sizeof(struct lpfc_mbx_mq_create_ext) -
 		  sizeof(struct lpfc_sli4_cfg_mhdr));
 	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
-			 LPFC_MBOX_OPCODE_MQ_CREATE,
+			 LPFC_MBOX_OPCODE_MQ_CREATE_EXT,
 			 length, LPFC_SLI4_MBX_EMBED);
-	mq_create = &mbox->u.mqe.un.mq_create;
-	bf_set(lpfc_mbx_mq_create_num_pages, &mq_create->u.request,
+
+	mq_create_ext = &mbox->u.mqe.un.mq_create_ext;
+	bf_set(lpfc_mbx_mq_create_ext_num_pages, &mq_create_ext->u.request,
 	       mq->page_count);
-	bf_set(lpfc_mq_context_cq_id, &mq_create->u.request.context,
-	       cq->queue_id);
-	bf_set(lpfc_mq_context_valid, &mq_create->u.request.context, 1);
+	bf_set(lpfc_mbx_mq_create_ext_async_evt_link, &mq_create_ext->u.request,
+	       1);
+	bf_set(lpfc_mbx_mq_create_ext_async_evt_fcfste,
+	       &mq_create_ext->u.request, 1);
+	bf_set(lpfc_mbx_mq_create_ext_async_evt_group5,
+	       &mq_create_ext->u.request, 1);
+	bf_set(lpfc_mq_context_cq_id, &mq_create_ext->u.request.context,
+	       cq->queue_id);
+	bf_set(lpfc_mq_context_valid, &mq_create_ext->u.request.context, 1);
 	switch (mq->entry_count) {
 	default:
 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
@@ -9843,31 +9970,47 @@ lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq,
 		return -EINVAL;
 		/* otherwise default to smallest count (drop through) */
 	case 16:
-		bf_set(lpfc_mq_context_count, &mq_create->u.request.context,
+		bf_set(lpfc_mq_context_count, &mq_create_ext->u.request.context,
 		       LPFC_MQ_CNT_16);
 		break;
 	case 32:
-		bf_set(lpfc_mq_context_count, &mq_create->u.request.context,
+		bf_set(lpfc_mq_context_count, &mq_create_ext->u.request.context,
 		       LPFC_MQ_CNT_32);
 		break;
 	case 64:
-		bf_set(lpfc_mq_context_count, &mq_create->u.request.context,
+		bf_set(lpfc_mq_context_count, &mq_create_ext->u.request.context,
 		       LPFC_MQ_CNT_64);
 		break;
 	case 128:
-		bf_set(lpfc_mq_context_count, &mq_create->u.request.context,
+		bf_set(lpfc_mq_context_count, &mq_create_ext->u.request.context,
 		       LPFC_MQ_CNT_128);
 		break;
 	}
 	list_for_each_entry(dmabuf, &mq->page_list, list) {
-		mq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
+		memset(dmabuf->virt, 0, hw_page_size);
+		mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_lo =
 			putPaddrLow(dmabuf->phys);
-		mq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
+		mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_hi =
 			putPaddrHigh(dmabuf->phys);
 	}
 	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
+	shdr = (union lpfc_sli4_cfg_shdr *) &mq_create_ext->header.cfg_shdr;
+	mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id,
+			      &mq_create_ext->u.response);
+	if (rc != MBX_SUCCESS) {
+		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+				"2795 MQ_CREATE_EXT failed with "
+				"status x%x. Failback to MQ_CREATE.\n",
+				rc);
+		lpfc_mq_create_fb_init(phba, mq, mbox, cq);
+		mq_create = &mbox->u.mqe.un.mq_create;
+		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
+		shdr = (union lpfc_sli4_cfg_shdr *) &mq_create->header.cfg_shdr;
+		mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id,
+				      &mq_create->u.response);
+	}
+
 	/* The IOCTL status is embedded in the mailbox subheader. */
-	shdr = (union lpfc_sli4_cfg_shdr *) &mq_create->header.cfg_shdr;
 	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
 	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
 	if (shdr_status || shdr_add_status || rc) {
@@ -9878,7 +10021,6 @@ lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq,
 		status = -ENXIO;
 		goto out;
 	}
-	mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id, &mq_create->u.response);
 	if (mq->queue_id == 0xFFFF) {
 		status = -ENXIO;
 		goto out;
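The creation flow now registers for asynchronous link, FCF-state, and group-5 events as part of MQ_CREATE_EXT, and falls back to the pre-extension command when older firmware rejects it: the same mailbox buffer is rebuilt by lpfc_mq_create_fb_init() and reissued, with queue_id and the status subheader re-read from whichever response was actually used. The pattern, abstracted from the hunks above:

    /* Try the extended create first; on failure rebuild the same
     * mailbox as a basic MQ_CREATE and read status from that response. */
    rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
    shdr = (union lpfc_sli4_cfg_shdr *) &mq_create_ext->header.cfg_shdr;
    if (rc != MBX_SUCCESS) {
    	lpfc_mq_create_fb_init(phba, mq, mbox, cq);
    	mq_create = &mbox->u.mqe.un.mq_create;
    	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
    	shdr = (union lpfc_sli4_cfg_shdr *) &mq_create->header.cfg_shdr;
    }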
@@ -9927,6 +10069,10 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
 	int rc, length, status = 0;
 	uint32_t shdr_status, shdr_add_status;
 	union lpfc_sli4_cfg_shdr *shdr;
+	uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
+
+	if (!phba->sli4_hba.pc_sli4_params.supported)
+		hw_page_size = SLI4_PAGE_SIZE;
 
 	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 	if (!mbox)
@@ -9942,6 +10088,7 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
 	bf_set(lpfc_mbx_wq_create_cq_id, &wq_create->u.request,
 	       cq->queue_id);
 	list_for_each_entry(dmabuf, &wq->page_list, list) {
+		memset(dmabuf->virt, 0, hw_page_size);
 		wq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
 			putPaddrLow(dmabuf->phys);
 		wq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
@@ -10010,6 +10157,10 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
 	int rc, length, status = 0;
 	uint32_t shdr_status, shdr_add_status;
 	union lpfc_sli4_cfg_shdr *shdr;
+	uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
+
+	if (!phba->sli4_hba.pc_sli4_params.supported)
+		hw_page_size = SLI4_PAGE_SIZE;
 
 	if (hrq->entry_count != drq->entry_count)
 		return -EINVAL;
@@ -10054,6 +10205,7 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
 	bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context,
 	       LPFC_HDR_BUF_SIZE);
 	list_for_each_entry(dmabuf, &hrq->page_list, list) {
+		memset(dmabuf->virt, 0, hw_page_size);
 		rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
 			putPaddrLow(dmabuf->phys);
 		rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
@@ -10626,7 +10778,7 @@ lpfc_sli4_post_sgl_list(struct lpfc_hba *phba)
 
 	reqlen = els_xri_cnt * sizeof(struct sgl_page_pairs) +
 		 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
-	if (reqlen > PAGE_SIZE) {
+	if (reqlen > SLI4_PAGE_SIZE) {
 		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
 				"2559 Block sgl registration required DMA "
 				"size (%d) great than a page\n", reqlen);
@@ -10732,7 +10884,7 @@ lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *phba, struct list_head *sblist,
 	/* Calculate the requested length of the dma memory */
 	reqlen = cnt * sizeof(struct sgl_page_pairs) +
 		 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
-	if (reqlen > PAGE_SIZE) {
+	if (reqlen > SLI4_PAGE_SIZE) {
 		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
 				"0217 Block sgl registration required DMA "
 				"size (%d) great than a page\n", reqlen);
@@ -11568,8 +11720,8 @@ lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba,
  *
  * This routine is invoked to post rpi header templates to the
  * HBA consistent with the SLI-4 interface spec. This routine
- * posts a PAGE_SIZE memory region to the port to hold up to
- * PAGE_SIZE modulo 64 rpi context headers.
+ * posts a SLI4_PAGE_SIZE memory region to the port to hold up to
+ * SLI4_PAGE_SIZE modulo 64 rpi context headers.
  *
  * This routine does not require any locks. It's usage is expected
  * to be driver load or reset recovery when the driver is
@@ -11672,8 +11824,8 @@ lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page)
  *
  * This routine is invoked to post rpi header templates to the
  * HBA consistent with the SLI-4 interface spec. This routine
- * posts a PAGE_SIZE memory region to the port to hold up to
- * PAGE_SIZE modulo 64 rpi context headers.
+ * posts a SLI4_PAGE_SIZE memory region to the port to hold up to
+ * SLI4_PAGE_SIZE modulo 64 rpi context headers.
  *
  * Returns
  * A nonzero rpi defined as rpi_base <= rpi < max_rpi if successful
@@ -12040,9 +12192,11 @@ lpfc_sli4_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
 		phba->hba_flag |= FCF_DISC_INPROGRESS;
 		spin_unlock_irq(&phba->hbalock);
 		/* Reset FCF round robin index bmask for new scan */
-		if (fcf_index == LPFC_FCOE_FCF_GET_FIRST)
+		if (fcf_index == LPFC_FCOE_FCF_GET_FIRST) {
 			memset(phba->fcf.fcf_rr_bmask, 0,
 			       sizeof(*phba->fcf.fcf_rr_bmask));
+			phba->fcf.eligible_fcf_cnt = 0;
+		}
 		error = 0;
 	}
 fail_fcf_scan:
@@ -12507,6 +12661,7 @@ lpfc_cleanup_pending_mbox(struct lpfc_vport *vport)
 	struct lpfc_hba *phba = vport->phba;
 	LPFC_MBOXQ_t *mb, *nextmb;
 	struct lpfc_dmabuf *mp;
+	struct lpfc_nodelist *ndlp;
 
 	spin_lock_irq(&phba->hbalock);
 	list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
@@ -12523,6 +12678,11 @@ lpfc_cleanup_pending_mbox(struct lpfc_vport *vport)
 				__lpfc_mbuf_free(phba, mp->virt, mp->phys);
 				kfree(mp);
 			}
+			ndlp = (struct lpfc_nodelist *) mb->context2;
+			if (ndlp) {
+				lpfc_nlp_put(ndlp);
+				mb->context2 = NULL;
+			}
 		}
 		list_del(&mb->list);
 		mempool_free(mb, phba->mbox_mem_pool);
@@ -12532,6 +12692,15 @@ lpfc_cleanup_pending_mbox(struct lpfc_vport *vport)
 		if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) ||
 		    (mb->u.mb.mbxCommand == MBX_REG_VPI))
 			mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+		if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
+			ndlp = (struct lpfc_nodelist *) mb->context2;
+			if (ndlp) {
+				lpfc_nlp_put(ndlp);
+				mb->context2 = NULL;
+			}
+			/* Unregister the RPI when mailbox complete */
+			mb->mbox_flag |= LPFC_MBX_IMED_UNREG;
+		}
 	}
 	spin_unlock_irq(&phba->hbalock);
 }
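The lpfc_cleanup_pending_mbox() changes close a node reference leak: a queued or not-yet-completed MBX_REG_LOGIN64 holds a counted lpfc_nodelist pointer in context2, and freeing or abandoning the mailbox without releasing it stranded the node. lpfc_nlp_put() is the kref-style release for node objects. A minimal sketch of the pattern both loops now apply (drop_mbox_ndlp_ref is a hypothetical helper, not a driver function):

    /* Hypothetical: release the node reference a REG_LOGIN64 mailbox
     * holds before the command is abandoned. */
    static void drop_mbox_ndlp_ref(LPFC_MBOXQ_t *mb)
    {
    	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) mb->context2;

    	if (ndlp) {
    		lpfc_nlp_put(ndlp);	/* kref_put on the node object */
    		mb->context2 = NULL;	/* avoid a double put at completion */
    	}
    }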