author		James Smart <james.smart@emulex.com>	2010-03-15 11:25:07 -0400
committer	James Bottomley <James.Bottomley@suse.de>	2010-04-11 10:23:48 -0400
commit		cb5172eafd9ffdab6bb7b1eec628ea706d5817c8 (patch)
tree		9e87cd6a91ade79cf24bed529bc2702157391580 /drivers/scsi/lpfc/lpfc_sli.c
parent		999d813f227435c35b44362ee82211a1458844fc (diff)
[SCSI] lpfc 8.3.11: SLI4 Improvements
- Correct all SLI4 code to work on big endian systems.
- Move read of sli4 params earlier so returned values are used correctly.
Signed-off-by: Alex Iannicelli <alex.iannicelli@emulex.com>
Signed-off-by: James Smart <james.smart@emulex.com>
Signed-off-by: James Bottomley <James.Bottomley@suse.de>
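The first commit-message bullet corresponds to the bf_get()/bf_set() to bf_get_le32()/bf_set_le32() conversions in the diff below: SLI4 queue entries are written by the adapter in little-endian byte order, so the word must be converted to host order before any shift-and-mask on a big-endian host. The following is only an illustrative sketch of that idea, not the lpfc macros themselves; the structure, field layout, and helper names are invented for the example.

/* Sketch: endian-safe bit-field extraction from a little-endian queue entry.
 * Plain shifting of the raw word (a bf_get()-style access) would return the
 * wrong bits on a big-endian host; converting the LE word to host order
 * first (the job le32_to_cpu() does in the kernel) works on either. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical "valid" bit: bit 31 of word 0 of an event-queue entry. */
#define EQE_VALID_SHIFT	31
#define EQE_VALID_MASK	0x1

struct fake_eqe {		/* stand-in for a hardware EQE */
	uint32_t word0;		/* stored little-endian by the adapter */
};

/* Load a 32-bit little-endian value regardless of host endianness. */
static uint32_t le32_load(const void *p)
{
	const uint8_t *b = p;
	return (uint32_t)b[0] | ((uint32_t)b[1] << 8) |
	       ((uint32_t)b[2] << 16) | ((uint32_t)b[3] << 24);
}

/* Endian-safe extraction: swap to host order, then shift and mask. */
static uint32_t eqe_get_valid(const struct fake_eqe *eqe)
{
	return (le32_load(&eqe->word0) >> EQE_VALID_SHIFT) & EQE_VALID_MASK;
}

int main(void)
{
	struct fake_eqe eqe;
	uint8_t raw[4] = { 0x00, 0x00, 0x00, 0x80 };	/* LE bytes of 0x80000000 */

	memcpy(&eqe.word0, raw, sizeof(raw));
	printf("valid bit = %u\n", (unsigned)eqe_get_valid(&eqe));	/* 1 on any host */
	return 0;
}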
Diffstat (limited to 'drivers/scsi/lpfc/lpfc_sli.c')
-rw-r--r--	drivers/scsi/lpfc/lpfc_sli.c	37
1 file changed, 21 insertions, 16 deletions
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 2eff81d366f9..dd879a7d04a3 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -212,7 +212,7 @@ lpfc_sli4_eq_get(struct lpfc_queue *q)
 	struct lpfc_eqe *eqe = q->qe[q->hba_index].eqe;
 
 	/* If the next EQE is not valid then we are done */
-	if (!bf_get(lpfc_eqe_valid, eqe))
+	if (!bf_get_le32(lpfc_eqe_valid, eqe))
 		return NULL;
 	/* If the host has not yet processed the next entry then we are done */
 	if (((q->hba_index + 1) % q->entry_count) == q->host_index)
@@ -247,7 +247,7 @@ lpfc_sli4_eq_release(struct lpfc_queue *q, bool arm)
 	/* while there are valid entries */
 	while (q->hba_index != q->host_index) {
 		temp_eqe = q->qe[q->host_index].eqe;
-		bf_set(lpfc_eqe_valid, temp_eqe, 0);
+		bf_set_le32(lpfc_eqe_valid, temp_eqe, 0);
 		released++;
 		q->host_index = ((q->host_index + 1) % q->entry_count);
 	}
@@ -285,7 +285,7 @@ lpfc_sli4_cq_get(struct lpfc_queue *q)
 	struct lpfc_cqe *cqe;
 
 	/* If the next CQE is not valid then we are done */
-	if (!bf_get(lpfc_cqe_valid, q->qe[q->hba_index].cqe))
+	if (!bf_get_le32(lpfc_cqe_valid, q->qe[q->hba_index].cqe))
 		return NULL;
 	/* If the host has not yet processed the next entry then we are done */
 	if (((q->hba_index + 1) % q->entry_count) == q->host_index)
@@ -321,7 +321,7 @@ lpfc_sli4_cq_release(struct lpfc_queue *q, bool arm)
 	/* while there are valid entries */
 	while (q->hba_index != q->host_index) {
 		temp_qe = q->qe[q->host_index].cqe;
-		bf_set(lpfc_cqe_valid, temp_qe, 0);
+		bf_set_le32(lpfc_cqe_valid, temp_qe, 0);
 		released++;
 		q->host_index = ((q->host_index + 1) % q->entry_count);
 	}
@@ -8983,17 +8983,17 @@ lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe)
 	int ecount = 0;
 	uint16_t cqid;
 
-	if (bf_get(lpfc_eqe_major_code, eqe) != 0) {
+	if (bf_get_le32(lpfc_eqe_major_code, eqe) != 0) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
 				"0359 Not a valid slow-path completion "
 				"event: majorcode=x%x, minorcode=x%x\n",
-				bf_get(lpfc_eqe_major_code, eqe),
-				bf_get(lpfc_eqe_minor_code, eqe));
+				bf_get_le32(lpfc_eqe_major_code, eqe),
+				bf_get_le32(lpfc_eqe_minor_code, eqe));
 		return;
 	}
 
 	/* Get the reference to the corresponding CQ */
-	cqid = bf_get(lpfc_eqe_resource_id, eqe);
+	cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
 
 	/* Search for completion queue pointer matching this cqid */
 	speq = phba->sli4_hba.sp_eq;
@@ -9221,12 +9221,12 @@ lpfc_sli4_fp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
 	uint16_t cqid;
 	int ecount = 0;
 
-	if (unlikely(bf_get(lpfc_eqe_major_code, eqe) != 0)) {
+	if (unlikely(bf_get_le32(lpfc_eqe_major_code, eqe) != 0)) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
 				"0366 Not a valid fast-path completion "
 				"event: majorcode=x%x, minorcode=x%x\n",
-				bf_get(lpfc_eqe_major_code, eqe),
-				bf_get(lpfc_eqe_minor_code, eqe));
+				bf_get_le32(lpfc_eqe_major_code, eqe),
+				bf_get_le32(lpfc_eqe_minor_code, eqe));
 		return;
 	}
 
@@ -9239,7 +9239,7 @@ lpfc_sli4_fp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
 	}
 
 	/* Get the reference to the corresponding CQ */
-	cqid = bf_get(lpfc_eqe_resource_id, eqe);
+	cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
 	if (unlikely(cqid != cq->queue_id)) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
 			"0368 Miss-matched fast-path completion "
@@ -9532,13 +9532,18 @@ lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t entry_size,
 	struct lpfc_dmabuf *dmabuf;
 	int x, total_qe_count;
 	void *dma_pointer;
+	uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
 
 
+	if (!phba->sli4_hba.pc_sli4_params.supported)
+		hw_page_size = SLI4_PAGE_SIZE;
+
 	queue = kzalloc(sizeof(struct lpfc_queue) +
 			(sizeof(union sli4_qe) * entry_count), GFP_KERNEL);
 	if (!queue)
 		return NULL;
-	queue->page_count = (PAGE_ALIGN(entry_size * entry_count))/PAGE_SIZE;
+	queue->page_count = (ALIGN(entry_size * entry_count,
+			hw_page_size))/hw_page_size;
 	INIT_LIST_HEAD(&queue->list);
 	INIT_LIST_HEAD(&queue->page_list);
 	INIT_LIST_HEAD(&queue->child_list);
@@ -9547,19 +9552,19 @@ lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t entry_size,
 		if (!dmabuf)
 			goto out_fail;
 		dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
-						  PAGE_SIZE, &dmabuf->phys,
+						  hw_page_size, &dmabuf->phys,
 						  GFP_KERNEL);
 		if (!dmabuf->virt) {
 			kfree(dmabuf);
 			goto out_fail;
 		}
-		memset(dmabuf->virt, 0, PAGE_SIZE);
+		memset(dmabuf->virt, 0, hw_page_size);
 		dmabuf->buffer_tag = x;
 		list_add_tail(&dmabuf->list, &queue->page_list);
 		/* initialize queue's entry array */
 		dma_pointer = dmabuf->virt;
 		for (; total_qe_count < entry_count &&
-		     dma_pointer < (PAGE_SIZE + dmabuf->virt);
+		     dma_pointer < (hw_page_size + dmabuf->virt);
 		     total_qe_count++, dma_pointer += entry_size) {
 			queue->qe[total_qe_count].address = dma_pointer;
 		}