Diffstat (limited to 'drivers/scsi')
-rw-r--r--  drivers/scsi/lpfc/lpfc_hbadisc.c    4
-rw-r--r--  drivers/scsi/lpfc/lpfc_hw4.h        7
-rw-r--r--  drivers/scsi/lpfc/lpfc_init.c      76
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.c       37
4 files changed, 69 insertions, 55 deletions
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 362730b6dd85..7c4f389a2f67 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -1622,7 +1622,9 @@ lpfc_sli4_fcf_rec_mbox_parse(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
 	new_fcf_record = (struct fcf_record *)(virt_addr +
 			  sizeof(struct lpfc_mbx_read_fcf_tbl));
 	lpfc_sli_pcimem_bcopy(new_fcf_record, new_fcf_record,
-			      sizeof(struct fcf_record));
+			      offsetof(struct fcf_record, vlan_bitmap));
+	new_fcf_record->word137 = le32_to_cpu(new_fcf_record->word137);
+	new_fcf_record->word138 = le32_to_cpu(new_fcf_record->word138);
 
 	return new_fcf_record;
 }
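The hunk above stops the word-swapping copy at offsetof(struct fcf_record, vlan_bitmap) -- a per-word swap would scramble the byte-oriented bitmap on big-endian hosts -- and then converts the two trailing descriptor words individually. A minimal user-space sketch of that pattern, with a simplified stand-in layout and glibc's le32toh() standing in for le32_to_cpu():

#include <endian.h>   /* le32toh()/htole32(); assumes a glibc-style libc */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Simplified stand-in for struct fcf_record: little-endian words, then a
 * byte array that must be copied verbatim, then two more words. */
struct demo_record {
	uint32_t word0;
	uint32_t word1;
	uint8_t  vlan_bitmap[8];	/* byte-oriented: never word-swapped */
	uint32_t word137;
	uint32_t word138;
};

int main(void)
{
	struct demo_record wire = {
		.word0 = htole32(0x11223344),
		.word1 = htole32(0x55667788),
		.vlan_bitmap = { 0xAA, 0xBB },
		.word137 = htole32(0x01020304),
		.word138 = htole32(0x05060708),
	};
	struct demo_record host;
	const uint32_t *src = (const uint32_t *)&wire;
	uint32_t *dst = (uint32_t *)&host;
	size_t i, words = offsetof(struct demo_record, vlan_bitmap) / sizeof(uint32_t);

	/* Word-swap only the leading word-oriented region ... */
	for (i = 0; i < words; i++)
		dst[i] = le32toh(src[i]);
	/* ... copy the bitmap bytes untouched ... */
	memcpy(host.vlan_bitmap, wire.vlan_bitmap, sizeof(host.vlan_bitmap));
	/* ... and convert the trailing words one by one. */
	host.word137 = le32toh(wire.word137);
	host.word138 = le32toh(wire.word138);

	printf("word0=%#x word137=%#x\n", (unsigned)host.word0, (unsigned)host.word137);
	return 0;
}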
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index 820015fbc4d6..bff98add80cd 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -41,8 +41,14 @@
  * Or clear that bit field:
  * bf_set(example_bit_field, &t1, 0);
  */
+#define bf_get_le32(name, ptr) \
+	((le32_to_cpu((ptr)->name##_WORD) >> name##_SHIFT) & name##_MASK)
 #define bf_get(name, ptr) \
 	(((ptr)->name##_WORD >> name##_SHIFT) & name##_MASK)
+#define bf_set_le32(name, ptr, value) \
+	((ptr)->name##_WORD = cpu_to_le32(((((value) & \
+	name##_MASK) << name##_SHIFT) | (le32_to_cpu((ptr)->name##_WORD) & \
+	~(name##_MASK << name##_SHIFT)))))
 #define bf_set(name, ptr, value) \
 	((ptr)->name##_WORD = ((((value) & name##_MASK) << name##_SHIFT) | \
 	 ((ptr)->name##_WORD & ~(name##_MASK << name##_SHIFT))))
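These new _le32 variants wrap the existing bit-field accessors in le32_to_cpu()/cpu_to_le32() so that queue entries written by the hardware in little-endian form decode correctly on big-endian hosts. A rough user-space illustration of the same macro shape; glibc's <endian.h> helpers stand in for the kernel conversion routines, and the field name and layout are invented for the demo:

#include <endian.h>	/* htole32()/le32toh(); assumption: glibc-style libc */
#include <stdint.h>
#include <stdio.h>

/* Toy queue-entry word with one 4-bit field at bits 28..31, mirroring the
 * name##_SHIFT / name##_MASK / name##_WORD convention used by the driver. */
struct demo_eqe {
	uint32_t word0;
};
#define demo_valid_SHIFT	28
#define demo_valid_MASK		0x0000000fU
#define demo_valid_WORD		word0

/* Same shape as the bf_get_le32/bf_set_le32 macros added above. */
#define bf_get_le32(name, ptr) \
	((le32toh((ptr)->name##_WORD) >> name##_SHIFT) & name##_MASK)
#define bf_set_le32(name, ptr, value) \
	((ptr)->name##_WORD = htole32(((((value) & \
	name##_MASK) << name##_SHIFT) | (le32toh((ptr)->name##_WORD) & \
	~(name##_MASK << name##_SHIFT)))))

int main(void)
{
	struct demo_eqe eqe = { .word0 = htole32(0x12345678) };

	bf_set_le32(demo_valid, &eqe, 0xAU);	/* set bits 28..31 to 0xA */
	printf("valid=%#x word0=%#x\n",
	       (unsigned)bf_get_le32(demo_valid, &eqe),
	       (unsigned)le32toh(eqe.word0));	/* -> valid=0xa word0=0xa2345678 */
	return 0;
}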
@@ -1940,6 +1946,7 @@ struct lpfc_mbx_sli4_params {
 #define rdma_MASK			0x00000001
 #define rdma_WORD			word3
 	uint32_t sge_supp_len;
+#define SLI4_PAGE_SIZE 4096
 	uint32_t word5;
 #define if_page_sz_SHIFT		0
 #define if_page_sz_MASK			0x0000ffff
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 25ee8cc6ab7a..f8e88bb423cb 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -2566,7 +2566,7 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
 	shost->max_cmd_len = 16;
 	if (phba->sli_rev == LPFC_SLI_REV4) {
 		shost->dma_boundary =
-			phba->sli4_hba.pc_sli4_params.sge_supp_len;
+			phba->sli4_hba.pc_sli4_params.sge_supp_len-1;
 		shost->sg_tablesize = phba->cfg_sg_seg_cnt;
 	}
 
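shost->dma_boundary is a mask-style limit: the SCSI/block layers read it as "a segment may not cross a boundary of (dma_boundary + 1) bytes". Assuming sge_supp_len is a power of two, the value to program is therefore sge_supp_len - 1, which is what the fix above does. A small worked check with illustrative numbers only:

#include <stdint.h>
#include <stdio.h>

/* Two addresses fall inside the same DMA window iff they agree in all bits
 * above the boundary mask. */
static int crosses_boundary(uint64_t start, uint64_t end, uint64_t boundary_mask)
{
	return (start & ~boundary_mask) != (end & ~boundary_mask);
}

int main(void)
{
	uint64_t sge_supp_len = 0x10000;	/* example: 64 KiB supported SGE length */
	uint64_t mask = sge_supp_len - 1;	/* 0xFFFF: the form dma_boundary expects */

	/* A 4 KiB buffer ending just under the 64 KiB line stays in one window... */
	printf("%d\n", crosses_boundary(0xE000, 0xEFFF, mask));	/* 0 */
	/* ...while one straddling the line does not. */
	printf("%d\n", crosses_boundary(0xF800, 0x107FF, mask));	/* 1 */
	return 0;
}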
@@ -4039,6 +4039,43 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
 	if (unlikely(rc))
 		goto out_free_bsmbx;
 
+	mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
+						       GFP_KERNEL);
+	if (!mboxq) {
+		rc = -ENOMEM;
+		goto out_free_bsmbx;
+	}
+
+	/* Get the Supported Pages. It is always available. */
+	lpfc_supported_pages(mboxq);
+	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
+	if (unlikely(rc)) {
+		rc = -EIO;
+		mempool_free(mboxq, phba->mbox_mem_pool);
+		goto out_free_bsmbx;
+	}
+
+	mqe = &mboxq->u.mqe;
+	memcpy(&pn_page[0], ((uint8_t *)&mqe->un.supp_pages.word3),
+	       LPFC_MAX_SUPPORTED_PAGES);
+	for (i = 0; i < LPFC_MAX_SUPPORTED_PAGES; i++) {
+		switch (pn_page[i]) {
+		case LPFC_SLI4_PARAMETERS:
+			phba->sli4_hba.pc_sli4_params.supported = 1;
+			break;
+		default:
+			break;
+		}
+	}
+
+	/* Read the port's SLI4 Parameters capabilities if supported. */
+	if (phba->sli4_hba.pc_sli4_params.supported)
+		rc = lpfc_pc_sli4_params_get(phba, mboxq);
+	mempool_free(mboxq, phba->mbox_mem_pool);
+	if (rc) {
+		rc = -EIO;
+		goto out_free_bsmbx;
+	}
 	/* Create all the SLI4 queues */
 	rc = lpfc_sli4_queue_create(phba);
 	if (rc)
@@ -4099,43 +4136,6 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
 		goto out_free_fcp_eq_hdl;
 	}
 
-	mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
-						       GFP_KERNEL);
-	if (!mboxq) {
-		rc = -ENOMEM;
-		goto out_free_fcp_eq_hdl;
-	}
-
-	/* Get the Supported Pages. It is always available. */
-	lpfc_supported_pages(mboxq);
-	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
-	if (unlikely(rc)) {
-		rc = -EIO;
-		mempool_free(mboxq, phba->mbox_mem_pool);
-		goto out_free_fcp_eq_hdl;
-	}
-
-	mqe = &mboxq->u.mqe;
-	memcpy(&pn_page[0], ((uint8_t *)&mqe->un.supp_pages.word3),
-	       LPFC_MAX_SUPPORTED_PAGES);
-	for (i = 0; i < LPFC_MAX_SUPPORTED_PAGES; i++) {
-		switch (pn_page[i]) {
-		case LPFC_SLI4_PARAMETERS:
-			phba->sli4_hba.pc_sli4_params.supported = 1;
-			break;
-		default:
-			break;
-		}
-	}
-
-	/* Read the port's SLI4 Parameters capabilities if supported. */
-	if (phba->sli4_hba.pc_sli4_params.supported)
-		rc = lpfc_pc_sli4_params_get(phba, mboxq);
-	mempool_free(mboxq, phba->mbox_mem_pool);
-	if (rc) {
-		rc = -EIO;
-		goto out_free_fcp_eq_hdl;
-	}
 	return rc;
 
 out_free_fcp_eq_hdl:
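Net effect of the two lpfc_init.c hunks above: the Supported Pages query (and, when advertised, the SLI4 Parameters read) now runs before lpfc_sli4_queue_create(), so pc_sli4_params is already populated when the queues are sized. The parsing step itself is just a scan of the returned page codes; a stripped-down user-space sketch of that loop, with invented names and an invented page-code value purely for illustration:

#include <stdint.h>
#include <stdio.h>

#define DEMO_MAX_SUPPORTED_PAGES 8	/* stand-in for LPFC_MAX_SUPPORTED_PAGES */
#define DEMO_SLI4_PARAMETERS     0x42	/* stand-in page code, not the real value */

struct demo_params {
	int supported;
};

/* Scan the page-code bytes returned by a (hypothetical) "supported pages"
 * query and note whether the SLI4 parameters page is among them. */
static void demo_parse_supported_pages(const uint8_t *pn_page, struct demo_params *p)
{
	int i;

	for (i = 0; i < DEMO_MAX_SUPPORTED_PAGES; i++) {
		switch (pn_page[i]) {
		case DEMO_SLI4_PARAMETERS:
			p->supported = 1;
			break;
		default:
			break;
		}
	}
}

int main(void)
{
	uint8_t pn_page[DEMO_MAX_SUPPORTED_PAGES] = { 0x01, DEMO_SLI4_PARAMETERS, 0x00 };
	struct demo_params params = { 0 };

	demo_parse_supported_pages(pn_page, &params);
	printf("SLI4 parameters page supported: %d\n", params.supported);
	/* Only when supported would the driver go on to read the page and use
	 * fields such as the interface page size when sizing its queues. */
	return 0;
}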
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 2eff81d366f9..dd879a7d04a3 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -212,7 +212,7 @@ lpfc_sli4_eq_get(struct lpfc_queue *q)
 	struct lpfc_eqe *eqe = q->qe[q->hba_index].eqe;
 
 	/* If the next EQE is not valid then we are done */
-	if (!bf_get(lpfc_eqe_valid, eqe))
+	if (!bf_get_le32(lpfc_eqe_valid, eqe))
 		return NULL;
 	/* If the host has not yet processed the next entry then we are done */
 	if (((q->hba_index + 1) % q->entry_count) == q->host_index)
@@ -247,7 +247,7 @@ lpfc_sli4_eq_release(struct lpfc_queue *q, bool arm)
 	/* while there are valid entries */
 	while (q->hba_index != q->host_index) {
 		temp_eqe = q->qe[q->host_index].eqe;
-		bf_set(lpfc_eqe_valid, temp_eqe, 0);
+		bf_set_le32(lpfc_eqe_valid, temp_eqe, 0);
 		released++;
 		q->host_index = ((q->host_index + 1) % q->entry_count);
 	}
@@ -285,7 +285,7 @@ lpfc_sli4_cq_get(struct lpfc_queue *q)
 	struct lpfc_cqe *cqe;
 
 	/* If the next CQE is not valid then we are done */
-	if (!bf_get(lpfc_cqe_valid, q->qe[q->hba_index].cqe))
+	if (!bf_get_le32(lpfc_cqe_valid, q->qe[q->hba_index].cqe))
 		return NULL;
 	/* If the host has not yet processed the next entry then we are done */
 	if (((q->hba_index + 1) % q->entry_count) == q->host_index)
@@ -321,7 +321,7 @@ lpfc_sli4_cq_release(struct lpfc_queue *q, bool arm)
 	/* while there are valid entries */
 	while (q->hba_index != q->host_index) {
 		temp_qe = q->qe[q->host_index].cqe;
-		bf_set(lpfc_cqe_valid, temp_qe, 0);
+		bf_set_le32(lpfc_cqe_valid, temp_qe, 0);
 		released++;
 		q->host_index = ((q->host_index + 1) % q->entry_count);
 	}
@@ -8983,17 +8983,17 @@ lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe)
 	int ecount = 0;
 	uint16_t cqid;
 
-	if (bf_get(lpfc_eqe_major_code, eqe) != 0) {
+	if (bf_get_le32(lpfc_eqe_major_code, eqe) != 0) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
 				"0359 Not a valid slow-path completion "
 				"event: majorcode=x%x, minorcode=x%x\n",
-				bf_get(lpfc_eqe_major_code, eqe),
-				bf_get(lpfc_eqe_minor_code, eqe));
+				bf_get_le32(lpfc_eqe_major_code, eqe),
+				bf_get_le32(lpfc_eqe_minor_code, eqe));
 		return;
 	}
 
 	/* Get the reference to the corresponding CQ */
-	cqid = bf_get(lpfc_eqe_resource_id, eqe);
+	cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
 
 	/* Search for completion queue pointer matching this cqid */
 	speq = phba->sli4_hba.sp_eq;
@@ -9221,12 +9221,12 @@ lpfc_sli4_fp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
 	uint16_t cqid;
 	int ecount = 0;
 
-	if (unlikely(bf_get(lpfc_eqe_major_code, eqe) != 0)) {
+	if (unlikely(bf_get_le32(lpfc_eqe_major_code, eqe) != 0)) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
 				"0366 Not a valid fast-path completion "
 				"event: majorcode=x%x, minorcode=x%x\n",
-				bf_get(lpfc_eqe_major_code, eqe),
-				bf_get(lpfc_eqe_minor_code, eqe));
+				bf_get_le32(lpfc_eqe_major_code, eqe),
+				bf_get_le32(lpfc_eqe_minor_code, eqe));
 		return;
 	}
 
@@ -9239,7 +9239,7 @@ lpfc_sli4_fp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
 	}
 
 	/* Get the reference to the corresponding CQ */
-	cqid = bf_get(lpfc_eqe_resource_id, eqe);
+	cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
 	if (unlikely(cqid != cq->queue_id)) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
 				"0368 Miss-matched fast-path completion "
@@ -9532,13 +9532,18 @@ lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t entry_size,
 	struct lpfc_dmabuf *dmabuf;
 	int x, total_qe_count;
 	void *dma_pointer;
+	uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
 
 
+	if (!phba->sli4_hba.pc_sli4_params.supported)
+		hw_page_size = SLI4_PAGE_SIZE;
+
 	queue = kzalloc(sizeof(struct lpfc_queue) +
 			(sizeof(union sli4_qe) * entry_count), GFP_KERNEL);
 	if (!queue)
 		return NULL;
-	queue->page_count = (PAGE_ALIGN(entry_size * entry_count))/PAGE_SIZE;
+	queue->page_count = (ALIGN(entry_size * entry_count,
+			hw_page_size))/hw_page_size;
 	INIT_LIST_HEAD(&queue->list);
 	INIT_LIST_HEAD(&queue->page_list);
 	INIT_LIST_HEAD(&queue->child_list);
@@ -9547,19 +9552,19 @@ lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t entry_size,
 		if (!dmabuf)
 			goto out_fail;
 		dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
-						  PAGE_SIZE, &dmabuf->phys,
+						  hw_page_size, &dmabuf->phys,
 						  GFP_KERNEL);
 		if (!dmabuf->virt) {
 			kfree(dmabuf);
 			goto out_fail;
 		}
-		memset(dmabuf->virt, 0, PAGE_SIZE);
+		memset(dmabuf->virt, 0, hw_page_size);
 		dmabuf->buffer_tag = x;
 		list_add_tail(&dmabuf->list, &queue->page_list);
 		/* initialize queue's entry array */
 		dma_pointer = dmabuf->virt;
 		for (; total_qe_count < entry_count &&
-		     dma_pointer < (PAGE_SIZE + dmabuf->virt);
+		     dma_pointer < (hw_page_size + dmabuf->virt);
 		     total_qe_count++, dma_pointer += entry_size) {
 			queue->qe[total_qe_count].address = dma_pointer;
 		}
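With the interface page size now taken from pc_sli4_params (falling back to SLI4_PAGE_SIZE when the parameters page is unsupported), the queue's page count and per-page DMA allocations are computed against that hardware page size rather than the host PAGE_SIZE. A worked comparison with illustrative numbers, using an ALIGN macro written to mirror the kernel's rounding:

#include <stdint.h>
#include <stdio.h>

/* Round x up to a multiple of a (a must be a power of two), as the kernel's
 * ALIGN() does. */
#define DEMO_ALIGN(x, a)	(((x) + ((a) - 1)) & ~((uint64_t)(a) - 1))

int main(void)
{
	uint32_t entry_size  = 64;	/* illustrative: 64-byte queue entries  */
	uint32_t entry_count = 1024;	/* illustrative: 1024 entries per queue */
	uint32_t host_page   = 4096;	/* what the old PAGE_SIZE math assumed  */
	uint32_t hw_page     = 8192;	/* example non-4K SLI4 interface page   */

	uint64_t bytes = (uint64_t)entry_size * entry_count;	/* 65536 */

	printf("old page_count (4K pages): %llu\n",
	       (unsigned long long)(DEMO_ALIGN(bytes, host_page) / host_page));	/* 16 */
	printf("new page_count (hw pages): %llu\n",
	       (unsigned long long)(DEMO_ALIGN(bytes, hw_page) / hw_page));	/* 8 */
	return 0;
}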