Diffstat (limited to 'drivers/scsi/lpfc/lpfc_sli.c')
 -rw-r--r--  drivers/scsi/lpfc/lpfc_sli.c  621
 1 file changed, 325 insertions(+), 296 deletions(-)
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 0e7e144507b2..219bf534ef99 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -69,6 +69,8 @@ static int lpfc_sli4_fp_handle_wcqe(struct lpfc_hba *, struct lpfc_queue *,
 			struct lpfc_cqe *);
 static int lpfc_sli4_post_els_sgl_list(struct lpfc_hba *, struct list_head *,
 			int);
+static void lpfc_sli4_hba_handle_eqe(struct lpfc_hba *, struct lpfc_eqe *,
+			uint32_t);
 
 static IOCB_t *
 lpfc_get_iocb_from_iocbq(struct lpfc_iocbq *iocbq)
@@ -94,6 +96,7 @@ lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe *wqe)
 	union lpfc_wqe *temp_wqe;
 	struct lpfc_register doorbell;
 	uint32_t host_index;
+	uint32_t idx;
 
 	/* sanity check on queue memory */
 	if (unlikely(!q))
@@ -101,8 +104,12 @@ lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe *wqe)
 	temp_wqe = q->qe[q->host_index].wqe;
 
 	/* If the host has not yet processed the next entry then we are done */
-	if (((q->host_index + 1) % q->entry_count) == q->hba_index)
+	idx = ((q->host_index + 1) % q->entry_count);
+	if (idx == q->hba_index) {
+		q->WQ_overflow++;
 		return -ENOMEM;
+	}
+	q->WQ_posted++;
 	/* set consumption flag every once in a while */
 	if (!((q->host_index + 1) % q->entry_repost))
 		bf_set(wqe_wqec, &wqe->generic.wqe_com, 1);
@@ -112,7 +119,8 @@ lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe *wqe)
 
 	/* Update the host index before invoking device */
 	host_index = q->host_index;
-	q->host_index = ((q->host_index + 1) % q->entry_count);
+
+	q->host_index = idx;
 
 	/* Ring Doorbell */
 	doorbell.word0 = 0;
@@ -120,7 +128,6 @@ lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe *wqe)
 	bf_set(lpfc_wq_doorbell_index, &doorbell, host_index);
 	bf_set(lpfc_wq_doorbell_id, &doorbell, q->queue_id);
 	writel(doorbell.word0, q->phba->sli4_hba.WQDBregaddr);
-	readl(q->phba->sli4_hba.WQDBregaddr); /* Flush */
 
 	return 0;
 }
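
The ring-full test rewritten above is the standard one-slot-reserved circular buffer check, now computed once into idx and instrumented with WQ_overflow/WQ_posted counters. A minimal standalone sketch of the same logic, with hypothetical names rather than the driver's types:

    #include <stdint.h>

    /* Model of the SLI4 work-queue full check: the ring is treated as
     * full when advancing host_index would land on hba_index, so one
     * slot always stays unused to distinguish full from empty. */
    struct wq_model {
            uint32_t host_index;   /* next slot the host will fill */
            uint32_t hba_index;    /* next slot the HBA will consume */
            uint32_t entry_count;  /* total entries in the ring */
            uint64_t overflow;     /* posts rejected: ring full */
            uint64_t posted;       /* posts accepted */
    };

    static int wq_model_post(struct wq_model *q)
    {
            uint32_t idx = (q->host_index + 1) % q->entry_count;

            if (idx == q->hba_index) {      /* would catch the consumer */
                    q->overflow++;
                    return -1;              /* -ENOMEM in the driver */
            }
            q->posted++;
            /* ... copy the WQE into slot q->host_index here ... */
            q->host_index = idx;            /* publish after the copy */
            return 0;
    }

The dropped readl() after the doorbell writel() here (and in the MQ path below) removes a read-back that only flushed the posted write; presumably the intent is to save a PCI round trip on every submission.
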
@@ -194,7 +201,6 @@ lpfc_sli4_mq_put(struct lpfc_queue *q, struct lpfc_mqe *mqe)
 	bf_set(lpfc_mq_doorbell_num_posted, &doorbell, 1);
 	bf_set(lpfc_mq_doorbell_id, &doorbell, q->queue_id);
 	writel(doorbell.word0, q->phba->sli4_hba.MQDBregaddr);
-	readl(q->phba->sli4_hba.MQDBregaddr); /* Flush */
 	return 0;
 }
 
@@ -234,6 +240,7 @@ static struct lpfc_eqe *
 lpfc_sli4_eq_get(struct lpfc_queue *q)
 {
 	struct lpfc_eqe *eqe;
+	uint32_t idx;
 
 	/* sanity check on queue memory */
 	if (unlikely(!q))
@@ -244,14 +251,34 @@ lpfc_sli4_eq_get(struct lpfc_queue *q)
 	if (!bf_get_le32(lpfc_eqe_valid, eqe))
 		return NULL;
 	/* If the host has not yet processed the next entry then we are done */
-	if (((q->hba_index + 1) % q->entry_count) == q->host_index)
+	idx = ((q->hba_index + 1) % q->entry_count);
+	if (idx == q->host_index)
 		return NULL;
 
-	q->hba_index = ((q->hba_index + 1) % q->entry_count);
+	q->hba_index = idx;
 	return eqe;
 }
 
 /**
+ * lpfc_sli4_eq_clr_intr - Turn off interrupts from this EQ
+ * @q: The Event Queue to disable interrupts
+ *
+ **/
+static inline void
+lpfc_sli4_eq_clr_intr(struct lpfc_queue *q)
+{
+	struct lpfc_register doorbell;
+
+	doorbell.word0 = 0;
+	bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
+	bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
+	bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell,
+		(q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT));
+	bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id);
+	writel(doorbell.word0, q->phba->sli4_hba.EQCQDBregaddr);
+}
+
+/**
  * lpfc_sli4_eq_release - Indicates the host has finished processing an EQ
  * @q: The Event Queue that the host has completed processing for.
  * @arm: Indicates whether the host wants to arms this CQ.
@@ -318,6 +345,7 @@ static struct lpfc_cqe *
 lpfc_sli4_cq_get(struct lpfc_queue *q)
 {
 	struct lpfc_cqe *cqe;
+	uint32_t idx;
 
 	/* sanity check on queue memory */
 	if (unlikely(!q))
@@ -327,11 +355,12 @@ lpfc_sli4_cq_get(struct lpfc_queue *q)
 	if (!bf_get_le32(lpfc_cqe_valid, q->qe[q->hba_index].cqe))
 		return NULL;
 	/* If the host has not yet processed the next entry then we are done */
-	if (((q->hba_index + 1) % q->entry_count) == q->host_index)
+	idx = ((q->hba_index + 1) % q->entry_count);
+	if (idx == q->host_index)
 		return NULL;
 
 	cqe = q->qe[q->hba_index].cqe;
-	q->hba_index = ((q->hba_index + 1) % q->entry_count);
+	q->hba_index = idx;
 	return cqe;
 }
 
@@ -472,8 +501,8 @@ lpfc_sli4_rq_release(struct lpfc_queue *hq, struct lpfc_queue *dq)
 static inline IOCB_t *
 lpfc_cmd_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
 {
-	return (IOCB_t *) (((char *) pring->cmdringaddr) +
-			   pring->cmdidx * phba->iocb_cmd_size);
+	return (IOCB_t *) (((char *) pring->sli.sli3.cmdringaddr) +
+			   pring->sli.sli3.cmdidx * phba->iocb_cmd_size);
 }
 
 /**
@@ -489,8 +518,8 @@ lpfc_cmd_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
 static inline IOCB_t *
 lpfc_resp_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
 {
-	return (IOCB_t *) (((char *) pring->rspringaddr) +
-			   pring->rspidx * phba->iocb_rsp_size);
+	return (IOCB_t *) (((char *) pring->sli.sli3.rspringaddr) +
+			   pring->sli.sli3.rspidx * phba->iocb_rsp_size);
 }
 
 /**
@@ -1320,21 +1349,23 @@ static IOCB_t *
 lpfc_sli_next_iocb_slot (struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
 {
 	struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
-	uint32_t  max_cmd_idx = pring->numCiocb;
-	if ((pring->next_cmdidx == pring->cmdidx) &&
-	   (++pring->next_cmdidx >= max_cmd_idx))
-		pring->next_cmdidx = 0;
+	uint32_t  max_cmd_idx = pring->sli.sli3.numCiocb;
+	if ((pring->sli.sli3.next_cmdidx == pring->sli.sli3.cmdidx) &&
+	   (++pring->sli.sli3.next_cmdidx >= max_cmd_idx))
+		pring->sli.sli3.next_cmdidx = 0;
 
-	if (unlikely(pring->local_getidx == pring->next_cmdidx)) {
+	if (unlikely(pring->sli.sli3.local_getidx ==
+		pring->sli.sli3.next_cmdidx)) {
 
-		pring->local_getidx = le32_to_cpu(pgp->cmdGetInx);
+		pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);
 
-		if (unlikely(pring->local_getidx >= max_cmd_idx)) {
+		if (unlikely(pring->sli.sli3.local_getidx >= max_cmd_idx)) {
 			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
 					"0315 Ring %d issue: portCmdGet %d "
 					"is bigger than cmd ring %d\n",
 					pring->ringno,
-					pring->local_getidx, max_cmd_idx);
+					pring->sli.sli3.local_getidx,
+					max_cmd_idx);
 
 			phba->link_state = LPFC_HBA_ERROR;
 			/*
@@ -1349,7 +1380,7 @@ lpfc_sli_next_iocb_slot (struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
 		return NULL;
 	}
 
-	if (pring->local_getidx == pring->next_cmdidx)
+	if (pring->sli.sli3.local_getidx == pring->sli.sli3.next_cmdidx)
 		return NULL;
 	}
 
@@ -1484,8 +1515,8 @@ lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
 	 * Let the HBA know what IOCB slot will be the next one the
 	 * driver will put a command into.
 	 */
-	pring->cmdidx = pring->next_cmdidx;
-	writel(pring->cmdidx, &phba->host_gp[pring->ringno].cmdPutInx);
+	pring->sli.sli3.cmdidx = pring->sli.sli3.next_cmdidx;
+	writel(pring->sli.sli3.cmdidx, &phba->host_gp[pring->ringno].cmdPutInx);
 }
 
 /**
@@ -2056,6 +2087,7 @@ lpfc_sli_chk_mbx_command(uint8_t mbxCommand)
 	case MBX_READ_EVENT_LOG:
 	case MBX_SECURITY_MGMT:
 	case MBX_AUTH_PORT:
+	case MBX_ACCESS_VDATA:
 		ret = mbxCommand;
 		break;
 	default:
@@ -2786,7 +2818,7 @@ lpfc_sli_rsp_pointers_error(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
2786 "0312 Ring %d handler: portRspPut %d " 2818 "0312 Ring %d handler: portRspPut %d "
2787 "is bigger than rsp ring %d\n", 2819 "is bigger than rsp ring %d\n",
2788 pring->ringno, le32_to_cpu(pgp->rspPutInx), 2820 pring->ringno, le32_to_cpu(pgp->rspPutInx),
2789 pring->numRiocb); 2821 pring->sli.sli3.numRiocb);
2790 2822
2791 phba->link_state = LPFC_HBA_ERROR; 2823 phba->link_state = LPFC_HBA_ERROR;
2792 2824
@@ -2815,10 +2847,26 @@ lpfc_sli_rsp_pointers_error(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
 void lpfc_poll_eratt(unsigned long ptr)
 {
 	struct lpfc_hba *phba;
-	uint32_t eratt = 0;
+	uint32_t eratt = 0, rem;
+	uint64_t sli_intr, cnt;
 
 	phba = (struct lpfc_hba *)ptr;
 
+	/* Here we will also keep track of interrupts per sec of the hba */
+	sli_intr = phba->sli.slistat.sli_intr;
+
+	if (phba->sli.slistat.sli_prev_intr > sli_intr)
+		cnt = (((uint64_t)(-1) - phba->sli.slistat.sli_prev_intr) +
+			sli_intr);
+	else
+		cnt = (sli_intr - phba->sli.slistat.sli_prev_intr);
+
+	/* 64-bit integer division not supported on 32-bit x86 - use do_div */
+	rem = do_div(cnt, LPFC_ERATT_POLL_INTERVAL);
+	phba->sli.slistat.sli_ips = cnt;
+
+	phba->sli.slistat.sli_prev_intr = sli_intr;
+
 	/* Check chip HA register for error event */
 	eratt = lpfc_sli_check_eratt(phba);
 
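
Two details in the new interrupt-rate bookkeeping deserve note: the delta of the 64-bit sli_intr counter must tolerate wraparound, and the division must go through do_div() because a plain 64-bit '/' is not available on 32-bit x86 kernels (do_div divides in place and returns the remainder). A hedged userspace model of the same arithmetic, where POLL_SECONDS stands in for LPFC_ERATT_POLL_INTERVAL:

    #include <stdint.h>

    #define POLL_SECONDS 5  /* stand-in for LPFC_ERATT_POLL_INTERVAL */

    /* Mirrors the driver's computation of interrupts-per-second between
     * two samples of a wrapping 64-bit counter. */
    static uint64_t intr_per_sec(uint64_t prev, uint64_t now)
    {
            uint64_t cnt;

            if (prev > now)         /* counter wrapped since last poll */
                    cnt = (UINT64_MAX - prev) + now;
            else
                    cnt = now - prev;

            /* the kernel writes this as: rem = do_div(cnt, POLL_SECONDS);
             * which leaves the quotient in cnt */
            return cnt / POLL_SECONDS;
    }
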
@@ -2873,7 +2921,7 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
 	 * The next available response entry should never exceed the maximum
 	 * entries.  If it does, treat it as an adapter hardware error.
 	 */
-	portRspMax = pring->numRiocb;
+	portRspMax = pring->sli.sli3.numRiocb;
 	portRspPut = le32_to_cpu(pgp->rspPutInx);
 	if (unlikely(portRspPut >= portRspMax)) {
 		lpfc_sli_rsp_pointers_error(phba, pring);
@@ -2887,7 +2935,7 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
 	phba->fcp_ring_in_use = 1;
 
 	rmb();
-	while (pring->rspidx != portRspPut) {
+	while (pring->sli.sli3.rspidx != portRspPut) {
 		/*
 		 * Fetch an entry off the ring and copy it into a local data
 		 * structure.  The copy involves a byte-swap since the
@@ -2896,8 +2944,8 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
 		entry = lpfc_resp_iocb(phba, pring);
 		phba->last_completion_time = jiffies;
 
-		if (++pring->rspidx >= portRspMax)
-			pring->rspidx = 0;
+		if (++pring->sli.sli3.rspidx >= portRspMax)
+			pring->sli.sli3.rspidx = 0;
 
 		lpfc_sli_pcimem_bcopy((uint32_t *) entry,
 				      (uint32_t *) &rspiocbq.iocb,
@@ -2915,7 +2963,8 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
 		 * queuedepths of the SCSI device.
 		 */
 		if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
-		    (irsp->un.ulpWord[4] == IOERR_NO_RESOURCES)) {
+		    ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
+		     IOERR_NO_RESOURCES)) {
 			spin_unlock_irqrestore(&phba->hbalock, iflag);
 			phba->lpfc_rampdown_queue_depth(phba);
 			spin_lock_irqsave(&phba->hbalock, iflag);
@@ -2998,9 +3047,10 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
 	 * been updated, sync the pgp->rspPutInx and fetch the new port
 	 * response put pointer.
 	 */
-	writel(pring->rspidx, &phba->host_gp[pring->ringno].rspGetInx);
+	writel(pring->sli.sli3.rspidx,
+	       &phba->host_gp[pring->ringno].rspGetInx);
 
-	if (pring->rspidx == portRspPut)
+	if (pring->sli.sli3.rspidx == portRspPut)
 		portRspPut = le32_to_cpu(pgp->rspPutInx);
 	}
 
@@ -3015,7 +3065,7 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
 		pring->stats.iocb_cmd_empty++;
 
 		/* Force update of the local copy of cmdGetInx */
-		pring->local_getidx = le32_to_cpu(pgp->cmdGetInx);
+		pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);
 		lpfc_sli_resume_iocb(phba, pring);
 
 		if ((pring->lpfc_sli_cmd_available))
@@ -3086,7 +3136,8 @@ lpfc_sli_sp_handle_rspiocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
 	 * queuedepths of the SCSI device.
 	 */
 	if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
-	    (irsp->un.ulpWord[4] == IOERR_NO_RESOURCES)) {
+	    ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
+	     IOERR_NO_RESOURCES)) {
 		spin_unlock_irqrestore(&phba->hbalock, iflag);
 		phba->lpfc_rampdown_queue_depth(phba);
 		spin_lock_irqsave(&phba->hbalock, iflag);
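
This check (and the identical one in the fast-ring handler above) moves from an exact compare of ulpWord[4] to a masked compare: the word carries additional status bits alongside the IOERR code, so only the parameter field is inspected. Schematically, with illustrative constants rather than the driver's real definitions:

    #include <stdbool.h>
    #include <stdint.h>

    #define PARAM_MASK    0x00ffffffu  /* illustrative: low error-code field */
    #define NO_RESOURCES  0x0000000du  /* illustrative IOERR code value */

    /* Old: (word4 == NO_RESOURCES) misses completions where firmware
     * also sets high flag bits. New: compare the masked field only. */
    static bool is_no_resources(uint32_t word4)
    {
            return (word4 & PARAM_MASK) == NO_RESOURCES;
    }
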
@@ -3247,7 +3298,7 @@ lpfc_sli_handle_slow_ring_event_s3(struct lpfc_hba *phba,
 	 * The next available response entry should never exceed the maximum
 	 * entries.  If it does, treat it as an adapter hardware error.
 	 */
-	portRspMax = pring->numRiocb;
+	portRspMax = pring->sli.sli3.numRiocb;
 	portRspPut = le32_to_cpu(pgp->rspPutInx);
 	if (portRspPut >= portRspMax) {
 		/*
@@ -3269,7 +3320,7 @@ lpfc_sli_handle_slow_ring_event_s3(struct lpfc_hba *phba,
 	}
 
 	rmb();
-	while (pring->rspidx != portRspPut) {
+	while (pring->sli.sli3.rspidx != portRspPut) {
 		/*
 		 * Build a completion list and call the appropriate handler.
 		 * The process is to get the next available response iocb, get
@@ -3297,8 +3348,8 @@ lpfc_sli_handle_slow_ring_event_s3(struct lpfc_hba *phba,
 				      phba->iocb_rsp_size);
 		irsp = &rspiocbp->iocb;
 
-		if (++pring->rspidx >= portRspMax)
-			pring->rspidx = 0;
+		if (++pring->sli.sli3.rspidx >= portRspMax)
+			pring->sli.sli3.rspidx = 0;
 
 		if (pring->ringno == LPFC_ELS_RING) {
 			lpfc_debugfs_slow_ring_trc(phba,
@@ -3308,7 +3359,8 @@ lpfc_sli_handle_slow_ring_event_s3(struct lpfc_hba *phba,
 					*(((uint32_t *) irsp) + 7));
 		}
 
-		writel(pring->rspidx, &phba->host_gp[pring->ringno].rspGetInx);
+		writel(pring->sli.sli3.rspidx,
+		       &phba->host_gp[pring->ringno].rspGetInx);
 
 		spin_unlock_irqrestore(&phba->hbalock, iflag);
 		/* Handle the response IOCB */
@@ -3320,10 +3372,10 @@ lpfc_sli_handle_slow_ring_event_s3(struct lpfc_hba *phba,
 		 * the pgp->rspPutInx in the MAILBOX_tand fetch the new port
 		 * response put pointer.
 		 */
-		if (pring->rspidx == portRspPut) {
+		if (pring->sli.sli3.rspidx == portRspPut) {
 			portRspPut = le32_to_cpu(pgp->rspPutInx);
 		}
-	} /* while (pring->rspidx != portRspPut) */
+	} /* while (pring->sli.sli3.rspidx != portRspPut) */
 
 	if ((rspiocbp != NULL) && (mask & HA_R0RE_REQ)) {
 		/* At least one response entry has been freed */
@@ -3338,7 +3390,7 @@ lpfc_sli_handle_slow_ring_event_s3(struct lpfc_hba *phba,
 		pring->stats.iocb_cmd_empty++;
 
 		/* Force update of the local copy of cmdGetInx */
-		pring->local_getidx = le32_to_cpu(pgp->cmdGetInx);
+		pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);
 		lpfc_sli_resume_iocb(phba, pring);
 
 		if ((pring->lpfc_sli_cmd_available))
@@ -3859,10 +3911,10 @@ lpfc_sli_brdreset(struct lpfc_hba *phba)
 	for (i = 0; i < psli->num_rings; i++) {
 		pring = &psli->ring[i];
 		pring->flag = 0;
-		pring->rspidx = 0;
-		pring->next_cmdidx  = 0;
-		pring->local_getidx = 0;
-		pring->cmdidx = 0;
+		pring->sli.sli3.rspidx = 0;
+		pring->sli.sli3.next_cmdidx  = 0;
+		pring->sli.sli3.local_getidx = 0;
+		pring->sli.sli3.cmdidx = 0;
 		pring->missbufcnt = 0;
 	}
 
@@ -4893,16 +4945,15 @@ lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba)
 	lpfc_sli4_cq_release(phba->sli4_hba.els_cq, LPFC_QUEUE_REARM);
 	fcp_eqidx = 0;
 	if (phba->sli4_hba.fcp_cq) {
-		do
+		do {
 			lpfc_sli4_cq_release(phba->sli4_hba.fcp_cq[fcp_eqidx],
 					     LPFC_QUEUE_REARM);
-		while (++fcp_eqidx < phba->cfg_fcp_eq_count);
+		} while (++fcp_eqidx < phba->cfg_fcp_io_channel);
 	}
-	lpfc_sli4_eq_release(phba->sli4_hba.sp_eq, LPFC_QUEUE_REARM);
-	if (phba->sli4_hba.fp_eq) {
-		for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count;
+	if (phba->sli4_hba.hba_eq) {
+		for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_io_channel;
 		     fcp_eqidx++)
-			lpfc_sli4_eq_release(phba->sli4_hba.fp_eq[fcp_eqidx],
+			lpfc_sli4_eq_release(phba->sli4_hba.hba_eq[fcp_eqidx],
 					     LPFC_QUEUE_REARM);
 	}
 }
@@ -7784,14 +7835,18 @@ lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
 *
 * Return: index into SLI4 fast-path FCP queue index.
 **/
-static uint32_t
+static inline uint32_t
 lpfc_sli4_scmd_to_wqidx_distr(struct lpfc_hba *phba)
 {
-	++phba->fcp_qidx;
-	if (phba->fcp_qidx >= phba->cfg_fcp_wq_count)
-		phba->fcp_qidx = 0;
+	int i;
+
+	if (phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_CPU)
+		i = smp_processor_id();
+	else
+		i = atomic_add_return(1, &phba->fcp_qidx);
 
-	return phba->fcp_qidx;
+	i = (i % phba->cfg_fcp_io_channel);
+	return i;
 }
 
 /**
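
The rewritten selector supports two distribution policies: pin I/O issued on a CPU to that CPU's work queue (LPFC_FCP_SCHED_BY_CPU), or round-robin across queues with an atomic counter; either value is then folded modulo the number of configured I/O channels. A standalone sketch under hypothetical names:

    #include <stdatomic.h>

    enum wq_policy { WQ_BY_CPU, WQ_ROUND_ROBIN };

    /* Model of the WQ-index distribution: by-CPU favors cache locality
     * and keeps a given submitter on one queue; round-robin spreads the
     * load evenly across all channels. */
    static unsigned int pick_wq(enum wq_policy policy, unsigned int this_cpu,
                                atomic_uint *rr, unsigned int num_channels)
    {
            unsigned int i;

            if (policy == WQ_BY_CPU)
                    i = this_cpu;   /* smp_processor_id() in the driver */
            else
                    i = atomic_fetch_add(rr, 1) + 1; /* atomic_add_return() */

            return i % num_channels;
    }
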
@@ -8311,16 +8366,6 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
 
 	if ((piocb->iocb_flag & LPFC_IO_FCP) ||
 	    (piocb->iocb_flag & LPFC_USE_FCPWQIDX)) {
-		/*
-		 * For FCP command IOCB, get a new WQ index to distribute
-		 * WQE across the WQsr. On the other hand, for abort IOCB,
-		 * it carries the same WQ index to the original command
-		 * IOCB.
-		 */
-		if (piocb->iocb_flag & LPFC_IO_FCP)
-			piocb->fcp_wqidx = lpfc_sli4_scmd_to_wqidx_distr(phba);
-		if (unlikely(!phba->sli4_hba.fcp_wq))
-			return IOCB_ERROR;
 		if (lpfc_sli4_wq_put(phba->sli4_hba.fcp_wq[piocb->fcp_wqidx],
 				     &wqe))
 			return IOCB_ERROR;
@@ -8401,13 +8446,68 @@ int
 lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
 		    struct lpfc_iocbq *piocb, uint32_t flag)
 {
+	struct lpfc_fcp_eq_hdl *fcp_eq_hdl;
+	struct lpfc_sli_ring *pring;
+	struct lpfc_queue *fpeq;
+	struct lpfc_eqe *eqe;
 	unsigned long iflags;
-	int rc;
+	int rc, idx;
 
-	spin_lock_irqsave(&phba->hbalock, iflags);
-	rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
-	spin_unlock_irqrestore(&phba->hbalock, iflags);
+	if (phba->sli_rev == LPFC_SLI_REV4) {
+		if (piocb->iocb_flag & LPFC_IO_FCP) {
+			if (unlikely(!phba->sli4_hba.fcp_wq))
+				return IOCB_ERROR;
+			idx = lpfc_sli4_scmd_to_wqidx_distr(phba);
+			piocb->fcp_wqidx = idx;
+			ring_number = MAX_SLI3_CONFIGURED_RINGS + idx;
+
+			pring = &phba->sli.ring[ring_number];
+			spin_lock_irqsave(&pring->ring_lock, iflags);
+			rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb,
+				flag);
+			spin_unlock_irqrestore(&pring->ring_lock, iflags);
+
+			if (lpfc_fcp_look_ahead) {
+				fcp_eq_hdl = &phba->sli4_hba.fcp_eq_hdl[idx];
+
+				if (atomic_dec_and_test(&fcp_eq_hdl->
+					fcp_eq_in_use)) {
 
+					/* Get associated EQ with this index */
+					fpeq = phba->sli4_hba.hba_eq[idx];
+
+					/* Turn off interrupts from this EQ */
+					lpfc_sli4_eq_clr_intr(fpeq);
+
+					/*
+					 * Process all the events on FCP EQ
+					 */
+					while ((eqe = lpfc_sli4_eq_get(fpeq))) {
+						lpfc_sli4_hba_handle_eqe(phba,
+							eqe, idx);
+						fpeq->EQ_processed++;
+					}
+
+					/* Always clear and re-arm the EQ */
+					lpfc_sli4_eq_release(fpeq,
+						LPFC_QUEUE_REARM);
+				}
+				atomic_inc(&fcp_eq_hdl->fcp_eq_in_use);
+			}
+		} else {
+			pring = &phba->sli.ring[ring_number];
+			spin_lock_irqsave(&pring->ring_lock, iflags);
+			rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb,
+				flag);
+			spin_unlock_irqrestore(&pring->ring_lock, iflags);
+
+		}
+	} else {
+		/* For now, SLI2/3 will still use hbalock */
+		spin_lock_irqsave(&phba->hbalock, iflags);
+		rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
+		spin_unlock_irqrestore(&phba->hbalock, iflags);
+	}
 	return rc;
 }
 
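
The lpfc_fcp_look_ahead branch above gates inline completion polling on an atomic use count: the submitter decrements fcp_eq_in_use and, only when it reaches zero (no interrupt handler currently servicing that EQ), masks the EQ interrupt, drains pending EQEs in submission context, and re-arms the queue. The shape of that gate in isolation — a sketch with hypothetical names, not the driver's code:

    #include <stdatomic.h>

    /* eq_in_use has a baseline of 1 (conceptually owned by the IRQ
     * path). A submitter that brings it to 0 is the sole user and may
     * poll the EQ inline, trading an interrupt for lower completion
     * latency; the final increment restores the baseline. */
    static void try_poll_eq_inline(atomic_int *eq_in_use)
    {
            if (atomic_fetch_sub(eq_in_use, 1) == 1) {
                    /* sole user: mask EQ interrupt, drain EQEs, re-arm */
            }
            atomic_fetch_add(eq_in_use, 1);
    }
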
@@ -8434,18 +8534,18 @@ lpfc_extra_ring_setup( struct lpfc_hba *phba)
 
 	/* Take some away from the FCP ring */
 	pring = &psli->ring[psli->fcp_ring];
-	pring->numCiocb -= SLI2_IOCB_CMD_R1XTRA_ENTRIES;
-	pring->numRiocb -= SLI2_IOCB_RSP_R1XTRA_ENTRIES;
-	pring->numCiocb -= SLI2_IOCB_CMD_R3XTRA_ENTRIES;
-	pring->numRiocb -= SLI2_IOCB_RSP_R3XTRA_ENTRIES;
+	pring->sli.sli3.numCiocb -= SLI2_IOCB_CMD_R1XTRA_ENTRIES;
+	pring->sli.sli3.numRiocb -= SLI2_IOCB_RSP_R1XTRA_ENTRIES;
+	pring->sli.sli3.numCiocb -= SLI2_IOCB_CMD_R3XTRA_ENTRIES;
+	pring->sli.sli3.numRiocb -= SLI2_IOCB_RSP_R3XTRA_ENTRIES;
 
 	/* and give them to the extra ring */
 	pring = &psli->ring[psli->extra_ring];
 
-	pring->numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES;
-	pring->numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES;
-	pring->numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES;
-	pring->numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES;
+	pring->sli.sli3.numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES;
+	pring->sli.sli3.numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES;
+	pring->sli.sli3.numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES;
+	pring->sli.sli3.numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES;
 
 	/* Setup default profile for this ring */
 	pring->iotag_max = 4096;
@@ -8457,56 +8557,6 @@ lpfc_extra_ring_setup( struct lpfc_hba *phba)
 	return 0;
 }
 
-/* lpfc_sli_abts_recover_port - Recover a port that failed an ABTS.
- * @vport: pointer to virtual port object.
- * @ndlp: nodelist pointer for the impacted rport.
- *
- * The driver calls this routine in response to a XRI ABORT CQE
- * event from the port.  In this event, the driver is required to
- * recover its login to the rport even though its login may be valid
- * from the driver's perspective.  The failed ABTS notice from the
- * port indicates the rport is not responding.
- */
-static void
-lpfc_sli_abts_recover_port(struct lpfc_vport *vport,
-			   struct lpfc_nodelist *ndlp)
-{
-	struct Scsi_Host *shost;
-	struct lpfc_hba *phba;
-	unsigned long flags = 0;
-
-	shost = lpfc_shost_from_vport(vport);
-	phba = vport->phba;
-	if (ndlp->nlp_state != NLP_STE_MAPPED_NODE) {
-		lpfc_printf_log(phba, KERN_INFO,
-			LOG_SLI, "3093 No rport recovery needed. "
-			"rport in state 0x%x\n",
-			ndlp->nlp_state);
-		return;
-	}
-	lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
-		"3094 Start rport recovery on shost id 0x%x "
-		"fc_id 0x%06x vpi 0x%x rpi 0x%x state 0x%x "
-		"flags 0x%x\n",
-		shost->host_no, ndlp->nlp_DID,
-		vport->vpi, ndlp->nlp_rpi, ndlp->nlp_state,
-		ndlp->nlp_flag);
-	/*
-	 * The rport is not responding.  Don't attempt ADISC recovery.
-	 * Remove the FCP-2 flag to force a PLOGI.
-	 */
-	spin_lock_irqsave(shost->host_lock, flags);
-	ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
-	spin_unlock_irqrestore(shost->host_lock, flags);
-	lpfc_disc_state_machine(vport, ndlp, NULL,
-				NLP_EVT_DEVICE_RECOVERY);
-	lpfc_cancel_retry_delay_tmo(vport, ndlp);
-	spin_lock_irqsave(shost->host_lock, flags);
-	ndlp->nlp_flag |= NLP_NPR_2B_DISC;
-	spin_unlock_irqrestore(shost->host_lock, flags);
-	lpfc_disc_start(vport);
-}
-
 /* lpfc_sli_abts_err_handler - handle a failed ABTS request from an SLI3 port.
  * @phba: Pointer to HBA context object.
  * @iocbq: Pointer to iocb object.
@@ -8594,7 +8644,7 @@ lpfc_sli4_abts_err_handler(struct lpfc_hba *phba,
 	 * LOCAL_REJECT and 0 for a failed ABTS exchange and later OCe and
 	 * LPe FW releases returned LOCAL_REJECT and SEQUENCE_TIMEOUT.
 	 */
-	ext_status = axri->parameter & WCQE_PARAM_MASK;
+	ext_status = axri->parameter & IOERR_PARAM_MASK;
 	if ((bf_get(lpfc_wcqe_xa_status, axri) == IOSTAT_LOCAL_REJECT) &&
 	    ((ext_status == IOERR_SEQUENCE_TIMEOUT) || (ext_status == 0)))
 		lpfc_sli_abts_recover_port(vport, ndlp);
@@ -8692,7 +8742,9 @@ lpfc_sli_setup(struct lpfc_hba *phba)
 	struct lpfc_sli *psli = &phba->sli;
 	struct lpfc_sli_ring *pring;
 
-	psli->num_rings = MAX_CONFIGURED_RINGS;
+	psli->num_rings = MAX_SLI3_CONFIGURED_RINGS;
+	if (phba->sli_rev == LPFC_SLI_REV4)
+		psli->num_rings += phba->cfg_fcp_io_channel;
 	psli->sli_flag = 0;
 	psli->fcp_ring = LPFC_FCP_RING;
 	psli->next_ring = LPFC_FCP_NEXT_RING;
@@ -8707,16 +8759,20 @@ lpfc_sli_setup(struct lpfc_hba *phba)
 		switch (i) {
 		case LPFC_FCP_RING:	/* ring 0 - FCP */
 			/* numCiocb and numRiocb are used in config_port */
-			pring->numCiocb = SLI2_IOCB_CMD_R0_ENTRIES;
-			pring->numRiocb = SLI2_IOCB_RSP_R0_ENTRIES;
-			pring->numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES;
-			pring->numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES;
-			pring->numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES;
-			pring->numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES;
-			pring->sizeCiocb = (phba->sli_rev == 3) ?
+			pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R0_ENTRIES;
+			pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R0_ENTRIES;
+			pring->sli.sli3.numCiocb +=
+				SLI2_IOCB_CMD_R1XTRA_ENTRIES;
+			pring->sli.sli3.numRiocb +=
+				SLI2_IOCB_RSP_R1XTRA_ENTRIES;
+			pring->sli.sli3.numCiocb +=
+				SLI2_IOCB_CMD_R3XTRA_ENTRIES;
+			pring->sli.sli3.numRiocb +=
+				SLI2_IOCB_RSP_R3XTRA_ENTRIES;
+			pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
 							SLI3_IOCB_CMD_SIZE :
 							SLI2_IOCB_CMD_SIZE;
-			pring->sizeRiocb = (phba->sli_rev == 3) ?
+			pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
 							SLI3_IOCB_RSP_SIZE :
 							SLI2_IOCB_RSP_SIZE;
 			pring->iotag_ctr = 0;
@@ -8727,12 +8783,12 @@ lpfc_sli_setup(struct lpfc_hba *phba)
 			break;
 		case LPFC_EXTRA_RING:	/* ring 1 - EXTRA */
 			/* numCiocb and numRiocb are used in config_port */
-			pring->numCiocb = SLI2_IOCB_CMD_R1_ENTRIES;
-			pring->numRiocb = SLI2_IOCB_RSP_R1_ENTRIES;
-			pring->sizeCiocb = (phba->sli_rev == 3) ?
+			pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R1_ENTRIES;
+			pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R1_ENTRIES;
+			pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
 							SLI3_IOCB_CMD_SIZE :
 							SLI2_IOCB_CMD_SIZE;
-			pring->sizeRiocb = (phba->sli_rev == 3) ?
+			pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
 							SLI3_IOCB_RSP_SIZE :
 							SLI2_IOCB_RSP_SIZE;
 			pring->iotag_max = phba->cfg_hba_queue_depth;
@@ -8740,12 +8796,12 @@ lpfc_sli_setup(struct lpfc_hba *phba)
 			break;
 		case LPFC_ELS_RING:	/* ring 2 - ELS / CT */
 			/* numCiocb and numRiocb are used in config_port */
-			pring->numCiocb = SLI2_IOCB_CMD_R2_ENTRIES;
-			pring->numRiocb = SLI2_IOCB_RSP_R2_ENTRIES;
-			pring->sizeCiocb = (phba->sli_rev == 3) ?
+			pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R2_ENTRIES;
+			pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R2_ENTRIES;
+			pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
 							SLI3_IOCB_CMD_SIZE :
 							SLI2_IOCB_CMD_SIZE;
-			pring->sizeRiocb = (phba->sli_rev == 3) ?
+			pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
 							SLI3_IOCB_RSP_SIZE :
 							SLI2_IOCB_RSP_SIZE;
 			pring->fast_iotag = 0;
@@ -8786,8 +8842,9 @@ lpfc_sli_setup(struct lpfc_hba *phba)
 				lpfc_sli4_ct_abort_unsol_event;
 			break;
 		}
-		totiocbsize += (pring->numCiocb * pring->sizeCiocb) +
-			       (pring->numRiocb * pring->sizeRiocb);
+		totiocbsize += (pring->sli.sli3.numCiocb *
+			pring->sli.sli3.sizeCiocb) +
+			(pring->sli.sli3.numRiocb * pring->sli.sli3.sizeRiocb);
 	}
 	if (totiocbsize > MAX_SLIM_IOCB_SIZE) {
 		/* Too many cmd / rsp ring entries in SLI2 SLIM */
@@ -8828,14 +8885,15 @@ lpfc_sli_queue_setup(struct lpfc_hba *phba)
 	for (i = 0; i < psli->num_rings; i++) {
 		pring = &psli->ring[i];
 		pring->ringno = i;
-		pring->next_cmdidx  = 0;
-		pring->local_getidx = 0;
-		pring->cmdidx = 0;
+		pring->sli.sli3.next_cmdidx  = 0;
+		pring->sli.sli3.local_getidx = 0;
+		pring->sli.sli3.cmdidx = 0;
 		INIT_LIST_HEAD(&pring->txq);
 		INIT_LIST_HEAD(&pring->txcmplq);
 		INIT_LIST_HEAD(&pring->iocb_continueq);
 		INIT_LIST_HEAD(&pring->iocb_continue_saveq);
 		INIT_LIST_HEAD(&pring->postbufq);
+		spin_lock_init(&pring->ring_lock);
 	}
 	spin_unlock_irq(&phba->hbalock);
 	return 1;
@@ -9334,6 +9392,7 @@ lpfc_sli_abort_iotag_issue(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
 	IOCB_t *icmd = NULL;
 	IOCB_t *iabt = NULL;
 	int retval;
+	unsigned long iflags;
 
 	/*
 	 * There are certain command types we don't want to abort.  And we
@@ -9386,7 +9445,17 @@ lpfc_sli_abort_iotag_issue(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
 			 iabt->un.acxri.abortIoTag,
 			 iabt->un.acxri.abortContextTag,
 			 abtsiocbp->iotag);
-	retval = __lpfc_sli_issue_iocb(phba, pring->ringno, abtsiocbp, 0);
+
+	if (phba->sli_rev == LPFC_SLI_REV4) {
+		/* Note: both hbalock and ring_lock need to be set here */
+		spin_lock_irqsave(&pring->ring_lock, iflags);
+		retval = __lpfc_sli_issue_iocb(phba, pring->ringno,
+			abtsiocbp, 0);
+		spin_unlock_irqrestore(&pring->ring_lock, iflags);
+	} else {
+		retval = __lpfc_sli_issue_iocb(phba, pring->ringno,
+			abtsiocbp, 0);
+	}
 
 	if (retval)
 		__lpfc_sli_release_iocbq(phba, abtsiocbp);
@@ -10947,12 +11016,12 @@ lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *phba,
 	unsigned long iflags;
 
 	wcqe = &irspiocbq->cq_event.cqe.wcqe_cmpl;
-	spin_lock_irqsave(&phba->hbalock, iflags);
+	spin_lock_irqsave(&pring->ring_lock, iflags);
 	pring->stats.iocb_event++;
 	/* Look up the ELS command IOCB and create pseudo response IOCB */
 	cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
 				bf_get(lpfc_wcqe_c_request_tag, wcqe));
-	spin_unlock_irqrestore(&phba->hbalock, iflags);
+	spin_unlock_irqrestore(&pring->ring_lock, iflags);
 
 	if (unlikely(!cmdiocbq)) {
 		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
@@ -11154,6 +11223,7 @@ lpfc_sli4_sp_handle_mcqe(struct lpfc_hba *phba, struct lpfc_cqe *cqe)
 /**
  * lpfc_sli4_sp_handle_els_wcqe - Handle els work-queue completion event
  * @phba: Pointer to HBA context object.
+ * @cq: Pointer to associated CQ
  * @wcqe: Pointer to work-queue completion queue entry.
  *
  * This routine handles an ELS work-queue completion event.
@@ -11161,12 +11231,12 @@ lpfc_sli4_sp_handle_mcqe(struct lpfc_hba *phba, struct lpfc_cqe *cqe)
  * Return: true if work posted to worker thread, otherwise false.
  **/
 static bool
-lpfc_sli4_sp_handle_els_wcqe(struct lpfc_hba *phba,
+lpfc_sli4_sp_handle_els_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
 			     struct lpfc_wcqe_complete *wcqe)
 {
 	struct lpfc_iocbq *irspiocbq;
 	unsigned long iflags;
-	struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_FCP_RING];
+	struct lpfc_sli_ring *pring = cq->pring;
 
 	/* Get an irspiocbq for later ELS response processing use */
 	irspiocbq = lpfc_sli_get_iocbq(phba);
@@ -11311,14 +11381,17 @@ lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
 	case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
 				"2537 Receive Frame Truncated!!\n");
+		hrq->RQ_buf_trunc++;
 	case FC_STATUS_RQ_SUCCESS:
 		lpfc_sli4_rq_release(hrq, drq);
 		spin_lock_irqsave(&phba->hbalock, iflags);
 		dma_buf = lpfc_sli_hbqbuf_get(&phba->hbqs[0].hbq_buffer_list);
 		if (!dma_buf) {
+			hrq->RQ_no_buf_found++;
 			spin_unlock_irqrestore(&phba->hbalock, iflags);
 			goto out;
 		}
+		hrq->RQ_rcv_buf++;
 		memcpy(&dma_buf->cq_event.cqe.rcqe_cmpl, rcqe, sizeof(*rcqe));
 		/* save off the frame for the word thread to process */
 		list_add_tail(&dma_buf->cq_event.list,
@@ -11330,6 +11403,7 @@ lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
 		break;
 	case FC_STATUS_INSUFF_BUF_NEED_BUF:
 	case FC_STATUS_INSUFF_BUF_FRM_DISC:
+		hrq->RQ_no_posted_buf++;
 		/* Post more buffers if possible */
 		spin_lock_irqsave(&phba->hbalock, iflags);
 		phba->hba_flag |= HBA_POST_RECEIVE_BUFFER;
@@ -11367,7 +11441,7 @@ lpfc_sli4_sp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
 	case CQE_CODE_COMPL_WQE:
 		/* Process the WQ/RQ complete event */
 		phba->last_completion_time = jiffies;
-		workposted = lpfc_sli4_sp_handle_els_wcqe(phba,
+		workposted = lpfc_sli4_sp_handle_els_wcqe(phba, cq,
 				(struct lpfc_wcqe_complete *)&cqevt);
 		break;
 	case CQE_CODE_RELEASE_WQE:
@@ -11411,31 +11485,18 @@ lpfc_sli4_sp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
  *
  **/
 static void
-lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe)
+lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
+	struct lpfc_queue *speq)
 {
-	struct lpfc_queue *cq = NULL, *childq, *speq;
+	struct lpfc_queue *cq = NULL, *childq;
 	struct lpfc_cqe *cqe;
 	bool workposted = false;
 	int ecount = 0;
 	uint16_t cqid;
 
-	if (bf_get_le32(lpfc_eqe_major_code, eqe) != 0) {
-		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
-				"0359 Not a valid slow-path completion "
-				"event: majorcode=x%x, minorcode=x%x\n",
-				bf_get_le32(lpfc_eqe_major_code, eqe),
-				bf_get_le32(lpfc_eqe_minor_code, eqe));
-		return;
-	}
-
 	/* Get the reference to the corresponding CQ */
 	cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
 
-	/* Search for completion queue pointer matching this cqid */
-	speq = phba->sli4_hba.sp_eq;
-	/* sanity check on queue memory */
-	if (unlikely(!speq))
-		return;
 	list_for_each_entry(childq, &speq->child_list, list) {
 		if (childq->queue_id == cqid) {
 			cq = childq;
@@ -11457,6 +11518,7 @@ lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe)
 			workposted |= lpfc_sli4_sp_handle_mcqe(phba, cqe);
 			if (!(++ecount % cq->entry_repost))
 				lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
+			cq->CQ_mbox++;
 		}
 		break;
 	case LPFC_WCQ:
@@ -11470,6 +11532,10 @@ lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe)
 			if (!(++ecount % cq->entry_repost))
 				lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
 		}
+
+		/* Track the max number of CQEs processed in 1 EQ */
+		if (ecount > cq->CQ_max_cqe)
+			cq->CQ_max_cqe = ecount;
 		break;
 	default:
 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
@@ -11494,34 +11560,33 @@ lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe)
 
 /**
  * lpfc_sli4_fp_handle_fcp_wcqe - Process fast-path work queue completion entry
- * @eqe: Pointer to fast-path completion queue entry.
+ * @phba: Pointer to HBA context object.
+ * @cq: Pointer to associated CQ
+ * @wcqe: Pointer to work-queue completion queue entry.
  *
  * This routine process a fast-path work queue completion entry from fast-path
  * event queue for FCP command response completion.
  **/
 static void
-lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc_hba *phba,
+lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
 			     struct lpfc_wcqe_complete *wcqe)
 {
-	struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_FCP_RING];
+	struct lpfc_sli_ring *pring = cq->pring;
 	struct lpfc_iocbq *cmdiocbq;
 	struct lpfc_iocbq irspiocbq;
 	unsigned long iflags;
 
-	spin_lock_irqsave(&phba->hbalock, iflags);
-	pring->stats.iocb_event++;
-	spin_unlock_irqrestore(&phba->hbalock, iflags);
-
 	/* Check for response status */
 	if (unlikely(bf_get(lpfc_wcqe_c_status, wcqe))) {
 		/* If resource errors reported from HBA, reduce queue
 		 * depth of the SCSI device.
 		 */
-		if ((bf_get(lpfc_wcqe_c_status, wcqe) ==
-		     IOSTAT_LOCAL_REJECT) &&
-		    (wcqe->parameter == IOERR_NO_RESOURCES)) {
+		if (((bf_get(lpfc_wcqe_c_status, wcqe) ==
+		     IOSTAT_LOCAL_REJECT)) &&
+		    ((wcqe->parameter & IOERR_PARAM_MASK) ==
+		     IOERR_NO_RESOURCES))
 			phba->lpfc_rampdown_queue_depth(phba);
-		}
+
 		/* Log the error status */
 		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
 				"0373 FCP complete error: status=x%x, "
@@ -11534,10 +11599,11 @@ lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc_hba *phba,
 	}
 
 	/* Look up the FCP command IOCB and create pseudo response IOCB */
-	spin_lock_irqsave(&phba->hbalock, iflags);
+	spin_lock_irqsave(&pring->ring_lock, iflags);
+	pring->stats.iocb_event++;
 	cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
 				bf_get(lpfc_wcqe_c_request_tag, wcqe));
-	spin_unlock_irqrestore(&phba->hbalock, iflags);
+	spin_unlock_irqrestore(&pring->ring_lock, iflags);
 	if (unlikely(!cmdiocbq)) {
 		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
 				"0374 FCP complete with no corresponding "
@@ -11621,17 +11687,20 @@ lpfc_sli4_fp_handle_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
 	/* Check and process for different type of WCQE and dispatch */
 	switch (bf_get(lpfc_wcqe_c_code, &wcqe)) {
 	case CQE_CODE_COMPL_WQE:
+		cq->CQ_wq++;
 		/* Process the WQ complete event */
 		phba->last_completion_time = jiffies;
-		lpfc_sli4_fp_handle_fcp_wcqe(phba,
+		lpfc_sli4_fp_handle_fcp_wcqe(phba, cq,
 				(struct lpfc_wcqe_complete *)&wcqe);
 		break;
 	case CQE_CODE_RELEASE_WQE:
+		cq->CQ_release_wqe++;
 		/* Process the WQ release event */
 		lpfc_sli4_fp_handle_rel_wcqe(phba, cq,
 				(struct lpfc_wcqe_release *)&wcqe);
 		break;
 	case CQE_CODE_XRI_ABORTED:
+		cq->CQ_xri_aborted++;
 		/* Process the WQ XRI abort event */
 		phba->last_completion_time = jiffies;
 		workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
@@ -11647,7 +11716,7 @@ lpfc_sli4_fp_handle_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
 }
 
 /**
- * lpfc_sli4_fp_handle_eqe - Process a fast-path event queue entry
+ * lpfc_sli4_hba_handle_eqe - Process a fast-path event queue entry
  * @phba: Pointer to HBA context object.
  * @eqe: Pointer to fast-path event queue entry.
  *
@@ -11659,8 +11728,8 @@ lpfc_sli4_fp_handle_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
  * completion queue, and then return.
  **/
 static void
-lpfc_sli4_fp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
-			uint32_t fcp_cqidx)
+lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
+			uint32_t qidx)
 {
 	struct lpfc_queue *cq;
 	struct lpfc_cqe *cqe;
@@ -11670,30 +11739,38 @@ lpfc_sli4_fp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
 
 	if (unlikely(bf_get_le32(lpfc_eqe_major_code, eqe) != 0)) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
-				"0366 Not a valid fast-path completion "
+				"0366 Not a valid completion "
 				"event: majorcode=x%x, minorcode=x%x\n",
 				bf_get_le32(lpfc_eqe_major_code, eqe),
 				bf_get_le32(lpfc_eqe_minor_code, eqe));
 		return;
 	}
 
+	/* Get the reference to the corresponding CQ */
+	cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
+
+	/* Check if this is a Slow path event */
+	if (unlikely(cqid != phba->sli4_hba.fcp_cq_map[qidx])) {
+		lpfc_sli4_sp_handle_eqe(phba, eqe,
+			phba->sli4_hba.hba_eq[qidx]);
+		return;
+	}
+
 	if (unlikely(!phba->sli4_hba.fcp_cq)) {
 		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
 				"3146 Fast-path completion queues "
 				"does not exist\n");
 		return;
 	}
-	cq = phba->sli4_hba.fcp_cq[fcp_cqidx];
+	cq = phba->sli4_hba.fcp_cq[qidx];
 	if (unlikely(!cq)) {
 		if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
 			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
 				"0367 Fast-path completion queue "
-				"(%d) does not exist\n", fcp_cqidx);
+				"(%d) does not exist\n", qidx);
 		return;
 	}
 
-	/* Get the reference to the corresponding CQ */
-	cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
 	if (unlikely(cqid != cq->queue_id)) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
 				"0368 Miss-matched fast-path completion "
@@ -11709,6 +11786,10 @@ lpfc_sli4_fp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
 		lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
 	}
 
+	/* Track the max number of CQEs processed in 1 EQ */
+	if (ecount > cq->CQ_max_cqe)
+		cq->CQ_max_cqe = ecount;
+
 	/* Catch the no cq entry condition */
 	if (unlikely(ecount == 0))
 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
@@ -11737,86 +11818,7 @@ lpfc_sli4_eq_flush(struct lpfc_hba *phba, struct lpfc_queue *eq)
11737} 11818}
11738 11819
11739/** 11820/**
11740 * lpfc_sli4_sp_intr_handler - Slow-path interrupt handler to SLI-4 device 11821 * lpfc_sli4_hba_intr_handler - HBA interrupt handler to SLI-4 device
11741 * @irq: Interrupt number.
11742 * @dev_id: The device context pointer.
11743 *
11744 * This function is directly called from the PCI layer as an interrupt
11745 * service routine when device with SLI-4 interface spec is enabled with
11746 * MSI-X multi-message interrupt mode and there are slow-path events in
11747 * the HBA. However, when the device is enabled with either MSI or Pin-IRQ
11748 * interrupt mode, this function is called as part of the device-level
11749 * interrupt handler. When the PCI slot is in error recovery or the HBA is
11750 * undergoing initialization, the interrupt handler will not process the
11751 * interrupt. The link attention and ELS ring attention events are handled
11752 * by the worker thread. The interrupt handler signals the worker thread
11753 * and returns for these events. This function is called without any lock
11754 * held. It gets the hbalock to access and update SLI data structures.
11755 *
11756 * This function returns IRQ_HANDLED when interrupt is handled else it
11757 * returns IRQ_NONE.
11758 **/
11759irqreturn_t
11760lpfc_sli4_sp_intr_handler(int irq, void *dev_id)
11761{
11762 struct lpfc_hba *phba;
11763 struct lpfc_queue *speq;
11764 struct lpfc_eqe *eqe;
11765 unsigned long iflag;
11766 int ecount = 0;
11767
11768 /*
11769 * Get the driver's phba structure from the dev_id
11770 */
11771 phba = (struct lpfc_hba *)dev_id;
11772
11773 if (unlikely(!phba))
11774 return IRQ_NONE;
11775
11776 /* Get to the EQ struct associated with this vector */
11777 speq = phba->sli4_hba.sp_eq;
11778 if (unlikely(!speq))
11779 return IRQ_NONE;
11780
11781 /* Check device state for handling interrupt */
11782 if (unlikely(lpfc_intr_state_check(phba))) {
11783 /* Check again for link_state with lock held */
11784 spin_lock_irqsave(&phba->hbalock, iflag);
11785 if (phba->link_state < LPFC_LINK_DOWN)
11786 /* Flush, clear interrupt, and rearm the EQ */
11787 lpfc_sli4_eq_flush(phba, speq);
11788 spin_unlock_irqrestore(&phba->hbalock, iflag);
11789 return IRQ_NONE;
11790 }
11791
11792 /*
11793	 * Process all the events on FCP slow-path EQ
11794 */
11795 while ((eqe = lpfc_sli4_eq_get(speq))) {
11796 lpfc_sli4_sp_handle_eqe(phba, eqe);
11797 if (!(++ecount % speq->entry_repost))
11798 lpfc_sli4_eq_release(speq, LPFC_QUEUE_NOARM);
11799 }
11800
11801 /* Always clear and re-arm the slow-path EQ */
11802 lpfc_sli4_eq_release(speq, LPFC_QUEUE_REARM);
11803
11804 /* Catch the no cq entry condition */
11805 if (unlikely(ecount == 0)) {
11806 if (phba->intr_type == MSIX)
11807 /* MSI-X treated interrupt served as no EQ share INT */
11808 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
11809 "0357 MSI-X interrupt with no EQE\n");
11810 else
11811 /* Non MSI-X treated on interrupt as EQ share INT */
11812 return IRQ_NONE;
11813 }
11814
11815 return IRQ_HANDLED;
11816} /* lpfc_sli4_sp_intr_handler */
11817
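The slow-path handler deleted above is not lost; its EQE drain loop, which pops entries, releases them back to the hardware in entry_repost-sized batches without re-arming, and finishes with a re-arming release, survives unchanged in the combined handler below. A standalone C model of that drain/re-arm shape, using a toy ring rather than driver code:

    #include <stdbool.h>
    #include <stdio.h>

    #define EQ_SIZE   16
    #define EQ_REPOST  8   /* stand-in for entry_repost */

    struct toy_eq {
        bool valid[EQ_SIZE];   /* per-entry valid bit set by "hardware" */
        unsigned int head;
    };

    /* Pop the next valid entry; false once the queue is drained. */
    static bool eq_get(struct toy_eq *eq)
    {
        if (!eq->valid[eq->head])
            return false;
        eq->valid[eq->head] = false;
        eq->head = (eq->head + 1) % EQ_SIZE;
        return true;
    }

    /* In the driver this is the doorbell write that hands 'count'
     * entries back to the hardware, armed or not. */
    static void eq_release(const char *how, unsigned int count)
    {
        printf("release %u entries (%s)\n", count, how);
    }

    int main(void)
    {
        struct toy_eq eq = { .head = 0 };
        unsigned int ecount = 0;

        for (int i = 0; i < 11; i++)   /* pretend 11 events arrived */
            eq.valid[i] = true;

        /* Drain, returning entries in batches so the hardware never
         * starves for free EQEs during a long burst. */
        while (eq_get(&eq))
            if (!(++ecount % EQ_REPOST))
                eq_release("NOARM", EQ_REPOST);

        /* Final doorbell always re-arms so the next event interrupts. */
        eq_release("REARM", ecount % EQ_REPOST);
        return 0;
    }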
11818/**
11819 * lpfc_sli4_fp_intr_handler - Fast-path interrupt handler to SLI-4 device
11820	 * @irq: Interrupt number.	11822	 * @irq: Interrupt number.
11821 * @dev_id: The device context pointer. 11823 * @dev_id: The device context pointer.
11822 * 11824 *
@@ -11833,11 +11835,16 @@ lpfc_sli4_sp_intr_handler(int irq, void *dev_id)
11833 * the FCP EQ to FCP CQ are one-to-one map such that the FCP EQ index is 11835 * the FCP EQ to FCP CQ are one-to-one map such that the FCP EQ index is
11834 * equal to that of FCP CQ index. 11836 * equal to that of FCP CQ index.
11835 * 11837 *
11838 * The link attention and ELS ring attention events are handled
11839 * by the worker thread. The interrupt handler signals the worker thread
11840 * and returns for these events. This function is called without any lock
11841 * held. It gets the hbalock to access and update SLI data structures.
11842 *
11836 * This function returns IRQ_HANDLED when interrupt is handled else it 11843 * This function returns IRQ_HANDLED when interrupt is handled else it
11837 * returns IRQ_NONE. 11844 * returns IRQ_NONE.
11838 **/ 11845 **/
11839irqreturn_t 11846irqreturn_t
11840lpfc_sli4_fp_intr_handler(int irq, void *dev_id) 11847lpfc_sli4_hba_intr_handler(int irq, void *dev_id)
11841{ 11848{
11842 struct lpfc_hba *phba; 11849 struct lpfc_hba *phba;
11843 struct lpfc_fcp_eq_hdl *fcp_eq_hdl; 11850 struct lpfc_fcp_eq_hdl *fcp_eq_hdl;
@@ -11854,22 +11861,34 @@ lpfc_sli4_fp_intr_handler(int irq, void *dev_id)
11854 11861
11855 if (unlikely(!phba)) 11862 if (unlikely(!phba))
11856 return IRQ_NONE; 11863 return IRQ_NONE;
11857 if (unlikely(!phba->sli4_hba.fp_eq)) 11864 if (unlikely(!phba->sli4_hba.hba_eq))
11858 return IRQ_NONE; 11865 return IRQ_NONE;
11859 11866
11860 /* Get to the EQ struct associated with this vector */ 11867 /* Get to the EQ struct associated with this vector */
11861 fpeq = phba->sli4_hba.fp_eq[fcp_eqidx]; 11868 fpeq = phba->sli4_hba.hba_eq[fcp_eqidx];
11862 if (unlikely(!fpeq)) 11869 if (unlikely(!fpeq))
11863 return IRQ_NONE; 11870 return IRQ_NONE;
11864 11871
11872 if (lpfc_fcp_look_ahead) {
11873 if (atomic_dec_and_test(&fcp_eq_hdl->fcp_eq_in_use))
11874 lpfc_sli4_eq_clr_intr(fpeq);
11875 else {
11876 atomic_inc(&fcp_eq_hdl->fcp_eq_in_use);
11877 return IRQ_NONE;
11878 }
11879 }
11880
11865 /* Check device state for handling interrupt */ 11881 /* Check device state for handling interrupt */
11866 if (unlikely(lpfc_intr_state_check(phba))) { 11882 if (unlikely(lpfc_intr_state_check(phba))) {
11883 fpeq->EQ_badstate++;
11867 /* Check again for link_state with lock held */ 11884 /* Check again for link_state with lock held */
11868 spin_lock_irqsave(&phba->hbalock, iflag); 11885 spin_lock_irqsave(&phba->hbalock, iflag);
11869 if (phba->link_state < LPFC_LINK_DOWN) 11886 if (phba->link_state < LPFC_LINK_DOWN)
11870 /* Flush, clear interrupt, and rearm the EQ */ 11887 /* Flush, clear interrupt, and rearm the EQ */
11871 lpfc_sli4_eq_flush(phba, fpeq); 11888 lpfc_sli4_eq_flush(phba, fpeq);
11872 spin_unlock_irqrestore(&phba->hbalock, iflag); 11889 spin_unlock_irqrestore(&phba->hbalock, iflag);
11890 if (lpfc_fcp_look_ahead)
11891 atomic_inc(&fcp_eq_hdl->fcp_eq_in_use);
11873 return IRQ_NONE; 11892 return IRQ_NONE;
11874 } 11893 }
11875 11894
@@ -11877,15 +11896,27 @@ lpfc_sli4_fp_intr_handler(int irq, void *dev_id)
11877	 * Process all the events on FCP fast-path EQ	11896	 * Process all the events on FCP fast-path EQ
11878	 */	11897	 */
11879 while ((eqe = lpfc_sli4_eq_get(fpeq))) { 11898 while ((eqe = lpfc_sli4_eq_get(fpeq))) {
11880 lpfc_sli4_fp_handle_eqe(phba, eqe, fcp_eqidx); 11899 lpfc_sli4_hba_handle_eqe(phba, eqe, fcp_eqidx);
11881 if (!(++ecount % fpeq->entry_repost)) 11900 if (!(++ecount % fpeq->entry_repost))
11882 lpfc_sli4_eq_release(fpeq, LPFC_QUEUE_NOARM); 11901 lpfc_sli4_eq_release(fpeq, LPFC_QUEUE_NOARM);
11902 fpeq->EQ_processed++;
11883 } 11903 }
11884 11904
11905 /* Track the max number of EQEs processed in 1 intr */
11906 if (ecount > fpeq->EQ_max_eqe)
11907 fpeq->EQ_max_eqe = ecount;
11908
11885 /* Always clear and re-arm the fast-path EQ */ 11909 /* Always clear and re-arm the fast-path EQ */
11886 lpfc_sli4_eq_release(fpeq, LPFC_QUEUE_REARM); 11910 lpfc_sli4_eq_release(fpeq, LPFC_QUEUE_REARM);
11887 11911
11888 if (unlikely(ecount == 0)) { 11912 if (unlikely(ecount == 0)) {
11913 fpeq->EQ_no_entry++;
11914
11915 if (lpfc_fcp_look_ahead) {
11916 atomic_inc(&fcp_eq_hdl->fcp_eq_in_use);
11917 return IRQ_NONE;
11918 }
11919
11889 if (phba->intr_type == MSIX) 11920 if (phba->intr_type == MSIX)
11890 /* MSI-X treated interrupt served as no EQ share INT */ 11921 /* MSI-X treated interrupt served as no EQ share INT */
11891 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 11922 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
@@ -11895,6 +11926,8 @@ lpfc_sli4_fp_intr_handler(int irq, void *dev_id)
11895 return IRQ_NONE; 11926 return IRQ_NONE;
11896 } 11927 }
11897 11928
11929 if (lpfc_fcp_look_ahead)
11930 atomic_inc(&fcp_eq_hdl->fcp_eq_in_use);
11898 return IRQ_HANDLED; 11931 return IRQ_HANDLED;
11899} /* lpfc_sli4_fp_intr_handler */	11932} /* lpfc_sli4_fp_intr_handler */
11900 11933
@@ -11919,8 +11952,8 @@ irqreturn_t
11919lpfc_sli4_intr_handler(int irq, void *dev_id)	11952lpfc_sli4_intr_handler(int irq, void *dev_id)
11920{ 11953{
11921 struct lpfc_hba *phba; 11954 struct lpfc_hba *phba;
11922 irqreturn_t sp_irq_rc, fp_irq_rc; 11955 irqreturn_t hba_irq_rc;
11923 bool fp_handled = false; 11956 bool hba_handled = false;
11924 uint32_t fcp_eqidx; 11957 uint32_t fcp_eqidx;
11925 11958
11926 /* Get the driver's phba structure from the dev_id */ 11959 /* Get the driver's phba structure from the dev_id */
@@ -11930,21 +11963,16 @@ lpfc_sli4_intr_handler(int irq, void *dev_id)
11930 return IRQ_NONE; 11963 return IRQ_NONE;
11931 11964
11932	 /*	11965	 /*
11933 * Invokes slow-path host attention interrupt handling as appropriate.
11934 */
11935 sp_irq_rc = lpfc_sli4_sp_intr_handler(irq, dev_id);
11936
11937 /*
11938	 * Invoke fast-path host attention interrupt handling as appropriate.	11966	 * Invoke fast-path host attention interrupt handling as appropriate.
11939	 */	11967	 */
11940 for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) { 11968 for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_io_channel; fcp_eqidx++) {
11941 fp_irq_rc = lpfc_sli4_fp_intr_handler(irq, 11969 hba_irq_rc = lpfc_sli4_hba_intr_handler(irq,
11942 &phba->sli4_hba.fcp_eq_hdl[fcp_eqidx]); 11970 &phba->sli4_hba.fcp_eq_hdl[fcp_eqidx]);
11943 if (fp_irq_rc == IRQ_HANDLED) 11971 if (hba_irq_rc == IRQ_HANDLED)
11944 fp_handled |= true; 11972 hba_handled |= true;
11945 } 11973 }
11946 11974
11947 return (fp_handled == true) ? IRQ_HANDLED : sp_irq_rc; 11975 return (hba_handled == true) ? IRQ_HANDLED : IRQ_NONE;
11948} /* lpfc_sli4_intr_handler */ 11976} /* lpfc_sli4_intr_handler */
11949 11977
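With the slow-path EQ gone, the device-level handler above simply polls every io channel's handler on the one shared interrupt line and reports IRQ_HANDLED if any channel did real work. A compact C model of that composite dispatch, using toy types rather than the kernel's irqreturn_t:

    #include <stdbool.h>
    #include <stdio.h>

    #define NUM_CHANNELS 4

    enum toy_irqreturn { TOY_IRQ_NONE, TOY_IRQ_HANDLED };

    /* Stand-in for the per-channel handler; pretend only channel 2
     * has pending events on this pass. */
    static enum toy_irqreturn channel_handler(int ch)
    {
        return ch == 2 ? TOY_IRQ_HANDLED : TOY_IRQ_NONE;
    }

    /* Device-level (INTx/MSI) handler: one shared line, so poll every
     * channel and claim the interrupt if any of them did work. */
    static enum toy_irqreturn device_handler(void)
    {
        bool handled = false;

        for (int ch = 0; ch < NUM_CHANNELS; ch++)
            if (channel_handler(ch) == TOY_IRQ_HANDLED)
                handled = true;

        return handled ? TOY_IRQ_HANDLED : TOY_IRQ_NONE;
    }

    int main(void)
    {
        printf("device handler -> %s\n",
               device_handler() == TOY_IRQ_HANDLED ? "IRQ_HANDLED"
                                                   : "IRQ_NONE");
        return 0;
    }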
11950/** 11978/**
@@ -12075,7 +12103,7 @@ lpfc_modify_fcp_eq_delay(struct lpfc_hba *phba, uint16_t startq)
12075 union lpfc_sli4_cfg_shdr *shdr; 12103 union lpfc_sli4_cfg_shdr *shdr;
12076 uint16_t dmult; 12104 uint16_t dmult;
12077 12105
12078 if (startq >= phba->cfg_fcp_eq_count) 12106 if (startq >= phba->cfg_fcp_io_channel)
12079 return 0; 12107 return 0;
12080 12108
12081	 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);	12109	 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
@@ -12089,12 +12117,13 @@ lpfc_modify_fcp_eq_delay(struct lpfc_hba *phba, uint16_t startq)
12089	 eq_delay = &mbox->u.mqe.un.eq_delay;	12117	 eq_delay = &mbox->u.mqe.un.eq_delay;
12090 12118
12091	 /* Calculate delay multiplier from maximum interrupts per second */	12119	 /* Calculate delay multiplier from maximum interrupts per second */
12092	 dmult = LPFC_DMULT_CONST/phba->cfg_fcp_imax - 1;	12120	 dmult = phba->cfg_fcp_imax / phba->cfg_fcp_io_channel;
	12121	 dmult = LPFC_DMULT_CONST/dmult - 1;
12093 12122
12094 cnt = 0; 12123 cnt = 0;
12095 for (fcp_eqidx = startq; fcp_eqidx < phba->cfg_fcp_eq_count; 12124 for (fcp_eqidx = startq; fcp_eqidx < phba->cfg_fcp_io_channel;
12096 fcp_eqidx++) { 12125 fcp_eqidx++) {
12097 eq = phba->sli4_hba.fp_eq[fcp_eqidx]; 12126 eq = phba->sli4_hba.hba_eq[fcp_eqidx];
12098 if (!eq) 12127 if (!eq)
12099 continue; 12128 continue;
12100 eq_delay->u.request.eq[cnt].eq_id = eq->queue_id; 12129 eq_delay->u.request.eq[cnt].eq_id = eq->queue_id;
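The reworked delay computation first spreads cfg_fcp_imax, the total target interrupts per second, evenly across the io channels, then converts the per-EQ rate into the hardware's delay multiplier. A small C sketch of that arithmetic; the constant value used here is only an assumption for illustration, as the real LPFC_DMULT_CONST is defined in lpfc_hw4.h:

    #include <stdio.h>

    /* Assumed stand-in for LPFC_DMULT_CONST (see lpfc_hw4.h). */
    #define ASSUMED_DMULT_CONST 651042UL

    /* imax: total target interrupts/sec; channels: io channel count.
     * The per-EQ rate is imax/channels; the hardware wants a delay
     * multiplier rather than a rate, hence the final divide. */
    static unsigned long eq_delay_multiplier(unsigned long imax,
                                             unsigned long channels)
    {
        unsigned long per_eq = imax / channels;

        return ASSUMED_DMULT_CONST / per_eq - 1;
    }

    int main(void)
    {
        /* 10000 intr/s over 4 channels -> 2500/s per EQ */
        printf("dmult = %lu\n", eq_delay_multiplier(10000, 4));
        return 0;
    }

Under these assumed numbers each EQ targets 2500 interrupts per second, giving dmult = 651042/2500 - 1 = 259; the loop below then programs that multiplier into every EQ from startq onward.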