Diffstat (limited to 'drivers/scsi/lpfc')
-rw-r--r--	drivers/scsi/lpfc/lpfc_sli.c	| 3382
1 file changed, 3382 insertions(+), 0 deletions(-)

diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 706bb22a6e8e..cf42ada3ffcd 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -70,6 +70,350 @@ typedef enum _lpfc_iocb_type {
 	LPFC_ABORT_IOCB
 } lpfc_iocb_type;
 
+
+/* Provide function prototypes local to this module. */
+static int lpfc_sli_issue_mbox_s4(struct lpfc_hba *, LPFC_MBOXQ_t *,
+				  uint32_t);
+static int lpfc_sli4_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *,
+			      uint8_t *, uint32_t *);
+
+static IOCB_t *
+lpfc_get_iocb_from_iocbq(struct lpfc_iocbq *iocbq)
+{
+	return &iocbq->iocb;
+}
+
+/**
+ * lpfc_sli4_wq_put - Put a Work Queue Entry on a Work Queue
+ * @q: The Work Queue to operate on.
+ * @wqe: The Work Queue Entry to put on the Work Queue.
+ *
+ * This routine will copy the contents of @wqe to the next available entry on
+ * the @q. This function will then ring the Work Queue Doorbell to signal the
+ * HBA to start processing the Work Queue Entry. This function returns 0 if
+ * successful. If no entries are available on @q then this function will return
+ * -ENOMEM.
+ * The caller is expected to hold the hbalock when calling this routine.
+ **/
+static uint32_t
+lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe *wqe)
+{
+	union lpfc_wqe *temp_wqe = q->qe[q->host_index].wqe;
+	struct lpfc_register doorbell;
+	uint32_t host_index;
+
+	/* If the host has not yet processed the next entry then we are done */
+	if (((q->host_index + 1) % q->entry_count) == q->hba_index)
+		return -ENOMEM;
+	/* set consumption flag every once in a while */
+	if (!((q->host_index + 1) % LPFC_RELEASE_NOTIFICATION_INTERVAL))
+		bf_set(lpfc_wqe_gen_wqec, &wqe->generic, 1);
+
+	lpfc_sli_pcimem_bcopy(wqe, temp_wqe, q->entry_size);
+
+	/* Update the host index before invoking device */
+	host_index = q->host_index;
+	q->host_index = ((q->host_index + 1) % q->entry_count);
+
+	/* Ring Doorbell */
+	doorbell.word0 = 0;
+	bf_set(lpfc_wq_doorbell_num_posted, &doorbell, 1);
+	bf_set(lpfc_wq_doorbell_index, &doorbell, host_index);
+	bf_set(lpfc_wq_doorbell_id, &doorbell, q->queue_id);
+	writel(doorbell.word0, q->phba->sli4_hba.WQDBregaddr);
+	readl(q->phba->sli4_hba.WQDBregaddr); /* Flush */
+
+	return 0;
+}
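
A note on the full-queue test above: this is the classic ring-buffer convention that sacrifices one slot so that equal producer and consumer indices unambiguously mean "empty". A minimal standalone sketch of the same arithmetic (hypothetical names, not driver code):

	/* Ring with entry_count slots; one slot always stays empty so that
	 * host_index == hba_index means "empty" rather than "full".
	 */
	struct ring {
		unsigned int host_index;	/* producer index */
		unsigned int hba_index;		/* consumer index */
		unsigned int entry_count;
	};

	static int ring_full(const struct ring *r)
	{
		return ((r->host_index + 1) % r->entry_count) == r->hba_index;
	}

	static int ring_empty(const struct ring *r)
	{
		return r->host_index == r->hba_index;
	}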
+
+/**
+ * lpfc_sli4_wq_release - Updates internal hba index for WQ
+ * @q: The Work Queue to operate on.
+ * @index: The index to advance the hba index to.
+ *
+ * This routine will update the HBA index of a queue to reflect consumption of
+ * Work Queue Entries by the HBA. When the HBA indicates that it has consumed
+ * an entry the host calls this function to update the queue's internal
+ * pointers. This routine returns the number of entries that were consumed by
+ * the HBA.
+ **/
+static uint32_t
+lpfc_sli4_wq_release(struct lpfc_queue *q, uint32_t index)
+{
+	uint32_t released = 0;
+
+	if (q->hba_index == index)
+		return 0;
+	do {
+		q->hba_index = ((q->hba_index + 1) % q->entry_count);
+		released++;
+	} while (q->hba_index != index);
+	return released;
+}
+
+/**
+ * lpfc_sli4_mq_put - Put a Mailbox Queue Entry on a Mailbox Queue
+ * @q: The Mailbox Queue to operate on.
+ * @mqe: The Mailbox Queue Entry to put on the Mailbox Queue.
+ *
+ * This routine will copy the contents of @mqe to the next available entry on
+ * the @q. This function will then ring the MQ Doorbell to signal the
+ * HBA to start processing the Mailbox Queue Entry. This function returns 0 if
+ * successful. If no entries are available on @q then this function will return
+ * -ENOMEM.
+ * The caller is expected to hold the hbalock when calling this routine.
+ **/
+static uint32_t
+lpfc_sli4_mq_put(struct lpfc_queue *q, struct lpfc_mqe *mqe)
+{
+	struct lpfc_mqe *temp_mqe = q->qe[q->host_index].mqe;
+	struct lpfc_register doorbell;
+	uint32_t host_index;
+
+	/* If the host has not yet processed the next entry then we are done */
+	if (((q->host_index + 1) % q->entry_count) == q->hba_index)
+		return -ENOMEM;
+	lpfc_sli_pcimem_bcopy(mqe, temp_mqe, q->entry_size);
+	/* Save off the mailbox pointer for completion */
+	q->phba->mbox = (MAILBOX_t *)temp_mqe;
+
+	/* Update the host index before invoking device */
+	host_index = q->host_index;
+	q->host_index = ((q->host_index + 1) % q->entry_count);
+
+	/* Ring Doorbell */
+	doorbell.word0 = 0;
+	bf_set(lpfc_mq_doorbell_num_posted, &doorbell, 1);
+	bf_set(lpfc_mq_doorbell_id, &doorbell, q->queue_id);
+	writel(doorbell.word0, q->phba->sli4_hba.MQDBregaddr);
+	readl(q->phba->sli4_hba.MQDBregaddr); /* Flush */
+	return 0;
+}
+
+/**
+ * lpfc_sli4_mq_release - Updates internal hba index for MQ
+ * @q: The Mailbox Queue to operate on.
+ *
+ * This routine will update the HBA index of a queue to reflect consumption of
+ * a Mailbox Queue Entry by the HBA. When the HBA indicates that it has consumed
+ * an entry the host calls this function to update the queue's internal
+ * pointers. This routine returns the number of entries that were consumed by
+ * the HBA.
+ **/
+static uint32_t
+lpfc_sli4_mq_release(struct lpfc_queue *q)
+{
+	/* Clear the mailbox pointer for completion */
+	q->phba->mbox = NULL;
+	q->hba_index = ((q->hba_index + 1) % q->entry_count);
+	return 1;
+}
+
+/**
+ * lpfc_sli4_eq_get - Gets the next valid EQE from an EQ
+ * @q: The Event Queue to get the first valid EQE from
+ *
+ * This routine will get the first valid Event Queue Entry from @q, update
+ * the queue's internal hba index, and return the EQE. If no valid EQEs are in
+ * the Queue (no more work to do), or the Queue is full of EQEs that have been
+ * processed, but not popped back to the HBA then this routine will return NULL.
+ **/
+static struct lpfc_eqe *
+lpfc_sli4_eq_get(struct lpfc_queue *q)
+{
+	struct lpfc_eqe *eqe = q->qe[q->hba_index].eqe;
+
+	/* If the next EQE is not valid then we are done */
+	if (!bf_get(lpfc_eqe_valid, eqe))
+		return NULL;
+	/* If the host has not yet processed the next entry then we are done */
+	if (((q->hba_index + 1) % q->entry_count) == q->host_index)
+		return NULL;
+
+	q->hba_index = ((q->hba_index + 1) % q->entry_count);
+	return eqe;
+}
+
+/**
+ * lpfc_sli4_eq_release - Indicates the host has finished processing an EQ
+ * @q: The Event Queue that the host has completed processing for.
+ * @arm: Indicates whether the host wants to arm this EQ.
+ *
+ * This routine will mark all Event Queue Entries on @q, from the last
+ * known completed entry to the last entry that was processed, as completed
+ * by clearing the valid bit for each event queue entry. Then it will
+ * notify the HBA, by ringing the doorbell, that the EQEs have been processed.
+ * The internal host index in the @q will be updated by this routine to indicate
+ * that the host has finished processing the entries. The @arm parameter
+ * indicates that the queue should be rearmed when ringing the doorbell.
+ *
+ * This function will return the number of EQEs that were popped.
+ **/
+uint32_t
+lpfc_sli4_eq_release(struct lpfc_queue *q, bool arm)
+{
+	uint32_t released = 0;
+	struct lpfc_eqe *temp_eqe;
+	struct lpfc_register doorbell;
+
+	/* while there are valid entries */
+	while (q->hba_index != q->host_index) {
+		temp_eqe = q->qe[q->host_index].eqe;
+		bf_set(lpfc_eqe_valid, temp_eqe, 0);
+		released++;
+		q->host_index = ((q->host_index + 1) % q->entry_count);
+	}
+	if (unlikely(released == 0 && !arm))
+		return 0;
+
+	/* ring doorbell for number popped */
+	doorbell.word0 = 0;
+	if (arm) {
+		bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
+		bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
+	}
+	bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, released);
+	bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
+	bf_set(lpfc_eqcq_doorbell_eqid, &doorbell, q->queue_id);
+	writel(doorbell.word0, q->phba->sli4_hba.EQCQDBregaddr);
+	return released;
+}
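
A typical consumer pairs lpfc_sli4_eq_get() and lpfc_sli4_eq_release() as below. This is only a hedged sketch of the usage pattern; the real interrupt handlers appear later in this patch:

	static void example_service_eq(struct lpfc_queue *eq)
	{
		struct lpfc_eqe *eqe;

		/* Consume every valid entry the HBA has posted */
		while ((eqe = lpfc_sli4_eq_get(eq)) != NULL) {
			/* dispatch eqe to its completion queue here */
		}
		/* One doorbell write pops all consumed EQEs and rearms the EQ */
		lpfc_sli4_eq_release(eq, true);
	}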
+
+/**
+ * lpfc_sli4_cq_get - Gets the next valid CQE from a CQ
+ * @q: The Completion Queue to get the first valid CQE from
+ *
+ * This routine will get the first valid Completion Queue Entry from @q, update
+ * the queue's internal hba index, and return the CQE. If no valid CQEs are in
+ * the Queue (no more work to do), or the Queue is full of CQEs that have been
+ * processed, but not popped back to the HBA then this routine will return NULL.
+ **/
+static struct lpfc_cqe *
+lpfc_sli4_cq_get(struct lpfc_queue *q)
+{
+	struct lpfc_cqe *cqe;
+
+	/* If the next CQE is not valid then we are done */
+	if (!bf_get(lpfc_cqe_valid, q->qe[q->hba_index].cqe))
+		return NULL;
+	/* If the host has not yet processed the next entry then we are done */
+	if (((q->hba_index + 1) % q->entry_count) == q->host_index)
+		return NULL;
+
+	cqe = q->qe[q->hba_index].cqe;
+	q->hba_index = ((q->hba_index + 1) % q->entry_count);
+	return cqe;
+}
+
+/**
+ * lpfc_sli4_cq_release - Indicates the host has finished processing a CQ
+ * @q: The Completion Queue that the host has completed processing for.
+ * @arm: Indicates whether the host wants to arm this CQ.
+ *
+ * This routine will mark all Completion queue entries on @q, from the last
+ * known completed entry to the last entry that was processed, as completed
+ * by clearing the valid bit for each completion queue entry. Then it will
+ * notify the HBA, by ringing the doorbell, that the CQEs have been processed.
+ * The internal host index in the @q will be updated by this routine to indicate
+ * that the host has finished processing the entries. The @arm parameter
+ * indicates that the queue should be rearmed when ringing the doorbell.
+ *
+ * This function will return the number of CQEs that were released.
+ **/
+uint32_t
+lpfc_sli4_cq_release(struct lpfc_queue *q, bool arm)
+{
+	uint32_t released = 0;
+	struct lpfc_cqe *temp_qe;
+	struct lpfc_register doorbell;
+
+	/* while there are valid entries */
+	while (q->hba_index != q->host_index) {
+		temp_qe = q->qe[q->host_index].cqe;
+		bf_set(lpfc_cqe_valid, temp_qe, 0);
+		released++;
+		q->host_index = ((q->host_index + 1) % q->entry_count);
+	}
+	if (unlikely(released == 0 && !arm))
+		return 0;
+
+	/* ring doorbell for number popped */
+	doorbell.word0 = 0;
+	if (arm)
+		bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
+	bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, released);
+	bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_COMPLETION);
+	bf_set(lpfc_eqcq_doorbell_cqid, &doorbell, q->queue_id);
+	writel(doorbell.word0, q->phba->sli4_hba.EQCQDBregaddr);
+	return released;
+}
+
+/**
+ * lpfc_sli4_rq_put - Put a Receive Buffer Queue Entry on a Receive Queue
+ * @hq: The Header Receive Queue to operate on.
+ * @dq: The Data Receive Queue to operate on.
+ * @hrqe: The Header Receive Queue Entry to put on the header queue.
+ * @drqe: The Data Receive Queue Entry to put on the data queue.
+ *
+ * This routine will copy the contents of @hrqe and @drqe to the next available
+ * entries on @hq and @dq. This function will then ring the Receive Queue
+ * Doorbell to signal the HBA to start processing the Receive Queue Entries.
+ * This function returns the index that the hrqe was copied to if successful.
+ * It returns -EBUSY if no entries are available and -EINVAL if the queues are
+ * not a matched header/data pair advancing in lockstep.
+ * The caller is expected to hold the hbalock when calling this routine.
+ **/
+static int
+lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
+		 struct lpfc_rqe *hrqe, struct lpfc_rqe *drqe)
+{
+	struct lpfc_rqe *temp_hrqe = hq->qe[hq->host_index].rqe;
+	struct lpfc_rqe *temp_drqe = dq->qe[dq->host_index].rqe;
+	struct lpfc_register doorbell;
+	int put_index = hq->host_index;
+
+	if (hq->type != LPFC_HRQ || dq->type != LPFC_DRQ)
+		return -EINVAL;
+	if (hq->host_index != dq->host_index)
+		return -EINVAL;
+	/* If the host has not yet processed the next entry then we are done */
+	if (((hq->host_index + 1) % hq->entry_count) == hq->hba_index)
+		return -EBUSY;
+	lpfc_sli_pcimem_bcopy(hrqe, temp_hrqe, hq->entry_size);
+	lpfc_sli_pcimem_bcopy(drqe, temp_drqe, dq->entry_size);
+
+	/* Update the host index to point to the next slot */
+	hq->host_index = ((hq->host_index + 1) % hq->entry_count);
+	dq->host_index = ((dq->host_index + 1) % dq->entry_count);
+
+	/* Ring The Header Receive Queue Doorbell */
+	if (!(hq->host_index % LPFC_RQ_POST_BATCH)) {
+		doorbell.word0 = 0;
+		bf_set(lpfc_rq_doorbell_num_posted, &doorbell,
+		       LPFC_RQ_POST_BATCH);
+		bf_set(lpfc_rq_doorbell_id, &doorbell, hq->queue_id);
+		writel(doorbell.word0, hq->phba->sli4_hba.RQDBregaddr);
+	}
+	return put_index;
+}
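
Two details of lpfc_sli4_rq_put() are worth calling out: the header and data queues must advance in lockstep (hence the -EINVAL checks), and the doorbell is only rung once per LPFC_RQ_POST_BATCH postings to cut MMIO traffic. A hedged caller sketch, where header_dma, data_dma, hdr_rq and dat_rq are stand-in names (the real caller, lpfc_sli_hbq_to_firmware_s4, appears further down in this patch):

	struct lpfc_rqe hrqe, drqe;	/* header and data entries */
	int put_index;

	hrqe.address_lo = putPaddrLow(header_dma);
	hrqe.address_hi = putPaddrHigh(header_dma);
	drqe.address_lo = putPaddrLow(data_dma);
	drqe.address_hi = putPaddrHigh(data_dma);
	put_index = lpfc_sli4_rq_put(hdr_rq, dat_rq, &hrqe, &drqe);
	if (put_index < 0)
		return put_index;	/* -EBUSY when full, -EINVAL on mismatch */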
+
+/**
+ * lpfc_sli4_rq_release - Updates internal hba index for RQ
+ * @hq: The Header Receive Queue to operate on.
+ * @dq: The Data Receive Queue to operate on.
+ *
+ * This routine will update the HBA index of a queue to reflect consumption of
+ * one Receive Queue Entry by the HBA. When the HBA indicates that it has
+ * consumed an entry the host calls this function to update the queue's
+ * internal pointers. This routine returns the number of entries that were
+ * consumed by the HBA.
+ **/
+static uint32_t
+lpfc_sli4_rq_release(struct lpfc_queue *hq, struct lpfc_queue *dq)
+{
+	if ((hq->type != LPFC_HRQ) || (dq->type != LPFC_DRQ))
+		return 0;
+	hq->hba_index = ((hq->hba_index + 1) % hq->entry_count);
+	dq->hba_index = ((dq->hba_index + 1) % dq->entry_count);
+	return 1;
+}
+
 /**
  * lpfc_cmd_iocb - Get next command iocb entry in the ring
  * @phba: Pointer to HBA context object.
@@ -215,6 +559,59 @@ lpfc_sli_get_iocbq(struct lpfc_hba *phba)
 }
 
 /**
+ * __lpfc_sli_release_iocbq_s4 - Release iocb to the iocb pool
+ * @phba: Pointer to HBA context object.
+ * @iocbq: Pointer to driver iocb object.
+ *
+ * This function is called with hbalock held to release driver
+ * iocb object to the iocb pool. The iotag in the iocb object
+ * does not change for each use of the iocb object. This function
+ * clears all other fields of the iocb object when it is freed.
+ * The sglq structure that holds the xritag and phys and virtual
+ * mappings for the scatter gather list is retrieved from the
+ * active array of sglq. The get of the sglq pointer also clears
+ * the entry in the array. If the status of the IO indicates that
+ * this IO was aborted then the sglq entry is put on the
+ * lpfc_abts_els_sgl_list until the CQ_ABORTED_XRI is received. If the
+ * IO has good status or fails for any other reason then the sglq
+ * entry is added to the free list (lpfc_sgl_list).
+ **/
+static void
+__lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
+{
+	struct lpfc_sglq *sglq;
+	size_t start_clean = offsetof(struct lpfc_iocbq, iocb);
+	unsigned long iflag;
+
+	if (iocbq->sli4_xritag == NO_XRI)
+		sglq = NULL;
+	else
+		sglq = __lpfc_clear_active_sglq(phba, iocbq->sli4_xritag);
+	if (sglq) {
+		if (iocbq->iocb_flag & LPFC_DRIVER_ABORTED
+		    || ((iocbq->iocb.ulpStatus == IOSTAT_LOCAL_REJECT)
+			&& (iocbq->iocb.un.ulpWord[4]
+			    == IOERR_SLI_ABORTED))) {
+			spin_lock_irqsave(&phba->sli4_hba.abts_sgl_list_lock,
+					  iflag);
+			list_add(&sglq->list,
+				 &phba->sli4_hba.lpfc_abts_els_sgl_list);
+			spin_unlock_irqrestore(
+				&phba->sli4_hba.abts_sgl_list_lock, iflag);
+		} else
+			list_add(&sglq->list, &phba->sli4_hba.lpfc_sgl_list);
+	}
+
+	/*
+	 * Clean all volatile data fields, preserve iotag and node struct.
+	 */
+	memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
+	iocbq->sli4_xritag = NO_XRI;
+	list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
+}
+
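The offsetof()-based memset in __lpfc_sli_release_iocbq_s4() is a common recycling idiom: everything before the iocb member (list linkage, iotag) survives, everything from iocb onward is wiped. The same trick on a made-up struct, as a hedged sketch:

	#include <stddef.h>
	#include <string.h>

	struct obj {
		int id;			/* preserved across recycling */
		struct obj *next;	/* preserved across recycling */
		char payload[64];	/* cleared on every release */
		int state;		/* cleared on every release */
	};

	static void obj_recycle(struct obj *o)
	{
		size_t start_clean = offsetof(struct obj, payload);

		/* zero from 'payload' through the end of the struct */
		memset((char *)o + start_clean, 0, sizeof(*o) - start_clean);
	}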
+/**
  * __lpfc_sli_release_iocbq_s3 - Release iocb to the iocb pool
  * @phba: Pointer to HBA context object.
  * @iocbq: Pointer to driver iocb object.
@@ -959,6 +1356,37 @@ lpfc_sli_hbq_to_firmware_s3(struct lpfc_hba *phba, uint32_t hbqno,
 	return -ENOMEM;
 }
 
+/**
+ * lpfc_sli_hbq_to_firmware_s4 - Post the hbq buffer to SLI4 firmware
+ * @phba: Pointer to HBA context object.
+ * @hbqno: HBQ number.
+ * @hbq_buf: Pointer to HBQ buffer.
+ *
+ * This function is called with the hbalock held to post an RQE to the SLI4
+ * firmware. If able to post the RQE to the RQ it will queue the hbq entry to
+ * the hbq_buffer_list and return zero, otherwise it will return an error.
+ **/
+static int
+lpfc_sli_hbq_to_firmware_s4(struct lpfc_hba *phba, uint32_t hbqno,
+			    struct hbq_dmabuf *hbq_buf)
+{
+	int rc;
+	struct lpfc_rqe hrqe;
+	struct lpfc_rqe drqe;
+
+	hrqe.address_lo = putPaddrLow(hbq_buf->hbuf.phys);
+	hrqe.address_hi = putPaddrHigh(hbq_buf->hbuf.phys);
+	drqe.address_lo = putPaddrLow(hbq_buf->dbuf.phys);
+	drqe.address_hi = putPaddrHigh(hbq_buf->dbuf.phys);
+	rc = lpfc_sli4_rq_put(phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq,
+			      &hrqe, &drqe);
+	if (rc < 0)
+		return rc;
+	hbq_buf->tag = rc;
+	list_add_tail(&hbq_buf->dbuf.list, &phba->hbqs[hbqno].hbq_buffer_list);
+	return 0;
+}
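
putPaddrLow()/putPaddrHigh() split the 64-bit DMA address of each buffer into the two 32-bit words an RQE carries. A hedged plain-C equivalent, assuming 64-bit addresses (the helper names here are invented):

	#include <stdint.h>

	static inline uint32_t paddr_lo(uint64_t addr)	/* low 32 bits */
	{
		return (uint32_t)(addr & 0xffffffffULL);
	}

	static inline uint32_t paddr_hi(uint64_t addr)	/* high 32 bits */
	{
		return (uint32_t)(addr >> 32);
	}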
+
 /* HBQ for ELS and CT traffic. */
 static struct lpfc_hbq_init lpfc_els_hbq = {
 	.rn = 1,
@@ -2575,6 +3003,36 @@ lpfc_sli_handle_slow_ring_event_s3(struct lpfc_hba *phba,
 }
 
 /**
+ * lpfc_sli_handle_slow_ring_event_s4 - Handle SLI4 slow-path els events
+ * @phba: Pointer to HBA context object.
+ * @pring: Pointer to driver SLI ring object.
+ * @mask: Host attention register mask for this ring.
+ *
+ * This function is called from the worker thread when there is a pending
+ * ELS response iocb on the driver internal slow-path response iocb worker
+ * queue. The caller does not hold any lock. The function removes each
+ * response iocb from the response worker queue and calls the response iocb
+ * handler (lpfc_sli_sp_handle_rspiocb) to process it.
+ **/
+static void
+lpfc_sli_handle_slow_ring_event_s4(struct lpfc_hba *phba,
+				   struct lpfc_sli_ring *pring, uint32_t mask)
+{
+	struct lpfc_iocbq *irspiocbq;
+	unsigned long iflag;
+
+	while (!list_empty(&phba->sli4_hba.sp_rspiocb_work_queue)) {
+		/* Get the response iocb from the head of work queue */
+		spin_lock_irqsave(&phba->hbalock, iflag);
+		list_remove_head(&phba->sli4_hba.sp_rspiocb_work_queue,
+				 irspiocbq, struct lpfc_iocbq, list);
+		spin_unlock_irqrestore(&phba->hbalock, iflag);
+		/* Process the response iocb */
+		lpfc_sli_sp_handle_rspiocb(phba, pring, irspiocbq);
+	}
+}
+
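Note the locking shape in the loop above: the hbalock is taken only around list_remove_head() and dropped before the handler runs, so lpfc_sli_sp_handle_rspiocb() executes unlocked. The general drain pattern, sketched with placeholder names:

	/* Drain a producer/consumer list, processing entries unlocked */
	while (!list_empty(&work_list)) {	/* unlocked peek is fine:  */
		spin_lock_irqsave(&lock, flags);/* only removal is guarded */
		list_remove_head(&work_list, item, struct item_type, list);
		spin_unlock_irqrestore(&lock, flags);
		process(item);			/* may take other locks */
	}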
+/**
  * lpfc_sli_abort_iocb_ring - Abort all iocbs in the ring
  * @phba: Pointer to HBA context object.
  * @pring: Pointer to driver SLI ring object.
@@ -3376,6 +3834,26 @@ lpfc_sli_hbq_setup(struct lpfc_hba *phba)
 }
 
 /**
+ * lpfc_sli4_rb_setup - Initialize and post RBs to HBA
+ * @phba: Pointer to HBA context object.
+ *
+ * This function is called during the SLI initialization to configure
+ * all the HBQs and post buffers to the HBQ. The caller is not
+ * required to hold any locks. This function will return zero if successful
+ * else it will return negative error code.
+ **/
+static int
+lpfc_sli4_rb_setup(struct lpfc_hba *phba)
+{
+	phba->hbq_in_use = 1;
+	phba->hbqs[0].entry_count = lpfc_hbq_defs[0]->entry_count;
+	phba->hbq_count = 1;
+	/* Initially populate or replenish the HBQs */
+	lpfc_sli_hbqbuf_init_hbqs(phba, 0);
+	return 0;
+}
+
+/**
 * lpfc_sli_config_port - Issue config port mailbox command
 * @phba: Pointer to HBA context object.
 * @sli_mode: sli mode - 2/3
@@ -5130,6 +5608,448 @@ __lpfc_sli_issue_iocb_s3(struct lpfc_hba *phba, uint32_t ring_number,
 }
 
 /**
+ * lpfc_sli4_bpl2sgl - Convert the bpl/bde to a sgl.
+ * @phba: Pointer to HBA context object.
+ * @piocbq: Pointer to command iocb.
+ * @sglq: Pointer to the scatter gather queue object.
+ *
+ * This routine converts the bpl or bde that is in the IOCB
+ * to a sgl list for the sli4 hardware. The physical address
+ * of the bpl/bde is converted back to a virtual address.
+ * If the IOCB contains a BPL then the list of BDEs is
+ * converted to sli4_sge's. If the IOCB contains a single
+ * BDE then it is converted to a single sli4_sge.
+ * The IOCB is still in CPU endianness so the contents of
+ * the bpl can be used without byte swapping.
+ *
+ * Returns valid XRI = Success, NO_XRI = Failure.
+ **/
+static uint16_t
+lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
+		  struct lpfc_sglq *sglq)
+{
+	uint16_t xritag = NO_XRI;
+	struct ulp_bde64 *bpl = NULL;
+	struct ulp_bde64 bde;
+	struct sli4_sge *sgl = NULL;
+	IOCB_t *icmd;
+	int numBdes = 0;
+	int i = 0;
+
+	if (!piocbq || !sglq)
+		return xritag;
+
+	sgl = (struct sli4_sge *)sglq->sgl;
+	icmd = &piocbq->iocb;
+	if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) {
+		numBdes = icmd->un.genreq64.bdl.bdeSize /
+				sizeof(struct ulp_bde64);
+		/* The addrHigh and addrLow fields within the IOCB
+		 * have not been byteswapped yet so there is no
+		 * need to swap them back.
+		 */
+		bpl = (struct ulp_bde64 *)
+			((struct lpfc_dmabuf *)piocbq->context3)->virt;
+
+		if (!bpl)
+			return xritag;
+
+		for (i = 0; i < numBdes; i++) {
+			/* Should already be byte swapped. */
+			sgl->addr_hi = bpl->addrHigh;
+			sgl->addr_lo = bpl->addrLow;
+			/* swap the size field back to the cpu so we
+			 * can assign it to the sgl.
+			 */
+			bde.tus.w = le32_to_cpu(bpl->tus.w);
+			bf_set(lpfc_sli4_sge_len, sgl, bde.tus.f.bdeSize);
+			if ((i+1) == numBdes)
+				bf_set(lpfc_sli4_sge_last, sgl, 1);
+			else
+				bf_set(lpfc_sli4_sge_last, sgl, 0);
+			sgl->word2 = cpu_to_le32(sgl->word2);
+			sgl->word3 = cpu_to_le32(sgl->word3);
+			bpl++;
+			sgl++;
+		}
+	} else if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BDE_64) {
+		/* The addrHigh and addrLow fields of the BDE have not
+		 * been byteswapped yet so they need to be swapped
+		 * before putting them in the sgl.
+		 */
+		sgl->addr_hi =
+			cpu_to_le32(icmd->un.genreq64.bdl.addrHigh);
+		sgl->addr_lo =
+			cpu_to_le32(icmd->un.genreq64.bdl.addrLow);
+		bf_set(lpfc_sli4_sge_len, sgl,
+		       icmd->un.genreq64.bdl.bdeSize);
+		bf_set(lpfc_sli4_sge_last, sgl, 1);
+		sgl->word2 = cpu_to_le32(sgl->word2);
+		sgl->word3 = cpu_to_le32(sgl->word3);
+	}
+	return sglq->sli4_xritag;
+}
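
The conversion above mixes byte orders deliberately: the BPL's address words are already little-endian on the wire and are copied through untouched, while the size word is swapped to CPU order for bf_set() and the finished SGE words are swapped back to wire order. The round-trip in miniature, as a hedged sketch with generic names:

	uint32_t w = le32_to_cpu(hw_word);	/* wire (LE) -> CPU order */
	/* ... update bitfields of w using CPU-order arithmetic ... */
	hw_word = cpu_to_le32(w);		/* CPU order -> wire (LE) */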
+
+/**
+ * lpfc_sli4_scmd_to_wqidx_distr - scsi command to SLI4 WQ index distribution
+ * @phba: Pointer to HBA context object.
+ * @piocb: Pointer to command iocb.
+ *
+ * This routine distributes SCSI commands across the SLI4 fast-path FCP work
+ * queues in round-robin fashion.
+ *
+ * Return: index into SLI4 fast-path FCP queue index.
+ **/
+static uint32_t
+lpfc_sli4_scmd_to_wqidx_distr(struct lpfc_hba *phba, struct lpfc_iocbq *piocb)
+{
+	static uint32_t fcp_qidx;
+
+	return fcp_qidx++ % phba->cfg_fcp_wq_count;
+}
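
The function-local static counter is shared by every caller and incremented without explicit synchronization; since the callers in this patch hold the hbalock and the worst failure mode is a slightly uneven spread, that is tolerable here. The selection itself is a plain modulo round-robin (sketch with invented names):

	/* Round-robin over nqueues work queues */
	static uint32_t next_qidx;			/* free-running counter */
	uint32_t qidx = next_qidx++ % nqueues;		/* 0,1,...,nqueues-1,0,... */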
+
+/**
+ * lpfc_sli4_iocb2wqe - Convert the IOCB to a work queue entry.
+ * @phba: Pointer to HBA context object.
+ * @iocbq: Pointer to command iocb.
+ * @wqe: Pointer to the work queue entry.
+ *
+ * This routine converts the iocb command to its Work Queue Entry
+ * equivalent. The wqe pointer should not have any fields set when
+ * this routine is called because it will memcpy over them.
+ * This routine does not set the CQ_ID or the WQEC bits in the
+ * wqe.
+ *
+ * Returns: 0 = Success, IOCB_ERROR = Failure.
+ **/
+static int
+lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
+		   union lpfc_wqe *wqe)
+{
+	uint32_t payload_len = 0;
+	uint8_t ct = 0;
+	uint32_t fip;
+	uint32_t abort_tag;
+	uint8_t command_type = ELS_COMMAND_NON_FIP;
+	uint8_t cmnd;
+	uint16_t xritag;
+	struct ulp_bde64 *bpl = NULL;
+
+	fip = bf_get(lpfc_fip_flag, &phba->sli4_hba.sli4_flags);
+	/* The fcp commands will set command type */
+	if ((!(iocbq->iocb_flag & LPFC_IO_FCP)) && (!fip))
+		command_type = ELS_COMMAND_NON_FIP;
+	else if (!(iocbq->iocb_flag & LPFC_IO_FCP))
+		command_type = ELS_COMMAND_FIP;
+	else if (iocbq->iocb_flag & LPFC_IO_FCP)
+		command_type = FCP_COMMAND;
+	else {
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"2019 Invalid cmd 0x%x\n",
+				iocbq->iocb.ulpCommand);
+		return IOCB_ERROR;
+	}
+	/* Some of the fields are in the right position already */
+	memcpy(wqe, &iocbq->iocb, sizeof(union lpfc_wqe));
+	abort_tag = (uint32_t) iocbq->iotag;
+	xritag = iocbq->sli4_xritag;
+	wqe->words[7] = 0; /* The ct field has moved so reset */
+	/* words0-2 bpl convert bde */
+	if (iocbq->iocb.un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) {
+		bpl = (struct ulp_bde64 *)
+			((struct lpfc_dmabuf *)iocbq->context3)->virt;
+		if (!bpl)
+			return IOCB_ERROR;
+
+		/* Should already be byte swapped. */
+		wqe->generic.bde.addrHigh = le32_to_cpu(bpl->addrHigh);
+		wqe->generic.bde.addrLow = le32_to_cpu(bpl->addrLow);
+		/* swap the size field back to the cpu so we
+		 * can assign it to the sgl.
+		 */
+		wqe->generic.bde.tus.w = le32_to_cpu(bpl->tus.w);
+		payload_len = wqe->generic.bde.tus.f.bdeSize;
+	} else
+		payload_len = iocbq->iocb.un.fcpi64.bdl.bdeSize;
+
+	iocbq->iocb.ulpIoTag = iocbq->iotag;
+	cmnd = iocbq->iocb.ulpCommand;
+
+	switch (iocbq->iocb.ulpCommand) {
+	case CMD_ELS_REQUEST64_CR:
+		if (!iocbq->iocb.ulpLe) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+					"2007 Only Limited Edition cmd Format"
+					" supported 0x%x\n",
+					iocbq->iocb.ulpCommand);
+			return IOCB_ERROR;
+		}
+		wqe->els_req.payload_len = payload_len;
+		/* Els_request64 has a TMO */
+		bf_set(wqe_tmo, &wqe->els_req.wqe_com,
+		       iocbq->iocb.ulpTimeout);
+		/* Need a VF for word 4 set the vf bit*/
+		bf_set(els_req64_vf, &wqe->els_req, 0);
+		/* And a VFID for word 12 */
+		bf_set(els_req64_vfid, &wqe->els_req, 0);
+		/*
+		 * Set ct field to 3, indicates that the context_tag field
+		 * contains the FCFI and remote N_Port_ID is
+		 * in word 5.
+		 */
+		ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l);
+		bf_set(lpfc_wqe_gen_context, &wqe->generic,
+		       iocbq->iocb.ulpContext);
+
+		if (iocbq->vport->fc_myDID != 0) {
+			bf_set(els_req64_sid, &wqe->els_req,
+			       iocbq->vport->fc_myDID);
+			bf_set(els_req64_sp, &wqe->els_req, 1);
+		}
+		bf_set(lpfc_wqe_gen_ct, &wqe->generic, ct);
+		bf_set(lpfc_wqe_gen_pu, &wqe->generic, 0);
+		/* CCP CCPE PV PRI in word10 were set in the memcpy */
+		break;
+	case CMD_XMIT_SEQUENCE64_CR:
+		/* word3 iocb=io_tag32 wqe=payload_offset */
+		/* payload offset used for multiple outstanding
+		 * sequences on the same exchange
+		 */
+		wqe->words[3] = 0;
+		/* word4 relative_offset memcpy */
+		/* word5 r_ctl/df_ctl memcpy */
+		bf_set(lpfc_wqe_gen_pu, &wqe->generic, 0);
+		wqe->xmit_sequence.xmit_len = payload_len;
+		break;
+	case CMD_XMIT_BCAST64_CN:
+		/* word3 iocb=iotag32 wqe=payload_len */
+		wqe->words[3] = 0; /* no definition for this in wqe */
+		/* word4 iocb=rsvd wqe=rsvd */
+		/* word5 iocb=rctl/type/df_ctl wqe=rctl/type/df_ctl memcpy */
+		/* word6 iocb=ctxt_tag/io_tag wqe=ctxt_tag/xri */
+		bf_set(lpfc_wqe_gen_ct, &wqe->generic,
+		       ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
+		break;
+	case CMD_FCP_IWRITE64_CR:
+		command_type = FCP_COMMAND_DATA_OUT;
+		/* The struct for wqe fcp_iwrite has 3 fields that are somewhat
+		 * confusing.
+		 * word3 is payload_len: byte offset to the sgl entry for the
+		 * fcp_command.
+		 * word4 is total xfer len, same as the IOCB->ulpParameter.
+		 * word5 is initial xfer len 0 = wait for xfer-ready
+		 */
+
+		/* Always wait for xfer-ready before sending data */
+		wqe->fcp_iwrite.initial_xfer_len = 0;
+		/* word 4 (xfer length) should have been set on the memcpy */
+
+		/* allow write to fall through to read */
+	case CMD_FCP_IREAD64_CR:
+		/* FCP_CMD is always the 1st sgl entry */
+		wqe->fcp_iread.payload_len =
+			payload_len + sizeof(struct fcp_rsp);
+
+		/* word 4 (xfer length) should have been set on the memcpy */
+
+		bf_set(lpfc_wqe_gen_erp, &wqe->generic,
+		       iocbq->iocb.ulpFCP2Rcvy);
+		bf_set(lpfc_wqe_gen_lnk, &wqe->generic, iocbq->iocb.ulpXS);
+		/* The XC bit and the XS bit are similar. The driver never
+		 * tracked whether or not the exchange was previously open.
+		 * XC = Exchange create, 0 is create. 1 is already open.
+		 * XS = link cmd: 1 do not close the exchange after command.
+		 * XS = 0 close exchange when command completes.
+		 * The only time we would not set the XC bit is when the XS bit
+		 * is set and we are sending our 2nd or greater command on
+		 * this exchange.
+		 */
+
+		/* ALLOW read & write to fall through to ICMD64 */
+	case CMD_FCP_ICMND64_CR:
+		/* Always open the exchange */
+		bf_set(wqe_xc, &wqe->fcp_iread.wqe_com, 0);
+
+		wqe->words[10] &= 0xffff0000; /* zero out ebde count */
+		bf_set(lpfc_wqe_gen_pu, &wqe->generic, iocbq->iocb.ulpPU);
+		break;
+	case CMD_GEN_REQUEST64_CR:
+		/* word3 command length is described as byte offset to the
+		 * rsp_data. Would always be 16, sizeof(struct sli4_sge)
+		 * sgl[0] = cmnd
+		 * sgl[1] = rsp.
+		 */
+		wqe->gen_req.command_len = payload_len;
+		/* Word4 parameter copied in the memcpy */
+		/* Word5 [rctl, type, df_ctl, la] copied in memcpy */
+		/* word6 context tag copied in memcpy */
+		if (iocbq->iocb.ulpCt_h || iocbq->iocb.ulpCt_l) {
+			ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l);
+			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+					"2015 Invalid CT %x command 0x%x\n",
+					ct, iocbq->iocb.ulpCommand);
+			return IOCB_ERROR;
+		}
+		bf_set(lpfc_wqe_gen_ct, &wqe->generic, 0);
+		bf_set(wqe_tmo, &wqe->gen_req.wqe_com,
+		       iocbq->iocb.ulpTimeout);
+
+		bf_set(lpfc_wqe_gen_pu, &wqe->generic, iocbq->iocb.ulpPU);
+		command_type = OTHER_COMMAND;
+		break;
+	case CMD_XMIT_ELS_RSP64_CX:
+		/* words0-2 BDE memcpy */
+		/* word3 iocb=iotag32 wqe=rsvd */
+		wqe->words[3] = 0;
+		/* word4 iocb=did wqe=rsvd. */
+		wqe->words[4] = 0;
+		/* word5 iocb=rsvd wqe=did */
+		bf_set(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest,
+		       iocbq->iocb.un.elsreq64.remoteID);
+
+		bf_set(lpfc_wqe_gen_ct, &wqe->generic,
+		       ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
+
+		bf_set(lpfc_wqe_gen_pu, &wqe->generic, iocbq->iocb.ulpPU);
+		bf_set(wqe_rcvoxid, &wqe->generic, iocbq->iocb.ulpContext);
+		if (!iocbq->iocb.ulpCt_h && iocbq->iocb.ulpCt_l)
+			bf_set(lpfc_wqe_gen_context, &wqe->generic,
+			       iocbq->vport->vpi + phba->vpi_base);
+		command_type = OTHER_COMMAND;
+		break;
+	case CMD_CLOSE_XRI_CN:
+	case CMD_ABORT_XRI_CN:
+	case CMD_ABORT_XRI_CX:
+		/* words 0-2 memcpy should be 0 reserved */
+		/* port will send abts */
+		if (iocbq->iocb.ulpCommand == CMD_CLOSE_XRI_CN)
+			/*
+			 * The link is down so the fw does not need to send abts
+			 * on the wire.
+			 */
+			bf_set(abort_cmd_ia, &wqe->abort_cmd, 1);
+		else
+			bf_set(abort_cmd_ia, &wqe->abort_cmd, 0);
+		bf_set(abort_cmd_criteria, &wqe->abort_cmd, T_XRI_TAG);
+		abort_tag = iocbq->iocb.un.acxri.abortIoTag;
+		wqe->words[5] = 0;
+		bf_set(lpfc_wqe_gen_ct, &wqe->generic,
+		       ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
+		wqe->generic.abort_tag = abort_tag;
+		/*
+		 * The abort handler will send us CMD_ABORT_XRI_CN or
+		 * CMD_CLOSE_XRI_CN and the fw only accepts CMD_ABORT_XRI_CX
+		 */
+		bf_set(lpfc_wqe_gen_command, &wqe->generic, CMD_ABORT_XRI_CX);
+		cmnd = CMD_ABORT_XRI_CX;
+		command_type = OTHER_COMMAND;
+		xritag = 0;
+		break;
+	case CMD_XRI_ABORTED_CX:
+	case CMD_CREATE_XRI_CR: /* Do we expect to use this? */
+		/* words0-2 are all 0's no bde */
+		/* word3 and word4 are rsvd */
+		wqe->words[3] = 0;
+		wqe->words[4] = 0;
+		/* word5 iocb=rsvd wqe=did */
+		/* There is no remote port id in the IOCB? */
+		/* Let this fall through and fail */
+	case CMD_IOCB_FCP_IBIDIR64_CR: /* bidirectional xfer */
+	case CMD_FCP_TSEND64_CX: /* Target mode send xfer-ready */
+	case CMD_FCP_TRSP64_CX: /* Target mode rcv */
+	case CMD_FCP_AUTO_TRSP_CX: /* Auto target rsp */
+	default:
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"2014 Invalid command 0x%x\n",
+				iocbq->iocb.ulpCommand);
+		return IOCB_ERROR;
+	}
+	bf_set(lpfc_wqe_gen_xri, &wqe->generic, xritag);
+	bf_set(lpfc_wqe_gen_request_tag, &wqe->generic, iocbq->iotag);
+	wqe->generic.abort_tag = abort_tag;
+	bf_set(lpfc_wqe_gen_cmd_type, &wqe->generic, command_type);
+	bf_set(lpfc_wqe_gen_command, &wqe->generic, cmnd);
+	bf_set(lpfc_wqe_gen_class, &wqe->generic, iocbq->iocb.ulpClass);
+	bf_set(lpfc_wqe_gen_cq_id, &wqe->generic, LPFC_WQE_CQ_ID_DEFAULT);
+
+	return 0;
+}
+
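Nearly every line above relies on bf_set()/bf_get(), lpfc's accessors for named bitfields within hardware words; their definitions live in the SLI4 headers, not in this file. Conceptually they reduce to shift-and-mask, roughly as in this hedged sketch with invented SHIFT/MASK values:

	#define EX_FIELD_SHIFT	8
	#define EX_FIELD_MASK	0xff

	static inline void ex_field_set(uint32_t *word, uint32_t val)
	{
		*word = (*word & ~(EX_FIELD_MASK << EX_FIELD_SHIFT)) |
			((val & EX_FIELD_MASK) << EX_FIELD_SHIFT);
	}

	static inline uint32_t ex_field_get(uint32_t word)
	{
		return (word >> EX_FIELD_SHIFT) & EX_FIELD_MASK;
	}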
+/**
+ * __lpfc_sli_issue_iocb_s4 - SLI4 device lockless ver of lpfc_sli_issue_iocb
+ * @phba: Pointer to HBA context object.
+ * @ring_number: SLI ring number to issue iocb on.
+ * @piocb: Pointer to command iocb.
+ * @flag: Flag indicating if this command can be put into txq.
+ *
+ * __lpfc_sli_issue_iocb_s4 is used by other functions in the driver to issue
+ * an iocb command to an HBA with SLI-4 interface spec.
+ *
+ * This function is called with hbalock held. The function will return success
+ * after it successfully submits the iocb to firmware or after adding to the
+ * txq.
+ **/
+static int
+__lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
+			 struct lpfc_iocbq *piocb, uint32_t flag)
+{
+	struct lpfc_sglq *sglq;
+	uint16_t xritag;
+	union lpfc_wqe wqe;
+	struct lpfc_sli_ring *pring = &phba->sli.ring[ring_number];
+	uint32_t fcp_wqidx;
+
+	if (piocb->sli4_xritag == NO_XRI) {
+		if (piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN ||
+		    piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN)
+			sglq = NULL;
+		else {
+			sglq = __lpfc_sli_get_sglq(phba);
+			if (!sglq)
+				return IOCB_ERROR;
+			piocb->sli4_xritag = sglq->sli4_xritag;
+		}
+	} else if (piocb->iocb_flag & LPFC_IO_FCP) {
+		sglq = NULL; /* These IOs already have an XRI and
+			      * a mapped sgl.
+			      */
+	} else {
+		/* This is a continuation of a command (CX) so this
+		 * sglq is on the active list
+		 */
+		sglq = __lpfc_get_active_sglq(phba, piocb->sli4_xritag);
+		if (!sglq)
+			return IOCB_ERROR;
+	}
+
+	if (sglq) {
+		xritag = lpfc_sli4_bpl2sgl(phba, piocb, sglq);
+		if (xritag != sglq->sli4_xritag)
+			return IOCB_ERROR;
+	}
+
+	if (lpfc_sli4_iocb2wqe(phba, piocb, &wqe))
+		return IOCB_ERROR;
+
+	if (piocb->iocb_flag & LPFC_IO_FCP) {
+		fcp_wqidx = lpfc_sli4_scmd_to_wqidx_distr(phba, piocb);
+		if (lpfc_sli4_wq_put(phba->sli4_hba.fcp_wq[fcp_wqidx], &wqe))
+			return IOCB_ERROR;
+	} else {
+		if (lpfc_sli4_wq_put(phba->sli4_hba.els_wq, &wqe))
+			return IOCB_ERROR;
+	}
+	lpfc_sli_ringtxcmpl_put(phba, pring, piocb);
+
+	return 0;
+}
+
+/**
  * __lpfc_sli_issue_iocb - Wrapper func of lockless version for issuing iocb
  *
  * This routine wraps the actual lockless version for issuing IOCB function
@@ -5165,6 +6085,10 @@ lpfc_sli_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
 		phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s3;
 		phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s3;
 		break;
+	case LPFC_PCI_DEV_OC:
+		phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s4;
+		phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s4;
+		break;
 	default:
 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 				"1419 Invalid HBA PCI-device group: 0x%x\n",
@@ -7152,3 +8076,2461 @@ lpfc_sli_intr_handler(int irq, void *dev_id)
 	/* Return device-level interrupt handling status */
 	return (sp_irq_rc == IRQ_HANDLED) ? sp_irq_rc : fp_irq_rc;
 } /* lpfc_sli_intr_handler */
+
+/**
+ * lpfc_sli4_fcp_xri_abort_event_proc - Process fcp xri abort event
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked by the worker thread to process all the pending
+ * SLI4 FCP abort XRI events.
+ **/
+void lpfc_sli4_fcp_xri_abort_event_proc(struct lpfc_hba *phba)
+{
+	struct lpfc_cq_event *cq_event;
+
+	/* First, declare the fcp xri abort event has been handled */
+	spin_lock_irq(&phba->hbalock);
+	phba->hba_flag &= ~FCP_XRI_ABORT_EVENT;
+	spin_unlock_irq(&phba->hbalock);
+	/* Now, handle all the fcp xri abort events */
+	while (!list_empty(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue)) {
+		/* Get the first event from the head of the event queue */
+		spin_lock_irq(&phba->hbalock);
+		list_remove_head(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue,
+				 cq_event, struct lpfc_cq_event, list);
+		spin_unlock_irq(&phba->hbalock);
+		/* Notify aborted XRI for FCP work queue */
+		lpfc_sli4_fcp_xri_aborted(phba, &cq_event->cqe.wcqe_axri);
+		/* Free the event processed back to the free pool */
+		lpfc_sli4_cq_event_release(phba, cq_event);
+	}
+}
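
The flag is cleared before the list is drained, not after: an abort event that arrives mid-drain re-sets FCP_XRI_ABORT_EVENT and re-wakes the worker, so no event can be stranded. The ordering in miniature, as a hedged sketch with generic names:

	/* Producer (interrupt context) */
	spin_lock_irqsave(&lock, flags);
	list_add_tail(&ev->list, &pending);
	event_flags |= EVENT_PENDING;		/* guarantees another pass */
	spin_unlock_irqrestore(&lock, flags);

	/* Consumer (worker thread) */
	spin_lock_irq(&lock);
	event_flags &= ~EVENT_PENDING;		/* clear first ...        */
	spin_unlock_irq(&lock);
	while (!list_empty(&pending)) {
		/* ... then pop entries one at a time under the lock */
	}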
+
+/**
+ * lpfc_sli4_els_xri_abort_event_proc - Process els xri abort event
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked by the worker thread to process all the pending
+ * SLI4 els abort xri events.
+ **/
+void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *phba)
+{
+	struct lpfc_cq_event *cq_event;
+
+	/* First, declare the els xri abort event has been handled */
+	spin_lock_irq(&phba->hbalock);
+	phba->hba_flag &= ~ELS_XRI_ABORT_EVENT;
+	spin_unlock_irq(&phba->hbalock);
+	/* Now, handle all the els xri abort events */
+	while (!list_empty(&phba->sli4_hba.sp_els_xri_aborted_work_queue)) {
+		/* Get the first event from the head of the event queue */
+		spin_lock_irq(&phba->hbalock);
+		list_remove_head(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
+				 cq_event, struct lpfc_cq_event, list);
+		spin_unlock_irq(&phba->hbalock);
+		/* Notify aborted XRI for ELS work queue */
+		lpfc_sli4_els_xri_aborted(phba, &cq_event->cqe.wcqe_axri);
+		/* Free the event processed back to the free pool */
+		lpfc_sli4_cq_event_release(phba, cq_event);
+	}
+}
+
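lpfc_sli4_iocb_param_transfer() below carries no kernel-doc block in the patch; the following comment is an editorial sketch of its behavior, derived only from the code itself:

	/*
	 * lpfc_sli4_iocb_param_transfer - copy a command iocb into a pseudo
	 * response iocb and map the WCQE completion onto it.  Fields from
	 * offsetof(struct lpfc_iocbq, iocb) onward are copied from pIocbOut
	 * to pIocbIn, sli4_info is reset, and ulpStatus, the FCP residual
	 * (or ulpWord[4]) plus the XB/PV/priority bits are filled in from
	 * the WCQE.
	 */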
+static void
+lpfc_sli4_iocb_param_transfer(struct lpfc_iocbq *pIocbIn,
+			      struct lpfc_iocbq *pIocbOut,
+			      struct lpfc_wcqe_complete *wcqe)
+{
+	size_t offset = offsetof(struct lpfc_iocbq, iocb);
+
+	memcpy((char *)pIocbIn + offset, (char *)pIocbOut + offset,
+	       sizeof(struct lpfc_iocbq) - offset);
+	memset(&pIocbIn->sli4_info, 0,
+	       sizeof(struct lpfc_sli4_rspiocb_info));
+	/* Map WCQE parameters into irspiocb parameters */
+	pIocbIn->iocb.ulpStatus = bf_get(lpfc_wcqe_c_status, wcqe);
+	if (pIocbOut->iocb_flag & LPFC_IO_FCP)
+		if (pIocbIn->iocb.ulpStatus == IOSTAT_FCP_RSP_ERROR)
+			pIocbIn->iocb.un.fcpi.fcpi_parm =
+				pIocbOut->iocb.un.fcpi.fcpi_parm -
+				wcqe->total_data_placed;
+		else
+			pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
+	else
+		pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
+	/* Load in additional WCQE parameters */
+	pIocbIn->sli4_info.hw_status = bf_get(lpfc_wcqe_c_hw_status, wcqe);
+	pIocbIn->sli4_info.bfield = 0;
+	if (bf_get(lpfc_wcqe_c_xb, wcqe))
+		pIocbIn->sli4_info.bfield |= LPFC_XB;
+	if (bf_get(lpfc_wcqe_c_pv, wcqe)) {
+		pIocbIn->sli4_info.bfield |= LPFC_PV;
+		pIocbIn->sli4_info.priority =
+			bf_get(lpfc_wcqe_c_priority, wcqe);
+	}
+}
+
+/**
+ * lpfc_sli4_sp_handle_els_wcqe - Handle els work-queue completion event
+ * @phba: Pointer to HBA context object.
+ * @wcqe: Pointer to work-queue completion queue entry.
+ *
+ * This routine handles an ELS work-queue completion event.
+ *
+ * Return: true if work posted to worker thread, otherwise false.
+ **/
+static bool
+lpfc_sli4_sp_handle_els_wcqe(struct lpfc_hba *phba,
+			     struct lpfc_wcqe_complete *wcqe)
+{
+	struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
+	struct lpfc_iocbq *cmdiocbq;
+	struct lpfc_iocbq *irspiocbq;
+	unsigned long iflags;
+	bool workposted = false;
+
+	spin_lock_irqsave(&phba->hbalock, iflags);
+	pring->stats.iocb_event++;
+	/* Look up the ELS command IOCB and create pseudo response IOCB */
+	cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
+				bf_get(lpfc_wcqe_c_request_tag, wcqe));
+	spin_unlock_irqrestore(&phba->hbalock, iflags);
+
+	if (unlikely(!cmdiocbq)) {
+		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+				"0386 ELS complete with no corresponding "
+				"cmdiocb: iotag (%d)\n",
+				bf_get(lpfc_wcqe_c_request_tag, wcqe));
+		return workposted;
+	}
+
+	/* Fake the irspiocbq and copy necessary response information */
+	irspiocbq = lpfc_sli_get_iocbq(phba);
+	if (!irspiocbq) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"0387 Failed to allocate an iocbq\n");
+		return workposted;
+	}
+	lpfc_sli4_iocb_param_transfer(irspiocbq, cmdiocbq, wcqe);
+
+	/* Add the irspiocb to the response IOCB work list */
+	spin_lock_irqsave(&phba->hbalock, iflags);
+	list_add_tail(&irspiocbq->list, &phba->sli4_hba.sp_rspiocb_work_queue);
+	/* Indicate ELS ring attention */
+	phba->work_ha |= (HA_R0ATT << (4*LPFC_ELS_RING));
+	spin_unlock_irqrestore(&phba->hbalock, iflags);
+	workposted = true;
+
+	return workposted;
+}
+
+/**
+ * lpfc_sli4_sp_handle_rel_wcqe - Handle slow-path WQ entry consumed event
+ * @phba: Pointer to HBA context object.
+ * @wcqe: Pointer to work-queue completion queue entry.
+ *
+ * This routine handles a slow-path WQ entry consumed event by invoking the
+ * proper WQ release routine to the slow-path WQ.
+ **/
+static void
+lpfc_sli4_sp_handle_rel_wcqe(struct lpfc_hba *phba,
+			     struct lpfc_wcqe_release *wcqe)
+{
+	/* Check for the slow-path ELS work queue */
+	if (bf_get(lpfc_wcqe_r_wq_id, wcqe) == phba->sli4_hba.els_wq->queue_id)
+		lpfc_sli4_wq_release(phba->sli4_hba.els_wq,
+				     bf_get(lpfc_wcqe_r_wqe_index, wcqe));
+	else
+		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+				"2579 Slow-path wqe consume event carries "
+				"miss-matched qid: wcqe-qid=x%x, sp-qid=x%x\n",
+				bf_get(lpfc_wcqe_r_wqe_index, wcqe),
+				phba->sli4_hba.els_wq->queue_id);
+}
+
+/**
+ * lpfc_sli4_sp_handle_abort_xri_wcqe - Handle a xri abort event
+ * @phba: Pointer to HBA context object.
+ * @cq: Pointer to a WQ completion queue.
+ * @wcqe: Pointer to work-queue completion queue entry.
+ *
+ * This routine handles an XRI abort event.
+ *
+ * Return: true if work posted to worker thread, otherwise false.
+ **/
+static bool
+lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba,
+				   struct lpfc_queue *cq,
+				   struct sli4_wcqe_xri_aborted *wcqe)
+{
+	bool workposted = false;
+	struct lpfc_cq_event *cq_event;
+	unsigned long iflags;
+
+	/* Allocate a new internal CQ_EVENT entry */
+	cq_event = lpfc_sli4_cq_event_alloc(phba);
+	if (!cq_event) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"0602 Failed to allocate CQ_EVENT entry\n");
+		return false;
+	}
+
+	/* Move the CQE into the proper xri abort event list */
+	memcpy(&cq_event->cqe, wcqe, sizeof(struct sli4_wcqe_xri_aborted));
+	switch (cq->subtype) {
+	case LPFC_FCP:
+		spin_lock_irqsave(&phba->hbalock, iflags);
+		list_add_tail(&cq_event->list,
+			      &phba->sli4_hba.sp_fcp_xri_aborted_work_queue);
+		/* Set the fcp xri abort event flag */
+		phba->hba_flag |= FCP_XRI_ABORT_EVENT;
+		spin_unlock_irqrestore(&phba->hbalock, iflags);
+		workposted = true;
+		break;
+	case LPFC_ELS:
+		spin_lock_irqsave(&phba->hbalock, iflags);
+		list_add_tail(&cq_event->list,
+			      &phba->sli4_hba.sp_els_xri_aborted_work_queue);
+		/* Set the els xri abort event flag */
+		phba->hba_flag |= ELS_XRI_ABORT_EVENT;
+		spin_unlock_irqrestore(&phba->hbalock, iflags);
+		workposted = true;
+		break;
+	default:
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"0603 Invalid work queue CQE subtype (x%x)\n",
+				cq->subtype);
+		workposted = false;
+		break;
+	}
+	return workposted;
+}
+
+/**
+ * lpfc_sli4_sp_handle_wcqe - Process a work-queue completion queue entry
+ * @phba: Pointer to HBA context object.
+ * @cq: Pointer to the completion queue.
+ * @cqe: Pointer to a completion queue entry.
+ *
+ * This routine processes a slow-path work-queue completion queue entry.
+ *
+ * Return: true if work posted to worker thread, otherwise false.
+ **/
+static bool
+lpfc_sli4_sp_handle_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
+			 struct lpfc_cqe *cqe)
+{
+	struct lpfc_wcqe_complete wcqe;
+	bool workposted = false;
+
+	/* Copy the work queue CQE and convert endian order if needed */
+	lpfc_sli_pcimem_bcopy(cqe, &wcqe, sizeof(struct lpfc_cqe));
+
+	/* Check and process for different type of WCQE and dispatch */
+	switch (bf_get(lpfc_wcqe_c_code, &wcqe)) {
+	case CQE_CODE_COMPL_WQE:
+		/* Process the WQ complete event */
+		workposted = lpfc_sli4_sp_handle_els_wcqe(phba,
+				(struct lpfc_wcqe_complete *)&wcqe);
+		break;
+	case CQE_CODE_RELEASE_WQE:
+		/* Process the WQ release event */
+		lpfc_sli4_sp_handle_rel_wcqe(phba,
+				(struct lpfc_wcqe_release *)&wcqe);
+		break;
+	case CQE_CODE_XRI_ABORTED:
+		/* Process the WQ XRI abort event */
+		workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
+				(struct sli4_wcqe_xri_aborted *)&wcqe);
+		break;
+	default:
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"0388 Not a valid WCQE code: x%x\n",
+				bf_get(lpfc_wcqe_c_code, &wcqe));
+		break;
+	}
+	return workposted;
+}
+
8354 | /** | ||
8355 | * lpfc_sli4_sp_handle_rcqe - Process a receive-queue completion queue entry | ||
8356 | * @phba: Pointer to HBA context object. | ||
8357 | * @cqe: Pointer to a receive-queue completion queue entry. | ||
8358 | * | ||
8359 | * This routine processes a receive-queue completion queue entry. | ||
8360 | * | ||
8361 | * Return: true if work posted to worker thread, otherwise false. | ||
8362 | **/ | ||
8363 | static bool | ||
8364 | lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_cqe *cqe) | ||
8365 | { | ||
8366 | struct lpfc_rcqe rcqe; | ||
8367 | bool workposted = false; | ||
8368 | struct lpfc_queue *hrq = phba->sli4_hba.hdr_rq; | ||
8369 | struct lpfc_queue *drq = phba->sli4_hba.dat_rq; | ||
8370 | struct hbq_dmabuf *dma_buf; | ||
8371 | uint32_t status; | ||
8372 | unsigned long iflags; | ||
8373 | |||
8374 | /* Copy the receive queue CQE and convert endian order if needed */ | ||
8375 | lpfc_sli_pcimem_bcopy(cqe, &rcqe, sizeof(struct lpfc_rcqe)); | ||
8376 | lpfc_sli4_rq_release(hrq, drq); | ||
8377 | if (bf_get(lpfc_rcqe_code, &rcqe) != CQE_CODE_RECEIVE) | ||
8378 | goto out; | ||
8379 | if (bf_get(lpfc_rcqe_rq_id, &rcqe) != hrq->queue_id) | ||
8380 | goto out; | ||
8381 | |||
8382 | status = bf_get(lpfc_rcqe_status, &rcqe); | ||
8383 | switch (status) { | ||
8384 | case FC_STATUS_RQ_BUF_LEN_EXCEEDED: | ||
8385 | lpfc_printf_log(phba, KERN_ERR, LOG_SLI, | ||
8386 | "2537 Receive Frame Truncated!!\n"); | ||
8387 | case FC_STATUS_RQ_SUCCESS: | ||
8388 | spin_lock_irqsave(&phba->hbalock, iflags); | ||
8389 | dma_buf = lpfc_sli_hbqbuf_get(&phba->hbqs[0].hbq_buffer_list); | ||
8390 | if (!dma_buf) { | ||
8391 | spin_unlock_irqrestore(&phba->hbalock, iflags); | ||
8392 | goto out; | ||
8393 | } | ||
8394 | memcpy(&dma_buf->rcqe, &rcqe, sizeof(rcqe)); | ||
8395 | /* save off the frame for the worker thread to process */ | ||
8396 | list_add_tail(&dma_buf->dbuf.list, &phba->rb_pend_list); | ||
8397 | /* Frame received */ | ||
8398 | phba->hba_flag |= HBA_RECEIVE_BUFFER; | ||
8399 | spin_unlock_irqrestore(&phba->hbalock, iflags); | ||
8400 | workposted = true; | ||
8401 | break; | ||
8402 | case FC_STATUS_INSUFF_BUF_NEED_BUF: | ||
8403 | case FC_STATUS_INSUFF_BUF_FRM_DISC: | ||
8404 | /* Post more buffers if possible */ | ||
8405 | spin_lock_irqsave(&phba->hbalock, iflags); | ||
8406 | phba->hba_flag |= HBA_POST_RECEIVE_BUFFER; | ||
8407 | spin_unlock_irqrestore(&phba->hbalock, iflags); | ||
8408 | workposted = true; | ||
8409 | break; | ||
8410 | } | ||
8411 | out: | ||
8412 | return workposted; | ||
8414 | } | ||
8415 | |||
8416 | /** | ||
8417 | * lpfc_sli4_sp_handle_eqe - Process a slow-path event queue entry | ||
8418 | * @phba: Pointer to HBA context object. | ||
8419 | * @eqe: Pointer to slow-path event queue entry. | ||
8420 | * | ||
8421 | * This routine processes an event queue entry from the slow-path event | ||
8422 | * queue. It checks the MajorCode and MinorCode to determine whether this | ||
8423 | * is a completion event on a completion queue; if not, an error is | ||
8424 | * logged and the routine returns. Otherwise, it finds the corresponding | ||
8425 | * completion queue, processes all the entries on that completion queue, | ||
8426 | * rearms the completion queue, and returns. | ||
8427 | * | ||
8428 | **/ | ||
8429 | static void | ||
8430 | lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe) | ||
8431 | { | ||
8432 | struct lpfc_queue *cq = NULL, *childq, *speq; | ||
8433 | struct lpfc_cqe *cqe; | ||
8434 | bool workposted = false; | ||
8435 | int ecount = 0; | ||
8436 | uint16_t cqid; | ||
8437 | |||
8438 | if (bf_get(lpfc_eqe_major_code, eqe) != 0 || | ||
8439 | bf_get(lpfc_eqe_minor_code, eqe) != 0) { | ||
8440 | lpfc_printf_log(phba, KERN_ERR, LOG_SLI, | ||
8441 | "0359 Not a valid slow-path completion " | ||
8442 | "event: majorcode=x%x, minorcode=x%x\n", | ||
8443 | bf_get(lpfc_eqe_major_code, eqe), | ||
8444 | bf_get(lpfc_eqe_minor_code, eqe)); | ||
8445 | return; | ||
8446 | } | ||
8447 | |||
8448 | /* Get the reference to the corresponding CQ */ | ||
8449 | cqid = bf_get(lpfc_eqe_resource_id, eqe); | ||
8450 | |||
8451 | /* Search for completion queue pointer matching this cqid */ | ||
8452 | speq = phba->sli4_hba.sp_eq; | ||
8453 | list_for_each_entry(childq, &speq->child_list, list) { | ||
8454 | if (childq->queue_id == cqid) { | ||
8455 | cq = childq; | ||
8456 | break; | ||
8457 | } | ||
8458 | } | ||
8459 | if (unlikely(!cq)) { | ||
8460 | lpfc_printf_log(phba, KERN_ERR, LOG_SLI, | ||
8461 | "0365 Slow-path CQ identifier (%d) does " | ||
8462 | "not exist\n", cqid); | ||
8463 | return; | ||
8464 | } | ||
8465 | |||
8466 | /* Process all the entries to the CQ */ | ||
8467 | switch (cq->type) { | ||
8468 | case LPFC_MCQ: | ||
8469 | while ((cqe = lpfc_sli4_cq_get(cq))) { | ||
8470 | workposted |= lpfc_sli4_sp_handle_mcqe(phba, cqe); | ||
8471 | if (!(++ecount % LPFC_GET_QE_REL_INT)) | ||
8472 | lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM); | ||
8473 | } | ||
8474 | break; | ||
8475 | case LPFC_WCQ: | ||
8476 | while ((cqe = lpfc_sli4_cq_get(cq))) { | ||
8477 | workposted |= lpfc_sli4_sp_handle_wcqe(phba, cq, cqe); | ||
8478 | if (!(++ecount % LPFC_GET_QE_REL_INT)) | ||
8479 | lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM); | ||
8480 | } | ||
8481 | break; | ||
8482 | case LPFC_RCQ: | ||
8483 | while ((cqe = lpfc_sli4_cq_get(cq))) { | ||
8484 | workposted |= lpfc_sli4_sp_handle_rcqe(phba, cqe); | ||
8485 | if (!(++ecount % LPFC_GET_QE_REL_INT)) | ||
8486 | lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM); | ||
8487 | } | ||
8488 | break; | ||
8489 | default: | ||
8490 | lpfc_printf_log(phba, KERN_ERR, LOG_SLI, | ||
8491 | "0370 Invalid completion queue type (%d)\n", | ||
8492 | cq->type); | ||
8493 | return; | ||
8494 | } | ||
8495 | |||
8496 | /* Catch the no cq entry condition, log an error */ | ||
8497 | if (unlikely(ecount == 0)) | ||
8498 | lpfc_printf_log(phba, KERN_ERR, LOG_SLI, | ||
8499 | "0371 No entry from the CQ: identifier " | ||
8500 | "(x%x), type (%d)\n", cq->queue_id, cq->type); | ||
8501 | |||
8502 | /* In any case, flush and re-arm the CQ */ | ||
8503 | lpfc_sli4_cq_release(cq, LPFC_QUEUE_REARM); | ||
8504 | |||
8505 | /* wake up the worker thread if there is work to be done */ | ||
8506 | if (workposted) | ||
8507 | lpfc_worker_wake_up(phba); | ||
8508 | } | ||
8509 | |||
8510 | /** | ||
8511 | * lpfc_sli4_fp_handle_fcp_wcqe - Process fast-path work queue completion entry | ||
8512 | * @phba: Pointer to HBA context object. | ||
| * @wcqe: Pointer to work-queue completion queue entry. | ||
8513 | * | ||
8514 | * This routine processes a fast-path work-queue completion entry from a | ||
8515 | * fast-path event queue for FCP command response completion. | ||
8516 | **/ | ||
8517 | static void | ||
8518 | lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc_hba *phba, | ||
8519 | struct lpfc_wcqe_complete *wcqe) | ||
8520 | { | ||
8521 | struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_FCP_RING]; | ||
8522 | struct lpfc_iocbq *cmdiocbq; | ||
8523 | struct lpfc_iocbq irspiocbq; | ||
8524 | unsigned long iflags; | ||
8525 | |||
8526 | spin_lock_irqsave(&phba->hbalock, iflags); | ||
8527 | pring->stats.iocb_event++; | ||
8528 | spin_unlock_irqrestore(&phba->hbalock, iflags); | ||
8529 | |||
8530 | /* Check for response status */ | ||
8531 | if (unlikely(bf_get(lpfc_wcqe_c_status, wcqe))) { | ||
8532 | /* If resource errors reported from HBA, reduce queue | ||
8533 | * depth of the SCSI device. | ||
8534 | */ | ||
8535 | if ((bf_get(lpfc_wcqe_c_status, wcqe) == | ||
8536 | IOSTAT_LOCAL_REJECT) && | ||
8537 | (wcqe->parameter == IOERR_NO_RESOURCES)) { | ||
8538 | phba->lpfc_rampdown_queue_depth(phba); | ||
8539 | } | ||
8540 | /* Log the error status */ | ||
8541 | lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, | ||
8542 | "0373 FCP complete error: status=x%x, " | ||
8543 | "hw_status=x%x, total_data_specified=%d, " | ||
8544 | "parameter=x%x, word3=x%x\n", | ||
8545 | bf_get(lpfc_wcqe_c_status, wcqe), | ||
8546 | bf_get(lpfc_wcqe_c_hw_status, wcqe), | ||
8547 | wcqe->total_data_placed, wcqe->parameter, | ||
8548 | wcqe->word3); | ||
8549 | } | ||
8550 | |||
8551 | /* Look up the FCP command IOCB and create pseudo response IOCB */ | ||
8552 | spin_lock_irqsave(&phba->hbalock, iflags); | ||
8553 | cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring, | ||
8554 | bf_get(lpfc_wcqe_c_request_tag, wcqe)); | ||
8555 | spin_unlock_irqrestore(&phba->hbalock, iflags); | ||
8556 | if (unlikely(!cmdiocbq)) { | ||
8557 | lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, | ||
8558 | "0374 FCP complete with no corresponding " | ||
8559 | "cmdiocb: iotag (%d)\n", | ||
8560 | bf_get(lpfc_wcqe_c_request_tag, wcqe)); | ||
8561 | return; | ||
8562 | } | ||
8563 | if (unlikely(!cmdiocbq->iocb_cmpl)) { | ||
8564 | lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, | ||
8565 | "0375 FCP cmdiocb not callback function " | ||
8566 | "iotag: (%d)\n", | ||
8567 | bf_get(lpfc_wcqe_c_request_tag, wcqe)); | ||
8568 | return; | ||
8569 | } | ||
8570 | |||
8571 | /* Fake the irspiocb and copy necessary response information */ | ||
8572 | lpfc_sli4_iocb_param_transfer(&irspiocbq, cmdiocbq, wcqe); | ||
8573 | |||
8574 | /* Pass the cmd_iocb and the rsp state to the upper layer */ | ||
8575 | (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, &irspiocbq); | ||
8576 | } | ||
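| ||
| /* | ||
| * Note on the on-stack irspiocbq above (an observation about this code | ||
| * path, not a general lpfc rule): the pseudo response iocb can live on | ||
| * the stack only because the completion handler is invoked synchronously | ||
| * and must not keep a reference to it after it returns. | ||
| */ | ||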
8577 | |||
8578 | /** | ||
8579 | * lpfc_sli4_fp_handle_rel_wcqe - Handle fast-path WQ entry consumed event | ||
8580 | * @phba: Pointer to HBA context object. | ||
8581 | * @cq: Pointer to completion queue. | ||
8582 | * @wcqe: Pointer to work-queue completion queue entry. | ||
8583 | * | ||
8584 | * This routine handles a fast-path WQ entry consumed event by invoking the | ||
8585 | * proper WQ release routine to the slow-path WQ. | ||
8586 | **/ | ||
8587 | static void | ||
8588 | lpfc_sli4_fp_handle_rel_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq, | ||
8589 | struct lpfc_wcqe_release *wcqe) | ||
8590 | { | ||
8591 | struct lpfc_queue *childwq; | ||
8592 | bool wqid_matched = false; | ||
8593 | uint16_t fcp_wqid; | ||
8594 | |||
8595 | /* Check for fast-path FCP work queue release */ | ||
8596 | fcp_wqid = bf_get(lpfc_wcqe_r_wq_id, wcqe); | ||
8597 | list_for_each_entry(childwq, &cq->child_list, list) { | ||
8598 | if (childwq->queue_id == fcp_wqid) { | ||
8599 | lpfc_sli4_wq_release(childwq, | ||
8600 | bf_get(lpfc_wcqe_r_wqe_index, wcqe)); | ||
8601 | wqid_matched = true; | ||
8602 | break; | ||
8603 | } | ||
8604 | } | ||
8605 | /* Report warning log message if no match found */ | ||
8606 | if (!wqid_matched) | ||
8607 | lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, | ||
8608 | "2580 Fast-path wqe consume event carries " | ||
8609 | "miss-matched qid: wcqe-qid=x%x\n", fcp_wqid); | ||
8610 | } | ||
8611 | |||
8612 | /** | ||
8613 | * lpfc_sli4_fp_handle_wcqe - Process fast-path work queue completion entry | ||
8614 | * @phba: Pointer to HBA context object. | ||
| * @cq: Pointer to the completion queue. | ||
8615 | * @cqe: Pointer to the completion queue entry. | ||
8616 | * | ||
8617 | * This routine processes a fast-path work-queue completion entry from a | ||
8618 | * fast-path event queue for FCP command response completion. | ||
| * | ||
| * Return: true if work posted to worker thread, otherwise false. | ||
8619 | **/ | ||
8620 | static bool | ||
8621 | lpfc_sli4_fp_handle_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq, | ||
8622 | struct lpfc_cqe *cqe) | ||
8623 | { | ||
8624 | struct lpfc_wcqe_release wcqe; | ||
8625 | bool workposted = false; | ||
8626 | |||
8627 | /* Copy the work queue CQE and convert endian order if needed */ | ||
8628 | lpfc_sli_pcimem_bcopy(cqe, &wcqe, sizeof(struct lpfc_cqe)); | ||
8629 | |||
8630 | /* Check and process for different type of WCQE and dispatch */ | ||
8631 | switch (bf_get(lpfc_wcqe_c_code, &wcqe)) { | ||
8632 | case CQE_CODE_COMPL_WQE: | ||
8633 | /* Process the WQ complete event */ | ||
8634 | lpfc_sli4_fp_handle_fcp_wcqe(phba, | ||
8635 | (struct lpfc_wcqe_complete *)&wcqe); | ||
8636 | break; | ||
8637 | case CQE_CODE_RELEASE_WQE: | ||
8638 | /* Process the WQ release event */ | ||
8639 | lpfc_sli4_fp_handle_rel_wcqe(phba, cq, | ||
8640 | (struct lpfc_wcqe_release *)&wcqe); | ||
8641 | break; | ||
8642 | case CQE_CODE_XRI_ABORTED: | ||
8643 | /* Process the WQ XRI abort event */ | ||
8644 | workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq, | ||
8645 | (struct sli4_wcqe_xri_aborted *)&wcqe); | ||
8646 | break; | ||
8647 | default: | ||
8648 | lpfc_printf_log(phba, KERN_ERR, LOG_SLI, | ||
8649 | "0144 Not a valid WCQE code: x%x\n", | ||
8650 | bf_get(lpfc_wcqe_c_code, &wcqe)); | ||
8651 | break; | ||
8652 | } | ||
8653 | return workposted; | ||
8654 | } | ||
8655 | |||
8656 | /** | ||
8657 | * lpfc_sli4_fp_handle_eqe - Process a fast-path event queue entry | ||
8658 | * @phba: Pointer to HBA context object. | ||
8659 | * @eqe: Pointer to fast-path event queue entry. | ||
8660 | * | ||
8661 | * This routine processes an event queue entry from the fast-path event | ||
8662 | * queue. It checks the MajorCode and MinorCode to determine whether this | ||
8663 | * is a completion event on a completion queue; if not, an error is | ||
8664 | * logged and the routine returns. Otherwise, it finds the corresponding | ||
8665 | * completion queue, processes all the entries on that completion queue, | ||
8666 | * rearms the completion queue, and returns. | ||
8667 | **/ | ||
8668 | static void | ||
8669 | lpfc_sli4_fp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe, | ||
8670 | uint32_t fcp_cqidx) | ||
8671 | { | ||
8672 | struct lpfc_queue *cq; | ||
8673 | struct lpfc_cqe *cqe; | ||
8674 | bool workposted = false; | ||
8675 | uint16_t cqid; | ||
8676 | int ecount = 0; | ||
8677 | |||
8678 | if (unlikely(bf_get(lpfc_eqe_major_code, eqe) != 0) || | ||
8679 | unlikely(bf_get(lpfc_eqe_minor_code, eqe) != 0)) { | ||
8680 | lpfc_printf_log(phba, KERN_ERR, LOG_SLI, | ||
8681 | "0366 Not a valid fast-path completion " | ||
8682 | "event: majorcode=x%x, minorcode=x%x\n", | ||
8683 | bf_get(lpfc_eqe_major_code, eqe), | ||
8684 | bf_get(lpfc_eqe_minor_code, eqe)); | ||
8685 | return; | ||
8686 | } | ||
8687 | |||
8688 | cq = phba->sli4_hba.fcp_cq[fcp_cqidx]; | ||
8689 | if (unlikely(!cq)) { | ||
8690 | lpfc_printf_log(phba, KERN_ERR, LOG_SLI, | ||
8691 | "0367 Fast-path completion queue does not " | ||
8692 | "exist\n"); | ||
8693 | return; | ||
8694 | } | ||
8695 | |||
8696 | /* Get the reference to the corresponding CQ */ | ||
8697 | cqid = bf_get(lpfc_eqe_resource_id, eqe); | ||
8698 | if (unlikely(cqid != cq->queue_id)) { | ||
8699 | lpfc_printf_log(phba, KERN_ERR, LOG_SLI, | ||
8700 | "0368 Miss-matched fast-path completion " | ||
8701 | "queue identifier: eqcqid=%d, fcpcqid=%d\n", | ||
8702 | cqid, cq->queue_id); | ||
8703 | return; | ||
8704 | } | ||
8705 | |||
8706 | /* Process all the entries to the CQ */ | ||
8707 | while ((cqe = lpfc_sli4_cq_get(cq))) { | ||
8708 | workposted |= lpfc_sli4_fp_handle_wcqe(phba, cq, cqe); | ||
8709 | if (!(++ecount % LPFC_GET_QE_REL_INT)) | ||
8710 | lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM); | ||
8711 | } | ||
8712 | |||
8713 | /* Catch the no cq entry condition */ | ||
8714 | if (unlikely(ecount == 0)) | ||
8715 | lpfc_printf_log(phba, KERN_ERR, LOG_SLI, | ||
8716 | "0369 No entry from fast-path completion " | ||
8717 | "queue fcpcqid=%d\n", cq->queue_id); | ||
8718 | |||
8719 | /* In any case, flush and re-arm the CQ */ | ||
8720 | lpfc_sli4_cq_release(cq, LPFC_QUEUE_REARM); | ||
8721 | |||
8722 | /* wake up the worker thread if there is work to be done */ | ||
8723 | if (workposted) | ||
8724 | lpfc_worker_wake_up(phba); | ||
8725 | } | ||
8726 | |||
8727 | static void | ||
8728 | lpfc_sli4_eq_flush(struct lpfc_hba *phba, struct lpfc_queue *eq) | ||
8729 | { | ||
8730 | struct lpfc_eqe *eqe; | ||
8731 | |||
8732 | /* walk all the EQ entries and drop on the floor */ | ||
8733 | while ((eqe = lpfc_sli4_eq_get(eq))) | ||
8734 | ; | ||
8735 | |||
8736 | /* Clear and re-arm the EQ */ | ||
8737 | lpfc_sli4_eq_release(eq, LPFC_QUEUE_REARM); | ||
8738 | } | ||
8739 | |||
8740 | /** | ||
8741 | * lpfc_sli4_sp_intr_handler - Slow-path interrupt handler to SLI-4 device | ||
8742 | * @irq: Interrupt number. | ||
8743 | * @dev_id: The device context pointer. | ||
8744 | * | ||
8745 | * This function is directly called from the PCI layer as an interrupt | ||
8746 | * service routine when device with SLI-4 interface spec is enabled with | ||
8747 | * MSI-X multi-message interrupt mode and there are slow-path events in | ||
8748 | * the HBA. However, when the device is enabled with either MSI or Pin-IRQ | ||
8749 | * interrupt mode, this function is called as part of the device-level | ||
8750 | * interrupt handler. When the PCI slot is in error recovery or the HBA is | ||
8751 | * undergoing initialization, the interrupt handler will not process the | ||
8752 | * interrupt. The link attention and ELS ring attention events are handled | ||
8753 | * by the worker thread. The interrupt handler signals the worker thread | ||
8754 | * and returns for these events. This function is called without any lock | ||
8755 | * held. It gets the hbalock to access and update SLI data structures. | ||
8756 | * | ||
8757 | * This function returns IRQ_HANDLED when interrupt is handled else it | ||
8758 | * returns IRQ_NONE. | ||
8759 | **/ | ||
8760 | irqreturn_t | ||
8761 | lpfc_sli4_sp_intr_handler(int irq, void *dev_id) | ||
8762 | { | ||
8763 | struct lpfc_hba *phba; | ||
8764 | struct lpfc_queue *speq; | ||
8765 | struct lpfc_eqe *eqe; | ||
8766 | unsigned long iflag; | ||
8767 | int ecount = 0; | ||
8768 | |||
8769 | /* | ||
8770 | * Get the driver's phba structure from the dev_id | ||
8771 | */ | ||
8772 | phba = (struct lpfc_hba *)dev_id; | ||
8773 | |||
8774 | if (unlikely(!phba)) | ||
8775 | return IRQ_NONE; | ||
8776 | |||
8777 | /* Get to the EQ struct associated with this vector */ | ||
8778 | speq = phba->sli4_hba.sp_eq; | ||
8779 | |||
8780 | /* Check device state for handling interrupt */ | ||
8781 | if (unlikely(lpfc_intr_state_check(phba))) { | ||
8782 | /* Check again for link_state with lock held */ | ||
8783 | spin_lock_irqsave(&phba->hbalock, iflag); | ||
8784 | if (phba->link_state < LPFC_LINK_DOWN) | ||
8785 | /* Flush, clear interrupt, and rearm the EQ */ | ||
8786 | lpfc_sli4_eq_flush(phba, speq); | ||
8787 | spin_unlock_irqrestore(&phba->hbalock, iflag); | ||
8788 | return IRQ_NONE; | ||
8789 | } | ||
8790 | |||
8791 | /* | ||
8792 | * Process all the events on the slow-path EQ | ||
8793 | */ | ||
8794 | while ((eqe = lpfc_sli4_eq_get(speq))) { | ||
8795 | lpfc_sli4_sp_handle_eqe(phba, eqe); | ||
8796 | if (!(++ecount % LPFC_GET_QE_REL_INT)) | ||
8797 | lpfc_sli4_eq_release(speq, LPFC_QUEUE_NOARM); | ||
8798 | } | ||
8799 | |||
8800 | /* Always clear and re-arm the slow-path EQ */ | ||
8801 | lpfc_sli4_eq_release(speq, LPFC_QUEUE_REARM); | ||
8802 | |||
8803 | /* Catch the no EQ entry condition */ | ||
8804 | if (unlikely(ecount == 0)) { | ||
8805 | if (phba->intr_type == MSIX) | ||
8806 | /* MSI-X vector is dedicated, so no EQE means a spurious interrupt */ | ||
8807 | lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, | ||
8808 | "0357 MSI-X interrupt with no EQE\n"); | ||
8809 | else | ||
8810 | /* Non MSI-X interrupts may be shared, so treat no EQE as not ours */ | ||
8811 | return IRQ_NONE; | ||
8812 | } | ||
8813 | |||
8814 | return IRQ_HANDLED; | ||
8815 | } /* lpfc_sli4_sp_intr_handler */ | ||
8816 | |||
8817 | /** | ||
8818 | * lpfc_sli4_fp_intr_handler - Fast-path interrupt handler to SLI-4 device | ||
8819 | * @irq: Interrupt number. | ||
8820 | * @dev_id: The device context pointer. | ||
8821 | * | ||
8822 | * This function is directly called from the PCI layer as an interrupt | ||
8823 | * service routine when device with SLI-4 interface spec is enabled with | ||
8824 | * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB | ||
8825 | * ring event in the HBA. However, when the device is enabled with either | ||
8826 | * MSI or Pin-IRQ interrupt mode, this function is called as part of the | ||
8827 | * device-level interrupt handler. When the PCI slot is in error recovery | ||
8828 | * or the HBA is undergoing initialization, the interrupt handler will not | ||
8829 | * process the interrupt. The SCSI FCP fast-path ring events are handled in | ||
8830 | * interrupt context. This function is called without any lock held. | ||
8831 | * It gets the hbalock to access and update SLI data structures. Note that | ||
8832 | * the FCP EQs map one-to-one onto the FCP CQs, so the FCP EQ index is | ||
8833 | * equal to the FCP CQ index. | ||
8834 | * | ||
8835 | * This function returns IRQ_HANDLED when interrupt is handled else it | ||
8836 | * returns IRQ_NONE. | ||
8837 | **/ | ||
8838 | irqreturn_t | ||
8839 | lpfc_sli4_fp_intr_handler(int irq, void *dev_id) | ||
8840 | { | ||
8841 | struct lpfc_hba *phba; | ||
8842 | struct lpfc_fcp_eq_hdl *fcp_eq_hdl; | ||
8843 | struct lpfc_queue *fpeq; | ||
8844 | struct lpfc_eqe *eqe; | ||
8845 | unsigned long iflag; | ||
8846 | int ecount = 0; | ||
8847 | uint32_t fcp_eqidx; | ||
8848 | |||
8849 | /* Get the driver's phba structure from the dev_id */ | ||
8850 | fcp_eq_hdl = (struct lpfc_fcp_eq_hdl *)dev_id; | ||
8851 | phba = fcp_eq_hdl->phba; | ||
8852 | fcp_eqidx = fcp_eq_hdl->idx; | ||
8853 | |||
8854 | if (unlikely(!phba)) | ||
8855 | return IRQ_NONE; | ||
8856 | |||
8857 | /* Get to the EQ struct associated with this vector */ | ||
8858 | fpeq = phba->sli4_hba.fp_eq[fcp_eqidx]; | ||
8859 | |||
8860 | /* Check device state for handling interrupt */ | ||
8861 | if (unlikely(lpfc_intr_state_check(phba))) { | ||
8862 | /* Check again for link_state with lock held */ | ||
8863 | spin_lock_irqsave(&phba->hbalock, iflag); | ||
8864 | if (phba->link_state < LPFC_LINK_DOWN) | ||
8865 | /* Flush, clear interrupt, and rearm the EQ */ | ||
8866 | lpfc_sli4_eq_flush(phba, fpeq); | ||
8867 | spin_unlock_irqrestore(&phba->hbalock, iflag); | ||
8868 | return IRQ_NONE; | ||
8869 | } | ||
8870 | |||
8871 | /* | ||
8872 | * Process all the events on the FCP fast-path EQ | ||
8873 | */ | ||
8874 | while ((eqe = lpfc_sli4_eq_get(fpeq))) { | ||
8875 | lpfc_sli4_fp_handle_eqe(phba, eqe, fcp_eqidx); | ||
8876 | if (!(++ecount % LPFC_GET_QE_REL_INT)) | ||
8877 | lpfc_sli4_eq_release(fpeq, LPFC_QUEUE_NOARM); | ||
8878 | } | ||
8879 | |||
8880 | /* Always clear and re-arm the fast-path EQ */ | ||
8881 | lpfc_sli4_eq_release(fpeq, LPFC_QUEUE_REARM); | ||
8882 | |||
8883 | if (unlikely(ecount == 0)) { | ||
8884 | if (phba->intr_type == MSIX) | ||
8885 | /* MSI-X vector is dedicated, so no EQE means a spurious interrupt */ | ||
8886 | lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, | ||
8887 | "0358 MSI-X interrupt with no EQE\n"); | ||
8888 | else | ||
8889 | /* Non MSI-X interrupts may be shared, so treat no EQE as not ours */ | ||
8890 | return IRQ_NONE; | ||
8891 | } | ||
8892 | |||
8893 | return IRQ_HANDLED; | ||
8894 | } /* lpfc_sli4_fp_intr_handler */ | ||
8895 | |||
8896 | /** | ||
8897 | * lpfc_sli4_intr_handler - Device-level interrupt handler for SLI-4 device | ||
8898 | * @irq: Interrupt number. | ||
8899 | * @dev_id: The device context pointer. | ||
8900 | * | ||
8901 | * This function is the device-level interrupt handler to device with SLI-4 | ||
8902 | * interface spec, called from the PCI layer when either MSI or Pin-IRQ | ||
8903 | * interrupt mode is enabled and there is an event in the HBA which requires | ||
8904 | * driver attention. This function invokes the slow-path interrupt attention | ||
8905 | * handling function and fast-path interrupt attention handling function in | ||
8906 | * turn to process the relevant HBA attention events. This function is called | ||
8907 | * without any lock held. It gets the hbalock to access and update SLI data | ||
8908 | * structures. | ||
8909 | * | ||
8910 | * This function returns IRQ_HANDLED when interrupt is handled, else it | ||
8911 | * returns IRQ_NONE. | ||
8912 | **/ | ||
8913 | irqreturn_t | ||
8914 | lpfc_sli4_intr_handler(int irq, void *dev_id) | ||
8915 | { | ||
8916 | struct lpfc_hba *phba; | ||
8917 | irqreturn_t sp_irq_rc, fp_irq_rc; | ||
8918 | bool fp_handled = false; | ||
8919 | uint32_t fcp_eqidx; | ||
8920 | |||
8921 | /* Get the driver's phba structure from the dev_id */ | ||
8922 | phba = (struct lpfc_hba *)dev_id; | ||
8923 | |||
8924 | if (unlikely(!phba)) | ||
8925 | return IRQ_NONE; | ||
8926 | |||
8927 | /* | ||
8928 | * Invoke slow-path host attention interrupt handling as appropriate. | ||
8929 | */ | ||
8930 | sp_irq_rc = lpfc_sli4_sp_intr_handler(irq, dev_id); | ||
8931 | |||
8932 | /* | ||
8933 | * Invoke fast-path host attention interrupt handling as appropriate. | ||
8934 | */ | ||
8935 | for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) { | ||
8936 | fp_irq_rc = lpfc_sli4_fp_intr_handler(irq, | ||
8937 | &phba->sli4_hba.fcp_eq_hdl[fcp_eqidx]); | ||
8938 | if (fp_irq_rc == IRQ_HANDLED) | ||
8939 | fp_handled = true; | ||
8940 | } | ||
8941 | |||
8942 | return fp_handled ? IRQ_HANDLED : sp_irq_rc; | ||
8943 | } /* lpfc_sli4_intr_handler */ | ||
8944 | |||
8945 | /** | ||
8946 | * lpfc_sli4_queue_free - free a queue structure and associated memory | ||
8947 | * @queue: The queue structure to free. | ||
8948 | * | ||
8949 | * This function frees a queue structure and the DMAable memory used for | ||
8950 | * the host resident queue. This function must be called after destroying the | ||
8951 | * queue on the HBA. | ||
8952 | **/ | ||
8953 | void | ||
8954 | lpfc_sli4_queue_free(struct lpfc_queue *queue) | ||
8955 | { | ||
8956 | struct lpfc_dmabuf *dmabuf; | ||
8957 | |||
8958 | if (!queue) | ||
8959 | return; | ||
8960 | |||
8961 | while (!list_empty(&queue->page_list)) { | ||
8962 | list_remove_head(&queue->page_list, dmabuf, struct lpfc_dmabuf, | ||
8963 | list); | ||
8964 | dma_free_coherent(&queue->phba->pcidev->dev, PAGE_SIZE, | ||
8965 | dmabuf->virt, dmabuf->phys); | ||
8966 | kfree(dmabuf); | ||
8967 | } | ||
8968 | kfree(queue); | ||
8969 | return; | ||
8970 | } | ||
8971 | |||
8972 | /** | ||
8973 | * lpfc_sli4_queue_alloc - Allocate and initialize a queue structure | ||
8974 | * @phba: The HBA that this queue is being created on. | ||
8975 | * @entry_size: The size of each queue entry for this queue. | ||
8976 | * @entry_count: The number of entries that this queue will handle. | ||
8977 | * | ||
8978 | * This function allocates a queue structure and the DMAable memory used for | ||
8979 | * the host resident queue. This function must be called before creating the | ||
8980 | * queue on the HBA. | ||
8981 | **/ | ||
8982 | struct lpfc_queue * | ||
8983 | lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t entry_size, | ||
8984 | uint32_t entry_count) | ||
8985 | { | ||
8986 | struct lpfc_queue *queue; | ||
8987 | struct lpfc_dmabuf *dmabuf; | ||
8988 | int x, total_qe_count; | ||
8989 | void *dma_pointer; | ||
8990 | |||
8992 | queue = kzalloc(sizeof(struct lpfc_queue) + | ||
8993 | (sizeof(union sli4_qe) * entry_count), GFP_KERNEL); | ||
8994 | if (!queue) | ||
8995 | return NULL; | ||
8996 | queue->page_count = (PAGE_ALIGN(entry_size * entry_count))/PAGE_SIZE; | ||
8997 | INIT_LIST_HEAD(&queue->list); | ||
8998 | INIT_LIST_HEAD(&queue->page_list); | ||
8999 | INIT_LIST_HEAD(&queue->child_list); | ||
9000 | for (x = 0, total_qe_count = 0; x < queue->page_count; x++) { | ||
9001 | dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); | ||
9002 | if (!dmabuf) | ||
9003 | goto out_fail; | ||
9004 | dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, | ||
9005 | PAGE_SIZE, &dmabuf->phys, | ||
9006 | GFP_KERNEL); | ||
9007 | if (!dmabuf->virt) { | ||
9008 | kfree(dmabuf); | ||
9009 | goto out_fail; | ||
9010 | } | ||
9011 | dmabuf->buffer_tag = x; | ||
9012 | list_add_tail(&dmabuf->list, &queue->page_list); | ||
9013 | /* initialize queue's entry array */ | ||
9014 | dma_pointer = dmabuf->virt; | ||
9015 | for (; total_qe_count < entry_count && | ||
9016 | dma_pointer < (PAGE_SIZE + dmabuf->virt); | ||
9017 | total_qe_count++, dma_pointer += entry_size) { | ||
9018 | queue->qe[total_qe_count].address = dma_pointer; | ||
9019 | } | ||
9020 | } | ||
9021 | queue->entry_size = entry_size; | ||
9022 | queue->entry_count = entry_count; | ||
9023 | queue->phba = phba; | ||
9024 | |||
9025 | return queue; | ||
9026 | out_fail: | ||
9027 | lpfc_sli4_queue_free(queue); | ||
9028 | return NULL; | ||
9029 | } | ||
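| ||
| /* | ||
| * Usage sketch for the alloc/create/destroy/free pairing (illustrative | ||
| * only - the entry count shown is an assumption, not a value this file | ||
| * mandates): | ||
| * | ||
| *	struct lpfc_queue *eq; | ||
| * | ||
| *	eq = lpfc_sli4_queue_alloc(phba, LPFC_EQE_SIZE, 1024); | ||
| *	if (!eq || lpfc_eq_create(phba, eq, imax)) | ||
| *		... handle the failure ... | ||
| *	... | ||
| *	lpfc_eq_destroy(phba, eq); | ||
| *	lpfc_sli4_queue_free(eq); | ||
| */ | ||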
9030 | |||
9031 | /** | ||
9032 | * lpfc_eq_create - Create an Event Queue on the HBA | ||
9033 | * @phba: HBA structure that indicates port to create a queue on. | ||
9034 | * @eq: The queue structure to use to create the event queue. | ||
9035 | * @imax: The maximum interrupt per second limit. | ||
9036 | * | ||
9037 | * This function creates an event queue, as detailed in @eq, on a port, | ||
9038 | * described by @phba by sending an EQ_CREATE mailbox command to the HBA. | ||
9039 | * | ||
9040 | * The @phba struct is used to send mailbox command to HBA. The @eq struct | ||
9041 | * is used to get the entry count and entry size that are necessary to | ||
9042 | * determine the number of pages to allocate and use for this queue. This | ||
9043 | * function will send the EQ_CREATE mailbox command to the HBA to setup the | ||
9044 | * event queue. This function is synchronous and will wait for the mailbox | ||
9045 | * command to finish before continuing. | ||
9046 | * | ||
9047 | * On success this function will return zero. If unable to allocate enough | ||
9048 | * memory this function will return -ENOMEM. If the queue create mailbox | ||
9049 | * command fails this function will return -ENXIO. | ||
9050 | **/ | ||
9051 | uint32_t | ||
9052 | lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint16_t imax) | ||
9053 | { | ||
9054 | struct lpfc_mbx_eq_create *eq_create; | ||
9055 | LPFC_MBOXQ_t *mbox; | ||
9056 | int rc, length, status = 0; | ||
9057 | struct lpfc_dmabuf *dmabuf; | ||
9058 | uint32_t shdr_status, shdr_add_status; | ||
9059 | union lpfc_sli4_cfg_shdr *shdr; | ||
9060 | uint16_t dmult; | ||
9061 | |||
9062 | mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); | ||
9063 | if (!mbox) | ||
9064 | return -ENOMEM; | ||
9065 | length = (sizeof(struct lpfc_mbx_eq_create) - | ||
9066 | sizeof(struct lpfc_sli4_cfg_mhdr)); | ||
9067 | lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, | ||
9068 | LPFC_MBOX_OPCODE_EQ_CREATE, | ||
9069 | length, LPFC_SLI4_MBX_EMBED); | ||
9070 | eq_create = &mbox->u.mqe.un.eq_create; | ||
9071 | bf_set(lpfc_mbx_eq_create_num_pages, &eq_create->u.request, | ||
9072 | eq->page_count); | ||
9073 | bf_set(lpfc_eq_context_size, &eq_create->u.request.context, | ||
9074 | LPFC_EQE_SIZE); | ||
9075 | bf_set(lpfc_eq_context_valid, &eq_create->u.request.context, 1); | ||
9076 | /* Calculate delay multiplier from maximum interrupts per second */ | ||
9077 | dmult = LPFC_DMULT_CONST/imax - 1; | ||
9078 | bf_set(lpfc_eq_context_delay_multi, &eq_create->u.request.context, | ||
9079 | dmult); | ||
9080 | switch (eq->entry_count) { | ||
9081 | default: | ||
9082 | lpfc_printf_log(phba, KERN_ERR, LOG_SLI, | ||
9083 | "0360 Unsupported EQ count. (%d)\n", | ||
9084 | eq->entry_count); | ||
9085 | if (eq->entry_count < 256) { | ||
| /* free the mailbox before bailing out to avoid leaking it */ | ||
| mempool_free(mbox, phba->mbox_mem_pool); | ||
9086 | return -EINVAL; | ||
| } | ||
9087 | /* otherwise default to smallest count (drop through) */ | ||
9088 | case 256: | ||
9089 | bf_set(lpfc_eq_context_count, &eq_create->u.request.context, | ||
9090 | LPFC_EQ_CNT_256); | ||
9091 | break; | ||
9092 | case 512: | ||
9093 | bf_set(lpfc_eq_context_count, &eq_create->u.request.context, | ||
9094 | LPFC_EQ_CNT_512); | ||
9095 | break; | ||
9096 | case 1024: | ||
9097 | bf_set(lpfc_eq_context_count, &eq_create->u.request.context, | ||
9098 | LPFC_EQ_CNT_1024); | ||
9099 | break; | ||
9100 | case 2048: | ||
9101 | bf_set(lpfc_eq_context_count, &eq_create->u.request.context, | ||
9102 | LPFC_EQ_CNT_2048); | ||
9103 | break; | ||
9104 | case 4096: | ||
9105 | bf_set(lpfc_eq_context_count, &eq_create->u.request.context, | ||
9106 | LPFC_EQ_CNT_4096); | ||
9107 | break; | ||
9108 | } | ||
9109 | list_for_each_entry(dmabuf, &eq->page_list, list) { | ||
9110 | eq_create->u.request.page[dmabuf->buffer_tag].addr_lo = | ||
9111 | putPaddrLow(dmabuf->phys); | ||
9112 | eq_create->u.request.page[dmabuf->buffer_tag].addr_hi = | ||
9113 | putPaddrHigh(dmabuf->phys); | ||
9114 | } | ||
9115 | mbox->vport = phba->pport; | ||
9116 | mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; | ||
9117 | mbox->context1 = NULL; | ||
9118 | rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); | ||
9119 | shdr = (union lpfc_sli4_cfg_shdr *) &eq_create->header.cfg_shdr; | ||
9120 | shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); | ||
9121 | shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); | ||
9122 | if (shdr_status || shdr_add_status || rc) { | ||
9123 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | ||
9124 | "2500 EQ_CREATE mailbox failed with " | ||
9125 | "status x%x add_status x%x, mbx status x%x\n", | ||
9126 | shdr_status, shdr_add_status, rc); | ||
9127 | status = -ENXIO; | ||
9128 | } | ||
9129 | eq->type = LPFC_EQ; | ||
9130 | eq->subtype = LPFC_NONE; | ||
9131 | eq->queue_id = bf_get(lpfc_mbx_eq_create_q_id, &eq_create->u.response); | ||
9132 | if (eq->queue_id == 0xFFFF) | ||
9133 | status = -ENXIO; | ||
9134 | eq->host_index = 0; | ||
9135 | eq->hba_index = 0; | ||
9136 | |||
9137 | if (rc != MBX_TIMEOUT) | ||
9138 | mempool_free(mbox, phba->mbox_mem_pool); | ||
9139 | return status; | ||
9140 | } | ||
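| ||
| /* | ||
| * A reading of the dmult calculation above (an interpretation, not a | ||
| * statement from the SLI-4 spec): if LPFC_DMULT_CONST is the port's | ||
| * undivided event rate, then dmult = LPFC_DMULT_CONST / imax - 1 picks | ||
| * the delay multiplier so that at most @imax interrupts fire per second. | ||
| * Note the calculation assumes imax is non-zero. | ||
| */ | ||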
9141 | |||
9142 | /** | ||
9143 | * lpfc_cq_create - Create a Completion Queue on the HBA | ||
9144 | * @phba: HBA structure that indicates port to create a queue on. | ||
9145 | * @cq: The queue structure to use to create the completion queue. | ||
9146 | * @eq: The event queue to bind this completion queue to. | ||
9147 | * | ||
9148 | * This function creates a completion queue, as detailed in @cq, on a port, | ||
9149 | * described by @phba by sending a CQ_CREATE mailbox command to the HBA. | ||
9150 | * | ||
9151 | * The @phba struct is used to send mailbox command to HBA. The @cq struct | ||
9152 | * is used to get the entry count and entry size that are necessary to | ||
9153 | * determine the number of pages to allocate and use for this queue. The @eq | ||
9154 | * is used to indicate which event queue to bind this completion queue to. This | ||
9155 | * function will send the CQ_CREATE mailbox command to the HBA to setup the | ||
9156 | * completion queue. This function is synchronous and will wait for the | ||
9157 | * mailbox command to finish before continuing. | ||
9158 | * | ||
9159 | * On success this function will return zero. If unable to allocate enough | ||
9160 | * memory this function will return -ENOMEM. If the queue create mailbox | ||
9161 | * command fails this function will return -ENXIO. | ||
9162 | **/ | ||
9163 | uint32_t | ||
9164 | lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq, | ||
9165 | struct lpfc_queue *eq, uint32_t type, uint32_t subtype) | ||
9166 | { | ||
9167 | struct lpfc_mbx_cq_create *cq_create; | ||
9168 | struct lpfc_dmabuf *dmabuf; | ||
9169 | LPFC_MBOXQ_t *mbox; | ||
9170 | int rc, length, status = 0; | ||
9171 | uint32_t shdr_status, shdr_add_status; | ||
9172 | union lpfc_sli4_cfg_shdr *shdr; | ||
9173 | |||
9174 | mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); | ||
9175 | if (!mbox) | ||
9176 | return -ENOMEM; | ||
9177 | length = (sizeof(struct lpfc_mbx_cq_create) - | ||
9178 | sizeof(struct lpfc_sli4_cfg_mhdr)); | ||
9179 | lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, | ||
9180 | LPFC_MBOX_OPCODE_CQ_CREATE, | ||
9181 | length, LPFC_SLI4_MBX_EMBED); | ||
9182 | cq_create = &mbox->u.mqe.un.cq_create; | ||
9183 | bf_set(lpfc_mbx_cq_create_num_pages, &cq_create->u.request, | ||
9184 | cq->page_count); | ||
9185 | bf_set(lpfc_cq_context_event, &cq_create->u.request.context, 1); | ||
9186 | bf_set(lpfc_cq_context_valid, &cq_create->u.request.context, 1); | ||
9187 | bf_set(lpfc_cq_eq_id, &cq_create->u.request.context, eq->queue_id); | ||
9188 | switch (cq->entry_count) { | ||
9189 | default: | ||
9190 | lpfc_printf_log(phba, KERN_ERR, LOG_SLI, | ||
9191 | "0361 Unsupported CQ count. (%d)\n", | ||
9192 | cq->entry_count); | ||
9193 | if (cq->entry_count < 256) { | ||
| /* free the mailbox before bailing out to avoid leaking it */ | ||
| mempool_free(mbox, phba->mbox_mem_pool); | ||
9194 | return -EINVAL; | ||
| } | ||
9195 | /* otherwise default to smallest count (drop through) */ | ||
9196 | case 256: | ||
9197 | bf_set(lpfc_cq_context_count, &cq_create->u.request.context, | ||
9198 | LPFC_CQ_CNT_256); | ||
9199 | break; | ||
9200 | case 512: | ||
9201 | bf_set(lpfc_cq_context_count, &cq_create->u.request.context, | ||
9202 | LPFC_CQ_CNT_512); | ||
9203 | break; | ||
9204 | case 1024: | ||
9205 | bf_set(lpfc_cq_context_count, &cq_create->u.request.context, | ||
9206 | LPFC_CQ_CNT_1024); | ||
9207 | break; | ||
9208 | } | ||
9209 | list_for_each_entry(dmabuf, &cq->page_list, list) { | ||
9210 | cq_create->u.request.page[dmabuf->buffer_tag].addr_lo = | ||
9211 | putPaddrLow(dmabuf->phys); | ||
9212 | cq_create->u.request.page[dmabuf->buffer_tag].addr_hi = | ||
9213 | putPaddrHigh(dmabuf->phys); | ||
9214 | } | ||
9215 | rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); | ||
9216 | |||
9217 | /* The IOCTL status is embedded in the mailbox subheader. */ | ||
9218 | shdr = (union lpfc_sli4_cfg_shdr *) &cq_create->header.cfg_shdr; | ||
9219 | shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); | ||
9220 | shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); | ||
9221 | if (shdr_status || shdr_add_status || rc) { | ||
9222 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | ||
9223 | "2501 CQ_CREATE mailbox failed with " | ||
9224 | "status x%x add_status x%x, mbx status x%x\n", | ||
9225 | shdr_status, shdr_add_status, rc); | ||
9226 | status = -ENXIO; | ||
9227 | goto out; | ||
9228 | } | ||
9229 | cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response); | ||
9230 | if (cq->queue_id == 0xFFFF) { | ||
9231 | status = -ENXIO; | ||
9232 | goto out; | ||
9233 | } | ||
9234 | /* link the cq onto the parent eq child list */ | ||
9235 | list_add_tail(&cq->list, &eq->child_list); | ||
9236 | /* Set up completion queue's type and subtype */ | ||
9237 | cq->type = type; | ||
9238 | cq->subtype = subtype; | ||
9240 | cq->host_index = 0; | ||
9241 | cq->hba_index = 0; | ||
9242 | out: | ||
9243 | |||
9244 | if (rc != MBX_TIMEOUT) | ||
9245 | mempool_free(mbox, phba->mbox_mem_pool); | ||
9246 | return status; | ||
9247 | } | ||
9248 | |||
9249 | /** | ||
9250 | * lpfc_wq_create - Create a Work Queue on the HBA | ||
9251 | * @phba: HBA structure that indicates port to create a queue on. | ||
9252 | * @wq: The queue structure to use to create the work queue. | ||
9253 | * @cq: The completion queue to bind this work queue to. | ||
9254 | * @subtype: The subtype of the work queue indicating its functionality. | ||
9255 | * | ||
9256 | * This function creates a work queue, as detailed in @wq, on a port, described | ||
9257 | * by @phba by sending a WQ_CREATE mailbox command to the HBA. | ||
9258 | * | ||
9259 | * The @phba struct is used to send mailbox command to HBA. The @wq struct | ||
9260 | * is used to get the entry count and entry size that are necessary to | ||
9261 | * determine the number of pages to allocate and use for this queue. The @cq | ||
9262 | * is used to indicate which completion queue to bind this work queue to. This | ||
9263 | * function will send the WQ_CREATE mailbox command to the HBA to setup the | ||
9264 | * work queue. This function is synchronous and will wait for the mailbox | ||
9265 | * command to finish before continuing. | ||
9266 | * | ||
9267 | * On success this function will return zero. If unable to allocate enough | ||
9268 | * memory this function will return -ENOMEM. If the queue create mailbox | ||
9269 | * command fails this function will return -ENXIO. | ||
9270 | **/ | ||
9271 | uint32_t | ||
9272 | lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq, | ||
9273 | struct lpfc_queue *cq, uint32_t subtype) | ||
9274 | { | ||
9275 | struct lpfc_mbx_wq_create *wq_create; | ||
9276 | struct lpfc_dmabuf *dmabuf; | ||
9277 | LPFC_MBOXQ_t *mbox; | ||
9278 | int rc, length, status = 0; | ||
9279 | uint32_t shdr_status, shdr_add_status; | ||
9280 | union lpfc_sli4_cfg_shdr *shdr; | ||
9281 | |||
9282 | mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); | ||
9283 | if (!mbox) | ||
9284 | return -ENOMEM; | ||
9285 | length = (sizeof(struct lpfc_mbx_wq_create) - | ||
9286 | sizeof(struct lpfc_sli4_cfg_mhdr)); | ||
9287 | lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, | ||
9288 | LPFC_MBOX_OPCODE_FCOE_WQ_CREATE, | ||
9289 | length, LPFC_SLI4_MBX_EMBED); | ||
9290 | wq_create = &mbox->u.mqe.un.wq_create; | ||
9291 | bf_set(lpfc_mbx_wq_create_num_pages, &wq_create->u.request, | ||
9292 | wq->page_count); | ||
9293 | bf_set(lpfc_mbx_wq_create_cq_id, &wq_create->u.request, | ||
9294 | cq->queue_id); | ||
9295 | list_for_each_entry(dmabuf, &wq->page_list, list) { | ||
9296 | wq_create->u.request.page[dmabuf->buffer_tag].addr_lo = | ||
9297 | putPaddrLow(dmabuf->phys); | ||
9298 | wq_create->u.request.page[dmabuf->buffer_tag].addr_hi = | ||
9299 | putPaddrHigh(dmabuf->phys); | ||
9300 | } | ||
9301 | rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); | ||
9302 | /* The IOCTL status is embedded in the mailbox subheader. */ | ||
9303 | shdr = (union lpfc_sli4_cfg_shdr *) &wq_create->header.cfg_shdr; | ||
9304 | shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); | ||
9305 | shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); | ||
9306 | if (shdr_status || shdr_add_status || rc) { | ||
9307 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | ||
9308 | "2503 WQ_CREATE mailbox failed with " | ||
9309 | "status x%x add_status x%x, mbx status x%x\n", | ||
9310 | shdr_status, shdr_add_status, rc); | ||
9311 | status = -ENXIO; | ||
9312 | goto out; | ||
9313 | } | ||
9314 | wq->queue_id = bf_get(lpfc_mbx_wq_create_q_id, &wq_create->u.response); | ||
9315 | if (wq->queue_id == 0xFFFF) { | ||
9316 | status = -ENXIO; | ||
9317 | goto out; | ||
9318 | } | ||
9319 | wq->type = LPFC_WQ; | ||
9320 | wq->subtype = subtype; | ||
9321 | wq->host_index = 0; | ||
9322 | wq->hba_index = 0; | ||
9323 | |||
9324 | /* link the wq onto the parent cq child list */ | ||
9325 | list_add_tail(&wq->list, &cq->child_list); | ||
9326 | out: | ||
9327 | if (rc != MBX_TIMEOUT) | ||
9328 | mempool_free(mbox, phba->mbox_mem_pool); | ||
9329 | return status; | ||
9330 | } | ||
9331 | |||
9332 | /** | ||
9333 | * lpfc_rq_create - Create a Receive Queue on the HBA | ||
9334 | * @phba: HBA structure that indicates port to create a queue on. | ||
9335 | * @hrq: The queue structure to use to create the header receive queue. | ||
9336 | * @drq: The queue structure to use to create the data receive queue. | ||
9337 | * @cq: The completion queue to bind this work queue to. | ||
9338 | * | ||
9339 | * This function creates a receive buffer queue pair, as detailed in @hrq and | ||
9340 | * @drq, on a port, described by @phba by sending a RQ_CREATE mailbox command | ||
9341 | * to the HBA. | ||
9342 | * | ||
9343 | * The @phba struct is used to send mailbox command to HBA. The @drq and @hrq | ||
9344 | * struct is used to get the entry count that is necessary to determine the | ||
9345 | * number of pages to use for this queue. The @cq is used to indicate which | ||
9346 | * completion queue to bind received buffers that are posted to these queues to. | ||
9347 | * This function will send the RQ_CREATE mailbox command to the HBA to setup the | ||
9348 | * receive queue pair. This function is synchronous and will wait for the | ||
9349 | * mailbox command to finish before continuing. | ||
9350 | * | ||
9351 | * On success this function will return zero. If unable to allocate enough | ||
9352 | * memory this function will return -ENOMEM. If the queue create mailbox | ||
9353 | * command fails this function will return -ENXIO. | ||
9354 | **/ | ||
9355 | uint32_t | ||
9356 | lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq, | ||
9357 | struct lpfc_queue *drq, struct lpfc_queue *cq, uint32_t subtype) | ||
9358 | { | ||
9359 | struct lpfc_mbx_rq_create *rq_create; | ||
9360 | struct lpfc_dmabuf *dmabuf; | ||
9361 | LPFC_MBOXQ_t *mbox; | ||
9362 | int rc, length, status = 0; | ||
9363 | uint32_t shdr_status, shdr_add_status; | ||
9364 | union lpfc_sli4_cfg_shdr *shdr; | ||
9365 | |||
9366 | if (hrq->entry_count != drq->entry_count) | ||
9367 | return -EINVAL; | ||
9368 | mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); | ||
9369 | if (!mbox) | ||
9370 | return -ENOMEM; | ||
9371 | length = (sizeof(struct lpfc_mbx_rq_create) - | ||
9372 | sizeof(struct lpfc_sli4_cfg_mhdr)); | ||
9373 | lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, | ||
9374 | LPFC_MBOX_OPCODE_FCOE_RQ_CREATE, | ||
9375 | length, LPFC_SLI4_MBX_EMBED); | ||
9376 | rq_create = &mbox->u.mqe.un.rq_create; | ||
9377 | switch (hrq->entry_count) { | ||
9378 | default: | ||
9379 | lpfc_printf_log(phba, KERN_ERR, LOG_SLI, | ||
9380 | "2535 Unsupported RQ count. (%d)\n", | ||
9381 | hrq->entry_count); | ||
9382 | if (hrq->entry_count < 512) { | ||
| /* free the mailbox before bailing out to avoid leaking it */ | ||
| mempool_free(mbox, phba->mbox_mem_pool); | ||
9383 | return -EINVAL; | ||
| } | ||
9384 | /* otherwise default to smallest count (drop through) */ | ||
9385 | case 512: | ||
9386 | bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context, | ||
9387 | LPFC_RQ_RING_SIZE_512); | ||
9388 | break; | ||
9389 | case 1024: | ||
9390 | bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context, | ||
9391 | LPFC_RQ_RING_SIZE_1024); | ||
9392 | break; | ||
9393 | case 2048: | ||
9394 | bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context, | ||
9395 | LPFC_RQ_RING_SIZE_2048); | ||
9396 | break; | ||
9397 | case 4096: | ||
9398 | bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context, | ||
9399 | LPFC_RQ_RING_SIZE_4096); | ||
9400 | break; | ||
9401 | } | ||
9402 | bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context, | ||
9403 | cq->queue_id); | ||
9404 | bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request, | ||
9405 | hrq->page_count); | ||
9406 | bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context, | ||
9407 | LPFC_HDR_BUF_SIZE); | ||
9408 | list_for_each_entry(dmabuf, &hrq->page_list, list) { | ||
9409 | rq_create->u.request.page[dmabuf->buffer_tag].addr_lo = | ||
9410 | putPaddrLow(dmabuf->phys); | ||
9411 | rq_create->u.request.page[dmabuf->buffer_tag].addr_hi = | ||
9412 | putPaddrHigh(dmabuf->phys); | ||
9413 | } | ||
9414 | rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); | ||
9415 | /* The IOCTL status is embedded in the mailbox subheader. */ | ||
9416 | shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr; | ||
9417 | shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); | ||
9418 | shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); | ||
9419 | if (shdr_status || shdr_add_status || rc) { | ||
9420 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | ||
9421 | "2504 RQ_CREATE mailbox failed with " | ||
9422 | "status x%x add_status x%x, mbx status x%x\n", | ||
9423 | shdr_status, shdr_add_status, rc); | ||
9424 | status = -ENXIO; | ||
9425 | goto out; | ||
9426 | } | ||
9427 | hrq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response); | ||
9428 | if (hrq->queue_id == 0xFFFF) { | ||
9429 | status = -ENXIO; | ||
9430 | goto out; | ||
9431 | } | ||
9432 | hrq->type = LPFC_HRQ; | ||
9433 | hrq->subtype = subtype; | ||
9434 | hrq->host_index = 0; | ||
9435 | hrq->hba_index = 0; | ||
9436 | |||
9437 | /* now create the data queue */ | ||
9438 | lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, | ||
9439 | LPFC_MBOX_OPCODE_FCOE_RQ_CREATE, | ||
9440 | length, LPFC_SLI4_MBX_EMBED); | ||
9441 | switch (drq->entry_count) { | ||
9442 | default: | ||
9443 | lpfc_printf_log(phba, KERN_ERR, LOG_SLI, | ||
9444 | "2536 Unsupported RQ count. (%d)\n", | ||
9445 | drq->entry_count); | ||
9446 | if (drq->entry_count < 512) { | ||
| /* free the mailbox before bailing out to avoid leaking it */ | ||
| mempool_free(mbox, phba->mbox_mem_pool); | ||
9447 | return -EINVAL; | ||
| } | ||
9448 | /* otherwise default to smallest count (drop through) */ | ||
9449 | case 512: | ||
9450 | bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context, | ||
9451 | LPFC_RQ_RING_SIZE_512); | ||
9452 | break; | ||
9453 | case 1024: | ||
9454 | bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context, | ||
9455 | LPFC_RQ_RING_SIZE_1024); | ||
9456 | break; | ||
9457 | case 2048: | ||
9458 | bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context, | ||
9459 | LPFC_RQ_RING_SIZE_2048); | ||
9460 | break; | ||
9461 | case 4096: | ||
9462 | bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context, | ||
9463 | LPFC_RQ_RING_SIZE_4096); | ||
9464 | break; | ||
9465 | } | ||
9466 | bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context, | ||
9467 | cq->queue_id); | ||
9468 | bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request, | ||
9469 | drq->page_count); | ||
9470 | bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context, | ||
9471 | LPFC_DATA_BUF_SIZE); | ||
9472 | list_for_each_entry(dmabuf, &drq->page_list, list) { | ||
9473 | rq_create->u.request.page[dmabuf->buffer_tag].addr_lo = | ||
9474 | putPaddrLow(dmabuf->phys); | ||
9475 | rq_create->u.request.page[dmabuf->buffer_tag].addr_hi = | ||
9476 | putPaddrHigh(dmabuf->phys); | ||
9477 | } | ||
9478 | rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); | ||
9479 | /* The IOCTL status is embedded in the mailbox subheader. */ | ||
9480 | shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr; | ||
9481 | shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); | ||
9482 | shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); | ||
9483 | if (shdr_status || shdr_add_status || rc) { | ||
9484 | status = -ENXIO; | ||
9485 | goto out; | ||
9486 | } | ||
9487 | drq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response); | ||
9488 | if (drq->queue_id == 0xFFFF) { | ||
9489 | status = -ENXIO; | ||
9490 | goto out; | ||
9491 | } | ||
9492 | drq->type = LPFC_DRQ; | ||
9493 | drq->subtype = subtype; | ||
9494 | drq->host_index = 0; | ||
9495 | drq->hba_index = 0; | ||
9496 | |||
9497 | /* link the header and data RQs onto the parent cq child list */ | ||
9498 | list_add_tail(&hrq->list, &cq->child_list); | ||
9499 | list_add_tail(&drq->list, &cq->child_list); | ||
9500 | |||
9501 | out: | ||
9502 | if (rc != MBX_TIMEOUT) | ||
9503 | mempool_free(mbox, phba->mbox_mem_pool); | ||
9504 | return status; | ||
9505 | } | ||
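| ||
| /* | ||
| * Ordering note (inferred from the child_list linkage in the create | ||
| * routines above, not from a separate spec): queues are created | ||
| * parent-first - an EQ, then the CQs bound to it, then the WQs and RQs | ||
| * bound to a CQ - and should be destroyed in the reverse order so that | ||
| * no child queue outlives its parent. | ||
| */ | ||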
9506 | |||
9507 | /** | ||
9508 | * lpfc_eq_destroy - Destroy an event Queue on the HBA | ||
9509 | * @phba: Pointer to HBA context object. | ||
| * @eq: The queue structure associated with the queue to destroy. | ||
9510 | * | ||
9511 | * This function destroys a queue, as detailed in @eq, by sending a mailbox | ||
9512 | * command, specific to the type of queue, to the HBA. | ||
9513 | * | ||
9514 | * The @eq struct is used to get the queue ID of the queue to destroy. | ||
9515 | * | ||
9516 | * On success this function will return zero. If the queue destroy mailbox | ||
9517 | * command fails this function will return -ENXIO. | ||
9518 | **/ | ||
9519 | uint32_t | ||
9520 | lpfc_eq_destroy(struct lpfc_hba *phba, struct lpfc_queue *eq) | ||
9521 | { | ||
9522 | LPFC_MBOXQ_t *mbox; | ||
9523 | int rc, length, status = 0; | ||
9524 | uint32_t shdr_status, shdr_add_status; | ||
9525 | union lpfc_sli4_cfg_shdr *shdr; | ||
9526 | |||
9527 | if (!eq) | ||
9528 | return -ENODEV; | ||
9529 | mbox = mempool_alloc(eq->phba->mbox_mem_pool, GFP_KERNEL); | ||
9530 | if (!mbox) | ||
9531 | return -ENOMEM; | ||
9532 | length = (sizeof(struct lpfc_mbx_eq_destroy) - | ||
9533 | sizeof(struct lpfc_sli4_cfg_mhdr)); | ||
9534 | lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, | ||
9535 | LPFC_MBOX_OPCODE_EQ_DESTROY, | ||
9536 | length, LPFC_SLI4_MBX_EMBED); | ||
9537 | bf_set(lpfc_mbx_eq_destroy_q_id, &mbox->u.mqe.un.eq_destroy.u.request, | ||
9538 | eq->queue_id); | ||
9539 | mbox->vport = eq->phba->pport; | ||
9540 | mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; | ||
9541 | |||
9542 | rc = lpfc_sli_issue_mbox(eq->phba, mbox, MBX_POLL); | ||
9543 | /* The IOCTL status is embedded in the mailbox subheader. */ | ||
9544 | shdr = (union lpfc_sli4_cfg_shdr *) | ||
9545 | &mbox->u.mqe.un.eq_destroy.header.cfg_shdr; | ||
9546 | shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); | ||
9547 | shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); | ||
9548 | if (shdr_status || shdr_add_status || rc) { | ||
9549 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | ||
9550 | "2505 EQ_DESTROY mailbox failed with " | ||
9551 | "status x%x add_status x%x, mbx status x%x\n", | ||
9552 | shdr_status, shdr_add_status, rc); | ||
9553 | status = -ENXIO; | ||
9554 | } | ||
9555 | |||
9556 | /* Remove eq from any list */ | ||
9557 | list_del_init(&eq->list); | ||
9558 | if (rc != MBX_TIMEOUT) | ||
9559 | mempool_free(mbox, eq->phba->mbox_mem_pool); | ||
9560 | return status; | ||
9561 | } | ||
9562 | |||
9563 | /** | ||
9564 | * lpfc_cq_destroy - Destroy a Completion Queue on the HBA | ||
9565 | * @phba: Pointer to HBA context object. | ||
| * @cq: The queue structure associated with the queue to destroy. | ||
9566 | * | ||
9567 | * This function destroys a queue, as detailed in @cq, by sending a mailbox | ||
9568 | * command, specific to the type of queue, to the HBA. | ||
9569 | * | ||
9570 | * The @cq struct is used to get the queue ID of the queue to destroy. | ||
9571 | * | ||
9572 | * On success this function will return zero. If the queue destroy mailbox | ||
9573 | * command fails this function will return -ENXIO. | ||
9574 | **/ | ||
9575 | uint32_t | ||
9576 | lpfc_cq_destroy(struct lpfc_hba *phba, struct lpfc_queue *cq) | ||
9577 | { | ||
9578 | LPFC_MBOXQ_t *mbox; | ||
9579 | int rc, length, status = 0; | ||
9580 | uint32_t shdr_status, shdr_add_status; | ||
9581 | union lpfc_sli4_cfg_shdr *shdr; | ||
9582 | |||
9583 | if (!cq) | ||
9584 | return -ENODEV; | ||
9585 | mbox = mempool_alloc(cq->phba->mbox_mem_pool, GFP_KERNEL); | ||
9586 | if (!mbox) | ||
9587 | return -ENOMEM; | ||
9588 | length = (sizeof(struct lpfc_mbx_cq_destroy) - | ||
9589 | sizeof(struct lpfc_sli4_cfg_mhdr)); | ||
9590 | lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, | ||
9591 | LPFC_MBOX_OPCODE_CQ_DESTROY, | ||
9592 | length, LPFC_SLI4_MBX_EMBED); | ||
9593 | bf_set(lpfc_mbx_cq_destroy_q_id, &mbox->u.mqe.un.cq_destroy.u.request, | ||
9594 | cq->queue_id); | ||
9595 | mbox->vport = cq->phba->pport; | ||
9596 | mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; | ||
9597 | rc = lpfc_sli_issue_mbox(cq->phba, mbox, MBX_POLL); | ||
9598 | /* The IOCTL status is embedded in the mailbox subheader. */ | ||
9599 | shdr = (union lpfc_sli4_cfg_shdr *) | ||
9600 | &mbox->u.mqe.un.cq_destroy.header.cfg_shdr; | ||
9601 | shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); | ||
9602 | shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); | ||
9603 | if (shdr_status || shdr_add_status || rc) { | ||
9604 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | ||
9605 | "2506 CQ_DESTROY mailbox failed with " | ||
9606 | "status x%x add_status x%x, mbx status x%x\n", | ||
9607 | shdr_status, shdr_add_status, rc); | ||
9608 | status = -ENXIO; | ||
9609 | } | ||
9610 | /* Remove cq from any list */ | ||
9611 | list_del_init(&cq->list); | ||
9612 | if (rc != MBX_TIMEOUT) | ||
9613 | mempool_free(mbox, cq->phba->mbox_mem_pool); | ||
9614 | return status; | ||
9615 | } | ||
9616 | |||
9617 | /** | ||
9618 | * lpfc_wq_destroy - Destroy a Work Queue on the HBA | ||
9619 | * @phba: Pointer to HBA context object. | ||
| * @wq: The queue structure associated with the queue to destroy. | ||
9620 | * | ||
9621 | * This function destroys a queue, as detailed in @wq, by sending a mailbox | ||
9622 | * command, specific to the type of queue, to the HBA. | ||
9623 | * | ||
9624 | * The @wq struct is used to get the queue ID of the queue to destroy. | ||
9625 | * | ||
9626 | * On success this function will return zero. If the queue destroy mailbox | ||
9627 | * command fails this function will return -ENXIO. | ||
9628 | **/ | ||
9629 | uint32_t | ||
9630 | lpfc_wq_destroy(struct lpfc_hba *phba, struct lpfc_queue *wq) | ||
9631 | { | ||
9632 | LPFC_MBOXQ_t *mbox; | ||
9633 | int rc, length, status = 0; | ||
9634 | uint32_t shdr_status, shdr_add_status; | ||
9635 | union lpfc_sli4_cfg_shdr *shdr; | ||
9636 | |||
9637 | if (!wq) | ||
9638 | return -ENODEV; | ||
9639 | mbox = mempool_alloc(wq->phba->mbox_mem_pool, GFP_KERNEL); | ||
9640 | if (!mbox) | ||
9641 | return -ENOMEM; | ||
9642 | length = (sizeof(struct lpfc_mbx_wq_destroy) - | ||
9643 | sizeof(struct lpfc_sli4_cfg_mhdr)); | ||
9644 | lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, | ||
9645 | LPFC_MBOX_OPCODE_FCOE_WQ_DESTROY, | ||
9646 | length, LPFC_SLI4_MBX_EMBED); | ||
9647 | bf_set(lpfc_mbx_wq_destroy_q_id, &mbox->u.mqe.un.wq_destroy.u.request, | ||
9648 | wq->queue_id); | ||
9649 | mbox->vport = wq->phba->pport; | ||
9650 | mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; | ||
9651 | rc = lpfc_sli_issue_mbox(wq->phba, mbox, MBX_POLL); | ||
9652 | shdr = (union lpfc_sli4_cfg_shdr *) | ||
9653 | &mbox->u.mqe.un.wq_destroy.header.cfg_shdr; | ||
9654 | shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); | ||
9655 | shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); | ||
9656 | if (shdr_status || shdr_add_status || rc) { | ||
9657 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | ||
9658 | "2508 WQ_DESTROY mailbox failed with " | ||
9659 | "status x%x add_status x%x, mbx status x%x\n", | ||
9660 | shdr_status, shdr_add_status, rc); | ||
9661 | status = -ENXIO; | ||
9662 | } | ||
9663 | /* Remove wq from any list */ | ||
9664 | list_del_init(&wq->list); | ||
9665 | if (rc != MBX_TIMEOUT) | ||
9666 | mempool_free(mbox, wq->phba->mbox_mem_pool); | ||
9667 | return status; | ||
9668 | } | ||
9669 | |||
9670 | /** | ||
9671 | * lpfc_rq_destroy - Destroy a Receive Queue pair on the HBA | ||
9672 | * @hrq: The queue structure associated with the header receive queue. | ||
9673 | * @drq: The queue structure associated with the data receive queue. | ||
9674 | * This function destroys the receive queue pair, as detailed in @hrq and | ||
9675 | * @drq, by sending mailbox commands, one per queue, to the HBA. | ||
9676 | * | ||
9677 | * The @hrq and @drq structs are used to get the IDs of the queues to destroy. | ||
9678 | * | ||
9679 | * On success this function returns zero. If a queue destroy mailbox | ||
9680 | * command fails this function returns -ENXIO. | ||
9681 | **/ | ||
9682 | uint32_t | ||
9683 | lpfc_rq_destroy(struct lpfc_hba *phba, struct lpfc_queue *hrq, | ||
9684 | struct lpfc_queue *drq) | ||
9685 | { | ||
9686 | LPFC_MBOXQ_t *mbox; | ||
9687 | int rc, length, status = 0; | ||
9688 | uint32_t shdr_status, shdr_add_status; | ||
9689 | union lpfc_sli4_cfg_shdr *shdr; | ||
9690 | |||
9691 | if (!hrq || !drq) | ||
9692 | return -ENODEV; | ||
9693 | mbox = mempool_alloc(hrq->phba->mbox_mem_pool, GFP_KERNEL); | ||
9694 | if (!mbox) | ||
9695 | return -ENOMEM; | ||
9696 | length = (sizeof(struct lpfc_mbx_rq_destroy) - | ||
9697 | sizeof(struct mbox_header)); | ||
9698 | lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, | ||
9699 | LPFC_MBOX_OPCODE_FCOE_RQ_DESTROY, | ||
9700 | length, LPFC_SLI4_MBX_EMBED); | ||
9701 | bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request, | ||
9702 | hrq->queue_id); | ||
9703 | mbox->vport = hrq->phba->pport; | ||
9704 | mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; | ||
9705 | rc = lpfc_sli_issue_mbox(hrq->phba, mbox, MBX_POLL); | ||
9706 | /* The IOCTL status is embedded in the mailbox subheader. */ | ||
9707 | shdr = (union lpfc_sli4_cfg_shdr *) | ||
9708 | &mbox->u.mqe.un.rq_destroy.header.cfg_shdr; | ||
9709 | shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); | ||
9710 | shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); | ||
9711 | if (shdr_status || shdr_add_status || rc) { | ||
9712 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | ||
9713 | "2509 RQ_DESTROY mailbox failed with " | ||
9714 | "status x%x add_status x%x, mbx status x%x\n", | ||
9715 | shdr_status, shdr_add_status, rc); | ||
9716 | if (rc != MBX_TIMEOUT) | ||
9717 | mempool_free(mbox, hrq->phba->mbox_mem_pool); | ||
9718 | return -ENXIO; | ||
9719 | } | ||
9720 | bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request, | ||
9721 | drq->queue_id); | ||
9722 | rc = lpfc_sli_issue_mbox(drq->phba, mbox, MBX_POLL); | ||
9723 | shdr = (union lpfc_sli4_cfg_shdr *) | ||
9724 | &mbox->u.mqe.un.rq_destroy.header.cfg_shdr; | ||
9725 | shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); | ||
9726 | shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); | ||
9727 | if (shdr_status || shdr_add_status || rc) { | ||
9728 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | ||
9729 | "2510 RQ_DESTROY mailbox failed with " | ||
9730 | "status x%x add_status x%x, mbx status x%x\n", | ||
9731 | shdr_status, shdr_add_status, rc); | ||
9732 | status = -ENXIO; | ||
9733 | } | ||
9734 | list_del_init(&hrq->list); | ||
9735 | list_del_init(&drq->list); | ||
9736 | if (rc != MBX_TIMEOUT) | ||
9737 | mempool_free(mbox, hrq->phba->mbox_mem_pool); | ||
9738 | return status; | ||
9739 | } | ||
9740 | |||
9741 | /** | ||
9742 | * lpfc_sli4_post_sgl - Post scatter gather list for an XRI to HBA | ||
9743 | * @phba: Pointer to the HBA for which this call is being executed. | ||
9744 | * @pdma_phys_addr0: Physical address of the 1st SGL page. | ||
9745 | * @pdma_phys_addr1: Physical address of the 2nd SGL page. | ||
9746 | * @xritag: the xritag that ties this io to the SGL pages. | ||
9747 | * | ||
9748 | * This routine will post the sgl pages for the IO that has the xritag | ||
9749 | * that is in the iocbq structure. The xritag is assigned during iocbq | ||
9750 | * creation and persists for as long as the driver is loaded. | ||
9751 | * If the caller has 256 or fewer scatter gather segments to map then | ||
9752 | * pdma_phys_addr1 should be 0. | ||
9753 | * If the caller needs to map more than 256 scatter gather segments then | ||
9754 | * pdma_phys_addr1 should be a valid physical address. | ||
9755 | * Physical addresses for SGL pages must be 64 byte aligned. | ||
9756 | * When mapping two SGL pages, the first page must hold exactly 256 | ||
9757 | * entries; the second can hold between 1 and 256 entries. | ||
9758 | * | ||
9759 | * Return codes: | ||
9760 | * 0 - Success | ||
9761 | * -ENXIO, -ENOMEM - Failure | ||
9762 | **/ | ||
9763 | int | ||
9764 | lpfc_sli4_post_sgl(struct lpfc_hba *phba, | ||
9765 | dma_addr_t pdma_phys_addr0, | ||
9766 | dma_addr_t pdma_phys_addr1, | ||
9767 | uint16_t xritag) | ||
9768 | { | ||
9769 | struct lpfc_mbx_post_sgl_pages *post_sgl_pages; | ||
9770 | LPFC_MBOXQ_t *mbox; | ||
9771 | int rc; | ||
9772 | uint32_t shdr_status, shdr_add_status; | ||
9773 | union lpfc_sli4_cfg_shdr *shdr; | ||
9774 | |||
9775 | if (xritag == NO_XRI) { | ||
9776 | lpfc_printf_log(phba, KERN_ERR, LOG_SLI, | ||
9777 | "0364 Invalid param: xritag is NO_XRI\n"); | ||
9778 | return -EINVAL; | ||
9779 | } | ||
9780 | |||
9781 | mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); | ||
9782 | if (!mbox) | ||
9783 | return -ENOMEM; | ||
9784 | |||
9785 | lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, | ||
9786 | LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, | ||
9787 | sizeof(struct lpfc_mbx_post_sgl_pages) - | ||
9788 | sizeof(struct mbox_header), LPFC_SLI4_MBX_EMBED); | ||
9789 | |||
9790 | post_sgl_pages = (struct lpfc_mbx_post_sgl_pages *) | ||
9791 | &mbox->u.mqe.un.post_sgl_pages; | ||
9792 | bf_set(lpfc_post_sgl_pages_xri, post_sgl_pages, xritag); | ||
9793 | bf_set(lpfc_post_sgl_pages_xricnt, post_sgl_pages, 1); | ||
9794 | |||
9795 | post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_lo = | ||
9796 | cpu_to_le32(putPaddrLow(pdma_phys_addr0)); | ||
9797 | post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_hi = | ||
9798 | cpu_to_le32(putPaddrHigh(pdma_phys_addr0)); | ||
9799 | |||
9800 | post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_lo = | ||
9801 | cpu_to_le32(putPaddrLow(pdma_phys_addr1)); | ||
9802 | post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_hi = | ||
9803 | cpu_to_le32(putPaddrHigh(pdma_phys_addr1)); | ||
9804 | if (!phba->sli4_hba.intr_enable) | ||
9805 | rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); | ||
9806 | else | ||
9807 | rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO); | ||
9808 | /* The IOCTL status is embedded in the mailbox subheader. */ | ||
9809 | shdr = (union lpfc_sli4_cfg_shdr *) &post_sgl_pages->header.cfg_shdr; | ||
9810 | shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); | ||
9811 | shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); | ||
9812 | if (rc != MBX_TIMEOUT) | ||
9813 | mempool_free(mbox, phba->mbox_mem_pool); | ||
9814 | if (shdr_status || shdr_add_status || rc) { | ||
9815 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | ||
9816 | "2511 POST_SGL mailbox failed with " | ||
9817 | "status x%x add_status x%x, mbx status x%x\n", | ||
9818 | shdr_status, shdr_add_status, rc); | ||
9819 | rc = -ENXIO; | ||
9820 | } | ||
9821 | return rc; | ||
9822 | } | ||
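The page-splitting rule in the comment above can be made concrete. A user-space sketch, assuming the usual SLI-4 geometry of a 16-byte SGE and a 4 KB SGL page (hence 256 entries per page); the constants are illustrative rather than taken from the lpfc headers:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Assumed geometry: a 16-byte SGE and a 4 KB SGL page give the 256
 * entries-per-page rule described above. */
#define SGE_SIZE	16u
#define SGL_PAGE	4096u
#define SGES_PER_PAGE	(SGL_PAGE / SGE_SIZE)	/* 256 */

typedef uint64_t dma_addr_t;

/* Derive the two page addresses to hand to lpfc_sli4_post_sgl():
 * page 1 is only populated when the SGE count spills past page 0. */
static void sgl_page_addrs(dma_addr_t sgl_phys, unsigned int nsges,
			   dma_addr_t *pg0, dma_addr_t *pg1)
{
	assert((sgl_phys & 63) == 0);	/* 64-byte alignment rule */
	assert(nsges >= 1 && nsges <= 2 * SGES_PER_PAGE);

	*pg0 = sgl_phys;
	*pg1 = (nsges > SGES_PER_PAGE) ? sgl_phys + SGL_PAGE : 0;
}

int main(void)
{
	dma_addr_t pg0, pg1;

	sgl_page_addrs(0x10000, 200, &pg0, &pg1);
	printf("200 SGEs: pg0=%#llx pg1=%#llx\n",
	       (unsigned long long)pg0, (unsigned long long)pg1);
	sgl_page_addrs(0x10000, 300, &pg0, &pg1);
	printf("300 SGEs: pg0=%#llx pg1=%#llx\n",
	       (unsigned long long)pg0, (unsigned long long)pg1);
	return 0;
}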
9823 | /** | ||
9824 | * lpfc_sli4_remove_all_sgl_pages - Remove all SGL pages registered with the HBA | ||
9825 | * @phba: Pointer to the HBA for which this call is being executed. | ||
9826 | * | ||
9827 | * This routine will remove all of the sgl pages registered with the hba. | ||
9828 | * | ||
9829 | * Return codes: | ||
9830 | * 0 - Success | ||
9831 | * -ENXIO, -ENOMEM - Failure | ||
9832 | **/ | ||
9833 | int | ||
9834 | lpfc_sli4_remove_all_sgl_pages(struct lpfc_hba *phba) | ||
9835 | { | ||
9836 | LPFC_MBOXQ_t *mbox; | ||
9837 | int rc; | ||
9838 | uint32_t shdr_status, shdr_add_status; | ||
9839 | union lpfc_sli4_cfg_shdr *shdr; | ||
9840 | |||
9841 | mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); | ||
9842 | if (!mbox) | ||
9843 | return -ENOMEM; | ||
9844 | |||
9845 | lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, | ||
9846 | LPFC_MBOX_OPCODE_FCOE_REMOVE_SGL_PAGES, 0, | ||
9847 | LPFC_SLI4_MBX_EMBED); | ||
9848 | if (!phba->sli4_hba.intr_enable) | ||
9849 | rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); | ||
9850 | else | ||
9851 | rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO); | ||
9852 | /* The IOCTL status is embedded in the mailbox subheader. */ | ||
9853 | shdr = (union lpfc_sli4_cfg_shdr *) | ||
9854 | &mbox->u.mqe.un.sli4_config.header.cfg_shdr; | ||
9855 | shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); | ||
9856 | shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); | ||
9857 | if (rc != MBX_TIMEOUT) | ||
9858 | mempool_free(mbox, phba->mbox_mem_pool); | ||
9859 | if (shdr_status || shdr_add_status || rc) { | ||
9860 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | ||
9861 | "2512 REMOVE_ALL_SGL_PAGES mailbox failed with " | ||
9862 | "status x%x add_status x%x, mbx status x%x\n", | ||
9863 | shdr_status, shdr_add_status, rc); | ||
9864 | rc = -ENXIO; | ||
9865 | } | ||
9866 | return rc; | ||
9867 | } | ||
9868 | |||
9869 | /** | ||
9870 | * lpfc_sli4_next_xritag - Get an xritag for the io | ||
9871 | * @phba: Pointer to HBA context object. | ||
9872 | * | ||
9873 | * This function gets an xritag for the iocb. If there is no unused xritag | ||
9874 | * it will return NO_XRI (0xffff). | ||
9875 | * The function returns the allocated xritag if successful, else returns | ||
9876 | * NO_XRI; NO_XRI is not a valid xritag. | ||
9877 | * The caller is not required to hold any lock. | ||
9878 | **/ | ||
9879 | uint16_t | ||
9880 | lpfc_sli4_next_xritag(struct lpfc_hba *phba) | ||
9881 | { | ||
9882 | uint16_t xritag; | ||
9883 | |||
9884 | spin_lock_irq(&phba->hbalock); | ||
9885 | xritag = phba->sli4_hba.next_xri; | ||
9886 | if ((xritag != NO_XRI) && xritag < | ||
9887 | (phba->sli4_hba.max_cfg_param.max_xri | ||
9888 | + phba->sli4_hba.max_cfg_param.xri_base)) { | ||
9889 | phba->sli4_hba.next_xri++; | ||
9890 | phba->sli4_hba.max_cfg_param.xri_used++; | ||
9891 | spin_unlock_irq(&phba->hbalock); | ||
9892 | return xritag; | ||
9893 | } | ||
9894 | spin_unlock_irq(&phba->hbalock); | ||
9895 | |||
9896 | lpfc_printf_log(phba, KERN_ERR, LOG_SLI, | ||
9897 | "2004 Failed to allocate XRI. Last XRITAG is %d" | ||
9898 | " Max XRI is %d, Used XRI is %d\n", | ||
9899 | phba->sli4_hba.next_xri, | ||
9900 | phba->sli4_hba.max_cfg_param.max_xri, | ||
9901 | phba->sli4_hba.max_cfg_param.xri_used); | ||
9902 | return NO_XRI; | ||
9903 | } | ||
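The allocator above is a simple bump allocator over a fixed window. A user-space model of the same bookkeeping, with a plain struct standing in for the sli4_hba fields and the hbalock omitted:

#include <stdint.h>
#include <stdio.h>

#define NO_XRI 0xffffu	/* matches the driver's invalid-tag sentinel */

/* Illustrative stand-in for the sli4_hba bookkeeping fields. */
struct xri_pool {
	uint16_t next_xri;
	uint16_t xri_base;
	uint16_t max_xri;	/* number of tags, not the last tag */
	uint16_t xri_used;
};

/* Bump allocator: valid tags live in [xri_base, xri_base + max_xri).
 * The driver does this under hbalock; omitted here. */
static uint16_t next_xritag(struct xri_pool *p)
{
	uint16_t tag = p->next_xri;

	if (tag != NO_XRI && tag < p->xri_base + p->max_xri) {
		p->next_xri++;
		p->xri_used++;
		return tag;
	}
	return NO_XRI;
}

int main(void)
{
	struct xri_pool p = { .next_xri = 10, .xri_base = 10, .max_xri = 2 };

	printf("%u %u %u\n", next_xritag(&p), next_xritag(&p),
	       next_xritag(&p));	/* 10 11 65535 */
	return 0;
}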
9904 | |||
9905 | /** | ||
9906 | * lpfc_sli4_post_sgl_list - post a block of sgl list to the firmware. | ||
9907 | * @phba: pointer to lpfc hba data structure. | ||
9908 | * | ||
9909 | * This routine is invoked to post a block of driver's sgl pages to the | ||
9910 | * HBA using a non-embedded mailbox command. No lock is held. This routine | ||
9911 | * is only called when the driver is loading and after all IO has been | ||
9912 | * stopped. | ||
9913 | **/ | ||
9914 | int | ||
9915 | lpfc_sli4_post_sgl_list(struct lpfc_hba *phba) | ||
9916 | { | ||
9917 | struct lpfc_sglq *sglq_entry; | ||
9918 | struct lpfc_mbx_post_uembed_sgl_page1 *sgl; | ||
9919 | struct sgl_page_pairs *sgl_pg_pairs; | ||
9920 | void *viraddr; | ||
9921 | LPFC_MBOXQ_t *mbox; | ||
9922 | uint32_t reqlen, alloclen, pg_pairs; | ||
9923 | uint32_t mbox_tmo; | ||
9924 | uint16_t xritag_start = 0; | ||
9925 | int els_xri_cnt, rc = 0; | ||
9926 | uint32_t shdr_status, shdr_add_status; | ||
9927 | union lpfc_sli4_cfg_shdr *shdr; | ||
9928 | |||
9929 | /* The number of sgls to be posted */ | ||
9930 | els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba); | ||
9931 | |||
9932 | reqlen = els_xri_cnt * sizeof(struct sgl_page_pairs) + | ||
9933 | sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t); | ||
9934 | if (reqlen > PAGE_SIZE) { | ||
9935 | lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, | ||
9936 | "2559 Block sgl registration required DMA " | ||
9937 | "size (%d) greater than a page\n", reqlen); | ||
9938 | return -ENOMEM; | ||
9939 | } | ||
9940 | mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); | ||
9941 | if (!mbox) { | ||
9942 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | ||
9943 | "2560 Failed to allocate mbox cmd memory\n"); | ||
9944 | return -ENOMEM; | ||
9945 | } | ||
9946 | |||
9947 | /* Allocate DMA memory and set up the non-embedded mailbox command */ | ||
9948 | alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, | ||
9949 | LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, reqlen, | ||
9950 | LPFC_SLI4_MBX_NEMBED); | ||
9951 | |||
9952 | if (alloclen < reqlen) { | ||
9953 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | ||
9954 | "0285 Allocated DMA memory size (%d) is " | ||
9955 | "less than the requested DMA memory " | ||
9956 | "size (%d)\n", alloclen, reqlen); | ||
9957 | lpfc_sli4_mbox_cmd_free(phba, mbox); | ||
9958 | return -ENOMEM; | ||
9959 | } | ||
9960 | |||
9961 | /* Get the first SGE entry from the non-embedded DMA memory */ | ||
9962 | if (unlikely(!mbox->sge_array)) { | ||
9963 | lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, | ||
9964 | "2525 Failed to get the non-embedded SGE " | ||
9965 | "virtual address\n"); | ||
9966 | lpfc_sli4_mbox_cmd_free(phba, mbox); | ||
9967 | return -ENOMEM; | ||
9968 | } | ||
9969 | viraddr = mbox->sge_array->addr[0]; | ||
9970 | |||
9971 | /* Set up the SGL pages in the non-embedded DMA pages */ | ||
9972 | sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr; | ||
9973 | sgl_pg_pairs = &sgl->sgl_pg_pairs; | ||
9974 | |||
9975 | for (pg_pairs = 0; pg_pairs < els_xri_cnt; pg_pairs++) { | ||
9976 | sglq_entry = phba->sli4_hba.lpfc_els_sgl_array[pg_pairs]; | ||
9977 | /* Set up the sge entry */ | ||
9978 | sgl_pg_pairs->sgl_pg0_addr_lo = | ||
9979 | cpu_to_le32(putPaddrLow(sglq_entry->phys)); | ||
9980 | sgl_pg_pairs->sgl_pg0_addr_hi = | ||
9981 | cpu_to_le32(putPaddrHigh(sglq_entry->phys)); | ||
9982 | sgl_pg_pairs->sgl_pg1_addr_lo = | ||
9983 | cpu_to_le32(putPaddrLow(0)); | ||
9984 | sgl_pg_pairs->sgl_pg1_addr_hi = | ||
9985 | cpu_to_le32(putPaddrHigh(0)); | ||
9986 | /* Keep the first xritag on the list */ | ||
9987 | if (pg_pairs == 0) | ||
9988 | xritag_start = sglq_entry->sli4_xritag; | ||
9989 | sgl_pg_pairs++; | ||
9990 | } | ||
9991 | bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start); | ||
9992 | pg_pairs = (pg_pairs > 0) ? (pg_pairs - 1) : pg_pairs; | ||
9993 | bf_set(lpfc_post_sgl_pages_xricnt, sgl, pg_pairs); | ||
9994 | /* Perform endian conversion if necessary */ | ||
9995 | sgl->word0 = cpu_to_le32(sgl->word0); | ||
9996 | |||
9997 | if (!phba->sli4_hba.intr_enable) | ||
9998 | rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); | ||
9999 | else { | ||
10000 | mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG); | ||
10001 | rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); | ||
10002 | } | ||
10003 | shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr; | ||
10004 | shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); | ||
10005 | shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); | ||
10006 | if (rc != MBX_TIMEOUT) | ||
10007 | lpfc_sli4_mbox_cmd_free(phba, mbox); | ||
10008 | if (shdr_status || shdr_add_status || rc) { | ||
10009 | lpfc_printf_log(phba, KERN_ERR, LOG_SLI, | ||
10010 | "2513 POST_SGL_BLOCK mailbox command failed " | ||
10011 | "status x%x add_status x%x mbx status x%x\n", | ||
10012 | shdr_status, shdr_add_status, rc); | ||
10013 | rc = -ENXIO; | ||
10014 | } | ||
10015 | return rc; | ||
10016 | } | ||
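The reqlen check above caps a block post at one DMA page. A sketch of the same arithmetic, assuming an illustrative 16-byte sgl_page_pairs entry and an 8-byte cfg_shdr (the real structure sizes may differ):

#include <stdio.h>

/* Illustrative sizes: sgl_page_pairs modeled as four 32-bit address
 * words (16 bytes), cfg_shdr as 8 bytes, plus the leading
 * xri/xricnt word. */
#define PAIR_SZ		16u
#define SHDR_SZ		8u
#define WORD0_SZ	4u
#define PAGE_SZ		4096u

/* Same computation as in lpfc_sli4_post_sgl_list(): the payload must
 * fit in a single DMA page or the post is refused with -ENOMEM. */
static unsigned int sgl_block_reqlen(unsigned int xri_cnt)
{
	return xri_cnt * PAIR_SZ + SHDR_SZ + WORD0_SZ;
}

int main(void)
{
	unsigned int cnt, max = 0;

	for (cnt = 1; sgl_block_reqlen(cnt) <= PAGE_SZ; cnt++)
		max = cnt;
	printf("max page pairs per mailbox page: %u\n", max);
	printf("reqlen(%u) = %u bytes\n", max, sgl_block_reqlen(max));
	return 0;
}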
10017 | |||
10018 | /** | ||
10019 | * lpfc_sli4_post_scsi_sgl_block - post a block of scsi sgl list to firmware | ||
10020 | * @phba: pointer to lpfc hba data structure. | ||
10021 | * @sblist: pointer to scsi buffer list. | ||
10022 | * @cnt: number of scsi buffers on the list. | ||
10023 | * | ||
10024 | * This routine is invoked to post a block of @cnt scsi sgl pages from a | ||
10025 | * SCSI buffer list @sblist to the HBA using a non-embedded mailbox command. | ||
10026 | * No lock is held. | ||
10027 | * | ||
10028 | **/ | ||
10029 | int | ||
10030 | lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *phba, struct list_head *sblist, | ||
10031 | int cnt) | ||
10032 | { | ||
10033 | struct lpfc_scsi_buf *psb; | ||
10034 | struct lpfc_mbx_post_uembed_sgl_page1 *sgl; | ||
10035 | struct sgl_page_pairs *sgl_pg_pairs; | ||
10036 | void *viraddr; | ||
10037 | LPFC_MBOXQ_t *mbox; | ||
10038 | uint32_t reqlen, alloclen, pg_pairs; | ||
10039 | uint32_t mbox_tmo; | ||
10040 | uint16_t xritag_start = 0; | ||
10041 | int rc = 0; | ||
10042 | uint32_t shdr_status, shdr_add_status; | ||
10043 | dma_addr_t pdma_phys_bpl1; | ||
10044 | union lpfc_sli4_cfg_shdr *shdr; | ||
10045 | |||
10046 | /* Calculate the requested length of the dma memory */ | ||
10047 | reqlen = cnt * sizeof(struct sgl_page_pairs) + | ||
10048 | sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t); | ||
10049 | if (reqlen > PAGE_SIZE) { | ||
10050 | lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, | ||
10051 | "0217 Block sgl registration required DMA " | ||
10052 | "size (%d) greater than a page\n", reqlen); | ||
10053 | return -ENOMEM; | ||
10054 | } | ||
10055 | mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); | ||
10056 | if (!mbox) { | ||
10057 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | ||
10058 | "0283 Failed to allocate mbox cmd memory\n"); | ||
10059 | return -ENOMEM; | ||
10060 | } | ||
10061 | |||
10062 | /* Allocate DMA memory and set up the non-embedded mailbox command */ | ||
10063 | alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, | ||
10064 | LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, reqlen, | ||
10065 | LPFC_SLI4_MBX_NEMBED); | ||
10066 | |||
10067 | if (alloclen < reqlen) { | ||
10068 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | ||
10069 | "2561 Allocated DMA memory size (%d) is " | ||
10070 | "less than the requested DMA memory " | ||
10071 | "size (%d)\n", alloclen, reqlen); | ||
10072 | lpfc_sli4_mbox_cmd_free(phba, mbox); | ||
10073 | return -ENOMEM; | ||
10074 | } | ||
10075 | |||
10076 | /* Get the first SGE entry from the non-embedded DMA memory */ | ||
10077 | if (unlikely(!mbox->sge_array)) { | ||
10078 | lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, | ||
10079 | "2565 Failed to get the non-embedded SGE " | ||
10080 | "virtual address\n"); | ||
10081 | lpfc_sli4_mbox_cmd_free(phba, mbox); | ||
10082 | return -ENOMEM; | ||
10083 | } | ||
10084 | viraddr = mbox->sge_array->addr[0]; | ||
10085 | |||
10086 | /* Set up the SGL pages in the non-embedded DMA pages */ | ||
10087 | sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr; | ||
10088 | sgl_pg_pairs = &sgl->sgl_pg_pairs; | ||
10089 | |||
10090 | pg_pairs = 0; | ||
10091 | list_for_each_entry(psb, sblist, list) { | ||
10092 | /* Set up the sge entry */ | ||
10093 | sgl_pg_pairs->sgl_pg0_addr_lo = | ||
10094 | cpu_to_le32(putPaddrLow(psb->dma_phys_bpl)); | ||
10095 | sgl_pg_pairs->sgl_pg0_addr_hi = | ||
10096 | cpu_to_le32(putPaddrHigh(psb->dma_phys_bpl)); | ||
10097 | if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE) | ||
10098 | pdma_phys_bpl1 = psb->dma_phys_bpl + SGL_PAGE_SIZE; | ||
10099 | else | ||
10100 | pdma_phys_bpl1 = 0; | ||
10101 | sgl_pg_pairs->sgl_pg1_addr_lo = | ||
10102 | cpu_to_le32(putPaddrLow(pdma_phys_bpl1)); | ||
10103 | sgl_pg_pairs->sgl_pg1_addr_hi = | ||
10104 | cpu_to_le32(putPaddrHigh(pdma_phys_bpl1)); | ||
10105 | /* Keep the first xritag on the list */ | ||
10106 | if (pg_pairs == 0) | ||
10107 | xritag_start = psb->cur_iocbq.sli4_xritag; | ||
10108 | sgl_pg_pairs++; | ||
10109 | pg_pairs++; | ||
10110 | } | ||
10111 | bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start); | ||
10112 | bf_set(lpfc_post_sgl_pages_xricnt, sgl, pg_pairs); | ||
10113 | /* Perform endian conversion if necessary */ | ||
10114 | sgl->word0 = cpu_to_le32(sgl->word0); | ||
10115 | |||
10116 | if (!phba->sli4_hba.intr_enable) | ||
10117 | rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); | ||
10118 | else { | ||
10119 | mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG); | ||
10120 | rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); | ||
10121 | } | ||
10122 | shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr; | ||
10123 | shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); | ||
10124 | shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); | ||
10125 | if (rc != MBX_TIMEOUT) | ||
10126 | lpfc_sli4_mbox_cmd_free(phba, mbox); | ||
10127 | if (shdr_status || shdr_add_status || rc) { | ||
10128 | lpfc_printf_log(phba, KERN_ERR, LOG_SLI, | ||
10129 | "2564 POST_SGL_BLOCK mailbox command failed " | ||
10130 | "status x%x add_status x%x mbx status x%x\n", | ||
10131 | shdr_status, shdr_add_status, rc); | ||
10132 | rc = -ENXIO; | ||
10133 | } | ||
10134 | return rc; | ||
10135 | } | ||
10136 | |||
10137 | /** | ||
10138 | * lpfc_fc_frame_check - Check that this frame is a valid frame to handle | ||
10139 | * @phba: pointer to lpfc_hba struct that the frame was received on | ||
10140 | * @fc_hdr: A pointer to the FC Header data (In Big Endian Format) | ||
10141 | * | ||
10142 | * This function checks the fields in the @fc_hdr to see if the FC frame is a | ||
10143 | * valid type of frame that the LPFC driver will handle. This function will | ||
10144 | * return zero if the frame is a valid frame or a non-zero value when the | ||
10145 | * frame does not pass the check. | ||
10146 | **/ | ||
10147 | static int | ||
10148 | lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr) | ||
10149 | { | ||
10150 | char *rctl_names[] = FC_RCTL_NAMES_INIT; | ||
10151 | char *type_names[] = FC_TYPE_NAMES_INIT; | ||
10152 | struct fc_vft_header *fc_vft_hdr; | ||
10153 | |||
10154 | switch (fc_hdr->fh_r_ctl) { | ||
10155 | case FC_RCTL_DD_UNCAT: /* uncategorized information */ | ||
10156 | case FC_RCTL_DD_SOL_DATA: /* solicited data */ | ||
10157 | case FC_RCTL_DD_UNSOL_CTL: /* unsolicited control */ | ||
10158 | case FC_RCTL_DD_SOL_CTL: /* solicited control or reply */ | ||
10159 | case FC_RCTL_DD_UNSOL_DATA: /* unsolicited data */ | ||
10160 | case FC_RCTL_DD_DATA_DESC: /* data descriptor */ | ||
10161 | case FC_RCTL_DD_UNSOL_CMD: /* unsolicited command */ | ||
10162 | case FC_RCTL_DD_CMD_STATUS: /* command status */ | ||
10163 | case FC_RCTL_ELS_REQ: /* extended link services request */ | ||
10164 | case FC_RCTL_ELS_REP: /* extended link services reply */ | ||
10165 | case FC_RCTL_ELS4_REQ: /* FC-4 ELS request */ | ||
10166 | case FC_RCTL_ELS4_REP: /* FC-4 ELS reply */ | ||
10167 | case FC_RCTL_BA_NOP: /* basic link service NOP */ | ||
10168 | case FC_RCTL_BA_ABTS: /* basic link service abort */ | ||
10169 | case FC_RCTL_BA_RMC: /* remove connection */ | ||
10170 | case FC_RCTL_BA_ACC: /* basic accept */ | ||
10171 | case FC_RCTL_BA_RJT: /* basic reject */ | ||
10172 | case FC_RCTL_BA_PRMT: | ||
10173 | case FC_RCTL_ACK_1: /* acknowledge_1 */ | ||
10174 | case FC_RCTL_ACK_0: /* acknowledge_0 */ | ||
10175 | case FC_RCTL_P_RJT: /* port reject */ | ||
10176 | case FC_RCTL_F_RJT: /* fabric reject */ | ||
10177 | case FC_RCTL_P_BSY: /* port busy */ | ||
10178 | case FC_RCTL_F_BSY: /* fabric busy to data frame */ | ||
10179 | case FC_RCTL_F_BSYL: /* fabric busy to link control frame */ | ||
10180 | case FC_RCTL_LCR: /* link credit reset */ | ||
10181 | case FC_RCTL_END: /* end */ | ||
10182 | break; | ||
10183 | case FC_RCTL_VFTH: /* Virtual Fabric tagging Header */ | ||
10184 | fc_vft_hdr = (struct fc_vft_header *)fc_hdr; | ||
10185 | fc_hdr = &((struct fc_frame_header *)fc_vft_hdr)[1]; | ||
10186 | return lpfc_fc_frame_check(phba, fc_hdr); | ||
10187 | default: | ||
10188 | goto drop; | ||
10189 | } | ||
10190 | switch (fc_hdr->fh_type) { | ||
10191 | case FC_TYPE_BLS: | ||
10192 | case FC_TYPE_ELS: | ||
10193 | case FC_TYPE_FCP: | ||
10194 | case FC_TYPE_CT: | ||
10195 | break; | ||
10196 | case FC_TYPE_IP: | ||
10197 | case FC_TYPE_ILS: | ||
10198 | default: | ||
10199 | goto drop; | ||
10200 | } | ||
10201 | lpfc_printf_log(phba, KERN_INFO, LOG_ELS, | ||
10202 | "2538 Received frame rctl:%s type:%s\n", | ||
10203 | rctl_names[fc_hdr->fh_r_ctl], | ||
10204 | type_names[fc_hdr->fh_type]); | ||
10205 | return 0; | ||
10206 | drop: | ||
10207 | lpfc_printf_log(phba, KERN_WARNING, LOG_ELS, | ||
10208 | "2539 Dropped frame rctl:%s type:%s\n", | ||
10209 | rctl_names[fc_hdr->fh_r_ctl], | ||
10210 | type_names[fc_hdr->fh_type]); | ||
10211 | return 1; | ||
10212 | } | ||
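A compressed user-space model of the accept/drop decision: two predicates mirror the R_CTL and TYPE switches with a representative subset of cases (values as defined in include/scsi/fc/fc_fs.h), and the VFT recursion is left out:

#include <stdint.h>
#include <stdio.h>

/* Representative subset of the R_CTL values accepted above; the real
 * switch enumerates many more cases. */
static int rctl_ok(uint8_t r_ctl)
{
	switch (r_ctl) {
	case 0x00:	/* FC_RCTL_DD_UNCAT */
	case 0x22:	/* FC_RCTL_ELS_REQ */
	case 0x23:	/* FC_RCTL_ELS_REP */
	case 0x81:	/* FC_RCTL_BA_ABTS */
		return 1;
	default:
		return 0;
	}
}

static int type_ok(uint8_t type)
{
	switch (type) {
	case 0x00:	/* FC_TYPE_BLS */
	case 0x01:	/* FC_TYPE_ELS */
	case 0x08:	/* FC_TYPE_FCP */
	case 0x20:	/* FC_TYPE_CT */
		return 1;
	default:
		return 0;
	}
}

/* 0 = handle the frame, non-zero = drop it, mirroring the return
 * convention of lpfc_fc_frame_check(). */
static int frame_check(uint8_t r_ctl, uint8_t type)
{
	return !(rctl_ok(r_ctl) && type_ok(type));
}

int main(void)
{
	printf("ELS req/ELS: %s\n", frame_check(0x22, 0x01) ? "drop" : "ok");
	printf("uncat/IP:    %s\n", frame_check(0x00, 0x05) ? "drop" : "ok");
	return 0;
}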
10213 | |||
10214 | /** | ||
10215 | * lpfc_fc_hdr_get_vfi - Get the VFI from an FC frame | ||
10216 | * @fc_hdr: A pointer to the FC Header data (In Big Endian Format) | ||
10217 | * | ||
10218 | * This function processes the FC header to retrieve the VFI from the VFT | ||
10219 | * header, if one exists. This function will return the VFI if one exists | ||
10220 | * or 0 if no VFT header exists. | ||
10221 | **/ | ||
10222 | static uint32_t | ||
10223 | lpfc_fc_hdr_get_vfi(struct fc_frame_header *fc_hdr) | ||
10224 | { | ||
10225 | struct fc_vft_header *fc_vft_hdr = (struct fc_vft_header *)fc_hdr; | ||
10226 | |||
10227 | if (fc_hdr->fh_r_ctl != FC_RCTL_VFTH) | ||
10228 | return 0; | ||
10229 | return bf_get(fc_vft_hdr_vf_id, fc_vft_hdr); | ||
10230 | } | ||
10231 | |||
10232 | /** | ||
10233 | * lpfc_fc_frame_to_vport - Finds the vport that a frame is destined to | ||
10234 | * @phba: Pointer to the HBA structure to search for the vport on | ||
10235 | * @fc_hdr: A pointer to the FC Header data (In Big Endian Format) | ||
10236 | * @fcfi: The FCF index (FCFI) that the frame came from | ||
10237 | * | ||
10238 | * This function searches the @phba for a vport that matches the content of the | ||
10239 | * @fc_hdr passed in and the @fcfi. This function uses the @fc_hdr to fetch the | ||
10240 | * VFI, if the Virtual Fabric Tagging Header exists, and the DID. This function | ||
10241 | * returns the matching vport pointer or NULL if unable to match frame to a | ||
10242 | * vport. | ||
10243 | **/ | ||
10244 | static struct lpfc_vport * | ||
10245 | lpfc_fc_frame_to_vport(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr, | ||
10246 | uint16_t fcfi) | ||
10247 | { | ||
10248 | struct lpfc_vport **vports; | ||
10249 | struct lpfc_vport *vport = NULL; | ||
10250 | int i; | ||
10251 | uint32_t did = (fc_hdr->fh_d_id[0] << 16 | | ||
10252 | fc_hdr->fh_d_id[1] << 8 | | ||
10253 | fc_hdr->fh_d_id[2]); | ||
10254 | |||
10255 | vports = lpfc_create_vport_work_array(phba); | ||
10256 | if (vports != NULL) | ||
10257 | for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) { | ||
10258 | if (phba->fcf.fcfi == fcfi && | ||
10259 | vports[i]->vfi == lpfc_fc_hdr_get_vfi(fc_hdr) && | ||
10260 | vports[i]->fc_myDID == did) { | ||
10261 | vport = vports[i]; | ||
10262 | break; | ||
10263 | } | ||
10264 | } | ||
10265 | lpfc_destroy_vport_work_array(phba, vports); | ||
10266 | return vport; | ||
10267 | } | ||
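The D_ID here, like the S_ID in lpfc_prep_seq() below, is a 24-bit FC address assembled from three header bytes. A self-contained sketch of that shift-and-or:

#include <stdint.h>
#include <stdio.h>

/* FC addresses are 24-bit, carried as three bytes in the frame
 * header; this is the same shift-and-or used for the D_ID above and
 * the S_ID in lpfc_prep_seq(). */
static uint32_t fc_id_from_bytes(const uint8_t id[3])
{
	return (uint32_t)id[0] << 16 | (uint32_t)id[1] << 8 | id[2];
}

int main(void)
{
	uint8_t d_id[3] = { 0xff, 0xff, 0xfe };	/* fabric F_Port DID */

	printf("did = 0x%06x\n", fc_id_from_bytes(d_id));
	return 0;
}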
10268 | |||
10269 | /** | ||
10270 | * lpfc_fc_frame_add - Adds a frame to the vport's list of received sequences | ||
10271 | * @vport: The vport that the frame was received on | ||
10272 | * @dmabuf: pointer to a dmabuf that describes the hdr and data of the FC frame | ||
10273 | * This function searches through the existing incomplete sequences that have | ||
10274 | * been sent to this @vport. If the frame matches one of the incomplete | ||
10275 | * sequences then the dbuf in the @dmabuf is added to the list of frames that | ||
10276 | * make up that sequence. If no sequence is found that matches this frame then | ||
10277 | * the function will add the hbuf in the @dmabuf to the @vport's rcv_buffer_list. | ||
10278 | * This function returns a pointer to the first dmabuf in the sequence list that | ||
10279 | * the frame was linked to. | ||
10280 | **/ | ||
10281 | static struct hbq_dmabuf * | ||
10282 | lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf) | ||
10283 | { | ||
10284 | struct fc_frame_header *new_hdr; | ||
10285 | struct fc_frame_header *temp_hdr; | ||
10286 | struct lpfc_dmabuf *d_buf; | ||
10287 | struct lpfc_dmabuf *h_buf; | ||
10288 | struct hbq_dmabuf *seq_dmabuf = NULL; | ||
10289 | struct hbq_dmabuf *temp_dmabuf = NULL; | ||
10290 | |||
10291 | new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt; | ||
10292 | /* Use the hdr_buf to find the sequence that this frame belongs to */ | ||
10293 | list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) { | ||
10294 | temp_hdr = (struct fc_frame_header *)h_buf->virt; | ||
10295 | if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) || | ||
10296 | (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) || | ||
10297 | (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3))) | ||
10298 | continue; | ||
10299 | /* found a pending sequence that matches this frame */ | ||
10300 | seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf); | ||
10301 | break; | ||
10302 | } | ||
10303 | if (!seq_dmabuf) { | ||
10304 | /* | ||
10305 | * This indicates first frame received for this sequence. | ||
10306 | * Queue the buffer on the vport's rcv_buffer_list. | ||
10307 | */ | ||
10308 | list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list); | ||
10309 | return dmabuf; | ||
10310 | } | ||
10311 | temp_hdr = seq_dmabuf->hbuf.virt; | ||
10312 | if (be16_to_cpu(new_hdr->fh_seq_cnt) < be16_to_cpu(temp_hdr->fh_seq_cnt)) { | ||
10313 | list_add(&seq_dmabuf->dbuf.list, &dmabuf->dbuf.list); | ||
10314 | return dmabuf; | ||
10315 | } | ||
10316 | /* find the correct place in the sequence to insert this frame */ | ||
10317 | list_for_each_entry_reverse(d_buf, &seq_dmabuf->dbuf.list, list) { | ||
10318 | temp_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf); | ||
10319 | temp_hdr = (struct fc_frame_header *)temp_dmabuf->hbuf.virt; | ||
10320 | /* | ||
10321 | * If the frame's sequence count is greater than the frame on | ||
10322 | * the list then insert the frame right after this frame | ||
10323 | */ | ||
10324 | if (be16_to_cpu(new_hdr->fh_seq_cnt) > be16_to_cpu(temp_hdr->fh_seq_cnt)) { | ||
10325 | list_add(&dmabuf->dbuf.list, &temp_dmabuf->dbuf.list); | ||
10326 | return seq_dmabuf; | ||
10327 | } | ||
10328 | } | ||
10329 | return NULL; | ||
10330 | } | ||
10331 | |||
10332 | /** | ||
10333 | * lpfc_seq_complete - Indicates if a sequence is complete | ||
10334 | * @dmabuf: pointer to a dmabuf that describes the FC sequence | ||
10335 | * | ||
10336 | * This function checks the sequence, starting with the frame described by | ||
10337 | * @dmabuf, to see if all the frames associated with this sequence are present. | ||
10338 | * the frames associated with this sequence are linked to the @dmabuf using the | ||
10339 | * dbuf list. This function looks for three major things. 1) That the first frame | ||
10340 | * has a sequence count of zero. 2) There is a frame with last frame of sequence | ||
10341 | * set. 3) That there are no holes in the sequence count. The function will | ||
10342 | * return 1 when the sequence is complete, otherwise it will return 0. | ||
10343 | **/ | ||
10344 | static int | ||
10345 | lpfc_seq_complete(struct hbq_dmabuf *dmabuf) | ||
10346 | { | ||
10347 | struct fc_frame_header *hdr; | ||
10348 | struct lpfc_dmabuf *d_buf; | ||
10349 | struct hbq_dmabuf *seq_dmabuf; | ||
10350 | uint32_t fctl; | ||
10351 | int seq_count = 0; | ||
10352 | |||
10353 | hdr = (struct fc_frame_header *)dmabuf->hbuf.virt; | ||
10354 | /* make sure first frame of sequence has a sequence count of zero */ | ||
10355 | if (be16_to_cpu(hdr->fh_seq_cnt) != seq_count) | ||
10356 | return 0; | ||
10357 | fctl = (hdr->fh_f_ctl[0] << 16 | | ||
10358 | hdr->fh_f_ctl[1] << 8 | | ||
10359 | hdr->fh_f_ctl[2]); | ||
10360 | /* If last frame of sequence we can return success. */ | ||
10361 | if (fctl & FC_FC_END_SEQ) | ||
10362 | return 1; | ||
10363 | list_for_each_entry(d_buf, &dmabuf->dbuf.list, list) { | ||
10364 | seq_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf); | ||
10365 | hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt; | ||
10366 | /* If there is a hole in the sequence count then fail. */ | ||
10367 | if (++seq_count != be16_to_cpu(hdr->fh_seq_cnt)) | ||
10368 | return 0; | ||
10369 | fctl = (hdr->fh_f_ctl[0] << 16 | | ||
10370 | hdr->fh_f_ctl[1] << 8 | | ||
10371 | hdr->fh_f_ctl[2]); | ||
10372 | /* If last frame of sequence we can return success. */ | ||
10373 | if (fctl & FC_FC_END_SEQ) | ||
10374 | return 1; | ||
10375 | } | ||
10376 | return 0; | ||
10377 | } | ||
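A user-space model of the same three checks, run over a plain array ordered the way the driver keeps its dbuf list; FC_FC_END_SEQ uses the F_CTL bit value from fc_fs.h:

#include <stdint.h>
#include <stdio.h>

#define FC_FC_END_SEQ	(1u << 19)	/* F_CTL End_Sequence bit */

struct frame {
	uint16_t seq_cnt;
	uint32_t fctl;
};

/* Mirror of the three checks in lpfc_seq_complete(): first frame must
 * be SEQ_CNT 0, counts must be gapless, and some frame must carry
 * End_Sequence. Returns 1 when the sequence is complete. */
static int seq_complete(const struct frame *f, int n)
{
	uint16_t expect = 0;
	int i;

	for (i = 0; i < n; i++) {
		if (f[i].seq_cnt != expect++)
			return 0;	/* hole or bad start */
		if (f[i].fctl & FC_FC_END_SEQ)
			return 1;	/* last frame seen */
	}
	return 0;			/* no End_Sequence yet */
}

int main(void)
{
	struct frame done[] = { {0, 0}, {1, 0}, {2, FC_FC_END_SEQ} };
	struct frame hole[] = { {0, 0}, {2, FC_FC_END_SEQ} };

	printf("done=%d hole=%d\n", seq_complete(done, 3),
	       seq_complete(hole, 2));	/* done=1 hole=0 */
	return 0;
}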
10378 | |||
10379 | /** | ||
10380 | * lpfc_prep_seq - Prep sequence for ULP processing | ||
10381 | * @vport: Pointer to the vport on which this sequence was received | ||
10382 | * @seq_dmabuf: pointer to a dmabuf that describes the FC sequence | ||
10383 | * | ||
10384 | * This function takes a sequence, described by a list of frames, and creates | ||
10385 | * a list of iocbq structures to describe the sequence. This iocbq list will be | ||
10386 | * used to issue to the generic unsolicited sequence handler. This routine | ||
10387 | * returns a pointer to the first iocbq in the list. If the function is unable | ||
10388 | * to allocate an iocbq then it throw out the received frames that were not | ||
10389 | * able to be described and return a pointer to the first iocbq. If unable to | ||
10390 | * allocate any iocbqs (including the first) this function will return NULL. | ||
10391 | **/ | ||
10392 | static struct lpfc_iocbq * | ||
10393 | lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf) | ||
10394 | { | ||
10395 | struct lpfc_dmabuf *d_buf, *n_buf; | ||
10396 | struct lpfc_iocbq *first_iocbq, *iocbq; | ||
10397 | struct fc_frame_header *fc_hdr; | ||
10398 | uint32_t sid; | ||
10399 | |||
10400 | fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt; | ||
10401 | /* remove from receive buffer list */ | ||
10402 | list_del_init(&seq_dmabuf->hbuf.list); | ||
10403 | /* get the Remote Port's SID */ | ||
10404 | sid = (fc_hdr->fh_s_id[0] << 16 | | ||
10405 | fc_hdr->fh_s_id[1] << 8 | | ||
10406 | fc_hdr->fh_s_id[2]); | ||
10407 | /* Get an iocbq struct to fill in. */ | ||
10408 | first_iocbq = lpfc_sli_get_iocbq(vport->phba); | ||
10409 | if (first_iocbq) { | ||
10410 | /* Initialize the first IOCB. */ | ||
10411 | first_iocbq->iocb.ulpStatus = IOSTAT_SUCCESS; | ||
10412 | first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_SEQ64_CX; | ||
10413 | first_iocbq->iocb.ulpContext = be16_to_cpu(fc_hdr->fh_ox_id); | ||
10414 | first_iocbq->iocb.unsli3.rcvsli3.vpi = | ||
10415 | vport->vpi + vport->phba->vpi_base; | ||
10416 | /* put the first buffer into the first IOCBq */ | ||
10417 | first_iocbq->context2 = &seq_dmabuf->dbuf; | ||
10418 | first_iocbq->context3 = NULL; | ||
10419 | first_iocbq->iocb.ulpBdeCount = 1; | ||
10420 | first_iocbq->iocb.un.cont64[0].tus.f.bdeSize = | ||
10421 | LPFC_DATA_BUF_SIZE; | ||
10422 | first_iocbq->iocb.un.rcvels.remoteID = sid; | ||
10423 | } | ||
10424 | iocbq = first_iocbq; | ||
10425 | /* | ||
10426 | * Each IOCBq can have two Buffers assigned, so go through the list | ||
10427 | * of buffers for this sequence and save two buffers in each IOCBq | ||
10428 | */ | ||
10429 | list_for_each_entry_safe(d_buf, n_buf, &seq_dmabuf->dbuf.list, list) { | ||
10430 | if (!iocbq) { | ||
10431 | lpfc_in_buf_free(vport->phba, d_buf); | ||
10432 | continue; | ||
10433 | } | ||
10434 | if (!iocbq->context3) { | ||
10435 | iocbq->context3 = d_buf; | ||
10436 | iocbq->iocb.ulpBdeCount++; | ||
10437 | iocbq->iocb.unsli3.rcvsli3.bde2.tus.f.bdeSize = | ||
10438 | LPFC_DATA_BUF_SIZE; | ||
10439 | } else { | ||
10440 | iocbq = lpfc_sli_get_iocbq(vport->phba); | ||
10441 | if (!iocbq) { | ||
10442 | if (first_iocbq) { | ||
10443 | first_iocbq->iocb.ulpStatus = | ||
10444 | IOSTAT_FCP_RSP_ERROR; | ||
10445 | first_iocbq->iocb.un.ulpWord[4] = | ||
10446 | IOERR_NO_RESOURCES; | ||
10447 | } | ||
10448 | lpfc_in_buf_free(vport->phba, d_buf); | ||
10449 | continue; | ||
10450 | } | ||
10451 | iocbq->context2 = d_buf; | ||
10452 | iocbq->context3 = NULL; | ||
10453 | iocbq->iocb.ulpBdeCount = 1; | ||
10454 | iocbq->iocb.un.cont64[0].tus.f.bdeSize = | ||
10455 | LPFC_DATA_BUF_SIZE; | ||
10456 | iocbq->iocb.un.rcvels.remoteID = sid; | ||
10457 | list_add_tail(&iocbq->list, &first_iocbq->list); | ||
10458 | } | ||
10459 | } | ||
10460 | return first_iocbq; | ||
10461 | } | ||
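Since each iocbq carries at most two buffers (context2 and context3), an n-buffer sequence always packs into ceil(n/2) iocbqs. A one-function check of that invariant:

#include <stdio.h>

/* Each iocbq carries at most two data buffers (context2/context3),
 * so an n-buffer sequence packs into ceil(n / 2) iocbqs. */
static int iocbqs_needed(int nbufs)
{
	return (nbufs + 1) / 2;
}

int main(void)
{
	int n;

	for (n = 1; n <= 5; n++)
		printf("%d buffer(s) -> %d iocbq(s)\n", n, iocbqs_needed(n));
	return 0;
}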
10462 | |||
10463 | /** | ||
10464 | * lpfc_sli4_handle_received_buffer - Handle received buffers from firmware | ||
10465 | * @phba: Pointer to HBA context object. | ||
10466 | * | ||
10467 | * This function is called with no lock held. This function processes all | ||
10468 | * the received buffers and gives them to upper layers when a received buffer | ||
10469 | * indicates that it is the final frame in the sequence. The interrupt | ||
10470 | * service routine processes received buffers at interrupt contexts and adds | ||
10471 | * received dma buffers to the rb_pend_list queue and signals the worker thread. | ||
10472 | * The worker thread calls lpfc_sli4_handle_received_buffer, which will call the | ||
10473 | * appropriate receive function when the final frame in a sequence is received. | ||
10474 | **/ | ||
10475 | int | ||
10476 | lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba) | ||
10477 | { | ||
10478 | LIST_HEAD(cmplq); | ||
10479 | struct hbq_dmabuf *dmabuf, *seq_dmabuf; | ||
10480 | struct fc_frame_header *fc_hdr; | ||
10481 | struct lpfc_vport *vport; | ||
10482 | uint32_t fcfi; | ||
10483 | struct lpfc_iocbq *iocbq; | ||
10484 | |||
10485 | /* Clear hba flag and get all received buffers into the cmplq */ | ||
10486 | spin_lock_irq(&phba->hbalock); | ||
10487 | phba->hba_flag &= ~HBA_RECEIVE_BUFFER; | ||
10488 | list_splice_init(&phba->rb_pend_list, &cmplq); | ||
10489 | spin_unlock_irq(&phba->hbalock); | ||
10490 | |||
10491 | /* Process each received buffer */ | ||
10492 | while ((dmabuf = lpfc_sli_hbqbuf_get(&cmplq)) != NULL) { | ||
10493 | fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt; | ||
10494 | /* check to see if this a valid type of frame */ | ||
10495 | if (lpfc_fc_frame_check(phba, fc_hdr)) { | ||
10496 | lpfc_in_buf_free(phba, &dmabuf->dbuf); | ||
10497 | continue; | ||
10498 | } | ||
10499 | fcfi = bf_get(lpfc_rcqe_fcf_id, &dmabuf->rcqe); | ||
10500 | vport = lpfc_fc_frame_to_vport(phba, fc_hdr, fcfi); | ||
10501 | if (!vport) { | ||
10502 | /* throw out the frame */ | ||
10503 | lpfc_in_buf_free(phba, &dmabuf->dbuf); | ||
10504 | continue; | ||
10505 | } | ||
10506 | /* Link this frame */ | ||
10507 | seq_dmabuf = lpfc_fc_frame_add(vport, dmabuf); | ||
10508 | if (!seq_dmabuf) { | ||
10509 | /* unable to add frame to vport - throw it out */ | ||
10510 | lpfc_in_buf_free(phba, &dmabuf->dbuf); | ||
10511 | continue; | ||
10512 | } | ||
10513 | /* If not last frame in sequence continue processing frames. */ | ||
10514 | if (!lpfc_seq_complete(seq_dmabuf)) { | ||
10515 | /* | ||
10516 | * When saving off frames post a new one and mark this | ||
10517 | * frame to be freed when it is finished. | ||
10518 | */ | ||
10519 | lpfc_sli_hbqbuf_fill_hbqs(phba, LPFC_ELS_HBQ, 1); | ||
10520 | dmabuf->tag = -1; | ||
10521 | continue; | ||
10522 | } | ||
10523 | fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt; | ||
10524 | iocbq = lpfc_prep_seq(vport, seq_dmabuf); | ||
10525 | if (!lpfc_complete_unsol_iocb(phba, | ||
10526 | &phba->sli.ring[LPFC_ELS_RING], | ||
10527 | iocbq, fc_hdr->fh_r_ctl, | ||
10528 | fc_hdr->fh_type)) | ||
10529 | lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, | ||
10530 | "2540 Ring %d handler: unexpected Rctl " | ||
10531 | "x%x Type x%x received\n", | ||
10532 | LPFC_ELS_RING, | ||
10533 | fc_hdr->fh_r_ctl, fc_hdr->fh_type); | ||
10534 | } | ||
10535 | return 0; | ||
10536 | } | ||
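The hand-off pattern above (append under hbalock in interrupt context, splice the whole pending list out under the lock, then process it unlocked) is worth seeing in isolation. A minimal pthread sketch of the same splice, using an intrusive singly linked list rather than the kernel's list_head (compile with -pthread):

#include <pthread.h>
#include <stdio.h>

/* Minimal intrusive list modeling the rb_pend_list -> cmplq splice:
 * the producer (the ISR in the driver) appends under the lock, the
 * consumer moves the whole chain out in O(1) and walks it unlocked. */
struct node { int tag; struct node *next; };

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *pend_head, **pend_tail = &pend_head;

static void produce(struct node *n)
{
	pthread_mutex_lock(&lock);
	n->next = NULL;
	*pend_tail = n;
	pend_tail = &n->next;
	pthread_mutex_unlock(&lock);
}

static struct node *splice_pending(void)
{
	struct node *head;

	pthread_mutex_lock(&lock);
	head = pend_head;
	pend_head = NULL;
	pend_tail = &pend_head;
	pthread_mutex_unlock(&lock);
	return head;	/* process this chain without the lock held */
}

int main(void)
{
	struct node a = { 1 }, b = { 2 }, *n;

	produce(&a);
	produce(&b);
	for (n = splice_pending(); n; n = n->next)
		printf("processing buffer %d\n", n->tag);
	return 0;
}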