-rw-r--r-- | drivers/infiniband/hw/i40iw/i40iw_puda.c | 1436
-rw-r--r-- | drivers/infiniband/hw/i40iw/i40iw_puda.h |  183
2 files changed, 1619 insertions, 0 deletions
diff --git a/drivers/infiniband/hw/i40iw/i40iw_puda.c b/drivers/infiniband/hw/i40iw/i40iw_puda.c
new file mode 100644
index 000000000000..ae9971f93ecd
--- /dev/null
+++ b/drivers/infiniband/hw/i40iw/i40iw_puda.c
@@ -0,0 +1,1436 @@
1 | /******************************************************************************* | ||
2 | * | ||
3 | * Copyright (c) 2015-2016 Intel Corporation. All rights reserved. | ||
4 | * | ||
5 | * This software is available to you under a choice of one of two | ||
6 | * licenses. You may choose to be licensed under the terms of the GNU | ||
7 | * General Public License (GPL) Version 2, available from the file | ||
8 | * COPYING in the main directory of this source tree, or the | ||
9 | * OpenFabrics.org BSD license below: | ||
10 | * | ||
11 | * Redistribution and use in source and binary forms, with or | ||
12 | * without modification, are permitted provided that the following | ||
13 | * conditions are met: | ||
14 | * | ||
15 | * - Redistributions of source code must retain the above | ||
16 | * copyright notice, this list of conditions and the following | ||
17 | * disclaimer. | ||
18 | * | ||
19 | * - Redistributions in binary form must reproduce the above | ||
20 | * copyright notice, this list of conditions and the following | ||
21 | * disclaimer in the documentation and/or other materials | ||
22 | * provided with the distribution. | ||
23 | * | ||
24 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
25 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
26 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
27 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
28 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
29 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
30 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
31 | * SOFTWARE. | ||
32 | * | ||
33 | *******************************************************************************/ | ||
34 | |||
35 | #include "i40iw_osdep.h" | ||
36 | #include "i40iw_register.h" | ||
37 | #include "i40iw_status.h" | ||
38 | #include "i40iw_hmc.h" | ||
39 | |||
40 | #include "i40iw_d.h" | ||
41 | #include "i40iw_type.h" | ||
42 | #include "i40iw_p.h" | ||
43 | #include "i40iw_puda.h" | ||
44 | |||
45 | static void i40iw_ieq_receive(struct i40iw_sc_dev *dev, | ||
46 | struct i40iw_puda_buf *buf); | ||
47 | static void i40iw_ieq_tx_compl(struct i40iw_sc_dev *dev, void *sqwrid); | ||
48 | static void i40iw_ilq_putback_rcvbuf(struct i40iw_sc_qp *qp, u32 wqe_idx); | ||
49 | static enum i40iw_status_code i40iw_puda_replenish_rq(struct i40iw_puda_rsrc | ||
50 | *rsrc, bool initial); | ||
51 | /** | ||
52 | * i40iw_puda_get_listbuf - get buffer from puda list | ||
53 | * @list: list to use for buffers (ILQ or IEQ) | ||
54 | */ | ||
55 | static struct i40iw_puda_buf *i40iw_puda_get_listbuf(struct list_head *list) | ||
56 | { | ||
57 | struct i40iw_puda_buf *buf = NULL; | ||
58 | |||
59 | if (!list_empty(list)) { | ||
60 | buf = (struct i40iw_puda_buf *)list->next; | ||
61 | list_del((struct list_head *)&buf->list); | ||
62 | } | ||
63 | return buf; | ||
64 | } | ||
65 | |||
66 | /** | ||
67 | * i40iw_puda_get_bufpool - return buffer from resource | ||
68 | * @rsrc: resource to use for buffer | ||
69 | */ | ||
70 | struct i40iw_puda_buf *i40iw_puda_get_bufpool(struct i40iw_puda_rsrc *rsrc) | ||
71 | { | ||
72 | struct i40iw_puda_buf *buf = NULL; | ||
73 | struct list_head *list = &rsrc->bufpool; | ||
74 | unsigned long flags; | ||
75 | |||
76 | spin_lock_irqsave(&rsrc->bufpool_lock, flags); | ||
77 | buf = i40iw_puda_get_listbuf(list); | ||
78 | if (buf) | ||
79 | rsrc->avail_buf_count--; | ||
80 | else | ||
81 | rsrc->stats_buf_alloc_fail++; | ||
82 | spin_unlock_irqrestore(&rsrc->bufpool_lock, flags); | ||
83 | return buf; | ||
84 | } | ||
85 | |||
86 | /** | ||
87 | * i40iw_puda_ret_bufpool - return buffer to rsrc list | ||
88 | * @rsrc: resource to use for buffer | ||
90 | * @buf: buffer to return to resource | ||
90 | */ | ||
91 | void i40iw_puda_ret_bufpool(struct i40iw_puda_rsrc *rsrc, | ||
92 | struct i40iw_puda_buf *buf) | ||
93 | { | ||
94 | unsigned long flags; | ||
95 | |||
96 | spin_lock_irqsave(&rsrc->bufpool_lock, flags); | ||
97 | list_add(&buf->list, &rsrc->bufpool); | ||
98 | spin_unlock_irqrestore(&rsrc->bufpool_lock, flags); | ||
99 | rsrc->avail_buf_count++; | ||
100 | } | ||
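/*
 * Illustrative sketch, not part of this patch: pool buffers follow a
 * simple get/use/return cycle ("frame" and "frame_len" are hypothetical).
 *
 *	struct i40iw_puda_buf *buf = i40iw_puda_get_bufpool(rsrc);
 *
 *	if (!buf)
 *		return I40IW_ERR_NO_TXBUFS;
 *	memcpy(buf->mem.va, frame, frame_len);
 *	...
 *	i40iw_puda_ret_bufpool(rsrc, buf);
 *
 * Buffers handed to hardware are returned from the completion path
 * instead, as i40iw_ieq_tx_compl() does near the end of this file.
 */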
101 | |||
102 | /** | ||
103 | * i40iw_puda_post_recvbuf - set wqe for rcv buffer | ||
104 | * @rsrc: resource ptr | ||
105 | * @wqe_idx: wqe index to use | ||
106 | * @buf: puda buffer for rcv q | ||
107 | * @initial: flag if during init time | ||
108 | */ | ||
109 | static void i40iw_puda_post_recvbuf(struct i40iw_puda_rsrc *rsrc, u32 wqe_idx, | ||
110 | struct i40iw_puda_buf *buf, bool initial) | ||
111 | { | ||
112 | u64 *wqe; | ||
113 | struct i40iw_sc_qp *qp = &rsrc->qp; | ||
114 | u64 offset24 = 0; | ||
115 | |||
116 | qp->qp_uk.rq_wrid_array[wqe_idx] = (uintptr_t)buf; | ||
117 | wqe = qp->qp_uk.rq_base[wqe_idx].elem; | ||
118 | i40iw_debug(rsrc->dev, I40IW_DEBUG_PUDA, | ||
119 | "%s: wqe_idx= %d buf = %p wqe = %p\n", __func__, | ||
120 | wqe_idx, buf, wqe); | ||
121 | if (!initial) | ||
122 | get_64bit_val(wqe, 24, &offset24); | ||
123 | |||
124 | offset24 = (offset24) ? 0 : LS_64(1, I40IWQPSQ_VALID); | ||
125 | set_64bit_val(wqe, 24, offset24); | ||
126 | |||
127 | set_64bit_val(wqe, 0, buf->mem.pa); | ||
128 | set_64bit_val(wqe, 8, | ||
129 | LS_64(buf->mem.size, I40IWQPSQ_FRAG_LEN)); | ||
130 | set_64bit_val(wqe, 24, offset24); | ||
131 | } | ||
132 | |||
133 | /** | ||
134 | * i40iw_puda_replenish_rq - post rcv buffers | ||
135 | * @rsrc: resource to use for buffer | ||
136 | * @initial: flag if during init time | ||
137 | */ | ||
138 | static enum i40iw_status_code i40iw_puda_replenish_rq(struct i40iw_puda_rsrc *rsrc, | ||
139 | bool initial) | ||
140 | { | ||
141 | u32 i; | ||
142 | u32 invalid_cnt = rsrc->rxq_invalid_cnt; | ||
143 | struct i40iw_puda_buf *buf = NULL; | ||
144 | |||
145 | for (i = 0; i < invalid_cnt; i++) { | ||
146 | buf = i40iw_puda_get_bufpool(rsrc); | ||
147 | if (!buf) | ||
148 | return I40IW_ERR_list_empty; | ||
149 | i40iw_puda_post_recvbuf(rsrc, rsrc->rx_wqe_idx, buf, | ||
150 | initial); | ||
151 | rsrc->rx_wqe_idx = | ||
152 | ((rsrc->rx_wqe_idx + 1) % rsrc->rq_size); | ||
153 | rsrc->rxq_invalid_cnt--; | ||
154 | } | ||
155 | return 0; | ||
156 | } | ||
157 | |||
158 | /** | ||
159 | * i40iw_puda_alloc_buf - allocate mem for buffer | ||
160 | * @dev: iwarp device | ||
161 | * @length: length of buffer | ||
162 | */ | ||
163 | static struct i40iw_puda_buf *i40iw_puda_alloc_buf(struct i40iw_sc_dev *dev, | ||
164 | u32 length) | ||
165 | { | ||
166 | struct i40iw_puda_buf *buf = NULL; | ||
167 | struct i40iw_virt_mem buf_mem; | ||
168 | enum i40iw_status_code ret; | ||
169 | |||
170 | ret = i40iw_allocate_virt_mem(dev->hw, &buf_mem, | ||
171 | sizeof(struct i40iw_puda_buf)); | ||
172 | if (ret) { | ||
173 | i40iw_debug(dev, I40IW_DEBUG_PUDA, | ||
174 | "%s: error mem for buf\n", __func__); | ||
175 | return NULL; | ||
176 | } | ||
177 | buf = (struct i40iw_puda_buf *)buf_mem.va; | ||
178 | ret = i40iw_allocate_dma_mem(dev->hw, &buf->mem, length, 1); | ||
179 | if (ret) { | ||
180 | i40iw_debug(dev, I40IW_DEBUG_PUDA, | ||
181 | "%s: error dma mem for buf\n", __func__); | ||
182 | i40iw_free_virt_mem(dev->hw, &buf_mem); | ||
183 | return NULL; | ||
184 | } | ||
185 | buf->buf_mem.va = buf_mem.va; | ||
186 | buf->buf_mem.size = buf_mem.size; | ||
187 | return buf; | ||
188 | } | ||
189 | |||
190 | /** | ||
191 | * i40iw_puda_dele_buf - delete buffer back to system | ||
192 | * @dev: iwarp device | ||
193 | * @buf: buffer to free | ||
194 | */ | ||
195 | static void i40iw_puda_dele_buf(struct i40iw_sc_dev *dev, | ||
196 | struct i40iw_puda_buf *buf) | ||
197 | { | ||
198 | i40iw_free_dma_mem(dev->hw, &buf->mem); | ||
199 | i40iw_free_virt_mem(dev->hw, &buf->buf_mem); | ||
200 | } | ||
201 | |||
202 | /** | ||
203 | * i40iw_puda_get_next_send_wqe - return next wqe for processing | ||
204 | * @qp: puda qp for wqe | ||
205 | * @wqe_idx: wqe index for caller | ||
206 | */ | ||
207 | static u64 *i40iw_puda_get_next_send_wqe(struct i40iw_qp_uk *qp, u32 *wqe_idx) | ||
208 | { | ||
209 | u64 *wqe = NULL; | ||
210 | enum i40iw_status_code ret_code = 0; | ||
211 | |||
212 | *wqe_idx = I40IW_RING_GETCURRENT_HEAD(qp->sq_ring); | ||
213 | if (!*wqe_idx) | ||
214 | qp->swqe_polarity = !qp->swqe_polarity; | ||
215 | I40IW_RING_MOVE_HEAD(qp->sq_ring, ret_code); | ||
216 | if (ret_code) | ||
217 | return wqe; | ||
218 | wqe = qp->sq_base[*wqe_idx].elem; | ||
219 | |||
220 | return wqe; | ||
221 | } | ||
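/*
 * Note on polarity, illustrative: the sq is a ring, and swqe_polarity
 * toggles each time the head wraps back to index 0.  Hardware compares a
 * wqe's valid bit against the current polarity to tell freshly written
 * wqes from stale ones left over from the previous pass.  For example,
 * with sq_size = 4, pass one writes wqes 0..3 with valid = 1 and pass
 * two overwrites them with valid = 0, so no separate "empty" marker is
 * needed.
 */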
222 | |||
223 | /** | ||
224 | * i40iw_puda_poll_info - poll cq for completion | ||
225 | * @cq: cq for poll | ||
226 | * @info: info return for successful completion | ||
227 | */ | ||
228 | static enum i40iw_status_code i40iw_puda_poll_info(struct i40iw_sc_cq *cq, | ||
229 | struct i40iw_puda_completion_info *info) | ||
230 | { | ||
231 | u64 qword0, qword2, qword3; | ||
232 | u64 *cqe; | ||
233 | u64 comp_ctx; | ||
234 | bool valid_bit; | ||
235 | u32 major_err, minor_err; | ||
236 | bool error; | ||
237 | |||
238 | cqe = (u64 *)I40IW_GET_CURRENT_CQ_ELEMENT(&cq->cq_uk); | ||
239 | get_64bit_val(cqe, 24, &qword3); | ||
240 | valid_bit = (bool)RS_64(qword3, I40IW_CQ_VALID); | ||
241 | |||
242 | if (valid_bit != cq->cq_uk.polarity) | ||
243 | return I40IW_ERR_QUEUE_EMPTY; | ||
244 | |||
245 | i40iw_debug_buf(cq->dev, I40IW_DEBUG_PUDA, "PUDA CQE", cqe, 32); | ||
246 | error = (bool)RS_64(qword3, I40IW_CQ_ERROR); | ||
247 | if (error) { | ||
248 | i40iw_debug(cq->dev, I40IW_DEBUG_PUDA, "%s receive error\n", __func__); | ||
249 | major_err = (u32)(RS_64(qword3, I40IW_CQ_MAJERR)); | ||
250 | minor_err = (u32)(RS_64(qword3, I40IW_CQ_MINERR)); | ||
251 | info->compl_error = major_err << 16 | minor_err; | ||
252 | return I40IW_ERR_CQ_COMPL_ERROR; | ||
253 | } | ||
254 | |||
255 | get_64bit_val(cqe, 0, &qword0); | ||
256 | get_64bit_val(cqe, 16, &qword2); | ||
257 | |||
258 | info->q_type = (u8)RS_64(qword3, I40IW_CQ_SQ); | ||
259 | info->qp_id = (u32)RS_64(qword2, I40IWCQ_QPID); | ||
260 | |||
261 | get_64bit_val(cqe, 8, &comp_ctx); | ||
262 | info->qp = (struct i40iw_qp_uk *)(unsigned long)comp_ctx; | ||
263 | info->wqe_idx = (u32)RS_64(qword3, I40IW_CQ_WQEIDX); | ||
264 | |||
265 | if (info->q_type == I40IW_CQE_QTYPE_RQ) { | ||
266 | info->vlan_valid = (bool)RS_64(qword3, I40IW_VLAN_TAG_VALID); | ||
267 | info->l4proto = (u8)RS_64(qword2, I40IW_UDA_L4PROTO); | ||
268 | info->l3proto = (u8)RS_64(qword2, I40IW_UDA_L3PROTO); | ||
269 | info->payload_len = (u16)RS_64(qword0, I40IW_UDA_PAYLOADLEN); | ||
270 | } | ||
271 | |||
272 | return 0; | ||
273 | } | ||
274 | |||
275 | /** | ||
276 | * i40iw_puda_poll_completion - processes completion for cq | ||
277 | * @dev: iwarp device | ||
278 | * @cq: cq getting interrupt | ||
279 | * @compl_err: return any completion err | ||
280 | */ | ||
281 | enum i40iw_status_code i40iw_puda_poll_completion(struct i40iw_sc_dev *dev, | ||
282 | struct i40iw_sc_cq *cq, u32 *compl_err) | ||
283 | { | ||
284 | struct i40iw_qp_uk *qp; | ||
285 | struct i40iw_cq_uk *cq_uk = &cq->cq_uk; | ||
286 | struct i40iw_puda_completion_info info; | ||
287 | enum i40iw_status_code ret = 0; | ||
288 | struct i40iw_puda_buf *buf; | ||
289 | struct i40iw_puda_rsrc *rsrc; | ||
290 | void *sqwrid; | ||
291 | u8 cq_type = cq->cq_type; | ||
292 | unsigned long flags; | ||
293 | |||
294 | if ((cq_type == I40IW_CQ_TYPE_ILQ) || (cq_type == I40IW_CQ_TYPE_IEQ)) { | ||
295 | rsrc = (cq_type == I40IW_CQ_TYPE_ILQ) ? dev->ilq : dev->ieq; | ||
296 | } else { | ||
297 | i40iw_debug(dev, I40IW_DEBUG_PUDA, "%s qp_type error\n", __func__); | ||
298 | return I40IW_ERR_BAD_PTR; | ||
299 | } | ||
300 | memset(&info, 0, sizeof(info)); | ||
301 | ret = i40iw_puda_poll_info(cq, &info); | ||
302 | *compl_err = info.compl_error; | ||
303 | if (ret == I40IW_ERR_QUEUE_EMPTY) | ||
304 | return ret; | ||
305 | if (ret) | ||
306 | goto done; | ||
307 | |||
308 | qp = info.qp; | ||
309 | if (!qp || !rsrc) { | ||
310 | ret = I40IW_ERR_BAD_PTR; | ||
311 | goto done; | ||
312 | } | ||
313 | |||
314 | if (qp->qp_id != rsrc->qp_id) { | ||
315 | ret = I40IW_ERR_BAD_PTR; | ||
316 | goto done; | ||
317 | } | ||
318 | |||
319 | if (info.q_type == I40IW_CQE_QTYPE_RQ) { | ||
320 | buf = (struct i40iw_puda_buf *)(uintptr_t)qp->rq_wrid_array[info.wqe_idx]; | ||
321 | /* Get all the tcpip information in the buf header */ | ||
322 | ret = i40iw_puda_get_tcpip_info(&info, buf); | ||
323 | if (ret) { | ||
324 | rsrc->stats_rcvd_pkt_err++; | ||
325 | if (cq_type == I40IW_CQ_TYPE_ILQ) { | ||
326 | i40iw_ilq_putback_rcvbuf(&rsrc->qp, | ||
327 | info.wqe_idx); | ||
328 | } else { | ||
329 | i40iw_puda_ret_bufpool(rsrc, buf); | ||
330 | i40iw_puda_replenish_rq(rsrc, false); | ||
331 | } | ||
332 | goto done; | ||
333 | } | ||
334 | |||
335 | rsrc->stats_pkt_rcvd++; | ||
336 | rsrc->compl_rxwqe_idx = info.wqe_idx; | ||
337 | i40iw_debug(dev, I40IW_DEBUG_PUDA, "%s RQ completion\n", __func__); | ||
338 | rsrc->receive(rsrc->dev, buf); | ||
339 | if (cq_type == I40IW_CQ_TYPE_ILQ) | ||
340 | i40iw_ilq_putback_rcvbuf(&rsrc->qp, info.wqe_idx); | ||
341 | else | ||
342 | i40iw_puda_replenish_rq(rsrc, false); | ||
343 | |||
344 | } else { | ||
345 | i40iw_debug(dev, I40IW_DEBUG_PUDA, "%s SQ completion\n", __func__); | ||
346 | sqwrid = (void *)(uintptr_t)qp->sq_wrtrk_array[info.wqe_idx].wrid; | ||
347 | I40IW_RING_SET_TAIL(qp->sq_ring, info.wqe_idx); | ||
348 | rsrc->xmit_complete(rsrc->dev, sqwrid); | ||
349 | spin_lock_irqsave(&rsrc->bufpool_lock, flags); | ||
350 | rsrc->tx_wqe_avail_cnt++; | ||
351 | spin_unlock_irqrestore(&rsrc->bufpool_lock, flags); | ||
352 | if (!list_empty(&dev->ilq->txpend)) | ||
353 | i40iw_puda_send_buf(dev->ilq, NULL); | ||
354 | } | ||
355 | |||
356 | done: | ||
357 | I40IW_RING_MOVE_HEAD(cq_uk->cq_ring, ret); | ||
358 | if (I40IW_RING_GETCURRENT_HEAD(cq_uk->cq_ring) == 0) | ||
359 | cq_uk->polarity = !cq_uk->polarity; | ||
360 | /* update cq tail in cq shadow memory also */ | ||
361 | I40IW_RING_MOVE_TAIL(cq_uk->cq_ring); | ||
362 | set_64bit_val(cq_uk->shadow_area, 0, | ||
363 | I40IW_RING_GETCURRENT_HEAD(cq_uk->cq_ring)); | ||
364 | return 0; | ||
365 | } | ||
366 | |||
367 | /** | ||
368 | * i40iw_puda_send - complete send wqe for transmit | ||
369 | * @qp: puda qp for send | ||
370 | * @info: buffer information for transmit | ||
371 | */ | ||
372 | enum i40iw_status_code i40iw_puda_send(struct i40iw_sc_qp *qp, | ||
373 | struct i40iw_puda_send_info *info) | ||
374 | { | ||
375 | u64 *wqe; | ||
376 | u32 iplen, l4len; | ||
377 | u64 header[2]; | ||
378 | u32 wqe_idx; | ||
379 | u8 iipt; | ||
380 | |||
381 | /* header lengths below are in 32-bit words */ | ||
382 | l4len = info->tcplen >> 2; | ||
383 | if (info->ipv4) { | ||
384 | iipt = 3; | ||
385 | iplen = 5; | ||
386 | } else { | ||
387 | iipt = 1; | ||
388 | iplen = 10; | ||
389 | } | ||
390 | |||
391 | wqe = i40iw_puda_get_next_send_wqe(&qp->qp_uk, &wqe_idx); | ||
392 | if (!wqe) | ||
393 | return I40IW_ERR_QP_TOOMANY_WRS_POSTED; | ||
394 | qp->qp_uk.sq_wrtrk_array[wqe_idx].wrid = (uintptr_t)info->scratch; | ||
395 | /* Third line of WQE descriptor */ | ||
396 | /* the MACLEN field takes the mac header length in 16-bit words */ | ||
397 | header[0] = LS_64((info->maclen >> 1), I40IW_UDA_QPSQ_MACLEN) | | ||
398 | LS_64(iplen, I40IW_UDA_QPSQ_IPLEN) | LS_64(1, I40IW_UDA_QPSQ_L4T) | | ||
399 | LS_64(iipt, I40IW_UDA_QPSQ_IIPT) | | ||
400 | LS_64(l4len, I40IW_UDA_QPSQ_L4LEN); | ||
401 | /* Fourth line of WQE descriptor */ | ||
402 | header[1] = LS_64(I40IW_OP_TYPE_SEND, I40IW_UDA_QPSQ_OPCODE) | | ||
403 | LS_64(1, I40IW_UDA_QPSQ_SIGCOMPL) | | ||
404 | LS_64(info->doloopback, I40IW_UDA_QPSQ_DOLOOPBACK) | | ||
405 | LS_64(qp->qp_uk.swqe_polarity, I40IW_UDA_QPSQ_VALID); | ||
406 | |||
407 | set_64bit_val(wqe, 0, info->paddr); | ||
408 | set_64bit_val(wqe, 8, LS_64(info->len, I40IWQPSQ_FRAG_LEN)); | ||
409 | set_64bit_val(wqe, 16, header[0]); | ||
410 | set_64bit_val(wqe, 24, header[1]); | ||
411 | |||
412 | i40iw_debug_buf(qp->dev, I40IW_DEBUG_PUDA, "PUDA SEND WQE", wqe, 32); | ||
413 | i40iw_qp_post_wr(&qp->qp_uk); | ||
414 | return 0; | ||
415 | } | ||
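/*
 * Worked example, illustrative only: for an ipv4 frame with a 14-byte
 * mac header and a 20-byte tcp header, the descriptor fields above come
 * out as maclen >> 1 = 7 (16-bit words), iplen = 5 and l4len = 5 (both
 * 32-bit words) and iipt = 3; an ipv6 frame instead uses iplen = 10 and
 * iipt = 1.  i40iw_puda_send_buf() below shows how the send_info is
 * filled from a puda buffer.
 */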
416 | |||
417 | /** | ||
418 | * i40iw_puda_send_buf - transmit puda buffer | ||
419 | * @rsrc: resource to use for buffer | ||
420 | * @buf: puda buffer to transmit | ||
421 | */ | ||
422 | void i40iw_puda_send_buf(struct i40iw_puda_rsrc *rsrc, struct i40iw_puda_buf *buf) | ||
423 | { | ||
424 | struct i40iw_puda_send_info info; | ||
425 | enum i40iw_status_code ret = 0; | ||
426 | unsigned long flags; | ||
427 | |||
428 | spin_lock_irqsave(&rsrc->bufpool_lock, flags); | ||
429 | /* if no wqe available or not from a completion and we have | ||
430 | * pending buffers, we must queue the new buffer | ||
431 | */ | ||
432 | if (!rsrc->tx_wqe_avail_cnt || (buf && !list_empty(&rsrc->txpend))) { | ||
433 | list_add_tail(&buf->list, &rsrc->txpend); | ||
434 | spin_unlock_irqrestore(&rsrc->bufpool_lock, flags); | ||
435 | rsrc->stats_sent_pkt_q++; | ||
436 | if (rsrc->type == I40IW_PUDA_RSRC_TYPE_ILQ) | ||
437 | i40iw_debug(rsrc->dev, I40IW_DEBUG_PUDA, | ||
438 | "%s: adding to txpend\n", __func__); | ||
439 | return; | ||
440 | } | ||
441 | rsrc->tx_wqe_avail_cnt--; | ||
442 | /* if we are coming from a completion and have pending buffers | ||
443 | * then get one from the pending list | ||
444 | */ | ||
445 | if (!buf) { | ||
446 | buf = i40iw_puda_get_listbuf(&rsrc->txpend); | ||
447 | if (!buf) | ||
448 | goto done; | ||
449 | } | ||
450 | |||
451 | info.scratch = (void *)buf; | ||
452 | info.paddr = buf->mem.pa; | ||
453 | info.len = buf->totallen; | ||
454 | info.tcplen = buf->tcphlen; | ||
455 | info.maclen = buf->maclen; | ||
456 | info.ipv4 = buf->ipv4; | ||
457 | info.doloopback = (rsrc->type == I40IW_PUDA_RSRC_TYPE_IEQ); | ||
458 | |||
459 | ret = i40iw_puda_send(&rsrc->qp, &info); | ||
460 | if (ret) { | ||
461 | rsrc->tx_wqe_avail_cnt++; | ||
462 | rsrc->stats_sent_pkt_q++; | ||
463 | list_add(&buf->list, &rsrc->txpend); | ||
464 | if (rsrc->type == I40IW_PUDA_RSRC_TYPE_ILQ) | ||
465 | i40iw_debug(rsrc->dev, I40IW_DEBUG_PUDA, | ||
466 | "%s: adding to puda_send\n", __func__); | ||
467 | } else { | ||
468 | rsrc->stats_pkt_sent++; | ||
469 | } | ||
470 | done: | ||
471 | spin_unlock_irqrestore(&rsrc->bufpool_lock, flags); | ||
472 | } | ||
473 | |||
474 | /** | ||
475 | * i40iw_puda_qp_setctx - during init, set qp's context | ||
476 | * @rsrc: qp's resource | ||
477 | */ | ||
478 | static void i40iw_puda_qp_setctx(struct i40iw_puda_rsrc *rsrc) | ||
479 | { | ||
480 | struct i40iw_sc_qp *qp = &rsrc->qp; | ||
481 | u64 *qp_ctx = qp->hw_host_ctx; | ||
482 | |||
483 | set_64bit_val(qp_ctx, 8, qp->sq_pa); | ||
484 | set_64bit_val(qp_ctx, 16, qp->rq_pa); | ||
485 | |||
486 | set_64bit_val(qp_ctx, 24, | ||
487 | LS_64(qp->hw_rq_size, I40IWQPC_RQSIZE) | | ||
488 | LS_64(qp->hw_sq_size, I40IWQPC_SQSIZE)); | ||
489 | |||
490 | set_64bit_val(qp_ctx, 48, LS_64(1514, I40IWQPC_SNDMSS)); | ||
491 | set_64bit_val(qp_ctx, 56, 0); | ||
492 | set_64bit_val(qp_ctx, 64, 1); | ||
493 | |||
494 | set_64bit_val(qp_ctx, 136, | ||
495 | LS_64(rsrc->cq_id, I40IWQPC_TXCQNUM) | | ||
496 | LS_64(rsrc->cq_id, I40IWQPC_RXCQNUM)); | ||
497 | |||
498 | set_64bit_val(qp_ctx, 160, LS_64(1, I40IWQPC_PRIVEN)); | ||
499 | |||
500 | set_64bit_val(qp_ctx, 168, | ||
501 | LS_64((uintptr_t)qp, I40IWQPC_QPCOMPCTX)); | ||
502 | |||
503 | set_64bit_val(qp_ctx, 176, | ||
504 | LS_64(qp->sq_tph_val, I40IWQPC_SQTPHVAL) | | ||
505 | LS_64(qp->rq_tph_val, I40IWQPC_RQTPHVAL) | | ||
506 | LS_64(qp->qs_handle, I40IWQPC_QSHANDLE)); | ||
507 | |||
508 | i40iw_debug_buf(rsrc->dev, I40IW_DEBUG_PUDA, "PUDA QP CONTEXT", | ||
509 | qp_ctx, I40IW_QP_CTX_SIZE); | ||
510 | } | ||
511 | |||
512 | /** | ||
513 | * i40iw_puda_qp_wqe - setup wqe for qp create | ||
514 | * @rsrc: resource for qp | ||
515 | */ | ||
516 | static enum i40iw_status_code i40iw_puda_qp_wqe(struct i40iw_puda_rsrc *rsrc) | ||
517 | { | ||
518 | struct i40iw_sc_qp *qp = &rsrc->qp; | ||
519 | struct i40iw_sc_dev *dev = rsrc->dev; | ||
520 | struct i40iw_sc_cqp *cqp; | ||
521 | u64 *wqe; | ||
522 | u64 header; | ||
523 | struct i40iw_ccq_cqe_info compl_info; | ||
524 | enum i40iw_status_code status = 0; | ||
525 | |||
526 | cqp = dev->cqp; | ||
527 | wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, 0); | ||
528 | if (!wqe) | ||
529 | return I40IW_ERR_RING_FULL; | ||
530 | |||
531 | set_64bit_val(wqe, 16, qp->hw_host_ctx_pa); | ||
532 | set_64bit_val(wqe, 40, qp->shadow_area_pa); | ||
533 | header = qp->qp_uk.qp_id | | ||
534 | LS_64(I40IW_CQP_OP_CREATE_QP, I40IW_CQPSQ_OPCODE) | | ||
535 | LS_64(I40IW_QP_TYPE_UDA, I40IW_CQPSQ_QP_QPTYPE) | | ||
536 | LS_64(1, I40IW_CQPSQ_QP_CQNUMVALID) | | ||
537 | LS_64(2, I40IW_CQPSQ_QP_NEXTIWSTATE) | | ||
538 | LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID); | ||
539 | |||
540 | set_64bit_val(wqe, 24, header); | ||
541 | |||
542 | i40iw_debug_buf(cqp->dev, I40IW_DEBUG_PUDA, "PUDA CQE", wqe, 32); | ||
543 | i40iw_sc_cqp_post_sq(cqp); | ||
544 | status = dev->cqp_ops->poll_for_cqp_op_done(dev->cqp, | ||
545 | I40IW_CQP_OP_CREATE_QP, | ||
546 | &compl_info); | ||
547 | return status; | ||
548 | } | ||
549 | |||
550 | /** | ||
551 | * i40iw_puda_qp_create - create qp for resource | ||
552 | * @rsrc: resource to use for buffer | ||
553 | */ | ||
554 | static enum i40iw_status_code i40iw_puda_qp_create(struct i40iw_puda_rsrc *rsrc) | ||
555 | { | ||
556 | struct i40iw_sc_qp *qp = &rsrc->qp; | ||
557 | struct i40iw_qp_uk *ukqp = &qp->qp_uk; | ||
558 | enum i40iw_status_code ret = 0; | ||
559 | u32 sq_size, rq_size, t_size; | ||
560 | struct i40iw_dma_mem *mem; | ||
561 | |||
562 | sq_size = rsrc->sq_size * I40IW_QP_WQE_MIN_SIZE; | ||
563 | rq_size = rsrc->rq_size * I40IW_QP_WQE_MIN_SIZE; | ||
564 | t_size = (sq_size + rq_size + (I40IW_SHADOW_AREA_SIZE << 3) + | ||
565 | I40IW_QP_CTX_SIZE); | ||
566 | /* Get page aligned memory */ | ||
567 | ret = | ||
568 | i40iw_allocate_dma_mem(rsrc->dev->hw, &rsrc->qpmem, t_size, | ||
569 | I40IW_HW_PAGE_SIZE); | ||
570 | if (ret) { | ||
571 | i40iw_debug(rsrc->dev, I40IW_DEBUG_PUDA, "%s: error dma mem\n", __func__); | ||
572 | return ret; | ||
573 | } | ||
574 | |||
575 | mem = &rsrc->qpmem; | ||
576 | memset(mem->va, 0, t_size); | ||
577 | qp->hw_sq_size = i40iw_get_encoded_wqe_size(rsrc->sq_size, false); | ||
578 | qp->hw_rq_size = i40iw_get_encoded_wqe_size(rsrc->rq_size, false); | ||
579 | qp->pd = &rsrc->sc_pd; | ||
580 | qp->qp_type = I40IW_QP_TYPE_UDA; | ||
581 | qp->dev = rsrc->dev; | ||
582 | qp->back_qp = (void *)rsrc; | ||
583 | qp->sq_pa = mem->pa; | ||
584 | qp->rq_pa = qp->sq_pa + sq_size; | ||
585 | ukqp->sq_base = mem->va; | ||
586 | ukqp->rq_base = &ukqp->sq_base[rsrc->sq_size]; | ||
587 | ukqp->shadow_area = ukqp->rq_base[rsrc->rq_size].elem; | ||
588 | qp->shadow_area_pa = qp->rq_pa + rq_size; | ||
589 | qp->hw_host_ctx = ukqp->shadow_area + I40IW_SHADOW_AREA_SIZE; | ||
590 | qp->hw_host_ctx_pa = | ||
591 | qp->shadow_area_pa + (I40IW_SHADOW_AREA_SIZE << 3); | ||
592 | ukqp->qp_id = rsrc->qp_id; | ||
593 | ukqp->sq_wrtrk_array = rsrc->sq_wrtrk_array; | ||
594 | ukqp->rq_wrid_array = rsrc->rq_wrid_array; | ||
595 | |||
596 | ukqp->qp_id = rsrc->qp_id; | ||
597 | ukqp->sq_size = rsrc->sq_size; | ||
598 | ukqp->rq_size = rsrc->rq_size; | ||
599 | |||
600 | I40IW_RING_INIT(ukqp->sq_ring, ukqp->sq_size); | ||
601 | I40IW_RING_INIT(ukqp->initial_ring, ukqp->sq_size); | ||
602 | I40IW_RING_INIT(ukqp->rq_ring, ukqp->rq_size); | ||
603 | |||
604 | if (qp->pd->dev->is_pf) | ||
605 | ukqp->wqe_alloc_reg = (u32 __iomem *)(i40iw_get_hw_addr(qp->pd->dev) + | ||
606 | I40E_PFPE_WQEALLOC); | ||
607 | else | ||
608 | ukqp->wqe_alloc_reg = (u32 __iomem *)(i40iw_get_hw_addr(qp->pd->dev) + | ||
609 | I40E_VFPE_WQEALLOC1); | ||
610 | |||
611 | qp->qs_handle = qp->dev->qs_handle; | ||
612 | i40iw_puda_qp_setctx(rsrc); | ||
613 | ret = i40iw_puda_qp_wqe(rsrc); | ||
614 | if (ret) | ||
615 | i40iw_free_dma_mem(rsrc->dev->hw, &rsrc->qpmem); | ||
616 | return ret; | ||
617 | } | ||
618 | |||
619 | /** | ||
620 | * i40iw_puda_cq_create - create cq for resource | ||
621 | * @rsrc: resource for which cq to create | ||
622 | */ | ||
623 | static enum i40iw_status_code i40iw_puda_cq_create(struct i40iw_puda_rsrc *rsrc) | ||
624 | { | ||
625 | struct i40iw_sc_dev *dev = rsrc->dev; | ||
626 | struct i40iw_sc_cq *cq = &rsrc->cq; | ||
627 | u64 *wqe; | ||
628 | struct i40iw_sc_cqp *cqp; | ||
629 | u64 header; | ||
630 | enum i40iw_status_code ret = 0; | ||
631 | u32 tsize, cqsize; | ||
632 | u32 shadow_read_threshold = 128; | ||
633 | struct i40iw_dma_mem *mem; | ||
634 | struct i40iw_ccq_cqe_info compl_info; | ||
635 | struct i40iw_cq_init_info info; | ||
636 | struct i40iw_cq_uk_init_info *init_info = &info.cq_uk_init_info; | ||
637 | |||
638 | cq->back_cq = (void *)rsrc; | ||
639 | cqsize = rsrc->cq_size * (sizeof(struct i40iw_cqe)); | ||
640 | tsize = cqsize + sizeof(struct i40iw_cq_shadow_area); | ||
641 | ret = i40iw_allocate_dma_mem(dev->hw, &rsrc->cqmem, tsize, | ||
642 | I40IW_CQ0_ALIGNMENT_MASK); | ||
643 | if (ret) | ||
644 | return ret; | ||
645 | |||
646 | mem = &rsrc->cqmem; | ||
647 | memset(&info, 0, sizeof(info)); | ||
648 | info.dev = dev; | ||
649 | info.type = (rsrc->type == I40IW_PUDA_RSRC_TYPE_ILQ) ? | ||
650 | I40IW_CQ_TYPE_ILQ : I40IW_CQ_TYPE_IEQ; | ||
651 | info.shadow_read_threshold = rsrc->cq_size >> 2; | ||
652 | info.ceq_id_valid = true; | ||
653 | info.cq_base_pa = mem->pa; | ||
654 | info.shadow_area_pa = mem->pa + cqsize; | ||
655 | init_info->cq_base = mem->va; | ||
656 | init_info->shadow_area = (u64 *)((u8 *)mem->va + cqsize); | ||
657 | init_info->cq_size = rsrc->cq_size; | ||
658 | init_info->cq_id = rsrc->cq_id; | ||
659 | ret = dev->iw_priv_cq_ops->cq_init(cq, &info); | ||
660 | if (ret) | ||
661 | goto error; | ||
662 | cqp = dev->cqp; | ||
663 | wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, 0); | ||
664 | if (!wqe) { | ||
665 | ret = I40IW_ERR_RING_FULL; | ||
666 | goto error; | ||
667 | } | ||
668 | |||
669 | set_64bit_val(wqe, 0, rsrc->cq_size); | ||
670 | set_64bit_val(wqe, 8, RS_64_1(cq, 1)); | ||
671 | set_64bit_val(wqe, 16, LS_64(shadow_read_threshold, I40IW_CQPSQ_CQ_SHADOW_READ_THRESHOLD)); | ||
672 | set_64bit_val(wqe, 32, cq->cq_pa); | ||
673 | |||
674 | set_64bit_val(wqe, 40, cq->shadow_area_pa); | ||
675 | |||
676 | header = rsrc->cq_id | | ||
677 | LS_64(I40IW_CQP_OP_CREATE_CQ, I40IW_CQPSQ_OPCODE) | | ||
678 | LS_64(1, I40IW_CQPSQ_CQ_CHKOVERFLOW) | | ||
679 | LS_64(1, I40IW_CQPSQ_CQ_ENCEQEMASK) | | ||
680 | LS_64(1, I40IW_CQPSQ_CQ_CEQIDVALID) | | ||
681 | LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID); | ||
682 | set_64bit_val(wqe, 24, header); | ||
683 | |||
684 | i40iw_debug_buf(dev, I40IW_DEBUG_PUDA, "PUDA CQE", | ||
685 | wqe, I40IW_CQP_WQE_SIZE * 8); | ||
686 | |||
687 | i40iw_sc_cqp_post_sq(dev->cqp); | ||
688 | ret = dev->cqp_ops->poll_for_cqp_op_done(dev->cqp, | ||
689 | I40IW_CQP_OP_CREATE_CQ, | ||
690 | &compl_info); | ||
691 | |||
692 | error: | ||
693 | if (ret) | ||
694 | i40iw_free_dma_mem(dev->hw, &rsrc->cqmem); | ||
695 | return ret; | ||
696 | } | ||
697 | |||
698 | /** | ||
699 | * i40iw_puda_dele_resources - delete all resources during close | ||
700 | * @dev: iwarp device | ||
701 | * @type: type of resource to delete | ||
702 | * @reset: true if reset chip | ||
703 | */ | ||
704 | void i40iw_puda_dele_resources(struct i40iw_sc_dev *dev, | ||
705 | enum puda_resource_type type, | ||
706 | bool reset) | ||
707 | { | ||
708 | struct i40iw_ccq_cqe_info compl_info; | ||
709 | struct i40iw_puda_rsrc *rsrc; | ||
710 | struct i40iw_puda_buf *buf = NULL; | ||
711 | struct i40iw_puda_buf *nextbuf = NULL; | ||
712 | struct i40iw_virt_mem *vmem; | ||
713 | enum i40iw_status_code ret; | ||
714 | |||
715 | switch (type) { | ||
716 | case I40IW_PUDA_RSRC_TYPE_ILQ: | ||
717 | rsrc = dev->ilq; | ||
718 | vmem = &dev->ilq_mem; | ||
719 | break; | ||
720 | case I40IW_PUDA_RSRC_TYPE_IEQ: | ||
721 | rsrc = dev->ieq; | ||
722 | vmem = &dev->ieq_mem; | ||
723 | break; | ||
724 | default: | ||
725 | i40iw_debug(dev, I40IW_DEBUG_PUDA, "%s: error resource type = 0x%x\n", | ||
726 | __func__, type); | ||
727 | return; | ||
728 | } | ||
729 | |||
730 | switch (rsrc->completion) { | ||
731 | case PUDA_HASH_CRC_COMPLETE: | ||
732 | i40iw_free_hash_desc(&rsrc->hash_desc); | ||
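/* fallthrough */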
733 | case PUDA_QP_CREATED: | ||
734 | do { | ||
735 | if (reset) | ||
736 | break; | ||
737 | ret = dev->iw_priv_qp_ops->qp_destroy(&rsrc->qp, | ||
738 | 0, false, true, true); | ||
739 | if (ret) | ||
740 | i40iw_debug(rsrc->dev, I40IW_DEBUG_PUDA, | ||
741 | "%s error ieq qp destroy\n", | ||
742 | __func__); | ||
743 | |||
744 | ret = dev->cqp_ops->poll_for_cqp_op_done(dev->cqp, | ||
745 | I40IW_CQP_OP_DESTROY_QP, | ||
746 | &compl_info); | ||
747 | if (ret) | ||
748 | i40iw_debug(rsrc->dev, I40IW_DEBUG_PUDA, | ||
749 | "%s error ieq qp destroy done\n", | ||
750 | __func__); | ||
751 | } while (0); | ||
752 | |||
753 | i40iw_free_dma_mem(dev->hw, &rsrc->qpmem); | ||
754 | /* fallthrough */ | ||
755 | case PUDA_CQ_CREATED: | ||
756 | do { | ||
757 | if (reset) | ||
758 | break; | ||
759 | ret = dev->iw_priv_cq_ops->cq_destroy(&rsrc->cq, 0, true); | ||
760 | if (ret) | ||
761 | i40iw_debug(rsrc->dev, I40IW_DEBUG_PUDA, | ||
762 | "%s error ieq cq destroy\n", | ||
763 | __func__); | ||
764 | |||
765 | ret = dev->cqp_ops->poll_for_cqp_op_done(dev->cqp, | ||
766 | I40IW_CQP_OP_DESTROY_CQ, | ||
767 | &compl_info); | ||
768 | if (ret) | ||
769 | i40iw_debug(rsrc->dev, I40IW_DEBUG_PUDA, | ||
770 | "%s error ieq qp destroy done\n", | ||
771 | __func__); | ||
772 | } while (0); | ||
773 | |||
774 | i40iw_free_dma_mem(dev->hw, &rsrc->cqmem); | ||
775 | break; | ||
776 | default: | ||
777 | i40iw_debug(rsrc->dev, I40IW_DEBUG_PUDA, "%s error no resources\n", __func__); | ||
778 | break; | ||
779 | } | ||
780 | /* Free all allocated puda buffers for both tx and rx */ | ||
781 | buf = rsrc->alloclist; | ||
782 | while (buf) { | ||
783 | nextbuf = buf->next; | ||
784 | i40iw_puda_dele_buf(dev, buf); | ||
785 | buf = nextbuf; | ||
786 | rsrc->alloc_buf_count--; | ||
787 | } | ||
788 | i40iw_free_virt_mem(dev->hw, vmem); | ||
789 | } | ||
790 | |||
791 | /** | ||
792 | * i40iw_puda_allocbufs - allocate buffers for resource | ||
793 | * @rsrc: resource for buffer allocation | ||
794 | * @count: number of buffers to create | ||
795 | */ | ||
796 | static enum i40iw_status_code i40iw_puda_allocbufs(struct i40iw_puda_rsrc *rsrc, | ||
797 | u32 count) | ||
798 | { | ||
799 | u32 i; | ||
800 | struct i40iw_puda_buf *buf; | ||
801 | struct i40iw_puda_buf *nextbuf; | ||
802 | |||
803 | for (i = 0; i < count; i++) { | ||
804 | buf = i40iw_puda_alloc_buf(rsrc->dev, rsrc->buf_size); | ||
805 | if (!buf) { | ||
806 | rsrc->stats_buf_alloc_fail++; | ||
807 | return I40IW_ERR_NO_MEMORY; | ||
808 | } | ||
809 | i40iw_puda_ret_bufpool(rsrc, buf); | ||
810 | rsrc->alloc_buf_count++; | ||
811 | if (!rsrc->alloclist) { | ||
812 | rsrc->alloclist = buf; | ||
813 | } else { | ||
814 | nextbuf = rsrc->alloclist; | ||
815 | rsrc->alloclist = buf; | ||
816 | buf->next = nextbuf; | ||
817 | } | ||
818 | } | ||
819 | rsrc->avail_buf_count = rsrc->alloc_buf_count; | ||
820 | return 0; | ||
821 | } | ||
822 | |||
823 | /** | ||
824 | * i40iw_puda_create_rsrc - create resource (ilq or ieq) | ||
825 | * @dev: iwarp device | ||
826 | * @info: resource information | ||
827 | */ | ||
828 | enum i40iw_status_code i40iw_puda_create_rsrc(struct i40iw_sc_dev *dev, | ||
829 | struct i40iw_puda_rsrc_info *info) | ||
830 | { | ||
831 | enum i40iw_status_code ret = 0; | ||
832 | struct i40iw_puda_rsrc *rsrc; | ||
833 | u32 pudasize; | ||
834 | u32 sqwridsize, rqwridsize; | ||
835 | struct i40iw_virt_mem *vmem; | ||
836 | |||
837 | info->count = 1; | ||
838 | pudasize = sizeof(struct i40iw_puda_rsrc); | ||
839 | sqwridsize = info->sq_size * sizeof(struct i40iw_sq_uk_wr_trk_info); | ||
840 | rqwridsize = info->rq_size * 8; | ||
841 | switch (info->type) { | ||
842 | case I40IW_PUDA_RSRC_TYPE_ILQ: | ||
843 | vmem = &dev->ilq_mem; | ||
844 | break; | ||
845 | case I40IW_PUDA_RSRC_TYPE_IEQ: | ||
846 | vmem = &dev->ieq_mem; | ||
847 | break; | ||
848 | default: | ||
849 | return I40IW_NOT_SUPPORTED; | ||
850 | } | ||
851 | ret = | ||
852 | i40iw_allocate_virt_mem(dev->hw, vmem, | ||
853 | pudasize + sqwridsize + rqwridsize); | ||
854 | if (ret) | ||
855 | return ret; | ||
856 | rsrc = (struct i40iw_puda_rsrc *)vmem->va; | ||
857 | spin_lock_init(&rsrc->bufpool_lock); | ||
858 | if (info->type == I40IW_PUDA_RSRC_TYPE_ILQ) { | ||
859 | dev->ilq = (struct i40iw_puda_rsrc *)vmem->va; | ||
860 | dev->ilq_count = info->count; | ||
861 | rsrc->receive = info->receive; | ||
862 | rsrc->xmit_complete = info->xmit_complete; | ||
863 | } else { | ||
864 | vmem = &dev->ieq_mem; | ||
865 | dev->ieq_count = info->count; | ||
866 | dev->ieq = (struct i40iw_puda_rsrc *)vmem->va; | ||
867 | rsrc->receive = i40iw_ieq_receive; | ||
868 | rsrc->xmit_complete = i40iw_ieq_tx_compl; | ||
869 | } | ||
870 | |||
871 | rsrc->type = info->type; | ||
872 | rsrc->sq_wrtrk_array = (struct i40iw_sq_uk_wr_trk_info *)((u8 *)vmem->va + pudasize); | ||
873 | rsrc->rq_wrid_array = (u64 *)((u8 *)vmem->va + pudasize + sqwridsize); | ||
874 | rsrc->mss = info->mss; | ||
875 | /* Initialize all ieq lists */ | ||
876 | INIT_LIST_HEAD(&rsrc->bufpool); | ||
877 | INIT_LIST_HEAD(&rsrc->txpend); | ||
878 | |||
879 | rsrc->tx_wqe_avail_cnt = info->sq_size - 1; | ||
880 | dev->iw_pd_ops->pd_init(dev, &rsrc->sc_pd, info->pd_id); | ||
881 | rsrc->qp_id = info->qp_id; | ||
882 | rsrc->cq_id = info->cq_id; | ||
883 | rsrc->sq_size = info->sq_size; | ||
884 | rsrc->rq_size = info->rq_size; | ||
885 | rsrc->cq_size = info->rq_size + info->sq_size; | ||
886 | rsrc->buf_size = info->buf_size; | ||
887 | rsrc->dev = dev; | ||
888 | |||
889 | ret = i40iw_puda_cq_create(rsrc); | ||
890 | if (!ret) { | ||
891 | rsrc->completion = PUDA_CQ_CREATED; | ||
892 | ret = i40iw_puda_qp_create(rsrc); | ||
893 | } | ||
894 | if (ret) { | ||
895 | i40iw_debug(dev, I40IW_DEBUG_PUDA, "[%s] error qp_create\n", __func__); | ||
896 | goto error; | ||
897 | } | ||
898 | rsrc->completion = PUDA_QP_CREATED; | ||
899 | |||
900 | ret = i40iw_puda_allocbufs(rsrc, info->tx_buf_cnt + info->rq_size); | ||
901 | if (ret) { | ||
902 | i40iw_debug(dev, I40IW_DEBUG_PUDA, "[%s] error allloc_buf\n", __func__); | ||
903 | goto error; | ||
904 | } | ||
905 | |||
906 | rsrc->rxq_invalid_cnt = info->rq_size; | ||
907 | ret = i40iw_puda_replenish_rq(rsrc, true); | ||
908 | if (ret) | ||
909 | goto error; | ||
910 | |||
911 | if (info->type == I40IW_PUDA_RSRC_TYPE_IEQ) { | ||
912 | if (!i40iw_init_hash_desc(&rsrc->hash_desc)) { | ||
913 | rsrc->check_crc = true; | ||
914 | rsrc->completion = PUDA_HASH_CRC_COMPLETE; | ||
915 | ret = 0; | ||
916 | } | ||
917 | } | ||
918 | |||
919 | dev->ccq_ops->ccq_arm(&rsrc->cq); | ||
920 | return ret; | ||
921 | error: | ||
922 | i40iw_puda_dele_resources(dev, info->type, false); | ||
923 | |||
924 | return ret; | ||
925 | } | ||
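/*
 * Setup sketch with hypothetical sizes, not part of this patch: creating
 * an ieq resource during hardware bring-up might look like this.
 *
 *	struct i40iw_puda_rsrc_info info = {
 *		.type = I40IW_PUDA_RSRC_TYPE_IEQ,
 *		.cq_id = 2,
 *		.qp_id = 2,
 *		.sq_size = 512,
 *		.rq_size = 512,
 *		.buf_size = 2048,
 *		.mss = 1460,
 *		.tx_buf_cnt = 512,
 *	};
 *
 *	if (i40iw_puda_create_rsrc(dev, &info))
 *		goto fail;
 *
 * buf_size must cover mss plus the mac/ip/tcp headers; the total number
 * of buffers allocated will be rq_size + tx_buf_cnt.
 */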
926 | |||
927 | /** | ||
928 | * i40iw_ilq_putback_rcvbuf - put an ilq receive buffer back on the rq | ||
929 | * @qp: ilq's qp resource | ||
930 | * @wqe_idx: wqe index of completed rcvbuf | ||
931 | */ | ||
932 | static void i40iw_ilq_putback_rcvbuf(struct i40iw_sc_qp *qp, u32 wqe_idx) | ||
933 | { | ||
934 | u64 *wqe; | ||
935 | u64 offset24; | ||
936 | |||
937 | wqe = qp->qp_uk.rq_base[wqe_idx].elem; | ||
938 | get_64bit_val(wqe, 24, &offset24); | ||
939 | offset24 = (offset24) ? 0 : LS_64(1, I40IWQPSQ_VALID); | ||
940 | set_64bit_val(wqe, 24, offset24); | ||
941 | } | ||
942 | |||
943 | /** | ||
944 | * i40iw_ieq_get_fpdu_length - given length return fpdu length | ||
945 | * @length: length of fpdu | ||
946 | */ | ||
947 | static u16 i40iw_ieq_get_fpdu_length(u16 length) | ||
948 | { | ||
949 | u16 fpdu_len; | ||
950 | |||
951 | fpdu_len = length + I40IW_IEQ_MPA_FRAMING; | ||
952 | fpdu_len = (fpdu_len + 3) & 0xfffffffc; | ||
953 | return fpdu_len; | ||
954 | } | ||
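/*
 * Worked example, illustrative: for an mpa payload length of 100 bytes,
 * fpdu_len = 100 + 6 (2-byte length field + 4-byte crc) = 106, rounded
 * up to the next 4-byte boundary: (106 + 3) & ~3 = 108.
 */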
955 | |||
956 | /** | ||
957 | * i40iw_ieq_copy_to_txbuf - copy data from rcv buf to tx buf | ||
958 | * @buf: rcv buffer with partial | ||
959 | * @txbuf: tx buffer for sending back | ||
960 | * @buf_offset: rcv buffer offset to copy from | ||
961 | * @txbuf_offset: at offset in tx buf to copy | ||
962 | * @length: length of data to copy | ||
963 | */ | ||
964 | static void i40iw_ieq_copy_to_txbuf(struct i40iw_puda_buf *buf, | ||
965 | struct i40iw_puda_buf *txbuf, | ||
966 | u16 buf_offset, u32 txbuf_offset, | ||
967 | u32 length) | ||
968 | { | ||
969 | void *mem1 = (u8 *)buf->mem.va + buf_offset; | ||
970 | void *mem2 = (u8 *)txbuf->mem.va + txbuf_offset; | ||
971 | |||
972 | memcpy(mem2, mem1, length); | ||
973 | } | ||
974 | |||
975 | /** | ||
976 | * i40iw_ieq_setup_tx_buf - setup tx buffer for partial handling | ||
977 | * @buf: receive buffer with partial | ||
978 | * @txbuf: buffer to prepare | ||
979 | */ | ||
980 | static void i40iw_ieq_setup_tx_buf(struct i40iw_puda_buf *buf, | ||
981 | struct i40iw_puda_buf *txbuf) | ||
982 | { | ||
983 | txbuf->maclen = buf->maclen; | ||
984 | txbuf->tcphlen = buf->tcphlen; | ||
985 | txbuf->ipv4 = buf->ipv4; | ||
986 | txbuf->hdrlen = buf->hdrlen; | ||
987 | i40iw_ieq_copy_to_txbuf(buf, txbuf, 0, 0, buf->hdrlen); | ||
988 | } | ||
989 | |||
990 | /** | ||
991 | * i40iw_ieq_check_first_buf - check if rcv buffer's seq is in range | ||
992 | * @buf: receive exception buffer | ||
993 | * @fps: first partial sequence number | ||
994 | */ | ||
995 | static void i40iw_ieq_check_first_buf(struct i40iw_puda_buf *buf, u32 fps) | ||
996 | { | ||
997 | u32 offset; | ||
998 | |||
999 | if (buf->seqnum < fps) { | ||
1000 | offset = fps - buf->seqnum; | ||
1001 | if (offset > buf->datalen) | ||
1002 | return; | ||
1003 | buf->data += offset; | ||
1004 | buf->datalen -= (u16)offset; | ||
1005 | buf->seqnum = fps; | ||
1006 | } | ||
1007 | } | ||
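/*
 * Example, illustrative: with buf->seqnum = 1000 and fps = 1010, the
 * first 10 bytes of the buffer precede the first partial fpdu and are
 * skipped (data += 10, datalen -= 10, seqnum becomes 1010).  A buffer
 * lying entirely before fps is left untouched.
 */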
1008 | |||
1009 | /** | ||
1010 | * i40iw_ieq_compl_pfpdu - write txbuf with full fpdu | ||
1011 | * @ieq: ieq resource | ||
1012 | * @rxlist: ieq's received buffer list | ||
1013 | * @pbufl: temporary list for buffers for fpdu | ||
1014 | * @txbuf: tx buffer for fpdu | ||
1015 | * @fpdu_len: total length of fpdu | ||
1016 | */ | ||
1017 | static void i40iw_ieq_compl_pfpdu(struct i40iw_puda_rsrc *ieq, | ||
1018 | struct list_head *rxlist, | ||
1019 | struct list_head *pbufl, | ||
1020 | struct i40iw_puda_buf *txbuf, | ||
1021 | u16 fpdu_len) | ||
1022 | { | ||
1023 | struct i40iw_puda_buf *buf; | ||
1024 | u32 nextseqnum; | ||
1025 | u16 txoffset, bufoffset; | ||
1026 | |||
1027 | buf = i40iw_puda_get_listbuf(pbufl); | ||
1028 | nextseqnum = buf->seqnum + fpdu_len; | ||
1029 | txbuf->totallen = buf->hdrlen + fpdu_len; | ||
1030 | txbuf->data = (u8 *)txbuf->mem.va + buf->hdrlen; | ||
1031 | i40iw_ieq_setup_tx_buf(buf, txbuf); | ||
1032 | |||
1033 | txoffset = buf->hdrlen; | ||
1034 | bufoffset = (u16)(buf->data - (u8 *)buf->mem.va); | ||
1035 | |||
1036 | do { | ||
1037 | if (buf->datalen >= fpdu_len) { | ||
1038 | /* copied full fpdu */ | ||
1039 | i40iw_ieq_copy_to_txbuf(buf, txbuf, bufoffset, txoffset, fpdu_len); | ||
1040 | buf->datalen -= fpdu_len; | ||
1041 | buf->data += fpdu_len; | ||
1042 | buf->seqnum = nextseqnum; | ||
1043 | break; | ||
1044 | } | ||
1045 | /* copy partial fpdu */ | ||
1046 | i40iw_ieq_copy_to_txbuf(buf, txbuf, bufoffset, txoffset, buf->datalen); | ||
1047 | txoffset += buf->datalen; | ||
1048 | fpdu_len -= buf->datalen; | ||
1049 | i40iw_puda_ret_bufpool(ieq, buf); | ||
1050 | buf = i40iw_puda_get_listbuf(pbufl); | ||
1051 | bufoffset = (u16)(buf->data - (u8 *)buf->mem.va); | ||
1052 | } while (1); | ||
1053 | |||
1054 | /* last buffer on the list */ | ||
1055 | if (buf->datalen) | ||
1056 | list_add(&buf->list, rxlist); | ||
1057 | else | ||
1058 | i40iw_puda_ret_bufpool(ieq, buf); | ||
1059 | } | ||
1060 | |||
1061 | /** | ||
1062 | * i40iw_ieq_create_pbufl - create buffer list for single fpdu | ||
1063 | * @rxlist: resource list for receive ieq buffers | ||
1064 | * @pbufl: temp. list for buffers for fpdu | ||
1065 | * @buf: first receive buffer | ||
1066 | * @fpdu_len: total length of fpdu | ||
1067 | */ | ||
1068 | static enum i40iw_status_code i40iw_ieq_create_pbufl( | ||
1069 | struct i40iw_pfpdu *pfpdu, | ||
1070 | struct list_head *rxlist, | ||
1071 | struct list_head *pbufl, | ||
1072 | struct i40iw_puda_buf *buf, | ||
1073 | u16 fpdu_len) | ||
1074 | { | ||
1075 | enum i40iw_status_code status = 0; | ||
1076 | struct i40iw_puda_buf *nextbuf; | ||
1077 | u32 nextseqnum; | ||
1078 | u16 plen = fpdu_len - buf->datalen; | ||
1079 | bool done = false; | ||
1080 | |||
1081 | nextseqnum = buf->seqnum + buf->datalen; | ||
1082 | do { | ||
1083 | nextbuf = i40iw_puda_get_listbuf(rxlist); | ||
1084 | if (!nextbuf) { | ||
1085 | status = I40IW_ERR_list_empty; | ||
1086 | break; | ||
1087 | } | ||
1088 | list_add_tail(&nextbuf->list, pbufl); | ||
1089 | if (nextbuf->seqnum != nextseqnum) { | ||
1090 | pfpdu->bad_seq_num++; | ||
1091 | status = I40IW_ERR_SEQ_NUM; | ||
1092 | break; | ||
1093 | } | ||
1094 | if (nextbuf->datalen >= plen) { | ||
1095 | done = true; | ||
1096 | } else { | ||
1097 | plen -= nextbuf->datalen; | ||
1098 | nextseqnum = nextbuf->seqnum + nextbuf->datalen; | ||
1099 | } | ||
1100 | |||
1101 | } while (!done); | ||
1102 | |||
1103 | return status; | ||
1104 | } | ||
1105 | |||
1106 | /** | ||
1107 | * i40iw_ieq_handle_partial - process partial fpdu buffer | ||
1108 | * @ieq: ieq resource | ||
1109 | * @pfpdu: partial management per user qp | ||
1110 | * @buf: receive buffer | ||
1111 | * @fpdu_len: fpdu len in the buffer | ||
1112 | */ | ||
1113 | static enum i40iw_status_code i40iw_ieq_handle_partial(struct i40iw_puda_rsrc *ieq, | ||
1114 | struct i40iw_pfpdu *pfpdu, | ||
1115 | struct i40iw_puda_buf *buf, | ||
1116 | u16 fpdu_len) | ||
1117 | { | ||
1118 | enum i40iw_status_code status = 0; | ||
1119 | u8 *crcptr; | ||
1120 | u32 mpacrc; | ||
1121 | u32 seqnum = buf->seqnum; | ||
1122 | struct list_head pbufl; /* partial buffer list */ | ||
1123 | struct i40iw_puda_buf *txbuf = NULL; | ||
1124 | struct list_head *rxlist = &pfpdu->rxlist; | ||
1125 | |||
1126 | INIT_LIST_HEAD(&pbufl); | ||
1127 | list_add(&buf->list, &pbufl); | ||
1128 | |||
1129 | status = i40iw_ieq_create_pbufl(pfpdu, rxlist, &pbufl, buf, fpdu_len); | ||
1130 | if (status) | ||
1131 | goto error; | ||
1132 | |||
1133 | txbuf = i40iw_puda_get_bufpool(ieq); | ||
1134 | if (!txbuf) { | ||
1135 | pfpdu->no_tx_bufs++; | ||
1136 | status = I40IW_ERR_NO_TXBUFS; | ||
1137 | goto error; | ||
1138 | } | ||
1139 | |||
1140 | i40iw_ieq_compl_pfpdu(ieq, rxlist, &pbufl, txbuf, fpdu_len); | ||
1141 | i40iw_ieq_update_tcpip_info(txbuf, fpdu_len, seqnum); | ||
1142 | crcptr = txbuf->data + fpdu_len - 4; | ||
1143 | mpacrc = *(u32 *)crcptr; | ||
1144 | if (ieq->check_crc) { | ||
1145 | status = i40iw_ieq_check_mpacrc(&ieq->hash_desc, txbuf->data, | ||
1146 | (fpdu_len - 4), mpacrc); | ||
1147 | if (status) { | ||
1148 | i40iw_debug(ieq->dev, I40IW_DEBUG_IEQ, | ||
1149 | "%s: error bad crc\n", __func__); | ||
1150 | goto error; | ||
1151 | } | ||
1152 | } | ||
1153 | |||
1154 | i40iw_debug_buf(ieq->dev, I40IW_DEBUG_IEQ, "IEQ TX BUFFER", | ||
1155 | txbuf->mem.va, txbuf->totallen); | ||
1156 | i40iw_puda_send_buf(ieq, txbuf); | ||
1157 | pfpdu->rcv_nxt = seqnum + fpdu_len; | ||
1158 | return status; | ||
1159 | error: | ||
1160 | while (!list_empty(&pbufl)) { | ||
1161 | buf = (struct i40iw_puda_buf *)(pbufl.prev); | ||
1162 | list_del(&buf->list); | ||
1163 | list_add(&buf->list, rxlist); | ||
1164 | } | ||
1165 | if (txbuf) | ||
1166 | i40iw_puda_ret_bufpool(ieq, txbuf); | ||
1167 | return status; | ||
1168 | } | ||
1169 | |||
1170 | /** | ||
1171 | * i40iw_ieq_process_buf - process buffer rcvd for ieq | ||
1172 | * @ieq: ieq resource | ||
1173 | * @pfpdu: partial management per user qp | ||
1174 | * @buf: receive buffer | ||
1175 | */ | ||
1176 | static enum i40iw_status_code i40iw_ieq_process_buf(struct i40iw_puda_rsrc *ieq, | ||
1177 | struct i40iw_pfpdu *pfpdu, | ||
1178 | struct i40iw_puda_buf *buf) | ||
1179 | { | ||
1180 | u16 fpdu_len = 0; | ||
1181 | u16 datalen = buf->datalen; | ||
1182 | u8 *datap = buf->data; | ||
1183 | u8 *crcptr; | ||
1184 | u16 ioffset = 0; | ||
1185 | u32 mpacrc; | ||
1186 | u32 seqnum = buf->seqnum; | ||
1187 | u16 length = 0; | ||
1188 | u16 full = 0; | ||
1189 | bool partial = false; | ||
1190 | struct i40iw_puda_buf *txbuf; | ||
1191 | struct list_head *rxlist = &pfpdu->rxlist; | ||
1192 | enum i40iw_status_code ret = 0; | ||
1193 | enum i40iw_status_code status = 0; | ||
1194 | |||
1195 | ioffset = (u16)(buf->data - (u8 *)buf->mem.va); | ||
1196 | while (datalen) { | ||
1197 | fpdu_len = i40iw_ieq_get_fpdu_length(ntohs(*(u16 *)datap)); | ||
1198 | if (fpdu_len > pfpdu->max_fpdu_data) { | ||
1199 | i40iw_debug(ieq->dev, I40IW_DEBUG_IEQ, | ||
1200 | "%s: error bad fpdu_len\n", __func__); | ||
1201 | status = I40IW_ERR_MPA_CRC; | ||
1202 | list_add(&buf->list, rxlist); | ||
1203 | return status; | ||
1204 | } | ||
1205 | |||
1206 | if (datalen < fpdu_len) { | ||
1207 | partial = true; | ||
1208 | break; | ||
1209 | } | ||
1210 | crcptr = datap + fpdu_len - 4; | ||
1211 | mpacrc = *(u32 *)crcptr; | ||
1212 | if (ieq->check_crc) | ||
1213 | ret = i40iw_ieq_check_mpacrc(&ieq->hash_desc, | ||
1214 | datap, fpdu_len - 4, mpacrc); | ||
1215 | if (ret) { | ||
1216 | status = I40IW_ERR_MPA_CRC; | ||
1217 | list_add(&buf->list, rxlist); | ||
1218 | return status; | ||
1219 | } | ||
1220 | full++; | ||
1221 | pfpdu->fpdu_processed++; | ||
1222 | datap += fpdu_len; | ||
1223 | length += fpdu_len; | ||
1224 | datalen -= fpdu_len; | ||
1225 | } | ||
1226 | if (full) { | ||
1227 | /* copy full pdu's in the txbuf and send them out */ | ||
1228 | txbuf = i40iw_puda_get_bufpool(ieq); | ||
1229 | if (!txbuf) { | ||
1230 | pfpdu->no_tx_bufs++; | ||
1231 | status = I40IW_ERR_NO_TXBUFS; | ||
1232 | list_add(&buf->list, rxlist); | ||
1233 | return status; | ||
1234 | } | ||
1235 | /* modify txbuf's buffer header */ | ||
1236 | i40iw_ieq_setup_tx_buf(buf, txbuf); | ||
1237 | /* copy full fpdu's to new buffer */ | ||
1238 | i40iw_ieq_copy_to_txbuf(buf, txbuf, ioffset, buf->hdrlen, | ||
1239 | length); | ||
1240 | txbuf->totallen = buf->hdrlen + length; | ||
1241 | |||
1242 | i40iw_ieq_update_tcpip_info(txbuf, length, buf->seqnum); | ||
1243 | i40iw_puda_send_buf(ieq, txbuf); | ||
1244 | |||
1245 | if (!datalen) { | ||
1246 | pfpdu->rcv_nxt = buf->seqnum + length; | ||
1247 | i40iw_puda_ret_bufpool(ieq, buf); | ||
1248 | return status; | ||
1249 | } | ||
1250 | buf->data = datap; | ||
1251 | buf->seqnum = seqnum + length; | ||
1252 | buf->datalen = datalen; | ||
1253 | pfpdu->rcv_nxt = buf->seqnum; | ||
1254 | } | ||
1255 | if (partial) | ||
1256 | status = i40iw_ieq_handle_partial(ieq, pfpdu, buf, fpdu_len); | ||
1257 | |||
1258 | return status; | ||
1259 | } | ||
1260 | |||
1261 | /** | ||
1262 | * i40iw_ieq_process_fpdus - process fpdu's buffers on its list | ||
1263 | * @qp: qp for which partial fpdus | ||
1264 | * @ieq: ieq resource | ||
1265 | */ | ||
1266 | static void i40iw_ieq_process_fpdus(struct i40iw_sc_qp *qp, | ||
1267 | struct i40iw_puda_rsrc *ieq) | ||
1268 | { | ||
1269 | struct i40iw_pfpdu *pfpdu = &qp->pfpdu; | ||
1270 | struct list_head *rxlist = &pfpdu->rxlist; | ||
1271 | struct i40iw_puda_buf *buf; | ||
1272 | enum i40iw_status_code status; | ||
1273 | |||
1274 | do { | ||
1275 | if (list_empty(rxlist)) | ||
1276 | break; | ||
1277 | buf = i40iw_puda_get_listbuf(rxlist); | ||
1278 | if (!buf) { | ||
1279 | i40iw_debug(ieq->dev, I40IW_DEBUG_IEQ, | ||
1280 | "%s: error no buf\n", __func__); | ||
1281 | break; | ||
1282 | } | ||
1283 | if (buf->seqnum != pfpdu->rcv_nxt) { | ||
1284 | /* This could be out of order or missing packet */ | ||
1285 | pfpdu->out_of_order++; | ||
1286 | list_add(&buf->list, rxlist); | ||
1287 | break; | ||
1288 | } | ||
1289 | /* keep processing buffers from the head of the list */ | ||
1290 | status = i40iw_ieq_process_buf(ieq, pfpdu, buf); | ||
1291 | if (status == I40IW_ERR_MPA_CRC) { | ||
1292 | pfpdu->mpa_crc_err = true; | ||
1293 | while (!list_empty(rxlist)) { | ||
1294 | buf = i40iw_puda_get_listbuf(rxlist); | ||
1295 | i40iw_puda_ret_bufpool(ieq, buf); | ||
1296 | pfpdu->crc_err++; | ||
1297 | } | ||
1298 | /* create CQP for AE */ | ||
1299 | i40iw_ieq_mpa_crc_ae(ieq->dev, qp); | ||
1300 | } | ||
1301 | } while (!status); | ||
1302 | } | ||
1303 | |||
1304 | /** | ||
1305 | * i40iw_ieq_handle_exception - handle qp's exception | ||
1306 | * @ieq: ieq resource | ||
1307 | * @qp: qp receiving exception | ||
1308 | * @buf: receive buffer | ||
1309 | */ | ||
1310 | static void i40iw_ieq_handle_exception(struct i40iw_puda_rsrc *ieq, | ||
1311 | struct i40iw_sc_qp *qp, | ||
1312 | struct i40iw_puda_buf *buf) | ||
1313 | { | ||
1314 | struct i40iw_puda_buf *tmpbuf = NULL; | ||
1315 | struct i40iw_pfpdu *pfpdu = &qp->pfpdu; | ||
1316 | u32 *hw_host_ctx = (u32 *)qp->hw_host_ctx; | ||
1317 | u32 rcv_wnd = hw_host_ctx[23]; | ||
1318 | /* first partial seq # in q2 */ | ||
1319 | u32 fps = qp->q2_buf[16]; | ||
1320 | struct list_head *rxlist = &pfpdu->rxlist; | ||
1321 | struct list_head *plist; | ||
1322 | |||
1323 | pfpdu->total_ieq_bufs++; | ||
1324 | |||
1325 | if (pfpdu->mpa_crc_err) { | ||
1326 | pfpdu->crc_err++; | ||
1327 | goto error; | ||
1328 | } | ||
1329 | if (pfpdu->mode && (fps != pfpdu->fps)) { | ||
1330 | /* clean up qp as it is new partial sequence */ | ||
1331 | i40iw_ieq_cleanup_qp(ieq->dev, qp); | ||
1332 | i40iw_debug(ieq->dev, I40IW_DEBUG_IEQ, | ||
1333 | "%s: restarting new partial\n", __func__); | ||
1334 | pfpdu->mode = false; | ||
1335 | } | ||
1336 | |||
1337 | if (!pfpdu->mode) { | ||
1338 | i40iw_debug_buf(ieq->dev, I40IW_DEBUG_IEQ, "Q2 BUFFER", (u64 *)qp->q2_buf, 128); | ||
1339 | /* First_Partial_Sequence_Number check */ | ||
1340 | pfpdu->rcv_nxt = fps; | ||
1341 | pfpdu->fps = fps; | ||
1342 | pfpdu->mode = true; | ||
1343 | pfpdu->max_fpdu_data = ieq->mss; | ||
1344 | pfpdu->pmode_count++; | ||
1345 | INIT_LIST_HEAD(rxlist); | ||
1346 | i40iw_ieq_check_first_buf(buf, fps); | ||
1347 | } | ||
1348 | |||
1349 | if (!(rcv_wnd >= (buf->seqnum - pfpdu->rcv_nxt))) { | ||
1350 | pfpdu->bad_seq_num++; | ||
1351 | goto error; | ||
1352 | } | ||
1353 | |||
1354 | if (!list_empty(rxlist)) { | ||
1355 | tmpbuf = (struct i40iw_puda_buf *)rxlist->next; | ||
1356 | plist = &tmpbuf->list; | ||
1357 | while ((struct list_head *)tmpbuf != rxlist) { | ||
1358 | if ((int)(buf->seqnum - tmpbuf->seqnum) < 0) | ||
1359 | break; | ||
1360 | tmpbuf = (struct i40iw_puda_buf *)plist->next; | ||
1361 | } | ||
1362 | /* Insert buf before tmpbuf */ | ||
1363 | list_add_tail(&buf->list, &tmpbuf->list); | ||
1364 | } else { | ||
1365 | list_add_tail(&buf->list, rxlist); | ||
1366 | } | ||
1367 | i40iw_ieq_process_fpdus(qp, ieq); | ||
1368 | return; | ||
1369 | error: | ||
1370 | i40iw_puda_ret_bufpool(ieq, buf); | ||
1371 | } | ||
1372 | |||
1373 | /** | ||
1374 | * i40iw_ieq_receive - received exception buffer | ||
1375 | * @dev: iwarp device | ||
1376 | * @buf: exception buffer received | ||
1377 | */ | ||
1378 | static void i40iw_ieq_receive(struct i40iw_sc_dev *dev, | ||
1379 | struct i40iw_puda_buf *buf) | ||
1380 | { | ||
1381 | struct i40iw_puda_rsrc *ieq = dev->ieq; | ||
1382 | struct i40iw_sc_qp *qp = NULL; | ||
1383 | u32 wqe_idx = ieq->compl_rxwqe_idx; | ||
1384 | |||
1385 | qp = i40iw_ieq_get_qp(dev, buf); | ||
1386 | if (!qp) { | ||
1387 | ieq->stats_bad_qp_id++; | ||
1388 | i40iw_puda_ret_bufpool(ieq, buf); | ||
1389 | } else { | ||
1390 | i40iw_ieq_handle_exception(ieq, qp, buf); | ||
1391 | } | ||
1392 | /* | ||
1393 | * ieq->rx_wqe_idx is used by i40iw_puda_replenish_rq() | ||
1394 | * on which wqe_idx to start replenish rq | ||
1395 | */ | ||
1396 | if (!ieq->rxq_invalid_cnt) | ||
1397 | ieq->rx_wqe_idx = wqe_idx; | ||
1398 | ieq->rxq_invalid_cnt++; | ||
1399 | } | ||
1400 | |||
1401 | /** | ||
1402 | * i40iw_ieq_tx_compl - put back after sending completed exception buffer | ||
1403 | * @dev: iwarp device | ||
1404 | * @sqwrid: pointer to puda buffer | ||
1405 | */ | ||
1406 | static void i40iw_ieq_tx_compl(struct i40iw_sc_dev *dev, void *sqwrid) | ||
1407 | { | ||
1408 | struct i40iw_puda_rsrc *ieq = dev->ieq; | ||
1409 | struct i40iw_puda_buf *buf = (struct i40iw_puda_buf *)sqwrid; | ||
1410 | |||
1411 | i40iw_puda_ret_bufpool(ieq, buf); | ||
1412 | if (!list_empty(&ieq->txpend)) { | ||
1413 | buf = i40iw_puda_get_listbuf(&ieq->txpend); | ||
1414 | i40iw_puda_send_buf(ieq, buf); | ||
1415 | } | ||
1416 | } | ||
1417 | |||
1418 | /** | ||
1419 | * i40iw_ieq_cleanup_qp - qp is being destroyed | ||
1420 | * @dev: iwarp device | ||
1421 | * @qp: qp whose pending fpdu buffers are cleaned up | ||
1422 | */ | ||
1423 | void i40iw_ieq_cleanup_qp(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp) | ||
1424 | { | ||
1425 | struct i40iw_puda_buf *buf; | ||
1426 | struct i40iw_pfpdu *pfpdu = &qp->pfpdu; | ||
1427 | struct list_head *rxlist = &pfpdu->rxlist; | ||
1428 | struct i40iw_puda_rsrc *ieq = dev->ieq; | ||
1429 | |||
1430 | if (!pfpdu->mode) | ||
1431 | return; | ||
1432 | while (!list_empty(rxlist)) { | ||
1433 | buf = i40iw_puda_get_listbuf(rxlist); | ||
1434 | i40iw_puda_ret_bufpool(ieq, buf); | ||
1435 | } | ||
1436 | } | ||
diff --git a/drivers/infiniband/hw/i40iw/i40iw_puda.h b/drivers/infiniband/hw/i40iw/i40iw_puda.h
new file mode 100644
index 000000000000..b689aa41dfe2
--- /dev/null
+++ b/drivers/infiniband/hw/i40iw/i40iw_puda.h
@@ -0,0 +1,183 @@
1 | /******************************************************************************* | ||
2 | * | ||
3 | * Copyright (c) 2015-2016 Intel Corporation. All rights reserved. | ||
4 | * | ||
5 | * This software is available to you under a choice of one of two | ||
6 | * licenses. You may choose to be licensed under the terms of the GNU | ||
7 | * General Public License (GPL) Version 2, available from the file | ||
8 | * COPYING in the main directory of this source tree, or the | ||
9 | * OpenFabrics.org BSD license below: | ||
10 | * | ||
11 | * Redistribution and use in source and binary forms, with or | ||
12 | * without modification, are permitted provided that the following | ||
13 | * conditions are met: | ||
14 | * | ||
15 | * - Redistributions of source code must retain the above | ||
16 | * copyright notice, this list of conditions and the following | ||
17 | * disclaimer. | ||
18 | * | ||
19 | * - Redistributions in binary form must reproduce the above | ||
20 | * copyright notice, this list of conditions and the following | ||
21 | * disclaimer in the documentation and/or other materials | ||
22 | * provided with the distribution. | ||
23 | * | ||
24 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
25 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
26 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
27 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
28 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
29 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
30 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
31 | * SOFTWARE. | ||
32 | * | ||
33 | *******************************************************************************/ | ||
34 | |||
35 | #ifndef I40IW_PUDA_H | ||
36 | #define I40IW_PUDA_H | ||
37 | |||
38 | #define I40IW_IEQ_MPA_FRAMING 6 | ||
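/* 2-byte MPA length field plus 4-byte trailing CRC per fpdu */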
39 | |||
40 | struct i40iw_sc_dev; | ||
41 | struct i40iw_sc_qp; | ||
42 | struct i40iw_sc_cq; | ||
43 | |||
44 | enum puda_resource_type { | ||
45 | I40IW_PUDA_RSRC_TYPE_ILQ = 1, | ||
46 | I40IW_PUDA_RSRC_TYPE_IEQ | ||
47 | }; | ||
48 | |||
49 | enum puda_rsrc_complete { | ||
50 | PUDA_CQ_CREATED = 1, | ||
51 | PUDA_QP_CREATED, | ||
52 | PUDA_TX_COMPLETE, | ||
53 | PUDA_RX_COMPLETE, | ||
54 | PUDA_HASH_CRC_COMPLETE | ||
55 | }; | ||
56 | |||
57 | struct i40iw_puda_completion_info { | ||
58 | struct i40iw_qp_uk *qp; | ||
59 | u8 q_type; | ||
60 | u8 vlan_valid; | ||
61 | u8 l3proto; | ||
62 | u8 l4proto; | ||
63 | u16 payload_len; | ||
64 | u32 compl_error; /* No_err=0, else major and minor err code */ | ||
65 | u32 qp_id; | ||
66 | u32 wqe_idx; | ||
67 | }; | ||
68 | |||
69 | struct i40iw_puda_send_info { | ||
70 | u64 paddr; /* Physical address */ | ||
71 | u32 len; | ||
72 | u8 tcplen; | ||
73 | u8 maclen; | ||
74 | bool ipv4; | ||
75 | bool doloopback; | ||
76 | void *scratch; | ||
77 | }; | ||
78 | |||
79 | struct i40iw_puda_buf { | ||
80 | struct list_head list; /* MUST be first entry */ | ||
81 | struct i40iw_dma_mem mem; /* DMA memory for the buffer */ | ||
82 | struct i40iw_puda_buf *next; /* for alloclist in rsrc struct */ | ||
83 | struct i40iw_virt_mem buf_mem; /* Buffer memory for this buffer */ | ||
84 | void *scratch; | ||
85 | u8 *iph; | ||
86 | u8 *tcph; | ||
87 | u8 *data; | ||
88 | u16 datalen; | ||
89 | u16 vlan_id; | ||
90 | u8 tcphlen; /* tcp length in bytes */ | ||
91 | u8 maclen; /* mac length in bytes */ | ||
92 | u32 totallen; /* maclen+iphlen+tcphlen+datalen */ | ||
93 | atomic_t refcount; | ||
94 | u8 hdrlen; | ||
95 | bool ipv4; | ||
96 | u32 seqnum; | ||
97 | }; | ||
98 | |||
99 | struct i40iw_puda_rsrc_info { | ||
100 | enum puda_resource_type type; /* ILQ or IEQ */ | ||
101 | u32 count; | ||
102 | u16 pd_id; | ||
103 | u32 cq_id; | ||
104 | u32 qp_id; | ||
105 | u32 sq_size; | ||
106 | u32 rq_size; | ||
107 | u16 buf_size; | ||
108 | u16 mss; | ||
109 | u32 tx_buf_cnt; /* total bufs allocated will be rq_size + tx_buf_cnt */ | ||
110 | void (*receive)(struct i40iw_sc_dev *, struct i40iw_puda_buf *); | ||
111 | void (*xmit_complete)(struct i40iw_sc_dev *, void *); | ||
112 | }; | ||
113 | |||
114 | struct i40iw_puda_rsrc { | ||
115 | struct i40iw_sc_cq cq; | ||
116 | struct i40iw_sc_qp qp; | ||
117 | struct i40iw_sc_pd sc_pd; | ||
118 | struct i40iw_sc_dev *dev; | ||
119 | struct i40iw_dma_mem cqmem; | ||
120 | struct i40iw_dma_mem qpmem; | ||
121 | struct i40iw_virt_mem ilq_mem; | ||
122 | enum puda_rsrc_complete completion; | ||
123 | enum puda_resource_type type; | ||
124 | u16 buf_size; /* buffer must be max datalen + tcpip hdr + mac */ | ||
125 | u16 mss; | ||
126 | u32 cq_id; | ||
127 | u32 qp_id; | ||
128 | u32 sq_size; | ||
129 | u32 rq_size; | ||
130 | u32 cq_size; | ||
131 | struct i40iw_sq_uk_wr_trk_info *sq_wrtrk_array; | ||
132 | u64 *rq_wrid_array; | ||
133 | u32 compl_rxwqe_idx; | ||
134 | u32 rx_wqe_idx; | ||
135 | u32 rxq_invalid_cnt; | ||
136 | u32 tx_wqe_avail_cnt; | ||
137 | bool check_crc; | ||
138 | struct hash_desc hash_desc; | ||
139 | struct list_head txpend; | ||
140 | struct list_head bufpool; /* free buffer pool list for recv and xmit */ | ||
141 | u32 alloc_buf_count; | ||
142 | u32 avail_buf_count; /* snapshot of currently available buffers */ | ||
143 | spinlock_t bufpool_lock; | ||
144 | struct i40iw_puda_buf *alloclist; | ||
145 | void (*receive)(struct i40iw_sc_dev *, struct i40iw_puda_buf *); | ||
146 | void (*xmit_complete)(struct i40iw_sc_dev *, void *); | ||
147 | /* puda stats */ | ||
148 | u64 stats_buf_alloc_fail; | ||
149 | u64 stats_pkt_rcvd; | ||
150 | u64 stats_pkt_sent; | ||
151 | u64 stats_rcvd_pkt_err; | ||
152 | u64 stats_sent_pkt_q; | ||
153 | u64 stats_bad_qp_id; | ||
154 | }; | ||
155 | |||
156 | struct i40iw_puda_buf *i40iw_puda_get_bufpool(struct i40iw_puda_rsrc *rsrc); | ||
157 | void i40iw_puda_ret_bufpool(struct i40iw_puda_rsrc *rsrc, | ||
158 | struct i40iw_puda_buf *buf); | ||
159 | void i40iw_puda_send_buf(struct i40iw_puda_rsrc *rsrc, | ||
160 | struct i40iw_puda_buf *buf); | ||
161 | enum i40iw_status_code i40iw_puda_send(struct i40iw_sc_qp *qp, | ||
162 | struct i40iw_puda_send_info *info); | ||
163 | enum i40iw_status_code i40iw_puda_create_rsrc(struct i40iw_sc_dev *dev, | ||
164 | struct i40iw_puda_rsrc_info *info); | ||
165 | void i40iw_puda_dele_resources(struct i40iw_sc_dev *dev, | ||
166 | enum puda_resource_type type, | ||
167 | bool reset); | ||
168 | enum i40iw_status_code i40iw_puda_poll_completion(struct i40iw_sc_dev *dev, | ||
169 | struct i40iw_sc_cq *cq, u32 *compl_err); | ||
170 | void i40iw_ieq_cleanup_qp(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp); | ||
171 | |||
172 | struct i40iw_sc_qp *i40iw_ieq_get_qp(struct i40iw_sc_dev *dev, | ||
173 | struct i40iw_puda_buf *buf); | ||
174 | enum i40iw_status_code i40iw_puda_get_tcpip_info(struct i40iw_puda_completion_info *info, | ||
175 | struct i40iw_puda_buf *buf); | ||
176 | enum i40iw_status_code i40iw_ieq_check_mpacrc(struct hash_desc *desc, | ||
177 | void *addr, u32 length, u32 value); | ||
178 | enum i40iw_status_code i40iw_init_hash_desc(struct hash_desc *desc); | ||
179 | void i40iw_ieq_mpa_crc_ae(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp); | ||
180 | void i40iw_free_hash_desc(struct hash_desc *desc); | ||
181 | void i40iw_ieq_update_tcpip_info(struct i40iw_puda_buf *buf, u16 length, | ||
182 | u32 seqnum); | ||
183 | #endif | ||