diff options
author | Faisal Latif <faisal.latif@intel.com> | 2016-01-20 14:40:10 -0500 |
---|---|---|
committer | Doug Ledford <dledford@redhat.com> | 2016-03-16 13:50:52 -0400 |
commit | 86dbcd0f12e95df6a2cfe8bc883768946e68e2aa (patch) | |
tree | fedf73cc22a603c5c9b04209e07628a1716f0b45 | |
parent | 8d8cd0bf67982a2b400ca3fd5d6807b834f6a38e (diff) |
i40iw: add file to handle cqp calls
i40iw_ctrl.c provides for hardware wqe support and cqp.
Changes since v2:
cleanup coccinelle error reported by Julia Lawall
Changes since v1:
reported by Christoph Hellwig's review
-remove unnecessary casts
Acked-by: Anjali Singhai Jain <anjali.singhai@intel.com>
Acked-by: Shannon Nelson <shannon.nelson@intel.com>
Signed-off-by: Faisal Latif <faisal.latif@intel.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
-rw-r--r-- | drivers/infiniband/hw/i40iw/i40iw_ctrl.c | 4743 |
1 files changed, 4743 insertions, 0 deletions
diff --git a/drivers/infiniband/hw/i40iw/i40iw_ctrl.c b/drivers/infiniband/hw/i40iw/i40iw_ctrl.c new file mode 100644 index 000000000000..f05802bf6ca0 --- /dev/null +++ b/drivers/infiniband/hw/i40iw/i40iw_ctrl.c | |||
@@ -0,0 +1,4743 @@ | |||
1 | /******************************************************************************* | ||
2 | * | ||
3 | * Copyright (c) 2015-2016 Intel Corporation. All rights reserved. | ||
4 | * | ||
5 | * This software is available to you under a choice of one of two | ||
6 | * licenses. You may choose to be licensed under the terms of the GNU | ||
7 | * General Public License (GPL) Version 2, available from the file | ||
8 | * COPYING in the main directory of this source tree, or the | ||
9 | * OpenFabrics.org BSD license below: | ||
10 | * | ||
11 | * Redistribution and use in source and binary forms, with or | ||
12 | * without modification, are permitted provided that the following | ||
13 | * conditions are met: | ||
14 | * | ||
15 | * - Redistributions of source code must retain the above | ||
16 | * copyright notice, this list of conditions and the following | ||
17 | * disclaimer. | ||
18 | * | ||
19 | * - Redistributions in binary form must reproduce the above | ||
20 | * copyright notice, this list of conditions and the following | ||
21 | * disclaimer in the documentation and/or other materials | ||
22 | * provided with the distribution. | ||
23 | * | ||
24 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
25 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
26 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
27 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
28 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
29 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
30 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
31 | * SOFTWARE. | ||
32 | * | ||
33 | *******************************************************************************/ | ||
34 | |||
35 | #include "i40iw_osdep.h" | ||
36 | #include "i40iw_register.h" | ||
37 | #include "i40iw_status.h" | ||
38 | #include "i40iw_hmc.h" | ||
39 | |||
40 | #include "i40iw_d.h" | ||
41 | #include "i40iw_type.h" | ||
42 | #include "i40iw_p.h" | ||
43 | #include "i40iw_vf.h" | ||
44 | #include "i40iw_virtchnl.h" | ||
45 | |||
/**
 * i40iw_insert_wqe_hdr - write wqe header
 * @wqe: cqp wqe for header
 * @header: header for the cqp wqe
 *
 * The header qword at offset 24 carries the WQE-valid (polarity) bit,
 * so it must be written last: the wmb() orders all earlier payload
 * stores before hardware can see the WQE as valid.
 */
static inline void i40iw_insert_wqe_hdr(u64 *wqe, u64 header)
{
	wmb(); /* make sure WQE is populated before polarity is set */
	set_64bit_val(wqe, 24, header);
}
56 | |||
/**
 * i40iw_get_cqp_reg_info - get head and tail for cqp using registers
 * @cqp: struct for cqp hw
 * @val: returns raw CQPTAIL register value
 * @tail: returns wqtail field extracted from @val
 * @error: returns cqp-operation-error bit extracted from @val
 *
 * PF and VF expose the CQPTAIL register at different addresses, hence
 * the is_pf split; the extracted fields are the same in both layouts.
 */
static inline void i40iw_get_cqp_reg_info(struct i40iw_sc_cqp *cqp,
					  u32 *val,
					  u32 *tail,
					  u32 *error)
{
	if (cqp->dev->is_pf) {
		*val = i40iw_rd32(cqp->dev->hw, I40E_PFPE_CQPTAIL);
		*tail = RS_32(*val, I40E_PFPE_CQPTAIL_WQTAIL);
		*error = RS_32(*val, I40E_PFPE_CQPTAIL_CQP_OP_ERR);
	} else {
		*val = i40iw_rd32(cqp->dev->hw, I40E_VFPE_CQPTAIL1);
		*tail = RS_32(*val, I40E_VFPE_CQPTAIL_WQTAIL);
		*error = RS_32(*val, I40E_VFPE_CQPTAIL_CQP_OP_ERR);
	}
}
79 | |||
80 | /** | ||
81 | * i40iw_cqp_poll_registers - poll cqp registers | ||
82 | * @cqp: struct for cqp hw | ||
83 | * @tail:wqtail register value | ||
84 | * @count: how many times to try for completion | ||
85 | */ | ||
86 | static enum i40iw_status_code i40iw_cqp_poll_registers( | ||
87 | struct i40iw_sc_cqp *cqp, | ||
88 | u32 tail, | ||
89 | u32 count) | ||
90 | { | ||
91 | u32 i = 0; | ||
92 | u32 newtail, error, val; | ||
93 | |||
94 | while (i < count) { | ||
95 | i++; | ||
96 | i40iw_get_cqp_reg_info(cqp, &val, &newtail, &error); | ||
97 | if (error) { | ||
98 | error = (cqp->dev->is_pf) ? | ||
99 | i40iw_rd32(cqp->dev->hw, I40E_PFPE_CQPERRCODES) : | ||
100 | i40iw_rd32(cqp->dev->hw, I40E_VFPE_CQPERRCODES1); | ||
101 | return I40IW_ERR_CQP_COMPL_ERROR; | ||
102 | } | ||
103 | if (newtail != tail) { | ||
104 | /* SUCCESS */ | ||
105 | I40IW_RING_MOVE_TAIL(cqp->sq_ring); | ||
106 | return 0; | ||
107 | } | ||
108 | udelay(I40IW_SLEEP_COUNT); | ||
109 | } | ||
110 | return I40IW_ERR_TIMEOUT; | ||
111 | } | ||
112 | |||
113 | /** | ||
114 | * i40iw_sc_parse_fpm_commit_buf - parse fpm commit buffer | ||
115 | * @buf: ptr to fpm commit buffer | ||
116 | * @info: ptr to i40iw_hmc_obj_info struct | ||
117 | * | ||
118 | * parses fpm commit info and copy base value | ||
119 | * of hmc objects in hmc_info | ||
120 | */ | ||
121 | static enum i40iw_status_code i40iw_sc_parse_fpm_commit_buf( | ||
122 | u64 *buf, | ||
123 | struct i40iw_hmc_obj_info *info) | ||
124 | { | ||
125 | u64 temp; | ||
126 | u32 i, j; | ||
127 | u32 low; | ||
128 | |||
129 | /* copy base values in obj_info */ | ||
130 | for (i = I40IW_HMC_IW_QP, j = 0; | ||
131 | i <= I40IW_HMC_IW_PBLE; i++, j += 8) { | ||
132 | get_64bit_val(buf, j, &temp); | ||
133 | info[i].base = RS_64_1(temp, 32) * 512; | ||
134 | low = (u32)(temp); | ||
135 | if (low) | ||
136 | info[i].cnt = low; | ||
137 | } | ||
138 | return 0; | ||
139 | } | ||
140 | |||
/**
 * i40iw_sc_parse_fpm_query_buf() - parses fpm query buffer
 * @buf: ptr to fpm query buffer
 * @hmc_info: ptr to i40iw_hmc_info struct
 * @hmc_fpm_misc: ptr to fpm data
 *
 * parses fpm query buffer and copy max_cnt and
 * size value of hmc objects in hmc_info
 *
 * Returns I40IW_ERR_INVALID_SIZE if the XF or Q1 block size reported
 * by the firmware is zero, 0 otherwise.
 */
static enum i40iw_status_code i40iw_sc_parse_fpm_query_buf(
				u64 *buf,
				struct i40iw_hmc_info *hmc_info,
				struct i40iw_hmc_fpm_misc *hmc_fpm_misc)
{
	u64 temp;
	struct i40iw_hmc_obj_info *obj_info;
	u32 i, j, size;
	u16 max_pe_sds;

	obj_info = hmc_info->hmc_obj;

	/* qword 0: first SD index and max SD count */
	get_64bit_val(buf, 0, &temp);
	hmc_info->first_sd_index = (u16)RS_64(temp, I40IW_QUERY_FPM_FIRST_PE_SD_INDEX);
	max_pe_sds = (u16)RS_64(temp, I40IW_QUERY_FPM_MAX_PE_SDS);

	/* Reduce SD count for VFs by 1 to account for PBLE backing page rounding */
	if (hmc_info->hmc_fn_id >= I40IW_FIRST_VF_FPM_ID)
		max_pe_sds--;
	hmc_fpm_misc->max_sds = max_pe_sds;
	hmc_info->sd_table.sd_cnt = max_pe_sds + hmc_info->first_sd_index;

	/* objects QP..ARP: one qword each starting at offset 8;
	 * QP and CQ counts are bit-fields, the rest use the low 32 bits
	 */
	for (i = I40IW_HMC_IW_QP, j = 8;
	     i <= I40IW_HMC_IW_ARP; i++, j += 8) {
		get_64bit_val(buf, j, &temp);
		if (i == I40IW_HMC_IW_QP)
			obj_info[i].max_cnt = (u32)RS_64(temp, I40IW_QUERY_FPM_MAX_QPS);
		else if (i == I40IW_HMC_IW_CQ)
			obj_info[i].max_cnt = (u32)RS_64(temp, I40IW_QUERY_FPM_MAX_CQS);
		else
			obj_info[i].max_cnt = (u32)temp;

		/* upper 32 bits carry log2 of the object size */
		size = (u32)RS_64_1(temp, 32);
		obj_info[i].size = ((u64)1 << size);
	}
	/* objects MR..PBLE: one qword each starting at offset 48 */
	for (i = I40IW_HMC_IW_MR, j = 48;
	     i <= I40IW_HMC_IW_PBLE; i++, j += 8) {
		get_64bit_val(buf, j, &temp);
		obj_info[i].max_cnt = (u32)temp;
		size = (u32)RS_64_1(temp, 32);
		obj_info[i].size = LS_64_1(1, size);
	}

	/* NOTE(review): offset 120 is re-read three times below; the three
	 * fields all live in the same qword, so one read would suffice
	 */
	get_64bit_val(buf, 120, &temp);
	hmc_fpm_misc->max_ceqs = (u8)RS_64(temp, I40IW_QUERY_FPM_MAX_CEQS);
	get_64bit_val(buf, 120, &temp);
	hmc_fpm_misc->ht_multiplier = RS_64(temp, I40IW_QUERY_FPM_HTMULTIPLIER);
	get_64bit_val(buf, 120, &temp);
	hmc_fpm_misc->timer_bucket = RS_64(temp, I40IW_QUERY_FPM_TIMERBUCKET);
	get_64bit_val(buf, 64, &temp);
	hmc_fpm_misc->xf_block_size = RS_64(temp, I40IW_QUERY_FPM_XFBLOCKSIZE);
	if (!hmc_fpm_misc->xf_block_size)
		return I40IW_ERR_INVALID_SIZE;
	get_64bit_val(buf, 80, &temp);
	hmc_fpm_misc->q1_block_size = RS_64(temp, I40IW_QUERY_FPM_Q1BLOCKSIZE);
	if (!hmc_fpm_misc->q1_block_size)
		return I40IW_ERR_INVALID_SIZE;
	return 0;
}
209 | |||
210 | /** | ||
211 | * i40iw_sc_pd_init - initialize sc pd struct | ||
212 | * @dev: sc device struct | ||
213 | * @pd: sc pd ptr | ||
214 | * @pd_id: pd_id for allocated pd | ||
215 | */ | ||
216 | static void i40iw_sc_pd_init(struct i40iw_sc_dev *dev, | ||
217 | struct i40iw_sc_pd *pd, | ||
218 | u16 pd_id) | ||
219 | { | ||
220 | pd->size = sizeof(*pd); | ||
221 | pd->pd_id = pd_id; | ||
222 | pd->dev = dev; | ||
223 | } | ||
224 | |||
225 | /** | ||
226 | * i40iw_get_encoded_wqe_size - given wq size, returns hardware encoded size | ||
227 | * @wqsize: size of the wq (sq, rq, srq) to encoded_size | ||
228 | * @cqpsq: encoded size for sq for cqp as its encoded size is 1+ other wq's | ||
229 | */ | ||
230 | u8 i40iw_get_encoded_wqe_size(u32 wqsize, bool cqpsq) | ||
231 | { | ||
232 | u8 encoded_size = 0; | ||
233 | |||
234 | /* cqp sq's hw coded value starts from 1 for size of 4 | ||
235 | * while it starts from 0 for qp' wq's. | ||
236 | */ | ||
237 | if (cqpsq) | ||
238 | encoded_size = 1; | ||
239 | wqsize >>= 2; | ||
240 | while (wqsize >>= 1) | ||
241 | encoded_size++; | ||
242 | return encoded_size; | ||
243 | } | ||
244 | |||
/**
 * i40iw_sc_cqp_init - Initialize buffers for a control Queue Pair
 * @cqp: IWARP control queue pair pointer
 * @info: IWARP control queue pair init info pointer
 *
 * Initializes the object and context buffers for a control Queue Pair.
 * Returns I40IW_ERR_INVALID_SIZE unless the requested SQ size is a
 * power of two within [I40IW_CQP_SW_SQSIZE_4, I40IW_CQP_SW_SQSIZE_2048].
 */
static enum i40iw_status_code i40iw_sc_cqp_init(struct i40iw_sc_cqp *cqp,
						struct i40iw_cqp_init_info *info)
{
	u8 hw_sq_size;

	/* sq_size must be a power of two in the supported range;
	 * (x & (x - 1)) is non-zero for non-powers-of-two
	 */
	if ((info->sq_size > I40IW_CQP_SW_SQSIZE_2048) ||
	    (info->sq_size < I40IW_CQP_SW_SQSIZE_4) ||
	    ((info->sq_size & (info->sq_size - 1))))
		return I40IW_ERR_INVALID_SIZE;

	hw_sq_size = i40iw_get_encoded_wqe_size(info->sq_size, true);
	/* copy caller-provided buffers/parameters into the sc cqp */
	cqp->size = sizeof(*cqp);
	cqp->sq_size = info->sq_size;
	cqp->hw_sq_size = hw_sq_size;
	cqp->sq_base = info->sq;
	cqp->host_ctx = info->host_ctx;
	cqp->sq_pa = info->sq_pa;
	cqp->host_ctx_pa = info->host_ctx_pa;
	cqp->dev = info->dev;
	cqp->struct_ver = info->struct_ver;
	cqp->scratch_array = info->scratch_array;
	cqp->polarity = 0;
	cqp->en_datacenter_tcp = info->en_datacenter_tcp;
	cqp->enabled_vf_count = info->enabled_vf_count;
	cqp->hmc_profile = info->hmc_profile;
	info->dev->cqp = cqp;

	I40IW_RING_INIT(cqp->sq_ring, cqp->sq_size);
	i40iw_debug(cqp->dev, I40IW_DEBUG_WQE,
		    "%s: sq_size[%04d] hw_sq_size[%04d] sq_base[%p] sq_pa[%llxh] cqp[%p] polarity[x%04X]\n",
		    __func__, cqp->sq_size, cqp->hw_sq_size,
		    cqp->sq_base, cqp->sq_pa, cqp, cqp->polarity);
	return 0;
}
286 | |||
/**
 * i40iw_sc_cqp_create - create cqp during bringup
 * @cqp: struct for cqp hw
 * @disable_pfpdus: if pfpdu to be disabled
 * @maj_err: If error, major err number
 * @min_err: If error, minor err number
 *
 * Builds the CQP host context, programs the CCQP address registers and
 * polls CCQPSTATUS until hardware reports the CQP is up.  On timeout,
 * @maj_err/@min_err are filled from the CQPERRCODES register.
 * NOTE(review): @maj_err/@min_err are written only on the timeout path;
 * callers must not read them on other failures — confirm against callers.
 */
static enum i40iw_status_code i40iw_sc_cqp_create(struct i40iw_sc_cqp *cqp,
						  bool disable_pfpdus,
						  u16 *maj_err,
						  u16 *min_err)
{
	u64 temp;
	u32 cnt = 0, p1, p2, val = 0, err_code;
	enum i40iw_status_code ret_code;

	/* scratch DMA buffer used later for SD update commands */
	ret_code = i40iw_allocate_dma_mem(cqp->dev->hw,
					  &cqp->sdbuf,
					  128,
					  I40IW_SD_BUF_ALIGNMENT);

	if (ret_code)
		goto exit;

	/* host context qword 0: encoded SQ size and structure version */
	temp = LS_64(cqp->hw_sq_size, I40IW_CQPHC_SQSIZE) |
	       LS_64(cqp->struct_ver, I40IW_CQPHC_SVER);

	if (disable_pfpdus)
		temp |= LS_64(1, I40IW_CQPHC_DISABLE_PFPDUS);

	set_64bit_val(cqp->host_ctx, 0, temp);
	set_64bit_val(cqp->host_ctx, 8, cqp->sq_pa);
	temp = LS_64(cqp->enabled_vf_count, I40IW_CQPHC_ENABLED_VFS) |
	       LS_64(cqp->hmc_profile, I40IW_CQPHC_HMC_PROFILE);
	set_64bit_val(cqp->host_ctx, 16, temp);
	/* qword 3 holds a cookie back to this cqp for completion handling */
	set_64bit_val(cqp->host_ctx, 24, (uintptr_t)cqp);
	set_64bit_val(cqp->host_ctx, 32, 0);
	set_64bit_val(cqp->host_ctx, 40, 0);
	set_64bit_val(cqp->host_ctx, 48, 0);
	set_64bit_val(cqp->host_ctx, 56, 0);

	i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "CQP_HOST_CTX",
			cqp->host_ctx, I40IW_CQP_CTX_SIZE * 8);

	/* hand the 64-bit host context PA to hardware as two 32-bit writes */
	p1 = RS_32_1(cqp->host_ctx_pa, 32);
	p2 = (u32)cqp->host_ctx_pa;

	if (cqp->dev->is_pf) {
		i40iw_wr32(cqp->dev->hw, I40E_PFPE_CCQPHIGH, p1);
		i40iw_wr32(cqp->dev->hw, I40E_PFPE_CCQPLOW, p2);
	} else {
		i40iw_wr32(cqp->dev->hw, I40E_VFPE_CCQPHIGH1, p1);
		i40iw_wr32(cqp->dev->hw, I40E_VFPE_CCQPLOW1, p2);
	}
	/* poll until CCQPSTATUS goes non-zero (CQP created) or we time out */
	do {
		if (cnt++ > I40IW_DONE_COUNT) {
			i40iw_free_dma_mem(cqp->dev->hw, &cqp->sdbuf);
			ret_code = I40IW_ERR_TIMEOUT;
			/*
			 * read PFPE_CQPERRORCODES register to get the minor
			 * and major error code
			 */
			if (cqp->dev->is_pf)
				err_code = i40iw_rd32(cqp->dev->hw, I40E_PFPE_CQPERRCODES);
			else
				err_code = i40iw_rd32(cqp->dev->hw, I40E_VFPE_CQPERRCODES1);
			*min_err = RS_32(err_code, I40E_PFPE_CQPERRCODES_CQP_MINOR_CODE);
			*maj_err = RS_32(err_code, I40E_PFPE_CQPERRCODES_CQP_MAJOR_CODE);
			goto exit;
		}
		udelay(I40IW_SLEEP_COUNT);
		if (cqp->dev->is_pf)
			val = i40iw_rd32(cqp->dev->hw, I40E_PFPE_CCQPSTATUS);
		else
			val = i40iw_rd32(cqp->dev->hw, I40E_VFPE_CCQPSTATUS1);
	} while (!val);

exit:
	if (!ret_code)
		cqp->process_cqp_sds = i40iw_update_sds_noccq;
	return ret_code;
}
369 | |||
370 | /** | ||
371 | * i40iw_sc_cqp_post_sq - post of cqp's sq | ||
372 | * @cqp: struct for cqp hw | ||
373 | */ | ||
374 | void i40iw_sc_cqp_post_sq(struct i40iw_sc_cqp *cqp) | ||
375 | { | ||
376 | if (cqp->dev->is_pf) | ||
377 | i40iw_wr32(cqp->dev->hw, I40E_PFPE_CQPDB, I40IW_RING_GETCURRENT_HEAD(cqp->sq_ring)); | ||
378 | else | ||
379 | i40iw_wr32(cqp->dev->hw, I40E_VFPE_CQPDB1, I40IW_RING_GETCURRENT_HEAD(cqp->sq_ring)); | ||
380 | |||
381 | i40iw_debug(cqp->dev, | ||
382 | I40IW_DEBUG_WQE, | ||
383 | "%s: HEAD_TAIL[%04d,%04d,%04d]\n", | ||
384 | __func__, | ||
385 | cqp->sq_ring.head, | ||
386 | cqp->sq_ring.tail, | ||
387 | cqp->sq_ring.size); | ||
388 | } | ||
389 | |||
/**
 * i40iw_sc_cqp_get_next_send_wqe - get next wqe on cqp sq
 * @cqp: struct for cqp hw
 * @scratch: caller cookie saved into scratch_array, returned to the
 *           caller via the matching CQ completion entry
 *
 * Returns a pointer to the next free SQ WQE (initialized via
 * I40IW_CQP_INIT_WQE), or NULL if the ring is full or the atomic
 * head move fails.  Flips the polarity each time the ring wraps.
 */
u64 *i40iw_sc_cqp_get_next_send_wqe(struct i40iw_sc_cqp *cqp, u64 scratch)
{
	u64 *wqe = NULL;
	u32 wqe_idx;
	enum i40iw_status_code ret_code;

	if (I40IW_RING_FULL_ERR(cqp->sq_ring)) {
		i40iw_debug(cqp->dev,
			    I40IW_DEBUG_WQE,
			    "%s: ring is full head %x tail %x size %x\n",
			    __func__,
			    cqp->sq_ring.head,
			    cqp->sq_ring.tail,
			    cqp->sq_ring.size);
		return NULL;
	}
	I40IW_ATOMIC_RING_MOVE_HEAD(cqp->sq_ring, wqe_idx, ret_code);
	if (ret_code)
		return NULL;
	/* head wrapped to slot 0: toggle the valid-bit polarity */
	if (!wqe_idx)
		cqp->polarity = !cqp->polarity;

	wqe = cqp->sq_base[wqe_idx].elem;
	cqp->scratch_array[wqe_idx] = scratch;
	I40IW_CQP_INIT_WQE(wqe);

	return wqe;
}
423 | |||
/**
 * i40iw_sc_cqp_destroy - destroy cqp during close
 * @cqp: struct for cqp hw
 *
 * Zeroes the CCQP address registers, which tells hardware to tear the
 * CQP down, then polls CCQPSTATUS until it reads zero (destroyed) or a
 * timeout occurs.  The SD scratch buffer is freed in either case.
 */
static enum i40iw_status_code i40iw_sc_cqp_destroy(struct i40iw_sc_cqp *cqp)
{
	u32 cnt = 0, val = 1;
	enum i40iw_status_code ret_code = 0;
	u32 cqpstat_addr;

	if (cqp->dev->is_pf) {
		i40iw_wr32(cqp->dev->hw, I40E_PFPE_CCQPHIGH, 0);
		i40iw_wr32(cqp->dev->hw, I40E_PFPE_CCQPLOW, 0);
		cqpstat_addr = I40E_PFPE_CCQPSTATUS;
	} else {
		i40iw_wr32(cqp->dev->hw, I40E_VFPE_CCQPHIGH1, 0);
		i40iw_wr32(cqp->dev->hw, I40E_VFPE_CCQPLOW1, 0);
		cqpstat_addr = I40E_VFPE_CCQPSTATUS1;
	}
	/* wait for hardware to report the CQP as destroyed (status == 0) */
	do {
		if (cnt++ > I40IW_DONE_COUNT) {
			ret_code = I40IW_ERR_TIMEOUT;
			break;
		}
		udelay(I40IW_SLEEP_COUNT);
		val = i40iw_rd32(cqp->dev->hw, cqpstat_addr);
	} while (val);

	i40iw_free_dma_mem(cqp->dev->hw, &cqp->sdbuf);
	return ret_code;
}
455 | |||
/**
 * i40iw_sc_ccq_arm - enable intr for control cq
 * @ccq: ccq sc struct
 *
 * Read-modify-writes the CQ doorbell shadow area: bumps the arm
 * sequence number, sets ARM_NEXT, then writes the CQARM register so
 * hardware raises an interrupt on the next CCQ completion.
 */
static void i40iw_sc_ccq_arm(struct i40iw_sc_cq *ccq)
{
	u64 temp_val;
	u16 sw_cq_sel;
	u8 arm_next_se;
	u8 arm_seq_num;

	/* write to cq doorbell shadow area */
	/* arm next se should always be zero */
	get_64bit_val(ccq->cq_uk.shadow_area, 32, &temp_val);

	/* preserve the existing select / arm-next-se fields */
	sw_cq_sel = (u16)RS_64(temp_val, I40IW_CQ_DBSA_SW_CQ_SELECT);
	arm_next_se = (u8)RS_64(temp_val, I40IW_CQ_DBSA_ARM_NEXT_SE);

	arm_seq_num = (u8)RS_64(temp_val, I40IW_CQ_DBSA_ARM_SEQ_NUM);
	arm_seq_num++;

	temp_val = LS_64(arm_seq_num, I40IW_CQ_DBSA_ARM_SEQ_NUM) |
		   LS_64(sw_cq_sel, I40IW_CQ_DBSA_SW_CQ_SELECT) |
		   LS_64(arm_next_se, I40IW_CQ_DBSA_ARM_NEXT_SE) |
		   LS_64(1, I40IW_CQ_DBSA_ARM_NEXT);

	set_64bit_val(ccq->cq_uk.shadow_area, 32, temp_val);

	wmb(); /* make sure shadow area is updated before arming */

	if (ccq->dev->is_pf)
		i40iw_wr32(ccq->dev->hw, I40E_PFPE_CQARM, ccq->cq_uk.cq_id);
	else
		i40iw_wr32(ccq->dev->hw, I40E_VFPE_CQARM1, ccq->cq_uk.cq_id);
}
491 | |||
/**
 * i40iw_sc_ccq_get_cqe_info - get ccq's cq entry
 * @ccq: ccq sc struct
 * @info: completion q entry to return
 *
 * Returns I40IW_ERR_QUEUE_EMPTY when the valid-bit polarity of the
 * current CQE does not match the ring's, i.e. no new completion.
 * Otherwise decodes the CQE, recovers the posting cqp and the caller's
 * scratch cookie, and advances both the CCQ ring and the CQP SQ tail.
 */
static enum i40iw_status_code i40iw_sc_ccq_get_cqe_info(
					struct i40iw_sc_cq *ccq,
					struct i40iw_ccq_cqe_info *info)
{
	u64 qp_ctx, temp, temp1;
	u64 *cqe;
	struct i40iw_sc_cqp *cqp;
	u32 wqe_idx;
	u8 polarity;
	enum i40iw_status_code ret_code = 0;

	if (ccq->cq_uk.avoid_mem_cflct)
		cqe = (u64 *)I40IW_GET_CURRENT_EXTENDED_CQ_ELEMENT(&ccq->cq_uk);
	else
		cqe = (u64 *)I40IW_GET_CURRENT_CQ_ELEMENT(&ccq->cq_uk);

	get_64bit_val(cqe, 24, &temp);
	polarity = (u8)RS_64(temp, I40IW_CQ_VALID);
	if (polarity != ccq->cq_uk.polarity)
		return I40IW_ERR_QUEUE_EMPTY;

	/* qword 1 carries the cqp pointer stored when the WQE was posted */
	get_64bit_val(cqe, 8, &qp_ctx);
	cqp = (struct i40iw_sc_cqp *)(unsigned long)qp_ctx;
	info->error = (bool)RS_64(temp, I40IW_CQ_ERROR);
	info->min_err_code = (u16)RS_64(temp, I40IW_CQ_MINERR);
	if (info->error) {
		info->maj_err_code = (u16)RS_64(temp, I40IW_CQ_MAJERR);
		/* NOTE(review): min_err_code is already set above; this
		 * duplicate assignment is redundant but harmless
		 */
		info->min_err_code = (u16)RS_64(temp, I40IW_CQ_MINERR);
	}
	wqe_idx = (u32)RS_64(temp, I40IW_CQ_WQEIDX);
	info->scratch = cqp->scratch_array[wqe_idx];

	get_64bit_val(cqe, 16, &temp1);
	info->op_ret_val = (u32)RS_64(temp1, I40IW_CCQ_OPRETVAL);
	/* the opcode lives in the original SQ WQE, not the CQE */
	get_64bit_val(cqp->sq_base[wqe_idx].elem, 24, &temp1);
	info->op_code = (u8)RS_64(temp1, I40IW_CQPSQ_OPCODE);
	info->cqp = cqp;

	/* move the head for cq */
	I40IW_RING_MOVE_HEAD(ccq->cq_uk.cq_ring, ret_code);
	if (I40IW_RING_GETCURRENT_HEAD(ccq->cq_uk.cq_ring) == 0)
		ccq->cq_uk.polarity ^= 1;

	/* update cq tail in cq shadow memory also */
	I40IW_RING_MOVE_TAIL(ccq->cq_uk.cq_ring);
	set_64bit_val(ccq->cq_uk.shadow_area,
		      0,
		      I40IW_RING_GETCURRENT_HEAD(ccq->cq_uk.cq_ring));
	wmb(); /* write shadow area before tail */
	I40IW_RING_MOVE_TAIL(cqp->sq_ring);
	return ret_code;
}
549 | |||
550 | /** | ||
551 | * i40iw_sc_poll_for_cqp_op_done - Waits for last write to complete in CQP SQ | ||
552 | * @cqp: struct for cqp hw | ||
553 | * @op_code: cqp opcode for completion | ||
554 | * @info: completion q entry to return | ||
555 | */ | ||
556 | static enum i40iw_status_code i40iw_sc_poll_for_cqp_op_done( | ||
557 | struct i40iw_sc_cqp *cqp, | ||
558 | u8 op_code, | ||
559 | struct i40iw_ccq_cqe_info *compl_info) | ||
560 | { | ||
561 | struct i40iw_ccq_cqe_info info; | ||
562 | struct i40iw_sc_cq *ccq; | ||
563 | enum i40iw_status_code ret_code = 0; | ||
564 | u32 cnt = 0; | ||
565 | |||
566 | memset(&info, 0, sizeof(info)); | ||
567 | ccq = cqp->dev->ccq; | ||
568 | while (1) { | ||
569 | if (cnt++ > I40IW_DONE_COUNT) | ||
570 | return I40IW_ERR_TIMEOUT; | ||
571 | |||
572 | if (i40iw_sc_ccq_get_cqe_info(ccq, &info)) { | ||
573 | udelay(I40IW_SLEEP_COUNT); | ||
574 | continue; | ||
575 | } | ||
576 | |||
577 | if (info.error) { | ||
578 | ret_code = I40IW_ERR_CQP_COMPL_ERROR; | ||
579 | break; | ||
580 | } | ||
581 | /* check if opcode is cq create */ | ||
582 | if (op_code != info.op_code) { | ||
583 | i40iw_debug(cqp->dev, I40IW_DEBUG_WQE, | ||
584 | "%s: opcode mismatch for my op code 0x%x, returned opcode %x\n", | ||
585 | __func__, op_code, info.op_code); | ||
586 | } | ||
587 | /* success, exit out of the loop */ | ||
588 | if (op_code == info.op_code) | ||
589 | break; | ||
590 | } | ||
591 | |||
592 | if (compl_info) | ||
593 | memcpy(compl_info, &info, sizeof(*compl_info)); | ||
594 | |||
595 | return ret_code; | ||
596 | } | ||
597 | |||
/**
 * i40iw_sc_manage_push_page - Handle push page
 * @cqp: struct for cqp hw
 * @info: push page info (push index, qs handle, allocate vs free)
 * @scratch: u64 saved to be used during cqp completion
 * @post_sq: flag for cqp db to ring
 *
 * Builds a MANAGE_PUSH_PAGES WQE.  Returns
 * I40IW_ERR_INVALID_PUSH_PAGE_INDEX for an out-of-range index and
 * I40IW_ERR_RING_FULL when no SQ WQE is available.
 */
static enum i40iw_status_code i40iw_sc_manage_push_page(
				struct i40iw_sc_cqp *cqp,
				struct i40iw_cqp_manage_push_page_info *info,
				u64 scratch,
				bool post_sq)
{
	u64 *wqe;
	u64 header;

	if (info->push_idx >= I40IW_MAX_PUSH_PAGE_COUNT)
		return I40IW_ERR_INVALID_PUSH_PAGE_INDEX;

	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return I40IW_ERR_RING_FULL;

	set_64bit_val(wqe, 16, info->qs_handle);

	header = LS_64(info->push_idx, I40IW_CQPSQ_MPP_PPIDX) |
		 LS_64(I40IW_CQP_OP_MANAGE_PUSH_PAGES, I40IW_CQPSQ_OPCODE) |
		 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID) |
		 LS_64(info->free_page, I40IW_CQPSQ_MPP_FREE_PAGE);

	/* header written last; sets the valid bit */
	i40iw_insert_wqe_hdr(wqe, header);

	i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "MANAGE_PUSH_PAGES WQE",
			wqe, I40IW_CQP_WQE_SIZE * 8);

	if (post_sq)
		i40iw_sc_cqp_post_sq(cqp);
	return 0;
}
637 | |||
/**
 * i40iw_sc_manage_hmc_pm_func_table - manage of function table
 * @cqp: struct for cqp hw
 * @scratch: u64 saved to be used during cqp completion
 * @vf_index: vf index for cqp
 * @free_pm_fcn: true to free the PM function entry, false to allocate
 * @post_sq: flag for cqp db to ring
 *
 * Builds a MANAGE_HMC_PM_FUNC_TABLE WQE.  Returns
 * I40IW_ERR_INVALID_VF_ID for an out-of-range VF index and
 * I40IW_ERR_RING_FULL when no SQ WQE is available.
 */
static enum i40iw_status_code i40iw_sc_manage_hmc_pm_func_table(
				struct i40iw_sc_cqp *cqp,
				u64 scratch,
				u8 vf_index,
				bool free_pm_fcn,
				bool post_sq)
{
	u64 *wqe;
	u64 header;

	if (vf_index >= I40IW_MAX_VF_PER_PF)
		return I40IW_ERR_INVALID_VF_ID;
	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return I40IW_ERR_RING_FULL;

	header = LS_64(vf_index, I40IW_CQPSQ_MHMC_VFIDX) |
		 LS_64(I40IW_CQP_OP_MANAGE_HMC_PM_FUNC_TABLE, I40IW_CQPSQ_OPCODE) |
		 LS_64(free_pm_fcn, I40IW_CQPSQ_MHMC_FREEPMFN) |
		 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);

	/* header written last; sets the valid bit */
	i40iw_insert_wqe_hdr(wqe, header);
	i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "MANAGE_HMC_PM_FUNC_TABLE WQE",
			wqe, I40IW_CQP_WQE_SIZE * 8);
	if (post_sq)
		i40iw_sc_cqp_post_sq(cqp);
	return 0;
}
674 | |||
675 | /** | ||
676 | * i40iw_sc_set_hmc_resource_profile - cqp wqe for hmc profile | ||
677 | * @cqp: struct for cqp hw | ||
678 | * @scratch: u64 saved to be used during cqp completion | ||
679 | * @hmc_profile_type: type of profile to set | ||
680 | * @vf_num: vf number for profile | ||
681 | * @post_sq: flag for cqp db to ring | ||
682 | * @poll_registers: flag to poll register for cqp completion | ||
683 | */ | ||
684 | static enum i40iw_status_code i40iw_sc_set_hmc_resource_profile( | ||
685 | struct i40iw_sc_cqp *cqp, | ||
686 | u64 scratch, | ||
687 | u8 hmc_profile_type, | ||
688 | u8 vf_num, bool post_sq, | ||
689 | bool poll_registers) | ||
690 | { | ||
691 | u64 *wqe; | ||
692 | u64 header; | ||
693 | u32 val, tail, error; | ||
694 | enum i40iw_status_code ret_code = 0; | ||
695 | |||
696 | wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch); | ||
697 | if (!wqe) | ||
698 | return I40IW_ERR_RING_FULL; | ||
699 | |||
700 | set_64bit_val(wqe, 16, | ||
701 | (LS_64(hmc_profile_type, I40IW_CQPSQ_SHMCRP_HMC_PROFILE) | | ||
702 | LS_64(vf_num, I40IW_CQPSQ_SHMCRP_VFNUM))); | ||
703 | |||
704 | header = LS_64(I40IW_CQP_OP_SET_HMC_RESOURCE_PROFILE, I40IW_CQPSQ_OPCODE) | | ||
705 | LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID); | ||
706 | |||
707 | i40iw_insert_wqe_hdr(wqe, header); | ||
708 | |||
709 | i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "MANAGE_HMC_PM_FUNC_TABLE WQE", | ||
710 | wqe, I40IW_CQP_WQE_SIZE * 8); | ||
711 | |||
712 | i40iw_get_cqp_reg_info(cqp, &val, &tail, &error); | ||
713 | if (error) | ||
714 | return I40IW_ERR_CQP_COMPL_ERROR; | ||
715 | |||
716 | if (post_sq) { | ||
717 | i40iw_sc_cqp_post_sq(cqp); | ||
718 | if (poll_registers) | ||
719 | ret_code = i40iw_cqp_poll_registers(cqp, tail, 1000000); | ||
720 | else | ||
721 | ret_code = i40iw_sc_poll_for_cqp_op_done(cqp, | ||
722 | I40IW_CQP_OP_SHMC_PAGES_ALLOCATED, | ||
723 | NULL); | ||
724 | } | ||
725 | |||
726 | return ret_code; | ||
727 | } | ||
728 | |||
/**
 * i40iw_sc_manage_hmc_pm_func_table_done - wait for cqp wqe completion for function table
 * @cqp: struct for cqp hw
 *
 * Convenience wrapper: polls the CCQ for a MANAGE_HMC_PM_FUNC_TABLE
 * completion, discarding the completion info.
 */
static enum i40iw_status_code i40iw_sc_manage_hmc_pm_func_table_done(struct i40iw_sc_cqp *cqp)
{
	return i40iw_sc_poll_for_cqp_op_done(cqp, I40IW_CQP_OP_MANAGE_HMC_PM_FUNC_TABLE, NULL);
}
737 | |||
/**
 * i40iw_sc_commit_fpm_values_done - wait for cqp wqe completion for fpm commit
 * @cqp: struct for cqp hw
 *
 * Convenience wrapper: polls the CCQ for a COMMIT_FPM_VALUES
 * completion, discarding the completion info.
 */
static enum i40iw_status_code i40iw_sc_commit_fpm_values_done(struct i40iw_sc_cqp *cqp)
{
	return i40iw_sc_poll_for_cqp_op_done(cqp, I40IW_CQP_OP_COMMIT_FPM_VALUES, NULL);
}
746 | |||
/**
 * i40iw_sc_commit_fpm_values - cqp wqe for commit fpm values
 * @cqp: struct for cqp hw
 * @scratch: u64 saved to be used during cqp completion
 * @hmc_fn_id: hmc function id
 * @commit_fpm_mem: Memory for fpm values
 * @post_sq: flag for cqp db to ring
 * @wait_type: poll ccq or cqp registers for cqp completion
 *
 * Builds a COMMIT_FPM_VALUES WQE pointing at @commit_fpm_mem and, when
 * @post_sq is set, waits per @wait_type (register poll or CCQ poll).
 */
static enum i40iw_status_code i40iw_sc_commit_fpm_values(
				struct i40iw_sc_cqp *cqp,
				u64 scratch,
				u8 hmc_fn_id,
				struct i40iw_dma_mem *commit_fpm_mem,
				bool post_sq,
				u8 wait_type)
{
	u64 *wqe;
	u64 header;
	u32 tail, val, error;
	enum i40iw_status_code ret_code = 0;

	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return I40IW_ERR_RING_FULL;

	set_64bit_val(wqe, 16, hmc_fn_id);
	set_64bit_val(wqe, 32, commit_fpm_mem->pa);

	header = LS_64(I40IW_CQP_OP_COMMIT_FPM_VALUES, I40IW_CQPSQ_OPCODE) |
		 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);

	/* header written last; sets the valid bit */
	i40iw_insert_wqe_hdr(wqe, header);

	i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "COMMIT_FPM_VALUES WQE",
			wqe, I40IW_CQP_WQE_SIZE * 8);

	/* snapshot tail before the doorbell so register polling works */
	i40iw_get_cqp_reg_info(cqp, &val, &tail, &error);
	if (error)
		return I40IW_ERR_CQP_COMPL_ERROR;

	if (post_sq) {
		i40iw_sc_cqp_post_sq(cqp);

		if (wait_type == I40IW_CQP_WAIT_POLL_REGS)
			ret_code = i40iw_cqp_poll_registers(cqp, tail, I40IW_DONE_COUNT);
		else if (wait_type == I40IW_CQP_WAIT_POLL_CQ)
			ret_code = i40iw_sc_commit_fpm_values_done(cqp);
	}

	return ret_code;
}
799 | |||
/**
 * i40iw_sc_query_fpm_values_done - poll for cqp wqe completion for query fpm
 * @cqp: struct for cqp hw
 *
 * Convenience wrapper: polls the CCQ for a QUERY_FPM_VALUES
 * completion, discarding the completion info.
 */
static enum i40iw_status_code i40iw_sc_query_fpm_values_done(struct i40iw_sc_cqp *cqp)
{
	return i40iw_sc_poll_for_cqp_op_done(cqp, I40IW_CQP_OP_QUERY_FPM_VALUES, NULL);
}
808 | |||
/**
 * i40iw_sc_query_fpm_values - cqp wqe query fpm values
 * @cqp: struct for cqp hw
 * @scratch: u64 saved to be used during cqp completion
 * @hmc_fn_id: hmc function id
 * @query_fpm_mem: memory for return fpm values
 * @post_sq: flag for cqp db to ring
 * @wait_type: poll ccq or cqp registers for cqp completion
 *
 * Builds a QUERY_FPM_VALUES WQE; hardware writes the results into
 * @query_fpm_mem.  When @post_sq is set, waits per @wait_type
 * (register poll or CCQ poll).
 */
static enum i40iw_status_code i40iw_sc_query_fpm_values(
				struct i40iw_sc_cqp *cqp,
				u64 scratch,
				u8 hmc_fn_id,
				struct i40iw_dma_mem *query_fpm_mem,
				bool post_sq,
				u8 wait_type)
{
	u64 *wqe;
	u64 header;
	u32 tail, val, error;
	enum i40iw_status_code ret_code = 0;

	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return I40IW_ERR_RING_FULL;

	set_64bit_val(wqe, 16, hmc_fn_id);
	set_64bit_val(wqe, 32, query_fpm_mem->pa);

	header = LS_64(I40IW_CQP_OP_QUERY_FPM_VALUES, I40IW_CQPSQ_OPCODE) |
		 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);

	/* header written last; sets the valid bit */
	i40iw_insert_wqe_hdr(wqe, header);

	i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "QUERY_FPM WQE",
			wqe, I40IW_CQP_WQE_SIZE * 8);

	/* read the tail from CQP_TAIL register */
	i40iw_get_cqp_reg_info(cqp, &val, &tail, &error);

	if (error)
		return I40IW_ERR_CQP_COMPL_ERROR;

	if (post_sq) {
		i40iw_sc_cqp_post_sq(cqp);
		if (wait_type == I40IW_CQP_WAIT_POLL_REGS)
			ret_code = i40iw_cqp_poll_registers(cqp, tail, I40IW_DONE_COUNT);
		else if (wait_type == I40IW_CQP_WAIT_POLL_CQ)
			ret_code = i40iw_sc_query_fpm_values_done(cqp);
	}

	return ret_code;
}
862 | |||
863 | /** | ||
864 | * i40iw_sc_add_arp_cache_entry - cqp wqe add arp cache entry | ||
865 | * @cqp: struct for cqp hw | ||
866 | * @info: arp entry information | ||
867 | * @scratch: u64 saved to be used during cqp completion | ||
868 | * @post_sq: flag for cqp db to ring | ||
869 | */ | ||
870 | static enum i40iw_status_code i40iw_sc_add_arp_cache_entry( | ||
871 | struct i40iw_sc_cqp *cqp, | ||
872 | struct i40iw_add_arp_cache_entry_info *info, | ||
873 | u64 scratch, | ||
874 | bool post_sq) | ||
875 | { | ||
876 | u64 *wqe; | ||
877 | u64 temp, header; | ||
878 | |||
879 | wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch); | ||
880 | if (!wqe) | ||
881 | return I40IW_ERR_RING_FULL; | ||
882 | set_64bit_val(wqe, 8, info->reach_max); | ||
883 | |||
884 | temp = info->mac_addr[5] | | ||
885 | LS_64_1(info->mac_addr[4], 8) | | ||
886 | LS_64_1(info->mac_addr[3], 16) | | ||
887 | LS_64_1(info->mac_addr[2], 24) | | ||
888 | LS_64_1(info->mac_addr[1], 32) | | ||
889 | LS_64_1(info->mac_addr[0], 40); | ||
890 | |||
891 | set_64bit_val(wqe, 16, temp); | ||
892 | |||
893 | header = info->arp_index | | ||
894 | LS_64(I40IW_CQP_OP_MANAGE_ARP, I40IW_CQPSQ_OPCODE) | | ||
895 | LS_64((info->permanent ? 1 : 0), I40IW_CQPSQ_MAT_PERMANENT) | | ||
896 | LS_64(1, I40IW_CQPSQ_MAT_ENTRYVALID) | | ||
897 | LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID); | ||
898 | |||
899 | i40iw_insert_wqe_hdr(wqe, header); | ||
900 | |||
901 | i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "ARP_CACHE_ENTRY WQE", | ||
902 | wqe, I40IW_CQP_WQE_SIZE * 8); | ||
903 | |||
904 | if (post_sq) | ||
905 | i40iw_sc_cqp_post_sq(cqp); | ||
906 | return 0; | ||
907 | } | ||
908 | |||
909 | /** | ||
910 | * i40iw_sc_del_arp_cache_entry - dele arp cache entry | ||
911 | * @cqp: struct for cqp hw | ||
912 | * @scratch: u64 saved to be used during cqp completion | ||
913 | * @arp_index: arp index to delete arp entry | ||
914 | * @post_sq: flag for cqp db to ring | ||
915 | */ | ||
916 | static enum i40iw_status_code i40iw_sc_del_arp_cache_entry( | ||
917 | struct i40iw_sc_cqp *cqp, | ||
918 | u64 scratch, | ||
919 | u16 arp_index, | ||
920 | bool post_sq) | ||
921 | { | ||
922 | u64 *wqe; | ||
923 | u64 header; | ||
924 | |||
925 | wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch); | ||
926 | if (!wqe) | ||
927 | return I40IW_ERR_RING_FULL; | ||
928 | |||
929 | header = arp_index | | ||
930 | LS_64(I40IW_CQP_OP_MANAGE_ARP, I40IW_CQPSQ_OPCODE) | | ||
931 | LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID); | ||
932 | i40iw_insert_wqe_hdr(wqe, header); | ||
933 | |||
934 | i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "ARP_CACHE_DEL_ENTRY WQE", | ||
935 | wqe, I40IW_CQP_WQE_SIZE * 8); | ||
936 | |||
937 | if (post_sq) | ||
938 | i40iw_sc_cqp_post_sq(cqp); | ||
939 | return 0; | ||
940 | } | ||
941 | |||
942 | /** | ||
943 | * i40iw_sc_query_arp_cache_entry - cqp wqe to query arp and arp index | ||
944 | * @cqp: struct for cqp hw | ||
945 | * @scratch: u64 saved to be used during cqp completion | ||
946 | * @arp_index: arp index to delete arp entry | ||
947 | * @post_sq: flag for cqp db to ring | ||
948 | */ | ||
949 | static enum i40iw_status_code i40iw_sc_query_arp_cache_entry( | ||
950 | struct i40iw_sc_cqp *cqp, | ||
951 | u64 scratch, | ||
952 | u16 arp_index, | ||
953 | bool post_sq) | ||
954 | { | ||
955 | u64 *wqe; | ||
956 | u64 header; | ||
957 | |||
958 | wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch); | ||
959 | if (!wqe) | ||
960 | return I40IW_ERR_RING_FULL; | ||
961 | |||
962 | header = arp_index | | ||
963 | LS_64(I40IW_CQP_OP_MANAGE_ARP, I40IW_CQPSQ_OPCODE) | | ||
964 | LS_64(1, I40IW_CQPSQ_MAT_QUERY) | | ||
965 | LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID); | ||
966 | |||
967 | i40iw_insert_wqe_hdr(wqe, header); | ||
968 | |||
969 | i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "QUERY_ARP_CACHE_ENTRY WQE", | ||
970 | wqe, I40IW_CQP_WQE_SIZE * 8); | ||
971 | |||
972 | if (post_sq) | ||
973 | i40iw_sc_cqp_post_sq(cqp); | ||
974 | return 0; | ||
975 | } | ||
976 | |||
977 | /** | ||
978 | * i40iw_sc_manage_apbvt_entry - for adding and deleting apbvt entries | ||
979 | * @cqp: struct for cqp hw | ||
980 | * @info: info for apbvt entry to add or delete | ||
981 | * @scratch: u64 saved to be used during cqp completion | ||
982 | * @post_sq: flag for cqp db to ring | ||
983 | */ | ||
984 | static enum i40iw_status_code i40iw_sc_manage_apbvt_entry( | ||
985 | struct i40iw_sc_cqp *cqp, | ||
986 | struct i40iw_apbvt_info *info, | ||
987 | u64 scratch, | ||
988 | bool post_sq) | ||
989 | { | ||
990 | u64 *wqe; | ||
991 | u64 header; | ||
992 | |||
993 | wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch); | ||
994 | if (!wqe) | ||
995 | return I40IW_ERR_RING_FULL; | ||
996 | |||
997 | set_64bit_val(wqe, 16, info->port); | ||
998 | |||
999 | header = LS_64(I40IW_CQP_OP_MANAGE_APBVT, I40IW_CQPSQ_OPCODE) | | ||
1000 | LS_64(info->add, I40IW_CQPSQ_MAPT_ADDPORT) | | ||
1001 | LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID); | ||
1002 | |||
1003 | i40iw_insert_wqe_hdr(wqe, header); | ||
1004 | |||
1005 | i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "MANAGE_APBVT WQE", | ||
1006 | wqe, I40IW_CQP_WQE_SIZE * 8); | ||
1007 | |||
1008 | if (post_sq) | ||
1009 | i40iw_sc_cqp_post_sq(cqp); | ||
1010 | return 0; | ||
1011 | } | ||
1012 | |||
/**
 * i40iw_sc_manage_qhash_table_entry - manage quad hash entries
 * @cqp: struct for cqp hw
 * @info: info for quad hash to manage
 * @scratch: u64 saved to be used during cqp completion
 * @post_sq: flag for cqp db to ring
 *
 * This is called before connection establishment is started. For passive connections, when
 * listener is created, it will call with entry type of I40IW_QHASH_TYPE_TCP_SYN with local
 * ip address and tcp port. When SYN is received (passive connections) or
 * sent (active connections), this routine is called with entry type of
 * I40IW_QHASH_TYPE_TCP_ESTABLISHED and quad is passed in info.
 *
 * When iwarp connection is done and its state moves to RTS, the quad hash entry in
 * the hardware will point to iwarp's qp number and requires no calls from the driver.
 */
static enum i40iw_status_code i40iw_sc_manage_qhash_table_entry(
				struct i40iw_sc_cqp *cqp,
				struct i40iw_qhash_table_info *info,
				u64 scratch,
				bool post_sq)
{
	u64 *wqe;
	u64 qw1 = 0;
	u64 qw2 = 0;
	u64 temp;

	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return I40IW_ERR_RING_FULL;

	/* qword 0: mac address packed highest octet first */
	temp = info->mac_addr[5] |
		LS_64_1(info->mac_addr[4], 8) |
		LS_64_1(info->mac_addr[3], 16) |
		LS_64_1(info->mac_addr[2], 24) |
		LS_64_1(info->mac_addr[1], 32) |
		LS_64_1(info->mac_addr[0], 40);

	set_64bit_val(wqe, 0, temp);

	qw1 = LS_64(info->qp_num, I40IW_CQPSQ_QHASH_QPN) |
	      LS_64(info->dest_port, I40IW_CQPSQ_QHASH_DEST_PORT);
	if (info->ipv4_valid) {
		/* v4: single destination dword in the ADDR3 slot (offset 48) */
		set_64bit_val(wqe,
			      48,
			      LS_64(info->dest_ip[0], I40IW_CQPSQ_QHASH_ADDR3));
	} else {
		/* v6: four destination dwords spread over offsets 56 and 48 */
		set_64bit_val(wqe,
			      56,
			      LS_64(info->dest_ip[0], I40IW_CQPSQ_QHASH_ADDR0) |
			      LS_64(info->dest_ip[1], I40IW_CQPSQ_QHASH_ADDR1));

		set_64bit_val(wqe,
			      48,
			      LS_64(info->dest_ip[2], I40IW_CQPSQ_QHASH_ADDR2) |
			      LS_64(info->dest_ip[3], I40IW_CQPSQ_QHASH_ADDR3));
	}
	qw2 = LS_64(cqp->dev->qs_handle, I40IW_CQPSQ_QHASH_QS_HANDLE);
	if (info->vlan_valid)
		qw2 |= LS_64(info->vlan_id, I40IW_CQPSQ_QHASH_VLANID);
	set_64bit_val(wqe, 16, qw2);
	if (info->entry_type == I40IW_QHASH_TYPE_TCP_ESTABLISHED) {
		/* established entries carry the source half of the quad too */
		qw1 |= LS_64(info->src_port, I40IW_CQPSQ_QHASH_SRC_PORT);
		if (!info->ipv4_valid) {
			set_64bit_val(wqe,
				      40,
				      LS_64(info->src_ip[0], I40IW_CQPSQ_QHASH_ADDR0) |
				      LS_64(info->src_ip[1], I40IW_CQPSQ_QHASH_ADDR1));
			set_64bit_val(wqe,
				      32,
				      LS_64(info->src_ip[2], I40IW_CQPSQ_QHASH_ADDR2) |
				      LS_64(info->src_ip[3], I40IW_CQPSQ_QHASH_ADDR3));
		} else {
			set_64bit_val(wqe,
				      32,
				      LS_64(info->src_ip[0], I40IW_CQPSQ_QHASH_ADDR3));
		}
	}

	set_64bit_val(wqe, 8, qw1);
	/* opcode/flags header is inserted after all payload words are set */
	temp = LS_64(cqp->polarity, I40IW_CQPSQ_QHASH_WQEVALID) |
	       LS_64(I40IW_CQP_OP_MANAGE_QUAD_HASH_TABLE_ENTRY, I40IW_CQPSQ_QHASH_OPCODE) |
	       LS_64(info->manage, I40IW_CQPSQ_QHASH_MANAGE) |
	       LS_64(info->ipv4_valid, I40IW_CQPSQ_QHASH_IPV4VALID) |
	       LS_64(info->vlan_valid, I40IW_CQPSQ_QHASH_VLANVALID) |
	       LS_64(info->entry_type, I40IW_CQPSQ_QHASH_ENTRYTYPE);

	i40iw_insert_wqe_hdr(wqe, temp);

	i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "MANAGE_QHASH WQE",
			wqe, I40IW_CQP_WQE_SIZE * 8);

	if (post_sq)
		i40iw_sc_cqp_post_sq(cqp);
	return 0;
}
1109 | |||
1110 | /** | ||
1111 | * i40iw_sc_alloc_local_mac_ipaddr_entry - cqp wqe for loc mac entry | ||
1112 | * @cqp: struct for cqp hw | ||
1113 | * @scratch: u64 saved to be used during cqp completion | ||
1114 | * @post_sq: flag for cqp db to ring | ||
1115 | */ | ||
1116 | static enum i40iw_status_code i40iw_sc_alloc_local_mac_ipaddr_entry( | ||
1117 | struct i40iw_sc_cqp *cqp, | ||
1118 | u64 scratch, | ||
1119 | bool post_sq) | ||
1120 | { | ||
1121 | u64 *wqe; | ||
1122 | u64 header; | ||
1123 | |||
1124 | wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch); | ||
1125 | if (!wqe) | ||
1126 | return I40IW_ERR_RING_FULL; | ||
1127 | header = LS_64(I40IW_CQP_OP_ALLOCATE_LOC_MAC_IP_TABLE_ENTRY, I40IW_CQPSQ_OPCODE) | | ||
1128 | LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID); | ||
1129 | |||
1130 | i40iw_insert_wqe_hdr(wqe, header); | ||
1131 | i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "ALLOCATE_LOCAL_MAC_IPADDR WQE", | ||
1132 | wqe, I40IW_CQP_WQE_SIZE * 8); | ||
1133 | if (post_sq) | ||
1134 | i40iw_sc_cqp_post_sq(cqp); | ||
1135 | return 0; | ||
1136 | } | ||
1137 | |||
1138 | /** | ||
1139 | * i40iw_sc_add_local_mac_ipaddr_entry - add mac enry | ||
1140 | * @cqp: struct for cqp hw | ||
1141 | * @info:mac addr info | ||
1142 | * @scratch: u64 saved to be used during cqp completion | ||
1143 | * @post_sq: flag for cqp db to ring | ||
1144 | */ | ||
1145 | static enum i40iw_status_code i40iw_sc_add_local_mac_ipaddr_entry( | ||
1146 | struct i40iw_sc_cqp *cqp, | ||
1147 | struct i40iw_local_mac_ipaddr_entry_info *info, | ||
1148 | u64 scratch, | ||
1149 | bool post_sq) | ||
1150 | { | ||
1151 | u64 *wqe; | ||
1152 | u64 temp, header; | ||
1153 | |||
1154 | wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch); | ||
1155 | if (!wqe) | ||
1156 | return I40IW_ERR_RING_FULL; | ||
1157 | temp = info->mac_addr[5] | | ||
1158 | LS_64_1(info->mac_addr[4], 8) | | ||
1159 | LS_64_1(info->mac_addr[3], 16) | | ||
1160 | LS_64_1(info->mac_addr[2], 24) | | ||
1161 | LS_64_1(info->mac_addr[1], 32) | | ||
1162 | LS_64_1(info->mac_addr[0], 40); | ||
1163 | |||
1164 | set_64bit_val(wqe, 32, temp); | ||
1165 | |||
1166 | header = LS_64(info->entry_idx, I40IW_CQPSQ_MLIPA_IPTABLEIDX) | | ||
1167 | LS_64(I40IW_CQP_OP_MANAGE_LOC_MAC_IP_TABLE, I40IW_CQPSQ_OPCODE) | | ||
1168 | LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID); | ||
1169 | |||
1170 | i40iw_insert_wqe_hdr(wqe, header); | ||
1171 | |||
1172 | i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "ADD_LOCAL_MAC_IPADDR WQE", | ||
1173 | wqe, I40IW_CQP_WQE_SIZE * 8); | ||
1174 | |||
1175 | if (post_sq) | ||
1176 | i40iw_sc_cqp_post_sq(cqp); | ||
1177 | return 0; | ||
1178 | } | ||
1179 | |||
1180 | /** | ||
1181 | * i40iw_sc_del_local_mac_ipaddr_entry - cqp wqe to dele local mac | ||
1182 | * @cqp: struct for cqp hw | ||
1183 | * @scratch: u64 saved to be used during cqp completion | ||
1184 | * @entry_idx: index of mac entry | ||
1185 | * @ ignore_ref_count: to force mac adde delete | ||
1186 | * @post_sq: flag for cqp db to ring | ||
1187 | */ | ||
1188 | static enum i40iw_status_code i40iw_sc_del_local_mac_ipaddr_entry( | ||
1189 | struct i40iw_sc_cqp *cqp, | ||
1190 | u64 scratch, | ||
1191 | u8 entry_idx, | ||
1192 | u8 ignore_ref_count, | ||
1193 | bool post_sq) | ||
1194 | { | ||
1195 | u64 *wqe; | ||
1196 | u64 header; | ||
1197 | |||
1198 | wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch); | ||
1199 | if (!wqe) | ||
1200 | return I40IW_ERR_RING_FULL; | ||
1201 | header = LS_64(entry_idx, I40IW_CQPSQ_MLIPA_IPTABLEIDX) | | ||
1202 | LS_64(I40IW_CQP_OP_MANAGE_LOC_MAC_IP_TABLE, I40IW_CQPSQ_OPCODE) | | ||
1203 | LS_64(1, I40IW_CQPSQ_MLIPA_FREEENTRY) | | ||
1204 | LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID) | | ||
1205 | LS_64(ignore_ref_count, I40IW_CQPSQ_MLIPA_IGNORE_REF_CNT); | ||
1206 | |||
1207 | i40iw_insert_wqe_hdr(wqe, header); | ||
1208 | |||
1209 | i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "DEL_LOCAL_MAC_IPADDR WQE", | ||
1210 | wqe, I40IW_CQP_WQE_SIZE * 8); | ||
1211 | |||
1212 | if (post_sq) | ||
1213 | i40iw_sc_cqp_post_sq(cqp); | ||
1214 | return 0; | ||
1215 | } | ||
1216 | |||
1217 | /** | ||
1218 | * i40iw_sc_cqp_nop - send a nop wqe | ||
1219 | * @cqp: struct for cqp hw | ||
1220 | * @scratch: u64 saved to be used during cqp completion | ||
1221 | * @post_sq: flag for cqp db to ring | ||
1222 | */ | ||
1223 | static enum i40iw_status_code i40iw_sc_cqp_nop(struct i40iw_sc_cqp *cqp, | ||
1224 | u64 scratch, | ||
1225 | bool post_sq) | ||
1226 | { | ||
1227 | u64 *wqe; | ||
1228 | u64 header; | ||
1229 | |||
1230 | wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch); | ||
1231 | if (!wqe) | ||
1232 | return I40IW_ERR_RING_FULL; | ||
1233 | header = LS_64(I40IW_CQP_OP_NOP, I40IW_CQPSQ_OPCODE) | | ||
1234 | LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID); | ||
1235 | i40iw_insert_wqe_hdr(wqe, header); | ||
1236 | i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "NOP WQE", | ||
1237 | wqe, I40IW_CQP_WQE_SIZE * 8); | ||
1238 | |||
1239 | if (post_sq) | ||
1240 | i40iw_sc_cqp_post_sq(cqp); | ||
1241 | return 0; | ||
1242 | } | ||
1243 | |||
1244 | /** | ||
1245 | * i40iw_sc_ceq_init - initialize ceq | ||
1246 | * @ceq: ceq sc structure | ||
1247 | * @info: ceq initialization info | ||
1248 | */ | ||
1249 | static enum i40iw_status_code i40iw_sc_ceq_init(struct i40iw_sc_ceq *ceq, | ||
1250 | struct i40iw_ceq_init_info *info) | ||
1251 | { | ||
1252 | u32 pble_obj_cnt; | ||
1253 | |||
1254 | if ((info->elem_cnt < I40IW_MIN_CEQ_ENTRIES) || | ||
1255 | (info->elem_cnt > I40IW_MAX_CEQ_ENTRIES)) | ||
1256 | return I40IW_ERR_INVALID_SIZE; | ||
1257 | |||
1258 | if (info->ceq_id >= I40IW_MAX_CEQID) | ||
1259 | return I40IW_ERR_INVALID_CEQ_ID; | ||
1260 | |||
1261 | pble_obj_cnt = info->dev->hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt; | ||
1262 | |||
1263 | if (info->virtual_map && (info->first_pm_pbl_idx >= pble_obj_cnt)) | ||
1264 | return I40IW_ERR_INVALID_PBLE_INDEX; | ||
1265 | |||
1266 | ceq->size = sizeof(*ceq); | ||
1267 | ceq->ceqe_base = (struct i40iw_ceqe *)info->ceqe_base; | ||
1268 | ceq->ceq_id = info->ceq_id; | ||
1269 | ceq->dev = info->dev; | ||
1270 | ceq->elem_cnt = info->elem_cnt; | ||
1271 | ceq->ceq_elem_pa = info->ceqe_pa; | ||
1272 | ceq->virtual_map = info->virtual_map; | ||
1273 | |||
1274 | ceq->pbl_chunk_size = (ceq->virtual_map ? info->pbl_chunk_size : 0); | ||
1275 | ceq->first_pm_pbl_idx = (ceq->virtual_map ? info->first_pm_pbl_idx : 0); | ||
1276 | ceq->pbl_list = (ceq->virtual_map ? info->pbl_list : NULL); | ||
1277 | |||
1278 | ceq->tph_en = info->tph_en; | ||
1279 | ceq->tph_val = info->tph_val; | ||
1280 | ceq->polarity = 1; | ||
1281 | I40IW_RING_INIT(ceq->ceq_ring, ceq->elem_cnt); | ||
1282 | ceq->dev->ceq[info->ceq_id] = ceq; | ||
1283 | |||
1284 | return 0; | ||
1285 | } | ||
1286 | |||
1287 | /** | ||
1288 | * i40iw_sc_ceq_create - create ceq wqe | ||
1289 | * @ceq: ceq sc structure | ||
1290 | * @scratch: u64 saved to be used during cqp completion | ||
1291 | * @post_sq: flag for cqp db to ring | ||
1292 | */ | ||
1293 | static enum i40iw_status_code i40iw_sc_ceq_create(struct i40iw_sc_ceq *ceq, | ||
1294 | u64 scratch, | ||
1295 | bool post_sq) | ||
1296 | { | ||
1297 | struct i40iw_sc_cqp *cqp; | ||
1298 | u64 *wqe; | ||
1299 | u64 header; | ||
1300 | |||
1301 | cqp = ceq->dev->cqp; | ||
1302 | wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch); | ||
1303 | if (!wqe) | ||
1304 | return I40IW_ERR_RING_FULL; | ||
1305 | set_64bit_val(wqe, 16, ceq->elem_cnt); | ||
1306 | set_64bit_val(wqe, 32, (ceq->virtual_map ? 0 : ceq->ceq_elem_pa)); | ||
1307 | set_64bit_val(wqe, 48, (ceq->virtual_map ? ceq->first_pm_pbl_idx : 0)); | ||
1308 | set_64bit_val(wqe, 56, LS_64(ceq->tph_val, I40IW_CQPSQ_TPHVAL)); | ||
1309 | |||
1310 | header = ceq->ceq_id | | ||
1311 | LS_64(I40IW_CQP_OP_CREATE_CEQ, I40IW_CQPSQ_OPCODE) | | ||
1312 | LS_64(ceq->pbl_chunk_size, I40IW_CQPSQ_CEQ_LPBLSIZE) | | ||
1313 | LS_64(ceq->virtual_map, I40IW_CQPSQ_CEQ_VMAP) | | ||
1314 | LS_64(ceq->tph_en, I40IW_CQPSQ_TPHEN) | | ||
1315 | LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID); | ||
1316 | |||
1317 | i40iw_insert_wqe_hdr(wqe, header); | ||
1318 | |||
1319 | i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "CEQ_CREATE WQE", | ||
1320 | wqe, I40IW_CQP_WQE_SIZE * 8); | ||
1321 | |||
1322 | if (post_sq) | ||
1323 | i40iw_sc_cqp_post_sq(cqp); | ||
1324 | return 0; | ||
1325 | } | ||
1326 | |||
1327 | /** | ||
1328 | * i40iw_sc_cceq_create_done - poll for control ceq wqe to complete | ||
1329 | * @ceq: ceq sc structure | ||
1330 | */ | ||
1331 | static enum i40iw_status_code i40iw_sc_cceq_create_done(struct i40iw_sc_ceq *ceq) | ||
1332 | { | ||
1333 | struct i40iw_sc_cqp *cqp; | ||
1334 | |||
1335 | cqp = ceq->dev->cqp; | ||
1336 | return i40iw_sc_poll_for_cqp_op_done(cqp, I40IW_CQP_OP_CREATE_CEQ, NULL); | ||
1337 | } | ||
1338 | |||
1339 | /** | ||
1340 | * i40iw_sc_cceq_destroy_done - poll for destroy cceq to complete | ||
1341 | * @ceq: ceq sc structure | ||
1342 | */ | ||
1343 | static enum i40iw_status_code i40iw_sc_cceq_destroy_done(struct i40iw_sc_ceq *ceq) | ||
1344 | { | ||
1345 | struct i40iw_sc_cqp *cqp; | ||
1346 | |||
1347 | cqp = ceq->dev->cqp; | ||
1348 | cqp->process_cqp_sds = i40iw_update_sds_noccq; | ||
1349 | return i40iw_sc_poll_for_cqp_op_done(cqp, I40IW_CQP_OP_DESTROY_CEQ, NULL); | ||
1350 | } | ||
1351 | |||
1352 | /** | ||
1353 | * i40iw_sc_cceq_create - create cceq | ||
1354 | * @ceq: ceq sc structure | ||
1355 | * @scratch: u64 saved to be used during cqp completion | ||
1356 | */ | ||
1357 | static enum i40iw_status_code i40iw_sc_cceq_create(struct i40iw_sc_ceq *ceq, u64 scratch) | ||
1358 | { | ||
1359 | enum i40iw_status_code ret_code; | ||
1360 | |||
1361 | ret_code = i40iw_sc_ceq_create(ceq, scratch, true); | ||
1362 | if (!ret_code) | ||
1363 | ret_code = i40iw_sc_cceq_create_done(ceq); | ||
1364 | return ret_code; | ||
1365 | } | ||
1366 | |||
1367 | /** | ||
1368 | * i40iw_sc_ceq_destroy - destroy ceq | ||
1369 | * @ceq: ceq sc structure | ||
1370 | * @scratch: u64 saved to be used during cqp completion | ||
1371 | * @post_sq: flag for cqp db to ring | ||
1372 | */ | ||
1373 | static enum i40iw_status_code i40iw_sc_ceq_destroy(struct i40iw_sc_ceq *ceq, | ||
1374 | u64 scratch, | ||
1375 | bool post_sq) | ||
1376 | { | ||
1377 | struct i40iw_sc_cqp *cqp; | ||
1378 | u64 *wqe; | ||
1379 | u64 header; | ||
1380 | |||
1381 | cqp = ceq->dev->cqp; | ||
1382 | wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch); | ||
1383 | if (!wqe) | ||
1384 | return I40IW_ERR_RING_FULL; | ||
1385 | set_64bit_val(wqe, 16, ceq->elem_cnt); | ||
1386 | set_64bit_val(wqe, 48, ceq->first_pm_pbl_idx); | ||
1387 | header = ceq->ceq_id | | ||
1388 | LS_64(I40IW_CQP_OP_DESTROY_CEQ, I40IW_CQPSQ_OPCODE) | | ||
1389 | LS_64(ceq->pbl_chunk_size, I40IW_CQPSQ_CEQ_LPBLSIZE) | | ||
1390 | LS_64(ceq->virtual_map, I40IW_CQPSQ_CEQ_VMAP) | | ||
1391 | LS_64(ceq->tph_en, I40IW_CQPSQ_TPHEN) | | ||
1392 | LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID); | ||
1393 | i40iw_insert_wqe_hdr(wqe, header); | ||
1394 | i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "CEQ_DESTROY WQE", | ||
1395 | wqe, I40IW_CQP_WQE_SIZE * 8); | ||
1396 | |||
1397 | if (post_sq) | ||
1398 | i40iw_sc_cqp_post_sq(cqp); | ||
1399 | return 0; | ||
1400 | } | ||
1401 | |||
/**
 * i40iw_sc_process_ceq - process ceq
 * @dev: sc device struct
 * @ceq: ceq sc structure
 *
 * Returns the cq referenced by the current ceq element, or NULL when
 * the element's valid bit does not match the ceq polarity (no new
 * completion pending).
 */
static void *i40iw_sc_process_ceq(struct i40iw_sc_dev *dev, struct i40iw_sc_ceq *ceq)
{
	u64 temp;
	u64 *ceqe;
	struct i40iw_sc_cq *cq = NULL;
	u8 polarity;

	ceqe = (u64 *)I40IW_GET_CURRENT_CEQ_ELEMENT(ceq);
	get_64bit_val(ceqe, 0, &temp);
	polarity = (u8)RS_64(temp, I40IW_CEQE_VALID);
	if (polarity != ceq->polarity)
		return cq;

	/* the ceqe word shifted left one bit yields the cq pointer value;
	 * NOTE(review): assumes hw stored the pointer right-shifted by 1 -
	 * confirm against the cq create path
	 */
	cq = (struct i40iw_sc_cq *)(unsigned long)LS_64_1(temp, 1);

	I40IW_RING_MOVE_TAIL(ceq->ceq_ring);
	if (I40IW_RING_GETCURRENT_TAIL(ceq->ceq_ring) == 0)
		ceq->polarity ^= 1;	/* ring wrapped: expected valid bit flips */

	/* ack the completion via the pf or vf register as appropriate */
	if (dev->is_pf)
		i40iw_wr32(dev->hw, I40E_PFPE_CQACK, cq->cq_uk.cq_id);
	else
		i40iw_wr32(dev->hw, I40E_VFPE_CQACK1, cq->cq_uk.cq_id);

	return cq;
}
1433 | |||
1434 | /** | ||
1435 | * i40iw_sc_aeq_init - initialize aeq | ||
1436 | * @aeq: aeq structure ptr | ||
1437 | * @info: aeq initialization info | ||
1438 | */ | ||
1439 | static enum i40iw_status_code i40iw_sc_aeq_init(struct i40iw_sc_aeq *aeq, | ||
1440 | struct i40iw_aeq_init_info *info) | ||
1441 | { | ||
1442 | u32 pble_obj_cnt; | ||
1443 | |||
1444 | if ((info->elem_cnt < I40IW_MIN_AEQ_ENTRIES) || | ||
1445 | (info->elem_cnt > I40IW_MAX_AEQ_ENTRIES)) | ||
1446 | return I40IW_ERR_INVALID_SIZE; | ||
1447 | pble_obj_cnt = info->dev->hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt; | ||
1448 | |||
1449 | if (info->virtual_map && (info->first_pm_pbl_idx >= pble_obj_cnt)) | ||
1450 | return I40IW_ERR_INVALID_PBLE_INDEX; | ||
1451 | |||
1452 | aeq->size = sizeof(*aeq); | ||
1453 | aeq->polarity = 1; | ||
1454 | aeq->aeqe_base = (struct i40iw_sc_aeqe *)info->aeqe_base; | ||
1455 | aeq->dev = info->dev; | ||
1456 | aeq->elem_cnt = info->elem_cnt; | ||
1457 | |||
1458 | aeq->aeq_elem_pa = info->aeq_elem_pa; | ||
1459 | I40IW_RING_INIT(aeq->aeq_ring, aeq->elem_cnt); | ||
1460 | info->dev->aeq = aeq; | ||
1461 | |||
1462 | aeq->virtual_map = info->virtual_map; | ||
1463 | aeq->pbl_list = (aeq->virtual_map ? info->pbl_list : NULL); | ||
1464 | aeq->pbl_chunk_size = (aeq->virtual_map ? info->pbl_chunk_size : 0); | ||
1465 | aeq->first_pm_pbl_idx = (aeq->virtual_map ? info->first_pm_pbl_idx : 0); | ||
1466 | info->dev->aeq = aeq; | ||
1467 | return 0; | ||
1468 | } | ||
1469 | |||
1470 | /** | ||
1471 | * i40iw_sc_aeq_create - create aeq | ||
1472 | * @aeq: aeq structure ptr | ||
1473 | * @scratch: u64 saved to be used during cqp completion | ||
1474 | * @post_sq: flag for cqp db to ring | ||
1475 | */ | ||
1476 | static enum i40iw_status_code i40iw_sc_aeq_create(struct i40iw_sc_aeq *aeq, | ||
1477 | u64 scratch, | ||
1478 | bool post_sq) | ||
1479 | { | ||
1480 | u64 *wqe; | ||
1481 | struct i40iw_sc_cqp *cqp; | ||
1482 | u64 header; | ||
1483 | |||
1484 | cqp = aeq->dev->cqp; | ||
1485 | wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch); | ||
1486 | if (!wqe) | ||
1487 | return I40IW_ERR_RING_FULL; | ||
1488 | set_64bit_val(wqe, 16, aeq->elem_cnt); | ||
1489 | set_64bit_val(wqe, 32, | ||
1490 | (aeq->virtual_map ? 0 : aeq->aeq_elem_pa)); | ||
1491 | set_64bit_val(wqe, 48, | ||
1492 | (aeq->virtual_map ? aeq->first_pm_pbl_idx : 0)); | ||
1493 | |||
1494 | header = LS_64(I40IW_CQP_OP_CREATE_AEQ, I40IW_CQPSQ_OPCODE) | | ||
1495 | LS_64(aeq->pbl_chunk_size, I40IW_CQPSQ_AEQ_LPBLSIZE) | | ||
1496 | LS_64(aeq->virtual_map, I40IW_CQPSQ_AEQ_VMAP) | | ||
1497 | LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID); | ||
1498 | |||
1499 | i40iw_insert_wqe_hdr(wqe, header); | ||
1500 | i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "AEQ_CREATE WQE", | ||
1501 | wqe, I40IW_CQP_WQE_SIZE * 8); | ||
1502 | if (post_sq) | ||
1503 | i40iw_sc_cqp_post_sq(cqp); | ||
1504 | return 0; | ||
1505 | } | ||
1506 | |||
1507 | /** | ||
1508 | * i40iw_sc_aeq_destroy - destroy aeq during close | ||
1509 | * @aeq: aeq structure ptr | ||
1510 | * @scratch: u64 saved to be used during cqp completion | ||
1511 | * @post_sq: flag for cqp db to ring | ||
1512 | */ | ||
1513 | static enum i40iw_status_code i40iw_sc_aeq_destroy(struct i40iw_sc_aeq *aeq, | ||
1514 | u64 scratch, | ||
1515 | bool post_sq) | ||
1516 | { | ||
1517 | u64 *wqe; | ||
1518 | struct i40iw_sc_cqp *cqp; | ||
1519 | u64 header; | ||
1520 | |||
1521 | cqp = aeq->dev->cqp; | ||
1522 | wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch); | ||
1523 | if (!wqe) | ||
1524 | return I40IW_ERR_RING_FULL; | ||
1525 | set_64bit_val(wqe, 16, aeq->elem_cnt); | ||
1526 | set_64bit_val(wqe, 48, aeq->first_pm_pbl_idx); | ||
1527 | header = LS_64(I40IW_CQP_OP_DESTROY_AEQ, I40IW_CQPSQ_OPCODE) | | ||
1528 | LS_64(aeq->pbl_chunk_size, I40IW_CQPSQ_AEQ_LPBLSIZE) | | ||
1529 | LS_64(aeq->virtual_map, I40IW_CQPSQ_AEQ_VMAP) | | ||
1530 | LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID); | ||
1531 | i40iw_insert_wqe_hdr(wqe, header); | ||
1532 | |||
1533 | i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "AEQ_DESTROY WQE", | ||
1534 | wqe, I40IW_CQP_WQE_SIZE * 8); | ||
1535 | if (post_sq) | ||
1536 | i40iw_sc_cqp_post_sq(cqp); | ||
1537 | return 0; | ||
1538 | } | ||
1539 | |||
/**
 * i40iw_sc_get_next_aeqe - get next aeq entry
 * @aeq: aeq structure ptr
 * @info: aeqe info to be returned
 *
 * Decodes the current asynchronous event queue element into @info and
 * advances the aeq ring. Returns I40IW_ERR_QUEUE_EMPTY when the
 * element's valid bit does not match the aeq polarity (no new event).
 */
static enum i40iw_status_code i40iw_sc_get_next_aeqe(struct i40iw_sc_aeq *aeq,
						     struct i40iw_aeqe_info *info)
{
	u64 temp, compl_ctx;
	u64 *aeqe;
	u16 wqe_idx;
	u8 ae_src;
	u8 polarity;

	aeqe = (u64 *)I40IW_GET_CURRENT_AEQ_ELEMENT(aeq);
	get_64bit_val(aeqe, 0, &compl_ctx);
	get_64bit_val(aeqe, 8, &temp);
	polarity = (u8)RS_64(temp, I40IW_AEQE_VALID);

	if (aeq->polarity != polarity)
		return I40IW_ERR_QUEUE_EMPTY;

	i40iw_debug_buf(aeq->dev, I40IW_DEBUG_WQE, "AEQ_ENTRY", aeqe, 16);

	/* extract the common event fields from the second quadword */
	ae_src = (u8)RS_64(temp, I40IW_AEQE_AESRC);
	wqe_idx = (u16)RS_64(temp, I40IW_AEQE_WQDESCIDX);
	info->qp_cq_id = (u32)RS_64(temp, I40IW_AEQE_QPCQID);
	info->ae_id = (u16)RS_64(temp, I40IW_AEQE_AECODE);
	info->tcp_state = (u8)RS_64(temp, I40IW_AEQE_TCPSTATE);
	info->iwarp_state = (u8)RS_64(temp, I40IW_AEQE_IWSTATE);
	info->q2_data_written = (u8)RS_64(temp, I40IW_AEQE_Q2DATA);
	info->aeqe_overflow = (bool)RS_64(temp, I40IW_AEQE_OVERFLOW);
	/* map the hw ae source code onto qp/cq ownership and context */
	switch (ae_src) {
	case I40IW_AE_SOURCE_RQ:
	case I40IW_AE_SOURCE_RQ_0011:
		info->qp = true;
		info->wqe_idx = wqe_idx;
		info->compl_ctx = compl_ctx;
		break;
	case I40IW_AE_SOURCE_CQ:
	case I40IW_AE_SOURCE_CQ_0110:
	case I40IW_AE_SOURCE_CQ_1010:
	case I40IW_AE_SOURCE_CQ_1110:
		info->cq = true;
		/* cq context is stored shifted; restore with 1-bit left shift */
		info->compl_ctx = LS_64_1(compl_ctx, 1);
		break;
	case I40IW_AE_SOURCE_SQ:
	case I40IW_AE_SOURCE_SQ_0111:
		info->qp = true;
		info->sq = true;
		info->wqe_idx = wqe_idx;
		info->compl_ctx = compl_ctx;
		break;
	case I40IW_AE_SOURCE_IN_RR_WR:
	case I40IW_AE_SOURCE_IN_RR_WR_1011:
		info->qp = true;
		info->compl_ctx = compl_ctx;
		info->in_rdrsp_wr = true;
		break;
	case I40IW_AE_SOURCE_OUT_RR:
	case I40IW_AE_SOURCE_OUT_RR_1111:
		info->qp = true;
		info->compl_ctx = compl_ctx;
		info->out_rdrsp = true;
		break;
	default:
		break;
	}
	I40IW_RING_MOVE_TAIL(aeq->aeq_ring);
	if (I40IW_RING_GETCURRENT_TAIL(aeq->aeq_ring) == 0)
		aeq->polarity ^= 1;	/* ring wrapped: expected valid bit flips */
	return 0;
}
1613 | |||
1614 | /** | ||
1615 | * i40iw_sc_repost_aeq_entries - repost completed aeq entries | ||
1616 | * @dev: sc device struct | ||
1617 | * @count: allocate count | ||
1618 | */ | ||
1619 | static enum i40iw_status_code i40iw_sc_repost_aeq_entries(struct i40iw_sc_dev *dev, | ||
1620 | u32 count) | ||
1621 | { | ||
1622 | if (count > I40IW_MAX_AEQ_ALLOCATE_COUNT) | ||
1623 | return I40IW_ERR_INVALID_SIZE; | ||
1624 | |||
1625 | if (dev->is_pf) | ||
1626 | i40iw_wr32(dev->hw, I40E_PFPE_AEQALLOC, count); | ||
1627 | else | ||
1628 | i40iw_wr32(dev->hw, I40E_VFPE_AEQALLOC1, count); | ||
1629 | |||
1630 | return 0; | ||
1631 | } | ||
1632 | |||
1633 | /** | ||
1634 | * i40iw_sc_aeq_create_done - create aeq | ||
1635 | * @aeq: aeq structure ptr | ||
1636 | */ | ||
1637 | static enum i40iw_status_code i40iw_sc_aeq_create_done(struct i40iw_sc_aeq *aeq) | ||
1638 | { | ||
1639 | struct i40iw_sc_cqp *cqp; | ||
1640 | |||
1641 | cqp = aeq->dev->cqp; | ||
1642 | return i40iw_sc_poll_for_cqp_op_done(cqp, I40IW_CQP_OP_CREATE_AEQ, NULL); | ||
1643 | } | ||
1644 | |||
1645 | /** | ||
1646 | * i40iw_sc_aeq_destroy_done - destroy of aeq during close | ||
1647 | * @aeq: aeq structure ptr | ||
1648 | */ | ||
1649 | static enum i40iw_status_code i40iw_sc_aeq_destroy_done(struct i40iw_sc_aeq *aeq) | ||
1650 | { | ||
1651 | struct i40iw_sc_cqp *cqp; | ||
1652 | |||
1653 | cqp = aeq->dev->cqp; | ||
1654 | return i40iw_sc_poll_for_cqp_op_done(cqp, I40IW_CQP_OP_DESTROY_AEQ, NULL); | ||
1655 | } | ||
1656 | |||
/**
 * i40iw_sc_ccq_init - initialize control cq
 * @cq: sc's cq ctruct
 * @info: info for control cq initialization
 *
 * Validates sizes and indexes, then fills in the cq control structure
 * for the cqp's own completion queue (always cq id 0) and registers it
 * as dev->ccq.
 */
static enum i40iw_status_code i40iw_sc_ccq_init(struct i40iw_sc_cq *cq,
						struct i40iw_ccq_init_info *info)
{
	u32 pble_obj_cnt;

	if (info->num_elem < I40IW_MIN_CQ_SIZE || info->num_elem > I40IW_MAX_CQ_SIZE)
		return I40IW_ERR_INVALID_SIZE;

	/* NOTE(review): i40iw_sc_ceq_init rejects ceq_id >= I40IW_MAX_CEQID;
	 * the '>' here permits one more id - confirm which bound is intended
	 */
	if (info->ceq_id > I40IW_MAX_CEQID)
		return I40IW_ERR_INVALID_CEQ_ID;

	pble_obj_cnt = info->dev->hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt;

	/* a virtually mapped ccq must reference a valid pble index */
	if (info->virtual_map && (info->first_pm_pbl_idx >= pble_obj_cnt))
		return I40IW_ERR_INVALID_PBLE_INDEX;

	cq->cq_pa = info->cq_pa;
	cq->cq_uk.cq_base = info->cq_base;
	cq->shadow_area_pa = info->shadow_area_pa;
	cq->cq_uk.shadow_area = info->shadow_area;
	cq->shadow_read_threshold = info->shadow_read_threshold;
	cq->dev = info->dev;
	cq->ceq_id = info->ceq_id;
	cq->cq_uk.cq_size = info->num_elem;
	cq->cq_type = I40IW_CQ_TYPE_CQP;
	cq->ceqe_mask = info->ceqe_mask;
	I40IW_RING_INIT(cq->cq_uk.cq_ring, info->num_elem);

	cq->cq_uk.cq_id = 0;    /* control cq is id 0 always */
	cq->ceq_id_valid = info->ceq_id_valid;
	cq->tph_en = info->tph_en;
	cq->tph_val = info->tph_val;
	cq->cq_uk.avoid_mem_cflct = info->avoid_mem_cflct;

	cq->pbl_list = info->pbl_list;
	cq->virtual_map = info->virtual_map;
	cq->pbl_chunk_size = info->pbl_chunk_size;
	cq->first_pm_pbl_idx = info->first_pm_pbl_idx;
	cq->cq_uk.polarity = true;

	/* following are only for iw cqs so initialize them to zero */
	cq->cq_uk.cqe_alloc_reg = NULL;
	info->dev->ccq = cq;
	return 0;
}
1707 | |||
1708 | /** | ||
1709 | * i40iw_sc_ccq_create_done - poll cqp for ccq create | ||
1710 | * @ccq: ccq sc struct | ||
1711 | */ | ||
1712 | static enum i40iw_status_code i40iw_sc_ccq_create_done(struct i40iw_sc_cq *ccq) | ||
1713 | { | ||
1714 | struct i40iw_sc_cqp *cqp; | ||
1715 | |||
1716 | cqp = ccq->dev->cqp; | ||
1717 | return i40iw_sc_poll_for_cqp_op_done(cqp, I40IW_CQP_OP_CREATE_CQ, NULL); | ||
1718 | } | ||
1719 | |||
1720 | /** | ||
1721 | * i40iw_sc_ccq_create - create control cq | ||
1722 | * @ccq: ccq sc struct | ||
1723 | * @scratch: u64 saved to be used during cqp completion | ||
1724 | * @check_overflow: overlow flag for ccq | ||
1725 | * @post_sq: flag for cqp db to ring | ||
1726 | */ | ||
1727 | static enum i40iw_status_code i40iw_sc_ccq_create(struct i40iw_sc_cq *ccq, | ||
1728 | u64 scratch, | ||
1729 | bool check_overflow, | ||
1730 | bool post_sq) | ||
1731 | { | ||
1732 | u64 *wqe; | ||
1733 | struct i40iw_sc_cqp *cqp; | ||
1734 | u64 header; | ||
1735 | enum i40iw_status_code ret_code; | ||
1736 | |||
1737 | cqp = ccq->dev->cqp; | ||
1738 | wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch); | ||
1739 | if (!wqe) | ||
1740 | return I40IW_ERR_RING_FULL; | ||
1741 | set_64bit_val(wqe, 0, ccq->cq_uk.cq_size); | ||
1742 | set_64bit_val(wqe, 8, RS_64_1(ccq, 1)); | ||
1743 | set_64bit_val(wqe, 16, | ||
1744 | LS_64(ccq->shadow_read_threshold, I40IW_CQPSQ_CQ_SHADOW_READ_THRESHOLD)); | ||
1745 | set_64bit_val(wqe, 32, (ccq->virtual_map ? 0 : ccq->cq_pa)); | ||
1746 | set_64bit_val(wqe, 40, ccq->shadow_area_pa); | ||
1747 | set_64bit_val(wqe, 48, | ||
1748 | (ccq->virtual_map ? ccq->first_pm_pbl_idx : 0)); | ||
1749 | set_64bit_val(wqe, 56, | ||
1750 | LS_64(ccq->tph_val, I40IW_CQPSQ_TPHVAL)); | ||
1751 | |||
1752 | header = ccq->cq_uk.cq_id | | ||
1753 | LS_64((ccq->ceq_id_valid ? ccq->ceq_id : 0), I40IW_CQPSQ_CQ_CEQID) | | ||
1754 | LS_64(I40IW_CQP_OP_CREATE_CQ, I40IW_CQPSQ_OPCODE) | | ||
1755 | LS_64(ccq->pbl_chunk_size, I40IW_CQPSQ_CQ_LPBLSIZE) | | ||
1756 | LS_64(check_overflow, I40IW_CQPSQ_CQ_CHKOVERFLOW) | | ||
1757 | LS_64(ccq->virtual_map, I40IW_CQPSQ_CQ_VIRTMAP) | | ||
1758 | LS_64(ccq->ceqe_mask, I40IW_CQPSQ_CQ_ENCEQEMASK) | | ||
1759 | LS_64(ccq->ceq_id_valid, I40IW_CQPSQ_CQ_CEQIDVALID) | | ||
1760 | LS_64(ccq->tph_en, I40IW_CQPSQ_TPHEN) | | ||
1761 | LS_64(ccq->cq_uk.avoid_mem_cflct, I40IW_CQPSQ_CQ_AVOIDMEMCNFLCT) | | ||
1762 | LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID); | ||
1763 | |||
1764 | i40iw_insert_wqe_hdr(wqe, header); | ||
1765 | |||
1766 | i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "CCQ_CREATE WQE", | ||
1767 | wqe, I40IW_CQP_WQE_SIZE * 8); | ||
1768 | |||
1769 | if (post_sq) { | ||
1770 | i40iw_sc_cqp_post_sq(cqp); | ||
1771 | ret_code = i40iw_sc_ccq_create_done(ccq); | ||
1772 | if (ret_code) | ||
1773 | return ret_code; | ||
1774 | } | ||
1775 | cqp->process_cqp_sds = i40iw_cqp_sds_cmd; | ||
1776 | |||
1777 | return 0; | ||
1778 | } | ||
1779 | |||
1780 | /** | ||
1781 | * i40iw_sc_ccq_destroy - destroy ccq during close | ||
1782 | * @ccq: ccq sc struct | ||
1783 | * @scratch: u64 saved to be used during cqp completion | ||
1784 | * @post_sq: flag for cqp db to ring | ||
1785 | */ | ||
1786 | static enum i40iw_status_code i40iw_sc_ccq_destroy(struct i40iw_sc_cq *ccq, | ||
1787 | u64 scratch, | ||
1788 | bool post_sq) | ||
1789 | { | ||
1790 | struct i40iw_sc_cqp *cqp; | ||
1791 | u64 *wqe; | ||
1792 | u64 header; | ||
1793 | enum i40iw_status_code ret_code = 0; | ||
1794 | u32 tail, val, error; | ||
1795 | |||
1796 | cqp = ccq->dev->cqp; | ||
1797 | wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch); | ||
1798 | if (!wqe) | ||
1799 | return I40IW_ERR_RING_FULL; | ||
1800 | set_64bit_val(wqe, 0, ccq->cq_uk.cq_size); | ||
1801 | set_64bit_val(wqe, 8, RS_64_1(ccq, 1)); | ||
1802 | set_64bit_val(wqe, 40, ccq->shadow_area_pa); | ||
1803 | |||
1804 | header = ccq->cq_uk.cq_id | | ||
1805 | LS_64((ccq->ceq_id_valid ? ccq->ceq_id : 0), I40IW_CQPSQ_CQ_CEQID) | | ||
1806 | LS_64(I40IW_CQP_OP_DESTROY_CQ, I40IW_CQPSQ_OPCODE) | | ||
1807 | LS_64(ccq->ceqe_mask, I40IW_CQPSQ_CQ_ENCEQEMASK) | | ||
1808 | LS_64(ccq->ceq_id_valid, I40IW_CQPSQ_CQ_CEQIDVALID) | | ||
1809 | LS_64(ccq->tph_en, I40IW_CQPSQ_TPHEN) | | ||
1810 | LS_64(ccq->cq_uk.avoid_mem_cflct, I40IW_CQPSQ_CQ_AVOIDMEMCNFLCT) | | ||
1811 | LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID); | ||
1812 | |||
1813 | i40iw_insert_wqe_hdr(wqe, header); | ||
1814 | |||
1815 | i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "CCQ_DESTROY WQE", | ||
1816 | wqe, I40IW_CQP_WQE_SIZE * 8); | ||
1817 | |||
1818 | i40iw_get_cqp_reg_info(cqp, &val, &tail, &error); | ||
1819 | if (error) | ||
1820 | return I40IW_ERR_CQP_COMPL_ERROR; | ||
1821 | |||
1822 | if (post_sq) { | ||
1823 | i40iw_sc_cqp_post_sq(cqp); | ||
1824 | ret_code = i40iw_cqp_poll_registers(cqp, tail, 1000); | ||
1825 | } | ||
1826 | |||
1827 | return ret_code; | ||
1828 | } | ||
1829 | |||
1830 | /** | ||
1831 | * i40iw_sc_cq_init - initialize completion q | ||
1832 | * @cq: cq struct | ||
1833 | * @info: cq initialization info | ||
1834 | */ | ||
1835 | static enum i40iw_status_code i40iw_sc_cq_init(struct i40iw_sc_cq *cq, | ||
1836 | struct i40iw_cq_init_info *info) | ||
1837 | { | ||
1838 | u32 __iomem *cqe_alloc_reg = NULL; | ||
1839 | enum i40iw_status_code ret_code; | ||
1840 | u32 pble_obj_cnt; | ||
1841 | u32 arm_offset; | ||
1842 | |||
1843 | pble_obj_cnt = info->dev->hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt; | ||
1844 | |||
1845 | if (info->virtual_map && (info->first_pm_pbl_idx >= pble_obj_cnt)) | ||
1846 | return I40IW_ERR_INVALID_PBLE_INDEX; | ||
1847 | |||
1848 | cq->cq_pa = info->cq_base_pa; | ||
1849 | cq->dev = info->dev; | ||
1850 | cq->ceq_id = info->ceq_id; | ||
1851 | arm_offset = (info->dev->is_pf) ? I40E_PFPE_CQARM : I40E_VFPE_CQARM1; | ||
1852 | if (i40iw_get_hw_addr(cq->dev)) | ||
1853 | cqe_alloc_reg = (u32 __iomem *)(i40iw_get_hw_addr(cq->dev) + | ||
1854 | arm_offset); | ||
1855 | info->cq_uk_init_info.cqe_alloc_reg = cqe_alloc_reg; | ||
1856 | ret_code = i40iw_cq_uk_init(&cq->cq_uk, &info->cq_uk_init_info); | ||
1857 | if (ret_code) | ||
1858 | return ret_code; | ||
1859 | cq->virtual_map = info->virtual_map; | ||
1860 | cq->pbl_chunk_size = info->pbl_chunk_size; | ||
1861 | cq->ceqe_mask = info->ceqe_mask; | ||
1862 | cq->cq_type = (info->type) ? info->type : I40IW_CQ_TYPE_IWARP; | ||
1863 | |||
1864 | cq->shadow_area_pa = info->shadow_area_pa; | ||
1865 | cq->shadow_read_threshold = info->shadow_read_threshold; | ||
1866 | |||
1867 | cq->ceq_id_valid = info->ceq_id_valid; | ||
1868 | cq->tph_en = info->tph_en; | ||
1869 | cq->tph_val = info->tph_val; | ||
1870 | |||
1871 | cq->first_pm_pbl_idx = info->first_pm_pbl_idx; | ||
1872 | |||
1873 | return 0; | ||
1874 | } | ||
1875 | |||
1876 | /** | ||
1877 | * i40iw_sc_cq_create - create completion q | ||
1878 | * @cq: cq struct | ||
1879 | * @scratch: u64 saved to be used during cqp completion | ||
1880 | * @check_overflow: flag for overflow check | ||
1881 | * @post_sq: flag for cqp db to ring | ||
1882 | */ | ||
1883 | static enum i40iw_status_code i40iw_sc_cq_create(struct i40iw_sc_cq *cq, | ||
1884 | u64 scratch, | ||
1885 | bool check_overflow, | ||
1886 | bool post_sq) | ||
1887 | { | ||
1888 | u64 *wqe; | ||
1889 | struct i40iw_sc_cqp *cqp; | ||
1890 | u64 header; | ||
1891 | |||
1892 | if (cq->cq_uk.cq_id > I40IW_MAX_CQID) | ||
1893 | return I40IW_ERR_INVALID_CQ_ID; | ||
1894 | |||
1895 | if (cq->ceq_id > I40IW_MAX_CEQID) | ||
1896 | return I40IW_ERR_INVALID_CEQ_ID; | ||
1897 | |||
1898 | cqp = cq->dev->cqp; | ||
1899 | wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch); | ||
1900 | if (!wqe) | ||
1901 | return I40IW_ERR_RING_FULL; | ||
1902 | |||
1903 | set_64bit_val(wqe, 0, cq->cq_uk.cq_size); | ||
1904 | set_64bit_val(wqe, 8, RS_64_1(cq, 1)); | ||
1905 | set_64bit_val(wqe, | ||
1906 | 16, | ||
1907 | LS_64(cq->shadow_read_threshold, I40IW_CQPSQ_CQ_SHADOW_READ_THRESHOLD)); | ||
1908 | |||
1909 | set_64bit_val(wqe, 32, (cq->virtual_map ? 0 : cq->cq_pa)); | ||
1910 | |||
1911 | set_64bit_val(wqe, 40, cq->shadow_area_pa); | ||
1912 | set_64bit_val(wqe, 48, (cq->virtual_map ? cq->first_pm_pbl_idx : 0)); | ||
1913 | set_64bit_val(wqe, 56, LS_64(cq->tph_val, I40IW_CQPSQ_TPHVAL)); | ||
1914 | |||
1915 | header = cq->cq_uk.cq_id | | ||
1916 | LS_64((cq->ceq_id_valid ? cq->ceq_id : 0), I40IW_CQPSQ_CQ_CEQID) | | ||
1917 | LS_64(I40IW_CQP_OP_CREATE_CQ, I40IW_CQPSQ_OPCODE) | | ||
1918 | LS_64(cq->pbl_chunk_size, I40IW_CQPSQ_CQ_LPBLSIZE) | | ||
1919 | LS_64(check_overflow, I40IW_CQPSQ_CQ_CHKOVERFLOW) | | ||
1920 | LS_64(cq->virtual_map, I40IW_CQPSQ_CQ_VIRTMAP) | | ||
1921 | LS_64(cq->ceqe_mask, I40IW_CQPSQ_CQ_ENCEQEMASK) | | ||
1922 | LS_64(cq->ceq_id_valid, I40IW_CQPSQ_CQ_CEQIDVALID) | | ||
1923 | LS_64(cq->tph_en, I40IW_CQPSQ_TPHEN) | | ||
1924 | LS_64(cq->cq_uk.avoid_mem_cflct, I40IW_CQPSQ_CQ_AVOIDMEMCNFLCT) | | ||
1925 | LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID); | ||
1926 | |||
1927 | i40iw_insert_wqe_hdr(wqe, header); | ||
1928 | |||
1929 | i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "CQ_CREATE WQE", | ||
1930 | wqe, I40IW_CQP_WQE_SIZE * 8); | ||
1931 | |||
1932 | if (post_sq) | ||
1933 | i40iw_sc_cqp_post_sq(cqp); | ||
1934 | return 0; | ||
1935 | } | ||
1936 | |||
1937 | /** | ||
1938 | * i40iw_sc_cq_destroy - destroy completion q | ||
1939 | * @cq: cq struct | ||
1940 | * @scratch: u64 saved to be used during cqp completion | ||
1941 | * @post_sq: flag for cqp db to ring | ||
1942 | */ | ||
1943 | static enum i40iw_status_code i40iw_sc_cq_destroy(struct i40iw_sc_cq *cq, | ||
1944 | u64 scratch, | ||
1945 | bool post_sq) | ||
1946 | { | ||
1947 | struct i40iw_sc_cqp *cqp; | ||
1948 | u64 *wqe; | ||
1949 | u64 header; | ||
1950 | |||
1951 | cqp = cq->dev->cqp; | ||
1952 | wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch); | ||
1953 | if (!wqe) | ||
1954 | return I40IW_ERR_RING_FULL; | ||
1955 | set_64bit_val(wqe, 0, cq->cq_uk.cq_size); | ||
1956 | set_64bit_val(wqe, 8, RS_64_1(cq, 1)); | ||
1957 | set_64bit_val(wqe, 40, cq->shadow_area_pa); | ||
1958 | set_64bit_val(wqe, 48, (cq->virtual_map ? cq->first_pm_pbl_idx : 0)); | ||
1959 | |||
1960 | header = cq->cq_uk.cq_id | | ||
1961 | LS_64((cq->ceq_id_valid ? cq->ceq_id : 0), I40IW_CQPSQ_CQ_CEQID) | | ||
1962 | LS_64(I40IW_CQP_OP_DESTROY_CQ, I40IW_CQPSQ_OPCODE) | | ||
1963 | LS_64(cq->pbl_chunk_size, I40IW_CQPSQ_CQ_LPBLSIZE) | | ||
1964 | LS_64(cq->virtual_map, I40IW_CQPSQ_CQ_VIRTMAP) | | ||
1965 | LS_64(cq->ceqe_mask, I40IW_CQPSQ_CQ_ENCEQEMASK) | | ||
1966 | LS_64(cq->ceq_id_valid, I40IW_CQPSQ_CQ_CEQIDVALID) | | ||
1967 | LS_64(cq->tph_en, I40IW_CQPSQ_TPHEN) | | ||
1968 | LS_64(cq->cq_uk.avoid_mem_cflct, I40IW_CQPSQ_CQ_AVOIDMEMCNFLCT) | | ||
1969 | LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID); | ||
1970 | |||
1971 | i40iw_insert_wqe_hdr(wqe, header); | ||
1972 | |||
1973 | i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "CQ_DESTROY WQE", | ||
1974 | wqe, I40IW_CQP_WQE_SIZE * 8); | ||
1975 | |||
1976 | if (post_sq) | ||
1977 | i40iw_sc_cqp_post_sq(cqp); | ||
1978 | return 0; | ||
1979 | } | ||
1980 | |||
/**
 * i40iw_sc_cq_modify - modify a Completion Queue
 * @cq: cq struct
 * @info: modification info struct
 * @scratch: u64 saved to be used during cqp completion
 * @post_sq: flag to post to sq
 *
 * Computes the effective cq attributes (old value vs. value from @info,
 * depending on the cq_resize/ceq_change/check_overflow_change flags),
 * stores them back into @cq and builds a MODIFY_CQ cqp wqe from them.
 */
static enum i40iw_status_code i40iw_sc_cq_modify(struct i40iw_sc_cq *cq,
						 struct i40iw_modify_cq_info *info,
						 u64 scratch,
						 bool post_sq)
{
	struct i40iw_sc_cqp *cqp;
	u64 *wqe;
	u64 header;
	u32 cq_size, ceq_id, first_pm_pbl_idx;
	u8 pbl_chunk_size;
	bool virtual_map, ceq_id_valid, check_overflow;
	u32 pble_obj_cnt;

	if (info->ceq_valid && (info->ceq_id > I40IW_MAX_CEQID))
		return I40IW_ERR_INVALID_CEQ_ID;

	pble_obj_cnt = cq->dev->hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt;

	/* a resize to a virtually mapped cq must reference a valid pble */
	if (info->cq_resize && info->virtual_map &&
	    (info->first_pm_pbl_idx >= pble_obj_cnt))
		return I40IW_ERR_INVALID_PBLE_INDEX;

	cqp = cq->dev->cqp;
	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return I40IW_ERR_RING_FULL;

	/* NOTE(review): cq->first_pm_pbl_idx is overwritten here and then
	 * read below in the !cq_resize branch, so a non-resize modify of a
	 * virtually mapped cq picks up info->first_pm_pbl_idx rather than
	 * the cq's previous value — confirm callers always populate it.
	 */
	cq->pbl_list = info->pbl_list;
	cq->cq_pa = info->cq_pa;
	cq->first_pm_pbl_idx = info->first_pm_pbl_idx;

	/* resolve each attribute: new value when the matching change flag
	 * is set, otherwise keep the cq's current value
	 */
	cq_size = info->cq_resize ? info->cq_size : cq->cq_uk.cq_size;
	if (info->ceq_change) {
		ceq_id_valid = true;
		ceq_id = info->ceq_id;
	} else {
		ceq_id_valid = cq->ceq_id_valid;
		ceq_id = ceq_id_valid ? cq->ceq_id : 0;
	}
	virtual_map = info->cq_resize ? info->virtual_map : cq->virtual_map;
	first_pm_pbl_idx = (info->cq_resize ?
			    (info->virtual_map ? info->first_pm_pbl_idx : 0) :
			    (cq->virtual_map ? cq->first_pm_pbl_idx : 0));
	pbl_chunk_size = (info->cq_resize ?
			  (info->virtual_map ? info->pbl_chunk_size : 0) :
			  (cq->virtual_map ? cq->pbl_chunk_size : 0));
	check_overflow = info->check_overflow_change ? info->check_overflow :
			 cq->check_overflow;

	/* commit the resolved attributes back to the cq */
	cq->cq_uk.cq_size = cq_size;
	cq->ceq_id_valid = ceq_id_valid;
	cq->ceq_id = ceq_id;
	cq->virtual_map = virtual_map;
	cq->first_pm_pbl_idx = first_pm_pbl_idx;
	cq->pbl_chunk_size = pbl_chunk_size;
	cq->check_overflow = check_overflow;

	set_64bit_val(wqe, 0, cq_size);
	set_64bit_val(wqe, 8, RS_64_1(cq, 1));
	set_64bit_val(wqe, 16,
		      LS_64(info->shadow_read_threshold, I40IW_CQPSQ_CQ_SHADOW_READ_THRESHOLD));
	/* physical address only when the cq is not virtually mapped */
	set_64bit_val(wqe, 32, (cq->virtual_map ? 0 : cq->cq_pa));
	set_64bit_val(wqe, 40, cq->shadow_area_pa);
	set_64bit_val(wqe, 48, (cq->virtual_map ? first_pm_pbl_idx : 0));
	set_64bit_val(wqe, 56, LS_64(cq->tph_val, I40IW_CQPSQ_TPHVAL));

	header = cq->cq_uk.cq_id |
		 LS_64(ceq_id, I40IW_CQPSQ_CQ_CEQID) |
		 LS_64(I40IW_CQP_OP_MODIFY_CQ, I40IW_CQPSQ_OPCODE) |
		 LS_64(info->cq_resize, I40IW_CQPSQ_CQ_CQRESIZE) |
		 LS_64(pbl_chunk_size, I40IW_CQPSQ_CQ_LPBLSIZE) |
		 LS_64(check_overflow, I40IW_CQPSQ_CQ_CHKOVERFLOW) |
		 LS_64(virtual_map, I40IW_CQPSQ_CQ_VIRTMAP) |
		 LS_64(cq->ceqe_mask, I40IW_CQPSQ_CQ_ENCEQEMASK) |
		 LS_64(ceq_id_valid, I40IW_CQPSQ_CQ_CEQIDVALID) |
		 LS_64(cq->tph_en, I40IW_CQPSQ_TPHEN) |
		 LS_64(cq->cq_uk.avoid_mem_cflct, I40IW_CQPSQ_CQ_AVOIDMEMCNFLCT) |
		 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);

	i40iw_insert_wqe_hdr(wqe, header);

	i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "CQ_MODIFY WQE",
			wqe, I40IW_CQP_WQE_SIZE * 8);

	if (post_sq)
		i40iw_sc_cqp_post_sq(cqp);
	return 0;
}
2075 | |||
/**
 * i40iw_sc_qp_init - initialize qp
 * @qp: sc qp
 * @info: initialization qp info
 *
 * Copies the dma addresses and settings from @info into @qp, wires up
 * the wqe-allocate doorbell, initializes the user-kernel qp portion and
 * computes the encoded hw sq/rq sizes.  On a failing helper call the qp
 * is left partially initialized.
 */
static enum i40iw_status_code i40iw_sc_qp_init(struct i40iw_sc_qp *qp,
					       struct i40iw_qp_init_info *info)
{
	u32 __iomem *wqe_alloc_reg = NULL;
	enum i40iw_status_code ret_code;
	u32 pble_obj_cnt;
	u8 wqe_size;
	u32 offset;

	qp->dev = info->pd->dev;
	qp->sq_pa = info->sq_pa;
	qp->rq_pa = info->rq_pa;
	qp->hw_host_ctx_pa = info->host_ctx_pa;
	qp->q2_pa = info->q2_pa;
	qp->shadow_area_pa = info->shadow_area_pa;

	qp->q2_buf = info->q2;
	qp->pd = info->pd;
	qp->hw_host_ctx = info->host_ctx;
	/* wqe-allocate doorbell differs between pf and vf */
	offset = (qp->pd->dev->is_pf) ? I40E_PFPE_WQEALLOC : I40E_VFPE_WQEALLOC1;
	if (i40iw_get_hw_addr(qp->pd->dev))
		wqe_alloc_reg = (u32 __iomem *)(i40iw_get_hw_addr(qp->pd->dev) +
						offset);

	info->qp_uk_init_info.wqe_alloc_reg = wqe_alloc_reg;
	ret_code = i40iw_qp_uk_init(&qp->qp_uk, &info->qp_uk_init_info);
	if (ret_code)
		return ret_code;
	qp->virtual_map = info->virtual_map;

	pble_obj_cnt = info->pd->dev->hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt;

	/* when virtually mapped, sq_pa/rq_pa carry pble indices — TODO
	 * confirm against callers — so bound-check them against the pble
	 * object count
	 */
	if ((info->virtual_map && (info->sq_pa >= pble_obj_cnt)) ||
	    (info->virtual_map && (info->rq_pa >= pble_obj_cnt)))
		return I40IW_ERR_INVALID_PBLE_INDEX;

	/* sentinel: no lower-layer stream attached yet */
	qp->llp_stream_handle = (void *)(-1);
	qp->qp_type = (info->type) ? info->type : I40IW_QP_TYPE_IWARP;

	qp->hw_sq_size = i40iw_get_encoded_wqe_size(qp->qp_uk.sq_ring.size,
						    false);
	i40iw_debug(qp->dev, I40IW_DEBUG_WQE, "%s: hw_sq_size[%04d] sq_ring.size[%04d]\n",
		    __func__, qp->hw_sq_size, qp->qp_uk.sq_ring.size);
	/* rq wqe size depends on the max fragment count */
	ret_code = i40iw_fragcnt_to_wqesize_rq(qp->qp_uk.max_rq_frag_cnt,
					       &wqe_size);
	if (ret_code)
		return ret_code;
	qp->hw_rq_size = i40iw_get_encoded_wqe_size(qp->qp_uk.rq_size *
				(wqe_size / I40IW_QP_WQE_MIN_SIZE), false);
	i40iw_debug(qp->dev, I40IW_DEBUG_WQE,
		    "%s: hw_rq_size[%04d] qp_uk.rq_size[%04d] wqe_size[%04d]\n",
		    __func__, qp->hw_rq_size, qp->qp_uk.rq_size, wqe_size);
	qp->sq_tph_val = info->sq_tph_val;
	qp->rq_tph_val = info->rq_tph_val;
	qp->sq_tph_en = info->sq_tph_en;
	qp->rq_tph_en = info->rq_tph_en;
	qp->rcv_tph_en = info->rcv_tph_en;
	qp->xmit_tph_en = info->xmit_tph_en;
	qp->qs_handle = qp->pd->dev->qs_handle;
	qp->exception_lan_queue = qp->pd->dev->exception_lan_queue;

	return 0;
}
2144 | |||
2145 | /** | ||
2146 | * i40iw_sc_qp_create - create qp | ||
2147 | * @qp: sc qp | ||
2148 | * @info: qp create info | ||
2149 | * @scratch: u64 saved to be used during cqp completion | ||
2150 | * @post_sq: flag for cqp db to ring | ||
2151 | */ | ||
2152 | static enum i40iw_status_code i40iw_sc_qp_create( | ||
2153 | struct i40iw_sc_qp *qp, | ||
2154 | struct i40iw_create_qp_info *info, | ||
2155 | u64 scratch, | ||
2156 | bool post_sq) | ||
2157 | { | ||
2158 | struct i40iw_sc_cqp *cqp; | ||
2159 | u64 *wqe; | ||
2160 | u64 header; | ||
2161 | |||
2162 | if ((qp->qp_uk.qp_id < I40IW_MIN_IW_QP_ID) || | ||
2163 | (qp->qp_uk.qp_id > I40IW_MAX_IW_QP_ID)) | ||
2164 | return I40IW_ERR_INVALID_QP_ID; | ||
2165 | |||
2166 | cqp = qp->pd->dev->cqp; | ||
2167 | wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch); | ||
2168 | if (!wqe) | ||
2169 | return I40IW_ERR_RING_FULL; | ||
2170 | |||
2171 | set_64bit_val(wqe, 16, qp->hw_host_ctx_pa); | ||
2172 | |||
2173 | set_64bit_val(wqe, 40, qp->shadow_area_pa); | ||
2174 | |||
2175 | header = qp->qp_uk.qp_id | | ||
2176 | LS_64(I40IW_CQP_OP_CREATE_QP, I40IW_CQPSQ_OPCODE) | | ||
2177 | LS_64((info->ord_valid ? 1 : 0), I40IW_CQPSQ_QP_ORDVALID) | | ||
2178 | LS_64(info->tcp_ctx_valid, I40IW_CQPSQ_QP_TOECTXVALID) | | ||
2179 | LS_64(qp->qp_type, I40IW_CQPSQ_QP_QPTYPE) | | ||
2180 | LS_64(qp->virtual_map, I40IW_CQPSQ_QP_VQ) | | ||
2181 | LS_64(info->cq_num_valid, I40IW_CQPSQ_QP_CQNUMVALID) | | ||
2182 | LS_64(info->static_rsrc, I40IW_CQPSQ_QP_STATRSRC) | | ||
2183 | LS_64(info->arp_cache_idx_valid, I40IW_CQPSQ_QP_ARPTABIDXVALID) | | ||
2184 | LS_64(info->next_iwarp_state, I40IW_CQPSQ_QP_NEXTIWSTATE) | | ||
2185 | LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID); | ||
2186 | |||
2187 | i40iw_insert_wqe_hdr(wqe, header); | ||
2188 | i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "QP_CREATE WQE", | ||
2189 | wqe, I40IW_CQP_WQE_SIZE * 8); | ||
2190 | |||
2191 | if (post_sq) | ||
2192 | i40iw_sc_cqp_post_sq(cqp); | ||
2193 | return 0; | ||
2194 | } | ||
2195 | |||
2196 | /** | ||
2197 | * i40iw_sc_qp_modify - modify qp cqp wqe | ||
2198 | * @qp: sc qp | ||
2199 | * @info: modify qp info | ||
2200 | * @scratch: u64 saved to be used during cqp completion | ||
2201 | * @post_sq: flag for cqp db to ring | ||
2202 | */ | ||
2203 | static enum i40iw_status_code i40iw_sc_qp_modify( | ||
2204 | struct i40iw_sc_qp *qp, | ||
2205 | struct i40iw_modify_qp_info *info, | ||
2206 | u64 scratch, | ||
2207 | bool post_sq) | ||
2208 | { | ||
2209 | u64 *wqe; | ||
2210 | struct i40iw_sc_cqp *cqp; | ||
2211 | u64 header; | ||
2212 | u8 term_actions = 0; | ||
2213 | u8 term_len = 0; | ||
2214 | |||
2215 | cqp = qp->pd->dev->cqp; | ||
2216 | wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch); | ||
2217 | if (!wqe) | ||
2218 | return I40IW_ERR_RING_FULL; | ||
2219 | if (info->next_iwarp_state == I40IW_QP_STATE_TERMINATE) { | ||
2220 | if (info->dont_send_fin) | ||
2221 | term_actions += I40IWQP_TERM_SEND_TERM_ONLY; | ||
2222 | if (info->dont_send_term) | ||
2223 | term_actions += I40IWQP_TERM_SEND_FIN_ONLY; | ||
2224 | if ((term_actions == I40IWQP_TERM_SEND_TERM_AND_FIN) || | ||
2225 | (term_actions == I40IWQP_TERM_SEND_TERM_ONLY)) | ||
2226 | term_len = info->termlen; | ||
2227 | } | ||
2228 | |||
2229 | set_64bit_val(wqe, | ||
2230 | 8, | ||
2231 | LS_64(info->new_mss, I40IW_CQPSQ_QP_NEWMSS) | | ||
2232 | LS_64(term_len, I40IW_CQPSQ_QP_TERMLEN)); | ||
2233 | |||
2234 | set_64bit_val(wqe, 16, qp->hw_host_ctx_pa); | ||
2235 | set_64bit_val(wqe, 40, qp->shadow_area_pa); | ||
2236 | |||
2237 | header = qp->qp_uk.qp_id | | ||
2238 | LS_64(I40IW_CQP_OP_MODIFY_QP, I40IW_CQPSQ_OPCODE) | | ||
2239 | LS_64(info->ord_valid, I40IW_CQPSQ_QP_ORDVALID) | | ||
2240 | LS_64(info->tcp_ctx_valid, I40IW_CQPSQ_QP_TOECTXVALID) | | ||
2241 | LS_64(info->cached_var_valid, I40IW_CQPSQ_QP_CACHEDVARVALID) | | ||
2242 | LS_64(qp->virtual_map, I40IW_CQPSQ_QP_VQ) | | ||
2243 | LS_64(info->cq_num_valid, I40IW_CQPSQ_QP_CQNUMVALID) | | ||
2244 | LS_64(info->force_loopback, I40IW_CQPSQ_QP_FORCELOOPBACK) | | ||
2245 | LS_64(qp->qp_type, I40IW_CQPSQ_QP_QPTYPE) | | ||
2246 | LS_64(info->mss_change, I40IW_CQPSQ_QP_MSSCHANGE) | | ||
2247 | LS_64(info->static_rsrc, I40IW_CQPSQ_QP_STATRSRC) | | ||
2248 | LS_64(info->remove_hash_idx, I40IW_CQPSQ_QP_REMOVEHASHENTRY) | | ||
2249 | LS_64(term_actions, I40IW_CQPSQ_QP_TERMACT) | | ||
2250 | LS_64(info->reset_tcp_conn, I40IW_CQPSQ_QP_RESETCON) | | ||
2251 | LS_64(info->arp_cache_idx_valid, I40IW_CQPSQ_QP_ARPTABIDXVALID) | | ||
2252 | LS_64(info->next_iwarp_state, I40IW_CQPSQ_QP_NEXTIWSTATE) | | ||
2253 | LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID); | ||
2254 | |||
2255 | i40iw_insert_wqe_hdr(wqe, header); | ||
2256 | |||
2257 | i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "QP_MODIFY WQE", | ||
2258 | wqe, I40IW_CQP_WQE_SIZE * 8); | ||
2259 | |||
2260 | if (post_sq) | ||
2261 | i40iw_sc_cqp_post_sq(cqp); | ||
2262 | return 0; | ||
2263 | } | ||
2264 | |||
2265 | /** | ||
2266 | * i40iw_sc_qp_destroy - cqp destroy qp | ||
2267 | * @qp: sc qp | ||
2268 | * @scratch: u64 saved to be used during cqp completion | ||
2269 | * @remove_hash_idx: flag if to remove hash idx | ||
2270 | * @ignore_mw_bnd: memory window bind flag | ||
2271 | * @post_sq: flag for cqp db to ring | ||
2272 | */ | ||
2273 | static enum i40iw_status_code i40iw_sc_qp_destroy( | ||
2274 | struct i40iw_sc_qp *qp, | ||
2275 | u64 scratch, | ||
2276 | bool remove_hash_idx, | ||
2277 | bool ignore_mw_bnd, | ||
2278 | bool post_sq) | ||
2279 | { | ||
2280 | u64 *wqe; | ||
2281 | struct i40iw_sc_cqp *cqp; | ||
2282 | u64 header; | ||
2283 | |||
2284 | cqp = qp->pd->dev->cqp; | ||
2285 | wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch); | ||
2286 | if (!wqe) | ||
2287 | return I40IW_ERR_RING_FULL; | ||
2288 | set_64bit_val(wqe, 16, qp->hw_host_ctx_pa); | ||
2289 | set_64bit_val(wqe, 40, qp->shadow_area_pa); | ||
2290 | |||
2291 | header = qp->qp_uk.qp_id | | ||
2292 | LS_64(I40IW_CQP_OP_DESTROY_QP, I40IW_CQPSQ_OPCODE) | | ||
2293 | LS_64(qp->qp_type, I40IW_CQPSQ_QP_QPTYPE) | | ||
2294 | LS_64(ignore_mw_bnd, I40IW_CQPSQ_QP_IGNOREMWBOUND) | | ||
2295 | LS_64(remove_hash_idx, I40IW_CQPSQ_QP_REMOVEHASHENTRY) | | ||
2296 | LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID); | ||
2297 | |||
2298 | i40iw_insert_wqe_hdr(wqe, header); | ||
2299 | i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "QP_DESTROY WQE", | ||
2300 | wqe, I40IW_CQP_WQE_SIZE * 8); | ||
2301 | |||
2302 | if (post_sq) | ||
2303 | i40iw_sc_cqp_post_sq(cqp); | ||
2304 | return 0; | ||
2305 | } | ||
2306 | |||
/**
 * i40iw_sc_qp_flush_wqes - flush qp's wqe
 * @qp: sc qp
 * @info: flush information
 * @scratch: u64 saved to be used during cqp completion
 * @post_sq: flag for cqp db to ring
 *
 * Builds a FLUSH_WQES cqp wqe for the sq and/or rq.  Each queue is
 * flushed at most once per qp (tracked via qp->flush_sq/flush_rq);
 * if neither queue needs flushing the request is dropped unless an
 * ae still has to be generated for an MPA CRC error.
 */
static enum i40iw_status_code i40iw_sc_qp_flush_wqes(
				struct i40iw_sc_qp *qp,
				struct i40iw_qp_flush_info *info,
				u64 scratch,
				bool post_sq)
{
	u64 temp = 0;
	u64 *wqe;
	struct i40iw_sc_cqp *cqp;
	u64 header;
	bool flush_sq = false, flush_rq = false;

	/* only flush a queue that was requested and not already flushed */
	if (info->rq && !qp->flush_rq)
		flush_rq = true;

	if (info->sq && !qp->flush_sq)
		flush_sq = true;

	/* remember which queues have been flushed on this qp */
	qp->flush_sq |= flush_sq;
	qp->flush_rq |= flush_rq;
	if (!flush_sq && !flush_rq) {
		/* nothing left to flush; still post for MPA CRC ae generation */
		if (info->ae_code != I40IW_AE_LLP_RECEIVED_MPA_CRC_ERROR)
			return 0;
	}

	cqp = qp->pd->dev->cqp;
	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return I40IW_ERR_RING_FULL;
	/* user-supplied flush completion codes, per flushed queue */
	if (info->userflushcode) {
		if (flush_rq) {
			temp |= LS_64(info->rq_minor_code, I40IW_CQPSQ_FWQE_RQMNERR) |
				LS_64(info->rq_major_code, I40IW_CQPSQ_FWQE_RQMJERR);
		}
		if (flush_sq) {
			temp |= LS_64(info->sq_minor_code, I40IW_CQPSQ_FWQE_SQMNERR) |
				LS_64(info->sq_major_code, I40IW_CQPSQ_FWQE_SQMJERR);
		}
	}
	set_64bit_val(wqe, 16, temp);

	/* asynchronous event code/source, only when ae generation requested */
	temp = (info->generate_ae) ?
		info->ae_code | LS_64(info->ae_source, I40IW_CQPSQ_FWQE_AESOURCE) : 0;

	set_64bit_val(wqe, 8, temp);

	header = qp->qp_uk.qp_id |
		 LS_64(I40IW_CQP_OP_FLUSH_WQES, I40IW_CQPSQ_OPCODE) |
		 LS_64(info->generate_ae, I40IW_CQPSQ_FWQE_GENERATE_AE) |
		 LS_64(info->userflushcode, I40IW_CQPSQ_FWQE_USERFLCODE) |
		 LS_64(flush_sq, I40IW_CQPSQ_FWQE_FLUSHSQ) |
		 LS_64(flush_rq, I40IW_CQPSQ_FWQE_FLUSHRQ) |
		 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);

	i40iw_insert_wqe_hdr(wqe, header);

	i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "QP_FLUSH WQE",
			wqe, I40IW_CQP_WQE_SIZE * 8);

	if (post_sq)
		i40iw_sc_cqp_post_sq(cqp);
	return 0;
}
2377 | |||
2378 | /** | ||
2379 | * i40iw_sc_qp_upload_context - upload qp's context | ||
2380 | * @dev: sc device struct | ||
2381 | * @info: upload context info ptr for return | ||
2382 | * @scratch: u64 saved to be used during cqp completion | ||
2383 | * @post_sq: flag for cqp db to ring | ||
2384 | */ | ||
2385 | static enum i40iw_status_code i40iw_sc_qp_upload_context( | ||
2386 | struct i40iw_sc_dev *dev, | ||
2387 | struct i40iw_upload_context_info *info, | ||
2388 | u64 scratch, | ||
2389 | bool post_sq) | ||
2390 | { | ||
2391 | u64 *wqe; | ||
2392 | struct i40iw_sc_cqp *cqp; | ||
2393 | u64 header; | ||
2394 | |||
2395 | cqp = dev->cqp; | ||
2396 | wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch); | ||
2397 | if (!wqe) | ||
2398 | return I40IW_ERR_RING_FULL; | ||
2399 | set_64bit_val(wqe, 16, info->buf_pa); | ||
2400 | |||
2401 | header = LS_64(info->qp_id, I40IW_CQPSQ_UCTX_QPID) | | ||
2402 | LS_64(I40IW_CQP_OP_UPLOAD_CONTEXT, I40IW_CQPSQ_OPCODE) | | ||
2403 | LS_64(info->qp_type, I40IW_CQPSQ_UCTX_QPTYPE) | | ||
2404 | LS_64(info->raw_format, I40IW_CQPSQ_UCTX_RAWFORMAT) | | ||
2405 | LS_64(info->freeze_qp, I40IW_CQPSQ_UCTX_FREEZEQP) | | ||
2406 | LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID); | ||
2407 | |||
2408 | i40iw_insert_wqe_hdr(wqe, header); | ||
2409 | |||
2410 | i40iw_debug_buf(dev, I40IW_DEBUG_WQE, "QP_UPLOAD_CTX WQE", | ||
2411 | wqe, I40IW_CQP_WQE_SIZE * 8); | ||
2412 | |||
2413 | if (post_sq) | ||
2414 | i40iw_sc_cqp_post_sq(cqp); | ||
2415 | return 0; | ||
2416 | } | ||
2417 | |||
2418 | /** | ||
2419 | * i40iw_sc_qp_setctx - set qp's context | ||
2420 | * @qp: sc qp | ||
2421 | * @qp_ctx: context ptr | ||
2422 | * @info: ctx info | ||
2423 | */ | ||
2424 | static enum i40iw_status_code i40iw_sc_qp_setctx( | ||
2425 | struct i40iw_sc_qp *qp, | ||
2426 | u64 *qp_ctx, | ||
2427 | struct i40iw_qp_host_ctx_info *info) | ||
2428 | { | ||
2429 | struct i40iwarp_offload_info *iw; | ||
2430 | struct i40iw_tcp_offload_info *tcp; | ||
2431 | u64 qw0, qw3, qw7 = 0; | ||
2432 | |||
2433 | iw = info->iwarp_info; | ||
2434 | tcp = info->tcp_info; | ||
2435 | qw0 = LS_64(qp->qp_uk.rq_wqe_size, I40IWQPC_RQWQESIZE) | | ||
2436 | LS_64(info->err_rq_idx_valid, I40IWQPC_ERR_RQ_IDX_VALID) | | ||
2437 | LS_64(qp->rcv_tph_en, I40IWQPC_RCVTPHEN) | | ||
2438 | LS_64(qp->xmit_tph_en, I40IWQPC_XMITTPHEN) | | ||
2439 | LS_64(qp->rq_tph_en, I40IWQPC_RQTPHEN) | | ||
2440 | LS_64(qp->sq_tph_en, I40IWQPC_SQTPHEN) | | ||
2441 | LS_64(info->push_idx, I40IWQPC_PPIDX) | | ||
2442 | LS_64(info->push_mode_en, I40IWQPC_PMENA); | ||
2443 | |||
2444 | set_64bit_val(qp_ctx, 8, qp->sq_pa); | ||
2445 | set_64bit_val(qp_ctx, 16, qp->rq_pa); | ||
2446 | |||
2447 | qw3 = LS_64(qp->src_mac_addr_idx, I40IWQPC_SRCMACADDRIDX) | | ||
2448 | LS_64(qp->hw_rq_size, I40IWQPC_RQSIZE) | | ||
2449 | LS_64(qp->hw_sq_size, I40IWQPC_SQSIZE); | ||
2450 | |||
2451 | set_64bit_val(qp_ctx, | ||
2452 | 128, | ||
2453 | LS_64(info->err_rq_idx, I40IWQPC_ERR_RQ_IDX)); | ||
2454 | |||
2455 | set_64bit_val(qp_ctx, | ||
2456 | 136, | ||
2457 | LS_64(info->send_cq_num, I40IWQPC_TXCQNUM) | | ||
2458 | LS_64(info->rcv_cq_num, I40IWQPC_RXCQNUM)); | ||
2459 | |||
2460 | set_64bit_val(qp_ctx, | ||
2461 | 168, | ||
2462 | LS_64(info->qp_compl_ctx, I40IWQPC_QPCOMPCTX)); | ||
2463 | set_64bit_val(qp_ctx, | ||
2464 | 176, | ||
2465 | LS_64(qp->sq_tph_val, I40IWQPC_SQTPHVAL) | | ||
2466 | LS_64(qp->rq_tph_val, I40IWQPC_RQTPHVAL) | | ||
2467 | LS_64(qp->qs_handle, I40IWQPC_QSHANDLE) | | ||
2468 | LS_64(qp->exception_lan_queue, I40IWQPC_EXCEPTION_LAN_QUEUE)); | ||
2469 | |||
2470 | if (info->iwarp_info_valid) { | ||
2471 | qw0 |= LS_64(iw->ddp_ver, I40IWQPC_DDP_VER) | | ||
2472 | LS_64(iw->rdmap_ver, I40IWQPC_RDMAP_VER); | ||
2473 | |||
2474 | qw7 |= LS_64(iw->pd_id, I40IWQPC_PDIDX); | ||
2475 | set_64bit_val(qp_ctx, 144, qp->q2_pa); | ||
2476 | set_64bit_val(qp_ctx, | ||
2477 | 152, | ||
2478 | LS_64(iw->last_byte_sent, I40IWQPC_LASTBYTESENT)); | ||
2479 | |||
2480 | /* | ||
2481 | * Hard-code IRD_SIZE to hw-limit, 128, in qpctx, i.e matching an | ||
2482 | *advertisable IRD of 64 | ||
2483 | */ | ||
2484 | iw->ird_size = I40IW_QPCTX_ENCD_MAXIRD; | ||
2485 | set_64bit_val(qp_ctx, | ||
2486 | 160, | ||
2487 | LS_64(iw->ord_size, I40IWQPC_ORDSIZE) | | ||
2488 | LS_64(iw->ird_size, I40IWQPC_IRDSIZE) | | ||
2489 | LS_64(iw->wr_rdresp_en, I40IWQPC_WRRDRSPOK) | | ||
2490 | LS_64(iw->rd_enable, I40IWQPC_RDOK) | | ||
2491 | LS_64(iw->snd_mark_en, I40IWQPC_SNDMARKERS) | | ||
2492 | LS_64(iw->bind_en, I40IWQPC_BINDEN) | | ||
2493 | LS_64(iw->fast_reg_en, I40IWQPC_FASTREGEN) | | ||
2494 | LS_64(iw->priv_mode_en, I40IWQPC_PRIVEN) | | ||
2495 | LS_64(1, I40IWQPC_IWARPMODE) | | ||
2496 | LS_64(iw->rcv_mark_en, I40IWQPC_RCVMARKERS) | | ||
2497 | LS_64(iw->align_hdrs, I40IWQPC_ALIGNHDRS) | | ||
2498 | LS_64(iw->rcv_no_mpa_crc, I40IWQPC_RCVNOMPACRC) | | ||
2499 | LS_64(iw->rcv_mark_offset, I40IWQPC_RCVMARKOFFSET) | | ||
2500 | LS_64(iw->snd_mark_offset, I40IWQPC_SNDMARKOFFSET)); | ||
2501 | } | ||
2502 | if (info->tcp_info_valid) { | ||
2503 | qw0 |= LS_64(tcp->ipv4, I40IWQPC_IPV4) | | ||
2504 | LS_64(tcp->no_nagle, I40IWQPC_NONAGLE) | | ||
2505 | LS_64(tcp->insert_vlan_tag, I40IWQPC_INSERTVLANTAG) | | ||
2506 | LS_64(tcp->time_stamp, I40IWQPC_TIMESTAMP) | | ||
2507 | LS_64(tcp->cwnd_inc_limit, I40IWQPC_LIMIT) | | ||
2508 | LS_64(tcp->drop_ooo_seg, I40IWQPC_DROPOOOSEG) | | ||
2509 | LS_64(tcp->dup_ack_thresh, I40IWQPC_DUPACK_THRESH); | ||
2510 | |||
2511 | qw3 |= LS_64(tcp->ttl, I40IWQPC_TTL) | | ||
2512 | LS_64(tcp->src_mac_addr_idx, I40IWQPC_SRCMACADDRIDX) | | ||
2513 | LS_64(tcp->avoid_stretch_ack, I40IWQPC_AVOIDSTRETCHACK) | | ||
2514 | LS_64(tcp->tos, I40IWQPC_TOS) | | ||
2515 | LS_64(tcp->src_port, I40IWQPC_SRCPORTNUM) | | ||
2516 | LS_64(tcp->dst_port, I40IWQPC_DESTPORTNUM); | ||
2517 | |||
2518 | qp->src_mac_addr_idx = tcp->src_mac_addr_idx; | ||
2519 | set_64bit_val(qp_ctx, | ||
2520 | 32, | ||
2521 | LS_64(tcp->dest_ip_addr2, I40IWQPC_DESTIPADDR2) | | ||
2522 | LS_64(tcp->dest_ip_addr3, I40IWQPC_DESTIPADDR3)); | ||
2523 | |||
2524 | set_64bit_val(qp_ctx, | ||
2525 | 40, | ||
2526 | LS_64(tcp->dest_ip_addr0, I40IWQPC_DESTIPADDR0) | | ||
2527 | LS_64(tcp->dest_ip_addr1, I40IWQPC_DESTIPADDR1)); | ||
2528 | |||
2529 | set_64bit_val(qp_ctx, | ||
2530 | 48, | ||
2531 | LS_64(tcp->snd_mss, I40IWQPC_SNDMSS) | | ||
2532 | LS_64(tcp->vlan_tag, I40IWQPC_VLANTAG) | | ||
2533 | LS_64(tcp->arp_idx, I40IWQPC_ARPIDX)); | ||
2534 | |||
2535 | qw7 |= LS_64(tcp->flow_label, I40IWQPC_FLOWLABEL) | | ||
2536 | LS_64(tcp->wscale, I40IWQPC_WSCALE) | | ||
2537 | LS_64(tcp->ignore_tcp_opt, I40IWQPC_IGNORE_TCP_OPT) | | ||
2538 | LS_64(tcp->ignore_tcp_uns_opt, I40IWQPC_IGNORE_TCP_UNS_OPT) | | ||
2539 | LS_64(tcp->tcp_state, I40IWQPC_TCPSTATE) | | ||
2540 | LS_64(tcp->rcv_wscale, I40IWQPC_RCVSCALE) | | ||
2541 | LS_64(tcp->snd_wscale, I40IWQPC_SNDSCALE); | ||
2542 | |||
2543 | set_64bit_val(qp_ctx, | ||
2544 | 72, | ||
2545 | LS_64(tcp->time_stamp_recent, I40IWQPC_TIMESTAMP_RECENT) | | ||
2546 | LS_64(tcp->time_stamp_age, I40IWQPC_TIMESTAMP_AGE)); | ||
2547 | set_64bit_val(qp_ctx, | ||
2548 | 80, | ||
2549 | LS_64(tcp->snd_nxt, I40IWQPC_SNDNXT) | | ||
2550 | LS_64(tcp->snd_wnd, I40IWQPC_SNDWND)); | ||
2551 | |||
2552 | set_64bit_val(qp_ctx, | ||
2553 | 88, | ||
2554 | LS_64(tcp->rcv_nxt, I40IWQPC_RCVNXT) | | ||
2555 | LS_64(tcp->rcv_wnd, I40IWQPC_RCVWND)); | ||
2556 | set_64bit_val(qp_ctx, | ||
2557 | 96, | ||
2558 | LS_64(tcp->snd_max, I40IWQPC_SNDMAX) | | ||
2559 | LS_64(tcp->snd_una, I40IWQPC_SNDUNA)); | ||
2560 | set_64bit_val(qp_ctx, | ||
2561 | 104, | ||
2562 | LS_64(tcp->srtt, I40IWQPC_SRTT) | | ||
2563 | LS_64(tcp->rtt_var, I40IWQPC_RTTVAR)); | ||
2564 | set_64bit_val(qp_ctx, | ||
2565 | 112, | ||
2566 | LS_64(tcp->ss_thresh, I40IWQPC_SSTHRESH) | | ||
2567 | LS_64(tcp->cwnd, I40IWQPC_CWND)); | ||
2568 | set_64bit_val(qp_ctx, | ||
2569 | 120, | ||
2570 | LS_64(tcp->snd_wl1, I40IWQPC_SNDWL1) | | ||
2571 | LS_64(tcp->snd_wl2, I40IWQPC_SNDWL2)); | ||
2572 | set_64bit_val(qp_ctx, | ||
2573 | 128, | ||
2574 | LS_64(tcp->max_snd_window, I40IWQPC_MAXSNDWND) | | ||
2575 | LS_64(tcp->rexmit_thresh, I40IWQPC_REXMIT_THRESH)); | ||
2576 | set_64bit_val(qp_ctx, | ||
2577 | 184, | ||
2578 | LS_64(tcp->local_ipaddr3, I40IWQPC_LOCAL_IPADDR3) | | ||
2579 | LS_64(tcp->local_ipaddr2, I40IWQPC_LOCAL_IPADDR2)); | ||
2580 | set_64bit_val(qp_ctx, | ||
2581 | 192, | ||
2582 | LS_64(tcp->local_ipaddr1, I40IWQPC_LOCAL_IPADDR1) | | ||
2583 | LS_64(tcp->local_ipaddr0, I40IWQPC_LOCAL_IPADDR0)); | ||
2584 | } | ||
2585 | |||
2586 | set_64bit_val(qp_ctx, 0, qw0); | ||
2587 | set_64bit_val(qp_ctx, 24, qw3); | ||
2588 | set_64bit_val(qp_ctx, 56, qw7); | ||
2589 | |||
2590 | i40iw_debug_buf(qp->dev, I40IW_DEBUG_WQE, "QP_HOST)CTX WQE", | ||
2591 | qp_ctx, I40IW_QP_CTX_SIZE); | ||
2592 | return 0; | ||
2593 | } | ||
2594 | |||
2595 | /** | ||
2596 | * i40iw_sc_alloc_stag - mr stag alloc | ||
2597 | * @dev: sc device struct | ||
2598 | * @info: stag info | ||
2599 | * @scratch: u64 saved to be used during cqp completion | ||
2600 | * @post_sq: flag for cqp db to ring | ||
2601 | */ | ||
2602 | static enum i40iw_status_code i40iw_sc_alloc_stag( | ||
2603 | struct i40iw_sc_dev *dev, | ||
2604 | struct i40iw_allocate_stag_info *info, | ||
2605 | u64 scratch, | ||
2606 | bool post_sq) | ||
2607 | { | ||
2608 | u64 *wqe; | ||
2609 | struct i40iw_sc_cqp *cqp; | ||
2610 | u64 header; | ||
2611 | |||
2612 | cqp = dev->cqp; | ||
2613 | wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch); | ||
2614 | if (!wqe) | ||
2615 | return I40IW_ERR_RING_FULL; | ||
2616 | set_64bit_val(wqe, | ||
2617 | 8, | ||
2618 | LS_64(info->pd_id, I40IW_CQPSQ_STAG_PDID) | | ||
2619 | LS_64(info->total_len, I40IW_CQPSQ_STAG_STAGLEN)); | ||
2620 | set_64bit_val(wqe, | ||
2621 | 16, | ||
2622 | LS_64(info->stag_idx, I40IW_CQPSQ_STAG_IDX)); | ||
2623 | set_64bit_val(wqe, | ||
2624 | 40, | ||
2625 | LS_64(info->hmc_fcn_index, I40IW_CQPSQ_STAG_HMCFNIDX)); | ||
2626 | |||
2627 | header = LS_64(I40IW_CQP_OP_ALLOC_STAG, I40IW_CQPSQ_OPCODE) | | ||
2628 | LS_64(1, I40IW_CQPSQ_STAG_MR) | | ||
2629 | LS_64(info->access_rights, I40IW_CQPSQ_STAG_ARIGHTS) | | ||
2630 | LS_64(info->chunk_size, I40IW_CQPSQ_STAG_LPBLSIZE) | | ||
2631 | LS_64(info->page_size, I40IW_CQPSQ_STAG_HPAGESIZE) | | ||
2632 | LS_64(info->remote_access, I40IW_CQPSQ_STAG_REMACCENABLED) | | ||
2633 | LS_64(info->use_hmc_fcn_index, I40IW_CQPSQ_STAG_USEHMCFNIDX) | | ||
2634 | LS_64(info->use_pf_rid, I40IW_CQPSQ_STAG_USEPFRID) | | ||
2635 | LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID); | ||
2636 | |||
2637 | i40iw_insert_wqe_hdr(wqe, header); | ||
2638 | |||
2639 | i40iw_debug_buf(dev, I40IW_DEBUG_WQE, "ALLOC_STAG WQE", | ||
2640 | wqe, I40IW_CQP_WQE_SIZE * 8); | ||
2641 | |||
2642 | if (post_sq) | ||
2643 | i40iw_sc_cqp_post_sq(cqp); | ||
2644 | return 0; | ||
2645 | } | ||
2646 | |||
/**
 * i40iw_sc_mr_reg_non_shared - non-shared mr registration
 * @dev: sc device struct
 * @info: mr info
 * @scratch: u64 saved to be used during cqp completion
 * @post_sq: flag for cqp db to ring
 *
 * Builds a REG_MR CQP WQE for a non-shared memory region.  For
 * chunked (page-list) registrations the first PBLE index is validated
 * against the HMC PBLE object count before a WQE slot is consumed.
 * Returns 0 on success, I40IW_ERR_INVALID_PBLE_INDEX or
 * I40IW_ERR_RING_FULL on failure.
 */
static enum i40iw_status_code i40iw_sc_mr_reg_non_shared(
				struct i40iw_sc_dev *dev,
				struct i40iw_reg_ns_stag_info *info,
				u64 scratch,
				bool post_sq)
{
	u64 *wqe;
	u64 temp;
	struct i40iw_sc_cqp *cqp;
	u64 header;
	u32 pble_obj_cnt;
	bool remote_access;
	u8 addr_type;

	/* enable remote access whenever any remote right is requested */
	if (info->access_rights & (I40IW_ACCESS_FLAGS_REMOTEREAD_ONLY |
				   I40IW_ACCESS_FLAGS_REMOTEWRITE_ONLY))
		remote_access = true;
	else
		remote_access = false;

	pble_obj_cnt = dev->hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt;

	if (info->chunk_size && (info->first_pm_pbl_index >= pble_obj_cnt))
		return I40IW_ERR_INVALID_PBLE_INDEX;

	cqp = dev->cqp;
	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return I40IW_ERR_RING_FULL;

	/* word 0: the VA for VA-based stags, otherwise the first byte offset */
	temp = (info->addr_type == I40IW_ADDR_TYPE_VA_BASED) ? (uintptr_t)info->va : info->fbo;
	set_64bit_val(wqe, 0, temp);

	set_64bit_val(wqe,
		      8,
		      LS_64(info->total_len, I40IW_CQPSQ_STAG_STAGLEN) |
		      LS_64(info->pd_id, I40IW_CQPSQ_STAG_PDID));

	set_64bit_val(wqe,
		      16,
		      LS_64(info->stag_key, I40IW_CQPSQ_STAG_KEY) |
		      LS_64(info->stag_idx, I40IW_CQPSQ_STAG_IDX));
	if (!info->chunk_size) {
		/* physically contiguous region: program the PA directly */
		set_64bit_val(wqe, 32, info->reg_addr_pa);
		set_64bit_val(wqe, 48, 0);
	} else {
		/* chunked region: hardware walks the pble list from this index */
		set_64bit_val(wqe, 32, 0);
		set_64bit_val(wqe, 48, info->first_pm_pbl_index);
	}
	set_64bit_val(wqe, 40, info->hmc_fcn_index);
	set_64bit_val(wqe, 56, 0);

	addr_type = (info->addr_type == I40IW_ADDR_TYPE_VA_BASED) ? 1 : 0;
	header = LS_64(I40IW_CQP_OP_REG_MR, I40IW_CQPSQ_OPCODE) |
		 LS_64(1, I40IW_CQPSQ_STAG_MR) |
		 LS_64(info->chunk_size, I40IW_CQPSQ_STAG_LPBLSIZE) |
		 LS_64(info->page_size, I40IW_CQPSQ_STAG_HPAGESIZE) |
		 LS_64(info->access_rights, I40IW_CQPSQ_STAG_ARIGHTS) |
		 LS_64(remote_access, I40IW_CQPSQ_STAG_REMACCENABLED) |
		 LS_64(addr_type, I40IW_CQPSQ_STAG_VABASEDTO) |
		 LS_64(info->use_hmc_fcn_index, I40IW_CQPSQ_STAG_USEHMCFNIDX) |
		 LS_64(info->use_pf_rid, I40IW_CQPSQ_STAG_USEPFRID) |
		 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);

	/* header (with valid bit) is written last */
	i40iw_insert_wqe_hdr(wqe, header);

	i40iw_debug_buf(dev, I40IW_DEBUG_WQE, "MR_REG_NS WQE",
			wqe, I40IW_CQP_WQE_SIZE * 8);

	if (post_sq)
		i40iw_sc_cqp_post_sq(cqp);
	return 0;
}
2727 | |||
/**
 * i40iw_sc_mr_reg_shared - registered shared memory region
 * @dev: sc device struct
 * @info: info for shared memory registration
 * @scratch: u64 saved to be used during cqp completion
 * @post_sq: flag for cqp db to ring
 *
 * Builds a REG_SMR CQP WQE that creates a new stag sharing the
 * parent stag's memory.  Returns 0 or I40IW_ERR_RING_FULL.
 */
static enum i40iw_status_code i40iw_sc_mr_reg_shared(
					struct i40iw_sc_dev *dev,
					struct i40iw_register_shared_stag *info,
					u64 scratch,
					bool post_sq)
{
	u64 *wqe;
	struct i40iw_sc_cqp *cqp;
	u64 temp, va64, fbo, header;
	u32 va32;
	bool remote_access;
	u8 addr_type;

	/* enable remote access whenever any remote right is requested */
	if (info->access_rights & (I40IW_ACCESS_FLAGS_REMOTEREAD_ONLY |
				   I40IW_ACCESS_FLAGS_REMOTEWRITE_ONLY))
		remote_access = true;
	else
		remote_access = false;
	cqp = dev->cqp;
	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return I40IW_ERR_RING_FULL;
	/*
	 * First byte offset = VA modulo 4096; NOTE(review): assumes a
	 * 4K page-based parent registration — confirm against HW spec.
	 */
	va64 = (uintptr_t)(info->va);
	va32 = (u32)(va64 & 0x00000000FFFFFFFF);
	fbo = (u64)(va32 & (4096 - 1));

	/* word 0: full VA for VA-based stags, otherwise just the fbo */
	set_64bit_val(wqe,
		      0,
		      (info->addr_type == I40IW_ADDR_TYPE_VA_BASED ? (uintptr_t)info->va : fbo));

	set_64bit_val(wqe,
		      8,
		      LS_64(info->pd_id, I40IW_CQPSQ_STAG_PDID));
	temp = LS_64(info->new_stag_key, I40IW_CQPSQ_STAG_KEY) |
	       LS_64(info->new_stag_idx, I40IW_CQPSQ_STAG_IDX) |
	       LS_64(info->parent_stag_idx, I40IW_CQPSQ_STAG_PARENTSTAGIDX);
	set_64bit_val(wqe, 16, temp);

	addr_type = (info->addr_type == I40IW_ADDR_TYPE_VA_BASED) ? 1 : 0;
	header = LS_64(I40IW_CQP_OP_REG_SMR, I40IW_CQPSQ_OPCODE) |
		 LS_64(1, I40IW_CQPSQ_STAG_MR) |
		 LS_64(info->access_rights, I40IW_CQPSQ_STAG_ARIGHTS) |
		 LS_64(remote_access, I40IW_CQPSQ_STAG_REMACCENABLED) |
		 LS_64(addr_type, I40IW_CQPSQ_STAG_VABASEDTO) |
		 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);

	/* header (with valid bit) is written last */
	i40iw_insert_wqe_hdr(wqe, header);

	i40iw_debug_buf(dev, I40IW_DEBUG_WQE, "MR_REG_SHARED WQE",
			wqe, I40IW_CQP_WQE_SIZE * 8);

	if (post_sq)
		i40iw_sc_cqp_post_sq(cqp);
	return 0;
}
2790 | |||
2791 | /** | ||
2792 | * i40iw_sc_dealloc_stag - deallocate stag | ||
2793 | * @dev: sc device struct | ||
2794 | * @info: dealloc stag info | ||
2795 | * @scratch: u64 saved to be used during cqp completion | ||
2796 | * @post_sq: flag for cqp db to ring | ||
2797 | */ | ||
2798 | static enum i40iw_status_code i40iw_sc_dealloc_stag( | ||
2799 | struct i40iw_sc_dev *dev, | ||
2800 | struct i40iw_dealloc_stag_info *info, | ||
2801 | u64 scratch, | ||
2802 | bool post_sq) | ||
2803 | { | ||
2804 | u64 header; | ||
2805 | u64 *wqe; | ||
2806 | struct i40iw_sc_cqp *cqp; | ||
2807 | |||
2808 | cqp = dev->cqp; | ||
2809 | wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch); | ||
2810 | if (!wqe) | ||
2811 | return I40IW_ERR_RING_FULL; | ||
2812 | set_64bit_val(wqe, | ||
2813 | 8, | ||
2814 | LS_64(info->pd_id, I40IW_CQPSQ_STAG_PDID)); | ||
2815 | set_64bit_val(wqe, | ||
2816 | 16, | ||
2817 | LS_64(info->stag_idx, I40IW_CQPSQ_STAG_IDX)); | ||
2818 | |||
2819 | header = LS_64(I40IW_CQP_OP_DEALLOC_STAG, I40IW_CQPSQ_OPCODE) | | ||
2820 | LS_64(info->mr, I40IW_CQPSQ_STAG_MR) | | ||
2821 | LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID); | ||
2822 | |||
2823 | i40iw_insert_wqe_hdr(wqe, header); | ||
2824 | |||
2825 | i40iw_debug_buf(dev, I40IW_DEBUG_WQE, "DEALLOC_STAG WQE", | ||
2826 | wqe, I40IW_CQP_WQE_SIZE * 8); | ||
2827 | |||
2828 | if (post_sq) | ||
2829 | i40iw_sc_cqp_post_sq(cqp); | ||
2830 | return 0; | ||
2831 | } | ||
2832 | |||
2833 | /** | ||
2834 | * i40iw_sc_query_stag - query hardware for stag | ||
2835 | * @dev: sc device struct | ||
2836 | * @scratch: u64 saved to be used during cqp completion | ||
2837 | * @stag_index: stag index for query | ||
2838 | * @post_sq: flag for cqp db to ring | ||
2839 | */ | ||
2840 | static enum i40iw_status_code i40iw_sc_query_stag(struct i40iw_sc_dev *dev, | ||
2841 | u64 scratch, | ||
2842 | u32 stag_index, | ||
2843 | bool post_sq) | ||
2844 | { | ||
2845 | u64 header; | ||
2846 | u64 *wqe; | ||
2847 | struct i40iw_sc_cqp *cqp; | ||
2848 | |||
2849 | cqp = dev->cqp; | ||
2850 | wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch); | ||
2851 | if (!wqe) | ||
2852 | return I40IW_ERR_RING_FULL; | ||
2853 | set_64bit_val(wqe, | ||
2854 | 16, | ||
2855 | LS_64(stag_index, I40IW_CQPSQ_QUERYSTAG_IDX)); | ||
2856 | |||
2857 | header = LS_64(I40IW_CQP_OP_QUERY_STAG, I40IW_CQPSQ_OPCODE) | | ||
2858 | LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID); | ||
2859 | |||
2860 | i40iw_insert_wqe_hdr(wqe, header); | ||
2861 | |||
2862 | i40iw_debug_buf(dev, I40IW_DEBUG_WQE, "QUERY_STAG WQE", | ||
2863 | wqe, I40IW_CQP_WQE_SIZE * 8); | ||
2864 | |||
2865 | if (post_sq) | ||
2866 | i40iw_sc_cqp_post_sq(cqp); | ||
2867 | return 0; | ||
2868 | } | ||
2869 | |||
2870 | /** | ||
2871 | * i40iw_sc_mw_alloc - mw allocate | ||
2872 | * @dev: sc device struct | ||
2873 | * @scratch: u64 saved to be used during cqp completion | ||
2874 | * @mw_stag_index:stag index | ||
2875 | * @pd_id: pd is for this mw | ||
2876 | * @post_sq: flag for cqp db to ring | ||
2877 | */ | ||
2878 | static enum i40iw_status_code i40iw_sc_mw_alloc( | ||
2879 | struct i40iw_sc_dev *dev, | ||
2880 | u64 scratch, | ||
2881 | u32 mw_stag_index, | ||
2882 | u16 pd_id, | ||
2883 | bool post_sq) | ||
2884 | { | ||
2885 | u64 header; | ||
2886 | struct i40iw_sc_cqp *cqp; | ||
2887 | u64 *wqe; | ||
2888 | |||
2889 | cqp = dev->cqp; | ||
2890 | wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch); | ||
2891 | if (!wqe) | ||
2892 | return I40IW_ERR_RING_FULL; | ||
2893 | set_64bit_val(wqe, 8, LS_64(pd_id, I40IW_CQPSQ_STAG_PDID)); | ||
2894 | set_64bit_val(wqe, | ||
2895 | 16, | ||
2896 | LS_64(mw_stag_index, I40IW_CQPSQ_STAG_IDX)); | ||
2897 | |||
2898 | header = LS_64(I40IW_CQP_OP_ALLOC_STAG, I40IW_CQPSQ_OPCODE) | | ||
2899 | LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID); | ||
2900 | |||
2901 | i40iw_insert_wqe_hdr(wqe, header); | ||
2902 | |||
2903 | i40iw_debug_buf(dev, I40IW_DEBUG_WQE, "MW_ALLOC WQE", | ||
2904 | wqe, I40IW_CQP_WQE_SIZE * 8); | ||
2905 | |||
2906 | if (post_sq) | ||
2907 | i40iw_sc_cqp_post_sq(cqp); | ||
2908 | return 0; | ||
2909 | } | ||
2910 | |||
/**
 * i40iw_sc_send_lsmm - send last streaming mode message
 * @qp: sc qp struct
 * @lsmm_buf: buffer with lsmm message
 * @size: size of lsmm buffer
 * @stag: stag of lsmm buffer
 *
 * Fills the first SQ WQE (wqe0) directly with a streaming-mode
 * RDMA SEND describing the LSMM buffer.  Does not ring a doorbell.
 */
static void i40iw_sc_send_lsmm(struct i40iw_sc_qp *qp,
			       void *lsmm_buf,
			       u32 size,
			       i40iw_stag stag)
{
	u64 *wqe;
	u64 header;
	struct i40iw_qp_uk *qp_uk;

	qp_uk = &qp->qp_uk;
	/* wqe0 is the first element of the SQ ring */
	wqe = qp_uk->sq_base->elem;

	set_64bit_val(wqe, 0, (uintptr_t)lsmm_buf);

	/* fragment length in the low bits, stag in the FRAG_STAG field */
	set_64bit_val(wqe, 8, (size | LS_64(stag, I40IWQPSQ_FRAG_STAG)));

	set_64bit_val(wqe, 16, 0);

	header = LS_64(I40IWQP_OP_RDMA_SEND, I40IWQPSQ_OPCODE) |
		 LS_64(1, I40IWQPSQ_STREAMMODE) |
		 LS_64(1, I40IWQPSQ_WAITFORRCVPDU) |
		 LS_64(qp->qp_uk.swqe_polarity, I40IWQPSQ_VALID);

	i40iw_insert_wqe_hdr(wqe, header);

	/* NOTE(review): uses I40IW_DEBUG_QP whereas the nostag variant
	 * uses I40IW_DEBUG_WQE — confirm which category is intended */
	i40iw_debug_buf(qp->dev, I40IW_DEBUG_QP, "SEND_LSMM WQE",
			wqe, I40IW_QP_WQE_MIN_SIZE);
}
2946 | |||
2947 | /** | ||
2948 | * i40iw_sc_send_lsmm_nostag - for privilege qp | ||
2949 | * @qp: sc qp struct | ||
2950 | * @lsmm_buf: buffer with lsmm message | ||
2951 | * @size: size of lsmm buffer | ||
2952 | */ | ||
2953 | static void i40iw_sc_send_lsmm_nostag(struct i40iw_sc_qp *qp, | ||
2954 | void *lsmm_buf, | ||
2955 | u32 size) | ||
2956 | { | ||
2957 | u64 *wqe; | ||
2958 | u64 header; | ||
2959 | struct i40iw_qp_uk *qp_uk; | ||
2960 | |||
2961 | qp_uk = &qp->qp_uk; | ||
2962 | wqe = qp_uk->sq_base->elem; | ||
2963 | |||
2964 | set_64bit_val(wqe, 0, (uintptr_t)lsmm_buf); | ||
2965 | |||
2966 | set_64bit_val(wqe, 8, size); | ||
2967 | |||
2968 | set_64bit_val(wqe, 16, 0); | ||
2969 | |||
2970 | header = LS_64(I40IWQP_OP_RDMA_SEND, I40IWQPSQ_OPCODE) | | ||
2971 | LS_64(1, I40IWQPSQ_STREAMMODE) | | ||
2972 | LS_64(1, I40IWQPSQ_WAITFORRCVPDU) | | ||
2973 | LS_64(qp->qp_uk.swqe_polarity, I40IWQPSQ_VALID); | ||
2974 | |||
2975 | i40iw_insert_wqe_hdr(wqe, header); | ||
2976 | |||
2977 | i40iw_debug_buf(qp->dev, I40IW_DEBUG_WQE, "SEND_LSMM_NOSTAG WQE", | ||
2978 | wqe, I40IW_QP_WQE_MIN_SIZE); | ||
2979 | } | ||
2980 | |||
2981 | /** | ||
2982 | * i40iw_sc_send_rtt - send last read0 or write0 | ||
2983 | * @qp: sc qp struct | ||
2984 | * @read: Do read0 or write0 | ||
2985 | */ | ||
2986 | static void i40iw_sc_send_rtt(struct i40iw_sc_qp *qp, bool read) | ||
2987 | { | ||
2988 | u64 *wqe; | ||
2989 | u64 header; | ||
2990 | struct i40iw_qp_uk *qp_uk; | ||
2991 | |||
2992 | qp_uk = &qp->qp_uk; | ||
2993 | wqe = qp_uk->sq_base->elem; | ||
2994 | |||
2995 | set_64bit_val(wqe, 0, 0); | ||
2996 | set_64bit_val(wqe, 8, 0); | ||
2997 | set_64bit_val(wqe, 16, 0); | ||
2998 | if (read) { | ||
2999 | header = LS_64(0x1234, I40IWQPSQ_REMSTAG) | | ||
3000 | LS_64(I40IWQP_OP_RDMA_READ, I40IWQPSQ_OPCODE) | | ||
3001 | LS_64(qp->qp_uk.swqe_polarity, I40IWQPSQ_VALID); | ||
3002 | set_64bit_val(wqe, 8, ((u64)0xabcd << 32)); | ||
3003 | } else { | ||
3004 | header = LS_64(I40IWQP_OP_RDMA_WRITE, I40IWQPSQ_OPCODE) | | ||
3005 | LS_64(qp->qp_uk.swqe_polarity, I40IWQPSQ_VALID); | ||
3006 | } | ||
3007 | |||
3008 | i40iw_insert_wqe_hdr(wqe, header); | ||
3009 | |||
3010 | i40iw_debug_buf(qp->dev, I40IW_DEBUG_WQE, "RTR WQE", | ||
3011 | wqe, I40IW_QP_WQE_MIN_SIZE); | ||
3012 | } | ||
3013 | |||
3014 | /** | ||
3015 | * i40iw_sc_post_wqe0 - send wqe with opcode | ||
3016 | * @qp: sc qp struct | ||
3017 | * @opcode: opcode to use for wqe0 | ||
3018 | */ | ||
3019 | static enum i40iw_status_code i40iw_sc_post_wqe0(struct i40iw_sc_qp *qp, u8 opcode) | ||
3020 | { | ||
3021 | u64 *wqe; | ||
3022 | u64 header; | ||
3023 | struct i40iw_qp_uk *qp_uk; | ||
3024 | |||
3025 | qp_uk = &qp->qp_uk; | ||
3026 | wqe = qp_uk->sq_base->elem; | ||
3027 | |||
3028 | if (!wqe) | ||
3029 | return I40IW_ERR_QP_TOOMANY_WRS_POSTED; | ||
3030 | switch (opcode) { | ||
3031 | case I40IWQP_OP_NOP: | ||
3032 | set_64bit_val(wqe, 0, 0); | ||
3033 | set_64bit_val(wqe, 8, 0); | ||
3034 | set_64bit_val(wqe, 16, 0); | ||
3035 | header = LS_64(I40IWQP_OP_NOP, I40IWQPSQ_OPCODE) | | ||
3036 | LS_64(qp->qp_uk.swqe_polarity, I40IWQPSQ_VALID); | ||
3037 | |||
3038 | i40iw_insert_wqe_hdr(wqe, header); | ||
3039 | break; | ||
3040 | case I40IWQP_OP_RDMA_SEND: | ||
3041 | set_64bit_val(wqe, 0, 0); | ||
3042 | set_64bit_val(wqe, 8, 0); | ||
3043 | set_64bit_val(wqe, 16, 0); | ||
3044 | header = LS_64(I40IWQP_OP_RDMA_SEND, I40IWQPSQ_OPCODE) | | ||
3045 | LS_64(qp->qp_uk.swqe_polarity, I40IWQPSQ_VALID) | | ||
3046 | LS_64(1, I40IWQPSQ_STREAMMODE) | | ||
3047 | LS_64(1, I40IWQPSQ_WAITFORRCVPDU); | ||
3048 | |||
3049 | i40iw_insert_wqe_hdr(wqe, header); | ||
3050 | break; | ||
3051 | default: | ||
3052 | i40iw_debug(qp->dev, I40IW_DEBUG_QP, "%s: Invalid WQE zero opcode\n", | ||
3053 | __func__); | ||
3054 | break; | ||
3055 | } | ||
3056 | return 0; | ||
3057 | } | ||
3058 | |||
/**
 * i40iw_sc_init_iw_hmc() - queries fpm values using cqp and populates hmc_info
 * @dev : ptr to i40iw_dev struct
 * @hmc_fn_id: hmc function id
 *
 * For the PF's own function id, queries FPM values through the CQP
 * (polling registers or the CCQ depending on availability); for a VF
 * function id, the PF issues the query/commit on the VF's behalf,
 * allocating the VF's query buffer on first use.  Fixed-size HMC
 * object sizes are filled in at the end.  Returns 0 or a status code
 * from validation, buffer allocation, or the CQP operations.
 */
enum i40iw_status_code i40iw_sc_init_iw_hmc(struct i40iw_sc_dev *dev, u8 hmc_fn_id)
{
	struct i40iw_hmc_info *hmc_info;
	struct i40iw_dma_mem query_fpm_mem;
	struct i40iw_virt_mem virt_mem;
	struct i40iw_vfdev *vf_dev = NULL;
	u32 mem_size;
	enum i40iw_status_code ret_code = 0;
	bool poll_registers = true;
	u16 iw_vf_idx;
	u8 wait_type;

	/* only our own fn id or a VF-range fn id is acceptable */
	if (hmc_fn_id >= I40IW_MAX_VF_FPM_ID ||
	    (dev->hmc_fn_id != hmc_fn_id && hmc_fn_id < I40IW_FIRST_VF_FPM_ID))
		return I40IW_ERR_INVALID_HMCFN_ID;

	i40iw_debug(dev, I40IW_DEBUG_HMC, "hmc_fn_id %u, dev->hmc_fn_id %u\n", hmc_fn_id,
		    dev->hmc_fn_id);
	if (hmc_fn_id == dev->hmc_fn_id) {
		/* PF path: use the device's own query buffer */
		hmc_info = dev->hmc_info;
		query_fpm_mem.pa = dev->fpm_query_buf_pa;
		query_fpm_mem.va = dev->fpm_query_buf;
	} else {
		/* VF path: PF queries on the VF's behalf */
		vf_dev = i40iw_vfdev_from_fpm(dev, hmc_fn_id);
		if (!vf_dev)
			return I40IW_ERR_INVALID_VF_ID;

		hmc_info = &vf_dev->hmc_info;
		iw_vf_idx = vf_dev->iw_vf_idx;
		i40iw_debug(dev, I40IW_DEBUG_HMC, "vf_dev %p, hmc_info %p, hmc_obj %p\n", vf_dev,
			    hmc_info, hmc_info->hmc_obj);
		if (!vf_dev->fpm_query_buf) {
			/* lazily allocate the per-VF query buffer once */
			if (!dev->vf_fpm_query_buf[iw_vf_idx].va) {
				ret_code = i40iw_alloc_query_fpm_buf(dev,
								     &dev->vf_fpm_query_buf[iw_vf_idx]);
				if (ret_code)
					return ret_code;
			}
			vf_dev->fpm_query_buf = dev->vf_fpm_query_buf[iw_vf_idx].va;
			vf_dev->fpm_query_buf_pa = dev->vf_fpm_query_buf[iw_vf_idx].pa;
		}
		query_fpm_mem.pa = vf_dev->fpm_query_buf_pa;
		query_fpm_mem.va = vf_dev->fpm_query_buf;
		/*
		 * It is HARDWARE specific:
		 * this call is done by PF for VF and
		 * i40iw_sc_query_fpm_values needs ccq poll
		 * because PF ccq is already created.
		 */
		poll_registers = false;
	}

	hmc_info->hmc_fn_id = hmc_fn_id;

	if (hmc_fn_id != dev->hmc_fn_id) {
		ret_code =
			i40iw_cqp_query_fpm_values_cmd(dev, &query_fpm_mem, hmc_fn_id);
	} else {
		wait_type = poll_registers ? (u8)I40IW_CQP_WAIT_POLL_REGS :
			    (u8)I40IW_CQP_WAIT_POLL_CQ;

		ret_code = i40iw_sc_query_fpm_values(
					dev->cqp,
					0,
					hmc_info->hmc_fn_id,
					&query_fpm_mem,
					true,
					wait_type);
	}
	if (ret_code)
		return ret_code;

	/* parse the fpm_query_buf and fill hmc obj info */
	ret_code =
		i40iw_sc_parse_fpm_query_buf((u64 *)query_fpm_mem.va,
					     hmc_info,
					     &dev->hmc_fpm_misc);
	if (ret_code)
		return ret_code;
	i40iw_debug_buf(dev, I40IW_DEBUG_HMC, "QUERY FPM BUFFER",
			query_fpm_mem.va, I40IW_QUERY_FPM_BUF_SIZE);

	if (hmc_fn_id != dev->hmc_fn_id) {
		i40iw_cqp_commit_fpm_values_cmd(dev, &query_fpm_mem, hmc_fn_id);

		/* parse the fpm_commit_buf and fill hmc obj info */
		i40iw_sc_parse_fpm_commit_buf((u64 *)query_fpm_mem.va, hmc_info->hmc_obj);
		/* sd table sized for all sds plus the first-index offset */
		mem_size = sizeof(struct i40iw_hmc_sd_entry) *
			   (hmc_info->sd_table.sd_cnt + hmc_info->first_sd_index);
		ret_code = i40iw_allocate_virt_mem(dev->hw, &virt_mem, mem_size);
		if (ret_code)
			return ret_code;
		hmc_info->sd_table.sd_entry = virt_mem.va;
	}

	/* fill size of objects which are fixed */
	hmc_info->hmc_obj[I40IW_HMC_IW_XFFL].size = 4;
	hmc_info->hmc_obj[I40IW_HMC_IW_Q1FL].size = 4;
	hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].size = 8;
	hmc_info->hmc_obj[I40IW_HMC_IW_APBVT_ENTRY].size = 8192;
	hmc_info->hmc_obj[I40IW_HMC_IW_APBVT_ENTRY].max_cnt = 1;

	return ret_code;
}
3168 | |||
/**
 * i40iw_sc_configure_iw_fpm() - commits hmc obj cnt values using cqp command and
 * populates fpm base address in hmc_info
 * @dev : ptr to i40iw_dev struct
 * @hmc_fn_id: hmc function id
 *
 * Copies the HMC object counts into the commit buffer, issues a
 * COMMIT_FPM_VALUES CQP command (register polling for the PF's own
 * fn id, CCQ polling otherwise), then parses the commit buffer back
 * into the HMC object info.  Returns 0 or a status code.
 */
static enum i40iw_status_code i40iw_sc_configure_iw_fpm(struct i40iw_sc_dev *dev,
							u8 hmc_fn_id)
{
	struct i40iw_hmc_info *hmc_info;
	struct i40iw_hmc_obj_info *obj_info;
	u64 *buf;
	struct i40iw_dma_mem commit_fpm_mem;
	u32 i, j;
	enum i40iw_status_code ret_code = 0;
	bool poll_registers = true;
	u8 wait_type;

	/* only our own fn id or a VF-range fn id is acceptable */
	if (hmc_fn_id >= I40IW_MAX_VF_FPM_ID ||
	    (dev->hmc_fn_id != hmc_fn_id && hmc_fn_id < I40IW_FIRST_VF_FPM_ID))
		return I40IW_ERR_INVALID_HMCFN_ID;

	if (hmc_fn_id == dev->hmc_fn_id) {
		hmc_info = dev->hmc_info;
	} else {
		/* VF: commit goes through the CCQ, not register polling */
		hmc_info = i40iw_vf_hmcinfo_from_fpm(dev, hmc_fn_id);
		poll_registers = false;
	}
	if (!hmc_info)
		return I40IW_ERR_BAD_PTR;

	obj_info = hmc_info->hmc_obj;
	buf = dev->fpm_commit_buf;

	/* copy cnt values in commit buf; one 64-bit word per object type */
	for (i = I40IW_HMC_IW_QP, j = 0; i <= I40IW_HMC_IW_PBLE;
	     i++, j += 8)
		set_64bit_val(buf, j, (u64)obj_info[i].cnt);

	set_64bit_val(buf, 40, 0);   /* APBVT rsvd */

	commit_fpm_mem.pa = dev->fpm_commit_buf_pa;
	commit_fpm_mem.va = dev->fpm_commit_buf;
	wait_type = poll_registers ? (u8)I40IW_CQP_WAIT_POLL_REGS :
		    (u8)I40IW_CQP_WAIT_POLL_CQ;
	ret_code = i40iw_sc_commit_fpm_values(
					dev->cqp,
					0,
					hmc_info->hmc_fn_id,
					&commit_fpm_mem,
					true,
					wait_type);

	/* parse the fpm_commit_buf and fill hmc obj info */
	if (!ret_code)
		ret_code = i40iw_sc_parse_fpm_commit_buf(dev->fpm_commit_buf, hmc_info->hmc_obj);

	i40iw_debug_buf(dev, I40IW_DEBUG_HMC, "COMMIT FPM BUFFER",
			commit_fpm_mem.va, I40IW_COMMIT_FPM_BUF_SIZE);

	return ret_code;
}
3231 | |||
/**
 * cqp_sds_wqe_fill - fill cqp wqe for sd
 * @cqp: struct for cqp hw
 * @info: sd info for wqe
 * @scratch: u64 saved to be used during cqp completion
 *
 * Builds an UPDATE_PE_SDS CQP WQE.  Up to three SD entries are
 * carried inline in the WQE; any remaining entries are copied into
 * the cqp sdbuf DMA area and referenced by physical address.  Does
 * not ring the doorbell.  Returns I40IW_ERR_RING_FULL if no WQE
 * slot is available.
 */
static enum i40iw_status_code cqp_sds_wqe_fill(struct i40iw_sc_cqp *cqp,
					       struct i40iw_update_sds_info *info,
					       u64 scratch)
{
	u64 data;
	u64 header;
	u64 *wqe;
	int mem_entries, wqe_entries;
	struct i40iw_dma_mem *sdbuf = &cqp->sdbuf;

	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return I40IW_ERR_RING_FULL;

	I40IW_CQP_INIT_WQE(wqe);
	/* first three entries go inline; the rest through sdbuf */
	wqe_entries = (info->cnt > 3) ? 3 : info->cnt;
	mem_entries = info->cnt - wqe_entries;

	header = LS_64(I40IW_CQP_OP_UPDATE_PE_SDS, I40IW_CQPSQ_OPCODE) |
		 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID) |
		 LS_64(mem_entries, I40IW_CQPSQ_UPESD_ENTRY_COUNT);

	if (mem_entries) {
		/* each sd entry occupies 16 bytes in the dma buffer */
		memcpy(sdbuf->va, &info->entry[3], (mem_entries << 4));
		data = sdbuf->pa;
	} else {
		data = 0;
	}
	data |= LS_64(info->hmc_fn_id, I40IW_CQPSQ_UPESD_HMCFNID);

	set_64bit_val(wqe, 16, data);

	/* intentionally falls through: entry N implies entries < N too */
	switch (wqe_entries) {
	case 3:
		set_64bit_val(wqe, 48,
			      (LS_64(info->entry[2].cmd, I40IW_CQPSQ_UPESD_SDCMD) |
			       LS_64(1, I40IW_CQPSQ_UPESD_ENTRY_VALID)));

		set_64bit_val(wqe, 56, info->entry[2].data);
		/* fallthrough */
	case 2:
		set_64bit_val(wqe, 32,
			      (LS_64(info->entry[1].cmd, I40IW_CQPSQ_UPESD_SDCMD) |
			       LS_64(1, I40IW_CQPSQ_UPESD_ENTRY_VALID)));

		set_64bit_val(wqe, 40, info->entry[1].data);
		/* fallthrough */
	case 1:
		set_64bit_val(wqe, 0,
			      LS_64(info->entry[0].cmd, I40IW_CQPSQ_UPESD_SDCMD));

		set_64bit_val(wqe, 8, info->entry[0].data);
		break;
	default:
		break;
	}

	i40iw_insert_wqe_hdr(wqe, header);

	i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "UPDATE_PE_SDS WQE",
			wqe, I40IW_CQP_WQE_SIZE * 8);
	return 0;
}
3301 | |||
3302 | /** | ||
3303 | * i40iw_update_pe_sds - cqp wqe for sd | ||
3304 | * @dev: ptr to i40iw_dev struct | ||
3305 | * @info: sd info for sd's | ||
3306 | * @scratch: u64 saved to be used during cqp completion | ||
3307 | */ | ||
3308 | static enum i40iw_status_code i40iw_update_pe_sds(struct i40iw_sc_dev *dev, | ||
3309 | struct i40iw_update_sds_info *info, | ||
3310 | u64 scratch) | ||
3311 | { | ||
3312 | struct i40iw_sc_cqp *cqp = dev->cqp; | ||
3313 | enum i40iw_status_code ret_code; | ||
3314 | |||
3315 | ret_code = cqp_sds_wqe_fill(cqp, info, scratch); | ||
3316 | if (!ret_code) | ||
3317 | i40iw_sc_cqp_post_sq(cqp); | ||
3318 | |||
3319 | return ret_code; | ||
3320 | } | ||
3321 | |||
3322 | /** | ||
3323 | * i40iw_update_sds_noccq - update sd before ccq created | ||
3324 | * @dev: sc device struct | ||
3325 | * @info: sd info for sd's | ||
3326 | */ | ||
3327 | enum i40iw_status_code i40iw_update_sds_noccq(struct i40iw_sc_dev *dev, | ||
3328 | struct i40iw_update_sds_info *info) | ||
3329 | { | ||
3330 | u32 error, val, tail; | ||
3331 | struct i40iw_sc_cqp *cqp = dev->cqp; | ||
3332 | enum i40iw_status_code ret_code; | ||
3333 | |||
3334 | ret_code = cqp_sds_wqe_fill(cqp, info, 0); | ||
3335 | if (ret_code) | ||
3336 | return ret_code; | ||
3337 | i40iw_get_cqp_reg_info(cqp, &val, &tail, &error); | ||
3338 | if (error) | ||
3339 | return I40IW_ERR_CQP_COMPL_ERROR; | ||
3340 | |||
3341 | i40iw_sc_cqp_post_sq(cqp); | ||
3342 | ret_code = i40iw_cqp_poll_registers(cqp, tail, I40IW_DONE_COUNT); | ||
3343 | |||
3344 | return ret_code; | ||
3345 | } | ||
3346 | |||
3347 | /** | ||
3348 | * i40iw_sc_suspend_qp - suspend qp for param change | ||
3349 | * @cqp: struct for cqp hw | ||
3350 | * @qp: sc qp struct | ||
3351 | * @scratch: u64 saved to be used during cqp completion | ||
3352 | */ | ||
3353 | enum i40iw_status_code i40iw_sc_suspend_qp(struct i40iw_sc_cqp *cqp, | ||
3354 | struct i40iw_sc_qp *qp, | ||
3355 | u64 scratch) | ||
3356 | { | ||
3357 | u64 header; | ||
3358 | u64 *wqe; | ||
3359 | |||
3360 | wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch); | ||
3361 | if (!wqe) | ||
3362 | return I40IW_ERR_RING_FULL; | ||
3363 | header = LS_64(qp->qp_uk.qp_id, I40IW_CQPSQ_SUSPENDQP_QPID) | | ||
3364 | LS_64(I40IW_CQP_OP_SUSPEND_QP, I40IW_CQPSQ_OPCODE) | | ||
3365 | LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID); | ||
3366 | |||
3367 | i40iw_insert_wqe_hdr(wqe, header); | ||
3368 | |||
3369 | i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "SUSPEND_QP WQE", | ||
3370 | wqe, I40IW_CQP_WQE_SIZE * 8); | ||
3371 | |||
3372 | i40iw_sc_cqp_post_sq(cqp); | ||
3373 | return 0; | ||
3374 | } | ||
3375 | |||
3376 | /** | ||
3377 | * i40iw_sc_resume_qp - resume qp after suspend | ||
3378 | * @cqp: struct for cqp hw | ||
3379 | * @qp: sc qp struct | ||
3380 | * @scratch: u64 saved to be used during cqp completion | ||
3381 | */ | ||
3382 | enum i40iw_status_code i40iw_sc_resume_qp(struct i40iw_sc_cqp *cqp, | ||
3383 | struct i40iw_sc_qp *qp, | ||
3384 | u64 scratch) | ||
3385 | { | ||
3386 | u64 header; | ||
3387 | u64 *wqe; | ||
3388 | |||
3389 | wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch); | ||
3390 | if (!wqe) | ||
3391 | return I40IW_ERR_RING_FULL; | ||
3392 | set_64bit_val(wqe, | ||
3393 | 16, | ||
3394 | LS_64(qp->qs_handle, I40IW_CQPSQ_RESUMEQP_QSHANDLE)); | ||
3395 | |||
3396 | header = LS_64(qp->qp_uk.qp_id, I40IW_CQPSQ_RESUMEQP_QPID) | | ||
3397 | LS_64(I40IW_CQP_OP_RESUME_QP, I40IW_CQPSQ_OPCODE) | | ||
3398 | LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID); | ||
3399 | |||
3400 | i40iw_insert_wqe_hdr(wqe, header); | ||
3401 | |||
3402 | i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "RESUME_QP WQE", | ||
3403 | wqe, I40IW_CQP_WQE_SIZE * 8); | ||
3404 | |||
3405 | i40iw_sc_cqp_post_sq(cqp); | ||
3406 | return 0; | ||
3407 | } | ||
3408 | |||
3409 | /** | ||
3410 | * i40iw_sc_static_hmc_pages_allocated - cqp wqe to allocate hmc pages | ||
3411 | * @cqp: struct for cqp hw | ||
3412 | * @scratch: u64 saved to be used during cqp completion | ||
3413 | * @hmc_fn_id: hmc function id | ||
3414 | * @post_sq: flag for cqp db to ring | ||
3415 | * @poll_registers: flag to poll register for cqp completion | ||
3416 | */ | ||
3417 | enum i40iw_status_code i40iw_sc_static_hmc_pages_allocated( | ||
3418 | struct i40iw_sc_cqp *cqp, | ||
3419 | u64 scratch, | ||
3420 | u8 hmc_fn_id, | ||
3421 | bool post_sq, | ||
3422 | bool poll_registers) | ||
3423 | { | ||
3424 | u64 header; | ||
3425 | u64 *wqe; | ||
3426 | u32 tail, val, error; | ||
3427 | enum i40iw_status_code ret_code = 0; | ||
3428 | |||
3429 | wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch); | ||
3430 | if (!wqe) | ||
3431 | return I40IW_ERR_RING_FULL; | ||
3432 | set_64bit_val(wqe, | ||
3433 | 16, | ||
3434 | LS_64(hmc_fn_id, I40IW_SHMC_PAGE_ALLOCATED_HMC_FN_ID)); | ||
3435 | |||
3436 | header = LS_64(I40IW_CQP_OP_SHMC_PAGES_ALLOCATED, I40IW_CQPSQ_OPCODE) | | ||
3437 | LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID); | ||
3438 | |||
3439 | i40iw_insert_wqe_hdr(wqe, header); | ||
3440 | |||
3441 | i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "SHMC_PAGES_ALLOCATED WQE", | ||
3442 | wqe, I40IW_CQP_WQE_SIZE * 8); | ||
3443 | i40iw_get_cqp_reg_info(cqp, &val, &tail, &error); | ||
3444 | if (error) { | ||
3445 | ret_code = I40IW_ERR_CQP_COMPL_ERROR; | ||
3446 | return ret_code; | ||
3447 | } | ||
3448 | if (post_sq) { | ||
3449 | i40iw_sc_cqp_post_sq(cqp); | ||
3450 | if (poll_registers) | ||
3451 | /* check for cqp sq tail update */ | ||
3452 | ret_code = i40iw_cqp_poll_registers(cqp, tail, 1000); | ||
3453 | else | ||
3454 | ret_code = i40iw_sc_poll_for_cqp_op_done(cqp, | ||
3455 | I40IW_CQP_OP_SHMC_PAGES_ALLOCATED, | ||
3456 | NULL); | ||
3457 | } | ||
3458 | |||
3459 | return ret_code; | ||
3460 | } | ||
3461 | |||
3462 | /** | ||
3463 | * i40iw_ring_full - check if cqp ring is full | ||
3464 | * @cqp: struct for cqp hw | ||
3465 | */ | ||
3466 | static bool i40iw_ring_full(struct i40iw_sc_cqp *cqp) | ||
3467 | { | ||
3468 | return I40IW_RING_FULL_ERR(cqp->sq_ring); | ||
3469 | } | ||
3470 | |||
/**
 * i40iw_config_fpm_values - configure HMC objects
 * @dev: sc device struct
 * @qp_count: desired qp count
 *
 * Queries the firmware's maximum HMC object counts, then iteratively
 * shrinks the wanted qp/mr/pble counts until the total backing-page
 * (SD) requirement fits within hmc_fpm_misc->max_sds (or the loop cap
 * of 2000 iterations is hit), commits the result to hardware via
 * i40iw_sc_configure_iw_fpm(), and allocates the host sd_entry table.
 */
enum i40iw_status_code i40iw_config_fpm_values(struct i40iw_sc_dev *dev, u32 qp_count)
{
	struct i40iw_virt_mem virt_mem;
	u32 i, mem_size;
	u32 qpwantedoriginal, qpwanted, mrwanted, pblewanted;
	u32 powerof2;
	u64 sd_needed, bytes_needed;
	u32 loop_count = 0;

	struct i40iw_hmc_info *hmc_info;
	struct i40iw_hmc_fpm_misc *hmc_fpm_misc;
	enum i40iw_status_code ret_code = 0;

	hmc_info = dev->hmc_info;
	hmc_fpm_misc = &dev->hmc_fpm_misc;

	/* query fw for the per-object maximums before sizing anything */
	ret_code = i40iw_sc_init_iw_hmc(dev, dev->hmc_fn_id);
	if (ret_code) {
		i40iw_debug(dev, I40IW_DEBUG_HMC,
			    "i40iw_sc_init_iw_hmc returned error_code = %d\n",
			    ret_code);
		return ret_code;
	}

	/* start from the absolute maximum footprint to log the upper bound */
	bytes_needed = 0;
	for (i = I40IW_HMC_IW_QP; i < I40IW_HMC_IW_MAX; i++) {
		hmc_info->hmc_obj[i].cnt = hmc_info->hmc_obj[i].max_cnt;
		bytes_needed +=
		    (hmc_info->hmc_obj[i].max_cnt) * (hmc_info->hmc_obj[i].size);
		i40iw_debug(dev, I40IW_DEBUG_HMC,
			    "%s i[%04d] max_cnt[0x%04X] size[0x%04llx]\n",
			    __func__, i, hmc_info->hmc_obj[i].max_cnt,
			    hmc_info->hmc_obj[i].size);
	}
	sd_needed = (bytes_needed / I40IW_HMC_DIRECT_BP_SIZE) + 1;	/* round up */
	i40iw_debug(dev, I40IW_DEBUG_HMC,
		    "%s: FW initial max sd_count[%08lld] first_sd_index[%04d]\n",
		    __func__, sd_needed, hmc_info->first_sd_index);
	i40iw_debug(dev, I40IW_DEBUG_HMC,
		    "%s: bytes_needed=0x%llx sd count %d where max sd is %d\n",
		    __func__, bytes_needed, hmc_info->sd_table.sd_cnt,
		    hmc_fpm_misc->max_sds);

	qpwanted = min(qp_count, hmc_info->hmc_obj[I40IW_HMC_IW_QP].max_cnt);
	qpwantedoriginal = qpwanted;
	mrwanted = hmc_info->hmc_obj[I40IW_HMC_IW_MR].max_cnt;
	pblewanted = hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].max_cnt;

	i40iw_debug(dev, I40IW_DEBUG_HMC,
		    "req_qp=%d max_sd=%d, max_qp = %d, max_cq=%d, max_mr=%d, max_pble=%d\n",
		    qp_count, hmc_fpm_misc->max_sds,
		    hmc_info->hmc_obj[I40IW_HMC_IW_QP].max_cnt,
		    hmc_info->hmc_obj[I40IW_HMC_IW_CQ].max_cnt,
		    hmc_info->hmc_obj[I40IW_HMC_IW_MR].max_cnt,
		    hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].max_cnt);

	/* shrink wanted counts until the SD footprint fits max_sds */
	do {
		++loop_count;
		hmc_info->hmc_obj[I40IW_HMC_IW_QP].cnt = qpwanted;
		hmc_info->hmc_obj[I40IW_HMC_IW_CQ].cnt =
			min(2 * qpwanted, hmc_info->hmc_obj[I40IW_HMC_IW_CQ].cnt);
		hmc_info->hmc_obj[I40IW_HMC_IW_SRQ].cnt = 0x00;	/* Reserved */
		hmc_info->hmc_obj[I40IW_HMC_IW_HTE].cnt =
			qpwanted * hmc_fpm_misc->ht_multiplier;
		hmc_info->hmc_obj[I40IW_HMC_IW_ARP].cnt =
			hmc_info->hmc_obj[I40IW_HMC_IW_ARP].max_cnt;
		hmc_info->hmc_obj[I40IW_HMC_IW_APBVT_ENTRY].cnt = 1;
		hmc_info->hmc_obj[I40IW_HMC_IW_MR].cnt = mrwanted;

		/* XF/Q1 (and their free lists) and timers scale with qp count */
		hmc_info->hmc_obj[I40IW_HMC_IW_XF].cnt = I40IW_MAX_WQ_ENTRIES * qpwanted;
		hmc_info->hmc_obj[I40IW_HMC_IW_Q1].cnt = 4 * I40IW_MAX_IRD_SIZE * qpwanted;
		hmc_info->hmc_obj[I40IW_HMC_IW_XFFL].cnt =
			hmc_info->hmc_obj[I40IW_HMC_IW_XF].cnt / hmc_fpm_misc->xf_block_size;
		hmc_info->hmc_obj[I40IW_HMC_IW_Q1FL].cnt =
			hmc_info->hmc_obj[I40IW_HMC_IW_Q1].cnt / hmc_fpm_misc->q1_block_size;
		hmc_info->hmc_obj[I40IW_HMC_IW_TIMER].cnt =
			((qpwanted) / 512 + 1) * hmc_fpm_misc->timer_bucket;
		hmc_info->hmc_obj[I40IW_HMC_IW_FSIMC].cnt = 0x00;
		hmc_info->hmc_obj[I40IW_HMC_IW_FSIAV].cnt = 0x00;
		hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt = pblewanted;

		/* How much memory is needed for all the objects. */
		bytes_needed = 0;
		for (i = I40IW_HMC_IW_QP; i < I40IW_HMC_IW_MAX; i++)
			bytes_needed +=
			    (hmc_info->hmc_obj[i].cnt) * (hmc_info->hmc_obj[i].size);
		sd_needed = (bytes_needed / I40IW_HMC_DIRECT_BP_SIZE) + 1;
		/* every 10th pass (while qps remain above 2/3 of the request),
		 * or unconditionally after 1000 passes, reduce qpwanted:
		 * large counts drop by FPM_MULTIPLIER then round down to a
		 * power of two; small counts are simply halved
		 */
		if ((loop_count > 1000) ||
		    ((!(loop_count % 10)) &&
		     (qpwanted > qpwantedoriginal * 2 / 3))) {
			if (qpwanted > FPM_MULTIPLIER) {
				qpwanted -= FPM_MULTIPLIER;
				powerof2 = 1;
				while (powerof2 < qpwanted)
					powerof2 *= 2;
				powerof2 /= 2;
				qpwanted = powerof2;
			} else {
				qpwanted /= 2;
			}
		}
		/* mr and pble counts decay every pass (with a floor) */
		if (mrwanted > FPM_MULTIPLIER * 10)
			mrwanted -= FPM_MULTIPLIER * 10;
		if (pblewanted > FPM_MULTIPLIER * 1000)
			pblewanted -= FPM_MULTIPLIER * 1000;
	} while (sd_needed > hmc_fpm_misc->max_sds && loop_count < 2000);

	/* recompute the final footprint from the committed counts */
	bytes_needed = 0;
	for (i = I40IW_HMC_IW_QP; i < I40IW_HMC_IW_MAX; i++) {
		bytes_needed += (hmc_info->hmc_obj[i].cnt) * (hmc_info->hmc_obj[i].size);
		i40iw_debug(dev, I40IW_DEBUG_HMC,
			    "%s i[%04d] cnt[0x%04x] size[0x%04llx]\n",
			    __func__, i, hmc_info->hmc_obj[i].cnt,
			    hmc_info->hmc_obj[i].size);
	}
	sd_needed = (bytes_needed / I40IW_HMC_DIRECT_BP_SIZE) + 1;	/* round up not truncate. */

	i40iw_debug(dev, I40IW_DEBUG_HMC,
		    "loop_cnt=%d, sd_needed=%lld, qpcnt = %d, cqcnt=%d, mrcnt=%d, pblecnt=%d\n",
		    loop_count, sd_needed,
		    hmc_info->hmc_obj[I40IW_HMC_IW_QP].cnt,
		    hmc_info->hmc_obj[I40IW_HMC_IW_CQ].cnt,
		    hmc_info->hmc_obj[I40IW_HMC_IW_MR].cnt,
		    hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt);

	/* push the chosen counts to hardware */
	ret_code = i40iw_sc_configure_iw_fpm(dev, dev->hmc_fn_id);
	if (ret_code) {
		i40iw_debug(dev, I40IW_DEBUG_HMC,
			    "configure_iw_fpm returned error_code[x%08X]\n",
			    i40iw_rd32(dev->hw, dev->is_pf ? I40E_PFPE_CQPERRCODES : I40E_VFPE_CQPERRCODES1));
		return ret_code;
	}

	hmc_info->sd_table.sd_cnt = (u32)sd_needed;

	/* host-side sd_entry table sized for sd count plus first index */
	mem_size = sizeof(struct i40iw_hmc_sd_entry) *
		   (hmc_info->sd_table.sd_cnt + hmc_info->first_sd_index + 1);
	ret_code = i40iw_allocate_virt_mem(dev->hw, &virt_mem, mem_size);
	if (ret_code) {
		i40iw_debug(dev, I40IW_DEBUG_HMC,
			    "%s: failed to allocate memory for sd_entry buffer\n",
			    __func__);
		return ret_code;
	}
	hmc_info->sd_table.sd_entry = virt_mem.va;

	return ret_code;
}
3624 | |||
/**
 * i40iw_exec_cqp_cmd - execute cqp cmd when wqe are available
 * @dev: rdma device
 * @pcmdinfo: cqp command info
 *
 * Dispatch table mapping a deferred cqp command (cqp_commands_info)
 * onto the corresponding i40iw_sc_* wqe builder.  Bumps the per-command
 * statistics counter first.  Caller must hold dev->cqp_lock and have
 * verified ring space (see i40iw_process_cqp_cmd / i40iw_process_bh).
 */
static enum i40iw_status_code i40iw_exec_cqp_cmd(struct i40iw_sc_dev *dev,
						 struct cqp_commands_info *pcmdinfo)
{
	enum i40iw_status_code status;
	struct i40iw_dma_mem values_mem;

	dev->cqp_cmd_stats[pcmdinfo->cqp_cmd]++;
	switch (pcmdinfo->cqp_cmd) {
	case OP_DELETE_LOCAL_MAC_IPADDR_ENTRY:
		status = i40iw_sc_del_local_mac_ipaddr_entry(
				pcmdinfo->in.u.del_local_mac_ipaddr_entry.cqp,
				pcmdinfo->in.u.del_local_mac_ipaddr_entry.scratch,
				pcmdinfo->in.u.del_local_mac_ipaddr_entry.entry_idx,
				pcmdinfo->in.u.del_local_mac_ipaddr_entry.ignore_ref_count,
				pcmdinfo->post_sq);
		break;
	case OP_CEQ_DESTROY:
		status = i40iw_sc_ceq_destroy(pcmdinfo->in.u.ceq_destroy.ceq,
					      pcmdinfo->in.u.ceq_destroy.scratch,
					      pcmdinfo->post_sq);
		break;
	case OP_AEQ_DESTROY:
		status = i40iw_sc_aeq_destroy(pcmdinfo->in.u.aeq_destroy.aeq,
					      pcmdinfo->in.u.aeq_destroy.scratch,
					      pcmdinfo->post_sq);

		break;
	case OP_DELETE_ARP_CACHE_ENTRY:
		status = i40iw_sc_del_arp_cache_entry(
				pcmdinfo->in.u.del_arp_cache_entry.cqp,
				pcmdinfo->in.u.del_arp_cache_entry.scratch,
				pcmdinfo->in.u.del_arp_cache_entry.arp_index,
				pcmdinfo->post_sq);
		break;
	case OP_MANAGE_APBVT_ENTRY:
		status = i40iw_sc_manage_apbvt_entry(
				pcmdinfo->in.u.manage_apbvt_entry.cqp,
				&pcmdinfo->in.u.manage_apbvt_entry.info,
				pcmdinfo->in.u.manage_apbvt_entry.scratch,
				pcmdinfo->post_sq);
		break;
	case OP_CEQ_CREATE:
		status = i40iw_sc_ceq_create(pcmdinfo->in.u.ceq_create.ceq,
					     pcmdinfo->in.u.ceq_create.scratch,
					     pcmdinfo->post_sq);
		break;
	case OP_AEQ_CREATE:
		status = i40iw_sc_aeq_create(pcmdinfo->in.u.aeq_create.aeq,
					     pcmdinfo->in.u.aeq_create.scratch,
					     pcmdinfo->post_sq);
		break;
	case OP_ALLOC_LOCAL_MAC_IPADDR_ENTRY:
		status = i40iw_sc_alloc_local_mac_ipaddr_entry(
				pcmdinfo->in.u.alloc_local_mac_ipaddr_entry.cqp,
				pcmdinfo->in.u.alloc_local_mac_ipaddr_entry.scratch,
				pcmdinfo->post_sq);
		break;
	case OP_ADD_LOCAL_MAC_IPADDR_ENTRY:
		status = i40iw_sc_add_local_mac_ipaddr_entry(
				pcmdinfo->in.u.add_local_mac_ipaddr_entry.cqp,
				&pcmdinfo->in.u.add_local_mac_ipaddr_entry.info,
				pcmdinfo->in.u.add_local_mac_ipaddr_entry.scratch,
				pcmdinfo->post_sq);
		break;
	case OP_MANAGE_QHASH_TABLE_ENTRY:
		status = i40iw_sc_manage_qhash_table_entry(
				pcmdinfo->in.u.manage_qhash_table_entry.cqp,
				&pcmdinfo->in.u.manage_qhash_table_entry.info,
				pcmdinfo->in.u.manage_qhash_table_entry.scratch,
				pcmdinfo->post_sq);

		break;
	case OP_QP_MODIFY:
		status = i40iw_sc_qp_modify(
				pcmdinfo->in.u.qp_modify.qp,
				&pcmdinfo->in.u.qp_modify.info,
				pcmdinfo->in.u.qp_modify.scratch,
				pcmdinfo->post_sq);

		break;
	case OP_QP_UPLOAD_CONTEXT:
		status = i40iw_sc_qp_upload_context(
				pcmdinfo->in.u.qp_upload_context.dev,
				&pcmdinfo->in.u.qp_upload_context.info,
				pcmdinfo->in.u.qp_upload_context.scratch,
				pcmdinfo->post_sq);

		break;
	case OP_CQ_CREATE:
		status = i40iw_sc_cq_create(
				pcmdinfo->in.u.cq_create.cq,
				pcmdinfo->in.u.cq_create.scratch,
				pcmdinfo->in.u.cq_create.check_overflow,
				pcmdinfo->post_sq);
		break;
	case OP_CQ_DESTROY:
		status = i40iw_sc_cq_destroy(
				pcmdinfo->in.u.cq_destroy.cq,
				pcmdinfo->in.u.cq_destroy.scratch,
				pcmdinfo->post_sq);

		break;
	case OP_QP_CREATE:
		status = i40iw_sc_qp_create(
				pcmdinfo->in.u.qp_create.qp,
				&pcmdinfo->in.u.qp_create.info,
				pcmdinfo->in.u.qp_create.scratch,
				pcmdinfo->post_sq);
		break;
	case OP_QP_DESTROY:
		status = i40iw_sc_qp_destroy(
				pcmdinfo->in.u.qp_destroy.qp,
				pcmdinfo->in.u.qp_destroy.scratch,
				pcmdinfo->in.u.qp_destroy.remove_hash_idx,
				pcmdinfo->in.u.qp_destroy.
				ignore_mw_bnd,
				pcmdinfo->post_sq);

		break;
	case OP_ALLOC_STAG:
		status = i40iw_sc_alloc_stag(
				pcmdinfo->in.u.alloc_stag.dev,
				&pcmdinfo->in.u.alloc_stag.info,
				pcmdinfo->in.u.alloc_stag.scratch,
				pcmdinfo->post_sq);
		break;
	case OP_MR_REG_NON_SHARED:
		status = i40iw_sc_mr_reg_non_shared(
				pcmdinfo->in.u.mr_reg_non_shared.dev,
				&pcmdinfo->in.u.mr_reg_non_shared.info,
				pcmdinfo->in.u.mr_reg_non_shared.scratch,
				pcmdinfo->post_sq);

		break;
	case OP_DEALLOC_STAG:
		status = i40iw_sc_dealloc_stag(
				pcmdinfo->in.u.dealloc_stag.dev,
				&pcmdinfo->in.u.dealloc_stag.info,
				pcmdinfo->in.u.dealloc_stag.scratch,
				pcmdinfo->post_sq);

		break;
	case OP_MW_ALLOC:
		status = i40iw_sc_mw_alloc(
				pcmdinfo->in.u.mw_alloc.dev,
				pcmdinfo->in.u.mw_alloc.scratch,
				pcmdinfo->in.u.mw_alloc.mw_stag_index,
				pcmdinfo->in.u.mw_alloc.pd_id,
				pcmdinfo->post_sq);

		break;
	case OP_QP_FLUSH_WQES:
		status = i40iw_sc_qp_flush_wqes(
				pcmdinfo->in.u.qp_flush_wqes.qp,
				&pcmdinfo->in.u.qp_flush_wqes.info,
				pcmdinfo->in.u.qp_flush_wqes.
				scratch, pcmdinfo->post_sq);
		break;
	case OP_ADD_ARP_CACHE_ENTRY:
		status = i40iw_sc_add_arp_cache_entry(
				pcmdinfo->in.u.add_arp_cache_entry.cqp,
				&pcmdinfo->in.u.add_arp_cache_entry.info,
				pcmdinfo->in.u.add_arp_cache_entry.scratch,
				pcmdinfo->post_sq);
		break;
	case OP_MANAGE_PUSH_PAGE:
		status = i40iw_sc_manage_push_page(
				pcmdinfo->in.u.manage_push_page.cqp,
				&pcmdinfo->in.u.manage_push_page.info,
				pcmdinfo->in.u.manage_push_page.scratch,
				pcmdinfo->post_sq);
		break;
	case OP_UPDATE_PE_SDS:
		/* case I40IW_CQP_OP_UPDATE_PE_SDS */
		status = i40iw_update_pe_sds(
				pcmdinfo->in.u.update_pe_sds.dev,
				&pcmdinfo->in.u.update_pe_sds.info,
				pcmdinfo->in.u.update_pe_sds.
				scratch);

		break;
	case OP_MANAGE_HMC_PM_FUNC_TABLE:
		status = i40iw_sc_manage_hmc_pm_func_table(
				pcmdinfo->in.u.manage_hmc_pm.dev->cqp,
				pcmdinfo->in.u.manage_hmc_pm.scratch,
				(u8)pcmdinfo->in.u.manage_hmc_pm.info.vf_id,
				pcmdinfo->in.u.manage_hmc_pm.info.free_fcn,
				true);
		break;
	case OP_SUSPEND:
		status = i40iw_sc_suspend_qp(
				pcmdinfo->in.u.suspend_resume.cqp,
				pcmdinfo->in.u.suspend_resume.qp,
				pcmdinfo->in.u.suspend_resume.scratch);
		break;
	case OP_RESUME:
		status = i40iw_sc_resume_qp(
				pcmdinfo->in.u.suspend_resume.cqp,
				pcmdinfo->in.u.suspend_resume.qp,
				pcmdinfo->in.u.suspend_resume.scratch);
		break;
	case OP_MANAGE_VF_PBLE_BP:
		status = i40iw_manage_vf_pble_bp(
				pcmdinfo->in.u.manage_vf_pble_bp.cqp,
				&pcmdinfo->in.u.manage_vf_pble_bp.info,
				pcmdinfo->in.u.manage_vf_pble_bp.scratch, true);
		break;
	case OP_QUERY_FPM_VALUES:
		/* pa/va pair is repacked into a dma_mem for the builder */
		values_mem.pa = pcmdinfo->in.u.query_fpm_values.fpm_values_pa;
		values_mem.va = pcmdinfo->in.u.query_fpm_values.fpm_values_va;
		status = i40iw_sc_query_fpm_values(
				pcmdinfo->in.u.query_fpm_values.cqp,
				pcmdinfo->in.u.query_fpm_values.scratch,
				pcmdinfo->in.u.query_fpm_values.hmc_fn_id,
				&values_mem, true, I40IW_CQP_WAIT_EVENT);
		break;
	case OP_COMMIT_FPM_VALUES:
		values_mem.pa = pcmdinfo->in.u.commit_fpm_values.fpm_values_pa;
		values_mem.va = pcmdinfo->in.u.commit_fpm_values.fpm_values_va;
		status = i40iw_sc_commit_fpm_values(
				pcmdinfo->in.u.commit_fpm_values.cqp,
				pcmdinfo->in.u.commit_fpm_values.scratch,
				pcmdinfo->in.u.commit_fpm_values.hmc_fn_id,
				&values_mem,
				true,
				I40IW_CQP_WAIT_EVENT);
		break;
	default:
		status = I40IW_NOT_SUPPORTED;
		break;
	}

	return status;
}
3864 | |||
3865 | /** | ||
3866 | * i40iw_process_cqp_cmd - process all cqp commands | ||
3867 | * @dev: sc device struct | ||
3868 | * @pcmdinfo: cqp command info | ||
3869 | */ | ||
3870 | enum i40iw_status_code i40iw_process_cqp_cmd(struct i40iw_sc_dev *dev, | ||
3871 | struct cqp_commands_info *pcmdinfo) | ||
3872 | { | ||
3873 | enum i40iw_status_code status = 0; | ||
3874 | unsigned long flags; | ||
3875 | |||
3876 | spin_lock_irqsave(&dev->cqp_lock, flags); | ||
3877 | if (list_empty(&dev->cqp_cmd_head) && !i40iw_ring_full(dev->cqp)) | ||
3878 | status = i40iw_exec_cqp_cmd(dev, pcmdinfo); | ||
3879 | else | ||
3880 | list_add_tail(&pcmdinfo->cqp_cmd_entry, &dev->cqp_cmd_head); | ||
3881 | spin_unlock_irqrestore(&dev->cqp_lock, flags); | ||
3882 | return status; | ||
3883 | } | ||
3884 | |||
3885 | /** | ||
3886 | * i40iw_process_bh - called from tasklet for cqp list | ||
3887 | * @dev: sc device struct | ||
3888 | */ | ||
3889 | enum i40iw_status_code i40iw_process_bh(struct i40iw_sc_dev *dev) | ||
3890 | { | ||
3891 | enum i40iw_status_code status = 0; | ||
3892 | struct cqp_commands_info *pcmdinfo; | ||
3893 | unsigned long flags; | ||
3894 | |||
3895 | spin_lock_irqsave(&dev->cqp_lock, flags); | ||
3896 | while (!list_empty(&dev->cqp_cmd_head) && !i40iw_ring_full(dev->cqp)) { | ||
3897 | pcmdinfo = (struct cqp_commands_info *)i40iw_remove_head(&dev->cqp_cmd_head); | ||
3898 | |||
3899 | status = i40iw_exec_cqp_cmd(dev, pcmdinfo); | ||
3900 | if (status) | ||
3901 | break; | ||
3902 | } | ||
3903 | spin_unlock_irqrestore(&dev->cqp_lock, flags); | ||
3904 | return status; | ||
3905 | } | ||
3906 | |||
3907 | /** | ||
3908 | * i40iw_iwarp_opcode - determine if incoming is rdma layer | ||
3909 | * @info: aeq info for the packet | ||
3910 | * @pkt: packet for error | ||
3911 | */ | ||
3912 | static u32 i40iw_iwarp_opcode(struct i40iw_aeqe_info *info, u8 *pkt) | ||
3913 | { | ||
3914 | u16 *mpa; | ||
3915 | u32 opcode = 0xffffffff; | ||
3916 | |||
3917 | if (info->q2_data_written) { | ||
3918 | mpa = (u16 *)pkt; | ||
3919 | opcode = ntohs(mpa[1]) & 0xf; | ||
3920 | } | ||
3921 | return opcode; | ||
3922 | } | ||
3923 | |||
3924 | /** | ||
3925 | * i40iw_locate_mpa - return pointer to mpa in the pkt | ||
3926 | * @pkt: packet with data | ||
3927 | */ | ||
3928 | static u8 *i40iw_locate_mpa(u8 *pkt) | ||
3929 | { | ||
3930 | /* skip over ethernet header */ | ||
3931 | pkt += I40IW_MAC_HLEN; | ||
3932 | |||
3933 | /* Skip over IP and TCP headers */ | ||
3934 | pkt += 4 * (pkt[0] & 0x0f); | ||
3935 | pkt += 4 * ((pkt[12] >> 4) & 0x0f); | ||
3936 | return pkt; | ||
3937 | } | ||
3938 | |||
3939 | /** | ||
3940 | * i40iw_setup_termhdr - termhdr for terminate pkt | ||
3941 | * @qp: sc qp ptr for pkt | ||
3942 | * @hdr: term hdr | ||
3943 | * @opcode: flush opcode for termhdr | ||
3944 | * @layer_etype: error layer + error type | ||
3945 | * @err: error cod ein the header | ||
3946 | */ | ||
3947 | static void i40iw_setup_termhdr(struct i40iw_sc_qp *qp, | ||
3948 | struct i40iw_terminate_hdr *hdr, | ||
3949 | enum i40iw_flush_opcode opcode, | ||
3950 | u8 layer_etype, | ||
3951 | u8 err) | ||
3952 | { | ||
3953 | qp->flush_code = opcode; | ||
3954 | hdr->layer_etype = layer_etype; | ||
3955 | hdr->error_code = err; | ||
3956 | } | ||
3957 | |||
3958 | /** | ||
3959 | * i40iw_bld_terminate_hdr - build terminate message header | ||
3960 | * @qp: qp associated with received terminate AE | ||
3961 | * @info: the struct contiaing AE information | ||
3962 | */ | ||
3963 | static int i40iw_bld_terminate_hdr(struct i40iw_sc_qp *qp, | ||
3964 | struct i40iw_aeqe_info *info) | ||
3965 | { | ||
3966 | u8 *pkt = qp->q2_buf + Q2_BAD_FRAME_OFFSET; | ||
3967 | u16 ddp_seg_len; | ||
3968 | int copy_len = 0; | ||
3969 | u8 is_tagged = 0; | ||
3970 | enum i40iw_flush_opcode flush_code = FLUSH_INVALID; | ||
3971 | u32 opcode; | ||
3972 | struct i40iw_terminate_hdr *termhdr; | ||
3973 | |||
3974 | termhdr = (struct i40iw_terminate_hdr *)qp->q2_buf; | ||
3975 | memset(termhdr, 0, Q2_BAD_FRAME_OFFSET); | ||
3976 | |||
3977 | if (info->q2_data_written) { | ||
3978 | /* Use data from offending packet to fill in ddp & rdma hdrs */ | ||
3979 | pkt = i40iw_locate_mpa(pkt); | ||
3980 | ddp_seg_len = ntohs(*(u16 *)pkt); | ||
3981 | if (ddp_seg_len) { | ||
3982 | copy_len = 2; | ||
3983 | termhdr->hdrct = DDP_LEN_FLAG; | ||
3984 | if (pkt[2] & 0x80) { | ||
3985 | is_tagged = 1; | ||
3986 | if (ddp_seg_len >= TERM_DDP_LEN_TAGGED) { | ||
3987 | copy_len += TERM_DDP_LEN_TAGGED; | ||
3988 | termhdr->hdrct |= DDP_HDR_FLAG; | ||
3989 | } | ||
3990 | } else { | ||
3991 | if (ddp_seg_len >= TERM_DDP_LEN_UNTAGGED) { | ||
3992 | copy_len += TERM_DDP_LEN_UNTAGGED; | ||
3993 | termhdr->hdrct |= DDP_HDR_FLAG; | ||
3994 | } | ||
3995 | |||
3996 | if (ddp_seg_len >= (TERM_DDP_LEN_UNTAGGED + TERM_RDMA_LEN)) { | ||
3997 | if ((pkt[3] & RDMA_OPCODE_MASK) == RDMA_READ_REQ_OPCODE) { | ||
3998 | copy_len += TERM_RDMA_LEN; | ||
3999 | termhdr->hdrct |= RDMA_HDR_FLAG; | ||
4000 | } | ||
4001 | } | ||
4002 | } | ||
4003 | } | ||
4004 | } | ||
4005 | |||
4006 | opcode = i40iw_iwarp_opcode(info, pkt); | ||
4007 | |||
4008 | switch (info->ae_id) { | ||
4009 | case I40IW_AE_AMP_UNALLOCATED_STAG: | ||
4010 | qp->eventtype = TERM_EVENT_QP_ACCESS_ERR; | ||
4011 | if (opcode == I40IW_OP_TYPE_RDMA_WRITE) | ||
4012 | i40iw_setup_termhdr(qp, termhdr, FLUSH_PROT_ERR, | ||
4013 | (LAYER_DDP << 4) | DDP_TAGGED_BUFFER, DDP_TAGGED_INV_STAG); | ||
4014 | else | ||
4015 | i40iw_setup_termhdr(qp, termhdr, FLUSH_REM_ACCESS_ERR, | ||
4016 | (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT, RDMAP_INV_STAG); | ||
4017 | break; | ||
4018 | case I40IW_AE_AMP_BOUNDS_VIOLATION: | ||
4019 | qp->eventtype = TERM_EVENT_QP_ACCESS_ERR; | ||
4020 | if (info->q2_data_written) | ||
4021 | i40iw_setup_termhdr(qp, termhdr, FLUSH_PROT_ERR, | ||
4022 | (LAYER_DDP << 4) | DDP_TAGGED_BUFFER, DDP_TAGGED_BOUNDS); | ||
4023 | else | ||
4024 | i40iw_setup_termhdr(qp, termhdr, FLUSH_REM_ACCESS_ERR, | ||
4025 | (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT, RDMAP_INV_BOUNDS); | ||
4026 | break; | ||
4027 | case I40IW_AE_AMP_BAD_PD: | ||
4028 | switch (opcode) { | ||
4029 | case I40IW_OP_TYPE_RDMA_WRITE: | ||
4030 | i40iw_setup_termhdr(qp, termhdr, FLUSH_PROT_ERR, | ||
4031 | (LAYER_DDP << 4) | DDP_TAGGED_BUFFER, DDP_TAGGED_UNASSOC_STAG); | ||
4032 | break; | ||
4033 | case I40IW_OP_TYPE_SEND_INV: | ||
4034 | case I40IW_OP_TYPE_SEND_SOL_INV: | ||
4035 | i40iw_setup_termhdr(qp, termhdr, FLUSH_REM_ACCESS_ERR, | ||
4036 | (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT, RDMAP_CANT_INV_STAG); | ||
4037 | break; | ||
4038 | default: | ||
4039 | i40iw_setup_termhdr(qp, termhdr, FLUSH_REM_ACCESS_ERR, | ||
4040 | (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT, RDMAP_UNASSOC_STAG); | ||
4041 | } | ||
4042 | break; | ||
4043 | case I40IW_AE_AMP_INVALID_STAG: | ||
4044 | qp->eventtype = TERM_EVENT_QP_ACCESS_ERR; | ||
4045 | i40iw_setup_termhdr(qp, termhdr, FLUSH_REM_ACCESS_ERR, | ||
4046 | (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT, RDMAP_INV_STAG); | ||
4047 | break; | ||
4048 | case I40IW_AE_AMP_BAD_QP: | ||
4049 | i40iw_setup_termhdr(qp, termhdr, FLUSH_LOC_QP_OP_ERR, | ||
4050 | (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER, DDP_UNTAGGED_INV_QN); | ||
4051 | break; | ||
4052 | case I40IW_AE_AMP_BAD_STAG_KEY: | ||
4053 | case I40IW_AE_AMP_BAD_STAG_INDEX: | ||
4054 | qp->eventtype = TERM_EVENT_QP_ACCESS_ERR; | ||
4055 | switch (opcode) { | ||
4056 | case I40IW_OP_TYPE_SEND_INV: | ||
4057 | case I40IW_OP_TYPE_SEND_SOL_INV: | ||
4058 | i40iw_setup_termhdr(qp, termhdr, FLUSH_REM_OP_ERR, | ||
4059 | (LAYER_RDMA << 4) | RDMAP_REMOTE_OP, RDMAP_CANT_INV_STAG); | ||
4060 | break; | ||
4061 | default: | ||
4062 | i40iw_setup_termhdr(qp, termhdr, FLUSH_REM_ACCESS_ERR, | ||
4063 | (LAYER_RDMA << 4) | RDMAP_REMOTE_OP, RDMAP_INV_STAG); | ||
4064 | } | ||
4065 | break; | ||
4066 | case I40IW_AE_AMP_RIGHTS_VIOLATION: | ||
4067 | case I40IW_AE_AMP_INVALIDATE_NO_REMOTE_ACCESS_RIGHTS: | ||
4068 | case I40IW_AE_PRIV_OPERATION_DENIED: | ||
4069 | qp->eventtype = TERM_EVENT_QP_ACCESS_ERR; | ||
4070 | i40iw_setup_termhdr(qp, termhdr, FLUSH_REM_ACCESS_ERR, | ||
4071 | (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT, RDMAP_ACCESS); | ||
4072 | break; | ||
4073 | case I40IW_AE_AMP_TO_WRAP: | ||
4074 | qp->eventtype = TERM_EVENT_QP_ACCESS_ERR; | ||
4075 | i40iw_setup_termhdr(qp, termhdr, FLUSH_REM_ACCESS_ERR, | ||
4076 | (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT, RDMAP_TO_WRAP); | ||
4077 | break; | ||
4078 | case I40IW_AE_LLP_RECEIVED_MARKER_AND_LENGTH_FIELDS_DONT_MATCH: | ||
4079 | i40iw_setup_termhdr(qp, termhdr, FLUSH_LOC_LEN_ERR, | ||
4080 | (LAYER_MPA << 4) | DDP_LLP, MPA_MARKER); | ||
4081 | break; | ||
4082 | case I40IW_AE_LLP_RECEIVED_MPA_CRC_ERROR: | ||
4083 | i40iw_setup_termhdr(qp, termhdr, FLUSH_GENERAL_ERR, | ||
4084 | (LAYER_MPA << 4) | DDP_LLP, MPA_CRC); | ||
4085 | break; | ||
4086 | case I40IW_AE_LLP_SEGMENT_TOO_LARGE: | ||
4087 | case I40IW_AE_LLP_SEGMENT_TOO_SMALL: | ||
4088 | i40iw_setup_termhdr(qp, termhdr, FLUSH_LOC_LEN_ERR, | ||
4089 | (LAYER_DDP << 4) | DDP_CATASTROPHIC, DDP_CATASTROPHIC_LOCAL); | ||
4090 | break; | ||
4091 | case I40IW_AE_LCE_QP_CATASTROPHIC: | ||
4092 | case I40IW_AE_DDP_NO_L_BIT: | ||
4093 | i40iw_setup_termhdr(qp, termhdr, FLUSH_FATAL_ERR, | ||
4094 | (LAYER_DDP << 4) | DDP_CATASTROPHIC, DDP_CATASTROPHIC_LOCAL); | ||
4095 | break; | ||
4096 | case I40IW_AE_DDP_INVALID_MSN_GAP_IN_MSN: | ||
4097 | case I40IW_AE_DDP_INVALID_MSN_RANGE_IS_NOT_VALID: | ||
4098 | i40iw_setup_termhdr(qp, termhdr, FLUSH_GENERAL_ERR, | ||
4099 | (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER, DDP_UNTAGGED_INV_MSN_RANGE); | ||
4100 | break; | ||
4101 | case I40IW_AE_DDP_UBE_DDP_MESSAGE_TOO_LONG_FOR_AVAILABLE_BUFFER: | ||
4102 | qp->eventtype = TERM_EVENT_QP_ACCESS_ERR; | ||
4103 | i40iw_setup_termhdr(qp, termhdr, FLUSH_LOC_LEN_ERR, | ||
4104 | (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER, DDP_UNTAGGED_INV_TOO_LONG); | ||
4105 | break; | ||
4106 | case I40IW_AE_DDP_UBE_INVALID_DDP_VERSION: | ||
4107 | if (is_tagged) | ||
4108 | i40iw_setup_termhdr(qp, termhdr, FLUSH_GENERAL_ERR, | ||
4109 | (LAYER_DDP << 4) | DDP_TAGGED_BUFFER, DDP_TAGGED_INV_DDP_VER); | ||
4110 | else | ||
4111 | i40iw_setup_termhdr(qp, termhdr, FLUSH_GENERAL_ERR, | ||
4112 | (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER, DDP_UNTAGGED_INV_DDP_VER); | ||
4113 | break; | ||
4114 | case I40IW_AE_DDP_UBE_INVALID_MO: | ||
4115 | i40iw_setup_termhdr(qp, termhdr, FLUSH_GENERAL_ERR, | ||
4116 | (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER, DDP_UNTAGGED_INV_MO); | ||
4117 | break; | ||
4118 | case I40IW_AE_DDP_UBE_INVALID_MSN_NO_BUFFER_AVAILABLE: | ||
4119 | i40iw_setup_termhdr(qp, termhdr, FLUSH_REM_OP_ERR, | ||
4120 | (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER, DDP_UNTAGGED_INV_MSN_NO_BUF); | ||
4121 | break; | ||
4122 | case I40IW_AE_DDP_UBE_INVALID_QN: | ||
4123 | i40iw_setup_termhdr(qp, termhdr, FLUSH_GENERAL_ERR, | ||
4124 | (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER, DDP_UNTAGGED_INV_QN); | ||
4125 | break; | ||
4126 | case I40IW_AE_RDMAP_ROE_INVALID_RDMAP_VERSION: | ||
4127 | i40iw_setup_termhdr(qp, termhdr, FLUSH_GENERAL_ERR, | ||
4128 | (LAYER_RDMA << 4) | RDMAP_REMOTE_OP, RDMAP_INV_RDMAP_VER); | ||
4129 | break; | ||
4130 | case I40IW_AE_RDMAP_ROE_UNEXPECTED_OPCODE: | ||
4131 | i40iw_setup_termhdr(qp, termhdr, FLUSH_LOC_QP_OP_ERR, | ||
4132 | (LAYER_RDMA << 4) | RDMAP_REMOTE_OP, RDMAP_UNEXPECTED_OP); | ||
4133 | break; | ||
4134 | default: | ||
4135 | i40iw_setup_termhdr(qp, termhdr, FLUSH_FATAL_ERR, | ||
4136 | (LAYER_RDMA << 4) | RDMAP_REMOTE_OP, RDMAP_UNSPECIFIED); | ||
4137 | break; | ||
4138 | } | ||
4139 | |||
4140 | if (copy_len) | ||
4141 | memcpy(termhdr + 1, pkt, copy_len); | ||
4142 | |||
4143 | if (flush_code && !info->in_rdrsp_wr) | ||
4144 | qp->sq_flush = (info->sq) ? true : false; | ||
4145 | |||
4146 | return sizeof(struct i40iw_terminate_hdr) + copy_len; | ||
4147 | } | ||
4148 | |||
4149 | /** | ||
4150 | * i40iw_terminate_send_fin() - Send fin for terminate message | ||
4151 | * @qp: qp associated with received terminate AE | ||
4152 | */ | ||
4153 | void i40iw_terminate_send_fin(struct i40iw_sc_qp *qp) | ||
4154 | { | ||
4155 | /* Send the fin only */ | ||
4156 | i40iw_term_modify_qp(qp, | ||
4157 | I40IW_QP_STATE_TERMINATE, | ||
4158 | I40IWQP_TERM_SEND_FIN_ONLY, | ||
4159 | 0); | ||
4160 | } | ||
4161 | |||
4162 | /** | ||
4163 | * i40iw_terminate_connection() - Bad AE and send terminate to remote QP | ||
4164 | * @qp: qp associated with received terminate AE | ||
4165 | * @info: the struct contiaing AE information | ||
4166 | */ | ||
4167 | void i40iw_terminate_connection(struct i40iw_sc_qp *qp, struct i40iw_aeqe_info *info) | ||
4168 | { | ||
4169 | u8 termlen = 0; | ||
4170 | |||
4171 | if (qp->term_flags & I40IW_TERM_SENT) | ||
4172 | return; /* Sanity check */ | ||
4173 | |||
4174 | /* Eventtype can change from bld_terminate_hdr */ | ||
4175 | qp->eventtype = TERM_EVENT_QP_FATAL; | ||
4176 | termlen = i40iw_bld_terminate_hdr(qp, info); | ||
4177 | i40iw_terminate_start_timer(qp); | ||
4178 | qp->term_flags |= I40IW_TERM_SENT; | ||
4179 | i40iw_term_modify_qp(qp, I40IW_QP_STATE_TERMINATE, | ||
4180 | I40IWQP_TERM_SEND_TERM_ONLY, termlen); | ||
4181 | } | ||
4182 | |||
4183 | /** | ||
4184 | * i40iw_terminate_received - handle terminate received AE | ||
4185 | * @qp: qp associated with received terminate AE | ||
4186 | * @info: the struct contiaing AE information | ||
4187 | */ | ||
4188 | void i40iw_terminate_received(struct i40iw_sc_qp *qp, struct i40iw_aeqe_info *info) | ||
4189 | { | ||
4190 | u8 *pkt = qp->q2_buf + Q2_BAD_FRAME_OFFSET; | ||
4191 | u32 *mpa; | ||
4192 | u8 ddp_ctl; | ||
4193 | u8 rdma_ctl; | ||
4194 | u16 aeq_id = 0; | ||
4195 | struct i40iw_terminate_hdr *termhdr; | ||
4196 | |||
4197 | mpa = (u32 *)i40iw_locate_mpa(pkt); | ||
4198 | if (info->q2_data_written) { | ||
4199 | /* did not validate the frame - do it now */ | ||
4200 | ddp_ctl = (ntohl(mpa[0]) >> 8) & 0xff; | ||
4201 | rdma_ctl = ntohl(mpa[0]) & 0xff; | ||
4202 | if ((ddp_ctl & 0xc0) != 0x40) | ||
4203 | aeq_id = I40IW_AE_LCE_QP_CATASTROPHIC; | ||
4204 | else if ((ddp_ctl & 0x03) != 1) | ||
4205 | aeq_id = I40IW_AE_DDP_UBE_INVALID_DDP_VERSION; | ||
4206 | else if (ntohl(mpa[2]) != 2) | ||
4207 | aeq_id = I40IW_AE_DDP_UBE_INVALID_QN; | ||
4208 | else if (ntohl(mpa[3]) != 1) | ||
4209 | aeq_id = I40IW_AE_DDP_INVALID_MSN_GAP_IN_MSN; | ||
4210 | else if (ntohl(mpa[4]) != 0) | ||
4211 | aeq_id = I40IW_AE_DDP_UBE_INVALID_MO; | ||
4212 | else if ((rdma_ctl & 0xc0) != 0x40) | ||
4213 | aeq_id = I40IW_AE_RDMAP_ROE_INVALID_RDMAP_VERSION; | ||
4214 | |||
4215 | info->ae_id = aeq_id; | ||
4216 | if (info->ae_id) { | ||
4217 | /* Bad terminate recvd - send back a terminate */ | ||
4218 | i40iw_terminate_connection(qp, info); | ||
4219 | return; | ||
4220 | } | ||
4221 | } | ||
4222 | |||
4223 | qp->term_flags |= I40IW_TERM_RCVD; | ||
4224 | qp->eventtype = TERM_EVENT_QP_FATAL; | ||
4225 | termhdr = (struct i40iw_terminate_hdr *)&mpa[5]; | ||
4226 | if (termhdr->layer_etype == RDMAP_REMOTE_PROT || | ||
4227 | termhdr->layer_etype == RDMAP_REMOTE_OP) { | ||
4228 | i40iw_terminate_done(qp, 0); | ||
4229 | } else { | ||
4230 | i40iw_terminate_start_timer(qp); | ||
4231 | i40iw_terminate_send_fin(qp); | ||
4232 | } | ||
4233 | } | ||
4234 | |||
4235 | /** | ||
4236 | * i40iw_hw_stat_init - Initiliaze HW stats table | ||
4237 | * @devstat: pestat struct | ||
4238 | * @fcn_idx: PCI fn id | ||
4239 | * @hw: PF i40iw_hw structure. | ||
4240 | * @is_pf: Is it a PF? | ||
4241 | * | ||
4242 | * Populate the HW stat table with register offset addr for each | ||
4243 | * stat. And start the perioidic stats timer. | ||
4244 | */ | ||
4245 | static void i40iw_hw_stat_init(struct i40iw_dev_pestat *devstat, | ||
4246 | u8 fcn_idx, | ||
4247 | struct i40iw_hw *hw, bool is_pf) | ||
4248 | { | ||
4249 | u32 stat_reg_offset; | ||
4250 | u32 stat_index; | ||
4251 | struct i40iw_dev_hw_stat_offsets *stat_table = | ||
4252 | &devstat->hw_stat_offsets; | ||
4253 | struct i40iw_dev_hw_stats *last_rd_stats = &devstat->last_read_hw_stats; | ||
4254 | |||
4255 | devstat->hw = hw; | ||
4256 | |||
4257 | if (is_pf) { | ||
4258 | stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_IP4RXDISCARD] = | ||
4259 | I40E_GLPES_PFIP4RXDISCARD(fcn_idx); | ||
4260 | stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_IP4RXTRUNC] = | ||
4261 | I40E_GLPES_PFIP4RXTRUNC(fcn_idx); | ||
4262 | stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_IP4TXNOROUTE] = | ||
4263 | I40E_GLPES_PFIP4TXNOROUTE(fcn_idx); | ||
4264 | stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_IP6RXDISCARD] = | ||
4265 | I40E_GLPES_PFIP6RXDISCARD(fcn_idx); | ||
4266 | stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_IP6RXTRUNC] = | ||
4267 | I40E_GLPES_PFIP6RXTRUNC(fcn_idx); | ||
4268 | stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_IP6TXNOROUTE] = | ||
4269 | I40E_GLPES_PFIP6TXNOROUTE(fcn_idx); | ||
4270 | stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_TCPRTXSEG] = | ||
4271 | I40E_GLPES_PFTCPRTXSEG(fcn_idx); | ||
4272 | stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_TCPRXOPTERR] = | ||
4273 | I40E_GLPES_PFTCPRXOPTERR(fcn_idx); | ||
4274 | stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_TCPRXPROTOERR] = | ||
4275 | I40E_GLPES_PFTCPRXPROTOERR(fcn_idx); | ||
4276 | |||
4277 | stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4RXOCTS] = | ||
4278 | I40E_GLPES_PFIP4RXOCTSLO(fcn_idx); | ||
4279 | stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4RXPKTS] = | ||
4280 | I40E_GLPES_PFIP4RXPKTSLO(fcn_idx); | ||
4281 | stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4RXFRAGS] = | ||
4282 | I40E_GLPES_PFIP4RXFRAGSLO(fcn_idx); | ||
4283 | stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4RXMCPKTS] = | ||
4284 | I40E_GLPES_PFIP4RXMCPKTSLO(fcn_idx); | ||
4285 | stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4TXOCTS] = | ||
4286 | I40E_GLPES_PFIP4TXOCTSLO(fcn_idx); | ||
4287 | stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4TXPKTS] = | ||
4288 | I40E_GLPES_PFIP4TXPKTSLO(fcn_idx); | ||
4289 | stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4TXFRAGS] = | ||
4290 | I40E_GLPES_PFIP4TXFRAGSLO(fcn_idx); | ||
4291 | stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4TXMCPKTS] = | ||
4292 | I40E_GLPES_PFIP4TXMCPKTSLO(fcn_idx); | ||
4293 | stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6RXOCTS] = | ||
4294 | I40E_GLPES_PFIP6RXOCTSLO(fcn_idx); | ||
4295 | stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6RXPKTS] = | ||
4296 | I40E_GLPES_PFIP6RXPKTSLO(fcn_idx); | ||
4297 | stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6RXFRAGS] = | ||
4298 | I40E_GLPES_PFIP6RXFRAGSLO(fcn_idx); | ||
4299 | stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6RXMCPKTS] = | ||
4300 | I40E_GLPES_PFIP6RXMCPKTSLO(fcn_idx); | ||
4301 | stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6TXOCTS] = | ||
4302 | I40E_GLPES_PFIP6TXOCTSLO(fcn_idx); | ||
4303 | stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6TXPKTS] = | ||
4304 | I40E_GLPES_PFIP6TXPKTSLO(fcn_idx); | ||
4305 | stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6TXPKTS] = | ||
4306 | I40E_GLPES_PFIP6TXPKTSLO(fcn_idx); | ||
4307 | stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6TXFRAGS] = | ||
4308 | I40E_GLPES_PFIP6TXFRAGSLO(fcn_idx); | ||
4309 | stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_TCPRXSEGS] = | ||
4310 | I40E_GLPES_PFTCPRXSEGSLO(fcn_idx); | ||
4311 | stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_TCPTXSEG] = | ||
4312 | I40E_GLPES_PFTCPTXSEGLO(fcn_idx); | ||
4313 | stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMARXRDS] = | ||
4314 | I40E_GLPES_PFRDMARXRDSLO(fcn_idx); | ||
4315 | stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMARXSNDS] = | ||
4316 | I40E_GLPES_PFRDMARXSNDSLO(fcn_idx); | ||
4317 | stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMARXWRS] = | ||
4318 | I40E_GLPES_PFRDMARXWRSLO(fcn_idx); | ||
4319 | stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMATXRDS] = | ||
4320 | I40E_GLPES_PFRDMATXRDSLO(fcn_idx); | ||
4321 | stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMATXSNDS] = | ||
4322 | I40E_GLPES_PFRDMATXSNDSLO(fcn_idx); | ||
4323 | stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMATXWRS] = | ||
4324 | I40E_GLPES_PFRDMATXWRSLO(fcn_idx); | ||
4325 | stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMAVBND] = | ||
4326 | I40E_GLPES_PFRDMAVBNDLO(fcn_idx); | ||
4327 | stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMAVINV] = | ||
4328 | I40E_GLPES_PFRDMAVINVLO(fcn_idx); | ||
4329 | } else { | ||
4330 | stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_IP4RXDISCARD] = | ||
4331 | I40E_GLPES_VFIP4RXDISCARD(fcn_idx); | ||
4332 | stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_IP4RXTRUNC] = | ||
4333 | I40E_GLPES_VFIP4RXTRUNC(fcn_idx); | ||
4334 | stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_IP4TXNOROUTE] = | ||
4335 | I40E_GLPES_VFIP4TXNOROUTE(fcn_idx); | ||
4336 | stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_IP6RXDISCARD] = | ||
4337 | I40E_GLPES_VFIP6RXDISCARD(fcn_idx); | ||
4338 | stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_IP6RXTRUNC] = | ||
4339 | I40E_GLPES_VFIP6RXTRUNC(fcn_idx); | ||
4340 | stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_IP6TXNOROUTE] = | ||
4341 | I40E_GLPES_VFIP6TXNOROUTE(fcn_idx); | ||
4342 | stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_TCPRTXSEG] = | ||
4343 | I40E_GLPES_VFTCPRTXSEG(fcn_idx); | ||
4344 | stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_TCPRXOPTERR] = | ||
4345 | I40E_GLPES_VFTCPRXOPTERR(fcn_idx); | ||
4346 | stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_TCPRXPROTOERR] = | ||
4347 | I40E_GLPES_VFTCPRXPROTOERR(fcn_idx); | ||
4348 | |||
4349 | stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4RXOCTS] = | ||
4350 | I40E_GLPES_VFIP4RXOCTSLO(fcn_idx); | ||
4351 | stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4RXPKTS] = | ||
4352 | I40E_GLPES_VFIP4RXPKTSLO(fcn_idx); | ||
4353 | stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4RXFRAGS] = | ||
4354 | I40E_GLPES_VFIP4RXFRAGSLO(fcn_idx); | ||
4355 | stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4RXMCPKTS] = | ||
4356 | I40E_GLPES_VFIP4RXMCPKTSLO(fcn_idx); | ||
4357 | stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4TXOCTS] = | ||
4358 | I40E_GLPES_VFIP4TXOCTSLO(fcn_idx); | ||
4359 | stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4TXPKTS] = | ||
4360 | I40E_GLPES_VFIP4TXPKTSLO(fcn_idx); | ||
4361 | stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4TXFRAGS] = | ||
4362 | I40E_GLPES_VFIP4TXFRAGSLO(fcn_idx); | ||
4363 | stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4TXMCPKTS] = | ||
4364 | I40E_GLPES_VFIP4TXMCPKTSLO(fcn_idx); | ||
4365 | stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6RXOCTS] = | ||
4366 | I40E_GLPES_VFIP6RXOCTSLO(fcn_idx); | ||
4367 | stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6RXPKTS] = | ||
4368 | I40E_GLPES_VFIP6RXPKTSLO(fcn_idx); | ||
4369 | stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6RXFRAGS] = | ||
4370 | I40E_GLPES_VFIP6RXFRAGSLO(fcn_idx); | ||
4371 | stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6RXMCPKTS] = | ||
4372 | I40E_GLPES_VFIP6RXMCPKTSLO(fcn_idx); | ||
4373 | stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6TXOCTS] = | ||
4374 | I40E_GLPES_VFIP6TXOCTSLO(fcn_idx); | ||
4375 | stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6TXPKTS] = | ||
4376 | I40E_GLPES_VFIP6TXPKTSLO(fcn_idx); | ||
4377 | stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6TXPKTS] = | ||
4378 | I40E_GLPES_VFIP6TXPKTSLO(fcn_idx); | ||
4379 | stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6TXFRAGS] = | ||
4380 | I40E_GLPES_VFIP6TXFRAGSLO(fcn_idx); | ||
4381 | stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_TCPRXSEGS] = | ||
4382 | I40E_GLPES_VFTCPRXSEGSLO(fcn_idx); | ||
4383 | stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_TCPTXSEG] = | ||
4384 | I40E_GLPES_VFTCPTXSEGLO(fcn_idx); | ||
4385 | stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMARXRDS] = | ||
4386 | I40E_GLPES_VFRDMARXRDSLO(fcn_idx); | ||
4387 | stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMARXSNDS] = | ||
4388 | I40E_GLPES_VFRDMARXSNDSLO(fcn_idx); | ||
4389 | stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMARXWRS] = | ||
4390 | I40E_GLPES_VFRDMARXWRSLO(fcn_idx); | ||
4391 | stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMATXRDS] = | ||
4392 | I40E_GLPES_VFRDMATXRDSLO(fcn_idx); | ||
4393 | stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMATXSNDS] = | ||
4394 | I40E_GLPES_VFRDMATXSNDSLO(fcn_idx); | ||
4395 | stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMATXWRS] = | ||
4396 | I40E_GLPES_VFRDMATXWRSLO(fcn_idx); | ||
4397 | stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMAVBND] = | ||
4398 | I40E_GLPES_VFRDMAVBNDLO(fcn_idx); | ||
4399 | stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMAVINV] = | ||
4400 | I40E_GLPES_VFRDMAVINVLO(fcn_idx); | ||
4401 | } | ||
4402 | |||
4403 | for (stat_index = 0; stat_index < I40IW_HW_STAT_INDEX_MAX_64; | ||
4404 | stat_index++) { | ||
4405 | stat_reg_offset = stat_table->stat_offset_64[stat_index]; | ||
4406 | last_rd_stats->stat_value_64[stat_index] = | ||
4407 | readq(devstat->hw->hw_addr + stat_reg_offset); | ||
4408 | } | ||
4409 | |||
4410 | for (stat_index = 0; stat_index < I40IW_HW_STAT_INDEX_MAX_32; | ||
4411 | stat_index++) { | ||
4412 | stat_reg_offset = stat_table->stat_offset_32[stat_index]; | ||
4413 | last_rd_stats->stat_value_32[stat_index] = | ||
4414 | i40iw_rd32(devstat->hw, stat_reg_offset); | ||
4415 | } | ||
4416 | } | ||
4417 | |||
4418 | /** | ||
4419 | * i40iw_hw_stat_read_32 - Read 32-bit HW stat counters and accommodates for roll-overs. | ||
4420 | * @devstat: pestat struct | ||
4421 | * @index: index in HW stat table which contains offset reg-addr | ||
4422 | * @value: hw stat value | ||
4423 | */ | ||
4424 | static void i40iw_hw_stat_read_32(struct i40iw_dev_pestat *devstat, | ||
4425 | enum i40iw_hw_stat_index_32b index, | ||
4426 | u64 *value) | ||
4427 | { | ||
4428 | struct i40iw_dev_hw_stat_offsets *stat_table = | ||
4429 | &devstat->hw_stat_offsets; | ||
4430 | struct i40iw_dev_hw_stats *last_rd_stats = &devstat->last_read_hw_stats; | ||
4431 | struct i40iw_dev_hw_stats *hw_stats = &devstat->hw_stats; | ||
4432 | u64 new_stat_value = 0; | ||
4433 | u32 stat_reg_offset = stat_table->stat_offset_32[index]; | ||
4434 | |||
4435 | new_stat_value = i40iw_rd32(devstat->hw, stat_reg_offset); | ||
4436 | /*roll-over case */ | ||
4437 | if (new_stat_value < last_rd_stats->stat_value_32[index]) | ||
4438 | hw_stats->stat_value_32[index] += new_stat_value; | ||
4439 | else | ||
4440 | hw_stats->stat_value_32[index] += | ||
4441 | new_stat_value - last_rd_stats->stat_value_32[index]; | ||
4442 | last_rd_stats->stat_value_32[index] = new_stat_value; | ||
4443 | *value = hw_stats->stat_value_32[index]; | ||
4444 | } | ||
4445 | |||
4446 | /** | ||
4447 | * i40iw_hw_stat_read_64 - Read HW stat counters (greater than 32-bit) and accommodates for roll-overs. | ||
4448 | * @devstat: pestat struct | ||
4449 | * @index: index in HW stat table which contains offset reg-addr | ||
4450 | * @value: hw stat value | ||
4451 | */ | ||
4452 | static void i40iw_hw_stat_read_64(struct i40iw_dev_pestat *devstat, | ||
4453 | enum i40iw_hw_stat_index_64b index, | ||
4454 | u64 *value) | ||
4455 | { | ||
4456 | struct i40iw_dev_hw_stat_offsets *stat_table = | ||
4457 | &devstat->hw_stat_offsets; | ||
4458 | struct i40iw_dev_hw_stats *last_rd_stats = &devstat->last_read_hw_stats; | ||
4459 | struct i40iw_dev_hw_stats *hw_stats = &devstat->hw_stats; | ||
4460 | u64 new_stat_value = 0; | ||
4461 | u32 stat_reg_offset = stat_table->stat_offset_64[index]; | ||
4462 | |||
4463 | new_stat_value = readq(devstat->hw->hw_addr + stat_reg_offset); | ||
4464 | /*roll-over case */ | ||
4465 | if (new_stat_value < last_rd_stats->stat_value_64[index]) | ||
4466 | hw_stats->stat_value_64[index] += new_stat_value; | ||
4467 | else | ||
4468 | hw_stats->stat_value_64[index] += | ||
4469 | new_stat_value - last_rd_stats->stat_value_64[index]; | ||
4470 | last_rd_stats->stat_value_64[index] = new_stat_value; | ||
4471 | *value = hw_stats->stat_value_64[index]; | ||
4472 | } | ||
4473 | |||
4474 | /** | ||
4475 | * i40iw_hw_stat_read_all - read all HW stat counters | ||
4476 | * @devstat: pestat struct | ||
4477 | * @stat_values: hw stats structure | ||
4478 | * | ||
4479 | * Read all the HW stat counters and populates hw_stats structure | ||
4480 | * of passed-in dev's pestat as well as copy created in stat_values. | ||
4481 | */ | ||
4482 | static void i40iw_hw_stat_read_all(struct i40iw_dev_pestat *devstat, | ||
4483 | struct i40iw_dev_hw_stats *stat_values) | ||
4484 | { | ||
4485 | u32 stat_index; | ||
4486 | |||
4487 | for (stat_index = 0; stat_index < I40IW_HW_STAT_INDEX_MAX_32; | ||
4488 | stat_index++) | ||
4489 | i40iw_hw_stat_read_32(devstat, stat_index, | ||
4490 | &stat_values->stat_value_32[stat_index]); | ||
4491 | for (stat_index = 0; stat_index < I40IW_HW_STAT_INDEX_MAX_64; | ||
4492 | stat_index++) | ||
4493 | i40iw_hw_stat_read_64(devstat, stat_index, | ||
4494 | &stat_values->stat_value_64[stat_index]); | ||
4495 | } | ||
4496 | |||
4497 | /** | ||
4498 | * i40iw_hw_stat_refresh_all - Update all HW stat structs | ||
4499 | * @devstat: pestat struct | ||
4500 | * @stat_values: hw stats structure | ||
4501 | * | ||
4502 | * Read all the HW stat counters to refresh values in hw_stats structure | ||
4503 | * of passed-in dev's pestat | ||
4504 | */ | ||
4505 | static void i40iw_hw_stat_refresh_all(struct i40iw_dev_pestat *devstat) | ||
4506 | { | ||
4507 | u64 stat_value; | ||
4508 | u32 stat_index; | ||
4509 | |||
4510 | for (stat_index = 0; stat_index < I40IW_HW_STAT_INDEX_MAX_32; | ||
4511 | stat_index++) | ||
4512 | i40iw_hw_stat_read_32(devstat, stat_index, &stat_value); | ||
4513 | for (stat_index = 0; stat_index < I40IW_HW_STAT_INDEX_MAX_64; | ||
4514 | stat_index++) | ||
4515 | i40iw_hw_stat_read_64(devstat, stat_index, &stat_value); | ||
4516 | } | ||
4517 | |||
/* control qp (cqp) ops - positional init, order must match struct declaration */
static struct i40iw_cqp_ops iw_cqp_ops = {
	i40iw_sc_cqp_init,
	i40iw_sc_cqp_create,
	i40iw_sc_cqp_post_sq,
	i40iw_sc_cqp_get_next_send_wqe,
	i40iw_sc_cqp_destroy,
	i40iw_sc_poll_for_cqp_op_done
};
4526 | |||
/* control cq (ccq) ops - positional init, order must match struct declaration */
static struct i40iw_ccq_ops iw_ccq_ops = {
	i40iw_sc_ccq_init,
	i40iw_sc_ccq_create,
	i40iw_sc_ccq_destroy,
	i40iw_sc_ccq_create_done,
	i40iw_sc_ccq_get_cqe_info,
	i40iw_sc_ccq_arm
};
4535 | |||
/* ceq ops - positional init, order must match struct declaration */
static struct i40iw_ceq_ops iw_ceq_ops = {
	i40iw_sc_ceq_init,
	i40iw_sc_ceq_create,
	i40iw_sc_cceq_create_done,
	i40iw_sc_cceq_destroy_done,
	i40iw_sc_cceq_create,
	i40iw_sc_ceq_destroy,
	i40iw_sc_process_ceq
};
4545 | |||
/* aeq ops - positional init, order must match struct declaration */
static struct i40iw_aeq_ops iw_aeq_ops = {
	i40iw_sc_aeq_init,
	i40iw_sc_aeq_create,
	i40iw_sc_aeq_destroy,
	i40iw_sc_get_next_aeqe,
	i40iw_sc_repost_aeq_entries,
	i40iw_sc_aeq_create_done,
	i40iw_sc_aeq_destroy_done
};
4555 | |||
/* iwarp pd ops - positional init, order must match struct declaration */
static struct i40iw_pd_ops iw_pd_ops = {
	i40iw_sc_pd_init,
};
4560 | |||
/* privileged qp ops - positional init, order must match struct declaration */
static struct i40iw_priv_qp_ops iw_priv_qp_ops = {
	i40iw_sc_qp_init,
	i40iw_sc_qp_create,
	i40iw_sc_qp_modify,
	i40iw_sc_qp_destroy,
	i40iw_sc_qp_flush_wqes,
	i40iw_sc_qp_upload_context,
	i40iw_sc_qp_setctx,
	i40iw_sc_send_lsmm,
	i40iw_sc_send_lsmm_nostag,
	i40iw_sc_send_rtt,
	i40iw_sc_post_wqe0,
};
4574 | |||
/* privileged cq ops - positional init, order must match struct declaration */
static struct i40iw_priv_cq_ops iw_priv_cq_ops = {
	i40iw_sc_cq_init,
	i40iw_sc_cq_create,
	i40iw_sc_cq_destroy,
	i40iw_sc_cq_modify,
};
4581 | |||
/* memory region ops - positional init, order must match struct declaration */
static struct i40iw_mr_ops iw_mr_ops = {
	i40iw_sc_alloc_stag,
	i40iw_sc_mr_reg_non_shared,
	i40iw_sc_mr_reg_shared,
	i40iw_sc_dealloc_stag,
	i40iw_sc_query_stag,
	i40iw_sc_mw_alloc
};
4590 | |||
/* misc cqp ops - positional init, order must match struct declaration */
static struct i40iw_cqp_misc_ops iw_cqp_misc_ops = {
	i40iw_sc_manage_push_page,
	i40iw_sc_manage_hmc_pm_func_table,
	i40iw_sc_set_hmc_resource_profile,
	i40iw_sc_commit_fpm_values,
	i40iw_sc_query_fpm_values,
	i40iw_sc_static_hmc_pages_allocated,
	i40iw_sc_add_arp_cache_entry,
	i40iw_sc_del_arp_cache_entry,
	i40iw_sc_query_arp_cache_entry,
	i40iw_sc_manage_apbvt_entry,
	i40iw_sc_manage_qhash_table_entry,
	i40iw_sc_alloc_local_mac_ipaddr_entry,
	i40iw_sc_add_local_mac_ipaddr_entry,
	i40iw_sc_del_local_mac_ipaddr_entry,
	i40iw_sc_cqp_nop,
	i40iw_sc_commit_fpm_values_done,
	i40iw_sc_query_fpm_values_done,
	i40iw_sc_manage_hmc_pm_func_table_done,
	i40iw_sc_suspend_qp,
	i40iw_sc_resume_qp
};
4613 | |||
/* hmc ops - positional init, order must match struct declaration */
static struct i40iw_hmc_ops iw_hmc_ops = {
	i40iw_sc_init_iw_hmc,
	i40iw_sc_parse_fpm_query_buf,
	i40iw_sc_configure_iw_fpm,
	i40iw_sc_parse_fpm_commit_buf,
	i40iw_sc_create_hmc_obj,
	i40iw_sc_del_hmc_obj,
	NULL,	/* NOTE(review): trailing op slots intentionally unimplemented -
		 * confirm which fields these are against struct i40iw_hmc_ops
		 */
	NULL
};
4624 | |||
/* pestat (HW statistics) ops - positional init, order must match struct
 * declaration; copied by value into each pestat in i40iw_device_init_pestat()
 */
static const struct i40iw_device_pestat_ops iw_device_pestat_ops = {
	i40iw_hw_stat_init,
	i40iw_hw_stat_read_32,
	i40iw_hw_stat_read_64,
	i40iw_hw_stat_read_all,
	i40iw_hw_stat_refresh_all
};
4632 | |||
/**
 * i40iw_device_init_pestat - Initialize the pestat structure
 * @devstat: pestat struct
 *
 * Installs the HW-stat ops table; always returns 0.
 */
enum i40iw_status_code i40iw_device_init_pestat(struct i40iw_dev_pestat *devstat)
{
	devstat->ops = iw_device_pestat_ops;
	return 0;
}
4642 | |||
/**
 * i40iw_device_init - Initialize IWARP device
 * @dev: IWARP device pointer
 * @info: IWARP init info
 *
 * Sets up locks, pestat, FPM buffers, doorbell mapping, the ops tables
 * and the virtual channel interface. For a VF, also queries the channel
 * version and HMC function id from the PF. Returns 0 on success or an
 * i40iw_status_code on failure.
 */
enum i40iw_status_code i40iw_device_init(struct i40iw_sc_dev *dev,
					 struct i40iw_device_init_info *info)
{
	u32 val;
	u32 vchnl_ver = 0;
	u16 hmc_fcn = 0;
	enum i40iw_status_code ret_code = 0;
	u8 db_size;

	spin_lock_init(&dev->cqp_lock);
	INIT_LIST_HEAD(&dev->cqp_cmd_head);             /* for the cqp commands backlog. */

	i40iw_device_init_uk(&dev->dev_uk);

	dev->debug_mask = info->debug_mask;

	ret_code = i40iw_device_init_pestat(&dev->dev_pestat);
	if (ret_code) {
		i40iw_debug(dev, I40IW_DEBUG_DEV,
			    "%s: i40iw_device_init_pestat failed\n", __func__);
		return ret_code;
	}
	/* copy identity/configuration from the caller-provided init info */
	dev->hmc_fn_id = info->hmc_fn_id;
	dev->qs_handle = info->qs_handle;
	dev->exception_lan_queue = info->exception_lan_queue;
	dev->is_pf = info->is_pf;

	dev->fpm_query_buf_pa = info->fpm_query_buf_pa;
	dev->fpm_query_buf = info->fpm_query_buf;

	dev->fpm_commit_buf_pa = info->fpm_commit_buf_pa;
	dev->fpm_commit_buf = info->fpm_commit_buf;

	dev->hw = info->hw;
	dev->hw->hw_addr = info->bar0;

	/* cache the PCI device revision id */
	val = i40iw_rd32(dev->hw, I40E_GLPCI_DREVID);
	dev->hw_rev = (u8)RS_32(val, I40E_GLPCI_DREVID_DEFAULT_REVID);

	if (dev->is_pf) {
		/* PF owns the HW stats: init offsets and start periodic reads */
		dev->dev_pestat.ops.iw_hw_stat_init(&dev->dev_pestat,
			dev->hmc_fn_id, dev->hw, true);
		spin_lock_init(&dev->dev_pestat.stats_lock);
		/*start the periodic stats_timer */
		i40iw_hw_stats_start_timer(dev);
		/* verify the PE doorbell space was enabled by firmware */
		val = i40iw_rd32(dev->hw, I40E_GLPCI_LBARCTRL);
		db_size = (u8)RS_32(val, I40E_GLPCI_LBARCTRL_PE_DB_SIZE);
		if ((db_size != I40IW_PE_DB_SIZE_4M) &&
		    (db_size != I40IW_PE_DB_SIZE_8M)) {
			/* NOTE(review): the stats timer started above is not
			 * stopped on this error return - confirm the caller
			 * tears it down on i40iw_device_init() failure
			 */
			i40iw_debug(dev, I40IW_DEBUG_DEV,
				    "%s: PE doorbell is not enabled in CSR val 0x%x\n",
				    __func__, val);
			ret_code = I40IW_ERR_PE_DOORBELL_NOT_ENABLED;
			return ret_code;
		}
		dev->db_addr = dev->hw->hw_addr + I40IW_DB_ADDR_OFFSET;
		dev->vchnl_if.vchnl_recv = i40iw_vchnl_recv_pf;
	} else {
		dev->db_addr = dev->hw->hw_addr + I40IW_VF_DB_ADDR_OFFSET;
	}

	/* install the ops tables defined above */
	dev->cqp_ops = &iw_cqp_ops;
	dev->ccq_ops = &iw_ccq_ops;
	dev->ceq_ops = &iw_ceq_ops;
	dev->aeq_ops = &iw_aeq_ops;
	dev->cqp_misc_ops = &iw_cqp_misc_ops;
	dev->iw_pd_ops = &iw_pd_ops;
	dev->iw_priv_qp_ops = &iw_priv_qp_ops;
	dev->iw_priv_cq_ops = &iw_priv_cq_ops;
	dev->mr_ops = &iw_mr_ops;
	dev->hmc_ops = &iw_hmc_ops;
	/* virtual channel is up only if the caller supplied a send handler */
	dev->vchnl_if.vchnl_send = info->vchnl_send;
	if (dev->vchnl_if.vchnl_send)
		dev->vchnl_up = true;
	else
		dev->vchnl_up = false;
	if (!dev->is_pf) {
		/* VF: fetch channel version and HMC function id from the PF;
		 * failures are tolerated and dev->hmc_fn_id keeps info's value
		 */
		dev->vchnl_if.vchnl_recv = i40iw_vchnl_recv_vf;
		ret_code = i40iw_vchnl_vf_get_ver(dev, &vchnl_ver);
		if (!ret_code) {
			i40iw_debug(dev, I40IW_DEBUG_DEV,
				    "%s: Get Channel version rc = 0x%0x, version is %u\n",
				    __func__, ret_code, vchnl_ver);
			ret_code = i40iw_vchnl_vf_get_hmc_fcn(dev, &hmc_fcn);
			if (!ret_code) {
				i40iw_debug(dev, I40IW_DEBUG_DEV,
					    "%s Get HMC function rc = 0x%0x, hmc fcn is %u\n",
					    __func__, ret_code, hmc_fcn);
				dev->hmc_fn_id = (u8)hmc_fcn;
			}
		}
	}
	dev->iw_vf_cqp_ops = &iw_vf_cqp_ops;

	return ret_code;
}