author    Haggai Eran <haggaie@mellanox.com>      2014-12-11 10:04:24 -0500
committer Roland Dreier <roland@purestorage.com>  2014-12-15 21:19:03 -0500
commit    7bdf65d411c1715d695be0d9a555d7f48d0a7220
tree      f4af6f7c1e55e52c03842c6ef4fff0bb5fb20e95
parent    6aec21f6a8322fa8d43df3ea7f051dfd8967f1b9
IB/mlx5: Handle page faults
This patch implements a page fault handler (leaving the pages pinned for the
time being). The handler covers initiator and responder page faults for the
UD and RC transports, for send and receive operations, as well as RDMA
read/write on the initiator side. A simplified sketch of this flow appears
after the sign-off lines below.
Signed-off-by: Sagi Grimberg <sagig@mellanox.com>
Signed-off-by: Shachar Raindel <raindel@mellanox.com>
Signed-off-by: Haggai Eran <haggaie@mellanox.com>
Signed-off-by: Roland Dreier <roland@purestorage.com>
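
To make the control flow easier to follow, here is a minimal userspace sketch
of the path the new handler takes: read the faulting WQE from the user's
queue, parse it as an initiator or responder WQE, fault in the pages behind
its data segments, and then resume the QP. Every type, name, and helper below
is a simplified stand-in chosen for illustration; it is not the driver's
actual interface.

/* Userspace sketch of the WQE page-fault flow added by this patch.
 * All helpers are stubs; only the overall control flow mirrors the driver. */
#include <stdio.h>
#include <errno.h>
#include <stdbool.h>

struct pfault { int requestor; int wqe_index; };

/* Stubs standing in for the driver helpers added by the patch. */
static int read_user_wqe(const struct pfault *pf, char *buf, int len)
{
	(void)pf; (void)buf;
	return len;			/* pretend the whole WQE was read */
}
static int parse_initiator_wqe(char *wqe, int len, char **sg, char **end)
{
	*sg = wqe; *end = wqe + len;	/* pretend the SG list spans the WQE */
	return 0;
}
static int parse_responder_wqe(char *wqe, int len, char **sg, char **end)
{
	*sg = wqe; *end = wqe + len;
	return 0;
}
static int fault_in_data_segments(char *sg, char *end, int *mapped, int *total)
{
	*mapped = *total = (int)(end - sg);	/* pretend everything mapped */
	return 0;
}
static void page_fault_resume(bool error)
{
	printf("resume, error=%d\n", error);
}

static void wqe_pfault_handler(struct pfault *pf)
{
	char buffer[512];
	char *sg, *end;
	int mapped = 0, total = 0;
	bool resume_with_error = false;
	int ret;

	/* 1. Read the faulting WQE from the user's send/receive queue. */
	ret = read_user_wqe(pf, buffer, sizeof(buffer));
	if (ret < 0) {
		resume_with_error = true;
		goto resolve;
	}

	/* 2. Parse it as an initiator (send) or responder (receive) WQE to
	 *    locate the scatter-gather list. */
	ret = pf->requestor ?
		parse_initiator_wqe(buffer, ret, &sg, &end) :
		parse_responder_wqe(buffer, ret, &sg, &end);
	if (ret < 0 || sg >= end) {
		resume_with_error = true;
		goto resolve;
	}

	/* 3. Fault in the pages behind each data segment. */
	ret = fault_in_data_segments(sg, end, &mapped, &total);
	if (ret == -EAGAIN)
		goto resolve;		/* temporary failure, just resume */
	if (ret < 0 || total > mapped)
		resume_with_error = true;

resolve:
	/* 4. Tell the HCA to retry the faulting WQE (or move the QP to an
	 *    error state when resume_with_error is set). */
	page_fault_resume(resume_with_error);
}

int main(void)
{
	struct pfault pf = { .requestor = 1, .wqe_index = 0 };
	wqe_pfault_handler(&pf);
	return 0;
}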
-rw-r--r--  drivers/infiniband/hw/mlx5/odp.c  408
-rw-r--r--  include/linux/mlx5/qp.h             7
2 files changed, 415 insertions, 0 deletions
diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
index 63bbdba396f1..bd1dbe5ebc15 100644
--- a/drivers/infiniband/hw/mlx5/odp.c
+++ b/drivers/infiniband/hw/mlx5/odp.c
@@ -30,6 +30,9 @@
  * SOFTWARE.
  */
 
+#include <rdma/ib_umem.h>
+#include <rdma/ib_umem_odp.h>
+
 #include "mlx5_ib.h"
 
 struct workqueue_struct *mlx5_ib_page_fault_wq;
@@ -85,12 +88,417 @@ static void mlx5_ib_page_fault_resume(struct mlx5_ib_qp *qp,
 			    qp->mqp.qpn);
 }
 
+/*
+ * Handle a single data segment in a page-fault WQE.
+ *
+ * Returns number of pages retrieved on success. The caller will continue to
+ * the next data segment.
+ * Can return the following error codes:
+ * -EAGAIN to designate a temporary error. The caller will abort handling the
+ *  page fault and resolve it.
+ * -EFAULT when there's an error mapping the requested pages. The caller will
+ *  abort the page fault handling and possibly move the QP to an error state.
+ * On other errors the QP should also be closed with an error.
+ */
+static int pagefault_single_data_segment(struct mlx5_ib_qp *qp,
+					 struct mlx5_ib_pfault *pfault,
+					 u32 key, u64 io_virt, size_t bcnt,
+					 u32 *bytes_mapped)
+{
+	struct mlx5_ib_dev *mib_dev = to_mdev(qp->ibqp.pd->device);
+	int srcu_key;
+	unsigned int current_seq;
+	u64 start_idx;
+	int npages = 0, ret = 0;
+	struct mlx5_ib_mr *mr;
+	u64 access_mask = ODP_READ_ALLOWED_BIT;
+
+	srcu_key = srcu_read_lock(&mib_dev->mr_srcu);
+	mr = mlx5_ib_odp_find_mr_lkey(mib_dev, key);
+	/*
+	 * If we didn't find the MR, it means the MR was closed while we were
+	 * handling the ODP event. In this case we return -EFAULT so that the
+	 * QP will be closed.
+	 */
+	if (!mr || !mr->ibmr.pd) {
+		pr_err("Failed to find relevant mr for lkey=0x%06x, probably the MR was destroyed\n",
+		       key);
+		ret = -EFAULT;
+		goto srcu_unlock;
+	}
+	if (!mr->umem->odp_data) {
+		pr_debug("skipping non ODP MR (lkey=0x%06x) in page fault handler.\n",
+			 key);
+		if (bytes_mapped)
+			*bytes_mapped +=
+				(bcnt - pfault->mpfault.bytes_committed);
+		goto srcu_unlock;
+	}
+	if (mr->ibmr.pd != qp->ibqp.pd) {
+		pr_err("Page-fault with different PDs for QP and MR.\n");
+		ret = -EFAULT;
+		goto srcu_unlock;
+	}
+
+	current_seq = ACCESS_ONCE(mr->umem->odp_data->notifiers_seq);
+
+	/*
+	 * Avoid branches - this code will perform correctly
+	 * in all iterations (in iteration 2 and above,
+	 * bytes_committed == 0).
+	 */
+	io_virt += pfault->mpfault.bytes_committed;
+	bcnt -= pfault->mpfault.bytes_committed;
+
+	start_idx = (io_virt - (mr->mmr.iova & PAGE_MASK)) >> PAGE_SHIFT;
+
+	if (mr->umem->writable)
+		access_mask |= ODP_WRITE_ALLOWED_BIT;
+	npages = ib_umem_odp_map_dma_pages(mr->umem, io_virt, bcnt,
+					   access_mask, current_seq);
+	if (npages < 0) {
+		ret = npages;
+		goto srcu_unlock;
+	}
+
+	if (npages > 0) {
+		mutex_lock(&mr->umem->odp_data->umem_mutex);
+		/*
+		 * No need to check whether the MTTs really belong to
+		 * this MR, since ib_umem_odp_map_dma_pages already
+		 * checks this.
+		 */
+		ret = mlx5_ib_update_mtt(mr, start_idx, npages, 0);
+		mutex_unlock(&mr->umem->odp_data->umem_mutex);
+		if (ret < 0) {
+			pr_err("Failed to update mkey page tables\n");
+			goto srcu_unlock;
+		}
+
+		if (bytes_mapped) {
+			u32 new_mappings = npages * PAGE_SIZE -
+				(io_virt - round_down(io_virt, PAGE_SIZE));
+			*bytes_mapped += min_t(u32, new_mappings, bcnt);
+		}
+	}
+
+srcu_unlock:
+	srcu_read_unlock(&mib_dev->mr_srcu, srcu_key);
+	pfault->mpfault.bytes_committed = 0;
+	return ret ? ret : npages;
+}
+
+/**
+ * Parse a series of data segments for page fault handling.
+ *
+ * @qp the QP on which the fault occurred.
+ * @pfault contains page fault information.
+ * @wqe points at the first data segment in the WQE.
+ * @wqe_end points after the end of the WQE.
+ * @bytes_mapped receives the number of bytes that the function was able to
+ *		 map. This allows the caller to decide intelligently whether
+ *		 enough memory was mapped to resolve the page fault
+ *		 successfully (e.g. enough for the next MTU, or the entire
+ *		 WQE).
+ * @total_wqe_bytes receives the total data size of this WQE in bytes (minus
+ *		    the committed bytes).
+ *
+ * Returns the number of pages loaded if positive, zero for an empty WQE, or a
+ * negative error code.
+ */
+static int pagefault_data_segments(struct mlx5_ib_qp *qp,
+				   struct mlx5_ib_pfault *pfault, void *wqe,
+				   void *wqe_end, u32 *bytes_mapped,
+				   u32 *total_wqe_bytes, int receive_queue)
+{
+	int ret = 0, npages = 0;
+	u64 io_virt;
+	u32 key;
+	u32 byte_count;
+	size_t bcnt;
+	int inline_segment;
+
+	/* Skip SRQ next-WQE segment. */
+	if (receive_queue && qp->ibqp.srq)
+		wqe += sizeof(struct mlx5_wqe_srq_next_seg);
+
+	if (bytes_mapped)
+		*bytes_mapped = 0;
+	if (total_wqe_bytes)
+		*total_wqe_bytes = 0;
+
+	while (wqe < wqe_end) {
+		struct mlx5_wqe_data_seg *dseg = wqe;
+
+		io_virt = be64_to_cpu(dseg->addr);
+		key = be32_to_cpu(dseg->lkey);
+		byte_count = be32_to_cpu(dseg->byte_count);
+		inline_segment = !!(byte_count & MLX5_INLINE_SEG);
+		bcnt = byte_count & ~MLX5_INLINE_SEG;
+
+		if (inline_segment) {
+			bcnt = bcnt & MLX5_WQE_INLINE_SEG_BYTE_COUNT_MASK;
+			wqe += ALIGN(sizeof(struct mlx5_wqe_inline_seg) + bcnt,
+				     16);
+		} else {
+			wqe += sizeof(*dseg);
+		}
+
+		/* receive WQE end of sg list. */
+		if (receive_queue && bcnt == 0 && key == MLX5_INVALID_LKEY &&
+		    io_virt == 0)
+			break;
+
+		if (!inline_segment && total_wqe_bytes) {
+			*total_wqe_bytes += bcnt - min_t(size_t, bcnt,
+					pfault->mpfault.bytes_committed);
+		}
+
+		/* A zero length data segment designates a length of 2GB. */
+		if (bcnt == 0)
+			bcnt = 1U << 31;
+
+		if (inline_segment || bcnt <= pfault->mpfault.bytes_committed) {
+			pfault->mpfault.bytes_committed -=
+				min_t(size_t, bcnt,
+				      pfault->mpfault.bytes_committed);
+			continue;
+		}
+
+		ret = pagefault_single_data_segment(qp, pfault, key, io_virt,
+						    bcnt, bytes_mapped);
+		if (ret < 0)
+			break;
+		npages += ret;
+	}
+
+	return ret < 0 ? ret : npages;
+}
+
+/*
+ * Parse initiator WQE. Advances the wqe pointer to point at the
+ * scatter-gather list, and set wqe_end to the end of the WQE.
+ */
+static int mlx5_ib_mr_initiator_pfault_handler(
+	struct mlx5_ib_qp *qp, struct mlx5_ib_pfault *pfault,
+	void **wqe, void **wqe_end, int wqe_length)
+{
+	struct mlx5_ib_dev *dev = to_mdev(qp->ibqp.pd->device);
+	struct mlx5_wqe_ctrl_seg *ctrl = *wqe;
+	u16 wqe_index = pfault->mpfault.wqe.wqe_index;
+	unsigned ds, opcode;
+#if defined(DEBUG)
+	u32 ctrl_wqe_index, ctrl_qpn;
+#endif
+
+	ds = be32_to_cpu(ctrl->qpn_ds) & MLX5_WQE_CTRL_DS_MASK;
+	if (ds * MLX5_WQE_DS_UNITS > wqe_length) {
+		mlx5_ib_err(dev, "Unable to read the complete WQE. ds = 0x%x, ret = 0x%x\n",
+			    ds, wqe_length);
+		return -EFAULT;
+	}
+
+	if (ds == 0) {
+		mlx5_ib_err(dev, "Got WQE with zero DS. wqe_index=%x, qpn=%x\n",
+			    wqe_index, qp->mqp.qpn);
+		return -EFAULT;
+	}
+
+#if defined(DEBUG)
+	ctrl_wqe_index = (be32_to_cpu(ctrl->opmod_idx_opcode) &
+			MLX5_WQE_CTRL_WQE_INDEX_MASK) >>
+			MLX5_WQE_CTRL_WQE_INDEX_SHIFT;
+	if (wqe_index != ctrl_wqe_index) {
+		mlx5_ib_err(dev, "Got WQE with invalid wqe_index. wqe_index=0x%x, qpn=0x%x ctrl->wqe_index=0x%x\n",
+			    wqe_index, qp->mqp.qpn,
+			    ctrl_wqe_index);
+		return -EFAULT;
+	}
+
+	ctrl_qpn = (be32_to_cpu(ctrl->qpn_ds) & MLX5_WQE_CTRL_QPN_MASK) >>
+		MLX5_WQE_CTRL_QPN_SHIFT;
+	if (qp->mqp.qpn != ctrl_qpn) {
+		mlx5_ib_err(dev, "Got WQE with incorrect QP number. wqe_index=0x%x, qpn=0x%x ctrl->qpn=0x%x\n",
+			    wqe_index, qp->mqp.qpn,
+			    ctrl_qpn);
+		return -EFAULT;
+	}
+#endif /* DEBUG */
+
+	*wqe_end = *wqe + ds * MLX5_WQE_DS_UNITS;
+	*wqe += sizeof(*ctrl);
+
+	opcode = be32_to_cpu(ctrl->opmod_idx_opcode) &
+		 MLX5_WQE_CTRL_OPCODE_MASK;
+	switch (qp->ibqp.qp_type) {
+	case IB_QPT_RC:
+		switch (opcode) {
+		case MLX5_OPCODE_SEND:
+		case MLX5_OPCODE_SEND_IMM:
+		case MLX5_OPCODE_SEND_INVAL:
+			if (!(dev->odp_caps.per_transport_caps.rc_odp_caps &
+			      IB_ODP_SUPPORT_SEND))
+				goto invalid_transport_or_opcode;
+			break;
+		case MLX5_OPCODE_RDMA_WRITE:
+		case MLX5_OPCODE_RDMA_WRITE_IMM:
+			if (!(dev->odp_caps.per_transport_caps.rc_odp_caps &
+			      IB_ODP_SUPPORT_WRITE))
+				goto invalid_transport_or_opcode;
+			*wqe += sizeof(struct mlx5_wqe_raddr_seg);
+			break;
+		case MLX5_OPCODE_RDMA_READ:
+			if (!(dev->odp_caps.per_transport_caps.rc_odp_caps &
+			      IB_ODP_SUPPORT_READ))
+				goto invalid_transport_or_opcode;
+			*wqe += sizeof(struct mlx5_wqe_raddr_seg);
+			break;
+		default:
+			goto invalid_transport_or_opcode;
+		}
+		break;
+	case IB_QPT_UD:
+		switch (opcode) {
+		case MLX5_OPCODE_SEND:
+		case MLX5_OPCODE_SEND_IMM:
+			if (!(dev->odp_caps.per_transport_caps.ud_odp_caps &
+			      IB_ODP_SUPPORT_SEND))
+				goto invalid_transport_or_opcode;
+			*wqe += sizeof(struct mlx5_wqe_datagram_seg);
+			break;
+		default:
+			goto invalid_transport_or_opcode;
+		}
+		break;
+	default:
+invalid_transport_or_opcode:
+		mlx5_ib_err(dev, "ODP fault on QP of an unsupported opcode or transport. transport: 0x%x opcode: 0x%x.\n",
+			    qp->ibqp.qp_type, opcode);
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+/*
+ * Parse responder WQE. Advances the wqe pointer to point at the
+ * scatter-gather list, and set wqe_end to the end of the WQE.
+ */
+static int mlx5_ib_mr_responder_pfault_handler(
+	struct mlx5_ib_qp *qp, struct mlx5_ib_pfault *pfault,
+	void **wqe, void **wqe_end, int wqe_length)
+{
+	struct mlx5_ib_dev *dev = to_mdev(qp->ibqp.pd->device);
+	struct mlx5_ib_wq *wq = &qp->rq;
+	int wqe_size = 1 << wq->wqe_shift;
+
+	if (qp->ibqp.srq) {
+		mlx5_ib_err(dev, "ODP fault on SRQ is not supported\n");
+		return -EFAULT;
+	}
+
+	if (qp->wq_sig) {
+		mlx5_ib_err(dev, "ODP fault with WQE signatures is not supported\n");
+		return -EFAULT;
+	}
+
+	if (wqe_size > wqe_length) {
+		mlx5_ib_err(dev, "Couldn't read all of the receive WQE's content\n");
+		return -EFAULT;
+	}
+
+	switch (qp->ibqp.qp_type) {
+	case IB_QPT_RC:
+		if (!(dev->odp_caps.per_transport_caps.rc_odp_caps &
+		      IB_ODP_SUPPORT_RECV))
+			goto invalid_transport_or_opcode;
+		break;
+	default:
+invalid_transport_or_opcode:
+		mlx5_ib_err(dev, "ODP fault on QP of an unsupported transport. transport: 0x%x\n",
+			    qp->ibqp.qp_type);
+		return -EFAULT;
+	}
+
+	*wqe_end = *wqe + wqe_size;
+
+	return 0;
+}
+
+static void mlx5_ib_mr_wqe_pfault_handler(struct mlx5_ib_qp *qp,
+					  struct mlx5_ib_pfault *pfault)
+{
+	struct mlx5_ib_dev *dev = to_mdev(qp->ibqp.pd->device);
+	int ret;
+	void *wqe, *wqe_end;
+	u32 bytes_mapped, total_wqe_bytes;
+	char *buffer = NULL;
+	int resume_with_error = 0;
+	u16 wqe_index = pfault->mpfault.wqe.wqe_index;
+	int requestor = pfault->mpfault.flags & MLX5_PFAULT_REQUESTOR;
+
+	buffer = (char *)__get_free_page(GFP_KERNEL);
+	if (!buffer) {
+		mlx5_ib_err(dev, "Error allocating memory for IO page fault handling.\n");
+		resume_with_error = 1;
+		goto resolve_page_fault;
+	}
+
+	ret = mlx5_ib_read_user_wqe(qp, requestor, wqe_index, buffer,
+				    PAGE_SIZE);
+	if (ret < 0) {
+		mlx5_ib_err(dev, "Failed reading a WQE following page fault, error=%x, wqe_index=%x, qpn=%x\n",
+			    -ret, wqe_index, qp->mqp.qpn);
+		resume_with_error = 1;
+		goto resolve_page_fault;
+	}
+
+	wqe = buffer;
+	if (requestor)
+		ret = mlx5_ib_mr_initiator_pfault_handler(qp, pfault, &wqe,
+							  &wqe_end, ret);
+	else
+		ret = mlx5_ib_mr_responder_pfault_handler(qp, pfault, &wqe,
+							  &wqe_end, ret);
+	if (ret < 0) {
+		resume_with_error = 1;
+		goto resolve_page_fault;
+	}
+
+	if (wqe >= wqe_end) {
+		mlx5_ib_err(dev, "ODP fault on invalid WQE.\n");
+		resume_with_error = 1;
+		goto resolve_page_fault;
+	}
+
+	ret = pagefault_data_segments(qp, pfault, wqe, wqe_end, &bytes_mapped,
+				      &total_wqe_bytes, !requestor);
+	if (ret == -EAGAIN) {
+		goto resolve_page_fault;
+	} else if (ret < 0 || total_wqe_bytes > bytes_mapped) {
+		mlx5_ib_err(dev, "Error getting user pages for page fault. Error: 0x%x\n",
+			    -ret);
+		resume_with_error = 1;
+		goto resolve_page_fault;
+	}
+
+resolve_page_fault:
+	mlx5_ib_page_fault_resume(qp, pfault, resume_with_error);
+	mlx5_ib_dbg(dev, "PAGE FAULT completed. QP 0x%x resume_with_error=%d, flags: 0x%x\n",
+		    qp->mqp.qpn, resume_with_error, pfault->mpfault.flags);
+
+	free_page((unsigned long)buffer);
+}
+
 void mlx5_ib_mr_pfault_handler(struct mlx5_ib_qp *qp,
 			       struct mlx5_ib_pfault *pfault)
 {
 	u8 event_subtype = pfault->mpfault.event_subtype;
 
 	switch (event_subtype) {
+	case MLX5_PFAULT_SUBTYPE_WQE:
+		mlx5_ib_mr_wqe_pfault_handler(qp, pfault);
+		break;
 	default:
 		pr_warn("Invalid page fault event subtype: 0x%x\n",
 			event_subtype);
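
As a rough illustration of the scatter-list walk that pagefault_data_segments()
performs above (inline segments, the bytes_committed skip, and the
zero-length-means-2GB rule), here is a small userspace model. The struct
layout, sample values, and the fault_in_range() stub are assumptions made for
the example; the real code works on the hardware WQE format and calls
pagefault_single_data_segment().

/* Userspace model of the data-segment walk; not the driver code itself. */
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

#define MLX5_INLINE_SEG				0x80000000u
#define MLX5_WQE_INLINE_SEG_BYTE_COUNT_MASK	0x3ff

struct data_seg {		/* simplified mlx5_wqe_data_seg (host order) */
	uint32_t byte_count;
	uint32_t lkey;
	uint64_t addr;
};

/* Stub for pagefault_single_data_segment(): just report the range. */
static int fault_in_range(uint32_t lkey, uint64_t io_virt, size_t bcnt)
{
	printf("would fault in lkey=0x%06x addr=0x%llx len=%zu\n",
	       lkey, (unsigned long long)io_virt, bcnt);
	return 0;
}

static void walk_data_segments(const struct data_seg *seg, int nsegs,
			       size_t bytes_committed)
{
	for (int i = 0; i < nsegs; i++) {
		int inline_segment = !!(seg[i].byte_count & MLX5_INLINE_SEG);
		size_t bcnt = seg[i].byte_count & ~MLX5_INLINE_SEG;

		if (inline_segment)
			bcnt &= MLX5_WQE_INLINE_SEG_BYTE_COUNT_MASK;

		/* A zero-length data segment means 2GB. */
		if (bcnt == 0)
			bcnt = (size_t)1 << 31;

		/* Inline data and already-committed bytes need no faulting;
		 * just consume the committed-bytes budget. */
		if (inline_segment || bcnt <= bytes_committed) {
			bytes_committed -= bcnt < bytes_committed ?
					   bcnt : bytes_committed;
			continue;
		}

		/* The committed prefix of this segment is already resolved. */
		fault_in_range(seg[i].lkey,
			       seg[i].addr + bytes_committed,
			       bcnt - bytes_committed);
		bytes_committed = 0;
	}
}

int main(void)
{
	/* Example scatter list: made-up addresses and keys. */
	struct data_seg sg[] = {
		{ .byte_count = 4096, .lkey = 0x1234, .addr = 0x10000 },
		{ .byte_count = 8192, .lkey = 0x1234, .addr = 0x20000 },
	};

	/* Pretend the HCA already committed the first 1000 bytes. */
	walk_data_segments(sg, 2, 1000);
	return 0;
}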
diff --git a/include/linux/mlx5/qp.h b/include/linux/mlx5/qp.h
index 6b1d6f60c7e6..61f7a342d1bf 100644
--- a/include/linux/mlx5/qp.h
+++ b/include/linux/mlx5/qp.h
@@ -193,7 +193,12 @@ struct mlx5_wqe_ctrl_seg {
 };
 
 #define MLX5_WQE_CTRL_DS_MASK 0x3f
+#define MLX5_WQE_CTRL_QPN_MASK 0xffffff00
+#define MLX5_WQE_CTRL_QPN_SHIFT 8
 #define MLX5_WQE_DS_UNITS 16
+#define MLX5_WQE_CTRL_OPCODE_MASK 0xff
+#define MLX5_WQE_CTRL_WQE_INDEX_MASK 0x00ffff00
+#define MLX5_WQE_CTRL_WQE_INDEX_SHIFT 8
 
 struct mlx5_wqe_xrc_seg {
 	__be32 xrc_srqn;
@@ -298,6 +303,8 @@ struct mlx5_wqe_signature_seg { | |||
298 | u8 rsvd1[11]; | 303 | u8 rsvd1[11]; |
299 | }; | 304 | }; |
300 | 305 | ||
306 | #define MLX5_WQE_INLINE_SEG_BYTE_COUNT_MASK 0x3ff | ||
307 | |||
301 | struct mlx5_wqe_inline_seg { | 308 | struct mlx5_wqe_inline_seg { |
302 | __be32 byte_count; | 309 | __be32 byte_count; |
303 | }; | 310 | }; |
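
The masks and shifts added to qp.h above are what the initiator-side handler
uses to pull the opcode, WQE index, QPN, and DS count out of the big-endian
control segment. A minimal userspace sketch of that decoding follows; the
sample words and the use of ntohl() in place of the kernel's be32_to_cpu()
are assumptions for illustration only.

/* Userspace sketch: decoding the first two control-segment dwords with the
 * masks above. The sample values are made up, not real hardware output. */
#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>	/* ntohl()/htonl() stand in for be32_to_cpu()/cpu_to_be32() */

#define MLX5_WQE_CTRL_DS_MASK		0x3f
#define MLX5_WQE_CTRL_QPN_MASK		0xffffff00
#define MLX5_WQE_CTRL_QPN_SHIFT		8
#define MLX5_WQE_DS_UNITS		16
#define MLX5_WQE_CTRL_OPCODE_MASK	0xff
#define MLX5_WQE_CTRL_WQE_INDEX_MASK	0x00ffff00
#define MLX5_WQE_CTRL_WQE_INDEX_SHIFT	8

int main(void)
{
	/* First two big-endian dwords of a control segment:
	 * opmod_idx_opcode = opmod[31:24] | wqe_index[23:8] | opcode[7:0]
	 * qpn_ds           = qpn[31:8]                      | ds[5:0]
	 * The constants below are arbitrary example values. */
	uint32_t opmod_idx_opcode = htonl(0x00002a0a);	/* wqe_index 0x2a, opcode 0x0a */
	uint32_t qpn_ds           = htonl(0x00012303);	/* qpn 0x000123, 3 DS units   */

	uint32_t opcode    = ntohl(opmod_idx_opcode) & MLX5_WQE_CTRL_OPCODE_MASK;
	uint32_t wqe_index = (ntohl(opmod_idx_opcode) & MLX5_WQE_CTRL_WQE_INDEX_MASK)
				>> MLX5_WQE_CTRL_WQE_INDEX_SHIFT;
	uint32_t qpn       = (ntohl(qpn_ds) & MLX5_WQE_CTRL_QPN_MASK)
				>> MLX5_WQE_CTRL_QPN_SHIFT;
	uint32_t ds        = ntohl(qpn_ds) & MLX5_WQE_CTRL_DS_MASK;

	printf("opcode=0x%x wqe_index=0x%x qpn=0x%x wqe_size=%u bytes\n",
	       opcode, wqe_index, qpn, ds * MLX5_WQE_DS_UNITS);
	return 0;
}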