author    Roland Dreier <rolandd@cisco.com>  2008-07-23 11:12:26 -0400
committer Roland Dreier <rolandd@cisco.com>  2008-07-23 11:12:26 -0400
commit    95d04f0735b4fc837bff9aedcc3f3efb20ddc3d1
tree      900cd7debae9827c3e20f5199307ae27e83ba862 /drivers/infiniband/hw
parent    e4044cfc493338cd09870bd45dc646336bb66e9f
IB/mlx4: Add support for memory management extensions and local DMA L_Key
Add support for the following operations to mlx4 when device firmware
supports them:
- Send with invalidate and local invalidate send queue work requests;
- Allocate/free fast register MRs;
- Allocate/free fast register MR page lists;
- Fast register MR send queue work requests;
- Local DMA L_Key.
Signed-off-by: Roland Dreier <rolandd@cisco.com>
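These correspond to the memory management extensions verbs in the core
ib_verbs API. Roughly, a kernel consumer gates on the new capability bits
and then allocates the fast-registration resources -- a minimal sketch,
assuming "attr" (a queried struct ib_device_attr) and "pd" exist; none of
these names come from this patch:

        if (attr.device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS) {
                /* up to attr.max_fast_reg_page_list_len pages per registration */
                struct ib_mr *mr = ib_alloc_fast_reg_mr(pd, 32);
                struct ib_fast_reg_page_list *frpl =
                        ib_alloc_fast_reg_page_list(pd->device, 32);
                /* ... fill frpl->page_list[] and post an IB_WR_FAST_REG_MR ... */
        }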
Diffstat (limited to 'drivers/infiniband/hw')
 drivers/infiniband/hw/mlx4/cq.c      | 12
 drivers/infiniband/hw/mlx4/main.c    | 11
 drivers/infiniband/hw/mlx4/mlx4_ib.h | 15
 drivers/infiniband/hw/mlx4/mr.c      | 70
 drivers/infiniband/hw/mlx4/qp.c      | 72
 5 files changed, 175 insertions(+), 5 deletions(-)
diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
index 299f20832ab6..0b191a4842ce 100644
--- a/drivers/infiniband/hw/mlx4/cq.c
+++ b/drivers/infiniband/hw/mlx4/cq.c
@@ -637,6 +637,7 @@ repoll:
         case MLX4_OPCODE_SEND_IMM:
                 wc->wc_flags |= IB_WC_WITH_IMM;
         case MLX4_OPCODE_SEND:
+        case MLX4_OPCODE_SEND_INVAL:
                 wc->opcode = IB_WC_SEND;
                 break;
         case MLX4_OPCODE_RDMA_READ:
@@ -657,6 +658,12 @@ repoll:
         case MLX4_OPCODE_LSO:
                 wc->opcode = IB_WC_LSO;
                 break;
+        case MLX4_OPCODE_FMR:
+                wc->opcode = IB_WC_FAST_REG_MR;
+                break;
+        case MLX4_OPCODE_LOCAL_INVAL:
+                wc->opcode = IB_WC_LOCAL_INV;
+                break;
         }
 } else {
         wc->byte_len = be32_to_cpu(cqe->byte_cnt);
@@ -667,6 +674,11 @@ repoll:
                 wc->wc_flags = IB_WC_WITH_IMM;
                 wc->ex.imm_data = cqe->immed_rss_invalid;
                 break;
+        case MLX4_RECV_OPCODE_SEND_INVAL:
+                wc->opcode = IB_WC_RECV;
+                wc->wc_flags = IB_WC_WITH_INVALIDATE;
+                wc->ex.invalidate_rkey = be32_to_cpu(cqe->immed_rss_invalid);
+                break;
         case MLX4_RECV_OPCODE_SEND:
                 wc->opcode = IB_WC_RECV;
                 wc->wc_flags = 0;
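The receive-side hunk above is what surfaces a peer's Send with Invalidate
to consumers. A polling ULP would pick it up roughly like this -- a minimal
sketch, where "cq" and the mark_mr_invalidated() helper are assumed, not
part of this patch:

        struct ib_wc wc;

        while (ib_poll_cq(cq, 1, &wc) > 0) {
                if (wc.status != IB_WC_SUCCESS)
                        continue;       /* real consumers handle the error */
                if (wc.opcode == IB_WC_RECV &&
                    (wc.wc_flags & IB_WC_WITH_INVALIDATE))
                        /* The peer already invalidated this rkey remotely,
                         * so no local IB_WR_LOCAL_INV is needed for it. */
                        mark_mr_invalidated(wc.ex.invalidate_rkey);
        }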
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index bcf50648fa18..38d6907ab521 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -104,6 +104,12 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
                 props->device_cap_flags |= IB_DEVICE_UD_IP_CSUM;
         if (dev->dev->caps.max_gso_sz)
                 props->device_cap_flags |= IB_DEVICE_UD_TSO;
+        if (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_RESERVED_LKEY)
+                props->device_cap_flags |= IB_DEVICE_LOCAL_DMA_LKEY;
+        if ((dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_LOCAL_INV) &&
+            (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_REMOTE_INV) &&
+            (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_FAST_REG_WR))
+                props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
 
         props->vendor_id = be32_to_cpup((__be32 *) (out_mad->data + 36)) &
                 0xffffff;
@@ -127,6 +133,7 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
         props->max_srq = dev->dev->caps.num_srqs - dev->dev->caps.reserved_srqs;
         props->max_srq_wr = dev->dev->caps.max_srq_wqes - 1;
         props->max_srq_sge = dev->dev->caps.max_srq_sge;
+        props->max_fast_reg_page_list_len = PAGE_SIZE / sizeof (u64);
         props->local_ca_ack_delay = dev->dev->caps.local_ca_ack_delay;
         props->atomic_cap = dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_ATOMIC ?
                 IB_ATOMIC_HCA : IB_ATOMIC_NONE;
@@ -565,6 +572,7 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
         strlcpy(ibdev->ib_dev.name, "mlx4_%d", IB_DEVICE_NAME_MAX);
         ibdev->ib_dev.owner = THIS_MODULE;
         ibdev->ib_dev.node_type = RDMA_NODE_IB_CA;
+        ibdev->ib_dev.local_dma_lkey = dev->caps.reserved_lkey;
         ibdev->ib_dev.phys_port_cnt = dev->caps.num_ports;
         ibdev->ib_dev.num_comp_vectors = 1;
         ibdev->ib_dev.dma_device = &dev->pdev->dev;
@@ -627,6 +635,9 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
         ibdev->ib_dev.get_dma_mr = mlx4_ib_get_dma_mr;
         ibdev->ib_dev.reg_user_mr = mlx4_ib_reg_user_mr;
         ibdev->ib_dev.dereg_mr = mlx4_ib_dereg_mr;
+        ibdev->ib_dev.alloc_fast_reg_mr = mlx4_ib_alloc_fast_reg_mr;
+        ibdev->ib_dev.alloc_fast_reg_page_list = mlx4_ib_alloc_fast_reg_page_list;
+        ibdev->ib_dev.free_fast_reg_page_list = mlx4_ib_free_fast_reg_page_list;
         ibdev->ib_dev.attach_mcast = mlx4_ib_mcg_attach;
         ibdev->ib_dev.detach_mcast = mlx4_ib_mcg_detach;
         ibdev->ib_dev.process_mad = mlx4_ib_process_mad;
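With IB_DEVICE_LOCAL_DMA_LKEY reported and ib_dev.local_dma_lkey filled in,
a kernel ULP can reference any DMA-mapped buffer without allocating an MR
through ib_get_dma_mr(). A minimal sketch, where "device" is the ULP's
struct ib_device pointer and dma_addr/len come from an assumed
dma_map_single() call:

        struct ib_sge sge = {
                .addr   = dma_addr,     /* assumed DMA mapping */
                .length = len,
                .lkey   = device->local_dma_lkey, /* valid only if the device
                                                     sets IB_DEVICE_LOCAL_DMA_LKEY */
        };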
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
index c4cf5b69eefa..d26a91317d4d 100644
--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
@@ -83,6 +83,11 @@ struct mlx4_ib_mr {
         struct ib_umem *umem;
 };
 
+struct mlx4_ib_fast_reg_page_list {
+        struct ib_fast_reg_page_list ibfrpl;
+        dma_addr_t map;
+};
+
 struct mlx4_ib_fmr {
         struct ib_fmr ibfmr;
         struct mlx4_fmr mfmr;
@@ -199,6 +204,11 @@ static inline struct mlx4_ib_mr *to_mmr(struct ib_mr *ibmr)
         return container_of(ibmr, struct mlx4_ib_mr, ibmr);
 }
 
+static inline struct mlx4_ib_fast_reg_page_list *to_mfrpl(struct ib_fast_reg_page_list *ibfrpl)
+{
+        return container_of(ibfrpl, struct mlx4_ib_fast_reg_page_list, ibfrpl);
+}
+
 static inline struct mlx4_ib_fmr *to_mfmr(struct ib_fmr *ibfmr)
 {
         return container_of(ibfmr, struct mlx4_ib_fmr, ibfmr);
@@ -239,6 +249,11 @@ struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
                                   u64 virt_addr, int access_flags,
                                   struct ib_udata *udata);
 int mlx4_ib_dereg_mr(struct ib_mr *mr);
+struct ib_mr *mlx4_ib_alloc_fast_reg_mr(struct ib_pd *pd,
+                                        int max_page_list_len);
+struct ib_fast_reg_page_list *mlx4_ib_alloc_fast_reg_page_list(struct ib_device *ibdev,
+                                                               int page_list_len);
+void mlx4_ib_free_fast_reg_page_list(struct ib_fast_reg_page_list *page_list);
 
 int mlx4_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period);
 int mlx4_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata);
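The new wrapper struct follows the file's existing embed-and-container_of
pattern: the core only ever sees the embedded ib_fast_reg_page_list, and
to_mfrpl() recovers the driver-private DMA handle when building a WQE.
Schematically (names taken from this patch):

        struct mlx4_ib_fast_reg_page_list *mfrpl = to_mfrpl(page_list);

        /* mfrpl->map is the bus address the HCA reads the page list
         * from; it never leaves the driver. */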
diff --git a/drivers/infiniband/hw/mlx4/mr.c b/drivers/infiniband/hw/mlx4/mr.c
index 68e92485fc76..db2086faa4ed 100644
--- a/drivers/infiniband/hw/mlx4/mr.c
+++ b/drivers/infiniband/hw/mlx4/mr.c
@@ -183,6 +183,76 @@ int mlx4_ib_dereg_mr(struct ib_mr *ibmr)
         return 0;
 }
 
+struct ib_mr *mlx4_ib_alloc_fast_reg_mr(struct ib_pd *pd,
+                                        int max_page_list_len)
+{
+        struct mlx4_ib_dev *dev = to_mdev(pd->device);
+        struct mlx4_ib_mr *mr;
+        int err;
+
+        mr = kmalloc(sizeof *mr, GFP_KERNEL);
+        if (!mr)
+                return ERR_PTR(-ENOMEM);
+
+        err = mlx4_mr_alloc(dev->dev, to_mpd(pd)->pdn, 0, 0, 0,
+                            max_page_list_len, 0, &mr->mmr);
+        if (err)
+                goto err_free;
+
+        err = mlx4_mr_enable(dev->dev, &mr->mmr);
+        if (err)
+                goto err_mr;
+
+        return &mr->ibmr;
+
+err_mr:
+        mlx4_mr_free(dev->dev, &mr->mmr);
+
+err_free:
+        kfree(mr);
+        return ERR_PTR(err);
+}
+
+struct ib_fast_reg_page_list *mlx4_ib_alloc_fast_reg_page_list(struct ib_device *ibdev,
+                                                               int page_list_len)
+{
+        struct mlx4_ib_dev *dev = to_mdev(ibdev);
+        struct mlx4_ib_fast_reg_page_list *mfrpl;
+        int size = page_list_len * sizeof (u64);
+
+        if (size > PAGE_SIZE)
+                return ERR_PTR(-EINVAL);
+
+        mfrpl = kmalloc(sizeof *mfrpl, GFP_KERNEL);
+        if (!mfrpl)
+                return ERR_PTR(-ENOMEM);
+
+        mfrpl->ibfrpl.page_list = dma_alloc_coherent(&dev->dev->pdev->dev,
+                                                     size, &mfrpl->map,
+                                                     GFP_KERNEL);
+        if (!mfrpl->ibfrpl.page_list)
+                goto err_free;
+
+        WARN_ON(mfrpl->map & 0x3f);
+
+        return &mfrpl->ibfrpl;
+
+err_free:
+        kfree(mfrpl);
+        return ERR_PTR(-ENOMEM);
+}
+
+void mlx4_ib_free_fast_reg_page_list(struct ib_fast_reg_page_list *page_list)
+{
+        struct mlx4_ib_dev *dev = to_mdev(page_list->device);
+        struct mlx4_ib_fast_reg_page_list *mfrpl = to_mfrpl(page_list);
+        int size = page_list->max_page_list_len * sizeof (u64);
+
+        dma_free_coherent(&dev->dev->pdev->dev, size, page_list->page_list,
+                          mfrpl->map);
+        kfree(mfrpl);
+}
+
 struct ib_fmr *mlx4_ib_fmr_alloc(struct ib_pd *pd, int acc,
                                  struct ib_fmr_attr *fmr_attr)
 {
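Note that the PAGE_SIZE / sizeof (u64) cap in the page-list allocator
matches the max_fast_reg_page_list_len reported from main.c. A caller
pairs allocation and teardown through the core verbs like so -- a minimal
sketch; 16 pages is an arbitrary choice and "pd" is assumed:

        struct ib_mr *mr;
        struct ib_fast_reg_page_list *frpl;

        mr = ib_alloc_fast_reg_mr(pd, 16);
        if (IS_ERR(mr))
                return PTR_ERR(mr);

        frpl = ib_alloc_fast_reg_page_list(pd->device, 16);
        if (IS_ERR(frpl)) {
                ib_dereg_mr(mr);
                return PTR_ERR(frpl);
        }

        /* ... frpl->page_list[i] = <DMA address of page i>, then post
         * IB_WR_FAST_REG_MR work requests ... */

        ib_free_fast_reg_page_list(frpl);
        ib_dereg_mr(mr);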
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index bda0859a5ac5..02a99bc4442e 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -78,6 +78,9 @@ static const __be32 mlx4_ib_opcode[] = {
         [IB_WR_RDMA_READ]            = __constant_cpu_to_be32(MLX4_OPCODE_RDMA_READ),
         [IB_WR_ATOMIC_CMP_AND_SWP]   = __constant_cpu_to_be32(MLX4_OPCODE_ATOMIC_CS),
         [IB_WR_ATOMIC_FETCH_AND_ADD] = __constant_cpu_to_be32(MLX4_OPCODE_ATOMIC_FA),
+        [IB_WR_SEND_WITH_INV]        = __constant_cpu_to_be32(MLX4_OPCODE_SEND_INVAL),
+        [IB_WR_LOCAL_INV]            = __constant_cpu_to_be32(MLX4_OPCODE_LOCAL_INVAL),
+        [IB_WR_FAST_REG_MR]          = __constant_cpu_to_be32(MLX4_OPCODE_FMR),
 };
 
 static struct mlx4_ib_sqp *to_msqp(struct mlx4_ib_qp *mqp)
@@ -976,6 +979,10 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
         context->pd = cpu_to_be32(to_mpd(ibqp->pd)->pdn);
         context->params1 = cpu_to_be32(MLX4_IB_ACK_REQ_FREQ << 28);
 
+        /* Set "fast registration enabled" for all kernel QPs */
+        if (!qp->ibqp.uobject)
+                context->params1 |= cpu_to_be32(1 << 11);
+
         if (attr_mask & IB_QP_RNR_RETRY) {
                 context->params1 |= cpu_to_be32(attr->rnr_retry << 13);
                 optpar |= MLX4_QP_OPTPAR_RNR_RETRY;
@@ -1322,6 +1329,38 @@ static int mlx4_wq_overflow(struct mlx4_ib_wq *wq, int nreq, struct ib_cq *ib_cq
         return cur + nreq >= wq->max_post;
 }
 
+static __be32 convert_access(int acc)
+{
+        return (acc & IB_ACCESS_REMOTE_ATOMIC ? cpu_to_be32(MLX4_WQE_FMR_PERM_ATOMIC)       : 0) |
+               (acc & IB_ACCESS_REMOTE_WRITE  ? cpu_to_be32(MLX4_WQE_FMR_PERM_REMOTE_WRITE) : 0) |
+               (acc & IB_ACCESS_REMOTE_READ   ? cpu_to_be32(MLX4_WQE_FMR_PERM_REMOTE_READ)  : 0) |
+               (acc & IB_ACCESS_LOCAL_WRITE   ? cpu_to_be32(MLX4_WQE_FMR_PERM_LOCAL_WRITE)  : 0) |
+               cpu_to_be32(MLX4_WQE_FMR_PERM_LOCAL_READ);
+}
+
+static void set_fmr_seg(struct mlx4_wqe_fmr_seg *fseg, struct ib_send_wr *wr)
+{
+        struct mlx4_ib_fast_reg_page_list *mfrpl = to_mfrpl(wr->wr.fast_reg.page_list);
+
+        fseg->flags       = convert_access(wr->wr.fast_reg.access_flags);
+        fseg->mem_key     = cpu_to_be32(wr->wr.fast_reg.rkey);
+        fseg->buf_list    = cpu_to_be64(mfrpl->map);
+        fseg->start_addr  = cpu_to_be64(wr->wr.fast_reg.iova_start);
+        fseg->reg_len     = cpu_to_be64(wr->wr.fast_reg.length);
+        fseg->offset      = 0; /* XXX -- is this just for ZBVA? */
+        fseg->page_size   = cpu_to_be32(wr->wr.fast_reg.page_shift);
+        fseg->reserved[0] = 0;
+        fseg->reserved[1] = 0;
+}
+
+static void set_local_inv_seg(struct mlx4_wqe_local_inval_seg *iseg, u32 rkey)
+{
+        iseg->flags    = 0;
+        iseg->mem_key  = cpu_to_be32(rkey);
+        iseg->guest_id = 0;
+        iseg->pa       = 0;
+}
+
 static __always_inline void set_raddr_seg(struct mlx4_wqe_raddr_seg *rseg,
                                           u64 remote_addr, u32 rkey)
 {
@@ -1423,6 +1462,21 @@ static int build_lso_seg(struct mlx4_wqe_lso_seg *wqe, struct ib_send_wr *wr,
         return 0;
 }
 
+static __be32 send_ieth(struct ib_send_wr *wr)
+{
+        switch (wr->opcode) {
+        case IB_WR_SEND_WITH_IMM:
+        case IB_WR_RDMA_WRITE_WITH_IMM:
+                return wr->ex.imm_data;
+
+        case IB_WR_SEND_WITH_INV:
+                return cpu_to_be32(wr->ex.invalidate_rkey);
+
+        default:
+                return 0;
+        }
+}
+
 int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
                       struct ib_send_wr **bad_wr)
 {
@@ -1469,11 +1523,7 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
                          MLX4_WQE_CTRL_TCP_UDP_CSUM) : 0) |
                         qp->sq_signal_bits;
 
-                if (wr->opcode == IB_WR_SEND_WITH_IMM ||
-                    wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM)
-                        ctrl->imm = wr->ex.imm_data;
-                else
-                        ctrl->imm = 0;
+                ctrl->imm = send_ieth(wr);
 
                 wqe += sizeof *ctrl;
                 size = sizeof *ctrl / 16;
@@ -1505,6 +1555,18 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
                         size += sizeof (struct mlx4_wqe_raddr_seg) / 16;
                         break;
 
+                case IB_WR_LOCAL_INV:
+                        set_local_inv_seg(wqe, wr->ex.invalidate_rkey);
+                        wqe  += sizeof (struct mlx4_wqe_local_inval_seg);
+                        size += sizeof (struct mlx4_wqe_local_inval_seg) / 16;
+                        break;
+
+                case IB_WR_FAST_REG_MR:
+                        set_fmr_seg(wqe, wr);
+                        wqe  += sizeof (struct mlx4_wqe_fmr_seg);
+                        size += sizeof (struct mlx4_wqe_fmr_seg) / 16;
+                        break;
+
                 default:
                         /* No extra segments required for sends */
                         break;
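Taken together with the fast-registration enable bit set in
__mlx4_ib_modify_qp (kernel QPs only), a consumer registers and later
invalidates through the send queue roughly as follows -- a minimal sketch;
"qp", "mr", "frpl", "iova", and "npages" are assumed, and completion
handling is omitted:

        struct ib_send_wr fr_wr, inv_wr, *bad_wr;

        memset(&fr_wr, 0, sizeof fr_wr);
        fr_wr.opcode                    = IB_WR_FAST_REG_MR;
        fr_wr.wr.fast_reg.iova_start    = iova;
        fr_wr.wr.fast_reg.page_list     = frpl;
        fr_wr.wr.fast_reg.page_list_len = npages;
        fr_wr.wr.fast_reg.page_shift    = PAGE_SHIFT;
        fr_wr.wr.fast_reg.length        = npages * PAGE_SIZE;
        fr_wr.wr.fast_reg.access_flags  = IB_ACCESS_LOCAL_WRITE |
                                          IB_ACCESS_REMOTE_READ;
        fr_wr.wr.fast_reg.rkey          = mr->rkey;
        fr_wr.send_flags                = IB_SEND_SIGNALED;

        if (ib_post_send(qp, &fr_wr, &bad_wr))
                /* handle the error */;

        /* ... after the remote I/O against mr->rkey completes ... */

        memset(&inv_wr, 0, sizeof inv_wr);
        inv_wr.opcode             = IB_WR_LOCAL_INV;
        inv_wr.ex.invalidate_rkey = mr->rkey;
        inv_wr.send_flags         = IB_SEND_SIGNALED;

        if (ib_post_send(qp, &inv_wr, &bad_wr))
                /* handle the error */;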