aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorChristoph Hellwig <hch@lst.de>2015-12-23 13:12:48 -0500
committerDoug Ledford <dledford@redhat.com>2015-12-23 14:29:04 -0500
commitfeb7c1e38bccfd18cc06677cb648ed2340788fe8 (patch)
tree9898dd6dfef73285013027d349179163a7d9a2c6
parentb7d3e0a94fe128912bbebf0ae68551c85fd2d429 (diff)
IB: remove in-kernel support for memory windows
Remove the unused ib_allow_mw and ib_bind_mw functions, remove the unused IB_WR_BIND_MW and IB_WC_BIND_MW opcodes and move ib_dealloc_mw into the uverbs module. Signed-off-by: Christoph Hellwig <hch@lst.de> Reviewed-by: Sagi Grimberg <sagig@mellanox.com> Reviewed-by: Jason Gunthorpe <jgunthorpe@obsidianresearch.com> [core] Reviewed-by: Steve Wise <swise@opengridcomputing.com> Signed-off-by: Doug Ledford <dledford@redhat.com>
-rw-r--r--Documentation/infiniband/core_locking.txt2
-rw-r--r--drivers/infiniband/core/uverbs.h2
-rw-r--r--drivers/infiniband/core/uverbs_cmd.c4
-rw-r--r--drivers/infiniband/core/uverbs_main.c13
-rw-r--r--drivers/infiniband/core/verbs.c36
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_cq.c4
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_provider.c1
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_provider.h3
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_qp.c82
-rw-r--r--drivers/infiniband/hw/cxgb4/cq.c3
-rw-r--r--drivers/infiniband/hw/cxgb4/iw_cxgb4.h2
-rw-r--r--drivers/infiniband/hw/cxgb4/provider.c1
-rw-r--r--drivers/infiniband/hw/cxgb4/qp.c5
-rw-r--r--drivers/infiniband/hw/mlx4/cq.c3
-rw-r--r--drivers/infiniband/hw/mlx4/main.c1
-rw-r--r--drivers/infiniband/hw/mlx4/mlx4_ib.h2
-rw-r--r--drivers/infiniband/hw/mlx4/mr.c22
-rw-r--r--drivers/infiniband/hw/mlx4/qp.c27
-rw-r--r--drivers/infiniband/hw/mlx5/cq.c3
-rw-r--r--drivers/infiniband/hw/mthca/mthca_cq.c3
-rw-r--r--drivers/infiniband/hw/nes/nes_verbs.c75
-rw-r--r--drivers/staging/rdma/amso1100/c2_cq.c3
-rw-r--r--drivers/staging/rdma/ehca/ehca_iverbs.h3
-rw-r--r--drivers/staging/rdma/ehca/ehca_main.c1
-rw-r--r--drivers/staging/rdma/ehca/ehca_mrmw.c12
-rw-r--r--drivers/staging/rdma/ehca/ehca_reqs.c1
-rw-r--r--include/rdma/ib_verbs.h83
27 files changed, 16 insertions, 381 deletions
diff --git a/Documentation/infiniband/core_locking.txt b/Documentation/infiniband/core_locking.txt
index e1678542279a..4b1f36b6ada0 100644
--- a/Documentation/infiniband/core_locking.txt
+++ b/Documentation/infiniband/core_locking.txt
@@ -15,7 +15,6 @@ Sleeping and interrupt context
15 modify_ah 15 modify_ah
16 query_ah 16 query_ah
17 destroy_ah 17 destroy_ah
18 bind_mw
19 post_send 18 post_send
20 post_recv 19 post_recv
21 poll_cq 20 poll_cq
@@ -31,7 +30,6 @@ Sleeping and interrupt context
31 ib_modify_ah 30 ib_modify_ah
32 ib_query_ah 31 ib_query_ah
33 ib_destroy_ah 32 ib_destroy_ah
34 ib_bind_mw
35 ib_post_send 33 ib_post_send
36 ib_post_recv 34 ib_post_recv
37 ib_req_notify_cq 35 ib_req_notify_cq
diff --git a/drivers/infiniband/core/uverbs.h b/drivers/infiniband/core/uverbs.h
index 94bbd8c155fc..612ccfd39bf9 100644
--- a/drivers/infiniband/core/uverbs.h
+++ b/drivers/infiniband/core/uverbs.h
@@ -204,6 +204,8 @@ void ib_uverbs_event_handler(struct ib_event_handler *handler,
204 struct ib_event *event); 204 struct ib_event *event);
205void ib_uverbs_dealloc_xrcd(struct ib_uverbs_device *dev, struct ib_xrcd *xrcd); 205void ib_uverbs_dealloc_xrcd(struct ib_uverbs_device *dev, struct ib_xrcd *xrcd);
206 206
207int uverbs_dealloc_mw(struct ib_mw *mw);
208
207struct ib_uverbs_flow_spec { 209struct ib_uverbs_flow_spec {
208 union { 210 union {
209 union { 211 union {
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index 95610565fb46..5428ebee096f 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -1243,7 +1243,7 @@ err_copy:
1243 idr_remove_uobj(&ib_uverbs_mw_idr, uobj); 1243 idr_remove_uobj(&ib_uverbs_mw_idr, uobj);
1244 1244
1245err_unalloc: 1245err_unalloc:
1246 ib_dealloc_mw(mw); 1246 uverbs_dealloc_mw(mw);
1247 1247
1248err_put: 1248err_put:
1249 put_pd_read(pd); 1249 put_pd_read(pd);
@@ -1272,7 +1272,7 @@ ssize_t ib_uverbs_dealloc_mw(struct ib_uverbs_file *file,
1272 1272
1273 mw = uobj->object; 1273 mw = uobj->object;
1274 1274
1275 ret = ib_dealloc_mw(mw); 1275 ret = uverbs_dealloc_mw(mw);
1276 if (!ret) 1276 if (!ret)
1277 uobj->live = 0; 1277 uobj->live = 0;
1278 1278
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index e3ef28861be6..39680aed99dd 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -133,6 +133,17 @@ static int (*uverbs_ex_cmd_table[])(struct ib_uverbs_file *file,
133static void ib_uverbs_add_one(struct ib_device *device); 133static void ib_uverbs_add_one(struct ib_device *device);
134static void ib_uverbs_remove_one(struct ib_device *device, void *client_data); 134static void ib_uverbs_remove_one(struct ib_device *device, void *client_data);
135 135
136int uverbs_dealloc_mw(struct ib_mw *mw)
137{
138 struct ib_pd *pd = mw->pd;
139 int ret;
140
141 ret = mw->device->dealloc_mw(mw);
142 if (!ret)
143 atomic_dec(&pd->usecnt);
144 return ret;
145}
146
136static void ib_uverbs_release_dev(struct kobject *kobj) 147static void ib_uverbs_release_dev(struct kobject *kobj)
137{ 148{
138 struct ib_uverbs_device *dev = 149 struct ib_uverbs_device *dev =
@@ -224,7 +235,7 @@ static int ib_uverbs_cleanup_ucontext(struct ib_uverbs_file *file,
224 struct ib_mw *mw = uobj->object; 235 struct ib_mw *mw = uobj->object;
225 236
226 idr_remove_uobj(&ib_uverbs_mw_idr, uobj); 237 idr_remove_uobj(&ib_uverbs_mw_idr, uobj);
227 ib_dealloc_mw(mw); 238 uverbs_dealloc_mw(mw);
228 kfree(uobj); 239 kfree(uobj);
229 } 240 }
230 241
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index 70b1016be1c8..c5e0f07a7f82 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -1403,42 +1403,6 @@ struct ib_mr *ib_alloc_mr(struct ib_pd *pd,
1403} 1403}
1404EXPORT_SYMBOL(ib_alloc_mr); 1404EXPORT_SYMBOL(ib_alloc_mr);
1405 1405
1406/* Memory windows */
1407
1408struct ib_mw *ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type)
1409{
1410 struct ib_mw *mw;
1411
1412 if (!pd->device->alloc_mw)
1413 return ERR_PTR(-ENOSYS);
1414
1415 mw = pd->device->alloc_mw(pd, type);
1416 if (!IS_ERR(mw)) {
1417 mw->device = pd->device;
1418 mw->pd = pd;
1419 mw->uobject = NULL;
1420 mw->type = type;
1421 atomic_inc(&pd->usecnt);
1422 }
1423
1424 return mw;
1425}
1426EXPORT_SYMBOL(ib_alloc_mw);
1427
1428int ib_dealloc_mw(struct ib_mw *mw)
1429{
1430 struct ib_pd *pd;
1431 int ret;
1432
1433 pd = mw->pd;
1434 ret = mw->device->dealloc_mw(mw);
1435 if (!ret)
1436 atomic_dec(&pd->usecnt);
1437
1438 return ret;
1439}
1440EXPORT_SYMBOL(ib_dealloc_mw);
1441
1442/* "Fast" memory regions */ 1406/* "Fast" memory regions */
1443 1407
1444struct ib_fmr *ib_alloc_fmr(struct ib_pd *pd, 1408struct ib_fmr *ib_alloc_fmr(struct ib_pd *pd,
diff --git a/drivers/infiniband/hw/cxgb3/iwch_cq.c b/drivers/infiniband/hw/cxgb3/iwch_cq.c
index cfe404925a39..97fbfd2c298e 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_cq.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_cq.c
@@ -115,10 +115,6 @@ static int iwch_poll_cq_one(struct iwch_dev *rhp, struct iwch_cq *chp,
115 case T3_SEND_WITH_SE_INV: 115 case T3_SEND_WITH_SE_INV:
116 wc->opcode = IB_WC_SEND; 116 wc->opcode = IB_WC_SEND;
117 break; 117 break;
118 case T3_BIND_MW:
119 wc->opcode = IB_WC_BIND_MW;
120 break;
121
122 case T3_LOCAL_INV: 118 case T3_LOCAL_INV:
123 wc->opcode = IB_WC_LOCAL_INV; 119 wc->opcode = IB_WC_LOCAL_INV;
124 break; 120 break;
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c
index f7aa019a3c1a..384e1d783507 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c
@@ -1388,7 +1388,6 @@ int iwch_register_device(struct iwch_dev *dev)
1388 dev->ibdev.reg_user_mr = iwch_reg_user_mr; 1388 dev->ibdev.reg_user_mr = iwch_reg_user_mr;
1389 dev->ibdev.dereg_mr = iwch_dereg_mr; 1389 dev->ibdev.dereg_mr = iwch_dereg_mr;
1390 dev->ibdev.alloc_mw = iwch_alloc_mw; 1390 dev->ibdev.alloc_mw = iwch_alloc_mw;
1391 dev->ibdev.bind_mw = iwch_bind_mw;
1392 dev->ibdev.dealloc_mw = iwch_dealloc_mw; 1391 dev->ibdev.dealloc_mw = iwch_dealloc_mw;
1393 dev->ibdev.alloc_mr = iwch_alloc_mr; 1392 dev->ibdev.alloc_mr = iwch_alloc_mr;
1394 dev->ibdev.map_mr_sg = iwch_map_mr_sg; 1393 dev->ibdev.map_mr_sg = iwch_map_mr_sg;
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.h b/drivers/infiniband/hw/cxgb3/iwch_provider.h
index f4fa6d677c0f..f24df44146c6 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.h
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.h
@@ -330,9 +330,6 @@ int iwch_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
330 struct ib_send_wr **bad_wr); 330 struct ib_send_wr **bad_wr);
331int iwch_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr, 331int iwch_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
332 struct ib_recv_wr **bad_wr); 332 struct ib_recv_wr **bad_wr);
333int iwch_bind_mw(struct ib_qp *qp,
334 struct ib_mw *mw,
335 struct ib_mw_bind *mw_bind);
336int iwch_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc); 333int iwch_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
337int iwch_post_terminate(struct iwch_qp *qhp, struct respQ_msg_t *rsp_msg); 334int iwch_post_terminate(struct iwch_qp *qhp, struct respQ_msg_t *rsp_msg);
338int iwch_post_zb_read(struct iwch_ep *ep); 335int iwch_post_zb_read(struct iwch_ep *ep);
diff --git a/drivers/infiniband/hw/cxgb3/iwch_qp.c b/drivers/infiniband/hw/cxgb3/iwch_qp.c
index d0548fc6395e..d939980a708f 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_qp.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_qp.c
@@ -526,88 +526,6 @@ out:
526 return err; 526 return err;
527} 527}
528 528
529int iwch_bind_mw(struct ib_qp *qp,
530 struct ib_mw *mw,
531 struct ib_mw_bind *mw_bind)
532{
533 struct iwch_dev *rhp;
534 struct iwch_mw *mhp;
535 struct iwch_qp *qhp;
536 union t3_wr *wqe;
537 u32 pbl_addr;
538 u8 page_size;
539 u32 num_wrs;
540 unsigned long flag;
541 struct ib_sge sgl;
542 int err=0;
543 enum t3_wr_flags t3_wr_flags;
544 u32 idx;
545 struct t3_swsq *sqp;
546
547 qhp = to_iwch_qp(qp);
548 mhp = to_iwch_mw(mw);
549 rhp = qhp->rhp;
550
551 spin_lock_irqsave(&qhp->lock, flag);
552 if (qhp->attr.state > IWCH_QP_STATE_RTS) {
553 spin_unlock_irqrestore(&qhp->lock, flag);
554 return -EINVAL;
555 }
556 num_wrs = Q_FREECNT(qhp->wq.sq_rptr, qhp->wq.sq_wptr,
557 qhp->wq.sq_size_log2);
558 if (num_wrs == 0) {
559 spin_unlock_irqrestore(&qhp->lock, flag);
560 return -ENOMEM;
561 }
562 idx = Q_PTR2IDX(qhp->wq.wptr, qhp->wq.size_log2);
563 PDBG("%s: idx 0x%0x, mw 0x%p, mw_bind 0x%p\n", __func__, idx,
564 mw, mw_bind);
565 wqe = (union t3_wr *) (qhp->wq.queue + idx);
566
567 t3_wr_flags = 0;
568 if (mw_bind->send_flags & IB_SEND_SIGNALED)
569 t3_wr_flags = T3_COMPLETION_FLAG;
570
571 sgl.addr = mw_bind->bind_info.addr;
572 sgl.lkey = mw_bind->bind_info.mr->lkey;
573 sgl.length = mw_bind->bind_info.length;
574 wqe->bind.reserved = 0;
575 wqe->bind.type = TPT_VATO;
576
577 /* TBD: check perms */
578 wqe->bind.perms = iwch_ib_to_tpt_bind_access(
579 mw_bind->bind_info.mw_access_flags);
580 wqe->bind.mr_stag = cpu_to_be32(mw_bind->bind_info.mr->lkey);
581 wqe->bind.mw_stag = cpu_to_be32(mw->rkey);
582 wqe->bind.mw_len = cpu_to_be32(mw_bind->bind_info.length);
583 wqe->bind.mw_va = cpu_to_be64(mw_bind->bind_info.addr);
584 err = iwch_sgl2pbl_map(rhp, &sgl, 1, &pbl_addr, &page_size);
585 if (err) {
586 spin_unlock_irqrestore(&qhp->lock, flag);
587 return err;
588 }
589 wqe->send.wrid.id0.hi = qhp->wq.sq_wptr;
590 sqp = qhp->wq.sq + Q_PTR2IDX(qhp->wq.sq_wptr, qhp->wq.sq_size_log2);
591 sqp->wr_id = mw_bind->wr_id;
592 sqp->opcode = T3_BIND_MW;
593 sqp->sq_wptr = qhp->wq.sq_wptr;
594 sqp->complete = 0;
595 sqp->signaled = (mw_bind->send_flags & IB_SEND_SIGNALED);
596 wqe->bind.mr_pbl_addr = cpu_to_be32(pbl_addr);
597 wqe->bind.mr_pagesz = page_size;
598 build_fw_riwrh((void *)wqe, T3_WR_BIND, t3_wr_flags,
599 Q_GENBIT(qhp->wq.wptr, qhp->wq.size_log2), 0,
600 sizeof(struct t3_bind_mw_wr) >> 3, T3_SOPEOP);
601 ++(qhp->wq.wptr);
602 ++(qhp->wq.sq_wptr);
603 spin_unlock_irqrestore(&qhp->lock, flag);
604
605 if (cxio_wq_db_enabled(&qhp->wq))
606 ring_doorbell(qhp->wq.doorbell, qhp->wq.qpid);
607
608 return err;
609}
610
611static inline void build_term_codes(struct respQ_msg_t *rsp_msg, 529static inline void build_term_codes(struct respQ_msg_t *rsp_msg,
612 u8 *layer_type, u8 *ecode) 530 u8 *layer_type, u8 *ecode)
613{ 531{
diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c
index de9cd6901752..cf21df4a8bf5 100644
--- a/drivers/infiniband/hw/cxgb4/cq.c
+++ b/drivers/infiniband/hw/cxgb4/cq.c
@@ -744,9 +744,6 @@ static int c4iw_poll_cq_one(struct c4iw_cq *chp, struct ib_wc *wc)
744 case FW_RI_SEND_WITH_SE: 744 case FW_RI_SEND_WITH_SE:
745 wc->opcode = IB_WC_SEND; 745 wc->opcode = IB_WC_SEND;
746 break; 746 break;
747 case FW_RI_BIND_MW:
748 wc->opcode = IB_WC_BIND_MW;
749 break;
750 747
751 case FW_RI_LOCAL_INV: 748 case FW_RI_LOCAL_INV:
752 wc->opcode = IB_WC_LOCAL_INV; 749 wc->opcode = IB_WC_LOCAL_INV;
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
index dd00cf2d0b65..fb2de75a0392 100644
--- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
@@ -947,8 +947,6 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
947 struct ib_send_wr **bad_wr); 947 struct ib_send_wr **bad_wr);
948int c4iw_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr, 948int c4iw_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
949 struct ib_recv_wr **bad_wr); 949 struct ib_recv_wr **bad_wr);
950int c4iw_bind_mw(struct ib_qp *qp, struct ib_mw *mw,
951 struct ib_mw_bind *mw_bind);
952int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param); 950int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param);
953int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog); 951int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog);
954int c4iw_destroy_listen(struct iw_cm_id *cm_id); 952int c4iw_destroy_listen(struct iw_cm_id *cm_id);
diff --git a/drivers/infiniband/hw/cxgb4/provider.c b/drivers/infiniband/hw/cxgb4/provider.c
index 249ea57e1d99..ec04272fbdc2 100644
--- a/drivers/infiniband/hw/cxgb4/provider.c
+++ b/drivers/infiniband/hw/cxgb4/provider.c
@@ -552,7 +552,6 @@ int c4iw_register_device(struct c4iw_dev *dev)
552 dev->ibdev.reg_user_mr = c4iw_reg_user_mr; 552 dev->ibdev.reg_user_mr = c4iw_reg_user_mr;
553 dev->ibdev.dereg_mr = c4iw_dereg_mr; 553 dev->ibdev.dereg_mr = c4iw_dereg_mr;
554 dev->ibdev.alloc_mw = c4iw_alloc_mw; 554 dev->ibdev.alloc_mw = c4iw_alloc_mw;
555 dev->ibdev.bind_mw = c4iw_bind_mw;
556 dev->ibdev.dealloc_mw = c4iw_dealloc_mw; 555 dev->ibdev.dealloc_mw = c4iw_dealloc_mw;
557 dev->ibdev.alloc_mr = c4iw_alloc_mr; 556 dev->ibdev.alloc_mr = c4iw_alloc_mr;
558 dev->ibdev.map_mr_sg = c4iw_map_mr_sg; 557 dev->ibdev.map_mr_sg = c4iw_map_mr_sg;
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index aa515afee724..e99345eb875a 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -933,11 +933,6 @@ int c4iw_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
933 return err; 933 return err;
934} 934}
935 935
936int c4iw_bind_mw(struct ib_qp *qp, struct ib_mw *mw, struct ib_mw_bind *mw_bind)
937{
938 return -ENOSYS;
939}
940
941static inline void build_term_codes(struct t4_cqe *err_cqe, u8 *layer_type, 936static inline void build_term_codes(struct t4_cqe *err_cqe, u8 *layer_type,
942 u8 *ecode) 937 u8 *ecode)
943{ 938{
diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
index b88fc8f5ab18..9f8b516eb2b0 100644
--- a/drivers/infiniband/hw/mlx4/cq.c
+++ b/drivers/infiniband/hw/mlx4/cq.c
@@ -811,9 +811,6 @@ repoll:
811 wc->opcode = IB_WC_MASKED_FETCH_ADD; 811 wc->opcode = IB_WC_MASKED_FETCH_ADD;
812 wc->byte_len = 8; 812 wc->byte_len = 8;
813 break; 813 break;
814 case MLX4_OPCODE_BIND_MW:
815 wc->opcode = IB_WC_BIND_MW;
816 break;
817 case MLX4_OPCODE_LSO: 814 case MLX4_OPCODE_LSO:
818 wc->opcode = IB_WC_LSO; 815 wc->opcode = IB_WC_LSO;
819 break; 816 break;
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 97d6878f9938..627267f860f9 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -2283,7 +2283,6 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
2283 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_MEM_WINDOW || 2283 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_MEM_WINDOW ||
2284 dev->caps.bmme_flags & MLX4_BMME_FLAG_TYPE_2_WIN) { 2284 dev->caps.bmme_flags & MLX4_BMME_FLAG_TYPE_2_WIN) {
2285 ibdev->ib_dev.alloc_mw = mlx4_ib_alloc_mw; 2285 ibdev->ib_dev.alloc_mw = mlx4_ib_alloc_mw;
2286 ibdev->ib_dev.bind_mw = mlx4_ib_bind_mw;
2287 ibdev->ib_dev.dealloc_mw = mlx4_ib_dealloc_mw; 2286 ibdev->ib_dev.dealloc_mw = mlx4_ib_dealloc_mw;
2288 2287
2289 ibdev->ib_dev.uverbs_cmd_mask |= 2288 ibdev->ib_dev.uverbs_cmd_mask |=
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
index 1caa11edac03..8916e9b55324 100644
--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
@@ -704,8 +704,6 @@ struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
704 struct ib_udata *udata); 704 struct ib_udata *udata);
705int mlx4_ib_dereg_mr(struct ib_mr *mr); 705int mlx4_ib_dereg_mr(struct ib_mr *mr);
706struct ib_mw *mlx4_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type); 706struct ib_mw *mlx4_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type);
707int mlx4_ib_bind_mw(struct ib_qp *qp, struct ib_mw *mw,
708 struct ib_mw_bind *mw_bind);
709int mlx4_ib_dealloc_mw(struct ib_mw *mw); 707int mlx4_ib_dealloc_mw(struct ib_mw *mw);
710struct ib_mr *mlx4_ib_alloc_mr(struct ib_pd *pd, 708struct ib_mr *mlx4_ib_alloc_mr(struct ib_pd *pd,
711 enum ib_mr_type mr_type, 709 enum ib_mr_type mr_type,
diff --git a/drivers/infiniband/hw/mlx4/mr.c b/drivers/infiniband/hw/mlx4/mr.c
index 4d1e1c632603..242b94ec105b 100644
--- a/drivers/infiniband/hw/mlx4/mr.c
+++ b/drivers/infiniband/hw/mlx4/mr.c
@@ -366,28 +366,6 @@ err_free:
366 return ERR_PTR(err); 366 return ERR_PTR(err);
367} 367}
368 368
369int mlx4_ib_bind_mw(struct ib_qp *qp, struct ib_mw *mw,
370 struct ib_mw_bind *mw_bind)
371{
372 struct ib_bind_mw_wr wr;
373 struct ib_send_wr *bad_wr;
374 int ret;
375
376 memset(&wr, 0, sizeof(wr));
377 wr.wr.opcode = IB_WR_BIND_MW;
378 wr.wr.wr_id = mw_bind->wr_id;
379 wr.wr.send_flags = mw_bind->send_flags;
380 wr.mw = mw;
381 wr.bind_info = mw_bind->bind_info;
382 wr.rkey = ib_inc_rkey(mw->rkey);
383
384 ret = mlx4_ib_post_send(qp, &wr.wr, &bad_wr);
385 if (!ret)
386 mw->rkey = wr.rkey;
387
388 return ret;
389}
390
391int mlx4_ib_dealloc_mw(struct ib_mw *ibmw) 369int mlx4_ib_dealloc_mw(struct ib_mw *ibmw)
392{ 370{
393 struct mlx4_ib_mw *mw = to_mmw(ibmw); 371 struct mlx4_ib_mw *mw = to_mmw(ibmw);
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index 6049644da429..3f5d2af5f8e4 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -115,7 +115,6 @@ static const __be32 mlx4_ib_opcode[] = {
115 [IB_WR_REG_MR] = cpu_to_be32(MLX4_OPCODE_FMR), 115 [IB_WR_REG_MR] = cpu_to_be32(MLX4_OPCODE_FMR),
116 [IB_WR_MASKED_ATOMIC_CMP_AND_SWP] = cpu_to_be32(MLX4_OPCODE_MASKED_ATOMIC_CS), 116 [IB_WR_MASKED_ATOMIC_CMP_AND_SWP] = cpu_to_be32(MLX4_OPCODE_MASKED_ATOMIC_CS),
117 [IB_WR_MASKED_ATOMIC_FETCH_AND_ADD] = cpu_to_be32(MLX4_OPCODE_MASKED_ATOMIC_FA), 117 [IB_WR_MASKED_ATOMIC_FETCH_AND_ADD] = cpu_to_be32(MLX4_OPCODE_MASKED_ATOMIC_FA),
118 [IB_WR_BIND_MW] = cpu_to_be32(MLX4_OPCODE_BIND_MW),
119}; 118};
120 119
121static struct mlx4_ib_sqp *to_msqp(struct mlx4_ib_qp *mqp) 120static struct mlx4_ib_sqp *to_msqp(struct mlx4_ib_qp *mqp)
@@ -2531,25 +2530,6 @@ static void set_reg_seg(struct mlx4_wqe_fmr_seg *fseg,
2531 fseg->reserved[1] = 0; 2530 fseg->reserved[1] = 0;
2532} 2531}
2533 2532
2534static void set_bind_seg(struct mlx4_wqe_bind_seg *bseg,
2535 struct ib_bind_mw_wr *wr)
2536{
2537 bseg->flags1 =
2538 convert_access(wr->bind_info.mw_access_flags) &
2539 cpu_to_be32(MLX4_WQE_FMR_AND_BIND_PERM_REMOTE_READ |
2540 MLX4_WQE_FMR_AND_BIND_PERM_REMOTE_WRITE |
2541 MLX4_WQE_FMR_AND_BIND_PERM_ATOMIC);
2542 bseg->flags2 = 0;
2543 if (wr->mw->type == IB_MW_TYPE_2)
2544 bseg->flags2 |= cpu_to_be32(MLX4_WQE_BIND_TYPE_2);
2545 if (wr->bind_info.mw_access_flags & IB_ZERO_BASED)
2546 bseg->flags2 |= cpu_to_be32(MLX4_WQE_BIND_ZERO_BASED);
2547 bseg->new_rkey = cpu_to_be32(wr->rkey);
2548 bseg->lkey = cpu_to_be32(wr->bind_info.mr->lkey);
2549 bseg->addr = cpu_to_be64(wr->bind_info.addr);
2550 bseg->length = cpu_to_be64(wr->bind_info.length);
2551}
2552
2553static void set_local_inv_seg(struct mlx4_wqe_local_inval_seg *iseg, u32 rkey) 2533static void set_local_inv_seg(struct mlx4_wqe_local_inval_seg *iseg, u32 rkey)
2554{ 2534{
2555 memset(iseg, 0, sizeof(*iseg)); 2535 memset(iseg, 0, sizeof(*iseg));
@@ -2870,13 +2850,6 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
2870 size += sizeof(struct mlx4_wqe_fmr_seg) / 16; 2850 size += sizeof(struct mlx4_wqe_fmr_seg) / 16;
2871 break; 2851 break;
2872 2852
2873 case IB_WR_BIND_MW:
2874 ctrl->srcrb_flags |=
2875 cpu_to_be32(MLX4_WQE_CTRL_STRONG_ORDER);
2876 set_bind_seg(wqe, bind_mw_wr(wr));
2877 wqe += sizeof(struct mlx4_wqe_bind_seg);
2878 size += sizeof(struct mlx4_wqe_bind_seg) / 16;
2879 break;
2880 default: 2853 default:
2881 /* No extra segments required for sends */ 2854 /* No extra segments required for sends */
2882 break; 2855 break;
diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
index 3ce5cfa7a4e0..db2270ad21b2 100644
--- a/drivers/infiniband/hw/mlx5/cq.c
+++ b/drivers/infiniband/hw/mlx5/cq.c
@@ -154,9 +154,6 @@ static void handle_good_req(struct ib_wc *wc, struct mlx5_cqe64 *cqe,
154 wc->opcode = IB_WC_MASKED_FETCH_ADD; 154 wc->opcode = IB_WC_MASKED_FETCH_ADD;
155 wc->byte_len = 8; 155 wc->byte_len = 8;
156 break; 156 break;
157 case MLX5_OPCODE_BIND_MW:
158 wc->opcode = IB_WC_BIND_MW;
159 break;
160 case MLX5_OPCODE_UMR: 157 case MLX5_OPCODE_UMR:
161 wc->opcode = get_umr_comp(wq, idx); 158 wc->opcode = get_umr_comp(wq, idx);
162 break; 159 break;
diff --git a/drivers/infiniband/hw/mthca/mthca_cq.c b/drivers/infiniband/hw/mthca/mthca_cq.c
index 40ba83338155..a6531ffe29a6 100644
--- a/drivers/infiniband/hw/mthca/mthca_cq.c
+++ b/drivers/infiniband/hw/mthca/mthca_cq.c
@@ -608,9 +608,6 @@ static inline int mthca_poll_one(struct mthca_dev *dev,
608 entry->opcode = IB_WC_FETCH_ADD; 608 entry->opcode = IB_WC_FETCH_ADD;
609 entry->byte_len = MTHCA_ATOMIC_BYTE_LEN; 609 entry->byte_len = MTHCA_ATOMIC_BYTE_LEN;
610 break; 610 break;
611 case MTHCA_OPCODE_BIND_MW:
612 entry->opcode = IB_WC_BIND_MW;
613 break;
614 default: 611 default:
615 entry->opcode = MTHCA_OPCODE_INVALID; 612 entry->opcode = MTHCA_OPCODE_INVALID;
616 break; 613 break;
diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
index f76358c39ebb..c8c661ebe3db 100644
--- a/drivers/infiniband/hw/nes/nes_verbs.c
+++ b/drivers/infiniband/hw/nes/nes_verbs.c
@@ -206,80 +206,6 @@ static int nes_dealloc_mw(struct ib_mw *ibmw)
206} 206}
207 207
208 208
209/**
210 * nes_bind_mw
211 */
212static int nes_bind_mw(struct ib_qp *ibqp, struct ib_mw *ibmw,
213 struct ib_mw_bind *ibmw_bind)
214{
215 u64 u64temp;
216 struct nes_vnic *nesvnic = to_nesvnic(ibqp->device);
217 struct nes_device *nesdev = nesvnic->nesdev;
218 /* struct nes_mr *nesmr = to_nesmw(ibmw); */
219 struct nes_qp *nesqp = to_nesqp(ibqp);
220 struct nes_hw_qp_wqe *wqe;
221 unsigned long flags = 0;
222 u32 head;
223 u32 wqe_misc = 0;
224 u32 qsize;
225
226 if (nesqp->ibqp_state > IB_QPS_RTS)
227 return -EINVAL;
228
229 spin_lock_irqsave(&nesqp->lock, flags);
230
231 head = nesqp->hwqp.sq_head;
232 qsize = nesqp->hwqp.sq_tail;
233
234 /* Check for SQ overflow */
235 if (((head + (2 * qsize) - nesqp->hwqp.sq_tail) % qsize) == (qsize - 1)) {
236 spin_unlock_irqrestore(&nesqp->lock, flags);
237 return -ENOMEM;
238 }
239
240 wqe = &nesqp->hwqp.sq_vbase[head];
241 /* nes_debug(NES_DBG_MR, "processing sq wqe at %p, head = %u.\n", wqe, head); */
242 nes_fill_init_qp_wqe(wqe, nesqp, head);
243 u64temp = ibmw_bind->wr_id;
244 set_wqe_64bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_COMP_SCRATCH_LOW_IDX, u64temp);
245 wqe_misc = NES_IWARP_SQ_OP_BIND;
246
247 wqe_misc |= NES_IWARP_SQ_WQE_LOCAL_FENCE;
248
249 if (ibmw_bind->send_flags & IB_SEND_SIGNALED)
250 wqe_misc |= NES_IWARP_SQ_WQE_SIGNALED_COMPL;
251
252 if (ibmw_bind->bind_info.mw_access_flags & IB_ACCESS_REMOTE_WRITE)
253 wqe_misc |= NES_CQP_STAG_RIGHTS_REMOTE_WRITE;
254 if (ibmw_bind->bind_info.mw_access_flags & IB_ACCESS_REMOTE_READ)
255 wqe_misc |= NES_CQP_STAG_RIGHTS_REMOTE_READ;
256
257 set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_MISC_IDX, wqe_misc);
258 set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_BIND_WQE_MR_IDX,
259 ibmw_bind->bind_info.mr->lkey);
260 set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_BIND_WQE_MW_IDX, ibmw->rkey);
261 set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_BIND_WQE_LENGTH_LOW_IDX,
262 ibmw_bind->bind_info.length);
263 wqe->wqe_words[NES_IWARP_SQ_BIND_WQE_LENGTH_HIGH_IDX] = 0;
264 u64temp = (u64)ibmw_bind->bind_info.addr;
265 set_wqe_64bit_value(wqe->wqe_words, NES_IWARP_SQ_BIND_WQE_VA_FBO_LOW_IDX, u64temp);
266
267 head++;
268 if (head >= qsize)
269 head = 0;
270
271 nesqp->hwqp.sq_head = head;
272 barrier();
273
274 nes_write32(nesdev->regs+NES_WQE_ALLOC,
275 (1 << 24) | 0x00800000 | nesqp->hwqp.qp_id);
276
277 spin_unlock_irqrestore(&nesqp->lock, flags);
278
279 return 0;
280}
281
282
283/* 209/*
284 * nes_alloc_fast_mr 210 * nes_alloc_fast_mr
285 */ 211 */
@@ -3892,7 +3818,6 @@ struct nes_ib_device *nes_init_ofa_device(struct net_device *netdev)
3892 nesibdev->ibdev.dereg_mr = nes_dereg_mr; 3818 nesibdev->ibdev.dereg_mr = nes_dereg_mr;
3893 nesibdev->ibdev.alloc_mw = nes_alloc_mw; 3819 nesibdev->ibdev.alloc_mw = nes_alloc_mw;
3894 nesibdev->ibdev.dealloc_mw = nes_dealloc_mw; 3820 nesibdev->ibdev.dealloc_mw = nes_dealloc_mw;
3895 nesibdev->ibdev.bind_mw = nes_bind_mw;
3896 3821
3897 nesibdev->ibdev.alloc_mr = nes_alloc_mr; 3822 nesibdev->ibdev.alloc_mr = nes_alloc_mr;
3898 nesibdev->ibdev.map_mr_sg = nes_map_mr_sg; 3823 nesibdev->ibdev.map_mr_sg = nes_map_mr_sg;
diff --git a/drivers/staging/rdma/amso1100/c2_cq.c b/drivers/staging/rdma/amso1100/c2_cq.c
index 3ef881f2da0f..7ad0c082485a 100644
--- a/drivers/staging/rdma/amso1100/c2_cq.c
+++ b/drivers/staging/rdma/amso1100/c2_cq.c
@@ -173,9 +173,6 @@ static inline int c2_poll_one(struct c2_dev *c2dev,
173 case C2_WR_TYPE_RDMA_READ: 173 case C2_WR_TYPE_RDMA_READ:
174 entry->opcode = IB_WC_RDMA_READ; 174 entry->opcode = IB_WC_RDMA_READ;
175 break; 175 break;
176 case C2_WR_TYPE_BIND_MW:
177 entry->opcode = IB_WC_BIND_MW;
178 break;
179 case C2_WR_TYPE_RECV: 176 case C2_WR_TYPE_RECV:
180 entry->byte_len = be32_to_cpu(ce->bytes_rcvd); 177 entry->byte_len = be32_to_cpu(ce->bytes_rcvd);
181 entry->opcode = IB_WC_RECV; 178 entry->opcode = IB_WC_RECV;
diff --git a/drivers/staging/rdma/ehca/ehca_iverbs.h b/drivers/staging/rdma/ehca/ehca_iverbs.h
index 17ab33b76882..cca5933fcda6 100644
--- a/drivers/staging/rdma/ehca/ehca_iverbs.h
+++ b/drivers/staging/rdma/ehca/ehca_iverbs.h
@@ -88,9 +88,6 @@ int ehca_dereg_mr(struct ib_mr *mr);
88 88
89struct ib_mw *ehca_alloc_mw(struct ib_pd *pd, enum ib_mw_type type); 89struct ib_mw *ehca_alloc_mw(struct ib_pd *pd, enum ib_mw_type type);
90 90
91int ehca_bind_mw(struct ib_qp *qp, struct ib_mw *mw,
92 struct ib_mw_bind *mw_bind);
93
94int ehca_dealloc_mw(struct ib_mw *mw); 91int ehca_dealloc_mw(struct ib_mw *mw);
95 92
96struct ib_fmr *ehca_alloc_fmr(struct ib_pd *pd, 93struct ib_fmr *ehca_alloc_fmr(struct ib_pd *pd,
diff --git a/drivers/staging/rdma/ehca/ehca_main.c b/drivers/staging/rdma/ehca/ehca_main.c
index 8e920a31d5e7..963274ee652a 100644
--- a/drivers/staging/rdma/ehca/ehca_main.c
+++ b/drivers/staging/rdma/ehca/ehca_main.c
@@ -515,7 +515,6 @@ static int ehca_init_device(struct ehca_shca *shca)
515 shca->ib_device.reg_user_mr = ehca_reg_user_mr; 515 shca->ib_device.reg_user_mr = ehca_reg_user_mr;
516 shca->ib_device.dereg_mr = ehca_dereg_mr; 516 shca->ib_device.dereg_mr = ehca_dereg_mr;
517 shca->ib_device.alloc_mw = ehca_alloc_mw; 517 shca->ib_device.alloc_mw = ehca_alloc_mw;
518 shca->ib_device.bind_mw = ehca_bind_mw;
519 shca->ib_device.dealloc_mw = ehca_dealloc_mw; 518 shca->ib_device.dealloc_mw = ehca_dealloc_mw;
520 shca->ib_device.alloc_fmr = ehca_alloc_fmr; 519 shca->ib_device.alloc_fmr = ehca_alloc_fmr;
521 shca->ib_device.map_phys_fmr = ehca_map_phys_fmr; 520 shca->ib_device.map_phys_fmr = ehca_map_phys_fmr;
diff --git a/drivers/staging/rdma/ehca/ehca_mrmw.c b/drivers/staging/rdma/ehca/ehca_mrmw.c
index 1c1a8dd09ac5..c6e324522566 100644
--- a/drivers/staging/rdma/ehca/ehca_mrmw.c
+++ b/drivers/staging/rdma/ehca/ehca_mrmw.c
@@ -413,18 +413,6 @@ alloc_mw_exit0:
413 413
414/*----------------------------------------------------------------------*/ 414/*----------------------------------------------------------------------*/
415 415
416int ehca_bind_mw(struct ib_qp *qp,
417 struct ib_mw *mw,
418 struct ib_mw_bind *mw_bind)
419{
420 /* TODO: not supported up to now */
421 ehca_gen_err("bind MW currently not supported by HCAD");
422
423 return -EPERM;
424} /* end ehca_bind_mw() */
425
426/*----------------------------------------------------------------------*/
427
428int ehca_dealloc_mw(struct ib_mw *mw) 416int ehca_dealloc_mw(struct ib_mw *mw)
429{ 417{
430 u64 h_ret; 418 u64 h_ret;
diff --git a/drivers/staging/rdma/ehca/ehca_reqs.c b/drivers/staging/rdma/ehca/ehca_reqs.c
index 10e2074384f5..11813b880e16 100644
--- a/drivers/staging/rdma/ehca/ehca_reqs.c
+++ b/drivers/staging/rdma/ehca/ehca_reqs.c
@@ -614,7 +614,6 @@ int ehca_post_srq_recv(struct ib_srq *srq,
614static const u8 ib_wc_opcode[255] = { 614static const u8 ib_wc_opcode[255] = {
615 [0x01] = IB_WC_RECV+1, 615 [0x01] = IB_WC_RECV+1,
616 [0x02] = IB_WC_RECV_RDMA_WITH_IMM+1, 616 [0x02] = IB_WC_RECV_RDMA_WITH_IMM+1,
617 [0x04] = IB_WC_BIND_MW+1,
618 [0x08] = IB_WC_FETCH_ADD+1, 617 [0x08] = IB_WC_FETCH_ADD+1,
619 [0x10] = IB_WC_COMP_SWAP+1, 618 [0x10] = IB_WC_COMP_SWAP+1,
620 [0x20] = IB_WC_RDMA_WRITE+1, 619 [0x20] = IB_WC_RDMA_WRITE+1,
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index 86970f3e90b4..177844265c98 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -812,7 +812,6 @@ enum ib_wc_opcode {
812 IB_WC_RDMA_READ, 812 IB_WC_RDMA_READ,
813 IB_WC_COMP_SWAP, 813 IB_WC_COMP_SWAP,
814 IB_WC_FETCH_ADD, 814 IB_WC_FETCH_ADD,
815 IB_WC_BIND_MW,
816 IB_WC_LSO, 815 IB_WC_LSO,
817 IB_WC_LOCAL_INV, 816 IB_WC_LOCAL_INV,
818 IB_WC_REG_MR, 817 IB_WC_REG_MR,
@@ -1110,7 +1109,6 @@ enum ib_wr_opcode {
1110 IB_WR_REG_MR, 1109 IB_WR_REG_MR,
1111 IB_WR_MASKED_ATOMIC_CMP_AND_SWP, 1110 IB_WR_MASKED_ATOMIC_CMP_AND_SWP,
1112 IB_WR_MASKED_ATOMIC_FETCH_AND_ADD, 1111 IB_WR_MASKED_ATOMIC_FETCH_AND_ADD,
1113 IB_WR_BIND_MW,
1114 IB_WR_REG_SIG_MR, 1112 IB_WR_REG_SIG_MR,
1115 /* reserve values for low level drivers' internal use. 1113 /* reserve values for low level drivers' internal use.
1116 * These values will not be used at all in the ib core layer. 1114 * These values will not be used at all in the ib core layer.
@@ -1145,23 +1143,6 @@ struct ib_sge {
 	u32	lkey;
 };
 
-/**
- * struct ib_mw_bind_info - Parameters for a memory window bind operation.
- * @mr: A memory region to bind the memory window to.
- * @addr: The address where the memory window should begin.
- * @length: The length of the memory window, in bytes.
- * @mw_access_flags: Access flags from enum ib_access_flags for the window.
- *
- * This struct contains the shared parameters for type 1 and type 2
- * memory window bind operations.
- */
-struct ib_mw_bind_info {
-	struct ib_mr	*mr;
-	u64		addr;
-	u64		length;
-	int		mw_access_flags;
-};
-
 struct ib_cqe {
 	void (*done)(struct ib_cq *cq, struct ib_wc *wc);
 };
@@ -1237,19 +1218,6 @@ static inline struct ib_reg_wr *reg_wr(struct ib_send_wr *wr)
 	return container_of(wr, struct ib_reg_wr, wr);
 }
 
-struct ib_bind_mw_wr {
-	struct ib_send_wr	wr;
-	struct ib_mw		*mw;
-	/* The new rkey for the memory window. */
-	u32			rkey;
-	struct ib_mw_bind_info	bind_info;
-};
-
-static inline struct ib_bind_mw_wr *bind_mw_wr(struct ib_send_wr *wr)
-{
-	return container_of(wr, struct ib_bind_mw_wr, wr);
-}
-
 struct ib_sig_handover_wr {
 	struct ib_send_wr	wr;
 	struct ib_sig_attrs    *sig_attrs;
@@ -1299,18 +1267,6 @@ enum ib_mr_rereg_flags {
 	IB_MR_REREG_SUPPORTED = ((IB_MR_REREG_ACCESS << 1) - 1)
 };
 
-/**
- * struct ib_mw_bind - Parameters for a type 1 memory window bind operation.
- * @wr_id:      Work request id.
- * @send_flags: Flags from ib_send_flags enum.
- * @bind_info:  More parameters of the bind operation.
- */
-struct ib_mw_bind {
-	u64			wr_id;
-	int			send_flags;
-	struct ib_mw_bind_info	bind_info;
-};
-
 struct ib_fmr_attr {
 	int	max_pages;
 	int	max_maps;
@@ -1845,9 +1801,6 @@ struct ib_device {
 						      int sg_nents);
 	struct ib_mw *             (*alloc_mw)(struct ib_pd *pd,
 					       enum ib_mw_type type);
-	int                        (*bind_mw)(struct ib_qp *qp,
-					      struct ib_mw *mw,
-					      struct ib_mw_bind *mw_bind);
 	int                        (*dealloc_mw)(struct ib_mw *mw);
 	struct ib_fmr *            (*alloc_fmr)(struct ib_pd *pd,
 						int mr_access_flags,
@@ -2976,42 +2929,6 @@ static inline u32 ib_inc_rkey(u32 rkey)
 }
 
 /**
- * ib_alloc_mw - Allocates a memory window.
- * @pd: The protection domain associated with the memory window.
- * @type: The type of the memory window (1 or 2).
- */
-struct ib_mw *ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type);
-
-/**
- * ib_bind_mw - Posts a work request to the send queue of the specified
- * QP, which binds the memory window to the given address range and
- * remote access attributes.
- * @qp: QP to post the bind work request on.
- * @mw: The memory window to bind.
- * @mw_bind: Specifies information about the memory window, including
- *   its address range, remote access rights, and associated memory region.
- *
- * If there is no immediate error, the function will update the rkey member
- * of the mw parameter to its new value. The bind operation can still fail
- * asynchronously.
- */
-static inline int ib_bind_mw(struct ib_qp *qp,
-			     struct ib_mw *mw,
-			     struct ib_mw_bind *mw_bind)
-{
-	/* XXX reference counting in corresponding MR? */
-	return mw->device->bind_mw ?
-		mw->device->bind_mw(qp, mw, mw_bind) :
-		-ENOSYS;
-}
-
-/**
- * ib_dealloc_mw - Deallocates a memory window.
- * @mw: The memory window to deallocate.
- */
-int ib_dealloc_mw(struct ib_mw *mw);
-
-/**
  * ib_alloc_fmr - Allocates a unmapped fast memory region.
  * @pd: The protection domain associated with the unmapped region.
  * @mr_access_flags: Specifies the memory access rights.