aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorRoland Dreier <rolandd@cisco.com>2008-04-17 00:09:32 -0400
committerRoland Dreier <rolandd@cisco.com>2008-04-17 00:09:32 -0400
commit0f39cf3d54e67a705773fd0ec56ca3dcd3e9272f (patch)
tree83f19f0014d0e880fb245906105e903dd6d733d5
parente7eacd36865ae0707f5efae8e4dda421ffcd1b66 (diff)
IB/core: Add support for "send with invalidate" work requests
Add a new IB_WR_SEND_WITH_INV send opcode that can be used to mark a "send with invalidate" work request as defined in the iWARP verbs and the InfiniBand base memory management extensions. Also put "imm_data" and a new "invalidate_rkey" member in a new "ex" union in struct ib_send_wr. The invalidate_rkey member can be used to pass in an R_Key/STag to be invalidated. Add this new union to struct ib_uverbs_send_wr. Add code to copy the invalidate_rkey field in ib_uverbs_post_send(). Fix up low-level drivers to deal with the change to struct ib_send_wr, and just remove the imm_data initialization from net/sunrpc/xprtrdma/, since that code never does any send with immediate operations. Also, move the existing IB_DEVICE_SEND_W_INV flag to a new bit, since the iWARP drivers currently in the tree set the bit. The amso1100 driver at least will silently fail to honor the IB_SEND_INVALIDATE bit if passed in as part of userspace send requests (since it does not implement kernel bypass work request queueing). Remove the flag from all existing drivers that set it until we know which ones are OK. The value chosen for the new flag is not consecutive to avoid clashing with flags defined in the XRC patches, which are not merged yet but which are already in use and are likely to be merged soon. This resurrects a patch sent long ago by Mikkel Hagen <mhagen@iol.unh.edu>. Signed-off-by: Roland Dreier <rolandd@cisco.com>
-rw-r--r--drivers/infiniband/core/uverbs_cmd.c13
-rw-r--r--drivers/infiniband/hw/amso1100/c2_rnic.c2
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_provider.c3
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_qp.c4
-rw-r--r--drivers/infiniband/hw/ehca/ehca_reqs.c2
-rw-r--r--drivers/infiniband/hw/ipath/ipath_rc.c8
-rw-r--r--drivers/infiniband/hw/ipath/ipath_ruc.c4
-rw-r--r--drivers/infiniband/hw/ipath/ipath_uc.c8
-rw-r--r--drivers/infiniband/hw/ipath/ipath_ud.c4
-rw-r--r--drivers/infiniband/hw/mlx4/qp.c4
-rw-r--r--drivers/infiniband/hw/mthca/mthca_qp.c6
-rw-r--r--drivers/infiniband/hw/nes/nes_hw.c2
-rw-r--r--include/rdma/ib_user_verbs.h5
-rw-r--r--include/rdma/ib_verbs.h11
-rw-r--r--net/sunrpc/xprtrdma/verbs.c1
15 files changed, 46 insertions, 31 deletions
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index 9e98cec6230f..2c3bff5fe867 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -1463,7 +1463,6 @@ ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file,
1463 next->num_sge = user_wr->num_sge; 1463 next->num_sge = user_wr->num_sge;
1464 next->opcode = user_wr->opcode; 1464 next->opcode = user_wr->opcode;
1465 next->send_flags = user_wr->send_flags; 1465 next->send_flags = user_wr->send_flags;
1466 next->imm_data = (__be32 __force) user_wr->imm_data;
1467 1466
1468 if (is_ud) { 1467 if (is_ud) {
1469 next->wr.ud.ah = idr_read_ah(user_wr->wr.ud.ah, 1468 next->wr.ud.ah = idr_read_ah(user_wr->wr.ud.ah,
@@ -1476,14 +1475,24 @@ ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file,
1476 next->wr.ud.remote_qkey = user_wr->wr.ud.remote_qkey; 1475 next->wr.ud.remote_qkey = user_wr->wr.ud.remote_qkey;
1477 } else { 1476 } else {
1478 switch (next->opcode) { 1477 switch (next->opcode) {
1479 case IB_WR_RDMA_WRITE:
1480 case IB_WR_RDMA_WRITE_WITH_IMM: 1478 case IB_WR_RDMA_WRITE_WITH_IMM:
1479 next->ex.imm_data =
1480 (__be32 __force) user_wr->ex.imm_data;
1481 case IB_WR_RDMA_WRITE:
1481 case IB_WR_RDMA_READ: 1482 case IB_WR_RDMA_READ:
1482 next->wr.rdma.remote_addr = 1483 next->wr.rdma.remote_addr =
1483 user_wr->wr.rdma.remote_addr; 1484 user_wr->wr.rdma.remote_addr;
1484 next->wr.rdma.rkey = 1485 next->wr.rdma.rkey =
1485 user_wr->wr.rdma.rkey; 1486 user_wr->wr.rdma.rkey;
1486 break; 1487 break;
1488 case IB_WR_SEND_WITH_IMM:
1489 next->ex.imm_data =
1490 (__be32 __force) user_wr->ex.imm_data;
1491 break;
1492 case IB_WR_SEND_WITH_INV:
1493 next->ex.invalidate_rkey =
1494 user_wr->ex.invalidate_rkey;
1495 break;
1487 case IB_WR_ATOMIC_CMP_AND_SWP: 1496 case IB_WR_ATOMIC_CMP_AND_SWP:
1488 case IB_WR_ATOMIC_FETCH_AND_ADD: 1497 case IB_WR_ATOMIC_FETCH_AND_ADD:
1489 next->wr.atomic.remote_addr = 1498 next->wr.atomic.remote_addr =
diff --git a/drivers/infiniband/hw/amso1100/c2_rnic.c b/drivers/infiniband/hw/amso1100/c2_rnic.c
index 7a625524e0c5..b1441aeb60c2 100644
--- a/drivers/infiniband/hw/amso1100/c2_rnic.c
+++ b/drivers/infiniband/hw/amso1100/c2_rnic.c
@@ -455,7 +455,7 @@ int __devinit c2_rnic_init(struct c2_dev *c2dev)
455 IB_DEVICE_CURR_QP_STATE_MOD | 455 IB_DEVICE_CURR_QP_STATE_MOD |
456 IB_DEVICE_SYS_IMAGE_GUID | 456 IB_DEVICE_SYS_IMAGE_GUID |
457 IB_DEVICE_ZERO_STAG | 457 IB_DEVICE_ZERO_STAG |
458 IB_DEVICE_SEND_W_INV | IB_DEVICE_MEM_WINDOW); 458 IB_DEVICE_MEM_WINDOW);
459 459
460 /* Allocate the qptr_array */ 460 /* Allocate the qptr_array */
461 c2dev->qptr_array = vmalloc(C2_MAX_CQS * sizeof(void *)); 461 c2dev->qptr_array = vmalloc(C2_MAX_CQS * sizeof(void *));
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c
index 50e1f2a16e0c..ca7265443c05 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c
@@ -1109,8 +1109,7 @@ int iwch_register_device(struct iwch_dev *dev)
1109 memcpy(&dev->ibdev.node_guid, dev->rdev.t3cdev_p->lldev->dev_addr, 6); 1109 memcpy(&dev->ibdev.node_guid, dev->rdev.t3cdev_p->lldev->dev_addr, 6);
1110 dev->ibdev.owner = THIS_MODULE; 1110 dev->ibdev.owner = THIS_MODULE;
1111 dev->device_cap_flags = 1111 dev->device_cap_flags =
1112 (IB_DEVICE_ZERO_STAG | 1112 (IB_DEVICE_ZERO_STAG | IB_DEVICE_MEM_WINDOW);
1113 IB_DEVICE_SEND_W_INV | IB_DEVICE_MEM_WINDOW);
1114 1113
1115 dev->ibdev.uverbs_cmd_mask = 1114 dev->ibdev.uverbs_cmd_mask =
1116 (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) | 1115 (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
diff --git a/drivers/infiniband/hw/cxgb3/iwch_qp.c b/drivers/infiniband/hw/cxgb3/iwch_qp.c
index bc5d9b0813e5..8891c3b0a3d5 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_qp.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_qp.c
@@ -72,7 +72,7 @@ static int iwch_build_rdma_send(union t3_wr *wqe, struct ib_send_wr *wr,
72 wqe->send.reserved[2] = 0; 72 wqe->send.reserved[2] = 0;
73 if (wr->opcode == IB_WR_SEND_WITH_IMM) { 73 if (wr->opcode == IB_WR_SEND_WITH_IMM) {
74 plen = 4; 74 plen = 4;
75 wqe->send.sgl[0].stag = wr->imm_data; 75 wqe->send.sgl[0].stag = wr->ex.imm_data;
76 wqe->send.sgl[0].len = __constant_cpu_to_be32(0); 76 wqe->send.sgl[0].len = __constant_cpu_to_be32(0);
77 wqe->send.num_sgle = __constant_cpu_to_be32(0); 77 wqe->send.num_sgle = __constant_cpu_to_be32(0);
78 *flit_cnt = 5; 78 *flit_cnt = 5;
@@ -112,7 +112,7 @@ static int iwch_build_rdma_write(union t3_wr *wqe, struct ib_send_wr *wr,
112 112
113 if (wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM) { 113 if (wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM) {
114 plen = 4; 114 plen = 4;
115 wqe->write.sgl[0].stag = wr->imm_data; 115 wqe->write.sgl[0].stag = wr->ex.imm_data;
116 wqe->write.sgl[0].len = __constant_cpu_to_be32(0); 116 wqe->write.sgl[0].len = __constant_cpu_to_be32(0);
117 wqe->write.num_sgle = __constant_cpu_to_be32(0); 117 wqe->write.num_sgle = __constant_cpu_to_be32(0);
118 *flit_cnt = 6; 118 *flit_cnt = 6;
diff --git a/drivers/infiniband/hw/ehca/ehca_reqs.c b/drivers/infiniband/hw/ehca/ehca_reqs.c
index 2ce8cffb8664..a20bbf466188 100644
--- a/drivers/infiniband/hw/ehca/ehca_reqs.c
+++ b/drivers/infiniband/hw/ehca/ehca_reqs.c
@@ -188,7 +188,7 @@ static inline int ehca_write_swqe(struct ehca_qp *qp,
188 if (send_wr->opcode == IB_WR_SEND_WITH_IMM || 188 if (send_wr->opcode == IB_WR_SEND_WITH_IMM ||
189 send_wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM) { 189 send_wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM) {
190 /* this might not work as long as HW does not support it */ 190 /* this might not work as long as HW does not support it */
191 wqe_p->immediate_data = be32_to_cpu(send_wr->imm_data); 191 wqe_p->immediate_data = be32_to_cpu(send_wr->ex.imm_data);
192 wqe_p->wr_flag |= WQE_WRFLAG_IMM_DATA_PRESENT; 192 wqe_p->wr_flag |= WQE_WRFLAG_IMM_DATA_PRESENT;
193 } 193 }
194 194
diff --git a/drivers/infiniband/hw/ipath/ipath_rc.c b/drivers/infiniband/hw/ipath/ipath_rc.c
index 467981905bbe..c405dfba5531 100644
--- a/drivers/infiniband/hw/ipath/ipath_rc.c
+++ b/drivers/infiniband/hw/ipath/ipath_rc.c
@@ -308,7 +308,7 @@ int ipath_make_rc_req(struct ipath_qp *qp)
308 else { 308 else {
309 qp->s_state = OP(SEND_ONLY_WITH_IMMEDIATE); 309 qp->s_state = OP(SEND_ONLY_WITH_IMMEDIATE);
310 /* Immediate data comes after the BTH */ 310 /* Immediate data comes after the BTH */
311 ohdr->u.imm_data = wqe->wr.imm_data; 311 ohdr->u.imm_data = wqe->wr.ex.imm_data;
312 hwords += 1; 312 hwords += 1;
313 } 313 }
314 if (wqe->wr.send_flags & IB_SEND_SOLICITED) 314 if (wqe->wr.send_flags & IB_SEND_SOLICITED)
@@ -346,7 +346,7 @@ int ipath_make_rc_req(struct ipath_qp *qp)
346 qp->s_state = 346 qp->s_state =
347 OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE); 347 OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE);
348 /* Immediate data comes after RETH */ 348 /* Immediate data comes after RETH */
349 ohdr->u.rc.imm_data = wqe->wr.imm_data; 349 ohdr->u.rc.imm_data = wqe->wr.ex.imm_data;
350 hwords += 1; 350 hwords += 1;
351 if (wqe->wr.send_flags & IB_SEND_SOLICITED) 351 if (wqe->wr.send_flags & IB_SEND_SOLICITED)
352 bth0 |= 1 << 23; 352 bth0 |= 1 << 23;
@@ -490,7 +490,7 @@ int ipath_make_rc_req(struct ipath_qp *qp)
490 else { 490 else {
491 qp->s_state = OP(SEND_LAST_WITH_IMMEDIATE); 491 qp->s_state = OP(SEND_LAST_WITH_IMMEDIATE);
492 /* Immediate data comes after the BTH */ 492 /* Immediate data comes after the BTH */
493 ohdr->u.imm_data = wqe->wr.imm_data; 493 ohdr->u.imm_data = wqe->wr.ex.imm_data;
494 hwords += 1; 494 hwords += 1;
495 } 495 }
496 if (wqe->wr.send_flags & IB_SEND_SOLICITED) 496 if (wqe->wr.send_flags & IB_SEND_SOLICITED)
@@ -526,7 +526,7 @@ int ipath_make_rc_req(struct ipath_qp *qp)
526 else { 526 else {
527 qp->s_state = OP(RDMA_WRITE_LAST_WITH_IMMEDIATE); 527 qp->s_state = OP(RDMA_WRITE_LAST_WITH_IMMEDIATE);
528 /* Immediate data comes after the BTH */ 528 /* Immediate data comes after the BTH */
529 ohdr->u.imm_data = wqe->wr.imm_data; 529 ohdr->u.imm_data = wqe->wr.ex.imm_data;
530 hwords += 1; 530 hwords += 1;
531 if (wqe->wr.send_flags & IB_SEND_SOLICITED) 531 if (wqe->wr.send_flags & IB_SEND_SOLICITED)
532 bth0 |= 1 << 23; 532 bth0 |= 1 << 23;
diff --git a/drivers/infiniband/hw/ipath/ipath_ruc.c b/drivers/infiniband/hw/ipath/ipath_ruc.c
index bcaa2914e341..8ac5c1d82ccd 100644
--- a/drivers/infiniband/hw/ipath/ipath_ruc.c
+++ b/drivers/infiniband/hw/ipath/ipath_ruc.c
@@ -310,7 +310,7 @@ again:
310 switch (wqe->wr.opcode) { 310 switch (wqe->wr.opcode) {
311 case IB_WR_SEND_WITH_IMM: 311 case IB_WR_SEND_WITH_IMM:
312 wc.wc_flags = IB_WC_WITH_IMM; 312 wc.wc_flags = IB_WC_WITH_IMM;
313 wc.imm_data = wqe->wr.imm_data; 313 wc.imm_data = wqe->wr.ex.imm_data;
314 /* FALLTHROUGH */ 314 /* FALLTHROUGH */
315 case IB_WR_SEND: 315 case IB_WR_SEND:
316 if (!ipath_get_rwqe(qp, 0)) { 316 if (!ipath_get_rwqe(qp, 0)) {
@@ -339,7 +339,7 @@ again:
339 goto err; 339 goto err;
340 } 340 }
341 wc.wc_flags = IB_WC_WITH_IMM; 341 wc.wc_flags = IB_WC_WITH_IMM;
342 wc.imm_data = wqe->wr.imm_data; 342 wc.imm_data = wqe->wr.ex.imm_data;
343 if (!ipath_get_rwqe(qp, 1)) 343 if (!ipath_get_rwqe(qp, 1))
344 goto rnr_nak; 344 goto rnr_nak;
345 /* FALLTHROUGH */ 345 /* FALLTHROUGH */
diff --git a/drivers/infiniband/hw/ipath/ipath_uc.c b/drivers/infiniband/hw/ipath/ipath_uc.c
index 2dd8de20d221..bfe8926b5514 100644
--- a/drivers/infiniband/hw/ipath/ipath_uc.c
+++ b/drivers/infiniband/hw/ipath/ipath_uc.c
@@ -94,7 +94,7 @@ int ipath_make_uc_req(struct ipath_qp *qp)
94 qp->s_state = 94 qp->s_state =
95 OP(SEND_ONLY_WITH_IMMEDIATE); 95 OP(SEND_ONLY_WITH_IMMEDIATE);
96 /* Immediate data comes after the BTH */ 96 /* Immediate data comes after the BTH */
97 ohdr->u.imm_data = wqe->wr.imm_data; 97 ohdr->u.imm_data = wqe->wr.ex.imm_data;
98 hwords += 1; 98 hwords += 1;
99 } 99 }
100 if (wqe->wr.send_flags & IB_SEND_SOLICITED) 100 if (wqe->wr.send_flags & IB_SEND_SOLICITED)
@@ -123,7 +123,7 @@ int ipath_make_uc_req(struct ipath_qp *qp)
123 qp->s_state = 123 qp->s_state =
124 OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE); 124 OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE);
125 /* Immediate data comes after the RETH */ 125 /* Immediate data comes after the RETH */
126 ohdr->u.rc.imm_data = wqe->wr.imm_data; 126 ohdr->u.rc.imm_data = wqe->wr.ex.imm_data;
127 hwords += 1; 127 hwords += 1;
128 if (wqe->wr.send_flags & IB_SEND_SOLICITED) 128 if (wqe->wr.send_flags & IB_SEND_SOLICITED)
129 bth0 |= 1 << 23; 129 bth0 |= 1 << 23;
@@ -152,7 +152,7 @@ int ipath_make_uc_req(struct ipath_qp *qp)
152 else { 152 else {
153 qp->s_state = OP(SEND_LAST_WITH_IMMEDIATE); 153 qp->s_state = OP(SEND_LAST_WITH_IMMEDIATE);
154 /* Immediate data comes after the BTH */ 154 /* Immediate data comes after the BTH */
155 ohdr->u.imm_data = wqe->wr.imm_data; 155 ohdr->u.imm_data = wqe->wr.ex.imm_data;
156 hwords += 1; 156 hwords += 1;
157 } 157 }
158 if (wqe->wr.send_flags & IB_SEND_SOLICITED) 158 if (wqe->wr.send_flags & IB_SEND_SOLICITED)
@@ -177,7 +177,7 @@ int ipath_make_uc_req(struct ipath_qp *qp)
177 qp->s_state = 177 qp->s_state =
178 OP(RDMA_WRITE_LAST_WITH_IMMEDIATE); 178 OP(RDMA_WRITE_LAST_WITH_IMMEDIATE);
179 /* Immediate data comes after the BTH */ 179 /* Immediate data comes after the BTH */
180 ohdr->u.imm_data = wqe->wr.imm_data; 180 ohdr->u.imm_data = wqe->wr.ex.imm_data;
181 hwords += 1; 181 hwords += 1;
182 if (wqe->wr.send_flags & IB_SEND_SOLICITED) 182 if (wqe->wr.send_flags & IB_SEND_SOLICITED)
183 bth0 |= 1 << 23; 183 bth0 |= 1 << 23;
diff --git a/drivers/infiniband/hw/ipath/ipath_ud.c b/drivers/infiniband/hw/ipath/ipath_ud.c
index 918f52070653..8b6a261c89e3 100644
--- a/drivers/infiniband/hw/ipath/ipath_ud.c
+++ b/drivers/infiniband/hw/ipath/ipath_ud.c
@@ -95,7 +95,7 @@ static void ipath_ud_loopback(struct ipath_qp *sqp, struct ipath_swqe *swqe)
95 95
96 if (swqe->wr.opcode == IB_WR_SEND_WITH_IMM) { 96 if (swqe->wr.opcode == IB_WR_SEND_WITH_IMM) {
97 wc.wc_flags = IB_WC_WITH_IMM; 97 wc.wc_flags = IB_WC_WITH_IMM;
98 wc.imm_data = swqe->wr.imm_data; 98 wc.imm_data = swqe->wr.ex.imm_data;
99 } else { 99 } else {
100 wc.wc_flags = 0; 100 wc.wc_flags = 0;
101 wc.imm_data = 0; 101 wc.imm_data = 0;
@@ -327,7 +327,7 @@ int ipath_make_ud_req(struct ipath_qp *qp)
327 } 327 }
328 if (wqe->wr.opcode == IB_WR_SEND_WITH_IMM) { 328 if (wqe->wr.opcode == IB_WR_SEND_WITH_IMM) {
329 qp->s_hdrwords++; 329 qp->s_hdrwords++;
330 ohdr->u.ud.imm_data = wqe->wr.imm_data; 330 ohdr->u.ud.imm_data = wqe->wr.ex.imm_data;
331 bth0 = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE << 24; 331 bth0 = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE << 24;
332 } else 332 } else
333 bth0 = IB_OPCODE_UD_SEND_ONLY << 24; 333 bth0 = IB_OPCODE_UD_SEND_ONLY << 24;
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index f5210c17e312..38e651a67589 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -1249,7 +1249,7 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr,
1249 case IB_WR_SEND_WITH_IMM: 1249 case IB_WR_SEND_WITH_IMM:
1250 sqp->ud_header.bth.opcode = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE; 1250 sqp->ud_header.bth.opcode = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE;
1251 sqp->ud_header.immediate_present = 1; 1251 sqp->ud_header.immediate_present = 1;
1252 sqp->ud_header.immediate_data = wr->imm_data; 1252 sqp->ud_header.immediate_data = wr->ex.imm_data;
1253 break; 1253 break;
1254 default: 1254 default:
1255 return -EINVAL; 1255 return -EINVAL;
@@ -1492,7 +1492,7 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
1492 1492
1493 if (wr->opcode == IB_WR_SEND_WITH_IMM || 1493 if (wr->opcode == IB_WR_SEND_WITH_IMM ||
1494 wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM) 1494 wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM)
1495 ctrl->imm = wr->imm_data; 1495 ctrl->imm = wr->ex.imm_data;
1496 else 1496 else
1497 ctrl->imm = 0; 1497 ctrl->imm = 0;
1498 1498
diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c
index 8433897624bc..b3fd6b05d79d 100644
--- a/drivers/infiniband/hw/mthca/mthca_qp.c
+++ b/drivers/infiniband/hw/mthca/mthca_qp.c
@@ -1532,7 +1532,7 @@ static int build_mlx_header(struct mthca_dev *dev, struct mthca_sqp *sqp,
1532 case IB_WR_SEND_WITH_IMM: 1532 case IB_WR_SEND_WITH_IMM:
1533 sqp->ud_header.bth.opcode = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE; 1533 sqp->ud_header.bth.opcode = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE;
1534 sqp->ud_header.immediate_present = 1; 1534 sqp->ud_header.immediate_present = 1;
1535 sqp->ud_header.immediate_data = wr->imm_data; 1535 sqp->ud_header.immediate_data = wr->ex.imm_data;
1536 break; 1536 break;
1537 default: 1537 default:
1538 return -EINVAL; 1538 return -EINVAL;
@@ -1679,7 +1679,7 @@ int mthca_tavor_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
1679 cpu_to_be32(1); 1679 cpu_to_be32(1);
1680 if (wr->opcode == IB_WR_SEND_WITH_IMM || 1680 if (wr->opcode == IB_WR_SEND_WITH_IMM ||
1681 wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM) 1681 wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM)
1682 ((struct mthca_next_seg *) wqe)->imm = wr->imm_data; 1682 ((struct mthca_next_seg *) wqe)->imm = wr->ex.imm_data;
1683 1683
1684 wqe += sizeof (struct mthca_next_seg); 1684 wqe += sizeof (struct mthca_next_seg);
1685 size = sizeof (struct mthca_next_seg) / 16; 1685 size = sizeof (struct mthca_next_seg) / 16;
@@ -2020,7 +2020,7 @@ int mthca_arbel_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
2020 cpu_to_be32(1); 2020 cpu_to_be32(1);
2021 if (wr->opcode == IB_WR_SEND_WITH_IMM || 2021 if (wr->opcode == IB_WR_SEND_WITH_IMM ||
2022 wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM) 2022 wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM)
2023 ((struct mthca_next_seg *) wqe)->imm = wr->imm_data; 2023 ((struct mthca_next_seg *) wqe)->imm = wr->ex.imm_data;
2024 2024
2025 wqe += sizeof (struct mthca_next_seg); 2025 wqe += sizeof (struct mthca_next_seg);
2026 size = sizeof (struct mthca_next_seg) / 16; 2026 size = sizeof (struct mthca_next_seg) / 16;
diff --git a/drivers/infiniband/hw/nes/nes_hw.c b/drivers/infiniband/hw/nes/nes_hw.c
index 134189d77ed3..aa53aab91bf8 100644
--- a/drivers/infiniband/hw/nes/nes_hw.c
+++ b/drivers/infiniband/hw/nes/nes_hw.c
@@ -393,7 +393,7 @@ struct nes_adapter *nes_init_adapter(struct nes_device *nesdev, u8 hw_rev) {
393 nesadapter->base_pd = 1; 393 nesadapter->base_pd = 1;
394 394
395 nesadapter->device_cap_flags = 395 nesadapter->device_cap_flags =
396 IB_DEVICE_ZERO_STAG | IB_DEVICE_SEND_W_INV | IB_DEVICE_MEM_WINDOW; 396 IB_DEVICE_ZERO_STAG | IB_DEVICE_MEM_WINDOW;
397 397
398 nesadapter->allocated_qps = (unsigned long *)&(((unsigned char *)nesadapter) 398 nesadapter->allocated_qps = (unsigned long *)&(((unsigned char *)nesadapter)
399 [(sizeof(struct nes_adapter)+(sizeof(unsigned long)-1))&(~(sizeof(unsigned long)-1))]); 399 [(sizeof(struct nes_adapter)+(sizeof(unsigned long)-1))&(~(sizeof(unsigned long)-1))]);
diff --git a/include/rdma/ib_user_verbs.h b/include/rdma/ib_user_verbs.h
index 64a721fcbc1c..8d65bf0a625b 100644
--- a/include/rdma/ib_user_verbs.h
+++ b/include/rdma/ib_user_verbs.h
@@ -533,7 +533,10 @@ struct ib_uverbs_send_wr {
533 __u32 num_sge; 533 __u32 num_sge;
534 __u32 opcode; 534 __u32 opcode;
535 __u32 send_flags; 535 __u32 send_flags;
536 __u32 imm_data; 536 union {
537 __u32 imm_data;
538 __u32 invalidate_rkey;
539 } ex;
537 union { 540 union {
538 struct { 541 struct {
539 __u64 remote_addr; 542 __u64 remote_addr;
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index 66928e9cab19..c48f6af5ef9a 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -94,7 +94,7 @@ enum ib_device_cap_flags {
94 IB_DEVICE_SRQ_RESIZE = (1<<13), 94 IB_DEVICE_SRQ_RESIZE = (1<<13),
95 IB_DEVICE_N_NOTIFY_CQ = (1<<14), 95 IB_DEVICE_N_NOTIFY_CQ = (1<<14),
96 IB_DEVICE_ZERO_STAG = (1<<15), 96 IB_DEVICE_ZERO_STAG = (1<<15),
97 IB_DEVICE_SEND_W_INV = (1<<16), 97 IB_DEVICE_RESERVED = (1<<16), /* old SEND_W_INV */
98 IB_DEVICE_MEM_WINDOW = (1<<17), 98 IB_DEVICE_MEM_WINDOW = (1<<17),
99 /* 99 /*
100 * Devices should set IB_DEVICE_UD_IP_SUM if they support 100 * Devices should set IB_DEVICE_UD_IP_SUM if they support
@@ -105,6 +105,7 @@ enum ib_device_cap_flags {
105 */ 105 */
106 IB_DEVICE_UD_IP_CSUM = (1<<18), 106 IB_DEVICE_UD_IP_CSUM = (1<<18),
107 IB_DEVICE_UD_TSO = (1<<19), 107 IB_DEVICE_UD_TSO = (1<<19),
108 IB_DEVICE_SEND_W_INV = (1<<21),
108}; 109};
109 110
110enum ib_atomic_cap { 111enum ib_atomic_cap {
@@ -625,7 +626,8 @@ enum ib_wr_opcode {
625 IB_WR_RDMA_READ, 626 IB_WR_RDMA_READ,
626 IB_WR_ATOMIC_CMP_AND_SWP, 627 IB_WR_ATOMIC_CMP_AND_SWP,
627 IB_WR_ATOMIC_FETCH_AND_ADD, 628 IB_WR_ATOMIC_FETCH_AND_ADD,
628 IB_WR_LSO 629 IB_WR_LSO,
630 IB_WR_SEND_WITH_INV,
629}; 631};
630 632
631enum ib_send_flags { 633enum ib_send_flags {
@@ -649,7 +651,10 @@ struct ib_send_wr {
649 int num_sge; 651 int num_sge;
650 enum ib_wr_opcode opcode; 652 enum ib_wr_opcode opcode;
651 int send_flags; 653 int send_flags;
652 __be32 imm_data; 654 union {
655 __be32 imm_data;
656 u32 invalidate_rkey;
657 } ex;
653 union { 658 union {
654 struct { 659 struct {
655 u64 remote_addr; 660 u64 remote_addr;
diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
index ffbf22a1d2ca..8ea283ecc522 100644
--- a/net/sunrpc/xprtrdma/verbs.c
+++ b/net/sunrpc/xprtrdma/verbs.c
@@ -1573,7 +1573,6 @@ rpcrdma_ep_post(struct rpcrdma_ia *ia,
1573 send_wr.sg_list = req->rl_send_iov; 1573 send_wr.sg_list = req->rl_send_iov;
1574 send_wr.num_sge = req->rl_niovs; 1574 send_wr.num_sge = req->rl_niovs;
1575 send_wr.opcode = IB_WR_SEND; 1575 send_wr.opcode = IB_WR_SEND;
1576 send_wr.imm_data = 0;
1577 if (send_wr.num_sge == 4) /* no need to sync any pad (constant) */ 1576 if (send_wr.num_sge == 4) /* no need to sync any pad (constant) */
1578 ib_dma_sync_single_for_device(ia->ri_id->device, 1577 ib_dma_sync_single_for_device(ia->ri_id->device,
1579 req->rl_send_iov[3].addr, req->rl_send_iov[3].length, 1578 req->rl_send_iov[3].addr, req->rl_send_iov[3].length,