author     Doug Ledford <dledford@redhat.com>  2015-10-28 22:23:34 -0400
committer  Doug Ledford <dledford@redhat.com>  2015-10-28 22:23:34 -0400
commit     63e8790d39a2d7c9a0ebeab987a6033d184bc6ba (patch)
tree       9436939401b222d344f66e2bda59b445d5b9189f
parent     95893dde99d9d14f8a6ac99ea3103792a8da5f25 (diff)
parent     eb14ab3ba14081e403be93dc6396627567fadf60 (diff)
Merge branch 'wr-cleanup' into k.o/for-4.4
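This merge pulls in the 'wr-cleanup' series: the catch-all union inside struct ib_send_wr (wr.ud, wr.rdma, wr.atomic, wr.fast_reg, wr.bind_mw) is replaced by per-opcode work-request structs that embed struct ib_send_wr as their first member, and consumers recover the typed request with container_of() helpers such as ud_wr(), rdma_wr(), atomic_wr(), fast_reg_wr() and bind_mw_wr(), as the hunks below show. The following is a minimal sketch of that pattern, not the exact include/rdma/ib_verbs.h text; the caller example_post_rdma_write() is a hypothetical helper added only for illustration.

#include <rdma/ib_verbs.h>

/* Minimal sketch of the wr-cleanup pattern (illustrative, not the exact
 * ib_verbs.h definitions): a typed WR embeds the generic struct ib_send_wr,
 * and a container_of() helper recovers the typed view inside a driver.
 */
struct ib_rdma_wr {
	struct ib_send_wr	wr;		/* generic part comes first */
	u64			remote_addr;
	u32			rkey;
};

static inline struct ib_rdma_wr *rdma_wr(struct ib_send_wr *wr)
{
	return container_of(wr, struct ib_rdma_wr, wr);
}

/* Hypothetical caller: fill the typed struct and post its embedded ->wr.
 * The driver's post_send path then uses rdma_wr(wr) to get the type back.
 */
static int example_post_rdma_write(struct ib_qp *qp, struct ib_sge *sge,
				   u64 remote_addr, u32 rkey)
{
	struct ib_send_wr *bad_wr;
	struct ib_rdma_wr wr = {
		.wr = {
			.opcode     = IB_WR_RDMA_WRITE,
			.sg_list    = sge,
			.num_sge    = 1,
			.send_flags = IB_SEND_SIGNALED,
		},
		.remote_addr = remote_addr,
		.rkey        = rkey,
	};

	return ib_post_send(qp, &wr.wr, &bad_wr);
}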
-rw-r--r--  drivers/infiniband/core/agent.c | 2
-rw-r--r--  drivers/infiniband/core/mad.c | 40
-rw-r--r--  drivers/infiniband/core/mad_priv.h | 2
-rw-r--r--  drivers/infiniband/core/uverbs_cmd.c | 143
-rw-r--r--  drivers/infiniband/hw/cxgb3/iwch_qp.c | 36
-rw-r--r--  drivers/infiniband/hw/cxgb4/qp.c | 46
-rw-r--r--  drivers/infiniband/hw/mlx4/mad.c | 54
-rw-r--r--  drivers/infiniband/hw/mlx4/mr.c | 20
-rw-r--r--  drivers/infiniband/hw/mlx4/qp.c | 178
-rw-r--r--  drivers/infiniband/hw/mlx5/mlx5_ib.h | 6
-rw-r--r--  drivers/infiniband/hw/mlx5/mr.c | 52
-rw-r--r--  drivers/infiniband/hw/mlx5/qp.c | 115
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_qp.c | 84
-rw-r--r--  drivers/infiniband/hw/nes/nes_verbs.c | 41
-rw-r--r--  drivers/infiniband/hw/ocrdma/ocrdma_verbs.c | 53
-rw-r--r--  drivers/infiniband/hw/qib/qib_keys.c | 23
-rw-r--r--  drivers/infiniband/hw/qib/qib_qp.c | 2
-rw-r--r--  drivers/infiniband/hw/qib/qib_rc.c | 38
-rw-r--r--  drivers/infiniband/hw/qib/qib_ruc.c | 20
-rw-r--r--  drivers/infiniband/hw/qib/qib_uc.c | 4
-rw-r--r--  drivers/infiniband/hw/qib/qib_ud.c | 20
-rw-r--r--  drivers/infiniband/hw/qib/qib_verbs.c | 22
-rw-r--r--  drivers/infiniband/hw/qib/qib_verbs.h | 8
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib.h | 4
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_cm.c | 4
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_ib.c | 22
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_main.c | 2
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_multicast.c | 2
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_verbs.c | 6
-rw-r--r--  drivers/infiniband/ulp/iser/iscsi_iser.h | 10
-rw-r--r--  drivers/infiniband/ulp/iser/iser_memory.c | 72
-rw-r--r--  drivers/infiniband/ulp/iser/iser_verbs.c | 2
-rw-r--r--  drivers/infiniband/ulp/isert/ib_isert.c | 156
-rw-r--r--  drivers/infiniband/ulp/isert/ib_isert.h | 6
-rw-r--r--  drivers/infiniband/ulp/srp/ib_srp.c | 26
-rw-r--r--  drivers/infiniband/ulp/srpt/ib_srpt.c | 32
-rw-r--r--  drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h | 2
-rw-r--r--  drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c | 35
-rw-r--r--  drivers/staging/rdma/amso1100/c2_qp.c | 8
-rw-r--r--  drivers/staging/rdma/ehca/ehca_reqs.c | 53
-rw-r--r--  drivers/staging/rdma/hfi1/keys.c | 22
-rw-r--r--  drivers/staging/rdma/hfi1/qp.c | 2
-rw-r--r--  drivers/staging/rdma/hfi1/rc.c | 24
-rw-r--r--  drivers/staging/rdma/hfi1/ruc.c | 18
-rw-r--r--  drivers/staging/rdma/hfi1/uc.c | 4
-rw-r--r--  drivers/staging/rdma/hfi1/ud.c | 20
-rw-r--r--  drivers/staging/rdma/hfi1/verbs.c | 23
-rw-r--r--  drivers/staging/rdma/hfi1/verbs.h | 10
-rw-r--r--  drivers/staging/rdma/ipath/ipath_rc.c | 24
-rw-r--r--  drivers/staging/rdma/ipath/ipath_ruc.c | 16
-rw-r--r--  drivers/staging/rdma/ipath/ipath_uc.c | 4
-rw-r--r--  drivers/staging/rdma/ipath/ipath_ud.c | 26
-rw-r--r--  drivers/staging/rdma/ipath/ipath_verbs.c | 20
-rw-r--r--  drivers/staging/rdma/ipath/ipath_verbs.h | 9
-rw-r--r--  include/rdma/ib_verbs.h | 131
-rw-r--r--  net/rds/ib.h | 6
-rw-r--r--  net/rds/ib_send.c | 71
-rw-r--r--  net/rds/iw.h | 6
-rw-r--r--  net/rds/iw_rdma.c | 29
-rw-r--r--  net/rds/iw_send.c | 113
-rw-r--r--  net/sunrpc/xprtrdma/frwr_ops.c | 23
-rw-r--r--  net/sunrpc/xprtrdma/svc_rdma_recvfrom.c | 70
-rw-r--r--  net/sunrpc/xprtrdma/svc_rdma_sendto.c | 18
63 files changed, 1152 insertions(+), 988 deletions(-)
diff --git a/drivers/infiniband/core/agent.c b/drivers/infiniband/core/agent.c
index 0429040304fd..4fa524dfb6cf 100644
--- a/drivers/infiniband/core/agent.c
+++ b/drivers/infiniband/core/agent.c
@@ -126,7 +126,7 @@ void agent_send_response(const struct ib_mad_hdr *mad_hdr, const struct ib_grh *
 		mad_send_wr = container_of(send_buf,
 					   struct ib_mad_send_wr_private,
 					   send_buf);
-		mad_send_wr->send_wr.wr.ud.port_num = port_num;
+		mad_send_wr->send_wr.port_num = port_num;
 	}
 
 	if (ib_post_send_mad(send_buf, NULL)) {
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index fa63b89e15aa..8d8af7a41a30 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -752,7 +752,7 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
 	struct ib_device *device = mad_agent_priv->agent.device;
 	u8 port_num;
 	struct ib_wc mad_wc;
-	struct ib_send_wr *send_wr = &mad_send_wr->send_wr;
+	struct ib_ud_wr *send_wr = &mad_send_wr->send_wr;
 	size_t mad_size = port_mad_size(mad_agent_priv->qp_info->port_priv);
 	u16 out_mad_pkey_index = 0;
 	u16 drslid;
@@ -761,7 +761,7 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
 
 	if (rdma_cap_ib_switch(device) &&
 	    smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
-		port_num = send_wr->wr.ud.port_num;
+		port_num = send_wr->port_num;
 	else
 		port_num = mad_agent_priv->agent.port_num;
 
@@ -832,9 +832,9 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
 	}
 
 	build_smp_wc(mad_agent_priv->agent.qp,
-		     send_wr->wr_id, drslid,
-		     send_wr->wr.ud.pkey_index,
-		     send_wr->wr.ud.port_num, &mad_wc);
+		     send_wr->wr.wr_id, drslid,
+		     send_wr->pkey_index,
+		     send_wr->port_num, &mad_wc);
 
 	if (opa && smp->base_version == OPA_MGMT_BASE_VERSION) {
 		mad_wc.byte_len = mad_send_wr->send_buf.hdr_len
@@ -894,7 +894,7 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
 
 	local->mad_send_wr = mad_send_wr;
 	if (opa) {
-		local->mad_send_wr->send_wr.wr.ud.pkey_index = out_mad_pkey_index;
+		local->mad_send_wr->send_wr.pkey_index = out_mad_pkey_index;
 		local->return_wc_byte_len = mad_size;
 	}
 	/* Reference MAD agent until send side of local completion handled */
@@ -1039,14 +1039,14 @@ struct ib_mad_send_buf * ib_create_send_mad(struct ib_mad_agent *mad_agent,
 
 	mad_send_wr->sg_list[1].lkey = mad_agent->qp->pd->local_dma_lkey;
 
-	mad_send_wr->send_wr.wr_id = (unsigned long) mad_send_wr;
-	mad_send_wr->send_wr.sg_list = mad_send_wr->sg_list;
-	mad_send_wr->send_wr.num_sge = 2;
-	mad_send_wr->send_wr.opcode = IB_WR_SEND;
-	mad_send_wr->send_wr.send_flags = IB_SEND_SIGNALED;
-	mad_send_wr->send_wr.wr.ud.remote_qpn = remote_qpn;
-	mad_send_wr->send_wr.wr.ud.remote_qkey = IB_QP_SET_QKEY;
-	mad_send_wr->send_wr.wr.ud.pkey_index = pkey_index;
+	mad_send_wr->send_wr.wr.wr_id = (unsigned long) mad_send_wr;
+	mad_send_wr->send_wr.wr.sg_list = mad_send_wr->sg_list;
+	mad_send_wr->send_wr.wr.num_sge = 2;
+	mad_send_wr->send_wr.wr.opcode = IB_WR_SEND;
+	mad_send_wr->send_wr.wr.send_flags = IB_SEND_SIGNALED;
+	mad_send_wr->send_wr.remote_qpn = remote_qpn;
+	mad_send_wr->send_wr.remote_qkey = IB_QP_SET_QKEY;
+	mad_send_wr->send_wr.pkey_index = pkey_index;
 
 	if (rmpp_active) {
 		ret = alloc_send_rmpp_list(mad_send_wr, mad_size, gfp_mask);
@@ -1151,7 +1151,7 @@ int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr)
 
 	/* Set WR ID to find mad_send_wr upon completion */
 	qp_info = mad_send_wr->mad_agent_priv->qp_info;
-	mad_send_wr->send_wr.wr_id = (unsigned long)&mad_send_wr->mad_list;
+	mad_send_wr->send_wr.wr.wr_id = (unsigned long)&mad_send_wr->mad_list;
 	mad_send_wr->mad_list.mad_queue = &qp_info->send_queue;
 
 	mad_agent = mad_send_wr->send_buf.mad_agent;
@@ -1179,7 +1179,7 @@ int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr)
 
 	spin_lock_irqsave(&qp_info->send_queue.lock, flags);
 	if (qp_info->send_queue.count < qp_info->send_queue.max_active) {
-		ret = ib_post_send(mad_agent->qp, &mad_send_wr->send_wr,
+		ret = ib_post_send(mad_agent->qp, &mad_send_wr->send_wr.wr,
 				   &bad_send_wr);
 		list = &qp_info->send_queue.list;
 	} else {
@@ -1244,7 +1244,7 @@ int ib_post_send_mad(struct ib_mad_send_buf *send_buf,
 	 * request associated with the completion
 	 */
 	next_send_buf = send_buf->next;
-	mad_send_wr->send_wr.wr.ud.ah = send_buf->ah;
+	mad_send_wr->send_wr.ah = send_buf->ah;
 
 	if (((struct ib_mad_hdr *) send_buf->mad)->mgmt_class ==
 	    IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
@@ -2457,7 +2457,7 @@ retry:
 	ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc);
 
 	if (queued_send_wr) {
-		ret = ib_post_send(qp_info->qp, &queued_send_wr->send_wr,
+		ret = ib_post_send(qp_info->qp, &queued_send_wr->send_wr.wr,
 				   &bad_send_wr);
 		if (ret) {
 			dev_err(&port_priv->device->dev,
@@ -2515,7 +2515,7 @@ static void mad_error_handler(struct ib_mad_port_private *port_priv,
 		struct ib_send_wr *bad_send_wr;
 
 		mad_send_wr->retry = 0;
-		ret = ib_post_send(qp_info->qp, &mad_send_wr->send_wr,
+		ret = ib_post_send(qp_info->qp, &mad_send_wr->send_wr.wr,
 				   &bad_send_wr);
 		if (ret)
 			ib_mad_send_done_handler(port_priv, wc);
@@ -2713,7 +2713,7 @@ static void local_completions(struct work_struct *work)
 			build_smp_wc(recv_mad_agent->agent.qp,
 				     (unsigned long) local->mad_send_wr,
 				     be16_to_cpu(IB_LID_PERMISSIVE),
-				     local->mad_send_wr->send_wr.wr.ud.pkey_index,
+				     local->mad_send_wr->send_wr.pkey_index,
 				     recv_mad_agent->agent.port_num, &wc);
 
 			local->mad_priv->header.recv_wc.wc = &wc;
diff --git a/drivers/infiniband/core/mad_priv.h b/drivers/infiniband/core/mad_priv.h
index 4a4f7aad0978..990698a6ab4b 100644
--- a/drivers/infiniband/core/mad_priv.h
+++ b/drivers/infiniband/core/mad_priv.h
@@ -123,7 +123,7 @@ struct ib_mad_send_wr_private {
 	struct ib_mad_send_buf send_buf;
 	u64 header_mapping;
 	u64 payload_mapping;
-	struct ib_send_wr send_wr;
+	struct ib_ud_wr send_wr;
 	struct ib_sge sg_list[IB_MAD_SEND_REQ_MAX_SG];
 	__be64 tid;
 	unsigned long timeout;
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index 8fd081ae9aa9..94816aeb95a0 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -2427,6 +2427,12 @@ ssize_t ib_uverbs_destroy_qp(struct ib_uverbs_file *file,
 	return in_len;
 }
 
+static void *alloc_wr(size_t wr_size, __u32 num_sge)
+{
+	return kmalloc(ALIGN(wr_size, sizeof (struct ib_sge)) +
+		       num_sge * sizeof (struct ib_sge), GFP_KERNEL);
+};
+
 ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file,
 			    struct ib_device *ib_dev,
 			    const char __user *buf, int in_len,
@@ -2475,14 +2481,83 @@ ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file,
 			goto out_put;
 		}
 
-		next = kmalloc(ALIGN(sizeof *next, sizeof (struct ib_sge)) +
-			       user_wr->num_sge * sizeof (struct ib_sge),
-			       GFP_KERNEL);
-		if (!next) {
-			ret = -ENOMEM;
+		if (is_ud) {
+			struct ib_ud_wr *ud;
+
+			if (user_wr->opcode != IB_WR_SEND &&
+			    user_wr->opcode != IB_WR_SEND_WITH_IMM) {
+				ret = -EINVAL;
+				goto out_put;
+			}
+
+			ud = alloc_wr(sizeof(*ud), user_wr->num_sge);
+			if (!ud) {
+				ret = -ENOMEM;
+				goto out_put;
+			}
+
+			ud->ah = idr_read_ah(user_wr->wr.ud.ah, file->ucontext);
+			if (!ud->ah) {
+				kfree(ud);
+				ret = -EINVAL;
+				goto out_put;
+			}
+			ud->remote_qpn = user_wr->wr.ud.remote_qpn;
+			ud->remote_qkey = user_wr->wr.ud.remote_qkey;
+
+			next = &ud->wr;
+		} else if (user_wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM ||
+			   user_wr->opcode == IB_WR_RDMA_WRITE ||
+			   user_wr->opcode == IB_WR_RDMA_READ) {
+			struct ib_rdma_wr *rdma;
+
+			rdma = alloc_wr(sizeof(*rdma), user_wr->num_sge);
+			if (!rdma) {
+				ret = -ENOMEM;
+				goto out_put;
+			}
+
+			rdma->remote_addr = user_wr->wr.rdma.remote_addr;
+			rdma->rkey = user_wr->wr.rdma.rkey;
+
+			next = &rdma->wr;
+		} else if (user_wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
+			   user_wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD) {
+			struct ib_atomic_wr *atomic;
+
+			atomic = alloc_wr(sizeof(*atomic), user_wr->num_sge);
+			if (!atomic) {
+				ret = -ENOMEM;
+				goto out_put;
+			}
+
+			atomic->remote_addr = user_wr->wr.atomic.remote_addr;
+			atomic->compare_add = user_wr->wr.atomic.compare_add;
+			atomic->swap = user_wr->wr.atomic.swap;
+			atomic->rkey = user_wr->wr.atomic.rkey;
+
+			next = &atomic->wr;
+		} else if (user_wr->opcode == IB_WR_SEND ||
+			   user_wr->opcode == IB_WR_SEND_WITH_IMM ||
+			   user_wr->opcode == IB_WR_SEND_WITH_INV) {
+			next = alloc_wr(sizeof(*next), user_wr->num_sge);
+			if (!next) {
+				ret = -ENOMEM;
+				goto out_put;
+			}
+		} else {
+			ret = -EINVAL;
 			goto out_put;
 		}
 
+		if (user_wr->opcode == IB_WR_SEND_WITH_IMM ||
+		    user_wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM) {
+			next->ex.imm_data =
+				(__be32 __force) user_wr->ex.imm_data;
+		} else if (user_wr->opcode == IB_WR_SEND_WITH_INV) {
+			next->ex.invalidate_rkey = user_wr->ex.invalidate_rkey;
+		}
+
 		if (!last)
 			wr = next;
 		else
@@ -2495,60 +2570,6 @@ ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file,
 		next->opcode     = user_wr->opcode;
 		next->send_flags = user_wr->send_flags;
 
-		if (is_ud) {
-			if (next->opcode != IB_WR_SEND &&
-			    next->opcode != IB_WR_SEND_WITH_IMM) {
-				ret = -EINVAL;
-				goto out_put;
-			}
-
-			next->wr.ud.ah = idr_read_ah(user_wr->wr.ud.ah,
-						     file->ucontext);
-			if (!next->wr.ud.ah) {
-				ret = -EINVAL;
-				goto out_put;
-			}
-			next->wr.ud.remote_qpn  = user_wr->wr.ud.remote_qpn;
-			next->wr.ud.remote_qkey = user_wr->wr.ud.remote_qkey;
-			if (next->opcode == IB_WR_SEND_WITH_IMM)
-				next->ex.imm_data =
-					(__be32 __force) user_wr->ex.imm_data;
-		} else {
-			switch (next->opcode) {
-			case IB_WR_RDMA_WRITE_WITH_IMM:
-				next->ex.imm_data =
-					(__be32 __force) user_wr->ex.imm_data;
-			case IB_WR_RDMA_WRITE:
-			case IB_WR_RDMA_READ:
-				next->wr.rdma.remote_addr =
-					user_wr->wr.rdma.remote_addr;
-				next->wr.rdma.rkey        =
-					user_wr->wr.rdma.rkey;
-				break;
-			case IB_WR_SEND_WITH_IMM:
-				next->ex.imm_data =
-					(__be32 __force) user_wr->ex.imm_data;
-				break;
-			case IB_WR_SEND_WITH_INV:
-				next->ex.invalidate_rkey =
-					user_wr->ex.invalidate_rkey;
-				break;
-			case IB_WR_ATOMIC_CMP_AND_SWP:
-			case IB_WR_ATOMIC_FETCH_AND_ADD:
-				next->wr.atomic.remote_addr =
-					user_wr->wr.atomic.remote_addr;
-				next->wr.atomic.compare_add =
-					user_wr->wr.atomic.compare_add;
-				next->wr.atomic.swap = user_wr->wr.atomic.swap;
-				next->wr.atomic.rkey = user_wr->wr.atomic.rkey;
-			case IB_WR_SEND:
-				break;
-			default:
-				ret = -EINVAL;
-				goto out_put;
-			}
-		}
-
 		if (next->num_sge) {
 			next->sg_list = (void *) next +
 				ALIGN(sizeof *next, sizeof (struct ib_sge));
@@ -2582,8 +2603,8 @@ out_put:
 	put_qp_read(qp);
 
 	while (wr) {
-		if (is_ud && wr->wr.ud.ah)
-			put_ah_read(wr->wr.ud.ah);
+		if (is_ud && ud_wr(wr)->ah)
+			put_ah_read(ud_wr(wr)->ah);
 		next = wr->next;
 		kfree(wr);
 		wr = next;
diff --git a/drivers/infiniband/hw/cxgb3/iwch_qp.c b/drivers/infiniband/hw/cxgb3/iwch_qp.c
index b57c0befd962..bac0508fedd9 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_qp.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_qp.c
@@ -95,8 +95,8 @@ static int build_rdma_write(union t3_wr *wqe, struct ib_send_wr *wr,
 	wqe->write.reserved[0] = 0;
 	wqe->write.reserved[1] = 0;
 	wqe->write.reserved[2] = 0;
-	wqe->write.stag_sink = cpu_to_be32(wr->wr.rdma.rkey);
-	wqe->write.to_sink = cpu_to_be64(wr->wr.rdma.remote_addr);
+	wqe->write.stag_sink = cpu_to_be32(rdma_wr(wr)->rkey);
+	wqe->write.to_sink = cpu_to_be64(rdma_wr(wr)->remote_addr);
 
 	if (wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM) {
 		plen = 4;
@@ -137,8 +137,8 @@ static int build_rdma_read(union t3_wr *wqe, struct ib_send_wr *wr,
 	wqe->read.local_inv = 0;
 	wqe->read.reserved[0] = 0;
 	wqe->read.reserved[1] = 0;
-	wqe->read.rem_stag = cpu_to_be32(wr->wr.rdma.rkey);
-	wqe->read.rem_to = cpu_to_be64(wr->wr.rdma.remote_addr);
+	wqe->read.rem_stag = cpu_to_be32(rdma_wr(wr)->rkey);
+	wqe->read.rem_to = cpu_to_be64(rdma_wr(wr)->remote_addr);
 	wqe->read.local_stag = cpu_to_be32(wr->sg_list[0].lkey);
 	wqe->read.local_len = cpu_to_be32(wr->sg_list[0].length);
 	wqe->read.local_to = cpu_to_be64(wr->sg_list[0].addr);
@@ -146,27 +146,27 @@ static int build_rdma_read(union t3_wr *wqe, struct ib_send_wr *wr,
 	return 0;
 }
 
-static int build_fastreg(union t3_wr *wqe, struct ib_send_wr *wr,
+static int build_fastreg(union t3_wr *wqe, struct ib_send_wr *send_wr,
 			 u8 *flit_cnt, int *wr_cnt, struct t3_wq *wq)
 {
+	struct ib_fast_reg_wr *wr = fast_reg_wr(send_wr);
 	int i;
 	__be64 *p;
 
-	if (wr->wr.fast_reg.page_list_len > T3_MAX_FASTREG_DEPTH)
+	if (wr->page_list_len > T3_MAX_FASTREG_DEPTH)
 		return -EINVAL;
 	*wr_cnt = 1;
-	wqe->fastreg.stag = cpu_to_be32(wr->wr.fast_reg.rkey);
-	wqe->fastreg.len = cpu_to_be32(wr->wr.fast_reg.length);
-	wqe->fastreg.va_base_hi = cpu_to_be32(wr->wr.fast_reg.iova_start >> 32);
-	wqe->fastreg.va_base_lo_fbo =
-		cpu_to_be32(wr->wr.fast_reg.iova_start & 0xffffffff);
+	wqe->fastreg.stag = cpu_to_be32(wr->rkey);
+	wqe->fastreg.len = cpu_to_be32(wr->length);
+	wqe->fastreg.va_base_hi = cpu_to_be32(wr->iova_start >> 32);
+	wqe->fastreg.va_base_lo_fbo = cpu_to_be32(wr->iova_start & 0xffffffff);
 	wqe->fastreg.page_type_perms = cpu_to_be32(
-		V_FR_PAGE_COUNT(wr->wr.fast_reg.page_list_len) |
-		V_FR_PAGE_SIZE(wr->wr.fast_reg.page_shift-12) |
+		V_FR_PAGE_COUNT(wr->page_list_len) |
+		V_FR_PAGE_SIZE(wr->page_shift-12) |
 		V_FR_TYPE(TPT_VATO) |
-		V_FR_PERMS(iwch_ib_to_tpt_access(wr->wr.fast_reg.access_flags)));
+		V_FR_PERMS(iwch_ib_to_tpt_access(wr->access_flags)));
 	p = &wqe->fastreg.pbl_addrs[0];
-	for (i = 0; i < wr->wr.fast_reg.page_list_len; i++, p++) {
+	for (i = 0; i < wr->page_list_len; i++, p++) {
 
 		/* If we need a 2nd WR, then set it up */
 		if (i == T3_MAX_FASTREG_FRAG) {
@@ -175,14 +175,14 @@ static int build_fastreg(union t3_wr *wqe, struct ib_send_wr *wr,
 				   Q_PTR2IDX((wq->wptr+1), wq->size_log2));
 			build_fw_riwrh((void *)wqe, T3_WR_FASTREG, 0,
 			       Q_GENBIT(wq->wptr + 1, wq->size_log2),
-			       0, 1 + wr->wr.fast_reg.page_list_len - T3_MAX_FASTREG_FRAG,
+			       0, 1 + wr->page_list_len - T3_MAX_FASTREG_FRAG,
 			       T3_EOP);
 
 			p = &wqe->pbl_frag.pbl_addrs[0];
 		}
-		*p = cpu_to_be64((u64)wr->wr.fast_reg.page_list->page_list[i]);
+		*p = cpu_to_be64((u64)wr->page_list->page_list[i]);
 	}
-	*flit_cnt = 5 + wr->wr.fast_reg.page_list_len;
+	*flit_cnt = 5 + wr->page_list_len;
 	if (*flit_cnt > 15)
 		*flit_cnt = 15;
 	return 0;
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index acfc2f22b382..1dc9f11a4243 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -528,8 +528,8 @@ static int build_rdma_write(struct t4_sq *sq, union t4_wr *wqe,
 	if (wr->num_sge > T4_MAX_SEND_SGE)
 		return -EINVAL;
 	wqe->write.r2 = 0;
-	wqe->write.stag_sink = cpu_to_be32(wr->wr.rdma.rkey);
-	wqe->write.to_sink = cpu_to_be64(wr->wr.rdma.remote_addr);
+	wqe->write.stag_sink = cpu_to_be32(rdma_wr(wr)->rkey);
+	wqe->write.to_sink = cpu_to_be64(rdma_wr(wr)->remote_addr);
 	if (wr->num_sge) {
 		if (wr->send_flags & IB_SEND_INLINE) {
 			ret = build_immd(sq, wqe->write.u.immd_src, wr,
@@ -566,10 +566,10 @@ static int build_rdma_read(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16)
 	if (wr->num_sge > 1)
 		return -EINVAL;
 	if (wr->num_sge) {
-		wqe->read.stag_src = cpu_to_be32(wr->wr.rdma.rkey);
-		wqe->read.to_src_hi = cpu_to_be32((u32)(wr->wr.rdma.remote_addr
+		wqe->read.stag_src = cpu_to_be32(rdma_wr(wr)->rkey);
+		wqe->read.to_src_hi = cpu_to_be32((u32)(rdma_wr(wr)->remote_addr
 							>> 32));
-		wqe->read.to_src_lo = cpu_to_be32((u32)wr->wr.rdma.remote_addr);
+		wqe->read.to_src_lo = cpu_to_be32((u32)rdma_wr(wr)->remote_addr);
 		wqe->read.stag_sink = cpu_to_be32(wr->sg_list[0].lkey);
 		wqe->read.plen = cpu_to_be32(wr->sg_list[0].length);
 		wqe->read.to_sink_hi = cpu_to_be32((u32)(wr->sg_list[0].addr
@@ -606,39 +606,36 @@ static int build_rdma_recv(struct c4iw_qp *qhp, union t4_recv_wr *wqe,
 }
 
 static int build_fastreg(struct t4_sq *sq, union t4_wr *wqe,
-			 struct ib_send_wr *wr, u8 *len16, u8 t5dev)
+			 struct ib_send_wr *send_wr, u8 *len16, u8 t5dev)
 {
-
+	struct ib_fast_reg_wr *wr = fast_reg_wr(send_wr);
 	struct fw_ri_immd *imdp;
 	__be64 *p;
 	int i;
-	int pbllen = roundup(wr->wr.fast_reg.page_list_len * sizeof(u64), 32);
+	int pbllen = roundup(wr->page_list_len * sizeof(u64), 32);
 	int rem;
 
-	if (wr->wr.fast_reg.page_list_len >
-	    t4_max_fr_depth(use_dsgl))
+	if (wr->page_list_len > t4_max_fr_depth(use_dsgl))
 		return -EINVAL;
 
 	wqe->fr.qpbinde_to_dcacpu = 0;
-	wqe->fr.pgsz_shift = wr->wr.fast_reg.page_shift - 12;
+	wqe->fr.pgsz_shift = wr->page_shift - 12;
 	wqe->fr.addr_type = FW_RI_VA_BASED_TO;
-	wqe->fr.mem_perms = c4iw_ib_to_tpt_access(wr->wr.fast_reg.access_flags);
+	wqe->fr.mem_perms = c4iw_ib_to_tpt_access(wr->access_flags);
 	wqe->fr.len_hi = 0;
-	wqe->fr.len_lo = cpu_to_be32(wr->wr.fast_reg.length);
-	wqe->fr.stag = cpu_to_be32(wr->wr.fast_reg.rkey);
-	wqe->fr.va_hi = cpu_to_be32(wr->wr.fast_reg.iova_start >> 32);
-	wqe->fr.va_lo_fbo = cpu_to_be32(wr->wr.fast_reg.iova_start &
-					0xffffffff);
+	wqe->fr.len_lo = cpu_to_be32(wr->length);
+	wqe->fr.stag = cpu_to_be32(wr->rkey);
+	wqe->fr.va_hi = cpu_to_be32(wr->iova_start >> 32);
+	wqe->fr.va_lo_fbo = cpu_to_be32(wr->iova_start & 0xffffffff);
 
 	if (t5dev && use_dsgl && (pbllen > max_fr_immd)) {
 		struct c4iw_fr_page_list *c4pl =
-			to_c4iw_fr_page_list(wr->wr.fast_reg.page_list);
+			to_c4iw_fr_page_list(wr->page_list);
 		struct fw_ri_dsgl *sglp;
 
-		for (i = 0; i < wr->wr.fast_reg.page_list_len; i++) {
-			wr->wr.fast_reg.page_list->page_list[i] = (__force u64)
-				cpu_to_be64((u64)
-				wr->wr.fast_reg.page_list->page_list[i]);
+		for (i = 0; i < wr->page_list_len; i++) {
+			wr->page_list->page_list[i] = (__force u64)
+				cpu_to_be64((u64)wr->page_list->page_list[i]);
 		}
 
 		sglp = (struct fw_ri_dsgl *)(&wqe->fr + 1);
@@ -657,9 +654,8 @@ static int build_fastreg(struct t4_sq *sq, union t4_wr *wqe,
 		imdp->immdlen = cpu_to_be32(pbllen);
 		p = (__be64 *)(imdp + 1);
 		rem = pbllen;
-		for (i = 0; i < wr->wr.fast_reg.page_list_len; i++) {
-			*p = cpu_to_be64(
-				(u64)wr->wr.fast_reg.page_list->page_list[i]);
+		for (i = 0; i < wr->page_list_len; i++) {
+			*p = cpu_to_be64((u64)wr->page_list->page_list[i]);
 			rem -= sizeof(*p);
 			if (++p == (__be64 *)&sq->queue[sq->size])
 				p = (__be64 *)sq->queue;
diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
index 67c7120f0d52..870e56b6b25f 100644
--- a/drivers/infiniband/hw/mlx4/mad.c
+++ b/drivers/infiniband/hw/mlx4/mad.c
@@ -457,7 +457,8 @@ int mlx4_ib_send_to_slave(struct mlx4_ib_dev *dev, int slave, u8 port,
 			 struct ib_grh *grh, struct ib_mad *mad)
 {
 	struct ib_sge list;
-	struct ib_send_wr wr, *bad_wr;
+	struct ib_ud_wr wr;
+	struct ib_send_wr *bad_wr;
 	struct mlx4_ib_demux_pv_ctx *tun_ctx;
 	struct mlx4_ib_demux_pv_qp *tun_qp;
 	struct mlx4_rcv_tunnel_mad *tun_mad;
@@ -582,18 +583,18 @@ int mlx4_ib_send_to_slave(struct mlx4_ib_dev *dev, int slave, u8 port,
 	list.length = sizeof (struct mlx4_rcv_tunnel_mad);
 	list.lkey = tun_ctx->pd->local_dma_lkey;
 
-	wr.wr.ud.ah = ah;
-	wr.wr.ud.port_num = port;
-	wr.wr.ud.remote_qkey = IB_QP_SET_QKEY;
-	wr.wr.ud.remote_qpn = dqpn;
-	wr.next = NULL;
-	wr.wr_id = ((u64) tun_tx_ix) | MLX4_TUN_SET_WRID_QPN(dest_qpt);
-	wr.sg_list = &list;
-	wr.num_sge = 1;
-	wr.opcode = IB_WR_SEND;
-	wr.send_flags = IB_SEND_SIGNALED;
+	wr.ah = ah;
+	wr.port_num = port;
+	wr.remote_qkey = IB_QP_SET_QKEY;
+	wr.remote_qpn = dqpn;
+	wr.wr.next = NULL;
+	wr.wr.wr_id = ((u64) tun_tx_ix) | MLX4_TUN_SET_WRID_QPN(dest_qpt);
+	wr.wr.sg_list = &list;
+	wr.wr.num_sge = 1;
+	wr.wr.opcode = IB_WR_SEND;
+	wr.wr.send_flags = IB_SEND_SIGNALED;
 
-	ret = ib_post_send(src_qp, &wr, &bad_wr);
+	ret = ib_post_send(src_qp, &wr.wr, &bad_wr);
 out:
 	if (ret)
 		ib_destroy_ah(ah);
@@ -1186,7 +1187,8 @@ int mlx4_ib_send_to_wire(struct mlx4_ib_dev *dev, int slave, u8 port,
 			 u8 *s_mac, u16 vlan_id, struct ib_mad *mad)
 {
 	struct ib_sge list;
-	struct ib_send_wr wr, *bad_wr;
+	struct ib_ud_wr wr;
+	struct ib_send_wr *bad_wr;
 	struct mlx4_ib_demux_pv_ctx *sqp_ctx;
 	struct mlx4_ib_demux_pv_qp *sqp;
 	struct mlx4_mad_snd_buf *sqp_mad;
@@ -1257,17 +1259,17 @@ int mlx4_ib_send_to_wire(struct mlx4_ib_dev *dev, int slave, u8 port,
 	list.length = sizeof (struct mlx4_mad_snd_buf);
 	list.lkey = sqp_ctx->pd->local_dma_lkey;
 
-	wr.wr.ud.ah = ah;
-	wr.wr.ud.port_num = port;
-	wr.wr.ud.pkey_index = wire_pkey_ix;
-	wr.wr.ud.remote_qkey = qkey;
-	wr.wr.ud.remote_qpn = remote_qpn;
-	wr.next = NULL;
-	wr.wr_id = ((u64) wire_tx_ix) | MLX4_TUN_SET_WRID_QPN(src_qpnum);
-	wr.sg_list = &list;
-	wr.num_sge = 1;
-	wr.opcode = IB_WR_SEND;
-	wr.send_flags = IB_SEND_SIGNALED;
+	wr.ah = ah;
+	wr.port_num = port;
+	wr.pkey_index = wire_pkey_ix;
+	wr.remote_qkey = qkey;
+	wr.remote_qpn = remote_qpn;
+	wr.wr.next = NULL;
+	wr.wr.wr_id = ((u64) wire_tx_ix) | MLX4_TUN_SET_WRID_QPN(src_qpnum);
+	wr.wr.sg_list = &list;
+	wr.wr.num_sge = 1;
+	wr.wr.opcode = IB_WR_SEND;
+	wr.wr.send_flags = IB_SEND_SIGNALED;
 	if (s_mac)
 		memcpy(to_mah(ah)->av.eth.s_mac, s_mac, 6);
 	if (vlan_id < 0x1000)
@@ -1275,7 +1277,7 @@ int mlx4_ib_send_to_wire(struct mlx4_ib_dev *dev, int slave, u8 port,
 		to_mah(ah)->av.eth.vlan = cpu_to_be16(vlan_id);
 
 
-	ret = ib_post_send(send_qp, &wr, &bad_wr);
+	ret = ib_post_send(send_qp, &wr.wr, &bad_wr);
 out:
 	if (ret)
 		ib_destroy_ah(ah);
diff --git a/drivers/infiniband/hw/mlx4/mr.c b/drivers/infiniband/hw/mlx4/mr.c
index 2542fd3c1a49..5bba176e9dfa 100644
--- a/drivers/infiniband/hw/mlx4/mr.c
+++ b/drivers/infiniband/hw/mlx4/mr.c
@@ -321,21 +321,21 @@ err_free:
 int mlx4_ib_bind_mw(struct ib_qp *qp, struct ib_mw *mw,
 		    struct ib_mw_bind *mw_bind)
 {
-	struct ib_send_wr wr;
+	struct ib_bind_mw_wr wr;
 	struct ib_send_wr *bad_wr;
 	int ret;
 
 	memset(&wr, 0, sizeof(wr));
-	wr.opcode = IB_WR_BIND_MW;
-	wr.wr_id = mw_bind->wr_id;
-	wr.send_flags = mw_bind->send_flags;
-	wr.wr.bind_mw.mw = mw;
-	wr.wr.bind_mw.bind_info = mw_bind->bind_info;
-	wr.wr.bind_mw.rkey = ib_inc_rkey(mw->rkey);
+	wr.wr.opcode = IB_WR_BIND_MW;
+	wr.wr.wr_id = mw_bind->wr_id;
+	wr.wr.send_flags = mw_bind->send_flags;
+	wr.mw = mw;
+	wr.bind_info = mw_bind->bind_info;
+	wr.rkey = ib_inc_rkey(mw->rkey);
 
-	ret = mlx4_ib_post_send(qp, &wr, &bad_wr);
+	ret = mlx4_ib_post_send(qp, &wr.wr, &bad_wr);
 	if (!ret)
-		mw->rkey = wr.wr.bind_mw.rkey;
+		mw->rkey = wr.rkey;
 
 	return ret;
 }
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index 42a051b088e4..f2b2a61898f8 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -2133,14 +2133,14 @@ static int vf_get_qp0_qkey(struct mlx4_dev *dev, int qpn, u32 *qkey)
 }
 
 static int build_sriov_qp0_header(struct mlx4_ib_sqp *sqp,
-				  struct ib_send_wr *wr,
+				  struct ib_ud_wr *wr,
 				  void *wqe, unsigned *mlx_seg_len)
 {
 	struct mlx4_ib_dev *mdev = to_mdev(sqp->qp.ibqp.device);
 	struct ib_device *ib_dev = &mdev->ib_dev;
 	struct mlx4_wqe_mlx_seg *mlx = wqe;
 	struct mlx4_wqe_inline_seg *inl = wqe + sizeof *mlx;
-	struct mlx4_ib_ah *ah = to_mah(wr->wr.ud.ah);
+	struct mlx4_ib_ah *ah = to_mah(wr->ah);
 	u16 pkey;
 	u32 qkey;
 	int send_size;
@@ -2148,13 +2148,13 @@ static int build_sriov_qp0_header(struct mlx4_ib_sqp *sqp,
 	int spc;
 	int i;
 
-	if (wr->opcode != IB_WR_SEND)
+	if (wr->wr.opcode != IB_WR_SEND)
 		return -EINVAL;
 
 	send_size = 0;
 
-	for (i = 0; i < wr->num_sge; ++i)
-		send_size += wr->sg_list[i].length;
+	for (i = 0; i < wr->wr.num_sge; ++i)
+		send_size += wr->wr.sg_list[i].length;
 
 	/* for proxy-qp0 sends, need to add in size of tunnel header */
 	/* for tunnel-qp0 sends, tunnel header is already in s/g list */
@@ -2179,11 +2179,11 @@ static int build_sriov_qp0_header(struct mlx4_ib_sqp *sqp,
 	mlx->rlid = sqp->ud_header.lrh.destination_lid;
 
 	sqp->ud_header.lrh.virtual_lane = 0;
-	sqp->ud_header.bth.solicited_event = !!(wr->send_flags & IB_SEND_SOLICITED);
+	sqp->ud_header.bth.solicited_event = !!(wr->wr.send_flags & IB_SEND_SOLICITED);
 	ib_get_cached_pkey(ib_dev, sqp->qp.port, 0, &pkey);
 	sqp->ud_header.bth.pkey = cpu_to_be16(pkey);
 	if (sqp->qp.mlx4_ib_qp_type == MLX4_IB_QPT_TUN_SMI_OWNER)
-		sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->wr.ud.remote_qpn);
+		sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->remote_qpn);
 	else
 		sqp->ud_header.bth.destination_qpn =
 			cpu_to_be32(mdev->dev->caps.qp0_tunnel[sqp->qp.port - 1]);
@@ -2255,14 +2255,14 @@ static void mlx4_u64_to_smac(u8 *dst_mac, u64 src_mac)
 	}
 }
 
-static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr,
+static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_ud_wr *wr,
 			    void *wqe, unsigned *mlx_seg_len)
 {
 	struct ib_device *ib_dev = sqp->qp.ibqp.device;
 	struct mlx4_wqe_mlx_seg *mlx = wqe;
 	struct mlx4_wqe_ctrl_seg *ctrl = wqe;
 	struct mlx4_wqe_inline_seg *inl = wqe + sizeof *mlx;
-	struct mlx4_ib_ah *ah = to_mah(wr->wr.ud.ah);
+	struct mlx4_ib_ah *ah = to_mah(wr->ah);
 	union ib_gid sgid;
 	u16 pkey;
 	int send_size;
@@ -2276,8 +2276,8 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr,
 	bool is_grh;
 
 	send_size = 0;
-	for (i = 0; i < wr->num_sge; ++i)
-		send_size += wr->sg_list[i].length;
+	for (i = 0; i < wr->wr.num_sge; ++i)
+		send_size += wr->wr.sg_list[i].length;
 
 	is_eth = rdma_port_get_link_layer(sqp->qp.ibqp.device, sqp->qp.port) == IB_LINK_LAYER_ETHERNET;
 	is_grh = mlx4_ib_ah_grh_present(ah);
@@ -2357,7 +2357,7 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr,
 		mlx->rlid = sqp->ud_header.lrh.destination_lid;
 	}
 
-	switch (wr->opcode) {
+	switch (wr->wr.opcode) {
 	case IB_WR_SEND:
 		sqp->ud_header.bth.opcode = IB_OPCODE_UD_SEND_ONLY;
 		sqp->ud_header.immediate_present = 0;
@@ -2365,7 +2365,7 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr,
 	case IB_WR_SEND_WITH_IMM:
 		sqp->ud_header.bth.opcode = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE;
 		sqp->ud_header.immediate_present = 1;
-		sqp->ud_header.immediate_data = wr->ex.imm_data;
+		sqp->ud_header.immediate_data = wr->wr.ex.imm_data;
 		break;
 	default:
 		return -EINVAL;
@@ -2408,16 +2408,16 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr,
 		if (sqp->ud_header.lrh.destination_lid == IB_LID_PERMISSIVE)
 			sqp->ud_header.lrh.source_lid = IB_LID_PERMISSIVE;
 	}
-	sqp->ud_header.bth.solicited_event = !!(wr->send_flags & IB_SEND_SOLICITED);
+	sqp->ud_header.bth.solicited_event = !!(wr->wr.send_flags & IB_SEND_SOLICITED);
 	if (!sqp->qp.ibqp.qp_num)
 		ib_get_cached_pkey(ib_dev, sqp->qp.port, sqp->pkey_index, &pkey);
 	else
-		ib_get_cached_pkey(ib_dev, sqp->qp.port, wr->wr.ud.pkey_index, &pkey);
+		ib_get_cached_pkey(ib_dev, sqp->qp.port, wr->pkey_index, &pkey);
 	sqp->ud_header.bth.pkey = cpu_to_be16(pkey);
-	sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->wr.ud.remote_qpn);
+	sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->remote_qpn);
 	sqp->ud_header.bth.psn = cpu_to_be32((sqp->send_psn++) & ((1 << 24) - 1));
-	sqp->ud_header.deth.qkey = cpu_to_be32(wr->wr.ud.remote_qkey & 0x80000000 ?
-					       sqp->qkey : wr->wr.ud.remote_qkey);
+	sqp->ud_header.deth.qkey = cpu_to_be32(wr->remote_qkey & 0x80000000 ?
+					       sqp->qkey : wr->remote_qkey);
 	sqp->ud_header.deth.source_qpn = cpu_to_be32(sqp->qp.ibqp.qp_num);
 
 	header_size = ib_ud_header_pack(&sqp->ud_header, sqp->header_buf);
@@ -2505,43 +2505,45 @@ static __be32 convert_access(int acc)
 		cpu_to_be32(MLX4_WQE_FMR_PERM_LOCAL_READ);
 }
 
-static void set_fmr_seg(struct mlx4_wqe_fmr_seg *fseg, struct ib_send_wr *wr)
+static void set_fmr_seg(struct mlx4_wqe_fmr_seg *fseg,
+			struct ib_fast_reg_wr *wr)
 {
-	struct mlx4_ib_fast_reg_page_list *mfrpl = to_mfrpl(wr->wr.fast_reg.page_list);
+	struct mlx4_ib_fast_reg_page_list *mfrpl = to_mfrpl(wr->page_list);
 	int i;
 
-	for (i = 0; i < wr->wr.fast_reg.page_list_len; ++i)
+	for (i = 0; i < wr->page_list_len; ++i)
 		mfrpl->mapped_page_list[i] =
-			cpu_to_be64(wr->wr.fast_reg.page_list->page_list[i] |
+			cpu_to_be64(wr->page_list->page_list[i] |
 				    MLX4_MTT_FLAG_PRESENT);
 
-	fseg->flags = convert_access(wr->wr.fast_reg.access_flags);
-	fseg->mem_key = cpu_to_be32(wr->wr.fast_reg.rkey);
+	fseg->flags = convert_access(wr->access_flags);
+	fseg->mem_key = cpu_to_be32(wr->rkey);
 	fseg->buf_list = cpu_to_be64(mfrpl->map);
-	fseg->start_addr = cpu_to_be64(wr->wr.fast_reg.iova_start);
-	fseg->reg_len = cpu_to_be64(wr->wr.fast_reg.length);
+	fseg->start_addr = cpu_to_be64(wr->iova_start);
+	fseg->reg_len = cpu_to_be64(wr->length);
 	fseg->offset = 0; /* XXX -- is this just for ZBVA? */
-	fseg->page_size = cpu_to_be32(wr->wr.fast_reg.page_shift);
+	fseg->page_size = cpu_to_be32(wr->page_shift);
 	fseg->reserved[0] = 0;
 	fseg->reserved[1] = 0;
 }
 
-static void set_bind_seg(struct mlx4_wqe_bind_seg *bseg, struct ib_send_wr *wr)
+static void set_bind_seg(struct mlx4_wqe_bind_seg *bseg,
+			 struct ib_bind_mw_wr *wr)
 {
 	bseg->flags1 =
-		convert_access(wr->wr.bind_mw.bind_info.mw_access_flags) &
+		convert_access(wr->bind_info.mw_access_flags) &
 		cpu_to_be32(MLX4_WQE_FMR_AND_BIND_PERM_REMOTE_READ |
 			    MLX4_WQE_FMR_AND_BIND_PERM_REMOTE_WRITE |
 			    MLX4_WQE_FMR_AND_BIND_PERM_ATOMIC);
 	bseg->flags2 = 0;
-	if (wr->wr.bind_mw.mw->type == IB_MW_TYPE_2)
+	if (wr->mw->type == IB_MW_TYPE_2)
 		bseg->flags2 |= cpu_to_be32(MLX4_WQE_BIND_TYPE_2);
-	if (wr->wr.bind_mw.bind_info.mw_access_flags & IB_ZERO_BASED)
+	if (wr->bind_info.mw_access_flags & IB_ZERO_BASED)
 		bseg->flags2 |= cpu_to_be32(MLX4_WQE_BIND_ZERO_BASED);
-	bseg->new_rkey = cpu_to_be32(wr->wr.bind_mw.rkey);
-	bseg->lkey = cpu_to_be32(wr->wr.bind_mw.bind_info.mr->lkey);
-	bseg->addr = cpu_to_be64(wr->wr.bind_mw.bind_info.addr);
-	bseg->length = cpu_to_be64(wr->wr.bind_mw.bind_info.length);
+	bseg->new_rkey = cpu_to_be32(wr->rkey);
+	bseg->lkey = cpu_to_be32(wr->bind_info.mr->lkey);
+	bseg->addr = cpu_to_be64(wr->bind_info.addr);
+	bseg->length = cpu_to_be64(wr->bind_info.length);
 }
 
 static void set_local_inv_seg(struct mlx4_wqe_local_inval_seg *iseg, u32 rkey)
@@ -2558,46 +2560,47 @@ static __always_inline void set_raddr_seg(struct mlx4_wqe_raddr_seg *rseg,
 	rseg->reserved = 0;
 }
 
-static void set_atomic_seg(struct mlx4_wqe_atomic_seg *aseg, struct ib_send_wr *wr)
+static void set_atomic_seg(struct mlx4_wqe_atomic_seg *aseg,
+			   struct ib_atomic_wr *wr)
 {
-	if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
-		aseg->swap_add = cpu_to_be64(wr->wr.atomic.swap);
-		aseg->compare = cpu_to_be64(wr->wr.atomic.compare_add);
-	} else if (wr->opcode == IB_WR_MASKED_ATOMIC_FETCH_AND_ADD) {
-		aseg->swap_add = cpu_to_be64(wr->wr.atomic.compare_add);
-		aseg->compare = cpu_to_be64(wr->wr.atomic.compare_add_mask);
+	if (wr->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
+		aseg->swap_add = cpu_to_be64(wr->swap);
+		aseg->compare = cpu_to_be64(wr->compare_add);
+	} else if (wr->wr.opcode == IB_WR_MASKED_ATOMIC_FETCH_AND_ADD) {
+		aseg->swap_add = cpu_to_be64(wr->compare_add);
+		aseg->compare = cpu_to_be64(wr->compare_add_mask);
 	} else {
-		aseg->swap_add = cpu_to_be64(wr->wr.atomic.compare_add);
+		aseg->swap_add = cpu_to_be64(wr->compare_add);
 		aseg->compare = 0;
 	}
 
 }
 
 static void set_masked_atomic_seg(struct mlx4_wqe_masked_atomic_seg *aseg,
-				  struct ib_send_wr *wr)
+				  struct ib_atomic_wr *wr)
 {
-	aseg->swap_add = cpu_to_be64(wr->wr.atomic.swap);
-	aseg->swap_add_mask = cpu_to_be64(wr->wr.atomic.swap_mask);
-	aseg->compare = cpu_to_be64(wr->wr.atomic.compare_add);
-	aseg->compare_mask = cpu_to_be64(wr->wr.atomic.compare_add_mask);
+	aseg->swap_add = cpu_to_be64(wr->swap);
+	aseg->swap_add_mask = cpu_to_be64(wr->swap_mask);
+	aseg->compare = cpu_to_be64(wr->compare_add);
+	aseg->compare_mask = cpu_to_be64(wr->compare_add_mask);
 }
 
 static void set_datagram_seg(struct mlx4_wqe_datagram_seg *dseg,
-			     struct ib_send_wr *wr)
+			     struct ib_ud_wr *wr)
 {
-	memcpy(dseg->av, &to_mah(wr->wr.ud.ah)->av, sizeof (struct mlx4_av));
-	dseg->dqpn = cpu_to_be32(wr->wr.ud.remote_qpn);
-	dseg->qkey = cpu_to_be32(wr->wr.ud.remote_qkey);
-	dseg->vlan = to_mah(wr->wr.ud.ah)->av.eth.vlan;
-	memcpy(dseg->mac, to_mah(wr->wr.ud.ah)->av.eth.mac, 6);
+	memcpy(dseg->av, &to_mah(wr->ah)->av, sizeof (struct mlx4_av));
+	dseg->dqpn = cpu_to_be32(wr->remote_qpn);
+	dseg->qkey = cpu_to_be32(wr->remote_qkey);
+	dseg->vlan = to_mah(wr->ah)->av.eth.vlan;
+	memcpy(dseg->mac, to_mah(wr->ah)->av.eth.mac, 6);
 }
 
 static void set_tunnel_datagram_seg(struct mlx4_ib_dev *dev,
 				    struct mlx4_wqe_datagram_seg *dseg,
-				    struct ib_send_wr *wr,
+				    struct ib_ud_wr *wr,
 				    enum mlx4_ib_qp_type qpt)
 {
-	union mlx4_ext_av *av = &to_mah(wr->wr.ud.ah)->av;
+	union mlx4_ext_av *av = &to_mah(wr->ah)->av;
 	struct mlx4_av sqp_av = {0};
 	int port = *((u8 *) &av->ib.port_pd) & 0x3;
 
@@ -2616,18 +2619,18 @@ static void set_tunnel_datagram_seg(struct mlx4_ib_dev *dev,
 	dseg->qkey = cpu_to_be32(IB_QP_SET_QKEY);
 }
 
-static void build_tunnel_header(struct ib_send_wr *wr, void *wqe, unsigned *mlx_seg_len)
+static void build_tunnel_header(struct ib_ud_wr *wr, void *wqe, unsigned *mlx_seg_len)
 {
 	struct mlx4_wqe_inline_seg *inl = wqe;
 	struct mlx4_ib_tunnel_header hdr;
-	struct mlx4_ib_ah *ah = to_mah(wr->wr.ud.ah);
+	struct mlx4_ib_ah *ah = to_mah(wr->ah);
 	int spc;
 	int i;
 
 	memcpy(&hdr.av, &ah->av, sizeof hdr.av);
-	hdr.remote_qpn = cpu_to_be32(wr->wr.ud.remote_qpn);
-	hdr.pkey_index = cpu_to_be16(wr->wr.ud.pkey_index);
-	hdr.qkey = cpu_to_be32(wr->wr.ud.remote_qkey);
+	hdr.remote_qpn = cpu_to_be32(wr->remote_qpn);
+	hdr.pkey_index = cpu_to_be16(wr->pkey_index);
+	hdr.qkey = cpu_to_be32(wr->remote_qkey);
 	memcpy(hdr.mac, ah->av.eth.mac, 6);
 	hdr.vlan = ah->av.eth.vlan;
 
@@ -2699,22 +2702,22 @@ static void __set_data_seg(struct mlx4_wqe_data_seg *dseg, struct ib_sge *sg)
 	dseg->addr = cpu_to_be64(sg->addr);
 }
 
-static int build_lso_seg(struct mlx4_wqe_lso_seg *wqe, struct ib_send_wr *wr,
+static int build_lso_seg(struct mlx4_wqe_lso_seg *wqe, struct ib_ud_wr *wr,
 			 struct mlx4_ib_qp *qp, unsigned *lso_seg_len,
 			 __be32 *lso_hdr_sz, __be32 *blh)
 {
-	unsigned halign = ALIGN(sizeof *wqe + wr->wr.ud.hlen, 16);
+	unsigned halign = ALIGN(sizeof *wqe + wr->hlen, 16);
 
 	if (unlikely(halign > MLX4_IB_CACHE_LINE_SIZE))
 		*blh = cpu_to_be32(1 << 6);
 
 	if (unlikely(!(qp->flags & MLX4_IB_QP_LSO) &&
-		     wr->num_sge > qp->sq.max_gs - (halign >> 4)))
+		     wr->wr.num_sge > qp->sq.max_gs - (halign >> 4)))
 		return -EINVAL;
 
-	memcpy(wqe->header, wr->wr.ud.header, wr->wr.ud.hlen);
+	memcpy(wqe->header, wr->header, wr->hlen);
 
-	*lso_hdr_sz = cpu_to_be32(wr->wr.ud.mss << 16 | wr->wr.ud.hlen);
+	*lso_hdr_sz = cpu_to_be32(wr->mss << 16 | wr->hlen);
 	*lso_seg_len = halign;
 	return 0;
 }
@@ -2813,11 +2816,11 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 		case IB_WR_ATOMIC_CMP_AND_SWP:
 		case IB_WR_ATOMIC_FETCH_AND_ADD:
 		case IB_WR_MASKED_ATOMIC_FETCH_AND_ADD:
-			set_raddr_seg(wqe, wr->wr.atomic.remote_addr,
-				      wr->wr.atomic.rkey);
+			set_raddr_seg(wqe, atomic_wr(wr)->remote_addr,
+				      atomic_wr(wr)->rkey);
 			wqe += sizeof (struct mlx4_wqe_raddr_seg);
 
-			set_atomic_seg(wqe, wr);
+			set_atomic_seg(wqe, atomic_wr(wr));
 			wqe += sizeof (struct mlx4_wqe_atomic_seg);
 
 			size += (sizeof (struct mlx4_wqe_raddr_seg) +
@@ -2826,11 +2829,11 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 			break;
 
 		case IB_WR_MASKED_ATOMIC_CMP_AND_SWP:
-			set_raddr_seg(wqe, wr->wr.atomic.remote_addr,
-				      wr->wr.atomic.rkey);
+			set_raddr_seg(wqe, atomic_wr(wr)->remote_addr,
+				      atomic_wr(wr)->rkey);
 			wqe += sizeof (struct mlx4_wqe_raddr_seg);
 
-			set_masked_atomic_seg(wqe, wr);
+			set_masked_atomic_seg(wqe, atomic_wr(wr));
 			wqe += sizeof (struct mlx4_wqe_masked_atomic_seg);
 
 			size += (sizeof (struct mlx4_wqe_raddr_seg) +
@@ -2841,8 +2844,8 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 		case IB_WR_RDMA_READ:
 		case IB_WR_RDMA_WRITE:
 		case IB_WR_RDMA_WRITE_WITH_IMM:
-			set_raddr_seg(wqe, wr->wr.rdma.remote_addr,
-				      wr->wr.rdma.rkey);
+			set_raddr_seg(wqe, rdma_wr(wr)->remote_addr,
+				      rdma_wr(wr)->rkey);
 			wqe += sizeof (struct mlx4_wqe_raddr_seg);
 			size += sizeof (struct mlx4_wqe_raddr_seg) / 16;
 			break;
@@ -2858,7 +2861,7 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 		case IB_WR_FAST_REG_MR:
 			ctrl->srcrb_flags |=
 				cpu_to_be32(MLX4_WQE_CTRL_STRONG_ORDER);
-			set_fmr_seg(wqe, wr);
+			set_fmr_seg(wqe, fast_reg_wr(wr));
 			wqe += sizeof (struct mlx4_wqe_fmr_seg);
 			size += sizeof (struct mlx4_wqe_fmr_seg) / 16;
 			break;
@@ -2866,7 +2869,7 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 		case IB_WR_BIND_MW:
 			ctrl->srcrb_flags |=
 				cpu_to_be32(MLX4_WQE_CTRL_STRONG_ORDER);
-			set_bind_seg(wqe, wr);
+			set_bind_seg(wqe, bind_mw_wr(wr));
 			wqe += sizeof(struct mlx4_wqe_bind_seg);
 			size += sizeof(struct mlx4_wqe_bind_seg) / 16;
 			break;
@@ -2877,7 +2880,8 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 			break;
 
 		case MLX4_IB_QPT_TUN_SMI_OWNER:
-			err = build_sriov_qp0_header(to_msqp(qp), wr, ctrl, &seglen);
+			err = build_sriov_qp0_header(to_msqp(qp), ud_wr(wr),
+						     ctrl, &seglen);
 			if (unlikely(err)) {
 				*bad_wr = wr;
 				goto out;
@@ -2888,19 +2892,20 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 		case MLX4_IB_QPT_TUN_SMI:
 		case MLX4_IB_QPT_TUN_GSI:
 			/* this is a UD qp used in MAD responses to slaves. */
-			set_datagram_seg(wqe, wr);
+			set_datagram_seg(wqe, ud_wr(wr));
 			/* set the forced-loopback bit in the data seg av */
 			*(__be32 *) wqe |= cpu_to_be32(0x80000000);
 			wqe += sizeof (struct mlx4_wqe_datagram_seg);
 			size += sizeof (struct mlx4_wqe_datagram_seg) / 16;
 			break;
 		case MLX4_IB_QPT_UD:
-			set_datagram_seg(wqe, wr);
+			set_datagram_seg(wqe, ud_wr(wr));
 			wqe += sizeof (struct mlx4_wqe_datagram_seg);
 			size += sizeof (struct mlx4_wqe_datagram_seg) / 16;
 
 			if (wr->opcode == IB_WR_LSO) {
-				err = build_lso_seg(wqe, wr, qp, &seglen, &lso_hdr_sz, &blh);
+				err = build_lso_seg(wqe, ud_wr(wr), qp, &seglen,
+						    &lso_hdr_sz, &blh);
 				if (unlikely(err)) {
 					*bad_wr = wr;
 					goto out;
@@ -2912,7 +2917,8 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 			break;
 
 		case MLX4_IB_QPT_PROXY_SMI_OWNER:
-			err = build_sriov_qp0_header(to_msqp(qp), wr, ctrl, &seglen);
+			err = build_sriov_qp0_header(to_msqp(qp), ud_wr(wr),
+						     ctrl, &seglen);
 			if (unlikely(err)) {
 				*bad_wr = wr;
 				goto out;
@@ -2923,7 +2929,7 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 			add_zero_len_inline(wqe);
 			wqe += 16;
 			size++;
-			build_tunnel_header(wr, wqe, &seglen);
+			build_tunnel_header(ud_wr(wr), wqe, &seglen);
 			wqe += seglen;
 			size += seglen / 16;
 			break;
@@ -2933,18 +2939,20 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
2933 * In this case we first add a UD segment targeting 2939 * In this case we first add a UD segment targeting
2934 * the tunnel qp, and then add a header with address 2940 * the tunnel qp, and then add a header with address
2935 * information */ 2941 * information */
2936 set_tunnel_datagram_seg(to_mdev(ibqp->device), wqe, wr, 2942 set_tunnel_datagram_seg(to_mdev(ibqp->device), wqe,
2943 ud_wr(wr),
2937 qp->mlx4_ib_qp_type); 2944 qp->mlx4_ib_qp_type);
2938 wqe += sizeof (struct mlx4_wqe_datagram_seg); 2945 wqe += sizeof (struct mlx4_wqe_datagram_seg);
2939 size += sizeof (struct mlx4_wqe_datagram_seg) / 16; 2946 size += sizeof (struct mlx4_wqe_datagram_seg) / 16;
2940 build_tunnel_header(wr, wqe, &seglen); 2947 build_tunnel_header(ud_wr(wr), wqe, &seglen);
2941 wqe += seglen; 2948 wqe += seglen;
2942 size += seglen / 16; 2949 size += seglen / 16;
2943 break; 2950 break;
2944 2951
2945 case MLX4_IB_QPT_SMI: 2952 case MLX4_IB_QPT_SMI:
2946 case MLX4_IB_QPT_GSI: 2953 case MLX4_IB_QPT_GSI:
2947 err = build_mlx_header(to_msqp(qp), wr, ctrl, &seglen); 2954 err = build_mlx_header(to_msqp(qp), ud_wr(wr), ctrl,
2955 &seglen);
2948 if (unlikely(err)) { 2956 if (unlikely(err)) {
2949 *bad_wr = wr; 2957 *bad_wr = wr;
2950 goto out; 2958 goto out;
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index 22123b79d550..29f3ecdbe790 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -245,6 +245,7 @@ enum mlx5_ib_qp_flags {
245}; 245};
246 246
247struct mlx5_umr_wr { 247struct mlx5_umr_wr {
248 struct ib_send_wr wr;
248 union { 249 union {
249 u64 virt_addr; 250 u64 virt_addr;
250 u64 offset; 251 u64 offset;
@@ -257,6 +258,11 @@ struct mlx5_umr_wr {
257 u32 mkey; 258 u32 mkey;
258}; 259};
259 260
261static inline struct mlx5_umr_wr *umr_wr(struct ib_send_wr *wr)
262{
263 return container_of(wr, struct mlx5_umr_wr, wr);
264}
265
260struct mlx5_shared_mr_info { 266struct mlx5_shared_mr_info {
261 int mr_id; 267 int mr_id;
262 struct ib_umem *umem; 268 struct ib_umem *umem;
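The mlx5_umr_wr hunk above is the template the whole wr-cleanup series follows: each opcode-specific work request becomes a structure that embeds struct ib_send_wr as its generic part, and a small container_of() helper narrows the generic pointer a post_send handler receives back to the derived type. A minimal sketch of the pattern, with example_wr as a purely illustrative name rather than anything defined in ib_verbs.h:

struct example_wr {
	struct ib_send_wr wr;	/* generic part: wr_id, opcode, send_flags, sg_list, ... */
	u64 remote_addr;	/* opcode-specific payload follows the embedded WR */
	u32 rkey;
};

static inline struct example_wr *example_wr(struct ib_send_wr *wr)
{
	return container_of(wr, struct example_wr, wr);
}

Because the embedded ib_send_wr is placed first, the container_of() amounts to a type-safe zero-offset conversion, which is what lets the ud_wr(), rdma_wr(), atomic_wr() and fast_reg_wr() helpers appear throughout the driver hunks below without changing behaviour.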
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index 54a15b5d336d..b30d4ae0fb61 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -687,7 +687,7 @@ static void prep_umr_reg_wqe(struct ib_pd *pd, struct ib_send_wr *wr,
687 int access_flags) 687 int access_flags)
688{ 688{
689 struct mlx5_ib_dev *dev = to_mdev(pd->device); 689 struct mlx5_ib_dev *dev = to_mdev(pd->device);
690 struct mlx5_umr_wr *umrwr = (struct mlx5_umr_wr *)&wr->wr.fast_reg; 690 struct mlx5_umr_wr *umrwr = umr_wr(wr);
691 691
692 sg->addr = dma; 692 sg->addr = dma;
693 sg->length = ALIGN(sizeof(u64) * n, 64); 693 sg->length = ALIGN(sizeof(u64) * n, 64);
@@ -715,7 +715,7 @@ static void prep_umr_reg_wqe(struct ib_pd *pd, struct ib_send_wr *wr,
715static void prep_umr_unreg_wqe(struct mlx5_ib_dev *dev, 715static void prep_umr_unreg_wqe(struct mlx5_ib_dev *dev,
716 struct ib_send_wr *wr, u32 key) 716 struct ib_send_wr *wr, u32 key)
717{ 717{
718 struct mlx5_umr_wr *umrwr = (struct mlx5_umr_wr *)&wr->wr.fast_reg; 718 struct mlx5_umr_wr *umrwr = umr_wr(wr);
719 719
720 wr->send_flags = MLX5_IB_SEND_UMR_UNREG | MLX5_IB_SEND_UMR_FAIL_IF_FREE; 720 wr->send_flags = MLX5_IB_SEND_UMR_UNREG | MLX5_IB_SEND_UMR_FAIL_IF_FREE;
721 wr->opcode = MLX5_IB_WR_UMR; 721 wr->opcode = MLX5_IB_WR_UMR;
@@ -752,7 +752,8 @@ static struct mlx5_ib_mr *reg_umr(struct ib_pd *pd, struct ib_umem *umem,
752 struct device *ddev = dev->ib_dev.dma_device; 752 struct device *ddev = dev->ib_dev.dma_device;
753 struct umr_common *umrc = &dev->umrc; 753 struct umr_common *umrc = &dev->umrc;
754 struct mlx5_ib_umr_context umr_context; 754 struct mlx5_ib_umr_context umr_context;
755 struct ib_send_wr wr, *bad; 755 struct mlx5_umr_wr umrwr;
756 struct ib_send_wr *bad;
756 struct mlx5_ib_mr *mr; 757 struct mlx5_ib_mr *mr;
757 struct ib_sge sg; 758 struct ib_sge sg;
758 int size; 759 int size;
@@ -798,14 +799,14 @@ static struct mlx5_ib_mr *reg_umr(struct ib_pd *pd, struct ib_umem *umem,
798 goto free_pas; 799 goto free_pas;
799 } 800 }
800 801
801 memset(&wr, 0, sizeof(wr)); 802 memset(&umrwr, 0, sizeof(umrwr));
802 wr.wr_id = (u64)(unsigned long)&umr_context; 803 umrwr.wr.wr_id = (u64)(unsigned long)&umr_context;
803 prep_umr_reg_wqe(pd, &wr, &sg, dma, npages, mr->mmr.key, page_shift, 804 prep_umr_reg_wqe(pd, &umrwr.wr, &sg, dma, npages, mr->mmr.key,
804 virt_addr, len, access_flags); 805 page_shift, virt_addr, len, access_flags);
805 806
806 mlx5_ib_init_umr_context(&umr_context); 807 mlx5_ib_init_umr_context(&umr_context);
807 down(&umrc->sem); 808 down(&umrc->sem);
808 err = ib_post_send(umrc->qp, &wr, &bad); 809 err = ib_post_send(umrc->qp, &umrwr.wr, &bad);
809 if (err) { 810 if (err) {
810 mlx5_ib_warn(dev, "post send failed, err %d\n", err); 811 mlx5_ib_warn(dev, "post send failed, err %d\n", err);
811 goto unmap_dma; 812 goto unmap_dma;
@@ -851,8 +852,8 @@ int mlx5_ib_update_mtt(struct mlx5_ib_mr *mr, u64 start_page_index, int npages,
851 int size; 852 int size;
852 __be64 *pas; 853 __be64 *pas;
853 dma_addr_t dma; 854 dma_addr_t dma;
854 struct ib_send_wr wr, *bad; 855 struct ib_send_wr *bad;
855 struct mlx5_umr_wr *umrwr = (struct mlx5_umr_wr *)&wr.wr.fast_reg; 856 struct mlx5_umr_wr wr;
856 struct ib_sge sg; 857 struct ib_sge sg;
857 int err = 0; 858 int err = 0;
858 const int page_index_alignment = MLX5_UMR_MTT_ALIGNMENT / sizeof(u64); 859 const int page_index_alignment = MLX5_UMR_MTT_ALIGNMENT / sizeof(u64);
@@ -917,26 +918,26 @@ int mlx5_ib_update_mtt(struct mlx5_ib_mr *mr, u64 start_page_index, int npages,
917 dma_sync_single_for_device(ddev, dma, size, DMA_TO_DEVICE); 918 dma_sync_single_for_device(ddev, dma, size, DMA_TO_DEVICE);
918 919
919 memset(&wr, 0, sizeof(wr)); 920 memset(&wr, 0, sizeof(wr));
920 wr.wr_id = (u64)(unsigned long)&umr_context; 921 wr.wr.wr_id = (u64)(unsigned long)&umr_context;
921 922
922 sg.addr = dma; 923 sg.addr = dma;
923 sg.length = ALIGN(npages * sizeof(u64), 924 sg.length = ALIGN(npages * sizeof(u64),
924 MLX5_UMR_MTT_ALIGNMENT); 925 MLX5_UMR_MTT_ALIGNMENT);
925 sg.lkey = dev->umrc.pd->local_dma_lkey; 926 sg.lkey = dev->umrc.pd->local_dma_lkey;
926 927
927 wr.send_flags = MLX5_IB_SEND_UMR_FAIL_IF_FREE | 928 wr.wr.send_flags = MLX5_IB_SEND_UMR_FAIL_IF_FREE |
928 MLX5_IB_SEND_UMR_UPDATE_MTT; 929 MLX5_IB_SEND_UMR_UPDATE_MTT;
929 wr.sg_list = &sg; 930 wr.wr.sg_list = &sg;
930 wr.num_sge = 1; 931 wr.wr.num_sge = 1;
931 wr.opcode = MLX5_IB_WR_UMR; 932 wr.wr.opcode = MLX5_IB_WR_UMR;
932 umrwr->npages = sg.length / sizeof(u64); 933 wr.npages = sg.length / sizeof(u64);
933 umrwr->page_shift = PAGE_SHIFT; 934 wr.page_shift = PAGE_SHIFT;
934 umrwr->mkey = mr->mmr.key; 935 wr.mkey = mr->mmr.key;
935 umrwr->target.offset = start_page_index; 936 wr.target.offset = start_page_index;
936 937
937 mlx5_ib_init_umr_context(&umr_context); 938 mlx5_ib_init_umr_context(&umr_context);
938 down(&umrc->sem); 939 down(&umrc->sem);
939 err = ib_post_send(umrc->qp, &wr, &bad); 940 err = ib_post_send(umrc->qp, &wr.wr, &bad);
940 if (err) { 941 if (err) {
941 mlx5_ib_err(dev, "UMR post send failed, err %d\n", err); 942 mlx5_ib_err(dev, "UMR post send failed, err %d\n", err);
942 } else { 943 } else {
@@ -1122,16 +1123,17 @@ static int unreg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
1122{ 1123{
1123 struct umr_common *umrc = &dev->umrc; 1124 struct umr_common *umrc = &dev->umrc;
1124 struct mlx5_ib_umr_context umr_context; 1125 struct mlx5_ib_umr_context umr_context;
1125 struct ib_send_wr wr, *bad; 1126 struct mlx5_umr_wr umrwr;
1127 struct ib_send_wr *bad;
1126 int err; 1128 int err;
1127 1129
1128 memset(&wr, 0, sizeof(wr)); 1130 memset(&umrwr.wr, 0, sizeof(umrwr));
1129 wr.wr_id = (u64)(unsigned long)&umr_context; 1131 umrwr.wr.wr_id = (u64)(unsigned long)&umr_context;
1130 prep_umr_unreg_wqe(dev, &wr, mr->mmr.key); 1132 prep_umr_unreg_wqe(dev, &umrwr.wr, mr->mmr.key);
1131 1133
1132 mlx5_ib_init_umr_context(&umr_context); 1134 mlx5_ib_init_umr_context(&umr_context);
1133 down(&umrc->sem); 1135 down(&umrc->sem);
1134 err = ib_post_send(umrc->qp, &wr, &bad); 1136 err = ib_post_send(umrc->qp, &umrwr.wr, &bad);
1135 if (err) { 1137 if (err) {
1136 up(&umrc->sem); 1138 up(&umrc->sem);
1137 mlx5_ib_dbg(dev, "err %d\n", err); 1139 mlx5_ib_dbg(dev, "err %d\n", err);
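On the posting side, reg_umr(), mlx5_ib_update_mtt() and unreg_umr() above show the other half of the conversion: callers stop carrying a bare struct ib_send_wr on the stack, declare the derived type instead, fill the generic fields through its .wr member, and hand &umrwr.wr to ib_post_send(). A hedged sketch of that shape, with example_post_umr() invented for illustration and the umrc semaphore and completion wait omitted:

static int example_post_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
{
	struct umr_common *umrc = &dev->umrc;
	struct mlx5_umr_wr umrwr;
	struct ib_send_wr *bad;

	memset(&umrwr, 0, sizeof(umrwr));
	umrwr.wr.opcode     = MLX5_IB_WR_UMR;		/* generic fields live under .wr */
	umrwr.wr.send_flags = MLX5_IB_SEND_UMR_UNREG |
			      MLX5_IB_SEND_UMR_FAIL_IF_FREE;
	umrwr.mkey          = mr->mmr.key;		/* type-specific field sits beside them */

	return ib_post_send(umrc->qp, &umrwr.wr, &bad);	/* post via the embedded generic WR */
}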
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index 6f521a3418e8..9bad68820061 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -1838,9 +1838,9 @@ static __always_inline void set_raddr_seg(struct mlx5_wqe_raddr_seg *rseg,
1838static void set_datagram_seg(struct mlx5_wqe_datagram_seg *dseg, 1838static void set_datagram_seg(struct mlx5_wqe_datagram_seg *dseg,
1839 struct ib_send_wr *wr) 1839 struct ib_send_wr *wr)
1840{ 1840{
1841 memcpy(&dseg->av, &to_mah(wr->wr.ud.ah)->av, sizeof(struct mlx5_av)); 1841 memcpy(&dseg->av, &to_mah(ud_wr(wr)->ah)->av, sizeof(struct mlx5_av));
1842 dseg->av.dqp_dct = cpu_to_be32(wr->wr.ud.remote_qpn | MLX5_EXTENDED_UD_AV); 1842 dseg->av.dqp_dct = cpu_to_be32(ud_wr(wr)->remote_qpn | MLX5_EXTENDED_UD_AV);
1843 dseg->av.key.qkey.qkey = cpu_to_be32(wr->wr.ud.remote_qkey); 1843 dseg->av.key.qkey.qkey = cpu_to_be32(ud_wr(wr)->remote_qkey);
1844} 1844}
1845 1845
1846static void set_data_ptr_seg(struct mlx5_wqe_data_seg *dseg, struct ib_sge *sg) 1846static void set_data_ptr_seg(struct mlx5_wqe_data_seg *dseg, struct ib_sge *sg)
@@ -1908,7 +1908,7 @@ static void set_frwr_umr_segment(struct mlx5_wqe_umr_ctrl_seg *umr,
1908 } 1908 }
1909 1909
1910 umr->flags = (1 << 5); /* fail if not free */ 1910 umr->flags = (1 << 5); /* fail if not free */
1911 umr->klm_octowords = get_klm_octo(wr->wr.fast_reg.page_list_len); 1911 umr->klm_octowords = get_klm_octo(fast_reg_wr(wr)->page_list_len);
1912 umr->mkey_mask = frwr_mkey_mask(); 1912 umr->mkey_mask = frwr_mkey_mask();
1913} 1913}
1914 1914
@@ -1952,7 +1952,7 @@ static __be64 get_umr_update_mtt_mask(void)
1952static void set_reg_umr_segment(struct mlx5_wqe_umr_ctrl_seg *umr, 1952static void set_reg_umr_segment(struct mlx5_wqe_umr_ctrl_seg *umr,
1953 struct ib_send_wr *wr) 1953 struct ib_send_wr *wr)
1954{ 1954{
1955 struct mlx5_umr_wr *umrwr = (struct mlx5_umr_wr *)&wr->wr.fast_reg; 1955 struct mlx5_umr_wr *umrwr = umr_wr(wr);
1956 1956
1957 memset(umr, 0, sizeof(*umr)); 1957 memset(umr, 0, sizeof(*umr));
1958 1958
@@ -1996,20 +1996,20 @@ static void set_mkey_segment(struct mlx5_mkey_seg *seg, struct ib_send_wr *wr,
1996 return; 1996 return;
1997 } 1997 }
1998 1998
1999 seg->flags = get_umr_flags(wr->wr.fast_reg.access_flags) | 1999 seg->flags = get_umr_flags(fast_reg_wr(wr)->access_flags) |
2000 MLX5_ACCESS_MODE_MTT; 2000 MLX5_ACCESS_MODE_MTT;
2001 *writ = seg->flags & (MLX5_PERM_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE); 2001 *writ = seg->flags & (MLX5_PERM_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE);
2002 seg->qpn_mkey7_0 = cpu_to_be32((wr->wr.fast_reg.rkey & 0xff) | 0xffffff00); 2002 seg->qpn_mkey7_0 = cpu_to_be32((fast_reg_wr(wr)->rkey & 0xff) | 0xffffff00);
2003 seg->flags_pd = cpu_to_be32(MLX5_MKEY_REMOTE_INVAL); 2003 seg->flags_pd = cpu_to_be32(MLX5_MKEY_REMOTE_INVAL);
2004 seg->start_addr = cpu_to_be64(wr->wr.fast_reg.iova_start); 2004 seg->start_addr = cpu_to_be64(fast_reg_wr(wr)->iova_start);
2005 seg->len = cpu_to_be64(wr->wr.fast_reg.length); 2005 seg->len = cpu_to_be64(fast_reg_wr(wr)->length);
2006 seg->xlt_oct_size = cpu_to_be32((wr->wr.fast_reg.page_list_len + 1) / 2); 2006 seg->xlt_oct_size = cpu_to_be32((fast_reg_wr(wr)->page_list_len + 1) / 2);
2007 seg->log2_page_size = wr->wr.fast_reg.page_shift; 2007 seg->log2_page_size = fast_reg_wr(wr)->page_shift;
2008} 2008}
2009 2009
2010static void set_reg_mkey_segment(struct mlx5_mkey_seg *seg, struct ib_send_wr *wr) 2010static void set_reg_mkey_segment(struct mlx5_mkey_seg *seg, struct ib_send_wr *wr)
2011{ 2011{
2012 struct mlx5_umr_wr *umrwr = (struct mlx5_umr_wr *)&wr->wr.fast_reg; 2012 struct mlx5_umr_wr *umrwr = umr_wr(wr);
2013 2013
2014 memset(seg, 0, sizeof(*seg)); 2014 memset(seg, 0, sizeof(*seg));
2015 if (wr->send_flags & MLX5_IB_SEND_UMR_UNREG) { 2015 if (wr->send_flags & MLX5_IB_SEND_UMR_UNREG) {
@@ -2034,15 +2034,15 @@ static void set_frwr_pages(struct mlx5_wqe_data_seg *dseg,
2034 struct mlx5_ib_pd *pd, 2034 struct mlx5_ib_pd *pd,
2035 int writ) 2035 int writ)
2036{ 2036{
2037 struct mlx5_ib_fast_reg_page_list *mfrpl = to_mfrpl(wr->wr.fast_reg.page_list); 2037 struct mlx5_ib_fast_reg_page_list *mfrpl = to_mfrpl(fast_reg_wr(wr)->page_list);
2038 u64 *page_list = wr->wr.fast_reg.page_list->page_list; 2038 u64 *page_list = fast_reg_wr(wr)->page_list->page_list;
2039 u64 perm = MLX5_EN_RD | (writ ? MLX5_EN_WR : 0); 2039 u64 perm = MLX5_EN_RD | (writ ? MLX5_EN_WR : 0);
2040 int i; 2040 int i;
2041 2041
2042 for (i = 0; i < wr->wr.fast_reg.page_list_len; i++) 2042 for (i = 0; i < fast_reg_wr(wr)->page_list_len; i++)
2043 mfrpl->mapped_page_list[i] = cpu_to_be64(page_list[i] | perm); 2043 mfrpl->mapped_page_list[i] = cpu_to_be64(page_list[i] | perm);
2044 dseg->addr = cpu_to_be64(mfrpl->map); 2044 dseg->addr = cpu_to_be64(mfrpl->map);
2045 dseg->byte_count = cpu_to_be32(ALIGN(sizeof(u64) * wr->wr.fast_reg.page_list_len, 64)); 2045 dseg->byte_count = cpu_to_be32(ALIGN(sizeof(u64) * fast_reg_wr(wr)->page_list_len, 64));
2046 dseg->lkey = cpu_to_be32(pd->ibpd.local_dma_lkey); 2046 dseg->lkey = cpu_to_be32(pd->ibpd.local_dma_lkey);
2047} 2047}
2048 2048
@@ -2224,22 +2224,22 @@ static int mlx5_set_bsf(struct ib_mr *sig_mr,
2224 return 0; 2224 return 0;
2225} 2225}
2226 2226
2227static int set_sig_data_segment(struct ib_send_wr *wr, struct mlx5_ib_qp *qp, 2227static int set_sig_data_segment(struct ib_sig_handover_wr *wr,
2228 void **seg, int *size) 2228 struct mlx5_ib_qp *qp, void **seg, int *size)
2229{ 2229{
2230 struct ib_sig_attrs *sig_attrs = wr->wr.sig_handover.sig_attrs; 2230 struct ib_sig_attrs *sig_attrs = wr->sig_attrs;
2231 struct ib_mr *sig_mr = wr->wr.sig_handover.sig_mr; 2231 struct ib_mr *sig_mr = wr->sig_mr;
2232 struct mlx5_bsf *bsf; 2232 struct mlx5_bsf *bsf;
2233 u32 data_len = wr->sg_list->length; 2233 u32 data_len = wr->wr.sg_list->length;
2234 u32 data_key = wr->sg_list->lkey; 2234 u32 data_key = wr->wr.sg_list->lkey;
2235 u64 data_va = wr->sg_list->addr; 2235 u64 data_va = wr->wr.sg_list->addr;
2236 int ret; 2236 int ret;
2237 int wqe_size; 2237 int wqe_size;
2238 2238
2239 if (!wr->wr.sig_handover.prot || 2239 if (!wr->prot ||
2240 (data_key == wr->wr.sig_handover.prot->lkey && 2240 (data_key == wr->prot->lkey &&
2241 data_va == wr->wr.sig_handover.prot->addr && 2241 data_va == wr->prot->addr &&
2242 data_len == wr->wr.sig_handover.prot->length)) { 2242 data_len == wr->prot->length)) {
2243 /** 2243 /**
2244 * Source domain doesn't contain signature information 2244 * Source domain doesn't contain signature information
2245 * or data and protection are interleaved in memory. 2245 * or data and protection are interleaved in memory.
@@ -2273,8 +2273,8 @@ static int set_sig_data_segment(struct ib_send_wr *wr, struct mlx5_ib_qp *qp,
2273 struct mlx5_stride_block_ctrl_seg *sblock_ctrl; 2273 struct mlx5_stride_block_ctrl_seg *sblock_ctrl;
2274 struct mlx5_stride_block_entry *data_sentry; 2274 struct mlx5_stride_block_entry *data_sentry;
2275 struct mlx5_stride_block_entry *prot_sentry; 2275 struct mlx5_stride_block_entry *prot_sentry;
2276 u32 prot_key = wr->wr.sig_handover.prot->lkey; 2276 u32 prot_key = wr->prot->lkey;
2277 u64 prot_va = wr->wr.sig_handover.prot->addr; 2277 u64 prot_va = wr->prot->addr;
2278 u16 block_size = sig_attrs->mem.sig.dif.pi_interval; 2278 u16 block_size = sig_attrs->mem.sig.dif.pi_interval;
2279 int prot_size; 2279 int prot_size;
2280 2280
@@ -2326,16 +2326,16 @@ static int set_sig_data_segment(struct ib_send_wr *wr, struct mlx5_ib_qp *qp,
2326} 2326}
2327 2327
2328static void set_sig_mkey_segment(struct mlx5_mkey_seg *seg, 2328static void set_sig_mkey_segment(struct mlx5_mkey_seg *seg,
2329 struct ib_send_wr *wr, u32 nelements, 2329 struct ib_sig_handover_wr *wr, u32 nelements,
2330 u32 length, u32 pdn) 2330 u32 length, u32 pdn)
2331{ 2331{
2332 struct ib_mr *sig_mr = wr->wr.sig_handover.sig_mr; 2332 struct ib_mr *sig_mr = wr->sig_mr;
2333 u32 sig_key = sig_mr->rkey; 2333 u32 sig_key = sig_mr->rkey;
2334 u8 sigerr = to_mmr(sig_mr)->sig->sigerr_count & 1; 2334 u8 sigerr = to_mmr(sig_mr)->sig->sigerr_count & 1;
2335 2335
2336 memset(seg, 0, sizeof(*seg)); 2336 memset(seg, 0, sizeof(*seg));
2337 2337
2338 seg->flags = get_umr_flags(wr->wr.sig_handover.access_flags) | 2338 seg->flags = get_umr_flags(wr->access_flags) |
2339 MLX5_ACCESS_MODE_KLM; 2339 MLX5_ACCESS_MODE_KLM;
2340 seg->qpn_mkey7_0 = cpu_to_be32((sig_key & 0xff) | 0xffffff00); 2340 seg->qpn_mkey7_0 = cpu_to_be32((sig_key & 0xff) | 0xffffff00);
2341 seg->flags_pd = cpu_to_be32(MLX5_MKEY_REMOTE_INVAL | sigerr << 26 | 2341 seg->flags_pd = cpu_to_be32(MLX5_MKEY_REMOTE_INVAL | sigerr << 26 |
@@ -2346,7 +2346,7 @@ static void set_sig_mkey_segment(struct mlx5_mkey_seg *seg,
2346} 2346}
2347 2347
2348static void set_sig_umr_segment(struct mlx5_wqe_umr_ctrl_seg *umr, 2348static void set_sig_umr_segment(struct mlx5_wqe_umr_ctrl_seg *umr,
2349 struct ib_send_wr *wr, u32 nelements) 2349 u32 nelements)
2350{ 2350{
2351 memset(umr, 0, sizeof(*umr)); 2351 memset(umr, 0, sizeof(*umr));
2352 2352
@@ -2357,37 +2357,37 @@ static void set_sig_umr_segment(struct mlx5_wqe_umr_ctrl_seg *umr,
2357} 2357}
2358 2358
2359 2359
2360static int set_sig_umr_wr(struct ib_send_wr *wr, struct mlx5_ib_qp *qp, 2360static int set_sig_umr_wr(struct ib_send_wr *send_wr, struct mlx5_ib_qp *qp,
2361 void **seg, int *size) 2361 void **seg, int *size)
2362{ 2362{
2363 struct mlx5_ib_mr *sig_mr = to_mmr(wr->wr.sig_handover.sig_mr); 2363 struct ib_sig_handover_wr *wr = sig_handover_wr(send_wr);
2364 struct mlx5_ib_mr *sig_mr = to_mmr(wr->sig_mr);
2364 u32 pdn = get_pd(qp)->pdn; 2365 u32 pdn = get_pd(qp)->pdn;
2365 u32 klm_oct_size; 2366 u32 klm_oct_size;
2366 int region_len, ret; 2367 int region_len, ret;
2367 2368
2368 if (unlikely(wr->num_sge != 1) || 2369 if (unlikely(wr->wr.num_sge != 1) ||
2369 unlikely(wr->wr.sig_handover.access_flags & 2370 unlikely(wr->access_flags & IB_ACCESS_REMOTE_ATOMIC) ||
2370 IB_ACCESS_REMOTE_ATOMIC) ||
2371 unlikely(!sig_mr->sig) || unlikely(!qp->signature_en) || 2371 unlikely(!sig_mr->sig) || unlikely(!qp->signature_en) ||
2372 unlikely(!sig_mr->sig->sig_status_checked)) 2372 unlikely(!sig_mr->sig->sig_status_checked))
2373 return -EINVAL; 2373 return -EINVAL;
2374 2374
2375 /* length of the protected region, data + protection */ 2375 /* length of the protected region, data + protection */
2376 region_len = wr->sg_list->length; 2376 region_len = wr->wr.sg_list->length;
2377 if (wr->wr.sig_handover.prot && 2377 if (wr->prot &&
2378 (wr->wr.sig_handover.prot->lkey != wr->sg_list->lkey || 2378 (wr->prot->lkey != wr->wr.sg_list->lkey ||
2379 wr->wr.sig_handover.prot->addr != wr->sg_list->addr || 2379 wr->prot->addr != wr->wr.sg_list->addr ||
2380 wr->wr.sig_handover.prot->length != wr->sg_list->length)) 2380 wr->prot->length != wr->wr.sg_list->length))
2381 region_len += wr->wr.sig_handover.prot->length; 2381 region_len += wr->prot->length;
2382 2382
2383 /** 2383 /**
2384 * KLM octoword size - if protection was provided 2384 * KLM octoword size - if protection was provided
2385 * then we use strided block format (3 octowords), 2385 * then we use strided block format (3 octowords),
2386 * else we use single KLM (1 octoword) 2386 * else we use single KLM (1 octoword)
2387 **/ 2387 **/
2388 klm_oct_size = wr->wr.sig_handover.prot ? 3 : 1; 2388 klm_oct_size = wr->prot ? 3 : 1;
2389 2389
2390 set_sig_umr_segment(*seg, wr, klm_oct_size); 2390 set_sig_umr_segment(*seg, klm_oct_size);
2391 *seg += sizeof(struct mlx5_wqe_umr_ctrl_seg); 2391 *seg += sizeof(struct mlx5_wqe_umr_ctrl_seg);
2392 *size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16; 2392 *size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16;
2393 if (unlikely((*seg == qp->sq.qend))) 2393 if (unlikely((*seg == qp->sq.qend)))
@@ -2454,8 +2454,8 @@ static int set_frwr_li_wr(void **seg, struct ib_send_wr *wr, int *size,
2454 if (unlikely((*seg == qp->sq.qend))) 2454 if (unlikely((*seg == qp->sq.qend)))
2455 *seg = mlx5_get_send_wqe(qp, 0); 2455 *seg = mlx5_get_send_wqe(qp, 0);
2456 if (!li) { 2456 if (!li) {
2457 if (unlikely(wr->wr.fast_reg.page_list_len > 2457 if (unlikely(fast_reg_wr(wr)->page_list_len >
2458 wr->wr.fast_reg.page_list->max_page_list_len)) 2458 fast_reg_wr(wr)->page_list->max_page_list_len))
2459 return -ENOMEM; 2459 return -ENOMEM;
2460 2460
2461 set_frwr_pages(*seg, wr, mdev, pd, writ); 2461 set_frwr_pages(*seg, wr, mdev, pd, writ);
@@ -2627,7 +2627,6 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
2627 switch (ibqp->qp_type) { 2627 switch (ibqp->qp_type) {
2628 case IB_QPT_XRC_INI: 2628 case IB_QPT_XRC_INI:
2629 xrc = seg; 2629 xrc = seg;
2630 xrc->xrc_srqn = htonl(wr->xrc_remote_srq_num);
2631 seg += sizeof(*xrc); 2630 seg += sizeof(*xrc);
2632 size += sizeof(*xrc) / 16; 2631 size += sizeof(*xrc) / 16;
2633 /* fall through */ 2632 /* fall through */
@@ -2636,8 +2635,8 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
2636 case IB_WR_RDMA_READ: 2635 case IB_WR_RDMA_READ:
2637 case IB_WR_RDMA_WRITE: 2636 case IB_WR_RDMA_WRITE:
2638 case IB_WR_RDMA_WRITE_WITH_IMM: 2637 case IB_WR_RDMA_WRITE_WITH_IMM:
2639 set_raddr_seg(seg, wr->wr.rdma.remote_addr, 2638 set_raddr_seg(seg, rdma_wr(wr)->remote_addr,
2640 wr->wr.rdma.rkey); 2639 rdma_wr(wr)->rkey);
2641 seg += sizeof(struct mlx5_wqe_raddr_seg); 2640 seg += sizeof(struct mlx5_wqe_raddr_seg);
2642 size += sizeof(struct mlx5_wqe_raddr_seg) / 16; 2641 size += sizeof(struct mlx5_wqe_raddr_seg) / 16;
2643 break; 2642 break;
@@ -2666,7 +2665,7 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
2666 case IB_WR_FAST_REG_MR: 2665 case IB_WR_FAST_REG_MR:
2667 next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL; 2666 next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
2668 qp->sq.wr_data[idx] = IB_WR_FAST_REG_MR; 2667 qp->sq.wr_data[idx] = IB_WR_FAST_REG_MR;
2669 ctrl->imm = cpu_to_be32(wr->wr.fast_reg.rkey); 2668 ctrl->imm = cpu_to_be32(fast_reg_wr(wr)->rkey);
2670 err = set_frwr_li_wr(&seg, wr, &size, mdev, to_mpd(ibqp->pd), qp); 2669 err = set_frwr_li_wr(&seg, wr, &size, mdev, to_mpd(ibqp->pd), qp);
2671 if (err) { 2670 if (err) {
2672 mlx5_ib_warn(dev, "\n"); 2671 mlx5_ib_warn(dev, "\n");
@@ -2678,7 +2677,7 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
2678 2677
2679 case IB_WR_REG_SIG_MR: 2678 case IB_WR_REG_SIG_MR:
2680 qp->sq.wr_data[idx] = IB_WR_REG_SIG_MR; 2679 qp->sq.wr_data[idx] = IB_WR_REG_SIG_MR;
2681 mr = to_mmr(wr->wr.sig_handover.sig_mr); 2680 mr = to_mmr(sig_handover_wr(wr)->sig_mr);
2682 2681
2683 ctrl->imm = cpu_to_be32(mr->ibmr.rkey); 2682 ctrl->imm = cpu_to_be32(mr->ibmr.rkey);
2684 err = set_sig_umr_wr(wr, qp, &seg, &size); 2683 err = set_sig_umr_wr(wr, qp, &seg, &size);
@@ -2706,7 +2705,7 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
2706 goto out; 2705 goto out;
2707 } 2706 }
2708 2707
2709 err = set_psv_wr(&wr->wr.sig_handover.sig_attrs->mem, 2708 err = set_psv_wr(&sig_handover_wr(wr)->sig_attrs->mem,
2710 mr->sig->psv_memory.psv_idx, &seg, 2709 mr->sig->psv_memory.psv_idx, &seg,
2711 &size); 2710 &size);
2712 if (err) { 2711 if (err) {
@@ -2728,7 +2727,7 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
2728 } 2727 }
2729 2728
2730 next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL; 2729 next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
2731 err = set_psv_wr(&wr->wr.sig_handover.sig_attrs->wire, 2730 err = set_psv_wr(&sig_handover_wr(wr)->sig_attrs->wire,
2732 mr->sig->psv_wire.psv_idx, &seg, 2731 mr->sig->psv_wire.psv_idx, &seg,
2733 &size); 2732 &size);
2734 if (err) { 2733 if (err) {
@@ -2752,8 +2751,8 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
2752 switch (wr->opcode) { 2751 switch (wr->opcode) {
2753 case IB_WR_RDMA_WRITE: 2752 case IB_WR_RDMA_WRITE:
2754 case IB_WR_RDMA_WRITE_WITH_IMM: 2753 case IB_WR_RDMA_WRITE_WITH_IMM:
2755 set_raddr_seg(seg, wr->wr.rdma.remote_addr, 2754 set_raddr_seg(seg, rdma_wr(wr)->remote_addr,
2756 wr->wr.rdma.rkey); 2755 rdma_wr(wr)->rkey);
2757 seg += sizeof(struct mlx5_wqe_raddr_seg); 2756 seg += sizeof(struct mlx5_wqe_raddr_seg);
2758 size += sizeof(struct mlx5_wqe_raddr_seg) / 16; 2757 size += sizeof(struct mlx5_wqe_raddr_seg) / 16;
2759 break; 2758 break;
@@ -2780,7 +2779,7 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
2780 goto out; 2779 goto out;
2781 } 2780 }
2782 qp->sq.wr_data[idx] = MLX5_IB_WR_UMR; 2781 qp->sq.wr_data[idx] = MLX5_IB_WR_UMR;
2783 ctrl->imm = cpu_to_be32(wr->wr.fast_reg.rkey); 2782 ctrl->imm = cpu_to_be32(umr_wr(wr)->mkey);
2784 set_reg_umr_segment(seg, wr); 2783 set_reg_umr_segment(seg, wr);
2785 seg += sizeof(struct mlx5_wqe_umr_ctrl_seg); 2784 seg += sizeof(struct mlx5_wqe_umr_ctrl_seg);
2786 size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16; 2785 size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16;
diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c
index e354b2f04ad9..35fe506e2cfa 100644
--- a/drivers/infiniband/hw/mthca/mthca_qp.c
+++ b/drivers/infiniband/hw/mthca/mthca_qp.c
@@ -1476,7 +1476,7 @@ void mthca_free_qp(struct mthca_dev *dev,
1476 1476
1477/* Create UD header for an MLX send and build a data segment for it */ 1477/* Create UD header for an MLX send and build a data segment for it */
1478static int build_mlx_header(struct mthca_dev *dev, struct mthca_sqp *sqp, 1478static int build_mlx_header(struct mthca_dev *dev, struct mthca_sqp *sqp,
1479 int ind, struct ib_send_wr *wr, 1479 int ind, struct ib_ud_wr *wr,
1480 struct mthca_mlx_seg *mlx, 1480 struct mthca_mlx_seg *mlx,
1481 struct mthca_data_seg *data) 1481 struct mthca_data_seg *data)
1482{ 1482{
@@ -1485,10 +1485,10 @@ static int build_mlx_header(struct mthca_dev *dev, struct mthca_sqp *sqp,
1485 u16 pkey; 1485 u16 pkey;
1486 1486
1487 ib_ud_header_init(256, /* assume a MAD */ 1, 0, 0, 1487 ib_ud_header_init(256, /* assume a MAD */ 1, 0, 0,
1488 mthca_ah_grh_present(to_mah(wr->wr.ud.ah)), 0, 1488 mthca_ah_grh_present(to_mah(wr->ah)), 0,
1489 &sqp->ud_header); 1489 &sqp->ud_header);
1490 1490
1491 err = mthca_read_ah(dev, to_mah(wr->wr.ud.ah), &sqp->ud_header); 1491 err = mthca_read_ah(dev, to_mah(wr->ah), &sqp->ud_header);
1492 if (err) 1492 if (err)
1493 return err; 1493 return err;
1494 mlx->flags &= ~cpu_to_be32(MTHCA_NEXT_SOLICIT | 1); 1494 mlx->flags &= ~cpu_to_be32(MTHCA_NEXT_SOLICIT | 1);
@@ -1499,7 +1499,7 @@ static int build_mlx_header(struct mthca_dev *dev, struct mthca_sqp *sqp,
1499 mlx->rlid = sqp->ud_header.lrh.destination_lid; 1499 mlx->rlid = sqp->ud_header.lrh.destination_lid;
1500 mlx->vcrc = 0; 1500 mlx->vcrc = 0;
1501 1501
1502 switch (wr->opcode) { 1502 switch (wr->wr.opcode) {
1503 case IB_WR_SEND: 1503 case IB_WR_SEND:
1504 sqp->ud_header.bth.opcode = IB_OPCODE_UD_SEND_ONLY; 1504 sqp->ud_header.bth.opcode = IB_OPCODE_UD_SEND_ONLY;
1505 sqp->ud_header.immediate_present = 0; 1505 sqp->ud_header.immediate_present = 0;
@@ -1507,7 +1507,7 @@ static int build_mlx_header(struct mthca_dev *dev, struct mthca_sqp *sqp,
1507 case IB_WR_SEND_WITH_IMM: 1507 case IB_WR_SEND_WITH_IMM:
1508 sqp->ud_header.bth.opcode = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE; 1508 sqp->ud_header.bth.opcode = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE;
1509 sqp->ud_header.immediate_present = 1; 1509 sqp->ud_header.immediate_present = 1;
1510 sqp->ud_header.immediate_data = wr->ex.imm_data; 1510 sqp->ud_header.immediate_data = wr->wr.ex.imm_data;
1511 break; 1511 break;
1512 default: 1512 default:
1513 return -EINVAL; 1513 return -EINVAL;
@@ -1516,18 +1516,18 @@ static int build_mlx_header(struct mthca_dev *dev, struct mthca_sqp *sqp,
1516 sqp->ud_header.lrh.virtual_lane = !sqp->qp.ibqp.qp_num ? 15 : 0; 1516 sqp->ud_header.lrh.virtual_lane = !sqp->qp.ibqp.qp_num ? 15 : 0;
1517 if (sqp->ud_header.lrh.destination_lid == IB_LID_PERMISSIVE) 1517 if (sqp->ud_header.lrh.destination_lid == IB_LID_PERMISSIVE)
1518 sqp->ud_header.lrh.source_lid = IB_LID_PERMISSIVE; 1518 sqp->ud_header.lrh.source_lid = IB_LID_PERMISSIVE;
1519 sqp->ud_header.bth.solicited_event = !!(wr->send_flags & IB_SEND_SOLICITED); 1519 sqp->ud_header.bth.solicited_event = !!(wr->wr.send_flags & IB_SEND_SOLICITED);
1520 if (!sqp->qp.ibqp.qp_num) 1520 if (!sqp->qp.ibqp.qp_num)
1521 ib_get_cached_pkey(&dev->ib_dev, sqp->qp.port, 1521 ib_get_cached_pkey(&dev->ib_dev, sqp->qp.port,
1522 sqp->pkey_index, &pkey); 1522 sqp->pkey_index, &pkey);
1523 else 1523 else
1524 ib_get_cached_pkey(&dev->ib_dev, sqp->qp.port, 1524 ib_get_cached_pkey(&dev->ib_dev, sqp->qp.port,
1525 wr->wr.ud.pkey_index, &pkey); 1525 wr->pkey_index, &pkey);
1526 sqp->ud_header.bth.pkey = cpu_to_be16(pkey); 1526 sqp->ud_header.bth.pkey = cpu_to_be16(pkey);
1527 sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->wr.ud.remote_qpn); 1527 sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->remote_qpn);
1528 sqp->ud_header.bth.psn = cpu_to_be32((sqp->send_psn++) & ((1 << 24) - 1)); 1528 sqp->ud_header.bth.psn = cpu_to_be32((sqp->send_psn++) & ((1 << 24) - 1));
1529 sqp->ud_header.deth.qkey = cpu_to_be32(wr->wr.ud.remote_qkey & 0x80000000 ? 1529 sqp->ud_header.deth.qkey = cpu_to_be32(wr->remote_qkey & 0x80000000 ?
1530 sqp->qkey : wr->wr.ud.remote_qkey); 1530 sqp->qkey : wr->remote_qkey);
1531 sqp->ud_header.deth.source_qpn = cpu_to_be32(sqp->qp.ibqp.qp_num); 1531 sqp->ud_header.deth.source_qpn = cpu_to_be32(sqp->qp.ibqp.qp_num);
1532 1532
1533 header_size = ib_ud_header_pack(&sqp->ud_header, 1533 header_size = ib_ud_header_pack(&sqp->ud_header,
@@ -1569,34 +1569,34 @@ static __always_inline void set_raddr_seg(struct mthca_raddr_seg *rseg,
1569} 1569}
1570 1570
1571static __always_inline void set_atomic_seg(struct mthca_atomic_seg *aseg, 1571static __always_inline void set_atomic_seg(struct mthca_atomic_seg *aseg,
1572 struct ib_send_wr *wr) 1572 struct ib_atomic_wr *wr)
1573{ 1573{
1574 if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP) { 1574 if (wr->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
1575 aseg->swap_add = cpu_to_be64(wr->wr.atomic.swap); 1575 aseg->swap_add = cpu_to_be64(wr->swap);
1576 aseg->compare = cpu_to_be64(wr->wr.atomic.compare_add); 1576 aseg->compare = cpu_to_be64(wr->compare_add);
1577 } else { 1577 } else {
1578 aseg->swap_add = cpu_to_be64(wr->wr.atomic.compare_add); 1578 aseg->swap_add = cpu_to_be64(wr->compare_add);
1579 aseg->compare = 0; 1579 aseg->compare = 0;
1580 } 1580 }
1581 1581
1582} 1582}
1583 1583
1584static void set_tavor_ud_seg(struct mthca_tavor_ud_seg *useg, 1584static void set_tavor_ud_seg(struct mthca_tavor_ud_seg *useg,
1585 struct ib_send_wr *wr) 1585 struct ib_ud_wr *wr)
1586{ 1586{
1587 useg->lkey = cpu_to_be32(to_mah(wr->wr.ud.ah)->key); 1587 useg->lkey = cpu_to_be32(to_mah(wr->ah)->key);
1588 useg->av_addr = cpu_to_be64(to_mah(wr->wr.ud.ah)->avdma); 1588 useg->av_addr = cpu_to_be64(to_mah(wr->ah)->avdma);
1589 useg->dqpn = cpu_to_be32(wr->wr.ud.remote_qpn); 1589 useg->dqpn = cpu_to_be32(wr->remote_qpn);
1590 useg->qkey = cpu_to_be32(wr->wr.ud.remote_qkey); 1590 useg->qkey = cpu_to_be32(wr->remote_qkey);
1591 1591
1592} 1592}
1593 1593
1594static void set_arbel_ud_seg(struct mthca_arbel_ud_seg *useg, 1594static void set_arbel_ud_seg(struct mthca_arbel_ud_seg *useg,
1595 struct ib_send_wr *wr) 1595 struct ib_ud_wr *wr)
1596{ 1596{
1597 memcpy(useg->av, to_mah(wr->wr.ud.ah)->av, MTHCA_AV_SIZE); 1597 memcpy(useg->av, to_mah(wr->ah)->av, MTHCA_AV_SIZE);
1598 useg->dqpn = cpu_to_be32(wr->wr.ud.remote_qpn); 1598 useg->dqpn = cpu_to_be32(wr->remote_qpn);
1599 useg->qkey = cpu_to_be32(wr->wr.ud.remote_qkey); 1599 useg->qkey = cpu_to_be32(wr->remote_qkey);
1600} 1600}
1601 1601
1602int mthca_tavor_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, 1602int mthca_tavor_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
@@ -1664,11 +1664,11 @@ int mthca_tavor_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
1664 switch (wr->opcode) { 1664 switch (wr->opcode) {
1665 case IB_WR_ATOMIC_CMP_AND_SWP: 1665 case IB_WR_ATOMIC_CMP_AND_SWP:
1666 case IB_WR_ATOMIC_FETCH_AND_ADD: 1666 case IB_WR_ATOMIC_FETCH_AND_ADD:
1667 set_raddr_seg(wqe, wr->wr.atomic.remote_addr, 1667 set_raddr_seg(wqe, atomic_wr(wr)->remote_addr,
1668 wr->wr.atomic.rkey); 1668 atomic_wr(wr)->rkey);
1669 wqe += sizeof (struct mthca_raddr_seg); 1669 wqe += sizeof (struct mthca_raddr_seg);
1670 1670
1671 set_atomic_seg(wqe, wr); 1671 set_atomic_seg(wqe, atomic_wr(wr));
1672 wqe += sizeof (struct mthca_atomic_seg); 1672 wqe += sizeof (struct mthca_atomic_seg);
1673 size += (sizeof (struct mthca_raddr_seg) + 1673 size += (sizeof (struct mthca_raddr_seg) +
1674 sizeof (struct mthca_atomic_seg)) / 16; 1674 sizeof (struct mthca_atomic_seg)) / 16;
@@ -1677,8 +1677,8 @@ int mthca_tavor_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
1677 case IB_WR_RDMA_WRITE: 1677 case IB_WR_RDMA_WRITE:
1678 case IB_WR_RDMA_WRITE_WITH_IMM: 1678 case IB_WR_RDMA_WRITE_WITH_IMM:
1679 case IB_WR_RDMA_READ: 1679 case IB_WR_RDMA_READ:
1680 set_raddr_seg(wqe, wr->wr.rdma.remote_addr, 1680 set_raddr_seg(wqe, rdma_wr(wr)->remote_addr,
1681 wr->wr.rdma.rkey); 1681 rdma_wr(wr)->rkey);
1682 wqe += sizeof (struct mthca_raddr_seg); 1682 wqe += sizeof (struct mthca_raddr_seg);
1683 size += sizeof (struct mthca_raddr_seg) / 16; 1683 size += sizeof (struct mthca_raddr_seg) / 16;
1684 break; 1684 break;
@@ -1694,8 +1694,8 @@ int mthca_tavor_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
1694 switch (wr->opcode) { 1694 switch (wr->opcode) {
1695 case IB_WR_RDMA_WRITE: 1695 case IB_WR_RDMA_WRITE:
1696 case IB_WR_RDMA_WRITE_WITH_IMM: 1696 case IB_WR_RDMA_WRITE_WITH_IMM:
1697 set_raddr_seg(wqe, wr->wr.rdma.remote_addr, 1697 set_raddr_seg(wqe, rdma_wr(wr)->remote_addr,
1698 wr->wr.rdma.rkey); 1698 rdma_wr(wr)->rkey);
1699 wqe += sizeof (struct mthca_raddr_seg); 1699 wqe += sizeof (struct mthca_raddr_seg);
1700 size += sizeof (struct mthca_raddr_seg) / 16; 1700 size += sizeof (struct mthca_raddr_seg) / 16;
1701 break; 1701 break;
@@ -1708,13 +1708,13 @@ int mthca_tavor_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
1708 break; 1708 break;
1709 1709
1710 case UD: 1710 case UD:
1711 set_tavor_ud_seg(wqe, wr); 1711 set_tavor_ud_seg(wqe, ud_wr(wr));
1712 wqe += sizeof (struct mthca_tavor_ud_seg); 1712 wqe += sizeof (struct mthca_tavor_ud_seg);
1713 size += sizeof (struct mthca_tavor_ud_seg) / 16; 1713 size += sizeof (struct mthca_tavor_ud_seg) / 16;
1714 break; 1714 break;
1715 1715
1716 case MLX: 1716 case MLX:
1717 err = build_mlx_header(dev, to_msqp(qp), ind, wr, 1717 err = build_mlx_header(dev, to_msqp(qp), ind, ud_wr(wr),
1718 wqe - sizeof (struct mthca_next_seg), 1718 wqe - sizeof (struct mthca_next_seg),
1719 wqe); 1719 wqe);
1720 if (err) { 1720 if (err) {
@@ -2005,11 +2005,11 @@ int mthca_arbel_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
2005 switch (wr->opcode) { 2005 switch (wr->opcode) {
2006 case IB_WR_ATOMIC_CMP_AND_SWP: 2006 case IB_WR_ATOMIC_CMP_AND_SWP:
2007 case IB_WR_ATOMIC_FETCH_AND_ADD: 2007 case IB_WR_ATOMIC_FETCH_AND_ADD:
2008 set_raddr_seg(wqe, wr->wr.atomic.remote_addr, 2008 set_raddr_seg(wqe, atomic_wr(wr)->remote_addr,
2009 wr->wr.atomic.rkey); 2009 atomic_wr(wr)->rkey);
2010 wqe += sizeof (struct mthca_raddr_seg); 2010 wqe += sizeof (struct mthca_raddr_seg);
2011 2011
2012 set_atomic_seg(wqe, wr); 2012 set_atomic_seg(wqe, atomic_wr(wr));
2013 wqe += sizeof (struct mthca_atomic_seg); 2013 wqe += sizeof (struct mthca_atomic_seg);
2014 size += (sizeof (struct mthca_raddr_seg) + 2014 size += (sizeof (struct mthca_raddr_seg) +
2015 sizeof (struct mthca_atomic_seg)) / 16; 2015 sizeof (struct mthca_atomic_seg)) / 16;
@@ -2018,8 +2018,8 @@ int mthca_arbel_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
2018 case IB_WR_RDMA_READ: 2018 case IB_WR_RDMA_READ:
2019 case IB_WR_RDMA_WRITE: 2019 case IB_WR_RDMA_WRITE:
2020 case IB_WR_RDMA_WRITE_WITH_IMM: 2020 case IB_WR_RDMA_WRITE_WITH_IMM:
2021 set_raddr_seg(wqe, wr->wr.rdma.remote_addr, 2021 set_raddr_seg(wqe, rdma_wr(wr)->remote_addr,
2022 wr->wr.rdma.rkey); 2022 rdma_wr(wr)->rkey);
2023 wqe += sizeof (struct mthca_raddr_seg); 2023 wqe += sizeof (struct mthca_raddr_seg);
2024 size += sizeof (struct mthca_raddr_seg) / 16; 2024 size += sizeof (struct mthca_raddr_seg) / 16;
2025 break; 2025 break;
@@ -2035,8 +2035,8 @@ int mthca_arbel_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
2035 switch (wr->opcode) { 2035 switch (wr->opcode) {
2036 case IB_WR_RDMA_WRITE: 2036 case IB_WR_RDMA_WRITE:
2037 case IB_WR_RDMA_WRITE_WITH_IMM: 2037 case IB_WR_RDMA_WRITE_WITH_IMM:
2038 set_raddr_seg(wqe, wr->wr.rdma.remote_addr, 2038 set_raddr_seg(wqe, rdma_wr(wr)->remote_addr,
2039 wr->wr.rdma.rkey); 2039 rdma_wr(wr)->rkey);
2040 wqe += sizeof (struct mthca_raddr_seg); 2040 wqe += sizeof (struct mthca_raddr_seg);
2041 size += sizeof (struct mthca_raddr_seg) / 16; 2041 size += sizeof (struct mthca_raddr_seg) / 16;
2042 break; 2042 break;
@@ -2049,13 +2049,13 @@ int mthca_arbel_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
2049 break; 2049 break;
2050 2050
2051 case UD: 2051 case UD:
2052 set_arbel_ud_seg(wqe, wr); 2052 set_arbel_ud_seg(wqe, ud_wr(wr));
2053 wqe += sizeof (struct mthca_arbel_ud_seg); 2053 wqe += sizeof (struct mthca_arbel_ud_seg);
2054 size += sizeof (struct mthca_arbel_ud_seg) / 16; 2054 size += sizeof (struct mthca_arbel_ud_seg) / 16;
2055 break; 2055 break;
2056 2056
2057 case MLX: 2057 case MLX:
2058 err = build_mlx_header(dev, to_msqp(qp), ind, wr, 2058 err = build_mlx_header(dev, to_msqp(qp), ind, ud_wr(wr),
2059 wqe - sizeof (struct mthca_next_seg), 2059 wqe - sizeof (struct mthca_next_seg),
2060 wqe); 2060 wqe);
2061 if (err) { 2061 if (err) {
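The mthca hunks above show the consumer side of the same rework: segment builders such as set_atomic_seg(), set_tavor_ud_seg() and build_mlx_header() now take the derived WR type directly, the narrowing via ud_wr() or atomic_wr() happens once at the call site, and generic fields (opcode, send_flags, immediate data) are reached through wr->wr. A hedged sketch of a builder in that style; the segment layout here is invented for illustration:

struct example_atomic_seg {
	__be64 swap_add;
	__be64 compare;
};

static void example_set_atomic_seg(struct example_atomic_seg *aseg,
				   const struct ib_atomic_wr *wr)
{
	if (wr->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP) {	/* generic field via ->wr */
		aseg->swap_add = cpu_to_be64(wr->swap);		/* type-specific fields */
		aseg->compare  = cpu_to_be64(wr->compare_add);
	} else {						/* fetch-and-add */
		aseg->swap_add = cpu_to_be64(wr->compare_add);
		aseg->compare  = 0;
	}
}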
diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
index 44cb513f9a87..f71b37b75f82 100644
--- a/drivers/infiniband/hw/nes/nes_verbs.c
+++ b/drivers/infiniband/hw/nes/nes_verbs.c
@@ -3372,9 +3372,9 @@ static int nes_post_send(struct ib_qp *ibqp, struct ib_send_wr *ib_wr,
3372 wqe_misc |= NES_IWARP_SQ_WQE_LOCAL_FENCE; 3372 wqe_misc |= NES_IWARP_SQ_WQE_LOCAL_FENCE;
3373 3373
3374 set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_RDMA_STAG_IDX, 3374 set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_RDMA_STAG_IDX,
3375 ib_wr->wr.rdma.rkey); 3375 rdma_wr(ib_wr)->rkey);
3376 set_wqe_64bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_RDMA_TO_LOW_IDX, 3376 set_wqe_64bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_RDMA_TO_LOW_IDX,
3377 ib_wr->wr.rdma.remote_addr); 3377 rdma_wr(ib_wr)->remote_addr);
3378 3378
3379 if ((ib_wr->send_flags & IB_SEND_INLINE) && 3379 if ((ib_wr->send_flags & IB_SEND_INLINE) &&
3380 ((nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) == 0) && 3380 ((nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) == 0) &&
@@ -3409,9 +3409,9 @@ static int nes_post_send(struct ib_qp *ibqp, struct ib_send_wr *ib_wr,
3409 } 3409 }
3410 3410
3411 set_wqe_64bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_RDMA_TO_LOW_IDX, 3411 set_wqe_64bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_RDMA_TO_LOW_IDX,
3412 ib_wr->wr.rdma.remote_addr); 3412 rdma_wr(ib_wr)->remote_addr);
3413 set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_RDMA_STAG_IDX, 3413 set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_RDMA_STAG_IDX,
3414 ib_wr->wr.rdma.rkey); 3414 rdma_wr(ib_wr)->rkey);
3415 set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_RDMA_LENGTH_IDX, 3415 set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_RDMA_LENGTH_IDX,
3416 ib_wr->sg_list->length); 3416 ib_wr->sg_list->length);
3417 set_wqe_64bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_FRAG0_LOW_IDX, 3417 set_wqe_64bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_FRAG0_LOW_IDX,
@@ -3428,15 +3428,16 @@ static int nes_post_send(struct ib_qp *ibqp, struct ib_send_wr *ib_wr,
3428 case IB_WR_FAST_REG_MR: 3428 case IB_WR_FAST_REG_MR:
3429 { 3429 {
3430 int i; 3430 int i;
3431 int flags = ib_wr->wr.fast_reg.access_flags; 3431 struct ib_fast_reg_wr *fwr = fast_reg_wr(ib_wr);
3432 int flags = fwr->access_flags;
3432 struct nes_ib_fast_reg_page_list *pnesfrpl = 3433 struct nes_ib_fast_reg_page_list *pnesfrpl =
3433 container_of(ib_wr->wr.fast_reg.page_list, 3434 container_of(fwr->page_list,
3434 struct nes_ib_fast_reg_page_list, 3435 struct nes_ib_fast_reg_page_list,
3435 ibfrpl); 3436 ibfrpl);
3436 u64 *src_page_list = pnesfrpl->ibfrpl.page_list; 3437 u64 *src_page_list = pnesfrpl->ibfrpl.page_list;
3437 u64 *dst_page_list = pnesfrpl->nes_wqe_pbl.kva; 3438 u64 *dst_page_list = pnesfrpl->nes_wqe_pbl.kva;
3438 3439
3439 if (ib_wr->wr.fast_reg.page_list_len > 3440 if (fwr->page_list_len >
3440 (NES_4K_PBL_CHUNK_SIZE / sizeof(u64))) { 3441 (NES_4K_PBL_CHUNK_SIZE / sizeof(u64))) {
3441 nes_debug(NES_DBG_IW_TX, "SQ_FMR: bad page_list_len\n"); 3442 nes_debug(NES_DBG_IW_TX, "SQ_FMR: bad page_list_len\n");
3442 err = -EINVAL; 3443 err = -EINVAL;
@@ -3445,19 +3446,19 @@ static int nes_post_send(struct ib_qp *ibqp, struct ib_send_wr *ib_wr,
3445 wqe_misc = NES_IWARP_SQ_OP_FAST_REG; 3446 wqe_misc = NES_IWARP_SQ_OP_FAST_REG;
3446 set_wqe_64bit_value(wqe->wqe_words, 3447 set_wqe_64bit_value(wqe->wqe_words,
3447 NES_IWARP_SQ_FMR_WQE_VA_FBO_LOW_IDX, 3448 NES_IWARP_SQ_FMR_WQE_VA_FBO_LOW_IDX,
3448 ib_wr->wr.fast_reg.iova_start); 3449 fwr->iova_start);
3449 set_wqe_32bit_value(wqe->wqe_words, 3450 set_wqe_32bit_value(wqe->wqe_words,
3450 NES_IWARP_SQ_FMR_WQE_LENGTH_LOW_IDX, 3451 NES_IWARP_SQ_FMR_WQE_LENGTH_LOW_IDX,
3451 ib_wr->wr.fast_reg.length); 3452 fwr->length);
3452 set_wqe_32bit_value(wqe->wqe_words, 3453 set_wqe_32bit_value(wqe->wqe_words,
3453 NES_IWARP_SQ_FMR_WQE_LENGTH_HIGH_IDX, 0); 3454 NES_IWARP_SQ_FMR_WQE_LENGTH_HIGH_IDX, 0);
3454 set_wqe_32bit_value(wqe->wqe_words, 3455 set_wqe_32bit_value(wqe->wqe_words,
3455 NES_IWARP_SQ_FMR_WQE_MR_STAG_IDX, 3456 NES_IWARP_SQ_FMR_WQE_MR_STAG_IDX,
3456 ib_wr->wr.fast_reg.rkey); 3457 fwr->rkey);
3457 /* Set page size: */ 3458 /* Set page size: */
3458 if (ib_wr->wr.fast_reg.page_shift == 12) { 3459 if (fwr->page_shift == 12) {
3459 wqe_misc |= NES_IWARP_SQ_FMR_WQE_PAGE_SIZE_4K; 3460 wqe_misc |= NES_IWARP_SQ_FMR_WQE_PAGE_SIZE_4K;
3460 } else if (ib_wr->wr.fast_reg.page_shift == 21) { 3461 } else if (fwr->page_shift == 21) {
3461 wqe_misc |= NES_IWARP_SQ_FMR_WQE_PAGE_SIZE_2M; 3462 wqe_misc |= NES_IWARP_SQ_FMR_WQE_PAGE_SIZE_2M;
3462 } else { 3463 } else {
3463 nes_debug(NES_DBG_IW_TX, "Invalid page shift," 3464 nes_debug(NES_DBG_IW_TX, "Invalid page shift,"
@@ -3480,11 +3481,11 @@ static int nes_post_send(struct ib_qp *ibqp, struct ib_send_wr *ib_wr,
3480 wqe_misc |= NES_IWARP_SQ_FMR_WQE_RIGHTS_ENABLE_WINDOW_BIND; 3481 wqe_misc |= NES_IWARP_SQ_FMR_WQE_RIGHTS_ENABLE_WINDOW_BIND;
3481 3482
3482 /* Fill in PBL info: */ 3483 /* Fill in PBL info: */
3483 if (ib_wr->wr.fast_reg.page_list_len > 3484 if (fwr->page_list_len >
3484 pnesfrpl->ibfrpl.max_page_list_len) { 3485 pnesfrpl->ibfrpl.max_page_list_len) {
3485 nes_debug(NES_DBG_IW_TX, "Invalid page list length," 3486 nes_debug(NES_DBG_IW_TX, "Invalid page list length,"
3486 " ib_wr=%p, value=%u, max=%u\n", 3487 " ib_wr=%p, value=%u, max=%u\n",
3487 ib_wr, ib_wr->wr.fast_reg.page_list_len, 3488 ib_wr, fwr->page_list_len,
3488 pnesfrpl->ibfrpl.max_page_list_len); 3489 pnesfrpl->ibfrpl.max_page_list_len);
3489 err = -EINVAL; 3490 err = -EINVAL;
3490 break; 3491 break;
@@ -3496,19 +3497,19 @@ static int nes_post_send(struct ib_qp *ibqp, struct ib_send_wr *ib_wr,
3496 3497
3497 set_wqe_32bit_value(wqe->wqe_words, 3498 set_wqe_32bit_value(wqe->wqe_words,
3498 NES_IWARP_SQ_FMR_WQE_PBL_LENGTH_IDX, 3499 NES_IWARP_SQ_FMR_WQE_PBL_LENGTH_IDX,
3499 ib_wr->wr.fast_reg.page_list_len * 8); 3500 fwr->page_list_len * 8);
3500 3501
3501 for (i = 0; i < ib_wr->wr.fast_reg.page_list_len; i++) 3502 for (i = 0; i < fwr->page_list_len; i++)
3502 dst_page_list[i] = cpu_to_le64(src_page_list[i]); 3503 dst_page_list[i] = cpu_to_le64(src_page_list[i]);
3503 3504
3504 nes_debug(NES_DBG_IW_TX, "SQ_FMR: iova_start: %llx, " 3505 nes_debug(NES_DBG_IW_TX, "SQ_FMR: iova_start: %llx, "
3505 "length: %d, rkey: %0x, pgl_paddr: %llx, " 3506 "length: %d, rkey: %0x, pgl_paddr: %llx, "
3506 "page_list_len: %u, wqe_misc: %x\n", 3507 "page_list_len: %u, wqe_misc: %x\n",
3507 (unsigned long long) ib_wr->wr.fast_reg.iova_start, 3508 (unsigned long long) fwr->iova_start,
3508 ib_wr->wr.fast_reg.length, 3509 fwr->length,
3509 ib_wr->wr.fast_reg.rkey, 3510 fwr->rkey,
3510 (unsigned long long) pnesfrpl->nes_wqe_pbl.paddr, 3511 (unsigned long long) pnesfrpl->nes_wqe_pbl.paddr,
3511 ib_wr->wr.fast_reg.page_list_len, 3512 fwr->page_list_len,
3512 wqe_misc); 3513 wqe_misc);
3513 break; 3514 break;
3514 } 3515 }
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
index 9bb710a402cd..dd00219b7d15 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
@@ -1997,13 +1997,13 @@ static void ocrdma_build_ud_hdr(struct ocrdma_qp *qp,
1997{ 1997{
1998 struct ocrdma_ewqe_ud_hdr *ud_hdr = 1998 struct ocrdma_ewqe_ud_hdr *ud_hdr =
1999 (struct ocrdma_ewqe_ud_hdr *)(hdr + 1); 1999 (struct ocrdma_ewqe_ud_hdr *)(hdr + 1);
2000 struct ocrdma_ah *ah = get_ocrdma_ah(wr->wr.ud.ah); 2000 struct ocrdma_ah *ah = get_ocrdma_ah(ud_wr(wr)->ah);
2001 2001
2002 ud_hdr->rsvd_dest_qpn = wr->wr.ud.remote_qpn; 2002 ud_hdr->rsvd_dest_qpn = ud_wr(wr)->remote_qpn;
2003 if (qp->qp_type == IB_QPT_GSI) 2003 if (qp->qp_type == IB_QPT_GSI)
2004 ud_hdr->qkey = qp->qkey; 2004 ud_hdr->qkey = qp->qkey;
2005 else 2005 else
2006 ud_hdr->qkey = wr->wr.ud.remote_qkey; 2006 ud_hdr->qkey = ud_wr(wr)->remote_qkey;
2007 ud_hdr->rsvd_ahid = ah->id; 2007 ud_hdr->rsvd_ahid = ah->id;
2008 if (ah->av->valid & OCRDMA_AV_VLAN_VALID) 2008 if (ah->av->valid & OCRDMA_AV_VLAN_VALID)
2009 hdr->cw |= (OCRDMA_FLAG_AH_VLAN_PR << OCRDMA_WQE_FLAGS_SHIFT); 2009 hdr->cw |= (OCRDMA_FLAG_AH_VLAN_PR << OCRDMA_WQE_FLAGS_SHIFT);
@@ -2106,9 +2106,9 @@ static int ocrdma_build_write(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
2106 status = ocrdma_build_inline_sges(qp, hdr, sge, wr, wqe_size); 2106 status = ocrdma_build_inline_sges(qp, hdr, sge, wr, wqe_size);
2107 if (status) 2107 if (status)
2108 return status; 2108 return status;
2109 ext_rw->addr_lo = wr->wr.rdma.remote_addr; 2109 ext_rw->addr_lo = rdma_wr(wr)->remote_addr;
2110 ext_rw->addr_hi = upper_32_bits(wr->wr.rdma.remote_addr); 2110 ext_rw->addr_hi = upper_32_bits(rdma_wr(wr)->remote_addr);
2111 ext_rw->lrkey = wr->wr.rdma.rkey; 2111 ext_rw->lrkey = rdma_wr(wr)->rkey;
2112 ext_rw->len = hdr->total_len; 2112 ext_rw->len = hdr->total_len;
2113 return 0; 2113 return 0;
2114} 2114}
@@ -2126,13 +2126,14 @@ static void ocrdma_build_read(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
2126 hdr->cw |= (OCRDMA_READ << OCRDMA_WQE_OPCODE_SHIFT); 2126 hdr->cw |= (OCRDMA_READ << OCRDMA_WQE_OPCODE_SHIFT);
2127 hdr->cw |= (OCRDMA_TYPE_LKEY << OCRDMA_WQE_TYPE_SHIFT); 2127 hdr->cw |= (OCRDMA_TYPE_LKEY << OCRDMA_WQE_TYPE_SHIFT);
2128 2128
2129 ext_rw->addr_lo = wr->wr.rdma.remote_addr; 2129 ext_rw->addr_lo = rdma_wr(wr)->remote_addr;
2130 ext_rw->addr_hi = upper_32_bits(wr->wr.rdma.remote_addr); 2130 ext_rw->addr_hi = upper_32_bits(rdma_wr(wr)->remote_addr);
2131 ext_rw->lrkey = wr->wr.rdma.rkey; 2131 ext_rw->lrkey = rdma_wr(wr)->rkey;
2132 ext_rw->len = hdr->total_len; 2132 ext_rw->len = hdr->total_len;
2133} 2133}
2134 2134
2135static void build_frmr_pbes(struct ib_send_wr *wr, struct ocrdma_pbl *pbl_tbl, 2135static void build_frmr_pbes(struct ib_fast_reg_wr *wr,
2136 struct ocrdma_pbl *pbl_tbl,
2136 struct ocrdma_hw_mr *hwmr) 2137 struct ocrdma_hw_mr *hwmr)
2137{ 2138{
2138 int i; 2139 int i;
@@ -2144,12 +2145,12 @@ static void build_frmr_pbes(struct ib_send_wr *wr, struct ocrdma_pbl *pbl_tbl,
2144 num_pbes = 0; 2145 num_pbes = 0;
2145 2146
2146 /* go through the OS phy regions & fill hw pbe entries into pbls. */ 2147 /* go through the OS phy regions & fill hw pbe entries into pbls. */
2147 for (i = 0; i < wr->wr.fast_reg.page_list_len; i++) { 2148 for (i = 0; i < wr->page_list_len; i++) {
2148 /* number of pbes can be more for one OS buf, when 2149 /* number of pbes can be more for one OS buf, when
2149 * buffers are of different sizes. 2150 * buffers are of different sizes.
2150 * split the ib_buf to one or more pbes. 2151 * split the ib_buf to one or more pbes.
2151 */ 2152 */
2152 buf_addr = wr->wr.fast_reg.page_list->page_list[i]; 2153 buf_addr = wr->page_list->page_list[i];
2153 pbe->pa_lo = cpu_to_le32((u32) (buf_addr & PAGE_MASK)); 2154 pbe->pa_lo = cpu_to_le32((u32) (buf_addr & PAGE_MASK));
2154 pbe->pa_hi = cpu_to_le32((u32) upper_32_bits(buf_addr)); 2155 pbe->pa_hi = cpu_to_le32((u32) upper_32_bits(buf_addr));
2155 num_pbes += 1; 2156 num_pbes += 1;
@@ -2178,9 +2179,10 @@ static int get_encoded_page_size(int pg_sz)
2178 2179
2179 2180
2180static int ocrdma_build_fr(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr, 2181static int ocrdma_build_fr(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
2181 struct ib_send_wr *wr) 2182 struct ib_send_wr *send_wr)
2182{ 2183{
2183 u64 fbo; 2184 u64 fbo;
2185 struct ib_fast_reg_wr *wr = fast_reg_wr(send_wr);
2184 struct ocrdma_ewqe_fr *fast_reg = (struct ocrdma_ewqe_fr *)(hdr + 1); 2186 struct ocrdma_ewqe_fr *fast_reg = (struct ocrdma_ewqe_fr *)(hdr + 1);
2185 struct ocrdma_mr *mr; 2187 struct ocrdma_mr *mr;
2186 struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device); 2188 struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device);
@@ -2188,33 +2190,32 @@ static int ocrdma_build_fr(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
2188 2190
2189 wqe_size = roundup(wqe_size, OCRDMA_WQE_ALIGN_BYTES); 2191 wqe_size = roundup(wqe_size, OCRDMA_WQE_ALIGN_BYTES);
2190 2192
2191 if (wr->wr.fast_reg.page_list_len > dev->attr.max_pages_per_frmr) 2193 if (wr->page_list_len > dev->attr.max_pages_per_frmr)
2192 return -EINVAL; 2194 return -EINVAL;
2193 2195
2194 hdr->cw |= (OCRDMA_FR_MR << OCRDMA_WQE_OPCODE_SHIFT); 2196 hdr->cw |= (OCRDMA_FR_MR << OCRDMA_WQE_OPCODE_SHIFT);
2195 hdr->cw |= ((wqe_size / OCRDMA_WQE_STRIDE) << OCRDMA_WQE_SIZE_SHIFT); 2197 hdr->cw |= ((wqe_size / OCRDMA_WQE_STRIDE) << OCRDMA_WQE_SIZE_SHIFT);
2196 2198
2197 if (wr->wr.fast_reg.page_list_len == 0) 2199 if (wr->page_list_len == 0)
2198 BUG(); 2200 BUG();
2199 if (wr->wr.fast_reg.access_flags & IB_ACCESS_LOCAL_WRITE) 2201 if (wr->access_flags & IB_ACCESS_LOCAL_WRITE)
2200 hdr->rsvd_lkey_flags |= OCRDMA_LKEY_FLAG_LOCAL_WR; 2202 hdr->rsvd_lkey_flags |= OCRDMA_LKEY_FLAG_LOCAL_WR;
2201 if (wr->wr.fast_reg.access_flags & IB_ACCESS_REMOTE_WRITE) 2203 if (wr->access_flags & IB_ACCESS_REMOTE_WRITE)
2202 hdr->rsvd_lkey_flags |= OCRDMA_LKEY_FLAG_REMOTE_WR; 2204 hdr->rsvd_lkey_flags |= OCRDMA_LKEY_FLAG_REMOTE_WR;
2203 if (wr->wr.fast_reg.access_flags & IB_ACCESS_REMOTE_READ) 2205 if (wr->access_flags & IB_ACCESS_REMOTE_READ)
2204 hdr->rsvd_lkey_flags |= OCRDMA_LKEY_FLAG_REMOTE_RD; 2206 hdr->rsvd_lkey_flags |= OCRDMA_LKEY_FLAG_REMOTE_RD;
2205 hdr->lkey = wr->wr.fast_reg.rkey; 2207 hdr->lkey = wr->rkey;
2206 hdr->total_len = wr->wr.fast_reg.length; 2208 hdr->total_len = wr->length;
2207 2209
2208 fbo = wr->wr.fast_reg.iova_start - 2210 fbo = wr->iova_start - (wr->page_list->page_list[0] & PAGE_MASK);
2209 (wr->wr.fast_reg.page_list->page_list[0] & PAGE_MASK);
2210 2211
2211 fast_reg->va_hi = upper_32_bits(wr->wr.fast_reg.iova_start); 2212 fast_reg->va_hi = upper_32_bits(wr->iova_start);
2212 fast_reg->va_lo = (u32) (wr->wr.fast_reg.iova_start & 0xffffffff); 2213 fast_reg->va_lo = (u32) (wr->iova_start & 0xffffffff);
2213 fast_reg->fbo_hi = upper_32_bits(fbo); 2214 fast_reg->fbo_hi = upper_32_bits(fbo);
2214 fast_reg->fbo_lo = (u32) fbo & 0xffffffff; 2215 fast_reg->fbo_lo = (u32) fbo & 0xffffffff;
2215 fast_reg->num_sges = wr->wr.fast_reg.page_list_len; 2216 fast_reg->num_sges = wr->page_list_len;
2216 fast_reg->size_sge = 2217 fast_reg->size_sge =
2217 get_encoded_page_size(1 << wr->wr.fast_reg.page_shift); 2218 get_encoded_page_size(1 << wr->page_shift);
2218 mr = (struct ocrdma_mr *) (unsigned long) 2219 mr = (struct ocrdma_mr *) (unsigned long)
2219 dev->stag_arr[(hdr->lkey >> 8) & (OCRDMA_MAX_STAG - 1)]; 2220 dev->stag_arr[(hdr->lkey >> 8) & (OCRDMA_MAX_STAG - 1)];
2220 build_frmr_pbes(wr, mr->hwmr.pbl_table, &mr->hwmr); 2221 build_frmr_pbes(wr, mr->hwmr.pbl_table, &mr->hwmr);
diff --git a/drivers/infiniband/hw/qib/qib_keys.c b/drivers/infiniband/hw/qib/qib_keys.c
index 5afaa218508d..eaf139a33b2e 100644
--- a/drivers/infiniband/hw/qib/qib_keys.c
+++ b/drivers/infiniband/hw/qib/qib_keys.c
@@ -338,12 +338,13 @@ bail:
338/* 338/*
339 * Initialize the memory region specified by the work request. 339 * Initialize the memory region specified by the work request.
340 */ 340 */
341int qib_fast_reg_mr(struct qib_qp *qp, struct ib_send_wr *wr) 341int qib_fast_reg_mr(struct qib_qp *qp, struct ib_send_wr *send_wr)
342{ 342{
343 struct ib_fast_reg_wr *wr = fast_reg_wr(send_wr);
343 struct qib_lkey_table *rkt = &to_idev(qp->ibqp.device)->lk_table; 344 struct qib_lkey_table *rkt = &to_idev(qp->ibqp.device)->lk_table;
344 struct qib_pd *pd = to_ipd(qp->ibqp.pd); 345 struct qib_pd *pd = to_ipd(qp->ibqp.pd);
345 struct qib_mregion *mr; 346 struct qib_mregion *mr;
346 u32 rkey = wr->wr.fast_reg.rkey; 347 u32 rkey = wr->rkey;
347 unsigned i, n, m; 348 unsigned i, n, m;
348 int ret = -EINVAL; 349 int ret = -EINVAL;
349 unsigned long flags; 350 unsigned long flags;
@@ -360,22 +361,22 @@ int qib_fast_reg_mr(struct qib_qp *qp, struct ib_send_wr *wr)
360 if (unlikely(mr == NULL || qp->ibqp.pd != mr->pd)) 361 if (unlikely(mr == NULL || qp->ibqp.pd != mr->pd))
361 goto bail; 362 goto bail;
362 363
363 if (wr->wr.fast_reg.page_list_len > mr->max_segs) 364 if (wr->page_list_len > mr->max_segs)
364 goto bail; 365 goto bail;
365 366
366 ps = 1UL << wr->wr.fast_reg.page_shift; 367 ps = 1UL << wr->page_shift;
367 if (wr->wr.fast_reg.length > ps * wr->wr.fast_reg.page_list_len) 368 if (wr->length > ps * wr->page_list_len)
368 goto bail; 369 goto bail;
369 370
370 mr->user_base = wr->wr.fast_reg.iova_start; 371 mr->user_base = wr->iova_start;
371 mr->iova = wr->wr.fast_reg.iova_start; 372 mr->iova = wr->iova_start;
372 mr->lkey = rkey; 373 mr->lkey = rkey;
373 mr->length = wr->wr.fast_reg.length; 374 mr->length = wr->length;
374 mr->access_flags = wr->wr.fast_reg.access_flags; 375 mr->access_flags = wr->access_flags;
375 page_list = wr->wr.fast_reg.page_list->page_list; 376 page_list = wr->page_list->page_list;
376 m = 0; 377 m = 0;
377 n = 0; 378 n = 0;
378 for (i = 0; i < wr->wr.fast_reg.page_list_len; i++) { 379 for (i = 0; i < wr->page_list_len; i++) {
379 mr->map[m]->segs[n].vaddr = (void *) page_list[i]; 380 mr->map[m]->segs[n].vaddr = (void *) page_list[i];
380 mr->map[m]->segs[n].length = ps; 381 mr->map[m]->segs[n].length = ps;
381 if (++n == QIB_SEGSZ) { 382 if (++n == QIB_SEGSZ) {
diff --git a/drivers/infiniband/hw/qib/qib_qp.c b/drivers/infiniband/hw/qib/qib_qp.c
index 4fa88ba2963e..40f85bb3e0d3 100644
--- a/drivers/infiniband/hw/qib/qib_qp.c
+++ b/drivers/infiniband/hw/qib/qib_qp.c
@@ -436,7 +436,7 @@ static void clear_mr_refs(struct qib_qp *qp, int clr_sends)
436 if (qp->ibqp.qp_type == IB_QPT_UD || 436 if (qp->ibqp.qp_type == IB_QPT_UD ||
437 qp->ibqp.qp_type == IB_QPT_SMI || 437 qp->ibqp.qp_type == IB_QPT_SMI ||
438 qp->ibqp.qp_type == IB_QPT_GSI) 438 qp->ibqp.qp_type == IB_QPT_GSI)
439 atomic_dec(&to_iah(wqe->wr.wr.ud.ah)->refcount); 439 atomic_dec(&to_iah(wqe->ud_wr.ah)->refcount);
440 if (++qp->s_last >= qp->s_size) 440 if (++qp->s_last >= qp->s_size)
441 qp->s_last = 0; 441 qp->s_last = 0;
442 } 442 }
diff --git a/drivers/infiniband/hw/qib/qib_rc.c b/drivers/infiniband/hw/qib/qib_rc.c
index 4544d6f88ad7..e6b7556d5221 100644
--- a/drivers/infiniband/hw/qib/qib_rc.c
+++ b/drivers/infiniband/hw/qib/qib_rc.c
@@ -373,10 +373,11 @@ int qib_make_rc_req(struct qib_qp *qp)
373 qp->s_flags |= QIB_S_WAIT_SSN_CREDIT; 373 qp->s_flags |= QIB_S_WAIT_SSN_CREDIT;
374 goto bail; 374 goto bail;
375 } 375 }
376
376 ohdr->u.rc.reth.vaddr = 377 ohdr->u.rc.reth.vaddr =
377 cpu_to_be64(wqe->wr.wr.rdma.remote_addr); 378 cpu_to_be64(wqe->rdma_wr.remote_addr);
378 ohdr->u.rc.reth.rkey = 379 ohdr->u.rc.reth.rkey =
379 cpu_to_be32(wqe->wr.wr.rdma.rkey); 380 cpu_to_be32(wqe->rdma_wr.rkey);
380 ohdr->u.rc.reth.length = cpu_to_be32(len); 381 ohdr->u.rc.reth.length = cpu_to_be32(len);
381 hwords += sizeof(struct ib_reth) / sizeof(u32); 382 hwords += sizeof(struct ib_reth) / sizeof(u32);
382 wqe->lpsn = wqe->psn; 383 wqe->lpsn = wqe->psn;
@@ -386,15 +387,15 @@ int qib_make_rc_req(struct qib_qp *qp)
386 len = pmtu; 387 len = pmtu;
387 break; 388 break;
388 } 389 }
389 if (wqe->wr.opcode == IB_WR_RDMA_WRITE) 390 if (wqe->rdma_wr.wr.opcode == IB_WR_RDMA_WRITE)
390 qp->s_state = OP(RDMA_WRITE_ONLY); 391 qp->s_state = OP(RDMA_WRITE_ONLY);
391 else { 392 else {
392 qp->s_state = 393 qp->s_state = OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE);
393 OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE);
394 /* Immediate data comes after RETH */ 394 /* Immediate data comes after RETH */
395 ohdr->u.rc.imm_data = wqe->wr.ex.imm_data; 395 ohdr->u.rc.imm_data =
396 wqe->rdma_wr.wr.ex.imm_data;
396 hwords += 1; 397 hwords += 1;
397 if (wqe->wr.send_flags & IB_SEND_SOLICITED) 398 if (wqe->rdma_wr.wr.send_flags & IB_SEND_SOLICITED)
398 bth0 |= IB_BTH_SOLICITED; 399 bth0 |= IB_BTH_SOLICITED;
399 } 400 }
400 bth2 |= IB_BTH_REQ_ACK; 401 bth2 |= IB_BTH_REQ_ACK;
@@ -424,10 +425,11 @@ int qib_make_rc_req(struct qib_qp *qp)
424 qp->s_next_psn += (len - 1) / pmtu; 425 qp->s_next_psn += (len - 1) / pmtu;
425 wqe->lpsn = qp->s_next_psn++; 426 wqe->lpsn = qp->s_next_psn++;
426 } 427 }
428
427 ohdr->u.rc.reth.vaddr = 429 ohdr->u.rc.reth.vaddr =
428 cpu_to_be64(wqe->wr.wr.rdma.remote_addr); 430 cpu_to_be64(wqe->rdma_wr.remote_addr);
429 ohdr->u.rc.reth.rkey = 431 ohdr->u.rc.reth.rkey =
430 cpu_to_be32(wqe->wr.wr.rdma.rkey); 432 cpu_to_be32(wqe->rdma_wr.rkey);
431 ohdr->u.rc.reth.length = cpu_to_be32(len); 433 ohdr->u.rc.reth.length = cpu_to_be32(len);
432 qp->s_state = OP(RDMA_READ_REQUEST); 434 qp->s_state = OP(RDMA_READ_REQUEST);
433 hwords += sizeof(ohdr->u.rc.reth) / sizeof(u32); 435 hwords += sizeof(ohdr->u.rc.reth) / sizeof(u32);
@@ -455,24 +457,24 @@ int qib_make_rc_req(struct qib_qp *qp)
455 qp->s_lsn++; 457 qp->s_lsn++;
456 wqe->lpsn = wqe->psn; 458 wqe->lpsn = wqe->psn;
457 } 459 }
458 if (wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP) { 460 if (wqe->atomic_wr.wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
459 qp->s_state = OP(COMPARE_SWAP); 461 qp->s_state = OP(COMPARE_SWAP);
460 ohdr->u.atomic_eth.swap_data = cpu_to_be64( 462 ohdr->u.atomic_eth.swap_data = cpu_to_be64(
461 wqe->wr.wr.atomic.swap); 463 wqe->atomic_wr.swap);
462 ohdr->u.atomic_eth.compare_data = cpu_to_be64( 464 ohdr->u.atomic_eth.compare_data = cpu_to_be64(
463 wqe->wr.wr.atomic.compare_add); 465 wqe->atomic_wr.compare_add);
464 } else { 466 } else {
465 qp->s_state = OP(FETCH_ADD); 467 qp->s_state = OP(FETCH_ADD);
466 ohdr->u.atomic_eth.swap_data = cpu_to_be64( 468 ohdr->u.atomic_eth.swap_data = cpu_to_be64(
467 wqe->wr.wr.atomic.compare_add); 469 wqe->atomic_wr.compare_add);
468 ohdr->u.atomic_eth.compare_data = 0; 470 ohdr->u.atomic_eth.compare_data = 0;
469 } 471 }
470 ohdr->u.atomic_eth.vaddr[0] = cpu_to_be32( 472 ohdr->u.atomic_eth.vaddr[0] = cpu_to_be32(
471 wqe->wr.wr.atomic.remote_addr >> 32); 473 wqe->atomic_wr.remote_addr >> 32);
472 ohdr->u.atomic_eth.vaddr[1] = cpu_to_be32( 474 ohdr->u.atomic_eth.vaddr[1] = cpu_to_be32(
473 wqe->wr.wr.atomic.remote_addr); 475 wqe->atomic_wr.remote_addr);
474 ohdr->u.atomic_eth.rkey = cpu_to_be32( 476 ohdr->u.atomic_eth.rkey = cpu_to_be32(
475 wqe->wr.wr.atomic.rkey); 477 wqe->atomic_wr.rkey);
476 hwords += sizeof(struct ib_atomic_eth) / sizeof(u32); 478 hwords += sizeof(struct ib_atomic_eth) / sizeof(u32);
477 ss = NULL; 479 ss = NULL;
478 len = 0; 480 len = 0;
@@ -597,9 +599,9 @@ int qib_make_rc_req(struct qib_qp *qp)
597 */ 599 */
598 len = ((qp->s_psn - wqe->psn) & QIB_PSN_MASK) * pmtu; 600 len = ((qp->s_psn - wqe->psn) & QIB_PSN_MASK) * pmtu;
599 ohdr->u.rc.reth.vaddr = 601 ohdr->u.rc.reth.vaddr =
600 cpu_to_be64(wqe->wr.wr.rdma.remote_addr + len); 602 cpu_to_be64(wqe->rdma_wr.remote_addr + len);
601 ohdr->u.rc.reth.rkey = 603 ohdr->u.rc.reth.rkey =
602 cpu_to_be32(wqe->wr.wr.rdma.rkey); 604 cpu_to_be32(wqe->rdma_wr.rkey);
603 ohdr->u.rc.reth.length = cpu_to_be32(wqe->length - len); 605 ohdr->u.rc.reth.length = cpu_to_be32(wqe->length - len);
604 qp->s_state = OP(RDMA_READ_REQUEST); 606 qp->s_state = OP(RDMA_READ_REQUEST);
605 hwords += sizeof(ohdr->u.rc.reth) / sizeof(u32); 607 hwords += sizeof(ohdr->u.rc.reth) / sizeof(u32);
diff --git a/drivers/infiniband/hw/qib/qib_ruc.c b/drivers/infiniband/hw/qib/qib_ruc.c
index 22e356ca8058..b1aa21bdd484 100644
--- a/drivers/infiniband/hw/qib/qib_ruc.c
+++ b/drivers/infiniband/hw/qib/qib_ruc.c
@@ -459,8 +459,8 @@ again:
459 if (wqe->length == 0) 459 if (wqe->length == 0)
460 break; 460 break;
461 if (unlikely(!qib_rkey_ok(qp, &qp->r_sge.sge, wqe->length, 461 if (unlikely(!qib_rkey_ok(qp, &qp->r_sge.sge, wqe->length,
462 wqe->wr.wr.rdma.remote_addr, 462 wqe->rdma_wr.remote_addr,
463 wqe->wr.wr.rdma.rkey, 463 wqe->rdma_wr.rkey,
464 IB_ACCESS_REMOTE_WRITE))) 464 IB_ACCESS_REMOTE_WRITE)))
465 goto acc_err; 465 goto acc_err;
466 qp->r_sge.sg_list = NULL; 466 qp->r_sge.sg_list = NULL;
@@ -472,8 +472,8 @@ again:
472 if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_READ))) 472 if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_READ)))
473 goto inv_err; 473 goto inv_err;
474 if (unlikely(!qib_rkey_ok(qp, &sqp->s_sge.sge, wqe->length, 474 if (unlikely(!qib_rkey_ok(qp, &sqp->s_sge.sge, wqe->length,
475 wqe->wr.wr.rdma.remote_addr, 475 wqe->rdma_wr.remote_addr,
476 wqe->wr.wr.rdma.rkey, 476 wqe->rdma_wr.rkey,
477 IB_ACCESS_REMOTE_READ))) 477 IB_ACCESS_REMOTE_READ)))
478 goto acc_err; 478 goto acc_err;
479 release = 0; 479 release = 0;
@@ -490,18 +490,18 @@ again:
490 if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC))) 490 if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC)))
491 goto inv_err; 491 goto inv_err;
492 if (unlikely(!qib_rkey_ok(qp, &qp->r_sge.sge, sizeof(u64), 492 if (unlikely(!qib_rkey_ok(qp, &qp->r_sge.sge, sizeof(u64),
493 wqe->wr.wr.atomic.remote_addr, 493 wqe->atomic_wr.remote_addr,
494 wqe->wr.wr.atomic.rkey, 494 wqe->atomic_wr.rkey,
495 IB_ACCESS_REMOTE_ATOMIC))) 495 IB_ACCESS_REMOTE_ATOMIC)))
496 goto acc_err; 496 goto acc_err;
497 /* Perform atomic OP and save result. */ 497 /* Perform atomic OP and save result. */
498 maddr = (atomic64_t *) qp->r_sge.sge.vaddr; 498 maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
499 sdata = wqe->wr.wr.atomic.compare_add; 499 sdata = wqe->atomic_wr.compare_add;
500 *(u64 *) sqp->s_sge.sge.vaddr = 500 *(u64 *) sqp->s_sge.sge.vaddr =
501 (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ? 501 (wqe->atomic_wr.wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
502 (u64) atomic64_add_return(sdata, maddr) - sdata : 502 (u64) atomic64_add_return(sdata, maddr) - sdata :
503 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr, 503 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
504 sdata, wqe->wr.wr.atomic.swap); 504 sdata, wqe->atomic_wr.swap);
505 qib_put_mr(qp->r_sge.sge.mr); 505 qib_put_mr(qp->r_sge.sge.mr);
506 qp->r_sge.num_sge = 0; 506 qp->r_sge.num_sge = 0;
507 goto send_comp; 507 goto send_comp;
@@ -785,7 +785,7 @@ void qib_send_complete(struct qib_qp *qp, struct qib_swqe *wqe,
785 if (qp->ibqp.qp_type == IB_QPT_UD || 785 if (qp->ibqp.qp_type == IB_QPT_UD ||
786 qp->ibqp.qp_type == IB_QPT_SMI || 786 qp->ibqp.qp_type == IB_QPT_SMI ||
787 qp->ibqp.qp_type == IB_QPT_GSI) 787 qp->ibqp.qp_type == IB_QPT_GSI)
788 atomic_dec(&to_iah(wqe->wr.wr.ud.ah)->refcount); 788 atomic_dec(&to_iah(wqe->ud_wr.ah)->refcount);
789 789
790 /* See ch. 11.2.4.1 and 10.7.3.1 */ 790 /* See ch. 11.2.4.1 and 10.7.3.1 */
791 if (!(qp->s_flags & QIB_S_SIGNAL_REQ_WR) || 791 if (!(qp->s_flags & QIB_S_SIGNAL_REQ_WR) ||
diff --git a/drivers/infiniband/hw/qib/qib_uc.c b/drivers/infiniband/hw/qib/qib_uc.c
index aa3a8035bb68..06a564589c35 100644
--- a/drivers/infiniband/hw/qib/qib_uc.c
+++ b/drivers/infiniband/hw/qib/qib_uc.c
@@ -129,9 +129,9 @@ int qib_make_uc_req(struct qib_qp *qp)
129 case IB_WR_RDMA_WRITE: 129 case IB_WR_RDMA_WRITE:
130 case IB_WR_RDMA_WRITE_WITH_IMM: 130 case IB_WR_RDMA_WRITE_WITH_IMM:
131 ohdr->u.rc.reth.vaddr = 131 ohdr->u.rc.reth.vaddr =
132 cpu_to_be64(wqe->wr.wr.rdma.remote_addr); 132 cpu_to_be64(wqe->rdma_wr.remote_addr);
133 ohdr->u.rc.reth.rkey = 133 ohdr->u.rc.reth.rkey =
134 cpu_to_be32(wqe->wr.wr.rdma.rkey); 134 cpu_to_be32(wqe->rdma_wr.rkey);
135 ohdr->u.rc.reth.length = cpu_to_be32(len); 135 ohdr->u.rc.reth.length = cpu_to_be32(len);
136 hwords += sizeof(struct ib_reth) / 4; 136 hwords += sizeof(struct ib_reth) / 4;
137 if (len > pmtu) { 137 if (len > pmtu) {
diff --git a/drivers/infiniband/hw/qib/qib_ud.c b/drivers/infiniband/hw/qib/qib_ud.c
index 26243b722b5e..59193f67ea78 100644
--- a/drivers/infiniband/hw/qib/qib_ud.c
+++ b/drivers/infiniband/hw/qib/qib_ud.c
@@ -59,7 +59,7 @@ static void qib_ud_loopback(struct qib_qp *sqp, struct qib_swqe *swqe)
59 u32 length; 59 u32 length;
60 enum ib_qp_type sqptype, dqptype; 60 enum ib_qp_type sqptype, dqptype;
61 61
62 qp = qib_lookup_qpn(ibp, swqe->wr.wr.ud.remote_qpn); 62 qp = qib_lookup_qpn(ibp, swqe->ud_wr.remote_qpn);
63 if (!qp) { 63 if (!qp) {
64 ibp->n_pkt_drops++; 64 ibp->n_pkt_drops++;
65 return; 65 return;
@@ -76,7 +76,7 @@ static void qib_ud_loopback(struct qib_qp *sqp, struct qib_swqe *swqe)
76 goto drop; 76 goto drop;
77 } 77 }
78 78
79 ah_attr = &to_iah(swqe->wr.wr.ud.ah)->attr; 79 ah_attr = &to_iah(swqe->ud_wr.ah)->attr;
80 ppd = ppd_from_ibp(ibp); 80 ppd = ppd_from_ibp(ibp);
81 81
82 if (qp->ibqp.qp_num > 1) { 82 if (qp->ibqp.qp_num > 1) {
@@ -106,8 +106,8 @@ static void qib_ud_loopback(struct qib_qp *sqp, struct qib_swqe *swqe)
106 if (qp->ibqp.qp_num) { 106 if (qp->ibqp.qp_num) {
107 u32 qkey; 107 u32 qkey;
108 108
109 qkey = (int)swqe->wr.wr.ud.remote_qkey < 0 ? 109 qkey = (int)swqe->ud_wr.remote_qkey < 0 ?
110 sqp->qkey : swqe->wr.wr.ud.remote_qkey; 110 sqp->qkey : swqe->ud_wr.remote_qkey;
111 if (unlikely(qkey != qp->qkey)) { 111 if (unlikely(qkey != qp->qkey)) {
112 u16 lid; 112 u16 lid;
113 113
@@ -210,7 +210,7 @@ static void qib_ud_loopback(struct qib_qp *sqp, struct qib_swqe *swqe)
210 wc.qp = &qp->ibqp; 210 wc.qp = &qp->ibqp;
211 wc.src_qp = sqp->ibqp.qp_num; 211 wc.src_qp = sqp->ibqp.qp_num;
212 wc.pkey_index = qp->ibqp.qp_type == IB_QPT_GSI ? 212 wc.pkey_index = qp->ibqp.qp_type == IB_QPT_GSI ?
213 swqe->wr.wr.ud.pkey_index : 0; 213 swqe->ud_wr.pkey_index : 0;
214 wc.slid = ppd->lid | (ah_attr->src_path_bits & ((1 << ppd->lmc) - 1)); 214 wc.slid = ppd->lid | (ah_attr->src_path_bits & ((1 << ppd->lmc) - 1));
215 wc.sl = ah_attr->sl; 215 wc.sl = ah_attr->sl;
216 wc.dlid_path_bits = ah_attr->dlid & ((1 << ppd->lmc) - 1); 216 wc.dlid_path_bits = ah_attr->dlid & ((1 << ppd->lmc) - 1);
@@ -277,7 +277,7 @@ int qib_make_ud_req(struct qib_qp *qp)
277 /* Construct the header. */ 277 /* Construct the header. */
278 ibp = to_iport(qp->ibqp.device, qp->port_num); 278 ibp = to_iport(qp->ibqp.device, qp->port_num);
279 ppd = ppd_from_ibp(ibp); 279 ppd = ppd_from_ibp(ibp);
280 ah_attr = &to_iah(wqe->wr.wr.ud.ah)->attr; 280 ah_attr = &to_iah(wqe->ud_wr.ah)->attr;
281 if (ah_attr->dlid >= QIB_MULTICAST_LID_BASE) { 281 if (ah_attr->dlid >= QIB_MULTICAST_LID_BASE) {
282 if (ah_attr->dlid != QIB_PERMISSIVE_LID) 282 if (ah_attr->dlid != QIB_PERMISSIVE_LID)
283 this_cpu_inc(ibp->pmastats->n_multicast_xmit); 283 this_cpu_inc(ibp->pmastats->n_multicast_xmit);
@@ -363,7 +363,7 @@ int qib_make_ud_req(struct qib_qp *qp)
363 bth0 |= extra_bytes << 20; 363 bth0 |= extra_bytes << 20;
364 bth0 |= qp->ibqp.qp_type == IB_QPT_SMI ? QIB_DEFAULT_P_KEY : 364 bth0 |= qp->ibqp.qp_type == IB_QPT_SMI ? QIB_DEFAULT_P_KEY :
365 qib_get_pkey(ibp, qp->ibqp.qp_type == IB_QPT_GSI ? 365 qib_get_pkey(ibp, qp->ibqp.qp_type == IB_QPT_GSI ?
366 wqe->wr.wr.ud.pkey_index : qp->s_pkey_index); 366 wqe->ud_wr.pkey_index : qp->s_pkey_index);
367 ohdr->bth[0] = cpu_to_be32(bth0); 367 ohdr->bth[0] = cpu_to_be32(bth0);
368 /* 368 /*
369 * Use the multicast QP if the destination LID is a multicast LID. 369 * Use the multicast QP if the destination LID is a multicast LID.
@@ -371,14 +371,14 @@ int qib_make_ud_req(struct qib_qp *qp)
371 ohdr->bth[1] = ah_attr->dlid >= QIB_MULTICAST_LID_BASE && 371 ohdr->bth[1] = ah_attr->dlid >= QIB_MULTICAST_LID_BASE &&
372 ah_attr->dlid != QIB_PERMISSIVE_LID ? 372 ah_attr->dlid != QIB_PERMISSIVE_LID ?
373 cpu_to_be32(QIB_MULTICAST_QPN) : 373 cpu_to_be32(QIB_MULTICAST_QPN) :
374 cpu_to_be32(wqe->wr.wr.ud.remote_qpn); 374 cpu_to_be32(wqe->ud_wr.remote_qpn);
375 ohdr->bth[2] = cpu_to_be32(qp->s_next_psn++ & QIB_PSN_MASK); 375 ohdr->bth[2] = cpu_to_be32(qp->s_next_psn++ & QIB_PSN_MASK);
376 /* 376 /*
377 * Qkeys with the high order bit set mean use the 377 * Qkeys with the high order bit set mean use the
378 * qkey from the QP context instead of the WR (see 10.2.5). 378 * qkey from the QP context instead of the WR (see 10.2.5).
379 */ 379 */
380 ohdr->u.ud.deth[0] = cpu_to_be32((int)wqe->wr.wr.ud.remote_qkey < 0 ? 380 ohdr->u.ud.deth[0] = cpu_to_be32((int)wqe->ud_wr.remote_qkey < 0 ?
381 qp->qkey : wqe->wr.wr.ud.remote_qkey); 381 qp->qkey : wqe->ud_wr.remote_qkey);
382 ohdr->u.ud.deth[1] = cpu_to_be32(qp->ibqp.qp_num); 382 ohdr->u.ud.deth[1] = cpu_to_be32(qp->ibqp.qp_num);
383 383
384done: 384done:
diff --git a/drivers/infiniband/hw/qib/qib_verbs.c b/drivers/infiniband/hw/qib/qib_verbs.c
index 3dcc4985b60f..a6b0b098ff30 100644
--- a/drivers/infiniband/hw/qib/qib_verbs.c
+++ b/drivers/infiniband/hw/qib/qib_verbs.c
@@ -374,7 +374,7 @@ static int qib_post_one_send(struct qib_qp *qp, struct ib_send_wr *wr,
374 wr->opcode != IB_WR_SEND_WITH_IMM) 374 wr->opcode != IB_WR_SEND_WITH_IMM)
375 goto bail_inval; 375 goto bail_inval;
376 /* Check UD destination address PD */ 376 /* Check UD destination address PD */
377 if (qp->ibqp.pd != wr->wr.ud.ah->pd) 377 if (qp->ibqp.pd != ud_wr(wr)->ah->pd)
378 goto bail_inval; 378 goto bail_inval;
379 } else if ((unsigned) wr->opcode > IB_WR_ATOMIC_FETCH_AND_ADD) 379 } else if ((unsigned) wr->opcode > IB_WR_ATOMIC_FETCH_AND_ADD)
380 goto bail_inval; 380 goto bail_inval;
@@ -397,7 +397,23 @@ static int qib_post_one_send(struct qib_qp *qp, struct ib_send_wr *wr,
397 rkt = &to_idev(qp->ibqp.device)->lk_table; 397 rkt = &to_idev(qp->ibqp.device)->lk_table;
398 pd = to_ipd(qp->ibqp.pd); 398 pd = to_ipd(qp->ibqp.pd);
399 wqe = get_swqe_ptr(qp, qp->s_head); 399 wqe = get_swqe_ptr(qp, qp->s_head);
400 wqe->wr = *wr; 400
401 if (qp->ibqp.qp_type != IB_QPT_UC &&
402 qp->ibqp.qp_type != IB_QPT_RC)
403 memcpy(&wqe->ud_wr, ud_wr(wr), sizeof(wqe->ud_wr));
404 else if (wr->opcode == IB_WR_FAST_REG_MR)
405 memcpy(&wqe->fast_reg_wr, fast_reg_wr(wr),
406 sizeof(wqe->fast_reg_wr));
407 else if (wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM ||
408 wr->opcode == IB_WR_RDMA_WRITE ||
409 wr->opcode == IB_WR_RDMA_READ)
410 memcpy(&wqe->rdma_wr, rdma_wr(wr), sizeof(wqe->rdma_wr));
411 else if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
412 wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD)
413 memcpy(&wqe->atomic_wr, atomic_wr(wr), sizeof(wqe->atomic_wr));
414 else
415 memcpy(&wqe->wr, wr, sizeof(wqe->wr));
416
401 wqe->length = 0; 417 wqe->length = 0;
402 j = 0; 418 j = 0;
403 if (wr->num_sge) { 419 if (wr->num_sge) {
@@ -426,7 +442,7 @@ static int qib_post_one_send(struct qib_qp *qp, struct ib_send_wr *wr,
426 qp->port_num - 1)->ibmtu) 442 qp->port_num - 1)->ibmtu)
427 goto bail_inval_free; 443 goto bail_inval_free;
428 else 444 else
429 atomic_inc(&to_iah(wr->wr.ud.ah)->refcount); 445 atomic_inc(&to_iah(ud_wr(wr)->ah)->refcount);
430 wqe->ssn = qp->s_ssn++; 446 wqe->ssn = qp->s_ssn++;
431 qp->s_head = next; 447 qp->s_head = next;
432 448
diff --git a/drivers/infiniband/hw/qib/qib_verbs.h b/drivers/infiniband/hw/qib/qib_verbs.h
index a08df70e8503..8aa16851a5e6 100644
--- a/drivers/infiniband/hw/qib/qib_verbs.h
+++ b/drivers/infiniband/hw/qib/qib_verbs.h
@@ -338,7 +338,13 @@ struct qib_mr {
338 * in qp->s_max_sge. 338 * in qp->s_max_sge.
339 */ 339 */
340struct qib_swqe { 340struct qib_swqe {
341 struct ib_send_wr wr; /* don't use wr.sg_list */ 341 union {
342 struct ib_send_wr wr; /* don't use wr.sg_list */
343 struct ib_ud_wr ud_wr;
344 struct ib_fast_reg_wr fast_reg_wr;
345 struct ib_rdma_wr rdma_wr;
346 struct ib_atomic_wr atomic_wr;
347 };
342 u32 psn; /* first packet sequence number */ 348 u32 psn; /* first packet sequence number */
343 u32 lpsn; /* last packet sequence number */ 349 u32 lpsn; /* last packet sequence number */
344 u32 ssn; /* send sequence number */ 350 u32 ssn; /* send sequence number */
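qib_swqe now overlays the generic WR and all typed WRs in one union. The overlay is only safe if the embedded struct ib_send_wr sits at offset 0 of every typed WR, so that wqe->wr.opcode and wqe->wr.send_flags read the same storage no matter which member qib_post_one_send() copied in. A hypothetical compile-time check, not part of this patch, that would document the assumption:

#include <linux/bug.h>
#include <linux/stddef.h>
#include <rdma/ib_verbs.h>

/* Illustrative only: fail the build if the generic WR ever stops being
 * the first member of the typed WRs that qib_swqe unions together. */
static inline void qib_swqe_layout_check(void)
{
	BUILD_BUG_ON(offsetof(struct ib_rdma_wr, wr) != 0);
	BUILD_BUG_ON(offsetof(struct ib_ud_wr, wr) != 0);
	BUILD_BUG_ON(offsetof(struct ib_atomic_wr, wr) != 0);
	BUILD_BUG_ON(offsetof(struct ib_fast_reg_wr, wr) != 0);
}
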
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index edc5b8565d6d..3ede10309754 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -360,7 +360,7 @@ struct ipoib_dev_priv {
360 unsigned tx_head; 360 unsigned tx_head;
361 unsigned tx_tail; 361 unsigned tx_tail;
362 struct ib_sge tx_sge[MAX_SKB_FRAGS + 1]; 362 struct ib_sge tx_sge[MAX_SKB_FRAGS + 1];
363 struct ib_send_wr tx_wr; 363 struct ib_ud_wr tx_wr;
364 unsigned tx_outstanding; 364 unsigned tx_outstanding;
365 struct ib_wc send_wc[MAX_SEND_CQE]; 365 struct ib_wc send_wc[MAX_SEND_CQE];
366 366
@@ -528,7 +528,7 @@ static inline void ipoib_build_sge(struct ipoib_dev_priv *priv,
528 priv->tx_sge[i + off].addr = mapping[i + off]; 528 priv->tx_sge[i + off].addr = mapping[i + off];
529 priv->tx_sge[i + off].length = skb_frag_size(&frags[i]); 529 priv->tx_sge[i + off].length = skb_frag_size(&frags[i]);
530 } 530 }
531 priv->tx_wr.num_sge = nr_frags + off; 531 priv->tx_wr.wr.num_sge = nr_frags + off;
532} 532}
533 533
534#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG 534#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index c78dc1638030..3ae9726efb98 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -700,9 +700,9 @@ static inline int post_send(struct ipoib_dev_priv *priv,
700 700
701 ipoib_build_sge(priv, tx_req); 701 ipoib_build_sge(priv, tx_req);
702 702
703 priv->tx_wr.wr_id = wr_id | IPOIB_OP_CM; 703 priv->tx_wr.wr.wr_id = wr_id | IPOIB_OP_CM;
704 704
705 return ib_post_send(tx->qp, &priv->tx_wr, &bad_wr); 705 return ib_post_send(tx->qp, &priv->tx_wr.wr, &bad_wr);
706} 706}
707 707
708void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_tx *tx) 708void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_tx *tx)
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index d266667ca9b8..5ea0c14070d1 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -518,19 +518,19 @@ static inline int post_send(struct ipoib_dev_priv *priv,
518 518
519 ipoib_build_sge(priv, tx_req); 519 ipoib_build_sge(priv, tx_req);
520 520
521 priv->tx_wr.wr_id = wr_id; 521 priv->tx_wr.wr.wr_id = wr_id;
522 priv->tx_wr.wr.ud.remote_qpn = qpn; 522 priv->tx_wr.remote_qpn = qpn;
523 priv->tx_wr.wr.ud.ah = address; 523 priv->tx_wr.ah = address;
524 524
525 if (head) { 525 if (head) {
526 priv->tx_wr.wr.ud.mss = skb_shinfo(skb)->gso_size; 526 priv->tx_wr.mss = skb_shinfo(skb)->gso_size;
527 priv->tx_wr.wr.ud.header = head; 527 priv->tx_wr.header = head;
528 priv->tx_wr.wr.ud.hlen = hlen; 528 priv->tx_wr.hlen = hlen;
529 priv->tx_wr.opcode = IB_WR_LSO; 529 priv->tx_wr.wr.opcode = IB_WR_LSO;
530 } else 530 } else
531 priv->tx_wr.opcode = IB_WR_SEND; 531 priv->tx_wr.wr.opcode = IB_WR_SEND;
532 532
533 return ib_post_send(priv->qp, &priv->tx_wr, &bad_wr); 533 return ib_post_send(priv->qp, &priv->tx_wr.wr, &bad_wr);
534} 534}
535 535
536void ipoib_send(struct net_device *dev, struct sk_buff *skb, 536void ipoib_send(struct net_device *dev, struct sk_buff *skb,
@@ -583,9 +583,9 @@ void ipoib_send(struct net_device *dev, struct sk_buff *skb,
583 } 583 }
584 584
585 if (skb->ip_summed == CHECKSUM_PARTIAL) 585 if (skb->ip_summed == CHECKSUM_PARTIAL)
586 priv->tx_wr.send_flags |= IB_SEND_IP_CSUM; 586 priv->tx_wr.wr.send_flags |= IB_SEND_IP_CSUM;
587 else 587 else
588 priv->tx_wr.send_flags &= ~IB_SEND_IP_CSUM; 588 priv->tx_wr.wr.send_flags &= ~IB_SEND_IP_CSUM;
589 589
590 if (++priv->tx_outstanding == ipoib_sendq_size) { 590 if (++priv->tx_outstanding == ipoib_sendq_size) {
591 ipoib_dbg(priv, "TX ring full, stopping kernel net queue\n"); 591 ipoib_dbg(priv, "TX ring full, stopping kernel net queue\n");
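ipoib now keeps its per-device send WR as a struct ib_ud_wr: generic fields move under tx_wr.wr, while the address handle, destination QPN/Q_Key and the LSO header become direct members. Reconstructed from the accesses in the hunks above (field order and any members not referenced here are guesses, not the authoritative definition), the UD WR looks roughly like:

/* Rough shape of struct ib_ud_wr as implied by the ipoib and qib hunks;
 * ordering and unreferenced members are assumptions. */
struct ib_ud_wr {
	struct ib_send_wr wr;	/* opcode, wr_id, sg_list, send_flags, ... */
	struct ib_ah	*ah;
	void		*header;	/* LSO headers (IB_WR_LSO) */
	int		hlen;
	int		mss;
	u32		remote_qpn;
	u32		remote_qkey;
	u16		pkey_index;
};
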
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index cbb6721d0a65..7d3281866ffc 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -461,7 +461,7 @@ int ipoib_set_mode(struct net_device *dev, const char *buf)
461 netdev_update_features(dev); 461 netdev_update_features(dev);
462 dev_set_mtu(dev, ipoib_cm_max_mtu(dev)); 462 dev_set_mtu(dev, ipoib_cm_max_mtu(dev));
463 rtnl_unlock(); 463 rtnl_unlock();
464 priv->tx_wr.send_flags &= ~IB_SEND_IP_CSUM; 464 priv->tx_wr.wr.send_flags &= ~IB_SEND_IP_CSUM;
465 465
466 ipoib_flush_paths(dev); 466 ipoib_flush_paths(dev);
467 rtnl_lock(); 467 rtnl_lock();
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
index d4b97614196c..f357ca67a41c 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -245,7 +245,7 @@ static int ipoib_mcast_join_finish(struct ipoib_mcast *mcast,
245 245
246 priv->qkey = be32_to_cpu(priv->broadcast->mcmember.qkey); 246 priv->qkey = be32_to_cpu(priv->broadcast->mcmember.qkey);
247 spin_unlock_irq(&priv->lock); 247 spin_unlock_irq(&priv->lock);
248 priv->tx_wr.wr.ud.remote_qkey = priv->qkey; 248 priv->tx_wr.remote_qkey = priv->qkey;
249 set_qkey = 1; 249 set_qkey = 1;
250 } 250 }
251 251
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
index 78845b6e8b81..d48c5bae7877 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
@@ -221,9 +221,9 @@ int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca)
221 for (i = 0; i < MAX_SKB_FRAGS + 1; ++i) 221 for (i = 0; i < MAX_SKB_FRAGS + 1; ++i)
222 priv->tx_sge[i].lkey = priv->pd->local_dma_lkey; 222 priv->tx_sge[i].lkey = priv->pd->local_dma_lkey;
223 223
224 priv->tx_wr.opcode = IB_WR_SEND; 224 priv->tx_wr.wr.opcode = IB_WR_SEND;
225 priv->tx_wr.sg_list = priv->tx_sge; 225 priv->tx_wr.wr.sg_list = priv->tx_sge;
226 priv->tx_wr.send_flags = IB_SEND_SIGNALED; 226 priv->tx_wr.wr.send_flags = IB_SEND_SIGNALED;
227 227
228 priv->rx_sge[0].lkey = priv->pd->local_dma_lkey; 228 priv->rx_sge[0].lkey = priv->pd->local_dma_lkey;
229 229
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h
index d8bbad9eb59b..2484bee993ec 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.h
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.h
@@ -295,7 +295,11 @@ struct iser_tx_desc {
295 int num_sge; 295 int num_sge;
296 bool mapped; 296 bool mapped;
297 u8 wr_idx; 297 u8 wr_idx;
298 struct ib_send_wr wrs[ISER_MAX_WRS]; 298 union iser_wr {
299 struct ib_send_wr send;
300 struct ib_fast_reg_wr fast_reg;
301 struct ib_sig_handover_wr sig;
302 } wrs[ISER_MAX_WRS];
299 struct iser_mem_reg data_reg; 303 struct iser_mem_reg data_reg;
300 struct iser_mem_reg prot_reg; 304 struct iser_mem_reg prot_reg;
301 struct ib_sig_attrs sig_attrs; 305 struct ib_sig_attrs sig_attrs;
@@ -707,11 +711,11 @@ iser_reg_desc_put_fmr(struct ib_conn *ib_conn,
707static inline struct ib_send_wr * 711static inline struct ib_send_wr *
708iser_tx_next_wr(struct iser_tx_desc *tx_desc) 712iser_tx_next_wr(struct iser_tx_desc *tx_desc)
709{ 713{
710 struct ib_send_wr *cur_wr = &tx_desc->wrs[tx_desc->wr_idx]; 714 struct ib_send_wr *cur_wr = &tx_desc->wrs[tx_desc->wr_idx].send;
711 struct ib_send_wr *last_wr; 715 struct ib_send_wr *last_wr;
712 716
713 if (tx_desc->wr_idx) { 717 if (tx_desc->wr_idx) {
714 last_wr = &tx_desc->wrs[tx_desc->wr_idx - 1]; 718 last_wr = &tx_desc->wrs[tx_desc->wr_idx - 1].send;
715 last_wr->next = cur_wr; 719 last_wr->next = cur_wr;
716 } 720 }
717 tx_desc->wr_idx++; 721 tx_desc->wr_idx++;
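iser replaces its flat array of struct ib_send_wr with a union so one descriptor can chain plain sends, fast-registration and signature WRs together. Chaining through &wrs[i].send keeps working because every union member either is, or begins with, a struct ib_send_wr, so the generic pointer and the typed view refer to the same slot. A short sketch of that round trip, using the names from the hunk above and assuming the layout just described:

/* Sketch: generic and typed views of the same WR slot. */
struct ib_send_wr *generic = iser_tx_next_wr(tx_desc);
struct ib_fast_reg_wr *freg = fast_reg_wr(generic);

freg->wr.opcode = IB_WR_FAST_REG_MR;	/* generic fields via ->wr */
freg->rkey = mr->rkey;			/* typed fields directly   */
/* generic == &freg->wr, so the previous WR's ->next, which
 * iser_tx_next_wr() already set to "generic", stays correct. */
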
diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c
index 3e0452c4248f..b29fda3e8e74 100644
--- a/drivers/infiniband/ulp/iser/iser_memory.c
+++ b/drivers/infiniband/ulp/iser/iser_memory.c
@@ -437,7 +437,7 @@ iser_reg_sig_mr(struct iscsi_iser_task *iser_task,
437{ 437{
438 struct iser_tx_desc *tx_desc = &iser_task->desc; 438 struct iser_tx_desc *tx_desc = &iser_task->desc;
439 struct ib_sig_attrs *sig_attrs = &tx_desc->sig_attrs; 439 struct ib_sig_attrs *sig_attrs = &tx_desc->sig_attrs;
440 struct ib_send_wr *wr; 440 struct ib_sig_handover_wr *wr;
441 int ret; 441 int ret;
442 442
443 memset(sig_attrs, 0, sizeof(*sig_attrs)); 443 memset(sig_attrs, 0, sizeof(*sig_attrs));
@@ -447,26 +447,24 @@ iser_reg_sig_mr(struct iscsi_iser_task *iser_task,
447 447
448 iser_set_prot_checks(iser_task->sc, &sig_attrs->check_mask); 448 iser_set_prot_checks(iser_task->sc, &sig_attrs->check_mask);
449 449
450 if (!pi_ctx->sig_mr_valid) { 450 if (!pi_ctx->sig_mr_valid)
451 wr = iser_tx_next_wr(tx_desc); 451 iser_inv_rkey(iser_tx_next_wr(tx_desc), pi_ctx->sig_mr);
452 iser_inv_rkey(wr, pi_ctx->sig_mr); 452
453 } 453 wr = sig_handover_wr(iser_tx_next_wr(tx_desc));
454 454 wr->wr.opcode = IB_WR_REG_SIG_MR;
455 wr = iser_tx_next_wr(tx_desc); 455 wr->wr.wr_id = ISER_FASTREG_LI_WRID;
456 wr->opcode = IB_WR_REG_SIG_MR; 456 wr->wr.sg_list = &data_reg->sge;
457 wr->wr_id = ISER_FASTREG_LI_WRID; 457 wr->wr.num_sge = 1;
458 wr->sg_list = &data_reg->sge; 458 wr->wr.send_flags = 0;
459 wr->num_sge = 1; 459 wr->sig_attrs = sig_attrs;
460 wr->send_flags = 0; 460 wr->sig_mr = pi_ctx->sig_mr;
461 wr->wr.sig_handover.sig_attrs = sig_attrs;
462 wr->wr.sig_handover.sig_mr = pi_ctx->sig_mr;
463 if (scsi_prot_sg_count(iser_task->sc)) 461 if (scsi_prot_sg_count(iser_task->sc))
464 wr->wr.sig_handover.prot = &prot_reg->sge; 462 wr->prot = &prot_reg->sge;
465 else 463 else
466 wr->wr.sig_handover.prot = NULL; 464 wr->prot = NULL;
467 wr->wr.sig_handover.access_flags = IB_ACCESS_LOCAL_WRITE | 465 wr->access_flags = IB_ACCESS_LOCAL_WRITE |
468 IB_ACCESS_REMOTE_READ | 466 IB_ACCESS_REMOTE_READ |
469 IB_ACCESS_REMOTE_WRITE; 467 IB_ACCESS_REMOTE_WRITE;
470 pi_ctx->sig_mr_valid = 0; 468 pi_ctx->sig_mr_valid = 0;
471 469
472 sig_reg->sge.lkey = pi_ctx->sig_mr->lkey; 470 sig_reg->sge.lkey = pi_ctx->sig_mr->lkey;
@@ -491,7 +489,7 @@ static int iser_fast_reg_mr(struct iscsi_iser_task *iser_task,
491 struct ib_mr *mr = rsc->mr; 489 struct ib_mr *mr = rsc->mr;
492 struct ib_fast_reg_page_list *frpl = rsc->frpl; 490 struct ib_fast_reg_page_list *frpl = rsc->frpl;
493 struct iser_tx_desc *tx_desc = &iser_task->desc; 491 struct iser_tx_desc *tx_desc = &iser_task->desc;
494 struct ib_send_wr *wr; 492 struct ib_fast_reg_wr *wr;
495 int offset, size, plen; 493 int offset, size, plen;
496 494
497 plen = iser_sg_to_page_vec(mem, device->ib_device, frpl->page_list, 495 plen = iser_sg_to_page_vec(mem, device->ib_device, frpl->page_list,
@@ -501,24 +499,22 @@ static int iser_fast_reg_mr(struct iscsi_iser_task *iser_task,
501 return -EINVAL; 499 return -EINVAL;
502 } 500 }
503 501
504 if (!rsc->mr_valid) { 502 if (!rsc->mr_valid)
505 wr = iser_tx_next_wr(tx_desc); 503 iser_inv_rkey(iser_tx_next_wr(tx_desc), mr);
506 iser_inv_rkey(wr, mr); 504
507 } 505 wr = fast_reg_wr(iser_tx_next_wr(tx_desc));
508 506 wr->wr.opcode = IB_WR_FAST_REG_MR;
509 wr = iser_tx_next_wr(tx_desc); 507 wr->wr.wr_id = ISER_FASTREG_LI_WRID;
510 wr->opcode = IB_WR_FAST_REG_MR; 508 wr->wr.send_flags = 0;
511 wr->wr_id = ISER_FASTREG_LI_WRID; 509 wr->iova_start = frpl->page_list[0] + offset;
512 wr->send_flags = 0; 510 wr->page_list = frpl;
513 wr->wr.fast_reg.iova_start = frpl->page_list[0] + offset; 511 wr->page_list_len = plen;
514 wr->wr.fast_reg.page_list = frpl; 512 wr->page_shift = SHIFT_4K;
515 wr->wr.fast_reg.page_list_len = plen; 513 wr->length = size;
516 wr->wr.fast_reg.page_shift = SHIFT_4K; 514 wr->rkey = mr->rkey;
517 wr->wr.fast_reg.length = size; 515 wr->access_flags = (IB_ACCESS_LOCAL_WRITE |
518 wr->wr.fast_reg.rkey = mr->rkey; 516 IB_ACCESS_REMOTE_WRITE |
519 wr->wr.fast_reg.access_flags = (IB_ACCESS_LOCAL_WRITE | 517 IB_ACCESS_REMOTE_READ);
520 IB_ACCESS_REMOTE_WRITE |
521 IB_ACCESS_REMOTE_READ);
522 rsc->mr_valid = 0; 518 rsc->mr_valid = 0;
523 519
524 reg->sge.lkey = mr->lkey; 520 reg->sge.lkey = mr->lkey;
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
index f3122372d49f..e7f3b204239b 100644
--- a/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -1135,7 +1135,7 @@ int iser_post_send(struct ib_conn *ib_conn, struct iser_tx_desc *tx_desc,
1135 wr->opcode = IB_WR_SEND; 1135 wr->opcode = IB_WR_SEND;
1136 wr->send_flags = signal ? IB_SEND_SIGNALED : 0; 1136 wr->send_flags = signal ? IB_SEND_SIGNALED : 0;
1137 1137
1138 ib_ret = ib_post_send(ib_conn->qp, &tx_desc->wrs[0], &bad_wr); 1138 ib_ret = ib_post_send(ib_conn->qp, &tx_desc->wrs[0].send, &bad_wr);
1139 if (ib_ret) 1139 if (ib_ret)
1140 iser_err("ib_post_send failed, ret:%d opcode:%d\n", 1140 iser_err("ib_post_send failed, ret:%d opcode:%d\n",
1141 ib_ret, bad_wr->opcode); 1141 ib_ret, bad_wr->opcode);
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index 7d41c9d6d6bf..1b4d13d323b6 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -1715,10 +1715,10 @@ isert_unmap_cmd(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn)
1715 isert_unmap_data_buf(isert_conn, &wr->data); 1715 isert_unmap_data_buf(isert_conn, &wr->data);
1716 } 1716 }
1717 1717
1718 if (wr->send_wr) { 1718 if (wr->rdma_wr) {
1719 isert_dbg("Cmd %p free send_wr\n", isert_cmd); 1719 isert_dbg("Cmd %p free send_wr\n", isert_cmd);
1720 kfree(wr->send_wr); 1720 kfree(wr->rdma_wr);
1721 wr->send_wr = NULL; 1721 wr->rdma_wr = NULL;
1722 } 1722 }
1723 1723
1724 if (wr->ib_sge) { 1724 if (wr->ib_sge) {
@@ -1753,7 +1753,7 @@ isert_unreg_rdma(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn)
1753 } 1753 }
1754 1754
1755 wr->ib_sge = NULL; 1755 wr->ib_sge = NULL;
1756 wr->send_wr = NULL; 1756 wr->rdma_wr = NULL;
1757} 1757}
1758 1758
1759static void 1759static void
@@ -1922,7 +1922,7 @@ isert_completion_rdma_write(struct iser_tx_desc *tx_desc,
1922 } 1922 }
1923 1923
1924 device->unreg_rdma_mem(isert_cmd, isert_conn); 1924 device->unreg_rdma_mem(isert_cmd, isert_conn);
1925 wr->send_wr_num = 0; 1925 wr->rdma_wr_num = 0;
1926 if (ret) 1926 if (ret)
1927 transport_send_check_condition_and_sense(se_cmd, 1927 transport_send_check_condition_and_sense(se_cmd,
1928 se_cmd->pi_err, 0); 1928 se_cmd->pi_err, 0);
@@ -1950,7 +1950,7 @@ isert_completion_rdma_read(struct iser_tx_desc *tx_desc,
1950 iscsit_stop_dataout_timer(cmd); 1950 iscsit_stop_dataout_timer(cmd);
1951 device->unreg_rdma_mem(isert_cmd, isert_conn); 1951 device->unreg_rdma_mem(isert_cmd, isert_conn);
1952 cmd->write_data_done = wr->data.len; 1952 cmd->write_data_done = wr->data.len;
1953 wr->send_wr_num = 0; 1953 wr->rdma_wr_num = 0;
1954 1954
1955 isert_dbg("Cmd: %p RDMA_READ comp calling execute_cmd\n", isert_cmd); 1955 isert_dbg("Cmd: %p RDMA_READ comp calling execute_cmd\n", isert_cmd);
1956 spin_lock_bh(&cmd->istate_lock); 1956 spin_lock_bh(&cmd->istate_lock);
@@ -2402,7 +2402,7 @@ isert_put_text_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
2402 2402
2403static int 2403static int
2404isert_build_rdma_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd, 2404isert_build_rdma_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
2405 struct ib_sge *ib_sge, struct ib_send_wr *send_wr, 2405 struct ib_sge *ib_sge, struct ib_rdma_wr *rdma_wr,
2406 u32 data_left, u32 offset) 2406 u32 data_left, u32 offset)
2407{ 2407{
2408 struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd; 2408 struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
@@ -2417,8 +2417,8 @@ isert_build_rdma_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
2417 sg_nents = min(cmd->se_cmd.t_data_nents - sg_off, isert_conn->max_sge); 2417 sg_nents = min(cmd->se_cmd.t_data_nents - sg_off, isert_conn->max_sge);
2418 page_off = offset % PAGE_SIZE; 2418 page_off = offset % PAGE_SIZE;
2419 2419
2420 send_wr->sg_list = ib_sge; 2420 rdma_wr->wr.sg_list = ib_sge;
2421 send_wr->wr_id = (uintptr_t)&isert_cmd->tx_desc; 2421 rdma_wr->wr.wr_id = (uintptr_t)&isert_cmd->tx_desc;
2422 /* 2422 /*
2423 * Perform mapping of TCM scatterlist memory ib_sge dma_addr. 2423 * Perform mapping of TCM scatterlist memory ib_sge dma_addr.
2424 */ 2424 */
@@ -2443,11 +2443,11 @@ isert_build_rdma_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
2443 isert_dbg("Incrementing ib_sge pointer to %p\n", ib_sge); 2443 isert_dbg("Incrementing ib_sge pointer to %p\n", ib_sge);
2444 } 2444 }
2445 2445
2446 send_wr->num_sge = ++i; 2446 rdma_wr->wr.num_sge = ++i;
2447 isert_dbg("Set outgoing sg_list: %p num_sg: %u from TCM SGLs\n", 2447 isert_dbg("Set outgoing sg_list: %p num_sg: %u from TCM SGLs\n",
2448 send_wr->sg_list, send_wr->num_sge); 2448 rdma_wr->wr.sg_list, rdma_wr->wr.num_sge);
2449 2449
2450 return send_wr->num_sge; 2450 return rdma_wr->wr.num_sge;
2451} 2451}
2452 2452
2453static int 2453static int
@@ -2458,7 +2458,7 @@ isert_map_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
2458 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd); 2458 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
2459 struct isert_conn *isert_conn = conn->context; 2459 struct isert_conn *isert_conn = conn->context;
2460 struct isert_data_buf *data = &wr->data; 2460 struct isert_data_buf *data = &wr->data;
2461 struct ib_send_wr *send_wr; 2461 struct ib_rdma_wr *rdma_wr;
2462 struct ib_sge *ib_sge; 2462 struct ib_sge *ib_sge;
2463 u32 offset, data_len, data_left, rdma_write_max, va_offset = 0; 2463 u32 offset, data_len, data_left, rdma_write_max, va_offset = 0;
2464 int ret = 0, i, ib_sge_cnt; 2464 int ret = 0, i, ib_sge_cnt;
@@ -2483,11 +2483,11 @@ isert_map_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
2483 } 2483 }
2484 wr->ib_sge = ib_sge; 2484 wr->ib_sge = ib_sge;
2485 2485
2486 wr->send_wr_num = DIV_ROUND_UP(data->nents, isert_conn->max_sge); 2486 wr->rdma_wr_num = DIV_ROUND_UP(data->nents, isert_conn->max_sge);
2487 wr->send_wr = kzalloc(sizeof(struct ib_send_wr) * wr->send_wr_num, 2487 wr->rdma_wr = kzalloc(sizeof(struct ib_rdma_wr) * wr->rdma_wr_num,
2488 GFP_KERNEL); 2488 GFP_KERNEL);
2489 if (!wr->send_wr) { 2489 if (!wr->rdma_wr) {
2490 isert_dbg("Unable to allocate wr->send_wr\n"); 2490 isert_dbg("Unable to allocate wr->rdma_wr\n");
2491 ret = -ENOMEM; 2491 ret = -ENOMEM;
2492 goto unmap_cmd; 2492 goto unmap_cmd;
2493 } 2493 }
@@ -2495,31 +2495,31 @@ isert_map_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
2495 wr->isert_cmd = isert_cmd; 2495 wr->isert_cmd = isert_cmd;
2496 rdma_write_max = isert_conn->max_sge * PAGE_SIZE; 2496 rdma_write_max = isert_conn->max_sge * PAGE_SIZE;
2497 2497
2498 for (i = 0; i < wr->send_wr_num; i++) { 2498 for (i = 0; i < wr->rdma_wr_num; i++) {
2499 send_wr = &isert_cmd->rdma_wr.send_wr[i]; 2499 rdma_wr = &isert_cmd->rdma_wr.rdma_wr[i];
2500 data_len = min(data_left, rdma_write_max); 2500 data_len = min(data_left, rdma_write_max);
2501 2501
2502 send_wr->send_flags = 0; 2502 rdma_wr->wr.send_flags = 0;
2503 if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) { 2503 if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) {
2504 send_wr->opcode = IB_WR_RDMA_WRITE; 2504 rdma_wr->wr.opcode = IB_WR_RDMA_WRITE;
2505 send_wr->wr.rdma.remote_addr = isert_cmd->read_va + offset; 2505 rdma_wr->remote_addr = isert_cmd->read_va + offset;
2506 send_wr->wr.rdma.rkey = isert_cmd->read_stag; 2506 rdma_wr->rkey = isert_cmd->read_stag;
2507 if (i + 1 == wr->send_wr_num) 2507 if (i + 1 == wr->rdma_wr_num)
2508 send_wr->next = &isert_cmd->tx_desc.send_wr; 2508 rdma_wr->wr.next = &isert_cmd->tx_desc.send_wr;
2509 else 2509 else
2510 send_wr->next = &wr->send_wr[i + 1]; 2510 rdma_wr->wr.next = &wr->rdma_wr[i + 1].wr;
2511 } else { 2511 } else {
2512 send_wr->opcode = IB_WR_RDMA_READ; 2512 rdma_wr->wr.opcode = IB_WR_RDMA_READ;
2513 send_wr->wr.rdma.remote_addr = isert_cmd->write_va + va_offset; 2513 rdma_wr->remote_addr = isert_cmd->write_va + va_offset;
2514 send_wr->wr.rdma.rkey = isert_cmd->write_stag; 2514 rdma_wr->rkey = isert_cmd->write_stag;
2515 if (i + 1 == wr->send_wr_num) 2515 if (i + 1 == wr->rdma_wr_num)
2516 send_wr->send_flags = IB_SEND_SIGNALED; 2516 rdma_wr->wr.send_flags = IB_SEND_SIGNALED;
2517 else 2517 else
2518 send_wr->next = &wr->send_wr[i + 1]; 2518 rdma_wr->wr.next = &wr->rdma_wr[i + 1].wr;
2519 } 2519 }
2520 2520
2521 ib_sge_cnt = isert_build_rdma_wr(isert_conn, isert_cmd, ib_sge, 2521 ib_sge_cnt = isert_build_rdma_wr(isert_conn, isert_cmd, ib_sge,
2522 send_wr, data_len, offset); 2522 rdma_wr, data_len, offset);
2523 ib_sge += ib_sge_cnt; 2523 ib_sge += ib_sge_cnt;
2524 2524
2525 offset += data_len; 2525 offset += data_len;
@@ -2599,8 +2599,8 @@ isert_fast_reg_mr(struct isert_conn *isert_conn,
2599 struct ib_device *ib_dev = device->ib_device; 2599 struct ib_device *ib_dev = device->ib_device;
2600 struct ib_mr *mr; 2600 struct ib_mr *mr;
2601 struct ib_fast_reg_page_list *frpl; 2601 struct ib_fast_reg_page_list *frpl;
2602 struct ib_send_wr fr_wr, inv_wr; 2602 struct ib_fast_reg_wr fr_wr;
2603 struct ib_send_wr *bad_wr, *wr = NULL; 2603 struct ib_send_wr inv_wr, *bad_wr, *wr = NULL;
2604 int ret, pagelist_len; 2604 int ret, pagelist_len;
2605 u32 page_off; 2605 u32 page_off;
2606 2606
@@ -2638,20 +2638,20 @@ isert_fast_reg_mr(struct isert_conn *isert_conn,
2638 2638
2639 /* Prepare FASTREG WR */ 2639 /* Prepare FASTREG WR */
2640 memset(&fr_wr, 0, sizeof(fr_wr)); 2640 memset(&fr_wr, 0, sizeof(fr_wr));
2641 fr_wr.wr_id = ISER_FASTREG_LI_WRID; 2641 fr_wr.wr.wr_id = ISER_FASTREG_LI_WRID;
2642 fr_wr.opcode = IB_WR_FAST_REG_MR; 2642 fr_wr.wr.opcode = IB_WR_FAST_REG_MR;
2643 fr_wr.wr.fast_reg.iova_start = frpl->page_list[0] + page_off; 2643 fr_wr.iova_start = frpl->page_list[0] + page_off;
2644 fr_wr.wr.fast_reg.page_list = frpl; 2644 fr_wr.page_list = frpl;
2645 fr_wr.wr.fast_reg.page_list_len = pagelist_len; 2645 fr_wr.page_list_len = pagelist_len;
2646 fr_wr.wr.fast_reg.page_shift = PAGE_SHIFT; 2646 fr_wr.page_shift = PAGE_SHIFT;
2647 fr_wr.wr.fast_reg.length = mem->len; 2647 fr_wr.length = mem->len;
2648 fr_wr.wr.fast_reg.rkey = mr->rkey; 2648 fr_wr.rkey = mr->rkey;
2649 fr_wr.wr.fast_reg.access_flags = IB_ACCESS_LOCAL_WRITE; 2649 fr_wr.access_flags = IB_ACCESS_LOCAL_WRITE;
2650 2650
2651 if (!wr) 2651 if (!wr)
2652 wr = &fr_wr; 2652 wr = &fr_wr.wr;
2653 else 2653 else
2654 wr->next = &fr_wr; 2654 wr->next = &fr_wr.wr;
2655 2655
2656 ret = ib_post_send(isert_conn->qp, wr, &bad_wr); 2656 ret = ib_post_send(isert_conn->qp, wr, &bad_wr);
2657 if (ret) { 2657 if (ret) {
@@ -2732,8 +2732,8 @@ isert_reg_sig_mr(struct isert_conn *isert_conn,
2732 struct isert_rdma_wr *rdma_wr, 2732 struct isert_rdma_wr *rdma_wr,
2733 struct fast_reg_descriptor *fr_desc) 2733 struct fast_reg_descriptor *fr_desc)
2734{ 2734{
2735 struct ib_send_wr sig_wr, inv_wr; 2735 struct ib_sig_handover_wr sig_wr;
2736 struct ib_send_wr *bad_wr, *wr = NULL; 2736 struct ib_send_wr inv_wr, *bad_wr, *wr = NULL;
2737 struct pi_context *pi_ctx = fr_desc->pi_ctx; 2737 struct pi_context *pi_ctx = fr_desc->pi_ctx;
2738 struct ib_sig_attrs sig_attrs; 2738 struct ib_sig_attrs sig_attrs;
2739 int ret; 2739 int ret;
@@ -2751,20 +2751,20 @@ isert_reg_sig_mr(struct isert_conn *isert_conn,
2751 } 2751 }
2752 2752
2753 memset(&sig_wr, 0, sizeof(sig_wr)); 2753 memset(&sig_wr, 0, sizeof(sig_wr));
2754 sig_wr.opcode = IB_WR_REG_SIG_MR; 2754 sig_wr.wr.opcode = IB_WR_REG_SIG_MR;
2755 sig_wr.wr_id = ISER_FASTREG_LI_WRID; 2755 sig_wr.wr.wr_id = ISER_FASTREG_LI_WRID;
2756 sig_wr.sg_list = &rdma_wr->ib_sg[DATA]; 2756 sig_wr.wr.sg_list = &rdma_wr->ib_sg[DATA];
2757 sig_wr.num_sge = 1; 2757 sig_wr.wr.num_sge = 1;
2758 sig_wr.wr.sig_handover.access_flags = IB_ACCESS_LOCAL_WRITE; 2758 sig_wr.access_flags = IB_ACCESS_LOCAL_WRITE;
2759 sig_wr.wr.sig_handover.sig_attrs = &sig_attrs; 2759 sig_wr.sig_attrs = &sig_attrs;
2760 sig_wr.wr.sig_handover.sig_mr = pi_ctx->sig_mr; 2760 sig_wr.sig_mr = pi_ctx->sig_mr;
2761 if (se_cmd->t_prot_sg) 2761 if (se_cmd->t_prot_sg)
2762 sig_wr.wr.sig_handover.prot = &rdma_wr->ib_sg[PROT]; 2762 sig_wr.prot = &rdma_wr->ib_sg[PROT];
2763 2763
2764 if (!wr) 2764 if (!wr)
2765 wr = &sig_wr; 2765 wr = &sig_wr.wr;
2766 else 2766 else
2767 wr->next = &sig_wr; 2767 wr->next = &sig_wr.wr;
2768 2768
2769 ret = ib_post_send(isert_conn->qp, wr, &bad_wr); 2769 ret = ib_post_send(isert_conn->qp, wr, &bad_wr);
2770 if (ret) { 2770 if (ret) {
@@ -2858,7 +2858,7 @@ isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
2858 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd); 2858 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
2859 struct isert_conn *isert_conn = conn->context; 2859 struct isert_conn *isert_conn = conn->context;
2860 struct fast_reg_descriptor *fr_desc = NULL; 2860 struct fast_reg_descriptor *fr_desc = NULL;
2861 struct ib_send_wr *send_wr; 2861 struct ib_rdma_wr *rdma_wr;
2862 struct ib_sge *ib_sg; 2862 struct ib_sge *ib_sg;
2863 u32 offset; 2863 u32 offset;
2864 int ret = 0; 2864 int ret = 0;
@@ -2899,26 +2899,26 @@ isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
2899 2899
2900 memcpy(&wr->s_ib_sge, ib_sg, sizeof(*ib_sg)); 2900 memcpy(&wr->s_ib_sge, ib_sg, sizeof(*ib_sg));
2901 wr->ib_sge = &wr->s_ib_sge; 2901 wr->ib_sge = &wr->s_ib_sge;
2902 wr->send_wr_num = 1; 2902 wr->rdma_wr_num = 1;
2903 memset(&wr->s_send_wr, 0, sizeof(*send_wr)); 2903 memset(&wr->s_rdma_wr, 0, sizeof(wr->s_rdma_wr));
2904 wr->send_wr = &wr->s_send_wr; 2904 wr->rdma_wr = &wr->s_rdma_wr;
2905 wr->isert_cmd = isert_cmd; 2905 wr->isert_cmd = isert_cmd;
2906 2906
2907 send_wr = &isert_cmd->rdma_wr.s_send_wr; 2907 rdma_wr = &isert_cmd->rdma_wr.s_rdma_wr;
2908 send_wr->sg_list = &wr->s_ib_sge; 2908 rdma_wr->wr.sg_list = &wr->s_ib_sge;
2909 send_wr->num_sge = 1; 2909 rdma_wr->wr.num_sge = 1;
2910 send_wr->wr_id = (uintptr_t)&isert_cmd->tx_desc; 2910 rdma_wr->wr.wr_id = (uintptr_t)&isert_cmd->tx_desc;
2911 if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) { 2911 if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) {
2912 send_wr->opcode = IB_WR_RDMA_WRITE; 2912 rdma_wr->wr.opcode = IB_WR_RDMA_WRITE;
2913 send_wr->wr.rdma.remote_addr = isert_cmd->read_va; 2913 rdma_wr->remote_addr = isert_cmd->read_va;
2914 send_wr->wr.rdma.rkey = isert_cmd->read_stag; 2914 rdma_wr->rkey = isert_cmd->read_stag;
2915 send_wr->send_flags = !isert_prot_cmd(isert_conn, se_cmd) ? 2915 rdma_wr->wr.send_flags = !isert_prot_cmd(isert_conn, se_cmd) ?
2916 0 : IB_SEND_SIGNALED; 2916 0 : IB_SEND_SIGNALED;
2917 } else { 2917 } else {
2918 send_wr->opcode = IB_WR_RDMA_READ; 2918 rdma_wr->wr.opcode = IB_WR_RDMA_READ;
2919 send_wr->wr.rdma.remote_addr = isert_cmd->write_va; 2919 rdma_wr->remote_addr = isert_cmd->write_va;
2920 send_wr->wr.rdma.rkey = isert_cmd->write_stag; 2920 rdma_wr->rkey = isert_cmd->write_stag;
2921 send_wr->send_flags = IB_SEND_SIGNALED; 2921 rdma_wr->wr.send_flags = IB_SEND_SIGNALED;
2922 } 2922 }
2923 2923
2924 return 0; 2924 return 0;
@@ -2966,8 +2966,8 @@ isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
2966 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc); 2966 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
2967 isert_init_send_wr(isert_conn, isert_cmd, 2967 isert_init_send_wr(isert_conn, isert_cmd,
2968 &isert_cmd->tx_desc.send_wr); 2968 &isert_cmd->tx_desc.send_wr);
2969 isert_cmd->rdma_wr.s_send_wr.next = &isert_cmd->tx_desc.send_wr; 2969 isert_cmd->rdma_wr.s_rdma_wr.wr.next = &isert_cmd->tx_desc.send_wr;
2970 wr->send_wr_num += 1; 2970 wr->rdma_wr_num += 1;
2971 2971
2972 rc = isert_post_recv(isert_conn, isert_cmd->rx_desc); 2972 rc = isert_post_recv(isert_conn, isert_cmd->rx_desc);
2973 if (rc) { 2973 if (rc) {
@@ -2976,7 +2976,7 @@ isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
2976 } 2976 }
2977 } 2977 }
2978 2978
2979 rc = ib_post_send(isert_conn->qp, wr->send_wr, &wr_failed); 2979 rc = ib_post_send(isert_conn->qp, &wr->rdma_wr->wr, &wr_failed);
2980 if (rc) 2980 if (rc)
2981 isert_warn("ib_post_send() failed for IB_WR_RDMA_WRITE\n"); 2981 isert_warn("ib_post_send() failed for IB_WR_RDMA_WRITE\n");
2982 2982
@@ -3010,7 +3010,7 @@ isert_get_dataout(struct iscsi_conn *conn, struct iscsi_cmd *cmd, bool recovery)
3010 return rc; 3010 return rc;
3011 } 3011 }
3012 3012
3013 rc = ib_post_send(isert_conn->qp, wr->send_wr, &wr_failed); 3013 rc = ib_post_send(isert_conn->qp, &wr->rdma_wr->wr, &wr_failed);
3014 if (rc) 3014 if (rc)
3015 isert_warn("ib_post_send() failed for IB_WR_RDMA_READ\n"); 3015 isert_warn("ib_post_send() failed for IB_WR_RDMA_READ\n");
3016 3016
diff --git a/drivers/infiniband/ulp/isert/ib_isert.h b/drivers/infiniband/ulp/isert/ib_isert.h
index c5b99bcecbcf..360461819452 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.h
+++ b/drivers/infiniband/ulp/isert/ib_isert.h
@@ -117,9 +117,9 @@ struct isert_rdma_wr {
117 enum iser_ib_op_code iser_ib_op; 117 enum iser_ib_op_code iser_ib_op;
118 struct ib_sge *ib_sge; 118 struct ib_sge *ib_sge;
119 struct ib_sge s_ib_sge; 119 struct ib_sge s_ib_sge;
120 int send_wr_num; 120 int rdma_wr_num;
121 struct ib_send_wr *send_wr; 121 struct ib_rdma_wr *rdma_wr;
122 struct ib_send_wr s_send_wr; 122 struct ib_rdma_wr s_rdma_wr;
123 struct ib_sge ib_sg[3]; 123 struct ib_sge ib_sg[3];
124 struct isert_data_buf data; 124 struct isert_data_buf data;
125 struct isert_data_buf prot; 125 struct isert_data_buf prot;
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index 8ba887824d05..235f9b8b27f1 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -1306,7 +1306,7 @@ static int srp_map_finish_fr(struct srp_map_state *state,
1306 struct srp_target_port *target = ch->target; 1306 struct srp_target_port *target = ch->target;
1307 struct srp_device *dev = target->srp_host->srp_dev; 1307 struct srp_device *dev = target->srp_host->srp_dev;
1308 struct ib_send_wr *bad_wr; 1308 struct ib_send_wr *bad_wr;
1309 struct ib_send_wr wr; 1309 struct ib_fast_reg_wr wr;
1310 struct srp_fr_desc *desc; 1310 struct srp_fr_desc *desc;
1311 u32 rkey; 1311 u32 rkey;
1312 1312
@@ -1324,17 +1324,17 @@ static int srp_map_finish_fr(struct srp_map_state *state,
1324 sizeof(state->pages[0]) * state->npages); 1324 sizeof(state->pages[0]) * state->npages);
1325 1325
1326 memset(&wr, 0, sizeof(wr)); 1326 memset(&wr, 0, sizeof(wr));
1327 wr.opcode = IB_WR_FAST_REG_MR; 1327 wr.wr.opcode = IB_WR_FAST_REG_MR;
1328 wr.wr_id = FAST_REG_WR_ID_MASK; 1328 wr.wr.wr_id = FAST_REG_WR_ID_MASK;
1329 wr.wr.fast_reg.iova_start = state->base_dma_addr; 1329 wr.iova_start = state->base_dma_addr;
1330 wr.wr.fast_reg.page_list = desc->frpl; 1330 wr.page_list = desc->frpl;
1331 wr.wr.fast_reg.page_list_len = state->npages; 1331 wr.page_list_len = state->npages;
1332 wr.wr.fast_reg.page_shift = ilog2(dev->mr_page_size); 1332 wr.page_shift = ilog2(dev->mr_page_size);
1333 wr.wr.fast_reg.length = state->dma_len; 1333 wr.length = state->dma_len;
1334 wr.wr.fast_reg.access_flags = (IB_ACCESS_LOCAL_WRITE | 1334 wr.access_flags = (IB_ACCESS_LOCAL_WRITE |
1335 IB_ACCESS_REMOTE_READ | 1335 IB_ACCESS_REMOTE_READ |
1336 IB_ACCESS_REMOTE_WRITE); 1336 IB_ACCESS_REMOTE_WRITE);
1337 wr.wr.fast_reg.rkey = desc->mr->lkey; 1337 wr.rkey = desc->mr->lkey;
1338 1338
1339 *state->fr.next++ = desc; 1339 *state->fr.next++ = desc;
1340 state->nmdesc++; 1340 state->nmdesc++;
@@ -1342,7 +1342,7 @@ static int srp_map_finish_fr(struct srp_map_state *state,
1342 srp_map_desc(state, state->base_dma_addr, state->dma_len, 1342 srp_map_desc(state, state->base_dma_addr, state->dma_len,
1343 desc->mr->rkey); 1343 desc->mr->rkey);
1344 1344
1345 return ib_post_send(ch->qp, &wr, &bad_wr); 1345 return ib_post_send(ch->qp, &wr.wr, &bad_wr);
1346} 1346}
1347 1347
1348static int srp_finish_mapping(struct srp_map_state *state, 1348static int srp_finish_mapping(struct srp_map_state *state,
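srp shows the producer side of the split: the fast-registration WR is built on the stack as the typed struct and only its embedded generic WR is handed to ib_post_send(). The same idiom applies to the other WR types; a minimal sketch for an RDMA WRITE, where qp, sge, raddr, rkey and ret stand in for caller state (illustrative, not taken from this patch):

/* Illustrative producer-side use of the split WR types. */
struct ib_rdma_wr wr = {
	.wr = {
		.opcode		= IB_WR_RDMA_WRITE,
		.send_flags	= IB_SEND_SIGNALED,
		.sg_list	= &sge,
		.num_sge	= 1,
	},
	.remote_addr	= raddr,
	.rkey		= rkey,
};
struct ib_send_wr *bad_wr;
int ret;

ret = ib_post_send(qp, &wr.wr, &bad_wr);
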
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
index a7ac77a02593..47c4022fda76 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -2823,7 +2823,7 @@ static int srpt_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
2823static int srpt_perform_rdmas(struct srpt_rdma_ch *ch, 2823static int srpt_perform_rdmas(struct srpt_rdma_ch *ch,
2824 struct srpt_send_ioctx *ioctx) 2824 struct srpt_send_ioctx *ioctx)
2825{ 2825{
2826 struct ib_send_wr wr; 2826 struct ib_rdma_wr wr;
2827 struct ib_send_wr *bad_wr; 2827 struct ib_send_wr *bad_wr;
2828 struct rdma_iu *riu; 2828 struct rdma_iu *riu;
2829 int i; 2829 int i;
@@ -2851,29 +2851,29 @@ static int srpt_perform_rdmas(struct srpt_rdma_ch *ch,
2851 2851
2852 for (i = 0; i < n_rdma; ++i, ++riu) { 2852 for (i = 0; i < n_rdma; ++i, ++riu) {
2853 if (dir == DMA_FROM_DEVICE) { 2853 if (dir == DMA_FROM_DEVICE) {
2854 wr.opcode = IB_WR_RDMA_WRITE; 2854 wr.wr.opcode = IB_WR_RDMA_WRITE;
2855 wr.wr_id = encode_wr_id(i == n_rdma - 1 ? 2855 wr.wr.wr_id = encode_wr_id(i == n_rdma - 1 ?
2856 SRPT_RDMA_WRITE_LAST : 2856 SRPT_RDMA_WRITE_LAST :
2857 SRPT_RDMA_MID, 2857 SRPT_RDMA_MID,
2858 ioctx->ioctx.index); 2858 ioctx->ioctx.index);
2859 } else { 2859 } else {
2860 wr.opcode = IB_WR_RDMA_READ; 2860 wr.wr.opcode = IB_WR_RDMA_READ;
2861 wr.wr_id = encode_wr_id(i == n_rdma - 1 ? 2861 wr.wr.wr_id = encode_wr_id(i == n_rdma - 1 ?
2862 SRPT_RDMA_READ_LAST : 2862 SRPT_RDMA_READ_LAST :
2863 SRPT_RDMA_MID, 2863 SRPT_RDMA_MID,
2864 ioctx->ioctx.index); 2864 ioctx->ioctx.index);
2865 } 2865 }
2866 wr.next = NULL; 2866 wr.wr.next = NULL;
2867 wr.wr.rdma.remote_addr = riu->raddr; 2867 wr.remote_addr = riu->raddr;
2868 wr.wr.rdma.rkey = riu->rkey; 2868 wr.rkey = riu->rkey;
2869 wr.num_sge = riu->sge_cnt; 2869 wr.wr.num_sge = riu->sge_cnt;
2870 wr.sg_list = riu->sge; 2870 wr.wr.sg_list = riu->sge;
2871 2871
2872 /* only get completion event for the last rdma write */ 2872 /* only get completion event for the last rdma write */
2873 if (i == (n_rdma - 1) && dir == DMA_TO_DEVICE) 2873 if (i == (n_rdma - 1) && dir == DMA_TO_DEVICE)
2874 wr.send_flags = IB_SEND_SIGNALED; 2874 wr.wr.send_flags = IB_SEND_SIGNALED;
2875 2875
2876 ret = ib_post_send(ch->qp, &wr, &bad_wr); 2876 ret = ib_post_send(ch->qp, &wr.wr, &bad_wr);
2877 if (ret) 2877 if (ret)
2878 break; 2878 break;
2879 } 2879 }
@@ -2882,11 +2882,11 @@ static int srpt_perform_rdmas(struct srpt_rdma_ch *ch,
2882 pr_err("%s[%d]: ib_post_send() returned %d for %d/%d\n", 2882 pr_err("%s[%d]: ib_post_send() returned %d for %d/%d\n",
2883 __func__, __LINE__, ret, i, n_rdma); 2883 __func__, __LINE__, ret, i, n_rdma);
2884 if (ret && i > 0) { 2884 if (ret && i > 0) {
2885 wr.num_sge = 0; 2885 wr.wr.num_sge = 0;
2886 wr.wr_id = encode_wr_id(SRPT_RDMA_ABORT, ioctx->ioctx.index); 2886 wr.wr.wr_id = encode_wr_id(SRPT_RDMA_ABORT, ioctx->ioctx.index);
2887 wr.send_flags = IB_SEND_SIGNALED; 2887 wr.wr.send_flags = IB_SEND_SIGNALED;
2888 while (ch->state == CH_LIVE && 2888 while (ch->state == CH_LIVE &&
2889 ib_post_send(ch->qp, &wr, &bad_wr) != 0) { 2889 ib_post_send(ch->qp, &wr.wr, &bad_wr) != 0) {
2890 pr_info("Trying to abort failed RDMA transfer [%d]\n", 2890 pr_info("Trying to abort failed RDMA transfer [%d]\n",
2891 ioctx->ioctx.index); 2891 ioctx->ioctx.index);
2892 msleep(1000); 2892 msleep(1000);
diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h
index 5015a9e830bd..993d1ff9ba21 100644
--- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h
+++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h
@@ -527,7 +527,7 @@ typedef struct kib_tx /* transmit message */
527 __u64 tx_msgaddr; /* message buffer (I/O addr) */ 527 __u64 tx_msgaddr; /* message buffer (I/O addr) */
528 DECLARE_PCI_UNMAP_ADDR(tx_msgunmap); /* for dma_unmap_single() */ 528 DECLARE_PCI_UNMAP_ADDR(tx_msgunmap); /* for dma_unmap_single() */
529 int tx_nwrq; /* # send work items */ 529 int tx_nwrq; /* # send work items */
530 struct ib_send_wr *tx_wrq; /* send work items... */ 530 struct ib_rdma_wr *tx_wrq; /* send work items... */
531 struct ib_sge *tx_sge; /* ...and their memory */ 531 struct ib_sge *tx_sge; /* ...and their memory */
532 kib_rdma_desc_t *tx_rd; /* rdma descriptor */ 532 kib_rdma_desc_t *tx_rd; /* rdma descriptor */
533 int tx_nfrags; /* # entries in... */ 533 int tx_nfrags; /* # entries in... */
diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
index a23a6d956a4d..a34f1707c167 100644
--- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
+++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
@@ -834,7 +834,7 @@ kiblnd_post_tx_locked(kib_conn_t *conn, kib_tx_t *tx, int credit)
834 /* close_conn will launch failover */ 834 /* close_conn will launch failover */
835 rc = -ENETDOWN; 835 rc = -ENETDOWN;
836 } else { 836 } else {
837 rc = ib_post_send(conn->ibc_cmid->qp, tx->tx_wrq, &bad_wrq); 837 rc = ib_post_send(conn->ibc_cmid->qp, &tx->tx_wrq->wr, &bad_wrq);
838 } 838 }
839 839
840 conn->ibc_last_send = jiffies; 840 conn->ibc_last_send = jiffies;
@@ -1008,7 +1008,7 @@ kiblnd_init_tx_msg(lnet_ni_t *ni, kib_tx_t *tx, int type, int body_nob)
1008{ 1008{
1009 kib_hca_dev_t *hdev = tx->tx_pool->tpo_hdev; 1009 kib_hca_dev_t *hdev = tx->tx_pool->tpo_hdev;
1010 struct ib_sge *sge = &tx->tx_sge[tx->tx_nwrq]; 1010 struct ib_sge *sge = &tx->tx_sge[tx->tx_nwrq];
1011 struct ib_send_wr *wrq = &tx->tx_wrq[tx->tx_nwrq]; 1011 struct ib_rdma_wr *wrq = &tx->tx_wrq[tx->tx_nwrq];
1012 int nob = offsetof(kib_msg_t, ibm_u) + body_nob; 1012 int nob = offsetof(kib_msg_t, ibm_u) + body_nob;
1013 struct ib_mr *mr; 1013 struct ib_mr *mr;
1014 1014
@@ -1027,12 +1027,12 @@ kiblnd_init_tx_msg(lnet_ni_t *ni, kib_tx_t *tx, int type, int body_nob)
1027 1027
1028 memset(wrq, 0, sizeof(*wrq)); 1028 memset(wrq, 0, sizeof(*wrq));
1029 1029
1030 wrq->next = NULL; 1030 wrq->wr.next = NULL;
1031 wrq->wr_id = kiblnd_ptr2wreqid(tx, IBLND_WID_TX); 1031 wrq->wr.wr_id = kiblnd_ptr2wreqid(tx, IBLND_WID_TX);
1032 wrq->sg_list = sge; 1032 wrq->wr.sg_list = sge;
1033 wrq->num_sge = 1; 1033 wrq->wr.num_sge = 1;
1034 wrq->opcode = IB_WR_SEND; 1034 wrq->wr.opcode = IB_WR_SEND;
1035 wrq->send_flags = IB_SEND_SIGNALED; 1035 wrq->wr.send_flags = IB_SEND_SIGNALED;
1036 1036
1037 tx->tx_nwrq++; 1037 tx->tx_nwrq++;
1038} 1038}
@@ -1044,7 +1044,7 @@ kiblnd_init_rdma(kib_conn_t *conn, kib_tx_t *tx, int type,
1044 kib_msg_t *ibmsg = tx->tx_msg; 1044 kib_msg_t *ibmsg = tx->tx_msg;
1045 kib_rdma_desc_t *srcrd = tx->tx_rd; 1045 kib_rdma_desc_t *srcrd = tx->tx_rd;
1046 struct ib_sge *sge = &tx->tx_sge[0]; 1046 struct ib_sge *sge = &tx->tx_sge[0];
1047 struct ib_send_wr *wrq = &tx->tx_wrq[0]; 1047 struct ib_rdma_wr *wrq = &tx->tx_wrq[0], *next;
1048 int rc = resid; 1048 int rc = resid;
1049 int srcidx; 1049 int srcidx;
1050 int dstidx; 1050 int dstidx;
@@ -1090,16 +1090,17 @@ kiblnd_init_rdma(kib_conn_t *conn, kib_tx_t *tx, int type,
1090 sge->length = wrknob; 1090 sge->length = wrknob;
1091 1091
1092 wrq = &tx->tx_wrq[tx->tx_nwrq]; 1092 wrq = &tx->tx_wrq[tx->tx_nwrq];
1093 next = wrq + 1;
1093 1094
1094 wrq->next = wrq + 1; 1095 wrq->wr.next = &next->wr;
1095 wrq->wr_id = kiblnd_ptr2wreqid(tx, IBLND_WID_RDMA); 1096 wrq->wr.wr_id = kiblnd_ptr2wreqid(tx, IBLND_WID_RDMA);
1096 wrq->sg_list = sge; 1097 wrq->wr.sg_list = sge;
1097 wrq->num_sge = 1; 1098 wrq->wr.num_sge = 1;
1098 wrq->opcode = IB_WR_RDMA_WRITE; 1099 wrq->wr.opcode = IB_WR_RDMA_WRITE;
1099 wrq->send_flags = 0; 1100 wrq->wr.send_flags = 0;
1100 1101
1101 wrq->wr.rdma.remote_addr = kiblnd_rd_frag_addr(dstrd, dstidx); 1102 wrq->remote_addr = kiblnd_rd_frag_addr(dstrd, dstidx);
1102 wrq->wr.rdma.rkey = kiblnd_rd_frag_key(dstrd, dstidx); 1103 wrq->rkey = kiblnd_rd_frag_key(dstrd, dstidx);
1103 1104
1104 srcidx = kiblnd_rd_consume_frag(srcrd, srcidx, wrknob); 1105 srcidx = kiblnd_rd_consume_frag(srcrd, srcidx, wrknob);
1105 dstidx = kiblnd_rd_consume_frag(dstrd, dstidx, wrknob); 1106 dstidx = kiblnd_rd_consume_frag(dstrd, dstidx, wrknob);
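The o2iblnd change is worth calling out: tx_wrq is now an array of struct ib_rdma_wr, so the old wrq->next = wrq + 1 would produce a pointer of the wrong type; the link has to go through the embedded generic WR of the next array slot, which is what the new "next = wrq + 1; wrq->wr.next = &next->wr;" does. The general rule for chaining an array of wrapped WRs, assuming the embedded member is named wr as it is throughout this series:

/* Chain an array of ib_rdma_wr: ->next must reference the embedded
 * ib_send_wr, never the containing struct. */
static void chain_rdma_wrs(struct ib_rdma_wr *wrq, int n)
{
	int i;

	for (i = 0; i < n; i++)
		wrq[i].wr.next = (i + 1 < n) ? &wrq[i + 1].wr : NULL;
}
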
diff --git a/drivers/staging/rdma/amso1100/c2_qp.c b/drivers/staging/rdma/amso1100/c2_qp.c
index 86708dee58b1..4c43ca935cc7 100644
--- a/drivers/staging/rdma/amso1100/c2_qp.c
+++ b/drivers/staging/rdma/amso1100/c2_qp.c
@@ -860,9 +860,9 @@ int c2_post_send(struct ib_qp *ibqp, struct ib_send_wr *ib_wr,
860 flags |= SQ_READ_FENCE; 860 flags |= SQ_READ_FENCE;
861 } 861 }
862 wr.sqwr.rdma_write.remote_stag = 862 wr.sqwr.rdma_write.remote_stag =
863 cpu_to_be32(ib_wr->wr.rdma.rkey); 863 cpu_to_be32(rdma_wr(ib_wr)->rkey);
864 wr.sqwr.rdma_write.remote_to = 864 wr.sqwr.rdma_write.remote_to =
865 cpu_to_be64(ib_wr->wr.rdma.remote_addr); 865 cpu_to_be64(rdma_wr(ib_wr)->remote_addr);
866 err = move_sgl((struct c2_data_addr *) 866 err = move_sgl((struct c2_data_addr *)
867 & (wr.sqwr.rdma_write.data), 867 & (wr.sqwr.rdma_write.data),
868 ib_wr->sg_list, 868 ib_wr->sg_list,
@@ -889,9 +889,9 @@ int c2_post_send(struct ib_qp *ibqp, struct ib_send_wr *ib_wr,
889 wr.sqwr.rdma_read.local_to = 889 wr.sqwr.rdma_read.local_to =
890 cpu_to_be64(ib_wr->sg_list->addr); 890 cpu_to_be64(ib_wr->sg_list->addr);
891 wr.sqwr.rdma_read.remote_stag = 891 wr.sqwr.rdma_read.remote_stag =
892 cpu_to_be32(ib_wr->wr.rdma.rkey); 892 cpu_to_be32(rdma_wr(ib_wr)->rkey);
893 wr.sqwr.rdma_read.remote_to = 893 wr.sqwr.rdma_read.remote_to =
894 cpu_to_be64(ib_wr->wr.rdma.remote_addr); 894 cpu_to_be64(rdma_wr(ib_wr)->remote_addr);
895 wr.sqwr.rdma_read.length = 895 wr.sqwr.rdma_read.length =
896 cpu_to_be32(ib_wr->sg_list->length); 896 cpu_to_be32(ib_wr->sg_list->length);
897 break; 897 break;
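The amso1100 change above follows the same provider-side pattern: opcode-specific fields are read through the rdma_wr() downcast rather than the removed wr.rdma union member. A hedged sketch, with an illustrative parameter struct standing in for the driver's wire format:

struct example_rdma_params {
	__be32 remote_stag;
	__be64 remote_to;
};

static void example_fill_rdma_params(struct ib_send_wr *ib_wr,
				     struct example_rdma_params *p)
{
	/* rdma_wr() recovers the ib_rdma_wr that wraps this ib_send_wr. */
	p->remote_stag = cpu_to_be32(rdma_wr(ib_wr)->rkey);
	p->remote_to   = cpu_to_be64(rdma_wr(ib_wr)->remote_addr);
}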
diff --git a/drivers/staging/rdma/ehca/ehca_reqs.c b/drivers/staging/rdma/ehca/ehca_reqs.c
index 47f94984353d..10e2074384f5 100644
--- a/drivers/staging/rdma/ehca/ehca_reqs.c
+++ b/drivers/staging/rdma/ehca/ehca_reqs.c
@@ -110,19 +110,19 @@ static inline int ehca_write_rwqe(struct ipz_queue *ipz_rqueue,
110/* need ib_mad struct */ 110/* need ib_mad struct */
111#include <rdma/ib_mad.h> 111#include <rdma/ib_mad.h>
112 112
113static void trace_send_wr_ud(const struct ib_send_wr *send_wr) 113static void trace_ud_wr(const struct ib_ud_wr *ud_wr)
114{ 114{
115 int idx; 115 int idx;
116 int j; 116 int j;
117 while (send_wr) { 117 while (ud_wr) {
118 struct ib_mad_hdr *mad_hdr = send_wr->wr.ud.mad_hdr; 118 struct ib_mad_hdr *mad_hdr = ud_wr->mad_hdr;
119 struct ib_sge *sge = send_wr->sg_list; 119 struct ib_sge *sge = ud_wr->wr.sg_list;
120 ehca_gen_dbg("send_wr#%x wr_id=%lx num_sge=%x " 120 ehca_gen_dbg("ud_wr#%x wr_id=%lx num_sge=%x "
121 "send_flags=%x opcode=%x", idx, send_wr->wr_id, 121 "send_flags=%x opcode=%x", idx, ud_wr->wr.wr_id,
122 send_wr->num_sge, send_wr->send_flags, 122 ud_wr->wr.num_sge, ud_wr->wr.send_flags,
123 send_wr->opcode); 123 ud_wr->wr.opcode);
124 if (mad_hdr) { 124 if (mad_hdr) {
125 ehca_gen_dbg("send_wr#%x mad_hdr base_version=%x " 125 ehca_gen_dbg("ud_wr#%x mad_hdr base_version=%x "
126 "mgmt_class=%x class_version=%x method=%x " 126 "mgmt_class=%x class_version=%x method=%x "
127 "status=%x class_specific=%x tid=%lx " 127 "status=%x class_specific=%x tid=%lx "
128 "attr_id=%x resv=%x attr_mod=%x", 128 "attr_id=%x resv=%x attr_mod=%x",
@@ -134,33 +134,33 @@ static void trace_send_wr_ud(const struct ib_send_wr *send_wr)
134 mad_hdr->resv, 134 mad_hdr->resv,
135 mad_hdr->attr_mod); 135 mad_hdr->attr_mod);
136 } 136 }
137 for (j = 0; j < send_wr->num_sge; j++) { 137 for (j = 0; j < ud_wr->wr.num_sge; j++) {
138 u8 *data = __va(sge->addr); 138 u8 *data = __va(sge->addr);
139 ehca_gen_dbg("send_wr#%x sge#%x addr=%p length=%x " 139 ehca_gen_dbg("ud_wr#%x sge#%x addr=%p length=%x "
140 "lkey=%x", 140 "lkey=%x",
141 idx, j, data, sge->length, sge->lkey); 141 idx, j, data, sge->length, sge->lkey);
142 /* assume length is n*16 */ 142 /* assume length is n*16 */
143 ehca_dmp(data, sge->length, "send_wr#%x sge#%x", 143 ehca_dmp(data, sge->length, "ud_wr#%x sge#%x",
144 idx, j); 144 idx, j);
145 sge++; 145 sge++;
146 } /* eof for j */ 146 } /* eof for j */
147 idx++; 147 idx++;
148 send_wr = send_wr->next; 148 ud_wr = ud_wr(ud_wr->wr.next);
149 } /* eof while send_wr */ 149 } /* eof while ud_wr */
150} 150}
151 151
152#endif /* DEBUG_GSI_SEND_WR */ 152#endif /* DEBUG_GSI_SEND_WR */
153 153
154static inline int ehca_write_swqe(struct ehca_qp *qp, 154static inline int ehca_write_swqe(struct ehca_qp *qp,
155 struct ehca_wqe *wqe_p, 155 struct ehca_wqe *wqe_p,
156 const struct ib_send_wr *send_wr, 156 struct ib_send_wr *send_wr,
157 u32 sq_map_idx, 157 u32 sq_map_idx,
158 int hidden) 158 int hidden)
159{ 159{
160 u32 idx; 160 u32 idx;
161 u64 dma_length; 161 u64 dma_length;
162 struct ehca_av *my_av; 162 struct ehca_av *my_av;
163 u32 remote_qkey = send_wr->wr.ud.remote_qkey; 163 u32 remote_qkey;
164 struct ehca_qmap_entry *qmap_entry = &qp->sq_map.map[sq_map_idx]; 164 struct ehca_qmap_entry *qmap_entry = &qp->sq_map.map[sq_map_idx];
165 165
166 if (unlikely((send_wr->num_sge < 0) || 166 if (unlikely((send_wr->num_sge < 0) ||
@@ -223,20 +223,21 @@ static inline int ehca_write_swqe(struct ehca_qp *qp,
223 /* no break is intential here */ 223 /* no break is intential here */
224 case IB_QPT_UD: 224 case IB_QPT_UD:
225 /* IB 1.2 spec C10-15 compliance */ 225 /* IB 1.2 spec C10-15 compliance */
226 if (send_wr->wr.ud.remote_qkey & 0x80000000) 226 remote_qkey = ud_wr(send_wr)->remote_qkey;
227 if (remote_qkey & 0x80000000)
227 remote_qkey = qp->qkey; 228 remote_qkey = qp->qkey;
228 229
229 wqe_p->destination_qp_number = send_wr->wr.ud.remote_qpn << 8; 230 wqe_p->destination_qp_number = ud_wr(send_wr)->remote_qpn << 8;
230 wqe_p->local_ee_context_qkey = remote_qkey; 231 wqe_p->local_ee_context_qkey = remote_qkey;
231 if (unlikely(!send_wr->wr.ud.ah)) { 232 if (unlikely(!ud_wr(send_wr)->ah)) {
232 ehca_gen_err("wr.ud.ah is NULL. qp=%p", qp); 233 ehca_gen_err("ud_wr(send_wr) is NULL. qp=%p", qp);
233 return -EINVAL; 234 return -EINVAL;
234 } 235 }
235 if (unlikely(send_wr->wr.ud.remote_qpn == 0)) { 236 if (unlikely(ud_wr(send_wr)->remote_qpn == 0)) {
236 ehca_gen_err("dest QP# is 0. qp=%x", qp->real_qp_num); 237 ehca_gen_err("dest QP# is 0. qp=%x", qp->real_qp_num);
237 return -EINVAL; 238 return -EINVAL;
238 } 239 }
239 my_av = container_of(send_wr->wr.ud.ah, struct ehca_av, ib_ah); 240 my_av = container_of(ud_wr(send_wr)->ah, struct ehca_av, ib_ah);
240 wqe_p->u.ud_av.ud_av = my_av->av; 241 wqe_p->u.ud_av.ud_av = my_av->av;
241 242
242 /* 243 /*
@@ -255,9 +256,9 @@ static inline int ehca_write_swqe(struct ehca_qp *qp,
255 qp->qp_type == IB_QPT_GSI) 256 qp->qp_type == IB_QPT_GSI)
256 wqe_p->u.ud_av.ud_av.pmtu = 1; 257 wqe_p->u.ud_av.ud_av.pmtu = 1;
257 if (qp->qp_type == IB_QPT_GSI) { 258 if (qp->qp_type == IB_QPT_GSI) {
258 wqe_p->pkeyi = send_wr->wr.ud.pkey_index; 259 wqe_p->pkeyi = ud_wr(send_wr)->pkey_index;
259#ifdef DEBUG_GSI_SEND_WR 260#ifdef DEBUG_GSI_SEND_WR
260 trace_send_wr_ud(send_wr); 261 trace_ud_wr(ud_wr(send_wr));
261#endif /* DEBUG_GSI_SEND_WR */ 262#endif /* DEBUG_GSI_SEND_WR */
262 } 263 }
263 break; 264 break;
@@ -269,8 +270,8 @@ static inline int ehca_write_swqe(struct ehca_qp *qp,
269 case IB_QPT_RC: 270 case IB_QPT_RC:
270 /* TODO: atomic not implemented */ 271 /* TODO: atomic not implemented */
271 wqe_p->u.nud.remote_virtual_address = 272 wqe_p->u.nud.remote_virtual_address =
272 send_wr->wr.rdma.remote_addr; 273 rdma_wr(send_wr)->remote_addr;
273 wqe_p->u.nud.rkey = send_wr->wr.rdma.rkey; 274 wqe_p->u.nud.rkey = rdma_wr(send_wr)->rkey;
274 275
275 /* 276 /*
276 * omitted checking of IB_SEND_INLINE 277 * omitted checking of IB_SEND_INLINE
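A short sketch of the UD path ehca now uses: datagram fields come from ud_wr(send_wr) instead of send_wr->wr.ud, and qkey selection follows IB 1.2 C10-15 (a qkey with the high-order bit set selects the QP's qkey). The helper below is illustrative, not part of the patch.

static u32 example_resolve_qkey(struct ib_send_wr *send_wr, u32 qp_qkey)
{
	u32 remote_qkey = ud_wr(send_wr)->remote_qkey;

	/* IB 1.2 C10-15: high-order bit set means use the QP qkey. */
	if (remote_qkey & 0x80000000)
		remote_qkey = qp_qkey;

	return remote_qkey;
}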
diff --git a/drivers/staging/rdma/hfi1/keys.c b/drivers/staging/rdma/hfi1/keys.c
index f6eff177ace1..82c21b1c0263 100644
--- a/drivers/staging/rdma/hfi1/keys.c
+++ b/drivers/staging/rdma/hfi1/keys.c
@@ -358,12 +358,12 @@ bail:
358/* 358/*
359 * Initialize the memory region specified by the work request. 359 * Initialize the memory region specified by the work request.
360 */ 360 */
361int hfi1_fast_reg_mr(struct hfi1_qp *qp, struct ib_send_wr *wr) 361int hfi1_fast_reg_mr(struct hfi1_qp *qp, struct ib_fast_reg_wr *wr)
362{ 362{
363 struct hfi1_lkey_table *rkt = &to_idev(qp->ibqp.device)->lk_table; 363 struct hfi1_lkey_table *rkt = &to_idev(qp->ibqp.device)->lk_table;
364 struct hfi1_pd *pd = to_ipd(qp->ibqp.pd); 364 struct hfi1_pd *pd = to_ipd(qp->ibqp.pd);
365 struct hfi1_mregion *mr; 365 struct hfi1_mregion *mr;
366 u32 rkey = wr->wr.fast_reg.rkey; 366 u32 rkey = wr->rkey;
367 unsigned i, n, m; 367 unsigned i, n, m;
368 int ret = -EINVAL; 368 int ret = -EINVAL;
369 unsigned long flags; 369 unsigned long flags;
@@ -380,22 +380,22 @@ int hfi1_fast_reg_mr(struct hfi1_qp *qp, struct ib_send_wr *wr)
380 if (unlikely(mr == NULL || qp->ibqp.pd != mr->pd)) 380 if (unlikely(mr == NULL || qp->ibqp.pd != mr->pd))
381 goto bail; 381 goto bail;
382 382
383 if (wr->wr.fast_reg.page_list_len > mr->max_segs) 383 if (wr->page_list_len > mr->max_segs)
384 goto bail; 384 goto bail;
385 385
386 ps = 1UL << wr->wr.fast_reg.page_shift; 386 ps = 1UL << wr->page_shift;
387 if (wr->wr.fast_reg.length > ps * wr->wr.fast_reg.page_list_len) 387 if (wr->length > ps * wr->page_list_len)
388 goto bail; 388 goto bail;
389 389
390 mr->user_base = wr->wr.fast_reg.iova_start; 390 mr->user_base = wr->iova_start;
391 mr->iova = wr->wr.fast_reg.iova_start; 391 mr->iova = wr->iova_start;
392 mr->lkey = rkey; 392 mr->lkey = rkey;
393 mr->length = wr->wr.fast_reg.length; 393 mr->length = wr->length;
394 mr->access_flags = wr->wr.fast_reg.access_flags; 394 mr->access_flags = wr->access_flags;
395 page_list = wr->wr.fast_reg.page_list->page_list; 395 page_list = wr->page_list->page_list;
396 m = 0; 396 m = 0;
397 n = 0; 397 n = 0;
398 for (i = 0; i < wr->wr.fast_reg.page_list_len; i++) { 398 for (i = 0; i < wr->page_list_len; i++) {
399 mr->map[m]->segs[n].vaddr = (void *) page_list[i]; 399 mr->map[m]->segs[n].vaddr = (void *) page_list[i];
400 mr->map[m]->segs[n].length = ps; 400 mr->map[m]->segs[n].length = ps;
401 if (++n == HFI1_SEGSZ) { 401 if (++n == HFI1_SEGSZ) {
diff --git a/drivers/staging/rdma/hfi1/qp.c b/drivers/staging/rdma/hfi1/qp.c
index df1fa56eaf85..f8c36166962f 100644
--- a/drivers/staging/rdma/hfi1/qp.c
+++ b/drivers/staging/rdma/hfi1/qp.c
@@ -422,7 +422,7 @@ static void clear_mr_refs(struct hfi1_qp *qp, int clr_sends)
422 if (qp->ibqp.qp_type == IB_QPT_UD || 422 if (qp->ibqp.qp_type == IB_QPT_UD ||
423 qp->ibqp.qp_type == IB_QPT_SMI || 423 qp->ibqp.qp_type == IB_QPT_SMI ||
424 qp->ibqp.qp_type == IB_QPT_GSI) 424 qp->ibqp.qp_type == IB_QPT_GSI)
425 atomic_dec(&to_iah(wqe->wr.wr.ud.ah)->refcount); 425 atomic_dec(&to_iah(wqe->ud_wr.ah)->refcount);
426 if (++qp->s_last >= qp->s_size) 426 if (++qp->s_last >= qp->s_size)
427 qp->s_last = 0; 427 qp->s_last = 0;
428 } 428 }
diff --git a/drivers/staging/rdma/hfi1/rc.c b/drivers/staging/rdma/hfi1/rc.c
index 632dd5ba7dfd..fd0ac608c62d 100644
--- a/drivers/staging/rdma/hfi1/rc.c
+++ b/drivers/staging/rdma/hfi1/rc.c
@@ -404,9 +404,9 @@ int hfi1_make_rc_req(struct hfi1_qp *qp)
404 goto bail; 404 goto bail;
405 } 405 }
406 ohdr->u.rc.reth.vaddr = 406 ohdr->u.rc.reth.vaddr =
407 cpu_to_be64(wqe->wr.wr.rdma.remote_addr); 407 cpu_to_be64(wqe->rdma_wr.remote_addr);
408 ohdr->u.rc.reth.rkey = 408 ohdr->u.rc.reth.rkey =
409 cpu_to_be32(wqe->wr.wr.rdma.rkey); 409 cpu_to_be32(wqe->rdma_wr.rkey);
410 ohdr->u.rc.reth.length = cpu_to_be32(len); 410 ohdr->u.rc.reth.length = cpu_to_be32(len);
411 hwords += sizeof(struct ib_reth) / sizeof(u32); 411 hwords += sizeof(struct ib_reth) / sizeof(u32);
412 wqe->lpsn = wqe->psn; 412 wqe->lpsn = wqe->psn;
@@ -455,9 +455,9 @@ int hfi1_make_rc_req(struct hfi1_qp *qp)
455 wqe->lpsn = qp->s_next_psn++; 455 wqe->lpsn = qp->s_next_psn++;
456 } 456 }
457 ohdr->u.rc.reth.vaddr = 457 ohdr->u.rc.reth.vaddr =
458 cpu_to_be64(wqe->wr.wr.rdma.remote_addr); 458 cpu_to_be64(wqe->rdma_wr.remote_addr);
459 ohdr->u.rc.reth.rkey = 459 ohdr->u.rc.reth.rkey =
460 cpu_to_be32(wqe->wr.wr.rdma.rkey); 460 cpu_to_be32(wqe->rdma_wr.rkey);
461 ohdr->u.rc.reth.length = cpu_to_be32(len); 461 ohdr->u.rc.reth.length = cpu_to_be32(len);
462 qp->s_state = OP(RDMA_READ_REQUEST); 462 qp->s_state = OP(RDMA_READ_REQUEST);
463 hwords += sizeof(ohdr->u.rc.reth) / sizeof(u32); 463 hwords += sizeof(ohdr->u.rc.reth) / sizeof(u32);
@@ -488,21 +488,21 @@ int hfi1_make_rc_req(struct hfi1_qp *qp)
488 if (wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP) { 488 if (wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
489 qp->s_state = OP(COMPARE_SWAP); 489 qp->s_state = OP(COMPARE_SWAP);
490 ohdr->u.atomic_eth.swap_data = cpu_to_be64( 490 ohdr->u.atomic_eth.swap_data = cpu_to_be64(
491 wqe->wr.wr.atomic.swap); 491 wqe->atomic_wr.swap);
492 ohdr->u.atomic_eth.compare_data = cpu_to_be64( 492 ohdr->u.atomic_eth.compare_data = cpu_to_be64(
493 wqe->wr.wr.atomic.compare_add); 493 wqe->atomic_wr.compare_add);
494 } else { 494 } else {
495 qp->s_state = OP(FETCH_ADD); 495 qp->s_state = OP(FETCH_ADD);
496 ohdr->u.atomic_eth.swap_data = cpu_to_be64( 496 ohdr->u.atomic_eth.swap_data = cpu_to_be64(
497 wqe->wr.wr.atomic.compare_add); 497 wqe->atomic_wr.compare_add);
498 ohdr->u.atomic_eth.compare_data = 0; 498 ohdr->u.atomic_eth.compare_data = 0;
499 } 499 }
500 ohdr->u.atomic_eth.vaddr[0] = cpu_to_be32( 500 ohdr->u.atomic_eth.vaddr[0] = cpu_to_be32(
501 wqe->wr.wr.atomic.remote_addr >> 32); 501 wqe->atomic_wr.remote_addr >> 32);
502 ohdr->u.atomic_eth.vaddr[1] = cpu_to_be32( 502 ohdr->u.atomic_eth.vaddr[1] = cpu_to_be32(
503 wqe->wr.wr.atomic.remote_addr); 503 wqe->atomic_wr.remote_addr);
504 ohdr->u.atomic_eth.rkey = cpu_to_be32( 504 ohdr->u.atomic_eth.rkey = cpu_to_be32(
505 wqe->wr.wr.atomic.rkey); 505 wqe->atomic_wr.rkey);
506 hwords += sizeof(struct ib_atomic_eth) / sizeof(u32); 506 hwords += sizeof(struct ib_atomic_eth) / sizeof(u32);
507 ss = NULL; 507 ss = NULL;
508 len = 0; 508 len = 0;
@@ -629,9 +629,9 @@ int hfi1_make_rc_req(struct hfi1_qp *qp)
629 */ 629 */
630 len = (delta_psn(qp->s_psn, wqe->psn)) * pmtu; 630 len = (delta_psn(qp->s_psn, wqe->psn)) * pmtu;
631 ohdr->u.rc.reth.vaddr = 631 ohdr->u.rc.reth.vaddr =
632 cpu_to_be64(wqe->wr.wr.rdma.remote_addr + len); 632 cpu_to_be64(wqe->rdma_wr.remote_addr + len);
633 ohdr->u.rc.reth.rkey = 633 ohdr->u.rc.reth.rkey =
634 cpu_to_be32(wqe->wr.wr.rdma.rkey); 634 cpu_to_be32(wqe->rdma_wr.rkey);
635 ohdr->u.rc.reth.length = cpu_to_be32(wqe->length - len); 635 ohdr->u.rc.reth.length = cpu_to_be32(wqe->length - len);
636 qp->s_state = OP(RDMA_READ_REQUEST); 636 qp->s_state = OP(RDMA_READ_REQUEST);
637 hwords += sizeof(ohdr->u.rc.reth) / sizeof(u32); 637 hwords += sizeof(ohdr->u.rc.reth) / sizeof(u32);
diff --git a/drivers/staging/rdma/hfi1/ruc.c b/drivers/staging/rdma/hfi1/ruc.c
index a4115288db66..d614474770b3 100644
--- a/drivers/staging/rdma/hfi1/ruc.c
+++ b/drivers/staging/rdma/hfi1/ruc.c
@@ -481,8 +481,8 @@ again:
481 if (wqe->length == 0) 481 if (wqe->length == 0)
482 break; 482 break;
483 if (unlikely(!hfi1_rkey_ok(qp, &qp->r_sge.sge, wqe->length, 483 if (unlikely(!hfi1_rkey_ok(qp, &qp->r_sge.sge, wqe->length,
484 wqe->wr.wr.rdma.remote_addr, 484 wqe->rdma_wr.remote_addr,
485 wqe->wr.wr.rdma.rkey, 485 wqe->rdma_wr.rkey,
486 IB_ACCESS_REMOTE_WRITE))) 486 IB_ACCESS_REMOTE_WRITE)))
487 goto acc_err; 487 goto acc_err;
488 qp->r_sge.sg_list = NULL; 488 qp->r_sge.sg_list = NULL;
@@ -494,8 +494,8 @@ again:
494 if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_READ))) 494 if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_READ)))
495 goto inv_err; 495 goto inv_err;
496 if (unlikely(!hfi1_rkey_ok(qp, &sqp->s_sge.sge, wqe->length, 496 if (unlikely(!hfi1_rkey_ok(qp, &sqp->s_sge.sge, wqe->length,
497 wqe->wr.wr.rdma.remote_addr, 497 wqe->rdma_wr.remote_addr,
498 wqe->wr.wr.rdma.rkey, 498 wqe->rdma_wr.rkey,
499 IB_ACCESS_REMOTE_READ))) 499 IB_ACCESS_REMOTE_READ)))
500 goto acc_err; 500 goto acc_err;
501 release = 0; 501 release = 0;
@@ -512,18 +512,18 @@ again:
512 if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC))) 512 if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC)))
513 goto inv_err; 513 goto inv_err;
514 if (unlikely(!hfi1_rkey_ok(qp, &qp->r_sge.sge, sizeof(u64), 514 if (unlikely(!hfi1_rkey_ok(qp, &qp->r_sge.sge, sizeof(u64),
515 wqe->wr.wr.atomic.remote_addr, 515 wqe->atomic_wr.remote_addr,
516 wqe->wr.wr.atomic.rkey, 516 wqe->atomic_wr.rkey,
517 IB_ACCESS_REMOTE_ATOMIC))) 517 IB_ACCESS_REMOTE_ATOMIC)))
518 goto acc_err; 518 goto acc_err;
519 /* Perform atomic OP and save result. */ 519 /* Perform atomic OP and save result. */
520 maddr = (atomic64_t *) qp->r_sge.sge.vaddr; 520 maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
521 sdata = wqe->wr.wr.atomic.compare_add; 521 sdata = wqe->atomic_wr.compare_add;
522 *(u64 *) sqp->s_sge.sge.vaddr = 522 *(u64 *) sqp->s_sge.sge.vaddr =
523 (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ? 523 (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
524 (u64) atomic64_add_return(sdata, maddr) - sdata : 524 (u64) atomic64_add_return(sdata, maddr) - sdata :
525 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr, 525 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
526 sdata, wqe->wr.wr.atomic.swap); 526 sdata, wqe->atomic_wr.swap);
527 hfi1_put_mr(qp->r_sge.sge.mr); 527 hfi1_put_mr(qp->r_sge.sge.mr);
528 qp->r_sge.num_sge = 0; 528 qp->r_sge.num_sge = 0;
529 goto send_comp; 529 goto send_comp;
@@ -913,7 +913,7 @@ void hfi1_send_complete(struct hfi1_qp *qp, struct hfi1_swqe *wqe,
913 if (qp->ibqp.qp_type == IB_QPT_UD || 913 if (qp->ibqp.qp_type == IB_QPT_UD ||
914 qp->ibqp.qp_type == IB_QPT_SMI || 914 qp->ibqp.qp_type == IB_QPT_SMI ||
915 qp->ibqp.qp_type == IB_QPT_GSI) 915 qp->ibqp.qp_type == IB_QPT_GSI)
916 atomic_dec(&to_iah(wqe->wr.wr.ud.ah)->refcount); 916 atomic_dec(&to_iah(wqe->ud_wr.ah)->refcount);
917 917
918 /* See ch. 11.2.4.1 and 10.7.3.1 */ 918 /* See ch. 11.2.4.1 and 10.7.3.1 */
919 if (!(qp->s_flags & HFI1_S_SIGNAL_REQ_WR) || 919 if (!(qp->s_flags & HFI1_S_SIGNAL_REQ_WR) ||
diff --git a/drivers/staging/rdma/hfi1/uc.c b/drivers/staging/rdma/hfi1/uc.c
index b536f397737c..6095039c4485 100644
--- a/drivers/staging/rdma/hfi1/uc.c
+++ b/drivers/staging/rdma/hfi1/uc.c
@@ -147,9 +147,9 @@ int hfi1_make_uc_req(struct hfi1_qp *qp)
147 case IB_WR_RDMA_WRITE: 147 case IB_WR_RDMA_WRITE:
148 case IB_WR_RDMA_WRITE_WITH_IMM: 148 case IB_WR_RDMA_WRITE_WITH_IMM:
149 ohdr->u.rc.reth.vaddr = 149 ohdr->u.rc.reth.vaddr =
150 cpu_to_be64(wqe->wr.wr.rdma.remote_addr); 150 cpu_to_be64(wqe->rdma_wr.remote_addr);
151 ohdr->u.rc.reth.rkey = 151 ohdr->u.rc.reth.rkey =
152 cpu_to_be32(wqe->wr.wr.rdma.rkey); 152 cpu_to_be32(wqe->rdma_wr.rkey);
153 ohdr->u.rc.reth.length = cpu_to_be32(len); 153 ohdr->u.rc.reth.length = cpu_to_be32(len);
154 hwords += sizeof(struct ib_reth) / 4; 154 hwords += sizeof(struct ib_reth) / 4;
155 if (len > pmtu) { 155 if (len > pmtu) {
diff --git a/drivers/staging/rdma/hfi1/ud.c b/drivers/staging/rdma/hfi1/ud.c
index d40d1a1e10aa..5a9c784bec04 100644
--- a/drivers/staging/rdma/hfi1/ud.c
+++ b/drivers/staging/rdma/hfi1/ud.c
@@ -80,7 +80,7 @@ static void ud_loopback(struct hfi1_qp *sqp, struct hfi1_swqe *swqe)
80 80
81 rcu_read_lock(); 81 rcu_read_lock();
82 82
83 qp = hfi1_lookup_qpn(ibp, swqe->wr.wr.ud.remote_qpn); 83 qp = hfi1_lookup_qpn(ibp, swqe->ud_wr.remote_qpn);
84 if (!qp) { 84 if (!qp) {
85 ibp->n_pkt_drops++; 85 ibp->n_pkt_drops++;
86 rcu_read_unlock(); 86 rcu_read_unlock();
@@ -98,7 +98,7 @@ static void ud_loopback(struct hfi1_qp *sqp, struct hfi1_swqe *swqe)
98 goto drop; 98 goto drop;
99 } 99 }
100 100
101 ah_attr = &to_iah(swqe->wr.wr.ud.ah)->attr; 101 ah_attr = &to_iah(swqe->ud_wr.ah)->attr;
102 ppd = ppd_from_ibp(ibp); 102 ppd = ppd_from_ibp(ibp);
103 103
104 if (qp->ibqp.qp_num > 1) { 104 if (qp->ibqp.qp_num > 1) {
@@ -128,8 +128,8 @@ static void ud_loopback(struct hfi1_qp *sqp, struct hfi1_swqe *swqe)
128 if (qp->ibqp.qp_num) { 128 if (qp->ibqp.qp_num) {
129 u32 qkey; 129 u32 qkey;
130 130
131 qkey = (int)swqe->wr.wr.ud.remote_qkey < 0 ? 131 qkey = (int)swqe->ud_wr.remote_qkey < 0 ?
132 sqp->qkey : swqe->wr.wr.ud.remote_qkey; 132 sqp->qkey : swqe->ud_wr.remote_qkey;
133 if (unlikely(qkey != qp->qkey)) { 133 if (unlikely(qkey != qp->qkey)) {
134 u16 lid; 134 u16 lid;
135 135
@@ -234,7 +234,7 @@ static void ud_loopback(struct hfi1_qp *sqp, struct hfi1_swqe *swqe)
234 if (qp->ibqp.qp_type == IB_QPT_GSI || qp->ibqp.qp_type == IB_QPT_SMI) { 234 if (qp->ibqp.qp_type == IB_QPT_GSI || qp->ibqp.qp_type == IB_QPT_SMI) {
235 if (sqp->ibqp.qp_type == IB_QPT_GSI || 235 if (sqp->ibqp.qp_type == IB_QPT_GSI ||
236 sqp->ibqp.qp_type == IB_QPT_SMI) 236 sqp->ibqp.qp_type == IB_QPT_SMI)
237 wc.pkey_index = swqe->wr.wr.ud.pkey_index; 237 wc.pkey_index = swqe->ud_wr.pkey_index;
238 else 238 else
239 wc.pkey_index = sqp->s_pkey_index; 239 wc.pkey_index = sqp->s_pkey_index;
240 } else { 240 } else {
@@ -309,7 +309,7 @@ int hfi1_make_ud_req(struct hfi1_qp *qp)
309 /* Construct the header. */ 309 /* Construct the header. */
310 ibp = to_iport(qp->ibqp.device, qp->port_num); 310 ibp = to_iport(qp->ibqp.device, qp->port_num);
311 ppd = ppd_from_ibp(ibp); 311 ppd = ppd_from_ibp(ibp);
312 ah_attr = &to_iah(wqe->wr.wr.ud.ah)->attr; 312 ah_attr = &to_iah(wqe->ud_wr.ah)->attr;
313 if (ah_attr->dlid < HFI1_MULTICAST_LID_BASE || 313 if (ah_attr->dlid < HFI1_MULTICAST_LID_BASE ||
314 ah_attr->dlid == HFI1_PERMISSIVE_LID) { 314 ah_attr->dlid == HFI1_PERMISSIVE_LID) {
315 lid = ah_attr->dlid & ~((1 << ppd->lmc) - 1); 315 lid = ah_attr->dlid & ~((1 << ppd->lmc) - 1);
@@ -401,18 +401,18 @@ int hfi1_make_ud_req(struct hfi1_qp *qp)
401 bth0 |= IB_BTH_SOLICITED; 401 bth0 |= IB_BTH_SOLICITED;
402 bth0 |= extra_bytes << 20; 402 bth0 |= extra_bytes << 20;
403 if (qp->ibqp.qp_type == IB_QPT_GSI || qp->ibqp.qp_type == IB_QPT_SMI) 403 if (qp->ibqp.qp_type == IB_QPT_GSI || qp->ibqp.qp_type == IB_QPT_SMI)
404 bth0 |= hfi1_get_pkey(ibp, wqe->wr.wr.ud.pkey_index); 404 bth0 |= hfi1_get_pkey(ibp, wqe->ud_wr.pkey_index);
405 else 405 else
406 bth0 |= hfi1_get_pkey(ibp, qp->s_pkey_index); 406 bth0 |= hfi1_get_pkey(ibp, qp->s_pkey_index);
407 ohdr->bth[0] = cpu_to_be32(bth0); 407 ohdr->bth[0] = cpu_to_be32(bth0);
408 ohdr->bth[1] = cpu_to_be32(wqe->wr.wr.ud.remote_qpn); 408 ohdr->bth[1] = cpu_to_be32(wqe->ud_wr.remote_qpn);
409 ohdr->bth[2] = cpu_to_be32(mask_psn(qp->s_next_psn++)); 409 ohdr->bth[2] = cpu_to_be32(mask_psn(qp->s_next_psn++));
410 /* 410 /*
411 * Qkeys with the high order bit set mean use the 411 * Qkeys with the high order bit set mean use the
412 * qkey from the QP context instead of the WR (see 10.2.5). 412 * qkey from the QP context instead of the WR (see 10.2.5).
413 */ 413 */
414 ohdr->u.ud.deth[0] = cpu_to_be32((int)wqe->wr.wr.ud.remote_qkey < 0 ? 414 ohdr->u.ud.deth[0] = cpu_to_be32((int)wqe->ud_wr.remote_qkey < 0 ?
415 qp->qkey : wqe->wr.wr.ud.remote_qkey); 415 qp->qkey : wqe->ud_wr.remote_qkey);
416 ohdr->u.ud.deth[1] = cpu_to_be32(qp->ibqp.qp_num); 416 ohdr->u.ud.deth[1] = cpu_to_be32(qp->ibqp.qp_num);
417 /* disarm any ahg */ 417 /* disarm any ahg */
418 qp->s_hdr->ahgcount = 0; 418 qp->s_hdr->ahgcount = 0;
diff --git a/drivers/staging/rdma/hfi1/verbs.c b/drivers/staging/rdma/hfi1/verbs.c
index 41bb59eb001c..981e6c1b79a3 100644
--- a/drivers/staging/rdma/hfi1/verbs.c
+++ b/drivers/staging/rdma/hfi1/verbs.c
@@ -391,7 +391,7 @@ static int post_one_send(struct hfi1_qp *qp, struct ib_send_wr *wr)
391 wr->opcode != IB_WR_SEND_WITH_IMM) 391 wr->opcode != IB_WR_SEND_WITH_IMM)
392 return -EINVAL; 392 return -EINVAL;
393 /* Check UD destination address PD */ 393 /* Check UD destination address PD */
394 if (qp->ibqp.pd != wr->wr.ud.ah->pd) 394 if (qp->ibqp.pd != ud_wr(wr)->ah->pd)
395 return -EINVAL; 395 return -EINVAL;
396 } else if ((unsigned) wr->opcode > IB_WR_ATOMIC_FETCH_AND_ADD) 396 } else if ((unsigned) wr->opcode > IB_WR_ATOMIC_FETCH_AND_ADD)
397 return -EINVAL; 397 return -EINVAL;
@@ -412,7 +412,24 @@ static int post_one_send(struct hfi1_qp *qp, struct ib_send_wr *wr)
412 rkt = &to_idev(qp->ibqp.device)->lk_table; 412 rkt = &to_idev(qp->ibqp.device)->lk_table;
413 pd = to_ipd(qp->ibqp.pd); 413 pd = to_ipd(qp->ibqp.pd);
414 wqe = get_swqe_ptr(qp, qp->s_head); 414 wqe = get_swqe_ptr(qp, qp->s_head);
415 wqe->wr = *wr; 415
416
417 if (qp->ibqp.qp_type != IB_QPT_UC &&
418 qp->ibqp.qp_type != IB_QPT_RC)
419 memcpy(&wqe->ud_wr, ud_wr(wr), sizeof(wqe->ud_wr));
420 else if (wr->opcode == IB_WR_FAST_REG_MR)
421 memcpy(&wqe->fast_reg_wr, fast_reg_wr(wr),
422 sizeof(wqe->fast_reg_wr));
423 else if (wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM ||
424 wr->opcode == IB_WR_RDMA_WRITE ||
425 wr->opcode == IB_WR_RDMA_READ)
426 memcpy(&wqe->rdma_wr, rdma_wr(wr), sizeof(wqe->rdma_wr));
427 else if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
428 wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD)
429 memcpy(&wqe->atomic_wr, atomic_wr(wr), sizeof(wqe->atomic_wr));
430 else
431 memcpy(&wqe->wr, wr, sizeof(wqe->wr));
432
416 wqe->length = 0; 433 wqe->length = 0;
417 j = 0; 434 j = 0;
418 if (wr->num_sge) { 435 if (wr->num_sge) {
@@ -438,7 +455,7 @@ static int post_one_send(struct hfi1_qp *qp, struct ib_send_wr *wr)
438 if (wqe->length > 0x80000000U) 455 if (wqe->length > 0x80000000U)
439 goto bail_inval_free; 456 goto bail_inval_free;
440 } else { 457 } else {
441 struct hfi1_ah *ah = to_iah(wr->wr.ud.ah); 458 struct hfi1_ah *ah = to_iah(ud_wr(wr)->ah);
442 459
443 atomic_inc(&ah->refcount); 460 atomic_inc(&ah->refcount);
444 } 461 }
diff --git a/drivers/staging/rdma/hfi1/verbs.h b/drivers/staging/rdma/hfi1/verbs.h
index ed903a93baf7..cf5a3c956284 100644
--- a/drivers/staging/rdma/hfi1/verbs.h
+++ b/drivers/staging/rdma/hfi1/verbs.h
@@ -348,7 +348,13 @@ struct hfi1_mr {
348 * in qp->s_max_sge. 348 * in qp->s_max_sge.
349 */ 349 */
350struct hfi1_swqe { 350struct hfi1_swqe {
351 struct ib_send_wr wr; /* don't use wr.sg_list */ 351 union {
352 struct ib_send_wr wr; /* don't use wr.sg_list */
353 struct ib_rdma_wr rdma_wr;
354 struct ib_atomic_wr atomic_wr;
355 struct ib_ud_wr ud_wr;
356 struct ib_fast_reg_wr fast_reg_wr;
357 };
352 u32 psn; /* first packet sequence number */ 358 u32 psn; /* first packet sequence number */
353 u32 lpsn; /* last packet sequence number */ 359 u32 lpsn; /* last packet sequence number */
354 u32 ssn; /* send sequence number */ 360 u32 ssn; /* send sequence number */
@@ -1025,7 +1031,7 @@ struct ib_fast_reg_page_list *hfi1_alloc_fast_reg_page_list(
1025 1031
1026void hfi1_free_fast_reg_page_list(struct ib_fast_reg_page_list *pl); 1032void hfi1_free_fast_reg_page_list(struct ib_fast_reg_page_list *pl);
1027 1033
1028int hfi1_fast_reg_mr(struct hfi1_qp *qp, struct ib_send_wr *wr); 1034int hfi1_fast_reg_mr(struct hfi1_qp *qp, struct ib_fast_reg_wr *wr);
1029 1035
1030struct ib_fmr *hfi1_alloc_fmr(struct ib_pd *pd, int mr_access_flags, 1036struct ib_fmr *hfi1_alloc_fmr(struct ib_pd *pd, int mr_access_flags,
1031 struct ib_fmr_attr *fmr_attr); 1037 struct ib_fmr_attr *fmr_attr);
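Because struct ib_send_wr is the first member of every wrapper, the union members added to hfi1_swqe above alias it, so generic code can keep inspecting wqe->wr.opcode regardless of which wrapper post_one_send copied in. A minimal sketch (the function name is illustrative):

static void example_inspect_swqe(struct hfi1_swqe *wqe)
{
	BUILD_BUG_ON(offsetof(struct ib_rdma_wr, wr) != 0);

	if (wqe->wr.opcode == IB_WR_RDMA_WRITE)
		pr_debug("RDMA WRITE to %llx rkey %x\n",
			 (unsigned long long)wqe->rdma_wr.remote_addr,
			 wqe->rdma_wr.rkey);
}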
diff --git a/drivers/staging/rdma/ipath/ipath_rc.c b/drivers/staging/rdma/ipath/ipath_rc.c
index 79b3dbc97179..d4aa53574e57 100644
--- a/drivers/staging/rdma/ipath/ipath_rc.c
+++ b/drivers/staging/rdma/ipath/ipath_rc.c
@@ -350,9 +350,9 @@ int ipath_make_rc_req(struct ipath_qp *qp)
350 goto bail; 350 goto bail;
351 } 351 }
352 ohdr->u.rc.reth.vaddr = 352 ohdr->u.rc.reth.vaddr =
353 cpu_to_be64(wqe->wr.wr.rdma.remote_addr); 353 cpu_to_be64(wqe->rdma_wr.remote_addr);
354 ohdr->u.rc.reth.rkey = 354 ohdr->u.rc.reth.rkey =
355 cpu_to_be32(wqe->wr.wr.rdma.rkey); 355 cpu_to_be32(wqe->rdma_wr.rkey);
356 ohdr->u.rc.reth.length = cpu_to_be32(len); 356 ohdr->u.rc.reth.length = cpu_to_be32(len);
357 hwords += sizeof(struct ib_reth) / sizeof(u32); 357 hwords += sizeof(struct ib_reth) / sizeof(u32);
358 wqe->lpsn = wqe->psn; 358 wqe->lpsn = wqe->psn;
@@ -401,9 +401,9 @@ int ipath_make_rc_req(struct ipath_qp *qp)
401 wqe->lpsn = qp->s_next_psn++; 401 wqe->lpsn = qp->s_next_psn++;
402 } 402 }
403 ohdr->u.rc.reth.vaddr = 403 ohdr->u.rc.reth.vaddr =
404 cpu_to_be64(wqe->wr.wr.rdma.remote_addr); 404 cpu_to_be64(wqe->rdma_wr.remote_addr);
405 ohdr->u.rc.reth.rkey = 405 ohdr->u.rc.reth.rkey =
406 cpu_to_be32(wqe->wr.wr.rdma.rkey); 406 cpu_to_be32(wqe->rdma_wr.rkey);
407 ohdr->u.rc.reth.length = cpu_to_be32(len); 407 ohdr->u.rc.reth.length = cpu_to_be32(len);
408 qp->s_state = OP(RDMA_READ_REQUEST); 408 qp->s_state = OP(RDMA_READ_REQUEST);
409 hwords += sizeof(ohdr->u.rc.reth) / sizeof(u32); 409 hwords += sizeof(ohdr->u.rc.reth) / sizeof(u32);
@@ -433,21 +433,21 @@ int ipath_make_rc_req(struct ipath_qp *qp)
433 if (wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP) { 433 if (wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
434 qp->s_state = OP(COMPARE_SWAP); 434 qp->s_state = OP(COMPARE_SWAP);
435 ohdr->u.atomic_eth.swap_data = cpu_to_be64( 435 ohdr->u.atomic_eth.swap_data = cpu_to_be64(
436 wqe->wr.wr.atomic.swap); 436 wqe->atomic_wr.swap);
437 ohdr->u.atomic_eth.compare_data = cpu_to_be64( 437 ohdr->u.atomic_eth.compare_data = cpu_to_be64(
438 wqe->wr.wr.atomic.compare_add); 438 wqe->atomic_wr.compare_add);
439 } else { 439 } else {
440 qp->s_state = OP(FETCH_ADD); 440 qp->s_state = OP(FETCH_ADD);
441 ohdr->u.atomic_eth.swap_data = cpu_to_be64( 441 ohdr->u.atomic_eth.swap_data = cpu_to_be64(
442 wqe->wr.wr.atomic.compare_add); 442 wqe->atomic_wr.compare_add);
443 ohdr->u.atomic_eth.compare_data = 0; 443 ohdr->u.atomic_eth.compare_data = 0;
444 } 444 }
445 ohdr->u.atomic_eth.vaddr[0] = cpu_to_be32( 445 ohdr->u.atomic_eth.vaddr[0] = cpu_to_be32(
446 wqe->wr.wr.atomic.remote_addr >> 32); 446 wqe->atomic_wr.remote_addr >> 32);
447 ohdr->u.atomic_eth.vaddr[1] = cpu_to_be32( 447 ohdr->u.atomic_eth.vaddr[1] = cpu_to_be32(
448 wqe->wr.wr.atomic.remote_addr); 448 wqe->atomic_wr.remote_addr);
449 ohdr->u.atomic_eth.rkey = cpu_to_be32( 449 ohdr->u.atomic_eth.rkey = cpu_to_be32(
450 wqe->wr.wr.atomic.rkey); 450 wqe->atomic_wr.rkey);
451 hwords += sizeof(struct ib_atomic_eth) / sizeof(u32); 451 hwords += sizeof(struct ib_atomic_eth) / sizeof(u32);
452 ss = NULL; 452 ss = NULL;
453 len = 0; 453 len = 0;
@@ -567,9 +567,9 @@ int ipath_make_rc_req(struct ipath_qp *qp)
567 ipath_init_restart(qp, wqe); 567 ipath_init_restart(qp, wqe);
568 len = ((qp->s_psn - wqe->psn) & IPATH_PSN_MASK) * pmtu; 568 len = ((qp->s_psn - wqe->psn) & IPATH_PSN_MASK) * pmtu;
569 ohdr->u.rc.reth.vaddr = 569 ohdr->u.rc.reth.vaddr =
570 cpu_to_be64(wqe->wr.wr.rdma.remote_addr + len); 570 cpu_to_be64(wqe->rdma_wr.remote_addr + len);
571 ohdr->u.rc.reth.rkey = 571 ohdr->u.rc.reth.rkey =
572 cpu_to_be32(wqe->wr.wr.rdma.rkey); 572 cpu_to_be32(wqe->rdma_wr.rkey);
573 ohdr->u.rc.reth.length = cpu_to_be32(qp->s_len); 573 ohdr->u.rc.reth.length = cpu_to_be32(qp->s_len);
574 qp->s_state = OP(RDMA_READ_REQUEST); 574 qp->s_state = OP(RDMA_READ_REQUEST);
575 hwords += sizeof(ohdr->u.rc.reth) / sizeof(u32); 575 hwords += sizeof(ohdr->u.rc.reth) / sizeof(u32);
diff --git a/drivers/staging/rdma/ipath/ipath_ruc.c b/drivers/staging/rdma/ipath/ipath_ruc.c
index 1f95bbaf7602..46af8b03d3d4 100644
--- a/drivers/staging/rdma/ipath/ipath_ruc.c
+++ b/drivers/staging/rdma/ipath/ipath_ruc.c
@@ -353,8 +353,8 @@ again:
353 if (wqe->length == 0) 353 if (wqe->length == 0)
354 break; 354 break;
355 if (unlikely(!ipath_rkey_ok(qp, &qp->r_sge, wqe->length, 355 if (unlikely(!ipath_rkey_ok(qp, &qp->r_sge, wqe->length,
356 wqe->wr.wr.rdma.remote_addr, 356 wqe->rdma_wr.remote_addr,
357 wqe->wr.wr.rdma.rkey, 357 wqe->rdma_wr.rkey,
358 IB_ACCESS_REMOTE_WRITE))) 358 IB_ACCESS_REMOTE_WRITE)))
359 goto acc_err; 359 goto acc_err;
360 break; 360 break;
@@ -363,8 +363,8 @@ again:
363 if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_READ))) 363 if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_READ)))
364 goto inv_err; 364 goto inv_err;
365 if (unlikely(!ipath_rkey_ok(qp, &sqp->s_sge, wqe->length, 365 if (unlikely(!ipath_rkey_ok(qp, &sqp->s_sge, wqe->length,
366 wqe->wr.wr.rdma.remote_addr, 366 wqe->rdma_wr.remote_addr,
367 wqe->wr.wr.rdma.rkey, 367 wqe->rdma_wr.rkey,
368 IB_ACCESS_REMOTE_READ))) 368 IB_ACCESS_REMOTE_READ)))
369 goto acc_err; 369 goto acc_err;
370 qp->r_sge.sge = wqe->sg_list[0]; 370 qp->r_sge.sge = wqe->sg_list[0];
@@ -377,18 +377,18 @@ again:
377 if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC))) 377 if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC)))
378 goto inv_err; 378 goto inv_err;
379 if (unlikely(!ipath_rkey_ok(qp, &qp->r_sge, sizeof(u64), 379 if (unlikely(!ipath_rkey_ok(qp, &qp->r_sge, sizeof(u64),
380 wqe->wr.wr.atomic.remote_addr, 380 wqe->atomic_wr.remote_addr,
381 wqe->wr.wr.atomic.rkey, 381 wqe->atomic_wr.rkey,
382 IB_ACCESS_REMOTE_ATOMIC))) 382 IB_ACCESS_REMOTE_ATOMIC)))
383 goto acc_err; 383 goto acc_err;
384 /* Perform atomic OP and save result. */ 384 /* Perform atomic OP and save result. */
385 maddr = (atomic64_t *) qp->r_sge.sge.vaddr; 385 maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
386 sdata = wqe->wr.wr.atomic.compare_add; 386 sdata = wqe->atomic_wr.compare_add;
387 *(u64 *) sqp->s_sge.sge.vaddr = 387 *(u64 *) sqp->s_sge.sge.vaddr =
388 (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ? 388 (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
389 (u64) atomic64_add_return(sdata, maddr) - sdata : 389 (u64) atomic64_add_return(sdata, maddr) - sdata :
390 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr, 390 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
391 sdata, wqe->wr.wr.atomic.swap); 391 sdata, wqe->atomic_wr.swap);
392 goto send_comp; 392 goto send_comp;
393 393
394 default: 394 default:
diff --git a/drivers/staging/rdma/ipath/ipath_uc.c b/drivers/staging/rdma/ipath/ipath_uc.c
index 22e60998f1a7..0246b30280b9 100644
--- a/drivers/staging/rdma/ipath/ipath_uc.c
+++ b/drivers/staging/rdma/ipath/ipath_uc.c
@@ -126,9 +126,9 @@ int ipath_make_uc_req(struct ipath_qp *qp)
126 case IB_WR_RDMA_WRITE: 126 case IB_WR_RDMA_WRITE:
127 case IB_WR_RDMA_WRITE_WITH_IMM: 127 case IB_WR_RDMA_WRITE_WITH_IMM:
128 ohdr->u.rc.reth.vaddr = 128 ohdr->u.rc.reth.vaddr =
129 cpu_to_be64(wqe->wr.wr.rdma.remote_addr); 129 cpu_to_be64(wqe->rdma_wr.remote_addr);
130 ohdr->u.rc.reth.rkey = 130 ohdr->u.rc.reth.rkey =
131 cpu_to_be32(wqe->wr.wr.rdma.rkey); 131 cpu_to_be32(wqe->rdma_wr.rkey);
132 ohdr->u.rc.reth.length = cpu_to_be32(len); 132 ohdr->u.rc.reth.length = cpu_to_be32(len);
133 hwords += sizeof(struct ib_reth) / 4; 133 hwords += sizeof(struct ib_reth) / 4;
134 if (len > pmtu) { 134 if (len > pmtu) {
diff --git a/drivers/staging/rdma/ipath/ipath_ud.c b/drivers/staging/rdma/ipath/ipath_ud.c
index e8a2a915251e..3ffc1565d03d 100644
--- a/drivers/staging/rdma/ipath/ipath_ud.c
+++ b/drivers/staging/rdma/ipath/ipath_ud.c
@@ -65,7 +65,7 @@ static void ipath_ud_loopback(struct ipath_qp *sqp, struct ipath_swqe *swqe)
65 u32 rlen; 65 u32 rlen;
66 u32 length; 66 u32 length;
67 67
68 qp = ipath_lookup_qpn(&dev->qp_table, swqe->wr.wr.ud.remote_qpn); 68 qp = ipath_lookup_qpn(&dev->qp_table, swqe->ud_wr.remote_qpn);
69 if (!qp || !(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_RECV_OK)) { 69 if (!qp || !(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_RECV_OK)) {
70 dev->n_pkt_drops++; 70 dev->n_pkt_drops++;
71 goto done; 71 goto done;
@@ -77,8 +77,8 @@ static void ipath_ud_loopback(struct ipath_qp *sqp, struct ipath_swqe *swqe)
77 * qkey from the QP context instead of the WR (see 10.2.5). 77 * qkey from the QP context instead of the WR (see 10.2.5).
78 */ 78 */
79 if (unlikely(qp->ibqp.qp_num && 79 if (unlikely(qp->ibqp.qp_num &&
80 ((int) swqe->wr.wr.ud.remote_qkey < 0 ? 80 ((int) swqe->ud_wr.remote_qkey < 0 ?
81 sqp->qkey : swqe->wr.wr.ud.remote_qkey) != qp->qkey)) { 81 sqp->qkey : swqe->ud_wr.remote_qkey) != qp->qkey)) {
82 /* XXX OK to lose a count once in a while. */ 82 /* XXX OK to lose a count once in a while. */
83 dev->qkey_violations++; 83 dev->qkey_violations++;
84 dev->n_pkt_drops++; 84 dev->n_pkt_drops++;
@@ -175,7 +175,7 @@ static void ipath_ud_loopback(struct ipath_qp *sqp, struct ipath_swqe *swqe)
175 } else 175 } else
176 spin_unlock_irqrestore(&rq->lock, flags); 176 spin_unlock_irqrestore(&rq->lock, flags);
177 177
178 ah_attr = &to_iah(swqe->wr.wr.ud.ah)->attr; 178 ah_attr = &to_iah(swqe->ud_wr.ah)->attr;
179 if (ah_attr->ah_flags & IB_AH_GRH) { 179 if (ah_attr->ah_flags & IB_AH_GRH) {
180 ipath_copy_sge(&rsge, &ah_attr->grh, sizeof(struct ib_grh)); 180 ipath_copy_sge(&rsge, &ah_attr->grh, sizeof(struct ib_grh));
181 wc.wc_flags |= IB_WC_GRH; 181 wc.wc_flags |= IB_WC_GRH;
@@ -225,7 +225,7 @@ static void ipath_ud_loopback(struct ipath_qp *sqp, struct ipath_swqe *swqe)
225 wc.port_num = 1; 225 wc.port_num = 1;
226 /* Signal completion event if the solicited bit is set. */ 226 /* Signal completion event if the solicited bit is set. */
227 ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 227 ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
228 swqe->wr.send_flags & IB_SEND_SOLICITED); 228 swqe->ud_wr.wr.send_flags & IB_SEND_SOLICITED);
229drop: 229drop:
230 if (atomic_dec_and_test(&qp->refcount)) 230 if (atomic_dec_and_test(&qp->refcount))
231 wake_up(&qp->wait); 231 wake_up(&qp->wait);
@@ -280,7 +280,7 @@ int ipath_make_ud_req(struct ipath_qp *qp)
280 next_cur = 0; 280 next_cur = 0;
281 281
282 /* Construct the header. */ 282 /* Construct the header. */
283 ah_attr = &to_iah(wqe->wr.wr.ud.ah)->attr; 283 ah_attr = &to_iah(wqe->ud_wr.ah)->attr;
284 if (ah_attr->dlid >= IPATH_MULTICAST_LID_BASE) { 284 if (ah_attr->dlid >= IPATH_MULTICAST_LID_BASE) {
285 if (ah_attr->dlid != IPATH_PERMISSIVE_LID) 285 if (ah_attr->dlid != IPATH_PERMISSIVE_LID)
286 dev->n_multicast_xmit++; 286 dev->n_multicast_xmit++;
@@ -322,7 +322,7 @@ int ipath_make_ud_req(struct ipath_qp *qp)
322 qp->s_wqe = wqe; 322 qp->s_wqe = wqe;
323 qp->s_sge.sge = wqe->sg_list[0]; 323 qp->s_sge.sge = wqe->sg_list[0];
324 qp->s_sge.sg_list = wqe->sg_list + 1; 324 qp->s_sge.sg_list = wqe->sg_list + 1;
325 qp->s_sge.num_sge = wqe->wr.num_sge; 325 qp->s_sge.num_sge = wqe->ud_wr.wr.num_sge;
326 326
327 if (ah_attr->ah_flags & IB_AH_GRH) { 327 if (ah_attr->ah_flags & IB_AH_GRH) {
328 /* Header size in 32-bit words. */ 328 /* Header size in 32-bit words. */
@@ -340,9 +340,9 @@ int ipath_make_ud_req(struct ipath_qp *qp)
340 lrh0 = IPATH_LRH_BTH; 340 lrh0 = IPATH_LRH_BTH;
341 ohdr = &qp->s_hdr.u.oth; 341 ohdr = &qp->s_hdr.u.oth;
342 } 342 }
343 if (wqe->wr.opcode == IB_WR_SEND_WITH_IMM) { 343 if (wqe->ud_wr.wr.opcode == IB_WR_SEND_WITH_IMM) {
344 qp->s_hdrwords++; 344 qp->s_hdrwords++;
345 ohdr->u.ud.imm_data = wqe->wr.ex.imm_data; 345 ohdr->u.ud.imm_data = wqe->ud_wr.wr.ex.imm_data;
346 bth0 = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE << 24; 346 bth0 = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE << 24;
347 } else 347 } else
348 bth0 = IB_OPCODE_UD_SEND_ONLY << 24; 348 bth0 = IB_OPCODE_UD_SEND_ONLY << 24;
@@ -360,7 +360,7 @@ int ipath_make_ud_req(struct ipath_qp *qp)
360 qp->s_hdr.lrh[3] = cpu_to_be16(lid); 360 qp->s_hdr.lrh[3] = cpu_to_be16(lid);
361 } else 361 } else
362 qp->s_hdr.lrh[3] = IB_LID_PERMISSIVE; 362 qp->s_hdr.lrh[3] = IB_LID_PERMISSIVE;
363 if (wqe->wr.send_flags & IB_SEND_SOLICITED) 363 if (wqe->ud_wr.wr.send_flags & IB_SEND_SOLICITED)
364 bth0 |= 1 << 23; 364 bth0 |= 1 << 23;
365 bth0 |= extra_bytes << 20; 365 bth0 |= extra_bytes << 20;
366 bth0 |= qp->ibqp.qp_type == IB_QPT_SMI ? IPATH_DEFAULT_P_KEY : 366 bth0 |= qp->ibqp.qp_type == IB_QPT_SMI ? IPATH_DEFAULT_P_KEY :
@@ -372,14 +372,14 @@ int ipath_make_ud_req(struct ipath_qp *qp)
372 ohdr->bth[1] = ah_attr->dlid >= IPATH_MULTICAST_LID_BASE && 372 ohdr->bth[1] = ah_attr->dlid >= IPATH_MULTICAST_LID_BASE &&
373 ah_attr->dlid != IPATH_PERMISSIVE_LID ? 373 ah_attr->dlid != IPATH_PERMISSIVE_LID ?
374 cpu_to_be32(IPATH_MULTICAST_QPN) : 374 cpu_to_be32(IPATH_MULTICAST_QPN) :
375 cpu_to_be32(wqe->wr.wr.ud.remote_qpn); 375 cpu_to_be32(wqe->ud_wr.remote_qpn);
376 ohdr->bth[2] = cpu_to_be32(qp->s_next_psn++ & IPATH_PSN_MASK); 376 ohdr->bth[2] = cpu_to_be32(qp->s_next_psn++ & IPATH_PSN_MASK);
377 /* 377 /*
378 * Qkeys with the high order bit set mean use the 378 * Qkeys with the high order bit set mean use the
379 * qkey from the QP context instead of the WR (see 10.2.5). 379 * qkey from the QP context instead of the WR (see 10.2.5).
380 */ 380 */
381 ohdr->u.ud.deth[0] = cpu_to_be32((int)wqe->wr.wr.ud.remote_qkey < 0 ? 381 ohdr->u.ud.deth[0] = cpu_to_be32((int)wqe->ud_wr.remote_qkey < 0 ?
382 qp->qkey : wqe->wr.wr.ud.remote_qkey); 382 qp->qkey : wqe->ud_wr.remote_qkey);
383 ohdr->u.ud.deth[1] = cpu_to_be32(qp->ibqp.qp_num); 383 ohdr->u.ud.deth[1] = cpu_to_be32(qp->ibqp.qp_num);
384 384
385done: 385done:
diff --git a/drivers/staging/rdma/ipath/ipath_verbs.c b/drivers/staging/rdma/ipath/ipath_verbs.c
index ed2bbc2f7eae..15633ec1843f 100644
--- a/drivers/staging/rdma/ipath/ipath_verbs.c
+++ b/drivers/staging/rdma/ipath/ipath_verbs.c
@@ -374,7 +374,7 @@ static int ipath_post_one_send(struct ipath_qp *qp, struct ib_send_wr *wr)
374 wr->opcode != IB_WR_SEND_WITH_IMM) 374 wr->opcode != IB_WR_SEND_WITH_IMM)
375 goto bail_inval; 375 goto bail_inval;
376 /* Check UD destination address PD */ 376 /* Check UD destination address PD */
377 if (qp->ibqp.pd != wr->wr.ud.ah->pd) 377 if (qp->ibqp.pd != ud_wr(wr)->ah->pd)
378 goto bail_inval; 378 goto bail_inval;
379 } else if ((unsigned) wr->opcode > IB_WR_ATOMIC_FETCH_AND_ADD) 379 } else if ((unsigned) wr->opcode > IB_WR_ATOMIC_FETCH_AND_ADD)
380 goto bail_inval; 380 goto bail_inval;
@@ -395,7 +395,23 @@ static int ipath_post_one_send(struct ipath_qp *qp, struct ib_send_wr *wr)
395 } 395 }
396 396
397 wqe = get_swqe_ptr(qp, qp->s_head); 397 wqe = get_swqe_ptr(qp, qp->s_head);
398 wqe->wr = *wr; 398
399 if (qp->ibqp.qp_type != IB_QPT_UC &&
400 qp->ibqp.qp_type != IB_QPT_RC)
401 memcpy(&wqe->ud_wr, ud_wr(wr), sizeof(wqe->ud_wr));
402 else if (wr->opcode == IB_WR_FAST_REG_MR)
403 memcpy(&wqe->fast_reg_wr, fast_reg_wr(wr),
404 sizeof(wqe->fast_reg_wr));
405 else if (wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM ||
406 wr->opcode == IB_WR_RDMA_WRITE ||
407 wr->opcode == IB_WR_RDMA_READ)
408 memcpy(&wqe->rdma_wr, rdma_wr(wr), sizeof(wqe->rdma_wr));
409 else if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
410 wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD)
411 memcpy(&wqe->atomic_wr, atomic_wr(wr), sizeof(wqe->atomic_wr));
412 else
413 memcpy(&wqe->wr, wr, sizeof(wqe->wr));
414
399 wqe->length = 0; 415 wqe->length = 0;
400 if (wr->num_sge) { 416 if (wr->num_sge) {
401 acc = wr->opcode >= IB_WR_RDMA_READ ? 417 acc = wr->opcode >= IB_WR_RDMA_READ ?
diff --git a/drivers/staging/rdma/ipath/ipath_verbs.h b/drivers/staging/rdma/ipath/ipath_verbs.h
index ec167e545e15..ed102a26ec08 100644
--- a/drivers/staging/rdma/ipath/ipath_verbs.h
+++ b/drivers/staging/rdma/ipath/ipath_verbs.h
@@ -277,7 +277,14 @@ struct ipath_mr {
277 * in qp->s_max_sge. 277 * in qp->s_max_sge.
278 */ 278 */
279struct ipath_swqe { 279struct ipath_swqe {
280 struct ib_send_wr wr; /* don't use wr.sg_list */ 280 union {
281 struct ib_send_wr wr; /* don't use wr.sg_list */
282 struct ib_ud_wr ud_wr;
283 struct ib_fast_reg_wr fast_reg_wr;
284 struct ib_rdma_wr rdma_wr;
285 struct ib_atomic_wr atomic_wr;
286 };
287
281 u32 psn; /* first packet sequence number */ 288 u32 psn; /* first packet sequence number */
282 u32 lpsn; /* last packet sequence number */ 289 u32 lpsn; /* last packet sequence number */
283 u32 ssn; /* send sequence number */ 290 u32 ssn; /* send sequence number */
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index e4cc389c43cb..85103aff909b 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -1096,54 +1096,93 @@ struct ib_send_wr {
1096 __be32 imm_data; 1096 __be32 imm_data;
1097 u32 invalidate_rkey; 1097 u32 invalidate_rkey;
1098 } ex; 1098 } ex;
1099 union {
1100 struct {
1101 u64 remote_addr;
1102 u32 rkey;
1103 } rdma;
1104 struct {
1105 u64 remote_addr;
1106 u64 compare_add;
1107 u64 swap;
1108 u64 compare_add_mask;
1109 u64 swap_mask;
1110 u32 rkey;
1111 } atomic;
1112 struct {
1113 struct ib_ah *ah;
1114 void *header;
1115 int hlen;
1116 int mss;
1117 u32 remote_qpn;
1118 u32 remote_qkey;
1119 u16 pkey_index; /* valid for GSI only */
1120 u8 port_num; /* valid for DR SMPs on switch only */
1121 } ud;
1122 struct {
1123 u64 iova_start;
1124 struct ib_fast_reg_page_list *page_list;
1125 unsigned int page_shift;
1126 unsigned int page_list_len;
1127 u32 length;
1128 int access_flags;
1129 u32 rkey;
1130 } fast_reg;
1131 struct {
1132 struct ib_mw *mw;
1133 /* The new rkey for the memory window. */
1134 u32 rkey;
1135 struct ib_mw_bind_info bind_info;
1136 } bind_mw;
1137 struct {
1138 struct ib_sig_attrs *sig_attrs;
1139 struct ib_mr *sig_mr;
1140 int access_flags;
1141 struct ib_sge *prot;
1142 } sig_handover;
1143 } wr;
1144 u32 xrc_remote_srq_num; /* XRC TGT QPs only */
1145}; 1099};
1146 1100
1101struct ib_rdma_wr {
1102 struct ib_send_wr wr;
1103 u64 remote_addr;
1104 u32 rkey;
1105};
1106
1107static inline struct ib_rdma_wr *rdma_wr(struct ib_send_wr *wr)
1108{
1109 return container_of(wr, struct ib_rdma_wr, wr);
1110}
1111
1112struct ib_atomic_wr {
1113 struct ib_send_wr wr;
1114 u64 remote_addr;
1115 u64 compare_add;
1116 u64 swap;
1117 u64 compare_add_mask;
1118 u64 swap_mask;
1119 u32 rkey;
1120};
1121
1122static inline struct ib_atomic_wr *atomic_wr(struct ib_send_wr *wr)
1123{
1124 return container_of(wr, struct ib_atomic_wr, wr);
1125}
1126
1127struct ib_ud_wr {
1128 struct ib_send_wr wr;
1129 struct ib_ah *ah;
1130 void *header;
1131 int hlen;
1132 int mss;
1133 u32 remote_qpn;
1134 u32 remote_qkey;
1135 u16 pkey_index; /* valid for GSI only */
1136 u8 port_num; /* valid for DR SMPs on switch only */
1137};
1138
1139static inline struct ib_ud_wr *ud_wr(struct ib_send_wr *wr)
1140{
1141 return container_of(wr, struct ib_ud_wr, wr);
1142}
1143
1144struct ib_fast_reg_wr {
1145 struct ib_send_wr wr;
1146 u64 iova_start;
1147 struct ib_fast_reg_page_list *page_list;
1148 unsigned int page_shift;
1149 unsigned int page_list_len;
1150 u32 length;
1151 int access_flags;
1152 u32 rkey;
1153};
1154
1155static inline struct ib_fast_reg_wr *fast_reg_wr(struct ib_send_wr *wr)
1156{
1157 return container_of(wr, struct ib_fast_reg_wr, wr);
1158}
1159
1160struct ib_bind_mw_wr {
1161 struct ib_send_wr wr;
1162 struct ib_mw *mw;
1163 /* The new rkey for the memory window. */
1164 u32 rkey;
1165 struct ib_mw_bind_info bind_info;
1166};
1167
1168static inline struct ib_bind_mw_wr *bind_mw_wr(struct ib_send_wr *wr)
1169{
1170 return container_of(wr, struct ib_bind_mw_wr, wr);
1171}
1172
1173struct ib_sig_handover_wr {
1174 struct ib_send_wr wr;
1175 struct ib_sig_attrs *sig_attrs;
1176 struct ib_mr *sig_mr;
1177 int access_flags;
1178 struct ib_sge *prot;
1179};
1180
1181static inline struct ib_sig_handover_wr *sig_handover_wr(struct ib_send_wr *wr)
1182{
1183 return container_of(wr, struct ib_sig_handover_wr, wr);
1184}
1185
1147struct ib_recv_wr { 1186struct ib_recv_wr {
1148 struct ib_recv_wr *next; 1187 struct ib_recv_wr *next;
1149 u64 wr_id; 1188 u64 wr_id;
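To make the new ib_verbs.h API concrete, here is a hedged sketch of both sides of the split: a consumer builds the wrapper and posts its embedded ib_send_wr, and a provider's post_send handler recovers the wrapper with the container_of-based helpers. Function names are illustrative.

/* Consumer side: post an RDMA WRITE through the new ib_rdma_wr wrapper. */
static int example_post_rdma_write(struct ib_qp *qp, struct ib_sge *sge,
				   u64 remote_addr, u32 rkey)
{
	struct ib_rdma_wr wr = {
		.wr = {
			.opcode     = IB_WR_RDMA_WRITE,
			.sg_list    = sge,
			.num_sge    = 1,
			.send_flags = IB_SEND_SIGNALED,
		},
		.remote_addr = remote_addr,
		.rkey        = rkey,
	};
	struct ib_send_wr *bad_wr;

	return ib_post_send(qp, &wr.wr, &bad_wr);
}

/* Provider side: downcast instead of reading the old wr.rdma union. */
static void example_handle_send_wr(struct ib_send_wr *wr)
{
	if (wr->opcode == IB_WR_RDMA_WRITE || wr->opcode == IB_WR_RDMA_READ)
		pr_debug("remote_addr=%llx rkey=%x\n",
			 (unsigned long long)rdma_wr(wr)->remote_addr,
			 rdma_wr(wr)->rkey);
}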
diff --git a/net/rds/ib.h b/net/rds/ib.h
index aae60fda77f6..301c48385166 100644
--- a/net/rds/ib.h
+++ b/net/rds/ib.h
@@ -69,7 +69,11 @@ struct rds_ib_connect_private {
69 69
70struct rds_ib_send_work { 70struct rds_ib_send_work {
71 void *s_op; 71 void *s_op;
72 struct ib_send_wr s_wr; 72 union {
73 struct ib_send_wr s_wr;
74 struct ib_rdma_wr s_rdma_wr;
75 struct ib_atomic_wr s_atomic_wr;
76 };
73 struct ib_sge s_sge[RDS_IB_MAX_SGE]; 77 struct ib_sge s_sge[RDS_IB_MAX_SGE];
74 unsigned long s_queued; 78 unsigned long s_queued;
75}; 79};
diff --git a/net/rds/ib_send.c b/net/rds/ib_send.c
index 4e88047086b6..987386e9931b 100644
--- a/net/rds/ib_send.c
+++ b/net/rds/ib_send.c
@@ -785,23 +785,23 @@ int rds_ib_xmit_atomic(struct rds_connection *conn, struct rm_atomic_op *op)
785 send->s_queued = jiffies; 785 send->s_queued = jiffies;
786 786
787 if (op->op_type == RDS_ATOMIC_TYPE_CSWP) { 787 if (op->op_type == RDS_ATOMIC_TYPE_CSWP) {
788 send->s_wr.opcode = IB_WR_MASKED_ATOMIC_CMP_AND_SWP; 788 send->s_atomic_wr.wr.opcode = IB_WR_MASKED_ATOMIC_CMP_AND_SWP;
789 send->s_wr.wr.atomic.compare_add = op->op_m_cswp.compare; 789 send->s_atomic_wr.compare_add = op->op_m_cswp.compare;
790 send->s_wr.wr.atomic.swap = op->op_m_cswp.swap; 790 send->s_atomic_wr.swap = op->op_m_cswp.swap;
791 send->s_wr.wr.atomic.compare_add_mask = op->op_m_cswp.compare_mask; 791 send->s_atomic_wr.compare_add_mask = op->op_m_cswp.compare_mask;
792 send->s_wr.wr.atomic.swap_mask = op->op_m_cswp.swap_mask; 792 send->s_atomic_wr.swap_mask = op->op_m_cswp.swap_mask;
793 } else { /* FADD */ 793 } else { /* FADD */
794 send->s_wr.opcode = IB_WR_MASKED_ATOMIC_FETCH_AND_ADD; 794 send->s_atomic_wr.wr.opcode = IB_WR_MASKED_ATOMIC_FETCH_AND_ADD;
795 send->s_wr.wr.atomic.compare_add = op->op_m_fadd.add; 795 send->s_atomic_wr.compare_add = op->op_m_fadd.add;
796 send->s_wr.wr.atomic.swap = 0; 796 send->s_atomic_wr.swap = 0;
797 send->s_wr.wr.atomic.compare_add_mask = op->op_m_fadd.nocarry_mask; 797 send->s_atomic_wr.compare_add_mask = op->op_m_fadd.nocarry_mask;
798 send->s_wr.wr.atomic.swap_mask = 0; 798 send->s_atomic_wr.swap_mask = 0;
799 } 799 }
800 nr_sig = rds_ib_set_wr_signal_state(ic, send, op->op_notify); 800 nr_sig = rds_ib_set_wr_signal_state(ic, send, op->op_notify);
801 send->s_wr.num_sge = 1; 801 send->s_atomic_wr.wr.num_sge = 1;
802 send->s_wr.next = NULL; 802 send->s_atomic_wr.wr.next = NULL;
803 send->s_wr.wr.atomic.remote_addr = op->op_remote_addr; 803 send->s_atomic_wr.remote_addr = op->op_remote_addr;
804 send->s_wr.wr.atomic.rkey = op->op_rkey; 804 send->s_atomic_wr.rkey = op->op_rkey;
805 send->s_op = op; 805 send->s_op = op;
806 rds_message_addref(container_of(send->s_op, struct rds_message, atomic)); 806 rds_message_addref(container_of(send->s_op, struct rds_message, atomic));
807 807
@@ -826,11 +826,11 @@ int rds_ib_xmit_atomic(struct rds_connection *conn, struct rm_atomic_op *op)
826 if (nr_sig) 826 if (nr_sig)
827 atomic_add(nr_sig, &ic->i_signaled_sends); 827 atomic_add(nr_sig, &ic->i_signaled_sends);
828 828
829 failed_wr = &send->s_wr; 829 failed_wr = &send->s_atomic_wr.wr;
830 ret = ib_post_send(ic->i_cm_id->qp, &send->s_wr, &failed_wr); 830 ret = ib_post_send(ic->i_cm_id->qp, &send->s_atomic_wr.wr, &failed_wr);
831 rdsdebug("ic %p send %p (wr %p) ret %d wr %p\n", ic, 831 rdsdebug("ic %p send %p (wr %p) ret %d wr %p\n", ic,
832 send, &send->s_wr, ret, failed_wr); 832 send, &send->s_atomic_wr, ret, failed_wr);
833 BUG_ON(failed_wr != &send->s_wr); 833 BUG_ON(failed_wr != &send->s_atomic_wr.wr);
834 if (ret) { 834 if (ret) {
835 printk(KERN_WARNING "RDS/IB: atomic ib_post_send to %pI4 " 835 printk(KERN_WARNING "RDS/IB: atomic ib_post_send to %pI4 "
836 "returned %d\n", &conn->c_faddr, ret); 836 "returned %d\n", &conn->c_faddr, ret);
@@ -839,9 +839,9 @@ int rds_ib_xmit_atomic(struct rds_connection *conn, struct rm_atomic_op *op)
839 goto out; 839 goto out;
840 } 840 }
841 841
842 if (unlikely(failed_wr != &send->s_wr)) { 842 if (unlikely(failed_wr != &send->s_atomic_wr.wr)) {
843 printk(KERN_WARNING "RDS/IB: atomic ib_post_send() rc=%d, but failed_wqe updated!\n", ret); 843 printk(KERN_WARNING "RDS/IB: atomic ib_post_send() rc=%d, but failed_wqe updated!\n", ret);
844 BUG_ON(failed_wr != &send->s_wr); 844 BUG_ON(failed_wr != &send->s_atomic_wr.wr);
845 } 845 }
846 846
847out: 847out:
@@ -912,22 +912,23 @@ int rds_ib_xmit_rdma(struct rds_connection *conn, struct rm_rdma_op *op)
912 nr_sig += rds_ib_set_wr_signal_state(ic, send, op->op_notify); 912 nr_sig += rds_ib_set_wr_signal_state(ic, send, op->op_notify);
913 913
914 send->s_wr.opcode = op->op_write ? IB_WR_RDMA_WRITE : IB_WR_RDMA_READ; 914 send->s_wr.opcode = op->op_write ? IB_WR_RDMA_WRITE : IB_WR_RDMA_READ;
915 send->s_wr.wr.rdma.remote_addr = remote_addr; 915 send->s_rdma_wr.remote_addr = remote_addr;
916 send->s_wr.wr.rdma.rkey = op->op_rkey; 916 send->s_rdma_wr.rkey = op->op_rkey;
917 917
918 if (num_sge > max_sge) { 918 if (num_sge > max_sge) {
919 send->s_wr.num_sge = max_sge; 919 send->s_rdma_wr.wr.num_sge = max_sge;
920 num_sge -= max_sge; 920 num_sge -= max_sge;
921 } else { 921 } else {
922 send->s_wr.num_sge = num_sge; 922 send->s_rdma_wr.wr.num_sge = num_sge;
923 } 923 }
924 924
925 send->s_wr.next = NULL; 925 send->s_rdma_wr.wr.next = NULL;
926 926
927 if (prev) 927 if (prev)
928 prev->s_wr.next = &send->s_wr; 928 prev->s_rdma_wr.wr.next = &send->s_rdma_wr.wr;
929 929
930 for (j = 0; j < send->s_wr.num_sge && scat != &op->op_sg[op->op_count]; j++) { 930 for (j = 0; j < send->s_rdma_wr.wr.num_sge &&
931 scat != &op->op_sg[op->op_count]; j++) {
931 len = ib_sg_dma_len(ic->i_cm_id->device, scat); 932 len = ib_sg_dma_len(ic->i_cm_id->device, scat);
932 send->s_sge[j].addr = 933 send->s_sge[j].addr =
933 ib_sg_dma_address(ic->i_cm_id->device, scat); 934 ib_sg_dma_address(ic->i_cm_id->device, scat);
@@ -942,7 +943,9 @@ int rds_ib_xmit_rdma(struct rds_connection *conn, struct rm_rdma_op *op)
942 } 943 }
943 944
944 rdsdebug("send %p wr %p num_sge %u next %p\n", send, 945 rdsdebug("send %p wr %p num_sge %u next %p\n", send,
945 &send->s_wr, send->s_wr.num_sge, send->s_wr.next); 946 &send->s_rdma_wr.wr,
947 send->s_rdma_wr.wr.num_sge,
948 send->s_rdma_wr.wr.next);
946 949
947 prev = send; 950 prev = send;
948 if (++send == &ic->i_sends[ic->i_send_ring.w_nr]) 951 if (++send == &ic->i_sends[ic->i_send_ring.w_nr])
@@ -963,11 +966,11 @@ int rds_ib_xmit_rdma(struct rds_connection *conn, struct rm_rdma_op *op)
963 if (nr_sig) 966 if (nr_sig)
964 atomic_add(nr_sig, &ic->i_signaled_sends); 967 atomic_add(nr_sig, &ic->i_signaled_sends);
965 968
966 failed_wr = &first->s_wr; 969 failed_wr = &first->s_rdma_wr.wr;
967 ret = ib_post_send(ic->i_cm_id->qp, &first->s_wr, &failed_wr); 970 ret = ib_post_send(ic->i_cm_id->qp, &first->s_rdma_wr.wr, &failed_wr);
968 rdsdebug("ic %p first %p (wr %p) ret %d wr %p\n", ic, 971 rdsdebug("ic %p first %p (wr %p) ret %d wr %p\n", ic,
969 first, &first->s_wr, ret, failed_wr); 972 first, &first->s_rdma_wr.wr, ret, failed_wr);
970 BUG_ON(failed_wr != &first->s_wr); 973 BUG_ON(failed_wr != &first->s_rdma_wr.wr);
971 if (ret) { 974 if (ret) {
972 printk(KERN_WARNING "RDS/IB: rdma ib_post_send to %pI4 " 975 printk(KERN_WARNING "RDS/IB: rdma ib_post_send to %pI4 "
973 "returned %d\n", &conn->c_faddr, ret); 976 "returned %d\n", &conn->c_faddr, ret);
@@ -976,9 +979,9 @@ int rds_ib_xmit_rdma(struct rds_connection *conn, struct rm_rdma_op *op)
976 goto out; 979 goto out;
977 } 980 }
978 981
979 if (unlikely(failed_wr != &first->s_wr)) { 982 if (unlikely(failed_wr != &first->s_rdma_wr.wr)) {
980 printk(KERN_WARNING "RDS/IB: ib_post_send() rc=%d, but failed_wqe updated!\n", ret); 983 printk(KERN_WARNING "RDS/IB: ib_post_send() rc=%d, but failed_wqe updated!\n", ret);
981 BUG_ON(failed_wr != &first->s_wr); 984 BUG_ON(failed_wr != &first->s_rdma_wr.wr);
982 } 985 }
983 986
984 987
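The RDS atomic path above reduces to filling an ib_atomic_wr before posting its embedded work request. A minimal sketch of the masked compare-and-swap case, with plain parameters standing in for the rm_atomic_op fields:

static void example_fill_masked_cswp(struct ib_atomic_wr *wr,
				     u64 compare, u64 compare_mask,
				     u64 swap, u64 swap_mask,
				     u64 remote_addr, u32 rkey)
{
	wr->wr.opcode        = IB_WR_MASKED_ATOMIC_CMP_AND_SWP;
	wr->wr.num_sge       = 1;
	wr->wr.next          = NULL;
	wr->compare_add      = compare;
	wr->compare_add_mask = compare_mask;
	wr->swap             = swap;
	wr->swap_mask        = swap_mask;
	wr->remote_addr      = remote_addr;
	wr->rkey             = rkey;
}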
diff --git a/net/rds/iw.h b/net/rds/iw.h
index cbe6674e31ee..fe858e5dd8d1 100644
--- a/net/rds/iw.h
+++ b/net/rds/iw.h
@@ -77,7 +77,11 @@ struct rds_iw_send_work {
77 struct ib_fast_reg_page_list *s_page_list; 77 struct ib_fast_reg_page_list *s_page_list;
78 unsigned char s_remap_count; 78 unsigned char s_remap_count;
79 79
80 struct ib_send_wr s_wr; 80 union {
81 struct ib_send_wr s_send_wr;
82 struct ib_rdma_wr s_rdma_wr;
83 struct ib_fast_reg_wr s_fast_reg_wr;
84 };
81 struct ib_sge s_sge[RDS_IW_MAX_SGE]; 85 struct ib_sge s_sge[RDS_IW_MAX_SGE];
82 unsigned long s_queued; 86 unsigned long s_queued;
83}; 87};
diff --git a/net/rds/iw_rdma.c b/net/rds/iw_rdma.c
index 6a8fbd6e69e7..f8a612cc69e6 100644
--- a/net/rds/iw_rdma.c
+++ b/net/rds/iw_rdma.c
@@ -696,7 +696,8 @@ static int rds_iw_init_fastreg(struct rds_iw_mr_pool *pool,
696static int rds_iw_rdma_build_fastreg(struct rds_iw_mapping *mapping) 696static int rds_iw_rdma_build_fastreg(struct rds_iw_mapping *mapping)
697{ 697{
698 struct rds_iw_mr *ibmr = mapping->m_mr; 698 struct rds_iw_mr *ibmr = mapping->m_mr;
699 struct ib_send_wr f_wr, *failed_wr; 699 struct ib_fast_reg_wr f_wr;
700 struct ib_send_wr *failed_wr;
700 int ret; 701 int ret;
701 702
702 /* 703 /*
@@ -709,22 +710,22 @@ static int rds_iw_rdma_build_fastreg(struct rds_iw_mapping *mapping)
709 mapping->m_rkey = ibmr->mr->rkey; 710 mapping->m_rkey = ibmr->mr->rkey;
710 711
711 memset(&f_wr, 0, sizeof(f_wr)); 712 memset(&f_wr, 0, sizeof(f_wr));
712 f_wr.wr_id = RDS_IW_FAST_REG_WR_ID; 713 f_wr.wr.wr_id = RDS_IW_FAST_REG_WR_ID;
713 f_wr.opcode = IB_WR_FAST_REG_MR; 714 f_wr.wr.opcode = IB_WR_FAST_REG_MR;
714 f_wr.wr.fast_reg.length = mapping->m_sg.bytes; 715 f_wr.length = mapping->m_sg.bytes;
715 f_wr.wr.fast_reg.rkey = mapping->m_rkey; 716 f_wr.rkey = mapping->m_rkey;
716 f_wr.wr.fast_reg.page_list = ibmr->page_list; 717 f_wr.page_list = ibmr->page_list;
717 f_wr.wr.fast_reg.page_list_len = mapping->m_sg.dma_len; 718 f_wr.page_list_len = mapping->m_sg.dma_len;
718 f_wr.wr.fast_reg.page_shift = PAGE_SHIFT; 719 f_wr.page_shift = PAGE_SHIFT;
719 f_wr.wr.fast_reg.access_flags = IB_ACCESS_LOCAL_WRITE | 720 f_wr.access_flags = IB_ACCESS_LOCAL_WRITE |
720 IB_ACCESS_REMOTE_READ | 721 IB_ACCESS_REMOTE_READ |
721 IB_ACCESS_REMOTE_WRITE; 722 IB_ACCESS_REMOTE_WRITE;
722 f_wr.wr.fast_reg.iova_start = 0; 723 f_wr.iova_start = 0;
723 f_wr.send_flags = IB_SEND_SIGNALED; 724 f_wr.wr.send_flags = IB_SEND_SIGNALED;
724 725
725 failed_wr = &f_wr; 726 failed_wr = &f_wr.wr;
726 ret = ib_post_send(ibmr->cm_id->qp, &f_wr, &failed_wr); 727 ret = ib_post_send(ibmr->cm_id->qp, &f_wr.wr, &failed_wr);
727 BUG_ON(failed_wr != &f_wr); 728 BUG_ON(failed_wr != &f_wr.wr);
728 if (ret) 729 if (ret)
729 printk_ratelimited(KERN_WARNING "RDS/IW: %s:%d ib_post_send returned %d\n", 730 printk_ratelimited(KERN_WARNING "RDS/IW: %s:%d ib_post_send returned %d\n",
730 __func__, __LINE__, ret); 731 __func__, __LINE__, ret);
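
In the converted rds_iw_rdma_build_fastreg() the registration parameters move from the old wr.fast_reg sub-union to first-class members of struct ib_fast_reg_wr, while the generic fields stay under the embedded f_wr.wr. The patch keeps the existing memset() style; purely as an illustration, the same setup could be written with designated initializers (equivalent, not what the patch does):

        struct ib_fast_reg_wr f_wr = {
                .wr.wr_id       = RDS_IW_FAST_REG_WR_ID,
                .wr.opcode      = IB_WR_FAST_REG_MR,
                .wr.send_flags  = IB_SEND_SIGNALED,
                .length         = mapping->m_sg.bytes,
                .rkey           = mapping->m_rkey,
                .page_list      = ibmr->page_list,
                .page_list_len  = mapping->m_sg.dma_len,
                .page_shift     = PAGE_SHIFT,
                .access_flags   = IB_ACCESS_LOCAL_WRITE |
                                  IB_ACCESS_REMOTE_READ |
                                  IB_ACCESS_REMOTE_WRITE,
                .iova_start     = 0,
        };
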
diff --git a/net/rds/iw_send.c b/net/rds/iw_send.c
index 86152ec3b887..f6e23c515b44 100644
--- a/net/rds/iw_send.c
+++ b/net/rds/iw_send.c
@@ -137,13 +137,13 @@ void rds_iw_send_init_ring(struct rds_iw_connection *ic)
137 send->s_op = NULL; 137 send->s_op = NULL;
138 send->s_mapping = NULL; 138 send->s_mapping = NULL;
139 139
140 send->s_wr.next = NULL; 140 send->s_send_wr.next = NULL;
141 send->s_wr.wr_id = i; 141 send->s_send_wr.wr_id = i;
142 send->s_wr.sg_list = send->s_sge; 142 send->s_send_wr.sg_list = send->s_sge;
143 send->s_wr.num_sge = 1; 143 send->s_send_wr.num_sge = 1;
144 send->s_wr.opcode = IB_WR_SEND; 144 send->s_send_wr.opcode = IB_WR_SEND;
145 send->s_wr.send_flags = 0; 145 send->s_send_wr.send_flags = 0;
146 send->s_wr.ex.imm_data = 0; 146 send->s_send_wr.ex.imm_data = 0;
147 147
148 sge = rds_iw_data_sge(ic, send->s_sge); 148 sge = rds_iw_data_sge(ic, send->s_sge);
149 sge->lkey = 0; 149 sge->lkey = 0;
@@ -179,7 +179,7 @@ void rds_iw_send_clear_ring(struct rds_iw_connection *ic)
179 ib_dereg_mr(send->s_mr); 179 ib_dereg_mr(send->s_mr);
180 BUG_ON(!send->s_page_list); 180 BUG_ON(!send->s_page_list);
181 ib_free_fast_reg_page_list(send->s_page_list); 181 ib_free_fast_reg_page_list(send->s_page_list);
182 if (send->s_wr.opcode == 0xdead) 182 if (send->s_send_wr.opcode == 0xdead)
183 continue; 183 continue;
184 if (send->s_rm) 184 if (send->s_rm)
185 rds_iw_send_unmap_rm(ic, send, IB_WC_WR_FLUSH_ERR); 185 rds_iw_send_unmap_rm(ic, send, IB_WC_WR_FLUSH_ERR);
@@ -247,7 +247,7 @@ void rds_iw_send_cq_comp_handler(struct ib_cq *cq, void *context)
247 send = &ic->i_sends[oldest]; 247 send = &ic->i_sends[oldest];
248 248
249 /* In the error case, wc.opcode sometimes contains garbage */ 249 /* In the error case, wc.opcode sometimes contains garbage */
250 switch (send->s_wr.opcode) { 250 switch (send->s_send_wr.opcode) {
251 case IB_WR_SEND: 251 case IB_WR_SEND:
252 if (send->s_rm) 252 if (send->s_rm)
253 rds_iw_send_unmap_rm(ic, send, wc.status); 253 rds_iw_send_unmap_rm(ic, send, wc.status);
@@ -262,12 +262,12 @@ void rds_iw_send_cq_comp_handler(struct ib_cq *cq, void *context)
262 default: 262 default:
263 printk_ratelimited(KERN_NOTICE 263 printk_ratelimited(KERN_NOTICE
264 "RDS/IW: %s: unexpected opcode 0x%x in WR!\n", 264 "RDS/IW: %s: unexpected opcode 0x%x in WR!\n",
265 __func__, send->s_wr.opcode); 265 __func__, send->s_send_wr.opcode);
266 break; 266 break;
267 } 267 }
268 268
269 send->s_wr.opcode = 0xdead; 269 send->s_send_wr.opcode = 0xdead;
270 send->s_wr.num_sge = 1; 270 send->s_send_wr.num_sge = 1;
271 if (time_after(jiffies, send->s_queued + HZ/2)) 271 if (time_after(jiffies, send->s_queued + HZ/2))
272 rds_iw_stats_inc(s_iw_tx_stalled); 272 rds_iw_stats_inc(s_iw_tx_stalled);
273 273
@@ -455,10 +455,10 @@ rds_iw_xmit_populate_wr(struct rds_iw_connection *ic,
455 455
456 WARN_ON(pos != send - ic->i_sends); 456 WARN_ON(pos != send - ic->i_sends);
457 457
458 send->s_wr.send_flags = send_flags; 458 send->s_send_wr.send_flags = send_flags;
459 send->s_wr.opcode = IB_WR_SEND; 459 send->s_send_wr.opcode = IB_WR_SEND;
460 send->s_wr.num_sge = 2; 460 send->s_send_wr.num_sge = 2;
461 send->s_wr.next = NULL; 461 send->s_send_wr.next = NULL;
462 send->s_queued = jiffies; 462 send->s_queued = jiffies;
463 send->s_op = NULL; 463 send->s_op = NULL;
464 464
@@ -472,7 +472,7 @@ rds_iw_xmit_populate_wr(struct rds_iw_connection *ic,
472 } else { 472 } else {
473 /* We're sending a packet with no payload. There is only 473 /* We're sending a packet with no payload. There is only
474 * one SGE */ 474 * one SGE */
475 send->s_wr.num_sge = 1; 475 send->s_send_wr.num_sge = 1;
476 sge = &send->s_sge[0]; 476 sge = &send->s_sge[0];
477 } 477 }
478 478
@@ -672,23 +672,23 @@ int rds_iw_xmit(struct rds_connection *conn, struct rds_message *rm,
672 */ 672 */
673 if (ic->i_unsignaled_wrs-- == 0) { 673 if (ic->i_unsignaled_wrs-- == 0) {
674 ic->i_unsignaled_wrs = rds_iw_sysctl_max_unsig_wrs; 674 ic->i_unsignaled_wrs = rds_iw_sysctl_max_unsig_wrs;
675 send->s_wr.send_flags |= IB_SEND_SIGNALED | IB_SEND_SOLICITED; 675 send->s_send_wr.send_flags |= IB_SEND_SIGNALED | IB_SEND_SOLICITED;
676 } 676 }
677 677
678 ic->i_unsignaled_bytes -= len; 678 ic->i_unsignaled_bytes -= len;
679 if (ic->i_unsignaled_bytes <= 0) { 679 if (ic->i_unsignaled_bytes <= 0) {
680 ic->i_unsignaled_bytes = rds_iw_sysctl_max_unsig_bytes; 680 ic->i_unsignaled_bytes = rds_iw_sysctl_max_unsig_bytes;
681 send->s_wr.send_flags |= IB_SEND_SIGNALED | IB_SEND_SOLICITED; 681 send->s_send_wr.send_flags |= IB_SEND_SIGNALED | IB_SEND_SOLICITED;
682 } 682 }
683 683
684 /* 684 /*
685 * Always signal the last one if we're stopping due to flow control. 685 * Always signal the last one if we're stopping due to flow control.
686 */ 686 */
687 if (flow_controlled && i == (work_alloc-1)) 687 if (flow_controlled && i == (work_alloc-1))
688 send->s_wr.send_flags |= IB_SEND_SIGNALED | IB_SEND_SOLICITED; 688 send->s_send_wr.send_flags |= IB_SEND_SIGNALED | IB_SEND_SOLICITED;
689 689
690 rdsdebug("send %p wr %p num_sge %u next %p\n", send, 690 rdsdebug("send %p wr %p num_sge %u next %p\n", send,
691 &send->s_wr, send->s_wr.num_sge, send->s_wr.next); 691 &send->s_send_wr, send->s_send_wr.num_sge, send->s_send_wr.next);
692 692
693 sent += len; 693 sent += len;
694 rm->data.op_dmaoff += len; 694 rm->data.op_dmaoff += len;
@@ -722,7 +722,7 @@ add_header:
722 } 722 }
723 723
724 if (prev) 724 if (prev)
725 prev->s_wr.next = &send->s_wr; 725 prev->s_send_wr.next = &send->s_send_wr;
726 prev = send; 726 prev = send;
727 727
728 pos = (pos + 1) % ic->i_send_ring.w_nr; 728 pos = (pos + 1) % ic->i_send_ring.w_nr;
@@ -736,7 +736,7 @@ add_header:
736 /* if we finished the message then send completion owns it */ 736 /* if we finished the message then send completion owns it */
737 if (scat == &rm->data.op_sg[rm->data.op_count]) { 737 if (scat == &rm->data.op_sg[rm->data.op_count]) {
738 prev->s_rm = ic->i_rm; 738 prev->s_rm = ic->i_rm;
739 prev->s_wr.send_flags |= IB_SEND_SIGNALED | IB_SEND_SOLICITED; 739 prev->s_send_wr.send_flags |= IB_SEND_SIGNALED | IB_SEND_SOLICITED;
740 ic->i_rm = NULL; 740 ic->i_rm = NULL;
741 } 741 }
742 742
@@ -748,11 +748,11 @@ add_header:
748 rds_iw_send_add_credits(conn, credit_alloc - i); 748 rds_iw_send_add_credits(conn, credit_alloc - i);
749 749
750 /* XXX need to worry about failed_wr and partial sends. */ 750 /* XXX need to worry about failed_wr and partial sends. */
751 failed_wr = &first->s_wr; 751 failed_wr = &first->s_send_wr;
752 ret = ib_post_send(ic->i_cm_id->qp, &first->s_wr, &failed_wr); 752 ret = ib_post_send(ic->i_cm_id->qp, &first->s_send_wr, &failed_wr);
753 rdsdebug("ic %p first %p (wr %p) ret %d wr %p\n", ic, 753 rdsdebug("ic %p first %p (wr %p) ret %d wr %p\n", ic,
754 first, &first->s_wr, ret, failed_wr); 754 first, &first->s_send_wr, ret, failed_wr);
755 BUG_ON(failed_wr != &first->s_wr); 755 BUG_ON(failed_wr != &first->s_send_wr);
756 if (ret) { 756 if (ret) {
757 printk(KERN_WARNING "RDS/IW: ib_post_send to %pI4 " 757 printk(KERN_WARNING "RDS/IW: ib_post_send to %pI4 "
758 "returned %d\n", &conn->c_faddr, ret); 758 "returned %d\n", &conn->c_faddr, ret);
@@ -778,14 +778,14 @@ static void rds_iw_build_send_fastreg(struct rds_iw_device *rds_iwdev, struct rd
778 * in the sg list is added to the fast reg page list and placed 778 * in the sg list is added to the fast reg page list and placed
779 * inside the fast_reg_mr WR. 779 * inside the fast_reg_mr WR.
780 */ 780 */
781 send->s_wr.opcode = IB_WR_FAST_REG_MR; 781 send->s_fast_reg_wr.wr.opcode = IB_WR_FAST_REG_MR;
782 send->s_wr.wr.fast_reg.length = len; 782 send->s_fast_reg_wr.length = len;
783 send->s_wr.wr.fast_reg.rkey = send->s_mr->rkey; 783 send->s_fast_reg_wr.rkey = send->s_mr->rkey;
784 send->s_wr.wr.fast_reg.page_list = send->s_page_list; 784 send->s_fast_reg_wr.page_list = send->s_page_list;
785 send->s_wr.wr.fast_reg.page_list_len = nent; 785 send->s_fast_reg_wr.page_list_len = nent;
786 send->s_wr.wr.fast_reg.page_shift = PAGE_SHIFT; 786 send->s_fast_reg_wr.page_shift = PAGE_SHIFT;
787 send->s_wr.wr.fast_reg.access_flags = IB_ACCESS_REMOTE_WRITE; 787 send->s_fast_reg_wr.access_flags = IB_ACCESS_REMOTE_WRITE;
788 send->s_wr.wr.fast_reg.iova_start = sg_addr; 788 send->s_fast_reg_wr.iova_start = sg_addr;
789 789
790 ib_update_fast_reg_key(send->s_mr, send->s_remap_count++); 790 ib_update_fast_reg_key(send->s_mr, send->s_remap_count++);
791} 791}
@@ -863,7 +863,7 @@ int rds_iw_xmit_rdma(struct rds_connection *conn, struct rm_rdma_op *op)
863 num_sge = op->op_count; 863 num_sge = op->op_count;
864 864
865 for (i = 0; i < work_alloc && scat != &op->op_sg[op->op_count]; i++) { 865 for (i = 0; i < work_alloc && scat != &op->op_sg[op->op_count]; i++) {
866 send->s_wr.send_flags = 0; 866 send->s_rdma_wr.wr.send_flags = 0;
867 send->s_queued = jiffies; 867 send->s_queued = jiffies;
868 868
869 /* 869 /*
@@ -872,7 +872,7 @@ int rds_iw_xmit_rdma(struct rds_connection *conn, struct rm_rdma_op *op)
872 */ 872 */
873 if (ic->i_unsignaled_wrs-- == 0) { 873 if (ic->i_unsignaled_wrs-- == 0) {
874 ic->i_unsignaled_wrs = rds_iw_sysctl_max_unsig_wrs; 874 ic->i_unsignaled_wrs = rds_iw_sysctl_max_unsig_wrs;
875 send->s_wr.send_flags = IB_SEND_SIGNALED; 875 send->s_rdma_wr.wr.send_flags = IB_SEND_SIGNALED;
876 } 876 }
877 877
878 /* To avoid the need to have the plumbing to invalidate the fastreg_mr used 878 /* To avoid the need to have the plumbing to invalidate the fastreg_mr used
@@ -880,29 +880,30 @@ int rds_iw_xmit_rdma(struct rds_connection *conn, struct rm_rdma_op *op)
880 * IB_WR_RDMA_READ_WITH_INV will invalidate it after the read has completed. 880 * IB_WR_RDMA_READ_WITH_INV will invalidate it after the read has completed.
881 */ 881 */
882 if (op->op_write) 882 if (op->op_write)
883 send->s_wr.opcode = IB_WR_RDMA_WRITE; 883 send->s_rdma_wr.wr.opcode = IB_WR_RDMA_WRITE;
884 else 884 else
885 send->s_wr.opcode = IB_WR_RDMA_READ_WITH_INV; 885 send->s_rdma_wr.wr.opcode = IB_WR_RDMA_READ_WITH_INV;
886 886
887 send->s_wr.wr.rdma.remote_addr = remote_addr; 887 send->s_rdma_wr.remote_addr = remote_addr;
888 send->s_wr.wr.rdma.rkey = op->op_rkey; 888 send->s_rdma_wr.rkey = op->op_rkey;
889 send->s_op = op; 889 send->s_op = op;
890 890
891 if (num_sge > rds_iwdev->max_sge) { 891 if (num_sge > rds_iwdev->max_sge) {
892 send->s_wr.num_sge = rds_iwdev->max_sge; 892 send->s_rdma_wr.wr.num_sge = rds_iwdev->max_sge;
893 num_sge -= rds_iwdev->max_sge; 893 num_sge -= rds_iwdev->max_sge;
894 } else 894 } else
895 send->s_wr.num_sge = num_sge; 895 send->s_rdma_wr.wr.num_sge = num_sge;
896 896
897 send->s_wr.next = NULL; 897 send->s_rdma_wr.wr.next = NULL;
898 898
899 if (prev) 899 if (prev)
900 prev->s_wr.next = &send->s_wr; 900 prev->s_send_wr.next = &send->s_rdma_wr.wr;
901 901
902 for (j = 0; j < send->s_wr.num_sge && scat != &op->op_sg[op->op_count]; j++) { 902 for (j = 0; j < send->s_rdma_wr.wr.num_sge &&
903 scat != &op->op_sg[op->op_count]; j++) {
903 len = ib_sg_dma_len(ic->i_cm_id->device, scat); 904 len = ib_sg_dma_len(ic->i_cm_id->device, scat);
904 905
905 if (send->s_wr.opcode == IB_WR_RDMA_READ_WITH_INV) 906 if (send->s_rdma_wr.wr.opcode == IB_WR_RDMA_READ_WITH_INV)
906 send->s_page_list->page_list[j] = ib_sg_dma_address(ic->i_cm_id->device, scat); 907 send->s_page_list->page_list[j] = ib_sg_dma_address(ic->i_cm_id->device, scat);
907 else { 908 else {
908 send->s_sge[j].addr = ib_sg_dma_address(ic->i_cm_id->device, scat); 909 send->s_sge[j].addr = ib_sg_dma_address(ic->i_cm_id->device, scat);
@@ -917,15 +918,17 @@ int rds_iw_xmit_rdma(struct rds_connection *conn, struct rm_rdma_op *op)
917 scat++; 918 scat++;
918 } 919 }
919 920
920 if (send->s_wr.opcode == IB_WR_RDMA_READ_WITH_INV) { 921 if (send->s_rdma_wr.wr.opcode == IB_WR_RDMA_READ_WITH_INV) {
921 send->s_wr.num_sge = 1; 922 send->s_rdma_wr.wr.num_sge = 1;
922 send->s_sge[0].addr = conn->c_xmit_rm->m_rs->rs_user_addr; 923 send->s_sge[0].addr = conn->c_xmit_rm->m_rs->rs_user_addr;
923 send->s_sge[0].length = conn->c_xmit_rm->m_rs->rs_user_bytes; 924 send->s_sge[0].length = conn->c_xmit_rm->m_rs->rs_user_bytes;
924 send->s_sge[0].lkey = ic->i_sends[fr_pos].s_mr->lkey; 925 send->s_sge[0].lkey = ic->i_sends[fr_pos].s_mr->lkey;
925 } 926 }
926 927
927 rdsdebug("send %p wr %p num_sge %u next %p\n", send, 928 rdsdebug("send %p wr %p num_sge %u next %p\n", send,
928 &send->s_wr, send->s_wr.num_sge, send->s_wr.next); 929 &send->s_rdma_wr,
930 send->s_rdma_wr.wr.num_sge,
931 send->s_rdma_wr.wr.next);
929 932
930 prev = send; 933 prev = send;
931 if (++send == &ic->i_sends[ic->i_send_ring.w_nr]) 934 if (++send == &ic->i_sends[ic->i_send_ring.w_nr])
@@ -934,7 +937,7 @@ int rds_iw_xmit_rdma(struct rds_connection *conn, struct rm_rdma_op *op)
934 937
935 /* if we finished the message then send completion owns it */ 938 /* if we finished the message then send completion owns it */
936 if (scat == &op->op_sg[op->op_count]) 939 if (scat == &op->op_sg[op->op_count])
937 first->s_wr.send_flags = IB_SEND_SIGNALED; 940 first->s_rdma_wr.wr.send_flags = IB_SEND_SIGNALED;
938 941
939 if (i < work_alloc) { 942 if (i < work_alloc) {
940 rds_iw_ring_unalloc(&ic->i_send_ring, work_alloc - i); 943 rds_iw_ring_unalloc(&ic->i_send_ring, work_alloc - i);
@@ -953,11 +956,11 @@ int rds_iw_xmit_rdma(struct rds_connection *conn, struct rm_rdma_op *op)
953 work_alloc++; 956 work_alloc++;
954 } 957 }
955 958
956 failed_wr = &first->s_wr; 959 failed_wr = &first->s_rdma_wr.wr;
957 ret = ib_post_send(ic->i_cm_id->qp, &first->s_wr, &failed_wr); 960 ret = ib_post_send(ic->i_cm_id->qp, &first->s_rdma_wr.wr, &failed_wr);
958 rdsdebug("ic %p first %p (wr %p) ret %d wr %p\n", ic, 961 rdsdebug("ic %p first %p (wr %p) ret %d wr %p\n", ic,
959 first, &first->s_wr, ret, failed_wr); 962 first, &first->s_rdma_wr, ret, failed_wr);
960 BUG_ON(failed_wr != &first->s_wr); 963 BUG_ON(failed_wr != &first->s_rdma_wr.wr);
961 if (ret) { 964 if (ret) {
962 printk(KERN_WARNING "RDS/IW: rdma ib_post_send to %pI4 " 965 printk(KERN_WARNING "RDS/IW: rdma ib_post_send to %pI4 "
963 "returned %d\n", &conn->c_faddr, ret); 966 "returned %d\n", &conn->c_faddr, ret);
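
One detail worth noting in rds_iw_xmit_rdma(): the chain is linked through prev->s_send_wr.next even though the entries are being filled as s_rdma_wr. That is safe only because the union members overlay the same generic header, so &send->s_send_wr and &send->s_rdma_wr.wr name the same struct ib_send_wr. A minimal sketch of the equivalence being relied on (send and prev are struct rds_iw_send_work pointers as above):

        /* Both expressions point at the same generic WR header ... */
        struct ib_send_wr *a = &send->s_send_wr;
        struct ib_send_wr *b = &send->s_rdma_wr.wr;
        /* ... so linking via either spelling builds the same chain: */
        prev->s_send_wr.next = &send->s_rdma_wr.wr;
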
diff --git a/net/sunrpc/xprtrdma/frwr_ops.c b/net/sunrpc/xprtrdma/frwr_ops.c
index 5318951b3b53..0d2f46f600b6 100644
--- a/net/sunrpc/xprtrdma/frwr_ops.c
+++ b/net/sunrpc/xprtrdma/frwr_ops.c
@@ -312,7 +312,8 @@ frwr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
312 struct rpcrdma_mw *mw; 312 struct rpcrdma_mw *mw;
313 struct rpcrdma_frmr *frmr; 313 struct rpcrdma_frmr *frmr;
314 struct ib_mr *mr; 314 struct ib_mr *mr;
315 struct ib_send_wr fastreg_wr, *bad_wr; 315 struct ib_fast_reg_wr fastreg_wr;
316 struct ib_send_wr *bad_wr;
316 u8 key; 317 u8 key;
317 int len, pageoff; 318 int len, pageoff;
318 int i, rc; 319 int i, rc;
@@ -358,23 +359,23 @@ frwr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
358 __func__, mw, i, len); 359 __func__, mw, i, len);
359 360
360 memset(&fastreg_wr, 0, sizeof(fastreg_wr)); 361 memset(&fastreg_wr, 0, sizeof(fastreg_wr));
361 fastreg_wr.wr_id = (unsigned long)(void *)mw; 362 fastreg_wr.wr.wr_id = (unsigned long)(void *)mw;
362 fastreg_wr.opcode = IB_WR_FAST_REG_MR; 363 fastreg_wr.wr.opcode = IB_WR_FAST_REG_MR;
363 fastreg_wr.wr.fast_reg.iova_start = seg1->mr_dma + pageoff; 364 fastreg_wr.iova_start = seg1->mr_dma + pageoff;
364 fastreg_wr.wr.fast_reg.page_list = frmr->fr_pgl; 365 fastreg_wr.page_list = frmr->fr_pgl;
365 fastreg_wr.wr.fast_reg.page_shift = PAGE_SHIFT; 366 fastreg_wr.page_shift = PAGE_SHIFT;
366 fastreg_wr.wr.fast_reg.page_list_len = page_no; 367 fastreg_wr.page_list_len = page_no;
367 fastreg_wr.wr.fast_reg.length = len; 368 fastreg_wr.length = len;
368 fastreg_wr.wr.fast_reg.access_flags = writing ? 369 fastreg_wr.access_flags = writing ?
369 IB_ACCESS_REMOTE_WRITE | IB_ACCESS_LOCAL_WRITE : 370 IB_ACCESS_REMOTE_WRITE | IB_ACCESS_LOCAL_WRITE :
370 IB_ACCESS_REMOTE_READ; 371 IB_ACCESS_REMOTE_READ;
371 mr = frmr->fr_mr; 372 mr = frmr->fr_mr;
372 key = (u8)(mr->rkey & 0x000000FF); 373 key = (u8)(mr->rkey & 0x000000FF);
373 ib_update_fast_reg_key(mr, ++key); 374 ib_update_fast_reg_key(mr, ++key);
374 fastreg_wr.wr.fast_reg.rkey = mr->rkey; 375 fastreg_wr.rkey = mr->rkey;
375 376
376 DECR_CQCOUNT(&r_xprt->rx_ep); 377 DECR_CQCOUNT(&r_xprt->rx_ep);
377 rc = ib_post_send(ia->ri_id->qp, &fastreg_wr, &bad_wr); 378 rc = ib_post_send(ia->ri_id->qp, &fastreg_wr.wr, &bad_wr);
378 if (rc) 379 if (rc)
379 goto out_senderr; 380 goto out_senderr;
380 381
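
frwr_op_map() shows the other half of the calling convention: ib_post_send() still reports a failure through a plain struct ib_send_wr * (bad_wr), so code that wants the typed view back has to step from the embedded header to its container. A hedged sketch of that recovery using container_of() directly (this series presumably also adds small typed-accessor helpers in ib_verbs.h, but none are visible in these hunks):

        rc = ib_post_send(ia->ri_id->qp, &fastreg_wr.wr, &bad_wr);
        if (rc && bad_wr == &fastreg_wr.wr) {
                /* Recover the typed fast-reg WR from the generic pointer. */
                struct ib_fast_reg_wr *bad_frwr =
                        container_of(bad_wr, struct ib_fast_reg_wr, wr);
                /* bad_frwr == &fastreg_wr here; shown only for illustration. */
        }
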
diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
index cb5174284074..7be42d0da19e 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
@@ -126,7 +126,7 @@ int rdma_read_chunk_lcl(struct svcxprt_rdma *xprt,
126 u64 rs_offset, 126 u64 rs_offset,
127 bool last) 127 bool last)
128{ 128{
129 struct ib_send_wr read_wr; 129 struct ib_rdma_wr read_wr;
130 int pages_needed = PAGE_ALIGN(*page_offset + rs_length) >> PAGE_SHIFT; 130 int pages_needed = PAGE_ALIGN(*page_offset + rs_length) >> PAGE_SHIFT;
131 struct svc_rdma_op_ctxt *ctxt = svc_rdma_get_context(xprt); 131 struct svc_rdma_op_ctxt *ctxt = svc_rdma_get_context(xprt);
132 int ret, read, pno; 132 int ret, read, pno;
@@ -179,16 +179,16 @@ int rdma_read_chunk_lcl(struct svcxprt_rdma *xprt,
179 clear_bit(RDMACTXT_F_LAST_CTXT, &ctxt->flags); 179 clear_bit(RDMACTXT_F_LAST_CTXT, &ctxt->flags);
180 180
181 memset(&read_wr, 0, sizeof(read_wr)); 181 memset(&read_wr, 0, sizeof(read_wr));
182 read_wr.wr_id = (unsigned long)ctxt; 182 read_wr.wr.wr_id = (unsigned long)ctxt;
183 read_wr.opcode = IB_WR_RDMA_READ; 183 read_wr.wr.opcode = IB_WR_RDMA_READ;
184 ctxt->wr_op = read_wr.opcode; 184 ctxt->wr_op = read_wr.wr.opcode;
185 read_wr.send_flags = IB_SEND_SIGNALED; 185 read_wr.wr.send_flags = IB_SEND_SIGNALED;
186 read_wr.wr.rdma.rkey = rs_handle; 186 read_wr.rkey = rs_handle;
187 read_wr.wr.rdma.remote_addr = rs_offset; 187 read_wr.remote_addr = rs_offset;
188 read_wr.sg_list = ctxt->sge; 188 read_wr.wr.sg_list = ctxt->sge;
189 read_wr.num_sge = pages_needed; 189 read_wr.wr.num_sge = pages_needed;
190 190
191 ret = svc_rdma_send(xprt, &read_wr); 191 ret = svc_rdma_send(xprt, &read_wr.wr);
192 if (ret) { 192 if (ret) {
193 pr_err("svcrdma: Error %d posting RDMA_READ\n", ret); 193 pr_err("svcrdma: Error %d posting RDMA_READ\n", ret);
194 set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags); 194 set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags);
@@ -218,9 +218,9 @@ int rdma_read_chunk_frmr(struct svcxprt_rdma *xprt,
218 u64 rs_offset, 218 u64 rs_offset,
219 bool last) 219 bool last)
220{ 220{
221 struct ib_send_wr read_wr; 221 struct ib_rdma_wr read_wr;
222 struct ib_send_wr inv_wr; 222 struct ib_send_wr inv_wr;
223 struct ib_send_wr fastreg_wr; 223 struct ib_fast_reg_wr fastreg_wr;
224 u8 key; 224 u8 key;
225 int pages_needed = PAGE_ALIGN(*page_offset + rs_length) >> PAGE_SHIFT; 225 int pages_needed = PAGE_ALIGN(*page_offset + rs_length) >> PAGE_SHIFT;
226 struct svc_rdma_op_ctxt *ctxt = svc_rdma_get_context(xprt); 226 struct svc_rdma_op_ctxt *ctxt = svc_rdma_get_context(xprt);
@@ -289,31 +289,31 @@ int rdma_read_chunk_frmr(struct svcxprt_rdma *xprt,
289 289
290 /* Prepare FASTREG WR */ 290 /* Prepare FASTREG WR */
291 memset(&fastreg_wr, 0, sizeof(fastreg_wr)); 291 memset(&fastreg_wr, 0, sizeof(fastreg_wr));
292 fastreg_wr.opcode = IB_WR_FAST_REG_MR; 292 fastreg_wr.wr.opcode = IB_WR_FAST_REG_MR;
293 fastreg_wr.send_flags = IB_SEND_SIGNALED; 293 fastreg_wr.wr.send_flags = IB_SEND_SIGNALED;
294 fastreg_wr.wr.fast_reg.iova_start = (unsigned long)frmr->kva; 294 fastreg_wr.iova_start = (unsigned long)frmr->kva;
295 fastreg_wr.wr.fast_reg.page_list = frmr->page_list; 295 fastreg_wr.page_list = frmr->page_list;
296 fastreg_wr.wr.fast_reg.page_list_len = frmr->page_list_len; 296 fastreg_wr.page_list_len = frmr->page_list_len;
297 fastreg_wr.wr.fast_reg.page_shift = PAGE_SHIFT; 297 fastreg_wr.page_shift = PAGE_SHIFT;
298 fastreg_wr.wr.fast_reg.length = frmr->map_len; 298 fastreg_wr.length = frmr->map_len;
299 fastreg_wr.wr.fast_reg.access_flags = frmr->access_flags; 299 fastreg_wr.access_flags = frmr->access_flags;
300 fastreg_wr.wr.fast_reg.rkey = frmr->mr->lkey; 300 fastreg_wr.rkey = frmr->mr->lkey;
301 fastreg_wr.next = &read_wr; 301 fastreg_wr.wr.next = &read_wr.wr;
302 302
303 /* Prepare RDMA_READ */ 303 /* Prepare RDMA_READ */
304 memset(&read_wr, 0, sizeof(read_wr)); 304 memset(&read_wr, 0, sizeof(read_wr));
305 read_wr.send_flags = IB_SEND_SIGNALED; 305 read_wr.wr.send_flags = IB_SEND_SIGNALED;
306 read_wr.wr.rdma.rkey = rs_handle; 306 read_wr.rkey = rs_handle;
307 read_wr.wr.rdma.remote_addr = rs_offset; 307 read_wr.remote_addr = rs_offset;
308 read_wr.sg_list = ctxt->sge; 308 read_wr.wr.sg_list = ctxt->sge;
309 read_wr.num_sge = 1; 309 read_wr.wr.num_sge = 1;
310 if (xprt->sc_dev_caps & SVCRDMA_DEVCAP_READ_W_INV) { 310 if (xprt->sc_dev_caps & SVCRDMA_DEVCAP_READ_W_INV) {
311 read_wr.opcode = IB_WR_RDMA_READ_WITH_INV; 311 read_wr.wr.opcode = IB_WR_RDMA_READ_WITH_INV;
312 read_wr.wr_id = (unsigned long)ctxt; 312 read_wr.wr.wr_id = (unsigned long)ctxt;
313 read_wr.ex.invalidate_rkey = ctxt->frmr->mr->lkey; 313 read_wr.wr.ex.invalidate_rkey = ctxt->frmr->mr->lkey;
314 } else { 314 } else {
315 read_wr.opcode = IB_WR_RDMA_READ; 315 read_wr.wr.opcode = IB_WR_RDMA_READ;
316 read_wr.next = &inv_wr; 316 read_wr.wr.next = &inv_wr;
317 /* Prepare invalidate */ 317 /* Prepare invalidate */
318 memset(&inv_wr, 0, sizeof(inv_wr)); 318 memset(&inv_wr, 0, sizeof(inv_wr));
319 inv_wr.wr_id = (unsigned long)ctxt; 319 inv_wr.wr_id = (unsigned long)ctxt;
@@ -321,10 +321,10 @@ int rdma_read_chunk_frmr(struct svcxprt_rdma *xprt,
321 inv_wr.send_flags = IB_SEND_SIGNALED | IB_SEND_FENCE; 321 inv_wr.send_flags = IB_SEND_SIGNALED | IB_SEND_FENCE;
322 inv_wr.ex.invalidate_rkey = frmr->mr->lkey; 322 inv_wr.ex.invalidate_rkey = frmr->mr->lkey;
323 } 323 }
324 ctxt->wr_op = read_wr.opcode; 324 ctxt->wr_op = read_wr.wr.opcode;
325 325
326 /* Post the chain */ 326 /* Post the chain */
327 ret = svc_rdma_send(xprt, &fastreg_wr); 327 ret = svc_rdma_send(xprt, &fastreg_wr.wr);
328 if (ret) { 328 if (ret) {
329 pr_err("svcrdma: Error %d posting RDMA_READ\n", ret); 329 pr_err("svcrdma: Error %d posting RDMA_READ\n", ret);
330 set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags); 330 set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags);
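
rdma_read_chunk_frmr() now builds a chain that mixes typed and plain work requests; every link goes through the generic header, whether or not the request has a typed wrapper. Condensed sketch of the non-READ_W_INV branch above (same identifiers as the hunk; the invalidate opcode, IB_WR_LOCAL_INV in the upstream code, is set just outside the quoted context):

        fastreg_wr.wr.opcode = IB_WR_FAST_REG_MR;
        fastreg_wr.wr.next   = &read_wr.wr;          /* typed -> typed */

        read_wr.wr.opcode    = IB_WR_RDMA_READ;
        read_wr.wr.next      = &inv_wr;              /* typed -> plain */

        inv_wr.opcode        = IB_WR_LOCAL_INV;      /* plain ib_send_wr */

        ret = svc_rdma_send(xprt, &fastreg_wr.wr);   /* post the whole chain */
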
diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
index 1dfae8317065..969a1ab75fc3 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
@@ -217,7 +217,7 @@ static int send_write(struct svcxprt_rdma *xprt, struct svc_rqst *rqstp,
217 u32 xdr_off, int write_len, 217 u32 xdr_off, int write_len,
218 struct svc_rdma_req_map *vec) 218 struct svc_rdma_req_map *vec)
219{ 219{
220 struct ib_send_wr write_wr; 220 struct ib_rdma_wr write_wr;
221 struct ib_sge *sge; 221 struct ib_sge *sge;
222 int xdr_sge_no; 222 int xdr_sge_no;
223 int sge_no; 223 int sge_no;
@@ -282,17 +282,17 @@ static int send_write(struct svcxprt_rdma *xprt, struct svc_rqst *rqstp,
282 /* Prepare WRITE WR */ 282 /* Prepare WRITE WR */
283 memset(&write_wr, 0, sizeof write_wr); 283 memset(&write_wr, 0, sizeof write_wr);
284 ctxt->wr_op = IB_WR_RDMA_WRITE; 284 ctxt->wr_op = IB_WR_RDMA_WRITE;
285 write_wr.wr_id = (unsigned long)ctxt; 285 write_wr.wr.wr_id = (unsigned long)ctxt;
286 write_wr.sg_list = &sge[0]; 286 write_wr.wr.sg_list = &sge[0];
287 write_wr.num_sge = sge_no; 287 write_wr.wr.num_sge = sge_no;
288 write_wr.opcode = IB_WR_RDMA_WRITE; 288 write_wr.wr.opcode = IB_WR_RDMA_WRITE;
289 write_wr.send_flags = IB_SEND_SIGNALED; 289 write_wr.wr.send_flags = IB_SEND_SIGNALED;
290 write_wr.wr.rdma.rkey = rmr; 290 write_wr.rkey = rmr;
291 write_wr.wr.rdma.remote_addr = to; 291 write_wr.remote_addr = to;
292 292
293 /* Post It */ 293 /* Post It */
294 atomic_inc(&rdma_stat_write); 294 atomic_inc(&rdma_stat_write);
295 if (svc_rdma_send(xprt, &write_wr)) 295 if (svc_rdma_send(xprt, &write_wr.wr))
296 goto err; 296 goto err;
297 return write_len - bc; 297 return write_len - bc;
298 err: 298 err: