author | Christoph Hellwig <hch@lst.de> | 2015-10-08 04:16:33 -0400
committer | Christoph Hellwig <hch@lst.de> | 2015-10-08 06:09:10 -0400
commit | e622f2f4ad2142d2a613a57fb85f8cf737935ef5 (patch)
tree | 19fa458bcaacf3f8b2f5e40676f748afc3df1e84
parent | b8cab5dab15ff5c2acc3faefdde28919b0341c11 (diff)
IB: split struct ib_send_wr
This patch splits up struct ib_send_wr so that all non-trivial verbs
use their own structure which embeds struct ib_send_wr. This dramatically
shrinks the size of a WR for the most common operations:
sizeof(struct ib_send_wr) (old): 96
sizeof(struct ib_send_wr): 48
sizeof(struct ib_rdma_wr): 64
sizeof(struct ib_atomic_wr): 96
sizeof(struct ib_ud_wr): 88
sizeof(struct ib_fast_reg_wr): 88
sizeof(struct ib_bind_mw_wr): 96
sizeof(struct ib_sig_handover_wr): 80
And with Sagi's pending MR rework the fast registration WR will also be
down to a reasonable size:
sizeof(struct ib_fastreg_wr): 64
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Bart Van Assche <bart.vanassche@sandisk.com> [srp, srpt]
Reviewed-by: Chuck Lever <chuck.lever@oracle.com> [sunrpc]
Tested-by: Haggai Eran <haggaie@mellanox.com>
Tested-by: Sagi Grimberg <sagig@mellanox.com>
Tested-by: Steve Wise <swise@opengridcomputing.com>
63 files changed, 1152 insertions, 986 deletions
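For orientation, here is a minimal sketch of the pattern the patch introduces, reconstructed from the usage visible in the hunks below; the authoritative definitions live in include/rdma/ib_verbs.h and may differ in detail. Each non-trivial verb gets its own WR type that embeds struct ib_send_wr as its first field, and a container_of() helper (rdma_wr(), ud_wr(), atomic_wr(), ...) lets drivers recover the specific type from the generic pointer handed to ib_post_send():

/* Illustrative only -- the field layout follows the usage in this diff,
 * not a verbatim copy of the ib_verbs.h definitions. */
struct ib_rdma_wr {
	struct ib_send_wr	wr;		/* generic WR, embedded first */
	u64			remote_addr;	/* verb-specific fields follow */
	u32			rkey;
};

/* Drivers receive a struct ib_send_wr * and upcast via container_of(). */
static inline struct ib_rdma_wr *rdma_wr(struct ib_send_wr *wr)
{
	return container_of(wr, struct ib_rdma_wr, wr);
}

/* post_rdma_write_example() is a hypothetical caller, shown only to
 * illustrate how the embedded generic part is handed to ib_post_send(). */
static int post_rdma_write_example(struct ib_qp *qp, struct ib_sge *sge,
				   u64 remote_addr, u32 rkey)
{
	struct ib_rdma_wr wr = {};
	struct ib_send_wr *bad_wr;

	wr.wr.opcode	 = IB_WR_RDMA_WRITE;
	wr.wr.sg_list	 = sge;
	wr.wr.num_sge	 = 1;
	wr.wr.send_flags = IB_SEND_SIGNALED;
	wr.remote_addr	 = remote_addr;
	wr.rkey		 = rkey;

	return ib_post_send(qp, &wr.wr, &bad_wr);
}

Because only the opcodes that need the extra fields pay for them, the common send path deals with the 48-byte generic structure, which is where the size savings quoted above come from.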
diff --git a/drivers/infiniband/core/agent.c b/drivers/infiniband/core/agent.c
index 0429040304fd..4fa524dfb6cf 100644
--- a/drivers/infiniband/core/agent.c
+++ b/drivers/infiniband/core/agent.c
@@ -126,7 +126,7 @@ void agent_send_response(const struct ib_mad_hdr *mad_hdr, const struct ib_grh * | |||
126 | mad_send_wr = container_of(send_buf, | 126 | mad_send_wr = container_of(send_buf, |
127 | struct ib_mad_send_wr_private, | 127 | struct ib_mad_send_wr_private, |
128 | send_buf); | 128 | send_buf); |
129 | mad_send_wr->send_wr.wr.ud.port_num = port_num; | 129 | mad_send_wr->send_wr.port_num = port_num; |
130 | } | 130 | } |
131 | 131 | ||
132 | if (ib_post_send_mad(send_buf, NULL)) { | 132 | if (ib_post_send_mad(send_buf, NULL)) { |
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index 4b5c72311deb..844d9bb22700 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -752,7 +752,7 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv, | |||
752 | struct ib_device *device = mad_agent_priv->agent.device; | 752 | struct ib_device *device = mad_agent_priv->agent.device; |
753 | u8 port_num; | 753 | u8 port_num; |
754 | struct ib_wc mad_wc; | 754 | struct ib_wc mad_wc; |
755 | struct ib_send_wr *send_wr = &mad_send_wr->send_wr; | 755 | struct ib_ud_wr *send_wr = &mad_send_wr->send_wr; |
756 | size_t mad_size = port_mad_size(mad_agent_priv->qp_info->port_priv); | 756 | size_t mad_size = port_mad_size(mad_agent_priv->qp_info->port_priv); |
757 | u16 out_mad_pkey_index = 0; | 757 | u16 out_mad_pkey_index = 0; |
758 | u16 drslid; | 758 | u16 drslid; |
@@ -761,7 +761,7 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv, | |||
761 | 761 | ||
762 | if (rdma_cap_ib_switch(device) && | 762 | if (rdma_cap_ib_switch(device) && |
763 | smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) | 763 | smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) |
764 | port_num = send_wr->wr.ud.port_num; | 764 | port_num = send_wr->port_num; |
765 | else | 765 | else |
766 | port_num = mad_agent_priv->agent.port_num; | 766 | port_num = mad_agent_priv->agent.port_num; |
767 | 767 | ||
@@ -832,9 +832,9 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv, | |||
832 | } | 832 | } |
833 | 833 | ||
834 | build_smp_wc(mad_agent_priv->agent.qp, | 834 | build_smp_wc(mad_agent_priv->agent.qp, |
835 | send_wr->wr_id, drslid, | 835 | send_wr->wr.wr_id, drslid, |
836 | send_wr->wr.ud.pkey_index, | 836 | send_wr->pkey_index, |
837 | send_wr->wr.ud.port_num, &mad_wc); | 837 | send_wr->port_num, &mad_wc); |
838 | 838 | ||
839 | if (opa && smp->base_version == OPA_MGMT_BASE_VERSION) { | 839 | if (opa && smp->base_version == OPA_MGMT_BASE_VERSION) { |
840 | mad_wc.byte_len = mad_send_wr->send_buf.hdr_len | 840 | mad_wc.byte_len = mad_send_wr->send_buf.hdr_len |
@@ -894,7 +894,7 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv, | |||
894 | 894 | ||
895 | local->mad_send_wr = mad_send_wr; | 895 | local->mad_send_wr = mad_send_wr; |
896 | if (opa) { | 896 | if (opa) { |
897 | local->mad_send_wr->send_wr.wr.ud.pkey_index = out_mad_pkey_index; | 897 | local->mad_send_wr->send_wr.pkey_index = out_mad_pkey_index; |
898 | local->return_wc_byte_len = mad_size; | 898 | local->return_wc_byte_len = mad_size; |
899 | } | 899 | } |
900 | /* Reference MAD agent until send side of local completion handled */ | 900 | /* Reference MAD agent until send side of local completion handled */ |
@@ -1039,14 +1039,14 @@ struct ib_mad_send_buf * ib_create_send_mad(struct ib_mad_agent *mad_agent, | |||
1039 | 1039 | ||
1040 | mad_send_wr->sg_list[1].lkey = mad_agent->qp->pd->local_dma_lkey; | 1040 | mad_send_wr->sg_list[1].lkey = mad_agent->qp->pd->local_dma_lkey; |
1041 | 1041 | ||
1042 | mad_send_wr->send_wr.wr_id = (unsigned long) mad_send_wr; | 1042 | mad_send_wr->send_wr.wr.wr_id = (unsigned long) mad_send_wr; |
1043 | mad_send_wr->send_wr.sg_list = mad_send_wr->sg_list; | 1043 | mad_send_wr->send_wr.wr.sg_list = mad_send_wr->sg_list; |
1044 | mad_send_wr->send_wr.num_sge = 2; | 1044 | mad_send_wr->send_wr.wr.num_sge = 2; |
1045 | mad_send_wr->send_wr.opcode = IB_WR_SEND; | 1045 | mad_send_wr->send_wr.wr.opcode = IB_WR_SEND; |
1046 | mad_send_wr->send_wr.send_flags = IB_SEND_SIGNALED; | 1046 | mad_send_wr->send_wr.wr.send_flags = IB_SEND_SIGNALED; |
1047 | mad_send_wr->send_wr.wr.ud.remote_qpn = remote_qpn; | 1047 | mad_send_wr->send_wr.remote_qpn = remote_qpn; |
1048 | mad_send_wr->send_wr.wr.ud.remote_qkey = IB_QP_SET_QKEY; | 1048 | mad_send_wr->send_wr.remote_qkey = IB_QP_SET_QKEY; |
1049 | mad_send_wr->send_wr.wr.ud.pkey_index = pkey_index; | 1049 | mad_send_wr->send_wr.pkey_index = pkey_index; |
1050 | 1050 | ||
1051 | if (rmpp_active) { | 1051 | if (rmpp_active) { |
1052 | ret = alloc_send_rmpp_list(mad_send_wr, mad_size, gfp_mask); | 1052 | ret = alloc_send_rmpp_list(mad_send_wr, mad_size, gfp_mask); |
@@ -1151,7 +1151,7 @@ int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr) | |||
1151 | 1151 | ||
1152 | /* Set WR ID to find mad_send_wr upon completion */ | 1152 | /* Set WR ID to find mad_send_wr upon completion */ |
1153 | qp_info = mad_send_wr->mad_agent_priv->qp_info; | 1153 | qp_info = mad_send_wr->mad_agent_priv->qp_info; |
1154 | mad_send_wr->send_wr.wr_id = (unsigned long)&mad_send_wr->mad_list; | 1154 | mad_send_wr->send_wr.wr.wr_id = (unsigned long)&mad_send_wr->mad_list; |
1155 | mad_send_wr->mad_list.mad_queue = &qp_info->send_queue; | 1155 | mad_send_wr->mad_list.mad_queue = &qp_info->send_queue; |
1156 | 1156 | ||
1157 | mad_agent = mad_send_wr->send_buf.mad_agent; | 1157 | mad_agent = mad_send_wr->send_buf.mad_agent; |
@@ -1179,7 +1179,7 @@ int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr) | |||
1179 | 1179 | ||
1180 | spin_lock_irqsave(&qp_info->send_queue.lock, flags); | 1180 | spin_lock_irqsave(&qp_info->send_queue.lock, flags); |
1181 | if (qp_info->send_queue.count < qp_info->send_queue.max_active) { | 1181 | if (qp_info->send_queue.count < qp_info->send_queue.max_active) { |
1182 | ret = ib_post_send(mad_agent->qp, &mad_send_wr->send_wr, | 1182 | ret = ib_post_send(mad_agent->qp, &mad_send_wr->send_wr.wr, |
1183 | &bad_send_wr); | 1183 | &bad_send_wr); |
1184 | list = &qp_info->send_queue.list; | 1184 | list = &qp_info->send_queue.list; |
1185 | } else { | 1185 | } else { |
@@ -1244,7 +1244,7 @@ int ib_post_send_mad(struct ib_mad_send_buf *send_buf, | |||
1244 | * request associated with the completion | 1244 | * request associated with the completion |
1245 | */ | 1245 | */ |
1246 | next_send_buf = send_buf->next; | 1246 | next_send_buf = send_buf->next; |
1247 | mad_send_wr->send_wr.wr.ud.ah = send_buf->ah; | 1247 | mad_send_wr->send_wr.ah = send_buf->ah; |
1248 | 1248 | ||
1249 | if (((struct ib_mad_hdr *) send_buf->mad)->mgmt_class == | 1249 | if (((struct ib_mad_hdr *) send_buf->mad)->mgmt_class == |
1250 | IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) { | 1250 | IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) { |
@@ -2457,7 +2457,7 @@ retry: | |||
2457 | ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc); | 2457 | ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc); |
2458 | 2458 | ||
2459 | if (queued_send_wr) { | 2459 | if (queued_send_wr) { |
2460 | ret = ib_post_send(qp_info->qp, &queued_send_wr->send_wr, | 2460 | ret = ib_post_send(qp_info->qp, &queued_send_wr->send_wr.wr, |
2461 | &bad_send_wr); | 2461 | &bad_send_wr); |
2462 | if (ret) { | 2462 | if (ret) { |
2463 | dev_err(&port_priv->device->dev, | 2463 | dev_err(&port_priv->device->dev, |
@@ -2515,7 +2515,7 @@ static void mad_error_handler(struct ib_mad_port_private *port_priv, | |||
2515 | struct ib_send_wr *bad_send_wr; | 2515 | struct ib_send_wr *bad_send_wr; |
2516 | 2516 | ||
2517 | mad_send_wr->retry = 0; | 2517 | mad_send_wr->retry = 0; |
2518 | ret = ib_post_send(qp_info->qp, &mad_send_wr->send_wr, | 2518 | ret = ib_post_send(qp_info->qp, &mad_send_wr->send_wr.wr, |
2519 | &bad_send_wr); | 2519 | &bad_send_wr); |
2520 | if (ret) | 2520 | if (ret) |
2521 | ib_mad_send_done_handler(port_priv, wc); | 2521 | ib_mad_send_done_handler(port_priv, wc); |
@@ -2713,7 +2713,7 @@ static void local_completions(struct work_struct *work) | |||
2713 | build_smp_wc(recv_mad_agent->agent.qp, | 2713 | build_smp_wc(recv_mad_agent->agent.qp, |
2714 | (unsigned long) local->mad_send_wr, | 2714 | (unsigned long) local->mad_send_wr, |
2715 | be16_to_cpu(IB_LID_PERMISSIVE), | 2715 | be16_to_cpu(IB_LID_PERMISSIVE), |
2716 | local->mad_send_wr->send_wr.wr.ud.pkey_index, | 2716 | local->mad_send_wr->send_wr.pkey_index, |
2717 | recv_mad_agent->agent.port_num, &wc); | 2717 | recv_mad_agent->agent.port_num, &wc); |
2718 | 2718 | ||
2719 | local->mad_priv->header.recv_wc.wc = &wc; | 2719 | local->mad_priv->header.recv_wc.wc = &wc; |
diff --git a/drivers/infiniband/core/mad_priv.h b/drivers/infiniband/core/mad_priv.h
index 4a4f7aad0978..990698a6ab4b 100644
--- a/drivers/infiniband/core/mad_priv.h
+++ b/drivers/infiniband/core/mad_priv.h
@@ -123,7 +123,7 @@ struct ib_mad_send_wr_private { | |||
123 | struct ib_mad_send_buf send_buf; | 123 | struct ib_mad_send_buf send_buf; |
124 | u64 header_mapping; | 124 | u64 header_mapping; |
125 | u64 payload_mapping; | 125 | u64 payload_mapping; |
126 | struct ib_send_wr send_wr; | 126 | struct ib_ud_wr send_wr; |
127 | struct ib_sge sg_list[IB_MAD_SEND_REQ_MAX_SG]; | 127 | struct ib_sge sg_list[IB_MAD_SEND_REQ_MAX_SG]; |
128 | __be64 tid; | 128 | __be64 tid; |
129 | unsigned long timeout; | 129 | unsigned long timeout; |
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index be4cb9f04be3..8adb71fd6a3a 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -2303,6 +2303,12 @@ ssize_t ib_uverbs_destroy_qp(struct ib_uverbs_file *file, | |||
2303 | return in_len; | 2303 | return in_len; |
2304 | } | 2304 | } |
2305 | 2305 | ||
2306 | static void *alloc_wr(size_t wr_size, __u32 num_sge) | ||
2307 | { | ||
2308 | return kmalloc(ALIGN(wr_size, sizeof (struct ib_sge)) + | ||
2309 | num_sge * sizeof (struct ib_sge), GFP_KERNEL); | ||
2310 | }; | ||
2311 | |||
2306 | ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file, | 2312 | ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file, |
2307 | struct ib_device *ib_dev, | 2313 | struct ib_device *ib_dev, |
2308 | const char __user *buf, int in_len, | 2314 | const char __user *buf, int in_len, |
@@ -2351,14 +2357,83 @@ ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file, | |||
2351 | goto out_put; | 2357 | goto out_put; |
2352 | } | 2358 | } |
2353 | 2359 | ||
2354 | next = kmalloc(ALIGN(sizeof *next, sizeof (struct ib_sge)) + | 2360 | if (is_ud) { |
2355 | user_wr->num_sge * sizeof (struct ib_sge), | 2361 | struct ib_ud_wr *ud; |
2356 | GFP_KERNEL); | 2362 | |
2357 | if (!next) { | 2363 | if (user_wr->opcode != IB_WR_SEND && |
2358 | ret = -ENOMEM; | 2364 | user_wr->opcode != IB_WR_SEND_WITH_IMM) { |
2365 | ret = -EINVAL; | ||
2366 | goto out_put; | ||
2367 | } | ||
2368 | |||
2369 | ud = alloc_wr(sizeof(*ud), user_wr->num_sge); | ||
2370 | if (!ud) { | ||
2371 | ret = -ENOMEM; | ||
2372 | goto out_put; | ||
2373 | } | ||
2374 | |||
2375 | ud->ah = idr_read_ah(user_wr->wr.ud.ah, file->ucontext); | ||
2376 | if (!ud->ah) { | ||
2377 | kfree(ud); | ||
2378 | ret = -EINVAL; | ||
2379 | goto out_put; | ||
2380 | } | ||
2381 | ud->remote_qpn = user_wr->wr.ud.remote_qpn; | ||
2382 | ud->remote_qkey = user_wr->wr.ud.remote_qkey; | ||
2383 | |||
2384 | next = &ud->wr; | ||
2385 | } else if (user_wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM || | ||
2386 | user_wr->opcode == IB_WR_RDMA_WRITE || | ||
2387 | user_wr->opcode == IB_WR_RDMA_READ) { | ||
2388 | struct ib_rdma_wr *rdma; | ||
2389 | |||
2390 | rdma = alloc_wr(sizeof(*rdma), user_wr->num_sge); | ||
2391 | if (!rdma) { | ||
2392 | ret = -ENOMEM; | ||
2393 | goto out_put; | ||
2394 | } | ||
2395 | |||
2396 | rdma->remote_addr = user_wr->wr.rdma.remote_addr; | ||
2397 | rdma->rkey = user_wr->wr.rdma.rkey; | ||
2398 | |||
2399 | next = &rdma->wr; | ||
2400 | } else if (user_wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP || | ||
2401 | user_wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD) { | ||
2402 | struct ib_atomic_wr *atomic; | ||
2403 | |||
2404 | atomic = alloc_wr(sizeof(*atomic), user_wr->num_sge); | ||
2405 | if (!atomic) { | ||
2406 | ret = -ENOMEM; | ||
2407 | goto out_put; | ||
2408 | } | ||
2409 | |||
2410 | atomic->remote_addr = user_wr->wr.atomic.remote_addr; | ||
2411 | atomic->compare_add = user_wr->wr.atomic.compare_add; | ||
2412 | atomic->swap = user_wr->wr.atomic.swap; | ||
2413 | atomic->rkey = user_wr->wr.atomic.rkey; | ||
2414 | |||
2415 | next = &atomic->wr; | ||
2416 | } else if (user_wr->opcode == IB_WR_SEND || | ||
2417 | user_wr->opcode == IB_WR_SEND_WITH_IMM || | ||
2418 | user_wr->opcode == IB_WR_SEND_WITH_INV) { | ||
2419 | next = alloc_wr(sizeof(*next), user_wr->num_sge); | ||
2420 | if (!next) { | ||
2421 | ret = -ENOMEM; | ||
2422 | goto out_put; | ||
2423 | } | ||
2424 | } else { | ||
2425 | ret = -EINVAL; | ||
2359 | goto out_put; | 2426 | goto out_put; |
2360 | } | 2427 | } |
2361 | 2428 | ||
2429 | if (user_wr->opcode == IB_WR_SEND_WITH_IMM || | ||
2430 | user_wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM) { | ||
2431 | next->ex.imm_data = | ||
2432 | (__be32 __force) user_wr->ex.imm_data; | ||
2433 | } else if (user_wr->opcode == IB_WR_SEND_WITH_INV) { | ||
2434 | next->ex.invalidate_rkey = user_wr->ex.invalidate_rkey; | ||
2435 | } | ||
2436 | |||
2362 | if (!last) | 2437 | if (!last) |
2363 | wr = next; | 2438 | wr = next; |
2364 | else | 2439 | else |
@@ -2371,60 +2446,6 @@ ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file, | |||
2371 | next->opcode = user_wr->opcode; | 2446 | next->opcode = user_wr->opcode; |
2372 | next->send_flags = user_wr->send_flags; | 2447 | next->send_flags = user_wr->send_flags; |
2373 | 2448 | ||
2374 | if (is_ud) { | ||
2375 | if (next->opcode != IB_WR_SEND && | ||
2376 | next->opcode != IB_WR_SEND_WITH_IMM) { | ||
2377 | ret = -EINVAL; | ||
2378 | goto out_put; | ||
2379 | } | ||
2380 | |||
2381 | next->wr.ud.ah = idr_read_ah(user_wr->wr.ud.ah, | ||
2382 | file->ucontext); | ||
2383 | if (!next->wr.ud.ah) { | ||
2384 | ret = -EINVAL; | ||
2385 | goto out_put; | ||
2386 | } | ||
2387 | next->wr.ud.remote_qpn = user_wr->wr.ud.remote_qpn; | ||
2388 | next->wr.ud.remote_qkey = user_wr->wr.ud.remote_qkey; | ||
2389 | if (next->opcode == IB_WR_SEND_WITH_IMM) | ||
2390 | next->ex.imm_data = | ||
2391 | (__be32 __force) user_wr->ex.imm_data; | ||
2392 | } else { | ||
2393 | switch (next->opcode) { | ||
2394 | case IB_WR_RDMA_WRITE_WITH_IMM: | ||
2395 | next->ex.imm_data = | ||
2396 | (__be32 __force) user_wr->ex.imm_data; | ||
2397 | case IB_WR_RDMA_WRITE: | ||
2398 | case IB_WR_RDMA_READ: | ||
2399 | next->wr.rdma.remote_addr = | ||
2400 | user_wr->wr.rdma.remote_addr; | ||
2401 | next->wr.rdma.rkey = | ||
2402 | user_wr->wr.rdma.rkey; | ||
2403 | break; | ||
2404 | case IB_WR_SEND_WITH_IMM: | ||
2405 | next->ex.imm_data = | ||
2406 | (__be32 __force) user_wr->ex.imm_data; | ||
2407 | break; | ||
2408 | case IB_WR_SEND_WITH_INV: | ||
2409 | next->ex.invalidate_rkey = | ||
2410 | user_wr->ex.invalidate_rkey; | ||
2411 | break; | ||
2412 | case IB_WR_ATOMIC_CMP_AND_SWP: | ||
2413 | case IB_WR_ATOMIC_FETCH_AND_ADD: | ||
2414 | next->wr.atomic.remote_addr = | ||
2415 | user_wr->wr.atomic.remote_addr; | ||
2416 | next->wr.atomic.compare_add = | ||
2417 | user_wr->wr.atomic.compare_add; | ||
2418 | next->wr.atomic.swap = user_wr->wr.atomic.swap; | ||
2419 | next->wr.atomic.rkey = user_wr->wr.atomic.rkey; | ||
2420 | case IB_WR_SEND: | ||
2421 | break; | ||
2422 | default: | ||
2423 | ret = -EINVAL; | ||
2424 | goto out_put; | ||
2425 | } | ||
2426 | } | ||
2427 | |||
2428 | if (next->num_sge) { | 2449 | if (next->num_sge) { |
2429 | next->sg_list = (void *) next + | 2450 | next->sg_list = (void *) next + |
2430 | ALIGN(sizeof *next, sizeof (struct ib_sge)); | 2451 | ALIGN(sizeof *next, sizeof (struct ib_sge)); |
@@ -2458,8 +2479,8 @@ out_put: | |||
2458 | put_qp_read(qp); | 2479 | put_qp_read(qp); |
2459 | 2480 | ||
2460 | while (wr) { | 2481 | while (wr) { |
2461 | if (is_ud && wr->wr.ud.ah) | 2482 | if (is_ud && ud_wr(wr)->ah) |
2462 | put_ah_read(wr->wr.ud.ah); | 2483 | put_ah_read(ud_wr(wr)->ah); |
2463 | next = wr->next; | 2484 | next = wr->next; |
2464 | kfree(wr); | 2485 | kfree(wr); |
2465 | wr = next; | 2486 | wr = next; |
diff --git a/drivers/infiniband/hw/cxgb3/iwch_qp.c b/drivers/infiniband/hw/cxgb3/iwch_qp.c
index b57c0befd962..bac0508fedd9 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_qp.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_qp.c
@@ -95,8 +95,8 @@ static int build_rdma_write(union t3_wr *wqe, struct ib_send_wr *wr, | |||
95 | wqe->write.reserved[0] = 0; | 95 | wqe->write.reserved[0] = 0; |
96 | wqe->write.reserved[1] = 0; | 96 | wqe->write.reserved[1] = 0; |
97 | wqe->write.reserved[2] = 0; | 97 | wqe->write.reserved[2] = 0; |
98 | wqe->write.stag_sink = cpu_to_be32(wr->wr.rdma.rkey); | 98 | wqe->write.stag_sink = cpu_to_be32(rdma_wr(wr)->rkey); |
99 | wqe->write.to_sink = cpu_to_be64(wr->wr.rdma.remote_addr); | 99 | wqe->write.to_sink = cpu_to_be64(rdma_wr(wr)->remote_addr); |
100 | 100 | ||
101 | if (wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM) { | 101 | if (wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM) { |
102 | plen = 4; | 102 | plen = 4; |
@@ -137,8 +137,8 @@ static int build_rdma_read(union t3_wr *wqe, struct ib_send_wr *wr, | |||
137 | wqe->read.local_inv = 0; | 137 | wqe->read.local_inv = 0; |
138 | wqe->read.reserved[0] = 0; | 138 | wqe->read.reserved[0] = 0; |
139 | wqe->read.reserved[1] = 0; | 139 | wqe->read.reserved[1] = 0; |
140 | wqe->read.rem_stag = cpu_to_be32(wr->wr.rdma.rkey); | 140 | wqe->read.rem_stag = cpu_to_be32(rdma_wr(wr)->rkey); |
141 | wqe->read.rem_to = cpu_to_be64(wr->wr.rdma.remote_addr); | 141 | wqe->read.rem_to = cpu_to_be64(rdma_wr(wr)->remote_addr); |
142 | wqe->read.local_stag = cpu_to_be32(wr->sg_list[0].lkey); | 142 | wqe->read.local_stag = cpu_to_be32(wr->sg_list[0].lkey); |
143 | wqe->read.local_len = cpu_to_be32(wr->sg_list[0].length); | 143 | wqe->read.local_len = cpu_to_be32(wr->sg_list[0].length); |
144 | wqe->read.local_to = cpu_to_be64(wr->sg_list[0].addr); | 144 | wqe->read.local_to = cpu_to_be64(wr->sg_list[0].addr); |
@@ -146,27 +146,27 @@ static int build_rdma_read(union t3_wr *wqe, struct ib_send_wr *wr, | |||
146 | return 0; | 146 | return 0; |
147 | } | 147 | } |
148 | 148 | ||
149 | static int build_fastreg(union t3_wr *wqe, struct ib_send_wr *wr, | 149 | static int build_fastreg(union t3_wr *wqe, struct ib_send_wr *send_wr, |
150 | u8 *flit_cnt, int *wr_cnt, struct t3_wq *wq) | 150 | u8 *flit_cnt, int *wr_cnt, struct t3_wq *wq) |
151 | { | 151 | { |
152 | struct ib_fast_reg_wr *wr = fast_reg_wr(send_wr); | ||
152 | int i; | 153 | int i; |
153 | __be64 *p; | 154 | __be64 *p; |
154 | 155 | ||
155 | if (wr->wr.fast_reg.page_list_len > T3_MAX_FASTREG_DEPTH) | 156 | if (wr->page_list_len > T3_MAX_FASTREG_DEPTH) |
156 | return -EINVAL; | 157 | return -EINVAL; |
157 | *wr_cnt = 1; | 158 | *wr_cnt = 1; |
158 | wqe->fastreg.stag = cpu_to_be32(wr->wr.fast_reg.rkey); | 159 | wqe->fastreg.stag = cpu_to_be32(wr->rkey); |
159 | wqe->fastreg.len = cpu_to_be32(wr->wr.fast_reg.length); | 160 | wqe->fastreg.len = cpu_to_be32(wr->length); |
160 | wqe->fastreg.va_base_hi = cpu_to_be32(wr->wr.fast_reg.iova_start >> 32); | 161 | wqe->fastreg.va_base_hi = cpu_to_be32(wr->iova_start >> 32); |
161 | wqe->fastreg.va_base_lo_fbo = | 162 | wqe->fastreg.va_base_lo_fbo = cpu_to_be32(wr->iova_start & 0xffffffff); |
162 | cpu_to_be32(wr->wr.fast_reg.iova_start & 0xffffffff); | ||
163 | wqe->fastreg.page_type_perms = cpu_to_be32( | 163 | wqe->fastreg.page_type_perms = cpu_to_be32( |
164 | V_FR_PAGE_COUNT(wr->wr.fast_reg.page_list_len) | | 164 | V_FR_PAGE_COUNT(wr->page_list_len) | |
165 | V_FR_PAGE_SIZE(wr->wr.fast_reg.page_shift-12) | | 165 | V_FR_PAGE_SIZE(wr->page_shift-12) | |
166 | V_FR_TYPE(TPT_VATO) | | 166 | V_FR_TYPE(TPT_VATO) | |
167 | V_FR_PERMS(iwch_ib_to_tpt_access(wr->wr.fast_reg.access_flags))); | 167 | V_FR_PERMS(iwch_ib_to_tpt_access(wr->access_flags))); |
168 | p = &wqe->fastreg.pbl_addrs[0]; | 168 | p = &wqe->fastreg.pbl_addrs[0]; |
169 | for (i = 0; i < wr->wr.fast_reg.page_list_len; i++, p++) { | 169 | for (i = 0; i < wr->page_list_len; i++, p++) { |
170 | 170 | ||
171 | /* If we need a 2nd WR, then set it up */ | 171 | /* If we need a 2nd WR, then set it up */ |
172 | if (i == T3_MAX_FASTREG_FRAG) { | 172 | if (i == T3_MAX_FASTREG_FRAG) { |
@@ -175,14 +175,14 @@ static int build_fastreg(union t3_wr *wqe, struct ib_send_wr *wr, | |||
175 | Q_PTR2IDX((wq->wptr+1), wq->size_log2)); | 175 | Q_PTR2IDX((wq->wptr+1), wq->size_log2)); |
176 | build_fw_riwrh((void *)wqe, T3_WR_FASTREG, 0, | 176 | build_fw_riwrh((void *)wqe, T3_WR_FASTREG, 0, |
177 | Q_GENBIT(wq->wptr + 1, wq->size_log2), | 177 | Q_GENBIT(wq->wptr + 1, wq->size_log2), |
178 | 0, 1 + wr->wr.fast_reg.page_list_len - T3_MAX_FASTREG_FRAG, | 178 | 0, 1 + wr->page_list_len - T3_MAX_FASTREG_FRAG, |
179 | T3_EOP); | 179 | T3_EOP); |
180 | 180 | ||
181 | p = &wqe->pbl_frag.pbl_addrs[0]; | 181 | p = &wqe->pbl_frag.pbl_addrs[0]; |
182 | } | 182 | } |
183 | *p = cpu_to_be64((u64)wr->wr.fast_reg.page_list->page_list[i]); | 183 | *p = cpu_to_be64((u64)wr->page_list->page_list[i]); |
184 | } | 184 | } |
185 | *flit_cnt = 5 + wr->wr.fast_reg.page_list_len; | 185 | *flit_cnt = 5 + wr->page_list_len; |
186 | if (*flit_cnt > 15) | 186 | if (*flit_cnt > 15) |
187 | *flit_cnt = 15; | 187 | *flit_cnt = 15; |
188 | return 0; | 188 | return 0; |
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index 6517e1208ccb..b60498fff99a 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -528,8 +528,8 @@ static int build_rdma_write(struct t4_sq *sq, union t4_wr *wqe, | |||
528 | if (wr->num_sge > T4_MAX_SEND_SGE) | 528 | if (wr->num_sge > T4_MAX_SEND_SGE) |
529 | return -EINVAL; | 529 | return -EINVAL; |
530 | wqe->write.r2 = 0; | 530 | wqe->write.r2 = 0; |
531 | wqe->write.stag_sink = cpu_to_be32(wr->wr.rdma.rkey); | 531 | wqe->write.stag_sink = cpu_to_be32(rdma_wr(wr)->rkey); |
532 | wqe->write.to_sink = cpu_to_be64(wr->wr.rdma.remote_addr); | 532 | wqe->write.to_sink = cpu_to_be64(rdma_wr(wr)->remote_addr); |
533 | if (wr->num_sge) { | 533 | if (wr->num_sge) { |
534 | if (wr->send_flags & IB_SEND_INLINE) { | 534 | if (wr->send_flags & IB_SEND_INLINE) { |
535 | ret = build_immd(sq, wqe->write.u.immd_src, wr, | 535 | ret = build_immd(sq, wqe->write.u.immd_src, wr, |
@@ -566,10 +566,10 @@ static int build_rdma_read(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16) | |||
566 | if (wr->num_sge > 1) | 566 | if (wr->num_sge > 1) |
567 | return -EINVAL; | 567 | return -EINVAL; |
568 | if (wr->num_sge) { | 568 | if (wr->num_sge) { |
569 | wqe->read.stag_src = cpu_to_be32(wr->wr.rdma.rkey); | 569 | wqe->read.stag_src = cpu_to_be32(rdma_wr(wr)->rkey); |
570 | wqe->read.to_src_hi = cpu_to_be32((u32)(wr->wr.rdma.remote_addr | 570 | wqe->read.to_src_hi = cpu_to_be32((u32)(rdma_wr(wr)->remote_addr |
571 | >> 32)); | 571 | >> 32)); |
572 | wqe->read.to_src_lo = cpu_to_be32((u32)wr->wr.rdma.remote_addr); | 572 | wqe->read.to_src_lo = cpu_to_be32((u32)rdma_wr(wr)->remote_addr); |
573 | wqe->read.stag_sink = cpu_to_be32(wr->sg_list[0].lkey); | 573 | wqe->read.stag_sink = cpu_to_be32(wr->sg_list[0].lkey); |
574 | wqe->read.plen = cpu_to_be32(wr->sg_list[0].length); | 574 | wqe->read.plen = cpu_to_be32(wr->sg_list[0].length); |
575 | wqe->read.to_sink_hi = cpu_to_be32((u32)(wr->sg_list[0].addr | 575 | wqe->read.to_sink_hi = cpu_to_be32((u32)(wr->sg_list[0].addr |
@@ -606,39 +606,36 @@ static int build_rdma_recv(struct c4iw_qp *qhp, union t4_recv_wr *wqe, | |||
606 | } | 606 | } |
607 | 607 | ||
608 | static int build_fastreg(struct t4_sq *sq, union t4_wr *wqe, | 608 | static int build_fastreg(struct t4_sq *sq, union t4_wr *wqe, |
609 | struct ib_send_wr *wr, u8 *len16, u8 t5dev) | 609 | struct ib_send_wr *send_wr, u8 *len16, u8 t5dev) |
610 | { | 610 | { |
611 | 611 | struct ib_fast_reg_wr *wr = fast_reg_wr(send_wr); | |
612 | struct fw_ri_immd *imdp; | 612 | struct fw_ri_immd *imdp; |
613 | __be64 *p; | 613 | __be64 *p; |
614 | int i; | 614 | int i; |
615 | int pbllen = roundup(wr->wr.fast_reg.page_list_len * sizeof(u64), 32); | 615 | int pbllen = roundup(wr->page_list_len * sizeof(u64), 32); |
616 | int rem; | 616 | int rem; |
617 | 617 | ||
618 | if (wr->wr.fast_reg.page_list_len > | 618 | if (wr->page_list_len > t4_max_fr_depth(use_dsgl)) |
619 | t4_max_fr_depth(use_dsgl)) | ||
620 | return -EINVAL; | 619 | return -EINVAL; |
621 | 620 | ||
622 | wqe->fr.qpbinde_to_dcacpu = 0; | 621 | wqe->fr.qpbinde_to_dcacpu = 0; |
623 | wqe->fr.pgsz_shift = wr->wr.fast_reg.page_shift - 12; | 622 | wqe->fr.pgsz_shift = wr->page_shift - 12; |
624 | wqe->fr.addr_type = FW_RI_VA_BASED_TO; | 623 | wqe->fr.addr_type = FW_RI_VA_BASED_TO; |
625 | wqe->fr.mem_perms = c4iw_ib_to_tpt_access(wr->wr.fast_reg.access_flags); | 624 | wqe->fr.mem_perms = c4iw_ib_to_tpt_access(wr->access_flags); |
626 | wqe->fr.len_hi = 0; | 625 | wqe->fr.len_hi = 0; |
627 | wqe->fr.len_lo = cpu_to_be32(wr->wr.fast_reg.length); | 626 | wqe->fr.len_lo = cpu_to_be32(wr->length); |
628 | wqe->fr.stag = cpu_to_be32(wr->wr.fast_reg.rkey); | 627 | wqe->fr.stag = cpu_to_be32(wr->rkey); |
629 | wqe->fr.va_hi = cpu_to_be32(wr->wr.fast_reg.iova_start >> 32); | 628 | wqe->fr.va_hi = cpu_to_be32(wr->iova_start >> 32); |
630 | wqe->fr.va_lo_fbo = cpu_to_be32(wr->wr.fast_reg.iova_start & | 629 | wqe->fr.va_lo_fbo = cpu_to_be32(wr->iova_start & 0xffffffff); |
631 | 0xffffffff); | ||
632 | 630 | ||
633 | if (t5dev && use_dsgl && (pbllen > max_fr_immd)) { | 631 | if (t5dev && use_dsgl && (pbllen > max_fr_immd)) { |
634 | struct c4iw_fr_page_list *c4pl = | 632 | struct c4iw_fr_page_list *c4pl = |
635 | to_c4iw_fr_page_list(wr->wr.fast_reg.page_list); | 633 | to_c4iw_fr_page_list(wr->page_list); |
636 | struct fw_ri_dsgl *sglp; | 634 | struct fw_ri_dsgl *sglp; |
637 | 635 | ||
638 | for (i = 0; i < wr->wr.fast_reg.page_list_len; i++) { | 636 | for (i = 0; i < wr->page_list_len; i++) { |
639 | wr->wr.fast_reg.page_list->page_list[i] = (__force u64) | 637 | wr->page_list->page_list[i] = (__force u64) |
640 | cpu_to_be64((u64) | 638 | cpu_to_be64((u64)wr->page_list->page_list[i]); |
641 | wr->wr.fast_reg.page_list->page_list[i]); | ||
642 | } | 639 | } |
643 | 640 | ||
644 | sglp = (struct fw_ri_dsgl *)(&wqe->fr + 1); | 641 | sglp = (struct fw_ri_dsgl *)(&wqe->fr + 1); |
@@ -657,9 +654,8 @@ static int build_fastreg(struct t4_sq *sq, union t4_wr *wqe, | |||
657 | imdp->immdlen = cpu_to_be32(pbllen); | 654 | imdp->immdlen = cpu_to_be32(pbllen); |
658 | p = (__be64 *)(imdp + 1); | 655 | p = (__be64 *)(imdp + 1); |
659 | rem = pbllen; | 656 | rem = pbllen; |
660 | for (i = 0; i < wr->wr.fast_reg.page_list_len; i++) { | 657 | for (i = 0; i < wr->page_list_len; i++) { |
661 | *p = cpu_to_be64( | 658 | *p = cpu_to_be64((u64)wr->page_list->page_list[i]); |
662 | (u64)wr->wr.fast_reg.page_list->page_list[i]); | ||
663 | rem -= sizeof(*p); | 659 | rem -= sizeof(*p); |
664 | if (++p == (__be64 *)&sq->queue[sq->size]) | 660 | if (++p == (__be64 *)&sq->queue[sq->size]) |
665 | p = (__be64 *)sq->queue; | 661 | p = (__be64 *)sq->queue; |
diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
index 1cd75ff02251..5f2de2ed5598 100644
--- a/drivers/infiniband/hw/mlx4/mad.c
+++ b/drivers/infiniband/hw/mlx4/mad.c
@@ -457,7 +457,8 @@ int mlx4_ib_send_to_slave(struct mlx4_ib_dev *dev, int slave, u8 port, | |||
457 | struct ib_grh *grh, struct ib_mad *mad) | 457 | struct ib_grh *grh, struct ib_mad *mad) |
458 | { | 458 | { |
459 | struct ib_sge list; | 459 | struct ib_sge list; |
460 | struct ib_send_wr wr, *bad_wr; | 460 | struct ib_ud_wr wr; |
461 | struct ib_send_wr *bad_wr; | ||
461 | struct mlx4_ib_demux_pv_ctx *tun_ctx; | 462 | struct mlx4_ib_demux_pv_ctx *tun_ctx; |
462 | struct mlx4_ib_demux_pv_qp *tun_qp; | 463 | struct mlx4_ib_demux_pv_qp *tun_qp; |
463 | struct mlx4_rcv_tunnel_mad *tun_mad; | 464 | struct mlx4_rcv_tunnel_mad *tun_mad; |
@@ -582,18 +583,18 @@ int mlx4_ib_send_to_slave(struct mlx4_ib_dev *dev, int slave, u8 port, | |||
582 | list.length = sizeof (struct mlx4_rcv_tunnel_mad); | 583 | list.length = sizeof (struct mlx4_rcv_tunnel_mad); |
583 | list.lkey = tun_ctx->pd->local_dma_lkey; | 584 | list.lkey = tun_ctx->pd->local_dma_lkey; |
584 | 585 | ||
585 | wr.wr.ud.ah = ah; | 586 | wr.ah = ah; |
586 | wr.wr.ud.port_num = port; | 587 | wr.port_num = port; |
587 | wr.wr.ud.remote_qkey = IB_QP_SET_QKEY; | 588 | wr.remote_qkey = IB_QP_SET_QKEY; |
588 | wr.wr.ud.remote_qpn = dqpn; | 589 | wr.remote_qpn = dqpn; |
589 | wr.next = NULL; | 590 | wr.wr.next = NULL; |
590 | wr.wr_id = ((u64) tun_tx_ix) | MLX4_TUN_SET_WRID_QPN(dest_qpt); | 591 | wr.wr.wr_id = ((u64) tun_tx_ix) | MLX4_TUN_SET_WRID_QPN(dest_qpt); |
591 | wr.sg_list = &list; | 592 | wr.wr.sg_list = &list; |
592 | wr.num_sge = 1; | 593 | wr.wr.num_sge = 1; |
593 | wr.opcode = IB_WR_SEND; | 594 | wr.wr.opcode = IB_WR_SEND; |
594 | wr.send_flags = IB_SEND_SIGNALED; | 595 | wr.wr.send_flags = IB_SEND_SIGNALED; |
595 | 596 | ||
596 | ret = ib_post_send(src_qp, &wr, &bad_wr); | 597 | ret = ib_post_send(src_qp, &wr.wr, &bad_wr); |
597 | out: | 598 | out: |
598 | if (ret) | 599 | if (ret) |
599 | ib_destroy_ah(ah); | 600 | ib_destroy_ah(ah); |
@@ -1175,7 +1176,8 @@ int mlx4_ib_send_to_wire(struct mlx4_ib_dev *dev, int slave, u8 port, | |||
1175 | u8 *s_mac, struct ib_mad *mad) | 1176 | u8 *s_mac, struct ib_mad *mad) |
1176 | { | 1177 | { |
1177 | struct ib_sge list; | 1178 | struct ib_sge list; |
1178 | struct ib_send_wr wr, *bad_wr; | 1179 | struct ib_ud_wr wr; |
1180 | struct ib_send_wr *bad_wr; | ||
1179 | struct mlx4_ib_demux_pv_ctx *sqp_ctx; | 1181 | struct mlx4_ib_demux_pv_ctx *sqp_ctx; |
1180 | struct mlx4_ib_demux_pv_qp *sqp; | 1182 | struct mlx4_ib_demux_pv_qp *sqp; |
1181 | struct mlx4_mad_snd_buf *sqp_mad; | 1183 | struct mlx4_mad_snd_buf *sqp_mad; |
@@ -1246,22 +1248,22 @@ int mlx4_ib_send_to_wire(struct mlx4_ib_dev *dev, int slave, u8 port, | |||
1246 | list.length = sizeof (struct mlx4_mad_snd_buf); | 1248 | list.length = sizeof (struct mlx4_mad_snd_buf); |
1247 | list.lkey = sqp_ctx->pd->local_dma_lkey; | 1249 | list.lkey = sqp_ctx->pd->local_dma_lkey; |
1248 | 1250 | ||
1249 | wr.wr.ud.ah = ah; | 1251 | wr.ah = ah; |
1250 | wr.wr.ud.port_num = port; | 1252 | wr.port_num = port; |
1251 | wr.wr.ud.pkey_index = wire_pkey_ix; | 1253 | wr.pkey_index = wire_pkey_ix; |
1252 | wr.wr.ud.remote_qkey = qkey; | 1254 | wr.remote_qkey = qkey; |
1253 | wr.wr.ud.remote_qpn = remote_qpn; | 1255 | wr.remote_qpn = remote_qpn; |
1254 | wr.next = NULL; | 1256 | wr.wr.next = NULL; |
1255 | wr.wr_id = ((u64) wire_tx_ix) | MLX4_TUN_SET_WRID_QPN(src_qpnum); | 1257 | wr.wr.wr_id = ((u64) wire_tx_ix) | MLX4_TUN_SET_WRID_QPN(src_qpnum); |
1256 | wr.sg_list = &list; | 1258 | wr.wr.sg_list = &list; |
1257 | wr.num_sge = 1; | 1259 | wr.wr.num_sge = 1; |
1258 | wr.opcode = IB_WR_SEND; | 1260 | wr.wr.opcode = IB_WR_SEND; |
1259 | wr.send_flags = IB_SEND_SIGNALED; | 1261 | wr.wr.send_flags = IB_SEND_SIGNALED; |
1260 | if (s_mac) | 1262 | if (s_mac) |
1261 | memcpy(to_mah(ah)->av.eth.s_mac, s_mac, 6); | 1263 | memcpy(to_mah(ah)->av.eth.s_mac, s_mac, 6); |
1262 | 1264 | ||
1263 | 1265 | ||
1264 | ret = ib_post_send(send_qp, &wr, &bad_wr); | 1266 | ret = ib_post_send(send_qp, &wr.wr, &bad_wr); |
1265 | out: | 1267 | out: |
1266 | if (ret) | 1268 | if (ret) |
1267 | ib_destroy_ah(ah); | 1269 | ib_destroy_ah(ah); |
diff --git a/drivers/infiniband/hw/mlx4/mr.c b/drivers/infiniband/hw/mlx4/mr.c
index 2542fd3c1a49..5bba176e9dfa 100644
--- a/drivers/infiniband/hw/mlx4/mr.c
+++ b/drivers/infiniband/hw/mlx4/mr.c
@@ -321,21 +321,21 @@ err_free: | |||
321 | int mlx4_ib_bind_mw(struct ib_qp *qp, struct ib_mw *mw, | 321 | int mlx4_ib_bind_mw(struct ib_qp *qp, struct ib_mw *mw, |
322 | struct ib_mw_bind *mw_bind) | 322 | struct ib_mw_bind *mw_bind) |
323 | { | 323 | { |
324 | struct ib_send_wr wr; | 324 | struct ib_bind_mw_wr wr; |
325 | struct ib_send_wr *bad_wr; | 325 | struct ib_send_wr *bad_wr; |
326 | int ret; | 326 | int ret; |
327 | 327 | ||
328 | memset(&wr, 0, sizeof(wr)); | 328 | memset(&wr, 0, sizeof(wr)); |
329 | wr.opcode = IB_WR_BIND_MW; | 329 | wr.wr.opcode = IB_WR_BIND_MW; |
330 | wr.wr_id = mw_bind->wr_id; | 330 | wr.wr.wr_id = mw_bind->wr_id; |
331 | wr.send_flags = mw_bind->send_flags; | 331 | wr.wr.send_flags = mw_bind->send_flags; |
332 | wr.wr.bind_mw.mw = mw; | 332 | wr.mw = mw; |
333 | wr.wr.bind_mw.bind_info = mw_bind->bind_info; | 333 | wr.bind_info = mw_bind->bind_info; |
334 | wr.wr.bind_mw.rkey = ib_inc_rkey(mw->rkey); | 334 | wr.rkey = ib_inc_rkey(mw->rkey); |
335 | 335 | ||
336 | ret = mlx4_ib_post_send(qp, &wr, &bad_wr); | 336 | ret = mlx4_ib_post_send(qp, &wr.wr, &bad_wr); |
337 | if (!ret) | 337 | if (!ret) |
338 | mw->rkey = wr.wr.bind_mw.rkey; | 338 | mw->rkey = wr.rkey; |
339 | 339 | ||
340 | return ret; | 340 | return ret; |
341 | } | 341 | } |
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index 4ad9be3ad61c..3831cddb551f 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -2036,14 +2036,14 @@ static int vf_get_qp0_qkey(struct mlx4_dev *dev, int qpn, u32 *qkey) | |||
2036 | } | 2036 | } |
2037 | 2037 | ||
2038 | static int build_sriov_qp0_header(struct mlx4_ib_sqp *sqp, | 2038 | static int build_sriov_qp0_header(struct mlx4_ib_sqp *sqp, |
2039 | struct ib_send_wr *wr, | 2039 | struct ib_ud_wr *wr, |
2040 | void *wqe, unsigned *mlx_seg_len) | 2040 | void *wqe, unsigned *mlx_seg_len) |
2041 | { | 2041 | { |
2042 | struct mlx4_ib_dev *mdev = to_mdev(sqp->qp.ibqp.device); | 2042 | struct mlx4_ib_dev *mdev = to_mdev(sqp->qp.ibqp.device); |
2043 | struct ib_device *ib_dev = &mdev->ib_dev; | 2043 | struct ib_device *ib_dev = &mdev->ib_dev; |
2044 | struct mlx4_wqe_mlx_seg *mlx = wqe; | 2044 | struct mlx4_wqe_mlx_seg *mlx = wqe; |
2045 | struct mlx4_wqe_inline_seg *inl = wqe + sizeof *mlx; | 2045 | struct mlx4_wqe_inline_seg *inl = wqe + sizeof *mlx; |
2046 | struct mlx4_ib_ah *ah = to_mah(wr->wr.ud.ah); | 2046 | struct mlx4_ib_ah *ah = to_mah(wr->ah); |
2047 | u16 pkey; | 2047 | u16 pkey; |
2048 | u32 qkey; | 2048 | u32 qkey; |
2049 | int send_size; | 2049 | int send_size; |
@@ -2051,13 +2051,13 @@ static int build_sriov_qp0_header(struct mlx4_ib_sqp *sqp, | |||
2051 | int spc; | 2051 | int spc; |
2052 | int i; | 2052 | int i; |
2053 | 2053 | ||
2054 | if (wr->opcode != IB_WR_SEND) | 2054 | if (wr->wr.opcode != IB_WR_SEND) |
2055 | return -EINVAL; | 2055 | return -EINVAL; |
2056 | 2056 | ||
2057 | send_size = 0; | 2057 | send_size = 0; |
2058 | 2058 | ||
2059 | for (i = 0; i < wr->num_sge; ++i) | 2059 | for (i = 0; i < wr->wr.num_sge; ++i) |
2060 | send_size += wr->sg_list[i].length; | 2060 | send_size += wr->wr.sg_list[i].length; |
2061 | 2061 | ||
2062 | /* for proxy-qp0 sends, need to add in size of tunnel header */ | 2062 | /* for proxy-qp0 sends, need to add in size of tunnel header */ |
2063 | /* for tunnel-qp0 sends, tunnel header is already in s/g list */ | 2063 | /* for tunnel-qp0 sends, tunnel header is already in s/g list */ |
@@ -2082,11 +2082,11 @@ static int build_sriov_qp0_header(struct mlx4_ib_sqp *sqp, | |||
2082 | mlx->rlid = sqp->ud_header.lrh.destination_lid; | 2082 | mlx->rlid = sqp->ud_header.lrh.destination_lid; |
2083 | 2083 | ||
2084 | sqp->ud_header.lrh.virtual_lane = 0; | 2084 | sqp->ud_header.lrh.virtual_lane = 0; |
2085 | sqp->ud_header.bth.solicited_event = !!(wr->send_flags & IB_SEND_SOLICITED); | 2085 | sqp->ud_header.bth.solicited_event = !!(wr->wr.send_flags & IB_SEND_SOLICITED); |
2086 | ib_get_cached_pkey(ib_dev, sqp->qp.port, 0, &pkey); | 2086 | ib_get_cached_pkey(ib_dev, sqp->qp.port, 0, &pkey); |
2087 | sqp->ud_header.bth.pkey = cpu_to_be16(pkey); | 2087 | sqp->ud_header.bth.pkey = cpu_to_be16(pkey); |
2088 | if (sqp->qp.mlx4_ib_qp_type == MLX4_IB_QPT_TUN_SMI_OWNER) | 2088 | if (sqp->qp.mlx4_ib_qp_type == MLX4_IB_QPT_TUN_SMI_OWNER) |
2089 | sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->wr.ud.remote_qpn); | 2089 | sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->remote_qpn); |
2090 | else | 2090 | else |
2091 | sqp->ud_header.bth.destination_qpn = | 2091 | sqp->ud_header.bth.destination_qpn = |
2092 | cpu_to_be32(mdev->dev->caps.qp0_tunnel[sqp->qp.port - 1]); | 2092 | cpu_to_be32(mdev->dev->caps.qp0_tunnel[sqp->qp.port - 1]); |
@@ -2158,14 +2158,14 @@ static void mlx4_u64_to_smac(u8 *dst_mac, u64 src_mac) | |||
2158 | } | 2158 | } |
2159 | } | 2159 | } |
2160 | 2160 | ||
2161 | static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr, | 2161 | static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_ud_wr *wr, |
2162 | void *wqe, unsigned *mlx_seg_len) | 2162 | void *wqe, unsigned *mlx_seg_len) |
2163 | { | 2163 | { |
2164 | struct ib_device *ib_dev = sqp->qp.ibqp.device; | 2164 | struct ib_device *ib_dev = sqp->qp.ibqp.device; |
2165 | struct mlx4_wqe_mlx_seg *mlx = wqe; | 2165 | struct mlx4_wqe_mlx_seg *mlx = wqe; |
2166 | struct mlx4_wqe_ctrl_seg *ctrl = wqe; | 2166 | struct mlx4_wqe_ctrl_seg *ctrl = wqe; |
2167 | struct mlx4_wqe_inline_seg *inl = wqe + sizeof *mlx; | 2167 | struct mlx4_wqe_inline_seg *inl = wqe + sizeof *mlx; |
2168 | struct mlx4_ib_ah *ah = to_mah(wr->wr.ud.ah); | 2168 | struct mlx4_ib_ah *ah = to_mah(wr->ah); |
2169 | union ib_gid sgid; | 2169 | union ib_gid sgid; |
2170 | u16 pkey; | 2170 | u16 pkey; |
2171 | int send_size; | 2171 | int send_size; |
@@ -2179,8 +2179,8 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr, | |||
2179 | bool is_grh; | 2179 | bool is_grh; |
2180 | 2180 | ||
2181 | send_size = 0; | 2181 | send_size = 0; |
2182 | for (i = 0; i < wr->num_sge; ++i) | 2182 | for (i = 0; i < wr->wr.num_sge; ++i) |
2183 | send_size += wr->sg_list[i].length; | 2183 | send_size += wr->wr.sg_list[i].length; |
2184 | 2184 | ||
2185 | is_eth = rdma_port_get_link_layer(sqp->qp.ibqp.device, sqp->qp.port) == IB_LINK_LAYER_ETHERNET; | 2185 | is_eth = rdma_port_get_link_layer(sqp->qp.ibqp.device, sqp->qp.port) == IB_LINK_LAYER_ETHERNET; |
2186 | is_grh = mlx4_ib_ah_grh_present(ah); | 2186 | is_grh = mlx4_ib_ah_grh_present(ah); |
@@ -2257,7 +2257,7 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr, | |||
2257 | mlx->rlid = sqp->ud_header.lrh.destination_lid; | 2257 | mlx->rlid = sqp->ud_header.lrh.destination_lid; |
2258 | } | 2258 | } |
2259 | 2259 | ||
2260 | switch (wr->opcode) { | 2260 | switch (wr->wr.opcode) { |
2261 | case IB_WR_SEND: | 2261 | case IB_WR_SEND: |
2262 | sqp->ud_header.bth.opcode = IB_OPCODE_UD_SEND_ONLY; | 2262 | sqp->ud_header.bth.opcode = IB_OPCODE_UD_SEND_ONLY; |
2263 | sqp->ud_header.immediate_present = 0; | 2263 | sqp->ud_header.immediate_present = 0; |
@@ -2265,7 +2265,7 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr, | |||
2265 | case IB_WR_SEND_WITH_IMM: | 2265 | case IB_WR_SEND_WITH_IMM: |
2266 | sqp->ud_header.bth.opcode = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE; | 2266 | sqp->ud_header.bth.opcode = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE; |
2267 | sqp->ud_header.immediate_present = 1; | 2267 | sqp->ud_header.immediate_present = 1; |
2268 | sqp->ud_header.immediate_data = wr->ex.imm_data; | 2268 | sqp->ud_header.immediate_data = wr->wr.ex.imm_data; |
2269 | break; | 2269 | break; |
2270 | default: | 2270 | default: |
2271 | return -EINVAL; | 2271 | return -EINVAL; |
@@ -2308,16 +2308,16 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr, | |||
2308 | if (sqp->ud_header.lrh.destination_lid == IB_LID_PERMISSIVE) | 2308 | if (sqp->ud_header.lrh.destination_lid == IB_LID_PERMISSIVE) |
2309 | sqp->ud_header.lrh.source_lid = IB_LID_PERMISSIVE; | 2309 | sqp->ud_header.lrh.source_lid = IB_LID_PERMISSIVE; |
2310 | } | 2310 | } |
2311 | sqp->ud_header.bth.solicited_event = !!(wr->send_flags & IB_SEND_SOLICITED); | 2311 | sqp->ud_header.bth.solicited_event = !!(wr->wr.send_flags & IB_SEND_SOLICITED); |
2312 | if (!sqp->qp.ibqp.qp_num) | 2312 | if (!sqp->qp.ibqp.qp_num) |
2313 | ib_get_cached_pkey(ib_dev, sqp->qp.port, sqp->pkey_index, &pkey); | 2313 | ib_get_cached_pkey(ib_dev, sqp->qp.port, sqp->pkey_index, &pkey); |
2314 | else | 2314 | else |
2315 | ib_get_cached_pkey(ib_dev, sqp->qp.port, wr->wr.ud.pkey_index, &pkey); | 2315 | ib_get_cached_pkey(ib_dev, sqp->qp.port, wr->pkey_index, &pkey); |
2316 | sqp->ud_header.bth.pkey = cpu_to_be16(pkey); | 2316 | sqp->ud_header.bth.pkey = cpu_to_be16(pkey); |
2317 | sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->wr.ud.remote_qpn); | 2317 | sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->remote_qpn); |
2318 | sqp->ud_header.bth.psn = cpu_to_be32((sqp->send_psn++) & ((1 << 24) - 1)); | 2318 | sqp->ud_header.bth.psn = cpu_to_be32((sqp->send_psn++) & ((1 << 24) - 1)); |
2319 | sqp->ud_header.deth.qkey = cpu_to_be32(wr->wr.ud.remote_qkey & 0x80000000 ? | 2319 | sqp->ud_header.deth.qkey = cpu_to_be32(wr->remote_qkey & 0x80000000 ? |
2320 | sqp->qkey : wr->wr.ud.remote_qkey); | 2320 | sqp->qkey : wr->remote_qkey); |
2321 | sqp->ud_header.deth.source_qpn = cpu_to_be32(sqp->qp.ibqp.qp_num); | 2321 | sqp->ud_header.deth.source_qpn = cpu_to_be32(sqp->qp.ibqp.qp_num); |
2322 | 2322 | ||
2323 | header_size = ib_ud_header_pack(&sqp->ud_header, sqp->header_buf); | 2323 | header_size = ib_ud_header_pack(&sqp->ud_header, sqp->header_buf); |
@@ -2405,43 +2405,45 @@ static __be32 convert_access(int acc) | |||
2405 | cpu_to_be32(MLX4_WQE_FMR_PERM_LOCAL_READ); | 2405 | cpu_to_be32(MLX4_WQE_FMR_PERM_LOCAL_READ); |
2406 | } | 2406 | } |
2407 | 2407 | ||
2408 | static void set_fmr_seg(struct mlx4_wqe_fmr_seg *fseg, struct ib_send_wr *wr) | 2408 | static void set_fmr_seg(struct mlx4_wqe_fmr_seg *fseg, |
2409 | struct ib_fast_reg_wr *wr) | ||
2409 | { | 2410 | { |
2410 | struct mlx4_ib_fast_reg_page_list *mfrpl = to_mfrpl(wr->wr.fast_reg.page_list); | 2411 | struct mlx4_ib_fast_reg_page_list *mfrpl = to_mfrpl(wr->page_list); |
2411 | int i; | 2412 | int i; |
2412 | 2413 | ||
2413 | for (i = 0; i < wr->wr.fast_reg.page_list_len; ++i) | 2414 | for (i = 0; i < wr->page_list_len; ++i) |
2414 | mfrpl->mapped_page_list[i] = | 2415 | mfrpl->mapped_page_list[i] = |
2415 | cpu_to_be64(wr->wr.fast_reg.page_list->page_list[i] | | 2416 | cpu_to_be64(wr->page_list->page_list[i] | |
2416 | MLX4_MTT_FLAG_PRESENT); | 2417 | MLX4_MTT_FLAG_PRESENT); |
2417 | 2418 | ||
2418 | fseg->flags = convert_access(wr->wr.fast_reg.access_flags); | 2419 | fseg->flags = convert_access(wr->access_flags); |
2419 | fseg->mem_key = cpu_to_be32(wr->wr.fast_reg.rkey); | 2420 | fseg->mem_key = cpu_to_be32(wr->rkey); |
2420 | fseg->buf_list = cpu_to_be64(mfrpl->map); | 2421 | fseg->buf_list = cpu_to_be64(mfrpl->map); |
2421 | fseg->start_addr = cpu_to_be64(wr->wr.fast_reg.iova_start); | 2422 | fseg->start_addr = cpu_to_be64(wr->iova_start); |
2422 | fseg->reg_len = cpu_to_be64(wr->wr.fast_reg.length); | 2423 | fseg->reg_len = cpu_to_be64(wr->length); |
2423 | fseg->offset = 0; /* XXX -- is this just for ZBVA? */ | 2424 | fseg->offset = 0; /* XXX -- is this just for ZBVA? */ |
2424 | fseg->page_size = cpu_to_be32(wr->wr.fast_reg.page_shift); | 2425 | fseg->page_size = cpu_to_be32(wr->page_shift); |
2425 | fseg->reserved[0] = 0; | 2426 | fseg->reserved[0] = 0; |
2426 | fseg->reserved[1] = 0; | 2427 | fseg->reserved[1] = 0; |
2427 | } | 2428 | } |
2428 | 2429 | ||
2429 | static void set_bind_seg(struct mlx4_wqe_bind_seg *bseg, struct ib_send_wr *wr) | 2430 | static void set_bind_seg(struct mlx4_wqe_bind_seg *bseg, |
2431 | struct ib_bind_mw_wr *wr) | ||
2430 | { | 2432 | { |
2431 | bseg->flags1 = | 2433 | bseg->flags1 = |
2432 | convert_access(wr->wr.bind_mw.bind_info.mw_access_flags) & | 2434 | convert_access(wr->bind_info.mw_access_flags) & |
2433 | cpu_to_be32(MLX4_WQE_FMR_AND_BIND_PERM_REMOTE_READ | | 2435 | cpu_to_be32(MLX4_WQE_FMR_AND_BIND_PERM_REMOTE_READ | |
2434 | MLX4_WQE_FMR_AND_BIND_PERM_REMOTE_WRITE | | 2436 | MLX4_WQE_FMR_AND_BIND_PERM_REMOTE_WRITE | |
2435 | MLX4_WQE_FMR_AND_BIND_PERM_ATOMIC); | 2437 | MLX4_WQE_FMR_AND_BIND_PERM_ATOMIC); |
2436 | bseg->flags2 = 0; | 2438 | bseg->flags2 = 0; |
2437 | if (wr->wr.bind_mw.mw->type == IB_MW_TYPE_2) | 2439 | if (wr->mw->type == IB_MW_TYPE_2) |
2438 | bseg->flags2 |= cpu_to_be32(MLX4_WQE_BIND_TYPE_2); | 2440 | bseg->flags2 |= cpu_to_be32(MLX4_WQE_BIND_TYPE_2); |
2439 | if (wr->wr.bind_mw.bind_info.mw_access_flags & IB_ZERO_BASED) | 2441 | if (wr->bind_info.mw_access_flags & IB_ZERO_BASED) |
2440 | bseg->flags2 |= cpu_to_be32(MLX4_WQE_BIND_ZERO_BASED); | 2442 | bseg->flags2 |= cpu_to_be32(MLX4_WQE_BIND_ZERO_BASED); |
2441 | bseg->new_rkey = cpu_to_be32(wr->wr.bind_mw.rkey); | 2443 | bseg->new_rkey = cpu_to_be32(wr->rkey); |
2442 | bseg->lkey = cpu_to_be32(wr->wr.bind_mw.bind_info.mr->lkey); | 2444 | bseg->lkey = cpu_to_be32(wr->bind_info.mr->lkey); |
2443 | bseg->addr = cpu_to_be64(wr->wr.bind_mw.bind_info.addr); | 2445 | bseg->addr = cpu_to_be64(wr->bind_info.addr); |
2444 | bseg->length = cpu_to_be64(wr->wr.bind_mw.bind_info.length); | 2446 | bseg->length = cpu_to_be64(wr->bind_info.length); |
2445 | } | 2447 | } |
2446 | 2448 | ||
2447 | static void set_local_inv_seg(struct mlx4_wqe_local_inval_seg *iseg, u32 rkey) | 2449 | static void set_local_inv_seg(struct mlx4_wqe_local_inval_seg *iseg, u32 rkey) |
@@ -2458,46 +2460,47 @@ static __always_inline void set_raddr_seg(struct mlx4_wqe_raddr_seg *rseg, | |||
2458 | rseg->reserved = 0; | 2460 | rseg->reserved = 0; |
2459 | } | 2461 | } |
2460 | 2462 | ||
2461 | static void set_atomic_seg(struct mlx4_wqe_atomic_seg *aseg, struct ib_send_wr *wr) | 2463 | static void set_atomic_seg(struct mlx4_wqe_atomic_seg *aseg, |
2464 | struct ib_atomic_wr *wr) | ||
2462 | { | 2465 | { |
2463 | if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP) { | 2466 | if (wr->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP) { |
2464 | aseg->swap_add = cpu_to_be64(wr->wr.atomic.swap); | 2467 | aseg->swap_add = cpu_to_be64(wr->swap); |
2465 | aseg->compare = cpu_to_be64(wr->wr.atomic.compare_add); | 2468 | aseg->compare = cpu_to_be64(wr->compare_add); |
2466 | } else if (wr->opcode == IB_WR_MASKED_ATOMIC_FETCH_AND_ADD) { | 2469 | } else if (wr->wr.opcode == IB_WR_MASKED_ATOMIC_FETCH_AND_ADD) { |
2467 | aseg->swap_add = cpu_to_be64(wr->wr.atomic.compare_add); | 2470 | aseg->swap_add = cpu_to_be64(wr->compare_add); |
2468 | aseg->compare = cpu_to_be64(wr->wr.atomic.compare_add_mask); | 2471 | aseg->compare = cpu_to_be64(wr->compare_add_mask); |
2469 | } else { | 2472 | } else { |
2470 | aseg->swap_add = cpu_to_be64(wr->wr.atomic.compare_add); | 2473 | aseg->swap_add = cpu_to_be64(wr->compare_add); |
2471 | aseg->compare = 0; | 2474 | aseg->compare = 0; |
2472 | } | 2475 | } |
2473 | 2476 | ||
2474 | } | 2477 | } |
2475 | 2478 | ||
2476 | static void set_masked_atomic_seg(struct mlx4_wqe_masked_atomic_seg *aseg, | 2479 | static void set_masked_atomic_seg(struct mlx4_wqe_masked_atomic_seg *aseg, |
2477 | struct ib_send_wr *wr) | 2480 | struct ib_atomic_wr *wr) |
2478 | { | 2481 | { |
2479 | aseg->swap_add = cpu_to_be64(wr->wr.atomic.swap); | 2482 | aseg->swap_add = cpu_to_be64(wr->swap); |
2480 | aseg->swap_add_mask = cpu_to_be64(wr->wr.atomic.swap_mask); | 2483 | aseg->swap_add_mask = cpu_to_be64(wr->swap_mask); |
2481 | aseg->compare = cpu_to_be64(wr->wr.atomic.compare_add); | 2484 | aseg->compare = cpu_to_be64(wr->compare_add); |
2482 | aseg->compare_mask = cpu_to_be64(wr->wr.atomic.compare_add_mask); | 2485 | aseg->compare_mask = cpu_to_be64(wr->compare_add_mask); |
2483 | } | 2486 | } |
2484 | 2487 | ||
2485 | static void set_datagram_seg(struct mlx4_wqe_datagram_seg *dseg, | 2488 | static void set_datagram_seg(struct mlx4_wqe_datagram_seg *dseg, |
2486 | struct ib_send_wr *wr) | 2489 | struct ib_ud_wr *wr) |
2487 | { | 2490 | { |
2488 | memcpy(dseg->av, &to_mah(wr->wr.ud.ah)->av, sizeof (struct mlx4_av)); | 2491 | memcpy(dseg->av, &to_mah(wr->ah)->av, sizeof (struct mlx4_av)); |
2489 | dseg->dqpn = cpu_to_be32(wr->wr.ud.remote_qpn); | 2492 | dseg->dqpn = cpu_to_be32(wr->remote_qpn); |
2490 | dseg->qkey = cpu_to_be32(wr->wr.ud.remote_qkey); | 2493 | dseg->qkey = cpu_to_be32(wr->remote_qkey); |
2491 | dseg->vlan = to_mah(wr->wr.ud.ah)->av.eth.vlan; | 2494 | dseg->vlan = to_mah(wr->ah)->av.eth.vlan; |
2492 | memcpy(dseg->mac, to_mah(wr->wr.ud.ah)->av.eth.mac, 6); | 2495 | memcpy(dseg->mac, to_mah(wr->ah)->av.eth.mac, 6); |
2493 | } | 2496 | } |
2494 | 2497 | ||
2495 | static void set_tunnel_datagram_seg(struct mlx4_ib_dev *dev, | 2498 | static void set_tunnel_datagram_seg(struct mlx4_ib_dev *dev, |
2496 | struct mlx4_wqe_datagram_seg *dseg, | 2499 | struct mlx4_wqe_datagram_seg *dseg, |
2497 | struct ib_send_wr *wr, | 2500 | struct ib_ud_wr *wr, |
2498 | enum mlx4_ib_qp_type qpt) | 2501 | enum mlx4_ib_qp_type qpt) |
2499 | { | 2502 | { |
2500 | union mlx4_ext_av *av = &to_mah(wr->wr.ud.ah)->av; | 2503 | union mlx4_ext_av *av = &to_mah(wr->ah)->av; |
2501 | struct mlx4_av sqp_av = {0}; | 2504 | struct mlx4_av sqp_av = {0}; |
2502 | int port = *((u8 *) &av->ib.port_pd) & 0x3; | 2505 | int port = *((u8 *) &av->ib.port_pd) & 0x3; |
2503 | 2506 | ||
@@ -2516,18 +2519,18 @@ static void set_tunnel_datagram_seg(struct mlx4_ib_dev *dev, | |||
2516 | dseg->qkey = cpu_to_be32(IB_QP_SET_QKEY); | 2519 | dseg->qkey = cpu_to_be32(IB_QP_SET_QKEY); |
2517 | } | 2520 | } |
2518 | 2521 | ||
2519 | static void build_tunnel_header(struct ib_send_wr *wr, void *wqe, unsigned *mlx_seg_len) | 2522 | static void build_tunnel_header(struct ib_ud_wr *wr, void *wqe, unsigned *mlx_seg_len) |
2520 | { | 2523 | { |
2521 | struct mlx4_wqe_inline_seg *inl = wqe; | 2524 | struct mlx4_wqe_inline_seg *inl = wqe; |
2522 | struct mlx4_ib_tunnel_header hdr; | 2525 | struct mlx4_ib_tunnel_header hdr; |
2523 | struct mlx4_ib_ah *ah = to_mah(wr->wr.ud.ah); | 2526 | struct mlx4_ib_ah *ah = to_mah(wr->ah); |
2524 | int spc; | 2527 | int spc; |
2525 | int i; | 2528 | int i; |
2526 | 2529 | ||
2527 | memcpy(&hdr.av, &ah->av, sizeof hdr.av); | 2530 | memcpy(&hdr.av, &ah->av, sizeof hdr.av); |
2528 | hdr.remote_qpn = cpu_to_be32(wr->wr.ud.remote_qpn); | 2531 | hdr.remote_qpn = cpu_to_be32(wr->remote_qpn); |
2529 | hdr.pkey_index = cpu_to_be16(wr->wr.ud.pkey_index); | 2532 | hdr.pkey_index = cpu_to_be16(wr->pkey_index); |
2530 | hdr.qkey = cpu_to_be32(wr->wr.ud.remote_qkey); | 2533 | hdr.qkey = cpu_to_be32(wr->remote_qkey); |
2531 | memcpy(hdr.mac, ah->av.eth.mac, 6); | 2534 | memcpy(hdr.mac, ah->av.eth.mac, 6); |
2532 | hdr.vlan = ah->av.eth.vlan; | 2535 | hdr.vlan = ah->av.eth.vlan; |
2533 | 2536 | ||
@@ -2599,22 +2602,22 @@ static void __set_data_seg(struct mlx4_wqe_data_seg *dseg, struct ib_sge *sg) | |||
2599 | dseg->addr = cpu_to_be64(sg->addr); | 2602 | dseg->addr = cpu_to_be64(sg->addr); |
2600 | } | 2603 | } |
2601 | 2604 | ||
2602 | static int build_lso_seg(struct mlx4_wqe_lso_seg *wqe, struct ib_send_wr *wr, | 2605 | static int build_lso_seg(struct mlx4_wqe_lso_seg *wqe, struct ib_ud_wr *wr, |
2603 | struct mlx4_ib_qp *qp, unsigned *lso_seg_len, | 2606 | struct mlx4_ib_qp *qp, unsigned *lso_seg_len, |
2604 | __be32 *lso_hdr_sz, __be32 *blh) | 2607 | __be32 *lso_hdr_sz, __be32 *blh) |
2605 | { | 2608 | { |
2606 | unsigned halign = ALIGN(sizeof *wqe + wr->wr.ud.hlen, 16); | 2609 | unsigned halign = ALIGN(sizeof *wqe + wr->hlen, 16); |
2607 | 2610 | ||
2608 | if (unlikely(halign > MLX4_IB_CACHE_LINE_SIZE)) | 2611 | if (unlikely(halign > MLX4_IB_CACHE_LINE_SIZE)) |
2609 | *blh = cpu_to_be32(1 << 6); | 2612 | *blh = cpu_to_be32(1 << 6); |
2610 | 2613 | ||
2611 | if (unlikely(!(qp->flags & MLX4_IB_QP_LSO) && | 2614 | if (unlikely(!(qp->flags & MLX4_IB_QP_LSO) && |
2612 | wr->num_sge > qp->sq.max_gs - (halign >> 4))) | 2615 | wr->wr.num_sge > qp->sq.max_gs - (halign >> 4))) |
2613 | return -EINVAL; | 2616 | return -EINVAL; |
2614 | 2617 | ||
2615 | memcpy(wqe->header, wr->wr.ud.header, wr->wr.ud.hlen); | 2618 | memcpy(wqe->header, wr->header, wr->hlen); |
2616 | 2619 | ||
2617 | *lso_hdr_sz = cpu_to_be32(wr->wr.ud.mss << 16 | wr->wr.ud.hlen); | 2620 | *lso_hdr_sz = cpu_to_be32(wr->mss << 16 | wr->hlen); |
2618 | *lso_seg_len = halign; | 2621 | *lso_seg_len = halign; |
2619 | return 0; | 2622 | return 0; |
2620 | } | 2623 | } |
@@ -2713,11 +2716,11 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, | |||
2713 | case IB_WR_ATOMIC_CMP_AND_SWP: | 2716 | case IB_WR_ATOMIC_CMP_AND_SWP: |
2714 | case IB_WR_ATOMIC_FETCH_AND_ADD: | 2717 | case IB_WR_ATOMIC_FETCH_AND_ADD: |
2715 | case IB_WR_MASKED_ATOMIC_FETCH_AND_ADD: | 2718 | case IB_WR_MASKED_ATOMIC_FETCH_AND_ADD: |
2716 | set_raddr_seg(wqe, wr->wr.atomic.remote_addr, | 2719 | set_raddr_seg(wqe, atomic_wr(wr)->remote_addr, |
2717 | wr->wr.atomic.rkey); | 2720 | atomic_wr(wr)->rkey); |
2718 | wqe += sizeof (struct mlx4_wqe_raddr_seg); | 2721 | wqe += sizeof (struct mlx4_wqe_raddr_seg); |
2719 | 2722 | ||
2720 | set_atomic_seg(wqe, wr); | 2723 | set_atomic_seg(wqe, atomic_wr(wr)); |
2721 | wqe += sizeof (struct mlx4_wqe_atomic_seg); | 2724 | wqe += sizeof (struct mlx4_wqe_atomic_seg); |
2722 | 2725 | ||
2723 | size += (sizeof (struct mlx4_wqe_raddr_seg) + | 2726 | size += (sizeof (struct mlx4_wqe_raddr_seg) + |
@@ -2726,11 +2729,11 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, | |||
2726 | break; | 2729 | break; |
2727 | 2730 | ||
2728 | case IB_WR_MASKED_ATOMIC_CMP_AND_SWP: | 2731 | case IB_WR_MASKED_ATOMIC_CMP_AND_SWP: |
2729 | set_raddr_seg(wqe, wr->wr.atomic.remote_addr, | 2732 | set_raddr_seg(wqe, atomic_wr(wr)->remote_addr, |
2730 | wr->wr.atomic.rkey); | 2733 | atomic_wr(wr)->rkey); |
2731 | wqe += sizeof (struct mlx4_wqe_raddr_seg); | 2734 | wqe += sizeof (struct mlx4_wqe_raddr_seg); |
2732 | 2735 | ||
2733 | set_masked_atomic_seg(wqe, wr); | 2736 | set_masked_atomic_seg(wqe, atomic_wr(wr)); |
2734 | wqe += sizeof (struct mlx4_wqe_masked_atomic_seg); | 2737 | wqe += sizeof (struct mlx4_wqe_masked_atomic_seg); |
2735 | 2738 | ||
2736 | size += (sizeof (struct mlx4_wqe_raddr_seg) + | 2739 | size += (sizeof (struct mlx4_wqe_raddr_seg) + |
@@ -2741,8 +2744,8 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, | |||
2741 | case IB_WR_RDMA_READ: | 2744 | case IB_WR_RDMA_READ: |
2742 | case IB_WR_RDMA_WRITE: | 2745 | case IB_WR_RDMA_WRITE: |
2743 | case IB_WR_RDMA_WRITE_WITH_IMM: | 2746 | case IB_WR_RDMA_WRITE_WITH_IMM: |
2744 | set_raddr_seg(wqe, wr->wr.rdma.remote_addr, | 2747 | set_raddr_seg(wqe, rdma_wr(wr)->remote_addr, |
2745 | wr->wr.rdma.rkey); | 2748 | rdma_wr(wr)->rkey); |
2746 | wqe += sizeof (struct mlx4_wqe_raddr_seg); | 2749 | wqe += sizeof (struct mlx4_wqe_raddr_seg); |
2747 | size += sizeof (struct mlx4_wqe_raddr_seg) / 16; | 2750 | size += sizeof (struct mlx4_wqe_raddr_seg) / 16; |
2748 | break; | 2751 | break; |
@@ -2758,7 +2761,7 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, | |||
2758 | case IB_WR_FAST_REG_MR: | 2761 | case IB_WR_FAST_REG_MR: |
2759 | ctrl->srcrb_flags |= | 2762 | ctrl->srcrb_flags |= |
2760 | cpu_to_be32(MLX4_WQE_CTRL_STRONG_ORDER); | 2763 | cpu_to_be32(MLX4_WQE_CTRL_STRONG_ORDER); |
2761 | set_fmr_seg(wqe, wr); | 2764 | set_fmr_seg(wqe, fast_reg_wr(wr)); |
2762 | wqe += sizeof (struct mlx4_wqe_fmr_seg); | 2765 | wqe += sizeof (struct mlx4_wqe_fmr_seg); |
2763 | size += sizeof (struct mlx4_wqe_fmr_seg) / 16; | 2766 | size += sizeof (struct mlx4_wqe_fmr_seg) / 16; |
2764 | break; | 2767 | break; |
@@ -2766,7 +2769,7 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, | |||
2766 | case IB_WR_BIND_MW: | 2769 | case IB_WR_BIND_MW: |
2767 | ctrl->srcrb_flags |= | 2770 | ctrl->srcrb_flags |= |
2768 | cpu_to_be32(MLX4_WQE_CTRL_STRONG_ORDER); | 2771 | cpu_to_be32(MLX4_WQE_CTRL_STRONG_ORDER); |
2769 | set_bind_seg(wqe, wr); | 2772 | set_bind_seg(wqe, bind_mw_wr(wr)); |
2770 | wqe += sizeof(struct mlx4_wqe_bind_seg); | 2773 | wqe += sizeof(struct mlx4_wqe_bind_seg); |
2771 | size += sizeof(struct mlx4_wqe_bind_seg) / 16; | 2774 | size += sizeof(struct mlx4_wqe_bind_seg) / 16; |
2772 | break; | 2775 | break; |
@@ -2777,7 +2780,8 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, | |||
2777 | break; | 2780 | break; |
2778 | 2781 | ||
2779 | case MLX4_IB_QPT_TUN_SMI_OWNER: | 2782 | case MLX4_IB_QPT_TUN_SMI_OWNER: |
2780 | err = build_sriov_qp0_header(to_msqp(qp), wr, ctrl, &seglen); | 2783 | err = build_sriov_qp0_header(to_msqp(qp), ud_wr(wr), |
2784 | ctrl, &seglen); | ||
2781 | if (unlikely(err)) { | 2785 | if (unlikely(err)) { |
2782 | *bad_wr = wr; | 2786 | *bad_wr = wr; |
2783 | goto out; | 2787 | goto out; |
@@ -2788,19 +2792,20 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, | |||
2788 | case MLX4_IB_QPT_TUN_SMI: | 2792 | case MLX4_IB_QPT_TUN_SMI: |
2789 | case MLX4_IB_QPT_TUN_GSI: | 2793 | case MLX4_IB_QPT_TUN_GSI: |
2790 | /* this is a UD qp used in MAD responses to slaves. */ | 2794 | /* this is a UD qp used in MAD responses to slaves. */ |
2791 | set_datagram_seg(wqe, wr); | 2795 | set_datagram_seg(wqe, ud_wr(wr)); |
2792 | /* set the forced-loopback bit in the data seg av */ | 2796 | /* set the forced-loopback bit in the data seg av */ |
2793 | *(__be32 *) wqe |= cpu_to_be32(0x80000000); | 2797 | *(__be32 *) wqe |= cpu_to_be32(0x80000000); |
2794 | wqe += sizeof (struct mlx4_wqe_datagram_seg); | 2798 | wqe += sizeof (struct mlx4_wqe_datagram_seg); |
2795 | size += sizeof (struct mlx4_wqe_datagram_seg) / 16; | 2799 | size += sizeof (struct mlx4_wqe_datagram_seg) / 16; |
2796 | break; | 2800 | break; |
2797 | case MLX4_IB_QPT_UD: | 2801 | case MLX4_IB_QPT_UD: |
2798 | set_datagram_seg(wqe, wr); | 2802 | set_datagram_seg(wqe, ud_wr(wr)); |
2799 | wqe += sizeof (struct mlx4_wqe_datagram_seg); | 2803 | wqe += sizeof (struct mlx4_wqe_datagram_seg); |
2800 | size += sizeof (struct mlx4_wqe_datagram_seg) / 16; | 2804 | size += sizeof (struct mlx4_wqe_datagram_seg) / 16; |
2801 | 2805 | ||
2802 | if (wr->opcode == IB_WR_LSO) { | 2806 | if (wr->opcode == IB_WR_LSO) { |
2803 | err = build_lso_seg(wqe, wr, qp, &seglen, &lso_hdr_sz, &blh); | 2807 | err = build_lso_seg(wqe, ud_wr(wr), qp, &seglen, |
2808 | &lso_hdr_sz, &blh); | ||
2804 | if (unlikely(err)) { | 2809 | if (unlikely(err)) { |
2805 | *bad_wr = wr; | 2810 | *bad_wr = wr; |
2806 | goto out; | 2811 | goto out; |
@@ -2812,7 +2817,8 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, | |||
2812 | break; | 2817 | break; |
2813 | 2818 | ||
2814 | case MLX4_IB_QPT_PROXY_SMI_OWNER: | 2819 | case MLX4_IB_QPT_PROXY_SMI_OWNER: |
2815 | err = build_sriov_qp0_header(to_msqp(qp), wr, ctrl, &seglen); | 2820 | err = build_sriov_qp0_header(to_msqp(qp), ud_wr(wr), |
2821 | ctrl, &seglen); | ||
2816 | if (unlikely(err)) { | 2822 | if (unlikely(err)) { |
2817 | *bad_wr = wr; | 2823 | *bad_wr = wr; |
2818 | goto out; | 2824 | goto out; |
@@ -2823,7 +2829,7 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, | |||
2823 | add_zero_len_inline(wqe); | 2829 | add_zero_len_inline(wqe); |
2824 | wqe += 16; | 2830 | wqe += 16; |
2825 | size++; | 2831 | size++; |
2826 | build_tunnel_header(wr, wqe, &seglen); | 2832 | build_tunnel_header(ud_wr(wr), wqe, &seglen); |
2827 | wqe += seglen; | 2833 | wqe += seglen; |
2828 | size += seglen / 16; | 2834 | size += seglen / 16; |
2829 | break; | 2835 | break; |
@@ -2833,18 +2839,20 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, | |||
2833 | * In this case we first add a UD segment targeting | 2839 | * In this case we first add a UD segment targeting |
2834 | * the tunnel qp, and then add a header with address | 2840 | * the tunnel qp, and then add a header with address |
2835 | * information */ | 2841 | * information */ |
2836 | set_tunnel_datagram_seg(to_mdev(ibqp->device), wqe, wr, | 2842 | set_tunnel_datagram_seg(to_mdev(ibqp->device), wqe, |
2843 | ud_wr(wr), | ||
2837 | qp->mlx4_ib_qp_type); | 2844 | qp->mlx4_ib_qp_type); |
2838 | wqe += sizeof (struct mlx4_wqe_datagram_seg); | 2845 | wqe += sizeof (struct mlx4_wqe_datagram_seg); |
2839 | size += sizeof (struct mlx4_wqe_datagram_seg) / 16; | 2846 | size += sizeof (struct mlx4_wqe_datagram_seg) / 16; |
2840 | build_tunnel_header(wr, wqe, &seglen); | 2847 | build_tunnel_header(ud_wr(wr), wqe, &seglen); |
2841 | wqe += seglen; | 2848 | wqe += seglen; |
2842 | size += seglen / 16; | 2849 | size += seglen / 16; |
2843 | break; | 2850 | break; |
2844 | 2851 | ||
2845 | case MLX4_IB_QPT_SMI: | 2852 | case MLX4_IB_QPT_SMI: |
2846 | case MLX4_IB_QPT_GSI: | 2853 | case MLX4_IB_QPT_GSI: |
2847 | err = build_mlx_header(to_msqp(qp), wr, ctrl, &seglen); | 2854 | err = build_mlx_header(to_msqp(qp), ud_wr(wr), ctrl, |
2855 | &seglen); | ||
2848 | if (unlikely(err)) { | 2856 | if (unlikely(err)) { |
2849 | *bad_wr = wr; | 2857 | *bad_wr = wr; |
2850 | goto out; | 2858 | goto out; |
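[Editorial note] The mlx4 send path above downcasts the generic ib_send_wr to the specific WR type with small inline helpers (ud_wr(), rdma_wr(), atomic_wr(), and so on). Their shape follows the umr_wr() helper added for mlx5 below; a minimal sketch of two of them, assuming the usual container_of idiom over the embedded 'wr' member and the struct definitions this patch adds to include/rdma/ib_verbs.h:

	static inline struct ib_ud_wr *ud_wr(struct ib_send_wr *wr)
	{
		return container_of(wr, struct ib_ud_wr, wr);
	}

	static inline struct ib_rdma_wr *rdma_wr(struct ib_send_wr *wr)
	{
		return container_of(wr, struct ib_rdma_wr, wr);
	}

The cast is only valid when the ib_send_wr handed to the driver is the one embedded in the larger WR structure, which is exactly what the converted ULP callers now guarantee.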
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h index 22123b79d550..29f3ecdbe790 100644 --- a/drivers/infiniband/hw/mlx5/mlx5_ib.h +++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h | |||
@@ -245,6 +245,7 @@ enum mlx5_ib_qp_flags { | |||
245 | }; | 245 | }; |
246 | 246 | ||
247 | struct mlx5_umr_wr { | 247 | struct mlx5_umr_wr { |
248 | struct ib_send_wr wr; | ||
248 | union { | 249 | union { |
249 | u64 virt_addr; | 250 | u64 virt_addr; |
250 | u64 offset; | 251 | u64 offset; |
@@ -257,6 +258,11 @@ struct mlx5_umr_wr { | |||
257 | u32 mkey; | 258 | u32 mkey; |
258 | }; | 259 | }; |
259 | 260 | ||
261 | static inline struct mlx5_umr_wr *umr_wr(struct ib_send_wr *wr) | ||
262 | { | ||
263 | return container_of(wr, struct mlx5_umr_wr, wr); | ||
264 | } | ||
265 | |||
260 | struct mlx5_shared_mr_info { | 266 | struct mlx5_shared_mr_info { |
261 | int mr_id; | 267 | int mr_id; |
262 | struct ib_umem *umem; | 268 | struct ib_umem *umem; |
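[Editorial note] Drivers can extend the scheme with private WR types: mlx5_umr_wr now embeds struct ib_send_wr and gains the matching umr_wr() downcast shown above. The mr.c hunks that follow switch the UMR paths from a bare ib_send_wr on the stack to an on-stack mlx5_umr_wr whose embedded base WR is what actually gets posted. Condensed to its essentials (local names as in the surrounding driver code, error handling omitted; a sketch, not the full conversion):

	struct mlx5_umr_wr umrwr;
	struct ib_send_wr *bad;

	memset(&umrwr, 0, sizeof(umrwr));
	umrwr.wr.wr_id  = (u64)(unsigned long)&umr_context;	/* base-WR fields via .wr */
	umrwr.wr.opcode = MLX5_IB_WR_UMR;
	umrwr.mkey      = mr->mmr.key;				/* UMR-specific fields directly */

	err = ib_post_send(umrc->qp, &umrwr.wr, &bad);		/* post the embedded base WR */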
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c index 54a15b5d336d..b30d4ae0fb61 100644 --- a/drivers/infiniband/hw/mlx5/mr.c +++ b/drivers/infiniband/hw/mlx5/mr.c | |||
@@ -687,7 +687,7 @@ static void prep_umr_reg_wqe(struct ib_pd *pd, struct ib_send_wr *wr, | |||
687 | int access_flags) | 687 | int access_flags) |
688 | { | 688 | { |
689 | struct mlx5_ib_dev *dev = to_mdev(pd->device); | 689 | struct mlx5_ib_dev *dev = to_mdev(pd->device); |
690 | struct mlx5_umr_wr *umrwr = (struct mlx5_umr_wr *)&wr->wr.fast_reg; | 690 | struct mlx5_umr_wr *umrwr = umr_wr(wr); |
691 | 691 | ||
692 | sg->addr = dma; | 692 | sg->addr = dma; |
693 | sg->length = ALIGN(sizeof(u64) * n, 64); | 693 | sg->length = ALIGN(sizeof(u64) * n, 64); |
@@ -715,7 +715,7 @@ static void prep_umr_reg_wqe(struct ib_pd *pd, struct ib_send_wr *wr, | |||
715 | static void prep_umr_unreg_wqe(struct mlx5_ib_dev *dev, | 715 | static void prep_umr_unreg_wqe(struct mlx5_ib_dev *dev, |
716 | struct ib_send_wr *wr, u32 key) | 716 | struct ib_send_wr *wr, u32 key) |
717 | { | 717 | { |
718 | struct mlx5_umr_wr *umrwr = (struct mlx5_umr_wr *)&wr->wr.fast_reg; | 718 | struct mlx5_umr_wr *umrwr = umr_wr(wr); |
719 | 719 | ||
720 | wr->send_flags = MLX5_IB_SEND_UMR_UNREG | MLX5_IB_SEND_UMR_FAIL_IF_FREE; | 720 | wr->send_flags = MLX5_IB_SEND_UMR_UNREG | MLX5_IB_SEND_UMR_FAIL_IF_FREE; |
721 | wr->opcode = MLX5_IB_WR_UMR; | 721 | wr->opcode = MLX5_IB_WR_UMR; |
@@ -752,7 +752,8 @@ static struct mlx5_ib_mr *reg_umr(struct ib_pd *pd, struct ib_umem *umem, | |||
752 | struct device *ddev = dev->ib_dev.dma_device; | 752 | struct device *ddev = dev->ib_dev.dma_device; |
753 | struct umr_common *umrc = &dev->umrc; | 753 | struct umr_common *umrc = &dev->umrc; |
754 | struct mlx5_ib_umr_context umr_context; | 754 | struct mlx5_ib_umr_context umr_context; |
755 | struct ib_send_wr wr, *bad; | 755 | struct mlx5_umr_wr umrwr; |
756 | struct ib_send_wr *bad; | ||
756 | struct mlx5_ib_mr *mr; | 757 | struct mlx5_ib_mr *mr; |
757 | struct ib_sge sg; | 758 | struct ib_sge sg; |
758 | int size; | 759 | int size; |
@@ -798,14 +799,14 @@ static struct mlx5_ib_mr *reg_umr(struct ib_pd *pd, struct ib_umem *umem, | |||
798 | goto free_pas; | 799 | goto free_pas; |
799 | } | 800 | } |
800 | 801 | ||
801 | memset(&wr, 0, sizeof(wr)); | 802 | memset(&umrwr, 0, sizeof(umrwr)); |
802 | wr.wr_id = (u64)(unsigned long)&umr_context; | 803 | umrwr.wr.wr_id = (u64)(unsigned long)&umr_context; |
803 | prep_umr_reg_wqe(pd, &wr, &sg, dma, npages, mr->mmr.key, page_shift, | 804 | prep_umr_reg_wqe(pd, &umrwr.wr, &sg, dma, npages, mr->mmr.key, |
804 | virt_addr, len, access_flags); | 805 | page_shift, virt_addr, len, access_flags); |
805 | 806 | ||
806 | mlx5_ib_init_umr_context(&umr_context); | 807 | mlx5_ib_init_umr_context(&umr_context); |
807 | down(&umrc->sem); | 808 | down(&umrc->sem); |
808 | err = ib_post_send(umrc->qp, &wr, &bad); | 809 | err = ib_post_send(umrc->qp, &umrwr.wr, &bad); |
809 | if (err) { | 810 | if (err) { |
810 | mlx5_ib_warn(dev, "post send failed, err %d\n", err); | 811 | mlx5_ib_warn(dev, "post send failed, err %d\n", err); |
811 | goto unmap_dma; | 812 | goto unmap_dma; |
@@ -851,8 +852,8 @@ int mlx5_ib_update_mtt(struct mlx5_ib_mr *mr, u64 start_page_index, int npages, | |||
851 | int size; | 852 | int size; |
852 | __be64 *pas; | 853 | __be64 *pas; |
853 | dma_addr_t dma; | 854 | dma_addr_t dma; |
854 | struct ib_send_wr wr, *bad; | 855 | struct ib_send_wr *bad; |
855 | struct mlx5_umr_wr *umrwr = (struct mlx5_umr_wr *)&wr.wr.fast_reg; | 856 | struct mlx5_umr_wr wr; |
856 | struct ib_sge sg; | 857 | struct ib_sge sg; |
857 | int err = 0; | 858 | int err = 0; |
858 | const int page_index_alignment = MLX5_UMR_MTT_ALIGNMENT / sizeof(u64); | 859 | const int page_index_alignment = MLX5_UMR_MTT_ALIGNMENT / sizeof(u64); |
@@ -917,26 +918,26 @@ int mlx5_ib_update_mtt(struct mlx5_ib_mr *mr, u64 start_page_index, int npages, | |||
917 | dma_sync_single_for_device(ddev, dma, size, DMA_TO_DEVICE); | 918 | dma_sync_single_for_device(ddev, dma, size, DMA_TO_DEVICE); |
918 | 919 | ||
919 | memset(&wr, 0, sizeof(wr)); | 920 | memset(&wr, 0, sizeof(wr)); |
920 | wr.wr_id = (u64)(unsigned long)&umr_context; | 921 | wr.wr.wr_id = (u64)(unsigned long)&umr_context; |
921 | 922 | ||
922 | sg.addr = dma; | 923 | sg.addr = dma; |
923 | sg.length = ALIGN(npages * sizeof(u64), | 924 | sg.length = ALIGN(npages * sizeof(u64), |
924 | MLX5_UMR_MTT_ALIGNMENT); | 925 | MLX5_UMR_MTT_ALIGNMENT); |
925 | sg.lkey = dev->umrc.pd->local_dma_lkey; | 926 | sg.lkey = dev->umrc.pd->local_dma_lkey; |
926 | 927 | ||
927 | wr.send_flags = MLX5_IB_SEND_UMR_FAIL_IF_FREE | | 928 | wr.wr.send_flags = MLX5_IB_SEND_UMR_FAIL_IF_FREE | |
928 | MLX5_IB_SEND_UMR_UPDATE_MTT; | 929 | MLX5_IB_SEND_UMR_UPDATE_MTT; |
929 | wr.sg_list = &sg; | 930 | wr.wr.sg_list = &sg; |
930 | wr.num_sge = 1; | 931 | wr.wr.num_sge = 1; |
931 | wr.opcode = MLX5_IB_WR_UMR; | 932 | wr.wr.opcode = MLX5_IB_WR_UMR; |
932 | umrwr->npages = sg.length / sizeof(u64); | 933 | wr.npages = sg.length / sizeof(u64); |
933 | umrwr->page_shift = PAGE_SHIFT; | 934 | wr.page_shift = PAGE_SHIFT; |
934 | umrwr->mkey = mr->mmr.key; | 935 | wr.mkey = mr->mmr.key; |
935 | umrwr->target.offset = start_page_index; | 936 | wr.target.offset = start_page_index; |
936 | 937 | ||
937 | mlx5_ib_init_umr_context(&umr_context); | 938 | mlx5_ib_init_umr_context(&umr_context); |
938 | down(&umrc->sem); | 939 | down(&umrc->sem); |
939 | err = ib_post_send(umrc->qp, &wr, &bad); | 940 | err = ib_post_send(umrc->qp, &wr.wr, &bad); |
940 | if (err) { | 941 | if (err) { |
941 | mlx5_ib_err(dev, "UMR post send failed, err %d\n", err); | 942 | mlx5_ib_err(dev, "UMR post send failed, err %d\n", err); |
942 | } else { | 943 | } else { |
@@ -1122,16 +1123,17 @@ static int unreg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr) | |||
1122 | { | 1123 | { |
1123 | struct umr_common *umrc = &dev->umrc; | 1124 | struct umr_common *umrc = &dev->umrc; |
1124 | struct mlx5_ib_umr_context umr_context; | 1125 | struct mlx5_ib_umr_context umr_context; |
1125 | struct ib_send_wr wr, *bad; | 1126 | struct mlx5_umr_wr umrwr; |
1127 | struct ib_send_wr *bad; | ||
1126 | int err; | 1128 | int err; |
1127 | 1129 | ||
1128 | memset(&wr, 0, sizeof(wr)); | 1130 | memset(&umrwr.wr, 0, sizeof(umrwr)); |
1129 | wr.wr_id = (u64)(unsigned long)&umr_context; | 1131 | umrwr.wr.wr_id = (u64)(unsigned long)&umr_context; |
1130 | prep_umr_unreg_wqe(dev, &wr, mr->mmr.key); | 1132 | prep_umr_unreg_wqe(dev, &umrwr.wr, mr->mmr.key); |
1131 | 1133 | ||
1132 | mlx5_ib_init_umr_context(&umr_context); | 1134 | mlx5_ib_init_umr_context(&umr_context); |
1133 | down(&umrc->sem); | 1135 | down(&umrc->sem); |
1134 | err = ib_post_send(umrc->qp, &wr, &bad); | 1136 | err = ib_post_send(umrc->qp, &umrwr.wr, &bad); |
1135 | if (err) { | 1137 | if (err) { |
1136 | up(&umrc->sem); | 1138 | up(&umrc->sem); |
1137 | mlx5_ib_dbg(dev, "err %d\n", err); | 1139 | mlx5_ib_dbg(dev, "err %d\n", err); |
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c index 6f521a3418e8..d4c36af4270f 100644 --- a/drivers/infiniband/hw/mlx5/qp.c +++ b/drivers/infiniband/hw/mlx5/qp.c | |||
@@ -1838,9 +1838,9 @@ static __always_inline void set_raddr_seg(struct mlx5_wqe_raddr_seg *rseg, | |||
1838 | static void set_datagram_seg(struct mlx5_wqe_datagram_seg *dseg, | 1838 | static void set_datagram_seg(struct mlx5_wqe_datagram_seg *dseg, |
1839 | struct ib_send_wr *wr) | 1839 | struct ib_send_wr *wr) |
1840 | { | 1840 | { |
1841 | memcpy(&dseg->av, &to_mah(wr->wr.ud.ah)->av, sizeof(struct mlx5_av)); | 1841 | memcpy(&dseg->av, &to_mah(ud_wr(wr)->ah)->av, sizeof(struct mlx5_av)); |
1842 | dseg->av.dqp_dct = cpu_to_be32(wr->wr.ud.remote_qpn | MLX5_EXTENDED_UD_AV); | 1842 | dseg->av.dqp_dct = cpu_to_be32(ud_wr(wr)->remote_qpn | MLX5_EXTENDED_UD_AV); |
1843 | dseg->av.key.qkey.qkey = cpu_to_be32(wr->wr.ud.remote_qkey); | 1843 | dseg->av.key.qkey.qkey = cpu_to_be32(ud_wr(wr)->remote_qkey); |
1844 | } | 1844 | } |
1845 | 1845 | ||
1846 | static void set_data_ptr_seg(struct mlx5_wqe_data_seg *dseg, struct ib_sge *sg) | 1846 | static void set_data_ptr_seg(struct mlx5_wqe_data_seg *dseg, struct ib_sge *sg) |
@@ -1908,7 +1908,7 @@ static void set_frwr_umr_segment(struct mlx5_wqe_umr_ctrl_seg *umr, | |||
1908 | } | 1908 | } |
1909 | 1909 | ||
1910 | umr->flags = (1 << 5); /* fail if not free */ | 1910 | umr->flags = (1 << 5); /* fail if not free */ |
1911 | umr->klm_octowords = get_klm_octo(wr->wr.fast_reg.page_list_len); | 1911 | umr->klm_octowords = get_klm_octo(fast_reg_wr(wr)->page_list_len); |
1912 | umr->mkey_mask = frwr_mkey_mask(); | 1912 | umr->mkey_mask = frwr_mkey_mask(); |
1913 | } | 1913 | } |
1914 | 1914 | ||
@@ -1952,7 +1952,7 @@ static __be64 get_umr_update_mtt_mask(void) | |||
1952 | static void set_reg_umr_segment(struct mlx5_wqe_umr_ctrl_seg *umr, | 1952 | static void set_reg_umr_segment(struct mlx5_wqe_umr_ctrl_seg *umr, |
1953 | struct ib_send_wr *wr) | 1953 | struct ib_send_wr *wr) |
1954 | { | 1954 | { |
1955 | struct mlx5_umr_wr *umrwr = (struct mlx5_umr_wr *)&wr->wr.fast_reg; | 1955 | struct mlx5_umr_wr *umrwr = umr_wr(wr); |
1956 | 1956 | ||
1957 | memset(umr, 0, sizeof(*umr)); | 1957 | memset(umr, 0, sizeof(*umr)); |
1958 | 1958 | ||
@@ -1996,20 +1996,20 @@ static void set_mkey_segment(struct mlx5_mkey_seg *seg, struct ib_send_wr *wr, | |||
1996 | return; | 1996 | return; |
1997 | } | 1997 | } |
1998 | 1998 | ||
1999 | seg->flags = get_umr_flags(wr->wr.fast_reg.access_flags) | | 1999 | seg->flags = get_umr_flags(fast_reg_wr(wr)->access_flags) | |
2000 | MLX5_ACCESS_MODE_MTT; | 2000 | MLX5_ACCESS_MODE_MTT; |
2001 | *writ = seg->flags & (MLX5_PERM_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE); | 2001 | *writ = seg->flags & (MLX5_PERM_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE); |
2002 | seg->qpn_mkey7_0 = cpu_to_be32((wr->wr.fast_reg.rkey & 0xff) | 0xffffff00); | 2002 | seg->qpn_mkey7_0 = cpu_to_be32((fast_reg_wr(wr)->rkey & 0xff) | 0xffffff00); |
2003 | seg->flags_pd = cpu_to_be32(MLX5_MKEY_REMOTE_INVAL); | 2003 | seg->flags_pd = cpu_to_be32(MLX5_MKEY_REMOTE_INVAL); |
2004 | seg->start_addr = cpu_to_be64(wr->wr.fast_reg.iova_start); | 2004 | seg->start_addr = cpu_to_be64(fast_reg_wr(wr)->iova_start); |
2005 | seg->len = cpu_to_be64(wr->wr.fast_reg.length); | 2005 | seg->len = cpu_to_be64(fast_reg_wr(wr)->length); |
2006 | seg->xlt_oct_size = cpu_to_be32((wr->wr.fast_reg.page_list_len + 1) / 2); | 2006 | seg->xlt_oct_size = cpu_to_be32((fast_reg_wr(wr)->page_list_len + 1) / 2); |
2007 | seg->log2_page_size = wr->wr.fast_reg.page_shift; | 2007 | seg->log2_page_size = fast_reg_wr(wr)->page_shift; |
2008 | } | 2008 | } |
2009 | 2009 | ||
2010 | static void set_reg_mkey_segment(struct mlx5_mkey_seg *seg, struct ib_send_wr *wr) | 2010 | static void set_reg_mkey_segment(struct mlx5_mkey_seg *seg, struct ib_send_wr *wr) |
2011 | { | 2011 | { |
2012 | struct mlx5_umr_wr *umrwr = (struct mlx5_umr_wr *)&wr->wr.fast_reg; | 2012 | struct mlx5_umr_wr *umrwr = umr_wr(wr); |
2013 | 2013 | ||
2014 | memset(seg, 0, sizeof(*seg)); | 2014 | memset(seg, 0, sizeof(*seg)); |
2015 | if (wr->send_flags & MLX5_IB_SEND_UMR_UNREG) { | 2015 | if (wr->send_flags & MLX5_IB_SEND_UMR_UNREG) { |
@@ -2034,15 +2034,15 @@ static void set_frwr_pages(struct mlx5_wqe_data_seg *dseg, | |||
2034 | struct mlx5_ib_pd *pd, | 2034 | struct mlx5_ib_pd *pd, |
2035 | int writ) | 2035 | int writ) |
2036 | { | 2036 | { |
2037 | struct mlx5_ib_fast_reg_page_list *mfrpl = to_mfrpl(wr->wr.fast_reg.page_list); | 2037 | struct mlx5_ib_fast_reg_page_list *mfrpl = to_mfrpl(fast_reg_wr(wr)->page_list); |
2038 | u64 *page_list = wr->wr.fast_reg.page_list->page_list; | 2038 | u64 *page_list = fast_reg_wr(wr)->page_list->page_list; |
2039 | u64 perm = MLX5_EN_RD | (writ ? MLX5_EN_WR : 0); | 2039 | u64 perm = MLX5_EN_RD | (writ ? MLX5_EN_WR : 0); |
2040 | int i; | 2040 | int i; |
2041 | 2041 | ||
2042 | for (i = 0; i < wr->wr.fast_reg.page_list_len; i++) | 2042 | for (i = 0; i < fast_reg_wr(wr)->page_list_len; i++) |
2043 | mfrpl->mapped_page_list[i] = cpu_to_be64(page_list[i] | perm); | 2043 | mfrpl->mapped_page_list[i] = cpu_to_be64(page_list[i] | perm); |
2044 | dseg->addr = cpu_to_be64(mfrpl->map); | 2044 | dseg->addr = cpu_to_be64(mfrpl->map); |
2045 | dseg->byte_count = cpu_to_be32(ALIGN(sizeof(u64) * wr->wr.fast_reg.page_list_len, 64)); | 2045 | dseg->byte_count = cpu_to_be32(ALIGN(sizeof(u64) * fast_reg_wr(wr)->page_list_len, 64)); |
2046 | dseg->lkey = cpu_to_be32(pd->ibpd.local_dma_lkey); | 2046 | dseg->lkey = cpu_to_be32(pd->ibpd.local_dma_lkey); |
2047 | } | 2047 | } |
2048 | 2048 | ||
@@ -2224,22 +2224,22 @@ static int mlx5_set_bsf(struct ib_mr *sig_mr, | |||
2224 | return 0; | 2224 | return 0; |
2225 | } | 2225 | } |
2226 | 2226 | ||
2227 | static int set_sig_data_segment(struct ib_send_wr *wr, struct mlx5_ib_qp *qp, | 2227 | static int set_sig_data_segment(struct ib_sig_handover_wr *wr, |
2228 | void **seg, int *size) | 2228 | struct mlx5_ib_qp *qp, void **seg, int *size) |
2229 | { | 2229 | { |
2230 | struct ib_sig_attrs *sig_attrs = wr->wr.sig_handover.sig_attrs; | 2230 | struct ib_sig_attrs *sig_attrs = wr->sig_attrs; |
2231 | struct ib_mr *sig_mr = wr->wr.sig_handover.sig_mr; | 2231 | struct ib_mr *sig_mr = wr->sig_mr; |
2232 | struct mlx5_bsf *bsf; | 2232 | struct mlx5_bsf *bsf; |
2233 | u32 data_len = wr->sg_list->length; | 2233 | u32 data_len = wr->wr.sg_list->length; |
2234 | u32 data_key = wr->sg_list->lkey; | 2234 | u32 data_key = wr->wr.sg_list->lkey; |
2235 | u64 data_va = wr->sg_list->addr; | 2235 | u64 data_va = wr->wr.sg_list->addr; |
2236 | int ret; | 2236 | int ret; |
2237 | int wqe_size; | 2237 | int wqe_size; |
2238 | 2238 | ||
2239 | if (!wr->wr.sig_handover.prot || | 2239 | if (!wr->prot || |
2240 | (data_key == wr->wr.sig_handover.prot->lkey && | 2240 | (data_key == wr->prot->lkey && |
2241 | data_va == wr->wr.sig_handover.prot->addr && | 2241 | data_va == wr->prot->addr && |
2242 | data_len == wr->wr.sig_handover.prot->length)) { | 2242 | data_len == wr->prot->length)) { |
2243 | /** | 2243 | /** |
2244 | * Source domain doesn't contain signature information | 2244 | * Source domain doesn't contain signature information |
2245 | * or data and protection are interleaved in memory. | 2245 | * or data and protection are interleaved in memory. |
@@ -2273,8 +2273,8 @@ static int set_sig_data_segment(struct ib_send_wr *wr, struct mlx5_ib_qp *qp, | |||
2273 | struct mlx5_stride_block_ctrl_seg *sblock_ctrl; | 2273 | struct mlx5_stride_block_ctrl_seg *sblock_ctrl; |
2274 | struct mlx5_stride_block_entry *data_sentry; | 2274 | struct mlx5_stride_block_entry *data_sentry; |
2275 | struct mlx5_stride_block_entry *prot_sentry; | 2275 | struct mlx5_stride_block_entry *prot_sentry; |
2276 | u32 prot_key = wr->wr.sig_handover.prot->lkey; | 2276 | u32 prot_key = wr->prot->lkey; |
2277 | u64 prot_va = wr->wr.sig_handover.prot->addr; | 2277 | u64 prot_va = wr->prot->addr; |
2278 | u16 block_size = sig_attrs->mem.sig.dif.pi_interval; | 2278 | u16 block_size = sig_attrs->mem.sig.dif.pi_interval; |
2279 | int prot_size; | 2279 | int prot_size; |
2280 | 2280 | ||
@@ -2326,16 +2326,16 @@ static int set_sig_data_segment(struct ib_send_wr *wr, struct mlx5_ib_qp *qp, | |||
2326 | } | 2326 | } |
2327 | 2327 | ||
2328 | static void set_sig_mkey_segment(struct mlx5_mkey_seg *seg, | 2328 | static void set_sig_mkey_segment(struct mlx5_mkey_seg *seg, |
2329 | struct ib_send_wr *wr, u32 nelements, | 2329 | struct ib_sig_handover_wr *wr, u32 nelements, |
2330 | u32 length, u32 pdn) | 2330 | u32 length, u32 pdn) |
2331 | { | 2331 | { |
2332 | struct ib_mr *sig_mr = wr->wr.sig_handover.sig_mr; | 2332 | struct ib_mr *sig_mr = wr->sig_mr; |
2333 | u32 sig_key = sig_mr->rkey; | 2333 | u32 sig_key = sig_mr->rkey; |
2334 | u8 sigerr = to_mmr(sig_mr)->sig->sigerr_count & 1; | 2334 | u8 sigerr = to_mmr(sig_mr)->sig->sigerr_count & 1; |
2335 | 2335 | ||
2336 | memset(seg, 0, sizeof(*seg)); | 2336 | memset(seg, 0, sizeof(*seg)); |
2337 | 2337 | ||
2338 | seg->flags = get_umr_flags(wr->wr.sig_handover.access_flags) | | 2338 | seg->flags = get_umr_flags(wr->access_flags) | |
2339 | MLX5_ACCESS_MODE_KLM; | 2339 | MLX5_ACCESS_MODE_KLM; |
2340 | seg->qpn_mkey7_0 = cpu_to_be32((sig_key & 0xff) | 0xffffff00); | 2340 | seg->qpn_mkey7_0 = cpu_to_be32((sig_key & 0xff) | 0xffffff00); |
2341 | seg->flags_pd = cpu_to_be32(MLX5_MKEY_REMOTE_INVAL | sigerr << 26 | | 2341 | seg->flags_pd = cpu_to_be32(MLX5_MKEY_REMOTE_INVAL | sigerr << 26 | |
@@ -2346,7 +2346,7 @@ static void set_sig_mkey_segment(struct mlx5_mkey_seg *seg, | |||
2346 | } | 2346 | } |
2347 | 2347 | ||
2348 | static void set_sig_umr_segment(struct mlx5_wqe_umr_ctrl_seg *umr, | 2348 | static void set_sig_umr_segment(struct mlx5_wqe_umr_ctrl_seg *umr, |
2349 | struct ib_send_wr *wr, u32 nelements) | 2349 | u32 nelements) |
2350 | { | 2350 | { |
2351 | memset(umr, 0, sizeof(*umr)); | 2351 | memset(umr, 0, sizeof(*umr)); |
2352 | 2352 | ||
@@ -2357,37 +2357,37 @@ static void set_sig_umr_segment(struct mlx5_wqe_umr_ctrl_seg *umr, | |||
2357 | } | 2357 | } |
2358 | 2358 | ||
2359 | 2359 | ||
2360 | static int set_sig_umr_wr(struct ib_send_wr *wr, struct mlx5_ib_qp *qp, | 2360 | static int set_sig_umr_wr(struct ib_send_wr *send_wr, struct mlx5_ib_qp *qp, |
2361 | void **seg, int *size) | 2361 | void **seg, int *size) |
2362 | { | 2362 | { |
2363 | struct mlx5_ib_mr *sig_mr = to_mmr(wr->wr.sig_handover.sig_mr); | 2363 | struct ib_sig_handover_wr *wr = sig_handover_wr(send_wr); |
2364 | struct mlx5_ib_mr *sig_mr = to_mmr(wr->sig_mr); | ||
2364 | u32 pdn = get_pd(qp)->pdn; | 2365 | u32 pdn = get_pd(qp)->pdn; |
2365 | u32 klm_oct_size; | 2366 | u32 klm_oct_size; |
2366 | int region_len, ret; | 2367 | int region_len, ret; |
2367 | 2368 | ||
2368 | if (unlikely(wr->num_sge != 1) || | 2369 | if (unlikely(wr->wr.num_sge != 1) || |
2369 | unlikely(wr->wr.sig_handover.access_flags & | 2370 | unlikely(wr->access_flags & IB_ACCESS_REMOTE_ATOMIC) || |
2370 | IB_ACCESS_REMOTE_ATOMIC) || | ||
2371 | unlikely(!sig_mr->sig) || unlikely(!qp->signature_en) || | 2371 | unlikely(!sig_mr->sig) || unlikely(!qp->signature_en) || |
2372 | unlikely(!sig_mr->sig->sig_status_checked)) | 2372 | unlikely(!sig_mr->sig->sig_status_checked)) |
2373 | return -EINVAL; | 2373 | return -EINVAL; |
2374 | 2374 | ||
2375 | /* length of the protected region, data + protection */ | 2375 | /* length of the protected region, data + protection */ |
2376 | region_len = wr->sg_list->length; | 2376 | region_len = wr->wr.sg_list->length; |
2377 | if (wr->wr.sig_handover.prot && | 2377 | if (wr->prot && |
2378 | (wr->wr.sig_handover.prot->lkey != wr->sg_list->lkey || | 2378 | (wr->prot->lkey != wr->wr.sg_list->lkey || |
2379 | wr->wr.sig_handover.prot->addr != wr->sg_list->addr || | 2379 | wr->prot->addr != wr->wr.sg_list->addr || |
2380 | wr->wr.sig_handover.prot->length != wr->sg_list->length)) | 2380 | wr->prot->length != wr->wr.sg_list->length)) |
2381 | region_len += wr->wr.sig_handover.prot->length; | 2381 | region_len += wr->prot->length; |
2382 | 2382 | ||
2383 | /** | 2383 | /** |
2384 | * KLM octoword size - if protection was provided | 2384 | * KLM octoword size - if protection was provided |
2385 | * then we use strided block format (3 octowords), | 2385 | * then we use strided block format (3 octowords), |
2386 | * else we use single KLM (1 octoword) | 2386 | * else we use single KLM (1 octoword) |
2387 | **/ | 2387 | **/ |
2388 | klm_oct_size = wr->wr.sig_handover.prot ? 3 : 1; | 2388 | klm_oct_size = wr->prot ? 3 : 1; |
2389 | 2389 | ||
2390 | set_sig_umr_segment(*seg, wr, klm_oct_size); | 2390 | set_sig_umr_segment(*seg, klm_oct_size); |
2391 | *seg += sizeof(struct mlx5_wqe_umr_ctrl_seg); | 2391 | *seg += sizeof(struct mlx5_wqe_umr_ctrl_seg); |
2392 | *size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16; | 2392 | *size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16; |
2393 | if (unlikely((*seg == qp->sq.qend))) | 2393 | if (unlikely((*seg == qp->sq.qend))) |
@@ -2454,8 +2454,8 @@ static int set_frwr_li_wr(void **seg, struct ib_send_wr *wr, int *size, | |||
2454 | if (unlikely((*seg == qp->sq.qend))) | 2454 | if (unlikely((*seg == qp->sq.qend))) |
2455 | *seg = mlx5_get_send_wqe(qp, 0); | 2455 | *seg = mlx5_get_send_wqe(qp, 0); |
2456 | if (!li) { | 2456 | if (!li) { |
2457 | if (unlikely(wr->wr.fast_reg.page_list_len > | 2457 | if (unlikely(fast_reg_wr(wr)->page_list_len > |
2458 | wr->wr.fast_reg.page_list->max_page_list_len)) | 2458 | fast_reg_wr(wr)->page_list->max_page_list_len)) |
2459 | return -ENOMEM; | 2459 | return -ENOMEM; |
2460 | 2460 | ||
2461 | set_frwr_pages(*seg, wr, mdev, pd, writ); | 2461 | set_frwr_pages(*seg, wr, mdev, pd, writ); |
@@ -2636,8 +2636,8 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, | |||
2636 | case IB_WR_RDMA_READ: | 2636 | case IB_WR_RDMA_READ: |
2637 | case IB_WR_RDMA_WRITE: | 2637 | case IB_WR_RDMA_WRITE: |
2638 | case IB_WR_RDMA_WRITE_WITH_IMM: | 2638 | case IB_WR_RDMA_WRITE_WITH_IMM: |
2639 | set_raddr_seg(seg, wr->wr.rdma.remote_addr, | 2639 | set_raddr_seg(seg, rdma_wr(wr)->remote_addr, |
2640 | wr->wr.rdma.rkey); | 2640 | rdma_wr(wr)->rkey); |
2641 | seg += sizeof(struct mlx5_wqe_raddr_seg); | 2641 | seg += sizeof(struct mlx5_wqe_raddr_seg); |
2642 | size += sizeof(struct mlx5_wqe_raddr_seg) / 16; | 2642 | size += sizeof(struct mlx5_wqe_raddr_seg) / 16; |
2643 | break; | 2643 | break; |
@@ -2666,7 +2666,7 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, | |||
2666 | case IB_WR_FAST_REG_MR: | 2666 | case IB_WR_FAST_REG_MR: |
2667 | next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL; | 2667 | next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL; |
2668 | qp->sq.wr_data[idx] = IB_WR_FAST_REG_MR; | 2668 | qp->sq.wr_data[idx] = IB_WR_FAST_REG_MR; |
2669 | ctrl->imm = cpu_to_be32(wr->wr.fast_reg.rkey); | 2669 | ctrl->imm = cpu_to_be32(fast_reg_wr(wr)->rkey); |
2670 | err = set_frwr_li_wr(&seg, wr, &size, mdev, to_mpd(ibqp->pd), qp); | 2670 | err = set_frwr_li_wr(&seg, wr, &size, mdev, to_mpd(ibqp->pd), qp); |
2671 | if (err) { | 2671 | if (err) { |
2672 | mlx5_ib_warn(dev, "\n"); | 2672 | mlx5_ib_warn(dev, "\n"); |
@@ -2678,7 +2678,7 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, | |||
2678 | 2678 | ||
2679 | case IB_WR_REG_SIG_MR: | 2679 | case IB_WR_REG_SIG_MR: |
2680 | qp->sq.wr_data[idx] = IB_WR_REG_SIG_MR; | 2680 | qp->sq.wr_data[idx] = IB_WR_REG_SIG_MR; |
2681 | mr = to_mmr(wr->wr.sig_handover.sig_mr); | 2681 | mr = to_mmr(sig_handover_wr(wr)->sig_mr); |
2682 | 2682 | ||
2683 | ctrl->imm = cpu_to_be32(mr->ibmr.rkey); | 2683 | ctrl->imm = cpu_to_be32(mr->ibmr.rkey); |
2684 | err = set_sig_umr_wr(wr, qp, &seg, &size); | 2684 | err = set_sig_umr_wr(wr, qp, &seg, &size); |
@@ -2706,7 +2706,7 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, | |||
2706 | goto out; | 2706 | goto out; |
2707 | } | 2707 | } |
2708 | 2708 | ||
2709 | err = set_psv_wr(&wr->wr.sig_handover.sig_attrs->mem, | 2709 | err = set_psv_wr(&sig_handover_wr(wr)->sig_attrs->mem, |
2710 | mr->sig->psv_memory.psv_idx, &seg, | 2710 | mr->sig->psv_memory.psv_idx, &seg, |
2711 | &size); | 2711 | &size); |
2712 | if (err) { | 2712 | if (err) { |
@@ -2728,7 +2728,7 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, | |||
2728 | } | 2728 | } |
2729 | 2729 | ||
2730 | next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL; | 2730 | next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL; |
2731 | err = set_psv_wr(&wr->wr.sig_handover.sig_attrs->wire, | 2731 | err = set_psv_wr(&sig_handover_wr(wr)->sig_attrs->wire, |
2732 | mr->sig->psv_wire.psv_idx, &seg, | 2732 | mr->sig->psv_wire.psv_idx, &seg, |
2733 | &size); | 2733 | &size); |
2734 | if (err) { | 2734 | if (err) { |
@@ -2752,8 +2752,8 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, | |||
2752 | switch (wr->opcode) { | 2752 | switch (wr->opcode) { |
2753 | case IB_WR_RDMA_WRITE: | 2753 | case IB_WR_RDMA_WRITE: |
2754 | case IB_WR_RDMA_WRITE_WITH_IMM: | 2754 | case IB_WR_RDMA_WRITE_WITH_IMM: |
2755 | set_raddr_seg(seg, wr->wr.rdma.remote_addr, | 2755 | set_raddr_seg(seg, rdma_wr(wr)->remote_addr, |
2756 | wr->wr.rdma.rkey); | 2756 | rdma_wr(wr)->rkey); |
2757 | seg += sizeof(struct mlx5_wqe_raddr_seg); | 2757 | seg += sizeof(struct mlx5_wqe_raddr_seg); |
2758 | size += sizeof(struct mlx5_wqe_raddr_seg) / 16; | 2758 | size += sizeof(struct mlx5_wqe_raddr_seg) / 16; |
2759 | break; | 2759 | break; |
@@ -2780,7 +2780,7 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, | |||
2780 | goto out; | 2780 | goto out; |
2781 | } | 2781 | } |
2782 | qp->sq.wr_data[idx] = MLX5_IB_WR_UMR; | 2782 | qp->sq.wr_data[idx] = MLX5_IB_WR_UMR; |
2783 | ctrl->imm = cpu_to_be32(wr->wr.fast_reg.rkey); | 2783 | ctrl->imm = cpu_to_be32(umr_wr(wr)->mkey); |
2784 | set_reg_umr_segment(seg, wr); | 2784 | set_reg_umr_segment(seg, wr); |
2785 | seg += sizeof(struct mlx5_wqe_umr_ctrl_seg); | 2785 | seg += sizeof(struct mlx5_wqe_umr_ctrl_seg); |
2786 | size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16; | 2786 | size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16; |
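[Editorial note] The signature-handover path in mlx5/qp.c now takes struct ib_sig_handover_wr, so sig_attrs, sig_mr, access_flags and prot are direct members while sg_list and num_sge come from the embedded base WR. Read off the converted code above, the structure is roughly as follows (a sketch inferred from the call sites, not the authoritative header):

	struct ib_sig_handover_wr {
		struct ib_send_wr	 wr;
		struct ib_sig_attrs	*sig_attrs;
		struct ib_mr		*sig_mr;
		int			 access_flags;
		struct ib_sge		*prot;	/* NULL when data and protection are interleaved */
	};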
diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c index e354b2f04ad9..35fe506e2cfa 100644 --- a/drivers/infiniband/hw/mthca/mthca_qp.c +++ b/drivers/infiniband/hw/mthca/mthca_qp.c | |||
@@ -1476,7 +1476,7 @@ void mthca_free_qp(struct mthca_dev *dev, | |||
1476 | 1476 | ||
1477 | /* Create UD header for an MLX send and build a data segment for it */ | 1477 | /* Create UD header for an MLX send and build a data segment for it */ |
1478 | static int build_mlx_header(struct mthca_dev *dev, struct mthca_sqp *sqp, | 1478 | static int build_mlx_header(struct mthca_dev *dev, struct mthca_sqp *sqp, |
1479 | int ind, struct ib_send_wr *wr, | 1479 | int ind, struct ib_ud_wr *wr, |
1480 | struct mthca_mlx_seg *mlx, | 1480 | struct mthca_mlx_seg *mlx, |
1481 | struct mthca_data_seg *data) | 1481 | struct mthca_data_seg *data) |
1482 | { | 1482 | { |
@@ -1485,10 +1485,10 @@ static int build_mlx_header(struct mthca_dev *dev, struct mthca_sqp *sqp, | |||
1485 | u16 pkey; | 1485 | u16 pkey; |
1486 | 1486 | ||
1487 | ib_ud_header_init(256, /* assume a MAD */ 1, 0, 0, | 1487 | ib_ud_header_init(256, /* assume a MAD */ 1, 0, 0, |
1488 | mthca_ah_grh_present(to_mah(wr->wr.ud.ah)), 0, | 1488 | mthca_ah_grh_present(to_mah(wr->ah)), 0, |
1489 | &sqp->ud_header); | 1489 | &sqp->ud_header); |
1490 | 1490 | ||
1491 | err = mthca_read_ah(dev, to_mah(wr->wr.ud.ah), &sqp->ud_header); | 1491 | err = mthca_read_ah(dev, to_mah(wr->ah), &sqp->ud_header); |
1492 | if (err) | 1492 | if (err) |
1493 | return err; | 1493 | return err; |
1494 | mlx->flags &= ~cpu_to_be32(MTHCA_NEXT_SOLICIT | 1); | 1494 | mlx->flags &= ~cpu_to_be32(MTHCA_NEXT_SOLICIT | 1); |
@@ -1499,7 +1499,7 @@ static int build_mlx_header(struct mthca_dev *dev, struct mthca_sqp *sqp, | |||
1499 | mlx->rlid = sqp->ud_header.lrh.destination_lid; | 1499 | mlx->rlid = sqp->ud_header.lrh.destination_lid; |
1500 | mlx->vcrc = 0; | 1500 | mlx->vcrc = 0; |
1501 | 1501 | ||
1502 | switch (wr->opcode) { | 1502 | switch (wr->wr.opcode) { |
1503 | case IB_WR_SEND: | 1503 | case IB_WR_SEND: |
1504 | sqp->ud_header.bth.opcode = IB_OPCODE_UD_SEND_ONLY; | 1504 | sqp->ud_header.bth.opcode = IB_OPCODE_UD_SEND_ONLY; |
1505 | sqp->ud_header.immediate_present = 0; | 1505 | sqp->ud_header.immediate_present = 0; |
@@ -1507,7 +1507,7 @@ static int build_mlx_header(struct mthca_dev *dev, struct mthca_sqp *sqp, | |||
1507 | case IB_WR_SEND_WITH_IMM: | 1507 | case IB_WR_SEND_WITH_IMM: |
1508 | sqp->ud_header.bth.opcode = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE; | 1508 | sqp->ud_header.bth.opcode = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE; |
1509 | sqp->ud_header.immediate_present = 1; | 1509 | sqp->ud_header.immediate_present = 1; |
1510 | sqp->ud_header.immediate_data = wr->ex.imm_data; | 1510 | sqp->ud_header.immediate_data = wr->wr.ex.imm_data; |
1511 | break; | 1511 | break; |
1512 | default: | 1512 | default: |
1513 | return -EINVAL; | 1513 | return -EINVAL; |
@@ -1516,18 +1516,18 @@ static int build_mlx_header(struct mthca_dev *dev, struct mthca_sqp *sqp, | |||
1516 | sqp->ud_header.lrh.virtual_lane = !sqp->qp.ibqp.qp_num ? 15 : 0; | 1516 | sqp->ud_header.lrh.virtual_lane = !sqp->qp.ibqp.qp_num ? 15 : 0; |
1517 | if (sqp->ud_header.lrh.destination_lid == IB_LID_PERMISSIVE) | 1517 | if (sqp->ud_header.lrh.destination_lid == IB_LID_PERMISSIVE) |
1518 | sqp->ud_header.lrh.source_lid = IB_LID_PERMISSIVE; | 1518 | sqp->ud_header.lrh.source_lid = IB_LID_PERMISSIVE; |
1519 | sqp->ud_header.bth.solicited_event = !!(wr->send_flags & IB_SEND_SOLICITED); | 1519 | sqp->ud_header.bth.solicited_event = !!(wr->wr.send_flags & IB_SEND_SOLICITED); |
1520 | if (!sqp->qp.ibqp.qp_num) | 1520 | if (!sqp->qp.ibqp.qp_num) |
1521 | ib_get_cached_pkey(&dev->ib_dev, sqp->qp.port, | 1521 | ib_get_cached_pkey(&dev->ib_dev, sqp->qp.port, |
1522 | sqp->pkey_index, &pkey); | 1522 | sqp->pkey_index, &pkey); |
1523 | else | 1523 | else |
1524 | ib_get_cached_pkey(&dev->ib_dev, sqp->qp.port, | 1524 | ib_get_cached_pkey(&dev->ib_dev, sqp->qp.port, |
1525 | wr->wr.ud.pkey_index, &pkey); | 1525 | wr->pkey_index, &pkey); |
1526 | sqp->ud_header.bth.pkey = cpu_to_be16(pkey); | 1526 | sqp->ud_header.bth.pkey = cpu_to_be16(pkey); |
1527 | sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->wr.ud.remote_qpn); | 1527 | sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->remote_qpn); |
1528 | sqp->ud_header.bth.psn = cpu_to_be32((sqp->send_psn++) & ((1 << 24) - 1)); | 1528 | sqp->ud_header.bth.psn = cpu_to_be32((sqp->send_psn++) & ((1 << 24) - 1)); |
1529 | sqp->ud_header.deth.qkey = cpu_to_be32(wr->wr.ud.remote_qkey & 0x80000000 ? | 1529 | sqp->ud_header.deth.qkey = cpu_to_be32(wr->remote_qkey & 0x80000000 ? |
1530 | sqp->qkey : wr->wr.ud.remote_qkey); | 1530 | sqp->qkey : wr->remote_qkey); |
1531 | sqp->ud_header.deth.source_qpn = cpu_to_be32(sqp->qp.ibqp.qp_num); | 1531 | sqp->ud_header.deth.source_qpn = cpu_to_be32(sqp->qp.ibqp.qp_num); |
1532 | 1532 | ||
1533 | header_size = ib_ud_header_pack(&sqp->ud_header, | 1533 | header_size = ib_ud_header_pack(&sqp->ud_header, |
@@ -1569,34 +1569,34 @@ static __always_inline void set_raddr_seg(struct mthca_raddr_seg *rseg, | |||
1569 | } | 1569 | } |
1570 | 1570 | ||
1571 | static __always_inline void set_atomic_seg(struct mthca_atomic_seg *aseg, | 1571 | static __always_inline void set_atomic_seg(struct mthca_atomic_seg *aseg, |
1572 | struct ib_send_wr *wr) | 1572 | struct ib_atomic_wr *wr) |
1573 | { | 1573 | { |
1574 | if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP) { | 1574 | if (wr->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP) { |
1575 | aseg->swap_add = cpu_to_be64(wr->wr.atomic.swap); | 1575 | aseg->swap_add = cpu_to_be64(wr->swap); |
1576 | aseg->compare = cpu_to_be64(wr->wr.atomic.compare_add); | 1576 | aseg->compare = cpu_to_be64(wr->compare_add); |
1577 | } else { | 1577 | } else { |
1578 | aseg->swap_add = cpu_to_be64(wr->wr.atomic.compare_add); | 1578 | aseg->swap_add = cpu_to_be64(wr->compare_add); |
1579 | aseg->compare = 0; | 1579 | aseg->compare = 0; |
1580 | } | 1580 | } |
1581 | 1581 | ||
1582 | } | 1582 | } |
1583 | 1583 | ||
1584 | static void set_tavor_ud_seg(struct mthca_tavor_ud_seg *useg, | 1584 | static void set_tavor_ud_seg(struct mthca_tavor_ud_seg *useg, |
1585 | struct ib_send_wr *wr) | 1585 | struct ib_ud_wr *wr) |
1586 | { | 1586 | { |
1587 | useg->lkey = cpu_to_be32(to_mah(wr->wr.ud.ah)->key); | 1587 | useg->lkey = cpu_to_be32(to_mah(wr->ah)->key); |
1588 | useg->av_addr = cpu_to_be64(to_mah(wr->wr.ud.ah)->avdma); | 1588 | useg->av_addr = cpu_to_be64(to_mah(wr->ah)->avdma); |
1589 | useg->dqpn = cpu_to_be32(wr->wr.ud.remote_qpn); | 1589 | useg->dqpn = cpu_to_be32(wr->remote_qpn); |
1590 | useg->qkey = cpu_to_be32(wr->wr.ud.remote_qkey); | 1590 | useg->qkey = cpu_to_be32(wr->remote_qkey); |
1591 | 1591 | ||
1592 | } | 1592 | } |
1593 | 1593 | ||
1594 | static void set_arbel_ud_seg(struct mthca_arbel_ud_seg *useg, | 1594 | static void set_arbel_ud_seg(struct mthca_arbel_ud_seg *useg, |
1595 | struct ib_send_wr *wr) | 1595 | struct ib_ud_wr *wr) |
1596 | { | 1596 | { |
1597 | memcpy(useg->av, to_mah(wr->wr.ud.ah)->av, MTHCA_AV_SIZE); | 1597 | memcpy(useg->av, to_mah(wr->ah)->av, MTHCA_AV_SIZE); |
1598 | useg->dqpn = cpu_to_be32(wr->wr.ud.remote_qpn); | 1598 | useg->dqpn = cpu_to_be32(wr->remote_qpn); |
1599 | useg->qkey = cpu_to_be32(wr->wr.ud.remote_qkey); | 1599 | useg->qkey = cpu_to_be32(wr->remote_qkey); |
1600 | } | 1600 | } |
1601 | 1601 | ||
1602 | int mthca_tavor_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, | 1602 | int mthca_tavor_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, |
@@ -1664,11 +1664,11 @@ int mthca_tavor_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, | |||
1664 | switch (wr->opcode) { | 1664 | switch (wr->opcode) { |
1665 | case IB_WR_ATOMIC_CMP_AND_SWP: | 1665 | case IB_WR_ATOMIC_CMP_AND_SWP: |
1666 | case IB_WR_ATOMIC_FETCH_AND_ADD: | 1666 | case IB_WR_ATOMIC_FETCH_AND_ADD: |
1667 | set_raddr_seg(wqe, wr->wr.atomic.remote_addr, | 1667 | set_raddr_seg(wqe, atomic_wr(wr)->remote_addr, |
1668 | wr->wr.atomic.rkey); | 1668 | atomic_wr(wr)->rkey); |
1669 | wqe += sizeof (struct mthca_raddr_seg); | 1669 | wqe += sizeof (struct mthca_raddr_seg); |
1670 | 1670 | ||
1671 | set_atomic_seg(wqe, wr); | 1671 | set_atomic_seg(wqe, atomic_wr(wr)); |
1672 | wqe += sizeof (struct mthca_atomic_seg); | 1672 | wqe += sizeof (struct mthca_atomic_seg); |
1673 | size += (sizeof (struct mthca_raddr_seg) + | 1673 | size += (sizeof (struct mthca_raddr_seg) + |
1674 | sizeof (struct mthca_atomic_seg)) / 16; | 1674 | sizeof (struct mthca_atomic_seg)) / 16; |
@@ -1677,8 +1677,8 @@ int mthca_tavor_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, | |||
1677 | case IB_WR_RDMA_WRITE: | 1677 | case IB_WR_RDMA_WRITE: |
1678 | case IB_WR_RDMA_WRITE_WITH_IMM: | 1678 | case IB_WR_RDMA_WRITE_WITH_IMM: |
1679 | case IB_WR_RDMA_READ: | 1679 | case IB_WR_RDMA_READ: |
1680 | set_raddr_seg(wqe, wr->wr.rdma.remote_addr, | 1680 | set_raddr_seg(wqe, rdma_wr(wr)->remote_addr, |
1681 | wr->wr.rdma.rkey); | 1681 | rdma_wr(wr)->rkey); |
1682 | wqe += sizeof (struct mthca_raddr_seg); | 1682 | wqe += sizeof (struct mthca_raddr_seg); |
1683 | size += sizeof (struct mthca_raddr_seg) / 16; | 1683 | size += sizeof (struct mthca_raddr_seg) / 16; |
1684 | break; | 1684 | break; |
@@ -1694,8 +1694,8 @@ int mthca_tavor_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, | |||
1694 | switch (wr->opcode) { | 1694 | switch (wr->opcode) { |
1695 | case IB_WR_RDMA_WRITE: | 1695 | case IB_WR_RDMA_WRITE: |
1696 | case IB_WR_RDMA_WRITE_WITH_IMM: | 1696 | case IB_WR_RDMA_WRITE_WITH_IMM: |
1697 | set_raddr_seg(wqe, wr->wr.rdma.remote_addr, | 1697 | set_raddr_seg(wqe, rdma_wr(wr)->remote_addr, |
1698 | wr->wr.rdma.rkey); | 1698 | rdma_wr(wr)->rkey); |
1699 | wqe += sizeof (struct mthca_raddr_seg); | 1699 | wqe += sizeof (struct mthca_raddr_seg); |
1700 | size += sizeof (struct mthca_raddr_seg) / 16; | 1700 | size += sizeof (struct mthca_raddr_seg) / 16; |
1701 | break; | 1701 | break; |
@@ -1708,13 +1708,13 @@ int mthca_tavor_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, | |||
1708 | break; | 1708 | break; |
1709 | 1709 | ||
1710 | case UD: | 1710 | case UD: |
1711 | set_tavor_ud_seg(wqe, wr); | 1711 | set_tavor_ud_seg(wqe, ud_wr(wr)); |
1712 | wqe += sizeof (struct mthca_tavor_ud_seg); | 1712 | wqe += sizeof (struct mthca_tavor_ud_seg); |
1713 | size += sizeof (struct mthca_tavor_ud_seg) / 16; | 1713 | size += sizeof (struct mthca_tavor_ud_seg) / 16; |
1714 | break; | 1714 | break; |
1715 | 1715 | ||
1716 | case MLX: | 1716 | case MLX: |
1717 | err = build_mlx_header(dev, to_msqp(qp), ind, wr, | 1717 | err = build_mlx_header(dev, to_msqp(qp), ind, ud_wr(wr), |
1718 | wqe - sizeof (struct mthca_next_seg), | 1718 | wqe - sizeof (struct mthca_next_seg), |
1719 | wqe); | 1719 | wqe); |
1720 | if (err) { | 1720 | if (err) { |
@@ -2005,11 +2005,11 @@ int mthca_arbel_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, | |||
2005 | switch (wr->opcode) { | 2005 | switch (wr->opcode) { |
2006 | case IB_WR_ATOMIC_CMP_AND_SWP: | 2006 | case IB_WR_ATOMIC_CMP_AND_SWP: |
2007 | case IB_WR_ATOMIC_FETCH_AND_ADD: | 2007 | case IB_WR_ATOMIC_FETCH_AND_ADD: |
2008 | set_raddr_seg(wqe, wr->wr.atomic.remote_addr, | 2008 | set_raddr_seg(wqe, atomic_wr(wr)->remote_addr, |
2009 | wr->wr.atomic.rkey); | 2009 | atomic_wr(wr)->rkey); |
2010 | wqe += sizeof (struct mthca_raddr_seg); | 2010 | wqe += sizeof (struct mthca_raddr_seg); |
2011 | 2011 | ||
2012 | set_atomic_seg(wqe, wr); | 2012 | set_atomic_seg(wqe, atomic_wr(wr)); |
2013 | wqe += sizeof (struct mthca_atomic_seg); | 2013 | wqe += sizeof (struct mthca_atomic_seg); |
2014 | size += (sizeof (struct mthca_raddr_seg) + | 2014 | size += (sizeof (struct mthca_raddr_seg) + |
2015 | sizeof (struct mthca_atomic_seg)) / 16; | 2015 | sizeof (struct mthca_atomic_seg)) / 16; |
@@ -2018,8 +2018,8 @@ int mthca_arbel_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, | |||
2018 | case IB_WR_RDMA_READ: | 2018 | case IB_WR_RDMA_READ: |
2019 | case IB_WR_RDMA_WRITE: | 2019 | case IB_WR_RDMA_WRITE: |
2020 | case IB_WR_RDMA_WRITE_WITH_IMM: | 2020 | case IB_WR_RDMA_WRITE_WITH_IMM: |
2021 | set_raddr_seg(wqe, wr->wr.rdma.remote_addr, | 2021 | set_raddr_seg(wqe, rdma_wr(wr)->remote_addr, |
2022 | wr->wr.rdma.rkey); | 2022 | rdma_wr(wr)->rkey); |
2023 | wqe += sizeof (struct mthca_raddr_seg); | 2023 | wqe += sizeof (struct mthca_raddr_seg); |
2024 | size += sizeof (struct mthca_raddr_seg) / 16; | 2024 | size += sizeof (struct mthca_raddr_seg) / 16; |
2025 | break; | 2025 | break; |
@@ -2035,8 +2035,8 @@ int mthca_arbel_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, | |||
2035 | switch (wr->opcode) { | 2035 | switch (wr->opcode) { |
2036 | case IB_WR_RDMA_WRITE: | 2036 | case IB_WR_RDMA_WRITE: |
2037 | case IB_WR_RDMA_WRITE_WITH_IMM: | 2037 | case IB_WR_RDMA_WRITE_WITH_IMM: |
2038 | set_raddr_seg(wqe, wr->wr.rdma.remote_addr, | 2038 | set_raddr_seg(wqe, rdma_wr(wr)->remote_addr, |
2039 | wr->wr.rdma.rkey); | 2039 | rdma_wr(wr)->rkey); |
2040 | wqe += sizeof (struct mthca_raddr_seg); | 2040 | wqe += sizeof (struct mthca_raddr_seg); |
2041 | size += sizeof (struct mthca_raddr_seg) / 16; | 2041 | size += sizeof (struct mthca_raddr_seg) / 16; |
2042 | break; | 2042 | break; |
@@ -2049,13 +2049,13 @@ int mthca_arbel_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, | |||
2049 | break; | 2049 | break; |
2050 | 2050 | ||
2051 | case UD: | 2051 | case UD: |
2052 | set_arbel_ud_seg(wqe, wr); | 2052 | set_arbel_ud_seg(wqe, ud_wr(wr)); |
2053 | wqe += sizeof (struct mthca_arbel_ud_seg); | 2053 | wqe += sizeof (struct mthca_arbel_ud_seg); |
2054 | size += sizeof (struct mthca_arbel_ud_seg) / 16; | 2054 | size += sizeof (struct mthca_arbel_ud_seg) / 16; |
2055 | break; | 2055 | break; |
2056 | 2056 | ||
2057 | case MLX: | 2057 | case MLX: |
2058 | err = build_mlx_header(dev, to_msqp(qp), ind, wr, | 2058 | err = build_mlx_header(dev, to_msqp(qp), ind, ud_wr(wr), |
2059 | wqe - sizeof (struct mthca_next_seg), | 2059 | wqe - sizeof (struct mthca_next_seg), |
2060 | wqe); | 2060 | wqe); |
2061 | if (err) { | 2061 | if (err) { |
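[Editorial note] mthca's atomic segment builder now works on struct ib_atomic_wr, which carries remote_addr, rkey, compare_add and swap directly; the masked-atomic variants presumably add mask fields as well, which are not exercised here. A sketch covering only the fields used above:

	struct ib_atomic_wr {
		struct ib_send_wr	wr;
		u64			remote_addr;
		u64			compare_add;
		u64			swap;
		u32			rkey;
	};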
diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c index 44cb513f9a87..f71b37b75f82 100644 --- a/drivers/infiniband/hw/nes/nes_verbs.c +++ b/drivers/infiniband/hw/nes/nes_verbs.c | |||
@@ -3372,9 +3372,9 @@ static int nes_post_send(struct ib_qp *ibqp, struct ib_send_wr *ib_wr, | |||
3372 | wqe_misc |= NES_IWARP_SQ_WQE_LOCAL_FENCE; | 3372 | wqe_misc |= NES_IWARP_SQ_WQE_LOCAL_FENCE; |
3373 | 3373 | ||
3374 | set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_RDMA_STAG_IDX, | 3374 | set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_RDMA_STAG_IDX, |
3375 | ib_wr->wr.rdma.rkey); | 3375 | rdma_wr(ib_wr)->rkey); |
3376 | set_wqe_64bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_RDMA_TO_LOW_IDX, | 3376 | set_wqe_64bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_RDMA_TO_LOW_IDX, |
3377 | ib_wr->wr.rdma.remote_addr); | 3377 | rdma_wr(ib_wr)->remote_addr); |
3378 | 3378 | ||
3379 | if ((ib_wr->send_flags & IB_SEND_INLINE) && | 3379 | if ((ib_wr->send_flags & IB_SEND_INLINE) && |
3380 | ((nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) == 0) && | 3380 | ((nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) == 0) && |
@@ -3409,9 +3409,9 @@ static int nes_post_send(struct ib_qp *ibqp, struct ib_send_wr *ib_wr, | |||
3409 | } | 3409 | } |
3410 | 3410 | ||
3411 | set_wqe_64bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_RDMA_TO_LOW_IDX, | 3411 | set_wqe_64bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_RDMA_TO_LOW_IDX, |
3412 | ib_wr->wr.rdma.remote_addr); | 3412 | rdma_wr(ib_wr)->remote_addr); |
3413 | set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_RDMA_STAG_IDX, | 3413 | set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_RDMA_STAG_IDX, |
3414 | ib_wr->wr.rdma.rkey); | 3414 | rdma_wr(ib_wr)->rkey); |
3415 | set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_RDMA_LENGTH_IDX, | 3415 | set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_RDMA_LENGTH_IDX, |
3416 | ib_wr->sg_list->length); | 3416 | ib_wr->sg_list->length); |
3417 | set_wqe_64bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_FRAG0_LOW_IDX, | 3417 | set_wqe_64bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_FRAG0_LOW_IDX, |
@@ -3428,15 +3428,16 @@ static int nes_post_send(struct ib_qp *ibqp, struct ib_send_wr *ib_wr, | |||
3428 | case IB_WR_FAST_REG_MR: | 3428 | case IB_WR_FAST_REG_MR: |
3429 | { | 3429 | { |
3430 | int i; | 3430 | int i; |
3431 | int flags = ib_wr->wr.fast_reg.access_flags; | 3431 | struct ib_fast_reg_wr *fwr = fast_reg_wr(ib_wr); |
3432 | int flags = fwr->access_flags; | ||
3432 | struct nes_ib_fast_reg_page_list *pnesfrpl = | 3433 | struct nes_ib_fast_reg_page_list *pnesfrpl = |
3433 | container_of(ib_wr->wr.fast_reg.page_list, | 3434 | container_of(fwr->page_list, |
3434 | struct nes_ib_fast_reg_page_list, | 3435 | struct nes_ib_fast_reg_page_list, |
3435 | ibfrpl); | 3436 | ibfrpl); |
3436 | u64 *src_page_list = pnesfrpl->ibfrpl.page_list; | 3437 | u64 *src_page_list = pnesfrpl->ibfrpl.page_list; |
3437 | u64 *dst_page_list = pnesfrpl->nes_wqe_pbl.kva; | 3438 | u64 *dst_page_list = pnesfrpl->nes_wqe_pbl.kva; |
3438 | 3439 | ||
3439 | if (ib_wr->wr.fast_reg.page_list_len > | 3440 | if (fwr->page_list_len > |
3440 | (NES_4K_PBL_CHUNK_SIZE / sizeof(u64))) { | 3441 | (NES_4K_PBL_CHUNK_SIZE / sizeof(u64))) { |
3441 | nes_debug(NES_DBG_IW_TX, "SQ_FMR: bad page_list_len\n"); | 3442 | nes_debug(NES_DBG_IW_TX, "SQ_FMR: bad page_list_len\n"); |
3442 | err = -EINVAL; | 3443 | err = -EINVAL; |
@@ -3445,19 +3446,19 @@ static int nes_post_send(struct ib_qp *ibqp, struct ib_send_wr *ib_wr, | |||
3445 | wqe_misc = NES_IWARP_SQ_OP_FAST_REG; | 3446 | wqe_misc = NES_IWARP_SQ_OP_FAST_REG; |
3446 | set_wqe_64bit_value(wqe->wqe_words, | 3447 | set_wqe_64bit_value(wqe->wqe_words, |
3447 | NES_IWARP_SQ_FMR_WQE_VA_FBO_LOW_IDX, | 3448 | NES_IWARP_SQ_FMR_WQE_VA_FBO_LOW_IDX, |
3448 | ib_wr->wr.fast_reg.iova_start); | 3449 | fwr->iova_start); |
3449 | set_wqe_32bit_value(wqe->wqe_words, | 3450 | set_wqe_32bit_value(wqe->wqe_words, |
3450 | NES_IWARP_SQ_FMR_WQE_LENGTH_LOW_IDX, | 3451 | NES_IWARP_SQ_FMR_WQE_LENGTH_LOW_IDX, |
3451 | ib_wr->wr.fast_reg.length); | 3452 | fwr->length); |
3452 | set_wqe_32bit_value(wqe->wqe_words, | 3453 | set_wqe_32bit_value(wqe->wqe_words, |
3453 | NES_IWARP_SQ_FMR_WQE_LENGTH_HIGH_IDX, 0); | 3454 | NES_IWARP_SQ_FMR_WQE_LENGTH_HIGH_IDX, 0); |
3454 | set_wqe_32bit_value(wqe->wqe_words, | 3455 | set_wqe_32bit_value(wqe->wqe_words, |
3455 | NES_IWARP_SQ_FMR_WQE_MR_STAG_IDX, | 3456 | NES_IWARP_SQ_FMR_WQE_MR_STAG_IDX, |
3456 | ib_wr->wr.fast_reg.rkey); | 3457 | fwr->rkey); |
3457 | /* Set page size: */ | 3458 | /* Set page size: */ |
3458 | if (ib_wr->wr.fast_reg.page_shift == 12) { | 3459 | if (fwr->page_shift == 12) { |
3459 | wqe_misc |= NES_IWARP_SQ_FMR_WQE_PAGE_SIZE_4K; | 3460 | wqe_misc |= NES_IWARP_SQ_FMR_WQE_PAGE_SIZE_4K; |
3460 | } else if (ib_wr->wr.fast_reg.page_shift == 21) { | 3461 | } else if (fwr->page_shift == 21) { |
3461 | wqe_misc |= NES_IWARP_SQ_FMR_WQE_PAGE_SIZE_2M; | 3462 | wqe_misc |= NES_IWARP_SQ_FMR_WQE_PAGE_SIZE_2M; |
3462 | } else { | 3463 | } else { |
3463 | nes_debug(NES_DBG_IW_TX, "Invalid page shift," | 3464 | nes_debug(NES_DBG_IW_TX, "Invalid page shift," |
@@ -3480,11 +3481,11 @@ static int nes_post_send(struct ib_qp *ibqp, struct ib_send_wr *ib_wr, | |||
3480 | wqe_misc |= NES_IWARP_SQ_FMR_WQE_RIGHTS_ENABLE_WINDOW_BIND; | 3481 | wqe_misc |= NES_IWARP_SQ_FMR_WQE_RIGHTS_ENABLE_WINDOW_BIND; |
3481 | 3482 | ||
3482 | /* Fill in PBL info: */ | 3483 | /* Fill in PBL info: */ |
3483 | if (ib_wr->wr.fast_reg.page_list_len > | 3484 | if (fwr->page_list_len > |
3484 | pnesfrpl->ibfrpl.max_page_list_len) { | 3485 | pnesfrpl->ibfrpl.max_page_list_len) { |
3485 | nes_debug(NES_DBG_IW_TX, "Invalid page list length," | 3486 | nes_debug(NES_DBG_IW_TX, "Invalid page list length," |
3486 | " ib_wr=%p, value=%u, max=%u\n", | 3487 | " ib_wr=%p, value=%u, max=%u\n", |
3487 | ib_wr, ib_wr->wr.fast_reg.page_list_len, | 3488 | ib_wr, fwr->page_list_len, |
3488 | pnesfrpl->ibfrpl.max_page_list_len); | 3489 | pnesfrpl->ibfrpl.max_page_list_len); |
3489 | err = -EINVAL; | 3490 | err = -EINVAL; |
3490 | break; | 3491 | break; |
@@ -3496,19 +3497,19 @@ static int nes_post_send(struct ib_qp *ibqp, struct ib_send_wr *ib_wr, | |||
3496 | 3497 | ||
3497 | set_wqe_32bit_value(wqe->wqe_words, | 3498 | set_wqe_32bit_value(wqe->wqe_words, |
3498 | NES_IWARP_SQ_FMR_WQE_PBL_LENGTH_IDX, | 3499 | NES_IWARP_SQ_FMR_WQE_PBL_LENGTH_IDX, |
3499 | ib_wr->wr.fast_reg.page_list_len * 8); | 3500 | fwr->page_list_len * 8); |
3500 | 3501 | ||
3501 | for (i = 0; i < ib_wr->wr.fast_reg.page_list_len; i++) | 3502 | for (i = 0; i < fwr->page_list_len; i++) |
3502 | dst_page_list[i] = cpu_to_le64(src_page_list[i]); | 3503 | dst_page_list[i] = cpu_to_le64(src_page_list[i]); |
3503 | 3504 | ||
3504 | nes_debug(NES_DBG_IW_TX, "SQ_FMR: iova_start: %llx, " | 3505 | nes_debug(NES_DBG_IW_TX, "SQ_FMR: iova_start: %llx, " |
3505 | "length: %d, rkey: %0x, pgl_paddr: %llx, " | 3506 | "length: %d, rkey: %0x, pgl_paddr: %llx, " |
3506 | "page_list_len: %u, wqe_misc: %x\n", | 3507 | "page_list_len: %u, wqe_misc: %x\n", |
3507 | (unsigned long long) ib_wr->wr.fast_reg.iova_start, | 3508 | (unsigned long long) fwr->iova_start, |
3508 | ib_wr->wr.fast_reg.length, | 3509 | fwr->length, |
3509 | ib_wr->wr.fast_reg.rkey, | 3510 | fwr->rkey, |
3510 | (unsigned long long) pnesfrpl->nes_wqe_pbl.paddr, | 3511 | (unsigned long long) pnesfrpl->nes_wqe_pbl.paddr, |
3511 | ib_wr->wr.fast_reg.page_list_len, | 3512 | fwr->page_list_len, |
3512 | wqe_misc); | 3513 | wqe_misc); |
3513 | break; | 3514 | break; |
3514 | } | 3515 | } |
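[Editorial note] The nes fast-register path reads everything through struct ib_fast_reg_wr; the fields exercised above are iova_start, length, rkey, page_shift, page_list, page_list_len and access_flags. As a sketch with member order assumed:

	struct ib_fast_reg_wr {
		struct ib_send_wr		 wr;
		u64				 iova_start;
		struct ib_fast_reg_page_list	*page_list;
		unsigned int			 page_shift;
		unsigned int			 page_list_len;
		u32				 length;
		int				 access_flags;
		u32				 rkey;
	};

The ocrdma hunks that follow use the same accessor, taking the downcast once at the top of ocrdma_build_fr() instead of repeating fast_reg_wr() at every field reference.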
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c index 1f3affb6a477..eb09e224acb9 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c +++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c | |||
@@ -1997,13 +1997,13 @@ static void ocrdma_build_ud_hdr(struct ocrdma_qp *qp, | |||
1997 | { | 1997 | { |
1998 | struct ocrdma_ewqe_ud_hdr *ud_hdr = | 1998 | struct ocrdma_ewqe_ud_hdr *ud_hdr = |
1999 | (struct ocrdma_ewqe_ud_hdr *)(hdr + 1); | 1999 | (struct ocrdma_ewqe_ud_hdr *)(hdr + 1); |
2000 | struct ocrdma_ah *ah = get_ocrdma_ah(wr->wr.ud.ah); | 2000 | struct ocrdma_ah *ah = get_ocrdma_ah(ud_wr(wr)->ah); |
2001 | 2001 | ||
2002 | ud_hdr->rsvd_dest_qpn = wr->wr.ud.remote_qpn; | 2002 | ud_hdr->rsvd_dest_qpn = ud_wr(wr)->remote_qpn; |
2003 | if (qp->qp_type == IB_QPT_GSI) | 2003 | if (qp->qp_type == IB_QPT_GSI) |
2004 | ud_hdr->qkey = qp->qkey; | 2004 | ud_hdr->qkey = qp->qkey; |
2005 | else | 2005 | else |
2006 | ud_hdr->qkey = wr->wr.ud.remote_qkey; | 2006 | ud_hdr->qkey = ud_wr(wr)->remote_qkey; |
2007 | ud_hdr->rsvd_ahid = ah->id; | 2007 | ud_hdr->rsvd_ahid = ah->id; |
2008 | if (ah->av->valid & OCRDMA_AV_VLAN_VALID) | 2008 | if (ah->av->valid & OCRDMA_AV_VLAN_VALID) |
2009 | hdr->cw |= (OCRDMA_FLAG_AH_VLAN_PR << OCRDMA_WQE_FLAGS_SHIFT); | 2009 | hdr->cw |= (OCRDMA_FLAG_AH_VLAN_PR << OCRDMA_WQE_FLAGS_SHIFT); |
@@ -2106,9 +2106,9 @@ static int ocrdma_build_write(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr, | |||
2106 | status = ocrdma_build_inline_sges(qp, hdr, sge, wr, wqe_size); | 2106 | status = ocrdma_build_inline_sges(qp, hdr, sge, wr, wqe_size); |
2107 | if (status) | 2107 | if (status) |
2108 | return status; | 2108 | return status; |
2109 | ext_rw->addr_lo = wr->wr.rdma.remote_addr; | 2109 | ext_rw->addr_lo = rdma_wr(wr)->remote_addr; |
2110 | ext_rw->addr_hi = upper_32_bits(wr->wr.rdma.remote_addr); | 2110 | ext_rw->addr_hi = upper_32_bits(rdma_wr(wr)->remote_addr); |
2111 | ext_rw->lrkey = wr->wr.rdma.rkey; | 2111 | ext_rw->lrkey = rdma_wr(wr)->rkey; |
2112 | ext_rw->len = hdr->total_len; | 2112 | ext_rw->len = hdr->total_len; |
2113 | return 0; | 2113 | return 0; |
2114 | } | 2114 | } |
@@ -2126,13 +2126,14 @@ static void ocrdma_build_read(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr, | |||
2126 | hdr->cw |= (OCRDMA_READ << OCRDMA_WQE_OPCODE_SHIFT); | 2126 | hdr->cw |= (OCRDMA_READ << OCRDMA_WQE_OPCODE_SHIFT); |
2127 | hdr->cw |= (OCRDMA_TYPE_LKEY << OCRDMA_WQE_TYPE_SHIFT); | 2127 | hdr->cw |= (OCRDMA_TYPE_LKEY << OCRDMA_WQE_TYPE_SHIFT); |
2128 | 2128 | ||
2129 | ext_rw->addr_lo = wr->wr.rdma.remote_addr; | 2129 | ext_rw->addr_lo = rdma_wr(wr)->remote_addr; |
2130 | ext_rw->addr_hi = upper_32_bits(wr->wr.rdma.remote_addr); | 2130 | ext_rw->addr_hi = upper_32_bits(rdma_wr(wr)->remote_addr); |
2131 | ext_rw->lrkey = wr->wr.rdma.rkey; | 2131 | ext_rw->lrkey = rdma_wr(wr)->rkey; |
2132 | ext_rw->len = hdr->total_len; | 2132 | ext_rw->len = hdr->total_len; |
2133 | } | 2133 | } |
2134 | 2134 | ||
2135 | static void build_frmr_pbes(struct ib_send_wr *wr, struct ocrdma_pbl *pbl_tbl, | 2135 | static void build_frmr_pbes(struct ib_fast_reg_wr *wr, |
2136 | struct ocrdma_pbl *pbl_tbl, | ||
2136 | struct ocrdma_hw_mr *hwmr) | 2137 | struct ocrdma_hw_mr *hwmr) |
2137 | { | 2138 | { |
2138 | int i; | 2139 | int i; |
@@ -2144,12 +2145,12 @@ static void build_frmr_pbes(struct ib_send_wr *wr, struct ocrdma_pbl *pbl_tbl, | |||
2144 | num_pbes = 0; | 2145 | num_pbes = 0; |
2145 | 2146 | ||
2146 | /* go through the OS phy regions & fill hw pbe entries into pbls. */ | 2147 | /* go through the OS phy regions & fill hw pbe entries into pbls. */ |
2147 | for (i = 0; i < wr->wr.fast_reg.page_list_len; i++) { | 2148 | for (i = 0; i < wr->page_list_len; i++) { |
2148 | /* number of pbes can be more for one OS buf, when | 2149 | /* number of pbes can be more for one OS buf, when |
2149 | * buffers are of different sizes. | 2150 | * buffers are of different sizes. |
2150 | * split the ib_buf to one or more pbes. | 2151 | * split the ib_buf to one or more pbes. |
2151 | */ | 2152 | */ |
2152 | buf_addr = wr->wr.fast_reg.page_list->page_list[i]; | 2153 | buf_addr = wr->page_list->page_list[i]; |
2153 | pbe->pa_lo = cpu_to_le32((u32) (buf_addr & PAGE_MASK)); | 2154 | pbe->pa_lo = cpu_to_le32((u32) (buf_addr & PAGE_MASK)); |
2154 | pbe->pa_hi = cpu_to_le32((u32) upper_32_bits(buf_addr)); | 2155 | pbe->pa_hi = cpu_to_le32((u32) upper_32_bits(buf_addr)); |
2155 | num_pbes += 1; | 2156 | num_pbes += 1; |
@@ -2178,9 +2179,10 @@ static int get_encoded_page_size(int pg_sz) | |||
2178 | 2179 | ||
2179 | 2180 | ||
2180 | static int ocrdma_build_fr(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr, | 2181 | static int ocrdma_build_fr(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr, |
2181 | struct ib_send_wr *wr) | 2182 | struct ib_send_wr *send_wr) |
2182 | { | 2183 | { |
2183 | u64 fbo; | 2184 | u64 fbo; |
2185 | struct ib_fast_reg_wr *wr = fast_reg_wr(send_wr); | ||
2184 | struct ocrdma_ewqe_fr *fast_reg = (struct ocrdma_ewqe_fr *)(hdr + 1); | 2186 | struct ocrdma_ewqe_fr *fast_reg = (struct ocrdma_ewqe_fr *)(hdr + 1); |
2185 | struct ocrdma_mr *mr; | 2187 | struct ocrdma_mr *mr; |
2186 | struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device); | 2188 | struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device); |
@@ -2188,33 +2190,32 @@ static int ocrdma_build_fr(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr, | |||
2188 | 2190 | ||
2189 | wqe_size = roundup(wqe_size, OCRDMA_WQE_ALIGN_BYTES); | 2191 | wqe_size = roundup(wqe_size, OCRDMA_WQE_ALIGN_BYTES); |
2190 | 2192 | ||
2191 | if (wr->wr.fast_reg.page_list_len > dev->attr.max_pages_per_frmr) | 2193 | if (wr->page_list_len > dev->attr.max_pages_per_frmr) |
2192 | return -EINVAL; | 2194 | return -EINVAL; |
2193 | 2195 | ||
2194 | hdr->cw |= (OCRDMA_FR_MR << OCRDMA_WQE_OPCODE_SHIFT); | 2196 | hdr->cw |= (OCRDMA_FR_MR << OCRDMA_WQE_OPCODE_SHIFT); |
2195 | hdr->cw |= ((wqe_size / OCRDMA_WQE_STRIDE) << OCRDMA_WQE_SIZE_SHIFT); | 2197 | hdr->cw |= ((wqe_size / OCRDMA_WQE_STRIDE) << OCRDMA_WQE_SIZE_SHIFT); |
2196 | 2198 | ||
2197 | if (wr->wr.fast_reg.page_list_len == 0) | 2199 | if (wr->page_list_len == 0) |
2198 | BUG(); | 2200 | BUG(); |
2199 | if (wr->wr.fast_reg.access_flags & IB_ACCESS_LOCAL_WRITE) | 2201 | if (wr->access_flags & IB_ACCESS_LOCAL_WRITE) |
2200 | hdr->rsvd_lkey_flags |= OCRDMA_LKEY_FLAG_LOCAL_WR; | 2202 | hdr->rsvd_lkey_flags |= OCRDMA_LKEY_FLAG_LOCAL_WR; |
2201 | if (wr->wr.fast_reg.access_flags & IB_ACCESS_REMOTE_WRITE) | 2203 | if (wr->access_flags & IB_ACCESS_REMOTE_WRITE) |
2202 | hdr->rsvd_lkey_flags |= OCRDMA_LKEY_FLAG_REMOTE_WR; | 2204 | hdr->rsvd_lkey_flags |= OCRDMA_LKEY_FLAG_REMOTE_WR; |
2203 | if (wr->wr.fast_reg.access_flags & IB_ACCESS_REMOTE_READ) | 2205 | if (wr->access_flags & IB_ACCESS_REMOTE_READ) |
2204 | hdr->rsvd_lkey_flags |= OCRDMA_LKEY_FLAG_REMOTE_RD; | 2206 | hdr->rsvd_lkey_flags |= OCRDMA_LKEY_FLAG_REMOTE_RD; |
2205 | hdr->lkey = wr->wr.fast_reg.rkey; | 2207 | hdr->lkey = wr->rkey; |
2206 | hdr->total_len = wr->wr.fast_reg.length; | 2208 | hdr->total_len = wr->length; |
2207 | 2209 | ||
2208 | fbo = wr->wr.fast_reg.iova_start - | 2210 | fbo = wr->iova_start - (wr->page_list->page_list[0] & PAGE_MASK); |
2209 | (wr->wr.fast_reg.page_list->page_list[0] & PAGE_MASK); | ||
2210 | 2211 | ||
2211 | fast_reg->va_hi = upper_32_bits(wr->wr.fast_reg.iova_start); | 2212 | fast_reg->va_hi = upper_32_bits(wr->iova_start); |
2212 | fast_reg->va_lo = (u32) (wr->wr.fast_reg.iova_start & 0xffffffff); | 2213 | fast_reg->va_lo = (u32) (wr->iova_start & 0xffffffff); |
2213 | fast_reg->fbo_hi = upper_32_bits(fbo); | 2214 | fast_reg->fbo_hi = upper_32_bits(fbo); |
2214 | fast_reg->fbo_lo = (u32) fbo & 0xffffffff; | 2215 | fast_reg->fbo_lo = (u32) fbo & 0xffffffff; |
2215 | fast_reg->num_sges = wr->wr.fast_reg.page_list_len; | 2216 | fast_reg->num_sges = wr->page_list_len; |
2216 | fast_reg->size_sge = | 2217 | fast_reg->size_sge = |
2217 | get_encoded_page_size(1 << wr->wr.fast_reg.page_shift); | 2218 | get_encoded_page_size(1 << wr->page_shift); |
2218 | mr = (struct ocrdma_mr *) (unsigned long) | 2219 | mr = (struct ocrdma_mr *) (unsigned long) |
2219 | dev->stag_arr[(hdr->lkey >> 8) & (OCRDMA_MAX_STAG - 1)]; | 2220 | dev->stag_arr[(hdr->lkey >> 8) & (OCRDMA_MAX_STAG - 1)]; |
2220 | build_frmr_pbes(wr, mr->hwmr.pbl_table, &mr->hwmr); | 2221 | build_frmr_pbes(wr, mr->hwmr.pbl_table, &mr->hwmr); |
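Both ocrdma_build_fr() above and the qib conversion below read the fast-registration parameters straight off the new container instead of the old wr.fast_reg union. A sketch of the structure they are consuming, assuming it simply mirrors the members the hunks reference (iova_start, page_list, page_shift, page_list_len, length, access_flags, rkey):

/* Sketch only: fast-registration WR with the generic send header embedded. */
struct ib_fast_reg_wr {
        struct ib_send_wr               wr;     /* opcode, wr_id, sg_list, ... */
        u64                             iova_start;
        struct ib_fast_reg_page_list    *page_list;
        unsigned int                    page_shift;
        unsigned int                    page_list_len;
        u32                             length;
        int                             access_flags;
        u32                             rkey;
};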
diff --git a/drivers/infiniband/hw/qib/qib_keys.c b/drivers/infiniband/hw/qib/qib_keys.c index 5afaa218508d..eaf139a33b2e 100644 --- a/drivers/infiniband/hw/qib/qib_keys.c +++ b/drivers/infiniband/hw/qib/qib_keys.c | |||
@@ -338,12 +338,13 @@ bail: | |||
338 | /* | 338 | /* |
339 | * Initialize the memory region specified by the work reqeust. | 339 | * Initialize the memory region specified by the work reqeust. |
339 | * Initialize the memory region specified by the work request. | 339 | * Initialize the memory region specified by the work request. |
340 | */ | 340 | */ |
341 | int qib_fast_reg_mr(struct qib_qp *qp, struct ib_send_wr *wr) | 341 | int qib_fast_reg_mr(struct qib_qp *qp, struct ib_send_wr *send_wr) |
342 | { | 342 | { |
343 | struct ib_fast_reg_wr *wr = fast_reg_wr(send_wr); | ||
343 | struct qib_lkey_table *rkt = &to_idev(qp->ibqp.device)->lk_table; | 344 | struct qib_lkey_table *rkt = &to_idev(qp->ibqp.device)->lk_table; |
344 | struct qib_pd *pd = to_ipd(qp->ibqp.pd); | 345 | struct qib_pd *pd = to_ipd(qp->ibqp.pd); |
345 | struct qib_mregion *mr; | 346 | struct qib_mregion *mr; |
346 | u32 rkey = wr->wr.fast_reg.rkey; | 347 | u32 rkey = wr->rkey; |
347 | unsigned i, n, m; | 348 | unsigned i, n, m; |
348 | int ret = -EINVAL; | 349 | int ret = -EINVAL; |
349 | unsigned long flags; | 350 | unsigned long flags; |
@@ -360,22 +361,22 @@ int qib_fast_reg_mr(struct qib_qp *qp, struct ib_send_wr *wr) | |||
360 | if (unlikely(mr == NULL || qp->ibqp.pd != mr->pd)) | 361 | if (unlikely(mr == NULL || qp->ibqp.pd != mr->pd)) |
361 | goto bail; | 362 | goto bail; |
362 | 363 | ||
363 | if (wr->wr.fast_reg.page_list_len > mr->max_segs) | 364 | if (wr->page_list_len > mr->max_segs) |
364 | goto bail; | 365 | goto bail; |
365 | 366 | ||
366 | ps = 1UL << wr->wr.fast_reg.page_shift; | 367 | ps = 1UL << wr->page_shift; |
367 | if (wr->wr.fast_reg.length > ps * wr->wr.fast_reg.page_list_len) | 368 | if (wr->length > ps * wr->page_list_len) |
368 | goto bail; | 369 | goto bail; |
369 | 370 | ||
370 | mr->user_base = wr->wr.fast_reg.iova_start; | 371 | mr->user_base = wr->iova_start; |
371 | mr->iova = wr->wr.fast_reg.iova_start; | 372 | mr->iova = wr->iova_start; |
372 | mr->lkey = rkey; | 373 | mr->lkey = rkey; |
373 | mr->length = wr->wr.fast_reg.length; | 374 | mr->length = wr->length; |
374 | mr->access_flags = wr->wr.fast_reg.access_flags; | 375 | mr->access_flags = wr->access_flags; |
375 | page_list = wr->wr.fast_reg.page_list->page_list; | 376 | page_list = wr->page_list->page_list; |
376 | m = 0; | 377 | m = 0; |
377 | n = 0; | 378 | n = 0; |
378 | for (i = 0; i < wr->wr.fast_reg.page_list_len; i++) { | 379 | for (i = 0; i < wr->page_list_len; i++) { |
379 | mr->map[m]->segs[n].vaddr = (void *) page_list[i]; | 380 | mr->map[m]->segs[n].vaddr = (void *) page_list[i]; |
380 | mr->map[m]->segs[n].length = ps; | 381 | mr->map[m]->segs[n].length = ps; |
381 | if (++n == QIB_SEGSZ) { | 382 | if (++n == QIB_SEGSZ) { |
diff --git a/drivers/infiniband/hw/qib/qib_qp.c b/drivers/infiniband/hw/qib/qib_qp.c index 4fa88ba2963e..40f85bb3e0d3 100644 --- a/drivers/infiniband/hw/qib/qib_qp.c +++ b/drivers/infiniband/hw/qib/qib_qp.c | |||
@@ -436,7 +436,7 @@ static void clear_mr_refs(struct qib_qp *qp, int clr_sends) | |||
436 | if (qp->ibqp.qp_type == IB_QPT_UD || | 436 | if (qp->ibqp.qp_type == IB_QPT_UD || |
437 | qp->ibqp.qp_type == IB_QPT_SMI || | 437 | qp->ibqp.qp_type == IB_QPT_SMI || |
438 | qp->ibqp.qp_type == IB_QPT_GSI) | 438 | qp->ibqp.qp_type == IB_QPT_GSI) |
439 | atomic_dec(&to_iah(wqe->wr.wr.ud.ah)->refcount); | 439 | atomic_dec(&to_iah(wqe->ud_wr.ah)->refcount); |
440 | if (++qp->s_last >= qp->s_size) | 440 | if (++qp->s_last >= qp->s_size) |
441 | qp->s_last = 0; | 441 | qp->s_last = 0; |
442 | } | 442 | } |
diff --git a/drivers/infiniband/hw/qib/qib_rc.c b/drivers/infiniband/hw/qib/qib_rc.c index 4544d6f88ad7..e6b7556d5221 100644 --- a/drivers/infiniband/hw/qib/qib_rc.c +++ b/drivers/infiniband/hw/qib/qib_rc.c | |||
@@ -373,10 +373,11 @@ int qib_make_rc_req(struct qib_qp *qp) | |||
373 | qp->s_flags |= QIB_S_WAIT_SSN_CREDIT; | 373 | qp->s_flags |= QIB_S_WAIT_SSN_CREDIT; |
374 | goto bail; | 374 | goto bail; |
375 | } | 375 | } |
376 | |||
376 | ohdr->u.rc.reth.vaddr = | 377 | ohdr->u.rc.reth.vaddr = |
377 | cpu_to_be64(wqe->wr.wr.rdma.remote_addr); | 378 | cpu_to_be64(wqe->rdma_wr.remote_addr); |
378 | ohdr->u.rc.reth.rkey = | 379 | ohdr->u.rc.reth.rkey = |
379 | cpu_to_be32(wqe->wr.wr.rdma.rkey); | 380 | cpu_to_be32(wqe->rdma_wr.rkey); |
380 | ohdr->u.rc.reth.length = cpu_to_be32(len); | 381 | ohdr->u.rc.reth.length = cpu_to_be32(len); |
381 | hwords += sizeof(struct ib_reth) / sizeof(u32); | 382 | hwords += sizeof(struct ib_reth) / sizeof(u32); |
382 | wqe->lpsn = wqe->psn; | 383 | wqe->lpsn = wqe->psn; |
@@ -386,15 +387,15 @@ int qib_make_rc_req(struct qib_qp *qp) | |||
386 | len = pmtu; | 387 | len = pmtu; |
387 | break; | 388 | break; |
388 | } | 389 | } |
389 | if (wqe->wr.opcode == IB_WR_RDMA_WRITE) | 390 | if (wqe->rdma_wr.wr.opcode == IB_WR_RDMA_WRITE) |
390 | qp->s_state = OP(RDMA_WRITE_ONLY); | 391 | qp->s_state = OP(RDMA_WRITE_ONLY); |
391 | else { | 392 | else { |
392 | qp->s_state = | 393 | qp->s_state = OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE); |
393 | OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE); | ||
394 | /* Immediate data comes after RETH */ | 394 | /* Immediate data comes after RETH */ |
395 | ohdr->u.rc.imm_data = wqe->wr.ex.imm_data; | 395 | ohdr->u.rc.imm_data = |
396 | wqe->rdma_wr.wr.ex.imm_data; | ||
396 | hwords += 1; | 397 | hwords += 1; |
397 | if (wqe->wr.send_flags & IB_SEND_SOLICITED) | 398 | if (wqe->rdma_wr.wr.send_flags & IB_SEND_SOLICITED) |
398 | bth0 |= IB_BTH_SOLICITED; | 399 | bth0 |= IB_BTH_SOLICITED; |
399 | } | 400 | } |
400 | bth2 |= IB_BTH_REQ_ACK; | 401 | bth2 |= IB_BTH_REQ_ACK; |
@@ -424,10 +425,11 @@ int qib_make_rc_req(struct qib_qp *qp) | |||
424 | qp->s_next_psn += (len - 1) / pmtu; | 425 | qp->s_next_psn += (len - 1) / pmtu; |
425 | wqe->lpsn = qp->s_next_psn++; | 426 | wqe->lpsn = qp->s_next_psn++; |
426 | } | 427 | } |
428 | |||
427 | ohdr->u.rc.reth.vaddr = | 429 | ohdr->u.rc.reth.vaddr = |
428 | cpu_to_be64(wqe->wr.wr.rdma.remote_addr); | 430 | cpu_to_be64(wqe->rdma_wr.remote_addr); |
429 | ohdr->u.rc.reth.rkey = | 431 | ohdr->u.rc.reth.rkey = |
430 | cpu_to_be32(wqe->wr.wr.rdma.rkey); | 432 | cpu_to_be32(wqe->rdma_wr.rkey); |
431 | ohdr->u.rc.reth.length = cpu_to_be32(len); | 433 | ohdr->u.rc.reth.length = cpu_to_be32(len); |
432 | qp->s_state = OP(RDMA_READ_REQUEST); | 434 | qp->s_state = OP(RDMA_READ_REQUEST); |
433 | hwords += sizeof(ohdr->u.rc.reth) / sizeof(u32); | 435 | hwords += sizeof(ohdr->u.rc.reth) / sizeof(u32); |
@@ -455,24 +457,24 @@ int qib_make_rc_req(struct qib_qp *qp) | |||
455 | qp->s_lsn++; | 457 | qp->s_lsn++; |
456 | wqe->lpsn = wqe->psn; | 458 | wqe->lpsn = wqe->psn; |
457 | } | 459 | } |
458 | if (wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP) { | 460 | if (wqe->atomic_wr.wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP) { |
459 | qp->s_state = OP(COMPARE_SWAP); | 461 | qp->s_state = OP(COMPARE_SWAP); |
460 | ohdr->u.atomic_eth.swap_data = cpu_to_be64( | 462 | ohdr->u.atomic_eth.swap_data = cpu_to_be64( |
461 | wqe->wr.wr.atomic.swap); | 463 | wqe->atomic_wr.swap); |
462 | ohdr->u.atomic_eth.compare_data = cpu_to_be64( | 464 | ohdr->u.atomic_eth.compare_data = cpu_to_be64( |
463 | wqe->wr.wr.atomic.compare_add); | 465 | wqe->atomic_wr.compare_add); |
464 | } else { | 466 | } else { |
465 | qp->s_state = OP(FETCH_ADD); | 467 | qp->s_state = OP(FETCH_ADD); |
466 | ohdr->u.atomic_eth.swap_data = cpu_to_be64( | 468 | ohdr->u.atomic_eth.swap_data = cpu_to_be64( |
467 | wqe->wr.wr.atomic.compare_add); | 469 | wqe->atomic_wr.compare_add); |
468 | ohdr->u.atomic_eth.compare_data = 0; | 470 | ohdr->u.atomic_eth.compare_data = 0; |
469 | } | 471 | } |
470 | ohdr->u.atomic_eth.vaddr[0] = cpu_to_be32( | 472 | ohdr->u.atomic_eth.vaddr[0] = cpu_to_be32( |
471 | wqe->wr.wr.atomic.remote_addr >> 32); | 473 | wqe->atomic_wr.remote_addr >> 32); |
472 | ohdr->u.atomic_eth.vaddr[1] = cpu_to_be32( | 474 | ohdr->u.atomic_eth.vaddr[1] = cpu_to_be32( |
473 | wqe->wr.wr.atomic.remote_addr); | 475 | wqe->atomic_wr.remote_addr); |
474 | ohdr->u.atomic_eth.rkey = cpu_to_be32( | 476 | ohdr->u.atomic_eth.rkey = cpu_to_be32( |
475 | wqe->wr.wr.atomic.rkey); | 477 | wqe->atomic_wr.rkey); |
476 | hwords += sizeof(struct ib_atomic_eth) / sizeof(u32); | 478 | hwords += sizeof(struct ib_atomic_eth) / sizeof(u32); |
477 | ss = NULL; | 479 | ss = NULL; |
478 | len = 0; | 480 | len = 0; |
@@ -597,9 +599,9 @@ int qib_make_rc_req(struct qib_qp *qp) | |||
597 | */ | 599 | */ |
598 | len = ((qp->s_psn - wqe->psn) & QIB_PSN_MASK) * pmtu; | 600 | len = ((qp->s_psn - wqe->psn) & QIB_PSN_MASK) * pmtu; |
599 | ohdr->u.rc.reth.vaddr = | 601 | ohdr->u.rc.reth.vaddr = |
600 | cpu_to_be64(wqe->wr.wr.rdma.remote_addr + len); | 602 | cpu_to_be64(wqe->rdma_wr.remote_addr + len); |
601 | ohdr->u.rc.reth.rkey = | 603 | ohdr->u.rc.reth.rkey = |
602 | cpu_to_be32(wqe->wr.wr.rdma.rkey); | 604 | cpu_to_be32(wqe->rdma_wr.rkey); |
603 | ohdr->u.rc.reth.length = cpu_to_be32(wqe->length - len); | 605 | ohdr->u.rc.reth.length = cpu_to_be32(wqe->length - len); |
604 | qp->s_state = OP(RDMA_READ_REQUEST); | 606 | qp->s_state = OP(RDMA_READ_REQUEST); |
605 | hwords += sizeof(ohdr->u.rc.reth) / sizeof(u32); | 607 | hwords += sizeof(ohdr->u.rc.reth) / sizeof(u32); |
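The qib requester path above only consumes the per-opcode members, but the same split shows up on the producer side: a ULP that used to fill wr.wr.rdma now allocates the containing type, sets the generic fields through the embedded .wr, and still hands ib_post_send() a plain struct ib_send_wr pointer. A minimal, hypothetical sketch (qp, sge, remote_addr and rkey are placeholders supplied by the caller):

static int post_rdma_write_example(struct ib_qp *qp, struct ib_sge *sge,
                                   u64 remote_addr, u32 rkey)
{
        struct ib_rdma_wr wr = { };
        struct ib_send_wr *bad_wr;

        wr.wr.opcode     = IB_WR_RDMA_WRITE;    /* generic fields live in the embedded header */
        wr.wr.wr_id      = 1;
        wr.wr.sg_list    = sge;
        wr.wr.num_sge    = 1;
        wr.wr.send_flags = IB_SEND_SIGNALED;
        wr.remote_addr   = remote_addr;         /* RDMA-specific fields sit in the container */
        wr.rkey          = rkey;

        return ib_post_send(qp, &wr.wr, &bad_wr);       /* the verb still takes ib_send_wr */
}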
diff --git a/drivers/infiniband/hw/qib/qib_ruc.c b/drivers/infiniband/hw/qib/qib_ruc.c index 22e356ca8058..b1aa21bdd484 100644 --- a/drivers/infiniband/hw/qib/qib_ruc.c +++ b/drivers/infiniband/hw/qib/qib_ruc.c | |||
@@ -459,8 +459,8 @@ again: | |||
459 | if (wqe->length == 0) | 459 | if (wqe->length == 0) |
460 | break; | 460 | break; |
461 | if (unlikely(!qib_rkey_ok(qp, &qp->r_sge.sge, wqe->length, | 461 | if (unlikely(!qib_rkey_ok(qp, &qp->r_sge.sge, wqe->length, |
462 | wqe->wr.wr.rdma.remote_addr, | 462 | wqe->rdma_wr.remote_addr, |
463 | wqe->wr.wr.rdma.rkey, | 463 | wqe->rdma_wr.rkey, |
464 | IB_ACCESS_REMOTE_WRITE))) | 464 | IB_ACCESS_REMOTE_WRITE))) |
465 | goto acc_err; | 465 | goto acc_err; |
466 | qp->r_sge.sg_list = NULL; | 466 | qp->r_sge.sg_list = NULL; |
@@ -472,8 +472,8 @@ again: | |||
472 | if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_READ))) | 472 | if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_READ))) |
473 | goto inv_err; | 473 | goto inv_err; |
474 | if (unlikely(!qib_rkey_ok(qp, &sqp->s_sge.sge, wqe->length, | 474 | if (unlikely(!qib_rkey_ok(qp, &sqp->s_sge.sge, wqe->length, |
475 | wqe->wr.wr.rdma.remote_addr, | 475 | wqe->rdma_wr.remote_addr, |
476 | wqe->wr.wr.rdma.rkey, | 476 | wqe->rdma_wr.rkey, |
477 | IB_ACCESS_REMOTE_READ))) | 477 | IB_ACCESS_REMOTE_READ))) |
478 | goto acc_err; | 478 | goto acc_err; |
479 | release = 0; | 479 | release = 0; |
@@ -490,18 +490,18 @@ again: | |||
490 | if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC))) | 490 | if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC))) |
491 | goto inv_err; | 491 | goto inv_err; |
492 | if (unlikely(!qib_rkey_ok(qp, &qp->r_sge.sge, sizeof(u64), | 492 | if (unlikely(!qib_rkey_ok(qp, &qp->r_sge.sge, sizeof(u64), |
493 | wqe->wr.wr.atomic.remote_addr, | 493 | wqe->atomic_wr.remote_addr, |
494 | wqe->wr.wr.atomic.rkey, | 494 | wqe->atomic_wr.rkey, |
495 | IB_ACCESS_REMOTE_ATOMIC))) | 495 | IB_ACCESS_REMOTE_ATOMIC))) |
496 | goto acc_err; | 496 | goto acc_err; |
497 | /* Perform atomic OP and save result. */ | 497 | /* Perform atomic OP and save result. */ |
498 | maddr = (atomic64_t *) qp->r_sge.sge.vaddr; | 498 | maddr = (atomic64_t *) qp->r_sge.sge.vaddr; |
499 | sdata = wqe->wr.wr.atomic.compare_add; | 499 | sdata = wqe->atomic_wr.compare_add; |
500 | *(u64 *) sqp->s_sge.sge.vaddr = | 500 | *(u64 *) sqp->s_sge.sge.vaddr = |
501 | (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ? | 501 | (wqe->atomic_wr.wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ? |
502 | (u64) atomic64_add_return(sdata, maddr) - sdata : | 502 | (u64) atomic64_add_return(sdata, maddr) - sdata : |
503 | (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr, | 503 | (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr, |
504 | sdata, wqe->wr.wr.atomic.swap); | 504 | sdata, wqe->atomic_wr.swap); |
505 | qib_put_mr(qp->r_sge.sge.mr); | 505 | qib_put_mr(qp->r_sge.sge.mr); |
506 | qp->r_sge.num_sge = 0; | 506 | qp->r_sge.num_sge = 0; |
507 | goto send_comp; | 507 | goto send_comp; |
@@ -785,7 +785,7 @@ void qib_send_complete(struct qib_qp *qp, struct qib_swqe *wqe, | |||
785 | if (qp->ibqp.qp_type == IB_QPT_UD || | 785 | if (qp->ibqp.qp_type == IB_QPT_UD || |
786 | qp->ibqp.qp_type == IB_QPT_SMI || | 786 | qp->ibqp.qp_type == IB_QPT_SMI || |
787 | qp->ibqp.qp_type == IB_QPT_GSI) | 787 | qp->ibqp.qp_type == IB_QPT_GSI) |
788 | atomic_dec(&to_iah(wqe->wr.wr.ud.ah)->refcount); | 788 | atomic_dec(&to_iah(wqe->ud_wr.ah)->refcount); |
789 | 789 | ||
790 | /* See ch. 11.2.4.1 and 10.7.3.1 */ | 790 | /* See ch. 11.2.4.1 and 10.7.3.1 */ |
791 | if (!(qp->s_flags & QIB_S_SIGNAL_REQ_WR) || | 791 | if (!(qp->s_flags & QIB_S_SIGNAL_REQ_WR) || |
diff --git a/drivers/infiniband/hw/qib/qib_uc.c b/drivers/infiniband/hw/qib/qib_uc.c index aa3a8035bb68..06a564589c35 100644 --- a/drivers/infiniband/hw/qib/qib_uc.c +++ b/drivers/infiniband/hw/qib/qib_uc.c | |||
@@ -129,9 +129,9 @@ int qib_make_uc_req(struct qib_qp *qp) | |||
129 | case IB_WR_RDMA_WRITE: | 129 | case IB_WR_RDMA_WRITE: |
130 | case IB_WR_RDMA_WRITE_WITH_IMM: | 130 | case IB_WR_RDMA_WRITE_WITH_IMM: |
131 | ohdr->u.rc.reth.vaddr = | 131 | ohdr->u.rc.reth.vaddr = |
132 | cpu_to_be64(wqe->wr.wr.rdma.remote_addr); | 132 | cpu_to_be64(wqe->rdma_wr.remote_addr); |
133 | ohdr->u.rc.reth.rkey = | 133 | ohdr->u.rc.reth.rkey = |
134 | cpu_to_be32(wqe->wr.wr.rdma.rkey); | 134 | cpu_to_be32(wqe->rdma_wr.rkey); |
135 | ohdr->u.rc.reth.length = cpu_to_be32(len); | 135 | ohdr->u.rc.reth.length = cpu_to_be32(len); |
136 | hwords += sizeof(struct ib_reth) / 4; | 136 | hwords += sizeof(struct ib_reth) / 4; |
137 | if (len > pmtu) { | 137 | if (len > pmtu) { |
diff --git a/drivers/infiniband/hw/qib/qib_ud.c b/drivers/infiniband/hw/qib/qib_ud.c index 26243b722b5e..59193f67ea78 100644 --- a/drivers/infiniband/hw/qib/qib_ud.c +++ b/drivers/infiniband/hw/qib/qib_ud.c | |||
@@ -59,7 +59,7 @@ static void qib_ud_loopback(struct qib_qp *sqp, struct qib_swqe *swqe) | |||
59 | u32 length; | 59 | u32 length; |
60 | enum ib_qp_type sqptype, dqptype; | 60 | enum ib_qp_type sqptype, dqptype; |
61 | 61 | ||
62 | qp = qib_lookup_qpn(ibp, swqe->wr.wr.ud.remote_qpn); | 62 | qp = qib_lookup_qpn(ibp, swqe->ud_wr.remote_qpn); |
63 | if (!qp) { | 63 | if (!qp) { |
64 | ibp->n_pkt_drops++; | 64 | ibp->n_pkt_drops++; |
65 | return; | 65 | return; |
@@ -76,7 +76,7 @@ static void qib_ud_loopback(struct qib_qp *sqp, struct qib_swqe *swqe) | |||
76 | goto drop; | 76 | goto drop; |
77 | } | 77 | } |
78 | 78 | ||
79 | ah_attr = &to_iah(swqe->wr.wr.ud.ah)->attr; | 79 | ah_attr = &to_iah(swqe->ud_wr.ah)->attr; |
80 | ppd = ppd_from_ibp(ibp); | 80 | ppd = ppd_from_ibp(ibp); |
81 | 81 | ||
82 | if (qp->ibqp.qp_num > 1) { | 82 | if (qp->ibqp.qp_num > 1) { |
@@ -106,8 +106,8 @@ static void qib_ud_loopback(struct qib_qp *sqp, struct qib_swqe *swqe) | |||
106 | if (qp->ibqp.qp_num) { | 106 | if (qp->ibqp.qp_num) { |
107 | u32 qkey; | 107 | u32 qkey; |
108 | 108 | ||
109 | qkey = (int)swqe->wr.wr.ud.remote_qkey < 0 ? | 109 | qkey = (int)swqe->ud_wr.remote_qkey < 0 ? |
110 | sqp->qkey : swqe->wr.wr.ud.remote_qkey; | 110 | sqp->qkey : swqe->ud_wr.remote_qkey; |
111 | if (unlikely(qkey != qp->qkey)) { | 111 | if (unlikely(qkey != qp->qkey)) { |
112 | u16 lid; | 112 | u16 lid; |
113 | 113 | ||
@@ -210,7 +210,7 @@ static void qib_ud_loopback(struct qib_qp *sqp, struct qib_swqe *swqe) | |||
210 | wc.qp = &qp->ibqp; | 210 | wc.qp = &qp->ibqp; |
211 | wc.src_qp = sqp->ibqp.qp_num; | 211 | wc.src_qp = sqp->ibqp.qp_num; |
212 | wc.pkey_index = qp->ibqp.qp_type == IB_QPT_GSI ? | 212 | wc.pkey_index = qp->ibqp.qp_type == IB_QPT_GSI ? |
213 | swqe->wr.wr.ud.pkey_index : 0; | 213 | swqe->ud_wr.pkey_index : 0; |
214 | wc.slid = ppd->lid | (ah_attr->src_path_bits & ((1 << ppd->lmc) - 1)); | 214 | wc.slid = ppd->lid | (ah_attr->src_path_bits & ((1 << ppd->lmc) - 1)); |
215 | wc.sl = ah_attr->sl; | 215 | wc.sl = ah_attr->sl; |
216 | wc.dlid_path_bits = ah_attr->dlid & ((1 << ppd->lmc) - 1); | 216 | wc.dlid_path_bits = ah_attr->dlid & ((1 << ppd->lmc) - 1); |
@@ -277,7 +277,7 @@ int qib_make_ud_req(struct qib_qp *qp) | |||
277 | /* Construct the header. */ | 277 | /* Construct the header. */ |
278 | ibp = to_iport(qp->ibqp.device, qp->port_num); | 278 | ibp = to_iport(qp->ibqp.device, qp->port_num); |
279 | ppd = ppd_from_ibp(ibp); | 279 | ppd = ppd_from_ibp(ibp); |
280 | ah_attr = &to_iah(wqe->wr.wr.ud.ah)->attr; | 280 | ah_attr = &to_iah(wqe->ud_wr.ah)->attr; |
281 | if (ah_attr->dlid >= QIB_MULTICAST_LID_BASE) { | 281 | if (ah_attr->dlid >= QIB_MULTICAST_LID_BASE) { |
282 | if (ah_attr->dlid != QIB_PERMISSIVE_LID) | 282 | if (ah_attr->dlid != QIB_PERMISSIVE_LID) |
283 | this_cpu_inc(ibp->pmastats->n_multicast_xmit); | 283 | this_cpu_inc(ibp->pmastats->n_multicast_xmit); |
@@ -363,7 +363,7 @@ int qib_make_ud_req(struct qib_qp *qp) | |||
363 | bth0 |= extra_bytes << 20; | 363 | bth0 |= extra_bytes << 20; |
364 | bth0 |= qp->ibqp.qp_type == IB_QPT_SMI ? QIB_DEFAULT_P_KEY : | 364 | bth0 |= qp->ibqp.qp_type == IB_QPT_SMI ? QIB_DEFAULT_P_KEY : |
365 | qib_get_pkey(ibp, qp->ibqp.qp_type == IB_QPT_GSI ? | 365 | qib_get_pkey(ibp, qp->ibqp.qp_type == IB_QPT_GSI ? |
366 | wqe->wr.wr.ud.pkey_index : qp->s_pkey_index); | 366 | wqe->ud_wr.pkey_index : qp->s_pkey_index); |
367 | ohdr->bth[0] = cpu_to_be32(bth0); | 367 | ohdr->bth[0] = cpu_to_be32(bth0); |
368 | /* | 368 | /* |
369 | * Use the multicast QP if the destination LID is a multicast LID. | 369 | * Use the multicast QP if the destination LID is a multicast LID. |
@@ -371,14 +371,14 @@ int qib_make_ud_req(struct qib_qp *qp) | |||
371 | ohdr->bth[1] = ah_attr->dlid >= QIB_MULTICAST_LID_BASE && | 371 | ohdr->bth[1] = ah_attr->dlid >= QIB_MULTICAST_LID_BASE && |
372 | ah_attr->dlid != QIB_PERMISSIVE_LID ? | 372 | ah_attr->dlid != QIB_PERMISSIVE_LID ? |
373 | cpu_to_be32(QIB_MULTICAST_QPN) : | 373 | cpu_to_be32(QIB_MULTICAST_QPN) : |
374 | cpu_to_be32(wqe->wr.wr.ud.remote_qpn); | 374 | cpu_to_be32(wqe->ud_wr.remote_qpn); |
375 | ohdr->bth[2] = cpu_to_be32(qp->s_next_psn++ & QIB_PSN_MASK); | 375 | ohdr->bth[2] = cpu_to_be32(qp->s_next_psn++ & QIB_PSN_MASK); |
376 | /* | 376 | /* |
377 | * Qkeys with the high order bit set mean use the | 377 | * Qkeys with the high order bit set mean use the |
378 | * qkey from the QP context instead of the WR (see 10.2.5). | 378 | * qkey from the QP context instead of the WR (see 10.2.5). |
379 | */ | 379 | */ |
380 | ohdr->u.ud.deth[0] = cpu_to_be32((int)wqe->wr.wr.ud.remote_qkey < 0 ? | 380 | ohdr->u.ud.deth[0] = cpu_to_be32((int)wqe->ud_wr.remote_qkey < 0 ? |
381 | qp->qkey : wqe->wr.wr.ud.remote_qkey); | 381 | qp->qkey : wqe->ud_wr.remote_qkey); |
382 | ohdr->u.ud.deth[1] = cpu_to_be32(qp->ibqp.qp_num); | 382 | ohdr->u.ud.deth[1] = cpu_to_be32(qp->ibqp.qp_num); |
383 | 383 | ||
384 | done: | 384 | done: |
diff --git a/drivers/infiniband/hw/qib/qib_verbs.c b/drivers/infiniband/hw/qib/qib_verbs.c index 3dcc4985b60f..a6b0b098ff30 100644 --- a/drivers/infiniband/hw/qib/qib_verbs.c +++ b/drivers/infiniband/hw/qib/qib_verbs.c | |||
@@ -374,7 +374,7 @@ static int qib_post_one_send(struct qib_qp *qp, struct ib_send_wr *wr, | |||
374 | wr->opcode != IB_WR_SEND_WITH_IMM) | 374 | wr->opcode != IB_WR_SEND_WITH_IMM) |
375 | goto bail_inval; | 375 | goto bail_inval; |
376 | /* Check UD destination address PD */ | 376 | /* Check UD destination address PD */ |
377 | if (qp->ibqp.pd != wr->wr.ud.ah->pd) | 377 | if (qp->ibqp.pd != ud_wr(wr)->ah->pd) |
378 | goto bail_inval; | 378 | goto bail_inval; |
379 | } else if ((unsigned) wr->opcode > IB_WR_ATOMIC_FETCH_AND_ADD) | 379 | } else if ((unsigned) wr->opcode > IB_WR_ATOMIC_FETCH_AND_ADD) |
380 | goto bail_inval; | 380 | goto bail_inval; |
@@ -397,7 +397,23 @@ static int qib_post_one_send(struct qib_qp *qp, struct ib_send_wr *wr, | |||
397 | rkt = &to_idev(qp->ibqp.device)->lk_table; | 397 | rkt = &to_idev(qp->ibqp.device)->lk_table; |
398 | pd = to_ipd(qp->ibqp.pd); | 398 | pd = to_ipd(qp->ibqp.pd); |
399 | wqe = get_swqe_ptr(qp, qp->s_head); | 399 | wqe = get_swqe_ptr(qp, qp->s_head); |
400 | wqe->wr = *wr; | 400 | |
401 | if (qp->ibqp.qp_type != IB_QPT_UC && | ||
402 | qp->ibqp.qp_type != IB_QPT_RC) | ||
403 | memcpy(&wqe->ud_wr, ud_wr(wr), sizeof(wqe->ud_wr)); | ||
404 | else if (wr->opcode == IB_WR_FAST_REG_MR) | ||
405 | memcpy(&wqe->fast_reg_wr, fast_reg_wr(wr), | ||
406 | sizeof(wqe->fast_reg_wr)); | ||
407 | else if (wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM || | ||
408 | wr->opcode == IB_WR_RDMA_WRITE || | ||
409 | wr->opcode == IB_WR_RDMA_READ) | ||
410 | memcpy(&wqe->rdma_wr, rdma_wr(wr), sizeof(wqe->rdma_wr)); | ||
411 | else if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP || | ||
412 | wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD) | ||
413 | memcpy(&wqe->atomic_wr, atomic_wr(wr), sizeof(wqe->atomic_wr)); | ||
414 | else | ||
415 | memcpy(&wqe->wr, wr, sizeof(wqe->wr)); | ||
416 | |||
401 | wqe->length = 0; | 417 | wqe->length = 0; |
402 | j = 0; | 418 | j = 0; |
403 | if (wr->num_sge) { | 419 | if (wr->num_sge) { |
@@ -426,7 +442,7 @@ static int qib_post_one_send(struct qib_qp *qp, struct ib_send_wr *wr, | |||
426 | qp->port_num - 1)->ibmtu) | 442 | qp->port_num - 1)->ibmtu) |
427 | goto bail_inval_free; | 443 | goto bail_inval_free; |
428 | else | 444 | else |
429 | atomic_inc(&to_iah(wr->wr.ud.ah)->refcount); | 445 | atomic_inc(&to_iah(ud_wr(wr)->ah)->refcount); |
430 | wqe->ssn = qp->s_ssn++; | 446 | wqe->ssn = qp->s_ssn++; |
431 | qp->s_head = next; | 447 | qp->s_head = next; |
432 | 448 | ||
diff --git a/drivers/infiniband/hw/qib/qib_verbs.h b/drivers/infiniband/hw/qib/qib_verbs.h index a08df70e8503..8aa16851a5e6 100644 --- a/drivers/infiniband/hw/qib/qib_verbs.h +++ b/drivers/infiniband/hw/qib/qib_verbs.h | |||
@@ -338,7 +338,13 @@ struct qib_mr { | |||
338 | * in qp->s_max_sge. | 338 | * in qp->s_max_sge. |
339 | */ | 339 | */ |
340 | struct qib_swqe { | 340 | struct qib_swqe { |
341 | struct ib_send_wr wr; /* don't use wr.sg_list */ | 341 | union { |
342 | struct ib_send_wr wr; /* don't use wr.sg_list */ | ||
343 | struct ib_ud_wr ud_wr; | ||
344 | struct ib_fast_reg_wr fast_reg_wr; | ||
345 | struct ib_rdma_wr rdma_wr; | ||
346 | struct ib_atomic_wr atomic_wr; | ||
347 | }; | ||
342 | u32 psn; /* first packet sequence number */ | 348 | u32 psn; /* first packet sequence number */ |
343 | u32 lpsn; /* last packet sequence number */ | 349 | u32 lpsn; /* last packet sequence number */ |
344 | u32 ssn; /* send sequence number */ | 350 | u32 ssn; /* send sequence number */ |
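The qib_swqe union above is only safe because every extended WR embeds struct ib_send_wr at offset zero, so wqe->wr, wqe->rdma_wr.wr and wqe->ud_wr.wr all name the same bytes and code that only reads opcode or send_flags keeps using wqe->wr unchanged. An illustrative compile-time check of that invariant (not part of the patch; it could live in any init path if one wanted to enforce it):

/* Illustration only: the union aliasing relies on the header being first. */
static void qib_swqe_layout_check(void)
{
        BUILD_BUG_ON(offsetof(struct ib_rdma_wr, wr) != 0);
        BUILD_BUG_ON(offsetof(struct ib_ud_wr, wr) != 0);
        BUILD_BUG_ON(offsetof(struct ib_atomic_wr, wr) != 0);
        BUILD_BUG_ON(offsetof(struct ib_fast_reg_wr, wr) != 0);
}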
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h index 4cd5428a2399..453860ade65e 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib.h +++ b/drivers/infiniband/ulp/ipoib/ipoib.h | |||
@@ -360,7 +360,7 @@ struct ipoib_dev_priv { | |||
360 | unsigned tx_head; | 360 | unsigned tx_head; |
361 | unsigned tx_tail; | 361 | unsigned tx_tail; |
362 | struct ib_sge tx_sge[MAX_SKB_FRAGS + 1]; | 362 | struct ib_sge tx_sge[MAX_SKB_FRAGS + 1]; |
363 | struct ib_send_wr tx_wr; | 363 | struct ib_ud_wr tx_wr; |
364 | unsigned tx_outstanding; | 364 | unsigned tx_outstanding; |
365 | struct ib_wc send_wc[MAX_SEND_CQE]; | 365 | struct ib_wc send_wc[MAX_SEND_CQE]; |
366 | 366 | ||
@@ -527,7 +527,7 @@ static inline void ipoib_build_sge(struct ipoib_dev_priv *priv, | |||
527 | priv->tx_sge[i + off].addr = mapping[i + off]; | 527 | priv->tx_sge[i + off].addr = mapping[i + off]; |
528 | priv->tx_sge[i + off].length = skb_frag_size(&frags[i]); | 528 | priv->tx_sge[i + off].length = skb_frag_size(&frags[i]); |
529 | } | 529 | } |
530 | priv->tx_wr.num_sge = nr_frags + off; | 530 | priv->tx_wr.wr.num_sge = nr_frags + off; |
531 | } | 531 | } |
532 | 532 | ||
533 | #ifdef CONFIG_INFINIBAND_IPOIB_DEBUG | 533 | #ifdef CONFIG_INFINIBAND_IPOIB_DEBUG |
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c index c78dc1638030..3ae9726efb98 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c | |||
@@ -700,9 +700,9 @@ static inline int post_send(struct ipoib_dev_priv *priv, | |||
700 | 700 | ||
701 | ipoib_build_sge(priv, tx_req); | 701 | ipoib_build_sge(priv, tx_req); |
702 | 702 | ||
703 | priv->tx_wr.wr_id = wr_id | IPOIB_OP_CM; | 703 | priv->tx_wr.wr.wr_id = wr_id | IPOIB_OP_CM; |
704 | 704 | ||
705 | return ib_post_send(tx->qp, &priv->tx_wr, &bad_wr); | 705 | return ib_post_send(tx->qp, &priv->tx_wr.wr, &bad_wr); |
706 | } | 706 | } |
707 | 707 | ||
708 | void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_tx *tx) | 708 | void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_tx *tx) |
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c index d266667ca9b8..5ea0c14070d1 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c | |||
@@ -518,19 +518,19 @@ static inline int post_send(struct ipoib_dev_priv *priv, | |||
518 | 518 | ||
519 | ipoib_build_sge(priv, tx_req); | 519 | ipoib_build_sge(priv, tx_req); |
520 | 520 | ||
521 | priv->tx_wr.wr_id = wr_id; | 521 | priv->tx_wr.wr.wr_id = wr_id; |
522 | priv->tx_wr.wr.ud.remote_qpn = qpn; | 522 | priv->tx_wr.remote_qpn = qpn; |
523 | priv->tx_wr.wr.ud.ah = address; | 523 | priv->tx_wr.ah = address; |
524 | 524 | ||
525 | if (head) { | 525 | if (head) { |
526 | priv->tx_wr.wr.ud.mss = skb_shinfo(skb)->gso_size; | 526 | priv->tx_wr.mss = skb_shinfo(skb)->gso_size; |
527 | priv->tx_wr.wr.ud.header = head; | 527 | priv->tx_wr.header = head; |
528 | priv->tx_wr.wr.ud.hlen = hlen; | 528 | priv->tx_wr.hlen = hlen; |
529 | priv->tx_wr.opcode = IB_WR_LSO; | 529 | priv->tx_wr.wr.opcode = IB_WR_LSO; |
530 | } else | 530 | } else |
531 | priv->tx_wr.opcode = IB_WR_SEND; | 531 | priv->tx_wr.wr.opcode = IB_WR_SEND; |
532 | 532 | ||
533 | return ib_post_send(priv->qp, &priv->tx_wr, &bad_wr); | 533 | return ib_post_send(priv->qp, &priv->tx_wr.wr, &bad_wr); |
534 | } | 534 | } |
535 | 535 | ||
536 | void ipoib_send(struct net_device *dev, struct sk_buff *skb, | 536 | void ipoib_send(struct net_device *dev, struct sk_buff *skb, |
@@ -583,9 +583,9 @@ void ipoib_send(struct net_device *dev, struct sk_buff *skb, | |||
583 | } | 583 | } |
584 | 584 | ||
585 | if (skb->ip_summed == CHECKSUM_PARTIAL) | 585 | if (skb->ip_summed == CHECKSUM_PARTIAL) |
586 | priv->tx_wr.send_flags |= IB_SEND_IP_CSUM; | 586 | priv->tx_wr.wr.send_flags |= IB_SEND_IP_CSUM; |
587 | else | 587 | else |
588 | priv->tx_wr.send_flags &= ~IB_SEND_IP_CSUM; | 588 | priv->tx_wr.wr.send_flags &= ~IB_SEND_IP_CSUM; |
589 | 589 | ||
590 | if (++priv->tx_outstanding == ipoib_sendq_size) { | 590 | if (++priv->tx_outstanding == ipoib_sendq_size) { |
591 | ipoib_dbg(priv, "TX ring full, stopping kernel net queue\n"); | 591 | ipoib_dbg(priv, "TX ring full, stopping kernel net queue\n"); |
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c index f74316e679d2..65d916cc70c7 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_main.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c | |||
@@ -461,7 +461,7 @@ int ipoib_set_mode(struct net_device *dev, const char *buf) | |||
461 | netdev_update_features(dev); | 461 | netdev_update_features(dev); |
462 | dev_set_mtu(dev, ipoib_cm_max_mtu(dev)); | 462 | dev_set_mtu(dev, ipoib_cm_max_mtu(dev)); |
463 | rtnl_unlock(); | 463 | rtnl_unlock(); |
464 | priv->tx_wr.send_flags &= ~IB_SEND_IP_CSUM; | 464 | priv->tx_wr.wr.send_flags &= ~IB_SEND_IP_CSUM; |
465 | 465 | ||
466 | ipoib_flush_paths(dev); | 466 | ipoib_flush_paths(dev); |
467 | rtnl_lock(); | 467 | rtnl_lock(); |
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c index 136cbefe00f8..029589b21fe9 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c | |||
@@ -245,7 +245,7 @@ static int ipoib_mcast_join_finish(struct ipoib_mcast *mcast, | |||
245 | 245 | ||
246 | priv->qkey = be32_to_cpu(priv->broadcast->mcmember.qkey); | 246 | priv->qkey = be32_to_cpu(priv->broadcast->mcmember.qkey); |
247 | spin_unlock_irq(&priv->lock); | 247 | spin_unlock_irq(&priv->lock); |
248 | priv->tx_wr.wr.ud.remote_qkey = priv->qkey; | 248 | priv->tx_wr.remote_qkey = priv->qkey; |
249 | set_qkey = 1; | 249 | set_qkey = 1; |
250 | } | 250 | } |
251 | 251 | ||
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c index 78845b6e8b81..d48c5bae7877 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c | |||
@@ -221,9 +221,9 @@ int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca) | |||
221 | for (i = 0; i < MAX_SKB_FRAGS + 1; ++i) | 221 | for (i = 0; i < MAX_SKB_FRAGS + 1; ++i) |
222 | priv->tx_sge[i].lkey = priv->pd->local_dma_lkey; | 222 | priv->tx_sge[i].lkey = priv->pd->local_dma_lkey; |
223 | 223 | ||
224 | priv->tx_wr.opcode = IB_WR_SEND; | 224 | priv->tx_wr.wr.opcode = IB_WR_SEND; |
225 | priv->tx_wr.sg_list = priv->tx_sge; | 225 | priv->tx_wr.wr.sg_list = priv->tx_sge; |
226 | priv->tx_wr.send_flags = IB_SEND_SIGNALED; | 226 | priv->tx_wr.wr.send_flags = IB_SEND_SIGNALED; |
227 | 227 | ||
228 | priv->rx_sge[0].lkey = priv->pd->local_dma_lkey; | 228 | priv->rx_sge[0].lkey = priv->pd->local_dma_lkey; |
229 | 229 | ||
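ipoib now keeps one persistent struct ib_ud_wr per device: generic state (opcode, sg_list, send_flags) is reached through tx_wr.wr, while the datagram addressing and LSO fields move to the top level of the container. A sketch of that container, assuming it carries exactly the members the hunks above touch:

/* Sketch only: UD send WR with the generic header embedded up front. */
struct ib_ud_wr {
        struct ib_send_wr       wr;
        struct ib_ah            *ah;
        void                    *header;        /* LSO header for IB_WR_LSO */
        int                     hlen;
        int                     mss;
        u32                     remote_qpn;
        u32                     remote_qkey;
        u16                     pkey_index;     /* GSI QPs */
        u8                      port_num;       /* directed-route SMPs on switches */
};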
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h index a5edd6ede692..2fab519dbd86 100644 --- a/drivers/infiniband/ulp/iser/iscsi_iser.h +++ b/drivers/infiniband/ulp/iser/iscsi_iser.h | |||
@@ -300,7 +300,11 @@ struct iser_tx_desc { | |||
300 | int num_sge; | 300 | int num_sge; |
301 | bool mapped; | 301 | bool mapped; |
302 | u8 wr_idx; | 302 | u8 wr_idx; |
303 | struct ib_send_wr wrs[ISER_MAX_WRS]; | 303 | union iser_wr { |
304 | struct ib_send_wr send; | ||
305 | struct ib_fast_reg_wr fast_reg; | ||
306 | struct ib_sig_handover_wr sig; | ||
307 | } wrs[ISER_MAX_WRS]; | ||
304 | struct iser_mem_reg data_reg; | 308 | struct iser_mem_reg data_reg; |
305 | struct iser_mem_reg prot_reg; | 309 | struct iser_mem_reg prot_reg; |
306 | struct ib_sig_attrs sig_attrs; | 310 | struct ib_sig_attrs sig_attrs; |
@@ -712,11 +716,11 @@ iser_reg_desc_put_fmr(struct ib_conn *ib_conn, | |||
712 | static inline struct ib_send_wr * | 716 | static inline struct ib_send_wr * |
713 | iser_tx_next_wr(struct iser_tx_desc *tx_desc) | 717 | iser_tx_next_wr(struct iser_tx_desc *tx_desc) |
714 | { | 718 | { |
715 | struct ib_send_wr *cur_wr = &tx_desc->wrs[tx_desc->wr_idx]; | 719 | struct ib_send_wr *cur_wr = &tx_desc->wrs[tx_desc->wr_idx].send; |
716 | struct ib_send_wr *last_wr; | 720 | struct ib_send_wr *last_wr; |
717 | 721 | ||
718 | if (tx_desc->wr_idx) { | 722 | if (tx_desc->wr_idx) { |
719 | last_wr = &tx_desc->wrs[tx_desc->wr_idx - 1]; | 723 | last_wr = &tx_desc->wrs[tx_desc->wr_idx - 1].send; |
720 | last_wr->next = cur_wr; | 724 | last_wr->next = cur_wr; |
721 | } | 725 | } |
722 | tx_desc->wr_idx++; | 726 | tx_desc->wr_idx++; |
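In iser the per-descriptor WR array becomes a union sized by its largest member, and iser_tx_next_wr() keeps handing out the generic view through .send; chaining still works for any mix of WR types because .next lives in the embedded header regardless of which union member a slot is used as. A hypothetical sketch of that usage (extended fast-reg fields and the LOCAL_INV rkey are omitted here and filled as in iser_fast_reg_mr() below):

/* Illustration only: slot 0 used as a plain LOCAL_INV, slot 1 as a
 * fast-reg WR, posted as one chain through the embedded headers. */
static int iser_chain_example(struct ib_conn *ib_conn,
                              struct iser_tx_desc *tx_desc)
{
        struct ib_send_wr *inv_wr = iser_tx_next_wr(tx_desc);
        struct ib_fast_reg_wr *fr = fast_reg_wr(iser_tx_next_wr(tx_desc));
        struct ib_send_wr *bad_wr;

        inv_wr->opcode = IB_WR_LOCAL_INV;       /* needs only the generic fields */
        fr->wr.opcode  = IB_WR_FAST_REG_MR;     /* container fields filled elsewhere */

        /* iser_tx_next_wr() set inv_wr->next = &fr->wr, so posting the head
         * of wrs[] walks the whole chain. */
        return ib_post_send(ib_conn->qp, &tx_desc->wrs[0].send, &bad_wr);
}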
diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c index 4c46d67d37a1..f45e6a352173 100644 --- a/drivers/infiniband/ulp/iser/iser_memory.c +++ b/drivers/infiniband/ulp/iser/iser_memory.c | |||
@@ -683,7 +683,7 @@ iser_reg_sig_mr(struct iscsi_iser_task *iser_task, | |||
683 | { | 683 | { |
684 | struct iser_tx_desc *tx_desc = &iser_task->desc; | 684 | struct iser_tx_desc *tx_desc = &iser_task->desc; |
685 | struct ib_sig_attrs *sig_attrs = &tx_desc->sig_attrs; | 685 | struct ib_sig_attrs *sig_attrs = &tx_desc->sig_attrs; |
686 | struct ib_send_wr *wr; | 686 | struct ib_sig_handover_wr *wr; |
687 | int ret; | 687 | int ret; |
688 | 688 | ||
689 | memset(sig_attrs, 0, sizeof(*sig_attrs)); | 689 | memset(sig_attrs, 0, sizeof(*sig_attrs)); |
@@ -693,26 +693,24 @@ iser_reg_sig_mr(struct iscsi_iser_task *iser_task, | |||
693 | 693 | ||
694 | iser_set_prot_checks(iser_task->sc, &sig_attrs->check_mask); | 694 | iser_set_prot_checks(iser_task->sc, &sig_attrs->check_mask); |
695 | 695 | ||
696 | if (!pi_ctx->sig_mr_valid) { | 696 | if (!pi_ctx->sig_mr_valid) |
697 | wr = iser_tx_next_wr(tx_desc); | 697 | iser_inv_rkey(iser_tx_next_wr(tx_desc), pi_ctx->sig_mr); |
698 | iser_inv_rkey(wr, pi_ctx->sig_mr); | 698 | |
699 | } | 699 | wr = sig_handover_wr(iser_tx_next_wr(tx_desc)); |
700 | 700 | wr->wr.opcode = IB_WR_REG_SIG_MR; | |
701 | wr = iser_tx_next_wr(tx_desc); | 701 | wr->wr.wr_id = ISER_FASTREG_LI_WRID; |
702 | wr->opcode = IB_WR_REG_SIG_MR; | 702 | wr->wr.sg_list = &data_reg->sge; |
703 | wr->wr_id = ISER_FASTREG_LI_WRID; | 703 | wr->wr.num_sge = 1; |
704 | wr->sg_list = &data_reg->sge; | 704 | wr->wr.send_flags = 0; |
705 | wr->num_sge = 1; | 705 | wr->sig_attrs = sig_attrs; |
706 | wr->send_flags = 0; | 706 | wr->sig_mr = pi_ctx->sig_mr; |
707 | wr->wr.sig_handover.sig_attrs = sig_attrs; | ||
708 | wr->wr.sig_handover.sig_mr = pi_ctx->sig_mr; | ||
709 | if (scsi_prot_sg_count(iser_task->sc)) | 707 | if (scsi_prot_sg_count(iser_task->sc)) |
710 | wr->wr.sig_handover.prot = &prot_reg->sge; | 708 | wr->prot = &prot_reg->sge; |
711 | else | 709 | else |
712 | wr->wr.sig_handover.prot = NULL; | 710 | wr->prot = NULL; |
713 | wr->wr.sig_handover.access_flags = IB_ACCESS_LOCAL_WRITE | | 711 | wr->access_flags = IB_ACCESS_LOCAL_WRITE | |
714 | IB_ACCESS_REMOTE_READ | | 712 | IB_ACCESS_REMOTE_READ | |
715 | IB_ACCESS_REMOTE_WRITE; | 713 | IB_ACCESS_REMOTE_WRITE; |
716 | pi_ctx->sig_mr_valid = 0; | 714 | pi_ctx->sig_mr_valid = 0; |
717 | 715 | ||
718 | sig_reg->sge.lkey = pi_ctx->sig_mr->lkey; | 716 | sig_reg->sge.lkey = pi_ctx->sig_mr->lkey; |
@@ -737,7 +735,7 @@ static int iser_fast_reg_mr(struct iscsi_iser_task *iser_task, | |||
737 | struct ib_mr *mr = rsc->mr; | 735 | struct ib_mr *mr = rsc->mr; |
738 | struct ib_fast_reg_page_list *frpl = rsc->frpl; | 736 | struct ib_fast_reg_page_list *frpl = rsc->frpl; |
739 | struct iser_tx_desc *tx_desc = &iser_task->desc; | 737 | struct iser_tx_desc *tx_desc = &iser_task->desc; |
740 | struct ib_send_wr *wr; | 738 | struct ib_fast_reg_wr *wr; |
741 | int offset, size, plen; | 739 | int offset, size, plen; |
742 | 740 | ||
743 | plen = iser_sg_to_page_vec(mem, device->ib_device, frpl->page_list, | 741 | plen = iser_sg_to_page_vec(mem, device->ib_device, frpl->page_list, |
@@ -747,24 +745,22 @@ static int iser_fast_reg_mr(struct iscsi_iser_task *iser_task, | |||
747 | return -EINVAL; | 745 | return -EINVAL; |
748 | } | 746 | } |
749 | 747 | ||
750 | if (!rsc->mr_valid) { | 748 | if (!rsc->mr_valid) |
751 | wr = iser_tx_next_wr(tx_desc); | 749 | iser_inv_rkey(iser_tx_next_wr(tx_desc), mr); |
752 | iser_inv_rkey(wr, mr); | 750 | |
753 | } | 751 | wr = fast_reg_wr(iser_tx_next_wr(tx_desc)); |
754 | 752 | wr->wr.opcode = IB_WR_FAST_REG_MR; | |
755 | wr = iser_tx_next_wr(tx_desc); | 753 | wr->wr.wr_id = ISER_FASTREG_LI_WRID; |
756 | wr->opcode = IB_WR_FAST_REG_MR; | 754 | wr->wr.send_flags = 0; |
757 | wr->wr_id = ISER_FASTREG_LI_WRID; | 755 | wr->iova_start = frpl->page_list[0] + offset; |
758 | wr->send_flags = 0; | 756 | wr->page_list = frpl; |
759 | wr->wr.fast_reg.iova_start = frpl->page_list[0] + offset; | 757 | wr->page_list_len = plen; |
760 | wr->wr.fast_reg.page_list = frpl; | 758 | wr->page_shift = SHIFT_4K; |
761 | wr->wr.fast_reg.page_list_len = plen; | 759 | wr->length = size; |
762 | wr->wr.fast_reg.page_shift = SHIFT_4K; | 760 | wr->rkey = mr->rkey; |
763 | wr->wr.fast_reg.length = size; | 761 | wr->access_flags = (IB_ACCESS_LOCAL_WRITE | |
764 | wr->wr.fast_reg.rkey = mr->rkey; | 762 | IB_ACCESS_REMOTE_WRITE | |
765 | wr->wr.fast_reg.access_flags = (IB_ACCESS_LOCAL_WRITE | | 763 | IB_ACCESS_REMOTE_READ); |
766 | IB_ACCESS_REMOTE_WRITE | | ||
767 | IB_ACCESS_REMOTE_READ); | ||
768 | rsc->mr_valid = 0; | 764 | rsc->mr_valid = 0; |
769 | 765 | ||
770 | reg->sge.lkey = mr->lkey; | 766 | reg->sge.lkey = mr->lkey; |
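iser_reg_sig_mr() above and isert_reg_sig_mr() further down both fill the new signature-handover container rather than the old wr.sig_handover union. Its shape is roughly the following sketch, assuming it carries exactly the fields those functions assign:

/* Sketch only: signature handover WR (IB_WR_REG_SIG_MR). */
struct ib_sig_handover_wr {
        struct ib_send_wr       wr;
        struct ib_sig_attrs     *sig_attrs;
        struct ib_mr            *sig_mr;
        int                     access_flags;
        struct ib_sge           *prot;          /* NULL when there is no protection SG list */
};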
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c index 85132d867bc8..b26022e30af1 100644 --- a/drivers/infiniband/ulp/iser/iser_verbs.c +++ b/drivers/infiniband/ulp/iser/iser_verbs.c | |||
@@ -1135,7 +1135,7 @@ int iser_post_send(struct ib_conn *ib_conn, struct iser_tx_desc *tx_desc, | |||
1135 | wr->opcode = IB_WR_SEND; | 1135 | wr->opcode = IB_WR_SEND; |
1136 | wr->send_flags = signal ? IB_SEND_SIGNALED : 0; | 1136 | wr->send_flags = signal ? IB_SEND_SIGNALED : 0; |
1137 | 1137 | ||
1138 | ib_ret = ib_post_send(ib_conn->qp, &tx_desc->wrs[0], &bad_wr); | 1138 | ib_ret = ib_post_send(ib_conn->qp, &tx_desc->wrs[0].send, &bad_wr); |
1139 | if (ib_ret) | 1139 | if (ib_ret) |
1140 | iser_err("ib_post_send failed, ret:%d opcode:%d\n", | 1140 | iser_err("ib_post_send failed, ret:%d opcode:%d\n", |
1141 | ib_ret, bad_wr->opcode); | 1141 | ib_ret, bad_wr->opcode); |
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c index 403bd29443b8..02c4c0b4569d 100644 --- a/drivers/infiniband/ulp/isert/ib_isert.c +++ b/drivers/infiniband/ulp/isert/ib_isert.c | |||
@@ -1703,10 +1703,10 @@ isert_unmap_cmd(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn) | |||
1703 | isert_unmap_data_buf(isert_conn, &wr->data); | 1703 | isert_unmap_data_buf(isert_conn, &wr->data); |
1704 | } | 1704 | } |
1705 | 1705 | ||
1706 | if (wr->send_wr) { | 1706 | if (wr->rdma_wr) { |
1707 | isert_dbg("Cmd %p free send_wr\n", isert_cmd); | 1707 | isert_dbg("Cmd %p free send_wr\n", isert_cmd); |
1708 | kfree(wr->send_wr); | 1708 | kfree(wr->rdma_wr); |
1709 | wr->send_wr = NULL; | 1709 | wr->rdma_wr = NULL; |
1710 | } | 1710 | } |
1711 | 1711 | ||
1712 | if (wr->ib_sge) { | 1712 | if (wr->ib_sge) { |
@@ -1741,7 +1741,7 @@ isert_unreg_rdma(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn) | |||
1741 | } | 1741 | } |
1742 | 1742 | ||
1743 | wr->ib_sge = NULL; | 1743 | wr->ib_sge = NULL; |
1744 | wr->send_wr = NULL; | 1744 | wr->rdma_wr = NULL; |
1745 | } | 1745 | } |
1746 | 1746 | ||
1747 | static void | 1747 | static void |
@@ -1910,7 +1910,7 @@ isert_completion_rdma_write(struct iser_tx_desc *tx_desc, | |||
1910 | } | 1910 | } |
1911 | 1911 | ||
1912 | device->unreg_rdma_mem(isert_cmd, isert_conn); | 1912 | device->unreg_rdma_mem(isert_cmd, isert_conn); |
1913 | wr->send_wr_num = 0; | 1913 | wr->rdma_wr_num = 0; |
1914 | if (ret) | 1914 | if (ret) |
1915 | transport_send_check_condition_and_sense(se_cmd, | 1915 | transport_send_check_condition_and_sense(se_cmd, |
1916 | se_cmd->pi_err, 0); | 1916 | se_cmd->pi_err, 0); |
@@ -1938,7 +1938,7 @@ isert_completion_rdma_read(struct iser_tx_desc *tx_desc, | |||
1938 | iscsit_stop_dataout_timer(cmd); | 1938 | iscsit_stop_dataout_timer(cmd); |
1939 | device->unreg_rdma_mem(isert_cmd, isert_conn); | 1939 | device->unreg_rdma_mem(isert_cmd, isert_conn); |
1940 | cmd->write_data_done = wr->data.len; | 1940 | cmd->write_data_done = wr->data.len; |
1941 | wr->send_wr_num = 0; | 1941 | wr->rdma_wr_num = 0; |
1942 | 1942 | ||
1943 | isert_dbg("Cmd: %p RDMA_READ comp calling execute_cmd\n", isert_cmd); | 1943 | isert_dbg("Cmd: %p RDMA_READ comp calling execute_cmd\n", isert_cmd); |
1944 | spin_lock_bh(&cmd->istate_lock); | 1944 | spin_lock_bh(&cmd->istate_lock); |
@@ -2384,7 +2384,7 @@ isert_put_text_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn) | |||
2384 | 2384 | ||
2385 | static int | 2385 | static int |
2386 | isert_build_rdma_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd, | 2386 | isert_build_rdma_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd, |
2387 | struct ib_sge *ib_sge, struct ib_send_wr *send_wr, | 2387 | struct ib_sge *ib_sge, struct ib_rdma_wr *rdma_wr, |
2388 | u32 data_left, u32 offset) | 2388 | u32 data_left, u32 offset) |
2389 | { | 2389 | { |
2390 | struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd; | 2390 | struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd; |
@@ -2399,8 +2399,8 @@ isert_build_rdma_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd, | |||
2399 | sg_nents = min(cmd->se_cmd.t_data_nents - sg_off, isert_conn->max_sge); | 2399 | sg_nents = min(cmd->se_cmd.t_data_nents - sg_off, isert_conn->max_sge); |
2400 | page_off = offset % PAGE_SIZE; | 2400 | page_off = offset % PAGE_SIZE; |
2401 | 2401 | ||
2402 | send_wr->sg_list = ib_sge; | 2402 | rdma_wr->wr.sg_list = ib_sge; |
2403 | send_wr->wr_id = (uintptr_t)&isert_cmd->tx_desc; | 2403 | rdma_wr->wr.wr_id = (uintptr_t)&isert_cmd->tx_desc; |
2404 | /* | 2404 | /* |
2405 | * Perform mapping of TCM scatterlist memory ib_sge dma_addr. | 2405 | * Perform mapping of TCM scatterlist memory ib_sge dma_addr. |
2406 | */ | 2406 | */ |
@@ -2425,11 +2425,11 @@ isert_build_rdma_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd, | |||
2425 | isert_dbg("Incrementing ib_sge pointer to %p\n", ib_sge); | 2425 | isert_dbg("Incrementing ib_sge pointer to %p\n", ib_sge); |
2426 | } | 2426 | } |
2427 | 2427 | ||
2428 | send_wr->num_sge = ++i; | 2428 | rdma_wr->wr.num_sge = ++i; |
2429 | isert_dbg("Set outgoing sg_list: %p num_sg: %u from TCM SGLs\n", | 2429 | isert_dbg("Set outgoing sg_list: %p num_sg: %u from TCM SGLs\n", |
2430 | send_wr->sg_list, send_wr->num_sge); | 2430 | rdma_wr->wr.sg_list, rdma_wr->wr.num_sge); |
2431 | 2431 | ||
2432 | return send_wr->num_sge; | 2432 | return rdma_wr->wr.num_sge; |
2433 | } | 2433 | } |
2434 | 2434 | ||
2435 | static int | 2435 | static int |
@@ -2440,7 +2440,7 @@ isert_map_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd, | |||
2440 | struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd); | 2440 | struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd); |
2441 | struct isert_conn *isert_conn = conn->context; | 2441 | struct isert_conn *isert_conn = conn->context; |
2442 | struct isert_data_buf *data = &wr->data; | 2442 | struct isert_data_buf *data = &wr->data; |
2443 | struct ib_send_wr *send_wr; | 2443 | struct ib_rdma_wr *rdma_wr; |
2444 | struct ib_sge *ib_sge; | 2444 | struct ib_sge *ib_sge; |
2445 | u32 offset, data_len, data_left, rdma_write_max, va_offset = 0; | 2445 | u32 offset, data_len, data_left, rdma_write_max, va_offset = 0; |
2446 | int ret = 0, i, ib_sge_cnt; | 2446 | int ret = 0, i, ib_sge_cnt; |
@@ -2465,11 +2465,11 @@ isert_map_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd, | |||
2465 | } | 2465 | } |
2466 | wr->ib_sge = ib_sge; | 2466 | wr->ib_sge = ib_sge; |
2467 | 2467 | ||
2468 | wr->send_wr_num = DIV_ROUND_UP(data->nents, isert_conn->max_sge); | 2468 | wr->rdma_wr_num = DIV_ROUND_UP(data->nents, isert_conn->max_sge); |
2469 | wr->send_wr = kzalloc(sizeof(struct ib_send_wr) * wr->send_wr_num, | 2469 | wr->rdma_wr = kzalloc(sizeof(struct ib_rdma_wr) * wr->rdma_wr_num, |
2470 | GFP_KERNEL); | 2470 | GFP_KERNEL); |
2471 | if (!wr->send_wr) { | 2471 | if (!wr->rdma_wr) { |
2472 | isert_dbg("Unable to allocate wr->send_wr\n"); | 2472 | isert_dbg("Unable to allocate wr->rdma_wr\n"); |
2473 | ret = -ENOMEM; | 2473 | ret = -ENOMEM; |
2474 | goto unmap_cmd; | 2474 | goto unmap_cmd; |
2475 | } | 2475 | } |
@@ -2477,31 +2477,31 @@ isert_map_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd, | |||
2477 | wr->isert_cmd = isert_cmd; | 2477 | wr->isert_cmd = isert_cmd; |
2478 | rdma_write_max = isert_conn->max_sge * PAGE_SIZE; | 2478 | rdma_write_max = isert_conn->max_sge * PAGE_SIZE; |
2479 | 2479 | ||
2480 | for (i = 0; i < wr->send_wr_num; i++) { | 2480 | for (i = 0; i < wr->rdma_wr_num; i++) { |
2481 | send_wr = &isert_cmd->rdma_wr.send_wr[i]; | 2481 | rdma_wr = &isert_cmd->rdma_wr.rdma_wr[i]; |
2482 | data_len = min(data_left, rdma_write_max); | 2482 | data_len = min(data_left, rdma_write_max); |
2483 | 2483 | ||
2484 | send_wr->send_flags = 0; | 2484 | rdma_wr->wr.send_flags = 0; |
2485 | if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) { | 2485 | if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) { |
2486 | send_wr->opcode = IB_WR_RDMA_WRITE; | 2486 | rdma_wr->wr.opcode = IB_WR_RDMA_WRITE; |
2487 | send_wr->wr.rdma.remote_addr = isert_cmd->read_va + offset; | 2487 | rdma_wr->remote_addr = isert_cmd->read_va + offset; |
2488 | send_wr->wr.rdma.rkey = isert_cmd->read_stag; | 2488 | rdma_wr->rkey = isert_cmd->read_stag; |
2489 | if (i + 1 == wr->send_wr_num) | 2489 | if (i + 1 == wr->rdma_wr_num) |
2490 | send_wr->next = &isert_cmd->tx_desc.send_wr; | 2490 | rdma_wr->wr.next = &isert_cmd->tx_desc.send_wr; |
2491 | else | 2491 | else |
2492 | send_wr->next = &wr->send_wr[i + 1]; | 2492 | rdma_wr->wr.next = &wr->rdma_wr[i + 1].wr; |
2493 | } else { | 2493 | } else { |
2494 | send_wr->opcode = IB_WR_RDMA_READ; | 2494 | rdma_wr->wr.opcode = IB_WR_RDMA_READ; |
2495 | send_wr->wr.rdma.remote_addr = isert_cmd->write_va + va_offset; | 2495 | rdma_wr->remote_addr = isert_cmd->write_va + va_offset; |
2496 | send_wr->wr.rdma.rkey = isert_cmd->write_stag; | 2496 | rdma_wr->rkey = isert_cmd->write_stag; |
2497 | if (i + 1 == wr->send_wr_num) | 2497 | if (i + 1 == wr->rdma_wr_num) |
2498 | send_wr->send_flags = IB_SEND_SIGNALED; | 2498 | rdma_wr->wr.send_flags = IB_SEND_SIGNALED; |
2499 | else | 2499 | else |
2500 | send_wr->next = &wr->send_wr[i + 1]; | 2500 | rdma_wr->wr.next = &wr->rdma_wr[i + 1].wr; |
2501 | } | 2501 | } |
2502 | 2502 | ||
2503 | ib_sge_cnt = isert_build_rdma_wr(isert_conn, isert_cmd, ib_sge, | 2503 | ib_sge_cnt = isert_build_rdma_wr(isert_conn, isert_cmd, ib_sge, |
2504 | send_wr, data_len, offset); | 2504 | rdma_wr, data_len, offset); |
2505 | ib_sge += ib_sge_cnt; | 2505 | ib_sge += ib_sge_cnt; |
2506 | 2506 | ||
2507 | offset += data_len; | 2507 | offset += data_len; |
@@ -2581,8 +2581,8 @@ isert_fast_reg_mr(struct isert_conn *isert_conn, | |||
2581 | struct ib_device *ib_dev = device->ib_device; | 2581 | struct ib_device *ib_dev = device->ib_device; |
2582 | struct ib_mr *mr; | 2582 | struct ib_mr *mr; |
2583 | struct ib_fast_reg_page_list *frpl; | 2583 | struct ib_fast_reg_page_list *frpl; |
2584 | struct ib_send_wr fr_wr, inv_wr; | 2584 | struct ib_fast_reg_wr fr_wr; |
2585 | struct ib_send_wr *bad_wr, *wr = NULL; | 2585 | struct ib_send_wr inv_wr, *bad_wr, *wr = NULL; |
2586 | int ret, pagelist_len; | 2586 | int ret, pagelist_len; |
2587 | u32 page_off; | 2587 | u32 page_off; |
2588 | 2588 | ||
@@ -2620,20 +2620,20 @@ isert_fast_reg_mr(struct isert_conn *isert_conn, | |||
2620 | 2620 | ||
2621 | /* Prepare FASTREG WR */ | 2621 | /* Prepare FASTREG WR */ |
2622 | memset(&fr_wr, 0, sizeof(fr_wr)); | 2622 | memset(&fr_wr, 0, sizeof(fr_wr)); |
2623 | fr_wr.wr_id = ISER_FASTREG_LI_WRID; | 2623 | fr_wr.wr.wr_id = ISER_FASTREG_LI_WRID; |
2624 | fr_wr.opcode = IB_WR_FAST_REG_MR; | 2624 | fr_wr.wr.opcode = IB_WR_FAST_REG_MR; |
2625 | fr_wr.wr.fast_reg.iova_start = frpl->page_list[0] + page_off; | 2625 | fr_wr.iova_start = frpl->page_list[0] + page_off; |
2626 | fr_wr.wr.fast_reg.page_list = frpl; | 2626 | fr_wr.page_list = frpl; |
2627 | fr_wr.wr.fast_reg.page_list_len = pagelist_len; | 2627 | fr_wr.page_list_len = pagelist_len; |
2628 | fr_wr.wr.fast_reg.page_shift = PAGE_SHIFT; | 2628 | fr_wr.page_shift = PAGE_SHIFT; |
2629 | fr_wr.wr.fast_reg.length = mem->len; | 2629 | fr_wr.length = mem->len; |
2630 | fr_wr.wr.fast_reg.rkey = mr->rkey; | 2630 | fr_wr.rkey = mr->rkey; |
2631 | fr_wr.wr.fast_reg.access_flags = IB_ACCESS_LOCAL_WRITE; | 2631 | fr_wr.access_flags = IB_ACCESS_LOCAL_WRITE; |
2632 | 2632 | ||
2633 | if (!wr) | 2633 | if (!wr) |
2634 | wr = &fr_wr; | 2634 | wr = &fr_wr.wr; |
2635 | else | 2635 | else |
2636 | wr->next = &fr_wr; | 2636 | wr->next = &fr_wr.wr; |
2637 | 2637 | ||
2638 | ret = ib_post_send(isert_conn->qp, wr, &bad_wr); | 2638 | ret = ib_post_send(isert_conn->qp, wr, &bad_wr); |
2639 | if (ret) { | 2639 | if (ret) { |
@@ -2714,8 +2714,8 @@ isert_reg_sig_mr(struct isert_conn *isert_conn, | |||
2714 | struct isert_rdma_wr *rdma_wr, | 2714 | struct isert_rdma_wr *rdma_wr, |
2715 | struct fast_reg_descriptor *fr_desc) | 2715 | struct fast_reg_descriptor *fr_desc) |
2716 | { | 2716 | { |
2717 | struct ib_send_wr sig_wr, inv_wr; | 2717 | struct ib_sig_handover_wr sig_wr; |
2718 | struct ib_send_wr *bad_wr, *wr = NULL; | 2718 | struct ib_send_wr inv_wr, *bad_wr, *wr = NULL; |
2719 | struct pi_context *pi_ctx = fr_desc->pi_ctx; | 2719 | struct pi_context *pi_ctx = fr_desc->pi_ctx; |
2720 | struct ib_sig_attrs sig_attrs; | 2720 | struct ib_sig_attrs sig_attrs; |
2721 | int ret; | 2721 | int ret; |
@@ -2733,20 +2733,20 @@ isert_reg_sig_mr(struct isert_conn *isert_conn, | |||
2733 | } | 2733 | } |
2734 | 2734 | ||
2735 | memset(&sig_wr, 0, sizeof(sig_wr)); | 2735 | memset(&sig_wr, 0, sizeof(sig_wr)); |
2736 | sig_wr.opcode = IB_WR_REG_SIG_MR; | 2736 | sig_wr.wr.opcode = IB_WR_REG_SIG_MR; |
2737 | sig_wr.wr_id = ISER_FASTREG_LI_WRID; | 2737 | sig_wr.wr.wr_id = ISER_FASTREG_LI_WRID; |
2738 | sig_wr.sg_list = &rdma_wr->ib_sg[DATA]; | 2738 | sig_wr.wr.sg_list = &rdma_wr->ib_sg[DATA]; |
2739 | sig_wr.num_sge = 1; | 2739 | sig_wr.wr.num_sge = 1; |
2740 | sig_wr.wr.sig_handover.access_flags = IB_ACCESS_LOCAL_WRITE; | 2740 | sig_wr.access_flags = IB_ACCESS_LOCAL_WRITE; |
2741 | sig_wr.wr.sig_handover.sig_attrs = &sig_attrs; | 2741 | sig_wr.sig_attrs = &sig_attrs; |
2742 | sig_wr.wr.sig_handover.sig_mr = pi_ctx->sig_mr; | 2742 | sig_wr.sig_mr = pi_ctx->sig_mr; |
2743 | if (se_cmd->t_prot_sg) | 2743 | if (se_cmd->t_prot_sg) |
2744 | sig_wr.wr.sig_handover.prot = &rdma_wr->ib_sg[PROT]; | 2744 | sig_wr.prot = &rdma_wr->ib_sg[PROT]; |
2745 | 2745 | ||
2746 | if (!wr) | 2746 | if (!wr) |
2747 | wr = &sig_wr; | 2747 | wr = &sig_wr.wr; |
2748 | else | 2748 | else |
2749 | wr->next = &sig_wr; | 2749 | wr->next = &sig_wr.wr; |
2750 | 2750 | ||
2751 | ret = ib_post_send(isert_conn->qp, wr, &bad_wr); | 2751 | ret = ib_post_send(isert_conn->qp, wr, &bad_wr); |
2752 | if (ret) { | 2752 | if (ret) { |
@@ -2840,7 +2840,7 @@ isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd, | |||
2840 | struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd); | 2840 | struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd); |
2841 | struct isert_conn *isert_conn = conn->context; | 2841 | struct isert_conn *isert_conn = conn->context; |
2842 | struct fast_reg_descriptor *fr_desc = NULL; | 2842 | struct fast_reg_descriptor *fr_desc = NULL; |
2843 | struct ib_send_wr *send_wr; | 2843 | struct ib_rdma_wr *rdma_wr; |
2844 | struct ib_sge *ib_sg; | 2844 | struct ib_sge *ib_sg; |
2845 | u32 offset; | 2845 | u32 offset; |
2846 | int ret = 0; | 2846 | int ret = 0; |
@@ -2881,26 +2881,26 @@ isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd, | |||
2881 | 2881 | ||
2882 | memcpy(&wr->s_ib_sge, ib_sg, sizeof(*ib_sg)); | 2882 | memcpy(&wr->s_ib_sge, ib_sg, sizeof(*ib_sg)); |
2883 | wr->ib_sge = &wr->s_ib_sge; | 2883 | wr->ib_sge = &wr->s_ib_sge; |
2884 | wr->send_wr_num = 1; | 2884 | wr->rdma_wr_num = 1; |
2885 | memset(&wr->s_send_wr, 0, sizeof(*send_wr)); | 2885 | memset(&wr->s_rdma_wr, 0, sizeof(wr->s_rdma_wr)); |
2886 | wr->send_wr = &wr->s_send_wr; | 2886 | wr->rdma_wr = &wr->s_rdma_wr; |
2887 | wr->isert_cmd = isert_cmd; | 2887 | wr->isert_cmd = isert_cmd; |
2888 | 2888 | ||
2889 | send_wr = &isert_cmd->rdma_wr.s_send_wr; | 2889 | rdma_wr = &isert_cmd->rdma_wr.s_rdma_wr; |
2890 | send_wr->sg_list = &wr->s_ib_sge; | 2890 | rdma_wr->wr.sg_list = &wr->s_ib_sge; |
2891 | send_wr->num_sge = 1; | 2891 | rdma_wr->wr.num_sge = 1; |
2892 | send_wr->wr_id = (uintptr_t)&isert_cmd->tx_desc; | 2892 | rdma_wr->wr.wr_id = (uintptr_t)&isert_cmd->tx_desc; |
2893 | if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) { | 2893 | if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) { |
2894 | send_wr->opcode = IB_WR_RDMA_WRITE; | 2894 | rdma_wr->wr.opcode = IB_WR_RDMA_WRITE; |
2895 | send_wr->wr.rdma.remote_addr = isert_cmd->read_va; | 2895 | rdma_wr->remote_addr = isert_cmd->read_va; |
2896 | send_wr->wr.rdma.rkey = isert_cmd->read_stag; | 2896 | rdma_wr->rkey = isert_cmd->read_stag; |
2897 | send_wr->send_flags = !isert_prot_cmd(isert_conn, se_cmd) ? | 2897 | rdma_wr->wr.send_flags = !isert_prot_cmd(isert_conn, se_cmd) ? |
2898 | 0 : IB_SEND_SIGNALED; | 2898 | 0 : IB_SEND_SIGNALED; |
2899 | } else { | 2899 | } else { |
2900 | send_wr->opcode = IB_WR_RDMA_READ; | 2900 | rdma_wr->wr.opcode = IB_WR_RDMA_READ; |
2901 | send_wr->wr.rdma.remote_addr = isert_cmd->write_va; | 2901 | rdma_wr->remote_addr = isert_cmd->write_va; |
2902 | send_wr->wr.rdma.rkey = isert_cmd->write_stag; | 2902 | rdma_wr->rkey = isert_cmd->write_stag; |
2903 | send_wr->send_flags = IB_SEND_SIGNALED; | 2903 | rdma_wr->wr.send_flags = IB_SEND_SIGNALED; |
2904 | } | 2904 | } |
2905 | 2905 | ||
2906 | return 0; | 2906 | return 0; |
@@ -2948,11 +2948,11 @@ isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd) | |||
2948 | isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc); | 2948 | isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc); |
2949 | isert_init_send_wr(isert_conn, isert_cmd, | 2949 | isert_init_send_wr(isert_conn, isert_cmd, |
2950 | &isert_cmd->tx_desc.send_wr); | 2950 | &isert_cmd->tx_desc.send_wr); |
2951 | isert_cmd->rdma_wr.s_send_wr.next = &isert_cmd->tx_desc.send_wr; | 2951 | isert_cmd->rdma_wr.s_rdma_wr.wr.next = &isert_cmd->tx_desc.send_wr; |
2952 | wr->send_wr_num += 1; | 2952 | wr->rdma_wr_num += 1; |
2953 | } | 2953 | } |
2954 | 2954 | ||
2955 | rc = ib_post_send(isert_conn->qp, wr->send_wr, &wr_failed); | 2955 | rc = ib_post_send(isert_conn->qp, &wr->rdma_wr->wr, &wr_failed); |
2956 | if (rc) | 2956 | if (rc) |
2957 | isert_warn("ib_post_send() failed for IB_WR_RDMA_WRITE\n"); | 2957 | isert_warn("ib_post_send() failed for IB_WR_RDMA_WRITE\n"); |
2958 | 2958 | ||
@@ -2986,7 +2986,7 @@ isert_get_dataout(struct iscsi_conn *conn, struct iscsi_cmd *cmd, bool recovery) | |||
2986 | return rc; | 2986 | return rc; |
2987 | } | 2987 | } |
2988 | 2988 | ||
2989 | rc = ib_post_send(isert_conn->qp, wr->send_wr, &wr_failed); | 2989 | rc = ib_post_send(isert_conn->qp, &wr->rdma_wr->wr, &wr_failed); |
2990 | if (rc) | 2990 | if (rc) |
2991 | isert_warn("ib_post_send() failed for IB_WR_RDMA_READ\n"); | 2991 | isert_warn("ib_post_send() failed for IB_WR_RDMA_READ\n"); |
2992 | 2992 | ||
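The isert conversion above shows the pattern this series uses everywhere: each non-trivial verb gets its own WR type that embeds struct ib_send_wr as its first member, named wr. Generic fields (opcode, wr_id, sg_list, num_sge, send_flags) are set through .wr, the verb-specific fields sit alongside it, and it is the address of the embedded member that gets chained and posted. A minimal sketch of that shape for an RDMA WRITE; qp, sge, remote_addr, rkey and bad_wr are placeholders assumed to be in scope, not taken from the patch:

	struct ib_rdma_wr rdma_wr = { };
	int ret;

	rdma_wr.wr.opcode   = IB_WR_RDMA_WRITE;	/* generic fields live in the embedded ib_send_wr */
	rdma_wr.wr.sg_list  = &sge;
	rdma_wr.wr.num_sge  = 1;
	rdma_wr.remote_addr = remote_addr;	/* verb-specific fields sit next to it */
	rdma_wr.rkey        = rkey;

	ret = ib_post_send(qp, &rdma_wr.wr, &bad_wr);	/* post (and chain) &foo_wr.wr, never &foo_wr */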
diff --git a/drivers/infiniband/ulp/isert/ib_isert.h b/drivers/infiniband/ulp/isert/ib_isert.h index 6a04ba3c0f72..0a4a7861cce9 100644 --- a/drivers/infiniband/ulp/isert/ib_isert.h +++ b/drivers/infiniband/ulp/isert/ib_isert.h | |||
@@ -118,9 +118,9 @@ struct isert_rdma_wr { | |||
118 | enum iser_ib_op_code iser_ib_op; | 118 | enum iser_ib_op_code iser_ib_op; |
119 | struct ib_sge *ib_sge; | 119 | struct ib_sge *ib_sge; |
120 | struct ib_sge s_ib_sge; | 120 | struct ib_sge s_ib_sge; |
121 | int send_wr_num; | 121 | int rdma_wr_num; |
122 | struct ib_send_wr *send_wr; | 122 | struct ib_rdma_wr *rdma_wr; |
123 | struct ib_send_wr s_send_wr; | 123 | struct ib_rdma_wr s_rdma_wr; |
124 | struct ib_sge ib_sg[3]; | 124 | struct ib_sge ib_sg[3]; |
125 | struct isert_data_buf data; | 125 | struct isert_data_buf data; |
126 | struct isert_data_buf prot; | 126 | struct isert_data_buf prot; |
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c index b481490ad257..1390f99ca76b 100644 --- a/drivers/infiniband/ulp/srp/ib_srp.c +++ b/drivers/infiniband/ulp/srp/ib_srp.c | |||
@@ -1306,7 +1306,7 @@ static int srp_map_finish_fr(struct srp_map_state *state, | |||
1306 | struct srp_target_port *target = ch->target; | 1306 | struct srp_target_port *target = ch->target; |
1307 | struct srp_device *dev = target->srp_host->srp_dev; | 1307 | struct srp_device *dev = target->srp_host->srp_dev; |
1308 | struct ib_send_wr *bad_wr; | 1308 | struct ib_send_wr *bad_wr; |
1309 | struct ib_send_wr wr; | 1309 | struct ib_fast_reg_wr wr; |
1310 | struct srp_fr_desc *desc; | 1310 | struct srp_fr_desc *desc; |
1311 | u32 rkey; | 1311 | u32 rkey; |
1312 | 1312 | ||
@@ -1324,17 +1324,17 @@ static int srp_map_finish_fr(struct srp_map_state *state, | |||
1324 | sizeof(state->pages[0]) * state->npages); | 1324 | sizeof(state->pages[0]) * state->npages); |
1325 | 1325 | ||
1326 | memset(&wr, 0, sizeof(wr)); | 1326 | memset(&wr, 0, sizeof(wr)); |
1327 | wr.opcode = IB_WR_FAST_REG_MR; | 1327 | wr.wr.opcode = IB_WR_FAST_REG_MR; |
1328 | wr.wr_id = FAST_REG_WR_ID_MASK; | 1328 | wr.wr.wr_id = FAST_REG_WR_ID_MASK; |
1329 | wr.wr.fast_reg.iova_start = state->base_dma_addr; | 1329 | wr.iova_start = state->base_dma_addr; |
1330 | wr.wr.fast_reg.page_list = desc->frpl; | 1330 | wr.page_list = desc->frpl; |
1331 | wr.wr.fast_reg.page_list_len = state->npages; | 1331 | wr.page_list_len = state->npages; |
1332 | wr.wr.fast_reg.page_shift = ilog2(dev->mr_page_size); | 1332 | wr.page_shift = ilog2(dev->mr_page_size); |
1333 | wr.wr.fast_reg.length = state->dma_len; | 1333 | wr.length = state->dma_len; |
1334 | wr.wr.fast_reg.access_flags = (IB_ACCESS_LOCAL_WRITE | | 1334 | wr.access_flags = (IB_ACCESS_LOCAL_WRITE | |
1335 | IB_ACCESS_REMOTE_READ | | 1335 | IB_ACCESS_REMOTE_READ | |
1336 | IB_ACCESS_REMOTE_WRITE); | 1336 | IB_ACCESS_REMOTE_WRITE); |
1337 | wr.wr.fast_reg.rkey = desc->mr->lkey; | 1337 | wr.rkey = desc->mr->lkey; |
1338 | 1338 | ||
1339 | *state->fr.next++ = desc; | 1339 | *state->fr.next++ = desc; |
1340 | state->nmdesc++; | 1340 | state->nmdesc++; |
@@ -1342,7 +1342,7 @@ static int srp_map_finish_fr(struct srp_map_state *state, | |||
1342 | srp_map_desc(state, state->base_dma_addr, state->dma_len, | 1342 | srp_map_desc(state, state->base_dma_addr, state->dma_len, |
1343 | desc->mr->rkey); | 1343 | desc->mr->rkey); |
1344 | 1344 | ||
1345 | return ib_post_send(ch->qp, &wr, &bad_wr); | 1345 | return ib_post_send(ch->qp, &wr.wr, &bad_wr); |
1346 | } | 1346 | } |
1347 | 1347 | ||
1348 | static int srp_finish_mapping(struct srp_map_state *state, | 1348 | static int srp_finish_mapping(struct srp_map_state *state, |
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c index f6fe0414139b..d65533e3a5eb 100644 --- a/drivers/infiniband/ulp/srpt/ib_srpt.c +++ b/drivers/infiniband/ulp/srpt/ib_srpt.c | |||
@@ -2822,7 +2822,7 @@ static int srpt_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event) | |||
2822 | static int srpt_perform_rdmas(struct srpt_rdma_ch *ch, | 2822 | static int srpt_perform_rdmas(struct srpt_rdma_ch *ch, |
2823 | struct srpt_send_ioctx *ioctx) | 2823 | struct srpt_send_ioctx *ioctx) |
2824 | { | 2824 | { |
2825 | struct ib_send_wr wr; | 2825 | struct ib_rdma_wr wr; |
2826 | struct ib_send_wr *bad_wr; | 2826 | struct ib_send_wr *bad_wr; |
2827 | struct rdma_iu *riu; | 2827 | struct rdma_iu *riu; |
2828 | int i; | 2828 | int i; |
@@ -2850,29 +2850,29 @@ static int srpt_perform_rdmas(struct srpt_rdma_ch *ch, | |||
2850 | 2850 | ||
2851 | for (i = 0; i < n_rdma; ++i, ++riu) { | 2851 | for (i = 0; i < n_rdma; ++i, ++riu) { |
2852 | if (dir == DMA_FROM_DEVICE) { | 2852 | if (dir == DMA_FROM_DEVICE) { |
2853 | wr.opcode = IB_WR_RDMA_WRITE; | 2853 | wr.wr.opcode = IB_WR_RDMA_WRITE; |
2854 | wr.wr_id = encode_wr_id(i == n_rdma - 1 ? | 2854 | wr.wr.wr_id = encode_wr_id(i == n_rdma - 1 ? |
2855 | SRPT_RDMA_WRITE_LAST : | 2855 | SRPT_RDMA_WRITE_LAST : |
2856 | SRPT_RDMA_MID, | 2856 | SRPT_RDMA_MID, |
2857 | ioctx->ioctx.index); | 2857 | ioctx->ioctx.index); |
2858 | } else { | 2858 | } else { |
2859 | wr.opcode = IB_WR_RDMA_READ; | 2859 | wr.wr.opcode = IB_WR_RDMA_READ; |
2860 | wr.wr_id = encode_wr_id(i == n_rdma - 1 ? | 2860 | wr.wr.wr_id = encode_wr_id(i == n_rdma - 1 ? |
2861 | SRPT_RDMA_READ_LAST : | 2861 | SRPT_RDMA_READ_LAST : |
2862 | SRPT_RDMA_MID, | 2862 | SRPT_RDMA_MID, |
2863 | ioctx->ioctx.index); | 2863 | ioctx->ioctx.index); |
2864 | } | 2864 | } |
2865 | wr.next = NULL; | 2865 | wr.wr.next = NULL; |
2866 | wr.wr.rdma.remote_addr = riu->raddr; | 2866 | wr.remote_addr = riu->raddr; |
2867 | wr.wr.rdma.rkey = riu->rkey; | 2867 | wr.rkey = riu->rkey; |
2868 | wr.num_sge = riu->sge_cnt; | 2868 | wr.wr.num_sge = riu->sge_cnt; |
2869 | wr.sg_list = riu->sge; | 2869 | wr.wr.sg_list = riu->sge; |
2870 | 2870 | ||
2871 | /* only get completion event for the last rdma write */ | 2871 | /* only get completion event for the last rdma write */ |
2872 | if (i == (n_rdma - 1) && dir == DMA_TO_DEVICE) | 2872 | if (i == (n_rdma - 1) && dir == DMA_TO_DEVICE) |
2873 | wr.send_flags = IB_SEND_SIGNALED; | 2873 | wr.wr.send_flags = IB_SEND_SIGNALED; |
2874 | 2874 | ||
2875 | ret = ib_post_send(ch->qp, &wr, &bad_wr); | 2875 | ret = ib_post_send(ch->qp, &wr.wr, &bad_wr); |
2876 | if (ret) | 2876 | if (ret) |
2877 | break; | 2877 | break; |
2878 | } | 2878 | } |
@@ -2881,11 +2881,11 @@ static int srpt_perform_rdmas(struct srpt_rdma_ch *ch, | |||
2881 | pr_err("%s[%d]: ib_post_send() returned %d for %d/%d\n", | 2881 | pr_err("%s[%d]: ib_post_send() returned %d for %d/%d\n", |
2882 | __func__, __LINE__, ret, i, n_rdma); | 2882 | __func__, __LINE__, ret, i, n_rdma); |
2883 | if (ret && i > 0) { | 2883 | if (ret && i > 0) { |
2884 | wr.num_sge = 0; | 2884 | wr.wr.num_sge = 0; |
2885 | wr.wr_id = encode_wr_id(SRPT_RDMA_ABORT, ioctx->ioctx.index); | 2885 | wr.wr.wr_id = encode_wr_id(SRPT_RDMA_ABORT, ioctx->ioctx.index); |
2886 | wr.send_flags = IB_SEND_SIGNALED; | 2886 | wr.wr.send_flags = IB_SEND_SIGNALED; |
2887 | while (ch->state == CH_LIVE && | 2887 | while (ch->state == CH_LIVE && |
2888 | ib_post_send(ch->qp, &wr, &bad_wr) != 0) { | 2888 | ib_post_send(ch->qp, &wr.wr, &bad_wr) != 0) { |
2889 | pr_info("Trying to abort failed RDMA transfer [%d]\n", | 2889 | pr_info("Trying to abort failed RDMA transfer [%d]\n", |
2890 | ioctx->ioctx.index); | 2890 | ioctx->ioctx.index); |
2891 | msleep(1000); | 2891 | msleep(1000); |
diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h index f4b6c33ac318..144449122778 100644 --- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h +++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h | |||
@@ -525,7 +525,7 @@ typedef struct kib_tx /* transmit message */ | |||
525 | __u64 tx_msgaddr; /* message buffer (I/O addr) */ | 525 | __u64 tx_msgaddr; /* message buffer (I/O addr) */ |
526 | DECLARE_PCI_UNMAP_ADDR(tx_msgunmap); /* for dma_unmap_single() */ | 526 | DECLARE_PCI_UNMAP_ADDR(tx_msgunmap); /* for dma_unmap_single() */ |
527 | int tx_nwrq; /* # send work items */ | 527 | int tx_nwrq; /* # send work items */ |
528 | struct ib_send_wr *tx_wrq; /* send work items... */ | 528 | struct ib_rdma_wr *tx_wrq; /* send work items... */ |
529 | struct ib_sge *tx_sge; /* ...and their memory */ | 529 | struct ib_sge *tx_sge; /* ...and their memory */ |
530 | kib_rdma_desc_t *tx_rd; /* rdma descriptor */ | 530 | kib_rdma_desc_t *tx_rd; /* rdma descriptor */ |
531 | int tx_nfrags; /* # entries in... */ | 531 | int tx_nfrags; /* # entries in... */ |
diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c index a23a6d956a4d..a34f1707c167 100644 --- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c +++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c | |||
@@ -834,7 +834,7 @@ kiblnd_post_tx_locked(kib_conn_t *conn, kib_tx_t *tx, int credit) | |||
834 | /* close_conn will launch failover */ | 834 | /* close_conn will launch failover */ |
835 | rc = -ENETDOWN; | 835 | rc = -ENETDOWN; |
836 | } else { | 836 | } else { |
837 | rc = ib_post_send(conn->ibc_cmid->qp, tx->tx_wrq, &bad_wrq); | 837 | rc = ib_post_send(conn->ibc_cmid->qp, &tx->tx_wrq->wr, &bad_wrq); |
838 | } | 838 | } |
839 | 839 | ||
840 | conn->ibc_last_send = jiffies; | 840 | conn->ibc_last_send = jiffies; |
@@ -1008,7 +1008,7 @@ kiblnd_init_tx_msg(lnet_ni_t *ni, kib_tx_t *tx, int type, int body_nob) | |||
1008 | { | 1008 | { |
1009 | kib_hca_dev_t *hdev = tx->tx_pool->tpo_hdev; | 1009 | kib_hca_dev_t *hdev = tx->tx_pool->tpo_hdev; |
1010 | struct ib_sge *sge = &tx->tx_sge[tx->tx_nwrq]; | 1010 | struct ib_sge *sge = &tx->tx_sge[tx->tx_nwrq]; |
1011 | struct ib_send_wr *wrq = &tx->tx_wrq[tx->tx_nwrq]; | 1011 | struct ib_rdma_wr *wrq = &tx->tx_wrq[tx->tx_nwrq]; |
1012 | int nob = offsetof(kib_msg_t, ibm_u) + body_nob; | 1012 | int nob = offsetof(kib_msg_t, ibm_u) + body_nob; |
1013 | struct ib_mr *mr; | 1013 | struct ib_mr *mr; |
1014 | 1014 | ||
@@ -1027,12 +1027,12 @@ kiblnd_init_tx_msg(lnet_ni_t *ni, kib_tx_t *tx, int type, int body_nob) | |||
1027 | 1027 | ||
1028 | memset(wrq, 0, sizeof(*wrq)); | 1028 | memset(wrq, 0, sizeof(*wrq)); |
1029 | 1029 | ||
1030 | wrq->next = NULL; | 1030 | wrq->wr.next = NULL; |
1031 | wrq->wr_id = kiblnd_ptr2wreqid(tx, IBLND_WID_TX); | 1031 | wrq->wr.wr_id = kiblnd_ptr2wreqid(tx, IBLND_WID_TX); |
1032 | wrq->sg_list = sge; | 1032 | wrq->wr.sg_list = sge; |
1033 | wrq->num_sge = 1; | 1033 | wrq->wr.num_sge = 1; |
1034 | wrq->opcode = IB_WR_SEND; | 1034 | wrq->wr.opcode = IB_WR_SEND; |
1035 | wrq->send_flags = IB_SEND_SIGNALED; | 1035 | wrq->wr.send_flags = IB_SEND_SIGNALED; |
1036 | 1036 | ||
1037 | tx->tx_nwrq++; | 1037 | tx->tx_nwrq++; |
1038 | } | 1038 | } |
@@ -1044,7 +1044,7 @@ kiblnd_init_rdma(kib_conn_t *conn, kib_tx_t *tx, int type, | |||
1044 | kib_msg_t *ibmsg = tx->tx_msg; | 1044 | kib_msg_t *ibmsg = tx->tx_msg; |
1045 | kib_rdma_desc_t *srcrd = tx->tx_rd; | 1045 | kib_rdma_desc_t *srcrd = tx->tx_rd; |
1046 | struct ib_sge *sge = &tx->tx_sge[0]; | 1046 | struct ib_sge *sge = &tx->tx_sge[0]; |
1047 | struct ib_send_wr *wrq = &tx->tx_wrq[0]; | 1047 | struct ib_rdma_wr *wrq = &tx->tx_wrq[0], *next; |
1048 | int rc = resid; | 1048 | int rc = resid; |
1049 | int srcidx; | 1049 | int srcidx; |
1050 | int dstidx; | 1050 | int dstidx; |
@@ -1090,16 +1090,17 @@ kiblnd_init_rdma(kib_conn_t *conn, kib_tx_t *tx, int type, | |||
1090 | sge->length = wrknob; | 1090 | sge->length = wrknob; |
1091 | 1091 | ||
1092 | wrq = &tx->tx_wrq[tx->tx_nwrq]; | 1092 | wrq = &tx->tx_wrq[tx->tx_nwrq]; |
1093 | next = wrq + 1; | ||
1093 | 1094 | ||
1094 | wrq->next = wrq + 1; | 1095 | wrq->wr.next = &next->wr; |
1095 | wrq->wr_id = kiblnd_ptr2wreqid(tx, IBLND_WID_RDMA); | 1096 | wrq->wr.wr_id = kiblnd_ptr2wreqid(tx, IBLND_WID_RDMA); |
1096 | wrq->sg_list = sge; | 1097 | wrq->wr.sg_list = sge; |
1097 | wrq->num_sge = 1; | 1098 | wrq->wr.num_sge = 1; |
1098 | wrq->opcode = IB_WR_RDMA_WRITE; | 1099 | wrq->wr.opcode = IB_WR_RDMA_WRITE; |
1099 | wrq->send_flags = 0; | 1100 | wrq->wr.send_flags = 0; |
1100 | 1101 | ||
1101 | wrq->wr.rdma.remote_addr = kiblnd_rd_frag_addr(dstrd, dstidx); | 1102 | wrq->remote_addr = kiblnd_rd_frag_addr(dstrd, dstidx); |
1102 | wrq->wr.rdma.rkey = kiblnd_rd_frag_key(dstrd, dstidx); | 1103 | wrq->rkey = kiblnd_rd_frag_key(dstrd, dstidx); |
1103 | 1104 | ||
1104 | srcidx = kiblnd_rd_consume_frag(srcrd, srcidx, wrknob); | 1105 | srcidx = kiblnd_rd_consume_frag(srcrd, srcidx, wrknob); |
1105 | dstidx = kiblnd_rd_consume_frag(dstrd, dstidx, wrknob); | 1106 | dstidx = kiblnd_rd_consume_frag(dstrd, dstidx, wrknob); |
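Chaining still goes through the generic next pointer, which now lives in the embedded struct ib_send_wr, so linking two specialized WRs means pointing one embedded wr at the other, exactly as kiblnd_init_rdma() does above with wrq->wr.next = &next->wr. A small sketch of the same idea, assuming a hypothetical array wrq[] of struct ib_rdma_wr filled in a loop:

	struct ib_rdma_wr *cur = &wrq[i], *next = cur + 1;

	cur->wr.next  = &next->wr;	/* the send list is linked through the embedded ib_send_wr */
	next->wr.next = NULL;		/* the final entry terminates the chain */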
diff --git a/drivers/staging/rdma/amso1100/c2_qp.c b/drivers/staging/rdma/amso1100/c2_qp.c index 86708dee58b1..4c43ca935cc7 100644 --- a/drivers/staging/rdma/amso1100/c2_qp.c +++ b/drivers/staging/rdma/amso1100/c2_qp.c | |||
@@ -860,9 +860,9 @@ int c2_post_send(struct ib_qp *ibqp, struct ib_send_wr *ib_wr, | |||
860 | flags |= SQ_READ_FENCE; | 860 | flags |= SQ_READ_FENCE; |
861 | } | 861 | } |
862 | wr.sqwr.rdma_write.remote_stag = | 862 | wr.sqwr.rdma_write.remote_stag = |
863 | cpu_to_be32(ib_wr->wr.rdma.rkey); | 863 | cpu_to_be32(rdma_wr(ib_wr)->rkey); |
864 | wr.sqwr.rdma_write.remote_to = | 864 | wr.sqwr.rdma_write.remote_to = |
865 | cpu_to_be64(ib_wr->wr.rdma.remote_addr); | 865 | cpu_to_be64(rdma_wr(ib_wr)->remote_addr); |
866 | err = move_sgl((struct c2_data_addr *) | 866 | err = move_sgl((struct c2_data_addr *) |
867 | & (wr.sqwr.rdma_write.data), | 867 | & (wr.sqwr.rdma_write.data), |
868 | ib_wr->sg_list, | 868 | ib_wr->sg_list, |
@@ -889,9 +889,9 @@ int c2_post_send(struct ib_qp *ibqp, struct ib_send_wr *ib_wr, | |||
889 | wr.sqwr.rdma_read.local_to = | 889 | wr.sqwr.rdma_read.local_to = |
890 | cpu_to_be64(ib_wr->sg_list->addr); | 890 | cpu_to_be64(ib_wr->sg_list->addr); |
891 | wr.sqwr.rdma_read.remote_stag = | 891 | wr.sqwr.rdma_read.remote_stag = |
892 | cpu_to_be32(ib_wr->wr.rdma.rkey); | 892 | cpu_to_be32(rdma_wr(ib_wr)->rkey); |
893 | wr.sqwr.rdma_read.remote_to = | 893 | wr.sqwr.rdma_read.remote_to = |
894 | cpu_to_be64(ib_wr->wr.rdma.remote_addr); | 894 | cpu_to_be64(rdma_wr(ib_wr)->remote_addr); |
895 | wr.sqwr.rdma_read.length = | 895 | wr.sqwr.rdma_read.length = |
896 | cpu_to_be32(ib_wr->sg_list->length); | 896 | cpu_to_be32(ib_wr->sg_list->length); |
897 | break; | 897 | break; |
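On the provider side the conversion runs the other way: a driver's post_send handler still receives a generic struct ib_send_wr * and recovers the verb-specific fields through the small downcast helpers this patch adds in include/rdma/ib_verbs.h (not shown in this excerpt): rdma_wr(), ud_wr(), atomic_wr(), fast_reg_wr() and so on. They are expected to be simple container_of() wrappers on the embedded member, roughly:

	static inline struct ib_rdma_wr *rdma_wr(struct ib_send_wr *wr)
	{
		return container_of(wr, struct ib_rdma_wr, wr);
	}

	/* usage as in the amso1100 hunk above: */
	u64 raddr = rdma_wr(ib_wr)->remote_addr;
	u32 rkey  = rdma_wr(ib_wr)->rkey;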
diff --git a/drivers/staging/rdma/ehca/ehca_reqs.c b/drivers/staging/rdma/ehca/ehca_reqs.c index 47f94984353d..10e2074384f5 100644 --- a/drivers/staging/rdma/ehca/ehca_reqs.c +++ b/drivers/staging/rdma/ehca/ehca_reqs.c | |||
@@ -110,19 +110,19 @@ static inline int ehca_write_rwqe(struct ipz_queue *ipz_rqueue, | |||
110 | /* need ib_mad struct */ | 110 | /* need ib_mad struct */ |
111 | #include <rdma/ib_mad.h> | 111 | #include <rdma/ib_mad.h> |
112 | 112 | ||
113 | static void trace_send_wr_ud(const struct ib_send_wr *send_wr) | 113 | static void trace_ud_wr(const struct ib_ud_wr *ud_wr) |
114 | { | 114 | { |
115 | int idx; | 115 | int idx; |
116 | int j; | 116 | int j; |
117 | while (send_wr) { | 117 | while (ud_wr) { |
118 | struct ib_mad_hdr *mad_hdr = send_wr->wr.ud.mad_hdr; | 118 | struct ib_mad_hdr *mad_hdr = ud_wr->mad_hdr; |
119 | struct ib_sge *sge = send_wr->sg_list; | 119 | struct ib_sge *sge = ud_wr->wr.sg_list; |
120 | ehca_gen_dbg("send_wr#%x wr_id=%lx num_sge=%x " | 120 | ehca_gen_dbg("ud_wr#%x wr_id=%lx num_sge=%x " |
121 | "send_flags=%x opcode=%x", idx, send_wr->wr_id, | 121 | "send_flags=%x opcode=%x", idx, ud_wr->wr.wr_id, |
122 | send_wr->num_sge, send_wr->send_flags, | 122 | ud_wr->wr.num_sge, ud_wr->wr.send_flags, |
123 | send_wr->opcode); | 123 | ud_wr->wr.opcode); |
124 | if (mad_hdr) { | 124 | if (mad_hdr) { |
125 | ehca_gen_dbg("send_wr#%x mad_hdr base_version=%x " | 125 | ehca_gen_dbg("ud_wr#%x mad_hdr base_version=%x " |
126 | "mgmt_class=%x class_version=%x method=%x " | 126 | "mgmt_class=%x class_version=%x method=%x " |
127 | "status=%x class_specific=%x tid=%lx " | 127 | "status=%x class_specific=%x tid=%lx " |
128 | "attr_id=%x resv=%x attr_mod=%x", | 128 | "attr_id=%x resv=%x attr_mod=%x", |
@@ -134,33 +134,33 @@ static void trace_send_wr_ud(const struct ib_send_wr *send_wr) | |||
134 | mad_hdr->resv, | 134 | mad_hdr->resv, |
135 | mad_hdr->attr_mod); | 135 | mad_hdr->attr_mod); |
136 | } | 136 | } |
137 | for (j = 0; j < send_wr->num_sge; j++) { | 137 | for (j = 0; j < ud_wr->wr.num_sge; j++) { |
138 | u8 *data = __va(sge->addr); | 138 | u8 *data = __va(sge->addr); |
139 | ehca_gen_dbg("send_wr#%x sge#%x addr=%p length=%x " | 139 | ehca_gen_dbg("ud_wr#%x sge#%x addr=%p length=%x " |
140 | "lkey=%x", | 140 | "lkey=%x", |
141 | idx, j, data, sge->length, sge->lkey); | 141 | idx, j, data, sge->length, sge->lkey); |
142 | /* assume length is n*16 */ | 142 | /* assume length is n*16 */ |
143 | ehca_dmp(data, sge->length, "send_wr#%x sge#%x", | 143 | ehca_dmp(data, sge->length, "ud_wr#%x sge#%x", |
144 | idx, j); | 144 | idx, j); |
145 | sge++; | 145 | sge++; |
146 | } /* eof for j */ | 146 | } /* eof for j */ |
147 | idx++; | 147 | idx++; |
148 | send_wr = send_wr->next; | 148 | ud_wr = ud_wr(ud_wr->wr.next); |
149 | } /* eof while send_wr */ | 149 | } /* eof while ud_wr */ |
150 | } | 150 | } |
151 | 151 | ||
152 | #endif /* DEBUG_GSI_SEND_WR */ | 152 | #endif /* DEBUG_GSI_SEND_WR */ |
153 | 153 | ||
154 | static inline int ehca_write_swqe(struct ehca_qp *qp, | 154 | static inline int ehca_write_swqe(struct ehca_qp *qp, |
155 | struct ehca_wqe *wqe_p, | 155 | struct ehca_wqe *wqe_p, |
156 | const struct ib_send_wr *send_wr, | 156 | struct ib_send_wr *send_wr, |
157 | u32 sq_map_idx, | 157 | u32 sq_map_idx, |
158 | int hidden) | 158 | int hidden) |
159 | { | 159 | { |
160 | u32 idx; | 160 | u32 idx; |
161 | u64 dma_length; | 161 | u64 dma_length; |
162 | struct ehca_av *my_av; | 162 | struct ehca_av *my_av; |
163 | u32 remote_qkey = send_wr->wr.ud.remote_qkey; | 163 | u32 remote_qkey; |
164 | struct ehca_qmap_entry *qmap_entry = &qp->sq_map.map[sq_map_idx]; | 164 | struct ehca_qmap_entry *qmap_entry = &qp->sq_map.map[sq_map_idx]; |
165 | 165 | ||
166 | if (unlikely((send_wr->num_sge < 0) || | 166 | if (unlikely((send_wr->num_sge < 0) || |
@@ -223,20 +223,21 @@ static inline int ehca_write_swqe(struct ehca_qp *qp, | |||
223 | /* no break is intential here */ | 223 | /* no break is intential here */ |
224 | case IB_QPT_UD: | 224 | case IB_QPT_UD: |
225 | /* IB 1.2 spec C10-15 compliance */ | 225 | /* IB 1.2 spec C10-15 compliance */ |
226 | if (send_wr->wr.ud.remote_qkey & 0x80000000) | 226 | remote_qkey = ud_wr(send_wr)->remote_qkey; |
227 | if (remote_qkey & 0x80000000) | ||
227 | remote_qkey = qp->qkey; | 228 | remote_qkey = qp->qkey; |
228 | 229 | ||
229 | wqe_p->destination_qp_number = send_wr->wr.ud.remote_qpn << 8; | 230 | wqe_p->destination_qp_number = ud_wr(send_wr)->remote_qpn << 8; |
230 | wqe_p->local_ee_context_qkey = remote_qkey; | 231 | wqe_p->local_ee_context_qkey = remote_qkey; |
231 | if (unlikely(!send_wr->wr.ud.ah)) { | 232 | if (unlikely(!ud_wr(send_wr)->ah)) { |
232 | ehca_gen_err("wr.ud.ah is NULL. qp=%p", qp); | 233 | ehca_gen_err("ud_wr(send_wr)->ah is NULL. qp=%p", qp); |
233 | return -EINVAL; | 234 | return -EINVAL; |
234 | } | 235 | } |
235 | if (unlikely(send_wr->wr.ud.remote_qpn == 0)) { | 236 | if (unlikely(ud_wr(send_wr)->remote_qpn == 0)) { |
236 | ehca_gen_err("dest QP# is 0. qp=%x", qp->real_qp_num); | 237 | ehca_gen_err("dest QP# is 0. qp=%x", qp->real_qp_num); |
237 | return -EINVAL; | 238 | return -EINVAL; |
238 | } | 239 | } |
239 | my_av = container_of(send_wr->wr.ud.ah, struct ehca_av, ib_ah); | 240 | my_av = container_of(ud_wr(send_wr)->ah, struct ehca_av, ib_ah); |
240 | wqe_p->u.ud_av.ud_av = my_av->av; | 241 | wqe_p->u.ud_av.ud_av = my_av->av; |
241 | 242 | ||
242 | /* | 243 | /* |
@@ -255,9 +256,9 @@ static inline int ehca_write_swqe(struct ehca_qp *qp, | |||
255 | qp->qp_type == IB_QPT_GSI) | 256 | qp->qp_type == IB_QPT_GSI) |
256 | wqe_p->u.ud_av.ud_av.pmtu = 1; | 257 | wqe_p->u.ud_av.ud_av.pmtu = 1; |
257 | if (qp->qp_type == IB_QPT_GSI) { | 258 | if (qp->qp_type == IB_QPT_GSI) { |
258 | wqe_p->pkeyi = send_wr->wr.ud.pkey_index; | 259 | wqe_p->pkeyi = ud_wr(send_wr)->pkey_index; |
259 | #ifdef DEBUG_GSI_SEND_WR | 260 | #ifdef DEBUG_GSI_SEND_WR |
260 | trace_send_wr_ud(send_wr); | 261 | trace_ud_wr(ud_wr(send_wr)); |
261 | #endif /* DEBUG_GSI_SEND_WR */ | 262 | #endif /* DEBUG_GSI_SEND_WR */ |
262 | } | 263 | } |
263 | break; | 264 | break; |
@@ -269,8 +270,8 @@ static inline int ehca_write_swqe(struct ehca_qp *qp, | |||
269 | case IB_QPT_RC: | 270 | case IB_QPT_RC: |
270 | /* TODO: atomic not implemented */ | 271 | /* TODO: atomic not implemented */ |
271 | wqe_p->u.nud.remote_virtual_address = | 272 | wqe_p->u.nud.remote_virtual_address = |
272 | send_wr->wr.rdma.remote_addr; | 273 | rdma_wr(send_wr)->remote_addr; |
273 | wqe_p->u.nud.rkey = send_wr->wr.rdma.rkey; | 274 | wqe_p->u.nud.rkey = rdma_wr(send_wr)->rkey; |
274 | 275 | ||
275 | /* | 276 | /* |
276 | * omitted checking of IB_SEND_INLINE | 277 | * omitted checking of IB_SEND_INLINE |
diff --git a/drivers/staging/rdma/hfi1/keys.c b/drivers/staging/rdma/hfi1/keys.c index f6eff177ace1..82c21b1c0263 100644 --- a/drivers/staging/rdma/hfi1/keys.c +++ b/drivers/staging/rdma/hfi1/keys.c | |||
@@ -358,12 +358,12 @@ bail: | |||
358 | /* | 358 | /* |
359 | * Initialize the memory region specified by the work request. | 359 | * Initialize the memory region specified by the work request. |
360 | */ | 360 | */ |
361 | int hfi1_fast_reg_mr(struct hfi1_qp *qp, struct ib_send_wr *wr) | 361 | int hfi1_fast_reg_mr(struct hfi1_qp *qp, struct ib_fast_reg_wr *wr) |
362 | { | 362 | { |
363 | struct hfi1_lkey_table *rkt = &to_idev(qp->ibqp.device)->lk_table; | 363 | struct hfi1_lkey_table *rkt = &to_idev(qp->ibqp.device)->lk_table; |
364 | struct hfi1_pd *pd = to_ipd(qp->ibqp.pd); | 364 | struct hfi1_pd *pd = to_ipd(qp->ibqp.pd); |
365 | struct hfi1_mregion *mr; | 365 | struct hfi1_mregion *mr; |
366 | u32 rkey = wr->wr.fast_reg.rkey; | 366 | u32 rkey = wr->rkey; |
367 | unsigned i, n, m; | 367 | unsigned i, n, m; |
368 | int ret = -EINVAL; | 368 | int ret = -EINVAL; |
369 | unsigned long flags; | 369 | unsigned long flags; |
@@ -380,22 +380,22 @@ int hfi1_fast_reg_mr(struct hfi1_qp *qp, struct ib_send_wr *wr) | |||
380 | if (unlikely(mr == NULL || qp->ibqp.pd != mr->pd)) | 380 | if (unlikely(mr == NULL || qp->ibqp.pd != mr->pd)) |
381 | goto bail; | 381 | goto bail; |
382 | 382 | ||
383 | if (wr->wr.fast_reg.page_list_len > mr->max_segs) | 383 | if (wr->page_list_len > mr->max_segs) |
384 | goto bail; | 384 | goto bail; |
385 | 385 | ||
386 | ps = 1UL << wr->wr.fast_reg.page_shift; | 386 | ps = 1UL << wr->page_shift; |
387 | if (wr->wr.fast_reg.length > ps * wr->wr.fast_reg.page_list_len) | 387 | if (wr->length > ps * wr->page_list_len) |
388 | goto bail; | 388 | goto bail; |
389 | 389 | ||
390 | mr->user_base = wr->wr.fast_reg.iova_start; | 390 | mr->user_base = wr->iova_start; |
391 | mr->iova = wr->wr.fast_reg.iova_start; | 391 | mr->iova = wr->iova_start; |
392 | mr->lkey = rkey; | 392 | mr->lkey = rkey; |
393 | mr->length = wr->wr.fast_reg.length; | 393 | mr->length = wr->length; |
394 | mr->access_flags = wr->wr.fast_reg.access_flags; | 394 | mr->access_flags = wr->access_flags; |
395 | page_list = wr->wr.fast_reg.page_list->page_list; | 395 | page_list = wr->page_list->page_list; |
396 | m = 0; | 396 | m = 0; |
397 | n = 0; | 397 | n = 0; |
398 | for (i = 0; i < wr->wr.fast_reg.page_list_len; i++) { | 398 | for (i = 0; i < wr->page_list_len; i++) { |
399 | mr->map[m]->segs[n].vaddr = (void *) page_list[i]; | 399 | mr->map[m]->segs[n].vaddr = (void *) page_list[i]; |
400 | mr->map[m]->segs[n].length = ps; | 400 | mr->map[m]->segs[n].length = ps; |
401 | if (++n == HFI1_SEGSZ) { | 401 | if (++n == HFI1_SEGSZ) { |
diff --git a/drivers/staging/rdma/hfi1/qp.c b/drivers/staging/rdma/hfi1/qp.c index df1fa56eaf85..f8c36166962f 100644 --- a/drivers/staging/rdma/hfi1/qp.c +++ b/drivers/staging/rdma/hfi1/qp.c | |||
@@ -422,7 +422,7 @@ static void clear_mr_refs(struct hfi1_qp *qp, int clr_sends) | |||
422 | if (qp->ibqp.qp_type == IB_QPT_UD || | 422 | if (qp->ibqp.qp_type == IB_QPT_UD || |
423 | qp->ibqp.qp_type == IB_QPT_SMI || | 423 | qp->ibqp.qp_type == IB_QPT_SMI || |
424 | qp->ibqp.qp_type == IB_QPT_GSI) | 424 | qp->ibqp.qp_type == IB_QPT_GSI) |
425 | atomic_dec(&to_iah(wqe->wr.wr.ud.ah)->refcount); | 425 | atomic_dec(&to_iah(wqe->ud_wr.ah)->refcount); |
426 | if (++qp->s_last >= qp->s_size) | 426 | if (++qp->s_last >= qp->s_size) |
427 | qp->s_last = 0; | 427 | qp->s_last = 0; |
428 | } | 428 | } |
diff --git a/drivers/staging/rdma/hfi1/rc.c b/drivers/staging/rdma/hfi1/rc.c index 632dd5ba7dfd..fd0ac608c62d 100644 --- a/drivers/staging/rdma/hfi1/rc.c +++ b/drivers/staging/rdma/hfi1/rc.c | |||
@@ -404,9 +404,9 @@ int hfi1_make_rc_req(struct hfi1_qp *qp) | |||
404 | goto bail; | 404 | goto bail; |
405 | } | 405 | } |
406 | ohdr->u.rc.reth.vaddr = | 406 | ohdr->u.rc.reth.vaddr = |
407 | cpu_to_be64(wqe->wr.wr.rdma.remote_addr); | 407 | cpu_to_be64(wqe->rdma_wr.remote_addr); |
408 | ohdr->u.rc.reth.rkey = | 408 | ohdr->u.rc.reth.rkey = |
409 | cpu_to_be32(wqe->wr.wr.rdma.rkey); | 409 | cpu_to_be32(wqe->rdma_wr.rkey); |
410 | ohdr->u.rc.reth.length = cpu_to_be32(len); | 410 | ohdr->u.rc.reth.length = cpu_to_be32(len); |
411 | hwords += sizeof(struct ib_reth) / sizeof(u32); | 411 | hwords += sizeof(struct ib_reth) / sizeof(u32); |
412 | wqe->lpsn = wqe->psn; | 412 | wqe->lpsn = wqe->psn; |
@@ -455,9 +455,9 @@ int hfi1_make_rc_req(struct hfi1_qp *qp) | |||
455 | wqe->lpsn = qp->s_next_psn++; | 455 | wqe->lpsn = qp->s_next_psn++; |
456 | } | 456 | } |
457 | ohdr->u.rc.reth.vaddr = | 457 | ohdr->u.rc.reth.vaddr = |
458 | cpu_to_be64(wqe->wr.wr.rdma.remote_addr); | 458 | cpu_to_be64(wqe->rdma_wr.remote_addr); |
459 | ohdr->u.rc.reth.rkey = | 459 | ohdr->u.rc.reth.rkey = |
460 | cpu_to_be32(wqe->wr.wr.rdma.rkey); | 460 | cpu_to_be32(wqe->rdma_wr.rkey); |
461 | ohdr->u.rc.reth.length = cpu_to_be32(len); | 461 | ohdr->u.rc.reth.length = cpu_to_be32(len); |
462 | qp->s_state = OP(RDMA_READ_REQUEST); | 462 | qp->s_state = OP(RDMA_READ_REQUEST); |
463 | hwords += sizeof(ohdr->u.rc.reth) / sizeof(u32); | 463 | hwords += sizeof(ohdr->u.rc.reth) / sizeof(u32); |
@@ -488,21 +488,21 @@ int hfi1_make_rc_req(struct hfi1_qp *qp) | |||
488 | if (wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP) { | 488 | if (wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP) { |
489 | qp->s_state = OP(COMPARE_SWAP); | 489 | qp->s_state = OP(COMPARE_SWAP); |
490 | ohdr->u.atomic_eth.swap_data = cpu_to_be64( | 490 | ohdr->u.atomic_eth.swap_data = cpu_to_be64( |
491 | wqe->wr.wr.atomic.swap); | 491 | wqe->atomic_wr.swap); |
492 | ohdr->u.atomic_eth.compare_data = cpu_to_be64( | 492 | ohdr->u.atomic_eth.compare_data = cpu_to_be64( |
493 | wqe->wr.wr.atomic.compare_add); | 493 | wqe->atomic_wr.compare_add); |
494 | } else { | 494 | } else { |
495 | qp->s_state = OP(FETCH_ADD); | 495 | qp->s_state = OP(FETCH_ADD); |
496 | ohdr->u.atomic_eth.swap_data = cpu_to_be64( | 496 | ohdr->u.atomic_eth.swap_data = cpu_to_be64( |
497 | wqe->wr.wr.atomic.compare_add); | 497 | wqe->atomic_wr.compare_add); |
498 | ohdr->u.atomic_eth.compare_data = 0; | 498 | ohdr->u.atomic_eth.compare_data = 0; |
499 | } | 499 | } |
500 | ohdr->u.atomic_eth.vaddr[0] = cpu_to_be32( | 500 | ohdr->u.atomic_eth.vaddr[0] = cpu_to_be32( |
501 | wqe->wr.wr.atomic.remote_addr >> 32); | 501 | wqe->atomic_wr.remote_addr >> 32); |
502 | ohdr->u.atomic_eth.vaddr[1] = cpu_to_be32( | 502 | ohdr->u.atomic_eth.vaddr[1] = cpu_to_be32( |
503 | wqe->wr.wr.atomic.remote_addr); | 503 | wqe->atomic_wr.remote_addr); |
504 | ohdr->u.atomic_eth.rkey = cpu_to_be32( | 504 | ohdr->u.atomic_eth.rkey = cpu_to_be32( |
505 | wqe->wr.wr.atomic.rkey); | 505 | wqe->atomic_wr.rkey); |
506 | hwords += sizeof(struct ib_atomic_eth) / sizeof(u32); | 506 | hwords += sizeof(struct ib_atomic_eth) / sizeof(u32); |
507 | ss = NULL; | 507 | ss = NULL; |
508 | len = 0; | 508 | len = 0; |
@@ -629,9 +629,9 @@ int hfi1_make_rc_req(struct hfi1_qp *qp) | |||
629 | */ | 629 | */ |
630 | len = (delta_psn(qp->s_psn, wqe->psn)) * pmtu; | 630 | len = (delta_psn(qp->s_psn, wqe->psn)) * pmtu; |
631 | ohdr->u.rc.reth.vaddr = | 631 | ohdr->u.rc.reth.vaddr = |
632 | cpu_to_be64(wqe->wr.wr.rdma.remote_addr + len); | 632 | cpu_to_be64(wqe->rdma_wr.remote_addr + len); |
633 | ohdr->u.rc.reth.rkey = | 633 | ohdr->u.rc.reth.rkey = |
634 | cpu_to_be32(wqe->wr.wr.rdma.rkey); | 634 | cpu_to_be32(wqe->rdma_wr.rkey); |
635 | ohdr->u.rc.reth.length = cpu_to_be32(wqe->length - len); | 635 | ohdr->u.rc.reth.length = cpu_to_be32(wqe->length - len); |
636 | qp->s_state = OP(RDMA_READ_REQUEST); | 636 | qp->s_state = OP(RDMA_READ_REQUEST); |
637 | hwords += sizeof(ohdr->u.rc.reth) / sizeof(u32); | 637 | hwords += sizeof(ohdr->u.rc.reth) / sizeof(u32); |
diff --git a/drivers/staging/rdma/hfi1/ruc.c b/drivers/staging/rdma/hfi1/ruc.c index a4115288db66..d614474770b3 100644 --- a/drivers/staging/rdma/hfi1/ruc.c +++ b/drivers/staging/rdma/hfi1/ruc.c | |||
@@ -481,8 +481,8 @@ again: | |||
481 | if (wqe->length == 0) | 481 | if (wqe->length == 0) |
482 | break; | 482 | break; |
483 | if (unlikely(!hfi1_rkey_ok(qp, &qp->r_sge.sge, wqe->length, | 483 | if (unlikely(!hfi1_rkey_ok(qp, &qp->r_sge.sge, wqe->length, |
484 | wqe->wr.wr.rdma.remote_addr, | 484 | wqe->rdma_wr.remote_addr, |
485 | wqe->wr.wr.rdma.rkey, | 485 | wqe->rdma_wr.rkey, |
486 | IB_ACCESS_REMOTE_WRITE))) | 486 | IB_ACCESS_REMOTE_WRITE))) |
487 | goto acc_err; | 487 | goto acc_err; |
488 | qp->r_sge.sg_list = NULL; | 488 | qp->r_sge.sg_list = NULL; |
@@ -494,8 +494,8 @@ again: | |||
494 | if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_READ))) | 494 | if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_READ))) |
495 | goto inv_err; | 495 | goto inv_err; |
496 | if (unlikely(!hfi1_rkey_ok(qp, &sqp->s_sge.sge, wqe->length, | 496 | if (unlikely(!hfi1_rkey_ok(qp, &sqp->s_sge.sge, wqe->length, |
497 | wqe->wr.wr.rdma.remote_addr, | 497 | wqe->rdma_wr.remote_addr, |
498 | wqe->wr.wr.rdma.rkey, | 498 | wqe->rdma_wr.rkey, |
499 | IB_ACCESS_REMOTE_READ))) | 499 | IB_ACCESS_REMOTE_READ))) |
500 | goto acc_err; | 500 | goto acc_err; |
501 | release = 0; | 501 | release = 0; |
@@ -512,18 +512,18 @@ again: | |||
512 | if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC))) | 512 | if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC))) |
513 | goto inv_err; | 513 | goto inv_err; |
514 | if (unlikely(!hfi1_rkey_ok(qp, &qp->r_sge.sge, sizeof(u64), | 514 | if (unlikely(!hfi1_rkey_ok(qp, &qp->r_sge.sge, sizeof(u64), |
515 | wqe->wr.wr.atomic.remote_addr, | 515 | wqe->atomic_wr.remote_addr, |
516 | wqe->wr.wr.atomic.rkey, | 516 | wqe->atomic_wr.rkey, |
517 | IB_ACCESS_REMOTE_ATOMIC))) | 517 | IB_ACCESS_REMOTE_ATOMIC))) |
518 | goto acc_err; | 518 | goto acc_err; |
519 | /* Perform atomic OP and save result. */ | 519 | /* Perform atomic OP and save result. */ |
520 | maddr = (atomic64_t *) qp->r_sge.sge.vaddr; | 520 | maddr = (atomic64_t *) qp->r_sge.sge.vaddr; |
521 | sdata = wqe->wr.wr.atomic.compare_add; | 521 | sdata = wqe->atomic_wr.compare_add; |
522 | *(u64 *) sqp->s_sge.sge.vaddr = | 522 | *(u64 *) sqp->s_sge.sge.vaddr = |
523 | (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ? | 523 | (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ? |
524 | (u64) atomic64_add_return(sdata, maddr) - sdata : | 524 | (u64) atomic64_add_return(sdata, maddr) - sdata : |
525 | (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr, | 525 | (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr, |
526 | sdata, wqe->wr.wr.atomic.swap); | 526 | sdata, wqe->atomic_wr.swap); |
527 | hfi1_put_mr(qp->r_sge.sge.mr); | 527 | hfi1_put_mr(qp->r_sge.sge.mr); |
528 | qp->r_sge.num_sge = 0; | 528 | qp->r_sge.num_sge = 0; |
529 | goto send_comp; | 529 | goto send_comp; |
@@ -913,7 +913,7 @@ void hfi1_send_complete(struct hfi1_qp *qp, struct hfi1_swqe *wqe, | |||
913 | if (qp->ibqp.qp_type == IB_QPT_UD || | 913 | if (qp->ibqp.qp_type == IB_QPT_UD || |
914 | qp->ibqp.qp_type == IB_QPT_SMI || | 914 | qp->ibqp.qp_type == IB_QPT_SMI || |
915 | qp->ibqp.qp_type == IB_QPT_GSI) | 915 | qp->ibqp.qp_type == IB_QPT_GSI) |
916 | atomic_dec(&to_iah(wqe->wr.wr.ud.ah)->refcount); | 916 | atomic_dec(&to_iah(wqe->ud_wr.ah)->refcount); |
917 | 917 | ||
918 | /* See ch. 11.2.4.1 and 10.7.3.1 */ | 918 | /* See ch. 11.2.4.1 and 10.7.3.1 */ |
919 | if (!(qp->s_flags & HFI1_S_SIGNAL_REQ_WR) || | 919 | if (!(qp->s_flags & HFI1_S_SIGNAL_REQ_WR) || |
diff --git a/drivers/staging/rdma/hfi1/uc.c b/drivers/staging/rdma/hfi1/uc.c index b536f397737c..6095039c4485 100644 --- a/drivers/staging/rdma/hfi1/uc.c +++ b/drivers/staging/rdma/hfi1/uc.c | |||
@@ -147,9 +147,9 @@ int hfi1_make_uc_req(struct hfi1_qp *qp) | |||
147 | case IB_WR_RDMA_WRITE: | 147 | case IB_WR_RDMA_WRITE: |
148 | case IB_WR_RDMA_WRITE_WITH_IMM: | 148 | case IB_WR_RDMA_WRITE_WITH_IMM: |
149 | ohdr->u.rc.reth.vaddr = | 149 | ohdr->u.rc.reth.vaddr = |
150 | cpu_to_be64(wqe->wr.wr.rdma.remote_addr); | 150 | cpu_to_be64(wqe->rdma_wr.remote_addr); |
151 | ohdr->u.rc.reth.rkey = | 151 | ohdr->u.rc.reth.rkey = |
152 | cpu_to_be32(wqe->wr.wr.rdma.rkey); | 152 | cpu_to_be32(wqe->rdma_wr.rkey); |
153 | ohdr->u.rc.reth.length = cpu_to_be32(len); | 153 | ohdr->u.rc.reth.length = cpu_to_be32(len); |
154 | hwords += sizeof(struct ib_reth) / 4; | 154 | hwords += sizeof(struct ib_reth) / 4; |
155 | if (len > pmtu) { | 155 | if (len > pmtu) { |
diff --git a/drivers/staging/rdma/hfi1/ud.c b/drivers/staging/rdma/hfi1/ud.c index d40d1a1e10aa..5a9c784bec04 100644 --- a/drivers/staging/rdma/hfi1/ud.c +++ b/drivers/staging/rdma/hfi1/ud.c | |||
@@ -80,7 +80,7 @@ static void ud_loopback(struct hfi1_qp *sqp, struct hfi1_swqe *swqe) | |||
80 | 80 | ||
81 | rcu_read_lock(); | 81 | rcu_read_lock(); |
82 | 82 | ||
83 | qp = hfi1_lookup_qpn(ibp, swqe->wr.wr.ud.remote_qpn); | 83 | qp = hfi1_lookup_qpn(ibp, swqe->ud_wr.remote_qpn); |
84 | if (!qp) { | 84 | if (!qp) { |
85 | ibp->n_pkt_drops++; | 85 | ibp->n_pkt_drops++; |
86 | rcu_read_unlock(); | 86 | rcu_read_unlock(); |
@@ -98,7 +98,7 @@ static void ud_loopback(struct hfi1_qp *sqp, struct hfi1_swqe *swqe) | |||
98 | goto drop; | 98 | goto drop; |
99 | } | 99 | } |
100 | 100 | ||
101 | ah_attr = &to_iah(swqe->wr.wr.ud.ah)->attr; | 101 | ah_attr = &to_iah(swqe->ud_wr.ah)->attr; |
102 | ppd = ppd_from_ibp(ibp); | 102 | ppd = ppd_from_ibp(ibp); |
103 | 103 | ||
104 | if (qp->ibqp.qp_num > 1) { | 104 | if (qp->ibqp.qp_num > 1) { |
@@ -128,8 +128,8 @@ static void ud_loopback(struct hfi1_qp *sqp, struct hfi1_swqe *swqe) | |||
128 | if (qp->ibqp.qp_num) { | 128 | if (qp->ibqp.qp_num) { |
129 | u32 qkey; | 129 | u32 qkey; |
130 | 130 | ||
131 | qkey = (int)swqe->wr.wr.ud.remote_qkey < 0 ? | 131 | qkey = (int)swqe->ud_wr.remote_qkey < 0 ? |
132 | sqp->qkey : swqe->wr.wr.ud.remote_qkey; | 132 | sqp->qkey : swqe->ud_wr.remote_qkey; |
133 | if (unlikely(qkey != qp->qkey)) { | 133 | if (unlikely(qkey != qp->qkey)) { |
134 | u16 lid; | 134 | u16 lid; |
135 | 135 | ||
@@ -234,7 +234,7 @@ static void ud_loopback(struct hfi1_qp *sqp, struct hfi1_swqe *swqe) | |||
234 | if (qp->ibqp.qp_type == IB_QPT_GSI || qp->ibqp.qp_type == IB_QPT_SMI) { | 234 | if (qp->ibqp.qp_type == IB_QPT_GSI || qp->ibqp.qp_type == IB_QPT_SMI) { |
235 | if (sqp->ibqp.qp_type == IB_QPT_GSI || | 235 | if (sqp->ibqp.qp_type == IB_QPT_GSI || |
236 | sqp->ibqp.qp_type == IB_QPT_SMI) | 236 | sqp->ibqp.qp_type == IB_QPT_SMI) |
237 | wc.pkey_index = swqe->wr.wr.ud.pkey_index; | 237 | wc.pkey_index = swqe->ud_wr.pkey_index; |
238 | else | 238 | else |
239 | wc.pkey_index = sqp->s_pkey_index; | 239 | wc.pkey_index = sqp->s_pkey_index; |
240 | } else { | 240 | } else { |
@@ -309,7 +309,7 @@ int hfi1_make_ud_req(struct hfi1_qp *qp) | |||
309 | /* Construct the header. */ | 309 | /* Construct the header. */ |
310 | ibp = to_iport(qp->ibqp.device, qp->port_num); | 310 | ibp = to_iport(qp->ibqp.device, qp->port_num); |
311 | ppd = ppd_from_ibp(ibp); | 311 | ppd = ppd_from_ibp(ibp); |
312 | ah_attr = &to_iah(wqe->wr.wr.ud.ah)->attr; | 312 | ah_attr = &to_iah(wqe->ud_wr.ah)->attr; |
313 | if (ah_attr->dlid < HFI1_MULTICAST_LID_BASE || | 313 | if (ah_attr->dlid < HFI1_MULTICAST_LID_BASE || |
314 | ah_attr->dlid == HFI1_PERMISSIVE_LID) { | 314 | ah_attr->dlid == HFI1_PERMISSIVE_LID) { |
315 | lid = ah_attr->dlid & ~((1 << ppd->lmc) - 1); | 315 | lid = ah_attr->dlid & ~((1 << ppd->lmc) - 1); |
@@ -401,18 +401,18 @@ int hfi1_make_ud_req(struct hfi1_qp *qp) | |||
401 | bth0 |= IB_BTH_SOLICITED; | 401 | bth0 |= IB_BTH_SOLICITED; |
402 | bth0 |= extra_bytes << 20; | 402 | bth0 |= extra_bytes << 20; |
403 | if (qp->ibqp.qp_type == IB_QPT_GSI || qp->ibqp.qp_type == IB_QPT_SMI) | 403 | if (qp->ibqp.qp_type == IB_QPT_GSI || qp->ibqp.qp_type == IB_QPT_SMI) |
404 | bth0 |= hfi1_get_pkey(ibp, wqe->wr.wr.ud.pkey_index); | 404 | bth0 |= hfi1_get_pkey(ibp, wqe->ud_wr.pkey_index); |
405 | else | 405 | else |
406 | bth0 |= hfi1_get_pkey(ibp, qp->s_pkey_index); | 406 | bth0 |= hfi1_get_pkey(ibp, qp->s_pkey_index); |
407 | ohdr->bth[0] = cpu_to_be32(bth0); | 407 | ohdr->bth[0] = cpu_to_be32(bth0); |
408 | ohdr->bth[1] = cpu_to_be32(wqe->wr.wr.ud.remote_qpn); | 408 | ohdr->bth[1] = cpu_to_be32(wqe->ud_wr.remote_qpn); |
409 | ohdr->bth[2] = cpu_to_be32(mask_psn(qp->s_next_psn++)); | 409 | ohdr->bth[2] = cpu_to_be32(mask_psn(qp->s_next_psn++)); |
410 | /* | 410 | /* |
411 | * Qkeys with the high order bit set mean use the | 411 | * Qkeys with the high order bit set mean use the |
412 | * qkey from the QP context instead of the WR (see 10.2.5). | 412 | * qkey from the QP context instead of the WR (see 10.2.5). |
413 | */ | 413 | */ |
414 | ohdr->u.ud.deth[0] = cpu_to_be32((int)wqe->wr.wr.ud.remote_qkey < 0 ? | 414 | ohdr->u.ud.deth[0] = cpu_to_be32((int)wqe->ud_wr.remote_qkey < 0 ? |
415 | qp->qkey : wqe->wr.wr.ud.remote_qkey); | 415 | qp->qkey : wqe->ud_wr.remote_qkey); |
416 | ohdr->u.ud.deth[1] = cpu_to_be32(qp->ibqp.qp_num); | 416 | ohdr->u.ud.deth[1] = cpu_to_be32(qp->ibqp.qp_num); |
417 | /* disarm any ahg */ | 417 | /* disarm any ahg */ |
418 | qp->s_hdr->ahgcount = 0; | 418 | qp->s_hdr->ahgcount = 0; |
diff --git a/drivers/staging/rdma/hfi1/verbs.c b/drivers/staging/rdma/hfi1/verbs.c index 41bb59eb001c..981e6c1b79a3 100644 --- a/drivers/staging/rdma/hfi1/verbs.c +++ b/drivers/staging/rdma/hfi1/verbs.c | |||
@@ -391,7 +391,7 @@ static int post_one_send(struct hfi1_qp *qp, struct ib_send_wr *wr) | |||
391 | wr->opcode != IB_WR_SEND_WITH_IMM) | 391 | wr->opcode != IB_WR_SEND_WITH_IMM) |
392 | return -EINVAL; | 392 | return -EINVAL; |
393 | /* Check UD destination address PD */ | 393 | /* Check UD destination address PD */ |
394 | if (qp->ibqp.pd != wr->wr.ud.ah->pd) | 394 | if (qp->ibqp.pd != ud_wr(wr)->ah->pd) |
395 | return -EINVAL; | 395 | return -EINVAL; |
396 | } else if ((unsigned) wr->opcode > IB_WR_ATOMIC_FETCH_AND_ADD) | 396 | } else if ((unsigned) wr->opcode > IB_WR_ATOMIC_FETCH_AND_ADD) |
397 | return -EINVAL; | 397 | return -EINVAL; |
@@ -412,7 +412,24 @@ static int post_one_send(struct hfi1_qp *qp, struct ib_send_wr *wr) | |||
412 | rkt = &to_idev(qp->ibqp.device)->lk_table; | 412 | rkt = &to_idev(qp->ibqp.device)->lk_table; |
413 | pd = to_ipd(qp->ibqp.pd); | 413 | pd = to_ipd(qp->ibqp.pd); |
414 | wqe = get_swqe_ptr(qp, qp->s_head); | 414 | wqe = get_swqe_ptr(qp, qp->s_head); |
415 | wqe->wr = *wr; | 415 | |
416 | |||
417 | if (qp->ibqp.qp_type != IB_QPT_UC && | ||
418 | qp->ibqp.qp_type != IB_QPT_RC) | ||
419 | memcpy(&wqe->ud_wr, ud_wr(wr), sizeof(wqe->ud_wr)); | ||
420 | else if (wr->opcode == IB_WR_FAST_REG_MR) | ||
421 | memcpy(&wqe->fast_reg_wr, fast_reg_wr(wr), | ||
422 | sizeof(wqe->fast_reg_wr)); | ||
423 | else if (wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM || | ||
424 | wr->opcode == IB_WR_RDMA_WRITE || | ||
425 | wr->opcode == IB_WR_RDMA_READ) | ||
426 | memcpy(&wqe->rdma_wr, rdma_wr(wr), sizeof(wqe->rdma_wr)); | ||
427 | else if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP || | ||
428 | wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD) | ||
429 | memcpy(&wqe->atomic_wr, atomic_wr(wr), sizeof(wqe->atomic_wr)); | ||
430 | else | ||
431 | memcpy(&wqe->wr, wr, sizeof(wqe->wr)); | ||
432 | |||
416 | wqe->length = 0; | 433 | wqe->length = 0; |
417 | j = 0; | 434 | j = 0; |
418 | if (wr->num_sge) { | 435 | if (wr->num_sge) { |
@@ -438,7 +455,7 @@ static int post_one_send(struct hfi1_qp *qp, struct ib_send_wr *wr) | |||
438 | if (wqe->length > 0x80000000U) | 455 | if (wqe->length > 0x80000000U) |
439 | goto bail_inval_free; | 456 | goto bail_inval_free; |
440 | } else { | 457 | } else { |
441 | struct hfi1_ah *ah = to_iah(wr->wr.ud.ah); | 458 | struct hfi1_ah *ah = to_iah(ud_wr(wr)->ah); |
442 | 459 | ||
443 | atomic_inc(&ah->refcount); | 460 | atomic_inc(&ah->refcount); |
444 | } | 461 | } |
diff --git a/drivers/staging/rdma/hfi1/verbs.h b/drivers/staging/rdma/hfi1/verbs.h index ed903a93baf7..cf5a3c956284 100644 --- a/drivers/staging/rdma/hfi1/verbs.h +++ b/drivers/staging/rdma/hfi1/verbs.h | |||
@@ -348,7 +348,13 @@ struct hfi1_mr { | |||
348 | * in qp->s_max_sge. | 348 | * in qp->s_max_sge. |
349 | */ | 349 | */ |
350 | struct hfi1_swqe { | 350 | struct hfi1_swqe { |
351 | struct ib_send_wr wr; /* don't use wr.sg_list */ | 351 | union { |
352 | struct ib_send_wr wr; /* don't use wr.sg_list */ | ||
353 | struct ib_rdma_wr rdma_wr; | ||
354 | struct ib_atomic_wr atomic_wr; | ||
355 | struct ib_ud_wr ud_wr; | ||
356 | struct ib_fast_reg_wr fast_reg_wr; | ||
357 | }; | ||
352 | u32 psn; /* first packet sequence number */ | 358 | u32 psn; /* first packet sequence number */ |
353 | u32 lpsn; /* last packet sequence number */ | 359 | u32 lpsn; /* last packet sequence number */ |
354 | u32 ssn; /* send sequence number */ | 360 | u32 ssn; /* send sequence number */ |
@@ -1025,7 +1031,7 @@ struct ib_fast_reg_page_list *hfi1_alloc_fast_reg_page_list( | |||
1025 | 1031 | ||
1026 | void hfi1_free_fast_reg_page_list(struct ib_fast_reg_page_list *pl); | 1032 | void hfi1_free_fast_reg_page_list(struct ib_fast_reg_page_list *pl); |
1027 | 1033 | ||
1028 | int hfi1_fast_reg_mr(struct hfi1_qp *qp, struct ib_send_wr *wr); | 1034 | int hfi1_fast_reg_mr(struct hfi1_qp *qp, struct ib_fast_reg_wr *wr); |
1029 | 1035 | ||
1030 | struct ib_fmr *hfi1_alloc_fmr(struct ib_pd *pd, int mr_access_flags, | 1036 | struct ib_fmr *hfi1_alloc_fmr(struct ib_pd *pd, int mr_access_flags, |
1031 | struct ib_fmr_attr *fmr_attr); | 1037 | struct ib_fmr_attr *fmr_attr); |
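The hfi1 swqe now overlays every specialized WR type in one anonymous union, and post_one_send() above copies only the member that matches the QP type or opcode. Because every specialized type begins with the embedded struct ib_send_wr, the union members alias on that common prefix, which is why the rc.c, ruc.c and ud.c hunks can keep reading generic fields through wqe->wr while taking the verb-specific payload from wqe->rdma_wr, wqe->atomic_wr or wqe->ud_wr. A short sketch of the invariant, using the names from the hunks above:

	/* after post_one_send() has done, for an RDMA opcode:
	 *	memcpy(&wqe->rdma_wr, rdma_wr(wr), sizeof(wqe->rdma_wr));
	 */
	enum ib_wr_opcode op = wqe->wr.opcode;	/* same bytes as wqe->rdma_wr.wr.opcode */
	u64 raddr = wqe->rdma_wr.remote_addr;	/* verb-specific payload via the union member */
	u32 rkey  = wqe->rdma_wr.rkey;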
diff --git a/drivers/staging/rdma/ipath/ipath_rc.c b/drivers/staging/rdma/ipath/ipath_rc.c index 79b3dbc97179..d4aa53574e57 100644 --- a/drivers/staging/rdma/ipath/ipath_rc.c +++ b/drivers/staging/rdma/ipath/ipath_rc.c | |||
@@ -350,9 +350,9 @@ int ipath_make_rc_req(struct ipath_qp *qp) | |||
350 | goto bail; | 350 | goto bail; |
351 | } | 351 | } |
352 | ohdr->u.rc.reth.vaddr = | 352 | ohdr->u.rc.reth.vaddr = |
353 | cpu_to_be64(wqe->wr.wr.rdma.remote_addr); | 353 | cpu_to_be64(wqe->rdma_wr.remote_addr); |
354 | ohdr->u.rc.reth.rkey = | 354 | ohdr->u.rc.reth.rkey = |
355 | cpu_to_be32(wqe->wr.wr.rdma.rkey); | 355 | cpu_to_be32(wqe->rdma_wr.rkey); |
356 | ohdr->u.rc.reth.length = cpu_to_be32(len); | 356 | ohdr->u.rc.reth.length = cpu_to_be32(len); |
357 | hwords += sizeof(struct ib_reth) / sizeof(u32); | 357 | hwords += sizeof(struct ib_reth) / sizeof(u32); |
358 | wqe->lpsn = wqe->psn; | 358 | wqe->lpsn = wqe->psn; |
@@ -401,9 +401,9 @@ int ipath_make_rc_req(struct ipath_qp *qp) | |||
401 | wqe->lpsn = qp->s_next_psn++; | 401 | wqe->lpsn = qp->s_next_psn++; |
402 | } | 402 | } |
403 | ohdr->u.rc.reth.vaddr = | 403 | ohdr->u.rc.reth.vaddr = |
404 | cpu_to_be64(wqe->wr.wr.rdma.remote_addr); | 404 | cpu_to_be64(wqe->rdma_wr.remote_addr); |
405 | ohdr->u.rc.reth.rkey = | 405 | ohdr->u.rc.reth.rkey = |
406 | cpu_to_be32(wqe->wr.wr.rdma.rkey); | 406 | cpu_to_be32(wqe->rdma_wr.rkey); |
407 | ohdr->u.rc.reth.length = cpu_to_be32(len); | 407 | ohdr->u.rc.reth.length = cpu_to_be32(len); |
408 | qp->s_state = OP(RDMA_READ_REQUEST); | 408 | qp->s_state = OP(RDMA_READ_REQUEST); |
409 | hwords += sizeof(ohdr->u.rc.reth) / sizeof(u32); | 409 | hwords += sizeof(ohdr->u.rc.reth) / sizeof(u32); |
@@ -433,21 +433,21 @@ int ipath_make_rc_req(struct ipath_qp *qp) | |||
433 | if (wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP) { | 433 | if (wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP) { |
434 | qp->s_state = OP(COMPARE_SWAP); | 434 | qp->s_state = OP(COMPARE_SWAP); |
435 | ohdr->u.atomic_eth.swap_data = cpu_to_be64( | 435 | ohdr->u.atomic_eth.swap_data = cpu_to_be64( |
436 | wqe->wr.wr.atomic.swap); | 436 | wqe->atomic_wr.swap); |
437 | ohdr->u.atomic_eth.compare_data = cpu_to_be64( | 437 | ohdr->u.atomic_eth.compare_data = cpu_to_be64( |
438 | wqe->wr.wr.atomic.compare_add); | 438 | wqe->atomic_wr.compare_add); |
439 | } else { | 439 | } else { |
440 | qp->s_state = OP(FETCH_ADD); | 440 | qp->s_state = OP(FETCH_ADD); |
441 | ohdr->u.atomic_eth.swap_data = cpu_to_be64( | 441 | ohdr->u.atomic_eth.swap_data = cpu_to_be64( |
442 | wqe->wr.wr.atomic.compare_add); | 442 | wqe->atomic_wr.compare_add); |
443 | ohdr->u.atomic_eth.compare_data = 0; | 443 | ohdr->u.atomic_eth.compare_data = 0; |
444 | } | 444 | } |
445 | ohdr->u.atomic_eth.vaddr[0] = cpu_to_be32( | 445 | ohdr->u.atomic_eth.vaddr[0] = cpu_to_be32( |
446 | wqe->wr.wr.atomic.remote_addr >> 32); | 446 | wqe->atomic_wr.remote_addr >> 32); |
447 | ohdr->u.atomic_eth.vaddr[1] = cpu_to_be32( | 447 | ohdr->u.atomic_eth.vaddr[1] = cpu_to_be32( |
448 | wqe->wr.wr.atomic.remote_addr); | 448 | wqe->atomic_wr.remote_addr); |
449 | ohdr->u.atomic_eth.rkey = cpu_to_be32( | 449 | ohdr->u.atomic_eth.rkey = cpu_to_be32( |
450 | wqe->wr.wr.atomic.rkey); | 450 | wqe->atomic_wr.rkey); |
451 | hwords += sizeof(struct ib_atomic_eth) / sizeof(u32); | 451 | hwords += sizeof(struct ib_atomic_eth) / sizeof(u32); |
452 | ss = NULL; | 452 | ss = NULL; |
453 | len = 0; | 453 | len = 0; |
@@ -567,9 +567,9 @@ int ipath_make_rc_req(struct ipath_qp *qp) | |||
567 | ipath_init_restart(qp, wqe); | 567 | ipath_init_restart(qp, wqe); |
568 | len = ((qp->s_psn - wqe->psn) & IPATH_PSN_MASK) * pmtu; | 568 | len = ((qp->s_psn - wqe->psn) & IPATH_PSN_MASK) * pmtu; |
569 | ohdr->u.rc.reth.vaddr = | 569 | ohdr->u.rc.reth.vaddr = |
570 | cpu_to_be64(wqe->wr.wr.rdma.remote_addr + len); | 570 | cpu_to_be64(wqe->rdma_wr.remote_addr + len); |
571 | ohdr->u.rc.reth.rkey = | 571 | ohdr->u.rc.reth.rkey = |
572 | cpu_to_be32(wqe->wr.wr.rdma.rkey); | 572 | cpu_to_be32(wqe->rdma_wr.rkey); |
573 | ohdr->u.rc.reth.length = cpu_to_be32(qp->s_len); | 573 | ohdr->u.rc.reth.length = cpu_to_be32(qp->s_len); |
574 | qp->s_state = OP(RDMA_READ_REQUEST); | 574 | qp->s_state = OP(RDMA_READ_REQUEST); |
575 | hwords += sizeof(ohdr->u.rc.reth) / sizeof(u32); | 575 | hwords += sizeof(ohdr->u.rc.reth) / sizeof(u32); |
diff --git a/drivers/staging/rdma/ipath/ipath_ruc.c b/drivers/staging/rdma/ipath/ipath_ruc.c index 1f95bbaf7602..46af8b03d3d4 100644 --- a/drivers/staging/rdma/ipath/ipath_ruc.c +++ b/drivers/staging/rdma/ipath/ipath_ruc.c | |||
@@ -353,8 +353,8 @@ again: | |||
353 | if (wqe->length == 0) | 353 | if (wqe->length == 0) |
354 | break; | 354 | break; |
355 | if (unlikely(!ipath_rkey_ok(qp, &qp->r_sge, wqe->length, | 355 | if (unlikely(!ipath_rkey_ok(qp, &qp->r_sge, wqe->length, |
356 | wqe->wr.wr.rdma.remote_addr, | 356 | wqe->rdma_wr.remote_addr, |
357 | wqe->wr.wr.rdma.rkey, | 357 | wqe->rdma_wr.rkey, |
358 | IB_ACCESS_REMOTE_WRITE))) | 358 | IB_ACCESS_REMOTE_WRITE))) |
359 | goto acc_err; | 359 | goto acc_err; |
360 | break; | 360 | break; |
@@ -363,8 +363,8 @@ again: | |||
363 | if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_READ))) | 363 | if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_READ))) |
364 | goto inv_err; | 364 | goto inv_err; |
365 | if (unlikely(!ipath_rkey_ok(qp, &sqp->s_sge, wqe->length, | 365 | if (unlikely(!ipath_rkey_ok(qp, &sqp->s_sge, wqe->length, |
366 | wqe->wr.wr.rdma.remote_addr, | 366 | wqe->rdma_wr.remote_addr, |
367 | wqe->wr.wr.rdma.rkey, | 367 | wqe->rdma_wr.rkey, |
368 | IB_ACCESS_REMOTE_READ))) | 368 | IB_ACCESS_REMOTE_READ))) |
369 | goto acc_err; | 369 | goto acc_err; |
370 | qp->r_sge.sge = wqe->sg_list[0]; | 370 | qp->r_sge.sge = wqe->sg_list[0]; |
@@ -377,18 +377,18 @@ again: | |||
377 | if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC))) | 377 | if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC))) |
378 | goto inv_err; | 378 | goto inv_err; |
379 | if (unlikely(!ipath_rkey_ok(qp, &qp->r_sge, sizeof(u64), | 379 | if (unlikely(!ipath_rkey_ok(qp, &qp->r_sge, sizeof(u64), |
380 | wqe->wr.wr.atomic.remote_addr, | 380 | wqe->atomic_wr.remote_addr, |
381 | wqe->wr.wr.atomic.rkey, | 381 | wqe->atomic_wr.rkey, |
382 | IB_ACCESS_REMOTE_ATOMIC))) | 382 | IB_ACCESS_REMOTE_ATOMIC))) |
383 | goto acc_err; | 383 | goto acc_err; |
384 | /* Perform atomic OP and save result. */ | 384 | /* Perform atomic OP and save result. */ |
385 | maddr = (atomic64_t *) qp->r_sge.sge.vaddr; | 385 | maddr = (atomic64_t *) qp->r_sge.sge.vaddr; |
386 | sdata = wqe->wr.wr.atomic.compare_add; | 386 | sdata = wqe->atomic_wr.compare_add; |
387 | *(u64 *) sqp->s_sge.sge.vaddr = | 387 | *(u64 *) sqp->s_sge.sge.vaddr = |
388 | (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ? | 388 | (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ? |
389 | (u64) atomic64_add_return(sdata, maddr) - sdata : | 389 | (u64) atomic64_add_return(sdata, maddr) - sdata : |
390 | (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr, | 390 | (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr, |
391 | sdata, wqe->wr.wr.atomic.swap); | 391 | sdata, wqe->atomic_wr.swap); |
392 | goto send_comp; | 392 | goto send_comp; |
393 | 393 | ||
394 | default: | 394 | default: |
diff --git a/drivers/staging/rdma/ipath/ipath_uc.c b/drivers/staging/rdma/ipath/ipath_uc.c index 22e60998f1a7..0246b30280b9 100644 --- a/drivers/staging/rdma/ipath/ipath_uc.c +++ b/drivers/staging/rdma/ipath/ipath_uc.c | |||
@@ -126,9 +126,9 @@ int ipath_make_uc_req(struct ipath_qp *qp) | |||
126 | case IB_WR_RDMA_WRITE: | 126 | case IB_WR_RDMA_WRITE: |
127 | case IB_WR_RDMA_WRITE_WITH_IMM: | 127 | case IB_WR_RDMA_WRITE_WITH_IMM: |
128 | ohdr->u.rc.reth.vaddr = | 128 | ohdr->u.rc.reth.vaddr = |
129 | cpu_to_be64(wqe->wr.wr.rdma.remote_addr); | 129 | cpu_to_be64(wqe->rdma_wr.remote_addr); |
130 | ohdr->u.rc.reth.rkey = | 130 | ohdr->u.rc.reth.rkey = |
131 | cpu_to_be32(wqe->wr.wr.rdma.rkey); | 131 | cpu_to_be32(wqe->rdma_wr.rkey); |
132 | ohdr->u.rc.reth.length = cpu_to_be32(len); | 132 | ohdr->u.rc.reth.length = cpu_to_be32(len); |
133 | hwords += sizeof(struct ib_reth) / 4; | 133 | hwords += sizeof(struct ib_reth) / 4; |
134 | if (len > pmtu) { | 134 | if (len > pmtu) { |
diff --git a/drivers/staging/rdma/ipath/ipath_ud.c b/drivers/staging/rdma/ipath/ipath_ud.c index e8a2a915251e..3ffc1565d03d 100644 --- a/drivers/staging/rdma/ipath/ipath_ud.c +++ b/drivers/staging/rdma/ipath/ipath_ud.c | |||
@@ -65,7 +65,7 @@ static void ipath_ud_loopback(struct ipath_qp *sqp, struct ipath_swqe *swqe) | |||
65 | u32 rlen; | 65 | u32 rlen; |
66 | u32 length; | 66 | u32 length; |
67 | 67 | ||
68 | qp = ipath_lookup_qpn(&dev->qp_table, swqe->wr.wr.ud.remote_qpn); | 68 | qp = ipath_lookup_qpn(&dev->qp_table, swqe->ud_wr.remote_qpn); |
69 | if (!qp || !(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_RECV_OK)) { | 69 | if (!qp || !(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_RECV_OK)) { |
70 | dev->n_pkt_drops++; | 70 | dev->n_pkt_drops++; |
71 | goto done; | 71 | goto done; |
@@ -77,8 +77,8 @@ static void ipath_ud_loopback(struct ipath_qp *sqp, struct ipath_swqe *swqe) | |||
77 | * qkey from the QP context instead of the WR (see 10.2.5). | 77 | * qkey from the QP context instead of the WR (see 10.2.5). |
78 | */ | 78 | */ |
79 | if (unlikely(qp->ibqp.qp_num && | 79 | if (unlikely(qp->ibqp.qp_num && |
80 | ((int) swqe->wr.wr.ud.remote_qkey < 0 ? | 80 | ((int) swqe->ud_wr.remote_qkey < 0 ? |
81 | sqp->qkey : swqe->wr.wr.ud.remote_qkey) != qp->qkey)) { | 81 | sqp->qkey : swqe->ud_wr.remote_qkey) != qp->qkey)) { |
82 | /* XXX OK to lose a count once in a while. */ | 82 | /* XXX OK to lose a count once in a while. */ |
83 | dev->qkey_violations++; | 83 | dev->qkey_violations++; |
84 | dev->n_pkt_drops++; | 84 | dev->n_pkt_drops++; |
@@ -175,7 +175,7 @@ static void ipath_ud_loopback(struct ipath_qp *sqp, struct ipath_swqe *swqe) | |||
175 | } else | 175 | } else |
176 | spin_unlock_irqrestore(&rq->lock, flags); | 176 | spin_unlock_irqrestore(&rq->lock, flags); |
177 | 177 | ||
178 | ah_attr = &to_iah(swqe->wr.wr.ud.ah)->attr; | 178 | ah_attr = &to_iah(swqe->ud_wr.ah)->attr; |
179 | if (ah_attr->ah_flags & IB_AH_GRH) { | 179 | if (ah_attr->ah_flags & IB_AH_GRH) { |
180 | ipath_copy_sge(&rsge, &ah_attr->grh, sizeof(struct ib_grh)); | 180 | ipath_copy_sge(&rsge, &ah_attr->grh, sizeof(struct ib_grh)); |
181 | wc.wc_flags |= IB_WC_GRH; | 181 | wc.wc_flags |= IB_WC_GRH; |
@@ -225,7 +225,7 @@ static void ipath_ud_loopback(struct ipath_qp *sqp, struct ipath_swqe *swqe) | |||
225 | wc.port_num = 1; | 225 | wc.port_num = 1; |
226 | /* Signal completion event if the solicited bit is set. */ | 226 | /* Signal completion event if the solicited bit is set. */ |
227 | ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, | 227 | ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, |
228 | swqe->wr.send_flags & IB_SEND_SOLICITED); | 228 | swqe->ud_wr.wr.send_flags & IB_SEND_SOLICITED); |
229 | drop: | 229 | drop: |
230 | if (atomic_dec_and_test(&qp->refcount)) | 230 | if (atomic_dec_and_test(&qp->refcount)) |
231 | wake_up(&qp->wait); | 231 | wake_up(&qp->wait); |
@@ -280,7 +280,7 @@ int ipath_make_ud_req(struct ipath_qp *qp) | |||
280 | next_cur = 0; | 280 | next_cur = 0; |
281 | 281 | ||
282 | /* Construct the header. */ | 282 | /* Construct the header. */ |
283 | ah_attr = &to_iah(wqe->wr.wr.ud.ah)->attr; | 283 | ah_attr = &to_iah(wqe->ud_wr.ah)->attr; |
284 | if (ah_attr->dlid >= IPATH_MULTICAST_LID_BASE) { | 284 | if (ah_attr->dlid >= IPATH_MULTICAST_LID_BASE) { |
285 | if (ah_attr->dlid != IPATH_PERMISSIVE_LID) | 285 | if (ah_attr->dlid != IPATH_PERMISSIVE_LID) |
286 | dev->n_multicast_xmit++; | 286 | dev->n_multicast_xmit++; |
@@ -322,7 +322,7 @@ int ipath_make_ud_req(struct ipath_qp *qp) | |||
322 | qp->s_wqe = wqe; | 322 | qp->s_wqe = wqe; |
323 | qp->s_sge.sge = wqe->sg_list[0]; | 323 | qp->s_sge.sge = wqe->sg_list[0]; |
324 | qp->s_sge.sg_list = wqe->sg_list + 1; | 324 | qp->s_sge.sg_list = wqe->sg_list + 1; |
325 | qp->s_sge.num_sge = wqe->wr.num_sge; | 325 | qp->s_sge.num_sge = wqe->ud_wr.wr.num_sge; |
326 | 326 | ||
327 | if (ah_attr->ah_flags & IB_AH_GRH) { | 327 | if (ah_attr->ah_flags & IB_AH_GRH) { |
328 | /* Header size in 32-bit words. */ | 328 | /* Header size in 32-bit words. */ |
@@ -340,9 +340,9 @@ int ipath_make_ud_req(struct ipath_qp *qp) | |||
340 | lrh0 = IPATH_LRH_BTH; | 340 | lrh0 = IPATH_LRH_BTH; |
341 | ohdr = &qp->s_hdr.u.oth; | 341 | ohdr = &qp->s_hdr.u.oth; |
342 | } | 342 | } |
343 | if (wqe->wr.opcode == IB_WR_SEND_WITH_IMM) { | 343 | if (wqe->ud_wr.wr.opcode == IB_WR_SEND_WITH_IMM) { |
344 | qp->s_hdrwords++; | 344 | qp->s_hdrwords++; |
345 | ohdr->u.ud.imm_data = wqe->wr.ex.imm_data; | 345 | ohdr->u.ud.imm_data = wqe->ud_wr.wr.ex.imm_data; |
346 | bth0 = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE << 24; | 346 | bth0 = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE << 24; |
347 | } else | 347 | } else |
348 | bth0 = IB_OPCODE_UD_SEND_ONLY << 24; | 348 | bth0 = IB_OPCODE_UD_SEND_ONLY << 24; |
@@ -360,7 +360,7 @@ int ipath_make_ud_req(struct ipath_qp *qp) | |||
360 | qp->s_hdr.lrh[3] = cpu_to_be16(lid); | 360 | qp->s_hdr.lrh[3] = cpu_to_be16(lid); |
361 | } else | 361 | } else |
362 | qp->s_hdr.lrh[3] = IB_LID_PERMISSIVE; | 362 | qp->s_hdr.lrh[3] = IB_LID_PERMISSIVE; |
363 | if (wqe->wr.send_flags & IB_SEND_SOLICITED) | 363 | if (wqe->ud_wr.wr.send_flags & IB_SEND_SOLICITED) |
364 | bth0 |= 1 << 23; | 364 | bth0 |= 1 << 23; |
365 | bth0 |= extra_bytes << 20; | 365 | bth0 |= extra_bytes << 20; |
366 | bth0 |= qp->ibqp.qp_type == IB_QPT_SMI ? IPATH_DEFAULT_P_KEY : | 366 | bth0 |= qp->ibqp.qp_type == IB_QPT_SMI ? IPATH_DEFAULT_P_KEY : |
@@ -372,14 +372,14 @@ int ipath_make_ud_req(struct ipath_qp *qp) | |||
372 | ohdr->bth[1] = ah_attr->dlid >= IPATH_MULTICAST_LID_BASE && | 372 | ohdr->bth[1] = ah_attr->dlid >= IPATH_MULTICAST_LID_BASE && |
373 | ah_attr->dlid != IPATH_PERMISSIVE_LID ? | 373 | ah_attr->dlid != IPATH_PERMISSIVE_LID ? |
374 | cpu_to_be32(IPATH_MULTICAST_QPN) : | 374 | cpu_to_be32(IPATH_MULTICAST_QPN) : |
375 | cpu_to_be32(wqe->wr.wr.ud.remote_qpn); | 375 | cpu_to_be32(wqe->ud_wr.remote_qpn); |
376 | ohdr->bth[2] = cpu_to_be32(qp->s_next_psn++ & IPATH_PSN_MASK); | 376 | ohdr->bth[2] = cpu_to_be32(qp->s_next_psn++ & IPATH_PSN_MASK); |
377 | /* | 377 | /* |
378 | * Qkeys with the high order bit set mean use the | 378 | * Qkeys with the high order bit set mean use the |
379 | * qkey from the QP context instead of the WR (see 10.2.5). | 379 | * qkey from the QP context instead of the WR (see 10.2.5). |
380 | */ | 380 | */ |
381 | ohdr->u.ud.deth[0] = cpu_to_be32((int)wqe->wr.wr.ud.remote_qkey < 0 ? | 381 | ohdr->u.ud.deth[0] = cpu_to_be32((int)wqe->ud_wr.remote_qkey < 0 ? |
382 | qp->qkey : wqe->wr.wr.ud.remote_qkey); | 382 | qp->qkey : wqe->ud_wr.remote_qkey); |
383 | ohdr->u.ud.deth[1] = cpu_to_be32(qp->ibqp.qp_num); | 383 | ohdr->u.ud.deth[1] = cpu_to_be32(qp->ibqp.qp_num); |
384 | 384 | ||
385 | done: | 385 | done: |
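The ipath_ud.c changes above are mechanical path renames: UD-specific fields move from the old inner union to the new containing structure, while generic send fields are now reached through the embedded header. A summary of the mapping, with the field names taken from the hunks above:

        /*
         * Old access path                    New access path
         * swqe->wr.wr.ud.remote_qpn      ->  swqe->ud_wr.remote_qpn
         * swqe->wr.wr.ud.remote_qkey     ->  swqe->ud_wr.remote_qkey
         * swqe->wr.wr.ud.ah              ->  swqe->ud_wr.ah
         * swqe->wr.send_flags            ->  swqe->ud_wr.wr.send_flags
         * swqe->wr.opcode                ->  swqe->ud_wr.wr.opcode
         */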
diff --git a/drivers/staging/rdma/ipath/ipath_verbs.c b/drivers/staging/rdma/ipath/ipath_verbs.c index ed2bbc2f7eae..15633ec1843f 100644 --- a/drivers/staging/rdma/ipath/ipath_verbs.c +++ b/drivers/staging/rdma/ipath/ipath_verbs.c | |||
@@ -374,7 +374,7 @@ static int ipath_post_one_send(struct ipath_qp *qp, struct ib_send_wr *wr) | |||
374 | wr->opcode != IB_WR_SEND_WITH_IMM) | 374 | wr->opcode != IB_WR_SEND_WITH_IMM) |
375 | goto bail_inval; | 375 | goto bail_inval; |
376 | /* Check UD destination address PD */ | 376 | /* Check UD destination address PD */ |
377 | if (qp->ibqp.pd != wr->wr.ud.ah->pd) | 377 | if (qp->ibqp.pd != ud_wr(wr)->ah->pd) |
378 | goto bail_inval; | 378 | goto bail_inval; |
379 | } else if ((unsigned) wr->opcode > IB_WR_ATOMIC_FETCH_AND_ADD) | 379 | } else if ((unsigned) wr->opcode > IB_WR_ATOMIC_FETCH_AND_ADD) |
380 | goto bail_inval; | 380 | goto bail_inval; |
@@ -395,7 +395,23 @@ static int ipath_post_one_send(struct ipath_qp *qp, struct ib_send_wr *wr) | |||
395 | } | 395 | } |
396 | 396 | ||
397 | wqe = get_swqe_ptr(qp, qp->s_head); | 397 | wqe = get_swqe_ptr(qp, qp->s_head); |
398 | wqe->wr = *wr; | 398 | |
399 | if (qp->ibqp.qp_type != IB_QPT_UC && | ||
400 | qp->ibqp.qp_type != IB_QPT_RC) | ||
401 | memcpy(&wqe->ud_wr, ud_wr(wr), sizeof(wqe->ud_wr)); | ||
402 | else if (wr->opcode == IB_WR_FAST_REG_MR) | ||
403 | memcpy(&wqe->fast_reg_wr, fast_reg_wr(wr), | ||
404 | sizeof(wqe->fast_reg_wr)); | ||
405 | else if (wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM || | ||
406 | wr->opcode == IB_WR_RDMA_WRITE || | ||
407 | wr->opcode == IB_WR_RDMA_READ) | ||
408 | memcpy(&wqe->rdma_wr, rdma_wr(wr), sizeof(wqe->rdma_wr)); | ||
409 | else if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP || | ||
410 | wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD) | ||
411 | memcpy(&wqe->atomic_wr, atomic_wr(wr), sizeof(wqe->atomic_wr)); | ||
412 | else | ||
413 | memcpy(&wqe->wr, wr, sizeof(wqe->wr)); | ||
414 | |||
399 | wqe->length = 0; | 415 | wqe->length = 0; |
400 | if (wr->num_sge) { | 416 | if (wr->num_sge) { |
401 | acc = wr->opcode >= IB_WR_RDMA_READ ? | 417 | acc = wr->opcode >= IB_WR_RDMA_READ ? |
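ipath_post_one_send() now copies only as many bytes as the opcode needs, which is safe only if the caller really built the matching containing structure. A minimal caller-side sketch for a UD send; ah, sge, qp, remote_qpn and remote_qkey are placeholders assumed to be set up elsewhere, none of them come from this patch:

        struct ib_ud_wr ud = {
                .wr.opcode   = IB_WR_SEND,
                .wr.sg_list  = &sge,
                .wr.num_sge  = 1,
                .ah          = ah,
                .remote_qpn  = remote_qpn,
                .remote_qkey = remote_qkey,
        };
        struct ib_send_wr *bad_wr;

        /* The driver sees only a struct ib_send_wr *, recovers the ib_ud_wr
         * with ud_wr() and copies sizeof(struct ib_ud_wr) bytes from it. */
        int ret = ib_post_send(qp, &ud.wr, &bad_wr);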
diff --git a/drivers/staging/rdma/ipath/ipath_verbs.h b/drivers/staging/rdma/ipath/ipath_verbs.h index ec167e545e15..ed102a26ec08 100644 --- a/drivers/staging/rdma/ipath/ipath_verbs.h +++ b/drivers/staging/rdma/ipath/ipath_verbs.h | |||
@@ -277,7 +277,14 @@ struct ipath_mr { | |||
277 | * in qp->s_max_sge. | 277 | * in qp->s_max_sge. |
278 | */ | 278 | */ |
279 | struct ipath_swqe { | 279 | struct ipath_swqe { |
280 | struct ib_send_wr wr; /* don't use wr.sg_list */ | 280 | union { |
281 | struct ib_send_wr wr; /* don't use wr.sg_list */ | ||
282 | struct ib_ud_wr ud_wr; | ||
283 | struct ib_fast_reg_wr fast_reg_wr; | ||
284 | struct ib_rdma_wr rdma_wr; | ||
285 | struct ib_atomic_wr atomic_wr; | ||
286 | }; | ||
287 | |||
281 | u32 psn; /* first packet sequence number */ | 288 | u32 psn; /* first packet sequence number */ |
282 | u32 lpsn; /* last packet sequence number */ | 289 | u32 lpsn; /* last packet sequence number */ |
283 | u32 ssn; /* send sequence number */ | 290 | u32 ssn; /* send sequence number */ |
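Because every ib_*_wr embeds struct ib_send_wr as its first member, the anonymous union keeps wqe->wr usable as the common header no matter which variant ipath_post_one_send() copied in; only the operation-specific tail needs the typed view. A sketch of what this aliasing permits (imm_data and qpn are hypothetical locals, not part of the patch):

        /* wqe->wr and wqe->ud_wr.wr name the same bytes, so either spelling
         * of a generic field is valid. */
        if (wqe->wr.opcode == IB_WR_SEND_WITH_IMM)
                imm_data = wqe->ud_wr.wr.ex.imm_data;  /* == wqe->wr.ex.imm_data */
        qpn = wqe->ud_wr.remote_qpn;                   /* past the shared header */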
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index 7845fae6f2df..25f022c9aaac 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h | |||
@@ -1100,54 +1100,94 @@ struct ib_send_wr { | |||
1100 | __be32 imm_data; | 1100 | __be32 imm_data; |
1101 | u32 invalidate_rkey; | 1101 | u32 invalidate_rkey; |
1102 | } ex; | 1102 | } ex; |
1103 | union { | ||
1104 | struct { | ||
1105 | u64 remote_addr; | ||
1106 | u32 rkey; | ||
1107 | } rdma; | ||
1108 | struct { | ||
1109 | u64 remote_addr; | ||
1110 | u64 compare_add; | ||
1111 | u64 swap; | ||
1112 | u64 compare_add_mask; | ||
1113 | u64 swap_mask; | ||
1114 | u32 rkey; | ||
1115 | } atomic; | ||
1116 | struct { | ||
1117 | struct ib_ah *ah; | ||
1118 | void *header; | ||
1119 | int hlen; | ||
1120 | int mss; | ||
1121 | u32 remote_qpn; | ||
1122 | u32 remote_qkey; | ||
1123 | u16 pkey_index; /* valid for GSI only */ | ||
1124 | u8 port_num; /* valid for DR SMPs on switch only */ | ||
1125 | } ud; | ||
1126 | struct { | ||
1127 | u64 iova_start; | ||
1128 | struct ib_fast_reg_page_list *page_list; | ||
1129 | unsigned int page_shift; | ||
1130 | unsigned int page_list_len; | ||
1131 | u32 length; | ||
1132 | int access_flags; | ||
1133 | u32 rkey; | ||
1134 | } fast_reg; | ||
1135 | struct { | ||
1136 | struct ib_mw *mw; | ||
1137 | /* The new rkey for the memory window. */ | ||
1138 | u32 rkey; | ||
1139 | struct ib_mw_bind_info bind_info; | ||
1140 | } bind_mw; | ||
1141 | struct { | ||
1142 | struct ib_sig_attrs *sig_attrs; | ||
1143 | struct ib_mr *sig_mr; | ||
1144 | int access_flags; | ||
1145 | struct ib_sge *prot; | ||
1146 | } sig_handover; | ||
1147 | } wr; | ||
1148 | u32 xrc_remote_srq_num; /* XRC TGT QPs only */ | 1103 | u32 xrc_remote_srq_num; /* XRC TGT QPs only */ |
1149 | }; | 1104 | }; |
1150 | 1105 | ||
1106 | struct ib_rdma_wr { | ||
1107 | struct ib_send_wr wr; | ||
1108 | u64 remote_addr; | ||
1109 | u32 rkey; | ||
1110 | }; | ||
1111 | |||
1112 | static inline struct ib_rdma_wr *rdma_wr(struct ib_send_wr *wr) | ||
1113 | { | ||
1114 | return container_of(wr, struct ib_rdma_wr, wr); | ||
1115 | } | ||
1116 | |||
1117 | struct ib_atomic_wr { | ||
1118 | struct ib_send_wr wr; | ||
1119 | u64 remote_addr; | ||
1120 | u64 compare_add; | ||
1121 | u64 swap; | ||
1122 | u64 compare_add_mask; | ||
1123 | u64 swap_mask; | ||
1124 | u32 rkey; | ||
1125 | }; | ||
1126 | |||
1127 | static inline struct ib_atomic_wr *atomic_wr(struct ib_send_wr *wr) | ||
1128 | { | ||
1129 | return container_of(wr, struct ib_atomic_wr, wr); | ||
1130 | } | ||
1131 | |||
1132 | struct ib_ud_wr { | ||
1133 | struct ib_send_wr wr; | ||
1134 | struct ib_ah *ah; | ||
1135 | void *header; | ||
1136 | int hlen; | ||
1137 | int mss; | ||
1138 | u32 remote_qpn; | ||
1139 | u32 remote_qkey; | ||
1140 | u16 pkey_index; /* valid for GSI only */ | ||
1141 | u8 port_num; /* valid for DR SMPs on switch only */ | ||
1142 | }; | ||
1143 | |||
1144 | static inline struct ib_ud_wr *ud_wr(struct ib_send_wr *wr) | ||
1145 | { | ||
1146 | return container_of(wr, struct ib_ud_wr, wr); | ||
1147 | } | ||
1148 | |||
1149 | struct ib_fast_reg_wr { | ||
1150 | struct ib_send_wr wr; | ||
1151 | u64 iova_start; | ||
1152 | struct ib_fast_reg_page_list *page_list; | ||
1153 | unsigned int page_shift; | ||
1154 | unsigned int page_list_len; | ||
1155 | u32 length; | ||
1156 | int access_flags; | ||
1157 | u32 rkey; | ||
1158 | }; | ||
1159 | |||
1160 | static inline struct ib_fast_reg_wr *fast_reg_wr(struct ib_send_wr *wr) | ||
1161 | { | ||
1162 | return container_of(wr, struct ib_fast_reg_wr, wr); | ||
1163 | } | ||
1164 | |||
1165 | struct ib_bind_mw_wr { | ||
1166 | struct ib_send_wr wr; | ||
1167 | struct ib_mw *mw; | ||
1168 | /* The new rkey for the memory window. */ | ||
1169 | u32 rkey; | ||
1170 | struct ib_mw_bind_info bind_info; | ||
1171 | }; | ||
1172 | |||
1173 | static inline struct ib_bind_mw_wr *bind_mw_wr(struct ib_send_wr *wr) | ||
1174 | { | ||
1175 | return container_of(wr, struct ib_bind_mw_wr, wr); | ||
1176 | } | ||
1177 | |||
1178 | struct ib_sig_handover_wr { | ||
1179 | struct ib_send_wr wr; | ||
1180 | struct ib_sig_attrs *sig_attrs; | ||
1181 | struct ib_mr *sig_mr; | ||
1182 | int access_flags; | ||
1183 | struct ib_sge *prot; | ||
1184 | }; | ||
1185 | |||
1186 | static inline struct ib_sig_handover_wr *sig_handover_wr(struct ib_send_wr *wr) | ||
1187 | { | ||
1188 | return container_of(wr, struct ib_sig_handover_wr, wr); | ||
1189 | } | ||
1190 | |||
1151 | struct ib_recv_wr { | 1191 | struct ib_recv_wr { |
1152 | struct ib_recv_wr *next; | 1192 | struct ib_recv_wr *next; |
1153 | u64 wr_id; | 1193 | u64 wr_id; |
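With the definitions above, a consumer builds the containing structure, posts the embedded header, and code that only receives a struct ib_send_wr * gets the rest back with the matching container_of() helper. A minimal sketch for an RDMA WRITE; the data values (buf_dma, len, lkey, remote_addr, rkey, qp) are assumptions standing in for real driver state:

        struct ib_sge sge = {
                .addr   = buf_dma,
                .length = len,
                .lkey   = lkey,
        };
        struct ib_rdma_wr wr = {
                .wr = {
                        .opcode     = IB_WR_RDMA_WRITE,
                        .send_flags = IB_SEND_SIGNALED,
                        .sg_list    = &sge,
                        .num_sge    = 1,
                },
                .remote_addr = remote_addr,
                .rkey        = rkey,
        };
        struct ib_send_wr *bad_wr;
        int ret = ib_post_send(qp, &wr.wr, &bad_wr);

        /* A provider that receives only the generic pointer recovers the
         * RDMA-specific fields with the inline helper added above: */
        struct ib_rdma_wr *rdma = rdma_wr(&wr.wr);
        /* rdma->remote_addr and rdma->rkey are reachable again. */

Chains still link through the embedded headers (wr.wr.next), so ib_post_send() continues to walk a plain list of struct ib_send_wr.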
diff --git a/net/rds/ib.h b/net/rds/ib.h index aae60fda77f6..301c48385166 100644 --- a/net/rds/ib.h +++ b/net/rds/ib.h | |||
@@ -69,7 +69,11 @@ struct rds_ib_connect_private { | |||
69 | 69 | ||
70 | struct rds_ib_send_work { | 70 | struct rds_ib_send_work { |
71 | void *s_op; | 71 | void *s_op; |
72 | struct ib_send_wr s_wr; | 72 | union { |
73 | struct ib_send_wr s_wr; | ||
74 | struct ib_rdma_wr s_rdma_wr; | ||
75 | struct ib_atomic_wr s_atomic_wr; | ||
76 | }; | ||
73 | struct ib_sge s_sge[RDS_IB_MAX_SGE]; | 77 | struct ib_sge s_sge[RDS_IB_MAX_SGE]; |
74 | unsigned long s_queued; | 78 | unsigned long s_queued; |
75 | }; | 79 | }; |
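struct ib_send_wr is the first member of both ib_rdma_wr and ib_atomic_wr, so the three union members above share their header bytes: s_wr, s_rdma_wr.wr and s_atomic_wr.wr name the same storage. That is what lets rds_ib_xmit_rdma() below keep setting the opcode through s_wr while filling RDMA-specific fields through s_rdma_wr; restated as a sketch, with remote_addr and rkey standing in for the real values:

        /* Generic header fields may be written through any union member: */
        send->s_wr.opcode           = IB_WR_RDMA_WRITE;  /* same bytes as s_rdma_wr.wr.opcode */
        /* ...while operation-specific fields live past the shared header: */
        send->s_rdma_wr.remote_addr = remote_addr;
        send->s_rdma_wr.rkey        = rkey;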
diff --git a/net/rds/ib_send.c b/net/rds/ib_send.c index 4e88047086b6..987386e9931b 100644 --- a/net/rds/ib_send.c +++ b/net/rds/ib_send.c | |||
@@ -785,23 +785,23 @@ int rds_ib_xmit_atomic(struct rds_connection *conn, struct rm_atomic_op *op) | |||
785 | send->s_queued = jiffies; | 785 | send->s_queued = jiffies; |
786 | 786 | ||
787 | if (op->op_type == RDS_ATOMIC_TYPE_CSWP) { | 787 | if (op->op_type == RDS_ATOMIC_TYPE_CSWP) { |
788 | send->s_wr.opcode = IB_WR_MASKED_ATOMIC_CMP_AND_SWP; | 788 | send->s_atomic_wr.wr.opcode = IB_WR_MASKED_ATOMIC_CMP_AND_SWP; |
789 | send->s_wr.wr.atomic.compare_add = op->op_m_cswp.compare; | 789 | send->s_atomic_wr.compare_add = op->op_m_cswp.compare; |
790 | send->s_wr.wr.atomic.swap = op->op_m_cswp.swap; | 790 | send->s_atomic_wr.swap = op->op_m_cswp.swap; |
791 | send->s_wr.wr.atomic.compare_add_mask = op->op_m_cswp.compare_mask; | 791 | send->s_atomic_wr.compare_add_mask = op->op_m_cswp.compare_mask; |
792 | send->s_wr.wr.atomic.swap_mask = op->op_m_cswp.swap_mask; | 792 | send->s_atomic_wr.swap_mask = op->op_m_cswp.swap_mask; |
793 | } else { /* FADD */ | 793 | } else { /* FADD */ |
794 | send->s_wr.opcode = IB_WR_MASKED_ATOMIC_FETCH_AND_ADD; | 794 | send->s_atomic_wr.wr.opcode = IB_WR_MASKED_ATOMIC_FETCH_AND_ADD; |
795 | send->s_wr.wr.atomic.compare_add = op->op_m_fadd.add; | 795 | send->s_atomic_wr.compare_add = op->op_m_fadd.add; |
796 | send->s_wr.wr.atomic.swap = 0; | 796 | send->s_atomic_wr.swap = 0; |
797 | send->s_wr.wr.atomic.compare_add_mask = op->op_m_fadd.nocarry_mask; | 797 | send->s_atomic_wr.compare_add_mask = op->op_m_fadd.nocarry_mask; |
798 | send->s_wr.wr.atomic.swap_mask = 0; | 798 | send->s_atomic_wr.swap_mask = 0; |
799 | } | 799 | } |
800 | nr_sig = rds_ib_set_wr_signal_state(ic, send, op->op_notify); | 800 | nr_sig = rds_ib_set_wr_signal_state(ic, send, op->op_notify); |
801 | send->s_wr.num_sge = 1; | 801 | send->s_atomic_wr.wr.num_sge = 1; |
802 | send->s_wr.next = NULL; | 802 | send->s_atomic_wr.wr.next = NULL; |
803 | send->s_wr.wr.atomic.remote_addr = op->op_remote_addr; | 803 | send->s_atomic_wr.remote_addr = op->op_remote_addr; |
804 | send->s_wr.wr.atomic.rkey = op->op_rkey; | 804 | send->s_atomic_wr.rkey = op->op_rkey; |
805 | send->s_op = op; | 805 | send->s_op = op; |
806 | rds_message_addref(container_of(send->s_op, struct rds_message, atomic)); | 806 | rds_message_addref(container_of(send->s_op, struct rds_message, atomic)); |
807 | 807 | ||
@@ -826,11 +826,11 @@ int rds_ib_xmit_atomic(struct rds_connection *conn, struct rm_atomic_op *op) | |||
826 | if (nr_sig) | 826 | if (nr_sig) |
827 | atomic_add(nr_sig, &ic->i_signaled_sends); | 827 | atomic_add(nr_sig, &ic->i_signaled_sends); |
828 | 828 | ||
829 | failed_wr = &send->s_wr; | 829 | failed_wr = &send->s_atomic_wr.wr; |
830 | ret = ib_post_send(ic->i_cm_id->qp, &send->s_wr, &failed_wr); | 830 | ret = ib_post_send(ic->i_cm_id->qp, &send->s_atomic_wr.wr, &failed_wr); |
831 | rdsdebug("ic %p send %p (wr %p) ret %d wr %p\n", ic, | 831 | rdsdebug("ic %p send %p (wr %p) ret %d wr %p\n", ic, |
832 | send, &send->s_wr, ret, failed_wr); | 832 | send, &send->s_atomic_wr, ret, failed_wr); |
833 | BUG_ON(failed_wr != &send->s_wr); | 833 | BUG_ON(failed_wr != &send->s_atomic_wr.wr); |
834 | if (ret) { | 834 | if (ret) { |
835 | printk(KERN_WARNING "RDS/IB: atomic ib_post_send to %pI4 " | 835 | printk(KERN_WARNING "RDS/IB: atomic ib_post_send to %pI4 " |
836 | "returned %d\n", &conn->c_faddr, ret); | 836 | "returned %d\n", &conn->c_faddr, ret); |
@@ -839,9 +839,9 @@ int rds_ib_xmit_atomic(struct rds_connection *conn, struct rm_atomic_op *op) | |||
839 | goto out; | 839 | goto out; |
840 | } | 840 | } |
841 | 841 | ||
842 | if (unlikely(failed_wr != &send->s_wr)) { | 842 | if (unlikely(failed_wr != &send->s_atomic_wr.wr)) { |
843 | printk(KERN_WARNING "RDS/IB: atomic ib_post_send() rc=%d, but failed_wqe updated!\n", ret); | 843 | printk(KERN_WARNING "RDS/IB: atomic ib_post_send() rc=%d, but failed_wqe updated!\n", ret); |
844 | BUG_ON(failed_wr != &send->s_wr); | 844 | BUG_ON(failed_wr != &send->s_atomic_wr.wr); |
845 | } | 845 | } |
846 | 846 | ||
847 | out: | 847 | out: |
@@ -912,22 +912,23 @@ int rds_ib_xmit_rdma(struct rds_connection *conn, struct rm_rdma_op *op) | |||
912 | nr_sig += rds_ib_set_wr_signal_state(ic, send, op->op_notify); | 912 | nr_sig += rds_ib_set_wr_signal_state(ic, send, op->op_notify); |
913 | 913 | ||
914 | send->s_wr.opcode = op->op_write ? IB_WR_RDMA_WRITE : IB_WR_RDMA_READ; | 914 | send->s_wr.opcode = op->op_write ? IB_WR_RDMA_WRITE : IB_WR_RDMA_READ; |
915 | send->s_wr.wr.rdma.remote_addr = remote_addr; | 915 | send->s_rdma_wr.remote_addr = remote_addr; |
916 | send->s_wr.wr.rdma.rkey = op->op_rkey; | 916 | send->s_rdma_wr.rkey = op->op_rkey; |
917 | 917 | ||
918 | if (num_sge > max_sge) { | 918 | if (num_sge > max_sge) { |
919 | send->s_wr.num_sge = max_sge; | 919 | send->s_rdma_wr.wr.num_sge = max_sge; |
920 | num_sge -= max_sge; | 920 | num_sge -= max_sge; |
921 | } else { | 921 | } else { |
922 | send->s_wr.num_sge = num_sge; | 922 | send->s_rdma_wr.wr.num_sge = num_sge; |
923 | } | 923 | } |
924 | 924 | ||
925 | send->s_wr.next = NULL; | 925 | send->s_rdma_wr.wr.next = NULL; |
926 | 926 | ||
927 | if (prev) | 927 | if (prev) |
928 | prev->s_wr.next = &send->s_wr; | 928 | prev->s_rdma_wr.wr.next = &send->s_rdma_wr.wr; |
929 | 929 | ||
930 | for (j = 0; j < send->s_wr.num_sge && scat != &op->op_sg[op->op_count]; j++) { | 930 | for (j = 0; j < send->s_rdma_wr.wr.num_sge && |
931 | scat != &op->op_sg[op->op_count]; j++) { | ||
931 | len = ib_sg_dma_len(ic->i_cm_id->device, scat); | 932 | len = ib_sg_dma_len(ic->i_cm_id->device, scat); |
932 | send->s_sge[j].addr = | 933 | send->s_sge[j].addr = |
933 | ib_sg_dma_address(ic->i_cm_id->device, scat); | 934 | ib_sg_dma_address(ic->i_cm_id->device, scat); |
@@ -942,7 +943,9 @@ int rds_ib_xmit_rdma(struct rds_connection *conn, struct rm_rdma_op *op) | |||
942 | } | 943 | } |
943 | 944 | ||
944 | rdsdebug("send %p wr %p num_sge %u next %p\n", send, | 945 | rdsdebug("send %p wr %p num_sge %u next %p\n", send, |
945 | &send->s_wr, send->s_wr.num_sge, send->s_wr.next); | 946 | &send->s_rdma_wr.wr, |
947 | send->s_rdma_wr.wr.num_sge, | ||
948 | send->s_rdma_wr.wr.next); | ||
946 | 949 | ||
947 | prev = send; | 950 | prev = send; |
948 | if (++send == &ic->i_sends[ic->i_send_ring.w_nr]) | 951 | if (++send == &ic->i_sends[ic->i_send_ring.w_nr]) |
@@ -963,11 +966,11 @@ int rds_ib_xmit_rdma(struct rds_connection *conn, struct rm_rdma_op *op) | |||
963 | if (nr_sig) | 966 | if (nr_sig) |
964 | atomic_add(nr_sig, &ic->i_signaled_sends); | 967 | atomic_add(nr_sig, &ic->i_signaled_sends); |
965 | 968 | ||
966 | failed_wr = &first->s_wr; | 969 | failed_wr = &first->s_rdma_wr.wr; |
967 | ret = ib_post_send(ic->i_cm_id->qp, &first->s_wr, &failed_wr); | 970 | ret = ib_post_send(ic->i_cm_id->qp, &first->s_rdma_wr.wr, &failed_wr); |
968 | rdsdebug("ic %p first %p (wr %p) ret %d wr %p\n", ic, | 971 | rdsdebug("ic %p first %p (wr %p) ret %d wr %p\n", ic, |
969 | first, &first->s_wr, ret, failed_wr); | 972 | first, &first->s_rdma_wr.wr, ret, failed_wr); |
970 | BUG_ON(failed_wr != &first->s_wr); | 973 | BUG_ON(failed_wr != &first->s_rdma_wr.wr); |
971 | if (ret) { | 974 | if (ret) { |
972 | printk(KERN_WARNING "RDS/IB: rdma ib_post_send to %pI4 " | 975 | printk(KERN_WARNING "RDS/IB: rdma ib_post_send to %pI4 " |
973 | "returned %d\n", &conn->c_faddr, ret); | 976 | "returned %d\n", &conn->c_faddr, ret); |
@@ -976,9 +979,9 @@ int rds_ib_xmit_rdma(struct rds_connection *conn, struct rm_rdma_op *op) | |||
976 | goto out; | 979 | goto out; |
977 | } | 980 | } |
978 | 981 | ||
979 | if (unlikely(failed_wr != &first->s_wr)) { | 982 | if (unlikely(failed_wr != &first->s_rdma_wr.wr)) { |
980 | printk(KERN_WARNING "RDS/IB: ib_post_send() rc=%d, but failed_wqe updated!\n", ret); | 983 | printk(KERN_WARNING "RDS/IB: ib_post_send() rc=%d, but failed_wqe updated!\n", ret); |
981 | BUG_ON(failed_wr != &first->s_wr); | 984 | BUG_ON(failed_wr != &first->s_rdma_wr.wr); |
982 | } | 985 | } |
983 | 986 | ||
984 | 987 | ||
diff --git a/net/rds/iw.h b/net/rds/iw.h index cbe6674e31ee..fe858e5dd8d1 100644 --- a/net/rds/iw.h +++ b/net/rds/iw.h | |||
@@ -77,7 +77,11 @@ struct rds_iw_send_work { | |||
77 | struct ib_fast_reg_page_list *s_page_list; | 77 | struct ib_fast_reg_page_list *s_page_list; |
78 | unsigned char s_remap_count; | 78 | unsigned char s_remap_count; |
79 | 79 | ||
80 | struct ib_send_wr s_wr; | 80 | union { |
81 | struct ib_send_wr s_send_wr; | ||
82 | struct ib_rdma_wr s_rdma_wr; | ||
83 | struct ib_fast_reg_wr s_fast_reg_wr; | ||
84 | }; | ||
81 | struct ib_sge s_sge[RDS_IW_MAX_SGE]; | 85 | struct ib_sge s_sge[RDS_IW_MAX_SGE]; |
82 | unsigned long s_queued; | 86 | unsigned long s_queued; |
83 | }; | 87 | }; |
diff --git a/net/rds/iw_rdma.c b/net/rds/iw_rdma.c index 6a8fbd6e69e7..f8a612cc69e6 100644 --- a/net/rds/iw_rdma.c +++ b/net/rds/iw_rdma.c | |||
@@ -696,7 +696,8 @@ static int rds_iw_init_fastreg(struct rds_iw_mr_pool *pool, | |||
696 | static int rds_iw_rdma_build_fastreg(struct rds_iw_mapping *mapping) | 696 | static int rds_iw_rdma_build_fastreg(struct rds_iw_mapping *mapping) |
697 | { | 697 | { |
698 | struct rds_iw_mr *ibmr = mapping->m_mr; | 698 | struct rds_iw_mr *ibmr = mapping->m_mr; |
699 | struct ib_send_wr f_wr, *failed_wr; | 699 | struct ib_fast_reg_wr f_wr; |
700 | struct ib_send_wr *failed_wr; | ||
700 | int ret; | 701 | int ret; |
701 | 702 | ||
702 | /* | 703 | /* |
@@ -709,22 +710,22 @@ static int rds_iw_rdma_build_fastreg(struct rds_iw_mapping *mapping) | |||
709 | mapping->m_rkey = ibmr->mr->rkey; | 710 | mapping->m_rkey = ibmr->mr->rkey; |
710 | 711 | ||
711 | memset(&f_wr, 0, sizeof(f_wr)); | 712 | memset(&f_wr, 0, sizeof(f_wr)); |
712 | f_wr.wr_id = RDS_IW_FAST_REG_WR_ID; | 713 | f_wr.wr.wr_id = RDS_IW_FAST_REG_WR_ID; |
713 | f_wr.opcode = IB_WR_FAST_REG_MR; | 714 | f_wr.wr.opcode = IB_WR_FAST_REG_MR; |
714 | f_wr.wr.fast_reg.length = mapping->m_sg.bytes; | 715 | f_wr.length = mapping->m_sg.bytes; |
715 | f_wr.wr.fast_reg.rkey = mapping->m_rkey; | 716 | f_wr.rkey = mapping->m_rkey; |
716 | f_wr.wr.fast_reg.page_list = ibmr->page_list; | 717 | f_wr.page_list = ibmr->page_list; |
717 | f_wr.wr.fast_reg.page_list_len = mapping->m_sg.dma_len; | 718 | f_wr.page_list_len = mapping->m_sg.dma_len; |
718 | f_wr.wr.fast_reg.page_shift = PAGE_SHIFT; | 719 | f_wr.page_shift = PAGE_SHIFT; |
719 | f_wr.wr.fast_reg.access_flags = IB_ACCESS_LOCAL_WRITE | | 720 | f_wr.access_flags = IB_ACCESS_LOCAL_WRITE | |
720 | IB_ACCESS_REMOTE_READ | | 721 | IB_ACCESS_REMOTE_READ | |
721 | IB_ACCESS_REMOTE_WRITE; | 722 | IB_ACCESS_REMOTE_WRITE; |
722 | f_wr.wr.fast_reg.iova_start = 0; | 723 | f_wr.iova_start = 0; |
723 | f_wr.send_flags = IB_SEND_SIGNALED; | 724 | f_wr.wr.send_flags = IB_SEND_SIGNALED; |
724 | 725 | ||
725 | failed_wr = &f_wr; | 726 | failed_wr = &f_wr.wr; |
726 | ret = ib_post_send(ibmr->cm_id->qp, &f_wr, &failed_wr); | 727 | ret = ib_post_send(ibmr->cm_id->qp, &f_wr.wr, &failed_wr); |
727 | BUG_ON(failed_wr != &f_wr); | 728 | BUG_ON(failed_wr != &f_wr.wr); |
728 | if (ret) | 729 | if (ret) |
729 | printk_ratelimited(KERN_WARNING "RDS/IW: %s:%d ib_post_send returned %d\n", | 730 | printk_ratelimited(KERN_WARNING "RDS/IW: %s:%d ib_post_send returned %d\n", |
730 | __func__, __LINE__, ret); | 731 | __func__, __LINE__, ret); |
diff --git a/net/rds/iw_send.c b/net/rds/iw_send.c index 86152ec3b887..f6e23c515b44 100644 --- a/net/rds/iw_send.c +++ b/net/rds/iw_send.c | |||
@@ -137,13 +137,13 @@ void rds_iw_send_init_ring(struct rds_iw_connection *ic) | |||
137 | send->s_op = NULL; | 137 | send->s_op = NULL; |
138 | send->s_mapping = NULL; | 138 | send->s_mapping = NULL; |
139 | 139 | ||
140 | send->s_wr.next = NULL; | 140 | send->s_send_wr.next = NULL; |
141 | send->s_wr.wr_id = i; | 141 | send->s_send_wr.wr_id = i; |
142 | send->s_wr.sg_list = send->s_sge; | 142 | send->s_send_wr.sg_list = send->s_sge; |
143 | send->s_wr.num_sge = 1; | 143 | send->s_send_wr.num_sge = 1; |
144 | send->s_wr.opcode = IB_WR_SEND; | 144 | send->s_send_wr.opcode = IB_WR_SEND; |
145 | send->s_wr.send_flags = 0; | 145 | send->s_send_wr.send_flags = 0; |
146 | send->s_wr.ex.imm_data = 0; | 146 | send->s_send_wr.ex.imm_data = 0; |
147 | 147 | ||
148 | sge = rds_iw_data_sge(ic, send->s_sge); | 148 | sge = rds_iw_data_sge(ic, send->s_sge); |
149 | sge->lkey = 0; | 149 | sge->lkey = 0; |
@@ -179,7 +179,7 @@ void rds_iw_send_clear_ring(struct rds_iw_connection *ic) | |||
179 | ib_dereg_mr(send->s_mr); | 179 | ib_dereg_mr(send->s_mr); |
180 | BUG_ON(!send->s_page_list); | 180 | BUG_ON(!send->s_page_list); |
181 | ib_free_fast_reg_page_list(send->s_page_list); | 181 | ib_free_fast_reg_page_list(send->s_page_list); |
182 | if (send->s_wr.opcode == 0xdead) | 182 | if (send->s_send_wr.opcode == 0xdead) |
183 | continue; | 183 | continue; |
184 | if (send->s_rm) | 184 | if (send->s_rm) |
185 | rds_iw_send_unmap_rm(ic, send, IB_WC_WR_FLUSH_ERR); | 185 | rds_iw_send_unmap_rm(ic, send, IB_WC_WR_FLUSH_ERR); |
@@ -247,7 +247,7 @@ void rds_iw_send_cq_comp_handler(struct ib_cq *cq, void *context) | |||
247 | send = &ic->i_sends[oldest]; | 247 | send = &ic->i_sends[oldest]; |
248 | 248 | ||
249 | /* In the error case, wc.opcode sometimes contains garbage */ | 249 | /* In the error case, wc.opcode sometimes contains garbage */ |
250 | switch (send->s_wr.opcode) { | 250 | switch (send->s_send_wr.opcode) { |
251 | case IB_WR_SEND: | 251 | case IB_WR_SEND: |
252 | if (send->s_rm) | 252 | if (send->s_rm) |
253 | rds_iw_send_unmap_rm(ic, send, wc.status); | 253 | rds_iw_send_unmap_rm(ic, send, wc.status); |
@@ -262,12 +262,12 @@ void rds_iw_send_cq_comp_handler(struct ib_cq *cq, void *context) | |||
262 | default: | 262 | default: |
263 | printk_ratelimited(KERN_NOTICE | 263 | printk_ratelimited(KERN_NOTICE |
264 | "RDS/IW: %s: unexpected opcode 0x%x in WR!\n", | 264 | "RDS/IW: %s: unexpected opcode 0x%x in WR!\n", |
265 | __func__, send->s_wr.opcode); | 265 | __func__, send->s_send_wr.opcode); |
266 | break; | 266 | break; |
267 | } | 267 | } |
268 | 268 | ||
269 | send->s_wr.opcode = 0xdead; | 269 | send->s_send_wr.opcode = 0xdead; |
270 | send->s_wr.num_sge = 1; | 270 | send->s_send_wr.num_sge = 1; |
271 | if (time_after(jiffies, send->s_queued + HZ/2)) | 271 | if (time_after(jiffies, send->s_queued + HZ/2)) |
272 | rds_iw_stats_inc(s_iw_tx_stalled); | 272 | rds_iw_stats_inc(s_iw_tx_stalled); |
273 | 273 | ||
@@ -455,10 +455,10 @@ rds_iw_xmit_populate_wr(struct rds_iw_connection *ic, | |||
455 | 455 | ||
456 | WARN_ON(pos != send - ic->i_sends); | 456 | WARN_ON(pos != send - ic->i_sends); |
457 | 457 | ||
458 | send->s_wr.send_flags = send_flags; | 458 | send->s_send_wr.send_flags = send_flags; |
459 | send->s_wr.opcode = IB_WR_SEND; | 459 | send->s_send_wr.opcode = IB_WR_SEND; |
460 | send->s_wr.num_sge = 2; | 460 | send->s_send_wr.num_sge = 2; |
461 | send->s_wr.next = NULL; | 461 | send->s_send_wr.next = NULL; |
462 | send->s_queued = jiffies; | 462 | send->s_queued = jiffies; |
463 | send->s_op = NULL; | 463 | send->s_op = NULL; |
464 | 464 | ||
@@ -472,7 +472,7 @@ rds_iw_xmit_populate_wr(struct rds_iw_connection *ic, | |||
472 | } else { | 472 | } else { |
473 | /* We're sending a packet with no payload. There is only | 473 | /* We're sending a packet with no payload. There is only |
474 | * one SGE */ | 474 | * one SGE */ |
475 | send->s_wr.num_sge = 1; | 475 | send->s_send_wr.num_sge = 1; |
476 | sge = &send->s_sge[0]; | 476 | sge = &send->s_sge[0]; |
477 | } | 477 | } |
478 | 478 | ||
@@ -672,23 +672,23 @@ int rds_iw_xmit(struct rds_connection *conn, struct rds_message *rm, | |||
672 | */ | 672 | */ |
673 | if (ic->i_unsignaled_wrs-- == 0) { | 673 | if (ic->i_unsignaled_wrs-- == 0) { |
674 | ic->i_unsignaled_wrs = rds_iw_sysctl_max_unsig_wrs; | 674 | ic->i_unsignaled_wrs = rds_iw_sysctl_max_unsig_wrs; |
675 | send->s_wr.send_flags |= IB_SEND_SIGNALED | IB_SEND_SOLICITED; | 675 | send->s_send_wr.send_flags |= IB_SEND_SIGNALED | IB_SEND_SOLICITED; |
676 | } | 676 | } |
677 | 677 | ||
678 | ic->i_unsignaled_bytes -= len; | 678 | ic->i_unsignaled_bytes -= len; |
679 | if (ic->i_unsignaled_bytes <= 0) { | 679 | if (ic->i_unsignaled_bytes <= 0) { |
680 | ic->i_unsignaled_bytes = rds_iw_sysctl_max_unsig_bytes; | 680 | ic->i_unsignaled_bytes = rds_iw_sysctl_max_unsig_bytes; |
681 | send->s_wr.send_flags |= IB_SEND_SIGNALED | IB_SEND_SOLICITED; | 681 | send->s_send_wr.send_flags |= IB_SEND_SIGNALED | IB_SEND_SOLICITED; |
682 | } | 682 | } |
683 | 683 | ||
684 | /* | 684 | /* |
685 | * Always signal the last one if we're stopping due to flow control. | 685 | * Always signal the last one if we're stopping due to flow control. |
686 | */ | 686 | */ |
687 | if (flow_controlled && i == (work_alloc-1)) | 687 | if (flow_controlled && i == (work_alloc-1)) |
688 | send->s_wr.send_flags |= IB_SEND_SIGNALED | IB_SEND_SOLICITED; | 688 | send->s_send_wr.send_flags |= IB_SEND_SIGNALED | IB_SEND_SOLICITED; |
689 | 689 | ||
690 | rdsdebug("send %p wr %p num_sge %u next %p\n", send, | 690 | rdsdebug("send %p wr %p num_sge %u next %p\n", send, |
691 | &send->s_wr, send->s_wr.num_sge, send->s_wr.next); | 691 | &send->s_send_wr, send->s_send_wr.num_sge, send->s_send_wr.next); |
692 | 692 | ||
693 | sent += len; | 693 | sent += len; |
694 | rm->data.op_dmaoff += len; | 694 | rm->data.op_dmaoff += len; |
@@ -722,7 +722,7 @@ add_header: | |||
722 | } | 722 | } |
723 | 723 | ||
724 | if (prev) | 724 | if (prev) |
725 | prev->s_wr.next = &send->s_wr; | 725 | prev->s_send_wr.next = &send->s_send_wr; |
726 | prev = send; | 726 | prev = send; |
727 | 727 | ||
728 | pos = (pos + 1) % ic->i_send_ring.w_nr; | 728 | pos = (pos + 1) % ic->i_send_ring.w_nr; |
@@ -736,7 +736,7 @@ add_header: | |||
736 | /* if we finished the message then send completion owns it */ | 736 | /* if we finished the message then send completion owns it */ |
737 | if (scat == &rm->data.op_sg[rm->data.op_count]) { | 737 | if (scat == &rm->data.op_sg[rm->data.op_count]) { |
738 | prev->s_rm = ic->i_rm; | 738 | prev->s_rm = ic->i_rm; |
739 | prev->s_wr.send_flags |= IB_SEND_SIGNALED | IB_SEND_SOLICITED; | 739 | prev->s_send_wr.send_flags |= IB_SEND_SIGNALED | IB_SEND_SOLICITED; |
740 | ic->i_rm = NULL; | 740 | ic->i_rm = NULL; |
741 | } | 741 | } |
742 | 742 | ||
@@ -748,11 +748,11 @@ add_header: | |||
748 | rds_iw_send_add_credits(conn, credit_alloc - i); | 748 | rds_iw_send_add_credits(conn, credit_alloc - i); |
749 | 749 | ||
750 | /* XXX need to worry about failed_wr and partial sends. */ | 750 | /* XXX need to worry about failed_wr and partial sends. */ |
751 | failed_wr = &first->s_wr; | 751 | failed_wr = &first->s_send_wr; |
752 | ret = ib_post_send(ic->i_cm_id->qp, &first->s_wr, &failed_wr); | 752 | ret = ib_post_send(ic->i_cm_id->qp, &first->s_send_wr, &failed_wr); |
753 | rdsdebug("ic %p first %p (wr %p) ret %d wr %p\n", ic, | 753 | rdsdebug("ic %p first %p (wr %p) ret %d wr %p\n", ic, |
754 | first, &first->s_wr, ret, failed_wr); | 754 | first, &first->s_send_wr, ret, failed_wr); |
755 | BUG_ON(failed_wr != &first->s_wr); | 755 | BUG_ON(failed_wr != &first->s_send_wr); |
756 | if (ret) { | 756 | if (ret) { |
757 | printk(KERN_WARNING "RDS/IW: ib_post_send to %pI4 " | 757 | printk(KERN_WARNING "RDS/IW: ib_post_send to %pI4 " |
758 | "returned %d\n", &conn->c_faddr, ret); | 758 | "returned %d\n", &conn->c_faddr, ret); |
@@ -778,14 +778,14 @@ static void rds_iw_build_send_fastreg(struct rds_iw_device *rds_iwdev, struct rd | |||
778 | * in the sg list is added to the fast reg page list and placed | 778 | * in the sg list is added to the fast reg page list and placed |
779 | * inside the fast_reg_mr WR. | 779 | * inside the fast_reg_mr WR. |
780 | */ | 780 | */ |
781 | send->s_wr.opcode = IB_WR_FAST_REG_MR; | 781 | send->s_fast_reg_wr.wr.opcode = IB_WR_FAST_REG_MR; |
782 | send->s_wr.wr.fast_reg.length = len; | 782 | send->s_fast_reg_wr.length = len; |
783 | send->s_wr.wr.fast_reg.rkey = send->s_mr->rkey; | 783 | send->s_fast_reg_wr.rkey = send->s_mr->rkey; |
784 | send->s_wr.wr.fast_reg.page_list = send->s_page_list; | 784 | send->s_fast_reg_wr.page_list = send->s_page_list; |
785 | send->s_wr.wr.fast_reg.page_list_len = nent; | 785 | send->s_fast_reg_wr.page_list_len = nent; |
786 | send->s_wr.wr.fast_reg.page_shift = PAGE_SHIFT; | 786 | send->s_fast_reg_wr.page_shift = PAGE_SHIFT; |
787 | send->s_wr.wr.fast_reg.access_flags = IB_ACCESS_REMOTE_WRITE; | 787 | send->s_fast_reg_wr.access_flags = IB_ACCESS_REMOTE_WRITE; |
788 | send->s_wr.wr.fast_reg.iova_start = sg_addr; | 788 | send->s_fast_reg_wr.iova_start = sg_addr; |
789 | 789 | ||
790 | ib_update_fast_reg_key(send->s_mr, send->s_remap_count++); | 790 | ib_update_fast_reg_key(send->s_mr, send->s_remap_count++); |
791 | } | 791 | } |
@@ -863,7 +863,7 @@ int rds_iw_xmit_rdma(struct rds_connection *conn, struct rm_rdma_op *op) | |||
863 | num_sge = op->op_count; | 863 | num_sge = op->op_count; |
864 | 864 | ||
865 | for (i = 0; i < work_alloc && scat != &op->op_sg[op->op_count]; i++) { | 865 | for (i = 0; i < work_alloc && scat != &op->op_sg[op->op_count]; i++) { |
866 | send->s_wr.send_flags = 0; | 866 | send->s_rdma_wr.wr.send_flags = 0; |
867 | send->s_queued = jiffies; | 867 | send->s_queued = jiffies; |
868 | 868 | ||
869 | /* | 869 | /* |
@@ -872,7 +872,7 @@ int rds_iw_xmit_rdma(struct rds_connection *conn, struct rm_rdma_op *op) | |||
872 | */ | 872 | */ |
873 | if (ic->i_unsignaled_wrs-- == 0) { | 873 | if (ic->i_unsignaled_wrs-- == 0) { |
874 | ic->i_unsignaled_wrs = rds_iw_sysctl_max_unsig_wrs; | 874 | ic->i_unsignaled_wrs = rds_iw_sysctl_max_unsig_wrs; |
875 | send->s_wr.send_flags = IB_SEND_SIGNALED; | 875 | send->s_rdma_wr.wr.send_flags = IB_SEND_SIGNALED; |
876 | } | 876 | } |
877 | 877 | ||
878 | /* To avoid the need to have the plumbing to invalidate the fastreg_mr used | 878 | /* To avoid the need to have the plumbing to invalidate the fastreg_mr used |
@@ -880,29 +880,30 @@ int rds_iw_xmit_rdma(struct rds_connection *conn, struct rm_rdma_op *op) | |||
880 | * IB_WR_RDMA_READ_WITH_INV will invalidate it after the read has completed. | 880 | * IB_WR_RDMA_READ_WITH_INV will invalidate it after the read has completed. |
881 | */ | 881 | */ |
882 | if (op->op_write) | 882 | if (op->op_write) |
883 | send->s_wr.opcode = IB_WR_RDMA_WRITE; | 883 | send->s_rdma_wr.wr.opcode = IB_WR_RDMA_WRITE; |
884 | else | 884 | else |
885 | send->s_wr.opcode = IB_WR_RDMA_READ_WITH_INV; | 885 | send->s_rdma_wr.wr.opcode = IB_WR_RDMA_READ_WITH_INV; |
886 | 886 | ||
887 | send->s_wr.wr.rdma.remote_addr = remote_addr; | 887 | send->s_rdma_wr.remote_addr = remote_addr; |
888 | send->s_wr.wr.rdma.rkey = op->op_rkey; | 888 | send->s_rdma_wr.rkey = op->op_rkey; |
889 | send->s_op = op; | 889 | send->s_op = op; |
890 | 890 | ||
891 | if (num_sge > rds_iwdev->max_sge) { | 891 | if (num_sge > rds_iwdev->max_sge) { |
892 | send->s_wr.num_sge = rds_iwdev->max_sge; | 892 | send->s_rdma_wr.wr.num_sge = rds_iwdev->max_sge; |
893 | num_sge -= rds_iwdev->max_sge; | 893 | num_sge -= rds_iwdev->max_sge; |
894 | } else | 894 | } else |
895 | send->s_wr.num_sge = num_sge; | 895 | send->s_rdma_wr.wr.num_sge = num_sge; |
896 | 896 | ||
897 | send->s_wr.next = NULL; | 897 | send->s_rdma_wr.wr.next = NULL; |
898 | 898 | ||
899 | if (prev) | 899 | if (prev) |
900 | prev->s_wr.next = &send->s_wr; | 900 | prev->s_send_wr.next = &send->s_rdma_wr.wr; |
901 | 901 | ||
902 | for (j = 0; j < send->s_wr.num_sge && scat != &op->op_sg[op->op_count]; j++) { | 902 | for (j = 0; j < send->s_rdma_wr.wr.num_sge && |
903 | scat != &op->op_sg[op->op_count]; j++) { | ||
903 | len = ib_sg_dma_len(ic->i_cm_id->device, scat); | 904 | len = ib_sg_dma_len(ic->i_cm_id->device, scat); |
904 | 905 | ||
905 | if (send->s_wr.opcode == IB_WR_RDMA_READ_WITH_INV) | 906 | if (send->s_rdma_wr.wr.opcode == IB_WR_RDMA_READ_WITH_INV) |
906 | send->s_page_list->page_list[j] = ib_sg_dma_address(ic->i_cm_id->device, scat); | 907 | send->s_page_list->page_list[j] = ib_sg_dma_address(ic->i_cm_id->device, scat); |
907 | else { | 908 | else { |
908 | send->s_sge[j].addr = ib_sg_dma_address(ic->i_cm_id->device, scat); | 909 | send->s_sge[j].addr = ib_sg_dma_address(ic->i_cm_id->device, scat); |
@@ -917,15 +918,17 @@ int rds_iw_xmit_rdma(struct rds_connection *conn, struct rm_rdma_op *op) | |||
917 | scat++; | 918 | scat++; |
918 | } | 919 | } |
919 | 920 | ||
920 | if (send->s_wr.opcode == IB_WR_RDMA_READ_WITH_INV) { | 921 | if (send->s_rdma_wr.wr.opcode == IB_WR_RDMA_READ_WITH_INV) { |
921 | send->s_wr.num_sge = 1; | 922 | send->s_rdma_wr.wr.num_sge = 1; |
922 | send->s_sge[0].addr = conn->c_xmit_rm->m_rs->rs_user_addr; | 923 | send->s_sge[0].addr = conn->c_xmit_rm->m_rs->rs_user_addr; |
923 | send->s_sge[0].length = conn->c_xmit_rm->m_rs->rs_user_bytes; | 924 | send->s_sge[0].length = conn->c_xmit_rm->m_rs->rs_user_bytes; |
924 | send->s_sge[0].lkey = ic->i_sends[fr_pos].s_mr->lkey; | 925 | send->s_sge[0].lkey = ic->i_sends[fr_pos].s_mr->lkey; |
925 | } | 926 | } |
926 | 927 | ||
927 | rdsdebug("send %p wr %p num_sge %u next %p\n", send, | 928 | rdsdebug("send %p wr %p num_sge %u next %p\n", send, |
928 | &send->s_wr, send->s_wr.num_sge, send->s_wr.next); | 929 | &send->s_rdma_wr, |
930 | send->s_rdma_wr.wr.num_sge, | ||
931 | send->s_rdma_wr.wr.next); | ||
929 | 932 | ||
930 | prev = send; | 933 | prev = send; |
931 | if (++send == &ic->i_sends[ic->i_send_ring.w_nr]) | 934 | if (++send == &ic->i_sends[ic->i_send_ring.w_nr]) |
@@ -934,7 +937,7 @@ int rds_iw_xmit_rdma(struct rds_connection *conn, struct rm_rdma_op *op) | |||
934 | 937 | ||
935 | /* if we finished the message then send completion owns it */ | 938 | /* if we finished the message then send completion owns it */ |
936 | if (scat == &op->op_sg[op->op_count]) | 939 | if (scat == &op->op_sg[op->op_count]) |
937 | first->s_wr.send_flags = IB_SEND_SIGNALED; | 940 | first->s_rdma_wr.wr.send_flags = IB_SEND_SIGNALED; |
938 | 941 | ||
939 | if (i < work_alloc) { | 942 | if (i < work_alloc) { |
940 | rds_iw_ring_unalloc(&ic->i_send_ring, work_alloc - i); | 943 | rds_iw_ring_unalloc(&ic->i_send_ring, work_alloc - i); |
@@ -953,11 +956,11 @@ int rds_iw_xmit_rdma(struct rds_connection *conn, struct rm_rdma_op *op) | |||
953 | work_alloc++; | 956 | work_alloc++; |
954 | } | 957 | } |
955 | 958 | ||
956 | failed_wr = &first->s_wr; | 959 | failed_wr = &first->s_rdma_wr.wr; |
957 | ret = ib_post_send(ic->i_cm_id->qp, &first->s_wr, &failed_wr); | 960 | ret = ib_post_send(ic->i_cm_id->qp, &first->s_rdma_wr.wr, &failed_wr); |
958 | rdsdebug("ic %p first %p (wr %p) ret %d wr %p\n", ic, | 961 | rdsdebug("ic %p first %p (wr %p) ret %d wr %p\n", ic, |
959 | first, &first->s_wr, ret, failed_wr); | 962 | first, &first->s_rdma_wr, ret, failed_wr); |
960 | BUG_ON(failed_wr != &first->s_wr); | 963 | BUG_ON(failed_wr != &first->s_rdma_wr.wr); |
961 | if (ret) { | 964 | if (ret) { |
962 | printk(KERN_WARNING "RDS/IW: rdma ib_post_send to %pI4 " | 965 | printk(KERN_WARNING "RDS/IW: rdma ib_post_send to %pI4 " |
963 | "returned %d\n", &conn->c_faddr, ret); | 966 | "returned %d\n", &conn->c_faddr, ret); |
diff --git a/net/sunrpc/xprtrdma/frwr_ops.c b/net/sunrpc/xprtrdma/frwr_ops.c index 5318951b3b53..0d2f46f600b6 100644 --- a/net/sunrpc/xprtrdma/frwr_ops.c +++ b/net/sunrpc/xprtrdma/frwr_ops.c | |||
@@ -312,7 +312,8 @@ frwr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg, | |||
312 | struct rpcrdma_mw *mw; | 312 | struct rpcrdma_mw *mw; |
313 | struct rpcrdma_frmr *frmr; | 313 | struct rpcrdma_frmr *frmr; |
314 | struct ib_mr *mr; | 314 | struct ib_mr *mr; |
315 | struct ib_send_wr fastreg_wr, *bad_wr; | 315 | struct ib_fast_reg_wr fastreg_wr; |
316 | struct ib_send_wr *bad_wr; | ||
316 | u8 key; | 317 | u8 key; |
317 | int len, pageoff; | 318 | int len, pageoff; |
318 | int i, rc; | 319 | int i, rc; |
@@ -358,23 +359,23 @@ frwr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg, | |||
358 | __func__, mw, i, len); | 359 | __func__, mw, i, len); |
359 | 360 | ||
360 | memset(&fastreg_wr, 0, sizeof(fastreg_wr)); | 361 | memset(&fastreg_wr, 0, sizeof(fastreg_wr)); |
361 | fastreg_wr.wr_id = (unsigned long)(void *)mw; | 362 | fastreg_wr.wr.wr_id = (unsigned long)(void *)mw; |
362 | fastreg_wr.opcode = IB_WR_FAST_REG_MR; | 363 | fastreg_wr.wr.opcode = IB_WR_FAST_REG_MR; |
363 | fastreg_wr.wr.fast_reg.iova_start = seg1->mr_dma + pageoff; | 364 | fastreg_wr.iova_start = seg1->mr_dma + pageoff; |
364 | fastreg_wr.wr.fast_reg.page_list = frmr->fr_pgl; | 365 | fastreg_wr.page_list = frmr->fr_pgl; |
365 | fastreg_wr.wr.fast_reg.page_shift = PAGE_SHIFT; | 366 | fastreg_wr.page_shift = PAGE_SHIFT; |
366 | fastreg_wr.wr.fast_reg.page_list_len = page_no; | 367 | fastreg_wr.page_list_len = page_no; |
367 | fastreg_wr.wr.fast_reg.length = len; | 368 | fastreg_wr.length = len; |
368 | fastreg_wr.wr.fast_reg.access_flags = writing ? | 369 | fastreg_wr.access_flags = writing ? |
369 | IB_ACCESS_REMOTE_WRITE | IB_ACCESS_LOCAL_WRITE : | 370 | IB_ACCESS_REMOTE_WRITE | IB_ACCESS_LOCAL_WRITE : |
370 | IB_ACCESS_REMOTE_READ; | 371 | IB_ACCESS_REMOTE_READ; |
371 | mr = frmr->fr_mr; | 372 | mr = frmr->fr_mr; |
372 | key = (u8)(mr->rkey & 0x000000FF); | 373 | key = (u8)(mr->rkey & 0x000000FF); |
373 | ib_update_fast_reg_key(mr, ++key); | 374 | ib_update_fast_reg_key(mr, ++key); |
374 | fastreg_wr.wr.fast_reg.rkey = mr->rkey; | 375 | fastreg_wr.rkey = mr->rkey; |
375 | 376 | ||
376 | DECR_CQCOUNT(&r_xprt->rx_ep); | 377 | DECR_CQCOUNT(&r_xprt->rx_ep); |
377 | rc = ib_post_send(ia->ri_id->qp, &fastreg_wr, &bad_wr); | 378 | rc = ib_post_send(ia->ri_id->qp, &fastreg_wr.wr, &bad_wr); |
378 | if (rc) | 379 | if (rc) |
379 | goto out_senderr; | 380 | goto out_senderr; |
380 | 381 | ||
diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c index cb5174284074..7be42d0da19e 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c +++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c | |||
@@ -126,7 +126,7 @@ int rdma_read_chunk_lcl(struct svcxprt_rdma *xprt, | |||
126 | u64 rs_offset, | 126 | u64 rs_offset, |
127 | bool last) | 127 | bool last) |
128 | { | 128 | { |
129 | struct ib_send_wr read_wr; | 129 | struct ib_rdma_wr read_wr; |
130 | int pages_needed = PAGE_ALIGN(*page_offset + rs_length) >> PAGE_SHIFT; | 130 | int pages_needed = PAGE_ALIGN(*page_offset + rs_length) >> PAGE_SHIFT; |
131 | struct svc_rdma_op_ctxt *ctxt = svc_rdma_get_context(xprt); | 131 | struct svc_rdma_op_ctxt *ctxt = svc_rdma_get_context(xprt); |
132 | int ret, read, pno; | 132 | int ret, read, pno; |
@@ -179,16 +179,16 @@ int rdma_read_chunk_lcl(struct svcxprt_rdma *xprt, | |||
179 | clear_bit(RDMACTXT_F_LAST_CTXT, &ctxt->flags); | 179 | clear_bit(RDMACTXT_F_LAST_CTXT, &ctxt->flags); |
180 | 180 | ||
181 | memset(&read_wr, 0, sizeof(read_wr)); | 181 | memset(&read_wr, 0, sizeof(read_wr)); |
182 | read_wr.wr_id = (unsigned long)ctxt; | 182 | read_wr.wr.wr_id = (unsigned long)ctxt; |
183 | read_wr.opcode = IB_WR_RDMA_READ; | 183 | read_wr.wr.opcode = IB_WR_RDMA_READ; |
184 | ctxt->wr_op = read_wr.opcode; | 184 | ctxt->wr_op = read_wr.wr.opcode; |
185 | read_wr.send_flags = IB_SEND_SIGNALED; | 185 | read_wr.wr.send_flags = IB_SEND_SIGNALED; |
186 | read_wr.wr.rdma.rkey = rs_handle; | 186 | read_wr.rkey = rs_handle; |
187 | read_wr.wr.rdma.remote_addr = rs_offset; | 187 | read_wr.remote_addr = rs_offset; |
188 | read_wr.sg_list = ctxt->sge; | 188 | read_wr.wr.sg_list = ctxt->sge; |
189 | read_wr.num_sge = pages_needed; | 189 | read_wr.wr.num_sge = pages_needed; |
190 | 190 | ||
191 | ret = svc_rdma_send(xprt, &read_wr); | 191 | ret = svc_rdma_send(xprt, &read_wr.wr); |
192 | if (ret) { | 192 | if (ret) { |
193 | pr_err("svcrdma: Error %d posting RDMA_READ\n", ret); | 193 | pr_err("svcrdma: Error %d posting RDMA_READ\n", ret); |
194 | set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags); | 194 | set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags); |
@@ -218,9 +218,9 @@ int rdma_read_chunk_frmr(struct svcxprt_rdma *xprt, | |||
218 | u64 rs_offset, | 218 | u64 rs_offset, |
219 | bool last) | 219 | bool last) |
220 | { | 220 | { |
221 | struct ib_send_wr read_wr; | 221 | struct ib_rdma_wr read_wr; |
222 | struct ib_send_wr inv_wr; | 222 | struct ib_send_wr inv_wr; |
223 | struct ib_send_wr fastreg_wr; | 223 | struct ib_fast_reg_wr fastreg_wr; |
224 | u8 key; | 224 | u8 key; |
225 | int pages_needed = PAGE_ALIGN(*page_offset + rs_length) >> PAGE_SHIFT; | 225 | int pages_needed = PAGE_ALIGN(*page_offset + rs_length) >> PAGE_SHIFT; |
226 | struct svc_rdma_op_ctxt *ctxt = svc_rdma_get_context(xprt); | 226 | struct svc_rdma_op_ctxt *ctxt = svc_rdma_get_context(xprt); |
@@ -289,31 +289,31 @@ int rdma_read_chunk_frmr(struct svcxprt_rdma *xprt, | |||
289 | 289 | ||
290 | /* Prepare FASTREG WR */ | 290 | /* Prepare FASTREG WR */ |
291 | memset(&fastreg_wr, 0, sizeof(fastreg_wr)); | 291 | memset(&fastreg_wr, 0, sizeof(fastreg_wr)); |
292 | fastreg_wr.opcode = IB_WR_FAST_REG_MR; | 292 | fastreg_wr.wr.opcode = IB_WR_FAST_REG_MR; |
293 | fastreg_wr.send_flags = IB_SEND_SIGNALED; | 293 | fastreg_wr.wr.send_flags = IB_SEND_SIGNALED; |
294 | fastreg_wr.wr.fast_reg.iova_start = (unsigned long)frmr->kva; | 294 | fastreg_wr.iova_start = (unsigned long)frmr->kva; |
295 | fastreg_wr.wr.fast_reg.page_list = frmr->page_list; | 295 | fastreg_wr.page_list = frmr->page_list; |
296 | fastreg_wr.wr.fast_reg.page_list_len = frmr->page_list_len; | 296 | fastreg_wr.page_list_len = frmr->page_list_len; |
297 | fastreg_wr.wr.fast_reg.page_shift = PAGE_SHIFT; | 297 | fastreg_wr.page_shift = PAGE_SHIFT; |
298 | fastreg_wr.wr.fast_reg.length = frmr->map_len; | 298 | fastreg_wr.length = frmr->map_len; |
299 | fastreg_wr.wr.fast_reg.access_flags = frmr->access_flags; | 299 | fastreg_wr.access_flags = frmr->access_flags; |
300 | fastreg_wr.wr.fast_reg.rkey = frmr->mr->lkey; | 300 | fastreg_wr.rkey = frmr->mr->lkey; |
301 | fastreg_wr.next = &read_wr; | 301 | fastreg_wr.wr.next = &read_wr.wr; |
302 | 302 | ||
303 | /* Prepare RDMA_READ */ | 303 | /* Prepare RDMA_READ */ |
304 | memset(&read_wr, 0, sizeof(read_wr)); | 304 | memset(&read_wr, 0, sizeof(read_wr)); |
305 | read_wr.send_flags = IB_SEND_SIGNALED; | 305 | read_wr.wr.send_flags = IB_SEND_SIGNALED; |
306 | read_wr.wr.rdma.rkey = rs_handle; | 306 | read_wr.rkey = rs_handle; |
307 | read_wr.wr.rdma.remote_addr = rs_offset; | 307 | read_wr.remote_addr = rs_offset; |
308 | read_wr.sg_list = ctxt->sge; | 308 | read_wr.wr.sg_list = ctxt->sge; |
309 | read_wr.num_sge = 1; | 309 | read_wr.wr.num_sge = 1; |
310 | if (xprt->sc_dev_caps & SVCRDMA_DEVCAP_READ_W_INV) { | 310 | if (xprt->sc_dev_caps & SVCRDMA_DEVCAP_READ_W_INV) { |
311 | read_wr.opcode = IB_WR_RDMA_READ_WITH_INV; | 311 | read_wr.wr.opcode = IB_WR_RDMA_READ_WITH_INV; |
312 | read_wr.wr_id = (unsigned long)ctxt; | 312 | read_wr.wr.wr_id = (unsigned long)ctxt; |
313 | read_wr.ex.invalidate_rkey = ctxt->frmr->mr->lkey; | 313 | read_wr.wr.ex.invalidate_rkey = ctxt->frmr->mr->lkey; |
314 | } else { | 314 | } else { |
315 | read_wr.opcode = IB_WR_RDMA_READ; | 315 | read_wr.wr.opcode = IB_WR_RDMA_READ; |
316 | read_wr.next = &inv_wr; | 316 | read_wr.wr.next = &inv_wr; |
317 | /* Prepare invalidate */ | 317 | /* Prepare invalidate */ |
318 | memset(&inv_wr, 0, sizeof(inv_wr)); | 318 | memset(&inv_wr, 0, sizeof(inv_wr)); |
319 | inv_wr.wr_id = (unsigned long)ctxt; | 319 | inv_wr.wr_id = (unsigned long)ctxt; |
@@ -321,10 +321,10 @@ int rdma_read_chunk_frmr(struct svcxprt_rdma *xprt, | |||
321 | inv_wr.send_flags = IB_SEND_SIGNALED | IB_SEND_FENCE; | 321 | inv_wr.send_flags = IB_SEND_SIGNALED | IB_SEND_FENCE; |
322 | inv_wr.ex.invalidate_rkey = frmr->mr->lkey; | 322 | inv_wr.ex.invalidate_rkey = frmr->mr->lkey; |
323 | } | 323 | } |
324 | ctxt->wr_op = read_wr.opcode; | 324 | ctxt->wr_op = read_wr.wr.opcode; |
325 | 325 | ||
326 | /* Post the chain */ | 326 | /* Post the chain */ |
327 | ret = svc_rdma_send(xprt, &fastreg_wr); | 327 | ret = svc_rdma_send(xprt, &fastreg_wr.wr); |
328 | if (ret) { | 328 | if (ret) { |
329 | pr_err("svcrdma: Error %d posting RDMA_READ\n", ret); | 329 | pr_err("svcrdma: Error %d posting RDMA_READ\n", ret); |
330 | set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags); | 330 | set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags); |
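Note how the chain built in rdma_read_chunk_frmr() mixes WR types: the links always go through the embedded headers, so svc_rdma_send() (and ib_post_send() underneath) still walks a single list of struct ib_send_wr. Condensed from the hunk above:

        /* fast-reg WR  ->  RDMA READ WR  ->  (optional) plain invalidate WR */
        fastreg_wr.wr.next = &read_wr.wr;
        read_wr.wr.next    = &inv_wr;   /* only when READ_W_INV is unavailable */
        ret = svc_rdma_send(xprt, &fastreg_wr.wr);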
diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c index 1dfae8317065..969a1ab75fc3 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c +++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c | |||
@@ -217,7 +217,7 @@ static int send_write(struct svcxprt_rdma *xprt, struct svc_rqst *rqstp, | |||
217 | u32 xdr_off, int write_len, | 217 | u32 xdr_off, int write_len, |
218 | struct svc_rdma_req_map *vec) | 218 | struct svc_rdma_req_map *vec) |
219 | { | 219 | { |
220 | struct ib_send_wr write_wr; | 220 | struct ib_rdma_wr write_wr; |
221 | struct ib_sge *sge; | 221 | struct ib_sge *sge; |
222 | int xdr_sge_no; | 222 | int xdr_sge_no; |
223 | int sge_no; | 223 | int sge_no; |
@@ -282,17 +282,17 @@ static int send_write(struct svcxprt_rdma *xprt, struct svc_rqst *rqstp, | |||
282 | /* Prepare WRITE WR */ | 282 | /* Prepare WRITE WR */ |
283 | memset(&write_wr, 0, sizeof write_wr); | 283 | memset(&write_wr, 0, sizeof write_wr); |
284 | ctxt->wr_op = IB_WR_RDMA_WRITE; | 284 | ctxt->wr_op = IB_WR_RDMA_WRITE; |
285 | write_wr.wr_id = (unsigned long)ctxt; | 285 | write_wr.wr.wr_id = (unsigned long)ctxt; |
286 | write_wr.sg_list = &sge[0]; | 286 | write_wr.wr.sg_list = &sge[0]; |
287 | write_wr.num_sge = sge_no; | 287 | write_wr.wr.num_sge = sge_no; |
288 | write_wr.opcode = IB_WR_RDMA_WRITE; | 288 | write_wr.wr.opcode = IB_WR_RDMA_WRITE; |
289 | write_wr.send_flags = IB_SEND_SIGNALED; | 289 | write_wr.wr.send_flags = IB_SEND_SIGNALED; |
290 | write_wr.wr.rdma.rkey = rmr; | 290 | write_wr.rkey = rmr; |
291 | write_wr.wr.rdma.remote_addr = to; | 291 | write_wr.remote_addr = to; |
292 | 292 | ||
293 | /* Post It */ | 293 | /* Post It */ |
294 | atomic_inc(&rdma_stat_write); | 294 | atomic_inc(&rdma_stat_write); |
295 | if (svc_rdma_send(xprt, &write_wr)) | 295 | if (svc_rdma_send(xprt, &write_wr.wr)) |
296 | goto err; | 296 | goto err; |
297 | return write_len - bc; | 297 | return write_len - bc; |
298 | err: | 298 | err: |