author    Doug Ledford <dledford@redhat.com>  2015-10-28 22:23:34 -0400
committer Doug Ledford <dledford@redhat.com>  2015-10-28 22:23:34 -0400
commit    63e8790d39a2d7c9a0ebeab987a6033d184bc6ba
tree      9436939401b222d344f66e2bda59b445d5b9189f
parent    95893dde99d9d14f8a6ac99ea3103792a8da5f25
parent    eb14ab3ba14081e403be93dc6396627567fadf60
Merge branch 'wr-cleanup' into k.o/for-4.4
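
The wr-cleanup series splits the old catch-all struct ib_send_wr, whose
union carried UD, RDMA, and atomic fields for every opcode, into
per-opcode containers (ib_ud_wr, ib_rdma_wr, ib_atomic_wr, ...) that
embed the base WR as their first member, so &container->wr still chains
through ib_post_send() unchanged. A simplified sketch of the pattern
(abbreviated from what the series adds to include/rdma/ib_verbs.h; some
fields omitted):

	struct ib_ud_wr {
		struct ib_send_wr wr;	/* base WR, embedded first */
		struct ib_ah *ah;
		u32 remote_qpn;
		u32 remote_qkey;
		u16 pkey_index;		/* valid for GSI only */
		u8 port_num;		/* valid for DR SMPs on switch only */
	};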
Diffstat (limited to 'drivers/infiniband/core')
-rw-r--r--  drivers/infiniband/core/agent.c      |   2
-rw-r--r--  drivers/infiniband/core/mad.c        |  40
-rw-r--r--  drivers/infiniband/core/mad_priv.h   |   2
-rw-r--r--  drivers/infiniband/core/uverbs_cmd.c | 143
4 files changed, 104 insertions(+), 83 deletions(-)
diff --git a/drivers/infiniband/core/agent.c b/drivers/infiniband/core/agent.c
index 0429040304fd..4fa524dfb6cf 100644
--- a/drivers/infiniband/core/agent.c
+++ b/drivers/infiniband/core/agent.c
@@ -126,7 +126,7 @@ void agent_send_response(const struct ib_mad_hdr *mad_hdr, const struct ib_grh *
 		mad_send_wr = container_of(send_buf,
 					   struct ib_mad_send_wr_private,
 					   send_buf);
-		mad_send_wr->send_wr.wr.ud.port_num = port_num;
+		mad_send_wr->send_wr.port_num = port_num;
 	}
 
 	if (ib_post_send_mad(send_buf, NULL)) {
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index fa63b89e15aa..8d8af7a41a30 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -752,7 +752,7 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
 	struct ib_device *device = mad_agent_priv->agent.device;
 	u8 port_num;
 	struct ib_wc mad_wc;
-	struct ib_send_wr *send_wr = &mad_send_wr->send_wr;
+	struct ib_ud_wr *send_wr = &mad_send_wr->send_wr;
 	size_t mad_size = port_mad_size(mad_agent_priv->qp_info->port_priv);
 	u16 out_mad_pkey_index = 0;
 	u16 drslid;
@@ -761,7 +761,7 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
 
 	if (rdma_cap_ib_switch(device) &&
 	    smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
-		port_num = send_wr->wr.ud.port_num;
+		port_num = send_wr->port_num;
 	else
 		port_num = mad_agent_priv->agent.port_num;
 
@@ -832,9 +832,9 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
 	}
 
 	build_smp_wc(mad_agent_priv->agent.qp,
-		     send_wr->wr_id, drslid,
-		     send_wr->wr.ud.pkey_index,
-		     send_wr->wr.ud.port_num, &mad_wc);
+		     send_wr->wr.wr_id, drslid,
+		     send_wr->pkey_index,
+		     send_wr->port_num, &mad_wc);
 
 	if (opa && smp->base_version == OPA_MGMT_BASE_VERSION) {
 		mad_wc.byte_len = mad_send_wr->send_buf.hdr_len
@@ -894,7 +894,7 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
 
 	local->mad_send_wr = mad_send_wr;
 	if (opa) {
-		local->mad_send_wr->send_wr.wr.ud.pkey_index = out_mad_pkey_index;
+		local->mad_send_wr->send_wr.pkey_index = out_mad_pkey_index;
 		local->return_wc_byte_len = mad_size;
 	}
 	/* Reference MAD agent until send side of local completion handled */
@@ -1039,14 +1039,14 @@ struct ib_mad_send_buf * ib_create_send_mad(struct ib_mad_agent *mad_agent,
 
 	mad_send_wr->sg_list[1].lkey = mad_agent->qp->pd->local_dma_lkey;
 
-	mad_send_wr->send_wr.wr_id = (unsigned long) mad_send_wr;
-	mad_send_wr->send_wr.sg_list = mad_send_wr->sg_list;
-	mad_send_wr->send_wr.num_sge = 2;
-	mad_send_wr->send_wr.opcode = IB_WR_SEND;
-	mad_send_wr->send_wr.send_flags = IB_SEND_SIGNALED;
-	mad_send_wr->send_wr.wr.ud.remote_qpn = remote_qpn;
-	mad_send_wr->send_wr.wr.ud.remote_qkey = IB_QP_SET_QKEY;
-	mad_send_wr->send_wr.wr.ud.pkey_index = pkey_index;
+	mad_send_wr->send_wr.wr.wr_id = (unsigned long) mad_send_wr;
+	mad_send_wr->send_wr.wr.sg_list = mad_send_wr->sg_list;
+	mad_send_wr->send_wr.wr.num_sge = 2;
+	mad_send_wr->send_wr.wr.opcode = IB_WR_SEND;
+	mad_send_wr->send_wr.wr.send_flags = IB_SEND_SIGNALED;
+	mad_send_wr->send_wr.remote_qpn = remote_qpn;
+	mad_send_wr->send_wr.remote_qkey = IB_QP_SET_QKEY;
+	mad_send_wr->send_wr.pkey_index = pkey_index;
 
 	if (rmpp_active) {
 		ret = alloc_send_rmpp_list(mad_send_wr, mad_size, gfp_mask);
@@ -1151,7 +1151,7 @@ int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr)
 
 	/* Set WR ID to find mad_send_wr upon completion */
 	qp_info = mad_send_wr->mad_agent_priv->qp_info;
-	mad_send_wr->send_wr.wr_id = (unsigned long)&mad_send_wr->mad_list;
+	mad_send_wr->send_wr.wr.wr_id = (unsigned long)&mad_send_wr->mad_list;
 	mad_send_wr->mad_list.mad_queue = &qp_info->send_queue;
 
 	mad_agent = mad_send_wr->send_buf.mad_agent;
@@ -1179,7 +1179,7 @@ int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr)
 
 	spin_lock_irqsave(&qp_info->send_queue.lock, flags);
 	if (qp_info->send_queue.count < qp_info->send_queue.max_active) {
-		ret = ib_post_send(mad_agent->qp, &mad_send_wr->send_wr,
+		ret = ib_post_send(mad_agent->qp, &mad_send_wr->send_wr.wr,
 				   &bad_send_wr);
 		list = &qp_info->send_queue.list;
 	} else {
@@ -1244,7 +1244,7 @@ int ib_post_send_mad(struct ib_mad_send_buf *send_buf,
 		 * request associated with the completion
 		 */
 		next_send_buf = send_buf->next;
-		mad_send_wr->send_wr.wr.ud.ah = send_buf->ah;
+		mad_send_wr->send_wr.ah = send_buf->ah;
 
 		if (((struct ib_mad_hdr *) send_buf->mad)->mgmt_class ==
 		    IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
@@ -2457,7 +2457,7 @@ retry:
 	ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc);
 
 	if (queued_send_wr) {
-		ret = ib_post_send(qp_info->qp, &queued_send_wr->send_wr,
+		ret = ib_post_send(qp_info->qp, &queued_send_wr->send_wr.wr,
 				   &bad_send_wr);
 		if (ret) {
 			dev_err(&port_priv->device->dev,
@@ -2515,7 +2515,7 @@ static void mad_error_handler(struct ib_mad_port_private *port_priv,
 		struct ib_send_wr *bad_send_wr;
 
 		mad_send_wr->retry = 0;
-		ret = ib_post_send(qp_info->qp, &mad_send_wr->send_wr,
+		ret = ib_post_send(qp_info->qp, &mad_send_wr->send_wr.wr,
 				   &bad_send_wr);
 		if (ret)
 			ib_mad_send_done_handler(port_priv, wc);
@@ -2713,7 +2713,7 @@ static void local_completions(struct work_struct *work)
 			build_smp_wc(recv_mad_agent->agent.qp,
 				     (unsigned long) local->mad_send_wr,
 				     be16_to_cpu(IB_LID_PERMISSIVE),
-				     local->mad_send_wr->send_wr.wr.ud.pkey_index,
+				     local->mad_send_wr->send_wr.pkey_index,
 				     recv_mad_agent->agent.port_num, &wc);
 
 			local->mad_priv->header.recv_wc.wc = &wc;
diff --git a/drivers/infiniband/core/mad_priv.h b/drivers/infiniband/core/mad_priv.h
index 4a4f7aad0978..990698a6ab4b 100644
--- a/drivers/infiniband/core/mad_priv.h
+++ b/drivers/infiniband/core/mad_priv.h
@@ -123,7 +123,7 @@ struct ib_mad_send_wr_private {
 	struct ib_mad_send_buf send_buf;
 	u64 header_mapping;
 	u64 payload_mapping;
-	struct ib_send_wr send_wr;
+	struct ib_ud_wr send_wr;
 	struct ib_sge sg_list[IB_MAD_SEND_REQ_MAX_SG];
 	__be64 tid;
 	unsigned long timeout;
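
With this type change, mad.c reaches UD-specific fields directly on
send_wr and the generic fields through the embedded .wr, which is also
what gets posted. An illustrative fragment (condensed from the hunks
above, not new code):

	mad_send_wr->send_wr.wr.opcode = IB_WR_SEND;	/* base WR field */
	mad_send_wr->send_wr.remote_qpn = remote_qpn;	/* UD-only field */
	ib_post_send(mad_agent->qp, &mad_send_wr->send_wr.wr, &bad_send_wr);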
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index 8fd081ae9aa9..94816aeb95a0 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -2427,6 +2427,12 @@ ssize_t ib_uverbs_destroy_qp(struct ib_uverbs_file *file,
 	return in_len;
 }
 
+static void *alloc_wr(size_t wr_size, __u32 num_sge)
+{
+	return kmalloc(ALIGN(wr_size, sizeof (struct ib_sge)) +
+		       num_sge * sizeof (struct ib_sge), GFP_KERNEL);
+};
+
 ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file,
 			    struct ib_device *ib_dev,
 			    const char __user *buf, int in_len,
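
alloc_wr() sizes a single allocation so that an opcode-specific WR and
its scatter/gather array live contiguously: the ib_sge array begins at
the first ib_sge-aligned offset past the WR struct. A hedged sketch of
the layout it reserves (the local names here are illustrative, not from
the patch):

	/*
	 * +---------------------+---------------------------+
	 * | WR container        | struct ib_sge[num_sge]    |
	 * +---------------------+---------------------------+
	 * ^                     ^
	 * rdma                  (void *) rdma +
	 *                       ALIGN(sizeof(*rdma), sizeof(struct ib_sge))
	 */
	struct ib_rdma_wr *rdma = alloc_wr(sizeof(*rdma), num_sge);
	struct ib_sge *sg_list = (void *) rdma +
		ALIGN(sizeof(*rdma), sizeof(struct ib_sge));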
@@ -2475,14 +2481,83 @@ ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file,
 		goto out_put;
 	}
 
-	next = kmalloc(ALIGN(sizeof *next, sizeof (struct ib_sge)) +
-		       user_wr->num_sge * sizeof (struct ib_sge),
-		       GFP_KERNEL);
-	if (!next) {
-		ret = -ENOMEM;
+	if (is_ud) {
+		struct ib_ud_wr *ud;
+
+		if (user_wr->opcode != IB_WR_SEND &&
+		    user_wr->opcode != IB_WR_SEND_WITH_IMM) {
+			ret = -EINVAL;
+			goto out_put;
+		}
+
+		ud = alloc_wr(sizeof(*ud), user_wr->num_sge);
+		if (!ud) {
+			ret = -ENOMEM;
+			goto out_put;
+		}
+
+		ud->ah = idr_read_ah(user_wr->wr.ud.ah, file->ucontext);
+		if (!ud->ah) {
+			kfree(ud);
+			ret = -EINVAL;
+			goto out_put;
+		}
+		ud->remote_qpn = user_wr->wr.ud.remote_qpn;
+		ud->remote_qkey = user_wr->wr.ud.remote_qkey;
+
+		next = &ud->wr;
+	} else if (user_wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM ||
+		   user_wr->opcode == IB_WR_RDMA_WRITE ||
+		   user_wr->opcode == IB_WR_RDMA_READ) {
+		struct ib_rdma_wr *rdma;
+
+		rdma = alloc_wr(sizeof(*rdma), user_wr->num_sge);
+		if (!rdma) {
+			ret = -ENOMEM;
+			goto out_put;
+		}
+
+		rdma->remote_addr = user_wr->wr.rdma.remote_addr;
+		rdma->rkey = user_wr->wr.rdma.rkey;
+
+		next = &rdma->wr;
+	} else if (user_wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
+		   user_wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD) {
+		struct ib_atomic_wr *atomic;
+
+		atomic = alloc_wr(sizeof(*atomic), user_wr->num_sge);
+		if (!atomic) {
+			ret = -ENOMEM;
+			goto out_put;
+		}
+
+		atomic->remote_addr = user_wr->wr.atomic.remote_addr;
+		atomic->compare_add = user_wr->wr.atomic.compare_add;
+		atomic->swap = user_wr->wr.atomic.swap;
+		atomic->rkey = user_wr->wr.atomic.rkey;
+
+		next = &atomic->wr;
+	} else if (user_wr->opcode == IB_WR_SEND ||
+		   user_wr->opcode == IB_WR_SEND_WITH_IMM ||
+		   user_wr->opcode == IB_WR_SEND_WITH_INV) {
+		next = alloc_wr(sizeof(*next), user_wr->num_sge);
+		if (!next) {
+			ret = -ENOMEM;
+			goto out_put;
+		}
+	} else {
+		ret = -EINVAL;
 		goto out_put;
 	}
 
+	if (user_wr->opcode == IB_WR_SEND_WITH_IMM ||
+	    user_wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM) {
+		next->ex.imm_data =
+			(__be32 __force) user_wr->ex.imm_data;
+	} else if (user_wr->opcode == IB_WR_SEND_WITH_INV) {
+		next->ex.invalidate_rkey = user_wr->ex.invalidate_rkey;
+	}
+
 	if (!last)
 		wr = next;
 	else
@@ -2495,60 +2570,6 @@ ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file,
 	next->opcode = user_wr->opcode;
 	next->send_flags = user_wr->send_flags;
 
-	if (is_ud) {
-		if (next->opcode != IB_WR_SEND &&
-		    next->opcode != IB_WR_SEND_WITH_IMM) {
-			ret = -EINVAL;
-			goto out_put;
-		}
-
-		next->wr.ud.ah = idr_read_ah(user_wr->wr.ud.ah,
-					     file->ucontext);
-		if (!next->wr.ud.ah) {
-			ret = -EINVAL;
-			goto out_put;
-		}
-		next->wr.ud.remote_qpn = user_wr->wr.ud.remote_qpn;
-		next->wr.ud.remote_qkey = user_wr->wr.ud.remote_qkey;
-		if (next->opcode == IB_WR_SEND_WITH_IMM)
-			next->ex.imm_data =
-				(__be32 __force) user_wr->ex.imm_data;
-	} else {
-		switch (next->opcode) {
-		case IB_WR_RDMA_WRITE_WITH_IMM:
-			next->ex.imm_data =
-				(__be32 __force) user_wr->ex.imm_data;
-		case IB_WR_RDMA_WRITE:
-		case IB_WR_RDMA_READ:
-			next->wr.rdma.remote_addr =
-				user_wr->wr.rdma.remote_addr;
-			next->wr.rdma.rkey =
-				user_wr->wr.rdma.rkey;
-			break;
-		case IB_WR_SEND_WITH_IMM:
-			next->ex.imm_data =
-				(__be32 __force) user_wr->ex.imm_data;
-			break;
-		case IB_WR_SEND_WITH_INV:
-			next->ex.invalidate_rkey =
-				user_wr->ex.invalidate_rkey;
-			break;
-		case IB_WR_ATOMIC_CMP_AND_SWP:
-		case IB_WR_ATOMIC_FETCH_AND_ADD:
-			next->wr.atomic.remote_addr =
-				user_wr->wr.atomic.remote_addr;
-			next->wr.atomic.compare_add =
-				user_wr->wr.atomic.compare_add;
-			next->wr.atomic.swap = user_wr->wr.atomic.swap;
-			next->wr.atomic.rkey = user_wr->wr.atomic.rkey;
-		case IB_WR_SEND:
-			break;
-		default:
-			ret = -EINVAL;
-			goto out_put;
-		}
-	}
-
 	if (next->num_sge) {
 		next->sg_list = (void *) next +
 			ALIGN(sizeof *next, sizeof (struct ib_sge));
@@ -2582,8 +2603,8 @@ out_put:
 	put_qp_read(qp);
 
 	while (wr) {
-		if (is_ud && wr->wr.ud.ah)
-			put_ah_read(wr->wr.ud.ah);
+		if (is_ud && ud_wr(wr)->ah)
+			put_ah_read(ud_wr(wr)->ah);
 		next = wr->next;
 		kfree(wr);
 		wr = next;
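
The cleanup loop above walks the chain as plain struct ib_send_wr
pointers, so it uses the ud_wr() accessor to recover the UD container
before dropping the AH reference. A sketch of the accessor as the
series defines it in include/rdma/ib_verbs.h:

	static inline struct ib_ud_wr *ud_wr(struct ib_send_wr *wr)
	{
		return container_of(wr, struct ib_ud_wr, wr);
	}

This works because the embedded wr is the first member of every
container, so the base pointer equals the allocation start and
kfree(wr) frees the whole container.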