path: root/drivers/infiniband/ulp/isert/ib_isert.c
author    Christoph Hellwig <hch@lst.de>    2015-10-08 04:16:33 -0400
committer Christoph Hellwig <hch@lst.de>    2015-10-08 06:09:10 -0400
commit    e622f2f4ad2142d2a613a57fb85f8cf737935ef5 (patch)
tree      19fa458bcaacf3f8b2f5e40676f748afc3df1e84 /drivers/infiniband/ulp/isert/ib_isert.c
parent    b8cab5dab15ff5c2acc3faefdde28919b0341c11 (diff)
IB: split struct ib_send_wr
This patch splits up struct ib_send_wr so that all non-trivial verbs use their own structure which embeds struct ib_send_wr. This dramatically shrinks the size of a WR for the most common operations:

sizeof(struct ib_send_wr) (old):        96

sizeof(struct ib_send_wr):              48
sizeof(struct ib_rdma_wr):              64
sizeof(struct ib_atomic_wr):            96
sizeof(struct ib_ud_wr):                88
sizeof(struct ib_fast_reg_wr):          88
sizeof(struct ib_bind_mw_wr):           96
sizeof(struct ib_sig_handover_wr):      80

And with Sagi's pending MR rework the fast registration WR will also be down to a reasonable size:

sizeof(struct ib_fastreg_wr):           64

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Bart Van Assche <bart.vanassche@sandisk.com> [srp, srpt]
Reviewed-by: Chuck Lever <chuck.lever@oracle.com> [sunrpc]
Tested-by: Haggai Eran <haggaie@mellanox.com>
Tested-by: Sagi Grimberg <sagig@mellanox.com>
Tested-by: Steve Wise <swise@opengridcomputing.com>
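For context, a minimal sketch of the embedding pattern this series introduces, abridged from include/rdma/ib_verbs.h as of this series (the field list and helper shown here are illustrative, not a complete definition):

/* Common WR header: next pointer, wr_id, sg_list, opcode, send_flags, ... */
struct ib_send_wr;

/* Each non-trivial verb now wraps the common header in its own structure
 * instead of growing a union inside struct ib_send_wr. Abridged sketch: */
struct ib_rdma_wr {
        struct ib_send_wr wr;          /* embedded common header */
        u64               remote_addr; /* RDMA READ/WRITE specific fields */
        u32               rkey;
};

/* Providers recover the specific WR from the generic pointer handed to
 * ib_post_send() with container_of()-style helpers, e.g.: */
static inline struct ib_rdma_wr *rdma_wr(struct ib_send_wr *wr)
{
        return container_of(wr, struct ib_rdma_wr, wr);
}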
Diffstat (limited to 'drivers/infiniband/ulp/isert/ib_isert.c')
-rw-r--r--   drivers/infiniband/ulp/isert/ib_isert.c   156
1 file changed, 78 insertions(+), 78 deletions(-)
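Every hunk below applies the same mechanical conversion; roughly, using the RDMA WRITE setup from isert_reg_rdma() shown later in this diff:

/* Before: generic WR plus the wr.rdma union member */
send_wr->opcode              = IB_WR_RDMA_WRITE;
send_wr->wr.rdma.remote_addr = isert_cmd->read_va;
send_wr->wr.rdma.rkey        = isert_cmd->read_stag;

/* After: struct ib_rdma_wr embeds the generic WR as .wr and carries the
 * RDMA-specific fields directly */
rdma_wr->wr.opcode   = IB_WR_RDMA_WRITE;
rdma_wr->remote_addr = isert_cmd->read_va;
rdma_wr->rkey        = isert_cmd->read_stag;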
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index 403bd29443b8..02c4c0b4569d 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -1703,10 +1703,10 @@ isert_unmap_cmd(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn)
                 isert_unmap_data_buf(isert_conn, &wr->data);
         }
 
-        if (wr->send_wr) {
+        if (wr->rdma_wr) {
                 isert_dbg("Cmd %p free send_wr\n", isert_cmd);
-                kfree(wr->send_wr);
-                wr->send_wr = NULL;
+                kfree(wr->rdma_wr);
+                wr->rdma_wr = NULL;
         }
 
         if (wr->ib_sge) {
@@ -1741,7 +1741,7 @@ isert_unreg_rdma(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn)
         }
 
         wr->ib_sge = NULL;
-        wr->send_wr = NULL;
+        wr->rdma_wr = NULL;
 }
 
 static void
@@ -1910,7 +1910,7 @@ isert_completion_rdma_write(struct iser_tx_desc *tx_desc,
         }
 
         device->unreg_rdma_mem(isert_cmd, isert_conn);
-        wr->send_wr_num = 0;
+        wr->rdma_wr_num = 0;
         if (ret)
                 transport_send_check_condition_and_sense(se_cmd,
                                 se_cmd->pi_err, 0);
@@ -1938,7 +1938,7 @@ isert_completion_rdma_read(struct iser_tx_desc *tx_desc,
         iscsit_stop_dataout_timer(cmd);
         device->unreg_rdma_mem(isert_cmd, isert_conn);
         cmd->write_data_done = wr->data.len;
-        wr->send_wr_num = 0;
+        wr->rdma_wr_num = 0;
 
         isert_dbg("Cmd: %p RDMA_READ comp calling execute_cmd\n", isert_cmd);
         spin_lock_bh(&cmd->istate_lock);
@@ -2384,7 +2384,7 @@ isert_put_text_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
 
 static int
 isert_build_rdma_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
-                    struct ib_sge *ib_sge, struct ib_send_wr *send_wr,
+                    struct ib_sge *ib_sge, struct ib_rdma_wr *rdma_wr,
                     u32 data_left, u32 offset)
 {
         struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
@@ -2399,8 +2399,8 @@ isert_build_rdma_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
         sg_nents = min(cmd->se_cmd.t_data_nents - sg_off, isert_conn->max_sge);
         page_off = offset % PAGE_SIZE;
 
-        send_wr->sg_list = ib_sge;
-        send_wr->wr_id = (uintptr_t)&isert_cmd->tx_desc;
+        rdma_wr->wr.sg_list = ib_sge;
+        rdma_wr->wr.wr_id = (uintptr_t)&isert_cmd->tx_desc;
         /*
          * Perform mapping of TCM scatterlist memory ib_sge dma_addr.
          */
@@ -2425,11 +2425,11 @@ isert_build_rdma_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
                 isert_dbg("Incrementing ib_sge pointer to %p\n", ib_sge);
         }
 
-        send_wr->num_sge = ++i;
+        rdma_wr->wr.num_sge = ++i;
         isert_dbg("Set outgoing sg_list: %p num_sg: %u from TCM SGLs\n",
-                  send_wr->sg_list, send_wr->num_sge);
+                  rdma_wr->wr.sg_list, rdma_wr->wr.num_sge);
 
-        return send_wr->num_sge;
+        return rdma_wr->wr.num_sge;
 }
 
 static int
@@ -2440,7 +2440,7 @@ isert_map_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
         struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
         struct isert_conn *isert_conn = conn->context;
         struct isert_data_buf *data = &wr->data;
-        struct ib_send_wr *send_wr;
+        struct ib_rdma_wr *rdma_wr;
         struct ib_sge *ib_sge;
         u32 offset, data_len, data_left, rdma_write_max, va_offset = 0;
         int ret = 0, i, ib_sge_cnt;
@@ -2465,11 +2465,11 @@ isert_map_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
         }
         wr->ib_sge = ib_sge;
 
-        wr->send_wr_num = DIV_ROUND_UP(data->nents, isert_conn->max_sge);
-        wr->send_wr = kzalloc(sizeof(struct ib_send_wr) * wr->send_wr_num,
+        wr->rdma_wr_num = DIV_ROUND_UP(data->nents, isert_conn->max_sge);
+        wr->rdma_wr = kzalloc(sizeof(struct ib_rdma_wr) * wr->rdma_wr_num,
                               GFP_KERNEL);
-        if (!wr->send_wr) {
-                isert_dbg("Unable to allocate wr->send_wr\n");
+        if (!wr->rdma_wr) {
+                isert_dbg("Unable to allocate wr->rdma_wr\n");
                 ret = -ENOMEM;
                 goto unmap_cmd;
         }
@@ -2477,31 +2477,31 @@ isert_map_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
         wr->isert_cmd = isert_cmd;
         rdma_write_max = isert_conn->max_sge * PAGE_SIZE;
 
-        for (i = 0; i < wr->send_wr_num; i++) {
-                send_wr = &isert_cmd->rdma_wr.send_wr[i];
+        for (i = 0; i < wr->rdma_wr_num; i++) {
+                rdma_wr = &isert_cmd->rdma_wr.rdma_wr[i];
                 data_len = min(data_left, rdma_write_max);
 
-                send_wr->send_flags = 0;
+                rdma_wr->wr.send_flags = 0;
                 if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) {
-                        send_wr->opcode = IB_WR_RDMA_WRITE;
-                        send_wr->wr.rdma.remote_addr = isert_cmd->read_va + offset;
-                        send_wr->wr.rdma.rkey = isert_cmd->read_stag;
-                        if (i + 1 == wr->send_wr_num)
-                                send_wr->next = &isert_cmd->tx_desc.send_wr;
+                        rdma_wr->wr.opcode = IB_WR_RDMA_WRITE;
+                        rdma_wr->remote_addr = isert_cmd->read_va + offset;
+                        rdma_wr->rkey = isert_cmd->read_stag;
+                        if (i + 1 == wr->rdma_wr_num)
+                                rdma_wr->wr.next = &isert_cmd->tx_desc.send_wr;
                         else
-                                send_wr->next = &wr->send_wr[i + 1];
+                                rdma_wr->wr.next = &wr->rdma_wr[i + 1].wr;
                 } else {
-                        send_wr->opcode = IB_WR_RDMA_READ;
-                        send_wr->wr.rdma.remote_addr = isert_cmd->write_va + va_offset;
-                        send_wr->wr.rdma.rkey = isert_cmd->write_stag;
-                        if (i + 1 == wr->send_wr_num)
-                                send_wr->send_flags = IB_SEND_SIGNALED;
+                        rdma_wr->wr.opcode = IB_WR_RDMA_READ;
+                        rdma_wr->remote_addr = isert_cmd->write_va + va_offset;
+                        rdma_wr->rkey = isert_cmd->write_stag;
+                        if (i + 1 == wr->rdma_wr_num)
+                                rdma_wr->wr.send_flags = IB_SEND_SIGNALED;
                         else
-                                send_wr->next = &wr->send_wr[i + 1];
+                                rdma_wr->wr.next = &wr->rdma_wr[i + 1].wr;
                 }
 
                 ib_sge_cnt = isert_build_rdma_wr(isert_conn, isert_cmd, ib_sge,
-                                send_wr, data_len, offset);
+                                rdma_wr, data_len, offset);
                 ib_sge += ib_sge_cnt;
 
                 offset += data_len;
@@ -2581,8 +2581,8 @@ isert_fast_reg_mr(struct isert_conn *isert_conn,
         struct ib_device *ib_dev = device->ib_device;
         struct ib_mr *mr;
         struct ib_fast_reg_page_list *frpl;
-        struct ib_send_wr fr_wr, inv_wr;
-        struct ib_send_wr *bad_wr, *wr = NULL;
+        struct ib_fast_reg_wr fr_wr;
+        struct ib_send_wr inv_wr, *bad_wr, *wr = NULL;
         int ret, pagelist_len;
         u32 page_off;
 
@@ -2620,20 +2620,20 @@ isert_fast_reg_mr(struct isert_conn *isert_conn,
 
         /* Prepare FASTREG WR */
         memset(&fr_wr, 0, sizeof(fr_wr));
-        fr_wr.wr_id = ISER_FASTREG_LI_WRID;
-        fr_wr.opcode = IB_WR_FAST_REG_MR;
-        fr_wr.wr.fast_reg.iova_start = frpl->page_list[0] + page_off;
-        fr_wr.wr.fast_reg.page_list = frpl;
-        fr_wr.wr.fast_reg.page_list_len = pagelist_len;
-        fr_wr.wr.fast_reg.page_shift = PAGE_SHIFT;
-        fr_wr.wr.fast_reg.length = mem->len;
-        fr_wr.wr.fast_reg.rkey = mr->rkey;
-        fr_wr.wr.fast_reg.access_flags = IB_ACCESS_LOCAL_WRITE;
+        fr_wr.wr.wr_id = ISER_FASTREG_LI_WRID;
+        fr_wr.wr.opcode = IB_WR_FAST_REG_MR;
+        fr_wr.iova_start = frpl->page_list[0] + page_off;
+        fr_wr.page_list = frpl;
+        fr_wr.page_list_len = pagelist_len;
+        fr_wr.page_shift = PAGE_SHIFT;
+        fr_wr.length = mem->len;
+        fr_wr.rkey = mr->rkey;
+        fr_wr.access_flags = IB_ACCESS_LOCAL_WRITE;
 
         if (!wr)
-                wr = &fr_wr;
+                wr = &fr_wr.wr;
         else
-                wr->next = &fr_wr;
+                wr->next = &fr_wr.wr;
 
         ret = ib_post_send(isert_conn->qp, wr, &bad_wr);
         if (ret) {
@@ -2714,8 +2714,8 @@ isert_reg_sig_mr(struct isert_conn *isert_conn,
                  struct isert_rdma_wr *rdma_wr,
                  struct fast_reg_descriptor *fr_desc)
 {
-        struct ib_send_wr sig_wr, inv_wr;
-        struct ib_send_wr *bad_wr, *wr = NULL;
+        struct ib_sig_handover_wr sig_wr;
+        struct ib_send_wr inv_wr, *bad_wr, *wr = NULL;
         struct pi_context *pi_ctx = fr_desc->pi_ctx;
         struct ib_sig_attrs sig_attrs;
         int ret;
@@ -2733,20 +2733,20 @@ isert_reg_sig_mr(struct isert_conn *isert_conn,
         }
 
         memset(&sig_wr, 0, sizeof(sig_wr));
-        sig_wr.opcode = IB_WR_REG_SIG_MR;
-        sig_wr.wr_id = ISER_FASTREG_LI_WRID;
-        sig_wr.sg_list = &rdma_wr->ib_sg[DATA];
-        sig_wr.num_sge = 1;
-        sig_wr.wr.sig_handover.access_flags = IB_ACCESS_LOCAL_WRITE;
-        sig_wr.wr.sig_handover.sig_attrs = &sig_attrs;
-        sig_wr.wr.sig_handover.sig_mr = pi_ctx->sig_mr;
+        sig_wr.wr.opcode = IB_WR_REG_SIG_MR;
+        sig_wr.wr.wr_id = ISER_FASTREG_LI_WRID;
+        sig_wr.wr.sg_list = &rdma_wr->ib_sg[DATA];
+        sig_wr.wr.num_sge = 1;
+        sig_wr.access_flags = IB_ACCESS_LOCAL_WRITE;
+        sig_wr.sig_attrs = &sig_attrs;
+        sig_wr.sig_mr = pi_ctx->sig_mr;
         if (se_cmd->t_prot_sg)
-                sig_wr.wr.sig_handover.prot = &rdma_wr->ib_sg[PROT];
+                sig_wr.prot = &rdma_wr->ib_sg[PROT];
 
         if (!wr)
-                wr = &sig_wr;
+                wr = &sig_wr.wr;
         else
-                wr->next = &sig_wr;
+                wr->next = &sig_wr.wr;
 
         ret = ib_post_send(isert_conn->qp, wr, &bad_wr);
         if (ret) {
@@ -2840,7 +2840,7 @@ isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
         struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
         struct isert_conn *isert_conn = conn->context;
         struct fast_reg_descriptor *fr_desc = NULL;
-        struct ib_send_wr *send_wr;
+        struct ib_rdma_wr *rdma_wr;
         struct ib_sge *ib_sg;
         u32 offset;
         int ret = 0;
@@ -2881,26 +2881,26 @@ isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
 
         memcpy(&wr->s_ib_sge, ib_sg, sizeof(*ib_sg));
         wr->ib_sge = &wr->s_ib_sge;
-        wr->send_wr_num = 1;
-        memset(&wr->s_send_wr, 0, sizeof(*send_wr));
-        wr->send_wr = &wr->s_send_wr;
+        wr->rdma_wr_num = 1;
+        memset(&wr->s_rdma_wr, 0, sizeof(wr->s_rdma_wr));
+        wr->rdma_wr = &wr->s_rdma_wr;
         wr->isert_cmd = isert_cmd;
 
-        send_wr = &isert_cmd->rdma_wr.s_send_wr;
-        send_wr->sg_list = &wr->s_ib_sge;
-        send_wr->num_sge = 1;
-        send_wr->wr_id = (uintptr_t)&isert_cmd->tx_desc;
+        rdma_wr = &isert_cmd->rdma_wr.s_rdma_wr;
+        rdma_wr->wr.sg_list = &wr->s_ib_sge;
+        rdma_wr->wr.num_sge = 1;
+        rdma_wr->wr.wr_id = (uintptr_t)&isert_cmd->tx_desc;
         if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) {
-                send_wr->opcode = IB_WR_RDMA_WRITE;
-                send_wr->wr.rdma.remote_addr = isert_cmd->read_va;
-                send_wr->wr.rdma.rkey = isert_cmd->read_stag;
-                send_wr->send_flags = !isert_prot_cmd(isert_conn, se_cmd) ?
+                rdma_wr->wr.opcode = IB_WR_RDMA_WRITE;
+                rdma_wr->remote_addr = isert_cmd->read_va;
+                rdma_wr->rkey = isert_cmd->read_stag;
+                rdma_wr->wr.send_flags = !isert_prot_cmd(isert_conn, se_cmd) ?
                                 0 : IB_SEND_SIGNALED;
         } else {
-                send_wr->opcode = IB_WR_RDMA_READ;
-                send_wr->wr.rdma.remote_addr = isert_cmd->write_va;
-                send_wr->wr.rdma.rkey = isert_cmd->write_stag;
-                send_wr->send_flags = IB_SEND_SIGNALED;
+                rdma_wr->wr.opcode = IB_WR_RDMA_READ;
+                rdma_wr->remote_addr = isert_cmd->write_va;
+                rdma_wr->rkey = isert_cmd->write_stag;
+                rdma_wr->wr.send_flags = IB_SEND_SIGNALED;
         }
 
         return 0;
@@ -2948,11 +2948,11 @@ isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
                 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
                 isert_init_send_wr(isert_conn, isert_cmd,
                                 &isert_cmd->tx_desc.send_wr);
-                isert_cmd->rdma_wr.s_send_wr.next = &isert_cmd->tx_desc.send_wr;
-                wr->send_wr_num += 1;
+                isert_cmd->rdma_wr.s_rdma_wr.wr.next = &isert_cmd->tx_desc.send_wr;
+                wr->rdma_wr_num += 1;
         }
 
-        rc = ib_post_send(isert_conn->qp, wr->send_wr, &wr_failed);
+        rc = ib_post_send(isert_conn->qp, &wr->rdma_wr->wr, &wr_failed);
         if (rc)
                 isert_warn("ib_post_send() failed for IB_WR_RDMA_WRITE\n");
 
@@ -2986,7 +2986,7 @@ isert_get_dataout(struct iscsi_conn *conn, struct iscsi_cmd *cmd, bool recovery)
                 return rc;
         }
 
-        rc = ib_post_send(isert_conn->qp, wr->send_wr, &wr_failed);
+        rc = ib_post_send(isert_conn->qp, &wr->rdma_wr->wr, &wr_failed);
         if (rc)
                 isert_warn("ib_post_send() failed for IB_WR_RDMA_READ\n");
 