aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorNicholas Bellinger <nab@linux-iscsi.org>2014-02-27 12:05:03 -0500
committerNicholas Bellinger <nab@linux-iscsi.org>2014-03-04 20:54:09 -0500
commitb6b87a1df604678ed1be40158080db012a99ccca (patch)
tree5f8b4ad0c24e576730b0a4f1f248ca3d77281416
parentdefd884845297fd5690594bfe89656b01f16d87e (diff)
iser-target: Fix post_send_buf_count for RDMA READ/WRITE
This patch fixes the incorrect setting of ->post_send_buf_count related to RDMA WRITEs + READs, where isert_rdma_rw->send_wr_num was not being taken into account. This includes incrementing ->post_send_buf_count within isert_put_datain() + isert_get_dataout(), decrementing within __isert_send_completion() + isert_response_completion(), and clearing wr->send_wr_num within isert_completion_rdma_read(). This is necessary because even though IB_SEND_SIGNALED is not set for RDMA WRITEs + READs, during a QP failure event the work requests will be returned with exception status from the TX completion queue. Acked-by: Sagi Grimberg <sagig@mellanox.com> Cc: Or Gerlitz <ogerlitz@mellanox.com> Cc: <stable@vger.kernel.org> #3.10+ Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
-rw-r--r--drivers/infiniband/ulp/isert/ib_isert.c14
1 file changed, 8 insertions, 6 deletions
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index 862d7b4b0411..a70b0cf2b4c8 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -1549,6 +1549,7 @@ isert_completion_rdma_read(struct iser_tx_desc *tx_desc,
 	iscsit_stop_dataout_timer(cmd);
 	device->unreg_rdma_mem(isert_cmd, isert_conn);
 	cmd->write_data_done = wr->cur_rdma_length;
+	wr->send_wr_num = 0;
 
 	pr_debug("Cmd: %p RDMA_READ comp calling execute_cmd\n", isert_cmd);
 	spin_lock_bh(&cmd->istate_lock);
@@ -1613,6 +1614,7 @@ isert_response_completion(struct iser_tx_desc *tx_desc,
 			  struct ib_device *ib_dev)
 {
 	struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
+	struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
 
 	if (cmd->i_state == ISTATE_SEND_TASKMGTRSP ||
 	    cmd->i_state == ISTATE_SEND_LOGOUTRSP ||
@@ -1624,7 +1626,7 @@ isert_response_completion(struct iser_tx_desc *tx_desc,
 		queue_work(isert_comp_wq, &isert_cmd->comp_work);
 		return;
 	}
-	atomic_dec(&isert_conn->post_send_buf_count);
+	atomic_sub(wr->send_wr_num + 1, &isert_conn->post_send_buf_count);
 
 	cmd->i_state = ISTATE_SENT_STATUS;
 	isert_completion_put(tx_desc, isert_cmd, ib_dev);
@@ -1662,7 +1664,7 @@ __isert_send_completion(struct iser_tx_desc *tx_desc,
 	case ISER_IB_RDMA_READ:
 		pr_debug("isert_send_completion: Got ISER_IB_RDMA_READ:\n");
 
-		atomic_dec(&isert_conn->post_send_buf_count);
+		atomic_sub(wr->send_wr_num, &isert_conn->post_send_buf_count);
 		isert_completion_rdma_read(tx_desc, isert_cmd);
 		break;
 	default:
@@ -2386,12 +2388,12 @@ isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
 	isert_init_send_wr(isert_conn, isert_cmd,
 			   &isert_cmd->tx_desc.send_wr, true);
 
-	atomic_inc(&isert_conn->post_send_buf_count);
+	atomic_add(wr->send_wr_num + 1, &isert_conn->post_send_buf_count);
 
 	rc = ib_post_send(isert_conn->conn_qp, wr->send_wr, &wr_failed);
 	if (rc) {
 		pr_warn("ib_post_send() failed for IB_WR_RDMA_WRITE\n");
-		atomic_dec(&isert_conn->post_send_buf_count);
+		atomic_sub(wr->send_wr_num + 1, &isert_conn->post_send_buf_count);
 	}
 	pr_debug("Cmd: %p posted RDMA_WRITE + Response for iSER Data READ\n",
 		 isert_cmd);
@@ -2419,12 +2421,12 @@ isert_get_dataout(struct iscsi_conn *conn, struct iscsi_cmd *cmd, bool recovery)
 		return rc;
 	}
 
-	atomic_inc(&isert_conn->post_send_buf_count);
+	atomic_add(wr->send_wr_num, &isert_conn->post_send_buf_count);
 
 	rc = ib_post_send(isert_conn->conn_qp, wr->send_wr, &wr_failed);
 	if (rc) {
 		pr_warn("ib_post_send() failed for IB_WR_RDMA_READ\n");
-		atomic_dec(&isert_conn->post_send_buf_count);
+		atomic_sub(wr->send_wr_num, &isert_conn->post_send_buf_count);
 	}
 	pr_debug("Cmd: %p posted RDMA_READ memory for ISER Data WRITE\n",
 		 isert_cmd);