| author | Sagi Grimberg <sagig@mellanox.com> | 2014-12-02 09:57:38 -0500 |
|---|---|---|
| committer | Nicholas Bellinger <nab@linux-iscsi.org> | 2014-12-13 02:32:28 -0500 |
| commit | df43debdfd926fd1f13d5b0902d330d3e04ca05f (patch) | |
| tree | 367acecb036a063e52d9b42d678928d377b9cef1 /drivers/infiniband | |
| parent | 68a86dee8a32358ffd9dfa6d2acde4f71a572285 (diff) | |
iser-target: Unite error completion handler for RX and TX
As a pre-step to a single CQ, we unite the TX and RX error
completion handlers into a single handler.
This patch does not change any functionality.
Signed-off-by: Sagi Grimberg <sagig@mellanox.com>
Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
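
For readers skimming the patch, the shape of the consolidation is: one error-completion entry point takes a `tx` flag, performs the per-direction accounting (TX descriptor unmap/put and send-counter decrement, or receive-counter decrement), and runs the shared connection teardown only once both outstanding-buffer counters reach zero. Below is a minimal, standalone sketch of that control flow with simplified stand-in types and plain integer counters; `fake_conn`, `fake_cq_comp_err`, and `fake_conn_teardown` are hypothetical names, not part of the driver, and the real handler uses an atomic send counter plus the isert/target cleanup calls shown in the diff.

```c
#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-in for the per-connection state tracked by the driver. */
struct fake_conn {
	int post_recv_buf_count;   /* outstanding RX buffers */
	int post_send_buf_count;   /* outstanding TX buffers */
	bool terminated;
};

/* Placeholder for the shared teardown the real handler performs
 * (wait for session commands, terminate the connection, complete
 * conn_wait_comp_err). */
static void fake_conn_teardown(struct fake_conn *conn)
{
	conn->terminated = true;
	printf("connection teardown (all error completions drained)\n");
}

/*
 * Unified error-completion handler: TX and RX errors share one entry
 * point; the 'tx' flag selects the per-direction accounting, and the
 * teardown runs at most once, when both counters reach zero.
 */
static void fake_cq_comp_err(struct fake_conn *conn, bool tx)
{
	if (tx)
		conn->post_send_buf_count--;   /* TX-specific cleanup would go here */
	else
		conn->post_recv_buf_count--;   /* RX-specific cleanup would go here */

	if (conn->post_recv_buf_count == 0 &&
	    conn->post_send_buf_count == 0 &&
	    !conn->terminated)
		fake_conn_teardown(conn);
}

int main(void)
{
	struct fake_conn conn = {
		.post_recv_buf_count = 2,
		.post_send_buf_count = 1,
	};

	fake_cq_comp_err(&conn, false);  /* RX error completion */
	fake_cq_comp_err(&conn, true);   /* TX error completion */
	fake_cq_comp_err(&conn, false);  /* last RX: triggers teardown */
	return 0;
}
```

In this sketch the teardown fires only on the last error completion, mirroring how `isert_cq_comp_err()` in the patch defers the session-command wait, `isert_conn_terminate()`, and the `conn_wait_comp_err` completion until every posted RX and TX buffer has been flushed.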
Diffstat (limited to 'drivers/infiniband')
| -rw-r--r-- | drivers/infiniband/ulp/isert/ib_isert.c | 61 |

1 file changed, 28 insertions, 33 deletions
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index 992e452c9570..bbfdd62f9724 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -2014,40 +2014,38 @@ isert_send_completion(struct iser_tx_desc *tx_desc,
 }
 
 static void
-isert_cq_tx_comp_err(struct iser_tx_desc *tx_desc, struct isert_conn *isert_conn)
+isert_cq_comp_err(void *desc, struct isert_conn *isert_conn, bool tx)
 {
-	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
-	struct isert_cmd *isert_cmd = tx_desc->isert_cmd;
+	if (tx) {
+		struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
+		struct isert_cmd *isert_cmd;
 
-	if (!isert_cmd)
-		isert_unmap_tx_desc(tx_desc, ib_dev);
-	else
-		isert_completion_put(tx_desc, isert_cmd, ib_dev, true);
-}
-
-static void
-isert_cq_rx_comp_err(struct isert_conn *isert_conn)
-{
-	struct iscsi_conn *conn = isert_conn->conn;
-
-	if (isert_conn->post_recv_buf_count)
-		return;
+		isert_cmd = ((struct iser_tx_desc *)desc)->isert_cmd;
+		if (!isert_cmd)
+			isert_unmap_tx_desc(desc, ib_dev);
+		else
+			isert_completion_put(desc, isert_cmd, ib_dev, true);
+		atomic_dec(&isert_conn->post_send_buf_count);
+	} else {
+		isert_conn->post_recv_buf_count--;
+	}
 
-	if (conn->sess) {
-		target_sess_cmd_list_set_waiting(conn->sess->se_sess);
-		target_wait_for_sess_cmds(conn->sess->se_sess);
-	}
+	if (isert_conn->post_recv_buf_count == 0 &&
+	    atomic_read(&isert_conn->post_send_buf_count) == 0) {
+		struct iscsi_conn *conn = isert_conn->conn;
 
-	while (atomic_read(&isert_conn->post_send_buf_count))
-		msleep(3000);
+		if (conn->sess) {
+			target_sess_cmd_list_set_waiting(conn->sess->se_sess);
+			target_wait_for_sess_cmds(conn->sess->se_sess);
+		}
 
-	mutex_lock(&isert_conn->conn_mutex);
-	isert_conn_terminate(isert_conn);
-	mutex_unlock(&isert_conn->conn_mutex);
+		mutex_lock(&isert_conn->conn_mutex);
+		isert_conn_terminate(isert_conn);
+		mutex_unlock(&isert_conn->conn_mutex);
 
-	iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
-
-	complete(&isert_conn->conn_wait_comp_err);
+		iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
+		complete(&isert_conn->conn_wait_comp_err);
+	}
 }
 
 static void
@@ -2073,10 +2071,8 @@ isert_cq_tx_work(struct work_struct *work)
 			pr_debug("TX wc.status: 0x%08x\n", wc.status);
 			pr_debug("TX wc.vendor_err: 0x%08x\n", wc.vendor_err);
 
-			if (wc.wr_id != ISER_FASTREG_LI_WRID) {
-				atomic_dec(&isert_conn->post_send_buf_count);
-				isert_cq_tx_comp_err(tx_desc, isert_conn);
-			}
+			if (wc.wr_id != ISER_FASTREG_LI_WRID)
+				isert_cq_comp_err(tx_desc, isert_conn, true);
 		}
 	}
 
@@ -2118,8 +2114,7 @@ isert_cq_rx_work(struct work_struct *work)
 				pr_debug("RX wc.vendor_err: 0x%08x\n",
 					 wc.vendor_err);
 			}
-			isert_conn->post_recv_buf_count--;
-			isert_cq_rx_comp_err(isert_conn);
+			isert_cq_comp_err(rx_desc, isert_conn, false);
 		}
 	}
 