Diffstat (limited to 'drivers/infiniband/ulp/isert/ib_isert.c')
-rw-r--r--  drivers/infiniband/ulp/isert/ib_isert.c  181
1 file changed, 109 insertions(+), 72 deletions(-)
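
The change below converts the iSER target's connection-teardown synchronization from wait queues (wait_event()/wake_up()) to completions, splits the shared CQ error handler into TX- and RX-specific paths, and tags fast-registration work requests with ISER_FASTREG_LI_WRID so their completions can be skipped. As a minimal standalone sketch of the completion pattern being adopted (the structure and function names here are illustrative, not the driver's actual ones, though init_completion()/complete()/wait_for_completion() are the real kernel primitives):

#include <linux/completion.h>
#include <linux/mutex.h>

/* Hypothetical connection object, not the real struct isert_conn. */
struct demo_conn {
	struct mutex		lock;
	int			state;		/* e.g. UP, TERMINATING, DOWN */
	struct completion	conn_wait;	/* signalled when teardown finishes */
};

static void demo_conn_init(struct demo_conn *conn)
{
	mutex_init(&conn->lock);
	conn->state = 0;
	/* One-shot event: replaces init_waitqueue_head() + wake_up()/wait_event(). */
	init_completion(&conn->conn_wait);
}

/* Teardown path (e.g. disconnect work): mark the state and signal the waiter. */
static void demo_conn_teardown(struct demo_conn *conn)
{
	mutex_lock(&conn->lock);
	conn->state = -1;			/* DOWN */
	mutex_unlock(&conn->lock);

	complete(&conn->conn_wait);		/* was: wake_up(&conn_wait) */
}

/* Wait path (e.g. isert_wait_conn): block until teardown has signalled. */
static void demo_conn_wait(struct demo_conn *conn)
{
	wait_for_completion(&conn->conn_wait);	/* was: wait_event() with a state check */
}

A completion is a one-shot event, so the waiter no longer needs a polling predicate such as the old isert_check_state() helper; the signalling side decides when the wait is over.
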
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index 2b161be3c1a3..8ee228e9ab5a 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -453,6 +453,7 @@ isert_conn_create_fastreg_pool(struct isert_conn *isert_conn)
 		if (ret) {
 			pr_err("Failed to create fastreg descriptor err=%d\n",
 			       ret);
+			kfree(fr_desc);
 			goto err;
 		}
 
@@ -491,12 +492,11 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
 	isert_conn->state = ISER_CONN_INIT;
 	INIT_LIST_HEAD(&isert_conn->conn_accept_node);
 	init_completion(&isert_conn->conn_login_comp);
-	init_waitqueue_head(&isert_conn->conn_wait);
-	init_waitqueue_head(&isert_conn->conn_wait_comp_err);
+	init_completion(&isert_conn->conn_wait);
+	init_completion(&isert_conn->conn_wait_comp_err);
 	kref_init(&isert_conn->conn_kref);
 	kref_get(&isert_conn->conn_kref);
 	mutex_init(&isert_conn->conn_mutex);
-	mutex_init(&isert_conn->conn_comp_mutex);
 	spin_lock_init(&isert_conn->conn_lock);
 
 	cma_id->context = isert_conn;
@@ -687,11 +687,11 @@ isert_disconnect_work(struct work_struct *work)
 
 	pr_debug("isert_disconnect_work(): >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n");
 	mutex_lock(&isert_conn->conn_mutex);
-	isert_conn->state = ISER_CONN_DOWN;
+	if (isert_conn->state == ISER_CONN_UP)
+		isert_conn->state = ISER_CONN_TERMINATING;
 
 	if (isert_conn->post_recv_buf_count == 0 &&
 	    atomic_read(&isert_conn->post_send_buf_count) == 0) {
-		pr_debug("Calling wake_up(&isert_conn->conn_wait);\n");
 		mutex_unlock(&isert_conn->conn_mutex);
 		goto wake_up;
 	}
@@ -711,7 +711,7 @@ isert_disconnect_work(struct work_struct *work)
 	mutex_unlock(&isert_conn->conn_mutex);
 
 wake_up:
-	wake_up(&isert_conn->conn_wait);
+	complete(&isert_conn->conn_wait);
 	isert_put_conn(isert_conn);
 }
 
@@ -887,16 +887,17 @@ isert_init_send_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
 	 * Coalesce send completion interrupts by only setting IB_SEND_SIGNALED
 	 * bit for every ISERT_COMP_BATCH_COUNT number of ib_post_send() calls.
 	 */
-	mutex_lock(&isert_conn->conn_comp_mutex);
-	if (coalesce &&
+	mutex_lock(&isert_conn->conn_mutex);
+	if (coalesce && isert_conn->state == ISER_CONN_UP &&
 	    ++isert_conn->conn_comp_batch < ISERT_COMP_BATCH_COUNT) {
+		tx_desc->llnode_active = true;
 		llist_add(&tx_desc->comp_llnode, &isert_conn->conn_comp_llist);
-		mutex_unlock(&isert_conn->conn_comp_mutex);
+		mutex_unlock(&isert_conn->conn_mutex);
 		return;
 	}
 	isert_conn->conn_comp_batch = 0;
 	tx_desc->comp_llnode_batch = llist_del_all(&isert_conn->conn_comp_llist);
-	mutex_unlock(&isert_conn->conn_comp_mutex);
+	mutex_unlock(&isert_conn->conn_mutex);
 
 	send_wr->send_flags = IB_SEND_SIGNALED;
 }
@@ -1463,7 +1464,7 @@ isert_put_cmd(struct isert_cmd *isert_cmd)
 	case ISCSI_OP_SCSI_CMD:
 		spin_lock_bh(&conn->cmd_lock);
 		if (!list_empty(&cmd->i_conn_node))
-			list_del(&cmd->i_conn_node);
+			list_del_init(&cmd->i_conn_node);
 		spin_unlock_bh(&conn->cmd_lock);
 
 		if (cmd->data_direction == DMA_TO_DEVICE)
@@ -1475,7 +1476,7 @@ isert_put_cmd(struct isert_cmd *isert_cmd)
 	case ISCSI_OP_SCSI_TMFUNC:
 		spin_lock_bh(&conn->cmd_lock);
 		if (!list_empty(&cmd->i_conn_node))
-			list_del(&cmd->i_conn_node);
+			list_del_init(&cmd->i_conn_node);
 		spin_unlock_bh(&conn->cmd_lock);
 
 		transport_generic_free_cmd(&cmd->se_cmd, 0);
@@ -1485,7 +1486,7 @@ isert_put_cmd(struct isert_cmd *isert_cmd)
 	case ISCSI_OP_TEXT:
 		spin_lock_bh(&conn->cmd_lock);
 		if (!list_empty(&cmd->i_conn_node))
-			list_del(&cmd->i_conn_node);
+			list_del_init(&cmd->i_conn_node);
 		spin_unlock_bh(&conn->cmd_lock);
 
 		/*
@@ -1548,6 +1549,7 @@ isert_completion_rdma_read(struct iser_tx_desc *tx_desc,
 	iscsit_stop_dataout_timer(cmd);
 	device->unreg_rdma_mem(isert_cmd, isert_conn);
 	cmd->write_data_done = wr->cur_rdma_length;
+	wr->send_wr_num = 0;
 
 	pr_debug("Cmd: %p RDMA_READ comp calling execute_cmd\n", isert_cmd);
 	spin_lock_bh(&cmd->istate_lock);
@@ -1588,7 +1590,7 @@ isert_do_control_comp(struct work_struct *work)
 		pr_debug("Calling iscsit_logout_post_handler >>>>>>>>>>>>>>\n");
 		/*
 		 * Call atomic_dec(&isert_conn->post_send_buf_count)
-		 * from isert_free_conn()
+		 * from isert_wait_conn()
 		 */
 		isert_conn->logout_posted = true;
 		iscsit_logout_post_handler(cmd, cmd->conn);
@@ -1612,6 +1614,7 @@ isert_response_completion(struct iser_tx_desc *tx_desc,
 			  struct ib_device *ib_dev)
 {
 	struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
+	struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
 
 	if (cmd->i_state == ISTATE_SEND_TASKMGTRSP ||
 	    cmd->i_state == ISTATE_SEND_LOGOUTRSP ||
@@ -1623,7 +1626,7 @@ isert_response_completion(struct iser_tx_desc *tx_desc,
 		queue_work(isert_comp_wq, &isert_cmd->comp_work);
 		return;
 	}
-	atomic_dec(&isert_conn->post_send_buf_count);
+	atomic_sub(wr->send_wr_num + 1, &isert_conn->post_send_buf_count);
 
 	cmd->i_state = ISTATE_SENT_STATUS;
 	isert_completion_put(tx_desc, isert_cmd, ib_dev);
@@ -1661,7 +1664,7 @@ __isert_send_completion(struct iser_tx_desc *tx_desc,
 	case ISER_IB_RDMA_READ:
 		pr_debug("isert_send_completion: Got ISER_IB_RDMA_READ:\n");
 
-		atomic_dec(&isert_conn->post_send_buf_count);
+		atomic_sub(wr->send_wr_num, &isert_conn->post_send_buf_count);
 		isert_completion_rdma_read(tx_desc, isert_cmd);
 		break;
 	default:
@@ -1690,31 +1693,76 @@ isert_send_completion(struct iser_tx_desc *tx_desc,
 }
 
 static void
-isert_cq_comp_err(struct iser_tx_desc *tx_desc, struct isert_conn *isert_conn)
+isert_cq_drain_comp_llist(struct isert_conn *isert_conn, struct ib_device *ib_dev)
+{
+	struct llist_node *llnode;
+	struct isert_rdma_wr *wr;
+	struct iser_tx_desc *t;
+
+	mutex_lock(&isert_conn->conn_mutex);
+	llnode = llist_del_all(&isert_conn->conn_comp_llist);
+	isert_conn->conn_comp_batch = 0;
+	mutex_unlock(&isert_conn->conn_mutex);
+
+	while (llnode) {
+		t = llist_entry(llnode, struct iser_tx_desc, comp_llnode);
+		llnode = llist_next(llnode);
+		wr = &t->isert_cmd->rdma_wr;
+
+		atomic_sub(wr->send_wr_num + 1, &isert_conn->post_send_buf_count);
+		isert_completion_put(t, t->isert_cmd, ib_dev);
+	}
+}
+
+static void
+isert_cq_tx_comp_err(struct iser_tx_desc *tx_desc, struct isert_conn *isert_conn)
 {
 	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
+	struct isert_cmd *isert_cmd = tx_desc->isert_cmd;
+	struct llist_node *llnode = tx_desc->comp_llnode_batch;
+	struct isert_rdma_wr *wr;
+	struct iser_tx_desc *t;
 
-	if (tx_desc) {
-		struct isert_cmd *isert_cmd = tx_desc->isert_cmd;
+	while (llnode) {
+		t = llist_entry(llnode, struct iser_tx_desc, comp_llnode);
+		llnode = llist_next(llnode);
+		wr = &t->isert_cmd->rdma_wr;
 
-		if (!isert_cmd)
-			isert_unmap_tx_desc(tx_desc, ib_dev);
-		else
-			isert_completion_put(tx_desc, isert_cmd, ib_dev);
+		atomic_sub(wr->send_wr_num + 1, &isert_conn->post_send_buf_count);
+		isert_completion_put(t, t->isert_cmd, ib_dev);
 	}
+	tx_desc->comp_llnode_batch = NULL;
 
-	if (isert_conn->post_recv_buf_count == 0 &&
-	    atomic_read(&isert_conn->post_send_buf_count) == 0) {
-		pr_debug("isert_cq_comp_err >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n");
-		pr_debug("Calling wake_up from isert_cq_comp_err\n");
+	if (!isert_cmd)
+		isert_unmap_tx_desc(tx_desc, ib_dev);
+	else
+		isert_completion_put(tx_desc, isert_cmd, ib_dev);
+}
 
-		mutex_lock(&isert_conn->conn_mutex);
-		if (isert_conn->state != ISER_CONN_DOWN)
-			isert_conn->state = ISER_CONN_TERMINATING;
-		mutex_unlock(&isert_conn->conn_mutex);
+static void
+isert_cq_rx_comp_err(struct isert_conn *isert_conn)
+{
+	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
+	struct iscsi_conn *conn = isert_conn->conn;
 
-		wake_up(&isert_conn->conn_wait_comp_err);
+	if (isert_conn->post_recv_buf_count)
+		return;
+
+	isert_cq_drain_comp_llist(isert_conn, ib_dev);
+
+	if (conn->sess) {
+		target_sess_cmd_list_set_waiting(conn->sess->se_sess);
+		target_wait_for_sess_cmds(conn->sess->se_sess);
 	}
+
+	while (atomic_read(&isert_conn->post_send_buf_count))
+		msleep(3000);
+
+	mutex_lock(&isert_conn->conn_mutex);
+	isert_conn->state = ISER_CONN_DOWN;
+	mutex_unlock(&isert_conn->conn_mutex);
+
+	complete(&isert_conn->conn_wait_comp_err);
 }
 
 static void
@@ -1739,8 +1787,14 @@ isert_cq_tx_work(struct work_struct *work)
 			pr_debug("TX wc.status != IB_WC_SUCCESS >>>>>>>>>>>>>>\n");
 			pr_debug("TX wc.status: 0x%08x\n", wc.status);
 			pr_debug("TX wc.vendor_err: 0x%08x\n", wc.vendor_err);
-			atomic_dec(&isert_conn->post_send_buf_count);
-			isert_cq_comp_err(tx_desc, isert_conn);
+
+			if (wc.wr_id != ISER_FASTREG_LI_WRID) {
+				if (tx_desc->llnode_active)
+					continue;
+
+				atomic_dec(&isert_conn->post_send_buf_count);
+				isert_cq_tx_comp_err(tx_desc, isert_conn);
+			}
 		}
 	}
 
@@ -1783,7 +1837,7 @@ isert_cq_rx_work(struct work_struct *work)
 					 wc.vendor_err);
 			}
 			isert_conn->post_recv_buf_count--;
-			isert_cq_comp_err(NULL, isert_conn);
+			isert_cq_rx_comp_err(isert_conn);
 		}
 	}
 
@@ -2201,6 +2255,7 @@ isert_fast_reg_mr(struct fast_reg_descriptor *fr_desc,
 
 	if (!fr_desc->valid) {
 		memset(&inv_wr, 0, sizeof(inv_wr));
+		inv_wr.wr_id = ISER_FASTREG_LI_WRID;
 		inv_wr.opcode = IB_WR_LOCAL_INV;
 		inv_wr.ex.invalidate_rkey = fr_desc->data_mr->rkey;
 		wr = &inv_wr;
@@ -2211,6 +2266,7 @@ isert_fast_reg_mr(struct fast_reg_descriptor *fr_desc,
 
 	/* Prepare FASTREG WR */
 	memset(&fr_wr, 0, sizeof(fr_wr));
+	fr_wr.wr_id = ISER_FASTREG_LI_WRID;
 	fr_wr.opcode = IB_WR_FAST_REG_MR;
 	fr_wr.wr.fast_reg.iova_start =
 		fr_desc->data_frpl->page_list[0] + page_off;
@@ -2376,12 +2432,12 @@ isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
 	isert_init_send_wr(isert_conn, isert_cmd,
 			   &isert_cmd->tx_desc.send_wr, true);
 
-	atomic_inc(&isert_conn->post_send_buf_count);
+	atomic_add(wr->send_wr_num + 1, &isert_conn->post_send_buf_count);
 
 	rc = ib_post_send(isert_conn->conn_qp, wr->send_wr, &wr_failed);
 	if (rc) {
 		pr_warn("ib_post_send() failed for IB_WR_RDMA_WRITE\n");
-		atomic_dec(&isert_conn->post_send_buf_count);
+		atomic_sub(wr->send_wr_num + 1, &isert_conn->post_send_buf_count);
 	}
 	pr_debug("Cmd: %p posted RDMA_WRITE + Response for iSER Data READ\n",
 		 isert_cmd);
@@ -2409,12 +2465,12 @@ isert_get_dataout(struct iscsi_conn *conn, struct iscsi_cmd *cmd, bool recovery)
 		return rc;
 	}
 
-	atomic_inc(&isert_conn->post_send_buf_count);
+	atomic_add(wr->send_wr_num, &isert_conn->post_send_buf_count);
 
 	rc = ib_post_send(isert_conn->conn_qp, wr->send_wr, &wr_failed);
 	if (rc) {
 		pr_warn("ib_post_send() failed for IB_WR_RDMA_READ\n");
-		atomic_dec(&isert_conn->post_send_buf_count);
+		atomic_sub(wr->send_wr_num, &isert_conn->post_send_buf_count);
 	}
 	pr_debug("Cmd: %p posted RDMA_READ memory for ISER Data WRITE\n",
 		 isert_cmd);
@@ -2701,22 +2757,11 @@ isert_free_np(struct iscsi_np *np)
 	kfree(isert_np);
 }
 
-static int isert_check_state(struct isert_conn *isert_conn, int state)
-{
-	int ret;
-
-	mutex_lock(&isert_conn->conn_mutex);
-	ret = (isert_conn->state == state);
-	mutex_unlock(&isert_conn->conn_mutex);
-
-	return ret;
-}
-
-static void isert_free_conn(struct iscsi_conn *conn)
+static void isert_wait_conn(struct iscsi_conn *conn)
 {
 	struct isert_conn *isert_conn = conn->context;
 
-	pr_debug("isert_free_conn: Starting \n");
+	pr_debug("isert_wait_conn: Starting \n");
 	/*
 	 * Decrement post_send_buf_count for special case when called
 	 * from isert_do_control_comp() -> iscsit_logout_post_handler()
@@ -2726,38 +2771,29 @@ static void isert_free_conn(struct iscsi_conn *conn)
 	atomic_dec(&isert_conn->post_send_buf_count);
 
 	if (isert_conn->conn_cm_id && isert_conn->state != ISER_CONN_DOWN) {
-		pr_debug("Calling rdma_disconnect from isert_free_conn\n");
+		pr_debug("Calling rdma_disconnect from isert_wait_conn\n");
 		rdma_disconnect(isert_conn->conn_cm_id);
 	}
 	/*
 	 * Only wait for conn_wait_comp_err if the isert_conn made it
 	 * into full feature phase..
 	 */
-	if (isert_conn->state == ISER_CONN_UP) {
-		pr_debug("isert_free_conn: Before wait_event comp_err %d\n",
-			 isert_conn->state);
-		mutex_unlock(&isert_conn->conn_mutex);
-
-		wait_event(isert_conn->conn_wait_comp_err,
-			  (isert_check_state(isert_conn, ISER_CONN_TERMINATING)));
-
-		wait_event(isert_conn->conn_wait,
-			  (isert_check_state(isert_conn, ISER_CONN_DOWN)));
-
-		isert_put_conn(isert_conn);
-		return;
-	}
 	if (isert_conn->state == ISER_CONN_INIT) {
 		mutex_unlock(&isert_conn->conn_mutex);
-		isert_put_conn(isert_conn);
 		return;
 	}
-	pr_debug("isert_free_conn: wait_event conn_wait %d\n",
-		 isert_conn->state);
+	if (isert_conn->state == ISER_CONN_UP)
+		isert_conn->state = ISER_CONN_TERMINATING;
 	mutex_unlock(&isert_conn->conn_mutex);
 
-	wait_event(isert_conn->conn_wait,
-		  (isert_check_state(isert_conn, ISER_CONN_DOWN)));
+	wait_for_completion(&isert_conn->conn_wait_comp_err);
+
+	wait_for_completion(&isert_conn->conn_wait);
+}
+
+static void isert_free_conn(struct iscsi_conn *conn)
+{
+	struct isert_conn *isert_conn = conn->context;
 
 	isert_put_conn(isert_conn);
 }
@@ -2770,6 +2806,7 @@ static struct iscsit_transport iser_target_transport = {
 	.iscsit_setup_np	= isert_setup_np,
 	.iscsit_accept_np	= isert_accept_np,
 	.iscsit_free_np		= isert_free_np,
+	.iscsit_wait_conn	= isert_wait_conn,
 	.iscsit_free_conn	= isert_free_conn,
 	.iscsit_get_login_rx	= isert_get_login_rx,
 	.iscsit_put_login_tx	= isert_put_login_tx,
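
The RDMA paths above also change post_send_buf_count from a per-ib_post_send() atomic_inc()/atomic_dec() to counting every work request in the posted chain: send_wr_num for an RDMA_READ, and send_wr_num + 1 for an RDMA_WRITE chain that is followed by the response send. A rough sketch of that accounting with hypothetical names (only atomic_add()/atomic_sub() and ATOMIC_INIT() are the real kernel APIs; the real driver keeps the counter in struct isert_conn and the WR count in struct isert_rdma_wr):

#include <linux/atomic.h>

/* Hypothetical descriptor for one posted chain of RDMA work requests. */
struct demo_post {
	int send_wr_num;	/* number of RDMA WRs in the chain */
};

static atomic_t post_send_buf_count = ATOMIC_INIT(0);

/* Before posting an RDMA_WRITE chain plus its trailing response WR. */
static void demo_account_post(struct demo_post *post)
{
	/* was: atomic_inc(&post_send_buf_count); */
	atomic_add(post->send_wr_num + 1, &post_send_buf_count);
}

/* On completion, or when ib_post_send() fails, give back the same amount. */
static void demo_account_done(struct demo_post *post)
{
	atomic_sub(post->send_wr_num + 1, &post_send_buf_count);
}

Counting the whole chain keeps the counter balanced when an error completion or a flush has to retire several work requests at once, which is what the new isert_cq_tx_comp_err()/isert_cq_drain_comp_llist() paths rely on.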