Diffstat (limited to 'drivers/infiniband/ulp')
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib.h       |   8
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_fs.c    |   6
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_ib.c    | 133
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_main.c  |  13
-rw-r--r--  drivers/infiniband/ulp/iser/iscsi_iser.c   |  47
-rw-r--r--  drivers/infiniband/ulp/iser/iscsi_iser.h   |   8
-rw-r--r--  drivers/infiniband/ulp/iser/iser_verbs.c   | 128
-rw-r--r--  drivers/infiniband/ulp/srp/ib_srp.c        |  48
-rw-r--r--  drivers/infiniband/ulp/srpt/ib_srpt.c      |   3
9 files changed, 214 insertions, 180 deletions
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index c639f90cfda4..3edce617c31b 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -86,7 +86,6 @@ enum {
 	IPOIB_FLAG_INITIALIZED	= 1,
 	IPOIB_FLAG_ADMIN_UP	= 2,
 	IPOIB_PKEY_ASSIGNED	= 3,
-	IPOIB_PKEY_STOP		= 4,
 	IPOIB_FLAG_SUBINTERFACE	= 5,
 	IPOIB_MCAST_RUN		= 6,
 	IPOIB_STOP_REAPER	= 7,
@@ -312,7 +311,6 @@ struct ipoib_dev_priv {
 	struct list_head multicast_list;
 	struct rb_root multicast_tree;
 
-	struct delayed_work pkey_poll_task;
 	struct delayed_work mcast_task;
 	struct work_struct carrier_on_task;
 	struct work_struct flush_light;
@@ -473,10 +471,11 @@ void ipoib_ib_dev_flush_heavy(struct work_struct *work);
 void ipoib_pkey_event(struct work_struct *work);
 void ipoib_ib_dev_cleanup(struct net_device *dev);
 
-int ipoib_ib_dev_open(struct net_device *dev);
+int ipoib_ib_dev_open(struct net_device *dev, int flush);
 int ipoib_ib_dev_up(struct net_device *dev);
 int ipoib_ib_dev_down(struct net_device *dev, int flush);
 int ipoib_ib_dev_stop(struct net_device *dev, int flush);
+void ipoib_pkey_dev_check_presence(struct net_device *dev);
 
 int ipoib_dev_init(struct net_device *dev, struct ib_device *ca, int port);
 void ipoib_dev_cleanup(struct net_device *dev);
@@ -532,8 +531,7 @@ int ipoib_set_mode(struct net_device *dev, const char *buf);
 
 void ipoib_setup(struct net_device *dev);
 
-void ipoib_pkey_poll(struct work_struct *work);
-int ipoib_pkey_dev_delay_open(struct net_device *dev);
+void ipoib_pkey_open(struct ipoib_dev_priv *priv);
 void ipoib_drain_cq(struct net_device *dev);
 
 void ipoib_set_ethtool_ops(struct net_device *dev);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_fs.c b/drivers/infiniband/ulp/ipoib/ipoib_fs.c
index 50061854616e..6bd5740e2691 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_fs.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_fs.c
@@ -281,10 +281,8 @@ void ipoib_delete_debug_files(struct net_device *dev)
 {
 	struct ipoib_dev_priv *priv = netdev_priv(dev);
 
-	if (priv->mcg_dentry)
-		debugfs_remove(priv->mcg_dentry);
-	if (priv->path_dentry)
-		debugfs_remove(priv->path_dentry);
+	debugfs_remove(priv->mcg_dentry);
+	debugfs_remove(priv->path_dentry);
 }
 
 int ipoib_register_debugfs(void)
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index 6a7003ddb0be..72626c348174 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -664,17 +664,18 @@ static void ipoib_ib_tx_timer_func(unsigned long ctx)
 	drain_tx_cq((struct net_device *)ctx);
 }
 
-int ipoib_ib_dev_open(struct net_device *dev)
+int ipoib_ib_dev_open(struct net_device *dev, int flush)
 {
 	struct ipoib_dev_priv *priv = netdev_priv(dev);
 	int ret;
 
-	if (ib_find_pkey(priv->ca, priv->port, priv->pkey, &priv->pkey_index)) {
-		ipoib_warn(priv, "P_Key 0x%04x not found\n", priv->pkey);
-		clear_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
+	ipoib_pkey_dev_check_presence(dev);
+
+	if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags)) {
+		ipoib_warn(priv, "P_Key 0x%04x is %s\n", priv->pkey,
+			   (!(priv->pkey & 0x7fff) ? "Invalid" : "not found"));
 		return -1;
 	}
-	set_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
 
 	ret = ipoib_init_qp(dev);
 	if (ret) {
@@ -705,16 +706,17 @@ int ipoib_ib_dev_open(struct net_device *dev)
 dev_stop:
 	if (!test_and_set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
 		napi_enable(&priv->napi);
-	ipoib_ib_dev_stop(dev, 1);
+	ipoib_ib_dev_stop(dev, flush);
 	return -1;
 }
 
-static void ipoib_pkey_dev_check_presence(struct net_device *dev)
+void ipoib_pkey_dev_check_presence(struct net_device *dev)
 {
 	struct ipoib_dev_priv *priv = netdev_priv(dev);
-	u16 pkey_index = 0;
 
-	if (ib_find_pkey(priv->ca, priv->port, priv->pkey, &pkey_index))
+	if (!(priv->pkey & 0x7fff) ||
+	    ib_find_pkey(priv->ca, priv->port, priv->pkey,
+			 &priv->pkey_index))
 		clear_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
 	else
 		set_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
@@ -745,14 +747,6 @@ int ipoib_ib_dev_down(struct net_device *dev, int flush)
 	clear_bit(IPOIB_FLAG_OPER_UP, &priv->flags);
 	netif_carrier_off(dev);
 
-	/* Shutdown the P_Key thread if still active */
-	if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags)) {
-		mutex_lock(&pkey_mutex);
-		set_bit(IPOIB_PKEY_STOP, &priv->flags);
-		cancel_delayed_work_sync(&priv->pkey_poll_task);
-		mutex_unlock(&pkey_mutex);
-	}
-
 	ipoib_mcast_stop_thread(dev, flush);
 	ipoib_mcast_dev_flush(dev);
 
@@ -924,7 +918,7 @@ int ipoib_ib_dev_init(struct net_device *dev, struct ib_device *ca, int port)
 		    (unsigned long) dev);
 
 	if (dev->flags & IFF_UP) {
-		if (ipoib_ib_dev_open(dev)) {
+		if (ipoib_ib_dev_open(dev, 1)) {
 			ipoib_transport_dev_cleanup(dev);
 			return -ENODEV;
 		}
@@ -966,13 +960,27 @@ static inline int update_parent_pkey(struct ipoib_dev_priv *priv)
 
 	return 1;
 }
+/*
+ * returns 0 if pkey value was found in a different slot.
+ */
+static inline int update_child_pkey(struct ipoib_dev_priv *priv)
+{
+	u16 old_index = priv->pkey_index;
+
+	priv->pkey_index = 0;
+	ipoib_pkey_dev_check_presence(priv->dev);
+
+	if (test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags) &&
+	    (old_index == priv->pkey_index))
+		return 1;
+	return 0;
+}
 
 static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv,
 				 enum ipoib_flush_level level)
 {
 	struct ipoib_dev_priv *cpriv;
 	struct net_device *dev = priv->dev;
-	u16 new_index;
 	int result;
 
 	down_read(&priv->vlan_rwsem);
@@ -986,16 +994,20 @@ static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv,
 
 	up_read(&priv->vlan_rwsem);
 
-	if (!test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags)) {
-		/* for non-child devices must check/update the pkey value here */
-		if (level == IPOIB_FLUSH_HEAVY &&
-		    !test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags))
-			update_parent_pkey(priv);
+	if (!test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags) &&
+	    level != IPOIB_FLUSH_HEAVY) {
 		ipoib_dbg(priv, "Not flushing - IPOIB_FLAG_INITIALIZED not set.\n");
 		return;
 	}
 
 	if (!test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)) {
+		/* interface is down. update pkey and leave. */
+		if (level == IPOIB_FLUSH_HEAVY) {
+			if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags))
+				update_parent_pkey(priv);
+			else
+				update_child_pkey(priv);
+		}
 		ipoib_dbg(priv, "Not flushing - IPOIB_FLAG_ADMIN_UP not set.\n");
 		return;
 	}
@@ -1005,20 +1017,13 @@ static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv,
 	 * (parent) devices should always takes what present in pkey index 0
 	 */
 	if (test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
-		if (ib_find_pkey(priv->ca, priv->port, priv->pkey, &new_index)) {
-			clear_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
-			ipoib_ib_dev_down(dev, 0);
-			ipoib_ib_dev_stop(dev, 0);
-			if (ipoib_pkey_dev_delay_open(dev))
-				return;
-		}
-		/* restart QP only if P_Key index is changed */
-		if (test_and_set_bit(IPOIB_PKEY_ASSIGNED, &priv->flags) &&
-		    new_index == priv->pkey_index) {
+		result = update_child_pkey(priv);
+		if (result) {
+			/* restart QP only if P_Key index is changed */
 			ipoib_dbg(priv, "Not flushing - P_Key index not changed.\n");
 			return;
 		}
-		priv->pkey_index = new_index;
+
 	} else {
 		result = update_parent_pkey(priv);
 		/* restart QP only if P_Key value changed */
@@ -1038,8 +1043,12 @@ static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv,
 	ipoib_ib_dev_down(dev, 0);
 
 	if (level == IPOIB_FLUSH_HEAVY) {
-		ipoib_ib_dev_stop(dev, 0);
-		ipoib_ib_dev_open(dev);
+		if (test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
+			ipoib_ib_dev_stop(dev, 0);
+		if (ipoib_ib_dev_open(dev, 0) != 0)
+			return;
+		if (netif_queue_stopped(dev))
+			netif_start_queue(dev);
 	}
 
 	/*
@@ -1094,54 +1103,4 @@ void ipoib_ib_dev_cleanup(struct net_device *dev)
 	ipoib_transport_dev_cleanup(dev);
 }
 
-/*
- * Delayed P_Key Assigment Interim Support
- *
- * The following is initial implementation of delayed P_Key assigment
- * mechanism. It is using the same approach implemented for the multicast
- * group join. The single goal of this implementation is to quickly address
- * Bug #2507. This implementation will probably be removed when the P_Key
- * change async notification is available.
- */
-
-void ipoib_pkey_poll(struct work_struct *work)
-{
-	struct ipoib_dev_priv *priv =
-		container_of(work, struct ipoib_dev_priv, pkey_poll_task.work);
-	struct net_device *dev = priv->dev;
-
-	ipoib_pkey_dev_check_presence(dev);
-
-	if (test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags))
-		ipoib_open(dev);
-	else {
-		mutex_lock(&pkey_mutex);
-		if (!test_bit(IPOIB_PKEY_STOP, &priv->flags))
-			queue_delayed_work(ipoib_workqueue,
-					   &priv->pkey_poll_task,
-					   HZ);
-		mutex_unlock(&pkey_mutex);
-	}
-}
-
-int ipoib_pkey_dev_delay_open(struct net_device *dev)
-{
-	struct ipoib_dev_priv *priv = netdev_priv(dev);
-
-	/* Look for the interface pkey value in the IB Port P_Key table and */
-	/* set the interface pkey assigment flag */
-	ipoib_pkey_dev_check_presence(dev);
 
-	/* P_Key value not assigned yet - start polling */
-	if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags)) {
-		mutex_lock(&pkey_mutex);
-		clear_bit(IPOIB_PKEY_STOP, &priv->flags);
-		queue_delayed_work(ipoib_workqueue,
-				   &priv->pkey_poll_task,
-				   HZ);
-		mutex_unlock(&pkey_mutex);
-		return 1;
-	}
-
-	return 0;
-}
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 5786a78ff8bc..1310acf6bf92 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -108,11 +108,11 @@ int ipoib_open(struct net_device *dev)
 
 	set_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);
 
-	if (ipoib_pkey_dev_delay_open(dev))
-		return 0;
-
-	if (ipoib_ib_dev_open(dev))
+	if (ipoib_ib_dev_open(dev, 1)) {
+		if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags))
+			return 0;
 		goto err_disable;
+	}
 
 	if (ipoib_ib_dev_up(dev))
 		goto err_stop;
@@ -1379,7 +1379,6 @@ void ipoib_setup(struct net_device *dev)
 	INIT_LIST_HEAD(&priv->dead_ahs);
 	INIT_LIST_HEAD(&priv->multicast_list);
 
-	INIT_DELAYED_WORK(&priv->pkey_poll_task, ipoib_pkey_poll);
 	INIT_DELAYED_WORK(&priv->mcast_task,   ipoib_mcast_join_task);
 	INIT_WORK(&priv->carrier_on_task, ipoib_mcast_carrier_on_task);
 	INIT_WORK(&priv->flush_light,   ipoib_ib_dev_flush_light);
@@ -1394,8 +1393,8 @@ struct ipoib_dev_priv *ipoib_intf_alloc(const char *name)
 {
 	struct net_device *dev;
 
-	dev = alloc_netdev((int) sizeof (struct ipoib_dev_priv), name,
-			   ipoib_setup);
+	dev = alloc_netdev((int)sizeof(struct ipoib_dev_priv), name,
+			   NET_NAME_UNKNOWN, ipoib_setup);
 	if (!dev)
 		return NULL;
 
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
index eb7973957a6e..61ee91d88380 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
@@ -596,20 +596,28 @@ iscsi_iser_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr,
 	struct iser_conn *ib_conn;
 	struct iscsi_endpoint *ep;
 
-	ep = iscsi_create_endpoint(sizeof(*ib_conn));
+	ep = iscsi_create_endpoint(0);
 	if (!ep)
 		return ERR_PTR(-ENOMEM);
 
-	ib_conn = ep->dd_data;
+	ib_conn = kzalloc(sizeof(*ib_conn), GFP_KERNEL);
+	if (!ib_conn) {
+		err = -ENOMEM;
+		goto failure;
+	}
+
+	ep->dd_data = ib_conn;
 	ib_conn->ep = ep;
 	iser_conn_init(ib_conn);
 
-	err = iser_connect(ib_conn, NULL, (struct sockaddr_in *)dst_addr,
-			   non_blocking);
+	err = iser_connect(ib_conn, NULL, dst_addr, non_blocking);
 	if (err)
-		return ERR_PTR(err);
+		goto failure;
 
 	return ep;
+failure:
+	iscsi_destroy_endpoint(ep);
+	return ERR_PTR(err);
 }
 
 static int
@@ -619,15 +627,16 @@ iscsi_iser_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
 	int rc;
 
 	ib_conn = ep->dd_data;
-	rc = wait_event_interruptible_timeout(ib_conn->wait,
-			     ib_conn->state == ISER_CONN_UP,
-			     msecs_to_jiffies(timeout_ms));
-
+	rc = wait_for_completion_interruptible_timeout(&ib_conn->up_completion,
+						       msecs_to_jiffies(timeout_ms));
 	/* if conn establishment failed, return error code to iscsi */
-	if (!rc &&
-	    (ib_conn->state == ISER_CONN_TERMINATING ||
-	     ib_conn->state == ISER_CONN_DOWN))
-		rc = -1;
+	if (rc == 0) {
+		mutex_lock(&ib_conn->state_mutex);
+		if (ib_conn->state == ISER_CONN_TERMINATING ||
+		    ib_conn->state == ISER_CONN_DOWN)
+			rc = -1;
+		mutex_unlock(&ib_conn->state_mutex);
+	}
 
 	iser_info("ib conn %p rc = %d\n", ib_conn, rc);
 
@@ -646,19 +655,25 @@ iscsi_iser_ep_disconnect(struct iscsi_endpoint *ep)
 
 	ib_conn = ep->dd_data;
 	iser_info("ep %p ib conn %p state %d\n", ep, ib_conn, ib_conn->state);
+	mutex_lock(&ib_conn->state_mutex);
 	iser_conn_terminate(ib_conn);
 
 	/*
-	 * if iser_conn and iscsi_conn are bound, we must wait iscsi_conn_stop
-	 * call and ISER_CONN_DOWN state before freeing the iser resources.
-	 * otherwise we are safe to free resources immediately.
+	 * if iser_conn and iscsi_conn are bound, we must wait for
+	 * iscsi_conn_stop and flush errors completion before freeing
+	 * the iser resources. Otherwise we are safe to free resources
+	 * immediately.
 	 */
 	if (ib_conn->iscsi_conn) {
 		INIT_WORK(&ib_conn->release_work, iser_release_work);
 		queue_work(release_wq, &ib_conn->release_work);
+		mutex_unlock(&ib_conn->state_mutex);
 	} else {
+		ib_conn->state = ISER_CONN_DOWN;
+		mutex_unlock(&ib_conn->state_mutex);
 		iser_conn_release(ib_conn);
 	}
+	iscsi_destroy_endpoint(ep);
 }
 
 static umode_t iser_attr_is_visible(int param_type, int param)
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h
index 97cd385bf7f7..c877dad381cb 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.h
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.h
@@ -326,7 +326,6 @@ struct iser_conn {
 	struct iser_device           *device;       /* device context          */
 	struct rdma_cm_id            *cma_id;       /* CMA ID                  */
 	struct ib_qp                 *qp;           /* QP                      */
-	wait_queue_head_t            wait;          /* waitq for conn/disconn  */
 	unsigned                     qp_max_recv_dtos;      /* num of rx buffers */
 	unsigned                     qp_max_recv_dtos_mask; /* above minus 1 */
 	unsigned                     min_posted_rx;         /* qp_max_recv_dtos >> 2 */
@@ -335,6 +334,9 @@ struct iser_conn {
 	char                         name[ISER_OBJECT_NAME_SIZE];
 	struct work_struct           release_work;
 	struct completion            stop_completion;
+	struct mutex                 state_mutex;
+	struct completion            flush_completion;
+	struct completion            up_completion;
 	struct list_head             conn_list;       /* entry in ig conn list */
 
 	char                         *login_buf;
@@ -448,8 +450,8 @@ int iser_reg_rdma_mem_fastreg(struct iscsi_iser_task *task,
 			      enum iser_data_dir cmd_dir);
 
 int  iser_connect(struct iser_conn   *ib_conn,
-		  struct sockaddr_in *src_addr,
-		  struct sockaddr_in *dst_addr,
+		  struct sockaddr    *src_addr,
+		  struct sockaddr    *dst_addr,
 		  int                non_blocking);
 
 int  iser_reg_page_vec(struct iser_conn *ib_conn,
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
index ea01075f9f9b..3ef167f97d6f 100644
--- a/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -491,10 +491,9 @@ out_err:
 }
 
 /**
- * releases the QP objects, returns 0 on success,
- * -1 on failure
+ * releases the QP object
  */
-static int iser_free_ib_conn_res(struct iser_conn *ib_conn)
+static void iser_free_ib_conn_res(struct iser_conn *ib_conn)
 {
 	int cq_index;
 	BUG_ON(ib_conn == NULL);
@@ -513,8 +512,6 @@ static int iser_free_ib_conn_res(struct iser_conn *ib_conn)
 	}
 
 	ib_conn->qp = NULL;
-
-	return 0;
 }
 
 /**
@@ -568,31 +565,40 @@ static void iser_device_try_release(struct iser_device *device)
 	mutex_unlock(&ig.device_list_mutex);
 }
 
+/**
+ * Called with state mutex held
+ **/
 static int iser_conn_state_comp_exch(struct iser_conn *ib_conn,
 				     enum iser_ib_conn_state comp,
 				     enum iser_ib_conn_state exch)
 {
 	int ret;
 
-	spin_lock_bh(&ib_conn->lock);
 	if ((ret = (ib_conn->state == comp)))
 		ib_conn->state = exch;
-	spin_unlock_bh(&ib_conn->lock);
 	return ret;
 }
 
 void iser_release_work(struct work_struct *work)
 {
 	struct iser_conn *ib_conn;
+	int rc;
 
 	ib_conn = container_of(work, struct iser_conn, release_work);
 
 	/* wait for .conn_stop callback */
-	wait_for_completion(&ib_conn->stop_completion);
+	rc = wait_for_completion_timeout(&ib_conn->stop_completion, 30 * HZ);
+	WARN_ON(rc == 0);
 
 	/* wait for the qp`s post send and post receive buffers to empty */
-	wait_event_interruptible(ib_conn->wait,
-				 ib_conn->state == ISER_CONN_DOWN);
+	rc = wait_for_completion_timeout(&ib_conn->flush_completion, 30 * HZ);
+	WARN_ON(rc == 0);
+
+	ib_conn->state = ISER_CONN_DOWN;
+
+	mutex_lock(&ib_conn->state_mutex);
+	ib_conn->state = ISER_CONN_DOWN;
+	mutex_unlock(&ib_conn->state_mutex);
 
 	iser_conn_release(ib_conn);
 }
@@ -604,23 +610,27 @@ void iser_conn_release(struct iser_conn *ib_conn)
 {
 	struct iser_device *device = ib_conn->device;
 
-	BUG_ON(ib_conn->state == ISER_CONN_UP);
-
 	mutex_lock(&ig.connlist_mutex);
 	list_del(&ib_conn->conn_list);
 	mutex_unlock(&ig.connlist_mutex);
+
+	mutex_lock(&ib_conn->state_mutex);
+	BUG_ON(ib_conn->state != ISER_CONN_DOWN);
+
 	iser_free_rx_descriptors(ib_conn);
 	iser_free_ib_conn_res(ib_conn);
 	ib_conn->device = NULL;
 	/* on EVENT_ADDR_ERROR there's no device yet for this conn */
 	if (device != NULL)
 		iser_device_try_release(device);
+	mutex_unlock(&ib_conn->state_mutex);
+
 	/* if cma handler context, the caller actually destroy the id */
 	if (ib_conn->cma_id != NULL) {
 		rdma_destroy_id(ib_conn->cma_id);
 		ib_conn->cma_id = NULL;
 	}
-	iscsi_destroy_endpoint(ib_conn->ep);
+	kfree(ib_conn);
 }
 
 /**
@@ -642,22 +652,31 @@ void iser_conn_terminate(struct iser_conn *ib_conn)
 			 ib_conn, err);
 }
 
+/**
+ * Called with state mutex held
+ **/
 static void iser_connect_error(struct rdma_cm_id *cma_id)
 {
 	struct iser_conn *ib_conn;
 
 	ib_conn = (struct iser_conn *)cma_id->context;
-
 	ib_conn->state = ISER_CONN_DOWN;
-	wake_up_interruptible(&ib_conn->wait);
 }
 
+/**
+ * Called with state mutex held
+ **/
 static void iser_addr_handler(struct rdma_cm_id *cma_id)
 {
 	struct iser_device *device;
 	struct iser_conn   *ib_conn;
 	int    ret;
 
+	ib_conn = (struct iser_conn *)cma_id->context;
+	if (ib_conn->state != ISER_CONN_PENDING)
+		/* bailout */
+		return;
+
 	device = iser_device_find_by_ib_device(cma_id);
 	if (!device) {
 		iser_err("device lookup/creation failed\n");
@@ -665,7 +684,6 @@ static void iser_addr_handler(struct rdma_cm_id *cma_id)
 		return;
 	}
 
-	ib_conn = (struct iser_conn *)cma_id->context;
 	ib_conn->device = device;
 
 	/* connection T10-PI support */
@@ -689,18 +707,27 @@ static void iser_addr_handler(struct rdma_cm_id *cma_id)
 	}
 }
 
+/**
+ * Called with state mutex held
+ **/
 static void iser_route_handler(struct rdma_cm_id *cma_id)
 {
 	struct rdma_conn_param conn_param;
 	int    ret;
 	struct iser_cm_hdr req_hdr;
+	struct iser_conn *ib_conn = (struct iser_conn *)cma_id->context;
+	struct iser_device *device = ib_conn->device;
+
+	if (ib_conn->state != ISER_CONN_PENDING)
+		/* bailout */
+		return;
 
 	ret = iser_create_ib_conn_res((struct iser_conn *)cma_id->context);
 	if (ret)
 		goto failure;
 
 	memset(&conn_param, 0, sizeof conn_param);
-	conn_param.responder_resources = 4;
+	conn_param.responder_resources = device->dev_attr.max_qp_rd_atom;
 	conn_param.initiator_depth     = 1;
 	conn_param.retry_count	       = 7;
 	conn_param.rnr_retry_count     = 6;
@@ -728,12 +755,16 @@ static void iser_connected_handler(struct rdma_cm_id *cma_id)
 	struct ib_qp_attr attr;
 	struct ib_qp_init_attr init_attr;
 
+	ib_conn = (struct iser_conn *)cma_id->context;
+	if (ib_conn->state != ISER_CONN_PENDING)
+		/* bailout */
+		return;
+
 	(void)ib_query_qp(cma_id->qp, &attr, ~0, &init_attr);
 	iser_info("remote qpn:%x my qpn:%x\n", attr.dest_qp_num, cma_id->qp->qp_num);
 
-	ib_conn = (struct iser_conn *)cma_id->context;
-	if (iser_conn_state_comp_exch(ib_conn, ISER_CONN_PENDING, ISER_CONN_UP))
-		wake_up_interruptible(&ib_conn->wait);
+	ib_conn->state = ISER_CONN_UP;
+	complete(&ib_conn->up_completion);
 }
 
 static void iser_disconnected_handler(struct rdma_cm_id *cma_id)
@@ -752,19 +783,25 @@ static void iser_disconnected_handler(struct rdma_cm_id *cma_id)
752 iser_err("iscsi_iser connection isn't bound\n"); 783 iser_err("iscsi_iser connection isn't bound\n");
753 } 784 }
754 785
755 /* Complete the termination process if no posts are pending */ 786 /* Complete the termination process if no posts are pending. This code
787 * block also exists in iser_handle_comp_error(), but it is needed here
788 * for cases of no flushes at all, e.g. discovery over rdma.
789 */
756 if (ib_conn->post_recv_buf_count == 0 && 790 if (ib_conn->post_recv_buf_count == 0 &&
757 (atomic_read(&ib_conn->post_send_buf_count) == 0)) { 791 (atomic_read(&ib_conn->post_send_buf_count) == 0)) {
758 ib_conn->state = ISER_CONN_DOWN; 792 complete(&ib_conn->flush_completion);
759 wake_up_interruptible(&ib_conn->wait);
760 } 793 }
761} 794}
762 795
763static int iser_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event) 796static int iser_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
764{ 797{
798 struct iser_conn *ib_conn;
799
800 ib_conn = (struct iser_conn *)cma_id->context;
765 iser_info("event %d status %d conn %p id %p\n", 801 iser_info("event %d status %d conn %p id %p\n",
766 event->event, event->status, cma_id->context, cma_id); 802 event->event, event->status, cma_id->context, cma_id);
767 803
804 mutex_lock(&ib_conn->state_mutex);
768 switch (event->event) { 805 switch (event->event) {
769 case RDMA_CM_EVENT_ADDR_RESOLVED: 806 case RDMA_CM_EVENT_ADDR_RESOLVED:
770 iser_addr_handler(cma_id); 807 iser_addr_handler(cma_id);
@@ -785,24 +822,28 @@ static int iser_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *eve
 	case RDMA_CM_EVENT_DISCONNECTED:
 	case RDMA_CM_EVENT_DEVICE_REMOVAL:
 	case RDMA_CM_EVENT_ADDR_CHANGE:
+	case RDMA_CM_EVENT_TIMEWAIT_EXIT:
 		iser_disconnected_handler(cma_id);
 		break;
 	default:
 		iser_err("Unexpected RDMA CM event (%d)\n", event->event);
 		break;
 	}
+	mutex_unlock(&ib_conn->state_mutex);
 	return 0;
 }
 
 void iser_conn_init(struct iser_conn *ib_conn)
 {
 	ib_conn->state = ISER_CONN_INIT;
-	init_waitqueue_head(&ib_conn->wait);
 	ib_conn->post_recv_buf_count = 0;
 	atomic_set(&ib_conn->post_send_buf_count, 0);
 	init_completion(&ib_conn->stop_completion);
+	init_completion(&ib_conn->flush_completion);
+	init_completion(&ib_conn->up_completion);
 	INIT_LIST_HEAD(&ib_conn->conn_list);
 	spin_lock_init(&ib_conn->lock);
+	mutex_init(&ib_conn->state_mutex);
 }
 
  /**
@@ -810,22 +851,21 @@ void iser_conn_init(struct iser_conn *ib_conn)
  * sleeps until the connection is established or rejected
  */
 int iser_connect(struct iser_conn   *ib_conn,
-		 struct sockaddr_in *src_addr,
-		 struct sockaddr_in *dst_addr,
+		 struct sockaddr    *src_addr,
+		 struct sockaddr    *dst_addr,
 		 int                 non_blocking)
 {
-	struct sockaddr *src, *dst;
 	int err = 0;
 
-	sprintf(ib_conn->name, "%pI4:%d",
-		&dst_addr->sin_addr.s_addr, dst_addr->sin_port);
+	mutex_lock(&ib_conn->state_mutex);
+
+	sprintf(ib_conn->name, "%pISp", dst_addr);
+
+	iser_info("connecting to: %s\n", ib_conn->name);
 
 	/* the device is known only --after-- address resolution */
 	ib_conn->device = NULL;
 
-	iser_info("connecting to: %pI4, port 0x%x\n",
-		  &dst_addr->sin_addr, dst_addr->sin_port);
-
 	ib_conn->state = ISER_CONN_PENDING;
 
 	ib_conn->cma_id = rdma_create_id(iser_cma_handler,
@@ -837,23 +877,21 @@ int iser_connect(struct iser_conn *ib_conn,
 		goto id_failure;
 	}
 
-	src = (struct sockaddr *)src_addr;
-	dst = (struct sockaddr *)dst_addr;
-	err = rdma_resolve_addr(ib_conn->cma_id, src, dst, 1000);
+	err = rdma_resolve_addr(ib_conn->cma_id, src_addr, dst_addr, 1000);
 	if (err) {
 		iser_err("rdma_resolve_addr failed: %d\n", err);
 		goto addr_failure;
 	}
 
 	if (!non_blocking) {
-		wait_event_interruptible(ib_conn->wait,
-					 (ib_conn->state != ISER_CONN_PENDING));
+		wait_for_completion_interruptible(&ib_conn->up_completion);
 
 		if (ib_conn->state != ISER_CONN_UP) {
 			err = -EIO;
 			goto connect_failure;
 		}
 	}
+	mutex_unlock(&ib_conn->state_mutex);
 
 	mutex_lock(&ig.connlist_mutex);
 	list_add(&ib_conn->conn_list, &ig.connlist);
@@ -865,6 +903,7 @@ id_failure:
 addr_failure:
 	ib_conn->state = ISER_CONN_DOWN;
 connect_failure:
+	mutex_unlock(&ib_conn->state_mutex);
 	iser_conn_release(ib_conn);
 	return err;
 }
@@ -1049,18 +1088,19 @@ static void iser_handle_comp_error(struct iser_tx_desc *desc,
 
 	if (ib_conn->post_recv_buf_count == 0 &&
 	    atomic_read(&ib_conn->post_send_buf_count) == 0) {
-		/* getting here when the state is UP means that the conn is *
-		 * being terminated asynchronously from the iSCSI layer's   *
-		 * perspective.                                             */
-		if (iser_conn_state_comp_exch(ib_conn, ISER_CONN_UP,
-					      ISER_CONN_TERMINATING))
+		/**
+		 * getting here when the state is UP means that the conn is
+		 * being terminated asynchronously from the iSCSI layer's
+		 * perspective. It is safe to peek at the connection state
+		 * since iscsi_conn_failure is allowed to be called twice.
+		 **/
+		if (ib_conn->state == ISER_CONN_UP)
 			iscsi_conn_failure(ib_conn->iscsi_conn,
 					   ISCSI_ERR_CONN_FAILED);
 
 		/* no more non completed posts to the QP, complete the
 		 * termination process w.o worrying on disconnect event */
-		ib_conn->state = ISER_CONN_DOWN;
-		wake_up_interruptible(&ib_conn->wait);
+		complete(&ib_conn->flush_completion);
 	}
 }
 
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index e3c2c5b4297f..62d2a18e1b41 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -130,6 +130,7 @@ static void srp_send_completion(struct ib_cq *cq, void *target_ptr);
 static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event);
 
 static struct scsi_transport_template *ib_srp_transport_template;
+static struct workqueue_struct *srp_remove_wq;
 
 static struct ib_client srp_client = {
 	.name   = "srp",
@@ -731,7 +732,7 @@ static bool srp_queue_remove_work(struct srp_target_port *target)
 	spin_unlock_irq(&target->lock);
 
 	if (changed)
-		queue_work(system_long_wq, &target->remove_work);
+		queue_work(srp_remove_wq, &target->remove_work);
 
 	return changed;
 }
@@ -1643,10 +1644,14 @@ static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp)
 					     SCSI_SENSE_BUFFERSIZE));
 	}
 
-	if (rsp->flags & (SRP_RSP_FLAG_DOOVER | SRP_RSP_FLAG_DOUNDER))
-		scsi_set_resid(scmnd, be32_to_cpu(rsp->data_out_res_cnt));
-	else if (rsp->flags & (SRP_RSP_FLAG_DIOVER | SRP_RSP_FLAG_DIUNDER))
+	if (unlikely(rsp->flags & SRP_RSP_FLAG_DIUNDER))
 		scsi_set_resid(scmnd, be32_to_cpu(rsp->data_in_res_cnt));
+	else if (unlikely(rsp->flags & SRP_RSP_FLAG_DIOVER))
+		scsi_set_resid(scmnd, -be32_to_cpu(rsp->data_in_res_cnt));
+	else if (unlikely(rsp->flags & SRP_RSP_FLAG_DOUNDER))
+		scsi_set_resid(scmnd, be32_to_cpu(rsp->data_out_res_cnt));
+	else if (unlikely(rsp->flags & SRP_RSP_FLAG_DOOVER))
+		scsi_set_resid(scmnd, -be32_to_cpu(rsp->data_out_res_cnt));
 
 	srp_free_req(target, req, scmnd,
 		     be32_to_cpu(rsp->req_lim_delta));
@@ -3261,9 +3266,10 @@ static void srp_remove_one(struct ib_device *device)
 		spin_unlock(&host->target_lock);
 
 		/*
-		 * Wait for target port removal tasks.
+		 * Wait for tl_err and target port removal tasks.
 		 */
 		flush_workqueue(system_long_wq);
+		flush_workqueue(srp_remove_wq);
 
 		kfree(host);
 	}
@@ -3313,16 +3319,22 @@ static int __init srp_init_module(void)
 		indirect_sg_entries = cmd_sg_entries;
 	}
 
+	srp_remove_wq = create_workqueue("srp_remove");
+	if (!srp_remove_wq) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	ret = -ENOMEM;
 	ib_srp_transport_template =
 		srp_attach_transport(&ib_srp_transport_functions);
 	if (!ib_srp_transport_template)
-		return -ENOMEM;
+		goto destroy_wq;
 
 	ret = class_register(&srp_class);
 	if (ret) {
 		pr_err("couldn't register class infiniband_srp\n");
-		srp_release_transport(ib_srp_transport_template);
-		return ret;
+		goto release_tr;
 	}
 
 	ib_sa_register_client(&srp_sa_client);
@@ -3330,13 +3342,22 @@ static int __init srp_init_module(void)
 	ret = ib_register_client(&srp_client);
 	if (ret) {
 		pr_err("couldn't register IB client\n");
-		srp_release_transport(ib_srp_transport_template);
-		ib_sa_unregister_client(&srp_sa_client);
-		class_unregister(&srp_class);
-		return ret;
+		goto unreg_sa;
 	}
 
-	return 0;
+out:
+	return ret;
+
+unreg_sa:
+	ib_sa_unregister_client(&srp_sa_client);
+	class_unregister(&srp_class);
+
+release_tr:
+	srp_release_transport(ib_srp_transport_template);
+
+destroy_wq:
+	destroy_workqueue(srp_remove_wq);
+	goto out;
 }
 
 static void __exit srp_cleanup_module(void)
@@ -3345,6 +3366,7 @@ static void __exit srp_cleanup_module(void)
 	ib_sa_unregister_client(&srp_sa_client);
 	class_unregister(&srp_class);
 	srp_release_transport(ib_srp_transport_template);
+	destroy_workqueue(srp_remove_wq);
 }
 
 module_init(srp_init_module);
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
index fe09f2788b15..d28a8c284da9 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -198,6 +198,7 @@ static void srpt_event_handler(struct ib_event_handler *handler,
 	case IB_EVENT_PKEY_CHANGE:
 	case IB_EVENT_SM_CHANGE:
 	case IB_EVENT_CLIENT_REREGISTER:
+	case IB_EVENT_GID_CHANGE:
 		/* Refresh port data asynchronously. */
 		if (event->element.port_num <= sdev->device->phys_port_cnt) {
 			sport = &sdev->port[event->element.port_num - 1];
@@ -563,7 +564,7 @@ static int srpt_refresh_port(struct srpt_port *sport)
 							 &reg_req, 0,
 							 srpt_mad_send_handler,
 							 srpt_mad_recv_handler,
-							 sport);
+							 sport, 0);
 		if (IS_ERR(sport->mad_agent)) {
 			ret = PTR_ERR(sport->mad_agent);
 			sport->mad_agent = NULL;