author     Or Gerlitz <ogerlitz@voltaire.com>	2008-07-15 02:48:53 -0400
committer  Roland Dreier <rolandd@cisco.com>	2008-07-15 02:48:53 -0400
commit     de910bd92137005b5e1ecaf2ce68053d7d7d5350 (patch)
tree       8c901c29a83263b7a6ff9ce77a9ac2962dbbb32c /drivers/infiniband
parent     64c5e613b9dd34ef1281ed6d22478609667ae36a (diff)
RDMA/cma: Simplify locking needed for serialization of callbacks
The RDMA CM has some logic in place to make sure that callbacks on a
given CM ID are delivered to the consumer in a serialized manner.
Specifically, it has code to protect against a device removal racing
with a running callback function.

This patch simplifies that logic by using a mutex per ID instead of a
wait queue and an atomic variable.  As a result, cma_disable_remove()
is now more appropriately renamed cma_disable_callback(), and
cma_enable_remove() can be removed entirely, since it would otherwise
just be a trivial wrapper around mutex_unlock().

Signed-off-by: Or Gerlitz <ogerlitz@voltaire.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
Diffstat (limited to 'drivers/infiniband')
-rw-r--r--  drivers/infiniband/core/cma.c | 106
1 file changed, 50 insertions(+), 56 deletions(-)
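Before the patch itself, here is a minimal userspace sketch of the locking scheme the patch moves to, using pthreads in place of kernel mutexes and hypothetical names (struct id_private, disable_callback(), remove_device()); it is an illustration of the pattern, not kernel code. A callback first takes the per-ID handler mutex and checks that the ID is still in the expected state; device removal then simply acquires the same mutex, which waits out any running callback, instead of sleeping on a wait queue guarded by an atomic counter.

/*
 * Minimal userspace analogue (not kernel code) of the per-ID handler_mutex
 * scheme introduced by this patch.  Names and the pthreads translation are
 * illustrative assumptions only.
 *
 * Build: cc -o demo demo.c -lpthread
 */
#include <errno.h>
#include <pthread.h>
#include <stdio.h>

enum cma_state { CMA_IDLE, CMA_CONNECT, CMA_DEVICE_REMOVAL };

struct id_private {
	enum cma_state	state;
	pthread_mutex_t	handler_mutex;	/* serializes callbacks and removal */
};

/*
 * Roughly what cma_disable_callback() does after the patch: take the
 * handler mutex and bail out if the ID is no longer in the expected state.
 * On success the mutex is left held; the caller unlocks when its callback
 * work is done.
 */
static int disable_callback(struct id_private *id, enum cma_state expected)
{
	pthread_mutex_lock(&id->handler_mutex);
	if (id->state != expected) {
		pthread_mutex_unlock(&id->handler_mutex);
		return -EINVAL;
	}
	return 0;
}

/* A callback runs with the mutex held and drops it when finished; there is
 * no longer an enable_remove()/wake_up() counterpart to call. */
static void connect_callback(struct id_private *id)
{
	if (disable_callback(id, CMA_CONNECT))
		return;
	printf("handling connect event\n");
	pthread_mutex_unlock(&id->handler_mutex);
}

/*
 * Device removal changes the state and then acquires the same mutex, which
 * naturally waits for any callback still running, replacing the old
 * wait_event() on the atomic dev_remove counter.  (In the real driver the
 * state change happens under a separate spinlock via cma_exch(); this
 * sketch is single-threaded for brevity.)
 */
static void remove_device(struct id_private *id)
{
	id->state = CMA_DEVICE_REMOVAL;
	pthread_mutex_lock(&id->handler_mutex);
	printf("device removed with no callback running\n");
	pthread_mutex_unlock(&id->handler_mutex);
}

int main(void)
{
	struct id_private id = { .state = CMA_CONNECT };

	pthread_mutex_init(&id.handler_mutex, NULL);
	connect_callback(&id);
	remove_device(&id);
	pthread_mutex_destroy(&id.handler_mutex);
	return 0;
}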
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 5fb506a41776..ae11d5cc74d0 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -130,8 +130,7 @@ struct rdma_id_private {
 
 	struct completion	comp;
 	atomic_t		refcount;
-	wait_queue_head_t	wait_remove;
-	atomic_t		dev_remove;
+	struct mutex		handler_mutex;
 
 	int			backlog;
 	int			timeout_ms;
@@ -355,26 +354,15 @@ static void cma_deref_id(struct rdma_id_private *id_priv)
 		complete(&id_priv->comp);
 }
 
-static int cma_disable_remove(struct rdma_id_private *id_priv,
-			      enum cma_state state)
+static int cma_disable_callback(struct rdma_id_private *id_priv,
+				enum cma_state state)
 {
-	unsigned long flags;
-	int ret;
-
-	spin_lock_irqsave(&id_priv->lock, flags);
-	if (id_priv->state == state) {
-		atomic_inc(&id_priv->dev_remove);
-		ret = 0;
-	} else
-		ret = -EINVAL;
-	spin_unlock_irqrestore(&id_priv->lock, flags);
-	return ret;
-}
-
-static void cma_enable_remove(struct rdma_id_private *id_priv)
-{
-	if (atomic_dec_and_test(&id_priv->dev_remove))
-		wake_up(&id_priv->wait_remove);
+	mutex_lock(&id_priv->handler_mutex);
+	if (id_priv->state != state) {
+		mutex_unlock(&id_priv->handler_mutex);
+		return -EINVAL;
+	}
+	return 0;
 }
 
 static int cma_has_cm_dev(struct rdma_id_private *id_priv)
@@ -399,8 +387,7 @@ struct rdma_cm_id *rdma_create_id(rdma_cm_event_handler event_handler,
 	mutex_init(&id_priv->qp_mutex);
 	init_completion(&id_priv->comp);
 	atomic_set(&id_priv->refcount, 1);
-	init_waitqueue_head(&id_priv->wait_remove);
-	atomic_set(&id_priv->dev_remove, 0);
+	mutex_init(&id_priv->handler_mutex);
 	INIT_LIST_HEAD(&id_priv->listen_list);
 	INIT_LIST_HEAD(&id_priv->mc_list);
 	get_random_bytes(&id_priv->seq_num, sizeof id_priv->seq_num);
@@ -927,7 +914,7 @@ static int cma_ib_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
 	struct rdma_cm_event event;
 	int ret = 0;
 
-	if (cma_disable_remove(id_priv, CMA_CONNECT))
+	if (cma_disable_callback(id_priv, CMA_CONNECT))
 		return 0;
 
 	memset(&event, 0, sizeof event);
@@ -984,12 +971,12 @@ static int cma_ib_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
 		/* Destroy the CM ID by returning a non-zero value. */
 		id_priv->cm_id.ib = NULL;
 		cma_exch(id_priv, CMA_DESTROYING);
-		cma_enable_remove(id_priv);
+		mutex_unlock(&id_priv->handler_mutex);
 		rdma_destroy_id(&id_priv->id);
 		return ret;
 	}
 out:
-	cma_enable_remove(id_priv);
+	mutex_unlock(&id_priv->handler_mutex);
 	return ret;
 }
 
@@ -1101,7 +1088,7 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
 	int offset, ret;
 
 	listen_id = cm_id->context;
-	if (cma_disable_remove(listen_id, CMA_LISTEN))
+	if (cma_disable_callback(listen_id, CMA_LISTEN))
 		return -ECONNABORTED;
 
 	memset(&event, 0, sizeof event);
@@ -1122,7 +1109,7 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
 		goto out;
 	}
 
-	atomic_inc(&conn_id->dev_remove);
+	mutex_lock_nested(&conn_id->handler_mutex, SINGLE_DEPTH_NESTING);
 	mutex_lock(&lock);
 	ret = cma_acquire_dev(conn_id);
 	mutex_unlock(&lock);
@@ -1144,7 +1131,7 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
 		    !cma_is_ud_ps(conn_id->id.ps))
 			ib_send_cm_mra(cm_id, CMA_CM_MRA_SETTING, NULL, 0);
 		mutex_unlock(&lock);
-		cma_enable_remove(conn_id);
+		mutex_unlock(&conn_id->handler_mutex);
 		goto out;
 	}
 
@@ -1153,11 +1140,11 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
 
 release_conn_id:
 	cma_exch(conn_id, CMA_DESTROYING);
-	cma_enable_remove(conn_id);
+	mutex_unlock(&conn_id->handler_mutex);
 	rdma_destroy_id(&conn_id->id);
 
 out:
-	cma_enable_remove(listen_id);
+	mutex_unlock(&listen_id->handler_mutex);
 	return ret;
 }
 
@@ -1223,7 +1210,7 @@ static int cma_iw_handler(struct iw_cm_id *iw_id, struct iw_cm_event *iw_event)
 	struct sockaddr_in *sin;
 	int ret = 0;
 
-	if (cma_disable_remove(id_priv, CMA_CONNECT))
+	if (cma_disable_callback(id_priv, CMA_CONNECT))
 		return 0;
 
 	memset(&event, 0, sizeof event);
@@ -1267,12 +1254,12 @@ static int cma_iw_handler(struct iw_cm_id *iw_id, struct iw_cm_event *iw_event)
 		/* Destroy the CM ID by returning a non-zero value. */
 		id_priv->cm_id.iw = NULL;
 		cma_exch(id_priv, CMA_DESTROYING);
-		cma_enable_remove(id_priv);
+		mutex_unlock(&id_priv->handler_mutex);
 		rdma_destroy_id(&id_priv->id);
 		return ret;
 	}
 
-	cma_enable_remove(id_priv);
+	mutex_unlock(&id_priv->handler_mutex);
 	return ret;
 }
 
@@ -1288,7 +1275,7 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id,
 	struct ib_device_attr attr;
 
 	listen_id = cm_id->context;
-	if (cma_disable_remove(listen_id, CMA_LISTEN))
+	if (cma_disable_callback(listen_id, CMA_LISTEN))
 		return -ECONNABORTED;
 
 	/* Create a new RDMA id for the new IW CM ID */
@@ -1300,19 +1287,19 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id,
 		goto out;
 	}
 	conn_id = container_of(new_cm_id, struct rdma_id_private, id);
-	atomic_inc(&conn_id->dev_remove);
+	mutex_lock_nested(&conn_id->handler_mutex, SINGLE_DEPTH_NESTING);
 	conn_id->state = CMA_CONNECT;
 
 	dev = ip_dev_find(&init_net, iw_event->local_addr.sin_addr.s_addr);
 	if (!dev) {
 		ret = -EADDRNOTAVAIL;
-		cma_enable_remove(conn_id);
+		mutex_unlock(&conn_id->handler_mutex);
 		rdma_destroy_id(new_cm_id);
 		goto out;
 	}
 	ret = rdma_copy_addr(&conn_id->id.route.addr.dev_addr, dev, NULL);
 	if (ret) {
-		cma_enable_remove(conn_id);
+		mutex_unlock(&conn_id->handler_mutex);
 		rdma_destroy_id(new_cm_id);
 		goto out;
 	}
@@ -1321,7 +1308,7 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id,
 	ret = cma_acquire_dev(conn_id);
 	mutex_unlock(&lock);
 	if (ret) {
-		cma_enable_remove(conn_id);
+		mutex_unlock(&conn_id->handler_mutex);
 		rdma_destroy_id(new_cm_id);
 		goto out;
 	}
@@ -1337,7 +1324,7 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id,
 
 	ret = ib_query_device(conn_id->id.device, &attr);
 	if (ret) {
-		cma_enable_remove(conn_id);
+		mutex_unlock(&conn_id->handler_mutex);
 		rdma_destroy_id(new_cm_id);
 		goto out;
 	}
@@ -1353,14 +1340,17 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id,
 		/* User wants to destroy the CM ID */
 		conn_id->cm_id.iw = NULL;
 		cma_exch(conn_id, CMA_DESTROYING);
-		cma_enable_remove(conn_id);
+		mutex_unlock(&conn_id->handler_mutex);
 		rdma_destroy_id(&conn_id->id);
+		goto out;
 	}
 
+	mutex_unlock(&conn_id->handler_mutex);
+
 out:
 	if (dev)
 		dev_put(dev);
-	cma_enable_remove(listen_id);
+	mutex_unlock(&listen_id->handler_mutex);
 	return ret;
 }
 
@@ -1592,7 +1582,7 @@ static void cma_work_handler(struct work_struct *_work)
 	struct rdma_id_private *id_priv = work->id;
 	int destroy = 0;
 
-	atomic_inc(&id_priv->dev_remove);
+	mutex_lock(&id_priv->handler_mutex);
 	if (!cma_comp_exch(id_priv, work->old_state, work->new_state))
 		goto out;
 
@@ -1601,7 +1591,7 @@ static void cma_work_handler(struct work_struct *_work)
 		destroy = 1;
 	}
 out:
-	cma_enable_remove(id_priv);
+	mutex_unlock(&id_priv->handler_mutex);
 	cma_deref_id(id_priv);
 	if (destroy)
 		rdma_destroy_id(&id_priv->id);
@@ -1764,7 +1754,7 @@ static void addr_handler(int status, struct sockaddr *src_addr,
 	struct rdma_cm_event event;
 
 	memset(&event, 0, sizeof event);
-	atomic_inc(&id_priv->dev_remove);
+	mutex_lock(&id_priv->handler_mutex);
 
 	/*
 	 * Grab mutex to block rdma_destroy_id() from removing the device while
@@ -1793,13 +1783,13 @@ static void addr_handler(int status, struct sockaddr *src_addr,
 
 	if (id_priv->id.event_handler(&id_priv->id, &event)) {
 		cma_exch(id_priv, CMA_DESTROYING);
-		cma_enable_remove(id_priv);
+		mutex_unlock(&id_priv->handler_mutex);
 		cma_deref_id(id_priv);
 		rdma_destroy_id(&id_priv->id);
 		return;
 	}
 out:
-	cma_enable_remove(id_priv);
+	mutex_unlock(&id_priv->handler_mutex);
 	cma_deref_id(id_priv);
 }
 
@@ -2126,7 +2116,7 @@ static int cma_sidr_rep_handler(struct ib_cm_id *cm_id,
 	struct ib_cm_sidr_rep_event_param *rep = &ib_event->param.sidr_rep_rcvd;
 	int ret = 0;
 
-	if (cma_disable_remove(id_priv, CMA_CONNECT))
+	if (cma_disable_callback(id_priv, CMA_CONNECT))
 		return 0;
 
 	memset(&event, 0, sizeof event);
@@ -2167,12 +2157,12 @@ static int cma_sidr_rep_handler(struct ib_cm_id *cm_id,
 		/* Destroy the CM ID by returning a non-zero value. */
 		id_priv->cm_id.ib = NULL;
 		cma_exch(id_priv, CMA_DESTROYING);
-		cma_enable_remove(id_priv);
+		mutex_unlock(&id_priv->handler_mutex);
 		rdma_destroy_id(&id_priv->id);
 		return ret;
 	}
 out:
-	cma_enable_remove(id_priv);
+	mutex_unlock(&id_priv->handler_mutex);
 	return ret;
 }
 
@@ -2570,8 +2560,8 @@ static int cma_ib_mc_handler(int status, struct ib_sa_multicast *multicast)
 	int ret;
 
 	id_priv = mc->id_priv;
-	if (cma_disable_remove(id_priv, CMA_ADDR_BOUND) &&
-	    cma_disable_remove(id_priv, CMA_ADDR_RESOLVED))
+	if (cma_disable_callback(id_priv, CMA_ADDR_BOUND) &&
+	    cma_disable_callback(id_priv, CMA_ADDR_RESOLVED))
 		return 0;
 
 	mutex_lock(&id_priv->qp_mutex);
@@ -2596,12 +2586,12 @@ static int cma_ib_mc_handler(int status, struct ib_sa_multicast *multicast)
 	ret = id_priv->id.event_handler(&id_priv->id, &event);
 	if (ret) {
 		cma_exch(id_priv, CMA_DESTROYING);
-		cma_enable_remove(id_priv);
+		mutex_unlock(&id_priv->handler_mutex);
 		rdma_destroy_id(&id_priv->id);
 		return 0;
 	}
 
-	cma_enable_remove(id_priv);
+	mutex_unlock(&id_priv->handler_mutex);
 	return 0;
 }
 
@@ -2760,6 +2750,7 @@ static int cma_remove_id_dev(struct rdma_id_private *id_priv)
 {
 	struct rdma_cm_event event;
 	enum cma_state state;
+	int ret = 0;
 
 	/* Record that we want to remove the device */
 	state = cma_exch(id_priv, CMA_DEVICE_REMOVAL);
@@ -2767,15 +2758,18 @@ static int cma_remove_id_dev(struct rdma_id_private *id_priv)
 		return 0;
 
 	cma_cancel_operation(id_priv, state);
-	wait_event(id_priv->wait_remove, !atomic_read(&id_priv->dev_remove));
+	mutex_lock(&id_priv->handler_mutex);
 
 	/* Check for destruction from another callback. */
 	if (!cma_comp(id_priv, CMA_DEVICE_REMOVAL))
-		return 0;
+		goto out;
 
 	memset(&event, 0, sizeof event);
 	event.event = RDMA_CM_EVENT_DEVICE_REMOVAL;
-	return id_priv->id.event_handler(&id_priv->id, &event);
+	ret = id_priv->id.event_handler(&id_priv->id, &event);
+out:
+	mutex_unlock(&id_priv->handler_mutex);
+	return ret;
 }
 
 static void cma_process_remove(struct cma_device *cma_dev)