path: root/drivers/infiniband/core
author	Nir Muchtar <nirm@voltaire.com>	2011-05-20 14:46:11 -0400
committer	Roland Dreier <roland@purestorage.com>	2011-05-25 16:46:22 -0400
commit	550e5ca77e96989c5e19f60e017205b2bcc615a5 (patch)
tree	ac2a52507f095a476875eab4ead8957655671bc3 /drivers/infiniband/core
parent	b2cbae2c248776d81cc265ff7d48405b6a4cc463 (diff)
RDMA/cma: Export enum cma_state in <rdma/rdma_cm.h>
Move cma.c's internal definition of enum cma_state to enum rdma_cm_state in an
exported header so that it can be exported via RDMA netlink.

Signed-off-by: Nir Muchtar <nirm@voltaire.com>
Signed-off-by: Roland Dreier <roland@purestorage.com>
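The header hunk itself is outside the 'drivers/infiniband/core' filter of this view. As a sketch, assuming the exported definition simply mirrors the removed cma.c enum with an RDMA_CM_ prefix (which is what the renamed values in the diff below suggest), <rdma/rdma_cm.h> would gain:

enum rdma_cm_state {            /* formerly enum cma_state, private to cma.c */
        RDMA_CM_IDLE,
        RDMA_CM_ADDR_QUERY,
        RDMA_CM_ADDR_RESOLVED,
        RDMA_CM_ROUTE_QUERY,
        RDMA_CM_ROUTE_RESOLVED,
        RDMA_CM_CONNECT,
        RDMA_CM_DISCONNECT,
        RDMA_CM_ADDR_BOUND,
        RDMA_CM_LISTEN,
        RDMA_CM_DEVICE_REMOVAL,
        RDMA_CM_DESTROYING
};

cma.c then refers to these exported values everywhere it previously used the CMA_* names, as the diff below shows.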
Diffstat (limited to 'drivers/infiniband/core')
-rw-r--r--	drivers/infiniband/core/cma.c	170
1 file changed, 80 insertions(+), 90 deletions(-)
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 99dde874fbbd..451d39e19cb4 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -89,20 +89,6 @@ struct cma_device {
 	struct list_head	id_list;
 };
 
-enum cma_state {
-	CMA_IDLE,
-	CMA_ADDR_QUERY,
-	CMA_ADDR_RESOLVED,
-	CMA_ROUTE_QUERY,
-	CMA_ROUTE_RESOLVED,
-	CMA_CONNECT,
-	CMA_DISCONNECT,
-	CMA_ADDR_BOUND,
-	CMA_LISTEN,
-	CMA_DEVICE_REMOVAL,
-	CMA_DESTROYING
-};
-
 struct rdma_bind_list {
 	struct idr		*ps;
 	struct hlist_head	owners;
@@ -126,7 +112,7 @@ struct rdma_id_private {
 	struct list_head	mc_list;
 
 	int			internal_id;
-	enum cma_state		state;
+	enum rdma_cm_state	state;
 	spinlock_t		lock;
 	struct mutex		qp_mutex;
 
@@ -165,8 +151,8 @@ struct cma_multicast {
 struct cma_work {
 	struct work_struct	work;
 	struct rdma_id_private	*id;
-	enum cma_state		old_state;
-	enum cma_state		new_state;
+	enum rdma_cm_state	old_state;
+	enum rdma_cm_state	new_state;
 	struct rdma_cm_event	event;
 };
 
@@ -217,7 +203,7 @@ struct sdp_hah {
 #define CMA_VERSION 0x00
 #define SDP_MAJ_VERSION 0x2
 
-static int cma_comp(struct rdma_id_private *id_priv, enum cma_state comp)
+static int cma_comp(struct rdma_id_private *id_priv, enum rdma_cm_state comp)
 {
 	unsigned long flags;
 	int ret;
@@ -229,7 +215,7 @@ static int cma_comp(struct rdma_id_private *id_priv, enum cma_state comp)
 }
 
 static int cma_comp_exch(struct rdma_id_private *id_priv,
-			 enum cma_state comp, enum cma_state exch)
+			 enum rdma_cm_state comp, enum rdma_cm_state exch)
 {
 	unsigned long flags;
 	int ret;
@@ -241,11 +227,11 @@ static int cma_comp_exch(struct rdma_id_private *id_priv,
 	return ret;
 }
 
-static enum cma_state cma_exch(struct rdma_id_private *id_priv,
-			       enum cma_state exch)
+static enum rdma_cm_state cma_exch(struct rdma_id_private *id_priv,
+				   enum rdma_cm_state exch)
 {
 	unsigned long flags;
-	enum cma_state old;
+	enum rdma_cm_state old;
 
 	spin_lock_irqsave(&id_priv->lock, flags);
 	old = id_priv->state;
@@ -413,7 +399,7 @@ static void cma_deref_id(struct rdma_id_private *id_priv)
 }
 
 static int cma_disable_callback(struct rdma_id_private *id_priv,
-				enum cma_state state)
+				enum rdma_cm_state state)
 {
 	mutex_lock(&id_priv->handler_mutex);
 	if (id_priv->state != state) {
@@ -437,7 +423,7 @@ struct rdma_cm_id *rdma_create_id(rdma_cm_event_handler event_handler,
 	if (!id_priv)
 		return ERR_PTR(-ENOMEM);
 
-	id_priv->state = CMA_IDLE;
+	id_priv->state = RDMA_CM_IDLE;
 	id_priv->id.context = context;
 	id_priv->id.event_handler = event_handler;
 	id_priv->id.ps = ps;
@@ -858,16 +844,16 @@ static void cma_cancel_listens(struct rdma_id_private *id_priv)
 }
 
 static void cma_cancel_operation(struct rdma_id_private *id_priv,
-				 enum cma_state state)
+				 enum rdma_cm_state state)
 {
 	switch (state) {
-	case CMA_ADDR_QUERY:
+	case RDMA_CM_ADDR_QUERY:
 		rdma_addr_cancel(&id_priv->id.route.addr.dev_addr);
 		break;
-	case CMA_ROUTE_QUERY:
+	case RDMA_CM_ROUTE_QUERY:
 		cma_cancel_route(id_priv);
 		break;
-	case CMA_LISTEN:
+	case RDMA_CM_LISTEN:
 		if (cma_any_addr((struct sockaddr *) &id_priv->id.route.addr.src_addr)
 				&& !id_priv->cma_dev)
 			cma_cancel_listens(id_priv);
@@ -918,10 +904,10 @@ static void cma_leave_mc_groups(struct rdma_id_private *id_priv)
 void rdma_destroy_id(struct rdma_cm_id *id)
 {
 	struct rdma_id_private *id_priv;
-	enum cma_state state;
+	enum rdma_cm_state state;
 
 	id_priv = container_of(id, struct rdma_id_private, id);
-	state = cma_exch(id_priv, CMA_DESTROYING);
+	state = cma_exch(id_priv, RDMA_CM_DESTROYING);
 	cma_cancel_operation(id_priv, state);
 
 	/*
@@ -1015,9 +1001,9 @@ static int cma_ib_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
 	int ret = 0;
 
 	if ((ib_event->event != IB_CM_TIMEWAIT_EXIT &&
-		cma_disable_callback(id_priv, CMA_CONNECT)) ||
+		cma_disable_callback(id_priv, RDMA_CM_CONNECT)) ||
 	    (ib_event->event == IB_CM_TIMEWAIT_EXIT &&
-		cma_disable_callback(id_priv, CMA_DISCONNECT)))
+		cma_disable_callback(id_priv, RDMA_CM_DISCONNECT)))
 		return 0;
 
 	memset(&event, 0, sizeof event);
@@ -1048,7 +1034,8 @@ static int cma_ib_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
 		event.status = -ETIMEDOUT; /* fall through */
 	case IB_CM_DREQ_RECEIVED:
 	case IB_CM_DREP_RECEIVED:
-		if (!cma_comp_exch(id_priv, CMA_CONNECT, CMA_DISCONNECT))
+		if (!cma_comp_exch(id_priv, RDMA_CM_CONNECT,
+				   RDMA_CM_DISCONNECT))
 			goto out;
 		event.event = RDMA_CM_EVENT_DISCONNECTED;
 		break;
@@ -1075,7 +1062,7 @@ static int cma_ib_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
 	if (ret) {
 		/* Destroy the CM ID by returning a non-zero value. */
 		id_priv->cm_id.ib = NULL;
-		cma_exch(id_priv, CMA_DESTROYING);
+		cma_exch(id_priv, RDMA_CM_DESTROYING);
 		mutex_unlock(&id_priv->handler_mutex);
 		rdma_destroy_id(&id_priv->id);
 		return ret;
@@ -1132,7 +1119,7 @@ static struct rdma_id_private *cma_new_conn_id(struct rdma_cm_id *listen_id,
 	rdma_addr_set_dgid(&rt->addr.dev_addr, &rt->path_rec[0].dgid);
 
 	id_priv = container_of(id, struct rdma_id_private, id);
-	id_priv->state = CMA_CONNECT;
+	id_priv->state = RDMA_CM_CONNECT;
 	return id_priv;
 
 destroy_id:
@@ -1172,7 +1159,7 @@ static struct rdma_id_private *cma_new_udp_id(struct rdma_cm_id *listen_id,
 	}
 
 	id_priv = container_of(id, struct rdma_id_private, id);
-	id_priv->state = CMA_CONNECT;
+	id_priv->state = RDMA_CM_CONNECT;
 	return id_priv;
 err:
 	rdma_destroy_id(id);
@@ -1201,7 +1188,7 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
 	int offset, ret;
 
 	listen_id = cm_id->context;
-	if (cma_disable_callback(listen_id, CMA_LISTEN))
+	if (cma_disable_callback(listen_id, RDMA_CM_LISTEN))
 		return -ECONNABORTED;
 
 	memset(&event, 0, sizeof event);
@@ -1243,7 +1230,7 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
 	 * while we're accessing the cm_id.
 	 */
 	mutex_lock(&lock);
-	if (cma_comp(conn_id, CMA_CONNECT) &&
+	if (cma_comp(conn_id, RDMA_CM_CONNECT) &&
 	    !cma_is_ud_ps(conn_id->id.ps))
 		ib_send_cm_mra(cm_id, CMA_CM_MRA_SETTING, NULL, 0);
 	mutex_unlock(&lock);
@@ -1257,7 +1244,7 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
 	conn_id->cm_id.ib = NULL;
 
 release_conn_id:
-	cma_exch(conn_id, CMA_DESTROYING);
+	cma_exch(conn_id, RDMA_CM_DESTROYING);
 	mutex_unlock(&conn_id->handler_mutex);
 	rdma_destroy_id(&conn_id->id);
 
@@ -1328,7 +1315,7 @@ static int cma_iw_handler(struct iw_cm_id *iw_id, struct iw_cm_event *iw_event)
 	struct sockaddr_in *sin;
 	int ret = 0;
 
-	if (cma_disable_callback(id_priv, CMA_CONNECT))
+	if (cma_disable_callback(id_priv, RDMA_CM_CONNECT))
 		return 0;
 
 	memset(&event, 0, sizeof event);
@@ -1371,7 +1358,7 @@ static int cma_iw_handler(struct iw_cm_id *iw_id, struct iw_cm_event *iw_event)
 	if (ret) {
 		/* Destroy the CM ID by returning a non-zero value. */
 		id_priv->cm_id.iw = NULL;
-		cma_exch(id_priv, CMA_DESTROYING);
+		cma_exch(id_priv, RDMA_CM_DESTROYING);
 		mutex_unlock(&id_priv->handler_mutex);
 		rdma_destroy_id(&id_priv->id);
 		return ret;
@@ -1393,7 +1380,7 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id,
 	struct ib_device_attr attr;
 
 	listen_id = cm_id->context;
-	if (cma_disable_callback(listen_id, CMA_LISTEN))
+	if (cma_disable_callback(listen_id, RDMA_CM_LISTEN))
 		return -ECONNABORTED;
 
 	/* Create a new RDMA id for the new IW CM ID */
@@ -1406,7 +1393,7 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id,
 	}
 	conn_id = container_of(new_cm_id, struct rdma_id_private, id);
 	mutex_lock_nested(&conn_id->handler_mutex, SINGLE_DEPTH_NESTING);
-	conn_id->state = CMA_CONNECT;
+	conn_id->state = RDMA_CM_CONNECT;
 
 	dev = ip_dev_find(&init_net, iw_event->local_addr.sin_addr.s_addr);
 	if (!dev) {
@@ -1461,7 +1448,7 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id,
 	if (ret) {
 		/* User wants to destroy the CM ID */
 		conn_id->cm_id.iw = NULL;
-		cma_exch(conn_id, CMA_DESTROYING);
+		cma_exch(conn_id, RDMA_CM_DESTROYING);
 		mutex_unlock(&conn_id->handler_mutex);
 		cma_deref_id(conn_id);
 		rdma_destroy_id(&conn_id->id);
@@ -1554,7 +1541,7 @@ static void cma_listen_on_dev(struct rdma_id_private *id_priv,
 
 	dev_id_priv = container_of(id, struct rdma_id_private, id);
 
-	dev_id_priv->state = CMA_ADDR_BOUND;
+	dev_id_priv->state = RDMA_CM_ADDR_BOUND;
 	memcpy(&id->route.addr.src_addr, &id_priv->id.route.addr.src_addr,
 	       ip_addr_size((struct sockaddr *) &id_priv->id.route.addr.src_addr));
 
@@ -1601,8 +1588,8 @@ static void cma_query_handler(int status, struct ib_sa_path_rec *path_rec,
 		route->num_paths = 1;
 		*route->path_rec = *path_rec;
 	} else {
-		work->old_state = CMA_ROUTE_QUERY;
-		work->new_state = CMA_ADDR_RESOLVED;
+		work->old_state = RDMA_CM_ROUTE_QUERY;
+		work->new_state = RDMA_CM_ADDR_RESOLVED;
 		work->event.event = RDMA_CM_EVENT_ROUTE_ERROR;
 		work->event.status = status;
 	}
@@ -1660,7 +1647,7 @@ static void cma_work_handler(struct work_struct *_work)
 		goto out;
 
 	if (id_priv->id.event_handler(&id_priv->id, &work->event)) {
-		cma_exch(id_priv, CMA_DESTROYING);
+		cma_exch(id_priv, RDMA_CM_DESTROYING);
 		destroy = 1;
 	}
 out:
@@ -1678,12 +1665,12 @@ static void cma_ndev_work_handler(struct work_struct *_work)
 	int destroy = 0;
 
 	mutex_lock(&id_priv->handler_mutex);
-	if (id_priv->state == CMA_DESTROYING ||
-	    id_priv->state == CMA_DEVICE_REMOVAL)
+	if (id_priv->state == RDMA_CM_DESTROYING ||
+	    id_priv->state == RDMA_CM_DEVICE_REMOVAL)
 		goto out;
 
 	if (id_priv->id.event_handler(&id_priv->id, &work->event)) {
-		cma_exch(id_priv, CMA_DESTROYING);
+		cma_exch(id_priv, RDMA_CM_DESTROYING);
 		destroy = 1;
 	}
 
@@ -1707,8 +1694,8 @@ static int cma_resolve_ib_route(struct rdma_id_private *id_priv, int timeout_ms)
 
 	work->id = id_priv;
 	INIT_WORK(&work->work, cma_work_handler);
-	work->old_state = CMA_ROUTE_QUERY;
-	work->new_state = CMA_ROUTE_RESOLVED;
+	work->old_state = RDMA_CM_ROUTE_QUERY;
+	work->new_state = RDMA_CM_ROUTE_RESOLVED;
 	work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
 
 	route->path_rec = kmalloc(sizeof *route->path_rec, GFP_KERNEL);
@@ -1737,7 +1724,8 @@ int rdma_set_ib_paths(struct rdma_cm_id *id,
 	int ret;
 
 	id_priv = container_of(id, struct rdma_id_private, id);
-	if (!cma_comp_exch(id_priv, CMA_ADDR_RESOLVED, CMA_ROUTE_RESOLVED))
+	if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED,
+			   RDMA_CM_ROUTE_RESOLVED))
 		return -EINVAL;
 
 	id->route.path_rec = kmemdup(path_rec, sizeof *path_rec * num_paths,
@@ -1750,7 +1738,7 @@ int rdma_set_ib_paths(struct rdma_cm_id *id,
 	id->route.num_paths = num_paths;
 	return 0;
 err:
-	cma_comp_exch(id_priv, CMA_ROUTE_RESOLVED, CMA_ADDR_RESOLVED);
+	cma_comp_exch(id_priv, RDMA_CM_ROUTE_RESOLVED, RDMA_CM_ADDR_RESOLVED);
 	return ret;
 }
 EXPORT_SYMBOL(rdma_set_ib_paths);
@@ -1765,8 +1753,8 @@ static int cma_resolve_iw_route(struct rdma_id_private *id_priv, int timeout_ms)
 
 	work->id = id_priv;
 	INIT_WORK(&work->work, cma_work_handler);
-	work->old_state = CMA_ROUTE_QUERY;
-	work->new_state = CMA_ROUTE_RESOLVED;
+	work->old_state = RDMA_CM_ROUTE_QUERY;
+	work->new_state = RDMA_CM_ROUTE_RESOLVED;
 	work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
 	queue_work(cma_wq, &work->work);
 	return 0;
@@ -1830,8 +1818,8 @@ static int cma_resolve_iboe_route(struct rdma_id_private *id_priv)
 		goto err2;
 	}
 
-	work->old_state = CMA_ROUTE_QUERY;
-	work->new_state = CMA_ROUTE_RESOLVED;
+	work->old_state = RDMA_CM_ROUTE_QUERY;
+	work->new_state = RDMA_CM_ROUTE_RESOLVED;
 	work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
 	work->event.status = 0;
 
@@ -1853,7 +1841,7 @@ int rdma_resolve_route(struct rdma_cm_id *id, int timeout_ms)
 	int ret;
 
 	id_priv = container_of(id, struct rdma_id_private, id);
-	if (!cma_comp_exch(id_priv, CMA_ADDR_RESOLVED, CMA_ROUTE_QUERY))
+	if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED, RDMA_CM_ROUTE_QUERY))
 		return -EINVAL;
 
 	atomic_inc(&id_priv->refcount);
@@ -1882,7 +1870,7 @@ int rdma_resolve_route(struct rdma_cm_id *id, int timeout_ms)
 
 	return 0;
 err:
-	cma_comp_exch(id_priv, CMA_ROUTE_QUERY, CMA_ADDR_RESOLVED);
+	cma_comp_exch(id_priv, RDMA_CM_ROUTE_QUERY, RDMA_CM_ADDR_RESOLVED);
 	cma_deref_id(id_priv);
 	return ret;
 }
@@ -1941,14 +1929,16 @@ static void addr_handler(int status, struct sockaddr *src_addr,
 
 	memset(&event, 0, sizeof event);
 	mutex_lock(&id_priv->handler_mutex);
-	if (!cma_comp_exch(id_priv, CMA_ADDR_QUERY, CMA_ADDR_RESOLVED))
+	if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_QUERY,
+			   RDMA_CM_ADDR_RESOLVED))
 		goto out;
 
 	if (!status && !id_priv->cma_dev)
 		status = cma_acquire_dev(id_priv);
 
 	if (status) {
-		if (!cma_comp_exch(id_priv, CMA_ADDR_RESOLVED, CMA_ADDR_BOUND))
+		if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED,
+				   RDMA_CM_ADDR_BOUND))
 			goto out;
 		event.event = RDMA_CM_EVENT_ADDR_ERROR;
 		event.status = status;
@@ -1959,7 +1949,7 @@ static void addr_handler(int status, struct sockaddr *src_addr,
 	}
 
 	if (id_priv->id.event_handler(&id_priv->id, &event)) {
-		cma_exch(id_priv, CMA_DESTROYING);
+		cma_exch(id_priv, RDMA_CM_DESTROYING);
 		mutex_unlock(&id_priv->handler_mutex);
 		cma_deref_id(id_priv);
 		rdma_destroy_id(&id_priv->id);
@@ -2004,8 +1994,8 @@ static int cma_resolve_loopback(struct rdma_id_private *id_priv)
 
 	work->id = id_priv;
 	INIT_WORK(&work->work, cma_work_handler);
-	work->old_state = CMA_ADDR_QUERY;
-	work->new_state = CMA_ADDR_RESOLVED;
+	work->old_state = RDMA_CM_ADDR_QUERY;
+	work->new_state = RDMA_CM_ADDR_RESOLVED;
 	work->event.event = RDMA_CM_EVENT_ADDR_RESOLVED;
 	queue_work(cma_wq, &work->work);
 	return 0;
@@ -2034,13 +2024,13 @@ int rdma_resolve_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
 	int ret;
 
 	id_priv = container_of(id, struct rdma_id_private, id);
-	if (id_priv->state == CMA_IDLE) {
+	if (id_priv->state == RDMA_CM_IDLE) {
 		ret = cma_bind_addr(id, src_addr, dst_addr);
 		if (ret)
 			return ret;
 	}
 
-	if (!cma_comp_exch(id_priv, CMA_ADDR_BOUND, CMA_ADDR_QUERY))
+	if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_ADDR_QUERY))
 		return -EINVAL;
 
 	atomic_inc(&id_priv->refcount);
@@ -2056,7 +2046,7 @@ int rdma_resolve_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
 
 	return 0;
 err:
-	cma_comp_exch(id_priv, CMA_ADDR_QUERY, CMA_ADDR_BOUND);
+	cma_comp_exch(id_priv, RDMA_CM_ADDR_QUERY, RDMA_CM_ADDR_BOUND);
 	cma_deref_id(id_priv);
 	return ret;
 }
@@ -2070,7 +2060,7 @@ int rdma_set_reuseaddr(struct rdma_cm_id *id, int reuse)
 
 	id_priv = container_of(id, struct rdma_id_private, id);
 	spin_lock_irqsave(&id_priv->lock, flags);
-	if (id_priv->state == CMA_IDLE) {
+	if (id_priv->state == RDMA_CM_IDLE) {
 		id_priv->reuseaddr = reuse;
 		ret = 0;
 	} else {
@@ -2177,7 +2167,7 @@ static int cma_check_port(struct rdma_bind_list *bind_list,
 		if (id_priv == cur_id)
 			continue;
 
-		if ((cur_id->state == CMA_LISTEN) ||
+		if ((cur_id->state == RDMA_CM_LISTEN) ||
 		    !reuseaddr || !cur_id->reuseaddr) {
 			cur_addr = (struct sockaddr *) &cur_id->id.route.addr.src_addr;
 			if (cma_any_addr(cur_addr))
@@ -2280,14 +2270,14 @@ int rdma_listen(struct rdma_cm_id *id, int backlog)
 	int ret;
 
 	id_priv = container_of(id, struct rdma_id_private, id);
-	if (id_priv->state == CMA_IDLE) {
+	if (id_priv->state == RDMA_CM_IDLE) {
 		((struct sockaddr *) &id->route.addr.src_addr)->sa_family = AF_INET;
 		ret = rdma_bind_addr(id, (struct sockaddr *) &id->route.addr.src_addr);
 		if (ret)
 			return ret;
 	}
 
-	if (!cma_comp_exch(id_priv, CMA_ADDR_BOUND, CMA_LISTEN))
+	if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_LISTEN))
 		return -EINVAL;
 
 	if (id_priv->reuseaddr) {
@@ -2319,7 +2309,7 @@ int rdma_listen(struct rdma_cm_id *id, int backlog)
 	return 0;
 err:
 	id_priv->backlog = 0;
-	cma_comp_exch(id_priv, CMA_LISTEN, CMA_ADDR_BOUND);
+	cma_comp_exch(id_priv, RDMA_CM_LISTEN, RDMA_CM_ADDR_BOUND);
 	return ret;
 }
 EXPORT_SYMBOL(rdma_listen);
@@ -2333,7 +2323,7 @@ int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr)
 		return -EAFNOSUPPORT;
 
 	id_priv = container_of(id, struct rdma_id_private, id);
-	if (!cma_comp_exch(id_priv, CMA_IDLE, CMA_ADDR_BOUND))
+	if (!cma_comp_exch(id_priv, RDMA_CM_IDLE, RDMA_CM_ADDR_BOUND))
 		return -EINVAL;
 
 	ret = cma_check_linklocal(&id->route.addr.dev_addr, addr);
@@ -2360,7 +2350,7 @@ err2:
 	if (id_priv->cma_dev)
 		cma_release_dev(id_priv);
 err1:
-	cma_comp_exch(id_priv, CMA_ADDR_BOUND, CMA_IDLE);
+	cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_IDLE);
 	return ret;
 }
 EXPORT_SYMBOL(rdma_bind_addr);
@@ -2433,7 +2423,7 @@ static int cma_sidr_rep_handler(struct ib_cm_id *cm_id,
 	struct ib_cm_sidr_rep_event_param *rep = &ib_event->param.sidr_rep_rcvd;
 	int ret = 0;
 
-	if (cma_disable_callback(id_priv, CMA_CONNECT))
+	if (cma_disable_callback(id_priv, RDMA_CM_CONNECT))
 		return 0;
 
 	memset(&event, 0, sizeof event);
@@ -2479,7 +2469,7 @@ static int cma_sidr_rep_handler(struct ib_cm_id *cm_id,
 	if (ret) {
 		/* Destroy the CM ID by returning a non-zero value. */
 		id_priv->cm_id.ib = NULL;
-		cma_exch(id_priv, CMA_DESTROYING);
+		cma_exch(id_priv, RDMA_CM_DESTROYING);
 		mutex_unlock(&id_priv->handler_mutex);
 		rdma_destroy_id(&id_priv->id);
 		return ret;
@@ -2645,7 +2635,7 @@ int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
 	int ret;
 
 	id_priv = container_of(id, struct rdma_id_private, id);
-	if (!cma_comp_exch(id_priv, CMA_ROUTE_RESOLVED, CMA_CONNECT))
+	if (!cma_comp_exch(id_priv, RDMA_CM_ROUTE_RESOLVED, RDMA_CM_CONNECT))
 		return -EINVAL;
 
 	if (!id->qp) {
@@ -2672,7 +2662,7 @@ int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
 
 	return 0;
 err:
-	cma_comp_exch(id_priv, CMA_CONNECT, CMA_ROUTE_RESOLVED);
+	cma_comp_exch(id_priv, RDMA_CM_CONNECT, RDMA_CM_ROUTE_RESOLVED);
 	return ret;
 }
 EXPORT_SYMBOL(rdma_connect);
@@ -2758,7 +2748,7 @@ int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
 	int ret;
 
 	id_priv = container_of(id, struct rdma_id_private, id);
-	if (!cma_comp(id_priv, CMA_CONNECT))
+	if (!cma_comp(id_priv, RDMA_CM_CONNECT))
 		return -EINVAL;
 
 	if (!id->qp && conn_param) {
@@ -2887,8 +2877,8 @@ static int cma_ib_mc_handler(int status, struct ib_sa_multicast *multicast)
 	int ret;
 
 	id_priv = mc->id_priv;
-	if (cma_disable_callback(id_priv, CMA_ADDR_BOUND) &&
-	    cma_disable_callback(id_priv, CMA_ADDR_RESOLVED))
+	if (cma_disable_callback(id_priv, RDMA_CM_ADDR_BOUND) &&
+	    cma_disable_callback(id_priv, RDMA_CM_ADDR_RESOLVED))
 		return 0;
 
 	mutex_lock(&id_priv->qp_mutex);
@@ -2912,7 +2902,7 @@ static int cma_ib_mc_handler(int status, struct ib_sa_multicast *multicast)
 
 	ret = id_priv->id.event_handler(&id_priv->id, &event);
 	if (ret) {
-		cma_exch(id_priv, CMA_DESTROYING);
+		cma_exch(id_priv, RDMA_CM_DESTROYING);
 		mutex_unlock(&id_priv->handler_mutex);
 		rdma_destroy_id(&id_priv->id);
 		return 0;
@@ -3095,8 +3085,8 @@ int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr,
 	int ret;
 
 	id_priv = container_of(id, struct rdma_id_private, id);
-	if (!cma_comp(id_priv, CMA_ADDR_BOUND) &&
-	    !cma_comp(id_priv, CMA_ADDR_RESOLVED))
+	if (!cma_comp(id_priv, RDMA_CM_ADDR_BOUND) &&
+	    !cma_comp(id_priv, RDMA_CM_ADDR_RESOLVED))
 		return -EINVAL;
 
 	mc = kmalloc(sizeof *mc, GFP_KERNEL);
@@ -3261,19 +3251,19 @@ static void cma_add_one(struct ib_device *device)
 static int cma_remove_id_dev(struct rdma_id_private *id_priv)
 {
 	struct rdma_cm_event event;
-	enum cma_state state;
+	enum rdma_cm_state state;
 	int ret = 0;
 
 	/* Record that we want to remove the device */
-	state = cma_exch(id_priv, CMA_DEVICE_REMOVAL);
-	if (state == CMA_DESTROYING)
+	state = cma_exch(id_priv, RDMA_CM_DEVICE_REMOVAL);
+	if (state == RDMA_CM_DESTROYING)
 		return 0;
 
 	cma_cancel_operation(id_priv, state);
 	mutex_lock(&id_priv->handler_mutex);
 
 	/* Check for destruction from another callback. */
-	if (!cma_comp(id_priv, CMA_DEVICE_REMOVAL))
+	if (!cma_comp(id_priv, RDMA_CM_DEVICE_REMOVAL))
 		goto out;
 
 	memset(&event, 0, sizeof event);